[r-cran-spatstat] 36/38: New upstream version 1.52-1

Andreas Tille <tille at debian.org>
Fri Oct 20 14:27:34 UTC 2017

This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-spatstat.

commit 605e7fe0d2f299ea82c00578f2026b2a442b1c2b
Author: Andreas Tille <tille at debian.org>
Date:   Fri Oct 20 16:24:22 2017 +0200

    New upstream version 1.52-1
---
 DESCRIPTION                              |   221 +
 MD5                                      |  1740 +++++
 NAMESPACE                                |  3734 ++++++++++
 NEWS                                     | 11039 +++++++++++++++++++++++++++++
 R/FGmultiInhom.R                         |   252 +
 R/Fest.R                                 |   196 +
 R/First.R                                |    61 +
 R/GJfox.R                                |    91 +
 R/Gcom.R                                 |   215 +
 R/Gest.R                                 |   131 +
 R/Gmulti.R                               |   238 +
 R/Gres.R                                 |    70 +
 R/Hest.R                                 |   121 +
 R/Iest.R                                 |    84 +
 R/Jest.R                                 |    74 +
 R/Jinhom.R                               |   368 +
 R/Jmulti.R                               |   178 +
 R/Kcom.R                                 |   399 ++
 R/Kest.R                                 |  1018 +++
 R/Kinhom.R                               |   463 ++
 R/Kmeasure.R                             |   521 ++
 R/Kmodel.R                               |    15 +
 R/Kmulti.R                               |   375 +
 R/Kmulti.inhom.R                         |   488 ++
 R/Kres.R                                 |    72 +
 R/Kscaled.R                              |   176 +
 R/Ksector.R                              |   231 +
 R/Math.im.R                              |    39 +
 R/Math.imlist.R                          |    40 +
 R/Math.linim.R                           |    48 +
 R/Tstat.R                                |   237 +
 R/aaaa.R                                 |    45 +
 R/adaptive.density.R                     |    59 +
 R/addvar.R                               |   369 +
 R/affine.R                               |   337 +
 R/allstats.R                             |    47 +
 R/alltypes.R                             |   198 +
 R/anova.mppm.R                           |   262 +
 R/anova.ppm.R                            |   308 +
 R/applynbd.R                             |    95 +
 R/areadiff.R                             |   269 +
 R/areainter.R                            |   359 +
 R/as.im.R                                |   318 +
 R/auc.R                                  |   164 +
 R/badgey.R                               |   205 +
 R/bc.R                                   |    73 +
 R/beginner.R                             |    36 +
 R/bermantest.R                           |   303 +
 R/blur.R                                 |   102 +
 R/boundingbox.R                          |   203 +
 R/boundingcircle.R                       |    69 +
 R/breakpts.R                             |   221 +
 R/bugtable.R                             |    79 +
 R/bw.diggle.R                            |    95 +
 R/bw.optim.R                             |   111 +
 R/bw.pcf.R                               |   172 +
 R/bw.ppl.R                               |    33 +
 R/by.ppp.R                               |    19 +
 R/cdf.test.mppm.R                        |   256 +
 R/cdftest.R                              |   437 ++
 R/centroid.R                             |   169 +
 R/circdensity.R                          |    49 +
 R/clarkevans.R                           |   205 +
 R/classes.R                              |    52 +
 R/clickjoin.R                            |    31 +
 R/clicklpp.R                             |    62 +
 R/clickpoly.R                            |    75 +
 R/clickppp.R                             |    85 +
 R/clip.psp.R                             |   242 +
 R/close3Dpairs.R                         |   213 +
 R/closepairs.R                           |   612 ++
 R/clusterfunctions.R                     |   101 +
 R/clusterinfo.R                          |   695 ++
 R/clusterset.R                           |    76 +
 R/colourschemes.R                        |    38 +
 R/colourtables.R                         |   530 ++
 R/colourtools.R                          |   184 +
 R/compareFit.R                           |    72 +
 R/compileK.R                             |   117 +
 R/concom.R                               |   131 +
 R/connected.R                            |   199 +
 R/convexify.R                            |    20 +
 R/copyExampleFiles.R                     |    42 +
 R/covariates.R                           |    49 +
 R/covering.R                             |    37 +
 R/crossdistlpp.R                         |   123 +
 R/cut.ppp.R                              |    54 +
 R/daogenton.R                            |   237 +
 R/datasetup.R                            |    13 +
 R/dclftest.R                             |   360 +
 R/defaultwin.R                           |    54 +
 R/deldir.R                               |   365 +
 R/deltametric.R                          |    39 +
 R/density.lpp.R                          |   275 +
 R/density.ppp.R                          |   833 +++
 R/density.psp.R                          |    76 +
 R/derivfv.R                              |   144 +
 R/detPPF-class.R                         |   255 +
 R/detpointprocfamilyfun.R                |   493 ++
 R/dg.R                                   |   167 +
 R/dgs.R                                  |   115 +
 R/diagnoseppm.R                          |   439 ++
 R/diagram.R                              |   322 +
 R/digestCovariates.R                     |    66 +
 R/disc.R                                 |   128 +
 R/discarea.R                             |   111 +
 R/dist2dpath.R                           |    68 +
 R/distan3D.R                             |   299 +
 R/distances.R                            |   203 +
 R/distances.psp.R                        |   141 +
 R/distanxD.R                             |   227 +
 R/distbdry.R                             |   222 +
 R/distcdf.R                              |   117 +
 R/distfun.R                              |   117 +
 R/distfunlpp.R                           |    45 +
 R/distmap.R                              |   121 +
 R/dppm.R                                 |   158 +
 R/dppmclass.R                            |    35 +
 R/dummify.R                              |    39 +
 R/dummy.R                                |   409 ++
 R/edgeRipley.R                           |   185 +
 R/edgeTrans.R                            |   150 +
 R/edges2triangles.R                      |   124 +
 R/edit.R                                 |    38 +
 R/eem.R                                  |    17 +
 R/effectfun.R                            |   187 +
 R/envelope.R                             |  2002 ++++++
 R/envelope3.R                            |    78 +
 R/envelopeArray.R                        |    95 +
 R/envelopelpp.R                          |   213 +
 R/eval.fasp.R                            |    95 +
 R/eval.fv.R                              |   282 +
 R/eval.im.R                              |   248 +
 R/evalcovar.R                            |   415 ++
 R/ewcdf.R                                |   126 +
 R/exactMPLEstrauss.R                     |    71 +
 R/exactPdt.R                             |    74 +
 R/exactdt.R                              |    75 +
 R/factors.R                              |    64 +
 R/fardist.R                              |    63 +
 R/fasp.R                                 |   219 +
 R/fgk3.R                                 |   497 ++
 R/fii.R                                  |   242 +
 R/fiksel.R                               |   182 +
 R/fitted.mppm.R                          |    63 +
 R/fitted.ppm.R                           |   134 +
 R/flipxy.R                               |    59 +
 R/fourierbasis.R                         |    20 +
 R/fryplot.R                              |    81 +
 R/funxy.R                                |   107 +
 R/fv.R                                   |  1465 ++++
 R/geyer.R                                |   365 +
 R/hackglmm.R                             |   103 +
 R/hardcore.R                             |   120 +
 R/harmonic.R                             |    58 +
 R/hasclose.R                             |   189 +
 R/hasenvelope.R                          |    27 +
 R/headtail.R                             |    22 +
 R/hermite.R                              |    76 +
 R/hexagons.R                             |    90 +
 R/hierarchy.R                            |    50 +
 R/hierhard.R                             |   196 +
 R/hierpair.family.R                      |   321 +
 R/hierstrauss.R                          |   239 +
 R/hierstrhard.R                          |   317 +
 R/ho.R                                   |    79 +
 R/hopskel.R                              |    84 +
 R/hybrid.R                               |   314 +
 R/hybrid.family.R                        |   173 +
 R/hyperframe.R                           |   640 ++
 R/hypersub.R                             |   213 +
 R/idw.R                                  |    69 +
 R/images.R                               |  1179 +++
 R/indicator.R                            |    20 +
 R/indices.R                              |   239 +
 R/infline.R                              |   255 +
 R/inforder.family.R                      |    94 +
 R/intensity.R                            |   304 +
 R/interact.R                             |   332 +
 R/interactions.R                         |   246 +
 R/interp.im.R                            |    61 +
 R/iplot.R                                |   347 +
 R/iplotlayered.R                         |   314 +
 R/ippm.R                                 |   257 +
 R/is.cadlag.R                            |    11 +
 R/is.subset.owin.R                       |    87 +
 R/istat.R                                |   165 +
 R/kernel2d.R                             |   131 +
 R/kernels.R                              |   275 +
 R/kmrs.R                                 |   242 +
 R/kppm.R                                 |  1726 +++++
 R/laslett.R                              |   339 +
 R/layered.R                              |   393 +
 R/lennard.R                              |   112 +
 R/levelset.R                             |    41 +
 R/leverage.R                             |   883 +++
 R/linalg.R                               |   241 +
 R/lindirichlet.R                         |   158 +
 R/linearK.R                              |   281 +
 R/linearKmulti.R                         |   293 +
 R/lineardisc.R                           |   245 +
 R/linearmrkcon.R                         |    63 +
 R/linearpcf.R                            |   177 +
 R/linearpcfmulti.R                       |   294 +
 R/linequad.R                             |   254 +
 R/linfun.R                               |   140 +
 R/linim.R                                |   652 ++
 R/linnet.R                               |   614 ++
 R/linnetsurgery.R                        |   200 +
 R/lintess.R                              |   259 +
 R/listof.R                               |    54 +
 R/lixellate.R                            |   105 +
 R/localK.R                               |   223 +
 R/localpcf.R                             |   206 +
 R/logistic.R                             |   391 +
 R/lohboot.R                              |   118 +
 R/lpp.R                                  |   664 ++
 R/lppm.R                                 |   343 +
 R/lurking.R                              |   482 ++
 R/markcorr.R                             |   846 +++
 R/marks.R                                |   388 +
 R/marktable.R                            |    65 +
 R/matrixpower.R                          |    78 +
 R/measures.R                             |   676 ++
 R/mincontrast.R                          |   909 +++
 R/minkowski.R                            |    89 +
 R/minnndist.R                            |    64 +
 R/model.depends.R                        |    86 +
 R/morisita.R                             |    41 +
 R/morphology.R                           |   395 ++
 R/mpl.R                                  |  1542 ++++
 R/mppm.R                                 |   659 ++
 R/multihard.R                            |   191 +
 R/multipair.util.R                       |    31 +
 R/multistrauss.R                         |   237 +
 R/multistrhard.R                         |   351 +
 R/nearestsegment.R                       |    79 +
 R/newformula.R                           |    18 +
 R/news.R                                 |    16 +
 R/nnclean.R                              |   243 +
 R/nncorr.R                               |   136 +
 R/nncross.R                              |   243 +
 R/nncross3D.R                            |   231 +
 R/nndensity.R                            |    36 +
 R/nndist.R                               |   365 +
 R/nndistlpp.R                            |   654 ++
 R/nnfun.R                                |    77 +
 R/nnfunlpp.R                             |    43 +
 R/nnmap.R                                |   219 +
 R/nnmark.R                               |    48 +
 R/nnorient.R                             |   139 +
 R/objsurf.R                              |   123 +
 R/options.R                              |   607 ++
 R/ord.R                                  |    52 +
 R/ord.family.R                           |   130 +
 R/ordthresh.R                            |    63 +
 R/otherpackages.R                        |    79 +
 R/pairdistlpp.R                          |   104 +
 R/pairorient.R                           |   218 +
 R/pairpiece.R                            |   130 +
 R/pairs.im.R                             |   134 +
 R/pairsat.family.R                       |   252 +
 R/pairwise.R                             |    78 +
 R/pairwise.family.R                      |   480 ++
 R/parameters.R                           |    30 +
 R/parres.R                               |   592 ++
 R/pcf.R                                  |   375 +
 R/pcfinhom.R                             |   234 +
 R/pcfmulti.R                             |   253 +
 R/pcfmulti.inhom.R                       |   298 +
 R/penttinen.R                            |    79 +
 R/percy.R                                |   100 +
 R/periodify.R                            |   127 +
 R/persp.im.R                             |   329 +
 R/pickoption.R                           |    51 +
 R/pixellate.R                            |   226 +
 R/plot.anylist.R                         |   571 ++
 R/plot.fasp.R                            |   183 +
 R/plot.fv.R                              |   768 ++
 R/plot.im.R                              |   772 ++
 R/plot.mppm.R                            |    24 +
 R/plot.owin.R                            |   296 +
 R/plot.plotppm.R                         |   144 +
 R/plot.ppm.R                             |   112 +
 R/plot.ppp.R                             |   500 ++
 R/plot3d.R                               |   197 +
 R/pointsonlines.R                        |    50 +
 R/poisson.R                              |    46 +
 R/polygood.R                             |   199 +
 R/polynom.R                              |    84 +
 R/pool.R                                 |    99 +
 R/pp3.R                                  |   254 +
 R/ppm.R                                  |   279 +
 R/ppmclass.R                             |   955 +++
 R/ppp.R                                  |   686 ++
 R/pppmatch.R                             |   823 +++
 R/ppqq.R                                 |   114 +
 R/ppx.R                                  |   543 ++
 R/predict.ppm.R                          |   793 +++
 R/predictmppm.R                          |   370 +
 R/profilepl.R                            |   367 +
 R/progress.R                             |   314 +
 R/psp.R                                  |   753 ++
 R/psp2pix.R                              |   132 +
 R/pspcross.R                             |   328 +
 R/psst.R                                 |   206 +
 R/psstA.R                                |   157 +
 R/psstG.R                                |   184 +
 R/qqplotppm.R                            |   333 +
 R/quadclass.R                            |   318 +
 R/quadratcount.R                         |   213 +
 R/quadratmtest.R                         |    17 +
 R/quadratresample.R                      |    45 +
 R/quadrattest.R                          |   506 ++
 R/quadscheme.R                           |   340 +
 R/quantess.R                             |   222 +
 R/quantiledensity.R                      |    94 +
 R/quasirandom.R                          |    55 +
 R/rLGCP.R                                |    94 +
 R/rPerfect.R                             |   411 ++
 R/rags.R                                 |    80 +
 R/random.R                               |  1084 +++
 R/randomImage.R                          |    21 +
 R/randomNS.R                             |   410 ++
 R/randomlpp.R                            |   101 +
 R/randommk.R                             |   477 ++
 R/randomonlines.R                        |   220 +
 R/randomseg.R                            |    80 +
 R/randomtess.R                           |    62 +
 R/rat.R                                  |   166 +
 R/reach.R                                |    54 +
 R/reduceformula.R                        |    91 +
 R/relrisk.R                              |   493 ++
 R/relrisk.ppm.R                          |   386 +
 R/replace.ppp.R                          |    74 +
 R/rescale.R                              |    65 +
 R/rescue.rectangle.R                     |    33 +
 R/resid4plot.R                           |   693 ++
 R/residppm.R                             |   119 +
 R/residuals.mppm.R                       |    90 +
 R/rho2hat.R                              |   267 +
 R/rhohat.R                               |   623 ++
 R/ripras.R                               |    62 +
 R/rknn.R                                 |    45 +
 R/rlabel.R                               |    27 +
 R/rmh.R                                  |     7 +
 R/rmh.default.R                          |  1084 +++
 R/rmh.ppm.R                              |   167 +
 R/rmhResolveTypes.R                      |    96 +
 R/rmhcontrol.R                           |   231 +
 R/rmhexpand.R                            |   220 +
 R/rmhmodel.R                             |  1335 ++++
 R/rmhmodel.ppm.R                         |   427 ++
 R/rmhsnoop.R                             |   573 ++
 R/rmhstart.R                             |    91 +
 R/rmhtemper.R                            |    76 +
 R/rose.R                                 |   306 +
 R/rotate.R                               |    95 +
 R/rotmean.R                              |    44 +
 R/round.R                                |    44 +
 R/rppm.R                                 |   126 +
 R/rshift.R                               |   175 +
 R/rshift.psp.R                           |    64 +
 R/satpiece.R                             |   136 +
 R/saturated.R                            |    59 +
 R/scanstat.R                             |   317 +
 R/scriptUtils.R                          |    39 +
 R/sdr.R                                  |   263 +
 R/segtest.R                              |    62 +
 R/setcov.R                               |   117 +
 R/sharpen.R                              |    65 +
 R/sigtrace.R                             |   170 +
 R/simplepanel.R                          |   221 +
 R/simulate.detPPF.R                      |   384 +
 R/simulatelppm.R                         |    35 +
 R/slrm.R                                 |   637 ++
 R/smooth.ppp.R                           |   818 +++
 R/smoothfun.R                            |    59 +
 R/smoothfv.R                             |    54 +
 R/softcore.R                             |   115 +
 R/solist.R                               |   219 +
 R/sparse3Darray.R                        |   916 +++
 R/sparsecommon.R                         |   232 +
 R/sparselinalg.R                         |   259 +
 R/spatialcdf.R                           |    60 +
 R/split.ppp.R                            |   323 +
 R/split.ppx.R                            |   143 +
 R/ssf.R                                  |   238 +
 R/stienen.R                              |    59 +
 R/strauss.R                              |   199 +
 R/strausshard.R                          |   135 +
 R/studpermutest.R                        |   561 ++
 R/subfits.R                              |   518 ++
 R/subset.R                               |    85 +
 R/suffstat.R                             |   120 +
 R/summary.im.R                           |   149 +
 R/summary.kppm.R                         |   143 +
 R/summary.mppm.R                         |   258 +
 R/summary.ppm.R                          |   586 ++
 R/summary.quad.R                         |   162 +
 R/superimpose.R                          |   272 +
 R/symbolmap.R                            |   659 ++
 R/sysdata.rda                            |   Bin 0 -> 97776 bytes
 R/terse.R                                |    50 +
 R/tess.R                                 |   892 +++
 R/texture.R                              |   373 +
 R/timed.R                                |   104 +
 R/transect.R                             |    83 +
 R/transmat.R                             |    68 +
 R/treebranches.R                         |   211 +
 R/triangulate.R                          |    36 +
 R/triplet.family.R                       |    93 +
 R/triplets.R                             |   155 +
 R/unique.ppp.R                           |   197 +
 R/units.R                                |   208 +
 R/unnormdensity.R                        |    67 +
 R/unstack.R                              |    85 +
 R/update.ppm.R                           |   375 +
 R/util.R                                 |   375 +
 R/varblock.R                             |   163 +
 R/varcount.R                             |    65 +
 R/vblogistic.R                           |   281 +
 R/vcov.kppm.R                            |   157 +
 R/vcov.mppm.R                            |   189 +
 R/vcov.ppm.R                             |  1674 +++++
 R/versions.R                             |    57 +
 R/weightedStats.R                        |   103 +
 R/weights.R                              |   301 +
 R/window.R                               |  1173 +++
 R/wingeom.R                              |  1102 +++
 R/zclustermodel.R                        |    89 +
 build/vignette.rds                       |   Bin 0 -> 362 bytes
 data/Kovesi.rda                          |   Bin 0 -> 13820 bytes
 data/amacrine.rda                        |   Bin 0 -> 3215 bytes
 data/anemones.rda                        |   Bin 0 -> 1079 bytes
 data/ants.rda                            |   Bin 0 -> 2053 bytes
 data/austates.rda                        |   Bin 0 -> 40084 bytes
 data/bdspots.rda                         |   Bin 0 -> 25860 bytes
 data/bei.rda                             |   Bin 0 -> 182512 bytes
 data/betacells.rda                       |   Bin 0 -> 1893 bytes
 data/bramblecanes.rda                    |   Bin 0 -> 3753 bytes
 data/bronzefilter.rda                    |   Bin 0 -> 3808 bytes
 data/cells.rda                           |   Bin 0 -> 576 bytes
 data/chicago.rda                         |   Bin 0 -> 526172 bytes
 data/chorley.rda                         |   Bin 0 -> 6464 bytes
 data/clmfires.rda                        |   Bin 0 -> 581332 bytes
 data/copper.rda                          |   Bin 0 -> 5011 bytes
 data/datalist                            |    55 +
 data/demohyper.rda                       |   Bin 0 -> 27364 bytes
 data/demopat.rda                         |   Bin 0 -> 1343 bytes
 data/dendrite.rda                        |   Bin 0 -> 23600 bytes
 data/finpines.rda                        |   Bin 0 -> 2757 bytes
 data/flu.rda                             |   Bin 0 -> 81300 bytes
 data/ganglia.rda                         |   Bin 0 -> 1228 bytes
 data/gordon.rda                          |   Bin 0 -> 3028 bytes
 data/gorillas.rda                        |   Bin 0 -> 103956 bytes
 data/hamster.rda                         |   Bin 0 -> 2169 bytes
 data/heather.rda                         |   Bin 0 -> 52826 bytes
 data/humberside.rda                      |   Bin 0 -> 1737 bytes
 data/hyytiala.rda                        |   Bin 0 -> 2552 bytes
 data/japanesepines.rda                   |   Bin 0 -> 621 bytes
 data/lansing.rda                         |   Bin 0 -> 7115 bytes
 data/letterR.rda                         |   Bin 0 -> 609 bytes
 data/longleaf.rda                        |   Bin 0 -> 3582 bytes
 data/mucosa.rda                          |   Bin 0 -> 13424 bytes
 data/murchison.rda                       |   Bin 0 -> 53236 bytes
 data/nbfires.rda                         |   Bin 0 -> 167324 bytes
 data/nztrees.rda                         |   Bin 0 -> 543 bytes
 data/osteo.rda                           |   Bin 0 -> 3946 bytes
 data/paracou.rda                         |   Bin 0 -> 4870 bytes
 data/ponderosa.rda                       |   Bin 0 -> 1789 bytes
 data/pyramidal.rda                       |   Bin 0 -> 6158 bytes
 data/redwood.rda                         |   Bin 0 -> 609 bytes
 data/redwood3.rda                        |   Bin 0 -> 802 bytes
 data/redwoodfull.rda                     |   Bin 0 -> 2874 bytes
 data/residualspaper.rda                  |   Bin 0 -> 527947 bytes
 data/shapley.rda                         |   Bin 0 -> 37584 bytes
 data/simba.rda                           |   Bin 0 -> 6463 bytes
 data/simdat.rda                          |   Bin 0 -> 1368 bytes
 data/simplenet.rda                       |   Bin 0 -> 1289 bytes
 data/spiders.rda                         |   Bin 0 -> 8872 bytes
 data/sporophores.rda                     |   Bin 0 -> 6331 bytes
 data/spruces.rda                         |   Bin 0 -> 1200 bytes
 data/swedishpines.rda                    |   Bin 0 -> 496 bytes
 data/urkiola.rda                         |   Bin 0 -> 4792 bytes
 data/vesicles.rda                        |   Bin 0 -> 6544 bytes
 data/waka.rda                            |   Bin 0 -> 4362 bytes
 data/waterstriders.rda                   |   Bin 0 -> 1055 bytes
 debian/README.source                     |    19 -
 debian/README.test                       |    13 -
 debian/changelog                         |    50 -
 debian/compat                            |     1 -
 debian/control                           |    43 -
 debian/copyright                         |    68 -
 debian/docs                              |     3 -
 debian/lintian-overrides                 |     2 -
 debian/rules                             |    14 -
 debian/source/format                     |     1 -
 debian/tests/control                     |     3 -
 debian/tests/run-unit-test               |    19 -
 debian/upstream/metadata                 |    10 -
 debian/watch                             |     2 -
 demo/00Index                             |     4 +
 demo/data.R                              |   137 +
 demo/diagnose.R                          |   161 +
 demo/spatstat.R                          |   726 ++
 demo/sumfun.R                            |   169 +
 inst/CITATION                            |    60 +
 inst/doc/BEGINNER.txt                    |    37 +
 inst/doc/datasets.R                      |   517 ++
 inst/doc/datasets.Rnw                    |   870 +++
 inst/doc/datasets.pdf                    |   Bin 0 -> 3756413 bytes
 inst/doc/getstart.R                      |   151 +
 inst/doc/getstart.Rnw                    |   397 ++
 inst/doc/getstart.pdf                    |   Bin 0 -> 121981 bytes
 inst/doc/packagesizes.txt                |   192 +
 inst/doc/replicated.R                    |   528 ++
 inst/doc/replicated.Rnw                  |  1525 ++++
 inst/doc/replicated.pdf                  |   Bin 0 -> 396777 bytes
 inst/doc/shapefiles.R                    |   162 +
 inst/doc/shapefiles.Rnw                  |   497 ++
 inst/doc/shapefiles.pdf                  |   Bin 0 -> 79258 bytes
 inst/doc/spatstatlocalsize.txt           |     2 +
 inst/doc/updates.R                       |    81 +
 inst/doc/updates.Rnw                     |  2197 ++++++
 inst/doc/updates.pdf                     |   Bin 0 -> 177672 bytes
 inst/ratfor/Makefile                     |    58 +
 inst/ratfor/dppll.r                      |    55 +
 inst/ratfor/inxypOld.r                   |    49 +
 inst/rawdata/amacrine/amacrine.txt       |   295 +
 inst/rawdata/finpines/finpines.txt       |   127 +
 inst/rawdata/gorillas/vegetation.asc     |   155 +
 inst/rawdata/osteo/osteo36.txt           |    30 +
 inst/rawdata/sandholes/sandholes.jpg     |   Bin 0 -> 95101 bytes
 inst/rawdata/vesicles/activezone.txt     |    10 +
 inst/rawdata/vesicles/mitochondria.txt   |    24 +
 inst/rawdata/vesicles/presynapse.txt     |    70 +
 inst/rawdata/vesicles/vesicles.csv       |    38 +
 inst/rawdata/vesicles/vesicles.txt       |    38 +
 inst/rawdata/vesicles/vesiclesimage.tif  |   Bin 0 -> 106299 bytes
 inst/rawdata/vesicles/vesiclesmask.tif   |   Bin 0 -> 2136 bytes
 inst/rawdata/vesicles/vesicleswindow.csv |    93 +
 inst/rawdata/vesicles/vesicleswindow.txt |    93 +
 man/AreaInter.Rd                         |   208 +
 man/BadGey.Rd                            |   120 +
 man/CDF.Rd                               |    56 +
 man/Concom.Rd                            |   148 +
 man/DiggleGatesStibbard.Rd               |    86 +
 man/DiggleGratton.Rd                     |    84 +
 man/Emark.Rd                             |   186 +
 man/Extract.anylist.Rd                   |    52 +
 man/Extract.fasp.Rd                      |    69 +
 man/Extract.fv.Rd                        |   110 +
 man/Extract.hyperframe.Rd                |   122 +
 man/Extract.im.Rd                        |   213 +
 man/Extract.influence.ppm.Rd             |    64 +
 man/Extract.layered.Rd                   |    88 +
 man/Extract.leverage.ppm.Rd              |    74 +
 man/Extract.linim.Rd                     |    62 +
 man/Extract.linnet.Rd                    |    66 +
 man/Extract.listof.Rd                    |    47 +
 man/Extract.lpp.Rd                       |   101 +
 man/Extract.msr.Rd                       |    51 +
 man/Extract.owin.Rd                      |    51 +
 man/Extract.ppp.Rd                       |   206 +
 man/Extract.ppx.Rd                       |    71 +
 man/Extract.psp.Rd                       |    98 +
 man/Extract.quad.Rd                      |    50 +
 man/Extract.solist.Rd                    |    63 +
 man/Extract.splitppp.Rd                  |    55 +
 man/Extract.ssf.Rd                       |    46 +
 man/Extract.tess.Rd                      |    69 +
 man/F3est.Rd                             |   162 +
 man/Fest.Rd                              |   311 +
 man/Fiksel.Rd                            |   114 +
 man/Finhom.Rd                            |   178 +
 man/FmultiInhom.Rd                       |    80 +
 man/Frame.Rd                             |    74 +
 man/G3est.Rd                             |   118 +
 man/Gcom.Rd                              |   273 +
 man/Gcross.Rd                            |   246 +
 man/Gdot.Rd                              |   237 +
 man/Gest.Rd                              |   240 +
 man/Geyer.Rd                             |   124 +
 man/Gfox.Rd                              |   118 +
 man/Ginhom.Rd                            |   186 +
 man/Gmulti.Rd                            |   206 +
 man/GmultiInhom.Rd                       |   100 +
 man/Gres.Rd                              |   102 +
 man/Hardcore.Rd                          |    95 +
 man/Hest.Rd                              |   166 +
 man/HierHard.Rd                          |   120 +
 man/HierStrauss.Rd                       |   120 +
 man/HierStraussHard.Rd                   |   127 +
 man/Hybrid.Rd                            |   101 +
 man/Iest.Rd                              |   145 +
 man/Jcross.Rd                            |   193 +
 man/Jdot.Rd                              |   200 +
 man/Jest.Rd                              |   250 +
 man/Jinhom.Rd                            |   182 +
 man/Jmulti.Rd                            |   164 +
 man/K3est.Rd                             |   116 +
 man/Kcom.Rd                              |   245 +
 man/Kcross.Rd                            |   217 +
 man/Kcross.inhom.Rd                      |   321 +
 man/Kdot.Rd                              |   211 +
 man/Kdot.inhom.Rd                        |   301 +
 man/Kest.Rd                              |   336 +
 man/Kest.fft.Rd                          |   107 +
 man/Kinhom.Rd                            |   408 ++
 man/Kmark.Rd                             |   191 +
 man/Kmeasure.Rd                          |   180 +
 man/Kmodel.Rd                            |    62 +
 man/Kmodel.dppm.Rd                       |    42 +
 man/Kmodel.kppm.Rd                       |    69 +
 man/Kmodel.ppm.Rd                        |    83 +
 man/Kmulti.Rd                            |   223 +
 man/Kmulti.inhom.Rd                      |   285 +
 man/Kovesi.Rd                            |    88 +
 man/Kres.Rd                              |   106 +
 man/Kscaled.Rd                           |   243 +
 man/Ksector.Rd                           |    95 +
 man/LambertW.Rd                          |    61 +
 man/Lcross.Rd                            |    93 +
 man/Lcross.inhom.Rd                      |   118 +
 man/Ldot.Rd                              |    86 +
 man/Ldot.inhom.Rd                        |   103 +
 man/LennardJones.Rd                      |   142 +
 man/Lest.Rd                              |    95 +
 man/Linhom.Rd                            |    88 +
 man/Math.im.Rd                           |   116 +
 man/Math.imlist.Rd                       |   105 +
 man/Math.linim.Rd                        |   108 +
 man/MinkowskiSum.Rd                      |   112 +
 man/MultiHard.Rd                         |    88 +
 man/MultiStrauss.Rd                      |   113 +
 man/MultiStraussHard.Rd                  |   101 +
 man/Ops.msr.Rd                           |    60 +
 man/Ord.Rd                               |    70 +
 man/OrdThresh.Rd                         |    66 +
 man/PPversion.Rd                         |    90 +
 man/PairPiece.Rd                         |   108 +
 man/Pairwise.Rd                          |   107 +
 man/Penttinen.Rd                         |    84 +
 man/Poisson.Rd                           |    72 +
 man/Replace.im.Rd                        |   133 +
 man/SatPiece.Rd                          |   127 +
 man/Saturated.Rd                         |    40 +
 man/Smooth.Rd                            |    39 +
 man/Smooth.fv.Rd                         |    98 +
 man/Smooth.msr.Rd                        |    75 +
 man/Smooth.ppp.Rd                        |   209 +
 man/Smooth.ssf.Rd                        |    45 +
 man/Smoothfun.ppp.Rd                     |    73 +
 man/Softcore.Rd                          |   128 +
 man/Strauss.Rd                           |   100 +
 man/StraussHard.Rd                       |   124 +
 man/Triplets.Rd                          |   102 +
 man/Tstat.Rd                             |    91 +
 man/Window.Rd                            |   110 +
 man/WindowOnly.Rd                        |   104 +
 man/adaptive.density.Rd                  |   101 +
 man/add.texture.Rd                       |    76 +
 man/addvar.Rd                            |   183 +
 man/affine.Rd                            |    47 +
 man/affine.im.Rd                         |    56 +
 man/affine.linnet.Rd                     |    94 +
 man/affine.lpp.Rd                        |    96 +
 man/affine.owin.Rd                       |    65 +
 man/affine.ppp.Rd                        |    61 +
 man/affine.psp.Rd                        |    63 +
 man/affine.tess.Rd                       |   109 +
 man/allstats.Rd                          |    95 +
 man/alltypes.Rd                          |   252 +
 man/amacrine.Rd                          |    43 +
 man/anemones.Rd                          |    70 +
 man/angles.psp.Rd                        |    59 +
 man/anova.lppm.Rd                        |   113 +
 man/anova.mppm.Rd                        |   131 +
 man/anova.ppm.Rd                         |   175 +
 man/anova.slrm.Rd                        |    61 +
 man/ants.Rd                              |   169 +
 man/anyNA.im.Rd                          |    46 +
 man/anylist.Rd                           |    57 +
 man/append.psp.Rd                        |    47 +
 man/applynbd.Rd                          |   222 +
 man/area.owin.Rd                         |    75 +
 man/areaGain.Rd                          |    80 +
 man/areaLoss.Rd                          |    74 +
 man/as.box3.Rd                           |    52 +
 man/as.boxx.Rd                           |    47 +
 man/as.data.frame.envelope.Rd            |    46 +
 man/as.data.frame.hyperframe.Rd          |    49 +
 man/as.data.frame.im.Rd                  |    40 +
 man/as.data.frame.owin.Rd                |    63 +
 man/as.data.frame.ppp.Rd                 |    40 +
 man/as.data.frame.psp.Rd                 |    45 +
 man/as.data.frame.tess.Rd                |    55 +
 man/as.function.fv.Rd                    |   118 +
 man/as.function.im.Rd                    |    50 +
 man/as.function.leverage.ppm.Rd          |    51 +
 man/as.function.owin.Rd                  |    48 +
 man/as.function.tess.Rd                  |    60 +
 man/as.fv.Rd                             |   108 +
 man/as.hyperframe.Rd                     |    95 +
 man/as.hyperframe.ppx.Rd                 |    80 +
 man/as.im.Rd                             |   272 +
 man/as.interact.Rd                       |    66 +
 man/as.layered.Rd                        |    86 +
 man/as.linfun.Rd                         |    80 +
 man/as.linim.Rd                          |    97 +
 man/as.linnet.linim.Rd                   |    68 +
 man/as.linnet.psp.Rd                     |    79 +
 man/as.lpp.Rd                            |    95 +
 man/as.mask.Rd                           |   101 +
 man/as.mask.psp.Rd                       |    64 +
 man/as.matrix.im.Rd                      |    55 +
 man/as.matrix.owin.Rd                    |    50 +
 man/as.owin.Rd                           |   241 +
 man/as.polygonal.Rd                      |    69 +
 man/as.ppm.Rd                            |    75 +
 man/as.ppp.Rd                            |   154 +
 man/as.psp.Rd                            |   170 +
 man/as.rectangle.Rd                      |    67 +
 man/as.solist.Rd                         |    53 +
 man/as.tess.Rd                           |    91 +
 man/auc.Rd                               |   110 +
 man/austates.Rd                          |    36 +
 man/bc.ppm.Rd                            |    75 +
 man/bdist.pixels.Rd                      |    84 +
 man/bdist.points.Rd                      |    50 +
 man/bdist.tiles.Rd                       |    47 +
 man/bdspots.Rd                           |    87 +
 man/beachcolours.Rd                      |    94 +
 man/beginner.Rd                          |    44 +
 man/begins.Rd                            |    42 +
 man/bei.Rd                               |    71 +
 man/berman.test.Rd                       |   194 +
 man/betacells.Rd                         |   101 +
 man/bind.fv.Rd                           |   114 +
 man/bits.test.Rd                         |   137 +
 man/blur.Rd                              |   103 +
 man/border.Rd                            |    67 +
 man/bounding.box.xy.Rd                   |    58 +
 man/boundingbox.Rd                       |    77 +
 man/boundingcircle.Rd                    |    85 +
 man/box3.Rd                              |    61 +
 man/boxx.Rd                              |    51 +
 man/bramblecanes.Rd                      |    66 +
 man/branchlabelfun.Rd                    |    69 +
 man/bronzefilter.Rd                      |    55 +
 man/bugfixes.Rd                          |    67 +
 man/bw.diggle.Rd                         |   114 +
 man/bw.frac.Rd                           |    79 +
 man/bw.pcf.Rd                            |   137 +
 man/bw.ppl.Rd                            |   100 +
 man/bw.relrisk.Rd                        |   112 +
 man/bw.scott.Rd                          |    64 +
 man/bw.smoothppp.Rd                      |    93 +
 man/bw.stoyan.Rd                         |    69 +
 man/by.im.Rd                             |    62 +
 man/by.ppp.Rd                            |    84 +
 man/cauchy.estK.Rd                       |   152 +
 man/cauchy.estpcf.Rd                     |   159 +
 man/cbind.hyperframe.Rd                  |    63 +
 man/cdf.test.Rd                          |   291 +
 man/cdf.test.mppm.Rd                     |   207 +
 man/cells.Rd                             |    39 +
 man/centroid.owin.Rd                     |    83 +
 man/chicago.Rd                           |    66 +
 man/chop.tess.Rd                         |    60 +
 man/chorley.Rd                           |   104 +
 man/circdensity.Rd                       |    62 +
 man/clarkevans.Rd                        |   135 +
 man/clarkevans.test.Rd                   |   108 +
 man/clickbox.Rd                          |    50 +
 man/clickdist.Rd                         |    38 +
 man/clickjoin.Rd                         |    72 +
 man/clicklpp.Rd                          |    83 +
 man/clickpoly.Rd                         |    68 +
 man/clickppp.Rd                          |    83 +
 man/clip.infline.Rd                      |    48 +
 man/clmfires.Rd                          |   102 +
 man/closepairs.Rd                        |   159 +
 man/closepairs.pp3.Rd                    |   115 +
 man/closetriples.Rd                      |    42 +
 man/closing.Rd                           |    84 +
 man/clusterfield.Rd                      |   108 +
 man/clusterfit.Rd                        |   143 +
 man/clusterkernel.Rd                     |    56 +
 man/clusterradius.Rd                     |    99 +
 man/clusterset.Rd                        |   145 +
 man/coef.mppm.Rd                         |   106 +
 man/coef.ppm.Rd                          |    79 +
 man/coef.slrm.Rd                         |    51 +
 man/collapse.fv.Rd                       |    93 +
 man/colourmap.Rd                         |   118 +
 man/colourtools.Rd                       |   188 +
 man/commonGrid.Rd                        |    61 +
 man/compareFit.Rd                        |   130 +
 man/compatible.Rd                        |    41 +
 man/compatible.fasp.Rd                   |    41 +
 man/compatible.fv.Rd                     |    42 +
 man/compatible.im.Rd                     |    41 +
 man/compileK.Rd                          |   128 +
 man/complement.owin.Rd                   |    71 +
 man/concatxy.Rd                          |    49 +
 man/connected.Rd                         |   123 +
 man/connected.linnet.Rd                  |    61 +
 man/connected.lpp.Rd                     |    78 +
 man/connected.ppp.Rd                     |    68 +
 man/contour.im.Rd                        |   119 +
 man/contour.imlist.Rd                    |    57 +
 man/convexhull.Rd                        |    47 +
 man/convexhull.xy.Rd                     |    58 +
 man/convexify.Rd                         |    68 +
 man/convolve.im.Rd                       |    79 +
 man/coords.Rd                            |    84 +
 man/copper.Rd                            |   119 +
 man/copyExampleFiles.Rd                  |    54 +
 man/corners.Rd                           |    44 +
 man/covering.Rd                          |    51 +
 man/crossdist.Rd                         |    54 +
 man/crossdist.default.Rd                 |    89 +
 man/crossdist.lpp.Rd                     |    72 +
 man/crossdist.pp3.Rd                     |    73 +
 man/crossdist.ppp.Rd                     |    85 +
 man/crossdist.ppx.Rd                     |    62 +
 man/crossdist.psp.Rd                     |    81 +
 man/crossing.linnet.Rd                   |    44 +
 man/crossing.psp.Rd                      |    73 +
 man/cut.im.Rd                            |    67 +
 man/cut.lpp.Rd                           |   111 +
 man/cut.ppp.Rd                           |   138 +
 man/data.ppm.Rd                          |    49 +
 man/dclf.progress.Rd                     |   160 +
 man/dclf.sigtrace.Rd                     |   168 +
 man/dclf.test.Rd                         |   270 +
 man/default.dummy.Rd                     |   114 +
 man/default.expand.Rd                    |   108 +
 man/default.rmhcontrol.Rd                |    59 +
 man/delaunay.Rd                          |    51 +
 man/delaunayDistance.Rd                  |    54 +
 man/delaunayNetwork.Rd                   |    56 +
 man/deletebranch.Rd                      |    94 +
 man/deltametric.Rd                       |   103 +
 man/demohyper.Rd                         |    42 +
 man/demopat.Rd                           |    26 +
 man/dendrite.Rd                          |    46 +
 man/density.lpp.Rd                       |   131 +
 man/density.ppp.Rd                       |   383 +
 man/density.psp.Rd                       |    77 +
 man/density.splitppp.Rd                  |    80 +
 man/deriv.fv.Rd                          |   115 +
 man/detpointprocfamilyfun.Rd             |   183 +
 man/dfbetas.ppm.Rd                       |    95 +
 man/dg.envelope.Rd                       |   130 +
 man/dg.progress.Rd                       |   183 +
 man/dg.sigtrace.Rd                       |   180 +
 man/dg.test.Rd                           |   148 +
 man/diagnose.ppm.Rd                      |   416 ++
 man/diameter.Rd                          |    44 +
 man/diameter.box3.Rd                     |    82 +
 man/diameter.boxx.Rd                     |    74 +
 man/diameter.linnet.Rd                   |    52 +
 man/diameter.owin.Rd                     |    52 +
 man/dilated.areas.Rd                     |    79 +
 man/dilation.Rd                          |    83 +
 man/dim.detpointprocfamily.Rd            |    22 +
 man/dimhat.Rd                            |    53 +
 man/dirichlet.Rd                         |    58 +
 man/dirichletAreas.Rd                    |    50 +
 man/dirichletVertices.Rd                 |    71 +
 man/dirichletWeights.Rd                  |    61 +
 man/disc.Rd                              |    85 +
 man/discpartarea.Rd                      |    74 +
 man/discretise.Rd                        |    89 +
 man/discs.Rd                             |   101 +
 man/distcdf.Rd                           |   109 +
 man/distfun.Rd                           |   101 +
 man/distfun.lpp.Rd                       |    87 +
 man/distmap.Rd                           |    63 +
 man/distmap.owin.Rd                      |    98 +
 man/distmap.ppp.Rd                       |    76 +
 man/distmap.psp.Rd                       |    75 +
 man/divide.linnet.Rd                     |    46 +
 man/dkernel.Rd                           |    94 +
 man/dmixpois.Rd                          |    96 +
 man/domain.Rd                            |   145 +
 man/dppBessel.Rd                         |    42 +
 man/dppCauchy.Rd                         |    50 +
 man/dppGauss.Rd                          |    47 +
 man/dppMatern.Rd                         |    51 +
 man/dppPowerExp.Rd                       |    48 +
 man/dppapproxkernel.Rd                   |    30 +
 man/dppapproxpcf.Rd                      |    31 +
 man/dppeigen.Rd                          |    29 +
 man/dppkernel.Rd                         |    28 +
 man/dppm.Rd                              |   321 +
 man/dppparbounds.Rd                      |    33 +
 man/dppspecden.Rd                        |    30 +
 man/dppspecdenrange.Rd                   |    30 +
 man/dummify.Rd                           |    63 +
 man/dummy.ppm.Rd                         |    74 +
 man/duplicated.ppp.Rd                    |    95 +
 man/edge.Ripley.Rd                       |   106 +
 man/edge.Trans.Rd                        |   146 +
 man/edges.Rd                             |    49 +
 man/edges2triangles.Rd                   |    63 +
 man/edges2vees.Rd                        |    58 +
 man/edit.hyperframe.Rd                   |    56 +
 man/edit.ppp.Rd                          |    68 +
 man/eem.Rd                               |    84 +
 man/effectfun.Rd                         |   106 +
 man/ellipse.Rd                           |    76 +
 man/emend.Rd                             |    47 +
 man/emend.ppm.Rd                         |   116 +
 man/endpoints.psp.Rd                     |    91 +
 man/envelope.Rd                          |   812 +++
 man/envelope.envelope.Rd                 |   100 +
 man/envelope.lpp.Rd                      |   264 +
 man/envelope.pp3.Rd                      |   239 +
 man/envelopeArray.Rd                     |    82 +
 man/eroded.areas.Rd                      |    58 +
 man/erosion.Rd                           |    91 +
 man/erosionAny.Rd                        |    71 +
 man/eval.fasp.Rd                         |    96 +
 man/eval.fv.Rd                           |   143 +
 man/eval.im.Rd                           |    90 +
 man/eval.linim.Rd                        |    86 +
 man/ewcdf.Rd                             |    59 +
 man/exactMPLEstrauss.Rd                  |   123 +
 man/expand.owin.Rd                       |    55 +
 man/fardist.Rd                           |    65 +
 man/fasp.object.Rd                       |   103 +
 man/finpines.Rd                          |    67 +
 man/fitin.Rd                             |    83 +
 man/fitted.lppm.Rd                       |    90 +
 man/fitted.mppm.Rd                       |    83 +
 man/fitted.ppm.Rd                        |   137 +
 man/fitted.slrm.Rd                       |    54 +
 man/fixef.mppm.Rd                        |    62 +
 man/flipxy.Rd                            |    52 +
 man/flu.Rd                               |   108 +
 man/foo.Rd                               |    59 +
 man/formula.fv.Rd                        |    77 +
 man/formula.ppm.Rd                       |    72 +
 man/fourierbasis.Rd                      |    63 +
 man/fryplot.Rd                           |   149 +
 man/funxy.Rd                             |    67 +
 man/fv.Rd                                |   202 +
 man/fv.object.Rd                         |    59 +
 man/fvnames.Rd                           |    86 +
 man/ganglia.Rd                           |    71 +
 man/gauss.hermite.Rd                     |    61 +
 man/gordon.Rd                            |    35 +
 man/gorillas.Rd                          |   151 +
 man/gridcentres.Rd                       |    76 +
 man/gridweights.Rd                       |    69 +
 man/grow.boxx.Rd                         |    49 +
 man/grow.rectangle.Rd                    |    61 +
 man/hamster.Rd                           |    48 +
 man/harmonic.Rd                          |    74 +
 man/harmonise.Rd                         |    53 +
 man/harmonise.fv.Rd                      |    92 +
 man/harmonise.im.Rd                      |    67 +
 man/harmonise.msr.Rd                     |    44 +
 man/harmonise.owin.Rd                    |    68 +
 man/has.close.Rd                         |    71 +
 man/headtail.Rd                          |    80 +
 man/heather.Rd                           |   109 +
 man/hextess.Rd                           |    89 +
 man/hierpair.family.Rd                   |    38 +
 man/hist.funxy.Rd                        |    67 +
 man/hist.im.Rd                           |    78 +
 man/hopskel.Rd                           |   107 +
 man/humberside.Rd                        |    86 +
 man/hybrid.family.Rd                     |    40 +
 man/hyperframe.Rd                        |   108 +
 man/hyytiala.Rd                          |    41 +
 man/identify.ppp.Rd                      |    62 +
 man/identify.psp.Rd                      |    61 +
 man/idw.Rd                               |   115 +
 man/im.Rd                                |   146 +
 man/im.apply.Rd                          |    55 +
 man/im.object.Rd                         |    97 +
 man/imcov.Rd                             |    73 +
 man/improve.kppm.Rd                      |   136 +
 man/incircle.Rd                          |    59 +
 man/increment.fv.Rd                      |    48 +
 man/infline.Rd                           |   100 +
 man/influence.ppm.Rd                     |    95 +
 man/inforder.family.Rd                   |    45 +
 man/insertVertices.Rd                    |    82 +
 man/inside.boxx.Rd                       |    71 +
 man/inside.owin.Rd                       |    89 +
 man/integral.im.Rd                       |    83 +
 man/integral.linim.Rd                    |    61 +
 man/integral.msr.Rd                      |    67 +
 man/intensity.Rd                         |    51 +
 man/intensity.dppm.Rd                    |    35 +
 man/intensity.lpp.Rd                     |    48 +
 man/intensity.ppm.Rd                     |    95 +
 man/intensity.ppp.Rd                     |    92 +
 man/intensity.ppx.Rd                     |    46 +
 man/intensity.quadratcount.Rd            |    74 +
 man/interp.colourmap.Rd                  |    56 +
 man/interp.im.Rd                         |    62 +
 man/intersect.owin.Rd                    |   130 +
 man/intersect.tess.Rd                    |    82 +
 man/invoke.symbolmap.Rd                  |    72 +
 man/iplot.Rd                             |    93 +
 man/ippm.Rd                              |   171 +
 man/is.connected.Rd                      |    62 +
 man/is.connected.ppp.Rd                  |    54 +
 man/is.convex.Rd                         |    43 +
 man/is.dppm.Rd                           |    18 +
 man/is.empty.Rd                          |    51 +
 man/is.hybrid.Rd                         |    73 +
 man/is.im.Rd                             |    33 +
 man/is.lpp.Rd                            |    30 +
 man/is.marked.Rd                         |    51 +
 man/is.marked.ppm.Rd                     |    89 +
 man/is.marked.ppp.Rd                     |    62 +
 man/is.multitype.Rd                      |    57 +
 man/is.multitype.ppm.Rd                  |    89 +
 man/is.multitype.ppp.Rd                  |    70 +
 man/is.owin.Rd                           |    36 +
 man/is.ppm.Rd                            |    40 +
 man/is.ppp.Rd                            |    36 +
 man/is.rectangle.Rd                      |    42 +
 man/is.stationary.Rd                     |   123 +
 man/is.subset.owin.Rd                    |    50 +
 man/istat.Rd                             |    63 +
 man/japanesepines.Rd                     |    39 +
 man/kaplan.meier.Rd                      |    87 +
 man/kernel.factor.Rd                     |    57 +
 man/kernel.moment.Rd                     |    66 +
 man/kernel.squint.Rd                     |    63 +
 man/km.rs.Rd                             |    90 +
 man/kppm.Rd                              |   424 ++
 man/lansing.Rd                           |   106 +
 man/laslett.Rd                           |   152 +
 man/latest.news.Rd                       |    58 +
 man/layered.Rd                           |    92 +
 man/layerplotargs.Rd                     |    69 +
 man/layout.boxes.Rd                      |    60 +
 man/lengths.psp.Rd                       |    51 +
 man/letterR.Rd                           |    20 +
 man/levelset.Rd                          |    70 +
 man/leverage.ppm.Rd                      |   102 +
 man/lgcp.estK.Rd                         |   245 +
 man/lgcp.estpcf.Rd                       |   227 +
 man/linearK.Rd                           |    79 +
 man/linearKcross.Rd                      |    89 +
 man/linearKcross.inhom.Rd                |   126 +
 man/linearKdot.Rd                        |    86 +
 man/linearKdot.inhom.Rd                  |   122 +
 man/linearKinhom.Rd                      |   165 +
 man/lineardirichlet.Rd                   |    50 +
 man/lineardisc.Rd                        |   108 +
 man/linearmarkconnect.Rd                 |    92 +
 man/linearmarkequal.Rd                   |    77 +
 man/linearpcf.Rd                         |   102 +
 man/linearpcfcross.Rd                    |    93 +
 man/linearpcfcross.inhom.Rd              |   128 +
 man/linearpcfdot.Rd                      |    89 +
 man/linearpcfdot.inhom.Rd                |   124 +
 man/linearpcfinhom.Rd                    |   146 +
 man/linequad.Rd                          |    65 +
 man/linfun.Rd                            |    74 +
 man/linim.Rd                             |    88 +
 man/linnet.Rd                            |    99 +
 man/lintess.Rd                           |    80 +
 man/lixellate.Rd                         |    83 +
 man/localK.Rd                            |   132 +
 man/localKinhom.Rd                       |   139 +
 man/localpcf.Rd                          |   151 +
 man/logLik.dppm.Rd                       |   103 +
 man/logLik.kppm.Rd                       |   102 +
 man/logLik.mppm.Rd                       |   127 +
 man/logLik.ppm.Rd                        |   155 +
 man/logLik.slrm.Rd                       |    65 +
 man/lohboot.Rd                           |   139 +
 man/longleaf.Rd                          |    54 +
 man/lpp.Rd                               |   125 +
 man/lppm.Rd                              |   120 +
 man/lurking.Rd                           |   281 +
 man/lut.Rd                               |    97 +
 man/macros/defns.Rd                      |    19 +
 man/markconnect.Rd                       |   186 +
 man/markcorr.Rd                          |   319 +
 man/markcrosscorr.Rd                     |   117 +
 man/marks.Rd                             |   115 +
 man/marks.psp.Rd                         |    79 +
 man/marks.tess.Rd                        |    81 +
 man/markstat.Rd                          |   107 +
 man/marktable.Rd                         |    86 +
 man/markvario.Rd                         |   119 +
 man/matchingdist.Rd                      |   105 +
 man/matclust.estK.Rd                     |   173 +
 man/matclust.estpcf.Rd                   |   175 +
 man/matrixpower.Rd                       |    65 +
 man/maxnndist.Rd                         |    63 +
 man/mean.im.Rd                           |    87 +
 man/mean.linim.Rd                        |    65 +
 man/measureVariation.Rd                  |    77 +
 man/mergeLevels.Rd                       |    75 +
 man/methods.box3.Rd                      |    61 +
 man/methods.boxx.Rd                      |    61 +
 man/methods.dppm.Rd                      |    67 +
 man/methods.fii.Rd                       |   100 +
 man/methods.funxy.Rd                     |    74 +
 man/methods.kppm.Rd                      |    69 +
 man/methods.layered.Rd                   |    93 +
 man/methods.linfun.Rd                    |    90 +
 man/methods.linim.Rd                     |   101 +
 man/methods.linnet.Rd                    |   161 +
 man/methods.lpp.Rd                       |   106 +
 man/methods.lppm.Rd                      |   113 +
 man/methods.objsurf.Rd                   |    62 +
 man/methods.pp3.Rd                       |    71 +
 man/methods.ppx.Rd                       |    59 +
 man/methods.rho2hat.Rd                   |    78 +
 man/methods.rhohat.Rd                    |   113 +
 man/methods.slrm.Rd                      |    73 +
 man/methods.ssf.Rd                       |   110 +
 man/methods.units.Rd                     |    75 +
 man/methods.zclustermodel.Rd             |    66 +
 man/midpoints.psp.Rd                     |    40 +
 man/mincontrast.Rd                       |   159 +
 man/miplot.Rd                            |    74 +
 man/model.depends.Rd                     |   109 +
 man/model.frame.ppm.Rd                   |    83 +
 man/model.images.Rd                      |   144 +
 man/model.matrix.ppm.Rd                  |   133 +
 man/model.matrix.slrm.Rd                 |    64 +
 man/moribund.Rd                          |    64 +
 man/mppm.Rd                              |   271 +
 man/msr.Rd                               |   146 +
 man/mucosa.Rd                            |    54 +
 man/multiplicity.ppp.Rd                  |    77 +
 man/murchison.Rd                         |   108 +
 man/nbfires.Rd                           |   225 +
 man/nearest.raster.point.Rd              |    67 +
 man/nearestsegment.Rd                    |    54 +
 man/nestsplit.Rd                         |    85 +
 man/nnclean.Rd                           |   132 +
 man/nncorr.Rd                            |   212 +
 man/nncross.Rd                           |   194 +
 man/nncross.lpp.Rd                       |   127 +
 man/nncross.pp3.Rd                       |   174 +
 man/nndensity.Rd                         |    90 +
 man/nndist.Rd                            |   186 +
 man/nndist.lpp.Rd                        |    63 +
 man/nndist.pp3.Rd                        |   105 +
 man/nndist.ppx.Rd                        |   104 +
 man/nndist.psp.Rd                        |    92 +
 man/nnfun.Rd                             |    94 +
 man/nnfun.lpp.Rd                         |    90 +
 man/nnmap.Rd                             |   138 +
 man/nnmark.Rd                            |   115 +
 man/nnorient.Rd                          |   109 +
 man/nnwhich.Rd                           |   163 +
 man/nnwhich.lpp.Rd                       |    66 +
 man/nnwhich.pp3.Rd                       |    90 +
 man/nnwhich.ppx.Rd                       |    94 +
 man/nobjects.Rd                          |    57 +
 man/npfun.Rd                             |    46 +
 man/npoints.Rd                           |    47 +
 man/nsegments.Rd                         |    39 +
 man/nvertices.Rd                         |    47 +
 man/nztrees.Rd                           |    48 +
 man/objsurf.Rd                           |    97 +
 man/opening.Rd                           |    87 +
 man/ord.family.Rd                        |    63 +
 man/osteo.Rd                             |   145 +
 man/overlap.owin.Rd                      |    47 +
 man/owin.Rd                              |   204 +
 man/owin.object.Rd                       |   131 +
 man/padimage.Rd                          |    69 +
 man/pairdist.Rd                          |    56 +
 man/pairdist.default.Rd                  |    92 +
 man/pairdist.lpp.Rd                      |    55 +
 man/pairdist.pp3.Rd                      |    70 +
 man/pairdist.ppp.Rd                      |    82 +
 man/pairdist.ppx.Rd                      |    53 +
 man/pairdist.psp.Rd                      |    78 +
 man/pairorient.Rd                        |   112 +
 man/pairs.im.Rd                          |    93 +
 man/pairs.linim.Rd                       |    69 +
 man/pairsat.family.Rd                    |    67 +
 man/pairwise.family.Rd                   |    58 +
 man/panel.contour.Rd                     |    81 +
 man/paracou.Rd                           |    54 +
 man/parameters.Rd                        |    65 +
 man/parres.Rd                            |   204 +
 man/pcf.Rd                               |   121 +
 man/pcf.fasp.Rd                          |   141 +
 man/pcf.fv.Rd                            |   149 +
 man/pcf.ppp.Rd                           |   273 +
 man/pcf3est.Rd                           |   137 +
 man/pcfcross.Rd                          |   185 +
 man/pcfcross.inhom.Rd                    |   151 +
 man/pcfdot.Rd                            |   178 +
 man/pcfdot.inhom.Rd                      |   148 +
 man/pcfinhom.Rd                          |   208 +
 man/pcfmulti.Rd                          |   148 +
 man/perimeter.Rd                         |    50 +
 man/periodify.Rd                         |   114 +
 man/persp.im.Rd                          |   153 +
 man/perspPoints.Rd                       |    86 +
 man/pixelcentres.Rd                      |    55 +
 man/pixellate.Rd                         |    66 +
 man/pixellate.owin.Rd                    |    84 +
 man/pixellate.ppp.Rd                     |   114 +
 man/pixellate.psp.Rd                     |    88 +
 man/pixelquad.Rd                         |    86 +
 man/plot.anylist.Rd                      |   233 +
 man/plot.bermantest.Rd                   |    93 +
 man/plot.cdftest.Rd                      |   113 +
 man/plot.colourmap.Rd                    |    97 +
 man/plot.dppm.Rd                         |    70 +
 man/plot.envelope.Rd                     |    59 +
 man/plot.fasp.Rd                         |   159 +
 man/plot.fv.Rd                           |   248 +
 man/plot.hyperframe.Rd                   |   106 +
 man/plot.im.Rd                           |   391 +
 man/plot.imlist.Rd                       |    87 +
 man/plot.influence.ppm.Rd                |    79 +
 man/plot.kppm.Rd                         |    96 +
 man/plot.laslett.Rd                      |    64 +
 man/plot.layered.Rd                      |   111 +
 man/plot.leverage.ppm.Rd                 |    80 +
 man/plot.linim.Rd                        |   118 +
 man/plot.linnet.Rd                       |    57 +
 man/plot.lintess.Rd                      |    74 +
 man/plot.listof.Rd                       |   234 +
 man/plot.lpp.Rd                          |   105 +
 man/plot.lppm.Rd                         |    56 +
 man/plot.mppm.Rd                         |    92 +
 man/plot.msr.Rd                          |   104 +
 man/plot.onearrow.Rd                     |    94 +
 man/plot.owin.Rd                         |   211 +
 man/plot.plotppm.Rd                      |   104 +
 man/plot.pp3.Rd                          |    81 +
 man/plot.ppm.Rd                          |   180 +
 man/plot.ppp.Rd                          |   386 +
 man/plot.psp.Rd                          |   122 +
 man/plot.quad.Rd                         |    83 +
 man/plot.quadratcount.Rd                 |    84 +
 man/plot.quadrattest.Rd                  |    62 +
 man/plot.rppm.Rd                         |    74 +
 man/plot.scan.test.Rd                    |    85 +
 man/plot.slrm.Rd                         |    60 +
 man/plot.solist.Rd                       |   225 +
 man/plot.splitppp.Rd                     |    64 +
 man/plot.ssf.Rd                          |   102 +
 man/plot.symbolmap.Rd                    |    85 +
 man/plot.tess.Rd                         |    75 +
 man/plot.textstring.Rd                   |    49 +
 man/plot.texturemap.Rd                   |    97 +
 man/plot.yardstick.Rd                    |    98 +
 man/points.lpp.Rd                        |    63 +
 man/pointsOnLines.Rd                     |    58 +
 man/polynom.Rd                           |    61 +
 man/ponderosa.Rd                         |    61 +
 man/pool.Rd                              |    43 +
 man/pool.anylist.Rd                      |    56 +
 man/pool.envelope.Rd                     |    95 +
 man/pool.fasp.Rd                         |    65 +
 man/pool.fv.Rd                           |    61 +
 man/pool.quadrattest.Rd                  |    91 +
 man/pool.rat.Rd                          |   108 +
 man/pp3.Rd                               |    48 +
 man/ppm.Rd                               |   431 ++
 man/ppm.object.Rd                        |   164 +
 man/ppm.ppp.Rd                           |   869 +++
 man/ppmInfluence.Rd                      |    92 +
 man/ppp.Rd                               |   245 +
 man/ppp.object.Rd                        |   134 +
 man/pppdist.Rd                           |   222 +
 man/pppmatching.Rd                       |    83 +
 man/pppmatching.object.Rd                |    87 +
 man/ppx.Rd                               |   100 +
 man/predict.dppm.Rd                      |    62 +
 man/predict.kppm.Rd                      |    62 +
 man/predict.lppm.Rd                      |   109 +
 man/predict.mppm.Rd                      |   131 +
 man/predict.ppm.Rd                       |   372 +
 man/predict.rppm.Rd                      |    80 +
 man/predict.slrm.Rd                      |    94 +
 man/print.im.Rd                          |    36 +
 man/print.owin.Rd                        |    42 +
 man/print.ppm.Rd                         |    64 +
 man/print.ppp.Rd                         |    45 +
 man/print.psp.Rd                         |    36 +
 man/print.quad.Rd                        |    46 +
 man/profilepl.Rd                         |   181 +
 man/progressreport.Rd                    |   118 +
 man/project2segment.Rd                   |    75 +
 man/project2set.Rd                       |    64 +
 man/prune.rppm.Rd                        |    54 +
 man/pseudoR2.Rd                          |    71 +
 man/psib.Rd                              |    61 +
 man/psp.Rd                               |    91 +
 man/psp.object.Rd                        |    90 +
 man/psst.Rd                              |   156 +
 man/psstA.Rd                             |   193 +
 man/psstG.Rd                             |   150 +
 man/pyramidal.Rd                         |    45 +
 man/qqplot.ppm.Rd                        |   382 +
 man/quad.object.Rd                       |    87 +
 man/quad.ppm.Rd                          |    95 +
 man/quadrat.test.Rd                      |   281 +
 man/quadrat.test.mppm.Rd                 |   122 +
 man/quadrat.test.splitppp.Rd             |    66 +
 man/quadratcount.Rd                      |   179 +
 man/quadratresample.Rd                   |    75 +
 man/quadrats.Rd                          |    90 +
 man/quadscheme.Rd                        |   165 +
 man/quadscheme.logi.Rd                   |   142 +
 man/quantess.Rd                          |   112 +
 man/quantile.density.Rd                  |    83 +
 man/quantile.ewcdf.Rd                    |    73 +
 man/quantile.im.Rd                       |    57 +
 man/quasirandom.Rd                       |   107 +
 man/rCauchy.Rd                           |   166 +
 man/rDGS.Rd                              |   112 +
 man/rDiggleGratton.Rd                    |   132 +
 man/rGaussPoisson.Rd                     |    76 +
 man/rHardcore.Rd                         |   105 +
 man/rLGCP.Rd                             |   140 +
 man/rMatClust.Rd                         |   191 +
 man/rMaternI.Rd                          |    84 +
 man/rMaternII.Rd                         |    92 +
 man/rMosaicField.Rd                      |    60 +
 man/rMosaicSet.Rd                        |    58 +
 man/rNeymanScott.Rd                      |   235 +
 man/rPenttinen.Rd                        |   124 +
 man/rPoissonCluster.Rd                   |   133 +
 man/rQuasi.Rd                            |    57 +
 man/rSSI.Rd                              |   136 +
 man/rStrauss.Rd                          |   143 +
 man/rStraussHard.Rd                      |   112 +
 man/rThomas.Rd                           |   183 +
 man/rVarGamma.Rd                         |   178 +
 man/rags.Rd                              |    61 +
 man/ragsAreaInter.Rd                     |    98 +
 man/ragsMultiHard.Rd                     |    85 +
 man/ranef.mppm.Rd                        |    65 +
 man/range.fv.Rd                          |    59 +
 man/raster.x.Rd                          |    87 +
 man/rat.Rd                               |    64 +
 man/rcell.Rd                             |   108 +
 man/rcellnumber.Rd                       |    59 +
 man/rdpp.Rd                              |    60 +
 man/reach.Rd                             |   154 +
 man/reach.dppm.Rd                        |    42 +
 man/reduced.sample.Rd                    |    94 +
 man/redwood.Rd                           |    70 +
 man/redwoodfull.Rd                       |   108 +
 man/reflect.Rd                           |    52 +
 man/regularpolygon.Rd                    |    68 +
 man/relevel.im.Rd                        |    64 +
 man/reload.or.compute.Rd                 |    72 +
 man/relrisk.Rd                           |    53 +
 man/relrisk.ppm.Rd                       |   206 +
 man/relrisk.ppp.Rd                       |   238 +
 man/requireversion.Rd                    |    44 +
 man/rescale.Rd                           |    80 +
 man/rescale.im.Rd                        |    69 +
 man/rescale.owin.Rd                      |    70 +
 man/rescale.ppp.Rd                       |    71 +
 man/rescale.psp.Rd                       |    72 +
 man/rescue.rectangle.Rd                  |    53 +
 man/residuals.dppm.Rd                    |    53 +
 man/residuals.kppm.Rd                    |    53 +
 man/residuals.mppm.Rd                    |    81 +
 man/residuals.ppm.Rd                     |   241 +
 man/residualspaper.Rd                    |    93 +
 man/rex.Rd                               |    99 +
 man/rgbim.Rd                             |    89 +
 man/rho2hat.Rd                           |   109 +
 man/rhohat.Rd                            |   312 +
 man/ripras.Rd                            |   107 +
 man/rjitter.Rd                           |    71 +
 man/rknn.Rd                              |    70 +
 man/rlabel.Rd                            |    77 +
 man/rlinegrid.Rd                         |    41 +
 man/rlpp.Rd                              |    77 +
 man/rmh.Rd                               |    90 +
 man/rmh.default.Rd                       |   695 ++
 man/rmh.ppm.Rd                           |   263 +
 man/rmhcontrol.Rd                        |   333 +
 man/rmhexpand.Rd                         |   157 +
 man/rmhmodel.Rd                          |   100 +
 man/rmhmodel.default.Rd                  |   533 ++
 man/rmhmodel.list.Rd                     |   150 +
 man/rmhmodel.ppm.Rd                      |   135 +
 man/rmhstart.Rd                          |   116 +
 man/rmpoint.Rd                           |   300 +
 man/rmpoispp.Rd                          |   209 +
 man/rnoise.Rd                            |    69 +
 man/roc.Rd                               |   107 +
 man/rose.Rd                              |   153 +
 man/rotate.Rd                            |    37 +
 man/rotate.im.Rd                         |    51 +
 man/rotate.infline.Rd                    |    82 +
 man/rotate.owin.Rd                       |    60 +
 man/rotate.ppp.Rd                        |    54 +
 man/rotate.psp.Rd                        |    57 +
 man/rotmean.Rd                           |    89 +
 man/round.ppp.Rd                         |    55 +
 man/rounding.Rd                          |    83 +
 man/rpoint.Rd                            |   133 +
 man/rpoisline.Rd                         |    55 +
 man/rpoislinetess.Rd                     |    57 +
 man/rpoislpp.Rd                          |    67 +
 man/rpoispp.Rd                           |   173 +
 man/rpoispp3.Rd                          |    62 +
 man/rpoisppOnLines.Rd                    |   116 +
 man/rpoisppx.Rd                          |    63 +
 man/rppm.Rd                              |    69 +
 man/rshift.Rd                            |    65 +
 man/rshift.ppp.Rd                        |   196 +
 man/rshift.psp.Rd                        |   117 +
 man/rshift.splitppp.Rd                   |    81 +
 man/rstrat.Rd                            |    76 +
 man/rsyst.Rd                             |    89 +
 man/rtemper.Rd                           |    87 +
 man/rthin.Rd                             |   103 +
 man/run.simplepanel.Rd                   |   154 +
 man/runifdisc.Rd                         |    70 +
 man/runiflpp.Rd                          |    60 +
 man/runifpoint.Rd                        |   107 +
 man/runifpoint3.Rd                       |    52 +
 man/runifpointOnLines.Rd                 |    63 +
 man/runifpointx.Rd                       |    53 +
 man/scalardilate.Rd                      |    85 +
 man/scaletointerval.Rd                   |    56 +
 man/scan.test.Rd                         |   158 +
 man/scanLRTS.Rd                          |   150 +
 man/scanpp.Rd                            |    96 +
 man/sdr.Rd                               |   111 +
 man/sdrPredict.Rd                        |    48 +
 man/segregation.test.Rd                  |    99 +
 man/selfcrossing.psp.Rd                  |    47 +
 man/selfcut.psp.Rd                       |    48 +
 man/sessionLibs.Rd                       |    38 +
 man/setcov.Rd                            |    71 +
 man/shapley.Rd                           |    98 +
 man/sharpen.Rd                           |   101 +
 man/shift.Rd                             |    43 +
 man/shift.im.Rd                          |    61 +
 man/shift.owin.Rd                        |    62 +
 man/shift.ppp.Rd                         |    66 +
 man/shift.psp.Rd                         |    63 +
 man/sidelengths.owin.Rd                  |    61 +
 man/simba.Rd                             |    37 +
 man/simdat.Rd                            |    30 +
 man/simplenet.Rd                         |    18 +
 man/simplepanel.Rd                       |   233 +
 man/simplify.owin.Rd                     |    57 +
 man/simulate.dppm.Rd                     |   121 +
 man/simulate.kppm.Rd                     |    89 +
 man/simulate.lppm.Rd                     |    75 +
 man/simulate.mppm.Rd                     |    62 +
 man/simulate.ppm.Rd                      |   123 +
 man/simulate.slrm.Rd                     |    88 +
 man/slrm.Rd                              |   191 +
 man/solapply.Rd                          |    69 +
 man/solist.Rd                            |    97 +
 man/solutionset.Rd                       |    88 +
 man/spatdim.Rd                           |    50 +
 man/spatialcdf.Rd                        |   105 +
 man/spatstat-deprecated.Rd               |   110 +
 man/spatstat-internal.Rd                 |  1599 +++++
 man/spatstat-package.Rd                  |  1927 +++++
 man/spatstat.options.Rd                  |   412 ++
 man/spiders.Rd                           |    66 +
 man/split.hyperframe.Rd                  |    71 +
 man/split.im.Rd                          |    69 +
 man/split.msr.Rd                         |    84 +
 man/split.ppp.Rd                         |   204 +
 man/split.ppx.Rd                         |   117 +
 man/spokes.Rd                            |    96 +
 man/sporophores.Rd                       |    45 +
 man/spruces.Rd                           |    62 +
 man/square.Rd                            |    63 +
 man/ssf.Rd                               |    59 +
 man/stieltjes.Rd                         |    71 +
 man/stienen.Rd                           |    83 +
 man/stratrand.Rd                         |    80 +
 man/studpermu.test.Rd                    |   127 +
 man/subfits.Rd                           |    84 +
 man/subset.hyperframe.Rd                 |    83 +
 man/subset.ppp.Rd                        |   144 +
 man/subspaceDistance.Rd                  |    52 +
 man/suffstat.Rd                          |   117 +
 man/summary.anylist.Rd                   |    44 +
 man/summary.im.Rd                        |    71 +
 man/summary.kppm.Rd                      |    79 +
 man/summary.listof.Rd                    |    43 +
 man/summary.owin.Rd                      |    41 +
 man/summary.ppm.Rd                       |    97 +
 man/summary.ppp.Rd                       |    62 +
 man/summary.psp.Rd                       |    36 +
 man/summary.quad.Rd                      |    62 +
 man/summary.solist.Rd                    |    47 +
 man/summary.splitppp.Rd                  |    45 +
 man/sumouter.Rd                          |    92 +
 man/superimpose.Rd                       |   198 +
 man/superimpose.lpp.Rd                   |    79 +
 man/swedishpines.Rd                      |    52 +
 man/symbolmap.Rd                         |   152 +
 man/tess.Rd                              |   160 +
 man/texturemap.Rd                        |    60 +
 man/textureplot.Rd                       |   112 +
 man/thinNetwork.Rd                       |    84 +
 man/thomas.estK.Rd                       |   166 +
 man/thomas.estpcf.Rd                     |   170 +
 man/tile.areas.Rd                        |    46 +
 man/tileindex.Rd                         |    54 +
 man/tilenames.Rd                         |    42 +
 man/tiles.Rd                             |    44 +
 man/tiles.empty.Rd                       |    55 +
 man/timeTaken.Rd                         |    48 +
 man/timed.Rd                             |    89 +
 man/transect.im.Rd                       |    76 +
 man/transmat.Rd                          |    89 +
 man/treebranchlabels.Rd                  |    76 +
 man/treeprune.Rd                         |    63 +
 man/triangulate.owin.Rd                  |    47 +
 man/trim.rectangle.Rd                    |    54 +
 man/triplet.family.Rd                    |    46 +
 man/tweak.colourmap.Rd                   |    62 +
 man/union.quad.Rd                        |    46 +
 man/unique.ppp.Rd                        |    64 +
 man/unitname.Rd                          |   125 +
 man/unmark.Rd                            |    54 +
 man/unnormdensity.Rd                     |    78 +
 man/unstack.msr.Rd                       |    54 +
 man/unstack.ppp.Rd                       |    65 +
 man/update.detpointprocfamily.Rd         |    31 +
 man/update.interact.Rd                   |    51 +
 man/update.kppm.Rd                       |    78 +
 man/update.ppm.Rd                        |   176 +
 man/update.rmhcontrol.Rd                 |    43 +
 man/update.symbolmap.Rd                  |    53 +
 man/urkiola.Rd                           |    34 +
 man/valid.Rd                             |    51 +
 man/valid.detpointprocfamily.Rd          |    34 +
 man/valid.ppm.Rd                         |    85 +
 man/varblock.Rd                          |   130 +
 man/varcount.Rd                          |   107 +
 man/vargamma.estK.Rd                     |   171 +
 man/vargamma.estpcf.Rd                   |   173 +
 man/vcov.kppm.Rd                         |   100 +
 man/vcov.mppm.Rd                         |    86 +
 man/vcov.ppm.Rd                          |   229 +
 man/vcov.slrm.Rd                         |   107 +
 man/vertices.Rd                          |    59 +
 man/vesicles.Rd                          |    91 +
 man/volume.Rd                            |    47 +
 man/waka.Rd                              |    44 +
 man/waterstriders.Rd                     |    59 +
 man/weighted.median.Rd                   |    61 +
 man/where.max.Rd                         |    61 +
 man/whichhalfplane.Rd                    |    45 +
 man/whist.Rd                             |    75 +
 man/will.expand.Rd                       |    51 +
 man/with.fv.Rd                           |   117 +
 man/with.hyperframe.Rd                   |    80 +
 man/with.msr.Rd                          |    81 +
 man/with.ssf.Rd                          |    62 +
 man/yardstick.Rd                         |    88 +
 man/zapsmall.im.Rd                       |    40 +
 man/zclustermodel.Rd                     |    52 +
 src/Ediggatsti.c                         |    82 +
 src/Ediggra.c                            |    92 +
 src/Efiksel.c                            |    79 +
 src/Egeyer.c                             |   100 +
 src/Estrauss.c                           |    75 +
 src/Kborder.c                            |    46 +
 src/Kborder.h                            |   210 +
 src/Knone.c                              |    47 +
 src/Knone.h                              |   176 +
 src/Krect.c                              |    78 +
 src/KrectBody.h                          |   195 +
 src/KrectFunDec.h                        |   108 +
 src/KrectIncrem.h                        |    94 +
 src/KrectV1.h                            |    19 +
 src/KrectV2.h                            |    19 +
 src/KrectV3.h                            |    19 +
 src/KrectV4.h                            |    19 +
 src/Perfect.cc                           |   849 +++
 src/PerfectDGS.h                         |   195 +
 src/PerfectDiggleGratton.h               |   203 +
 src/PerfectHardcore.h                    |   174 +
 src/PerfectPenttinen.h                   |   200 +
 src/PerfectStrauss.h                     |   302 +
 src/PerfectStraussHard.h                 |   188 +
 src/areadiff.c                           |   303 +
 src/areaint.c                            |   308 +
 src/areapair.c                           |    99 +
 src/auctionbf.c                          |   258 +
 src/badgey.c                             |   513 ++
 src/bdrymask.c                           |    57 +
 src/call3d.c                             |   560 ++
 src/chunkloop.h                          |    37 +
 src/close3pair.c                         |    76 +
 src/closefuns.h                          |  1168 +++
 src/closepair.c                          |   538 ++
 src/connect.c                            |   143 +
 src/constants.h                          |    25 +
 src/corrections.c                        |   391 +
 src/crossloop.h                          |    63 +
 src/denspt.c                             |   512 ++
 src/densptcross.c                        |   320 +
 src/dgs.c                                |   125 +
 src/digber.c                             |    67 +
 src/diggra.c                             |   154 +
 src/dinfty.c                             |   139 +
 src/discarea.c                           |   275 +
 src/discs.c                              |    97 +
 src/dist2.c                              |   100 +
 src/dist2.h                              |    86 +
 src/dist2dpath.c                         |    25 +
 src/dist2dpath.h                         |   184 +
 src/distan3.c                            |   497 ++
 src/distances.c                          |   430 ++
 src/distmapbin.c                         |   124 +
 src/dwpure.c                             |   318 +
 src/exactPdist.c                         |   148 +
 src/exactdist.c                          |   236 +
 src/f3.c                                 |   499 ++
 src/fardist.c                            |    30 +
 src/fardist.h                            |    80 +
 src/fexitc.c                             |    16 +
 src/fiksel.c                             |   153 +
 src/functable.h                          |    58 +
 src/g3.c                                 |   265 +
 src/geom3.h                              |    20 +
 src/getcif.c                             |    74 +
 src/geyer.c                              |   433 ++
 src/hardcore.c                           |   109 +
 src/hasclose.c                           |    50 +
 src/hasclose.h                           |   403 ++
 src/idw.c                                |   146 +
 src/init.c                               |   213 +
 src/k3.c                                 |   161 +
 src/knn3Ddist.h                          |   188 +
 src/knn3DdistX.h                         |   233 +
 src/knnXdist.h                           |   297 +
 src/knndist.h                            |   204 +
 src/knndistance.c                        |   246 +
 src/knngrid.c                            |   116 +
 src/knngrid.h                            |   245 +
 src/lennard.c                            |   158 +
 src/linSnncross.c                        |    39 +
 src/linSnncross.h                        |   132 +
 src/linalg.c                             |   263 +
 src/lincrossdist.c                       |    85 +
 src/lineardisc.c                         |   316 +
 src/linearradius.c                       |    79 +
 src/linequad.c                           |    35 +
 src/linequad.h                           |   553 ++
 src/linknnd.c                            |    31 +
 src/linknnd.h                            |   165 +
 src/linnncross.c                         |    37 +
 src/linnncross.h                         |   136 +
 src/linnndist.c                          |   186 +
 src/linpairdist.c                        |    83 +
 src/linvdist.c                           |    30 +
 src/linvdist.h                           |   150 +
 src/linvknndist.c                        |   241 +
 src/lixel.c                              |   134 +
 src/localpcf.c                           |    23 +
 src/localpcf.h                           |   115 +
 src/loccum.c                             |    79 +
 src/loccums.h                            |   106 +
 src/loccumx.h                            |   101 +
 src/lookup.c                             |   218 +
 src/looptest.h                           |    12 +
 src/massdisthack.c                       |    70 +
 src/maxnnd.h                             |   109 +
 src/methas.c                             |   423 ++
 src/methas.h                             |   120 +
 src/mhloop.h                             |   511 ++
 src/mhsnoop.c                            |   185 +
 src/mhsnoop.h                            |    20 +
 src/mhsnoopdef.h                         |    23 +
 src/mhv1.h                               |    20 +
 src/mhv2.h                               |    21 +
 src/mhv3.h                               |    20 +
 src/mhv4.h                               |    21 +
 src/mhv5.h                               |    21 +
 src/minnnd.c                             |    39 +
 src/minnnd.h                             |    97 +
 src/multihard.c                          |   174 +
 src/nn3Ddist.c                           |   419 ++
 src/nn3Ddist.h                           |   101 +
 src/nn3DdistX.h                          |   127 +
 src/nnMDdist.c                           |   840 +++
 src/nndist.h                             |   117 +
 src/nndistX.h                            |   141 +
 src/nndistance.c                         |   215 +
 src/nngrid.c                             |   109 +
 src/nngrid.h                             |   131 +
 src/pairloop.h                           |    67 +
 src/pcf3.c                               |   205 +
 src/penttinen.c                          |   139 +
 src/poly2im.c                            |   331 +
 src/proto.h                              |   223 +
 src/quasirandom.c                        |    38 +
 src/raster.h                             |    88 +
 src/rthin.c                              |    83 +
 src/scan.c                               |    92 +
 src/seg2pix.c                            |   265 +
 src/seg2pix.h                            |   176 +
 src/segdens.c                            |    53 +
 src/sftcr.c                              |   112 +
 src/sparselinalg.c                       |    24 +
 src/spasumsymout.h                       |   166 +
 src/sphefrac.c                           |   170 +
 src/sphevol.c                            |   222 +
 src/straush.c                            |   132 +
 src/straushm.c                           |   250 +
 src/strauss.c                            |   117 +
 src/straussm.c                           |   218 +
 src/sumsymouter.h                        |    88 +
 src/trigraf.c                            |  1211 ++++
 src/triplets.c                           |   135 +
 src/veegraf.c                            |   156 +
 src/whist.c                              |    51 +
 src/xyseg.c                              |   830 +++
 src/yesno.h                              |     9 +
 tests/badwindow.txt                      |  1203 ++++
 tests/selfcross.txt                      |    22 +
 tests/testsAtoF.R                        |   753 ++
 tests/testsGtoK.R                        |   197 +
 tests/testsLtoM.R                        |   476 ++
 tests/testsNtoP.R                        |   624 ++
 tests/testsQtoR.R                        |   850 +++
 tests/testsStoZ.R                        |   644 ++
 vignettes/datasets.Rnw                   |   870 +++
 vignettes/getstart.Rnw                   |   397 ++
 vignettes/hexagon.eps                    |   114 +
 vignettes/hexagon.pdf                    |    83 +
 vignettes/irregpoly.eps                  |   119 +
 vignettes/irregpoly.pdf                  |    84 +
 vignettes/replicated.Rnw                 |  1525 ++++
 vignettes/shapefiles.Rnw                 |   497 ++
 vignettes/updates.Rnw                    |  2197 ++++++
 1755 files changed, 280883 insertions(+), 248 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
new file mode 100644
index 0000000..4e7d354
--- /dev/null
+++ b/DESCRIPTION
@@ -0,0 +1,221 @@
+Package: spatstat
+Version: 1.52-1
+Nickname: Apophenia
+Date: 2017-08-16
+Title: Spatial Point Pattern Analysis, Model-Fitting, Simulation, Tests
+Author: Adrian Baddeley <Adrian.Baddeley at curtin.edu.au>,
+	Rolf Turner <r.turner at auckland.ac.nz> 
+        and Ege Rubak <rubak at math.aau.dk>,
+	with substantial contributions of code by 
+	Kasper Klitgaard Berthelsen;
+	Ottmar Cronie;
+	Yongtao Guan;
+	Ute Hahn;
+	Abdollah Jalilian;
+	Marie-Colette van Lieshout;
+	Greg McSwiggan;
+	Tuomas Rajala;
+	Suman Rakshit;
+	Dominic Schuhmacher;
+	Rasmus Waagepetersen;
+ 	and Hangsheng Wang.
+	Additional contributions 
+	by M. Adepeju;
+        C. Anderson; 
+        Q.W. Ang; 
+        M. Austenfeld;
+	S. Azaele; 
+	M. Baddeley;
+	C. Beale; 
+	M. Bell;
+	R. Bernhardt; 
+	T. Bendtsen;
+	A. Bevan;
+	B. Biggerstaff;
+	A. Bilgrau;
+	L. Bischof;
+	C. Biscio;
+	R. Bivand;
+	J.M. Blanco Moreno;
+	F. Bonneu;
+	J. Burgos; 
+	S. Byers; 
+	Y.M. Chang; 
+	J.B. Chen; 
+	I. Chernayavsky; 
+	Y.C. Chin; 
+	B. Christensen; 
+	J.-F. Coeurjolly;
+	K. Colyvas;
+	R. Corria Ainslie;
+	R. Cotton;
+	M. de la Cruz; 
+	P. Dalgaard; 
+	M. D'Antuono;
+        S. Das;
+	T. Davies;
+	P.J. Diggle; 
+	P. Donnelly;
+	I. Dryden; 
+	S. Eglen; 
+	A. El-Gabbas;
+        B. Fandohan;
+        O. Flores;
+	E.D. Ford;
+        P. Forbes;
+	S. Frank; 
+	J. Franklin; 
+	N. Funwi-Gabga;
+        O. Garcia;
+	A. Gault; 
+ 	J. Geldmann;
+	M. Genton;
+	S. Ghalandarayeshi;
+	J. Gilbey;
+	J. Goldstick;
+	P. Grabarnik; 
+	C. Graf; 
+	U. Hahn; 
+	A. Hardegen; 
+	M.B. Hansen; 
+	M. Hazelton; 
+	J. Heikkinen; 
+	M. Hering; 
+	M. Herrmann; 
+	P. Hewson;
+	K. Hingee;
+	K. Hornik; 
+	P. Hunziker; 
+	J. Hywood;
+	R. Ihaka;
+	C. Icos; 
+	A. Jammalamadaka;
+	R. John-Chandran; 
+	D. Johnson; 
+	M. Khanmohammadi;
+	R. Klaver;
+	P. Kovesi;
+	M. Kuhn; 
+	J. Laake; 
+	F. Lavancier;
+	T. Lawrence; 
+	R.A. Lamb; 
+	J. Lee; 
+	G.P. Leser; 
+	H.T. Li;
+	G. Limitsios;
+	A. Lister;
+	B. Madin;
+	M. Maechler;
+	J. Marcus;
+	K. Marchikanti; 
+	R. Mark; 
+	J. Mateu;
+	P. McCullagh; 
+	U. Mehlig;
+	F. Mestre;
+	S. Meyer; 
+	X.C. Mi;
+	L. De Middeleer;
+	R.K. Milne; 
+        E. Miranda;
+	J. Moller; 
+	M. Moradi;
+	V. Morera Pujol; 
+	E. Mudrak;
+        G.M. Nair;
+	N. Najari;
+	N. Nava;
+	L.S. Nielsen; 
+	F. Nunes; 
+	J.R. Nyengaard;
+	J. Oehlschlaegel;
+	T. Onkelinx;
+	S. O'Riordan;
+	E. Parilov; 
+	J. Picka; 
+	N. Picard; 
+	M. Porter;
+	S. Protsiv;
+	A. Raftery; 
+	S. Rakshit; 
+	B. Ramage;
+	P. Ramon;
+	X. Raynaud;
+	N. Read; 
+	M. Reiter; 
+        I. Renner;
+	T.O. Richardson;  
+	B.D. Ripley;  
+	E. Rosenbaum; 
+	B. Rowlingson; 
+	J. Rudokas;
+	J. Rudge;
+	C. Ryan; 
+	F. Safavimanesh;
+	A. Sarkka; 
+	C. Schank; 
+	K. Schladitz; 
+	S. Schutte;
+	B.T. Scott; 
+        O. Semboli;
+	F. Semecurbe;
+	V. Shcherbakov;
+	G.C. Shen;
+        P. Shi;
+	H.-J. Ship;
+	T.L. Silva;
+	I.-M. Sintorn; 
+	Y. Song; 
+	M. Spiess; 
+	M. Stevenson; 
+	K. Stucki; 
+	M. Sumner; 
+	P. Surovy; 
+	B. Taylor; 
+	T. Thorarinsdottir;
+	B. Turlach; 
+	T. Tvedebrink;
+        K. Ummer;
+	M. Uppala;
+	A. van Burgel; 
+	T. Verbeke; 
+        M. Vihtakari;
+	A. Villers; 
+        F. Vinatier;
+        S. Voss;
+	S. Wagner;
+	H. Wang; 
+	H. Wendrock; 
+	J. Wild;
+	C. Witthoft;
+	S. Wong;
+	M. Woringer;
+	M.E. Zamboni
+	and
+	A. Zeileis.
+Maintainer: Adrian Baddeley <Adrian.Baddeley at curtin.edu.au>
+Depends: R (>= 3.3.0), stats, graphics, grDevices, utils, methods,
+        nlme, rpart
+Imports: spatstat.utils (>= 1.7-0), mgcv, Matrix, deldir (>= 0.0-21),
+        abind, tensor, polyclip (>= 1.5-0), goftest
+Suggests: sm, maptools, gsl, locfit, spatial, rpanel, tkrplot,
+        RandomFields (>= 3.1.24.1), RandomFieldsUtils (>= 0.3.3.1),
+        fftwtools (>= 0.9-8)
+Remotes: spatstat/spatstat.utils
+Description: Comprehensive open-source toolbox for analysing Spatial Point Patterns. Focused mainly on two-dimensional point patterns, including multitype/marked points, in any spatial region. Also supports three-dimensional point patterns, space-time point patterns in any number of dimensions, point patterns on a linear network, and patterns of other geometrical objects. Supports spatial covariate data such as pixel images. 
+	Contains over 2000 functions for plotting spatial data, exploratory data analysis, model-fitting, simulation, spatial sampling, model diagnostics, and formal inference. 
+	Data types include point patterns, line segment patterns, spatial windows, pixel images, tessellations, and linear networks. 
+	Exploratory methods include quadrat counts, K-functions and their simulation envelopes, nearest neighbour distance and empty space statistics, Fry plots, pair correlation function, kernel smoothed intensity, relative risk estimation with cross-validated bandwidth selection, mark correlation functions, segregation indices, mark dependence diagnostics, and kernel estimates of covariate effects. Formal hypothesis tests of random pattern (chi-squared, Kolmogorov-Smirnov, Monte Carlo, Diggle [...]
+	Parametric models can be fitted to point pattern data using the functions ppm(), kppm(), slrm(), dppm(), which are similar to glm(). Types of models include Poisson, Gibbs and Cox point processes, Neyman-Scott cluster processes, and determinantal point processes. Models may involve dependence on covariates, inter-point interaction, cluster formation and dependence on marks. Models are fitted by maximum likelihood, logistic regression, minimum contrast, and composite likelihood methods. 
+	A model can be fitted to a list of point patterns (replicated point pattern data) using the function mppm(). The model can include random effects and fixed effects depending on the experimental design, in addition to all the features listed above.
+	Fitted point process models can be simulated automatically. Formal hypothesis tests of a fitted model are supported (likelihood ratio test, analysis of deviance, Monte Carlo tests) along with basic tools for model selection (stepwise(), AIC()). Tools for validating the fitted model include simulation envelopes, residuals, residual plots and Q-Q plots, leverage and influence diagnostics, partial residuals, and added variable plots.
+License: GPL (>= 2)
+URL: http://www.spatstat.org
+LazyData: true
+NeedsCompilation: yes
+ByteCompile: true
+BugReports: https://github.com/spatstat/spatstat/issues
+Packaged: 2017-08-16 06:41:56 UTC; adrian
+Repository: CRAN
+Date/Publication: 2017-08-16 09:01:38 UTC
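
The exploratory tools named in the Description above take only a few
lines to exercise. A minimal sketch in R, assuming spatstat 1.52-1 is
installed and using its bundled swedishpines dataset (documented in
man/swedishpines.Rd, listed above); the nsim value is illustrative:

    library(spatstat)
    X <- swedishpines                   # bundled unmarked point pattern
    plot(Kest(X))                       # Ripley's K-function, edge-corrected estimates
    plot(envelope(X, Kest, nsim = 39))  # pointwise simulation envelope under CSR
    plot(pcf(X))                        # pair correlation function estimate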
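
Likewise, a minimal model-fitting sketch using the functions named in
the Description; the log-quadratic trend formula and the choice of the
bundled redwood dataset (man/redwood.Rd) are illustrative assumptions:

    # Inhomogeneous Poisson model with log-quadratic trend in the coordinates
    fit <- ppm(X ~ polynom(x, y, 2))
    # Thomas cluster process fitted by minimum contrast
    fitk <- kppm(redwood ~ 1, "Thomas")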
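
For replicated point pattern data, a sketch of the mppm() interface
described above, assuming the bundled waterstriders data
(man/waterstriders.Rd):

    H <- hyperframe(Y = waterstriders)  # hyperframe holding three replicate patterns
    fitrep <- mppm(Y ~ 1, data = H)     # common stationary Poisson model for all replicates
    subfits(fitrep)                     # the fitted model induced on each pattern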
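
Finally, a sketch of the simulation and validation tools, applied to
the hypothetical fit from the model-fitting sketch above:

    Xsim <- simulate(fit, nsim = 2)     # simulate realisations of the fitted model
    plot(Xsim[[1]])
    res <- residuals(fit)               # signed residual measure (class "msr")
    plot(res)
    diagnose.ppm(fit)                   # four-panel residual diagnostic plots
    plot(leverage(fit))                 # leverage diagnostic for the fitted model
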
diff --git a/MD5 b/MD5
new file mode 100644
index 0000000..827c912
--- /dev/null
+++ b/MD5
@@ -0,0 +1,1740 @@
+6d410e8476d6ccbce92426a0e7d61f73 *DESCRIPTION
+7a4a11d07efa27ec4d1d49153b7aa657 *NAMESPACE
+c5389254af15e5c8bb48aa4921c99979 *NEWS
+5e1711cba2ad40a53219b172b6fb99dc *R/FGmultiInhom.R
+380a93554202560db6d72939b511ec99 *R/Fest.R
+ed28d193338b66310420a29ef0cd8408 *R/First.R
+de486529eadf1b89d727c4865135efd7 *R/GJfox.R
+227d455ca897f91d7dd879c3f8539a65 *R/Gcom.R
+634f9eb8fec91a40ed0eb1389dbd7042 *R/Gest.R
+d3fa3abc1515437f625927587ac80278 *R/Gmulti.R
+1101d753c8f35af184bfa8ff36a64486 *R/Gres.R
+f23e2cbb6ee07011049657331935581f *R/Hest.R
+2a2cef04f4aa319405856086ffed48e2 *R/Iest.R
+d3e7548bb5eb3f305e9da1e5485ece35 *R/Jest.R
+d49147625234ff5703f9ff441ada2813 *R/Jinhom.R
+e840187383b058d56058fa83b82db02f *R/Jmulti.R
+47d818e8f09b9eaae578d7a94a4f6e71 *R/Kcom.R
+b048e9405af29aa208aebf691ad05a68 *R/Kest.R
+1f0f4f61907ffd5da9d5ce9262d62896 *R/Kinhom.R
+fce7353c575a80a31b49a8c7d515aaab *R/Kmeasure.R
+a8742620874e75312563d5844b76ac3d *R/Kmodel.R
+1d1f72a7c0fffefd2f96b93bec8ffafd *R/Kmulti.R
+5e417749366d82e0a0a0bf33d08b55fb *R/Kmulti.inhom.R
+abbf6db440f53371a4f757cb17274de6 *R/Kres.R
+ea1f01b5689ec60ffa0488a28ffac454 *R/Kscaled.R
+94033aebde371087c4c246cb9fc7ef16 *R/Ksector.R
+4e2cdb96fc1f583ebab0b39195656906 *R/Math.im.R
+6ea4db23fe667009de99cf5bacfc5511 *R/Math.imlist.R
+21833591715359ed24c75c0436a30366 *R/Math.linim.R
+cd21940efea7c50e968c4f6775c8e5cf *R/Tstat.R
+69fd69139c158314a9cfc75cdf7c4762 *R/aaaa.R
+d528b57827c35f5df4ee4d01ed20a3ff *R/adaptive.density.R
+286e37ecff8524697d89955543166505 *R/addvar.R
+d6804d59baa350c92b853c7d9e1ca7d2 *R/affine.R
+948e229cfe013701e32cbf3690856c2d *R/allstats.R
+fc73a78175654eac4f754898bb0de421 *R/alltypes.R
+11baa91da1e8a2e68529ccc3f21b3c09 *R/anova.mppm.R
+24f6d3abeff639bc98c5226cb79e64c5 *R/anova.ppm.R
+842580bf1e8ff51f0749f24582ffe646 *R/applynbd.R
+2ffb106a46f58e99a86469d6e9c71174 *R/areadiff.R
+6823bd6c644835b979f7407d60099de3 *R/areainter.R
+7bf8e40692b4b1de8a73edfa4a711d6e *R/as.im.R
+e933ffbde340ca92d90226034dc027eb *R/auc.R
+aed3d05661b614e76d2ca1181af71d5c *R/badgey.R
+85b90545fe7081b0deef6d0b2ed2481a *R/bc.R
+789f24a9b671b204cca2bea93c57a49e *R/beginner.R
+f1193955614b0b88873d0c653b99bdbd *R/bermantest.R
+64f8eee699bbfcf3c0d90ad1814f963d *R/blur.R
+7434cff6e94c6ac8eac21623c7532e8c *R/boundingbox.R
+cc3f8bffb96b961a96ba8ad83c30886c *R/boundingcircle.R
+7ec0a56277da1b01a244c3749806934a *R/breakpts.R
+5c98f522a635f8da3fdad5ccf60cb388 *R/bugtable.R
+5ff3ffed95f449815b9432494eeae8b3 *R/bw.diggle.R
+42f231888d5fdce7e53c41efb64f9fd7 *R/bw.optim.R
+378dc97af1f488f3610f31c61a00d960 *R/bw.pcf.R
+a0e399a8f635ed69a7904e2f5164f2a6 *R/bw.ppl.R
+af810a4aec1706ad6f5b8ea1de775f0e *R/by.ppp.R
+f6c50293c09bd8b03c89f5f8a6051a6d *R/cdf.test.mppm.R
+cd63b2734588421c555f8fb9572575b8 *R/cdftest.R
+5efd1803e37d7b6109ce6a9e60879748 *R/centroid.R
+ab416fd2e977a6192d5764c5dce2eda9 *R/circdensity.R
+c17c44e7db04f6205bbcc061b9bdf7fe *R/clarkevans.R
+55b8ccb851f8a2cf879ff18a57963304 *R/classes.R
+e317941f2ea501d3f8f31733bd17173b *R/clickjoin.R
+46f71fabebc3f2cd3060bbc35dcab8d4 *R/clicklpp.R
+661b2e31b502ac5b57ddd74f8c5b89c6 *R/clickpoly.R
+3c61b16ed3d95747d6e1eda91adb36bc *R/clickppp.R
+75551661b0aed9ecd75bed68400e638c *R/clip.psp.R
+77d64459b26599f5ec5590d8a798ad8b *R/close3Dpairs.R
+c4d4821b820bffba026deedc8277c62d *R/closepairs.R
+665d650e33e2625b0f275b7ec449b48f *R/clusterfunctions.R
+510010060f80bc874ded543566e267d8 *R/clusterinfo.R
+b2a666ee329acd511ee4f5f10dad5c79 *R/clusterset.R
+04a6a7ee95ebfb72d926543a1f648253 *R/colourschemes.R
+af5b68ffeff7856ea2d22c9f8b9a918c *R/colourtables.R
+d04b501a2d7090f736d1ab04d4ffb713 *R/colourtools.R
+8b56cc31365f5a6fa4fa148894eb0a67 *R/compareFit.R
+a4b6af14e0796fe45a4abd3f37f69f66 *R/compileK.R
+ee9efbe243dad84530fab391beea5b45 *R/concom.R
+4db6c643d4110b3966eb17866823f230 *R/connected.R
+eecb29c4bf786f00b4eff413d5a04efd *R/convexify.R
+bab6320f88300558ee6bd2f361a22cb4 *R/copyExampleFiles.R
+507c554ad23eb96d9f5e7dde076fb80e *R/covariates.R
+34513811a31ead96ea98ba0fd695390b *R/covering.R
+da21dc26f7cdb0a08c645116a05481ce *R/crossdistlpp.R
+96e52355fc9f3275a0d195245a047f77 *R/cut.ppp.R
+4b5abf8151752cda014ad4404c4d3b3d *R/daogenton.R
+f955b7d8038fa1585e10430b6bf9fc01 *R/datasetup.R
+18bc06f2ec35c5681ed5cffcb3c384eb *R/dclftest.R
+53e0fb6d1af23e321722eea5e595653a *R/defaultwin.R
+9c0bd6afbb507bedcf7f6283f47f7b7b *R/deldir.R
+539a7b69048787ca2824e60941858704 *R/deltametric.R
+2cc842357e30e4d8a5a3e2c7225bf332 *R/density.lpp.R
+3a6da5d450f5bda8ce033e5352de1547 *R/density.ppp.R
+5aa565ac5016411dd2c9f00a0fcb0f46 *R/density.psp.R
+d158fb2a1a99c51f82b61aca2073d15d *R/derivfv.R
+c027a0425cb16790aa2f605b68e48a75 *R/detPPF-class.R
+2926aabd8533958849d4b1d4858823d0 *R/detpointprocfamilyfun.R
+3fe6c1fd4e144554204c5789944d6e83 *R/dg.R
+528ad0fe5084e7c9772de1fd40cca1d5 *R/dgs.R
+b2d3b97a49bfce78011088375dfbc7a2 *R/diagnoseppm.R
+0ee9ce9fa1700351bfc71402908a6b46 *R/diagram.R
+4f08e0198ab68d963ac80012063fb6fb *R/digestCovariates.R
+2382f86d9fe3dc1ad12175d6249706d0 *R/disc.R
+afc28b279d4ee328a6e2935dced9175e *R/discarea.R
+a8ec36b9d6c7dae5266f2c1961296c74 *R/dist2dpath.R
+eb8b9b627f5ffdb68e93a208ff165a77 *R/distan3D.R
+9820e884830f7694ed97358c6bb04913 *R/distances.R
+a90f2d580f886a276c423ff8cdc0221a *R/distances.psp.R
+7a9087886b66301769fcf94a6e20a8cf *R/distanxD.R
+37abb1b9eb553fee07699fd9c78b5a9c *R/distbdry.R
+aa66fd2c2cb128704755391efffe402f *R/distcdf.R
+e077a6b98a5774abf85b0023d392ff7c *R/distfun.R
+fb30d8c97ce5a52baa12b6cd400ce371 *R/distfunlpp.R
+3d5b8e18bb3fd5ff0176a9c0e008bf27 *R/distmap.R
+d0a40c2539530b80a2c56153fb9c5491 *R/dppm.R
+cf9f2dca552d561b0a9b1a31bbcaa0be *R/dppmclass.R
+ed7beb4930906f00a1d7b9f0fa47f18c *R/dummify.R
+7815c8e89c75969fa1166564cff61227 *R/dummy.R
+aea30200bddaa63d824a1727405795fc *R/edgeRipley.R
+11c2b02b8bd7ddf3c422a19b1cf9b451 *R/edgeTrans.R
+5dedf671d0e785e963af44634c27c62e *R/edges2triangles.R
+1539189a0ae07f00bc8488fb54b8f071 *R/edit.R
+f41cfef5a017692ee84c46e395aa4c36 *R/eem.R
+fa491205118f62630678f36924d233ca *R/effectfun.R
+16ca4a0d6243bf8ac92e38d1be870f3c *R/envelope.R
+137a3968b56d57a1316076471b8e028a *R/envelope3.R
+70b7255db85f551617e5ffac3c610299 *R/envelopeArray.R
+a207b14a959b369c5d7d837a2f7b940f *R/envelopelpp.R
+e330bb6fa7a99e028497cf61aa37905c *R/eval.fasp.R
+ff3cd0276897dc1e4fb035339e201e63 *R/eval.fv.R
+acf0120150f94dc97578d37c7098613d *R/eval.im.R
+944451449ef91a49e6ef233e7a745b00 *R/evalcovar.R
+35b119ece94b3a1527870b106d7fa08c *R/ewcdf.R
+cf259b89afe368a0b167a6c58c615648 *R/exactMPLEstrauss.R
+93f4d4218957ab82492423dfb49f2522 *R/exactPdt.R
+ed28a5a602cccb2462693d267888c468 *R/exactdt.R
+55128c282f0fbecda3fbccec5c55be21 *R/factors.R
+6c3913843ec541a115f8b3c8a2cb1fb3 *R/fardist.R
+dd968a2f9b87caf7bbd83ecb20ca7f40 *R/fasp.R
+a613494a2f7dfc0e060ad785ebbbdc78 *R/fgk3.R
+8c54fb7878e2deddad520a63fe98b826 *R/fii.R
+53803076aff675d0ac5fd7a4722e4f1c *R/fiksel.R
+d0c7203c1b8b5bd514913583fb2733c2 *R/fitted.mppm.R
+6ae89590c88402caf9a722d2fbfa9fd8 *R/fitted.ppm.R
+9b4accc71b99440893950bbb817ba851 *R/flipxy.R
+d9aa777d1b59addc43b9a2fcb6d8711b *R/fourierbasis.R
+78fdc58026c4b2c6747b07289224f1b6 *R/fryplot.R
+86264b95ef1b50e8fd33e9b8f14f243c *R/funxy.R
+512956a4c8e8d65b6984e7c37eb7e8c7 *R/fv.R
+99f1495bb864342fd9a86339e2fc9c49 *R/geyer.R
+eb1f3c876a7c6627ca82ec3d8c193260 *R/hackglmm.R
+3d1c0a72df0c59e40b784cb04029694f *R/hardcore.R
+f1e16ee9c975eda27002f44980d1ea57 *R/harmonic.R
+27d1b0c9a1d43eab62f466db5fa90b85 *R/hasclose.R
+7576337da12b9579db9130c9e609822f *R/hasenvelope.R
+44ad75b9d79a08a4689e2a101d8c6bd5 *R/headtail.R
+123085aba3859361ca3f643e7c956c6f *R/hermite.R
+1e6f05124674b83e0d62227fa4588b2c *R/hexagons.R
+0fb7e0bb6cdf5a5a43a68bf3f63b04c4 *R/hierarchy.R
+e076a46be3a1610a4c6627bc52f50c71 *R/hierhard.R
+b54beaf8c8cd0e0a447bb11d1e7e4972 *R/hierpair.family.R
+0bbb0d6e10a91c54f17142006cb5811a *R/hierstrauss.R
+7d769e6877d34cf5b2c9ca9d4b5c78af *R/hierstrhard.R
+cb789e61235a77960c6b51d880da4c8d *R/ho.R
+e3447ff115c6062ebb3c5d3f30b68f1b *R/hopskel.R
+8f27935d61bd20d7f3db6b1ee67ca06d *R/hybrid.R
+a941bb1777fdc6a6ba28776240487a33 *R/hybrid.family.R
+3079dd3bd618f23c663071f5c7a7e755 *R/hyperframe.R
+148c4d628f13507ae4e5cc5783d1f274 *R/hypersub.R
+80057f9f11a0a26a9a3b75407a0c8f05 *R/idw.R
+c3df9c2ff02a1dfc0de1ae145004f6be *R/images.R
+d2a3e9a8e03f10a0978d3d8af8b32180 *R/indicator.R
+a87e74c429097d3740cb20466f0ecfc9 *R/indices.R
+97169059771b69f8f89ea8dc15d2867f *R/infline.R
+b5d86a0ef4b0631df4e32747acd13171 *R/inforder.family.R
+e302e9fadd6204cbb46265a58e415c4a *R/intensity.R
+870fa33ac789dc5dd7bb031d70cb01e6 *R/interact.R
+475c2212ff5bb6318748b0c8243c4caf *R/interactions.R
+39a5a07c0cb7f85bf84d6c9d8edde030 *R/interp.im.R
+db5574753bf54b88204aee53f13f86e3 *R/iplot.R
+e60ea20fa4389884ee22c550d12ef058 *R/iplotlayered.R
+d5ec9fb4f8c91e3183b71d7f4ebf6783 *R/ippm.R
+be78559ca577d66f04b72bbc5a2684cb *R/is.cadlag.R
+3daa3f95a778a039d0aec8f57b6ebfe4 *R/is.subset.owin.R
+e4b9b1f05b000b5a69c2e992ca7aaa09 *R/istat.R
+587fae1d11000f1650e45d955b2675ef *R/kernel2d.R
+9a419fe9b5320d538bbd6c8b61faa8b5 *R/kernels.R
+79abe3ad5c54621a3dc219c8051ad102 *R/kmrs.R
+5b4ffe1f13fcc25da22bb23e97328d8e *R/kppm.R
+c6a963c659a5874585a22522cc07d285 *R/laslett.R
+a44ea103d52b7cfc4445578bf5636ac6 *R/layered.R
+bea5de9ece8720246fe8c7932645a492 *R/lennard.R
+5affa028f6bbd36a38ebb1ecba7e7f5e *R/levelset.R
+5447ac9d761bc6a2ac9013ec29d160bd *R/leverage.R
+9df10e52c06ff2e69c7560f8c4379068 *R/linalg.R
+330d75bfae835d637ddee04525ac7d6e *R/lindirichlet.R
+fe2945637341ddce52b478f75d30d3b9 *R/linearK.R
+429271155d7ad823164444bec3068e76 *R/linearKmulti.R
+2af0a9aa60f0e5dc19b92fff09e576f5 *R/lineardisc.R
+f2f04bfaec86cf00552471bb235c43db *R/linearmrkcon.R
+fffeaff44ff9ac5712b6e309bfa6c051 *R/linearpcf.R
+ea3c990d1867f7d1361ceb2b3609b82c *R/linearpcfmulti.R
+d4afccb12a075e58904517028bdcc4e6 *R/linequad.R
+f656ced26a02f15f41a5c9fefe01d6f5 *R/linfun.R
+6fc66ddb7e28ad56e6c17c833ee76963 *R/linim.R
+9f5d706548bd4bd4eef99a5b76cae618 *R/linnet.R
+c84fa219074f4a57fa74ed1a9d1f13f1 *R/linnetsurgery.R
+8a5ab79bfd496a94a023d4a0dd0b7da3 *R/lintess.R
+52a89f9b4655094430fc919796ae31c7 *R/listof.R
+dcfe6d6f17e9fe3abfcc4d728286a07b *R/lixellate.R
+d6c15515249187e299711d037a496eb0 *R/localK.R
+046b3c934e596c11a3ef23c6d3050e8c *R/localpcf.R
+5d1eac32541b7459937ad9657a1f768b *R/logistic.R
+79b2893aa8f7d6da819b4fc6d774e38d *R/lohboot.R
+eebf0f2f68a65853944783d14bd776a0 *R/lpp.R
+6d8cf46bf07a3bfe3d432212d6c59919 *R/lppm.R
+638aaf3c095860e6cd3383c2b374d8a3 *R/lurking.R
+37deccc3696c5a48c6acef19dba2c0cc *R/markcorr.R
+e03a3921cbb451137f495c068fcd33e6 *R/marks.R
+6a3d9ee234a765226a04a6c3c23663eb *R/marktable.R
+ea3c639566286389d8c3aa4549f1cb6e *R/matrixpower.R
+122663a73ac8dc2505cde314cf75122d *R/measures.R
+11274026a6676f44edd79ba3ddc7a80b *R/mincontrast.R
+6abbec9263d9ff7773029523af1c1aaa *R/minkowski.R
+7948768c9430d5b3b2365726523e78dc *R/minnndist.R
+ba8782eb90ee38cc0a41750b16962bda *R/model.depends.R
+5a89d6b1f9f3df7749d8953db2583f14 *R/morisita.R
+a69e2a67f2135cfb2e9414b0d9154bb2 *R/morphology.R
+558e1b60587ea55a4a7ad3a1a7b01d94 *R/mpl.R
+6fa509593629ac83c8956f7bdaa462c4 *R/mppm.R
+dc1d4c0c4b31cd65c3ddbc6b86ba4ff4 *R/multihard.R
+23ade743ff6f39f31ff1bf52ee73f088 *R/multipair.util.R
+8f2545648901dd2988e32bd2cd61a99c *R/multistrauss.R
+fd6a391d09ffa9ea20e479950bf1d3c8 *R/multistrhard.R
+a2995358cffbe933b3a5c22933b03537 *R/nearestsegment.R
+f87cac6159ed6e7399812c89e2ae7dd2 *R/newformula.R
+1cbd67649b5abf2fb99bd67b1f14b3a4 *R/news.R
+ab0739722eeb45406471a20c68fbc49a *R/nnclean.R
+56e256896deacb82338889e6124f3644 *R/nncorr.R
+26b334ec773508a099b853feca7feee2 *R/nncross.R
+09b95d6428ea9d64f0ab8caed513f688 *R/nncross3D.R
+eba47f8399e92f6bdd92ad6504973280 *R/nndensity.R
+dd035ea9b9c38630ad82778b8bea2734 *R/nndist.R
+b17089e11b6fc28803715e0c2c4c80a5 *R/nndistlpp.R
+f68e4d831bd072237a5c8fb60ff294d2 *R/nnfun.R
+074d8beee30549e746082d7146b82f11 *R/nnfunlpp.R
+02c47fe1a3b2104bbf0935da6b8f25dc *R/nnmap.R
+b221ae2d690a07454c1faae331565807 *R/nnmark.R
+9b9905828a135e8b36e2d031b06e3a56 *R/nnorient.R
+a2b94b1e048d56444616460d62da489d *R/objsurf.R
+b80aa0341a5e4d53548273662bcb7ff0 *R/options.R
+39b46ec1a232b9a703ad0760cda45f5a *R/ord.R
+351116d5be6f0c962874e927ccf6c416 *R/ord.family.R
+ef444b3f933f760902368994e197c59e *R/ordthresh.R
+930fed507a70fa0c78d412c296228e1b *R/otherpackages.R
+717ef43c7fa89d127ff9919f740f1b21 *R/pairdistlpp.R
+04a1d68481b2f4f676610397cdaf0b21 *R/pairorient.R
+19584df2d0ad0061648ee51f70d0bb2a *R/pairpiece.R
+7a32a6552c99225a840ac73444a48ed4 *R/pairs.im.R
+466b544caf507d4c55ab60882d0b7945 *R/pairsat.family.R
+64f20b43bc0da9058da304d59354e44a *R/pairwise.R
+88f67d802f23a0958625804f2865c606 *R/pairwise.family.R
+7e219c0e44487ff76d3c06bb86a48984 *R/parameters.R
+72113584f098ace9ff784e8f1bbae131 *R/parres.R
+afbbcf7829d779789195527e235ae8b9 *R/pcf.R
+b67128056bc43645c83e5386d5744045 *R/pcfinhom.R
+c76e896661080247e9b5e95e3d1cab0b *R/pcfmulti.R
+8dfd5c29c944568943c7144165e91bab *R/pcfmulti.inhom.R
+dc3c2e276c6590be4760bb56d26b4bdf *R/penttinen.R
+dfb884749a05ca2792358f6d1ff35f0e *R/percy.R
+4e2cfc0fbe156faabfc39466b943f522 *R/periodify.R
+1d16cfd9960cff167575d94927c65244 *R/persp.im.R
+8ef771abd0f6cd80c90354fc70e205f5 *R/pickoption.R
+750978b020d2374379a5ceec9daf914b *R/pixellate.R
+2d80a6c0992be3ddca54105cf8a59869 *R/plot.anylist.R
+6cd130d91ac285c2220504f42fda6c46 *R/plot.fasp.R
+cfac4f99a5923164d43b9600d198faa1 *R/plot.fv.R
+521f28924fd26bfa1ba002eba8970f39 *R/plot.im.R
+2773c384cfbd702d010b28ec7852c1d6 *R/plot.mppm.R
+e587b76b875421c8abbf6e2a123a52fe *R/plot.owin.R
+d5a505a3595402321313152da0cf42a2 *R/plot.plotppm.R
+125840a6a0920802ff82d6989a807046 *R/plot.ppm.R
+73509bde87f9d6e7b5a90826191c8870 *R/plot.ppp.R
+afe5f45c9640d77644e16d89f7ccf620 *R/plot3d.R
+3700a0ace938e63861194b9aa6de5a83 *R/pointsonlines.R
+1a0c9f29d3c585dd7bbb9c9872188e05 *R/poisson.R
+d219c34acd3456bdf4bebff5254f6ab8 *R/polygood.R
+eff91a3e3e11682e74644666801c4fc9 *R/polynom.R
+ced1f0fff95ddcc732a15c28d5374c5d *R/pool.R
+c90a0106194e05afb693abf65b6fbf98 *R/pp3.R
+ba2d86883306409a785162c9b178ba90 *R/ppm.R
+cc99af57b4ae25100643e24c974108f5 *R/ppmclass.R
+d7e7ab96efb78bd01ab9a37ad8361cb2 *R/ppp.R
+711ec61ac0331e821a46dfd8fab43f27 *R/pppmatch.R
+d06c9bdab82a9d64ccabe3154e91bcb9 *R/ppqq.R
+c11360c41b18ce0b5b3d65e7987f214f *R/ppx.R
+14f10941149da57fd2835cb84916fb4a *R/predict.ppm.R
+67236f840124ed31fe5be47d6724e4a0 *R/predictmppm.R
+4c2fa4db8a5241e45fc6a1a0ed5ecf2d *R/profilepl.R
+f1a20af4200e23511ecbc06c75be2e33 *R/progress.R
+08414842a05850accce9dfb656d4bc7e *R/psp.R
+810df9bfc77d25ffae6eb75f376a2e9f *R/psp2pix.R
+64a4d0dc641b780930791d125a5e8978 *R/pspcross.R
+0f544843cf76c9e96904d9cb9b758c3d *R/psst.R
+b794c41496b10af9b552c6c57a2b81df *R/psstA.R
+de957e4255898c05cced10e7f08c065e *R/psstG.R
+037230fd025f8fd9308b0b4b98760799 *R/qqplotppm.R
+b5d703a4b5c573d3f70a7bae6dc86620 *R/quadclass.R
+427ceadf3465047c40765f37a1b8d0e6 *R/quadratcount.R
+99aa240d18a1c29ee5156bc18017f63f *R/quadratmtest.R
+cc7fc0270683fcf28f0f85df7986c0cf *R/quadratresample.R
+2951d9945ef610ae7a314070db1823c6 *R/quadrattest.R
+ba81abd6fe6794df409e9434dc1897f4 *R/quadscheme.R
+98e0c4bd3399ee5af59ee8bff8311876 *R/quantess.R
+24f39fe4ae6c36437f7fb5caa8cab62c *R/quantiledensity.R
+115a41aaf2d3c55b68f56802fcd56020 *R/quasirandom.R
+c94cc3c652b342b1f61d7b1a4c2d3cf2 *R/rLGCP.R
+826c16455c2ff593392615c64c4eada5 *R/rPerfect.R
+6e0b2255bf8b42f5a236c391b10306de *R/rags.R
+c3e2ce9e0b3eb45a04f06ff0f9bbf0c8 *R/random.R
+73b70afa74d324923fd971bc1a4f8bbc *R/randomImage.R
+33cf2609e42688de9d12fb8511bc6871 *R/randomNS.R
+8207a3585cc3256849c84509d3482718 *R/randomlpp.R
+6a53fb3470cda89aaf6ecfb6467dc33c *R/randommk.R
+e202d8e0becb58b38447cea341089432 *R/randomonlines.R
+2e3a3bb0b90807144b8d36e69d4df3cb *R/randomseg.R
+24972883102c87da259be0da8e982db7 *R/randomtess.R
+6483654e7478864c9833dbf2247f6881 *R/rat.R
+a8a6f895acc18aa94be66b546be6c17f *R/reach.R
+511b88658d51796d9a0daf71b44b9cb4 *R/reduceformula.R
+73737547daa1a7dff58c5f68c6beb350 *R/relrisk.R
+10087f7be400e45e753c146c9c36a054 *R/relrisk.ppm.R
+99f3f901362509f3494658b3b853981a *R/replace.ppp.R
+75609e221d9edcba28f52c6554cc8639 *R/rescale.R
+7a685c4e7cf92aa99ec1c6bebe0e7cd5 *R/rescue.rectangle.R
+fc977246af62b82b80bdf1ecec365664 *R/resid4plot.R
+7dda79e949765275356a0552b47e2a2b *R/residppm.R
+15f1ea6eff30e3b2b696ca6e9a3f5d4f *R/residuals.mppm.R
+45e4675f61f4637fbfbb4f2c22b86ffd *R/rho2hat.R
+f83a7816b9c396e64da2e26870a8b085 *R/rhohat.R
+9f131d0cb36ed648e36e623b90b674a9 *R/ripras.R
+f47c0eccaa94d6655967d988635109f5 *R/rknn.R
+568ec3eb53def1d7b801f58b3dc58452 *R/rlabel.R
+35f1e78c4ec615ebdf02bf522d15dbe7 *R/rmh.R
+7a1597cd81e8bf23f5c1f1923a2485de *R/rmh.default.R
+167c21797276863ddd5ce4086c4cdd13 *R/rmh.ppm.R
+7968359dbc6c8929bbae8698ca339565 *R/rmhResolveTypes.R
+6d56dc58f41590dcf1f7ae9187a609a4 *R/rmhcontrol.R
+15998f271ee8c97b82a16a551a524cf4 *R/rmhexpand.R
+1d184c09d72a11669946213002d04fda *R/rmhmodel.R
+e9b62883c43b618c2eed5ad3729c0a23 *R/rmhmodel.ppm.R
+cf21774f29b957fa630c08d4db3c9cb5 *R/rmhsnoop.R
+112482932314aa9b1dba3ec93d6d0892 *R/rmhstart.R
+bf21ab55a2690786c4631d4efb808e9f *R/rmhtemper.R
+e9b546ea6437c8c859621b2d15e211ea *R/rose.R
+f55cbf2dddbd415b9d1be26b0f2e2af0 *R/rotate.R
+3e578da354709729b7022d4764c6a1ec *R/rotmean.R
+00c58075e8e3a791b26efd1f11b7f3e6 *R/round.R
+4fcd2dee7f630b38180d057ea7802a20 *R/rppm.R
+cc177bd23c6c65d1dc16d42dbda7dc8f *R/rshift.R
+d232999f512896464f8a1cc92f6d1661 *R/rshift.psp.R
+e64dda6db52ba50d4e4049ded9c6fc8a *R/satpiece.R
+39289933fcd94c7fefc4c303bf6887f5 *R/saturated.R
+42850d1f9770d402d5c115f8437ace70 *R/scanstat.R
+154960e7b9e6f83009976521bcae5734 *R/scriptUtils.R
+b78826700a4657b758d4f198b635f9d0 *R/sdr.R
+6e9989251f16919b9802f21093e8ac69 *R/segtest.R
+4e79b8bc03182442d9246b637a42c7cb *R/setcov.R
+048ef0497b1b7b5b715a35c0d88bd4f9 *R/sharpen.R
+c0e7adf01137747788fad043b581c8e7 *R/sigtrace.R
+a6f1e5c108c1a2700af50c9fbe28d417 *R/simplepanel.R
+726d8b7d4040f25222b228f1d99beea3 *R/simulate.detPPF.R
+1770fb0dff8c1a7a9a7ad77431489b00 *R/simulatelppm.R
+1ccfb5949adbf54095c4f314e7c7db12 *R/slrm.R
+bd8f2db009e68112b36e1b12e83a0fcf *R/smooth.ppp.R
+cbecefbec629cf2780114a39e1b29eff *R/smoothfun.R
+385402b5f2a439e787efb62f28aa65e9 *R/smoothfv.R
+d013ffbb016df9e12452a90d803c9b18 *R/softcore.R
+ae772bacbeaeaa047e3b2250cd385885 *R/solist.R
+ecf9a1367b59dc3c38a434e1be27f6f8 *R/sparse3Darray.R
+9e0d912170f34c8517000d089cb88f03 *R/sparsecommon.R
+8f0ae7e41ae1795c6e0386b1bd8b5718 *R/sparselinalg.R
+e424c7d5c7eecff9656de3d1844d69ba *R/spatialcdf.R
+7c54cdbe15c58ab578c6e6a56fae631d *R/split.ppp.R
+5db355095825c7dd19a2b30df08f4652 *R/split.ppx.R
+503eeb0499d4ebec56f62b4805416799 *R/ssf.R
+3fa186fc8b0829bee18e98a5f76e1afb *R/stienen.R
+fd1e6bb816634ff979534dbd6f951c20 *R/strauss.R
+aa3c672e1030e60e1dbce383181f9f09 *R/strausshard.R
+8a634ba847c42ffc724f3f888a864b69 *R/studpermutest.R
+943cb60f12087230d18f53f21b4515e8 *R/subfits.R
+f4585ca133a2ae2c121018e12a765e76 *R/subset.R
+5ce5c7351ab113de0450a01fa495759d *R/suffstat.R
+0bcf7dd8d2ba1f985741420fc8c7c0f3 *R/summary.im.R
+e3e70ccc44580a8f7bbd86be9d3f47db *R/summary.kppm.R
+398d2b3d789d26c1e7f285134c4f8fce *R/summary.mppm.R
+f770a5206c8b9e290f91bdf99fbfadad *R/summary.ppm.R
+f3c8a499c7512c3d0d7bea8db3564fe9 *R/summary.quad.R
+17ac6b693751c046d1f9d2d8ecdff095 *R/superimpose.R
+5f9f094fe91578b015227c22d2ed5a96 *R/symbolmap.R
+5db46d44d728b2eab7261189b7e9b297 *R/sysdata.rda
+52c7080a314ab087e20d9303f89d7d05 *R/terse.R
+60680732fdb1509bd32f6275b6e74e35 *R/tess.R
+911200b0d21f6772f56dce486f9d07d9 *R/texture.R
+c3032a7090caf3ec6606ec27f85a6772 *R/timed.R
+ac4c53fd6fb6d473d0477a842831fa4c *R/transect.R
+7143eaf3c90beea6b15d60fc9fb65c77 *R/transmat.R
+98ac37e50a983e82784c24f1b41bc91f *R/treebranches.R
+746a816204b6077878f2bb7e3bdb4fdb *R/triangulate.R
+6852ab624a8f0e62a8e6a362efb8e055 *R/triplet.family.R
+101269992fdb2a14ef41176c3f61d697 *R/triplets.R
+652c45292114c90424b3b7f657a7b096 *R/unique.ppp.R
+d9d1240c99738775d158dafac3dced4b *R/units.R
+0b29ba3c73fde1d1d67bea586bcf29eb *R/unnormdensity.R
+07416272f0608019708764e89ec51944 *R/unstack.R
+40e2bcbbe0b0d2c92ea047e213e2c53d *R/update.ppm.R
+28c6bc1a35ca918f09fee0153493e030 *R/util.R
+7ed4538851138ac6212059a289531087 *R/varblock.R
+902d97c8541b066b0b2d8f5bf86be07e *R/varcount.R
+02692ff1a7f933f21c52ddb780ec580e *R/vblogistic.R
+ef46faf16027dd9ccf4e6785680b2699 *R/vcov.kppm.R
+1394c73ff04627b29af0e76e78027b6a *R/vcov.mppm.R
+c591ed93e6809f3e917c6a3f7bb79f25 *R/vcov.ppm.R
+81e6b9cd4f4b32664161bea0fe03a84d *R/versions.R
+466c043d66aebf5056e3a8ea2970d74d *R/weightedStats.R
+fc84b5f741b3893c2683b7d12250b053 *R/weights.R
+07918bccceba9f182ea295c6c5495bca *R/window.R
+1b0b972f5d906c80227137f2d3c5b305 *R/wingeom.R
+32a5d788998a986177a7360f4434ec13 *R/zclustermodel.R
+a03eb570c12dafe38ae32bd4d652e632 *build/vignette.rds
+174e2b783c74886334afce68e0049242 *data/Kovesi.rda
+08b52fb0088ea1277ae9dccbb4f030ef *data/amacrine.rda
+bb0ececc0479148f2e2d76fe1fef2382 *data/anemones.rda
+0d19ee44bbfe1e5d81d6eef5d3fd4754 *data/ants.rda
+68f451b62912587096fa89cc2968d959 *data/austates.rda
+f73295eef5848accf40625bb882ae3a4 *data/bdspots.rda
+1611f11de95c744058459748da423ed6 *data/bei.rda
+ae12959b3317252cbf865d3d0544829e *data/betacells.rda
+c5b424104995ce3988eb939b664dd4ab *data/bramblecanes.rda
+87ca4b40a1f4978fd6546cdc8424a457 *data/bronzefilter.rda
+2bc670b48b7777fbcfc45e1efe31184c *data/cells.rda
+adb65f9a6b3b1b9d571ce4f8b375a71c *data/chicago.rda
+e246311ae31f4e8fa7c4d1b568832313 *data/chorley.rda
+8dd35c28572545042d6afef080a58c7f *data/clmfires.rda
+97b56a169d9d43c9ecc26bbda1a1d70f *data/copper.rda
+2318098f26a43316000e10b0fa77b452 *data/datalist
+814c771760c02287bcf4d3493c27bc7d *data/demohyper.rda
+61349edbea644f60ad19135ca7d2a721 *data/demopat.rda
+98eaa1b6f1aa20323f63efcc10b5cd12 *data/dendrite.rda
+636f9107158761089b3d427db4b89773 *data/finpines.rda
+57983bc5e842d433f75e1d385f596eba *data/flu.rda
+e059c9c442a190c1c54bc4eb4bb8214e *data/ganglia.rda
+2a9f11fe10080f32554be49aecae6242 *data/gordon.rda
+a9dc54ae1de5e98b8e8bd3c108380417 *data/gorillas.rda
+2b4d469275bc7dc5c6976c2fb02af215 *data/hamster.rda
+99cde07585300cedecf90a1e82893b23 *data/heather.rda
+f04ac149e088235d01f02ead51150b43 *data/humberside.rda
+bc53d51fedd521b35d5b3336814890b8 *data/hyytiala.rda
+7b0309bf8b5508e433e4b25d3d43ab7d *data/japanesepines.rda
+b9f99d9eced07cced931a31758b5e302 *data/lansing.rda
+f2b1a241816f63ded48b997d6339eff4 *data/letterR.rda
+a87cfcf01a97874faf6cbfeba036e1a4 *data/longleaf.rda
+557ddc3de3173f4ce0aff4dc39394763 *data/mucosa.rda
+437ecc9fbcb84638e881155570bcf049 *data/murchison.rda
+f8717778ca20e0be01fe088089ec8ac6 *data/nbfires.rda
+c19a85b9b13314f215f5db09b34edd67 *data/nztrees.rda
+84328f1ef36a83882a29c3fde8e5a1da *data/osteo.rda
+a6326c4504d6fb6e771f4eb169b171d9 *data/paracou.rda
+461e4537c055b3ea6fe1f4bfb90f8793 *data/ponderosa.rda
+47053e1301708b296b47f976b39533ac *data/pyramidal.rda
+0db60178265d89b4789629525d19a656 *data/redwood.rda
+ee8d7bcd5ef33b60acb4c8e751edd0fe *data/redwood3.rda
+89491d60952d1bda011991b3874f3eb5 *data/redwoodfull.rda
+1cd6f4d46c6cf85950ec07af1e8312b0 *data/residualspaper.rda
+93efa59e4c95da5dfcfd0ea09393a276 *data/shapley.rda
+66fa68c236ceda9402677ec625dc9f7e *data/simba.rda
+810e5d2259c26d740bf7d58b5af4f2ff *data/simdat.rda
+c8671be1343015b1e08229d136a36d07 *data/simplenet.rda
+4a1a6d5ef80e6deb969d676e823cb8d1 *data/spiders.rda
+e205437789cb0ef67030050bfa7d2302 *data/sporophores.rda
+f732dfa4a171ed53c7d2be25a25a2ae3 *data/spruces.rda
+76e36c41148b49d3032ef645c1f07d11 *data/swedishpines.rda
+459e51208426d9c4d42b7c9c2e1f39e1 *data/urkiola.rda
+6e48c1cd7c202121a3619e2d076efbf3 *data/vesicles.rda
+41c4c044d79adb202e06104dfbc97fed *data/waka.rda
+1def1d08a31acda6d64d98180d05281a *data/waterstriders.rda
+864845ec756c9bdd720c4399e3aff644 *demo/00Index
+68d9e760c09449956b5a218bf1c91fbd *demo/data.R
+69fec98d1b46c8c3205c44b677e6da15 *demo/diagnose.R
+7b2ce490538f0c5f81ec16714d8eac9c *demo/spatstat.R
+b88a8fdba50e164d51309cfb78c2028e *demo/sumfun.R
+407a8cca8bfc8a3407cc02f319e92e98 *inst/CITATION
+40b7c225a16a40129627f60b05dcbe32 *inst/doc/BEGINNER.txt
+0bca7859e2d6b9a1a87fa49316d12037 *inst/doc/datasets.R
+3c3e82583903858cc663c50624f075ee *inst/doc/datasets.Rnw
+03c213fdbfc9d85da0ae2b8b0759b4fd *inst/doc/datasets.pdf
+85bf7ddebb18b1556010a9d2c6a22e61 *inst/doc/getstart.R
+3cc6b729932901e25778c21cb679eab0 *inst/doc/getstart.Rnw
+8f69d02d5608d239f5bebaf52059da0a *inst/doc/getstart.pdf
+abba35afa78b130ec7385d29bfc44e96 *inst/doc/packagesizes.txt
+04c10abc1b6237495542834c2e9c7868 *inst/doc/replicated.R
+f420eafa6b29f360b72269c91c485167 *inst/doc/replicated.Rnw
+e96febef4f0249e44d2e2613f0aaaadc *inst/doc/replicated.pdf
+6ad41658f8ffaf90445efc191679fef8 *inst/doc/shapefiles.R
+593a297bf4451016def2e174ea62b099 *inst/doc/shapefiles.Rnw
+34d7779a9ce38c209a962565de4aebf9 *inst/doc/shapefiles.pdf
+fbb9c768d0595adca9e5325b7fbd849b *inst/doc/spatstatlocalsize.txt
+264edd047ec92735c38845978b6d65ad *inst/doc/updates.R
+2dab51568addf8e96262c4fd3322571e *inst/doc/updates.Rnw
+1ad2ea24f56574cabb866562cef9bc74 *inst/doc/updates.pdf
+12e68895fef0d3aa0bde45a0ddbadfa4 *inst/ratfor/Makefile
+22e8a5189942ba190c13475b35459c7f *inst/ratfor/dppll.r
+6d471ec061ea91398ba16323de56b9db *inst/ratfor/inxypOld.r
+5bd85f0a101e5b39b9d3dd591bcf657f *inst/rawdata/amacrine/amacrine.txt
+5d385fac345198c0abc886db56d60cd5 *inst/rawdata/finpines/finpines.txt
+af2c22765b01d39a1efc4e82e958ec48 *inst/rawdata/gorillas/vegetation.asc
+add7704f3f0d627c818d1d0052d95964 *inst/rawdata/osteo/osteo36.txt
+a9c56bdedef6ca09bc3abca5187dbc90 *inst/rawdata/sandholes/sandholes.jpg
+ee8726f4f1c4a43da6c1826a016844a3 *inst/rawdata/vesicles/activezone.txt
+12e1f9c6f0b061dae508af6bf030a2cd *inst/rawdata/vesicles/mitochondria.txt
+cffa360131ec346baebd9bfcf3150be6 *inst/rawdata/vesicles/presynapse.txt
+7598921535774304c799a7ddeea2dc36 *inst/rawdata/vesicles/vesicles.csv
+1d1cffe05c85380eea5a4d0402705271 *inst/rawdata/vesicles/vesicles.txt
+4ffa8d56e5dc6fbda3b3d677d7f80b50 *inst/rawdata/vesicles/vesiclesimage.tif
+1127fd6a66fd3b7653de7d51ba8b3c10 *inst/rawdata/vesicles/vesiclesmask.tif
+dffcdb453f4ff4c29ca1f12889f94b76 *inst/rawdata/vesicles/vesicleswindow.csv
+bd3c9e3c64d3407632619dbeb7705b7b *inst/rawdata/vesicles/vesicleswindow.txt
+8fedcb4ea9b0d19e2da232b4b45c0626 *man/AreaInter.Rd
+f441ed342c42aab693974f9d6482997b *man/BadGey.Rd
+1d8d9afeacbb8e67e56abf455ebfe6d6 *man/CDF.Rd
+fd2b04c59c0df2498ca684f01f3f1f86 *man/Concom.Rd
+282a83310afb9506c9a035c96e55bedd *man/DiggleGatesStibbard.Rd
+c59392fc5fa60782bf91c8c0675de144 *man/DiggleGratton.Rd
+7c842797134bed1851c806d99de4a0a0 *man/Emark.Rd
+951ee2df828ad3de8c17340e0c5255b9 *man/Extract.anylist.Rd
+d6cce504ef640b6cdd4e88a012fd52de *man/Extract.fasp.Rd
+e50f40eb875c124095941fc99a486f36 *man/Extract.fv.Rd
+6baaa916bde99c7c78b8ee52dd949a1f *man/Extract.hyperframe.Rd
+7e4de82b92ba6d2135c405cc5ec7564b *man/Extract.im.Rd
+62c8609253cd6cca6b78e76ead3864f0 *man/Extract.influence.ppm.Rd
+9df24cebfa86a43b332426d55b0d65cf *man/Extract.layered.Rd
+c165f89bfa18ceb0aafb57ba7c3977f9 *man/Extract.leverage.ppm.Rd
+de0b5e2b05e4341b1f5de7261804a993 *man/Extract.linim.Rd
+dc6c0e3c01c25332b25485da48c1f918 *man/Extract.linnet.Rd
+bfa41eea5bb69facbbecd9de7dc33106 *man/Extract.listof.Rd
+5538d78ad2f67efa3bbf9fea43483a5f *man/Extract.lpp.Rd
+91e440f304e3e4a4e021236bcce45108 *man/Extract.msr.Rd
+740e2f7917d0e8e8df64e3f223ea09d6 *man/Extract.owin.Rd
+5c9311308cb414457f540e618d59078a *man/Extract.ppp.Rd
+3dc6242c1c60d1c44b59899b58f07129 *man/Extract.ppx.Rd
+8ff65133fc2b94fdba9cf10fed0a92b0 *man/Extract.psp.Rd
+298518688a3bb5c06e7f4b332965f163 *man/Extract.quad.Rd
+4555b288918db5e360c216aad5b314e9 *man/Extract.solist.Rd
+d19bef46a23ecb0ab93af0ed35850d76 *man/Extract.splitppp.Rd
+279ecfbecb82ff618333037b97bb953b *man/Extract.ssf.Rd
+7f25d30fc29a62aa0499b509dcc22d52 *man/Extract.tess.Rd
+563f9349613f728ccdc5c7e2edd9db37 *man/F3est.Rd
+2ebdff1ff498c5d21fe8920a6255a6a5 *man/Fest.Rd
+21ddb0ef529a342599d30a4ed85da941 *man/Fiksel.Rd
+403214182a5ae9ab45bd0ed4ed5b2b96 *man/Finhom.Rd
+2cde77506f7625f97adcda2d2056a7d2 *man/FmultiInhom.Rd
+ba39391af699fcb8246c5b2af6a2ce06 *man/Frame.Rd
+4d35bfd3f54d1f1733a73d4727ae6f94 *man/G3est.Rd
+ebf156c5c9a5852a3f6ece9a52ea3d0c *man/Gcom.Rd
+5a51479a48d4247feef59af98acc62cc *man/Gcross.Rd
+0f7fb9b0fb9c1a658f34bf706cb1cc73 *man/Gdot.Rd
+d876a87fd0e28f9a30d2deb945f9ac7c *man/Gest.Rd
+7bc2fc829a69f32d91fc2f8bae9bbf57 *man/Geyer.Rd
+c22248f619df98176a5f15bd24c15602 *man/Gfox.Rd
+1c0f40bfecfeb2b67f90fa3ea8c9b83e *man/Ginhom.Rd
+fc20837f195b91fff99f485ff9403fe2 *man/Gmulti.Rd
+887f381bda44dabd416d26336263f902 *man/GmultiInhom.Rd
+4020d4c91210a91557500533d771652c *man/Gres.Rd
+d3d2b569cf8a6c694141d49f8484a80c *man/Hardcore.Rd
+cddf79075d4c3849899b29d6a4552132 *man/Hest.Rd
+77c49a32e912ecaced766cadad6476ee *man/HierHard.Rd
+ebcb391ba5dcf25006f76797e8140278 *man/HierStrauss.Rd
+1234e600c429e1b4e513e6aafa007cec *man/HierStraussHard.Rd
+4759ad6c9c0cdee6c1759f7d13a4fa0d *man/Hybrid.Rd
+857f637abeb713f20381e6ad5277852c *man/Iest.Rd
+f349addeda8ccc8db7f7663a57cbc067 *man/Jcross.Rd
+5cdbc2a706600d5a77de0c44976184ab *man/Jdot.Rd
+a4b9764604f960f6ba23305ba69332f5 *man/Jest.Rd
+342fa010fd85fe9d2d58c313e40c390a *man/Jinhom.Rd
+c4c794997556f644475e44bfeaaab636 *man/Jmulti.Rd
+1e284fb57855d43a5579dd9fe62e8f2d *man/K3est.Rd
+102e8e2aa4917c2eb31986fa9749a45a *man/Kcom.Rd
+7911449fe5a316294577d1badf980820 *man/Kcross.Rd
+9a8ff697e04046d15b5d648c9185de4a *man/Kcross.inhom.Rd
+0906bcda182df31e3b9690933b189a3e *man/Kdot.Rd
+4f6a45d1aed62498f357ae00833e640a *man/Kdot.inhom.Rd
+5591528fc6e40dd45438938a335e4985 *man/Kest.Rd
+e8604ed0e68a6c592107645cb6555b62 *man/Kest.fft.Rd
+ee1996918d537d56874e84afb1b5b2c9 *man/Kinhom.Rd
+1a6b32af2b8f01a02c072267551d29be *man/Kmark.Rd
+eab68e49bbe614323b1a9a6efab6121b *man/Kmeasure.Rd
+c396e17ccc63be7995b7902317b7f3e6 *man/Kmodel.Rd
+89f20169df3dfbd7ea1826d2e87003f4 *man/Kmodel.dppm.Rd
+421fcb36cf31cd17e9514bea3346fed8 *man/Kmodel.kppm.Rd
+9adad596d661756331cac79aa953ec94 *man/Kmodel.ppm.Rd
+37b26fec6bb4442cb7f0cc82cd1bd64e *man/Kmulti.Rd
+07a5f036384ed840eb006ac2d24e1f10 *man/Kmulti.inhom.Rd
+e5c631bccf5b969b81fc0c9f8058b07c *man/Kovesi.Rd
+af6036e4b4a4c599c1b233bdf5d92d7e *man/Kres.Rd
+2409f9d2191ef5ef6e05b5655c9d094e *man/Kscaled.Rd
+c5987d1db8f0582adf5d742e70cd7377 *man/Ksector.Rd
+9115a22a373040ef2d7209718e4fbe29 *man/LambertW.Rd
+b443285d64e6b9f2755d92c3f06e6120 *man/Lcross.Rd
+42f98a6d2a6dc5671ab0a5cbd4883d33 *man/Lcross.inhom.Rd
+50e55c7b1f5ffd7badeac28cd2a80adf *man/Ldot.Rd
+ec909f6e04ba4dcd4f9d1579900be538 *man/Ldot.inhom.Rd
+4794ecef3ec1412d2423325f35cfec42 *man/LennardJones.Rd
+a668e989958013ed86179a52f603952f *man/Lest.Rd
+cded9f0225c7ade832b012dff7941d39 *man/Linhom.Rd
+019a96958fd216110d6c221a32cc5605 *man/Math.im.Rd
+7fd06b632c6c5cd04786c925bd2c3999 *man/Math.imlist.Rd
+cd470834ba92eb4d985d63e956fab73d *man/Math.linim.Rd
+fcd0b10c4c71c6d01500b70c894e578d *man/MinkowskiSum.Rd
+26a9db71cd8fa55fdc1eb42afaa2907f *man/MultiHard.Rd
+62f6b6f26e3d078704b4742b9e43bb13 *man/MultiStrauss.Rd
+bf2dcf70457431c00a3049bb814dbb33 *man/MultiStraussHard.Rd
+176bbee178c7111abc5d6a0fe97ba0fd *man/Ops.msr.Rd
+e61d4cfd0d9bacea2346f5c064f28fe6 *man/Ord.Rd
+37b2dff8a8916eea7e7927961b3c86bc *man/OrdThresh.Rd
+3856350ef8ce867f1b9fa855082b74f4 *man/PPversion.Rd
+e5df8b20b03a422103c24fa834a8f32c *man/PairPiece.Rd
+404b13dc8185a43d0206f2e54e3878a0 *man/Pairwise.Rd
+084575ea7ae835815f09f0f3db1824f4 *man/Penttinen.Rd
+04bb79d763acc68544149340fc7b7dd9 *man/Poisson.Rd
+5470d9f56e7d27ffd495d9dc345f0e75 *man/Replace.im.Rd
+d4fddff9acfab9982c86ae4f9b79343d *man/SatPiece.Rd
+586b157510810340fd0b1f34adba6819 *man/Saturated.Rd
+89216a124723c5fb1c2347c7446f8ce6 *man/Smooth.Rd
+466dcdc6cc4b3995e072f9ff9c958ccf *man/Smooth.fv.Rd
+2348f3e403116fe986e0f4b056cabbe3 *man/Smooth.msr.Rd
+30109888bd3df0c3940bbdff3d1d05c0 *man/Smooth.ppp.Rd
+8ad465a3b182d156f6e38d0c928542bc *man/Smooth.ssf.Rd
+1c62395938f274307c386238cbf0ebe1 *man/Smoothfun.ppp.Rd
+10e77a36abf54decc7cab4196a6989af *man/Softcore.Rd
+8ba2cb10456824cd83c49a70fe1d41a8 *man/Strauss.Rd
+dfb8b417187d6dfae4408a4caa8fefa0 *man/StraussHard.Rd
+d5569b50779abf3f0033d9381c6fc13c *man/Triplets.Rd
+207c090d4efc85174fc0d99a0464f89d *man/Tstat.Rd
+e900b1d972859fc84b3204e049bf9968 *man/Window.Rd
+f116c40218b20e1743e1febdb81a4287 *man/WindowOnly.Rd
+2f49d9f7b5b69c352ab0c8b90ff470fb *man/adaptive.density.Rd
+da5e83b6e5a25aedb3393de676c087eb *man/add.texture.Rd
+96d3635cd31ad5fa40a142d03ebf11a6 *man/addvar.Rd
+ad01dbc80f6193c73a59989b0e1d03c1 *man/affine.Rd
+f321206a180ad57fe5acecc685b7644d *man/affine.im.Rd
+ef8a00b237a0279520f8c333a533b44d *man/affine.linnet.Rd
+86d9daaea62cea829ae99d97391db561 *man/affine.lpp.Rd
+2bda2d4b230d431cf1158e897dac57f9 *man/affine.owin.Rd
+21f132fd230e2ece3fdd1d9e9e372093 *man/affine.ppp.Rd
+4ca5484af674c88bbcbfc0a54368835d *man/affine.psp.Rd
+b87eed274e5d9c4bea4722c59b418c16 *man/affine.tess.Rd
+f936f4c065ca59299257510de3a62fc9 *man/allstats.Rd
+117241411b6dac48f191670497a39e80 *man/alltypes.Rd
+77afccb9bbb3c6b7c1bdfef0acfc4cf7 *man/amacrine.Rd
+0e6e07086b59f572f1573b41425453e7 *man/anemones.Rd
+bcd959ac0b1e77e9bb3a19c00681bdf3 *man/angles.psp.Rd
+5fcd23067d4ca2c676f57bf3dc7c71d5 *man/anova.lppm.Rd
+b843c4f3e8f1ff429a7972f995548da9 *man/anova.mppm.Rd
+d30797ca2a0b728a7ece2cb01daf07b1 *man/anova.ppm.Rd
+d8d90d340989a5c40405dc5a97f5487d *man/anova.slrm.Rd
+d467b8f9e049c3244cbe808617b5f798 *man/ants.Rd
+63c074e020a138be1c4661864b75e937 *man/anyNA.im.Rd
+3afe3645a550e7085f8ee4813516f8e8 *man/anylist.Rd
+aee56da55b0f47fb6cc49314ccafaf8a *man/append.psp.Rd
+ebafa74187c6ebc98eaec232d17e43af *man/applynbd.Rd
+57adf249c91fde7afa6a3e4932f5ae54 *man/area.owin.Rd
+4ac55fdf97cfc648eb839bf267e35255 *man/areaGain.Rd
+34ea12a4ef430cccd02fb208a0980d11 *man/areaLoss.Rd
+ceacda934ca7dddd84f18eae8e28ae9c *man/as.box3.Rd
+037c6fddb0fde0d07a30da270c7d804c *man/as.boxx.Rd
+2dd8d36757c24a29aaed2c837229539f *man/as.data.frame.envelope.Rd
+0ec47d1f67cdada328f5a9d4b9b71916 *man/as.data.frame.hyperframe.Rd
+2b0d501dcd65f686140f155e2079cdab *man/as.data.frame.im.Rd
+436c8c6f3941b1c98b4f310161ef71db *man/as.data.frame.owin.Rd
+9b1b64af3fe5a91f74937c9622aee20b *man/as.data.frame.ppp.Rd
+625df6c52e611eb00c67e1b12efd4efd *man/as.data.frame.psp.Rd
+d643ee803cbff8a55324e84208db6787 *man/as.data.frame.tess.Rd
+028d508f316097c621dcbaef53a7d9b4 *man/as.function.fv.Rd
+3097e1e816d23fd1e56fc1d63ebbad45 *man/as.function.im.Rd
+ac2273a069a3ce20d19cd8e5a1c4bcb6 *man/as.function.leverage.ppm.Rd
+9d688576d849022fa3326ff0a5a251b8 *man/as.function.owin.Rd
+216c2723695f08917d5bc6dccb80c483 *man/as.function.tess.Rd
+e7f77be0adf5a08ab2c965f624a83f80 *man/as.fv.Rd
+14eb58bdc3b207105128c6dc51fb86e5 *man/as.hyperframe.Rd
+53a356f14c2d00ff5328f09fe2b6d210 *man/as.hyperframe.ppx.Rd
+7d359e7a3e6d41a9bae0d141c8f541ff *man/as.im.Rd
+5e20b5864259039bd8ca272aee68027f *man/as.interact.Rd
+1e916c8013dbf03efc8967072d5a147b *man/as.layered.Rd
+949d97f0ac584fa259d33e0c49e6910f *man/as.linfun.Rd
+a485b734be9b2fc8236c648d0013aae2 *man/as.linim.Rd
+ffbac6474298e0323e33c45b918f4437 *man/as.linnet.linim.Rd
+d52772e67af0ba3019677890f016af27 *man/as.linnet.psp.Rd
+a87cd4c1ecdfa262347b3a0d97b41192 *man/as.lpp.Rd
+8574dfba2a3941a4217c9cf2ffd9a2a0 *man/as.mask.Rd
+4092204230e2003cb69e4659f0772972 *man/as.mask.psp.Rd
+36c4175e14e918f18f124fb401e25943 *man/as.matrix.im.Rd
+4f0323fe12b26266d955603f279fe9fe *man/as.matrix.owin.Rd
+4e69bae442947b114c8e71434ce2f44c *man/as.owin.Rd
+5c624477d81097c64ae4879ebf3f18e3 *man/as.polygonal.Rd
+dc165e3f06b729b80bd883fb226f700a *man/as.ppm.Rd
+272b9e0258c8af6bcaea3f272a67cd84 *man/as.ppp.Rd
+13cbca3dff795f0ce585afe910e25d10 *man/as.psp.Rd
+43498a7fed17e562b24c5141290e177a *man/as.rectangle.Rd
+6a714102b91252c9af411c4426119bc3 *man/as.solist.Rd
+df020957289f4dbcd83e1800b6efb124 *man/as.tess.Rd
+1ffc758f7822de70bbbad12b6a2f7961 *man/auc.Rd
+2ee55713e4e1903169c17b50b71fb1d2 *man/austates.Rd
+3df7907e462038f9d2f410983c4be948 *man/bc.ppm.Rd
+5d2ca9b9cb773088b584b20013096828 *man/bdist.pixels.Rd
+6f91e4b42fe96806b3677d7bec18d9cd *man/bdist.points.Rd
+2001c44828382ca663e828c03a993233 *man/bdist.tiles.Rd
+6f53a111f5d27f2b0f94e802c3a1606d *man/bdspots.Rd
+8fc8b29789a472e000c4c22734b12865 *man/beachcolours.Rd
+a843cd73ef1835fe4ea1a0ae21377f01 *man/beginner.Rd
+34bfe7bb20f5983fe4266325b268d30b *man/begins.Rd
+6b53714e666b0a0994f624ee1ce0f131 *man/bei.Rd
+ec3d1d0d1e0275d05f8c8620ae5ae3bc *man/berman.test.Rd
+07590dc52a72eb6dfd20d382089e341c *man/betacells.Rd
+5da7cfc96a5c926f20691a304f45d901 *man/bind.fv.Rd
+0e95688703c12c3882be360885d45f53 *man/bits.test.Rd
+2bd2f8d1858fcf9dc15783bfae44b7e6 *man/blur.Rd
+e6ba5d3a73902ccab3c5756130de7e44 *man/border.Rd
+ffc9dcc131ee08a2de771dc48376ba9a *man/bounding.box.xy.Rd
+0ce808e45591c98f62d77bbb46d4287d *man/boundingbox.Rd
+c3a04159e912cbcde9a325492fdde92c *man/boundingcircle.Rd
+3d31bf6cfa4a213f6ef7524001440339 *man/box3.Rd
+54834701b5ec9fb27880597f2e7593e3 *man/boxx.Rd
+3d63735c1fa0d8169ffbac24d1186b80 *man/bramblecanes.Rd
+eed7fe041f626623f67b718b48f0c387 *man/branchlabelfun.Rd
+fc7aba2bf2445646a5a8dfea4a7313d0 *man/bronzefilter.Rd
+67668120b7f4ffdeaa3ea8f442d749e7 *man/bugfixes.Rd
+2ddd26ffb3961c8d0dd7e222e611bd2a *man/bw.diggle.Rd
+64d95c72ae1007af65d991f6e02dcf80 *man/bw.frac.Rd
+7a26763b16c9994d15ca4e530f854e66 *man/bw.pcf.Rd
+3c8bb59eae14904e02f3d38de3987176 *man/bw.ppl.Rd
+02615cf3a642d7ce83c6a8c41878c74e *man/bw.relrisk.Rd
+814c3f54ba9dad42e6eafb4531cb27d5 *man/bw.scott.Rd
+d98e541b77851d88f1b0d4cf31cad3b4 *man/bw.smoothppp.Rd
+0640aa92927b2ff274cfdbbc620cbc7c *man/bw.stoyan.Rd
+4b6e4a877960de8cb174776093ba332d *man/by.im.Rd
+c431579f078cbfa38cb40ff75266f494 *man/by.ppp.Rd
+93cc9742468264a22db86420d04ac4bf *man/cauchy.estK.Rd
+74388c55b993f154afaa7a6bd14fd048 *man/cauchy.estpcf.Rd
+4e6af84798c586d0fb2639e82d615251 *man/cbind.hyperframe.Rd
+85f17f8c9f597df93b4c60efa8038e76 *man/cdf.test.Rd
+18015a55635c4993c9b9c5309f37fdaa *man/cdf.test.mppm.Rd
+6f8d58a5c1320e43f9b35574d48a7603 *man/cells.Rd
+721dafe0173a124d815016e25927e960 *man/centroid.owin.Rd
+70449d259fc0d8593676ca872f03cf75 *man/chicago.Rd
+ac1f611664f0e91ed78191eabe1d6ecd *man/chop.tess.Rd
+9374635b1bbd4e9229f82ea1f01a09d4 *man/chorley.Rd
+48ff174a1fddbefc4c47fbf7bb09f816 *man/circdensity.Rd
+3b351b5f30e29753d670d961d8415c17 *man/clarkevans.Rd
+f8f1dce1636d1274a13844b7ba1ebf05 *man/clarkevans.test.Rd
+7654a284e984253e10452f98b495237f *man/clickbox.Rd
+bc2149002879baf26a34499534af39e1 *man/clickdist.Rd
+ca00f4d880a5cd81ce7d9a4b125bf2e0 *man/clickjoin.Rd
+6dd835e65a81b6072b8d24272c47b600 *man/clicklpp.Rd
+3295d098a0ee9741a140b28cad7307c9 *man/clickpoly.Rd
+0db2141942eebc8b8461975ca3ed3dc1 *man/clickppp.Rd
+bde6cf2f59210136c60d71b1b2923138 *man/clip.infline.Rd
+8709f6c8af7b86caf155093e8e555fed *man/clmfires.Rd
+0251bef6f229a72adbfa0b1e7209f32a *man/closepairs.Rd
+17fa0c49ef74a43f240889d90665acea *man/closepairs.pp3.Rd
+8e7548e11708ceb8cda1b98cca310fd3 *man/closetriples.Rd
+c655b9c9bcd0726251bb8239cff191ff *man/closing.Rd
+20b584adbf9de04a16dd5d241887ea03 *man/clusterfield.Rd
+3d329a051f092321d473c272564b3e6e *man/clusterfit.Rd
+d4547fd8acb0c39986339c0e6aadca9d *man/clusterkernel.Rd
+06c717935a0c03f8b50b63ef8d7b35bb *man/clusterradius.Rd
+9969967ef0c3cb82ce73a0be6f08fe39 *man/clusterset.Rd
+dd5b0370ff9308b4ff96985941b94cd7 *man/coef.mppm.Rd
+0c3bbbf66c63e7ff5c00665c2d6692dc *man/coef.ppm.Rd
+8e1270ae95df370c3c9ef1ec6ec8d3bd *man/coef.slrm.Rd
+cb6e2c28b4393eaae15198da6c0a6028 *man/collapse.fv.Rd
+815b557eec83bb3e480b1f238446c245 *man/colourmap.Rd
+fccf4a85a80abff62c0f97657fd5021c *man/colourtools.Rd
+7577667cef680abd3a2ec8d13fa413c0 *man/commonGrid.Rd
+4fced4c12b414fa7eb95606a0aee8e59 *man/compareFit.Rd
+daa2f0ca4dbf0a35bf279c038507ec42 *man/compatible.Rd
+bbdd91aecc3f370e6d5349d7a72d56fa *man/compatible.fasp.Rd
+f38f5c88a38c76ceff89005c2cfcc8b7 *man/compatible.fv.Rd
+0dbb6f2874f36a2409367b425a20970b *man/compatible.im.Rd
+86f39d6bbc2448fa0a8ea7c8f5405c1b *man/compileK.Rd
+edc5b84dd7d0d6f60e5c27a2a7b8874f *man/complement.owin.Rd
+9c37062f39c1f519e59ef50d6dabf3fe *man/concatxy.Rd
+855d8ab6682b817023e28929db270f91 *man/connected.Rd
+66c243bbd2743d13c1b5d5745b1f1d08 *man/connected.linnet.Rd
+601e1ae3f06558038923e882c069b456 *man/connected.lpp.Rd
+280b7a290611fa3a37ed7a049bd5640e *man/connected.ppp.Rd
+7c22775202e81c10f23a210beba00e2c *man/contour.im.Rd
+5fb384aadaccd28b925cc1ebc69f135a *man/contour.imlist.Rd
+4cf1b288cffdead7f85cf0c0b86d42ea *man/convexhull.Rd
+6ae9c7cf8d7a0140679b72f24916669f *man/convexhull.xy.Rd
+b323e7ff70db6054fe6b1412bd88e92f *man/convexify.Rd
+b79d752bb9228bc32fec25a2c488fb2f *man/convolve.im.Rd
+94c8ba44e070e19b5aeb426294a865ae *man/coords.Rd
+b83d3c3e054011a84f58df63bb77d581 *man/copper.Rd
+a9bd28193b7bd6df6188b99de18a1b33 *man/copyExampleFiles.Rd
+043d477a1eb8019a72195306231fa2be *man/corners.Rd
+2c34a94c784871f85be912723b7bfb46 *man/covering.Rd
+270668697df2da8368c716055fa16a39 *man/crossdist.Rd
+0a3b28ff053854b6d9cb321304a3cfd0 *man/crossdist.default.Rd
+b0c7f58b1d9393deb439402c83ad0fbb *man/crossdist.lpp.Rd
+cbf5b84279b9901707b0e61ba9b80290 *man/crossdist.pp3.Rd
+86bc3c6c78087ee16d8523de2ba09428 *man/crossdist.ppp.Rd
+b69412588854da90e9af9cc0c38a49c9 *man/crossdist.ppx.Rd
+ef11492e48f734cbdf46f6768ab05be5 *man/crossdist.psp.Rd
+58cac3a3319f60ca98f5572ad296bd41 *man/crossing.linnet.Rd
+661c50d342e9e32e7cc02d041c7ac0be *man/crossing.psp.Rd
+af5b5fcc49b56411f212cb487cb1b0ce *man/cut.im.Rd
+a6cd74dea247cd759420023c3a9fd0ea *man/cut.lpp.Rd
+b3403e532bd0990c22a6d08cb888a8d9 *man/cut.ppp.Rd
+827aff2a554b82e1613672fd2baa0542 *man/data.ppm.Rd
+5a937a4d7cd2da5b2babdd2066816ae6 *man/dclf.progress.Rd
+cfe58cc740d905ec11772f662a1115a2 *man/dclf.sigtrace.Rd
+5a7f493aac4e5f2be9a88712eb73fe9c *man/dclf.test.Rd
+c53a24542be4a9eb16621ec99a4bb45e *man/default.dummy.Rd
+0ec93174356b4c09b9e90c5886dd50b8 *man/default.expand.Rd
+abb5e748d59b40a306e411526a5b2b17 *man/default.rmhcontrol.Rd
+aa8044cc7f49b4534077806138c7bbd6 *man/delaunay.Rd
+0c1110c95832a3655e0db53d8d809ea7 *man/delaunayDistance.Rd
+652c6ff5511e6a5ad1fd5c338590fef8 *man/delaunayNetwork.Rd
+c9aabaae8e19078decca8cb19c6b7ab5 *man/deletebranch.Rd
+b374a9ff6e2ac8635022a2d778d3e8a5 *man/deltametric.Rd
+38d726f21bba04a1df116580866e910b *man/demohyper.Rd
+96daf1087b07894bf45d6c144834baf7 *man/demopat.Rd
+5e78ffad06d5223282b04ced4c1a5c47 *man/dendrite.Rd
+062e5ffe5a4b34071de57ac2c4ea6486 *man/density.lpp.Rd
+48e490ba4c9e04344f9be91aa6eb3704 *man/density.ppp.Rd
+09926a4ef79118437810d30413f3acac *man/density.psp.Rd
+2513790a42beb1924372b3d6a9bef779 *man/density.splitppp.Rd
+50fca06c24aac752c750d95c8f56f7f6 *man/deriv.fv.Rd
+cbdbe94416949b5299d9ae68b0875705 *man/detpointprocfamilyfun.Rd
+453e9e64e024c7d9b2bf3a16a265dd3d *man/dfbetas.ppm.Rd
+bf827df6f97572729e9e8c4268d94c77 *man/dg.envelope.Rd
+d81c8d8e2470d6081243c61dd0829a14 *man/dg.progress.Rd
+869ceab579b8674d6f7a686c01d3197b *man/dg.sigtrace.Rd
+5edbb7cfbde253a31a72d93c3659126b *man/dg.test.Rd
+0cbe18d4651cf05e1fba2b83d7aab0ec *man/diagnose.ppm.Rd
+feca6dece00297bcde1938f768c71985 *man/diameter.Rd
+464efabd5c13eb3ea9c32c6f63c786f5 *man/diameter.box3.Rd
+4c28781cc6cdbb4a0122e422396920f3 *man/diameter.boxx.Rd
+041bedc39fc1f905ec3c6964cbed8119 *man/diameter.linnet.Rd
+03f9b542f39c6984b4c97467e4c8b482 *man/diameter.owin.Rd
+40a782e809b0b3254e8ec2e8a0eea4a7 *man/dilated.areas.Rd
+5c772598d07d22fd501e097fec2efa95 *man/dilation.Rd
+382a56f92a804d52582cf716cdf79b09 *man/dim.detpointprocfamily.Rd
+b72e48220d7edbac9fc1686c28abd50f *man/dimhat.Rd
+6a1b619a11efac3fd663196971650713 *man/dirichlet.Rd
+1ad1403e16fd89dac665e9a8aa252076 *man/dirichletAreas.Rd
+b8cb279192ea77edccb3110fc3319209 *man/dirichletVertices.Rd
+dd23abfd3e95775f341cac33045517e1 *man/dirichletWeights.Rd
+d76568876272da7abd5e822e75d8097a *man/disc.Rd
+b8173608c10e7504c3a424e9a45388e9 *man/discpartarea.Rd
+972e9a47b875c69b39489b73231909c1 *man/discretise.Rd
+78a3f03297cf56c59e91165460890f93 *man/discs.Rd
+ea33970727fe02497a888d725d9f24ea *man/distcdf.Rd
+b40ca490fdec737f0b3a62fbdb0502b3 *man/distfun.Rd
+645074b7fae18a7483a1a15b0ba8a371 *man/distfun.lpp.Rd
+faf4889f5aa8f5a78ec409ceadff1866 *man/distmap.Rd
+e3c58049d4baecc3370bf36eae896977 *man/distmap.owin.Rd
+39d6077ce1b03560432870885cc17985 *man/distmap.ppp.Rd
+d342c07a66859851fde8c661f720b0d2 *man/distmap.psp.Rd
+873905c02500608230d9ebd9d245a839 *man/divide.linnet.Rd
+80cc6cd76ccc874db32f2df71e16435b *man/dkernel.Rd
+8203fba31ada9ac91ebc681f14b3ab27 *man/dmixpois.Rd
+a6aa371b1538557b295c207c2541d1dc *man/domain.Rd
+e93b2a5629b7d0568ee47583179a89eb *man/dppBessel.Rd
+b86411019f323d1c651babd1b0b3d1af *man/dppCauchy.Rd
+4c062a1337877fd761fbf42041a09de8 *man/dppGauss.Rd
+6d17cd8399d74a1baeffe9689e09e00d *man/dppMatern.Rd
+f7b2bc9db23620c49b897a61935ccede *man/dppPowerExp.Rd
+43fca14b1c64f1a91fbdd8ec2e56b40f *man/dppapproxkernel.Rd
+71f1f80278289aab6dbf5823a5131626 *man/dppapproxpcf.Rd
+edb8c34118a8284a8946cddfda3392d6 *man/dppeigen.Rd
+96abd9ffe6be8d538ddffd6bcab72001 *man/dppkernel.Rd
+8a55e7897568dfd9f7991420051a449b *man/dppm.Rd
+435c26403f233030ea066d2135f121c8 *man/dppparbounds.Rd
+976360c41648d086a5d572c80803ee46 *man/dppspecden.Rd
+1f7ad57545508cbaf3ebdf359509c96f *man/dppspecdenrange.Rd
+cfe1652069012d2187f56f99553015aa *man/dummify.Rd
+c24c8a8fa5eb8b63614db81c18feb432 *man/dummy.ppm.Rd
+f6b361ee19802275e239b31f06d60879 *man/duplicated.ppp.Rd
+c7dafd56c8c1989b45484ebf5d49371a *man/edge.Ripley.Rd
+57d514e98cfdcf5106306495284b167f *man/edge.Trans.Rd
+2c8a2832c36a28895f184e502e3b6624 *man/edges.Rd
+34b1bf16cb0a8c40bffabcd2d64655ed *man/edges2triangles.Rd
+b1150d865b52154abe4431d659e9142f *man/edges2vees.Rd
+1e473e02de85c9fcf7691e98ff40d1f1 *man/edit.hyperframe.Rd
+b7283957228c1bd0b45da50abde3bc0b *man/edit.ppp.Rd
+87e2d038d7d67ecc47943d0ab8648c67 *man/eem.Rd
+67ac031a69f72a6d41fe228e101637df *man/effectfun.Rd
+73298504ab6eb8d5f0a67fd42576eb9d *man/ellipse.Rd
+62f625d02e3671424d4280d6c7de0428 *man/emend.Rd
+98047ebf1418a60d55f2609f089ef970 *man/emend.ppm.Rd
+c05922d67d600d554bd789b4811359c3 *man/endpoints.psp.Rd
+c4e6b043ea0551041d560626facc5849 *man/envelope.Rd
+0cc8bc4984ea85d93d847009e5777e48 *man/envelope.envelope.Rd
+5f5c7922a4e28946c70f6ff5c5b9de06 *man/envelope.lpp.Rd
+c74cb2b8a121a0866ca70d2e09d8e8c9 *man/envelope.pp3.Rd
+417ab6fe29f16cd10ec714b550af8374 *man/envelopeArray.Rd
+406ab1cde1b506f881777c6895445180 *man/eroded.areas.Rd
+417361fe6d1aaa8a94539398ce43533e *man/erosion.Rd
+efb075b7df9320223a187d100cc85c27 *man/erosionAny.Rd
+e5b8141ede5431bec1877bd7cc706ae0 *man/eval.fasp.Rd
+fe65f0c5f1631b2f524b77123f982895 *man/eval.fv.Rd
+87a4a3bb708ab0b130c2580149e5e674 *man/eval.im.Rd
+285508b0731c9de89e05f8f99d73a982 *man/eval.linim.Rd
+9d5bb092702c6a62cde37a5c4e330e10 *man/ewcdf.Rd
+9376bcd787b75df61fbdea8934659ec4 *man/exactMPLEstrauss.Rd
+6e1d4f4674976dcd96b752dcf9063a90 *man/expand.owin.Rd
+0cf5188fd022cb4c001997fafddc5600 *man/fardist.Rd
+60624a18119aca69226f751383d3d871 *man/fasp.object.Rd
+2f48d1b60ffea24dddae65b3f3b214d6 *man/finpines.Rd
+8a35f3f8a782c037b03c965081798cee *man/fitin.Rd
+c0ea753a8c1dd4801157715779193173 *man/fitted.lppm.Rd
+786b1dd29f8c2ef4ac4dcb377d273734 *man/fitted.mppm.Rd
+d5745de4c00845499688737c8f9c7ab4 *man/fitted.ppm.Rd
+9f6a06b4a7c477ca26c6bcc62a345213 *man/fitted.slrm.Rd
+ced7b79616a49227632d3d793f3fbeb1 *man/fixef.mppm.Rd
+753f4eafa062d230841d8009565c22a1 *man/flipxy.Rd
+4bfa8df44629b2c7d1276b33fd96ae3e *man/flu.Rd
+782d7ab24890616e64a5bacb50c9fbc3 *man/foo.Rd
+590d81abc370d01da0718fe8c8ed0c77 *man/formula.fv.Rd
+da28ccd90714621c7ba25c37b8297431 *man/formula.ppm.Rd
+87606fcae485faaaf60a53c3136bf71a *man/fourierbasis.Rd
+e411490a37b7ee3668c8ce893aa27a51 *man/fryplot.Rd
+88544b48ae1f433d914a1bd882642a5e *man/funxy.Rd
+cd5826c148618e9ff20da78773361299 *man/fv.Rd
+81a53ee2dd2c8afdda608a90d37c9f04 *man/fv.object.Rd
+b6564e6129a8973dfde9e469969cf283 *man/fvnames.Rd
+6b6a59ffc0a3fd0c38db2bdc4d056180 *man/ganglia.Rd
+8786b2679753de57cff66911fd4822a9 *man/gauss.hermite.Rd
+e03b800770db1fb75812c54884534ce6 *man/gordon.Rd
+4d8853ed6ce57038677490e3bc9b4ace *man/gorillas.Rd
+4014660ad0eea9fd14e7848f88612216 *man/gridcentres.Rd
+7c4ae72e764a190364f40fc736c9907f *man/gridweights.Rd
+8682c3236a127ecaa7ae430c3f9e72e3 *man/grow.boxx.Rd
+30375bff5e98a7539edce32aa22edb24 *man/grow.rectangle.Rd
+9c5c09ffb67e0bc66a921a269d9176c1 *man/hamster.Rd
+c341ff01dd0540df584ea23c2e055443 *man/harmonic.Rd
+e5f06d04dbaca03d1cbbf798e5a313ae *man/harmonise.Rd
+15ceb7a4e4d0ff84b3769143d69f82da *man/harmonise.fv.Rd
+7657b089c0d5ba1e359906786c7d80f8 *man/harmonise.im.Rd
+b2477cde55321142f298c95657f38e34 *man/harmonise.msr.Rd
+05eae00175dbeeaf8860df3f4b2559eb *man/harmonise.owin.Rd
+ba4ff281b41ed359b2c19118c4266f13 *man/has.close.Rd
+aed9f3cceb2feee6b031b650b76de1e3 *man/headtail.Rd
+5869e489039ea04159f59545f5a3bb60 *man/heather.Rd
+316a20049d16deea0ef33177d5fde00d *man/hextess.Rd
+059480c5911a91480fa9c38114732e51 *man/hierpair.family.Rd
+724e1c23039b744db082d4f33aabb3e5 *man/hist.funxy.Rd
+b47d63a2c205b434fe8d474bf5743b7a *man/hist.im.Rd
+823fe8a8414ef12e57e9da5e850bb61c *man/hopskel.Rd
+d0ac620d8a91e35f8fc032697819564a *man/humberside.Rd
+ca60f43560f10690985bc9e1144ff921 *man/hybrid.family.Rd
+adaeecad93471373f5014890c54b3084 *man/hyperframe.Rd
+ce512a5e916d4162a531a7e6dc6e6680 *man/hyytiala.Rd
+96f775face0dab2a7597a80a87dd5f99 *man/identify.ppp.Rd
+501a97c1754a87ce59faec88b6cdab4a *man/identify.psp.Rd
+5d8223387c54210727270d31ebc8dd14 *man/idw.Rd
+dcdf56d0f94e5f1848a701defb8615b6 *man/im.Rd
+fb116c8c2e11187dcd5792dd08ab18f0 *man/im.apply.Rd
+569c8cf7b3cec628e96e74ff545dfa8b *man/im.object.Rd
+5cf099bfda09c699261c0f8f04e2d2d0 *man/imcov.Rd
+4247b7dde78daaaf7aed382865eb0302 *man/improve.kppm.Rd
+36fb37cec394cd3a85fe7e53fa7f83e4 *man/incircle.Rd
+15c74c0a3626c007f606b73c46c658a0 *man/increment.fv.Rd
+af42f01a5a5904b676358687d1615fee *man/infline.Rd
+ceb9faaca226d310b73fca29f226f14b *man/influence.ppm.Rd
+75939f951b1f8f20b1a585050ee676c9 *man/inforder.family.Rd
+f7d7ae965369db9c0bb7fc97665a9008 *man/insertVertices.Rd
+a1ebb4c40a9876e357c402dd4952d415 *man/inside.boxx.Rd
+5f2fb734bab533e325c12831ddf3295d *man/inside.owin.Rd
+b02df1b8b7baaa48fe54339c406c4ba7 *man/integral.im.Rd
+91177756f188db2d9f75c3f6ba611350 *man/integral.linim.Rd
+fe606542bccb1f91c5de90f4d4e86cbd *man/integral.msr.Rd
+60293f2d3c2c7afa1f197d3b5ce74afb *man/intensity.Rd
+753620f42fe3e253ec926fc3a250add3 *man/intensity.dppm.Rd
+e696007219b960dc30c74bf2cfdcd222 *man/intensity.lpp.Rd
+8255a135069e2b020f2bb2a536805556 *man/intensity.ppm.Rd
+bb796724312d4a8c5861ea710aaeefea *man/intensity.ppp.Rd
+5460b9db190b08032b212593e6575759 *man/intensity.ppx.Rd
+09c1f979ba1643a0765b78dea079aabe *man/intensity.quadratcount.Rd
+9603be44e328138b5dc4e9d71f939ed2 *man/interp.colourmap.Rd
+8e4a20bcbfe9daa593882d3800ba570e *man/interp.im.Rd
+f67a7a66cdbba06a5feae81cd6d648ac *man/intersect.owin.Rd
+59de099fdf6f0a824ca78e977871859d *man/intersect.tess.Rd
+bc0d899193807240689833f26a3b01b6 *man/invoke.symbolmap.Rd
+61c0965c0544368c8f81d8f57495a0d9 *man/iplot.Rd
+c7dc4cf87b4d4e2f39d4ed6271914d18 *man/ippm.Rd
+8912e01a6e2ff5bddbe831d50d93ac6f *man/is.connected.Rd
+4ca865e9e624a8130ee672fbd01dd78f *man/is.connected.ppp.Rd
+a3704f6c85f8c3d9fa4d111160467730 *man/is.convex.Rd
+342ef62d8db2ccc867de36df82a4cec6 *man/is.dppm.Rd
+034fc8f4553fa6fb630179b1a8f4af1a *man/is.empty.Rd
+9760b1ba9982fd060eeb48c4fb3f5d4f *man/is.hybrid.Rd
+d6d24c19b232dd6551c046c8e5077e8b *man/is.im.Rd
+35e01c299472deb5431bca514719f6df *man/is.lpp.Rd
+253decbb8eff4048b12bb117f29a98c3 *man/is.marked.Rd
+779def2b9ffd4380112c9ed8fbb6a598 *man/is.marked.ppm.Rd
+f9a44d75ef704fc9f6fee89009e6653e *man/is.marked.ppp.Rd
+d271645928bf25109f599e6e8f2939cf *man/is.multitype.Rd
+c65436a1731a1fd918988f84fedc9f0a *man/is.multitype.ppm.Rd
+77fb7841d66dcfb478e03e7fa5192b84 *man/is.multitype.ppp.Rd
+0e40fc24543a9a2b342776b6ff5973ee *man/is.owin.Rd
+68c1be4581db533f711fc7534b037741 *man/is.ppm.Rd
+6b810b065b12c1b3272e6cfd52f317c2 *man/is.ppp.Rd
+006f6e69e5c82636c59f1c8f31b42e99 *man/is.rectangle.Rd
+4a19e24fdd91dcf51f330c5a0d7b452d *man/is.stationary.Rd
+6402d47263125b9acf4330fa429db005 *man/is.subset.owin.Rd
+c3bb8b1ff39d47aaa9b7e7740758d2c5 *man/istat.Rd
+907bc70733d63ff4328d38b65721b316 *man/japanesepines.Rd
+55a3c5b55340b4242fe64a2e244235f9 *man/kaplan.meier.Rd
+4baac51cd91d96502fc48a9953680066 *man/kernel.factor.Rd
+c3c9b102625d526bf520ddfcb16eb08b *man/kernel.moment.Rd
+549a8a0da9ff9822fed5d908f2c602bd *man/kernel.squint.Rd
+1efcd196df498043d360a07bacdda65e *man/km.rs.Rd
+041f7d395a50d6041df8bb160e60a260 *man/kppm.Rd
+d0f12ab0ca79efa35667f0f13270a012 *man/lansing.Rd
+a93fc9982cfd2e1ef0f14f2d2413b871 *man/laslett.Rd
+1887eaf6447de3ac419dac4de48c479f *man/latest.news.Rd
+e3ddd04a6557fd348a3345aef1f75d6b *man/layered.Rd
+15b9725c1210edccb56275d9aa304aa4 *man/layerplotargs.Rd
+7fc06f7ce92236daa4481801dfa2cf11 *man/layout.boxes.Rd
+bc40e1e1903d77258899e0a24554cbc1 *man/lengths.psp.Rd
+e9c49444e5050d0a34364f06eb0c22d3 *man/letterR.Rd
+210fc7aaf9caf507e3a1fe5364b1cab4 *man/levelset.Rd
+57e2adfb239f48e0e96329b0d99df7d6 *man/leverage.ppm.Rd
+d83d1b8412b1fd69adb36643f3402aa0 *man/lgcp.estK.Rd
+337ec904c2cd88c7a0954e011599684e *man/lgcp.estpcf.Rd
+5670b9dced04d800e69b6fe9a7b0166a *man/linearK.Rd
+d9cd6ac53d4ee887cfadd5daa6804086 *man/linearKcross.Rd
+1a3f04915ac19e6ffb73e14296ba6a0e *man/linearKcross.inhom.Rd
+8870826dd05afbb3073eaddfdc5720fb *man/linearKdot.Rd
+22b5b3e8b9f8a8d2aea8e655702f6646 *man/linearKdot.inhom.Rd
+ec0561370f5cd03b012d18df42178259 *man/linearKinhom.Rd
+61a9c0ee9a32fd5cb5cbf1f3c7cbe7a4 *man/lineardirichlet.Rd
+7e56d02c053058c6dc7131f5b8251dfd *man/lineardisc.Rd
+371c104feea0d1006a03244b6a7da51d *man/linearmarkconnect.Rd
+82a2fcf819afea83c7ec4dd19444b28d *man/linearmarkequal.Rd
+68490464e097e0b5444c9d6661239168 *man/linearpcf.Rd
+7ca5a2a446f5988f03a55ef23a41f008 *man/linearpcfcross.Rd
+60dbcb91f212de8c899f15461b434785 *man/linearpcfcross.inhom.Rd
+48a488b33f714b2f10d03b9e46d6c419 *man/linearpcfdot.Rd
+7591a888809ac480e0e4b99a824d9e6b *man/linearpcfdot.inhom.Rd
+00213d6b96fdc117fcd312c2e252d0b3 *man/linearpcfinhom.Rd
+5d975d941b25a1a85efa787f5ce8445e *man/linequad.Rd
+d84c050f2b06d80230539d24eade3fb4 *man/linfun.Rd
+9b8c092ac817d6b543939e2b538b3506 *man/linim.Rd
+0cb20cb32ff1e22f11f4deaddfb92b5b *man/linnet.Rd
+4722b591312a0d13c4a3665cafa27033 *man/lintess.Rd
+cc314c90ddb3ebc15144776d5d532b6e *man/lixellate.Rd
+35c1383dd89ecbec5d86cc0599ce2a3a *man/localK.Rd
+c04ad5c7ef4dd4df2e6a7d383f6afb2b *man/localKinhom.Rd
+28c667be5ea403f73423832e10a02511 *man/localpcf.Rd
+7caeac313765d2713f998e992babcd1b *man/logLik.dppm.Rd
+7f0b8d2d587fb6a191ffccb73a3f40ec *man/logLik.kppm.Rd
+de9a9690e5ef55aaddd586b960f3a0a5 *man/logLik.mppm.Rd
+92a5886b04d3ec37055d6911fd104738 *man/logLik.ppm.Rd
+ca7223a4495046290195eadc2f265c6f *man/logLik.slrm.Rd
+f992845d01509962abf25dd53979da6c *man/lohboot.Rd
+8de1379bdea5aa88239d1bcffa87fd07 *man/longleaf.Rd
+6543dcb134c194c7cb3c8ba5a312a3a2 *man/lpp.Rd
+bf49bae5aaa3f6fed410486f152c7317 *man/lppm.Rd
+69bc04df7ec86067ffda73ac1531a574 *man/lurking.Rd
+77bd8c2a20c818ce6deaf30547dfdd2c *man/lut.Rd
+8ff61b15a83accee7cc5be6b9363b347 *man/macros/defns.Rd
+f4b50c29a62b24f51e0ab42ae004ed57 *man/markconnect.Rd
+9a69d529c353003ad90ffbbbe239a6e0 *man/markcorr.Rd
+bb18727cba02e6e22e104c57e95382e3 *man/markcrosscorr.Rd
+d2a9ee8b6a6e538dbf76f281f811f7da *man/marks.Rd
+cbe8805c7a3c8b2b452921800ab86f4e *man/marks.psp.Rd
+439fc0facc5077a83a691e35f1b1721c *man/marks.tess.Rd
+ed22c51331fd52293a4bb2be76a9b3d6 *man/markstat.Rd
+6001c3ed60cf32729c2272a2411ee82a *man/marktable.Rd
+9cff51b3f36ee7c8d536cc0cc6d9c1a5 *man/markvario.Rd
+c6b7406bc7d4c64c92ed0ff18466d388 *man/matchingdist.Rd
+f9904c9903eaa1d3acd8275691f3a8b4 *man/matclust.estK.Rd
+09e2aba2c8a675f5959dc3ca460ea2c3 *man/matclust.estpcf.Rd
+2bbafee72c33faca8f1edf1c0a747419 *man/matrixpower.Rd
+b3a9f6b4f7be5b70bde09bf76a15e81c *man/maxnndist.Rd
+85c96d136f6c8e9dc5c9c8aa8870c98e *man/mean.im.Rd
+29c7e9c341f6f886f03a1879daf361b7 *man/mean.linim.Rd
+03ac1df8cc2261c02be9b71361c5f963 *man/measureVariation.Rd
+d7deffaef7f7438de2c1fb8b261838e7 *man/mergeLevels.Rd
+fa199b0a326a764f216644737864ad6e *man/methods.box3.Rd
+7d4203dcec605900f9384915a75a1ab6 *man/methods.boxx.Rd
+a9506c6df413353de7decacd955bc0b7 *man/methods.dppm.Rd
+bc0d58ebfe4623fbd2189083d39694de *man/methods.fii.Rd
+7df59a0670ba7bb652fbd49d23ff57eb *man/methods.funxy.Rd
+8a584ba69dc28ef1c9d3b8f4e60896aa *man/methods.kppm.Rd
+f9e72503a0844daef9fae2ee9e9d6fd2 *man/methods.layered.Rd
+c7092d0e6e5ec9cb37a4570dec353e2a *man/methods.linfun.Rd
+2a61d8e256f05f52d82a00a9a26e1cb1 *man/methods.linim.Rd
+9d28b79bc945da4be167dddf404e24a1 *man/methods.linnet.Rd
+246f2128f8257fe960c3ab49cdd2e434 *man/methods.lpp.Rd
+d4b958e06f1771e797d014f3eba2c6b5 *man/methods.lppm.Rd
+7134071842e63af453a7f892b219c80d *man/methods.objsurf.Rd
+1def2b75a472a68c70d30228b758972b *man/methods.pp3.Rd
+97fbcb516f976097d46319b4e6e2ce3a *man/methods.ppx.Rd
+4af538fd19d3df6cbf3e5594fef186c0 *man/methods.rho2hat.Rd
+07479c50b6c25452ceed595a27321a33 *man/methods.rhohat.Rd
+149524760f50d0c8a3464db5658e5d0b *man/methods.slrm.Rd
+8b16e399ce9824952249f0f5c191ed42 *man/methods.ssf.Rd
+d1305f3f7d5688a0279c483f30c7177a *man/methods.units.Rd
+1a20b184eb2ada390fc37b75dd319fbc *man/methods.zclustermodel.Rd
+0e3707b8d67aa954eb254c5e8c23ee75 *man/midpoints.psp.Rd
+49777e619db7fb9cd8c155956f34bf01 *man/mincontrast.Rd
+aa070c264b489fd2cf5873bc3cd8a7b4 *man/miplot.Rd
+bc47733e01454abe17ed482062b87989 *man/model.depends.Rd
+88e8bf9a4c5321313dc8ee242b8befc8 *man/model.frame.ppm.Rd
+e1070be931a9d90999dc20c01222b778 *man/model.images.Rd
+f00ed50435704c29e0b0efb94c13eb0d *man/model.matrix.ppm.Rd
+79e1e90120fcca70b67df62340a8fe59 *man/model.matrix.slrm.Rd
+13f1e66542a80cef46b4ccca75fa286f *man/moribund.Rd
+ffc32bc802cc35ba416a1d0072288e1e *man/mppm.Rd
+924a30e423945269945021ef8235ca78 *man/msr.Rd
+447afb4144fcd2b61f0c5b88a95e3bd3 *man/mucosa.Rd
+fd7c2d5c8c968ca3d3e04a464f725b30 *man/multiplicity.ppp.Rd
+2694cc016c4cd0d310120ed9460aeace *man/murchison.Rd
+e49f2dd4a8732fee2dfaea7235a895fc *man/nbfires.Rd
+e05c4f7abf7dc0c6542a15c9f492932f *man/nearest.raster.point.Rd
+dccd900d22e8fe855ae3c65eaf8b7dc1 *man/nearestsegment.Rd
+238f51712a59ac7b198bd400cbcaf693 *man/nestsplit.Rd
+0c3d278f757e4c193c54966f4146f6d6 *man/nnclean.Rd
+3200cea0010f61f950984b088c1982a5 *man/nncorr.Rd
+9e8545168f071d57745cf82c82373ed9 *man/nncross.Rd
+af1283be6eb9ac2adee7c074df7f2db8 *man/nncross.lpp.Rd
+73f03fa0d438544d0d3e869fadbc8cb4 *man/nncross.pp3.Rd
+50ece9f5db4cda508c95aa459fe1a38b *man/nndensity.Rd
+30f7b4e6f339e889be7d466511d9f430 *man/nndist.Rd
+e451198f4e4ed0016adf019c46ed98d7 *man/nndist.lpp.Rd
+1f5b99cb663fe52063e037e0858b7909 *man/nndist.pp3.Rd
+b35b16b4268865e5de862c1eb1fd961b *man/nndist.ppx.Rd
+65f0c650eb79dfc75348587c3519cf79 *man/nndist.psp.Rd
+95f8e34ec2dc3f3e57b119b3520ed30a *man/nnfun.Rd
+b9e62915866e51f520d1e6ec63023195 *man/nnfun.lpp.Rd
+d0696d6e56df786abdc2c9b6949e12d5 *man/nnmap.Rd
+481424540c1db4f01a71e201d1162412 *man/nnmark.Rd
+418a896aa7d1c53313e7022184ea350a *man/nnorient.Rd
+092cf57239d359afc339f54592310756 *man/nnwhich.Rd
+85383deb36661f8e585646c68b914b59 *man/nnwhich.lpp.Rd
+0c2cde6a1be83d4ca0fee3da38549f49 *man/nnwhich.pp3.Rd
+c42d68ad1588309b050beb4a53d5ec6b *man/nnwhich.ppx.Rd
+5ca7e87fe4f00c669c03283a114f734c *man/nobjects.Rd
+f67feb8a8e18129e39306376861386f5 *man/npfun.Rd
+fbe2ea78291bdba8b54502e7dc2098f2 *man/npoints.Rd
+20d138bd69544c6250d3dadb29305c6f *man/nsegments.Rd
+4e831d956a71811de072131538ffa9f0 *man/nvertices.Rd
+47ab44b07e3219d9f29d1d451c5dd745 *man/nztrees.Rd
+291f0f4c0c6d0f396f6c793b6b432f1b *man/objsurf.Rd
+52f27b19004ee43069f7d9a187bf71c5 *man/opening.Rd
+fbb2162039c49aa32691d13932f84263 *man/ord.family.Rd
+f7f53d50400fb02c386c019d5c23f55d *man/osteo.Rd
+0b059320eb292ee2c82683b6630bac7e *man/overlap.owin.Rd
+ae656f0bd4d46dc1596e9ca11f74dbcb *man/owin.Rd
+09a475e019a4a377357f721f11bb5ff9 *man/owin.object.Rd
+a334b67ef716e9124152624f15662c5f *man/padimage.Rd
+8853d6a32603f3fa8a5d8314c23139d7 *man/pairdist.Rd
+009548ceb3b1273a5d50f1fb404df79b *man/pairdist.default.Rd
+4165070b062fb4d950205c19e2464b52 *man/pairdist.lpp.Rd
+55dfd9519eb3f69a926a3ffdfcf345b0 *man/pairdist.pp3.Rd
+6c69280a6330cdbf13fa31eb8c424641 *man/pairdist.ppp.Rd
+a9042dfb1c08d23195d8d3d85ff372e9 *man/pairdist.ppx.Rd
+24967c12a5bfd7520004da9088fb1d55 *man/pairdist.psp.Rd
+918a8ff0eeda4cbba23369b7b8ace2c0 *man/pairorient.Rd
+f818fa41fa27f203d98e291eb930fb83 *man/pairs.im.Rd
+8b8744286691b257a63805405a626ed0 *man/pairs.linim.Rd
+e693f86f09458e493af2b7b6712fd770 *man/pairsat.family.Rd
+175a81ea3c116a4f938a8cec99adb42c *man/pairwise.family.Rd
+499e39168a72133ae33c5bf8f2461a94 *man/panel.contour.Rd
+9e61a7dc57f0d989481c1baa5fd5bdc8 *man/paracou.Rd
+7c1d6a7436f6cbf698c044221d39c622 *man/parameters.Rd
+59e860a968da911fbe03bef31987f44b *man/parres.Rd
+9b06494a831f88b18f8196c687770fa4 *man/pcf.Rd
+dfadd4ff1016669603491af6a4362311 *man/pcf.fasp.Rd
+aeb5cdc38dbcd2acefb53b9b192eb5a5 *man/pcf.fv.Rd
+2c231d6b423c8e9b48aba24a6b0fad3c *man/pcf.ppp.Rd
+1cab396336b8eab6fca38a9d12e8ec79 *man/pcf3est.Rd
+35e9c91309a30d2ed93ea6ceecf41442 *man/pcfcross.Rd
+f9f8dbf05c3a13aa2d765b14a84f6865 *man/pcfcross.inhom.Rd
+c745bbb2ee3919ce065ea50628f8e657 *man/pcfdot.Rd
+b84a4cb9f3aa18d23a8c8e34a452a240 *man/pcfdot.inhom.Rd
+3a2b13ef8e8011a2c7a7289b8d099bde *man/pcfinhom.Rd
+5bcf6eda621e887fdcb11d5b309a97ef *man/pcfmulti.Rd
+b55454aa2048e9b74f11307440aecfe1 *man/perimeter.Rd
+330185579b97739a5fbbd58d4d11cf5c *man/periodify.Rd
+7a9781d54573d010f6fe988f6f26ccdb *man/persp.im.Rd
+699bce269544143e18548ec248b25212 *man/perspPoints.Rd
+218a10f9f210cd6a8832231405a01fc5 *man/pixelcentres.Rd
+7365e405b0ee8bff0d995d463d094ea4 *man/pixellate.Rd
+fe37268ae52121b4177a1774d1b2b952 *man/pixellate.owin.Rd
+73171e37934907d02d3215c9dc100edc *man/pixellate.ppp.Rd
+956fc61c9d1adbb69b41cf26b45520c8 *man/pixellate.psp.Rd
+a2aafee99f73fb9b36ce11734cf8fbd2 *man/pixelquad.Rd
+7e1e9e78288eb74fde9af85eddcf00ce *man/plot.anylist.Rd
+9ff682b1593457aa4003c2b041400b96 *man/plot.bermantest.Rd
+d308f7035d40be415c0a1d4959e2bd80 *man/plot.cdftest.Rd
+ff7e74301e126b393e855ebf323aace0 *man/plot.colourmap.Rd
+40a182b39983f3f816409890bfffaf17 *man/plot.dppm.Rd
+d3467a14b7475b1bd3d345be2413305e *man/plot.envelope.Rd
+00112835ac7e9ca52154b8d7e8b15bc4 *man/plot.fasp.Rd
+2902f7b797e405cc56a82bfbde45558b *man/plot.fv.Rd
+bbceedd23382559bced05aeab5d33761 *man/plot.hyperframe.Rd
+7cc337128cc0959f29df487178de2b48 *man/plot.im.Rd
+0115240b221ea63bb355f83505a4c08c *man/plot.imlist.Rd
+7b833d3f4991ea3ac2b66dc6a2e2f105 *man/plot.influence.ppm.Rd
+2b31e001e7b3cae5affd64006249ea77 *man/plot.kppm.Rd
+3ef61cef6dcb869e4bdfa7de9b14ba78 *man/plot.laslett.Rd
+9aa99e1d1b95df354762b42cdf4dc356 *man/plot.layered.Rd
+cf3a33a8cec1a0aacfb30cb13a353187 *man/plot.leverage.ppm.Rd
+c81d7e263fb0fc2ab73e76adb0bf5a70 *man/plot.linim.Rd
+c2a1e4dc6ad004174de60d028e7ee574 *man/plot.linnet.Rd
+a403097554aa2970b4d347d9c8ec9d8b *man/plot.lintess.Rd
+0c939757473bce2b6586d6c6ff0f5b22 *man/plot.listof.Rd
+8dbbca580a0a1b71b1beb14660f87ac8 *man/plot.lpp.Rd
+8af4ffb510808a99e8df8abed117eedf *man/plot.lppm.Rd
+7480127473564ad5c3a57efdf68d9d36 *man/plot.mppm.Rd
+9a1578d276090b2baca9b0a5e68f3663 *man/plot.msr.Rd
+cabf331ae711a9fa76a4499c17f699c9 *man/plot.onearrow.Rd
+096cd0aa4d9e07f1d00b0e81df9fb8ba *man/plot.owin.Rd
+b3f8636aee9f1ddea0a9baabcf2d9e37 *man/plot.plotppm.Rd
+94ab7f2bc0e1f75829355964133d1866 *man/plot.pp3.Rd
+165d0b38adc114a488d82a308cc2c30c *man/plot.ppm.Rd
+b37e3628eda4ec64580ec41c05ef8dd9 *man/plot.ppp.Rd
+4b841110d0c062d2d1abab19c81a8c39 *man/plot.psp.Rd
+f2a2afff874266688981a56ba0f0887d *man/plot.quad.Rd
+5d1d72327dba7d662ec2ab7b8ea72a28 *man/plot.quadratcount.Rd
+4be5e426df9bf6b93ab71ac270e35417 *man/plot.quadrattest.Rd
+29a48bdc9d2be508ee8f66afaf0f475d *man/plot.rppm.Rd
+623d09d5790ab06711fbdbc9e72b145c *man/plot.scan.test.Rd
+8c87c3c115a50447499916049d547a01 *man/plot.slrm.Rd
+5c5b810250001e46c3d09d5fa659a376 *man/plot.solist.Rd
+4ef4ce06a8d1027d9988db609fbb92b8 *man/plot.splitppp.Rd
+05201ed055b58b3a7219bad00c740d20 *man/plot.ssf.Rd
+b018681806c0568dd66a8e228295f376 *man/plot.symbolmap.Rd
+a74733004df1d1c7984a38b4ac7c3dc9 *man/plot.tess.Rd
+33824b3b288724f2e4f23977f60d8564 *man/plot.textstring.Rd
+00cd55cb42db85a314c8511ce34128cb *man/plot.texturemap.Rd
+9eaa193b31b3538c61dfc1d41f0686d6 *man/plot.yardstick.Rd
+d8fc082a4e08900675049aa011262b07 *man/points.lpp.Rd
+b43cc7007474913e4029cebf81b0d9b8 *man/pointsOnLines.Rd
+daf959532330f2c700243ef0693ffa37 *man/polynom.Rd
+d9aae8e7675eef5b86acc13255aa41de *man/ponderosa.Rd
+1e4ffe51385b95fa44f17d5ebbc1e023 *man/pool.Rd
+7b0c3d7a78fc8ff459956f54d439c066 *man/pool.anylist.Rd
+ab8ac452b1a923e9577b138e7d4be21b *man/pool.envelope.Rd
+22838536f87dc87f6bb34bd5761f1966 *man/pool.fasp.Rd
+a0059ae9ec4770fc7f7760eb11e28344 *man/pool.fv.Rd
+29a3c5eb8f035c5c972e7bc8f5b25ae4 *man/pool.quadrattest.Rd
+a0cf222f8b437868440d320d8febb4b4 *man/pool.rat.Rd
+91d70b91a0ac2d74cda084c57f6bc76d *man/pp3.Rd
+2528a47d883094f9f3ba758660efc7e9 *man/ppm.Rd
+e812bf103f917895aed07763d3edaf58 *man/ppm.object.Rd
+4633000f7349d3226a0489838d119bab *man/ppm.ppp.Rd
+06ded16b9b26cd20a35a660fb3d0140a *man/ppmInfluence.Rd
+c691a22008d88d1874b5ff1ad005ea9d *man/ppp.Rd
+1738af59525001fc4b29aa2adf280a5c *man/ppp.object.Rd
+6f75e88f3528f9c911993cf41f935f04 *man/pppdist.Rd
+98837b590eaee95efc01b043300ec182 *man/pppmatching.Rd
+4bdb6dcbce07348faefcb5b76986e72b *man/pppmatching.object.Rd
+7c4452d5ed6b0d2fb04c2d829d62ebec *man/ppx.Rd
+55186ded7f38c9d5289aeb25034517aa *man/predict.dppm.Rd
+3136a25f701450a7b2ed5c0caf25b3f6 *man/predict.kppm.Rd
+0a4a7f74691f676391a543f30d8c4a20 *man/predict.lppm.Rd
+60e0b9c0c4f793dc28883f44719b614e *man/predict.mppm.Rd
+722265a38047d3a640598a42c9055117 *man/predict.ppm.Rd
+baf7a18910afda5c22e97a8b394f35ec *man/predict.rppm.Rd
+cfb7e1f07e6b80ba77bdaa92e2fcc486 *man/predict.slrm.Rd
+600900c6d48dfab3d5aef02377c99e6a *man/print.im.Rd
+d245091b62b8fe10f8913b76dad465fe *man/print.owin.Rd
+9963e52e777b7d2d562f0ced86f1148d *man/print.ppm.Rd
+9efd9c2dc831f6343afb506be3497144 *man/print.ppp.Rd
+6e0624fc0182d41c6b557eb57c682a31 *man/print.psp.Rd
+bdfd0ef9b27f33d246fb7a02b2008eae *man/print.quad.Rd
+9a7f341f0d9153394be94d2448171bea *man/profilepl.Rd
+5cfbfd93e44996df886024331b390082 *man/progressreport.Rd
+a0abb0988a6e917ae0eba09cffb0a7b3 *man/project2segment.Rd
+9df38351cc29ede4dd7ffae6706f5d68 *man/project2set.Rd
+a8d0bb1cb4832789478c2fd9fdff494c *man/prune.rppm.Rd
+e8261efc22ee48c1163505690ad6a058 *man/pseudoR2.Rd
+e42dda996499718a515fc0c75d079256 *man/psib.Rd
+679efdc74051fcbf726f1a5ab2704715 *man/psp.Rd
+e5eeca299bd8b3b0e45c1ed7d5f5c0e3 *man/psp.object.Rd
+65b965bae874bef9d0cd00f478b8defe *man/psst.Rd
+bd21593fe299ad1aa20407c1ab1148ee *man/psstA.Rd
+fd787ca540ddd5b4a08004d2a405fff1 *man/psstG.Rd
+310f87263aaf42e207dc9677d0403178 *man/pyramidal.Rd
+642843730b1200deb20fc22a6708dcfc *man/qqplot.ppm.Rd
+64ff11570ca6ac940fddc6c7bdb26b0b *man/quad.object.Rd
+72261006dfc38b28a929ebbf29310c7a *man/quad.ppm.Rd
+dc5d046a5b2e31d19a41bdafd60b3ebc *man/quadrat.test.Rd
+e5e8567142ba69f23a29231576d8a6c0 *man/quadrat.test.mppm.Rd
+ea895b1d7a9679c3c48123012a7e01e0 *man/quadrat.test.splitppp.Rd
+58514018045e526b5126305571b87c9e *man/quadratcount.Rd
+399b5a89c244df698c3c85b7c620d388 *man/quadratresample.Rd
+80dae4e98404c3d5507e8245fc8b296b *man/quadrats.Rd
+1753816156050fb9daf986acb7462896 *man/quadscheme.Rd
+8837f075074d53be595ccd9f7184a746 *man/quadscheme.logi.Rd
+70b14b52d6c94fdeceffceb6035f4c6d *man/quantess.Rd
+f74a00203731aed9b80c02c66ff765a1 *man/quantile.density.Rd
+7ebe2c0d4c1bbd72eb5dc80ce6c7bba2 *man/quantile.ewcdf.Rd
+dd719c0c8110bc2772f8d7912ecebcda *man/quantile.im.Rd
+68961b333902f94238d28d7dff64bfdf *man/quasirandom.Rd
+e10e5d6e7d8fbd709e9562445bd65293 *man/rCauchy.Rd
+c44b5314f861024182e01c7c4c3af77d *man/rDGS.Rd
+aa1f43d63a6a816cc1fc01c64bfb97ac *man/rDiggleGratton.Rd
+08e89870e624574222db2a21bd3cb9b7 *man/rGaussPoisson.Rd
+c35b42f9c82b60228d8bc46b57603d48 *man/rHardcore.Rd
+883b0d553c3fd612afb9ba11ff2479d0 *man/rLGCP.Rd
+e695a3ed67890793342883aededb7b66 *man/rMatClust.Rd
+511430ade45d202f94a1f900ec32e11a *man/rMaternI.Rd
+e299efe394110eb20ec344b65b84798d *man/rMaternII.Rd
+40d40454aa82ff81249c9d31e8b930a6 *man/rMosaicField.Rd
+168e3c311208ef80aebb1b8fa91a1010 *man/rMosaicSet.Rd
+bbbe71903aabcf8ceecfef2706a0f8c2 *man/rNeymanScott.Rd
+90ca689e7c131820d65877d1861a3558 *man/rPenttinen.Rd
+958b981db668a82a6e9f119302584b10 *man/rPoissonCluster.Rd
+7266a51131d3884bf96b03e561721671 *man/rQuasi.Rd
+946044fbcef67d750f2a19149852d447 *man/rSSI.Rd
+93d08f7522dd01a17f3ce0f98c25a7e1 *man/rStrauss.Rd
+decf0b6738f33dbe72f5d9fb4440e236 *man/rStraussHard.Rd
+945e082e1dfc1677d07440e78737d41a *man/rThomas.Rd
+1e3830535c87494f824cfc8afe448078 *man/rVarGamma.Rd
+4aa8c9349b680a3eae25a4ef140a0338 *man/rags.Rd
+d799247ffbfec55868bfcb7ba832bcef *man/ragsAreaInter.Rd
+ba10e2653bf0888cecb2e0cc2d7005e1 *man/ragsMultiHard.Rd
+94ccf22fc4d72433a17890fabb98cf4a *man/ranef.mppm.Rd
+f5859cdb173e22e9551ab30f0c78f1d0 *man/range.fv.Rd
+cb18fac0c337eab9dd887f2de53fdbe7 *man/raster.x.Rd
+48db7d85e4d70016c314a208457d4c86 *man/rat.Rd
+b0e60f3b6aabef35da869dbae738e921 *man/rcell.Rd
+55aeb0c742804dd2fd18971d10ebdce1 *man/rcellnumber.Rd
+4f00b83f740761ad77278241656c18ee *man/rdpp.Rd
+15e63641e557c242a4e892e75db64181 *man/reach.Rd
+dab1cca5d71faa0a2925104fcda1cffd *man/reach.dppm.Rd
+396ba365547cdcad60faa9d6210ece8c *man/reduced.sample.Rd
+e897749cf089ef500466caa8c433a2dc *man/redwood.Rd
+e7e9011c28cc14950949aa1b5ebb6d1f *man/redwoodfull.Rd
+a0c68ea64422a6edba5f9231338f0807 *man/reflect.Rd
+df95624d2972e9f5eb296c1ee9856092 *man/regularpolygon.Rd
+20be7aeda8e4da71d02f871e2202115b *man/relevel.im.Rd
+b141bc23f32e04b067b2c4c969071f29 *man/reload.or.compute.Rd
+8738ccac6e37447e056972c18eb48254 *man/relrisk.Rd
+86698c033a7ba6d0c3fe448bdf389e1d *man/relrisk.ppm.Rd
+23ac60965284d4e0ba9ecbf98bba4ab4 *man/relrisk.ppp.Rd
+6a34e089d79f2632c80c0772cea5a9c9 *man/requireversion.Rd
+1fe871400bf9e2fe2c9b94a40153baa1 *man/rescale.Rd
+20004859dc29aa4363ad80a948fe23db *man/rescale.im.Rd
+058b59b5213d55db80da61e4a0de97fc *man/rescale.owin.Rd
+d0dded0a368a3eaefcd26b1e5636d998 *man/rescale.ppp.Rd
+13920d3f7b1391f8bd02f8e2b325a40d *man/rescale.psp.Rd
+12334801657f6ed3d3b0e6b3c80eee35 *man/rescue.rectangle.Rd
+bcd155a7da4cc55760a6bded7ddc8a8e *man/residuals.dppm.Rd
+0418405470085449656f5fc8d4d87999 *man/residuals.kppm.Rd
+97247522acadec62cece039cefef1232 *man/residuals.mppm.Rd
+8a53451a5f8633fb83ad5a80ab337090 *man/residuals.ppm.Rd
+8692bd4bbf04b6571b6688c00e31ef7c *man/residualspaper.Rd
+627fcde54f29940a08a9c1def1673bfc *man/rex.Rd
+c324aefed511422dad008c9391a9ec75 *man/rgbim.Rd
+fa83ddd0842b49a342c54511d97b787c *man/rho2hat.Rd
+64735071ffa4dc64dcbf17c81e0a881f *man/rhohat.Rd
+ee9d83dbf3d66ff2f0ee41dd85c5d319 *man/ripras.Rd
+3dd03a5b2c65b157024e2525b6502630 *man/rjitter.Rd
+6dc4bbb5b1b2e45f381673a7488bbd44 *man/rknn.Rd
+b8233b7e743adb538fb6fe0945a22551 *man/rlabel.Rd
+1303979d82a3cc654db7fbe57f3a8b90 *man/rlinegrid.Rd
+4c0dc89855eeaef976d52181c2ec7184 *man/rlpp.Rd
+3a88872dff11d1c5d3ce1e2414fce8ce *man/rmh.Rd
+184c9687898d2c737cdb8c9de7cddb10 *man/rmh.default.Rd
+a45639e352f724bc9b50a25f26eb24b8 *man/rmh.ppm.Rd
+f521aabb67e4121f2ba35c162d1d5949 *man/rmhcontrol.Rd
+7fb92fafe4152451c5f54116faae6d69 *man/rmhexpand.Rd
+e8f0e4807085e833452c79734ba703e9 *man/rmhmodel.Rd
+c2a413171b993aba80c492b6fa400f1a *man/rmhmodel.default.Rd
+754d31bbe18493634e1fd5021d3bc163 *man/rmhmodel.list.Rd
+b74fce12c103d1c45c14c78ebf3e4495 *man/rmhmodel.ppm.Rd
+0e15021edfdc11ed67d82edb9279d1d7 *man/rmhstart.Rd
+6daa23722b901914bfec1925fe57ec22 *man/rmpoint.Rd
+c9efb98fb4600489851034e4914d0cbc *man/rmpoispp.Rd
+00b9cb8b6413301c0182c77f3c7180d6 *man/rnoise.Rd
+b3f9f224e26c1d86df005584781d2bd9 *man/roc.Rd
+b062a825c0b76bc5292d540ff065b8bf *man/rose.Rd
+46de1489970165e679298e0bfa806389 *man/rotate.Rd
+4d6db4921d7dc47a815b93d02076a05c *man/rotate.im.Rd
+1cca2bf91ce0897c70c83eebe2e0df46 *man/rotate.infline.Rd
+420f42b78f4b75d037ce9e68ab287e90 *man/rotate.owin.Rd
+c8f5365f2f6e58785862f72a7d6e8244 *man/rotate.ppp.Rd
+9f3fade667205c62415a1f97fd609bcb *man/rotate.psp.Rd
+23e4e349594aaf9d57f6307a596e0feb *man/rotmean.Rd
+51349aa10f2e3d2f2cae88a16c642a39 *man/round.ppp.Rd
+c9d186c7862c636325ad11cad7a62bfb *man/rounding.Rd
+e7439e3db078d957ad0bb78411706335 *man/rpoint.Rd
+b6a91ef76fbc45e3cb1bef941d8e4b83 *man/rpoisline.Rd
+c7a03bb1f0e2e57e0fe02e29d9e5c935 *man/rpoislinetess.Rd
+431cc7fdc28659d5404cbacc19720b52 *man/rpoislpp.Rd
+1267b0b52b75d4626575776dabc3e18c *man/rpoispp.Rd
+5a98dd78a76b9d187fa5cc2fce68d8e5 *man/rpoispp3.Rd
+94244193c38e8b8d8d051949eaca339b *man/rpoisppOnLines.Rd
+a6b80bce2cc88f746bf34ad4e7048d6f *man/rpoisppx.Rd
+2071b7797faa3874b4cafa424d149b3c *man/rppm.Rd
+df2d3a4e251d836e48a93416afc150ce *man/rshift.Rd
+48db298e9fc094f8d5f422336d44cdb7 *man/rshift.ppp.Rd
+7025e64603cca3771c59a17930a9d413 *man/rshift.psp.Rd
+7e169778102b366e7422e82c1f8b424f *man/rshift.splitppp.Rd
+1638325f01a8308a7eba1f4d3efc5b5b *man/rstrat.Rd
+034d6d14ca1d6cf729f94f7e17531169 *man/rsyst.Rd
+907a1f4d777f2a09565480f0202197f1 *man/rtemper.Rd
+a0c8a5fd715c2806a4cfc73bf89da58b *man/rthin.Rd
+c5d3d8890255ea2ed99542aa58eb4e81 *man/run.simplepanel.Rd
+0f58540ffbc0d6b01fc785934fde788c *man/runifdisc.Rd
+d5d02f9cd0793e69a1c46b8eadeca5a9 *man/runiflpp.Rd
+f00c10fda16472141dae745742629b39 *man/runifpoint.Rd
+2de1693c1362e6e987c05312d0f8a150 *man/runifpoint3.Rd
+08314d4010367c3ab6e780e4632b3226 *man/runifpointOnLines.Rd
+a9273f2fccb179783c06c7ff39ec6492 *man/runifpointx.Rd
+d7fd8b0fe60dd663e1559c6893526f5b *man/scalardilate.Rd
+a847cfd828fed5a9b2405240961865c5 *man/scaletointerval.Rd
+be7df2e3d96dd962d36880cb3c21d655 *man/scan.test.Rd
+b9fab8b1b77760071c342225e9d34130 *man/scanLRTS.Rd
+a8c5e46c67438271cec58b90060c1da4 *man/scanpp.Rd
+645cc8ca418fd2d56b14df5a54f15420 *man/sdr.Rd
+20d7ec0572c3f2faa5b0916ae5f5398b *man/sdrPredict.Rd
+f7720a4e8908af1298edd04ad488848b *man/segregation.test.Rd
+844656835d998b29a13720cf3dc80200 *man/selfcrossing.psp.Rd
+400bd1e88e42c09193aabe2ec5b719a3 *man/selfcut.psp.Rd
+ab09297e840f225a974b8cba729ad138 *man/sessionLibs.Rd
+903812cd2ac3c69daed146cbac19ec4d *man/setcov.Rd
+23a4d59639eea402a65a37b3335ddb2f *man/shapley.Rd
+1072ec85cf03b2046765d9b449371fb9 *man/sharpen.Rd
+c9d619e191ae1c73f0df5fe95c1185ef *man/shift.Rd
+874df23a3e9ff4ba46a58c6b5ef0c117 *man/shift.im.Rd
+53a3724667d6deab8c4bd7349cd5f8b0 *man/shift.owin.Rd
+1ea27441fa6ec6477756c169b4938c86 *man/shift.ppp.Rd
+bccef0619f0c7eac00444722e187366b *man/shift.psp.Rd
+ecbaeaebcafe20952c1a38fb8410e0ce *man/sidelengths.owin.Rd
+bbf30cfafd8f9e64d508117c68f2ae69 *man/simba.Rd
+8350af3a225d82762ce0bf5c0bc7f4dd *man/simdat.Rd
+9eec44b00a8171e80e128560a7ec682d *man/simplenet.Rd
+881207cb3e615e02e85e14abf7bdf82c *man/simplepanel.Rd
+99ebdd81548bc884bd7dc69feed637a2 *man/simplify.owin.Rd
+2bf34305fa23d735a296918cfaa07ecc *man/simulate.dppm.Rd
+9b3094be5f1b129a78ea3e4e0e709a4e *man/simulate.kppm.Rd
+a77f193e9fc39cc11d662323d2194f43 *man/simulate.lppm.Rd
+33b7b6d9e3c230b14c9197929c78676d *man/simulate.mppm.Rd
+a327ab464756e2b6c6a447e6b99967a7 *man/simulate.ppm.Rd
+4e92e07224d7111e24cadf4b88a12b6b *man/simulate.slrm.Rd
+66e8d8b3849d7ccefb9704b881e445c8 *man/slrm.Rd
+efad4027d784fc5e84d50f618186763a *man/solapply.Rd
+9e7d2dd1f496c85303346a3ae6bfff89 *man/solist.Rd
+364be560f6b2de1dcffa0e5fd6d923eb *man/solutionset.Rd
+6ed1fc01d32b892f519262a6bd79631a *man/spatdim.Rd
+485b15a8d2d0e0a32aa9b0cca58db576 *man/spatialcdf.Rd
+6b4d49938686bfe2e954db93576dfbcd *man/spatstat-deprecated.Rd
+55733b34b587a5e4dc88a0da6907b342 *man/spatstat-internal.Rd
+f5c9320a6db0f01b5a92dd2a6edf7f6b *man/spatstat-package.Rd
+f53c1df3734c3d34c08885d33ee29aa1 *man/spatstat.options.Rd
+1ff641da64b69d3e9daf0fe90a9306b2 *man/spiders.Rd
+c542af7c96e45067fd97f43574d48da6 *man/split.hyperframe.Rd
+c46240165ce8970f5b8d1cf27ade18e1 *man/split.im.Rd
+a85f30741ee936b330a7aba10aa312d9 *man/split.msr.Rd
+bea1ff73e6daf2875a0220816856d99a *man/split.ppp.Rd
+d6988b958f39b67bdfd3579061ec50c5 *man/split.ppx.Rd
+f8ca3f4632db9ba53e39edb98c39e95c *man/spokes.Rd
+e6c281657385e90333faa2cbca7781ed *man/sporophores.Rd
+a39ee1e6b3ae08547e77d445fd3fb117 *man/spruces.Rd
+4a8813dd800e5b74f847a19947e1d682 *man/square.Rd
+079af91858f6ac8d4e2dde7034740579 *man/ssf.Rd
+8d86f7821a1c4b1b4bbbb1b95279fa33 *man/stieltjes.Rd
+4badd1f6e4c47d32dadaac43970b5422 *man/stienen.Rd
+a6106ff5f15272f6e0bcd6920caf56c5 *man/stratrand.Rd
+f7843d1eb6cd970539806af30d1a561b *man/studpermu.test.Rd
+b6eb34e0f266961c3a67430ba21f1e62 *man/subfits.Rd
+623138d90e1dc24eba152d8c2b5928c2 *man/subset.hyperframe.Rd
+0affd4c192dbe497ed6458020faff409 *man/subset.ppp.Rd
+f1c7accea61aea9da256092d74cce2aa *man/subspaceDistance.Rd
+26cd85f0b3b557bd27cf21dc6f68a63a *man/suffstat.Rd
+6f40b323e1ce8a8774f8a5368bed3167 *man/summary.anylist.Rd
+d0fe66866ca1d4901ad822a22de28094 *man/summary.im.Rd
+beed1c5e94dfbb7907d0803c88b879a0 *man/summary.kppm.Rd
+48df7eebf9876aa61c2a0b5271fac5d9 *man/summary.listof.Rd
+4892b549bbe36df282b1b29bb478858f *man/summary.owin.Rd
+db0406db36fe20690fbc5ac295ce28d1 *man/summary.ppm.Rd
+3539853c347f338355fd6a84a8f493e2 *man/summary.ppp.Rd
+f3e0a6f7d1ecd0c771e02c3ecf8f2bf9 *man/summary.psp.Rd
+7a1165051c8ab100aab5b7f05d4dd02e *man/summary.quad.Rd
+935671509f14b24888c6baa8152b53b7 *man/summary.solist.Rd
+38e24aa9efb53685da86c344a2159827 *man/summary.splitppp.Rd
+0c537cfad89f4f0caa8739f96dc44977 *man/sumouter.Rd
+4e61d58afb5646fa59a4f0daf6cfadec *man/superimpose.Rd
+9555c94642212f7cfbb22fe921eab696 *man/superimpose.lpp.Rd
+95ea22ab4416813de626a03925cef713 *man/swedishpines.Rd
+09a3c8bb94a975eb23a398d8123bc4f0 *man/symbolmap.Rd
+e10b29a48f2f3fad126dc0e911d6f997 *man/tess.Rd
+44e4516ec3b5e2d588d381b7ac48697e *man/texturemap.Rd
+2c95ed0887fe249bd0c7f20649fc29d8 *man/textureplot.Rd
+99e7580b6a1f46033c1e2d67611a671a *man/thinNetwork.Rd
+85668ad3685e742f5f540a3269406d5d *man/thomas.estK.Rd
+5083b5bec798fe2f17b0c8986d8ac24c *man/thomas.estpcf.Rd
+2f381662f92656dc153405817e035cc8 *man/tile.areas.Rd
+1859aa0f8ece255a91fcbb2a689a211f *man/tileindex.Rd
+f15cbd559cc22c0cbca8d85ce131c8c6 *man/tilenames.Rd
+1e0468de33d16a5faf3848ec1f6a6693 *man/tiles.Rd
+fd49521e7c21490bf42ec11b02aca531 *man/tiles.empty.Rd
+ede1768dec67583e60706b8e226d5481 *man/timeTaken.Rd
+992911899fba6ca5fc6439da3cf51805 *man/timed.Rd
+f05325c96c5a7f9e85e8e420af4d9726 *man/transect.im.Rd
+33855ed0e811bb2984cdf15156ca0a21 *man/transmat.Rd
+ac1d70b547af8d8efc12a3d4c28ee0ed *man/treebranchlabels.Rd
+a76fcd8c05b143c22a30edb3248e45a9 *man/treeprune.Rd
+fc56759d36af58ff75ffddb35ed1fca5 *man/triangulate.owin.Rd
+df2c4468d4c21b91dca2b6b89cf20bd9 *man/trim.rectangle.Rd
+b64a871fdee9d5938466314f3b4e4a11 *man/triplet.family.Rd
+487c7d402407cfc551069cd97000c0e9 *man/tweak.colourmap.Rd
+9363f374d1d9638193af018b2b9b956b *man/union.quad.Rd
+372e71d05d84621b1d50af8470af914f *man/unique.ppp.Rd
+570b6a76b685cd738eadb3715c3251c5 *man/unitname.Rd
+61992a11d7919419002356813917f96b *man/unmark.Rd
+898e839b9ce21c5f019b69814bd91220 *man/unnormdensity.Rd
+d11a2ad5dd1042b6caff2a7aac7aa935 *man/unstack.msr.Rd
+ce187617e16d3d669f8f8462ffbc0aa2 *man/unstack.ppp.Rd
+d97b7f4286814cf89ce5f06a76409744 *man/update.detpointprocfamily.Rd
+7e613050b5075767ca9d48d7070dd386 *man/update.interact.Rd
+5f73a555da54aa666f7e9d8f39d3f412 *man/update.kppm.Rd
+98ce0cb16c8a4fe6980b9265881b58ea *man/update.ppm.Rd
+70f976c07e44c9fe6bf41b9d55d326cc *man/update.rmhcontrol.Rd
+8ca5be2ace0c48113afbaf2822674a55 *man/update.symbolmap.Rd
+d817ae87042f9230ae0105e82545c00f *man/urkiola.Rd
+47bd28833a40a74899d25734078640d6 *man/valid.Rd
+9449cb5c1fec24621c998a506be0eac2 *man/valid.detpointprocfamily.Rd
+1ed9f6e59dad62161fc6867d14156a24 *man/valid.ppm.Rd
+9b9f643ceb5ba73627013d63dd7515d6 *man/varblock.Rd
+75ce8887852822a4d08a9e44076c5976 *man/varcount.Rd
+dfa61aa27f9908e772b4dbfc8db2d708 *man/vargamma.estK.Rd
+20bdec51627e17637f8e487de060250e *man/vargamma.estpcf.Rd
+60e741ac8e8fafd23cba745a70bc5ec0 *man/vcov.kppm.Rd
+f85824c3c9ec3a4c31f04be59552caf7 *man/vcov.mppm.Rd
+b3a372e669c2c9bb08716b065974bf18 *man/vcov.ppm.Rd
+eb7578d51b7ad9f21067d0bbba362167 *man/vcov.slrm.Rd
+bc6d0d00510542ec7b835bf5fc94fbd1 *man/vertices.Rd
+f1d769547f7ecdd010247c5dd3d9fcac *man/vesicles.Rd
+1d850409bd4f915052fa4be2b1e62ae1 *man/volume.Rd
+8fa8c74acb0f8f829fab98c15b03647d *man/waka.Rd
+ff892aa005e53e07897278bebdf11f87 *man/waterstriders.Rd
+b3fe75f22e0494e4484532f8b079a50d *man/weighted.median.Rd
+dd00a09e89c41f24564ceab5dba86724 *man/where.max.Rd
+03f030dc0305af42897046f755430da8 *man/whichhalfplane.Rd
+cd1d44aff4d46566233ded55e833a25e *man/whist.Rd
+513778fbca80df00f2ea2b710263fe3c *man/will.expand.Rd
+f42290be6d3a75590b290967ad76c483 *man/with.fv.Rd
+b135bf634a66784497a9cb068415a686 *man/with.hyperframe.Rd
+bcdfdae48e2a3d9d36fc53fa185801c4 *man/with.msr.Rd
+d02099194c00a636349b66309fed886e *man/with.ssf.Rd
+be681008100cebc209f8f1b6e89c65e5 *man/yardstick.Rd
+33376b40fd1dfd0f3fad8c6ec146fcd4 *man/zapsmall.im.Rd
+46bdd584bf584fb298bfe431934e36cd *man/zclustermodel.Rd
+d61686fa57dc89a776b964008f2ab788 *src/Ediggatsti.c
+30c67666b16108cd865c83c4a9136d1f *src/Ediggra.c
+4afd831630f91c00e28b7d0ac10f0318 *src/Efiksel.c
+2eb027adf82b6e2fbee693030507ea46 *src/Egeyer.c
+e7911c3e7d963bdd0ecc494457ccaa43 *src/Estrauss.c
+183f2fb6304e391e54ab0ed6c6928237 *src/Kborder.c
+f5ddb0253e3280cc428e534f1c3b76a6 *src/Kborder.h
+83589fde9faccd8b888bce02b02fddfc *src/Knone.c
+c394c76e3bf450475cc8cc5789b8ebf5 *src/Knone.h
+5d190c3c4bf0a137fd9ef0d9918b5c5d *src/Krect.c
+707949facbb1443a42c08d275604ce41 *src/KrectBody.h
+30d4b06e1c13087513d74d41528b75e4 *src/KrectFunDec.h
+f7ad99f0f44158cc2c9100e38cc1b185 *src/KrectIncrem.h
+08a4f417aed6959d94da3abc8bc55d0f *src/KrectV1.h
+92268e0af4222764daf4b106a12bbadc *src/KrectV2.h
+263f2296e12aee28f73dff92cef5dd61 *src/KrectV3.h
+4ab4852a66f7a56b52e999f326f179c2 *src/KrectV4.h
+34e856d0118899ad881a1b784cbbf13e *src/Perfect.cc
+c0ac68682e8c7e26ce52b8c76fd3d4ab *src/PerfectDGS.h
+3a040c136a2b157e7854524d2c5c9c11 *src/PerfectDiggleGratton.h
+5bc6a1478093ba8401ef8ff47d939298 *src/PerfectHardcore.h
+af1babec03bedabd8cdd2ccb694352f4 *src/PerfectPenttinen.h
+786067ae57897a4392667fa2acab7595 *src/PerfectStrauss.h
+ec004bfa0111b9b3f3c55e7ede7fb4a4 *src/PerfectStraussHard.h
+051ca2afe6d6e5904ebba115bdcbf8e4 *src/areadiff.c
+6ba6b3e1b37ebe8ac0680c5e071188f6 *src/areaint.c
+9255d4a6902de2807401c997cd223174 *src/areapair.c
+15c96da5753675c30125760d4cd874a7 *src/auctionbf.c
+89cad006e13a81a4b793d89b2b3bb7cf *src/badgey.c
+af433219da832d098301a5bb0321c3f9 *src/bdrymask.c
+27c73a0901b9d346ba84974f7481ff3c *src/call3d.c
+3c6ee73e77c45154372fe8dbb893902f *src/chunkloop.h
+55666e0e32f21396ec026be9f10432f4 *src/close3pair.c
+3026aae4bccf4129d9571ece001f2d47 *src/closefuns.h
+347f9c6df447899f91d2ac76f0388a5d *src/closepair.c
+72e219e7bf86c2a0eba609edb6be5aeb *src/connect.c
+e41c08db1ce0cf5deab867112738c6ac *src/constants.h
+7436c08a79ce611f48ebac47564a24b1 *src/corrections.c
+635ab7b038a7cd6f11e4410584f3af0d *src/crossloop.h
+d758abce63375967e345d4da2fe6511d *src/denspt.c
+787ec21b1dbf8298ca482a2751cfc2cd *src/densptcross.c
+fd5c0ecd545b4d9e50d0d4e1e202beb3 *src/dgs.c
+d7d12f4878e93a3932efc744f93aaa35 *src/digber.c
+57fcffca69c7d7deb5937acbb6955dad *src/diggra.c
+9d438b3137f879ab21f85378bba410a6 *src/dinfty.c
+7e1e7445f9d0faeac100e98764c73391 *src/discarea.c
+d045f6d3df16aef7f02d55321072f53a *src/discs.c
+69884b5b42f142e6d0e863b1eafc3ea8 *src/dist2.c
+e36dd81e7d3589ea68b5d51e232d2139 *src/dist2.h
+30e6782eea1d200aeb5af29554e86e99 *src/dist2dpath.c
+a1809fc2566fabacd87f85780fed4431 *src/dist2dpath.h
+3bbbb08f1bd01cd2632e7f68329e34a8 *src/distan3.c
+fef8cbc507da19739ace9ba4b4530f7d *src/distances.c
+7114501c777e2bf2f5cf36bcfb13494b *src/distmapbin.c
+7028f09de82307e1e46de69c104b7423 *src/dwpure.c
+e73182d2331b6f41c46a793bf8606e88 *src/exactPdist.c
+60642750dc3fb722bc05b35a2237f5ca *src/exactdist.c
+79bdd83f960746df0bef5cea8bbaaead *src/f3.c
+1df01cb8fdb284bf568e84b097777f9e *src/fardist.c
+392c9edaa96b756912bf59450dd72ebd *src/fardist.h
+ab7588df53688ba2bb383afaaa58d0d7 *src/fexitc.c
+9ad3159a4625df6b45245dedba8b816d *src/fiksel.c
+0ba81075007b6ab741f3eda829da8e99 *src/functable.h
+f2bb0d35bc86962384f01c6959409981 *src/g3.c
+3280a084e3cdcb931801c42fcd111d2e *src/geom3.h
+5e13151e750d3fedb93005afc8c67954 *src/getcif.c
+c4d587523e2d2e58615eb0d2084a2167 *src/geyer.c
+3228576b7ca41179fe5a99fd0a4d4001 *src/hardcore.c
+f64c32ad80b0c295009994ffb7299670 *src/hasclose.c
+4b4909519c2235d56140ed94a66374b8 *src/hasclose.h
+50f80d5b4ec82eb0b7e86bbf5a98c1d0 *src/idw.c
+d474816720bcff0f69293c8861dfa492 *src/init.c
+9c79e8972c24446466e9dcb30ad82217 *src/k3.c
+04fa575485349dece8695de21e011188 *src/knn3Ddist.h
+493c3e5d2f83c09bf713b173a9e6658a *src/knn3DdistX.h
+f129ad504bf8946cf6a755c7f04fe253 *src/knnXdist.h
+07039b406a87967282c97eb2bfd60707 *src/knndist.h
+7ecc9842efabd93bc452bbcf485dbbb8 *src/knndistance.c
+55295547fd74e0cdd004ba3547e181e2 *src/knngrid.c
+d2225a652d72aa868364c0cbaedc4a68 *src/knngrid.h
+5ca88ac5e99e094f0b91183500a4f433 *src/lennard.c
+a81dc0ace22bc97932e31ddbb1e7b95f *src/linSnncross.c
+4dc17cc3b593bbc0c362ce52ec762afb *src/linSnncross.h
+82ee6cc3ed8a206c2a61fa929efc078e *src/linalg.c
+0a65286b5bc50c1afd304fbc390db156 *src/lincrossdist.c
+899d81e77cf5c2150a8bbe1e735bca12 *src/lineardisc.c
+a1abe981338133807bc3c97f98f06bb1 *src/linearradius.c
+e816be5e76902429899caba859ad35a9 *src/linequad.c
+b6334a6d4f756bef8eabaf5188201630 *src/linequad.h
+8633ccb9f797ad3d142543da4ac94e94 *src/linknnd.c
+cc1a021dae521e4b69f5572a72d494b5 *src/linknnd.h
+bb011958a4675e5acc2fcd32718c3dde *src/linnncross.c
+76444dbcb11ebd259c5b9a0cd96448a3 *src/linnncross.h
+82b4b8dad34974380b777eb5c38f4c07 *src/linnndist.c
+79080eb00c6ea7482a81cf056d83c0a5 *src/linpairdist.c
+4e28ba1603b36e8900ed9ae2a5468f32 *src/linvdist.c
+7b18944df45bde5b578c54c204005349 *src/linvdist.h
+d66cc3f9557baec5a39fdd7c405719ed *src/linvknndist.c
+e717a4283c2f753738a147d6f5d27b92 *src/lixel.c
+ea49927ad721530df8bc059694b46df9 *src/localpcf.c
+cc9a75e32ca0e80ff4e45b9721888faa *src/localpcf.h
+65afd0160cdcbe16b1905b34878c685e *src/loccum.c
+af8d07773e8fff1a7b1eee6cbe26d45d *src/loccums.h
+576eb8024f1b70c876b3f81359ec7f97 *src/loccumx.h
+ec8a6f16dafb28572ff44a370cb1ab02 *src/lookup.c
+d4f690790bb0e2585fd2a2645e0556d2 *src/looptest.h
+458aaf8343056834e27096e20cfb4a98 *src/massdisthack.c
+a9c90573cb2c76ea5a936e8c6d8f53d5 *src/maxnnd.h
+9e0b28ecd67dd085ab596f3ae9fafa50 *src/methas.c
+69d57274cda1c955d631a7c241cb9a00 *src/methas.h
+20c0be6b768369562ef2ce594041f394 *src/mhloop.h
+4bfdc5f406a612060458999e9319bbbc *src/mhsnoop.c
+3aec5d482a96acefc8bcf9ccef064f57 *src/mhsnoop.h
+dbcb22b795dda5203ac34fc61e930365 *src/mhsnoopdef.h
+19ca30742351e422fac32fe27613a504 *src/mhv1.h
+9729a578722aa471c9330a01af5a5c09 *src/mhv2.h
+3d9d655e94d771cbf55ffdfbb1124492 *src/mhv3.h
+4bca34bd89e9bcb606838b28f3ea8eaf *src/mhv4.h
+3c46198668482eb6d0fa28ab76a0abdf *src/mhv5.h
+302994d092504354cf1ffb8ccdbcf892 *src/minnnd.c
+2b6fcc4df2171c2f5924bc9ea120e076 *src/minnnd.h
+dc4453e1a8317eab70e9710371c384d2 *src/multihard.c
+bbf9e1d275d180289b9155f04db9de6b *src/nn3Ddist.c
+ee5ed316bb3f302e25ab36eab25078fe *src/nn3Ddist.h
+609029dcaa0bbcf85efbe6f330d1ddce *src/nn3DdistX.h
+e56ce2952ae715addc650117b828caa3 *src/nnMDdist.c
+77417067aa7539794fef337365495dff *src/nndist.h
+af1ef3af29ac5dc30a190234e9b28e0b *src/nndistX.h
+5deb863d2ce3f7706c0c6c35244017ff *src/nndistance.c
+93dff60f269800a42b3dc330294e8c97 *src/nngrid.c
+74149ebdd825d1d392ce4636d9e5fc7e *src/nngrid.h
+4e3eeae474afde5606e63d6e9e49a508 *src/pairloop.h
+9d1981b78382e7e368e2cf9cee262342 *src/pcf3.c
+887daec80901782cc831ba2dbcd5b3be *src/penttinen.c
+0f0d9de8e74630e340e60769b8dce84f *src/poly2im.c
+1dab0cc97b4c4b3ea4825a2b8cf8be86 *src/proto.h
+dc7d8f0ee5ffe7f9397c25b648d93c1e *src/quasirandom.c
+a387ad5b47dd254334ec4bdf510e7b35 *src/raster.h
+7de5b856327c5b46ba0b02afdfc7a179 *src/rthin.c
+cdda9b160cf3edae4c6cadbce7bad53f *src/scan.c
+abe76267dd3491976da91e9c6c4c9d6f *src/seg2pix.c
+5fdaae31c5336c9f4f4ca799c7faf722 *src/seg2pix.h
+c85b38af977450a3adb73c3e9d168574 *src/segdens.c
+3a5e04ac4ad9fc0efd10ef39dc55f041 *src/sftcr.c
+d6299bd9553d4ecbc7b84482f4c0dcd3 *src/sparselinalg.c
+1f5554a9241e29019ef254f6781aff22 *src/spasumsymout.h
+616cfb8ef04f625dd3395fb5e6a38f14 *src/sphefrac.c
+7877dac5d832e95257442e8b7fa8f02b *src/sphevol.c
+18b99b034669b76b4b9ccaef945200f0 *src/straush.c
+e072e3a74914a74af746481c3a3b8b3b *src/straushm.c
+28d7ac41aaef4367e9d57b020ed5fb3c *src/strauss.c
+0cf60fa5405e4b7f31cde35a0d390351 *src/straussm.c
+2143b5d2f472c4190dea1114a8fef54a *src/sumsymouter.h
+7e5839c8b2ed13edc4206ffb94d3862a *src/trigraf.c
+03e65a27588194512db2649bec6e5277 *src/triplets.c
+94d4b6605e4a2c5271dd567ea7648fd0 *src/veegraf.c
+04de34ab52bb76737583dd68367bb10a *src/whist.c
+41552329d886ee7870caddcf0f580243 *src/xyseg.c
+5c127a9d5ddeaee8cc8f34b32218a3a5 *src/yesno.h
+5662cc3feeb8b25a9e413547d82f4074 *tests/badwindow.txt
+c1624129ac630ef4653e3119d70ffa5b *tests/selfcross.txt
+9aed2f34d444c81eca7c865bf3bc6661 *tests/testsAtoF.R
+c118a4f58a3607a2865b92e07342715d *tests/testsGtoK.R
+079b444d735bec083c8513311c5d561b *tests/testsLtoM.R
+5cc2ab3c075b448f3c2f7758013eefa6 *tests/testsNtoP.R
+de6343ad4989615bf62cc92fb3807039 *tests/testsQtoR.R
+ebebd7e7aee6f456169e9f416accbca7 *tests/testsStoZ.R
+3c3e82583903858cc663c50624f075ee *vignettes/datasets.Rnw
+3cc6b729932901e25778c21cb679eab0 *vignettes/getstart.Rnw
+8cda84badf5153c61e2b6b2e7bf14322 *vignettes/hexagon.eps
+28c409e6cfde065a32cdc922787086ec *vignettes/hexagon.pdf
+5d818e3b6c4cc36b55b35289c3282394 *vignettes/irregpoly.eps
+1dd34a3acaa93d24bf0388fa83caf892 *vignettes/irregpoly.pdf
+f420eafa6b29f360b72269c91c485167 *vignettes/replicated.Rnw
+593a297bf4451016def2e174ea62b099 *vignettes/shapefiles.Rnw
+2dab51568addf8e96262c4fd3322571e *vignettes/updates.Rnw
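
The listing above is the package's MD5 manifest: one MD5 digest per source
file, in the format R itself consumes. A minimal sketch of verifying an
unpacked source tree against it, assuming the tarball was extracted to a
local directory named spatstat/ (path hypothetical):

    library(tools)
    # compares every file in the tree against the MD5 manifest shipped with it
    ok <- checkMD5sums(package = "spatstat", dir = "spatstat")
    if (isTRUE(ok)) message("all checksums match the manifest")
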
diff --git a/NAMESPACE b/NAMESPACE
new file mode 100644
index 0000000..685ade2
--- /dev/null
+++ b/NAMESPACE
@@ -0,0 +1,3734 @@
+# spatstat NAMESPACE file
+
+import(stats,graphics,grDevices,utils,methods)
+import(spatstat.utils)
+import(polyclip,goftest)
+import(Matrix,nlme,rpart)
+importFrom(deldir,
+           deldir,duplicatedxy,tile.list)
+importFrom(abind,abind)
+importFrom(tensor,tensor)
+importFrom(mgcv,
+           gam,gam.control,anova.gam,formula.gam,predict.gam,
+           print.gam,summary.gam,vcov.gam,s)
+
+# ....  load dynamic library .....
+#      (native routines are now registered in init.c)
+useDynLib(spatstat, .registration=TRUE)
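+# Hedged aside (not part of the upstream file): once the package is loaded,
+# the entry points registered in src/init.c can be listed from R with a
+# base utility, which is a quick check that .registration=TRUE took effect:
+#     reg <- getDLLRegisteredRoutines("spatstat")
+#     names(reg)           # ".C", ".Call", ".Fortran", ".External"
+#     sapply(reg, length)  # how many routines of each kind were registered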
+
+# Do not edit the following.
+# It is generated automatically.
+
+# .................................................. 
+# Automatically-generated list of documented objects 
+# .................................................. 
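+# Note (editorial, hedged): entries such as "affine.ppp" below are S3
+# methods exported by full name, so each is callable directly as well as
+# through its generic. A minimal sketch, assuming the package is installed:
+#     X <- runifpoint(50)                        # pattern in the unit square
+#     identical(affine(X, mat = diag(c(2, 1))),  # generic dispatches to
+#               affine.ppp(X, mat = diag(c(2, 1))))  # the exported method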
+export("acedist.noshow")
+export("acedist.show")
+export("active.interactions")
+export("adaptcoef")
+export("adaptive.density")
+export("add.texture")
+export("addvar")
+export("adjust.ratfv")
+export("affine")
+export("affine.im") 
+export("affine.layered")
+export("affine.linim")
+export("affine.linnet") 
+export("affine.lpp") 
+export("affine.owin") 
+export("affine.ppp") 
+export("affine.psp")
+export("affine.tess") 
+export("affinexy")
+export("affinexypolygon")
+export("AIC.dppm")
+export("AIC.kppm")
+export("AIC.mppm")
+export("AIC.ppm")
+export("allElementsIdentical")
+export("allstats")
+export("alltypes")
+export("ang2rad")
+export("angles.psp")
+export("anova.lppm")
+export("anova.mppm")
+export("anova.ppm")
+export("anova.slrm")
+export("anycrossing.psp")
+export("anyDuplicated.ppp")
+export("anyDuplicated.ppx")
+export("anylapply")
+export("[<-.anylist")
+export("[.anylist")
+export("anylist")
+export("anyNA.im")
+export("anyNA.sparse3Darray")
+export("aperm.sparse3Darray")
+export("append.psp")
+export("ApplyConnected")
+export("applynbd")
+export("applyPolyclipArgs")
+export("applySparseEntries")
+export("apply.ssf")
+export("applytolayers")
+export("area")
+export("area.default")
+export("areadelta2")       
+export("areaGain")
+export("areaGain.diri")       
+export("areaGain.grid")       
+export("AreaInter")
+export("areaLoss")
+export("areaLoss.diri")       
+export("areaLoss.grid")
+export("area.owin")
+export("as.anylist")
+export("as.array.im")
+export("as.array.sparse3Darray")
+export("as.box3")
+export("as.boxx")
+export("as.breakpts")
+export("as.character.units")
+export("as.data.frame.bw.optim")
+export("as.data.frame.envelope")
+export("as.data.frame.fv")
+export("as.data.frame.hyperframe")
+export("as.data.frame.im")
+export("as.data.frame.linfun")
+export("as.data.frame.linim")
+export("as.data.frame.owin")
+export("as.data.frame.ppp")
+export("as.data.frame.ppx")
+export("as.data.frame.psp")
+export("as.data.frame.tess")
+export("as.double.im")
+export("as.function.fv")
+export("as.function.im")
+export("as.function.leverage.ppm")
+export("as.function.linfun")
+export("as.function.owin")
+export("as.function.rhohat")
+export("as.function.ssf")
+export("as.function.tess")
+export("as.fv")
+export("as.fv.bw.optim")
+export("as.fv.data.frame")
+export("as.fv.dppm")
+export("as.fv.fasp")
+export("as.fv.fv")
+export("as.fv.kppm")
+export("as.fv.matrix")
+export("as.fv.minconfit")
+export("as.hyperframe")
+export("as.hyperframe.anylist")
+export("as.hyperframe.data.frame")
+export("as.hyperframe.default")
+export("as.hyperframe.hyperframe")
+export("as.hyperframe.listof")
+export("as.hyperframe.ppx")
+export("as.im")
+export("as.im.data.frame")
+export("as.im.default")
+export("as.im.distfun")
+export("as.im.function")
+export("as.im.funxy")
+export("as.im.im")
+export("as.im.leverage.ppm")
+export("as.im.linim")
+export("as.imlist")
+export("as.im.matrix")
+export("as.im.nnfun")
+export("as.im.owin")
+export("as.im.ppp")
+export("as.im.scan.test")
+export("as.im.Smoothfun")
+export("as.im.ssf")
+export("as.im.tess")
+export("as.interact")
+export("as.interact.fii")
+export("as.interact.interact")
+export("as.interact.ppm")
+export("as.layered")
+export("as.layered.default")
+export("as.layered.listof")
+export("as.layered.msr")
+export("as.layered.ppp")
+export("as.layered.solist")
+export("as.layered.splitppp")
+export("as.linfun")
+export("as.linfun.linfun")
+export("as.linfun.linim")
+export("as.linfun.lintess")
+export("as.linim")
+export("as.linim.default")
+export("as.linim.linfun")
+export("as.linim.linim")
+export("as.linnet")
+export("as.linnet.linfun")
+export("as.linnet.linim")
+export("as.linnet.linnet")
+export("as.linnet.lintess")
+export("as.linnet.lpp")
+export("as.linnet.lppm")
+export("as.linnet.psp")
+export("as.list.hyperframe")
+export("as.listof")
+export("as.lpp")
+export("as.mask")
+export("as.mask.psp")
+export("as.matrix.im")
+export("as.matrix.owin")
+export("as.matrix.ppx")
+export("as.owin")
+export("as.owin.boxx")
+export("as.owin.data.frame")
+export("as.owin.default")
+export("as.owin.distfun")
+export("as.owin.dppm")
+export("as.owin.funxy")
+export("as.owin.im")
+export("as.owin.influence.ppm")
+export("as.owin.kppm")
+export("as.owin.layered")
+export("as.owin.leverage.ppm")
+export("as.owin.linfun")
+export("as.owin.linnet")
+export("as.owin.lintess")
+export("as.owin.lpp")
+export("as.owin.lppm")
+export("as.owin.msr")
+export("as.owin.nnfun")
+export("as.owin.owin")
+export("as.owin.ppm")
+export("as.owin.ppp")
+export("as.owin.psp")
+export("as.owin.quad")
+export("as.owin.quadratcount")
+export("as.owin.quadrattest")
+export("as.owin.rmhmodel")
+export("as.owin.tess")
+export("as.polygonal")
+export("as.ppm")
+export("as.ppm.dppm")
+export("as.ppm.kppm")
+export("as.ppm.lppm")
+export("as.ppm.ppm")
+export("as.ppm.profilepl")
+export("as.ppm.rppm")
+export("as.ppp")
+export("as.ppp.data.frame")
+export("as.ppp.default")
+export("as.ppp.influence.ppm")
+export("as.ppplist")
+export("as.ppp.lpp")
+export("as.ppp.matrix")
+export("as.ppp.ppp")
+export("as.ppp.psp")
+export("as.ppp.quad")
+export("as.ppp.ssf")
+export("as.psp")
+export("as.psp.data.frame")
+export("as.psp.default")
+export("as.psp.linnet")
+export("as.psp.lpp")
+export("as.psp.matrix")
+export("as.psp.owin")
+export("as.psp.psp")
+export("as.rectangle")
+export("assemble.plot.objects")       
+export("as.solist")
+export("as.sparse3Darray")
+export("as.tess")
+export("as.tess.im")
+export("as.tess.list")
+export("as.tess.owin")
+export("as.tess.quadratcount")
+export("as.tess.quadrattest")
+export("as.tess.tess")
+export("as.units")
+export("AsymmDistance.psp")
+export("auc")
+export("auc.kppm")
+export("auc.lpp")
+export("auc.lppm")
+export("auc.ppm")
+export("auc.ppp")
+export("augment.msr")
+export("BadGey")
+export("BartCalc")
+export("bbEngine")
+export("bc")
+export("bc.ppm")
+export("bdist.pixels")
+export("bdist.points")
+export("bdist.tiles")
+export("bdry.mask")
+export("beachcolourmap")
+export("beachcolours")
+export("beginner")
+export("begins")
+export("berman.test")
+export("bermantest")
+export("bermantestEngine")
+export("berman.test.lpp")
+export("bermantest.lpp")
+export("berman.test.lppm")
+export("bermantest.lppm")
+export("berman.test.ppm")
+export("bermantest.ppm")
+export("berman.test.ppp")
+export("bermantest.ppp")
+export("bilinearform")
+export("bind.fv")
+export("bind.ratfv")
+export("bind.sparse3Darray")
+export("bits.test")
+export("blankcoefnames")
+export("blur")
+export("border")
+export("bounding.box")
+export("boundingbox")
+export("bounding.box3")
+export("boundingbox.default")
+export("boundingbox.im")
+export("boundingbox.owin")
+export("boundingbox.ppp")
+export("boundingbox.solist")
+export("bounding.box.xy")
+export("boundingcentre")
+export("boundingcentre.owin")
+export("boundingcentre.ppp")
+export("boundingcircle")
+export("boundingcircle.owin")
+export("boundingcircle.ppp")
+export("boundingradius")
+export("boundingradius.linnet")
+export("boundingradius.owin")
+export("boundingradius.ppp")
+export("box3")
+export("boxx")
+export("branchlabelfun")
+export("break.holes")
+export("breakpts")
+export("breakpts.from.r")
+export("bt.frame")
+export("bugfixes")
+export("bw.diggle")
+export("bw.frac")
+export("bw.optim")
+export("bw.pcf")
+export("bw.ppl")
+export("bw.relrisk")
+export("bw.scott")
+export("bw.smoothppp")
+export("bw.stoyan")
+export("by.im")
+export("by.ppp")
+export("calc.DR")
+export("calc.NNIR")
+export("calc.SAVE")
+export("calc.SIR")
+export("calc.TSE")
+export("cannot.update")
+export("cartesian")
+export("cauchy.estK")
+export("cauchy.estpcf")
+export("cbind.fv")
+export("cbind.hyperframe")
+export("CDF")
+export("CDF.density")
+export("cdf.test")
+export("cdf.test.lpp")
+export("cdf.test.lppm")
+export("cdf.test.mppm")
+export("cdf.test.ppm")
+export("cdf.test.ppp")
+export("cdf.test.slrm")
+export("cellmiddles")
+export("censtimeCDFest")
+export("centroid.owin")
+export("change.default.expand")          
+export("checkbigmatrix")          
+export("checkfields")          
+export("check.finespacing")
+export("check.hist.lengths")
+export("check.mat.mul")
+export("checksolve")          
+export("check.testfun")
+export("chop.tess")
+export("circdensity")
+export("circticks")
+export("circumradius")
+export("circumradius.linnet")
+export("circumradius.owin")
+export("circumradius.ppp")
+export("clarkevans")
+export("clarkevansCalc")
+export("clarkevans.test")
+export("clear.simplepanel")
+export("clf.test")
+export("clickbox")
+export("clickdist")
+export("clickjoin")
+export("clicklpp")
+export("clickpoly")
+export("clickppp")
+export("clip.infline")
+export("clippoly.psp")
+export("clip.psp")
+export("cliprect.psp")
+export("closepaircounts")
+export("closepairs")
+export("closepairs.pp3")
+export("closepairs.ppp")
+export("closethresh")
+export("closetriples")
+export("closing")
+export("closing.owin")
+export("closing.ppp")
+export("closing.psp")
+export("clusterfield")
+export("clusterfield.character")
+export("clusterfield.function")
+export("clusterfield.kppm")
+export("clusterfit")
+export("clusterkernel")
+export("clusterkernel.character")
+export("clusterkernel.kppm")
+export("clusterradius")
+export("clusterradius.character")
+export("clusterradius.kppm")
+export("clusterset")
+export("cobble.xy")
+export("codetime")
+export("coef.dppm")
+export("coef.fii")
+export("coef.kppm")
+export("coef.lppm")
+export("coef.mppm")
+export("coef.ppm")
+export("coef.slrm")
+export("coef.summary.fii")
+export("coef.summary.kppm")
+export("coef.summary.ppm")
+export("coef.vblogit") 
+export("coerce.marks.numeric")
+export("col2hex")
+export("col.args.to.grey")
+export("collapse.anylist")
+export("collapse.fv")
+export("colourmap")
+export("colouroutputs<-")
+export("colouroutputs")
+export("commonGrid")
+export("commonPolyclipArgs")
+export("compareFit")
+export("compatible")
+export("compatible.fasp")
+export("compatible.fv")
+export("compatible.im")
+export("compatible.rat")
+export("compatible.units")
+export("compileCDF")
+export("compileK")
+export("compilepcf")
+export("complementarycolour")
+export("complement.owin")
+export("Complex.im")
+export("Complex.imlist")
+export("Complex.linim")
+export("concatxy")
+export("Concom")
+export("conform.imagelist")
+export("conform.ratfv")
+export("connected")
+export("connected.im")
+export("connected.linnet")
+export("connected.lpp")
+export("connected.owin")
+export("connected.ppp")
+export("conspire")
+export("contour.funxy")
+export("contour.im")
+export("contour.imlist")
+export("contour.listof")
+export("contour.objsurf")
+export("contour.ssf")
+export("convexhull")
+export("convexhull.xy")
+export("convexify")
+export("convolve.im")
+export("coords<-")
+export("coords")
+export("coords<-.ppp")
+export("coords.ppp")
+export("coords<-.ppx")
+export("coords.ppx")
+export("copyExampleFiles")
+export("corners")
+export("countends")
+export("countingweights")
+export("covering")
+export("CressieReadName")
+export("CressieReadStatistic")
+export("CressieReadSymbol")
+export("crossdist")
+export("crossdist.default")
+export("crossdist.lpp")
+export("crossdist.pp3")
+export("crossdist.ppp")
+export("crossdist.ppx")
+export("crossdist.psp")
+export("crossing.linnet")
+export("crossing.psp")
+export("crosspaircounts")
+export("crosspairquad")
+export("crosspairs")
+export("crosspairs.pp3")
+export("crosspairs.ppp")
+export("cut.im")
+export("cut.lpp")
+export("cut.ppp")
+export("CVforPCF")
+export("damaged.ppm")
+export("datagen.rpoisppOnLines")
+export("datagen.runifpointOnLines")
+export("datagen.runifpoisppOnLines")
+export("data.mppm")
+export("data.ppm")
+export("dclf.progress")
+export("dclf.sigtrace")
+export("dclf.test")
+export("default.clipwindow")
+export("default.dummy")
+export("default.expand")
+export("default.linnet.tolerance")
+export("default.ntile")
+export("default.n.tiling")
+export("default.rmhcontrol")
+export("delaunay")
+export("delaunay.distance")
+export("delaunayDistance")
+export("delaunay.network")
+export("delaunayNetwork")
+export("deletebranch")
+export("deletebranch.linnet")
+export("deletebranch.lpp")
+export("deltametric")
+export("deltasuffstat")
+export("densitycrossEngine")
+export("density.lpp")
+export("densitypointsEngine")
+export("density.ppp")
+export("density.ppplist")
+export("density.psp")
+export("density.splitppp")
+export("density.splitppx")
+export("deriv.fv")
+export("detpointprocfamilyfun")
+export("deviance.lppm")
+export("deviance.ppm")
+export("Deviation")
+export("dfbetas.ppm")
+export("dflt.redraw")
+export("dg.envelope")
+export("dg.progress")
+export("dg.sigtrace")
+export("dg.test")
+export("diagnose.ppm")
+export("diagnose.ppm.engine")
+export("[.diagramobj")
+export("diagramobj")
+export("diameter")
+export("diameter.box3")
+export("diameter.boxx")
+export("diameter.linnet")
+export("diameter.owin")
+export("digestCovariates")
+export("DiggleGatesStibbard")
+export("DiggleGratton")
+export("digital.volume")
+export("dilated.areas")
+export("dilate.owin")
+export("dilation")
+export("dilationAny")
+export("dilation.owin")
+export("dilation.ppp")
+export("dilation.psp")
+export("dim.detpointprocfamily")
+export("dim.fasp")               
+export("dimhat")
+export("dim.hyperframe")               
+export("dim.im")               
+export("dim.msr")
+export("dimnames<-.fasp")
+export("dimnames.fasp")               
+export("dimnames.msr")
+export("dimnames<-.sparse3Darray")
+export("dimnames.sparse3Darray")
+export("dim.owin")
+export("dim<-.sparse3Darray")
+export("dim.sparse3Darray")
+export("dirichlet")
+export("dirichletAreas")
+export("dirichlet.edges")
+export("dirichletEdges")
+export("dirichlet.network")
+export("dirichletNetwork")
+export("dirichlet.vertices")
+export("dirichletVertices")
+export("dirichlet.weights")
+export("dirichletWeights")
+export("disc")
+export("discpartarea")
+export("discretise")
+export("discs")
+export("dist2dpath")
+export("distcdf")
+export("distfun")
+export("distfun.lpp")
+export("distfun.owin")
+export("distfun.ppp")
+export("distfun.psp")
+export("distmap")
+export("distmap.owin")
+export("distmap.ppp")
+export("distmap.psp")
+export("distributecbind")
+export("divide.linnet")
+export("dkernel")
+export("dknn")
+export("dmixpois")
+export("do.as.im")
+export("do.call.plotfun")
+export("do.istat")
+export("domain")
+export("domain.distfun")
+export("domain.dppm")
+export("domain.funxy")
+export("domain.im")
+export("domain.im")
+export("domain.influence.ppm")
+export("domain.kppm")
+export("domain.layered")
+export("domain.leverage.ppm")
+export("domain.linfun")
+export("domain.lintess")
+export("domain.lpp")
+export("domain.lpp")
+export("domain.lppm")
+export("domain.msr")
+export("domain.nnfun")
+export("domain.pp3")
+export("domain.ppm")
+export("domain.ppp")
+export("domain.ppx")
+export("domain.psp")
+export("domain.quad")
+export("domain.quadratcount")
+export("domain.quadrattest")
+export("domain.rmhmodel")
+export("domain.tess")
+export("doMultiStraussHard")
+export("dppapproxkernel")
+export("dppapproxpcf")
+export("dppBessel")
+export("dppCauchy")
+export("dppeigen")
+export("dppGauss")
+export("dppkernel")
+export("dppm")
+export("dppMatern")
+export("dppmFixAlgorithm")
+export("dppmFixIntensity")
+export("dppparbounds")
+export("dppPowerExp")
+export("dppspecden")
+export("dppspecdenrange")
+export("dummify")
+export("dummy.ppm")
+export("duplicated.ppp")
+export("duplicated.ppx")
+export("edge.Ripley")
+export("edges")
+export("edges2triangles")
+export("edges2vees")
+export("edge.Trans")
+export("edit.hyperframe")
+export("edit.im")
+export("edit.ppp")
+export("edit.psp")
+export("eem")
+export("effectfun")
+export("ellipse")
+export("Emark")
+export("emend")
+export("emend.lppm")
+export("emend.ppm")
+export("emptywindow")
+export("endpoints.psp")
+export("EntriesToSparse")
+export("envelope")
+export("envelopeArray")
+export("envelopeEngine")
+export("envelope.envelope")
+export("envelope.hasenvelope")
+export("envelope.kppm")
+export("envelope.lpp")
+export("envelope.lppm")
+export("envelope.matrix")
+export("envelope.pp3")
+export("envelope.ppm")
+export("envelope.ppp")
+export("envelopeProgressData")
+export("envelopeTest")
+export("equalpairs")          
+export("equalpairs.quad")          
+export("equalsfun.quad")          
+export("equals.quad")          
+export("eroded.areas")
+export("eroded.volumes")
+export("eroded.volumes.box3")
+export("eroded.volumes.boxx")
+export("erodemask")
+export("erode.owin")
+export("erosion")
+export("erosionAny")
+export("erosion.owin")
+export("erosion.ppp")
+export("erosion.psp")
+export("evalCovar")
+export("evalCovariate")
+export("evalCovar.lppm")
+export("evalCovar.ppm")
+export("eval.fasp")
+export("eval.fv")
+export("eval.hyper")
+export("eval.im")
+export("evalInteraction")
+export("evalInterEngine")
+export("eval.linim")
+export("evalPairPotential")
+export("evalSparse3Dentrywise")
+export("evaluate2Dkernel")
+export("even.breaks.owin")
+export("ewcdf")
+export("exactdt")              
+export("exactMPLEstrauss")
+export("exactPdt")
+export("existsSpatstatVariable")
+export("expand.owin")
+export("expandSparse")
+export("expandSpecialLists")
+export("expandwinPerfect")
+export("ExpSmoothLog")
+export("extractAIC.dppm")
+export("extractAIC.kppm")
+export("extractAIC.lppm")
+export("extractAIC.mppm")
+export("extractAIC.ppm")
+export("extractAIC.slrm")
+export("extractAtomicQtests")
+export("extractbranch")
+export("extractbranch.linnet")
+export("extractbranch.lpp")
+export("f3Cengine")
+export("f3engine")
+export("F3est")
+export("fakemaintitle")
+export("family.vblogit") 
+export("fardist")
+export("fardist.owin")
+export("fardist.ppp")
+export("[.fasp")
+export("fasp")
+export("FDMKERNEL")
+export("Fest")
+export("fft2D")
+export("fftwAvailable")
+export("Fhazard")
+export("fii")
+export("Fiksel")
+export("fill.coefs")
+export("fillNA")
+export("findbestlegendpos")
+export("findcbind")
+export("findCovariate")
+export("Finhom")
+export("fitin")
+export("fitin.ppm")
+export("fitted.dppm")
+export("fitted.kppm")
+export("fitted.lppm")
+export("fitted.mppm")
+export("fitted.ppm")
+export("fitted.rppm")
+export("fitted.slrm")
+export("fixef.mppm")
+export("flatfname")
+export("flipxy")
+export("flipxy.im")
+export("flipxy.infline")
+export("flipxy.layered")
+export("flipxy.owin")
+export("flipxypolygon")
+export("flipxy.ppp")
+export("flipxy.psp")
+export("FmultiInhom")
+export("foo")
+export("forbid.logi")
+export("FormatFaspFormulae")
+export("format.numberwithunit")
+export("formula<-")
+export("formula.dppm")
+export("formula<-.fv")
+export("formula.fv")
+export("formula.kppm")
+export("formula.lppm")
+export("formula.ppm")
+export("formula.slrm")
+export("fourierbasis")
+export("Frame<-")
+export("Frame")
+export("framebottomleft")
+export("Frame.default")
+export("Frame<-.im")
+export("Frame<-.owin")
+export("Frame<-.ppp")
+export("fryplot")
+export("frypoints")
+export("funxy")
+export("[<-.fv")
+export("[.fv")
+export("$<-.fv")
+export("fv")
+export("fvexprmap")
+export("fvlabelmap")
+export("fvlabels<-")
+export("fvlabels")
+export("fvlegend")
+export("fvnames<-")
+export("fvnames")
+export("g3Cengine")
+export("g3engine")
+export("G3est")
+export("gauss.hermite")
+export("Gcom")
+export("Gcross")
+export("Gdot")
+export("Gest")
+export("getCall.mppm")
+export("getdataname")
+export("getfields")
+export("getglmdata")
+export("getglmfit")
+export("getglmsubset")
+export("getlambda.lpp")
+export("getlastshift")
+export("getppmdatasubset")
+export("getppmOriginalCovariates")
+export("getRandomFieldsModelGen")
+export("getSpatstatVariable")
+export("getSumFun")
+export("Geyer")
+export("geyercounts")
+export("geyerdelta2")
+export("Gfox")
+export("Ginhom")
+export("GLMpredict")
+export("Gmulti")
+export("GmultiInhom")
+export("good.correction.K")
+export("Gres")
+export("grid1index")
+export("gridcenters")
+export("gridcentres")
+export("gridindex")            
+export("gridweights")
+export("grokIndexVector")
+export("grow.box3")
+export("grow.boxx")
+export("grow.mask")
+export("grow.rectangle")
+export("grow.simplepanel")
+export("hackglmmPQL")
+export("Halton")
+export("Hammersley")
+export("handle.r.b.args")
+export("handle.rshift.args")
+export("Hardcore")
+export("harmonic")
+export("harmonise")
+export("harmonise.fv")
+export("harmonise.im")
+export("harmonise.msr")
+export("harmonise.owin")
+export("harmonize")
+export("harmonize.fv")
+export("harmonize.im")
+export("harmonize.owin")
+export("has.close")
+export("has.close.default")
+export("has.close.pp3")
+export("has.close.ppp")
+export("hasenvelope")
+export("has.offset")
+export("has.offset.term")
+export("head.hyperframe")
+export("head.ppp")
+export("head.ppx")
+export("head.psp")
+export("head.tess")
+export("HermiteCoefs")
+export("Hest")
+export("hexagon")
+export("hexgrid")
+export("hextess")
+export("hierarchicalordering")
+export("HierHard")
+export("hiermat")
+export("hierpair.family")
+export("HierStrauss")
+export("HierStraussHard")
+export("hist.funxy")
+export("hist.im")
+export("ho.engine")
+export("hopskel")
+export("hopskel.test")
+export("hsvim")
+export("hsvNA")
+export("Hybrid")
+export("hybrid.family")
+export("[<-.hyperframe")
+export("[.hyperframe")
+export("$<-.hyperframe")
+export("$.hyperframe")
+export("hyperframe")
+export("IdenticalRows")
+export("identify.lpp")
+export("identify.ppp")
+export("identify.psp")
+export("idorempty")
+export("idw")
+export("Iest")
+export("illegal.iformula")
+export("[<-.im")
+export("[.im")
+export("im")
+export("image.im")
+export("image.imlist")
+export("image.listof")
+export("image.objsurf")
+export("image.ssf")
+export("im.apply")
+export("imcov")
+export("implemented.for.K")
+export("impliedcoefficients")
+export("impliedpresence")
+export("improve.kppm")
+export("incircle")
+export("increment.fv")
+export("infline")
+export("[.influence.ppm")
+export("influence.ppm")
+export("inforder.family")
+export("inpoint")
+export("inradius")
+export("insertVertices")
+export("inside3Darray")
+export("inside.boxx")
+export("inside.owin")
+export("instantiate.interact")
+export("integral")
+export("integral.im")
+export("integral.linfun")
+export("integral.linim")
+export("integral.msr")
+export("integral.ssf")
+export("intensity")
+export("intensity.detpointprocfamily")
+export("intensity.dppm")
+export("intensity.lpp")
+export("intensity.ppm")
+export("intensity.ppp")
+export("intensity.ppx")
+export("intensity.quadratcount")
+export("intensity.splitppp")
+export("interactionfamilyname")
+export("intermaker")
+export("interp.colourmap")
+export("interp.colours")
+export("interp.im")
+export("intersect.owin")
+export("intersect.tess")
+export("intX.owin")     
+export("intX.xypolygon")     
+export("intY.owin")     
+export("intY.xypolygon")
+export("invokeColourmapRule")
+export("invoke.symbolmap")
+export("iplot")
+export("iplot.default")
+export("iplot.layered")
+export("iplot.linnet")
+export("iplot.lpp")
+export("iplot.ppp")
+export("ippm")
+export("is.atomicQtest")
+export("is.cadlag")
+export("is.col.argname")
+export("is.colour")
+export("is.connected")
+export("is.connected.default")
+export("is.connected.linnet")
+export("is.connected.ppp")
+export("is.convex")
+export("is.data")
+export("is.dppm")
+export("is.empty")
+export("is.empty.default")
+export("is.empty.owin")
+export("is.empty.ppp")
+export("is.empty.psp")
+export("is.expandable")
+export("is.expandable.ppm")
+export("is.expandable.rmhmodel")
+export("is.fv")
+export("is.grey")
+export("is.hybrid")
+export("is.hybrid.interact")
+export("is.hybrid.ppm")
+export("is.hyperframe")
+export("is.im")
+export("is.infline")
+export("is.interact")
+export("is.kppm")
+export("is.lpp")
+export("is.lppm")
+export("is.marked")
+export("is.marked.default")    
+export("is.marked.lppm")
+export("is.marked.msr")    
+export("is.marked.ppm")
+export("is.marked.ppp")
+export("is.marked.psp")    
+export("is.marked.quad")
+export("is.mask")
+export("is.mppm")
+export("is.multitype")
+export("is.multitype.default")    
+export("is.multitype.lpp")
+export("is.multitype.lppm")
+export("is.multitype.msr")    
+export("is.multitype.ppm")
+export("is.multitype.ppp")
+export("is.multitype.quad")    
+export("is.owin")
+export("is.poisson")
+export("is.poisson.interact")
+export("is.poisson.kppm")
+export("is.poisson.lppm")
+export("is.poisson.mppm")
+export("is.poisson.ppm")
+export("is.poisson.rmhmodel")
+export("is.poisson.slrm")
+export("is.polygonal")
+export("is.pp3")
+export("is.ppm")
+export("is.ppp")
+export("is.ppx")
+export("is.psp")
+export("is.rectangle")
+export("isRelevantZero")
+export("is.scov")
+export("is.slrm")
+export("is.sob")
+export("is.stationary")
+export("is.stationary.detpointprocfamily")
+export("is.stationary.dppm")
+export("is.stationary.kppm")
+export("is.stationary.lppm")
+export("is.stationary.ppm")
+export("is.stationary.rmhmodel")
+export("is.stationary.slrm")
+export("is.subset.owin")
+export("istat")
+export("is.tess")
+export("Jcross")
+export("Jdot")
+export("Jest")
+export("Jfox")
+export("Jinhom")
+export("Jmulti")
+export("k3engine")
+export("K3est")
+export("kaplan.meier")
+export("Kborder.engine")
+export("Kcom")
+export("Kcross")
+export("Kcross.inhom")
+export("Kdot")
+export("Kdot.inhom")
+export("kernel.factor")
+export("kernel.moment")
+export("kernel.squint")
+export("Kest")
+export("Kest.fft")
+export("killinteraction")
+export("Kinhom")
+export("Kmark")
+export("Kmeasure")
+export("Kmodel")
+export("Kmodel.detpointprocfamily")
+export("Kmodel.dppm")
+export("Kmodel.kppm")
+export("Kmodel.ppm")
+export("km.rs")
+export("km.rs.opt")
+export("Kmulti")
+export("Kmulti.inhom")
+export("Knone.engine")
+export("Kount")
+export("Kpcf.kppm")               
+export("kppm")
+export("kppmComLik")
+export("kppm.formula")
+export("kppmMinCon")
+export("kppmPalmLik")
+export("kppm.ppp")
+export("kppm.quad")
+export("kraever")
+export("kraeverRandomFields")
+export("Krect.engine")
+export("Kres")
+export("Kscaled")
+export("Ksector")
+export("ksmooth.ppp")
+export("kstest")
+export("kstest.lpp")
+export("kstest.lppm")
+export("ks.test.ppm")
+export("kstest.ppm")
+export("kstest.ppp")
+export("kstest.slrm")
+export("Kwtsum")               
+export("labels.dppm")
+export("labels.kppm")
+export("labels.ppm")
+export("labels.slrm")
+export("LambertW")
+export("laslett")
+export("latest.news")
+export("[<-.layered")
+export("[.layered")
+export("[[<-.layered")
+export("layered")
+export("layerplotargs<-")
+export("layerplotargs")
+export("layout.boxes")
+export("Lcross")
+export("Lcross.inhom")
+export("Ldot")
+export("Ldot.inhom")
+export("lengths.psp")
+export("LennardJones")
+export("Lest")
+export("levelsAsFactor")
+export("levelset")
+export("levels<-.im")
+export("levels.im")
+export("leverage")
+export("[.leverage.ppm")
+export("leverage.ppm")
+export("lgcp.estK")
+export("lgcp.estpcf")
+export("lineardirichlet")
+export("lineardisc")
+export("linearK")
+export("linearKcross")
+export("linearKcross.inhom")
+export("linearKdot")
+export("linearKdot.inhom")
+export("linearKengine")
+export("linearKinhom")
+export("linearKmulti")
+export("linearKmultiEngine")
+export("linearKmulti.inhom")
+export("linearmarkconnect")
+export("linearmarkequal")
+export("linearpcf")
+export("linearpcfcross")
+export("linearpcfcross.inhom")
+export("linearpcfdot")
+export("linearpcfdot.inhom")
+export("linearpcfengine")
+export("linearpcfinhom")
+export("linearpcfmulti")
+export("linearPCFmultiEngine")
+export("linearpcfmulti.inhom")
+export("linequad")
+export("linfun")
+export("Linhom")
+export("[.linim")
+export("linim")
+export("[.linnet")
+export("linnet")
+export("lintess")
+export("[<-.listof")
+export("listof")
+export("lixellate")
+export("local2lpp")
+export("localK")
+export("localKengine")
+export("localKinhom")
+export("localL")
+export("localLinhom")
+export("localpcf")
+export("localpcfengine")
+export("localpcfinhom")
+export("[.localpcfmatrix")
+export("localpcfmatrix")
+export("logicalIndex")
+export("logi.dummy")
+export("logi.engine")
+export("logLik.dppm")
+export("logLik.kppm")
+export("logLik.lppm")
+export("logLik.mppm")
+export("logLik.ppm")
+export("logLik.slrm")
+export("logLik.vblogit")
+export("lohboot")
+export("lookup2DkernelInfo")
+export("lookup.im")
+export("[.lpp")
+export("lpp")
+export("lppm")
+export("lppm.formula")
+export("lppm.lpp")
+export("Lscaled")
+export("lurking")               
+export("lut")
+export("mad.progress")
+export("mad.sigtrace")
+export("mad.test")
+export("majorminorversion")
+export("make.even.breaks")
+export("makefvlabel")
+export("makeLinnetTolerance")
+export("makeunits")
+export("mapSparseEntries")
+export("marginSums")
+export("markappend")
+export("markappendop")
+export("markcbind")
+export("markconnect")
+export("markcorr")
+export("markcorrint")
+export("markcrosscorr")
+export("markformat")
+export("markformat.default")
+export("markformat.ppp")
+export("markformat.ppx")
+export("markformat.psp")
+export("markmean")
+export("markreplicateop")
+export("marks<-")           
+export("marks")
+export("mark.scale.default")
+export("marks.default")           
+export("marks<-.lpp")
+export("markspace.integral")
+export("marks<-.ppp")           
+export("marks.ppp")           
+export("marks<-.ppx")           
+export("marks.ppx")           
+export("marks<-.psp")
+export("marks.psp")
+export("marks.quad")           
+export("marks<-.ssf")
+export("marks.ssf")
+export("markstat")
+export("marks<-.tess")
+export("marks.tess")
+export("marksubset")
+export("marksubsetop")
+export("marktable")
+export("markvar")
+export("markvario")
+export("mask2df")
+export("maskLaslett")
+export("match2DkernelName")
+export("matchingdist")
+export("match.kernel")
+export("matclust.estK")
+export("matclust.estpcf")
+export("Math.im")
+export("Math.imlist")
+export("Math.linim")
+export("Math.sparse3Darray")
+export("matrixinvsqrt")
+export("matrixpower")
+export("matrixsqrt")
+export("maxflow")
+export("max.fv")
+export("maxnndist")
+export("max.ssf")
+export("mctest.progress")
+export("mctest.sigtrace")
+export("mctestSigtraceEngine")
+export("mean.im")
+export("mean.linim")
+export("meanlistfv")
+export("meanX.owin")
+export("meanY.owin")
+export("measureNegative")
+export("measurePositive")
+export("measureVariation")
+export("median.im")
+export("median.linim")
+export("mergeLevels")
+export("midpoints.psp")
+export("mincontrast")
+export("min.fv")
+export("MinkowskiSum")
+export("minnndist")
+export("min.ssf")
+export("miplot")
+export("model.covariates")
+export("model.depends")
+export("model.frame.dppm")
+export("modelFrameGam")
+export("model.frame.kppm")
+export("model.frame.lppm")
+export("model.frame.ppm")
+export("model.images")
+export("model.images.dppm")
+export("model.images.kppm")
+export("model.images.lppm")
+export("model.images.ppm")
+export("model.images.slrm")
+export("model.is.additive")
+export("model.matrix.dppm")
+export("model.matrix.ippm")
+export("model.matrix.kppm")
+export("model.matrix.lppm")
+export("model.matrix.ppm")
+export("model.matrix.slrm")
+export("model.se.image")
+export("mpl")
+export("mpl.engine")
+export("mpl.get.covariates")
+export("mpl.prepare")
+export("mpl.usable")
+export("mppm")
+export("[.msr")
+export("msr")
+export("MultiHard")
+export("MultiPair.checkmatrix")
+export("multiplicity")
+export("multiplicity.data.frame")
+export("multiplicity.default")
+export("multiplicityNumeric")
+export("multiplicity.ppp")
+export("multiplicity.ppx")
+export("multiply.only.finite.entries")
+export("MultiStrauss")
+export("MultiStraussHard")
+export("na.handle.im")
+export("names<-.fv")
+export("names<-.hyperframe")
+export("names.hyperframe")
+export("nearest.neighbour")
+export("nearest.pixel")
+export("nearest.raster.point")
+export("nearestsegment")
+export("nearest.valid.pixel")
+export("nestsplit")
+export("newformula")
+export("newstyle.coeff.handling")
+export("nnclean")
+export("nncleanEngine")
+export("nnclean.pp3")
+export("nnclean.ppp")
+export("nncorr")
+export("nncross")
+export("nncross.default")
+export("nncross.lpp")
+export("nncross.pp3")
+export("nncross.ppp")
+export("nndcumfun")
+export("nndensity")
+export("nndensity.ppp")
+export("nndist")
+export("nndist.default")
+export("nndist.lpp")
+export("nndist.pp3")
+export("nndist.ppp")
+export("nndist.ppx")
+export("nndist.psp")
+export("nnfun")
+export("nnfun.lpp")
+export("nnfun.ppp")
+export("nnfun.psp")
+export("nnmap")
+export("nnmark")
+export("nnmean")
+export("nnorient")
+export("nnvario")
+export("nnwhich")
+export("nnwhich.default")
+export("nnwhich.lpp")
+export("nnwhich.pp3")
+export("nnwhich.ppp")
+export("nnwhich.ppx")
+export("nobjects")
+export("nobjects.ppp")
+export("nobjects.ppx")
+export("nobjects.psp")
+export("nobjects.tess")
+export("nobs.dppm")
+export("nobs.kppm")
+export("nobs.lppm")
+export("nobs.mppm")
+export("nobs.ppm")
+export("no.trend.ppm")
+export("npfun")
+export("npoints")
+export("npoints.pp3")
+export("npoints.ppp")
+export("npoints.ppx")
+export("n.quad")
+export("nsegments")
+export("nsegments.linnet")
+export("nsegments.lpp")
+export("nsegments.psp")
+export("numberwithunit")
+export("numeric.columns")
+export("nvertices")
+export("nvertices.default")
+export("nvertices.linnet")
+export("nvertices.owin")
+export("objsurf")
+export("objsurf.dppm")
+export("objsurfEngine")
+export("objsurf.kppm")
+export("objsurf.minconfit")
+export("onearrow")
+export("onecolumn")
+export("opening")
+export("opening.owin")
+export("opening.ppp")
+export("opening.psp")
+export("Ops.im")
+export("Ops.imlist")
+export("Ops.linim")
+export("Ops.msr")
+export("Ops.sparse3Darray")
+export("optimStatus")
+export("Ord")
+export("ord.family")
+export("OrdThresh")
+export("outdated.interact")
+export("overlap.owin")
+export("oversize.quad")    
+export("[.owin")
+export("owin")
+export("owin2polypath")
+export("owinpoly2mask")
+export("owinpolycheck")
+export("padimage")
+export("pairdist")
+export("pairdist.default")
+export("pairdist.lpp")
+export("pairdist.pp3")
+export("pairdist.ppp")
+export("pairdist.ppx")
+export("pairdist.psp")
+export("pairorient")
+export("PairPiece")
+export("pairsat.family")
+export("pairs.im")
+export("pairs.linim")
+export("pairs.listof")
+export("pairs.solist")
+export("Pairwise")
+export("pairwise.family")
+export("paletteindex")
+export("paletteindex")
+export("panel.contour")
+export("panel.histogram")
+export("panel.image")
+export("parameters")
+export("parameters.dppm")
+export("parameters.fii")
+export("parameters.interact")
+export("parameters.kppm")
+export("parameters.ppm")
+export("parameters.profilepl")
+export("param.quad")
+export("parbreak")
+export("parres")
+export("partialModelMatrix")
+export("pcf")
+export("pcf3engine")
+export("pcf3est")
+export("pcfcross")
+export("pcfcross.inhom")
+export("pcfdot")
+export("pcfdot.inhom")
+export("pcf.fasp")
+export("pcf.fv")
+export("pcfinhom")
+export("pcfmodel")
+export("pcfmodel.detpointprocfamily")
+export("pcfmodel.dppm")
+export("pcfmodel.kppm")
+export("pcfmodel.ppm")
+export("pcfmodel.zclustermodel")
+export("pcfmulti")
+export("pcfmulti.inhom")
+export("pcf.ppp")
+export("PDEdensityLPP")
+export("Penttinen")
+export("perimeter")
+export("periodify")
+export("periodify.owin")
+export("periodify.ppp")
+export("periodify.psp")
+export("perspContour")
+export("persp.funxy")
+export("persp.im")
+export("persp.leverage.ppm")
+export("perspLines")
+export("persp.objsurf")
+export("perspPoints")
+export("perspSegments")
+export("pickoption")
+export("pixelcentres")
+export("pixellate")
+export("pixellate.linnet")
+export("pixellate.owin")
+export("pixellate.ppp")
+export("pixellate.psp")
+export("pixelquad")
+export("pkernel")
+export("pknn")
+export("plan.legend.layout")
+export("plot3Dpoints")
+export("plot.addvar")
+export("plot.anylist")
+export("plot.barplotdata")
+export("plot.bermantest")
+export("plot.bw.frac")
+export("plot.bw.optim")
+export("plot.cdftest")
+export("plot.colourmap")
+export("plot.diagppm")
+export("plot.dppm")
+export("plotEachLayer")
+export("plot.envelope")
+export("ploterodeimage")
+export("ploterodewin")
+export("plot.fasp")
+export("plot.fii")
+export("plot.foo")
+export("plot.funxy")
+export("plot.fv")
+export("plot.hyperframe")
+export("plot.im")
+export("plot.imlist")
+export("plot.infline")
+export("plot.influence.ppm")
+export("plot.kppm")
+export("plot.kstest")
+export("plot.laslett")
+export("plot.layered")
+export("plot.leverage.ppm")
+export("plot.linfun")
+export("plot.linim")
+export("plot.linnet")
+export("plot.lintess")
+export("plot.listof")
+export("plot.localpcfmatrix")
+export("plot.lpp")
+export("plot.lppm")
+export("plot.lurk")
+export("plot.minconfit")
+export("plot.mppm")
+export("plot.msr")
+export("plot.objsurf")
+export("plot.onearrow")
+export("plot.owin")
+export("plot.parres")
+export("plot.plotpairsim")
+export("plot.plotppm")
+export("plotPolygonBdry")
+export("plot.pp3")
+export("plot.ppm")
+export("plot.ppp")
+export("plot.pppmatching")
+export("plot.ppx")
+export("plot.profilepl")
+export("plot.psp")
+export("plot.qqppm")
+export("plot.quad")
+export("plot.quadratcount")
+export("plot.quadrattest")
+export("plot.rho2hat")
+export("plot.rhohat")
+export("plot.rppm")
+export("plot.scan.test")
+export("plot.slrm")
+export("plot.solist")
+export("plot.spatialcdf")
+export("plot.splitppp")
+export("plot.ssf")
+export("plot.studpermutest")
+export("plot.symbolmap")
+export("plot.tess")
+export("plot.textstring")
+export("plot.texturemap")
+export("plot.yardstick")
+export("pmixpois")
+export("pointgrid")
+export("pointsAlongNetwork")
+export("points.lpp")
+export("pointsOnLines")
+export("PoisSaddle")
+export("PoisSaddleArea")
+export("PoisSaddleGeyer")
+export("PoisSaddlePairwise")
+export("Poisson")
+export("polyLaslett")
+export("polynom")
+export("polytileareaEngine")
+export("pool")
+export("pool.anylist")
+export("pool.envelope")
+export("pool.fasp")
+export("pool.fv")
+export("pool.quadrattest")
+export("pool.rat")
+export("positiveIndex")
+export("[.pp3")
+export("pp3")
+export("ppllengine")
+export("ppm")
+export("ppmCovariates")
+export("ppm.default")
+export("ppmDerivatives")
+export("ppm.formula")
+export("ppmInfluence")
+export("ppmInfluenceEngine")
+export("PPMmodelmatrix")
+export("ppm.ppp")
+export("ppm.quad")
+export("[<-.ppp")
+export("[.ppp")
+export("ppp")
+export("pppdist")
+export("pppdist.mat")
+export("pppdist.prohorov")
+export("pppmatching")
+export("ppsubset")
+export("PPversion")
+export("[.ppx")
+export("ppx")
+export("predict.dppm")
+export("predict.kppm")
+export("predict.lppm")
+export("predict.mppm")
+export("predict.ppm")
+export("predict.profilepl")
+export("predict.rho2hat")
+export("predict.rhohat")
+export("predict.rppm")
+export("predict.slrm")
+export("predict.vblogit")
+export("predict.zclustermodel")
+export("prefixfv")
+export("prepareTitle")
+export("print.addvar")    
+export("print.anylist")    
+export("print.autoexec")    
+export("print.box3")
+export("print.boxx")
+export("print.bt.frame")
+export("print.bugtable")
+export("print.bw.frac")
+export("print.bw.optim")
+export("print.colourmap")
+export("print.detpointprocfamily")
+export("print.detpointprocfamilyfun")
+export("print.diagppm")
+export("print.distfun")
+export("print.dppm")
+export("print.envelope")
+export("print.ewcdf")
+export("print.fasp")       
+export("print.fii")
+export("print.funxy")       
+export("print.fv")       
+export("print.fvfun")       
+export("print.hasenvelope")       
+export("print.hierarchicalordering")
+export("print.hyperframe")
+export("print.im")
+export("print.indicfun")       
+export("print.infline")
+export("print.influence.ppm")       
+export("print.interact")       
+export("print.intermaker")       
+export("print.isf")
+export("print.kppm")
+export("print.laslett")
+export("print.layered")
+export("print.leverage.ppm")
+export("print.linfun")
+export("print.linim")
+export("print.linnet")
+export("print.lintess")
+export("print.localpcfmatrix")
+export("print.lpp")
+export("print.lppm")
+export("print.lut")
+export("print.minconfit")
+export("print.mppm")
+export("print.msr")
+export("print.nnfun")
+export("print.numberwithunit")
+export("print.objsurf")
+export("print.onearrow")
+export("print.owin")
+export("print.parres")
+export("print.plotpairsim")
+export("print.plotppm")
+export("print.pp3")
+export("print.ppm")
+export("print.ppp")
+export("print.pppmatching")
+export("print.ppx")
+export("print.profilepl")
+export("print.psp")
+export("print.qqppm")
+export("print.quad")
+export("print.quadrattest")
+export("print.rat")
+export("print.rho2hat")
+export("print.rhohat")
+export("print.rmhcontrol")
+export("print.rmhexpand")
+export("print.rmhInfoList")
+export("print.rmhmodel")
+export("print.rmhstart")
+export("print.rppm")
+export("print.simplepanel")
+export("print.slrm")
+export("print.Smoothfun")       
+export("print.solist")
+export("print.sparse3Darray")
+export("print.splitppp")
+export("print.splitppx")
+export("print.ssf")
+export("printStatus")
+export("print.summary.fii")
+export("print.summary.hyperframe")
+export("print.summary.im")
+export("print.summary.kppm")
+export("print.summary.linim")
+export("print.summary.linnet")
+export("print.summary.lintess")
+export("print.summary.listof")
+export("print.summary.logiquad")
+export("print.summary.lpp")
+export("print.summary.lut")
+export("print.summary.mppm")
+export("print.summary.owin")
+export("print.summary.pp3")
+export("print.summary.ppm")
+export("print.summary.ppp")
+export("print.summary.psp")
+export("print.summary.quad")
+export("print.summary.rmhexpand")
+export("print.summary.solist")
+export("print.summary.splitppp")
+export("print.summary.splitppx")
+export("print.summary.units")
+export("print.symbolmap")       
+export("print.tess")
+export("print.textstring")
+export("print.texturemap")
+export("print.timed")
+export("print.units")
+export("print.vblogit")
+export("print.yardstick")
+export("print.zclustermodel")
+export("profilepl")
+export("progressreport")
+export("project2segment")
+export("project2set")
+export("project3Dhom")
+export("project.ppm")
+export("prune.rppm")
+export("pseudoR2")
+export("pseudoR2.lppm")
+export("pseudoR2.ppm")
+export("psib")
+export("psib.kppm")
+export("[.psp")
+export("psp")
+export("psst")
+export("psstA")
+export("psstG")
+export("putlastshift")
+export("putSpatstatVariable")
+export("qkernel")
+export("qknn")
+export("qmixpois")
+export("qqplot.ppm")
+export("QQversion")
+export("[.quad")
+export("quad")
+export("quadBlockSizes")
+export("quadform")
+export("quad.mppm")
+export("quad.ppm")
+export("quadratcount")
+export("quadratcount.ppp")
+export("quadratcount.splitppp")
+export("quadratresample")
+export("quadrats")
+export("quadrat.test")
+export("quadrat.testEngine")
+export("quadrat.test.mppm")
+export("quadrat.test.ppm")
+export("quadrat.test.ppp")
+export("quadrat.test.quadratcount")
+export("quadrat.test.splitppp")
+export("quadscheme")
+export("quadscheme.logi")
+export("quadscheme.replicated")
+export("quadscheme.spatial")
+export("quantess")
+export("quantess.im")
+export("quantess.owin")
+export("quantess.ppp")
+export("quantile.density")
+export("quantile.ewcdf")
+export("quantile.im")
+export("quantile.linim")
+export("rags")
+export("ragsAreaInter")
+export("ragsMultiHard")
+export("RandomFieldsSafe")
+export("ranef.mppm")
+export("range.fv")
+export("range.ssf")
+export("rastersample")
+export("raster.x")
+export("rasterx.im")
+export("rasterx.mask")
+export("raster.xy")
+export("rasterxy.im")
+export("rasterxy.mask")
+export("raster.y")
+export("rastery.im")
+export("rastery.mask")
+export("[.rat")
+export("rat")
+export("ratfv")
+export("rbindCompatibleDataFrames")
+export("rbind.hyperframe")
+export("rCauchy")
+export("rcell")
+export("rcellnumber")
+export("rDGS")
+export("rDiggleGratton")
+export("rdpp")
+export("reach")
+export("reach.detpointprocfamily")
+export("reach.dppm")
+export("reach.fii")
+export("reach.interact")
+export("reach.ppm")
+export("reach.rmhmodel")
+export("rebadge.as.crossfun")
+export("rebadge.as.dotfun")
+export("rebadge.fv")
+export("rebound")
+export("rebound.im")
+export("rebound.owin")
+export("rebound.ppp")
+export("rebound.psp")
+export("recognise.spatstat.type")
+export("reconcile.fv")
+export("rectquadrat.breaks")
+export("rectquadrat.countEngine")
+export("redraw.simplepanel")
+export("reduced.sample")
+export("reduceformula")
+export("reflect")
+export("reflect.default")
+export("reflect.im")
+export("reflect.infline")
+export("reflect.layered")
+export("reflect.tess") 
+export("regularpolygon")
+export("reheat")
+export("reincarnate.interact")
+export("RelevantDeviation")
+export("RelevantEmpty")
+export("RelevantZero")
+export("relevel.im")
+export("relevel.ppp")
+export("relevel.ppx")
+export("reload.or.compute")
+export("relrisk")
+export("relrisk.ppm")
+export("relrisk.ppp")
+export("rename.fv")
+export("repair.image.xycoords")
+export("repair.old.factor.image")
+export("replacementIndex")
+export("representativeRows")
+export("requireversion")
+export("resampleNetworkDataFrame")
+export("rescale")
+export("rescale.im")
+export("rescale.layered")
+export("rescale.linnet")
+export("rescale.lpp")
+export("rescale.owin")
+export("rescale.ppp")
+export("rescale.psp")
+export("rescale.units")
+export("rescue.rectangle")
+export("reset.spatstat.options")
+export("resid1panel")
+export("resid1plot")
+export("resid4plot")
+export("residuals.dppm")
+export("residuals.kppm")
+export("residuals.mppm")
+export("residuals.ppm")
+export("resolve.2D.kernel")
+export("resolveEinfo")
+export("resolve.vargamma.shape")
+export("restrict.mask")
+export("reversePolyclipArgs")
+export("rex")
+export("rGaussPoisson")
+export("rgb2hex")
+export("rgb2hsva")
+export("rgbim")
+export("rgbNA")
+export("rHardcore")
+export("rho2hat")
+export("rhohat")
+export("rhohatCalc")
+export("rhohatEngine")
+export("rhohat.lpp")
+export("rhohat.lppm")
+export("rhohat.ppm")
+export("rhohat.ppp")
+export("rhohat.quad")
+export("ripras")
+export("rjitter")
+export("rkernel")
+export("rknn")
+export("rlabel")
+export("rLGCP")
+export("rlinegrid")
+export("rlpp")
+export("rMatClust")
+export("rMaternI")
+export("rMaternII")
+export("rMaternInhibition")
+export("rmax.Rigid")
+export("rmax.Ripley")
+export("rmax.rule")
+export("rmax.Trans")
+export("rmh")
+export("rmhcontrol")
+export("rmhcontrol.default")
+export("rmhcontrol.list")
+export("rmhcontrol.rmhcontrol")
+export("rmh.default")
+export("rmhEngine")
+export("rmhexpand")
+export("RmhExpandRule")
+export("rmhmodel")
+export("rmhmodel.default")
+export("rmhmodel.list")
+export("rmhmodel.ppm")
+export("rmhmodel.rmhmodel")
+export("rmh.ppm")
+export("rmhResolveControl")
+export("rmhResolveExpansion")
+export("rmhResolveTypes")
+export("rmhsnoop")
+export("rmhSnoopEnv")
+export("rmhstart")
+export("rmhstart.default")
+export("rmhstart.list")
+export("rmhstart.rmhstart")
+export("rmixpois")
+export("rMosaicField")
+export("rMosaicSet")
+export("rmpoint")
+export("rmpoint.I.allim")
+export("rmpoispp")
+export("rNeymanScott")
+export("rnoise")
+export("roc")
+export("rocData")
+export("roc.kppm")
+export("roc.lpp")
+export("roc.lppm")
+export("rocModel")
+export("roc.ppm")
+export("roc.ppp")
+export("rose")
+export("roseContinuous")
+export("rose.default")
+export("rose.density")
+export("rose.fv")
+export("rose.histogram")
+export("rotate")
+export("rotate.im")
+export("rotate.infline")
+export("rotate.layered")
+export("rotate.linnet")
+export("rotate.lpp")
+export("rotate.owin")
+export("rotate.ppp")
+export("rotate.psp")
+export("rotate.tess") 
+export("rotmean")
+export("rotxy")
+export("rotxypolygon")
+export("rounding")
+export("rounding.default")
+export("rounding.pp3")
+export("rounding.ppp")
+export("rounding.ppx")
+export("round.pp3")
+export("round.ppp")
+export("round.ppx")
+export("row.names<-.hyperframe")
+export("row.names.hyperframe")
+export("rPenttinen")
+export("rpoint")
+export("rpoint.multi")
+export("rpoisline")
+export("rpoislinetess")
+export("rpoislpp")
+export("rpoispp")
+export("rpoispp3")
+export("rpoisppOnLines")
+export("rpoisppx")
+export("rPoissonCluster")
+export("rppm")
+export("rQuasi")
+export("rshift")
+export("rshift.ppp")
+export("rshift.psp")
+export("rshift.splitppp")
+export("rSSI")
+export("rstrat")
+export("rStrauss")
+export("rStraussHard")
+export("rsyst")
+export("rtemper")
+export("rthin")
+export("rThomas")
+export("rtoro")
+export("ruletextline")
+export("runifdisc")
+export("runiflpp")
+export("runifpoint")
+export("runifpoint3")
+export("runifpointOnLines")
+export("runifpointx")
+export("runifpoispp")          
+export("runifpoisppOnLines")          
+export("runifrect")
+export("run.simplepanel")
+export("rVarGamma")
+export("safedeldir")
+export("safelookup")
+export("samecolour")
+export("SatPiece")
+export("Saturated")
+export("scalardilate")
+export("scalardilate.breakpts")
+export("scalardilate.default")
+export("scalardilate.diagramobj")
+export("scalardilate.im")
+export("scalardilate.layered")
+export("scalardilate.linim")
+export("scalardilate.linnet")
+export("scalardilate.lpp")
+export("scalardilate.msr")
+export("scalardilate.owin")
+export("scalardilate.ppp")
+export("scalardilate.psp")
+export("scalardilate.tess") 
+export("scaletointerval")
+export("scaletointerval.default")
+export("scaletointerval.im")
+export("scanBinomLRTS")
+export("scanLRTS")
+export("scanmeasure")
+export("scanmeasure.im")
+export("scanmeasure.ppp")
+export("scanPoisLRTS")
+export("scanpp")
+export("scan.test")
+export("sdr")
+export("sdrPredict")
+export("second.moment.calc")
+export("second.moment.engine")
+export("segregation.test")
+export("segregation.test.ppp")
+export("selfcrossing.psp")
+export("selfcut.psp")
+export("sessionLibs")
+export("setcov")
+export("setmarks")
+export("setminus.owin")
+export("sewpcf")
+export("sewsmod")
+export("sharpen")
+export("sharpen.ppp")
+export("shift")
+export("shift.diagramobj")              
+export("shift.im")
+export("shift.infline")
+export("shift.influence.ppm")              
+export("shift.layered")
+export("shift.leverage.ppm")              
+export("shift.linim")
+export("shift.linnet")
+export("shift.lpp") 
+export("shift.msr")              
+export("shift.owin")
+export("shift.ppp")
+export("shift.psp")
+export("shift.quadratcount")              
+export("shift.quadrattest")              
+export("shift.tess") 
+export("shiftxy")              
+export("shiftxypolygon")              
+export("shortside")
+export("shortside.box3")
+export("shortside.boxx")
+export("shortside.owin")
+export("sidelengths")
+export("sidelengths.box3")
+export("sidelengths.boxx")
+export("sidelengths.owin")
+export("signalStatus")
+export("simplepanel")
+export("simplify.owin")
+export("simulate.detpointprocfamily")
+export("simulate.dppm")
+export("simulate.kppm")
+export("simulate.lppm")
+export("simulate.mppm")
+export("simulate.ppm")
+export("simulate.profilepl")
+export("simulate.rhohat")
+export("simulate.slrm")
+export("simulrecipe")              
+export("slrAssemblePixelData")
+export("slrm")
+export("slr.prepare")
+export("Smooth")
+export("smoothcrossEngine")              
+export("Smoothfun")
+export("Smoothfun.ppp")
+export("smooth.fv")
+export("Smooth.fv")
+export("Smooth.im")
+export("smooth.msr")
+export("Smooth.msr")
+export("smoothpointsEngine")              
+export("smooth.ppp")
+export("Smooth.ppp")
+export("Smooth.solist")
+export("Smooth.ssf")
+export("Softcore")
+export("solapply")
+export("[<-.solist")
+export("[.solist")
+export("solist")
+export("solutionset")
+export("sortalongsegment")
+export("sort.im")
+export("[<-.sparse3Darray")
+export("[.sparse3Darray")
+export("sparse3Darray")
+export("SparseEntries")
+export("SparseIndices")
+export("sparseVectorCumul")
+export("spatdim")
+export("spatialcdf")
+export("spatialCDFframe")
+export("spatialCDFtest")
+export("spatstatClusterModelInfo")
+export("spatstatDiagnostic")
+export("spatstatDPPModelInfo")
+export("spatstat.options")
+export("spatstat.rawdata.location")
+export("spatstatRmhInfo")
+export("spatstat.xy.coords")
+export("sp.foundclass")
+export("sp.foundclasses")
+export("sphere.volume")
+export("splitHybridInteraction")
+export("split<-.hyperframe")
+export("split.hyperframe")
+export("split.im")
+export("split.msr")
+export("[<-.splitppp")
+export("[.splitppp")
+export("split<-.ppp")
+export("split.ppp")
+export("[<-.splitppx")
+export("[.splitppx")
+export("split.ppx")
+export("spokes")
+export("square")
+export("[.ssf")
+export("ssf")
+export("stieltjes")
+export("stienen")
+export("stienenSet")
+export("store.versionstring.spatstat")
+export("stratrand")
+export("Strauss")
+export("strausscounts")
+export("StraussHard")
+export("str.hyperframe")
+export("studpermu.test")
+export("subfits")
+export("subfits.new")
+export("subfits.old")
+export("subset.hyperframe")
+export("subset.lpp")
+export("subset.pp3")
+export("subset.ppp")
+export("subset.ppx")
+export("subspaceDistance")
+export("suffloc")
+export("suffstat")
+export("suffstat.generic")
+export("suffstat.poisson")
+export("summarise.trend")
+export("summary.anylist")
+export("summary.envelope")
+export("summary.fii")
+export("summary.funxy")
+export("summary.hyperframe")
+export("summary.im")
+export("Summary.im")
+export("Summary.imlist")
+export("summary.kppm")
+export("summary.linfun")
+export("summary.linim")
+export("Summary.linim")
+export("summary.linnet")
+export("summary.lintess")
+export("summary.listof")
+export("summary.logiquad")
+export("summary.lpp")
+export("summary.lppm")
+export("summary.lut")
+export("summary.mppm")
+export("summary.owin")
+export("summary.pp3")
+export("summary.ppm")
+export("summary.ppp")
+export("summary.pppmatching")
+export("summary.ppx")
+export("summary.profilepl")
+export("summary.psp")
+export("summary.quad")
+export("summary.rmhexpand")
+export("summary.solist")
+export("Summary.sparse3Darray")
+export("summary.splitppp")
+export("summary.splitppx")
+export("summary.units")
+export("summary.vblogit")
+export("sumouter")
+export("sumsymouter")
+export("sumsymouterSparse")
+export("superimpose")
+export("superimpose.default")
+export("superimpose.lpp")
+export("superimposeMarks")
+export("superimpose.ppp")
+export("superimpose.ppplist")
+export("superimpose.psp")
+export("superimposePSP")
+export("superimpose.splitppp")
+export("symbolmap")
+export("symbolmaptype")
+export("tail.hyperframe")
+export("tail.ppp")
+export("tail.ppx")
+export("tail.psp")
+export("tail.tess")
+export("tenseur")
+export("tensor1x1")
+export("terms.dppm")
+export("terms.kppm")
+export("terms.lppm")
+export("terms.mppm")
+export("terms.ppm")
+export("terms.slrm")
+export("[<-.tess")
+export("[.tess")
+export("tess")
+export("test.crossing.psp")
+export("test.selfcrossing.psp")
+export("textstring")
+export("texturemap")
+export("textureplot")
+export("thinjump")
+export("thinNetwork")
+export("thomas.estK")
+export("thomas.estpcf")
+export("tile.areas")
+export("tilecentroids")        
+export("tileindex")
+export("tilenames<-")
+export("tilenames")
+export("tiles")
+export("tiles.empty")
+export("timed")
+export("timeTaken")
+export("to.grey")
+export("to.opaque")
+export("totalVariation")
+export("to.transparent")
+export("transect.im")
+export("transmat")
+export("treebranchlabels")
+export("treeprune")
+export("trianglediameters")
+export("triangulate.owin")
+export("trim.mask")        
+export("trim.rectangle")
+export("triplet.family")
+export("Triplets")
+export("Tstat")
+export("tweak.coefs")
+export("tweak.colourmap")
+export("tweak.fv.entry")
+export("tweak.ratfv.entry")
+export("twostage.test")
+export("unionOfSparseIndices")
+export("union.owin")
+export("union.quad")
+export("unique.ppp")
+export("unique.ppx")
+export("unitname<-")
+export("unitname")
+export("unitname<-.box3")
+export("unitname.box3")
+export("unitname<-.boxx")
+export("unitname.boxx")
+export("unitname<-.default")
+export("unitname.default")
+export("unitname<-.dppm")
+export("unitname.dppm")
+export("unitname<-.im")
+export("unitname.im")
+export("unitname<-.kppm")
+export("unitname.kppm")
+export("unitname<-.linnet")
+export("unitname.linnet")
+export("unitname<-.lpp")
+export("unitname.lpp")
+export("unitname<-.minconfit")
+export("unitname.minconfit")
+export("unitname<-.owin")
+export("unitname.owin")
+export("unitname<-.pp3")
+export("unitname.pp3")
+export("unitname<-.ppm")
+export("unitname.ppm")
+export("unitname<-.ppp")
+export("unitname.ppp")
+export("unitname<-.ppx")
+export("unitname.ppx")
+export("unitname<-.psp")
+export("unitname.psp")
+export("unitname<-.quad")
+export("unitname.quad")
+export("unitname<-.slrm")
+export("unitname.slrm")
+export("unitname<-.tess")
+export("unitname.tess")
+export("unit.square")
+export("unmark")
+export("unmark.lpp")
+export("unmark.ppp")
+export("unmark.ppx")
+export("unmark.psp")
+export("unmark.splitppp")
+export("unmark.ssf")
+export("unmark.tess")
+export("unnormdensity")
+export("unstackFilter")
+export("unstack.layered")
+export("unstack.lpp")
+export("unstack.msr")
+export("unstack.ppp")
+export("unstack.psp")
+export("unstack.solist")
+export("update.detpointprocfamily")
+export("update.im")
+export("update.interact")
+export("update.ippm")
+export("update.kppm")
+export("update.lppm")
+export("update.ppm")
+export("update.rmhcontrol")
+export("update.rmhstart")
+export("update.slrm")
+export("update.symbolmap")
+export("valid")
+export("validate2Dkernel")
+export("validate.angles")        
+export("validate.lpp.coords")
+export("validate.mask")        
+export("validate.quad")        
+export("valid.detpointprocfamily")
+export("valid.lppm")
+export("valid.ppm")
+export("validradius")
+export("vanilla.fv")
+export("varblock")
+export("varcount")
+export("varcountEngine")
+export("vargamma.estK")
+export("vargamma.estpcf")
+export("vcov.kppm")
+export("vcov.lppm")
+export("vcov.mppm")
+export("vcov.ppm")
+export("vcov.slrm")
+export("vdCorput")
+export("verifyclass")
+export("versionstring.interact")
+export("versionstring.ppm")
+export("versionstring.spatstat")
+export("vertexdegree")
+export("vertices")
+export("vertices.linnet")
+export("vertices.owin")
+export("Vmark")
+export("volume")
+export("volume.box3")
+export("volume.boxx")
+export("volume.linnet")
+export("volume.owin")
+export("warn.once")
+export("waxlyrical")
+export("weighted.median")
+export("weighted.quantile")
+export("weighted.var")
+export("where.max")
+export("where.min")
+export("whichhalfplane")
+export("which.max.im")
+export("whist")
+export("will.expand")
+export("Window<-")
+export("Window")
+export("Window.distfun")
+export("Window.dppm")
+export("Window.funxy")
+export("Window<-.im")
+export("Window.im")
+export("Window.influence.ppm")
+export("Window.kppm")
+export("Window.layered")
+export("Window.leverage.ppm")
+export("Window<-.linnet")
+export("Window.linnet")
+export("Window.lintess")
+export("Window<-.lpp")
+export("Window.lpp")
+export("Window.lppm")
+export("Window.msr")
+export("Window.nnfun")
+export("Window.ppm")
+export("Window<-.ppp")
+export("Window.ppp")
+export("Window<-.psp")
+export("Window.psp")
+export("Window.quad")
+export("Window.quadratcount")
+export("Window.quadrattest")
+export("Window.rmhmodel")
+export("windows.mppm")
+export("Window.tess")
+export("with.fv")
+export("with.hyperframe")
+export("with.msr")
+export("with.ssf")
+export("w.quad")               
+export("X2testEngine")
+export("x.quad")
+export("xtfrm.im")
+export("xy.grid")
+export("xypolygon2psp")
+export("xypolyselfint")
+export("yardstick")
+export("y.quad")
+export("zapsmall.im")
+export("zclustermodel")
+
+# .......  Special cases ........... 
+export("%(-)%")
+export("%(+)%")
+export("%mapp%")
+export("%mark%")
+export("%mrep%")
+export("%msub%")
+export("%unit%")
+S3method("Complex", "im")
+S3method("Complex", "imlist")
+S3method("Complex", "linim")
+S3method("Math", "im")
+S3method("Math", "imlist")
+S3method("Math", "linim")
+S3method("Math", "sparse3Darray")
+S3method("mean", "im")
+S3method("median", "im")
+S3method("Ops", "im")
+S3method("Ops", "imlist")
+S3method("Ops", "linim")
+S3method("Ops", "msr")
+S3method("Ops", "sparse3Darray")
+S3method("Summary", "im")
+S3method("Summary", "imlist")
+S3method("Summary", "linim")
+S3method("Summary", "sparse3Darray")
+# .......  End of special cases  ... 
+
+# ......................................... 
+# Automatically generated list of S3 methods
+# ......................................... 
+  S3method("affine", "im")
+  S3method("affine", "layered")
+  S3method("affine", "linim")
+  S3method("affine", "linnet")
+  S3method("affine", "lpp")
+  S3method("affine", "owin")
+ S3method("affine", "ppp")
+ S3method("affine", "psp")
+  S3method("affine", "tess")
+S3method("AIC", "dppm")
+S3method("AIC", "kppm")
+ S3method("AIC", "mppm")
+S3method("AIC", "ppm")
+  S3method("anova", "lppm")
+  S3method("anova", "mppm")
+  S3method("anova", "ppm")
+  S3method("anova", "slrm")
+ S3method("anyDuplicated", "ppp")
+ S3method("anyDuplicated", "ppx")
+  S3method("[", "anylist")
+  S3method("anyNA", "im")
+S3method("anyNA", "sparse3Darray")
+S3method("aperm", "sparse3Darray")
+ S3method("area", "default")
+ S3method("area", "owin")
+  S3method("as.array", "im")
+S3method("as.array", "sparse3Darray")
+S3method("as.character", "units")
+S3method("as.data.frame", "bw.optim")
+S3method("as.data.frame", "envelope")
+S3method("as.data.frame", "fv")
+S3method("as.data.frame", "hyperframe")
+  S3method("as.data.frame", "im")
+  S3method("as.data.frame", "linfun")
+  S3method("as.data.frame", "linim")
+S3method("as.data.frame", "owin")
+S3method("as.data.frame", "ppp")
+S3method("as.data.frame", "ppx")
+S3method("as.data.frame", "psp")
+S3method("as.data.frame", "tess")
+S3method("as.double", "im")
+  S3method("as.function", "fv")
+ S3method("as.function", "im")
+ S3method("as.function", "leverage.ppm")
+  S3method("as.function", "linfun")
+ S3method("as.function", "owin")
+  S3method("as.function", "rhohat")
+  S3method("as.function", "ssf")
+  S3method("as.function", "tess")
+  S3method("as.fv", "bw.optim")
+  S3method("as.fv", "data.frame")
+  S3method("as.fv", "dppm")
+  S3method("as.fv", "fasp")
+  S3method("as.fv", "fv")
+  S3method("as.fv", "kppm")
+  S3method("as.fv", "matrix")
+  S3method("as.fv", "minconfit")
+S3method("as.hyperframe", "anylist")
+S3method("as.hyperframe", "data.frame")
+S3method("as.hyperframe", "default")
+S3method("as.hyperframe", "hyperframe")
+S3method("as.hyperframe", "listof")
+S3method("as.hyperframe", "ppx")
+  S3method("as.im", "data.frame")
+  S3method("as.im", "default")
+  S3method("as.im", "distfun")
+  S3method("as.im", "function")
+  S3method("as.im", "funxy")
+  S3method("as.im", "im")
+  S3method("as.im", "leverage.ppm")
+  S3method("as.im", "linim")
+  S3method("as.im", "matrix")
+  S3method("as.im", "nnfun")
+  S3method("as.im", "owin")
+S3method("as.im", "ppp")
+ S3method("as.im", "scan.test")
+  S3method("as.im", "Smoothfun")
+  S3method("as.im", "ssf")
+  S3method("as.im", "tess")
+S3method("as.interact", "fii")
+S3method("as.interact", "interact")
+S3method("as.interact", "ppm")
+ S3method("as.layered", "default")
+ S3method("as.layered", "listof")
+ S3method("as.layered", "msr")
+ S3method("as.layered", "ppp")
+ S3method("as.layered", "solist")
+ S3method("as.layered", "splitppp")
+S3method("as.linfun", "linfun")
+  S3method("as.linfun", "linim")
+  S3method("as.linfun", "lintess")
+  S3method("as.linim", "default")
+  S3method("as.linim", "linfun")
+  S3method("as.linim", "linim")
+ S3method("as.linnet", "linfun")
+ S3method("as.linnet", "linim")
+S3method("as.linnet", "linnet")
+ S3method("as.linnet", "lintess")
+ S3method("as.linnet", "lpp")
+  S3method("as.linnet", "lppm")
+ S3method("as.linnet", "psp")
+S3method("as.list", "hyperframe")
+  S3method("as.matrix", "im")
+  S3method("as.matrix", "owin")
+S3method("as.matrix", "ppx")
+ S3method("as.owin", "boxx")
+ S3method("as.owin", "data.frame")
+ S3method("as.owin", "default")
+ S3method("as.owin", "distfun")
+ S3method("as.owin", "dppm")
+ S3method("as.owin", "funxy")
+ S3method("as.owin", "im")
+ S3method("as.owin", "influence.ppm")
+ S3method("as.owin", "kppm")
+ S3method("as.owin", "layered")
+ S3method("as.owin", "leverage.ppm")
+  S3method("as.owin", "linfun")
+S3method("as.owin", "linnet")
+S3method("as.owin", "lintess")
+ S3method("as.owin", "lpp")
+ S3method("as.owin", "lppm")
+ S3method("as.owin", "msr")
+ S3method("as.owin", "nnfun")
+ S3method("as.owin", "owin")
+ S3method("as.owin", "ppm")
+ S3method("as.owin", "ppp")
+ S3method("as.owin", "psp")
+ S3method("as.owin", "quad")
+ S3method("as.owin", "quadratcount")
+ S3method("as.owin", "quadrattest")
+ S3method("as.owin", "rmhmodel")
+ S3method("as.owin", "tess")
+S3method("as.ppm", "dppm")
+S3method("as.ppm", "kppm")
+S3method("as.ppm", "lppm")
+S3method("as.ppm", "ppm")
+S3method("as.ppm", "profilepl")
+S3method("as.ppm", "rppm")
+  S3method("as.ppp", "data.frame")
+  S3method("as.ppp", "default")
+  S3method("as.ppp", "influence.ppm")
+S3method("as.ppp", "lpp")
+  S3method("as.ppp", "matrix")
+  S3method("as.ppp", "ppp")
+  S3method("as.ppp", "psp")
+  S3method("as.ppp", "quad")
+  S3method("as.ppp", "ssf")
+  S3method("as.psp", "data.frame")
+  S3method("as.psp", "default")
+S3method("as.psp", "linnet")
+S3method("as.psp", "lpp")
+  S3method("as.psp", "matrix")
+S3method("as.psp", "owin")
+  S3method("as.psp", "psp")
+ S3method("as.tess", "im")
+ S3method("as.tess", "list")
+ S3method("as.tess", "owin")
+ S3method("as.tess", "quadratcount")
+ S3method("as.tess", "quadrattest")
+ S3method("as.tess", "tess")
+S3method("auc", "kppm")
+S3method("auc", "lpp")
+S3method("auc", "lppm")
+S3method("auc", "ppm")
+S3method("auc", "ppp")
+  S3method("bc", "ppm")
+S3method("berman.test", "lpp")
+S3method("berman.test", "lppm")
+S3method("berman.test", "ppm")
+S3method("berman.test", "ppp")
+S3method("boundingbox", "default")
+S3method("boundingbox", "im")
+S3method("boundingbox", "owin")
+S3method("boundingbox", "ppp")
+S3method("boundingbox", "solist")
+S3method("boundingcentre", "owin")
+S3method("boundingcentre", "ppp")
+S3method("boundingcircle", "owin")
+S3method("boundingcircle", "ppp")
+S3method("boundingradius", "linnet")
+S3method("boundingradius", "owin")
+S3method("boundingradius", "ppp")
+S3method("by", "im")
+ S3method("by", "ppp")
+S3method("cbind", "fv")
+S3method("cbind", "hyperframe")
+S3method("CDF", "density")
+S3method("cdf.test", "lpp")
+S3method("cdf.test", "lppm")
+S3method("cdf.test", "mppm")
+S3method("cdf.test", "ppm")
+S3method("cdf.test", "ppp")
+S3method("cdf.test", "slrm")
+S3method("circumradius", "linnet")
+S3method("circumradius", "owin")
+S3method("circumradius", "ppp")
+S3method("closepairs", "pp3")
+S3method("closepairs", "ppp")
+ S3method("closing", "owin")
+ S3method("closing", "ppp")
+ S3method("closing", "psp")
+  S3method("clusterfield", "character")
+  S3method("clusterfield", "function")
+  S3method("clusterfield", "kppm")
+S3method("clusterkernel", "character")
+S3method("clusterkernel", "kppm")
+S3method("clusterradius", "character")
+S3method("clusterradius", "kppm")
+S3method("coef", "dppm")
+S3method("coef", "fii")
+S3method("coef", "kppm")
+  S3method("coef", "lppm")
+  S3method("coef", "mppm")
+  S3method("coef", "ppm")
+  S3method("coef", "slrm")
+S3method("coef", "summary.fii")
+S3method("coef", "summary.kppm")
+S3method("coef", "summary.ppm")
+S3method("coef", "vblogit")
+S3method("collapse", "anylist")
+S3method("collapse", "fv")
+  S3method("compatible", "fasp")
+  S3method("compatible", "fv")
+ S3method("compatible", "im")
+S3method("compatible", "rat")
+  S3method("compatible", "units")
+S3method("connected", "im")
+S3method("connected", "linnet")
+S3method("connected", "lpp")
+S3method("connected", "owin")
+S3method("connected", "ppp")
+S3method("contour", "funxy")
+   S3method("contour", "im")
+  S3method("contour", "imlist")
+  S3method("contour", "listof")
+S3method("contour", "objsurf")
+S3method("contour", "ssf")
+  S3method("coords", "ppp")
+  S3method("coords", "ppx")
+  S3method("crossdist", "default")
+  S3method("crossdist", "lpp")
+  S3method("crossdist", "pp3")
+  S3method("crossdist", "ppp")
+  S3method("crossdist", "ppx")
+  S3method("crossdist", "psp")
+S3method("crosspairs", "pp3")
+S3method("crosspairs", "ppp")
+  S3method("cut", "im")
+  S3method("cut", "lpp")
+  S3method("cut", "ppp")
+S3method("deletebranch", "linnet")
+S3method("deletebranch", "lpp")
+S3method("density", "lpp")
+  S3method("density", "ppp")
+  S3method("density", "ppplist")
+  S3method("density", "psp")
+  S3method("density", "splitppp")
+S3method("density", "splitppx")
+S3method("deriv", "fv")
+  S3method("deviance", "lppm")
+S3method("deviance", "ppm")
+S3method("dfbetas", "ppm")
+S3method("[", "diagramobj")
+S3method("diameter", "box3")
+S3method("diameter", "boxx")
+S3method("diameter", "linnet")
+ S3method("diameter", "owin")
+ S3method("dilation", "owin")
+ S3method("dilation", "ppp")
+ S3method("dilation", "psp")
+  S3method("dim", "detpointprocfamily")
+S3method("dim", "fasp")
+S3method("dim", "hyperframe")
+S3method("dim", "im")
+S3method("dim", "msr")
+S3method("dimnames", "fasp")
+S3method("dimnames", "msr")
+S3method("dimnames", "sparse3Darray")
+S3method("dim", "owin")
+S3method("dim", "sparse3Darray")
+  S3method("distfun", "lpp")
+  S3method("distfun", "owin")
+  S3method("distfun", "ppp")
+  S3method("distfun", "psp")
+  S3method("distmap", "owin")
+  S3method("distmap", "ppp")
+  S3method("distmap", "psp")
+ S3method("domain", "distfun")
+ S3method("domain", "dppm")
+ S3method("domain", "funxy")
+   S3method("domain", "im")
+ S3method("domain", "im")
+ S3method("domain", "influence.ppm")
+ S3method("domain", "kppm")
+ S3method("domain", "layered")
+ S3method("domain", "leverage.ppm")
+ S3method("domain", "linfun")
+ S3method("domain", "lintess")
+   S3method("domain", "lpp")
+ S3method("domain", "lpp")
+ S3method("domain", "lppm")
+ S3method("domain", "msr")
+ S3method("domain", "nnfun")
+   S3method("domain", "pp3")
+ S3method("domain", "ppm")
+   S3method("domain", "ppp")
+   S3method("domain", "ppx")
+   S3method("domain", "psp")
+ S3method("domain", "quad")
+ S3method("domain", "quadratcount")
+ S3method("domain", "quadrattest")
+ S3method("domain", "rmhmodel")
+ S3method("domain", "tess")
+ S3method("duplicated", "ppp")
+ S3method("duplicated", "ppx")
+S3method("edit", "hyperframe")
+S3method("edit", "im")
+S3method("edit", "ppp")
+S3method("edit", "psp")
+  S3method("emend", "lppm")
+S3method("emend", "ppm")
+S3method("envelope", "envelope")
+S3method("envelope", "hasenvelope")
+  S3method("envelope", "kppm")
+  S3method("envelope", "lpp")
+  S3method("envelope", "lppm")
+S3method("envelope", "matrix")
+  S3method("envelope", "pp3")
+  S3method("envelope", "ppm")
+  S3method("envelope", "ppp")
+S3method("eroded.volumes", "box3")
+S3method("eroded.volumes", "boxx")
+ S3method("erosion", "owin")
+ S3method("erosion", "ppp")
+ S3method("erosion", "psp")
+S3method("evalCovar", "lppm")
+S3method("evalCovar", "ppm")
+S3method("extractAIC", "dppm")
+S3method("extractAIC", "kppm")
+  S3method("extractAIC", "lppm")
+ S3method("extractAIC", "mppm")
+S3method("extractAIC", "ppm")
+S3method("extractAIC", "slrm")
+S3method("extractbranch", "linnet")
+S3method("extractbranch", "lpp")
+S3method("family", "vblogit")
+  S3method("fardist", "owin")
+  S3method("fardist", "ppp")
+  S3method("[", "fasp")
+S3method("fitin", "ppm")
+  S3method("fitted", "dppm")
+  S3method("fitted", "kppm")
+S3method("fitted", "lppm")
+ S3method("fitted", "mppm")
+  S3method("fitted", "ppm")
+S3method("fitted", "rppm")
+  S3method("fitted", "slrm")
+ S3method("fixef", "mppm")
+ S3method("flipxy", "im")
+S3method("flipxy", "infline")
+  S3method("flipxy", "layered")
+ S3method("flipxy", "owin")
+ S3method("flipxy", "ppp")
+ S3method("flipxy", "psp")
+S3method("format", "numberwithunit")
+S3method("formula", "dppm")
+S3method("formula", "fv")
+S3method("formula", "kppm")
+  S3method("formula", "lppm")
+S3method("formula", "ppm")
+S3method("formula", "slrm")
+   S3method("Frame", "default")
+  S3method("[", "fv")
+ S3method("getCall", "mppm")
+S3method("harmonise", "fv")
+S3method("harmonise", "im")
+S3method("harmonise", "msr")
+S3method("harmonise", "owin")
+S3method("harmonize", "fv")
+S3method("harmonize", "im")
+S3method("harmonize", "owin")
+  S3method("has.close", "default")
+  S3method("has.close", "pp3")
+  S3method("has.close", "ppp")
+S3method("head", "hyperframe")
+  S3method("head", "ppp")
+  S3method("head", "ppx")
+  S3method("head", "psp")
+  S3method("head", "tess")
+  S3method("hist", "funxy")
+  S3method("hist", "im")
+  S3method("[", "hyperframe")
+  S3method("$", "hyperframe")
+  S3method("identify", "lpp")
+  S3method("identify", "ppp")
+  S3method("identify", "psp")
+  S3method("[", "im")
+  S3method("image", "im")
+  S3method("image", "imlist")
+  S3method("image", "listof")
+S3method("image", "objsurf")
+S3method("image", "ssf")
+  S3method("[", "influence.ppm")
+S3method("influence", "ppm")
+S3method("integral", "im")
+S3method("integral", "linfun")
+S3method("integral", "linim")
+S3method("integral", "msr")
+  S3method("integral", "ssf")
+  S3method("intensity", "detpointprocfamily")
+  S3method("intensity", "dppm")
+S3method("intensity", "lpp")
+S3method("intensity", "ppm")
+S3method("intensity", "ppp")
+  S3method("intensity", "ppx")
+S3method("intensity", "quadratcount")
+S3method("intensity", "splitppp")
+ S3method("iplot", "default")
+ S3method("iplot", "layered")
+ S3method("iplot", "linnet")
+ S3method("iplot", "lpp")
+ S3method("iplot", "ppp")
+S3method("is.connected", "default")
+S3method("is.connected", "linnet")
+S3method("is.connected", "ppp")
+S3method("is.empty", "default")
+S3method("is.empty", "owin")
+S3method("is.empty", "ppp")
+S3method("is.empty", "psp")
+S3method("is.expandable", "ppm")
+S3method("is.expandable", "rmhmodel")
+S3method("is.hybrid", "interact")
+S3method("is.hybrid", "ppm")
+S3method("is.marked", "default")
+  S3method("is.marked", "lppm")
+S3method("is.marked", "msr")
+  S3method("is.marked", "ppm")
+  S3method("is.marked", "ppp")
+S3method("is.marked", "psp")
+S3method("is.marked", "quad")
+S3method("is.multitype", "default")
+  S3method("is.multitype", "lpp")
+  S3method("is.multitype", "lppm")
+S3method("is.multitype", "msr")
+  S3method("is.multitype", "ppm")
+  S3method("is.multitype", "ppp")
+S3method("is.multitype", "quad")
+S3method("is.poisson", "interact")
+S3method("is.poisson", "kppm")
+S3method("is.poisson", "lppm")
+S3method("is.poisson", "mppm")
+S3method("is.poisson", "ppm")
+S3method("is.poisson", "rmhmodel")
+S3method("is.poisson", "slrm")
+S3method("is.stationary", "detpointprocfamily")
+S3method("is.stationary", "dppm")
+S3method("is.stationary", "kppm")
+S3method("is.stationary", "lppm")
+S3method("is.stationary", "ppm")
+S3method("is.stationary", "rmhmodel")
+S3method("is.stationary", "slrm")
+   S3method("Kmodel", "detpointprocfamily")
+   S3method("Kmodel", "dppm")
+   S3method("Kmodel", "kppm")
+   S3method("Kmodel", "ppm")
+  S3method("kppm", "formula")
+  S3method("kppm", "ppp")
+S3method("kppm", "quad")
+S3method("labels", "dppm")
+S3method("labels", "kppm")
+S3method("labels", "ppm")
+S3method("labels", "slrm")
+  S3method("[", "layered")
+S3method("levels", "im")
+  S3method("[", "leverage.ppm")
+S3method("leverage", "ppm")
+  S3method("[", "linim")
+  S3method("[", "linnet")
+S3method("[", "localpcfmatrix")
+S3method("logLik", "dppm")
+S3method("logLik", "kppm")
+  S3method("logLik", "lppm")
+ S3method("logLik", "mppm")
+S3method("logLik", "ppm")
+  S3method("logLik", "slrm")
+S3method("logLik", "vblogit")
+  S3method("[", "lpp")
+S3method("lppm", "formula")
+S3method("lppm", "lpp")
+S3method("markformat", "default")
+S3method("markformat", "ppp")
+S3method("markformat", "ppx")
+S3method("markformat", "psp")
+S3method("marks", "default")
+S3method("marks", "ppp")
+S3method("marks", "ppx")
+S3method("marks", "psp")
+S3method("marks", "quad")
+  S3method("marks", "ssf")
+S3method("marks", "tess")
+  S3method("max", "fv")
+  S3method("max", "ssf")
+  S3method("mean", "linim")
+  S3method("median", "linim")
+  S3method("min", "fv")
+  S3method("min", "ssf")
+ S3method("model.frame", "dppm")
+ S3method("model.frame", "kppm")
+ S3method("model.frame", "lppm")
+ S3method("model.frame", "ppm")
+  S3method("model.images", "dppm")
+  S3method("model.images", "kppm")
+  S3method("model.images", "lppm")
+  S3method("model.images", "ppm")
+  S3method("model.images", "slrm")
+   S3method("model.matrix", "dppm")
+   S3method("model.matrix", "ippm")
+   S3method("model.matrix", "kppm")
+   S3method("model.matrix", "lppm")
+   S3method("model.matrix", "ppm")
+   S3method("model.matrix", "slrm")
+S3method("[", "msr")
+ S3method("multiplicity", "data.frame")
+ S3method("multiplicity", "default")
+ S3method("multiplicity", "ppp")
+ S3method("multiplicity", "ppx")
+S3method("names", "hyperframe")
+  S3method("nnclean", "pp3")
+  S3method("nnclean", "ppp")
+  S3method("nncross", "default")
+  S3method("nncross", "lpp")
+  S3method("nncross", "pp3")
+  S3method("nncross", "ppp")
+S3method("nndensity", "ppp")
+  S3method("nndist", "default")
+S3method("nndist", "lpp")
+  S3method("nndist", "pp3")
+  S3method("nndist", "ppp")
+  S3method("nndist", "ppx")
+  S3method("nndist", "psp")
+  S3method("nnfun", "lpp")
+  S3method("nnfun", "ppp")
+  S3method("nnfun", "psp")
+  S3method("nnwhich", "default")
+S3method("nnwhich", "lpp")
+  S3method("nnwhich", "pp3")
+  S3method("nnwhich", "ppp")
+  S3method("nnwhich", "ppx")
+  S3method("nobjects", "ppp")
+  S3method("nobjects", "ppx")
+  S3method("nobjects", "psp")
+  S3method("nobjects", "tess")
+S3method("nobs", "dppm")
+S3method("nobs", "kppm")
+  S3method("nobs", "lppm")
+ S3method("nobs", "mppm")
+S3method("nobs", "ppm")
+  S3method("npoints", "pp3")
+  S3method("npoints", "ppp")
+  S3method("npoints", "ppx")
+S3method("nsegments", "linnet")
+S3method("nsegments", "lpp")
+   S3method("nsegments", "psp")
+  S3method("nvertices", "default")
+S3method("nvertices", "linnet")
+  S3method("nvertices", "owin")
+S3method("objsurf", "dppm")
+S3method("objsurf", "kppm")
+S3method("objsurf", "minconfit")
+ S3method("opening", "owin")
+ S3method("opening", "ppp")
+ S3method("opening", "psp")
+  S3method("[", "owin")
+  S3method("pairdist", "default")
+S3method("pairdist", "lpp")
+  S3method("pairdist", "pp3")
+  S3method("pairdist", "ppp")
+  S3method("pairdist", "ppx")
+  S3method("pairdist", "psp")
+S3method("pairs", "im")
+S3method("pairs", "linim")
+S3method("pairs", "listof")
+S3method("pairs", "solist")
+S3method("parameters", "dppm")
+S3method("parameters", "fii")
+S3method("parameters", "interact")
+S3method("parameters", "kppm")
+S3method("parameters", "ppm")
+S3method("parameters", "profilepl")
+ S3method("pcf", "fasp")
+  S3method("pcf", "fv")
+   S3method("pcfmodel", "detpointprocfamily")
+   S3method("pcfmodel", "dppm")
+   S3method("pcfmodel", "kppm")
+   S3method("pcfmodel", "ppm")
+ S3method("pcfmodel", "zclustermodel")
+  S3method("pcf", "ppp")
+S3method("periodify", "owin")
+S3method("periodify", "ppp")
+S3method("periodify", "psp")
+S3method("persp", "funxy")
+  S3method("persp", "im")
+ S3method("persp", "leverage.ppm")
+S3method("persp", "objsurf")
+S3method("pixellate", "linnet")
+S3method("pixellate", "owin")
+S3method("pixellate", "ppp")
+S3method("pixellate", "psp")
+S3method("plot", "addvar")
+  S3method("plot", "anylist")
+S3method("plot", "barplotdata")
+S3method("plot", "bermantest")
+S3method("plot", "bw.frac")
+S3method("plot", "bw.optim")
+S3method("plot", "cdftest")
+S3method("plot", "colourmap")
+  S3method("plot", "diagppm")
+  S3method("plot", "dppm")
+ S3method("plot", "envelope")
+   S3method("plot", "fasp")
+S3method("plot", "fii")
+S3method("plot", "foo")
+S3method("plot", "funxy")
+ S3method("plot", "fv")
+   S3method("plot", "hyperframe")
+  S3method("plot", "im")
+  S3method("plot", "imlist")
+S3method("plot", "infline")
+ S3method("plot", "influence.ppm")
+  S3method("plot", "kppm")
+S3method("plot", "kstest")
+S3method("plot", "laslett")
+S3method("plot", "layered")
+ S3method("plot", "leverage.ppm")
+  S3method("plot", "linfun")
+S3method("plot", "linim")
+ S3method("plot", "linnet")
+S3method("plot", "lintess")
+  S3method("plot", "listof")
+S3method("plot", "localpcfmatrix")
+S3method("plot", "lpp")
+  S3method("plot", "lppm")
+S3method("plot", "lurk")
+S3method("plot", "minconfit")
+  S3method("plot", "mppm")
+  S3method("plot", "msr")
+S3method("plot", "objsurf")
+ S3method("plot", "onearrow")
+ S3method("plot", "owin")
+S3method("plot", "parres")
+S3method("plot", "plotpairsim")
+  S3method("plot", "plotppm")
+ S3method("plot", "pp3")
+  S3method("plot", "ppm")
+ S3method("plot", "ppp")
+S3method("plot", "pppmatching")
+S3method("plot", "ppx")
+S3method("plot", "profilepl")
+  S3method("plot", "psp")
+S3method("plot", "qqppm")
+  S3method("plot", "quad")
+S3method("plot", "quadratcount")
+ S3method("plot", "quadrattest")
+S3method("plot", "rho2hat")
+S3method("plot", "rhohat")
+S3method("plot", "rppm")
+ S3method("plot", "scan.test")
+  S3method("plot", "slrm")
+  S3method("plot", "solist")
+S3method("plot", "spatialcdf")
+  S3method("plot", "splitppp")
+S3method("plot", "ssf")
+S3method("plot", "studpermutest")
+S3method("plot", "symbolmap")
+  S3method("plot", "tess")
+S3method("plot", "textstring")
+ S3method("plot", "texturemap")
+ S3method("plot", "yardstick")
+S3method("points", "lpp")
+S3method("pool", "anylist")
+S3method("pool", "envelope")
+S3method("pool", "fasp")
+S3method("pool", "fv")
+S3method("pool", "quadrattest")
+S3method("pool", "rat")
+S3method("[", "pp3")
+S3method("ppm", "default")
+   S3method("ppm", "formula")
+   S3method("ppm", "ppp")
+   S3method("ppm", "quad")
+  S3method("[", "ppp")
+  S3method("[", "ppx")
+  S3method("predict", "dppm")
+  S3method("predict", "kppm")
+S3method("predict", "lppm")
+S3method("predict", "mppm")
+   S3method("predict", "ppm")
+S3method("predict", "profilepl")
+S3method("predict", "rho2hat")
+S3method("predict", "rhohat")
+S3method("predict", "rppm")
+ S3method("predict", "slrm")
+S3method("predict", "vblogit")
+ S3method("predict", "zclustermodel")
+S3method("print", "addvar")
+S3method("print", "anylist")
+S3method("print", "autoexec")
+  S3method("print", "box3")
+  S3method("print", "boxx")
+S3method("print", "bt.frame")
+S3method("print", "bugtable")
+S3method("print", "bw.frac")
+S3method("print", "bw.optim")
+S3method("print", "colourmap")
+S3method("print", "detpointprocfamily")
+S3method("print", "detpointprocfamilyfun")
+S3method("print", "diagppm")
+S3method("print", "distfun")
+S3method("print", "dppm")
+S3method("print", "envelope")
+S3method("print", "ewcdf")
+S3method("print", "fasp")
+S3method("print", "fii")
+S3method("print", "funxy")
+S3method("print", "fv")
+S3method("print", "fvfun")
+S3method("print", "hasenvelope")
+S3method("print", "hierarchicalordering")
+S3method("print", "hyperframe")
+  S3method("print", "im")
+S3method("print", "indicfun")
+S3method("print", "infline")
+S3method("print", "influence.ppm")
+S3method("print", "interact")
+S3method("print", "intermaker")
+S3method("print", "isf")
+S3method("print", "kppm")
+S3method("print", "laslett")
+S3method("print", "layered")
+S3method("print", "leverage.ppm")
+  S3method("print", "linfun")
+  S3method("print", "linim")
+S3method("print", "linnet")
+S3method("print", "lintess")
+S3method("print", "localpcfmatrix")
+S3method("print", "lpp")
+  S3method("print", "lppm")
+S3method("print", "lut")
+S3method("print", "minconfit")
+S3method("print", "mppm")
+S3method("print", "msr")
+S3method("print", "nnfun")
+S3method("print", "numberwithunit")
+S3method("print", "objsurf")
+S3method("print", "onearrow")
+  S3method("print", "owin")
+S3method("print", "parres")
+S3method("print", "plotpairsim")
+S3method("print", "plotppm")
+  S3method("print", "pp3")
+ S3method("print", "ppm")
+  S3method("print", "ppp")
+S3method("print", "pppmatching")
+S3method("print", "ppx")
+S3method("print", "profilepl")
+  S3method("print", "psp")
+S3method("print", "qqppm")
+ S3method("print", "quad")
+S3method("print", "quadrattest")
+S3method("print", "rat")
+S3method("print", "rho2hat")
+S3method("print", "rhohat")
+S3method("print", "rmhcontrol")
+S3method("print", "rmhexpand")
+S3method("print", "rmhInfoList")
+S3method("print", "rmhmodel")
+S3method("print", "rmhstart")
+S3method("print", "rppm")
+S3method("print", "simplepanel")
+S3method("print", "slrm")
+S3method("print", "Smoothfun")
+S3method("print", "solist")
+S3method("print", "sparse3Darray")
+S3method("print", "splitppp")
+S3method("print", "splitppx")
+  S3method("print", "ssf")
+S3method("print", "summary.fii")
+S3method("print", "summary.hyperframe")
+  S3method("print", "summary.im")
+  S3method("print", "summary.kppm")
+S3method("print", "summary.linim")
+S3method("print", "summary.linnet")
+S3method("print", "summary.lintess")
+S3method("print", "summary.listof")
+S3method("print", "summary.logiquad")
+S3method("print", "summary.lpp")
+S3method("print", "summary.lut")
+S3method("print", "summary.mppm")
+S3method("print", "summary.owin")
+  S3method("print", "summary.pp3")
+  S3method("print", "summary.ppm")
+S3method("print", "summary.ppp")
+S3method("print", "summary.psp")
+  S3method("print", "summary.quad")
+S3method("print", "summary.rmhexpand")
+S3method("print", "summary.solist")
+S3method("print", "summary.splitppp")
+S3method("print", "summary.splitppx")
+S3method("print", "summary.units")
+S3method("print", "symbolmap")
+S3method("print", "tess")
+S3method("print", "textstring")
+S3method("print", "texturemap")
+S3method("print", "timed")
+  S3method("print", "units")
+S3method("print", "vblogit")
+S3method("print", "yardstick")
+ S3method("print", "zclustermodel")
+S3method("prune", "rppm")
+  S3method("pseudoR2", "lppm")
+  S3method("pseudoR2", "ppm")
+  S3method("psib", "kppm")
+  S3method("[", "psp")
+  S3method("[", "quad")
+  S3method("quadratcount", "ppp")
+  S3method("quadratcount", "splitppp")
+  S3method("quadrat.test", "mppm")
+S3method("quadrat.test", "ppm")
+S3method("quadrat.test", "ppp")
+S3method("quadrat.test", "quadratcount")
+S3method("quadrat.test", "splitppp")
+S3method("quantess", "im")
+S3method("quantess", "owin")
+S3method("quantess", "ppp")
+S3method("quantile", "density")
+  S3method("quantile", "ewcdf")
+S3method("quantile", "im")
+  S3method("quantile", "linim")
+ S3method("ranef", "mppm")
+  S3method("range", "fv")
+  S3method("range", "ssf")
+S3method("[", "rat")
+S3method("rbind", "hyperframe")
+  S3method("reach", "detpointprocfamily")
+  S3method("reach", "dppm")
+  S3method("reach", "fii")
+  S3method("reach", "interact")
+  S3method("reach", "ppm")
+  S3method("reach", "rmhmodel")
+S3method("rebound", "im")
+S3method("rebound", "owin")
+S3method("rebound", "ppp")
+S3method("rebound", "psp")
+  S3method("reflect", "default")
+  S3method("reflect", "im")
+S3method("reflect", "infline")
+  S3method("reflect", "layered")
+  S3method("reflect", "tess")
+  S3method("relevel", "im")
+  S3method("relevel", "ppp")
+  S3method("relevel", "ppx")
+S3method("relrisk", "ppm")
+S3method("relrisk", "ppp")
+ S3method("rescale", "im")
+  S3method("rescale", "layered")
+  S3method("rescale", "linnet")
+  S3method("rescale", "lpp")
+  S3method("rescale", "owin")
+ S3method("rescale", "ppp")
+ S3method("rescale", "psp")
+  S3method("rescale", "units")
+  S3method("residuals", "dppm")
+  S3method("residuals", "kppm")
+S3method("residuals", "mppm")
+  S3method("residuals", "ppm")
+S3method("rhohat", "lpp")
+S3method("rhohat", "lppm")
+S3method("rhohat", "ppm")
+S3method("rhohat", "ppp")
+S3method("rhohat", "quad")
+   S3method("rmhcontrol", "default")
+S3method("rmhcontrol", "list")
+S3method("rmhcontrol", "rmhcontrol")
+   S3method("rmh", "default")
+  S3method("rmhmodel", "default")
+   S3method("rmhmodel", "list")
+  S3method("rmhmodel", "ppm")
+S3method("rmhmodel", "rmhmodel")
+  S3method("rmh", "ppm")
+   S3method("rmhstart", "default")
+S3method("rmhstart", "list")
+S3method("rmhstart", "rmhstart")
+S3method("roc", "kppm")
+S3method("roc", "lpp")
+S3method("roc", "lppm")
+S3method("roc", "ppm")
+S3method("roc", "ppp")
+S3method("rose", "default")
+S3method("rose", "density")
+S3method("rose", "fv")
+S3method("rose", "histogram")
+ S3method("rotate", "im")
+S3method("rotate", "infline")
+  S3method("rotate", "layered")
+  S3method("rotate", "linnet")
+  S3method("rotate", "lpp")
+ S3method("rotate", "owin")
+ S3method("rotate", "ppp")
+ S3method("rotate", "psp")
+  S3method("rotate", "tess")
+S3method("rounding", "default")
+S3method("rounding", "pp3")
+S3method("rounding", "ppp")
+S3method("rounding", "ppx")
+S3method("round", "pp3")
+S3method("round", "ppp")
+S3method("round", "ppx")
+S3method("row.names", "hyperframe")
+   S3method("rshift", "ppp")
+   S3method("rshift", "psp")
+   S3method("rshift", "splitppp")
+S3method("scalardilate", "breakpts")
+  S3method("scalardilate", "default")
+S3method("scalardilate", "diagramobj")
+  S3method("scalardilate", "im")
+  S3method("scalardilate", "layered")
+  S3method("scalardilate", "linim")
+  S3method("scalardilate", "linnet")
+  S3method("scalardilate", "lpp")
+S3method("scalardilate", "msr")
+  S3method("scalardilate", "owin")
+  S3method("scalardilate", "ppp")
+  S3method("scalardilate", "psp")
+  S3method("scalardilate", "tess")
+  S3method("scaletointerval", "default")
+  S3method("scaletointerval", "im")
+S3method("scanmeasure", "im")
+S3method("scanmeasure", "ppp")
+S3method("segregation.test", "ppp")
+S3method("sharpen", "ppp")
+S3method("shift", "diagramobj")
+ S3method("shift", "im")
+S3method("shift", "infline")
+S3method("shift", "influence.ppm")
+  S3method("shift", "layered")
+S3method("shift", "leverage.ppm")
+  S3method("shift", "linim")
+  S3method("shift", "linnet")
+  S3method("shift", "lpp")
+S3method("shift", "msr")
+ S3method("shift", "owin")
+ S3method("shift", "ppp")
+ S3method("shift", "psp")
+S3method("shift", "quadratcount")
+S3method("shift", "quadrattest")
+  S3method("shift", "tess")
+S3method("shortside", "box3")
+S3method("shortside", "boxx")
+ S3method("shortside", "owin")
+S3method("sidelengths", "box3")
+S3method("sidelengths", "boxx")
+ S3method("sidelengths", "owin")
+  S3method("simulate", "detpointprocfamily")
+  S3method("simulate", "dppm")
+  S3method("simulate", "kppm")
+  S3method("simulate", "lppm")
+  S3method("simulate", "mppm")
+  S3method("simulate", "ppm")
+S3method("simulate", "profilepl")
+S3method("simulate", "rhohat")
+  S3method("simulate", "slrm")
+S3method("Smoothfun", "ppp")
+S3method("Smooth", "fv")
+S3method("Smooth", "im")
+ S3method("Smooth", "msr")
+S3method("Smooth", "ppp")
+S3method("Smooth", "solist")
+ S3method("Smooth", "ssf")
+  S3method("[", "solist")
+S3method("sort", "im")
+S3method("[", "sparse3Darray")
+  S3method("split", "hyperframe")
+S3method("split", "im")
+  S3method("split", "msr")
+  S3method("[", "splitppp")
+  S3method("split", "ppp")
+  S3method("split", "ppx")
+S3method("[", "splitppx")
+ S3method("[", "ssf")
+S3method("str", "hyperframe")
+S3method("subset", "hyperframe")
+S3method("subset", "lpp")
+S3method("subset", "pp3")
+S3method("subset", "ppp")
+S3method("subset", "ppx")
+ S3method("summary", "anylist")
+S3method("summary", "envelope")
+S3method("summary", "fii")
+S3method("summary", "funxy")
+S3method("summary", "hyperframe")
+  S3method("summary", "im")
+  S3method("summary", "kppm")
+  S3method("summary", "linfun")
+  S3method("summary", "linim")
+S3method("summary", "linnet")
+S3method("summary", "lintess")
+ S3method("summary", "listof")
+S3method("summary", "logiquad")
+S3method("summary", "lpp")
+  S3method("summary", "lppm")
+S3method("summary", "lut")
+S3method("summary", "mppm")
+  S3method("summary", "owin")
+  S3method("summary", "pp3")
+  S3method("summary", "ppm")
+ S3method("summary", "ppp")
+S3method("summary", "pppmatching")
+S3method("summary", "ppx")
+S3method("summary", "profilepl")
+  S3method("summary", "psp")
+  S3method("summary", "quad")
+S3method("summary", "rmhexpand")
+ S3method("summary", "solist")
+ S3method("summary", "splitppp")
+S3method("summary", "splitppx")
+  S3method("summary", "units")
+S3method("summary", "vblogit")
+  S3method("superimpose", "default")
+  S3method("superimpose", "lpp")
+  S3method("superimpose", "ppp")
+  S3method("superimpose", "ppplist")
+  S3method("superimpose", "psp")
+  S3method("superimpose", "splitppp")
+S3method("tail", "hyperframe")
+  S3method("tail", "ppp")
+  S3method("tail", "ppx")
+  S3method("tail", "psp")
+  S3method("tail", "tess")
+S3method("terms", "dppm")
+S3method("terms", "kppm")
+  S3method("terms", "lppm")
+ S3method("terms", "mppm")
+S3method("terms", "ppm")
+S3method("terms", "slrm")
+  S3method("[", "tess")
+ S3method("unique", "ppp")
+ S3method("unique", "ppx")
+  S3method("unitname", "box3")
+  S3method("unitname", "boxx")
+S3method("unitname", "default")
+S3method("unitname", "dppm")
+S3method("unitname", "im")
+S3method("unitname", "kppm")
+S3method("unitname", "linnet")
+S3method("unitname", "lpp")
+S3method("unitname", "minconfit")
+S3method("unitname", "owin")
+  S3method("unitname", "pp3")
+S3method("unitname", "ppm")
+S3method("unitname", "ppp")
+S3method("unitname", "ppx")
+S3method("unitname", "psp")
+S3method("unitname", "quad")
+S3method("unitname", "slrm")
+S3method("unitname", "tess")
+S3method("unmark", "lpp")
+ S3method("unmark", "ppp")
+ S3method("unmark", "ppx")
+ S3method("unmark", "psp")
+ S3method("unmark", "splitppp")
+  S3method("unmark", "ssf")
+S3method("unmark", "tess")
+S3method("unstack", "layered")
+S3method("unstack", "lpp")
+S3method("unstack", "msr")
+S3method("unstack", "ppp")
+S3method("unstack", "psp")
+S3method("unstack", "solist")
+  S3method("update", "detpointprocfamily")
+S3method("update", "im")
+S3method("update", "interact")
+S3method("update", "ippm")
+ S3method("update", "kppm")
+  S3method("update", "lppm")
+  S3method("update", "ppm")
+  S3method("update", "rmhcontrol")
+S3method("update", "rmhstart")
+S3method("update", "slrm")
+S3method("update", "symbolmap")
+   S3method("valid", "detpointprocfamily")
+  S3method("valid", "lppm")
+  S3method("valid", "ppm")
+   S3method("vcov", "kppm")
+  S3method("vcov", "lppm")
+   S3method("vcov", "mppm")
+  S3method("vcov", "ppm")
+  S3method("vcov", "slrm")
+S3method("vertices", "linnet")
+ S3method("vertices", "owin")
+S3method("volume", "box3")
+S3method("volume", "boxx")
+S3method("volume", "linnet")
+ S3method("volume", "owin")
+ S3method("Window", "distfun")
+ S3method("Window", "dppm")
+ S3method("Window", "funxy")
+   S3method("Window", "im")
+ S3method("Window", "influence.ppm")
+ S3method("Window", "kppm")
+ S3method("Window", "layered")
+ S3method("Window", "leverage.ppm")
+S3method("Window", "linnet")
+S3method("Window", "lintess")
+ S3method("Window", "lpp")
+ S3method("Window", "lppm")
+ S3method("Window", "msr")
+ S3method("Window", "nnfun")
+ S3method("Window", "ppm")
+   S3method("Window", "ppp")
+   S3method("Window", "psp")
+ S3method("Window", "quad")
+ S3method("Window", "quadratcount")
+ S3method("Window", "quadrattest")
+ S3method("Window", "rmhmodel")
+ S3method("Window", "tess")
+S3method("with", "fv")
+S3method("with", "hyperframe")
+S3method("with", "msr")
+  S3method("with", "ssf")
+S3method("xtfrm", "im")
+# ......................................... 
+#      Assignment methods                   
+# ......................................... 
+  S3method("[<-", "anylist")
+  S3method("coords<-", "ppp")
+  S3method("coords<-", "ppx")
+S3method("dimnames<-", "fasp")
+S3method("dimnames<-", "sparse3Darray")
+S3method("dim<-", "sparse3Darray")
+S3method("formula<-", "fv")
+   S3method("Frame<-", "im")
+   S3method("Frame<-", "owin")
+   S3method("Frame<-", "ppp")
+  S3method("[<-", "fv")
+  S3method("$<-", "fv")
+  S3method("[<-", "hyperframe")
+  S3method("$<-", "hyperframe")
+  S3method("[<-", "im")
+  S3method("[<-", "layered")
+  S3method("[[<-", "layered")
+S3method("levels<-", "im")
+  S3method("[<-", "listof")
+S3method("marks<-", "lpp")
+S3method("marks<-", "ppp")
+S3method("marks<-", "ppx")
+S3method("marks<-", "psp")
+  S3method("marks<-", "ssf")
+S3method("marks<-", "tess")
+S3method("names<-", "fv")
+S3method("names<-", "hyperframe")
+  S3method("[<-", "ppp")
+S3method("row.names<-", "hyperframe")
+  S3method("[<-", "solist")
+S3method("[<-", "sparse3Darray")
+  S3method("split<-", "hyperframe")
+  S3method("[<-", "splitppp")
+  S3method("split<-", "ppp")
+S3method("[<-", "splitppx")
+  S3method("[<-", "tess")
+  S3method("unitname<-", "box3")
+  S3method("unitname<-", "boxx")
+S3method("unitname<-", "default")
+S3method("unitname<-", "dppm")
+S3method("unitname<-", "im")
+S3method("unitname<-", "kppm")
+S3method("unitname<-", "linnet")
+S3method("unitname<-", "lpp")
+S3method("unitname<-", "minconfit")
+S3method("unitname<-", "owin")
+  S3method("unitname<-", "pp3")
+S3method("unitname<-", "ppm")
+S3method("unitname<-", "ppp")
+S3method("unitname<-", "ppx")
+S3method("unitname<-", "psp")
+S3method("unitname<-", "quad")
+S3method("unitname<-", "slrm")
+S3method("unitname<-", "tess")
+   S3method("Window<-", "im")
+S3method("Window<-", "linnet")
+S3method("Window<-", "lpp")
+   S3method("Window<-", "ppp")
+   S3method("Window<-", "psp")
+# ......................................... 
+#      End of methods                       
+# ......................................... 
diff --git a/NEWS b/NEWS
new file mode 100644
index 0000000..cbd514b
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,11039 @@
+
+        CHANGES IN spatstat VERSION 1.52-1
+
+OVERVIEW
+
+    o Bug fix to satisfy the development version of R.
+    
+    o Version nickname: "Apophenia"
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o Ops.imlist
+    Improved the 'names' of the result.
+
+BUG FIXES
+
+    o bw.smoothppp
+    Crashed in R-devel.
+    Fixed.
+
+        CHANGES IN spatstat VERSION 1.52-0
+
+OVERVIEW
+
+    o We thank Nicholas Read, Abdollah Jalilian, Suman Rakshit,
+    Dominic Schuhmacher and Rasmus Waagepetersen for contributions.
+
+    o Important bug fixes.
+    
+    o Now handles disconnected linear networks.
+
+    o Effect function is now available for all types of fitted model.
+
+    o A model can be fitted or re-fitted to a sub-region of data.
+
+    o More support for measures.
+
+    o 'Pool' operations improved.
+
+    o Geometric-mean smoothing.
+    
+    o Changed algorithm defaults in ippm.
+
+    o Version nickname: "Rudimentary Lathe"
+
+NEW FUNCTIONS
+
+    o as.data.frame.envelope
+    Extract function data from an envelope object,
+    including the functions for the simulated data ('simfuns')
+    if they were saved.
+
+    o is.connected, is.connected.default, is.connected.linnet
+    Determine whether a spatial object consists of
+    one topologically connected piece, or several pieces.
+
+    o is.connected.ppp
+    Determines whether a point pattern is connected after
+    all pairs of points closer than distance R are joined.
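+    For example (a sketch; 'X' stands for any point pattern, and
+    'simplenet' is a standard spatstat network):
+        is.connected(simplenet)    # is the network a single connected piece?
+        is.connected(X, R = 0.1)   # are the points connected at range 0.1?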
+
+    o hist.funxy
+    Histogram of values of a spatial function.
+
+    o model.matrix.ippm
+    Method for 'model.matrix' which allows computation of
+    regular and irregular score components.
+
+    o harmonise.msr
+    Convert several measures (objects of class 'msr')
+    to a common quadrature scheme.
+    
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o Smooth.ppp
+    New argument 'geometric' supports geometric-mean smoothing.
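+    Illustrative usage (the 'longleaf' marks are positive,
+    as geometric-mean smoothing requires):
+        Smooth(longleaf, sigma = 10, geometric = TRUE)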
+    
+    o Kinhom
+    New argument 'ratio'.
+    
+    o linearKinhom, linearpcfinhom
+    Changed default behaviour when 'lambda' is a fitted model.
+    New arguments 'update' and 'leaveoneout'.
+    
+    o linearK, linearKinhom, linearpcf, linearpcfinhom, compilepcf
+    Ratio calculations are now supported.
+    New argument 'ratio'.
+    
+    o effectfun
+    Now works for 'ppm', 'kppm', 'lppm', 'dppm', 'rppm' and 'profilepl' objects.
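+    A minimal sketch, using the standard 'redwood' data:
+        fit <- kppm(redwood ~ x)
+        plot(effectfun(fit, "x"))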
+
+    o ppm, kppm
+    The argument 'subset' can now be a window (class 'owin')
+    specifying the subset of data to which the model should be fitted.
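+    For instance (a sketch; square(0.5) is the window [0,0.5]^2):
+        fit <- ppm(cells ~ x, subset = square(0.5))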
+
+    o fitted.lppm
+    New argument 'leaveoneout' allows leave-one-out computation of fitted values.
+
+    o pool.rat
+    New arguments 'relabel' and 'variance'.
+
+    o density.lpp
+    The return value is a pixel image of class 'linim' in all cases.
+    
+    o plot.linim, plot.linfun
+    A scale bar is now plotted when style="width".
+    New argument 'legend'.
+
+    o ippm
+    Default values for the parameters of the optimisation algorithm (nlm.args)
+    have changed.
+
+    o ippm
+    The internal format of the result has been extended slightly.
+    
+    o bind.fv
+    New argument 'clip'.
+
+    o as.im.distfun
+    New argument 'approx' specifies the choice of algorithm.
+
+    o "[.psp"
+    New argument 'fragments' specifies whether to keep fragments of
+    line segments that are cut by the new window, or only to retain
+    segments that lie entirely inside the window.
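+    Sketch of usage ('X' a line segment pattern, 'W' a window):
+        Y <- X[W, fragments = FALSE]   # keep only segments entirely inside W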
+
+    o predict.rhohat
+    New argument 'what' determines which value should be calculated:
+    the function estimate, the upper/lower confidence limits, or the
+    standard error.
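+    A small sketch (assuming the value "se" selects the standard error):
+        rho <- rhohat(cells, "x")
+        se <- predict(rho, what = "se")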
+
+    o pool.fv
+    New arguments 'relabel' and 'variance'.
+    
+    o pool.rat
+    New argument 'weights'.
+    
+    o plot.msr
+    New argument 'massthresh'.
+
+    o Ops.msr
+    Calculations like A+B can now be performed even when the measures A and B
+    are not defined on the same quadrature scheme.
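+    Sketch ('fitA' and 'fitB' stand for any two fitted 'ppm' objects):
+        A <- residuals(fitA)
+        B <- residuals(fitB)
+        D <- A - B    # the quadrature schemes are harmonised automatically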
+    
+    o density.ppp
+    New argument 'verbose'.
+
+    o bw.pcf
+    New argument 'verbose'.
+    
+    o hist.im
+    New argument 'xname'.
+
+    o [.leverage.ppm
+    New argument 'update'.
+    
+    o [.layered
+    Additional arguments '...' are now passed to other methods.
+    
+    o logLik.ppm
+    The warning about pseudolikelihood ('log likelihood not available')
+    is now given only once per spatstat session,
+    and is not repeated in subsequent calls.
+
+    o kppm
+    Refuses to fit a log-Gaussian Cox model with anisotropic covariance.
+
+    o plot.linim, plot.linfun
+    The return value has a different format.
+    Arguments have been renamed and reorganised.
+
+    o density.lpp
+    New argument 'old'.
+
+    o ippm
+    Accelerated.
+
+    o Smooth.ppp
+    Now exits gracefully if any mark values are NA, NaN or Inf.
+
+    o timeTaken
+    Now exits gracefully if there is no timing information.
+
+    o nbfires
+    The unit of length for the coordinates is now specified in this dataset.
+    
+BUG FIXES
+
+    o bw.pcf
+    Results were totally incorrect due to a typo.
+    [Spotted by Abdollah Jalilian and Rasmus Waagepetersen.]
+    Fixed.
+
+    o predict.rho2hat
+    Results were incorrect for a rho2hat object computed from a point pattern.
+    Fixed.
+    
+    o density.ppp
+    If the smoothing bandwidth was very small (e.g. smaller than pixel width),
+    results were inaccurate if the default resolution was used,
+    and completely wrong if another resolution was specified.
+    [Spotted by Dominic Schuhmacher.]
+    Fixed.
+    
+    o linearK, linearKinhom, linearpcf, linearpcfinhom, linearKcross,
+    linearKdot, linearpcfcross, linearpcfdot, linearKcross.inhom,
+    linearKdot.inhom, linearpcfcross.inhom, linearpcfdot.inhom
+    Crashed if the network was disconnected.
+    Fixed.
+
+    o crossdist.lpp
+    Crashed if the network was disconnected.
+    Fixed.
+
+    o countends
+    Crashed if the network was disconnected.
+    Fixed.
+
+    o model.images.ppm
+    Crashed for models fitted using 'covfunargs'.
+    Fixed.
+
+    o model.matrix.ppm
+    Crashed for models fitted using 'covfunargs',
+    if argument 'Q' was given.
+    Fixed.
+
+    o polynom
+    Expansion of some polynomials caused an error message
+    about 'invalid model formula'.
+    Fixed.
+    
+    o plot.ppp
+    The argument 'type="n"' did not suppress plotting of the legend,
+    for marked point patterns.
+    Fixed.
+
+    o plot.psp
+    Ignored 'show.all' when 'add=TRUE'.
+    Fixed.
+
+    o intensity.ppm
+    Result had incorrect 'names' attribute in some cases.
+    Fixed.
+
+    o marks<-.ppx
+    The assignment marks(X) <- a, where 'a' is a single atomic value,
+    caused an error if 'X' contained zero points.
+    Fixed.
+
+    o model.depends
+    Crashed when applied to regression models fitted by 'gam',
+    or point process models fitted by 'ppm' with 'use.gam=TRUE'.
+    Fixed.
+    
+    o pool.fv
+    Crashed sometimes, if the arguments did not have the same set of
+    column names.
+    Fixed.
+
+    o pool.rat
+    Crashed with an error message from 'fmt'
+    if there were more than 20 objects to be pooled.
+    Fixed.
+
+    o linearK
+    The 'theo' column was missing if npoints(X) < 2 and correction="Ang".
+    Fixed.
+    
+    o model.matrix.ppm
+    Result was malformed if the model was fitted with 'use.gam=TRUE'.
+    Fixed.
+    
+    o effectfun
+    Crashed if 'covname' was omitted,
+    if the model was fitted with 'use.gam=TRUE'.
+    Fixed.
+    
+    o nncross.lpp
+    Result had incorrect format if Y was empty, in some cases.
+    Fixed.
+
+    o linearKinhom
+    Plot label for y axis was incorrect.
+    [Spotted by Suman Rakshit.]
+    Fixed.
+
+    o plot.solist
+    If the entries were 'linim' objects, they were plotted using image()
+    so arguments like 'style="w"' were ignored.
+    Fixed.
+
+    o as.ppp.data.frame
+    Crashed if X was an object of class 'tbl_df' from the dplyr package.
+    Fixed.
+    
+    o plot.lpp
+    Crashed if there were multiple columns of marks.
+    Fixed.
+    
+        CHANGES IN spatstat VERSION 1.51-0
+
+OVERVIEW
+
+    o We thank Greg McSwiggan, Mehdi Moradi and Tammy L Silva for contributions.
+
+    o New fast algorithm for kernel smoothing on a linear network.
+
+    o Leverage and influence diagnostics extended to Poisson/Gibbs models
+      fitted by logistic composite likelihood.
+      
+    o Two-stage Monte Carlo test.
+    
+    o Dirichlet/Voronoi tessellation on a linear network.
+
+    o Thinning of point patterns on a linear network.
+
+    o More support for functions and tessellations on a linear network.
+
+    o Improvements and bug fixes.
+
+    o Version nickname: 'Poetic Licence'
+
+NEW FUNCTIONS
+
+    o bits.test
+    Balanced Independent Two-Stage Monte Carlo test,
+    an improvement on the Dao-Genton test.
+
+    o lineardirichlet
+    Computes the Dirichlet-Voronoi tessellation associated with a
+    point pattern on a linear network.
+
+    o domain.lintess, domain.linfun
+    Extract the linear network from a 'lintess' or 'linfun' object.
+
+    o summary.lintess
+    Summary of a tessellation on a linear network.
+
+    o clicklpp
+    Interactively add points on a linear network.
+    
+    o envelopeArray
+    Generate an array of envelopes using a function that returns 'fasp' objects.
+    
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o density.lpp
+    New fast algorithm (up to 1000 times faster) for the default case
+    where kernel="gaussian" and continuous=TRUE.
+    Generously contributed by Greg McSwiggan.
+    
+    o leverage.ppm, influence.ppm, dfbetas.ppm
+    These methods now work for models that were fitted by logistic
+    composite likelihood (method='logi'). 
+    
+    o rthin
+    Argument X can now be a point pattern on a linear network (class 'lpp').
+
+    o fitted.ppm
+    New option: type = "link".
+    
+    o update.kppm
+    New argument 'evaluate'.
+
+    o integral.linfun
+    New argument 'delta' controls step length of approximation to integral.
+
+    o as.linim.default
+    New argument 'delta' controls spacing of sample points in internal data.
+
+    o as.linfun.lintess
+    New argument 'values' specifies the function value for each tile.
+    New argument 'navalue'.
+
+BUG FIXES
+
+    o leverage.ppm, influence.ppm, dfbetas.ppm
+    Results for Gibbs models were incorrect due to a mathematical error.
+    (Results for Poisson models were correct.)
+    Fixed.
+    
+    o leverage.ppm, influence.ppm, dfbetas.ppm, ppmInfluence
+    Calculations were incorrect for a Geyer model fitted using
+    an edge correction other than "border" or "none".
+    Fixed.
+    
+    o step, kppm, update.kppm
+    'step' did not work for kppm objects in some cases
+    due to a scoping problem in update.kppm.
+    Fixed.
+    
+    o improve.kppm
+    Crashed if the window was not a rectangle.
+    Fixed.
+
+    o pcf.ppp, pcfinhom
+    Crashed if kernel="epa" rather than "epanechnikov".
+    Fixed.
+
+    o alltypes
+    Crashed if envelope=TRUE and reuse=FALSE.
+    Fixed.
+
+    o pairdist.lpp, nndist.lpp, nnwhich.lpp, nncross.lpp
+    Crashed if the network was disconnected.
+    Fixed.
+
+    o as.im.linim, as.linim.linim
+    Additional arguments such as 'eps' and 'dimyx' were ignored.
+    Fixed.
+    
+    o as.im.default
+    Arguments 'eps' and 'xy' were ignored if X was a single numeric value.
+    Fixed.
+    
+    o 'timed' class
+    Printing of these objects did not work in some locales.
+    Fixed.
+
+    o runifpoint
+    Ignored 'drop' argument if the window was a rectangle.
+    Fixed.
+    
+        CHANGES IN spatstat VERSION 1.50-0
+
+OVERVIEW
+
+    o We thank Richard Cotton, Adrian Heyner, Abdollah Jalilian,
+    Dominic Schuhmacher and Rasmus Waagepetersen for contributions.
+
+    o spatstat now 'Imports' the package 'spatstat.utils'.
+
+    o Bandwidth selection for pair correlation function.
+
+    o Improvements and bug fixes.
+
+    o Version nickname: 'Bunyip Aristocracy'
+
+NEW PACKAGE STRUCTURE
+
+    o spatstat is being split into several sub-packages, to satisfy
+    the requirements of CRAN. This should not affect the user:
+    existing code will continue to work in the same way.
+    Currently there are two sub-packages, called 'spatstat.utils'
+    and 'spatstat'. Typing 'library(spatstat)' will load the familiar
+    'spatstat' package which can be used as before, and will silently
+    import the 'spatstat.utils' package. The 'spatstat.utils' package
+    contains utility functions that were originally written for 'spatstat':
+    they were undocumented internal functions in 'spatstat', but are now
+    documented and accessible in a separate package because they may be
+    useful for other purposes. To access these functions, you need to
+    type 'library(spatstat.utils)'. 
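+    In code, this amounts to:
+        library(spatstat)         # imports spatstat.utils silently
+        library(spatstat.utils)   # only needed to call the utilities directly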
+    
+
+NEW FUNCTIONS
+
+    o bw.pcf
+    Bandwidth selection for pair correlation function.
+    Original code contributed by Abdollah Jalilian and Rasmus Waagepetersen.
+
+    o grow.box3
+    Expand a three-dimensional box.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o as.owin
+    Now refuses to convert a 'box3' to a two-dimensional window.
+
+    o pixellate.ppp
+    If the pattern is empty, the result is an integer-valued image
+    (by default) for consistency with the results for non-empty patterns.
+
+    o ppp
+    If the coordinate vectors x and y contain NA, NaN or infinite values,
+    these points are deleted with a warning, instead of causing a fatal error.
+
+    o ppm
+    Argument 'interaction' can now be a function that makes an interaction,
+    such as Poisson, Hardcore, MultiHard.
+
+    o pcf, pcfinhom
+    New argument 'close' for advanced use.
+
+    o runifpointx, rpoisppx
+    New argument 'drop'.
+
+    o shapley, ponderosa
+    In these installed datasets, the functions shapley.extra$plotit
+    and ponderosa.extra$plotit have changed slightly (to accommodate the
+    dependence on the package spatstat.utils).
+
+    o kppm
+    Improved printed output.
+
+BUG FIXES
+
+    o rMaternI, rMaternII
+    If 'win' was a three-dimensional box of class 'box3', 
+    the result was a two-dimensional point pattern.
+    [Spotted by Adrian Heyner.]
+    Fixed.
+
+    o rmhmodel.ppm, simulate.ppm
+    Crashed when applied to a fitted Lennard-Jones model.
+    [Spotted by Dominic Schuhmacher.]
+    Fixed.
+    
+    o leverage.ppm, influence.ppm, dfbetas.ppm
+    Crashed when applied to some hard-core models.
+    Fixed.
+
+    o "[.ppx"
+    The format of the result was slightly malformed 
+    if exactly one point was selected.
+    Fixed.
+
+    o unmark.lpp, marks<-.lpp
+    The result had class c("lpp", "lpp", "ppx") instead of c("lpp", "ppx").
+    Fixed.
+
+        CHANGES IN spatstat VERSION 1.49-0
+
+OVERVIEW
+
+    o We thank Tilman Davies, Kassel Hingee, Abdollah Jalilian,
+    Brian Ripley and Dominic Schuhmacher for contributions.
+
+    o spatstat now 'Suggests' the package 'fftwtools'.
+
+    o Operations on signed measures.
+
+    o Operations on lists of pixel images.
+
+    o Improved pixellation of point patterns.
+
+    o Stieltjes integral extended.
+
+    o Subset operators extended.
+
+    o Greatly accelerated 'rmh' when using 'nsave'.
+
+    o Some computations accelerated.
+
+    o Size of namespace reduced, for efficiency.
+
+    o Bug fixes.
+
+    o Version nickname: 'So-Called Software'
+
+NEW DEPENDENCIES
+
+    o fftwtools
+    spatstat now 'Suggests' the package 'fftwtools'.
+    This package provides a very fast implementation of 
+    the Fast Fourier Transform, leading to much faster computation
+    in the spatstat functions 'density.ppp', 'relrisk.ppp', 'convolve.im',
+    'blur', 'scan.test' and many other functions.
+    The 'fftwtools' package requires the external software library 'fftw'.
+    We strongly recommend installing this library if possible.
+
+NEW FUNCTIONS
+
+    o hexagon, regularpolygon
+    Create regular polygons.
+
+    o Ops.msr
+    Arithmetic operations for measures.
+
+    o Math.imlist, Ops.imlist, Summary.imlist, Complex.imlist
+    Arithmetic operations for lists of pixel images.
+
+    o measurePositive, measureNegative, measureVariation, totalVariation
+    Positive and negative parts of a measure, and variation of a measure.
+
+    o as.function.owin
+    Convert a spatial window to a function (x,y), the indicator function.
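+    A small sketch, using the standard window 'letterR':
+        f <- as.function(letterR)
+        f(3, 1)    # TRUE or FALSE: does the window contain the point (3,1)?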
+
+    o as.function.ssf
+    Convert an object of class 'ssf' to a function(x,y)
+
+    o as.function.leverage.ppm
+    Convert an object of class 'leverage.ppm' to a function(x,y)
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o stieltjes
+    Argument 'M' can be a stepfun object (such as an empirical CDF).
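+    For instance (a sketch; 'ecdf' is from the stats package):
+        M <- ecdf(runif(100))
+        stieltjes(function(x) x^2, M)   # integral of x^2 with respect to dM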
+
+    o quantile.ewcdf
+    The function is now normalised to the range [0,1]
+    before the quantiles are computed. 
+    This can be suppressed by setting normalise=FALSE.
+
+    o pixellate.ppp
+    New arguments 'fractional' and 'preserve' for more accurate discretisation.
+
+    o "[.layered"
+    Subset index i can now be an 'owin' object.
+
+    o "[.solist"
+    Subset index i can now be an 'owin' object.
+
+    o plot.solist, plot.imlist, plot.anylist
+    Result is now an (invisible) list containing the result
+    from executing the plot of each panel.
+
+    o ppp
+    New argument 'checkdup'.
+
+    o Summary.im
+    Argument 'na.rm' is no longer ignored.
+
+    o cdf.test 
+    The methods for classes ppp, ppm, lpp, lppm, slrm
+    have a new argument 'interpolate'.
+
+    o as.solist
+    The argument x can now be a spatial object;
+    as.solist(cells) is the same as solist(cells).
+
+    o bw.diggle, bw.ppl, bw.relrisk, bw.smoothppp
+    These functions now extract and store the name of the unit of length
+    from the point pattern dataset. When the bandwidth selection criterion
+    is plotted, the name of the unit of length is shown on the x-axis.
+    
+    o polynom
+    This function now has a help file.
+
+    o rmhcontrol
+    New parameter 'pstage' determines when to generate random proposal points.
+
+    o rmh
+    Accelerated, in the case where multiple patterns are saved using 'nsave'.
+
+    o bdist.pixels
+    Accelerated for polygonal windows. New argument 'method'.
+
+    o spatstat namespace
+    The namespace of the spatstat package has been shortened
+    (by internally registering the native routines) which should
+    make the package run faster.
+
+    o sum.im, range.im, max.im, min.im
+    These functions have been removed, as they are now subsumed in Summary.im.
+
+BUG FIXES
+
+    o plot.msr
+    If one of 'nrows' or 'ncols' was specified, but not both,
+    an obscure error occurred.
+    Fixed.
+
+    o plot.solist, plot.imlist, plot.anylist
+    Crashed if 'nrows' and 'ncols' were given values implying
+    that some rows or columns would not contain any plots.
+    Fixed.
+
+    o as.ppp.lpp
+    Crashed if there was more than one column of marks.
+    Fixed.
+
+    o has.close.pp3
+    Results were incorrect, or a crash occurred, when argument 'Y' was given.
+    Fixed.
+
+    o rmpoispp
+    If 'lambda' was a list of images, 'names(lambda)' was ignored,
+    rather than serving as the default value of 'types'.
+    Fixed.
+
+    o bugfixes
+    Output was garbled, in rare cases.
+    Fixed.
+
+    o kppm
+    Result was malformed when clusters="VarGamma" and method="clik2".
+    [Spotted by Abdollah Jalilian.]
+    Fixed.
+
+    o QQversion
+    Plotting labels were malformed.
+    Fixed.
+
+        CHANGES IN spatstat VERSION 1.48-0
+
+OVERVIEW
+
+    o We thank Kim Colyvas, Yongtao Guan, Gopalan Nair, 
+    Nader Najari, Suman Rakshit, Ian Renner and Hangsheng Wang 
+    for contributions.
+
+    o Sufficient Dimension Reduction for point processes.
+
+    o Alternating Gibbs Sampler for point process simulation.
+
+    o Intensity approximation for area-interaction and Geyer models.
+
+    o New class of spatially sampled functions.
+
+    o ROC and AUC extended to other types of point patterns and models.
+
+    o More support for linear networks.
+
+    o More support for infinite straight lines.
+
+    o Simulation of 'rhohat' objects.
+
+    o Kernel smoothing accelerated.
+
+    o Methods for 'head' and 'tail' for spatial patterns.
+
+    o More low-level functionality.
+
+    o Improvements and bug fixes.
+
+    o spatstat now has more than 1000 help files.
+
+    o Nickname: 'Model Prisoner'
+
+NEW CLASSES
+
+    o ssf
+    Class of spatially sampled functions.
+
+NEW FUNCTIONS
+
+    o sdr, dimhat
+    Sufficient Dimension Reduction for point processes.
+    Matlab code contributed by Yongtao Guan, translated by Suman Rakshit.
+
+    o rags, ragsAreaInter, ragsMultiHard
+    Alternating Gibbs Sampler for point processes.
+
+    o psib
+    Sibling probability (index of clustering strength in a cluster process).
+
+    o bugfixes    
+    List all bug fixes in recent versions of a package.
+
+    o roc.kppm, roc.lppm, roc.lpp
+    Methods for 'roc' (receiver operating characteristic curve)
+    for fitted models of class 'kppm' and 'lppm'
+    and point patterns of class 'lpp'
+
+    o auc.kppm, auc.lppm, auc.lpp
+    Methods for 'auc' (area under the ROC curve)
+    for fitted models of class 'kppm' and 'lppm'
+    and point patterns of class 'lpp'
+
+    o rlpp
+    Random points on a linear network with a specified probability density.
+
+    o cut.lpp
+    Method for 'cut' for point patterns on a linear network.
+
+    o crossing.linnet
+    Find crossing points between a linear network and another set of lines.
+
+    o ssf
+    Create a spatially sampled function
+
+    o print.ssf, plot.ssf, contour.ssf, image.ssf
+    Display a spatially sampled function
+
+    o as.im.ssf, as.ppp.ssf, marks.ssf, marks<-.ssf, unmark.ssf, [.ssf, with.ssf
+    Manipulate data in a spatially sampled function
+
+    o Smooth.ssf
+    Smooth a spatially sampled function 
+
+    o integral.ssf
+    Approximate integral of spatially sampled function
+
+    o simulate.rhohat
+    Generate a Poisson random point pattern with intensity that is
+    a function of a covariate, given by a 'rhohat' object.
+
+    o head.ppp, head.ppx, head.psp, head.tess, 
+    tail.ppp, tail.ppx, tail.psp, tail.tess
+    Methods for 'head' and 'tail' for spatial patterns.
+
+    o as.data.frame.tess
+    Convert a tessellation to a data frame.
+
+    o timeTaken
+    Extract the timing data from a 'timed' object or objects.
+
+    o rotate.infline, shift.infline, reflect.infline, flipxy.infline
+    Geometrical transformations for infinite straight lines.
+
+    o whichhalfplane
+    Determine which side of an infinite line a point lies on.
+
+    o points.lpp
+    Method for 'points' for point patterns on a linear network.
+
+    o pairs.linim
+    Pairs plot for images on a linear network.
+
+    o has.close
+    Faster way to check whether a point has a close neighbour.
+
+    o closetriples
+    Low-level function to find all close triples of points.
+
+    o matrixpower, matrixsqrt, matrixinvsqrt
+    Raise a matrix to any power.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o intensity.ppm
+    Intensity approximation is now available for the Geyer saturation process
+    and the area-interaction process (results of research with Gopalan Nair).
+
+    o envelope.lpp, envelope.lppm
+    New arguments 'fix.n' and 'fix.marks' allow envelopes to be computed
+    using simulations conditional on the observed number of points.
+
+    o "[.im"
+    The subset index "i" can now be a linear network (object of class 'linnet').
+    The result of "x[i, drop=FALSE]" is then a pixel image of class 'linim'.
+
+    o cut.ppp
+    Argument z can be "x" or "y" indicating one of the spatial coordinates.
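+    For example:
+        cut(cells, z = "x", breaks = 3)   # classify points into 3 bands of x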
+
+    o rThomas, rMatClust, rCauchy, rVarGamma, rPoissonCluster, rNeymanScott
+    New argument 'saveparents'.
+
+    o lintess
+    Argument 'df' can be missing or NULL, 
+    resulting in a tessellation with only one tile.
+
+    o lpp
+    X can be missing or NULL, resulting in an empty point pattern.
+
+    o plot.lintess
+    Improved plot method, with more options.
+
+    o rpoisline
+    Also returns information about the original infinite random lines.
+
+    o density.ppp, Smooth.ppp
+    Accelerated.
+
+    o density.psp
+    New argument 'method' controls the method of computation.
+    New faster option 'method="FFT"'.
+
+    o nndist.lpp
+    Accelerated.
+
+BUG FIXES
+
+    o F3est
+    Estimates of F(r) for the largest value of r were wildly incorrect.
+    Fixed.
+
+    o clip.infline
+    Results were incorrect unless the midpoint of the window
+    was the coordinate origin.
+    Fixed.
+
+    o integral.linim
+    Results were inaccurate if many of the segment lengths were 
+    shorter than the width of a pixel.
+    Fixed.
+
+    o predict.lppm
+    Bizarre error messages about 'class too long' or 'names too long'
+    occurred if the model was multitype.
+    Fixed.
+
+    o superimpose
+    Point patterns containing 0 points were ignored
+    when determining the list of possible marks.
+    Fixed.
+
+    o chop.tess
+    Vertical lines were not handled correctly 
+    with pixellated tessellations.
+    Fixed.
+
+    o timed
+    Argument 'timetaken' was ignored.
+    Fixed.    
+
+    o ppm
+    Crashed if method="logi" and the 'covariates' were a data frame.
+    [Spotted by Kim Colyvas and Ian Renner.]
+    Fixed.
+
+    o rpoislpp, runiflpp
+    Crashed if nsim > 1.
+    Fixed.
+
+    o rpoisline
+    Crashed if zero lines were generated.
+    Fixed.
+
+    o model.frame.ppm
+    Crashed if the original model was fitted to a data frame of covariates
+    and there were NA's amongst the covariate values.
+    [Spotted by Kim Colyvas.]
+    Fixed.
+
+    o any, all
+    When applied to pixel images (objects of class 'im') the result 
+    was sometimes NA when a finite value should have been returned.
+    Fixed.    
+   
+    o predict.rhohat
+    When the original data were on a linear network,
+    the result of predict.rhohat did not belong to the correct class 'linim'.
+    Fixed.
+ 
+        CHANGES IN spatstat VERSION 1.47-0
+
+OVERVIEW
+
+    o We thank Marcel Austenfeld, Guy Bayegnak, Tilman Davies, Cenk Icos, 
+    Jorge Mateu, Frederico Mestre, Mehdi Moradi, Virginia Morera Pujol,
+    Suman Rakshit and Sven Wagner for contributions.
+
+    o Non-Gaussian smoothing kernels.
+
+    o Important bug fix in linearK, linearpcf
+
+    o Changed internal format of linnet and lpp objects.
+
+    o Faster computation in linear networks.
+
+    o Bias correction techniques.
+
+    o Bounding circle of a spatial object.
+
+    o Minkowski sum also applicable to point patterns and line segment patterns.
+
+    o Option to plot marked points as arrows.
+
+    o Kernel smoothing accelerated.
+
+    o Workaround for bug in some graphics drivers affecting image orientation.
+
+    o Bug fixes and improvements.
+
+    o Version nickname: 'Responsible Gambler'
+
+NEW FUNCTIONS
+
+    o anyNA.im
+    Method for 'anyNA' for pixel images.
+
+    o bc
+    Bias correction (Newton-Raphson) for fitted model parameters. 
+    See also 'rex'.
+
+    o boundingcircle, boundingcentre
+    Find the smallest circle enclosing a window or point pattern.
+
+    o "[.linim"
+    Subset operator for pixel images on a linear network.
+
+    o mean.linim, median.linim, quantile.linim
+    The mean, median, or quantiles of pixel values in a 
+    pixel image on a linear network.
+
+    o rex
+    Richardson extrapolation for numerical integrals and 
+    statistical model parameter estimates. 
+
+    o weighted.median, weighted.quantile
+    Median or quantile of numerical data with associated weights.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o linear networks
+    The internal format of a 'linnet' (linear network) object
+    has been changed. Existing datasets of class 'linnet' and 'lpp'
+    are still supported. However, computation will be faster if they
+    are converted to the new format. To convert a linnet object L
+    to the new format, use L <- as.linnet(L). To convert an lpp object X 
+    to the new format, use X <- as.lpp(X).
+
+    o density.ppp, Smooth.ppp
+    New argument 'kernel' allows the user to specify the smoothing kernel.
+
+    o density.ppp, Smooth.ppp
+    Argument 'weights' can now be a pixel image.
+
+    o MinkowskiSum, %(+)%
+    Now accepts arguments which are point patterns or line segment patterns
+    as well as windows.
+
+    o plot.im
+    New argument 'workaround' to avoid a bug in some device drivers
+    that causes the image to be displayed in the wrong spatial orientation.
+    [Thanks to Marcel Austenfeld for drawing attention to this.]
+
+    o sumouter
+    New argument 'y' allows computation of asymmetric outer products.
+
+    o linearKinhom, linearpcfinhom
+    New argument 'normpower'.
+
+    o rmh.default, rmh.ppm
+    New arguments 'nsim', 'saveinfo'.
+
+    o symbolmap, plot.ppp, plot.lpp
+    New option: shape="arrows".
+
+    o rcellnumber
+    New argument 'mu'.
+
+    o lengths.psp
+    New argument 'squared'.
+
+    o plot.linfun
+    Now passes arguments to the function being plotted.
+
+    o as.linnet.psp
+    If the line segment pattern has marks, then the resulting linear network
+    also carries these marks in the $lines component.
+
+    o summary.owin, summary.im
+    The fraction of frame area that is occupied by the window/image
+    is now reported.
+
+    o density.ppp, Smooth.ppp
+    Computation accelerated by about 15% 
+    in the case where at='points' and kernel='gaussian'.
+
+    o linearK, linearpcf
+    Accelerated by about 40%.
+
+    o pixellate.ppp
+    Accelerated in the case where weights are given.
+
+    o density.ppp
+    Accelerated in the cases where weights are given or 'diggle=TRUE'.
+    
+    o dilation.ppp
+    Improved geometrical accuracy.
+    Now accepts arguments to control resolution of polygonal approximation.
+
+    o discs
+    New argument 'npoly'. 
+    Accelerated in some cases. 
+
+    o plot.pp3
+    New arguments 'box.front', 'box.back' control plotting of the box.
+
+    o grow.rectangle
+    New argument 'fraction'.
+
+    o nnfun.lpp
+    New argument 'k'.
+
+    o bw.ppl
+    New argument 'sigma'.
+
+    o lppm
+    New argument 'random' controls placement of dummy points.
+
+    o rhohat.lpp
+    New argument 'random' controls placement of dummy points.
+
+    o quadrat.test.ppm
+    Accelerated in the case where the original window is a rectangle.
+
+    o kppm, mincontrast, cauchy.estpcf, lgcp.estpcf, matclust.estpcf,
+    thomas.estpcf, vargamma.estpcf
+    A warning about infinite values of the summary function 
+    no longer occurs when the default settings are used.
+
+    o circumradius
+    This function is now deprecated, in favour of 'boundingradius'.
+
+    o print.quad
+    More information is printed.
+
+BUG FIXES
+
+    o linearK, linearpcf, and relatives:
+    These functions were sometimes greatly underestimated
+    when the network had segments shorter than 10 coordinate units.
+    [Bug introduced in spatstat 1.44-0, December 2015.]
+    Fixed.
+
+    o integral.linim, integral.linfun
+    Results were slightly inaccurate because of a bias in the
+    distribution of sample points.
+    [Bug introduced in spatstat 1.41-0, February 2015.]
+    Fixed.
+
+    o intensity.ppm
+    Result was incorrect for Gibbs models if the model was *exactly* 
+    equivalent to a Poisson process (i.e. if all interaction
+    coefficients were exactly zero).
+    [Bug introduced in spatstat 1.28-1, June 2012.]
+    Fixed.
+
+    o rSSI
+    Sometimes terminated prematurely.
+    [Spotted by Frederico Mestre.]
+    Fixed.
+
+    o perspPoints
+    Crashed if the image Z contained NA 
+    (i.e. if Z was only defined on a subset of the bounding frame).
+    [Spotted by Guy Bayegnak.]
+    Fixed.
+
+    o plot.ppp, plot.lpp
+    Crashed if the argument 'shape' was given.
+    Fixed.
+
+    o plot.kppm
+    Crashed if the model was not fitted by minimum contrast.
+    Fixed.
+
+    o superimpose
+    Crashed if the argument was a 'solist' containing line segment patterns.
+    Fixed.
+
+    o Jest
+    Crashed sometimes, depending on the shape of the observation window.
+    [Spotted by Cenk Icos.]
+    Fixed.
+
+    o plot.studpermutest
+    Crashed when the summary statistic was a multitype pair correlation 
+    function or multitype K function. [Spotted by Sven Wagner.]
+    Fixed.
+
+    o pool.anylist
+    Crashed with a message about buffer size,
+    if the list was longer than about 100 items.
+    Fixed.
+
+    o diagnose.ppm, plot.diagppm
+    Crashed in some cases when cumulative=FALSE.
+    Fixed.
+
+    o leverage.ppm, influence.ppm, dfbetas.ppm
+    Crashed sometimes with a message about wrong replacement length.
+    [Spotted by Virginia Morera Pujol.]
+    Fixed.
+
+    o as.linnet.psp
+    Crashed with marked segment patterns, if any segments were very short.
+    [Spotted by Suman Rakshit.]
+    Fixed.
+
+    o stieltjes
+    Returned NA if some values of f were not finite.
+    Fixed.
+
+    o plot.symbolmap
+    If a new plot window was initialised, it was sometimes too small
+    to contain the geometric figures (circles, squares etc) in the symbol map.
+    Fixed.
+
+    o plot.ppp, plot.im
+    Ignored xlim, ylim. 
+    Fixed.
+
+    o rhohat.lpp
+    Ignored nd, eps.
+    Fixed.
+
+    o nnfun.lpp
+    Print method gave incorrect information about the point pattern.
+    Fixed.
+
+    o "[.fv"
+    The default plot formula was not updated.
+    Fixed.
+
+    o fitted.ppm
+    The result was sometimes a 1-dimensional array rather than a numeric vector.
+    Fixed.
+
+        CHANGES IN spatstat VERSION 1.46-1
+
+OVERVIEW
+
+    o Important bug fix.
+
+    o Version nickname: 'Spoiler Alert'
+
+BUG FIXES
+
+    o density.ppp, Smooth.ppp
+    The results of density(X, at="points") and Smooth(X, at="points")
+    were incorrect in some cases. The contribution from the 
+    left-most data point (the point with the smallest x coordinate) 
+    was omitted. [Bug introduced in spatstat 1.26-0, April 2012.]
+    Fixed.
+
+        CHANGES IN spatstat VERSION 1.46-0
+
+OVERVIEW
+
+    o We thank Corey Anderson and Sebastian Meyer for contributions.
+
+    o spatstat now depends on R 3.3.0 or later.
+
+    o Improvements to inhomogeneous multitype K and L functions.
+
+    o Variance approximation for pair correlation function.
+
+    o Leverage and influence for multitype point process models.
+
+    o Functions for extracting components of vector-valued objects.
+
+    o Important bug fix in Smooth.ppp
+
+    o Minor improvements and bug fixes.
+
+    o Version nickname: 'Multidimensional Toothbrush'
+
+NEW FUNCTIONS
+
+    o split.msr
+    Decompose a measure into parts.
+
+    o unstack.msr
+    Decompose a vector-valued measure into its component measures.
+
+    o unstack.ppp, unstack.psp, unstack.lpp
+    Given a spatial pattern with several columns of marks,
+    separate the columns and return a list of spatial patterns, 
+    each having only one column of marks.
+
+    o kernel.squint
+    Integral of squared kernel, for the kernels used in density estimation.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o Kcross.inhom, Kdot.inhom, Kmulti.inhom, Ldot.inhom, Lcross.inhom
+    These functions now allow intensity values to be given by
+    a fitted point process model.
+    New arguments 'update', 'leaveoneout', 'lambdaX'.
+
+    o diagnose.ppm
+    Infinite values of 'rbord' are now ignored and treated as zero.
+    This ensures that diagnose.ppm has a sensible default
+    when the fitted model has infinite reach.
+
+    o pcf.ppp
+    Now calculates an analytic approximation to the variance of
+    the estimate of the pair correlation function (when var.approx=TRUE).
+    Now returns the smoothing bandwidth used, as an attribute of the result.
+    
+    o plot.ppp
+    When 'clipwin' is given, any parts of the boundary of the window of x
+    that lie inside 'clipwin' will also be plotted.
+
+    o plot.msr
+    Now handles multitype measures.
+    New argument 'multiplot'.
+
+    o plot.anylist
+    If a list entry x[[i]] belongs to class 'anylist', it will be expanded
+    so that each entry x[[i]][[j]] will be plotted as a separate panel.
+
+    o influence.ppm, leverage.ppm
+    These can now be applied to multitype point process models
+    and the results can be plotted.
+
+    o plot.influence.ppm, plot.leverage.ppm
+    New argument 'multiplot'.
+
+    o plot.anylist, plot.solist, plot.listof
+    New arguments 'panel.begin.args' and 'panel.end.args'.
+
+    o influence.ppm, leverage.ppm, dfbetas.ppm
+    For Gibbs models, memory usage has been dramatically reduced, 
+    so the code can handle larger datasets and finer quadrature schemes.
+
+BUG FIXES
+
+    o Smooth.ppp
+    Results were incorrect when at='points' and leaveoneout=FALSE.
+    [Bug introduced in spatstat 1.20-5, October 2010.]
+    Fixed.
+
+    o funxy
+    Did not correctly handle one-line functions:
+    the resulting objects evaluated the wrong function in some cases.
+    [Spotted by Sebastian Meyer. Bug introduced in spatstat 1.45-0.]
+    Fixed.
+
+    o mppm
+    Did not recognise the variable 'marks' in a formula.
+    Fixed.
+
+    o Smooth.ppp, bw.smoothppp
+    Crashed if X had two columns of marks and one column was constant.
+    [Bug introduced in spatstat 1.38-0, October 2014.]
+    Fixed.
+
+    o Smooth.ppp
+    Results for 'at="points"' were garbled, for some values of 'sigma',
+    if X had more than one column of marks.
+    [Bug introduced in spatstat 1.38-0, October 2014.]
+    Fixed.
+
+    o plot.layered
+    Crashed if one layer was a point pattern with several columns of marks.
+    Fixed. 
+
+    o plot.ppm
+    Sometimes gave a spurious warning about a singular matrix.
+    Fixed.
+
+    o setminus.owin
+    Gave wrong or strange answer if the correct answer was empty.
+    Fixed.
+
+    o parameters.dppm
+    Crashed, due to a typo.
+    Fixed.
+
+    o progressreport
+    Crashed if n = 1. 
+    Fixed.
+
+        CHANGES IN spatstat VERSION 1.45-2
+
+OVERVIEW
+
+    o We thank Ottmar Cronie, Virginia Morera Pujol,
+    Sven Wagner and Marie-Colette van Lieshout for contributions.
+
+    o Recursive-partition point process models.
+
+    o Minkowski sum, morphological dilation and erosion with any shape.
+
+    o Important bug fix in spatial CDF tests.
+
+    o More bug fixes for replicated patterns.
+
+    o Simulate a model fitted to replicated point patterns.
+
+    o Inhomogeneous multitype F and G functions.
+
+    o Summary functions recognise correction="all"
+
+    o Leverage and influence code handles bigger datasets.
+
+    o More support for pixel images.
+
+    o Improved progress reports.
+
+    o New dataset 'redwood3'.
+
+    o spatstat now Depends on the package 'rpart'.
+
+    o Version nickname: 'Caretaker Mode'
+
+NEW DATASETS
+
+    o redwood3
+    A more accurate version of the 'redwood' data.
+
+NEW FUNCTIONS
+
+    o as.im.data.frame
+    Build a pixel image from a data frame of coordinates and pixel values.
+
+    o covering
+    Cover a window using discs of a given radius.
+
+    o dilationAny, erosionAny, %(-)%
+    Morphological dilation and erosion by any shape.
+
+    o FmultiInhom, GmultiInhom
+    Inhomogeneous multitype/marked versions of the summary functions Fest, Gest.
+
+    o kernel.moment
+    Moment or incomplete moment of smoothing kernel.
+
+    o MinkowskiSum, %(+)%
+    Minkowski sum of two windows: A %(+)% B, or MinkowskiSum(A,B).
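+    A small sketch ('disc' creates a circular window):
+        D <- letterR %(+)% disc(0.2)   # dilation of letterR by a disc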
+
+    o nobjects
+    New generic function for counting the number of 'things' in a dataset.
+    There are methods for ppp, ppx, psp, tess.
+
+    o parameters.interact, parameters.fii
+    Extract parameters from interpoint interactions.
+    [These existing functions are now documented.]
+
+    o ppmInfluence
+    Calculate leverage.ppm, influence.ppm and dfbetas.ppm efficiently.
+
+    o rppm, plot.rppm, predict.rppm, prune.rppm
+    Recursive-partition point process models
+
+    o simulate.mppm
+    Simulate a point process model fitted to replicated point patterns.
+
+    o update.interact
+    Update the parameters of an interpoint interaction.
+    [This existing function is now documented.]
+
+    o where.max, where.min
+    Find the spatial location(s) where a pixel image achieves its
+    maximum or minimum value.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o cdf.test.mppm
+    Now handles Gibbs models.
+    Now recognises covariate="x" or "y".
+
+    o leverage.ppm, influence.ppm, dfbetas.ppm
+    For Gibbs models, memory usage has been dramatically reduced, 
+    so the code can handle larger datasets and finer quadrature schemes.
+
+    o plot.im
+    Now handles complex-valued images.
+
+    o connected.im
+    Now handles a logical-valued image properly.
+
+    o qqplot.ppm
+    Argument 'expr' can now be a list of point patterns,
+    or an envelope object containing a list of point patterns.
+
+    o as.layered
+    Default method now handles a (vanilla) list of spatial objects.
+
+    o summary functions
+    The argument 'correction="all"' is now recognised: it selects
+    all the available options. 
+    This applies to Fest, F3est, Gest, Gcross, Gdot, Gmulti, G3est, Gfox,
+    Gcom, Gres, Hest, Jest, Jmulti, Jcross, Jdot, Jfox,
+    Kest, Kinhom, Kmulti, Kcross, Kdot, Kcom, Kres,
+    Kmulti.inhom, Kcross.inhom, Kdot.inhom, Kscaled, Ksector, Kmark,
+    K3est, Lscaled, markcorr, markcrosscorr,
+    nnorient, pairorient, pcfinhom, pcfcross.inhom, pcfcross, pcf, Tstat.
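+    For example:
+        K <- Kest(cells, correction = "all")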
+
+    o clarkevans
+    The argument 'correction="all"' is now recognised: it selects
+    all the available options. [This is also the default.]
+
+    o predict.mppm
+    The argument 'type="all"' is now recognised: it selects
+    all the available options. [This is also the default.]
+
+    o plot.kppm
+    The argument 'what="all"' is now recognised: it selects
+    all the available options. [This is also the default.]
+
+    o connected.im, connected.owin
+    Arguments '...' now determine pixel resolution.
+
+    o anova.mppm
+    New argument 'fine'.
+
+    o as.owin.data.frame
+    New argument 'step'.
+
+    o discs
+    Now accepts a single numeric value for 'radii'.
+
+    o plot.ppp, plot.profilepl, plot.quadratcount, plot.quadrattest, plot.tess
+    Now recognise graphics parameters for text, such as 'family' and 'srt'.
+
+    o as.function.tess
+    New argument 'values' specifies the function values.
+
+    o cdf.test
+    Calculations are more robust against numerical rounding effects.
+
+    o progressreport
+    Behaviour improved. New arguments 'tick', 'showtime'.
+
+    o simulate.ppm
+    New argument 'verbose'.
+
+    o compileK, compilepcf
+    These internal functions are now documented.
+
+BUG FIXES
+
+    o cdf.test.ppm
+    Calculation of p-values was incorrect for Gibbs models: 
+    1-p was computed instead of p.
+    [Spotted by Sven Wagner.]
+    Fixed.
+
+    o subfits
+    The interaction coefficients of the submodels were incorrect
+    for Gibbs models with a multitype interaction (MultiStrauss, etc).
+    [Spotted by Sven Wagner.]
+    Fixed.
+
+    o subfits
+    Crashed when a Gibbs model included factor-valued spatial covariates
+    and not all levels of the factor were present in each row of the data.
+    [Spotted by Sven Wagner.]
+    Fixed.
+
+    o subfits
+    For Gibbs models with a multitype interaction (MultiStrauss, etc),
+    computation of the conditional intensity caused an error.
+    [Spotted by Sven Wagner.]
+    Fixed.
+
+    o diagnose.ppm
+    Crashed if what="smooth", when the original window was a rectangle.
+    [Spotted by Virginia Morera Pujol.]
+    Fixed.
+
+    o mppm
+    The x and y coordinates were not permitted in the 
+    random-effects formula 'random'.
+    [Spotted by Sven Wagner.]
+    Fixed.
+
+    o vcov.ppm
+    The result had no 'dimnames', if the model was fitted using method="ho".
+    Fixed.
+
+        CHANGES IN spatstat VERSION 1.45-1
+
+OVERVIEW
+
+    o This version was never released.
+
+        CHANGES IN spatstat VERSION 1.45-0
+
+OVERVIEW
+
+    o We thank Monsuru Adepeju, Mario D'Antuono, Markus Herrmann, 
+    Paul Hewson, Kassel Hingee, Greg McSwiggan, Suman Rakshit and Sven Wagner 
+    for contributions.
+
+    o Important bug fix in leverage/influence diagnostics for Gibbs models.
+
+    o Numerous bug fixes in code for replicated point patterns.
+
+    o Surgery on linear networks.
+
+    o Tessellations on a linear network.
+
+    o Laslett's Transform.
+
+    o Colour maps for point patterns with continuous marks are easier to define.
+
+    o Pair correlation function estimates can be pooled.
+
+    o Stipulate a particular version of a package.
+
+    o Fixed namespace problems arising when spatstat is not loaded.
+
+    o Bug fixes and performance improvements.
+
+    o spatstat now contains 100,000 lines of R code. 
+
+    o Version nickname: 'One Lakh'
+
+NEW FUNCTIONS
+
+    o laslett
+    Laslett's Transform.
+    [Thanks to Kassel Hingee]
+
+    o lintess
+    Tessellation on a linear network.
+
+    o divide.linnet 
+    Divide a linear network into pieces demarcated by a point pattern.
+
+    o insertVertices
+    Insert new vertices in a linear network.
+
+    o thinNetwork
+    Remove vertices and/or segments from a linear network, etc.
+
+    o connected.linnet
+    Find connected components of a linear network.
+
+    o nvertices, nvertices.linnet, nvertices.owin
+    Count the number of vertices in a linear network 
+    or vertices of the boundary of a window.
+
+    o as.data.frame.linim, as.data.frame.linfun
+    Extract a data frame of spatial locations and function values
+    from an object of class 'linim' or 'linfun'.
+
+    o as.linfun, as.linfun.linim, as.linfun.lintess
+    Convert other kinds of data to a 'linfun' object.
+
+    o requireversion
+    Require a particular version of a package
+    (for use in stand-alone R scripts).
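+    Typical usage (a sketch):
+        requireversion(spatstat, "1.45-0")   # stop unless version >= 1.45-0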
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o [.linnet, [.lpp
+    New argument 'snip' determines what to do with segments of the network
+    that cross the boundary of the window. 
+    Default behaviour has changed.
+
+    o pcfinhom
+    Default behaviour is changed when 'lambda' is a fitted model.
+    The default is now to re-fit the model to the data before computing pcf.
+    New arguments 'update' and 'leaveoneout' control this.
+
+    o envelope methods
+    New argument 'funYargs' contains arguments to the summary function
+    when applied to the data pattern only.
+
+    o plot.ppp, plot.lpp
+    For a point pattern with continuous marks ('real numbers')
+    the colour arguments 'cols', 'fg', 'bg' can now be vectors of 
+    colour values, and will be used to determine the default colour map
+    for the marks.
+
+    o symbolmap
+    Now accepts a vector of colour values for the arguments 'col', 'cols',
+    'fg', 'bg' if argument 'range' is given.
+
+    o closepairs.ppp, closepairs.pp3
+    New arguments 'distinct' and 'neat' allow more options.
+
+    o closepairs.ppp, closepairs.pp3
+    Argument 'ordered' has been replaced by 'twice'
+    (but 'ordered' is still accepted, with a warning).
+
+    o closepairs.ppp, closepairs.pp3
+    Performance improved (computation time and memory requirements reduced).
+    This should improve the performance of many functions in spatstat.
+
+    o Geyer
+    The saturation parameter 'sat' can now be less than 1.
+
+    o lpp, as.lpp
+    These functions now handle the case where 'seg' and 'tp' are given
+    but 'x' and 'y' are missing.
+
+    o linnet
+    If the argument 'edges' is given, then this argument now determines the
+    ordering of the sequence of line segments. For example, the i-th row
+    of 'edges' specifies the i-th line segment in as.psp(L).
+
+    o funxy, distfun
+    The functions created by funxy and distfun have arguments (x,y).
+    The user may now give a ppp or lpp object for the argument 'x',
+    instead of giving two coordinate vectors 'x' and 'y'.
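+    For instance (a sketch):
+        f <- distfun(cells)
+        Y <- runifpoint(5)
+        f(Y)    # equivalent to f(Y$x, Y$y)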
+
+    o crossing.psp
+    New argument 'details' gives more information about the intersections
+    between the segments.
+
+    o subset.ppp, subset.lpp, subset.pp3, subset.ppx
+    The argument 'subset' can now be any argument acceptable to the "[" method.
+    
+    o density.lpp
+    New argument 'weights'.
+
+    o pcf.ppp
+    New argument 'ratio' allows several estimates of pcf to be pooled.
+
+    o summary.ppm
+    New argument 'fine' selects the algorithm for variance estimation.
+
+    o texturemap
+    Argument 'textures' can be missing or NULL.
+
+    o plot.lpp
+    New argument 'show.network'.
+
+    o linnet
+    New argument 'warn'.
+
+    o mppm
+    Performs more checks for consistency of the input data.
+    
+    o mppm
+    New arguments 'gcontrol' and 'reltol.pql' control the fitting algorithm.
+
+    o edge.Trans
+    New argument 'gW' for efficiency.
+
+    o pool.fv
+    The default plot of the pooled function no longer includes 
+    the variance curves.
+
+    o clickpoly
+    The polygon is now drawn progressively as the user clicks new vertices.
+    
+    o Kest 
+    Accelerated computation (for translation and rigid corrections)
+    when the window is an irregular shape.
+
+    o vcov.ppm, leverage.ppm, influence.ppm, dfbetas.ppm
+    Performance slightly improved, for Gibbs models.
+
+    o Internal code
+    Performance slightly improved.
+
+    o Fest, Hest
+    Additional checks for errors in input data.
+
+BUG FIXES
+
+   o leverage.ppm, influence.ppm, parres.ppm, addvar.ppm
+   Calculations were completely incorrect for Gibbs models, 
+   due to a coding error. 
+   Fixed.
+
+   o update.kppm
+   If the call to 'update' did not include a formula argument
+   or a point pattern argument, then all arguments were ignored. 
+   Example: update(fit, improve.type="quasi") was identical to 'fit'.
+   Fixed.
+
+   o diagnose.ppm
+   When applied to a model obtained from subfits(), 
+   in the default case (oldstyle=FALSE) 
+   the variance calculations were incorrect.
+   Consequently the dotted lines representing significance bands were 
+   incorrect. An error or warning about negative variances occurred sometimes.
+   However, calculations with oldstyle=TRUE were correct.
+   The default has now been changed to oldstyle=TRUE for such models.
+
+   o [.lpp
+   The local coordinate 'seg' was completely incorrect,
+   when 'i' was a window.
+   Fixed.
+
+   o leverage.ppm, influence.ppm, parres.ppm, addvar.ppm
+   Crashed for Gibbs models in which the coefficient vector had length 1,
+   such as the stationary Hardcore model.
+   Fixed.
+
+   o subfits
+   Crashed if the model included factor-valued spatial covariates.
+   [Spotted by Sven Wagner]
+   Fixed.
+
+   o subfits
+   If the model included factor-valued spatial covariates, and if
+   not all levels of the factor were present in each row of the data,
+   the resulting objects were malformed and caused errors in other code.
+   [Spotted by Sven Wagner]
+   Fixed.
+
+   o subfits
+   Crashed with some random-effects formulas.
+   [Spotted by Sven Wagner]
+   Fixed.
+
+   o improve.kppm
+   An error message about a missing object 'gminus1' occurred
+   when vcov=TRUE, fast.vcov=FALSE and type="clik1" or "wclik1".
+   Fixed.
+
+   o plot.profilepl
+   Failed with a message about a missing object 'finite'.
+   Fixed.
+
+   o selfcut.psp
+   Gave an error if marks(A) was a vector rather than a data frame.
+   [Spotted by Paul Hewson.]
+   Fixed.
+
+   o suffstat
+   Gave an error for point process models with Geyer interaction.
+   Fixed.
+
+   o nncross.lpp, distfun.lpp
+   Crashed with obscure errors if Y consisted of a single point.
+   Fixed.
+
+   o scan.test, scanmeasure
+   Crashed sometimes with an error message from 'grow.mask'.
+   Fixed.
+
+   o dppm
+   Crashed sometimes with a message that the point pattern could not be found.
+   [Scoping bug.]
+   Fixed.
+
+   o mppm, profilepl
+   Crashed, with a message about 'SpatstatVersion',
+   if the 'spatstat' package was neither loaded nor attached.
+   [Spotted by Markus Herrmann.]
+   Fixed.
+
+   o qqplot.ppm
+   Crashed sometimes when applied to a model obtained from subfits().
+   Fixed.
+
+   o anova.mppm
+   Crashed sometimes with a message about mismatched coefficients.
+   [Spotted by Sven Wagner.]
+   Fixed.
+
+   o anova.mppm
+   Crashed sometimes with a message about unrecognised option 'type="score"'.
+   [Spotted by Sven Wagner.]
+   Fixed.
+
+   o split.ppx
+   Crashed if 'f' was not a factor.
+   Fixed.
+
+   o idw
+   The result was a pixel image defined in the rectangle Frame(X)
+   instead of Window(X). 
+   Fixed.
+
+   o ppm
+   Parameter estimates were slightly inaccurate 
+   when the model included the Geyer() interaction 
+   and the "isotropic" edge correction was used.
+   Fixed. 
+
+   o [.ppx
+   Crashed if the number of points selected was less than 2.
+   Fixed.
+
+   o linnet
+   Crashed if there were no line segments at all.
+   Fixed.
+
+   o kppm, improve.kppm
+   Crashed if the model was stationary and improve.type != "none".
+   Fixed.
+
+   o as.linim.default
+   Did not correctly handle factor-valued data.
+   Fixed.
+
+   o texturemap
+   Crashed if no graphical arguments were specified.
+   Fixed.
+
+   o vcov.mppm
+   Ignored "..." arguments.
+   Fixed.
+
+   o Kest
+   If ratio=TRUE and correction=c('border', 'none') 
+   the result did not contain ratio information.
+   Fixed.
+   
+   o plot.ppp, plot.lpp
+   Arguments 'chars' and 'cols' were ignored in some cases.
+   Fixed.
+
+   o ppm
+   Ignored argument 'emend'.
+   Fixed.
+
+   o plot.dppm
+   Gave warnings about unrecognised argument 'objectname'.
+   Fixed.
+
+   o overlap.owin
+   Sometimes returned a very small negative value, 
+   when the correct answer was 0.
+   Fixed.
+
+        CHANGES IN spatstat VERSION 1.44-1
+
+OVERVIEW
+
+    o We thank Brian Ripley for contributions.
+
+    o Urgent bug fix.
+
+    o More support for replicated point patterns.
+
+    o More support for tessellations.
+
+    o Version nickname: 'Gift Horse'
+
+NEW FUNCTIONS
+
+   o as.function.tess
+   Convert a tessellation to a function(x,y). The function value
+   indicates which tile of the tessellation contains the point (x,y).
+
+   o tileindex
+   Determine which tile of a tessellation contains a given point (x,y).
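+
+   A minimal sketch of both functions, using an arbitrary Dirichlet
+   tessellation as the example:
+
+      Z <- dirichlet(runifpoint(6))  # a random tessellation of the unit square
+      f <- as.function(Z)            # f(x,y) gives the tile containing (x,y)
+      f(0.5, 0.5)
+      tileindex(0.5, 0.5, Z)         # the same query, via tileindex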
+
+   o persp.leverage.ppm
+   Method for persp plots for objects of class leverage.ppm
+
+   o AIC.mppm, extractAIC.mppm
+   AIC for point process models fitted to replicated point patterns.
+
+   o nobs.mppm, terms.mppm, getCall.mppm
+   Methods for point process models fitted to replicated point patterns.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o anova.mppm
+   Now handles Gibbs models, and performs the
+   adjusted composite likelihood ratio test.
+
+   o update, step
+   These functions now work for models of class 'mppm'.
+
+   o textureplot
+   Argument 'x' can now be anything acceptable to as.im.
+
+   o logLik.mppm
+   New argument 'warn'.
+
+BUG FIXES
+
+  o nncross.lpp, nnwhich.lpp, distfun.lpp
+  Caused a segmentation fault.
+  [Spotted by Brian Ripley.]
+  Fixed.
+
+  o anova.ppm
+  If a single 'object' was given, and the object was a Gibbs model,
+  then 'adjust' was effectively set to FALSE.
+  Fixed.
+
+        CHANGES IN spatstat VERSION 1.44-0
+
+OVERVIEW
+
+   o We thank Jonas Geldmann, Andrew Hardegen, Kassel Hingee,
+     Tom Lawrence, Robin Milne, Gopalan Nair, Suman Rakshit, Peijian Shi
+     and Rasmus Waagepetersen for contributions.
+  
+   o More support for multidimensional point patterns and point processes.
+
+   o More options for envelopes and related Monte Carlo tests.
+
+   o More support for model comparison.
+
+   o k-th nearest neighbours on a linear network.
+
+   o Penttinen process can be simulated (by Metropolis-Hastings or CFTP).
+
+   o Calculate the predicted variance of number of points.
+
+   o Convexifying operation for sets.
+
+   o Subdivide a linear network.
+
+   o Accelerated algorithms for linear networks.
+
+   o Quadrat counting accelerated, in some cases.
+
+   o Version nickname: 'The Sound of One Hand Typing'
+
+NEW FUNCTIONS
+
+   o rPenttinen
+     Simulate the Penttinen process using perfect simulation.
+
+   o varcount
+     Given a point process model, compute the predicted variance
+     of the number of points falling in a window.
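+
+     For example, a minimal sketch using the standard 'cells' dataset:
+
+        fit <- ppm(cells ~ 1)            # homogeneous Poisson model
+        varcount(fit, B=Window(cells))   # predicted variance of the count in B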
+
+   o inside.boxx
+     Test whether multidimensional points lie inside a specified 
+     multidimensional box.
+
+   o lixellate
+     Divide each segment of a linear network into smaller segments.
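+
+     A minimal sketch, using the built-in network 'simplenet':
+
+        L <- lixellate(simplenet, nsplit=3)  # split each segment into 3 pieces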
+
+   o nsegments.linnet, nsegments.lpp
+     Count the number of line segments in a linear network.
+
+   o grow.boxx
+     Expand a multidimensional box.
+
+   o deviance.ppm, deviance.lppm
+     Deviance for a fitted point process model.
+
+   o pseudoR2
+     Pseudo-R-squared for a fitted point process model.
+
+   o tiles.empty
+     Checks whether each tile of a tessellation is empty or nonempty.
+
+   o summary.linim
+     Summary for a pixel image on a linear network.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o rMaternI, rMaternII
+     These functions can now generate random patterns in 
+     three dimensions and higher dimensions, when the argument
+     'win' is of class 'box3' or 'boxx'.
+
+   o "[.ppx"
+     The subset index 'i' may now be a spatial domain
+     of class 'boxx' or 'box3'.
+
+   o rmh.ppm, rmhmodel.ppm, simulate.ppm
+   A model fitted using the 'Penttinen' interaction can now be simulated.
+
+   o rmh.default, rmhmodel.default
+   These functions now recognise cif='penttinen' for the Penttinen interaction.
+
+   o envelope
+   New argument 'clamp' gives greater control over one-sided envelopes.
+
+   o dclf.test, mad.test, dclf.progress, mad.progress, 
+     dclf.sigtrace, mad.sigtrace
+   New argument 'clamp' determines the test statistic for one-sided tests.
+
+   o dclf.progress, mad.progress, dclf.sigtrace, mad.sigtrace,
+    mctest.progress, mctest.sigtrace, dg.progress, dg.sigtrace
+   New argument 'rmin' determines the left endpoint of the test interval.
+
+   o dclf.test, mad.test, dg.test, dg.progress, dg.sigtrace, dg.envelope
+    (also accepted by dclf.progress, mad.progress, dclf.sigtrace, mad.sigtrace)
+   New argument 'leaveout' specifies how to calculate the deviation
+   between the observed summary function and nominal reference value.
+
+   o envelope
+   New argument 'funargs', a list of extra arguments to be passed to the
+   summary function 'fun'.
+
+   o Hest
+   Argument X can now be a pixel image with logical values.
+   New argument 'W'. [Based on code by Kassel Hingee.]
+
+   o nncross.lpp, distfun.lpp
+   New argument 'k' allows calculation of k-th nearest neighbour.
+   Computation accelerated.
+
+   o logLik.ppm
+   New argument 'absolute'.
+
+   o plot.kppm
+   New arguments 'pause' and 'xname'.
+
+   o tess
+   Argument 'window' is ignored when xgrid, ygrid are given.
+
+   o as.polygonal
+   Can now repair errors in polygon data, if repair=TRUE.
+
+   o rStrauss, rHardcore, rStraussHard, rDiggleGratton, rDGS, rPenttinen
+   New argument 'drop'.
+
+   o Kest.fft
+   Now has '...' arguments allowing control of spatial resolution.
+
+   o lppm
+   Computation accelerated.
+
+   o quadratcount.ppp
+   Computation accelerated in some cases.
+
+   o dg.test
+   Computation accelerated.
+
+BUG FIXES
+
+   o runifpointx, rpoisppx
+   Crashed if nsim > 1.
+   Fixed.
+
+   o triangulate.owin
+   Results were incorrect in some special cases.
+   Fixed.
+
+   o quadrat.test, clarkevans.test
+   In rare cases, the computed Monte Carlo p-value could have been
+   greater than 1. This could have occurred only when nsim was an even number
+   and when the correct p-value was equal to 1.
+   Fixed.
+
+   o linearmarkequal
+   Result was a data frame instead of an 'fv' object.
+   Fixed.
+
+   o point-in-polygon test
+   The function inside.owin could take a very long time to check
+   whether points are inside a polygonal window, if the coordinates
+   were very large numbers. This was due to numerical overflow.
+   Believed to be fixed.
+
+   o as.fv.kppm	
+   Crashed if the model was not fitted by minimum contrast.
+   Fixed.
+
+   o plot.fv
+   Crashed in some obscure cases.
+   Fixed.
+
+   o collapse.fv
+   Did not allow 'same=NULL'.
+   Fixed.
+
+   o dclf.progress, mad.progress, dg.progress, 
+     dclf.sigtrace, mad.sigtrace, dg.sigtrace
+   The results could not be re-plotted using a plot formula,
+   because the internal data were slightly corrupted.
+   Fixed.
+
+   o Kest.fft
+   Result was incorrectly normalised.
+   Fixed.
+
+   o crosspairs
+   If X and Y were identical point patterns,
+   the result was not necessarily symmetric
+   (on some machines) due to numerical artifacts.
+   Fixed.
+
+   o plot.fv
+   Lines were not correctly clipped to the plot region when 'ylim' was given.
+   Fixed.
+
+   o pool.envelope
+   The 'scale' argument was not handled correctly.
+   Fixed.
+
+        CHANGES IN spatstat VERSION 1.43-0
+
+OVERVIEW
+
+   o We thank Leanne Bischof, Christophe Biscio, Belarmain Fandohan,
+   Andrew Hardegen, Frederic Lavancier, Tom Lawrence, Martin Maechler, 
+   Greg McSwiggan, Robin Milne, Gopalan Nair, Tuomas Rajala, 
+   Suman Rakshit, Ben Ramage, Francois Semecurbe and Ida-Maria Sintorn 
+   for contributions.
+
+   o spatstat now depends on the package 'nlme'.
+
+   o spatstat now depends on R 3.2.2 or later.
+
+   o Simulation algorithms have been accelerated; simulation outcomes 
+   are *not* identical to those obtained from previous versions of spatstat. 
+
+   o Determinantal point process models.
+
+   o Random-effects and mixed-effects models for replicated patterns.
+
+   o Dao-Genton test, and corresponding simulation envelopes.
+
+   o Simulated annealing and simulated tempering.
+
+   o spatstat colour tools now handle transparent colours.
+
+   o Improvements to "[" and subset() methods
+
+   o Extensions to kernel smoothing on a linear network.
+
+   o Support for one-dimensional smoothing kernels.
+
+   o Bug fix in Metropolis-Hastings simulation.
+
+   o Mark correlation function may include weights.
+
+   o Cross-correlation version of the mark correlation function.
+
+   o Variance calculations for replicated patterns.
+
+   o Penttinen pairwise interaction model.
+
+   o Contour plots with colours determined by a colour map.
+
+   o New dataset: Australian states and territories.
+
+   o More support for multi-dimensional point patterns.
+
+   o Minor improvements and bug fixes.
+
+   o Version nickname: "Mixed Effects"
+
+NEW DATASET
+
+   o austates
+   The states and large mainland territories of Australia
+   represented as polygonal regions forming a tessellation.
+
+NEW FUNCTIONS
+
+   o dppm
+   Fit a determinantal point process model to point pattern data.
+
+   o fitted.dppm, predict.dppm, intensity.dppm
+   Predict a fitted dppm object.
+
+   o logLik.dppm, AIC.dppm, extractAIC.dppm, nobs.dppm
+   Likelihood and AIC for determinantal point process models
+   (enabling the use of 'step')
+
+   o coef.dppm, formula.dppm, print.dppm, terms.dppm, labels.dppm,
+   model.frame.dppm, model.matrix.dppm, model.images.dppm,
+   is.stationary.dppm, reach.dppm, unitname.dppm, unitname<-.dppm, Window.dppm
+    Various methods for dppm objects.
+
+   o parameters.dppm
+   Extract meaningful list of model parameters
+
+   o objsurf.dppm
+   Objective function surface of a dppm object
+
+   o residuals.dppm
+   Residual measure for a dppm object.
+
+   o dppBessel, dppCauchy, dppGauss, dppMatern,	dppPowerExp
+   Determinantal Point Process models.
+
+   o update.dppmodel            
+   Set parameter values in a dpp model.
+
+   o is.stationary.dppmodel, print.dppmodel, reach.dppmodel, valid.dppmodel    
+   Basic information about a dpp model
+
+   o rdpp, simulate.dppmodel		 
+   Simulation of a dpp model.
+
+   o intensity.dppmodel, Kmodel.dppmodel, pcfmodel.dppmodel
+   Moments of a dpp model
+
+   o dim.dppmodel, dppapproxkernel, dppapproxpcf, dppeigen, dppfamily,
+   dppkernel, dppparbounds, dppspecdenrange, dppspecden
+   Helper functions for dpp models.
+
+   o dclf.sigtrace, mad.sigtrace, mctest.sigtrace
+   Significance trace of Monte Carlo test
+
+   o dg.test
+   Dao-Genton adjusted Monte Carlo goodness-of-fit test.
+
+   o dg.envelope
+   Simulation envelopes corresponding to Dao-Genton test.
+
+   o dg.sigtrace
+   Significance trace for Dao-Genton test
+
+   o dg.progress
+   Progress plot for Dao-Genton test
+
+   o markcrosscorr
+   Mark cross-correlation function for point patterns with
+   several columns of marks
+
+   o fixef.mppm, ranef.mppm
+   Extract fixed effects and random effects from a point process model
+   fitted to replicated point patterns.
+
+   o rtemper
+   Simulated annealing or simulated tempering.
+
+   o to.opaque, to.transparent
+   Change transparency value in colours
+
+   o rgb2hsva
+   Convert RGB to HSV data, like rgb2hsv, but preserving transparency.
+
+   o superimpose.ppplist, superimpose.splitppp
+   New methods for 'superimpose' for lists of point patterns.
+
+   o dkernel, pkernel, qkernel, rkernel
+   Probability density, cumulative probability, quantiles
+   and random generation from distributions used in basic one-dimensional
+   kernel smoothing.
+
+   o kernel.factor
+   Auxiliary calculations for one-dimensional kernel smoothing.
+
+   o PPversion, QQversion
+   Transformation of a summary function to its P-P or Q-Q counterpart.
+
+   o spatdim
+   Spatial dimension of any object in the spatstat package.
+
+   o as.boxx
+   Convert data to a multi-dimensional box.
+
+   o intensity.ppx
+   Method for 'intensity' for multi-dimensional space-time point patterns.
+
+   o fourierbasis
+   Evaluate Fourier basis functions in any number of dimensions.
+
+   o valid
+   New generic function, with methods valid.ppm, valid.lppm, valid.dppmodel
+   
+   o emend, emend.ppm, emend.lppm
+   New generic function with methods for ppm and lppm.
+   emend.ppm is equivalent to project.ppm
+
+   o Penttinen
+   New pairwise interaction model.
+
+   o quantile.density
+   Calculates quantiles from kernel density estimates.
+
+   o CDF.density
+   Calculates cumulative distribution function from kernel density estimates.
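+
+   A minimal sketch of both functions, applied to an ordinary
+   one-dimensional kernel estimate:
+
+      x <- rnorm(100)
+      d <- density(x)                       # kernel estimate of class 'density'
+      quantile(d, probs=c(0.25, 0.5, 0.75)) # quantiles of the estimated density
+      CDF(d)(0)                             # estimated value of P(X <= 0)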
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o simulation
+   Several basic simulation algorithms have been accelerated.
+   Consequently, simulation outcomes are not identical to 
+   those obtained with previous versions of spatstat, even when the
+   same random seed is used. To ensure compatibility with previous
+   versions of spatstat, revert to the slower code by setting
+   spatstat.options(fastthin=FALSE, fastpois=FALSE).
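+
+   For example, a sketch of reproducing results from older versions:
+
+      spatstat.options(fastthin=FALSE, fastpois=FALSE)
+      set.seed(42)
+      X <- rthin(runifpoint(200), P=0.5)  # uses the old, slower thinning code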
+
+   o mppm
+   Now handles models with a random effect component.
+   New argument 'random' is a formula specifying the random effect.
+
+   o vcov.mppm
+   Now handles models with Gibbs interactions.
+
+   o [.ppp
+   New argument 'clip' determines whether the window is clipped.
+
+   o [.ppp
+   The previously-unused argument 'drop' now determines whether 
+   to remove unused levels of a factor.
+
+   o [.pp3, [.lpp, [.ppx, subset.ppp, subset.pp3, subset.lpp, subset.ppx
+   These methods now have an argument 'drop' which determines
+   whether to remove unused levels of a factor.
+
+   o density.lpp
+   Now supports both the 'equal-split continuous' and 
+   'equal-split discontinuous' smoothers. New argument 'continuous' 
+   determines the choice of smoother.
+
+   o envelope
+   New argument 'scale' allows global envelopes to have 
+   width proportional to a specified function of r,
+   rather than constant width.
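+
+   A minimal sketch (the choice of scale function is illustrative):
+
+      E <- envelope(cells, Kest, nsim=19, global=TRUE,
+                    scale=function(r) { r })  # width proportional to r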
+
+   o dclf.test, mad.test, dclf.progress, mad.progress, mctest.progress
+   New argument 'scale' allows summary function values to be rescaled
+   before the comparison is performed.
+
+   o dclf.test, mad.test
+   New argument 'interpolate' supports interpolation of p-value.
+
+   o dclf.progress, mad.progress, mctest.progress
+   New argument 'interpolate' supports interpolation of critical value of test.
+
+   o simulate.ppm
+   New argument 'w' controls the window of the simulated patterns.
+
+   o default.rmhcontrol, default.rmhexpand
+   New argument 'w'.
+
+   o markcorr
+   New argument 'weights' allows computation of the weighted version
+   of the mark correlation function.
+
+   o density.lpp
+   New argument 'kernel' specifies the smoothing kernel.
+   Any of the standard one-dimensional smoothing kernels can be used.
+
+   o contour.im
+   New argument 'col' specifies the colour of the contour lines.
+   If 'col' is a colour map, then the contours are drawn in different colours.
+
+   o plot.ppp
+   The default colour for the points is now a transparent grey,
+   if this is supported by the plot device.
+
+   o rgbim, hsvim
+   New argument 'A' controls the alpha (transparency) channel.
+
+   o rgb2hex, col2hex, paletteindex, is.colour, samecolour,
+   complementarycolour, is.grey, to.grey
+   These colour tools now handle transparent colours.
+
+   o rgb2hex
+   New argument 'maxColorValue'
+
+   o to.grey
+   New argument 'transparent'.
+
+   o progressreport
+   New argument 'state'
+   New option: style="tk"
+
+   o rLGCP
+   This function no longer requires the package 'RandomFields'
+   to be loaded explicitly. 
+
+   o kppm
+   Fitting a model with clusters="LGCP" no longer requires the
+   package 'RandomFields' to be loaded explicitly.
+
+   o rpoispp
+   Accelerated, when 'lambda' is a pixel image.
+
+   o rthin
+   Accelerated, when 'P' is a single number.
+
+   o spatstat.options
+   New options 'fastthin' and 'fastpois' enable fast simulation algorithms.
+   Set these options to FALSE to reproduce results obtained with
+   previous versions of spatstat.
+
+   o split.ppp
+   The splitting variable 'f' can now be a logical vector.
+
+   o collapse.fv
+   This is now treated as a method for the 'nlme' generic 'collapse'.
+   Its syntax has been adjusted slightly.
+
+   o diagnose.ppm, plot.diagppm
+   New arguments col.neg, col.smooth control the colour maps.
+
+   o valid.ppm
+   This is now a method for the generic function 'valid'.
+
+   o ppm.ppp, ppm.quad
+   New argument 'emend', equivalent to 'project'.
+
+   o "[<-.im"
+   Accepts an array for 'value'.
+
+   o as.im.function
+   New argument 'strict'.
+
+   o bw.ppl
+   New argument 'weights'.
+
+   o plot.mppm
+   New argument 'se'.
+   
+   o dclf.test, mad.test
+   Formal arguments 'use.theo' and 'internal' have been removed.
+
+   o predict.kppm, residuals.kppm
+   Now issues a warning when the calculation ignores the 
+   cluster/Cox component and treats the model as if it were Poisson.
+   (This currently happens in predict.kppm when se=TRUE or interval != "none",
+   and in residuals.kppm when type != "raw").
+
+BUG FIXES
+
+   o lpp
+   Crashed if X was a 4-column matrix.
+   Fixed.
+
+   o plot.fv
+   Crashed with some graphics devices, if legend=TRUE.
+   Fixed.
+
+   o effectfun
+   Crashed if 'covname' was missing.
+   Fixed.
+
+   o rVarGamma, rMatClust, rThomas, rCauchy, rNeymanScott
+   Crashed if 'kappa' was a function or image instead of a single number.
+   [Spotted by Ben Ramage.]
+   Fixed.
+
+   o plot.mppm
+   Crashed with a message about "figure margins too large"
+   unless the argument se=FALSE was given explicitly.
+   Fixed.
+
+   o opening.owin, closing.owin
+   Crashed sometimes, with a message about a rectangle not containing a window.
+   Fixed.
+
+   o persp.im
+   Crashed if all pixel values were equal to zero (unless zlim was given).
+   Fixed.
+
+   o predict.ppm
+   Crashed sometimes if the model was fitted with use.gam=TRUE.
+   Fixed.
+
+   o as.linim.linfun
+   Generated an error ('L must be a linear network')
+   if extra arguments were given.
+   Fixed.
+
+   o as.function.fv
+   Generated an error when executed in the 'covr' package.
+   Fixed.
+
+   o rmh, simulate.ppm
+   Results were incorrect for inhomogeneous multitype models
+   simulated with fixall=TRUE (i.e. prescribing a fixed number of
+   points of each type) if the model was segregated 
+   (i.e. if different types of points had different first order trend).
+   Fixed.
+
+   o dclf.progress, mad.progress
+   Ignored the argument 'alternative'. 
+   Fixed.
+   
+   o $<-.hyperframe, [<-.hyperframe
+   Result was garbled if 'value' was a hyperframe with one column.
+   Fixed.
+
+   o rmh.ppm
+   Argument 'w' was ignored in some cases.
+   Fixed.
+
+   o Hest
+   There was an artefact at r=0 when conditional=TRUE.
+   Fixed.
+
+   o [.msr
+   The result of M[W] where W is a window
+   was a measure with window W, instead of intersect.owin(W, Window(M)).
+   Fixed.
+
+   o pool.envelope
+   Did not always respect the value of 'use.theory'.
+   Fixed.
+
+   o envelope, pool.envelope
+   If 'ginterval' was given, the results were in a slightly incorrect format.
+   Fixed.
+
+   o pool.envelope
+   Did not check for compatible values of 'ginterval'.
+   Fixed.
+
+        CHANGES IN spatstat VERSION 1.42-2
+
+OVERVIEW
+
+   o We thank Bob Klaver and Harold-Jeffrey Ship for contributions.
+
+   o Improvements to simulation of Neyman-Scott processes.
+
+   o Improvements to fitting of Neyman-Scott models.
+
+   o Extended functionality for pixel images.
+
+   o Fitted intensity on linear network
+
+   o Triangulation of windows.
+
+   o Corrected an edge correction.
+
+   o Bug fixes and performance improvements.
+
+   o Nickname: 'Barking at Balloons'
+
+NEW FUNCTIONS
+
+   o triangulate.owin
+   Decompose a spatial window into triangles.
+
+   o fitted.lppm
+   Fitted intensity values for a point process on a linear network.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o rThomas, rMatClust, rCauchy, rVarGamma
+   When the model is approximately Poisson, it is simulated using rpoispp. 
+   This avoids computations which would require huge amounts of memory. 
+   New argument 'poisthresh' controls this behaviour.
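+
+   A minimal sketch (parameter values are illustrative; the large 'scale'
+   makes the model approximately Poisson):
+
+      X <- rThomas(kappa=10, scale=5, mu=2, poisthresh=1e-3)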
+
+   o update.kppm
+   Now handles additional arguments in any order, with or without names.
+   Changed arguments. Improved behaviour.
+
+   o kppm, clusterfit
+   New argument 'algorithm' specifies the choice of optimisation algorithm.
+
+   o kppm
+   Left hand side of formula can now involve entries in the list 'data'.
+
+   o rotmean
+   New argument 'padzero'. 
+   Default behaviour has changed.
+
+   o rose.default
+   New argument 'weights'.
+
+   o rose
+   New arguments 'start' and 'clockwise' specify the convention
+   for measuring and plotting angles.
+
+   o padimage 
+   New argument 'W' allows an image to be padded out to fill any window.
+
+   o union.owin
+   Improved behaviour when there are more than 2 windows.
+
+   o clusterset
+   Improved behaviour.
+
+   o affine.owin
+   Allows transformation matrix to be singular, if the window is polygonal.
+
+BUG FIXES
+
+   o spatstat
+   spatstat could not be installed on some 64-bit VM systems
+   because of an apparent bug in R.
+   Fixed.
+
+   o rThomas, rMatClust, rCauchy, rVarGamma
+   Large values of the scale parameter could cause the algorithm to freeze
+   or require huge amounts of memory.
+   Fixed.
+
+   o pcf, pcfinhom
+   Crashed if the point pattern was empty.
+   Fixed.
+
+   o plot.fv
+   Gave an error message if all 'y' values were equal, when legend=TRUE.
+   Fixed.
+
+   o rose.default
+   Display was incorrect when unit="radian".
+   Fixed.
+
+   o Kest
+   Ohser-Stoyan rigid motion correction (correction='rigid')
+   was calculated incorrectly at large distances.
+   Fixed.
+
+   o summary.im
+   Issued a warning about numerical overflow in some cases.
+   [Spotted by Bob Klaver.]
+   Fixed.
+
+   o plot.im
+   Sometimes warned that 'box' is not a graphical parameter.
+   Fixed.
+
+        CHANGES IN spatstat VERSION 1.42-1
+
+OVERVIEW
+
+   o We thank Andrew Hardegen, Tom Lawrence, Robin Milne, Suman Rakshit,
+   and Brian Ripley for contributions.
+
+   o Urgent bug fix.
+
+   o More robust simulation of cluster processes.
+
+   o Slightly accelerated.
+
+   o Version nickname: 'Vogon Poetry'
+
+NEW FUNCTIONS
+
+   o boundingbox.solist
+   Method for boundingbox for lists of spatial objects.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o rThomas, rMatClust, rCauchy, rVarGamma, rNeymanScott
+   New faster algorithm which is more robust against extreme values of
+   the parameters.
+
+   o rNeymanScott
+   New argument 'nonempty' controls choice of algorithm.
+
+   o solist, as.solist
+   Accelerated. 
+
+   o as.list.hyperframe
+   Accelerated. 
+
+BUG FIXES
+
+   o residuals.mppm
+   Brought some computers to a grinding halt due to the bug in solist()
+   described below.
+   Fixed.
+
+   o solist, as.solist
+   In rare cases, the format was corrupted, or the algorithm never terminated.
+   Fixed.
+
+        CHANGES IN spatstat VERSION 1.42-0
+
+OVERVIEW
+
+   o We thank Anders Bilgrau, Ute Hahn, Jack Hywood, Tuomas Rajala,
+   Cody Schank, Olivia Semboli and Ben Taylor for contributions.
+
+   o Version nickname: 'Life, The Universe and Everything'
+
+   o Permutation test for difference between groups of point patterns.
+
+   o Variational Bayes estimation for point process models.
+
+   o Score test in anova.ppm
+
+   o ROC curve, and discrimination index AUC, for fitted models.
+   
+   o Interactive text editor for spatial datasets.
+
+   o Tools for analysing data on a tree. 
+
+   o Kernel density/intensity estimation on a linear network.
+
+   o Random pixel noise.
+
+   o Improved behaviour of polygon geometry operations.
+
+   o Improved support for cluster and Cox models.
+
+   o Improved basic support for owin objects.
+
+   o Improved support for tessellations.
+
+   o More hierarchical Gibbs interactions.
+
+   o Modifications to Kest.
+
+   o summary method for Cox and cluster models.
+
+   o class 'listof' is almost completely replaced by 'anylist' and 'solist'.
+
+   o Improvements and bug fixes.
+
+   o spatstat now depends on R version 3.2.0 or later.
+
+NEW FUNCTIONS
+
+   o studpermu.test
+   Studentised permutation test for difference between groups of point patterns.
+   Generously contributed by Ute Hahn.
+
+   o AIC.kppm, extractAIC.kppm, logLik.kppm, nobs.kppm
+   Methods for computing AIC for fitted Cox and cluster models.
+
+   o transmat
+   Convert pixel arrays between different display conventions.
+
+   o roc
+   Receiver Operating Characteristic curve.
+
+   o auc
+   Discrimination index AUC (area under the ROC curve) 
+
+   o edit.ppp, edit.psp, edit.im
+   Interactive text editor works for spatial datasets.
+
+   o edit.hyperframe
+   Interactive text editor works for hyperframes.
+
+   o parameters
+   Extract all parameters from a fitted model.
+
+   o density.lpp
+   Kernel estimation of point process intensity on a linear network.
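+
+   A minimal sketch, using the built-in network 'simplenet':
+
+      X <- runiflpp(30, simplenet)  # random points on the network
+      D <- density(X, sigma=0.1)    # estimated intensity on the network
+      plot(D)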
+
+   o extractbranch, deletebranch, treeprune, treebranchlabels, begins
+   Tools for analysing data on a tree.
+
+   o rnoise
+   Random pixel noise.
+
+   o as.data.frame.owin
+   Convert a window to a data frame.
+
+   o harmonise.owin
+   Convert several binary mask windows to a common pixel grid.
+
+   o copyExampleFiles
+   Copy the raw data files from an installed dataset
+   to a chosen folder, for use in a practice exercise.
+
+   o density.ppplist
+   Method for 'density' for lists of point patterns.
+
+   o inradius
+   Radius of largest circle inside a window.
+
+   o mergeLevels
+   Merge different levels of a factor.
+   
+   o relevel.im, relevel.ppp, relevel.ppx
+   Change the reference level of a factor.
+
+   o simulate.profilepl
+   simulation method for models fitted by profile maximum pseudolikelihood.
+
+   o predict.rho2hat
+   Prediction method for class rho2hat
+
+   o with.msr
+   Evaluate (an expression involving) components of a measure. 
+
+   o summary.kppm, print.summary.kppm, coef.summary.kppm
+   Methods for 'summary' and 'coef(summary(..))' for Cox and cluster models.
+
+   o as.im.funxy
+   Method for as.im for class funxy.
+
+   o shift.linim, scalardilate.linim, affine.linim
+   Geometrical transformations for 'linim' objects.
+
+   o Smooth.solist
+   Smooth method for a list of spatial objects.
+
+   o unitname.tess, unitname<-.tess
+   Tessellations now keep track of the name of the unit of length.
+
+   o dirichletAreas
+   Faster algorithm for tile.areas(dirichlet(X)).
+
+   o identify.lpp
+   Method for 'identify' for point patterns on a linear network.
+
+   o HierStraussHard, HierHard
+   Hierarchical interactions for Gibbs models.
+
+   o delaunayDistance, delaunayNetwork, dirichletEdges, 
+   dirichletNetwork, dirichletVertices, dirichletWeights
+   These functions will replace delaunay.distance, delaunay.network,
+   dirichlet.edges, dirichlet.network, dirichlet.vertices and
+   dirichlet.weights respectively.
+   The latter are now 'deprecated'.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o ppm
+   Now supports Variational Bayes fitting method.
+
+   o kppm
+   'AIC' and 'step' now work for kppm objects
+   fitted using maximum Palm likelihood.
+
+   o kppm
+   The default for the weight function 'weightfun' has been changed, 
+   for better performance.
+
+   o envelope
+   envelope methods now have argument 'use.theory' specifying whether
+   to use the 'theoretical' value of the summary function when constructing
+   simultaneous envelopes.
+
+   o anova.ppm
+   Now performs the Score Test, for Poisson models only,
+   if argument test="Rao" or test="score".
+
+   o Kest
+   New argument 'rmax' controls maximum value of argument 'r'
+
+   o diagnose.ppm
+   Now computes and prints the null standard deviation of the
+   smoothed Pearson residual field, when appropriate.
+
+   o nncorr, nnmean, nnvario
+   New argument 'k' specifies k-th nearest neighbour.
+
+   o quadrat.test.ppp, quadrat.test.quadratcount
+   New argument 'lambda' supports a test of the Poisson process 
+   with given intensity 'lambda'.
+
+   o clickpoly, clickbox
+   These functions now handle graphical arguments to polygon()
+   when drawing the resulting polygon or rectangle.
+
+   o owin, as.owin, as.mask
+   owin(mask=D) or as.owin(D) or as.mask(D) will produce a binary mask window
+   if D is a data frame with two columns of (x,y) coordinates
+   or a data frame with three columns containing (x,y,logical).
+
+   o as.owin.data.frame
+   W can now be a data frame with only two columns,
+   giving the spatial coordinates of the pixels that are inside the window.
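+
+   For example (a minimal sketch; the pixel spacing is arbitrary):
+
+      df <- expand.grid(x=seq(0.05, 0.95, by=0.1),
+                        y=seq(0.05, 0.95, by=0.1))
+      W <- as.owin(df)  # binary mask whose pixels are the rows of df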
+
+   o rose
+   Tick marks now have labels showing the angle (in degrees or radians).
+
+   o distcdf
+   New argument 'regularise' determines whether values at short distances
+   will be smoothed to avoid discretisation artefacts.
+
+   o rpoislinetess
+   Return value now has an attribute 'lines' giving the 
+   realisation of the Poisson line process.
+
+   o intersect.owin, union.owin, setminus.owin
+   New argument 'p' controls resolution of polygon clipping algorithm.
+
+   o intersect.owin, union.owin
+   Arguments may be lists of windows, of class 'solist'.
+   Formal arguments A and B have been removed.
+
+   o superimpose
+   Now handles lists of point patterns 
+   (objects of class 'ppplist' or 'splitppp')
+
+   o density.ppp
+   New argument 'positive' allows the user to stipulate that
+   density values must be positive (avoiding numerical errors
+   which occasionally produce small negative values).
+
+   o adaptive.density
+   Now accepts f = 0 (uniform intensity estimate) and
+   f = 1 (Voronoi intensity estimate) as well as 0 < f < 1.
+   Algorithm accelerated.
+
+   o rSSI
+   Can now generate inhomogeneous patterns.
+
+   o effectfun
+   Now works for 'kppm' and 'lppm' objects as well.
+
+   o integral.im, integral.msr
+   Argument 'domain' can now be a tessellation;
+   the integral over each tile of the tessellation is returned.
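+
+   A minimal sketch (the 2 x 2 grid of quadrats is illustrative):
+
+      Z <- density(cells)
+      Te <- quadrats(Window(cells), nx=2, ny=2)
+      integral(Z, domain=Te)  # one integral value for each tile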
+
+   o allstats, compareFit, markcorr, split.ppx, by.ppp
+   Result is now of class 'anylist'.
+
+   o by.im, density.splitppp, idw, model.images, nnmark, pixellate.ppp,
+   predict.lppm, predict.ppm, quadratcount.splitppp, quadratresample, relrisk, 
+   Smooth.msr, split.im, tiles
+   Result is now of class 'solist'.
+
+   o split.ppp
+   New argument 'reduce'.
+   Result now inherits class 'ppplist' and 'solist', as well as 'splitppp'
+
+   o rLGCP
+   New argument 'nsim' allows multiple patterns to be generated.
+
+   o alltypes
+   New argument 'reuse' determines whether all simulation envelopes
+   are based on the same set of simulated patterns, or on independent sets.
+
+   o rpoispp, runifpoint
+   New argument 'ex' makes it possible to generate a random pattern
+   similar to an example point pattern.
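+
+   For example (a minimal sketch):
+
+      X <- rpoispp(ex=cells)     # Poisson pattern with the intensity and
+                                 # window of the 'cells' dataset
+      Y <- runifpoint(ex=cells)  # same number of points, same window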
+
+   o effectfun
+   Argument 'covname' is not needed if the model has only one covariate.
+
+   o quadratcount
+   Argument 'tess' can now be anything acceptable to as.tess.
+
+   o tess
+   New argument 'unitname' specifies the name of the unit of length.
+   If it is missing, unitname information will be extracted from the
+   other data.
+
+   o intersect.tess, chop.tess, quadrats
+   Results of these functions now have the same 'unitname' as their input.
+
+   o persp.im, nnclean, plot.qqppm, plot.bw.optim
+   These plotting functions now obey spatstat.options('monochrome')
+
+   o lurking
+   Now returns an object of class 'lurk' which has a plot method.
+   Two-standard-deviation limits are now plotted using grey shading.
+
+   o marktable
+   New argument 'N' for studying the N nearest neighbours.   
+   New argument 'collapse' for manipulating the contingency table.
+
+   o harmonise.fv
+   Now discards columns with names which do not match.
+
+   o eval.fv
+   New argument 'equiv' can be used to declare that two columns
+   with different names in different objects are equivalent.
+
+   o quantile.ewcdf
+   New argument 'type' controls the type of quantile.
+   
+   o plot.imlist
+   New argument 'plotcommand' specifies how to plot each image.
+
+   o persp.im
+   The lower extent of the apron can now be controlled by 'zlim'.
+
+   o quadscheme
+   Argument 'method' is partially matched.
+
+   o Kdot, Ldot
+   New argument 'from' is an alternative to 'i'.
+
+   o Kcross, Lcross
+   New arguments 'from' and 'to' are alternatives to 'i' and 'j' respectively.
+
+   o varblock
+   Changed the ordering (and therefore default colours/styles) 
+   of curves in the plot, to match other functions like lohboot.
+
+   o bw.diggle
+   New argument 'nr' controls accuracy.
+
+   o textureplot
+   Now accepts a pixel image, a tessellation, or anything acceptable to as.tess.
+
+   o textureplot
+   Line spacing in legend now matches line spacing in main display.
+   
+   o [.tess
+   Subset index can now be a window.
+
+   o plot.tess
+   Can now plot a text label in each tile.
+
+   o plot.tess
+   New argument 'do.plot'.
+
+   o MultiHard, MultiStrauss, MultiStraussHard, HierStrauss
+   Printed output of fitted model now respects spatstat.options('terse').
+
+   o print.ppm
+   Reduced redundancy in output in some cases.
+
+   o print.msr
+   Responds better to spatstat.options('terse').
+
+   o print.ppm, print.fii, print.interact
+   Irregular parameters are now printed to the number of significant figures
+   specified by options("digits").
+
+   o square
+   New argument 'unitname'.
+
+   o plot.fv
+   Return value is now invisible.
+
+   o delaunay.distance, delaunay.network, dirichlet.edges,
+   dirichlet.network, dirichlet.vertices, dirichlet.weights
+   These functions are now 'deprecated', and will be replaced by
+   delaunayDistance, delaunayNetwork, dirichletEdges,
+   dirichletNetwork, dirichletVertices and dirichletWeights respectively.
+
+   o data(residualspaper)
+   In the real datasets (Fig1 and Fig11), the name of the unit of length
+   has now been recorded.
+
+   o rLGCP
+   This function now requires the package 'RandomFields'
+   to be loaded explicitly by library(RandomFields) or require(RandomFields), 
+   unless model="exp".
+
+   o iplot, istat
+   These functions now require the package 'rpanel' 
+   to be loaded explicitly by library(rpanel) or require(rpanel).
+
+   o ppm, quadscheme
+   Improved calculation of Dirichlet weights.
+
+   o countends
+   New argument 'toler' controls numerical errors
+
+   o diagnose.ppm
+   Improved handling of additional graphics arguments.
+
+   o pcf3est
+   Mathematical labels changed.
+
+   o plot.hyperframe
+   Default margin spacing has been increased.
+
+BUG FIXES
+
+   o Kinhom, Linhom
+   The value of 'theo' was erroneously rescaled 
+   by a small amount, when renormalise=TRUE (the default).
+   Fixed.
+
+   o Kmark
+   Values were erroneously rescaled.
+   Fixed.
+
+   o union.owin
+   Strange results were sometimes obtained when taking the union 
+   of more than two windows.
+   Fixed.
+
+   o rpoispp3 
+   Implementation was incorrect for nsim > 1.
+   (Results may have been incorrect.)
+   Spotted by Jack Hywood.
+   Fixed.
+   
+   o as.owin.data.frame
+   Crashed if the window was not connected.
+   Fixed.
+
+   o Frame<-
+   Crashed when applied to a binary mask.
+   Fixed.
+
+   o rho2hat
+   Crashed if cov1="x" and cov2="y".
+   Fixed.
+
+   o as.mask
+   Crashed sometimes when only the argument 'xy' was given.
+   Fixed.
+
+   o ppm
+   Crashed (rarely) when method='ho' if the simulated pattern was empty.
+   Fixed.
+
+   o istat, iplot
+   Crashed in recent versions of rpanel.
+   Fixed.
+
+   o convexhull
+   Crashed if applied to a 'psp' object.
+   Fixed.
+
+   o plot.ppm
+   Crashed with message about 'variable lengths differ'.
+   Fixed.
+
+   o plot.solist
+   Crashed when applied to a list of point patterns
+   if some patterns had more than one column of marks.
+   Fixed.
+
+   o Smooth.ppp
+   Crashed if applied to a point pattern with several columns of marks
+   if some of the columns were factors.
+   Fixed.
+
+   o runifpoint3, rpoispp3
+   Crashed if nsim > 1.
+   Spotted by Jack Hywood.
+   Fixed.
+
+   o hist.im
+   Crashed if argument 'freq' was given.
+   Fixed.
+
+   o MultiStraussHard
+   Generated misleading error messages (e.g. 'model is invalid') 
+   when arguments 'iradii' and 'hradii' did not have the same pattern of NA's.
+   Fixed.
+
+   o plot.solist
+   Figures were sometimes aligned incorrectly 
+   when the argument 'panel.args' was given.
+   Fixed.
+
+   o scaletointerval
+   Results sometimes fell slightly outside the desired interval
+   due to numerical error.
+   Fixed.
+
+   o plot.solist
+   Behaved incorrectly when plotcommand='persp'.
+   Fixed.
+
+   o "[.hyperframe"
+   Sometimes returned an 'anylist' when it should have returned a 'solist'.
+   Fixed.
+
+   o plot.im
+   Did not plot surrounding frame box when ribbon=FALSE.
+   Fixed.
+
+   o envelope
+   The functions stored when savefuns=TRUE did not inherit 
+   the correct name for the unit of length.
+   Fixed.
+
+   o print.ppm, print.fii, print.interact
+   Layout was misaligned.
+   Fixed.
+
+   o plot.plotppm
+   Paused for input when it was not appropriate.
+   Fixed.
+
+   o plot.fv
+   On png devices, the legend box was drawn with a white background,
+   obscuring the main plot. 
+   Fixed.
+
+   o plot.owin, plot.ppp, plot.im
+   There was unnecessary extra space above the main title.
+   Fixed.
+
+   o plot.rho2hat
+   Colour map ribbon was drawn but not annotated.
+   Fixed.
+
+   o density.splitppp, density.ppplist
+   Format was out of order if se=TRUE.
+   Fixed.
+
+   o MultiStraussHard
+   project.ppm sometimes yielded a model that was still invalid.
+   Fixed.
+
+        CHANGES IN spatstat VERSION 1.41-1
+
+OVERVIEW
+
+   o This is identical to the major release 1.41-0 except for
+    minor bug fixes. The change log for 1.41-0 is repeated here
+    with minor modifications.
+
+   o Version nickname: 'Ides of March'
+
+   o We thank Ahmed El-Gabbas, Ute Hahn, Aruna Jammalamadaka,
+   Ian Renner, Brian Ripley, Torben Tvedebrink and Sasha Voss 
+   for contributions.
+
+   o Fixed a bug causing a segmentation fault.
+
+   o Standard errors for kernel estimates of intensity.
+
+   o Test for segregation.
+
+   o Tessellations may now have marks.
+
+   o Nested splitting.
+
+   o More support for cluster models. Reorganised parametrisation.
+
+   o Sparse data representation of linear networks.
+
+   o More support for data on a linear network.
+
+   o New datasets: 'spiders' and 'dendrite'.
+
+   o Improvements and bug fixes.
+
+   o spatstat no longer uses Fortran.
+
+   o spatstat no longer depends on the package 'scatterplot3d'.
+
+   o spatstat now imports (rather than 'suggests') the Matrix package.
+
+NEW DATASETS
+
+   o dendrite
+   Dendritic spines on the dendrite network of a neuron.
+   A point pattern on a linear network.
+   Generously contributed by Aruna Jammalamadaka.
+
+   o spiders
+   Spider webs on the mortar lines of a brick wall.
+   A point pattern on a linear network.
+   Generously contributed by Sasha Voss.
+
+NEW FUNCTIONS
+
+   o segregation.test
+   Test of spatial segregation of types in a multitype point pattern.
+
+   o clusterfield, clusterkernel
+   Compute the cluster kernel (offspring density) of a cluster process model,
+   or compute the cluster field generated by superimposing copies 
+   of the cluster kernel at specified locations.
+
+   o clusterradius
+   Compute the radius of the support of the offspring density
+   of a cluster process model. 
+
+   o as.linnet.psp
+   Convert a line segment pattern to a linear network
+   by guessing the connectivity using a distance threshold.
+
+   o iplot.linnet, iplot.lpp
+   Methods for interactive plotting 'iplot' for objects of class lpp and linnet.
+
+   o Mathematical operations are now supported for pixel images
+   on a linear network. See help(Math.linim)
+
+   o dirichlet.network, delaunay.network
+   The linear networks formed by the Dirichlet tessellation 
+   and Delaunay triangulation.
+
+   o dirichlet.edges
+   The edges of the Dirichlet tessellation.
+
+   o selfcut.psp
+   Cut line segments where they cross each other.
+
+   o vertices.linnet
+   Extract the vertices (nodes) of the linear network.
+
+   o vertexdegree
+   Compute the degree of each vertex in a linear network.
+
+   o pixellate.linnet
+   Pixellate a linear network.
+
+   o subset.hyperframe
+   'subset' method for class 'hyperframe'.
+
+   o head.hyperframe, tail.hyperframe
+   'head' and 'tail' methods for hyperframes.
+
+   o clickdist
+   Measures the distance between two spatial locations clicked by the user.
+
+   o solapply, anylapply
+   wrappers for 'lapply' which return a list of class 'solist' or 'anylist'.
+
+   o Kmark
+   Weighted K-function. 
+   Identical to 'markcorrint' and will eventually replace it.
+
+   o marks.tess, marks<-.tess, unmark.tess
+   Extract or change the marks associated with the tiles of a tessellation.
+
+   o quantess
+   Quantile tessellation: divide space into pieces which contain
+   equal amounts of 'stuff'.
+
+   o nestsplit
+   Nested split
+
+   o integral
+   New generic function for integrating functions,
+   with methods for 'im', 'msr', 'linim' and 'linfun'.
+
+   o selfcut.psp
+   Cut line segments where they cross each other
+
+   o as.function.im
+   Convert a pixel image to a function(x,y).
+
+   o as.linnet.linim
+   Extract the linear network from a 'linim' object.
+
+   o pool.fv, pool.anylist
+   New methods for 'pool' 
+
+   o Window.linnet
+   Extract the two-dimensional window containing a linear network.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o linnet, lpp
+   A linear network can now be built in 'sparse matrix' form
+   which requires much less memory. 
+
+   o chicago
+   The Chicago street crimes data are now stored in 'sparse matrix' form.
+   To convert them to non-sparse form, use as.lpp(chicago, sparse=FALSE)
+
+   o kppm
+   The parametrisation of cluster models has been reorganised.
+   The scale parameter is now always called 'scale'.
+   Results should be backward-compatible.
+
+   o cauchy.estK, cauchy.estpcf, matclust.estK, matclust.estpcf, 
+   thomas.estK, thomas.estpcf, vargamma.estK, vargamma.estpcf
+    The parametrisation of cluster models has been reorganised.
+    The scale parameter is now always called 'scale'.
+   
+   o plot.kppm 
+   Also plots the cluster kernel.
+
+   o density.ppp
+   New argument 'se' allows calculation of standard errors as well.
+
+   o plot.pp3
+   Now produces a genuine perspective view.
+   New arguments control the eye position for the perspective view.
+
+   o Emark, Vmark
+   These functions can now work with multiple columns of marks.
+
+   o pixellate.psp
+   Can now count the number of segments that intersect each pixel, 
+   instead of the total length of intersection.
+
+   o linfun
+   If  g = linfun(f, L),
+   the function f will always be called as f(x,y,seg,tp, ...)
+   It is no longer expected to handle the case where 'seg' and 'tp' are absent.
+   The resulting function g can now be called as g(X) where X is an lpp object,
+   or as g(x,y) or g(x,y,seg,tp) where x,y,seg,tp are coordinates.
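+
+   A minimal sketch of the new calling conventions, using the built-in
+   network 'simplenet':
+
+      f <- function(x, y, seg, tp) { x + y }  # must accept seg and tp
+      g <- linfun(f, simplenet)
+      X <- runiflpp(3, simplenet)
+      g(X)                                    # evaluate at the points of X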
+
+   o tess
+   New argument 'marks' allows marks to be associated with tiles.
+
+   o anova.lppm
+   Outdated argument 'override' has been removed.
+
+   o split<-.ppp
+   Preserves the original ordering of the data, if possible.
+
+   o MultiHard, MultiStrauss, MultiStraussHard, HierStrauss
+   Zero values in the interaction radii are now treated as NA.
+   Improved handling of missing arguments.
+   Printed output now respects options('width')
+
+   o linearKinhom, linearKcross.inhom, linearKdot.inhom,
+     linearpcfinhom, linearpcfcross.inhom, linearpcfdot.inhom
+   If the intensity argument lambda, lambdaI, lambdaJ, lambdadot
+   is a fitted point process model, the model is first updated
+   by re-fitting it to the data, before computing the fitted intensity.
+
+   o solutionset
+   The expression will be evaluated using pixel arithmetic (Math.im)
+   if it cannot be evaluated using eval.im.
+
+   o to.grey
+   Now uses better weights for the R, G, B channels.
+
+   o rVarGamma
+   Accelerated.
+
+   o summary.mppm, print.mppm
+   These functions now respect options('width') and spatstat.options('terse').
+
+   o print.quadrattest
+   Now respects options('width') and spatstat.options('terse').
+
+   o print.pp3
+   Now respects options('width')
+
+   o print.lpp
+   Now respects options('width') and options('digits').
+
+   o print.owin, print.im, print.summary.owin, print.summary.im
+   Now respect options('width').
+
+   o nnmean
+   Now yields a vector, instead of a 1-column matrix,
+   when there is only a single column of marks.
+
+   o pairdist.psp, crossdist.psp, nndist.psp
+   The option 'method="Fortran"' is no longer supported.
+   The default is 'method="C"'.
+ 
+   o [.hyperframe
+   When a row of data is extracted with drop=TRUE, the result
+   belongs to class 'anylist'.
+
+   o installation of spatstat
+   A Fortran compiler is no longer needed to compile spatstat from source.
+
+   o hyperframe class
+   The internal structure of hyperframes has changed slightly:
+   columns of objects are now stored and returned as
+   lists of class 'anylist' or 'solist'.
+   There should be no change in behaviour.
+
+   o datasets
+   Internal format of the datasets 
+   	    bdspots, bei, clmfires, demohyper, flu, gorillas, heather, 
+	    Kovesi, murchison, osteo, pyramidal, waterstriders
+   has changed slightly to use the classes 'anylist' and 'solist'.
+   There should be no change in behaviour.
+
+   o K3est
+   New argument 'ratio'.
+
+   o spatstat.options
+   New option 'par.points3d' sets default arguments for plot.pp3. 
+
+   o diagnose.ppm
+   New arguments 'xlab', 'ylab', 'rlab' determine the labels
+   in the 4-panel plot, and new argument 'outer' controls their position.
+   The confusing default value for 'compute.sd' has been changed.
+
+   o iplot.layered
+   New argument 'visible' controls which layers are initially visible.
+
+   o plot.lpp
+   New argument 'show.window' controls whether to plot the containing window.
+   
+   o textstring
+   Any number of spatial locations (x,y) can be specified,
+   with a corresponding vector of text strings.
+
+   o plot.hyperframe
+   New argument 'mar' 
+
+   o plot.linnet
+   New argument 'do.plot'
+
+   o summary.hyperframe
+   Improved output.
+
+   o eval.linim
+   Improved scoping rules.
+
+   o pixellate.owin
+   Accelerated.
+
+   o summary.linnet
+   Now prints more information, and respects options('digits').
+
+   o rmpoispp, rmpoint
+   The vector of possible types of points will default to the 'names' vector
+   of the argument 'lambda', 'n', or 'f' where appropriate.
+
+   o rpoislpp
+   Argument 'L' can be omitted when lambda is a 'linim' or 'linfun'
+
+   o simulate.ppm, simulate.kppm, simulate.lppm, simulate.slrm
+   New argument 'drop': if nsim = 1 and drop=TRUE, the result is a point pattern
+   rather than a list containing one point pattern.
+
+   o runifdisc, runifpoint, rpoint, rpoispp, rmpoint, rmpoispp, 
+   rMaternI, rMaternII, rSSI, rPoissonCluster, rGaussPoisson,
+   rstrat, rsyst, rcell, rthin, rNeymanScott, rMatClust, rThomas,
+   rCauchy, rVarGamma, rpoispp3, runifpoint3
+   New argument 'drop': if nsim = 1 and drop=TRUE, the result is a point pattern
+   rather than a list containing one point pattern.
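+
+   For example (a minimal sketch):
+
+      X <- rpoispp(50)                          # drop=TRUE: a point pattern
+      Xlist <- rpoispp(50, nsim=1, drop=FALSE)  # a list of one point pattern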
+
+   o spatstat.options
+   New option 'units.paren' controls the type of parenthesis
+   enclosing the explanatory text about the unit of length,
+   in print.ppm, plot.fv, etc.
+
+   o closepairs, crosspairs
+   New option: what="ijd" returns only the indices i, j and the distance d
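+
+   For example (a minimal sketch):
+
+      cp <- closepairs(cells, rmax=0.1, what="ijd")
+      str(cp)  # a list with components i, j and d only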
+
+   o rCauchy, rMatClust, rNeymanScott, rPoissonCluster, rThomas, rVarGamma
+   Argument names have changed.
+
+BUG FIXES
+
+   o sumouter
+   A segmentation fault could occur if any data were NA.
+   Fixed.
+
+   o simulate.kppm
+   Simulation failed for log-Gaussian Cox processes (in simulate.kppm only) 
+   with an error message from the RandomFields package.
+   Fixed.
+
+   o ppm,  predict.ppm, profilepl
+   Crashed sometimes with message 
+   "interaction evaluator did not return a matrix".
+   Fixed.
+
+   o lppm
+   step() did not work correctly on 'lppm' objects.
+   Fixed.
+
+   o quadscheme
+   If quadscheme() was called explicitly, with the stipulated number of tiles
+   exceeding the number of dummy points given, the quadrature weights
+   were sometimes vastly inflated: the total quadrature weight was
+   much larger than the window area.
+   [Spotted by Ian Renner.]
+   Fixed.
+
+   o predict.rhohat
+   Result was incorrect for data on a non-rectangular window 
+   (and a warning was issued about incorrect vector length).
+   Fixed.
+
+   o Math.im
+   Unary operators did not work (e.g. "-x").
+   Fixed.
+   
+   o density.ppp
+   Crashed when at="points" if the dataset had exactly 1 point.
+   Fixed.
+
+   o rSSI
+   Crashed if nsim > 1.
+   Fixed.
+
+   o influence.ppm, leverage.ppm, dfbetas.ppm
+   Crashed or issued a warning if any quadrature points had 
+   conditional intensity zero under the model (negative infinite values
+   of the sufficient statistic).
+   Fixed.
+
+   o clickppp, clickpoly
+   Did not work correctly in the RStudio display device.
+   Fixed.
+
+   o Iest
+   Ignored the arguments 'r' and 'eps'.
+   Fixed.
+
+   o markvario
+   Result was garbled, when X had more than one column of marks.
+   Fixed.
+
+   o rMatClust, rVarGamma, rCauchy, rNeymanScott
+   Result was a list, but not a 'solist', when nsim > 1.
+   Fixed.
+
+   o print.mppm, summary.mppm, subfits
+   Crashed if a Poisson interaction was implied but not given explicitly.
+   Fixed.
+
+   o Kest
+   Crashed if ratio=TRUE and the window was a rectangle.
+   Fixed.
+
+   o anova.ppm
+   Crashed sometimes with message 'models were not all fitted
+   to the same size of dataset'. 
+   (This occurred if there were quadrature points with
+   conditional intensity equal to zero in some models but not in all models.)
+   Fixed.
+
+   o vcov.kppm
+   Occasionally ran out of memory.
+   Fixed.
+
+   o as.linim.linfun
+   Erroneously converted the pixel values to numeric values.
+   Fixed.
+
+   o as.owin.layered
+   Ignored layers with zero area.
+   Fixed.
+
+   o plot.ppm
+   Paused the plot between frames even when there was only one frame.
+   Fixed.
+
+   o plot.layered
+   Did not allocate space for legends of 'lpp' objects.
+   Fixed.
+
+   o plot.lpp
+   Ignored symbolmap arguments like 'cex'
+   and confused the arguments 'col' and 'cols'.
+   Fixed.
+
+   o plot.diagppm
+   Ignored add=TRUE in some cases.
+   Fixed.
+
+   o iplot.layered
+   Did not handle 'diagramobj' objects correctly.
+   Fixed.
+
+   o plot.yardstick
+   Changed arguments.
+
+        CHANGES IN spatstat VERSION 1.41-0
+
+OVERVIEW
+
+   o We thank Ahmed El-Gabbas, Ute Hahn, Aruna Jammalamadaka,
+   Ian Renner, Brian Ripley, Torben Tvedebrink and Sasha Voss 
+   for contributions.
+
+   o Fixed a bug causing a segmentation fault.
+
+   o Standard errors for kernel estimates of intensity.
+
+   o Test for segregation.
+
+   o Tessellations may now have marks.
+
+   o Nested splitting.
+
+   o More support for cluster models. Reorganised parametrisation.
+
+   o Sparse data representation of linear networks.
+
+   o More support for data on a linear network.
+
+   o New datasets: 'spiders' and 'dendrite'.
+
+   o Improvements and bug fixes.
+
+   o spatstat no longer uses Fortran.
+
+   o spatstat no longer depends on the package 'scatterplot3d'.
+
+   o spatstat now imports (rather than 'suggests') the Matrix package.
+
+   o Nickname: 'Team Australia'
+
+NEW DATASETS
+
+   o dendrite
+   Dendritic spines on the dendrite network of a neuron.
+   A point pattern on a linear network.
+   Generously contributed by Aruna Jammalamadaka.
+
+   o spiders
+   Spider webs on the mortar lines of a brick wall.
+   A point pattern on a linear network.
+   Generously contributed by Sasha Voss.
+
+NEW FUNCTIONS
+
+   o segregation.test
+   Test of spatial segregation of types in a multitype point pattern.
+
+   o clusterfield, clusterkernel
+   Compute the cluster kernel (offspring density) of a cluster process model,
+   or compute the cluster field generated by superimposing copies 
+   of the cluster kernel at specified locations.
+
+   o clusterradius
+   Compute the radius of the support of the offspring density
+   of a cluster process model. 
+
+   o as.linnet.psp
+   Convert a line segment pattern to a linear network
+   by guessing the connectivity using a distance threshold.
+
+   o iplot.linnet, iplot.lpp
+   Methods for interactive plotting 'iplot' for objects of class lpp and linnet.
+
+   o Mathematical operations are now supported for pixel images
+   on a linear network. See help(Math.linim)
+
+   o dirichlet.network, delaunay.network
+   The linear networks formed by the Dirichlet tessellation 
+   and Delaunay triangulation.
+
+   o dirichlet.edges
+   The edges of the Dirichlet tessellation.
+
+   o selfcut.psp
+   Cut line segments where they cross each other.
+
+   o vertices.linnet
+   Extract the vertices (nodes) of the linear network.
+
+   o vertexdegree
+   Compute the degree of each vertex in a linear network.
+
+   o pixellate.linnet
+   Pixellate a linear network.
+
+   o subset.hyperframe
+   'subset' method for class 'hyperframe'.
+
+   o head.hyperframe, tail.hyperframe
+   'head' and 'tail' methods for hyperframes.
+
+   o clickdist
+   Measures the distance between two spatial locations clicked by the user.
+
+   o solapply, anylapply
+   wrappers for 'lapply' which return a list of class 'solist' or 'anylist'.
+
+   o Kmark
+   Weighted K-function. 
+   Identical to 'markcorrint' and will eventually replace it.
+
+   o marks.tess, marks<-.tess, unmark.tess
+   Extract or change the marks associated with the tiles of a tessellation.
+
+   o quantess
+   Quantile tessellation: divide space into pieces which contain
+   equal amounts of 'stuff'.
+
+   o nestsplit
+   Nested split
+
+   o integral
+   New generic function for integrating functions,
+   with methods for 'im', 'msr', 'linim' and 'linfun'.
+
+   o selfcut.psp
+   Cut line segments where they cross each other
+
+   o as.function.im
+   Convert a pixel image to a function(x,y).
+
+   o as.linnet.linim
+   Extract the linear network from a 'linim' object.
+
+   o pool.fv, pool.anylist
+   New methods for 'pool' 
+
+   o Window.linnet
+   Extract the two-dimensional window containing a linear network.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o linnet, lpp
+   A linear network can now be built in 'sparse matrix' form
+   which requires much less memory. 
+
+   o chicago
+   The Chicago street crimes data are now stored in 'sparse matrix' form.
+   To convert them to non-sparse form, use as.lpp(chicago, sparse=FALSE)
+
+   o kppm
+   The parametrisation of cluster models has been reorganised.
+   The scale parameter is now always called 'scale'.
+   Results should be backward-compatible.
+
+   o cauchy.estK, cauchy.estpcf, matclust.estK, matclust.estpcf, 
+   thomas.estK, thomas.estpcf, vargamma.estK, vargamma.estpcf
+    The parametrisation of cluster models has been reorganised.
+    The scale parameter is now always called 'scale'.
+   
+   o plot.kppm 
+   Also plots the cluster kernel.
+
+   o density.ppp
+   New argument 'se' allows calculation of standard errors as well.
+
+   o plot.pp3
+   Now produces a genuine perspective view.
+   New arguments control the eye position for the perspective view.
+
+   o Emark, Vmark
+   These functions can now work with multiple columns of marks.
+
+   o pixellate.psp
+   Can now count the number of segments that intersect each pixel, 
+   instead of the total length of intersection.
+
+   o linfun
+   If  g = linfun(f, L),
+   the function f will always be called as f(x,y,seg,tp, ...)
+   It is no longer expected to handle the case where 'seg' and 'tp' are absent.
+   The resulting function g can now be called as g(X) where X is an lpp object,
+   or as g(x,y) or g(x,y,seg,tp) where x,y,seg,tp are coordinates.
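+   For example (a minimal sketch using the built-in network 'simplenet';
+   the function f is arbitrary):
+      f <- function(x, y, seg, tp) { x + y }
+      g <- linfun(f, simplenet)
+      X <- runiflpp(5, simplenet)
+      g(X)   # same as g(x,y,seg,tp) at the coordinates of X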
+
+   o tess
+   New argument 'marks' allows marks to be associated with tiles.
+
+   o anova.lppm
+   Outdated argument 'override' has been removed.
+
+   o split<-.ppp
+   Preserves the original ordering of the data, if possible.
+
+   o MultiHard, MultiStrauss, MultiStraussHard, HierStrauss
+   Zero values in the interaction radii are now treated as NA.
+   Improved handling of missing arguments.
+   Printed output now respects options('width')
+
+   o linearKinhom, linearKcross.inhom, linearKdot.inhom,
+     linearpcfinhom, linearpcfcross.inhom, linearpcfdot.inhom
+   If the intensity argument lambda, lambdaI, lambdaJ, lambdadot
+   is a fitted point process model, the model is first updated
+   by re-fitting it to the data, before computing the fitted intensity.
+
+   o solutionset
+   The expression will be evaluated using pixel arithmetic (Math.im)
+   if it cannot be evaluated using eval.im.
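+   For example (an illustrative sketch; the threshold is arbitrary):
+      grad <- bei.extra$grad                 # slope, a pixel image
+      steep <- solutionset(abs(grad) > 0.1)  # window where this holds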
+
+   o to.grey
+   Now uses better weights for the R, G, B channels.
+
+   o rVarGamma
+   Accelerated.
+
+   o summary.mppm, print.mppm
+   These functions now respect options('width') and spatstat.options('terse').
+
+   o print.quadrattest
+   Now respects options('width') and spatstat.options('terse').
+
+   o print.pp3
+   Now respects options('width')
+
+   o print.lpp
+   Now respects options('width') and options('digits').
+
+   o print.owin, print.im, print.summary.owin, print.summary.im
+   Now respect options('width').
+
+   o nnmean
+   Now yields a vector, instead of a 1-column matrix,
+   when there is only a single column of marks.
+
+   o pairdist.psp, crossdist.psp, nndist.psp
+   The option 'method="Fortran"' is no longer supported.
+   The default is 'method="C"'.
+ 
+   o [.hyperframe
+   When a row of data is extracted with drop=TRUE, the result
+   belongs to class 'anylist'.
+
+   o installation of spatstat
+   A Fortran compiler is no longer needed to compile spatstat from source.
+
+   o hyperframe class
+   The internal structure of hyperframes has changed slightly:
+   columns of objects are now stored and returned as
+   lists of class 'anylist' or 'solist'.
+   There should be no change in behaviour.
+
+   o datasets
+   Internal format of the datasets 
+   	    bdspots, bei, clmfires, demohyper, flu, gorillas, heather, 
+	    Kovesi, murchison, osteo, pyramidal, waterstriders
+   has changed slightly to use the classes 'anylist' and 'solist'.
+   There should be no change in behaviour.
+
+   o K3est
+   New argument 'ratio'.
+
+   o spatstat.options
+   New option 'par.points3d' sets default arguments for plot.pp3. 
+
+   o diagnose.ppm
+   New arguments 'xlab', 'ylab', 'rlab' determine the labels
+   in the 4-panel plot, and new argument 'outer' controls their position.
+   The confusing default value for 'compute.sd' has been changed.
+
+   o iplot.layered
+   New argument 'visible' controls which layers are initially visible.
+
+   o plot.lpp
+   New argument 'show.window' controls whether to plot the containing window.
+   
+   o textstring
+   Any number of spatial locations (x,y) can be specified,
+   with a corresponding vector of text strings.
+
+   o plot.hyperframe
+   New argument 'mar' 
+
+   o plot.linnet
+   New argument 'do.plot'
+
+   o summary.hyperframe
+   Improved output.
+
+   o eval.linim
+   Improved scoping rules.
+
+   o pixellate.owin
+   Accelerated.
+
+   o summary.linnet
+   Now prints more information, and respects options('digits').
+
+   o rmpoispp, rmpoint
+   The vector of possible types of points will default to the 'names' vector
+   of the argument 'lambda', 'n', or 'f' where appropriate.
+
+   o rpoislpp
+   Argument 'L' can be omitted when lambda is a 'linim' or 'linfun'
+
+   o simulate.ppm, simulate.kppm, simulate.lppm, simulate.slrm
+   New argument 'drop': if nsim = 1 and drop=TRUE, the result is a point pattern
+   rather than a list containing one point pattern.
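+   For example (a minimal sketch):
+      fit <- ppm(cells ~ 1)
+      X <- simulate(fit, nsim=1, drop=TRUE)   # a 'ppp', not a list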
+
+   o runifdisc, runifpoint, rpoint, rpoispp, rmpoint, rmpoispp, 
+   rMaternI, rMaternII, rSSI, rPoissonCluster, rGaussPoisson,
+   rstrat, rsyst, rcell, rthin, rNeymanScott, rMatClust, rThomas,
+   rCauchy, rVarGamma
+   New argument 'drop'
+
+   o spatstat.options
+   New option 'units.paren' controls the type of parenthesis
+   enclosing the explanatory text about the unit of length,
+   in print.ppm, plot.fv, etc.
+
+   o closepairs, crosspairs
+   New option: what="ijd" returns only the indices i, j and the distance d
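+   For example (an illustrative call; 'rmax' is arbitrary):
+      cp <- closepairs(cells, rmax=0.1, what="ijd")
+      str(cp)   # a list with components i, j and d only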
+
+   o rCauchy, rMatClust, rNeymanScott, rPoissonCluster, rThomas, rVarGamma
+   Argument names have changed.
+
+BUG FIXES
+
+   o sumouter
+   A segmentation fault could occur if any data were NA.
+   Fixed.
+
+   o simulate.kppm
+   Simulation failed for log-Gaussian Cox processes (in simulate.kppm only) 
+   with an error message from the RandomFields package.
+   Fixed.
+
+   o ppm,  predict.ppm, profilepl
+   Crashed sometimes with message 
+   "interaction evaluator did not return a matrix".
+   Fixed.
+
+   o lppm
+   step() did not work correctly on 'lppm' objects.
+   Fixed.
+
+   o quadscheme
+   If quadscheme() was called explicitly, with the stipulated number of tiles
+   exceeding the number of dummy points given, then the quadrature weights 
+   were sometimes vastly inflated - total quadrature weight was
+   much larger than window area. Spotted by Ian Renner. 
+   Fixed.   
+
+   o predict.rhohat
+   Result was incorrect for data on a non-rectangular window 
+   (and a warning was issued about incorrect vector length).
+   Fixed.
+
+   o Math.im
+   Unary operators did not work (e.g. "-x").
+   Fixed.
+   
+   o density.ppp
+   Crashed when at="points" if the dataset had exactly 1 point.
+   Fixed.
+
+   o rSSI
+   Crashed if nsim > 1.
+   Fixed.
+
+   o influence.ppm, leverage.ppm, dfbetas.ppm
+   Crashed or issued a warning if any quadrature points had 
+   conditional intensity zero under the model (negative infinite values
+   of the sufficient statistic).
+   Fixed.
+
+   o clickppp, clickpoly
+   Did not work correctly in the RStudio display device.
+   Fixed.
+
+   o Iest
+   Ignored the arguments 'r' and 'eps'.
+   Fixed.
+
+   o markvario
+   Result was garbled, when X had more than one column of marks.
+   Fixed.
+
+   o rMatClust, rVarGamma, rCauchy, rNeymanScott
+   Result was a list, but not a 'solist', when nsim > 1.
+   Fixed.
+
+   o print.mppm, summary.mppm, subfits
+   Crashed if a Poisson interaction was implied but not given explicitly.
+   Fixed.
+
+   o Kest
+   Crashed if ratio=TRUE and the window was a rectangle.
+   Fixed.
+
+   o anova.ppm
+   Crashed sometimes with message 'models were not all fitted
+   to the same size of dataset'. 
+   (This occurred if there were quadrature points with
+   conditional intensity equal to zero in some models but not in all models.)
+   Fixed.
+
+   o vcov.kppm
+   Occasionally ran out of memory.
+   Fixed.
+
+   o as.linim.linfun
+   Erroneously converted the pixel values to numeric values.
+   Fixed.
+
+   o as.owin.layered
+   Ignored layers with zero area.
+   Fixed.
+
+   o plot.ppm
+   Paused the plot between frames even when there was only one frame.
+   Fixed.
+
+   o plot.layered
+   Did not allocate space for legends of 'lpp' objects.
+   Fixed.
+
+   o plot.lpp
+   Ignored symbolmap arguments like 'cex'
+   and confused the arguments 'col' and 'cols'.
+   Fixed.
+
+   o plot.diagppm
+   Ignored add=TRUE in some cases.
+   Fixed.
+
+   o iplot.layered
+   Did not handle 'diagramobj' objects correctly.
+   Fixed.
+
+   o plot.yardstick
+   Changed arguments.
+
+	CHANGES IN spatstat VERSION 1.40-0
+
+OVERVIEW
+
+   o We thank Markus Herrmann, Peter Kovesi, Andrew Lister, 
+   Enrique Miranda, Tuomas Rajala, Brian Ripley, Dominic Schuhmacher 
+   and Maxime Woringer for contributions.
+
+   o Important bug fixes.
+
+   o Mathematical operators now apply to images.
+
+   o Parametric estimates of relative risk from fitted point process models.
+
+   o Standard errors for relative risk (parametric and non-parametric).
+
+   o Kernel smoothing and rose diagrams for angular data.
+
+   o Perceptually uniform colour maps. 
+
+   o Hierarchical interactions for multitype patterns.
+
+   o Hard core parameters in all interactions no longer need to be specified
+     and will be estimated from data.
+
+   o Improvements to analysis of deviance and model selection.
+
+   o New datasets.
+
+   o New vignette, summarising all datasets installed with spatstat.
+
+   o Tests and diagnostics now include a Monte Carlo option.
+
+   o Faster checking of large datasets.
+
+   o Faster simulations.
+
+   o Code for drawing diagrams (arrows, scale bars).
+
+   o Version nickname: 'Do The Maths'
+
+NEW DATASETS
+
+   o bdspots
+   Breakdown spots on microelectronic capacitor electrodes.
+   Generously contributed by Prof Enrique Miranda.
+
+   o Kovesi
+   Colour maps with perceptually uniform contrast.
+   Generously contributed by Peter Kovesi.
+
+NEW FUNCTIONS
+
+   o Mathematical operations are now supported for images.
+   For example:   alpha <- atan(bei.extra$grad) * 180/pi
+   See help(Math.im)
+
+   o relrisk.ppm
+   Spatially-varying probabilities of different types of points
+   predicted by a fitted point process model.
+
+   o circdensity
+   Kernel density estimate for angular data
+
+   o rose
+   Rose diagram (rose of directions) for angular data
+
+   o nnorient
+   Nearest neighbour orientation distribution.
+
+   o AIC.ppm
+   Calculate AIC of a Gibbs model using Takeuchi's rule.
+
+   o interp.colours
+   Interpolate a sequence of colour values.
+
+   o anyDuplicated.ppp, anyDuplicated.ppx
+   Fast replacements for any(duplicated(x)) for point patterns.
+
+   o textstring, onearrow, yardstick
+   Objects representing a text string, an arrow, or a scale bar,
+   for use in drawing spatial diagrams.
+
+   o plot.imlist, image.imlist, contour.imlist
+   Methods for the new class 'imlist'
+
+   o [<-.layered,  [[<-.layered
+   More support for class 'layered'
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o (vignettes)
+   New vignette 'datasets' summarises all the datasets installed
+   with the spatstat package.
+
+   o relrisk
+   The function relrisk is now generic, with methods for ppp and ppm.
+   New argument 'relative' specifies whether to calculate the relative risk
+   or the absolute probability of each type of point.
+   New argument 'se' specifies whether to calculate standard errors.
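+   For example (an illustrative call on the bivariate 'urkiola' data;
+   the smoothing bandwidth is chosen by default):
+      p <- relrisk(urkiola, relative=FALSE, se=TRUE)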
+
+   o plot.im
+   The default colour map for plotting images,
+   specified by spatstat.options('image.colfun'), 
+   has been changed to a perceptually uniform map.
+
+   o DiggleGratton, Fiksel, MultiHard, MultiStraussHard
+   The hard core distance parameters in these models
+   can now be omitted by the user, and will be estimated automatically
+   from data (by the 'self-starting' feature of interactions).
+   This was already true of Hardcore and StraussHard.
+
+   o Hybrid
+   Hybrid models now apply the 'self-starting' feature
+   to each component model. 
+
+   o anova.ppm
+   Can now reconcile models fitted using different dummy points,
+   different values of 'rbord', different values of 'use.gam', etc. 
+
+   o profilepl
+   New argument 'aic' makes it possible to optimise the parameters 
+   by minimising AIC.
+
+   o profilepl
+   No longer requires values for parameters which are 'optional' 
+   (such as the hard core distance).
+
+   o rmh, simulate.ppm, rmh.ppm, rmh.default
+   The Metropolis-Hastings algorithm now starts by deleting any points
+   in the initial state that are 'illegal' (i.e. whose conditional intensity
+   is equal to zero). This ensures that the result of rmh 
+   never contains illegal points.
+
+   o runifpoint, rpoispp, rStrauss, rHardcore, rStraussHard,
+   rDiggleGratton, rDGS, runifdisc, rpoint, rMaternI, rMaternII, rSSI,
+   rPoissonCluster, rGaussPoisson, rstrat, rsyst, rcell, rthin, rjitter,
+   rNeymanScott, rMatClust, rThomas, rCauchy, rVarGamma, rmpoint, rmpoispp, 
+   runifpointOnLines, rpoisppOnLines, runiflpp, rpoislpp, runifpointx, 
+   rpoisppx, runifpoint3, rpoispp3
+      These random point pattern generators now have an argument 'nsim'
+      specifying the number of simulated realisations to be generated.
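+      For example (a minimal sketch):
+         Xlist <- runifpoint(100, nsim=5)   # a list of 5 point patterns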
+
+   o pairorient
+   New argument 'cumulative'.
+   New algorithm to compute kernel estimate of probability density.
+   Default behaviour changed.
+   Argument 'units' has been renamed 'unit' for consistency.
+   Labels and descriptions of columns have been corrected.
+
+   o predict.ppm
+   New syntax (backward-compatible).
+   New argument 'se' replaces option 'type="se"'.
+   Old argument 'total' is deprecated: use 'window' and set 'type="count"'.
+   
+   o cdf.test
+   The methods for class 'ppm' and 'lppm' now handle Gibbs models
+   and perform a Monte Carlo test in this case.
+
+   o lurking, diagnose.ppm
+   Lurking variable plot can now include simulation envelopes.
+
+   o rmh.ppm
+   New argument 'w' determines the window in which the simulated pattern
+   is generated.
+
+   o ppp
+   Accelerated.
+
+   o Gcom, Gres
+   When conditional=TRUE and restrict=TRUE, the Hanisch estimate
+   was not calculated exactly as described in Appendix E.1 of 
+   Baddeley, Rubak and Moller (2011). The intensity was estimated 
+   on the full window rather than the eroded window.
+   Fixed.   
+
+   o step, drop1, add1, extractAIC
+   The AIC of a Gibbs model is now calculated using Takeuchi's rule
+   for the degrees of freedom.
+
+   o model.matrix.ppm, model.matrix.kppm
+   New argument 'Q' allows prediction at any desired locations.
+
+   o vcov.ppm
+   New argument 'fine' gives more control over computation.
+
+   o predict.ppm
+   For multitype models, when the result is a list of images,
+   the names of list entries are now identical to the mark levels
+   (e.g. "hickory" instead of "markhickory")
+
+   o print.slrm
+   Output now respects options('width')
+
+   o image.listof
+   New argument 'ribmar' controls margin space around the ribbon
+   when equal.ribbon=TRUE.
+
+   o integral.im
+   New argument 'domain' specifies the domain of integration.
+
+   o plot.fasp
+   New argument 'transpose' allows rows and columns to be exchanged.
+
+   o plot.im
+   The list 'ribargs' can now include the parameter 'labels'.
+
+   o rmh, rpoint, rpoispp, rmpoint, rmpoispp
+   Accelerated, for inhomogeneous processes.
+
+   o stienen
+   Now recognises the parameter 'lwd'. 
+
+   o suffstat
+   Accelerated (also affects ppm with method='ho').
+
+   o Poisson, AreaInter, BadGey, Concom, DiggleGatesStibbard, DiggleGratton,
+   Fiksel, Geyer, Hardcore, Hybrid, LennardJones, MultiHard, MultiStrauss,
+   MultiStraussHard, OrdThresh, Ord, PairPiece, Pairwise, SatPiece, 
+   Saturated, Softcore, Strauss, StraussHard, Triplets
+      These functions can now be printed (by typing the function name)
+      to give a sensible description of the required syntax.
+
+   o fitin
+   A plot of the fitted interpoint interaction of a point process model
+   e.g. plot(fitin(ppm(swedishpines ~ 1, Strauss(9))))
+   now shows the unit of length on the x-axis.
+   
+   o fitin
+   Plots of the fitted interpoint interaction are now possible
+   for some higher-order interactions such as Geyer and AreaInter.
+
+   o anova.ppm
+   New argument 'warn' to suppress warnings.
+
+   o rmhmodel.ppm
+   Argument 'win' renamed 'w' for consistency with other functions.
+
+   o print.ppm
+   Printed output for the fitted regular parameters
+   now respects options('digits').
+
+   o print.ppm, print.summary.ppm
+   Output now respects options('width') and spatstat.options('terse')
+
+   o print.ppm
+   By default, standard errors are not printed 
+   for a model fitted with method="logi" (due to computational load)
+
+   o plot.profilepl
+   Now recognises 'lty', 'lwd', 'col' etc
+
+   o vesicles, gorillas
+   Some of the raw data files for these datasets are also installed in spatstat
+   for demonstration and training purposes. 
+
+BUG FIXES
+
+   o rmh, rmh.ppm, rmh.default, simulate.ppm
+   The result of simulating a model with a hard core
+   did not necessarily respect the hard core constraint,
+   and simulation of a model with strong inhibition
+   did not necessarily converge. 
+   This only happened if the first order trend was large,
+   the starting state (n.start or x.start) was not given,
+   and the number of iterations (nrep) was not very large.
+   It occurred because of a poor choice for the default starting state.
+   Bug was present since about 2010.
+   Fixed.
+   
+   o markcorrint
+   Results were completely incorrect.
+   Bug introduced in spatstat 1.39-0, October 2014.
+   Fixed.
+
+   o Kinhom
+   Ignored argument 'reciplambda2' in some cases.
+   Bug introduced in spatstat 1.39-0, October 2014.
+   Fixed.
+
+   o relrisk
+   When at="pixels", a small fraction of pixel values were sometimes
+   wildly inaccurate, due to numerical errors. This affected the 
+   range of values in the result, and therefore the appearance of plots.
+   Fixed.
+
+   o model.images
+   Crashed if the model was multitype.
+   Fixed.
+
+   o profilepl
+   Crashed in some cases when the interaction was multitype.
+   [Spotted by Andrew Lister.]
+   Fixed.
+
+   o profilepl
+   Crashed if the model involved covariates that were not
+   given in a 'data' argument.
+   Fixed.
+
+   o envelope.ppm
+   Crashed if global=TRUE and savefuns=TRUE.
+   Fixed.
+
+   o setminus.owin
+   Crashed if the result was empty and the input was polygonal.
+   Fixed.
+
+   o predict.ppm
+   Crashed sometimes when type="cif" and ngrid was very large.
+   Fixed.
+
+   o pixelquad
+   If X was a multitype point pattern, the result was mangled.
+   Fixed.
+
+   o relrisk
+   Did not accept a character string value for the argument 'case'.
+   Fixed.
+
+   o intensity.ppm
+   Format of result was incorrect for ppm(Y ~ 1) where Y is multitype.
+   Fixed.
+
+   o $<-.hyperframe
+   Columns containing character values were converted to factors.
+   Fixed.
+
+   o clickppp
+   Sometimes filled the window in solid black colour.
+   Fixed.
+
+   o plot.psp
+   Ignored 'show.all' in some cases.
+   Fixed.
+
+   o plot.ppp
+   Warned about NA values amongst the marks, even if there were no NA's
+   in the column(s) of marks selected by the argument 'which.marks'.
+   Fixed.
+
+   o stienen
+   Did not suppress the border circles when border=FALSE.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.39-1
+
+OVERVIEW
+
+   o Urgent bug fix.
+
+   o We thank Suman Rakshit and Brian Ripley for contributions.
+
+BUG FIXES
+
+   o bdry.mask, convexhull
+   In R-devel only, these functions could return an empty window,
+   causing errors in other packages.
+   [Spotted by Brian Ripley.]
+   Fixed.
+
+   o project2segment
+   An error occurred if any line segments had length zero.
+   [Spotted by Suman Rakshit.]
+   Fixed.
+
+
+	CHANGES IN spatstat VERSION 1.39-0
+
+OVERVIEW
+
+   o We thank Shane Frank, Shaaban Ghalandarayeshi, Ute Hahn, 
+     Mahdieh Khanmohammadi, Nicoletta Nava, Jens Randel Nyengaard,
+     Sebastian Schutte, Rasmus Waagepetersen and Carl G. Witthoft 
+     for contributions.
+
+   o ANOVA extended to Gibbs models.
+
+   o Improved algorithm for locally-scaled K-function.
+
+   o Leave-one-out calculation of fitted values in ppm objects.
+
+   o New dataset: presynaptic vesicles.
+
+   o Geometrical operations with windows and images.
+
+   o More edge corrections for K-function.
+
+   o Improved handling and plotting of 'fv' objects.
+
+   o Utilities for perspective views of surfaces.
+
+   o New classes 'anylist', 'solist' will ultimately replace 'listof'.
+
+   o Bug fixes.
+
+   o Version nickname: 'Smoke and Mirrors'
+
+NEW DATASETS
+
+   o vesicles
+   Synaptic vesicles 
+   (includes raw data files for training purposes)
+
+NEW CLASSES
+
+   o anylist
+   List of objects. 
+   (A replacement for 'listof')
+
+   o solist
+   List of two-dimensional spatial objects.
+   (A replacement for some uses of 'listof')
+
+NEW FUNCTIONS
+
+   o perspPoints, perspLines, perspSegments, perspContour
+   Draw points and lines on a surface, as seen in perspective view.
+
+   o hopskel.test
+   Hopkins-Skellam test of CSR
+
+   o project2set
+   For each data point in a point pattern X, find the nearest
+   spatial location in a given window W.
+
+   o stienen, stienenset
+   Stienen diagrams
+
+   o dirichlet.vertices
+   Vertices of the Dirichlet tessellation
+
+   o discs 
+   Union of discs.
+   Given a point pattern dataset recording the locations and diameters
+   of objects, find the region covered by the objects.
+
+   o increment.fv
+   Increments of a summary function: g(x) = f(x+h)-f(x-h).
+
+   o rotmean
+   Rotational average of pixel values in an image
+
+   o fardist
+   Distance from each pixel/data point to farthest part of window boundary
+
+   o circumradius.owin
+   Circumradius of a window
+
+   o rmax.Trans, rmax.Ripley
+   Compute the maximum distance 'r' for which the 
+   translation edge correction and isotropic edge correction   
+   are valid.
+
+   o is.grey
+   Determines whether a colour value is a shade of grey.
+
+   o harmonise
+   Convert several objects of the same class to a common format.
+   (New generic function with existing methods harmonise.im and harmonise.fv)
+
+   o area
+   New generic function, with methods for 'owin' and 'default'.
+
+   o Fhazard
+   Hazard rate of empty space function
+
+   o anylist, as.anylist, [.anylist, [<-.anylist, print.anylist, summary.anylist
+   Support for new class 'anylist'
+
+   o solist, as.solist, [.solist, [<-.solist, print.solist, summary.solist
+   Support for new class 'solist'
+
+   o plot.anylist, plot.solist
+   Plot methods for the classes 'anylist' and 'solist'
+   (Currently identical to 'plot.listof')
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o anova.ppm
+   Now applies to Gibbs models as well as Poisson models,
+   using adjusted composite likelihood ratio test statistic.
+
+   o persp.im
+   If visible=TRUE, the algorithm will also calculate which pixels of x
+   are visible in the perspective view. This is useful for drawing
+   points or lines on a perspective surface.
+
+   o Kscaled
+   Improved algorithm [thanks to Ute Hahn.]
+   New arguments 'renormalise' and 'normpower' allow renormalisation
+   of intensity, similar to Kinhom.
+
+   o Kest
+   New option: correction="rigid" computes the rigid motion correction.
+
+   o pairwise interactions 
+   Fitted parameters and other calculations for pairwise interaction models
+   DiggleGatesStibbard, DiggleGratton, Fiksel, Geyer, Strauss 
+   may change slightly due to a change in handling numerical rounding effects.
+
+   o eval.fv
+   Functions no longer need to have exactly the same sequence of 'r' values.
+   They will now be made compatible using 'harmonise.fv'.
+
+   o fitted.ppm
+   New argument 'leaveoneout' allows leave-one-out calculation
+   of fitted intensity at original data points.
+
+   o Kinhom, Linhom
+   New argument 'leaveoneout' specifies whether the leave-one-out rule
+   should be applied when calculating the fitted intensities. 
+   
+   o crosspaircounts
+   Results may change slightly due to a change in handling numerical 
+   rounding effects.
+
+   o Fest, Gest
+   New argument 'domain' supports bootstrap methods.
+ 
+   o plot.fv
+   New argument 'mathfont' determines the font (e.g. plain, italic, bold)
+   for mathematical expressions on the axes and in the legend.
+   Defaults to italic.
+
+   o scanpp 
+   Upgraded to handle multiple columns of mark data.
+
+   o circumradius
+   The function 'circumradius' is now generic, with methods
+   for the classes 'owin' and 'linnet'.
+
+   o edge.Trans
+   New argument 'give.rmax' 
+
+   o fvnames, plot.fv
+   The symbol '.a' is now recognised. It stands for 'all function values'.
+
+   o as.function.fv
+   Argument 'extrapolate' can have length 1 or 2.
+
+   o varblock
+   New argument 'confidence' determines the confidence level.
+
+   o $<-.fv
+   This can now be used to add an extra column to an 'fv' object
+   (previously it refused).
+
+   o minnndist, maxnndist
+   New argument 'positive'. If TRUE, coincident points are ignored:
+   the nearest-neighbour distance of a point is the distance to the
+   nearest point that does not coincide with the current point.
+
+   o plot.fv
+   Improved handling of 'shade' argument.
+
+   o Kmeasure
+   Now passes '...' arguments to as.mask()
+
+   o Ksector
+   Now allows start < 0.  
+   New arguments 'units' and 'domain'.
+
+   o pairorient
+   New arguments 'units' and 'domain'.
+
+   o eroded.areas
+   New argument 'subset'
+
+   o disc
+   New argument 'delta' 
+
+   o plot.plotppm
+   New argument 'pppargs'
+
+   o harmonise.fv, harmonise.im
+   These are now methods for the new generic 'harmonise'
+
+   o Fest, Gest
+   These functions now also compute the theoretical value of hazard
+   for a Poisson process, if correction = "km".
+
+   o with.fv
+   Improved mathematical labels.
+
+   o Gfox, Jfox
+   Improved mathematical labels.
+
+   o area.owin
+   This function is now a method for the new generic 'area'
+
+   o edges
+   Default for argument 'check' changed to FALSE.
+
+BUG FIXES
+
+   o varblock
+   Calculations were incorrect if more than one column of 
+   edge corrections was computed. 
+   [Bug introduced in spatstat 1.21-1, November 2010.]
+   Fixed.
+
+   o varblock
+   Crashed if one of the quadrats contained no data points.
+   Fixed.
+
+   o lohboot
+   Interval was calculated wrongly when global=TRUE and fun="Lest" or "Linhom".
+   Fixed.
+
+   o nnmark
+   Crashed when at="points" if there was only a single column of marks.
+   [Spotted by Shane Frank.]
+   Fixed.
+
+   o plot.msr
+   Some elements of the plot were omitted or cut off.
+   Fixed.
+
+   o plot.msr
+   Did not work with 'equal.scales=TRUE'.
+   Fixed.
+
+   o plot.msr, augment.msr
+   Crashed if every data point was duplicated.
+   Fixed.
+
+   o as.im.owin
+   Crashed if X was a 1 x 1 pixel array.
+   Fixed.
+
+   o owin
+   Coordinates of polygon data were altered slightly when fix=TRUE. 
+   [Spotted by Carl Witthoft.]
+   Fixed.
+   
+   o objects of class 'fv'
+   Assigning a new value to names(x) or colnames(x) or dimnames(x)
+   would cause the internal data format to become corrupted.
+   Fixed.
+
+   o to.grey, complementarycolour
+   Did not work properly on 'colourmap' objects.
+   Fixed.
+
+   o Kest
+   Ignored argument 'var.approx' if the window was a rectangle.
+   Fixed.
+
+   o rmh.ppm, rmhmodel.ppm
+   Ignored the argument 'new.coef'.
+   [Spotted by Sebastian Schutte]
+   Fixed.
+
+   o as.function.fv
+   The meanings of 'extrapolate=TRUE' and 'extrapolate=FALSE' were swapped.
+   Fixed.
+
+   o varblock
+   Handled the case 'fun=Lest' incorrectly.
+   Fixed.
+
+   o [.fv
+   Sometimes garbled the internal data format, causing plot.fv to crash.
+   Fixed.
+
+   o range.fv
+   Sometimes returned NA even when na.rm=TRUE.
+   Fixed.
+
+   o Fest
+   Argument 'eps' was not interpreted correctly.
+   Fixed.
+
+   o plot.fv
+   Argument 'lwd' was not passed to legend().
+   Fixed.
+
+   o flipxy.owin
+   Sometimes deleted the name of the unit of length.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.38-1
+
+OVERVIEW
+
+   o We thank Ute Hahn and Xavier Raynaud for contributions.
+
+   o Urgent Bug Fixes.
+
+   o Version nickname: 'Le Hardi'
+
+NEW FUNCTIONS
+
+   o "[<-.fv", "$<-.fv"
+   Subset replacement methods for 'fv' objects.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o clarkevans.test
+   Simulations are now performed with a fixed number of points.
+
+   o plot.owin, plot.ppp, plot.psp, plot.im
+   The default size of the outer margin of white space has been
+   reduced. 
+
+   o dclf.test
+   Improved information in printed output.
+
+BUG FIXES
+
+   o update.ppm
+   Results were incorrect in several cases.
+   [Spotted by Xavier Raynaud.]
+   Bug introduced in spatstat 1.38-0.
+   Fixed.
+
+   o Kinhom, Linhom
+   Calculations were incorrect if 'lambda' was a fitted point process model.
+   [Spotted by Xavier Raynaud.]
+   Bug introduced in spatstat 1.38-0.
+   Fixed.
+
+   o envelope.envelope
+   Ignored the arguments 'global' and 'VARIANCE'.
+   Fixed.
+
+   o fv objects
+   If 'f' was an object of class 'fv', then an assignment like f$name <- NULL
+   mangled the internal format of the object 'f', leading to errors
+   in print.fv and plot.fv.
+   [Spotted by Ute Hahn.]
+   Fixed.
+
+   o split.ppp
+   split(X, A) where A is a rectangular tessellation,
+   produced errors if the window of 'A' did not include the window of 'X'.
+   [Spotted by Ute Hahn.]
+   Fixed.
+
+   o names<-.hyperframe
+   Mangled the internal format.
+   [Spotted by Ute Hahn.]
+   Fixed.
+   
+   o plot.fv
+   y axis label was incorrect in some cases when the 'fv' object
+   had only a single column of function values.
+   [Spotted by Ute Hahn.]
+   Fixed.
+
+   
+	CHANGES IN spatstat VERSION 1.38-0
+
+OVERVIEW
+
+   o We thank Malissa Baddeley, Colin Beale, Oscar Garcia, Daniel Esser, 
+     David Ford, Eric Gilleland, Andrew Hardegen, Philipp Hunziker, 
+     Abdollah Jalilian, Tom Lawrence, Lore De Middeleer, Robin Milne, 
+     Mike Porter, Suman Rakshit, Pablo Ramon, Jason Rudokas, 
+     Christopher Ryan, Dominic Schuhmacher, Medha Uppala 
+     and Rasmus Waagepetersen for contributions.
+
+   o spatstat now Requires the package 'goftest' 
+     and Suggests the package 'Matrix'.
+
+   o New dataset: 'sporophores'
+
+   o Palm likelihood method for fitting cluster processes and Cox processes.
+
+   o Quasi-likelihood and weighted composite likelihood methods
+     for estimating trend in cluster processes and Cox processes.
+
+   o Further extensions to model formulas in ppm and kppm.
+
+   o Faster variance calculations for ppm objects.
+
+   o One-sided tests and one-sided envelopes of summary functions.
+
+   o Cramer-Von Mises and Anderson-Darling tests of spatial distribution.
+
+   o Cressie-Read test statistic in quadrat counting tests.
+
+   o Spatial cumulative distribution functions.
+
+   o Faster algorithm for point pattern matching.
+
+   o Improvements to plots.
+
+   o Increased support for envelopes.
+
+   o New generic functions 'Window', 'Frame' and 'domain'.
+
+   o Directional K-function and directional distribution.
+
+   o Raster calculations accelerated.
+
+   o Summary functions accelerated.
+
+   o Many improvements and bug fixes.
+
+   o Version nickname: 'Wicked Plot'
+
+NEW DATASETS
+
+   o sporophores
+   Spatial pattern of three species of mycorrhizal fungi around a tree.
+   [Contributed by E. David Ford.]
+
+NEW FUNCTIONS
+
+   o improve.kppm
+   Re-estimate the trend in a kppm (cluster or Cox) model
+   using quasi-likelihood or weighted first-order composite likelihood.
+   [Contributed by Abdollah Jalilian and Rasmus Waagepetersen.]
+
+   o Window, Window<-
+   Generic functions to extract and change the window of a spatial object
+   in two dimensions. Methods for ppp, psp, im, and many others.
+
+   o Frame, Frame<-
+   Generic functions to extract and change the containing rectangle ('frame')
+   of a spatial object in two dimensions. 
+
+   o domain
+   Generic function to extract the spatial domain of a spatial object
+   in any number of dimensions. 
+
+   o Ksector
+   Directional version of the K-function.
+
+   o pairorient
+   Point pair orientation distribution.
+
+   o spatialcdf
+   Compute the spatial cumulative distribution of a spatial covariate,
+   optionally using spatially-varying weights.
+
+   o cdf.test 
+   [Supersedes 'kstest'.]
+   Test of goodness-of-fit of a Poisson point process model.
+   The observed and predicted distributions of the values of a 
+   spatial covariate are compared using either the
+   Kolmogorov-Smirnov, Cramer-Von Mises or Anderson-Darling test.
+
+   o berman.test
+   Replaces 'bermantest'.
+
+   o harmonise.fv
+   Make several functions compatible.
+
+   o simulate.lppm
+   Simulate a fitted point process model on a linear network.
+
+   o subset.ppp, subset.lpp, subset.pp3, subset.ppx
+   Methods for 'subset', for point patterns.
+
+   o closepairs.pp3, crosspairs.pp3
+   Low-level functions to find all close pairs of points in three dimensions
+
+   o volume.linnet
+   Method for the generic 'volume'. Returns the length of the linear network.
+
+   o padimage
+   Pad the border of a pixel image.
+ 
+   o as.layered
+   Convert spatial data to a layered object.
+
+   o panel.contour, panel.image, panel.histogram
+   Panel functions for 'pairs' plots.
+
+   o range.fv, min.fv, max.fv
+   Range, minimum and maximum of function values.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o ppm.formula
+   The left hand side of the formula can now be the name of 
+   an object in the list 'data', or an expression involving such objects.
+
+   o ppm
+   The right hand side of the formula can now include 
+   the symbol '.' representing all covariates in the list 'data'.
+
+   o ppm
+   New argument 'subset' makes it possible to fit the model
+   in a subset of the spatial domain defined by an expression.
+
+   o kppm
+   New option: method="palm", will fit the model by maximising Palm likelihood.
+
+   o pppdist
+   Substantially accelerated. 
+   New argument 'auction' controls choice of algorithm.
+
+   o rhohat
+   New arguments 'weights' and 'horvitz' for weighted calculations.
+
+   o persp.im
+   Surface heights and colours can now be controlled by different images.
+   Option to draw a grey apron around the sides of the perspective plot. 
+   Return value has a new attribute 'expand'.
+
+   o plot.listof
+   New arguments 'halign' and 'valign' give improved control
+   over the alignment of panels.
+
+   o plot.listof
+   If all components of the list are objects of class 'fv' 
+   representing functions, then if equal.scales=TRUE, these
+   functions will all be plotted with the same axes 
+   (i.e. the same xlim and the same ylim).
+
+   o envelope
+   The argument 'transform' is now processed by 'with.fv'
+   giving more options, such as 'transform=expression(. - r)'
+
+   o envelope, dclf.test, mad.test
+   One-sided tests and one-sided envelopes can now be produced, 
+   by specifying the argument 'alternative'.
+
+   o dclf.test, mad.test
+   A pointwise test at fixed distance 'r' can now be performed
+   by setting rinterval = c(r,r).
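+   For example (an illustrative call; the distance 0.05 is arbitrary):
+      dclf.test(cells, Lest, nsim=99, rinterval=c(0.05, 0.05))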
+
+   o envelope
+   New arguments 'fix.n' and 'fix.marks' for envelope.ppp and envelope.ppm
+   make it easy to generate simulated patterns conditional on the
+   total number of points, or on the number of points of each type.
+
+   o quadrat.test
+   Can now calculate the Cressie-Read test statistic
+   instead of the Pearson X2 statistic.
+
+   o Kres, Gres, Kcom, Gcom, psst, psstA, psstG
+   New argument 'model' makes it easier to generate simulation envelopes
+   of the residual summary functions.
+
+   o layered, plot.layered
+   The layer plot arguments can include the argument '.plot'
+   specifying a function to perform the plotting instead of the
+   generic 'plot'.
+
+   o deriv.fv
+   New arguments make it possible to differentiate a periodic function.
+
+   o ppm
+   Argument 'data' or 'covariates' can now include entries
+   which are not spatial covariates, provided they do not appear 
+   in the model formula.
+
+   o closepairs, crosspairs
+   These functions are now generic, with methods for 'ppp' and 'pp3'
+
+   o rLGCP
+   Updated to conform to new usage of RandomFields package.
+   Argument syntax has changed.
+   Now allows control over pixel resolution.
+
+   o bw.diggle
+   New arguments 'correction' and 'hmax' for controlling the calculation.
+
+   o predict.lppm
+   New argument 'new.coef' for computing predictions with a 
+   different vector of model coefficients.
+
+   o predict.ppm
+   If 'locations' is a pixel image, its pixels determine the
+   spatial locations for prediction.
+
+   o cut.ppp
+   Argument 'z' can now be a window.
+
+   o split.ppp
+   Argument 'f' can now be a window.
+
+   o print.ppm, summary.ppm, coef.summary.ppm
+   The table of parameter estimates, standard errors and confidence intervals
+   now also includes the value of the (signed square root) Wald test statistic.
+
+   o plot.im
+   Now automatically detects problems in some Windows graphics displays
+   and tries to avoid them.
+
+   o plot.im
+   The position of axis tick marks alongside the colour ribbon
+   can now be controlled using the parameter 'at' in the argument 'ribargs'.
+
+   o plot.ppp
+   Can now plot numeric marks using characters chosen by 'pch' or 'chars'
+   with size determined by mark value.
+
+   o plot.ppp
+   New argument 'meansize' for controlling mark scale.
+
+   o hsvim, rgbim
+   New argument 'autoscale' causes automatic scaling of colour channel values.
+
+   o plot.ppp
+   If type='n', a legend is now displayed when x is a marked point pattern.
+
+   o whist
+   Accelerated by a factor of 5.
+
+   o Fest, Jest
+   Accelerated by a factor of 2 to 3.
+
+   o fryplot
+   Accelerated.
+   Now displays a legend if the point pattern is marked.
+   Now handles numerical marks nicely.
+   New argument 'axes'.
+
+   o frypoints
+   Accelerated.
+   New arguments 'to', 'from' and 'dmax'.
+
+   o duplicated.ppp
+   New option: rule = 'unmark'
+
+   o rjitter
+   Argument 'radius' now has a default.
+
+   o Smooth.msr
+   New argument 'drop'
+
+   o LambertW
+   Now handles NA and infinite values.
+
+   o update.ppm
+   Now handles formulae with a left-hand side.
+
+   o raster.x, raster.y, raster.xy
+   These functions can now handle images, as well as masks.
+
+   o Smooth.ppp
+   If the mark values are exactly constant, the resulting smoothed values
+   are now exactly constant.
+
+   o eval.im, eval.fv, eval.fasp
+   Argument 'envir' can now be a list, instead of an environment.
+
+   o plot.ppp
+   The printout (of the resulting symbol map object)
+   now displays the numerical value of the mark scale.
+
+   o with.fv
+   Improved mathematical labels.
+
+   o plot.fv
+   Improved mathematical labels on x axis.
+
+   o ppm
+   Improved error messages.
+
+   o vcov.ppm
+   Computations greatly accelerated for Hybrid interactions
+   and for Area-interaction models.
+
+   o vcov.kppm
+   Computations greatly accelerated (when fast=TRUE)   
+
+   o interp.im
+   Argument 'x' can now be a point pattern.
+
+   o pool.envelope
+   Improved handling of text information.
+
+   o miplot
+   Improved layout.
+
+   o print.summary.ppp
+   Improved layout. 
+   Now respects spatstat.options('terse')
+
+   o print.profilepl
+   Improved layout. 
+   Now respects spatstat.options('terse')
+
+   o anova.ppm
+   Now respects spatstat.options('terse')
+
+   o print.fv, print.envelope
+   Now respect spatstat.options('terse') and options('width')
+   
+   o summary.envelope
+   Now respects options('width')
+   
+   o kstest, bermantest
+   These functions will soon be Deprecated.
+   They are retained only for backward compatibility.
+
+BUG FIXES
+
+   o vcov.ppm
+   Sometimes gave wrong answers for Poisson models fitted by method='logi'.
+   Fixed.
+
+   o unnormdensity
+   If weights were missing, the density was normalised,
+   contrary to the documentation.
+   Fixed.
+
+   o logLik.ppm, anova.ppm, AIC
+   For models fitted by 'ippm', the number of degrees of freedom was incorrect.
+   Fixed.
+
+   o im.apply
+   Pixels outside the window were not assigned the value NA as they should.
+   Fixed.
+
+   o pixellate.owin
+   Crashed, unpredictably, if the pixel raster had unequal numbers
+   of rows and columns.
+   [Spotted by Rasmus Waagepetersen.]
+   Fixed.
+
+   o vcov.ppm
+   Crashed for pairwise interaction models fitted by method="logi".
+   Fixed.
+
+   o predict.ppm
+   Crashed for models fitted by method="logi"
+   if the model included external covariates.
+   Fixed.
+
+   o predict.ppm
+   Crashed if the argument 'covariates' or 'data' in the original call to 'ppm'
+   included entries that were not spatial covariates. [These entries were
+   ignored by ppm but caused predict.ppm to crash.]
+   Fixed.
+
+   o simulate.kppm, rNeymanScott, rThomas, rMatClust
+   Crashed randomly when simulating an inhomogeneous model.
+   [Spotted by Philipp Hunziker.]
+   Fixed.
+   
+   o bw.diggle
+   In some extreme cases, generated an error message
+   about `NaN values in Foreign function call.'
+   [Spotted by Colin Beale.]
+   Fixed.
+
+   o textureplot
+   Crashed if 'spacing' was too large.
+   Fixed.
+
+   o superimpose.psp
+   Crashed if the result was empty.
+   Fixed.
+
+   o istat
+   Crashed with an error message about 'vars'.
+   Fixed.
+
+   o dirichlet, delaunay, delaunay.distance
+   Crashed in rare cases due to a problem in package 'deldir'. 
+   [Spotted by Pierre Legendre.]
+   Fixed.
+
+   o rgbim, hsvim
+   Crashed if any argument was constant.
+   Fixed.
+
+   o scaletointerval
+   Crashed if x was constant.
+   Fixed.
+
+   o linnet, [.linnet
+   Crashed if the result contained only a single vertex.
+   [Spotted by Daniel Esser.]
+   Fixed.
+
+   o plot.fv
+   If some of the function values were NA,
+   they were replaced by fictitious values
+   (by linearly interpolating).
+   Fixed.
+
+   o crossdist.ppp
+   Ignored argument 'squared' if periodic=FALSE.
+   [Spotted by Mike Porter.]
+   Fixed.
+
+   o marks<-.ppp
+   Ignored argument 'drop'.
+   [Spotted by Oscar Garcia.]
+   Fixed.
+ 
+   o update.ppm
+   Sometimes did not respect the argument 'use.internal'.
+   Fixed.
+
+   o plot.rhohat
+   Did not respect the argument 'limitsonly'.
+   Fixed.
+
+   o contour.im
+   Argument 'axes' defaulted to TRUE, but FALSE was intended.
+   Fixed.
+
+   o print.hyperframe, as.data.frame.hyperframe
+   Column names were mangled if the hyperframe had a single row.
+   Fixed.
+
+   o as.psp.data.frame
+   Generated a warning about partially-matched names in a data frame.
+   [Spotted by Eric Gilleland.]
+   Fixed.
+
+   o plot.leverage.ppm
+   Generated a warning from 'contour.default'
+   if the leverage function was constant.
+   Fixed.
+
+   o plot.diagppm
+   Issued warnings about unrecognised graphics parameters.
+   Fixed.
+
+   o update.symbolmap
+   Discarded information about the range of input values.
+   Fixed.
+
+   o plot.fv
+   Label for y axis was garbled, if argument 'shade' was given.
+   Fixed.
+
+   o plot.ppp
+   The legend was sometimes plotted when it should not have been 
+   (e.g. when add=TRUE).
+   Fixed.
+
+   o plot.listof, plot.im
+   In an array of plots, containing both images and other spatial objects,
+   the titles of the panels were not correctly aligned.
+   Fixed.
+
+   o plot.tess, plot.quadratcount
+   Ignored arguments like 'cex.main'.
+   Fixed.
+
+   o iplot
+   Navigation buttons (Left, Right, Up, Down, Zoom In, Zoom Out)
+   did not immediately refresh the plot.
+   Fixed.
+
+   o iplot.layered
+   Reported an error 'invalid argument type' if all layers were deselected.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.37-0
+
+OVERVIEW
+
+   o Ege Rubak is now a joint author of spatstat. 
+
+   o We thank Peter Forbes, Tom Lawrence and Mikko Vihtakari for contributions.
+
+   o Spatstat now exceeds 100,000 lines of code.
+
+   o New syntax for point process models (ppm, kppm, lppm)
+     equivalent to syntax of lm, glm, ...
+
+   o Covariates in ppm and kppm can now be tessellations.
+
+   o Confidence intervals and prediction intervals for fitted models.
+
+   o Quasirandom point patterns and sequences.
+
+   o Plots using texture fill.
+
+   o Support for mappings from data to graphical symbols and textures.
+
+   o Automatic re-fitting of model in Ginhom, Kinhom, Finhom, Jinhom.
+
+   o Support for Mixed Poisson distribution.
+
+   o Interpretation of mark scale parameters has changed in plot.ppp
+
+   o Syntax of multitype interactions (eg MultiStrauss) has changed.
+
+   o Bug fix in Metropolis-Hastings simulation of 'StraussHard' models
+
+   o Changed default behaviour of perfect simulation algorithms.
+
+   o Improvements to layout of text output.
+
+   o Version nickname: 'Model Prisoner'
+
+NEW CLASSES
+
+   o symbolmap
+   An object of class 'symbolmap' represents a mapping
+   from data to graphical symbols
+
+   o texturemap
+   An object of class 'texturemap' represents a mapping
+   from data to graphical textures.
+
+NEW FUNCTIONS
+
+   o split.hyperframe, split<-.hyperframe
+   methods for split and split<- for hyperframes.
+
+   o dmixpois, pmixpois, qmixpois, rmixpois
+   (log-)normal mixture of Poisson distributions.
+
+   o vdCorput, Halton, Hammersley, rQuasi
+   quasirandom sequences and quasirandom point patterns.
+
+   o Smoothfun
+   create a function(x,y) equivalent to the result of Smooth.ppp
+
+   o minnndist, maxnndist
+   Faster ways to compute min(nndist(X)), max(nndist(X))
+
+   o add.texture
+   Draw a simple texture inside a specified region.
+
+   o textureplot
+   Display a factor-valued pixel image using texture fill.
+
+   o texturemap
+   Create a texture map
+
+   o plot.texturemap
+   Plot a texture map in the style of a legend
+   
+   o symbolmap
+   Create a symbol map
+
+   o update.symbolmap
+   Modify a symbol map
+
+   o invoke.symbolmap
+   Apply symbol map to data values, and plot them
+
+   o plot.symbolmap 
+   Plot the symbol map in the style of a legend
+
+   o as.owin.boxx
+   Converts a 'boxx' to an 'owin' if possible.
+
+   o ellipse
+   Create an elliptical window.
+
+   o clickbox
+   Interactively specify a rectangle, by point-and-click on a graphics device.
+
+   o complementarycolour
+   Compute the complementary colour value of a given colour value,
+   or the complementary colour map of a given colour map.
+
+   o gauss.hermite
+   Gauss-Hermite quadrature approximation to the expectation
+   of any function of a normally-distributed random variable.	
+
+   o boundingbox
+   Generic function, replaces bounding.box
+
+   o edges
+   Extract boundary edges of a window. 
+   Replaces and extends 'as.psp.owin'
+
+   o pixelcentres
+   Extract centres of pixels as a point pattern.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o ppm, kppm, lppm
+   NEW SYNTAX FOR POINT PROCESS MODELS
+   The model-fitting functions 'ppm', 'kppm' and 'lppm' now accept
+   a syntax similar to 'lm' or 'glm', for example  ppm(X ~ Z), 
+   but still accept the older syntax ppm(X, ~Z). 
+   To support both kinds of syntax, the functions 'ppm' and 'kppm'
+   are now generic, with methods for the classes 'formula', 'ppp' and 'quad'. 
+   The 'formula' method handles a syntax like ppm(X ~ Z) while the
+   'ppp' method handles the old syntax ppm(X, ~Z).
+   Similarly 'lppm' is generic with methods for 'formula' and 'lpp'.
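+   For example, the following calls are equivalent:
+      fit <- ppm(cells ~ x + y)    # new 'formula' syntax
+      fit <- ppm(cells, ~x + y)    # older syntax, still accepted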
+
+   o ppm, kppm, lppm
+   Covariates appearing in the model formula can be objects 
+   which exist in the R session, instead of always having to be
+   elements of the list `covariates'.
+   
+   o ppm.formula, kppm.formula, lppm.formula
+   Formulae involving polynom() are now expanded, symbolically,
+   so that polynom(x, 3) becomes x + I(x^2) + I(x^3) 
+   and polynom(x,y,2) becomes x + y + I(x^2) + I(x*y) + I(y^2).
+   This neatens the model output, and also makes it possible
+   for anova() and step() to add or delete single terms in the polynomial.
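+   For example:
+      ppm(cells ~ polynom(x, 3))
+   is expanded internally to  ppm(cells ~ x + I(x^2) + I(x^3)).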
+
+   o predict.ppm
+   New argument 'interval' allows confidence intervals or prediction intervals
+   to be calculated.
+
+   o predict.ppm
+   New argument 'total' allows for prediction of the total number of points
+   in a specified region.
+   
+   o plot.ppp, plot.lpp
+   For marked point patterns, a legend is automatically added
+   to the plot, by default. Arguments have changed: new arguments
+   include parameters of the legend, and an optional symbol map.
+   Result has changed: it is now an object of class 'symbolmap'.
+
+   o plot.ppp, plot.lpp
+   Interpretation of the parameters 'markscale' and 'maxsize' 
+   has changed. The size of a circle in the plot is now defined 
+   as the circle's diameter instead of its radius. (Size of
+   a square is measured, as before, by its side length). 
+
+   o parres
+   Now handles the case where the fitted model is not separable
+   but its restriction to the given 'subregion' is separable.
+
+   o envelope
+   Now issues a warning if the usage of envelope() appears to be `invalid' 
+   in the sense that the simulated patterns and the data pattern
+   have not been treated equally.
+
+   o Kinhom, Finhom, Ginhom, Jinhom
+   New argument 'update'.
+   If 'lambda' is a fitted model (class ppm or kppm) and update=TRUE,
+   the model is re-fitted to the data pattern, before the intensities
+   are computed.
+
+   o rDiggleGratton, rDGS, rHardcore, rStrauss, rStraussHard
+   By default the point pattern is now generated on
+   a larger window, and trimmed to the original window.
+   New argument expand=TRUE.
+
+   o MultiStrauss, MultiHard, MultiStraussHard
+   The syntax of these functions has changed.
+   The new code should still accept the old syntax.
+
+   o rhohat
+   rhohat.ppp and rhohat.quad have new argument 'baseline'
+
+   o ippm
+   Algorithm improved. Argument syntax changed.
+
+   o default.dummy, quadscheme
+   Dummy points can now be generated by a quasirandom sequence.
+
+   o plot.owin
+   The window can now be filled with one of 8 different textures.
+   Arguments changed. 
+
+   o ppm, kppm
+   Covariates in the model can now be tessellations.
+
+   o [.im
+   New argument 'tight' allows the resulting image to be trimmed 
+   to the smallest possible rectangle.
+
+   o [.psp, rlinegrid, rpoisline
+   These functions now handle binary mask windows.
+
+   o rotate
+   The user can specify the centre of rotation.
+
+   o rescale
+   rescale() and all its methods now have argument 'unitname'
+   which can be used to change the name of the unit of length.
+
+   o anova.ppm
+   Output format has been improved. 
+   Number of columns of result has changed.
+
+   o print.ppp, print.summary.ppp, print.owin, print.summary.owin,
+   print.im, print.summary.im, print.fv, print.msr, print.profilepl
+   These functions now avoid over-running the text margin
+   (i.e. they respect options('width') where possible).
+
+   o layerplotargs<-
+   Now handles any spatial object, converting it to a 'layered' object.
+
+   o effectfun
+   Improved display in case se.fit=TRUE.
+
+   o scaletointerval
+   New argument 'xrange'
+
+   o contour.im
+   New argument 'show.all'.
+   Default value of 'axes' changed to FALSE.
+
+   o identify.ppp
+   Now handles multivariate marks. 
+
+   o plot.listof
+   Improved layout.
+   New arguments 'hsep', 'vsep'.
+   Argument 'mar.panel' may have length 1, 2 or 4.
+
+   o plot.splitppp
+   This function is no longer identical to plot.listof.
+   Instead it is a much simpler function which just
+   calls plot.listof with equal.scales=TRUE.
+
+   o anova.ppm
+   Output is neater.
+
+   o plot.layered
+   New argument 'do.plot'
+
+   o plot.psp
+   New argument 'do.plot'
+   
+   o as.psp.owin
+   New argument 'window'
+
+   o plot.im, contour.im, textureplot
+   New argument 'clipwin'
+
+   o plot.ppp
+   New argument 'clipwin'
+
+   o plot.msr
+   New argument 'how' allows density to be plotted as image and/or contour
+
+   o diagnose.ppm, plot.diagppm
+   More options for 'plot.neg'
+
+   o plot.leverage.ppm, plot.influence.ppm, plot.msr
+   Argument 'clipwin' can now be used to restrict the display
+   to a subset of the full data.
+
+   o [.hyperframe, [<-.hyperframe, $.hyperframe, $<-.hyperframe
+   These functions are now documented.
+
+   o leverage.ppm, influence.ppm, dfbetas.ppm
+   Resulting objects are now smaller (in memory size).
+
+   o print.ppm
+   Now indicates whether the irregular parameters 'covfunargs'
+   were optimised (by profilepl or ippm) or whether they were simply
+   provided by the user.
+
+   o plot.ppp
+   A point pattern with numerical marks can now be plotted as
+   filled dots with colours determined by the marks, 
+   by setting pch=21 and bg=<colourmap>
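+   For example (an illustrative sketch; the colour range is arbitrary):
+      cm <- colourmap(heat.colors(16), range=c(0, 80))
+      plot(longleaf, pch=21, bg=cm)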
+
+   o colourmap
+   Now handles dates and date-time values (of class 'Date' or 'POSIXt').
+
+   o plot.ppp, print.ppp, summary.ppp
+   Improved handling of dates and date-time values 
+   (of class 'Date' or 'POSIXt') in the marks of a point pattern.
+
+   o cut.im
+   Now refuses to handle images whose pixel values are
+   factor, logical or character.
+
+   o centroid.owin
+   New argument 'as.ppp'
+
+   o superimpose
+   Improved default names for columns of marks.
+
+   o Softcore()
+   Improved printout.
+
+   o kppm, lgcp.estpcf, lgcp.estK
+   Adjusted to new structure of RandomFields package.
+   No change in syntax.
+
+   o data(murchison)
+   This dataset now belongs to class 'listof' so that it can be
+   plotted directly.
+
+   o data(clmfires)
+   The format of the covariate data has changed.
+   The objects 'clmcov100' and 'clmcov200' are now 
+   elements of a list 'clmfires.extra'.
+
+   o bounding.box
+   This function is now Deprecated; it has been replaced 
+   by the generic boundingbox().
+
+   o as.psp.owin 
+   This function is now Deprecated; it has been replaced 
+   and extended by the function edges().
+
+   o plot.kstest
+   Changed defaults so that the two curves are distinguishable.
+
+   o with.fv
+   Improved mathematical labels.
+
+BUG FIXES
+
+   o intensity.quadratcount
+   Values were incorrect for a rectangular tessellation
+   (the matrix of intensities was transposed).
+   Fixed.
+
+   o rmh, simulate.ppm
+   Simulation of the Strauss-hard core model (StraussHard) was incorrect
+   (intensity of the simulated process was about 15% too low).
+   Bug introduced in spatstat 1.31-0 (January 2013).
+   Fixed.
+
+   o intensity.quadratcount
+   Crashed for a rectangular tessellation with only a single row or column.
+   Fixed.
+   
+   o model.images.ppm
+   Crashed sometimes if the argument W was given.
+   Fixed.
+
+   o eval.im
+   Crashed when applied to images with only a single row or column.
+   Fixed.
+
+   o ppp, marks<-.ppp
+   If the marks were a vector of dates, they were erroneously 
+   converted to numbers.
+   Fixed.
+
+   o ippm
+   Crashed if the model formula included an offset term
+   that was not a function.
+   Fixed.
+
+   o leverage.ppm
+   Crashed sometimes when the model had irregular parameters ('covfunargs').
+   Fixed.
+
+   o residuals.ppm
+   Crashed sometimes when type='score'.
+   Fixed.
+
+   o scaletointerval
+   Did not handle dates and date-time values correctly.
+   Fixed.
+
+   o rbind.hyperframe, as.list.hyperframe
+   Gave incorrect results for hyperframes with 1 row.
+   Fixed.
+
+   o Kinhom
+   Did not renormalise the result (even when renormalise=TRUE), in some cases.
+   Spotted by Peter Forbes.
+   Fixed.
+
+   o disc
+   If mask=TRUE the disc was erroneously clipped to the square [-1,1] x [-1,1].
+   Fixed.
+
+   o plot.fv
+   Sometimes shaded the wrong half of the graph 
+   when the 'shade' coordinates were infinite.
+   Fixed.
+
+   o print.ppm
+   Gave an error message if the coefficient vector had length zero.
+   Fixed.
+
+   o vcov.ppm
+   Gave an error message if the coefficient vector had length zero.
+   Fixed.
+
+   o plot.distfun, as.im.distfun
+   These functions effectively ignored the argument 'invert' 
+   in the original call to distfun.
+   Fixed.
+
+   o plot.msr
+   Ignored certain additional arguments such as 'pch'.
+   Fixed.
+
+   o cut.im
+   Crashed if the image had 1 row or 1 column of pixels.
+   Fixed.
+
+   o iplot.ppp
+   Crashed with message about missing object 'vals'.
+   Fixed.
+
+   o effectfun
+   Demanded a value for every covariate supplied in the original call to ppm, 
+   even for covariates which were not used in the model.
+   Fixed.
+
+   o plot.listof, plot.hyperframe
+   When plotting 3D point patterns (class pp3), these functions
+   issued warnings about 'add' being an unrecognised graphics argument.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.36-0
+
+OVERVIEW
+
+   o We thank Sebastian Meyer, Kevin Ummer, Jean-Francois Coeurjolly,
+   Ege Rubak, Rasmus Waagepetersen, Oscar Garcia and Sourav Das
+   for contributions.
+
+   o Important change to package dependencies.
+
+   o Geometrical inconsistencies in polygons are now repaired automatically.
+
+   o Improved quadrature schemes and reduced bias in ppm.
+
+   o New vignette 'Summary of Recent Changes to Spatstat'.
+
+   o Approximation to K function and pcf for Gibbs models.
+
+   o Counterpart of 'apply' for lists of images.
+
+   o Hexagonal grids and tessellations.
+
+   o Extensions to scan test and Allard-Fraley cluster set estimator.
+
+   o Change the parameters of a fitted model before simulating it.
+
+   o Accelerated Kest, Kinhom for rectangular windows.
+
+   o Extensions and improvements to plotting functions.
+
+   o Improvements to labelling of 'fv' objects.
+
+   o New demo of summary functions.
+
+   o More methods for 'intensity'.
+
+   o Version nickname: 'Intense Scrutiny'
+
+NEW FUNCTIONS
+
+   o Kmodel.ppm, pcfmodel.ppm
+   Compute approximation to K-function or pair correlation function
+   of a Gibbs point process model.
+
+   o im.apply
+   Apply a function to corresponding pixel values in several images.
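+   For example, a minimal sketch using the standard dataset 'amacrine'
+   (the images must share a common pixel raster):
+      DD <- density(split(amacrine))   # two images on the same raster
+      Zmax <- im.apply(DD, max)        # pixelwise maximum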
+
+   o hexgrid, hextess
+   Create a hexagonal grid of points, or a tessellation of hexagonal tiles
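+   A minimal sketch (hexagon side length 0.1 in the unit square):
+      H <- hextess(square(1), 0.1)   # tessellation of hexagonal tiles
+      P <- hexgrid(square(1), 0.1)   # centres of the hexagons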
+
+   o shift.tess, rotate.tess, reflect.tess, scalardilate.tess, affine.tess
+   Apply a geometrical transformation to a tessellation.
+
+   o quantile.ewcdf
+   Extract quantiles from a weighted cumulative distribution function.
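+   A minimal sketch, with arbitrary weights that sum to 1:
+      x <- runif(100)
+      w <- rexp(100); w <- w/sum(w)
+      e <- ewcdf(x, w)
+      quantile(e, c(0.25, 0.5, 0.75))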
+
+   o scanLRTS
+   Evaluate the spatially-varying test statistic for the scan test.
+
+   o pcfmulti
+   General multitype pair correlation function
+
+   o intensity.splitppp
+   Estimate intensity in each component of a split point pattern.
+
+   o intensity.quadratcount
+   Use quadrat counts to estimate intensity in each quadrat.
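+   For example:
+      Q <- quadratcount(cells, nx=4, ny=3)
+      intensity(Q)   # matrix of estimated intensities, one per quadrat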
+
+   o as.owin.quadratcount, as.owin.quadrattest
+   Extract the spatial window in which quadrat counts were performed.
+
+   o reload.or.compute
+   Utility function for R scripts: 
+   either reload results from file, or compute them.
+
+   o to.grey
+   Convert colour to greyscale.
+
+   o Smooth.im
+   Method for Smooth() for pixel images. Currently identical to blur().
+
+   o demo(sumfun)
+   Demonstration of nonparametric summary functions in spatstat.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o Package Dependencies
+   spatstat now "Imports" (rather than "Depends" on) 
+   the libraries mgcv, deldir, abind, tensor, polyclip.
+   This means that these libraries are not accessible to the user
+   unless the user explicitly loads them by typing 'library(mgcv)' and so on.
+
+   o owin, as.owin
+   Polygon data are no longer subjected to strict checks on 
+   geometrical validity (self-crossing points, overlaps etc.)
+   Instead, polygon geometry is automatically repaired.
+   
+   o ppm
+   The default quadrature scheme for a point pattern
+   has been improved (in the case of a non-rectangular window)
+   to remove a possible source of bias.
+
+   o Performance 
+   Various parts of spatstat now run slightly faster.
+
+   o scan.test 
+   Now handles multiple values of circle radius 'r'.
+
+   o plot.scan.test, as.im.scan.test
+   These functions can now give the optimal value of circle radius 'r'.
+
+   o pcfcross, pcfdot
+   Algorithms have been reimplemented using a single-pass kernel smoother
+   and now run much faster. Bandwidth selection rule improved.
+
+   o plot.listof, plot.splitppp
+   Default behaviour has changed: panels are now plotted on different scales. 
+
+   o plot.listof, plot.splitppp
+   When 'equal.scales=TRUE' the panels are plotted on exactly equal scales
+   and are exactly aligned (under certain conditions).
+
+   o ppp, marks.ppp, marks<-.ppp
+   New argument 'drop' determines whether a data frame with a single column
+   will be converted to a vector.
+
+   o simulate.ppm, rmh.ppm, rmhmodel.ppm
+   New argument 'new.coef' allows the user to change the parameters
+   of a fitted model, before it is simulated.
+
+   o logLik.ppm
+   New argument 'new.coef' allows the user to evaluate the 
+   loglikelihood for a different value of the parameter.
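+   A minimal sketch combining the two 'new.coef' extensions above,
+   doubling the intensity of a fitted stationary Poisson model:
+      fit <- ppm(cells, ~1)
+      b <- coef(fit) + log(2)
+      logLik(fit, new.coef=b)
+      Xsim <- simulate(fit, new.coef=b)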
+
+   o clusterset
+   The argument 'result' has been renamed 'what'.
+   It is now possible to give multiple values to 'what'
+   so that both types of result can be computed together.
+
+   o residuals.ppm
+   Argument 'coefs' has been renamed 'new.coef' for consistency
+   with fitted.ppm etc. 
+
+   o residuals.ppm
+   If drop=TRUE the window associated with the residuals
+   is now taken to be the domain of integration of the composite likelihood.
+   
+   o intensity.ppp
+   Now has argument 'weights'
+
+   o density.ppp, Smooth.ppp, markmean, markvar, intensity.ppp
+   Argument 'weights' can now be an 'expression'.
+
+   o pcf
+   New argument 'domain' causes the computation to be restricted to 
+   a subset of the window.
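+   For example (the domain must lie inside the window of the data):
+      g <- pcf(cells, domain=square(0.5))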
+
+   o nnclean
+   The result now has attributes which give the fitted parameter values,
+   information about the fitting procedure, and the histogram bar heights.
+
+   o nnclean
+   Extra arguments are now passed to hist.default.
+
+   o plot.tess
+   For a tessellation represented by a pixel image, 
+   plot.tess no longer treats the pixel labels as palette colours.
+
+   o relrisk
+   New argument 'case' allows the user to specify which mark value
+   corresponds to the cases in a case-control dataset.
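+   A minimal sketch using the standard dataset 'chorley',
+   whose marks have levels "larynx" and "lung":
+      p <- relrisk(chorley, sigma=2, case="larynx")
+      # sigma chosen arbitrarily for illustration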
+
+   o Kinhom
+   Now accepts correction="good" 
+
+   o spatstat.options
+   New option ('monochrome') controls whether plots generated by spatstat
+   will be displayed in colour or in greyscale. This will eventually be
+   applied to all plot commands in spatstat. 
+
+   o plot.im, persp.im, contour.im, plot.owin, plot.psp, plot.fv, plot.fasp
+   These functions now obey spatstat.options('monochrome')
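+   A minimal sketch, assuming spatstat.options() follows the
+   conventions of options() for saving and restoring settings:
+      oldopt <- spatstat.options(monochrome=TRUE)
+      plot(density(cells))       # rendered in greyscale
+      spatstat.options(oldopt)   # restore previous settings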
+
+   o plot.ppp, plot.owin, plot.im, plot.psp, plot.tess, plot.layered
+   New universal argument 'show.all' determines what happens
+   when a plot is added to an existing plot. If show.all = TRUE
+   then everything is plotted, including the main title and colour ribbon.
+
+   o plot.ppp
+   New argument 'show.window' 
+
+   o plot.im 
+   New arguments 'add' and 'do.plot'.
+   More arguments recognised by 'ribargs'
+
+   o plot.layered
+   New arguments 'add', 'main' 
+   Better argument handling.
+
+   o plot.fv
+   Improved handling of argument 'shade'
+
+   o layered, layerplotargs, plot.layered
+   The plotting argument can now be a list of length 1,
+   which will be replicated to the correct length.
+
+   o varblock
+   Ugly legends have been repaired.
+
+   o quad.ppm
+   New argument 'clip'
+
+   o edge.Trans
+   New arguments 'dx', 'dy'
+
+   o disc
+   Argument 'centre' can be in various formats.
+
+   o affine, shift
+   Argument 'vec' can be in various formats.
+
+   o Geyer, BadGey
+   A warning is no longer issued when the parameter 'sat' is fractional.
+
+   o adaptive.density
+   Now has argument 'verbose'
+
+   o Smooth.ppp
+   'sigma' is now a formal argument of Smooth.ppp
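+   For example, with the standard dataset 'longleaf'
+   (marks are tree diameters):
+      S <- Smooth(longleaf, sigma=10)   # kernel-smoothed mark values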
+
+   o plot.quadratcount, plot.quadrattest
+   These functions have now been documented.
+
+   o Summary functions and envelopes
+   Improved mathematical labels in plots.
+
+   o Kest
+   Accelerated, in the case of a rectangular window.
+
+   o Kscaled
+   Argument 'lambda' can now be a fitted model (class ppm)
+
+   o print.fv
+   Improved layout.
+
+   o plot.bermantest
+   Improved graphics.
+
+   o which.max.im
+   This function is now deprecated.
+   which.max.im(x) is superseded by im.apply(x, which.max)
+
+   o smooth.ppp, smooth.fv, smooth.msr
+   These functions are now deprecated, in favour of 'Smooth' with a capital 'S'
+
+BUG FIXES
+
+   o bw.ppl
+   Crashed if the point pattern had multiple points at the same location.
+   Fixed.
+
+   o quantile
+   Crashed when applied to the result of 'ewcdf'.
+   Fixed.
+
+   o marks<-.ppp
+   Crashed with a message about 'unrecognised format'
+   if the current or replacement values of marks
+   were date/time values (belonging to class 'Date' or 'POSIXt').
+   Fixed.
+
+   o plot.im
+   Crashed in case log=TRUE if the window was not a rectangle.
+   Fixed.
+
+   o vcov.ppm
+   Crashed sometimes for models with a hard core term
+   (Hardcore, StraussHard, MultiHard or MultiStrauss interactions).
+   Spotted by Rasmus Waagepetersen.
+   Fixed.
+
+   o multiplicity.data.frame
+   Results were incorrect and included NA's (spotted by Sebastian Meyer).
+   Fixed.
+
+   o markvar
+   Values were incorrect.
+   Fixed.
+
+   o Smooth.ppp
+   Ignored argument 'diggle'.
+   Fixed.
+
+   o rotate.im, affine.im
+   Factor-valued images were not handled correctly.
+   Fixed.
+
+   o shift.layered
+   If argument 'origin' was used, different layers were shifted by 
+   different amounts.
+   Fixed.
+
+   o tile.areas
+   Sometimes returned a list instead of a numeric vector.
+   Fixed.
+
+   o print.ppp
+   If the marks were date/time values (belonging to class 'Date' or 'POSIXt'),
+   print.ppp reported that they were double precision numbers.
+   Fixed.
+
+   o plot.layered
+   Graphics were mangled if the argument 'add=FALSE' was given explicitly.
+   Fixed.
+
+   o Smooth.ppp
+   The argument 'sigma' was only recognised if it was explicitly named.
+   For example in 'Smooth(X, 5)' the '5' was ignored.
+   Fixed.
+
+   o clusterset
+   The bounding frame of the result was smaller than the
+   original bounding frame of the point pattern dataset,
+   when result="domain" and exact=TRUE.
+   Fixed.
+
+   o plot.im
+   Ignored argument 'col' if it was a 'function(n)'.
+   Fixed.
+
+   o Kinhom
+   Ignored argument 'correction' if there were more than 1000 points.
+   Fixed.
+
+   o [.fv
+   Mangled the plot label for the y axis.
+   Fixed.
+
+   o cbind.fv
+   Mangled the plot label for the y axis.
+   Fixed.
+
+   o plot.envelope
+   Main title was always 'x'.
+   Fixed.
+
+   o print.ppp
+   Ran over the right margin.
+   Fixed.
+
+   o union.owin, intersect.owin, setminus.owin
+   Sometimes deleted the name of the unit of length.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.35-0
+
+OVERVIEW
+
+   o We thank Melanie Bell, Leanne Bischof, Ida-Maria Sintorn,
+     Ege Rubak, Martin Hazelton, Oscar Garcia, 
+     Rasmus Waagepetersen, Abdollah Jalilian
+     and Jens Oehlschlaegel for contributions.
+
+   o Support for analysing replicated spatial point patterns.
+
+   o New vignette on analysing replicated spatial point patterns.
+
+   o Objective function surface plots.
+
+   o Estimator of point process intensity using nearest neighbour distances.
+
+   o Improved estimator of pair correlation function.
+
+   o Four new datasets. 
+
+   o Simple point-and-click interface functions for general use.
+
+   o More support for fv objects.
+
+   o More support for ppx objects.
+
+   o Extensions to nearest neighbour functions.
+
+   o Morphological operations accelerated.
+
+   o Bug fix to pair correlation functions.
+
+   o Bug fix to k-th nearest neighbour distances.
+
+   o Version nickname: 'Multiple Personality'
+
+NEW CLASSES
+
+  o mppm
+  An object of class 'mppm' represents a Gibbs point process model
+  fitted to several point pattern datasets. The point patterns may
+  be treated as independent replicates of the same point process,
+  or as the responses in an experimental design, so that the 
+  model may depend on covariates associated with the design.
+  Methods for this class include print, plot, predict, anova and so on.
+
+  o objsurf
+  An object of class 'objsurf' contains values of the likelihood 
+  or objective function in a neighbourhood of the maximum.
+
+  o simplepanel
+  An object of class 'simplepanel' represents a spatial arrangement of buttons
+  that respond to mouse clicks, supporting a simple, robust graphical interface.
+  
+NEW FUNCTIONS
+
+  o mppm
+  Fit a Gibbs model to several point patterns.
+  The point pattern data may be organised as a designed experiment
+  and the model may depend on covariates associated with the design.
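+  A minimal sketch, fitting a common Poisson intensity to the
+  'waterstriders' patterns (a new dataset in this version):
+     H <- hyperframe(P=waterstriders)
+     fit <- mppm(P ~ 1, data=H)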
+
+  o anova.mppm
+  Analysis of Deviance for models of class mppm
+
+  o coef.mppm
+  Extract fitted coefficients from a model of class mppm
+
+  o fitted.mppm
+  Fitted intensity or conditional intensity for a model of class mppm
+
+  o kstest.mppm
+  Kolmogorov-Smirnov test of goodness-of-fit for a model of class mppm
+
+  o logLik.mppm
+  log likelihood or log pseudolikelihood for a model of class mppm
+
+  o plot.mppm
+  Plot the fitted  intensity or conditional intensity of a model of class mppm
+
+  o predict.mppm
+  Compute the fitted  intensity or conditional intensity 
+  of a model of class mppm
+
+  o quadrat.test
+  Quadrat counting test of goodness-of-fit for a model of class mppm
+
+  o residuals.mppm
+  Point process residuals for a model of class mppm
+
+  o subfits
+  Extract point process models for each individual point pattern dataset,
+  from a model of class mppm
+
+  o vcov.mppm
+  Variance-covariance matrix for a model of class mppm
+
+  o integral.msr
+  Integral of a measure.
+
+  o objsurf
+  For a model fitted by optimising an objective function, this command
+  computes the objective function in a neighbourhood of the optimal value.
+
+  o contour.objsurf, image.objsurf, persp.objsurf, plot.objsurf
+  Plot an 'objsurf' object.
+
+  o fvnames
+  Define groups of columns in a function value table, for use in plot.fv, etc
+
+  o multiplicity
+  New generic function for which multiplicity.ppp is a method.
+
+  o unique.ppx, duplicated.ppx, multiplicity.ppx
+  Methods for unique(), duplicated() and multiplicity() for 'ppx' objects.
+  These also work for 'pp3' and 'lpp' objects.
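+  A minimal sketch of the generic, counting coincident points:
+     X <- ppp(c(0.5, 0.5, 0.8), c(0.4, 0.4, 0.6), window=square(1))
+     multiplicity(X)   # returns 2 2 1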
+
+  o closepairs, crosspairs, closepaircounts, crosspaircounts
+  Low-level functions for finding all close pairs of points 
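+  For example:
+     cp <- closepairs(cells, rmax=0.1)
+     str(cp)   # indices i, j, coordinates and distances d of close pairs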
+
+  o nndensity 
+  Estimate point process intensity using k-th nearest neighbour distances
+
+  o simplepanel, run.simplepanel
+  Support for a simple point-and-click interface for general use.
+
+NEW DATASETS
+
+   o pyramidal
+   Diggle-Lange-Benes data on pyramidal neurons in cingulate cortex.
+   31 point patterns divided into 3 groups.
+   
+   o waterstriders
+   Nummelin-Penttinen waterstriders data.
+   Three independent replicates of a point pattern formed by insects.
+
+   o simba
+   Simulated data example for mppm.
+   Two groups of point patterns with different interpoint interactions.
+
+   o demohyper
+   Simulated data example for mppm.
+   Point patterns and pixel image covariates, in two groups
+   with different regression coefficients.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o plot.hyperframe
+   The argument 'e' now has a different format.
+   Instead of plot(h, plot(XYZ)) one must now type plot(h, quote(plot(XYZ)))
+   This is necessary in order to avoid problems with 'S4 method dispatch'.
+   
+   o pcf.ppp, pcfinhom
+   New argument 'divisor' enables better performance of the estimator
+   of pair correlation function for distances close to zero.
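+   For example, a minimal sketch of the new estimator:
+      g <- pcf(cells, divisor="d")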
+   
+   o applynbd
+   The arguments N, R and criterion may now be specified together.
+
+   o markstat
+   The arguments N and R may now be specified together.
+
+   o ppx
+   New argument 'simplify' allows the result to be converted to
+   an object of class 'ppp' or 'pp3' if appropriate.
+
+   o as.function.fv
+   Now allows multiple columns to be interpolated
+
+   o multiplicity.ppp
+   This function is now a method for the generic 'multiplicity'.
+   It has also been accelerated.
+
+   o nnfun.ppp, distfun.ppp
+   New argument 'k' allows these functions to compute k-th nearest neighbours.
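+   For example:
+      f <- distfun(cells, k=2)   # distance to the second-nearest point
+      f(0.5, 0.5)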
+
+   o rVarGamma, kppm, vargamma.estK, vargamma.estpcf
+   New argument 'nu.pcf' provides an alternative way to specify
+   the kernel shape in the VarGamma model, instead of the existing
+   argument 'nu.ker'. Function calls that use the ambiguous argument
+   name 'nu' will no longer be accepted.
+
+   o nnmap
+   Image is now clipped to the original window.
+
+   o dilation, erosion, opening, closing
+   Polygonal computations greatly accelerated.
+
+   o plot.colourmap
+   Improved appearance and increased options, for discrete colourmaps.
+
+   o plot.msr
+   Improved appearance
+
+   o plot.ppp, plot.owin
+   An 'empty' plot can now be generated by setting type="n"
+
+   o nndist.ppp, nnwhich.ppp, nncross.ppp
+   Column names of the result are now more informative.
+
+BUG FIXES
+
+   o nncross.ppp
+   Results were completely incorrect when k > 1.
+   Spotted by Jens Oehlschlaegel.
+   Bug was introduced in spatstat 1.34-1. 
+   Fixed.
+
+   o rVarGamma
+   Simulations were incorrect; they were generated using the wrong value
+   of the parameter 'nu.ker'. Spotted by Rasmus Waagepetersen and
+   Abdollah Jalilian. Bug was always present.
+   Fixed.
+   
+   o pair correlation functions (pcf.ppp, pcfdot, pcfcross, pcfinhom, ...)
+   The result had a negative bias at the maximum 'r' value,
+   because contributions to the pcf estimate from interpoint distances
+   greater than max(r) were mistakenly omitted. 
+   Spotted by Rasmus Waagepetersen and Abdollah Jalilian. 
+   Bug was always present.
+   Fixed.
+
+   o demo(spatstat)
+   This demonstration script had some unwanted side-effects, such as
+   rescaling the coordinates of standard datasets 'bramblecanes', 
+   'amacrine' and 'demopat', which caused the demonstration to crash 
+   when it was repeated several times, and caused errors in demo(data).
+   Fixed.
+
+   o rmh
+   Visual debugger crashed sometimes with message 'XI not found'.
+   Fixed.
+
+   o predict.ppm
+   Crashed if the model was fitted using 'covfunargs'.
+   Fixed.
+
+   o bounding.box
+   Crashed if one of the arguments was NULL.
+   Fixed.
+
+   o multiplicity.ppp
+   Did not handle data frames of marks. 
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.34-1
+
+OVERVIEW
+
+   o We thank Kurt Hornik, Ted Rosenbaum, Ege Rubak and Achim Zeileis
+    for contributions.
+
+   o Important bug fix.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o as.box3
+   Now accepts objects of class 'ppx' or 'boxx'.
+
+   o crossdist.ppp, crossdist.pp3, crossdist.default
+   New argument 'squared' allows the squared distances to be computed
+   (saving computation time in some applications)
+
+BUG FIXES
+
+   o union.owin, is.subset.owin, dilation.owin
+   Results were sometimes completely wrong for polygons with holes.
+   Spotted by Ted Rosenbaum.
+   Fixed.
+
+   o psstA, areaLoss
+   Crashed in some cases, with error message
+   'Number of items to replace is not a multiple of replacement length'. 
+   Spotted by Achim Zeileis.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.34-0
+
+OVERVIEW
+
+   o We thank Andrew Bevan, Ege Rubak, Aruna Jammalamadaka, 
+   Greg McSwiggan, Jeff Marcus, Jose M Blanco Moreno,
+   and Brian Ripley for contributions.
+
+   o spatstat and all its dependencies are now Free Open Source.
+
+   o spatstat does not require the package 'gpclib' any more.
+   
+   o spatstat now depends on the packages 'tensor', 'abind' and 'polyclip'
+
+   o Polygon clipping is now always enabled.
+
+   o Substantially more support for point patterns on linear networks.
+
+   o Faster computations for pairwise interaction models.
+
+   o Bug fixes in nearest neighbour calculations.
+
+   o Bug fix in leverage and influence diagnostics.
+
+   o Version nickname: "Window Cleaner"
+
+   o spatstat now requires R version 3.0.2 or later
+
+NEW FUNCTIONS
+
+   o as.lpp
+   Convert data to a point pattern on a linear network.
+
+   o distfun.lpp
+   Distance function for point pattern on a linear network.
+
+   o eval.linim
+   Evaluate expression involving pixel images on a linear network.
+
+   o linearKcross, linearKdot, linearKcross.inhom, linearKdot.inhom
+   Multitype K functions for point patterns on a linear network
+
+   o linearmarkconnect, linearmarkequal
+   Mark connection function and mark equality function 
+   for multitype point patterns on a linear network
+
+   o linearpcfcross, linearpcfdot, linearpcfcross.inhom, linearpcfdot.inhom
+   Multitype pair correlation functions for point patterns on a linear network
+
+   o linfun
+   New class of functions defined on a linear network
+
+   o nndist.lpp, nnwhich.lpp, nncross.lpp
+   Methods for nndist, nnwhich, nncross for point patterns on a linear network
+
+   o nnfun.lpp
+   Method for nnfun for point patterns on a linear network
+
+   o vcov.lppm
+   Variance-covariance matrix for parameter estimates of a fitted
+   point process model on a linear network.
+
+   o bilinearform
+   Computes a bilinear form 
+
+   o tilenames, tilenames<-
+   Extract or change the names of tiles in a tessellation.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o package dependencies 
+   Previous versions of spatstat used the package 'gpclib' 
+   to perform geometrical calculations on polygons.
+   Spatstat now uses the package 'polyclip' for polygon calculations instead.
+
+   o free open-source licence
+   The restrictive licence conditions of 'gpclib' no longer apply 
+   to users of spatstat. Spatstat and all its dependencies are now
+   covered by a free open-source licence.
+
+   o polygon clipping
+   In previous versions of spatstat, geometrical calculations
+   on polygons could be performed 'exactly' using gpclib
+   or 'approximately' using pixel discretisation. 
+   Polygon calculations are now always performed 'exactly'.
+   
+   o intersect.owin, union.owin, setminus.owin
+   If A and B are polygons, the result is a polygon.
+
+   o erosion, dilation, opening, closing
+   If the original set is a polygon, the result is a polygon.
+
+   o intersect.tess, dirichlet
+   The tiles of the resulting tessellation are polygons 
+   if the input was polygonal.
+
+   o plot.owin
+   Polygons with holes can now be plotted with filled colours
+   on any device.
+
+   o lppm
+   New arguments 'eps' and 'nd' control the quadrature scheme.
+
+   o pairwise interaction Gibbs models
+   Many calculations for these models have been accelerated.
+
+BUG FIXES
+
+   o nncross.pp3
+   Values were completely incorrect in some cases.
+   Usually accompanied by a warning about NA values.
+   (Spotted by Andrew Bevan.)
+   Fixed.
+
+   o nnmap, nnmark
+   A small proportion of pixels had incorrect values.
+   [These were the pixels lying on the boundary of a Dirichlet cell.]
+   Fixed. 
+
+   o leverage.ppm, influence.ppm, dfbetas.ppm
+   Results were incorrect for non-Poisson processes.
+   Fixed.
+
+   o distcdf
+   Results were incorrect in some cases
+   when W was a window and V was a point pattern.
+   Fixed.
+
+   o Kcross, Kdot, pcfcross, pcfdot
+   Results were incorrect in some rare cases.
+   Fixed.
+
+   o as.fv.kppm
+   Erroneously returned a NULL value.
+   Fixed.
+
+   o vcov.ppm
+   For point process models fitted with method = 'logi',
+   sometimes crashed with error "object 'fit' not found".
+   (Spotted by Ege Rubak).
+   Fixed.
+
+   o vcov.ppm
+   For multitype point process models, sometimes crashed
+   with error "argument 'par' is missing".
+   Fixed.
+
+   o plot.im
+   Crashed if some of the pixel values were infinite.
+   Fixed.
+
+   o owin 
+   owin(poly=..) crashed if there were NA's in the polygon coordinates.
+   Spotted by Jeff Marcus.
+   Fixed.
+
+   o plot.fv
+   Crashed, giving an incomprehensible error,
+   if the plot formula contained a number with a decimal point.
+   Fixed.
+
+   o alltypes
+   Crashed if envelopes=TRUE and global=TRUE, 
+   with error message 'csr.theo not found'.
+   Spotted by Jose M Blanco Moreno.
+   Fixed.
+
+   o chop.tess, rMosaicField
+   Format of result was garbled in some cases.
+   Fixed.
+
+   o vcov.ppm
+   Sometimes gave an irrelevant warning "parallel option not available".
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.33-0
+
+OVERVIEW
+
+   o We thank Kurt Hornik and Brian Ripley for advice.
+
+   o The package namespace has been modified.
+
+   o Numerous internal changes.
+
+   o Likelihood cross-validation for smoothing bandwidth.
+
+   o More flexible models of intensity in cluster/Cox processes.
+
+   o New generic function for smoothing.
+
+   o Version nickname: 'Titanic Deckchair'
+
+NEW FUNCTIONS
+
+    o bw.ppl
+    Likelihood cross-validation technique 
+    for bandwidth selection in kernel smoothing.
+
+    o is.lppm, is.kppm, is.slrm
+    Tests whether an object is of class 'lppm', 'kppm' or 'slrm'
+
+    o Smooth
+    New generic function for spatial smoothing.
+
+    o Smooth.ppp, Smooth.fv, Smooth.msr
+    Methods for Smooth
+    (identical to smooth.ppp, smooth.fv, smooth.msr respectively)
+
+    o fitted.kppm
+    Method for 'fitted' for cluster/Cox models
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o namespace
+    The namespace of the spatstat package has been changed.
+
+    o internal functions
+    Some undocumented internal functions are no longer visible,
+    as they are no longer exported in the namespace. These functions
+    can still be accessed using the form spatstat:::functionname.
+    Functions that are not visible are not guaranteed to exist 
+    or to remain the same in future. 
+
+    o methods
+    For some generic functions defined in the spatstat package,
+    it is possible that R may fail to find one of the methods
+    for the generic. This is a temporary problem due to a restriction
+    on the size of the namespace in R 3.0.1. It will be fixed in 
+    future versions of R and spatstat. It only applies to methods
+    for a generic which is a spatstat function (such as nndist)
+    and does not apply to methods for generics defined elsewhere
+    (such as density). In the meantime, if this problem should occur, 
+    it can be avoided by calling the method explicitly, in the form
+    spatstat:::genericname.classname.
+
+    o speed
+    The package should run slightly faster overall, due to the
+    improvement of the namespace and changes to internal code.
+
+    o envelope
+    New argument 'envir.simul' determines the environment
+    in which to evaluate the expression 'simulate'.
+
+    o kppm
+    More flexible models of the intensity, and greater control over the
+    intensity fitting procedure, are now possible using the arguments
+    'covfunargs', 'use.gam', 'nd', 'eps' passed to ppm.
+    Also the argument 'X' may now be a quadrature scheme.
+
+    o distcdf
+    Arguments W and V can now be point patterns.
+
+    o Kest
+    New option: correction = "good" 
+    selects the best edge correction that can be computed in reasonable time.
+
+    o bw.diggle
+    Accelerated.
+
+    o predict.ppm
+    Calculation of standard error has been accelerated.
+
+    o smooth.ppp, smooth.fv, smooth.msr
+    These functions will soon be 'Deprecated' in favour of
+    the methods Smooth.ppp, Smooth.fv, Smooth.msr respectively.
+
+    o stratrand, overlap.owin, update.slrm, edge.Trans, edge.Ripley
+    These already-existing functions are now documented.
+
+BUG FIXES
+
+    o kppm, matclust.estpcf, pcfmodel
+    The pair correlation function of the Matern Cluster Process
+    was evaluated incorrectly at distances close to 0.
+    This could have affected the fitted parameters 
+    in matclust.estpcf() or kppm(clusters="MatClust").
+    Fixed.
+
+    o anova.ppm
+    Would cause an error in future versions of R
+    when 'anova.glm' is removed from the namespace.
+    Fixed.
+
+	CHANGES IN spatstat VERSION 1.32-0
+
+OVERVIEW
+
+   o We thank Ege Rubak for major contributions.
+
+   o Thanks also to Patrick Donnelly, Andrew Hardegen,
+     Tom Lawrence, Robin Milne, Gopalan Nair and Sean O'Riordan.
+
+   o New 'logistic likelihood' method for fitting Gibbs models.
+
+   o Substantial acceleration of several functions
+     including profile maximum pseudolikelihood
+     and variance calculations for Gibbs models.
+
+   o Nearest neighbours for point patterns in 3D
+
+   o Nearest-neighbour interpolation in 2D
+
+   o New 'progress plots' 
+
+   o Hard core thresholds can be estimated automatically.
+
+   o More support for colour maps
+
+   o More support for 'fv' objects
+
+   o Spatstat now has version nicknames.
+   The current version is "Logistical Nightmare".
+ 
+   o Minor improvements and bug fixes.
+
+NEW FUNCTIONS
+
+   o nncross.pp3
+   Method for 'nncross' for point patterns in 3D
+
+   o nnmark
+   Mark of nearest neighbour - can be used for interpolation
+
+   o dclf.progress, mad.progress
+   Progress plots (envelope representations) for the DCLF and MAD tests.
+
+   o deriv.fv
+   Numerical differentiation for 'fv' objects.
+
+   o interp.colourmap
+   Smooth interpolation of colour map objects - makes it easy to build
+   colour maps with gradual changes in colour
+
+   o tweak.colourmap
+   Change individual colour values in a colour map object
+
+   o beachcolourmap
+   Colour scheme appropriate for 'altitudes' (signed numerical values)
+
+   o as.fv
+   Convert various kinds of data to an 'fv' object
+
+   o quadscheme.logi
+   Generates quadrature schemes for the logistic method of ppm.
+
+   o beginner
+   Introduction for beginners.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o ppm
+   New option: method = "logi" 
+   Fits a Gibbs model by the newly developed 'logistic likelihood' method
+   which is often faster and more accurate than maximum pseudolikelihood.
+   Code contributed by Ege Rubak.
+   
+   o profilepl
+   Greatly accelerated, especially for area-interaction models.
+
+   o vcov.ppm
+   Greatly accelerated for higher-order interaction models.
+
+   o smooth.ppp
+   Now handles bandwidths equal to zero (by invoking 'nnmark')
+
+   o Hardcore, StraussHard
+   The hard core distance 'hc' can now be omitted; 
+   it will be estimated from data.
+
+   o plot.ppp
+   Now behaves differently if there are multiple columns of marks.
+   Each column of marks is plotted, in a series of separate plots
+   arranged side-by-side.
+
+   o plot.im
+   Argument 'col' can now be a function
+
+   o lohboot
+   Now computes confidence intervals for L-functions as well
+   (fun="Lest" or fun="Linhom")
+
+   o dclf.test, mad.test
+   The argument X can now be an object produced by a previous call
+   to dclf.test or mad.test.
+
+   o plot.fv
+   Labelling of plots has been improved in some cases.
+
+   o smooth.fv
+   Further options added.
+
+   o density.ppp
+   The argument 'weights' can now be a matrix.
+
+   o smooth.ppp
+   Accelerated, when there are several columns of marks.
+
+   o density.ppp
+   Accelerated slightly.
+
+   o simulate.ppm, simulate.kppm
+   The total computation time is also returned.
+
+   o simulate.kppm
+   Now catches errors (such as 'insufficient memory').
+   
+   o latest.news, licence.polygons
+   Can now be executed by typing the name of the function without parentheses.
+
+   o latest.news
+   The text is now displayed one page at a time.
+
+BUG FIXES
+
+   o Hest, Gfox, Jfox
+   The 'raw' estimate was not computed correctly (or at least 
+   it was not the raw estimate described in the help files).
+   Spotted by Tom Lawrence.
+   Fixed.
+
+   o edges2vees
+   Format of result was incorrect if there were fewer than 3 edges.
+   Fixed.
+
+   o Jfox
+   The theoretical value (corresponding to independence between X and Y)
+   was erroneously given as 0 instead of 1.
+   Spotted by Patrick Donnelly.
+   Fixed.
+
+   o ppm, quadscheme, default.dummy 
+   If the grid spacing parameter 'eps' was specified,
+   the quadrature scheme was sometimes slightly incorrect
+   (missing a few dummy points near the window boundary).
+   Fixed.
+
+   o print.timed
+   Matrices were printed incorrectly.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.31-3
+
+OVERVIEW
+
+   o spatstat now 'Suggests' the package 'tensor'
+
+   o Code slightly accelerated.
+
+   o More support for pooling of envelopes.
+
+   o Bug fixes.
+
+NEW FUNCTIONS
+
+   o nnmap
+   Given a point pattern, finds the k-th nearest point in the pattern
+   from each pixel in a raster.
+
+   o coef.fii, coef.summary.fii
+   Extract the interaction coefficients of a fitted interpoint interaction
+
+   o edges2vees
+   Low-level function for finding triples in a graph.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o predict.ppm
+   New argument 'correction' allows choice of edge correction
+   when calculating the conditional intensity.
+
+   o pool.envelope
+   New arguments 'savefuns' and 'savepatterns'.
+
+   o pool.envelope
+   Envelopes generated with VARIANCE=TRUE can now be pooled.
+
+   o pool.envelope
+   The plot settings of the input data are now respected.
+
+   o Numerous functions have been slightly accelerated.
+
+BUG FIXES
+
+   o predict.ppm
+   Calculation of the conditional intensity omitted the edge correction
+   if correction='translate' or correction='periodic'.
+   Fixed.
+
+   o shift.lpp, rotate.lpp, scalardilate.lpp, affine.lpp,
+    shift.linnet, rotate.linnet, scalardilate.linnet, affine.linnet
+   The enclosing window was not correctly transformed. 
+   Fixed.
+
+   o rHardcore, rStraussHard, rDiggleGratton, rDGS
+   The return value was invisible.
+   Fixed.
+
+   o ppm
+   In rare cases the results obtained with forcefit=FALSE and forcefit=TRUE
+   were different, due to numerical rounding effects.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.31-2
+
+OVERVIEW
+
+   o We thank Robin Corria Anslie, Julian Gilbey, Kiran Marchikanti,
+   Ege Rubak and Thordis Linda Thorarinsdottir for contributions.
+
+   o spatstat now depends on R 3.0.0
+
+   o More support for linear networks
+
+   o More functionality for nearest neighbours
+
+   o Bug fix in fitting Geyer model
+
+   o Performance improvements and bug fixes
+
+NEW FUNCTIONS
+
+   o affine.lpp, shift.lpp, rotate.lpp, rescale.lpp, scalardilate.lpp
+   Geometrical transformations for point patterns on a linear network
+
+   o affine.linnet, shift.linnet, rotate.linnet, 
+   rescale.linnet, scalardilate.linnet
+   Geometrical transformations for linear networks
+
+   o [.linnet
+   Subset operator for linear networks
+
+   o timed
+   Records the computation time taken
+   
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o nncross
+   nncross.ppp can now find the k-th nearest neighbours, for any k.
+
+   o nndist, nnwhich
+   New argument 'by' makes it possible to find nearest neighbours
+   belonging to specified subsets in a point pattern, for example,
+   the nearest neighbour of each type in a multitype point pattern.
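+   A minimal sketch of the two extensions above:
+      nn2 <- nncross(cells, japanesepines, k=2)   # second-nearest point of japanesepines
+      d <- nndist(amacrine, by=marks(amacrine))   # nearest point of each type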
+
+   o [.fv
+   Now handles the argument 'drop'.
+
+   o with.fv
+   Argument 'drop' replaced by new argument 'fun'
+   (with different interpretation).
+
+   o [.lpp
+   Subset index may now be a window (class 'owin')
+
+   o Kest
+   Options correction='border' and correction='none'
+   now run about 4 times faster, thanks to Julian Gilbey.
+
+   o density.ppp
+   Numerical underflow no longer occurs when sigma is very small
+   and 'at="points"'. A warning is no longer issued.
+   Thanks to Robin Corria Anslie.
+
+   o crossing.psp
+   New argument 'fatal' allows the user to handle empty intersections
+
+   o union.owin
+   It is now guaranteed that if A is a subset of B, then union.owin(A,B)=B.
+
+   o plot.colourmap
+   Now passes arguments to axis() to control the plot.
+   Appearance of plot improved.
+
+   o image.listof
+   Now passes arguments to plot.colourmap() if equal.ribbon=TRUE.
+
+   o kppm
+   Accelerated (especially for large datasets).
+
+   o plot.envelope
+   plot.envelope is now equivalent to plot.fv and is essentially redundant.
+
+   o rThomas, rMatClust, rNeymanScott
+   Improved explanations in help files.
+
+   o All functions
+   Many functions have been slightly accelerated.
+
+BUG FIXES
+
+   o ppm
+   Results were incorrect for the Geyer saturation model
+   with a non-integer value of the saturation parameter 'sat'.
+   Spotted by Thordis Linda Thorarinsdottir.
+   Bug introduced in spatstat 1.20-0, July 2010.
+   Fixed.
+
+   o ppm
+   Fitting a stationary Poisson process using a nonzero value of 'rbord',
+   as in "ppm(X, rbord=R)" with R > 0, gave incorrect results.
+   Fixed.
+
+   o predict.slrm
+   Crashed with message 
+   'longer object length is not a multiple of shorter object length' 
+   if the original data window was not a rectangle.
+   Fixed.
+
+   o iplot
+   Main title was sometimes incorrect.
+   Fixed.
+
+   o plot.layered
+   Ignored argument 'main' in some cases.
+   Fixed.
+
+   o plot.listof, image.listof
+   Crashed sometimes with a message 'figure margins too large'
+   when equal.ribbon=TRUE.
+   Fixed.
+
+   o print.ppx
+   Crashed if the object contained local coordinates.
+   Fixed.
+
+   o transect.im
+   Crashed if the transect lay partially outside the image domain.
+   Fixed.
+
+   o rthin
+   Crashed if X was empty.
+   Fixed.
+
+   o max.im, min.im, range.im
+   Ignored additional arguments after the first argument.
+   Fixed.
+
+   o update.lppm
+   Updated object did not remember the name of the original dataset.
+   Fixed.
+
+   o envelope
+   Grey shading disappeared from plots of envelope objects 
+   when the envelopes were transformed using eval.fv or eval.fasp.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.31-1
+
+OVERVIEW
+
+   o We thank Marcelino de la Cruz, Daniel Esser, Jason Goldstick,
+     Abdollah Jalilian, Ege Rubak and Fabrice Vinatier for contributions.
+
+   o Nonparametric estimation and tests for 
+     point patterns in a linear network.
+
+   o More support for 'layered' objects.
+
+   o Find clumps in a point pattern.
+
+   o Connected component interaction model.
+
+   o Improvements to interactive plots.
+
+   o Visual debugger for Metropolis-Hastings algorithm.
+
+   o Bug fix in Metropolis-Hastings simulation of Geyer process.
+
+   o Faster Metropolis-Hastings simulation.
+
+   o Faster computation of 'envelope', 'fv' and 'fasp' objects.
+
+   o Improvements and bug fixes.
+
+NEW FUNCTIONS
+
+   o connected.ppp
+   Find clumps in a point pattern.
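+   For example, R being the maximum connection distance:
+      Y <- connected(redwood, R=0.1)
+      table(marks(Y))   # sizes of the clumps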
+
+   o kstest.lpp, kstest.lppm
+   The spatial Kolmogorov-Smirnov test can now be applied 
+   to point patterns on a linear network (class 'lpp') and 
+   point processes on a linear network (class 'lppm').
+
+   o bermantest.lpp, bermantest.lppm
+   Berman's Z1 and Z2 tests can now be applied to point patterns
+   on a linear network (class 'lpp') and point processes on a linear
+   network (class 'lppm').
+
+   o rhohat.lpp, rhohat.lppm
+   Nonparametric estimation of the dependence of a point pattern
+   on a spatial covariate: 'rhohat' now applies to 
+   objects of class 'lpp' and 'lppm'.
+
+   o intensity.lpp
+   Empirical intensity of a point pattern on a linear network.
+
+   o as.function.rhohat
+   Converts a 'rhohat' object to a function,
+   with extrapolation beyond the endpoints.
+
+   o [.layered
+   Subset operator for layered objects.
+
+   o shift, rotate, affine, rescale, reflect, flipxy, scalardilate
+   These geometrical transformations now work for 'layered' objects.
+
+   o iplot.layered
+   Interactive plotting for 'layered' objects.
+
+   o as.owin.layered
+   Method for as.owin for layered objects.
+
+   o [.owin
+   Subset operator for windows, equivalent to intersect.owin.
+
+   o rcellnumber
+   Generates random integers for the Baddeley-Silverman counterexample.
+
+   o is.lpp
+   Tests whether an object is a point pattern on a linear network.
+
+   o is.stationary.lppm, is.poisson.lppm
+   New methods for is.stationary and is.poisson for class 'lppm'
+
+   o sessionLibs
+   Print library names and version numbers (for use in Sweave scripts)
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o iplot
+   iplot is now generic, with methods for 'ppp', 'layered' and 'default'.
+   iplot methods now support zoom and pan navigation.
+
+   o rmh.default
+   New argument 'snoop' allows the user to activate a visual debugger
+   for the Metropolis-Hastings algorithm.
+
+   o connected
+   connected() is now generic, with methods for 'im', 'owin' and 'ppp'.
+
+   o alltypes
+   Now works for lpp objects 
+
+   o rlabel
+   Now works for lpp, pp3, ppx objects
+
+   o plot.kstest
+   Can now perform P-P and Q-Q plots as well.
+
+   o plot.fasp
+   New argument 'samey' controls whether all panels 
+   have the same y limits.
+
+   o plot.fasp
+   Changed default value of 'samex'.
+
+   o Objects of class 'envelope', 'fv' and 'fasp' 
+   Reduced computation time and storage required for these objects.
+   
+   o pcfmodel.kppm
+   Improved calculation.
+
+   o plot.fv
+   Improved collision-avoidance algorithm
+   (for avoiding overlaps between curves and legend)
+
+   o ppm
+   Improved error handling
+
+   o envelope
+   All methods for 'envelope' now handle fun=NULL   
+
+   o setminus.owin
+   Better handling of the case where both arguments are rectangles.
+
+   o rmh
+   Simulation has been further accelerated.
+
+   o lppm
+   Accelerated.
+
+   o vcov.ppm
+   Accelerated.
+
+   o marktable
+   Accelerated.
+
+   o Triplets() interaction
+   Accelerated.
+
+   o alltypes
+   Accelerated when envelope=TRUE.
+
+BUG FIXES
+
+   o rmh
+   Simulation of the Geyer saturation process was incorrect.
+   [Bug introduced in previous version, spatstat 1.31-0.]
+   Fixed.
+
+   o rmh
+   Simulation of the Geyer saturation process was 
+   incorrectly initialised, so that the results of a short run 
+   (i.e. small value of 'nrep') were incorrect, 
+   while long runs were correct.
+   [Bug introduced in spatstat 1.17-0, October 2009.]
+   Fixed.
+
+   o ppm 
+   Objects fitted with use.gam=TRUE caused fatal errors in
+   various functions including print, summary, vcov and model.frame.
+   Spotted by Jason Goldstick.
+   Fixed.
+
+   o lpp, runiflpp, rpoislpp
+   Empty point patterns caused an error.
+   Fixed.
+
+   o rmh.default
+   Crashed for hybrid models, with message 
+   'Attempt to apply non-function'.
+   Spotted by Ege Rubak.
+   Fixed.
+
+   o relrisk
+   Crashed when 'at="points"' for a multitype pattern
+   with more than 2 types.
+   Spotted by Marcelino de la Cruz.
+   Fixed.
+
+   o erosion.owin, dilation.psp, border
+   Ignored the arguments "..." in some cases
+   (namely when the window was polygonal and 'gpclib' was disabled).
+   Fixed.
+
+   o rsyst, rcell
+   Did not correctly handle the argument 'dx'.
+   Spotted by Fabrice Vinatier.
+   Fixed.
+
+   o correction="trans"
+   Various functions such as Kest 
+   no longer recognised 'correction = "trans"'.
+   Fixed.
+
+   o istat
+   Crashed with an error message about envelopes. 
+   Fixed.
+
+   o summary.ppm, print.ppm
+   p-values which were exactly equal to zero were reported as NA.
+   Fixed.
+
+   o [.im
+   Crashed if the intersection consisted of
+   a single row or column of pixels.
+   Fixed.
+
+   o plot.im
+   Sometimes incorrectly displayed an image consisting of 
+   a single row or column of pixels.
+   Fixed.
+
+   o plot.layered
+   The plot region was determined by the first layer,
+   so that objects in subsequent layers could sometimes 
+   fall outside the plot region.
+   Fixed.
+
+   o transect.im
+   If the arguments 'from' and 'to' were numeric vectors of length 2,
+   the result was garbled.
+   Fixed.   
+
+   o Inhomogeneous K functions and pair correlation functions
+   [Kinhom, pcfinhom, Kcross.inhom, Kdot.inhom, pcfcross.inhom, etc.]
+   These functions reported an error 'lambda is not a vector'
+   if the intensity argument lambda was computed using density(, at="points").
+   Fixed.
+
+   o rlabel
+   Did not accept a point pattern with a hyperframe of marks.
+   Fixed.
+
+   o alltypes
+   Crashed when envelope=TRUE if the summary function 'fun'
+   did not have default values for the marks i and j.
+   Fixed.
+
+   o Kres, Gres, psst, psstA
+   Ignored the unit of length.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.31-0
+
+OVERVIEW
+
+   o We thank Frederic Lavancier and Ege Rubak for contributions.
+
+   o Major bug fix in simulation of area-interaction process.
+
+   o Metropolis-Hastings simulations accelerated.
+
+   o Rounding of spatial coordinates
+
+   o clmfires dataset corrected.
+
+   o Bug fixes and minor improvements.
+
+NEW FUNCTIONS
+
+   o round.ppp
+   Round the spatial coordinates of a point pattern 
+   to a specified number of decimal places.
+
+   o rounding
+   Determine whether a dataset has been rounded.
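+   A minimal sketch of the two functions above:
+      X <- round(cells, digits=3)
+      rounding(X)   # detects the number of decimal places used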
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o rmh
+   Simulation of the following models has been accelerated:
+   areaint, dgs, diggra, fiksel, geyer, hardcore, lennard, multihard,
+   strauss, straush, straussm, strausshm.
+
+   o rmh
+   The transition history of the simulation (which is saved if 'track=TRUE')
+   now also contains the value of the Hastings ratio for each proposal.
+
+   o clmfires
+   The clmfires dataset has been modified to remove errors and inconsistencies.
+
+   o plot.linim
+   Appearance of the plot has been improved, when style='width'.
+
+   o summary.ppm
+   Now reports whether the spatial coordinates have been rounded.
+
+   o dclf.test, mad.test
+   The range of distance values ('rinterval') used in the test
+   is now printed in the test output, and is saved as an attribute.
+
+BUG FIXES
+
+   o rmh
+   Simulation of the Area-Interaction model was completely incorrect.
+   Spotted by Frederic Lavancier.
+   The bug was introduced in spatstat version 1.23-6 or later.
+   Fixed.
+
+   o dclf.test
+   The test statistic was incorrectly scaled (by a few percent).
+   This did not affect the p-value of the test.
+   Fixed.
+
+   o ppx
+   If argument 'coord.type' was missing, various errors occurred:
+   a crash may have occurred, or the results may have depended
+   on the storage type of the data. 
+   Spotted by Ege Rubak.
+   Fixed. 
+
+   o plot.ppx
+   Crashed for 1-dimensional point patterns.
+   Spotted by Ege Rubak.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.30-0
+
+
+OVERVIEW 
+
+   o We thank Jorge Mateu, Andrew Bevan, Olivier Flores,
+     Marie-Colette van Lieshout, Nicolas Picard and Ege Rubak
+     for contributions.
+
+   o The spatstat manual now exceeds 1000 pages.
+
+   o Hybrids of point process models.
+
+   o Five new datasets
+
+   o Second order composite likelihood method for kppm.
+
+   o Inhomogeneous F, G and J functions.
+
+   o Delaunay graph distance
+
+   o Fixed serious bug in 'lppm' for marked patterns.
+
+   o Bug fix in some calculations for the Geyer model.
+
+   o Improvements to linear networks code
+
+   o Pixel images can now be displayed with a logarithmic colour map.
+
+   o spatstat now formally 'Depends' on the R core package 'grDevices'
+
+   o Miscellaneous improvements and bug fixes.
+
+NEW DATASETS
+
+   o clmfires
+   Forest fires in Castilla-La Mancha
+
+   o gordon
+   People sitting on the grass in Gordon Square, London
+
+   o hyytiala 
+   Mixed forest in Hyytiala, Finland (marked by species)
+
+   o paracou
+   Kimboto trees in Paracou, French Guiana (marked as adult/juvenile)
+
+   o waka
+   Trees in Waka national park (marked with diameters)
+
+NEW FUNCTIONS
+
+   o Hybrid
+   The hybrid of several point process interactions
+   [Joint research with Jorge Mateu and Andrew Bevan]
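+   A minimal sketch (interaction radii chosen arbitrarily):
+      fit <- ppm(redwood, ~1, Hybrid(H=Hardcore(0.02), G=Geyer(0.1, 2)))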
+
+   o is.hybrid
+   Recognise a hybrid interaction or hybrid point process model.
+
+   o Finhom, Ginhom, Jinhom
+   Inhomogeneous versions of the F, G and J functions
+   [Thanks to Marie-Colette van Lieshout]
+
+   o delaunay.distance
+   Graph distance in the Delaunay triangulation.
+
+   o distcdf
+   Cumulative distribution function of the distance between
+   two independent random points in a given window.
+
+   o bw.frac
+   Bandwidth selection based on window geometry
+
+   o shortside.owin, sidelengths.owin
+   Side lengths of (enclosing rectangle of) a window
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o ppm
+   Can now fit models with 'hybrid' interactions
+   [Joint research with Jorge Mateu and Andrew Bevan]
+
+   o kppm
+   Now has the option of fitting models using Guan's (2006)
+   second order composite likelihood.
+
+   o envelope.lpp
+   Now handles multitype point patterns.
+
+   o envelope.envelope
+   New argument 'transform' allows the user to apply a transformation
+   to previously-computed summary functions.
+
+   o runifpointOnLines, rpoisppOnLines, runiflpp, rpoislpp
+   Can now generate multitype point patterns.
+
+   o rmhmodel, rmh, simulate.ppm
+   Now handle point process models with 'hybrid' interactions.
+
+   o kppm
+   Accelerated, and more reliable, due to better choice of starting values 
+   in the optimisation procedure.
+
+   o kppm
+   The internal format of kppm objects has changed.
+
+   o minimum contrast estimation
+   Error messages from the optimising function 'optim' are now trapped
+   and handled.
+
+   o rhohat
+   This command is now generic, with methods for ppp, quad, and ppm.
+
+   o raster.x, raster.y, raster.xy
+   These functions have a new argument 'drop'
+
+   o summary.ppm
+   Improved behaviour when the model covariates are a data frame.
+
+   o progressreport
+   Output improved.
+
+   o second order summary functions
+   (Kest, Lest, Kinhom, pcf.ppp, Kdot, Kcross, Ldot etc etc)
+   These functions now accept correction="translation" as an 
+   alternative to correction = "translate", for consistency.
+
+   o plot.im
+   New argument 'log' allows colour map to be equally spaced on a log scale.
+
+   o as.owin.ppm, as.owin.kppm
+   New argument 'from' allows the user to extract the spatial window
+   of the point data (from="points") or the covariate images (from="covariates")
+
+   o dclf.test, mad.test
+   The rule for handling tied values of the test statistic has been changed.
+   The tied values are now randomly ordered to obtain a randomised integer rank.
+
+   o with.fv
+   New argument 'enclos' allows evaluation in other environments
+
+BUG FIXES
+
+   o lppm
+   For multitype patterns, the fitted model was completely incorrect
+   due to an error in constructing the quadrature scheme.
+   Fixed.
+
+   o Geyer
+   For point process models with the 'Geyer' interaction, 
+   vcov.ppm() and suffstat() sometimes gave incorrect answers.
+   [Spotted by Ege Rubak.]
+   Fixed.
+
+   o as.im.im
+   Did not correctly handle factor-valued images
+   if one of the arguments 'dimyx', 'eps', 'xy' was given.
+   Fixed.
+
+   o envelope.lppm
+   Crashed if the model was multitype.
+   Fixed.
+
+   o lpp
+   Did not handle empty patterns.
+   Fixed.
+
+   o density.ppp
+   If 'sigma' was a bandwidth selection function such as bw.scott()
+   which returned a numeric vector of length 2, a warning message was issued, 
+   and the smoothing bandwidth was erroneously taken to be
+   the first element of the vector.
+   Fixed.
+
+   o Fest, Jcross, Jdot, Jmulti
+   If these functions were computed using correction = 'rs', 
+   plotting them would sometimes give an error,      
+   with the message "no finite x/y limits".
+   Fixed.
+
+   o pcfmodel.kppm
+   For models with clusters="VarGamma"
+   the value of the pcf at distance r=0 was given as NaN. 
+   Fixed.
+
+   o vcov.ppm
+   Result was incorrect in rare cases, due to numerical rounding effects.
+   Fixed.
+
+   o rLGCP, simulate.kppm
+   For models fitted to point patterns in an irregular window, 
+   simulation sometimes failed, with a message that the image 'mu' 
+   did not cover the simulation window.
+   (Spotted by George Limitsios.)
+   Fixed.
+
+   o rLGCP, simulate.kppm
+   Crashed sometimes with an error about unequal x and y steps
+   (from 'GaussRF').
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.29-0
+
+OVERVIEW 
+
+   o We thank Colin Beale, Li Haitao, Frederic Lavancier,
+     Erika Mudrak and Ege Rubak for contributions.
+
+   o random sequential packing
+ 
+   o Allard-Fraley estimator
+
+   o method for pooling several quadrat tests
+
+   o better control over dummy points in ppm
+
+   o more support for data on a linear network
+
+   o nearest neighbour map 
+
+   o changes to subsetting of images
+
+   o improvements and bug fixes 
+
+NEW FUNCTIONS
+
+   o clusterset
+   Allard-Fraley estimator of high-density features in a point pattern
+
+   o pool.quadrattest
+   Pool several quadrat tests
+
+   o nnfun
+   Nearest-neighbour map of a point pattern or a line segment pattern
+
+   o as.ppm
+   Converts various kinds of objects to ppm
+
+   o crossdist.lpp
+   Shortest-path distances between pairs of points in a linear network
+
+   o nobs.lppm
+   Method for 'nobs' for lppm objects.
+
+   o as.linim
+   Converts various kinds of objects to 'linim'
+
+   o model.images.slrm
+   Method for model.images for slrm objects
+
+   o rotate.im
+   Rotate a pixel image
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o "[.im" and "[<-.im"
+   New argument 'j' allows any type of matrix indexing to be used.
+
+   o "[.im"
+   Default behaviour changed in the case of a rectangular subset.
+   New argument 'rescue' can be set to TRUE to reinstate previous behaviour.
+
+   o rSSI
+   Performs 'Random Sequential Packing' if n=Inf.
+
+   o ppm
+   New argument 'eps' determines the spacing between dummy points.
+   (also works for related functions quadscheme, default.dummy, ...)
+
+   o fitted.ppm, predict.ppm
+   Argument 'new.coef' specifies a vector of parameter values
+   to replace the fitted coefficients of the model.
+
+   o lppm
+   Stepwise model selection using step() now works for lppm objects.
+
+   o vcov.slrm
+   Can now calculate correlation matrix or Fisher information matrix
+   as well as variance-covariance matrix.
+
+   o eval.fv
+   Improved behaviour when plotted.
+
+   o "[.fv"
+   Improved behaviour when plotted.
+
+   o lohboot
+   When the result is plotted, the confidence limits are now shaded.
+
+   o lohboot
+   New argument 'global' allows global (simultaneous) confidence bands
+   instead of pointwise confidence intervals.
+
+   o vcov.ppm
+   Accelerated by 30% in some cases.
+
+   o quadrat.test.splitppp
+   The result is now a single object of class 'quadrattest'
+
+   o progressreport
+   Improved output (also affects many functions which print progress reports)
+
+   o Full redwood data (redwoodfull)
+   Plot function redwoodfull.extra$plotit has been slightly improved.
+
+   o nncross
+   This function is now generic, with methods for 'ppp' and 'default'.
+
+   o distfun
+   The internal format of objects of class 'distfun' has been changed.
+
+   o duplicated.ppp, unique.ppp   
+   New argument 'rule' allows behaviour to be consistent with package 'deldir'
+
+BUG FIXES
+
+   o bdist.tiles
+   Values were incorrect in some cases due to numerical error.
+   (Spotted by Erika Mudrak.)
+   Fixed.
+
+   o vcov.ppm, suffstat
+   These functions sometimes gave incorrect values 
+   for marked point process models.
+   Fixed.
+
+   o simulate.ppm, predict.ppm
+   Did not correctly handle the 'window' argument. (Spotted by Li Haitao).
+   Fixed.
+
+   o smooth.ppp, markmean
+   If sigma was very small, strange values were produced, due to
+   numerical underflow. (Spotted by Colin Beale).
+   Fixed.
+
+   o MultiHard, MultiStrauss, MultiStraussHard
+   Crashed if the data point pattern was empty.
+   (Spotted by Ege Rubak).
+   Fixed.
+
+   o vcov.ppm
+   Crashed sporadically, with multitype interactions.
+   (Spotted by Ege Rubak).
+   Fixed.
+
+   o rStrauss, rHardcore, rStraussHard, rDiggleGratton, rDGS
+   If the simulated pattern was empty, these functions would either
+   crash, or return a pattern containing 1 point.
+   (Spotted by Frederic Lavancier).
+   Fixed.
+
+   o model.matrix.slrm
+   Crashed if the model was fitted using split pixels.
+   Fixed.
+
+   o residuals.ppm, diagnose.ppm
+   Did not always correctly handle models that included offset terms.
+   Fixed.
+
+   o project.ppm
+   When a model was projected by project.ppm or by ppm(project=TRUE),
+   the edge corrections used in the projected model were sometimes
+   different from those in the original model, so that the
+   projected and unprojected models were not comparable.
+   Fixed.
+
+   o plot.listof, plot.splitppp
+   Crashed sometimes due to a scoping problem.
+   Fixed.
+
+   o dclf.test, mad.test
+   Crashed if any of the function values were infinite or NaN.
+   Fixed.
+
+   o psstA
+   Default plot did not show the horizontal line at y=0
+   corresponding to a perfect fit.
+   Fixed.
+
+   o vcov.ppm
+   names attribute was spelt incorrectly in some cases.
+   Fixed.
+
+
+	CHANGES IN spatstat VERSION 1.28-2
+
+OVERVIEW 
+
+   o We thank Thomas Bendtsen, Ya-Mei Chang, Daniel Esser, 
+     Robert John-Chandran, Ege Rubak and Yong Song for contributions.
+
+   o New code for Partial Residual Plots and Added Variable Plots.
+
+   o maximum profile pseudolikelihood computations vastly accelerated.
+
+   o New dataset: cells in gastric mucosa
+
+   o now possible to capture every k-th state of Metropolis-Hastings algorithm.
+
+   o size of 'ppm' objects reduced.
+
+   o scope of 'intensity.ppm' extended.
+
+   o quadrat.test can now perform Monte Carlo tests and one/two-sided tests
+
+   o improvements to 'plot.fv'
+
+   o improvement to 'rescale'
+
+   o some datasets reorganised.
+
+   o numerous bug fixes
+
+NEW DATASET
+
+   o mucosa
+   Cells in gastric mucosa
+   Kindly contributed by Dr Thomas Bendtsen
+
+NEW FUNCTIONS
+
+   o parres
+   Partial residual plots for spatial point process models.
+   A diagnostic for the form of a covariate effect.
+
+   o addvar
+   Added variable plots for spatial point process models.
+   A diagnostic for the existence of a covariate effect.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+  o profilepl
+  Accelerated (typically by a factor of 5).
+
+  o rmh, rmhcontrol
+  It is now possible to save every k-th iteration of the Metropolis-Hastings
+  algorithm. The arguments 'nsave' and 'nburn' may be given to rmh
+  or to rmhcontrol. They specify that the point pattern will be saved
+  every 'nsave' iterations, after an initial burn-in of 'nburn' iterations.
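+
+  For example (an illustrative sketch; the dataset, interaction and
+  parameter values are arbitrary):
+
+     fit <- ppm(swedishpines, ~1, Strauss(r = 7))
+     X <- rmh(fit, control = rmhcontrol(nsave = 1e4, nburn = 1e5))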
+
+  o simulate.ppm
+  New argument 'singlerun' determines whether the simulated patterns
+  are generated using independent runs of the Metropolis-Hastings algorithm
+  or are obtained by performing one long run of the algorithm and saving
+  every k-th iteration.
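+
+  A hypothetical sketch (the model and values are arbitrary):
+
+     fit <- ppm(cells, ~1, Strauss(r = 0.07))
+     Xlist <- simulate(fit, nsim = 10, singlerun = TRUE)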
+
+  o exactMPLEstrauss
+  New argument 'project' determines whether the parameter gamma
+  is constrained to lie in [0,1].
+
+  o intensity.ppm
+  Now works for stationary point process models with the interactions
+  DiggleGratton, DiggleGatesStibbard, Fiksel, PairPiece and Softcore.
+
+  o plot.fv
+  Improved algorithm for avoiding collisions between graphics and legend.
+
+  o plot.fv
+  New argument 'log' allows plotting on logarithmic axes.
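+
+  For example (illustrative):
+
+     plot(Kest(cells), log = "xy")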
+
+  o envelope
+  Can now calculate an estimate of the true significance level
+  of the "wrong" test (which declares the observed summary function
+  to be significant if it lies outside the pointwise critical boundary
+  anywhere). Controlled by new argument 'do.pwrong'.
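+
+  A hypothetical call (the number of simulations is arbitrary):
+
+     E <- envelope(cells, Kest, nsim = 39, do.pwrong = TRUE)
+     E     # printed output should include the estimated 'pwrong'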
+
+  o quadrat.test
+  New argument 'alternative' allows choice of alternative hypothesis
+  and returns one-sided or two-sided p-values as appropriate.
+
+  o quadrat.test
+  Can now perform Monte Carlo test as well 
+  (for use in small samples where the chi^2 approximation is inaccurate)
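+
+  For example (illustrative values):
+
+     quadrat.test(cells, nx = 3, alternative = "clustered",
+                  method = "MonteCarlo", nsim = 999)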
+
+  o Softcore
+  Improved numerical stability.
+  New argument 'sigma0' for manual control over rescaling.
+
+  o rescale
+  If scale argument 's' is missing, then the data are rescaled
+  to native units. For example if the current unit is 0.1 metres, 
+  coordinates will be re-expressed in metres.
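+
+  For example, assuming a dataset recorded in units of 0.1 metres,
+  such as 'swedishpines':
+
+     X <- rescale(swedishpines)   # coordinates re-expressed in metres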
+
+  o psst
+  New argument 'verbose', with default value TRUE.
+
+  o is.subset.owin
+  Accelerated for polygonal windows
+
+  o rmh.default
+  'track' is no longer a formal argument of rmh.default; it is now a 
+  parameter of rmhcontrol. However there is no change in usage: 
+  the argument 'track' can still be given to rmh.default.
+
+  o clf.test
+  Has been renamed 'dclf.test' 
+  to give proper attribution to Peter Diggle.
+
+  o betacells
+  This dataset has been restructured. The vector of cell profile areas,
+  formerly given by betacells.extra$area, has now been included
+  as a column of marks in the point pattern 'betacells'.
+
+  o ants
+  The function ants.extra$plot() has been renamed plotit() 
+  for conformity with other datasets.
+
+  o redwoodfull
+  The function redwoodfull.extra$plot() has been renamed plotit() 
+  for conformity with other datasets.
+
+  o nbfires
+  For conformity with other datasets, there is now an object nbfires.extra
+
+BUG FIXES
+
+  o ripras
+  Expansion factor was incorrect in the rectangular case.
+  Fixed.
+
+  o Triplets
+  Crashed sometimes with error "dim(X) must have positive length".
+  Fixed.
+
+  o affine.im
+  Crashed in the case of a diagonal transformation matrix!
+  Spotted by Ege Rubak.
+  Fixed.
+
+  o envelope.envelope
+  Ignored the argument 'global'.
+  Fixed.
+
+  o MultiStraussHard
+  The printed output showed the hardcore radii as NULL.
+  Spotted by Ege Rubak.
+  Fixed.
+
+  o "[.psp"
+  Crashed if the data were generated by rpoisline().
+  Spotted by Marcelino de la Cruz.
+  Fixed.
+
+  o plot.linim
+  If style="colour", the main title was always "x".
+  Fixed.
+
+  o plot.ppx
+  Setting add=TRUE did not prevent the domain being plotted.
+  Fixed.
+
+  o rmh
+  Crashed if x.start was an empty point pattern.
+  Spotted by Ege Rubak.
+  Fixed.
+
+  o as.ppp.data.frame
+  Crashed if any points lay outside the window.
+  Spotted by Ege Rubak.
+  Fixed.
+
+  o Ripley isotropic edge correction
+  Divide-by-zero error in rare cases.
+  Spotted by Daniel Esser.
+  Fixed.
+
+  o summary functions
+  For many of the summary functions (e.g. Kest, pcf),
+  the result of saving the object to disc was an enormous file.
+  Spotted by Robert John-Chandran.
+  Fixed.
+
+  o pcf.fv
+  Default plot was wrongly coloured.
+  Fixed.
+  
+	CHANGES IN spatstat VERSION 1.28-1
+
+OVERVIEW 
+
+   o We thank Ege Rubak, Gopal Nair, Jens Oehlschlaegel and Mike Zamboni
+     for contributions.
+
+   o New approximation to the intensity of a fitted Gibbs model.
+
+   o Minor improvements and bug fixes
+
+   o spatstat now 'Suggests' the package 'gsl'
+
+NEW FUNCTIONS
+
+   o intensity, intensity.ppp, intensity.ppm
+   Calculate the intensity of a dataset or fitted model.
+   Includes new approximation to the intensity of a fitted Gibbs model
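+
+   An illustrative sketch (interaction and parameter values are arbitrary):
+
+      intensity(swedishpines)          # empirical intensity of the pattern
+      fit <- ppm(swedishpines, ~1, Strauss(r = 7))
+      intensity(fit)                   # approximate intensity of the Gibbs model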
+
+   o LambertW
+   Lambert's W-function
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+  o envelope
+  Improved plot labels for envelopes that were generated using
+  the 'transform' argument.
+
+  o plot.fv
+  Improved algorithm for collision detection.
+
+  o plot.im
+  Now returns the colour map used.
+
+  o plot.listof, plot.splitppp
+  Slight change to handling of plot.begin and plot.end
+
+  o square
+  Now accepts vectors of length 2
+
+  o plot.fii
+  Increased resolution of the plot obtained from plot(fitin(ppm(...))) 
+
+  o image.listof
+  If equal.ribbon=TRUE, the colour ribbon will no longer be 
+  displayed repeatedly for each panel, 
+  but will now be plotted only once, at the right hand side of the plot array.
+
+BUG FIXES
+
+  o vcov.ppm
+  Results were sometimes incorrect for a Gibbs model with non-trivial trend.
+  Spotted by Ege Rubak.
+  Fixed.
+
+  o nncross
+  In rare cases the results could be slightly incorrect.
+  Spotted by Jens Oehlschlaegel.
+  Fixed.
+
+  o plot.fv
+  When add=TRUE, the x limits were sometimes truncated.
+  Spotted by Mike Zamboni.
+  Fixed.
+
+  o plot.im
+  Labels for the tick marks on the colour ribbon
+  were sometimes ridiculous, e.g. "2.00000001".
+  Fixed.
+
+	CHANGES IN spatstat VERSION 1.28-0
+
+OVERVIEW 
+
+   o We thank Farzaneh Safavimanesh, Andrew Hardegen and Tom Lawrence 
+     for contributions.
+
+   o Improvements to 3D summary functions.
+
+   o A multidimensional point pattern (ppx) can now have 'local' coordinates
+   as well as spatial and temporal coordinates and marks.
+
+   o Changed format for point patterns on a linear network (lpp).
+   Changes are backward compatible. Many computations run faster.
+
+   o More support for fitted cluster models (kppm).
+
+   o split method for multidimensional point patterns (ppx)
+   and point patterns on a linear network (lpp).
+
+   o Fixed bug causing errors in plot.im
+
+   o Miscellaneous improvements and bug fixes
+
+NEW FUNCTIONS
+
+   o exactMPLEstrauss
+   Fits the stationary Strauss point process model
+   using an exact maximum pseudolikelihood technique.
+   This is mainly intended for technical investigation of algorithms.
+
+   o split.ppx
+   Method for 'split' for multidimensional point patterns (class 'ppx').
+   This also works for point patterns on a linear network (class 'lpp').
+
+   o model.images
+   This function is now generic, with methods for classes ppm, kppm, lppm
+
+   o model.frame, model.matrix
+   These generic functions now have methods for classes kppm, lppm
+
+   o as.owin.kppm, as.owin.lppm
+   New methods for 'as.owin' for objects of class kppm, lppm
+
+   o as.linnet.lppm
+   Extracts the linear network in which a point process model was fitted.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o class 'ppx'
+   An object of class 'ppx' may now include 'local' coordinates 
+   as well as 'spatial' and 'temporal' coordinates, and marks.
+
+   o ppx
+   Arguments have changed.
+   
+   o class 'lpp'
+   The internal format of lpp objects has been extended
+   (but is backward-compatible). Many computations run faster.
+   To convert an object to the new format: X <- lpp(as.ppp(X), as.linnet(X)).
+
+   o F3est
+   Calculation of theoretical Poisson curve ('theo') has changed,
+   and is now controlled by the argument 'sphere'.
+
+   o rmh, rmhstart
+   The initial state ('start') can now be missing or null.
+
+   o im, as.im
+   The pixel coordinates in an image object are now generated
+   more accurately. This avoids a numerical error in plot.im.
+
+   o eval.fv, eval.fasp
+   Evaluation is now applied only to columns that contain
+   values of the function itself (rather than values of the
+   derivative, hazard rate, etc). This is controlled by the
+   new argument 'dotonly'.
+
+   o spatstat.options
+   New option 'nvoxel'
+
+   o quad.ppm
+   Now accepts kppm objects.
+
+   o str
+   This generic function (for inspecting the internal structure of an object)
+   now produces sensible output for objects of class 'hyperframe', 'ppx', 'lpp'
+
+   o ppx, coords.ppx, coords<-.ppx
+   The arguments to these functions have changed.
+
+   o lgcp.estK, Kmodel
+   Computation can be greatly accelerated 
+   by setting spatstat.options(fastK.lgcp=TRUE).
+
+   o G3est
+   Computation accelerated.
+
+   o envelope
+   Computation slightly accelerated.
+
+   o spatstat.options
+   New option 'fastK.lgcp' 
+
+BUG FIXES
+
+   o nndist.psp
+   Caused an error if length(k) > 1.
+   Fixed.
+
+   o plot.im
+   Sometimes reported an error 
+   "useRaster=TRUE can only be used with a regular grid."
+   This was due to numerical rounding effects on the coordinates
+   of a pixel image. 
+   Fixed.
+   
+   o plot.fv
+   If a formula was used to specify the plot,
+   the names of variables in the formula were sometimes
+   incorrectly matched to *functions*.
+   Spotted by Farzaneh Safavimanesh.
+   Fixed.
+
+   o F3est
+   Took a very long time if the containing box was very flat,
+   due to the default value of 'vside'.
+   Fixed.
+
+   o rmh, rmhmodel
+   An erroneous warning about 'outdated format of rmhmodel object' 
+   sometimes occurred.
+   Fixed.
+
+   o marks<-.ppx
+   Names of result were incorrect. Fixed.
+
+   o hyperframe class
+   Various minor bug fixes.
+
+
+	CHANGES IN spatstat VERSION 1.27-0
+
+OVERVIEW 
+
+   o Variance estimates are now available for all Gibbs point process models.
+
+   o Cressie-Loosmore-Ford test implemented
+
+   o plot.fv now avoids collisions between the legend and the graphics.
+
+   o Extension to predict.ppm
+
+   o Improvements to envelopes and multitype summary functions. 
+
+   o Line transects of a pixel image.
+
+   o Changes to defaults in Metropolis-Hastings simulations.
+
+   o More geometrical operations
+
+   o Bug fixes.
+
+   o We thank Aruna Jammalamadaka for contributions.
+
+NEW FUNCTIONS
+
+   o clf.test
+   Perform the Cressie (1991) / Loosmore and Ford (2006) test of CSR
+   (or another model).
+
+   o mad.test
+   Perform the Maximum Absolute Deviation test of CSR (or another model).
+
+   o convolve.im
+   Compute convolution of pixel images.
+
+   o Kmulti.inhom
+   Counterpart of 'Kmulti' for spatially-varying intensity.
+
+   o rmhexpand
+   Specify a simulation window, or a rule for expanding the
+   simulation window, in Metropolis-Hastings simulation (rmh)
+
+   o transect.im
+   Extract pixel values along a line transect.
+
+   o affine.im
+   Apply an affine transformation to a pixel image.
+
+   o scalardilate
+   Perform scalar dilation of a geometrical object 
+   relative to a specified origin.
+
+   o reflect
+   Reflect a geometrical object through the origin.
+
+   o "[.lpp", "[.ppx"
+   Subset operators for the classes "lpp" (point pattern on 
+   linear network) and "ppx" (multidimensional space-time
+   point pattern).
+ 
+   o is.rectangle, is.polygonal, is.mask
+   Determine whether a window w is a rectangle, a domain with polygonal 
+   boundaries, or a binary pixel mask.
+
+   o has.offset
+   Determines whether a fitted model object (of any kind)
+   has an offset.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o predict.ppm
+   This function can now calculate the conditional intensity of a model
+   relative to any point pattern X (not just the original data pattern).
+
+   o vcov.ppm
+   This function now handles all Gibbs point process models.
+
+   o plot.fv
+   Collisions between the legend box and the graphics are now detected
+   and avoided. 
+
+   o rmh.ppm, rmh.default, simulate.ppm, qqplot.ppm, envelope.ppm
+   These functions now have slightly different default behaviour
+   because of changes to the handling of arguments to 'rmhcontrol'.
+
+   o rmhcontrol
+   The default value of the parameters 'periodic' and 'expand'
+   has changed.
+
+   o rmhcontrol
+   The parameter 'expand' can now be in any format 
+   acceptable to rmhexpand().
+
+   o rmh.ppm, rmh.default, simulate.ppm
+   Any 'rmhcontrol' parameter can now be given directly 
+   as an argument to rmh.ppm, rmh.default or simulate.ppm.
+
+   o Kmulti, Gmulti, Jmulti
+   The arguments I, J can now be any kind of subset index
+   or can be functions that yield a subset index.
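+
+   For example (illustrative; 'amacrine' has marks "on" and "off"):
+
+      K <- Kmulti(amacrine, I = (marks(amacrine) == "on"),
+                            J = (marks(amacrine) == "off"))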
+
+   o envelope.envelope
+   In envelope(E, fun=NULL) if E does not contain simulated
+   summary functions, but does contain simulated point patterns, 
+   then 'fun' now defaults to Kest, instead of flagging an error.
+
+   o print.ppp, summary.ppp
+   If the point pattern x was generated by Metropolis-Hastings
+   simulation using 'rmh', then print(x) and summary(x) show 
+   information about the simulation parameters.
+
+   o print.ppm
+   Standard errors for the parameter estimates,
+   and confidence intervals for the parameters,
+   can now be printed for all Gibbs models 
+   (but are printed only for Poisson models by default).
+
+   o eval.im
+   Images with incompatible dimensions are now resampled 
+   to make them compatible (if harmonize=TRUE).
+
+   o spatstat.options
+   New option 'print.ppm.SE' controls whether standard errors 
+   and confidence intervals are printed for all Gibbs models,
+   for Poisson models only, or are never printed.
+
+   o inside.owin
+   Now accepts the form list(x,y) for the first argument.
+
+   o image.listof
+   New argument 'equal.ribbon' allows several images to be plotted
+   with the same colour map.
+
+   o is.subset.owin
+   Improved accuracy in marginal cases.
+
+   o expand.owin
+   Functionality extended to handle all types of expansion rule.
+
+   o default.rmhcontrol, default.expand
+   These functions now work with models of class 'rmhmodel' 
+   as well as 'ppm'
+
+   o print.rmhcontrol
+   Output improved.
+
+BUG FIXES
+
+   o linearK, linearKinhom
+   If any data points were located exactly at a vertex of the 
+   linear network, the weights for Ang's correction were incorrect, 
+   due to numerical error. This sometimes produced infinite 
+   or NA values of the linear K function.
+   Fixed.
+   
+   o predict.ppm
+   In some cases, predict.ppm(type="cif") generated a spurious warning 
+   that "number of rows of result is not a multiple of vector length."
+   Fixed.
+
+   o crossing.psp
+   Results were sometimes incorrect due to numerical rounding error
+   associated with GCC bug #323.
+   Fixed.
+
+   o MultiHard, MultiStrauss, MultiStraussHard
+   If the mark values contained non-alphanumeric characters,
+   the names of the interaction coefficients in coef(ppm(...)) 
+   were sometimes garbled.
+   Fixed.
+
+   o profilepl
+   For edge corrections other than the border correction,
+   an error message about 'rbord' would sometimes occur.
+   Fixed.
+
+   o is.marked, is.multitype
+   These functions gave the wrong answer for 'lpp' objects.
+   Fixed.
+
+   o marks<-.lpp,  marks<-.ppx
+   Format of result was garbled if new columns of marks were added.
+   Fixed.
+
+   o reach.rmhmodel
+   Gave the wrong answer for Geyer and BadGey models.
+   Fixed.
+
+   o envelope.envelope
+   Ignored the argument 'savefuns'.
+   Fixed.
+
+   o BadGey
+   Sometimes wrongly asserted that the parameter 'sat' was invalid.
+   Occurred only in ppm(project=TRUE).
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.26-1
+
+OVERVIEW 
+
+   o Variance-covariance matrix for Gibbs point process models.
+
+   o Bootstrap confidence bands for pair correlation function and K function.
+
+   o Bug fix in scan test.
+
+   o Area-interaction model accelerated.
+
+   o We thank Jean-Francois Coeurjolly and Ege Rubak for contributions.
+
+NEW FUNCTIONS
+
+  o lohboot
+  Computes bootstrap confidence bands for pair correlation function
+  and K function using Loh's (2008) mark bootstrap.
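+
+  For example (illustrative):
+
+     g <- lohboot(cells, fun = "pcf")
+     plot(g)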
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+  o vcov.ppm
+  Now works for all Gibbs point process models, thanks to new code (and theory)
+  from Jean-Francois Coeurjolly and Ege Rubak
+
+  o AreaInter
+  Computations related to the area-interaction point process 
+  (ppm, predict.ppm, residuals.ppm, diagnose.ppm, qqplot.ppm)
+  have been accelerated.
+
+BUG FIXES
+
+  o scan.test
+  Results were sometimes incorrect due to numerical instability
+  (a 'Gibbs phenomenon'). 
+  Fixed.
+  
+	CHANGES IN spatstat VERSION 1.26-0
+
+OVERVIEW 
+
+   o We thank Jens Oehlschlaegel for contributions.
+
+   o Further substantial acceleration of spatstat functions.
+
+   o Workaround for bug in RandomFields package.
+
+   o Numerous modifications to internal code.
+
+NEW FUNCTIONS
+
+   o RandomFieldsSafe
+   There is a bug in the package 'RandomFields' (version <= 2.0.54)
+   which causes a crash to occur, in the development version of R
+   but not in R 2.15.0. To avoid crashing spatstat, we have written
+   the temporary, undocumented function RandomFieldsSafe() 
+   which returns TRUE if it is safe to use the RandomFields package.
+   Examples in the spatstat help files for kppm, lgcp.estK, lgcp.estpcf
+   and rLGCP are only executed if RandomFieldsSafe() returns TRUE.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o Many functions
+   Many spatstat functions now run faster, and will handle larger datasets,
+   thanks to improvements in the internal code, 
+   following suggestions from Jens Oehlschlaegel.
+
+   o Many functions
+   The response to an 'Interrupt' signal is slightly slower.
+
+	CHANGES IN spatstat VERSION 1.25-5
+
+OVERVIEW 
+
+   o We thank Ya-Mei Chang, Jens Oehlschlaegel and Yong Song for contributions.
+
+   o Extended functionality of 'rhohat' to local likelihood smoothing
+   and bivariate smoothing.
+
+   o Nearest neighbour distance computations accelerated.
+
+   o spatstat now 'Suggests:' the package 'locfit'
+
+NEW FUNCTIONS
+
+   o rho2hat
+   Bivariate extension of 'rhohat' for estimating spatial residual risk, 
+   or intensity as a function of two covariates.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o rhohat
+   Estimation can now be performed using local likelihood fitting
+   with the 'locfit' package, or using kernel smoothing.
+
+   o nncross
+   Substantially accelerated.
+   New arguments added to control the return value and the sorting of data.
+
+BUG FIXES
+
+   o plot.msr
+   Crashed if the argument 'box' was given.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.25-4
+
+OVERVIEW 
+
+   o We thank Jonathan Lee and Sergiy Protsiv for contributions.
+
+   o Improvements and bug fixes to K function for very large datasets
+
+NEW FUNCTIONS
+
+   o rStraussHard
+   Perfect simulation for Strauss-hardcore process (with gamma <= 1)
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o plot.im
+   The colour ribbon can now be placed left, right, top or bottom 
+   using new argument 'ribside'
+
+   o profilepl
+   Does not generate warnings when some of the candidate models
+   have zero likelihood - for example when fitting a model with a hard core.
+
+   o Kest
+   Now includes fast algorithm for 'correction="none"'
+   which will handle patterns containing millions of points.
+
+BUG FIXES
+
+   o Kest, Lest
+   Gave incorrect values in very large datasets, due to numerical overflow.
+   `Very large' typically means about 1 million points in a random pattern, 
+   or 100,000 points in a tightly clustered pattern.
+   [Overflow cannot occur unless there are at least 46,341 points.]
+   [Spotted by Sergiy Protsiv.]
+   Fixed.
+
+   o Kest, Lest
+   Ignored 'ratio=TRUE' if the argument 'domain' was given.
+   [Spotted by Jonathan Lee.]
+   Fixed.
+
+   o rjitter
+   Output was sometimes incorrect. 
+   [Spotted by Sergiy Protsiv.]
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.25-3
+
+OVERVIEW 
+
+   o We thank Daniel Esser for contributions.
+
+   o Improved support for fitted point process models.
+
+   o Bug fixes.
+
+NEW FUNCTIONS
+
+   o simulate.slrm
+   Method for 'simulate' for spatial logistic regression models.
+
+   o labels.ppm, labels.kppm, labels.slrm
+   Methods for 'labels' for fitted point process models.
+
+   o commonGrid
+   Determine a common spatial domain and pixel resolution
+   for several pixel images and/or binary masks
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o effectfun
+   Now has argument 'se.fit' allowing calculation of standard errors
+   and confidence intervals.
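+
+   A hypothetical sketch:
+
+      fit <- ppm(cells, ~x)
+      plot(effectfun(fit, "x", se.fit = TRUE))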
+
+   o [.msr
+   Now handles character-valued indices. 
+
+   o print.summary.ppm
+   Output gives a more precise description of the fitting method.
+
+   o ppm, kppm, slrm
+   Confidence intervals for the fitted trend parameters
+   can now be obtained using 'confint' 
+
+   o predict.slrm
+   New argument 'window' 
+
+   o union.owin
+   Now handles a single argument: union.owin(A) returns A.
+
+BUG FIXES
+
+   o selfcrossing.psp
+   y coordinate values were incorrect.
+   [Spotted by Daniel Esser.]
+   Fixed.
+
+   o as.im.owin
+   Did not handle a binary mask with a 1 x 1 pixel array.
+   Fixed.
+
+   o predict.slrm
+   Results of predict(object, newdata) were incorrect 
+   if the spatial domain of 'newdata' was larger than the original domain.
+   Fixed.
+
+   o ppm
+   If the model was the uniform Poisson process,
+   the argument 'rbord' was ignored.
+   Fixed.
+
+   o image subset assignment "[<-.im"
+   Generated an error if the indexing argument 'i' 
+   was a point pattern containing zero points.
+   Fixed.
+
+   o hyperframe subset assignment "[<-.hyperframe"
+   Did not correctly handle the case where a single column 
+   of the hyperframe was to be changed.
+   Fixed.
+   
+   o help(bw.relrisk), help(rmh.ppm), help(plot.plotppm)
+   These help files had the side-effect of changing some options
+   in spatstat.options.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.25-2
+
+OVERVIEW 
+
+   o We thank Abdollah Jalilian and Thierry Onkelinx for contributions.
+
+   o Very Important Bug fixes.
+
+   o Improved mechanism for handling 'invalid' point processes
+
+NEW FUNCTIONS
+
+   o as.matrix.owin
+   Converts a window to a logical matrix.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o project.ppm
+     Improved algorithm.
+     Now handles terms in the trend formula as well as the interaction.
+     The projected point process is now obtained by re-fitting the model,
+     and is guaranteed to be the maximum pseudolikelihood fit.
+
+   o plot.im
+     Now handles many arguments recognised by plot.default
+     such as 'cex.main'. Also handles argument 'box'.
+     New argument 'ribargs' contains parameters controlling the
+     ribbon plot only.      
+
+   o spatstat.options
+     New option 'project.fast' allows a faster shortcut for project.ppm
+
+   o spatstat.options
+     New options 'rmh.p', 'rmh.q', 'rmh.nrep' determine the default values
+     of the parameters p, q and nrep of the Metropolis-Hastings algorithm.
+     See rmhcontrol
+
+   o ppm
+     Slightly accelerated.
+
+BUG FIXES
+
+   o nncross, distfun, AreaInter
+     Results of nncross were possibly incorrect 
+     when X and Y did not have the same window. 
+     This bug affected values of 'distfun' and may also 
+     have affected ppm objects with interaction 'AreaInter'.
+     [Spotted by Thierry Onkelinx]
+     Bug introduced in spatstat 1.9-4 (June 2006).
+     Fixed.
+
+   o rCauchy
+     Simulations were incorrect in the sense that 
+     the value of 'omega' was inadvertently doubled 
+     (i.e. omega was incorrectly replaced by 2 * omega).
+     Bug introduced in spatstat 1.25-0.
+     Fixed.
+
+   o plot.im
+     White lines were present in the image display, on some graphics devices,
+     due to changes in R 2.14.
+     Fixed.
+
+   o update.ppm
+     The result of 'update(object, formula)' sometimes contained errors
+     in the internal format.
+     Bug introduced in spatstat 1.25-0.
+     Fixed.
+
+   o example(AreaInter), example(bw.smoothppp),
+   example(Kest.fft), example(plot.owin), example(predict.ppm),
+   example(simulate.ppm)
+   Executing these examples had the side-effect of
+   changing some of the parameters in spatstat.options.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.25-1
+
+OVERVIEW 
+
+   o We thank Neba Funwi-Gabga and Jorge Mateu for contributions.
+
+   o New dataset of gorilla nest sites
+
+   o New functions for perfect simulation 
+
+   o Bug fix for rare crashes in rStrauss
+
+   o Code for ensuring a fitted point process model is a valid point process
+
+NEW DATASET
+
+   o gorillas
+   Gorilla nest sites in a National Park in Cameroon.
+   Generously contributed by Neba Funwi-Gabga
+
+NEW FUNCTIONS
+
+   o rDiggleGratton, rDGS, rHardcore
+   Perfect simulation for the Diggle-Gratton process,
+   Diggle-Gates-Stibbard process, and Hardcore process.
+
+   o bw.scott
+   Scott's rule of thumb for bandwidth selection in
+   multidimensional smoothing
+
+   o valid.ppm
+   Checks whether a fitted point process model is a valid point process
+
+   o project.ppm
+   Forces a fitted point process model to be a valid point process
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o ppm
+   New argument 'project' determines whether the fitted model
+   is forced to be a valid point process
+
+   o linnet
+   Substantially accelerated.
+
+   o rStrauss
+   Slightly accelerated.
+
+   o summary.lpp
+   Now prints the units of length.
+
+BUG FIXES
+
+   o rStrauss
+   Crashed rarely (once every 10 000 realisations)
+   with a memory segmentation fault.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.25-0
+
+OVERVIEW 
+
+   o Leverage and influence for point process models
+
+   o New cluster models (support for model-fitting and simulation).
+
+   o Fit irregular parameters in trend of point process model
+
+   o Third order summary statistic.
+
+   o Improvements to speed and robustness of code.
+
+   o spatstat now depends on R 2.14
+
+   o We thank Abdollah Jalilian and Rasmus Waagepetersen for contributions.
+
+NEW FUNCTIONS
+
+   o leverage.ppm, influence.ppm, dfbetas.ppm
+   Leverage and influence for point process models
+
+   o ippm
+   Experimental extension to 'ppm' which fits irregular parameters in trend
+   by Fisher scoring algorithm.
+
+   o Tstat
+   Third order summary statistic for point patterns
+   based on counting triangles.
+
+   o rCauchy, rVarGamma
+   simulation of a Neyman-Scott process with Cauchy clusters
+   or Variance Gamma (Bessel) clusters.
+   Contributed by Abdollah Jalilian.
+
+   o rPoissonCluster
+   simulation of a general Poisson cluster process
+
+   o model.covariates
+   Identify the covariates involved in a model (lm, glm, ppm etc)
+
+   o as.im.distfun
+   Converts a 'distfun' to a pixel image.
+
+   o cauchy.estK, cauchy.estpcf, vargamma.estK, vargamma.estpcf
+   Low-level model-fitting functions for the Neyman-Scott process 
+   with Cauchy or Variance-Gamma cluster kernel.
+   Contributed by Abdollah Jalilian.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o kppm
+   Now accepts clusters="Cauchy" or clusters="VarGamma"
+   for the Neyman-Scott process with Cauchy or Variance-Gamma cluster kernel.
+   Code contributed by Abdollah Jalilian.
+
+   o rNeymanScott
+   Argument 'rcluster' may now take a different format.
+
+   o psst
+   Argument 'funcorrection' changed to 'funargs' allowing greater flexibility.
+
+   o plot.fv, plot.envelope
+   New argument 'limitsonly' allows calculation of 
+   a common x,y scale for several plots.
+
+   o overall speed 
+   spatstat is now byte-compiled and runs slightly faster.
+
+   o user interrupt
+   Long calculations in spatstat now respond to the Interrupt/Stop signal.
+
+   o update.ppm
+   Now runs faster and uses much less memory,
+   when the update only affects the model formula (trend formula).
+   
+   o rNeymanScott, rThomas, rMatClust
+   Accelerated thanks to Rasmus Waagepetersen.
+
+   o multitype data and models
+   Second order multitype statistics (such as Kcross, pcfcross)
+   and multitype interaction models (such as MultiStrauss)
+   now run faster, by a further 5%.
+
+BUG FIXES
+
+   o distfun
+   Some manipulations involving 'distfun' objects failed if the
+   original data X in distfun(X) did not have a rectangular window.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.24-2
+
+OVERVIEW 
+
+   o Geyer's triplet interaction
+
+   o more functionality for replicated point patterns
+
+   o changed default for simulation window in point process simulation
+
+   o changed default for edge correction in Kcom, Gcom
+ 
+   o data in spatstat is now lazy-loaded
+
+   o bug fixes 
+
+NEW FUNCTIONS
+
+   o Triplets
+   Geyer's triplet interaction, for point process models
+
+   o coef.summary.ppm
+   New method coef.summary.ppm
+   You can now type 'coef(summary(fit))' to extract a table
+   of the fitted coefficients of the point process model 'fit'
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o data in spatstat are now lazy-loaded
+   so you don't have to type data(amacrine), etc.
+
+   o rmh.default, rmh.ppm, simulate.ppm
+   These now handle the 'triplets' interaction
+   
+   o fryplot
+   Now has arguments 'to' and 'from', allowing 
+   selection of a subset of points.
+
+   o fryplot, frypoints
+   These functions now handle marked point patterns properly.
+
+   o Kcross, Kdot, Kmulti
+   New argument 'ratio' determines whether the numerator and denominator
+   of the estimate of the multitype K-function will be stored. 
+   This enables analysis of replicated point patterns, 
+   using 'pool.rat()' to pool the K function estimates.
+
+   o rmh.ppm, simulate.ppm, default.expand
+   For point process models which have a trend depending only on x and y,
+   the simulation window is now taken to be the same as the original 
+   window containing the data (by default). 
+   That is, `expansion' does not take place, by default.
+   (In previous versions of spatstat the simulation window was larger
+   than the original data window.)
+   
+   o rmh.ppm, simulate.ppm
+   The argument sequence for these functions has changed.
+   New argument 'expand' allows more explicit control over simulation domain.
+
+   o Kcom, Gcom
+   New argument 'conditional' gives more explicit control
+   over choice of edge correction in compensator.
+   Simplified defaults for edge correction.  
+
+   o Kinhom
+   Improved plot labels.
+
+   o profilepl
+   Printed output improved.
+
+BUG FIXES
+
+   o Lest
+   The variance approximations (Lotwick-Silverman and Ripley)
+   obtained with var.approx=TRUE, were incorrect for Lest 
+   (although they were correct for Kest) due to a coding error.
+   Fixed.
+   
+   o simulate.ppm
+   Ignored the argument 'control' in some cases. 
+   Fixed.
+
+   o pcf and its relatives (pcfinhom, pcfcross.inhom, pcfdot.inhom)
+   Sometimes gave a warning about 'extra arguments ignored'.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.24-1
+
+OVERVIEW 
+
+   o Spatial Scan Test
+
+   o Functionality for replicated point patterns
+
+   o Bug fixes
+
+NEW FUNCTIONS
+
+   o scan.test
+     Spatial scan test of clustering
+
+   o rat
+   New class of 'ratio objects'
+
+   o pool.rat
+   New method for 'pool'.
+   Combines K function estimates for replicated point patterns (etc) 
+   by computing ratio-of-sums 
+
+   o unnormdensity
+   Weighted kernel density with weights that do not sum to 1
+   and may be negative.
+
+   o compatible
+   New generic function with methods for 'fv', 'im', 'fasp' and 'units'
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o Kest
+   New argument 'ratio' determines whether the numerator and denominator
+   of the estimate of the K-function will be stored. 
+   This enables analysis of replicated point patterns, 
+   using 'pool.rat()' to pool the K function estimates.
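+
+   For example, if X1 and X2 are point patterns from replicated
+   experiments (hypothetical data):
+
+      K1 <- Kest(X1, ratio = TRUE)
+      K2 <- Kest(X2, ratio = TRUE)
+      K  <- pool(K1, K2)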
+
+   o Lest
+   Now handles theoretical variance estimates (using delta method)
+   if var.approx=TRUE
+
+   o as.mask
+   Argument 'eps' can now be a 2-vector, specifying x and y resolutions.
+
+   o default.expand
+   Behaviour changed slightly.
+
+   o plot.listof, plot.splitppp, contour.listof, image.listof
+   The arguments 'panel.begin' and 'panel.end' can now be objects
+   such as windows. 
+
+BUG FIXES
+
+   o rgbim, hsvim
+   Did not work on images with non-rectangular domains.
+   Fixed.
+
+   o scaletointerval
+   Did not handle NA's.
+   Fixed.
+
+
+	CHANGES IN spatstat VERSION 1.24-0
+
+OVERVIEW 
+
+   o This version was not released publicly.
+
+
+	CHANGES IN spatstat VERSION 1.23-6
+
+OVERVIEW 
+
+   o Spatial covariance functions of windows and pixel images.
+
+   o Area-interaction models can now be fitted in non-rectangular windows
+
+   o Bug fix for envelope of inhomogeneous Poisson process
+
+   o Bug fix for raster conversion
+
+   o New vignette on 'Getting Started with Spatstat'
+
+   o Code accelerated.
+
+NEW FUNCTIONS
+
+   o imcov
+   Spatial covariance function of pixel image
+   or spatial cross-covariance function of two pixel images
+
+   o harmonise.im
+   Make several pixel images compatible 
+   by converting them to the same pixel grid
+
+   o contour.listof, image.listof
+   Methods for contour() and image() for lists of objects
+
+   o dummify
+   Convert data to numeric values by constructing dummy variables.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o setcov
+   Can now compute the `cross-covariance' between two regions
+
+   o AreaInter
+   Point process models with the AreaInter() interaction
+   can now be fitted to point pattern data X in any window.
+
+   o areaGain, areaLoss
+   These now handle arbitrary windows W.
+   They are now more accurate when r is very small.
+
+   o Kcom
+   Computation vastly accelerated, for non-rectangular windows.
+
+   o vignettes
+   New vignette 'Getting Started with the Spatstat Package'
+
+   o nncorr, nnmean, nnvario
+   These functions now handle data frames of marks.
+
+BUG FIXES
+
+   o envelope.ppm
+   If the model was an inhomogeneous Poisson process, 
+   the resulting envelope object was incorrect
+   (the simulations were correct, but the envelopes were calculated
+   assuming the model was CSR). Bug was introduced in spatstat 1.23-5.
+   Fixed.
+
+   o envelope.ppm
+   If the model was an inhomogeneous Poisson process 
+   with intensity a function of x and y only,
+   overflow errors sometimes occurred ('insufficient storage' 
+   or 'attempting to generate a large number of random points').
+   Fixed.
+
+   o as.im.im
+   The result of as.im(X, W) was incorrect if 'W' did not cover 'X'.
+   Fixed.
+
+   o as.mask
+   The result of as.mask(w, xy) was incorrect if 'xy' did not cover 'w'.
+   Fixed.
+
+   o plot.fv
+   Legend was incorrectly labelled if 'shade' variables were not included
+   in the plot formula.
+   Fixed.
+
+   o areaGain, areaLoss
+   Crashed if the radius r was close to zero.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.23-5
+
+OVERVIEW 
+
+   o Bug fix to bandwidth selection.
+
+   o Functions to pool data from several objects of the same class.
+
+   o Improvements and bug fixes.
+
+   o We thank Michael Sumner for contributions.
+
+NEW FUNCTIONS
+
+   o pool
+   Pool data from several objects of the same class
+
+   o pool.envelope
+   Pool simulated data from several envelope objects and create a new envelope
+
+   o pool.fasp
+   Pool simulated data from several function arrays and create a new array
+
+   o envelope.envelope
+   Recalculate an envelope from simulated data using different parameters
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o bw.diggle, bw.relrisk, bw.smoothppp, bw.optim
+   Plot method modified.
+
+   o model.depends
+   Now also recognises 'offset' terms.
+
+BUG FIXES
+
+   o bw.diggle
+   Bandwidth was too large by a factor of 2.
+   Fixed.
+
+   o plot.psp
+   Crashed if any marks were NA.
+   Fixed.
+
+   o pointsOnLines
+   Crashed if any segments had zero length.
+   Ignored argument 'np' in some cases.
+   Fixed.
+
+   o stieltjes
+   Crashed if M had only a single column of function values.
+   Fixed.
+
+	CHANGES IN spatstat VERSION 1.23-4
+
+OVERVIEW 
+
+    o Bandwidth selection for density.ppp and smooth.ppp
+
+    o Layered plots. 
+
+    o Model-handling facilities.
+
+    o Improvements and bug fixes.
+
+
+NEW FUNCTIONS
+
+   o bw.diggle
+   Bandwidth selection for density.ppp by mean square error cross-validation.
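+
+   For example (illustrative):
+
+      b <- bw.diggle(cells)
+      plot(density(cells, sigma = b))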
+
+   o bw.smoothppp
+   Bandwidth selection for smooth.ppp by least-squares cross-validation.
+
+   o layered, plot.layered
+   A simple mechanism for controlling plots that consist of
+   several successive layers of data.
+
+   o model.depends
+   Given a fitted model (of any kind), identify which of the covariates
+   is involved in each term of the model.
+
+   o model.is.additive
+   Determine whether a fitted model  (of any kind) is additive, 
+   in the sense that each term in the model involves at most one covariate.
+
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o smooth.ppp
+   Bandwidth 'sigma' is now selected by least-squares cross-validation
+
+   o bw.relrisk
+   Computation in large datasets accelerated.
+   New arguments 'hmin', 'hmax' control the range of trial values of bandwidth.
+
+   o Hest, Gfox, Jfox
+   Improved algebraic labels for plot
+
+   o spatstat.options
+   New parameter 'n.bandwidth'
+
+   o density.ppp, smooth.ppp
+   Slightly accelerated.
+
+   o point-in-polygon test
+   Accelerated.
+
+BUG FIXES
+
+    o with.fv
+    Mathematical labels were incorrect in some cases.
+    Fixed.
+
+    o bw.relrisk
+    Implementation of method="weightedleastsquares" was incorrect
+    and was equivalent to method="leastsquares".
+    Fixed.
+
+    o smooth.ppp
+    NaN values occurred if the bandwidth was very small.
+    Fixed.
+
+	CHANGES IN spatstat VERSION 1.23-3
+
+OVERVIEW 
+
+    o Urgent bug fix.
+
+BUG FIXES
+
+    o crossing.psp
+    Crashed occasionally with a message about NA or NaN values.
+    Fixed.
+
+    o affine.ppp
+    Crashed if the point pattern was empty.
+    Fixed.
+
+	CHANGES IN spatstat VERSION 1.23-2
+
+OVERVIEW 
+
+    o Bug fixes.
+
+    o Several functions have been accelerated.
+
+    o We thank Marcelino de la Cruz and Ben Madin for contributions.
+
+NEW FUNCTIONS
+
+    o sumouter, quadform
+    Evaluate certain quadratic forms.
+
+    o flipxy
+    Exchange x and y coordinates.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o vcov.ppm
+   Accelerated.
+
+   o owin, as.owin
+   Checking the validity of polygons has been accelerated.
+
+   o crossing.psp, selfcrossing.psp
+   Accelerated.
+
+BUG FIXES
+
+    o split.ppp
+    If drop=TRUE then some of the point patterns had the wrong windows.
+    Spotted by Marcelino de la Cruz.
+    Fixed.
+
+    o split.ppp
+    Crashed if the tessellation did not cover the point pattern.
+    Fixed.
+
+    o predict.ppm
+    Crashed when type="se" if NA's were present.
+    Spotted by Ben Madin.
+    Fixed.
+
+    o plot.ppp
+    Incorrectly handled the case where both 'col' and 'cols' were present.
+    Fixed.
+
+    o polygon geometry
+    The point-in-polygon test gave the wrong answer in some boundary cases.
+    Fixed.
+    
+
+	CHANGES IN spatstat VERSION 1.23-1
+
+OVERVIEW 
+
+    o Important bug fix to 'localpcf'.
+
+    o Inverse-distance weighted smoothing.
+
+    o Inhomogeneous versions of neighbourhood density functions.
+
+    o Internal repairs and bug fixes.
+
+    o We thank Mike Kuhn and Ben Madin for contributions.
+
+NEW FUNCTIONS
+
+    o idw
+      Inverse-distance weighted smoothing.
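+
+      For example (illustrative; 'longleaf' has numeric marks):
+
+         plot(idw(longleaf))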
+
+    o localKinhom, localLinhom, localpcfinhom
+      Inhomogeneous versions of localK, localL, localpcf
+
+BUG FIXES
+
+    o localpcf
+      The columns of the result were in the wrong order.
+      [i.e. pair correlation functions were associated with the wrong points.]
+      Fixed.
+
+    o delaunay
+      If the union of several Delaunay triangles formed a triangle,
+      this was erroneously included in the result of delaunay().
+      Fixed.
+
+    o predict.ppm, plot.ppm
+      Sometimes crashed with a warning about 'subscript out of bounds'.
+      Fixed.      
+      
+    o point-in-polygon test
+      Vertices of a polygon were sometimes incorrectly classified
+      as lying outside the polygon. 
+      Fixed.
+
+    o Internal code
+      Numerous tweaks and repairs to satisfy the package checker
+      for the future R version 2.14.
+    
+	CHANGES IN spatstat VERSION 1.23-0
+
+OVERVIEW 
+
+    o  point patterns on a linear network:
+       new tools including geometrically-corrected linear K function, 
+       pair correlation function, point process models, envelopes
+
+    o  changes to renormalisation of estimates in Kinhom and pcfinhom
+
+    o  new dataset: Chicago street crime
+
+    o  spatstat now 'Suggests:' the package RandomFields
+
+    o  spatstat now has a Namespace
+
+    o  we thank Mike Kuhn, Monia Mahling, Brian Ripley for contributions.
+
+NEW DATASET
+
+    o  chicago
+    Street crimes in the University district of Chicago.
+    A point pattern on a linear network.
+
+NEW FUNCTIONS
+
+    o envelope.lpp
+      Simulation envelopes for point patterns on a linear network
+
+    o lineardisc
+      Compute the 'disc' of radius r in a linear network 
+
+    o linearpcf 
+      Pair correlation for point pattern on a linear network
+
+    o linearKinhom, linearpcfinhom
+      Inhomogeneous versions of the K function and pair correlation function
+      for point patterns on a linear network
+
+    o lppm 
+      Fit point process models on a linear network.
+
+    o anova.lppm
+      Analysis of deviance for point process models on a linear network.
+
+    o predict.lppm
+      Prediction for point process models on a linear network.
+
+    o envelope.lppm
+      Simulation envelopes for point process models on a linear network.
+
+    o linim
+      Pixel image on a linear network
+
+    o plot.linim
+      Plot a pixel image on a linear network
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o linearK
+     New argument 'correction'.
+     Geometrically-corrected estimation is performed by default
+     (based on a forthcoming paper by Ang, Baddeley and Nair)
+
+   o Kinhom
+     New argument 'normpower' allows different types of renormalisation.
+
+   o pcfinhom
+     Now performs renormalisation of estimate. 
+     Default behaviour changed - estimates are now renormalised by default.
+
+BUG FIXES
+
+    o  density.ppp
+       Crashed if argument 'varcov' was given.
+       Fixed.
+    
+	CHANGES IN spatstat VERSION 1.22-4
+
+OVERVIEW 
+
+    o  new diagnostics based on score residuals
+
+    o  new dataset
+
+    o  improvements to plotting summary functions 
+
+    o  We thank Ege Rubak, Jesper Moller, George Leser, Robert Lamb
+       and Ulf Mehlig for contributions.
+
+NEW FUNCTIONS
+
+    o  Gcom, Gres, Kcom, Kres
+       New diagnostics for fitted Gibbs or Poisson point process models
+       based on score residuals.
+       Gcom is the compensator of the G function
+       Gres is the residual of the G function
+       Kcom is the compensator of the K function
+       Kres is the residual of the K function
+
+    o  psst, psstA, psstG
+       New diagnostics for fitted Gibbs or Poisson point process models
+       based on pseudoscore residuals.
+       psst is the pseudoscore diagnostic for a general alternative
+       psstA is the pseudoscore diagnostic for an Area-interaction alternative
+       psstG is the pseudoscore diagnostic for a Geyer saturation alternative
+
+    o  compareFit
+       Computes and compares several point process models
+       fitted to the same dataset, using a chosen diagnostic.
+
+    o  as.interact
+       Extracts the interpoint interaction structure (without parameters)
+       from a fitted point process model or similar object.
+
+NEW DATASET
+
+    o  flu
+       Spatial point patterns giving the locations of 
+       influenza virus proteins on cell membranes.
+       Kindly released by Dr George Leser and Dr Robert Lamb.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+   o  pixel images and grids
+      The default size of a pixel grid, given by spatstat.options("npixel"),
+      has been changed from 100 to 128. A power of 2 gives faster
+      and more accurate results in many cases.
+
+   o  residuals.ppm
+       New arguments 'coefs' and 'quad' for advanced use
+       (these make it possible to compute residuals from a
+       modified version of the fitted model).
+
+   o   relrisk 	
+       New argument 'casecontrol' determines whether a bivariate
+       point pattern should be treated as case-control data.
+
+   o   plot.fv
+       Further improvements in mathematical labels.
+
+   o   plot.fv
+       The formula can now include the symbols .x and .y
+       as abbreviations for the function argument and the
+       recommended function value, respectively.
+
+   o   plot.fv
+       New argument 'add'
+
+BUG FIXES
+
+   o   multitype summary functions
+       (Kcross, Kdot, Gcross, Gdot, .....)
+       Plotting these functions generated an error if the 
+       name of one of the types of points contained spaces,
+       e.g. "Escherichia coli". 
+       Fixed.
+
+	CHANGES IN spatstat VERSION 1.22-3
+
+OVERVIEW 
+
+    o  Important bug fix to simulation code
+
+    o  Miscellaneous improvements
+
+    o  spatstat now depends on R 2.13.0 or later
+
+    o  We thank Ege Rubak, Kaspar Stucki, Vadim Shcherbakov,
+       Jesper Moller and Ben Taylor for contributions.
+
+NEW FUNCTIONS
+
+    o  is.stationary, is.poisson
+       New generic functions for testing whether a point process model 
+       is stationary and/or Poisson.
+       Methods for ppm, kppm, slrm etc
+
+    o  raster.xy
+       raster coordinates of a pixel mask
+
+    o  zapsmall.im
+       'zapsmall' for pixel images
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o  density.ppp
+       New argument 'diggle' allows choice of edge correction
+
+    o  rotate.owin, affine.owin
+       These functions now handle binary pixel masks.
+       New argument 'rescue' determines whether rectangles will be preserved
+
+BUG FIXES
+
+    o  rmh, simulate.ppm
+       Serious bug - simulation was completely incorrect in the case of 
+       a multitype point process with an interaction that does not depend
+       on the marks, such as ppm(betacells, ~marks, Strauss(60)).
+       The calling parameters were garbled.
+       Fixed.
+
+    o  effectfun
+       Crashed if the covariate was a function(x,y).
+       Fixed.
+
+    o  lurking
+       Gave erroneous error messages about 'damaged' models.
+       Fixed.
+
+    o  envelope.ppm
+       Did not recognise when the fitted model was equivalent to CSR.
+       Fixed.
+
+    o  plot.ppx
+       Crashed in some cases. Fixed.
+
+	CHANGES IN spatstat VERSION 1.22-2
+
+OVERVIEW 
+
+    o  Fitting and simulation of log-Gaussian Cox processes
+       with any covariance function
+
+    o  More support for 'kppm' and 'rhohat' objects
+
+    o  K-function for point patterns on a linear network
+
+    o  Metropolis-Hastings algorithm now saves its transition history
+
+    o  Easier control of dummy points in ppm
+
+    o  Convert an 'fv' object to an R function 
+
+    o  spatstat now depends on the package 'RandomFields'
+
+    o  We thank Abdollah Jalilian, Shen Guochun, Rasmus Waagepetersen,
+       Ege Rubak and Ang Qi Wei for contributions.
+
+NEW FUNCTIONS
+
+    o linearK
+      Computes the Okabe-Yamada network K-function 
+      for a point pattern on a linear network.
+
+    o pairdist.lpp
+      Shortest-path distances between each pair of points 
+      on a linear network.
+
+    o vcov.kppm
+      Asymptotic variance-covariance matrix for regression parameters
+      in kppm object. 
+      [Contributed by Abdollah Jalilian and Rasmus Waagepetersen]
+
+    o rLGCP
+      Simulation of log-Gaussian Cox processes
+      [Contributed by Abdollah Jalilian and Rasmus Waagepetersen]
+
+    o predict.rhohat
+      Method for 'predict' for objects of class 'rhohat'
+      Computes a pixel image of the predicted intensity.
+
+    o Kmodel, pcfmodel
+      Generic functions that compute the K-function or pair correlation function
+      of a point process *model*. 
+      So far the only methods are for the class 'kppm'.
+
+    o as.function.fv
+      Converts a function value table (class 'fv') to a function in R
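+
+      For example (illustrative):
+
+         f <- as.function(Kest(cells))
+         f(0.1)    # estimated K function evaluated at r = 0.1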
+
+    o coef.kppm
+      Method for 'coef' for objects of class 'kppm'
+
+    o unitname, unitname<- 
+      These generic functions now have methods for fitted model objects
+      (classes ppm, slrm, kppm, minconfit) and quadrature schemes (quad).
+
+    o nobs.ppm
+      Method for 'nobs' for class 'ppm'.
+      Returns the number of points in the original data.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o kppm
+      Can now fit a log-Gaussian Cox process
+
+    o simulate.kppm
+      Can now simulate a fitted log-Gaussian Cox process
+
+    o lgcp.estK, lgcp.estpcf
+      These functions previously fitted a log-Gaussian Cox process
+      with exponential covariance. They can now fit a log-Gaussian Cox process
+      with any covariance function implemented by the RandomFields package.
+
+    o rmh
+      If track=TRUE, the history of transitions of the Metropolis-Hastings
+      algorithm is saved and returned.
+
+    o ppm
+      New argument 'nd' controls the number of dummy points.
+
+    o as.fv
+      Now handles objects of class kppm or minconfit.
+
+    o rhohat
+      If covariate = "x" or "y", the resulting object has the same
+      'unitname' as the original point pattern data.
+
+    o rhohat
+      Now has arguments 'eps, 'dimyx' to control pixel resolution.
+
+    o MultiStrauss, MultiHard, MultiStraussHard
+      Default value of 'types' has been changed to NULL.
+
+    o data(ants)
+      The auxiliary data 'ants.extra' now includes a function called 'side'
+      determining whether a given location is in the scrub or field region.
+      Can be used as a covariate in ppm, kppm, slrm.
+
+    o print.ppm
+      Now has argument 'what' to allow only selected information to be printed.
+
+BUG FIXES
+
+    o profilepl
+      Crashed in some cases involving multitype interactions.
+      Fixed.
+
+    o plot.splitppp
+      Behaved incorrectly if 'main' was an expression.
+      Fixed.
+
+    o effectfun
+      Crashed in trivial cases.
+      Fixed.
+
+    o kppm, thomas.estpcf, matclust.estpcf, lgcp.estpcf
+      Gave a spurious warning message.
+      Fixed.
+
+    o step
+      When applied to ppm objects this gave a spurious warning.
+      Fixed.
+    
+	CHANGES IN spatstat VERSION 1.22-1
+
+OVERVIEW 
+
+    o  marked line segment patterns can now be plotted
+
+    o  multitype point process models are now 'self-starting'
+
+    o  new functions to manipulate colour images
+
+NEW FUNCTIONS
+
+    o rgbim, hsvim
+      Specify three colour channels.
+      These functions convert three pixel images with numeric values
+      into a single image whose pixel values are strings representing colours.
+
+    o scaletointerval
+      Generic utility function to rescale data (including spatial data)
+      to a specified interval
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o plot.im
+      Can now plot images whose pixel values are strings representing colours.
+      New argument 'valuesAreColours'
+  
+    o plot.psp
+      Now handles marked line segment patterns 
+      and plots the marks as colours.
+
+    o MultiHard, MultiStrauss, MultiStraussHard
+      The argument 'types' can now be omitted;
+      it will be inferred from the point pattern data.
+
+    o rhohat
+      Improved mathematical labels (when the result of rhohat is plotted)
+
+    o plot.fv
+      Minor improvements in graphics
+
+BUG FIXES
+
+    o  several minor bug fixes and improvements to satisfy R-devel
+
+
+	CHANGES IN spatstat VERSION 1.22-0
+
+OVERVIEW 
+
+    o  support for point patterns on a linear network
+
+    o  'superimpose' is now generic
+
+    o  improved mathematical labels when plotting functions
+
+NEW CLASSES
+
+    o linnet
+      An object of class 'linnet' represents a linear network,
+      i.e. a connected network of line segments, such as a road network.
+      Methods for this class include plot, print, summary etc.
+
+    o lpp
+      An object of class 'lpp' represents a point pattern on a linear network,
+      such as a record of the locations of road accidents on a road network.
+      Methods for this class include plot, print, summary etc.
+
+NEW FUNCTIONS
+
+    o runiflpp
+      Uniformly distributed random points on a linear network
+
+    o rpoislpp
+      Poisson point process on a linear network
+
+    o clickjoin
+      Interactive graphics to create a linear network
+    
+    o superimpose
+      The function 'superimpose' is now generic, with methods
+      for ppp, psp and a default method.
+
+    o as.ppp.psp 
+      New method for as.ppp extracts the endpoints and marks
+      from a line segment pattern
+
+NEW DATASETS
+
+    o simplenet
+      Simple example of a linear network
+
+SIGNIFICANT USER-VISIBLE CHANGES
+  
+    o superimposePSP 
+      This function is now deprecated in favour of 'superimpose'
+
+    o superimpose
+      Now handles data frames of marks.
+
+    o plot.fv
+      Argument 'legendmath' now defaults to TRUE.
+      New argument 'legendargs' gives more control over appearance of legend.
+      Increased default spacing between lines in legend.
+
+    o eval.fv, with.fv
+      Functions computed using eval.fv or with.fv
+      now have better labels when plotted.
+
+    o summary functions
+    (Kest, Kest.fft, Kcross, Kdot, Kmulti,
+    Kinhom, Kcross.inhom, Kdot.inhom, Kmulti.inhom,
+    Lest, Lcross, Ldot, 
+    pcf, pcfcross, pcfdot, pcfinhom, pcfcross.inhom, pcfdot.inhom,
+    Fest, Gest, Gcross, Gdot, Gmulti,
+    Jest, Jcross, Jdot, Jmulti, Iest,
+    localL, localK,
+    markcorr, markvario, markconnect, Emark, Vmark, 
+    allstats, alltypes)
+         Improved plot labels.
+
+BUG FIXES
+
+    o superimpose
+      If the marks components of patterns consisted of character
+      vectors (rather than factors or non-factor numeric vectors)
+      an error was triggered.
+      Fixed.
+
+    o plot.fv
+      The y axis limits did not always cover the range of values
+      if the argument 'shade' was used.
+      Fixed.
+
+    o plot.rhohat
+      The y axis label was sometimes incorrect.
+      Fixed.
+
+    o plot.rhohat
+      If argument 'xlim' was used, a warning was generated from 'rug'.
+      Fixed.
+
+
+	CHANGES IN spatstat VERSION 1.21-6
+
+OVERVIEW 
+
+    o   A line segment pattern can now have a data frame of marks.
+
+    o	Various minor extensions and alterations in behaviour
+
+NEW FUNCTIONS
+
+    o   nsegments
+    	Number of segments in a line segment pattern
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   psp class
+        A line segment pattern (object of class 'psp') can now have a 
+	data frame of marks. 
+
+    o   density.ppp
+    	New argument 'adjust' makes it easy to adjust the smoothing bandwidth
+
+    o   plot.envelope
+    	If the upper envelope is NA but the lower envelope is finite,
+	the upper limit is now treated as +Infinity
+
+    o 	msr
+        Argument 'continuous' renamed 'density'
+
+BUG FIXES    
+
+    o   [.psp 
+        In X[W] if X is a line segment pattern and W is a polygonal window,
+	marks were sometimes discarded, leading to an error.
+	Fixed.
+
+    o   [.psp 
+        In X[W] if X is a line segment pattern and W is a rectangular window,
+	if the marks of X were factor values, they were converted to integers.
+	Fixed.
+
+    o   superimposePSP
+    	If the marks were a factor, they were mistakenly converted to integers.
+	Fixed.
+
+    o   is.marked.ppp 
+    	Did not generate a fatal error when na.action="fatal" as described
+	in the help file. 
+	Fixed. 
+
+	CHANGES IN spatstat VERSION 1.21-5
+
+OVERVIEW 
+
+    o	Increased numerical stability.
+	
+    o	New 'self-starting' feature of interpoint interactions.
+	
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o 	ppm
+        Interaction objects may now be 'self-starting'
+    	i.e. initial parameter estimates can be computed
+	from the point pattern dataset. 
+	So far, only the LennardJones() interaction 
+	has a self-starting feature.
+
+    o   LennardJones
+	Increased numerical stability.
+	New (optional) scaling argument 'sigma0'.
+	Interpoint distances are automatically rescaled
+	using 'self-starting' feature. 
+		
+    o	vcov.ppm
+	New argument 'matrix.action' controls what happens when
+	the matrix is ill-conditioned.
+	Changed name of argument 'gamaction' to 'gam.action'
+
+    o   rmhmodel.ppm
+	Default resolution of trend image has been increased.
+
+    o   is.poisson.ppm
+	Accelerated.
+
+    o   ppm, kppm, qqplot.ppm
+	Improved robustness to numerical error
+
+
+	CHANGES IN spatstat VERSION 1.21-4
+
+OVERVIEW 
+
+    o	Urgent bug fix
+
+BUG FIXES
+ 
+    o   print.summary.ppm exited with an error message
+    	if the model had external covariates.
+    	Fixed.
+
+
+	CHANGES IN spatstat VERSION 1.21-3
+
+OVERVIEW 
+
+    o	Point process model covariates may now depend on additional parameters.
+
+    o	New class of signed measures, for residual analysis.
+
+    o   Miscellaneous improvements and bug fixes.
+
+NEW FUNCTIONS
+
+    o   clarkevans.test
+    	Classical Clark-Evans test of randomness
+
+    o   msr
+	New class 'msr' of signed measures and vector-valued measures
+	supporting residual analysis.
+
+    o   quadrat.test.quadratcount
+	Method for 'quadrat.test' for objects of class 'quadratcount'
+	(allows a chi-squared test to be performed on quadrat counts
+	rather than recomputing from the original data)
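+	An illustrative sketch (using the standard 'cells' dataset):
+	     qc <- quadratcount(cells, nx=3, ny=3)
+	     quadrat.test(qc)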
+
+    o   tile.areas
+	Computes areas of tiles in a tessellation (efficiently)
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   ppm
+	The spatial trend can now depend on additional parameters.
+	This is done by allowing spatial covariate functions
+	to have additional parameters: function(x, y, ...)
+	where ... is controlled by the new argument 'covfunargs' to ppm
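+	An illustrative sketch ('slope' is a hypothetical user-defined
+	covariate function, not part of spatstat):
+	     slope <- function(x, y, a) { a * x }
+	     fit <- ppm(cells, ~Z, covariates=list(Z=slope),
+	                covfunargs=list(a=2))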
+
+    o   profilepl
+	Can now maximise over trend parameters as well as interaction parameters
+
+    o   residuals.ppm
+    	The value returned by residuals.ppm is now an object 
+	of class 'msr'. It can be plotted directly.
+
+    o   eval.im
+	When the argument 'envir' is used, eval.im() now recognises
+	functions as well as variables in 'envir'
+
+    o   colourmap
+	The argument 'col' can now be any kind of colour data
+
+    o   persp.im
+	The 'colmap' argument can now be a 'colourmap' object
+
+    o   ppm
+    	The print and summary methods for 'ppm' objects 
+	now show standard errors for parameter estimates
+	if the model is Poisson.
+
+    o   quadrat.test
+	The print method for 'quadrattest' objects
+	now displays information about the quadrats
+
+    o   lurking
+    	Improved format of x axis label
+
+    o   distmap.ppp
+   	Internal code is more robust.
+
+BUG FIXES
+
+    o  im
+	Did not correctly handle 1 x 1 arrays.
+	Fixed.
+
+    o   as.mask, pixellate.ppp
+    	Erroneous results were produced if the argument 'eps' was set
+	to a value greater than the size of the window.
+	Fixed.
+
+	CHANGES IN spatstat VERSION 1.21-2
+
+OVERVIEW 
+
+    o   New multitype hardcore interaction.
+
+    o   Nonparametric estimation of covariate effects on point patterns.
+
+    o   Output of 'Kmeasure' has been rescaled.
+
+    o   Numerous improvements and bug fixes.
+
+NEW FUNCTIONS
+
+    o   MultiHard
+    	multitype hard core interaction for use in ppm() 
+
+    o   coords<-
+    	Assign new coordinates to the points in a point pattern
+
+    o   rhohat
+    	Kernel estimate for the effect of a spatial covariate
+	on point process intensity
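+    	An illustrative sketch, estimating the effect of the x coordinate:
+	     rho <- rhohat(cells, function(x, y) { x })
+	     plot(rho)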
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   as.ppp.matrix, as.ppp.data.frame
+    	These methods for 'as.ppp' now accept a matrix or data frame
+	with any number of columns (>= 2) and interpret the additional
+	columns as marks.
+
+    o   Kmeasure
+    	The interpretation of the output has changed:
+	the pixel values are now density estimates.
+
+    o   rmh.ppm, rmhmodel.ppm
+    	These functions now accept a point process model
+	fitted with the 'MultiHard' interaction
+
+    o   rmh.default, rmhmodel.default
+    	These functions now accept the option: cif='multihard'
+	defining a multitype hard core interaction.	
+
+    o   markcorr
+    	Now handles a data frame of marks
+
+    o   varblock
+    	Improved estimate in the case of the K function
+
+    o   colourmap, lut
+    	New argument 'range' makes it easier to specify a colour map
+	or lookup table
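+    	For example (an illustrative sketch; a colourmap object can be
+	applied to numbers like a function):
+	     co <- colourmap(heat.colors(16), range=c(0, 10))
+	     co(5.2)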
+
+    o   [<-.hyperframe
+    	Now handles multiple columns
+
+    o   plot.fv
+    	Improved y axis labels
+
+    o   spatstat.options
+    	New option 'par.fv' controls default parameters for line plotting
+
+    o   rmhmodel
+    	More safety checks on parameter values.
+
+    o   quadratresample
+    	New argument 'verbose'
+
+    o   smooth.fv
+    	Default value of 'which' has been changed.
+
+BUG FIXES
+
+    o   Kest
+    	If the argument 'domain' was used, the resulting estimate
+	was not correctly normalised.
+	Fixed.
+
+    o   Kest
+    	The Lotwick-Silverman variance approximation was incorrectly
+	calculated. (Spotted by Ian Dryden and Igor Chernayavsky).
+	Fixed.
+    	
+    o   plot.owin, plot.ppp
+    	Display of binary masks was garbled if the window was 
+	empty or if it was equivalent to a rectangle.
+	Fixed.
+
+    o   plot.bermantest
+    	One of the vertical lines for the Z1 test was in the wrong place.
+	Fixed.
+
+    o   marks<-.ppx
+    	Crashed in some cases.
+	Fixed.
+
+    o   is.convex
+    	An irrelevant warning was issued (for non-convex polygons).
+	Fixed.
+
+	CHANGES IN spatstat VERSION 1.21-1
+
+OVERVIEW 
+
+    o   Confidence intervals for K-function and other statistics
+
+    o   Bug fixes for smoothing and relative risk estimation
+
+NEW FUNCTIONS
+
+    o   varblock
+    	Variance estimation (and confidence intervals)
+	for summary statistics such as Kest, using subdivision technique
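+    	An illustrative sketch (pointwise confidence bands for the
+	K function of the standard 'cells' dataset):
+	     v <- varblock(cells, Kest, nx=3, ny=3)
+	     plot(v)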
+
+    o   bw.stoyan
+    	Bandwidth selection by Stoyan's rule of thumb.
+
+    o   which.max.im
+     	Applied to a list of images, this determines which image
+	has the largest value at each pixel.
+
+    o   as.array.im
+    	Convert image to array
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   smooth.ppp, markmean, sharpen.ppp, relrisk, bw.relrisk
+    	Further acceleration achieved.
+
+    o   Kest
+    	Argument 'correction' now explicitly overrides automatic defaults
+
+    o   plot.fv
+    	More robust handling of 'shade'
+
+BUG FIXES
+
+    o   relrisk
+    	Format of relrisk(at="points") was incorrect.
+	Fixed.
+
+    o   bw.relrisk
+    	Result was incorrect in the default case method="likelihood"
+	because of previous bug.
+	Fixed.
+    	
+    o   Jdot, Jcross, Jmulti
+    	Return value did not include the hazard function, when correction="km"
+	Fixed.
+
+    o   Jdot, Jcross, Jmulti
+    	Format of output was incompatible with format of Jest.
+	Fixed.
+
+   
+	CHANGES IN spatstat VERSION 1.21-0
+
+OVERVIEW 
+
+    o   Implemented Spatial Logistic Regression
+
+    o   Implemented nonparametric estimation of relative risk
+        with bandwidth selection by cross-validation.
+
+    o   Smoothing functions can handle a data frame of marks.
+
+    o   New options in Kinhom; default behaviour has changed.
+
+NEW FUNCTIONS
+
+    o   slrm
+    	Fit a spatial logistic regression model
+
+    o   anova.slrm, coef.slrm, fitted.slrm, logLik.slrm, plot.slrm, predict.slrm
+    	Methods for spatial logistic regression models
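+    	An illustrative sketch (a logistic regression on the Cartesian
+	coordinates):
+	     fit <- slrm(cells ~ x + y)
+	     plot(predict(fit))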
+
+    o   relrisk
+    	Nonparametric estimation of relative risk 
+
+    o   bw.relrisk
+    	Automatic bandwidth selection by cross-validation
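+    	An illustrative sketch (assuming the standard bivariate dataset
+	'amacrine'):
+	     b <- bw.relrisk(amacrine)
+	     plot(relrisk(amacrine, b))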
+
+    o   default.rmhcontrol
+        Sets default values of Metropolis-Hastings parameters
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   smooth.ppp, markmean
+    	These functions now accept a data frame of marks.
+
+    o   Kinhom
+	Default behaviour has changed. 
+    	New argument 'renormalise=TRUE' determines scaling of estimator
+	and affects bias and variance in small samples.
+
+    o   residuals.ppm
+    	Now also computes the score residuals.
+
+    o   plot.im
+    	New argument 'ribscale'
+
+    o   plot.listof, plot.splitppp
+    	New arguments panel.begin, panel.end and panel.args
+
+    o   ppp
+    	Now checks for NA/NaN/Inf values in the coordinates
+
+    o   envelope.ppm
+    	Changed default value of 'control' 
+    	New argument 'nrep'
+    	
+    o   qqplot.ppm
+    	Changed default value of 'control' 
+
+BUG FIXES
+
+    o   marks<-.ppp, setmarks, %mark%
+    	A matrix of marks was accepted by ppp() 
+	but not by these assignment functions. 
+	Fixed.
+
+    o   density.ppp, smooth.ppp, sharpen.ppp, markmean
+    	Crashed if the bandwidth was extremely small.
+	Fixed.
+
+	CHANGES IN spatstat VERSION 1.20-5
+
+OVERVIEW 
+
+    o   Accelerated computations of kernel smoothing.
+
+    o	Implemented Choi-Hall data sharpening.
+
+NEW FUNCTIONS
+
+    o   sharpen.ppp
+    	Performs Choi-Hall data sharpening of a point pattern
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   density.ppp, smooth.ppp
+    	Computation has been vastly accelerated 
+	for density(X, at="points") and smooth.ppp(X, at="points")
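+	For example (an illustrative sketch; the result is a numeric
+	vector of intensity values, one per data point):
+	     d <- density(cells, at="points")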
+
+    o   Kinhom
+    	Accelerated in case where lambda=NULL
+	
+    o   Vignette 'shapefiles' updated
+
+
+	CHANGES IN spatstat VERSION 1.20-4
+
+OVERVIEW
+
+    o   New functions for inhomogeneous point patterns
+    	and local analysis.
+
+    o   Pair correlation function for 3D point patterns
+
+    o   Minor improvements and bug fixes to simulation code and image functions
+
+
+NEW FUNCTIONS
+
+    o   pcf3est
+    	Pair correlation function for 3D point patterns.
+
+    o   Kscaled, Lscaled
+	Estimator of the template K function (and L-function)
+	for a locally-scaled point process.
+        
+    o   localpcf
+        Local version of pair correlation function
+
+    o   identify.psp
+    	Method for 'identify' for line segment patterns.
+
+    o   as.im.matrix
+    	Converts a matrix to a pixel image
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   rMaternI, rMaternII
+	New argument 'stationary=TRUE' controls whether the 
+	simulated process is stationary (inside the simulation window).
+	Default simulation behaviour has changed.
+
+    o   im
+        New arguments 'xrange', 'yrange' 
+
+    o   envelope
+	Improvements to robustness of code.
+
+BUG FIXES
+
+   o    quadratcount
+   	If V was a tessellation created using a factor-valued image,
+	quadratcount(X, tess=V) crashed with the error
+	"Tessellation does not contain all the points of X".
+	Fixed.
+
+   o	[.im
+   	If Z was a factor valued image and X was a point pattern
+	then Z[X] was not a factor.
+	Fixed.
+
+	CHANGES IN spatstat VERSION 1.20-3
+
+OVERVIEW
+
+    o   minor improvements (mostly internal).
+
+NEW FUNCTIONS
+
+    o   unmark.ppx
+    	Method for 'unmark' for general space-time point patterns
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   plot.ppx
+    	Now handles marked patterns, in two-dimensional case
+
+    o   as.psp.psp
+    	Default value of argument 'check' set to FALSE
+
+	CHANGES IN spatstat VERSION 1.20-2
+
+OVERVIEW
+
+    o   Extensions to minimum contrast estimation.
+
+    o   Bug fix in simulation of Lennard-Jones model.
+	
+    o	More support for distance functions.
+	
+    o	Changes to point process simulations.
+
+
+NEW FUNCTIONS
+
+    o   thomas.estpcf
+    	Fit Thomas process model by minimum contrast using the
+	pair correlation function (instead of the K-function).
+
+    o   matclust.estpcf
+    	Fit Matern Cluster model by minimum contrast using the
+	pair correlation function (instead of the K-function).
+
+    o   lgcp.estpcf
+    	Fit log-Gaussian Cox process model by minimum contrast using the
+	pair correlation function (instead of the K-function).
+
+    o   contour.distfun,  persp.distfun
+    	Methods for 'contour' and 'persp' for distance functions
+
+    o   default.expand
+        Computes default window for simulation of a fitted point process model.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   kppm
+    	Models can now be fitted using either the K-function
+	or the pair correlation function.
+
+    o   ppm
+    	The list of covariates can now include windows 
+	(objects of class 'owin'). A window will be treated
+	as a logical covariate that equals TRUE inside the window
+	and FALSE outside it.
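+	An illustrative sketch ('B' is a hypothetical window covariate):
+	     B <- erosion(as.owin(cells), 0.1)
+	     fit <- ppm(cells, ~B, covariates=list(B=B))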
+
+    o   plot.distfun
+    	Pixel resolution can now be controlled.
+
+    o   envelope.ppm, qqplot.ppm
+    	The default value of 'control' has changed;
+	simulation results may be slightly different.
+
+    o   rmh
+    	Slightly accelerated.
+
+BUG FIXES
+
+    o   rmh
+    	Simulation of the Lennard-Jones model (cif = 'lennard') was incorrect
+	due to an obscure bug, introduced in spatstat 1.20-1.
+	Fixed.
+
+    o   thomas.estK, matclust.estK, lgcp.estK
+    	The value of 'lambda' (if given) 
+	was ignored if X was a point pattern.
+	Fixed.
+
+	CHANGES IN spatstat VERSION 1.20-1
+
+OVERVIEW
+
+    o   Further increases in speed and efficiency of ppm and rmh
+
+    o   New pairwise interaction model 
+
+
+NEW FUNCTIONS
+
+    o   DiggleGatesStibbard
+    	Diggle-Gates-Stibbard pairwise interaction for use in ppm()
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   ppm
+    	has been accelerated by a factor of 10 for the BadGey interaction.
+
+    o   rmh
+    	simulation of the Lennard-Jones model (cif='lennard')
+	has been greatly accelerated.
+
+    o   rmh, rmhmodel.ppm
+    	Point process models fitted by ppm() using the DiggleGatesStibbard
+	interaction can be simulated automatically using rmh.
+
+BUG FIXES
+
+    o   fitin
+    	The plot of a fitted Hardcore interaction was incorrect.
+	Fixed.
+
+	CHANGES IN spatstat VERSION 1.20-0
+
+OVERVIEW
+
+    o   spatstat now contains over 1000 functions.
+
+    o   Substantial increase in speed and efficiency 
+        of model-fitting code.
+
+    o   Changes to factor-valued images.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o	ppm
+	has been accelerated by a factor of 10,
+        and can handle datasets with 20,000 points,
+        for the following interactions: 
+	DiggleGratton, Fiksel, Geyer, Hardcore, Strauss, StraussHard
+
+    o   predict.ppm 
+        accelerated by a factor of 3 (when type = "cif")
+	with vastly reduced memory requirements
+	for the following interactions: 
+	DiggleGratton, Fiksel, Geyer, Hardcore, Strauss, StraussHard
+
+    o   pixel images (class "im")
+    	The internal representation of factor-valued images has changed.
+	Existing objects in the old format should still work.
+
+    o   im
+	The syntax for creating a factor-valued image has changed.
+    	Argument 'lev' has been deleted.
+
+    o   ppm 
+    	Some warnings have been reworded for greater clarity.
+
+BUG FIXES
+
+    o   [.im
+    	Mishandled some factor-valued images.
+	Fixed.
+
+    o   hist.im
+    	Produced slightly erroneous output for some factor-valued images.
+	Fixed.
+
+    o   plot.owin
+    	Filled polygons appeared to contain criss-cross lines
+	on some graphics drivers.
+	Fixed.
+
+    o   deltametric
+    	Did not handle windows with different enclosing frames
+	(error message: 'dA and dB are incompatible')
+	Fixed.
+
+    o   quadratcount
+    	Crashed if the pattern was empty and the window was a rectangle.
+	(Noticed by Sandro Azaele)
+	Fixed.
+
+    o   rNeymanScott
+    	Crashed if the parent process realisation was empty.
+	(Noticed by Sandro Azaele)
+	Fixed.
+
+
+	CHANGES IN spatstat VERSION 1.19-3
+
+ACKNOWLEDGEMENTS
+
+    o   We thank David Dereudre for contributions.    
+
+OVERVIEW
+
+    o   Urgent bug fix to Metropolis-Hastings for Lennard-Jones model.
+
+    o   Miscellaneous additions to plotting and colour management.
+
+NEW FUNCTIONS
+
+    o   col2hex, rgb2hex, paletteindex, samecolour
+    	Functions for converting and comparing colours.
+
+    o   plot.envelope
+	New method for plotting envelopes.
+	By default the area between the upper and lower envelopes
+	is shaded in grey.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o	plot.fasp
+	If the entries in the array are envelopes,
+	they are plotted using plot.envelope
+	(hence the envelope region is shaded grey).
+
+    o   plot.fv
+        Now displays mathematical notation for each curve,
+	if legendmath=TRUE.
+
+    o   print.fv
+	Now prints the available range of 'r' values
+	as well as the recommended range of 'r' values.
+
+BUG FIXES
+
+    o   rmh
+        Simulation of Lennard-Jones model was incorrect;
+	the simulations were effectively Poisson patterns.
+	(Spotted by David Dereudre.)
+	Fixed.
+
+    o   plot.fv
+	Did not correctly handle formulas that included I( )
+	Fixed.
+
+   
+	CHANGES IN spatstat VERSION 1.19-2
+
+ACKNOWLEDGEMENTS
+
+    o   We thank Jorge Mateu, Michael Sumner and Sebastian Luque
+        for contributions.    
+
+OVERVIEW
+
+    o   More support for fitted point process models and pixel images.
+
+    o   Improved plotting of pixel images and envelopes.
+        
+    o   Simulation algorithm for Lennard-Jones process.
+	
+    o   Improvements and bug fixes to envelopes. 
+	
+    o   Bug fixes to Metropolis-Hastings simulation.
+
+NEW FUNCTIONS
+
+    o   pairs.im
+    	Creates a scatterplot matrix for several pixel images.
+
+    o   model.frame.ppm
+    	Method for 'model.frame' for point process models.
+
+    o   sort.im
+        Method for 'sort' for pixel images.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   plot.fv, plot.fasp
+ 	New argument 'shade' enables confidence intervals
+        or significance bands to be displayed as filled grey shading.
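+        An illustrative sketch, shading the envelope columns 'hi' and 'lo':
+             E <- envelope(cells, Kest, nsim=39)
+             plot(E, shade=c("hi", "lo"))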
+
+    o   LennardJones
+    	The parametrisation of this interaction function has been changed.
+
+    o   rmh, rmhmodel
+        These functions will now simulate a point process model
+	that was fitted using the LennardJones() interaction.
+
+    o   rmh.default, rmhmodel.default
+    	These functions will now simulate a point process model
+	with the Lennard-Jones interaction (cif='lennard').
+
+    o   ecdf
+    	This function now works for pixel images.
+
+    o   dim, row, col
+        These functions now work for pixel images.
+
+    o   order
+    	This function now works for pixel images.
+
+    o   [.im and [<-.im 
+    	The subset index can now be any valid subset index for a matrix.
+
+    o   density.ppp, smooth.ppp
+    	The return value now has attributes 'sigma' and 'varcov'
+	reporting the smoothing bandwidth.
+
+    o   plot.im
+        The argument 'col' can now be a 'colourmap' object.
+        This makes it possible to specify a fixed mapping between
+	numbers and colours (e.g. so that it is consistent between
+	plots of several different images).
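+        An illustrative sketch (the range here is arbitrary, and should
+        be chosen to cover the values of all images to be compared):
+             co <- colourmap(heat.colors(128), range=c(0, 100))
+             plot(density(cells), col=co)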
+
+    o   rmh, spatstat.options
+    	spatstat.options now recognises the parameter 'expand' 
+	which determines the default window expansion factor in rmh.
+
+    o   rmh
+    	Improved handling of ppm objects with covariates.
+
+    o   kstest
+        The 'covariate' can now be one of the characters "x" or "y"
+	indicating the Cartesian coordinates.
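+        For example (an illustrative sketch):
+             kstest(cells, "x")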
+
+BUG FIXES
+
+    o   model.matrix.ppm
+    	For a fitted model that used a large number of quadrature points,
+	model.matrix.ppm sometimes reported an internal error about mismatch
+	between the model matrix and the quadrature scheme.
+	Fixed.
+
+    o   plot.ppx
+    	Minor bugs fixed.
+
+    o   rmh
+    	In rare cases, the simulated point pattern 
+	included multiple points at the origin (0,0).
+	(Bug introduced in spatstat 1.17-0.)
+	Fixed.
+
+    o   rmh, rmhmodel.ppm
+    	Crashed when applied to a fitted multitype point process model
+	if the model involved more than one covariate image.
+	(Spotted by Jorge Mateu)
+	Fixed.
+
+    o   density.psp
+    	If any segment had zero length, the result contained NaN values.
+	(Spotted by Michael Sumner and Sebastian Luque.)
+	Fixed.
+
+    o   envelope
+    	Crashed with fun=Lest or fun=Linhom if the number of points in 
+	a simulated pattern exceeded 3000.
+	Fixed.
+
+    o   plot.kstest
+    	Main title was corrupted if the covariate was a function.
+	Fixed.
+
+
+	CHANGES IN spatstat VERSION 1.19-1
+
+OVERVIEW
+
+    o New dataset: replicated 3D point patterns. 
+      
+    o Improvements to Metropolis-Hastings simulation code.
+      
+    o More support for hyperframes. 
+
+    o Bug fixes.
+
+NEW DATASETS
+
+    o  osteo:
+       Osteocyte Lacunae data: replicated 3D point patterns
+       
+NEW FUNCTIONS
+
+    o  rbind.hyperframe:
+       Method for rbind for hyperframes.
+
+    o  as.data.frame.hyperframe:
+       Converts a hyperframe to a data frame.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o  Fiksel:
+       Fitted point process models (class ppm) with the Fiksel()
+       double exponential interaction can now be simulated by rmh.
+
+    o  rmh.default:
+       Point processes with the Fiksel interaction can now be
+       simulated by specifying parameters in rmh.default.
+
+    o  logLik.ppm:
+        New argument 'warn' controls warnings.
+
+    o  profilepl:
+        No longer issues spurious warnings.
+  
+BUG FIXES
+
+    o   Hardcore, rmh:
+    	Simulation of the 'Hardcore' process was incorrect.
+	The hard core radius was erroneously set to zero 
+	so that the simulated patterns were Poisson.
+	Fixed.
+
+    o   fitin:
+        A plot of the pairwise interaction function of a fitted model,
+	generated by plot(fitin(model)) where model <- ppm(...), 
+	was sometimes incorrect when the model included a hard core.
+	Fixed.
+
+
+	CHANGES IN spatstat VERSION 1.19-0
+
+OVERVIEW
+
+    o   Numerous bugs fixed in the implementation of
+    	the Huang-Ogata approximate maximum likelihood method.
+
+    o   New interpoint interaction model.
+
+NEW FUNCTIONS
+
+    o   Fiksel: 
+        new interpoint interaction: Fiksel's double exponential model.
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   runifpoint, rpoispp, envelope
+    	These functions now issue a warning if the number of random points
+	to be generated is very large. This traps a common error in
+	simulation experiments.
+
+BUG FIXES
+
+    o   predict.ppm, fitted.ppm:
+        Predictions and fitted values were incorrect for 
+	objects fitted using ppm(..., method="ho").
+	Fixed.
+
+    o   logLik, AIC:
+        Values of logLik() and AIC() were incorrect for 
+    	objects fitted using ppm(..., method="ho").
+	Fixed.
+
+    o   profilepl:
+    	Results were incorrect if the argument 'method="ho"' was used.
+	Fixed.
+
+    o   fitin
+        The result of fitin() was incorrect for 
+    	objects fitted using ppm(..., method="ho").
+	Fixed.
+
+    o   rmhcontrol: 
+    	rmhcontrol(NULL) generated an error.
+	Fixed.
+
+	CHANGES IN spatstat VERSION 1.18-4
+
+ACKNOWLEDGEMENTS
+
+    o   We thank Michael Sumner for contributions.    
+
+BUG FIXES
+
+    o   pixellate.psp: segments shorter than one pixel width
+    	were measured incorrectly if the 'weights' argument was present. 
+	Fixed.
+
+NEW FUNCTIONS
+
+    o   pairdist.ppx, crossdist.ppx, nndist.ppx, nnwhich.ppx:
+    	Methods for pairdist, crossdist, nndist, nnwhich 
+	for multidimensional point patterns (class 'ppx')
+
+    o	runifpointx, rpoisppx:
+	Random point patterns in any number of dimensions
+
+    o   boxx:
+	Multidimensional box in any number of dimensions
+
+    o   diameter.boxx, volume.boxx, shortside.boxx, eroded.volumes.boxx:
+        Geometrical computations for multidimensional boxes
+
+    o   sum.im, max.im, min.im: 
+        Methods for sum(), min(), max() for pixel images.
+
+    o   as.matrix.ppx:
+	Convert a multidimensional point pattern to a matrix
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   plot.ppp: New argument 'zap'
+
+    o   diameter: This function is now generic, 
+        with methods for "owin", "box3" and "boxx"
+
+    o   eroded.volumes: This function is now generic, 
+        with methods for "box3" and "boxx"
+
+	CHANGES IN spatstat VERSION 1.18-3
+
+ACKNOWLEDGEMENTS
+
+    o   We thank Michael Sumner for contributions.    
+
+BUG FIXES
+
+    o   pixellate.psp: segments shorter than one pixel width
+    	were measured incorrectly. Fixed.
+
+    o   fv: 'alim' not handled correctly.  Fixed.
+
+NEW FUNCTIONS
+
+    o   smooth.fv:
+        Applies spline smoothing to the columns of an fv object.
+        
+
+	CHANGES IN spatstat VERSION 1.18-2
+
+ACKNOWLEDGEMENTS
+
+    o   We thank Michael Sumner for contributions.    
+
+NEW FUNCTIONS
+
+    o   Gfox, Jfox:
+        Foxall's G and J functions
+
+    o   as.owin.distfun: 
+        New method for as.owin extracts the domain of a distfun object.
+    
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   distfun: objects of class 'distfun', when called as functions,
+        will now accept either two vectors (x,y) or a point pattern x.
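+        An illustrative sketch:
+             f <- distfun(cells)
+             f(0.5, 0.5)       # distance from the location (0.5, 0.5)
+             f(runifpoint(3))  # distances from a point pattern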
+
+    o   Hest: this function can now compute the Hanisch estimator.
+        It now has arguments 'r', 'breaks' and 'correction', 
+	like other summary functions. 
+
+    o   Hest: new argument 'conditional'.
+
+BUG FIXES
+
+    o   pixellate.psp: Values were sometimes incorrect due to coding error.
+    	(Spotted by Michael Sumner)
+        Fixed.
+
+    o   kstest: Crashed if the covariate contained NA's.
+	Fixed.
+
+    o   kstest: Crashed if X was a multitype point pattern 
+	in which some mark values were unrepresented.
+	Fixed.
+
+    o   lurking: Minor bug in handling of NA values.
+    	Fixed.
+
+    o   Hest: labels of columns were incorrect. 
+        Fixed.
+
+	CHANGES IN spatstat VERSION 1.18-1
+
+ACKNOWLEDGEMENTS
+
+    o   We thank Andrew Bevan and Ege Rubak for suggestions.
+
+NEW FUNCTIONS
+
+    o   Hardcore: Hard core interaction (for use in ppm)
+
+    o   envelope.pp3: simulation envelopes for 3D point patterns
+
+    o   npoints: number of points in a point pattern of any kind
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   rmh.ppm, rmhmodel.ppm:
+    	It is now possible to simulate Gibbs point process models
+	that are fitted to multitype point patterns using a
+	non-multitype interaction, e.g.
+	     data(amacrine)
+	     fit <- ppm(amacrine, ~marks, Strauss(0.1))
+	     rmh(fit, ...)
+
+    o   rmh.ppm, rmhmodel.ppm, rmh.default, rmhmodel.default:
+    	Hard core models can be simulated.
+
+    o	rmh.default, rmhmodel.default:
+   	The argument 'par' is now required to be a list, in all cases
+	(previously it was sometimes a list and sometimes a vector).
+
+    o   Fest:
+        Calculation has been accelerated in some cases.
+
+    o   summary.pp3 now returns an object of class 'summary.pp3'
+    	containing useful summary information. It is plotted by
+	'plot.summary.pp3'.
+
+    o   F3est, G3est, K3est: these functions now accept 
+        'correction="best"' 
+
+    o   union.owin, intersect.owin: 
+    	these functions now handle any number of windows.
+
+    o   envelope.ppp, envelope.ppm, envelope.kppm: 
+        argument lists have changed slightly
+
+BUG FIXES
+
+    o   Fest: The result of Fest(X, correction="rs") had a slightly
+    	corrupted format, so that envelope(X, Fest, correction="rs")
+	in fact computed the envelopes based on the "km" correction.
+	(Spotted by Ege Rubak).
+	Fixed.
+
+    o   rmh (rmh.ppm, rmhmodel.ppm):
+	rmh sometimes failed for non-stationary point process models,
+	with a message about "missing value where TRUE/FALSE needed".
+	(Spotted by Andrew Bevan).
+	Fixed.
+
+    o   diagnose.ppm, lurking:
+        Calculations were not always correct if the model
+	had conditional intensity equal to zero at some locations.
+	Fixed.
+
+    o   ppm, profilepl:
+        If data points are illegal under the model (i.e. if any
+        data points have conditional intensity equal to zero)
+        the log pseudolikelihood should be -Inf but was sometimes
+	returned as a finite value. Thus profilepl did not always 
+	work correctly for models with a hard core.
+	Fixed.
+
+    o   F3est, G3est: 
+    	Debug messages were printed unnecessarily.
+	Fixed.
+
+	CHANGES IN spatstat VERSION 1.18-0
+
+ACKNOWLEDGEMENTS
+
+    o   We thank Ege Rubak and Tyler Dean Rudolph for suggestions.
+
+HEADLINES
+
+    o   A point pattern is now allowed to have a data frame of marks
+        (previously the marks had to be a vector).
+
+    o   Extended capabilities for 'envelope' and 'kstest'.
+
+NEW FUNCTIONS
+
+    o	pixellate.psp,  as.mask.psp
+    	Convert a line segment pattern to a pixel image or binary mask
+
+    o   as.data.frame.im
+        Convert a pixel image to a data frame
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   A point pattern is now allowed to have a data frame of marks
+        (previously the marks had to be a vector).
+
+    o   Many functions in spatstat now handle point patterns with 
+        a data frame of marks. 
+	These include print.ppp, summary.ppp, plot.ppp, split.ppp.
+
+    o   finpines, nbfires, shapley:
+        The format of these datasets has changed. 
+        They are now point patterns with a data frame of marks.
+
+    o	envelope() is now generic, with methods for "ppp", "ppm" and "kppm".
+
+    o   kstest() now handles multitype point patterns and 
+        multitype point process models.
+
+    o   nnclean() now returns a point pattern with a data frame of marks.
+
+    o   plot.ppp() has new argument 'which.marks' to select one column
+        from a data frame of marks to be plotted. 
+
+    o   plot.ppp() now handles marks that are POSIX times.
+
+    o   complement.owin now handles any object acceptable to as.owin.
+
+BUG FIXES
+
+    o   erosion(w) and opening(w) crashed if w was not a window.
+        Fixed.
+
+    o   diameter() and eroded.areas() refused to work if w was not a window.
+        Fixed.
+
+	CHANGES IN spatstat VERSION 1.17-6
+
+ACKNOWLEDGEMENTS
+
+    o   We thank Simon Byers and Adrian Raftery for generous contributions.
+
+OVERVIEW
+
+    o 	Nearest neighbour clutter removal algorithm
+
+    o   New documentation for the 'fv' class.
+	
+    o	Minor improvements and bug fixes.
+
+NEW FUNCTIONS
+
+    o	nnclean: Nearest neighbour clutter removal for recognising features 
+	in spatial point patterns. Technique of Byers and Raftery (1998) 
+	[From original code by Simon Byers and Adrian Raftery,
+	adapted for spatstat.]
+
+    o	marks.ppx, marks<-.ppx: Methods for extracting and changing marks
+	in a multidimensional point pattern
+
+    o   latest.news: print news about the current version of the package
+
+SIGNIFICANT USER-VISIBLE CHANGES
+
+    o   news: spatstat now has a NEWS file which can be printed by
+	typing news(package="spatstat").
+
+    o	areaGain, areaLoss: New algorithms in case exact=TRUE.
+	Syntax slightly modified.
+
+    o	with.hyperframe: 
+		- The result now inherits 'names' from the row names
+		of the hyperframe. 
+
+		- New argument 'enclos' controls the environment
+		in which the expression is evaluated.
+
+		- The algorithm is now smarter at simplifying the result
+		when simplify=TRUE.
+
+    o	update.ppm: Tweaked to improve the ability of ppm objects
+	to be re-fitted in different contexts.
+
+
+ADVANCED USERS ONLY
+   
+    o	Documentation for the class 'fv' of function value tables
+
+	- fv: Creates an object of class 'fv' 
+
+	- cbind.fv, collapse.fv: Combine objects of class 'fv'		
+
+	- bind.fv: Add additional columns of data to an 'fv' object
+
+		
+BUG FIXES
+
+    o	"$<-.hyperframe" destroyed the row names of the hyperframe.
+	Fixed.	
+
+    o	model.matrix.ppm had minor inconsistencies.
+	Fixed.		
+
+    o	ppm: The fitted coefficient vector had incorrect format
+	in the default case of a uniform Poisson process. 
+	Fixed.
+
+    o	plot.ppx: Crashed if the argument 'main' was given.
+	Fixed.
+
+    o   envelope.ppp: Crashed if the object returned by 'fun' 
+    	did not include a column called "theo".
+    	Fixed.
diff --git a/R/FGmultiInhom.R b/R/FGmultiInhom.R
new file mode 100644
index 0000000..076a135
--- /dev/null
+++ b/R/FGmultiInhom.R
@@ -0,0 +1,252 @@
+#'
+#'     FGmultiInhom.R
+#' 
+#'     inhomogeneous multitype G and F functions
+#'
+#'     Original code by Ottmar Cronie and Marie-Colette van Lieshout
+#'
+#'     Rewritten for spatstat by Adrian Baddeley
+#'
+#'     GmultiInhom
+#'     FmultiInhom
+#'
+#'      $Revision: 1.6 $ $Date: 2017/06/05 10:31:58 $
+
+GmultiInhom <- function(X, I, J, 
+                        lambda=NULL, lambdaI=NULL, lambdaJ=NULL,
+                        lambdamin=NULL,
+                        ...,
+                        r=NULL, 
+                        ReferenceMeasureMarkSetI=NULL,
+                        ratio=FALSE){
+  if(!is.ppp(X) || !is.marked(X))
+    stop("X should be a marked point pattern")
+  W <- Window(X)
+  nX <- npoints(X)
+  
+  #' handle r argument
+  rmax <- rmax.rule("G", W, intensity(X))
+  bks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmax)
+  r    <- bks$r
+  rmax <- bks$max
+  nr   <- length(r)
+  
+  #' Accept any kind of index for I; convert it to a logical index
+  I <- ppsubset(X, I)
+  if(is.null(I))
+    stop("I must be a valid subset index")
+  XI <- X[I]
+  nI <- sum(I)
+  if (nI == 0) 
+    stop("No points satisfy condition I")
+  
+  if(!is.null(ReferenceMeasureMarkSetI)) {
+    check.1.real(ReferenceMeasureMarkSetI)
+    stopifnot(ReferenceMeasureMarkSetI >= 0)
+  }
+
+  #' likewise for J
+  if(missing(J) || is.null(J)) {
+    J <- rep(TRUE, nX)
+  } else {
+    J <- ppsubset(X, J)
+  }
+  XJ <- X[J]
+  nJ <- sum(J)
+  if (nJ == 0) 
+    stop("No points satisfy condition J")
+
+  #' supply either lambda, or lambdaI and lambdaJ
+  lam.given <- !is.null(lambda)
+  lamIJ.given <- !is.null(lambdaI) || !is.null(lambdaJ)
+  if(lam.given == lamIJ.given || is.null(lambdaI) != is.null(lambdaJ))
+    stop(paste("Supply either a vector lambda of length equal to npoints(X),",
+               "or two vectors lambdaI, lambdaJ of lengths",
+               "equal to npoints(X[I]) and npoints(X[J]) respectively"),
+         call.=FALSE)
+  
+  if(lamIJ.given) {
+    #' lambdaI and lambdaJ given
+    check.nvector(lambdaI, nI, things="points of X[I]")
+    stopifnot(all(lambdaI > 0))
+    check.nvector(lambdaJ, nJ, things="points of X[J]")
+    stopifnot(all(lambdaJ > 0))
+    if(is.null(lambdamin)){
+      stop(paste("Supply lambdamin - a single positive number which is",
+                 "smaller than the values in lambdaJ"),
+           call.=FALSE)
+    }
+    check.1.real(lambdamin)
+    stopifnot(lambdamin > 0)
+    stopifnot(lambdamin <= min(lambdaJ))
+  } else {
+    #' lambda given
+    check.nvector(lambda, nX, things="points of X")
+    stopifnot(all(lambda > 0))
+    lambdaI <- lambda[I]
+    lambdaJ <- lambda[J]
+    if(is.null(lambdamin)){
+      stop(paste("Supply lambdamin - a single positive number which is",
+                 "smaller than the values in lambda"),
+           call.=FALSE)
+    }
+    check.1.real(lambdamin)
+    stopifnot(lambdamin > 0)
+    stopifnot(lambdamin <= min(lambda))
+  }
+  
+  #' Calculate 1/lambda(x_i,y_i,m_i))
+  #'           for all (x_i,y_i,m_i) with m_i in I
+  invlambdaI <- 1/lambdaI
+  #' Calculate (1 - lambda_min/lambda(x_i,y_i,m_i))
+  #'           for all (x_i,y_i,m_i) with m_i in J
+  Coeff <- 1-(lambdamin/lambdaJ)
+  ## CoeffMatrix <- matrix(rep(Coeff,times=nI), nrow=nI, byrow=TRUE)
+
+  #' distances
+  ## DistanceXItoXJ <- crossdist(XI,XJ)
+
+  #' eroded areas and boundary distances
+  areaWr <- eroded.areas(W, r)
+  bdistXI <- bdist.points(XI)
+
+  #' for each point x in XI, determine largest r such that x \in W-r
+  ibI <- fastFindInterval(bdistXI, r, labels=TRUE)
+  #' count of points inside W-r for each r
+  ## NumberEroded <- revcumsum(table(ibI))
+    
+  #' denominator
+  #' sum invlambdaI for all points x \in W-r
+  DenominatorN <- c(sum(invlambdaI),
+                    revcumsum(natozero(tapply(invlambdaI, ibI, sum))))
+  if(!is.null(ReferenceMeasureMarkSetI))
+    DenominatorA <- areaWr * ReferenceMeasureMarkSetI
+
+  #' local products of weights
+  #' sort data points in order of increasing x coordinate
+  xxI <- XI$x
+  yyI <- XI$y
+  oXI <- fave.order(xxI)
+  xIord <- xxI[oXI]
+  yIord <- yyI[oXI]
+  #'
+  xxJ <- XJ$x
+  yyJ <- XJ$y
+  vvJ <- Coeff
+  oXJ <- fave.order(xxJ)
+  xJord <- xxJ[oXJ]
+  yJord <- yyJ[oXJ]
+  vJord <- vvJ[oXJ]
+  # compute local cumulative products
+  z <- .C("locxprod",
+          ntest = as.integer(nI),
+          xtest = as.double(xIord),
+          ytest = as.double(yIord),
+          ndata = as.integer(nJ),
+          xdata = as.double(xJord),
+          ydata = as.double(yJord),
+          vdata = as.double(vJord),
+          nr = as.integer(nr),
+          rmax = as.double(rmax),
+          ans = as.double(numeric(nI * nr)),
+          PACKAGE = "spatstat")
+  ans <- matrix(z$ans, nrow=nr, ncol=nI)
+  #' revert to original ordering
+  loccumprod <- matrix(NA_real_, nrow=nr, ncol=nI)
+  loccumprod[, oXI] <- ans
+
+  #' border correction
+  outside <- outer(r, bdistXI, ">")
+  loccumprod[outside] <- 0
+  #' weight by 1/lambdaI
+  wlcp <- loccumprod * matrix(invlambdaI, nrow=nr, ncol=nI, byrow=TRUE)
+  #' sum over I for each fixed r
+  numer <- .rowSums(wlcp, nr, nI)
+
+  # pack up
+  Gdf <- data.frame(r=r, theo = 1 - exp(- lambdamin * pi * r^2))
+  desc <- c("distance argument r", "theoretical Poisson %s")
+  theo.denom <- rep.int(nI, nr)
+  fname <- c("G", "list(inhom,I,J)")
+  G <- ratfv(Gdf, NULL, theo.denom,
+             "r", quote(G[inhom, I, J](r)),
+             "theo", NULL, c(0,rmax),
+             c("r", makefvlabel(NULL, NULL, fname, "pois")),
+             desc,
+             fname=fname,
+             yexp=quote(G[list(inhom,I,J)](r)),
+             ratio=ratio)
+  # add border corrected (Hamilton Principle) estimate
+  G <- bind.ratfv(G,
+                  data.frame(bord=DenominatorN-numer), DenominatorN,
+                  makefvlabel(NULL, "hat", fname, "bord"),
+                  "border estimate of %s",
+                  "bord",
+                  ratio=ratio)
+  fvnames(G, ".") <- c("bord", "theo")
+  # add modified border corrected (non-Hamilton-Principle) estimate
+  if(!is.null(ReferenceMeasureMarkSetI)) {
+    G <- bind.ratfv(G,
+                    data.frame(bordm=DenominatorA-numer),
+                    DenominatorA,
+                    makefvlabel(NULL, "hat", fname, "bordm"),
+                    "modified border estimate of %s",
+                    "bordm",
+                    ratio=ratio)
+    fvnames(G, ".") <- c("bord", "bordm", "theo")
+  }
+  # 
+  formula(G) <- . ~ r
+  unitname(G) <- unitname(X)
+  if(ratio)
+    G <- conform.ratfv(G)
+
+  return(G)
+}
+
+#' marked inhomogeneous F
+
+FmultiInhom <- function(X, J,
+                        lambda=NULL,lambdaJ=NULL,
+                        lambdamin=NULL,
+                        ...,
+                        r=NULL) {
+  if(!is.ppp(X) || !is.marked(X))
+    stop("X should be a marked point pattern")
+  nX <- npoints(X)
+  
+  #' Accept any kind of index for J; convert it to a logical index
+  J <- ppsubset(X, J)
+  if(is.null(J))
+    stop("J must be a valid subset index")
+  XJ <- X[J]
+  nJ <- sum(J)
+  if (nJ == 0) 
+    stop("No points satisfy condition J")
+  
+  if(is.null(lambda) == is.null(lambdaJ))
+    stop(paste("Supply either a vector lambda of length equal to npoints(X),",
+               "or a vector lambdaJ of length equal to npoints(X[J])"),
+         call.=FALSE)
+  if(is.null(lambdamin))
+    stop("Supply a value for lambdamin", call.=FALSE)
+  check.1.real(lambdamin)
+  
+  if(!is.null(lambda)) {
+    check.nvector(lambda, nX)
+    stopifnot(all(lambda > 0))
+    stopifnot(lambdamin <= min(lambda[J]))
+    lambdaJ <- lambda[J]
+  } else {
+    check.nvector(lambdaJ, nJ)
+    stopifnot(all(lambdaJ > 0))
+    stopifnot(lambdamin <= min(lambdaJ))
+  }
+
+  FJ <- Finhom(XJ, lambda=lambdaJ, lmin=lambdamin, r=r)
+  FJ <- rebadge.fv(FJ,
+                   new.ylab  = quote(F[inhom, J](r)),
+                   new.fname = c("F", "list(inhom,J)"),
+                   new.yexp   = quote(F[list(inhom,J)](r)))
+  return(FJ)
+}
diff --git a/R/Fest.R b/R/Fest.R
new file mode 100755
index 0000000..e4b53a9
--- /dev/null
+++ b/R/Fest.R
@@ -0,0 +1,196 @@
+#
+#	Fest.R
+#
+#	Computes estimates of the empty space function
+#
+#	$Revision: 4.43 $	$Date: 2016/10/04 01:04:13 $
+#
+
+Fhazard <- function(X, ...) {
+  Z <- Fest(X, ...)
+  if(!any(names(Z) == "km"))
+    stop("Kaplan-Meier estimator 'km' is required for hazard rate")
+  ## strip off Poisson F
+  Z <- Z[, (colnames(Z) != "theo")]
+  ## relabel the fv object
+  Z <- rebadge.fv(Z,
+                  new.ylab=quote(h(r)),
+                  new.fname="h",
+                  tags=c("hazard", "theohaz"),
+                  new.tags=c("hazard", "theo"),
+                  new.labl=c("hat(%s)[km](r)", "%s[pois](r)"),
+                  new.desc=c(
+                      "Kaplan-Meier estimate of %s",
+                      "theoretical Poisson %s"),
+                  new.dotnames=c("hazard", "theo"),
+                  new.preferred="hazard")
+  ## strip off unwanted bits
+  Z <- Z[, c("r", "hazard", "theo")]
+  return(Z)
+}
+
+Fest <- function(X, ..., eps = NULL, r=NULL, breaks=NULL,
+                 correction=c("rs", "km", "cs"),
+                 domain=NULL) {
+  verifyclass(X, "ppp")
+  if(!is.null(domain))
+      stopifnot(is.subset.owin(domain, Window(X)))
+  rorbgiven <- !is.null(r) || !is.null(breaks)
+    
+  ## Intensity estimate
+  W <- X$window
+  npts <- npoints(X)
+  lambda <- npts/area(W)
+  
+  ## First discretise
+  dwin <- as.mask(W, eps=eps)
+  dX <- ppp(X$x, X$y, window=dwin, check=FALSE)
+
+  ## histogram breakpoints
+  rmaxdefault <- rmax.rule("F", dwin, lambda)
+  breaks <- handle.r.b.args(r, breaks, dwin, eps, 
+                            rmaxdefault=rmaxdefault)
+  rvals <- breaks$r
+  rmax  <- breaks$max
+
+  if(rorbgiven) check.finespacing(rvals,
+                                  if(is.null(eps)) NULL else eps/4,
+                                  dwin,
+                                  rmaxdefault=rmaxdefault,
+                                  action="fatal",
+                                  rname="r", 
+                                  context="in Fest(X, r)")
+                                
+  ## choose correction(s)
+#  correction.given <- !missing(correction) && !is.null(correction)
+  if(is.null(correction)) {
+    correction <- c("rs", "km", "cs")
+  } else correction <- pickoption("correction", correction,
+                           c(none="none",
+                             border="rs",
+                             rs="rs",
+                             KM="km",
+                             km="km",
+                             Kaplan="km",
+                             cs="cs",
+                             ChiuStoyan="cs",
+                             Hanisch="cs",
+                             han="cs",
+                             best="km"),
+                           multi=TRUE)
+  
+  ## initialise fv object
+  df <- data.frame(r=rvals, theo=1-exp(-lambda * pi * rvals^2))
+  Z <- fv(df, "r", substitute(F(r), NULL), "theo", . ~ r,
+          c(0,rmax),
+          c("r", "%s[pois](r)"), 
+          c("distance argument r", "theoretical Poisson %s"),
+          fname="F")
+  nr <- length(rvals)
+  zeroes <- numeric(nr)
+
+  ##  compute distances and censoring distances
+  if(X$window$type == "rectangle") {
+    ## original data were in a rectangle
+    ## output of exactdt() is sufficient
+    e <- exactdt(dX)
+    dist <- e$d
+    bdry <- e$b
+    if(!is.null(domain)) {
+      ok <- inside.owin(raster.xy(e$w), , domain)
+      dist <- dist[ok]
+      bdry <- bdry[ok]
+    }
+  } else {
+    ## window is irregular..
+    # Distance transform & boundary distance for all pixels
+    e <- exactdt(dX)
+    b <- bdist.pixels(dX$window, style="matrix")
+    ## select only those pixels inside mask
+    mm <- dwin$m
+    if(!is.null(domain)) {
+      ok <- inside.owin(raster.xy(e$w), , domain)
+      mm <- as.vector(mm) & ok
+    }
+    dist <- e$d[mm]
+    bdry <- b[mm]
+  }
+  
+  ## censoring indicators
+  d <- (dist <= bdry)
+  ##  observed distances
+  o <- pmin.int(dist, bdry)
+
+  ## start calculating estimates of F
+  
+  if("none" %in% correction) {
+    ##  UNCORRECTED e.d.f. of empty space distances
+    if(npts == 0)
+      edf <- zeroes
+    else {
+      hh <- hist(dist[dist <= rmax],breaks=breaks$val,plot=FALSE)$counts
+      edf <- cumsum(hh)/length(dist)
+    }
+    Z <- bind.fv(Z, data.frame(raw=edf), "hat(%s)[raw](r)",
+                 "uncorrected estimate of %s", "raw")
+  }
+  
+  if("cs" %in% correction) {
+    ## Chiu-Stoyan correction
+    if(npts == 0)
+      cs <- zeroes
+    else {
+      ##  uncensored distances
+      x <- dist[d]
+      ##  weights
+      a <- eroded.areas(W, rvals)
+      ## calculate Hanisch estimator
+      h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts
+      H <- cumsum(h/a)
+      cs <- H/max(H[is.finite(H)])
+    }
+    ## add to fv object
+    Z <- bind.fv(Z, data.frame(cs=cs),
+                 "hat(%s)[cs](r)", 
+                 "Chiu-Stoyan estimate of %s",
+                 "cs")
+  }
+
+  if(any(correction %in% c("rs", "km"))) {
+    ## calculate Kaplan-Meier and/or border corrected (Reduced Sample) estimators
+    want.rs <- "rs" %in% correction
+    want.km <- "km" %in% correction
+    selection <- c(want.rs, want.km, want.km, want.km)
+    tags <- c("rs", "km", "hazard", "theohaz")[selection]
+    labels <- c("hat(%s)[bord](r)", "hat(%s)[km](r)",
+                "hat(h)[km](r)", "h[pois](r)")[selection]
+    descr <- c("border corrected estimate of %s",
+               "Kaplan-Meier estimate of %s",
+               "Kaplan-Meier estimate of hazard function h(r)",
+               "theoretical Poisson hazard h(r)")[selection]
+    if(npts == 0) {
+      result <- as.data.frame(matrix(0, nr, length(tags)))
+      names(result) <- tags
+    } else {
+      result <- km.rs.opt(o, bdry, d, breaks, KM=want.km, RS=want.rs)
+      result$theohaz <- 2 * pi * lambda * rvals
+      result <- as.data.frame(result[tags])
+    }
+    ## add to fv object
+    Z <- bind.fv(Z, result,
+                 labels, descr, if(want.km) "km" else "rs")
+  }
+  
+  ## wrap up
+  unitname(Z) <- unitname(X)
+  
+  ## remove 'hazard' from the dotnames
+  nama <- names(Z)
+  fvnames(Z, ".") <- rev(setdiff(nama, c("r", "hazard", "theohaz")))
+  
+  ## determine recommended plot range
+  attr(Z, "alim") <- with(Z, range(.x[is.finite(.y) & .y <= 0.9]))
+  return(Z)
+}
+
+	
diff --git a/R/First.R b/R/First.R
new file mode 100755
index 0000000..1d5650e
--- /dev/null
+++ b/R/First.R
@@ -0,0 +1,61 @@
+#  First.R
+#
+#  $Revision: 1.45 $ $Date: 2016/04/25 02:34:40 $
+#
+
+.onLoad <- function(...) reset.spatstat.options()
+
+.onAttach <- function(libname, pkgname) {
+  store.versionstring.spatstat()
+  ver <- versionstring.spatstat()
+  descfile <- system.file("DESCRIPTION", package="spatstat")
+  ni <- as.character(read.dcf(file=descfile, fields="Nickname"))
+  msg <- paste("\nspatstat", ver,
+               "     ",
+               paren(paste("nickname:", sQuote(ni))),
+               "\nFor an introduction to spatstat, type",
+               sQuote("beginner"), "\n")
+  packageStartupMessage(msg)
+  if(exists("getRversion") && getRversion() >= "3.2.2") {
+    ## check versions
+    rv <- R.Version()
+    rdate <- with(rv, ISOdate(year, month, day))
+    if(Sys.Date() - as.Date(rdate) > 270) {
+      ## R version is really old; just warn about this
+      packageStartupMessage(paste("\nNote:",
+                                  rv$version.string,
+                                  "is more than 9 months old;",
+            "we strongly recommend upgrading to the latest version"))
+    } else {
+      ## warn if spatstat version is old
+      packdate <- as.Date(read.dcf(file=descfile, fields="Date"))
+      elapsed <- Sys.Date() - packdate
+      if(elapsed > 75) {
+        if(elapsed > 365) {
+          n <- floor(elapsed/365)
+          unit <- "year"
+          sowhat <- "we strongly recommend upgrading to the latest version."
+        } else if(elapsed > 100) {
+          n <- floor(elapsed/30)
+          unit <- "month"
+          sowhat <- "we recommend upgrading to the latest version."
+        } else {
+          n <- floor(elapsed/7)
+          unit <- "week"
+          sowhat <- "a newer version should be available."
+        }
+        expired <- if(n == 1) paste("a", unit) else paste(n, paste0(unit, "s"))
+        packageStartupMessage(paste("\nNote: spatstat version", ver,
+                                    "is out of date by more than",
+                                    paste0(expired, ";"), 
+                                    sowhat))
+      }
+    }
+  }
+  # hack to avoid namespace/load quirks 
+  # .C("attachRFoptions", package="RandomFields")  #DontDeclare
+  #
+  invisible(NULL)
+}
+
+  
diff --git a/R/GJfox.R b/R/GJfox.R
new file mode 100755
index 0000000..0d41569
--- /dev/null
+++ b/R/GJfox.R
@@ -0,0 +1,91 @@
+#
+#  GJfox.R
+#
+#  Foxall G-function and J-function
+#
+#  $Revision: 1.7 $   $Date: 2014/10/14 04:00:43 $
+#
+Gfox <- function(X, Y, r=NULL, breaks=NULL,
+                 correction=c("km", "rs", "han"), ...) {
+  stopifnot(is.ppp(X))
+  if(!(is.ppp(Y) || is.psp(Y) || is.owin(Y)))
+    stop("Y should be an object of class ppp, psp or owin")
+  if(!identical(unitname(X), unitname(Y)))
+    warning("X and Y are not in the same units")
+  # 
+  if(is.null(correction))
+    correction <- c("rs", "km", "cs")
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             raw="none",
+                             border="rs",
+                             rs="rs",
+                             KM="km",
+                             km="km",
+                             Kaplan="km",
+                             han="han",
+                             Hanisch="han",
+                             best="km"),
+                           multi=TRUE)
+  corxtable <- c("km", "rs", "han", "none") 
+  corx <- as.list(corxtable %in% correction)
+  names(corx) <- corxtable
+# ensure compatible windows
+  WX <- as.owin(X)
+  WY <- as.owin(Y)
+  if(!is.subset.owin(WX, WY)) {
+    warning("Trimming the window of X to be a subset of the window of Y")
+    WX <- intersect.owin(WX, WY)
+    X <- X[WX]
+  }
+# compute distances and censoring distances
+  D <- distfun(Y)
+  dist <- D(X)
+  bdry <- bdist.points(X[WY])
+# histogram breakpoints 
+  dmax <- max(dist)
+  breaks <- handle.r.b.args(r, breaks, WX, NULL, rmaxdefault=dmax)
+  rval <- breaks$r
+# censoring indicators
+  d <- (dist <= bdry)
+#  observed distances
+  o <- pmin.int(dist, bdry)
+# calculate estimates
+  Z <- censtimeCDFest(o, bdry, d, breaks,
+                      KM=corx$km,
+                      RS=corx$rs,
+                      HAN=corx$han,
+                      RAW=corx$none,
+                      han.denom=if(corx$han) eroded.areas(WX, rval) else NULL,
+                      tt=dist)
+# relabel
+  Z <- rebadge.fv(Z, quote(G[fox](r)), c("G", "fox"))
+  unitname(Z) <- unitname(Y)
+  return(Z)
+}
+
+Jfox <- function(X, Y, r=NULL, breaks=NULL,
+                 correction=c("km", "rs", "han"), ...) {
+  H <- Hest(Y, r=r, breaks=breaks, correction=correction, ...)
+  G <- Gfox(X, Y, r=H$r, correction=correction, ...)
+  # derive J-function
+  J <- eval.fv((1-G)/(1-H), dotonly=FALSE)
+  # correct calculation of hazard is different
+  if("hazard" %in% names(J))
+    J$hazard <- G$hazard - H$hazard
+  # base labels on 'J' rather than full expression
+  attr(J, "labl") <- attr(H, "labl")
+  # add column of 1's
+  J <- bind.fv(J, data.frame(theo=rep.int(1, nrow(J))), "%s[theo](r)",
+               "theoretical value of %s for independence")
+  # rename 
+  J <- rebadge.fv(J, quote(J[fox](r)), c("J", "fox"))
+  funs <- c("km", "han", "rs", "raw", "theo")
+  fvnames(J, ".") <- funs[funs %in% names(J)]
+  unitname(J) <- unitname(Y)
+  return(J)
+}
+
+
+	
+
diff --git a/R/Gcom.R b/R/Gcom.R
new file mode 100755
index 0000000..608f7cb
--- /dev/null
+++ b/R/Gcom.R
@@ -0,0 +1,215 @@
+#
+#	Gcom.R
+#
+#	Model compensator of G 
+#
+#	$Revision: 1.8 $	$Date: 2014/11/10 13:20:25 $
+#
+################################################################################
+#
+
+
+Gcom <- function(object, r=NULL, breaks=NULL, ...,
+                 correction=c("border", "Hanisch"),
+                 conditional=!is.poisson(object),
+                 restrict=FALSE,
+                 model=NULL,
+                 trend=~1, interaction=Poisson(),
+                 rbord=reach(interaction),
+                 ppmcorrection="border",
+                 truecoef=NULL, hi.res=NULL) {
+  if(inherits(object, "ppm")) {
+    fit <- object
+  } else if(is.ppp(object) || inherits(object, "quad")) {
+    if(is.ppp(object)) object <- quadscheme(object, ...)
+    if(!is.null(model)) {
+      fit <- update(model, Q=object, forcefit=TRUE)
+    } else {
+      fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord,
+                 forcefit=TRUE)
+    }
+  } else 
+    stop("object should be a fitted point process model or a point pattern")
+
+  if(missing(conditional) || is.null(conditional))
+    conditional <- !is.poisson(fit)
+  
+#  rfixed <- !is.null(r) || !is.null(breaks)
+  
+  # selection of edge corrections
+#  correction.given <- !missing(correction) && !is.null(correction)
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             border="border",
+                             Hanisch="Hanisch",
+                             hanisch="Hanisch",
+                             best="Hanisch"),
+                           multi=TRUE)
+
+  # Extract data and quadrature points
+  Q <- quad.ppm(fit, drop=FALSE)
+  X <- data.ppm(fit)
+  Win <- X$window
+
+  # edge correction algorithm 
+  algo <- if(!conditional) "classical" else
+          if(restrict) "restricted" else "reweighted"
+
+  # conditioning on border region?
+  if(!conditional) {
+    Wfree <- Win
+  } else {
+    rbord <- fit$rbord
+    Wfree <- erosion(Win, rbord)
+    if(restrict) {
+      retain <- inside.owin(union.quad(Q), , Wfree)
+      Q <- Q[Wfree]
+      X <- X[Wfree]
+      Win <- Wfree
+    } 
+  }
+
+  # Extract quadrature info
+  U <- union.quad(Q)
+  Z <- is.data(Q) # indicator data/dummy
+#  E <- equalsfun.quad(Q)
+  WQ <- w.quad(Q)  # quadrature weights
+
+  # basic statistics
+  npts <- npoints(X)
+  areaW <- area(Win)
+  lambda <- npts/areaW
+  
+  # quadrature points used
+  USED <- if(algo == "reweighted") (bdist.points(U) > rbord) else rep.int(TRUE, U$n)
+  
+  # adjustments to account for restricted domain 
+  if(conditional && spatstat.options("eroded.intensity")) {
+    npts.used <- sum(Z & USED)
+    area.used <- sum(WQ[USED])
+    lambda.used <- npts.used/area.used
+  } else {
+    npts.used <- npts
+    area.used <- areaW
+    lambda.used <- lambda
+  }
+  
+  #  determine breakpoints for r values
+  rmaxdefault <- rmax.rule("G", if(restrict) Wfree else Win, lambda)
+  breaks <- handle.r.b.args(r, breaks, Wfree, rmaxdefault=rmaxdefault)
+  rvals <- breaks$r
+  rmax  <- breaks$max
+  
+  # residuals
+  resid <- residuals(fit, type="raw",drop=FALSE,
+                    new.coef=truecoef, quad=hi.res)
+  rescts  <- with(resid, "continuous")
+  if(restrict) {
+    # keep only data inside Wfree
+    rescts  <- rescts[retain]
+  }
+  # absolute weight for continuous integrals
+#  wc   <- -rescts
+
+  # nearest neighbours (quadrature point to data point)
+  nn <- nncross(U, X, seq(U$n), seq(X$n))
+  dIJ <- nn$dist
+  I <- seq(U$n)
+#  J <- nn$which
+  DD <- Z <- (I <= X$n)  # TRUE for data points
+  wcIJ <- -rescts
+
+  # determine whether a quadrature point will be used in integral
+  okI <- USED[I]
+
+   # initialise fv object
+  r <- breaks$r
+  df <- data.frame(r=r, pois=1 - exp(-pi * lambda.used * r^2))
+  G <- fv(df, "r", substitute(G(r), NULL), "pois", . ~ r,
+          alim=c(0, rmax),
+          labl=c("r","%s[pois](r)"),
+          desc=c("distance argument r", "theoretical Poisson %s"),
+          fname="G")
+
+  #  distance to boundary
+  b <- bI <- bdist.points(U)
+
+  dotnames <- character(0)
+
+  # Border method
+  if("border" %in% correction) {
+    # reduced sample for G(r) of data only
+    ZUSED <- Z & USED
+    RSX <- Kount(dIJ[DD & okI], bI[DD & okI], b[ZUSED], breaks)
+    Gb <- RSX$numerator/RSX$denom.count
+    G <- bind.fv(G, data.frame(border=Gb), "hat(%s)[bord](r)",
+                 "border-corrected nonparametric estimate of %s",
+                 "border")
+    # reduced sample for adjustment integral
+    RSD <- Kwtsum(dIJ[okI], bI[okI], wcIJ[okI], b[ZUSED],
+                  rep.int(1, sum(ZUSED)), breaks)
+    Gbcom <- RSD$numerator/(1 + RSD$denominator)
+    
+    G <- bind.fv(G, data.frame(bcom=Gbcom), "bold(C)~hat(%s)[bord](r)",
+                 "model compensator of border-corrected %s",
+                 "bcom")
+
+    dotnames <- c("border", "bcom", "pois")
+  }
+
+  # Hanisch correction for data
+  if("Hanisch" %in% correction) {
+    nnd <- dIJ[DD & okI]
+    bdry <- bI[DD & okI]
+    # weights
+    ea <- eroded.areas(Win, rvals)
+    if(algo == "reweighted") {
+      # replace weight(r) by weight(max(rbord,r))
+      ea[rvals < rbord] <- eroded.areas(Win, rbord)
+    }
+    # compute
+    x <- nnd[nnd <= bdry]
+    h <- whist(x[x <= rmax], breaks=breaks$val)
+    H <- (1/lambda.used) * cumsum(h/ea)
+    # glue on 
+    G <- bind.fv(G, data.frame(han=H), "hat(%s)[han](r)",
+                 "Hanisch correction estimate of %s",
+                 "han")
+    # Hanisch correction for adjustment integral
+    nnd <- dIJ[okI]
+    bdry <- bI[okI]
+    wt   <- wcIJ[okI]
+    x <- nnd[nnd <= bdry]
+    wt <- wt[nnd <= bdry]
+    h <- whist(x[x <= rmax], breaks=breaks$val, weights=wt[x <= rmax])
+    lambdaplus <- (npts.used + 1)/area.used
+    Hint <- (1/lambdaplus) * cumsum(h/ea)
+    # glue on 
+    G <- bind.fv(G, data.frame(hcom=Hint), "bold(C)~hat(%s)[han](r)",
+                 "model compensator of Hanisch-corrected %s",
+                 "hcom")
+    # pseudovariance for Hanisch residual
+    Hvar <- (1/lambdaplus^2) * cumsum(h/ea^2)
+    G <- bind.fv(G, data.frame(hvar=Hvar), "bold(C)^2~hat(%s)[han](r)",
+                 "Poincare variance for Hanisch corrected %s",
+                 "hcom")
+    # default plot does not show all components
+    dotnames <- c("han", "hcom", dotnames)
+  }
+  # compute sensible 'alim'
+  endpoint <- function(y, r, f) { min(r[y >= f * max(y)]) }
+  amax <- endpoint(G$pois, G$r, 0.99)
+  if(length(dotnames) > 0) 
+    amax <- max(amax,
+                unlist(lapply(as.data.frame(G)[,dotnames,drop=FALSE],
+                              endpoint,
+                              r=r, f=0.9)))
+  attr(G, "alim") <- c(0, amax)
+  #  
+  fvnames(G, ".") <- dotnames
+  unitname(G) <- unitname(X)
+  # secret tag used by 'Gres'
+  attr(G, "maker") <- "Gcom"
+  return(G)
+}
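+
+## Example (a minimal sketch, not from the upstream sources; 'cells' is a
+## standard spatstat dataset):
+##   fit <- ppm(cells ~ 1, Strauss(r=0.07))
+##   G <- Gcom(fit)     # nonparametric estimates plus model compensators
+##   plot(G)            # compensators tracking the estimates suggest good fit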
+
diff --git a/R/Gest.R b/R/Gest.R
new file mode 100755
index 0000000..63c3a44
--- /dev/null
+++ b/R/Gest.R
@@ -0,0 +1,131 @@
+#
+#	Gest.S
+#
+#	Compute estimates of nearest neighbour distance distribution function G
+#
+#	$Revision: 4.31 $	$Date: 2015/10/21 09:06:57 $
+#
+################################################################################
+#
+"Gest" <-
+"nearest.neighbour" <-
+function(X, r=NULL, breaks=NULL, ..., correction=c("rs", "km", "han"),
+         domain=NULL) {
+  verifyclass(X, "ppp")
+  if(!is.null(domain))
+      stopifnot(is.subset.owin(domain, Window(X)))
+  
+  ##
+  W <- X$window
+  npts <- npoints(X)
+  lambda <- npts/area(W)
+  
+  ## determine r values 
+  rmaxdefault <- rmax.rule("G", W, lambda)
+  breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)
+  rvals <- breaks$r
+  rmax  <- breaks$max
+  zeroes <- numeric(length(rvals))
+
+  ## choose correction(s)
+#  correction.given <- !missing(correction) && !is.null(correction)
+  if(is.null(correction)) {
+    correction <- c("rs", "km", "han")
+  } else correction <- pickoption("correction", correction,
+                           c(none="none",
+                             border="rs",
+                             rs="rs",
+                             KM="km",
+                             km="km",
+                             Kaplan="km",
+                             han="han",
+                             Hanisch="han",
+                             cs="han",
+                             ChiuStoyan="han",
+                             best="km"),
+                           multi=TRUE)
+
+  ##  compute nearest neighbour distances
+  nnd <- nndist(X$x, X$y)
+  ##  distance to boundary
+  bdry <- bdist.points(X)
+  ## restrict to subset ?
+  if(!is.null(domain)) {
+    ok <- inside.owin(X, w=domain)
+    nnd <- nnd[ok]
+    bdry <- bdry[ok]
+  }
+  ##  observations
+  o <- pmin.int(nnd,bdry)
+  ##  censoring indicators
+  d <- (nnd <= bdry)
+
+  ## initialise fv object
+  df <- data.frame(r=rvals, theo=1-exp(-lambda * pi * rvals^2))
+  Z <- fv(df, "r", substitute(G(r), NULL), "theo", . ~ r,
+          c(0,rmax),
+          c("r", "%s[pois](r)"), 
+          c("distance argument r", "theoretical Poisson %s"),
+          fname="G")
+
+  if("none" %in% correction) {
+    ##  UNCORRECTED e.d.f. of nearest neighbour distances: use with care
+    if(npts <= 1)
+      edf <- zeroes
+    else {
+      hh <- hist(nnd[nnd <= rmax], breaks=breaks$val, plot=FALSE)$counts
+      edf <- cumsum(hh)/length(nnd)
+    }
+    Z <- bind.fv(Z, data.frame(raw=edf), "hat(%s)[raw](r)",
+                 "uncorrected estimate of %s", "raw")
+  }
+  if("han" %in% correction) {
+    if(npts <= 1)
+      G <- zeroes
+    else {
+      ##  uncensored distances
+      x <- nnd[d]
+      ##  weights
+      a <- eroded.areas(W, rvals, subset=domain)
+      ## calculate Hanisch estimator
+      h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts
+      G <- cumsum(h/a)
+      G <- G/max(G[is.finite(G)])
+    }
+    ## add to fv object
+    Z <- bind.fv(Z, data.frame(han=G),
+                 "hat(%s)[han](r)", 
+                 "Hanisch estimate of %s",
+                 "han")
+    ## modify recommended plot range
+    attr(Z, "alim") <- range(rvals[G <= 0.9])
+  }
+
+  if(any(correction %in% c("rs", "km"))) {
+    ## calculate Kaplan-Meier and border correction (Reduced Sample) estimators
+    if(npts == 0)
+      result <- data.frame(rs=zeroes, km=zeroes, hazard=zeroes, theohaz=zeroes)
+    else {
+      result <- km.rs(o, bdry, d, breaks)
+      result$theohaz <- 2 * pi * lambda * rvals
+      result <- as.data.frame(result[c("rs", "km", "hazard", "theohaz")])
+    }
+    ## add to fv object
+    Z <- bind.fv(Z, result,
+                 c("hat(%s)[bord](r)", "hat(%s)[km](r)",
+                   "hat(h)[km](r)", "h[pois](r)"),
+                 c("border corrected estimate of %s",
+                   "Kaplan-Meier estimate of %s",
+                   "Kaplan-Meier estimate of hazard function h(r)",
+                   "theoretical Poisson hazard function h(r)"),
+                 "km")
+    
+    ## modify recommended plot range
+    attr(Z, "alim") <- range(rvals[result$km <= 0.9])
+  }
+  nama <- names(Z)
+  fvnames(Z, ".") <- rev(setdiff(nama, c("r", "hazard", "theohaz")))
+  unitname(Z) <- unitname(X)
+  return(Z)
+}	
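+
+## Example (a minimal sketch using the standard spatstat dataset 'cells'):
+##   G <- Gest(cells)   # reduced-sample, Kaplan-Meier and Hanisch estimates
+##   plot(G)            # compare against the theoretical Poisson curve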
+
diff --git a/R/Gmulti.R b/R/Gmulti.R
new file mode 100755
index 0000000..d757b68
--- /dev/null
+++ b/R/Gmulti.R
@@ -0,0 +1,238 @@
+#	Gmulti.S
+#
+#	Compute estimates of nearest neighbour distance distribution functions
+#	for multitype point patterns
+#
+#	S functions:	
+#		Gcross                G_{ij}
+#		Gdot		      G_{i\bullet}
+#		Gmulti	              (generic)
+#
+#	$Revision: 4.43 $	$Date: 2015/10/21 09:06:57 $
+#
+################################################################################
+
+"Gcross" <-		
+function(X, i, j, r=NULL, breaks=NULL, ..., correction=c("rs", "km", "han"))
+{
+#	computes G_{ij} estimates
+#
+#	X		marked point pattern (of class 'ppp')
+#	i,j		the two mark values to be compared
+#  
+#       r:              (optional) values of argument r  
+#	breaks:		(optional) breakpoints for argument r
+#
+  X <- as.ppp(X)
+  if(!is.marked(X, dfok=FALSE))
+    stop(paste("point pattern has no", sQuote("marks")))
+  stopifnot(is.multitype(X))
+#
+  marx <- marks(X, dfok=FALSE)
+  if(missing(i)) i <- levels(marx)[1]
+  if(missing(j)) j <- levels(marx)[2]
+#  
+  I <- (marx == i)
+  if(sum(I) == 0) stop("No points are of type i")
+        
+  if(i == j)
+    result <- Gest(X[I], r=r, breaks=breaks, ...)
+  else {
+    J <- (marx == j)
+    if(sum(J) == 0) stop("No points are of type j")
+    result <- Gmulti(X, I, J, r=r, breaks=breaks, disjoint=FALSE, ...,
+                     correction=correction)
+  }
+  iname <- make.parseable(paste(i))
+  jname <- make.parseable(paste(j))
+  result <-
+    rebadge.fv(result,
+               substitute(G[i,j](r), list(i=iname, j=jname)),
+               c("G", paste0("list(", iname, ",", jname, ")")),
+               new.yexp=substitute(G[list(i,j)](r),
+                                   list(i=iname,j=jname)))
+  return(result)
+}	
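+
+## Example (a minimal sketch; 'amacrine' is a standard spatstat multitype
+## dataset with levels "off" and "on"):
+##   Gc <- Gcross(amacrine, "off", "on")   # from "off" points to "on" points
+##   plot(Gc)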
+
+"Gdot" <- 	
+function(X, i, r=NULL, breaks=NULL, ..., correction=c("km","rs","han")) {
+#  Computes estimate of 
+#      G_{i\bullet}(t) = 
+#  P(a further point of the pattern in B(0,t) | a type i point at 0)
+#	
+#	X		marked point pattern (of class ppp)
+#  
+#       r:              (optional) values of argument r  
+#	breaks:		(optional) breakpoints for argument r
+#
+  X <- as.ppp(X)
+  if(!is.marked(X))
+    stop(paste("point pattern has no", sQuote("marks")))
+  stopifnot(is.multitype(X))
+#
+  marx <- marks(X, dfok=FALSE)
+  if(missing(i)) i <- levels(marx)[1]
+  I <- (marx == i)
+  if(sum(I) == 0) stop("No points are of type i")
+  J <- rep.int(TRUE, X$n)	# i.e. all points
+# 
+  result <- Gmulti(X, I, J, r, breaks, disjoint=FALSE, ...,
+                   correction=correction)
+  iname <- make.parseable(paste(i))
+  result <- rebadge.fv(result,
+                  substitute(G[i ~ dot](r), list(i=iname)),
+                  c("G", paste(iname, "~ symbol(\"\\267\")")),
+                  new.yexp=substitute(G[i ~ symbol("\267")](r), list(i=iname)))
+  return(result)
+}	
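+
+## Example (a minimal sketch):
+##   Gd <- Gdot(amacrine, "on")   # from an "on" point to any other point
+##   plot(Gd)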
+
+	
+##########
+
+"Gmulti" <- 	
+function(X, I, J, r=NULL, breaks=NULL, ..., disjoint=NULL,
+         correction=c("rs", "km", "han")) {
+#
+#  engine for computing the estimate of G_{ij} or G_{i\bullet}
+#  depending on selection of I, J
+#  
+#	X		marked point pattern (of class ppp)
+#	
+#	I,J		logical vectors of length equal to the number of points
+#			and identifying the two subsets of points to be
+#			compared.
+#  
+#       r:              (optional) values of argument r  
+#	breaks:		(optional) breakpoints for argument r
+#
+  verifyclass(X, "ppp")
+  W <- X$window
+  npts <- npoints(X)
+  areaW <- area(W)
+# check I and J
+  I <- ppsubset(X, I)
+  J <- ppsubset(X, J)
+  if(is.null(I) || is.null(J))
+    stop("I and J must be valid subset indices")
+  nI <- sum(I)
+  nJ <- sum(J)
+  if(nI == 0) stop("No points satisfy condition I")
+  if(nJ == 0) stop("No points satisfy condition J")
+
+  if(is.null(disjoint))
+    disjoint <- !any(I & J)
+# choose correction(s)
+#  correction.given <- !missing(correction) && !is.null(correction)
+  if(is.null(correction))
+    correction <- c("rs", "km", "han")
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             border="rs",
+                             rs="rs",
+                             KM="km",
+                             km="km",
+                             Kaplan="km",
+                             han="han",
+                             Hanisch="han",
+                             best="km"),
+                           multi=TRUE)
+#  determine breakpoints for r values
+  lamJ <- nJ/areaW
+  rmaxdefault <- rmax.rule("G", W, lamJ)
+  breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)
+#  brks <- breaks$val
+  rmax <- breaks$max
+  rvals <- breaks$r
+  zeroes <- numeric(length(rvals))
+# initialise fv object
+  df <- data.frame(r=rvals, theo=1-exp(-lamJ * pi * rvals^2))
+  fname <- c("G", "list(I,J)")
+  Z <- fv(df, "r", quote(G[I,J](r)), "theo", . ~ r,
+          c(0,rmax),
+          c("r", makefvlabel(NULL, NULL, fname, "pois")),
+          c("distance argument r", "theoretical Poisson %s"),
+          fname=fname,
+          yexp=quote(G[list(I,J)](r)))
+#  "type I to type J" nearest neighbour distances
+  XI <- X[I]
+  XJ <- X[J]
+  if(disjoint) 
+    nnd <- nncross(XI, XJ, what="dist")
+  else {
+    seqnp <- seq_len(npts)
+    iX <- seqnp[I]
+    iY <- seqnp[J]
+    nnd <- nncross(XI, XJ, iX, iY, what="dist")
+  }
+#  distance to boundary from each type i point
+  bdry <- bdist.points(XI)
+#  observations
+  o <- pmin.int(nnd,bdry)
+#  censoring indicators
+  d <- (nnd <= bdry)
+#
+# calculate estimates
+  
+  if("none" %in% correction) {
+    #  UNCORRECTED e.d.f. of nearest neighbour distances: use with care
+    if(npts == 0)
+      edf <- zeroes
+    else {
+      hh <- hist(nnd[nnd <= rmax], breaks=breaks$val, plot=FALSE)$counts
+      edf <- cumsum(hh)/length(nnd)
+    }
+    Z <- bind.fv(Z, data.frame(raw=edf),
+                 makefvlabel(NULL, "hat", fname, "raw"),
+                 "uncorrected estimate of %s", "raw")
+  }
+
+  if("han" %in% correction) {
+    # Hanisch style estimator
+    if(npts == 0)
+      G <- zeroes
+    else {
+      #  uncensored distances
+      x <- nnd[d]
+      #  weights
+      a <- eroded.areas(W, rvals)
+      # calculate Hanisch estimator
+      h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts
+      G <- cumsum(h/a)
+      G <- G/max(G[is.finite(G)])
+    }
+    # add to fv object
+    Z <- bind.fv(Z, data.frame(han=G),
+                 makefvlabel(NULL, "hat", fname, "han"),
+                 "Hanisch estimate of %s",
+                 "han")
+    # modify recommended plot range
+    attr(Z, "alim") <- range(rvals[G <= 0.9])
+  }
+  
+  if(any(correction %in% c("rs", "km"))) {
+    # calculate Kaplan-Meier and border correction (Reduced Sample) estimators
+    if(npts == 0)
+      result <- data.frame(rs=zeroes, km=zeroes, hazard=zeroes)
+    else {
+      result <- km.rs(o, bdry, d, breaks)
+      result <- as.data.frame(result[c("rs", "km", "hazard")])
+    }
+    # add to fv object
+    Z <- bind.fv(Z, result,
+                 c(makefvlabel(NULL, "hat", fname, "bord"),
+                   makefvlabel(NULL, "hat", fname, "km"),
+                   "hazard(r)"),
+                 c("border corrected estimate of %s",
+                   "Kaplan-Meier estimate of %s",
+                   "Kaplan-Meier estimate of hazard function lambda(r)"),
+                 "km")
+    # modify recommended plot range
+    attr(Z, "alim") <- range(rvals[result$km <= 0.9])
+  }
+  nama <- names(Z)
+  fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))])
+  unitname(Z) <- unitname(X)
+  return(Z)
+}	
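+
+## Example (a minimal sketch; for disjoint I and J this matches Gcross):
+##   I <- marks(amacrine) == "on"
+##   J <- marks(amacrine) == "off"
+##   Gm <- Gmulti(amacrine, I, J)
+##   plot(Gm)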
+
+
diff --git a/R/Gres.R b/R/Gres.R
new file mode 100755
index 0000000..027652e
--- /dev/null
+++ b/R/Gres.R
@@ -0,0 +1,70 @@
+#
+#	Gres.R
+#
+#	Residual G 
+#
+#	$Revision: 1.3 $	$Date: 2013/04/25 06:37:43 $
+#
+#############################################################################
+#
+
+Gres <- function(object, ...) {
+  if(!is.fv(object)) {
+    # usual case where 'object' is a ppm, ppp or quad
+    G <- Gcom(object, ...)
+  } else {
+    # case where 'object' is the output of 'Gcom'
+    a <- attr(object, "maker")
+    if(is.null(a) || a != "Gcom")
+      stop("fv object was not created by Gcom")
+    G <- object
+    if(length(list(...)) > 0)
+      warning("Extra arguments ignored")
+  }
+  # initialise fv object
+  df <- data.frame(r=G$r, theo=numeric(length(G$r)))
+  desc <- c("distance argument r", "value 0 corresponding to perfect fit")
+  ans <- fv(df, "r", substitute(bold(R)~hat(G)(r), NULL),
+            "theo", . ~ r,
+            attr(G, "alim"), c("r","bold(R)~%s[theo](r)"), desc, fname="G")
+  # add residual estimates
+  nam <- names(G)
+  if(all(c("border","bcom") %in% nam))
+    ans <- bind.fv(ans,
+                    data.frame(bres=with(G, border-bcom)),
+                    "bold(R)~hat(%s)[bord](r)",
+                    "border corrected residual of %s",
+                    "bres")
+  if(all(c("han","hcom") %in% nam))
+    ans <- bind.fv(ans,
+                    data.frame(hres=with(G, han-hcom)),
+                    "bold(R)~hat(%s)[han](r)",
+                    "Hanisch corrected residual of %s",
+                    "hres")
+  if("hvar" %in% nam) {
+    savedotnames <- fvnames(ans, ".")
+    hsd <- with(G, sqrt(hvar))
+    ans <- bind.fv(ans,
+                    data.frame(hvar=with(G, hvar),
+                               hsd = hsd,
+                               hi =  2*hsd,
+                               lo = -2*hsd),
+                    c("bold(C)^2~hat(%s)[han](r)",
+                      "sqrt(bold(C)^2~hat(%s)[han](r))",
+                      "bold(R)~hat(%s)[Hi](r)",
+                      "bold(R)~hat(%s)[Lo](r)"),
+                    c("pseudovariance of Hanisch corrected residual %s",
+                      "pseudo-SD of Hanisch corrected residual %s",
+                      "upper critical band for Hanisch corrected residual %s",
+                      "lower critical band for Hanisch corrected residual %s"),
+                    "hres")
+    ans <- bind.fv(ans,
+                   data.frame(hstdres=with(ans, hres/hsd)),
+                   "bold(T)~hat(%s)[han](r)",
+                   "standardised Hanisch-corrected residual %s",
+                   "hres")
+    fvnames(ans, ".") <- c(savedotnames, c("hi", "lo"))
+  }
+  unitname(ans) <- unitname(G)
+  return(ans)
+}
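+
+## Example (a minimal sketch):
+##   fit <- ppm(cells ~ 1, Strauss(r=0.07))
+##   plot(Gres(fit))   # residuals near zero indicate a good fit; 'hi'/'lo'
+##                     # are the +/- 2 pseudo-SD critical bands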
diff --git a/R/Hest.R b/R/Hest.R
new file mode 100755
index 0000000..9d3fb25
--- /dev/null
+++ b/R/Hest.R
@@ -0,0 +1,121 @@
+#
+#    Hest.R
+#
+#  Contact distribution for a random set
+#
+#
+Hest <- local({
+  Hest <- function(X, r=NULL, breaks=NULL,
+                   ...,
+                   W,
+                   correction=c("km", "rs", "han"),
+                   conditional=TRUE) {
+    rorbgiven <- !is.null(r) || !is.null(breaks)
+    if(is.ppp(X) || is.psp(X)) {
+      XX <- X
+      W0 <- Window(X)
+    } else if(is.owin(X)) {
+      XX <- X
+      W0 <- Frame(X)
+    } else if(is.im(X)) {
+      if(X$type != "logical")
+        stop("When X is an image, its pixel values should be logical values")
+      XX <- solutionset(X)
+      W0 <- Window(X)
+    } else stop("X should be an object of class ppp, psp, owin or im")
+    ##
+    if(given.W <- !missing(W)) {
+      if(!is.subset.owin(W, W0))
+        stop("W is not a subset of the observation window of X")
+    } else {
+      W <- W0
+    }
+    ## handle corrections
+    if(is.null(correction))
+      correction <- c("rs", "km", "cs")
+    correction <- pickoption("correction", correction,
+                             c(none="none",
+                               raw="none",
+                               border="rs",
+                               rs="rs",
+                               KM="km",
+                               km="km",
+                               Kaplan="km",
+                               han="han",
+                               Hanisch="han",
+                               best="km"),
+                             multi=TRUE)
+    corxtable <- c("km", "rs", "han", "none") 
+    corx <- as.list(corxtable %in% correction)
+    names(corx) <- corxtable
+    ## compute distance map
+    D <- distmap(XX, ...)
+    pixeps <- with(D, min(xstep, ystep))
+    if(!given.W && !is.im(X)) {
+      B <- attr(D, "bdry")
+    } else {
+      B <- distmap(W, invert=TRUE, ...)
+      har <- harmonise(D=D, B=B)
+      D <- har$D[W, drop=FALSE]
+      B <- har$B[W, drop=FALSE]
+    }
+    ## histogram breakpoints 
+    dmax <- max(D)
+    breaks <- handle.r.b.args(r, breaks, W, NULL, rmaxdefault=dmax)
+    rval <- breaks$r
+    if(rorbgiven) check.finespacing(rval, rname="r", eps=pixeps/4, W,
+                                    rmaxdefault=dmax,
+                                    context="in Hest(X,r)",
+                                    action="fatal")
+    ##  extract distances and censoring distances
+    dist <- as.vector(as.matrix(D))
+    bdry <- as.vector(as.matrix(B))
+    ok <- !is.na(dist) & !is.na(bdry)
+    dist <- dist[ok]
+    bdry <- bdry[ok]
+    ## delete zero distances
+    if(is.owin(X) || is.im(X)) {
+      pos <- (dist > 0)
+      areafraction <- 1 - mean(pos)
+      dist <- dist[pos]
+      bdry <- bdry[pos]
+    }
+    ## censoring indicators
+    d <- (dist <= bdry)
+    ##  observed distances
+    o <- pmin.int(dist, bdry)
+    ## calculate estimates
+    Z <- censtimeCDFest(o, bdry, d, breaks,
+                        KM=corx$km,
+                        RS=corx$rs,
+                        HAN=corx$han,
+                        RAW=corx$none,
+                        han.denom=if(corx$han) eroded.areas(W, rval) else NULL,
+                        tt=dist)
+    ## conditional on d > 0 ?
+    if(is.owin(X) || is.im(X)) {
+      if(conditional) {
+        if(corx$km)   Z$km  <- condition(Z$km)
+        if(corx$rs)   Z$rs  <- condition(Z$rs)
+        if(corx$han)  Z$han <- condition(Z$han)
+        if(corx$none) Z$raw <- condition(Z$raw)
+      } else {
+        if(corx$km)   Z$km  <- reconstitute(Z$km, areafraction) 
+        if(corx$rs)   Z$rs  <- reconstitute(Z$rs, areafraction) 
+        if(corx$han)  Z$han <- reconstitute(Z$han, areafraction) 
+        if(corx$none) Z$raw <- reconstitute(Z$raw, areafraction) 
+      }
+    }
+    ## relabel
+    Z <- rebadge.fv(Z, substitute(H(r), NULL), "H")
+    unitname(Z) <- unitname(X)
+    return(Z)
+  }
+
+  condition <- function(x) { (x - x[1])/(1-x[1]) }
+  reconstitute <- function(x, p) { p + (1-p) * x }
+
+  Hest
+})
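+
+## Example (a minimal sketch; 'heather' is a standard spatstat dataset of
+## binary vegetation masks):
+##   H <- Hest(heather$coarse)   # spherical contact distribution of the set
+##   plot(H)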
+
+
diff --git a/R/Iest.R b/R/Iest.R
new file mode 100755
index 0000000..40a4c9c
--- /dev/null
+++ b/R/Iest.R
@@ -0,0 +1,84 @@
+#	Iest.R
+#
+#	I function
+#
+#	$Revision: 1.15 $	$Date: 2016/04/25 02:34:40 $
+#
+#
+#
+Iest <- local({
+
+  Iest <- function(X, ...,
+                   eps=NULL, r = NULL, breaks = NULL, correction=NULL) {
+
+    X <- as.ppp(X)
+    if(!is.multitype(X))
+      stop("Only applicable to multitype point patterns")
+    marx <- marks(X, dfok=FALSE)
+    ntypes <- length(levels(marx))
+
+    Y <- unmark(split(X))
+  
+    ## relative proportions 
+    ni <- sapply(Y, npoints)
+    fi <- ni/sum(ni)
+
+    ## J function of pattern regardless of type
+    Jdotdot <- Jest(unmark(X),
+                    correction=correction, r=r, eps=eps, breaks=breaks)
+    rvals <- Jdotdot$r
+  
+    ## J function of subpattern of each type i
+    Jii <- lapply(Y, Jest, r=rvals, correction=correction)
+    nrvals <- lengths(lapply(Jii, getElement, name="r"))
+    if(length(unique(nrvals)) != 1 || nrvals[1] != length(rvals))
+      stop("Internal error: J function objects have different lengths")
+
+    ## initialise fv object
+    alim <- attr(Jdotdot, "alim")
+    Z <- fv(data.frame(r=rvals, theo=0),
+            "r", substitute(I(r), NULL), "theo",
+            . ~ r, alim,
+            c("r", "%s[pois](r)"),
+            c("distance argument r", "theoretical Poisson %s"),
+            fname="I")
+  
+    ## Estimates of each type
+    namii <- unlist(lapply(Jii, names))
+    namdd <- names(Jdotdot)
+    bothnames <- namii[namii %in% namdd]
+  
+    if("un" %in% bothnames) {
+      Jun <- matrix(extract(Jii, "un"), nrow=ntypes, byrow=TRUE)
+      Iun <- apply(fi * Jun, 2, sum) - Jdotdot$un
+      Z <- bind.fv(Z, data.frame(un=Iun), "hat(%s)[un](r)",
+                   "uncorrected estimate of %s", "un")
+    }
+    if("rs" %in% bothnames) {
+      Jrs <- matrix(extract(Jii, "rs"), nrow=ntypes, byrow=TRUE)
+      Irs <- apply(fi * Jrs, 2, sum) - Jdotdot$rs    
+      Z <- bind.fv(Z, data.frame(rs=Irs), "hat(%s)[rs](r)",
+                   "border corrected estimate of %s", "rs")
+    }
+    if("han" %in% bothnames) {
+      Jhan <- matrix(extract(Jii, "han"), nrow=ntypes, byrow=TRUE)
+      Ihan <- apply(fi * Jhan, 2, sum) - Jdotdot$han
+      Z <- bind.fv(Z, data.frame(han=Ihan), "hat(%s)[han](r)",
+                   "Hanisch-style estimate of %s", "han")
+    }
+    if("km" %in% bothnames) {
+      Jkm <- matrix(extract(Jii, "km"), nrow=ntypes, byrow=TRUE)
+      Ikm <- apply(fi * Jkm, 2, sum) - Jdotdot$km
+      Z <- bind.fv(Z, data.frame(km=Ikm), "hat(%s)[km](r)",
+                   "Kaplan-Meier estimate of %s", "km")
+    }
+    unitname(Z) <- unitname(X)
+    return(Z)
+  }
+
+  extract <- function(Zlist, what) sapply(Zlist, "[[", i=what)
+
+  Iest
+})
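+
+## Example (a minimal sketch):
+##   Ia <- Iest(amacrine)
+##   plot(Ia)   # values near 0 are consistent with independence of types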
+
+
diff --git a/R/Jest.R b/R/Jest.R
new file mode 100755
index 0000000..a5b2027
--- /dev/null
+++ b/R/Jest.R
@@ -0,0 +1,74 @@
+#	Jest.S
+#
+#	Usual invocation to compute J function
+#	if F and G are not required 
+#
+#	$Revision: 4.21 $	$Date: 2016/10/04 02:33:50 $
+#
+#
+#
+Jest <- function(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) {
+  X <- as.ppp(X)
+  W <- Window(X)
+  rmaxdefault <- rmax.rule("J", W, intensity(X))
+  brks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)
+  # compute F and G 
+  FF <- Fest(X, eps, breaks=brks, correction=correction)
+  G <- Gest(X, breaks=brks, correction=correction)
+  # initialise fv object
+  rvals <- FF$r
+  rmax  <- max(rvals)
+#  Fvals <- FF[[attr(FF, "valu")]]
+  Z <- fv(data.frame(r=rvals, theo=1),
+          "r", substitute(J(r), NULL),
+          "theo",
+          . ~ r,
+          c(0,rmax),
+          c("r", "%s[pois](r)"),
+          c("distance argument r", "theoretical Poisson %s"),
+          fname="J")
+  # compute J function estimates
+  # this has to be done manually because of the mismatch between names
+  Fnames <- names(FF)
+  Gnames <- names(G)
+  if("raw" %in% Gnames && "raw" %in% Fnames) {
+    Jun <- ratiotweak(1-G$raw, 1-FF$raw)
+    Z <- bind.fv(Z, data.frame(un=Jun), "hat(%s)[un](r)",
+                 "uncorrected estimate of %s", "un")
+    attr(Z, "alim") <- range(rvals[FF$raw <= 0.9])
+  }
+  if("rs" %in% Gnames && "rs" %in% Fnames) {
+    Jrs <- ratiotweak(1-G$rs, 1-FF$rs)
+    Z <- bind.fv(Z, data.frame(rs=Jrs), "hat(%s)[rs](r)",
+                 "border corrected estimate of %s", "rs")
+    attr(Z, "alim") <- range(rvals[FF$rs <= 0.9])
+  }
+  if("han" %in% Gnames && "cs" %in% Fnames) {
+    Jhan <- ratiotweak(1-G$han, 1-FF$cs)
+    Z <- bind.fv(Z, data.frame(han=Jhan), "hat(%s)[han](r)",
+                 "Hanisch-style estimate of %s", "han")
+    attr(Z, "alim") <- range(rvals[FF$cs <= 0.9])
+  }
+  if("km" %in% Gnames && "km" %in% Fnames) {
+    Jkm <- ratiotweak(1-G$km, 1-FF$km)
+    Z <- bind.fv(Z, data.frame(km=Jkm), "hat(%s)[km](r)",
+                 "Kaplan-Meier estimate of %s", "km")
+    attr(Z, "alim") <- range(rvals[FF$km <= 0.9])
+  }
+  if("hazard" %in% Gnames && "hazard" %in% Fnames) {
+    Jhaz <- G$hazard - FF$hazard
+    Z <- bind.fv(Z, data.frame(hazard=Jhaz), "hazard(r)",
+                 "Kaplan-Meier estimate of derivative of log(%s)")
+  }
+# set default plotting values and order
+  nama <- names(Z)
+  fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))])
+  
+# add more info        
+  attr(Z, "F") <- FF
+  attr(Z, "G") <- G
+
+  unitname(Z) <- unitname(X)
+  return(Z)
+}
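+
+## Example (a minimal sketch):
+##   J <- Jest(cells)
+##   plot(J)   # J = 1 for Poisson; J > 1 suggests inhibition,
+##             # J < 1 suggests clustering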
+
diff --git a/R/Jinhom.R b/R/Jinhom.R
new file mode 100644
index 0000000..77dabbd
--- /dev/null
+++ b/R/Jinhom.R
@@ -0,0 +1,368 @@
+#
+# Jinhom.R
+#
+#  $Revision: 1.11 $ $Date: 2017/06/05 10:31:58 $
+#
+
+Ginhom <- function(X, lambda=NULL, lmin=NULL,
+                   ...,
+                   sigma=NULL, varcov=NULL,
+                   r=NULL, breaks=NULL,
+                   ratio=FALSE, update = TRUE) {
+  
+  stopifnot(is.ppp(X))
+
+  npts <- npoints(X)
+  W <- as.owin(X)
+  areaW <- area(W)
+  miss.update <- missing(update)
+
+  # determine 'r' values
+  rmaxdefault <- rmax.rule("G", W, npts/areaW)
+  breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)
+  if(!breaks$even)
+    stop("r values must be evenly spaced")
+  r <- breaks$r
+  rmax <- breaks$max
+  nr <- length(r)
+
+  dangerous <- "lambda"
+  danger <- TRUE
+  
+  # Intensity values at data points
+  if(is.null(lambda)) {
+    # No intensity data provided
+    danger <- FALSE
+    # Estimate density at points by leave-one-out kernel smoothing
+    lamX <- density(X, ..., sigma=sigma, varcov=varcov,
+                      at="points", leaveoneout=TRUE)
+    lambdaX <- as.numeric(lamX)
+    # negative or zero values are due to numerical error
+    lambdaX <- pmax.int(lambdaX, .Machine$double.eps)
+  } else {
+    # lambda values provided
+    if(is.im(lambda)) 
+      lambdaX <- safelookup(lambda, X)
+    else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) {
+      model <- lambda
+      if(!update) {
+        ## just use intensity of fitted model
+        lambdaX <- predict(lambda, locations=X, type="trend")
+      } else {
+        ## re-fit model to data X
+        model <-
+          if(is.ppm(model)) update(model, Q=X) else update(model, X=X)
+        lambdaX <- fitted(model, dataonly=TRUE)
+        danger <- FALSE
+        if(miss.update)
+          warn.once(key="Ginhom.update",
+                    "The behaviour of Ginhom when lambda is a ppm object",
+                    "has changed (in spatstat 1.37-0 and later).",
+                    "See help(Ginhom)")
+      }
+    } else if(is.function(lambda))
+      lambdaX <- lambda(X$x, X$y)
+    else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) {
+      lambdaX <- lambda
+      check.nvector(lambdaX, npts)
+    } else stop(paste(sQuote("lambda"),
+                      "should be a vector, a pixel image, or a function"))
+    # negative values are illegal
+    minX <- min(lambdaX)
+    if(minX < 0)
+      stop("Negative values of lambda were encountered at data points")
+    if(minX == 0)
+      stop("Zero values of lambda were encountered at data points")
+  }
+  # Minimum intensity
+  if(!is.null(lmin)) {
+    check.1.real(lmin)
+    stopifnot(lmin >= 0)
+  } else {
+    # Compute minimum value over window
+    if(is.null(lambda)) {
+      # extract previously selected smoothing bandwidth
+      sigma <- attr(lamX, "sigma")
+      varcov <- attr(lamX, "varcov")
+      # estimate density on a pixel grid and minimise
+      lam <- density(X, ..., sigma=sigma, varcov=varcov, at="pixels")
+      lmin <- min(lam)
+      # negative or zero values may occur due to numerical error
+      lmin <- max(lmin, .Machine$double.eps)
+    } else {
+      if(is.im(lambda)) 
+        lmin <- min(lambda)
+      else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) 
+        lmin <- min(predict(lambda))
+      else if(is.function(lambda)) 
+        lmin <- min(as.im(lambda, W))
+      else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) 
+        lmin <- min(lambdaX)
+    }
+    if(lmin < 0)
+      stop("Negative values of intensity encountered")
+    # ensure lmin < lambdaX
+    lmin <- min(lmin, lambdaX)
+  }
+  # Compute intensity factor
+  lratio <- lmin/lambdaX
+  vv <- 1 - lratio
+  bad <- (lratio > 1)
+  if((nbad <- sum(bad)) > 0)
+    stop(paste("Value of", sQuote("lmin"), "exceeds",
+               nbad, ngettext(nbad, "value", "values"),
+               "of", sQuote("lambda")))
+   # sort data points in order of increasing x coordinate
+  xx <- X$x
+  yy <- X$y
+  oX <- fave.order(xx)
+  xord <- xx[oX]
+  yord <- yy[oX]
+  vord <- vv[oX]
+  # compute local cumulative products
+  z <- .C("locprod",
+          n = as.integer(npts),
+          x = as.double(xord),
+          y = as.double(yord),
+          v = as.double(vord),
+          nr = as.integer(nr),
+          rmax = as.double(rmax),
+          ans = as.double(numeric(npts * nr)),
+          PACKAGE = "spatstat")
+  ans <- matrix(z$ans, nrow=nr, ncol=npts)
+  # revert to original ordering
+  loccumprod <- matrix(,  nrow=nr, ncol=npts)
+  loccumprod[, oX] <- ans
+  # border correction
+  bX <- bdist.points(X)
+  ok <- outer(r, bX, "<=")
+  denom <- .rowSums(ok, nr, npts)
+  loccumprod[!ok] <- 0
+  numer <- .rowSums(loccumprod, nr, npts)
+  # pack up
+  Gdf <- data.frame(r=r, theo = 1 - exp(- lmin * pi * r^2))
+  desc <- c("distance argument r", "theoretical Poisson %s")
+  theo.denom <- rep.int(npts, nr)
+  G <- ratfv(Gdf, NULL, theo.denom,
+             "r", quote(G[inhom](r)),
+             "theo", NULL, c(0,rmax),
+             c("r", "{%s[%s]^{pois}}(r)"),
+             desc,
+             fname=c("G", "inhom"),
+             ratio=ratio)
+  G <- bind.ratfv(G,
+                  data.frame(bord=denom-numer), denom,
+                   "{hat(%s)[%s]^{bord}}(r)",
+                  "border estimate of %s",
+                  "bord",
+                  ratio=ratio)
+  # 
+  formula(G) <- . ~ r
+  fvnames(G, ".") <- c("bord", "theo")
+  unitname(G) <- unitname(X)
+  if(ratio)
+    G <- conform.ratfv(G)
+  if(danger)
+    attr(G, "dangerous") <- dangerous
+  return(G)
+}
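+
+## Example (a minimal sketch on a simulated inhomogeneous pattern):
+##   X <- rpoispp(function(x,y) { 100 * x })
+##   G <- Ginhom(X, sigma=0.1)   # intensity estimated by kernel smoothing
+##   plot(G)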
+
+
+   
+
+Finhom <- function(X, lambda=NULL, lmin=NULL,
+                   ...,
+                   sigma=NULL, varcov=NULL,
+                   r=NULL, breaks=NULL,
+                   ratio=FALSE, update = TRUE) {
+  
+  stopifnot(is.ppp(X))
+
+  npts <- npoints(X)
+  W <- as.owin(X)
+  areaW <- area(W)
+  miss.update <- missing(update)
+
+  # determine 'r' values
+  rmaxdefault <- rmax.rule("F", W, npts/areaW)
+  breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)
+  if(!breaks$even)
+    stop("r values must be evenly spaced")
+  r <- breaks$r
+  rmax <- breaks$max
+  nr <- length(r)
+
+  dangerous <- "lambda"
+  danger <- TRUE
+  
+  # Intensity values at data points
+  if(is.null(lambda)) {
+    # No intensity data provided
+    danger <- FALSE
+    # Estimate density at points by leave-one-out kernel smoothing
+    lamX <- density(X, ..., sigma=sigma, varcov=varcov,
+                      at="points", leaveoneout=TRUE)
+    lambdaX <- as.numeric(lamX)
+    # negative or zero values are due to numerical error
+    lambdaX <- pmax.int(lambdaX, .Machine$double.eps)
+  } else {
+    # lambda values provided
+    if(is.im(lambda)) 
+      lambdaX <- safelookup(lambda, X)
+    else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) {
+      model <- lambda
+      if(!update) {
+        ## just use intensity of fitted model
+        lambdaX <- predict(lambda, locations=X, type="trend")
+      } else {
+        ## re-fit model to data X
+        model <-
+          if(is.ppm(model)) update(model, Q=X) else update(model, X=X)
+        lambdaX <- fitted(model, dataonly=TRUE)
+        danger <- FALSE
+        if(miss.update)
+          warn.once(key="Finhom.update",
+                    "The behaviour of Finhom when lambda is a ppm object",
+                    "has changed (in spatstat 1.37-0 and later).",
+                    "See help(Finhom)")
+      }
+    } else if(is.function(lambda))
+      lambdaX <- lambda(X$x, X$y)
+    else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) {
+      lambdaX <- lambda
+      check.nvector(lambdaX, npts)
+    } else stop(paste(sQuote("lambda"),
+                      "should be a vector, a pixel image, or a function"))
+    # negative values are illegal
+    minX <- min(lambdaX)
+    if(minX < 0)
+      stop("Negative values of lambda were encountered at data points")
+    if(minX == 0)
+      stop("Zero values of lambda were encountered at data points")
+  }
+  # Minimum intensity
+  if(!is.null(lmin)) {
+    check.1.real(lmin)
+    stopifnot(lmin >= 0)
+  } else {
+    # Compute minimum value over window
+    if(is.null(lambda)) {
+      # extract previously selected smoothing bandwidth
+      sigma <- attr(lamX, "sigma")
+      varcov <- attr(lamX, "varcov")
+      # estimate density on a pixel grid and minimise
+      lam <- density(X, ..., sigma=sigma, varcov=varcov, at="pixels")
+      lmin <- min(lam)
+      # negative or zero values may occur due to numerical error
+      lmin <- max(lmin, .Machine$double.eps)
+    } else {
+      if(is.im(lambda)) 
+        lmin <- min(lambda)
+      else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) 
+        lmin <- min(predict(lambda))
+      else if(is.function(lambda)) 
+        lmin <- min(as.im(lambda, W))
+      else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) 
+        lmin <- min(lambdaX)
+    }
+    if(lmin < 0)
+      stop("Negative values of intensity encountered")
+    # ensure lmin < lambdaX
+    lmin <- min(lmin, lambdaX)
+  }
+  # Compute intensity factor
+  lratio <- lmin/lambdaX
+  vv <- 1 - lratio
+  bad <- (lratio > 1)
+  if((nbad <- sum(bad)) > 0)
+    stop(paste("Value of", sQuote("lmin"), "exceeds",
+               nbad, ngettext(nbad, "value", "values"),
+               "of", sQuote("lambda")))
+  # sort data points in order of increasing x coordinate
+  xx <- X$x
+  yy <- X$y
+  oX <- fave.order(xx)
+  xord <- xx[oX]
+  yord <- yy[oX]
+  vord <- vv[oX]
+  # determine pixel grid and compute distance to boundary
+  M <- do.call.matched(as.mask, append(list(w=W), list(...)))
+  bM <- bdist.pixels(M, style="matrix")
+  bM <- as.vector(bM)
+  # x, y coordinates of pixels are already sorted by increasing x
+  xM <- as.vector(rasterx.mask(M))
+  yM <- as.vector(rastery.mask(M))
+  nM <- length(xM)
+  # compute local cumulative products
+  z <- .C("locxprod",
+         ntest = as.integer(nM),
+         xtest = as.double(xM),
+         ytest = as.double(yM),
+         ndata = as.integer(npts),
+         xdata = as.double(xord),
+         ydata = as.double(yord),
+         vdata = as.double(vord),
+         nr = as.integer(nr),
+         rmax = as.double(rmax),
+         ans = as.double(numeric(nM * nr)),
+         PACKAGE = "spatstat")
+  loccumprod <- matrix(z$ans, nrow=nr, ncol=nM)
+  # border correction
+  ok <- outer(r, bM, "<=")
+  denom <- .rowSums(ok, nr, nM)
+  loccumprod[!ok] <- 0
+  numer <- .rowSums(loccumprod, nr, nM)
+  # pack up
+  Fdf <- data.frame(r=r, theo = 1 - exp(- lmin * pi * r^2))
+  desc <- c("distance argument r", "theoretical Poisson %s")
+  theo.denom <- rep.int(npts, nr)
+  FX <- ratfv(Fdf, NULL, theo.denom,
+              "r",
+              quote(F[inhom](r)),
+              "theo", NULL, c(0,rmax),
+              c("r","{%s[%s]^{pois}}(r)"),
+              desc,
+              fname=c("F", "inhom"),
+              ratio=ratio)
+  FX <- bind.ratfv(FX,
+                  data.frame(bord=denom-numer), denom,
+                  "{hat(%s)[%s]^{bord}}(r)",
+                  "border estimate of %s",
+                  "bord",
+                  ratio=ratio)
+  # 
+  formula(FX) <- . ~ r
+  fvnames(FX, ".") <- c("bord", "theo")
+  unitname(FX) <- unitname(X)
+  if(ratio)
+    FX <- conform.ratfv(FX)
+  if(danger)
+    attr(FX, "dangerous") <- dangerous
+  return(FX)
+}
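+
+## Example (a minimal sketch; same interface as Ginhom, but the local
+## products are evaluated over a pixel grid):
+##   X <- rpoispp(function(x,y) { 100 * x })
+##   Fi <- Finhom(X, sigma=0.1)
+##   plot(Fi)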
+
+Jinhom <- function(X, lambda=NULL, lmin=NULL,
+                   ...,
+                   sigma=NULL, varcov=NULL,
+                   r=NULL, breaks=NULL, update = TRUE) {
+  if(missing(update) && (is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)))
+    warn.once(key="Jinhom.update",
+              "The behaviour of Jinhom when lambda is a ppm object",
+              "has changed (in spatstat 1.37-0 and later).",
+              "See help(Jinhom)")
+        
+  GX <- Ginhom(X, lambda=lambda, lmin=lmin, ...,
+               sigma=sigma, varcov=varcov, r=r, breaks=breaks, ratio=FALSE, update=update)
+  r <- GX$r
+  FX <- Finhom(X, lambda=lambda, lmin=lmin, ...,
+               sigma=sigma, varcov=varcov, r=r, ratio=FALSE, update=update)
+  JX <- eval.fv((1-GX)/(1-FX))
+  # relabel the fv object
+  JX <- rebadge.fv(JX, quote(J[inhom](r)), c("J","inhom"),
+                  names(JX), new.labl=attr(GX, "labl"))
+  # tack on extra info
+  attr(JX, "G") <- GX
+  attr(JX, "F") <- FX
+  attr(JX, "dangerous") <- attr(GX, "dangerous")
+  return(JX)
+}
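+
+## Example (a minimal sketch):
+##   X <- rpoispp(function(x,y) { 100 * x })
+##   J <- Jinhom(X, sigma=0.1)   # combines Ginhom and Finhom on the same r grid
+##   plot(J)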
diff --git a/R/Jmulti.R b/R/Jmulti.R
new file mode 100755
index 0000000..43a32af
--- /dev/null
+++ b/R/Jmulti.R
@@ -0,0 +1,178 @@
+#	Jmulti.S
+#
+#	Usual invocations to compute multitype J function(s)
+#	if F and G are not required 
+#
+#	$Revision: 4.39 $	$Date: 2014/10/24 00:22:30 $
+#
+#
+#
+"Jcross" <-
+function(X, i, j, eps=NULL, r=NULL, breaks=NULL, ..., correction=NULL) {
+#
+#       multitype J function J_{ij}(r)
+#  
+#	X:		point pattern (an object of class 'ppp')
+#       i, j:           types for which J_{i,j}(r) is calculated  
+#	eps:		raster grid mesh size for distance transform
+#				(unless specified by X$window)
+#       r:              (optional) values of argument r  
+#	breaks:		(optional) breakpoints for argument r
+#
+  X <- as.ppp(X)
+  if(!is.marked(X))
+    stop(paste("point pattern has no", sQuote("marks")))
+  stopifnot(is.multitype(X))
+#
+  marx <- marks(X, dfok=FALSE)
+  if(missing(i)) i <- levels(marx)[1]
+  if(missing(j)) j <- levels(marx)[2]
+#
+  I <- (marx == i)
+  if(sum(I) == 0)
+    stop(paste("No points have mark =", i))
+#        
+  if(i == j)
+    result <- Jest(X[I], eps=eps, r=r, breaks=breaks, correction=correction)
+  else {
+    J <- (marx == j)
+    result <- Jmulti(X, I, J,
+                     eps=eps, r=r, breaks=breaks, disjoint=TRUE,
+                     correction=correction)
+  }
+  iname <- make.parseable(paste(i))
+  jname <- make.parseable(paste(j))
+  result <-
+    rebadge.fv(result,
+               substitute(J[i,j](r),
+                          list(i=iname,j=jname)),
+               c("J", paste0("list(", iname, ",", jname, ")")),
+               new.yexp=substitute(J[list(i,j)](r),
+                                      list(i=iname,j=jname)))
+  return(result)
+}
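+
+## Example (a minimal sketch):
+##   Jc <- Jcross(amacrine, "off", "on")
+##   plot(Jc)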
+
+"Jdot" <-
+function(X, i, eps=NULL, r=NULL, breaks=NULL, ..., correction=NULL) {
+#  
+#    multitype J function J_{i\dot}(r)
+#  
+#	X:		point pattern (an object of class 'ppp')
+#       i:              mark i for which we calculate J_{i\cdot}(r)  
+#	eps:		raster grid mesh size for distance transform
+#				(unless specified by X$window)
+#       r:              (optional) values of argument r  
+#	breaks:		(optional) breakpoints for argument r
+#
+  X <- as.ppp(X)
+  if(!is.marked(X))
+    stop(paste("point pattern has no", sQuote("marks")))
+  stopifnot(is.multitype(X))
+#
+  marx <- marks(X, dfok=FALSE)
+  if(missing(i)) i <- levels(marx)[1]
+#  
+  I <- (marx == i)
+  if(sum(I) == 0)
+    stop(paste("No points have mark =", i))
+  J <- rep.int(TRUE, X$n)
+#  
+  result <- Jmulti(X, I, J,
+                   eps=eps, r=r, breaks=breaks, disjoint=FALSE,
+                   correction=correction)
+  iname <- make.parseable(paste(i))
+  result <-
+    rebadge.fv(result,
+               substitute(J[i ~ dot](r), list(i=iname)),
+               c("J", paste(iname, "~ symbol(\"\\267\")")),
+               new.yexp=substitute(J[i ~ symbol("\267")](r), list(i=iname)))
+  return(result)
+}
+
+"Jmulti" <- 	
+function(X, I, J, eps=NULL, r=NULL, breaks=NULL, ..., disjoint=NULL,
+         correction=NULL) {
+#  
+#    multitype J function (generic engine)
+#  
+#	X		marked point pattern (of class ppp)
+#	
+#	I,J		logical vectors of length equal to the number of points
+#			and identifying the two subsets of points to be
+#			compared.
+#  
+#	eps:		raster grid mesh size for distance transform
+#				(unless specified by X$window)
+#  
+#       r:              (optional) values of argument r  
+#	breaks:		(optional) breakpoints for argument r
+#  
+#
+  X <- as.ppp(X)
+  W <- X$window
+  rmaxdefault <- rmax.rule("J", W)
+  brks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)$val
+  I <- ppsubset(X, I)
+  J <- ppsubset(X, J)
+  if(is.null(I) || is.null(J))
+    stop("I and J must be valid subset indices")
+  FJ <- Fest(X[J], eps, breaks=brks, correction=correction)
+  GIJ <- Gmulti(X, I, J, breaks=brks, disjoint=disjoint, correction=correction)
+  rvals <- FJ$r
+  Fnames <- names(FJ)
+  Gnames <- names(GIJ)
+  bothnames <- Fnames[Fnames %in% Gnames]
+  # initialise fv object
+  alim <- attr(FJ, "alim")
+  fname <- c("J", "list(I,J)")
+  Z <- fv(data.frame(r=rvals, theo=1),
+          "r", quote(J[I,J](r)), "theo",
+          . ~ r, alim,
+          c("r", makefvlabel(NULL, NULL, fname, "pois")),
+          c("distance argument r", "theoretical Poisson %s"),
+          fname=fname,
+          yexp=quote(J[list(I,J)](r)))
+  # add pieces manually
+  ratio <- function(a, b) {
+    result <- a/b
+    result[ b == 0 ] <- NA
+    result
+  }
+  if("raw" %in% bothnames) {
+    Jun <- ratio(1-GIJ$raw, 1-FJ$raw)
+    Z <- bind.fv(Z, data.frame(un=Jun),
+                 makefvlabel(NULL, "hat", fname, "un"),
+                 "uncorrected estimate of %s", "un")
+  }
+  if("rs" %in% bothnames) {
+    Jrs <- ratio(1-GIJ$rs, 1-FJ$rs)
+    Z <- bind.fv(Z, data.frame(rs=Jrs),
+                 makefvlabel(NULL, "hat", fname, "rs"),
+                 "border corrected estimate of %s", "rs")
+  }
+  if("han" %in% Gnames && "cs" %in% Fnames) {
+    Jhan <- ratio(1-GIJ$han, 1-FJ$cs)
+    Z <- bind.fv(Z, data.frame(han=Jhan),
+                 makefvlabel(NULL, "hat", fname, "han"),
+                 "Hanisch-style estimate of %s", "han")
+  }
+  if("km" %in% bothnames) {
+    Jkm <- ratio(1-GIJ$km, 1-FJ$km)
+    Z <- bind.fv(Z, data.frame(km=Jkm),
+                 makefvlabel(NULL, "hat", fname, "km"),
+                 "Kaplan-Meier estimate of %s", "km")
+    if("hazard" %in% names(GIJ) && "hazard" %in% names(FJ)) {
+      Jhaz <- GIJ$hazard - FJ$hazard
+      Z <- bind.fv(Z, data.frame(hazard=Jhaz), "hazard(r)",
+                   "Kaplan-Meier estimate of derivative of log(%s)")
+    } 
+  }
+# set default plotting values and order
+  nama <- names(Z)
+  fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))])
+# add other info
+  attr(Z, "G") <- GIJ
+  attr(Z, "F") <- FJ
+  unitname(Z) <- unitname(X)
+  return(Z)
+}
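+
+## Example (a minimal sketch; for disjoint I and J this matches Jcross):
+##   I <- marks(amacrine) == "on"
+##   J <- marks(amacrine) == "off"
+##   Jm <- Jmulti(amacrine, I, J)
+##   plot(Jm)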
diff --git a/R/Kcom.R b/R/Kcom.R
new file mode 100755
index 0000000..34c3f17
--- /dev/null
+++ b/R/Kcom.R
@@ -0,0 +1,399 @@
+#
+#  Kcom.R
+#
+#   model compensated K-function
+#
+# $Revision: 1.14 $ $Date: 2015/10/21 09:06:57 $
+#
+
+Kcom <- local({
+
+  Kcom <- function(object, r=NULL, breaks=NULL, ..., 
+                   correction=c("border", "isotropic", "translate"),
+                   conditional=!is.poisson(object),
+                   restrict=FALSE,
+                   model=NULL, 
+                   trend=~1, interaction=Poisson(), rbord=reach(interaction),
+                   compute.var=TRUE,
+                   truecoef=NULL, hi.res=NULL) {
+  if(inherits(object, "ppm")) {
+    fit <- object
+  } else if(is.ppp(object) || inherits(object, "quad")) {
+    if(is.ppp(object)) object <- quadscheme(object, ...)
+    if(!is.null(model)) {
+      fit <- update(model, Q=object, forcefit=TRUE)
+    } else {
+      fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord,
+                 forcefit=TRUE)
+    }
+  } else 
+    stop("object should be a fitted point process model or a point pattern")
+  
+  if(missing(conditional) || is.null(conditional))
+    conditional <- !is.poisson(fit)
+
+#  rfixed <- !is.null(r) || !is.null(breaks)
+  
+  # Extract data and window
+  Q <- quad.ppm(fit, drop=FALSE)
+  X <- data.ppm(fit)
+  Win <- X$window
+
+  # selection of edge corrections
+  correction.given <- !missing(correction) && !is.null(correction)
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             border="border",
+                             isotropic="isotropic",
+                             Ripley="isotropic",
+                             ripley="isotropic",
+                             trans="translation",
+                             translate="translation",
+                             translation="translation",
+                             best="best"),
+                           multi=TRUE)
+  correction <- implemented.for.K(correction, Win$type, correction.given)
+
+  opt <- list(bord = any(correction == "border"),
+              tran = any(correction == "translation"),
+              ripl = any(correction == "isotropic"))
+  if(sum(unlist(opt)) == 0)
+    stop("No corrections selected")
+  
+  # edge correction algorithm
+  algo <- if(!conditional) "classical" else
+          if(restrict) "restricted" else "reweighted"
+
+  # conditioning on border region?
+  if(!conditional) {
+    Wfree <- Win
+  } else {
+    rbord <- fit$rbord
+    Wfree <- erosion(Win, rbord)
+    if(restrict) {
+      retain <- inside.owin(union.quad(Q), , Wfree)
+      # Throw away boundary data
+      Q <- Q[Wfree]
+      X <- X[Wfree]
+      Win <- Wfree
+    }
+  }
+  
+  # Extract quadrature info
+  U <- union.quad(Q)
+  Z <- is.data(Q) # indicator data/dummy
+  E <- equalsfun.quad(Q)
+  WQ <- w.quad(Q)  # quadrature weights
+
+  # quadrature points used 
+  USED <- if(algo == "reweighted") (bdist.points(U) > rbord) else rep.int(TRUE, U$n)
+
+  # basic statistics
+  npts <- npoints(X)
+  areaW <- area(Win)
+  lambda <- npts/areaW
+  lambda2 <- npts * (npts - 1)/(areaW^2)
+
+  # adjustments to account for restricted domain of pseudolikelihood
+  if(algo == "reweighted") {
+    npts.used <- sum(Z & USED)
+    area.used <- sum(WQ[USED])
+#    lambda.used <- npts.used/area.used
+#    lambda2.used <- npts.used * (npts.used - 1)/(area.used^2)
+  } else {
+    npts.used <- npts
+    area.used <- areaW
+#    lambda.used <- lambda
+#    lambda2.used <- lambda2
+  }
+  
+  # 'r' values
+  rmaxdefault <- rmax.rule("K", if(restrict) Wfree else Win, npts/areaW)
+  breaks <- handle.r.b.args(r, breaks, Wfree, rmaxdefault=rmaxdefault)
+  r <- breaks$r
+#  nr <- length(r)
+  rmax <- breaks$max
+
+  
+  # recommended range of r values
+  alim <- c(0, min(rmax, rmaxdefault))
+        
+  # this will be the output data frame
+  K <- data.frame(r=r, pois=pi * r^2)
+  desc <- c("distance argument r", "expected %s for CSR")
+  K <- fv(K, "r", substitute(K(r), NULL),
+            "pois", , alim, c("r","%s[pois](r)"), desc, fname="K")
+
+  ############### start computing ##################
+
+  # residuals
+  resid <- residuals(fit, type="raw", drop=FALSE,
+                    new.coef=truecoef, quad=hi.res)
+  resval  <- with(resid, "increment")
+  rescts  <- with(resid, "continuous")
+  if(restrict) {
+    # keep only data inside Wfree
+    resval <- resval[retain]
+    rescts <- rescts[retain]
+  }
+  
+  # close pairs of points
+  # (quadrature point to data point)
+  clos <- crosspairs(U, X, rmax, what="ijd")
+  dIJ <- clos$d
+  I   <- clos$i
+  J   <- clos$j
+  UI <- U[I]
+  XJ <- X[J]
+  EIJ <- E(I, J) # TRUE if points are identical, U[I[k]] == X[J[k]] 
+  ZI <- Z[I]     # TRUE if U[I[k]] is a data point
+  DD <- ZI & !EIJ  # TRUE for pairs of distinct data points only
+#  nDD <- sum(DD)
+
+  # determine whether a quadrature point will be used in integral
+  okI <- USED[I]
+  
+  if(spatstat.options("Kcom.remove.zeroes"))
+    okI <- okI & !EIJ
+  
+  # residual weights
+#  wIJ <- ifelseXY(EIJ, rescts[I], resval[I])
+  # absolute weight for continuous integrals
+  wc   <- -rescts
+  wcIJ <- -rescts[I]
+
+  ####################################################
+  
+  if(opt$bord) {
+    # border method
+    # Compute distances to boundary
+    # (in restricted case, the window of U has been adjusted)
+    b <- bdist.points(U)
+    bI <- b[I]
+    # reduced sample for K(r) of data only
+    RSX <- Kount(dIJ[DD & okI], bI[DD & okI], b[Z & USED], breaks)
+#    Kb <- RSX$numerator/(lambda.used * RSX$denom.count)
+    Kb <- RSX$numerator/(lambda * RSX$denom.count)
+    K <- bind.fv(K, data.frame(border=Kb), "hat(%s)[bord](r)",
+                 nzpaste(algo,
+                         "border-corrected nonparametric estimate of %s"),
+                 "border")
+    # reduced sample for adjustment integral
+    RSD <- Kwtsum(dIJ[okI], bI[okI], wcIJ[okI],
+                  b[Z & USED], rep.int(1, npts.used), breaks)
+#    lambdaU <- (npts.used + 1)/area.used
+    lambdaU <- (npts + 1)/areaW
+    Kb <- RSD$numerator/((RSD$denominator + 1) * lambdaU)
+
+    K <- bind.fv(K, data.frame(bcom=Kb), "bold(C)~hat(%s)[bord](r)",
+                 nzpaste("model compensator of",
+                         algo, "border-corrected %s"),
+                 "border")
+  }
+  if(opt$tran) {
+    # translation correction
+    edgewt <- switch(algo,
+                     classical  = edge.Trans(UI, XJ, paired=TRUE),
+                     restricted = edge.Trans(UI, XJ, paired=TRUE),
+                     reweighted = edge.Trans.modif(UI, XJ, Win, Wfree,
+                       paired=TRUE))
+    wh   <- whist(dIJ[okI], breaks$val, (edgewt * wcIJ)[okI])
+    whDD <- whist(dIJ[DD & okI], breaks$val, edgewt[DD & okI])    
+    Ktrans <- cumsum(whDD)/(lambda2 * area.used)
+    Ktrans[r >= rmax] <- NA
+    K <- bind.fv(K, data.frame(trans=Ktrans), "hat(%s)[trans](r)",
+                 nzpaste(algo,
+                         "translation-corrected nonparametric estimate of %s"),
+                 "trans")
+#    lambda2U <- (npts.used + 1) * npts.used/(area.used^2)
+    lambda2U <- (npts + 1) * npts/(areaW^2)
+    Ktrans <- cumsum(wh)/(lambda2U * area.used)
+    Ktrans[r >= rmax] <- NA
+    K <- bind.fv(K, data.frame(tcom=Ktrans), "bold(C)~hat(%s)[trans](r)",
+                 nzpaste("model compensator of",
+                         algo,
+                         "translation-corrected %s"),
+                 "trans")
+  }
+  if(opt$ripl) {
+    # Ripley isotropic correction
+    edgewt <- edge.Ripley(UI, matrix(dIJ, ncol=1))
+    wh   <- whist(dIJ[okI],     breaks$val, (edgewt * wcIJ)[okI])
+    whDD <- whist(dIJ[DD & okI], breaks$val, edgewt[DD & okI])    
+#    Kiso <- cumsum(whDD)/(lambda2.used * area.used)
+    Kiso <- cumsum(whDD)/(lambda2 * area.used)
+    Kiso[r >= rmax] <- NA
+    K <- bind.fv(K, data.frame(iso=Kiso), "hat(%s)[iso](r)",
+                 nzpaste(algo,
+                         "isotropic-corrected nonparametric estimate of %s"),
+                 "iso")
+#    lambda2U <- (npts.used + 1) * npts.used/(area.used^2)
+    lambda2U <- (npts + 1) * npts/(areaW^2)    
+    Kiso <- cumsum(wh)/(lambda2U * area.used)
+    Kiso[r >= rmax] <- NA
+    K <- bind.fv(K, data.frame(icom=Kiso), "bold(C)~hat(%s)[iso](r)",
+                 nzpaste("model compensator of",
+                         algo, "isotropic-corrected %s"),
+                 "iso")
+    #
+    if(compute.var) {
+      savedotnames <- fvnames(K, ".")
+      # compute contribution to compensator from each quadrature point
+      dOK <- dIJ[okI]
+      eOK <- edgewt[okI]
+      iOK <- I[okI]
+      denom <- lambda2U * area.used
+      variso <- varsumiso <- 0 * Kiso
+      for(i in sort(unique(iOK))) {
+        relevant <- (iOK == i)
+        tincrem <- whist(dOK[relevant], breaks$val, eOK[relevant])
+        localterm <- cumsum(tincrem)/denom
+        variso <- variso + wc[i] * localterm^2
+        if(Z[i])
+          varsumiso <- varsumiso + localterm^2
+      }
+      sdiso <- sqrt(variso)
+      K <- bind.fv(K, data.frame(ivar=variso,
+                                 isd =sdiso,
+                                 ihi = 2*sdiso,
+                                 ilo = -2*sdiso,
+                                 ivarsum=varsumiso),
+                   c("bold(C)^2~hat(%s)[iso](r)",
+                     "sqrt(bold(C)^2~hat(%s)[iso](r))",
+                     "bold(R)~hat(%s)[hi](r)",
+                     "bold(R)~hat(%s)[lo](r)",
+                     "hat(C)^2~hat(%s)[iso](r)"),
+                   c("Poincare variance of isotropic-corrected %s",
+                     "sqrt(Poincare variance) of isotropic-corrected %s",
+                     "upper critical band for isotropic-corrected %s",
+                     "lower critical band for isotropic-corrected %s",
+                     "data estimate of Poincare variance of %s"),
+                   "iso")
+      # fvnames(K, ".") <- c(savedotnames, "isd")
+      fvnames(K, ".") <- savedotnames
+    }
+  }
+
+  # default is to display all corrections
+  formula(K) <- . ~ r
+  unitname(K) <- unitname(X)
+  # secret tag used by 'Kres'
+  attr(K, "maker") <- "Kcom"
+  return(K)
+}
+
+# `reweighted' translation edge correction
+edge.Trans.modif <- function(X, Y=X, WX=X$window, WY=Y$window,
+                             exact=FALSE, paired=FALSE,
+                             trim=spatstat.options("maxedgewt")) {
+
+  # computes edge correction factor
+  #  f = area(WY)/area(intersect.owin(WY, shift(WX, X[i] - Y[j])))
+  
+  X <- as.ppp(X, WX)
+
+  W <- X$window
+  x <- X$x
+  y <- X$y
+
+  Y <- as.ppp(Y, WY)
+  xx <- Y$x
+  yy <- Y$y
+
+  nX <- npoints(X)
+  nY <- npoints(Y)
+  if(paired && (nX != nY))
+    stop("X and Y should have equal length when paired=TRUE")
+  
+  # For irregular polygons, exact evaluation is very slow;
+  # so use pixel approximation, unless exact=TRUE
+  if(!exact) {
+    if(WX$type == "polygonal")
+      WX <- as.mask(WX)
+    if(WY$type == "polygonal")
+      WY <- as.mask(WY)
+  }
+
+  typeX <- WX$type
+  typeY <- WY$type
+
+  if(typeX == "rectangle" && typeY == "rectangle") {
+    # Fast code for this case
+    if(!paired) {
+      DX <- abs(outer(x,xx,"-"))
+      DY <- abs(outer(y,yy,"-"))
+    } else {
+      DX <- abs(xx - x)
+      DY <- abs(yy - y)
+    }
+    A <- WX$xrange
+    B <- WX$yrange
+    a <- WY$xrange 
+    b <- WY$yrange
+    # compute width and height of intersection
+    wide  <- pmin.int(a[2], A[2]+DX) - pmax(a[1], A[1]+DX)
+    high  <- pmin.int(b[2], B[2]+DY) - pmax(b[1], B[1]+DY)
+    # edge correction weight
+    weight <- diff(a) * diff(b) / (wide * high)
+    if(!paired)
+      weight <- matrix(weight, nrow=X$n, ncol=Y$n)
+  } else if(typeX %in% c("rectangle", "polygonal")
+            && typeY %in% c("rectangle", "polygonal")) {
+    # This code is SLOW
+    WX <- as.polygonal(WX)
+    WY <- as.polygonal(WY)
+    a <- area(WY)   # as in the formula above: f = area(WY)/overlap
+    if(!paired) {
+      weight <- matrix(, nrow=nX, ncol=nY)
+      if(nX > 0 && nY > 0) {
+        for(i in seq_len(nX)) {
+          X.i <- c(x[i], y[i])
+          for(j in seq_len(nY)) {
+            shiftvector <- X.i - c(xx[j],yy[j])
+            WXshift <- shift(WX, shiftvector)
+            b <- overlap.owin(WY, WXshift)
+            weight[i,j] <- a/b
+          }
+        }
+      }
+    } else {
+      nX <- npoints(X)
+      weight <- numeric(nX)
+      if(nX > 0) {
+        for(i in seq_len(nX)) {
+          shiftvector <- c(x[i],y[i]) - c(xx[i],yy[i])
+          WXshift <- shift(WX, shiftvector)
+          b <- overlap.owin(WY, WXshift)
+          weight[i] <- a/b
+        }
+      }
+    }
+  } else {
+    WX <- as.mask(WX)
+    WY <- as.mask(WY)
+    # make difference vectors
+    if(!paired) {
+      DX <- outer(x,xx,"-")
+      DY <- outer(y,yy,"-")
+    } else {
+      DX <- x - xx
+      DY <- y - yy
+    }
+    # compute set cross-covariance
+    g <- setcov(WY,WX)
+    # evaluate set cross-covariance at these vectors
+    gvalues <- lookup.im(g, as.vector(DX), as.vector(DY),
+                         naok=TRUE, strict=FALSE)
+    weight <- area(WY)/gvalues
+  }
+
+  # clip high values
+  if(length(weight) > 0)
+    weight <- pmin.int(weight, trim)
+  if(!paired) 
+    weight <- matrix(weight, nrow=X$n, ncol=Y$n)
+  return(weight)
+}
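+
+# Worked example of the rectangular case above (editor's note): for unit
+# squares WX = WY = [0,1]^2 and a pair of points displaced by (0.5, 0),
+# the shifted copy of WX overlaps WY in a 0.5 x 1 rectangle, so
+#   weight = diff(a) * diff(b) / (wide * high) = 1 / (0.5 * 1) = 2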
+
+Kcom
+})
diff --git a/R/Kest.R b/R/Kest.R
new file mode 100755
index 0000000..8acbcab
--- /dev/null
+++ b/R/Kest.R
@@ -0,0 +1,1018 @@
+#
+#	Kest.R		Estimation of K function
+#
+#	$Revision: 5.120 $	$Date: 2017/06/05 10:31:58 $
+#
+#
+# -------- functions ----------------------------------------
+#	Kest()		compute estimate of K
+#                       using various edge corrections
+#
+#
+# -------- standard arguments ------------------------------	
+#	X		point pattern (of class 'ppp')
+#
+#	r		distance values at which to compute K	
+#
+# -------- standard output ------------------------------
+#      A data frame (class "fv") with columns named
+#
+#	r:		same as input
+#
+#	trans:		K function estimated by translation correction
+#
+#	iso:		K function estimated by Ripley isotropic correction
+#
+#	theo:		K function for Poisson ( = pi * r ^2 )
+#
+#	border:		K function estimated by border method
+#			using standard formula (denominator = count of points)
+#
+#       bord.modif:	K function estimated by border method
+#			using modified formula 
+#			(denominator = area of eroded window
+#			(denominator = area of eroded window)
+# ------------------------------------------------------------------------
+
+"Lest" <- function(X, ...) {
+  K <- Kest(X, ...)
+  L <- eval.fv(sqrt(K/pi), dotonly=FALSE)
+  # handle variance estimates
+  if(any(varcols <- colnames(K) %in% c("rip", "ls"))) {
+    r <- with(L, .x)
+    L[,varcols] <- as.data.frame(K)[,varcols]/(2 * pi * r)^2
+    # fix 0/0
+    n <- npoints(X)
+    A <- area(Window(X))
+    if(any(colnames(K) == "rip"))
+      L[r == 0, "rip"] <- (2 * A/(n-1)^2)/(4 * pi)
+    if(any(colnames(K) == "ls"))
+      L[r == 0, "ls"]  <- (2 * A/(n * (n-1)))/(4 * pi)
+  }
+  # relabel the fv object
+  L <- rebadge.fv(L, quote(L(r)), "L", names(K), new.labl=attr(K, "labl"))
+  #
+  return(L)  
+}
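+
+# Illustrative usage (editor's sketch): since L(r) = sqrt(K(r)/pi),
+# a homogeneous Poisson process has L(r) = r, which is easy to judge
+# by eye:
+#   L <- Lest(cells)           # 'cells' is a standard spatstat dataset
+#   plot(L, . - r ~ r)         # plot the centred L function, L(r) - r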
+
+"Kest"<-
+function(X, ..., r=NULL, rmax=NULL, breaks=NULL, 
+         correction=c("border", "isotropic", "Ripley", "translate"),
+         nlarge=3000, domain=NULL, var.approx=FALSE,
+         ratio=FALSE)
+{
+  verifyclass(X, "ppp")
+  nlarge.given <- !missing(nlarge) && !is.null(nlarge)
+  rfixed <- !is.null(r) || !is.null(breaks)
+  npts <- npoints(X)
+  W <- X$window
+  areaW <- area(W)
+  lambda <- npts/areaW
+  lambda2 <- (npts * (npts - 1))/(areaW^2)
+
+  if(!is.null(domain)) {
+    # estimate based on contributions from a subdomain
+    domain <- as.owin(domain)
+    if(!is.subset.owin(domain, W))
+      stop(paste(dQuote("domain"),
+                 "is not a subset of the window of X"))
+    # trick Kdot() into doing it
+    indom <- factor(inside.owin(X$x, X$y, domain), levels=c(FALSE,TRUE))
+    Kd <- Kdot(X %mark% indom, i="TRUE",
+               r=r, breaks=breaks, correction=correction,
+               ratio=ratio)
+    # relabel and exit
+    Kd <- rebadge.fv(Kd, quote(K(r)), "K")
+    return(Kd)
+  }
+
+  rmaxdefault <- rmax %orifnull% rmax.rule("K", W, lambda)
+  if(is.infinite(rmaxdefault)) rmaxdefault <- diameter(W)
+  breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)
+  r <- breaks$r
+  rmax <- breaks$max
+
+  # choose correction(s)
+  correction.given <- !missing(correction) && !is.null(correction)
+  if(is.null(correction))
+    correction <- c("border", "isotropic", "Ripley", "translate")
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             border="border",
+                             "bord.modif"="bord.modif",
+                             isotropic="isotropic",
+                             Ripley="isotropic",
+                             trans="translate",
+                             translate="translate",
+                             translation="translate",
+                             rigid="rigid",
+                             good="good",
+                             best="best"),
+                           multi=TRUE)
+#  best.wanted <- ("best" %in% correction)
+  # replace 'good' by the optimal choice for this size of dataset
+  if("good" %in% correction)
+    correction[correction == "good"] <- good.correction.K(X)
+  # retain only corrections that are implemented for the window
+  correction <- implemented.for.K(correction, W$type, correction.given)
+  
+  # recommended range of r values
+  alim <- c(0, min(rmax, rmaxdefault))
+
+  ###########################################
+  # Efficient code for border correction and no correction
+  # Usable only if r values are evenly spaced from 0 to rmax
+  # Invoked automatically if number of points is large
+
+  can.do.fast <- breaks$even
+  large.n    <- (npts >= nlarge)
+#  demand.best <- correction.given && best.wanted
+  large.n.trigger <- large.n && !correction.given
+  fastcorrections <- c("border", "bord.modif", "none")
+  fastdefault     <- "border"
+  correction.fast   <- all(correction %in% fastcorrections)
+  will.do.fast <- can.do.fast && (correction.fast || large.n.trigger)
+  asked <- correction.fast || (nlarge.given && large.n.trigger)
+  if(asked && !can.do.fast)
+    warning("r values not evenly spaced - cannot use efficient code")
+  if(will.do.fast) {
+    # determine correction(s)
+    ok <- correction %in% fastcorrections
+    correction <- if(any(ok)) correction[ok] else fastdefault
+    bord <- any(correction %in% c("border", "bord.modif"))
+    none <- any(correction == "none")
+    if(!all(ok)) {
+      # some corrections were overridden; notify user
+      corx <- c(if(bord) "border correction estimate" else NULL,
+                if(none) "uncorrected estimate" else NULL)
+      corx <- paste(corx, collapse=" and ")
+      message(paste("number of data points exceeds",
+                    nlarge, "- computing", corx , "only"))
+    }
+    # restrict r values to recommended range, unless specifically requested
+    if(!rfixed) 
+      r <- seq(from=0, to=alim[2], length.out=length(r))
+    if(bord)
+      Kb <- Kborder.engine(X, max(r), length(r), correction, ratio=ratio)
+    if(none)
+      Kn <- Knone.engine(X, max(r), length(r), ratio=ratio)
+    if(bord && none) {
+      Kn <- Kn[ , names(Kn) != "theo"]
+      yn <- fvnames(Kb, ".y")
+      Kbn <- if(!ratio) bind.fv(Kb, Kn, preferred=yn) else
+             bind.ratfv(Kb, Kn, preferred=yn)
+      return(Kbn)
+    }
+    if(bord) return(Kb)
+    if(none) return(Kn) 
+  }
+
+  do.fast.rectangle <-
+    can.do.fast && is.rectangle(W) &&
+      spatstat.options("use.Krect") && !any(correction == "rigid")
+  
+  if(do.fast.rectangle) {
+    ###########################################
+    ## Fast code for rectangular window
+    ###########################################
+    K <-  Krect.engine(X, rmax, length(r), correction, ratio=ratio)
+    attr(K, "alim") <- alim
+  } else {
+    ###########################################
+    ## Slower code
+    ###########################################
+
+    ## this will be the output data frame
+    Kdf <- data.frame(r=r, theo = pi * r^2)
+    desc <- c("distance argument r", "theoretical Poisson %s")
+    denom <- lambda2 * areaW
+    K <- ratfv(Kdf, NULL, denom,
+               "r", quote(K(r)),
+               "theo", NULL, alim, c("r","%s[pois](r)"), desc, fname="K",
+               ratio=ratio)
+  
+    ## identify all close pairs
+    rmax <- max(r)
+    what <- 
+	if(any(correction %in% c("translate", "isotropic"))) "all" else "ijd"
+    close <- closepairs(X, rmax, what=what)
+    DIJ <- close$d
+
+    ## precompute set covariance of window
+    gW <- NULL
+    if(any(correction %in% c("translate", "rigid", "isotropic")))
+      gW <- setcov(W)
+    
+    if(any(correction == "none")) {
+      ## uncorrected! For demonstration purposes only!
+      wh <- whist(DIJ, breaks$val)  # no weights
+      numKun <- cumsum(wh)
+      denKun <- lambda2 * areaW
+      ## uncorrected estimate of K
+      K <- bind.ratfv(K,
+                      data.frame(un=numKun), denKun,
+                      "hat(%s)[un](r)",
+                      "uncorrected estimate of %s",
+                      "un",
+                      ratio=ratio)
+    }
+  
+    if(any(correction == "border" | correction == "bord.modif")) {
+      ## border method
+      ## Compute distances to boundary
+      b <- bdist.points(X)
+      I <- close$i
+      bI <- b[I]
+      ## apply reduced sample algorithm
+      RS <- Kount(DIJ, bI, b, breaks)
+      if(any(correction == "bord.modif")) {
+        ## modified border correction
+        denom.area <- eroded.areas(W, r)
+        numKbm <- RS$numerator
+        denKbm <- lambda2 * denom.area
+        K <- bind.ratfv(K,
+                        data.frame(bord.modif=numKbm),
+                        data.frame(bord.modif=denKbm),
+                        "hat(%s)[bordm](r)",
+                        "modified border-corrected estimate of %s",
+                        "bord.modif",
+                        ratio=ratio)
+      }
+      if(any(correction == "border")) {
+        numKb <- RS$numerator
+        denKb <- lambda * RS$denom.count
+        K <- bind.ratfv(K,
+                        data.frame(border=numKb), 
+                        data.frame(border=denKb), 
+                        "hat(%s)[bord](r)",
+                        "border-corrected estimate of %s",
+                        "border",
+                        ratio=ratio)
+      }
+    }
+
+    if(any(correction == "translate")) {
+      ## Ohser-Stoyan translation correction
+      edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE,
+                           gW = gW, give.rmax=TRUE)
+      wh <- whist(DIJ, breaks$val, edgewt)
+      numKtrans <- cumsum(wh)
+      denKtrans <- lambda2 * areaW
+      h <- attr(edgewt, "rmax")
+      numKtrans[r >= h] <- NA
+      K <- bind.ratfv(K,
+                      data.frame(trans=numKtrans),
+                      denKtrans,
+                      "hat(%s)[trans](r)",
+                      "translation-corrected estimate of %s",
+                      "trans",
+                      ratio=ratio)
+    }
+    if(any(correction == "rigid")) {
+      ## Ohser-Stoyan rigid motion correction
+      CW <- rotmean(gW)
+      edgewt <- areaW/as.function(CW)(DIJ)
+      wh <- whist(DIJ, breaks$val, edgewt)
+      numKrigid <- cumsum(wh)
+      denKrigid <- lambda2 * areaW
+      h <- rmax.Rigid(X, gW) #sic: X not W
+      numKrigid[r >= h] <- NA
+      K <- bind.ratfv(K,
+                      data.frame(rigid=numKrigid),
+                      denKrigid,
+                      "hat(%s)[rigid](r)",
+                      "rigid motion-corrected estimate of %s",
+                      "rigid",
+                      ratio=ratio)
+    }
+    if(any(correction == "isotropic")) {
+      ## Ripley isotropic correction
+      XI <- ppp(close$xi, close$yi, window=W, check=FALSE)
+      edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1))
+      wh <- whist(DIJ, breaks$val, edgewt)
+      numKiso <- cumsum(wh)
+      denKiso <- lambda2 * areaW
+      h <- boundingradius(W)
+      numKiso[r >= h] <- NA
+      K <- bind.ratfv(K,
+                      data.frame(iso=numKiso),
+                      denKiso,
+                      "hat(%s)[iso](r)",
+                      "Ripley isotropic correction estimate of %s",
+                      "iso",
+                      ratio=ratio)
+    }
+  }
+
+  #############################
+  ##  VARIANCE APPROXIMATION
+  #############################
+
+  if(var.approx) {
+    ## Compute variance approximations
+    A <- areaW
+    P <- perimeter(W)
+    n <- npts
+    ## Ripley asymptotic approximation
+    rip <- 2 * ((A/(n-1))^2) * (pi * r^2/A + 0.96 * P * r^3/A^2
+                                + 0.13 * (n/A) * P * r^5/A^2)
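+    ## (editor's note: this is the large-sample CSR variance approximation
+    ##  attributed to Ripley (1988) in the spatstat documentation)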
+    if(!ratio) {
+      K <- bind.fv(K, data.frame(rip=rip),
+                 "vR(r)", 
+                 "Ripley approximation to var(%s) under CSR",
+                 "iso")
+    } else {
+      den <- (n-1)^2
+      ripnum <- den * rip
+      ripden <- rep.int(den, length(rip))
+      K <- bind.ratfv(K,
+                      data.frame(rip=ripnum),
+                      data.frame(rip=ripden),
+                      "vR(r)", 
+                      "Ripley approximation to var(%s) under CSR",
+                      "iso")
+    }
+    if(W$type == "rectangle") {
+      # Lotwick-Silverman
+      a1r <- (0.21 * P * r^3 + 1.3 * r^4)/A^2
+      a2r <- (0.24 * P * r^5 + 2.62 * r^6)/A^3
+      # contains correction to typo on p52 of Diggle 2003
+      # cf Lotwick & Silverman 1982 eq (5)
+      br <- (pi * r^2/A) * (1 - pi * r^2/A) +
+        (1.0716 * P * r^3 + 2.2375 * r^4)/A^2
+      ls <- (A^2) * (2 * br - a1r + (n-2) * a2r)/(n*(n-1))
+      # add column 
+      if(!ratio) {
+        K <- bind.fv(K, data.frame(ls=ls), "vLS(r)",
+                     "Lotwick-Silverman approx to var(%s) under CSR",
+                     "iso")
+      } else {
+        den <- n*(n-1)
+        lsnum <- ls * den
+        lsden <- rep.int(den, length(ls))
+        K <- bind.ratfv(K,
+                        data.frame(ls=lsnum),
+                        data.frame(ls=lsden),
+                        "vLS(r)",
+                        "Lotwick-Silverman approx to var(%s) under CSR",
+                        "iso")
+      }
+    }
+  }
+
+  ### FINISH OFF #####
+  ## default plot will display all edge corrections
+  formula(K) <- . ~ r
+  nama <- rev(colnames(K))
+  fvnames(K, ".") <- setdiff(nama, c("r", "rip", "ls"))
+  ##
+  unitname(K) <- unitname(X)
+  # copy to other components
+  if(ratio)
+    K <- conform.ratfv(K)
+
+  return(K)
+}
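+
+# Illustrative usage (editor's sketch):
+#   X <- rpoispp(100)          # simulated CSR pattern, intensity 100
+#   K <- Kest(X, correction=c("isotropic", "translate"))
+#   plot(K)                    # estimates should track theo = pi*r^2
+#   Kr <- Kest(X, ratio=TRUE)  # retain numerator/denominator so that
+#                              # estimates can later be pooled (pool.rat)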
+
+################################################################  
+#############  SUPPORTING ALGORITHMS ###########################
+################################################################  
+
+Kount <- function(dIJ, bI, b, breaks) {
+  #
+  # "internal" routine to compute border-correction estimate of K or Kij
+  #
+  # dIJ:  vector containing pairwise distances for selected I,J pairs
+  # bI:   corresponding vector of boundary distances for I
+  # b:    vector of ALL distances to window boundary
+  #
+  # breaks : breakpts object
+  #
+
+  stopifnot(length(dIJ) == length(bI))
+  
+  # determine which distances d_{ij} were observed without censoring
+  uncen <- (dIJ <= bI)
+  # histogram of noncensored distances
+  nco <- whist(dIJ[uncen], breaks$val)
+  # histogram of censoring times for noncensored distances
+  ncc <- whist(bI[uncen], breaks$val)
+  # histogram of censoring times (yes, this is a different total size)
+  cen <- whist(b, breaks$val)
+  # count censoring times beyond rightmost breakpoint
+  uppercen <- sum(b > max(breaks$val))
+  # go
+  RS <- reduced.sample(nco, cen, ncc, show=TRUE, uppercen=uppercen)
+  # extract results
+  numerator <- RS$numerator
+  denom.count <- RS$denominator
+  # check
+  if(length(numerator) != breaks$ncells)
+    stop("internal error: length(numerator) != breaks$ncells")
+  if(length(denom.count) != breaks$ncells)
+    stop("internal error: length(denom.count) != breaks$ncells")
+  
+  return(list(numerator=numerator, denom.count=denom.count))
+}
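+
+# Editor's note on the logic above: a pair (i,j) is 'uncensored' when
+# d_ij <= b_i, i.e. the interpoint distance is observed before point i's
+# distance to the boundary. reduced.sample() then assembles, for each r,
+# a numerator counting uncensored pairs with d_ij <= r < b_i, and a
+# denominator counting points with b_i > r (the 'reduced sample').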
+
+#### interface to C code for border method
+
+Kborder.engine <- function(X, rmax, nr=100,
+                           correction=c("border", "bord.modif"),
+                           weights=NULL, ratio=FALSE) 
+{
+  verifyclass(X, "ppp")
+  npts <- npoints(X)
+  W <- as.owin(X)
+
+  areaW <- area(W)
+  lambda <- npts/areaW
+  lambda2 <- (npts * (npts - 1))/(areaW^2)
+
+  if(missing(rmax))
+    rmax <- diameter(W)/4
+  r <- seq(from=0, to=rmax, length.out=nr)
+
+  # this will be the output data frame
+  Kdf <- data.frame(r=r, theo= pi * r^2)
+  desc <- c("distance argument r", "theoretical Poisson %s")
+  Kfv <- fv(Kdf, "r", quote(K(r)),
+          "theo", , c(0,rmax), c("r","%s[pois](r)"), desc, fname="K")
+
+  if(ratio) {
+    # save numerator and denominator
+    denom <- lambda2 * areaW
+    numK <- eval.fv(denom * Kfv)
+    denK <- eval.fv(denom + Kfv * 0)
+    attributes(numK) <- attributes(denK) <- attributes(Kfv)
+    numK <- rebadge.fv(numK, tags="theo",
+                       new.desc="numerator for theoretical Poisson %s")
+    denK <- rebadge.fv(denK, tags="theo",
+                       new.desc="denominator for theoretical Poisson %s")
+  }
+  
+  ####### start computing ############
+  # sort in ascending order of x coordinate
+  orderX <- fave.order(X$x)
+  Xsort <- X[orderX]
+  x <- Xsort$x
+  y <- Xsort$y
+  
+  # boundary distances
+  b <- bdist.points(Xsort)
+
+  # call the C code
+  if(is.null(weights)) {
+    # determine whether the numerator can be stored as an integer
+    bigint <- .Machine$integer.max
+    if(npts < sqrt(bigint)) {
+      # yes - use faster integer arithmetic
+      res <- .C("KborderI",
+                nxy=as.integer(npts),
+                x=as.double(x),
+                y=as.double(y),
+                b=as.double(b),
+                nr=as.integer(nr),
+                rmax=as.double(rmax),
+                numer=as.integer(integer(nr)),
+                denom=as.integer(integer(nr)),
+                PACKAGE = "spatstat")
+    } else {
+      # no - need double precision storage
+      res <- .C("KborderD",
+                nxy=as.integer(npts),
+                x=as.double(x),
+                y=as.double(y),
+                b=as.double(b),
+                nr=as.integer(nr),
+                rmax=as.double(rmax),
+                numer=as.double(numeric(nr)),
+                denom=as.double(numeric(nr)),
+                PACKAGE = "spatstat")
+    }
+    if("bord.modif" %in% correction) {
+      denom.area <- eroded.areas(W, r)
+      numKbm <- res$numer
+      denKbm <- lambda2 * denom.area
+      bm <- numKbm/denKbm
+      Kfv <- bind.fv(Kfv, data.frame(bord.modif=bm), "hat(%s)[bordm](r)",
+                   "modified border-corrected estimate of %s",
+                   "bord.modif")
+      if(ratio) {
+        # save numerator and denominator
+        numK <- bind.fv(numK, data.frame(bord.modif=numKbm),
+                        "hat(%s)[bordm](r)",
+                        "numerator of modified border-corrected estimate of %s",
+                        "bord.modif")
+        denK <- bind.fv(denK, data.frame(bord.modif=denKbm),
+                        "hat(%s)[bordm](r)",
+                        "denominator of modified border-corrected estimate of %s",
+                        "bord.modif")
+      }
+    }
+    if("border" %in% correction) {
+      numKb <- res$numer
+      denKb <- lambda * res$denom
+      bord <- numKb/denKb
+      Kfv <- bind.fv(Kfv, data.frame(border=bord), "hat(%s)[bord](r)",
+                   "border-corrected estimate of %s",
+                   "border")
+      if(ratio) {
+        numK <- bind.fv(numK, data.frame(border=numKb),
+                        "hat(%s)[bord](r)",
+                        "numerator of border-corrected estimate of %s",
+                        "border")
+        denK <- bind.fv(denK, data.frame(border=denKb),
+                        "hat(%s)[bord](r)",
+                        "denominator of border-corrected estimate of %s",
+                        "border")
+      }
+    }
+  } else {
+    # weighted version
+    if(is.numeric(weights)) {
+      if(length(weights) != X$n)
+        stop("length of weights argument does not match number of points in X")
+    } else {
+      wim <- as.im(weights, W)
+      weights <- wim[X, drop=FALSE]
+      if(anyNA(weights))
+        stop("domain of weights image does not contain all points of X")
+    }
+    weights.Xsort <- weights[orderX]
+    res <- .C("Kwborder",
+              nxy=as.integer(npts),
+              x=as.double(x),
+              y=as.double(y),
+              w=as.double(weights.Xsort),
+              b=as.double(b),
+              nr=as.integer(nr),
+              rmax=as.double(rmax),
+              numer=as.double(numeric(nr)),
+              denom=as.double(numeric(nr)),
+              PACKAGE = "spatstat")
+    if("border" %in% correction) {
+      bord <- res$numer/res$denom
+      Kfv <- bind.fv(Kfv, data.frame(border=bord), "hat(%s)[bord](r)",
+                     "border-corrected estimate of %s",
+                     "border")
+      if(ratio) {
+        numK <- bind.fv(numK, data.frame(border=res$numer),
+                        "hat(%s)[bord](r)",
+                        "numerator of border-corrected estimate of %s",
+                        "border")
+        denK <- bind.fv(denK, data.frame(border=res$denom),
+                        "hat(%s)[bord](r)",
+                        "denominator of border-corrected estimate of %s",
+                        "border")
+      }
+    }
+    if("bord.modif" %in% correction) {
+      numKbm <- res$numer
+      denKbm <- eroded.areas(W, r)
+      bm <- numKbm/denKbm
+      Kfv <- bind.fv(Kfv, data.frame(bord.modif=bm), "hat(%s)[bordm](r)",
+                     "modified border-corrected estimate of %s",
+                     "bord.modif")
+      if(ratio) {
+        # save numerator and denominator
+        numK <- bind.fv(numK, data.frame(bord.modif=numKbm),
+                        "hat(%s)[bordm](r)",
+                        "numerator of modified border-corrected estimate of %s",
+                        "bord.modif")
+        denK <- bind.fv(denK, data.frame(bord.modif=denKbm),
+                        "hat(%s)[bordm](r)",
+                        "denominator of modified border-corrected estimate of %s",
+                        "bord.modif")
+      }
+    }
+  }
+  ##
+  # default is to display them all
+  formula(Kfv) <- . ~ r
+  unitname(Kfv) <- unitname(X)
+  if(ratio) {
+    # finish off numerator and denominator
+    formula(numK) <- formula(denK) <- . ~ r
+    unitname(denK) <- unitname(numK) <- unitname(X)
+    # tack on to result
+    Kfv <- rat(Kfv, numK, denK, check=FALSE)
+  }
+  return(Kfv)
+}
+
+Knone.engine <- function(X, rmax, nr=100,
+                         weights=NULL, ratio=FALSE) 
+{
+  verifyclass(X, "ppp")
+  npts <- npoints(X)
+  W <- as.owin(X)
+
+  areaW <- area(W)
+#  lambda <- npts/areaW
+  lambda2 <- (npts * (npts - 1))/(areaW^2)
+  denom <- lambda2 * areaW
+
+  if(missing(rmax))
+    rmax <- diameter(W)/4
+  r <- seq(from=0, to=rmax, length.out=nr)
+
+  # this will be the output data frame
+  Kdf <- data.frame(r=r, theo= pi * r^2)
+  desc <- c("distance argument r", "theoretical Poisson %s")
+  Kfv <- fv(Kdf, "r", quote(K(r)),
+          "theo", , c(0,rmax), c("r","%s[pois](r)"), desc, fname="K")
+
+  if(ratio) {
+    # save numerator and denominator
+    numK <- eval.fv(denom * Kfv)
+    denK <- eval.fv(denom + Kfv * 0)
+    attributes(numK) <- attributes(denK) <- attributes(Kfv)
+    numK <- rebadge.fv(numK, tags="theo",
+                       new.desc="numerator for theoretical Poisson %s")
+    denK <- rebadge.fv(denK, tags="theo",
+                       new.desc="denominator for theoretical Poisson %s")
+  }
+  
+  ####### start computing ############
+  # sort in ascending order of x coordinate
+  orderX <- fave.order(X$x)
+  Xsort <- X[orderX]
+  x <- Xsort$x
+  y <- Xsort$y
+  
+  # call the C code
+  if(is.null(weights)) {
+    # determine whether the numerator can be stored as an integer
+    bigint <- .Machine$integer.max
+    if(npts < sqrt(bigint)) {
+      # yes - use faster integer arithmetic
+      res <- .C("KnoneI",
+                nxy=as.integer(npts),
+                x=as.double(x),
+                y=as.double(y),
+                nr=as.integer(nr),
+                rmax=as.double(rmax),
+                numer=as.integer(integer(nr)),
+                PACKAGE = "spatstat")
+    } else {
+      # no - need double precision storage
+      res <- .C("KnoneD",
+                nxy=as.integer(npts),
+                x=as.double(x),
+                y=as.double(y),
+                nr=as.integer(nr),
+                rmax=as.double(rmax),
+                numer=as.double(numeric(nr)),
+                PACKAGE = "spatstat")
+    }
+
+    numKun <- res$numer
+    denKun <- denom # = lambda2 * areaW
+    Kun <- numKun/denKun
+  } else {
+    # weighted version
+    if(is.numeric(weights)) {
+      if(length(weights) != X$n)
+        stop("length of weights argument does not match number of points in X")
+    } else {
+      wim <- as.im(weights, W)
+      weights <- wim[X, drop=FALSE]
+      if(anyNA(weights))
+        stop("domain of weights image does not contain all points of X")
+    }
+    weights.Xsort <- weights[orderX]
+    res <- .C("Kwnone",
+              nxy=as.integer(npts),
+              x=as.double(x),
+              y=as.double(y),
+              w=as.double(weights.Xsort),
+              nr=as.integer(nr),
+              rmax=as.double(rmax),
+              numer=as.double(numeric(nr)),
+              PACKAGE = "spatstat")
+    numKun <- res$numer
+    denKun <- sum(weights)
+    Kun <- numKun/denKun
+  }
+
+  # tack on to fv object
+  Kfv <- bind.fv(Kfv, data.frame(un=Kun), "hat(%s)[un](r)",
+                 "uncorrected estimate of %s",
+                 "un")
+  if(ratio) {
+    numK <- bind.fv(numK, data.frame(un=numKun),
+                    "hat(%s)[un](r)",
+                    "numerator of uncorrected estimate of %s",
+                    "un")
+    denK <- bind.fv(denK, data.frame(un=denKun),
+                    "hat(%s)[un](r)",
+                    "denominator of uncorrected estimate of %s",
+                    "un")
+  }
+  ##
+  # default is to display them all
+  formula(Kfv) <- . ~ r
+  unitname(Kfv) <- unitname(X)
+  if(ratio) {
+    # finish off numerator and denominator
+    formula(numK) <- formula(denK) <- . ~ r
+    unitname(denK) <- unitname(numK) <- unitname(X)
+    # tack on to result
+    Kfv <- rat(Kfv, numK, denK, check=FALSE)
+  }
+  return(Kfv)
+}
+
+     
+
+rmax.rule <- function(fun="K", W, lambda) {
+  verifyclass(W, "owin")
+  switch(fun,
+         K = {
+           # Ripley's Rule
+           ripley <- min(diff(W$xrange), diff(W$yrange))/4
+           # Count at most 1000 neighbours per point
+           rlarge <- if(!missing(lambda)) sqrt(1000 /(pi * lambda)) else Inf
+           rmax <- min(rlarge, ripley)
+         },
+         Kscaled = {
+           ## rule of thumb for Kscaled
+           rdiam  <- diameter(as.rectangle(W))/2 * sqrt(lambda)
+           rmax <- min(10, rdiam)
+         },
+         F = ,
+         G = ,
+         J = {
+           # rule of thumb
+           rdiam  <- diameter(as.rectangle(W))/2
+           # Poisson process has F(rlarge) = 1 - 10^(-5)
+           rlarge <-
+             if(!missing(lambda)) sqrt(log(1e5)/(pi * lambda)) else Inf
+           rmax <- min(rlarge, rdiam)
+         },
+         stop(paste("Unrecognised function type", sQuote(fun)))
+         )
+  return(rmax)
+}
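+
+# Worked example (editor's sketch): for fun="K" on a unit square with
+# lambda = 100, Ripley's rule gives min(1, 1)/4 = 0.25 and the
+# 1000-neighbour bound gives sqrt(1000/(pi * 100)) ~ 1.78, so
+#   rmax.rule("K", square(1), 100)   # returns 0.25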
+           
+    
+implemented.for.K <- function(correction, windowtype, explicit) {
+  pixels <- (windowtype == "mask")
+  if(any(correction == "best")) {
+    # select best available correction
+    correction[correction == "best"] <- if(!pixels) "isotropic" else "translate"
+  } else {
+    # available selection of edge corrections depends on window
+    if(pixels) {
+      iso <- (correction == "isotropic") 
+      if(any(iso)) {
+        whinge <- "Isotropic correction not implemented for binary masks"
+        if(explicit) {
+          if(all(iso)) stop(whinge) else warning(whinge)
+        }
+        correction <- correction[!iso]
+      }
+    }
+  }
+  return(correction)
+}
+
+good.correction.K <- function(X) {
+  nX <- npoints(X)
+  W <- as.owin(X)
+  avail <- c("none",
+             if(nX < 1e5) "border" else NULL,
+             if(nX < 3000) "translate" else NULL,
+             if(nX < 1000 && !is.mask(W)) "isotropic" else NULL)
+  chosen <- rev(avail)[1]
+  return(chosen)
+}
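+
+# Editor's note: 'avail' is built in increasing order of statistical
+# accuracy (and computational cost), so rev(avail)[1] picks the most
+# accurate affordable correction, e.g.
+#   500 points, polygonal window  -> "isotropic"
+#   50000 points                  -> "border"
+#   200000 points                 -> "none"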
+
+Krect.engine <- function(X, rmax, nr=100,
+                           correction,
+                           weights=NULL, ratio=FALSE, fname="K") {
+  verifyclass(X, "ppp")
+  npts <- npoints(X)
+  W <- as.owin(X)
+
+  areaW <- area(W)
+  width <- sidelengths(W)[1]
+  height <- sidelengths(W)[2]
+  lambda <- npts/areaW
+  lambda2 <- (npts * (npts - 1))/(areaW^2)
+
+  if(missing(rmax))
+    rmax <- diameter(W)/4
+  r <- seq(from=0, to=rmax, length.out=nr)
+
+  if(weighted <- !is.null(weights)) {
+    ## coerce weights to a vector
+    if(is.numeric(weights)) {
+      check.nvector(weights, npts)
+    } else {
+      wim <- as.im(weights, W)
+      weights <- wim[X, drop=FALSE]
+      if(anyNA(weights))
+        stop("domain of weights image does not contain all points of X")
+    }
+  }
+
+  # this will be the output data frame
+  Kdf <- data.frame(r=r, theo= pi * r^2)
+  desc <- c("distance argument r", "theoretical Poisson %s")
+  denom <- if(weighted) areaW else (lambda2 * areaW)
+  Kfv <- ratfv(Kdf, NULL, denom,
+               "r", quote(K(r)),
+               "theo", NULL, c(0,rmax),
+               c("r", makefvlabel(NULL, NULL, fname, "pois")),
+               desc, fname=fname,
+               ratio=ratio)
+
+  ####### prepare data ############
+
+  if(!all(correction == "translate")) {
+    ## Ensure rectangle has its bottom left corner at the origin
+    if(W$xrange[1] != 0 || W$yrange[1] != 0) {
+      X <- shift(X, origin="bottomleft")
+      W <- as.owin(X)
+    }
+  }
+
+  ## sort in ascending order of x coordinate
+  orderX <- fave.order(X$x)
+  x <- X$x[orderX]
+  y <- X$y[orderX]
+  if(weighted)
+    wt <- weights[orderX]
+
+  ## establish algorithm parameters
+  doIso <- "isotropic" %in% correction 
+  doTrans <- "translate" %in% correction
+  doBord <- any(c("border", "bord.modif") %in% correction)
+  doUnco <- "none" %in% correction
+  trimedge <- spatstat.options("maxedgewt")
+
+  ## allocate space for results
+  ziso   <- numeric(if(doIso) nr else 1L)
+  ztrans <- numeric(if(doTrans) nr else 1L)
+  
+  ## call the C code
+  if(weighted) {
+    ## weighted version
+    zbnumer <- numeric(if(doBord) nr else 1L)
+    zbdenom <- numeric(if(doBord) nr else 1L)
+    zunco   <- numeric(if(doUnco) nr else 1L)
+    res <- .C("KrectWtd",
+              width=as.double(width),
+              height=as.double(height),
+              nxy=as.integer(npts),
+              x=as.double(x),
+              y=as.double(y),
+              w=as.double(wt),
+              nr=as.integer(nr),
+              rmax=as.double(rmax),
+              trimedge=as.double(trimedge),
+              doIso=as.integer(doIso),
+              doTrans=as.integer(doTrans),
+              doBord=as.integer(doBord),
+              doUnco=as.integer(doUnco),
+              iso=as.double(ziso),
+              trans=as.double(ztrans),
+              bnumer=as.double(zbnumer),
+              bdenom=as.double(zbdenom),
+              unco=as.double(zunco),
+              PACKAGE = "spatstat")
+  } else if(npts < sqrt(.Machine$integer.max)) {
+    ## unweighted
+    ## numerator of border correction can be stored as an integer
+    ## use faster integer arithmetic
+    zbnumer <- integer(if(doBord) nr else 1L)
+    zbdenom <- integer(if(doBord) nr else 1L)
+    zunco   <- integer(if(doUnco) nr else 1L)
+    res <- .C("KrectInt",
+              width=as.double(width),
+              height=as.double(height),
+              nxy=as.integer(npts),
+              x=as.double(x),
+              y=as.double(y),
+              nr=as.integer(nr),
+              rmax=as.double(rmax),
+              trimedge=as.double(trimedge),
+              doIso=as.integer(doIso),
+              doTrans=as.integer(doTrans),
+              doBord=as.integer(doBord),
+              doUnco=as.integer(doUnco),
+              iso=as.double(ziso),
+              trans=as.double(ztrans),
+              bnumer=as.integer(zbnumer),
+              bdenom=as.integer(zbdenom),
+              unco=as.integer(zunco),
+              PACKAGE = "spatstat")
+  } else {
+    ## unweighted
+    ## need double precision storage
+    zbnumer <- numeric(if(doBord) nr else 1L)
+    zbdenom <- numeric(if(doBord) nr else 1L)
+    zunco   <- numeric(if(doUnco) nr else 1L)
+    res <- .C("KrectDbl",
+              width=as.double(width),
+              height=as.double(height),
+              nxy=as.integer(npts),
+              x=as.double(x),
+              y=as.double(y),
+              nr=as.integer(nr),
+              rmax=as.double(rmax),
+              trimedge=as.double(trimedge),
+              doIso=as.integer(doIso),
+              doTrans=as.integer(doTrans),
+              doBord=as.integer(doBord),
+              doUnco=as.integer(doUnco),
+              iso=as.double(ziso),
+              trans=as.double(ztrans),
+              bnumer=as.double(zbnumer),
+              bdenom=as.double(zbdenom),
+              unco=as.double(zunco),
+              PACKAGE = "spatstat")
+  }
+
+  ## Process corrections in reverse order of priority
+
+  ## Uncorrected estimate
+  if("none" %in% correction) {
+    numKun <- res$unco
+    denKun <- if(weighted) areaW else (lambda2 * areaW)
+    Kfv <- bind.ratfv(Kfv,
+                      data.frame(un=numKun),
+                      denKun,
+                      makefvlabel(NULL, "hat", fname, "un"),
+                      "uncorrected estimate of %s",
+                      "un",
+                      ratio=ratio)
+  }
+  
+  ## Modified border correction
+  if("bord.modif" %in% correction) {
+    denom.area <- eroded.areas(W, r)
+    numKbm <- res$bnumer
+    denKbm <- if(weighted) denom.area else (lambda2 * denom.area)
+    Kfv <- bind.ratfv(Kfv,
+                      data.frame(bord.modif=numKbm),
+                      denKbm,
+                      makefvlabel(NULL, "hat", fname, "bordm"),
+                      "modified border-corrected estimate of %s",
+                      "bord.modif",
+                      ratio=ratio)
+  }
+  ## Border correction
+  if("border" %in% correction) {
+    numKb <- res$bnumer
+    denKb <- if(weighted) res$bdenom else lambda * res$bdenom
+    Kfv <- bind.ratfv(Kfv,
+                      data.frame(border=numKb),
+                      denKb,
+                      makefvlabel(NULL, "hat", fname, "bord"),
+                      "border-corrected estimate of %s",
+                      "border",
+                      ratio=ratio)
+  }
+  
+  ## translation correction
+  if("translate" %in% correction) {
+    numKtrans <- res$trans
+    denKtrans <- if(weighted) areaW else (lambda2 * areaW)
+    h <- diameter(as.rectangle(W))/2
+    numKtrans[r >= h] <- NA
+    Kfv <- bind.ratfv(Kfv,
+                      data.frame(trans=numKtrans),
+                      denKtrans,
+                      makefvlabel(NULL, "hat", fname, "trans"),
+                      "translation-corrected estimate of %s",
+                      "trans",
+                      ratio=ratio)
+  }
+  ## isotropic correction
+  if("isotropic" %in% correction) {
+    numKiso <- res$iso
+    denKiso <- if(weighted) areaW else (lambda2 * areaW)
+    h <- diameter(as.rectangle(W))/2
+    numKiso[r >= h] <- NA
+    Kfv <- bind.ratfv(Kfv,
+                      data.frame(iso=numKiso),
+                      denKiso,
+                      makefvlabel(NULL, "hat", fname, "iso"),
+                      "isotropic-corrected estimate of %s",
+                      "iso",
+                      ratio=ratio)
+  }
+  ##
+  # default is to display them all
+  formula(Kfv) <- . ~ r
+  unitname(Kfv) <- unitname(X)
+  if(ratio) 
+    Kfv <- conform.ratfv(Kfv)
+  return(Kfv)
+}
+
+
+  
diff --git a/R/Kinhom.R b/R/Kinhom.R
new file mode 100755
index 0000000..12be1fe
--- /dev/null
+++ b/R/Kinhom.R
@@ -0,0 +1,463 @@
+#
+#	Kinhom.R	Estimation of K function for inhomogeneous patterns
+#
+#	$Revision: 1.93 $	$Date: 2017/07/18 10:14:42 $
+#
+#	Kinhom()	compute estimate of K_inhom
+#
+#
+#       Reference:
+#            Non- and semiparametric estimation of interaction
+#	     in inhomogeneous point patterns
+#            A. Baddeley, J. Moller, R. Waagepetersen
+#            Statistica Neerlandica 54 (2000) 329--350.
+#
+# -------- functions ----------------------------------------
+#	Kinhom()	compute estimate of K
+#                       using various edge corrections
+#
+#       Kwtsum()         internal routine for border correction
+#
+# -------- standard arguments ------------------------------	
+#	X		point pattern (of class 'ppp')
+#
+#	r		distance values at which to compute K	
+#
+#       lambda          vector of intensity values for points of X
+#
+# -------- standard output ------------------------------
+#      A data frame (class "fv") with columns named
+#
+#	r:		same as input
+#
+#	trans:		K function estimated by translation correction
+#
+#	iso:		K function estimated by Ripley isotropic correction
+#
+#	theo:		K function for Poisson ( = pi * r ^2 )
+#
+#	border:		K function estimated by border method
+#			(denominator = sum of weights of points)
+#
+#       bord.modif:	K function estimated by border method
+#			(denominator = area of eroded window)
+#
+# ------------------------------------------------------------------------
+
+"Linhom" <- function(...) {
+  K <- Kinhom(...)
+  L <- eval.fv(sqrt(pmax.int(K,0)/pi))
+  # relabel the fv object
+  L <- rebadge.fv(L, quote(L[inhom](r)), c("L", "inhom"),
+                  names(K), new.labl=attr(K, "labl"))
+  attr(L, "labl") <- attr(K, "labl")
+  attr(L, "dangerous") <- attr(K, "dangerous")
+  #
+  return(L)  
+}
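+
+# Illustrative usage (editor's sketch):
+#   lam <- function(x, y) { 300 * exp(-3 * x) }   # known intensity
+#   X <- rpoispp(lam, lmax=300)                   # inhomogeneous Poisson
+#   Ki <- Kinhom(X, lambda=lam)
+#   plot(Ki)    # should fluctuate around theo = pi*r^2 for Poisson data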
+
+"Kinhom"<-
+  function (X, lambda=NULL, ..., r = NULL, breaks = NULL, 
+            correction=c("border", "bord.modif", "isotropic", "translate"),
+            renormalise=TRUE,
+            normpower=1,
+            update = TRUE,
+            leaveoneout = TRUE,
+            nlarge = 1000, 
+            lambda2=NULL,
+            reciplambda=NULL, reciplambda2=NULL,
+	    diagonal=TRUE,
+            sigma=NULL, varcov=NULL,
+	    ratio=FALSE)
+{
+    verifyclass(X, "ppp")
+    nlarge.given <- !missing(nlarge)
+    rfixed <- !missing(r) || !missing(breaks)
+    miss.update <- missing(update)
+    
+    # determine basic parameters
+    W <- X$window
+    npts <- npoints(X)
+    areaW <- area(W)
+    diamW <- diameter(W)
+    
+    rmaxdefault <- rmax.rule("K", W, npts/areaW)
+    breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)
+    r <- breaks$r
+    rmax <- breaks$max
+
+    # match corrections
+    correction.given <- !missing(correction) && !is.null(correction)
+    correction <- pickoption("correction", correction,
+                             c(none="none",
+                               border="border",
+                               "bord.modif"="bord.modif",
+                               isotropic="isotropic",
+                               Ripley="isotropic",
+                               trans="translate",
+                               translate="translate",
+                               translation="translate",
+                               good="good",
+                               best="best"),
+                             multi=TRUE)
+
+#    best.wanted <- ("best" %in% correction)
+    ## replace 'good' by the optimal choice for this size of dataset
+    if("good" %in% correction)
+      correction[correction == "good"] <- good.correction.K(X)
+    ## retain only corrections that are implemented for the window
+    correction <- implemented.for.K(correction, W$type, correction.given)
+
+    ###########################################################
+    # DETERMINE WEIGHTS AND VALIDATE
+    #
+    # The matrix 'lambda2' or 'reciplambda2' is sufficient information
+    # unless we want the border correction.
+    lambda2.given    <- !is.null(lambda2) || !is.null(reciplambda2)
+    lambda2.suffices <- !any(correction %in% c("border", "bord.modif"))
+    
+    ## Arguments that are 'dangerous' for envelope, if fixed
+    dangerous <- c("lambda", "reciplambda", "lambda2", "reciplambda2")
+    danger <- TRUE
+    
+    # Use matrix of weights if it was provided and if it is sufficient
+    if(lambda2.suffices && lambda2.given) {
+      if(!is.null(reciplambda2)) 
+        check.nmatrix(reciplambda2, npts)
+      else {
+        check.nmatrix(lambda2, npts)
+        reciplambda2 <- 1/lambda2
+      }
+      # renormalise
+      if(renormalise) {
+        check.1.real(normpower)
+        stopifnot(normpower %in% 1:2)
+	rlam2 <- reciplambda2
+	if(!diagonal) diag(rlam2) <- 0
+	renorm.factor <- (areaW^2/sum(rlam2))^(normpower/2)
+      } 
+    } else {
+      # Vector lambda or reciplambda is required
+      if(missing(lambda) && is.null(reciplambda)) {
+        # No intensity data provided
+        danger <- FALSE
+        # Estimate density by leave-one-out kernel smoothing
+        lambda <- density(X, ..., sigma=sigma, varcov=varcov,
+                            at="points", leaveoneout=leaveoneout)
+        lambda <- as.numeric(lambda)
+        reciplambda <- 1/lambda
+      } else if(!is.null(reciplambda)) {
+        # 1/lambda values provided
+        if(is.im(reciplambda)) 
+          reciplambda <- safelookup(reciplambda, X)
+        else if(is.function(reciplambda))
+          reciplambda <- reciplambda(X$x, X$y)
+        else if(is.numeric(reciplambda) && is.vector(as.numeric(reciplambda)))
+          check.nvector(reciplambda, npts)
+        else stop(paste(sQuote("reciplambda"),
+                        "should be a vector, a pixel image, or a function"))
+      } else {
+        # lambda values provided
+        if(is.im(lambda)) 
+          lambda <- safelookup(lambda, X)
+        else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) {
+          model <- lambda
+          if(!update) {
+            ## just use intensity of fitted model
+            lambda <- predict(model, locations=X, type="trend")
+          } else {
+            ## re-fit model to data X
+            if(is.ppm(model)) {
+              model <- update(model, Q=X)
+              lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
+            } else if(is.kppm(model)) {
+              model <- update(model, X=X)
+              lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
+            } else {
+              model <- update(model, X=X)
+              lambda <- fitted(model, dataonly=TRUE)
+            }
+            danger <- FALSE
+            if(miss.update) 
+              warn.once(key="Kinhom.update",
+                        "The behaviour of Kinhom when lambda is a ppm object",
+                        "has changed (in spatstat 1.37-0 and later).",
+                        "See help(Kinhom)")
+          }
+        } else if(is.function(lambda)) 
+          lambda <- lambda(X$x, X$y)
+        else if(is.numeric(lambda) && is.vector(as.numeric(lambda)))
+          check.nvector(lambda, npts)
+        else stop(paste(sQuote("lambda"),
+                          "should be a vector, a pixel image, or a function"))
+        # evaluate reciprocal
+        reciplambda <- 1/lambda
+      }
+      # renormalise
+      if(renormalise) {
+        check.1.real(normpower)
+        stopifnot(normpower %in% 1:2)
+        if(!diagonal && normpower == 2) {
+	  renorm.factor <- (areaW^2)/(sum(reciplambda)^2 - sum(reciplambda^2))
+	} else {
+          renorm.factor <- (areaW/sum(reciplambda))^normpower
+        }
+      } 
+    }
+
+    # recommended range of r values
+    alim <- c(0, min(rmax, rmaxdefault))
+        
+  ###########################################
+  # Efficient code for border correction and no correction
+  # Usable only if r values are evenly spaced from 0 to rmax
+  # Invoked automatically if number of points is large
+
+    can.do.fast <- breaks$even && !lambda2.given
+    large.n    <- (npts >= nlarge)
+#    demand.best <- correction.given && best.wanted
+    large.n.trigger <- large.n && !correction.given
+    fastcorrections <- c("border", "bord.modif", "none")
+    fastdefault <- "border"
+    correction.fast  <- all(correction %in% fastcorrections)
+    will.do.fast <- can.do.fast && (correction.fast || large.n.trigger)
+    asked.fast <- (correction.given && correction.fast) ||
+                  (nlarge.given && large.n.trigger)
+    if(!can.do.fast && asked.fast) {
+      whynot <-
+        if(!(breaks$even)) "r values not evenly spaced" else
+        if(lambda2.given) "matrix lambda2 was given" else NULL
+      warning(paste("cannot use efficient code", whynot, sep="; "))
+    }
+    if(will.do.fast) {
+      ## Compute Kinhom using fast algorithm(s)
+      ## determine correction(s)
+      ok <- correction %in% fastcorrections
+      correction <- if(any(ok)) correction[ok] else fastdefault
+      bord <- any(correction %in% c("border", "bord.modif"))
+      none <- any(correction == "none")
+      if(!all(ok)) {
+        ## some corrections were overridden; notify user
+        corx <- c(if(bord) "border correction estimate" else NULL,
+                  if(none) "uncorrected estimate" else NULL)
+        corx <- paste(corx, collapse=" and ")
+        message(paste("number of data points exceeds",
+                      nlarge, "- computing", corx , "only"))
+      }
+      ## restrict r values to recommended range, unless specifically requested
+      if(!rfixed) 
+        r <- seq(from=0, to=alim[2], length.out=length(r))
+      ## border method
+      if(bord) {
+        Kb <- Kborder.engine(X, max(r), length(r), correction,
+                             weights=reciplambda, ratio=ratio)
+        if(renormalise) {
+          ynames <- setdiff(fvnames(Kb, "*"), "theo")
+	  Kb <- adjust.ratfv(Kb, ynames, denfactor=1/renorm.factor)
+        }
+        Kb <- tweak.ratfv.entry(Kb, "border", new.labl="{hat(%s)[%s]^{bord}} (r)")
+        Kb <- tweak.ratfv.entry(Kb, "bord.modif", new.labl="{hat(%s)[%s]^{bordm}} (r)")
+      }
+      ## uncorrected
+      if(none) {
+        Kn <- Knone.engine(X, max(r), length(r), weights=reciplambda,
+	                   ratio=ratio)
+        if(renormalise) 
+	  Kn <- adjust.ratfv(Kn, "un", denfactor=1/renorm.factor)
+        Kn <- tweak.ratfv.entry(Kn, "un", new.labl="{hat(%s)[%s]^{un}} (r)")
+      }
+      K <-
+        if(bord && !none) Kb else
+        if(!bord && none) Kn else
+	if(!ratio) cbind.fv(Kb,  Kn[, c("r", "un")]) else 
+	bind.ratfv(Kb,  Kn[, c("r", "un")], ratio=TRUE)
+	
+      ## tweak labels
+      K <- rebadge.fv(K, quote(K[inhom](r)), c("K", "inhom"))
+      if(danger)
+        attr(K, "dangerous") <- dangerous
+      return(K)
+    }
+
+  ###########################################
+  # Fast code for rectangular window
+  ###########################################
+
+  if(can.do.fast && is.rectangle(W) && spatstat.options("use.Krect")) {
+    K <-  Krect.engine(X, rmax, length(r), correction,
+                        weights=reciplambda,
+			ratio=ratio, fname=c("K", "inhom"))
+    if(renormalise) {
+      allfun <- setdiff(fvnames(K, "*"), "theo")
+      K <- adjust.ratfv(K, allfun, denfactor=1/renorm.factor)
+    }
+    K <- rebadge.fv(K, quote(K[inhom](r)), c("K", "inhom"))
+    attr(K, "alim") <- alim
+    if(danger)
+      attr(K, "dangerous") <- dangerous
+    return(K)
+  }
+  
+  ###########################################
+  # Slower code
+  ###########################################
+        
+        
+    # this will be the output data frame
+    K <- data.frame(r=r, theo= pi * r^2)
+    desc <- c("distance argument r", "theoretical Poisson %s")
+    denom <- if(renormalise) (areaW / renorm.factor) else areaW
+    K <- ratfv(K, NULL, denom,
+               argu="r",
+	       ylab=quote(K[inhom](r)),
+               valu="theo",
+	       fmla=NULL,
+	       alim=alim,
+	       labl=c("r","{%s[%s]^{pois}}(r)"),
+	       desc=desc,
+               fname=c("K", "inhom"),
+	       ratio=ratio)
+
+    # identify all close pairs
+    rmax <- max(r)
+    what <- if(any(correction == "translate")) "all" else "ijd"
+    close <- closepairs(X, rmax, what=what)
+    dIJ <- close$d
+    # compute weights for these pairs
+    I <- close$i
+    J <- close$j
+#    wI <- reciplambda[I]
+    wIJ <- 
+      if(!lambda2.given)
+        reciplambda[I] * reciplambda[J]
+      else 
+        reciplambda2[cbind(I,J)]
+    # 
+
+    # compute edge corrected estimates
+    if(any(correction == "border" | correction == "bord.modif")) {
+      # border method
+      # Compute distances to boundary
+      b <- bdist.points(X)
+      bI <- b[I]
+      # apply reduced sample algorithm
+      RS <- Kwtsum(dIJ, bI, wIJ, b, w=reciplambda, breaks)
+      if(any(correction == "border")) {
+        Kb <- RS$ratio
+        if(renormalise)
+          Kb <- Kb * renorm.factor
+        K <- bind.ratfv(K,
+	                quotient = data.frame(border=Kb),
+			denominator = denom,
+	                labl = "{hat(%s)[%s]^{bord}}(r)",
+                        desc = "border-corrected estimate of %s",
+                        preferred = "border",
+		        ratio=ratio)
+      }
+      if(any(correction == "bord.modif")) {
+        Kbm <- RS$numerator/eroded.areas(W, r)
+        if(renormalise)
+          Kbm <- Kbm * renorm.factor
+    	K <- bind.ratfv(K,
+	                quotient = data.frame(bord.modif=Kbm),
+			denominator = denom,
+			labl = "{hat(%s)[%s]^{bordm}}(r)",
+                        desc = "modified border-corrected estimate of %s",
+                        preferred = "bord.modif",
+			ratio=ratio)
+      }
+    }
+    if(any(correction == "translate")) {
+      # translation correction
+      edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE)
+      allweight <- edgewt * wIJ
+      wh <- whist(dIJ, breaks$val, allweight)
+      Ktrans <- cumsum(wh)/areaW
+      if(renormalise)
+        Ktrans <- Ktrans * renorm.factor
+      rmax <- diamW/2
+      Ktrans[r >= rmax] <- NA
+      K <- bind.ratfv(K,
+                      quotient = data.frame(trans=Ktrans),
+		      denominator = denom,
+		      labl ="{hat(%s)[%s]^{trans}}(r)",
+                      desc = "translation-corrected estimate of %s",
+                      preferred = "trans",
+		      ratio=ratio)
+    }
+    if(any(correction == "isotropic" | correction == "Ripley")) {
+      # Ripley isotropic correction
+      edgewt <- edge.Ripley(X[I], matrix(dIJ, ncol=1))
+      allweight <- edgewt * wIJ
+      wh <- whist(dIJ, breaks$val, allweight)
+      Kiso <- cumsum(wh)/areaW
+      if(renormalise)
+        Kiso <- Kiso * renorm.factor
+      rmax <- diamW/2
+      Kiso[r >= rmax] <- NA
+      K <- bind.ratfv(K,
+                      quotient = data.frame(iso=Kiso),
+		      denominator = denom,
+		      labl = "{hat(%s)[%s]^{iso}}(r)",
+                      desc = "Ripley isotropic correction estimate of %s",
+                      preferred = "iso",
+		      ratio=ratio)
+    }
+
+    # default is to display them all
+    formula(K) <- . ~ r
+    unitname(K) <- unitname(X)
+    if(danger)
+      attr(K, "dangerous") <- dangerous
+    return(K)
+}
+
+
+Kwtsum <- function(dIJ, bI, wIJ, b, w, breaks) {
+  #
+  # "internal" routine to compute border-correction estimates of Kinhom
+  #
+  # dIJ:  vector containing pairwise distances for selected I,J pairs
+  # bI:   corresponding vector of boundary distances for I
+  # wIJ:  product weight for selected I, J pairs
+  #
+  # b:    vector of ALL distances to window boundary
+  # w:   weights for ALL points
+  #
+  # breaks : breakpts object
+  #
+
+  stopifnot(length(dIJ) == length(bI))
+  stopifnot(length(bI) == length(wIJ))
+  stopifnot(length(w) == length(b))
+
+  if(!is.finite(sum(w, wIJ)))
+    stop("Weights in K-function were infinite or NA")
+
+  bkval <- breaks$val
+  
+  # determine which distances d_{ij} were observed without censoring
+  uncen <- (dIJ <= bI)
+  #
+  # histogram of noncensored distances
+  nco <- whist(dIJ[uncen], bkval, wIJ[uncen])
+  # histogram of censoring times for noncensored distances
+  ncc <- whist(bI[uncen], bkval, wIJ[uncen])
+  # histogram of censoring times (yes, this is a different total size)
+  cen <- whist(b, bkval, w)
+  # total weight of censoring times beyond rightmost breakpoint
+  uppercen <- sum(w[b > breaks$max])
+  # go
+  RS <- reduced.sample(nco, cen, ncc, show=TRUE, uppercen=uppercen)
+  # extract results
+  numerator   <- RS$numerator
+  denominator <- RS$denominator
+  ratio       <- RS$numerator/RS$denominator
+  # check
+  if(length(numerator) != breaks$ncells)
+    stop("internal error: length(numerator) != breaks$ncells")
+  if(length(denominator) != breaks$ncells)
+    stop("internal error: length(denominator) != breaks$ncells")
+  return(list(numerator=numerator, denominator=denominator, ratio=ratio))
+}
+	
diff --git a/R/Kmeasure.R b/R/Kmeasure.R
new file mode 100755
index 0000000..9df258d
--- /dev/null
+++ b/R/Kmeasure.R
@@ -0,0 +1,521 @@
+#
+#           Kmeasure.R
+#
+#           $Revision: 1.63 $    $Date: 2017/07/22 08:33:10 $
+#
+#     Kmeasure()         compute an estimate of the second order moment measure
+#
+#     Kest.fft()        use Kmeasure() to form an estimate of the K-function
+#
+#     second.moment.calc()    underlying algorithm
+#
+
+Kmeasure <- function(X, sigma, edge=TRUE, ..., varcov=NULL) {
+  stopifnot(is.ppp(X))
+  
+  sigma.given <- !missing(sigma) && !is.null(sigma)
+  varcov.given <- !is.null(varcov)
+  ngiven <- sigma.given + varcov.given
+  if(ngiven == 2)
+    stop(paste("Give only one of the arguments",
+               sQuote("sigma"), "and", sQuote("varcov")))
+  if(ngiven == 0)
+    stop(paste("Please specify smoothing bandwidth", sQuote("sigma"),
+               "or", sQuote("varcov")))
+  if(varcov.given) {
+    stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov)==2 )
+    sigma <- NULL
+  } else {
+    stopifnot(is.numeric(sigma))
+    stopifnot(length(sigma) %in% c(1,2))
+    stopifnot(all(sigma > 0))
+    if(length(sigma) == 2) {
+      varcov <- diag(sigma^2)
+      sigma <- NULL
+    }
+  }  
+
+  second.moment.calc(x=X, sigma=sigma, edge=edge,
+                     what="Kmeasure", varcov=varcov, ...)
+}
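+
+# Illustrative usage (editor's sketch): the result is a pixel image of
+# the smoothed second moment measure, as a function of the displacement
+# vector between pairs of points:
+#   M <- Kmeasure(cells, sigma=0.05)
+#   plot(M)     # mass near the origin reflects short-range interaction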
+
+second.moment.calc <- function(x, sigma=NULL, edge=TRUE,
+                               what=c("Kmeasure", "kernel", "smooth", 
+                                 "Bartlett", "edge", "smoothedge", "all"),
+                               ..., 
+                               varcov=NULL, expand=FALSE, debug=FALSE) {
+  if(is.null(sigma) && is.null(varcov))
+    stop("must specify sigma or varcov")
+  what <- match.arg(what)
+  sig <- if(!is.null(sigma)) sigma else max(c(diag(varcov), sqrt(det(varcov))))
+
+  xtype <- if(is.ppp(x)) "ppp" else
+           if(is.im(x)) "im" else
+           if(inherits(x, "imlist")) "imlist" else 
+           if(all(sapply(x, is.im))) "imlist" else
+           stop("x should be a point pattern or a pixel image")
+
+  nimages <- switch(xtype,
+                    ppp = 1,
+                    im = 1,
+                    imlist = length(x))
+
+  win <- if(nimages == 1) as.owin(x) else as.owin(x[[1]])
+  win <- rescue.rectangle(win)
+  rec <- as.rectangle(win)
+  across <- min(diff(rec$xrange), diff(rec$yrange))
+  # determine whether to expand window
+  if(!expand || (6 * sig < across)) {
+    result <- second.moment.engine(x, sigma=sigma, edge=edge,
+                                   what=what, debug=debug, ..., varcov=varcov)
+    return(result)
+  }
+  # need to expand window
+  bigger <- grow.rectangle(rec, (7 * sig - across)/2)
+  switch(xtype,
+         ppp = {
+           # pixellate first (to preserve pixel resolution)
+           X <- pixellate(x, ..., padzero=TRUE)
+           np <- npoints(x)
+         },
+         im = {
+           X <- x
+           np <- NULL
+         },
+         imlist = {
+           X <- x
+           np <- NULL
+         })
+
+  # Now expand
+  if(nimages == 1) {
+    X <- rebound.im(X, bigger)
+    X <- na.handle.im(X, 0)
+  } else {
+    X <- lapply(X, rebound.im, rect=bigger)
+    X <- lapply(X, na.handle.im, na.replace=0)
+  }
+  # Compute!
+  out <- second.moment.engine(X, sigma=sigma, edge=edge,
+                              what=what, debug=debug, ...,
+                              obswin=win, varcov=varcov, npts=np)
+  # Now clip it
+  fbox <- shift(rec, origin="midpoint")
+  if(nimages == 1) {
+    result <- switch(what,
+                     kernel   = out[fbox],
+                     smooth   = out[win],
+                     Kmeasure = out[fbox],
+                     Bartlett = out[fbox],
+                     edge     = out[win],
+                     smoothedge = list(smooth=out$smooth[win],
+                       edge  =out$edge[win]),
+                     all      =
+                     list(kernel=out$kernel[fbox],
+                          smooth=out$smooth[win],
+                          Kmeasure=out$Kmeasure[fbox],
+                          Bartlett=out$Bartlett[fbox],
+                          edge=out$edge[win]))
+  } else {
+    result <-
+      switch(what,
+             kernel     = out[fbox], 
+             smooth     = lapply(out, "[", i=win),
+             Kmeasure   = lapply(out, "[", i=fbox),
+             Bartlett   = lapply(out, "[", i=fbox),
+             edge       = out[win],
+             smoothedge = list(
+               smooth = lapply(out$smooth, "[", i=win),
+               edge   = out$edge[win]),
+             all        = list(
+               kernel=out$kernel[fbox],
+               smooth=lapply(out$smooth, "[", i=win),
+               Kmeasure=lapply(out$Kmeasure, "[", i=fbox),
+               Bartlett=lapply(out$Bartlett, "[", i=fbox),
+               edge=out$edge[win]))
+  }
+  return(result)
+}
+
+second.moment.engine <-
+  function(x, sigma=NULL, edge=TRUE,
+           what=c("Kmeasure", "kernel", "smooth", 
+             "Bartlett", "edge", "smoothedge", "all"),
+           ...,
+           kernel="gaussian",
+           obswin = as.owin(x), varcov=NULL,
+           npts=NULL, debug=FALSE)
+{
+  what <- match.arg(what)
+  validate2Dkernel(kernel)
+
+  is.second.order <- what %in% c("Kmeasure", "Bartlett", "all")
+  needs.kernel <- what %in% c("kernel", "all", "Kmeasure")
+  returns.several <- what %in% c("all", "smoothedge")
+
+  # check whether Fastest Fourier Transform in the West is available
+  west <- fftwAvailable()
+  
+  if(returns.several)
+    result <- list() # several results will be returned in a list
+
+  if(is.ppp(x)) {
+    # convert list of points to mass distribution
+    X <- pixellate(x, ..., padzero=TRUE)
+    if(is.null(npts))
+      npts <- npoints(x)
+  } else X <- x
+  if(is.im(X)) {
+    Xlist <- list(X)
+    nimages <- 1
+  } else if(all(unlist(lapply(X, is.im)))) {
+    Xlist <- X
+    X <- Xlist[[1]]
+    nimages <- length(Xlist)
+    blanklist <- vector(mode="list", length=nimages)
+    names(blanklist) <- names(Xlist)
+  } else stop("internal error: unrecognised format for x")
+  unitsX <- unitname(X)
+  xstep <- X$xstep
+  ystep <- X$ystep
+  # ensure obswin has same bounding frame as X
+  if(!missing(obswin))
+    obswin <- rebound.owin(obswin, as.rectangle(X))
+  # go to work
+  Y <- X$v
+  Ylist <- lapply(Xlist, getElement, name="v")
+  # pad with zeroes
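+  # (padding each image to twice its size makes the FFT's circular
+  #  convolution equivalent to ordinary convolution, so no mass wraps
+  #  around the image edges)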
+  nr <- nrow(Y)
+  nc <- ncol(Y)
+  Ypad <- matrix(0, ncol=2*nc, nrow=2*nr)
+  Ypadlist <- rep(list(Ypad), nimages)
+  for(i in 1:nimages)
+    Ypadlist[[i]][1:nr, 1:nc] <- Ylist[[i]]
+  Ypad <- Ypadlist[[1]]
+  lengthYpad <- 4 * nc * nr
+  # corresponding coordinates
+  xcol.pad <- X$xcol[1] + xstep * (0:(2*nc-1))
+  yrow.pad <- X$yrow[1] + ystep * (0:(2*nr-1))
+  # compute kernel and its Fourier transform
+  if(!needs.kernel && 
+     identical(kernel, "gaussian") &&
+     is.numeric(sigma) && (length(sigma) == 1) &&
+     spatstat.options('developer')) {
+    # compute Fourier transform of kernel directly (*experimental*)
+    ii <- c(0:(nr-1), nr:1)
+    jj <- c(0:(nc-1), nc:1)
+    zz <- -sigma^2 * pi^2/2
+    uu <- exp(zz * ii^2)
+    vv <- exp(zz * jj^2)
+    fK <- outer(uu, vv, "*")
+  } else {
+    # set up kernel
+    xcol.ker <- xstep * c(0:(nc-1),-(nc:1))
+    yrow.ker <- ystep * c(0:(nr-1),-(nr:1))
+    kerpixarea <- xstep * ystep
+    if(identical(kernel, "gaussian")) {
+      if(!is.null(sigma)) {
+        densX.ker <- dnorm(xcol.ker, sd=sigma)
+        densY.ker <- dnorm(yrow.ker, sd=sigma)
+        #' WAS:  Kern <- outer(densY.ker, densX.ker, "*") * kerpixarea
+        Kern <- outer(densY.ker, densX.ker, "*")
+        Kern <- Kern/sum(Kern)
+      } else if(!is.null(varcov)) {
+        ## anisotropic kernel
+        detSigma <- det(varcov)
+        Sinv <- solve(varcov)
+        halfSinv <- Sinv/2
+        constker <- kerpixarea/(2 * pi * sqrt(detSigma))
+        xsq <- matrix((xcol.ker^2)[col(Ypad)], ncol=2*nc, nrow=2*nr)
+        ysq <- matrix((yrow.ker^2)[row(Ypad)], ncol=2*nc, nrow=2*nr)
+        xy <- outer(yrow.ker, xcol.ker, "*")
+        Kern <- constker * exp(-(xsq * halfSinv[1,1]
+                                 + xy * (halfSinv[1,2]+halfSinv[2,1])
+                                 + ysq * halfSinv[2,2]))
+        Kern <- Kern/sum(Kern)
+      } else 
+        stop("Must specify either sigma or varcov")
+    } else {
+      ## non-Gaussian kernel
+      ## evaluate kernel at array of points
+      xker <- as.vector(xcol.ker[col(Ypad)])
+      yker <- as.vector(yrow.ker[row(Ypad)])
+      Kern <- evaluate2Dkernel(kernel, xker, yker,
+                               sigma=sigma, varcov=varcov, ...) * kerpixarea
+      Kern <- matrix(Kern, ncol=2*nc, nrow=2*nr)
+      Kern <- Kern/sum(Kern)
+    }
+
+    if(what %in% c("kernel", "all")) {
+      ## kernel will be returned
+      ## first rearrange it into spatially sensible order (monotone x and y)
+      rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1
+      ctwist <- (-nc):(nc-1) %% (2*nc) + 1
+      if(debug) {
+        if(any(fave.order(xcol.ker) != rtwist))
+          cat("something round the twist\n")
+      }
+      Kermit <- Kern[ rtwist, ctwist]
+      ker <- im(Kermit, xcol.ker[ctwist], yrow.ker[ rtwist], unitname=unitsX)
+      if(what == "kernel")
+        return(ker)
+      else 
+        result$kernel <- ker
+    }
+    ## convolve using fft
+    fK <- fft2D(Kern, west=west)
+  }
+  
+  if(what != "edge") {
+    if(nimages == 1) {
+      fY <- fft2D(Ypad, west=west)
+      sm <- fft2D(fY * fK, inverse=TRUE, west=west)/lengthYpad
+      if(debug) {
+        cat(paste("smooth: maximum imaginary part=",
+                  signif(max(Im(sm)),3), "\n"))
+        if(!is.null(npts))
+          cat(paste("smooth: mass error=",
+                    signif(sum(Mod(sm))-npts,3), "\n"))
+      }
+    } else {
+      fYlist <- smlist <- blanklist
+      for(i in 1:nimages) {
+        fYlist[[i]] <- fY.i <- fft2D(Ypadlist[[i]], west=west)
+        smlist[[i]] <- sm.i <-
+          fft2D(fY.i * fK, inverse=TRUE, west=west)/lengthYpad
+        if(debug) {
+          cat(paste("smooth component", i, ": maximum imaginary part=",
+                    signif(max(Im(sm.i)),3), "\n"))
+          if(!is.null(npts))
+            cat(paste("smooth component", i, ": mass error=",
+                      signif(sum(Mod(sm.i))-npts,3), "\n"))
+        }
+      }
+    }
+  }
+  if(what %in% c("smooth", "all", "smoothedge")) {
+    # compute smoothed point pattern without edge correction
+    if(nimages == 1) {
+      smo <- im(Re(sm)[1:nr, 1:nc],
+                xcol.pad[1:nc], yrow.pad[1:nr],
+                unitname=unitsX)
+      if(what == "smooth") {
+        return(smo)
+      } else {
+        result$smooth <- smo
+      }
+    } else {
+      smolist <- blanklist
+      for(i in 1:nimages) 
+        smolist[[i]] <- im(Re(smlist[[i]])[1:nr, 1:nc],
+                           xcol.pad[1:nc], yrow.pad[1:nr],
+                           unitname=unitsX)
+      smolist <- as.solist(smolist)
+      if(what == "smooth") {
+        return(smolist)
+      } else {
+        result$smooth <- smolist
+      }
+    }
+  }
+
+  if(is.second.order) {
+    # compute Bartlett spectrum
+    if(nimages == 1) {
+      bart <- BartCalc(fY, fK)  ##  bart <- Mod(fY)^2 * fK
+    } else {
+      bartlist <- lapply(fYlist, BartCalc, fK=fK)
+    }
+  }
+  
+  if(what %in% c("Bartlett", "all")) {
+     # Bartlett spectrum will be returned
+     # rearrange into spatially sensible order (monotone x and y)
+    rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1
+    ctwist <- (-nc):(nc-1) %% (2*nc) + 1
+    if(nimages == 1) {
+      Bart <- bart[ rtwist, ctwist]
+      Bartlett <- im(Mod(Bart),(-nc):(nc-1), (-nr):(nr-1))
+      if(what == "Bartlett")
+        return(Bartlett)
+      else
+        result$Bartlett <- Bartlett
+    } else {
+      Bartlist <- blanklist
+      for(i in 1:nimages) {
+        Bart <- (bartlist[[i]])[ rtwist, ctwist]
+        Bartlist[[i]] <- im(Mod(Bart),(-nc):(nc-1), (-nr):(nr-1))
+      }
+      Bartlist <- as.solist(Bartlist)
+      if(what == "Bartlett")
+        return(Bartlist)
+      else
+        result$Bartlett <- Bartlist
+    }
+  }
+  
+  #### ------- Second moment measure --------------
+  #
+  if(is.second.order) {
+    if(nimages == 1) {
+      mom <- fft2D(bart, inverse=TRUE, west=west)/lengthYpad
+      if(debug) {
+        cat(paste("2nd moment measure: maximum imaginary part=",
+                  signif(max(Im(mom)),3), "\n"))
+        if(!is.null(npts))
+          cat(paste("2nd moment measure: mass error=",
+                    signif(sum(Mod(mom))-npts^2, 3), "\n"))
+      }
+      mom <- Mod(mom)
+      # subtract (delta_0 * kernel) * npts
+      if(is.null(npts))
+        stop("Internal error: second moment measure requires npts")
+      mom <- mom - npts * Kern
+    } else {
+      momlist <- blanklist
+      for(i in 1:nimages) {
+        mom.i <- fft2D(bartlist[[i]], inverse=TRUE, west=west)/lengthYpad
+        if(debug) {
+          cat(paste("2nd moment measure: maximum imaginary part=",
+                    signif(max(Im(mom.i)),3), "\n"))
+          if(!is.null(npts))
+            cat(paste("2nd moment measure: mass error=",
+                      signif(sum(Mod(mom.i))-npts^2, 3), "\n"))
+        }
+        mom.i <- Mod(mom.i)
+        # subtract (delta_0 * kernel) * npts
+        if(is.null(npts))
+          stop("Internal error: second moment measure requires npts")
+        mom.i <- mom.i - npts * Kern
+        momlist[[i]] <- mom.i
+      }
+    }
+  }
+  # edge correction
+  if(edge || what %in% c("edge", "all", "smoothedge")) {
+    M <- as.mask(obswin, xy=list(x=X$xcol, y=X$yrow))$m
+    # previous line ensures M has same dimensions and scale as Y 
+    Mpad <- matrix(0, ncol=2*nc, nrow=2*nr)
+    Mpad[1:nr, 1:nc] <- M
+    lengthMpad <- 4 * nc * nr
+    fM <- fft2D(Mpad, west=west)
+    if(edge && is.second.order) {
+      # compute kernel-smoothed set covariance
+      # apply edge correction      
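+      # (the edge weight at lag u is area(W) divided by the smoothed set
+      #  covariance of W at u, capped at spatstat.options("maxedgewt"))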
+      co <- fft2D(Mod(fM)^2 * fK, inverse=TRUE, west=west)/lengthMpad
+      co <- Mod(co) 
+      a <- sum(M)
+      wt <- a/co
+      me <- spatstat.options("maxedgewt")
+      weight <- matrix(pmin.int(me, wt), ncol=2*nc, nrow=2*nr)
+      # apply edge correction to second moment measure
+      if(nimages == 1) {
+        mom <- mom * weight
+        # set to NA outside 'reasonable' region
+        mom[wt > 10] <- NA
+      } else {
+        wgt10 <- (wt > 10)
+        for(i in 1:nimages) {
+          mom.i <- momlist[[i]]
+          mom.i <- mom.i * weight
+          # set to NA outside 'reasonable' region
+          mom.i[wgt10] <- NA
+          momlist[[i]] <- mom.i
+        }
+      }
+    }
+  }
+  if(is.second.order) {
+    # rearrange second moment measure
+    # into spatially sensible order (monotone x and y)
+    rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1
+    ctwist <- (-nc):(nc-1) %% (2*nc) + 1
+    if(nimages == 1) {
+      mom <- mom[ rtwist, ctwist]
+    } else {
+      momlist <- lapply(momlist, "[", i=rtwist, j=ctwist)
+    }
+    if(debug) {
+      if(any(fave.order(xcol.ker) != rtwist))
+        cat("internal error: something round the twist\n")
+    }
+  }
+  if(what %in% c("edge", "all", "smoothedge")) {
+    # return convolution of window with kernel
+    # (evaluated inside window only)
+    con <- fft2D(fM * fK, inverse=TRUE, west=west)/lengthMpad
+    edg <- Mod(con[1:nr, 1:nc])
+    edg <- im(edg, xcol.pad[1:nc], yrow.pad[1:nr], unitname=unitsX)
+    if(what == "edge") 
+      return(edg)
+    else
+      result$edge <- edg
+  }
+  if(what == "smoothedge")
+    return(result)
+  # Second moment measure, density estimate
+  # Divide by number of points * lambda and convert mass to density
+  pixarea <- xstep * ystep
+  if(nimages == 1) {
+    mom <- mom * area(obswin) / (pixarea * npts^2)
+    # this is the second moment measure
+    mm <- im(mom, xcol.ker[ctwist], yrow.ker[rtwist], unitname=unitsX)
+    if(what == "Kmeasure")
+      return(mm)
+    else 
+      result$Kmeasure <- mm
+  } else {
+    ccc <- area(obswin) / (pixarea * npts^2)
+    mmlist <- blanklist
+    for(i in 1:nimages) {
+      mom.i <- momlist[[i]]
+      mom.i <- mom.i * ccc
+      # this is the second moment measure
+      mmlist[[i]] <-
+        im(mom.i, xcol.ker[ctwist], yrow.ker[rtwist], unitname=unitsX)
+    }
+    mmlist <- as.solist(mmlist)
+    if(what == "Kmeasure")
+      return(mmlist)
+    else 
+      result$Kmeasure <- mmlist
+  }
+  # what = "all", so return all computed objects
+  return(result)
+}
+
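+# Bartlett spectrum: squared modulus of the Fourier transform of the
+# pixellated data, multiplied by the Fourier transform of the kernel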
+BartCalc <- function(fY, fK) { Mod(fY)^2 * fK }
+  
+Kest.fft <- function(X, sigma, r=NULL, ..., breaks=NULL) {
+  verifyclass(X, "ppp")
+  W <- Window(X)
+  lambda <- npoints(X)/area(W)
+  rmaxdefault <- rmax.rule("K", W, lambda)        
+  bk <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)
+  breaks <- bk$val
+  rvalues <- bk$r
+  u <- Kmeasure(X, sigma, ...)
+  xx <- rasterx.im(u)
+  yy <- rastery.im(u)
+  rr <- sqrt(xx^2 + yy^2)
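+  # bin the mass of the second moment measure by distance from the origin,
+  # then accumulate: K(r) is the total mass within distance r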
+  tr <- whist(rr, breaks, u$v)
+  K  <- cumsum(tr) * with(u, xstep * ystep)
+  rmax <- min(rr[is.na(u$v)])
+  K[rvalues >= rmax] <- NA
+  result <- data.frame(r=rvalues, theo=pi * rvalues^2, border=K)
+  w <- X$window
+  alim <- c(0, min(diff(w$xrange), diff(w$yrange))/4)
+  out <- fv(result,
+            "r", quote(K(r)),
+            "border",
+             . ~ r, alim,
+            c("r", "%s[pois](r)", "hat(%s)[fb](r)"),
+            c("distance argument r",
+              "theoretical Poisson %s",
+              "border-corrected FFT estimate of %s"),
+            fname="K",
+            unitname=unitname(X)
+            )
+  return(out)
+}
+
diff --git a/R/Kmodel.R b/R/Kmodel.R
new file mode 100755
index 0000000..4bb2be8
--- /dev/null
+++ b/R/Kmodel.R
@@ -0,0 +1,15 @@
+#
+#  Kmodel.R
+#
+# Kmodel and pcfmodel
+#
+#  $Revision: 1.1 $  $Date: 2011/05/30 14:02:21 $
+#
+
+Kmodel <- function(model, ...) {
+  UseMethod("Kmodel")
+}
+
+pcfmodel <- function(model, ...) {
+  UseMethod("pcfmodel")
+}
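+
+# Illustrative dispatch (a sketch; assumes 'fit' is a cluster model
+# fitted with kppm(), for which a Kmodel method is provided):
+#   Kfun <- Kmodel(fit)   # returns a function of distance r
+#   Kfun(0.1)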
diff --git a/R/Kmulti.R b/R/Kmulti.R
new file mode 100755
index 0000000..9e5a6e0
--- /dev/null
+++ b/R/Kmulti.R
@@ -0,0 +1,375 @@
+#
+#	Kmulti.R
+#
+#	Compute estimates of cross-type K functions
+#	for multitype point patterns
+#
+#	$Revision: 5.48 $	$Date: 2015/10/21 09:06:57 $
+#
+#
+# -------- functions ----------------------------------------
+#	Kcross()	cross-type K function K_{ij}
+#                       between types i and j
+#
+#	Kdot()          K_{i\bullet}
+#                       between type i and all points regardless of type
+#
+#       Kmulti()        (generic)
+#
+#
+# -------- standard arguments ------------------------------	
+#	X		point pattern (of class 'ppp')
+#				including 'marks' vector
+#	r		distance values at which to compute K	
+#
+# -------- standard output ------------------------------
+#      A data frame with columns named
+#
+#	r:		same as input
+#
+#	trans:		K function estimated by translation correction
+#
+#	iso:		K function estimated by Ripley isotropic correction
+#
+#	theo:		K function for Poisson ( = pi * r^2 )
+#
+#	border:		K function estimated by border method
+#			using standard formula (denominator = count of points)
+#
+#       bord.modif:	K function estimated by border method
+#			using modified formula 
+#			(denominator = area of eroded window)
+#
+# ------------------------------------------------------------------------
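+#
+# Illustrative usage (a sketch; assumes the 'amacrine' dataset supplied
+# with spatstat, a multitype pattern with marks "off" and "on"):
+#   K <- Kcross(amacrine, "off", "on")
+#   plot(K)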
+
+"Lcross" <- function(X, i, j, ..., from, to) {
+  if(!is.multitype(X, dfok=FALSE)) 
+	stop("Point pattern must be multitype")
+  if(missing(i)) i <- if(!missing(from)) from else levels(marks(X))[1]
+  if(missing(j)) j <- if(!missing(to)) to else levels(marks(X))[2]
+  K <- Kcross(X, i, j, ...)
+  L <- eval.fv(sqrt(K/pi))
+  # relabel the fv object
+  iname <- make.parseable(paste(i))
+  jname <- make.parseable(paste(j))
+  L <- rebadge.fv(L,
+                  substitute(L[i,j](r),
+                             list(i=iname,j=jname)),
+                  c("L", paste0("list(", iname, ",", jname, ")")),
+                  new.yexp=substitute(L[list(i,j)](r),
+                                      list(i=iname,j=jname)))
+  attr(L, "labl") <- attr(K, "labl")
+  return(L)  
+}
+
+"Ldot" <- function(X, i, ..., from) {
+  if(!is.multitype(X, dfok=FALSE)) 
+	stop("Point pattern must be multitype")
+  if(missing(i)) i <- if(!missing(from)) from else levels(marks(X))[1]
+  K <- Kdot(X, i, ...)
+  L <- eval.fv(sqrt(K/pi))
+  # relabel the fv object
+  iname <- make.parseable(paste(i))
+  L <- rebadge.fv(L,
+                  substitute(L[i ~ dot](r), list(i=iname)),
+                  c("L", paste(iname, "~ symbol(\"\\267\")")), 
+                  new.yexp=substitute(L[i ~ symbol("\267")](r), list(i=iname)))
+  attr(L, "labl") <- attr(K, "labl")
+  return(L)  
+}
+
+"Kcross" <- 
+function(X, i, j, r=NULL, breaks=NULL,
+         correction =c("border", "isotropic", "Ripley", "translate") , ...,
+         ratio=FALSE, from, to)
+{
+  verifyclass(X, "ppp")
+  if(!is.multitype(X, dfok=FALSE)) 
+	stop("Point pattern must be multitype")
+  if(missing(correction))
+    correction <- NULL
+  marx <- marks(X)
+  if(missing(i))
+    i <- if(!missing(from)) from else levels(marx)[1]
+  if(missing(j))
+    j <- if(!missing(to)) to else levels(marx)[2]
+  I <- (marx == i)
+  if(!any(I))
+    stop(paste("No points have mark i =", i))
+
+  if(i == j) {
+    result <- Kest(X[I],
+                   r=r, breaks=breaks, correction=correction, ...,
+                   ratio=ratio)
+  } else {
+    J <- (marx == j)
+    if(!any(J))
+      stop(paste("No points have mark j =", j))
+    result <- Kmulti(X, I, J,
+                     r=r, breaks=breaks, correction=correction, ...,
+                     ratio=ratio)
+  }
+  iname <- make.parseable(paste(i))
+  jname <- make.parseable(paste(j))
+  result <-
+    rebadge.fv(result, 
+               substitute(Kcross[i,j](r), list(i=iname,j=jname)),
+               c("K", paste0("list(", iname, ",", jname, ")")), 
+               new.yexp=substitute(K[list(i,j)](r),
+                                   list(i=iname,j=jname)))
+  return(result)
+}
+
+"Kdot" <- 
+function(X, i, r=NULL, breaks=NULL,
+         correction = c("border", "isotropic", "Ripley", "translate") , ...,
+         ratio=FALSE, from)
+{
+  verifyclass(X, "ppp")
+  if(!is.multitype(X, dfok=FALSE)) 
+	stop("Point pattern must be multitype")
+  if(missing(correction))
+    correction <- NULL
+
+  marx <- marks(X)
+  if(missing(i))
+    i <- if(!missing(from)) from else levels(marx)[1]
+        
+  I <- (marx == i)
+  J <- rep.int(TRUE, X$n)  # i.e. all points
+	
+  if(!any(I)) stop(paste("No points have mark i =", i))
+	
+  result <- Kmulti(X, I, J,
+                   r=r, breaks=breaks, correction=correction, ..., ratio=ratio)
+  iname <- make.parseable(paste(i))
+  result <-
+    rebadge.fv(result,
+               substitute(K[i ~ dot](r), list(i=iname)),
+               c("K", paste0(iname, "~ symbol(\"\\267\")")),
+               new.yexp=substitute(K[i ~ symbol("\267")](r), list(i=iname)))
+  return(result)
+}
+
+
+"Kmulti"<-
+function(X, I, J, r=NULL, breaks=NULL,
+         correction = c("border", "isotropic", "Ripley", "translate") , ...,
+         ratio=FALSE)
+{
+  verifyclass(X, "ppp")
+
+  npts <- npoints(X)
+  W <- X$window
+  areaW <- area(W)
+
+  correction.given <- !missing(correction) && !is.null(correction)
+  if(is.null(correction))
+    correction <- c("border", "isotropic", "Ripley", "translate")
+  
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             border="border",
+                             "bord.modif"="bord.modif",
+                             isotropic="isotropic",
+                             Ripley="isotropic",
+                             trans="translate",
+                             translate="translate",
+                             translation="translate",
+                             best="best"),
+                           multi=TRUE)
+
+  correction <- implemented.for.K(correction, W$type, correction.given)
+
+  I <- ppsubset(X, I)
+  J <- ppsubset(X, J)
+  if(is.null(I) || is.null(J))
+    stop("I and J must be valid subset indices")
+	
+  if(!any(I)) stop("no points belong to subset I")
+  if(!any(J)) stop("no points belong to subset J")
+		
+  nI <- sum(I)
+  nJ <- sum(J)
+  lambdaI <- nI/areaW
+  lambdaJ <- nJ/areaW
+
+  # r values 
+  rmaxdefault <- rmax.rule("K", W, lambdaJ)
+  breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)
+  r <- breaks$r
+  rmax <- breaks$max
+        
+  # recommended range of r values
+  alim <- c(0, min(rmax, rmaxdefault))
+        
+  # this will be the output data frame
+  # It will be given more columns later
+  K <- data.frame(r=r, theo= pi * r^2)
+  desc <- c("distance argument r", "theoretical Poisson %s")
+  K <- fv(K, "r", quote(K[IJ](r)), 
+          "theo", , alim, c("r","{%s[%s]^{pois}}(r)"),
+          desc, fname=c("K", "list(I,J)"),
+          yexp=quote(K[list(I,J)](r)))
+  
+  # save numerator and denominator?
+  if(ratio) {
+    denom <- lambdaI * lambdaJ * areaW
+    numK <- eval.fv(denom * K)
+    denK <- eval.fv(denom + K * 0)
+    attributes(numK) <- attributes(denK) <- attributes(K)
+    attr(numK, "desc")[2] <- "numerator for theoretical Poisson %s"
+    attr(denK, "desc")[2] <- "denominator for theoretical Poisson %s"
+  }
+
+  # find close pairs of points
+  XI <- X[I]
+  XJ <- X[J]
+  close <- crosspairs(XI, XJ, max(r), what="ijd")
+# close$i and close$j are serial numbers in XI and XJ respectively;        
+# map them to original serial numbers in X
+  orig <- seq_len(npts)
+  imap <- orig[I]
+  jmap <- orig[J]
+  iX <- imap[close$i]
+  jX <- jmap[close$j]
+# eliminate any identical pairs
+  if(any(I & J)) {
+    ok <- (iX != jX)
+    if(!all(ok)) {
+      close$i  <- close$i[ok]
+      close$j  <- close$j[ok]
+      close$d  <- close$d[ok]
+    }
+  }
+# extract information for these pairs (relative to orderings of XI, XJ)
+  dcloseIJ <- close$d
+  icloseI  <- close$i
+  jcloseJ  <- close$j
+        
+# Compute estimates by each of the selected edge corrections.
+        
+  if(any(correction == "none")) {
+    # uncorrected! 
+    wh <- whist(dcloseIJ, breaks$val)  # no weights
+    numKun <- cumsum(wh)
+    denKun <- lambdaI * lambdaJ * areaW
+    Kun <- numKun/denKun
+    K <- bind.fv(K, data.frame(un=Kun), "{hat(%s)[%s]^{un}}(r)",
+                 "uncorrected estimate of %s",
+                 "un")
+    if(ratio) {
+      # save numerator and denominator
+      numK <- bind.fv(numK, data.frame(un=numKun), "{hat(%s)[%s]^{un}}(r)",
+                 "numerator of uncorrected estimate of %s",
+                 "un")
+      denK <- bind.fv(denK, data.frame(un=denKun), "{hat(%s)[%s]^{un}}(r)",
+                 "denominator of uncorrected estimate of %s",
+                 "un")
+    }
+
+  }
+  if(any(correction == "border" | correction == "bord.modif")) {
+    # border method
+    # distance to boundary from each point of type I
+    bI <- bdist.points(XI)
+    # distance to boundary from first element of each (i, j) pair
+    bcloseI <- bI[icloseI]
+    # apply reduced sample algorithm
+    RS <- Kount(dcloseIJ, bcloseI, bI, breaks)
+    if(any(correction == "bord.modif")) {
+      denom.area <- eroded.areas(W, r)
+      numKbm <- RS$numerator
+      denKbm <- denom.area * nI * nJ
+      Kbm <- numKbm/denKbm
+      K <- bind.fv(K, data.frame(bord.modif=Kbm), "{hat(%s)[%s]^{bordm}}(r)",
+                   "modified border-corrected estimate of %s",
+                   "bord.modif")
+      if(ratio) {
+        # save numerator and denominator
+        numK <- bind.fv(numK, data.frame(bord.modif=numKbm),
+                        "{hat(%s)[%s]^{bordm}}(r)",
+                        "numerator of modified border-corrected estimate of %s",
+                        "bord.modif")
+        denK <- bind.fv(denK, data.frame(bord.modif=denKbm),
+                        "{hat(%s)[%s]^{bordm}}(r)",
+                        "denominator of modified border-corrected estimate of %s",
+                        "bord.modif")
+      }
+    }
+    if(any(correction == "border")) {
+      numKb <- RS$numerator
+      denKb <- lambdaJ * RS$denom.count
+      Kb <- numKb/denKb
+      K <- bind.fv(K, data.frame(border=Kb), "{hat(%s)[%s]^{bord}}(r)",
+                   "border-corrected estimate of %s",
+                   "border")
+      if(ratio) {
+        numK <- bind.fv(numK, data.frame(border=numKb),
+                        "{hat(%s)[%s]^{bord}}(r)",
+                        "numerator of border-corrected estimate of %s",
+                        "border")
+        denK <- bind.fv(denK, data.frame(border=denKb),
+                        "{hat(%s)[%s]^{bord}}(r)",
+                        "denominator of border-corrected estimate of %s",
+                        "border")
+      }
+    }
+  }
+  if(any(correction == "translate")) {
+    # translation correction
+    edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE)
+    wh <- whist(dcloseIJ, breaks$val, edgewt)
+    numKtrans <- cumsum(wh)
+    denKtrans <- lambdaI * lambdaJ * areaW
+    Ktrans <- numKtrans/denKtrans
+    rmax <- diameter(W)/2
+    Ktrans[r >= rmax] <- NA
+    K <- bind.fv(K, data.frame(trans=Ktrans), "{hat(%s)[%s]^{trans}}(r)", 
+                 "translation-corrected estimate of %s",
+                 "trans")
+    if(ratio) {
+      numK <- bind.fv(numK, data.frame(trans=numKtrans),
+                      "{hat(%s)[%s]^{trans}}(r)",
+                      "numerator of translation-corrected estimate of %s",
+                      "trans")
+      denK <- bind.fv(denK, data.frame(trans=denKtrans),
+                      "{hat(%s)[%s]^{trans}}(r)",
+                      "denominator of translation-corrected estimate of %s",
+                      "trans")
+    }
+  }
+  if(any(correction == "isotropic")) {
+    # Ripley isotropic correction
+    edgewt <- edge.Ripley(XI[icloseI], matrix(dcloseIJ, ncol=1))
+    wh <- whist(dcloseIJ, breaks$val, edgewt)
+    numKiso <- cumsum(wh)
+    denKiso <- lambdaI * lambdaJ * areaW
+    Kiso <- numKiso/denKiso
+    rmax <- diameter(W)/2
+    Kiso[r >= rmax] <- NA
+    K <- bind.fv(K, data.frame(iso=Kiso), "{hat(%s)[%s]^{iso}}(r)",
+                 "Ripley isotropic correction estimate of %s",
+                 "iso")
+    if(ratio) {
+      numK <- bind.fv(numK, data.frame(iso=numKiso), "{hat(%s)[%s]^{iso}}(r)",
+                      "numerator of Ripley isotropic correction estimate of %s",
+                      "iso")
+      denK <- bind.fv(denK, data.frame(iso=denKiso), "{hat(%s)[%s]^{iso}}(r)",
+                      "denominator of Ripley isotropic correction estimate of %s",
+                      "iso")
+    }
+  }
+  # default is to display them all
+  formula(K) <- . ~ r
+  unitname(K) <- unitname(X)
+  
+  if(ratio) {
+    # finish up numerator & denominator
+    formula(numK) <- formula(denK) <- . ~ r
+    unitname(numK) <- unitname(denK) <- unitname(K)
+    # tack on to result
+    K <- rat(K, numK, denK, check=FALSE)
+  }
+  return(K)
+}
diff --git a/R/Kmulti.inhom.R b/R/Kmulti.inhom.R
new file mode 100755
index 0000000..449de26
--- /dev/null
+++ b/R/Kmulti.inhom.R
@@ -0,0 +1,488 @@
+#
+#	Kmulti.inhom.R
+#
+#	$Revision: 1.50 $	$Date: 2016/06/28 08:06:01 $
+#
+#
+# ------------------------------------------------------------------------
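+# Illustrative usage (a sketch; assumes the 'amacrine' dataset supplied
+# with spatstat; when lambdaI and lambdaJ are omitted they are estimated
+# by leave-one-out kernel smoothing):
+#   K <- Kcross.inhom(amacrine, "off", "on")
+#   plot(K)
+#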
+
+Lcross.inhom <- function(X, i, j, ...) {
+  if(!is.multitype(X, dfok=FALSE))
+	stop("Point pattern must be multitype")
+  if(missing(i)) i <- levels(marks(X))[1]
+  if(missing(j)) j <- levels(marks(X))[2]
+  K <- Kcross.inhom(X, i, j, ...)
+  L <- eval.fv(sqrt(pmax.int(K,0)/pi))
+  iname <- make.parseable(paste(i))
+  jname <- make.parseable(paste(j))
+  # relabel the fv object
+  L <- rebadge.fv(L,
+                  substitute(L[inhom,i,j](r),
+                             list(i=iname,j=jname)),
+                  c("L", paste0("list", paren(paste("inhom", i, j, sep=",")))),
+                  new.yexp=substitute(L[list(inhom,i,j)](r),
+                                      list(i=iname,j=jname)))
+  attr(L, "labl") <- attr(K, "labl")
+  attr(L, "dangerous") <- attr(K, "dangerous")
+  return(L)  
+}
+
+Ldot.inhom <- function(X, i, ...) {
+  if(!is.multitype(X, dfok=FALSE))
+	stop("Point pattern must be multitype")
+  if(missing(i)) i <- levels(marks(X))[1]
+  K <- Kdot.inhom(X, i, ...)
+  L <- eval.fv(sqrt(pmax.int(K,0)/pi))
+  # relabel the fv object
+  iname <- make.parseable(paste(i))
+  L <- rebadge.fv(L,
+                  substitute(L[inhom, i ~ dot](r), list(i=iname)),
+                  c("L", paste0("list(inhom,", iname, "~symbol(\"\\267\"))")),
+                  new.yexp=substitute(L[list(inhom, i ~ symbol("\267"))](r),
+                    list(i=iname)))
+  attr(L, "labl") <- attr(K, "labl")
+  attr(L, "dangerous") <- attr(K, "dangerous")
+  return(L)  
+}
+
+"Kcross.inhom" <- 
+function(X, i, j, lambdaI=NULL, lambdaJ=NULL, ...,
+         r=NULL, breaks=NULL,
+         correction = c("border", "isotropic", "Ripley", "translate"),
+         sigma=NULL, varcov=NULL,
+         lambdaIJ=NULL,
+         lambdaX=NULL, update=TRUE, leaveoneout=TRUE)
+{
+  verifyclass(X, "ppp")
+  if(!is.multitype(X, dfok=FALSE))
+	stop("Point pattern must be multitype")
+  if(missing(correction))
+    correction <- NULL
+  miss.update <- missing(update)
+  miss.leave <- missing(leaveoneout)
+  marx <- marks(X)
+  if(missing(i))
+    i <- levels(marx)[1]
+  if(missing(j))
+    j <- levels(marx)[2]
+  I <- (marx == i)
+  J <- (marx == j)
+  Iname <- paste("points with mark i =", i)
+  Jname <- paste("points with mark j =", j)
+  K <- Kmulti.inhom(X, I, J, lambdaI, lambdaJ, ...,
+                    r=r,breaks=breaks,correction=correction,
+                    sigma=sigma, varcov=varcov,
+                    lambdaIJ=lambdaIJ, Iname=Iname, Jname=Jname,
+                    lambdaX=lambdaX, update=update, leaveoneout=leaveoneout,
+                    miss.update=miss.update, miss.leave=miss.leave)
+  iname <- make.parseable(paste(i))
+  jname <- make.parseable(paste(j))
+  result <-
+    rebadge.fv(K,
+               substitute(K[inhom,i,j](r),
+                          list(i=iname,j=jname)),
+               c("K", paste0("list", paren(paste("inhom", i, j, sep=",")))),
+               new.yexp=substitute(K[list(inhom,i,j)](r),
+                                   list(i=iname,j=jname)))
+  attr(result, "dangerous") <- attr(K, "dangerous")
+  return(result)
+}
+
+"Kdot.inhom" <- 
+function(X, i, lambdaI=NULL, lambdadot=NULL, ...,
+         r=NULL, breaks=NULL,
+         correction = c("border", "isotropic", "Ripley", "translate"),
+         sigma=NULL, varcov=NULL, 
+         lambdaIdot=NULL,
+         lambdaX=NULL, update=TRUE, leaveoneout=TRUE)
+{
+  verifyclass(X, "ppp")
+  if(!is.multitype(X, dfok=FALSE))
+	stop("Point pattern must be multitype")
+  if(missing(correction))
+    correction <- NULL
+  miss.update <- missing(update)
+  miss.leave <- missing(leaveoneout)
+
+  marx <- marks(X)
+  if(missing(i))
+    i <- levels(marx)[1]
+
+  I <- (marx == i)
+  J <- rep.int(TRUE, X$n)  # i.e. all points
+  Iname <- paste("points with mark i =", i)
+  Jname <- paste("points")
+	
+  K <- Kmulti.inhom(X, I, J, lambdaI, lambdadot, ...,
+                    r=r,breaks=breaks,correction=correction,
+                    sigma=sigma, varcov=varcov,
+                    lambdaIJ=lambdaIdot,
+                    Iname=Iname, Jname=Jname,
+                    lambdaX=lambdaX, update=update, leaveoneout=leaveoneout,
+                    miss.update=miss.update, miss.leave=miss.leave)
+  iname <- make.parseable(paste(i))
+  result <-
+    rebadge.fv(K,
+               substitute(K[inhom, i ~ dot](r), list(i=iname)),
+               c("K", paste0("list(inhom,", iname, "~symbol(\"\\267\"))")),
+               new.yexp=substitute(K[list(inhom, i ~ symbol("\267"))](r),
+                                   list(i=iname)))
+  if(!is.null(dang <- attr(K, "dangerous"))) {
+    dang[dang == "lambdaJ"] <- "lambdadot"
+    dang[dang == "lambdaIJ"] <- "lambdaIdot"
+    attr(result, "dangerous") <- dang
+  }
+  return(result)
+}
+
+
+"Kmulti.inhom"<-
+function(X, I, J, lambdaI=NULL, lambdaJ=NULL, 
+         ...,
+         r=NULL, breaks=NULL,
+         correction = c("border", "isotropic", "Ripley", "translate"),
+         lambdaIJ=NULL,
+         sigma=NULL, varcov=NULL,
+         lambdaX=NULL, update=TRUE, leaveoneout=TRUE)
+{
+  verifyclass(X, "ppp")
+
+  dflt <- list(Iname="points satisfying condition I",
+               Jname="points satisfying condition J",
+               miss.update=missing(update),
+               miss.leave=missing(leaveoneout))
+
+  extrargs <- resolve.defaults(list(...), dflt)
+  if(length(extrargs) > length(dflt))
+    warning("Additional arguments unrecognised")
+  Iname <- extrargs$Iname
+  Jname <- extrargs$Jname
+  miss.update <- extrargs$miss.update
+  miss.leave <- extrargs$miss.leave
+        
+  npts <- npoints(X)
+  W <- as.owin(X)
+  areaW <- area(W)
+
+  # validate edge correction
+  correction.given <- !missing(correction) && !is.null(correction)
+  if(is.null(correction))
+    correction <- c("border", "isotropic", "Ripley", "translate")
+
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             border="border",
+                             "bord.modif"="bord.modif",
+                             isotropic="isotropic",
+                             Ripley="isotropic",
+                             trans="translate",
+                             translate="translate",
+                             translation="translate",
+                             best="best"),
+                           multi=TRUE)
+
+  correction <- implemented.for.K(correction, W$type, correction.given)
+
+  # validate I, J
+  I <- ppsubset(X, I)
+  J <- ppsubset(X, J)
+  if(is.null(I) || is.null(J))
+    stop("I and J must be valid subset indices")
+  XI <- X[I]
+  XJ <- X[J]
+  
+  nI <- sum(I)
+  nJ <- sum(J)
+  if(nI == 0) stop(paste("There are no", Iname))
+  if(nJ == 0) stop(paste("There are no", Jname))
+
+  # r values 
+  rmaxdefault <- rmax.rule("K", W, nJ/areaW)
+  breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)
+  r <- breaks$r
+  rmax <- breaks$max
+
+  dangerous <- c("lambdaI", "lambdaJ")
+  dangerI <- dangerJ <- TRUE
+
+  ## intensity data
+  if(!is.null(lambdaX)) {
+    ## Intensity values for all points of X
+    if(!is.null(lambdaI))
+      warning("lambdaI was ignored, because lambdaX was given", call.=FALSE)
+    if(!is.null(lambdaJ))
+      warning("lambdaJ was ignored, because lambdaX was given", call.=FALSE)
+    if(is.im(lambdaX)) {
+      ## Look up intensity values
+      lambdaI <- safelookup(lambdaX, X[I])
+      lambdaJ <- safelookup(lambdaX, X[J])
+    } else if(is.function(lambdaX)) {
+      ## evaluate function at locations
+      lambdaI <- lambdaX(XI$x, XI$y)
+      lambdaJ <- lambdaX(XJ$x, XJ$y)
+    } else if(is.numeric(lambdaX) && is.vector(as.numeric(lambdaX))) {
+      ## vector of intensity values
+      if(length(lambdaX) != npts)
+        stop(paste("The length of", sQuote("lambdaX"),
+                   "should equal the number of points of X"))
+      lambdaI <- lambdaX[I]
+      lambdaJ <- lambdaX[J]
+    } else if(is.ppm(lambdaX) || is.kppm(lambdaX) || is.dppm(lambdaX)) {
+      ## point process model provides intensity
+      model <- lambdaX
+      if(!update) {
+        ## just use intensity of fitted model
+        lambdaI <- predict(model, locations=XI, type="trend")
+        lambdaJ <- predict(model, locations=XJ, type="trend")
+      } else {
+        ## re-fit model to data X
+        if(is.ppm(model)) {
+          model <- update(model, Q=X)
+          lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
+        } else if(is.kppm(model)) {
+          model <- update(model, X=X)
+          lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
+        } else {
+          model <- update(model, X=X)
+          if(leaveoneout && !miss.leave)
+            warn.once("dppm.leaveoneout",
+                      "fitted.dppm(leaveoneout=TRUE)",
+                      "is not yet implemented")
+          lambdaX <- fitted(model, dataonly=TRUE)
+        }
+        lambdaI <- lambdaX[I]
+        lambdaJ <- lambdaX[J]
+        dangerI <- dangerJ <- FALSE
+        dangerous <- "lambdaIJ"
+        if(miss.update) 
+          warn.once(key="Kmulti.inhom.update",
+                    "The behaviour of Kmulti.inhom when lambda is a ppm object",
+                    "has changed (in spatstat 1.45-3 and later).",
+                    "See help(Kmulti.inhom)")
+      }
+    } else stop(paste("Argument lambdaX is not understood:",
+                      "it should be a numeric vector,",
+                      "an image, a function(x,y)",
+                      "or a fitted point process model (ppm, kppm or dppm)"))
+  } else {
+    ## lambdaI, lambdaJ expected
+    if(is.null(lambdaI)) {
+      ## estimate intensity
+      dangerI <- FALSE
+      dangerous <- setdiff(dangerous, "lambdaI")
+      lambdaI <- density(X[I], ..., sigma=sigma, varcov=varcov,
+                         at="points", leaveoneout=leaveoneout)
+    } else if(is.im(lambdaI)) {
+      ## look up intensity values
+      lambdaI <- safelookup(lambdaI, X[I])
+    } else if(is.function(lambdaI)) {
+      ## evaluate function at locations
+      lambdaI <- lambdaI(XI$x, XI$y)
+    } else if(is.numeric(lambdaI) && is.vector(as.numeric(lambdaI))) {
+      ## validate intensity vector
+      if(length(lambdaI) != nI)
+        stop(paste("The length of", sQuote("lambdaI"),
+                   "should equal the number of", Iname))
+    } else if(is.ppm(lambdaI) || is.kppm(lambdaI) || is.dppm(lambdaI)) {
+      ## point process model provides intensity
+      model <- lambdaI
+      if(!update) {
+        ## just use intensity of fitted model
+        lambdaI <- predict(model, locations=XI, type="trend")
+      } else {
+        ## re-fit model to data X
+        if(is.ppm(model)) {
+          model <- update(model, Q=X)
+          lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
+        } else if(is.kppm(model)) {
+          model <- update(model, X=X)
+          lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
+        } else {
+          model <- update(model, X=X)
+          if(leaveoneout && !miss.leave)
+            warn.once("dppm.leaveoneout",
+                      "fitted.dppm(leaveoneout=TRUE)",
+                      "is not yet implemented")
+          lambdaX <- fitted(model, dataonly=TRUE)
+        }
+        lambdaI <- lambdaX[I]
+        dangerI <- FALSE
+        dangerous <- setdiff(dangerous, "lambdaI")
+        if(miss.update) 
+          warn.once(key="Kmulti.inhom.update",
+                    "The behaviour of Kmulti.inhom when lambda is a ppm object",
+                    "has changed (in spatstat 1.45-3 and later).",
+                    "See help(Kmulti.inhom)")
+      }
+    } else stop(paste(sQuote("lambdaI"), "should be a vector or an image"))
+
+    if(is.null(lambdaJ)) {
+      ## estimate intensity
+      dangerJ <- FALSE
+      dangerous <- setdiff(dangerous, "lambdaJ")
+      lambdaJ <- density(X[J], ..., sigma=sigma, varcov=varcov,
+                         at="points", leaveoneout=leaveoneout)
+    } else if(is.im(lambdaJ)) {
+      ## look up intensity values
+      lambdaJ <- safelookup(lambdaJ, X[J])
+    } else if(is.function(lambdaJ)) {
+      ## evaluate function at locations
+      XJ <- X[J]
+      lambdaJ <- lambdaJ(XJ$x, XJ$y)
+    } else if(is.numeric(lambdaJ) && is.vector(as.numeric(lambdaJ))) {
+      ## validate intensity vector
+      if(length(lambdaJ) != nJ)
+        stop(paste("The length of", sQuote("lambdaJ"),
+                   "should equal the number of", Jname))
+    } else if(is.ppm(lambdaJ) || is.kppm(lambdaJ) || is.dppm(lambdaJ)) {
+      ## point process model provides intensity
+      model <- lambdaJ
+      if(!update) {
+        ## just use intensity of fitted model
+        lambdaJ <- predict(model, locations=XJ, type="trend")
+      } else {
+        ## re-fit model to data X
+        if(is.ppm(model)) {
+          model <- update(model, Q=X)
+          lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
+        } else if(is.kppm(model)) {
+          model <- update(model, X=X)
+          lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
+        } else {
+          model <- update(model, X=X)
+          if(leaveoneout && !miss.leave)
+            warn.once("dppm.leaveoneout",
+                      "fitted.pppm(leaveoneout=TRUE)",
+                      "is not yet implemented")
+          lambdaX <- fitted(model, dataonly=TRUE)
+        }
+        lambdaJ <- lambdaX[J]
+        dangerJ <- FALSE
+        dangerous <- setdiff(dangerous, "lambdaJ")
+        if(miss.update) 
+          warn.once(key="Kmulti.inhom.update",
+                    "The behaviour of Kmulti.inhom when lambda is a ppm object",
+                    "has changed (in spatstat 1.45-3 and later).",
+                    "See help(Kmulti.inhom)")
+      }
+    } else
+      stop(paste(sQuote("lambdaJ"),
+                 "should be a numeric vector, an image, a function(x,y)",
+                 "or a fitted point process model (ppm, kppm or dppm)"))
+  }
+
+  ## Weight for each pair
+  if(!is.null(lambdaIJ)) {
+    dangerIJ <- TRUE
+    dangerous <- union(dangerous, "lambdaIJ")
+    if(!is.matrix(lambdaIJ))
+      stop("lambdaIJ should be a matrix")
+    if(nrow(lambdaIJ) != nI)
+      stop(paste("nrow(lambdaIJ) should equal the number of", Iname))
+    if(ncol(lambdaIJ) != nJ)
+      stop(paste("ncol(lambdaIJ) should equal the number of", Jname))
+  } else {
+    dangerIJ <- FALSE
+  }
+
+  danger <- dangerI || dangerJ || dangerIJ
+
+  # Recommended range of r values
+  alim <- c(0, min(rmax, rmaxdefault))
+        
+  # this will be the output data frame
+  # It will be given more columns later
+  K <- data.frame(r=r, theo= pi * r^2)
+  desc <- c("distance argument r", "theoretical Poisson %s")
+  fname <- c("K", "list(inhom,I,J)")
+  K <- fv(K, "r", quote(K[inhom, I, J](r)),
+          "theo", , alim,
+          c("r", makefvlabel(NULL, NULL, fname, "pois")),
+          desc,
+          fname=fname,
+          yexp=quote(K[list(inhom,I,J)](r)))
+
+# identify close pairs of points
+  close <- crosspairs(XI, XJ, max(r), what="ijd")
+# map (i,j) to original serial numbers in X
+  orig <- seq_len(npts)
+  imap <- orig[I]
+  jmap <- orig[J]
+  iX <- imap[close$i]
+  jX <- jmap[close$j]
+# eliminate any identical pairs
+  if(any(I & J)) {
+    ok <- (iX != jX)
+    if(!all(ok)) {
+      close$i  <- close$i[ok]
+      close$j  <- close$j[ok]
+      close$d  <- close$d[ok]
+    }
+  }
+# extract information for these pairs (relative to orderings of XI, XJ)
+  dclose <- close$d
+  icloseI  <- close$i
+  jcloseJ  <- close$j
+        
+# Form weight for each pair
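+# (each close pair (i,j) receives weight 1/(lambda_i * lambda_j), the
+#  standard inhomogeneous-K weighting; a user-supplied matrix lambdaIJ
+#  overrides this with a joint intensity value for each pair)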
+  if(is.null(lambdaIJ))
+    weight <- 1/(lambdaI[icloseI] * lambdaJ[jcloseJ])
+  else 
+    weight <- 1/lambdaIJ[cbind(icloseI, jcloseJ)]
+
+# Compute estimates by each of the selected edge corrections.
+
+  if(any(correction == "border" | correction == "bord.modif")) {
+    # border method
+    # Compute distances to boundary
+    b <- bdist.points(XI)
+    bI <- b[icloseI]
+    # apply reduced sample algorithm
+    RS <- Kwtsum(dclose, bI, weight, b, 1/lambdaI, breaks)
+    if(any(correction == "border")) {
+      Kb <- RS$ratio
+      K <- bind.fv(K, data.frame(border=Kb),
+                   makefvlabel(NULL, "hat", fname, "bord"),
+                   "border-corrected estimate of %s",
+                   "border")
+    }
+    if(any(correction == "bord.modif")) {
+      Kbm <- RS$numerator/eroded.areas(W, r)
+      K <- bind.fv(K, data.frame(bord.modif=Kbm),
+                   makefvlabel(NULL, "hat", fname, "bordm"),
+                   "modified border-corrected estimate of %s",
+                   "bord.modif")
+    }
+  }
+  if(any(correction == "translate")) {
+    ## translation correction
+    edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE)
+    allweight <- edgewt * weight
+    wh <- whist(dclose, breaks$val, allweight)
+    Ktrans <- cumsum(wh)/areaW
+    rmax <- diameter(W)/2
+    Ktrans[r >= rmax] <- NA
+    K <- bind.fv(K, data.frame(trans=Ktrans),
+                 makefvlabel(NULL, "hat", fname, "trans"),
+                 "translation-corrected estimate of %s",
+                 "trans")
+  }
+  if(any(correction == "isotropic")) {
+    ## Ripley isotropic correction
+    edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1))
+    allweight <- edgewt * weight
+    wh <- whist(dclose, breaks$val, allweight)
+    Kiso <- cumsum(wh)/areaW
+    rmax <- diameter(W)/2
+    Kiso[r >= rmax] <- NA
+    K <- bind.fv(K, data.frame(iso=Kiso), 
+                 makefvlabel(NULL, "hat", fname, "iso"),
+                 "Ripley isotropic correction estimate of %s",
+                 "iso")
+  }
+  ## default is to display them all
+  formula(K) <- . ~ r
+  unitname(K) <- unitname(X)
+  if(danger)
+    attr(K, "dangerous") <- dangerous
+  return(K)
+}
diff --git a/R/Kres.R b/R/Kres.R
new file mode 100755
index 0000000..41d1bd6
--- /dev/null
+++ b/R/Kres.R
@@ -0,0 +1,72 @@
+#
+#	Kres.R
+#
+#	Residual K
+#
+#	$Revision: 1.3 $	$Date: 2013/04/25 06:37:43 $
+#
+#############################################################################
+#
+
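+# Illustrative usage (a sketch; assumes the 'cells' dataset supplied with
+# spatstat and a fitted Poisson model):
+#   fit <- ppm(cells ~ 1)
+#   plot(Kres(fit))   # residuals fluctuating about 0 indicate a good fit
+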
+Kres <- function(object, ...) {
+  if(!is.fv(object)) {
+    # usual case where 'object' is a ppm, ppp or quad
+    K <- Kcom(object, ...)
+  } else {
+    # case where 'object' is the output of 'Kcom'
+    a <- attr(object, "maker")
+    if(is.null(a) || a != "Kcom")
+      stop("fv object was not created by Kcom")
+    K <- object
+    if(length(list(...)) > 0)
+      warning("Extra arguments ignored")
+  }
+  # initialise fv object
+  df <- data.frame(r=K$r, theo=numeric(length(K$r)))
+  desc <- c("distance argument r", "value 0 corresponding to perfect fit")
+  ans <- fv(df, "r", substitute(bold(R)~hat(K)(r), NULL),
+            "theo", . ~ r ,
+            attr(K, "alim"), c("r","bold(R)~%s[theo](r)"), desc, fname="K")
+  # add residual functions
+  nam <- names(K)
+  if("border" %in% nam)
+    ans <- bind.fv(ans,
+                    data.frame(bres=with(K, border-bcom)),
+                    "bold(R)~hat(%s)[bord](r)",
+                    "residual function %s based on border correction",
+                    "bres")
+  if(all(c("trans","tcom") %in% nam))
+    ans <- bind.fv(ans,
+                    data.frame(tres=with(K, trans-tcom)),
+                    "bold(R)~hat(%s)[trans](r)",
+                    "residual function %s based on translation correction",
+                    "tres")
+  if(all(c("iso","icom") %in% nam))
+    ans <- bind.fv(ans,
+                    data.frame(ires=with(K, iso-icom)),
+                    "bold(R)~hat(%s)[iso](r)",
+                    "residual function %s based on isotropic correction",
+                    "ires")
+  if("ivar" %in% nam) {
+    savedotnames <- fvnames(ans, ".")
+    ans <- bind.fv(ans,
+                   as.data.frame(K)[, c("ivar", "isd", "ihi", "ilo")],
+                    c("bold(C)^2~hat(%s)[iso](r)",
+                      "sqrt(bold(C)^2~hat(%s)[iso](r))",
+                      "bold(R)~hat(%s)[Hi](r)",
+                      "bold(R)~hat(%s)[Lo](r)"),
+                    c("pseudovariance of isotropic-corrected residual %s",
+                      "pseudo-SD of isotropic-corrected residual %s",
+                      "upper critical band for isotropic-corrected residual %s",
+                      "lower critical band for isotropic-corrected residual %s"),
+                    "ires")
+    ans <- bind.fv(ans,
+                   data.frame(istdres=with(ans, ires/isd)),
+                   "bold(T)~hat(%s)[iso](r)",
+                   "standardised isotropic-corrected residual %s",
+                   "ires")
+    fvnames(ans, ".") <- c(savedotnames, c("ihi", "ilo"))
+  }
+  unitname(ans) <- unitname(K)
+  return(ans)
+}
diff --git a/R/Kscaled.R b/R/Kscaled.R
new file mode 100755
index 0000000..7045f61
--- /dev/null
+++ b/R/Kscaled.R
@@ -0,0 +1,176 @@
+#
+#	Kscaled.R	Estimation of K function for locally-scaled process
+#
+#	$Revision: 1.16 $	$Date: 2015/02/22 03:00:48 $
+#
+
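+# Illustrative usage (a sketch; X is assumed to be a ppp object with
+# spatially varying intensity; when 'lambda' is omitted it is estimated
+# by leave-one-out kernel smoothing):
+#   K <- Kscaled(X)
+#   plot(K)   # distances are in rescaled (intensity-normalised) units
+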
+"Lscaled" <- function(...) {
+  K <- Kscaled(...)
+  L <- eval.fv(sqrt(pmax.int(K,0)/pi))
+  # relabel the fv object
+  L <- rebadge.fv(L, quote(L[scaled](r)), c("L","scaled"))
+  attr(L, "labl") <- attr(K, "labl")
+  return(L)  
+}
+
+"Kscaled"<-
+  function (X, lambda=NULL, ..., r = NULL, breaks = NULL,
+            rmax = 2.5,
+            correction=c("border", "isotropic", "translate"),
+            renormalise=FALSE, normpower=1,
+            sigma=NULL, varcov=NULL)
+{
+  verifyclass(X, "ppp")
+#  rfixed <- !missing(r) || !missing(breaks)
+
+  ## determine basic parameters
+  W <- X$window
+  npts <- X$n
+  areaW <- area(W)
+  halfdiameter <- diameter(W)/2
+  
+  ## match corrections
+  correction.given <- !missing(correction) && !is.null(correction)
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             border="border",
+                             isotropic="isotropic",
+                             Ripley="isotropic",
+                             trans="translate",
+                             translate="translate",
+                             translation="translate",
+                             best="best"),
+                           multi=TRUE)
+
+#  best.wanted <- ("best" %in% correction)
+  correction <- implemented.for.K(correction, W$type, correction.given)
+
+  ###########################################################
+  ## DETERMINE WEIGHTS AND VALIDATE
+  ##
+
+  if(missing(lambda)) {
+    ## No intensity data provided
+    ## Estimate density by leave-one-out kernel smoothing
+    lambda <- density(X, ..., sigma=sigma, varcov=varcov,
+                      at="points", leaveoneout=TRUE)
+    lambda <- as.numeric(lambda)
+  } else {
+    ## lambda values provided
+    if(is.im(lambda)) 
+      lambda <- safelookup(lambda, X)
+    else if(is.function(lambda)) 
+      lambda <- lambda(X$x, X$y)
+    else if(is.ppm(lambda)) 
+      lambda <- safelookup(predict(lambda, type="trend"), X)
+    else if(!is.numeric(lambda) || !is.null(dim(lambda)))
+      stop(paste(sQuote("lambda"),
+                 "should be a vector, a pixel image, a function or a ppm"))
+    check.nvector(lambda, npts)
+  }
+
+  if(renormalise) {
+    ## renormalise. Here we only need half the power ;-)
+    check.1.real(normpower)
+    stopifnot(normpower %in% 1:2) 
+    renorm.factor <- (areaW/sum(1/lambda))^(normpower/2)
+    lambda <- lambda/renorm.factor
+  }     
+  ## Calculate range of r values using max lambda
+  sra <- sqrt(range(lambda))
+  minrescale <- sra[1]
+  maxrescale <- sra[2]
+
+  ## convert arguments to absolute distances 
+  absr <- if(!is.null(r)) r/maxrescale else NULL
+  absrmaxdefault <- min(rmax.rule("K", W), rmax/maxrescale)
+  absbreaks <-
+    if(!is.null(breaks)) scalardilate(breaks, 1/maxrescale) else NULL
+  ## determine absolute distances
+  absbreaks <- handle.r.b.args(absr, absbreaks, W, rmaxdefault=absrmaxdefault)
+  absr <- absbreaks$r
+  ## convert to rescaled distances
+  breaks <- scalardilate(absbreaks, maxrescale)
+  r <- breaks$r
+  rmax <- breaks$max
+  ## recommended range of scaled r values
+  alim <- c(0, min(rmax, maxrescale * absrmaxdefault))
+  rthresh <- minrescale * halfdiameter
+  ## maximum absolute distance ever needed
+  maxabsdist <- min(rmax/minrescale, halfdiameter)
+  
+  ## this will be the output data frame
+  K <- data.frame(r=r, theo= pi * r^2)
+  desc <- c("distance argument r", "theoretical Poisson %s")
+  K <- fv(K, "r", quote(K[scaled](r)),
+          "theo", , alim,
+          c("r","{%s[%s]^{pois}}(r)"),
+          desc,
+          fname=c("K", "scaled"))
+        
+  ## identify all relevant close pairs
+  what <- if(any(correction == "translate")) "all" else "ijd"
+  close <- closepairs(X, maxabsdist, what=what)
+  I <- close$i
+  J <- close$j
+  ## locally-scaled distances
+  sqrtLambda <- sqrt(lambda)
+  lamIJ <- (sqrtLambda[I] + sqrtLambda[J])/2
+  absDIJ <- close$d
+  DIJ <- absDIJ * lamIJ
+
+  XI <- ppp(close$xi, close$yi, window=W, check=FALSE)
+  
+  if(any(correction == "none")) {
+    ## uncorrected! For demonstration purposes only!
+    wh <- whist(DIJ, breaks$val)  # no weights
+    Kun <- cumsum(wh)/npts
+    K <- bind.fv(K, data.frame(un=Kun), "{hat(%s)[%s]^{un}}(r)",
+                 "uncorrected estimate of %s",
+                 "un")
+  }
+  
+  if(any(correction == "border")) {
+    ## border method
+    ## Compute SCALED distances to boundary
+    b <- bdist.points(X) * sqrtLambda
+    bI <- b[I]
+    ## apply reduced sample algorithm to scaled distances
+    RS <- Kount(DIJ, bI, b, breaks)
+    Kb <- RS$numerator/RS$denom.count
+    Kb[r > rthresh] <- NA
+    K <- bind.fv(K, data.frame(border=Kb), "{hat(%s)[%s]^{bord}}(r)",
+                 "border-corrected estimate of %s",
+                 "border")
+  }
+
+  if(any(correction == "translate")) {
+    ## translation correction
+    XJ <- ppp(close$xj, close$yj, window=W, check=FALSE)
+    edgewt <- edge.Trans(XI, XJ, paired=TRUE)
+    wh <- whist(DIJ, breaks$val, edgewt)
+    Ktrans <- cumsum(wh)/npts
+    Ktrans[r >= rthresh] <- NA
+    K <- bind.fv(K, data.frame(trans=Ktrans), "{hat(%s)[%s]^{trans}}(r)",
+                 "translation-corrected estimate of %s",
+                 "trans")
+  }
+  if(any(correction == "isotropic")) {
+    ## Ripley isotropic correction (using UN-SCALED distances)
+    edgewt <- edge.Ripley(XI, matrix(absDIJ, ncol=1))
+    wh <- whist(DIJ, breaks$val, edgewt)
+    Kiso <- cumsum(wh)/npts
+    Kiso[r >= rthresh] <- NA
+    K <- bind.fv(K, data.frame(iso=Kiso), "{hat(%s)[%s]^{iso}}(r)",
+                 "Ripley isotropic correction estimate of %s",
+                 "iso")
+  }
+  ## default plot will display all edge corrections
+  formula(K) <- . ~ r
+  nama <- rev(colnames(K))
+  fvnames(K, ".") <- nama[!(nama %in% c("r", "rip", "ls"))]
+  ##
+  unitname(K) <- c("normalised unit", "normalised units")
+  return(K)
+}
+	
diff --git a/R/Ksector.R b/R/Ksector.R
new file mode 100644
index 0000000..b367a66
--- /dev/null
+++ b/R/Ksector.R
@@ -0,0 +1,231 @@
+#
+#	Ksector.R	Estimation of 'sector K function'
+#
+#	$Revision: 1.5 $	$Date: 2014/11/10 10:41:14 $
+#
+
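+# Illustrative usage (a sketch; assumes the 'cells' dataset supplied with
+# spatstat; only pairs whose direction falls in the given angular sector
+# contribute):
+#   K <- Ksector(cells, begin=0, end=90)   # bearings from 0 to 90 degrees
+#   plot(K)
+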
+Ksector <- function(X, begin=0, end=360, ...,
+                    units=c("degrees", "radians"),
+                    r=NULL, breaks=NULL, 
+                    correction=c("border", "isotropic", "Ripley", "translate"),
+                    domain = NULL,
+                    ratio=FALSE, verbose=TRUE)
+{
+  verifyclass(X, "ppp")
+#  rfixed <- !is.null(r) || !is.null(breaks)
+  npts <- npoints(X)
+  W <- Window(X)
+  areaW <- area(W)
+  lambda <- npts/areaW
+  lambda2 <- (npts * (npts - 1))/(areaW^2)
+  rmaxdefault <- rmax.rule("K", W, lambda)        
+  breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)
+  r <- breaks$r
+  rmax <- breaks$max
+  
+  if(!is.null(domain)) {
+    domain <- as.owin(domain)
+    stopifnot(is.subset.owin(domain, Window(X)))
+    areaW <- area(domain)
+  }
+
+  units <- match.arg(units)
+  switch(units,
+         radians = {
+           if(missing(end)) end <- 2 * pi
+           check.1.real(begin)
+           check.1.real(end)
+           check.in.range(begin, c(-pi, 2*pi))
+           check.in.range(end, c(0, 2*pi))
+           stopifnot(begin < end)
+           stopifnot((end - begin) <= 2 * pi)
+           BEGIN <- begin
+           END   <- end
+           Bname <- simplenumber(begin/pi, "pi") %orifnull% signif(begin, 3)
+           Ename <- simplenumber(end/pi, "pi") %orifnull% signif(end, 3)
+         },
+         degrees = {
+           check.1.real(begin)
+           check.1.real(end)
+           check.in.range(begin, c(-90, 360))
+           check.in.range(end, c(0, 360))
+           stopifnot(begin < end)
+           stopifnot((end - begin) <= 360)
+           if(verbose && (end - begin) <= 2 * pi)
+             warning("Very small interval in degrees: did you mean radians?")
+           BEGIN <- pi* (begin/180)
+           END   <- pi * (end/180)
+           Bname <- signif(begin, 3)
+           Ename <- signif(end, 3)
+         })
+  ## choose correction(s)
+  correction.given <- !missing(correction) && !is.null(correction)
+  if(is.null(correction))
+    correction <- c("border", "isotropic", "Ripley", "translate")
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             border="border",
+                             "bord.modif"="bord.modif",
+                             isotropic="isotropic",
+                             Ripley="isotropic",
+                             trans="translate",
+                             translate="translate",
+                             translation="translate",
+                             good="good",
+                             best="best"),
+                           multi=TRUE)
+#  best.wanted <- ("best" %in% correction)
+  ## replace 'good' by the optimal choice for this size of dataset
+  if("good" %in% correction)
+    correction[correction == "good"] <- good.correction.K(X)
+  ## retain only corrections that are implemented for the window
+  correction <- implemented.for.K(correction, W$type, correction.given)
+  
+  ## recommended range of r values
+  alim <- c(0, min(rmax, rmaxdefault))
+
+  ## labels
+  subscripts <- paste("sector", Bname, Ename, sep=",")
+  ylabel <- paste("K[", subscripts, "]")
+  ylab <-  eval(parse(text=paste("quote(", ylabel, ")")))
+#  ylab <-  parse(text=paste("K[sector,", Bname, ",", Ename, "]"))
+#  yexp <- substitute(K[list(sector,B,E)](r),
+#                     list(B=Bname, E=Ename))
+  yexp <-  parse(text=paste("K[list(", subscripts, ")]"))
+  fname <- c("K", paste("list", paren(subscripts)))
+  
+  ## this will be the output data frame
+  Kdf <- data.frame(r=r, theo = ((END-BEGIN)/2) * r^2)
+  desc <- c("distance argument r", "theoretical Poisson %s")
+  denom <- lambda2 * areaW
+  K <- ratfv(Kdf, NULL, denom,
+             "r",
+             ylab = ylab,
+             valu = "theo",
+             fmla = NULL,
+             alim =alim,
+             labl = c("r","{%s[%s]^{pois}}(r)"),
+             desc = desc,
+             fname=fname, yexp=yexp, 
+             ratio=ratio)
+  
+  ## identify all close pairs
+  rmax <- max(r)
+  close <- as.data.frame(closepairs(X, rmax))
+
+  if(!is.null(domain)) {
+    ## restrict to pairs with first point in 'domain'
+    indom <- with(close, inside.owin(xi, yi, domain))
+    close <- close[indom, , drop=FALSE]
+  }
+
+  ## select pairs in angular range
+  ang <- with(close, atan2(dy, dx)) %% (2*pi)
+  if(BEGIN >= 0) {
+    ## 0 <= begin < end
+    ok <- (BEGIN <= ang) & (ang <= END)
+  } else {
+    ## begin < 0 <= end
+    ok <- (ang >= 2 * pi + BEGIN) | (ang <= END)
+  }
+  close <- close[ok, , drop=FALSE]
+
+  ## pairwise distances
+  DIJ <- close$d
+
+  if(any(correction == "none")) {
+    # uncorrected! For demonstration purposes only!
+    wh <- whist(DIJ, breaks$val)  # no weights
+    numKun <- cumsum(wh)
+    denKun <- lambda2 * areaW
+    # uncorrected estimate of K
+    K <- bind.ratfv(K,
+                    data.frame(un=numKun), denKun,
+                    "{hat(%s)[%s]^{un}}(r)",
+                    "uncorrected estimate of %s",
+                    "un",
+                    ratio=ratio)
+  }
+  
+  if(any(correction == "border" | correction == "bord.modif")) {
+  # border method
+  # Compute distances to boundary
+    b <- bdist.points(X)
+    I <- close$i
+    bI <- b[I]
+    if(!is.null(domain))
+      b <- b[inside.owin(X, , w=domain)]
+  # apply reduced sample algorithm
+    RS <- Kount(DIJ, bI, b, breaks)
+    if(any(correction == "bord.modif")) {
+      # modified border correction
+      denom.area <- eroded.areas(W, r, subset=domain)
+      numKbm <- RS$numerator
+      denKbm <- lambda2 * denom.area
+      K <- bind.ratfv(K,
+                      data.frame(bord.modif=numKbm),
+                      data.frame(bord.modif=denKbm),
+                      "{hat(%s)[%s]^{bordm}}(r)",
+                      "modified border-corrected estimate of %s",
+                      "bord.modif",
+                      ratio=ratio)
+    }
+    if(any(correction == "border")) {
+      numKb <- RS$numerator
+      denKb <- lambda * RS$denom.count
+      K <- bind.ratfv(K,
+                      data.frame(border=numKb), 
+                      data.frame(border=denKb), 
+                      "{hat(%s)[%s]^{bord}}(r)",
+                      "border-corrected estimate of %s",
+                      "border",
+                      ratio=ratio)
+    }
+  }
+
+  if(any(correction == "translate")) {
+    ## Ohser-Stoyan translation correction
+    edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE)
+    wh <- whist(DIJ, breaks$val, edgewt)
+    numKtrans <- cumsum(wh)
+    denKtrans <- lambda2 * areaW
+    h <- diameter(as.rectangle(W))/2
+    numKtrans[r >= h] <- NA
+    K <- bind.ratfv(K,
+                    data.frame(trans=numKtrans),
+                    denKtrans,
+                    "{hat(%s)[%s]^{trans}}(r)",
+                    "translation-corrected estimate of %s",
+                    "trans",
+                    ratio=ratio)
+  }
+  if(any(correction == "isotropic")) {
+    ## Ripley isotropic correction
+    XI <- ppp(close$xi, close$yi, window=W, check=FALSE)
+    edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1))
+    wh <- whist(DIJ, breaks$val, edgewt)
+    numKiso <- cumsum(wh)
+    denKiso <- lambda2 * areaW
+    h <- diameter(W)/2
+    numKiso[r >= h] <- NA
+    K <- bind.ratfv(K,
+                    data.frame(iso=numKiso),
+                    denKiso,
+                    "{hat(%s)[%s]^{iso}}(r)",
+                    "Ripley isotropic correction estimate of %s",
+                    "iso",
+                    ratio=ratio)
+  }
+  #
+  # default plot will display all edge corrections
+  formula(K) <- . ~ r
+  nama <- rev(colnames(K))
+  nama <- nama[!(nama %in% c("r", "rip", "ls"))]
+  fvnames(K, ".") <- nama
+  unitname(K) <- unitname(X)
+  # copy to other components
+  if(ratio)
+    K <- conform.ratfv(K)
+
+  return(K)
+}
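+## Illustrative usage sketch. For an isotropic pattern, estimates for
+## different angular sectors should agree, each approaching the Poisson
+## value ((END-BEGIN)/2) * r^2:
+#   X  <- rpoispp(100)
+#   K1 <- Ksector(X, begin=0,  end=90)    # angles in degrees by default
+#   K2 <- Ksector(X, begin=90, end=180)
+#   plot(K1); plot(K2)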
diff --git a/R/Math.im.R b/R/Math.im.R
new file mode 100644
index 0000000..4487324
--- /dev/null
+++ b/R/Math.im.R
@@ -0,0 +1,39 @@
+##
+##   Math.im.R
+##
+##   $Revision: 1.7 $ $Date: 2017/01/12 03:50:22 $
+##
+
+Ops.im <- function(e1,e2=NULL){
+    unary <- nargs() == 1L
+    if(unary){
+        if(!is.element(.Generic, c("!", "-", "+")))
+            stop("Unary usage is undefined for this operation for images.")
+        callstring <- paste(.Generic, "e1")
+    } else {
+        callstring <- paste("e1", .Generic, "e2")
+    }
+    expr <- parse(text = callstring)
+    return(do.call(eval.im, list(expr = expr)))
+}
+
+Math.im <- function(x, ...){
+    m <- do.call(.Generic, list(x$v, ...))
+    rslt <- im(m, xcol = x$xcol, yrow = x$yrow, xrange = x$xrange,
+               yrange = x$yrange, unitname = unitname(x))
+    return(rslt)
+}
+
+Summary.im <- function(..., na.rm=FALSE, drop=TRUE){
+  argh <- list(...)
+  ims <- sapply(argh, is.im)
+  argh[ims] <- lapply(argh[ims], getElement, name="v")
+  do.call(.Generic, c(argh, list(na.rm = na.rm || drop)))
+}
+
+Complex.im <- function(z){
+    m <- do.call(.Generic, list(z=z$v))
+    rslt <- im(m, xcol = z$xcol, yrow = z$yrow, xrange = z$xrange,
+               yrange = z$yrange, unitname = unitname(z))
+    return(rslt)
+}
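+## Illustrative sketch of what these group generics enable, assuming the
+## 'cells' dataset shipped with spatstat:
+#   D <- density(cells)   # a pixel image of class "im"
+#   exp(-D)               # Math.im (after unary Ops.im)
+#   D > max(D)/2          # Ops.im (and Summary.im for the maximum)
+#   range(D)              # Summary.im; NA pixels dropped when drop=TRUE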
diff --git a/R/Math.imlist.R b/R/Math.imlist.R
new file mode 100644
index 0000000..847b8b2
--- /dev/null
+++ b/R/Math.imlist.R
@@ -0,0 +1,40 @@
+##
+##   Math.imlist.R
+##
+##   $Revision: 1.4 $ $Date: 2017/08/15 03:46:57 $
+##
+
+Math.imlist <- function(x, ...){
+  solapply(x, .Generic, ...)
+}
+
+Complex.imlist <- function(z){
+  solapply(z, .Generic)
+}
+
+Summary.imlist <- function(..., na.rm=TRUE){
+  argh <- expandSpecialLists(list(...))
+  if(length(names(argh)) > 0) {
+    isim <- sapply(argh, is.im)
+    names(argh)[isim] <- ""
+  }
+  do.call(.Generic, c(argh, list(na.rm=na.rm)))
+}
+
+Ops.imlist <- function(e1,e2=NULL){
+  if(nargs() == 1L) {
+    #' unary operation
+    return(solapply(e1, .Generic))
+  } 
+  #' binary operation
+  if(inherits(e2, "imlist")) {
+    #' two image lists - must have equal length
+    v <- mapply(.Generic, unname(e1), unname(e2), SIMPLIFY=FALSE)
+    names(v) <- names(e1)
+    return(as.solist(v))
+  }
+  #' other binary operation e.g. imlist + constant, imlist + im
+  return(solapply(e1, .Generic, e2=e2))
+}
+
+
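+## Illustrative sketch, assuming density.splitppp returns a list of
+## images of class "imlist":
+#   Ds <- density(split(amacrine))   # one intensity image per mark
+#   log(Ds)                          # Math.imlist: log of each image
+#   max(Ds)                          # Summary.imlist: overall maximum
+#   Ds - min(Ds)                     # Ops.imlist with a constant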
diff --git a/R/Math.linim.R b/R/Math.linim.R
new file mode 100644
index 0000000..dd68dd3
--- /dev/null
+++ b/R/Math.linim.R
@@ -0,0 +1,48 @@
+##
+##   Math.linim.R
+##
+##   $Revision: 1.3 $ $Date: 2015/02/15 10:50:01 $
+##
+
+Ops.linim <- function(e1,e2=NULL){
+    unary <- nargs() == 1L
+    if(unary){
+        if(!is.element(.Generic, c("!", "-", "+")))
+            stop("Unary usage is undefined for this operation for images.")
+        callstring <- paste(.Generic, "e1")
+    } else {
+        callstring <- paste("e1", .Generic, "e2")
+    }
+    expr <- parse(text = callstring)
+    return(do.call(eval.linim, list(expr = expr)))
+}
+
+Math.linim <- function(x, ...){
+    m <- do.call(.Generic, list(x[,,drop=FALSE], ...))
+    Z <- im(m, xcol = x$xcol, yrow = x$yrow, xrange = x$xrange,
+            yrange = x$yrange, unitname = unitname(x))
+    df <- attr(x, "df")
+    df$values <- do.call(.Generic, list(df$values, ...))
+    L <- attr(x, "L")
+    rslt <- linim(L, Z, df=df)
+    return(rslt)
+}
+
+Summary.linim <- function(..., na.rm=FALSE){
+    args <- list(...)
+    argp <- lapply(args, "[")
+    argd <- if(is.element(.Generic, c("sum", "prod"))) list() else 
+            lapply(lapply(args, attr, which="df"), getElement, name="values")
+    do.call(.Generic, c(argp, argd, na.rm = na.rm))
+}
+
+Complex.linim <- function(z){
+    L <- attr(z, "L")
+    df <- attr(z, "df")
+    m <- do.call(.Generic, list(z=z[drop=TRUE]))
+    Z <- im(m, xcol = z$xcol, yrow = z$yrow, xrange = z$xrange,
+               yrange = z$yrange, unitname = unitname(z))
+    df$values <- do.call(.Generic, list(z=df$values))
+    rslt <- linim(L, Z, df=df)
+    return(rslt)
+}
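+## Illustrative sketch, assuming the 'simplenet' network shipped with
+## spatstat and that density.lpp returns a "linim" object:
+#   X <- runiflpp(50, simplenet)   # random points on a linear network
+#   D <- density(X, 0.1)           # kernel estimate of intensity
+#   log(D + 1)                     # Ops.linim, then Math.linim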
diff --git a/R/Tstat.R b/R/Tstat.R
new file mode 100644
index 0000000..b5d7e9c
--- /dev/null
+++ b/R/Tstat.R
@@ -0,0 +1,237 @@
+#
+#	Tstat.R		Estimation of T function
+#
+#	$Revision: 1.11 $	$Date: 2016/02/11 09:36:11 $
+#
+
+Tstat <- local({
+  
+  # helper functions
+  diffrange <- function(z) diff(range(z, na.rm=TRUE))
+  
+  edgetri.Trans <- function(X, triid, trim=spatstat.options("maxedgewt")) {
+    triid <- as.matrix(triid)
+    ntri <- nrow(triid)
+    if(ntri == 0) return(numeric(0))
+    W <- rescue.rectangle(as.owin(X))
+    if(W$type != "rectangle")
+      stop("Translation correction is only implemented for rectangular windows")
+    x <- matrix(X$x[triid], nrow=ntri)
+    y <- matrix(X$y[triid], nrow=ntri)
+    dx <- apply(x, 1, diffrange)
+    dy <- apply(y, 1, diffrange)
+    wide <- diff(W$xrange)
+    high <- diff(W$yrange)
+    weight <- wide * high/((wide - dx) * (high - dy))
+    weight <- pmin.int(trim, weight)
+    return(weight)
+  }
+  # helper function
+  implemented.for.T <- function(correction, windowtype, explicit) {
+    rect <- (windowtype == "rectangle")
+    if(any(correction == "best")) {
+      # select best available correction
+      correction <- if(rect) "translate" else "border"
+    } else {
+      # available selection of edge corrections depends on window
+      if(!rect) {
+        tra <- (correction == "translate") 
+        if(any(tra)) {
+          whinge <- "Translation correction is only implemented for rectangular windows"
+          if(explicit) {
+            if(all(tra)) stop(whinge) else warning(whinge)
+          }
+          correction <- correction[!tra]
+        }
+      }
+    }
+    return(correction)
+  }
+  # .......... main function ....................
+  Tstat <- function(X, ..., r=NULL, rmax=NULL,
+                    correction=c("border", "translate"),
+                    ratio=FALSE,
+                    verbose=TRUE)
+    {
+      verifyclass(X, "ppp")
+#      rfixed <- !is.null(r) 
+      npts <- npoints(X)
+      W <- Window(X)
+      areaW <- area(W)
+      lambda <- npts/areaW
+      lambda2 <- (npts * (npts - 1))/(areaW^2)
+      lambda3 <- (npts * (npts - 1) * (npts - 2))/(areaW^3)
+
+      rmaxdefault <- if(!is.null(rmax)) rmax else rmax.rule("K", W, lambda)
+      breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault)
+      r <- breaks$r
+      rmax <- breaks$max
+
+      # choose correction(s)
+      correction.given <- !missing(correction) && !is.null(correction)
+      if(!correction.given)
+        correction <- c("border", "bord.modif", "translate")
+      correction <- pickoption("correction", correction,
+                               c(none="none",
+                                 border="border",
+                                 "bord.modif"="bord.modif",
+                                 trans="translate",
+                                 translate="translate",
+                                 translation="translate",
+                                 best="best"),
+                               multi=TRUE)
+      correction <- implemented.for.T(correction, W$type, correction.given)
+  
+      # recommended range of r values
+      alim <- c(0, min(rmax, rmaxdefault))
+
+      # this will be the output data frame
+      TT <- data.frame(r=r, theo= (pi/2) * (pi - 3 * sqrt(3)/4) * r^4)
+      desc <- c("distance argument r", "theoretical Poisson %s")
+      TT <- fv(TT, "r", quote(T(r)),
+               "theo", , alim, c("r","%s[pois](r)"), desc, fname="T")
+
+      # save numerator and denominator?
+      if(ratio) {
+        denom <- lambda2 * areaW
+        numT <- eval.fv(denom * TT)
+        denT <- eval.fv(denom + TT * 0)
+        attributes(numT) <- attributes(denT) <- attributes(TT)
+        attr(numT, "desc")[2] <- "numerator for theoretical Poisson %s"
+        attr(denT, "desc")[2] <- "denominator for theoretical Poisson %s"
+      }
+  
+      # identify all close pairs
+      rmax <- max(r)
+      close <- closepairs(X, rmax, what="ijd", twice=FALSE, neat=FALSE)
+      I <- close$i
+      J <- close$j
+      DIJ <- close$d
+
+      nI <- length(I)
+  
+      # estimate computation time
+      if(verbose) {
+        nTmax <- nI * (nI-1) /2
+        esttime <- exp(1.25 * log(nTmax) - 21.5)
+        message(paste("Searching", nTmax, "potential triangles;",
+                      "estimated time", codetime(esttime)))
+      }
+
+      # find triangles with their diameters
+      tri <- trianglediameters(I, J, DIJ, nvert=npts)
+      stopifnot(identical(colnames(tri), c("i", "j", "k", "diam")))
+      # reassemble so each triangle appears 3 times, once for each vertex
+      II <- with(tri, c(i, j, k))
+      DD <- with(tri, rep.int(diam, 3))
+  
+      if(any(correction == "none")) {
+        # uncorrected! For demonstration purposes only!
+        wh <- whist(DD, breaks$val)  # no weights
+        numTun <- cumsum(wh)
+        denTun <- lambda3 * areaW
+        # uncorrected estimate of T
+        Tun <- numTun/denTun
+        TT <- bind.fv(TT, data.frame(un=Tun), "hat(%s)[un](r)",
+                      "uncorrected estimate of %s",
+                      "un")
+        if(ratio) {
+          # save numerator and denominator
+          numT <- bind.fv(numT, data.frame(un=numTun), "hat(%s)[un](r)",
+                          "numerator of uncorrected estimate of %s",
+                          "un")
+          denT <- bind.fv(denT, data.frame(un=denTun), "hat(%s)[un](r)",
+                          "denominator of uncorrected estimate of %s",
+                          "un")
+        }
+      }
+  
+      if(any(correction == "border" | correction == "bord.modif")) {
+      # border method
+      # Compute distances to boundary
+        b <- bdist.points(X)
+        bI <- b[II]
+      # apply reduced sample algorithm
+        RS <- Kount(DD, bI, b, breaks)
+        if(any(correction == "bord.modif")) {
+          # modified border correction
+          denom.area <- eroded.areas(W, r)
+          numTbm <- RS$numerator
+          denTbm <- lambda3 * denom.area
+          Tbm <- numTbm/denTbm
+          TT <- bind.fv(TT, data.frame(bord.modif=Tbm), "hat(%s)[bordm](r)",
+                        "modified border-corrected estimate of %s",
+                        "bord.modif")
+          if(ratio) {
+            # save numerator and denominator
+            numT <- bind.fv(numT, data.frame(bord.modif=numTbm),
+                            "hat(%s)[bordm](r)",
+                      "numerator of modified border-corrected estimate of %s",
+                            "bord.modif")
+            denT <- bind.fv(denT, data.frame(bord.modif=denTbm),
+                            "hat(%s)[bordm](r)",
+                      "denominator of modified border-corrected estimate of %s",
+                            "bord.modif")
+          }
+        }
+        if(any(correction == "border")) {
+          numTb <- RS$numerator
+          denTb <- lambda2 * RS$denom.count
+          Tb <- numTb/denTb
+          TT <- bind.fv(TT, data.frame(border=Tb), "hat(%s)[bord](r)",
+                        "border-corrected estimate of %s",
+                        "border")
+          if(ratio) {
+            numT <- bind.fv(numT, data.frame(border=numTb), "hat(%s)[bord](r)",
+                            "numerator of border-corrected estimate of %s",
+                            "border")
+            denT <- bind.fv(denT, data.frame(border=denTb), "hat(%s)[bord](r)",
+                            "denominator of border-corrected estimate of %s",
+                            "border")
+          }
+        }
+      }
+
+      if(any(correction == "translate")) {
+        # translation correction
+        # apply to triangle list
+        edgewt <- edgetri.Trans(X, tri[, 1:3])
+        wh <- whist(tri$diam, breaks$val, edgewt)
+        numTtrans <- 3 * cumsum(wh)
+        denTtrans <- lambda3 * areaW
+        Ttrans <- numTtrans/denTtrans
+        h <- diameter(W)/2
+        Ttrans[r >= h] <- NA
+        TT <- bind.fv(TT, data.frame(trans=Ttrans), "hat(%s)[trans](r)",
+                      "translation-corrected estimate of %s",
+                      "trans")
+        if(ratio) {
+          numT <- bind.fv(numT, data.frame(trans=numTtrans),
+                          "hat(%s)[trans](r)",
+                          "numerator of translation-corrected estimate of %s",
+                          "trans")
+          denT <- bind.fv(denT, data.frame(trans=denTtrans),
+                          "hat(%s)[trans](r)",
+                          "denominator of translation-corrected estimate of %s",
+                          "trans")
+        }
+      }
+      # default plot will display all edge corrections
+      formula(TT) <- . ~ r
+      unitname(TT) <- unitname(X)
+      #
+      if(ratio) {
+        # finish up numerator & denominator
+        formula(numT) <- formula(denT) <- . ~ r
+        unitname(numT) <- unitname(denT) <- unitname(TT)
+        # tack on to result
+        TT <- rat(TT, numT, denT, check=FALSE)
+      }
+      return(TT)
+    }
+  
+  Tstat
+})
+
+
+
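+## Illustrative sketch. Tstat counts triangles of data points with
+## diameter <= r, so it probes third-order structure that the
+## second-order K function cannot detect:
+#   X  <- rpoispp(50)
+#   TX <- Tstat(X)
+#   plot(TX)   # estimates should track the Poisson curve 'theo'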
diff --git a/R/aaaa.R b/R/aaaa.R
new file mode 100644
index 0000000..c89637f
--- /dev/null
+++ b/R/aaaa.R
@@ -0,0 +1,45 @@
+#'
+#'    aaaa.R
+#'
+#'   Code that must be read before the rest of the R code in spatstat
+#' 
+#'    $Revision: 1.4 $  $Date: 2014/12/10 10:34:53 $
+
+#' ...................................................................
+#'   intermaker:
+#'   Class structure for functions like 'Strauss'
+#'   so they print a nice description.
+#'
+
+intermaker <- function(f, blank) {
+  # f is the creator function like 'Strauss'
+  class(f) <- c("intermaker", class(f))
+  # blank is the prototype interaction object: extract some fields
+  desired <- c("creator", "name", "par", "parnames", "pardesc")
+  avail <- desired[desired %in% names(blank)]
+  attr(f, "b") <- blank[avail]
+  return(f)
+}
+
+print.intermaker <- function(x, ...) {
+  b <- attr(x, "b")
+  argh <- names(formals(x))
+  explain <- NULL
+  if(length(argh) > 0) {
+    desc <- b$pardesc %orifnull% b$parnames
+    namep <- names(b$par)
+    if(length(desc) == length(namep) && all(argh %in% namep)) {
+      names(desc) <- namep
+      explain <- paste(", where",
+                       commasep(paste(sQuote(argh), "is the", desc[argh])))
+    }
+  }
+  blah <- paste0("Function ",
+                 b$creator,
+                 paren(paste(argh, collapse=", ")), 
+                 ": creates the interpoint interaction of the ",
+                 b$name,
+                 explain)
+  splat(blah)
+  return(invisible(NULL))
+}
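+## Illustrative sketch: once a creator such as Strauss has been wrapped
+## by intermaker(), printing the bare function yields a one-line
+## description instead of the function body, roughly:
+#   print(Strauss)
+#   ## Function Strauss(r): creates the interpoint interaction of the
+#   ## Strauss process, where 'r' is the interaction distance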
diff --git a/R/adaptive.density.R b/R/adaptive.density.R
new file mode 100755
index 0000000..d7bc531
--- /dev/null
+++ b/R/adaptive.density.R
@@ -0,0 +1,59 @@
+#
+#  adaptive.density.R
+#
+#  $Revision: 1.8 $   $Date: 2015/07/11 08:19:26 $
+#
+#
+
+adaptive.density <- function(X, f=0.1, ..., nrep=1, verbose=TRUE) {
+  stopifnot(is.ppp(X))
+  npts <- npoints(X)
+  check.1.real(f)
+  if(badprobability(f))
+    stop("f should be a probability between 0 and 1")
+  ntess <- floor(f * npts)
+  if(ntess == 0) {
+    # naive estimate of intensity
+    if(f > 0 && verbose)
+      splat("Tiny threshold: returning uniform intensity estimate")
+    W <- X$window
+    lam <- npts/area(W)
+    return(as.im(lam, W, ...))
+  }
+  if(ntess == npts) {
+    ## Voronoi/Dirichlet estimate
+    tes <- dirichlet(X)
+#    tesim <- as.im(tes, ...)
+    tesim <- nnmap(X, what="which", ...)
+    lam <- 1/tile.areas(tes)
+    out <- eval.im(lam[tesim])
+    return(out)
+  }
+  if(nrep > 1) {
+    # estimate is the average of nrep randomised estimates
+    total <- 0
+    if(verbose)
+      cat(paste("Computing", nrep, "intensity estimates..."))
+    state <- list()
+    for(i in seq_len(nrep)) {
+      estimate <- adaptive.density(X, f, ..., nrep=1)
+      total <- eval.im(total + estimate)
+      if(verbose) state <- progressreport(i, nrep, state=state)
+    }
+    if(verbose) cat("Done.\n")
+    average <- eval.im(total/nrep)
+    return(average)
+  }
+  ncount <- npts - ntess
+  fcount <- ncount/npts
+  itess <- sample(seq_len(npts), ntess, replace=FALSE)
+  Xtess <- X[itess]
+  Xcount <- X[-itess]
+  tes <- dirichlet(Xtess)
+  lam <- unlist(lapply(split(Xcount, tes), intensity))
+#  tesim <- as.im(tes, ...)
+#  out <- eval.im(lam[as.integer(tesim)]/fcount)
+  tesim <- nnmap(Xtess, what="which", ...)
+  out <- eval.im(lam[tesim]/fcount)
+  return(out)
+}
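+## Illustrative sketch: a fraction f of the points defines a Dirichlet
+## tessellation, the remaining points estimate the intensity in each
+## tile, and averaging over nrep random splits smooths the estimate:
+#   lam <- adaptive.density(redwood, f=0.1, nrep=30)
+#   plot(lam)
+#   plot(redwood, add=TRUE)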
diff --git a/R/addvar.R b/R/addvar.R
new file mode 100755
index 0000000..642e316
--- /dev/null
+++ b/R/addvar.R
@@ -0,0 +1,369 @@
+#
+# addvar.R
+#
+# added variable plot
+#
+#   $Revision: 1.11 $  $Date: 2016/10/23 10:36:58 $
+#
+
+
+addvar <- function(model, covariate, ...,
+                   subregion=NULL,
+                   bw="nrd0", adjust=1,
+                   from=NULL, to=NULL, n=512,
+                   bw.input = c("points", "quad"),
+                   bw.restrict = FALSE,
+                   covname, crosscheck=FALSE) {  
+
+  if(missing(covname))
+    covname <- sensiblevarname(deparse(substitute(covariate)), "X")
+  callstring <- paste(deparse(sys.call()), collapse = "")
+  
+  if(is.marked(model))
+    stop("Sorry, this is not yet implemented for marked models")
+      
+  if(is.null(adjust)) adjust <- 1
+  
+  bw.input <- match.arg(bw.input)
+  
+  # validate model
+  stopifnot(is.ppm(model))
+  if(is.null(getglmfit(model)))
+    model <- update(model, forcefit=TRUE)
+  modelcall <- model$callstring
+  if(is.null(modelcall))
+    modelcall <- model$call
+  
+  # extract spatial locations
+  Q <- quad.ppm(model)
+#  datapoints <- Q$data
+  quadpoints <- union.quad(Q)
+  Z <- is.data(Q)
+  wts <- w.quad(Q)
+  nQ <- n.quad(Q)
+  # fitted intensity
+  lam <- fitted(model, type="trend")
+  # subset of quadrature points used to fit model
+  subQset <- getglmsubset(model)
+  if(is.null(subQset)) subQset <- rep.int(TRUE, nQ)
+  # restriction to subregion
+  insubregion <- if(!is.null(subregion)) {
+    inside.owin(quadpoints, w=subregion)
+  } else rep.int(TRUE, nQ)
+
+  ################################################################
+  # Pearson residuals from point process model
+
+  yr <- residuals(model, type="Pearson")
+  yresid <- with(yr, "increment")
+  # averaged (then sum with weight 'wts')
+  yresid <- yresid/wts
+
+  #################################################################
+  # Covariates
+  #
+  # covariate data frame
+  df <- getglmdata(model)
+  if(!all(c("x", "y") %in% names(df))) {
+    xy <- as.data.frame(quadpoints)
+    notxy <- !(colnames(df) %in% c("x", "y"))
+    other <- df[, notxy]
+    df <- cbind(xy, other)
+  }
+  #
+  avail.covars <- names(df)
+  # covariates used in model 
+  used.covars   <- model.covariates(model)
+  fitted.covars <- model.covariates(model, offset=FALSE)
+  #
+  #################################################################
+  # identify the covariate
+  #
+  if(!is.character(covariate)) {
+    # Covariate is some kind of data, treated as external covariate
+    if(covname %in% fitted.covars)
+      stop(paste("covariate named", dQuote(covname),
+                 "is already used in model"))
+    covvalues <- evalCovariate(covariate, quadpoints)
+    # validate covvalues
+    if(is.null(covvalues))
+      stop("Unable to extract covariate values")
+    else if(length(covvalues) != npoints(quadpoints))
+      stop(paste("Internal error: number of covariate values =",
+                 length(covvalues), "!=", npoints(quadpoints),
+                 "= number of quadrature points"))
+    # tack onto data frame
+    covdf <- data.frame(covvalues)
+    names(covdf) <- covname
+    df <- cbind(df, covdf)
+  } else {
+    # Argument is name of covariate
+    covname <- covariate
+    if(length(covname) > 1)
+      stop("Must specify only one covariate")
+    #
+    if(covname %in% fitted.covars)
+      stop(paste("covariate", dQuote(covname), "already used in model"))
+    #
+    if(!(covname %in% avail.covars))
+      stop(paste("covariate", dQuote(covname), "not available"))
+    # 
+    covvalues <- df[, covname]
+  }
+  
+  ################################################################
+  # Pearson residuals from weighted linear regression of new covariate on others
+
+  rhs <- formula(model)
+  fo <- as.formula(paste(covname, paste(rhs, collapse=" ")))
+
+  fit <- lm(fo, data=df, weights=lam * wts)
+  xresid <- residuals(fit, type="pearson")/sqrt(wts)
+
+  if(crosscheck) {
+    message("Cross-checking...")
+    X <- model.matrix(fo, data=df)
+    V <- diag(lam * wts)
+    sqrtV <- diag(sqrt(lam * wts))
+    Info <- t(X) %*% V %*% X
+    H <- sqrtV %*% X  %*% solve(Info) %*% t(X) %*% sqrtV
+    nQ <- length(lam)
+    Id <- diag(1, nQ, nQ)
+    xresid.pearson <- (Id - H) %*% sqrtV %*% covvalues
+    xresid.correct <- xresid.pearson/sqrt(wts)
+    abserr <- max(abs(xresid - xresid.correct), na.rm=TRUE)
+    relerr <- abserr/diff(range(xresid.correct, finite=TRUE))
+    if(is.finite(relerr) && relerr > 0.01) {
+      warning("Large relative error in residual computation")
+    }
+    message("Done.")
+  }
+  # experiment suggests residuals(fit, "pearson") == xresid.correct
+  # and residuals(fit) equivalent to
+  # covvalues - X  %*% solve(t(X) %*% V %*% X) %*% t(X) %*% V %*% covvalues
+
+  #################################################################
+  # check for NA's etc
+
+  # locations that must have finite values 
+  operative <- if(bw.restrict) insubregion & subQset else subQset
+ 
+  nbg <- !is.finite(xresid) |  !is.finite(yresid)
+  if(any(offending <- nbg & operative)) {
+    warning(paste(sum(offending), "out of", length(offending),
+                  "covariate values discarded because",
+                  ngettext(sum(offending), "it is", "they are"),
+                  "NA or infinite"))
+  }
+  #################################################################
+  # Restrict data to 'operative' points
+  #                            with finite values
+
+  ok <- !nbg & operative
+  Q           <- Q[ok]
+  xresid      <- xresid[ok]
+  yresid      <- yresid[ok]
+  covvalues   <- covvalues[ok]
+  df          <- df[ok, ]
+  lam         <- lam[ok]
+  wts         <- wts[ok]
+  Z           <- Z[ok]
+  insubregion <- insubregion[ok]
+
+  ####################################################
+  # assemble data for smoothing 
+  xx <- xresid
+  yy <- yresid
+  ww <- wts
+  if(makefrom <- is.null(from))
+    from <- min(xresid)
+  if(maketo <- is.null(to))
+    to   <- max(xresid)
+  
+  ####################################################
+  # determine smoothing bandwidth
+  #     from 'operative' data
+
+  switch(bw.input,
+          quad = {
+           # bandwidth selection from covariate values at all quadrature points
+           numer <- unnormdensity(xx, weights=yy * ww,
+                                  bw=bw, adjust=adjust,
+                                  n=n,from=from,to=to, ...)
+           sigma <- numer$bw
+         },
+         points= {
+           # bandwidth selection from covariate values at data points
+           fake <- unnormdensity(xx[Z], weights=1/lam[Z],
+                                 bw=bw, adjust=adjust,
+                                 n=n,from=from,to=to, ...)
+           sigma <- fake$bw
+           numer <- unnormdensity(xx, weights=yy * ww,
+                                  bw=sigma, adjust=1,
+                                  n=n,from=from,to=to, ...)
+         })
+
+ ####################################################
+  # Restrict data and recompute numerator if required
+
+  if(!is.null(subregion) && !bw.restrict) {
+    # Bandwidth was computed on all data
+    # Restrict to subregion and recompute numerator
+    xx   <- xx[insubregion]
+    yy   <- yy[insubregion]
+    ww   <- ww[insubregion]
+    lam  <- lam[insubregion]
+    Z    <- Z[insubregion]
+    if(makefrom) from <- min(xx)
+    if(maketo)     to <- max(xx)
+    numer <- unnormdensity(xx, weights=yy * ww,
+                           bw=sigma, adjust=1,
+                           n=n,from=from,to=to, ...)
+  }
+
+ ####################################################
+  # Compute denominator
+  denom <- unnormdensity(xx,weights=ww,
+                           bw=sigma, adjust=1,
+                           n=n,from=from,to=to, ...)
+
+  ####################################################
+  # Determine recommended plot range
+
+  xr <- range(xresid[Z], finite=TRUE)
+  alim <- xr + 0.1 * diff(xr) * c(-1,1)
+  alim <- intersect.ranges(alim, c(from, to))
+  
+  ####################################################
+  # Compute terms 
+
+  interpolate <- function(x,y) {
+    if(inherits(x, "density") && missing(y))
+      approxfun(x$x, x$y, rule=2)
+    else 
+      approxfun(x, y, rule=2)
+  }
+  numfun <- interpolate(numer)
+  denfun <- interpolate(denom)
+  xxx <- numer$x
+  ratio <- function(y, x) { ifelseXB(x != 0, y/x, NA) }
+  yyy <- ratio(numfun(xxx), denfun(xxx))
+  # Null variance estimation
+  # smooth with weight 1 and smaller bandwidth
+  tau <- sigma/sqrt(2)
+  varnumer <- unnormdensity(xx,weights=ww,
+                            bw=tau,adjust=1,
+                            n=n,from=from,to=to, ...)
+  varnumfun <- interpolate(varnumer)
+  vvv <- ratio(varnumfun(xxx), 2 * sigma * sqrt(pi) * denfun(xxx)^2)
+  safesqrt <- function(x) {
+    ok <- is.finite(x) & (x >= 0)
+    y <- rep.int(NA_real_, length(x))
+    y[ok] <- sqrt(x[ok])
+    return(y)
+  }
+  twosd <- 2 * safesqrt(vvv)
+  # pack into fv object
+  rslt <- data.frame(rcov=xxx, rpts=yyy, theo=0, var=vvv, hi=twosd, lo=-twosd)
+  nuc <- length(used.covars)
+  if(nuc == 0) {
+    given <- givenlab <- 1
+  } else if(nuc == 1) {
+    given <- givenlab <- used.covars
+  } else {
+    given <- commasep(used.covars, ", ")
+    givenlab <- paste("list", paren(given))
+  }
+  given <- paste("|", given)
+  xlab <- sprintf("r(paste(%s, '|', %s))", covname, givenlab)
+  ylab <- sprintf("r(paste(points, '|', %s))", givenlab)
+  yexpr <- parse(text=ylab)[[1L]]
+  desc <- c(paste("Pearson residual of covariate", covname, given),
+            paste("Smoothed Pearson residual of point process", given),
+            "Null expected value of point process residual",
+            "Null variance of point process residual",
+            "Upper limit of pointwise 5%% significance band",
+            "Lower limit of pointwise 5%% significance band")
+  rslt <- fv(rslt,
+             argu="rcov",
+             ylab=yexpr,
+             valu="rpts",
+             fmla= (. ~ rcov),
+             alim=alim,
+             labl=c(xlab,
+                    "%s",
+                    "0",
+                    "bold(var) ~ %s",
+                    "%s[hi]",
+                    "%s[lo]"),
+             desc=desc,
+             fname=ylab)
+  attr(rslt, "dotnames") <- c("rpts", "theo", "hi", "lo")
+  # data associated with quadrature points
+  reserved <- (substr(colnames(df), 1L, 4L) == ".mpl")
+  isxy <- colnames(df) %in% c("x", "y")
+  dfpublic <- cbind(df[, !(reserved | isxy)], data.frame(xresid, yresid))
+  attr(rslt, "spatial") <- union.quad(Q) %mark% dfpublic
+  # auxiliary data
+  attr(rslt, "stuff") <- list(covname     = covname,
+                              xresid      = xresid,
+                              yresid      = yresid,
+                              covvalues   = covvalues,
+                              wts         = wts,
+                              bw          = bw,
+                              adjust      = adjust,
+                              sigma       = sigma,
+                              used.covars = used.covars,
+                              modelcall   = modelcall,
+                              callstring  = callstring,
+                              xlim        = c(from, to),
+                              xlab        = xlab,
+                              ylab        = ylab,
+                              lmcoef      = coef(fit),
+                              bw.input    = bw.input,
+                              bw.restrict = bw.restrict,
+                              restricted  = !is.null(subregion))
+  # finish
+  class(rslt) <- c("addvar", class(rslt))
+  return(rslt)
+}
+
+print.addvar <- function(x, ...) {
+  cat("Added variable plot diagnostic (class addvar)\n")
+  s <- attr(x, "stuff")
+  mc <- paste(s$modelcall, collapse="")
+  cat(paste("for the covariate", dQuote(s$covname),
+            "for the fitted model:",
+            if(nchar(mc) <= 30) "" else "\n\t",
+            mc, "\n\n"))
+  if(identical(s$restricted, TRUE))
+    cat("\t--Diagnostic computed for a subregion--\n")
+  cat(paste("Call:", s$callstring, "\n"))
+  cat(paste("Actual smoothing bandwidth sigma =", signif(s$sigma,5),
+                    "\n\n"))
+  NextMethod("print")
+}
+
+plot.addvar <- function(x, ..., do.points=FALSE) {
+  xname <- deparse(substitute(x))
+  s <- attr(x, "stuff")
+#  covname <- s$covname
+  xresid <- s$xresid
+  yresid <- s$yresid
+  # adjust y limits if intending to plot points as well
+  ylimcover <- if(do.points) range(yresid, finite=TRUE) else NULL
+  #
+  do.call(plot.fv, resolve.defaults(list(x), list(...),
+                                      list(main=xname,
+                                           shade=c("hi", "lo"),
+                                           legend=FALSE,
+                                           ylim.covers=ylimcover)))
+  # plot points
+  if(do.points)
+    do.call(points,
+            resolve.defaults(list(x=xresid, y=yresid),
+                             list(...),
+                             list(pch=3, cex=0.5)))
+  return(invisible(x))
+}
+
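+## Illustrative sketch, assuming the 'bei' data and covariate images in
+## 'bei.extra' shipped with spatstat:
+#   fit <- ppm(bei ~ elev, data=bei.extra)   # model with elevation only
+#   av  <- addvar(fit, bei.extra$grad)       # candidate covariate: slope
+#   plot(av)   # a clear trend suggests adding 'grad' to the model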
diff --git a/R/affine.R b/R/affine.R
new file mode 100755
index 0000000..d4f56f8
--- /dev/null
+++ b/R/affine.R
@@ -0,0 +1,337 @@
+#
+#	affine.R
+#
+#	$Revision: 1.49 $	$Date: 2016/10/23 10:36:58 $
+#
+
+affinexy <- function(X, mat=diag(c(1,1)), vec=c(0,0), invert=FALSE) {
+  if(length(X$x) == 0 && length(X$y) == 0)
+    return(list(x=numeric(0),y=numeric(0)))
+  if(invert) {
+    mat <- invmat <- solve(mat)
+    vec <- - as.numeric(invmat %*% vec)
+  }
+  # Y = M X + V
+  ans <- mat %*% rbind(X$x, X$y) + matrix(vec, nrow=2L, ncol=length(X$x))
+  return(list(x = ans[1L,],
+              y = ans[2L,]))
+}
+
+affinexypolygon <- function(p, mat=diag(c(1,1)), vec=c(0,0),
+                             detmat=det(mat)) {
+  # transform (x,y)
+  p[c("x","y")] <- affinexy(p, mat=mat, vec=vec)
+  # transform area
+  if(!is.null(p$area))
+    p$area <- p$area * detmat
+  # if map has negative sign, cyclic order was reversed; correct it
+  if(detmat < 0)
+    p <- reverse.xypolygon(p, adjust=TRUE)
+  return(p)
+}
+       
+"affine" <- function(X, ...) {
+  UseMethod("affine")
+}
+
+"affine.owin" <- function(X,  mat=diag(c(1,1)), vec=c(0,0), ...,
+                          rescue=TRUE) {
+  verifyclass(X, "owin")
+  vec <- as2vector(vec)
+  if(!is.matrix(mat) || any(dim(mat) != c(2,2)))
+    stop(paste(sQuote("mat"), "should be a 2 x 2 matrix"))
+  diagonalmatrix <- all(mat == diag(diag(mat)))
+  scaletransform <- diagonalmatrix && (length(unique(diag(mat))) == 1)
+  newunits <- if(scaletransform) unitname(X) else as.units(NULL)
+  #
+  switch(X$type,
+         rectangle={
+           if(diagonalmatrix) {
+             # result is a rectangle
+             Y <- owin(range(mat[1L,1L] * X$xrange + vec[1L]),
+                       range(mat[2L,2L] * X$yrange + vec[2L]))
+             unitname(Y) <- newunits
+             return(Y)
+           } else {
+             # convert rectangle to polygon
+             P <- as.polygonal(X)
+             # call polygonal case
+             return(affine.owin(P, mat, vec, rescue=rescue))
+           }
+         },
+         polygonal={
+           # Transform the polygonal boundaries
+           bdry <- lapply(X$bdry, affinexypolygon, mat=mat, vec=vec,
+                          detmat=det(mat))
+           # Compile result
+           W <- owin(poly=bdry, check=FALSE, unitname=newunits)
+           # Result might be a rectangle: if so, convert to rectangle type
+           if(rescue)
+             W <- rescue.rectangle(W)
+           return(W)
+         },
+         mask={
+           # binary mask
+           if(sqrt(abs(det(mat))) < .Machine$double.eps)
+             stop("Matrix of linear transformation is singular")
+           newframe <- boundingbox(affinexy(corners(X), mat, vec))
+           W <- if(length(list(...)) > 0) as.mask(newframe, ...) else 
+                   as.mask(newframe, eps=with(X, min(xstep, ystep)))
+           pixelxy <- rasterxy.mask(W)
+           xybefore <- affinexy(pixelxy, mat, vec, invert=TRUE)
+           W$m[] <- with(xybefore, inside.owin(x, y, X))
+           W <- intersect.owin(W, boundingbox(W))
+           if(rescue)
+             W <- rescue.rectangle(W)
+           return(W)
+         },
+         stop("Unrecognised window type")
+         )
+}
+
+"affine.ppp" <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) {
+  verifyclass(X, "ppp")
+  vec <- as2vector(vec)
+  r <- affinexy(X, mat, vec)
+  w <- affine.owin(X$window, mat, vec, ...)
+  return(ppp(r$x, r$y, window=w, marks=marks(X, dfok=TRUE), check=FALSE))
+}
+
+"affine.im" <- function(X,  mat=diag(c(1,1)), vec=c(0,0), ...) {
+  verifyclass(X, "im")
+  vec <- as2vector(vec)
+  if(!is.matrix(mat) || any(dim(mat) != c(2,2)))
+    stop(paste(sQuote("mat"), "should be a 2 x 2 matrix"))
+  # Inspect the determinant
+  detmat <- det(mat)
+  if(sqrt(abs(detmat)) < .Machine$double.eps)
+    stop("Matrix of linear transformation is singular")
+  #
+  diagonalmatrix <- all(mat == diag(diag(mat)))
+  scaletransform <- diagonalmatrix && (length(unique(diag(mat))) == 1L)
+  newunits <- if(scaletransform) unitname(X) else as.units(NULL)
+  newpixels <- (length(list(...)) > 0)
+  #
+  if(diagonalmatrix && !newpixels) {
+    # diagonal matrix: apply map to row and column locations
+    v      <- X$v
+    d      <- X$dim
+    newbox <- affine(as.rectangle(X), mat=mat, vec=vec)
+    xscale <- diag(mat)[1L]
+    yscale <- diag(mat)[2L]
+    xcol <- xscale * X$xcol + vec[1L]
+    yrow <- yscale * X$yrow + vec[2L]
+    if(xscale < 0) {
+      # x scale is negative
+      xcol <- rev(xcol)
+      v <- v[, (d[2L]:1)]
+    }
+    if(yscale < 0) {
+      # y scale is negative
+      yrow <- rev(yrow)
+      v <- v[(d[1L]:1), ]
+    }
+    Y <- im(v, xcol=xcol, yrow=yrow,
+            xrange=newbox$xrange, yrange=newbox$yrange,
+            unitname=newunits)
+  } else {
+    # general case
+    # create box containing transformed image
+    newframe <- boundingbox(affinexy(corners(X), mat, vec))
+    W <- if(length(list(...)) > 0) as.mask(newframe, ...) else 
+    as.mask(newframe, eps=with(X, min(xstep, ystep)))
+    unitname(W) <- newunits
+    # raster for transformed image
+    naval <- switch(X$type,
+                    factor= , 
+                    integer = NA_integer_,
+                    logical = as.logical(NA_integer_),
+                    real = NA_real_,
+                    complex = NA_complex_, 
+                    character = NA_character_,
+                    NA)
+    Y <- as.im(W, value=naval)
+    # preimages of pixels of transformed image
+    xx <- as.vector(rasterx.im(Y))
+    yy <- as.vector(rastery.im(Y))
+    pre <- affinexy(list(x=xx, y=yy), mat, vec, invert=TRUE)
+    # sample original image
+    if(X$type != "factor") {
+      Y$v[] <- lookup.im(X, pre$x, pre$y, naok=TRUE)
+    } else {
+      lab <- levels(X)
+      lev <- seq_along(lab)
+      Y$v[] <- lookup.im(eval.im(as.integer(X)), pre$x, pre$y, naok=TRUE)
+      Y <- eval.im(factor(Y, levels=lev, labels=lab))
+    }
+  }
+  return(Y)
+}
+
+
+### ---------------------- reflect ----------------------------------
+
+reflect <- function(X) {
+  UseMethod("reflect")
+}
+
+reflect.default <- function(X) { affine(X, mat=diag(c(-1,-1))) }
+
+reflect.im <- function(X) {
+  stopifnot(is.im(X))
+  out <- with(X,
+              list(v      = v[dim[1L]:1, dim[2L]:1],
+                   dim    = dim,
+                   xrange = rev(-xrange),
+                   yrange = rev(-yrange),
+                   xstep  = xstep,
+                   ystep  = ystep,
+                   xcol   = rev(-xcol),
+                   yrow   = rev(-yrow),
+                   type   = type,
+                   units  = units))
+  class(out) <- "im"
+  return(out)
+}
+
+### ---------------------- shift ----------------------------------
+
+"shift" <- function(X, ...) {
+  UseMethod("shift")
+}
+
+shiftxy <- function(X, vec=c(0,0)) {
+  if(is.null(vec)) {
+    warning("Null displacement vector; treated as zero")
+    return(X)
+  }
+  list(x = X$x + vec[1L],
+       y = X$y + vec[2L])
+}
+
+shiftxypolygon <- function(p, vec=c(0,0)) {
+  # transform (x,y), retaining other data
+  p[c("x","y")] <- shiftxy(p, vec=vec)
+  return(p)
+}
+
+"shift.owin" <- function(X,  vec=c(0,0), ..., origin=NULL) {
+  verifyclass(X, "owin")
+  if(!is.null(origin)) {
+    if(!missing(vec))
+      warning("argument vec ignored; overruled by argument origin")
+    if(is.numeric(origin)) {
+      locn <- origin
+    } else if(is.character(origin)) {
+      origin <- pickoption("origin", origin, c(centroid="centroid",
+                                               midpoint="midpoint",
+                                               bottomleft="bottomleft"))
+      locn <- switch(origin,
+                     centroid={ unlist(centroid.owin(X)) },
+                     midpoint={ c(mean(X$xrange), mean(X$yrange)) },
+                     bottomleft={ c(X$xrange[1L], X$yrange[1L]) })
+    } else stop("origin must be a character string or a numeric vector")
+    return(shift(X, -locn))
+  }
+  vec <- as2vector(vec)
+  # Shift the bounding box
+  X$xrange <- X$xrange + vec[1L]
+  X$yrange <- X$yrange + vec[2L]
+  switch(X$type,
+         rectangle={
+         },
+         polygonal={
+           # Shift the polygonal boundaries
+           X$bdry <- lapply(X$bdry, shiftxypolygon, vec=vec)
+         },
+         mask={
+           # Shift the pixel coordinates
+           X$xcol <- X$xcol + vec[1L]
+           X$yrow <- X$yrow + vec[2L]
+           # That's all --- the mask entries are unchanged
+         },
+         stop("Unrecognised window type")
+         )
+  # tack on shift vector
+  attr(X, "lastshift") <- vec
+  # units are unchanged
+  return(X)
+}
+
+"shift.ppp" <- function(X, vec=c(0,0), ..., origin=NULL) {
+  verifyclass(X, "ppp")
+  if(!is.null(origin)) {
+    if(!missing(vec))
+      warning("argument vec ignored; overruled by argument origin")
+    if(is.numeric(origin)) {
+      locn <- origin
+    } else if(is.character(origin)) {
+      origin <- pickoption("origin", origin, c(centroid="centroid",
+                                               midpoint="midpoint",
+                                               bottomleft="bottomleft"))
+      W <- X$window
+      locn <- switch(origin,
+                     centroid={ unlist(centroid.owin(W)) },
+                     midpoint={ c(mean(W$xrange), mean(W$yrange)) },
+                     bottomleft={ c(W$xrange[1L], W$yrange[1L]) })
+    } else stop("origin must be a character string or a numeric vector")
+    vec <- -locn
+  }
+  vec <- as2vector(vec)
+  # perform shift
+  r <- shiftxy(X, vec)
+  w <- shift.owin(X$window, vec)
+  Y <- ppp(r$x, r$y, window=w, marks=marks(X, dfok=TRUE), check=FALSE)
+  # tack on shift vector
+  attr(Y, "lastshift") <- vec
+  return(Y)
+}
+
+getlastshift <- function(X) {
+  v <- attr(X, "lastshift")
+  if(is.null(v))
+    stop(paste("Internal error: shifted object of class",
+               sQuote(as.character(class(X))[1L]),
+               "does not have \"lastshift\" attribute"),
+         call.=FALSE)
+  if(!(is.numeric(v) && length(v) == 2L))
+    stop("Internal error: \"lastshift\" attribute is not a vector",
+         call.=FALSE)
+  return(v)
+}
+
+putlastshift <- function(X, vec) {
+  attr(X, "lastshift") <- vec
+  return(X)
+}
+
+
+### ---------------------- scalar dilation ---------------------------------
+
+scalardilate <- function(X, f, ...) {
+  UseMethod("scalardilate")
+}
+
+scalardilate.default <- function(X, f, ...) {
+  trap.extra.arguments(..., .Context="In scalardilate(X,f)")
+  check.1.real(f, "In scalardilate(X,f)")
+  stopifnot(is.finite(f) && f > 0)
+  Y <- affine(X, mat=diag(c(f,f)))
+  return(Y)
+}
+
+scalardilate.im <- scalardilate.owin <- scalardilate.psp <- scalardilate.ppp <-
+  function(X, f, ..., origin=NULL) {
+  trap.extra.arguments(..., .Context="In scalardilate(X,f)")
+  check.1.real(f, "In scalardilate(X,f)")
+  stopifnot(is.finite(f) && f > 0)
+  if(!is.null(origin)) {
+    X <- shift(X, origin=origin)
+    negorig <- getlastshift(X)
+  } else negorig <- c(0,0)
+  Y <- affine(X, mat=diag(c(f, f)), vec = -negorig)
+  return(Y)
+}
+  
+
+
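+## Illustrative sketch, assuming the 'cells' and 'letterR' datasets:
+#   stretch <- matrix(c(2, 0, 0, 1), 2, 2)   # doubles the x scale
+#   affine(cells, mat=stretch)               # affine.ppp
+#   shift(cells, origin="centroid")          # recentre at the centroid
+#   scalardilate(letterR, 2)                 # uniform dilation of a window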
diff --git a/R/allstats.R b/R/allstats.R
new file mode 100755
index 0000000..e2665a8
--- /dev/null
+++ b/R/allstats.R
@@ -0,0 +1,47 @@
+#
+#
+#   allstats.R
+#
+#   $Revision: 1.18 $   $Date: 2016/02/11 10:17:12 $
+#
+#
+allstats <- function(pp, ..., dataname=NULL,verb=FALSE) {
+#
+# Function allstats --- to calculate the F, G, K, and J functions
+# for an unmarked point pattern.
+#
+  verifyclass(pp,"ppp")
+  if(is.marked(pp))
+    stop("This function is applicable only to unmarked patterns.\n")
+
+# estimate F, G and J 
+  if(verb) cat("Calculating F, G, J ...")
+  Jout <- do.call.matched(Jest,list(X=pp, ...))
+  if(verb) cat("ok.\n")
+
+# extract F, G and J
+  Fout <- attr(Jout, "F")
+  Gout <- attr(Jout, "G")
+  attr(Jout, "F") <- NULL
+  attr(Jout, "G") <- NULL
+  fns <- list("F function"=Fout,
+              "G function"=Gout,
+              "J function"=Jout)
+
+# compute second moment function K
+  if(verb) cat("Calculating K function...")
+  Kout <- do.call.matched(Kest, list(X=pp, ...))
+  fns <- append(fns, list("K function"=Kout))
+  if(verb) cat("done.\n")
+
+# add title
+  if(is.null(dataname))
+    dataname <- short.deparse(substitute(pp))
+  title <- paste("Four summary functions for ",
+                 dataname, ".", sep="")
+  attr(fns, "title") <- title
+
+#
+  fns <- as.anylist(fns)
+  return(fns)
+}
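+## Illustrative sketch, assuming the 'swedishpines' dataset:
+#   a <- allstats(swedishpines)
+#   plot(a)   # F, G, J and K estimates in a 2 x 2 layout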
diff --git a/R/alltypes.R b/R/alltypes.R
new file mode 100755
index 0000000..af2a480
--- /dev/null
+++ b/R/alltypes.R
@@ -0,0 +1,198 @@
+#
+#      alltypes.R
+#
+#   $Revision: 1.35 $   $Date: 2017/06/05 10:31:58 $
+#
+#
+                                  
+alltypes <- function(X, fun="K", ...,
+                     dataname=NULL,verb=FALSE,envelope=FALSE,reuse=TRUE) {
+#
+# Function 'alltypes' --- calculates a summary function for
+# each type, or each pair of types, in a multitype point pattern
+#
+  if(is.ppp(X)) classname <- "ppp" else
+  if(is.lpp(X)) classname <- "lpp" else
+  stop("X should be a ppp or lpp object")
+  
+  if(is.null(dataname))
+    dataname <- short.deparse(substitute(X))
+
+# --------------------------------------------------------------------  
+# First inspect marks
+
+  if(!is.marked(X)) {
+    nmarks <- 0
+    marklabels <- ""
+  } else {
+    if(!is.multitype(X))
+      stop("the marks must be a factor")
+    # ensure type names are parseable (for mathematical labels)
+    levels(marks(X)) <- make.parseable(levels(marks(X)))
+    mks <- marks(X)
+    ma <- levels(mks)
+    nmarks <- length(ma)
+    marklabels <- paste(ma)
+  }
+
+# ---------------------------------------------------------------------
+# determine function name
+
+  f.is.name <- is.name(substitute(fun))
+  fname <-
+    if(f.is.name)
+      paste(as.name(substitute(fun)))
+    else if(is.character(fun))
+      fun
+    else sQuote("fun") 
+
+# ---------------------------------------------------------------------
+# determine function to be called
+  
+  if(is.function(fun)) {
+    estimator <- fun
+  } else if(is.character(fun)) {
+    # First try matching one of the standard abbreviations K, G etc
+    estimator <- getSumFun(fun, classname, (nmarks > 0), fatal=FALSE)
+    if(is.null(estimator))
+      estimator <- get(fun, mode="function")
+  } else 
+      stop(paste(sQuote("fun"), "should be a function or a character string"))
+  
+# ------------------------------------------------------------------  
+# determine how the function shall be called.
+#
+  indices.expected <- sum(c("i", "j") %in% names(formals(estimator)))
+
+  apply.to.split   <- (indices.expected == 0 && nmarks > 1)
+  if(apply.to.split)
+    ppsplit <- split(X)
+  
+# --------------------------------------------------------------------  
+# determine array dimensions and margin labels
+  witch <-
+    if(nmarks == 0)
+      matrix(1L, nrow=1L, ncol=1L, dimnames=list("",""))
+    else if (nmarks == 1) 
+      matrix(1L, nrow=1L, ncol=1L, dimnames=list(marklabels, marklabels))
+    else if(indices.expected != 2)
+      matrix(1L:nmarks, nrow=nmarks, ncol=1L,
+             dimnames=list(marklabels, ""))
+    else 
+      matrix(1L:(nmarks^2),ncol=nmarks,nrow=nmarks, byrow=TRUE,
+             dimnames=list(marklabels, marklabels))
+
+  # ------------ start computing -------------------------------  
+  # if computing envelopes, first generate simulated patterns
+  # using undocumented feature of envelope()
+  if(envelope && reuse) {
+    L <- do.call(spatstat::envelope,
+                 resolve.defaults(
+                                  list(X, fun=estimator),
+                                  list(internal=list(eject="patterns")),
+                                  list(...),
+				  switch(1L+indices.expected,
+                                          NULL,
+                                          list(i=ma[1L]),
+                                          list(i=ma[1L], j=ma[2L]),
+                                          NULL),
+                                  list(verbose=verb)))
+    intern <- attr(L, "internal")
+  } else intern <- L <- NULL
+
+  # compute function array and build up 'fasp' object
+  fns  <- list()
+  k   <- 0
+
+  for(i in 1L:nrow(witch)) {
+    Y <- if(apply.to.split) ppsplit[[i]] else X
+    for(j in 1L:ncol(witch)) {
+      if(verb) cat("i =",i,"j =",j,"\n")
+      currentfv <- 
+        if(!envelope) 
+          switch(1L+indices.expected,
+                 estimator(Y, ...),
+                 estimator(Y, i=ma[i], ...),
+                 estimator(Y, i=ma[i], j=ma[j], ...))
+        else
+          do.call(spatstat::envelope,
+                  resolve.defaults(
+                                   list(Y, estimator),
+                                   list(simulate=L, internal=intern),
+                                   list(verbose=FALSE),
+                                   list(...),
+                                   list(Yname=dataname),
+                                   switch(1L+indices.expected,
+                                          NULL,
+                                          list(i=ma[i]),
+                                          list(i=ma[i], j=ma[j]),
+                                          NULL)))
+      k <- k+1
+      fns[[k]] <- as.fv(currentfv)
+    }
+  }
+
+  # wrap up into 'fasp' object
+  title <- paste(if(nmarks > 1) "array of " else NULL,
+                 if(envelope) "envelopes of " else NULL,
+                 fname,
+                 if(nmarks <= 1) " function " else " functions ",
+                 "for ", dataname, ".", sep="")
+  
+  rslt <- fasp(fns, which=witch,
+               formulae=NULL,
+               dataname=dataname,
+               title=title,
+               checkfv=FALSE)
+  return(rslt)
+}
+
+# Lookup table for standard abbreviations of functions
+
+getSumFun <- local({
+
+  ftable <-
+  rbind(
+        data.frame(class="ppp", marked=FALSE,
+                   abbrev=c("F", "G", "J", "K", "L", "pcf"),
+                   full=c("Fest", "Gest", "Jest", "Kest", "Lest", "pcf"),
+                   stringsAsFactors=FALSE),
+        data.frame(class="ppp", marked=TRUE,
+                   abbrev=c("F", "G", "J", "K", "L", "pcf"),
+                   full=  c("Fest",
+                     "Gcross", "Jcross", "Kcross", "Lcross",
+                     "pcfcross"),
+                   stringsAsFactors=FALSE),
+        data.frame(class="lpp", marked=FALSE,
+                   abbrev=c("K", "pcf"),
+                   full=c("linearK", "linearpcf"),
+                   stringsAsFactors=FALSE),
+        data.frame(class="lpp", marked=TRUE,
+                   abbrev=c("K", "pcf"),
+                   full=c("linearKcross", "linearpcfcross"),
+                   stringsAsFactors=FALSE)
+        )
+
+  getfun <- function(abbreviation, classname, ismarked, fatal=TRUE) {
+    matches <- with(ftable,
+                    which(abbrev == abbreviation &
+                          class == classname &
+                          marked == ismarked))
+    if(length(matches) == 0) {
+      if(!fatal)
+        return(NULL)
+      stop(paste("No match to function abbreviation",
+                 sQuote(abbreviation),
+                 "for class",
+                 sQuote(classname)))
+    }
+    if(length(matches) > 1)
+      stop("Ambiguous function name")
+    fullname <- ftable$full[matches]
+    get(fullname, mode="function")
+  }
+
+  getfun
+})
+
+
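+## Illustrative sketch, assuming the multitype 'amacrine' dataset:
+#   Z <- alltypes(amacrine, "K")   # array of cross-type K functions
+#   plot(Z)
+#   E <- alltypes(amacrine, "G", envelope=TRUE, nsim=19)
+#   plot(E)   # pointwise envelopes in each panel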
diff --git a/R/anova.mppm.R b/R/anova.mppm.R
new file mode 100755
index 0000000..05735bd
--- /dev/null
+++ b/R/anova.mppm.R
@@ -0,0 +1,262 @@
+#
+# anova.mppm.R
+#
+# $Revision: 1.13 $ $Date: 2017/08/08 07:18:43 $
+#
+
+anova.mppm <- local({
+
+  do.gripe <- function(...) warning(paste(...), call.=FALSE)
+  dont.gripe <- function(...) NULL
+  tests.choices <- c("Chisq", "LRT", "Rao", "score", "F", "Cp")
+  tests.avail <- c("Chisq", "LRT", "Rao", "score")
+  tests.random  <- c("Chisq", "LRT")
+  tests.Gibbs <- c("Chisq", "LRT")
+  totalnquad <- function(fit) sum(sapply(quad.mppm(fit), n.quad))
+  totalusedquad <- function(fit) with(fit$Fit$moadf, sum(.mpl.SUBSET))
+  fmlaString <- function(z) { paste(as.expression(formula(z))) }
+##  interString <- function(z) { as.interact(z)$creator }
+  
+  anova.mppm <- function(object, ..., test=NULL, adjust=TRUE,
+                         fine=FALSE, warn=TRUE) {
+    gripe <- if(warn) do.gripe else dont.gripe
+    argh <- list(...)
+
+    ## trap outmoded usage
+    if("override" %in% names(argh)) {
+      gripe("Argument 'override' is superseded and was ignored")
+      argh <- argh[-which(names(argh) == "override")]
+    }
+   
+    ## list of models
+    objex <- append(list(object), argh)
+
+    ## Check each model is an mppm object
+    if(!all(sapply(objex, is.mppm)))
+      stop(paste("Arguments must all be", sQuote("mppm"), "objects"))
+
+    ## are all models Poisson?
+    pois <- all(sapply(objex, is.poisson.mppm))
+    gibbs <- !pois
+
+    ## handle anova for a single object
+    expandedfrom1 <- FALSE
+    if(length(objex) == 1 && gibbs) {
+      ## we can't rely on anova.glm in this case
+      ## so we have to re-fit explicitly
+      Terms <- drop.scope(object)
+      if((nT <- length(Terms)) > 0) {
+        ## generate models by adding terms sequentially
+        objex <- vector(mode="list", length=nT+1)
+        for(n in 1L:nT) {
+          ## model containing terms 1, ..., n-1
+          fmla <- paste(". ~ . - ", paste(Terms[n:nT], collapse=" - "))
+          fmla <- as.formula(fmla)
+          objex[[n]] <- update(object, fmla)
+        }
+        ## full model
+        objex[[nT+1L]] <- object
+        expandedfrom1 <- TRUE
+      }
+    }
+
+    ## All models fitted using same method?
+    Fits <- lapply(objex, getElement, name="Fit")
+    fitter <- unique(unlist(lapply(Fits, getElement, name="fitter")))
+    if(length(fitter) > 1)
+      stop(paste("Models are incompatible;",
+                 "they were fitted by different methods (",
+                 paste(fitter, collapse=", "), ")" ))
+
+    ## Choice of test
+    if(fitter == "glmmPQL") {
+      ## anova.lme requires different format of `test' argument
+      ## and does not recognise 'dispersion'
+      if(is.null(test))
+        test <- FALSE
+      else {
+        test <- match.arg(test, tests.choices)
+        if(!(test %in% tests.random))
+          stop(paste("Test", dQuote(test),
+                     "is not implemented for random effects models"))
+        test <- TRUE
+      }
+    } else if(!is.null(test)) {
+      test <- match.arg(test, tests.choices)
+      if(!(test %in% tests.avail))
+        stop(paste("test=", dQuote(test), "is not yet implemented"),
+             call.=FALSE)
+      if(!pois && !(test %in% tests.Gibbs))
+        stop(paste("test=", dQuote(test),
+                   "is only implemented for Poisson models"),
+             call.=FALSE)
+    }
+  
+
+    ## Extract glm fit objects 
+    fitz <- lapply(Fits, getElement, name="FIT")
+
+    ## Ensure all models were fitted using GLM, or all were fitted using GAM
+    isgam <- sapply(fitz, inherits, what="gam")
+    isglm <- sapply(fitz, inherits, what="glm")
+    usegam <- any(isgam)
+    if(usegam && any(isglm)) {
+      gripe("Models were re-fitted with use.gam=TRUE")
+      objex <- lapply(objex, update, use.gam=TRUE)
+    }
+
+    ## Finally do the appropriate ANOVA
+    opt <- list(test=test)
+    if(fitter != "glmmPQL") opt <- append(opt, list(dispersion=1))
+    result <- try(do.call(anova, append(fitz, opt)))
+    if(inherits(result, "try-error"))
+      stop("anova failed")
+  
+    ## Remove approximation-dependent columns if present
+    result[, "Resid. Dev"] <- NULL
+    ## replace 'residual df' by number of parameters in model
+    if("Resid. Df" %in% names(result)) {
+      ## count number of quadrature points used in each model
+      nq <- totalusedquad(objex[[1L]])
+      result[, "Resid. Df"] <- nq - result[, "Resid. Df"]
+      names(result)[match("Resid. Df", names(result))] <- "Npar"
+    }
+
+    ## edit header 
+    if(!is.null(h <- attr(result, "heading"))) {
+      ## remove .mpl.Y and .logi.Y from formulae if present
+      h <- gsub(".mpl.Y", "", h)
+      h <- gsub(".logi.Y", "", h)
+      ## delete GLM information if present
+      h <- gsub("Model: quasi, link: log", "", h)
+      h <- gsub("Model: binomial, link: logit", "", h)
+      h <- gsub("Response: ", "", h)
+      ## remove blank lines (up to 4 consecutive blanks can occur)
+      for(i in 1L:5L)
+        h <- gsub("\n\n", "\n", h)
+      if(length(objex) > 1 && length(h) > 1) {
+        ## anova(mod1, mod2, ...)
+        ## change names of models
+        fmlae <- unlist(lapply(objex, fmlaString))
+#        intrx <- unlist(lapply(objex, interString))
+        h[2L] <- paste("Model",
+                      paste0(1L:length(objex), ":"),
+                      fmlae,
+#                      "\t",
+#                      intrx,
+                      collapse="\n")
+      }
+      ## Add explanation if we did the stepwise thing ourselves
+      if(expandedfrom1)
+        h <- c(h[1L], "Terms added sequentially (first to last)\n", h[-1])
+      ## Contract spaces in output if spatstat.options('terse') >= 2
+      if(!waxlyrical('space'))
+        h <- gsub("\n$", "", h)
+      ## Put back
+      attr(result, "heading") <- h
+    }
+
+    if(adjust && !pois) {
+      ## issue warning, if not already given
+      if(warn) warn.once("anovaMppmAdjust",
+                         "anova.mppm now computes the *adjusted* deviances",
+                         "when the models are not Poisson processes.")
+      ## Corrected pseudolikelihood ratio 
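+      ## (The raw pseudolikelihood ratio is not asymptotically chi-squared
+      ## for non-Poisson models; each deviance increment is rescaled by a
+      ## factor cfac[i] built from the pseudoscore variance J and the
+      ## negative Hessian H, following Pace, Salvan and Sartori (2011).)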
+      nmodels <- length(objex)
+      if(nmodels > 1) {
+        cfac <- rep(1, nmodels)
+        for(i in 2:nmodels) {
+          a <- objex[[i-1]]
+          b <- objex[[i]]
+          df <- length(coef(a)) - length(coef(b))
+          if(df > 0) {
+            ibig <- i-1
+            ismal <- i
+          } else {
+            ibig <- i
+            ismal <- i-1
+            df <- -df
+          }
+          bigger <- objex[[ibig]]
+          smaller <- objex[[ismal]]
+          if(df == 0) {
+            gripe("Models", i-1, "and", i, "have the same dimension")
+          } else {
+            bignames <- names(coef(bigger))
+            smallnames <- names(coef(smaller))
+            injection <- match(smallnames, bignames)
+            if(any(uhoh <- is.na(injection))) {
+              gripe("Unable to match",
+                    ngettext(sum(uhoh), "coefficient", "coefficients"),
+                    commasep(sQuote(smallnames[uhoh])),
+                    "of model", ismal, 
+                    "to coefficients in model", ibig)
+            } else {
+              thetaDot <- 0 * coef(bigger)
+              thetaDot[injection] <- coef(smaller)
+              JH <- vcov(bigger, what="all", new.coef=thetaDot, fine=fine)
+#              J   <- if(!logi) JH$Sigma else (JH$Sigma1log+JH$Sigma2log)
+#              H   <- if(!logi) JH$A1 else JH$Slog
+              J <- JH$fisher
+              H <- JH$internals$A1
+              G   <- H%*%solve(J)%*%H
+              if(df == 1) {
+                cfac[i] <- H[-injection,-injection]/G[-injection,-injection]
+              } else {
+                Res <- lapply(subfits(bigger),
+                              residuals,
+                              type="score",
+                              drop=TRUE, 
+                              new.coef=thetaDot, dropcoef=TRUE)
+                U <- sumcompatible(lapply(Res, integral.msr), names(thetaDot))
+                Uo <- U[-injection]
+                Uo <- matrix(Uo, ncol=1)
+                Hinv <- solve(H)
+                Ginv <- solve(G)
+                Hoo <- Hinv[-injection,-injection, drop=FALSE]
+                Goo <- Ginv[-injection,-injection, drop=FALSE]
+                ScoreStat <- t(Uo) %*% Hoo %*% solve(Goo) %*% Hoo %*% Uo
+                cfac[i] <- ScoreStat/(t(Uo) %*% Hoo %*% Uo)
+              }
+            }
+          }
+        }
+        ## apply Pace et al (2011) adjustment to pseudo-deviances
+        ## (save attributes of 'result' for later reinstatement)
+        oldresult <- result
+        result$Deviance <- AdjDev <- result$Deviance * cfac
+        cn <- colnames(result)
+        colnames(result)[cn == "Deviance"] <- "AdjDeviance"
+        if("Pr(>Chi)" %in% colnames(result)) 
+          result[["Pr(>Chi)"]] <- c(NA, pchisq(abs(AdjDev[-1L]),
+                                               df=abs(result$Df[-1L]),
+                                               lower.tail=FALSE))
+        class(result) <- class(oldresult)
+        attr(result, "heading") <- attr(oldresult, "heading")
+      }
+    }
+
+    return(result)
+  }
+
+  sumcompatible <- function(xlist, required) {
+    result <- numeric(length(required))
+    names(result) <- required
+    for(i in seq_along(xlist)) {
+      x <- xlist[[i]]
+      namx <- names(x)
+      if(!all(ok <- (namx %in% required)))
+        stop(paste("Internal error in sumcompatible:",
+                   "list entry", i, "contains unrecognised",
+                   ngettext(sum(!ok), "value", "values"),
+                   commasep(sQuote(namx[!ok]))),
+             call.=FALSE)
+      inject <- match(namx, required)
+      result[inject] <- result[inject] + x
+    }
+    return(result)
+  }
+    
+  anova.mppm
+})
+
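+# Hypothetical usage sketch (not part of the package code): compare two
+# models for the 'waterstriders' patterns by likelihood ratio test
+# ('id' is the reserved covariate indexing the patterns):
+#   H <- hyperframe(Points = waterstriders)
+#   fit0 <- mppm(Points ~ 1, data=H)
+#   fit1 <- mppm(Points ~ id, data=H)
+#   anova(fit0, fit1, test="Chisq")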
+
diff --git a/R/anova.ppm.R b/R/anova.ppm.R
new file mode 100755
index 0000000..27b9424
--- /dev/null
+++ b/R/anova.ppm.R
@@ -0,0 +1,308 @@
+#
+#   anova.ppm.R
+#
+#  $Revision: 1.25 $   $Date: 2016/10/23 10:36:58 $
+#
+
+anova.ppm <- local({
+
+  do.gripe <- function(...) warning(paste(...), call.=FALSE)
+  dont.gripe <- function(...) NULL
+  nquad <- function(x) { if(inherits(x, "quad")) n.quad(x) else 0 }
+  fmlaString <- function(z) { paste(as.expression(formula(z))) }
+  interString <- function(z) { as.interact(z)$creator }
+
+  anova.ppm <- function(object, ..., test=NULL, adjust=TRUE, warn=TRUE,
+                        fine=FALSE) {
+    gripe <- if(warn) do.gripe else dont.gripe
+    if(!is.null(test)) {
+      test <- match.arg(test, c("Chisq", "LRT", "Rao", "score", "F", "Cp"))
+      if(test == "score") test <- "Rao"
+      if(!(test %in% c("Chisq", "LRT", "Rao")))
+        stop("test=", dQuote(test), "is not yet implemented")
+    }
+    ## trap outmoded usage
+    argh <- list(...)
+    if("override" %in% names(argh)) {
+      gripe("Argument 'override' is superseded and was ignored")
+      argh <- argh[-which(names(argh) == "override")]
+    }
+  
+    ## list of models
+    objex <- append(list(object), argh)
+    if(!all(sapply(objex, is.ppm)))
+      stop(paste("Arguments must all be", sQuote("ppm"), "objects"))
+    
+    ## all models Poisson?
+    pois <- all(sapply(objex, is.poisson.ppm))
+    gibbs <- !pois
+    ## any models fitted by ippm?
+    newton <- any(sapply(objex, inherits, what="ippm"))
+    
+    if(gibbs && !is.null(test) && test == "Rao")
+      stop("Score test is only implemented for Poisson models",
+           call.=FALSE)
+    
+    ## handle anova for a single object
+    expandedfrom1 <- FALSE
+    if(length(objex) == 1 && (gibbs || newton)) {
+      ## we can't rely on anova.glm in this case
+      ## so we have to re-fit explicitly
+      Terms <- drop.scope(object)
+      if((nT <- length(Terms)) > 0) {
+        ## generate models by adding terms sequentially
+        objex <- vector(mode="list", length=nT+1)
+        for(n in 1L:nT) {
+          ## model containing terms 1, ..., n-1
+          fmla <- paste(". ~ . - ", paste(Terms[n:nT], collapse=" - "))
+          fmla <- as.formula(fmla)
+          objex[[n]] <- update(object, fmla)
+        }
+        ## full model
+        objex[[nT+1L]] <- object
+        expandedfrom1 <- TRUE
+      }
+    }
+
+    ## all models fitted by same method?
+    fitmethod <- unique(sapply(objex, getElement, name="method"))
+    if(length(fitmethod) > 1)
+      stop(paste("Models were fitted by different methods",
+                 commasep(sQuote(fitmethod)), 
+                 "- comparison is not possible"))
+    ## fitted by MPL or logistic?
+    if(!(fitmethod %in% c("mpl", "logi")))
+      stop(paste("Not implemented for models fitted by method=",
+                 sQuote(fitmethod)))
+    logi <- (fitmethod == "logi")
+
+    refitargs <- list()
+    fitz <- NULL
+  
+    ## fitted to same quadscheme using same edge correction?
+    if(length(objex) > 1) {
+      ## same data? 
+      datas <- lapply(objex, data.ppm)
+      samedata <- all(sapply(datas[-1L], identical, y=datas[[1L]]))
+      if(!samedata) stop("Models were fitted to different datasets")
+      ## same dummy points?
+      quads <- lapply(objex, quad.ppm)
+      samequad <- all(sapply(quads[-1L], identical, y=quads[[1L]]))
+      if(!samequad) {
+        gripe("Models were re-fitted using a common quadrature scheme")
+        sizes <- sapply(quads, nquad)
+        imax <- which.max(sizes)
+        bigQ <- quads[[imax]]
+        refitargs$Q <- bigQ
+      }
+      ## same edge correction?
+      corrxn <- unique(sapply(objex, getElement, name="correction"))
+      if(length(corrxn) > 1)
+        stop(paste("Models were fitting using different edge corrections",
+                   commasep(sQuote(corrxn))))
+      if(corrxn == "border") {
+        rbord <- unique(sapply(objex, getElement, name="rbord"))
+        if(length(rbord) > 1) {
+          gripe("Models were re-fitted using a common value of 'rbord'")
+          refitargs$rbord <- max(rbord)
+        }
+      } 
+      
+      ## Extract glmfit objects 
+      fitz <- lapply(objex, getglmfit)
+
+      ## Any trivial models? (uniform Poisson)
+      trivial <- sapply(fitz, is.null)
+      if(any(trivial))
+        refitargs$forcefit <- TRUE
+    
+      ## force all non-trivial models to be fitted using same method
+      ## (all using GLM or all using GAM)
+      isgam <- sapply(fitz, inherits, what="gam")
+      isglm <- sapply(fitz, inherits, what="glm")
+      usegam <- any(isgam)
+      if(usegam && any(isglm)) {
+        gripe("Models were re-fitted with use.gam=TRUE")
+        refitargs$use.gam <- TRUE
+        refitargs$forcefit <- TRUE
+      }
+
+      ## finally refit models
+      if(length(refitargs) > 0) {
+        objex <- do.call(lapply, append(list(X=objex, FUN=update),
+                                        refitargs))
+        fitz <- lapply(objex, getglmfit)
+      }
+    }
+  
+    ## Ensure GLM/GAM objects all use the same 'subset'
+    subz <-  lapply(objex, getglmsubset)
+    if(length(unique(subz)) > 1) {
+      subsub <- Reduce("&", subz)
+      fitz <- lapply(fitz, refittosubset, sub=subsub)
+      gripe("Models were re-fitted after discarding quadrature points",
+            "that were illegal under some of the models")
+    }
+  
+    ## If any models were fitted by ippm we need to correct the df
+    if(newton) {
+      nfree <- sapply(lapply(objex, logLik), attr, which="df")
+      ncanonical <- lengths(lapply(objex, coef))
+      nextra <- nfree - ncanonical
+      if(is.null(fitz))
+        fitz <- lapply(objex, getglmfit)
+      for(i in seq_along(fitz))
+        if(nextra[i] != 0)
+          fitz[[i]]$df.residual <- fitz[[i]]$df.residual - nextra[i]
+    }
+
+    ## Finally do the appropriate ANOVA
+    if(is.null(fitz)) fitz <- lapply(objex, getglmfit)
+    result <- do.call(anova, append(fitz, list(test=test, dispersion=1)))
+
+    ## Remove approximation-dependent columns if present
+    result[, "Resid. Dev"] <- NULL
+    ## replace 'residual df' by number of parameters in model
+    if("Resid. Df" %in% names(result)) {
+      ## count number of quadrature points used in each model
+      obj1 <- objex[[1L]]
+      ss <- getglmsubset(obj1)
+      nq <- if(!is.null(ss)) sum(ss) else n.quad(quad.ppm(obj1))
+      result[, "Resid. Df"] <- nq - result[, "Resid. Df"]
+      names(result)[match("Resid. Df", names(result))] <- "Npar"
+    }
+    
+    ## edit header 
+    if(!is.null(h <- attr(result, "heading"))) {
+      ## remove .mpl.Y and .logi.Y from formulae if present
+      h <- gsub(".mpl.Y", "", h)
+      h <- gsub(".logi.Y", "", h)
+      ## delete GLM information if present
+      h <- gsub("Model: quasi, link: log", "", h)
+      h <- gsub("Model: binomial, link: logit", "", h)
+      h <- gsub("Response: ", "", h)
+      ## remove blank lines (up to 4 consecutive blanks can occur)
+      for(i in 1L:5L)
+        h <- gsub("\n\n", "\n", h)
+      if(length(objex) > 1 && length(h) > 1) {
+        ## anova(mod1, mod2, ...)
+        ## change names of models
+        fmlae <- sapply(objex, fmlaString)
+        intrx <- sapply(objex, interString)
+        h[2L] <- paste("Model",
+                      paste0(1L:length(objex), ":"),
+                      fmlae,
+                      "\t",
+                      intrx,
+                      collapse="\n")
+      }
+      ## Add explanation if we did the stepwise thing ourselves
+      if(expandedfrom1)
+        h <- c(h[1L], "Terms added sequentially (first to last)\n", h[-1L])
+      ## Contract spaces in output if spatstat.options('terse') >= 2
+      if(!waxlyrical('space'))
+        h <- gsub("\n$", "", h)
+      ## Put back
+      attr(result, "heading") <- h
+    }
+  
+    if(adjust && gibbs) {
+      ## issue warning, if not already given
+      if(warn) warn.once("anovaAdjust",
+                         "anova.ppm now computes the *adjusted* deviances",
+                         "when the models are not Poisson processes.")
+      ## Corrected pseudolikelihood ratio 
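+      ## (Same rescaling as in anova.mppm: each deviance increment is
+      ## multiplied by cfac[i], following Pace, Salvan and Sartori (2011).)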
+      nmodels <- length(objex)
+      if(nmodels > 1) {
+        cfac <- rep(1, nmodels)
+        for(i in 2:nmodels) {
+          a <- objex[[i-1]]
+          b <- objex[[i]]
+          df <- length(coef(a)) - length(coef(b))
+          if(df > 0) {
+            ibig <- i-1
+            ismal <- i
+          } else {
+            ibig <- i
+            ismal <- i-1
+            df <- -df
+          }
+          bigger <- objex[[ibig]]
+          smaller <- objex[[ismal]]
+          if(df == 0) {
+            gripe("Models", i-1, "and", i, "have the same dimension")
+          } else {
+            bignames <- names(coef(bigger))
+            smallnames <- names(coef(smaller))
+            injection <- match(smallnames, bignames)
+            if(any(uhoh <- is.na(injection))) {
+              gripe("Unable to match",
+                    ngettext(sum(uhoh), "coefficient", "coefficients"),
+                    commasep(sQuote(smallnames[uhoh])),
+                    "of model", ismal, 
+                    "to coefficients in model", ibig)
+            } else {
+              thetaDot <- 0 * coef(bigger)
+              thetaDot[injection] <- coef(smaller)
+              JH <- vcov(bigger, what="internals", new.coef=thetaDot, fine=fine)
+              J   <- if(!logi) JH$Sigma else (JH$Sigma1log+JH$Sigma2log)
+              H   <- if(!logi) JH$A1 else JH$Slog
+              G   <- H%*%solve(J)%*%H
+              if(df == 1) {
+                cfac[i] <- H[-injection,-injection]/G[-injection,-injection]
+              } else {
+                Res <- residuals(bigger, type="score",
+                                 new.coef=thetaDot, drop=TRUE)
+                U <- integral.msr(Res)
+                Uo <- U[-injection]
+                Uo <- matrix(Uo, ncol=1)
+                Hinv <- solve(H)
+                Ginv <- solve(G)
+                Hoo <- Hinv[-injection,-injection, drop=FALSE]
+                Goo <- Ginv[-injection,-injection, drop=FALSE]
+                ScoreStat <- t(Uo) %*% Hoo %*% solve(Goo) %*% Hoo %*% Uo
+                cfac[i] <- ScoreStat/(t(Uo) %*% Hoo %*% Uo)
+              }
+            }
+          }
+        }
+        ## apply Pace et al (2011) adjustment to pseudo-deviances
+        ## (save attributes of 'result' for later reinstatement)
+        oldresult <- result
+        result$Deviance <- AdjDev <- result$Deviance * cfac
+        cn <- colnames(result)
+        colnames(result)[cn == "Deviance"] <- "AdjDeviance"
+        if("Pr(>Chi)" %in% colnames(result)) 
+          result[["Pr(>Chi)"]] <- c(NA, pchisq(abs(AdjDev[-1L]),
+                                               df=abs(result$Df[-1L]),
+                                               lower.tail=FALSE))
+        class(result) <- class(oldresult)
+        attr(result, "heading") <- attr(oldresult, "heading")
+      }
+    }
+
+    if(newton) {
+      ## calculation does not include 'covfunargs'
+      cfa <- lapply(lapply(objex, getElement, name="covfunargs"), names)
+      cfa <- unique(unlist(cfa))
+      action <- if(adjust && gibbs) "Adjustment to composite likelihood" else
+                if(test == "Rao") "Score test calculation" else NULL
+      if(!is.null(action)) 
+        gripe(action, "does not account for",
+              "irregular trend parameters (covfunargs)",
+              commasep(sQuote(cfa)))
+    }
+    return(result)
+  }
+
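+  ## Refit a glm/gam object after restricting the quadrature subset:
+  ## the model data frame lives in the environment of the terms object,
+  ## so we modify '.mpl.SUBSET' there and re-evaluate the fitting call.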
+  refittosubset <- function(fut, sub) {
+    etf <- environment(terms(fut))
+    gd <- get("glmdata", envir=etf)
+    gd$.mpl.SUBSET <- sub
+    assign("glmdata", gd, envir=etf)
+    up <- update(fut, evaluate=FALSE)
+    eval(up, envir=etf)
+  }
+
+  anova.ppm
+})
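+
+# Hypothetical usage sketch (not part of the package code):
+#   fit0 <- ppm(swedishpines ~ 1)
+#   fit1 <- ppm(swedishpines ~ polynom(x, y, 2))
+#   anova(fit0, fit1, test="LRT")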
diff --git a/R/applynbd.R b/R/applynbd.R
new file mode 100755
index 0000000..e771567
--- /dev/null
+++ b/R/applynbd.R
@@ -0,0 +1,95 @@
+# 	applynbd.R
+#
+#     $Revision: 1.17 $     $Date: 2016/10/23 10:36:58 $
+#
+#  applynbd()
+# For each point, identify either
+#        - all points within distance R
+#        - the closest N points
+#        - those points satisfying some constraint
+# and apply the function FUN to them
+#
+#  markstat()
+#      simple application of applynbd
+#################################################################
+
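+# Hypothetical usage sketch (not part of the package code): for each point,
+# compute the mean distance to its 5 nearest neighbours:
+#   X <- runifpoint(40)
+#   m5 <- applynbd(X, N=5, exclude=TRUE,
+#                  FUN=function(Y, current, dists, dranks) { mean(dists) })
+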
+
+applynbd <- function(X, FUN, N=NULL, R=NULL, criterion=NULL, exclude=FALSE, ...) {
+
+  if(is.null(N) && is.null(R) && is.null(criterion)) 
+    stop(paste("must specify at least one of the arguments",
+               commasep(sQuote(c("N","R","criterion")))))
+     
+  X <- as.ppp(X)
+  npts <- npoints(X)
+
+  # compute matrix of pairwise distances
+  dist <- pairdist(X)
+
+  # compute row ranks (avoid ties)
+  rankit <- function(x) {  u <- numeric(length(x)); u[fave.order(x)] <- seq_along(x); return(u) }
+  drank <- t(apply(dist, 1L, rankit)) - 1L
+
+  included <- matrix(TRUE, npts, npts)
+  if(!is.null(R)) {
+    # select points closer than R
+    included <- included & (dist <= R)
+  }
+  if(!is.null(N)) {
+    # select N closest points
+    if(N < 1)
+      stop("Value of N must be at least 1")
+    if(exclude)
+      included <- included & (drank <= N) 
+    else
+      included <- included & (drank <= N-1)
+  }
+  if(!is.null(criterion)) {
+    # some funny criterion
+    for(i in 1L:npts) 
+      included[i,] <- included[i,] & criterion(dist[i,], drank[i,])
+  }
+     
+  if(exclude) 
+    diag(included) <- FALSE
+
+  # bind into an array
+  a <- array(c(included, dist, drank, row(included)), dim=c(npts,npts,4))
+
+  # what to do with a[i, , ]
+  if(!is.marked(X)) 
+    go <- function(ai, Z, fun, ...) { 
+      which <- as.logical(ai[,1L])
+      distances <- ai[,2L]
+      dranks <- ai[,3L]
+      here <- ai[1L,4L]
+      fun(Y=Z[which],
+          current=c(x=Z$x[here], y=Z$y[here]),
+          dists=distances[which], dranks=dranks[which],
+          ...) 
+    }
+  else
+    go <- function(ai, Z, fun, ...) { 
+      which <- as.logical(ai[,1L])
+      distances <- ai[,2L]
+      dranks <- ai[,3L]
+      here <- ai[1L,4L]
+      fun(Y=Z[which],
+          current=Z[here],
+          dists=distances[which], dranks=dranks[which],
+          ...) 
+    }
+  
+  # do it
+  result <- apply(a, 1, go, Z=X, fun=FUN, ...)
+  
+  return(result)
+}
+
+markstat <- function(X, fun, N=NULL, R=NULL, ...) {
+  verifyclass(X, "ppp")
+  stopifnot(is.function(fun))
+  statfun <- function(Y, current, dists, dranks, func, ...)
+    { func(marks(Y, dfok=TRUE), ...) }
+  applynbd(X, statfun, R=R, N=N, func=fun, ...)
+}
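+
+# Hypothetical usage sketch (not part of the package code): average mark
+# value among each tree's 5 nearest neighbours in the 'longleaf' data:
+#   markstat(longleaf, mean, N=5)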
diff --git a/R/areadiff.R b/R/areadiff.R
new file mode 100755
index 0000000..f8e4a15
--- /dev/null
+++ b/R/areadiff.R
@@ -0,0 +1,269 @@
+#
+# areadiff.R
+#
+#  $Revision: 1.33 $  $Date: 2017/06/05 10:31:58 $
+#
+# Computes sufficient statistic for area-interaction process
+#
+# Invokes areadiff.c
+#
+# areaLoss = area lost by removing X[i] from X
+
+areaLoss <- function(X, r, ..., W=as.owin(X),
+                     subset=NULL, exact=FALSE,
+                     ngrid=spatstat.options("ngrid.disc")) {
+  if(exact)
+    areaLoss.diri(X, r, ..., W=W, subset=subset)
+  else
+    areaLoss.grid(X, r, ..., W=W, subset=subset, ngrid=ngrid)
+}
+
+# areaGain = area gained by adding u[i] to X
+
+areaGain <- function(u, X, r, ..., W=as.owin(X), exact=FALSE,
+                     ngrid=spatstat.options("ngrid.disc")) {
+  if(exact)
+    areaGain.diri(u, X, r, ..., W=W)
+  else
+    areaGain.grid(u, X, r, W=W, ngrid=ngrid)
+}
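+
+# Hypothetical usage sketch (not part of the package code):
+#   u <- c(0.5, 0.5)
+#   areaGain(u, cells, r=0.1)         # area added by a new point at u
+#   areaLoss(cells, r=0.1, subset=1)  # area lost by deleting cells[1]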
+
+
+#////////////////////////////////////////////////////////////
+#    algorithms using Dirichlet tessellation
+#///////////////////////////////////////////////////////////
+
+areaLoss.diri <- function(X, r, ..., W=as.owin(X), subset=NULL) {
+  stopifnot(is.ppp(X))
+  npts <- npoints(X)
+  if(is.matrix(r)) {
+    if(sum(dim(r) > 1) > 1)
+      stop("r should be a vector or single value")
+    r <- as.vector(r)
+  }
+  nr <- length(r)
+  if(npts == 0)
+    return(matrix(, nrow=0, ncol=nr))
+  else if(npts == 1) 
+    return(matrix(discpartarea(X, r, W), nrow=1))
+  # set up output array
+  indices <- 1L:npts
+  if(!is.null(subset))
+    indices <- indices[subset]
+  out <- matrix(, nrow=length(indices), ncol=nr)
+  #
+  w <- X$window
+  pir2 <- pi * r^2
+  # dirichlet neighbour relation in entire pattern 
+  dd <- deldir(X$x, X$y, rw=c(w$xrange, w$yrange))
+  a <- dd$delsgs[,5L]
+  b <- dd$delsgs[,6L]
+  for(k in seq_along(indices)) {
+    i <- indices[k]
+    # find all Delaunay neighbours of i 
+    jj <- c(b[a==i], a[b==i])
+    jj <- sort(unique(jj))
+    # extract only these points
+    Yminus <- X[jj]
+    Yplus  <- X[c(jj, i)]
+    # dilate
+    aplus <- dilated.areas(Yplus, r, W, exact=TRUE)
+    aminus <- dilated.areas(Yminus, r, W, exact=TRUE)
+    areas <- aplus - aminus
+    # area/(pi * r^2) must be positive and nonincreasing
+    y <- ifelseAX(r == 0, 1, areas/pir2)
+    y <- pmin.int(1, y)
+    ok <- is.finite(y)
+    y[ok] <- rev(cummax(rev(y[ok])))
+    areas <- pmax.int(0, y * pir2)
+    # save
+    out[k, ] <- areas
+  }
+  return(out)
+}
+
+areaGain.diri <- function(u, X, r, ..., W=as.owin(X), verbose=FALSE) {
+  stopifnot(is.ppp(X))
+  Y <- as.ppp(u, W=W)
+  nX <- X$n
+  nY <- Y$n
+  if(is.matrix(r)) {
+    if(sum(dim(r) > 1) > 1)
+      stop("r should be a vector or single value")
+    r <- as.vector(r)
+  }
+  nr <- length(r)
+  if(nY == 0)
+    return(matrix(, nrow=0, ncol=nr))
+  if(nX == 0)
+    return(matrix(pi * r^2, nrow=nY, ncol=nr, byrow=TRUE))
+  if(verbose)
+    splat("areaGain,",
+          nY, ngettext(nY, "point,", "points,"),
+          nr, ngettext(nr, "rvalue", "r values"))
+  out <- matrix(0, nrow=nY, ncol=nr)
+  pir2 <- pi * r^2
+  wbox <- as.rectangle(as.owin(X))
+  #
+  state <- list()
+  for(i in 1L:nY) {
+    if(verbose) state <- progressreport(i, nY, state=state)
+    V <- superimpose(Y[i], X, W=wbox, check=FALSE)
+    # Dirichlet neighbour relation for V
+    dd <- deldir(V$x, V$y, rw=c(wbox$xrange, wbox$yrange))
+    aa <- dd$delsgs[,5L]
+    bb <- dd$delsgs[,6L]
+    # find all Delaunay neighbours of Y[1] in V
+    jj <- c(bb[aa==1L], aa[bb==1L])
+    jj <- sort(unique(jj))
+    # extract only these points
+    Zminus <- V[jj]
+    Zplus  <- V[c(1, jj)]
+    # dilate
+    aplus <- dilated.areas(Zplus, r, W, exact=TRUE)
+    aminus <- dilated.areas(Zminus, r, W, exact=TRUE)
+    areas <- aplus - aminus
+    # area/(pi * r^2) must be in [0,1] and nonincreasing
+    y <- ifelseAX(r == 0, 1, areas/pir2)
+    y <- pmin.int(1, y)
+    ok <- is.finite(y)
+    y[ok] <- rev(cummax(rev(y[ok])))
+    areas <- pmax.int(0, y * pir2)
+    # save
+    out[i,] <- areas
+  }
+  return(out)
+}
+
+#////////////////////////////////////////////////////////////////////////
+#    alternative implementations using grid counting in C
+#////////////////////////////////////////////////////////////////////////
+
+areaGain.grid <- function(u, X, r, ..., W=NULL, ngrid=spatstat.options("ngrid.disc")) {
+  verifyclass(X, "ppp")
+  u <- as.ppp(u, W=as.owin(X))
+  stopifnot(is.numeric(r) && all(is.finite(r)) && all(r >= 0))
+  #
+  nu <- u$n
+  nr <- length(r)
+  if(nr == 0)
+    return(numeric(0))
+  rmax <- max(r)
+  #
+  constrain <- !is.null(W)
+  if(constrain && (W$type != "rectangle")) {
+    # Constrained to an irregular window
+    # initialise to value for small-r
+    result <- matrix(pi * r^2, nrow=nu, ncol=nr, byrow=TRUE)    
+    # vector of radii below which b(u,r) is disjoint from U(X,r)
+    rcrit.u <- nncross(u, X, what="dist")/2
+    rcrit.min <- min(rcrit.u)
+    # Use distance transform and set covariance
+    D <- distmap(X, ...)
+    DW <- D[W, drop=FALSE]
+    # distance from (0,0) - thresholded to make digital discs
+    discWin <- owin(c(-rmax,rmax),c(-rmax,rmax))
+    discWin <- as.mask(discWin, eps=min(D$xstep, rmax/4))
+    rad <- as.im(function(x,y){sqrt(x^2+y^2)}, W=discWin)
+    # 
+    for(j in which(r > rcrit.min)) {
+      # rj is above the critical radius rcrit.u[i] for at least one point u[i]
+      rj <- r[j]
+      if(any(above <- (rj > rcrit.u))) {
+        Uncovered  <- levelset(DW, rj, ">")
+        DiscRj     <- levelset(rad, rj, "<=")
+        AreaGainIm <- setcov(Uncovered, DiscRj)
+        result[above, j] <- safelookup(AreaGainIm, u[above])
+      }
+    }
+    return(result)
+  }
+  #
+  #
+  xx <- X$x
+  yy <- X$y
+  result <- matrix(, nrow=nu, ncol=nr)
+  #
+  for(i in 1L:nu) {
+    # shift u[i] to origin
+    xu <- u$x[i]
+    yu <- u$y[i]
+    xshift <- xx - xu
+    yshift <- yy - yu
+    # find points within distance 2 rmax of origin
+    close <- (xshift^2 + yshift^2 < 4 * rmax^2)
+    nclose <- sum(close)
+    # invoke C routine
+    if(!constrain) {
+      z <- .C("areadifs",
+              rad = as.double(r),
+              nrads = as.integer(nr),
+              x   = as.double(xshift[close]),
+              y   = as.double(yshift[close]),
+              nn  = as.integer(nclose),
+              ngrid = as.integer(ngrid),
+              answer = as.double(numeric(nr)),
+              PACKAGE = "spatstat")
+      result[i,] <- z$answer
+    } else {
+      z <- .C("areaBdif",
+              rad = as.double(r),
+              nrads = as.integer(nr),
+              x   = as.double(xshift[close]),
+              y   = as.double(yshift[close]),
+              nn  = as.integer(nclose),
+              ngrid = as.integer(ngrid),
+              x0 = as.double(W$xrange[1L] - xu),
+              y0 = as.double(W$yrange[1L] - yu),
+              x1 = as.double(W$xrange[2L] - xu),
+              y1 = as.double(W$yrange[2L] - yu),
+              answer = as.double(numeric(nr)),
+              PACKAGE = "spatstat")
+      result[i,] <- z$answer
+    }
+  }
+  return(result)
+}
+
+areaLoss.grid <- function(X, r, ...,
+                          W=as.owin(X), subset=NULL,
+                          method = c("count", "distmap"),
+                          ngrid = spatstat.options("ngrid.disc"),
+                          exact = FALSE) {
+  verifyclass(X, "ppp")
+  n <- npoints(X)
+  nr <- length(r)
+  indices <- if(is.null(subset)) 1L:n else (1L:n)[subset]
+  answer <- matrix(, nrow=length(indices), ncol=nr)
+  if(missing(method)) {
+    method <- if(nr <= 20 || exact) "count" else "distmap"
+  } else method <- match.arg(method)
+  switch(method,
+         count = {
+           # few values of r, or exact computation: use grid counting
+           for(k in seq_along(indices)) {
+             i <- indices[k]
+             answer[k,] <- areaGain(X[i], X[-i], r, W=W,
+                                    ngrid=ngrid, exact=exact)
+           }
+         },
+         distmap = {
+           # Many values of r: use distance transform
+           D <- distmap(X, ...)
+           DW <- D[W, drop=FALSE]
+           a <- area(Window(DW))
+           # empirical cdf of distance values
+           FW <- ecdf(DW[drop=TRUE])
+           # radii below which there are no overlaps
+           rcrit <- nndist(X)/2
+           for(k in seq_along(indices)) {
+             i <- indices[k]
+             Di <- distmap(X[-i], ...)
+             FiW <- ecdf(Di[W, drop=TRUE])
+             answer[k, ] <-
+               ifelseXY(r > rcrit[i], a * (FW(r) - FiW(r)), pi * r^2)
+           }
+         })
+  return(answer)
+}
+
diff --git a/R/areainter.R b/R/areainter.R
new file mode 100755
index 0000000..ff15767
--- /dev/null
+++ b/R/areainter.R
@@ -0,0 +1,359 @@
+#
+#
+#    areainter.R
+#
+#    $Revision: 1.44 $	$Date: 2017/06/05 10:31:58 $
+#
+#    The area interaction
+#
+#    AreaInter()    create an instance of the area-interaction process
+#                 [an object of class 'interact']
+#	
+#
+# -------------------------------------------------------------------
+#
+
+AreaInter <- local({
+
+  # area-interaction conditional intensity potential
+  #     corresponds to potential -C(x) = n(x) - A(x)/(pi * r^2)
+  areapot <- 
+    function(X,U,EqualPairs,pars,correction, ..., W=as.owin(X)) {
+      uhoh <- !(correction %in% c("border", "none"))
+      if(any(uhoh)) {
+        nuh <- sum(uhoh)
+        warning(paste(ngettext(nuh, "Correction", "Corrections"),
+                      commasep(sQuote(correction[uhoh])),
+                      ngettext(nuh,
+                               "is not supported and was ignored",
+                               "are not supported and were ignored")))
+      }
+      r <- pars$r
+      if(is.null(r)) stop("internal error: r parameter not found")
+      n <- U$n
+      areas <- numeric(n)
+      dummies <- !(seq_len(n) %in% EqualPairs[,2L])
+      if(sum(dummies) > 0)
+        areas[dummies] <- areaGain(U[dummies], X, r, W=W)
+      ii <- EqualPairs[,1L]
+      jj <- EqualPairs[,2L]
+      areas[jj] <- areaLoss(X, r, subset=ii, W=W)
+      return(1 - areas/(pi * r^2))
+    }
+
+  #' fractional area of overlap of two unit discs at distance 2 * z
+  discOverlap <- function(z) {
+    z <- pmax(pmin(z, 1), -1)
+    (2/pi) * (acos(z) - z * sqrt(1 - z^2))
+  }
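+  #' (two unit discs with centres 2*z apart intersect in a lens of area
+  #'  2*(acos(z) - z*sqrt(1-z^2)); dividing by the disc area pi gives
+  #'  the fraction returned above)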
+  
+  # template object without family, par, version
+  BlankAI <- 
+  list(
+         name     = "Area-interaction process",
+         creator  = "AreaInter",
+         family   = "inforder.family", # evaluated later
+         pot      = areapot,
+         par      = list(r = NULL), # to be filled in
+         parnames = "disc radius",
+         init     = function(self) {
+                      r <- self$par$r
+                      if(!is.numeric(r) || length(r) != 1 || r <= 0)
+                       stop("disc radius r must be a positive number")
+                    },
+         update = NULL,  # default OK
+         print = NULL,    # default OK
+         plot = function(fint, ..., d=NULL, plotit=TRUE) {
+           verifyclass(fint, "fii")
+           inter <- fint$interaction
+           unitz <- unitname(fint)
+           if(!identical(inter$name, "Area-interaction process"))
+             stop("Tried to plot the wrong kind of interaction")
+           #' fitted interaction coefficient
+           theta <- fint$coefs[fint$Vnames]
+           #' interaction radius
+           r <- inter$par$r
+           xlim <- resolve.1.default(list(xlim=c(0, 1.25 * 2*r)), list(...)) 
+           rmax <- max(xlim, d)
+           if(is.null(d)) {
+             d <- seq(from=0, to=rmax, length.out=1024)
+           } else {
+             stopifnot(is.numeric(d) &&
+                       all(is.finite(d)) &&
+                       all(diff(d) > 0))
+           }
+           #' compute interaction between two points at distance d
+           y <- exp(theta * discOverlap(d/(2 * r)))
+           #' compute `fv' object
+           fun <- fv(data.frame(r=d, h=y, one=1),
+                     "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r,
+                     xlim, c("r", "h(r)", "1"),
+                     c("distance argument r",
+                       "maximal interaction h(r)",
+                       "reference value 1"),
+                     unitname=unitz)
+           if(plotit)
+             do.call(plot.fv,
+                     resolve.defaults(list(fun),
+                                      list(...),
+                                      list(ylim=range(0,1,y))))
+           return(invisible(fun))
+         },
+         #' end of function 'plot'
+         interpret =  function(coeffs, self) {
+           logeta <- as.numeric(coeffs[1L])
+           eta <- exp(logeta)
+           return(list(param=list(eta=eta),
+                       inames="interaction parameter eta",
+                       printable=signif(eta)))
+         },
+         valid = function(coeffs, self) {
+           eta <- ((self$interpret)(coeffs, self))$param$eta
+           return(is.finite(eta))
+         },
+         project = function(coeffs, self) {
+           if((self$valid)(coeffs, self))
+             return(NULL)
+           return(Poisson())
+         },
+         irange = function(self, coeffs=NA, epsilon=0, ...) {
+           r <- self$par$r
+           if(anyNA(coeffs))
+             return(2 * r)
+           logeta <- coeffs[1L]
+           if(abs(logeta) <= epsilon)
+             return(0)
+           else
+             return(2 * r)
+         },
+         delta2 = function(X, inte, correction, ..., sparseOK=FALSE) {
+           # Sufficient statistic for second order conditional intensity
+           # Area-interaction model 
+           if(!(correction %in% c("border", "none")))
+             return(NULL)
+           r <- inte$par$r
+           areadelta2(X, r, sparseOK=sparseOK)
+         },
+       version=NULL # to be added
+  )
+  class(BlankAI) <- "interact"
+
+  AreaInter <- function(r) {
+    instantiate.interact(BlankAI, list(r=r))
+  }
+
+  AreaInter <- intermaker(AreaInter, BlankAI)
+  
+  AreaInter
+})
+
+
+areadelta2 <- local({
+
+  areadelta2 <- function(X, r, ..., sparseOK=FALSE) {
+    # Sufficient statistic for second order conditional intensity
+    # Area-interaction model 
+    if(is.ppp(X)) return(areadelppp(X, r, ..., sparseOK=sparseOK)) else
+    if(inherits(X, "quad")) return(areadelquad(X, r, sparseOK=sparseOK)) else
+    stop("internal error: X should be a ppp or quad object")
+  }
+
+  areadelppp <- function(X, r, algorithm=c("C", "nncross", "nnmap"),
+                         sparseOK=FALSE) {
+    # Evaluate \Delta_{x_i} \Delta_{x_j} S(x) for data points x_i, x_j
+    # i.e.  h(X[i]|X) - h(X[i]|X[-j])
+    #       where h is first order cif statistic
+    algorithm <- match.arg(algorithm)
+    nX <- npoints(X)
+    sparseOK <- sparseOK
+    result <- if(!sparseOK) matrix(0, nX, nX) else
+              sparseMatrix(i=integer(0), j=integer(0), x=numeric(0),
+                           dims=c(nX,nX))
+    if(nX < 2)
+      return(result)
+    if(algorithm == "C") {
+      # use special purpose C routine
+      # called once for each interacting pair of points
+      xx <- X$x
+      yy <- X$y
+      cl <- closepairs(X, 2 * r, what="indices", twice=FALSE, neat=FALSE)
+      I <- cl$i
+      J <- cl$j
+      eps <- r/spatstat.options("ngrid.disc")
+      for(k in seq_along(I)) {
+        i <- I[k]
+        j <- J[k]
+        # all neighbours of i
+        Ki <- union(J[I==i], I[J==i])
+        # all neighbours of j
+        Kj <- union(J[I==j], I[J==j])
+        # relevant neighbours
+        K <- setdiff(union(Ki, Kj), c(i,j))
+        # call C code
+        z <- .C("delta2area",
+                xa = as.double(xx[i]),
+                ya = as.double(yy[i]),
+                xb = as.double(xx[j]),
+                yb = as.double(yy[j]),
+                nother = as.integer(length(K)),
+                xother = as.double(xx[K]),
+                yother = as.double(yy[K]),
+                radius = as.double(r),
+                epsilon = as.double(eps),
+                pixcount = as.integer(integer(1L)),
+                PACKAGE = "spatstat")
+        result[i,j] <- result[j,i] <- z$pixcount
+      }
+      # normalise
+      result <- result * (eps^2)/(pi * r^2)
+      return(result)
+    }
+
+    # non-C algorithms
+    # confine attention to points which are interacting
+    relevant <- (nndist(X) <= 2 * r)
+    if(!all(relevant)) {
+      if(any(relevant)) {
+        # call self on subset
+        Dok <- areadelppp(X[relevant], r, algorithm, sparseOK=sparseOK)
+        result[relevant,relevant] <- Dok
+      }
+      return(result)
+    }
+
+    # .............. algorithm using interpreted code ...........
+    
+    # sort pattern in increasing order of x
+    sortX <- (algorithm == "nnmap")
+    if(sortX) {
+      oX <- fave.order(X$x)
+      X <- X[oX]
+    }
+
+    # area calculation may be restricted to window W for efficiency
+    W <- as.owin(X)
+    U <- as.rectangle(W)
+
+    # decide pixel resolution
+    eps <- r/spatstat.options("ngrid.disc")
+    npix <- prod(ceiling(sidelengths(U)/eps))
+    if(npix <= 2^20) {
+      # do it all in one go
+      tile <- list(NULL)
+    } else {
+      # divide into rectangular tiles
+      B <- as.rectangle(W)
+      ntile0 <- ceiling(npix/(2^20))
+      tile0area <- area(B)/ntile0
+      tile0side <- sqrt(tile0area)
+      nx <- ceiling(sidelengths(B)[1L]/tile0side)
+      ny <- ceiling(sidelengths(B)[2L]/tile0side)
+      tile <- tiles(quadrats(B, nx, ny))
+    }
+           
+    result <- matrix(0, nX, nX)
+    for(i in seq_len(length(tile))) {
+      # form pixel grid
+      Ti <- tile[[i]]
+      Wi <- if(is.null(Ti)) W else intersect.owin(W, Ti)
+      if(algorithm == "nncross") {
+        # Trusted, slow algorithm using nncross
+        Z <- as.mask(Wi, eps=eps)
+        G <- as.ppp(rasterxy.mask(Z), U, check=FALSE)
+        # compute 3 nearest neighbours in X of each grid point
+        v <- nncross(G, X, k=1:3)
+        # select pixels which have exactly 2 neighbours within distance r
+        ok <- with(v, dist.3 > r & dist.2 <= r)
+        if(any(ok)) {
+          v <- v[ok, , drop=FALSE]
+          # accumulate pixel counts -> areas
+          counts <- with(v, table(i=factor(which.1, levels=1L:nX),
+                                  j=factor(which.2, levels=1L:nX)))
+          pixarea <- with(Z, xstep * ystep)
+          result <- result + pixarea * (counts + t(counts))
+        }
+      } else {
+        # Faster algorithm using nnmap
+        # compute 3 nearest neighbours in X of each grid point
+        stuff <- nnmap(X, k=1:3, W=Wi, eps=eps,
+                       is.sorted.X=TRUE, sortby="x",
+                       outputarray=TRUE)
+        dist.2 <- stuff$dist[2L,,]
+        dist.3 <- stuff$dist[3L,,]
+        which.1 <- stuff$which[1L,,]
+        which.2 <- stuff$which[2L,,]
+        ok <- (dist.3 > r & dist.2 <= r)
+        if(any(ok)) {
+          which.1 <- as.vector(which.1[ok])
+          which.2 <- as.vector(which.2[ok])
+          counts <- table(i=factor(which.1, levels=1L:nX),
+                          j=factor(which.2, levels=1L:nX))
+          pixarea <- attr(stuff, "pixarea")
+          result <- result + pixarea * (counts + t(counts))
+        }
+      }
+    }
+    if(sortX) {
+      # map back to original ordering
+      result[oX, oX] <- result
+    }
+    # normalise
+    result <- result/(pi * r^2)
+    return(result)
+  }
+
+  areadelquad <- function(Q, r, sparseOK=FALSE) {
+    # Sufficient statistic for second order conditional intensity
+    # Area-interaction model 
+    # Evaluate \Delta_{u_j} \Delta_{u_i} S(x) for quadrature points 
+    # answer is area(b(u[i],r) \cap b(u[j],r)\setminus \bigcup_k b(x[k],r))
+    # where k ranges over all indices that are not equivalent to u[i,j]
+    U <- union.quad(Q)
+    Z <- is.data(Q)
+    nU <- npoints(U)
+    xx <- U$x
+    yy <- U$y
+    # identify all close pairs of quadrature points
+    cl <- closepairs(U, 2 * r, what="indices")
+    I <- cl$i
+    J <- cl$j
+    # find neighbours in X of each quadrature point
+    zJ <- Z[J]
+    neigh <- split(J[zJ], factor(I[zJ], levels=1L:nU))
+    # 
+    result <- if(!sparseOK) matrix(0, nU, nU) else
+              sparseMatrix(i=integer(0), j=integer(0), x=numeric(0),
+                           dims=c(nU,nU))
+    eps <- r/spatstat.options("ngrid.disc")
+    #
+    for(k in seq_along(I)) {
+      i <- I[k]
+      j <- J[k]
+      # all points of X close to U[i]
+      Ki <- neigh[[i]]
+      # all points of X close to U[j]
+      Kj <- neigh[[j]]
+      # relevant neighbours
+      K <- setdiff(union(Ki, Kj), c(i,j))
+      # call C code
+      z <- .C("delta2area",
+            xa = as.double(xx[i]),
+            ya = as.double(yy[i]),
+            xb = as.double(xx[j]),
+            yb = as.double(yy[j]),
+            nother = as.integer(length(K)),
+            xother = as.double(xx[K]),
+            yother = as.double(yy[K]),
+            radius = as.double(r),
+            epsilon = as.double(eps),
+            pixcount = as.integer(integer(1L)),
+            PACKAGE = "spatstat")
+      result[i,j] <- z$pixcount
+    }
+    # normalise
+    result <- result * (eps^2)/(pi * r^2)
+    return(result)
+  }
+
+  areadelta2
+})
diff --git a/R/as.im.R b/R/as.im.R
new file mode 100755
index 0000000..7023ef6
--- /dev/null
+++ b/R/as.im.R
@@ -0,0 +1,318 @@
+#
+#    as.im.R
+#
+#    conversion to class "im"
+#
+#    $Revision: 1.47 $   $Date: 2017/06/05 10:31:58 $
+#
+#    as.im()
+#
+
+as.im <- function(X, ...) {
+  UseMethod("as.im")
+}
+
+as.im.im <- function(X, W=NULL, ...,
+                     eps=NULL, dimyx=NULL, xy=NULL,
+                     na.replace=NULL) {
+  X <- repair.old.factor.image(X)
+  if(is.null(W)) {
+    if(is.null(eps) && is.null(dimyx) && is.null(xy)) {
+      X <- repair.image.xycoords(X)
+      X <- na.handle.im(X, na.replace)
+      return(X)
+    }
+    # pixel raster determined by dimyx etc
+    W <- as.mask(as.rectangle(X), eps=eps, dimyx=dimyx, xy=xy)
+    # invoke as.im.owin
+    Y <- as.im(W)
+  } else {
+    # apply dimyx (etc) if present,
+    # otherwise use W to determine pixel raster
+    Y <- as.im(W, eps=eps, dimyx=dimyx, xy=xy)
+  }
+  # resample X onto raster of Y
+  Y <- rastersample(X, Y)
+
+  return(na.handle.im(Y, na.replace))
+}
+
+as.im.owin <- function(X, W=NULL, ...,
+                       eps=NULL, dimyx=NULL, xy=NULL,
+                       na.replace=NULL, value=1) {
+  if(!(is.null(eps) && is.null(dimyx) && is.null(xy))) {
+    # raster dimensions determined by dimyx etc
+    # convert X to a mask 
+    M <- as.mask(X, eps=eps, dimyx=dimyx, xy=xy)
+    # convert mask to image
+    d <- M$dim
+    v <- matrix(value, d[1L], d[2L])
+    m <- M$m
+    v[!m] <- if(is.null(na.replace)) NA else na.replace
+    out <- im(v, M$xcol, M$yrow,
+              xrange=M$xrange, yrange=M$yrange,
+              unitname=unitname(X))
+    return(out)
+  }
+  if(!is.null(W) && is.owin(W) && W$type == "mask") {
+    # raster dimensions determined by W
+    # convert W to zero image
+    d <- W$dim
+    Z <- im(matrix(0, d[1L], d[2L]), W$xcol, W$yrow, unitname=unitname(X))    
+    # adjust values to indicator of X
+    Z[X] <- 1
+    if(missing(value) && is.null(na.replace)) {
+      # done
+      out <- Z
+    } else {
+      # map {0, 1} to {na.replace, value}
+      v <- matrix(ifelseAB(Z$v == 0, na.replace, value), d[1L], d[2L])
+      out <- im(v, W$xcol, W$yrow, unitname=unitname(X))
+    }
+    return(out)
+  }
+  if(X$type == "mask") {
+    # raster dimensions determined by X
+    # convert X to image
+    d <- X$dim
+    v <- matrix(value, d[1L], d[2L])
+    m <- X$m
+    v[!m] <- if(is.null(na.replace)) NA else na.replace
+    out <- im(v, xcol=X$xcol, yrow=X$yrow,
+              xrange=X$xrange, yrange=X$yrange, unitname=unitname(X))
+    return(out)
+  }
+  # X is not a mask.
+  # W is either missing, or is not a mask.
+  # Convert X to a image using default settings
+  M <- as.mask(X)
+  # convert mask to image
+  d <- M$dim
+  v <- matrix(value, d[1L], d[2L])
+  m <- M$m
+  v[!m] <- if(is.null(na.replace)) NA else na.replace
+  out <- im(v, M$xcol, M$yrow, unitname=unitname(X))
+  return(out)
+}
+
+as.im.funxy <- function(X, W=Window(X), ...) {
+  as.im.function(X, W=W, ...)
+}
+
+as.im.function <- function(X, W=NULL, ...,
+                           eps=NULL, dimyx=NULL, xy=NULL,
+                           na.replace=NULL, strict=FALSE) {
+  f <- X
+  if(is.null(W))
+    stop("A window W is required")
+  W <- as.owin(W)
+  W <- as.mask(W, eps=eps, dimyx=dimyx, xy=xy)
+  m <- W$m
+  funnywindow <- !all(m)
+
+  xx <- as.vector(rasterx.mask(W))
+  yy <- as.vector(rastery.mask(W))
+
+  argh <- list(...)
+  if(strict) argh <- argh[names(argh) %in% names(formals(f))]
+
+  # evaluate function value at each pixel 
+  if(!funnywindow) 
+    values <- do.call(f, append(list(xx, yy), argh))
+  else {
+    # evaluate only inside window
+    inside <- as.vector(m)
+    val <- do.call(f, append(list(xx[inside], yy[inside]), argh))
+    # create space for full matrix
+    msize <- length(m)
+    values <-
+      if(!is.factor(val))
+        vector(mode=typeof(val), length=msize)
+      else {
+        lev <- levels(val)
+        factor(rep.int(lev[1L], msize), levels=lev)
+      }
+    # copy values, assigning NA outside window
+    values[inside] <- val
+    values[!inside] <- NA
+  }
+
+  nc <- length(W$xcol)
+  nr <- length(W$yrow)
+  if(nr == 1 || nc == 1) {
+    # exception: can't determine pixel width/height from centres
+    out <- im(matrix(values, nr, nc),
+              xrange=W$xrange, yrange=W$yrange, unitname=unitname(W))
+  } else {
+    out <- im(values, W$xcol, W$yrow, unitname=unitname(W))
+  }
+  return(na.handle.im(out, na.replace))
+}
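+
+# Hypothetical usage sketch (not part of the package code): evaluate a
+# function of the coordinates as a pixel image on the unit square:
+#   Z <- as.im(function(x, y) { x^2 + y^2 }, W=owin())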
+
+as.im.matrix <- function(X, W=NULL, ...) {
+  nr <- nrow(X)
+  nc <- ncol(X)
+  if(is.null(W))
+    return(im(X, ...))
+  W <- as.owin(W)
+  if(W$type == "mask") {
+    xcol <- W$xcol
+    yrow <- W$yrow
+    # pixel coordinate information
+    if(length(xcol) == nc && length(yrow) == nr)
+      return(im(X, xcol, yrow, unitname=unitname(W)))
+  }
+  # range information
+  R <- as.rectangle(W)
+  xrange <- R$xrange
+  yrange <- R$yrange
+  return(im(X, xrange=xrange, yrange=yrange, unitname=unitname(W)))
+}
+
+as.im.default <- function(X, W=NULL, ...,
+                          eps=NULL, dimyx=NULL, xy=NULL,
+                          na.replace=NULL) {
+
+  if((is.vector(X) || is.factor(X)) && length(X) == 1) {
+    # numerical value: interpret as constant function
+    xvalue <- X
+    X <- function(xx, yy, ...) { rep.int(xvalue, length(xx)) }
+    return(as.im(X, W, ..., eps=eps, dimyx=dimyx, xy=xy, na.replace=na.replace))
+  }
+  
+  if(is.list(X) && checkfields(X, c("x","y","z"))) {
+    stopifnot(is.matrix(X$z))
+    z <- X$z
+    y <- X$y
+    x <- X$x
+    # Usual S convention as in contour.default() and image.default()
+    # Rows of z correspond to x values.
+    nr <- nrow(z)
+    nc <- ncol(z)
+    lx <- length(x)
+    ly <- length(y)
+    if(lx == nr + 1)
+      x <- (x[-1L] + x[-lx])/2
+    else if(lx != nr)
+      stop("length of x coordinate vector does not match number of rows of z")
+    if(ly == nc + 1)
+      y <- (y[-1L] + y[-ly])/2
+    else if(ly != nc)
+      stop("length of y coordinate vector does not match number of columns of z")
+    # convert to class "im"
+    out <- im(t(z), x, y)
+    # now apply W and dimyx if present
+    if(is.null(W) && !(is.null(eps) && is.null(dimyx) && is.null(xy)))
+      out <- as.im(out, eps=eps, dimyx=dimyx, xy=xy)
+    else if(!is.null(W))
+      out <- as.im(out, W=W, eps=eps, dimyx=dimyx, xy=xy)
+    return(na.handle.im(out, na.replace))
+  }
+  stop("Can't convert X to a pixel image")
+}
+
+as.im.ppp <- function(X, ...) {
+  pixellate(X, ..., weights=NULL, zeropad=FALSE)
+}
+
+as.im.data.frame <- function(X, ..., step, fatal=TRUE, drop=TRUE) {
+  if(missing(step)) {
+    xstep <- ystep <- NULL
+  } else {
+    step <- ensure2vector(step)
+    xstep <- step[1L]
+    ystep <- step[2L]
+  }
+  if(ncol(X) < 3) {
+    whinge <- "Argument 'X' must have at least 3 columns of data"
+    if(fatal) stop(whinge)
+    warning(whinge)
+    return(NULL)
+  }
+  ## extract (x,y) coordinates
+  mch <- matchNameOrPosition(c("x", "y", "z"), names(X))
+  x <- X[, mch[1L]]
+  y <- X[, mch[2L]]
+  z <- X[, -mch[1:2], drop=FALSE]
+  ## unique x,y coordinates
+  xx <- sort(unique(x))
+  yy <- sort(unique(y))
+  jj <- match(x, xx)
+  ii <- match(y, yy)
+  iijj <- cbind(ii, jj)
+  ## make matrix (for incomplete x, y sequence)
+  ok <- checkbigmatrix(length(xx), length(yy), fatal=fatal)
+  if(!ok) return(NULL)
+  mm <- matrix(NA, length(yy), length(xx))
+  ## ensure xx and yy are complete equally-spaced sequences
+  fx <- fillseq(xx, step=xstep)
+  fy <- fillseq(yy, step=ystep)
+  xcol <- fx[[1L]]
+  yrow <- fy[[1L]]
+  ## trap very large matrices
+  ok <- checkbigmatrix(length(xcol), length(yrow), fatal=fatal)
+  if(!ok) return(NULL)
+  ## mapping from xx to xcol, yy to yrow
+  jjj <- fx[[2L]]
+  iii <- fy[[2L]]
+  ## make matrix for full sequence
+  m <- matrix(NA, length(yrow), length(xcol))
+  ## run through columns of pixel values
+  nz <- ncol(z)
+  result <- vector(mode="list", length=nz)
+  names(result) <- colnames(z)
+  for(k in seq_len(nz)) {
+    zk <- z[,k]
+    mm[iijj] <- zk
+    m[iii,jjj] <- mm
+    lev <- levels(zk)
+    mo <- if(is.null(lev)) m else factor(as.vector(m), levels=lev)
+    result[[k]] <- im(mat=mo, xcol=xcol, yrow=yrow) 
+  }
+  if(nz == 1 && drop) result <- result[[1L]]
+  return(result)
+}
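+
+# Hypothetical usage sketch (not part of the package code): build an image
+# from a data frame of gridded (x, y, z) values:
+#   g <- expand.grid(x=seq(0, 1, length=10), y=seq(0, 1, length=10))
+#   g$z <- g$x + g$y
+#   Z <- as.im(g)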
+
+
+# convert to image from some other format, then do something
+
+do.as.im <- function(x, action, ...,
+                     W = NULL, eps = NULL, dimyx = NULL, xy = NULL, 
+                     na.replace = NULL) {
+  Z <- as.im(x, W=W, eps=eps, dimyx=dimyx, xy=xy, na.replace=na.replace)
+  Y <- do.call(action, list(Z, ...))
+  return(Y)
+}
+
+na.handle.im <- function(X, na.replace) {
+if(is.null(na.replace))
+  return(X)
+if(length(na.replace) != 1)
+  stop("na.replace should be a single value")
+X$v[is.na(X$v)] <- na.replace
+return(X)
+}
+
+repair.old.factor.image <- function(x) {
+  # convert from old to new representation of factor images
+  if(x$type != "factor")
+    return(x)
+  v <- x$v
+  isold <- !is.null(lev <- attr(x, "levels"))
+  isnew <- is.factor(v) && is.matrix(v)
+  if(isnew)
+    return(x)
+  if(!isold)
+    stop("Internal error: unrecognised format for factor-valued image")
+  v <- factor(v, levels=lev)
+  dim(v) <- x$dim
+  x$v <- v
+  return(x)
+}
+
+repair.image.xycoords <- function(x) {
+  v <- x$v
+  if(is.null(dim(v))) 
+    dim(v) <- c(length(x$yrow), length(x$xcol))
+  im(v, xrange=x$xrange, yrange=x$yrange, unitname=unitname(x))
+}
diff --git a/R/auc.R b/R/auc.R
new file mode 100644
index 0000000..dfc7d81
--- /dev/null
+++ b/R/auc.R
@@ -0,0 +1,164 @@
+##
+## auc.R
+##
+##  Calculate ROC curve or area under it
+##
+## $Revision: 1.6 $ $Date: 2016/11/10 01:08:04 $
+
+roc <- function(X, ...) { UseMethod("roc") }
+
+roc.ppp <- function(X, covariate, ..., high=TRUE) {
+  nullmodel <- ppm(X)
+  result <- rocData(covariate, nullmodel, ..., high=high)
+  return(result)
+}
+
+roc.lpp <- function(X, covariate, ..., high=TRUE) {
+  nullmodel <- lppm(X)
+  result <- rocData(covariate, nullmodel, ..., high=high)
+  return(result)
+}
+
+rocData <- function(covariate, nullmodel, ..., high=TRUE) {
+  d <- spatialCDFframe(nullmodel, covariate, ...)
+  U <- d$values$U
+  ec <- if(high) ecdf(1-U) else ecdf(U)
+  p <- seq(0,1,length=1024)
+  df <- data.frame(p=p, fobs=ec(p), fnull=p)
+  result <- fv(df,
+               argu="p",
+               ylab=quote(roc(p)),
+               valu="fobs",
+               desc=c("fraction of area",
+                      "observed fraction of points",
+                      "expected fraction if no effect"),
+               fname="roc")
+  fvnames(result, ".") <- c("fobs", "fnull")
+  return(result)
+}
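+
+# Hypothetical usage sketch (not part of the package code): ROC curve for
+# the dependence of point density on the x coordinate:
+#   plot(roc(swedishpines, function(x, y) { x }))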
+
+roc.ppm <- function(X, ...) {
+  stopifnot(is.ppm(X))
+  model <- X
+  lambda <- predict(model, ...)
+  Y <- data.ppm(model)
+  nullmodel <- ppm(Y)
+  result <- rocModel(lambda, nullmodel, ...)
+  return(result)
+}
+
+roc.kppm <- function(X, ...) {
+  stopifnot(is.kppm(X))
+  model <- as.ppm(X)
+  lambda <- predict(model, ...)
+  Y <- data.ppm(model)
+  nullmodel <- ppm(Y)
+  result <- rocModel(lambda, nullmodel, ...)
+  return(result)
+}
+
+roc.lppm <- function(X, ...) {
+  stopifnot(is.lppm(X))
+  model <- X
+  lambda <- predict(model, ...)
+  Y <- X$X
+  nullmodel <- lppm(Y)
+  result <- rocModel(lambda, nullmodel, ...)
+  return(result)
+}
+
+rocModel <- function(lambda, nullmodel, ..., high) {
+  if(!missing(high))
+    warning("Argument 'high' is ignored when computing ROC for a fitted model")
+  d<- spatialCDFframe(nullmodel, lambda, ...) 
+  U <- d$values$U
+  ec <- ecdf(1-U) 
+  p <- seq(0,1,length=1024)
+  fobs <- ec(p)
+  FZ <- d$values$FZ
+  lambdavalues <- if(is.im(lambda)) lambda[] else unlist(lapply(lambda, "["))
+  F1Z <- ewcdf(lambdavalues, lambdavalues/sum(lambdavalues))    
+  pZ <- get("y", environment(FZ))
+  qZ <- get("x", environment(FZ))
+  FZinverse <- approxfun(pZ, qZ, rule=2)
+  ftheo <- 1 - F1Z(FZinverse(1-p))
+  df <- data.frame(p=p, fobs=fobs, ftheo=ftheo, fnull=p)
+  result <- fv(df,
+               argu="p",
+               ylab=quote(roc(p)),
+               valu="fobs",
+               fmla = . ~ p,
+               desc=c("fraction of area",
+                 "observed fraction of points",
+                 "expected fraction of points",
+                 "expected fraction if no effect"),
+               fname="roc")
+  fvnames(result, ".") <- c("fobs", "ftheo", "fnull")
+  return(result)
+}
+
+#    ......................................................
+
+auc <- function(X, ...) { UseMethod("auc") }
+
+auc.ppp <- function(X, covariate, ..., high=TRUE) {
+  d <- spatialCDFframe(ppm(X), covariate, ...)
+  U <- d$values$U
+  EU <- mean(U)
+  result <- if(high) EU else (1 - EU) 
+  return(result)
+}
+
+auc.lpp <- function(X, covariate, ..., high=TRUE) {
+  d <- spatialCDFframe(lppm(X), covariate, ...)
+  U <- d$values$U
+  EU <- mean(U)
+  result <- if(high) EU else (1 - EU) 
+  return(result)
+}
+
+auc.kppm <- function(X, ...) { auc(as.ppm(X), ...) }
+
+auc.ppm <- function(X, ...) {
+  model <- X
+  if(is.multitype(model)) {
+    # cheat
+    ro <- roc(model, ...)
+    aobs <- with(ro, mean(fobs))
+    atheo <- with(ro, mean(ftheo))
+  } else if(is.stationary(model)) {
+    aobs <- atheo <- 1/2
+  } else {
+    lambda <- intensity(model)
+    Fl <- ecdf(lambda[])
+    lambda <- as.im(lambda, Window(model))
+    X <- data.ppm(model)
+    lamX <- lambda[X]
+    aobs <- mean(Fl(lamX))
+    atheo <- mean(lambda[] * Fl(lambda[]))/mean(lambda)
+  }
+  result <- c(aobs, atheo)
+  names(result) <- c("obs", "theo")
+  return(result)
+}
+
+auc.lppm <- function(X, ...) {
+  stopifnot(inherits(X, "lppm"))
+  model <- X
+  if(is.multitype(model)) {
+    # cheat
+    ro <- roc(model, ...)
+    aobs <- with(ro, mean(fobs))
+    atheo <- with(ro, mean(ftheo))
+  } else {
+    lambda <- predict(model, ...)
+    Fl <- ecdf(lambda[])
+    lamX <- lambda[model$X]
+    aobs <- mean(Fl(lamX))
+    atheo <- mean(lambda[] * Fl(lambda[]))/mean(lambda)
+  }
+  result <- c(aobs, atheo)
+  names(result) <- c("obs", "theo")
+  return(result)
+}
+
diff --git a/R/badgey.R b/R/badgey.R
new file mode 100755
index 0000000..c5ec552
--- /dev/null
+++ b/R/badgey.R
@@ -0,0 +1,205 @@
+#
+#
+#    badgey.S
+#
+#    $Revision: 1.16 $	$Date: 2016/04/25 02:34:40 $
+#
+#    Hybrid Geyer process
+#
+#    BadGey()   create an instance of the process
+#                 [an object of class 'interact']
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+BadGey <- local({
+
+  # ........... auxiliary functions ..............
+  delBG <- function(i, r, sat) {
+    r   <- r[-i]
+    if(length(r) == length(sat)) {
+      r   <- r[-i]
+      sat <- sat[-i]
+    } else if(length(sat) == 1) {
+      r <- r[-i]
+    } else stop("Mismatch in dimensions of arguments r and sat")
+    nr <- length(r)
+    if(nr == 0) return(Poisson())
+    if(nr == 1) return(Geyer(r, sat))
+    return(BadGey(r, sat))
+  }
+
+  # .............. template ....................
+  
+  BlankBG <- 
+  list(
+         name     = "hybrid Geyer process",
+         creator  = "BadGey",
+         family   = "pairsat.family",  # will be evaluated later
+         pot      = function(d, par) {
+                       r <- par$r
+                       nr <- length(r)
+                       out <- array(FALSE, dim=c(dim(d), nr))
+                       for(i in 1:nr) 
+                         out[,,i] <- (d <= r[i])
+                       out
+                    },
+         par      = list(r = NULL, sat=NULL), # to fill in later
+         parnames = c("interaction radii", "saturation parameters"),
+         init     = function(self) {
+                      r <- self$par$r
+                      sat <- self$par$sat
+                      if(!is.numeric(r) || !all(r > 0))
+                        stop("interaction radii r must be positive numbers")
+                      if(length(r) > 1 && !all(diff(r) > 0))
+                        stop("interaction radii r must be strictly increasing")
+                      if(!is.numeric(sat) || any(sat < 0))
+                        stop("saturation parameters must be nonnegative numbers")
+                      if(length(sat) != length(r) && length(sat) != 1)
+                        stop("vectors r and sat must have equal length")
+                    },
+         update = NULL,  # default OK
+         print = NULL,    # default OK
+         interpret =  function(coeffs, self) {
+           r <- self$par$r
+           npiece <- length(r)
+           # extract coefficients
+           gammas <- exp(as.numeric(coeffs))
+           # name them
+           gn <- gammas
+           names(gn) <- paste("[0,", r, ")", sep="")
+           #
+           return(list(param=list(gammas=gammas),
+                       inames="interaction parameters gamma_i",
+                       printable=dround(gn)))
+         },
+        valid = function(coeffs, self) {
+           # interaction parameters gamma must be
+           #   non-NA 
+           #   finite, if sat > 0
+           #   less than 1, if sat = Inf
+           gamma <- (self$interpret)(coeffs, self)$param$gammas
+           sat <- self$par$sat
+           if(anyNA(gamma))
+             return(FALSE)
+           return(all((is.finite(gamma) | sat == 0)
+                      & (gamma <= 1 | sat != Inf)))
+        },
+        project = function(coeffs, self){
+          loggammas <- as.numeric(coeffs)
+          sat <- self$par$sat
+          r   <- self$par$r
+          good <- is.finite(loggammas) & (is.finite(sat) | loggammas <= 0)
+          if(all(good))
+            return(NULL)
+          if(!any(good))
+            return(Poisson())
+          bad <- !good
+          if(spatstat.options("project.fast") || sum(bad) == 1) {
+            # remove smallest threshold with an unidentifiable parameter
+            firstbad <- min(which(bad))
+            return(delBG(firstbad, r, sat))
+          } else {
+            # consider all candidate submodels
+            subs <- lapply(which(bad), delBG, r=r, sat=sat)
+            return(subs)
+          }
+        },
+        irange = function(self, coeffs=NA, epsilon=0, ...) {
+          r <- self$par$r
+          sat <- self$par$sat
+          if(all(is.na(coeffs)))
+            return(2 * max(r))
+          gamma <- (self$interpret)(coeffs, self)$param$gammas
+          gamma[is.na(gamma)] <- 1
+          active <- (abs(log(gamma)) > epsilon) & (sat > 0)
+          if(!any(active))
+            return(0)
+          else return(2 * max(r[active]))
+        },
+       version=NULL, # to be added later
+       # fast evaluation is available for the border correction only
+       can.do.fast=function(X,correction,par) {
+         return(all(correction %in% c("border", "none")))
+       },
+       fasteval=function(X,U,EqualPairs,pairpot,potpars,correction,
+                         ..., halfway=FALSE) {
+         # fast evaluator for BadGey interaction
+         if(!all(correction %in% c("border", "none")))
+           return(NULL)
+         if(spatstat.options("fasteval") == "test")
+           message("Using fast eval for BadGey")
+         r   <- potpars$r
+         sat <- potpars$sat
+         # ensure r and sat have equal length
+         if(length(r) != length(sat)) {
+           if(length(r) == 1)
+             r <- rep.int(r, length(sat))
+           else if(length(sat) == 1)
+             sat <- rep.int(sat, length(r))
+           else stop("lengths of r and sat do not match")
+         }
+         # first ensure all data points are in U
+         nX <- npoints(X)
+         nU <- npoints(U)
+         Xseq  <- seq_len(nX)
+         if(length(EqualPairs) == 0) {
+           # no data points currently included 
+           missingdata <- rep.int(TRUE, nX)
+         } else {
+           Xused <- EqualPairs[,1L]
+           missingdata <- !(Xseq %in% Xused)
+         }
+         somemissing <- any(missingdata)
+         if(somemissing) {
+           # add the missing data points
+           nmiss <- sum(missingdata)
+           U <- superimpose(U, X[missingdata], W=X$window)
+           # correspondingly augment the list of equal pairs
+           originalrows <- seq_len(nU)
+           newXindex <- Xseq[missingdata]
+           newUindex <- nU + seq_len(nmiss)
+           EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex))
+           nU <- nU + nmiss
+         }
+         nterms <- length(r)
+         answer <- matrix(, nrow=nU, ncol=nterms)
+         for(k in 1:nterms) {
+           # first determine saturated pair counts
+           counts <- strausscounts(U, X, r[k], EqualPairs) 
+           satcounts <- pmin.int(sat[k], counts)
+           # trapdoor used by suffstat() 
+           if(halfway) 
+             answer[,k] <- satcounts
+           else if(sat[k] == Inf)
+             answer[,k] <- 2 * satcounts
+           else {
+             # extract counts for data points
+             Uindex <- EqualPairs[,2L]
+             Xindex <- EqualPairs[,1L]
+             Xcounts <- integer(npoints(X))
+             Xcounts[Xindex] <- counts[Uindex]
+             # evaluate change in saturated counts of other data points
+             change <- geyercounts(U, X, r[k], sat[k], Xcounts, EqualPairs)
+             answer[,k] <- satcounts + change
+           }
+         }
+         if(somemissing)
+           answer <- answer[originalrows, , drop=FALSE]
+         return(answer)
+       }
+  )
+  class(BlankBG) <- "interact"
+
+  BadGey <- function(r, sat) {
+    instantiate.interact(BlankBG, list(r=r, sat=sat))
+  }
+
+  BadGey <- intermaker(BadGey, BlankBG)
+  
+  BadGey
+
+})
+
+
diff --git a/R/bc.R b/R/bc.R
new file mode 100644
index 0000000..b9d7f5a
--- /dev/null
+++ b/R/bc.R
@@ -0,0 +1,73 @@
+#'     bc.R
+#' 
+#'  Bias correction techniques
+#'
+#'  $Revision: 1.2 $ $Date: 2016/09/15 02:21:15 $
+
+bc <- function(fit, ...) {
+  UseMethod("bc")
+}
+
+bc.ppm <- function(fit, ..., nfine=256) {
+  stopifnot(is.ppm(fit))
+  #
+  theta0 <- coef(fit)
+  nc <- length(theta0)
+  #
+  X <- data.ppm(fit)
+  Z <- is.data(quad.ppm(fit))
+  # evaluate sufficient statistic at data points
+  sufX <- model.matrix(fit)[Z, ]
+  if(ncol(sufX) != nc)
+    stop("Internal error: model.matrix does not match coef(model)")
+  # predict on fine grid
+  finemask <- as.mask(as.owin(fit), dimyx=nfine)
+  lamF <- predict(fit, type="cif", locations=finemask)
+  sufF <- model.images(fit, W=finemask)
+  if(length(sufF) != nc)
+    stop("Internal error: model.images does not match coef(model)")
+  # edge correction
+  if(fit$correction == "border" && ((rbord <- fit$rbord) > 0)) {
+    b <- bdist.pixels(finemask)
+    bX <- bdist.points(X)
+    excludeU <- eval.im(b < rbord)
+    retainX  <- (bX >= rbord)
+    sufX <- sufX[retainX, , drop=FALSE]
+  } else {
+    excludeU <- FALSE
+  }
+  # compute fine approximation to score
+  scoreX <- colSums(sufX)
+  scoreW <- numeric(nc)
+  for(k in seq_len(nc)) {
+    S <- sufF[[k]]
+    # infinite values of S may occur and correspond to zero cif
+    Slam <- eval.im(ifelse(is.infinite(S) | excludeU, 0, S * lamF))
+    scoreW[k] <- integral.im(Slam)
+  }
+  score <- scoreX - scoreW
+  # Newton-Raphson
+  Iinv <- vcov(fit, hessian=TRUE)
+  theta <- theta0 + Iinv %*% score
+  theta <- theta[ , 1L, drop=TRUE]
+  #
+#  return(list(theta0=theta0, theta=theta))
+  return(theta)
+}
+
+# Richardson extrapolation (generic)
+
+rex <- function(x, r=2, k=1, recursive=FALSE) {
+  # x should be a matrix
+  # whose columns are successive estimates of a parameter vector
+  # obtained using "grid step sizes" t, t/r, t/r^2, ...
+  # Estimate from step size t is assumed to converge at rate t^k
+  if(!is.matrix(x)) x <- matrix(x, nrow=1)
+  if(ncol(x) <= 1) return(x)
+  rk <- r^k
+  y <- (rk * x[, -1L, drop=FALSE] - x[, -ncol(x), drop=FALSE])/(rk - 1)
+  if(recursive)
+    y <- rex(y, r=r, k=k+1, recursive=TRUE)
+  return(y)
+}
+
diff --git a/R/beginner.R b/R/beginner.R
new file mode 100644
index 0000000..dbe5957
--- /dev/null
+++ b/R/beginner.R
@@ -0,0 +1,36 @@
+#
+#  beginner.R
+#
+# Helpful information for beginners
+#
+#  $Revision: 1.3 $  $Date: 2015/10/21 09:06:57 $
+#
+
+print.autoexec <- function(x, ...) { x() }
+
+beginner <- function(package="spatstat") {
+  package <- as.character(substitute(package))
+  RShowDoc("BEGINNER.txt", type="txt", package=package)
+  return(invisible(NULL))
+}
+
+class(beginner) <- "autoexec"
+
+foo <- local({
+  fooText <- paste0("Error: object 'foo' not found.\n\n",
+                    "'foo' is not a defined variable or function.\n",
+                    "It is a placeholder name, which serves only to ",
+                    "demonstrate a concept. It represents the name of ",
+                    "any desired object or function. ", 
+                    "Other placeholder names popular with computer scientists ",
+                    "are 'bar', 'foobar', 'qux' and 'mork'.")
+
+  foo <- function() {
+    splat(fooText) 
+    return(invisible(NULL))
+  }
+  class(foo) <- "autoexec"
+  foo
+})
+
+plot.foo <- function(x, ...) foo()
diff --git a/R/bermantest.R b/R/bermantest.R
new file mode 100755
index 0000000..645765f
--- /dev/null
+++ b/R/bermantest.R
@@ -0,0 +1,303 @@
+#
+# bermantest.R
+#
+# Test statistics from Berman (1986)
+#
+#  $Revision: 1.18 $  $Date: 2016/02/11 10:17:12 $
+#
+#
+
+# --------- outdated --------
+
+bermantest <- function(...) {
+  message("bermantest is out of date; use berman.test")
+#  .Deprecated("berman.test", package="spatstat")
+  berman.test(...)
+}
+
+bermantest.ppp <- function(...) {
+    message("bermantest.ppp is out of date; use berman.test.ppp")
+#  .Deprecated("berman.test.ppp", package="spatstat")
+  berman.test.ppp(...)
+}
+
+bermantest.ppm <- function(...) {
+    message("bermantest.ppm is out of date; use berman.test.ppm")
+#  .Deprecated("berman.test.ppm", package="spatstat")
+  berman.test.ppm(...)
+}
+
+bermantest.lpp <- function(...) {
+    message("bermantest.lpp is out of date; use berman.test.lpp")
+#  .Deprecated("berman.test.lpp", package="spatstat")
+  berman.test.lpp(...)
+}
+
+bermantest.lppm <- function(...) {
+    message("bermantest.lppm is out of date; use berman.test.lppm")
+#  .Deprecated("berman.test.lppm", package="spatstat")
+  berman.test.lppm(...)
+}
+
+# ---------------------------
+
+berman.test <- function(...) {
+  UseMethod("berman.test")
+}
+
+berman.test.ppp <-
+  function(X, covariate,
+           which=c("Z1", "Z2"),
+           alternative=c("two.sided", "less", "greater"),
+           ...) {
+    Xname <- short.deparse(substitute(X))
+    covname <- short.deparse(substitute(covariate))
+    if(is.character(covariate)) covname <- covariate
+    which <- match.arg(which)
+    alternative <- match.arg(alternative)
+
+    do.call(bermantestEngine,
+            resolve.defaults(list(ppm(X), covariate, which, alternative),
+                             list(...),
+                             list(modelname="CSR",
+                                  covname=covname, dataname=Xname)))
+}
+
+berman.test.ppm <- function(model, covariate,
+                           which=c("Z1", "Z2"),
+                           alternative=c("two.sided", "less", "greater"),
+                           ...) {
+  modelname <- short.deparse(substitute(model))
+  covname <- short.deparse(substitute(covariate))
+  if(is.character(covariate)) covname <- covariate
+  verifyclass(model, "ppm")
+  which <- match.arg(which)
+  alternative <- match.arg(alternative)
+  if(is.poisson(model) && is.stationary(model))
+    modelname <- "CSR"
+  do.call(bermantestEngine,
+          resolve.defaults(list(model, covariate, which, alternative),
+                           list(...),
+                           list(modelname=modelname,
+                                covname=covname,
+                                dataname=model$Qname)))
+}
+
+berman.test.lpp <-
+  function(X, covariate,
+           which=c("Z1", "Z2"),
+           alternative=c("two.sided", "less", "greater"),
+           ...) {
+    Xname <- short.deparse(substitute(X))
+    covname <- short.deparse(substitute(covariate))
+    if(is.character(covariate)) covname <- covariate
+    which <- match.arg(which)
+    alternative <- match.arg(alternative)
+
+    do.call(bermantestEngine,
+            resolve.defaults(list(lppm(X), covariate, which, alternative),
+                             list(...),
+                             list(modelname="CSR",
+                                  covname=covname, dataname=Xname)))
+}
+
+berman.test.lppm <- function(model, covariate,
+                           which=c("Z1", "Z2"),
+                           alternative=c("two.sided", "less", "greater"),
+                           ...) {
+  modelname <- short.deparse(substitute(model))
+  covname <- short.deparse(substitute(covariate))
+  if(is.character(covariate)) covname <- covariate
+  verifyclass(model, "lppm")
+  which <- match.arg(which)
+  alternative <- match.arg(alternative)
+  if(is.poisson(model) && is.stationary(model))
+    modelname <- "CSR"
+  do.call(bermantestEngine,
+          resolve.defaults(list(model, covariate, which, alternative),
+                           list(...),
+                           list(modelname=modelname,
+                                covname=covname,
+                                dataname=model$Xname)))
+}
+
+bermantestEngine <- function(model, covariate,
+                             which=c("Z1", "Z2"),
+                             alternative=c("two.sided", "less", "greater"),
+                             ...,
+                             modelname, covname, dataname="") {
+
+  csr <- is.poisson(model) && is.stationary(model)
+  if(missing(modelname))
+    modelname <- if(csr) "CSR" else short.deparse(substitute(model))
+  if(missing(covname)) {
+    covname <- short.deparse(substitute(covariate))
+    if(is.character(covariate)) covname <- covariate
+  }
+
+  which <- match.arg(which)
+  alternative <- match.arg(alternative)
+
+  if(!is.poisson(model))
+    stop("Only implemented for Poisson point process models")
+
+  # ........... first assemble data ...............
+  fram <- spatialCDFframe(model, covariate, ...,
+                        modelname=modelname,
+                        covname=covname,
+                        dataname=dataname)
+  fvalues <- fram$values
+  info    <- fram$info
+  # values of covariate at data points
+  ZX <- fvalues$ZX
+  # transformed to Unif[0,1] under H0
+  U  <- fvalues$U
+  # values of covariate at pixels
+  Zvalues <- fvalues$Zvalues
+  # corresponding pixel areas/weights
+  weights <- fvalues$weights
+  # intensity of model
+  lambda  <- fvalues$lambda
+
+  switch(which,
+         Z1={
+           #......... Berman Z1 statistic .....................
+           method <-
+             paste("Berman Z1 test of",
+                   if(info$csr) "CSR" else "inhomogeneous Poisson process",
+                   "in", info$spacename)
+           # sum of covariate values at data points
+           Sn <- sum(ZX)
+           # predicted mean and variance
+           lamwt <- lambda * weights
+           En    <- sum(lamwt)
+           ESn   <- sum(lamwt * Zvalues)
+           varSn <- sum(lamwt * Zvalues^2)
+           # working, for plot method
+           working <- list(meanZX=mean(ZX),
+                           meanZ=ESn/En)
+           # standardise
+           statistic <- (Sn - ESn)/sqrt(varSn)
+           names(statistic) <- "Z1"
+           p.value <- switch(alternative,
+                            two.sided=2 * pnorm(-abs(statistic)),
+                            less=pnorm(statistic),
+                            greater=pnorm(statistic, lower.tail=FALSE))
+           altblurb <- switch(alternative,
+                              two.sided="two-sided",
+                              less="mean value of covariate at random points is less than predicted under model",
+                              greater="mean value of covariate at random points is greater than predicted under model")
+           valuename <- paste("covariate",
+                              sQuote(paste(covname, collapse="")),
+                              "evaluated at points of",
+                              sQuote(dataname))
+         },
+         Z2={
+           #......... Berman Z2 statistic .....................
+           method <-
+             paste("Berman Z2 test of",
+                   if(info$csr) "CSR" else "inhomogeneous Poisson process",
+                   "in", info$spacename)
+           npts <- length(ZX)
+           statistic <- sqrt(12/npts) * (sum(U) - npts/2)
+           working <- list(meanU=mean(U))
+           names(statistic) <- "Z2"
+           p.value <- switch(alternative,
+                            two.sided=2 * pnorm(-abs(statistic)),
+                            less=pnorm(statistic),
+                            greater=pnorm(statistic, lower.tail=FALSE))
+           altblurb <- switch(alternative,
+                              two.sided="two-sided",
+                              less="covariate values at random points have lower quantiles than predicted under model",
+                              greater="covariate values at random points have higher quantiles than predicted under model")
+           valuename <- paste("covariate",
+                              sQuote(paste(covname, collapse="")),
+                              "evaluated at points of",
+                              sQuote(dataname), "\n\t",
+                              "and transformed to uniform distribution under",
+                              if(info$csr) modelname else sQuote(modelname))
+         })
+           
+  out <- list(statistic=statistic,
+              p.value=p.value,
+              alternative=altblurb,
+              method=method,
+              which=which,
+              working=working,
+              data.name=valuename,
+              fram=fram)
+  class(out) <- c("htest", "bermantest")
+  return(out)
+}
+
+plot.bermantest <-
+  function(x, ...,
+           lwd=par("lwd"), col=par("col"), lty=par("lty"),
+           lwd0=lwd, col0=2, lty0=2)
+{
+  fram <- x$fram
+  if(!is.null(fram)) {
+    values <- fram$values
+    info <- fram$info
+  } else {
+    # old style
+    ks <- x$ks
+    values <- attr(ks, "prep")
+    info <- attr(ks, "info")
+  }
+  work <- x$working
+  op <- options(useFancyQuotes=FALSE)
+  switch(x$which,
+         Z1={
+           # plot cdf's of Z
+           FZ <- values$FZ
+           xxx <- get("x", environment(FZ))
+           yyy <- get("y", environment(FZ))
+           main <- c(x$method,
+                     paste("based on distribution of covariate",
+                           sQuote(info$covname)),
+                     paste("Z1 statistic =", signif(x$statistic, 4)),
+                     paste("p-value=", signif(x$p.value, 4)))
+           do.call(plot.default,
+                   resolve.defaults(
+                                    list(x=xxx, y=yyy, type="l"),
+                                    list(...),
+                                    list(lwd=lwd0, col=col0, lty=lty0),
+                                    list(xlab=info$covname,
+                                         ylab="probability",
+                                         main=main)))
+           FZX <- values$FZX
+           if(is.null(FZX))
+             FZX <- ecdf(values$ZX)
+           plot(FZX, add=TRUE, do.points=FALSE, lwd=lwd, col=col, lty=lty)
+           abline(v=work$meanZ, lwd=lwd0,col=col0, lty=lty0, xpd=FALSE)
+           abline(v=work$meanZX, lwd=lwd,col=col, lty=lty, xpd=FALSE)
+         },
+         Z2={
+           # plot cdf of U
+           U <- values$U
+           cdfU <- ecdf(U)
+           main <- c(x$method,
+                     paste("based on distribution of covariate",
+                           sQuote(info$covname)),
+                     paste("Z2 statistic =", signif(x$statistic, 4)),
+                     paste("p-value=", signif(x$p.value, 4)))
+           do.call(plot.ecdf,
+                   resolve.defaults(
+                                    list(cdfU),
+                                    list(...),
+                                    list(do.points=FALSE, asp=1),
+                                    list(xlim=c(0,1), ylim=c(0,1)),
+                                    list(lwd=lwd, col=col, lty=lty),
+                                    list(xlab="U", ylab="relative frequency"),
+                                    list(main=main)))
+           abline(0,1,lwd=lwd0,col=col0,lty=lty0, xpd=FALSE)
+           abline(v=0.5, lwd=lwd0,col=col0,lty=lty0, xpd=FALSE)
+           abline(v=work$meanU, lwd=lwd,col=col,lty=lty, xpd=FALSE)
+         })
+  options(op)
+  return(invisible(NULL))
+}
+
+
+
diff --git a/R/blur.R b/R/blur.R
new file mode 100755
index 0000000..a637c1b
--- /dev/null
+++ b/R/blur.R
@@ -0,0 +1,102 @@
+#
+# blur.R
+#
+# apply Gaussian blur to an image
+#
+#    $Revision: 1.16 $   $Date: 2016/04/25 02:34:40 $
+#
+fillNA <- function(x, value=0) {
+  stopifnot(is.im(x))
+  v <- x$v
+  v[is.na(v)] <- value
+  x$v <- v
+  return(x)
+}
+
+Smooth.im <- function(X, sigma=NULL, ...,
+                      normalise=FALSE, bleed=TRUE, varcov=NULL) {
+  blur(X, sigma=sigma, ..., normalise=normalise, bleed=bleed, varcov=varcov)
+}
+
+blur <- function(x, sigma=NULL, ..., normalise=FALSE, bleed=TRUE, varcov=NULL) {
+  stopifnot(is.im(x))
+  # determine smoothing kernel 
+  sigma.given <- !is.null(sigma)
+  varcov.given <- !is.null(varcov)
+  if (sigma.given) {
+    stopifnot(is.numeric(sigma))
+    stopifnot(length(sigma) %in% c(1, 2))
+    stopifnot(all(sigma > 0))
+  }
+  if (varcov.given)
+    stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov) ==
+              2)
+  ngiven <- varcov.given + sigma.given
+  switch(ngiven + 1L,
+         {
+           sigma <- (1/8) * min(diff(x$xrange), diff(x$yrange))
+         }, {
+           if (sigma.given && length(sigma) == 2)
+             varcov <- diag(sigma^2)
+           if (!is.null(varcov))
+             sigma <- NULL
+         }, {
+           stop(paste("Give only one of the arguments", sQuote("sigma"),
+                      "and", sQuote("varcov")))
+         })
+  # replace NA's in image raster by zeroes 
+  X <- fillNA(x, 0)
+  # convolve with Gaussian
+  Y <- second.moment.calc(X, sigma=sigma, varcov=varcov, what="smooth")
+  # if no bleeding, we restrict data to the original boundary
+  if(!bleed)
+    Y$v[is.na(x$v)] <- NA
+  # 
+  if(!normalise)
+    return(Y)
+  # normalisation:
+  # convert original image to window (0/1 image)
+  Xone <- x
+  isna <- is.na(x$v)
+  Xone$v[isna] <- 0
+  Xone$v[!isna] <- 1
+  # convolve with Gaussian
+  Ydenom <- second.moment.calc(Xone, sigma=sigma, ..., varcov=varcov, what="smooth")
+  # normalise
+  Z <- eval.im(Y/Ydenom)
+  return(Z)
+}
+  
+safelookup <- function(Z, x, factor=2, warn=TRUE) {
+  # x is a ppp
+  # evaluates Z[x], replacing any NA's by blur(Z)[x]
+  Zvals <- Z[x, drop=FALSE]
+  if(any(isna <- is.na(Zvals))) {
+    # First pass - look up values at neighbouring pixels if valid
+    XX <- x[isna]
+    rc <- nearest.valid.pixel(XX$x, XX$y, Z)
+    Zvals[isna] <- Z$v[cbind(rc$row, rc$col)]
+  }
+  if(any(isna <- is.na(Zvals))) {
+    # Second pass - extrapolate
+    XX <- x[isna]
+    pixdiam <- sqrt(Z$xstep^2 + Z$ystep^2)
+    # expand domain of Z 
+    RX <- as.rectangle(x)
+    RZ <- as.rectangle(Z)
+    bb <- boundingbox(RX, RZ)
+    big <- grow.rectangle(bb, 2 * pixdiam)
+    Z <- rebound.im(Z, big)
+    # now blur
+    Zblur <- blur(Z, factor * pixdiam, bleed=TRUE, normalise=TRUE)
+    Bvals <- Zblur[XX, drop=FALSE]
+    if(anyNA(Bvals)) 
+      stop("Internal error: pixel values were NA, even after blurring")
+    Zvals[isna] <- Bvals
+    if(warn)
+      warning(paste(sum(isna), "out of", npoints(x), "pixel values",
+                    "were outside the pixel image domain",
+                    "and were estimated by convolution"))
+  }
+  return(Zvals)
+}
diff --git a/R/boundingbox.R b/R/boundingbox.R
new file mode 100644
index 0000000..4365a8c
--- /dev/null
+++ b/R/boundingbox.R
@@ -0,0 +1,203 @@
+##
+## boundingbox.R
+##
+## $Revision: 1.8 $ $Date: 2016/02/11 10:17:12 $
+
+bounding.box <- function(...) {
+  .Deprecated("boundingbox", "spatstat")
+  boundingbox(...)
+}
+
+boundingbox <- function(...) {
+  ## remove any NULL arguments
+  arglist <- list(...)
+  if(any(isnull <- sapply(arglist, is.null))) {
+    if(length(arglist[!isnull]))
+       return(do.call(boundingbox, arglist[!isnull]))
+    stop("No non-null arguments given.\n")
+  }
+  UseMethod("boundingbox")
+}
+
+boundingbox.solist <- function(...) {
+  argh <- list(...)
+  issl <- sapply(argh, inherits, what="solist")
+  yarg <- c(do.call(c, argh[issl]), argh[!issl])
+  do.call(bbEngine, yarg)
+}
+
+boundingbox.ppp  <-
+boundingbox.psp  <-
+boundingbox.owin <-
+boundingbox.list <-
+boundingbox.im   <- function(...) {
+   bbEngine(...)
+}
+
+recognise.spatstat.type <- local({
+
+  knowntypes <- c("ppp","psp","owin","im")
+
+  function(x) {
+    for(kt in knowntypes)
+      if(inherits(x, kt)) return(kt)
+    if(is.list(x) && checkfields(x, c("x", "y"))
+       && is.numeric(x$x) && is.numeric(x$y) &&
+       is.vector(x$x) && is.vector(x$y) && length(x$x) == length(x$y))
+        return("listxy")
+    aso <- try(as.owin(x), silent=TRUE)
+    if(!inherits(aso, "try-error")) return("as.owin")
+    return("unknown")
+  }
+})
+
+bbEngine <- local({
+
+  bb.listxy <- function(X) owin(range(X$x), range(X$y))
+
+  bbEngine <- function(...) {
+    wins <- list(...)
+    ## first detect any numeric vector arguments
+    if(any(isnumvec <- unlist(lapply(wins, is.vector)) &
+           unlist(lapply(wins, is.numeric)))) {
+      ## invoke default method on these arguments
+      bb <- do.call(boundingbox, wins[isnumvec])
+      ## repack
+      wins <- append(wins[!isnumvec], list(bb))
+    }
+    if(length(wins) > 1) {
+      ## multiple arguments -- compute bounding box for each argument.
+      objtype <- unlist(lapply(wins, recognise.spatstat.type))
+      nbad <- sum(objtype == "unknown")
+      if(nbad > 0) {
+        whinge <- paste("Function boundingbox called with",
+                        nbad,"unrecognised",
+                        ngettext(nbad,"argument","arguments"))
+        stop(whinge, call.=FALSE)
+      }
+      if(any(isppp <- (objtype == "ppp"))) 
+        wins[isppp] <- lapply(wins[isppp], boundingbox)
+      if(any(islistxy <- (objtype == "listxy")))
+        wins[islistxy] <- lapply(wins[islistxy], bb.listxy)
+      ## then convert all windows to owin
+      wins <- lapply(wins, as.owin)
+      ## then take bounding box of each window
+      boxes <- lapply(wins, boundingbox)
+      ## discard NULL values
+      isnull <- unlist(lapply(boxes, is.null))
+      boxes <- boxes[!isnull]
+      ## take bounding box of these boxes
+      xrange <- range(unlist(lapply(boxes, getElement, name="xrange")))
+      yrange <- range(unlist(lapply(boxes, getElement, name="yrange")))
+      W <- owin(xrange, yrange)
+      ## If all of the windows have a common unit name, give
+      ## that unit name to the bounding box.
+      youse <- unique(t(sapply(boxes,unitname)))
+      if(nrow(youse)==1) {
+        ute <- unlist(youse[1L,])
+        unitname(W) <- ute
+      }
+      return(W)
+    }
+
+    ## single argument
+    w <- wins[[1L]]
+    if(is.null(w))
+      return(NULL)
+    
+    wtype <- recognise.spatstat.type(w)
+    ## point pattern?
+    if(wtype == "ppp")
+      return(boundingbox(coords(w)))
+    
+    ## list(x,y)
+    if(wtype == "listxy")
+      return(bb.listxy(w))
+          
+    ## convert to window
+    w <- as.owin(w)
+
+    ## determine a tight bounding box for the window w
+    switch(w$type,
+           rectangle = {
+             return(w)
+           },
+           polygonal = {
+             bdry <- w$bdry
+             if(length(bdry) == 0)
+               return(NULL)
+             xr <- range(unlist(lapply(bdry, rangeofx)))
+             yr <- range(unlist(lapply(bdry, rangeofy)))
+             return(owin(xr, yr, unitname=unitname(w)))
+           },
+           mask = {
+             m <- w$m
+             x <- rasterx.mask(w)
+             y <- rastery.mask(w)
+             xr <- range(x[m]) + c(-1,1) * w$xstep/2
+             yr <- range(y[m]) + c(-1,1) * w$ystep/2
+             return(owin(xr, yr, unitname=unitname(w)))
+           },
+           stop("unrecognised window type", w$type)
+           )
+  }
+
+  rangeofx <- function(a) range(a$x)
+  rangeofy <- function(a) range(a$y)
+  
+  bbEngine
+})
+
+
+boundingbox.default <- local({
+
+  bb.listxy <- function(X) owin(range(X$x), range(X$y))
+
+  boundingbox.default <- function(...) {
+    arglist <- list(...)
+    bb <- NULL
+    if(length(arglist) == 0)
+      return(bb)
+    ## handle numeric vector arguments
+    if(any(isnumvec <- unlist(lapply(arglist, is.vector)) &
+           unlist(lapply(arglist, is.numeric)))) {
+      nvec <- sum(isnumvec)
+      if(nvec != 2)
+        stop(paste("boundingbox.default expects 2 numeric vectors:",
+                   nvec, "were supplied"),
+             call.=FALSE)
+      vecs <- arglist[isnumvec]
+      x <- vecs[[1L]]
+      y <- vecs[[2L]]
+      bb <- if(length(x) == length(y)) owin(range(x), range(y)) else NULL
+      arglist <- arglist[!isnumvec]
+    }
+    if(length(arglist) == 0)
+      return(bb)
+    ## other objects are present
+    objtype <- unlist(lapply(arglist, recognise.spatstat.type))
+    ## Unrecognised?
+    nbad <- sum(objtype == "unknown")
+    if(nbad > 0) {
+      whinge <- paste("Function boundingbox called with",
+                      nbad,"unrecognised",
+                      ngettext(nbad,"argument","arguments"))
+      stop(whinge, call.=FALSE)
+    }
+    if(any(aso <- (objtype == "as.owin"))) {
+      ## promote objects to owin (to avoid infinite recursion!)
+      arglist[aso] <- lapply(arglist[aso], as.owin)
+    }
+    if(any(lxy <- (objtype == "listxy"))) {
+      ## handle list(x,y) objects 
+      arglist[lxy] <- lapply(arglist[lxy], bb.listxy)
+    }
+    result <- do.call(boundingbox,
+                      if(is.null(bb)) arglist else append(list(bb), arglist))
+    return(result)
+  }
+
+  boundingbox.default
+})
+
+
diff --git a/R/boundingcircle.R b/R/boundingcircle.R
new file mode 100644
index 0000000..e938fd9
--- /dev/null
+++ b/R/boundingcircle.R
@@ -0,0 +1,69 @@
+#'
+#'    boundingcircle.R
+#'
+#'  bounding circle and its centre
+#'
+#'  $Revision: 1.6 $ $Date: 2017/06/05 10:31:58 $
+#'
+
+circumradius <- function(x, ...) {
+  .Deprecated("boundingradius")
+  UseMethod("boundingradius")
+}
+circumradius.owin <- function(x, ...) {
+  .Deprecated("boundingradius.owin")
+  boundingradius.owin(x, ...)
+}
+circumradius.ppp <- function(x, ...) {
+  .Deprecated("boundingradius.ppp")
+  boundingradius.ppp(x, ...)
+}
+
+boundingradius <- function(x, ...) {
+  UseMethod("boundingradius")
+}
+
+boundingcentre <- function(x, ...) {
+  UseMethod("boundingcentre")
+}
+
+boundingcircle <- function(x, ...) {
+  UseMethod("boundingcircle")
+}
+
+#' owin
+
+boundingradius.owin <- function(x, ...) {
+  sqrt(min(fardist(x, ..., squared=TRUE)))
+}
+
+boundingcentre.owin <- function(x, ...) {
+  z <- where.min(fardist(x, ..., squared=TRUE))
+  Window(z) <- x
+  return(z)
+}
+
+boundingcircle.owin <- function(x, ...) {
+  d2 <- fardist(x, ..., squared=TRUE)
+  z <- where.min(d2)
+  r <- sqrt(min(d2))
+  w <- disc(centre=z, radius=r) 
+  return(w)
+}
+
+#' ppp
+
+boundingradius.ppp <- function(x, ...) {
+  boundingradius(convexhull(x), ...)
+}
+
+boundingcentre.ppp <- function(x, ...) {
+  z <- boundingcentre(convexhull(x), ...)
+  Window(z) <- Window(x)
+  return(z)
+}
+
+boundingcircle.ppp <- function(x, ...) {
+  boundingcircle(convexhull(x), ...)
+}
+
diff --git a/R/breakpts.R b/R/breakpts.R
new file mode 100755
index 0000000..0d6a956
--- /dev/null
+++ b/R/breakpts.R
@@ -0,0 +1,221 @@
+#
+#	breakpts.S
+#
+#	A simple class definition for the specification
+#       of histogram breakpoints in the special form we need them.
+#
+#	even.breaks()
+#
+#	$Revision: 1.21 $	$Date: 2017/06/05 10:31:58 $
+#
+#
+#       Other functions in this directory use the standard Splus function
+#	hist() to compute histograms of distance values.
+#       One argument of hist() is the vector 'breaks'
+#	of breakpoints for the histogram cells. 
+#
+#       The breakpoints must
+#            (a) span the range of the data
+#            (b) be given in increasing order
+#            (c) satisfy breaks[2] = 0,
+#
+#	The function make.even.breaks() will create suitable breakpoints.
+#
+#       Condition (c) means that the first histogram cell has
+#       *right* endpoint equal to 0.
+#
+#       Since all our distance values are nonnegative, the effect of (c) is
+#       that the first histogram cell counts the distance values which are
+#       exactly equal to 0. Hence F(0), the probability P{X = 0},
+#       is estimated without a discretisation bias.
+#
+#	We assume the histograms have followed the default counting rule
+#	in hist(), which is such that the k-th entry of the histogram
+#	counts the number of data values in 
+#		I_k = ( breaks[k],breaks[k+1] ]	for k > 1
+#		I_1 = [ breaks[1],breaks[2]   ]
+#
+#	The implementations of estimators of c.d.f's in this directory
+#       produce vectors of length = length(breaks)-1
+#       with value[k] = estimate of F(breaks[k+1]),
+#       i.e. value[k] is an estimate of the c.d.f. at the RIGHT endpoint
+#       of the kth histogram cell.
+#
+#       An object of class 'breakpts' contains:
+#
+#              $val     the actual breakpoints
+#              $max     the maximum value (= last breakpoint)
+#              $ncells  total number of histogram cells
+#              $r       right endpoints, r = val[-1]
+#              $even    logical = TRUE if cells known to be evenly spaced
+#              $npos    number of histogram cells on the positive halfline
+#                        = length(val) - 2,
+#                       or NULL if cells not evenly spaced
+#              $step    histogram cell width
+#                       or NULL if cells not evenly spaced
+#       
+# --------------------------------------------------------------------
+breakpts <- function(val, maxi, even=FALSE, npos=NULL, step=NULL) {
+  out <- list(val=val, max=maxi, ncells=length(val)-1L, r = val[-1L],
+              even=even, npos=npos, step=step)
+  class(out) <- "breakpts"
+  out
+}
+
+scalardilate.breakpts <- function(X, f, ...) {
+  out <- with(X,
+              list(val    = f*val,
+                   max    = f*max,
+                   ncells = ncells,
+                   r      = f*r,
+                   even   = even,
+                   npos   = npos,
+                   step   = f*step))
+  class(out) <- "breakpts"
+  out
+}  
+                            
+"make.even.breaks" <- 
+function(bmax, npos, bstep) {
+  if(bmax <= 0)
+    stop("bmax must be positive")
+  if(missing(bstep) && missing(npos))
+    stop(paste("Must specify either", sQuote("bstep"),
+               "or", sQuote("npos")))
+  if(!missing(npos)) {
+    bstep <- bmax/npos
+    val <- seq(from=0, to=bmax, length.out=npos+1L)
+    val <- c(-bstep,val)
+    right <- bmax
+  } else {
+    npos <- ceiling(bmax/bstep)
+    right <- bstep * npos
+    val <- seq(from=0, to=right, length.out=npos+1L)
+    val <- c(-bstep,val)
+  }
+  breakpts(val, right, TRUE, npos, bstep)
+}
+
+"as.breakpts" <- function(...) {
+
+  XL <- list(...)
+
+  if(length(XL) == 1L) {
+    # single argument
+    X <- XL[[1L]]
+
+    if(!is.null(class(X)) && class(X) == "breakpts")
+    # X already in correct form
+      return(X)
+  
+    if(is.vector(X) && length(X) > 2) {
+    # it's a vector
+      if(X[2L] != 0)
+        stop("breakpoints do not satisfy breaks[2] = 0")
+      # The following test for equal spacing is used in hist.default
+      steps <- diff(X)
+      if(diff(range(steps)) < 1e-07 * mean(steps))
+        # equally spaced
+        return(breakpts(X, max(X), TRUE, length(X)-2, steps[1L]))
+      else
+        # unknown spacing
+        return(breakpts(X, max(X), FALSE))
+    }
+  } else {
+
+    # There are multiple arguments.
+  
+    # exactly two arguments - interpret as even.breaks()
+    if(length(XL) == 2)
+      return(make.even.breaks(XL[[1L]], XL[[2L]]))
+
+    # two arguments 'max' and 'npos'
+  
+    if(!is.null(XL$max) && !is.null(XL$npos))
+      return(make.even.breaks(XL$max, XL$npos))
+
+    # otherwise
+    stop("Don't know how to convert these data to breakpoints")
+  }
+  # never reached
+}
+
+
+check.hist.lengths <- function(hist, breaks) {
+  verifyclass(breaks, "breakpts")
+  nh <- length(hist)
+  nb <- breaks$ncells
+  if(nh != nb)
+    stop(paste("Length of histogram =", nh,
+               "not equal to number of histogram cells =", nb))
+}
+
+breakpts.from.r <- function(r) {
+  if(!is.numeric(r) && !is.vector(r))
+    stop("r must be a numeric vector")
+  if(length(r) < 2)
+    stop(paste("r has length", length(r), "- must be at least 2"))
+  if(r[1L] != 0)
+    stop("First r value must be 0")
+  if(any(diff(r) <= 0))
+    stop("successive values of r must be increasing")
+  dr <- r[2L] - r[1L]
+  b <- c(-dr, r)
+  return(as.breakpts(b))
+}
+
+handle.r.b.args <- function(r=NULL, breaks=NULL, window, pixeps=NULL,
+                            rmaxdefault=NULL) {
+
+        if(!is.null(r) && !is.null(breaks))
+          stop(paste("Do not specify both",
+                     sQuote("r"), "and", sQuote("breaks")))
+  
+        if(!is.null(breaks)) {
+          breaks <- as.breakpts(breaks)
+        } else if(!is.null(r)) {
+          breaks <- breakpts.from.r(r)
+	} else {
+	   #' determine rmax
+	   #' ignore infinite or NA values of rmaxdefault
+          if(!is.null(rmaxdefault) && !is.finite(rmaxdefault))
+	     rmaxdefault <- NULL
+          rmax <- rmaxdefault %orifnull% diameter(Frame(window))
+          if(is.null(pixeps)) {
+            pixeps <- if(is.mask(window))
+                      min(window$xstep, window$ystep) else rmax/128
+          }
+          rstep <- pixeps/4
+          breaks <- make.even.breaks(rmax, bstep=rstep)
+        }
+
+        return(breaks)
+}
+
+check.finespacing <- function(r, eps=NULL, win=NULL,
+                              rmaxdefault = max(r), 
+                              context="",
+                              action=c("fatal", "warn", "silent"),
+                              rname) {
+  if(missing(rname)) rname <- deparse(substitute(r))
+  action <- match.arg(action)
+  if(is.null(eps)) {
+    b <- handle.r.b.args(window=win, rmaxdefault=rmaxdefault)
+    eps <- b$step
+  }
+  dr <- max(diff(r))
+  if(dr > eps * 1.01) {
+    whinge <- paste(context, "the successive", rname,
+                    "values must be finely spaced:",
+                    "given spacing =",
+                    paste0(signif(dr, 5), ";"),
+                    "required spacing <= ",
+                    signif(eps, 3))
+    switch(action,
+           fatal = stop(whinge, call.=FALSE),
+           warn = warning(whinge, call.=FALSE),
+           silent = {})
+    return(FALSE)
+  }
+  return(TRUE)
+}
diff --git a/R/bugtable.R b/R/bugtable.R
new file mode 100644
index 0000000..50fb150
--- /dev/null
+++ b/R/bugtable.R
@@ -0,0 +1,79 @@
+#'
+#'        bugtable.R
+#' 
+#'    $Revision: 1.3 $ $Date: 2017/01/07 04:20:31 $
+
+bugfixes <- function(sinceversion=NULL, sincedate=NULL,
+                     package="spatstat",
+                     show=TRUE) {
+  if(!is.null(sincedate) && package != "spatstat") {
+    #' news items after specified date
+    ne <- news(package=package)
+    if(is.null(ne) || is.null(ne$Date) || anyNA(ne$Date))
+      stop(paste(if(is.null(ne)) "News" else "Date",
+                    "information is not available for package",
+                 sQuote(package)), call.=FALSE)
+    a <- eval(substitute(news(Date >= SD & grepl("^BUG", Category),
+                              package=package),
+                         list(SD=sincedate)))
+  } else {
+    #' determine a corresponding version number
+    if(is.null(sinceversion) && is.null(sincedate)) {
+      #' default is latest version
+      dfile <- system.file("DESCRIPTION", package=package)
+      sinceversion <- read.dcf(file=dfile, fields="Version")
+    } else if(!is.null(sincedate) && package == "spatstat") {
+      #' read spatstat release history table
+      fname <- system.file("doc", "packagesizes.txt", package="spatstat")
+      p <- read.table(fname, header=TRUE, stringsAsFactors=FALSE)
+      #' find earliest package version on or after the given date
+      imin <- with(p, min(which(as.Date(date) >= sincedate)))
+      sinceversion <- p[imin, "version"]
+    }
+    a <- eval(substitute(news(Version >= sv & grepl("^BUG", Category),
+                              package=package),
+                         list(sv=sinceversion)))
+  }
+  if(!is.data.frame(a)) return(NULL)
+  #' split each entry into lines
+  alines <- strsplit(a$Text, "\n")
+  #' extract first line
+  f <- unname(sapply(alines, "[", i=1L))
+  #' extract body
+  b <- unname(lapply(alines, "[", i=-1L))
+  b <- unname(sapply(b, paste, collapse="\n"))
+  #' extract header from first line
+  h <- unname(sapply(strsplit(f, ":"), "[", i=1L))
+  h <- unname(sapply(strsplit(h, ","), "[", i=1L))
+  h <- unname(sapply(strsplit(h, " "), "[", i=1L))
+  #' sort by header
+  oo <- order(h, f)
+  #' rebuild
+  z <- data.frame(Header=h[oo],
+                  Firstline=f[oo],
+                  Body=b[oo],
+                  Version=a$Version[oo],
+                  stringsAsFactors=FALSE)
+  class(z) <- c("bugtable", class(z))
+  if(!show) return(z)
+  page(z, method="print")
+  return(invisible(z))
+}
+
+class(bugfixes) <- "autoexec"
+              
+print.bugtable <- function(x, ...) {
+  hprev <- ""
+  for(i in seq_len(nrow(x))) {
+    h <- x$Header[i]
+    f <- x$Firstline[i]
+    if(h != hprev) {
+      # new main header
+      cat("\n***", h, "***\n", fill=TRUE)
+    }
+    cat(x$Version[i], ":", f, fill=TRUE)
+    cat(x$Body[i], "\n", fill=TRUE)
+    hprev <- h
+  }
+  return(invisible(NULL))
+}
diff --git a/R/bw.diggle.R b/R/bw.diggle.R
new file mode 100644
index 0000000..30b1bba
--- /dev/null
+++ b/R/bw.diggle.R
@@ -0,0 +1,95 @@
+##
+## bw.diggle.R
+##
+## bandwidth selection rules bw.diggle and bw.scott (for density.ppp)
+##
+## $Revision: 1.5 $ $Date: 2017/06/05 10:31:58 $
+##
+
+bw.scott <- function(X) {
+  stopifnot(is.ppp(X))
+  n <- npoints(X)
+  sdx <- sqrt(var(X$x))
+  sdy <- sqrt(var(X$y))
+  return(c(sdx, sdy) * n^(-1/6))
+}
+
+bw.diggle <- local({
+
+  #' integrand 
+  phi <- function(x,h) { 
+    if(h <= 0) return(numeric(length(x)))
+    y <- pmax.int(0, pmin.int(1, x/(2 * h)))
+    4 * pi * h^2 * (acos(y) - y * sqrt(1 - y^2))
+  }
+  
+  #' secret option for debugging
+  mf <- function(..., method=c("C", "interpreted")) { match.arg(method) }
+
+  
+  bw.diggle <- function(X, ..., correction="good", hmax=NULL, nr=512) {
+    stopifnot(is.ppp(X))
+    method <- mf(...)
+    W <- Window(X)
+    lambda <- npoints(X)/area(W)
+    rmax <- if(!is.null(hmax)) (4 * hmax) else rmax.rule("K", W, lambda)
+    r <- seq(0, rmax, length=nr)
+    K <- Kest(X, r=r, correction=correction)
+    yname <- fvnames(K, ".y")
+    K <- K[, c("r", yname)]
+    ## check that K values can be passed to C code
+    if(any(bad <- !is.finite(K[[yname]]))) {
+      ## throw out bad values
+      lastgood <- min(which(bad)) - 1L
+      if(lastgood < 2L)
+        stop("K function yields too many NA/NaN values")
+      K <- K[1:lastgood, ]
+    }
+    rvals <- K$r
+    ## evaluation of M(r) requires K(2r)
+    rmax2 <- max(rvals)/2
+    if(!is.null(alim <- attr(K, "alim"))) rmax2 <- min(alim[2L], rmax2)
+    ok <- (rvals <= rmax2)
+    switch(method,
+           interpreted = {
+             rvals <- rvals[ok]
+             nr <- length(rvals)
+             J <- numeric(nr)
+             for(i in 1:nr) 
+               J[i] <- stieltjes(phi, K, h=rvals[i])[[yname]]/(2 * pi)
+           },
+           C = {
+             nr <- length(rvals)
+             nrmax <- sum(ok)
+             dK <- diff(K[[yname]])
+             ndK <- length(dK)
+             z <- .C("digberJ",
+                     r=as.double(rvals),
+                     dK=as.double(dK),
+                     nr=as.integer(nr),
+                     nrmax=as.integer(nrmax),
+                     ndK=as.integer(ndK),
+                     J=as.double(numeric(nrmax)),
+                     PACKAGE = "spatstat")
+             J <- z$J
+             rvals <- rvals[ok]
+           })
+    pir2 <- pi * rvals^2
+    M <- (1/lambda - 2 * K[[yname]][ok])/pir2 + J/pir2^2
+    ## This calculation was for the uniform kernel on B(0,h)
+    ## Convert to standard deviation of (one-dimensional marginal) kernel
+    sigma <- rvals/2
+    result <- bw.optim(M, sigma,
+                       creator="bw.diggle",
+                       criterion="Berman-Diggle Cross-Validation",
+                       J=J,
+                       lambda=lambda,
+                       unitname=unitname(X))
+    return(result)
+  }
+
+  bw.diggle
+})
+
+
+
diff --git a/R/bw.optim.R b/R/bw.optim.R
new file mode 100755
index 0000000..f14659a
--- /dev/null
+++ b/R/bw.optim.R
@@ -0,0 +1,111 @@
+#
+# bw.optim.R
+#
+#  Class of optimised bandwidths
+#  Plotting the object displays the optimisation criterion
+#
+#  $Revision: 1.25 $  $Date: 2016/04/25 02:34:40 $
+#
+
+bw.optim <- function(cv, h, iopt=which.min(cv), ...,
+                     cvname, hname,
+                     criterion="cross-validation",
+                     unitname=NULL) {
+  if(missing(cvname) || is.null(cvname)) cvname <- deparse(substitute(cv))
+  if(missing(hname) || is.null(hname)) hname <- deparse(substitute(h))
+  stopifnot(is.numeric(cv))
+  stopifnot(is.numeric(h))
+  stopifnot(length(h) == length(cv))
+  result <- h[iopt]
+  attr(result, "cv") <- cv
+  attr(result, "h") <- h
+  attr(result, "iopt") <- iopt
+  attr(result, "labels") <- list(hname=hname, cvname=cvname)
+  attr(result, "info") <- list(...)
+  attr(result, "criterion") <- criterion
+  attr(result, "units") <- unitname
+  class(result) <- "bw.optim"
+  return(result)
+}
+
+print.bw.optim <- function(x, ...) {
+  y <- as.numeric(x)
+  names(y) <- attr(x, "labels")$hname
+  print(y, ...)
+  return(invisible(NULL))
+}
+
+as.data.frame.bw.optim <- function(x, ...) {
+  h <- attr(x, "h")
+  cv <- attr(x, "cv")
+  df <- data.frame(h, cv)
+  labels <- attr(x, "labels")
+  colnames(df) <- labels[c("hname", "cvname")]
+  info <- attr(x, "info")
+  if(length(info) > 0) {
+    lenfs <- lengths(info)
+    if(any(ok <- (lenfs == nrow(df)))) {
+      df <- cbind(df, as.data.frame(info[ok]))
+    }
+  }
+  return(df)
+}
+
+as.fv.bw.optim <- function(x) {
+  # convert to fv object
+  df <- as.data.frame(x)
+  dfnames <- colnames(df)
+  hname <- dfnames[1L]
+  cvname <- dfnames[2L]
+  descrip <- c("smoothing parameter",
+               paste(attr(x, "criterion"), "criterion"))
+  if(ncol(df) > 2)
+    descrip <- c(descrip, paste("Additional variable", sQuote(dfnames[-(1:2)])))
+  labl <- c(hname, paste0(dfnames[-1L], paren(hname)))
+  yexp <- substitute(CV(h), list(CV=as.name(cvname), h=as.name(hname)))
+  xfv <- fv(df,
+            argu=hname,
+            ylab=yexp,
+            valu=cvname,
+            labl=labl,
+            desc=descrip,
+            fname=cvname,
+            yexp=yexp)
+  fvnames(xfv, ".") <- cvname
+  unitname(xfv) <- unitname(x)
+  return(xfv)
+}
+
+plot.bw.optim <- function(x, ...,
+                          showopt=TRUE, optargs=list(lty=3, col="blue")) {
+  xname <- short.deparse(substitute(x))
+  # convert to fv object
+  xfv <- as.fv(x)
+  # plot cross-validation criterion
+  out <- do.call(plot.fv,
+                 resolve.defaults(list(x=xfv),
+                                  list(...),
+                                  list(main=xname)))
+  # Turn off 'showopt' if the x-variable is not the bandwidth
+  if(missing(showopt)) {
+    argh <- list(...)
+    isfmla <- unlist(lapply(argh, inherits, what="formula"))
+    if(any(isfmla)) {
+      fmla <- argh[[min(which(isfmla))]]
+      xvar <- deparse(rhs.of.formula(fmla, tilde=FALSE))
+      if(!(identical(xvar, fvnames(xfv, ".x")) || identical(xvar, ".x")))
+        showopt <- FALSE
+    }
+  }
+  # show optimal value?
+  if(showopt) {
+    hoptim <- as.numeric(x)
+    if(spatstat.options('monochrome'))
+      optargs <- col.args.to.grey(optargs)
+    do.call(abline, append(list(v=hoptim), optargs))
+  }
+  if(is.null(out)) return(invisible(NULL))
+  return(out)
+}
+
+
diff --git a/R/bw.pcf.R b/R/bw.pcf.R
new file mode 100644
index 0000000..0b0d5c2
--- /dev/null
+++ b/R/bw.pcf.R
@@ -0,0 +1,172 @@
+#'
+#' bw.pcf.R
+#'
+#' $Revision: 1.4 $  $Date: 2017/06/05 10:31:58 $
+#'
+#' bandwidth selection for pcf
+#' with least-squares cross-validation method
+#' 
+#' Original code by: Rasmus Waagepetersen and Abdollah Jalilian
+#'
+#' References:
+#' Guan, Y. (2007). A composite likelihood cross-validation approach in 
+#'   selecting bandwidth for the estimation of the pair correlation function. 
+#'   Scandinavian Journal of Statistics, 34(2), 336--346. 
+#'   DOI: http://doi.org/10.1111/j.1467-9469.2006.00533.x
+#' Guan, Y. (2007). A least-squares cross-validation bandwidth 
+#'   selection approach in pair correlation function estimations. 
+#'   Statistics & Probability Letters, 77(18), 1722--1729. 
+#'   DOI: http://doi.org/10.1016/j.spl.2007.04.016
+
+bw.pcf <- function(X, rmax=NULL, lambda=NULL, divisor="r", 
+                   kernel="epanechnikov", nr=10000, bias.correct=TRUE, 
+                   cv.method=c("compLik", "leastSQ"), simple=TRUE,
+                   srange=NULL, ..., verbose=FALSE)
+{
+  stopifnot(is.ppp(X))
+  X <- unmark(X)
+  win <- Window(X)
+  areaW <- area(win)
+  nX <- npoints(X)
+
+  cv.method <- match.arg(cv.method)
+  kernel <- match.kernel(kernel)
+  
+  #' maximum distance lag: rmax
+  if (is.null(rmax))
+    rmax <- rmax.rule("K", win,  nX/areaW)
+  if(is.null(srange))
+    srange <- c(0, rmax/4)
+  #' number of subintervals for discretization of [0, rmax]: nr
+  #' length of subintervals
+  discr <- rmax / nr
+  #' breaks of subintervals
+  rs <- seq(0, rmax, length.out= nr + 1)
+
+  #' closepairs distances: \\ u - v \\
+  #' Pre-compute close pair distances for use in 'pcf'
+  #'   we need close pairs up to a distance rmax + smax
+  #'   where 'smax' is the maximum halfwidth of the support of the kernel
+  smax <- srange[2] * (if(kernel == "gaussian") 2 else kernel.factor(kernel))
+  cpfull <- closepairs(X, rmax + smax, what="all", twice=TRUE)
+  
+  #' For cross-validation, restrict close pairs to distance rmax 
+  ok <- (cpfull$d <= rmax)
+  cp <- lapply(cpfull, "[", i=ok)
+
+  ds <- cp$d
+  #' determining closepairs distances are in which subinterval
+  idx <- round(ds / discr) + 1L
+  idx <- pmin.int(idx, nr+1L)
+  
+  #' translation edge correction factor: /W|/|W \cap W_{u-v}|
+  edgewt <- edge.Trans(dx=cp$dx, dy=cp$dy, W=win, paired=TRUE)
+  
+  if(homogeneous <- is.null(lambda)) {
+    #' homogeneous case
+    lambda <- nX/areaW
+    lambda2area <- lambda^2 * areaW
+    pcfargs <- list(X=X, r=rs,
+                    divisor=divisor, kernel=kernel, correction="translate",
+                    close=cpfull)
+    renorm.factor <- 1
+  } else {
+    #' inhomogeneous case: lambda is assumed to be a numeric vector giving
+    #' the intensity at the points of the point pattern X
+    check.nvector(lambda, nX)
+    lambda2area <- lambda[cp$i] * lambda[cp$j] * areaW
+    pcfargs <- list(X=X, lambda=lambda, r=rs,
+                    divisor=divisor, kernel=kernel, correction="translate",
+                    close=cpfull)
+    renorm.factor <- (areaW/sum(1/lambda))
+  }
+  
+  stuff <- list(cv.method=cv.method,
+                kernel=kernel,
+                homogeneous=homogeneous,
+                bias.correct=bias.correct,
+                simple = simple,
+                discr=discr,
+                rs=rs,
+                cp=cp,
+                ds=ds,
+                idx=idx,
+                edgewt=edgewt,
+                pcfargs=pcfargs,
+                lambda=lambda,
+                lambda2area=lambda2area,
+                renorm.factor=renorm.factor,
+                show=verbose)
+  stuff <- list2env(stuff)
+
+  #' find optimum bandwidth
+  z <- optimizeWithTrace(CVforPCF, srange, maximum=TRUE, stuff=stuff)
+
+  #' pack up
+  ox <- order(z$x)
+  sigma  <- z$x[ox]
+  cv     <- z$y[ox]
+  criterion <- switch(cv.method,
+                      compLik = "composite likelihood cross-validation",
+                      leastSQ = "least squares cross-validation")
+  result <- bw.optim(cv, sigma, which.max(cv),
+                     criterion = criterion,
+                     unitname=unitname(X))
+  return(result)
+}
+
+CVforPCF <- function(bw, stuff) {
+  stuff$bw <- bw
+  with(stuff, {
+    if(show) splat("bw=", bw)
+    #' values of pair correlation at breaks of subintervals
+    a <- append(pcfargs, list(bw=bw))
+    grs <- if(homogeneous) do.call(pcf.ppp, a) else do.call(pcfinhom, a)
+    grs <- grs$trans
+    #' bias correction
+    if (bias.correct) {
+      grs <- grs / pkernel(rs, kernel, 0, bw)
+      dcorrec <- pkernel(ds, kernel, 0, bw)
+    } else {
+      dcorrec <- 1
+    }
+    #' make sure that the estimated pair correlation at origin is finite
+    if (!is.finite(grs[1]))
+      grs[1] <- grs[2]
+    #' approximate the pair correlation values at closepairs distances
+    gds <- grs[idx]
+    wt <- edgewt / (2 * pi * ds * lambda2area * dcorrec) * renorm.factor
+    #' remove pairs to approximate the cross-validation term: g^{-(u, v)}
+    if (simple) {
+      gds <- gds - 2 * wt * dkernel(0, kernel, 0, bw)
+    } else {
+      cpi <- cp$i
+      cpj <- cp$j
+      for (k in seq_along(ds)) {
+        exclude <- (cpi == cpi[k]) | (cpj == cpj[k])
+        gds[k] <- gds[k] - 2 * sum(wt[exclude] * 
+                                   dkernel(ds[k] - ds[exclude],
+                                           kernel, 0, bw))
+      }
+    }
+    #' remove negative and zero values
+    gds <- pmax.int(.Machine$double.eps, gds)
+    switch(cv.method,
+           compLik={
+             #' composite likelihood cross-validation
+             #' the integral term: 2 \pi \int_{0}^{rmax} \hat g(r) r dr
+             normconst <- 2 * pi * sum(grs * rs) * discr
+             value <- mean(log(gds)) - log(normconst)
+           },
+           leastSQ={
+             #' least squares cross-validation
+             #' the integral term: 2 \pi \int_{0}^{rmax} \hat g^2(r) r dr
+             normconst <- 2 * pi * sum(grs^2 * rs) * discr
+             value <- 2 * sum(gds * edgewt / (lambda2area)) - normconst
+           },
+           stop("Unrecognised cross-validation method"))
+    if(show) splat("value=", value)
+    return(value)
+  })
+}
+
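+#' Editorial sketch (not part of the upstream code): a minimal use of
+#' bw.pcf on the 'cells' point pattern shipped with spatstat, wrapped in
+#' if(FALSE) so that sourcing this file stays side-effect-free.
+if(FALSE) {
+  b <- bw.pcf(cells, cv.method="leastSQ")
+  plot(b)                             # CV criterion against bandwidth
+  plot(pcf(cells, bw=as.numeric(b)))  # pcf estimate at the chosen bandwidth
+}
+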
diff --git a/R/bw.ppl.R b/R/bw.ppl.R
new file mode 100644
index 0000000..4e1fc34
--- /dev/null
+++ b/R/bw.ppl.R
@@ -0,0 +1,33 @@
+#
+#   bw.ppl.R
+#
+#   Likelihood cross-validation for kernel smoother of point pattern
+#
+#   $Revision: 1.7 $ $Date: 2017/01/28 06:30:21 $
+#
+
+bw.ppl <- function(X, ..., srange=NULL, ns=16, sigma=NULL, weights=NULL) {
+  stopifnot(is.ppp(X))
+  if(!is.null(sigma)) {
+    stopifnot(is.numeric(sigma) && is.vector(sigma))
+    ns <- length(sigma)
+  } else {
+    if(!is.null(srange)) check.range(srange) else {
+      nnd <- nndist(X)
+      srange <- c(min(nnd[nnd > 0]), diameter(as.owin(X))/2)
+    }
+    sigma <- geomseq(from=srange[1L], to=srange[2L], length.out=ns)
+  }
+  cv <- numeric(ns)
+  for(i in 1:ns) {
+    si <- sigma[i]
+    lamx <- density(X, sigma=si, at="points", leaveoneout=TRUE, weights=weights)
+    lam <- density(X, sigma=si, weights=weights)
+    cv[i] <- sum(log(lamx)) - integral.im(lam)
+  }
+  result <- bw.optim(cv, sigma, iopt=which.max(cv), 
+                     creator="bw.ppl",
+                     criterion="Likelihood Cross-Validation",
+                     unitname=unitname(X))
+  return(result)
+}
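+
+#' Editorial sketch (not part of the upstream code): typical use of
+#' bw.ppl, assuming the 'redwood' dataset from spatstat.
+if(FALSE) {
+  b <- bw.ppl(redwood)
+  plot(b)                          # CV criterion against sigma
+  plot(density(redwood, sigma=b))  # kernel estimate at the selected sigma
+}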
diff --git a/R/by.ppp.R b/R/by.ppp.R
new file mode 100755
index 0000000..e4c97cd
--- /dev/null
+++ b/R/by.ppp.R
@@ -0,0 +1,19 @@
+#
+#   by.ppp.R
+#
+#  $Revision: 1.6 $  $Date: 2015/10/21 09:06:57 $
+#
+
+by.ppp <- function(data, INDICES=marks(data), FUN, ...) {
+  if(missing(INDICES))
+    INDICES <- marks(data, dfok=FALSE)
+  if(missing(FUN))
+    stop("FUN is missing")
+  y <- split(data, INDICES)
+  z <- list()
+  for(i in seq_along(y))
+    z[[i]] <- FUN(y[[i]], ...)
+  names(z) <- names(y)
+  z <- as.solist(z, demote=TRUE)
+  return(z)
+}
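+
+#' Editorial sketch (not part of the upstream code): by.ppp is normally
+#' reached through the generic by(); here with the multitype 'amacrine'
+#' pattern from spatstat.
+if(FALSE) {
+  by(amacrine, FUN=npoints)   # number of points of each type
+  by(amacrine, FUN=density)   # one intensity estimate per type
+}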
diff --git a/R/cdf.test.mppm.R b/R/cdf.test.mppm.R
new file mode 100755
index 0000000..446343c
--- /dev/null
+++ b/R/cdf.test.mppm.R
@@ -0,0 +1,256 @@
+#
+# cdf.test.mppm.R
+#
+# $Revision: 1.16 $  $Date: 2016/04/14 02:34:50 $
+#
+cdf.test.mppm <- local({
+
+  allpixelvalues <- function(z) { as.vector(as.matrix(z)) }
+
+  xcoord <- function(x, y) { x }
+  ycoord <- function(x, y) { y }
+  
+  cdf.test.mppm <- function(model, covariate,
+                            test=c("ks", "cvm", "ad"), ...,
+                            nsim=19, verbose=TRUE,
+                            interpolate=FALSE, fast=TRUE, jitter=TRUE) {
+    modelname <- short.deparse(substitute(model))
+    covname <- short.deparse(substitute(covariate))
+    test <- match.arg(test)
+    result <- PoissonTest(model, covariate, test=test, ...,
+                          verbose=FALSE,
+                          interpolate=interpolate, fast=fast, jitter=jitter,
+                          modelname=modelname, covname=covname,
+                          gibbsok=TRUE)
+    if(is.poisson(model))
+      return(result)
+    result$poisson.p.value <- pobs <- result$p.value
+    result$poisson.statistic <- tobs <- result$statistic
+    ## Simulate ...
+    Sims <- simulate(model, nsim=nsim, ..., verbose=verbose)
+    if(verbose) 
+      cat("Processing ...")
+    state <- list()
+    Yname <- model$Info$Yname
+    Data <- eval(getCall(model)$data,
+                 envir=environment(terms(model)))
+    sim.pvals <- sim.stats <- numeric(nsim)    
+    for(isim in 1:nsim) {
+      Data[,Yname] <- Sims[,isim,drop=FALSE]
+      modeli <- update(model, data=Data)
+      Ai <- PoissonTest(modeli, covariate, test=test, ...,
+                        verbose=FALSE,
+                        interpolate=interpolate, fast=fast, jitter=jitter,
+                        modelname=modelname, covname=covname,
+                        gibbsok=TRUE)
+      sim.pvals[isim] <- Ai$p.value
+      sim.stats[isim] <- Ai$statistic
+      if(verbose) state <- progressreport(isim, nsim, state=state)
+    }
+    ### COMPUTE p-value and pack up
+    result$sim.pvals <- sim.pvals
+    result$sim.stats <- sim.stats
+    ## Monte Carlo p-value
+    ## For tied p-values, first compare values of test statistics
+    ## (because p = 0 may occur due to rounding)
+    ## otherwise resolve ties by randomisation
+    nless <- sum(sim.pvals < pobs)
+    nplus <- sum(sim.pvals == pobs & sim.stats > tobs)
+    nties <- sum(sim.pvals == pobs & sim.stats == tobs) 
+    result$p.value <- (nless + nplus + sample(0:nties, 1L))/(nsim+1L)
+    ##
+    result$method <- c("Monte Carlo test of fitted Gibbs model",
+                       paste("based on", nsim, "repetitions of"),
+                       sub("Spatial", "spatial", result$method))
+    return(result)
+  }
+
+  PoissonTest <- function(model, covariate,
+                          test=c("ks", "cvm", "ad"), ..., verbose=TRUE,
+                          interpolate=FALSE, fast=TRUE, jitter=TRUE,
+                          gibbsok=FALSE,
+                          modelname, covname) {
+    if(missing(modelname)) modelname <- short.deparse(substitute(model))
+    if(missing(covname)) covname <- short.deparse(substitute(covariate))
+    test <- match.arg(test)
+    stopifnot(is.mppm(model))
+    if(!gibbsok && !is.poisson.mppm(model))
+      stop("Only implemented for Poisson models")
+    ## extract things from model
+    data  <- model$data
+    npat  <- model$npat
+    Y     <- data.mppm(model)
+    if(fast) {
+      ## extract original quadrature schemes and convert to point patterns
+      QQ  <- quad.mppm(model)
+      PP  <- lapply(QQ, union.quad)
+      Zweights <- lapply(QQ, w.quad)
+    } else
+      Zweights <- list()
+    ## `evaluate' covariate
+    if(verbose)
+      cat("Extracting covariate...")
+    if(identical(covariate, "x")) covariate <- xcoord
+    if(identical(covariate, "y")) covariate <- ycoord
+    if(is.character(covariate)) {
+      ## extract covariate with this name from data used to fit model
+      if(!(covariate %in% names(data)))
+        stop(paste("Model does not contain a covariate called",
+                   dQuote(covariate)))
+      covname <- covariate
+      covariate <- data[, covname, drop=TRUE]
+    } else if(inherits(covariate, c("listof", "anylist"))) {
+      if(length(covariate) != npat)
+        stop(paste("Length of list of covariate values does not match",
+                   "number of point patterns in data of original model"))
+    } else if(is.hyperframe(covariate)) {
+      ## extract first column
+      covariate <- covariate[,1L, drop=TRUE]
+      if(length(covariate) != npat)
+        stop(paste("Number of rows of covariate hyperframe does not match",
+                   "number of point patterns in data of original model"))
+    } else if(is.function(covariate) || is.im(covariate)) {
+      ## replicate to make a list
+      covariate <- as.anylist(rep(list(covariate), npat))
+    } else     
+      stop(paste("Format of argument", sQuote("covariates"), "not understood"))
+    if(verbose) {
+      cat("done.\nComputing statistics for each pattern...")
+      pstate <- list()
+    }
+    ## compile information for test from each row
+    Zvalues <- ZX <- Win <- list()
+    for(i in 1:npat) {
+      if(verbose) pstate <- progressreport(i, npat, state=pstate)
+      XI <- Y[[i]]
+      if(fast)
+        PI <- PP[[i]]
+      else
+        WI <- XI$window
+      covariateI <- covariate[[i]]
+      if(is.im(covariateI)) {
+        type <- "im"
+        ## evaluate at data points
+        ZXI <-
+          if(interpolate) interp.im(covariateI, XI$x, XI$y)
+          else covariateI[XI]
+        if(fast) {
+          ## covariate values for quadrature points
+          ZI <- covariateI[PI]
+        } else {
+          ## covariate image inside window
+          ZI <- covariateI[WI, drop=FALSE]
+          ## corresponding mask
+          WI <- as.owin(ZI)
+          ## pixel areas 
+          Zweights[[i]] <- rep(WI$xstep * WI$ystep, prod(WI$dim))
+        }
+      } else if(is.function(covariateI)) {
+        type <- "function"
+        ## evaluate exactly at data points
+        ZXI <- covariateI(XI$x, XI$y)
+        if(fast) {
+          ## covariate values for quadrature points
+          ZI <- covariateI(PI$x, PI$y)
+        } else {
+          ## window
+          WI <- as.mask(WI)
+          ## covariate image inside window
+          ZI <- as.im(covariateI, W=WI)
+          ## pixel areas 
+          Zweights[[i]] <- rep(WI$xstep * WI$ystep, prod(WI$dim))
+        }
+      } else
+        stop("covariate should be an image or a function(x,y)")
+      ZX[[i]] <- ZXI
+      if(fast)
+        Zvalues[[i]] <- ZI      
+      else {
+        Win[[i]] <- WI
+        ## values of covariate in window
+        Zvalues[[i]] <- allpixelvalues(ZI)
+      }
+    }
+
+    if(verbose)
+      cat("done.\nComputing predicted intensity...")
+
+    ## compute predicted intensities
+    trend <-
+      if(fast)
+        fitted(model, type="trend")
+      else
+        predict(model, type="trend", locations=Win, verbose=verbose)$trend
+  
+    if(verbose)
+      cat("done.\nExtracting...")
+    ## extract relevant values
+    lambda <- if(fast) trend else lapply(trend, allpixelvalues)
+    if(verbose)
+      cat("done.\nPerforming test...")
+  
+    ## flatten to vectors
+    lambda <- unlist(lambda)
+    Zweights <- unlist(Zweights)
+    Zvalues <- unlist(Zvalues)
+    ZX      <- unlist(ZX)
+    if(length(lambda) != length(Zvalues))
+      stop("Internal error: mismatch between predicted values and Z values")
+    if(length(Zvalues) != length(Zweights))
+      stop("Internal error: mismatch between Z values and Z weights")
+    lambda <- lambda * Zweights
+  
+    ## form weighted cdf of Z values in window
+    FZ <- ewcdf(Zvalues, lambda/sum(lambda))
+    ## Ensure support of cdf includes the range of the data
+    xxx <- knots(FZ)
+    yyy <- FZ(xxx)
+    if(min(xxx) > min(ZX)) {
+      xxx <- c(min(ZX), xxx)
+      yyy <- c(0, yyy)
+    }
+    if(max(xxx) < max(ZX)) {
+      xxx <- c(xxx, max(ZX))
+      yyy <- c(yyy, 1)
+    }
+    ## make piecewise linear approximation of cdf
+    FZ <- approxfun(xxx, yyy, rule=2)
+    ## evaluate at data points
+    if(!jitter)
+      U <- FZ(ZX)
+    else {
+      ## jitter observed values to avoid ties
+      grain <- min(diff(sort(unique(ZX))))/8
+      jit <- runif(length(ZX), min=0, max=grain)
+      sgn <- sample(c(-1L,1L), length(ZX), replace=TRUE)
+      sgn[ZX==min(xxx)] <- 1L
+      sgn[ZX==max(xxx)] <- -1L
+      U <- FZ(ZX + sgn*jit)
+    }
+
+    ## Test uniformity
+    result <- switch(test,
+                     ks  = ks.test(U, "punif", ...),
+                     cvm = cvm.test(U, "punif", ...),
+                     ad = ad.test(U, "punif", ...))
+    testname <- switch(test,
+                       ks="Kolmogorov-Smirnov",
+                       cvm="Cramer-Von Mises",
+                       ad="Anderson-Darling")
+    result$method <- paste("Spatial", testname, "test")
+    result$data.name <-
+      paste("predicted cdf of covariate", sQuote(paste(covname, collapse="")),
+            "evaluated at data points of", sQuote(modelname))
+    if(verbose)
+      cat("done.\n")
+    class(result) <- c("cdftest", class(result))
+    attr(result, "prep") <- list(Zvalues = Zvalues, lambda = lambda,
+                                 ZX = ZX, FZ = FZ, U = U, type = type)
+    attr(result, "info") <- list(modelname = modelname, covname = covname)
+    return(result)        
+  }
+
+  cdf.test.mppm
+
+})
+
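+#' Editorial sketch (not part of the upstream code): for a Gibbs model the
+#' code above reports the Monte Carlo p-value
+#' (nless + nplus + sample(0:nties, 1))/(nsim + 1), breaking ties on the
+#' p-value by comparing the test statistics first. A minimal call, assuming
+#' the 'waterstriders' data from spatstat:
+if(FALSE) {
+  H <- hyperframe(X=waterstriders)
+  fit <- mppm(X ~ 1, data=H)
+  cdf.test(fit, "x")   # test the fitted model against the x coordinate
+}
+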
diff --git a/R/cdftest.R b/R/cdftest.R
new file mode 100755
index 0000000..8bb235d
--- /dev/null
+++ b/R/cdftest.R
@@ -0,0 +1,437 @@
+#
+#  cdftest.R
+#
+#  $Revision: 2.15 $  $Date: 2017/01/18 07:58:44 $
+#
+#
+
+# --------- old -------------
+
+ks.test.ppm <- function(...) {
+  .Deprecated("cdf.test.ppm", package="spatstat")
+  cdf.test.ppm(...)
+}
+
+kstest <- kstest.ppp <- kstest.ppm <- kstest.lpp <- kstest.lppm <-
+  kstest.slrm <-
+  function(...) {
+    message("kstest is out of date; use cdf.test")
+    # .Deprecated("cdf.test", package="spatstat")
+    cdf.test(..., test="ks")
+}
+
+# ---------------------------
+
+cdf.test <- function(...) {
+  UseMethod("cdf.test")
+}
+
+cdf.test.ppp <-
+  function(X, covariate, test=c("ks", "cvm", "ad"), ...,
+           interpolate=TRUE, jitter=TRUE) {
+    Xname <- short.deparse(substitute(X))
+    covname <- singlestring(short.deparse(substitute(covariate)))
+    test <- match.arg(test)
+    if(is.character(covariate)) covname <- covariate
+    if(!is.marked(X, dfok=TRUE)) {
+      # unmarked
+      model <- ppm(X)
+      modelname <- "CSR"
+    } else if(is.multitype(X)) {
+      # multitype
+      mf <- summary(X)$marks$frequency
+      if(all(mf > 0)) {
+        model <- ppm(X ~marks)
+        modelname <- "CSRI"
+      } else {
+        warning("Ignoring marks, because some mark values have zero frequency")
+        X <- unmark(X)
+        model <- ppm(X)
+        modelname <- "CSR"
+      } 
+    } else {
+      # marked - general case
+      X <- unmark(X)
+      warning("marks ignored")
+      model <- ppm(X)
+      modelname <- "CSR"
+    }
+    do.call(spatialCDFtest,
+            resolve.defaults(list(model, covariate, test=test),
+                             list(interpolate=interpolate, jitter=jitter),
+                             list(...),
+                             list(modelname=modelname,
+                                  covname=covname, dataname=Xname)))
+}
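+
+#' Editorial sketch (not part of the upstream code): testing CSR against a
+#' spatial covariate, using the 'nztrees' dataset from spatstat.
+if(FALSE) {
+  cdf.test(nztrees, "x")             # Kolmogorov-Smirnov test against x
+  cdf.test(nztrees, "x", test="ad")  # Anderson-Darling variant
+}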
+
+cdf.test.ppm <- 
+  function(model, covariate, test=c("ks", "cvm", "ad"), ...,
+           interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE) {
+  modelname <- short.deparse(substitute(model))
+  covname <- singlestring(short.deparse(substitute(covariate)))
+  test <- match.arg(test)
+  verifyclass(model, "ppm")
+  if(is.character(covariate)) covname <- covariate
+  if(is.poisson(model) && is.stationary(model))
+    modelname <- "CSR"
+  do.call(spatialCDFtest,
+          resolve.defaults(list(model, covariate, test=test),
+                           list(interpolate=interpolate, jitter=jitter,
+                                nsim=nsim, verbose=verbose),
+                           list(...),
+                           list(modelname=modelname,
+                                covname=covname)))
+}
+
+cdf.test.lpp <-
+  function(X, covariate, test=c("ks", "cvm", "ad"), ...,
+           interpolate=TRUE, jitter=TRUE) {
+    Xname <- short.deparse(substitute(X))
+    covname <- singlestring(short.deparse(substitute(covariate)))
+    test <- match.arg(test)
+    if(is.character(covariate)) covname <- covariate
+    if(!is.marked(X, dfok=TRUE)) {
+      # unmarked
+      model <- lppm(X)
+      modelname <- "CSR"
+    } else if(is.multitype(X)) {
+      # multitype
+      mf <- table(marks(X))
+      if(all(mf > 0)) {
+        model <- lppm(X ~ marks)
+        modelname <- "CSRI"
+      } else {
+        warning("Ignoring marks, because some mark values have zero frequency")
+        X <- unmark(X)
+        model <- lppm(X)
+        modelname <- "CSR"
+      } 
+    } else {
+      # marked - general case
+      X <- unmark(X)
+      warning("marks ignored")
+      model <- lppm(X)
+      modelname <- "CSR"
+    }
+    do.call(spatialCDFtest,
+            resolve.defaults(list(model, covariate, test=test),
+                             list(interpolate=interpolate, jitter=jitter),
+                             list(...),
+                             list(modelname=modelname,
+                                  covname=covname, dataname=Xname)))
+}
+
+cdf.test.lppm <- function(model, covariate,
+                          test=c("ks", "cvm", "ad"),
+                          ..., interpolate=TRUE, jitter=TRUE,
+                          nsim=99, verbose=TRUE) {
+  modelname <- short.deparse(substitute(model))
+  covname <- singlestring(short.deparse(substitute(covariate)))
+  test <- match.arg(test)
+  verifyclass(model, "lppm")
+  if(is.character(covariate)) covname <- covariate
+  if(is.poisson(model) && is.stationary(model))
+    modelname <- "CSR"
+  do.call(spatialCDFtest,
+          resolve.defaults(list(model, covariate, test=test),
+                           list(interpolate=interpolate, jitter=jitter,
+                                nsim=nsim, verbose=verbose),
+                           list(...),
+                           list(modelname=modelname,
+                                covname=covname)))
+}
+
+
+cdf.test.slrm <- function(model, covariate,
+                          test=c("ks", "cvm", "ad"), ...,
+                          modelname=NULL, covname=NULL) {
+  # get names
+  if(is.null(modelname))
+    modelname <- short.deparse(substitute(model))
+  if(is.null(covname))
+    covname <- short.deparse(substitute(covariate))
+  dataname <- model$CallInfo$responsename
+  test <- match.arg(test)
+  #
+  stopifnot(is.slrm(model))
+  stopifnot(is.im(covariate))
+  # extract data
+  prob <- fitted(model)
+  covim <- as.im(covariate, W=as.owin(prob))
+  probvalu <- as.matrix(prob)
+  covvalu  <- as.matrix(covim)
+  ok <- !is.na(probvalu) & !is.na(covvalu)
+  probvalu <- as.vector(probvalu[ok])
+  covvalu <- as.vector(covvalu[ok])
+  # compile weighted cdf's
+  FZ <- ewcdf(covvalu, probvalu/sum(probvalu))
+  X <- model$Data$response
+  ZX <- safelookup(covim, X)
+  # Ensure support of cdf includes the range of the data
+  xxx <- knots(FZ)
+  yyy <- FZ(xxx)
+  if(min(xxx) > min(ZX)) {
+    xxx <- c(min(ZX), xxx)
+    yyy <- c(0, yyy)
+  }
+  if(max(xxx) < max(ZX)) {
+    xxx <- c(xxx, max(ZX))
+    yyy <- c(yyy, 1)
+  }
+  # make piecewise linear approximation of cdf
+  FZ <- approxfun(xxx, yyy, rule=2)
+  # now apply cdf
+  U <- FZ(ZX)
+  # Test uniformity of transformed values
+  result <- switch(test,
+                   ks  = ks.test(U, "punif", ...),
+                   cvm = cvm.test(U, "punif", ...),
+                   ad = ad.test(U, "punif", ...))
+  testname <- switch(test,
+                     ks="Kolmogorov-Smirnov",
+                     cvm="Cramer-Von Mises",
+                     ad="Anderson-Darling")
+
+  # modify the 'htest' entries
+  result$method <- paste("Spatial", testname, "test of",
+                         "inhomogeneous Poisson process",
+                         "in two dimensions")
+  result$data.name <-
+    paste("covariate", sQuote(paste(covname, collapse="")),
+          "evaluated at points of", sQuote(dataname),
+          "\n     and transformed to uniform distribution under",
+          sQuote(modelname))
+  # additional class 'cdftest'
+  class(result) <- c("cdftest", class(result))
+  attr(result, "prep") <-
+    list(Zvalues=covvalu, ZX=ZX, FZ=FZ, FZX=ecdf(ZX), U=U)
+  attr(result, "info") <- list(modelname=modelname, covname=covname,
+                               dataname=dataname, csr=FALSE)
+  return(result)        
+}
+
+#.............  helper functions ........................#
+
+spatialCDFtest <- function(model, covariate, test=c("ks", "cvm", "ad"),
+                           ...,
+                           dimyx=NULL, eps=NULL,
+                           interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE,
+                           modelname=NULL, covname=NULL, dataname=NULL) {
+  # conduct test based on comparison of CDF's of covariate values
+  test <- match.arg(test)
+  ispois <- is.poisson(model)
+  # compute the essential data
+  fra <- spatialCDFframe(model, covariate,
+                         dimyx=dimyx, eps=eps,
+                         interpolate=interpolate, jitter=jitter,
+                         modelname=modelname,
+                         covname=covname, dataname=dataname)
+  values <- fra$values
+  info   <- fra$info
+  ## Test uniformity of transformed values
+  U <- values$U
+  result <- switch(test,
+                   ks  = ks.test(U, "punif", ...),
+                   cvm = cvm.test(U, "punif", ...),
+                   ad = ad.test(U, "punif", ...))
+  testname <- switch(test,
+                     ks="Kolmogorov-Smirnov",
+                     cvm="Cramer-Von Mises",
+                     ad="Anderson-Darling")
+  ## 
+  if(!ispois) {
+    ## Gibbs model: perform Monte Carlo test
+    result$poisson.p.value <- pobs <- result$p.value
+    result$poisson.statistic <- tobs <- result$statistic
+    Xsim <- simulate(model, nsim=nsim, progress=verbose)
+    sim.pvals <- sim.stats <- numeric(nsim)
+    if(verbose) {
+      cat("Processing.. ")
+      state <- list()
+    }
+    for(i in seq_len(nsim)) {
+      model.i <- update(model, Xsim[[i]])
+      fra.i <- spatialCDFframe(model.i, covariate,
+                               dimyx=dimyx, eps=eps,
+                               interpolate=interpolate, jitter=jitter,
+                               modelname=modelname,
+                               covname=covname, dataname=dataname)
+      U.i <- fra.i$values$U
+      res.i <- switch(test,
+                      ks  = ks.test(U.i, "punif", ...),
+                      cvm = cvm.test(U.i, "punif", ...),
+                      ad = ad.test(U.i, "punif", ...))     
+      sim.pvals[i] <- res.i$p.value
+      sim.stats[i] <- res.i$statistic
+      if(verbose) state <- progressreport(i, nsim, state=state)
+    }
+    if(verbose) cat("Done.\n")
+    result$sim.pvals <- sim.pvals
+    result$sim.stats <- sim.stats
+    ## Monte Carlo p-value
+    ## For tied p-values, first compare values of test statistics
+    ## (because p = 0 may occur due to rounding)
+    ## otherwise resolve ties by randomisation
+    nless <- sum(sim.pvals < pobs)
+    nplus <- sum(sim.pvals == pobs & sim.stats > tobs)
+    nties <- sum(sim.pvals == pobs & sim.stats == tobs) 
+    result$p.value <- (nless + nplus + sample(0:nties, 1L))/(nsim+1L)
+  }
+  ## 
+  # modify the 'htest' entries
+  csr <- info$csr
+  modelname <- if(csr) "CSR" else
+               if(ispois) "inhomogeneous Poisson process" else "Gibbs process"
+  result$method <-
+    paste(if(ispois) "Spatial" else "Monte Carlo spatial",
+          testname, "test of", modelname, "in", info$spacename)
+  result$data.name <-
+    paste("covariate", sQuote(singlestring(info$covname)),
+          "evaluated at points of", sQuote(info$dataname), 
+          "\n     and transformed to uniform distribution under",
+          if(csr) info$modelname else sQuote(info$modelname))
+  
+  # additional class 'cdftest'
+  class(result) <- c("cdftest", class(result))
+  attr(result, "frame") <- fra
+  return(result)        
+}
+
+spatialCDFframe <- function(model, covariate, ...) {
+  # evaluate CDF of covariate values at data points and at pixels
+  stuff <- evalCovar(model, covariate, ...)
+  # extract 
+  values <- stuff$values
+#  info   <- stuff$info
+  Zvalues <- values$Zvalues
+  lambda  <- values$lambda
+  weights <- values$weights
+  ZX      <- values$ZX
+  # compute empirical cdf of Z values at points of X
+  FZX <- ecdf(ZX)
+  # form weighted cdf of Z values in window
+  wts <- lambda * weights
+  sumwts <- sum(wts)
+  FZ <- ewcdf(Zvalues, wts/sumwts)
+  # Ensure support of cdf includes the range of the data
+  xxx <- knots(FZ)
+  yyy <- FZ(xxx)
+  minZX <- min(ZX, na.rm=TRUE)
+  minxxx <- min(xxx, na.rm=TRUE)
+  if(minxxx > minZX) {
+    xxx <- c(minZX, xxx)
+    yyy <- c(0, yyy)
+  }
+  maxZX <- max(ZX, na.rm=TRUE)
+  maxxxx <- max(xxx, na.rm=TRUE)
+  if(maxxxx < maxZX) {
+    xxx <- c(xxx, maxZX)
+    yyy <- c(yyy, 1)
+  }
+  # make piecewise linear approximation of cdf
+  FZ <- approxfun(xxx, yyy, rule=2)
+  # now apply cdf
+  U <- FZ(ZX)
+
+  # pack up
+  stuff$values$FZ  <- FZ
+  stuff$values$FZX <- FZX
+  stuff$values$U   <- U
+  stuff$values$EN <- sumwts  ## integral of intensity = expected number of pts
+  class(stuff) <- "spatialCDFframe"
+  return(stuff)
+}
+
+plot.kstest <- function(x, ...) {
+  message("kstest is out of date; use cdf.test")
+#  .Deprecated("plot.cdftest", package="spatstat")
+  plot.cdftest(x, ...)
+}
+
+plot.cdftest <- function(x, ..., style=c("cdf", "PP", "QQ"),
+                        lwd=par("lwd"), col=par("col"), lty=par("lty"),
+                        lwd0=lwd, col0=2, lty0=2,
+                        do.legend=TRUE) {
+  style <- match.arg(style)
+  fram <- attr(x, "frame")
+  if(!is.null(fram)) {
+    values <- fram$values
+    info <- fram$info
+  } else {
+    # old style
+    values <- attr(x, "prep")
+    info <- attr(x, "info")
+  }
+  # cdf of covariate Z over window 
+  FZ <- values$FZ
+  # cdf of covariate values at data points
+  FZX <- values$FZX
+  # blurb
+  covname <- info$covname
+  covdescrip <- switch(covname,
+                       x="x coordinate",
+                       y="y coordinate",
+                       paste("covariate", dQuote(covname)))
+  # plot it
+  switch(style,
+         cdf={
+           # plot both cdf's superimposed
+           qZ <- get("x", environment(FZ))
+           pZ <- get("y", environment(FZ))
+           main <- c(x$method,
+                     paste("based on distribution of", covdescrip),
+                     paste("p-value=", signif(x$p.value, 4)))
+           do.call(plot.default,
+                   resolve.defaults(
+                                    list(x=qZ, y=pZ, type="l"),
+                                    list(...),
+                                    list(lwd=lwd0, col=col0, lty=lty0),
+                                    list(xlab=info$covname, ylab="probability",
+                                         main=main)))
+           plot(FZX, add=TRUE, do.points=FALSE, lwd=lwd, col=col, lty=lty)
+           if(do.legend) 
+             legend("topleft", c("observed", "expected"),
+                    lwd=c(lwd,lwd0),
+                    col=c(col2hex(col), col2hex(col0)),
+                    lty=c(lty2char(lty),lty2char(lty0)))
+         },
+         PP={
+           # plot FZX o (FZ)^{-1}
+           pX <- get("y", environment(FZX))
+           qX <- get("x", environment(FZX))
+           p0 <- FZ(qX)
+           do.call(plot.default,
+                   resolve.defaults(
+                                    list(x=p0, y=pX),
+                                    list(...),
+                                    list(col=col),
+                                    list(xlim=c(0,1),
+                                         ylim=c(0,1),
+                                         xlab="Theoretical probability",
+                                         ylab="Observed probability",
+                                         main="")))
+           abline(0,1, lwd=lwd0, col=col0, lty=lty0)           
+         },
+         QQ={
+           # plot (FZX)^{-1} o FZ
+           pZ <- get("y", environment(FZ))
+           qZ <- get("x", environment(FZ))
+           FZinverse <- approxfun(pZ, qZ, rule=2)
+           pX <- get("y", environment(FZX))
+           qX <- get("x", environment(FZX))
+           qZX <- FZinverse(pX)
+           Zrange <- range(qZ, qX, qZX)
+           xlab <- paste("Theoretical quantile of", covname)
+           ylab <- paste("Observed quantile of", covname)
+           do.call(plot.default,
+                   resolve.defaults(
+                                    list(x=qZX, y=qX),
+                                    list(...),
+                                    list(col=col),
+                                    list(xlim=Zrange, ylim=Zrange,
+                                         xlab=xlab, ylab=ylab,
+                                         main="")))
+           abline(0,1, lwd=lwd0, col=col0, lty=lty0)           
+         })
+  return(invisible(NULL))
+}
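+
+#' Editorial sketch (not part of the upstream code): the three plotting
+#' styles applied to one test object, using the 'nztrees' dataset.
+if(FALSE) {
+  ct <- cdf.test(nztrees, "x")
+  plot(ct, style="cdf")  # observed and expected cdfs superimposed
+  plot(ct, style="PP")   # P-P plot
+  plot(ct, style="QQ")   # Q-Q plot
+}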
diff --git a/R/centroid.R b/R/centroid.R
new file mode 100755
index 0000000..84e2e38
--- /dev/null
+++ b/R/centroid.R
@@ -0,0 +1,169 @@
+#
+#	centroid.S	Centroid of a window
+#			and related operations
+#
+#	$Revision: 1.6 $	$Date: 2014/11/10 08:20:59 $
+#
+# Function names (followed by "xypolygon" or "owin")
+#	
+#	intX            integral of x dx dy
+#	intY            integral of y dx dy
+#	meanX           mean value of x over the region
+#	meanY           mean value of y over the region
+#       centroid        (meanX, meanY)
+#		
+#-------------------------------------
+
+intX.xypolygon <- function(polly) {
+  #
+  # polly: list(x,y) vertices of a single polygon (n joins to 1)
+  #
+  verify.xypolygon(polly)
+  
+  x <- polly$x
+  y <- polly$y
+  
+#  nedges <- length(x)   # sic
+  
+  # place x axis below polygon
+  y <- y - min(y) 
+
+  # join vertex n to vertex 1
+  xr <- c(x, x[1L])
+  yr <- c(y, y[1L])
+
+  # slope
+  dx <- diff(xr)
+  dy <- diff(yr)
+  slope <- ifelseAX(dx == 0, 0, dy/dx)
+
+  # integrate
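+  # (Editorial note: each term below is the exact edge integral of
+  #  x * y(x) dx, where y(x) = y + slope * (x - x0) along the edge;
+  #  by Green's theorem, -sum(integrals) then equals the integral of x
+  #  over the polygon when the boundary is traversed anticlockwise.)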
+  integrals <- x * y * dx + (y + slope * x) * (dx^2)/2 + slope * (dx^3)/3
+
+  -sum(integrals)
+}
+		
+intX.owin <- function(w) {
+	verifyclass(w, "owin")
+        switch(w$type,
+               rectangle = {
+		width  <- abs(diff(w$xrange))
+		height <- abs(diff(w$yrange))
+		answer <- width * height * mean(w$xrange)
+               },
+               polygonal = {
+                 answer <- sum(unlist(lapply(w$bdry, intX.xypolygon)))
+               },
+               mask = {
+                 pixelarea <- abs(w$xstep * w$ystep)
+		 x <- rasterx.mask(w, drop=TRUE)
+                 answer <- (pixelarea * length(x)) * mean(x)
+               },
+               stop("Unrecognised window type")
+        )
+        return(answer)
+}
+
+meanX.owin <- function(w) {
+	verifyclass(w, "owin")
+        switch(w$type,
+               rectangle = {
+		answer <- mean(w$xrange)
+               },
+               polygonal = {
+	         area <- sum(unlist(lapply(w$bdry, Area.xypolygon)))
+                 integrated <- sum(unlist(lapply(w$bdry, intX.xypolygon)))
+		 answer <- integrated/area
+               },
+               mask = {
+		 x <- rasterx.mask(w, drop=TRUE)
+                 answer <- mean(x)
+               },
+               stop("Unrecognised window type")
+        )
+        return(answer)
+}
+
+intY.xypolygon <- function(polly) {
+  #
+  # polly: list(x,y) vertices of a single polygon (n joins to 1)
+  #
+  verify.xypolygon(polly)
+  
+  x <- polly$x
+  y <- polly$y
+  
+#  nedges <- length(x)   # sic
+  
+  # place x axis below polygon
+  yadjust <- min(y)
+  y <- y - yadjust 
+
+  # join vertex n to vertex 1
+  xr <- c(x, x[1L])
+  yr <- c(y, y[1L])
+
+  # slope
+  dx <- diff(xr)
+  dy <- diff(yr)
+  slope <- ifelseAX(dx == 0, 0, dy/dx)
+
+  # integrate
+  integrals <- (1/2) * (dx * y^2 + slope * y * dx^2 + slope^2 * dx^3/3)
+  total <- sum(integrals) - yadjust * Area.xypolygon(polly)
+
+  # change sign to adhere to anticlockwise convention
+  -total
+}
+		
+intY.owin <- function(w) {
+	verifyclass(w, "owin")
+        switch(w$type,
+               rectangle = {
+		width  <- abs(diff(w$xrange))
+		height <- abs(diff(w$yrange))
+		answer <- width * height * mean(w$yrange)
+               },
+               polygonal = {
+                 answer <- sum(unlist(lapply(w$bdry, intY.xypolygon)))
+               },
+               mask = {
+                 pixelarea <- abs(w$xstep * w$ystep)
+		 y <- rastery.mask(w, drop=TRUE)
+                 answer <- (pixelarea * length(y)) * mean(y)
+               },
+               stop("Unrecognised window type")
+        )
+        return(answer)
+}
+
+meanY.owin <- function(w) {
+	verifyclass(w, "owin")
+        switch(w$type,
+               rectangle = {
+		answer <- mean(w$yrange)
+               },
+               polygonal = {
+	         area <- sum(unlist(lapply(w$bdry, Area.xypolygon)))
+                 integrated <- sum(unlist(lapply(w$bdry, intY.xypolygon)))
+		 answer <- integrated/area
+               },
+               mask = {
+		 y <- rastery.mask(w, drop=TRUE)
+                 answer <- mean(y)
+               },
+               stop("Unrecognised window type")
+        )
+        return(answer)
+}
+
+centroid.owin <- function(w, as.ppp = FALSE) {
+	w <- as.owin(w)
+        out <- list(x=meanX.owin(w), y=meanY.owin(w))
+        if(as.ppp){
+            if(!inside.owin(out$x, out$y, w))
+                w <- as.rectangle(w)
+            out <- as.ppp(out, W=w)
+        }
+	return(out)
+}
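+
+#' Editorial sketch (not part of the upstream code): the centroid of an
+#' irregular window, using the 'letterR' window from spatstat.
+if(FALSE) {
+  centroid.owin(letterR)                  # list(x, y)
+  X <- centroid.owin(letterR, as.ppp=TRUE)
+  plot(letterR); plot(X, add=TRUE)        # centroid can lie outside the window
+}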
diff --git a/R/circdensity.R b/R/circdensity.R
new file mode 100644
index 0000000..f9c5afd
--- /dev/null
+++ b/R/circdensity.R
@@ -0,0 +1,49 @@
+#'
+#'   circdensity.R
+#'
+#' Kernel smoothing for circular data
+#'
+#'   $Revision: 1.3 $ $Date: 2014/12/04 06:49:20 $
+
+circdensity <- function(x, sigma="nrd0", ..., bw=NULL,
+                        weights=NULL,
+                        unit=c("degree", "radian")) {
+  xname <- short.deparse(substitute(x))
+  missu <- missing(unit)
+  if(missing(sigma) && !is.null(bw))
+    sigma <- bw
+  unit <- match.arg(unit)
+  unit <- validate.angles(x, unit, missu)
+  FullCircle <- switch(unit, degree = 360, radian = 2*pi)
+  if(is.character(sigma)) {
+    sigma <- switch(sigma,
+                     bcv  = bw.bcv,
+                     nrd  = bw.nrd,
+                     nrd0 = bw.nrd0,
+                     SJ   = bw.SJ,
+                     ucv  = bw.ucv,
+                     get(paste0("bw.", sigma), mode="function"))
+  }
+  if(is.function(sigma)) {
+    sigma <- sigma(x)
+    if(!(is.numeric(sigma) && length(sigma) == 1L && sigma > 0))
+      stop("Bandwidth selector should return a single positive number")
+  }
+  check.1.real(sigma)
+  #' replicate data
+  x <- x %% FullCircle
+  xx <- c(x - FullCircle, x, x + FullCircle)
+  #' replicate weights
+  if(!is.null(weights)) {
+    stopifnot(length(weights) == length(x))
+    weights <- rep(weights, 3)/3
+  }
+  #' smooth
+  z <- do.call(density.default,
+               resolve.defaults(list(x=xx, bw=sigma, weights=weights),
+                                list(...),
+                                list(from=0, to=FullCircle)))
+  z$y <- 3 * z$y
+  z$data.name <- xname
+  return(z)
+}
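+
+#' Editorial sketch (not part of the upstream code): smoothing 100 random
+#' angles; the result is a 'density' object and plots accordingly.
+if(FALSE) {
+  ang <- runif(100, min=0, max=360)
+  plot(circdensity(ang, sigma=20))
+}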
diff --git a/R/clarkevans.R b/R/clarkevans.R
new file mode 100755
index 0000000..244e5c0
--- /dev/null
+++ b/R/clarkevans.R
@@ -0,0 +1,205 @@
+## clarkevans.R
+## Clark-Evans statistic and test
+## $Revision: 1.17 $ $Date: 2015/10/19 05:03:37 $
+
+clarkevans <- function(X, correction=c("none", "Donnelly", "cdf"),
+                       clipregion=NULL)
+{
+  verifyclass(X, "ppp")
+  W <- X$window
+
+  # validate correction argument
+  gavecorrection <- !missing(correction)
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             Donnelly="Donnelly",
+                             donnelly="Donnelly",
+                             guard="guard",
+                             cdf="cdf"),
+                           multi=TRUE)
+
+  if(("Donnelly" %in% correction) && (W$type != "rectangle")) {
+    if(gavecorrection)
+      warning("Donnelly correction only available for rectangular windows")
+    correction <- correction[correction != "Donnelly"]
+  }
+
+  # guard correction applied iff `clipregion' is present
+  isguard <- (correction == "guard")   # vectorized: indexed below as correction[!isguard]
+  askguard <- any(isguard)
+  gaveguard <- !is.null(clipregion)
+  if(gaveguard)
+    clipregion <- as.owin(clipregion)
+  if(askguard && !gaveguard) {
+    warning("guard correction not performed; clipregion not specified")
+    correction <- correction[!isguard]
+  } else if(gaveguard && !askguard) 
+    correction <- c(correction, "guard")
+
+  result <- clarkevansCalc(X, correction, clipregion)
+  if(length(result) == 1L) result <- unname(result)
+  return(result)
+}
+
+clarkevans.test <- function(X, ..., 
+                            correction="none",
+                            clipregion=NULL,
+                            alternative=c("two.sided", "less", "greater",
+                                          "clustered", "regular"),
+                            nsim=999
+                            ) {
+  Xname <- short.deparse(substitute(X))
+  miss.nsim <- missing(nsim)
+
+  verifyclass(X, "ppp")
+  W <- Window(X)
+  nX <- npoints(X)
+  
+  # validate SINGLE correction
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             Donnelly="Donnelly",
+                             donnelly="Donnelly",
+                             guard="guard",
+                             cdf="cdf"))
+  switch(correction,
+         none={
+           corrblurb <- "No edge correction"
+         },
+         Donnelly={
+           if(W$type != "rectangle")
+             stop("Donnelly correction only available for rectangular windows")
+           corrblurb <- "Donnelly correction"
+         },
+         guard={
+           if(is.null(clipregion))
+             stop("clipregion not specified")
+           clipregion <- as.owin(clipregion)
+           corrblurb <- "Guard correction"
+         },
+         cdf={
+           corrblurb <- "CDF correction"
+         })
+
+  # alternative hypothesis
+  if(missing(alternative) || is.null(alternative))
+    alternative <- "two.sided"
+  alternative <- pickoption("alternative", alternative,
+                           c(two.sided="two.sided",
+                             less="less",
+                             clustered="less",
+                             greater="greater",
+                             regular="greater"))
+
+  altblurb <-
+    switch(alternative,
+           two.sided="two-sided",
+           less="clustered (R < 1)",
+           greater="regular (R > 1)")
+
+  # compute observed value
+  statistic <- clarkevansCalc(X, correction=correction, clipregion=clipregion,
+                              working=TRUE)
+  working <- attr(statistic, "working")
+  #
+  if(correction == "none" && miss.nsim) {
+    # standard Normal p-value
+    SE <- with(working, sqrt(((4-pi)*areaW)/(4 * pi))/npts)
+    Z <- with(working, (Dobs - Dpois)/SE)
+    p.value <- switch(alternative,
+                      less=pnorm(Z),
+                      greater=1 - pnorm(Z),
+                      two.sided= 2*(1-pnorm(abs(Z))))
+    pvblurb <- "Z-test"
+  } else {
+    # Monte Carlo p-value
+    sims <- numeric(nsim)
+    for(i in 1:nsim) {
+      Xsim <- runifpoint(nX, win=W)
+      sims[i] <- clarkevansCalc(Xsim, correction=correction,
+                                clipregion=clipregion)
+    }
+    p.upper <- (1 + sum(sims >= statistic))/(1.0 + nsim)
+    p.lower <- (1 + sum(sims <= statistic))/(1.0 + nsim)
+    p.value <- switch(alternative,
+                      less=p.lower,
+                      greater=p.upper,
+                      two.sided=min(1, 2*min(p.lower, p.upper)))
+    
+    pvblurb <- paste("Monte Carlo test based on",
+                     nsim, "simulations of CSR with fixed n")
+  }
+
+  statistic <- as.numeric(statistic)
+  names(statistic) <- "R"
+  
+  out <- list(statistic=statistic,
+              p.value=p.value,
+              alternative=altblurb,
+              method=c("Clark-Evans test", corrblurb, pvblurb),
+              data.name=Xname)
+  class(out) <- "htest"
+  return(out)
+}
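+
+#' Editorial sketch (not part of the upstream code): one-sided tests on the
+#' clustered 'redwood' and regular 'cells' patterns from spatstat.
+if(FALSE) {
+  clarkevans.test(redwood, alternative="clustered")
+  clarkevans.test(cells, correction="Donnelly", alternative="regular")
+}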
+
+clarkevansCalc <- function(X, correction="none", clipregion=NULL,
+                           working=FALSE) {
+  # calculations for Clark-Evans index or test
+  W <- Window(X)
+  areaW <- area(W)
+  npts <- npoints(X)
+  intensity <- npts/areaW
+  # R undefined for empty point pattern
+  if(npts == 0)
+    return(NA)
+  # Dobs = observed mean nearest neighbour distance
+  nndistX <- nndist(X)
+  Dobs <- mean(nndistX)
+  # Dpois = Expected mean nearest neighbour distance for Poisson process
+  Dpois <- 1/(2*sqrt(intensity))
+
+  statistic <- NULL
+  if(working) 
+    work <- list(areaW=areaW, npts=npts, intensity=intensity,
+                 Dobs=Dobs, Dpois=Dpois)
+  
+  # Naive uncorrected value
+  if("none" %in% correction) {
+    Rnaive <- Dobs/Dpois
+    statistic <- c(statistic, naive=Rnaive)
+  }
+  # Donnelly edge correction
+  if("Donnelly" %in% correction) {
+     # Dedge = Edge corrected mean nearest neighbour distance, Donnelly 1978
+    if(W$type == "rectangle") {
+      perim <- perimeter(W)
+      Dkevin  <- Dpois + (0.0514+0.0412/sqrt(npts))*perim/npts
+      Rkevin <- Dobs/Dkevin
+      if(working) work <- append(work, list(perim=perim, Dkevin=Dkevin))
+    } else 
+      Rkevin <- NA
+    statistic <- c(statistic, Donnelly=Rkevin)
+  }
+  # guard area method
+  if("guard" %in% correction && !is.null(clipregion)) {
+    # use nn distances from points inside `clipregion'
+    ok <- inside.owin(X, , clipregion)
+    Dguard <- mean(nndistX[ok])
+    Rguard <- Dguard/Dpois
+    if(working) work <- append(work, list(Dguard=Dguard))
+    statistic <- c(statistic, guard=Rguard)
+  }
+  if("cdf" %in% correction) {
+    # compute mean of estimated nearest-neighbour distance distribution G
+    G <- Gest(X)
+    numer <- stieltjes(function(x){x}, G)$km
+    denom <- stieltjes(function(x){rep.int(1, length(x))}, G)$km
+    Dcdf <- numer/denom
+    Rcdf <- Dcdf/Dpois
+    if(working) work <- append(work, list(Dcdf=Dcdf))
+    statistic <- c(statistic, cdf=Rcdf)
+  }
+  if(working) attr(statistic, "working") <- work
+
+  return(statistic)
+}
diff --git a/R/classes.R b/R/classes.R
new file mode 100755
index 0000000..08b11e3
--- /dev/null
+++ b/R/classes.R
@@ -0,0 +1,52 @@
+#
+#
+#	classes.S
+#
+#	$Revision: 1.7 $	$Date: 2006/10/09 03:38:14 $
+#
+#	Generic utilities for classes
+#
+#
+#--------------------------------------------------------------------------
+
+verifyclass <- function(X, C, N=deparse(substitute(X)), fatal=TRUE) {
+  if(!inherits(X, C)) {
+    if(fatal) {
+        gripe <- paste("argument", sQuote(N),
+                       "is not of class", sQuote(C))
+	stop(gripe)
+    } else 
+	return(FALSE)
+  }
+  return(TRUE)
+}
+
+#--------------------------------------------------------------------------
+
+checkfields <- function(X, L) {
+	  # X is a list, L is a vector of strings
+	  # Checks for presence of field named L[i] for all i
+	return(all(!is.na(match(L,names(X)))))
+}
+
+getfields <- function(X, L, fatal=TRUE) {
+	  # X is a list, L is a vector of strings
+	  # Extracts all fields with names L[i] from list X
+	  # Checks for presence of all desired fields
+	  # Returns the sublist of X with fields named L[i]
+	absent <- is.na(match(L, names(X)))
+	if(any(absent)) {
+		gripe <- paste("Needed the following components:",
+				paste(L, collapse=", "),
+				"\nThese ones were missing: ",
+				paste(L[absent], collapse=", "))
+		if(fatal)
+			stop(gripe)
+		else 
+			warning(gripe)
+	} 
+	return(X[L[!absent]])
+}
+
+
+
diff --git a/R/clickjoin.R b/R/clickjoin.R
new file mode 100755
index 0000000..285dfbb
--- /dev/null
+++ b/R/clickjoin.R
@@ -0,0 +1,31 @@
+#
+#  clickjoin.R
+#
+# interactive addition/deletion of segments between vertices
+#
+
+clickjoin <- function(X, ..., add=TRUE, m=NULL, join=TRUE) {
+  verifyclass(X, "ppp")
+  if(!(is.logical(join) && length(join) == 1))
+    stop("join should be a single logical value")
+  plot(X, add=add, pch=16)
+  if(is.null(m)) {
+    m <- matrix(FALSE, npoints(X), npoints(X))
+  } else {
+    stopifnot(is.matrix(m) && is.logical(m))
+    stopifnot(all(dim(m) == npoints(X)))
+    from <- as.vector(row(m)[m])
+    to   <- as.vector(col(m)[m])
+    with(X, segments(x[from], y[from], x[to], y[to]))
+  }
+  while(TRUE) {
+    twoid <- identify(X, plot=FALSE, n=2)
+    n <- length(twoid)
+    if(n == 0) break
+    if(n == 2) {
+      m[twoid[1L],twoid[2L]] <- m[twoid[2L],twoid[1L]] <- join
+      lines(X$x[twoid], X$y[twoid], ...)
+    }
+  }
+  return(m)
+}
diff --git a/R/clicklpp.R b/R/clicklpp.R
new file mode 100644
index 0000000..db7ca85
--- /dev/null
+++ b/R/clicklpp.R
@@ -0,0 +1,62 @@
+#'
+#' $Revision: 1.1 $ $Date: 2017/06/05 10:31:58 $
+#'
+
+clicklpp <- local({
+
+  clicklpp <- function(L, n=NULL, types=NULL, ...,
+                       add=FALSE, main=NULL, hook=NULL) {
+    if(!inherits(L, "linnet"))
+      stop("L should be a linear network", call.=FALSE)
+    instructions <-
+      if(!is.null(n)) paste("click", n, "times in window") else
+      paste("add points: click left mouse button in window\n",
+            "exit: press ESC or another mouse button")
+    if(is.null(main))
+      main <- instructions
+    W <- Window(L)
+  
+    ####  single type #########################
+    if(is.null(types)) {
+      plot(L, add=add, main=main)
+      if(!is.null(hook))
+        plot(hook, add=TRUE)
+      xy <- if(!is.null(n)) spatstatLocator(n=n, ...) else spatstatLocator(...)
+      ok <- inside.owin(xy, w=W)
+      if((nbad <- sum(!ok)) > 0) 
+        warning(paste("Ignored",
+	              nbad,
+	              ngettext(nbad, "point", "points"),
+		      "outside window"),
+	        call.=FALSE)
+      X <- as.lpp(xy$x[ok], xy$y[ok], L=L)
+      return(X)
+    }
+  
+    ##### multitype #######################
+    
+    ftypes <- factor(types, levels=types)
+    #' input points of type 1 
+    X <- getem(ftypes[1L], instructions, n=n, L=L, add=add, ..., pch=1)
+    X <- X %mark% ftypes[1L]
+    #' input points of types 2, 3, ... in turn
+    for(i in 2:length(types)) {
+      Xi <- getem(ftypes[i], instructions, n=n, L=L, add=add,
+                  ..., hook=X, pch=i)
+      Xi <- Xi %mark% ftypes[i]
+      X <- superimpose(X, Xi, L=L)
+    }
+    if(!add) 
+      plot(X, main="Final pattern")
+    return(X)
+  }
+
+  getem <- function(i, instr, ...) {
+    main <- paste("Points of type", sQuote(i), "\n", instr)
+    do.call(clicklpp, resolve.defaults(list(...), list(main=main)))
+  }
+
+  clicklpp
+})
+
+
diff --git a/R/clickpoly.R b/R/clickpoly.R
new file mode 100755
index 0000000..9892881
--- /dev/null
+++ b/R/clickpoly.R
@@ -0,0 +1,75 @@
+#
+# clickpoly.R
+#
+#
+# $Revision: 1.10 $  $Date: 2015/10/21 09:06:57 $
+#
+#
+
+clickpoly <- function(add=FALSE, nv=NULL, np=1, ...) {
+  if(!add || dev.cur() == 1L) {
+    plot(0,0,type="n", xlab="", ylab="", xlim=c(0,1), ylim=c(0,1), asp=1.0,
+         axes=FALSE)
+    rect(0,0,1,1)
+  }
+  spatstatLocator(0) ## check locator is enabled
+  gon <- list()
+  stopifnot(np >= 1)
+  #
+  for(i in 1:np) {
+    if(np > 1)
+      cat(paste(".... Polygon number", i, ".....\n"))
+    if(!is.null(nv)) 
+      cat(paste("click", nv, "times in window\n"))
+    else
+      cat(paste("to add points: click left mouse button in window\n",
+                "      to exit: press ESC or click middle mouse button\n",
+                "[The last point should NOT repeat the first point]\n"))
+    xy <- do.call(spatstatLocator,
+                  resolve.defaults(if(!is.null(nv)) list(n=nv) else list(),
+                                   list(...),
+                                   list(type="o")))
+    if(Area.xypolygon(xy) < 0)
+      xy <- lapply(xy, rev)
+    gon[[i]] <- xy
+    plotPolygonBdry(owin(poly=xy), ...)
+  }
+  result <- owin(poly=gon)
+  plotPolygonBdry(result, ...)
+  return(result)
+}
+
+clickbox <- function(add=TRUE, ...) {
+  spatstatLocator(0) # check locator enabled
+  cat("Click two corners of a box\n")
+  if(!add) plot(owin(), main="Click two corners of a box") 
+  a <- try(spatstatLocator(1), silent=TRUE)
+  if(inherits(a, "try-error")) {
+    ## add=TRUE but there is no current plot
+    plot.new()
+    a <- spatstatLocator(1, ...)
+  }
+  abline(v=a$x)
+  abline(h=a$y)
+  b <- spatstatLocator(1, ...)
+  abline(v=b$x)
+  abline(h=b$y)
+  ab <- concatxy(a, b)
+  result <- owin(range(ab$x), range(ab$y))
+  plotPolygonBdry(result, ...)
+  return(result)
+}
+
+plotPolygonBdry <- function(x, ...) {
+  # filter appropriate arguments
+  argh <- list(...)
+  polyPars <- union(graphicsPars("lines"), graphicsPars("owin"))
+  polyargs <- argh[names(argh) %in% polyPars]
+  # change 'col' to 'border'
+  nama <- names(polyargs)
+  if(any(nama == "col") && !any(nama == "border"))
+    names(polyargs)[nama == "col"] <- "border"
+  # plot
+  do.call(plot.owin,
+          append(list(x=x, add=TRUE), polyargs))
+}
diff --git a/R/clickppp.R b/R/clickppp.R
new file mode 100755
index 0000000..ffeb5f6
--- /dev/null
+++ b/R/clickppp.R
@@ -0,0 +1,85 @@
+#' Dominic Schuhmacher's idea
+#'
+#' $Revision: 1.16 $ $Date: 2017/01/07 09:24:04 $
+#'
+
+clickppp <- local({
+
+  clickppp <- function(n=NULL, win=square(1), types=NULL, ...,
+                     add=FALSE, main=NULL, hook=NULL) {
+    win <- as.owin(win)
+    instructions <-
+      if(!is.null(n)) paste("click", n, "times in window") else
+      paste("add points: click left mouse button in window\n",
+            "exit: press ESC or another mouse button")
+    if(is.null(main))
+      main <- instructions
+  
+    ####  single type #########################
+    if(is.null(types)) {
+      plot(win, add=add, main=main, invert=TRUE)
+      if(!is.null(hook))
+        plot(hook, add=TRUE)
+      if(!is.null(n))
+        xy <- spatstatLocator(n=n, ...)
+      else
+        xy <- spatstatLocator(...)
+      #' check whether all points lie inside window
+      if((nout <- sum(!inside.owin(xy$x, xy$y, win))) > 0) {
+        warning(paste(nout,
+                      ngettext(nout, "point", "points"),
+                      "lying outside specified window; window was expanded"))
+        win <- boundingbox(win, xy)
+      }
+      X <- ppp(xy$x, xy$y, window=win)
+      return(X)
+    }
+  
+    ##### multitype #######################
+    
+    ftypes <- factor(types, levels=types)
+    #' input points of type 1 
+    X <- getem(ftypes[1L], instructions, n=n, win=win, add=add, ..., pch=1)
+    X <- X %mark% ftypes[1L]
+    #' input points of types 2, 3, ... in turn
+    naughty <- FALSE
+    for(i in 2:length(types)) {
+      Xi <- getem(ftypes[i], instructions, n=n, win=win, add=add,
+                  ..., hook=X, pch=i)
+      Xi <- Xi %mark% ftypes[i]
+      if(!naughty && identical(Xi$window, win)) {
+        #' normal case
+        X <- superimpose(X, Xi, W=win)
+      } else {
+        #' User has clicked outside original window.
+        naughty <- TRUE
+        #' Use bounding box for simplicity
+        bb <- boundingbox(Xi$window, X$window)
+        X <- superimpose(X, Xi, W=bb)
+      } 
+    }
+    if(!add) {
+      if(!naughty)
+        plot(X, main="Final pattern")
+      else {
+        plot(X$window, main="Final pattern (in expanded window)", invert=TRUE)
+        plot(win, add=TRUE, invert=TRUE)
+        plot(X, add=TRUE)
+      }
+    }
+    return(X)
+  }
+
+  getem <- function(i, instr, ...) {
+    main <- paste("Points of type", sQuote(i), "\n", instr)
+    do.call(clickppp, resolve.defaults(list(...), list(main=main)))
+  }
+
+  clickppp
+})
+
+
+clickdist <- function() {
+  a <- spatstatLocator(2)
+  return(pairdist(a)[1L,2L])
+}
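+
+#' Editorial note (not part of the upstream code): these functions are
+#' interactive and need an open graphics device at the R prompt, e.g.
+if(FALSE) {
+  X <- clickppp(10)                 # click 10 points in the unit square
+  Y <- clickppp(types=c("a", "b"))  # click points of two types in turn
+  clickdist()                       # click two points, get their distance
+}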
diff --git a/R/clip.psp.R b/R/clip.psp.R
new file mode 100755
index 0000000..7e9c18c
--- /dev/null
+++ b/R/clip.psp.R
@@ -0,0 +1,242 @@
+#
+# clip.psp.R
+#
+#    $Revision: 1.19 $   $Date: 2017/06/05 10:31:58 $
+#
+#
+ 
+########################################################
+# clipping operation (for subset)
+########################################################
+
+clip.psp <- function(x, window, check=TRUE, fragments=TRUE) {
+  verifyclass(x, "psp")
+  verifyclass(window, "owin")
+  if(check && !is.subset.owin(window, x$window))
+    warning("The clipping window is not a subset of the window containing the line segment pattern x")
+  if(x$n == 0) {
+    emptypattern <- psp(numeric(0), numeric(0), numeric(0), numeric(0),
+                      window=window, marks=x$marks)
+    return(emptypattern)
+  }
+  switch(window$type,
+         rectangle={
+           result <- cliprect.psp(x, window, fragments=fragments)
+         },
+         polygonal={
+           result <- clippoly.psp(x, window, fragments=fragments)
+         },
+         mask={
+           result <- clippoly.psp(x, as.polygonal(window), fragments=fragments)
+           result$window <- window
+         })
+  return(result)
+}
+
+
+#####
+#
+#  clipping to a rectangle
+#
+cliprect.psp <- function(x, window, fragments=TRUE) {
+  verifyclass(x, "psp")
+  verifyclass(window, "owin")
+  ends <- x$ends
+  marx <- marks(x, dfok=TRUE)
+  # find segments which are entirely inside the window
+  # (by convexity)
+  in0 <- inside.owin(ends$x0, ends$y0, window)
+  in1 <- inside.owin(ends$x1, ends$y1, window)
+  ok <- in0 & in1
+  # if all segments are inside, return them
+  if(all(ok))
+    return(as.psp(ends, window=window, marks=marx, check=FALSE))
+  # otherwise, store those segments which are inside the window
+  ends.inside <- ends[ok, , drop=FALSE]
+  marks.inside <- marx %msub% ok
+  x.inside <- as.psp(ends.inside, window=window, marks=marks.inside, check=FALSE)
+  if(!fragments)
+    return(x.inside)
+  # now consider the rest
+  ends <- ends[!ok, , drop=FALSE]
+  in0 <- in0[!ok] 
+  in1 <- in1[!ok]
+  marx <- marx %msub% (!ok)
+  # first clip segments to the range x \in [xmin, xmax]
+  # use parametric coordinates
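+  # (Editorial note: each segment is parametrized as
+  #  (x0, y0) + t * (x1-x0, y1-y0) with 0 <= t <= 1; tvalue(z0, z1, zt)
+  #  below returns the t at which the coordinate crosses the level zt,
+  #  or NA when there is no crossing.)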
+  small <- function(x) { abs(x) <= .Machine$double.eps }
+  tvalue <- function(z0, z1, zt) {
+    y1 <- z1 - z0
+    yt <- zt - z0
+    tval <- ifelseAX(small(y1), 0.5, yt/y1)
+    betwee <- (yt * (zt - z1)) <= 0
+    result <- ifelseXB(betwee, tval, NA)
+    return(result)
+  }
+  between <- function(x, r) { ((x-r[1L]) * (x-r[2L])) <= 0 }
+  tx <- cbind(ifelse0NA(between(ends$x0, window$xrange)),
+              ifelse1NA(between(ends$x1, window$xrange)),
+              tvalue(ends$x0, ends$x1, window$xrange[1L]),
+              tvalue(ends$x0, ends$x1, window$xrange[2L]))
+  # discard segments which do not lie in the x range 
+  nx <- apply(!is.na(tx), 1L, sum)
+  ok <- (nx >= 2)
+  if(!any(ok))
+    return(x.inside)
+  ends <- ends[ok, , drop=FALSE]
+  tx   <- tx[ok, , drop=FALSE]
+  in0  <- in0[ok]
+  in1  <- in1[ok]
+  marx <- marx %msub% ok
+  # Clip the segments to the x range
+  tmin <- apply(tx, 1L, min, na.rm=TRUE)
+  tmax <- apply(tx, 1L, max, na.rm=TRUE)
+  dx <- ends$x1 - ends$x0
+  dy <- ends$y1 - ends$y0
+  ends.xclipped <- data.frame(x0=ends$x0 + tmin * dx,
+                             y0=ends$y0 + tmin * dy,
+                             x1=ends$x0 + tmax * dx,
+                             y1=ends$y0 + tmax * dy)
+  # Now clip the segments to the range y \in [ymin, ymax]
+  ends <- ends.xclipped
+  in0 <- inside.owin(ends$x0, ends$y0, window)
+  in1 <- inside.owin(ends$x1, ends$y1, window)
+  ty <- cbind(ifelse0NA(in0),
+              ifelse1NA(in1),
+              tvalue(ends$y0, ends$y1, window$yrange[1L]),
+              tvalue(ends$y0, ends$y1, window$yrange[2L]))
+  # discard segments which do not lie in the y range 
+  ny <- apply(!is.na(ty), 1L, sum)
+  ok <- (ny >= 2)
+  if(!any(ok))
+    return(x.inside)
+  ends <- ends[ok, , drop=FALSE]
+  ty   <- ty[ok, , drop=FALSE]
+  in0  <- in0[ok]
+  in1  <- in1[ok]
+  marx <- marx %msub% ok
+  # Clip the segments to the y range
+  tmin <- apply(ty, 1L, min, na.rm=TRUE)
+  tmax <- apply(ty, 1L, max, na.rm=TRUE)
+  dx <- ends$x1 - ends$x0
+  dy <- ends$y1 - ends$y0
+  ends.clipped <- data.frame(x0=ends$x0 + tmin * dx,
+                             y0=ends$y0 + tmin * dy,
+                             x1=ends$x0 + tmax * dx,
+                             y1=ends$y0 + tmax * dy)
+  marks.clipped <- marx
+  # OK - segments clipped
+  # Put them together with the unclipped ones
+  ends.all <- rbind(ends.inside, ends.clipped)
+  marks.all <- marks.inside %mapp% marks.clipped
+  as.psp(ends.all, window=window, marks=marks.all, check=FALSE)
+}
+
+
+############################
+#
+#   clipping to a polygonal window
+#
+
+clippoly.psp <- function(s, window, fragments=TRUE) {
+  verifyclass(s, "psp")
+  verifyclass(window, "owin")
+  stopifnot(window$type == "polygonal")
+  marx <- marks(s)
+  has.marks <- !is.null(marx)
+  
+  eps <- .Machine$double.eps
+
+  # find the intersection points between segments and window edges
+  
+  ns <- s$n
+  es <- s$ends
+  x0s <- es$x0
+  y0s <- es$y0
+  dxs <- es$x1 - es$x0
+  dys <- es$y1 - es$y0
+
+  bdry <- edges(window)
+  nw <- bdry$n
+  ew <- bdry$ends
+  x0w <- ew$x0
+  y0w <- ew$y0
+  dxw <- ew$x1 - ew$x0
+  dyw <- ew$y1 - ew$y0
+
+  out <- .C("xysegint",
+            na=as.integer(ns),
+            x0a=as.double(x0s),
+            y0a=as.double(y0s),
+            dxa=as.double(dxs),
+            dya=as.double(dys), 
+            nb=as.integer(nw),
+            x0b=as.double(x0w),
+            y0b=as.double(y0w),
+            dxb=as.double(dxw),
+            dyb=as.double(dyw), 
+            eps=as.double(eps),
+            xx=as.double(numeric(ns * nw)),
+            yy=as.double(numeric(ns * nw)),
+            ta=as.double(numeric(ns * nw)),
+            tb=as.double(numeric(ns * nw)),
+            ok=as.integer(integer(ns * nw)),
+            PACKAGE = "spatstat")
+
+  hitting <- (matrix(out$ok, ns, nw) != 0)
+  ts <- matrix(out$ta, ns, nw)
+
+  anyhit <- matrowany(hitting)
+  
+  if(!fragments) {
+    #' retain only segments that avoid the boundary entirely
+    leftin <- inside.owin(es$x0, es$y0, window)
+    rightin <- inside.owin(es$x1, es$y1, window)
+    ok <- !anyhit & leftin & rightin
+    return(as.psp(es[ok,,drop=FALSE],
+                  window=window,
+		  marks=marx %msub% ok,
+		  check=FALSE))
+  }
+  # form all the chopped segments (whether in or out)
+
+  chopped <- s[numeric(0)]
+  chopped$window <- boundingbox(s$window, window)
+    
+  for(seg in seq_len(ns)) {
+    segment <- s$ends[seg, , drop=FALSE]
+    if(!anyhit[seg]) {
+      # no intersection with boundary - add single segment
+      chopped$ends <- rbind(chopped$ends, segment)
+      if(has.marks) chopped$marks <- (chopped$marks) %mapp% (marx %msub% seg)
+    } else {
+      # crosses boundary - add several pieces
+      tvals <- ts[seg,]
+      tvals <- sort(tvals[hitting[seg,]])
+      x0 <- segment$x0
+      dx <- segment$x1 - x0
+      y0 <- segment$y0
+      dy <- segment$y1 - y0
+      newones <- data.frame(x0 = x0 + c(0,tvals) * dx,
+                            y0 = y0 + c(0,tvals) * dy,
+                            x1 = x0 + c(tvals,1) * dx,
+                            y1 = y0 + c(tvals,1) * dy)
+      chopped$ends <- rbind(chopped$ends, newones)
+      if(has.marks) {
+        hitmarks <- marx %msub% seg
+        newmarks <- hitmarks %mrep% nrow(newones)
+        chopped$marks <-  (chopped$marks) %mapp% newmarks
+      }
+    }
+  }
+  chopped$n <- nrow(chopped$ends)
+  
+  # select those chopped segments which are inside the window
+  mid <- midpoints.psp(chopped)
+  ins <- inside.owin(mid$x, mid$y, window)
+  retained <- chopped[ins]
+  retained$window <- window
+  return(retained)
+}
+
+
diff --git a/R/close3Dpairs.R b/R/close3Dpairs.R
new file mode 100644
index 0000000..70a2165
--- /dev/null
+++ b/R/close3Dpairs.R
@@ -0,0 +1,213 @@
+#
+# close3Dpairs.R
+#
+#   $Revision: 1.9 $   $Date: 2017/06/05 10:31:58 $
+#
+#  extract the r-close pairs from a 3D dataset
+# 
+#
+closepairs.pp3 <- local({
+
+  closepairs.pp3 <- function(X, rmax, twice=TRUE,
+                             what=c("all", "indices"),
+                             distinct=TRUE, neat=TRUE, ...) {
+    verifyclass(X, "pp3")
+    what <- match.arg(what)
+    stopifnot(is.numeric(rmax) && length(rmax) == 1L)
+    stopifnot(is.finite(rmax))
+    stopifnot(rmax >= 0)
+    ordered <- list(...)$ordered
+    if(missing(twice) && !is.null(ordered)) {
+      warning("Obsolete argument 'ordered' has been replaced by 'twice'")
+      twice <- ordered
+    }
+    npts <- npoints(X)
+    nama <- switch(what,
+                   all = c("i", "j",
+                           "xi", "yi", "zi",
+                           "xj", "yj", "zj",
+                           "dx", "dy", "dz",
+                           "d"),
+                   indices = c("i", "j"))
+    names(nama) <- nama
+    if(npts == 0) {
+      null.answer <- lapply(nama, nuttink)
+      return(null.answer)
+    }
+    ## sort points by increasing x coordinate
+    oo <- fave.order(coords(X)$x)
+    Xsort <- X[oo]
+    ## First make an OVERESTIMATE of the number of pairs
+    nsize <- ceiling(5 * pi * (npts^2) * (rmax^3)/volume(as.box3(X)))
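+    ## (for roughly uniform points the expected number of distinct
+    ##  r-close pairs is about (2/3) * pi * (rmax^3) * npts^2 / volume,
+    ##  so the constant 5 * pi leaves ample headroom)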
+    nsize <- max(1024, nsize)
+    if(nsize > .Machine$integer.max) {
+      warning(
+        "Estimated number of close pairs exceeds maximum possible integer",
+        call.=FALSE)
+      nsize <- .Machine$integer.max
+    }
+    ## Now extract pairs
+    XsortC <- coords(Xsort)
+    x <- XsortC$x
+    y <- XsortC$y
+    z <- XsortC$z
+    r <- rmax
+    ng <- nsize
+    storage.mode(x) <- "double"
+    storage.mode(y) <- "double"
+    storage.mode(z) <- "double"
+    storage.mode(r) <- "double"
+    storage.mode(ng) <- "integer"
+    ## go
+    a <- switch(what,
+                all = {
+                  .Call("close3pairs",
+                        xx=x, yy=y, zz=z, rr=r, nguess=ng,
+                        PACKAGE = "spatstat")
+                },
+                indices = {
+                  .Call("close3IJpairs",
+                        xx=x, yy=y, zz=z, rr=r, nguess=ng,
+                        PACKAGE = "spatstat")
+                })
+    names(a) <- nama
+    ## convert i,j indices to original sequence
+    a$i <- oo[a$i]
+    a$j <- oo[a$j]
+    ## handle options
+    if(twice) {
+      ## both (i, j) and (j, i) should be returned
+      a <- as.data.frame(a)
+      a <- as.list(rbind(a, swapdata(a, what)))
+    } else if(neat) {
+      ## enforce i < j
+      swap <- with(a, (j < i))
+      if(any(swap)) {
+        a <- as.data.frame(a)
+        a[swap,] <- swapdata(a[swap, ,drop=FALSE], what)
+        a <- as.list(a)
+      }
+    }
+    ## add pairs of identical points?
+    if(!distinct) {
+      ii <- seq_len(npts)
+      xtra <- data.frame(i = ii, j=ii)
+      if(what == "all") {
+        coo <- coords(X)[, c("x","y","z")]
+        zeroes <- rep(0, npts)
+        xtra <- cbind(xtra, coo, coo, zeroes, zeroes, zeroes, zeroes)
+        colnames(xtra) <- nama  # align names with 'a' so that rbind() matches columns
+      }
+      a <- as.list(rbind(as.data.frame(a), xtra))
+    }
+    ## done
+    return(a)
+  }
+
+  swapdata <- function(a, what) {
+    switch(what,
+           all = {
+             with(a, data.frame(i  =  j,
+                                j  =  i,
+                                xi =  xj,
+                                yi =  yj,
+                                zi =  zj,
+                                xj =  xi,
+                                yj =  yi,
+                                zj =  zi,
+                                dx = -dx,
+                                dy = -dy,
+                                dz = -dz,
+                                d  =  d))
+           },
+           indices = {
+             with(a, data.frame(i=j,
+                                j=i))
+           })
+  }
+  
+  nuttink <- function(x) numeric(0)
+
+  closepairs.pp3
+})
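+
+## Example (illustrative sketch):
+##   X <- pp3(runif(100), runif(100), runif(100), box3(c(0,1)))
+##   cl <- closepairs(X, rmax=0.1)
+##   # cl$i, cl$j index the points of X; cl$d holds the pair distances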
+
+#######################
+
+crosspairs.pp3 <- local({
+
+  crosspairs.pp3 <- function(X, Y, rmax, what=c("all", "indices"), ...) {
+    verifyclass(X, "pp3")
+    verifyclass(Y, "pp3")
+    what <- match.arg(what)
+    stopifnot(is.numeric(rmax) && length(rmax) == 1L && rmax >= 0)
+    nama <- switch(what,
+                   all = c("i", "j",
+                           "xi", "yi", "zi",
+                           "xj", "yj", "zj",
+                           "dx", "dy", "dz",
+                           "d"),
+                   indices = c("i", "j"))
+    names(nama) <- nama
+    nX <- npoints(X)
+    nY <- npoints(Y)
+    if(nX == 0 || nY == 0) {
+      null.answer <- lapply(nama, nuttink)
+      return(null.answer)
+    }
+    ## order patterns by increasing x coordinate
+    ooX <- fave.order(coords(X)$x)
+    Xsort <- X[ooX]
+    ooY <- fave.order(coords(Y)$x)
+    Ysort <- Y[ooY]
+    ## First (over)estimate the number of pairs
+    nsize <- ceiling(3 * pi * (rmax^3) * nX * nY/volume(as.box3(Y)))
+    nsize <- max(1024, nsize)
+    if(nsize > .Machine$integer.max) {
+      warning(
+        "Estimated number of close pairs exceeds maximum possible integer",
+        call.=FALSE)
+      nsize <- .Machine$integer.max
+    }
+    ## .Call
+    XsortC <- coords(Xsort)
+    YsortC <- coords(Ysort)
+    Xx <- XsortC$x
+    Xy <- XsortC$y
+    Xz <- XsortC$z
+    Yx <- YsortC$x
+    Yy <- YsortC$y
+    Yz <- YsortC$z
+    r <- rmax
+    ng <- nsize
+    storage.mode(Xx) <- storage.mode(Xy) <- storage.mode(Xz) <- "double"
+    storage.mode(Yx) <- storage.mode(Yy) <- storage.mode(Yz) <- "double"
+    storage.mode(r) <- "double"
+    storage.mode(ng) <- "integer"
+    ## go
+    a <- switch(what,
+                all = {
+                  .Call("cross3pairs",
+                        xx1=Xx, yy1=Xy, zz1=Xz,
+                        xx2=Yx, yy2=Yy, zz2=Yz,
+                        rr=r, nguess=ng,
+                        PACKAGE = "spatstat")
+                },
+                indices = {
+                  .Call("cross3IJpairs",
+                        xx1=Xx, yy1=Xy, zz1=Xz,
+                        xx2=Yx, yy2=Yy, zz2=Yz,
+                        rr=r, nguess=ng,
+                        PACKAGE = "spatstat")
+                })
+    names(a) <- nama
+    ## convert i,j indices to original sequence
+    a$i <- ooX[a$i]
+    a$j <- ooY[a$j]
+    return(a)
+  }
+
+  nuttink <- function(x) numeric(0)
+  
+  crosspairs.pp3
+})
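+
+## Example (illustrative sketch):
+##   X <- pp3(runif(100), runif(100), runif(100), box3(c(0,1)))
+##   Y <- pp3(runif(50), runif(50), runif(50), box3(c(0,1)))
+##   cp <- crosspairs(X, Y, rmax=0.1, what="indices")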
+
+
diff --git a/R/closepairs.R b/R/closepairs.R
new file mode 100755
index 0000000..0d7ea5c
--- /dev/null
+++ b/R/closepairs.R
@@ -0,0 +1,612 @@
+#
+# closepairs.R
+#
+#   $Revision: 1.39 $   $Date: 2017/06/05 10:31:58 $
+#
+#  simply extract the r-close pairs from a dataset
+# 
+#  Less memory-hungry for large patterns
+#
+
+closepairs <- function(X, rmax, ...) {
+  UseMethod("closepairs")
+}
+  
+closepairs.ppp <- function(X, rmax, twice=TRUE,
+                           what=c("all", "indices", "ijd"),
+                           distinct=TRUE, neat=TRUE, 
+                           ...) {
+  verifyclass(X, "ppp")
+  what <- match.arg(what)
+  stopifnot(is.numeric(rmax) && length(rmax) == 1L)
+  stopifnot(is.finite(rmax))
+  stopifnot(rmax >= 0)
+  ordered <- list(...)$ordered
+  if(missing(twice) && !is.null(ordered)) {
+    warning("Obsolete argument 'ordered' has been replaced by 'twice'")
+    twice <- ordered
+  }
+  npts <- npoints(X)
+  null.answer <- switch(what,
+                        all = {
+                          list(i=integer(0),
+                               j=integer(0),
+                               xi=numeric(0),
+                               yi=numeric(0),
+                               xj=numeric(0),
+                               yj=numeric(0),
+                               dx=numeric(0),
+                               dy=numeric(0),
+                               d=numeric(0))
+                        },
+                        indices = {
+                          list(i=integer(0),
+                               j=integer(0))
+                        },
+                        ijd = {
+                          list(i=integer(0),
+                               j=integer(0),
+                               d=numeric(0))
+                        })
+  if(npts == 0)
+    return(null.answer)
+  # sort points by increasing x coordinate
+  oo <- fave.order(X$x)
+  Xsort <- X[oo]
+  # First make an OVERESTIMATE of the number of unordered pairs
+  nsize <- ceiling(2 * pi * (npts^2) * (rmax^2)/area(Window(X)))
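+  # (for roughly uniform points the expected number of unordered
+  #  rmax-close pairs is about npts^2 * pi * rmax^2 / (2 * area),
+  #  so the constant 2 * pi leaves a factor-4 margin)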
+  nsize <- max(1024, nsize)
+  if(nsize > .Machine$integer.max) {
+    warning("Estimated number of close pairs exceeds maximum possible integer",
+            call.=FALSE)
+    nsize <- .Machine$integer.max
+  }
+  # Now extract pairs
+  if(spatstat.options("closepairs.newcode")) {
+    # ------------------- use new faster code ---------------------
+    # fast algorithms collect each distinct pair only once
+    got.twice <- FALSE
+    ng <- nsize
+    #
+    x <- Xsort$x
+    y <- Xsort$y
+    r <- rmax
+    storage.mode(x) <- "double"
+    storage.mode(y) <- "double"
+    storage.mode(r) <- "double"
+    storage.mode(ng) <- "integer"
+    switch(what,
+           all = {
+             z <- .Call("Vclosepairs",
+                        xx=x, yy=y, rr=r, nguess=ng,
+                        PACKAGE = "spatstat")
+             if(length(z) != 9)
+               stop("Internal error: incorrect format returned from Vclosepairs")
+             i  <- z[[1L]]  # NB no increment required
+             j  <- z[[2L]]
+             xi <- z[[3L]]
+             yi <- z[[4L]]
+             xj <- z[[5L]]
+             yj <- z[[6L]]
+             dx <- z[[7L]]
+             dy <- z[[8L]]
+             d  <- z[[9L]]
+           },
+           indices = {
+             z <- .Call("VcloseIJpairs",
+                        xx=x, yy=y, rr=r, nguess=ng,
+                        PACKAGE = "spatstat")
+             if(length(z) != 2)
+               stop("Internal error: incorrect format returned from VcloseIJpairs")
+             i  <- z[[1L]]  # NB no increment required
+             j  <- z[[2L]]
+           },
+           ijd = {
+             z <- .Call("VcloseIJDpairs",
+                        xx=x, yy=y, rr=r, nguess=ng,
+                        PACKAGE = "spatstat")
+             if(length(z) != 3)
+               stop("Internal error: incorrect format returned from VcloseIJDpairs")
+             i  <- z[[1L]]  # NB no increment required
+             j  <- z[[2L]]
+             d  <- z[[3L]]
+           })
+
+  } else {
+    # ------------------- use older code --------------------------
+    if(!distinct) {
+      ii <- seq_len(npts)
+      xx <- X$x
+      yy <- X$y
+      zeroes <- rep(0, npts)
+      null.answer <- switch(what,
+                            all = {
+                              list(i=ii,
+                                   j=ii,
+                                   xi=xx,
+                                   yi=yy,
+                                   xj=xx,
+                                   yj=yy,
+                                   dx=zeroes,
+                                   dy=zeroes,
+                                   d=zeroes)
+                            },
+                            indices = {
+                              list(i=ii,
+                                   j=ii)
+                            },
+                            ijd = {
+                              list(i=ii,
+                                   j=ii,
+                                   d=zeroes)
+                            })
+    }
+
+    got.twice <- TRUE
+    nsize <- nsize * 2
+    z <-
+      .C("Fclosepairs",
+         nxy=as.integer(npts),
+         x=as.double(Xsort$x),
+         y=as.double(Xsort$y),
+         r=as.double(rmax),
+         noutmax=as.integer(nsize), 
+         nout=as.integer(integer(1L)),
+         iout=as.integer(integer(nsize)),
+         jout=as.integer(integer(nsize)), 
+         xiout=as.double(numeric(nsize)),
+         yiout=as.double(numeric(nsize)),
+         xjout=as.double(numeric(nsize)),
+         yjout=as.double(numeric(nsize)),
+         dxout=as.double(numeric(nsize)),
+         dyout=as.double(numeric(nsize)),
+         dout=as.double(numeric(nsize)),
+         status=as.integer(integer(1L)),
+         PACKAGE = "spatstat")
+
+    if(z$status != 0) {
+      # Guess was insufficient
+      # Obtain an OVERCOUNT of the number of pairs
+      # (to work around gcc bug #323)
+      rmaxplus <- 1.25 * rmax
+      nsize <- .C("paircount",
+                  nxy=as.integer(npts),
+                  x=as.double(Xsort$x),
+                  y=as.double(Xsort$y),
+                  rmaxi=as.double(rmaxplus),
+                  count=as.integer(integer(1L)),
+                  PACKAGE = "spatstat")$count
+      if(nsize <= 0)
+        return(null.answer)
+      # add a bit more for safety
+      nsize <- ceiling(1.1 * nsize) + 2 * npts
+      # now extract points
+      z <-
+        .C("Fclosepairs",
+           nxy=as.integer(npts),
+           x=as.double(Xsort$x),
+           y=as.double(Xsort$y),
+           r=as.double(rmax),
+           noutmax=as.integer(nsize), 
+           nout=as.integer(integer(1L)),
+           iout=as.integer(integer(nsize)),
+           jout=as.integer(integer(nsize)), 
+           xiout=as.double(numeric(nsize)),
+           yiout=as.double(numeric(nsize)),
+           xjout=as.double(numeric(nsize)),
+           yjout=as.double(numeric(nsize)),
+           dxout=as.double(numeric(nsize)),
+           dyout=as.double(numeric(nsize)),
+           dout=as.double(numeric(nsize)),
+           status=as.integer(integer(1L)),
+           PACKAGE = "spatstat")
+      if(z$status != 0)
+        stop(paste("Internal error: C routine complains that insufficient space was allocated:", nsize))
+    }
+  # trim vectors to the length indicated
+    npairs <- z$nout
+    if(npairs <= 0)
+      return(null.answer)
+    actual <- seq_len(npairs)
+    i  <- z$iout[actual]  # sic
+    j  <- z$jout[actual]
+    switch(what,
+           indices={},
+           all={
+             xi <- z$xiout[actual]
+             yi <- z$yiout[actual]
+             xj <- z$xjout[actual]
+             yj <- z$yjout[actual]
+             dx <- z$dxout[actual]
+             dy <- z$dyout[actual]
+             d <-  z$dout[actual]
+           },
+           ijd = {
+             d <- z$dout[actual]
+           })
+    # ------------------- end code switch ------------------------
+  }
+  
+  # convert i,j indices to original sequence
+  i <- oo[i]
+  j <- oo[j]
+  if(twice) {
+    ## both (i, j) and (j, i) should be returned
+    if(!got.twice) {
+      ## duplication required
+      iold <- i
+      jold <- j
+      i <- c(iold, jold)
+      j <- c(jold, iold)
+      switch(what,
+             indices = { },
+             ijd = {
+               d <- rep(d, 2)
+             },
+             all = {
+               xinew <- c(xi, xj)
+               yinew <- c(yi, yj)
+               xjnew <- c(xj, xi)
+               yjnew <- c(yj, yi)
+               xi <- xinew
+               yi <- yinew
+               xj <- xjnew
+               yj <- yjnew
+               dx <- c(dx, -dx)
+               dy <- c(dy, -dy)
+               d <- rep(d, 2)
+             })
+    }
+  } else {
+    ## only one of (i, j) and (j, i) should be returned
+    if(got.twice) {
+      ## remove duplication
+      ok <- (i < j)
+      i  <-  i[ok]
+      j  <-  j[ok]
+      switch(what,
+             indices = { },
+             all = {
+               xi <- xi[ok]
+               yi <- yi[ok]
+               xj <- xj[ok]
+               yj <- yj[ok]
+               dx <- dx[ok]
+               dy <- dy[ok]
+               d  <-  d[ok]
+             },
+             ijd = {
+               d  <-  d[ok]
+             })
+    } else if(neat) {
+      ## enforce i < j
+      swap <- (i > j)
+      tmp <- i[swap]
+      i[swap] <- j[swap]
+      j[swap] <- tmp
+      if(what == "all") {
+        xinew <- ifelse(swap, xj, xi)
+        yinew <- ifelse(swap, yj, yi)
+        xjnew <- ifelse(swap, xi, xj)
+        yjnew <- ifelse(swap, yi, yj)
+        xi <- xinew
+        yi <- yinew
+        xj <- xjnew
+        yj <- yjnew
+        dx[swap] <- -dx[swap]
+        dy[swap] <- -dy[swap]
+      }
+    } ## otherwise no action required
+  }
+  ## add pairs of identical points?
+  if(!distinct) {
+    ii <- seq_len(npts)
+    xx <- X$x
+    yy <- X$y
+    zeroes <- rep(0, npts)
+    i <- c(i, ii)
+    j <- c(j, ii)
+    switch(what,
+           ijd={
+             d  <- c(d, zeroes)
+           },
+           all = {
+             xi <- c(xi, xx)
+             yi <- c(yi, yy)
+             xj <- c(xj, xx)
+             yj <- c(yj, yy)
+             dx <- c(dx, zeroes)
+             dy <- c(dy, zeroes)
+             d  <- c(d, zeroes)
+           })
+  }
+  ## done
+  switch(what,
+         all = {
+           answer <- list(i=i,
+                          j=j,
+                          xi=xi, 
+                          yi=yi,
+                          xj=xj,
+                          yj=yj,
+                          dx=dx,
+                          dy=dy,
+                          d=d)
+         },
+         indices = {
+           answer <- list(i = i, j = j)
+         },
+         ijd = {
+           answer <- list(i=i, j=j, d=d)
+         })
+  return(answer)
+}
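+
+## Example (illustrative sketch):
+##   X <- runifpoint(100)
+##   cp <- closepairs(X, rmax=0.05, what="ijd")
+##   # twice=TRUE (the default) lists both (i,j) and (j,i);
+##   # twice=FALSE with neat=TRUE gives each pair once, with i < j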
+
+#######################
+
+crosspairs <- function(X, Y, rmax, ...) {
+  UseMethod("crosspairs")
+}
+
+crosspairs.ppp <- function(X, Y, rmax, what=c("all", "indices", "ijd"), ...) {
+  verifyclass(X, "ppp")
+  verifyclass(Y, "ppp")
+  what <- match.arg(what)
+  stopifnot(is.numeric(rmax) && length(rmax) == 1L && rmax >= 0)
+  null.answer <- switch(what,
+                        all = {
+                          list(i=integer(0),
+                               j=integer(0),
+                               xi=numeric(0),
+                               yi=numeric(0),
+                               xj=numeric(0),
+                               yj=numeric(0),
+                               dx=numeric(0),
+                               dy=numeric(0),
+                               d=numeric(0))
+                        },
+                        indices = {
+                          list(i=integer(0),
+                               j=integer(0))
+                        },
+                        ijd = {
+                          list(i=integer(0),
+                               j=integer(0),
+                               d=numeric(0))
+                        })
+  nX <- npoints(X)
+  nY <- npoints(Y)
+  if(nX == 0 || nY == 0) return(null.answer)
+  # order patterns by increasing x coordinate
+  ooX <- fave.order(X$x)
+  Xsort <- X[ooX]
+  ooY <- fave.order(Y$x)
+  Ysort <- Y[ooY]
+  if(spatstat.options("crosspairs.newcode")) {
+    # ------------------- use new faster code ---------------------
+    # First (over)estimate the number of pairs
+    nsize <- ceiling(2 * pi * (rmax^2) * nX * nY/area(Window(Y)))
+    nsize <- max(1024, nsize)
+    if(nsize > .Machine$integer.max) {
+      warning(
+        "Estimated number of close pairs exceeds maximum possible integer",
+        call.=FALSE)
+      nsize <- .Machine$integer.max
+    }
+    # .Call
+    Xx <- Xsort$x
+    Xy <- Xsort$y
+    Yx <- Ysort$x
+    Yy <- Ysort$y
+    r <- rmax
+    ng <- nsize
+    storage.mode(Xx) <- storage.mode(Xy) <- "double"
+    storage.mode(Yx) <- storage.mode(Yy) <- "double"
+    storage.mode(r) <- "double"
+    storage.mode(ng) <- "integer"
+    switch(what,
+           all = {
+             z <- .Call("Vcrosspairs",
+                        xx1=Xx, yy1=Xy,
+                        xx2=Yx, yy2=Yy,
+                        rr=r, nguess=ng,
+                        PACKAGE = "spatstat")
+             if(length(z) != 9)
+               stop("Internal error: incorrect format returned from Vcrosspairs")
+             i  <- z[[1L]]  # NB no increment required
+             j  <- z[[2L]]
+             xi <- z[[3L]]
+             yi <- z[[4L]]
+             xj <- z[[5L]]
+             yj <- z[[6L]]
+             dx <- z[[7L]]
+             dy <- z[[8L]]
+             d  <- z[[9L]]
+           },
+           indices = {
+             z <- .Call("VcrossIJpairs",
+                        xx1=Xx, yy1=Xy,
+                        xx2=Yx, yy2=Yy,
+                        rr=r, nguess=ng,
+                        PACKAGE = "spatstat")
+             if(length(z) != 2)
+               stop("Internal error: incorrect format returned from VcrossIJpairs")
+             i  <- z[[1L]]  # NB no increment required
+             j  <- z[[2L]]
+           }, 
+           ijd = {
+             z <- .Call("VcrossIJDpairs",
+                        xx1=Xx, yy1=Xy,
+                        xx2=Yx, yy2=Yy,
+                        rr=r, nguess=ng,
+                        PACKAGE = "spatstat")
+             if(length(z) != 3)
+               stop("Internal error: incorrect format returned from VcrossIJDpairs")
+             i  <- z[[1L]]  # NB no increment required
+             j  <- z[[2L]]
+             d  <- z[[3L]]
+           })
+           
+  } else {
+    # Older code 
+    # obtain upper estimate of number of pairs
+    # (to work around gcc bug 323)
+    rmaxplus <- 1.25 * rmax
+    nsize <- .C("crosscount",
+                nn1=as.integer(X$n),
+                x1=as.double(Xsort$x),
+                y1=as.double(Xsort$y),
+                nn2=as.integer(Ysort$n),
+                x2=as.double(Ysort$x),
+                y2=as.double(Ysort$y),
+                rmaxi=as.double(rmaxplus),
+                count=as.integer(integer(1L)),
+                PACKAGE = "spatstat")$count
+    if(nsize <= 0)
+      return(null.answer)
+
+    # allow slightly more space to work around gcc bug #323
+    nsize <- ceiling(1.1 * nsize) + X$n + Y$n
+    
+    # now extract pairs
+    z <-
+      .C("Fcrosspairs",
+         nn1=as.integer(X$n),
+         x1=as.double(Xsort$x),
+         y1=as.double(Xsort$y),
+         nn2=as.integer(Y$n),
+         x2=as.double(Ysort$x),
+         y2=as.double(Ysort$y),
+         r=as.double(rmax),
+         noutmax=as.integer(nsize), 
+         nout=as.integer(integer(1L)),
+         iout=as.integer(integer(nsize)),
+         jout=as.integer(integer(nsize)), 
+         xiout=as.double(numeric(nsize)),
+         yiout=as.double(numeric(nsize)),
+         xjout=as.double(numeric(nsize)),
+         yjout=as.double(numeric(nsize)),
+         dxout=as.double(numeric(nsize)),
+         dyout=as.double(numeric(nsize)),
+         dout=as.double(numeric(nsize)),
+         status=as.integer(integer(1L)),
+         PACKAGE = "spatstat")
+    if(z$status != 0)
+      stop(paste("Internal error: C routine complains that insufficient space was allocated:", nsize))
+    # trim vectors to the length indicated
+    npairs <- z$nout
+    if(npairs <= 0)
+      return(null.answer)
+    actual <- seq_len(npairs)
+    i  <- z$iout[actual] # sic
+    j  <- z$jout[actual] 
+    xi <- z$xiout[actual]
+    yi <- z$yiout[actual]
+    xj <- z$xjout[actual]
+    yj <- z$yjout[actual]
+    dx <- z$dxout[actual]
+    dy <- z$dyout[actual]
+    d <-  z$dout[actual]
+  }
+  # convert i,j indices to original sequences
+  i <- ooX[i]
+  j <- ooY[j]
+  # done
+  switch(what,
+         all = {
+           answer <- list(i=i,
+                          j=j,
+                          xi=xi, 
+                          yi=yi,
+                          xj=xj,
+                          yj=yj,
+                          dx=dx,
+                          dy=dy,
+                          d=d)
+         },
+         indices = {
+           answer <- list(i=i, j=j)
+         },
+         ijd = {
+           answer <- list(i=i, j=j, d=d)
+         })
+  return(answer)
+}
+
+closethresh <- function(X, R, S, twice=TRUE, ...) {
+  # list all R-close pairs
+  # and indicate which of them are S-close (S < R)
+  # so that results are consistent with closepairs(X,S)
+  verifyclass(X, "ppp")
+  stopifnot(is.numeric(R) && length(R) == 1L && R >= 0)
+  stopifnot(is.numeric(S) && length(S) == 1L && S >= 0)
+  stopifnot(S < R)
+  ordered <- list(...)$ordered
+  if(missing(twice) && !is.null(ordered)) {
+    warning("Obsolete argument 'ordered' has been replaced by 'twice'")
+    twice <- ordered
+  }
+  npts <- npoints(X)
+  if(npts == 0)
+    return(list(i=integer(0), j=integer(0), th=logical(0)))
+  # sort points by increasing x coordinate
+  oo <- fave.order(X$x)
+  Xsort <- X[oo]
+  # First make an OVERESTIMATE of the number of pairs
+  nsize <- ceiling(4 * pi * (npts^2) * (R^2)/area(Window(X)))
+  nsize <- max(1024, nsize)
+  if(nsize > .Machine$integer.max) {
+    warning("Estimated number of close pairs exceeds maximum possible integer",
+            call.=FALSE)
+    nsize <- .Machine$integer.max
+  }
+  # Now extract pairs
+  x <- Xsort$x
+  y <- Xsort$y
+  r <- R
+  s <- S
+  ng <- nsize
+  storage.mode(x) <- "double"
+  storage.mode(y) <- "double"
+  storage.mode(r) <- "double"
+  storage.mode(s) <- "double"
+  storage.mode(ng) <- "integer"
+  z <- .Call("Vclosethresh",
+             xx=x, yy=y, rr=r, ss=s, nguess=ng,
+             PACKAGE = "spatstat")
+  if(length(z) != 3)
+    stop("Internal error: incorrect format returned from Vclosethresh")
+  i  <- z[[1L]]  # NB no increment required
+  j  <- z[[2L]]
+  th <- as.logical(z[[3L]])
+  
+  # convert i,j indices to original sequence
+  i <- oo[i]
+  j <- oo[j]
+  # fast C code only returns i < j
+  if(twice) {
+    iold <- i
+    jold <- j
+    i <- c(iold, jold)
+    j <- c(jold, iold)
+    th <- rep(th, 2)
+  }
+  # done
+  return(list(i=i, j=j, th=th))
+}
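+
+## Example (illustrative sketch):
+##   X <- runifpoint(100)
+##   ct <- closethresh(X, R=0.1, S=0.05)
+##   # ct$th flags which of the R-close pairs are also S-close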
+
+crosspairquad <- function(Q, rmax, what=c("all", "indices")) {
+  # find all close pairs X[i], U[j]
+  stopifnot(inherits(Q, "quad"))
+  what <- match.arg(what)
+  X <- Q$data
+  D <- Q$dummy
+  clX <- closepairs(X=X, rmax=rmax, what=what)
+  clXD <- crosspairs(X=X, Y=D, rmax=rmax, what=what)
+  # convert all indices to serial numbers in union.quad(Q)
+  # assumes data are listed first
+  clXD$j <- npoints(X) + clXD$j
+  result <- as.list(rbind(as.data.frame(clX), as.data.frame(clXD)))
+  return(result)
+}
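+
+## Example (illustrative sketch): for a quadrature scheme Q <- quadscheme(X),
+##   cp <- crosspairquad(Q, rmax=0.1, what="indices")
+##   # indices are serial numbers in union.quad(Q), data points first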
+
diff --git a/R/clusterfunctions.R b/R/clusterfunctions.R
new file mode 100644
index 0000000..61f1c46
--- /dev/null
+++ b/R/clusterfunctions.R
@@ -0,0 +1,101 @@
+## clusterfunctions.R
+##
+## Contains the generic functions:
+##  - clusterkernel
+##  - clusterfield
+##  - clusterradius.
+##
+##   $Revision: 1.3 $  $Date: 2015/02/23 00:21:39 $
+##
+
+clusterkernel <- function(model, ...) {
+  UseMethod("clusterkernel")
+}
+
+clusterkernel.kppm <- function(model, ...) {
+  kernelR <- Kpcf.kppm(model, what = "kernel")
+  f <- function(x, y = 0, ...){
+    kernelR(sqrt(x^2+y^2))
+  }
+  return(f)
+}
+
+clusterkernel.character <- function(model, ...){
+  info <- spatstatClusterModelInfo(model, onlyPCP = TRUE)
+  internalkernel <- info$kernel
+  dots <- list(...)
+  par <- c(kappa = 1, scale = dots$scale)
+  par <- info$checkpar(par, old = TRUE)
+  nam <- info$clustargsnames
+  margs <- NULL
+  if(!is.null(nam))
+    margs <- dots[nam]
+  f <- function(x, y = 0, ...){
+    internalkernel(par = par, rvals = sqrt(x^2+y^2), margs = margs)
+  }
+  return(f)
+}
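+
+## Example (illustrative sketch): evaluate the Thomas offspring kernel
+## at the origin
+##   f <- clusterkernel("Thomas", scale=0.1)
+##   f(0, 0)   # equals 1/(2 * pi * 0.1^2)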
+
+clusterfield <- function(model, locations = NULL, ...) {
+    UseMethod("clusterfield")
+}
+
+clusterfield.kppm <- function(model, locations = NULL, ...) {
+    f <- clusterkernel(model)
+    if(is.null(locations)){
+        if(!is.stationary(model))
+            stop("The model is non-stationary. The argument ",
+                 sQuote("locations"), " must be given.")
+        locations <- centroid.owin(Window(model), as.ppp = TRUE)
+    }
+    clusterfield.function(f, locations, ..., mu = model$mu)
+}
+
+clusterfield.character <- function(model, locations = NULL, ...){
+    f <- clusterkernel(model, ...)
+    clusterfield.function(f, locations, ...)
+}
+
+clusterfield.function <- function(model, locations = NULL, ..., mu = NULL) {
+    if(is.null(locations)){
+        locations <- ppp(.5, .5, window=square(1))
+    }
+    if(!inherits(locations, "ppp"))
+        stop("Argument ", sQuote("locations"), " must be a point pattern (ppp).")
+
+    if("sigma" %in% names(list(...)) && "sigma" %in% names(formals(model)))
+        warning("Currently ", sQuote("sigma"),
+                "cannot be passed as an extra argument to the kernel function. ",
+                "Please redefine the kernel function to use another argument name.")
+
+    rslt <- density(locations, kernel=model, ...)
+    if(is.null(mu))
+        return(rslt)
+    mu <- as.im(mu, W=rslt)
+    if(min(mu)<0)
+        stop("Cluster reference intensity ", sQuote("mu"), " is negative.")
+    return(rslt*mu)
+}
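+
+## Example (illustrative sketch): smoothed cluster field around three
+## parent locations
+##   pts <- ppp(c(0.2, 0.5, 0.8), rep(0.5, 3), window=square(1))
+##   Z <- clusterfield("Thomas", pts, scale=0.05)
+##   # Z is a pixel image; supplying 'mu' rescales it to an intensity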
+
+clusterradius <- function(model, ...){
+    UseMethod("clusterradius")
+}
+
+clusterradius.character <- function(model, ..., thresh = NULL, precision = FALSE){
+    info <- spatstatClusterModelInfo(model, onlyPCP = TRUE)
+    rmax <- info$range(..., thresh = thresh)
+    if(precision){
+        ddist <- function(r) info$ddist(r, ...)
+        prec <- integrate(ddist, 0, rmax)
+        attr(rmax, "prec") <- prec
+    }
+    return(rmax)
+}
+
+clusterradius.kppm <- function(model, ..., thresh = NULL, precision = FALSE){
+    a <- list(model = model$clusters,
+              thresh = thresh,
+              precision = precision)
+    a <- append(a, as.list(c(model$clustpar, model$clustargs)))
+    do.call(clusterradius.character, a)
+}
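+
+## Example (illustrative sketch):
+##   clusterradius("Thomas", scale=0.1)               # default rule: 4 * scale
+##   clusterradius("Thomas", scale=0.1, thresh=0.01)  # approx. 3.03 * scale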
diff --git a/R/clusterinfo.R b/R/clusterinfo.R
new file mode 100644
index 0000000..66a698e
--- /dev/null
+++ b/R/clusterinfo.R
@@ -0,0 +1,695 @@
+## lookup table of explicitly-known K functions and pcf
+## and algorithms for computing sensible starting parameters
+
+.Spatstat.ClusterModelInfoTable <- 
+  list(
+       Thomas=list(
+         ## Thomas process: old par = (kappa, sigma2) (internally used everywhere)
+         ## Thomas process: new par = (kappa, scale) (officially recommended for input/output)
+         modelname = "Thomas process", # In modelname field of mincon fv obj.
+         descname = "Thomas process", # In desc field of mincon fv obj.
+         modelabbrev = "Thomas process", # In fitted obj.
+         printmodelname = function(...) "Thomas process", # Used by print.kppm
+         parnames = c("kappa", "sigma2"),
+         clustargsnames = NULL,
+         checkpar = function(par, old = TRUE){
+             if(is.null(par))
+                 par <- c(kappa=1,scale=1)
+             if(any(par<=0))
+                 stop("par values must be positive.")
+             nam <- check.named.vector(par, c("kappa","sigma2"),
+                                       onError="null")
+             if(is.null(nam)) {
+               check.named.vector(par, c("kappa","scale"))
+               names(par)[2L] <- "sigma2"
+               par[2L] <- par[2L]^2
+             }
+             if(!old){
+                 names(par)[2L] <- "scale"
+                 par[2L] <- sqrt(par[2L])
+             }
+             return(par)
+         },
+         checkclustargs = function(margs, old = TRUE) list(),
+         resolvedots = function(...){
+             ## resolve dots for kppm and friends allowing for old/new par syntax
+             dots <- list(...)
+             nam <- names(dots)
+             out <- list()
+             if("ctrl" %in% nam){
+                 out$ctrl <- dots$ctrl
+             } else{
+                 out$ctrl <- dots[nam %in% c("p", "q", "rmin", "rmax")]
+             }
+             chk <- .Spatstat.ClusterModelInfoTable$Thomas$checkpar
+             if(!is.null(dots$startpar)) out$startpar <- chk(dots$startpar)
+             return(out)
+         },
+         # density function for the distance to offspring
+         ddist = function(r, scale, ...) {
+             2 * pi * r * dnorm(r, 0, scale)/sqrt(2*pi*scale^2)
+         },
+         ## Practical range of clusters
+         range = function(...){
+             dots <- list(...)
+             par <- dots$par
+             # Choose the first of the possible supplied values for scale:
+             scale <- c(dots$scale, dots$par[["scale"]], dots$sigma, dots$par[["sigma"]])[1L]
+             if(is.null(scale))
+                 stop("Argument ", sQuote("scale"), " must be given.")
+             thresh <- dots$thresh
+             if(!is.null(thresh)){
+               ## The squared length of isotropic Gaussian (sigma)
+               ## is exponential with mean 2 sigma^2
+               rmax <- scale * sqrt(2 * qexp(thresh, lower.tail=FALSE))
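+               ## e.g. thresh = 0.01 gives rmax ~ 3.03 * scale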
+               ## old code
+               ##  ddist <- .Spatstat.ClusterModelInfoTable$Thomas$ddist
+               ##  kernel0 <- clusterkernel("Thomas", scale = scale)(0,0)
+               ##  f <- function(r) ddist(r, scale = scale)-thresh*kernel0
+               ##  rmax <- uniroot(f, lower = scale, upper = 1000 * scale)$root
+             } else{
+                 rmax <- 4*scale
+             }
+             return(rmax)
+         },
+         kernel = function(par, rvals, ...) {
+             scale <- sqrt(par[2L])
+             dnorm(rvals, 0, scale)/sqrt(2*pi*scale^2)
+         },
+         isPCP=TRUE,
+         ## K-function
+         K = function(par,rvals, ...){
+           if(any(par <= 0))
+             return(rep.int(Inf, length(rvals)))
+           pi*rvals^2+(1-exp(-rvals^2/(4*par[2L])))/par[1L]
+         },
+         ## pair correlation function
+         pcf= function(par,rvals, ...){
+           if(any(par <= 0))
+             return(rep.int(Inf, length(rvals)))
+           1 + exp(-rvals^2/(4 * par[2L]))/(4 * pi * par[1L] * par[2L])
+         },
+         ## sensible starting parameters
+         selfstart = function(X) {
+           kappa <- intensity(X)
+           sigma2 <- 4 * mean(nndist(X))^2
+           c(kappa=kappa, sigma2=sigma2)
+         },
+         ## meaningful model parameters
+         interpret = function(par, lambda) {
+           kappa <- par[["kappa"]]
+           sigma <- sqrt(par[["sigma2"]])
+           mu <- if(is.numeric(lambda) && length(lambda) == 1)
+             lambda/kappa else NA
+           c(kappa=kappa, sigma=sigma, mu=mu)
+         },
+         ## Experimental: convert to/from canonical cluster parameters
+         tocanonical = function(par) {
+           kappa <- par[[1L]]
+           sigma2 <- par[[2L]]
+           c(strength=1/(kappa * sigma2), scale=sqrt(sigma2))
+         },
+         tohuman = function(can) {
+           strength <- can[[1L]]
+           scale <- can[[2L]]
+           sigma2 <- scale^2
+           c(kappa=1/(strength * sigma2), sigma2=sigma2)
+         }
+         ),
+       ## ...............................................
+       MatClust=list(
+         ## Matern Cluster process: old par = (kappa, R) (internally used everywhere)
+         ## Matern Cluster process: new par = (kappa, scale) (officially recommended for input/output)
+         modelname = "Matern cluster process", # In modelname field of mincon fv obj.
+         descname = "Matern cluster process", # In desc field of mincon fv obj.
+         modelabbrev = "Matern cluster process", # In fitted obj.
+         printmodelname = function(...) "Matern cluster process", # Used by print.kppm
+         parnames = c("kappa", "R"),
+         clustargsnames = NULL,
+         checkpar = function(par, old = TRUE){
+             if(is.null(par))
+                 par <- c(kappa=1,scale=1)
+             if(any(par<=0))
+                 stop("par values must be positive.")
+             nam <- check.named.vector(par, c("kappa","R"), onError="null")
+             if(is.null(nam)) {
+               check.named.vector(par, c("kappa","scale"))
+               names(par)[2L] <- "R"
+             }
+             if(!old){
+                 names(par)[2L] <- "scale"
+             }
+             return(par)
+         },
+         # density function for the distance to offspring
+         ddist = function(r, scale, ...) {
+             ifelse(r>scale, 0, 2 * r / scale^2)
+         },
+         ## Practical range of clusters
+         range = function(...){
+             dots <- list(...)
+             par <- dots$par
+             # Choose the first of the possible supplied values for scale:
+             scale <- c(dots$scale, dots$par[["scale"]], dots$R, dots$par[["R"]])[1L]
+             if(is.null(scale))
+                 stop("Argument ", sQuote("scale"), " must be given.")
+             if(!is.null(dots$thresh))
+                 warning("Argument ", sQuote("thresh"), " is ignored for Matern Cluster model")
+             return(scale)
+         },
+         checkclustargs = function(margs, old = TRUE) list(),
+         resolvedots = function(...){
+             ## resolve dots for kppm and friends allowing for old/new par syntax
+             dots <- list(...)
+             nam <- names(dots)
+             out <- list()
+             if("ctrl" %in% nam){
+                 out$ctrl <- dots$ctrl
+             } else{
+                 out$ctrl <- dots[nam %in% c("p", "q", "rmin", "rmax")]
+             }
+             chk <- .Spatstat.ClusterModelInfoTable$MatClust$checkpar
+             if(!is.null(dots$startpar)) out$startpar <- chk(dots$startpar)
+             return(out)
+         },
+         kernel = function(par, rvals, ...) {
+             scale <- par[2L]
+             ifelse(rvals>scale, 0, 1/(pi*scale^2))
+         },
+         isPCP=TRUE,
+         K = function(par,rvals, ..., funaux){
+           if(any(par <= 0))
+             return(rep.int(Inf, length(rvals)))
+           kappa <- par[1L]
+           R <- par[2L]
+           Hfun <- funaux$Hfun
+           y <- pi * rvals^2 + (1/kappa) * Hfun(rvals/(2 * R))
+           return(y)
+         },
+         pcf= function(par,rvals, ..., funaux){
+             if(any(par <= 0))
+               return(rep.int(Inf, length(rvals)))
+             kappa <- par[1L]
+             R <- par[2L]
+             g <- funaux$g
+             y <- 1 + (1/(pi * kappa * R^2)) * g(rvals/(2 * R))
+             return(y)
+           },
+         funaux=list(
+           Hfun=function(zz) {
+             ok <- (zz < 1)
+             h <- numeric(length(zz))
+             h[!ok] <- 1
+             z <- zz[ok]
+             h[ok] <- 2 + (1/pi) * (
+                                    (8 * z^2 - 4) * acos(z)
+                                    - 2 * asin(z)
+                                    + 4 * z * sqrt((1 - z^2)^3)
+                                    - 6 * z * sqrt(1 - z^2)
+                                    )
+             return(h)
+           },
+           DOH=function(zz) {
+             ok <- (zz < 1)
+             h <- numeric(length(zz))
+             h[!ok] <- 0
+             z <- zz[ok]
+             h[ok] <- (16/pi) * (z * acos(z) - (z^2) * sqrt(1 - z^2))
+             return(h)
+           },
+           ## g(z) = DOH(z)/z has a limit at z=0.
+           g=function(zz) {
+             ok <- (zz < 1)
+             h <- numeric(length(zz))
+             h[!ok] <- 0
+             z <- zz[ok]
+             h[ok] <- (2/pi) * (acos(z) - z * sqrt(1 - z^2))
+             return(h)
+           }),
+         ## sensible starting parameters
+         selfstart = function(X) {
+           kappa <- intensity(X)
+           R <- 2 * mean(nndist(X)) 
+           c(kappa=kappa, R=R)
+         },
+         ## meaningful model parameters
+         interpret = function(par, lambda) {
+           kappa <- par[["kappa"]]
+           R     <- par[["R"]]
+           mu    <- if(is.numeric(lambda) && length(lambda) == 1)
+             lambda/kappa else NA           
+           c(kappa=kappa, R=R, mu=mu)
+         }
+         ),
+       ## ...............................................
+       Cauchy=list(
+         ## Neyman-Scott with Cauchy clusters: old par = (kappa, eta2) (internally used everywhere)
+         ## Neyman-Scott with Cauchy clusters: new par = (kappa, scale) (officially recommended for input/output)
+         modelname = "Neyman-Scott process with Cauchy kernel", # In modelname field of mincon fv obj.
+         descname = "Neyman-Scott process with Cauchy kernel", # In desc field of mincon fv obj.
+         modelabbrev = "Cauchy process", # In fitted obj.
+         printmodelname = function(...) "Cauchy process", # Used by print.kppm
+         parnames = c("kappa", "eta2"),
+         clustargsnames = NULL,
+         checkpar = function(par, old = TRUE){
+             if(is.null(par))
+                 par <- c(kappa=1,scale=1)
+             if(any(par<=0))
+                 stop("par values must be positive.")
+             nam <- check.named.vector(par, c("kappa","eta2"), onError="null")
+             if(is.null(nam)) {
+                 check.named.vector(par, c("kappa","scale"))
+                 names(par)[2L] <- "eta2"
+                 par[2L] <- (2*par[2L])^2
+             }
+             if(!old){
+                 names(par)[2L] <- "scale"
+                 par[2L] <- sqrt(par[2L])/2
+             }
+             return(par)
+         },
+         checkclustargs = function(margs, old = TRUE) list(),
+         resolvedots = function(...){
+             ## resolve dots for kppm and friends allowing for old/new par syntax
+             dots <- list(...)
+             nam <- names(dots)
+             out <- list()
+             if("ctrl" %in% nam){
+                 out$ctrl <- dots$ctrl
+             } else{
+                 out$ctrl <- dots[nam %in% c("p", "q", "rmin", "rmax")]
+             }
+             chk <- .Spatstat.ClusterModelInfoTable$Cauchy$checkpar
+             if(!is.null(dots$startpar)) out$startpar <- chk(dots$startpar)
+             return(out)
+         },
+         # density function for the distance to offspring
+         ddist = function(r, scale, ...) {
+             r/(scale^2) *  (1 + (r / scale)^2)^(-3/2)
+         },
+         ## Practical range of clusters
+         range = function(...){
+             dots <- list(...)
+             # Choose the first of the possible supplied values for scale:
+             scale <- c(dots$scale, dots$par[["scale"]])[1L]
+             if(is.null(scale))
+                 stop("Argument ", sQuote("scale"), " must be given.")
+             thresh <- dots$thresh %orifnull% 0.01
+             ## integral of ddist(r) dr is 1 - (1+(r/scale)^2)^(-1/2)
+             ## solve for integral = 1-thresh:
+             rmax <- scale * sqrt(1/thresh^2 - 1)
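+             ## e.g. the default thresh = 0.01 gives rmax ~ 100 * scale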
+             ## old code
+             ## ddist <- .Spatstat.ClusterModelInfoTable$Cauchy$ddist
+             ## kernel0 <- clusterkernel("Cauchy", scale = scale)(0,0)
+             ## f <- function(r) ddist(r, scale = scale)-thresh*kernel0
+             ## rmax <- uniroot(f, lower = scale, upper = 1000 * scale)$root
+             return(rmax)
+         },
+         kernel = function(par, rvals, ...) {
+             scale <- sqrt(par[2L])/2
+             1/(2*pi*scale^2)*((1 + (rvals/scale)^2)^(-3/2))
+         },
+         isPCP=TRUE,
+         K = function(par,rvals, ...){
+           if(any(par <= 0))
+             return(rep.int(Inf, length(rvals)))
+           pi*rvals^2 + (1 - 1/sqrt(1 + rvals^2/par[2L]))/par[1L]
+         },
+         pcf= function(par,rvals, ...){
+           if(any(par <= 0))
+             return(rep.int(Inf, length(rvals)))
+           1 + ((1 + rvals^2/par[2L])^(-1.5))/(2 * pi * par[2L] * par[1L])
+         },
+         selfstart = function(X) {
+           kappa <- intensity(X)
+           eta2 <- 4 * mean(nndist(X))^2
+           c(kappa = kappa, eta2 = eta2)
+         },
+         ## meaningful model parameters
+         interpret = function(par, lambda) {
+           kappa <- par[["kappa"]]
+           omega <- sqrt(par[["eta2"]])/2
+           mu <- if(is.numeric(lambda) && length(lambda) == 1)
+             lambda/kappa else NA
+           c(kappa=kappa, omega=omega, mu=mu)
+         }
+         ),
+       ## ...............................................
+       VarGamma=list(
+         ## Neyman-Scott with VarianceGamma/Bessel clusters: old par = (kappa, eta) (internally used everywhere)
+         ## Neyman-Scott with VarianceGamma/Bessel clusters: new par = (kappa, scale) (officially recommended for input/output)
+         modelname = "Neyman-Scott process with Variance Gamma kernel", # In modelname field of mincon fv obj.
+         descname = "Neyman-Scott process with Variance Gamma kernel", # In desc field of mincon fv obj.
+         modelabbrev = "Variance Gamma process", # In fitted obj.
+         printmodelname = function(obj){ # Used by print.kppm
+             paste0("Variance Gamma process (nu=",
+                    signif(obj$clustargs[["nu"]], 2), ")")
+         },
+         parnames = c("kappa", "eta"),
+         clustargsnames = "nu",
+         checkpar = function(par, old = TRUE){
+             if(is.null(par))
+                 par <- c(kappa=1,scale=1)
+             if(any(par<=0))
+                 stop("par values must be positive.")
+             nam <- check.named.vector(par, c("kappa","eta"), onError="null")
+             if(is.null(nam)) {
+               check.named.vector(par, c("kappa","scale"))
+               names(par)[2L] <- "eta"
+             }
+             if(!old) names(par)[2L] <- "scale"
+             return(par)
+         },
+         checkclustargs = function(margs, old = TRUE){
+             if(!old)
+                 margs <- list(nu=margs$nu.ker)
+             return(margs)
+         },
+         resolvedots = function(...){
+             ## resolve dots for kppm and friends allowing for old/new par syntax
+             dots <- list(...)
+             nam <- names(dots)
+             out <- list()
+             if("ctrl" %in% nam){
+                 out$ctrl <- dots$ctrl
+             } else{
+                 out$ctrl <- dots[nam %in% c("p", "q", "rmin", "rmax")]
+             }
+             chk <- .Spatstat.ClusterModelInfoTable$VarGamma$checkpar
+             if(!is.null(dots$startpar)) out$startpar <- chk(dots$startpar)
+             nu <- dots$nu
+             if(is.null(nu)){
+                 nu <- try(resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf)$nu.ker,
+                           silent = TRUE)
+                 if(inherits(nu, "try-error"))
+                     nu <- -1/4
+             } else{
+                 check.1.real(nu)
+                 stopifnot(nu > -1/2)
+             }
+             out$margs <- list(nu.ker=nu, nu.pcf=2*nu+1)
+             out$covmodel <- list(type="Kernel", model="VarGamma", margs=out$margs)
+             return(out)
+         },
+         # density function for the distance to offspring
+         ddist = function(r, scale, nu, ...) {
+             numer <- ((r/scale)^(nu+1)) * besselK(r/scale, nu)
+             numer[r==0] <- 0
+             denom <- (2^nu) * scale * gamma(nu + 1)
+             numer/denom
+         },
+         ## Practical range of clusters
+         range = function(...){
+             dots <- list(...)
+             # Choose the first of the possible supplied values for scale:
+             scale <- c(dots$scale, dots$par[["scale"]])[1L]
+             if(is.null(scale))
+                 stop("Argument ", sQuote("scale"), " must be given.")
+             # Find value of nu:
+             extra <- .Spatstat.ClusterModelInfoTable$VarGamma$resolvedots(...)
+             nu <- .Spatstat.ClusterModelInfoTable$VarGamma$checkclustargs(extra$margs, old=FALSE)$nu
+             if(is.null(nu))
+                 stop("Argument ", sQuote("nu"), " must be given.")
+             thresh <- dots$thresh
+             if(is.null(thresh))
+                 thresh <- .001
+             ddist <- .Spatstat.ClusterModelInfoTable$VarGamma$ddist
+             f1 <- function(rmx) {
+               integrate(ddist, 0, rmx, scale=scale, nu=nu)$value - (1 - thresh)
+             }
+             f <- Vectorize(f1)
+             ## old code
+             ## kernel0 <- clusterkernel("VarGamma", scale = scale, nu = nu)(0,0)
+             ## f <- function(r) ddist(r, scale = scale, nu = nu) - thresh*kernel0
+             rmax <- uniroot(f, lower = scale, upper = 1000 * scale)$root
+             return(rmax)
+         },
+         ## kernel function in polar coordinates (no angular argument).
+         kernel = function(par, rvals, ..., margs) {
+             scale <- as.numeric(par[2L])
+             nu <- margs$nu
+             if(is.null(nu))
+                 stop("Argument ", sQuote("nu"), " is missing.")
+             numer <- ((rvals/scale)^nu) * besselK(rvals/scale, nu)
+             numer[rvals==0] <- ifelse(nu>0, 2^(nu-1)*gamma(nu), Inf)
+             denom <- pi * (2^(nu+1)) * scale^2 * gamma(nu + 1)
+             numer/denom
+         },
+         isPCP=TRUE,
+         K = local({
+           ## K function requires integration of pair correlation
+           xgx <- function(x, par, nu.pcf) {
+             ## x * pcf(x) without check on par values
+             numer <- (x/par[2L])^nu.pcf * besselK(x/par[2L], nu.pcf)
+             denom <- 2^(nu.pcf+1) * pi * par[2L]^2 * par[1L] * gamma(nu.pcf + 1)
+             return(x * (1 + numer/denom))
+           }
+           vargammaK <- function(par,rvals, ..., margs){
+             ## margs = list(.. nu.pcf.. ) 
+             if(any(par <= 0))
+               return(rep.int(Inf, length(rvals)))
+             nu.pcf <- margs$nu.pcf
+             out <- numeric(length(rvals))
+             ok <- (rvals > 0)
+             rvalsok <- rvals[ok]
+             outok <- numeric(sum(ok))
+             for (i in 1:length(rvalsok))
+               outok[i] <- 2 * pi * integrate(xgx,
+                                              lower=0, upper=rvalsok[i],
+                                              par=par, nu.pcf=nu.pcf)$value
+             out[ok] <- outok
+             return(out)
+           }
+           ## An alternative scheme, integrating over successive subintervals, was begun below but is unfinished:
+           ## vargammaK <- function(par,rvals, ..., margs){
+           ##   ## margs = list(.. nu.pcf.. ) 
+           ##   if(any(par <= 0))
+           ##     return(rep.int(Inf, length(rvals)))
+           ##   nu.pcf <- margs$nu.pcf
+           ##   out <- numeric(length(rvals))
+           ##   out[1L] <- if(rvals[1L] == 0) 0 else 
+           ##   integrate(xgx, lower=0, upper=rvals[1L],
+           ##             par = par, nu.pcf=nu.pcf)$value
+           ##   for (i in 2:length(rvals)) {
+           ##     delta <- integrate(xgx,
+           ##                        lower=rvals[i-1L], upper=rvals[i],
+           ##                        par=par, nu.pcf=nu.pcf)
+           ##     out[i]=out[i-1L]+delta$value
+           ##   }
+           ##   return(out)
+           ## }
+           vargammaK
+           }), ## end of 'local'
+         pcf= function(par,rvals, ..., margs){
+           ## margs = list(..nu.pcf..)
+           if(any(par <= 0))
+             return(rep.int(Inf, length(rvals)))
+           nu.pcf <- margs$nu.pcf
+           sig2 <- 1 / (4 * pi * (par[2L]^2) * nu.pcf * par[1L])
+           denom <- 2^(nu.pcf - 1) * gamma(nu.pcf)
+           rr <- rvals / par[2L]
+           ## Matern correlation function
+           fr <- ifelseXB(rr > 0,
+                        (rr^nu.pcf) * besselK(rr, nu.pcf) / denom,
+                        1)
+           return(1 + sig2 * fr)
+         },
+         parhandler = function(..., nu.ker = -1/4) {
+           check.1.real(nu.ker)
+           stopifnot(nu.ker > -1/2)
+           nu.pcf <- 2 * nu.ker + 1
+           return(list(type="Kernel",
+                       model="VarGamma",
+                       margs=list(nu.ker=nu.ker,
+                                  nu.pcf=nu.pcf)))
+         },
+         ## sensible starting values
+         selfstart = function(X) {
+           kappa <- intensity(X)
+           eta <- 2 * mean(nndist(X))
+           c(kappa=kappa, eta=eta)
+         },
+         ## meaningful model parameters
+         interpret = function(par, lambda) {
+           kappa <- par[["kappa"]]
+           omega <- par[["eta"]]
+           mu <- if(is.numeric(lambda) && length(lambda) == 1)
+             lambda/kappa else NA
+           c(kappa=kappa, omega=omega, mu=mu)
+         }
+         ),
+       ## ...............................................
+       LGCP=list(
+         ## Log Gaussian Cox process: old par = (sigma2, alpha) (internally used everywhere)
+         ## Log Gaussian Cox process: new par = (var, scale) (officially recommended for input/output)
+         modelname = "Log-Gaussian Cox process", # In modelname field of mincon fv obj.
+         descname = "LGCP", # In desc field of mincon fv obj.
+         modelabbrev = "log-Gaussian Cox process", # In fitted obj.
+         printmodelname = function(...) "log-Gaussian Cox process", # Used by print.kppm
+         parnames = c("sigma2", "alpha"),
+         checkpar = function(par, old = TRUE){
+             if(is.null(par))
+                 par <- c(var=1,scale=1)
+             if(any(par<=0))
+                 stop("par values must be positive.")
+             nam <- check.named.vector(par, c("sigma2","alpha"), onError="null")
+             if(is.null(nam)) {
+                 check.named.vector(par, c("var","scale"))
+                 names(par) <- c("sigma2", "alpha")
+             }
+             if(!old) names(par) <- c("var", "scale")
+             return(par)
+         },
+         checkclustargs = function(margs, old = TRUE) return(margs),
+         resolvedots = function(...){
+             ## resolve dots for kppm and friends allowing for old/new par syntax
+             dots <- list(...)
+             nam <- names(dots)
+             out <- list()
+             if("ctrl" %in% nam){
+                 out$ctrl <- dots$ctrl
+             } else{
+                 out$ctrl <- dots[nam %in% c("p", "q", "rmin", "rmax")]
+             }
+             chk <- .Spatstat.ClusterModelInfoTable$LGCP$checkpar
+             if(!is.null(dots$startpar)) out$startpar <- chk(dots$startpar)
+             cmod <- dots$covmodel
+             model <- cmod$model %orifnull% dots$model %orifnull% "exponential"
+             margs <- NULL
+             if(!identical(model, "exponential")) {
+               ## get the 'model generator' 
+               modgen <- getRandomFieldsModelGen(model)
+               attr(model, "modgen") <- modgen
+               if(is.null(cmod)){
+                 margsnam <- names(formals(modgen))
+                 margsnam <- margsnam[!(margsnam %in% c("var", "scale"))]
+                 margs <- dots[nam %in% margsnam]
+               } else{
+                 margs <- cmod[names(cmod)!="model"]
+               }
+             }
+             if(length(margs)==0) {
+                 margs <- NULL
+             } else {
+                 # detect anisotropic model
+                 if("Aniso" %in% names(margs))
+                   stop("Anisotropic covariance models cannot be used",
+                        call.=FALSE)
+             }
+             out$margs <- margs
+             out$model <- model
+             out$covmodel <- list(type="Covariance", model=model, margs=margs)
+             return(out)
+         },
+         isPCP=FALSE,
+         ## calls relevant covariance function from RandomFields package
+         K = function(par, rvals, ..., model, margs) {
+           if(any(par <= 0))
+             return(rep.int(Inf, length(rvals)))
+           if(model == "exponential") {
+             ## For efficiency and to avoid need for RandomFields package
+             integrand <- function(r,par,...) 2*pi*r*exp(par[1L]*exp(-r/par[2L]))
+           } else {
+             kraeverRandomFields()
+             integrand <- function(r,par,model,margs) {
+               modgen <- attr(model, "modgen")
+               if(length(margs) == 0) {
+                 mod <- modgen(var=par[1L], scale=par[2L])
+               } else {
+                 mod <- do.call(modgen,
+                                append(list(var=par[1L], scale=par[2L]),
+                                       margs))
+               }
+               2*pi *r *exp(RandomFields::RFcov(model=mod, x=r))
+             }
+           }
+           nr <- length(rvals)
+           th <- numeric(nr)
+           if(spatstat.options("fastK.lgcp")) {
+             ## integrate using the trapezoidal rule
+             fvals <- integrand(r=rvals, par=par, model=model, margs=margs)
+             th[1L] <- rvals[1L] * fvals[1L]/2
+             if(nr > 1)
+               for(i in 2:nr)
+                 th[i] <- th[i-1L] +
+                   (rvals[i] - rvals[i-1L]) * (fvals[i] + fvals[i-1L])/2
+           } else {
+             ## integrate using 'integrate'
+             th[1L] <- if(rvals[1L] == 0) 0 else 
+             integrate(integrand,lower=0,upper=rvals[1L],
+                       par=par,model=model,margs=margs)$value
+             if(nr > 1) {
+               for (i in 2:nr) {
+                 delta <- integrate(integrand,
+                                    lower=rvals[i-1L], upper=rvals[i],
+                                    par=par, model=model, margs=margs)
+                 th[i] <- th[i-1L] + delta$value
+               }
+             }
+           }
+           return(th)
+         },
+         pcf= function(par, rvals, ..., model, margs) {
+           if(any(par <= 0))
+             return(rep.int(Inf, length(rvals)))
+           if(model == "exponential") {
+             ## For efficiency and to avoid need for RandomFields package
+             gtheo <- exp(par[1L]*exp(-rvals/par[2L]))
+           } else {
+             kraeverRandomFields()
+             modgen <- attr(model, "modgen")
+             if(length(margs) == 0) {
+               mod <- modgen(var=par[1L], scale=par[2L])
+             } else {
+               mod <- do.call(modgen,
+                              append(list(var=par[1L], scale=par[2L]),
+                                     margs))
+             }
+             gtheo <- exp(RandomFields::RFcov(model=mod, x=rvals))
+           }
+           return(gtheo)
+         },
+         parhandler=function(model = "exponential", ...) {
+           if(!is.character(model))
+             stop("Covariance function model should be specified by name")
+           margs <- c(...)
+           if(!identical(model, "exponential")) {
+             ## get the 'model generator' 
+             modgen <- getRandomFieldsModelGen(model)
+             attr(model, "modgen") <- modgen
+           }
+           return(list(type="Covariance", model=model, margs=margs))
+         },
+         ## sensible starting values
+         selfstart = function(X) {
+           alpha <- 2 * mean(nndist(X))
+           c(sigma2=1, alpha=alpha)
+         },
+         ## meaningful model parameters
+         interpret = function(par, lambda) {
+           sigma2 <- par[["sigma2"]]
+           alpha  <- par[["alpha"]]
+           mu <- if(is.numeric(lambda) && length(lambda) == 1 && lambda > 0)
+             log(lambda) - sigma2/2 else NA
+           c(sigma2=sigma2, alpha=alpha, mu=mu)
+         }
+         )
+  )
+
+spatstatClusterModelInfo <- function(name, onlyPCP = FALSE) {
+  if(inherits(name, "detpointprocfamily"))
+    return(spatstatDPPModelInfo(name))
+  if(!is.character(name) || length(name) != 1)
+    stop("Argument must be a single character string", call.=FALSE)
+  TheTable <- .Spatstat.ClusterModelInfoTable
+  nama2 <- names(TheTable)
+  if(onlyPCP){
+    ok <- sapply(TheTable, getElement, name="isPCP")
+    nama2 <- nama2[ok]
+  } 
+  if(!(name %in% nama2))
+    stop(paste(sQuote(name), "is not recognised;",
+               "valid names are", commasep(sQuote(nama2))),
+         call.=FALSE)
+  out <- TheTable[[name]]
+  return(out)
+}
+
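+## Editor's usage sketch (not run): querying the table. "Thomas" is one of
+## the recognised cluster model names and 'redwood' is a standard spatstat
+## dataset; this assumes the "Thomas" entry, like those above, provides
+## 'isPCP' and 'selfstart'.
+if(FALSE) {
+  info <- spatstatClusterModelInfo("Thomas")
+  info$isPCP              # TRUE: Thomas is a Poisson cluster process
+  info$selfstart(redwood) # rough starting values for the parameters
+}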
diff --git a/R/clusterset.R b/R/clusterset.R
new file mode 100644
index 0000000..77535f6
--- /dev/null
+++ b/R/clusterset.R
@@ -0,0 +1,76 @@
+#
+#   clusterset.R
+#
+#   Allard-Fraley estimator of cluster region
+#
+#   $Revision: 1.12 $  $Date: 2016/02/16 01:39:12 $
+#
+
+clusterset <- function(X, what=c("marks", "domain"),
+                       ...,
+                       verbose=TRUE,
+                       fast=FALSE,
+                       exact=!fast) {
+  stopifnot(is.ppp(X))
+  what <- match.arg(what, several.ok=TRUE)
+  if(!missing(exact)) stopifnot(is.logical(exact))
+  if(fast && exact)
+    stop("fast=TRUE is incompatible with exact=TRUE")
+  # compute duplication exactly as in deldir, or the universe will explode
+  X <- unique(unmark(X), rule="deldir", warn=TRUE)
+  n <- npoints(X)
+  W <- as.owin(X)
+  # discretised Dirichlet tessellation
+  if(verbose) cat("Computing Dirichlet tessellation...")
+  if(fast || !exact)
+    cellid <- as.im(nnfun(X), ...)
+  # compute tile areas
+  if(fast) {
+    a <- table(factor(as.vector(as.matrix(cellid)), levels=1:n))
+    if(verbose) cat("done.\n")
+    a <- a + 0.5
+    A <- sum(a)
+  } else {
+    d <- dirichlet(X)
+    if(verbose) cat("done.\n")
+    D <- tiles(d)
+    suppressWarnings(id <- as.integer(names(D)))
+    if(anyNA(id) && ("marks" %in% what))
+      stop("Unable to map Dirichlet tiles to data points")
+    A <- area(W)
+    a <- unlist(lapply(D, area))
+  }
+  # determine optimal selection of tiles
+  ntile <- length(a)
+  o <- order(a)
+  b <- cumsum(a[o])
+  m <- seq_len(ntile)
+  logl <- -n * log(n) + m * log(m/b) + (n-m) * log((n-m)/(A-b))
+  mopt <- which.max(logl)
+  picked <- o[seq_len(mopt)]
+  ## map tiles to points
+  if(!fast) picked <- id[picked]
+  ## logical vector
+  is.picked <- rep.int(FALSE, n)
+  is.picked[picked] <- TRUE
+  # construct result
+  out <- list(marks=NULL, domain=NULL)
+  if("marks" %in% what) {
+    ## label points
+    yesno <- factor(ifelse(is.picked, "yes", "no"), levels=c("no", "yes"))
+    out$marks <- X %mark% yesno
+  }
+  if("domain" %in% what) {
+    if(verbose) cat("Computing cluster set...")
+    if(exact) {
+      domain <- do.call(union.owin, unname(D[is.picked]))
+      domain <- rebound.owin(domain, as.rectangle(W))
+    } else {
+      domain <- eval.im(is.picked[cellid])
+    }
+    out$domain <- domain
+    if(verbose) cat("done.\n")
+  }
+  out <- if(length(what) == 1L) out[[what]] else out
+  return(out)
+}
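+
+## Editor's usage sketch (not run): the Allard-Fraley estimate of the
+## cluster region for the standard spatstat dataset 'redwood'.
+if(FALSE) {
+  D <- clusterset(redwood, what="domain")
+  plot(D, main="Allard-Fraley cluster region")
+  plot(redwood, add=TRUE)
+}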
diff --git a/R/colourschemes.R b/R/colourschemes.R
new file mode 100644
index 0000000..18067ba
--- /dev/null
+++ b/R/colourschemes.R
@@ -0,0 +1,38 @@
+#
+#  colourschemes.R
+#
+#  $Revision: 1.3 $  $Date: 2013/07/17 04:53:48 $
+#
+
+beachcolourmap <- function(range, ...) {
+  col <- beachcolours(range, ...)
+  z <- colourmap(col, range=range)
+  return(z)
+}
+
+beachcolours <- function(range, sealevel = 0, monochrome=FALSE,
+                         ncolours=if(monochrome) 16 else 64,
+                         nbeach=1) {
+  if(monochrome)
+    return(grey(seq(from=0,to=1,length.out=ncolours)))
+  stopifnot(is.numeric(range) && length(range) == 2)
+  stopifnot(all(is.finite(range)))
+  depths <- range[1L]
+  peaks <- range[2L]
+  dv <- diff(range)/(ncolours - 1L)
+  epsilon <- nbeach * dv/2
+  lowtide <- max(sealevel - epsilon, depths)
+  hightide <-  min(sealevel + epsilon, peaks)
+  countbetween <- function(a, b, delta) { max(0, round((b-a)/delta)) }
+  nsea <- countbetween(depths, lowtide, dv)
+  nbeach <- countbetween(lowtide,  hightide, dv)
+  nland <- countbetween(hightide,  peaks, dv)
+  colours <- character(0)
+  if(nsea > 0)  colours <- rev(rainbow(nsea, start=3/6,end=4/6)) # cyan/blue
+  if(nbeach > 0)  colours <- c(colours,
+                             rev(rainbow(nbeach, start=3/12,end=5/12))) # green
+  if(nland > 0)  colours <- c(colours,
+                              rev(rainbow(nland, start=0, end=1/6)))  # red/yellow
+  return(colours)
+}
+
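+## Editor's usage sketch (not run): a diverging palette for values in
+## [-2, 5], with blues below sea level 0 and yellows/reds above.
+if(FALSE) {
+  cols <- beachcolours(c(-2, 5), sealevel=0)
+  cmap <- beachcolourmap(c(-2, 5), sealevel=0)
+  plot(cmap)
+}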
diff --git a/R/colourtables.R b/R/colourtables.R
new file mode 100755
index 0000000..78f5ced
--- /dev/null
+++ b/R/colourtables.R
@@ -0,0 +1,530 @@
+#
+# colourtables.R
+#
+# support for colour maps and other lookup tables
+#
+# $Revision: 1.37 $ $Date: 2016/02/16 01:39:12 $
+#
+
+colourmap <- function(col, ..., range=NULL, breaks=NULL, inputs=NULL) {
+  if(nargs() == 0) {
+    ## null colour map
+    f <- lut()
+  } else {
+    ## validate colour data 
+    col2hex(col)
+    ## store without conversion
+    f <- lut(col, ..., range=range, breaks=breaks, inputs=inputs)
+  }
+  class(f) <- c("colourmap", class(f))
+  f
+}
+
+lut <- function(outputs, ..., range=NULL, breaks=NULL, inputs=NULL) {
+  if(nargs() == 0) {
+    ## null lookup table
+    f <- function(x, what="value"){NULL}
+    class(f) <- c("lut", class(f))
+    attr(f, "stuff") <- list(n=0)
+    return(f)
+  }
+  n <- length(outputs)
+  given <- c(!is.null(range), !is.null(breaks), !is.null(inputs))
+  names(given) <- c("range", "breaks", "inputs")
+  ngiven <- sum(given)
+  if(ngiven == 0)
+    stop(paste("One of the arguments",
+               sQuote("range"), ",", sQuote("breaks"), "or", sQuote("inputs"),
+               "should be given"))
+  if(ngiven > 1L) {
+    offending <- names(given)[given]
+    stop(paste("The arguments",
+               commasep(sQuote(offending)),
+               "are incompatible"))
+  }
+  if(!is.null(inputs)) {
+    # discrete set of input values mapped to output values
+    stopifnot(length(inputs) == length(outputs))
+    stuff <- list(n=n, discrete=TRUE, inputs=inputs, outputs=outputs)
+    f <- function(x, what="value") {
+      m <- match(x, stuff$inputs)
+      if(what == "index")
+        return(m)
+      cout <- stuff$outputs[m]
+      return(cout)
+    }
+  } else if(!is.null(range) && inherits(range, c("Date", "POSIXt"))) {
+    # date/time interval mapped to colours
+    timeclass <- if(inherits(range, "Date")) "Date" else "POSIXt"
+    if(is.null(breaks)) {
+      breaks <- seq(from=range[1L], to=range[2L], length.out=length(outputs)+1L)
+    } else {
+      if(!inherits(breaks, timeclass))
+        stop(paste("breaks should belong to class", dQuote(timeclass)),
+             call.=FALSE)
+      stopifnot(length(breaks) >= 2)
+      stopifnot(length(breaks) == length(outputs) + 1L)
+      if(!all(diff(breaks) > 0))
+        stop("breaks must be increasing")
+    }
+    stuff <- list(n=n, discrete=FALSE, breaks=breaks, outputs=outputs)
+    f <- function(x, what="value") {
+      x <- as.vector(as.numeric(x))
+      z <- findInterval(x, stuff$breaks,
+                        rightmost.closed=TRUE)
+      if(what == "index")
+        return(z)
+      cout <- stuff$outputs[z]
+      return(cout)
+    }
+  } else {
+    # interval of real line mapped to colours
+    if(is.null(breaks)) {
+      breaks <- seq(from=range[1L], to=range[2L], length.out=length(outputs)+1L)
+    } else {
+      stopifnot(is.numeric(breaks) && length(breaks) >= 2L)
+      stopifnot(length(breaks) == length(outputs) + 1L)
+      if(!all(diff(breaks) > 0))
+        stop("breaks must be increasing")
+    }
+    stuff <- list(n=n, discrete=FALSE, breaks=breaks, outputs=outputs)
+    f <- function(x, what="value") {
+      stopifnot(is.numeric(x))
+      x <- as.vector(x)
+      z <- findInterval(x, stuff$breaks,
+                        rightmost.closed=TRUE)
+      if(what == "index")
+        return(z)
+      cout <- stuff$outputs[z]
+      return(cout)
+    }
+  }
+  attr(f, "stuff") <- stuff
+  class(f) <- c("lut", class(f))
+  f
+}
+
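+## Editor's usage sketch (not run): numeric and discrete lookup tables.
+if(FALSE) {
+  f <- lut(letters[1:5], range=c(0, 10))  # five equal intervals of [0,10]
+  f(c(1, 7))                              # "a" "d"
+  g <- lut(c(10, 20, 30), inputs=c("low", "mid", "high"))
+  g("mid")                                # 20
+}
+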
+print.lut <- function(x, ...) {
+  if(inherits(x, "colourmap")) {
+    tablename <- "Colour map"
+    outputname <- "colour"
+  } else {
+    tablename  <- "Lookup table"
+    outputname <- "output"
+  }
+  stuff <- attr(x, "stuff")
+  n <- stuff$n
+  if(n == 0) {
+    ## Null map
+    cat(paste("Null", tablename, "\n"))
+    return(invisible(NULL))
+  }
+  if(stuff$discrete) {
+    cat(paste(tablename, "for discrete set of input values\n"))
+    out <- data.frame(input=stuff$inputs, output=stuff$outputs)
+  } else {
+    b <- stuff$breaks
+    cat(paste(tablename, "for the range", prange(b[c(1L,n+1L)]), "\n"))
+    leftend  <- rep("[", n)
+    rightend <- c(rep(")", n-1), "]")
+    inames <- paste(leftend, b[-(n+1L)], ", ", b[-1L], rightend, sep="")
+    out <- data.frame(interval=inames, output=stuff$outputs)
+  }
+  colnames(out)[2L] <- outputname
+  print(out)
+  invisible(NULL)
+}
+
+print.colourmap <- function(x, ...) {
+  NextMethod("print")
+}
+
+summary.lut <- function(object, ...) {
+  s <- attr(object, "stuff")
+  if(inherits(object, "colourmap")) {
+    s$tablename <- "Colour map"
+    s$outputname <- "colour"
+  } else {
+    s$tablename  <- "Lookup table"
+    s$outputname <- "output"
+  }
+  class(s) <- "summary.lut"
+  return(s)
+}
+
+print.summary.lut <- function(x, ...) {
+  n <- x$n
+  if(n == 0) {
+    cat(paste("Null", x$tablename, "\n"))
+    return(invisible(NULL))
+  }
+  if(x$discrete) {
+    cat(paste(x$tablename, "for discrete set of input values\n"))
+    out <- data.frame(input=x$inputs, output=x$outputs)
+  } else {
+    b <- x$breaks
+    cat(paste(x$tablename, "for the range", prange(b[c(1L,n+1L)]), "\n"))
+    leftend  <- rep("[", n)
+    rightend <- c(rep(")", n-1L), "]")
+    inames <- paste(leftend, b[-(n+1L)], ", ", b[-1L], rightend, sep="")
+    out <- data.frame(interval=inames, output=x$outputs)
+  }
+  colnames(out)[2L] <- x$outputname
+  print(out)  
+}
+
+plot.colourmap <- local({
+
+  # recognised additional arguments to image.default() and axis()
+  
+  imageparams <- c("main", "asp", "sub", "axes", "ann",
+                   "cex", "font", 
+                   "cex.axis", "cex.lab", "cex.main", "cex.sub",
+                   "col.axis", "col.lab", "col.main", "col.sub",
+                   "font.axis", "font.lab", "font.main", "font.sub")
+  axisparams <- c("cex", 
+                  "cex.axis", "cex.lab",
+                  "col.axis", "col.lab",
+                  "font.axis", "font.lab",
+                  "las", "mgp", "xaxp", "yaxp",
+                  "tck", "tcl", "xpd")
+
+  linmap <- function(x, from, to) {
+    to[1L] + diff(to) * (x - from[1L])/diff(from)
+  }
+
+  # rules to determine the ribbon dimensions when one dimension is given
+  widthrule <- function(heightrange, separate, n, gap) {
+    if(separate) 1 else diff(heightrange)/10
+  }
+  heightrule <- function(widthrange, separate, n, gap) {
+    (if(separate) (n + (n-1)*gap) else 10) * diff(widthrange) 
+  }
+
+  plot.colourmap <- function(x, ..., main,
+                             xlim=NULL, ylim=NULL, vertical=FALSE, axis=TRUE,
+                             labelmap=NULL, gap=0.25, add=FALSE) {
+    if(missing(main))
+      main <- short.deparse(substitute(x))
+    stuff <- attr(x, "stuff")
+    col <- stuff$outputs
+    n   <- stuff$n
+    if(n == 0) {
+      ## Null map
+      return(invisible(NULL))
+    }
+    discrete <- stuff$discrete
+    if(discrete) {
+      check.1.real(gap, "In plot.colourmap")
+      explain.ifnot(gap >= 0, "In plot.colourmap")
+    }
+    separate <- discrete && (gap > 0)
+    if(is.null(labelmap)) {
+      labelmap <- function(x) x
+    } else if(is.numeric(labelmap) && length(labelmap) == 1L && !discrete) {
+      labscal <- labelmap
+      labelmap <- function(x) { x * labscal }
+    } else stopifnot(is.function(labelmap))
+
+    # determine pixel entries 'v' and colour map breakpoints 'bks'
+    # to be passed to 'image.default'
+    if(!discrete) {
+      # real numbers: continuous ribbon
+      bks <- stuff$breaks
+      rr <- range(bks)
+      v <- seq(from=rr[1L], to=rr[2L], length.out=max(n+1L, 1024))
+    } else if(!separate) {
+      # discrete values: blocks of colour, run together
+      v <- (1:n) - 0.5
+      bks <- 0:n
+      rr <- c(0,n)
+    } else {
+      # discrete values: separate blocks of colour
+      vleft <- (1+gap) * (0:(n-1L))
+      vright <- vleft + 1
+      v <- vleft + 0.5
+      rr <- c(0, n + (n-1)*gap)
+    }
+    # determine position of ribbon or blocks of colour
+    if(is.null(xlim) && is.null(ylim)) {
+      u <- widthrule(rr, separate, n, gap)
+      if(!vertical) {
+        xlim <- rr
+        ylim <- c(0,u)
+      } else {
+        xlim <- c(0,u)
+        ylim <- rr
+      }
+    } else if(is.null(ylim)) {
+      if(!vertical) 
+        ylim <- c(0, widthrule(xlim, separate, n, gap))
+      else 
+        ylim <- c(0, heightrule(xlim, separate, n, gap))
+    } else if(is.null(xlim)) {
+      if(!vertical) 
+        xlim <- c(0, heightrule(ylim, separate, n, gap))
+      else 
+        xlim <- c(0, widthrule(ylim, separate, n, gap))
+    } 
+
+    # .......... initialise plot ...............................
+    if(!add)
+      do.call.matched(plot.default,
+                      resolve.defaults(list(x=xlim, y=ylim,
+                                            type="n", main=main,
+                                            axes=FALSE, xlab="", ylab="",
+                                            asp=1.0),
+                                       list(...)))
+    
+    if(separate) {
+      # ................ plot separate blocks of colour .................
+      if(!vertical) {
+        # horizontal arrangement of blocks
+        xleft <- linmap(vleft, rr, xlim)
+        xright <- linmap(vright, rr, xlim)
+        y <- ylim
+        z <- matrix(1, 1L, 1L)
+        for(i in 1:n) {
+          x <- c(xleft[i], xright[i])
+          do.call.matched(image.default,
+                      resolve.defaults(list(x=x, y=y, z=z, add=TRUE),
+                                       list(...),
+                                       list(col=col[i])),
+                      extrargs=imageparams)
+                          
+        }
+      } else {
+        # vertical arrangement of blocks
+        x <- xlim 
+        ylow <- linmap(vleft, rr, ylim)
+        yupp <- linmap(vright, rr, ylim)
+        z <- matrix(1, 1L, 1L)
+        for(i in 1:n) {
+          y <- c(ylow[i], yupp[i])
+          do.call.matched(image.default,
+                      resolve.defaults(list(x=x, y=y, z=z, add=TRUE),
+                                       list(...),
+                                       list(col=col[i])),
+                      extrargs=imageparams)
+                          
+        }
+      }
+    } else {
+      # ................... plot ribbon image .............................
+      if(!vertical) {
+        # horizontal colour ribbon
+        x <- linmap(v, rr, xlim)
+        y <- ylim
+        z <- matrix(v, ncol=1L)
+      } else {
+        # vertical colour ribbon
+        y <- linmap(v, rr, ylim)
+        z <- matrix(v, nrow=1L)
+        x <- xlim
+      }
+      do.call.matched(image.default,
+                      resolve.defaults(list(x=x, y=y, z=z, add=TRUE),
+                                       list(...),
+                                       list(breaks=bks, col=col)),
+                      extrargs=imageparams)
+    }
+    if(axis) {
+      # ................. draw annotation ..................
+      if(!vertical) {
+          # add horizontal axis/annotation
+        if(discrete) {
+          la <- paste(labelmap(stuff$inputs))
+          at <- linmap(v, rr, xlim)
+        } else {
+          la <- prettyinside(rr)
+          at <- linmap(la, rr, xlim)
+          la <- labelmap(la)
+        }
+        # default axis position is below the ribbon (side=1)
+        sidecode <- resolve.1.default("side", list(...), list(side=1L))
+        if(!(sidecode %in% c(1L,3L)))
+          warning(paste("side =", sidecode,
+                        "is not consistent with horizontal orientation"))
+        pos <- c(ylim[1L], xlim[1L], ylim[2L], xlim[2L])[sidecode]
+        # don't draw axis lines if plotting separate blocks
+        lwd0 <- if(separate) 0 else 1
+        # draw axis
+        do.call.matched(graphics::axis,
+                        resolve.defaults(list(...),
+                                         list(side = 1L, pos = pos, at = at),
+                                         list(labels=la, lwd=lwd0)),
+                        extrargs=axisparams)
+      } else {
+        # add vertical axis
+        if(discrete) {
+          la <- paste(labelmap(stuff$inputs))
+          at <- linmap(v, rr, ylim)
+        } else {
+          la <- prettyinside(rr)
+          at <- linmap(la, rr, ylim)
+          la <- labelmap(la)
+        }
+        # default axis position is to the right of ribbon (side=4)
+        sidecode <- resolve.1.default("side", list(...), list(side=4))
+        if(!(sidecode %in% c(2L,4L)))
+          warning(paste("side =", sidecode,
+                        "is not consistent with vertical orientation"))
+        pos <- c(ylim[1L], xlim[1L], ylim[2L], xlim[2L])[sidecode]
+        # don't draw axis lines if plotting separate blocks
+        lwd0 <- if(separate) 0 else 1
+        # draw labels horizontally if plotting separate blocks
+        las0 <- if(separate) 1 else 0
+        # draw axis
+        do.call.matched(graphics::axis,
+                        resolve.defaults(list(...),
+                                         list(side=4, pos=pos, at=at),
+                                         list(labels=la, lwd=lwd0, las=las0)),
+                        extrargs=axisparams)
+      }
+    }
+    invisible(NULL)
+  }
+
+  plot.colourmap
+})
+
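+## Editor's usage sketch (not run): display a colour map as a vertical
+## ribbon, annotated on the right ('topo.colors' is from grDevices).
+if(FALSE) {
+  cm <- colourmap(topo.colors(64), range=c(0, 1))
+  plot(cm, vertical=TRUE)
+}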
+
+# Interpolate a colourmap or lookup table defined on real numbers
+
+interp.colourmap <- function(m, n=512) {
+  if(!inherits(m, "colourmap"))
+    stop("m should be a colourmap")
+  st <- attr(m, "stuff")
+  if(st$discrete) {
+    # discrete set of input values mapped to colours
+    xknots <- st$inputs
+    # Ensure the inputs are real numbers
+    if(!is.numeric(xknots))
+      stop("Cannot interpolate: inputs are not numerical values")
+  } else {
+    # interval of real line, chopped into intervals, mapped to colours
+    # Find midpoints of intervals
+    bks <- st$breaks
+    nb <- length(bks)
+    xknots <- (bks[-1L] + bks[-nb])/2
+  }
+  # corresponding colours in hsv coordinates
+  yknots.hsv <- rgb2hsva(col2rgb(st$outputs, alpha=TRUE))
+  # transform 'hue' from polar to cartesian coordinate
+  # divide domain into n equal intervals
+  xrange <- range(xknots)
+  xbreaks <- seq(xrange[1L], xrange[2L], length=n+1L)
+  xx <- (xbreaks[-1L] + xbreaks[-(n+1L)])/2
+  # interpolate saturation and value in hsv coordinates
+  yy.sat <- approx(x=xknots, y=yknots.hsv["s", ], xout=xx)$y
+  yy.val <- approx(x=xknots, y=yknots.hsv["v", ], xout=xx)$y
+  # interpolate hue by first transforming polar to cartesian coordinate
+  yknots.hue <- 2 * pi * yknots.hsv["h", ]
+  yy.huex <- approx(x=xknots, y=cos(yknots.hue), xout=xx)$y
+  yy.huey <- approx(x=xknots, y=sin(yknots.hue), xout=xx)$y
+  yy.hue <- (atan2(yy.huey, yy.huex)/(2 * pi)) %% 1
+  # handle transparency
+  yknots.alpha <- yknots.hsv["alpha", ]
+  if(all(yknots.alpha == 1)) {
+    ## opaque colours: form using hue, sat, val
+    yy <- hsv(yy.hue, yy.sat, yy.val)
+  } else {
+    ## transparent colours: interpolate alpha
+    yy.alpha <- approx(x=xknots, y=yknots.alpha, xout=xx)$y
+    ## form colours using hue, sat, val, alpha
+    yy <- hsv(yy.hue, yy.sat, yy.val, yy.alpha)    
+  }
+  # done
+  f <- colourmap(yy, breaks=xbreaks)
+  return(f)
+}
+
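+## Editor's usage sketch (not run): smooth a coarse four-colour map into
+## 512 interpolated colours.
+if(FALSE) {
+  coarse <- colourmap(c("blue", "green", "yellow", "red"), range=c(0, 1))
+  fine <- interp.colourmap(coarse, n=512)
+  plot(fine)
+}
+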
+interp.colours <- function(x, length.out=512) {
+  y <- colourmap(x, range=c(0,1))
+  z <- interp.colourmap(y, length.out)
+  oo <- attr(z, "stuff")$outputs
+  return(oo)
+}
+
+tweak.colourmap <- local({
+
+  is.hex <- function(z) {
+    is.character(z) &&
+    all(nchar(z, keepNA=TRUE) %in% c(7L,9L)) &&
+    identical(substr(z, 1L, 7L), substr(col2hex(z), 1L, 7L))
+  }
+
+  tweak.colourmap <- function(m, col, ..., inputs=NULL, range=NULL) {
+    if(!inherits(m, "colourmap"))
+      stop("m should be a colourmap")
+    if(is.null(inputs) && is.null(range)) 
+      stop("Specify either inputs or range")
+    if(!is.null(inputs) && !is.null(range))
+      stop("Do not specify both inputs and range")
+    ## determine indices of colours to be changed
+    if(!is.null(inputs)) {
+      ix <- m(inputs, what="index")
+    } else {
+      if(!(is.numeric(range) && length(range) == 2 && diff(range) > 0))
+        stop("range should be a numeric vector of length 2 giving (min, max)")
+      if(length(col2hex(col)) != 1L)
+        stop("When range is given, col should be a single colour value")
+      ixr <- m(range, what="index")
+      ix <- (ixr[1L]):(ixr[2L])
+    }
+    ## reassign colours
+    st <- attr(m, "stuff")
+    outputs <- st$outputs
+    result.hex <- FALSE
+    if(is.hex(outputs)) {
+      ## convert replacement data to hex
+      col <- col2hex(col)
+      result.hex <- TRUE
+    } else if(is.hex(col)) {
+      ## convert existing data to hex
+      outputs <- col2hex(outputs)
+      result.hex <- TRUE
+    } else if(!(is.character(outputs) && is.character(col))) {
+      ## unrecognised format - convert both to hex
+      outputs <- col2hex(outputs)
+      col <- col2hex(col)
+      result.hex <- TRUE
+    }
+    if(result.hex) {
+      ## hex codes may be 7 or 9 characters
+      outlen <- nchar(outputs)
+      collen <- nchar(col)
+      if(length(unique(c(outlen, collen))) > 1L) {
+        ## convert all to 9 characters
+        if(any(bad <- (outlen == 7))) 
+          outputs[bad] <- paste0(outputs[bad], "FF")
+        if(any(bad <- (collen == 7))) 
+          col[bad] <- paste0(col[bad], "FF")
+      }
+    }
+    ## Finally, replace
+    outputs[ix] <- col
+    st$outputs <- outputs
+    attr(m, "stuff") <- st
+    assign("stuff", st, envir=environment(m))
+    return(m)
+  }
+
+  tweak.colourmap
+})
+
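+## Editor's usage sketch (not run): paint the subinterval [0.4, 0.6] of a
+## greyscale map a single highlight colour.
+if(FALSE) {
+  cm <- colourmap(grey(seq(0, 1, length.out=128)), range=c(0, 1))
+  cm2 <- tweak.colourmap(cm, "red", range=c(0.4, 0.6))
+  plot(cm2)
+}
+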
+colouroutputs <- function(x) {
+  stopifnot(inherits(x, "colourmap"))
+  attr(x, "stuff")$outputs
+}
+
+"colouroutputs<-" <- function(x, value) {
+  stopifnot(inherits(x, "colourmap"))
+  st <- attr(x, "stuff")
+  col2hex(value) # validates colours
+  st$outputs[] <- value
+  attr(x, "stuff") <- st
+  assign("stuff", st, envir=environment(x))
+  return(x)
+}
+
diff --git a/R/colourtools.R b/R/colourtools.R
new file mode 100755
index 0000000..1f99b39
--- /dev/null
+++ b/R/colourtools.R
@@ -0,0 +1,184 @@
+#
+#  colourtools.R
+#
+#   $Revision: 1.18 $   $Date: 2017/01/02 04:47:50 $
+#
+
+
+rgb2hex <- function(v, maxColorValue=255) {
+  stopifnot(is.numeric(v))
+  if(!is.matrix(v))
+    v <- matrix(v, nrow=1L)
+  if(ncol(v) %in% c(3, 4)) {
+    out <- rgb(v, maxColorValue=maxColorValue)
+    return(out)
+  } 
+  stop("v should be a vector of length 3 or 4, or a matrix with 3 or 4 columns")
+}
+
+rgb2hsva <- function(red, green=NULL, blue=NULL, alpha=NULL,
+                     maxColorValue=255) {
+  if(is.null(green) && is.null(blue) && is.null(alpha)) {
+    ## red should be a 3-row matrix of RGB values
+    ## or a 4-row matrix of RGBA values 
+    if(!is.matrix(red))
+      red <- matrix(red, ncol=1L)
+    ## check for an alpha channel
+    if(nrow(red) == 4) {
+      alpha <- red[4L,]
+      red <- red[-4L, , drop=FALSE]
+    }
+  }
+  y <- rgb2hsv(red, green, blue, maxColorValue=maxColorValue)
+  if(!is.null(alpha))
+    y <- rbind(y, alpha=alpha/maxColorValue)
+  return(y)
+}
+ 
+col2hex <- function(x) {
+  # convert to RGBA
+  y <- col2rgb(x, alpha=TRUE)
+  # remove alpha channel if all colours are opaque
+  if(all(y["alpha", ] == 255))
+    y <- y[1:3, , drop=FALSE]
+  # convert to hex 
+  z <- rgb2hex(t(y))
+  return(z)
+}
+
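+## Editor's usage sketch (not run): basic conversions.
+if(FALSE) {
+  col2hex("red")          # "#FF0000"
+  rgb2hex(c(255, 0, 0))   # "#FF0000"
+}
+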
+paletteindex <- function(x) {
+  x <- col2hex(x)
+  p <- col2hex(palette())
+  m <- match(x, p)
+  return(m)
+}
+
+is.colour <- function(x) {
+  if(length(x) == 0) return(FALSE)
+  cx <- try(col2rgb(x), silent=TRUE)
+  bad <- inherits(cx, "try-error")
+  return(!bad)
+}
+
+samecolour <- function(x, y) { col2hex(x) == col2hex(y) }
+
+complementarycolour <- function(x) {
+  if(is.null(x)) return(NULL)
+  if(inherits(x, "colourmap")) {
+    colouroutputs(x) <- complementarycolour(colouroutputs(x))
+    return(x)
+  }
+  # convert to RGBA
+  y <- col2rgb(x, alpha=TRUE)
+  # complement of R, G, B
+  y[1:3, ] <- 255 - y[1:3, ]
+  # convert to colours
+  z <- rgb2hex(t(y))
+  return(z)
+}
+
+is.grey <- function(x) {
+  if(inherits(x, "colourmap")) x <- colouroutputs(x)
+  if(is.function(x)) return(NA)
+  y <- rgb2hsva(col2rgb(x, alpha=TRUE))
+  sat <- y["s", ]
+  alp <- y["alpha", ]
+  return(sat == 0 & alp == 1)
+}
+
+to.opaque <- function(x) {
+  if(all(!is.na(paletteindex(x))))
+    return(x) # preserve palette colours
+  rgb(t(col2rgb(x)), maxColorValue=255)
+}
+
+to.transparent <- function(x, fraction) {
+  if(all(fraction == 1))
+    return(to.opaque(x))
+  rgb(t(col2rgb(x))/255, alpha=fraction, maxColorValue=1)
+}
+  
+to.grey <- function(x, weights=c(0.299, 0.587, 0.114), transparent=FALSE) {
+  if(is.null(x)) return(NULL)
+  if(inherits(x, "colourmap")) {
+    colouroutputs(x) <- to.grey(colouroutputs(x),
+                                weights=weights, transparent=transparent)
+    return(x)
+  }
+  if(is.function(x)) {
+    f <- x
+    g <- function(...) to.grey(f(...), weights=weights, transparent=transparent)
+    return(g)
+  }
+  ## preserve palette indices, if only using black/grey
+  if(all(!is.na(paletteindex(x))) && all(is.grey(x)))
+    return(x)
+  if(!transparent) {
+    y <- col2rgb(x)
+    g <- (weights %*% y)/(255 * sum(weights))
+    z <- grey(g)
+  } else {
+    yy <- col2rgb(x, alpha=TRUE)
+    y <- yy[1:3, , drop=FALSE]
+    g <- (weights %*% y)/(255 * sum(weights))
+    z <- grey(g, alpha=y[4L,])
+  }
+  return(z)
+}
+
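+## Editor's usage sketch (not run): luminance-weighted conversion of
+## colours to greyscale.
+if(FALSE) {
+  to.grey("red")                     # grey of luminance about 0.30
+  to.grey(c("red", "green", "blue"))
+}
+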
+is.col.argname <- function(x) {
+  return(nzchar(x) & ((x == "col") | (substr(x, 1L, 4L) == "col.")))
+}
+
+col.args.to.grey <- function(x, ...) {
+  if(any(hit <- is.col.argname(names(x))))
+    x[hit] <- lapply(x[hit], to.grey, ...)
+  return(x)
+}
+
+# versions of rgb() and hsv() that work with NA values
+
+rgbNA <- function(red, green, blue, alpha=NULL, maxColorValue=1) {
+  df <- if(is.null(alpha)) data.frame(red=red, green=green, blue=blue) else
+        data.frame(red=red, green=green, blue=blue, alpha=alpha)
+  result <- rep(NA_character_, nrow(df))
+  ok <- complete.cases(df)
+  result[ok] <- if(is.null(alpha)) {
+    with(df, rgb(red[ok], green[ok], blue[ok],
+                 maxColorValue=maxColorValue))
+  } else {
+    with(df, rgb(red[ok], green[ok], blue[ok], alpha[ok],
+                 maxColorValue=maxColorValue))
+  }
+  return(result)
+}
+
+hsvNA <- function(h, s, v, alpha=NULL) {
+  df <- if(is.null(alpha)) data.frame(h=h, s=s, v=v) else
+                           data.frame(h=h, s=s, v=v, alpha=alpha)
+  result <- rep(NA_character_, nrow(df))
+  ok <- complete.cases(df)
+  result[ok] <- if(is.null(alpha)) {
+    with(df, hsv(h[ok], s[ok], v[ok]))
+  } else {  
+    with(df, hsv(h[ok], s[ok], v[ok], alpha[ok]))
+  }
+  return(result)
+}
+
+## This function traps the colour arguments
+## and converts to greyscale if required.
+
+do.call.plotfun <- function(fun, arglist, ...) {
+  if(spatstat.options("monochrome")) {
+    keys <- names(arglist)
+    if(!is.null(keys)) {
+      cols <- nzchar(keys) & ((keys %in% c("border", "col", "fg", "bg")) |
+                              (substr(keys, 1, 4) == "col."))
+      if(any(cols))
+        arglist[cols] <- lapply(arglist[cols], to.grey)
+    }
+  }
+  do.call.matched(fun, arglist, ...)
+}
+
diff --git a/R/compareFit.R b/R/compareFit.R
new file mode 100755
index 0000000..85ed402
--- /dev/null
+++ b/R/compareFit.R
@@ -0,0 +1,72 @@
+#
+# compareFit.R
+#
+# $Revision: 1.3 $  $Date: 2015/10/21 09:06:57 $
+
+compareFit <- function(object, Fun, r=NULL, breaks=NULL,
+                     ..., trend=~1, interaction=Poisson(),
+                     rbord=NULL, modelnames=NULL,
+                     same=NULL, different=NULL) {
+  dotargs <- list(...)
+  h <- hyperframe(obj=object, tren=trend, inte=interaction)
+  N <- nrow(h)
+  if(N == 0)
+    stop("No objects specified")
+  # determine rbord for summary statistics
+  if(is.null(rbord) && !is.null(interaction))
+    rbord <- max(with(h, reach(inte)))
+  h$rbord <- rbord
+  # try to get nice model names
+  if(is.null(modelnames)) {
+    if(inherits(trend, "formula") && is.interact(interaction) &&
+       inherits(object, c("anylist", "listof")) &&
+       all(nzchar(names(object))) &&
+       length(names(object)) == nrow(h))
+      modelnames <- names(object)
+    else if(inherits(trend, c("anylist", "listof")) &&
+            all(nzchar(names(trend))) &&
+            length(names(trend)) == nrow(h))
+      modelnames <- names(trend) 
+    else if(inherits(interaction, c("anylist", "listof")) &&
+            all(nzchar(names(interaction))) &&
+            length(names(interaction)) == nrow(h))
+      modelnames <- names(interaction)
+    else 
+      modelnames <- row.names(h)
+  }
+  row.names(h) <- make.names(modelnames)
+  # fix a common vector of r values
+  if(is.null(r)) {
+    # compute first function 
+    fun1 <- with(h[1L,,drop=TRUE,strip=FALSE],
+                 do.call(Fun,
+                         append(list(object=obj,
+                                     trend=tren,
+                                     interaction=inte,
+                                     rbord=rbord,
+                                     r=NULL, breaks=breaks),
+                                dotargs)))
+    # extract r values
+    r <- with(fun1, .x)
+  }
+  # compute the subsequent functions
+  if(N == 1L)
+    funs2toN <- NULL
+  else 
+    funs2toN <- with(h[-1L, , drop=TRUE, strip=FALSE],
+                     do.call(Fun,
+                             append(list(object=obj,
+                                         trend=tren,
+                                         interaction=inte,
+                                         rbord=rbord,
+                                         r=r),
+                                    dotargs)))
+  if(N == 2)
+    funs2toN <- list(funs2toN)
+  # collect all functions in a list
+  funs <- as.anylist(append(list(fun1), funs2toN))
+  names(funs) <- row.names(h)
+  # collapse together
+  out <- collapse.fv(funs, same=same, different=different)
+  return(out)
+}
diff --git a/R/compileK.R b/R/compileK.R
new file mode 100755
index 0000000..5398db1
--- /dev/null
+++ b/R/compileK.R
@@ -0,0 +1,117 @@
+# compileK
+#
+# Function to take a matrix of pairwise distances
+# and compile a 'K' function in the format required by spatstat.
+#
+#   $Revision: 1.9 $  $Date: 2017/06/05 10:31:58 $
+# -------------------------------------------------------------------
+
+compileK <- function(D, r, weights=NULL, denom=1, check=TRUE, ratio=FALSE,
+                     fname="K") {
+  # process r values
+  breaks <- breakpts.from.r(r)
+  rmax <- breaks$max
+  r    <- breaks$r
+  # check that D is a symmetric matrix with nonnegative entries
+  if(check)
+    stopifnot(is.matrix(D) && isSymmetric(D) && all(D >= 0))
+  # ignore the diagonal; throw away any D values greater than rmax
+  ok <- (D <= rmax & D > 0)
+  Dvalues <- D[ok]
+  #
+  # weights?
+  if(!is.null(weights)) {
+    stopifnot(is.matrix(weights) && all(dim(weights)==dim(D)))
+    wvalues <- weights[ok]
+  } else wvalues <- NULL
+  # count the number of D values in each interval (r[k], r[k+1L]]
+  counts <- whist(Dvalues, breaks=breaks$val, weights=wvalues)
+  # cumulative counts: number of D values in [0, r[k])
+  Kcount <- cumsum(counts)
+  # divide by appropriate denominator
+  Kratio <- Kcount/denom
+  # wrap it up as an 'fv' object for use in spatstat
+  df <- data.frame(r=r, est=Kratio)
+  if(!ratio) {
+    K <- fv(df, "r", quote(K(r)), "est", . ~ r , c(0,rmax),
+            c("r", makefvlabel(NULL, "hat", fname)), 
+            c("distance argument r", "estimated %s"),
+            fname=fname)
+  } else {
+    num <- data.frame(r=r, est=Kcount)
+    den <- data.frame(r=r, est=denom)
+    K <- ratfv(num, den,
+               "r", quote(K(r)), "est", . ~ r , c(0,rmax),
+               c("r", makefvlabel(NULL, "hat", fname)), 
+               c("distance argument r", "estimated %s"),
+               fname=fname)
+  }
+  return(K)
+}
+
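+## Editor's usage sketch (not run): a naive (uncorrected) K estimate from
+## a pairwise distance matrix; 'cells' is a standard spatstat dataset and
+## denom = n * lambda is one common normalisation.
+if(FALSE) {
+  X <- cells
+  D <- pairdist(X)
+  lambda <- npoints(X)/area(Window(X))
+  K <- compileK(D, r=seq(0, 0.25, by=0.005), denom=npoints(X) * lambda)
+  plot(K)
+}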
+
+compilepcf <- function(D, r, weights=NULL, denom=1, check=TRUE,
+                       endcorrect=TRUE, ratio=FALSE, ..., fname="g") {
+  # process r values
+  breaks <- breakpts.from.r(r)
+  if(!breaks$even)
+    stop("compilepcf: r values must be evenly spaced", call.=FALSE)
+  r    <- breaks$r
+  rmax <- breaks$max
+  # check that D is a symmetric matrix with nonnegative entries
+  if(check)
+    stopifnot(is.matrix(D) && isSymmetric(D) && all(D >= 0))
+  # ignore the diagonal; throw away any D values greater than rmax
+  ok <- (D <= rmax & D > 0)
+  Dvalues <- D[ok]
+  #
+  # weights?
+  if(!is.null(weights)) {
+    stopifnot(is.matrix(weights) && all(dim(weights)==dim(D)))
+    wvalues <- weights[ok]
+    totwt <- sum(wvalues)
+    normwvalues <- wvalues/totwt
+  } else {
+    nv <- length(Dvalues)
+    normwvalues <- rep.int(1/nv, nv)
+    totwt <- nv
+  }
+  # form kernel estimate
+  rmin <- min(r)
+  rmax <- max(r)
+  nr   <- length(r)
+  den <- density(Dvalues, weights=normwvalues,
+                 from=rmin, to=rmax, n=nr, ...)
+  gval <- den$y * totwt
+  # normalise
+  gval <- gval/denom
+  # edge effect correction at r = 0
+  if(endcorrect) {
+    one <- do.call(density,
+                   resolve.defaults(
+                                    list(seq(rmin,rmax,length=512)),
+                                    list(bw=den$bw, adjust=1),
+                                    list(from=rmin, to=rmax, n=nr),
+                                    list(...)))
+    onefun <- approxfun(one$x, one$y, rule=2)
+    gval <- gval /((rmax-rmin) * onefun(den$x))
+  }
+  # wrap it up as an 'fv' object for use in spatstat
+  df <- data.frame(r=r, est=gval)
+  if(!ratio) {
+    g <- fv(df, "r", quote(g(r)), "est", . ~ r , c(0,rmax),
+            c("r", makefvlabel(NULL, "hat", fname)),
+            c("distance argument r", "estimated %s"),
+            fname=fname)
+  } else {
+      num <- data.frame(r=r, est=gval * denom)
+      den <- data.frame(r=r, est=denom)
+      g <- ratfv(num, den,
+                 "r", quote(g(r)), "est", . ~ r , c(0,rmax),
+                 c("r", makefvlabel(NULL, "hat", fname)), 
+                 c("distance argument r", "estimated %s"),
+                 fname=fname)
+  }
+  attr(g, "bw") <- den$bw
+  return(g)
+}
diff --git a/R/concom.R b/R/concom.R
new file mode 100644
index 0000000..78baef0
--- /dev/null
+++ b/R/concom.R
@@ -0,0 +1,131 @@
+#
+#
+#    concom.R
+#
+#    $Revision: 1.4 $	$Date: 2016/04/25 02:34:40 $
+#
+#    The connected component interaction
+#
+#    Concom()    create an instance of the connected component interaction
+#                 [an object of class 'interact']
+#	
+# -------------------------------------------------------------------
+#
+
+Concom <- local({
+
+  connectedlabels <- function(X, R) {
+    connected(X, R, internal=TRUE)
+  }
+  
+  countcompo <- function(X, R) {
+    length(unique(connectedlabels(X, R)))
+  }
+
+   # change in number of components when point i is deleted
+  cocoDel <- function(X, R, subset=seq_len(npoints(X))) {
+    n <- length(subset)
+    ans <- integer(n)
+    if(n > 0) {
+      cX <- countcompo(X, R)
+      for(i in 1:n) 
+        ans[i] <- countcompo(X[-subset[i]], R) - cX
+    }
+    return(ans)
+  }
+
+  # change in number of components when new point is added
+
+  cocoAdd <- function(U, X, R) {
+    U <- as.ppp(U, W=as.owin(X))
+    nU <- npoints(U)
+    cr <- crosspairs(U, X, R, what="indices")
+    lab <- connectedlabels(X, R)
+    hitcomp <- tapply(X=lab[cr$j],
+                      INDEX=factor(cr$i, levels=1:nU),
+                      FUN=unique, 
+                      simplify=FALSE)
+    nhit <- unname(lengths(hitcomp))
+    change <- 1L - nhit
+    return(change)
+  }
+
+  # connected component potential 
+  cocopot <- 
+    function(X,U,EqualPairs,pars,correction, ...) {
+      bad <- !(correction %in% c("border", "none"))
+      if((nbad <- sum(bad)) > 0) 
+        warning(paste("The",
+                      ngettext(nbad, "correction", "corrections"),
+                      commasep(sQuote(correction[bad])),
+                      ngettext(nbad, "is", "are"),
+                      "not implemented"))
+      n <- U$n
+      answer <- numeric(n)
+      r <- pars$r
+      if(is.null(r)) stop("internal error: r parameter not found")
+      dummies <- !(seq_len(n) %in% EqualPairs[,2L])
+      if(sum(dummies) > 0)
+        answer[dummies] <- -cocoAdd(U[dummies], X, r)
+      ii <- EqualPairs[,1L]
+      jj <- EqualPairs[,2L]
+      answer[jj] <- cocoDel(X, r, subset=ii)
+      return(answer + 1L)
+    }
+
+  # template object without family, par, version
+  BlankCoco <- 
+  list(
+         name     = "Connected component process",
+         creator  = "Concom",
+         family   = "inforder.family", # evaluated later
+         pot      = cocopot,
+         par      = list(r = NULL), # to be filled in
+         parnames = "distance threshold",
+         init     = function(self) {
+                      r <- self$par$r
+                      if(!is.numeric(r) || length(r) != 1L || r <= 0)
+                       stop("distance threshold r must be a positive number")
+                    },
+         update = NULL,  # default OK
+         print = NULL,    # default OK
+         interpret =  function(coeffs, self) {
+           logeta <- as.numeric(coeffs[1L])
+           eta <- exp(logeta)
+           return(list(param=list(eta=eta),
+                       inames="interaction parameter eta",
+                       printable=signif(eta)))
+         },
+         valid = function(coeffs, self) {
+           eta <- ((self$interpret)(coeffs, self))$param$eta
+           return(is.finite(eta))
+         },
+         project = function(coeffs, self) {
+           if((self$valid)(coeffs, self))
+             return(NULL)
+           return(Poisson())
+         },
+         irange = function(self, coeffs=NA, epsilon=0, ...) {
+           if(anyNA(coeffs))
+             return(Inf)
+           logeta <- coeffs[1L]
+           if(abs(logeta) <= epsilon)
+             return(0)
+           else
+             return(Inf)
+         },
+       version=NULL # to be added
+  )
+  class(BlankCoco) <- "interact"
+
+  Concom <- function(r) {
+    instantiate.interact(BlankCoco, list(r=r))
+  }
+
+  Concom <- intermaker(Concom, BlankCoco)
+  
+  Concom
+})
+
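+## Editor's usage sketch (not run): fit a connected-component interaction
+## to the standard spatstat dataset 'redwood' by maximum pseudolikelihood.
+if(FALSE) {
+  fit <- ppm(redwood ~ 1, Concom(r=0.05))
+  print(fit)
+}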
+
+             
diff --git a/R/connected.R b/R/connected.R
new file mode 100755
index 0000000..252a034
--- /dev/null
+++ b/R/connected.R
@@ -0,0 +1,199 @@
+#
+# connected.R
+#
+# connected component transform
+#
+#    $Revision: 1.19 $  $Date: 2017/06/05 10:31:58 $
+#
+# Interpreted code for pixel images by Julian Burgos <jmburgos at u.washington.edu>
+# Rewritten in C by Adrian Baddeley
+#
+# Code for point patterns by Adrian Baddeley
+
+connected <- function(X, ...) {
+  UseMethod("connected")
+}
+
+connected.im <- function(X, ..., background=NA, method="C") {
+  W <- if(!is.na(background)) solutionset(X != background) else 
+       if(X$type == "logical") solutionset(X) else as.owin(X)
+  connected.owin(W, method=method, ...)
+}
+
+connected.owin <- function(X, ..., method="C") {
+  method <- pickoption("algorithm choice", method,
+                       c(C="C", interpreted="interpreted"))
+  # convert X to binary mask
+  X <- as.mask(X, ...)
+  #     
+  Y <- X$m
+  nr <- X$dim[1L]
+  nc <- X$dim[2L]
+
+  if(method == "C") {
+################ COMPILED CODE #########################
+# Pad border with FALSE
+    M <- rbind(FALSE, Y, FALSE)
+    M <- cbind(FALSE, M, FALSE)
+    # assign unique label to each foreground pixel 
+    L <- M
+    L[M] <- seq_len(sum(M))
+    L[!M] <- 0
+    # resolve labels
+    z <- .C("cocoImage",
+            mat=as.integer(t(L)),
+            nr=as.integer(nr),
+            nc=as.integer(nc),
+            PACKAGE = "spatstat")
+    # unpack
+    Z <- matrix(z$mat, nr+2, nc+2, byrow=TRUE)
+  } else {
+################ INTERPRETED CODE #########################
+# by Julian Burgos
+#  
+# Pad border with zeros
+    padY <- rbind(0, Y, 0)
+    padY <- cbind(0, padY, 0)
+    # Initialise 
+    Z <- matrix(0, nrow(padY), ncol(padY))
+    currentlab <- 1L
+    todo <- as.vector(t(Y))
+    equiv <- NULL
+  
+    # ........ main loop ..........................
+    while(any(todo)){
+      # pick first unresolved pixel
+      one <- which(todo)[1L]
+      onerow <- ceiling(one/nc)
+      onecol <- one -((onerow-1L)*nc)
+      parow <- onerow + 1L  # equivalent row & column in padded matrix
+      pacol <- onecol + 1L
+      #Examine four previously scanned neighbors
+      # (use padded matrix to avoid edge issues)
+      nbrs <- rbind(c(parow-1L,pacol-1L),
+                    c(parow-1L,pacol),
+                    c(parow,  pacol-1L),
+                    c(parow-1L,pacol+1L))
+      px <- sum(padY[nbrs])
+      if (px==0){
+        # no neighbours: new component
+        Z[parow,pacol] <- currentlab
+        currentlab <- currentlab+1L
+        todo[one] <- FALSE
+      } else if(px==1L) {
+        # one neighbour: assign existing label
+        labs <- unique(Z[nbrs])  # unique() has no na.rm argument
+        labs <- labs[labs != 0]
+        Z[parow,pacol] <- labs[1L]
+        currentlab <- max(Z)+1L
+        todo[one] <- FALSE
+      } else {
+        # more than one neighbour: possible merger of labels
+        labs <- unique(Z[nbrs])
+        labs <- labs[labs != 0]
+        labs <- sort(labs)
+        equiv <- rbind(equiv,c(labs,rep.int(0,times=4-length(labs))))
+        Z[parow,pacol] <- labs[1L]
+        currentlab <- max(Z)+1L
+        todo[one] <- FALSE
+      }
+    }
+    # ........... end of loop ............
+    # Resolve equivalences ................
+
+    if(length(equiv)>1L){
+      merges <- (equiv[,2L] > 1L)
+      nmerge <- sum(merges)
+      if(nmerge==1L)
+        equiv <- equiv[which(merges), , drop=FALSE]
+      else if(nmerge > 1L) {
+        relevant <- (equiv[,2L] > 0)
+        equiv <- equiv[relevant, , drop=FALSE]
+        equiv <- equiv[fave.order(equiv[,1L]),]
+      }
+      for (i in 1:nrow(equiv)){
+        current <- equiv[i, 1L]
+        for (j in 2:4){
+          twin <- equiv[i,j]
+          if (twin>0){
+            # Change labels matrix
+            Z[which(Z==twin)] <- current
+            # Update equivalence table
+            equiv[which(equiv==twin)] <- current
+          }
+        }
+      }
+    }
+  }
+
+  ########### COMMON CODE ############################
+    
+  # Renumber labels sequentially
+  mapped <- (Z != 0)
+  usedlabs <- sort(unique(as.vector(Z[mapped])))
+  nlabs <- length(usedlabs)
+  labtable <- cumsum(seq_len(max(usedlabs)) %in% usedlabs)
+  Z[mapped] <- labtable[Z[mapped]]
+
+  # banish zeroes
+  Z[!mapped] <- NA
+  
+  # strip borders
+  Z <- Z[2:(nrow(Z)-1L),2:(ncol(Z)-1L)]
+  # dress up 
+  Z <- im(factor(Z, levels=1:nlabs),
+          xcol=X$xcol, yrow=X$yrow, unitname=unitname(X))
+  return(Z)
+}
+
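+## Editor's usage sketch (not run): label the connected components of a
+## window consisting of two disjoint discs; the result is a factor-valued
+## pixel image with two levels.
+if(FALSE) {
+  W <- union.owin(disc(0.2, c(0.3, 0.3)), disc(0.2, c(0.7, 0.7)))
+  Z <- connected(W)
+  plot(Z)
+}
+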
+connected.ppp <- function(X, R, ...) {
+  stopifnot(is.ppp(X))
+  check.1.real(R, "In connected.ppp")
+  stopifnot(R >= 0)
+  internal <- resolve.1.default("internal", list(...), list(internal=FALSE))
+  nv <- npoints(X)
+  cl <- closepairs(X, R, what="indices")
+  ie <- cl$i - 1L
+  je <- cl$j - 1L
+  ne <- length(ie)
+  zz <- .C("cocoGraph",
+           nv=as.integer(nv),
+           ne=as.integer(ne),
+           ie=as.integer(ie),
+           je=as.integer(je),
+           label=as.integer(integer(nv)),
+           status=as.integer(integer(1L)),
+           PACKAGE = "spatstat")
+  if(zz$status != 0)
+    stop("Internal error: connected.ppp did not converge")
+  if(internal)
+    return(zz$label)
+  lab <- zz$label + 1L
+  # Renumber labels sequentially 
+  lab <- as.integer(factor(lab))
+  # Convert labels to factor
+  lab <- factor(lab)
+  # Apply to points
+  Y <- X %mark% lab
+  return(Y)
+}
+
+# .................................................
+
+is.connected <- function(X, ...) {
+  UseMethod("is.connected")
+}
+
+is.connected.default <- function(X, ...) {
+  y <- connected(X, ...)
+  npieces <- length(levels(y))
+  if(npieces == 0)
+    stop("Unable to determine connectedness")
+  return(npieces == 1)
+}
+
+is.connected.ppp <- function(X, R, ...) {
+  lab <- connected(X, R, internal=TRUE)
+  npieces <- length(unique(lab))
+  return(npieces == 1)
+}
\ No newline at end of file
diff --git a/R/convexify.R b/R/convexify.R
new file mode 100644
index 0000000..8afffd9
--- /dev/null
+++ b/R/convexify.R
@@ -0,0 +1,20 @@
+##
+## convexify.R
+##
+## $Revision: 1.1 $ $Date: 2015/10/23 12:34:17 $
+
+convexify <- function(W, eps) {
+  if(!is.polygonal(W)) {
+    if(missing(eps)) eps <- diameter(Frame(W))/20
+    W <- simplify.owin(W, eps)
+  }
+  e <- edges(W)
+  len <- lengths.psp(e)
+  ang <- angles.psp(e, directed=TRUE)
+  df <- data.frame(ang=ang, len=len)
+  df <- df[order(df$ang), ]
+  df <- within(df, { dx <- len * cos(ang); dy <- len * sin(ang)})
+  owin(poly=with(df, list(x=cumsum(c(0,dx)), y=cumsum(c(0,dy)))))
+}
+
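+## Editor's usage sketch (not run): 'letterR' is a standard spatstat
+## polygonal window; its convexification rearranges the same edges in
+## order of direction.
+if(FALSE) {
+  V <- convexify(letterR)
+  plot(V)
+}
+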
+    
diff --git a/R/copyExampleFiles.R b/R/copyExampleFiles.R
new file mode 100644
index 0000000..3abc33b
--- /dev/null
+++ b/R/copyExampleFiles.R
@@ -0,0 +1,42 @@
+##  copyExampleFiles.R
+##  $Revision: 1.10 $ $Date: 2015/03/11 05:58:50 $
+
+copyExampleFiles <- function(which, folder=getwd()) {
+  choices <- dir(system.file("rawdata", package="spatstat"))
+  if(missing(which) || is.null(which)) {
+    message(paste("Choices are: which=", commasep(sQuote(choices), " or ")))
+    return(invisible(NULL))
+  }
+  if(!interactive())
+    stop("Copying files requires an interactive session (by CRAN Policies).")
+  whichdata <- match.arg(which, choices)
+  sourcefolder <- system.file("rawdata", whichdata, package="spatstat")
+  sourcefiles <- dir(sourcefolder)
+  if(length(sourcefiles) == 0)
+      stop("No files available")
+  # set directory
+  oldfolder <- getwd()
+  setwd(folder)
+  on.exit(setwd(oldfolder))
+  # Warn user:
+  foldername <- if(identical(folder, oldfolder)) "the current folder" else
+                 paste("the folder", dQuote(folder))
+  splat("You are about to have been copying", 
+        ngettext(length(sourcefiles), "file", "files"),
+        commasep(dQuote(sourcefiles)), "to",
+        paste0(foldername, "."),
+        "This may overwrite existing files.")
+  # Ask permission:
+  answer <- readline("Do you want to continue? (y/n)[y] ")
+  if(!tolower(substr(answer, 1, 1)) %in% c("", "y")) {
+    splat("Aborting...")
+    return(invisible(NULL))
+  }
+  # 
+  for(fn in sourcefiles) {
+    frompath <- file.path(sourcefolder, fn)
+    file.copy(from = frompath, to = fn, overwrite=TRUE)
+  }
+  splat("Copying completed.")
+  return(invisible(NULL))
+}
diff --git a/R/covariates.R b/R/covariates.R
new file mode 100755
index 0000000..d9636fd
--- /dev/null
+++ b/R/covariates.R
@@ -0,0 +1,49 @@
+#
+# covariates.R
+#
+# evaluate covariates
+#
+#   $Revision: 1.3 $  $Date: 2015/10/21 09:06:57 $
+#
+
+evalCovariate <- function(covariate, locations) {
+  # evaluate covariate of any kind at specified locations
+  covvalues <-
+    if(is.im(covariate)) 
+      safelookup(covariate, locations)
+    else if(is.function(covariate)) 
+      covariate(locations$x, locations$y)
+    else if(is.numeric(covariate) || is.factor(covariate)) {
+      if(length(covariate) == 1L)
+        rep.int(covariate, length(locations$x))
+      else if(length(covariate) == length(locations$x))
+        covariate
+      else stop("Inappropriate length for covariate vector")
+    } else
+  stop("Covariate should be an image, a function or a factor/numeric vector")
+  return(covvalues)
+}
+
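+## Editor's usage sketch (not run): evaluating covariates of each kind at
+## the points of the standard spatstat dataset 'cells'.
+if(FALSE) {
+  Z <- distmap(cells)
+  evalCovariate(Z, cells)                    # pixel image lookup
+  evalCovariate(function(x,y) x + y, cells)  # function of coordinates
+  evalCovariate(42, cells)                   # constant, replicated
+}
+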
+ppmCovariates <- function(model) {
+  # generate list of all covariates in ppm (excluding marks)
+  stopifnot(is.ppm(model))
+  co <- as.list(model$covariates)
+  xy <- list(x=function(x,y){x}, y=function(x,y){y})
+  coplus <- append(co, xy)
+  return(as.anylist(coplus))
+}
+
+findCovariate <- function(covname, scope, scopename=NULL) {
+  # find the named covariate in the given ppm object or list
+  if(is.ppm(scope)) {
+    covlist <- ppmCovariates(scope)
+    if(missing(scopename)) scopename <- "covariates in model"
+  } else if(is.list(scope)) {
+    covlist <- scope
+  } else stop("scope should be a named list of covariates, or a ppm object")
+  if(!(covname %in% names(covlist))) 
+    stop(paste("covariate", dQuote(covname), "not found",
+               if(!is.null(scopename)) paste("amongst", scopename) else NULL))
+  covlist[[covname]]
+}
+
diff --git a/R/covering.R b/R/covering.R
new file mode 100644
index 0000000..e4001d5
--- /dev/null
+++ b/R/covering.R
@@ -0,0 +1,37 @@
+#'
+#'   covering.R
+#'
+#'  $Revision: 1.3 $  $Date: 2016/03/26 10:27:20 $
+#'
+
+covering <- function(W, r, ..., giveup=1000) {
+  W <- as.owin(W)
+  ## compute distance to boundary
+  D <- distmap(W, invert=TRUE, ...)
+  D <- D[W, drop=FALSE]
+  M <- as.owin(D)
+  pixstep <- max(M$xstep, M$ystep)
+  ## very small distances
+  if(r <= pixstep) {
+    warning("r is smaller than the pixel resolution: returning pixel centres",
+            call.=FALSE)
+    xy <- rasterxy.mask(M, drop=TRUE)
+    return(ppp(xy[,1L], xy[,2L], window=W, check=FALSE))
+  }
+  ## find the point of W farthest from the boundary
+  X <- where.max(D)
+  ## build a hexagonal grid through this point
+  ruse <- if(is.convex(W)) r else (r * 2/3)
+  ruse <- max(pixstep, ruse - pixstep)
+  H <- hexgrid(W, ruse, offset=c(X$x, X$y), origin=c(0,0))
+  if(npoints(H) == 0) H <- X
+  ## this may not suffice if W is irregular
+  for(i in 1:giveup) {
+    DH <- distmap(H)
+    if(max(DH) < ruse && npoints(H) > 0) return(H)
+    Hnew <- where.max(DH)
+    H <- superimpose(H, Hnew, W=W)
+  }
+  stop(paste("Failed to converge after adding", giveup, "points"), call.=FALSE)
+}
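+
+#' Usage sketch ('letterR' is a standard spatstat window):
+#'   X <- covering(letterR, 0.2)  # centres of discs of radius 0.2 covering W
+#'   plot(letterR); plot(X, add=TRUE)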
+
diff --git a/R/crossdistlpp.R b/R/crossdistlpp.R
new file mode 100644
index 0000000..9f9082a
--- /dev/null
+++ b/R/crossdistlpp.R
@@ -0,0 +1,123 @@
+#
+# crossdistlpp.R
+#
+#  $Revision: 1.6 $ $Date: 2017/06/05 10:31:58 $
+#
+#  crossdist.lpp
+#        Calculates the shortest-path distance from each point of X
+#        to each point of Y, where X and Y are point patterns
+#        on the same linear network.
+#
+
+crossdist.lpp <- function(X, Y, ..., method="C") {
+  stopifnot(inherits(X, "lpp"))
+  stopifnot(method %in% c("C", "interpreted"))
+  check <- resolve.defaults(list(...), list(check=TRUE))$check
+  #
+  nX <- npoints(X)
+  nY <- npoints(Y)
+  #
+  L <- as.linnet(X, sparse=FALSE)
+  if(check) {
+    LY <- as.linnet(Y, sparse=FALSE)
+    if(!identical(L, LY))
+      stop("X and Y are on different linear networks")
+  }
+
+  if(any(is.infinite(L$dpath))) {
+    #' disconnected network
+    lab <- connected(L, what="labels")
+    subsets <- split(seq_len(nvertices(L)), lab)
+    crossdistmat <- matrix(Inf,nX,nY)
+    for(subi in subsets) {
+      Xi <- thinNetwork(X, retainvertices=subi)
+      Yi <- thinNetwork(Y, retainvertices=subi)
+      whichX <- attr(Xi, "retainpoints")      
+      whichY <- attr(Yi, "retainpoints")      
+      crossdistmat[whichX, whichY] <- crossdist.lpp(Xi, Yi, method=method)
+    }
+    return(crossdistmat)
+  }
+
+  # network is connected
+  
+  P <- as.ppp(X)
+  Q <- as.ppp(Y)
+  #
+#  Lseg  <- L$lines
+  Lvert <- L$vertices
+  from  <- L$from
+  to    <- L$to
+  dpath <- L$dpath
+  
+  # nearest segment for each point
+  Xpro <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE)$seg
+  Ypro <- coords(Y, local=TRUE, spatial=FALSE, temporal=FALSE)$seg
+
+  if(method == "interpreted") {
+    # loop through all pairs of data points
+    crossdistmat <- matrix(NA_real_, nX, nY)
+    for (i in 1:nX) {
+      Xproi <- Xpro[i]
+      Xi <- P[i]
+      nbi1 <- from[Xproi]
+      nbi2 <- to[Xproi]
+      vi1 <- Lvert[nbi1]
+      vi2 <- Lvert[nbi2]   
+      dXi1 <- crossdist(Xi, vi1)
+      dXi2 <- crossdist(Xi, vi2)
+      for (j in 1:nY) {
+        Yj <- Q[j]
+        Yproj <- Ypro[j]
+        if(Xproi == Yproj) {
+          # points i and j lie on the same segment
+          # use Euclidean distance
+          d <- crossdist(Xi, Yj)
+        } else {
+          # shortest path from i to j passes through ends of segments
+          nbj1 <- from[Yproj]
+          nbj2 <- to[Yproj]
+          vj1 <- Lvert[nbj1]
+          vj2 <- Lvert[nbj2]
+          # Calculate shortest of 4 possible paths from i to j
+          d1Yj <- crossdist(vj1,Yj)
+          d2Yj <- crossdist(vj2,Yj)
+          d11 <- dXi1 + dpath[nbi1,nbj1] + d1Yj
+          d12 <- dXi1 + dpath[nbi1,nbj2] + d2Yj
+          d21 <- dXi2 + dpath[nbi2,nbj1] + d1Yj
+          d22 <- dXi2 + dpath[nbi2,nbj2] + d2Yj
+          d <- min(d11,d12,d21,d22)
+        }
+        # store result
+        crossdistmat[i,j] <- d
+      }
+    }
+  } else {
+    # C code
+    # convert indices to start at 0
+    from0 <- from - 1L
+    to0   <- to - 1L
+    Xsegmap <- Xpro - 1L
+    Ysegmap <- Ypro - 1L
+    zz <- .C("lincrossdist",
+             np = as.integer(nX),
+             xp = as.double(P$x),
+             yp = as.double(P$y),
+             nq = as.integer(nY),
+             xq = as.double(Q$x),
+             yq = as.double(Q$y),
+             nv = as.integer(Lvert$n),
+             xv = as.double(Lvert$x),
+             yv = as.double(Lvert$y),
+             ns = as.double(L$n),
+             from = as.integer(from0),
+             to = as.integer(to0),
+             dpath = as.double(dpath),
+             psegmap = as.integer(Xsegmap),
+             qsegmap = as.integer(Ysegmap),
+             answer = as.double(numeric(nX * nY)),
+             PACKAGE = "spatstat")
+    crossdistmat <- matrix(zz$answer, nX, nY)
+  }
+  return(crossdistmat)
+}
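+
+# Usage sketch ('simplenet' is a standard spatstat linear network):
+#   X <- runiflpp(4, simplenet)
+#   Y <- runiflpp(3, simplenet)
+#   crossdist(X, Y)   # 4 x 3 matrix of shortest-path distances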
diff --git a/R/cut.ppp.R b/R/cut.ppp.R
new file mode 100755
index 0000000..1b5ebda
--- /dev/null
+++ b/R/cut.ppp.R
@@ -0,0 +1,54 @@
+#
+#  cut.ppp.R
+#
+#  cut method for ppp objects
+#
+#  $Revision: 1.15 $   $Date: 2016/10/26 09:29:57 $
+#
+
+cut.ppp <- function(x, z=marks(x), ...) {
+  x <- as.ppp(x)
+  if(missing(z) || is.null(z)) {
+    z <- marks(x, dfok=TRUE)
+    if(is.null(z))
+      stop("x has no marks to cut")
+  }
+  if(is.character(z)) {
+    if(length(z) == npoints(x)) {
+      # interpret as a factor
+      z <- factor(z)
+    } else if((length(z) == 1L) && (z %in% colnames(df <- as.data.frame(x)))) {
+      # interpret as the name of a column of marks or coordinates
+      z <- df[, z]
+    } else stop("format of argument z not understood") 
+  }
+  if(is.factor(z) || is.vector(z)) {
+    stopifnot(length(z) == npoints(x))
+    g <- if(is.factor(z)) z else if(is.numeric(z)) cut(z, ...) else factor(z)
+    marks(x) <- g
+    return(x)
+  }
+  if(is.data.frame(z) || is.matrix(z)) {
+    stopifnot(nrow(z) == npoints(x))
+    # take first column 
+    z <- z[,1L]
+    g <- if(is.numeric(z)) cut(z, ...) else factor(z)
+    marks(x) <- g
+    return(x)
+  }
+  if(is.im(z)) 
+    return(cut(x, z[x, drop=FALSE], ...))
+
+  if(is.owin(z)) {
+    marks(x) <- factor(inside.owin(x$x, x$y, z), levels=c(FALSE, TRUE))
+    return(x)
+  }
+  
+  if(is.tess(z)) {
+    marks(x) <- tileindex(x$x, x$y, z)
+    return(x)
+  }
+
+  stop("Format of z not understood")
+} 
+
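+# Usage sketch ('longleaf' has numeric marks; quadrats() yields a tessellation):
+#   cut(longleaf, breaks=3)                  # classify numeric marks into 3 bands
+#   cut(longleaf, quadrats(longleaf, 2, 2))  # classify points by quadrat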
diff --git a/R/daogenton.R b/R/daogenton.R
new file mode 100644
index 0000000..42f93c6
--- /dev/null
+++ b/R/daogenton.R
@@ -0,0 +1,237 @@
+##
+##  daogenton.R
+##
+##  Dao-Genton adjusted p-values
+##
+##  $Revision: 1.14 $  $Date: 2017/06/05 10:31:58 $
+##
+
+bits.test <- function(X, ..., exponent=2, nsim=19,
+                    alternative=c("two.sided", "less", "greater"),
+                    leaveout=1, interpolate=FALSE,
+                    savefuns=FALSE, savepatterns=FALSE,
+                    verbose=TRUE) {
+  twostage.test(X, ..., exponent=exponent,
+                 nsim=nsim, nsimsub=nsim, reuse=FALSE, 
+                 alternative=match.arg(alternative),
+                 leaveout=leaveout, interpolate=interpolate,
+                 savefuns=savefuns, savepatterns=savepatterns,
+                 verbose=verbose,
+		 testblurb="Balanced Independent Two-stage Test") 
+}		    
+
+dg.test <- function(X, ..., exponent=2, nsim=19, nsimsub=nsim-1,
+                    alternative=c("two.sided", "less", "greater"),
+                    reuse=TRUE, leaveout=1, interpolate=FALSE,
+                    savefuns=FALSE, savepatterns=FALSE,
+                    verbose=TRUE) {
+  if(!missing(nsimsub) && !relatively.prime(nsim, nsimsub))
+    stop("nsim and nsimsub must be relatively prime")
+  twostage.test(X, ..., exponent=exponent,
+                 nsim=nsim, nsimsub=nsimsub, reuse=reuse, 
+                 alternative=match.arg(alternative),
+                 leaveout=leaveout, interpolate=interpolate,
+                 savefuns=savefuns, savepatterns=savepatterns,
+                 verbose=verbose,
+		 testblurb="Dao-Genton adjusted goodness-of-fit test")
+}		 
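+
+# Usage sketch (Dao-Genton test of CSR for the 'cells' data, based on the
+# L-function; nsim is kept small here purely for speed):
+#   dg.test(cells, Lest, nsim=19)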
+		    
+twostage.test <- function(X, ..., exponent=2, nsim=19, nsimsub=nsim,
+                    alternative=c("two.sided", "less", "greater"),
+                    reuse=FALSE, leaveout=1, interpolate=FALSE,
+                    savefuns=FALSE, savepatterns=FALSE,
+                    verbose=TRUE,
+		    testblurb="Two-stage Monte Carlo test") {
+  Xname <- short.deparse(substitute(X))
+  alternative <- match.arg(alternative)
+  env.here <- sys.frame(sys.nframe())
+  Xismodel <- is.ppm(X) || is.kppm(X) || is.lppm(X) || is.slrm(X)
+  # top-level test
+  if(verbose) cat("Applying first-stage test to original data... ")
+  tX <- envelopeTest(X, ...,
+                     nsim=nsim, alternative=alternative,
+                     leaveout=leaveout,
+                     interpolate=interpolate,
+                     exponent=exponent,
+                     savefuns=savefuns,
+                     savepatterns=savepatterns || reuse,
+                     verbose=FALSE,
+                     envir.simul=env.here)
+  pX <- tX$p.value
+  ## check special case
+  afortiori <- !interpolate && (nsimsub < nsim) &&
+               (pX == (1/(nsim+1)) || pX == 1)
+  if(afortiori) {
+    ## result is determined
+    padj <- pX
+    pY <- NULL
+  } else {
+    ## result is not yet determined
+    if(!reuse) {
+      if(verbose) cat("Repeating first-stage test... ")
+      tXX <- envelopeTest(X, ...,
+                          nsim=nsim, alternative=alternative,
+                          leaveout=leaveout,
+                          interpolate=interpolate,
+                          exponent=exponent,
+                          savefuns=savefuns, savepatterns=TRUE, verbose=FALSE,
+                          envir.simul=env.here)
+      ## extract simulated patterns 
+      Ylist <- attr(attr(tXX, "envelope"), "simpatterns")
+    } else {
+      Ylist <- attr(attr(tX, "envelope"), "simpatterns")
+    }
+    if(verbose) cat("Done.\n")
+    ## apply same test to each simulated pattern
+    if(verbose) cat(paste("Running second-stage tests on",
+                          nsim, "simulated patterns... "))
+    pY <- numeric(nsim)
+    for(i in 1:nsim) {
+      if(verbose) progressreport(i, nsim)
+      Yi <- Ylist[[i]]
+      ## if X is a model, fit it to Yi. Otherwise the implicit model is CSR.
+      if(Xismodel) Yi <- update(X, Yi)
+      tYi <- envelopeTest(Yi, ...,
+                          nsim=nsimsub, alternative=alternative,
+                          leaveout=leaveout,
+                          interpolate=interpolate,
+                          exponent=exponent, savepatterns=TRUE, verbose=FALSE,
+                          envir.simul=env.here)
+      pY[i] <- tYi$p.value
+    }
+    pY <- sort(pY)
+    ## compute adjusted p-value
+    padj <- (1 + sum(pY <= pX))/(1+nsim)
+  }
+  # pack up
+  method <- tX$method
+  method <- c(testblurb,
+              paste("based on", method[1L]),
+              paste("First stage:", method[2L]),
+              method[-(1:2)],
+              if(afortiori) {
+                paren(paste("Second stage was omitted: p0 =", pX,
+                            "implies p-value =", padj))
+              } else if(reuse) {
+                paste("Second stage: nested, ", nsimsub,
+                      "simulations for each first-stage simulation")
+              } else {
+                paste("Second stage:", nsim, "*", nsimsub,
+                      "nested simulations independent of first stage")
+              }
+              )
+  names(pX) <- "p0"
+  result <- structure(list(statistic = pX,
+                           p.value = padj,
+                           method = method,
+                           data.name = Xname),
+                      class="htest") 
+  attr(result, "rinterval") <- attr(tX, "rinterval")
+  attr(result, "pX") <- pX
+  attr(result, "pY") <- pY
+  if(savefuns || savepatterns)
+    result <- hasenvelope(result, attr(tX, "envelope"))
+  return(result)
+}
+
+dg.envelope <- function(X, ..., nsim=19,
+                        nsimsub=nsim-1,
+                        nrank=1,
+                        alternative=c("two.sided", "less", "greater"),
+                        leaveout=1,
+                        interpolate = FALSE,
+                        savefuns=FALSE, savepatterns=FALSE,
+                        verbose=TRUE) {
+  #  Xname <- short.deparse(substitute(X))
+  alternative <- match.arg(alternative)
+  env.here <- sys.frame(sys.nframe())
+  Xismodel <- is.ppm(X) || is.kppm(X) || is.lppm(X) || is.slrm(X)
+  # top-level test
+  if(verbose) cat("Applying test to original data... ")
+  tX <- envelopeTest(X, ...,
+                     alternative=alternative,
+                     leaveout=leaveout,
+                     interpolate = interpolate,
+                     nsim=nsim, nrank=nrank,
+                     exponent=Inf, savepatterns=TRUE, savefuns=TRUE,
+                     verbose=FALSE,
+                     envir.simul=env.here)
+  if(verbose) cat("Done.\n")
+  ## extract info
+  envX <- attr(tX, "envelope")
+  ## extract simulated patterns 
+  Ylist <- attr(envX, "simpatterns")
+  ##     SimFuns <- attr(envX, "simfuns")
+  # apply same test to each simulated pattern
+  if(verbose) cat(paste("Running tests on", nsim, "simulated patterns... "))
+  pvalY <- numeric(nsim)
+  for(i in 1:nsim) {
+    if(verbose) progressreport(i, nsim)
+    Yi <- Ylist[[i]]
+    # if X is a model, fit it to Yi. Otherwise the implicit model is CSR.
+    if(Xismodel) Yi <- update(X, Yi)
+    tYi <- envelopeTest(Yi, ...,
+                        alternative=alternative,
+                        leaveout=leaveout,
+                        interpolate = interpolate, save.interpolant = FALSE,
+                        nsim=nsimsub, nrank=nrank,
+                        exponent=Inf, savepatterns=TRUE, verbose=FALSE,
+                        envir.simul=env.here)
+    pvalY[i] <- tYi$p.value 
+  }
+  ## Find critical deviation
+  if(!interpolate) {
+    ## find critical rank 'l'
+    rankY <- pvalY * (nsimsub + 1)
+    dg.rank <- sort(rankY, na.last=TRUE)[nrank]
+    if(verbose) cat("dg.rank=", dg.rank, fill=TRUE)
+    ## extract deviation values from top-level simulation
+    simdev <- attr(tX, "statistics")[["sim"]]
+    ## find critical deviation
+    dg.crit <- sort(simdev, decreasing=TRUE, na.last=TRUE)[dg.rank]
+    if(verbose) cat("dg.crit=", dg.crit, fill=TRUE)
+  } else {
+    ## compute estimated cdf of t
+    fhat <- attr(tX, "density")[c("x", "y")]
+    fhat$z <- with(fhat, cumsum(y)/sum(y))  # 'within' upsets package checker
+    ## find critical (second stage) p-value
+    pcrit <- sort(pvalY, na.last=TRUE)[nrank]
+    ## compute corresponding upper quantile of estimated density of t
+    dg.crit <- with(fhat, { min(x[z >= 1 - pcrit]) })
+  }
+  ## make fv object, for now
+  refname <- if("theo" %in% names(envX)) "theo" else "mmean"
+  fname <- attr(envX, "fname")
+  result <- (as.fv(envX))[, c(fvnames(envX, ".x"),
+                            fvnames(envX, ".y"),
+                            refname)]
+  refval <- envX[[refname]]
+  ## 
+  newdata <- data.frame(hi=refval + dg.crit,
+                        lo=refval - dg.crit)
+  newlabl <- c(makefvlabel(NULL, NULL, fname, "hi"),
+               makefvlabel(NULL, NULL, fname, "lo"))
+  alpha <- nrank/(nsim+1)
+  alphatext <- paste0(100*alpha, "%%")
+  newdesc <- c(paste("upper", alphatext, "critical boundary for %s"),
+               paste("lower", alphatext, "critical boundary for %s"))
+  switch(alternative,
+         two.sided = { },
+         less = {
+           newdata$hi <- Inf
+           newlabl[1L] <- "infinity"
+           newdesc[1L] <- "infinite upper limit"
+         },
+         greater = {
+           newdata$lo <- -Inf
+           newlabl[2L] <- "infinity"
+           newdesc[2L] <- "infinite lower limit"
+         })
+  result <- bind.fv(result, newdata, newlabl, newdesc)
+  fvnames(result, ".") <- rev(fvnames(result, "."))
+  fvnames(result, ".s") <- c("lo", "hi")
+  if(savefuns || savepatterns)
+    result <- hasenvelope(result, envX)
+  return(result)
+}
+
diff --git a/R/datasetup.R b/R/datasetup.R
new file mode 100755
index 0000000..cc57510
--- /dev/null
+++ b/R/datasetup.R
@@ -0,0 +1,13 @@
+#
+#   When the package is installed, this tells us 
+#   the directory where the .tab files are stored
+#
+#   Typically data/murgatroyd.R reads data-raw/murgatroyd.tab
+#   and applies special processing
+#
+spatstat.rawdata.location <- function(...) {
+    locn <- system.file("data-raw", package="spatstat")
+    if(length(list(...)) != 0) 
+      locn <- paste(c(locn, ...), collapse=.Platform$file.sep)
+    return(locn)
+}
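+
+# Usage sketch ('murgatroyd' is the hypothetical example from the comment above):
+#   spatstat.rawdata.location()                  # the data-raw folder
+#   spatstat.rawdata.location("murgatroyd.tab")  # full path to one raw file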
diff --git a/R/dclftest.R b/R/dclftest.R
new file mode 100644
index 0000000..84b85ae
--- /dev/null
+++ b/R/dclftest.R
@@ -0,0 +1,360 @@
+#
+#  dclftest.R
+#
+#  $Revision: 1.35 $  $Date: 2016/12/30 01:44:07 $
+#
+#  Monte Carlo tests for CSR (etc)
+#
+
+clf.test <- function(...) {
+ .Deprecated("dclf.test", package="spatstat")
+ dclf.test(...)
+}
+
+dclf.test <- function(X, ...,
+                      alternative=c("two.sided", "less", "greater"),
+                      rinterval=NULL, leaveout=1, scale=NULL, clamp=FALSE, 
+                      interpolate=FALSE) {
+  Xname <- short.deparse(substitute(X))
+  envelopeTest(X, ..., exponent=2, alternative=alternative,
+                       rinterval=rinterval, leaveout=leaveout,
+                       scale=scale, clamp=clamp, interpolate=interpolate,
+                       Xname=Xname)
+}
+
+mad.test <- function(X, ...,
+                     alternative=c("two.sided", "less", "greater"),
+                     rinterval=NULL, leaveout=1, scale=NULL, clamp=FALSE,
+                     interpolate=FALSE) {
+  Xname <- short.deparse(substitute(X))
+  envelopeTest(X, ..., exponent=Inf, alternative=alternative,
+               rinterval=rinterval, leaveout=leaveout, 
+               scale=scale, clamp=clamp, interpolate=interpolate,
+               Xname=Xname)
+}
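+
+# Usage sketch (Monte Carlo tests of CSR for the 'cells' data, based on the
+# L-function):
+#   dclf.test(cells, Lest, nsim=99)  # integrated squared deviation
+#   mad.test(cells, Lest, nsim=99)   # maximum absolute deviation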
+
+## measure deviation of summary function
+## leaveout = 0: typically 'ref' is exact theoretical value
+##               Compute raw deviation.
+## leaveout = 1: 'ref' is mean of simulations *and* observed.
+##               Use algebra to compute leave-one-out deviation.
+## leaveout = 2: 'ref' is mean of simulations
+##               Use algebra to compute leave-two-out deviation.
+
+Deviation <- function(x, ref, leaveout, n, xi=x) {
+  if(leaveout == 0) return(x-ref)
+  if(leaveout == 1) return((x-ref) * (n+1)/n)
+  jackmean <- (n * ref - xi)/(n-1)
+  return(x - jackmean)
+}
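+
+## Worked example (illustrative): n = 3 simulated values 2, 4, 6; observed x = 8.
+##   leaveout=0, theoretical ref = 4:      8 - 4 = 4
+##   leaveout=1, ref = mean(2,4,6,8) = 5:  (8 - 5) * 4/3 = 4, i.e. 8 minus
+##     the mean of the other three values, mean(c(2,4,6)) = 4
+##   leaveout=2, ref = mean(2,4,6) = 4, xi = 2:
+##     jackmean = (3*4 - 2)/2 = 5, deviation = 8 - 5 = 3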
+
+## Evaluate signed or absolute deviation,  
+## taking account of alternative hypothesis and possible scaling
+## (Large positive values always favorable to alternative)
+
+RelevantDeviation <- local({
+  
+  positivepart <- function(x) {
+    d <- dim(x)
+    y <- pmax(0, x)
+    if(!is.null(d)) y <- matrix(y, d[1L], d[2L])
+    return(y)
+  }
+
+  negativepart <- function(x) positivepart(-x)
+
+  RelevantDeviation <- function(x, alternative, clamp=FALSE, scaling=NULL) {
+    if(!is.null(scaling)) x <- x/scaling
+    switch(alternative,
+           two.sided = abs(x),
+           less = if(clamp) negativepart(x) else -x,
+           greater = if(clamp) positivepart(x) else x)
+  }
+
+  RelevantDeviation
+})
+
+  
+## workhorse function
+
+envelopeTest <-
+  function(X, ...,
+           exponent=1,
+           alternative=c("two.sided", "less", "greater"),
+           rinterval=NULL,
+           leaveout=1,
+           scale=NULL,
+           clamp=FALSE,
+           tie.rule=c("randomise","mean"),
+           interpolate=FALSE,
+           save.interpolant = TRUE,
+           save.envelope = savefuns || savepatterns,
+           savefuns = FALSE, 
+           savepatterns = FALSE,
+           Xname=NULL,
+           verbose=TRUE) {
+    if(is.null(Xname)) Xname <- short.deparse(substitute(X))
+    tie.rule <- match.arg(tie.rule)
+    alternative <- match.arg(alternative)
+    if(!(leaveout %in% 0:2))
+      stop("Argument leaveout should equal 0, 1 or 2")
+    force(save.envelope)
+    check.1.real(exponent)
+    explain.ifnot(exponent >= 0)
+    deviationtype <- switch(alternative,
+                            two.sided = "absolute",
+                            greater = if(clamp) "positive" else "signed",
+                            less = if(clamp) "negative" else "signed")
+    deviationblurb <- paste(deviationtype, "deviation")
+    ## compute or extract simulated functions
+    X <- envelope(X, ...,
+                  savefuns=TRUE, savepatterns=savepatterns,
+                  Yname=Xname, verbose=verbose)
+    Y <- attr(X, "simfuns")
+    ## extract values
+    r   <- with(X, .x)
+    obs <- with(X, .y)
+    sim <- as.matrix(as.data.frame(Y))[, -1L]
+    nsim <- ncol(sim)
+    nr <- length(r)
+    ## choose function as reference
+    has.theo <- ("theo" %in% names(X))
+    use.theo <- identical(attr(X, "einfo")$use.theory, TRUE)
+    if(use.theo && !has.theo)
+      warning("No theoretical function available; use.theory ignored")
+    if(use.theo && has.theo) {
+      theo.used <- TRUE
+      reference <- with(X, theo)
+      leaveout <- 0
+    } else {
+      theo.used <- FALSE
+      if(leaveout == 2) {
+        ## use sample mean of simulations only
+        reference <- apply(sim, 1L, mean, na.rm=TRUE)
+      } else {
+        ## use sample mean of simulations *and* observed 
+        reference <- apply(cbind(sim, obs), 1L, mean, na.rm=TRUE)
+      }
+    }
+    ## determine interval of r values for computation
+    rok <- r
+    if(!is.null(rinterval)) {
+      check.range(rinterval)
+      if(max(r) < rinterval[2L]) {
+        oldrinterval <- rinterval
+        rinterval <- intersect.ranges(rinterval, range(r), fatal=FALSE)
+        if(is.null(rinterval))
+          stop(paste("The specified rinterval",
+                     prange(oldrinterval),
+                     "has empty intersection",
+                     "with the range of r values",
+                     prange(range(r)), 
+                     "computed by the summary function"),
+               call.=FALSE)
+        if(verbose)
+          warning(paste("The interval", prange(oldrinterval),
+                        "is too large for the available data;",
+                        "it has been trimmed to", prange(rinterval)))
+      }
+      ok <- (rinterval[1L] <= r & r <= rinterval[2L])
+      nr <- sum(ok)
+      if(nr == 0) {
+        ## rinterval is very short: pick nearest r value
+        ok <- which.min(abs(r - mean(rinterval)))
+        nr <- 1L
+      }
+      rok <- r[ok]
+      obs <- obs[ok]
+      sim <- sim[ok, , drop=FALSE]
+      reference <- reference[ok]
+    } else {
+      rinterval <- range(r)
+      bad <- !matrowall(is.finite(as.matrix(X)))
+      if(any(bad)) {
+        if(bad[1L] && !any(bad[-1L])) {
+          ## ditch r = 0
+          rinterval <- c(r[2L], max(r))
+          if(verbose)
+            warning(paste("Some function values were infinite or NaN",
+                          "at distance r = 0;",
+                          "interval of r values was reset to",
+                          prange(rinterval)))
+          ok <- (rinterval[1L] <= r & r <= rinterval[2L])
+          rok <- r[ok]
+          obs <- obs[ok]
+          sim <- sim[ok, ]
+          reference <- reference[ok]
+          nr <- sum(ok)
+        } else {
+          ## problem
+          rbadmax <- paste(max(r[bad]), summary(unitname(X))$plural)
+          stop(paste("Some function values were infinite or NaN",
+                     "at distances r up to",
+                     paste(rbadmax, ".", sep=""),
+                     "Please specify a shorter", sQuote("rinterval")))
+        }
+      } 
+    } 
+
+    ## determine rescaling if any
+    if(is.null(scale)) {
+      scaling <- NULL
+    } else if(is.function(scale)) {
+      scaling <- scale(rok)
+      sname <- "scale(r)"
+      ans <- check.nvector(scaling, nr, things="values of r",
+                           fatal=FALSE, vname=sname)
+      if(!ans)
+        stop(attr(ans, "whinge"), call.=FALSE)
+      if(any(bad <- (scaling <= 0))) {
+        ## issue a warning unless this only happens at r=0
+        if(any(bad[rok > 0]))
+          warning(paste("Some values of", sname, "were negative or zero:",
+                        "scale was reset to 1 for these values"),
+                  call.=FALSE)
+        scaling[bad] <- 1
+      }
+    } else stop("Argument scale should be a function")
+
+    ## compute deviations
+    rawdevDat <- Deviation(obs, reference, leaveout, nsim, sim[,1L])
+    rawdevSim <- Deviation(sim, reference, leaveout, nsim)
+    ## evaluate signed/absolute deviation relevant to alternative
+    ddat <- RelevantDeviation(rawdevDat, alternative, clamp, scaling)
+    dsim <- RelevantDeviation(rawdevSim, alternative, clamp, scaling)
+
+    ## compute test statistic
+    if(is.infinite(exponent)) {
+      ## MAD
+      devdata <- max(ddat)
+      devsim <- apply(dsim, 2, max)
+      names(devdata) <- "mad"
+      testname <- paste("Maximum", deviationblurb, "test")
+      statisticblurb <- paste("Maximum", deviationblurb)
+    } else {
+      L <- if(nr > 1) diff(rinterval) else 1
+      if(exponent == 2) {
+        ## Cramer-von Mises
+        ddat2 <- if(clamp) ddat^2 else (sign(ddat) * ddat^2)
+        dsim2 <- if(clamp) dsim^2 else (sign(dsim) * dsim^2)
+        devdata <- L * mean(ddat2)
+        devsim  <- L * .colMeans(dsim2, nr, nsim)
+        names(devdata) <- "u"
+        testname <- "Diggle-Cressie-Loosmore-Ford test"
+        statisticblurb <- paste("Integral of squared", deviationblurb)
+      } else if(exponent == 1) {
+        ## integral absolute deviation
+        devdata <- L * mean(ddat)
+        devsim  <- L * .colMeans(dsim, nr, nsim)
+        names(devdata) <- "L1"
+        testname <- paste("Integral", deviationblurb, "test")
+        statisticblurb <- paste("Integral of", deviationblurb)
+      } else {
+        ## general p
+        if(clamp) {
+          ddatp <- ddat^exponent
+          dsimp <- dsim^exponent
+        } else {
+          ddatp <- sign(ddat) * (abs(ddat)^exponent)
+          dsimp <- sign(dsim) * (abs(dsim)^exponent)
+        }
+        devdata <- L * mean(ddatp)
+        devsim  <- L * .colMeans(dsimp, nr, nsim)
+        names(devdata) <- "Lp"
+        testname <- paste("Integrated",
+                          ordinal(exponent), "Power Deviation test")
+        statisticblurb <- paste("Integral of",
+                                ordinal(exponent), "power of",
+                                deviationblurb)
+      }
+    }
+    if(!interpolate) {
+      ## standard Monte Carlo test 
+      ## compute rank and p-value
+      datarank <- sum(devdata < devsim) + 1
+      nties <- sum(devdata == devsim)
+      if(nties > 0) {
+        tierank <- switch(tie.rule,
+                          mean = nties/2,
+                          randomise = sample(1:nties, 1L))
+        datarank <- datarank + tierank
+        if(verbose) message("Ties were encountered")
+      }
+      pvalue <- datarank/(nsim+1)
+      ## bookkeeping
+      statistic <- data.frame(devdata, rank=datarank)
+      colnames(statistic)[1L] <- names(devdata)
+    } else {
+      ## Dao-Genton style interpolation
+      fhat <- density(devsim)
+      pvalue <- with(fhat, {
+        if(max(x) <= devdata) 0 else
+        mean(y[x >= devdata]) * (max(x) - devdata)
+      })
+      statistic <- data.frame(devdata)
+      colnames(statistic)[1L] <- names(devdata)
+      nties <- 0
+    }
+    e <- attr(X, "einfo")
+    nullmodel <-
+      if(identical(e$csr, TRUE)) "CSR" else 
+    if(!is.null(e$simtype)) {
+      switch(e$simtype,
+             csr = "CSR",
+             rmh = paste("fitted",
+               if(identical(e$pois, TRUE)) "Poisson" else "Gibbs",
+               "model"),
+             kppm = "fitted cluster model",
+             expr = "model simulated by evaluating expression",
+             list = "model simulated by drawing patterns from a list",
+             "unrecognised model")
+    } else "unrecognised model"
+    fname <- deparse(attr(X, "ylab"))
+    uname <- with(summary(unitname(X)),
+                  if(!vanilla) paste(plural, explain) else NULL)
+    testtype <- paste0(if(interpolate) "Interpolated " else NULL,
+                       "Monte Carlo")
+    scaleblurb <- if(is.null(scale)) NULL else
+                  paste("Scale function:", paste(deparse(scale), collapse=" "))
+    refblurb <- if(theo.used) "theoretical" else "sample mean"
+    leaveblurb <- if(leaveout == 0) paste("observed minus", refblurb) else
+                  if(leaveout == 1) "leave-one-out" else "leave-two-out"
+    testname <- c(paste(testname, "of", nullmodel),
+                  paste(testtype, "test based on", nsim,
+                        "simulations", e$constraints), 
+                  paste("Summary function:", fname),
+                  paste("Reference function:", refblurb),
+                  paste("Alternative:", alternative),
+                  paste("Interval of distance values:",
+                        prange(rinterval), uname),
+                  scaleblurb,
+                  paste("Test statistic:", statisticblurb),
+                  paste("Deviation =", leaveblurb)
+                  )
+    result <- structure(list(statistic = statistic,
+                             p.value = pvalue,
+                             method = testname,
+                             data.name = e$Yname),
+                        class="htest")
+    attr(result, "rinterval") <- rinterval
+    if(save.interpolant && interpolate)
+      attr(result, "density") <- fhat
+    if(save.envelope) {
+      result <- hasenvelope(result, X)
+      attr(result, "statistics") <- list(data=devdata, sim=devsim)
+      attr(result, "info") <- list(exponent=exponent,
+                                   alternative=alternative,
+                                   nties=nties,
+                                   leaveout=leaveout,
+                                   interpolate=interpolate,
+                                   scale=scale, clamp=clamp,
+                                   tie.rule=tie.rule,
+                                   use.theo=use.theo)
+    }
+    return(result)
+  }
+
+
+
+    
+   
diff --git a/R/defaultwin.R b/R/defaultwin.R
new file mode 100755
index 0000000..f776a16
--- /dev/null
+++ b/R/defaultwin.R
@@ -0,0 +1,54 @@
+#
+#
+#  defaultwin.R
+#
+#   $Revision: 1.10 $   $Date: 2015/10/21 09:06:57 $
+#
+
+default.expand <- function(object, m=2, epsilon=1e-6, w=Window(object)) {
+  stopifnot(is.ppm(object) || inherits(object, "rmhmodel"))
+  # no expansion necessary if model is Poisson
+  if(is.poisson(object))
+    return(.no.expansion)
+  # default is no expansion if model is nonstationary
+  if(!is.stationary(object))
+    return(.no.expansion)
+  
+# Redundant since a non-expandable model is non-stationary
+#  if(!is.expandable(object))
+#    return(.no.expansion)
+  
+  # rule is to expand data window by distance d = m * reach
+  rr <- reach(object, epsilon=epsilon)
+  if(!is.finite(rr))
+    return(rmhexpand())
+  if(!is.numeric(m) || length(m) != 1 || m < 1)
+    stop("m should be a single number >= 1")
+  mr <- m * rr
+  rule <- rmhexpand(distance = mr)
+  # 
+  if(is.owin(w)) {
+    # apply rule to window
+    wplus <- expand.owin(w, rule)
+    # save as new expansion rule
+    rule <- rmhexpand(wplus)
+  }
+  return(rule)
+}
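+
+# Usage sketch (Strauss model fitted to 'cells'; reach(fit) = 0.1, so the
+# default rule expands the window by distance m * reach = 0.2):
+#   fit <- ppm(cells ~ 1, Strauss(0.1))
+#   default.expand(fit)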
+
+default.clipwindow <- function(object, epsilon=1e-6) {
+  stopifnot(is.ppm(object) || inherits(object, "rmhmodel"))
+  # data window
+  w <- as.owin(object)
+  if(is.null(w)) return(NULL)
+  # interaction range of model
+  rr <- reach(object, epsilon=epsilon)
+  if(!is.finite(rr))
+    return(NULL)
+  if(rr == 0)
+    return(w)
+  else
+    return(erosion(w, rr))
+}
+
+  
diff --git a/R/deldir.R b/R/deldir.R
new file mode 100755
index 0000000..abeb5f1
--- /dev/null
+++ b/R/deldir.R
@@ -0,0 +1,365 @@
+#
+# deldir.R
+#
+# Interface to deldir package
+#
+#  $Revision: 1.28 $ $Date: 2017/06/05 10:31:58 $
+#
+
+.spst.triEnv <- new.env()
+
+assign("use.trigraf",  TRUE, envir=.spst.triEnv)
+assign("use.trigrafS", TRUE, envir=.spst.triEnv)
+assign("debug.delaunay", FALSE, envir=.spst.triEnv)
+
+dirichlet <- local({
+
+  dirichlet <- function(X) {
+    stopifnot(is.ppp(X))
+    X <- unique(X, rule="deldir", warn=TRUE)
+    w <- X$window
+    dd <- safedeldir(X)
+    if(is.null(dd)) return(NULL)
+    pp <- lapply(tile.list(dd), df2poly)
+    if(length(pp) == npoints(X))
+      names(pp) <- seq_len(npoints(X))
+    dir <- tess(tiles=pp, window=as.rectangle(w))
+    if(w$type != "rectangle")
+      dir <- intersect.tess(dir, w)
+    return(dir)
+  }
+
+  df2poly <- function(z) { owin(poly=z[c("x","y")]) }
+  
+  dirichlet
+})
+
+delaunay <- function(X) {
+  stopifnot(is.ppp(X))
+  X <- unique(X, rule="deldir", warn=TRUE)
+  nX <- npoints(X)
+  if(nX < 3) return(NULL)
+  w <- X$window
+  dd <- safedeldir(X)
+  if(is.null(dd)) return(NULL)
+  a <- dd$delsgs[,5L]
+  b <- dd$delsgs[,6L]
+  use.trigraf  <- get("use.trigraf", envir=.spst.triEnv)
+  use.trigrafS <- get("use.trigrafS", envir=.spst.triEnv)
+  debug.delaunay <- get("debug.delaunay", envir=.spst.triEnv)
+  if(use.trigrafS) {
+    # first ensure a[] < b[]
+    swap <- (a > b)
+    if(any(swap)) {
+      oldb <- b
+      b[swap] <- a[swap]
+      a[swap] <- oldb[swap]
+    }
+    # next ensure a is sorted
+    o <- order(a, b)
+    a <- a[o]
+    b <- b[o]
+    # 
+    nv <- nX
+    ne <- length(a)
+    ntmax <- ne
+    z <- .C("trigrafS",
+            nv = as.integer(nv),
+            ne = as.integer(ne),
+            ie = as.integer(a),
+            je = as.integer(b),
+            ntmax = as.integer(ntmax),
+            nt = as.integer(integer(1L)),
+            it = as.integer(integer(ne)),
+            jt = as.integer(integer(ne)),
+            kt = as.integer(integer(ne)),
+            status = as.integer(integer(1L)),
+            PACKAGE = "spatstat")
+    if(z$status != 0)
+      stop("Internal error: overflow in trigrafS")
+    tlist <- with(z, cbind(it, jt, kt)[1:nt, ])
+  } else if(use.trigraf) {
+    nv <- nX
+    ne <- length(a)
+    ntmax <- ne
+    z <- .C("trigraf",
+            nv = as.integer(nv),
+            ne = as.integer(ne),
+            ie = as.integer(a),
+            je = as.integer(b),
+            ntmax = as.integer(ntmax),
+            nt = as.integer(integer(1L)),
+            it = as.integer(integer(ntmax)),
+            jt = as.integer(integer(ntmax)),
+            kt = as.integer(integer(ntmax)),
+            status = as.integer(integer(1L)),
+            PACKAGE = "spatstat")
+    if(z$status != 0)
+      stop("Internal error: overflow in trigraf")
+    tlist <- with(z, cbind(it, jt, kt)[1:nt, ])
+  } else {
+    tlist <- matrix(integer(0), 0, 3)
+    for(i in seq_len(nX)) {
+      # find all Delaunay neighbours of i 
+      jj <- c(b[a==i], a[b==i])
+      jj <- sort(unique(jj))
+      # select those with a higher index than i
+      jj <- jj[jj > i]
+      # find pairs of neighbours which are Delaunay neighbours
+      # (thus, triangles where the first numbered vertex is i)
+      if(length(jj) > 0) 
+        for(j in jj) {
+          kk <- c(b[a == j], a[b == j])
+          kk <- kk[(kk %in% jj) & (kk > j)]
+          if(length(kk) > 0)
+            for(k in kk) 
+              # add (i,j,k) to list of triangles (i < j < k)
+              tlist <- rbind(tlist, c(i, j, k))
+        }
+    }
+  }
+  # At this point, `tlist' contains all triangles formed by the Delaunay edges,
+  # with vertices given in ascending order i < j < k in the 3 columns of tlist.
+  # Some of these triangles may not belong to the Delaunay triangulation.
+  # They will be weeded out later.
+  
+  # Assemble coordinates of triangles
+  x <- X$x
+  y <- X$y
+  xtri <- matrix(x[tlist], nrow(tlist), 3L)
+  ytri <- matrix(y[tlist], nrow(tlist), 3L)
+  # ensure triangle vertices are in anticlockwise order
+  ztri <- ytri - min(y)
+  dx <- cbind(xtri[,2L]-xtri[,1L], xtri[,3L]-xtri[,2L], xtri[,1L]-xtri[,3L])
+  zm <- cbind(ztri[,1L]+ztri[,2L], ztri[,2L]+ztri[,3L], ztri[,3L]+ztri[,1L])
+  negareas <- apply(dx * zm, 1L, sum)
+  clockwise <- (negareas > 0)
+  #
+  if(any(clockwise)) {
+    xc <- xtri[clockwise, , drop=FALSE]
+    yc <- ytri[clockwise, , drop=FALSE]
+    tc <- tlist[clockwise, , drop=FALSE]
+    xtri[clockwise,]  <- xc[,c(1L,3L,2L)]
+    ytri[clockwise,]  <- yc[,c(1L,3L,2L)]
+    tlist[clockwise,] <- tc[, c(1L,3L,2L)]
+  }
+  # At this point, triangle vertices are listed in anticlockwise order.
+  # The same directed edge (i, j) cannot appear twice.
+  # To weed out invalid triangles, check for such duplication
+  triedges <- rbind(tlist[, c(1L,2L)],
+                    tlist[, c(2L,3L)],
+                    tlist[, c(3L,1L)])
+  if(any(bad <- duplicated(triedges))) {
+    badedges <- unique(triedges[bad, , drop=FALSE])
+    ntri <- nrow(tlist)
+    triid <- rep.int(seq_len(ntri), 3)
+    illegal <- rep.int(FALSE, ntri)
+    for(j in seq_len(nrow(badedges))) {
+      from <- badedges[j, 1L]
+      to   <- badedges[j, 2L]
+      if(debug.delaunay)
+        cat(paste("Suspect edge from vertex", from, "to vertex", to, "\n"))
+      # find all triangles sharing this edge in this orientation
+      sustri <- triid[(triedges[,1L] == from) & (triedges[,2L] == to)]
+      if(debug.delaunay)
+        cat(paste("\tInvestigating triangles", commasep(sustri), "\n"))
+      # list all vertices associated with the suspect triangles
+      susvert <- sort(unique(as.vector(tlist[sustri, ])))
+      if(debug.delaunay)
+        cat(paste("\tInvestigating vertices", commasep(susvert), "\n"))
+      xsusvert <- x[susvert]
+      ysusvert <- y[susvert]
+      # take each triangle in turn and check whether it contains a data point
+      for(k in sustri) {
+        if(!illegal[k] &&
+           any(inside.triangle(xsusvert, ysusvert, xtri[k,], ytri[k,]))) {
+          if(debug.delaunay)
+            cat(paste("Triangle", k, "is illegal\n"))
+          illegal[k] <- TRUE
+        }
+      }
+    }
+    if(!any(illegal)) {
+      if(debug.delaunay)
+        cat("No illegal triangles found\n")
+    } else {
+      if(debug.delaunay)
+        cat(paste("Removing", sum(illegal), "triangles\n"))
+      tlist <- tlist[!illegal, , drop=FALSE]
+      xtri  <- xtri[!illegal, , drop=FALSE]
+      ytri  <- ytri[!illegal, , drop=FALSE]
+    }
+  }
+  # make tile list
+  tiles <- list()
+  for(m in seq_len(nrow(tlist))) {
+    p <- list(x=xtri[m,], y=ytri[m,])
+    tiles[[m]] <- owin(poly=p, check=FALSE)
+  }
+
+  wc <- convexhull.xy(x, y)
+  del <- tess(tiles=tiles, window=wc)
+  if(w$type != "rectangle")
+    del <- intersect.tess(del, w)
+  return(del)
+}
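+
+# Usage sketch:
+#   X <- runifpoint(20)
+#   plot(dirichlet(X))  # Dirichlet/Voronoi tessellation
+#   plot(delaunay(X))   # Delaunay triangulation as a tessellation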
+
+delaunayDistance <- function(X) {
+  stopifnot(is.ppp(X))
+  nX <- npoints(X)
+  w <- as.owin(X)
+  ok <- !duplicated(X, rule="deldir")
+  Y <- X[ok] 
+  nY <- npoints(Y)
+  if(nY < 3) 
+    return(matrix(Inf, nX, nX))
+  dd <- deldir(Y$x, Y$y, rw=c(w$xrange,w$yrange))
+  if(is.null(dd)) return(NULL)
+  joins <- as.matrix(dd$delsgs[,5:6])
+  joins <- rbind(joins, joins[,2:1])
+  d <- matrix(-1L, nY, nY)
+  diag(d) <- 0
+  d[joins] <- 1
+  adj <- matrix(FALSE, nY, nY)
+  diag(adj) <- TRUE
+  adj[joins] <- TRUE
+  z <- .C("Idist2dpath",
+          nv = as.integer(nY),
+          d = as.integer(d), 
+          adj = as.integer(adj),
+          dpath = as.integer(integer(nY * nY)),
+          tol = as.integer(0),
+          niter = as.integer(integer(1L)), 
+          status = as.integer(integer(1L)),
+          PACKAGE = "spatstat")
+  if (z$status == -1L)
+    warning(paste("graph connectivity algorithm did not converge after", 
+                  z$niter, "iterations", "on", nY, "vertices and", 
+                  sum(adj) - nY, "edges"))
+  dpathY <- matrix(z$dpath, nY, nY)
+  if(all(ok)) {
+    dpathX <- dpathY
+  } else {
+    dpathX <- matrix(NA_integer_, nX, nX)
+    dpathX[ok, ok] <- dpathY
+  }
+  return(dpathX)
+}
+
+safedeldir <- function(X) {
+  rw <- with(X$window, c(xrange,yrange))
+  dd <- try(deldir(X$x, X$y, rw=rw))
+  if(!inherits(dd, "try-error") && inherits(dd, "deldir"))
+    return(dd)
+  warning("deldir failed; re-trying with slight perturbation of coordinates.",
+          call.=FALSE)
+  Y <- rjitter(X, mean(nndist(X))/100)
+  dd <- try(deldir(Y$x, Y$y, rw=rw))
+  if(!inherits(dd, "try-error") && inherits(dd, "deldir"))
+    return(dd)
+  warning("deldir failed even after perturbation of coordinates.", call.=FALSE)
+  return(NULL)
+}
+
+dirichletVertices <- function(X) {
+  DT <- tiles(dirichlet(X))
+  xy <- do.call(concatxy, lapply(DT, vertices))
+  Y <- unique(ppp(xy$x, xy$y, window=Window(X), check=FALSE))
+  b <- bdist.points(Y)
+  thresh <- diameter(Frame(X))/1000
+  Y <- Y[b > thresh]
+  return(Y)
+}
+
+dirichletAreas <- function(X) {
+  stopifnot(is.ppp(X))
+  X <- unmark(X)
+  win <- Window(X)
+  dup <- duplicated(X, rule="deldir")
+  if((anydup <- any(dup))) {
+    oldX <- X
+    X <- X[!dup]
+  }
+  switch(win$type,
+         rectangle = {
+           rw <- c(win$xrange, win$yrange)
+           dd <- deldir(X$x, X$y, dpl=NULL, rw=rw)
+           w <- dd$summary[, 'dir.area']
+         },
+         polygonal = {
+           w <- tile.areas(dirichlet(X))
+         },
+         mask = {
+           #' Nearest data point to each pixel:
+           tileid <- exactdt(X)$i
+           #' Restrict to window (result is a vector - OK)
+           tileid <- tileid[win$m]
+           #' Count pixels in each tile
+           id <- factor(tileid, levels=seq_len(X$n))
+           counts <- table(id)
+           #' Convert to digital area
+           pixelarea <- win$xstep * win$ystep
+           w <- pixelarea * as.numeric(counts)
+         })
+  if(!anydup)
+    return(w)
+  oldw <- numeric(npoints(oldX))
+  oldw[!dup] <- w
+  return(oldw)
+}
+
+delaunayNetwork <- function(X) {
+  stopifnot(is.ppp(X))
+  X <- unique(X, rule="deldir")
+  nX <- npoints(X)
+  if(nX == 0) return(NULL)
+  if(nX == 1L) return(linnet(X, !diag(TRUE)))
+  if(nX == 2L) return(linnet(X, !diag(c(TRUE,TRUE))))
+  dd <- safedeldir(X)
+  if(is.null(dd)) 
+    return(NULL)
+  joins <- as.matrix(dd$delsgs[, 5:6])
+  return(linnet(X, edges=joins))
+}
+
+dirichletEdges <- function(X) {
+  stopifnot(is.ppp(X))
+  X <- unique(X, rule="deldir")
+  nX <- npoints(X)
+  W <- Window(X)
+  if(nX < 2)
+    return(edges(W))
+  dd <- safedeldir(X)
+  if(is.null(dd))
+    return(edges(W))
+  return(as.psp(dd$dirsgs[,1:4], window=W))
+}
+
+dirichletNetwork <- function(X, ...) as.linnet(dirichletEdges(X), ...)
+
+## deprecated older names
+
+delaunay.distance <- function(...) {
+  .Deprecated("delaunayDistance", package="spatstat")
+  delaunayDistance(...)
+}
+
+delaunay.network <- function(...) {
+  .Deprecated("delaunayNetwork", package="spatstat")
+  delaunayNetwork(...)
+}
+
+dirichlet.edges <- function(...) {
+  .Deprecated("dirichletEdges", package="spatstat")
+  dirichletEdges(...)
+}
+
+dirichlet.network <- function(...) {
+  .Deprecated("dirichletNetwork", package="spatstat")
+  dirichletNetwork(...)
+}
+
+dirichlet.vertices <- function(...) {
+  .Deprecated("dirichletVertices", package="spatstat")
+  dirichletVertices(...)
+}
diff --git a/R/deltametric.R b/R/deltametric.R
new file mode 100755
index 0000000..5171b02
--- /dev/null
+++ b/R/deltametric.R
@@ -0,0 +1,39 @@
+#
+#   deltametric.R
+#
+#   Delta metric
+#
+#   $Revision: 1.4 $  $Date: 2014/10/24 00:22:30 $
+#
+
+deltametric <- function(A, B, p=2, c=Inf, ...) {
+  stopifnot(is.numeric(p) && length(p) == 1L && p > 0)
+  # ensure frames are identical
+  bb <- boundingbox(as.rectangle(A), as.rectangle(B))
+  # enforce identical frames
+  A <- rebound(A, bb)
+  B <- rebound(B, bb)
+  # compute distance functions
+  dA <- distmap(A, ...)
+  dB <- distmap(B, ...)
+  if(!is.infinite(c)) {
+    dA <- eval.im(pmin.int(dA, c))
+    dB <- eval.im(pmin.int(dB, c))
+  }
+  if(is.infinite(p)) {
+    # L^infinity
+    Z <- eval.im(abs(dA-dB))
+    delta <- summary(Z)$max
+  } else {
+    # L^p
+    Z <- eval.im(abs(dA-dB)^p)
+    iZ <- summary(Z)$mean
+    delta <- iZ^(1/p)
+  }
+  return(delta)
+}
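+
+# Usage sketch (delta metric between two overlapping discs, default p=2):
+#   A <- disc(radius=1, centre=c(0,0))
+#   B <- disc(radius=1, centre=c(0.5,0))
+#   deltametric(A, B)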
+
+
+
+
+
diff --git a/R/density.lpp.R b/R/density.lpp.R
new file mode 100644
index 0000000..34e4a04
--- /dev/null
+++ b/R/density.lpp.R
@@ -0,0 +1,275 @@
+#'
+#'    density.lpp.R
+#'
+#'    Method for 'density' for lpp objects
+#'
+#'    Copyright (C) 2017 Greg McSwiggan and Adrian Baddeley
+#'
+
+density.lpp <- function(x, sigma, ...,
+                        weights=NULL,
+                        kernel="gaussian", 
+                        continuous=TRUE,
+                        epsilon=1e-6,
+                        verbose=TRUE, debug=FALSE, savehistory=TRUE,
+                        old=FALSE) {
+  stopifnot(inherits(x, "lpp"))
+  kernel <- match.kernel(kernel)
+
+  if(continuous && (kernel == "gaussian") && !old)
+     return(PDEdensityLPP(x, sigma, ..., weights=weights))
+
+  L <- as.linnet(x)
+  # weights
+  np <- npoints(x)
+  if(is.null(weights)) {
+    weights <- rep(1, np)
+  } else {
+    stopifnot(is.numeric(weights))
+    check.nvector(weights, np, oneok=TRUE)
+    if(length(weights) == 1L) weights <- rep(weights, np) 
+  }
+  # pixellate linear network
+  Llines <- as.psp(L)
+  linemask <- as.mask.psp(Llines, ...)
+  lineimage <- as.im(linemask)
+  # extract pixel centres
+  xx <- raster.x(linemask)
+  yy <- raster.y(linemask)
+  mm <- linemask$m
+  xx <- as.vector(xx[mm])
+  yy <- as.vector(yy[mm])
+  pixelcentres <- ppp(xx, yy, window=as.rectangle(linemask), check=FALSE)
+  pixdf <- data.frame(xc=xx, yc=yy)
+  # project pixel centres onto lines
+  p2s <- project2segment(pixelcentres, Llines)
+  projloc <- as.data.frame(p2s$Xproj)
+  projmap <- as.data.frame(p2s[c("mapXY", "tp")])
+  projdata <- cbind(pixdf, projloc, projmap)
+  # initialise pixel values
+  values <- rep(0, nrow(pixdf))
+  # Extract local coordinates of data
+  n <- npoints(x)
+  coo <- coords(x)
+  seg <- coo$seg
+  tp  <- coo$tp
+  # lengths of network segments
+  Llengths <- lengths.psp(Llines)
+  # initialise stack
+  stack <- data.frame(seg=integer(0), from=logical(0), 
+                  distance=numeric(0), weight=numeric(0))
+  # process each data point
+  for(i in seq_len(n)) {
+    segi <- seg[i]
+    tpi  <- tp[i]
+    len <- Llengths[segi]
+    # evaluate kernel on segment containing x[i]
+    relevant <- (projmap$mapXY == segi)
+    values[relevant] <- values[relevant] +
+      dkernel(len * (projmap$tp[relevant] - tpi),
+              kernel=kernel, sd=sigma)
+    # push the two tails onto the stack
+    stack <- rbind(data.frame(seg = c(segi, segi),
+                              from  = c(TRUE, FALSE), 
+                              distance = len * c(tpi, 1-tpi),
+                              weight = rep(weights[i], 2L)),
+                   stack)
+  }
+  Lfrom <- L$from
+  Lto   <- L$to
+  if(verbose)
+    niter <- 0
+  if(savehistory)
+    history <- data.frame(iter=integer(0), qlen=integer(0),
+                          totmass=numeric(0), maxmass=numeric(0))
+  # process the stack
+  while(nrow(stack) > 0) {
+    if(debug) print(stack)
+    masses <- with(stack, abs(weight) * pkernel(distance,
+                                                kernel=kernel,
+                                                sd=sigma,
+                                                lower.tail=FALSE))
+    totmass <- sum(masses)
+    maxmass <- max(masses)
+    if(savehistory)
+      history <- rbind(history,
+                       data.frame(iter=nrow(history)+1L,
+                                  qlen=nrow(stack),
+                                  totmass=totmass,
+                                  maxmass=maxmass))
+    if(verbose) {
+      niter <- niter + 1L
+      cat(paste("Iteration", niter, "\tStack length", nrow(stack), "\n"))
+      cat(paste("Total stack mass", totmass, "\tMaximum", maxmass, "\n"))
+    }
+    # trim
+    tiny <- (masses < epsilon)
+    if(any(tiny)) {
+      if(verbose) {
+        ntiny <- sum(tiny)
+        cat(paste("Removing", ntiny,
+                  "tiny", ngettext(ntiny, "tail", "tails"), "\n"))
+      }
+      stack <- stack[!tiny, ]
+    }
+    if(nrow(stack) == 0)
+      break
+    # pop the top of the stack
+    H  <- stack[1L, , drop=FALSE]
+    stack <- stack[-1L, , drop=FALSE]
+    # segment and vertex
+    Hseg <- H$seg
+    Hvert <- if(H$from) Lfrom[Hseg] else Lto[Hseg]
+    Hdist <- H$distance
+    # find all segments incident to this vertex
+    incident <- which((Lfrom == Hvert) | (Lto == Hvert))
+    degree <- length(incident)
+    # exclude reflecting paths?
+    if(!continuous)
+      incident <- setdiff(incident, Hseg)
+    for(J in incident) {
+      lenJ <- Llengths[J]
+      # determine whether Hvert is the 'to' or 'from' endpoint of segment J
+      H.is.from <- (Lfrom[J] == Hvert)
+      # update weight
+      if(continuous) {
+        Jweight <- H$weight * (2/degree - (J == Hseg))
+      } else {
+        Jweight <- H$weight/(degree-1)
+      }
+      # increment density on segment
+      relevant <- (projmap$mapXY == J)
+      tp.rel <- projmap$tp[relevant]
+      d.rel <- lenJ * (if(H.is.from) tp.rel else (1 - tp.rel))
+      values[relevant] <- values[relevant] +
+        Jweight * dkernel(d.rel + Hdist, kernel=kernel, sd=sigma)
+      # push other end of segment onto stack
+      stack <- rbind(data.frame(seg = J,
+                                from  = !(H.is.from),
+                                distance = lenJ + Hdist,
+                                weight = Jweight),
+                     stack)
+    }
+  }
+  # attach values to nearest pixels
+  Z <- lineimage
+  Z[pixelcentres] <- values
+  # attach exact line position data
+  df <- cbind(projdata, values)
+  out <- linim(L, Z, df=df)
+  if(savehistory)
+    attr(out, "history") <- history
+  return(out)
+}
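+
+#' Usage sketch (kernel estimate of intensity on the 'chicago' network;
+#' sigma is in the network's units, feet for this dataset):
+#'   D <- density(unmark(chicago), sigma=100)
+#'   plot(D)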
+
+density.splitppx <- function(x, sigma, ...) {
+  if(!all(sapply(x, is.lpp)))
+    stop("Only implemented for patterns on a linear network")
+  solapply(x, density.lpp, sigma=sigma, ...)
+}
+
+PDEdensityLPP <- function(x, sigma, ..., weights=NULL, 
+                          dx=NULL, dt=NULL, fun=FALSE) {
+  stopifnot(is.lpp(x))
+  L <- as.linnet(x)
+  check.1.real(sigma)
+  check.finite(sigma)
+  if(!is.null(weights)) 
+    check.nvector(weights, npoints(x))
+  if(is.null(dx)) {
+    #' segment lengths
+    lenths <- lengths.psp(as.psp(L))
+    lbar <- mean(lenths)
+    ltot <- sum(lenths)
+    #' specify 30 steps per segment, on average
+    dx <- lbar/30
+    D <- ceiling(ltot/dx)
+    dx <- ltot/D
+  } 
+  verdeg <- vertexdegree(L)
+  amb <- max(verdeg[L$from] + verdeg[L$to])
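+  #' dtmax appears to be a stability bound for the explicit finite-difference
+  #' scheme: dt must stay small relative to dx^2 (scaled by the maximum total
+  #' degree of a segment's endpoints) and to sigma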
+  dtmax <- min(0.95 * (dx^2)/amb, sigma^2/(2 * 10), sigma * dx/6)
+  if(is.null(dt)) {
+    dt <- dtmax
+  } else if(dt > dtmax) {
+    stop(paste("dt is too large: maximum value", dtmax),
+         call.=FALSE)
+  }
+  a <- FDMKERNEL(lppobj=x, sigma=sigma, dtx=dx, dtt=dt,
+                 weights=weights,
+                 iterMax=1e6, sparse=TRUE)
+  result <- a$kernel_fun
+  if(!fun) result <- as.linim(result)
+  attr(result, "sigma") <- sigma
+  attr(result, "dx") <- a$deltax
+  attr(result, "dt") <- a$deltat
+  return(result)
+}
+
+# Greg's code 
+FDMKERNEL <- function(lppobj, sigma, dtt, weights=NULL, iterMax=5000, 
+	              sparse=FALSE, dtx) {
+  net2 <- as.linnet(lppobj)
+#  ends1 <- net2$lines$ends
+  lenfs <- lengths.psp(as.psp(net2))
+  seg_in_lengths <- pmax(1, round(lenfs/dtx))
+  new_lpp <- lixellate(lppobj, nsplit=seg_in_lengths)
+  net_nodes <- as.linnet(new_lpp)
+  vvv <- as.data.frame(vertices(net_nodes)) 
+  vertco_new <- vvv[, c("x", "y")]
+  vertseg_new <- vvv$segcoarse # marks
+  verttp_new <- vvv$tpcoarse   # marks
+  if(npoints(lppobj) == 0) {
+    U0 <- numeric(npoints(net_nodes$vertices))
+  } else {
+    tp1 <- as.numeric(new_lpp$data$tp)
+    tp2 <- as.vector(rbind(1 - tp1, tp1))
+    newseg <- as.integer(new_lpp$data$seg)
+    vert_init_events1 <- as.vector(rbind(net_nodes$from[newseg],
+                                         net_nodes$to[newseg]))
+    highest_vert <- npoints(net_nodes$vertices)
+    vert_numbers <- seq_len(highest_vert)
+    ff <- factor(vert_init_events1, levels=vert_numbers)
+    ww <- if(is.null(weights)) tp2 else (rep(weights, each=2) * tp2)
+    ww <- ww/dtx
+    U0 <- tapply(ww, ff, sum)
+    U0[is.na(U0)] <- 0
+  } 
+  M <- round((sigma^2)/(2*dtt))
+  if(M < 10) stop("Number of time iterations must be at least 10; decrease dtt")
+  if(M > iterMax)
+    stop("No of time iterations exceeds iterMax; increase dtt or increase iterMax")
+
+  alpha <- dtt/(dtx^2)
+
+  A1 <- net_nodes$m *1
+#  ml <- nrow(net_nodes$m)
+
+  degree <- colSums(A1)
+  dmax <- max(degree)
+
+  A2 <- A1 * alpha
+  diag(A2) <- 1 - alpha * degree
+  
+  if(1 - dmax*alpha < 0)
+     stop("alpha must satisfy (1 - HIGHEST VERTEX DEGREE * ALPHA) > 0; decrease dtt or decrease D")
+
+  if(npoints(lppobj) > 0) {
+    v <- as.numeric(U0)
+    for(j in 1:M)
+      v <- A2 %*% v
+    finalU <- as.numeric(v)
+  } else finalU <- U0
+  vert_new <- cbind(vertco_new, vertseg_new, verttp_new)
+  colnames(vert_new) <- c("x", "y", "seg", "tp")
+  Nodes <- lpp(vert_new, net2, check=FALSE)
+  nodemap <- nnfun(Nodes)
+  interpUxyst <- function(x, y, seg, tp) {
+    finalU[nodemap(x,y,seg,tp)]
+  }
+  interpU <- linfun(interpUxyst, net2)
+  out <- list(kernel_fun   = interpU,
+              deltax       = dtx,
+              deltat       = dtt)
+  return(out)
+}
diff --git a/R/density.ppp.R b/R/density.ppp.R
new file mode 100755
index 0000000..4fdbcff
--- /dev/null
+++ b/R/density.ppp.R
@@ -0,0 +1,833 @@
+#
+#  density.ppp.R
+#
+#  Method for 'density' for point patterns
+#
+#  $Revision: 1.86 $    $Date: 2017/06/05 10:31:58 $
+#
+
+ksmooth.ppp <- function(x, sigma, ..., edge=TRUE) {
+  .Deprecated("density.ppp", package="spatstat")
+  density.ppp(x, sigma, ..., edge=edge)
+}
+
+density.ppp <- local({
+  
+density.ppp <- function(x, sigma=NULL, ...,
+                        weights=NULL, edge=TRUE, varcov=NULL,
+                        at="pixels", leaveoneout=TRUE,
+                        adjust=1, diggle=FALSE, se=FALSE, 
+                        kernel="gaussian",
+                        scalekernel=is.character(kernel),
+                        positive=FALSE, verbose=TRUE) {
+  verifyclass(x, "ppp")
+
+  output <- pickoption("output location type", at,
+                       c(pixels="pixels",
+                         points="points"))
+
+  if(!identical(kernel, "gaussian")) {
+    validate2Dkernel(kernel)
+    ## kernel is only partly implemented!
+    if(se)
+      stop("Standard errors are not implemented for non-Gaussian kernel")
+    if(verbose && (is.function(sigma) || (is.null(sigma) && is.null(varcov))))
+      warning("Bandwidth selection will be based on Gaussian kernel")
+  }
+  
+  ker <- resolve.2D.kernel(..., sigma=sigma, varcov=varcov, x=x, adjust=adjust)
+  sigma <- ker$sigma
+  varcov <- ker$varcov
+
+  if(is.im(weights)) {
+    weights <- safelookup(weights, x) # includes warning if NA
+  } else if(is.expression(weights)) 
+    weights <- eval(weights, envir=as.data.frame(x), enclos=parent.frame())
+  if(length(weights) == 0 || (!is.null(dim(weights)) && nrow(weights) == 0))
+    weights <- NULL 
+
+  if(se) {
+    # compute standard error
+    SE <- denspppSEcalc(x, sigma=sigma, varcov=varcov,
+                        ...,
+                        weights=weights, edge=edge, at=output,
+                        leaveoneout=leaveoneout, adjust=adjust,
+                        diggle=diggle)
+    if(positive) SE <- posify(SE)
+  }
+                         
+  if(output == "points") {
+    # VALUES AT DATA POINTS ONLY
+    result <- densitypointsEngine(x, sigma,
+                                  varcov=varcov,
+                                  kernel=kernel,
+                                  scalekernel=scalekernel,
+                                  weights=weights, edge=edge,
+                                  leaveoneout=leaveoneout,
+                                  diggle=diggle, ...)
+    if(verbose && !is.null(uhoh <- attr(result, "warnings"))) {
+      switch(uhoh,
+             underflow=warning("underflow due to very small bandwidth"),
+             warning(uhoh))
+    }
+    ## constrain values to be positive
+    if(positive) 
+      result <- posify(result)
+    if(se) 
+      result <- list(estimate=result, SE=SE)
+    return(result)
+  }
+  
+  # VALUES AT PIXELS
+  if(!edge) {
+    # no edge correction
+    edg <- NULL
+    raw <- second.moment.calc(x, sigma, what="smooth", ...,
+                              kernel=kernel,
+                              scalekernel=scalekernel,
+                              weights=weights, varcov=varcov)
+    raw <- divide.by.pixelarea(raw) 
+    smo <- raw
+  } else if(!diggle) {
+    # edge correction e(u)
+    both <- second.moment.calc(x, sigma, what="smoothedge", ...,
+                               kernel=kernel,
+                               scalekernel=scalekernel,
+                               weights=weights, varcov=varcov)
+    raw <- divide.by.pixelarea(both$smooth)
+    edg <- both$edge
+    smo <- if(is.im(raw)) eval.im(raw/edg) else
+           lapply(raw, divideimage, denom=edg)
+  } else {
+    # edge correction e(x_i)
+    edg <- second.moment.calc(x, sigma, what="edge", ...,
+                              scalekernel=scalekernel,
+                              kernel=kernel, varcov=varcov)
+    wi <- 1/safelookup(edg, x, warn=FALSE)
+    wi[!is.finite(wi)] <- 0
+    # edge correction becomes weight attached to points
+    if(is.null(weights)) {
+      newweights <- wi
+    } else if(is.matrix(weights) || is.data.frame(weights)) {
+      stopifnot(nrow(weights) == npoints(x))
+      newweights <- weights * wi
+    } else {
+      stopifnot(length(weights) == npoints(x))
+      newweights <- weights * wi
+    }
+    raw <- second.moment.calc(x, sigma, what="smooth", ...,
+                              kernel=kernel,
+                              scalekernel=scalekernel,
+                              weights=newweights, varcov=varcov)
+    raw <- divide.by.pixelarea(raw)
+    smo <- raw
+  }
+
+  result <- if(is.im(smo)) smo[x$window, drop=FALSE]
+            else solapply(smo, "[", i=x$window, drop=FALSE)
+
+  # internal use only
+  spill <- resolve.1.default(list(spill=FALSE), list(...))
+  if(spill)
+    return(list(result=result, sigma=sigma, varcov=varcov, raw = raw, edg=edg))
+
+  # constrain values to be positive
+  if(positive) 
+    result <- posify(result)
+
+  # normal return
+  attr(result, "sigma") <- sigma
+  attr(result, "varcov") <- varcov
+  attr(result, "kernel") <- kernel
+  if(se)
+    result <- list(estimate=result, SE=SE)
+  return(result)
+}
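+## Usage sketch (illustrative; 'cells' is a standard spatstat dataset):
+##   D   <- density(cells, sigma=0.05)               # pixel image of intensity
+##   lam <- density(cells, sigma=0.05, at="points")  # leave-one-out values
+##   est <- density(cells, sigma=0.05, se=TRUE)      # list(estimate, SE)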
+
+divideimage <- function(numer, denom) eval.im(numer/denom)
+
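+## Constrain values to be positive: entries smaller than 'eps'
+## (by default the smallest positive double) are replaced by 'eps'.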
+posify <- function(x, eps=.Machine$double.xmin) {
+  force(eps) # force evaluation of the default argument
+  if(is.im(x)) return(eval.im(pmax(eps, x)))
+  if(inherits(x, "solist")) return(solapply(x, posify, eps=eps))
+  if(is.numeric(x)) return(pmax(eps, x))
+  # data frame or list
+  if(is.list(x) && all(sapply(x, is.numeric)))
+    return(lapply(x, posify, eps=eps))
+  warning("Internal error: posify did not recognise data format")
+  return(x)
+}
+
+divide.by.pixelarea <- function(x) {
+  if(is.im(x)) {
+    x$v <- x$v/(x$xstep * x$ystep)
+  } else {
+    for(i in seq_along(x))
+      x[[i]]$v <- with(x[[i]], v/(xstep * ystep))
+  }
+  return(x)
+}
+
+denspppSEcalc <- function(x, sigma, varcov, ...,
+                          weights, edge, diggle, at) {
+  ## Calculate standard error, rather than estimate
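+  ## The square of an isotropic Gaussian kernel with sd sigma is
+  ## proportional to a Gaussian kernel with sd sigma/sqrt(2); the
+  ## constant of proportionality is 1/(4 * pi * sigma^2).  So the
+  ## variance is obtained by smoothing with the narrower kernel and
+  ## multiplying by this constant.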
+  tau <- taumat <- NULL
+  if(is.null(varcov)) {
+    ## sigma may have length 1 or 2; the constant is 1/(4*pi*sigma.x*sigma.y)
+    varconst <- 1/(4 * pi * prod(rep(sigma, length.out=2)))
+    tau <- sigma/sqrt(2)
+  } else {
+    varconst <- 1/(4 * pi * sqrt(det(varcov)))
+    taumat <- varcov/2
+  }
+  ## Calculate edge correction weights
+  if(edge) {
+    edgeim <- second.moment.calc(x, sigma, what="edge", ...,
+                                 varcov=varcov)
+    if(diggle || at == "points") {
+      edgeX <- safelookup(edgeim, x, warn=FALSE)
+      diggleX <- 1/edgeX
+      diggleX[!is.finite(diggleX)] <- 0
+    }
+    edgeim <- edgeim[Window(x), drop=FALSE]
+  }
+  ## Perform smoothing
+  if(!edge) {
+    ## no edge correction
+    V <- density(x, sigma=tau, varcov=taumat, ...,
+                 weights=weights, edge=edge, diggle=diggle, at=at)
+  } else if(!diggle) {
+    ## edge correction e(u)
+    V <- density(x, sigma=tau, varcov=taumat, ...,
+                 weights=weights, edge=edge, diggle=diggle, at=at)
+    V <- if(at == "pixels") (V/edgeim) else (V * diggleX)
+  } else {
+    ## Diggle edge correction e(x_i)
+    wts <- diggleX * (weights %orifnull% 1)
+    V <- density(x, sigma=tau, varcov=taumat, ...,
+                 weights=wts, edge=edge, diggle=diggle, at=at)
+  }
+  V <- V * varconst
+  return(sqrt(V))
+}
+
+
+density.ppp
+
+})
+
+densitypointsEngine <- function(x, sigma, ...,
+                                kernel="gaussian", 
+                                scalekernel=is.character(kernel),
+                                weights=NULL, edge=TRUE, varcov=NULL,
+                                leaveoneout=TRUE, diggle=FALSE,
+                                sorted=FALSE, spill=FALSE, cutoff=NULL) {
+  debugging <- spatstat.options("developer")
+  stopifnot(is.logical(leaveoneout))
+
+  validate2Dkernel(kernel)
+  if(is.character(kernel)) kernel <- match2DkernelName(kernel)
+  isgauss <- identical(kernel, "gaussian")
+
+  # constant factor in density computations
+  if(is.null(varcov)) {
+    const <- 1/sigma^2 
+  } else {
+    detSigma <- det(varcov)
+    Sinv <- solve(varcov)
+    const <- 1/sqrt(detSigma)
+  }
+  if(isgauss) {
+    # absorb leading constant in Gaussian density
+    const <- const/(2 * pi)
+  }
+  
+  if(length(weights) == 0 || (!is.null(dim(weights)) && nrow(weights) == 0))
+    weights <- NULL
+  # Leave-one-out computation
+  # cutoff: contributions from pairs of distinct points
+  # closer than 8 standard deviations
+  sd <- if(is.null(varcov)) sigma else sqrt(sum(diag(varcov)))
+  if(is.null(cutoff)) 
+    cutoff <- 8 * sd
+  if(debugging)
+    cat(paste("cutoff=", cutoff, "\n"))
+
+  if(leaveoneout && npoints(x) > 1) {
+    # ensure each point has its closest neighbours within the cutoff
+    nndmax <- maxnndist(x)
+    cutoff <- max(2 * nndmax, cutoff)
+    if(debugging)
+      cat(paste("adjusted cutoff=", cutoff, "\n"))
+  }
+  # validate weights
+  if(is.null(weights)) {
+    k <- 1L
+  } else if(is.matrix(weights) || is.data.frame(weights)) {
+    k <- ncol(weights)
+    stopifnot(nrow(weights) == npoints(x))
+    weights <- as.data.frame(weights)
+    weightnames <- colnames(weights)
+  } else {
+    k <- 1L
+    stopifnot(length(weights) == npoints(x) || length(weights) == 1L)
+  }
+  # evaluate edge correction weights at points 
+  if(edge) {
+    win <- x$window
+    if(isgauss && is.null(varcov) && win$type == "rectangle") {
+      # evaluate Gaussian probabilities directly
+      xr <- win$xrange
+      yr <- win$yrange
+      xx <- x$x
+      yy <- x$y
+      xprob <-
+        pnorm(xr[2L], mean=xx, sd=sigma) - pnorm(xr[1L], mean=xx, sd=sigma)
+      yprob <-
+        pnorm(yr[2L], mean=yy, sd=sigma) - pnorm(yr[1L], mean=yy, sd=sigma)
+      edgeweight <- xprob * yprob
+    } else {
+      edg <- second.moment.calc(x, sigma=sigma,
+                                kernel=kernel,
+                                scalekernel=scalekernel,
+                                what="edge", varcov=varcov)
+      edgeweight <- safelookup(edg, x, warn=FALSE)
+    }
+    if(diggle) {
+      # Diggle edge correction
+      # edgeweight is attached to each point
+      if(is.null(weights)) {
+        k <- 1L
+        weights <- 1/edgeweight
+      } else {
+        weights <- weights/edgeweight
+      }
+    }
+  }
+
+  if(isgauss &&
+     spatstat.options("densityTransform") && spatstat.options("densityC")) {
+    ## .................. experimental C code .....................
+    if(debugging)
+      cat('Using experimental code!\n')
+    npts <- npoints(x)
+    result <- if(k == 1L) numeric(npts) else matrix(, npts, k)
+    xx <- x$x
+    yy <- x$y
+    ## transform to standard coordinates
+    if(is.null(varcov)) {
+      xx <- xx/(sqrt(2) * sigma)
+      yy <- yy/(sqrt(2) * sigma)
+    } else {
+      xy <- cbind(xx, yy) %*% matrixsqrt(Sinv/2)
+      xx <- xy[,1L]
+      yy <- xy[,2L]
+      sorted <- FALSE
+    }
+    ## cutoff in standard coordinates
+    cutoff <- cutoff/(sqrt(2) * sd)
+    ## sort into increasing order of x coordinate (required by C code)
+    if(!sorted) {
+      oo <- fave.order(xx)
+      xx <- xx[oo]
+      yy <- yy[oo]
+    }
+    if(is.null(weights)) {
+      zz <- .C("Gdenspt",
+               nxy     = as.integer(npts),
+               x       = as.double(xx),
+               y       = as.double(yy),
+               rmaxi   = as.double(cutoff),
+               result  = as.double(double(npts)),
+               PACKAGE = "spatstat")
+      if(sorted) result <- zz$result else result[oo] <- zz$result
+      result <- result * const
+    } else if(k == 1L) {
+      wtsort <- if(sorted) weights else weights[oo]
+      zz <- .C("Gwtdenspt",
+               nxy     = as.integer(npts),
+               x       = as.double(xx),
+               y       = as.double(yy),
+               rmaxi   = as.double(cutoff),
+               weight  = as.double(wtsort),
+               result  = as.double(double(npts)),
+               PACKAGE = "spatstat")
+      if(sorted) result <- zz$result else result[oo] <- zz$result 
+      result <- result * const
+    } else {
+      ## matrix of weights
+      wtsort <- if(sorted) weights else weights[oo, ]
+      for(j in 1:k) {
+        zz <- .C("Gwtdenspt",
+                 nxy     = as.integer(npts),
+                 x       = as.double(xx),
+                 y       = as.double(yy),
+                 rmaxi   = as.double(cutoff),
+                 weight  = as.double(wtsort[,j]),
+                 result  = as.double(double(npts)),
+                 PACKAGE = "spatstat")
+        if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result
+      }
+      result <- result * const
+    }
+  } else if(isgauss && spatstat.options("densityC")) {
+    # .................. C code ...........................
+    if(debugging)
+      cat('Using standard code.\n')
+    npts <- npoints(x)
+    result <- if(k == 1L) numeric(npts) else matrix(, npts, k)
+    # sort into increasing order of x coordinate (required by C code)
+    if(sorted) {
+      xx <- x$x
+      yy <- x$y
+    } else {
+      oo <- fave.order(x$x)
+      xx <- x$x[oo]
+      yy <- x$y[oo]
+    }
+    if(is.null(varcov)) {
+      # isotropic kernel
+      if(is.null(weights)) {
+        zz <- .C("denspt",
+                 nxy     = as.integer(npts),
+                 x       = as.double(xx),
+                 y       = as.double(yy),
+                 rmaxi   = as.double(cutoff),
+                 sig     = as.double(sd),
+                 result  = as.double(double(npts)),
+                 PACKAGE = "spatstat")
+        if(sorted) result <- zz$result else result[oo] <- zz$result 
+      } else if(k == 1L) {
+        wtsort <- if(sorted) weights else weights[oo]
+        zz <- .C("wtdenspt",
+                 nxy     = as.integer(npts),
+                 x       = as.double(xx),
+                 y       = as.double(yy),
+                 rmaxi   = as.double(cutoff),
+                 sig     = as.double(sd),
+                 weight  = as.double(wtsort),
+                 result  = as.double(double(npts)),
+                 PACKAGE = "spatstat")
+        if(sorted) result <- zz$result else result[oo] <- zz$result 
+      } else {
+        # matrix of weights
+        wtsort <- if(sorted) weights else weights[oo, ]
+        for(j in 1:k) {
+          zz <- .C("wtdenspt",
+                   nxy     = as.integer(npts),
+                   x       = as.double(xx),
+                   y       = as.double(yy),
+                   rmaxi   = as.double(cutoff),
+                   sig     = as.double(sd),
+                   weight  = as.double(wtsort[,j]),
+                   result  = as.double(double(npts)),
+                   PACKAGE = "spatstat")
+          if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result
+        }
+      }
+    } else {
+      # anisotropic kernel
+      flatSinv <- as.vector(t(Sinv))
+      if(is.null(weights)) {
+        zz <- .C("adenspt",
+                 nxy     = as.integer(npts),
+                 x       = as.double(xx),
+                 y       = as.double(yy),
+                 rmaxi   = as.double(cutoff),
+                 detsigma = as.double(detSigma),
+                 sinv    = as.double(flatSinv),
+                 result  = as.double(double(npts)),
+                 PACKAGE = "spatstat")
+        if(sorted) result <- zz$result else result[oo] <- zz$result 
+      } else if(k == 1L) {
+        # vector of weights
+        wtsort <- if(sorted) weights else weights[oo]
+        zz <- .C("awtdenspt",
+                 nxy     = as.integer(npts),
+                 x       = as.double(xx),
+                 y       = as.double(yy),
+                 rmaxi   = as.double(cutoff),
+                 detsigma = as.double(detSigma),
+                 sinv    = as.double(flatSinv),
+                 weight  = as.double(wtsort),
+                 result   = as.double(double(npts)),
+                 PACKAGE = "spatstat")
+        if(sorted) result <- zz$result else result[oo] <- zz$result 
+      } else {
+        # matrix of weights
+        wtsort <- if(sorted) weights else weights[oo, ]
+        for(j in 1:k) {
+          zz <- .C("awtdenspt",
+                   nxy     = as.integer(npts),
+                   x       = as.double(xx),
+                   y       = as.double(yy),
+                   rmaxi   = as.double(cutoff),
+                   detsigma = as.double(detSigma),
+                   sinv    = as.double(flatSinv),
+                   weight  = as.double(wtsort[,j]),
+                   result  = as.double(double(npts)),
+                   PACKAGE = "spatstat")
+          if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result 
+        }
+      }
+    }
+  } else {
+    # ..... interpreted code .........................................
+    close <- closepairs(x, cutoff)
+    i <- close$i
+    j <- close$j
+    d <- close$d
+    npts <- npoints(x)
+    result <- if(k == 1L) numeric(npts) else matrix(, npts, k)
+    # evaluate contribution from each close pair (i,j)
+    if(isgauss) { 
+      if(is.null(varcov)) {
+        contrib <- const * exp(-d^2/(2 * sigma^2))
+      } else {
+        ## anisotropic kernel
+        dx <- close$dx
+        dy <- close$dy
+        contrib <- const * exp(-(dx * (dx * Sinv[1L,1L] + dy * Sinv[1L,2L])
+                                 + dy * (dx * Sinv[2L,1L] + dy * Sinv[2L,2L]))/2)
+      }
+    } else {
+      contrib <- evaluate2Dkernel(kernel, close$dx, close$dy,
+                                  sigma=sigma, varcov=varcov, ...)
+    }
+    ## sum (weighted) contributions
+    ifac <- factor(i, levels=1:npts)
+    if(is.null(weights)) {
+      result <- tapply(contrib, ifac, sum)
+    } else if(k == 1L) {
+      wcontrib <- contrib * weights[j]
+      result <- tapply(wcontrib, ifac, sum)
+    } else {
+      for(kk in 1:k) {
+        wcontribkk <- contrib * weights[j, kk]
+        result[,kk] <- tapply(wcontribkk, ifac, sum)
+      }
+    }
+    result[is.na(result)] <- 0
+    #
+  }
+  # ----- contribution from point itself ----------------
+  if(!leaveoneout) {
+    # add contribution from point itself
+    self <- const
+    if(!is.null(weights))
+      self <- self * weights
+    result <- result + self
+  }
+  # ........  Edge correction ........................................
+  if(edge && !diggle) 
+    result <- result/edgeweight
+
+  # ............. validate .................................
+  npts <- npoints(x)
+  if(k == 1L) {
+    result <- as.numeric(result)
+    if(length(result) != npts) 
+      stop(paste("Internal error: incorrect number of lambda values",
+                 "in leave-one-out method:",
+                 "length(lambda) = ", length(result),
+                 "!=", npts, "= npoints"))
+    if(anyNA(result)) {
+      nwrong <- sum(is.na(result))
+      stop(paste("Internal error:", nwrong, "NA or NaN",
+                 ngettext(nwrong, "value", "values"),
+                 "generated in leave-one-out method"))
+    }
+  } else {
+    if(ncol(result) != k)
+      stop(paste("Internal error: incorrect number of columns returned:",
+                 ncol(result), "!=", k))
+    colnames(result) <- weightnames
+    if(nrow(result) != npts) 
+      stop(paste("Internal error: incorrect number of rows of lambda values",
+                 "in leave-one-out method:",
+                 "nrow(lambda) = ", nrow(result),
+                 "!=", npts, "= npoints"))
+    if(anyNA(result)) {
+      nwrong <- sum(!complete.cases(result))
+      stop(paste("Internal error:", nwrong,
+                 ngettext(nwrong, "row", "rows"),
+                 "of NA values generated in leave-one-out method"))
+    }
+  }
+  if(spill)
+      return(list(result=result, sigma=sigma, varcov=varcov,
+                  edg=edgeweight))
+  # tack on bandwidth
+  attr(result, "sigma") <- sigma
+  attr(result, "varcov") <- varcov
+  # 
+  return(result)
+}
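+## Usage sketch (internal; illustrative):
+##   lam <- densitypointsEngine(cells, sigma=0.05, leaveoneout=TRUE)
+## returns the leave-one-out kernel estimate of intensity
+## at each data point of 'cells'.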
+
+resolve.2D.kernel <- function(..., sigma=NULL, varcov=NULL, x, mindist=NULL,
+                              adjust=1, bwfun=NULL, allow.zero=FALSE) {
+  if(is.function(sigma)) {
+    bwfun <- sigma
+    sigma <- NULL
+  }
+  if(is.null(sigma) && is.null(varcov) && !is.null(bwfun)) {
+    # call bandwidth selection function
+    bw <- do.call.matched(bwfun, resolve.defaults(list(X=x), list(...)))
+    # interpret the result as either sigma or varcov
+    if(!is.numeric(bw))
+      stop("bandwidth selector returned a non-numeric result")
+    if(length(bw) %in% c(1L,2L)) {
+      sigma <- as.numeric(bw)
+      if(!all(sigma > 0)) {
+        gripe <- "bandwidth selector returned negative value(s)"
+        if(allow.zero) warning(gripe) else stop(gripe)
+      }
+    } else if(is.matrix(bw) && nrow(bw) == 2 && ncol(bw) == 2) {
+      varcov <- bw
+      if(!all(eigen(varcov)$values > 0))
+        stop("bandwidth selector returned matrix with negative eigenvalues")
+    } else stop("bandwidth selector did not return a matrix or numeric value")
+  }
+  sigma.given <- !is.null(sigma)
+  varcov.given <- !is.null(varcov)
+  if(sigma.given) {
+    stopifnot(is.numeric(sigma))
+    stopifnot(length(sigma) %in% c(1L,2L))
+    if(!allow.zero)
+      stopifnot(all(sigma > 0))
+  }
+  if(varcov.given)
+    stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov)==2 )
+  # reconcile
+  ngiven <- varcov.given + sigma.given
+  switch(ngiven+1L,
+         {
+           # default
+           w <- x$window
+           sigma <- (1/8) * shortside(as.rectangle(w))
+         },
+         {
+           if(sigma.given && length(sigma) == 2) 
+             varcov <- diag(sigma^2)
+           if(!is.null(varcov))
+             sigma <- NULL
+         },
+         {
+           stop(paste("Give only one of the arguments",
+                      sQuote("sigma"), "and", sQuote("varcov")))
+         })
+  # apply adjustments
+  if(!is.null(sigma))  sigma <- adjust * sigma
+  if(!is.null(varcov)) varcov <- (adjust^2) * varcov
+  #
+  sd <- if(is.null(varcov)) sigma else sqrt(sum(diag(varcov)))
+  cutoff <- 8 * sd
+  uhoh <- if(!is.null(mindist) && cutoff < mindist) "underflow" else NULL
+  result <- list(sigma=sigma, varcov=varcov, cutoff=cutoff, warnings=uhoh)
+  return(result)
+}
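+## Illustration of the reconciliation rules above:
+##   resolve.2D.kernel(x=cells, sigma=0.1)          # isotropic, sigma = 0.1
+##   resolve.2D.kernel(x=cells, sigma=c(0.1, 0.2))  # varcov = diag(c(0.1,0.2)^2)
+##   resolve.2D.kernel(x=cells)                     # default: shortside/8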
+
+
+densitycrossEngine <- function(Xdata, Xquery, sigma, ...,
+                               weights=NULL, edge=TRUE, varcov=NULL,
+                               diggle=FALSE,
+                               sorted=FALSE) {
+  if(!is.null(varcov)) {
+    detSigma <- det(varcov)
+    Sinv <- solve(varcov)
+  }
+  if(length(weights) == 0 || (!is.null(dim(weights)) && nrow(weights) == 0))
+    weights <- NULL
+  ## Leave-one-out computation
+  ## cutoff: contributions from pairs of distinct points
+  ## closer than 8 standard deviations
+  sd <- if(is.null(varcov)) sigma else sqrt(sum(diag(varcov)))
+  cutoff <- 8 * sd
+  # validate weights
+  if(is.null(weights)) {
+    k <- 1L
+  } else if(is.matrix(weights) || is.data.frame(weights)) {
+    k <- ncol(weights)
+    stopifnot(nrow(weights) == npoints(Xdata))
+    weights <- as.data.frame(weights)
+    weightnames <- colnames(weights)
+  } else {
+    k <- 1L
+    stopifnot(length(weights) == npoints(Xdata) || length(weights) == 1L)
+  }
+  # evaluate edge correction weights at points 
+  if(edge) {
+    win <- Xdata$window
+    if(diggle) {
+      ## edge correction weights are attached to data points
+      xedge <- Xdata
+    } else {
+      ## edge correction weights are applied at query points
+      xedge <- Xquery
+      if(!all(inside.owin(Xquery, , win)))
+        stop(paste("Edge correction is not possible:",
+                   "some query points lie outside the data window"),
+             call.=FALSE)
+    }
+    if(is.null(varcov) && win$type == "rectangle") {
+        ## evaluate Gaussian probabilities directly
+      xr <- win$xrange
+      yr <- win$yrange
+      xx <- xedge$x
+      yy <- xedge$y
+      xprob <-
+        pnorm(xr[2L], mean=xx, sd=sigma) - pnorm(xr[1L], mean=xx, sd=sigma)
+      yprob <-
+        pnorm(yr[2L], mean=yy, sd=sigma) - pnorm(yr[1L], mean=yy, sd=sigma)
+      edgeweight <- xprob * yprob
+    } else {
+      edg <- second.moment.calc(Xdata, sigma=sigma,
+                                what="edge", varcov=varcov)
+      edgeweight <- safelookup(edg, xedge, warn=FALSE)
+    }
+    if(diggle) {
+      ## Diggle edge correction
+      ## edgeweight is attached to each data point
+      if(is.null(weights)) {
+        k <- 1L
+        weights <- 1/edgeweight
+      } else {
+        weights <- weights/edgeweight
+      }
+    }
+  }
+  
+  ndata <- npoints(Xdata)
+  nquery <- npoints(Xquery)
+  result <- if(k == 1L) numeric(nquery) else matrix(, nquery, k)
+  ## coordinates
+  xq <- Xquery$x
+  yq <- Xquery$y
+  xd <- Xdata$x
+  yd <- Xdata$y
+  if(!sorted) {
+    ## sort into increasing order of x coordinate (required by C code)
+    ooq <- fave.order(Xquery$x)
+    xq <- xq[ooq]
+    yq <- yq[ooq]
+    ood <- fave.order(Xdata$x)
+    xd <- xd[ood]
+    yd <- yd[ood]
+  }
+  if(is.null(varcov)) {
+    ## isotropic kernel
+    if(is.null(weights)) {
+      zz <- .C("crdenspt",
+               nquery  = as.integer(nquery),
+               xq      = as.double(xq),
+               yq      = as.double(yq),
+               ndata   = as.integer(ndata),
+               xd      = as.double(xd),
+               yd      = as.double(yd),
+               rmaxi   = as.double(cutoff),
+               sig     = as.double(sd),
+               result  = as.double(double(nquery)),
+               PACKAGE = "spatstat")
+      if(sorted) result <- zz$result else result[ooq] <- zz$result 
+    } else if(k == 1L) {
+      wtsort <- if(sorted) weights else weights[ood]
+      zz <- .C("wtcrdenspt",
+               nquery  = as.integer(nquery),
+               xq      = as.double(xq),
+               yq      = as.double(yq),
+               ndata   = as.integer(ndata),
+               xd      = as.double(xd),
+               yd      = as.double(yd),
+               wd      = as.double(wtsort),
+               rmaxi   = as.double(cutoff),
+               sig     = as.double(sd),
+               result  = as.double(double(nquery)),
+               PACKAGE = "spatstat")
+      if(sorted) result <- zz$result else result[ooq] <- zz$result 
+    } else {
+      ## matrix of weights
+      wtsort <- if(sorted) weights else weights[ood, ]
+      for(j in 1:k) {
+        zz <- .C("wtcrdenspt",
+                 nquery  = as.integer(nquery),
+                 xq      = as.double(xq),
+                 yq      = as.double(yq),
+                 ndata   = as.integer(ndata),
+                 xd      = as.double(xd),
+                 yd      = as.double(yd),
+                 wd      = as.double(wtsort[,j]),
+                 rmaxi   = as.double(cutoff),
+                 sig     = as.double(sd),
+                 result  = as.double(double(nquery)),
+                 PACKAGE = "spatstat")
+        if(sorted) result[,j] <- zz$result else result[ooq,j] <- zz$result
+      }
+      colnames(result) <- weightnames
+    }
+  } else {
+    ## anisotropic kernel
+    flatSinv <- as.vector(t(Sinv))
+    if(is.null(weights)) {
+      zz <- .C("acrdenspt",
+               nquery  = as.integer(nquery),
+               xq      = as.double(xq),
+               yq      = as.double(yq),
+               ndata   = as.integer(ndata),
+               xd      = as.double(xd),
+               yd      = as.double(yd),
+               rmaxi   = as.double(cutoff),
+               detsigma = as.double(detSigma),
+               sinv    = as.double(flatSinv),
+               result  = as.double(double(nquery)),
+               PACKAGE = "spatstat")
+      if(sorted) result <- zz$result else result[ooq] <- zz$result 
+    } else if(k == 1L) {
+      ## vector of weights
+      wtsort <- if(sorted) weights else weights[ood]
+      zz <- .C("awtcrdenspt",
+               nquery  = as.integer(nquery),
+               xq      = as.double(xq),
+               yq      = as.double(yq),
+               ndata   = as.integer(ndata),
+               xd      = as.double(xd),
+               yd      = as.double(yd),
+               wd      = as.double(wtsort),
+               rmaxi   = as.double(cutoff),
+               detsigma = as.double(detSigma),
+               sinv    = as.double(flatSinv),
+               result   = as.double(double(nquery)),
+               PACKAGE = "spatstat")
+      if(sorted) result <- zz$result else result[ooq] <- zz$result 
+    } else {
+      ## matrix of weights
+      wtsort <- if(sorted) weights else weights[ood, ]
+      for(j in 1:k) {
+        zz <- .C("awtcrdenspt",
+                 nquery  = as.integer(nquery),
+                 xq      = as.double(xq),
+                 yq      = as.double(yq),
+                 ndata   = as.integer(ndata),
+                 xd      = as.double(xd),
+                 yd      = as.double(yd),
+                 wd      = as.double(wtsort[,j]),
+                 rmaxi   = as.double(cutoff),
+                 detsigma = as.double(detSigma),
+                 sinv    = as.double(flatSinv),
+                 result  = as.double(double(nquery)),
+                 PACKAGE = "spatstat")
+        if(sorted) result[,j] <- zz$result else result[ooq,j] <- zz$result 
+      }
+      colnames(result) <- weightnames
+    }
+  }
+  # ........  Edge correction ........................................
+  if(edge && !diggle) 
+    result <- result/edgeweight
+
+  # tack on bandwidth
+  attr(result, "sigma") <- sigma
+  attr(result, "varcov") <- varcov
+  # 
+  return(result)
+}
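+## Usage sketch (internal; illustrative): smooth from the data points
+## onto an arbitrary set of query points,
+##   lam <- densitycrossEngine(cells, runifpoint(50), sigma=0.05)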
+
+
diff --git a/R/density.psp.R b/R/density.psp.R
new file mode 100755
index 0000000..78cea86
--- /dev/null
+++ b/R/density.psp.R
@@ -0,0 +1,76 @@
+#
+#
+#  density.psp.R
+#
+#  $Revision: 1.9 $    $Date: 2017/06/05 10:31:58 $
+#
+#
+
+density.psp <- function(x, sigma, ..., edge=TRUE,
+                        method=c("FFT", "C", "interpreted")) {
+  verifyclass(x, "psp")
+  method <- match.arg(method)
+  w <- x$window
+  n <- x$n
+  if(missing(sigma))
+    sigma <- 0.1 * diameter(w)
+  w <- as.mask(w, ...)
+  len <- lengths.psp(x)
+  if(n == 0 || all(len == 0))
+    return(as.im(0, w))
+  #
+  ang <- angles.psp(x, directed=TRUE)
+  xy <- rasterxy.mask(w)
+  xx <- xy$x
+  yy <- xy$y
+  switch(method,
+         interpreted = {
+           #' compute matrix contribution from each segment 
+           coz <- cos(ang)
+           zin <- sin(ang)
+           for(i in seq_len(n)) {
+             en <- x$ends[i,]
+             dx <- xx - en$x0
+             dy <- yy - en$y0
+             u1 <- dx * coz[i] + dy * zin[i]
+             u2 <- - dx * zin[i] + dy * coz[i]
+             value <- dnorm(u2, sd=sigma) *
+               (pnorm(u1, sd=sigma) - pnorm(u1-len[i], sd=sigma))
+             totvalue <- if(i == 1L) value else (value + totvalue)
+           }
+           dens <- im(totvalue, w$xcol, w$yrow)
+         },
+         C = {
+           #' C implementation of the above
+           xs <- x$ends$x0
+           ys <- x$ends$y0
+           xp <- as.numeric(as.vector(xx))
+           yp <- as.numeric(as.vector(yy))
+           np <- length(xp)
+           z <- .C("segdens",
+                   sigma = as.double(sigma),
+                   ns = as.integer(n),
+                   xs = as.double(xs),
+                   ys = as.double(ys),
+                   alps = as.double(ang),
+                   lens = as.double(len),
+                   np = as.integer(np),
+                   xp = as.double(xp), 
+                   yp = as.double(yp),
+                   z = as.double(numeric(np)),
+                   PACKAGE = "spatstat")
+           dens <- im(z$z, w$xcol, w$yrow)
+         },
+         FFT = {
+           L <- pixellate(x, ...)
+           L <- L/with(L, xstep * ystep)
+           dens <- blur(L, sigma, normalise=edge, bleed=FALSE)
+         })
+  unitname(dens) <- unitname(x)
+  if(edge && method != "FFT") {
+    edg <- second.moment.calc(midpoints.psp(x), sigma, what="edge", ...)
+    dens <- eval.im(dens/edg)
+  }
+  dens <- dens[x$window, drop=FALSE]
+  return(dens)
+}
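+## Usage sketch (illustrative, with a simulated segment pattern):
+##   L <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+##   D <- density(L, sigma=0.05, method="FFT")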
diff --git a/R/derivfv.R b/R/derivfv.R
new file mode 100644
index 0000000..eefec81
--- /dev/null
+++ b/R/derivfv.R
@@ -0,0 +1,144 @@
+#
+# derivfv.R
+#
+# differentiation for fv objects
+#
+#  $Revision: 1.6 $ $Date: 2014/10/24 00:22:30 $
+#
+
+deriv.fv <- local({
+
+  derivative <- function(y, r, ...) {
+    ss <- smooth.spline(r, y, ...)
+    predict(ss, r, deriv=1)$y
+  }
+  
+  deriv.fv <- function(expr, which="*", ...,
+                       method=c("spline", "numeric"),
+                       kinks=NULL,
+                       periodic=FALSE,
+                       Dperiodic=periodic) {
+    f <- expr
+    method <- match.arg(method)
+
+    ## select columns
+    ##  if(length(which) == 1L && which %in% .Spatstat.FvAbbrev) {
+    if(length(which) == 1L) {
+      if(which == ".x")
+        stop("Cannot smooth the function argument")
+      which <- fvnames(f, which)
+    }
+    
+    if(any(nbg <- !(which %in% names(f)))) 
+      stop(paste("Unrecognised column",
+                 ngettext(sum(nbg), "name", "names"),
+                 commasep(sQuote(which[nbg])), 
+                 "in argument", sQuote("which")))
+    relevant <- names(f) %in% which
+    ## get 
+    rname <- fvnames(f, ".x")
+    df <- as.data.frame(f)
+    rpos <- which(colnames(df) == rname)
+    rvals <- df[,rpos]
+    yvals <- df[,relevant,drop=FALSE]
+    nr <- length(rvals)
+    ##
+    if(Dperiodic) {
+      ## Derivative should be periodic
+      ## Recycle data to imitate periodicity
+      DR <- diff(range(rvals))
+      rvals <- c(rvals[-nr] - DR, rvals, rvals[-1L] + DR)
+      yleft <- yvals[-nr, , drop=FALSE]
+      yright <-  yvals[-1L, , drop=FALSE]
+      if(!periodic) {
+        ## original data are not periodic (e.g. cdf of angular variable)
+        ## but derivative must be periodic
+        jump <- matrix(as.numeric(yvals[nr,] - yvals[1L, ]),
+                       nr-1L, ncol(yvals), byrow=TRUE)
+        yleft <- yleft - jump
+        yright <- yright + jump
+      }
+      yvals <- rbind(yleft, yvals, yright)
+      actual <- nr:(2*nr - 1L)
+      NR <- length(rvals)
+    } else {
+      NR <- nr
+      actual <- 1:nr
+    }
+    ## cut x axis into intervals?
+    if(is.null(kinks)) {
+      cutx <- factor(rep(1, NR))
+    } else {
+      rr <- range(rvals)
+      if(periodic) 
+        kinks <- c(kinks-DR, kinks, kinks+DR)
+      breaks <- sort(unique(kinks))
+      if(breaks[1L] > rr[1L]) breaks <- c(rr[1L], breaks)
+      if(max(breaks) < rr[2L]) breaks <- c(breaks, rr[2L])
+      cutx <- cut(rvals, breaks=breaks, include.lowest=TRUE)
+    }
+    ## process
+    for(segment in levels(cutx)) {
+      ii <- (cutx == segment)
+      yy <- yvals[ii, , drop=FALSE]
+      switch(method,
+             numeric = {
+               dydx <- apply(yy, 2, diff)/diff(rvals[ii])
+               nd <- nrow(dydx)
+               dydx <- rbind(dydx, dydx[nd, ])
+             },
+             spline = {
+               dydx <- apply(yy, 2, derivative, 
+                             r=rvals[ii], ...)
+             })
+      df[ii[actual], relevant] <- dydx[ actual, ]
+    }
+    ## pack up
+    result <- f
+    result[,] <- df
+    ## tweak name of function
+    if(!is.null(yl <- attr(f, "ylab")))
+      attr(result, "ylab") <- substitute(bold(D)~Fx, list(Fx=yl))
+    if(!is.null(ye <- attr(f, "yexp")))
+      attr(result, "yexp") <- substitute(bold(D)~Fx, list(Fx=ye))
+    ## tweak mathematical labels
+    attr(result, "labl")[relevant]  <-
+      paste0("bold(D)~", attr(f, "labl")[relevant])
+    return(result)
+  }
+
+  deriv.fv
+})
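+## Usage sketch (illustrative): differentiate an estimated K function,
+##   K  <- Kest(cells)
+##   DK <- deriv(K, method="spline", spar=0.5)
+## extra arguments such as 'spar' are passed to smooth.spline.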
+
+
+increment.fv <- function(f, delta) {
+  stopifnot(is.fv(f))
+  check.1.real(delta)
+  stopifnot(delta > 0)
+  half <- delta/2
+  xx <- with(f, .x)
+  ynames <- fvnames(f, ".")
+  yy <- as.data.frame(lapply(ynames,
+                             function(a, xx, f, h) {
+                               g <- as.function(f, value=a)
+                               g(xx+h)-g(xx-h)
+                             },
+                             xx=xx, f=f, h=half))
+  Y <- f
+  Y[,ynames] <- yy
+  ## tweak name of function
+  if(!is.null(yl <- attr(f, "ylab")))
+    attr(Y, "ylab") <- substitute(Delta~Fx, list(Fx=yl))
+  if(!is.null(ye <- attr(f, "yexp")))
+    attr(Y, "yexp") <- substitute(Delta~Fx, list(Fx=ye))
+  ## tweak mathematical labels
+  relevant <- colnames(Y) %in% ynames
+  attr(Y, "labl")[relevant]  <-
+      paste0("Delta~", attr(f, "labl")[relevant])
+  ## tweak recommended range
+  attr(Y, "alim") <- intersect.ranges(attr(f, "alim"),
+                                      range(xx) + c(1,-1)*half)
+  return(Y)
+}
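+## Illustration: centred increment f(x + delta/2) - f(x - delta/2),
+##   DK <- increment.fv(Kest(cells), delta=0.01)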
+
+  
diff --git a/R/detPPF-class.R b/R/detPPF-class.R
new file mode 100644
index 0000000..3221513
--- /dev/null
+++ b/R/detPPF-class.R
@@ -0,0 +1,255 @@
+## support for class 'detpointprocfamily'
+
+print.detpointprocfamily <- function(x, ...){
+  splat(x$name, "determinantal point process model",
+        ifelse(is.numeric(x$dim), paste("in dimension", x$dim), ""))
+  #' Not used:
+  #'  parnames <- names(x$par)
+  anyfixed <- length(x$fixedpar)>0
+  if(anyfixed){
+      fixedlambda <- NULL
+      if(!is.null(x$intensity) && is.element(x$intensity, names(x$fixedpar))){
+          lambda <- signif(x$fixedpar[[x$intensity]], 4)
+          x$fixedpar <- x$fixedpar[names(x$fixedpar)!=x$intensity]
+          fixedlambda <- paste(x$intensity, ifelse(is.null(x$thin), paste("=", lambda), "= an image"))
+      }
+      if(length(x$fixedpar)>0){
+          fixedparstring <- paste(names(x$fixedpar), signif(unlist(x$fixedpar),4), sep = " = ", collapse = ", ")
+          fixedparstring <- paste(c(fixedlambda, fixedparstring), collapse=", ")
+      } else{
+          fixedparstring <- fixedlambda
+      }
+  }
+  ## Partially specified model:
+  if(length(x$freepar)>0){
+    splat("The model is only partially specified.")
+    splat("The following parameters are free (e.g. to be estimated by dppm):")
+    cat(x$freepar, sep = ", ")
+    cat("\n")
+    if(anyfixed){
+        cat("The fixed parameters are: ")
+        cat(fixedparstring, sep = ", ")
+    } else{
+        splat("There are no fixed parameters.")
+    }        
+  } else{
+    cat("The parameters are: ")
+    cat(fixedparstring, sep = ", ")
+  }
+  cat("\n")
+  if(!is.null(x$intensity)){
+    splat("The parameter", x$intensity,
+          "specifies the intensity of the process.")
+  }
+  if(is.character(x$dim)){
+    splat("The parameter", x$dim,
+          "specifies the dimension of the state space.")
+  }
+  invisible(NULL)
+}
+
+reach.detpointprocfamily <- function(x, ...){
+    model <- x
+    fun <- model$range
+    nam <- names(formals(fun))
+    do.call(model$range, c(model$fixedpar[is.element(names(model$fixedpar),nam)], list(...)))
+}
+
+dppparbounds <- function(model, name, ...){
+    if(inherits(model, "dppm"))
+        model <- model$fitted
+    if(!inherits(model, "detpointprocfamily"))
+        stop("input model must be of class detpointprocfamily or dppm")
+    fun <- model$parbounds
+    nam <- names(formals(fun))
+    if(missing(name))
+        name <- nam[!is.element(nam, c("name", model$dim))]
+    rslt <- matrix(0,length(name), 2, dimnames = list(name, c("lower", "upper")))
+    for(nn in name){
+        tmp <- try(do.call(fun, c(model$fixedpar[is.element(names(model$fixedpar),nam)], list(...), list(name=nn))), silent=TRUE)
+        if(inherits(tmp, "try-error")){
+            rslt[nn,] <- c(NA, NA)
+        }else{
+            rslt[nn,] <- tmp
+        }
+    }
+    rslt
+}
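+## Illustration: feasible range of 'alpha' in a Gaussian DPP
+## with fixed intensity,
+##   dppparbounds(dppGauss(lambda=100, d=2), "alpha")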
+
+valid.detpointprocfamily <- function(object, ...){
+  if(length(object$freepar)>0)
+      return(NA)
+  ## If there is no function for checking validity we always return TRUE:
+  if(is.null(object$valid))
+    return(TRUE)
+  do.call(object$valid, object$fixedpar)
+}
+
+dppspecdenrange <- function(model){
+  ## If there is no function for checking finite range of spectral density we always return Inf:
+  fun <- model$specdenrange
+  if(is.null(fun))
+    return(Inf)
+  xx <- try(fun(model), silent = TRUE)
+  if(inherits(xx, "try-error")) Inf else xx
+}
+
+dppspecden <- function(model){
+  fun <- model$specden
+  if(is.null(fun))
+    stop("Spectral density unknown for this model!")
+  if(length(model$freepar)>0)
+    stop("Cannot extract the spectral density of a partially specified model. Please supply all parameters.")
+  specden <- function(x, ...){
+    allargs <- c(list(x), model$fixedpar, list(...))
+    do.call(fun, allargs)
+  }
+  return(specden)
+}
+
+dppkernel <- function(model, ...){
+  if(inherits(model, "dppm"))
+    model <- model$fitted
+  fun <- model$kernel
+  if(is.null(fun))
+    return(dppapproxkernel(model, ...))
+  if(length(model$freepar)>0)
+    stop("Cannot extract the kernel of a partially specified model. Please supply all parameters.")
+  firstarg <- names(formals(fun))[1L]
+  kernel <- function(x){
+    allargs <- c(structure(list(x), .Names=firstarg), model$fixedpar)
+    do.call(fun, allargs)
+  }
+  return(kernel)
+}
+
+dppapproxkernel <- function(model, trunc = .99, W = NULL){
+    if(inherits(model, "dppm")){
+        W <- model$window
+        model <- model$fitted
+    }
+    ####### BACKDOOR TO SPHERICAL CASE ########
+    if(!is.null(spherefun <- model$approxkernelfun)){
+        spherefun <- get(spherefun)
+        rslt <- spherefun(model, trunc)
+        return(rslt)
+    }
+    ###########################################
+    d <- dim(model)
+    if(is.null(W))
+      W <- boxx(replicate(d, c(-.5,.5), simplify=FALSE))
+    W <- as.boxx(W)
+    if(d!=ncol(W$ranges))
+        stop(paste("The dimension of the window:", ncol(W$ranges), "is inconsistent with the dimension of the model:", d))
+    Wscale <- as.numeric(W$ranges[2L,]-W$ranges[1L,])
+    tmp <- dppeigen(model, trunc, Wscale, stationary=FALSE)
+    index <- tmp$index
+    eig <- tmp$eig
+    prec <- tmp$prec
+    trunc <- tmp$trunc
+    rm(tmp)
+    f <- function(r){
+        x <- matrix(0, nrow=length(r), ncol=d)
+        x[,1L] <- r
+        basis <- fourierbasis(x, index, win = W)
+        approx <- matrix(eig, nrow=length(eig), ncol=length(r)) * basis
+        return(Re(colSums(approx)))
+    }
+    attr(f, "dpp") <- list(prec = prec, trunc = trunc)
+    return(f)
+}
+
+pcfmodel.detpointprocfamily <- function(model, ...){
+  kernel <- dppkernel(model, ...)
+  f <- function(x){
+    1 - (kernel(x)/kernel(0))^2
+  }
+  return(f)
+}
+
+dppapproxpcf <- function(model, trunc = .99, W = NULL){
+  kernel <- dppapproxkernel(model, trunc = trunc, W = W)
+  f <- function(x){
+    1 - (kernel(x)/kernel(0))^2
+  }
+  attr(f, "dpp") <- attr(kernel, "dpp")
+  return(f)
+}
+
+Kmodel.detpointprocfamily <- function(model, ...){
+  if(length(model$freepar)>0)
+    stop("Cannot extract the K function of a partially specified model. Please supply all parameters.")
+  fun <- model$Kfun
+  if(!is.null(fun)){
+      firstarg <- names(formals(fun))[1L]
+      Kfun <- function(r){
+          allargs <- c(structure(list(r), .Names=firstarg), model$fixedpar)
+          do.call(fun, allargs)
+      }
+  } else{
+      pcf <- pcfmodel(model, ...)
+      intfun <- function(xx){
+          2*pi*xx*pcf(xx)
+      }
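+      ## K(r) = integral from 0 to r of 2*pi*s*g(s) ds, computed
+      ## piecewise between successive sorted values of r and cumulated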
+      Kfun <- function(r){
+          r <- sort(r)
+          if(r[1L]<0)
+              stop("Negative values not allowed in K function!")
+          r <- c(0,r)
+          int <- unlist(lapply(2:length(r), function(i) integrate(intfun, r[i-1L], r[i], subdivisions=10)$value))
+          return(cumsum(int))
+      }
+  }
+  return(Kfun)
+}
+
+update.detpointprocfamily <- function(object, ...){
+     newpar <- list(...)
+     if(length(newpar)==1L && is.list(newpar[[1L]]) && !is.im(newpar[[1L]]))
+         newpar <- newpar[[1L]]
+     nam <- names(newpar)
+     if(length(newpar)>0&&is.null(nam))
+         stop(paste("Named arguments are required. Please supply parameter values in a", sQuote("tag=value"), "form"))
+     oldpar <- object$fixedpar[!is.element(names(object$fixedpar), nam)]
+     thin <- object$thin
+     object <- do.call(object$caller, c(newpar,oldpar))
+     if(is.null(object$thin))
+        object$thin <- thin
+     return(object)
+}
+
+is.stationary.detpointprocfamily <- function(x){
+    if(is.null(x$intensity))
+        return(FALSE)
+    lambda <- getElement(x$fixedpar, x$intensity)
+    if(!is.null(lambda)&&is.numeric(lambda)&&is.null(x$thin))
+        return(TRUE)
+    return(FALSE)
+    
+}
+
+intensity.detpointprocfamily <- function(X, ...){
+    lambda <- NULL
+    if(!is.null(X$intensity))
+        lambda <- getElement(X$fixedpar, X$intensity)
+    if(!is.null(lambda)){
+      if(!is.null(X$thin))
+        lambda <- lambda*X$thin
+      return(lambda)
+    }
+    return(NA)
+}
+
+parameters.dppm <- parameters.detpointprocfamily <- function(model, ...){
+    if(inherits(model, "dppm"))
+        model <- model$fitted
+    c(model$fixed, structure(rep(NA,length(model$freepar)), .Names = model$freepar))
+}
+
+dim.detpointprocfamily <- function(x){
+    if(is.numeric(d <- x$dim)){
+        return(d)
+    } else{
+        return(getElement(x$fixedpar, d))
+    }
+}
diff --git a/R/detpointprocfamilyfun.R b/R/detpointprocfamilyfun.R
new file mode 100644
index 0000000..edf5b46
--- /dev/null
+++ b/R/detpointprocfamilyfun.R
@@ -0,0 +1,493 @@
+##    detpointprocfamilyfun.R
+##
+##    $Revision: 1.5 $   $Date: 2015/10/19 02:27:17 $
+##
+## This file contains the function `detpointprocfamilyfun'
+## to define new DPP model family functions
+## and a print method for class `detpointprocfamilyfun'
+## as well as the currently defined 
+## - dppBessel
+## - dppCauchy
+## - dppGauss
+## - dppMatern
+## - dppPowerExp
+
+detpointprocfamilyfun <- local({
+
+names_formals <- function(f, dots = FALSE){
+    nam <- names(formals(f))
+    if(!dots) nam <- nam[nam!="..."]
+    return(nam)
+}
+
+detpointprocfamilyfun <-
+  function(kernel=NULL, specden=NULL, basis="fourierbasis",
+           convkernel=NULL, Kfun=NULL, valid=NULL,
+           intensity=NULL, dim=2, name="User-defined",
+           isotropic=TRUE, range=NULL, parbounds=NULL,
+           specdenrange=NULL, startpar=NULL, ...)
+{
+  ## Check which functions are given, check them for sanity and
+  ## extract argument names and other stuff
+  given <- NULL
+  if(!is.null(kernel)){
+    if(!is.function(kernel))
+      stop("If kernel is given it must be a function.")
+    given <- "kernel"
+    kernelnames <- names_formals(kernel)
+    if(length(kernelnames)<1L)
+      stop("kernel function must have at least one argument")
+    kernelnames <- kernelnames[-1L]
+  }
+  if(!is.null(specden)){
+    if(!is.function(specden))
+      stop("If specden is given it must be a function.")
+    given <- c(given, "specden")
+    specdennames <- names_formals(specden)
+    if(length(specdennames)<1L)
+      stop("specden function must have at least one argument")
+    specdennames <- specdennames[-1L]
+  }
+  if(is.null(given))
+    stop("At least one of kernel or specden must be provided.")
+  if(length(given)==2){
+    if(!setequal(kernelnames,specdennames))
+      stop("argument names of kernel and specden must match.")
+  }
+  if(is.element("kernel",given)){
+    parnames <- kernelnames
+  } else{
+    parnames <- specdennames
+  }
+  if(!is.null(convkernel)){
+    given <- c(given,"convkernel")
+    if(!is.function(convkernel)||length(formals(convkernel))<2)
+      stop("If convkernel is given it must be a function with at least two arguments.")
+    if(!setequal(parnames,names_formals(convkernel)[-(1:2)]))
+      stop("argument names of convkernel must match argument names of kernel and/or specden.")
+  }
+  if(!is.null(Kfun)){
+    given <- c(given,"Kfun")
+    if(!is.function(Kfun)||length(formals(Kfun))<1L)
+      stop("If Kfun is given it must be a function with at least one arguments.")
+    if(!setequal(parnames,names_formals(Kfun)[-1L]))
+      stop("argument names of Kfun must match argument names of kernel and/or specden.")
+  }
+  if(!is.null(valid)){
+    if(!(is.function(valid)&&setequal(parnames,names_formals(valid))))
+      stop("argument names of valid must match argument names of kernel and/or specden.")
+  } else{
+    warning("No function for checking parameter validity provided. ANY numerical value for the parameters will be accepted.")
+  }
+  if(!is.null(intensity)&&!(is.character(intensity)&&length(intensity)==1L&&is.element(intensity, parnames)))
+    stop("argument intensity must be NULL or have length one, be of class character and match a parameter name")
+      
+  if(!(is.character(dim) || is.numeric(dim)) || length(dim) != 1L)
+    stop("argument dim must have length one and be of class character or numeric")
+  if(is.character(dim)){
+    if(!is.element(dim, parnames))
+      stop("When dim is a character it must agree with one of the parameter names of the model")
+  } else{
+    dim <- round(dim)
+    if(dim<1L)
+      stop("When dim is a numeric it must be a positive integer")
+  }
+
+  ## Catch extra unknown args (will be appended to output object below).
+  dots <- list(...)
+
+  ## Create output object.
+  out <- function(...){
+    caller <- match.call()[[1L]]
+    caller <- eval(substitute(caller), parent.frame())
+    fixedpar <- list(...)
+    nam <- names(fixedpar)
+    if(length(fixedpar)>0&&is.null(nam))
+      stop(paste("Named arguments are required. Please supply parameter values in a", sQuote("tag=value"), "form"))
+    match <- is.element(nam, parnames)
+    if(sum(!match)>0)
+      warning(paste("Not all supplied argument(s) make sense. Valid arguments are: ",
+                    paste(parnames, collapse = ", "),
+                    ". The following supplied argument(s) will be ignored: ",
+                    paste(nam[!match], collapse = ", "),
+                    sep = ""))
+    fixedpar <- fixedpar[match]
+    
+    ## Code to always fix the dimension to a numeric when calling the function #######
+    if(is.character(dim) && !is.element(dim,names(fixedpar))){
+         dimpar <- structure(list(2), .Names=dim)
+         fixedpar <- c(fixedpar, dimpar)
+    }
+    
+    ## Detect inhomogeneous intensity (an image), and replace by max and an image for thinning
+    thin <- NULL
+    if(!is.null(intensity)){
+       lambda <- getElement(fixedpar, intensity)
+       if(is.im(lambda)){
+         lambdamax <- max(lambda)
+         thin <- lambda/lambdamax
+         fixedpar[[intensity]] <- lambdamax
+       }
+    }
+      
+    obj <- list(fixedpar = fixedpar,
+                freepar = parnames[!is.element(parnames,names(fixedpar))],
+                kernel = kernel,
+                specden = specden,
+                convkernel = convkernel,
+                intensity = intensity,
+                thin = thin,
+                dim = dim,
+                name = name,
+                range = range,
+                valid = valid,
+                parbounds = parbounds,
+                specdenrange = specdenrange,
+                startpar = startpar,
+                isotropic = isotropic,
+                caller = caller,
+                basis = basis
+                )
+    obj <- append(obj, dots)
+    class(obj) <- "detpointprocfamily"
+    return(obj)
+  }
+  class(out) <- c("detpointprocfamilyfun",
+                  "pointprocfamilyfun",
+                  class(out))
+  attr(out, "parnames") <- parnames
+  attr(out, "name") <- name
+  return(out)
+}
+
+detpointprocfamilyfun
+}
+)
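+## Sketch of defining a new family with this function (illustrative only:
+## the kernel below is a placeholder, and 'valid' merely checks positivity
+## rather than existence of the DPP):
+##   dppExample <- detpointprocfamilyfun(
+##     kernel = function(x, lambda, alpha, d) lambda * exp(-(x/alpha)^2),
+##     valid  = function(lambda, alpha, d) lambda > 0 && alpha > 0 && d >= 1,
+##     intensity = "lambda", dim = "d", name = "Example")
+##   m <- dppExample(lambda=100, alpha=0.05, d=2)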
+
+print.detpointprocfamilyfun <- function(x, ...){
+  cat(paste(attr(x, "name"), "determinantal point process model family\n"))
+  cat("The parameters of the family are:\n")
+  cat(attr(x, "parnames"), sep = ", ")
+  cat("\n")
+  invisible(NULL)
+}
+
+dppBessel <- detpointprocfamilyfun(
+  name="Bessel",
+  kernel=function(x, lambda, alpha, sigma, d){
+    a <- 0.5*(sigma+d)
+    y <- abs(x/alpha)
+    # Kernel: lambda*2^a*gamma(a+1)*besselJ(2*y*sqrt(a),a) / (2*y*sqrt(a))^a
+    logrslt <- log(lambda) + a*log(2) + lgamma(a+1) - a*log(2*y*sqrt(a))
+    rslt <- exp(logrslt) * besselJ(2*y*sqrt(a), a)
+    rslt[x==0] <- lambda
+    return(rslt)
+  },
+  specden=function(x, lambda, alpha, sigma, d){
+    a <- sigma+d
+    # specden: lambda*(2*pi)^(d/2)*alpha^d*gamma(0.5*a+1)/a^(d/2)/gamma(sigma/2+1)*(1-2*pi^2*alpha^2*x^2/a)^(sigma/2)
+    logrslt <- log(lambda) + (d/2)*log(2*pi) + d*log(alpha) + lgamma(0.5*a+1)
+    logrslt <- logrslt - (d/2)*log(a) - lgamma(sigma/2+1)
+    tmp <- 1-2*pi^2*alpha^2*x^2/a
+    warnopt <- options(warn=-1)
+    logrslt <- logrslt + ifelse(tmp<0, -Inf, (sigma/2)*log(tmp))
+    options(warnopt)
+    return(exp(logrslt))
+  },
+  specdenrange=function(model){
+    p <- model$fixedpar
+    sqrt((p$sigma+p$d)/(2*pi^2*p$alpha^2))
+  },
+  valid=function(lambda, alpha, sigma, d){
+    a <- sigma+d
+    OK <- lambda>0 && alpha>0 && d>=1 && sigma>=0
+    if(!OK)
+      return(FALSE)
+    ## Upper bound for alpha (using log-scale)
+    lognum <- log(a^(0.5*d)) + lgamma(0.5*sigma+1)
+    logdenom <- log( lambda*(2*pi^(0.5*d))) + lgamma(0.5*a+1)
+    logalphamax <- (1/d) * (lognum - logdenom)
+    return(OK && log(alpha) <= logalphamax)
+  },
+  isotropic=TRUE,
+  intensity="lambda",
+  dim="d",
+  parbounds=function(name, lambda, alpha, sigma, d){
+    lognum <- log((sigma+d)^(0.5*d)) + lgamma(0.5*sigma+1)
+    logdenom <- log(2*pi^(0.5*d)) + lgamma(0.5*(sigma+d)+1)
+    switch(name,
+           lambda = c(0, exp(lognum - log( alpha^d) - logdenom)) ,
+           alpha = c(0, exp((1/d) * (lognum - log(lambda) - logdenom))),
+           sigma = c(0, switch(as.character(d), "2"=Inf, NA)),
+           stop("Parameter name misspecified")
+    )
+  },
+  startpar=function(model, X){
+    rslt <- NULL
+    if("d" %in% model$freepar){
+      model <- update(model, d=spatdim(X))
+    }
+    if("lambda" %in% model$freepar){
+      lambda <- intensity(X)
+      while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK)
+        lambda <- lambda/2
+      rslt <- c(rslt, "lambda" = lambda)
+    }
+    if("sigma" %in% model$freepar){
+      sigma <- 2
+      while(!is.na(OK <- valid(model <- update(model, sigma=sigma)))&&!OK)
+        sigma <- sigma/2
+      rslt <- c(rslt, "sigma" = sigma)
+    }
+    if("alpha" %in% model$freepar){
+      alpha <- .8*dppparbounds(model, "alpha")[2L]
+      while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){
+        alpha <- alpha/2
+      }
+      rslt <- c(rslt, "alpha" = alpha)
+    }
+    return(rslt)
+  }
+)
+
+dppCauchy <- detpointprocfamilyfun(
+  name="Cauchy",
+  kernel=function(x, lambda, alpha, nu, d){
+    rslt <- lambda * (1+(x/alpha)^2)^(-nu-d/2)
+    rslt[x==0] <- lambda
+    return(rslt)
+  },
+  specden=function(x, lambda, alpha, nu, d){
+    y <- 2*x*alpha*pi
+    rslt <- lambda * y^nu * besselK(x = y, nu = nu) * (sqrt(pi)*alpha)^d * exp((1-nu)*log(2) - lgamma(nu+d/2))
+    rslt[x==0] <- lambda * exp(lgamma(nu) - lgamma(nu+d/2)) * (sqrt(pi)*alpha)^d
+    return(rslt)
+  },
+  Kfun = function(x, lambda, alpha, nu, d){
+    rslt <- pi*x^2 - pi*alpha^2/(2*nu+1) * (1 - (alpha^2/(alpha^2+x^2))^(2*nu+1))
+    rslt[rslt<0] <- 0
+    return(rslt)
+  },
+  valid=function(lambda, alpha, nu, d){
+    ## Note the upper bound on nu for numerical stability!
+    lambda>0 && alpha>0 && nu>0 && nu<=50 && d>=1 && lambda <= gamma(nu+d/2)/(gamma(nu)*(sqrt(pi)*alpha)^d)
+  },
+  isotropic=TRUE,
+  intensity="lambda",
+  dim="d",
+  range=function(alpha, nu, d, bound = .99){
+    if(missing(alpha))
+      stop("The parameter alpha is missing.")
+    if(missing(nu))
+      stop("The parameter nu is missing.")
+    if(missing(d))
+      stop("The parameter d (giving the dimension) is missing.")
+    if(!(is.numeric(bound)&&bound>0&&bound<1))
+      stop("Argument bound must be a numeric between 0 and 1.")
+    return(alpha * sqrt((1-bound)^(-1/(2*nu+d))-1))
+  },
+  parbounds=function(name, lambda, alpha, nu, d){
+    switch(name,
+           lambda = c(0, gamma(nu+d/2)/(gamma(nu)*(sqrt(pi)*alpha)^d)),
+           alpha = c(0, (exp(lgamma(nu+d/2)-lgamma(nu))/lambda)^(1/d)/sqrt(pi)),
+           ## nu bound only implemented for d = 2.
+           nu = c(switch(as.character(d), "2"=pi*lambda*alpha^2, NA), Inf),
+           stop("Parameter name misspecified")
+    )
+  },
+  startpar=function(model, X){
+    rslt <- NULL
+    if("lambda" %in% model$freepar){
+      lambda <- intensity(X)
+      while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK)
+        lambda <- lambda/2
+      rslt <- c(rslt, "lambda" = lambda)
+    }
+    if("nu" %in% model$freepar){
+      nu <- 2
+      while(!is.na(OK <- valid(model <- update(model, nu=nu)))&&!OK)
+        nu <- nu/2
+      rslt <- c(rslt, "nu" = nu)
+    }
+    if("alpha" %in% model$freepar){
+      alpha <- .8*dppparbounds(model, "alpha")[2L]
+      while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){
+        alpha <- alpha/2
+      }
+      rslt <- c(rslt, "alpha" = alpha)
+    }
+    return(rslt)
+  }
+)
+
+dppGauss <- detpointprocfamilyfun(
+  name="Gaussian",
+  kernel=function(x, lambda, alpha, d){
+    rslt <- lambda*exp(-(x/alpha)^2)
+    return(rslt)
+  },
+  specden=function(x, lambda, alpha, d){
+    lambda * (sqrt(pi)*alpha)^d * exp(-(x*alpha*pi)^2)
+  },
+  convkernel=function(x, k, lambda, alpha, d){
+    logres <- k*log(lambda*pi*alpha^2) - log(pi*k*alpha^2) - x^2/(k*alpha^2)
+    return(exp(logres))
+  },
+  Kfun = function(x, lambda, alpha, d){
+    pi*x^2 - pi*alpha^2/2*(1-exp(-2*x^2/alpha^2))
+  },
+  valid=function(lambda, alpha, d){
+    lambda>0 && alpha>0 && d>=1 && lambda <= (sqrt(pi)*alpha)^(-d)
+  },
+  isotropic=TRUE,
+  intensity="lambda",
+  dim="d",
+  range=function(alpha, bound = .99){
+    if(missing(alpha))
+      stop("The parameter alpha is missing.")
+    if(!(is.numeric(bound)&&bound>0&&bound<1))
+      stop("Argument bound must be a numeric between 0 and 1.")
+    return(alpha*sqrt(-log(sqrt(1-bound))))
+  },
+  parbounds=function(name, lambda, alpha, d){
+    switch(name,
+           lambda = c(0, (sqrt(pi)*alpha)^(-d)),
+           alpha = c(0, lambda^(-1/d)/sqrt(pi)),
+           stop("Parameter name misspecified")
+    )
+  },
+  startpar=function(model, X){
+    rslt <- NULL
+    if("lambda" %in% model$freepar){
+      lambda <- intensity(X)
+      rslt <- c(rslt, "lambda" = lambda)
+      model <- update(model, lambda=lambda)
+    }
+    if("alpha" %in% model$freepar){
+      alpha <- .8*dppparbounds(model, "alpha")[2L]
+      rslt <- c(rslt, "alpha" = alpha)
+    }
+    return(rslt)
+  }
+)
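+## Illustration:
+##   m <- dppGauss(lambda=100, alpha=0.05, d=2)
+##   valid(m)   # TRUE, since lambda <= (sqrt(pi)*alpha)^(-2) ~= 127.3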
+
+dppMatern <- detpointprocfamilyfun(
+  name="Whittle-Matern",
+  kernel=function(x, lambda, alpha, nu, d){
+    rslt <- lambda*2^(1-nu) / gamma(nu) * ((x/alpha)^nu) * besselK(x = x/alpha, nu = nu)
+    rslt[x==0] <- lambda
+    return(rslt)
+  },
+  specden=function(x, lambda, alpha, nu, d){
+    lambda * exp(lgamma(nu+d/2) - lgamma(nu)) * (2*sqrt(pi)*alpha)^d * (1+(2*x*alpha*pi)^2)^(-nu-d/2)
+  },
+  convkernel=function(x, k, lambda, alpha, nu, d){
+    nu2 <- k*(nu+d/2)-d/2
+    logres <- (nu2)*log(x/alpha) + log(besselK(x = x/alpha, nu = nu2, expon.scaled = TRUE)) - x/alpha
+    logres[x == 0] <- (nu2-1)*log(2) + lgamma(nu2)
+    logres <- logres + k*log(lambda) + k*(lgamma(nu+d/2)-lgamma(nu)) + (d*k-d+1-nu2)*log(2) + d*(k-1)*log(sqrt(pi)*alpha) - lgamma(nu2+d/2)
+    index <- which(logres == Inf)
+    logres[index] <- -Inf
+    return(exp(logres))
+  },
+  valid=function(lambda, alpha, nu, d){
+    ## Note the upper bound on nu for numerical stability!
+    lambda>0 && alpha>0 && nu>0 && nu<=50 && d>=1 && lambda <= gamma(nu)/(gamma(nu+d/2)*(2*sqrt(pi)*alpha)^d)
+  },
+  isotropic=TRUE,
+  intensity="lambda",
+  dim="d",
+  range=function(alpha, nu, d, bound = .99, exact = FALSE){
+    if(missing(alpha))
+      stop("The parameter alpha is missing.")
+    if(missing(nu))
+      stop("The parameter nu is missing.")
+    if(missing(d))
+      stop("The parameter d (giving the dimension) is missing.")
+    if(!is.logical(exact))
+      stop("Argument exact must be a logical.")
+    if(!exact&&d==2)
+      return(alpha * sqrt(8*nu)) ## range suggested by Haavard Rue et al.
+    if(!(is.numeric(bound)&&bound>0&&bound<1))
+      stop("Argument bound must be a numeric between 0 and 1.")
+    fun <- function(x) sqrt(1-bound)-2^(1-nu) / gamma(nu) * ((x/alpha)^nu) * besselK(x = x/alpha, nu = nu)
+    return(uniroot(fun, c(sqrt(.Machine$double.eps),1e3*alpha*sqrt(nu)))$root)
+  },
+  parbounds=function(name, lambda, alpha, nu, d){
+    switch(name,
+           lambda = c(0, gamma(nu)/(gamma(nu+d/2)*(2*sqrt(pi)*alpha)^d)),
+           alpha = c(0, (exp(lgamma(nu)-lgamma(nu+d/2))/lambda)^(1/d)/2/sqrt(pi)),
+           ## nu bound only implemented for d = 2 and d = 4.
+           nu = c(0, switch(as.character(d), "2"=1/(4*pi*lambda*alpha^2), "4"=sqrt(1/4+1/(lambda*16*pi*pi*alpha^4))-1/2, NA)),
+           stop("Parameter name misspecified")
+    )
+  },
+  startpar=function(model, X){
+    rslt <- NULL
+    if("lambda" %in% model$freepar){
+      lambda <- intensity(X)
+      while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK)
+        lambda <- lambda/2
+      rslt <- c(rslt, "lambda" = lambda)
+    }
+    if("nu" %in% model$freepar){
+      nu <- 2
+      while(!is.na(OK <- valid(model <- update(model, nu=nu)))&&!OK)
+        nu <- nu/2
+      rslt <- c(rslt, "nu" = nu)
+    }
+    if("alpha" %in% model$freepar){
+      alpha <- .8*dppparbounds(model, "alpha")[2L]
+      while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){
+        alpha <- alpha/2
+      }
+      rslt <- c(rslt, "alpha" = alpha)
+    }
+    return(rslt)
+  }
+)
+
+dppPowerExp <- detpointprocfamilyfun(
+  name="Power Exponential Spectral",
+  specden=function(x, lambda, alpha, nu, d){
+    lambda * gamma(d/2+1) * alpha^d / (pi^(d/2)*gamma(d/nu+1)) * exp(-(alpha*x)^nu)
+  },
+  valid=function(lambda, alpha, nu, d){
+    ## Note the upper bound on nu for numerical stability!
+    lambda>0 && alpha>0 && nu>0 && nu<=20 && d>=1 && lambda <= pi^(d/2)*gamma(d/nu+1) / (gamma(1+d/2)*alpha^d)
+  },
+  isotropic=TRUE,
+  intensity="lambda",
+  dim="d",
+  parbounds=function(name, lambda, alpha, nu, d){
+    switch(name,
+           lambda = c(0, pi^(d/2)*gamma(d/nu+1) / (gamma(d/2+1)*alpha^d)),
+           alpha = c(0, (pi^(d/2)*gamma(d/nu+1) / (lambda * gamma(d/2+1)))^(1/d)),
+           nu = c(NA, NA),
+           stop("Parameter name misspecified")
+    )
+  },
+  startpar=function(model, X){
+    rslt <- NULL
+    if("lambda" %in% model$freepar){
+      lambda <- intensity(X)
+      while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK)
+        lambda <- lambda/2
+      rslt <- c(rslt, "lambda" = lambda)
+    }
+    if("nu" %in% model$freepar){
+      nu <- 2
+      while(!is.na(OK <- valid(model <- update(model, nu=nu)))&&!OK)
+        nu <- nu/2
+      rslt <- c(rslt, "nu" = nu)
+    }
+    if("alpha" %in% model$freepar){
+      alpha <- .8*dppparbounds(model, "alpha")[2L]
+      while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){
+        alpha <- alpha/2
+      }
+      rslt <- c(rslt, "alpha" = alpha)
+    }
+    return(rslt)
+  }
+)
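For orientation, a family function defined above can be used fully specified, or partially specified and then fitted. A sketch, assuming the spatstat fitter dppm, its simulate method, and the built-in swedishpines dataset (parameter values arbitrary but inside the validity bounds):

    library(spatstat)
    ## fully specified planar Gaussian determinantal model
    mod <- dppGauss(lambda = 100, alpha = 0.03, d = 2)
    X <- simulate(mod)                        # one realisation in the unit square
    ## fit the family to data, estimating the free parameters
    fit <- dppm(swedishpines ~ 1, dppGauss)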
diff --git a/R/dg.R b/R/dg.R
new file mode 100755
index 0000000..75531f1
--- /dev/null
+++ b/R/dg.R
@@ -0,0 +1,167 @@
+#
+#     dg.R
+#
+#    $Revision: 1.19 $	$Date: 2017/06/05 10:31:58 $
+#
+#     Diggle-Gratton pair potential
+#
+#
+DiggleGratton <- local({
+
+  # .... auxiliary functions ......
+
+  diggraterms <- function(X, Y, idX, idY, delta, rho) {
+    stopifnot(is.numeric(delta))
+    stopifnot(is.numeric(rho))
+    stopifnot(delta < rho)
+    # sort in increasing order of x coordinate
+    oX <- fave.order(X$x)
+    oY <- fave.order(Y$x)
+    Xsort <- X[oX]
+    Ysort <- Y[oY]
+    idXsort <- idX[oX]
+    idYsort <- idY[oY]
+    nX <- npoints(X)
+    nY <- npoints(Y)
+    # call C routine
+    out <- .C("Ediggra",
+              nnsource = as.integer(nX),
+              xsource  = as.double(Xsort$x),
+              ysource  = as.double(Xsort$y),
+              idsource = as.integer(idXsort),
+              nntarget = as.integer(nY),
+              xtarget  = as.double(Ysort$x),
+              ytarget  = as.double(Ysort$y),
+              idtarget = as.integer(idYsort),
+              ddelta   = as.double(delta),
+              rrho     = as.double(rho),
+              values   = as.double(double(nX)),
+              PACKAGE = "spatstat")
+    answer <- numeric(nX)   # the C routine returns doubles, not integers
+    answer[oX] <- out$values
+    return(answer)
+  }
+
+  # .......... template object ..........
+  
+  BlankDG <- 
+  list(
+         name     = "Diggle-Gratton process",
+         creator  = "DiggleGratton",
+         family    = "pairwise.family",  # evaluated later
+         pot      = function(d, par) {
+                       delta <- par$delta
+                       rho <- par$rho
+                       above <- (d > rho)
+                       inrange <- (!above) & (d > delta)
+                       h <- above + inrange * (d - delta)/(rho - delta)
+                       return(log(h))
+                    },
+         par      = list(delta=NULL, rho=NULL),  # to be filled in later
+         parnames = list("lower limit delta", "upper limit rho"),
+         selfstart = function(X, self) {
+           # self starter for DiggleGratton
+           nX <- npoints(X)
+           if(nX < 2) {
+             # not enough points to make any decisions
+             return(self)
+           }
+           md <- minnndist(X)
+           if(!is.na(delta <- self$par$delta)) {
+             # value fixed by user or previous invocation
+             # check it
+             if(md < delta)
+               warning(paste("Hard core distance delta is too large;",
+                             "some data points will have zero probability"))
+             return(self)
+           }
+           if(md == 0) 
+             warning(paste("Pattern contains duplicated points:",
+                           "hard core distance delta must be zero"))
+           # take hard core = minimum interpoint distance * n/(n+1)
+           deltaX <- md * nX/(nX+1)
+           DiggleGratton(delta=deltaX, rho=self$par$rho)
+         },
+         init = function(self) {
+           delta <- self$par$delta
+           rho   <- self$par$rho
+           if(!is.numeric(rho) || length(rho) != 1L)
+             stop("upper limit rho must be a single number")
+           stopifnot(is.finite(rho))
+           if(!is.na(delta)) {
+             if(!is.numeric(delta) || length(delta) != 1L)
+               stop("lower limit delta must be a single number")
+             stopifnot(delta >= 0)
+             stopifnot(rho > delta)
+           } else stopifnot(rho >= 0)
+         },
+         update = NULL, # default OK
+         print = NULL,    # default OK
+         interpret =  function(coeffs, self) {
+           kappa <- as.numeric(coeffs[1L])
+           return(list(param=list(kappa=kappa),
+                       inames="exponent kappa",
+                       printable=dround(kappa)))
+         },
+         valid = function(coeffs, self) {
+           kappa <- as.numeric(coeffs[1L])
+           return(is.finite(kappa) && (kappa >= 0))
+         },
+         project = function(coeffs, self) {
+           kappa <- as.numeric(coeffs[1L])
+           if(is.finite(kappa) && (kappa >= 0))
+             return(NULL)
+           return(Poisson())
+         },
+         irange = function(self, coeffs=NA, epsilon=0, ...) {
+           rho <- self$par$rho
+           if(all(is.na(coeffs)))
+             return(rho)
+           kappa <- coeffs[1L]
+           delta <- self$par$delta
+           if(abs(kappa) <= epsilon)
+             return(delta)
+           else return(rho)
+         },
+       version=NULL, # evaluated later
+       # fast evaluation is available for the border correction only
+       can.do.fast=function(X,correction,par) {
+         return(all(correction %in% c("border", "none")))
+       },
+       fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) {
+         # fast evaluator for DiggleGratton interaction
+         if(!all(correction %in% c("border", "none")))
+           return(NULL)
+         if(spatstat.options("fasteval") == "test")
+           message("Using fast eval for DiggleGratton")
+         delta <- potpars$delta
+         rho   <- potpars$rho
+         idX <- seq_len(npoints(X))
+         idU <- rep.int(-1L, npoints(U))
+         idU[EqualPairs[,2L]] <- EqualPairs[,1L]
+         answer <- diggraterms(U, X, idU, idX, delta, rho)
+         answer <- log(pmax.int(0, answer))
+         return(matrix(answer, ncol=1L))
+       },
+       Mayer=function(coeffs, self) {
+         # second Mayer cluster integral
+         rho   <- self$par$rho
+         delta <- self$par$delta
+         width <- rho - delta
+         kappa <- coeffs[1L]
+         ans <- pi * (rho^2
+                      - 2 * rho* width/(kappa + 1)
+                      + 2 * width^2/((kappa + 1) * (kappa + 2)))
+         return(ans)
+       }
+  )
+  class(BlankDG) <- "interact"
+
+  DiggleGratton <- function(delta=NA, rho) {
+    instantiate.interact(BlankDG, list(delta=delta, rho=rho))
+  }
+
+  DiggleGratton <- intermaker(DiggleGratton, BlankDG)
+
+  DiggleGratton
+})
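A usage sketch for the interaction defined above: fit a Gibbs model with the spatstat fitter ppm to the built-in cells pattern (the range values are arbitrary):

    library(spatstat)
    fit <- ppm(cells ~ 1, DiggleGratton(delta = 0.02, rho = 0.1))
    print(fit)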
diff --git a/R/dgs.R b/R/dgs.R
new file mode 100755
index 0000000..335ed99
--- /dev/null
+++ b/R/dgs.R
@@ -0,0 +1,115 @@
+#
+#
+#    dgs.R
+#
+#    $Revision: 1.9 $	$Date: 2017/06/05 10:31:58 $
+#
+#    Diggle-Gates-Stibbard process
+#
+#
+# -------------------------------------------------------------------
+#	
+
+DiggleGatesStibbard <- local({
+
+  # .......... auxiliary functions ................
+  dgsTerms <- function(X, Y, idX, idY, rho) {
+    stopifnot(is.numeric(rho))
+    # sort in increasing order of x coordinate
+    oX <- fave.order(X$x)
+    oY <- fave.order(Y$x)
+    Xsort <- X[oX]
+    Ysort <- Y[oY]
+    idXsort <- idX[oX]
+    idYsort <- idY[oY]
+    nX <- npoints(X)
+    nY <- npoints(Y)
+    # call C routine
+    out <- .C("Ediggatsti",
+            nnsource = as.integer(nX),
+            xsource  = as.double(Xsort$x),
+            ysource  = as.double(Xsort$y),
+            idsource = as.integer(idXsort),
+            nntarget = as.integer(nY),
+            xtarget  = as.double(Ysort$x),
+            ytarget  = as.double(Ysort$y),
+            idtarget = as.integer(idYsort),
+            rrho     = as.double(rho),
+            values   = as.double(double(nX)),
+            PACKAGE = "spatstat")
+    answer <- numeric(nX)   # the C routine returns doubles, not integers
+    answer[oX] <- out$values
+    return(answer)
+  }
+
+  # ...... template object ......................
+  BlankDGS <- 
+    list(
+         name   = "Diggle-Gates-Stibbard process",
+         creator = "DiggleGatesStibbard",
+         family  = "pairwise.family",  # evaluated later
+         pot    = function(d, par) {
+           rho <- par$rho
+           v <- log(sin((pi/2) * d/rho)^2)
+           v[ d > par$rho ] <- 0
+           attr(v, "IsOffset") <- TRUE
+           v
+         },
+         par    = list(rho = NULL),  # to be filled in later
+         parnames = "interaction range", 
+         init   = function(self) {
+           rho <- self$par$rho
+           if(!is.numeric(rho) || length(rho) != 1L || rho <= 0)
+             stop("interaction range rho must be a positive number")
+         },
+         update = NULL,       # default OK
+         print = NULL,        # default OK
+         interpret =  function(coeffs, self) {
+           return(NULL)
+         },
+         valid = function(coeffs, self) {
+           return(TRUE)
+         },
+         project = function(coeffs, self) {
+           return(NULL)
+         },
+         irange = function(self, coeffs=NA, epsilon=0, ...) {
+           rho <- self$par$rho
+           return(rho)
+         },
+         version=NULL, # evaluated later
+         # fast evaluation is available for the border correction only
+         can.do.fast=function(X,correction,par) {
+           return(all(correction %in% c("border", "none")))
+         },
+         fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) {
+           # fast evaluator for DiggleGatesStibbard interaction
+           if(!all(correction %in% c("border", "none")))
+             return(NULL)
+           if(spatstat.options("fasteval") == "test")
+             message("Using fast eval for DiggleGatesStibbard")
+           rho <- potpars$rho
+           idX <- seq_len(npoints(X))
+           idU <- rep.int(-1L, npoints(U))
+           idU[EqualPairs[,2L]] <- EqualPairs[,1L]
+           v <- dgsTerms(U, X, idU, idX, rho)
+           v <- matrix(v, ncol=1L)
+           attr(v, "IsOffset") <- TRUE
+           return(v)
+         },
+         Mayer=function(coeffs, self) {
+           # second Mayer cluster integral
+           rho   <- self$par$rho
+           return((pi/2 - 2/pi) * rho^2)
+         }
+         )
+  class(BlankDGS) <- "interact"
+
+  DiggleGatesStibbard <- function(rho) {
+    instantiate.interact(BlankDGS, list(rho = rho))
+  }
+
+  DiggleGatesStibbard <- intermaker(DiggleGatesStibbard, BlankDGS)
+
+  DiggleGatesStibbard
+})
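Corresponding usage sketch for the interaction defined above (rho arbitrary):

    library(spatstat)
    fit <- ppm(cells ~ 1, DiggleGatesStibbard(rho = 0.12))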
diff --git a/R/diagnoseppm.R b/R/diagnoseppm.R
new file mode 100755
index 0000000..67a566c
--- /dev/null
+++ b/R/diagnoseppm.R
@@ -0,0 +1,439 @@
+#
+#                            diagnoseppm.R
+#
+# Makes diagnostic plots based on residuals or energy weights
+#
+# $Revision: 1.43 $ $Date: 2016/07/06 06:56:56 $
+#
+
+diagnose.ppm.engine <- function(object, ..., type="eem", typename, opt,
+                                sigma=NULL,
+                                rbord=reach(object),
+                                compute.sd=is.poisson(object),
+                                compute.cts=TRUE,
+                                envelope=FALSE, nsim=39, nrank=1,
+                                rv=NULL, oldstyle=FALSE,
+                                splineargs = list(spar=0.5),
+                                verbose=TRUE)
+{
+  if(is.marked.ppm(object))
+    stop("Sorry, this is not yet implemented for marked models")
+
+  # quadrature points
+  Q <- quad.ppm(object)
+  U <- union.quad(Q)
+  Qweights <- w.quad(Q)
+
+  # -------------- Calculate residuals/weights -------------------
+
+  # Discretised residuals
+
+  if(type == "eem") {
+    residval <- if(!is.null(rv)) rv else eem(object, check=FALSE)
+    residval <- as.numeric(residval)
+    X <- data.ppm(object)
+    Y <- X %mark% residval
+  } else {
+    if(!is.null(rv) && !inherits(rv, "msr"))
+      stop("rv should be a measure (object of class msr)")
+    residobj <-
+      if(!is.null(rv)) rv else residuals.ppm(object, type=type, check=FALSE)
+    residval <- with(residobj, "increment")
+    if(ncol(as.matrix(residval)) > 1L)
+      stop("Not implemented for vector-valued residuals; use [.msr to split into separate components")
+    Y <- U %mark% residval
+  }
+
+  # Atoms and density of measure
+
+  Ymass <- NULL
+  Ycts  <- NULL
+  Ydens <- NULL
+
+  if(compute.cts) {
+    if(type == "eem") {
+      Ymass <- Y
+      Ycts  <- U %mark% (-1)
+      Ydens <- as.im(-1, Y$window)
+    } else {
+      atoms <- with(residobj, "is.atom")
+      masses <- with(residobj, "discrete")
+      cts    <- with(residobj, "density")
+      if(!is.null(atoms) && !is.null(masses) && !is.null(cts)) {
+        Ymass <- (U %mark% masses)[atoms]
+        Ycts    <- U %mark% cts
+        # remove NAs (as opposed to zero cif points)
+        if(!all(ok <- is.finite(cts))) {
+          U <- U[ok]
+          Ycts <- Ycts[ok]
+          cts  <- cts[ok]
+          Qweights <- Qweights[ok]
+        }
+        # interpolate continuous part to yield an image for plotting
+        if(type == "inverse" && all(cts > 0)) {
+          Ydens <- as.im(-1, Y$window)
+        } else if(is.stationary.ppm(object) && is.poisson.ppm(object)) {
+          # all values of `cts' will be equal
+          Ydens <- as.im(cts[1L], Y$window)
+        } else {
+          smallsigma <- maxnndist(Ycts)
+          Ujitter <- U
+          Ujitter$x <- U$x + runif(U$n, -smallsigma, smallsigma)
+          Ujitter$y <- U$y + runif(U$n, -smallsigma, smallsigma)
+          Ydens <- Smooth(Ujitter %mark% marks(Ycts),
+                          sigma=smallsigma,
+                          weights=Qweights,
+                          edge=TRUE, ...)
+        }
+      }
+    }
+  }
+    
+
+  #----------------  Erode window ---------------------------------
+  #
+  ## Compute windows 
+  W <- Y$window
+
+  # Erode window if required
+  clip <- !is.null(rbord) && is.finite(rbord) && (rbord > 0) 
+  if(clip) {
+    Wclip <- erosion.owin(W, rbord)
+    Yclip <- Y[Wclip]
+    Qweightsclip <- Qweights[inside.owin(U, , Wclip)]
+    if(!is.null(Ycts))
+      Ycts <- Ycts[Wclip]
+    if(!is.null(Ydens))
+      Ydens <- Ydens[Wclip, drop=FALSE]
+  } else {
+    Wclip <- W
+    Yclip <- Y
+  }
+  
+  # ------------ start collecting results -------------------------
+  
+  result <- list(type=type,
+                 clip=clip,
+                 Y=Y,
+                 W=W,
+                 Yclip=Yclip,
+                 Ymass=Ymass,
+                 Ycts=Ycts,
+                 Ydens=Ydens)
+
+  # ------------- smoothed field ------------------------------
+
+  Z <- NULL
+  if(opt$smooth | opt$xcumul | opt$ycumul | opt$xmargin | opt$ymargin) {
+    if(is.null(sigma))
+      sigma <- 0.1 * diameter(Wclip)  
+    Z <- density.ppp(Yclip, sigma, weights=Yclip$marks, edge=TRUE, ...)
+  }
+  if(opt$smooth) {
+    result$smooth <- list(Z = Z, sigma=sigma)
+    if(type == "pearson")
+      result$smooth$sdp <- 1/(2 * sigma * sqrt(pi))
+  }
+
+  # -------------- marginals of smoothed field ------------------------
+  
+  if(opt$xmargin) {
+    xZ <- apply(Z$v, 2, sum, na.rm=TRUE) * Z$xstep
+    if(type == "eem") 
+      ExZ <- colSums(!is.na(Z$v)) * Z$xstep
+    else 
+      ExZ <- numeric(length(xZ))
+    result$xmargin <- list(x=Z$xcol, xZ=xZ, ExZ=ExZ)
+  }
+  
+  if(opt$ymargin) {
+    yZ <- apply(Z$v, 1L, sum, na.rm=TRUE) * Z$ystep
+    if(type == "eem")
+      EyZ <- rowSums(!is.na(Z$v)) * Z$ystep
+    else
+      EyZ <- numeric(length(yZ))
+    result$ymargin <- list(y=Z$yrow, yZ=yZ, EyZ=EyZ)
+  }
+  
+  # -------------- cumulative (lurking variable) plots --------------
+
+  ## precompute simulated patterns for envelopes
+  if(identical(envelope, TRUE))
+    envelope <- simulate(object, nsim=nsim, progress=verbose)
+
+  if(opt$xcumul)
+    result$xcumul <- 
+    lurking(object, covariate=expression(x),
+            type=type,
+            clipwindow= if(clip) Wclip else NULL,
+            rv=residval,
+            plot.sd=compute.sd,
+            envelope=envelope, nsim=nsim, nrank=nrank,
+            plot.it=FALSE,
+            typename=typename,
+            covname="x coordinate",
+            oldstyle=oldstyle,
+            check=FALSE,
+            splineargs=splineargs,
+            ...)
+
+  if(opt$ycumul)
+    result$ycumul <- 
+    lurking(object, covariate=expression(y),
+            type=type,
+            clipwindow= if(clip) Wclip else NULL,
+            rv=residval,
+            plot.sd=compute.sd,
+            envelope=envelope, nsim=nsim, nrank=nrank,
+            plot.it=FALSE,
+            typename=typename,
+            covname="y coordinate",
+            oldstyle=oldstyle,
+            check=FALSE,
+            splineargs=splineargs,
+            ...)
+
+  # -------------- summary numbers --------------
+  
+  if(opt$sum) 
+    result$sum <- list(marksum=sum(Yclip$marks, na.rm=TRUE),
+                       areaWclip=area(Wclip),
+                       areaquad=if(clip) sum(Qweightsclip) else sum(Qweights),
+                       range=if(!is.null(Z)) range(Z) else NULL)
+
+  return(invisible(result))
+}
+
+
+########################################################################
+
+
+diagnose.ppm <- function(object, ..., type="raw", which="all", 
+                         sigma=NULL, 
+                         rbord =reach(object), cumulative=TRUE,
+                         plot.it = TRUE, rv = NULL, 
+                         compute.sd=is.poisson(object), compute.cts=TRUE,
+                         envelope=FALSE, nsim=39, nrank=1,
+                         typename, check=TRUE, repair=TRUE, oldstyle=FALSE,
+                         splineargs=list(spar=0.5))
+{
+  asked.newstyle <- !missing(oldstyle) && !oldstyle
+
+  if(is.marked.ppm(object))
+    stop("Sorry, this is not yet implemented for marked models")
+  
+  # check whether model originally came from replicated data
+  is.subfit <- (object$method == "mppm")
+
+  Coefs <- coef(object)
+  if(check && damaged.ppm(object)) {
+    if(!repair)
+      stop("object format corrupted; try update(object, use.internal=TRUE)")
+    message("object format corrupted; repairing it.")
+    object <- update(object, use.internal=TRUE)
+    object <- tweak.coefs(object, Coefs)
+  } else if(compute.sd && is.null(getglmfit(object))) {
+    object <- update(object, forcefit=TRUE, use.internal=TRUE)
+    object <- tweak.coefs(object, Coefs)
+  }
+
+  # -------------  Interpret arguments --------------------------
+
+  # Edge-effect avoidance
+  if(missing(rbord) && !is.finite(rbord)) {
+    ## Model has infinite reach
+    ## Use correction rule employed when fitting
+    rbord <- if(object$correction == "border") object$rbord else 0
+  }
+  
+  # match type argument
+  type <- pickoption("type", type,
+                     c(eem="eem",
+                       raw="raw",
+                       inverse="inverse",
+                       pearson="pearson",
+                       Pearson="pearson"))
+  if(missing(typename))
+    typename <- switch(type,
+                       eem="exponential energy weights",
+                       raw="raw residuals",
+                       inverse="inverse-lambda residuals",
+                       pearson="Pearson residuals")
+
+  # 'which' is multiple choice with exact matching 
+  optionlist <- c("all", "marks", "smooth", "x", "y", "sum")
+
+  if(!all(m <- which %in% optionlist))
+    stop(paste("Unrecognised choice(s) of",
+               paste(sQuote("which"), ":", sep=""),
+               paste(which[!m], collapse=", ")))
+
+  opt <- list()
+  opt$all <- "all" %in% which
+  opt$marks <-  ("marks" %in% which)   | opt$all
+  opt$smooth <- ("smooth" %in% which)  | opt$all
+  opt$xmargin <- (("x" %in% which)       | opt$all) && !cumulative
+  opt$ymargin <- (("y" %in% which)       | opt$all) && !cumulative
+  opt$xcumul <-  (("x" %in% which)       | opt$all) && cumulative
+  opt$ycumul <-  (("y" %in% which)       | opt$all) && cumulative
+  opt$sum <-     ("sum" %in% which)      | opt$all
+
+  # compute and plot estimated standard deviations?
+  # yes for Poisson, no for other models, unless overridden
+  if(!missing(compute.sd))
+    plot.sd <- compute.sd
+  else
+    plot.sd <- list(...)$plot.sd
+  if(is.null(plot.sd))
+    plot.sd <- is.poisson.ppm(object)
+  if(missing(compute.sd))
+    compute.sd <- plot.sd
+
+  # default for mppm objects is oldstyle=TRUE
+  if(compute.sd && is.subfit) {
+    if(!asked.newstyle) {
+      # silently change default
+      oldstyle <- TRUE
+    } else {
+      stop(paste("Variance calculation for a subfit of an mppm object",
+                 "is only implemented for oldstyle=TRUE"),
+           call.=FALSE)
+    }
+  }
+    
+  # interpolate the density of the residual measure?
+  if(missing(compute.cts)) {
+    plot.neg <- resolve.defaults(list(...),
+                                 formals(plot.diagppm)["plot.neg"])$plot.neg
+    # only if it is needed for the mark plot
+    compute.cts <- opt$marks && (plot.neg != "discrete")
+  }
+
+  # -------  DO THE CALCULATIONS -----------------------------------
+  RES <-  diagnose.ppm.engine(object, type=type, typename=typename,
+                              opt=opt, sigma=sigma, rbord=rbord,
+                              compute.sd=compute.sd,
+                              compute.cts=compute.cts,
+                              envelope=envelope, nsim=nsim, nrank=nrank,
+                              rv=rv, oldstyle=oldstyle,
+                              splineargs=splineargs,
+                              ...)
+
+  RES$typename <- typename
+  RES$opt <- opt
+  RES$compute.sd <- compute.sd
+  RES$compute.cts <- compute.cts
+  
+  class(RES) <- "diagppm"
+
+  # -------  PLOT --------------------------------------------------
+  if(plot.it) 
+    plot(RES, ...)
+
+  return(RES)
+}
+
+plot.diagppm <-
+  function(x, ..., which,
+           plot.neg=c("image", "discrete", "contour", "imagecontour"),
+           plot.smooth=c("imagecontour", "image", "contour", "persp"),
+           plot.sd, spacing=0.1, outer=3, 
+           srange=NULL, monochrome=FALSE, main=NULL)
+{
+  opt <- x$opt
+  
+  plot.neg <- match.arg(plot.neg)
+  plot.smooth <- match.arg(plot.smooth)
+  
+  if(!missing(which)) {
+    witches <- c("all", "marks", "smooth", "x", "y", "sum")
+    unknown <- is.na(match(which, witches))
+    if(any(unknown))
+      warning(paste("Unrecognised",
+                    ngettext(sum(unknown), "option", "options"),
+                    "which =",
+                    commasep(sQuote(which[unknown])),
+                    ": valid options are",
+                    commasep(sQuote(witches))), call.=FALSE)
+    oldopt <- opt
+    newopt <- list()
+    newopt$all <- "all" %in% which
+    newopt$marks <-  ("marks" %in% which)   | newopt$all
+    newopt$smooth <- ("smooth" %in% which)  | newopt$all
+    newopt$xmargin <- (("x" %in% which)       | newopt$all) && oldopt$xmargin
+    newopt$ymargin <- (("y" %in% which)       | newopt$all) && oldopt$ymargin
+    newopt$xcumul <-  (("x" %in% which)       | newopt$all) && oldopt$xcumul
+    newopt$ycumul <-  (("y" %in% which)       | newopt$all)  && oldopt$ycumul
+    newopt$sum <-     ("sum" %in% which)      | newopt$all
+
+    illegal <- (unlist(newopt) > unlist(oldopt))
+    if(any(illegal)) {
+      offending <- paste(names(newopt)[illegal], collapse=", ")
+      whinge <- paste("cannot display the following components;\n",
+                      "they were not computed: - \n", offending, "\n")
+      stop(whinge)
+    }
+
+    opt <- newopt
+  }
+
+  if(missing(plot.sd)) {
+    plot.sd <- x$compute.sd
+  } else if(plot.sd && !(x$compute.sd)) {
+    warning("can't plot standard deviations; they were not computed")
+    plot.sd <- FALSE
+  }
+
+  if(!(x$compute.cts) && (plot.neg != "discrete") && (opt$marks || opt$all)) {
+    if(!missing(plot.neg))
+      warning("can't plot continuous component of residuals; it was not computed")
+    plot.neg <- "discrete"
+  }
+  
+  if(opt$all) 
+    resid4plot(RES=x,
+               plot.neg=plot.neg, plot.smooth=plot.smooth,
+               spacing=spacing, outer=outer,
+               srange=srange, monochrome=monochrome, main=main, ...)
+  else
+    resid1plot(RES=x, opt=opt,
+               plot.neg=plot.neg, plot.smooth=plot.smooth,
+               srange=srange, monochrome=monochrome, main=main, ...)
+}
+
+
+print.diagppm <- function(x, ...) {
+  
+  opt <- x$opt
+  typename <- x$typename
+  
+  splat("Model diagnostics", paren(typename))
+  splat("Diagnostics available:")
+  optkey <- list(all="four-panel plot",
+                 marks=paste("mark plot", if(!x$compute.cts)
+                   "(discrete representation only)" else NULL),
+                 smooth="smoothed residual field",
+                 xmargin="x marginal density",
+                 ymargin="y marginal density",
+                 xcumul="x cumulative residuals",
+                 ycumul="y cumulative residuals",
+                 sum="sum of all residuals")
+  avail <- unlist(optkey[names(opt)[unlist(opt)]])
+  names(avail) <- NULL
+  cat(paste("\t", paste(avail, collapse="\n\t"), "\n", sep=""))
+  
+  if(opt$sum) {
+    xs <- x$sum
+    windowname <- if(x$clip) "clipped window" else "entire window"
+    splat("sum of", typename, "in", windowname, "=", signif(sum(xs$marksum),4))
+    splat("area of", windowname, "=", signif(xs$areaWclip, 4))
+    splat("quadrature area =", signif(xs$areaquad, 4))
+  }
+  if(opt$smooth) {
+    splat("range of smoothed field = ", prange(signif(range(x$smooth$Z),4)))
+    if(!is.null(sdp <- x$smooth$sdp))
+      splat("Null standard deviation of smoothed Pearson residual field:",
+            signif(sdp, 4))
+  }
+  return(invisible(NULL))
+}
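The typical call sequence for the diagnostics defined above, sketched with the built-in cells data (ppm is the spatstat model fitter):

    library(spatstat)
    fit <- ppm(cells ~ x)                      # inhomogeneous Poisson model
    D <- diagnose.ppm(fit, type = "pearson")   # compute and plot diagnostics
    print(D)                                   # textual summary
    plot(D, which = "smooth")                  # redisplay a single component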
diff --git a/R/diagram.R b/R/diagram.R
new file mode 100644
index 0000000..3b0e562
--- /dev/null
+++ b/R/diagram.R
@@ -0,0 +1,322 @@
+##
+##   diagram.R
+##
+##   Simple objects for the elements of a diagram (text, arrows etc)
+##    that are compatible with plot.layered and plot.solist
+##
+##   $Revision: 1.12 $ $Date: 2016/04/25 02:34:40 $
+
+# ......... internal class 'diagramobj' supports other classes  .........
+
+diagramobj <- function(X, ...) {
+  if(inherits(try(Frame(X), silent=TRUE), "try-error"))
+    stop("X is not a spatial object")
+  a <- list(...)
+  if(sum(nzchar(names(a))) != length(a))
+    stop("All extra arguments must be named")
+  attributes(X) <- append(attributes(X), a)
+  class(X) <- c("diagramobj", class(X))
+  return(X)
+}
+
+"[.diagramobj" <- function(x, ...) {
+  y <- NextMethod("[")
+  attributes(y) <- attributes(x)
+  return(y)
+}
+
+shift.diagramobj <- function(X, ...) {
+  y <- NextMethod("shift")
+  attributes(y) <- attributes(X)
+  return(y)
+}
+
+scalardilate.diagramobj <- function(X, f, ...) {
+  y <- NextMethod("scalardilate")
+  attributes(y) <- attributes(X)
+  return(y)
+}
+
+# .............. user-accessible classes ................
+# .........  (these only need a creator and a plot method) ......
+
+
+## ...........  text .................
+
+textstring <- function(x, y, txt=NULL, ...) {
+  if(is.ppp(x) && missing(y)) {
+    X <- x
+    Window(X) <- boundingbox(x)
+  } else {
+    if(missing(y) && checkfields(x, c("x", "y"))) {
+      y <- x$y
+      x <- x$x
+      stopifnot(length(x) == length(y))
+    }
+    X <- ppp(x, y, window=owin(range(x),range(y)))
+  }
+  marks(X) <- txt
+  Y <- diagramobj(X, otherargs=list(...))
+  class(Y) <- c("textstring", class(Y))
+  return(Y)
+}
+
+plot.textstring <- function(x, ..., do.plot=TRUE) {
+  txt <- marks(x)
+  otha <- attr(x, "otherargs")
+  if(do.plot) do.call.matched(text.default,
+                              resolve.defaults(list(...),
+                                               list(x=x$x, y=x$y, labels=txt),
+                                               otha),
+                              funargs=graphicsPars("text"))
+  return(invisible(Frame(x)))
+}
+
+print.textstring <- function(x, ...) {
+  splat("Text string object")
+  txt <- marks(x)
+  if(npoints(x) == 1) {
+    splat("Text:", dQuote(txt))
+    splat("Coordinates:", paren(paste(as.vector(coords(x)), collapse=", ")))
+  } else {
+    splat("Text:")
+    print(txt)
+    splat("Coordinates:")
+    print(coords(x))
+  }
+  return(invisible(NULL))
+}
+  
+## ...........  'yardstick' to display scale information  ................
+
+yardstick <- function(x0, y0, x1, y1, txt=NULL, ...) {
+  nomore <- missing(y0) && missing(x1) && missing(y1) 
+  if(is.ppp(x0) && nomore) {
+    if(npoints(x0) != 2) stop("x0 should consist of exactly 2 points")
+    X <- x0
+  } else if(is.psp(x0) && nomore) {
+    if(nobjects(x0) != 1) stop("x0 should consist of exactly 1 segment")
+    X <- endpoints.psp(x0)
+  } else {
+    xx <- c(x0, x1)
+    yy <- c(y0, y1)
+    B <- boundingbox(list(x=xx, y=yy))
+    X <- ppp(xx, yy, window=B, check=FALSE)
+  }
+  Window(X) <- boundingbox(X)
+  Y <- diagramobj(X, txt=txt, otherargs=list(...))
+  class(Y) <- c("yardstick", class(Y))
+  return(Y)
+}
+
+plot.yardstick <- local({
+
+  mysegments <- function(x0, y0, x1, y1, ..., moreargs=list()) {
+    ## ignore unrecognised arguments without whingeing
+    do.call.matched(segments,
+                    resolve.defaults(list(x0=x0, y0=y0, x1=x1, y1=y1),
+                                     list(...),
+                                     moreargs),
+                    extrargs=c("col", "lty", "lwd", "xpd", "lend"))
+  }
+  
+  myarrows <- function(x0, y0, x1, y1, ...,
+                       left=TRUE, right=TRUE,
+                       angle=20, frac=0.25,
+                       main, show.all, add) {
+    mysegments(x0, y0, x1, y1, ...)
+    if(left || right) {
+      ang <- angle * pi/180
+      co <- cos(ang)
+      si <- sin(ang)
+      dx <- x1-x0
+      dy <- y1-y0
+      le <- sqrt(dx^2 + dy^2)
+      rot <- matrix(c(dx, dy, -dy, dx)/le, 2, 2)
+      arlen <- frac * le
+      up <- arlen * (rot %*% c(co, si))
+      lo <- arlen * (rot %*% c(co, -si))
+      if(left) {
+        mysegments(x0, y0, x0+up[1L], y0+up[2L], ...)
+        mysegments(x0, y0, x0+lo[1L], y0+lo[2L], ...)
+      }
+      if(right) {
+        mysegments(x1, y1, x1-up[1L], y1-up[2L], ...)
+        mysegments(x1, y1, x1-lo[1L], y1-lo[2L], ...)
+      }
+    }
+    return(invisible(NULL))
+  }
+
+  plot.yardstick <- function(x, ...,
+                             angle=20,
+                             frac=1/8,
+                             split=FALSE,
+                             shrink=1/4,
+                             pos=NULL,
+                             txt.args=list(),
+                             txt.shift=c(0,0),
+                             do.plot=TRUE) {
+    if(do.plot) {
+      txt <- attr(x, "txt")
+      argh <- resolve.defaults(list(...), attr(x, "otherargs"))
+      A <- as.numeric(coords(x)[1L,])
+      B <- as.numeric(coords(x)[2L,])
+      M <- (A+B)/2
+      if(!split) {
+        ## double-headed arrow
+        myarrows(A[1L], A[2L], B[1L], y1=B[2L],
+                 angle=angle, frac=frac, moreargs=argh)
+        if(is.null(pos) && !("adj" %in% names(txt.args)))
+          pos <- if(abs(A[1L] - B[1L]) < abs(A[2L] - B[2L])) 4 else 3
+      } else {
+        ## two single-headed arrows with text 
+        dM <- (shrink/2) * (B - A)
+        AM <- M - dM
+        BM <- M + dM
+        newfrac <- frac/((1-shrink)/2)
+        myarrows(AM[1L], AM[2L], A[1L], A[2L],
+                 angle=angle, frac=newfrac, left=FALSE, moreargs=argh)
+        myarrows(BM[1L], BM[2L], B[1L], B[2L], 
+                 angle=angle, frac=newfrac, left=FALSE, moreargs=argh)
+      }
+      txt.shift <- if(is.null(txt.shift)) rep(0, 2) else
+                   ensure2vector(unlist(txt.shift))
+      do.call.matched(text.default,
+                      resolve.defaults(list(x=M[1L] + txt.shift[1L],
+                                            y=M[2L] + txt.shift[2L]),
+                                       txt.args,
+                                       list(labels=txt, pos=pos),
+                                       argh,
+                                       .MatchNull=FALSE),
+                      funargs=graphicsPars("text"))
+    }
+    return(invisible(Window(x)))
+  }
+  plot.yardstick
+})
+
+
+print.yardstick <- function(x, ...) {
+  splat("Yardstick")
+  if(!is.null(txt <- attr(x, "txt")))
+    splat("Text:", txt)
+  ui <- summary(unitname(x))
+  splat("Length:", pairdist(x)[1L,2L], ui$plural, ui$explain)
+  splat("Midpoint:",
+        paren(paste(signif(c(mean(x$x), mean(x$y)), 3), collapse=", ")))
+  dx <- diff(range(x$x))
+  dy <- diff(range(x$y))
+  orient <- if(dx == 0) "vertical" else
+            if(dy == 0) "horizontal" else
+            paste(atan2(dy, dx) * 180/pi, "degrees")
+  splat("Orientation:", orient)
+  return(invisible(NULL))
+}
+
+
+## code to draw a decent-looking arrow in spatstat diagrams
+## (works in layered objects)
+
+## The name 'onearrow' is used because R contains
+## hidden functions [.arrow, length.arrow
+
+onearrow <- function(x0, y0, x1, y1, txt=NULL, ...) {
+  nomore <- missing(y0) && missing(x1) && missing(y1) 
+  if(is.ppp(x0) && nomore) {
+    if(npoints(x0) != 2) stop("x0 should consist of exactly 2 points")
+    X <- x0
+  } else if(is.psp(x0) && nomore) {
+    if(nobjects(x0) != 1) stop("x0 should consist of exactly 1 segment")
+    X <- endpoints.psp(x0)
+  } else {
+    xx <- c(x0, x1)
+    yy <- c(y0, y1)
+    B <- boundingbox(list(x=xx, y=yy))
+    X <- ppp(xx, yy, window=B, check=FALSE)
+  }
+  Window(X) <- boundingbox(X)
+  Y <- diagramobj(X, txt=txt, otherargs=list(...))
+  class(Y) <- c("onearrow", class(Y))
+  return(Y)
+}
+
+print.onearrow <- function(x, ...) {
+  cat("Single arrow", fill=TRUE)
+  if(!is.null(txt <- attr(x, "txt")))
+    cat("Text:", txt, fill=TRUE)
+  NextMethod("print")
+}
+
+plot.onearrow <- function(x, ...,
+                          add=FALSE,
+                          main="",
+                          retract=0.05,   
+                          headfraction=0.25,
+                          headangle=12, # degrees
+                          headnick=0.1, # fraction of head length
+                          col.head=NA,
+                          lwd.head=lwd,
+                          lwd=1,
+                          col=1,
+                          zap=FALSE,
+                          zapfraction=0.07,
+                          pch=1, cex=1,
+                          do.plot=TRUE,
+                          do.points=FALSE,
+                          show.all=!add) {
+  result <- plot.ppp(x, main=main, add=add,
+                     pch=pch, cex=cex,
+                     do.plot=do.plot && do.points,
+                     show.all=show.all)
+  if(do.plot) {
+    if(!do.points && !add)
+      plot(Frame(x), main="", type="n")
+    txt <- attr(x, "txt")
+    argh <- resolve.defaults(list(...), attr(x, "otherargs"))
+    A <- as.numeric(coords(x)[1L,])
+    B <- as.numeric(coords(x)[2L,])
+    V <- B - A
+    AR <- A + retract * V
+    BR <- B - retract * V
+    H <- B - headfraction * V
+    HN <- H + headnick * headfraction * V
+    headlength <- headfraction * sqrt(sum(V^2))
+    halfwidth <- headlength * tan((headangle/2) * pi/180)
+    alpha <- atan2(V[2L], V[1L]) + pi/2
+    U <- c(cos(alpha), sin(alpha))
+    HL <- H + halfwidth * U
+    HR <- H - halfwidth * U
+    Head <- rbind(HN, HL, BR, HR, HN)
+    if(!is.na(col.head))
+      do.call.matched(polygon,
+                      resolve.defaults(list(x=Head),
+                                       argh,
+                                       list(col=col.head, lwd=lwd.head)))
+    if(!zap) {
+      Tail <- AR
+    } else {
+      M <- (AR+HN)/2
+      dM <- (zapfraction/2) * (1-headfraction) * V
+      dM <- dM + c(-dM[2L], dM[1L])
+      ML <- M + dM
+      MR <- M - dM
+      Tail <- rbind(AR, ML, MR)
+    }
+    do.call.matched(lines,
+                    resolve.defaults(list(x=rbind(Tail, Head)),
+                                     argh,
+                                     list(col=col, lwd=lwd)),
+                    extrargs=c("col", "lwd", "lty", "xpd", "lend"))
+    if(!is.null(txt <- attr(x, "txt"))) {
+      H <- (A+B)/2
+      do.call.matched(text.default,
+                      resolve.defaults(
+                        list(x=H[1L], y=H[2L]),
+                        argh,
+                        list(labels=txt, pos=3 + (V[2L] != 0))),
+                      funargs=graphicsPars("text"))
+    }
+  }
+  return(invisible(result))
+}
diff --git a/R/digestCovariates.R b/R/digestCovariates.R
new file mode 100644
index 0000000..9fcb0f1
--- /dev/null
+++ b/R/digestCovariates.R
@@ -0,0 +1,66 @@
+#'
+#'    digestCovariates.R
+#'
+#'     $Revision: 1.2 $  $Date: 2017/01/26 00:22:14 $
+#' 
+
+is.scov <- function(x) {
+  #' Determines whether x is a valid candidate for a spatial covariate
+  #' A spatial object is OK if it can be coerced to a function
+  if(inherits(x, c("im", "funxy", "owin", "tess", "ssf", "leverage.ppm")))
+    return(TRUE)
+  #' A function(x,y,...) is OK
+  if(is.function(x) && identical(names(formals(x))[1:2], c("x", "y")))
+    return(TRUE)
+  #' A single character "x" or "y" is OK
+  if(is.character(x) && length(x) == 1 && (x %in% c("x", "y"))) 
+    return(TRUE)
+  #' Can't handle input
+  return(FALSE)
+}
+  
+## Assumes each input (besides W) is a single covariate or a list of covariates
+## Returns a `solist` with possibly a unitname attribute
+digestCovariates <- function(..., W = NULL) {
+  x <- list(...)
+  #' Find individual covariates in list
+  valid <- sapply(x, is.scov)
+  covs <- x[valid]
+  #' The remaining entries are assumed to be lists of covariates
+  #' so we unlist them
+  x <- unlist(x[!valid], recursive = FALSE)
+  valid <- sapply(x, is.scov)
+  if(!all(valid))
+    stop("Couldn't interpret all input as spatial covariates.")
+  covs <- append(covs, x)
+
+  if(any(needW <- !sapply(covs, is.sob))) {
+    if(is.null(W)){
+      boxes <- sapply(covs[!needW], Frame, fatal = FALSE)
+      W <- do.call(boundingbox, boxes)
+    } else stopifnot(is.owin(W))
+  }
+  
+  covunits <- vector("list", length(covs))
+  # Now covs is a list of valid covariates we can loop through
+  for(i in seq_along(covs)){
+    covar <- covs[[i]]
+    if(inherits(covar, "distfun"))
+      covunits[[i]] <- unitname(covar)
+    if(is.character(covar) && length(covar) == 1 && (covar %in% c("x", "y"))) {
+      covar <- if(covar == "x"){
+        function(x,y) { x }
+      } else{
+        function(x,y) { y }
+      }
+      covunits[[i]] <- unitname(W)
+    }
+    if(is.function(covar) && !inherits(covar, "funxy")){
+      covar <- funxy(f = covar, W = W)
+    }
+    covs[[i]] <- covar
+  }
+  covs <- as.solist(covs)
+  attr(covs, "covunits") <- covunits
+  return(covs)
+}
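A sketch of the intended contract (the anonymous function is a hypothetical covariate; bei and bei.extra are spatstat datasets):

    library(spatstat)
    covs <- digestCovariates(bei.extra$elev, "x",
                             list(slope = bei.extra$grad,
                                  f = function(x, y) { x + y }),
                             W = Window(bei))
    ## covs is a solist: images pass through, while "x" and the bare
    ## function are wrapped as funxy functions on the window W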
diff --git a/R/disc.R b/R/disc.R
new file mode 100755
index 0000000..5411c67
--- /dev/null
+++ b/R/disc.R
@@ -0,0 +1,128 @@
+##
+## disc.R
+##
+##  discs and ellipses
+##
+## $Revision: 1.18 $ $Date: 2017/01/15 05:25:16 $
+##
+
+disc <- local({
+
+  indic <- function(x,y,x0,y0,r) { as.integer((x-x0)^2 + (y-y0)^2 < r^2) }
+  
+  disc <- function(radius=1, centre=c(0,0), ...,
+                   mask=FALSE, npoly=128, delta=NULL) {
+    check.1.real(radius)
+    stopifnot(radius > 0)
+    centre <- as2vector(centre)
+    if(!missing(npoly) && !is.null(npoly) && !is.null(delta))
+      stop("Specify either npoly or delta")
+    if(!missing(npoly) && !is.null(npoly)) {
+      stopifnot(length(npoly) == 1)
+      stopifnot(npoly >= 3)
+    } else if(!is.null(delta)) {
+      check.1.real(delta)
+      stopifnot(delta > 0)
+      npoly <- pmax(16, ceiling(2 * pi * radius/delta))
+    } else npoly <- 128
+    if(!mask) {
+      theta <- seq(from=0, to=2*pi, length.out=npoly+1)[-(npoly+1L)]
+      x <- centre[1L] + radius * cos(theta)
+      y <- centre[2L] + radius * sin(theta)
+      W <- owin(poly=list(x=x, y=y), check=FALSE)
+    } else {
+      xr <- centre[1L] + radius * c(-1,1)
+      yr <- centre[2L] + radius * c(-1,1)
+      B <- owin(xr,yr)
+      IW <- as.im(indic, B, x0=centre[1L], y0=centre[2L], r=radius, ...)
+      W <- levelset(IW, 1, "==")
+    }
+    return(W)
+  }
+
+  disc
+})
+
+hexagon <- function(edge=1, centre=c(0,0), ...,
+                    align=c("bottom", "top", "left", "right", "no")) {
+  regularpolygon(6, edge, centre, align=align)
+}
+
+regularpolygon <- function(n, edge=1, centre=c(0,0), ...,
+                           align=c("bottom", "top", "left", "right", "no")) {
+  check.1.integer(n)
+  check.1.real(edge)
+  stopifnot(n >= 3)
+  stopifnot(edge > 0)
+  align <- match.arg(align)
+  theta <- 2 * pi/n
+  radius <- edge/(2 * sin(theta/2))
+  result <- disc(radius, centre, npoly=n, mask=FALSE)
+  if(align != "no") {
+    k <- switch(align,
+                bottom = 3/4,
+                top = 1/4,
+                left = 1/2,
+                right = 1)
+    alpha <- theta * (1/2 - (k * n) %% 1)
+    result <- rotate(result, -alpha)
+  }
+  Frame(result) <- boundingbox(result)
+  return(result)
+}
+
+
+ellipse <- local({
+  
+  indic <- function(x,y,x0,y0,a,b,co,si){
+    x <- x-x0
+    y <- y-y0
+    as.integer(((x*co + y*si)/a)^2 + ((-x*si + y*co)/b)^2 < 1)
+  }
+
+  ellipse <- function(a, b, centre=c(0,0), phi=0, ...,
+                      mask=FALSE, npoly = 128) {
+    ## Checks:
+    stopifnot(length(a) == 1)
+    stopifnot(a > 0)
+    stopifnot(length(b) == 1)
+    stopifnot(b > 0)
+    centre <- as2vector(centre)
+    stopifnot(length(phi) == 1)
+    stopifnot(length(npoly) == 1)
+    stopifnot(npoly > 2)
+    ## Rotation coefficients:
+    co <- cos(phi)
+    si <- sin(phi)
+    ## Mask:
+    if(mask) {
+      ## Thetas maximizing x and y.
+      tx <- atan(-b*tan(phi)/a)
+      ty <- atan(b/(a*tan(phi)))
+      ## Maximal x and y (for centre = c(0,0)).
+      xm <- a*co*cos(tx) - b*si*sin(tx)
+      ym <- a*si*cos(ty) + b*co*sin(ty)
+      ## Range of x and y.
+      xr <- xm*c(-1,1)+centre[1L]
+      yr <- ym*c(-1,1)+centre[2L]
+      ## Wrecked-angle to contain the mask.
+      B  <- as.mask(owin(xr,yr),...)
+      ## Build the mask as a level set.
+      IW <- as.im(indic, B, x0=centre[1L], y0=centre[2L], a=a, b=b, co=co, si=si)
+      return(levelset(IW, 1, "=="))
+    }
+    ## Polygonal.
+    ## Build "horizontal" ellipse centred at 0:
+    theta <- seq(0, 2 * pi, length = npoly+1)[-(npoly+1L)]
+    xh <-  a * cos(theta)
+    yh <-  b * sin(theta)
+
+    ## Rotate through angle phi and shift centre:
+    x  <- centre[1L] + co*xh - si*yh
+    y  <- centre[2L] + si*xh + co*yh
+    owin(poly=list(x = x, y = y))
+  }
+
+  ellipse
+})
+
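Usage sketch for the window constructors above (values arbitrary):

    library(spatstat)
    D <- disc(radius = 2, centre = c(1, 1), npoly = 256)      # polygonal disc
    H <- hexagon(edge = 1, align = "left")                    # regular hexagon
    E <- ellipse(a = 2, b = 1, phi = pi/4)                    # rotated ellipse
    M <- ellipse(2, 1, phi = pi/4, mask = TRUE, dimyx = 128)  # binary mask version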
diff --git a/R/discarea.R b/R/discarea.R
new file mode 100755
index 0000000..c7c029f
--- /dev/null
+++ b/R/discarea.R
@@ -0,0 +1,111 @@
+#
+#    discarea.R
+#
+#  $Revision: 1.18 $  $Date: 2017/06/05 10:31:58 $
+#
+#
+#  Compute area of intersection between a disc and a window.
+#
+discpartarea <- function(X, r, W=as.owin(X)) {
+  if(!missing(W)) {
+    verifyclass(W, "owin")
+    if(!inherits(X, "ppp"))
+      X <- as.ppp(X, W)
+  }
+  verifyclass(X, "ppp")
+
+  n <- X$n
+  if(is.matrix(r) && nrow(r) != n)
+    stop("the number of rows of r should match the number of points in X")
+  if(!is.matrix(r)) {
+    nr <- length(r)
+    r <- matrix(r, nrow=n, ncol=nr, byrow=TRUE)
+  } else {
+    nr <- ncol(r)
+  }
+  
+  W <- as.polygonal(W)
+  
+  # convert polygon to line segments
+  Y <- edges(W)
+  # remove vertical segments (contribution is zero)
+  vert <- (Y$ends$x1 == Y$ends$x0)
+  Y <- Y[!vert]
+  # go
+  z <- .C("discareapoly",
+          nc=as.integer(n),
+          xc=as.double(X$x),
+          yc=as.double(X$y),
+          nr=as.integer(nr),
+          rmat=as.double(r),
+          nseg=as.integer(Y$n),
+          x0=as.double(Y$ends$x0),
+          y0=as.double(Y$ends$y0),
+          x1=as.double(Y$ends$x1),
+          y1=as.double(Y$ends$y1),
+          eps=as.double(.Machine$double.eps),
+          out=as.double(numeric(length(r))),
+          PACKAGE = "spatstat")
+  areas <- matrix(z$out, n, nr)
+  return(areas)
+}
+
+# Compute area of dilation of point pattern
+# using Dirichlet tessellation or distmap
+#  (areas of other dilations using distmap)
+
+dilated.areas <- function(X, r, W=as.owin(X), ...,
+                          constrained=TRUE,
+                          exact=FALSE) {
+  if(is.matrix(r)) {
+    if(sum(dim(r) > 1) > 1L)
+      stop("r should be a vector or single value")
+    r <- as.vector(r)
+  }
+  if(exact && !is.ppp(X)) {
+    exact <- FALSE
+    warning("Option exact=TRUE is only available for ppp objects")
+  }
+  if(!constrained) {
+    # unconstrained dilation
+    bb <- as.rectangle(X)
+    W <- grow.rectangle(bb, max(r))
+    if(is.owin(X))
+      X <- rebound.owin(X, W)
+    else
+      X$window <- W
+  } else W <- as.owin(W)
+  if(!exact) {
+    D <- distmap(X)
+    pixelarea <- D$xstep * D$ystep
+    Dvals <- D[W, drop=TRUE]
+    if(is.im(Dvals))
+      Dvals <- as.vector(as.matrix(Dvals))
+    Dvals <- Dvals[!is.na(Dvals)]
+    rr <- c(-1, r)
+    h <- cumsum(whist(Dvals, rr))
+    return(h * pixelarea)
+  }
+  X <- unique(X)
+  npts <- npoints(X)
+  nr <- length(r)
+  if(npts == 0)
+    return(numeric(nr))
+  else if(npts == 1L) 
+    return(discpartarea(X, r, W))
+  samebox <- (W$type == "rectangle") &&
+              isTRUE(all.equal(W, as.owin(X)))
+  needclip <- constrained && !samebox
+  dd <- dirichlet(X)
+  til <- tiles(dd)
+  out <- matrix(0, npts, nr)
+  for(i in 1:npts) {
+    Ti <- til[[i]]
+    if(needclip)
+      Ti <- intersect.owin(Ti, W)
+    out[i,] <- discpartarea(X[i], r, Ti)
+  }
+  return(apply(out, 2, sum))
+}
+
+  
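Sketch of the two entry points above, with the built-in cells pattern and arbitrary radii:

    library(spatstat)
    r <- c(0.05, 0.1)
    A <- discpartarea(cells, r)                # npoints(cells) x length(r) areas
    V <- dilated.areas(cells, r, exact = TRUE) # total area of each dilation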
diff --git a/R/dist2dpath.R b/R/dist2dpath.R
new file mode 100755
index 0000000..65010b4
--- /dev/null
+++ b/R/dist2dpath.R
@@ -0,0 +1,68 @@
+#
+#  dist2dpath.R
+#
+#   $Revision: 1.10 $    $Date: 2017/06/05 10:31:58 $
+#
+#       dist2dpath    compute shortest path distances
+#
+
+dist2dpath <- function(dist, method="C") {
+  ## given a matrix of distances between adjacent vertices
+  ## (value = Inf if not adjacent)
+  ## compute the matrix of shortest path distances
+  stopifnot(is.matrix(dist) && isSymmetric(dist))
+  stopifnot(all(diag(dist) == 0))
+  findist <- dist[is.finite(dist)]
+  if(any(findist < 0))
+    stop("Some distances are negative")
+  ##
+  n <- nrow(dist)
+  if(n <= 1L) return(dist)
+  cols <- col(dist)
+  ##
+  tol <- .Machine$double.eps
+  posdist <- findist[findist > 0]
+  if(length(posdist) > 0) {
+    shortest <- min(posdist)
+    tol2 <- shortest/max(n,1024)
+    tol <- max(tol, tol2)
+  }
+  ##
+  switch(method,
+         interpreted={
+           dpathnew <- dpath <- dist
+           changed <- TRUE
+           while(changed) {
+             for(j in 1:n) 
+               dpathnew[,j] <- apply(dpath + dist[j,][cols], 1L, min)
+             unequal <- (dpathnew != dpath)
+             changed <- any(unequal) & any(abs(dpathnew-dpath)[unequal] > tol)
+             dpath <- dpathnew
+           }
+         },
+         C={
+           adj <- is.finite(dist)
+           diag(adj) <- TRUE
+           d <- dist
+           d[!adj] <- -1
+           z <- .C("Ddist2dpath",
+                   nv=as.integer(n),
+                   d=as.double(d),
+                   adj=as.integer(adj),
+                   dpath=as.double(numeric(n*n)),
+                   tol=as.double(tol),
+                   niter=as.integer(integer(1L)),
+                   status=as.integer(integer(1L)),
+                   PACKAGE = "spatstat")
+           if(z$status == -1L)
+             warning(paste("C algorithm did not converge to tolerance", tol,
+                           "after", z$niter, "iterations",
+                           "on", n, "vertices and",
+                           sum(adj) - n, "edges"))
+           dpath <- matrix(z$dpath, n, n)
+           ## value=-1 implies unreachable
+           dpath[dpath < 0] <- Inf
+         },
+         stop(paste("Unrecognised method", sQuote(method))))
+  return(dpath)
+}
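To make the input convention concrete (finite entries for adjacent vertices, Inf for non-adjacent pairs, zero diagonal), a small sketch on a three-vertex path graph:

    library(spatstat)
    d <- matrix(c(0,   1, Inf,
                  1,   0,   1,
                  Inf, 1,   0), nrow = 3, byrow = TRUE)
    dist2dpath(d)   # entry [1,3] becomes 2: the shortest path runs through vertex 2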
diff --git a/R/distan3D.R b/R/distan3D.R
new file mode 100755
index 0000000..b0c7554
--- /dev/null
+++ b/R/distan3D.R
@@ -0,0 +1,299 @@
+#
+#      distan3D.R
+#
+#      $Revision: 1.13 $     $Date: 2017/06/05 10:31:58 $
+#
+#      Interpoint distances for 3D points
+#
+#      Methods for pairdist, nndist, nnwhich, crossdist
+#
+
+pairdist.pp3 <- function(X, ..., periodic=FALSE, squared=FALSE) {
+  verifyclass(X, "pp3")
+  # extract point coordinates
+  xyz <- coords(X)
+  n <- nrow(xyz)
+  x <- xyz$x
+  y <- xyz$y
+  z <- xyz$z
+  #   
+  # special cases
+  if(n == 0)
+    return(matrix(numeric(0), nrow=0, ncol=0))
+  else if(n == 1L)
+    return(matrix(0,nrow=1L,ncol=1L))
+  #
+  if(!periodic) {
+    Cout <- .C("D3pairdist",
+               n = as.integer(n),
+               x = as.double(x),
+               y = as.double(y),
+               z = as.double(z),
+               squared = as.integer(squared),
+               d = as.double(numeric(n*n)),
+               PACKAGE = "spatstat")
+  } else {
+    b <- as.box3(X)
+    wide <- diff(b$xrange)
+    high <- diff(b$yrange)
+    deep <- diff(b$zrange)
+    Cout <- .C("D3pairPdist",
+               n = as.integer(n),
+               x = as.double(x),
+               y = as.double(y),
+               z = as.double(z),
+               xwidth=as.double(wide),
+               yheight=as.double(high),
+               zdepth=as.double(deep),
+               squared = as.integer(squared),
+               d= as.double(numeric(n*n)),
+               PACKAGE = "spatstat")
+  }
+  dout <- matrix(Cout$d, nrow=n, ncol=n)
+  return(dout)
+}
+
+nndist.pp3 <- function(X, ..., k=1) {
+  verifyclass(X, "pp3")
+
+  if((narg <- length(list(...))) > 0) 
+    warning(paste(narg, "unrecognised",
+                  ngettext(narg, "argument was", "arguments were"),
+                  "ignored"))
+
+  # extract point coordinates
+  xyz <- coords(X)
+  n <- nrow(xyz)
+  x <- xyz$x
+  y <- xyz$y
+  z <- xyz$z
+  
+  # k can be a single integer or an integer vector
+  if(length(k) == 0)
+    stop("k is an empty vector")
+  else if(length(k) == 1L) {
+    if(k != round(k) || k <= 0)
+      stop("k is not a positive integer")
+  } else {
+    if(any(k != round(k)) || any(k <= 0))
+      stop(paste("some entries of the vector",
+           sQuote("k"), "are not positive integers"))
+  }
+  k <- as.integer(k)
+  kmax <- max(k)
+
+  # trivial cases
+  if(n <= 1L) {
+    # empty pattern => return numeric(0)
+    # or pattern with only 1 point => return Inf
+    nnd <- matrix(Inf, nrow=n, ncol=kmax)
+    nnd <- nnd[,k, drop=TRUE]
+    return(nnd)
+  }
+  
+  # number of neighbours that are well-defined
+  kmaxcalc <- min(n-1L, kmax)
+
+  # calculate k-nn distances for k <= kmaxcalc
+  
+  if(kmaxcalc == 1L) {
+    # calculate nearest neighbour distance only
+    nnd<-numeric(n)
+    o <- fave.order(z)
+    big <- sqrt(.Machine$double.xmax)
+    Cout <- .C("nnd3D",
+               n= as.integer(n),
+               x= as.double(x[o]),
+               y= as.double(y[o]),
+               z= as.double(z[o]),
+               nnd= as.double(nnd),
+               nnwhich = as.integer(integer(1L)),
+               huge=as.double(big),
+               PACKAGE = "spatstat")
+    nnd[o] <- Cout$nnd
+  } else {
+    # case kmaxcalc > 1
+    nnd<-numeric(n * kmaxcalc)
+    o <- fave.order(z)
+    big <- sqrt(.Machine$double.xmax)
+    Cout <- .C("knnd3D",
+               n    = as.integer(n),
+               kmax = as.integer(kmaxcalc),
+               x    = as.double(x[o]),
+               y    = as.double(y[o]),
+               z    = as.double(z[o]),
+               nnd  = as.double(nnd),
+               nnwhich = as.integer(integer(1L)),
+               huge = as.double(big),
+               PACKAGE = "spatstat")
+    nnd <- matrix(nnd, nrow=n, ncol=kmaxcalc)
+    nnd[o, ] <- matrix(Cout$nnd, nrow=n, ncol=kmaxcalc, byrow=TRUE)
+  }
+
+  # post-processing
+  if(kmax > kmaxcalc) {
+    # add columns of Inf's
+    infs <- matrix(as.numeric(Inf), nrow=n, ncol=kmax-kmaxcalc)
+    nnd <- cbind(nnd, infs)
+  }
+
+  if(length(k) < kmax) {
+    # select only the specified columns
+    nnd <- nnd[, k, drop=TRUE]
+  }
+  
+  return(nnd)
+}
+
+nnwhich.pp3 <- function(X, ..., k=1) {
+  verifyclass(X, "pp3")
+  if((narg <- length(list(...))) > 0) 
+    warning(paste(narg, "unrecognised",
+                  ngettext(narg, "argument was", "arguments were"),
+                  "ignored"))
+  
+  # k can be a single integer or an integer vector
+  if(length(k) == 0)
+    stop("k is an empty vector")
+  else if(length(k) == 1L) {
+    if(k != round(k) || k <= 0)
+      stop("k is not a positive integer")
+  } else {
+    if(any(k != round(k)) || any(k <= 0))
+      stop(paste("some entries of the vector",
+           sQuote("k"), "are not positive integers"))
+  }
+  k <- as.integer(k)
+  kmax <- max(k)
+
+  # extract point coordinates
+  xyz <- coords(X)
+  n <- nrow(xyz)
+  x <- xyz$x
+  y <- xyz$y
+  z <- xyz$z
+  
+  # special cases
+  if(n <= 1L) {
+    # empty pattern => return integer(0)
+    # or pattern with only 1 point => return NA
+    nnw <- matrix(as.integer(NA), nrow=n, ncol=kmax)
+    nnw <- nnw[,k, drop=TRUE]
+    return(nnw)
+  }
+
+  # number of neighbours that are well-defined
+  kmaxcalc <- min(n-1L, kmax)
+
+  # identify k-nn for k <= kmaxcalc
+
+  if(kmaxcalc == 1L) {
+    # identify nearest neighbour only
+    nnw <- integer(n)
+    o <- fave.order(z)
+    big <- sqrt(.Machine$double.xmax)
+    Cout <- .C("nnw3D",
+               n = as.integer(n),
+               x = as.double(x[o]),
+               y = as.double(y[o]),
+               z = as.double(z[o]),
+               nnd = as.double(numeric(1L)),
+               nnwhich = as.integer(nnw),
+               huge = as.double(big),
+               PACKAGE = "spatstat")
+    # [sic] Conversion from C to R indexing is done in C code.
+    witch <- Cout$nnwhich
+    if(any(witch <= 0))
+      stop("Internal error: illegal index returned from C code")
+    if(any(witch > n))
+      stop("Internal error: index returned from C code exceeds n")
+    nnw[o] <- o[witch]
+  } else {
+    # case kmaxcalc > 1
+    nnw <- matrix(integer(n * kmaxcalc), nrow=n, ncol=kmaxcalc)
+    o <- fave.order(z)
+    big <- sqrt(.Machine$double.xmax)
+    Cout <- .C("knnw3D",
+               n = as.integer(n),
+               kmax = as.integer(kmaxcalc),
+               x = as.double(x[o]),
+               y = as.double(y[o]),
+               z = as.double(z[o]),
+               nnd = as.double(numeric(1L)),
+               nnwhich = as.integer(nnw),
+               huge = as.double(big),
+               PACKAGE = "spatstat")
+    # Conversion from C to R indexing is done in the C code.
+    witch <- Cout$nnwhich 
+    witch <- matrix(witch, nrow=n, ncol=kmaxcalc, byrow=TRUE)
+    if(any(witch <= 0))
+      stop("Internal error: illegal index returned from C code")
+    if(any(witch > n))
+      stop("Internal error: index returned from C code exceeds n")
+    # convert back to original ordering
+    nnw[o,] <- matrix(o[witch], nrow=n, ncol=kmaxcalc)
+  }
+  
+  # post-processing
+  if(kmax > kmaxcalc) {
+    # add columns of NA's
+    nas <- matrix(as.integer(NA), nrow=n, ncol=kmax-kmaxcalc)
+    nnw <- cbind(nnw, nas)
+  }
+
+  if(length(k) < kmax) {
+    # select only the specified columns
+    nnw <- nnw[, k, drop=TRUE]
+  }
+  return(nnw)
+}
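+# Editor's illustration (a sketch, assuming runifpoint3() as documented):
+# for a pattern of n = 5 points,
+#   X <- runifpoint3(5)
+#   nndist.pp3(X, k=1:5)    # 5 x 5 matrix; column 5 is Inf (only 4 neighbours)
+#   nnwhich.pp3(X, k=1:5)   # same shape; column 5 is NA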
+
+crossdist.pp3 <- function(X, Y, ..., periodic=FALSE, squared=FALSE) {
+  verifyclass(X, "pp3")
+  verifyclass(Y, "pp3")
+
+  cX <- coords(X)
+  cY <- coords(Y)
+  nX <- nrow(cX)
+  nY <- nrow(cY)
+
+  if(nX == 0 || nY == 0)
+    return(matrix(numeric(0), nrow=nX, ncol=nY))
+
+  if(!periodic) {
+    Cout <- .C("D3crossdist",
+               nfrom = as.integer(nX),
+               xfrom = as.double(cX$x),
+               yfrom = as.double(cX$y),
+               zfrom = as.double(cX$z),
+               nto = as.integer(nY),
+               xto = as.double(cY$x),
+               yto = as.double(cY$y),
+               zto = as.double(cY$z),
+               squared = as.integer(squared),
+               d = as.double(matrix(0, nrow=nX, ncol=nY)),
+               PACKAGE = "spatstat")
+  } else {
+    b <- as.box3(X)
+    wide <- diff(b$xrange)
+    high <- diff(b$yrange)
+    deep <- diff(b$zrange)
+    Cout <- .C("D3crossPdist",
+               nfrom = as.integer(nX),
+               xfrom = as.double(cX$x),
+               yfrom = as.double(cX$y),
+               zfrom = as.double(cX$z),
+               nto = as.integer(nY),
+               xto = as.double(cY$x),
+               yto = as.double(cY$y),
+               zto = as.double(cY$z),
+               xwidth = as.double(wide),
+               yheight = as.double(high),
+               zheight = as.double(deep),
+               squared = as.integer(squared),
+               d = as.double(matrix(0, nrow=nX, ncol=nY)),
+               PACKAGE = "spatstat")
+  }
+  return(matrix(Cout$d, nrow=nX, ncol=nY))
+}
+  
diff --git a/R/distances.R b/R/distances.R
new file mode 100755
index 0000000..ca99824
--- /dev/null
+++ b/R/distances.R
@@ -0,0 +1,203 @@
+#
+#      distances.R
+#
+#      $Revision: 1.46 $     $Date: 2017/06/05 10:31:58 $
+#
+#
+#      Interpoint distances between pairs 
+#
+#
+
+pairdist <- function(X, ...) {
+  UseMethod("pairdist")
+}
+
+pairdist.ppp <- function(X, ..., periodic=FALSE, method="C", squared=FALSE) {
+  verifyclass(X, "ppp")
+  if(!periodic)
+    return(pairdist.default(X$x, X$y, method=method, squared=squared))
+  # periodic case
+  W <- X$window
+  if(W$type != "rectangle")
+    stop(paste("periodic edge correction can't be applied",
+               "in a non-rectangular window"))
+  wide <- diff(W$xrange)
+  high <- diff(W$yrange)
+  return(pairdist.default(X$x, X$y, period=c(wide,high),
+                          method=method, squared=squared))
+}
+
+
+pairdist.default <-
+  function(X, Y=NULL, ..., period=NULL, method="C", squared=FALSE)
+{
+  xy <- xy.coords(X,Y)[c("x","y")]
+  x <- xy$x
+  y <- xy$y
+
+  n <- length(x)
+  if(length(y) != n)
+    stop("lengths of x and y do not match")
+
+  # special cases
+  if(n == 0)
+    return(matrix(numeric(0), nrow=0, ncol=0))
+  else if(n == 1L)
+    return(matrix(0,nrow=1L,ncol=1L))
+
+  if((periodic <- !is.null(period))) {
+    stopifnot(is.numeric(period))
+    stopifnot(length(period) == 2 || length(period) == 1)
+    stopifnot(all(period > 0))
+    if(length(period) == 1) period <- rep.int(period, 2)
+    wide <- period[1L]
+    high <- period[2L]
+  }
+
+  switch(method,
+         interpreted={
+           xx <- matrix(rep.int(x, n), nrow = n)
+           yy <- matrix(rep.int(y, n), nrow = n)
+           if(!periodic) {
+             d2 <- (xx - t(xx))^2 + (yy - t(yy))^2
+           } else {
+             dx <- xx - t(xx)
+             dy <- yy - t(yy)
+             dx2 <- pmin.int(dx^2, (dx + wide)^2, (dx - wide)^2)
+             dy2 <- pmin.int(dy^2, (dy + high)^2, (dy - high)^2)
+             d2 <- dx2 + dy2
+           }
+           if(squared)
+             dout <- d2
+           else
+             dout <- sqrt(d2)
+         },
+         C={
+           d <- numeric( n * n)
+           if(!periodic) {
+               z<- .C("Cpairdist",
+                      n = as.integer(n),
+                      x= as.double(x),
+                      y= as.double(y),
+                      squared=as.integer(squared),
+                      d= as.double(d),
+                      PACKAGE = "spatstat")
+           } else {
+             z <- .C("CpairPdist",
+                     n = as.integer(n),
+                     x= as.double(x),
+                     y= as.double(y),
+                     xwidth=as.double(wide),
+                     yheight=as.double(high),
+                     squared = as.integer(squared),
+                     d= as.double(d),
+                     PACKAGE = "spatstat")
+           }
+           dout <- matrix(z$d, nrow=n, ncol=n)
+         },
+         stop(paste("Unrecognised method", sQuote(method)))
+       )
+  return(dout)
+}
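+# Editor's worked example: for the points (0.1, 0.5) and (0.9, 0.5) on the
+# unit torus,
+#   pairdist.default(c(0.1, 0.9), c(0.5, 0.5), period=c(1, 1))
+# has off-diagonal entries 0.2, because the wrapped x-displacement is
+# min(0.8, 1 - 0.8) = 0.2 and the y-displacement is 0.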
+
+
+crossdist <- function(X, Y, ...) {
+  UseMethod("crossdist")
+}
+
+crossdist.ppp <- function(X, Y, ...,
+                          periodic=FALSE, method="C", squared=FALSE) {
+  verifyclass(X, "ppp")
+  Y <- as.ppp(Y)
+  if(!periodic)
+    return(crossdist.default(X$x, X$y, Y$x, Y$y,
+                             method=method, squared=squared))
+  # periodic case
+  WX <- X$window
+  WY <- Y$window
+  if(WX$type != "rectangle" || WY$type != "rectangle")
+    stop(paste("periodic edge correction can't be applied",
+               "in non-rectangular windows"))
+  if(!is.subset.owin(WX,WY) || !is.subset.owin(WY, WX))
+    stop(paste("periodic edge correction is not implemented",
+               "for the case when X and Y lie in different rectangles"))
+  wide <- diff(WX$xrange)
+  high <- diff(WX$yrange)
+  return(crossdist.default(X$x, X$y, Y$x, Y$y,
+                           period=c(wide,high),
+                           method=method, squared=squared))
+}
+
+crossdist.default <-
+  function(X, Y, x2, y2, ..., period=NULL, method="C", squared=FALSE)
+{
+  x1 <- X
+  y1 <- Y
+  # returns matrix[i,j] = distance from (x1[i],y1[i]) to (x2[j],y2[j])
+  if(length(x1) != length(y1))
+    stop("lengths of x and y do not match")
+  if(length(x2) != length(y2))
+    stop("lengths of x2 and y2 do not match")
+  n1 <- length(x1)
+  n2 <- length(x2)
+  if(n1 == 0 || n2 == 0)
+    return(matrix(numeric(0), nrow=n1, ncol=n2))
+
+  if((periodic <- !is.null(period))) {
+    stopifnot(is.numeric(period))
+    stopifnot(length(period) == 2 || length(period) == 1)
+    stopifnot(all(period > 0))
+    if(length(period) == 1L) period <- rep.int(period, 2)
+    wide <- period[1L]
+    high <- period[2L]
+  }
+
+   switch(method,
+         interpreted = {
+                 X1 <- matrix(rep.int(x1, n2), ncol = n2)
+                 Y1 <- matrix(rep.int(y1, n2), ncol = n2)
+                 X2 <- matrix(rep.int(x2, n1), ncol = n1)
+                 Y2 <- matrix(rep.int(y2, n1), ncol = n1)
+                 if(!periodic) 
+                   d2 <- (X1 - t(X2))^2 + (Y1 - t(Y2))^2
+                 else {
+                   dx <- X1 - t(X2)
+                   dy <- Y1 - t(Y2)
+                   dx2 <- pmin.int(dx^2, (dx + wide)^2, (dx - wide)^2)
+                   dy2 <- pmin.int(dy^2, (dy + high)^2, (dy - high)^2)
+                   d2 <- dx2 + dy2
+                 }
+                 return(if(squared) d2 else sqrt(d2))
+               },
+               C = {
+                 if(!periodic) {
+                   z<- .C("Ccrossdist",
+                          nfrom = as.integer(n1),
+                          xfrom = as.double(x1),
+                          yfrom = as.double(y1),
+                          nto = as.integer(n2),
+                          xto = as.double(x2),
+                          yto = as.double(y2),
+                          squared = as.integer(squared),
+                          d = as.double(matrix(0, nrow=n1, ncol=n2)),
+                          PACKAGE = "spatstat")
+                 } else {
+                   z<- .C("CcrossPdist",
+                          nfrom = as.integer(n1),
+                          xfrom = as.double(x1),
+                          yfrom = as.double(y1),
+                          nto = as.integer(n2),
+                          xto = as.double(x2),
+                          yto = as.double(y2),
+                          xwidth = as.double(wide),
+                          yheight = as.double(high),
+                          squared = as.integer(squared),
+                          d = as.double(matrix(0, nrow=n1, ncol=n2)),
+                          PACKAGE = "spatstat")
+                 }
+                 return(matrix(z$d, nrow=n1, ncol=n2))
+               },
+               stop(paste("Unrecognised method", method))
+               )
+      }
+
diff --git a/R/distances.psp.R b/R/distances.psp.R
new file mode 100755
index 0000000..3a1a1dc
--- /dev/null
+++ b/R/distances.psp.R
@@ -0,0 +1,141 @@
+#
+#  distances.psp.R
+#
+#  Hausdorff distance and Euclidean separation for psp objects
+#
+#  $Revision: 1.11 $ $Date: 2015/10/21 09:06:57 $
+#
+#
+
+pairdist.psp <- function(X, ..., method="C", type="Hausdorff") {
+  verifyclass(X, "psp")
+  if(X$n == 0)
+    return(matrix(numeric(0), nrow=0, ncol=0))
+  type <- pickoption("type", type,
+                     c(Hausdorff="Hausdorff",
+                       hausdorff="Hausdorff",
+                       separation="separation"))
+
+  D12 <- AsymmDistance.psp(X, X, metric=type, method=method)
+
+  switch(type,
+         Hausdorff={
+           # maximum is Hausdorff metric
+           D <- array(pmax.int(D12, t(D12)), dim=dim(D12))
+         },
+         separation={
+           # Take minimum of endpoint-to-segment distances
+           D <- array(pmin.int(D12, t(D12)), dim=dim(D12))
+           # Identify any pairs of segments which cross
+           cross <- test.selfcrossing.psp(X)
+           # Assign separation = 0 to such pairs
+           D[cross] <- 0
+         })
+  return(D)
+}
+
+crossdist.psp <- function(X, Y, ..., method="C", type="Hausdorff") {
+  verifyclass(X, "psp")
+  Y <- as.psp(Y)
+  if(X$n * Y$n == 0)
+    return(matrix(numeric(0), nrow=X$n, ncol=Y$n))
+
+  type <- pickoption("type", type,
+                     c(Hausdorff="Hausdorff",
+                       hausdorff="Hausdorff",
+                       separation="separation"))
+  
+  DXY <- AsymmDistance.psp(X, Y, metric=type, method=method)
+  DYX <- AsymmDistance.psp(Y, X, metric=type, method=method)
+  
+  switch(type,
+         Hausdorff={
+           # maximum is Hausdorff metric
+           D <- array(pmax.int(DXY, t(DYX)), dim=dim(DXY))
+         },
+         separation={
+           # Take minimum of endpoint-to-segment distances
+           D <- array(pmin.int(DXY, t(DYX)), dim=dim(DXY))
+           # Identify pairs of segments which cross
+           cross <- test.crossing.psp(X, Y)
+           # Assign separation = 0 to such pairs
+           D[cross] <- 0
+         })
+  return(D)
+}
+
+nndist.psp <- function(X, ..., k=1, method="C") {
+  verifyclass(X, "psp")
+  if(!(is.vector(k) && all(k %% 1 == 0) && all(k >= 1)))
+    stop("k should be a positive integer or integers")
+  n <- nobjects(X)
+  kmax <- max(k)
+  lenk <- length(k)
+  result <- if(lenk == 1) numeric(n) else matrix(, nrow=n, ncol=lenk)
+  if(n == 0)
+    return(result)
+  if(kmax >= n) {
+    # not enough objects:
+    # fill with infinite values
+    result[] <- Inf
+    if(any(ok <- (k < n))) {
+      # compute the lower-order nnd's
+      result[, ok] <- nndist.psp(X, ..., k=k[ok], method=method)
+    }
+    return(result)
+  }
+  # normal case:
+  D <- pairdist.psp(X, ..., method=method)
+  diag(D) <- Inf
+  if(kmax == 1L) 
+    NND <- apply(D, 1L, min)
+  else 
+    NND <- t(apply(D, 1L, orderstats, k=k))[, , drop=TRUE]
+  return(NND)
+}
+
+# .....  AsymmDistance.psp .....
+#
+# If metric="Hausdorff":
+#     this function computes, for each pair of segments A = X[i] and B = Y[j],
+#     the value max_{a in A} d(a,B) = max_{a in A} min_{b in B} ||a-b||
+#     which appears in the definition of the Hausdorff metric.
+#     Since the distance function d(a,B) of a segment B is a convex function,
+#     the maximum is achieved at an endpoint of A. So the algorithm
+#     actually computes h(A,B) = max (d(e_1,B), d(e_2,B)) where e_1, e_2
+#     are the endpoints of A. And H(A,B) = max(h(A,B),h(B,A)).
+#
+# If metric="separation":
+#     the function computes, for each pair of segments A = X[i] and B = Y[j],
+#     the MINIMUM distance from an endpoint of A to any point of B.
+#        t(A,B) = min (d(e_1,B), d(e_2,B))
+#     where e_1, e_2 are the endpoints of A.
+#     Define the separation distance
+#        s(A,B) = min_{a in A} min_{b in B} ||a-b||.
+#     The minimum (a*, b*) occurs either when a* is an endpoint of A,
+#     or when b* is an endpoint of B, or when a* = b* (so A and B intersect).
+#     (If A and B are parallel, the minimum is still achieved at an endpoint)
+#     Thus s(A,B) = min(t(A,B), t(B,A)) unless A and B intersect.
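+#
+# Editor's worked example: if A runs from (0,0) to (1,0) and B from (2,0)
+# to (3,0), the endpoint-to-segment distances from A to B are 2 and 1, so
+# h(A,B) = 2 and t(A,B) = 1; by symmetry h(B,A) = 2 and t(B,A) = 1. Hence
+# H(A,B) = 2 while s(A,B) = 1. A sketch of the check:
+#   X <- psp(c(0,2), c(0,0), c(1,3), c(0,0), window=owin(c(0,3), c(-1,1)))
+#   pairdist.psp(X, type="Hausdorff")    # off-diagonal entries 2
+#   pairdist.psp(X, type="separation")   # off-diagonal entries 1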
+
+
+AsymmDistance.psp <- function(X, Y, metric="Hausdorff",
+                              method=c("C", "Fortran", "interpreted")) {
+  method <- match.arg(method)
+  # Extract endpoints of X
+  EX <- endpoints.psp(X, "both")
+  idX <- attr(EX, "id")
+  # compute shortest dist from each endpoint of X to each segment of Y
+  DPL <- distppll(cbind(EX$x,EX$y), Y$ends, mintype=0, method=method)
+  # for each segment in X, maximise or minimise over the two endpoints
+  Dist <- as.vector(DPL)
+  Point <- as.vector(idX[row(DPL)])
+  Segment <- as.vector(col(DPL))
+  switch(metric,
+         Hausdorff={
+           DXY <- tapply(Dist, list(factor(Point), factor(Segment)), max)
+         },
+         separation={
+           DXY <- tapply(Dist, list(factor(Point), factor(Segment)), min)
+           })
+  return(DXY)
+}
diff --git a/R/distanxD.R b/R/distanxD.R
new file mode 100755
index 0000000..9220f9a
--- /dev/null
+++ b/R/distanxD.R
@@ -0,0 +1,227 @@
+#
+#      distanxD.R
+#
+#      $Revision: 1.7 $     $Date: 2017/06/05 10:31:58 $
+#
+#      Interpoint distances for multidimensional points
+#
+#      Methods for pairdist, nndist, nnwhich, crossdist
+#
+
+pairdist.ppx <- function(X, ...) {
+  verifyclass(X, "ppx")
+  # extract point coordinates
+  coo <- as.matrix(coords(X, ...))
+  n <- nrow(coo)
+  if(n == 0)
+    return(matrix(numeric(0), nrow=0, ncol=0))
+  return(as.matrix(dist(coo)))
+}
+
+crossdist.ppx <- function(X, Y, ...) {
+  verifyclass(X, "ppx")
+  verifyclass(Y, "ppx")
+  # extract point coordinates
+  cooX <- as.matrix(coords(X, ...))
+  cooY <- as.matrix(coords(Y, ...))
+  nX <- nrow(cooX)
+  nY <- nrow(cooY)
+  if(ncol(cooX) != ncol(cooY))
+    stop("X and Y have different dimensions (different numbers of coordinates)")
+  if(nX == 0 || nY == 0)
+    return(matrix(numeric(0), nrow=nX, ncol=nY))
+  coo <- rbind(cooX, cooY)
+  dis <- as.matrix(dist(coo))
+  ans <- dis[1:nX, nX + (1:nY), drop=FALSE]
+  return(ans)
+}
+
+nndist.ppx <- function(X, ..., k=1) {
+  verifyclass(X, "ppx")
+
+  # extract point coordinates
+  coo <- as.matrix(coords(X, ...))
+  n <- nrow(coo)
+  m <- ncol(coo)
+
+  if(m == 0) {
+    warning("nndist.ppx: Zero-dimensional coordinates: returning NA")
+    if(length(k) == 1L)
+      return(rep.int(NA_real_, n))
+    else
+      return(matrix(NA_real_, n, length(k)))
+  }
+  
+  # k can be a single integer or an integer vector
+  if(length(k) == 0)
+    stop("k is an empty vector")
+  else if(length(k) == 1L) {
+    if(k != round(k) || k <= 0)
+      stop("k is not a positive integer")
+  } else {
+    if(any(k != round(k)) || any(k <= 0))
+      stop(paste("some entries of the vector",
+           sQuote("k"), "are not positive integers"))
+  }
+  k <- as.integer(k)
+  kmax <- max(k)
+
+  # trivial cases
+  if(n <= 1L) {
+    # empty pattern => return numeric(0)
+    # or pattern with only 1 point => return Inf
+    nnd <- matrix(Inf, nrow=n, ncol=kmax)
+    nnd <- nnd[,k, drop=TRUE]
+    return(nnd)
+  }
+  
+  # number of neighbours that are well-defined
+  kmaxcalc <- min(n-1L, kmax)
+
+  # calculate k-nn distances for k <= kmaxcalc
+  
+  if(kmaxcalc == 1L) {
+    # calculate nearest neighbour distance only
+    nnd <- numeric(n)
+    o <- fave.order(coo[,1L])
+    big <- sqrt(.Machine$double.xmax)
+    Cout <- .C("nndMD",
+               n= as.integer(n),
+               m=as.integer(m),
+               x= as.double(t(coo[o,])),
+               nnd= as.double(nnd),
+               as.double(big),
+               PACKAGE = "spatstat")
+    nnd[o] <- Cout$nnd
+  } else {
+    # case kmaxcalc > 1
+    nnd <- numeric(n * kmaxcalc)
+    o <- fave.order(coo[,1L])
+    big <- sqrt(.Machine$double.xmax)
+    Cout <- .C("knndMD",
+               n    = as.integer(n),
+               m    = as.integer(m),
+               kmax = as.integer(kmaxcalc),
+               x    = as.double(t(coo[o,])),
+               nnd  = as.double(nnd),
+               huge = as.double(big),
+               PACKAGE = "spatstat")
+    nnd <- matrix(nnd, nrow=n, ncol=kmaxcalc)
+    nnd[o, ] <- matrix(Cout$nnd, nrow=n, ncol=kmaxcalc, byrow=TRUE)
+  }
+
+  # post-processing
+  if(kmax > kmaxcalc) {
+    # add columns of Inf's
+    infs <- matrix(as.numeric(Inf), nrow=n, ncol=kmax-kmaxcalc)
+    nnd <- cbind(nnd, infs)
+  }
+
+  if(length(k) < kmax) {
+    # select only the specified columns
+    nnd <- nnd[, k, drop=TRUE]
+  }
+  
+  return(nnd)
+}
+
+nnwhich.ppx <- function(X, ..., k=1) {
+  verifyclass(X, "ppx")
+  # k can be a single integer or an integer vector
+  if(length(k) == 0)
+    stop("k is an empty vector")
+  else if(length(k) == 1L) {
+    if(k != round(k) || k <= 0)
+      stop("k is not a positive integer")
+  } else {
+    if(any(k != round(k)) || any(k <= 0))
+      stop(paste("some entries of the vector",
+           sQuote("k"), "are not positive integers"))
+  }
+  k <- as.integer(k)
+  kmax <- max(k)
+
+  # extract point coordinates
+  coo <- coords(X, ...)
+  n <- nrow(coo)
+  m <- ncol(coo)
+  
+  if(m == 0) {
+    warning("nnwhich.ppx: Zero-dimensional coordinates: returning NA")
+    if(length(k) == 1L)
+      return(rep.int(NA_real_, n))
+    else
+      return(matrix(NA_real_, n, length(k)))
+  }
+  
+  # special cases
+  if(n <= 1L) {
+    # empty pattern => return integer(0)
+    # or pattern with only 1 point => return NA
+    nnw <- matrix(NA_integer_, nrow=n, ncol=kmax)
+    nnw <- nnw[,k, drop=TRUE]
+    return(nnw)
+  }
+
+  # number of neighbours that are well-defined
+  kmaxcalc <- min(n-1L, kmax)
+
+  # identify k-nn for k <= kmaxcalc
+
+  if(kmaxcalc == 1L) {
+    # identify nearest neighbour only
+    nnw <- integer(n)
+    o <- fave.order(coo[,1L])
+    big <- sqrt(.Machine$double.xmax)
+    Cout <- .C("nnwMD",
+               n = as.integer(n),
+               m = as.integer(m),
+               x = as.double(t(coo[o,])),
+               nnd = as.double(numeric(n)),
+               nnwhich = as.integer(nnw),
+               huge = as.double(big),
+               PACKAGE = "spatstat")
+    witch <- Cout$nnwhich
+    if(any(witch <= 0))
+      stop("Internal error: non-positive index returned from C code")
+    if(any(witch > n))
+      stop("Internal error: index returned from C code exceeds n")
+    nnw[o] <- o[witch]
+  } else {
+    # case kmaxcalc > 1
+    nnw <- matrix(integer(n * kmaxcalc), nrow=n, ncol=kmaxcalc)
+    o <- fave.order(coo[,1L])
+    big <- sqrt(.Machine$double.xmax)
+    Cout <- .C("knnwMD",
+               n = as.integer(n),
+               m = as.integer(m),
+               kmax = as.integer(kmaxcalc),
+               x = as.double(t(coo[o,])),
+               nnd = as.double(numeric(n * kmaxcalc)),
+               nnwhich = as.integer(nnw),
+               huge = as.double(big),
+               PACKAGE = "spatstat")
+    witch <- Cout$nnwhich
+    witch <- matrix(witch, nrow=n, ncol=kmaxcalc, byrow=TRUE)
+    if(any(witch <= 0))
+      stop("Internal error: non-positive index returned from C code")
+    if(any(witch > n))
+      stop("Internal error: index returned from C code exceeds n")
+    # convert back to original ordering
+    nnw[o,] <- matrix(o[witch], nrow=n, ncol=kmaxcalc)
+  }
+  
+  # post-processing
+  if(kmax > kmaxcalc) {
+    # add columns of NA's
+    nas <- matrix(NA_integer_, nrow=n, ncol=kmax-kmaxcalc)
+    nnw <- cbind(nnw, nas)
+  }
+
+  if(length(k) < kmax) {
+    # select only the specified columns
+    nnw <- nnw[, k, drop=TRUE]
+  }
+  return(nnw)
+}
+
diff --git a/R/distbdry.R b/R/distbdry.R
new file mode 100755
index 0000000..e2032ef
--- /dev/null
+++ b/R/distbdry.R
@@ -0,0 +1,222 @@
+#
+#	distbdry.S		Distance to boundary
+#
+#	$Revision: 4.42 $	$Date: 2017/02/01 10:26:09 $
+#
+# -------- functions ----------------------------------------
+#
+#	bdist.points()
+#                       compute vector of distances 
+#			from each point of point pattern
+#                       to boundary of window
+#
+#       bdist.pixels()
+#                       compute matrix of distances from each pixel
+#                       to boundary of window
+#
+#       erodemask()    erode the window mask by a distance r
+#                       [yields a new window]
+#
+#
+# 
+"bdist.points"<-
+function(X)
+{
+	verifyclass(X, "ppp") 
+        if(X$n == 0)
+          return(numeric(0))
+	x <- X$x
+	y <- X$y
+	window <- X$window
+        switch(window$type,
+               rectangle = {
+		xmin <- min(window$xrange)
+		xmax <- max(window$xrange)
+		ymin <- min(window$yrange)
+		ymax <- max(window$yrange)
+		result <- pmin.int(x - xmin, xmax - x, y - ymin, ymax - y)
+               },
+               polygonal = {
+                 xy <- cbind(x,y)
+                 result <- rep.int(Inf, X$n)
+                 bdry <- window$bdry
+                 for(i in seq_along(bdry)) {
+                   polly <- bdry[[i]]
+                   px <- polly$x
+                   py <- polly$y
+                   nsegs <- length(px)
+                   for(j in seq_len(nsegs)) {
+                     j1 <- if(j < nsegs) j + 1L else 1L
+                     seg <- c(px[j],  py[j],
+                              px[j1], py[j1])
+                     result <- pmin.int(result, distppl(xy, seg))
+                   }
+                 }
+               },
+               mask = {
+                 b <- bdist.pixels(window, style="matrix")
+                 loc <- nearest.raster.point(x,y,window)
+                 result <- b[cbind(loc$row, loc$col)]
+               },
+               stop("Unrecognised window type", window$type)
+               )
+        return(result)
+}
+
+"bdist.pixels" <- function(w, ..., style="image",
+                           method=c("C", "interpreted")) {
+	verifyclass(w, "owin")
+
+        masque <- as.mask(w, ...)
+        
+        switch(w$type,
+               mask = {
+                 neg <- complement.owin(masque)
+                 m <- exactPdt(neg)
+                 b <- pmin.int(m$d,m$b)
+               },
+               rectangle = {
+                 rxy <- rasterxy.mask(masque)
+                 x <- rxy$x
+                 y <- rxy$y
+                 xmin <- w$xrange[1L]
+                 xmax <- w$xrange[2L]
+                 ymin <- w$yrange[1L]
+                 ymax <- w$yrange[2L]
+                 b <- pmin.int(x - xmin, xmax - x, y - ymin, ymax - y)
+               },
+               polygonal = {
+                 # set up pixel raster
+                 method <- match.arg(method)
+                 rxy <- rasterxy.mask(masque)
+                 x <- rxy$x
+                 y <- rxy$y
+                 b <- numeric(length(x))
+                 # test each pixel in/out, analytically
+                 inside <- inside.owin(x, y, w)
+                 # compute distances for these pixels
+                 xy <- cbind(x[inside], y[inside])
+                 switch(method,
+                        C = {
+                          #' C code
+                          ll <- as.data.frame(edges(w))
+                          dxy <- distppllmin(xy, ll)$min.d
+                        },
+                        interpreted = {
+                          #' ancient R code
+                          dxy <- rep.int(Inf, sum(inside))
+                          bdry <- w$bdry
+                          for(i in seq_along(bdry)) {
+                            polly <- bdry[[i]]
+                            nsegs <- length(polly$x)
+                            for(j in 1:nsegs) {
+                              j1 <- if(j < nsegs) j + 1L else 1L
+                              seg <- c(polly$x[j],  polly$y[j],
+                                       polly$x[j1], polly$y[j1])
+                              dxy <- pmin.int(dxy, distppl(xy, seg))
+                            }
+                          }
+                        })
+                 b[inside] <- dxy
+               },
+               stop("unrecognised window type", w$type)
+               )
+
+        # reshape it
+        b <- matrix(b, nrow=masque$dim[1L], ncol=masque$dim[2L])
+
+        switch(style,
+               coords={
+                 # format which can be plotted by image(), persp() etc
+                 return(list(x=masque$xcol, y=masque$yrow, z=t(b)))
+               },
+               matrix={
+                 # return matrix (for internal use by package)
+                 return(b)
+               },
+               image={
+                 bim <- im(b, xcol=masque$xcol, yrow=masque$yrow,
+                           unitname=unitname(masque))
+                 return(bim)
+               },
+               stop(paste("Unrecognised option for style:", style)))
+} 
+
+erodemask <- function(w, r, strict=FALSE) {
+  # erode a binary image mask without changing any other entries
+  verifyclass(w, "owin")
+  if(w$type != "mask")
+    stop(paste("window w is not of type", sQuote("mask")))
+  if(!is.numeric(r) || length(r) != 1L)
+    stop("r must be a single number")
+  if(r < 0)
+    stop("r must be nonnegative")
+        
+  bb <- bdist.pixels(w, style="matrix")
+
+  if(r > max(bb))
+    warning("eroded mask is empty")
+
+  if(identical(strict, TRUE))
+    w$m <- (bb > r)
+  else 
+    w$m <- (bb >= r)
+  return(w)
+}
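+# Editor's usage sketch (assuming the 'letterR' window shipped with spatstat):
+#   W <- as.mask(letterR)
+#   erodemask(W, 0.1)  # keeps only pixels at distance >= 0.1 from the boundary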
+
+"Frame<-.owin" <- function(X, value) {
+  stopifnot(is.rectangle(value))
+  W <- Frame(X)
+  if(!is.subset.owin(W, value))
+    W <- intersect.owin(W, value)
+  rebound.owin(X, value)
+}
+
+rebound.owin <- function(x, rect) {
+  w <- x
+  verifyclass(rect, "owin")
+  if(is.empty(w))
+    return(emptywindow(rect))
+  verifyclass(w, "owin")
+  if(!is.subset.owin(as.rectangle(w), rect)) {
+    bb <- boundingbox(w)
+    if(!is.subset.owin(bb, rect))
+      stop(paste("The new rectangle",
+                 sQuote("rect"),
+                 "does not contain the window",
+                 sQuote("win")))
+  }
+  xr <- rect$xrange
+  yr <- rect$yrange
+  switch(w$type,
+         rectangle={
+           return(owin(xr, yr,
+                       poly=list(x=w$xrange[c(1L,2L,2L,1L)],
+                                 y=w$yrange[c(1L,1L,2L,2L)]),
+                       check=FALSE))
+         },
+         polygonal={
+           return(owin(xr, yr, poly=w$bdry, check=FALSE))
+         },
+         mask={
+           newseq <- function(oldseq, newrange, dstep) {
+             oldends <- range(oldseq)
+             nleft <- max(0, floor((oldends[1L] - newrange[1L])/dstep))
+             nright <- max(0, floor((newrange[2L] - oldends[2L])/dstep))
+             newstart <- max(oldends[1L] - nleft * dstep, newrange[1L])
+             newend <- min(oldends[2L] + nright * dstep, newrange[2L])
+             seq(from=newstart, by=dstep, to=newend)
+           }
+           xcol <- newseq(w$xcol, xr, mean(diff(w$xcol)))
+           yrow <- newseq(w$yrow, yr, mean(diff(w$yrow)))
+           newmask <- as.mask(xy=list(x=xcol, y=yrow))
+           xx <- rasterx.mask(newmask)
+           yy <- rastery.mask(newmask)
+           newmask$m <- inside.owin(xx, yy, w)
+           return(newmask)
+         }
+         )
+}
+  
+    
+  
diff --git a/R/distcdf.R b/R/distcdf.R
new file mode 100644
index 0000000..aafa7a7
--- /dev/null
+++ b/R/distcdf.R
@@ -0,0 +1,117 @@
+#'
+#'  distcdf.R
+#'
+#' cdf of |X1-X2| when X1,X2 are iid uniform in W, etc
+#'
+#'  $Revision: 1.10 $  $Date: 2016/02/11 10:17:12 $
+#'
+
+distcdf <- function(W, V=W, ..., dW=1, dV=dW, nr=1024, regularise=TRUE) {
+  reflexive <- missing(V) && missing(dV)
+  diffuse <- is.owin(W) && is.owin(V)
+  uniformW <- identical(dW, 1)
+  uniformV <- identical(dV, 1)
+  uniform <- uniformW && uniformV
+
+  if(is.owin(W)) {
+    W <- as.mask(as.owin(W), ...)
+    dW <- as.im(dW, W=W)
+  } else if(is.ppp(W)) {
+    if(uniformW) {
+      #' discrete uniform distribution on W
+      dW <- pixellate(W, ...)
+    } else {
+      #' dW should be a weight or vector of weights
+      if(!is.vector(dW) || !is.numeric(dW))
+        stop("If W is a point pattern, dW should be a vector of weights")
+      if(length(dW) == 1L) {
+        dW <- rep(dW, npoints(W))
+      } else stopifnot(length(dW) == npoints(W))
+      dW <- pixellate(W, weights=dW, ...)
+    }
+  } else stop("W should be a point pattern or a window")
+  
+  if(is.owin(V)) {
+    V <- as.mask(as.owin(V), ...)
+    dV <- as.im(dV, W=V)
+  } else if(is.ppp(V)) {
+    if(uniformV) {
+      #' discrete uniform distribution on V
+      dV <- pixellate(V, ...)
+    } else {
+      #' dV should be a weight or vector of weights
+      if(!is.vector(dV) || !is.numeric(dV))
+        stop("If V is a point pattern, dV should be a vector of weights")
+      if(length(dV) == 1L) {
+        dV <- rep(dV, npoints(V))
+      } else stopifnot(length(dV) == npoints(V))
+      dV <- pixellate(V, weights=dV, ...)
+    }
+  } else stop("V should be a point pattern or a window")
+
+  if(!uniformW && min(dW) < 0) 
+    stop("Negative values encountered in dW")
+  
+  if(!uniformV && min(dV) < 0) 
+    stop("Negative values encountered in dV")
+
+  #' compute
+  if(diffuse && uniform) {
+    #' uniform distributions on windows 
+    g <- if(reflexive) setcov(W, ...) else setcov(W, V, ...)
+  } else {
+    g <- if(reflexive) imcov(dW) else imcov(dW, dV)
+  }
+  r <- as.im(function(x,y) { sqrt(x^2 + y^2) }, g)
+  rvals <- as.vector(as.matrix(r))
+  gvals <- as.vector(as.matrix(g))
+  rgrid <- seq(0, max(rvals), length=nr)
+  dr <- max(rvals)/(nr-1)
+  h <- whist(rvals, breaks=rgrid, weights=gvals/sum(gvals))
+  ch <- c(0,cumsum(h))
+  #' regularise at very short distances
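+  #' (the CDF of the distance between two independent uniform points grows
+  #'  like a constant times r^2 as r -> 0 in two dimensions, so the noisy
+  #'  first few histogram bins are replaced by this quadratic profile)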
+  if(regularise) {
+    sevenpix <- 7 * with(r, max(xstep, ystep))
+    ii <- round(sevenpix/dr)
+    ch[1:ii] <- ch[ii] * (rgrid[1:ii]/rgrid[ii])^2
+  }
+  #' ok
+  result <- fv(data.frame(r=rgrid, f=ch),
+                "r", quote(CDF(r)),
+               "f", , range(rvals), c("r","%s(r)"),
+               c("Interpoint distance","Cumulative probability"),
+               fname="CDF")
+  return(result)
+}
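+# Editor's usage sketch:
+#   G <- distcdf(square(1))  # CDF of |X1-X2| for X1, X2 uniform in unit square
+#   plot(G)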
+
+bw.frac <- function(X, ..., f=1/4) {
+  X <- as.owin(X)
+  g <- distcdf(X, ...)
+  r <- with(g, .x)
+  Fr <- with(g, .y)
+  iopt <- min(which(Fr >= f))
+  ropt <- r[iopt]
+  attr(ropt, "f") <- f
+  attr(ropt, "g") <- g
+  class(ropt) <- c("bw.frac", class(ropt))
+  return(ropt)
+}
+
+print.bw.frac <- function(x, ...) {
+  print(as.numeric(x), ...)
+}
+
+plot.bw.frac <- function(x, ...) {
+  xname <- short.deparse(substitute(x))
+  g <- attr(x, "g")
+  f <- attr(x, "f")
+  ropt <- as.numeric(x)
+  do.call(plot,
+          resolve.defaults(list(g),
+                             list(...),
+                             list(main=xname)))
+  abline(v=ropt, lty=3)
+  abline(h=f, lty=3)
+  invisible(NULL)
+}
+
diff --git a/R/distfun.R b/R/distfun.R
new file mode 100755
index 0000000..e0f16cd
--- /dev/null
+++ b/R/distfun.R
@@ -0,0 +1,117 @@
+#
+#   distfun.R
+#
+#   distance function (returns a function of x,y)
+#
+#   $Revision: 1.23 $   $Date: 2017/06/05 10:31:58 $
+#
+
+distfun <- function(X, ...) {
+  UseMethod("distfun")
+}
+
+distfun.ppp <- function(X, ..., k=1) {
+  # this line forces X to be bound
+  stopifnot(is.ppp(X))
+  stopifnot(length(k) == 1)
+  g <- function(x,y=NULL) {
+    Y <- xy.coords(x, y)[c("x", "y")]
+    nncross(Y, X, what="dist", k=k)
+  }
+  attr(g, "Xclass") <- "ppp"
+  g <- funxy(g, as.rectangle(as.owin(X)))
+  attr(g, "k") <- k
+  class(g) <- c("distfun", class(g))
+  return(g)
+}
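+# Editor's usage sketch (using the 'cells' dataset shipped with spatstat):
+#   f <- distfun(cells)
+#   f(0.5, 0.5)   # distance from (0.5, 0.5) to the nearest point of 'cells'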
+
+distfun.psp <- function(X, ...) {
+  # this line forces X to be bound
+  stopifnot(is.psp(X))
+  g <- function(x,y=NULL) {
+    Y <-  xy.coords(x, y)[c("x", "y")]
+    nncross(Y, X, what="dist")
+  }
+  attr(g, "Xclass") <- "psp"
+  g <- funxy(g, as.rectangle(as.owin(X)))
+  class(g) <- c("distfun", class(g))
+  return(g)
+}
+
+distfun.owin <- function(X, ..., invert=FALSE) {
+  # this line forces X to be bound
+  stopifnot(is.owin(X))
+  #
+  P <- edges(X)
+  #
+  g <- function(x,y=NULL) {
+    Y <-  xy.coords(x, y)
+    inside <- inside.owin(Y$x, Y$y, X)
+    D <- nncross(Y, P, what="dist")
+    out <- if(!invert) ifelseAX(inside, 0, D) else ifelseXB(inside, D, 0)
+    return(out)
+  }
+  attr(g, "Xclass") <- "owin"
+  g <- funxy(g, as.rectangle(as.owin(X)))
+  class(g) <- c("distfun", class(g))
+  return(g)
+}
+
+as.owin.distfun <- function(W, ..., fatal=TRUE) {
+  X <- get("X", envir=environment(W))
+  result <- if(is.owin(X)) as.rectangle(X) else as.owin(X, ..., fatal=fatal)
+  return(result)
+}
+
+domain.distfun <- Window.distfun <- function(X, ...) { as.owin(X) }
+
+as.im.distfun <- function(X, W=NULL, ...,
+                           eps=NULL, dimyx=NULL, xy=NULL,
+                           na.replace=NULL, approx=TRUE) {
+  k <- attr(X, "k")
+  if(approx && is.null(W) && (is.null(k) || (k == 1))) {
+    # use 'distmap' for speed
+    env <- environment(X)
+    Xdata  <- get("X",      envir=env)
+    args <- list(X=Xdata, eps=eps, dimyx=dimyx, xy=xy)
+    if(is.owin(Xdata)) {
+      args <- append(args, list(invert = get("invert", envir=env)))
+    }
+    D <- do.call(distmap, args = args)
+    if(!is.null(na.replace))
+      D$v[is.na(D$v)] <- na.replace
+  } else if(identical(attr(X, "Xclass"), "ppp")) {
+    # point pattern --- use nngrid/knngrid
+    env <- environment(X)
+    Xdata  <- get("X",      envir=env)
+    D <- nnmap(Xdata, W=W, what="dist", k=k, 
+               eps=eps, dimyx=dimyx, xy=xy, na.replace=na.replace,
+               ...)
+  } else {
+    # evaluate function at pixel centres
+    D <- as.im.function(X, W=W,
+                        eps=eps, dimyx=dimyx, xy=xy, na.replace=na.replace)
+  }
+  return(D)
+}
+
+print.distfun <- function(x, ...) {
+  xtype <- attr(x, "Xclass")
+  typestring <- switch(xtype,
+                       ppp="point pattern",
+                       psp="line segment pattern",
+                       owin="window",
+                       "unrecognised object")
+  objname <- switch(xtype,
+                    ppp="point",
+                    psp="line segment",
+                    "object")
+  cat(paste("Distance function for", typestring, "\n"))
+  X <- get("X", envir=environment(x))
+  print(X)
+  if(!is.null(k <- attr(x, "k")) && k > 1)
+    cat(paste("Distance to", ordinal(k), "nearest", objname,
+              "will be computed\n"))
+  return(invisible(NULL))
+}
+
diff --git a/R/distfunlpp.R b/R/distfunlpp.R
new file mode 100644
index 0000000..9406998
--- /dev/null
+++ b/R/distfunlpp.R
@@ -0,0 +1,45 @@
+#
+# distfunlpp.R
+#
+#   method for 'distfun' for class 'lpp'
+#
+#   $Revision: 1.2 $ $Date: 2016/02/11 09:36:11 $
+#
+
+distfun.lpp <- local({
+  
+  distfun.lpp <- function(X, ..., k=1) {
+    stopifnot(inherits(X, "lpp"))
+    force(X)
+    force(k)
+    stopifnot(length(k) == 1)
+    L <- as.linnet(X)
+    f <- function(x, y=NULL, seg=NULL, tp=NULL, ...) {
+      # L is part of the environment
+      Y <- as.lpp(x=x, y=y, seg=seg, tp=tp, L=L)
+      d <- nncross.lpp(Y, X, what="dist", k=k)
+      return(d)
+    }
+    f <- linfun(f, L)
+    assign("k", k, envir=environment(f))
+    assign("X", X, envir=environment(f))
+    attr(f, "explain") <- uitleggen
+    return(f)
+  }
+
+  uitleggen <- function(x, ...) {
+    splat("Distance function for lpp object")
+    envx <- environment(x)
+    k <-  get("k", envir=envx)
+    if(k != 1L)
+      splat("Yields distance to", ordinal(k), "nearest point")
+    X <-  get("X", envir=envx)
+    print(X)
+  }
+
+  distfun.lpp
+})
+
+
+
+
diff --git a/R/distmap.R b/R/distmap.R
new file mode 100755
index 0000000..6262229
--- /dev/null
+++ b/R/distmap.R
@@ -0,0 +1,121 @@
+#
+#
+#      distmap.R
+#
+#      $Revision: 1.23 $     $Date: 2017/06/05 10:31:58 $
+#
+#
+#     Distance transforms
+#
+#
+distmap <- function(X, ...) {
+  UseMethod("distmap")
+}
+
+distmap.ppp <- function(X, ...) {
+  verifyclass(X, "ppp")
+  e <- exactdt(X, ...)
+  W <- e$w
+  uni <- unitname(W)
+  dmat <- e$d
+  imat <- e$i
+  V <- im(dmat, W$xcol, W$yrow, unitname=uni)
+  I <- im(imat, W$xcol, W$yrow, unitname=uni)
+  if(X$window$type == "rectangle") {
+    # distance to frame boundary
+    bmat <- e$b
+    B <- im(bmat, W$xcol, W$yrow, unitname=uni)
+  } else {
+    # distance to window boundary, not frame boundary
+    bmat <- bdist.pixels(W, style="matrix")
+    B <- im(bmat, W$xcol, W$yrow, unitname=uni)
+    # clip all to window
+    V <- V[W, drop=FALSE]
+    I <- I[W, drop=FALSE]
+    B <- B[W, drop=FALSE]
+  }
+  attr(V, "index") <- I
+  attr(V, "bdry")  <- B
+  return(V)
+}
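+# Editor's usage sketch: D <- distmap(cells) yields a pixel image whose value
+# at (x,y) is the distance to the nearest point of 'cells'; attr(D, "index")
+# identifies which point is nearest, and attr(D, "bdry") gives the distance
+# to the window boundary.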
+
+distmap.owin <- function(X, ..., discretise=FALSE, invert=FALSE) {
+  verifyclass(X, "owin")
+  uni <- unitname(X)
+  if(X$type == "rectangle") {
+    M <- as.mask(X, ...)
+    Bdry <- im(bdist.pixels(M, style="matrix"),
+               M$xcol, M$yrow, unitname=uni)
+    if(!invert)
+      Dist <- as.im(M, value=0)
+    else 
+      Dist <- Bdry
+  } else if(X$type == "polygonal" && !discretise) {
+    Edges <- edges(X)
+    Dist <- distmap(Edges, ...)
+    Bdry <- attr(Dist, "bdry")
+    if(!invert) 
+      Dist[X] <- 0
+    else {
+      bb <- as.rectangle(X)
+      bigbox <- grow.rectangle(bb, diameter(bb)/4)
+      Dist[complement.owin(X, bigbox)] <- 0
+    }
+  } else {
+    X <- as.mask(X, ...)
+    if(invert)
+      X <- complement.owin(X)
+    xc <- X$xcol
+    yr <- X$yrow
+    nr <- X$dim[1L]
+    nc <- X$dim[2L]
+# pad out the input image with a margin of width 1 on all sides
+    mat <- X$m
+    pad <- invert # boundary condition is opposite of value inside W
+    mat <- cbind(pad, mat, pad)
+    mat <- rbind(pad, mat, pad)
+# call C routine
+    res <- .C("distmapbin",
+              xmin=as.double(X$xrange[1L]),
+              ymin=as.double(X$yrange[1L]),
+              xmax=as.double(X$xrange[2L]),
+              ymax=as.double(X$yrange[2L]),
+              nr = as.integer(nr),
+              nc = as.integer(nc),
+              inp = as.integer(as.logical(t(mat))),
+              distances = as.double(matrix(0, ncol = nc + 2, nrow = nr + 2)),
+              boundary = as.double(matrix(0, ncol = nc + 2, nrow = nr + 2)),
+              PACKAGE = "spatstat")
+  # strip off margins again
+    dist <- matrix(res$distances,
+                   ncol = nc + 2, byrow = TRUE)[2:(nr + 1), 2:(nc +1)]
+    bdist <- matrix(res$boundary,
+                    ncol = nc + 2, byrow = TRUE)[2:(nr + 1), 2:(nc +1)]
+  # cast as image objects
+    Dist <- im(dist,  xc, yr, unitname=uni)
+    Bdry <- im(bdist, xc, yr, unitname=uni)
+  }
+  attr(Dist, "bdry")  <- Bdry
+  return(Dist)
+}
+
+distmap.psp <- function(X, ...) {
+  verifyclass(X, "psp")
+  W <- as.mask(Window(X), ...)
+  uni <- unitname(W)
+  rxy <- rasterxy.mask(W)
+  xp <- rxy$x
+  yp <- rxy$y
+  E <- X$ends
+  big <- 2 * diameter(Frame(W))^2
+  z <- NNdist2segments(xp, yp, E$x0, E$y0, E$x1, E$y1, big)
+  xc <- W$xcol
+  yr <- W$yrow
+  Dist <- im(array(sqrt(z$dist2), dim=W$dim), xc, yr, unitname=uni)
+  Indx <- im(array(z$index, dim=W$dim), xc, yr, unitname=uni)
+  Bdry <- im(bdist.pixels(W, style="matrix"), xc, yr, unitname=uni)
+  attr(Dist, "index") <- Indx
+  attr(Dist, "bdry")  <- Bdry
+  return(Dist)
+}
+
diff --git a/R/dppm.R b/R/dppm.R
new file mode 100644
index 0000000..f239fd7
--- /dev/null
+++ b/R/dppm.R
@@ -0,0 +1,158 @@
+#'
+#'     dppm.R
+#'
+#'     $Revision: 1.8 $   $Date: 2017/06/05 10:31:58 $
+
+dppm <-
+  function(formula, family, data=NULL,
+           ...,
+           startpar = NULL,
+           method = c("mincon", "clik2", "palm"),
+           weightfun=NULL,
+           control=list(),
+           algorithm="Nelder-Mead",
+           statistic="K",
+           statargs=list(),
+           rmax = NULL,
+           covfunargs=NULL,
+           use.gam=FALSE,
+           nd=NULL, eps=NULL) {
+
+  # Instantiate family if not already done.
+  if(is.character(family))
+    family <- get(family, mode="function")
+  if(inherits(family, "detpointprocfamilyfun")) {
+    familyfun <- family
+    family <- familyfun()
+  }
+  verifyclass(family, "detpointprocfamily")
+
+  # If the intensity is the only unknown parameter, exit
+  # (this restriction should be lifted for the likelihood method)
+  if(length(family$freepar)==1 && (family$freepar %in% family$intensity))
+      stop("Only the intensity needs to be estimated. Please fit it directly with ppm.")
+  # Detect missing rhs of 'formula' and fix
+  if(inherits(formula, c("ppp", "quad"))){
+    Xname <- short.deparse(substitute(formula))
+    formula <- as.formula(paste(Xname, "~ 1"))
+  }
+  if(!inherits(formula, "formula"))
+    stop(paste("Argument 'formula' should be a formula"))
+
+#  kppm(formula, DPP = family, data = data, covariates = data,
+#       startpar = startpar, method = method, weightfun = weightfun,
+#       control = control, algorithm = algorithm, statistic = statistic,
+#       statargs = statargs, rmax = rmax, covfunargs = covfunargs,
+#       use.gam = use.gam, nd = nd, eps = eps, ...)
+
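+  # Editor's note: the kppm call is constructed explicitly and then evaluated
+  # below in an environment layered over the caller's frame (with the columns
+  # of 'data', if supplied, visible on top), so that kppm can resolve the
+  # formula and any covariates.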
+  thecall <- call("kppm",
+                  X=formula,
+                  DPP=family,
+                  data = data, covariates = data,
+                  startpar = startpar, method = method,
+                  weightfun = weightfun, control = control,
+                  algorithm = algorithm, statistic = statistic,
+                  statargs = statargs, rmax = rmax, covfunargs = covfunargs,
+                  use.gam = use.gam, nd = nd, eps = eps)
+  ncall <- length(thecall)
+  argh <- list(...)
+  nargh <- length(argh)
+  if(nargh > 0) {
+    thecall[ncall + 1:nargh] <- argh
+    names(thecall)[ncall + 1:nargh] <- names(argh)
+  }
+  callenv <- parent.frame()
+  if(!is.null(data)) callenv <- list2env(data, parent=callenv)
+  result <- eval(thecall, envir=callenv, enclos=baseenv())
+  return(result)
+}
+
+## Auxiliary function to mimic cluster models for DPPs in kppm code
+spatstatDPPModelInfo <- function(model){
+  out <- list(
+    modelname = paste(model$name, "DPP"), # In modelname field of mincon fv obj.
+    descname = paste(model$name, "DPP"), # In desc field of mincon fv obj.
+    modelabbrev = paste(model$name, "DPP"), # In fitted obj.
+    printmodelname = function(...) paste(model$name, "DPP"), # Used by print.kppm
+    parnames = model$freepar,
+    checkpar = function(par){
+      return(par)
+    },
+    checkclustargs = function(margs, old = TRUE) list(),
+    resolvedots = function(...){
+      ## returning the input arguments p, q, rmin, rmax in list with one element 'ctrl'
+      dots <- list(...)
+      return(list(ctrl = dots[c("p", "q", "rmin", "rmax")]))
+    },
+    ## K-function
+    K = function(par, rvals, ...){
+      if(length(par)==1 && is.null(names(par)))
+        names(par) <- model$freepar
+      mod <- update(model, as.list(par))
+      if(!valid(mod)){
+        return(rep(Inf, length(rvals)))
+      } else{
+        return(Kmodel(mod)(rvals))
+      }
+    },
+    ## pair correlation function
+    pcf = function(par, rvals, ...){
+      if(length(par)==1 && is.null(names(par)))
+        names(par) <- model$freepar
+      mod <- update(model, as.list(par))
+      if(!valid(mod)){
+        return(rep(Inf, length(rvals)))
+      } else{
+        return(pcfmodel(mod)(rvals))
+      }
+    },
+    ## sensible starting parameters
+    selfstart = function(X) {
+      return(model$startpar(model, X))
+    }
+    )
+  return(out)
+}
+
+## Auxiliary function used for DPP stuff in kppm.R
+dppmFixIntensity <- function(DPP, lambda, po){
+  lambdaname <- DPP$intensity
+  if(is.null(lambdaname))
+    warning("The model has no intensity parameter.\n",
+            "Prediction from the fitted model is invalid ",
+            "(but no warning or error will be given by predict.dppm).")
+  ## Update model object with estimated intensity if it is a free model parameter
+  if(lambdaname %in% DPP$freepar){
+    clusters <- update(DPP, structure(list(lambda), .Names=lambdaname))
+  } else{
+    clusters <- DPP
+    lambda <- intensity(clusters)
+    ## Overwrite po object with fake version
+    X <- po$Q$data
+    dont.complain.about(X)
+    po <- ppm(X~offset(log(lambda))-1)
+    po$fitter <- "dppm"
+    ## update pseudolikelihood value using code in logLik.ppm
+    po$maxlogpl.orig <- po$maxlogpl
+    po$maxlogpl <- logLik(po, warn=FALSE)
+    #########################################
+  }
+  return(list(clusters=clusters, lambda=lambda, po=po))
+}
+
+## Auxiliary function used for DPP stuff in kppm.R
+dppmFixAlgorithm <- function(algorithm, changealgorithm, clusters, startpar){
+  if(!setequal(clusters$freepar, names(startpar)))
+    stop("Names of startpar vector does not match the free parameters of the model.")
+  lower <- upper <- NULL
+  if(changealgorithm){
+    bb <- dppparbounds(clusters, names(startpar))
+    if(all(is.finite(bb))){
+      algorithm <- "Brent"
+      lower <- bb[1L]
+      upper <- bb[2L]
+    } else{
+      algorithm <- "BFGS"
+    }
+  }
+  return(list(algorithm = algorithm, lower = lower, upper = upper))
+}
diff --git a/R/dppmclass.R b/R/dppmclass.R
new file mode 100644
index 0000000..49226d3
--- /dev/null
+++ b/R/dppmclass.R
@@ -0,0 +1,35 @@
+is.dppm <- function#Recognise Fitted Determinantal Point Process Models
+### Check that an object inherits the class dppm
+(x
+ ### Any object.
+ ){
+    inherits(x, "dppm")
+    ### A single logical value.
+
+    ##keyword<< spatial
+    ##keyword<< manip
+    ##keyword<< models
+}
+
+plot.dppm <- function (x, ..., what = c("intensity", "statistic")){
+    objectname <- short.deparse(substitute(x))
+    if(missing(what) && is.stationary(x))
+        what <- "statistic"
+    plot.kppm(x, ..., xname = objectname, what = what)
+}
+
+Kmodel.dppm <- function (model, ...){
+    Kmodel(model$fitted, W=model$window)
+}
+
+pcfmodel.dppm <- function (model, ...){
+    pcfmodel(model$fitted, W=model$window)
+}
+
+intensity.dppm <- function (X, ...){
+    return(intensity(X$fitted))
+}
+
+reach.dppm <- function(x, ...){
+    reach(x$fitted, ...)
+}
diff --git a/R/dummify.R b/R/dummify.R
new file mode 100644
index 0000000..4f589c9
--- /dev/null
+++ b/R/dummify.R
@@ -0,0 +1,39 @@
+#
+# dummify.R
+#
+# Convert a factor to a matrix of dummy variables, etc.
+#
+#  $Revision: 1.5 $  $Date: 2016/02/11 10:17:12 $
+#
+
+dummify <- function(x) {
+  if(is.matrix(x) || is.data.frame(x)) {
+    x <- as.data.frame(x)
+    y <- do.call(data.frame, lapply(x, dummify))
+    return(as.matrix(y))
+  }
+  # x is 1-dimensional
+  if(is.complex(x)) 
+    return(as.matrix(data.frame(Re=Re(x), Im=Im(x))))
+  # convert factors etc
+  if(is.character(x)) 
+    x <- factor(x)
+  if(is.logical(x)) 
+    x <- factor(x, levels=c(FALSE,TRUE))
+  if(is.factor(x)) {
+    # convert to dummy variables
+    nx <- length(x)
+    lev <- levels(x)
+    y <- matrix(0L, nrow=nx, ncol=length(lev))
+    colnames(y) <- lev
+    y[cbind(seq_len(nx), as.integer(x))] <- 1L
+    return(y)
+  }
+  # convert to numeric
+  y <- as.numeric(x)
+  if(!is.matrix(y))
+    y <- matrix(y, ncol=1)
+  return(y)
+}
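+# Editor's worked example:
+#   dummify(factor(c("a", "b", "a")))
+#   ##      a b
+#   ## [1,] 1 0
+#   ## [2,] 0 1
+#   ## [3,] 1 0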
+
+
diff --git a/R/dummy.R b/R/dummy.R
new file mode 100755
index 0000000..e51bee8
--- /dev/null
+++ b/R/dummy.R
@@ -0,0 +1,409 @@
+#
+#	dummy.S
+#
+#	Utilities for generating patterns of dummy points
+#
+#       $Revision: 5.31 $     $Date: 2015/10/21 09:06:57 $
+#
+#	corners()	corners of window
+#	gridcenters()	points of a rectangular grid
+#	stratrand()	random points in each tile of a rectangular grid
+#	spokes()	Rolf's 'spokes' arrangement
+#	
+#	concatxy()	concatenate any lists of x, y coordinates
+#
+#	default.dummy()	Default action to create a dummy pattern
+#		
+	
+corners <- function(window) {
+	window <- as.owin(window)
+	x <- window$xrange[c(1L,2L,1L,2L)]
+	y <- window$yrange[c(1L,1L,2L,2L)]
+	return(list(x=x, y=y))
+}
+
+gridcenters <-	
+gridcentres <- function(window, nx, ny) {
+	window <- as.owin(window)
+	xr <- window$xrange
+	yr <- window$yrange
+	x <- seq(from=xr[1L], to=xr[2L], length.out = 2L * nx + 1L)[2L * (1:nx)]
+	y <- seq(from=yr[1L], to=yr[2L], length.out = 2L * ny + 1L)[2L * (1:ny)]
+	x <- rep.int(x, ny)
+	y <- rep.int(y, rep.int(nx, ny))
+	return(list(x=x, y=y))
+}
+
+stratrand <- function(window,nx,ny, k=1) {
+	
+	# divide window into an nx * ny grid of tiles
+	# and place k points at random in each tile
+	
+	window <- as.owin(window)
+
+	wide  <- diff(window$xrange)/nx
+	high  <- diff(window$yrange)/ny
+        cent <- gridcentres(window, nx, ny)
+	cx <- rep.int(cent$x, k)
+	cy <- rep.int(cent$y, k)
+	n <- nx * ny * k
+	x <- cx + runif(n, min = -wide/2, max = wide/2)
+	y <- cy + runif(n, min = -high/2, max = high/2)
+	return(list(x=x,y=y))
+}
+
+tilecentroids <- function (W, nx, ny)
+{
+  W <- as.owin(W)
+  if(W$type == "rectangle")
+    return(gridcentres(W, nx, ny))
+  else {
+    # approximate
+    W   <- as.mask(W)
+    rxy <- rasterxy.mask(W, drop=TRUE)
+    xx  <- rxy$x
+    yy  <- rxy$y
+    pid <- gridindex(xx,yy,W$xrange,W$yrange,nx,ny)$index
+    x   <- tapply(xx,pid,mean)
+    y   <- tapply(yy,pid,mean)
+    return(list(x=x,y=y))
+  }
+}
+
+cellmiddles <- local({
+  # auxiliary 
+  middle <- function(v) {
+    n <- length(v)
+    mid <- ceiling(n/2)
+    v[mid]
+  }
+
+  dcut <- function(x, nx, xrange) {
+    dx <- diff(xrange)/nx
+    fx <- ((x - xrange[1L])/dx) %% 1
+    bx <- dx * pmin(fx, 1-fx)
+    bx
+  }
+  
+  # main
+  cellmiddles <- function (W, nx, ny, npix=NULL, distances=FALSE) {
+    if(W$type == "rectangle")
+      return(gridcentres(W, nx, ny))
+
+    # pixel approximation to window
+    # This matches the pixel approximation used to compute tile areas
+    # and ensures that dummy points are generated only inside those tiles
+    # that have nonzero digital area
+    M   <- as.mask(W, dimyx=rev(npix))
+    xx <- as.vector(rasterx.mask(M, drop=TRUE))
+    yy <- as.vector(rastery.mask(M, drop=TRUE))
+    pid <- gridindex(xx,yy,W$xrange,W$yrange,nx,ny)$index
+
+    # compute tile centroids
+    xmid <- tapply(xx, pid, mean)
+    ymid <- tapply(yy, pid, mean)
+    # check whether they are inside window
+    ok <- inside.owin(xmid, ymid, W)
+    if(all(ok))
+      return(list(x=xmid, y=ymid))
+
+    # some problem tiles
+    bad <- rep.int(TRUE, nx * ny)
+    bad[as.integer(names(xmid))] <- !ok
+    badpid <- bad[pid]
+    if(!distances) {
+       midpix <- tapply(seq_along(pid)[badpid], pid[badpid], middle)
+    } else {
+      # find 'middle' points using boundary distances
+      Dlines <- im(outer(dcut(M$yrow,ny,M$yrange),
+                         dcut(M$xcol,nx,M$xrange),
+                         "pmin"),
+                   M$xcol, M$yrow, M$xrange, M$yrange)
+      Dbdry <- bdist.pixels(M)
+      Dtile <- eval.im(pmin(Dlines, Dbdry))
+      dtile <- as.vector(Dtile[M])
+      df <- data.frame(dtile=dtile, id=seq_along(dtile))[badpid, , drop=FALSE]
+      midpix <- by(df, pid[badpid], midpixid)
+    }
+    xmid[!ok] <- xx[midpix]
+    ymid[!ok] <- yy[midpix]
+    return(list(x=xmid,y=ymid))
+  }
+
+  midpixid <- function(z) { z$id[which.max(z$dtile)] }
+  
+  cellmiddles
+})
+
+spokes <- function(x, y, nrad = 3, nper = 3, fctr = 1.5, Mdefault=1) {
+	#
+	# Rolf Turner's "spokes" arrangement
+	#
+	# Places dummy points on radii of circles 
+	# emanating from each data point x[i], y[i]
+	#
+	#       nrad:    number of radii from each data point
+	#       nper:	 number of dummy points per radius
+	#       fctr:	 length of largest radius = fctr * M
+	#                where M is mean nearest neighbour distance in data
+	#
+        pat <- inherits(x,"ppp")
+        if(pat) w <- x$w
+        if(checkfields(x,c("x","y"))) {
+          y <- x$y
+          x <- x$x
+        }
+        M <- if(length(x) > 1) mean(nndist(x,y)) else Mdefault
+	lrad  <- fctr * M / nper
+	theta <- 2 * pi * (1:nrad)/nrad
+	cs    <- cos(theta)
+	sn    <- sin(theta)
+	xt    <- lrad * as.vector((1:nper) %o% cs)
+	yt    <- lrad * as.vector((1:nper) %o% sn)
+	xd    <- as.vector(outer(x, xt, "+"))
+	yd    <- as.vector(outer(y, yt, "+"))
+	
+        tmp <- list(x = xd, y = yd)
+        if(pat) return(as.ppp(tmp,W=w)[w]) else return(tmp)
+}
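+# Editor's usage sketch: spokes(runifpoint(10)) places, around each of the 10
+# data points, 3 dummy points on each of 3 radii (the defaults), clipped to
+# the window of the pattern.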
+	
+# concatenate any number of list(x,y) into a list(x,y)
+		
+concatxy <- function(...) {
+	x <- unlist(lapply(list(...), getElement, name="x"))
+	y <- unlist(lapply(list(...), getElement, name="y"))
+	if(length(x) != length(y))
+		stop("Internal error: lengths of x and y unequal")
+	return(list(x=x,y=y))
+}
+
+#------------------------------------------------------------
+
+default.dummy <- function(X, nd=NULL, random=FALSE, ntile=NULL, npix = NULL,
+                          quasi=FALSE, ..., eps=NULL, verbose=FALSE) {
+  # default action to create dummy points.
+  # regular grid of nd[1] * nd[2] points
+  # plus corner points of window frame,
+  # all clipped to window.
+  orig <- list(nd=nd, eps=eps, ntile=ntile, npix=npix)
+  orig <- orig[!sapply(orig, is.null)]
+  # 
+  X <- as.ppp(X)
+  win <- X$window
+  #
+  #
+  # default dimensions
+  a <- default.n.tiling(X, nd=nd, ntile=ntile, npix=npix,
+                        eps=eps, random=random, quasi=quasi, verbose=verbose)
+  nd    <- a$nd
+  ntile <- a$ntile
+  npix  <- a$npix
+  periodsample <- !quasi && !random &&
+                  is.mask(win) &&
+                  all(nd %% win$dim == 0)
+  # make dummy points
+  dummy <- if(quasi) rQuasi(prod(nd), as.rectangle(win)) else
+           if(random) stratrand(win, nd[1L], nd[2L], 1) else 
+           cellmiddles(win, nd[1L], nd[2L], npix)
+  dummy <- as.ppp(dummy, win, check=FALSE)
+  # restrict to window
+  if(!is.rectangle(win) && !periodsample)
+    dummy <- dummy[win]
+  # corner points
+  corn <- as.ppp(corners(win), win, check=FALSE)
+  corn <- corn[win]
+  dummy <- superimpose(dummy, corn, W=win, check=FALSE)
+  if(dummy$n == 0)
+    stop("None of the dummy points lies inside the window")
+  # pass parameters for computing weights
+  attr(dummy, "weight.parameters") <-
+    append(list(...), list(ntile=ntile, verbose=verbose, npix=npix))
+  # record parameters used to create dummy locations
+  attr(dummy, "dummy.parameters") <-
+    list(nd=nd, random=random, quasi=quasi, verbose=verbose, orig=orig)
+  return(dummy)
+}
+
+
+# Criteria:
+#   for rectangular windows,
+#       R1.  nd >= ntile
+#   for non-rectangular windows,
+#       R2. nd should be a multiple of ntile
+#       R3. each dummy point is also a pixel of the npix grid
+#       R4. npix should ideally be a multiple of nd, for speed
+#       R5. npix should be large, for accuracy
+#       R6. npix should not be too large, for speed
+#       R7. if the window is a mask, npix should ideally be
+#           a multiple of the mask array dimensions, for speed.
+#
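+# Editor's illustration (hypothetical numbers): for a mask with dimensions
+# 64 x 64 and ntile = c(32, 32), criterion R2 suggests nd = c(32, 32) or
+# c(64, 64); criteria R4 and R7 then favour npix = c(128, 128), a common
+# multiple of nd and of the mask dimensions.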
+
+default.n.tiling <- local({
+  # auxiliary
+  ensure2print <- function(x, verbose=TRUE, blah="user specified") {
+    xname <- short.deparse(substitute(x))
+    x <- ensure2vector(x)
+    if(verbose)
+      cat(paste(blah, xname, "=", x[1L], "*", x[2L], "\n"))
+    x
+  }
+  minmultiple <- function(n, lo, hi) {
+    if(lo > hi) {
+      temp <- hi
+      hi <- lo
+      lo <- temp
+    }
+    if(n > hi) return(hi)
+    m <- n * (floor(lo/n):ceiling(hi/n))
+    m <- m[m >= n & m >= lo & m <= hi]
+    if(length(m) > 0) min(m) else hi
+  }
+    
+  mindivisor <- function(N, lo, Nbig) {
+    d <- divisors(N)
+    ok <- (d >= lo)
+    if(any(ok)) return(min(d[ok]))
+    m <- floor(Nbig/N)
+    d <- unlist(lapply(as.list(seq_len(m) * N), divisors))
+    d <- sort(unique(d))
+    ok <- (d >= lo)
+    if(any(ok)) return(min(d[ok]))
+    return(Nbig)
+  }
+
+  min2mul <- function(n, lo, hi) 
+    c(minmultiple(n[1L], lo[1L], hi[1L]),
+      minmultiple(n[2L], lo[2L], hi[2L]))
+
+  min2div <- function(N, lo, Nbig) 
+    c(mindivisor(N[1L], lo[1L], Nbig[1L]),
+      mindivisor(N[2L], lo[2L], Nbig[2L]))
+
+  maxdiv <- function(n, k=1) {
+    if(length(n) > 1L)
+      return(c(maxdiv(n[1L], k),
+               maxdiv(n[2L], k)))
+    ## k-th largest divisor other than n
+    d <- divisors(n)
+    m <- length(d)
+    ans <- if(m == 2L) n else if(m < 2+k) d[2L] else d[m-k]
+    return(ans)
+  }
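+
+  # Worked examples of the helpers above (hand-checked arithmetic):
+  #   minmultiple(16, 20, 40): candidate multiples of 16 are 16, 32, 48;
+  #     only 32 lies in [20, 40], so the result is 32.
+  #   mindivisor(12, 5, 100): divisors of 12 that are >= 5 are {6, 12},
+  #     so the result is 6.
+  #   maxdiv(12, 1): divisors of 12 are {1, 2, 3, 4, 6, 12}; the largest
+  #     divisor other than 12 itself is 6.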
+
+  # main
+  default.n.tiling <- function(X,
+                               nd=NULL, ntile=NULL, npix=NULL,
+                               eps=NULL,
+                               random=FALSE, quasi=FALSE, verbose=TRUE) {
+  # computes dimensions of rectangular grids of 
+  #     - dummy points  (nd) (eps)
+  #     - tiles for grid weights (ntile)
+  #     - pixels for approximating area (npix)
+  # for data pattern X.
+  #
+  verifyclass(X, "ppp")
+  win <- X$window
+  pixels <- (win$type != "rectangle")
+  
+  if(nd.given <- !is.null(nd)) 
+    nd <- ensure2print(nd, verbose)
+  if(ntile.given <- !is.null(ntile)) 
+    ntile <- ensure2print(ntile, verbose)
+  if(npix.given <- !is.null(npix)) 
+    npix <- ensure2print(npix, verbose)
+
+  if(pixels) 
+    sonpixel <- rev(ensure2print(spatstat.options("npixel"), verbose, ""))
+
+  ndummy.min <- ensure2print(spatstat.options("ndummy.min"), verbose, "")
+  ndminX <- pmax(ndummy.min, 10 * ceiling(2 * sqrt(X$n)/10))
+  ndminX <- ensure2vector(ndminX)
+
+  if(!is.null(eps)) {
+    eps <- ensure2print(eps, verbose)
+    Xbox <- as.rectangle(as.owin(X))
+    sides <- with(Xbox, c(diff(xrange), diff(yrange)))
+    ndminX <- pmax(ndminX, ceiling(sides/eps))
+  }
+
+  # range of acceptable values for npix
+  if(npix.given)
+    Nmin <- Nmax <- npix
+  else 
+    switch(win$type,
+           rectangle = {
+             Nmin <- ensure2vector(X$n)
+             Nmax <- Inf
+           },
+           polygonal = {
+             Nmin <- sonpixel
+             Nmax <- 4 * sonpixel
+           },
+           mask={
+             nmask <- rev(win$dim)
+             Nmin <- nmask
+             Nmax <- pmax(2 * nmask, 4 * sonpixel)
+           })
+
+  # determine values of nd and ntile
+
+  if(nd.given && !ntile.given) {
+    # ntile must be a divisor of nd
+    if(any(nd > Nmax))
+      warning("number of dummy points nd exceeds maximum pixel dimensions")
+    ntile <- min2div(nd, ndminX, nd)
+  } else if(!nd.given && ntile.given) {
+    # nd must be a multiple of ntile
+    nd <- min2mul(ntile, ndminX, Nmin)
+    if(any(nd >= Nmin))
+      nd <- ntile
+  } else if(!nd.given && !ntile.given) {
+     if(!pixels) {
+       nd <- ntile <- ensure2vector(ndminX)
+       if(verbose)
+         cat(paste("nd and ntile default to", nd[1L], "*", nd[2L], "\n"))
+     } else {
+       # find suitable divisors of the number of pixels
+       nd <- ntile <- min2div(Nmin, ndminX, Nmax)
+       if(any(nd >= Nmin)) { # none suitable
+         if(verbose)
+           cat("No suitable divisor of pixel dimensions\n")
+         nd <- ntile <- ndminX
+       }
+     }
+  } else {
+    # both nd, ntile were given
+    if(any(ntile > nd))
+      warning("the number of tiles (ntile) exceeds the number of dummy points (nd)")
+  }
+
+  if(!ntile.given && quasi) {
+    if(verbose) cat("Adjusting ntile because quasi=TRUE\n")
+    ntile <- maxdiv(ntile, if(pixels) 2L else 1L)
+  } 
+ 
+  if(!npix.given && pixels) 
+    npix <- min2mul(nd, Nmin, Nmax)
+
+  if(verbose) {
+    if(!quasi)
+      cat(paste("dummy points:",
+                paste0(if(random) "stratified random in" else NULL,
+                       "grid"),
+                nd[1L], "x", nd[2L], "\n"))
+    else
+      cat(paste("dummy points:",
+                nd[1L], "x", nd[2L], "=", prod(nd),
+                "quasirandom points\n"))
+    cat(paste("weighting tiles", ntile[1L], "x", ntile[2L], "\n"))
+    if(pixels) cat(paste("pixel grid", npix[1L], "x", npix[2L], "\n"))
+  }
+
+  return(list(nd=nd, ntile=ntile, npix=npix))
+}
+
+  default.n.tiling
+})
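+
+# A minimal sketch of calling default.n.tiling() directly (illustrative
+# only, assuming spatstat is loaded; wrapped in if(FALSE)):
+if(FALSE) {
+  default.n.tiling(cells, verbose=TRUE)
+  # returns list(nd, ntile, npix): vectors of 2 integers
+  # (npix remains NULL for rectangular windows unless specified)
+}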
+
+
diff --git a/R/edgeRipley.R b/R/edgeRipley.R
new file mode 100755
index 0000000..7f57505
--- /dev/null
+++ b/R/edgeRipley.R
@@ -0,0 +1,185 @@
+#
+#        edgeRipley.R
+#
+#    $Revision: 1.16 $    $Date: 2017/06/05 10:31:58 $
+#
+#    Ripley isotropic edge correction weights
+#
+#  edge.Ripley(X, r, W)      compute isotropic correction weights
+#                            for centres X[i], radii r[i,j], window W
+#
+#  To estimate the K-function, see the idiom in "Kest.R"
+#
+#######################################################################
+
+edge.Ripley <- local({
+
+  small <- function(x) { abs(x) < .Machine$double.eps }
+
+  hang <- function(d, r) {
+    nr <- nrow(r)
+    nc <- ncol(r)
+    answer <- matrix(0, nrow=nr, ncol=nc)
+    # replicate d[i] over j index
+    d <- matrix(d, nrow=nr, ncol=nc)
+    hit <- (d < r)
+    answer[hit] <- acos(d[hit]/r[hit])
+    answer
+  }
+
+  edge.Ripley <- function(X, r, W=Window(X), method="C", maxweight=100) {
+    # X is a point pattern, or equivalent
+    X <- as.ppp(X, W)
+    W <- X$window
+
+    switch(W$type,
+           rectangle={},
+           polygonal={
+             if(method != "C")
+               stop(paste("Ripley isotropic correction for polygonal windows",
+                          "requires method = ", dQuote("C")))
+           },
+           mask={
+             stop(paste("sorry, Ripley isotropic correction",
+                        "is not implemented for binary masks"))
+           }
+           )
+
+    n <- npoints(X)
+
+    if(is.matrix(r) && nrow(r) != n)
+      stop("the number of rows of r should match the number of points in X")
+    if(!is.matrix(r)) {
+      if(length(r) != n)
+        stop("length of r is incompatible with the number of points in X")
+      r <- matrix(r, nrow=n)
+    }
+
+    #
+    Nr <- nrow(r)
+    Nc <- ncol(r)
+    if(Nr * Nc == 0) return(r)
+    
+    ##########
+  
+    x <- X$x
+    y <- X$y
+
+    stopifnot(method %in% c("interpreted", "C"))
+
+    switch(method,
+           interpreted = {
+           ######## interpreted R code for rectangular case #########
+
+             # perpendicular distance from point to each edge of rectangle
+             # L = left, R = right, D = down, U = up
+             dL  <- x - W$xrange[1L]
+             dR  <- W$xrange[2L] - x
+             dD  <- y - W$yrange[1L]
+             dU  <- W$yrange[2L] - y
+
+             # detect whether any points are corners of the rectangle
+             corner <- (small(dL) + small(dR) + small(dD) + small(dU) >= 2)
+  
+             # angle between (a) perpendicular to edge of rectangle
+             # and (b) line from point to corner of rectangle
+             bLU <- atan2(dU, dL)
+             bLD <- atan2(dD, dL)
+             bRU <- atan2(dU, dR)
+             bRD <- atan2(dD, dR)
+             bUL <- atan2(dL, dU)
+             bUR <- atan2(dR, dU)
+             bDL <- atan2(dL, dD)
+             bDR <- atan2(dR, dD)
+
+             # The above are all vectors [i]
+             # Now we compute matrices [i,j]
+
+             # half the angle subtended by the intersection between
+             # the circle of radius r[i,j] centred on point i
+             # and each edge of the rectangle (prolonged to an infinite line)
+
+             aL <- hang(dL, r)
+             aR <- hang(dR, r)
+             aD <- hang(dD, r) 
+             aU <- hang(dU, r)
+
+             # apply maxima
+             # note: a* are matrices; b** are vectors;
+             # b** are implicitly replicated over j index
+             cL <- pmin.int(aL, bLU) + pmin.int(aL, bLD)
+             cR <- pmin.int(aR, bRU) + pmin.int(aR, bRD)
+             cU <- pmin.int(aU, bUL) + pmin.int(aU, bUR)
+             cD <- pmin.int(aD, bDL) + pmin.int(aD, bDR)
+
+             # total exterior angle
+             ext <- cL + cR + cU + cD
+
+             # add pi/2 for corners 
+             if(any(corner))
+               ext[corner,] <- ext[corner,] + pi/2
+
+             # OK, now compute weight
+             weight <- 1 / (1 - ext/(2 * pi))
+
+           },
+           C = {
+             ############ C code #############################
+             switch(W$type,
+                    rectangle={
+                      z <- .C("ripleybox",
+                              nx=as.integer(n),
+                              x=as.double(x),
+                              y=as.double(y),
+                              rmat=as.double(r),
+                              nr=as.integer(Nc), #sic
+                              xmin=as.double(W$xrange[1L]),
+                              ymin=as.double(W$yrange[1L]),
+                              xmax=as.double(W$xrange[2L]),
+                              ymax=as.double(W$yrange[2L]),
+                              epsilon=as.double(.Machine$double.eps),
+                              out=as.double(numeric(Nr * Nc)),
+                              PACKAGE = "spatstat")
+                      weight <- matrix(z$out, nrow=Nr, ncol=Nc)
+                    },
+                    polygonal={
+                      Y <- edges(W)
+                      z <- .C("ripleypoly",
+                              nc=as.integer(n),
+                              xc=as.double(x),
+                              yc=as.double(y),
+                              nr=as.integer(Nc),
+                              rmat=as.double(r),
+                              nseg=as.integer(Y$n),
+                              x0=as.double(Y$ends$x0),
+                              y0=as.double(Y$ends$y0),
+                              x1=as.double(Y$ends$x1),
+                              y1=as.double(Y$ends$y1),
+                              out=as.double(numeric(Nr * Nc)),
+                              PACKAGE = "spatstat")
+                      angles <- matrix(z$out, nrow = Nr, ncol = Nc)
+                      weight <- 2 * pi/angles
+                    }
+                    )
+           }
+    )
+    # eliminate wild values
+    weight <- matrix(pmax.int(0, pmin.int(maxweight, weight)),
+                     nrow=Nr, ncol=Nc)
+    return(weight)
+  }
+
+  edge.Ripley
+})
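+
+# A minimal usage sketch of edge.Ripley() (illustrative only, assuming
+# spatstat is loaded; wrapped in if(FALSE)):
+if(FALSE) {
+  X <- runifpoint(20)
+  r <- pairdist(X)         # 20 x 20 matrix of interpoint distances
+  w <- edge.Ripley(X, r)   # matching matrix of isotropic correction weights
+}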
+
+rmax.Ripley <- function(W) {
+  W <- as.owin(W)
+  if(is.rectangle(W))
+    return(boundingradius(W))
+  if(is.polygonal(W) && length(W$bdry) == 1L)
+    return(boundingradius(W))
+  ## could have multiple connected components
+  pieces <- tiles(tess(image=connected(W)))
+  answer <- sapply(pieces, boundingradius)
+  return(as.numeric(answer))
+}
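+
+# A minimal check of rmax.Ripley() (illustrative only, assuming spatstat
+# is loaded): for the unit square, the bounding radius is half the
+# diagonal, sqrt(2)/2.
+if(FALSE) {
+  rmax.Ripley(owin())   # approximately 0.7071
+}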
diff --git a/R/edgeTrans.R b/R/edgeTrans.R
new file mode 100755
index 0000000..618b913
--- /dev/null
+++ b/R/edgeTrans.R
@@ -0,0 +1,150 @@
+#
+#        edgeTrans.R
+#
+#    $Revision: 1.15 $    $Date: 2016/04/25 02:34:40 $
+#
+#    Translation edge correction weights
+#
+#  edge.Trans(X)      compute translation correction weights
+#                     for each pair of points from point pattern X 
+#
+#  edge.Trans(X, Y, W)   compute translation correction weights
+#                        for all pairs of points X[i] and Y[j]
+#                        (i.e. one point from X and one from Y)
+#                        in window W
+#
+#  edge.Trans(X, Y, W, paired=TRUE)
+#                        compute translation correction weights
+#                        for each corresponding pair X[i], Y[i].
+#
+#  To estimate the K-function see the idiom in "Kest.R"
+#
+#######################################################################
+
+edge.Trans <- function(X, Y=X, W=Window(X), exact=FALSE, paired=FALSE,
+                       ..., 
+                       trim=spatstat.options("maxedgewt"),
+                       dx=NULL, dy=NULL,
+                       give.rmax=FALSE,
+                       gW = NULL) {
+  given.dxdy <- !is.null(dx) && !is.null(dy)
+  if(!given.dxdy) {
+    ## dx, dy will be computed from X, Y
+    X <- as.ppp(X, W)
+    W <- X$window
+    Y <- if(!missing(Y)) as.ppp(Y, W) else X
+    nX <- X$n
+    nY <- Y$n
+    if(paired) {
+      if(nX != nY)
+        stop("X and Y should have equal length when paired=TRUE")
+      dx <- Y$x - X$x
+      dy <- Y$y - X$y
+    } else {
+      dx <- outer(X$x, Y$x, "-")
+      dy <- outer(X$y, Y$y, "-")
+    }
+  } else {
+    ## dx, dy given
+    if(paired) {
+      ## dx, dy are vectors
+      check.nvector(dx)
+      check.nvector(dy)
+      stopifnot(length(dx) == length(dy))
+    } else {
+      ## dx, dy are matrices
+      check.nmatrix(dx)
+      check.nmatrix(dy)
+      stopifnot(all(dim(dx) == dim(dy)))
+      nX <- nrow(dx)
+      nY <- ncol(dx)
+    }
+    stopifnot(is.owin(W))
+  }
+    
+  ## For irregular polygons, exact evaluation is very slow;
+  ## so use pixel approximation, unless exact=TRUE
+  if(W$type == "polygonal" && !exact)
+    W <- as.mask(W)
+
+  ## compute
+  if(!paired) {
+    dx <- as.vector(dx)
+    dy <- as.vector(dy)
+  }
+  switch(W$type,
+         rectangle={
+           ## Fast code for this case
+           wide <- diff(W$xrange)
+           high <- diff(W$yrange)
+           weight <- wide * high / ((wide - abs(dx)) * (high - abs(dy)))
+         },
+         polygonal={
+           ## This code is SLOW
+           n <- length(dx)
+           weight <- numeric(n)
+           if(n > 0) {
+             for(i in seq_len(n)) {
+               Wshift <- shift(W, c(dx[i], dy[i]))
+               weight[i] <- overlap.owin(W, Wshift)
+             }
+             weight <- area(W)/weight
+           }
+         },
+         mask={
+           ## compute set covariance of window
+           if(is.null(gW)) gW <- setcov(W)
+           ## evaluate set covariance at these vectors
+           gvalues <- lookup.im(gW, dx, dy, naok=TRUE, strict=FALSE)
+           weight <- area(W)/gvalues
+         }
+         )
+  
+  ## clip high values
+  if(length(weight) > 0)
+    weight <- pmin.int(weight, trim)
+
+  if(!paired) 
+    weight <- matrix(weight, nrow=nX, ncol=nY)
+
+  if(give.rmax) 
+    attr(weight, "rmax") <- rmax.Trans(W, gW)
+  return(weight)
+}
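+
+# A minimal usage sketch of edge.Trans() (illustrative only, assuming
+# spatstat is loaded; wrapped in if(FALSE)):
+if(FALSE) {
+  X <- runifpoint(10)
+  w <- edge.Trans(X)
+  # w is a 10 x 10 matrix; for a rectangle W,
+  # w[i,j] = area(W) / area(W intersected with W shifted by X[j] - X[i])
+}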
+
+## maximum radius for translation correction
+## = radius of largest circle centred at 0 contained in W + ^W
+
+rmax.Trans <- function(W, g=setcov(W)) {
+  ## calculate maximum permissible 'r' value
+  ## for validity of translation correction
+  W <- as.owin(W)
+  if(is.rectangle(W)) 
+    return(shortside(W))
+  ## find support of set covariance
+  if(is.null(g)) g <- setcov(W)
+  eps <- 2 * max(1, max(g)) * .Machine$double.eps
+  gsupport <- solutionset(g > eps)
+  gboundary <- bdry.mask(gsupport)
+  xy <- rasterxy.mask(gboundary, drop=TRUE)
+  rmax <- with(xy, sqrt(min(x^2 + y^2)))
+  return(rmax)
+}
+
+## maximum radius for rigid motion correction
+## = radius of smallest circle centred at 0 containing W + ^W
+
+rmax.Rigid <- function(X, g=setcov(Window(X))) {
+  stopifnot(is.ppp(X) || is.owin(X))
+  if(is.ppp(X))
+    return(max(pairdist(X[chull(X)])))
+  W <- X
+  if(is.rectangle(W)) return(diameter(W))
+  if(is.null(g)) g <- setcov(W)
+  eps <- 2 * max(1, max(g)) * .Machine$double.eps
+  gsupport <- solutionset(g > eps)
+  gboundary <- bdry.mask(gsupport)
+  xy <- rasterxy.mask(gboundary, drop=TRUE)
+  rmax <- with(xy, sqrt(max(x^2 + y^2)))
+  return(rmax)
+}
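+
+# Minimal checks of the two r-range helpers (illustrative only, assuming
+# spatstat is loaded):
+if(FALSE) {
+  rmax.Trans(owin())   # shortest side of the unit square = 1
+  rmax.Rigid(owin())   # diameter of the unit square = sqrt(2)
+}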
diff --git a/R/edges2triangles.R b/R/edges2triangles.R
new file mode 100644
index 0000000..55362f2
--- /dev/null
+++ b/R/edges2triangles.R
@@ -0,0 +1,124 @@
+#
+#   edges2triangles.R
+#
+#   $Revision: 1.14 $  $Date: 2017/06/05 10:31:58 $
+#
+
+edges2triangles <- function(iedge, jedge, nvert=max(iedge, jedge),
+                            ..., check=TRUE, friendly=rep(TRUE, nvert)) {
+  usefriends <- !missing(friendly)
+  if(check) {
+    stopifnot(length(iedge) == length(jedge))
+    stopifnot(all(iedge > 0))
+    stopifnot(all(jedge > 0))
+    if(!missing(nvert)) {
+      stopifnot(all(iedge <= nvert))
+      stopifnot(all(jedge <= nvert))
+    }
+    if(usefriends) {
+      stopifnot(is.logical(friendly))
+      stopifnot(length(friendly) == nvert)
+      usefriends <- !all(friendly)
+    }
+  }
+  # zero length data, or not enough to make triangles
+  if(length(iedge) < 3) return(matrix(, nrow=0, ncol=3))
+  # sort in increasing order of 'iedge'
+  oi <- fave.order(iedge)
+  iedge <- iedge[oi]
+  jedge <- jedge[oi]
+  # call C
+  storage.mode(nvert) <- storage.mode(iedge) <- storage.mode(jedge) <- "integer"
+  if(!usefriends) {
+    zz <- .Call("triograph",
+                nv=nvert, iedge=iedge, jedge=jedge,
+                PACKAGE="spatstat")
+  } else {
+    fr <- as.logical(friendly)
+    storage.mode(fr) <- "integer"
+    zz <- .Call("trioxgraph",
+                nv=nvert, iedge=iedge, jedge=jedge, friendly=fr,
+                PACKAGE="spatstat")
+  }
+  mat <- as.matrix(as.data.frame(zz))
+  return(mat)
+}
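+
+# A minimal usage sketch of edges2triangles() (illustrative only, assuming
+# spatstat is loaded): a graph on 4 vertices whose only 3-cycle is (1,2,3).
+if(FALSE) {
+  edges2triangles(iedge=c(1, 2, 1, 3), jedge=c(2, 3, 3, 4))
+  # returns a 1 x 3 matrix with the single row  1 2 3
+}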
+
+# compute triangle diameters as well
+
+trianglediameters <- function(iedge, jedge, edgelength, ..., 
+                              nvert=max(iedge, jedge),
+                              dmax=Inf, check=TRUE) {
+  if(check) {
+    stopifnot(length(iedge) == length(jedge))
+    stopifnot(length(iedge) == length(edgelength))
+    stopifnot(all(iedge > 0))
+    stopifnot(all(jedge > 0))
+    if(!missing(nvert)) {
+      stopifnot(all(iedge <= nvert))
+      stopifnot(all(jedge <= nvert))
+    }
+    if(is.finite(dmax)) check.1.real(dmax)
+  }
+  # zero length data
+  if(length(iedge) == 0 || dmax < 0)
+    return(data.frame(i=integer(0),
+                      j=integer(0),
+                      k=integer(0),
+                      diam=numeric(0)))
+
+  # call C
+  storage.mode(nvert) <- storage.mode(iedge) <- storage.mode(jedge) <- "integer"
+  storage.mode(edgelength) <- "double"
+  if(is.infinite(dmax)) {
+    zz <- .Call("triDgraph",
+                nv=nvert, iedge=iedge, jedge=jedge, edgelength=edgelength,
+                PACKAGE = "spatstat")
+  } else {
+    storage.mode(dmax) <- "double"
+    zz <- .Call("triDRgraph",
+                nv=nvert, iedge=iedge, jedge=jedge, edgelength=edgelength,
+                dmax=dmax,
+                PACKAGE = "spatstat")
+  }    
+  df <- as.data.frame(zz)
+  colnames(df) <- c("i", "j", "k", "diam")
+  return(df)
+}
+
+closetriples <- function(X, rmax) {
+  a <- closepairs(X, rmax, what="ijd", twice=FALSE, neat=FALSE)
+  tri <- trianglediameters(a$i, a$j, a$d, nvert=npoints(X), dmax=rmax)
+  return(tri)
+}
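+
+# A minimal usage sketch of closetriples() (illustrative only, assuming
+# spatstat is loaded): all triples of points that are pairwise closer
+# than rmax, with the diameter (longest side) of each triple.
+if(FALSE) {
+  tri <- closetriples(runifpoint(50), rmax=0.1)
+  head(tri)   # data frame with columns i, j, k, diam
+}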
+
+# extract 'vees', i.e. triples (i, j, k) where i ~ j and i ~ k
+
+edges2vees <- function(iedge, jedge, nvert=max(iedge, jedge),
+                            ..., check=TRUE) {
+  if(check) {
+    stopifnot(length(iedge) == length(jedge))
+    stopifnot(all(iedge > 0))
+    stopifnot(all(jedge > 0))
+    if(!missing(nvert)) {
+      stopifnot(all(iedge <= nvert))
+      stopifnot(all(jedge <= nvert))
+    }
+  }
+  # zero length data, or not enough to make vees
+  if(length(iedge) < 2)
+    return(data.frame(i=integer(0),
+                      j=integer(0),
+                      k=integer(0)))
+  # call 
+  vees <- .Call("graphVees",
+                nv = nvert,
+                iedge = iedge,
+                jedge = jedge,
+                PACKAGE="spatstat")
+  names(vees) <- c("i", "j", "k")
+  vees <- as.data.frame(vees)
+  return(vees)
+}
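+
+# A minimal usage sketch of edges2vees() (illustrative only, assuming
+# spatstat is loaded): edges 1~2 and 1~3 form the single vee (1, 2, 3).
+if(FALSE) {
+  edges2vees(iedge=c(1, 1), jedge=c(2, 3))
+}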
+
+  
diff --git a/R/edit.R b/R/edit.R
new file mode 100644
index 0000000..8524d92
--- /dev/null
+++ b/R/edit.R
@@ -0,0 +1,38 @@
+##   edit.R
+##
+##   Methods for 'edit'
+##
+##   $Revision: 1.3 $ $Date: 2015/04/19 06:14:21 $
+
+edit.ppp <- local({
+
+  edit.ppp <- function(name, ...) {
+    X <- name
+    df <- as.data.frame(X)
+    df <- as.data.frame(lapply(df, as.num.or.char))
+    Y <- edit(df, ...)
+    Z <- as.ppp(Y, W=Window(X))
+    return(Z)
+  }
+
+  as.num.or.char <- function(x) {
+    if (is.character(x)) x else
+    if (is.numeric(x)) {
+      storage.mode(x) <- "double"
+      x
+    } else as.character(x)
+  }
+
+  edit.ppp
+})
+
+edit.im <- function(name, ...) {
+  X <- name
+  M <- transmat(as.matrix(X), from="spatstat", to="European")
+  Y <- as.data.frame(M)
+  Z <- edit(Y, ...)
+  X[] <- transmat(as.matrix(Z), from="European", to="spatstat")
+  return(X)
+}
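+
+# A minimal usage sketch of the edit methods (illustrative only, assuming
+# spatstat is loaded and the session is interactive, since edit() opens
+# a data editor):
+if(FALSE) {
+  X <- edit(cells)            # edit the point coordinates as a data frame
+  Z <- edit(density(cells))   # edit the pixel values in matrix layout
+}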
+
+  
diff --git a/R/eem.R b/R/eem.R
new file mode 100755
index 0000000..f30dfdf
--- /dev/null
+++ b/R/eem.R
@@ -0,0 +1,17 @@
+# eem.R
+#
+# Computes the Stoyan-Grabarnik "exponential energy weights" 
+#
+# $Revision: 1.4 $ $Date: 2008/07/25 19:51:05 $
+#
+
+eem <- function(fit, check=TRUE) {
+  verifyclass(fit, "ppm")
+  lambda <- fitted.ppm(fit, check=check)
+  Q <- quad.ppm(fit)
+  Z <- is.data(Q)
+  eemarks <- 1/lambda[Z]
+  attr(eemarks, "type") <- "eem"
+  attr(eemarks, "typename") <- "exponential energy marks"
+  return(eemarks)
+}
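+
+# A minimal usage sketch of eem() (illustrative only, assuming spatstat is
+# loaded).  For an adequate model, the Stoyan-Grabarnik weights 1/lambda
+# summed over the data points should be close to the window area.
+if(FALSE) {
+  fit <- ppm(cells ~ 1)
+  sum(eem(fit))   # compare with area(Window(cells)) = 1
+}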
diff --git a/R/effectfun.R b/R/effectfun.R
new file mode 100755
index 0000000..d418f5c
--- /dev/null
+++ b/R/effectfun.R
@@ -0,0 +1,187 @@
+#
+#  effectfun.R
+#
+#   $Revision: 1.20 $ $Date: 2017/06/05 10:31:58 $
+#
+
+effectfun <- local({
+
+  okclasses <- c("ppm", "kppm", "lppm", "dppm", "rppm", "profilepl")
+
+effectfun <-  function(model, covname, ..., se.fit=FALSE) {
+  if(!inherits(model, okclasses))
+    stop(paste("First argument 'model' should be a fitted model of class",
+               commasep(sQuote(okclasses), " or ")),
+	 call.=FALSE)
+  orig.model <- model	 
+  model <- as.ppm(model)
+  dotargs <- list(...)
+  # determine names of covariates involved
+  intern.names <-
+    if(is.marked.ppm(model)) c("x", "y", "marks") else c("x", "y")
+  needed.names <- variablesinformula(rhs.of.formula(formula(model)))
+  # check for clashes/quirks
+  if("lambda" %in% needed.names) {
+    if(is.dppm(orig.model) && (
+       identical.formulae(formula(model), ~offset(log(lambda))-1) ||
+       identical.formulae(formula(model), ~log(lambda)-1)
+       ))
+      stop("effectfun is not defined for a DPP model with fixed intensity",
+           call.=FALSE)
+    intensityname <- setdiff(c("Lambda", "intensity"), needed.names)[1]
+  } else intensityname <- "lambda"
+  ## validate the relevant covariate 
+  if(missing(covname) || is.null(covname)) {
+    mc <- model.covariates(model)
+    if(length(mc) == 1) covname <- mc else stop("covname must be provided")
+  }
+  if(!(covname %in% c(intern.names, needed.names)))
+    stop(paste("model does not have a covariate called", sQuote(covname)),
+         call.=FALSE)
+  # check that fixed values for all other covariates are provided 
+  given.covs <- names(dotargs)
+  if(any(uhoh <- !(needed.names %in% c(given.covs, covname)))) {
+    nuh <- sum(uhoh)
+    stop(paste(ngettext(nuh,
+                        "A value for the covariate",
+                        "Values for the covariates"),
+               commasep(dQuote(needed.names[uhoh])),
+               "must be provided (as",
+               ngettext(nuh, "an argument", "arguments"),
+               "to effectfun)"))
+  }
+  # establish type and range of covariate values
+  N0 <- 256
+  if(covname == "x") {
+    covtype <- "real"
+    W <- as.owin(data.ppm(model))
+    Zr <- W$xrange
+    Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0)
+  } else if(covname == "y") {
+    covtype <- "real"
+    W <- as.owin(data.ppm(model))
+    Zr <- W$yrange
+    Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0)
+  } else if(covname == "marks") {
+    covtype <- "factor"
+    Zvals <- levels(marks(data.ppm(model)))
+  } else {
+    # covariate is external
+    if(is.data.frame(covdf <- model$covariates)) {
+      Z <- covdf[[covname]]
+      covtype <- typeof(Z)
+      if(covtype == "double")
+        covtype <- "real"
+      switch(covtype,
+             real={
+               Zr <- range(Z)
+               Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0)
+             },
+             integer={
+               Zr <- range(Z)
+               Zvals <- seq(from=Zr[1L], to=Zr[2L], by=ceiling((diff(Zr)+1)/N0))
+             },
+             factor={
+               Zvals <- levels(Z)
+             },
+             logical={
+               Zvals <- c(FALSE, TRUE)
+             },
+             stop(paste("Cannot handle covariate of type", dQuote(covtype)))
+             )
+    } else {
+      Z <- getdataobjects(covname,
+                          environment(formula(model)),
+                          model$covariates)[[1L]]
+      if(is.null(Z))
+        stop(paste("Cannot find covariate", sQuote(covname)),
+             call.=FALSE)
+      # convert to image
+      if(!is.im(Z))
+        Z <- as.im(Z, W=as.owin(model))
+      covtype <- Z$type
+      switch(covtype,
+             real={
+               Zr <- summary(Z)$range
+               Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0)
+             },
+             factor={
+               Zvals <- levels(Z)
+             },
+             logical={
+               Zvals <- c(FALSE, TRUE)
+             },
+             stop(paste("Cannot handle covariate of type", dQuote(covtype)))
+             )
+    }
+  }
+  # set up data frames of fake data for predict method
+  # First set up default, constant value for each covariate
+  N <- length(Zvals)
+  fakeloc <- resolve.defaults(dotargs,
+                              list(x=0, y=0))[c("x","y")]
+  if(is.marked.ppm(model)) {
+    lev <- levels(marks(data.ppm(model)))
+    fakeloc$marks <- lev[1L]
+  }
+  fakeloc <- lapply(fakeloc, padout, N=N)
+  fakecov <- lapply(dotargs, padout, N=N)
+  # Overwrite value for covariate of interest
+  if(covname %in% intern.names)
+    fakeloc[[covname]] <- Zvals
+  else fakecov[[covname]] <- Zvals
+  # convert to data frame
+  fakeloc <- do.call(data.frame, fakeloc)
+  fakecov <- if(length(fakecov) > 0) do.call(data.frame, fakecov) else NULL
+  #
+  # Now predict
+  pred <- predict(orig.model, locations=fakeloc, covariates=fakecov, se=se.fit)
+  if(!se.fit) lambda <- pred else {
+    lambda <- pred$estimate
+    se     <- pred$se
+    sedf <- data.frame(se = se,
+                       hi = lambda + 2 * se,
+                       lo = lambda - 2 * se)
+  }
+  #
+  dfin <- if(!is.null(fakecov)) cbind(fakeloc, fakecov) else fakeloc 
+  dfin <- dfin[covname]
+  dflam <- data.frame(lambda=lambda)
+  names(dflam) <- intensityname
+  df <- cbind(dfin, dflam)
+  #
+  if(covtype != "real") {
+    result <- df
+    if(se.fit) result <- cbind(result, sedf)
+  } else {
+    bc <- paren(covname)
+    result <- fv(df, argu=covname, 
+                 ylab=substitute(lambda(X),
+		                 list(X=as.name(covname),
+				      lambda=as.name(intensityname))),
+                 labl=c(covname,
+                   paste("hat(%s)", bc)),
+                 valu=intensityname, alim=Zr,
+                 desc=c(paste("value of covariate", covname),
+                   "fitted intensity"),
+                 fname=intensityname)
+    if(se.fit) {
+      result <- bind.fv(result, sedf,
+                        labl=c(paste("se[%s]", bc),
+                          paste("%s[hi]", bc),
+                          paste("%s[lo]", bc)),
+                        desc=c("standard error of fitted trend",
+                          "upper limit of pointwise 95%% CI for trend",
+                          "lower limit of pointwise 95%% CI for trend"))
+      fvnames(result, ".") <- c(intensityname, "hi", "lo")
+      fvnames(result, ".s") <- c("hi", "lo")
+      formula(result) <- paste(". ~ ", covname)
+    }
+  }
+  return(result)
+}
+
+  padout <- function(x, N) { rep.int(x[1L], N) }
+
+  effectfun
+})
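+
+# A minimal usage sketch of effectfun() (illustrative only, assuming
+# spatstat is loaded): fitted intensity as a function of the x coordinate,
+# with pointwise 2-standard-error limits.
+if(FALSE) {
+  fit <- ppm(cells ~ x)
+  plot(effectfun(fit, "x", se.fit=TRUE))
+}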
diff --git a/R/envelope.R b/R/envelope.R
new file mode 100755
index 0000000..ee431dc
--- /dev/null
+++ b/R/envelope.R
@@ -0,0 +1,2002 @@
+#
+#   envelope.R
+#
+#   computes simulation envelopes 
+#
+#   $Revision: 2.86 $  $Date: 2017/06/05 10:31:58 $
+#
+
+envelope <- function(Y, fun, ...) {
+  UseMethod("envelope")
+}
+
+  # .................................................................
+  #     A 'simulation recipe' contains the following variables
+  #
+  #  type = Type of simulation
+  #           "csr": uniform Poisson process
+  #           "rmh": simulated realisation of fitted Gibbs or Poisson model 
+  #          "kppm": simulated realisation of fitted cluster model 
+  #          "expr": result of evaluating a user-supplied expression
+  #          "list": user-supplied list of point patterns
+  #
+  #  expr = expression that is repeatedly evaluated to generate simulations
+  #
+  #    envir = environment in which to evaluate the expression `expr'
+  #
+  #    'csr' = TRUE iff the model is (known to be) uniform Poisson
+  #
+  #    pois  = TRUE if model is known to be Poisson
+  #  
+  #  constraints = additional information about simulation (e.g. 'with fixed n')
+  #
+  # ...................................................................
+
+simulrecipe <- function(type, expr, envir, csr, pois=csr, constraints="") {
+  if(csr && !pois) warning("Internal error: csr=TRUE but pois=FALSE")
+  out <- list(type=type,
+              expr=expr,
+              envir=envir,
+              csr=csr,
+              pois=pois,
+              constraints=constraints)
+  class(out) <- "simulrecipe"
+  out
+}
+
+
+envelope.ppp <-
+  function(Y, fun=Kest, nsim=99, nrank=1, ...,
+           funargs=list(), funYargs=funargs,
+           simulate=NULL, fix.n=FALSE, fix.marks=FALSE,
+           verbose=TRUE, clipdata=TRUE, 
+           transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL,
+           alternative=c("two.sided", "less", "greater"),
+           scale=NULL, clamp=FALSE, 
+           savefuns=FALSE, savepatterns=FALSE, nsim2=nsim,
+           VARIANCE=FALSE, nSD=2,
+           Yname=NULL, maxnerr=nsim, do.pwrong=FALSE,
+           envir.simul=NULL) {
+  cl <- short.deparse(sys.call())
+  if(is.null(Yname)) Yname <- short.deparse(substitute(Y))
+  if(is.null(fun)) fun <- Kest
+  envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame()
+  envir.here <- sys.frame(sys.nframe())
+
+  ismarked <- is.marked(Y)
+  ismulti  <- is.multitype(Y)
+  fix.marks <- fix.marks && ismarked
+  
+  if(!is.null(simulate)) {
+    # ...................................................
+    # Simulations are determined by 'simulate' argument
+    if(fix.n || fix.marks) 
+      warning("fix.n and fix.marks were ignored, because 'simulate' was given")
+    # Processing is deferred to envelopeEngine
+    simrecipe <- simulate
+    # Data pattern is argument Y
+    X <- Y
+  } else if(!fix.n && !fix.marks) {
+    # ...................................................
+    # Realisations of complete spatial randomness
+    # will be generated by rpoispp 
+    # Data pattern X is argument Y
+    # Data pattern determines intensity of Poisson process
+    X <- Y
+    sY <- summary(Y, checkdup=FALSE)
+    Yintens <- sY$intensity
+    nY <- npoints(Y)
+    Ywin <- Y$window
+    Ymarx <- marks(Y)
+    # expression that will be evaluated
+    simexpr <- if(is.null(Ymarx)) {
+        # unmarked point pattern
+        expression(rpoispp(Yintens, win=Ywin))
+      } else if(is.null(dim(Ymarx))) {
+        # single column of marks
+        expression({
+          A <- rpoispp(Yintens, win=Ywin);
+          j <- sample(nY, npoints(A), replace=TRUE);
+          A %mark% Ymarx[j]
+        })
+      } else {
+        # multiple columns of marks
+        expression({
+          A <- rpoispp(Yintens, win=Ywin);
+          j <- sample(nY, npoints(A), replace=TRUE);
+          A %mark% Ymarx[j, , drop=FALSE]
+        })
+      }
+    dont.complain.about(Yintens, Ywin)
+    # evaluate in THIS environment
+    simrecipe <- simulrecipe(type = "csr",
+                             expr = simexpr,
+                             envir = envir.here,
+                             csr   = TRUE,
+                             pois  = TRUE)
+  } else if(fix.marks) {
+    # ...................................................
+    # Data pattern is argument Y
+    X <- Y
+    # Realisations of binomial process
+    # with fixed number of points and fixed marks
+    # will be generated by runifpoint
+    nY <- npoints(Y)
+    Ywin <- as.owin(Y)
+    Ymarx <- marks(Y)
+    # expression that will be evaluated
+    simexpr <- expression(runifpoint(nY, Ywin) %mark% Ymarx)
+    # suppress warnings from code checkers
+    dont.complain.about(nY, Ywin, Ymarx)
+    # simulation constraints (explanatory string)
+    constraints <- if(ismulti) "with fixed number of points of each type" else
+                   "with fixed number of points and fixed marks"
+    # evaluate in THIS environment
+    simrecipe <- simulrecipe(type = "csr",
+                             expr = simexpr,
+                             envir = envir.here,
+                             csr   = TRUE,
+                             pois  = TRUE,
+                             constraints = constraints)
+  } else {
+    # ...................................................
+    # Data pattern is argument Y
+    X <- Y
+    # Realisations of binomial process
+    # will be generated by runifpoint
+    nY <- npoints(Y)
+    Ywin <- as.owin(Y)
+    Ymarx <- marks(Y)
+    # expression that will be evaluated
+    simexpr <- if(is.null(Ymarx)) {
+      ## unmarked
+      expression(runifpoint(nY, Ywin))
+    } else if(is.null(dim(Ymarx))) {
+      ## single column of marks
+      expression({
+        A <- runifpoint(nY, Ywin);
+        j <- sample(nY, npoints(A), replace=TRUE);
+        A %mark% Ymarx[j]
+      })
+    } else {
+      ## multiple columns of marks
+      expression({
+        A <- runifpoint(nY, Ywin);
+        j <- sample(nY, npoints(A), replace=TRUE);
+        A %mark% Ymarx[j, ,drop=FALSE]
+      })
+    }
+    dont.complain.about(nY, Ywin)
+    # evaluate in THIS environment
+    simrecipe <- simulrecipe(type = "csr",
+                             expr = simexpr,
+                             envir = envir.here,
+                             csr   = TRUE,
+                             pois  = TRUE,
+                             constraints = "with fixed number of points")
+  }
+  
+  envelopeEngine(X=X, fun=fun, simul=simrecipe,
+                 nsim=nsim, nrank=nrank, ...,
+                 funargs=funargs, funYargs=funYargs,
+                 verbose=verbose, clipdata=clipdata,
+                 transform=transform,
+                 global=global, ginterval=ginterval, use.theory=use.theory,
+                 alternative=alternative, scale=scale, clamp=clamp,
+                 savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2,
+                 VARIANCE=VARIANCE, nSD=nSD,
+                 Yname=Yname, maxnerr=maxnerr, cl=cl,
+                 envir.user=envir.user, do.pwrong=do.pwrong)
+}
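+
+# A minimal usage sketch of envelope.ppp() (illustrative only, assuming
+# spatstat is loaded): pointwise K-function envelopes from 39 binomial
+# simulations, holding the number of points fixed.
+if(FALSE) {
+  E <- envelope(cells, Kest, nsim=39, fix.n=TRUE)
+  plot(E)
+}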
+
+envelope.ppm <- 
+  function(Y, fun=Kest, nsim=99, nrank=1, ..., 
+           funargs=list(), funYargs=funargs,
+           simulate=NULL, fix.n=FALSE, fix.marks=FALSE,
+           verbose=TRUE, clipdata=TRUE, 
+           start=NULL,
+           control=update(default.rmhcontrol(Y), nrep=nrep), nrep=1e5, 
+           transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, 
+           alternative=c("two.sided", "less", "greater"),
+           scale=NULL, clamp=FALSE, 
+           savefuns=FALSE, savepatterns=FALSE, nsim2=nsim,
+           VARIANCE=FALSE, nSD=2,
+           Yname=NULL, maxnerr=nsim, do.pwrong=FALSE,
+           envir.simul=NULL) {
+  cl <- short.deparse(sys.call())
+  if(is.null(Yname)) Yname <- short.deparse(substitute(Y))
+  if(is.null(fun)) fun <- Kest
+  envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame()
+  envir.here <- sys.frame(sys.nframe())
+
+  # Extract data pattern X from fitted model Y
+  X <- data.ppm(Y)
+  
+  if(is.null(simulate)) {
+    # ...................................................
+    # Simulated realisations of the fitted model Y
+    # will be generated
+    pois <- is.poisson(Y)
+    csr <- is.stationary(Y) && pois
+    type <- if(csr) "csr" else "rmh"
+    # Set up parameters for rmh
+    rmodel <- rmhmodel(Y, verbose=FALSE)
+    if(is.null(start))
+      start <- list(n.start=npoints(X))
+    rstart <- rmhstart(start)
+    rcontr <- rmhcontrol(control)
+    if(fix.marks) {
+      rcontr <- update(rcontr, fixall=TRUE, p=1, expand=1)
+      nst <- if(is.multitype(X)) table(marks(X)) else npoints(X)
+      rstart <- update(rstart, n.start=nst)
+      constraints <- "with fixed number of points of each type"
+    } else if(fix.n) {
+      rcontr <- update(rcontr, p=1, expand=1)
+      rstart <- update(rstart, n.start=X$n)
+      constraints <- "with fixed number of points"
+    } else constraints <- ""
+    # pre-digest arguments
+    rmhinfolist <- rmh(rmodel, rstart, rcontr, preponly=TRUE, verbose=FALSE)
+    # expression that will be evaluated
+    simexpr <- expression(rmhEngine(rmhinfolist, verbose=FALSE))
+    dont.complain.about(rmhinfolist)
+    # evaluate in THIS environment
+    simrecipe <- simulrecipe(type  = type,
+                             expr  = simexpr,
+                             envir = envir.here,
+                             csr   = csr,
+                             pois  = pois,
+                             constraints = constraints)
+  } else {
+    # ...................................................
+    # Simulations are determined by 'simulate' argument
+    # Processing is deferred to envelopeEngine
+    simrecipe <- simulate
+  }
+  envelopeEngine(X=X, fun=fun, simul=simrecipe, 
+                 nsim=nsim, nrank=nrank, ...,
+                 funargs=funargs, funYargs=funYargs,
+                 verbose=verbose, clipdata=clipdata,
+                 transform=transform,
+                 global=global, ginterval=ginterval, use.theory=use.theory,
+                 alternative=alternative, scale=scale, clamp=clamp, 
+                 savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2,
+                 VARIANCE=VARIANCE, nSD=nSD,
+                 Yname=Yname, maxnerr=maxnerr, cl=cl,
+                 envir.user=envir.user, do.pwrong=do.pwrong)
+}
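+
+# A minimal usage sketch of envelope.ppm() (illustrative only, assuming
+# spatstat is loaded): envelopes for a fitted inhomogeneous Poisson model,
+# with simulations generated by rmh.
+if(FALSE) {
+  fit <- ppm(cells ~ x)
+  E <- envelope(fit, Kest, nsim=19)
+  plot(E)
+}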
+
+envelope.kppm <-
+  function(Y, fun=Kest, nsim=99, nrank=1, ..., 
+           funargs=list(), funYargs=funargs,
+           simulate=NULL, verbose=TRUE, clipdata=TRUE, 
+           transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL,
+           alternative=c("two.sided", "less", "greater"),
+           scale=NULL, clamp=FALSE,
+           savefuns=FALSE, savepatterns=FALSE, nsim2=nsim,
+           VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim,
+           do.pwrong=FALSE, envir.simul=NULL)
+{
+  cl <- short.deparse(sys.call())
+  if(is.null(Yname)) Yname <- short.deparse(substitute(Y))
+  if(is.null(fun)) fun <- Kest
+  envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame()
+  envir.here <- sys.frame(sys.nframe())
+  
+  # Extract data pattern X from fitted model Y
+  X <- Y$X
+  
+  if(is.null(simulate)) {
+    # Simulated realisations of the fitted model Y
+    # will be generated using simulate.kppm
+    kmodel <- Y
+    # expression that will be evaluated
+    simexpr <- expression(simulate(kmodel)[[1L]])
+    dont.complain.about(kmodel)
+    # evaluate in THIS environment
+    simrecipe <- simulrecipe(type = "kppm",
+                             expr = simexpr,
+                             envir = envir.here,
+                             csr   = FALSE,
+                             pois  = FALSE)
+  } else {
+    # ...................................................
+    # Simulations are determined by 'simulate' argument
+    # Processing is deferred to envelopeEngine
+    simrecipe <- simulate
+  }
+  envelopeEngine(X=X, fun=fun, simul=simrecipe, 
+                 nsim=nsim, nrank=nrank, ...,
+                 funargs=funargs, funYargs=funYargs,
+                 verbose=verbose, clipdata=clipdata,
+                 transform=transform,
+                 global=global, ginterval=ginterval, use.theory=use.theory,
+                 alternative=alternative, scale=scale, clamp=clamp,
+                 savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2,
+                 VARIANCE=VARIANCE, nSD=nSD,
+                 Yname=Yname, maxnerr=maxnerr, cl=cl,
+                 envir.user=envir.user, do.pwrong=do.pwrong)
+
+}
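+
+# A minimal usage sketch of envelope.kppm() (illustrative only, assuming
+# spatstat is loaded): envelopes for a fitted Thomas cluster model,
+# with simulations generated by simulate.kppm.
+if(FALSE) {
+  fit <- kppm(redwood ~ 1, "Thomas")
+  plot(envelope(fit, Lest, nsim=19))
+}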
+
+## .................................................................
+##   Engine for simulating and computing envelopes
+## .................................................................
+#
+#  X is the data point pattern, which could be ppp, pp3, ppx etc
+#  X determines the class of pattern expected from the simulations
+#
+
+envelopeEngine <-
+  function(X, fun, simul,
+           nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs,
+           verbose=TRUE, clipdata=TRUE, 
+           transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL,
+           alternative=c("two.sided", "less", "greater"),
+           scale=NULL, clamp=FALSE,
+           savefuns=FALSE, savepatterns=FALSE,
+           saveresultof=NULL,
+           weights=NULL,
+           nsim2=nsim,
+           VARIANCE=FALSE, nSD=2,
+           Yname=NULL, maxnerr=nsim, internal=NULL, cl=NULL,
+           envir.user=envir.user,
+           expected.arg="r",
+           do.pwrong=FALSE,
+           foreignclass=NULL,
+           collectrubbish=FALSE) {
+  #
+  envir.here <- sys.frame(sys.nframe())
+
+  alternative <- match.arg(alternative)
+
+  foreignclass <- as.character(foreignclass)
+  if(length(foreignclass) != 0 && clipdata) {
+    warning(paste("Ignoring clipdata=TRUE:",
+                  "I don't know how to clip objects of class",
+                  sQuote(paste(foreignclass, collapse=","))))
+    clipdata <- FALSE
+  }
+  
+  # ----------------------------------------------------------
+  # Determine Simulation
+  # ----------------------------------------------------------
+  
+  # Identify class of patterns to be simulated, from data pattern X
+  Xclass <- if(is.ppp(X)) "ppp" else
+            if(is.pp3(X)) "pp3" else
+            if(is.ppx(X)) "ppx" else
+            if(inherits(X, foreignclass)) foreignclass else
+            stop("Unrecognised class of point pattern")
+  Xobjectname <- paste("point pattern of class", sQuote(Xclass))
+
+  # Option to use weighted average
+  if(use.weights <- !is.null(weights)) {
+    # weight can be either a numeric vector or a function
+    if(is.numeric(weights)) {
+      compute.weights <- FALSE
+      weightfun <- NULL
+    } else if(is.function(weights)) {
+      compute.weights <- TRUE
+      weightfun <- weights
+      weights <- NULL  
+    } else stop("weights should be either a function or a numeric vector")
+  } else compute.weights <- FALSE
+    
+  # Undocumented option to generate patterns only.
+  patterns.only <- identical(internal$eject, "patterns")
+
+  # Undocumented option to evaluate 'something' for each pattern
+  if(savevalues <- !is.null(saveresultof)) {
+    stopifnot(is.function(saveresultof))
+    SavedValues <- list()
+  }
+
+  # Identify type of simulation from argument 'simul'
+  if(inherits(simul, "simulrecipe")) {
+    # ..................................................
+    # simulation recipe is given
+    simtype <- simul$type
+    simexpr <- simul$expr
+    envir   <- simul$envir
+    csr     <- simul$csr
+    pois    <- simul$pois
+    constraints <- simul$constraints
+  } else {
+    # ...................................................
+    # simulation is specified by argument `simulate' to envelope()
+    simulate <- simul
+    # which should be an expression, or a list of point patterns,
+    # or an envelope object.
+    csr <- FALSE
+    # override
+    if(!is.null(icsr <- internal$csr)) csr <- icsr
+    pois <- csr
+    constraints <- ""
+#    model <- NULL
+    if(inherits(simulate, "envelope")) {
+      # envelope object: see if it contains stored point patterns
+      simpat <- attr(simulate, "simpatterns")
+      if(!is.null(simpat))
+        simulate <- simpat
+      else
+        stop(paste("The argument", sQuote("simulate"),
+                   "is an envelope object but does not contain",
+                   "any saved point patterns."))
+    }
+    if(is.expression(simulate)) {
+      # The user-supplied expression 'simulate' will be evaluated repeatedly
+      simtype <- "expr"
+      simexpr <- simulate
+      envir <- envir.user
+    } else if(is.list(simulate) &&
+              all(sapply(simulate, inherits, what=Xclass))) {
+      # The user-supplied list of point patterns will be used
+      simtype <- "list"
+      SimDataList <- simulate
+      # expression that will be evaluated
+      simexpr <- expression(SimDataList[[i]])
+      dont.complain.about(SimDataList)
+      envir <- envir.here
+      # ensure that `i' is defined
+      i <- 1L
+      # any messages?
+      if(!is.null(mess <- attr(simulate, "internal"))) {
+        # determine whether these point patterns are realisations of CSR
+        csr <- !is.null(mc <- mess$csr) && mc
+      }
+    } else stop(paste(sQuote("simulate"),
+                      "should be an expression, or a list of point patterns"))
+  }
+  # -------------------------------------------------------------------
+  # Determine clipping window
+  # ------------------------------------------------------------------
+
+  if(clipdata) {
+    # Generate one realisation
+    Xsim <- eval(simexpr, envir=envir)
+    if(!inherits(Xsim, Xclass))
+      switch(simtype,
+             csr=stop(paste("Internal error:", Xobjectname, "not generated")),
+             rmh=stop(paste("Internal error: rmh did not return an",
+               Xobjectname)),
+             kppm=stop(paste("Internal error: simulate.kppm did not return an",
+               Xobjectname)),
+             expr=stop(paste("Evaluating the expression", sQuote("simulate"),
+               "did not yield an", Xobjectname)),
+             list=stop(paste("Internal error: list entry was not an",
+               Xobjectname)),
+             stop(paste("Internal error:", Xobjectname, "not generated"))
+             )
+    # Extract window
+    clipwin <- Xsim$window
+    if(!is.subset.owin(clipwin, X$window))
+      warning("Window containing simulated patterns is not a subset of data window")
+  }
+  
+  # ------------------------------------------------------------------
+  # Summary function to be applied 
+  # ------------------------------------------------------------------
+
+  if(is.null(fun))
+    stop("Internal error: fun is NULL")
+
+  # Name of function, for error messages
+  fname <- if(is.name(substitute(fun))) short.deparse(substitute(fun)) else
+  if(is.character(fun)) fun else "fun"
+  fname <- sQuote(fname)
+
+  # R function to apply
+  if(is.character(fun)) {
+    gotfun <- try(get(fun, mode="function"))
+    if(inherits(gotfun, "try-error"))
+      stop(paste("Could not find a function named", sQuote(fun)))
+    fun <- gotfun
+  } else if(!is.function(fun)) 
+    stop(paste("unrecognised format for function", fname))
+  fargs <- names(formals(fun))
+  if(!any(c(expected.arg, "...") %in% fargs))
+    stop(paste(fname, "should have",
+               ngettext(length(expected.arg), "an argument", "arguments"),
+               "named", commasep(sQuote(expected.arg)),
+               "or a", sQuote("..."), "argument"))
+  usecorrection <- any(c("correction", "...") %in% fargs)
+  
+  # ---------------------------------------------------------------------
+  # validate other arguments
+  if((nrank %% 1) != 0)
+    stop("nrank must be an integer")
+  if((nsim %% 1) != 0)
+    stop("nsim must be an integer")
+  stopifnot(nrank > 0 && nrank < nsim/2)
+
+  rgiven <- any(expected.arg %in% names(list(...)))
+
+  if(tran <- !is.null(transform)) {
+    stopifnot(is.expression(transform))
+    # prepare expressions to be evaluated each time 
+    transform.funX    <- inject.expr("with(funX,.)",    transform)
+    transform.funXsim <- inject.expr("with(funXsim,.)", transform)
+    # .... old code using 'eval.fv' ......
+    # transform.funX <- dotexpr.to.call(transform, "funX", "eval.fv")
+    # transform.funXsim <- dotexpr.to.call(transform, "funXsim", "eval.fv")
+    # 'transform.funX' and 'transform.funXsim' are unevaluated calls to eval.fv
+  }
+  if(!is.null(ginterval)) 
+    stopifnot(is.numeric(ginterval) && length(ginterval) == 2)
+    
+  # ---------------------------------------------------------------------
+  # Evaluate function for data pattern X
+  # ------------------------------------------------------------------
+  Xarg <- if(!clipdata) X else X[clipwin]
+  corrx <- if(usecorrection) list(correction="best") else NULL
+  funX <- do.call(fun,
+                  resolve.defaults(list(Xarg),
+                                   list(...),
+                                   funYargs,
+                                   corrx))
+                                     
+  if(!inherits(funX, "fv"))
+    stop(paste("The function", fname,
+               "must return an object of class", sQuote("fv")))
+
+  ## warn about 'dangerous' arguments
+  if(!is.null(dang <- attr(funX, "dangerous")) &&
+     any(uhoh <- dang %in% names(list(...)))) {
+    nuh <- sum(uhoh)
+    warning(paste("Envelope may be invalid;",
+                  ngettext(nuh, "argument", "arguments"),
+                  commasep(sQuote(dang[uhoh])),
+                  ngettext(nuh, "appears", "appear"),
+                  "to have been fixed."),
+            call.=FALSE)
+  }
+  
+  argname <- fvnames(funX, ".x")
+  valname <- fvnames(funX, ".y")
+  has.theo <- "theo" %in% fvnames(funX, "*")
+  csr.theo <- csr && has.theo
+  use.theory <- if(is.null(use.theory)) csr.theo else (use.theory && has.theo)
+  
+  if(tran) {
+    # extract only the recommended value
+    if(use.theory) 
+      funX <- funX[, c(argname, valname, "theo")]
+    else
+      funX <- funX[, c(argname, valname)]
+    # apply the transformation to it
+    funX <- eval(transform.funX)
+  }
+    
+  rvals <- funX[[argname]]
+#  fX    <- funX[[valname]]
+
+  # default domain over which to maximise
+  alim <- attr(funX, "alim")
+  if(global && is.null(ginterval))
+    ginterval <- if(rgiven || is.null(alim)) range(rvals) else alim
+  
+  #--------------------------------------------------------------------
+  # Determine number of simulations
+  # ------------------------------------------------------------------
+  #
+  ## determine whether dual simulations are required
+  ## (one set of simulations to calculate the theoretical mean,
+  ##  another independent set of simulations to obtain the critical point.)
+  dual <- (global && !use.theory && !VARIANCE)
+  Nsim <- if(!dual) nsim else (nsim + nsim2)
+
+  # if taking data from a list of point patterns,
+  # check there are enough of them
+  if(simtype == "list" && Nsim > length(SimDataList))
+    stop(paste("Number of simulations",
+               paren(if(!dual)
+                     paste(nsim) else
+                     paste(nsim, "+", nsim2, "=", Nsim)
+                     ),
+               "exceeds number of point pattern datasets supplied"))
+
+  # Undocumented secret exit
+  # ------------------------------------------------------------------
+  if(patterns.only) {
+    # generate simulated realisations and return only these patterns
+    if(verbose) {
+      action <- if(simtype == "list") "Extracting" else "Generating"
+      descrip <- switch(simtype,
+                        csr = "simulations of CSR",
+                        rmh = paste("simulated realisations of fitted",
+                          if(pois) "Poisson" else "Gibbs",
+                          "model"),
+                        kppm = "simulated realisations of fitted cluster model",
+                        expr = "simulations by evaluating expression",
+                        list = "point patterns from list",
+                        "simulated realisations")
+      if(!is.null(constraints) && nzchar(constraints))
+        descrip <- paste(descrip, constraints)
+      explan <- if(dual) paren(paste(nsim2, "to estimate the mean and",
+                                     nsim, "to calculate envelopes")) else ""
+      splat(action, Nsim, descrip, explan, "...")
+    }
+    XsimList <- list()
+  # start simulation loop
+    sstate <- list() 
+    for(i in 1:Nsim) {
+      if(verbose) sstate <- progressreport(i, Nsim, state=sstate)
+      Xsim <- eval(simexpr, envir=envir)
+      if(!inherits(Xsim, Xclass))
+        switch(simtype,
+               csr={
+                 stop(paste("Internal error:", Xobjectname, "not generated"))
+               },
+               rmh={
+                 stop(paste("Internal error: rmh did not return an",
+                            Xobjectname))
+               },
+               kppm={
+                 stop(paste("Internal error: simulate.kppm did not return an",
+                            Xobjectname))
+               },
+               expr={
+                 stop(paste("Evaluating the expression", sQuote("simulate"),
+                            "did not yield an", Xobjectname))
+               },
+               list={
+                 stop(paste("Internal error: list entry was not an",
+                            Xobjectname))
+               },
+               stop(paste("Internal error:", Xobjectname, "not generated"))
+               )
+      XsimList[[i]] <- Xsim
+    }
+    if(verbose) {
+      cat(paste("Done.\n"))
+      flush.console()
+    }
+    attr(XsimList, "internal") <- list(csr=csr)
+    return(XsimList)
+  }
+  
+  # capture main decision parameters
+  envelopeInfo <-  list(call=cl,
+                        Yname=Yname,
+                        valname=valname,
+                        csr=csr,
+                        csr.theo=csr.theo,
+                        use.theory=use.theory,
+                        pois=pois,
+                        simtype=simtype,
+                        constraints=constraints,
+                        nrank=nrank,
+                        nsim=nsim,
+                        Nsim=Nsim,
+                        global=global,
+                        ginterval=ginterval,
+                        dual=dual,
+                        nsim2=nsim2,
+                        VARIANCE=VARIANCE,
+                        nSD=nSD,
+                        alternative=alternative,
+                        scale=scale,
+                        clamp=clamp,
+                        use.weights=use.weights,
+                        do.pwrong=do.pwrong)
+
+  # ----------------------------------------
+  ######### SIMULATE #######################
+  # ----------------------------------------
+
+  if(verbose) {
+    action <- if(simtype == "list") "Extracting" else "Generating"
+    descrip <- switch(simtype,
+                      csr = "simulations of CSR",
+                      rmh = paste("simulated realisations of fitted",
+                        if(pois) "Poisson" else "Gibbs",
+                        "model"),
+                      kppm = "simulated realisations of fitted cluster model",
+                      expr = "simulations by evaluating expression",
+                      list = "point patterns from list",
+                      "simulated patterns")
+    if(!is.null(constraints) && nzchar(constraints))
+      descrip <- paste(descrip, constraints)
+    explan <- if(dual) paren(paste(nsim2, "to estimate the mean and",
+                                   nsim, "to calculate envelopes")) else ""
+    splat(action, Nsim, descrip, explan, "...")
+  }
+  # determine whether simulated point patterns should be saved
+  catchpatterns <- savepatterns && simtype != "list"
+  Caughtpatterns <- list()
+  # allocate space for computed function values
+  nrvals <- length(rvals)
+  simvals <- matrix(, nrow=nrvals, ncol=Nsim)
+  # allocate space for weights to be computed
+  if(compute.weights)
+    weights <- numeric(Nsim)
+  
+  # inferred values of function argument 'r' or equivalent parameters
+  if(identical(expected.arg, "r")) {
+    # Kest, etc
+    inferred.r.args <- list(r=rvals)
+  } else if(identical(expected.arg, c("rmax", "nrval"))) {
+    # K3est, etc
+    inferred.r.args <- list(rmax=max(rvals), nrval=length(rvals))
+  } else {
+    stop(paste("Don't know how to infer values of", commasep(expected.arg)))
+  }
+    
+  # arguments for function when applied to simulated patterns
+  funargs <-
+    resolve.defaults(funargs,
+                     inferred.r.args,
+                     list(...),
+                     if(usecorrection) list(correction="best") else NULL)
+  
+  # start simulation loop
+  nerr <- 0
+  if(verbose) pstate <- list()
+  for(i in 1:Nsim) {
+    ok <- FALSE
+    # safely generate a random pattern and apply function
+    while(!ok) {
+      Xsim <- eval(simexpr, envir=envir)
+      # check valid point pattern
+      if(!inherits(Xsim, Xclass))
+        switch(simtype,
+               csr=stop(paste("Internal error:", Xobjectname, "not generated")),
+               rmh=stop(paste("Internal error: rmh did not return an",
+                 Xobjectname)),
+               kppm=stop(paste("Internal error:",
+                 "simulate.kppm did not return an",
+                 Xobjectname)),
+               expr=stop(paste("Evaluating the expression", sQuote("simulate"),
+                 "did not yield an", Xobjectname)),
+               list=stop(paste("Internal error: list entry was not an",
+                 Xobjectname)),
+               stop(paste("Internal error:", Xobjectname, "not generated"))
+               )
+      if(catchpatterns)
+        Caughtpatterns[[i]] <- Xsim
+      if(savevalues)
+        SavedValues[[i]] <- saveresultof(Xsim)
+      if(compute.weights) {
+        wti <- weightfun(Xsim)
+        if(!is.numeric(wti))
+          stop("weightfun did not return a numeric value")
+        if(length(wti) != 1L)
+          stop("weightfun should return a single numeric value")
+        weights[i] <- wti
+      }
+      # apply function safely
+      funXsim <- try(do.call(fun, append(list(Xsim), funargs)))
+
+      ok <- !inherits(funXsim, "try-error")
+      
+      if(!ok) {
+        nerr <- nerr + 1L
+        if(nerr > maxnerr)
+          stop("Exceeded maximum number of errors")
+        cat("[retrying]\n")
+      } 
+    }
+
+    # sanity checks
+    if(i == 1L) {
+      if(!inherits(funXsim, "fv"))
+        stop(paste("When applied to a simulated pattern, the function",
+                   fname, "did not return an object of class",
+                   sQuote("fv")))
+      argname.sim <- fvnames(funXsim, ".x")
+      valname.sim <- fvnames(funXsim, ".y")
+      if(argname.sim != argname)
+        stop(paste("The objects returned by", fname,
+                   "when applied to a simulated pattern",
+                   "and to the data pattern",
+                   "are incompatible. They have different argument names",
+                   sQuote(argname.sim), "and", sQuote(argname), 
+                   "respectively"))
+      if(valname.sim != valname)
+        stop(paste("When", fname, "is applied to a simulated pattern,",
+                   "it provides an estimate named", sQuote(valname.sim),
+                   "whereas the estimate for the data pattern is named",
+                   paste0(sQuote(valname), "."),
+                   "Try using the argument", sQuote("correction"),
+                   "to make them compatible"))
+      rfunX    <- with(funX,    ".x")
+      rfunXsim <- with(funXsim, ".x")
+      if(!identical(rfunX, rfunXsim))
+        stop(paste("When", fname, "is applied to a simulated pattern,",
+                   "the values of the argument", sQuote(argname.sim),
+                   "are different from those used for the data."))
+    }
+
+    if(tran) {
+      # extract only the recommended value
+      if(use.theory) 
+        funXsim <- funXsim[, c(argname, valname, "theo")]
+      else
+        funXsim <- funXsim[, c(argname, valname)]
+      # apply the transformation to it
+      funXsim <- eval(transform.funXsim)
+    }
+
+    # extract the values for simulation i
+    simvals.i <- funXsim[[valname]]
+    if(length(simvals.i) != nrvals)
+      stop("Vectors of function values have incompatible lengths")
+      
+    simvals[ , i] <- funXsim[[valname]]
+    if(verbose)
+      pstate <- progressreport(i, Nsim, state=pstate)
+    
+    if(collectrubbish) {
+      rm(Xsim)
+      rm(funXsim)
+      gc()
+    }
+  }
+  ##  end simulation loop
+  
+  if(verbose) {
+    cat("\nDone.\n")
+    flush.console()
+  }
+
+  # ...........................................................
+  # save functions and/or patterns if so commanded
+
+  if(savefuns) {
+    alldata <- cbind(rvals, simvals)
+    simnames <- paste("sim", 1:Nsim, sep="")
+    colnames(alldata) <- c("r", simnames)
+    alldata <- as.data.frame(alldata)
+    SimFuns <- fv(alldata,
+                  argu="r",
+                  ylab=attr(funX, "ylab"),
+                  valu="sim1",
+                  fmla= deparse(. ~ r),
+                  alim=attr(funX, "alim"),
+                  labl=names(alldata),
+                  desc=c("distance argument r",
+                    paste("Simulation ", 1:Nsim, sep="")),
+                  fname=attr(funX, "fname"),
+                  yexp=attr(funX, "yexp"),
+                  unitname=unitname(funX))
+    fvnames(SimFuns, ".") <- simnames
+  } 
+  if(savepatterns)
+    SimPats <- if(simtype == "list") SimDataList else Caughtpatterns
+
+  ######### COMPUTE ENVELOPES #######################
+
+  etype <- if(global) "global" else if(VARIANCE) "variance" else "pointwise"
+  if(dual) {
+    jsim <- 1:nsim
+    jsim.mean <- nsim + 1:nsim2
+  } else {
+    jsim <- jsim.mean <- NULL
+  }
+
+  result <- envelope.matrix(simvals, funX=funX,
+                            jsim=jsim, jsim.mean=jsim.mean,
+                            type=etype, alternative=alternative,
+                            scale=scale, clamp=clamp,
+                            csr=csr, use.theory=use.theory,
+                            nrank=nrank, ginterval=ginterval, nSD=nSD,
+                            Yname=Yname, do.pwrong=do.pwrong,
+                            weights=weights)
+
+  # tack on envelope information
+  attr(result, "einfo") <- envelopeInfo
+
+  # tack on functions and/or patterns if so commanded   
+  if(savefuns)
+    attr(result, "simfuns") <- SimFuns
+  if(savepatterns) {
+    attr(result, "simpatterns") <- SimPats
+    attr(result, "datapattern") <- X
+  }
+  # save function weights 
+  if(use.weights)
+    attr(result, "weights") <- weights
+
+  # undocumented - tack on values of some other quantity
+  if(savevalues) {
+    attr(result, "simvalues") <- SavedValues
+    attr(result, "datavalue") <- saveresultof(X)
+  }
+  return(result)
+}
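+
+# A minimal usage sketch of the engine above (assuming the standard
+# spatstat dataset 'cells'): with nrank=1 and nsim=39, the two-sided
+# pointwise test has significance level 2*1/(39+1) = 0.05.
+#
+#   E <- envelope(cells, Kest, nsim=39, nrank=1,
+#                 savefuns=TRUE,      # keep the simulated K functions
+#                 savepatterns=TRUE)  # keep the simulated point patterns
+#   plot(E)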
+
+
+plot.envelope <- function(x, ..., main) {
+  if(missing(main)) main <- short.deparse(substitute(x))
+  shade.given <- ("shade" %in% names(list(...)))
+  shade.implied <- !is.null(fvnames(x, ".s"))
+  if(!(shade.given || shade.implied)) {
+    # ensure x has default 'shade' attribute
+    # (in case x was produced by an older version of spatstat)
+    if(all(c("lo", "hi") %in% colnames(x)))
+      fvnames(x, ".s") <- c("lo", "hi")
+    else warning("Unable to determine shading for envelope")
+  }
+  NextMethod("plot", main=main)
+}
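+
+# Typical use of the plot method (assuming E is an envelope object):
+# the shaded band is drawn between the columns named by fvnames(E, ".s"),
+# falling back to c("lo", "hi") for objects made by older versions.
+#
+#   plot(E, shade=c("lo", "hi"), main="pointwise envelope")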
+
+print.envelope <- function(x, ...) {
+  e <- attr(x, "einfo")
+  g <- e$global
+  csr <- e$csr
+  pois <- e$pois
+  if(is.null(pois)) pois <- csr
+  simtype <- e$simtype
+  constraints <- e$constraints
+  nr <- e$nrank
+  nsim <- e$nsim
+  V <- e$VARIANCE
+  fname <- flat.deparse(attr(x, "ylab"))
+  type <- if(V) paste("Pointwise", e$nSD, "sigma") else
+          if(g) "Simultaneous" else "Pointwise"
+  splat(type, "critical envelopes for", fname,
+        "\nand observed value for", sQuote(e$Yname))
+  if(!is.null(valname <- e$valname) && waxlyrical('extras'))
+    splat("Edge correction:", dQuote(valname))
+  ## determine *actual* type of simulation
+  descrip <-
+    if(csr) "simulations of CSR"
+    else if(!is.null(simtype)) {
+      switch(simtype,
+             csr="simulations of CSR",
+             rmh=paste("simulations of fitted",
+               if(pois) "Poisson" else "Gibbs",
+               "model"),
+             kppm="simulations of fitted cluster model",
+             expr="evaluations of user-supplied expression",
+             list="point pattern datasets in user-supplied list",
+             funs="columns of user-supplied data")
+    } else "simulations of fitted model"
+  if(!is.null(constraints) && nzchar(constraints))
+    descrip <- paste(descrip, constraints)
+  #  
+  splat("Obtained from", nsim, descrip)
+  #
+  if(waxlyrical('extras')) {
+    if(!is.null(e$dual) && e$dual) 
+      splat("Theoretical (i.e. null) mean value of", fname,
+            "estimated from a separate set of",
+            e$nsim2, "simulations")
+    if(!is.null(attr(x, "simfuns"))) 
+      splat("(All simulated function values are stored)")
+    if(!is.null(attr(x, "simpatterns"))) 
+      splat("(All simulated point patterns are stored)")
+  }
+  splat("Alternative:", e$alternative)
+  if(!V && waxlyrical('extras')) {
+    ## significance interpretation
+    ## (a two-sided pointwise test uses both tails, hence 2*nr;
+    ##  one-sided and global tests use nr, matching summary.envelope)
+    symmetric <- (e$alternative == "two.sided") && !g
+    alpha <- if(symmetric) { 2 * nr/(nsim+1) } else { nr/(nsim+1) }
+    splat("Significance level of",
+          if(g) "simultaneous" else "pointwise",
+          "Monte Carlo test:",
+          paste0(if(symmetric) 2 * nr else nr,
+                 "/", nsim+1),
+          "=", signif(alpha, 3))
+  }
+  if(waxlyrical('gory') && !is.null(pwrong <- attr(x, "pwrong"))) {
+    splat("\t[Estimated significance level of pointwise excursions:",
+          paste0("pwrong=", signif(pwrong, 3), "]"))
+  }
+  NextMethod("print")
+}
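+
+# Worked examples of the significance level reported above:
+#   pointwise, two-sided, nrank=1, nsim=99:  alpha = 2*1/(99+1) = 0.02
+#   global (simultaneous), nrank=1, nsim=19: alpha = 1/(19+1)   = 0.05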
+                  
+summary.envelope <- function(object, ...) {
+  e <- attr(object, "einfo")
+  g <- e$global
+  V <- e$VARIANCE
+  nr <- e$nrank
+  nsim <- e$nsim
+  csr <- e$csr
+  pois <- e$pois
+  if(is.null(pois)) pois <- csr
+  simtype <- e$simtype
+  constraints <- e$constraints
+  alternative <- e$alternative
+  use.theory <- e$use.theory
+  has.theo <- "theo" %in% fvnames(object, "*")
+  csr.theo <- csr && has.theo
+  use.theory <- if(is.null(use.theory)) csr.theo else (use.theory && has.theo)
+  fname <- deparse(attr(object, "ylab"))
+  type <- if(V) paste("Pointwise", e$nSD, "sigma") else
+          if(g) "Simultaneous" else "Pointwise"
+  splat(type, "critical envelopes for", fname, 
+      "\nand observed value for", sQuote(e$Yname))
+  # determine *actual* type of simulation
+  descrip <-
+    if(csr) "simulations of CSR"
+    else if(!is.null(simtype)) {
+      switch(simtype,
+             csr="simulations of CSR",
+             rmh=paste("simulations of fitted",
+               if(pois) "Poisson" else "Gibbs",
+               "model"),
+             kppm="simulations of fitted cluster model",
+             expr="evaluations of user-supplied expression",
+             list="point pattern datasets in user-supplied list",
+             funs="columns of user-supplied data",
+             "simulated point patterns")
+    } else "simulations of fitted model"
+  if(!is.null(constraints) && nzchar(constraints))
+    descrip <- paste(descrip, constraints)
+  #  
+  splat("Obtained from", nsim, descrip)
+  #
+  if(waxlyrical('extras')) {
+    if(!is.null(e$dual) && e$dual) 
+      splat("Theoretical (i.e. null) mean value of", fname,
+            "estimated from a separate set of",
+            e$nsim2, "simulations")
+    if(!is.null(attr(object, "simfuns")))
+      splat("(All", nsim, "simulated function values",
+            "are stored in attr(,", dQuote("simfuns"), ") )")
+    if(!is.null(attr(object, "simpatterns")))
+      splat("(All", nsim, "simulated point patterns",
+            "are stored in attr(,", dQuote("simpatterns"), ") )")
+  }
+  #
+  splat("Alternative:", alternative)
+  if(V) {
+    # nSD envelopes
+    splat(switch(alternative,
+                 two.sided = "Envelopes",
+                 "Critical boundary"),
+          "computed as sample mean",
+          switch(alternative,
+                 two.sided="plus/minus",
+                 less="minus",
+                 greater="plus"),
+          e$nSD, "sample standard deviations")
+  } else {
+    # critical envelopes
+    lo.ord <- if(nr == 1L) "minimum" else paste(ordinal(nr), "smallest")
+    hi.ord <- if(nr == 1L) "maximum" else paste(ordinal(nr), "largest")
+    if(g) 
+      splat(switch(alternative,
+                   two.sided = "Envelopes",
+                   "Critical boundary"),
+            "computed as",
+            if(use.theory) "theoretical curve" else "mean of simulations",
+            switch(alternative,
+                   two.sided="plus/minus",
+                   less="minus",
+                   greater="plus"),
+            hi.ord,
+            "simulated value of maximum", 
+            switch(alternative,
+                   two.sided="absolute",
+                   less="negative",
+                   greater="positive"),
+            "deviation")
+    else {
+      if(alternative != "less")
+        splat("Upper envelope: pointwise", hi.ord, "of simulated curves")
+      if(alternative != "greater")
+        splat("Lower envelope: pointwise", lo.ord, "of simulated curves")
+    }
+    symmetric <- (alternative == "two.sided") && !g
+    alpha <- if(!symmetric) { nr/(nsim+1) } else { 2 * nr/(nsim+1) }
+    splat("Significance level of Monte Carlo test:",
+          paste0(if(!symmetric) nr else 2 * nr,
+                 "/", nsim+1),
+          "=", alpha)
+  } 
+  splat("Data:", e$Yname)
+  return(invisible(NULL))
+}
+  
+
+# envelope.matrix
+
+# core functionality to compute envelope values
+
+# theory = funX[["theo"]]
+# observed = fX
+
+envelope.matrix <- function(Y, ...,
+                            rvals=NULL, observed=NULL, theory=NULL, 
+                            funX=NULL,
+                            nsim=NULL, nsim2=NULL,
+                            jsim=NULL, jsim.mean=NULL,
+                            type=c("pointwise", "global", "variance"),
+                            alternative=c("two.sided", "less", "greater"),
+                            scale = NULL, clamp=FALSE,
+                            csr=FALSE, use.theory = csr, 
+                            nrank=1, ginterval=NULL, nSD=2,
+                            savefuns=FALSE,
+                            check=TRUE,
+                            Yname=NULL,
+                            do.pwrong=FALSE,
+                            weights=NULL,
+                            precomputed=NULL) {
+  if(is.null(Yname))
+    Yname <- short.deparse(substitute(Y))
+
+  type <- match.arg(type)
+  alternative <- match.arg(alternative)
+
+  if(!is.null(funX))
+    stopifnot(is.fv(funX))
+
+  pwrong <- NULL
+  use.weights <- !is.null(weights)
+  cheat <- !is.null(precomputed)
+
+  if(is.null(rvals) && is.null(observed) && !is.null(funX)) {
+    # assume funX is summary function for observed data
+    rvals <- with(funX, .x)
+    observed <- with(funX, .y)
+    theory <-
+      if(use.theory && "theo" %in% names(funX)) with(funX, theo) else NULL
+  } else if(check) {
+    # validate vectors of data
+    if(is.null(rvals)) stop("rvals must be supplied")
+    if(is.null(observed)) stop("observed must be supplied")
+    if(!is.null(Y)) stopifnot(length(rvals) == nrow(Y))
+    stopifnot(length(observed) == length(rvals))
+  }
+
+  if(use.theory) {
+    use.theory <- !is.null(theory)
+    if(use.theory && check) stopifnot(length(theory) == length(rvals))
+  }
+
+  simvals <- Y
+  fX <- observed
+
+  atr <- if(!is.null(funX)) attributes(funX) else
+         list(alim=range(rvals),
+              ylab=quote(f(r)),
+              yexp=quote(f(r)),
+              fname="f")
+
+  fname <- atr$fname
+  
+  if(!cheat) {
+    # ................   standard calculation .....................
+    # validate weights
+    if(use.weights) 
+      check.nvector(weights, ncol(simvals), 
+                    things="simulated functions", naok=TRUE)
+
+    # determine numbers of columns used
+      Ncol <- ncol(simvals)
+      if(Ncol < 2)
+        stop("Need at least 2 columns of function values")
+      
+      if(is.null(jsim) && !is.null(nsim)) {
+        # usual case - 'nsim' determines 'jsim'
+        if(nsim > Ncol)
+          stop(paste(nsim, "simulations are not available; only",
+                     Ncol, "columns provided"))
+        jsim <- 1:nsim
+        if(!is.null(nsim2)) {
+          # 'nsim2' determines 'jsim.mean'
+          if(nsim + nsim2 > Ncol)
+            stop(paste(nsim, "+", nsim2, "=", nsim+nsim2, 
+                       "simulations are not available; only",
+                       Ncol, "columns provided"))
+          jsim.mean <- nsim + 1:nsim2
+        }
+      }
+      
+      restrict.columns <- !is.null(jsim)
+      dual <- !is.null(jsim.mean)
+
+  } else {
+    # ................ precomputed values ..................
+    # validate weights
+    if(use.weights) 
+      check.nvector(weights, nsim,
+                    things="simulations", naok=TRUE)
+    restrict.columns <- FALSE
+    dual <- FALSE
+  }
+
+  shadenames <- NULL
+  
+  switch(type,
+         pointwise = {
+           # ....... POINTWISE ENVELOPES ...............................
+           if(cheat) {
+             stopifnot(checkfields(precomputed, c("lo", "hi")))
+             lo <- precomputed$lo
+             hi <- precomputed$hi
+           } else {
+             simvals[is.infinite(simvals)] <- NA
+             if(restrict.columns) {
+               simvals <- simvals[,jsim]
+               if(use.weights) weights <- weights[jsim]
+             }
+             nsim <- ncol(simvals)
+             nsim.mean <- NULL
+             if(nrank == 1L) {
+               lohi <- apply(simvals, 1L, range)
+             } else {
+               ## orderstats(x, k) returns the k-th smallest values of x:
+               ## here the nrank-th smallest and the nrank-th largest
+               lohi <- apply(simvals, 1L, orderstats,
+                             k=c(nrank, nsim-nrank+1L))
+             }
+             lo <- lohi[1L,]
+             hi <- lohi[2L,]
+           }
+           lo.name <- "lower pointwise envelope of %s from simulations"
+           hi.name <- "upper pointwise envelope of %s from simulations"
+           ##
+           switch(alternative,
+                  two.sided = { },
+                  less = {
+                    hi <- rep.int(Inf, length(hi))
+                    hi.name <- "infinite upper limit"
+                  },
+                  greater = {
+                    lo <- rep.int(-Inf, length(lo))
+                    lo.name <- "infinite lower limit"
+                  })
+           #
+           if(use.theory) {
+             results <- data.frame(r=rvals,
+                                   obs=fX,
+                                   theo=theory,
+                                   lo=lo,
+                                   hi=hi)
+           } else {
+             m <- if(cheat) precomputed$mmean else 
+                  if(!use.weights) apply(simvals, 1L, mean, na.rm=TRUE) else
+                  apply(simvals, 1L, weighted.mean, w=weights, na.rm=TRUE)
+             results <- data.frame(r=rvals,
+                                   obs=fX,
+                                   mmean=m,
+                                   lo=lo,
+                                   hi=hi)
+           }
+           shadenames <- c("lo", "hi")
+           if(do.pwrong) {
+             # estimate the p-value for the 'wrong test'
+             if(cheat) {
+               pwrong <- precomputed$pwrong
+               do.pwrong <- !is.null(pwrong) && !badprobability(pwrong, FALSE)
+             } else {
+               dataranks <- t(apply(simvals, 1, rank, ties.method="random"))
+               upper.signif <- (dataranks <= nrank)
+               lower.signif <- (dataranks >= nsim-nrank+1L)
+               is.signif <- switch(alternative,
+                                   less = lower.signif,
+                                   greater = upper.signif,
+                                   two.sided = lower.signif | upper.signif)
+               ## matcolany(M) is equivalent to apply(M, 2, any)
+               is.signif.somewhere <- matcolany(is.signif)
+               pwrong <- sum(is.signif.somewhere)/nsim
+             }
+           }
+         },
+         global = {
+           # ..... SIMULTANEOUS ENVELOPES ..........................
+           if(cheat) {
+             # ... use precomputed values ..
+             stopifnot(checkfields(precomputed, c("lo", "hi")))
+             lo <- precomputed$lo
+             hi <- precomputed$hi
+             if(use.theory) {
+               reference <- theory
+             } else {
+               stopifnot(checkfields(precomputed, "mmean"))
+               reference <- precomputed$mmean
+             }
+             nsim.mean <- NULL
+             domain <- rep.int(TRUE, length(rvals))
+           } else {
+             # ... normal case: compute envelopes from simulations
+             if(!is.null(ginterval)) {
+               domain <- (rvals >= ginterval[1L]) & (rvals <= ginterval[2L])
+               funX <- funX[domain, ]
+               simvals <- simvals[domain, ]
+             } else domain <- rep.int(TRUE, length(rvals))
+             simvals[is.infinite(simvals)] <- NA
+             if(use.theory) {
+               reference <- theory[domain]
+               if(restrict.columns) {
+                 simvals <- simvals[, jsim]
+                 if(use.weights) weights <- weights[jsim]
+               }
+               nsim.mean <- NULL
+             } else if(dual) {
+               # Estimate the mean from one set of columns
+               # Form envelopes from another set of columns
+               simvals.mean <- simvals[, jsim.mean]
+               reference <- 
+                 if(!use.weights) apply(simvals.mean, 1L, mean, na.rm=TRUE) else
+                 apply(simvals.mean, 1L, weighted.mean, w=weights[jsim.mean],
+                       na.rm=TRUE)
+               nsim.mean <- ncol(simvals.mean)
+               # retain only columns used for envelope
+               simvals <- simvals[, jsim]
+             } else {
+               # Compute the mean and envelopes using the same data
+               if(restrict.columns) {
+                 simvals <- simvals[, jsim]
+                 if(use.weights) weights <- weights[jsim]
+               }
+               ## mean over the same columns used for the envelope
+               reference <-
+                 if(!use.weights) apply(simvals, 1L, mean, na.rm=TRUE) else
+                 apply(simvals, 1L, weighted.mean, w=weights, na.rm=TRUE)
+               nsim.mean <- NULL
+             }
+             nsim <- ncol(simvals)
+             # compute deviations
+             deviations <- sweep(simvals, 1L, reference)
+             deviations <-
+               switch(alternative,
+                      two.sided = abs(deviations),
+                      greater = if(clamp) pmax(0, deviations) else deviations,
+                      less = if(clamp) pmax(0, -deviations) else (-deviations))
+             deviations <- matrix(deviations,
+                                  nrow=nrow(simvals), ncol=ncol(simvals))
+             ## rescale ?
+             sc <- 1
+             if(!is.null(scale)) {
+               stopifnot(is.function(scale))
+               sc <- scale(rvals)
+               sname <- "scale(r)"
+               ans <- check.nvector(sc, length(rvals), things="values of r",
+                                    fatal=FALSE, vname=sname)
+               if(!ans)
+                 stop(attr(ans, "whinge"), call.=FALSE)
+               if(any(bad <- (sc <= 0))) {
+                 ## issue a warning unless this only happens at r=0
+                 if(any(bad[rvals > 0]))
+                   warning(paste("Some values of", sname,
+                                 "were negative or zero:",
+                                 "scale was reset to 1 for these values"),
+                           call.=FALSE)
+                 sc[bad] <- 1
+               }
+               deviations <- sweep(deviations, 1L, sc, "/")
+             }
+             ## compute max (scaled) deviations
+             suprema <- apply(deviations, 2L, max, na.rm=TRUE)
+             # ranked deviations
+             dmax <- sort(suprema)[nsim-nrank+1L]
+             # simultaneous bands
+             lo <- reference - sc * dmax
+             hi <- reference + sc * dmax
+           }
+
+           lo.name <- "lower critical boundary for %s"
+           hi.name <- "upper critical boundary for %s"
+
+           switch(alternative,
+                  two.sided = { },
+                  less = {
+                    hi <- rep.int(Inf, length(hi))
+                    hi.name <- "infinite upper boundary"
+                  },
+                  greater = {
+                    lo <- rep.int(-Inf, length(lo))
+                    lo.name <- "infinite lower boundary"
+                  })
+
+           if(use.theory)
+             results <- data.frame(r=rvals[domain],
+                                   obs=fX[domain],
+                                   theo=reference,
+                                   lo=lo,
+                                   hi=hi)
+           else
+             results <- data.frame(r=rvals[domain],
+                                   obs=fX[domain],
+                                   mmean=reference,
+                                   lo=lo,
+                                   hi=hi)
+           shadenames <- c("lo", "hi")
+           if(do.pwrong)
+             warning(paste("Argument", sQuote("do.pwrong=TRUE"), "ignored;",
+                           "it is not relevant to global envelopes"))
+         },
+         variance={
+           # ....... POINTWISE MEAN, VARIANCE etc ......................
+           if(cheat) {
+             # .... use precomputed values ....
+             stopifnot(checkfields(precomputed, c("Ef", "varf")))
+             Ef   <- precomputed$Ef
+             varf <- precomputed$varf
+           } else {
+             # .... normal case: compute from simulations
+             simvals[is.infinite(simvals)] <- NA
+             if(restrict.columns) {
+               simvals <- simvals[, jsim]
+               if(use.weights) weights <- weights[jsim]
+             }
+             nsim <- ncol(simvals)
+             if(!use.weights) {
+               Ef   <- apply(simvals, 1L, mean, na.rm=TRUE)
+               varf <- apply(simvals, 1L, var,  na.rm=TRUE)
+             } else {
+               Ef   <- apply(simvals, 1L, weighted.mean, w=weights, na.rm=TRUE)
+               varf <- apply(simvals, 1L, weighted.var,  w=weights, na.rm=TRUE)
+             }
+           }
+           nsim.mean <- NULL
+           # derived quantities
+           sd <- sqrt(varf)
+           stdres <- (fX-Ef)/sd
+           stdres[!is.finite(stdres)] <- NA
+           # critical limits
+           lo <- Ef - nSD * sd
+           hi <- Ef + nSD * sd
+           lo.name <- paste("lower", nSD, "sigma critical limit for %s")
+           hi.name <- paste("upper", nSD, "sigma critical limit for %s")
+           # confidence interval 
+           loCI <- Ef - nSD * sd/sqrt(nsim)
+           hiCI <- Ef + nSD * sd/sqrt(nsim)
+           loCI.name <- paste("lower", nSD, "sigma confidence bound",
+                              "for mean of simulated %s")
+           hiCI.name <- paste("upper", nSD, "sigma confidence bound",
+                              "for mean of simulated %s")
+           ##
+           switch(alternative,
+                  two.sided = { },
+                  less = {
+                    hi <- hiCI <- rep.int(Inf, length(hi))
+                    hi.name <- "infinite upper boundary"
+                    hiCI.name <- "infinite upper confidence limit"
+                  },
+                  greater = {
+                    lo <- loCI <- rep.int(-Inf, length(lo))
+                    lo.name <- "infinite lower boundary"
+                    loCI.name <- "infinite lower confidence limit"
+                  })
+           # put together
+           if(use.theory) {
+             results <- data.frame(r=rvals,
+                                   obs=fX,
+                                   theo=theory,
+                                   lo=lo,
+                                   hi=hi)
+             shadenames <- c("lo", "hi")
+             morestuff <- data.frame(mmean=Ef,
+                                     var=varf,
+                                     res=fX-Ef,
+                                     stdres=stdres,
+                                     loCI=loCI,
+                                     hiCI=hiCI)
+             loCIlabel <- if(alternative == "greater") "-infinity" else
+                         makefvlabel(NULL, NULL, fname, "loCI")
+             hiCIlabel <- if(alternative == "less") "infinity" else 
+                         makefvlabel(NULL, NULL, fname, "hiCI")
+             mslabl <- c(makefvlabel(NULL, "bar", fname),
+                         makefvlabel("var", "hat", fname),
+                         makefvlabel("res", "hat", fname),
+                         makefvlabel("stdres", "hat", fname),
+                         loCIlabel,
+                         hiCIlabel)
+             wted <- if(use.weights) "weighted " else NULL
+             msdesc <- c(paste0(wted, "sample mean of %s from simulations"),
+                         paste0(wted, "sample variance of %s from simulations"),
+                         "raw residual",
+                         "standardised residual",
+                         loCI.name, hiCI.name)
+           } else {
+             results <- data.frame(r=rvals,
+                                   obs=fX,
+                                   mmean=Ef,
+                                   lo=lo,
+                                   hi=hi)
+             shadenames <- c("lo", "hi")
+             morestuff <- data.frame(var=varf,
+                                     res=fX-Ef,
+                                     stdres=stdres,
+                                     loCI=loCI,
+                                     hiCI=hiCI)
+             loCIlabel <- if(alternative == "greater") "-infinity" else
+                         makefvlabel(NULL, NULL, fname, "loCI")
+             hiCIlabel <- if(alternative == "less") "infinity" else 
+                         makefvlabel(NULL, NULL, fname, "hiCI")
+             mslabl <- c(makefvlabel("var", "hat", fname),
+                         makefvlabel("res", "hat", fname),
+                         makefvlabel("stdres", "hat", fname),
+                         loCIlabel,
+                         hiCIlabel)
+             msdesc <- c(paste0(if(use.weights) "weighted " else NULL,
+                                "sample variance of %s from simulations"),
+                         "raw residual",
+                         "standardised residual",
+                         loCI.name, hiCI.name)
+           }
+           if(do.pwrong) {
+             # estimate the p-value for the 'wrong test'
+             if(cheat) {
+               pwrong <- precomputed$pwrong
+               do.pwrong <- !is.null(pwrong) && !badprobability(pwrong, FALSE)
+             } else {
+               upper.signif <- (simvals > hi)
+               lower.signif <- (simvals < lo)
+               is.signif <- switch(alternative,
+                                   less = lower.signif,
+                                   greater = upper.signif,
+                                   two.sided = lower.signif | upper.signif)
+               ## matcolany(M) is equivalent to apply(M, 2, any)
+               is.signif.somewhere <- matcolany(is.signif)
+               pwrong <- sum(is.signif.somewhere)/nsim
+             }
+           }
+         }
+         )
+
+  ############  WRAP UP #########################
+
+  if(use.theory) {
+    # reference is computed curve `theo'
+    reflabl <- makefvlabel(NULL, NULL, fname, "theo")
+    refdesc <- paste0("theoretical value of %s", if(csr) " for CSR" else NULL)
+  } else {
+    # reference is sample mean of simulations
+    reflabl <- makefvlabel(NULL, "bar", fname)
+    refdesc <- paste0(if(use.weights) "weighted " else NULL,
+                      "sample mean of %s from simulations")
+  }
+
+  lolabl <- if(alternative == "greater") "-infinity" else
+             makefvlabel(NULL, "hat", fname, "lo")
+  hilabl <- if(alternative == "less") "infinity" else
+             makefvlabel(NULL, "hat", fname, "hi")
+
+  result <- fv(results,
+               argu="r",
+               ylab=atr$ylab,
+               valu="obs",
+               fmla= deparse(. ~ r),
+               alim=intersect.ranges(atr$alim, range(results$r)),
+               labl=c("r",
+                 makefvlabel(NULL, "hat", fname, "obs"),
+                 reflabl,
+                 lolabl,
+                 hilabl),
+               desc=c("distance argument r",
+                 "observed value of %s for data pattern",
+                 refdesc, lo.name, hi.name),
+               fname=atr$fname,
+               yexp =atr$yexp)
+
+  # columns to be plotted by default
+  dotty <- c("obs", if(use.theory) "theo" else "mmean", "hi", "lo")
+
+  if(type == "variance") {
+    # add more stuff
+    result <- bind.fv(result, morestuff, mslabl, msdesc)
+    if(use.theory) dotty <- c(dotty, "mmean")
+  }
+
+  fvnames(result, ".") <- dotty
+  fvnames(result, ".s") <- shadenames
+
+  unitname(result) <- unitname(funX)
+  class(result) <- c("envelope", class(result))
+
+  # tack on envelope information
+  attr(result, "einfo") <- list(global = (type =="global"),
+                                ginterval = ginterval,
+                                alternative=alternative,
+                                scale = scale,
+                                clamp = clamp,
+                                csr = csr,
+                                use.theory = use.theory,
+                                csr.theo = csr && use.theory,
+                                simtype = "funs",
+                                constraints = "",
+                                nrank = nrank,
+                                nsim = nsim,
+                                VARIANCE = (type == "variance"),
+                                nSD = nSD,
+                                valname = NULL,
+                                dual = dual,
+                                nsim2 = nsim.mean,
+                                Yname = Yname,
+                                do.pwrong=do.pwrong,
+                                use.weights=use.weights)
+
+  # tack on saved functions
+  if(savefuns) {
+    alldata <- cbind(rvals, simvals)
+    simnames <- paste("sim", 1:nsim, sep="")
+    colnames(alldata) <- c("r", simnames)
+    alldata <- as.data.frame(alldata)
+    SimFuns <- fv(alldata,
+                  argu="r",
+                  ylab=atr$ylab,
+                  valu="sim1",
+                  fmla= deparse(. ~ r),
+                  alim=atr$alim,
+                  labl=names(alldata),
+                  desc=c("distance argument r",
+                          paste("Simulation ", 1:nsim, sep="")),
+                  unitname=unitname(funX))
+    fvnames(SimFuns, ".") <- simnames
+    attr(result, "simfuns") <- SimFuns
+  }
+  if(do.pwrong)
+    attr(result, "pwrong") <- pwrong
+  if(use.weights)
+    attr(result, "weights") <- weights
+  return(result)
+}
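+
+# A minimal sketch of direct use (assuming 'simvals' is an nr x nsim
+# matrix of simulated function values and 'funX' the fv object for the
+# observed data, as assembled by the envelope engine):
+#
+#   env <- envelope.matrix(simvals, funX=funX, type="global",
+#                          nrank=1, Yname="mydata")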
+
+envelope.envelope <- function(Y, fun=NULL, ...,
+                              transform=NULL, global=FALSE, VARIANCE=FALSE) {
+
+  Yname <- short.deparse(substitute(Y))
+
+  stopifnot(inherits(Y, "envelope"))
+  Yorig <- Y
+
+  aargh <- list(...)
+
+  X  <- attr(Y, "datapattern")
+  sf <- attr(Y, "simfuns")
+  sp <- attr(Y, "simpatterns")
+  wt <- attr(Y, "weights")
+  einfo <- attr(Y, "einfo")
+
+  csr <- aargh$internal$csr %orifnull% einfo$csr
+
+  if(is.null(fun) && is.null(sf)) {
+    # No simulated functions - must compute them from simulated patterns
+    if(is.null(sp))
+      stop(paste("Cannot compute envelope:",
+                 "Y does not contain simulated functions",
+                 "(was not generated with savefuns=TRUE)",
+                 "and does not contain simulated patterns",
+                 "(was not generated with savepatterns=TRUE)"))
+    # set default fun=Kest
+    fun <- Kest
+  }
+  
+  if(!is.null(fun)) {
+    # apply new function 
+    # point patterns are required
+    if(is.null(sp))
+      stop(paste("Object Y does not contain simulated point patterns",
+                 "(attribute", dQuote("simpatterns"), ");",
+                 "cannot apply a new", sQuote("fun")))
+    if(is.null(X))
+      stop(paste0("Cannot apply a new ", sQuote("fun"),
+                  "; object Y was generated by an older version of spatstat"))
+    ## send signal if simulations were CSR
+    internal <- aargh$internal
+    if(csr) {
+        if(is.null(internal)) internal <- list()
+        internal$csr <- TRUE
+    }
+    ## compute new envelope
+    result <- do.call(envelope,
+                      resolve.defaults(list(Y=X, fun=fun, simulate=sp),
+                                       aargh,
+                                       list(transform=transform,
+                                            global=global,
+                                            VARIANCE=VARIANCE,
+                                            internal=internal,
+                                            Yname=Yname,
+                                            nsim=einfo$nsim,
+                                            nsim2=einfo$nsim2,
+                                            weights=wt),
+                                       .StripNull=TRUE))
+  } else {
+    # compute new envelope with existing simulated functions
+    if(is.null(sf)) 
+      stop(paste("Y does not contain a", dQuote("simfuns"), "attribute",
+                 "(it was not generated with savefuns=TRUE)"))
+
+    if(!is.null(transform)) {
+      # Apply the transformation to both Y and sf
+      stopifnot(is.expression(transform))
+      cc <- inject.expr("with(Y, .)", transform)
+      Y <- eval(cc)
+      cc <- inject.expr("with(sf, .)", transform)
+      sf <- eval(cc)
+    }
+
+    # extract simulated function values
+    df <- as.data.frame(sf)
+    rname <- fvnames(sf, ".x")
+    df <- df[, (names(df) != rname)]
+
+    # interface with 'envelope.matrix'
+    etype <- if(global) "global" else if(VARIANCE) "variance" else "pointwise"
+    result <- do.call(envelope.matrix,
+                      resolve.defaults(list(Y=as.matrix(df)),
+                                       aargh,
+                                       list(type=etype,
+                                            csr=csr,
+                                            funX=Y, 
+                                            Yname=Yname,
+                                            weights=wt),
+                                       .StripNull=TRUE))
+  }
+
+  if(!is.null(transform)) {
+    # post-process labels
+    labl <- attr(result, "labl")
+    dnames <- colnames(result)
+    dnames <- dnames[dnames %in% fvnames(result, ".")]
+    # expand "."
+    ud <- as.call(lapply(c("cbind", dnames), as.name))
+    dont.complain.about(ud)
+    expandtransform <- eval(substitute(substitute(tr, list(.=ud)),
+                                       list(tr=transform[[1L]])))
+    # compute new labels 
+    attr(result, "fname") <- attr(Yorig, "fname")
+    mathlabl <- as.character(fvlegend(result, expandtransform))
+    # match labels to columns
+    evars <- all.vars(expandtransform)
+    used.dotnames <- evars[evars %in% dnames]
+    mathmap <- match(colnames(result), used.dotnames)
+    okmath <- !is.na(mathmap)
+    # update appropriate labels
+    labl[okmath] <- mathlabl[mathmap[okmath]]
+    attr(result, "labl") <- labl
+  }
+  
+  # Tack on envelope info
+  copyacross <- c("Yname", "csr.theo", "use.theory", "simtype", "constraints")
+  attr(result, "einfo")[copyacross] <- attr(Yorig, "einfo")[copyacross]
+  attr(result, "einfo")$csr <- csr
+
+  return(result)
+}
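+
+# Sketches of re-use (assuming E was created with savepatterns=TRUE,
+# needed to apply a new summary function, or savefuns=TRUE, needed to
+# change the envelope type or transform the stored functions):
+#
+#   E2 <- envelope(E, fun=Gest)                         # saved patterns
+#   E3 <- envelope(E, global=TRUE)                      # saved functions
+#   E4 <- envelope(E, transform=expression(sqrt(./pi))) # saved functions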
+
+pool.envelope <- local({
+
+  pool.envelope <- function(..., savefuns=FALSE, savepatterns=FALSE) {
+    Yname <- short.deparse(sys.call())
+    if(nchar(Yname) > 60) Yname <- paste(substr(Yname, 1L, 40L), "[..]")
+    Elist <- unname(list(...))
+    nE <-  length(Elist)
+    if(nE == 0) return(NULL)
+    #' ........ validate envelopes .....................
+    #' All arguments must be envelopes
+    notenv <- !unlist(lapply(Elist, inherits, what="envelope"))
+    if(any(notenv)) {
+      n <- sum(notenv)
+      why <- paste(ngettext(n, "Argument", "Arguments"),
+                   commasep(which(notenv)),
+                   ngettext(n, "does not", "do not"),
+                   "belong to the class",
+                   dQuote("envelope"))
+      stop(why)
+    }
+    ## Only one envelope?
+    if(nE == 1)
+      return(Elist[[1L]])
+    ## envelopes must be compatible
+    ok <- do.call(compatible, Elist)
+    if(!ok)
+      stop("Envelopes are not compatible")
+    ## ... reconcile parameters in different envelopes .......
+    eilist <- lapply(Elist, attr, which="einfo")
+    global      <- resolveEinfo(eilist, "global",      FALSE)
+    ginterval   <- resolveEinfo(eilist, "ginterval",   NULL, atomic=FALSE)
+    VARIANCE    <- resolveEinfo(eilist, "VARIANCE",    FALSE)
+    alternative <- resolveEinfo(eilist, "alternative", "two.sided")
+    scale       <- resolveEinfo(eilist, "scale",       NULL, atomic=FALSE)
+    clamp       <- resolveEinfo(eilist, "clamp",       FALSE)
+    resolveEinfo(eilist, "simtype",  "funs",
+                 "Envelopes were generated using different types of simulation")
+    resolveEinfo(eilist, "constraints",  "",
+            "Envelopes were generated using different types of conditioning")
+    resolveEinfo(eilist, "csr.theo", FALSE, NULL)
+    csr         <- resolveEinfo(eilist, "csr", FALSE, NULL)
+    use.weights <- resolveEinfo(eilist, "use.weights" , FALSE,
+     "Weights were used in some, but not all, envelopes: they will be ignored")
+    use.theory <- resolveEinfo(eilist, "use.theory", csr, NULL)
+    ##
+    weights <-
+      if(use.weights) unlist(lapply(Elist, attr, which="weights")) else NULL
+    type <- if(global) "global" else if(VARIANCE) "variance" else "pointwise"
+    
+    ## ........ validate saved functions .....................
+    if(savefuns || !VARIANCE) {
+      ## Individual simulated functions are required
+      SFlist <- lapply(Elist, attr, which="simfuns")
+      isnul <- unlist(lapply(SFlist, is.null))
+      if(any(isnul)) {
+        n <- sum(isnul)
+        comply <- if(!VARIANCE) "compute the envelope:" else
+                  "save the simulated functions:"
+        why <- paste("Cannot", comply,
+                     ngettext(n, "argument", "arguments"),
+                     commasep(which(isnul)),
+                     ngettext(n, "does not", "do not"),
+                     "contain a", dQuote("simfuns"), "attribute",
+                     "(not generated with savefuns=TRUE)")
+        stop(why)
+      }
+      ## Simulated functions must be the same function
+      fnames <- unique(lapply(SFlist, attr, which="fname"))
+      if(length(fnames) > 1L) {
+        fnames <- unlist(lapply(fnames, flatfname))
+        stop(paste("Envelope objects contain values",
+                   "of different functions:",
+                   commasep(sQuote(fnames))))
+      }
+      ## vectors of r values must be identical
+      rlist <- lapply(SFlist, getrvals)
+      rvals <- rlist[[1L]]
+      samer <- unlist(lapply(rlist, identical, y=rvals))
+      if(!all(samer))
+        stop(paste("Simulated function values are not compatible",
+                   "(different values of function argument)"))
+      ## Extract function values and assemble into one matrix
+      matlist <- lapply(SFlist, getdotvals)
+      SFmatrix <- do.call(cbind, matlist)
+    }
+    ## compute pooled envelope
+    switch(type,
+           pointwise = {
+             result <- envelope(SFmatrix, funX=Elist[[1L]],
+                                type=type, alternative=alternative,
+                                clamp=clamp,
+                                csr=csr, use.theory=use.theory,
+                                Yname=Yname, weights=weights,
+                                savefuns=savefuns)
+           },
+           global = {
+             simfunmatrix <- if(is.null(ginterval)) SFmatrix else {
+               ## savefuns have not yet been clipped to ginterval
+               ## while envelope data have been clipped.
+               domain <- (rvals >= ginterval[1L]) & (rvals <= ginterval[2L])
+               SFmatrix[domain, , drop=FALSE]
+             }
+             result <- envelope(simfunmatrix, funX=Elist[[1L]],
+                                type=type, alternative=alternative,
+                                scale=scale, clamp=clamp,
+                                csr=csr, use.theory=use.theory,
+                                ginterval=ginterval,
+                                Yname=Yname, weights=weights,
+                                savefuns=savefuns)
+           },
+           variance = {
+             ## Pool sample means and variances
+             nsims <- unlist(lapply(eilist, getElement, name="nsim"))
+             mmeans <- lapply(Elist, getElement, name="mmean")
+             vars   <- lapply(Elist, getElement, name="var")
+             mmeans <- matrix(unlist(mmeans), ncol=nE)
+             vars   <- matrix(unlist(vars),   ncol=nE)
+             if(!use.weights) {
+               w.mean <- nsims
+               d.mean <- sum(nsims)
+               w.var  <- nsims - 1
+               d.var  <- sum(nsims) - 1
+             } else {
+               weightlist <- lapply(Elist, attr, which="weights")
+               w.mean <- unlist(lapply(weightlist, sum))
+               d.mean <- sum(w.mean)
+               ## meansqfrac(x) = sum((x/sum(x))^2), defined below
+               ssw <- unlist(lapply(weightlist, meansqfrac))
+               w.var  <- w.mean * (1 - ssw)
+               d.var <-  d.mean * (1 - sum(ssw))
+             }
+             poolmmean <- as.numeric(mmeans %*% matrix(w.mean/d.mean, ncol=1L))
+             within <- vars %*% matrix(w.var, ncol=1L)
+             between <- ((mmeans - poolmmean[])^2) %*% matrix(w.mean, ncol=1L)
+             poolvar <- as.numeric((within + between)/d.var)
+             ## feed precomputed data to envelope.matrix
+             pc <- list(Ef=poolmmean[],
+                        varf=poolvar[])
+             nsim <- sum(nsims)
+             result <- envelope.matrix(NULL, funX=Elist[[1L]],
+                                       type=type, alternative=alternative,
+                                       csr=csr, Yname=Yname,
+                                       weights=weights,
+                                       savefuns=savefuns,
+                                       nsim=nsim,
+                                       precomputed=pc)
+           })
+  
+    ## Copy envelope info that is not handled by envelope.matrix
+    copyacross <- c("Yname", "csr.theo", "use.theory", "simtype", "constraints")
+    attr(result, "einfo")[copyacross] <- attr(Elist[[1L]], "einfo")[copyacross]
+  
+    ## ..............saved patterns .....................
+    if(savepatterns) {
+      SPlist <- lapply(Elist, attr, which="simpatterns")
+      isnul <- unlist(lapply(SPlist, is.null))
+      if(any(isnul)) {
+        n <- sum(isnul)
+        why <- paste("Cannot save the simulated patterns:",
+                     ngettext(n, "argument", "arguments"),
+                     commasep(which(isnul)),
+                     ngettext(n, "does not", "do not"),
+                     "contain a", dQuote("simpatterns"), "attribute",
+                     "(not generated with savepatterns=TRUE)")
+        warning(why)
+      } else {
+        attr(result, "simpatterns") <- Reduce(append, SPlist)
+      }
+    }
+    ## ..............saved summary functions ................
+    if(savefuns) {
+      alldata <- cbind(rvals, SFmatrix)
+      Nsim <- ncol(SFmatrix)
+      simnames <- paste0("sim", 1:Nsim)
+      colnames(alldata) <- c("r", simnames)
+      alldata <- as.data.frame(alldata)
+      SFtemplate <- SFlist[[1L]]
+      SimFuns <- fv(alldata,
+                    argu="r",
+                    ylab=attr(SFtemplate, "ylab"),
+                    valu="sim1",
+                    fmla= deparse(. ~ r),
+                    alim=attr(SFtemplate, "alim"),
+                    labl=names(alldata),
+                    desc=c("distance argument r",
+                      paste("Simulation ", 1:Nsim, sep="")),
+                    fname=attr(SFtemplate, "fname"),
+                    yexp=attr(SFtemplate, "yexp"),
+                    unitname=unitname(SFtemplate))
+      fvnames(SimFuns, ".") <- simnames
+      attr(result, "simfuns") <- SimFuns
+    } 
+  
+    dotnames   <- lapply(Elist, fvnames, a=".")
+    dn <- dotnames[[1L]]
+    if(all(unlist(lapply(dotnames, identical, y=dn))))
+      fvnames(result, ".") <- dn
+    
+    shadenames <- lapply(Elist, fvnames, a=".s")
+    sh <- shadenames[[1L]]
+    if(all(unlist(lapply(shadenames, identical, y=sh))))
+      fvnames(result, ".s") <- sh
+  
+    return(result)
+  }
+
+  getrvals <- function(z) { as.matrix(z)[, fvnames(z, ".x")] }
+  
+  getdotvals <- function(z) { as.matrix(z)[, fvnames(z, "."), drop=FALSE] }
+
+  meansqfrac <- function(x) {sum((x/sum(x))^2)} 
+
+  pool.envelope
+})
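+
+# A minimal sketch of pooling (assuming each envelope was generated
+# with savefuns=TRUE so the simulated curves can be combined):
+#
+#   E1 <- envelope(cells, Kest, nsim=50, savefuns=TRUE)
+#   E2 <- envelope(cells, Kest, nsim=50, savefuns=TRUE)
+#   E  <- pool(E1, E2)   # envelope based on all 100 simulations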
+
+# resolve matching entries in different envelope objects
+#   x is a list of envelope info objects
+
+resolveEinfo <- function(x, what, fallback, warn, atomic=TRUE) {
+  if(atomic) {
+    y <- unique(unlist(lapply(x, getElement, name=what)))
+    if(length(y) == 1L)
+      return(y)
+  } else {
+    y <- unique(lapply(x, getElement, name=what))
+    if(length(y) == 1L)
+      return(y[[1L]])
+  }
+  if(missing(warn))
+    warn <- paste("Envelopes were generated using different values",
+                  "of argument", paste(sQuote(what), ";", sep=""),
+                  "reverting to default value")
+  if(!is.null(warn))
+    warning(warn, call.=FALSE)
+  return(fallback)
+}
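+
+# Illustration of the resolution rule: a value shared by all envelopes
+# is returned; conflicting values yield the fallback (with a warning).
+#
+#   resolveEinfo(list(list(global=TRUE), list(global=TRUE)),
+#                "global", FALSE)   # -> TRUE
+#   resolveEinfo(list(list(global=TRUE), list(global=FALSE)),
+#                "global", FALSE)   # -> FALSE, after a warning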
+
+as.data.frame.envelope <- function(x, ..., simfuns=FALSE) {
+  if(simfuns && !is.null(sf <- attr(x, "simfuns"))) {
+    # tack on the simulated functions as well
+    y <- as.data.frame(bind.fv(x, sf, clip=TRUE))
+    return(y)
+  } 
+  NextMethod("as.data.frame")
+}
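+
+# Typical export (assuming E carries saved simulated functions):
+#
+#   df <- as.data.frame(E, simfuns=TRUE)  # includes each simulated curve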
diff --git a/R/envelope3.R b/R/envelope3.R
new file mode 100755
index 0000000..264765b
--- /dev/null
+++ b/R/envelope3.R
@@ -0,0 +1,78 @@
+#
+#   envelope3.R
+#
+#   simulation envelopes for pp3 
+#
+#   $Revision: 1.13 $  $Date: 2016/04/25 02:34:40 $
+#
+
+envelope.pp3 <-
+  function(Y, fun=K3est, nsim=99, nrank=1, ...,
+           funargs=list(), funYargs=funargs,
+           simulate=NULL, verbose=TRUE, 
+           transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL,
+           alternative=c("two.sided", "less", "greater"),
+           scale=NULL, clamp=FALSE,
+           savefuns=FALSE, savepatterns=FALSE, nsim2=nsim,
+           VARIANCE=FALSE, nSD=2,
+           Yname=NULL, maxnerr=nsim,
+           do.pwrong=FALSE, envir.simul=NULL) {
+  cl <- short.deparse(sys.call())
+  if(is.null(Yname)) Yname <- short.deparse(substitute(Y))
+  if(is.null(fun)) fun <- K3est
+
+  if("clipdata" %in% names(list(...)))
+    stop(paste("The argument", sQuote("clipdata"),
+               "is not available for envelope.pp3"))
+  
+  envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame()
+  envir.here <- sys.frame(sys.nframe())
+  
+  if(is.null(simulate)) {
+    # ...................................................
+    # Realisations of complete spatial randomness
+    # will be generated by rpoispp
+    # Data pattern X is argument Y
+    # Data pattern determines intensity of Poisson process
+    X <- Y
+    sY <- summary(Y)
+    Yintens <- sY$intensity
+    Ydomain <- Y$domain
+    # expression that will be evaluated
+    simexpr <- 
+      if(!is.marked(Y)) {
+        # unmarked point pattern
+        expression(rpoispp3(Yintens, domain=Ydomain))
+      } else {
+        stop("Sorry, simulation of marked 3D point patterns is not yet implemented")
+      }
+    # suppress warnings from code checkers
+    dont.complain.about(Yintens, Ydomain)
+    # evaluate in THIS environment
+    simrecipe <- simulrecipe(type = "csr",
+                             expr = simexpr,
+                             envir = envir.here,
+                             csr   = TRUE)
+  } else {
+    # ...................................................
+    # Simulations are determined by 'simulate' argument
+    # Processing is deferred to envelopeEngine
+    simrecipe <- simulate
+    # Data pattern is argument Y
+    X <- Y
+  }
+  envelopeEngine(X=X, fun=fun, simul=simrecipe,
+                 nsim=nsim, nrank=nrank, ...,
+                 funargs=funargs, funYargs=funYargs,
+                 verbose=verbose, clipdata=FALSE,
+                 transform=transform,
+                 global=global, ginterval=ginterval, use.theory=use.theory,
+                 alternative=alternative, scale=scale, clamp=clamp,
+                 savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2,
+                 VARIANCE=VARIANCE, nSD=nSD,
+                 Yname=Yname, maxnerr=maxnerr, cl=cl,
+                 envir.user=envir.user,
+                 expected.arg=c("rmax", "nrval"),
+                 do.pwrong=do.pwrong)
+}
+
diff --git a/R/envelopeArray.R b/R/envelopeArray.R
new file mode 100644
index 0000000..f7ad894
--- /dev/null
+++ b/R/envelopeArray.R
@@ -0,0 +1,95 @@
+#
+#      envelopeArray.R
+#
+#   $Revision: 1.1 $   $Date: 2017/06/05 10:31:58 $
+#
+#
+                                  
+envelopeArray <- function(X, fun, ...,
+                          dataname=NULL, verb=FALSE, reuse=TRUE) {
+#'
+  if(is.null(dataname))
+    dataname <- short.deparse(substitute(X))
+
+#' determine function name
+  f.is.name <- is.name(substitute(fun))
+  fname <-
+    if(f.is.name)
+      paste(as.name(substitute(fun)))
+    else if(is.character(fun))
+      fun
+    else sQuote("fun") 
+
+#' determine function to be called
+
+  if(is.character(fun)) {
+    fun <- get(fun, mode="function")
+  } else if(!is.function(fun)) 
+    stop(paste(sQuote("fun"), "should be a function or a character string"))
+
+#' Apply function to data pattern, to test it
+#' and to determine array dimensions, margin labels etc.
+
+  fX <- do.call.matched(fun, append(list(X), list(...)), matchfirst=TRUE)
+  if(!inherits(fX, "fasp"))
+     stop("function did not return an object of class 'fasp'")
+
+  d <- dim(fX)
+  witch <- matrix(1:prod(d), nrow=d[1L], ncol=d[2L],
+                  dimnames=dimnames(fX))
+
+#' make function that extracts [i,j] entry of result
+
+   ijfun <- function(X, ..., i=1, j=1, expectdim=d) {
+     fX <- fun(X, ...)
+     if(!inherits(fX, "fasp"))
+       stop("function did not return an object of class 'fasp'")
+     if(!all(dim(fX) == expectdim))
+       stop("function returned an array with different dimensions")
+     return(fX[i,j])
+   }
+   
+  # ------------ start computing -------------------------------  
+  if(reuse) {
+    L <- do.call(spatstat::envelope,
+                 resolve.defaults(
+                                  list(X, fun=ijfun),
+                                  list(internal=list(eject="patterns")),
+                                  list(...),
+                                  list(verbose=verb)))
+    intern <- attr(L, "internal")
+  } else intern <- L <- NULL
+
+  # compute function array and build up 'fasp' object
+  fns  <- list()
+  k   <- 0
+
+  for(i in 1:nrow(witch)) {
+    for(j in 1:ncol(witch)) {
+      if(verb) cat("i =",i,"j =",j,"\n")
+      currentfv <- 
+        do.call(spatstat::envelope,
+                resolve.defaults(
+                                 list(X, ijfun),
+                                 list(simulate=L, internal=intern),
+                                 list(verbose=FALSE),
+                                 list(...),
+                                 list(Yname=dataname),
+                                 list(i=i, j=j)))
+      k <- k+1
+      fns[[k]] <- as.fv(currentfv)
+    }
+  }
+
+  # wrap up into 'fasp' object
+  title <- paste("array of envelopes of", fname,
+                 "for", dataname)
+  
+  rslt <- fasp(fns, which=witch,
+               formulae=NULL,
+               dataname=dataname,
+               title=title,
+               checkfv=FALSE)
+  return(rslt)
+}
+
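+#' Illustrative usage sketch (assumes the multitype dataset 'amacrine'
+#' shipped with spatstat; alltypes() returns the 'fasp' array that
+#' envelopeArray() requires of 'fun'):
+if(FALSE) {
+  E <- envelopeArray(amacrine, alltypes, nsim=19)
+  plot(E)
+}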
diff --git a/R/envelopelpp.R b/R/envelopelpp.R
new file mode 100755
index 0000000..f8c093e
--- /dev/null
+++ b/R/envelopelpp.R
@@ -0,0 +1,213 @@
+#
+#  envelopelpp.R
+#
+#  $Revision: 1.23 $   $Date: 2016/11/23 08:10:44 $
+#
+#  Envelopes for 'lpp' objects
+#
+#
+
+envelope.lpp <-
+  function(Y, fun=linearK, nsim=99, nrank=1, ...,
+           funargs=list(), funYargs=funargs,
+           simulate=NULL, fix.n=FALSE, fix.marks=FALSE,
+           verbose=TRUE, 
+           transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL,
+           alternative=c("two.sided", "less", "greater"),
+           scale=NULL, clamp=FALSE,
+           savefuns=FALSE, savepatterns=FALSE, nsim2=nsim,
+           VARIANCE=FALSE, nSD=2,
+           Yname=NULL, do.pwrong=FALSE, envir.simul=NULL) {
+  cl <- short.deparse(sys.call())
+  if(is.null(Yname)) Yname <- short.deparse(substitute(Y))
+  if(is.null(fun)) fun <- linearK
+
+  if("clipdata" %in% names(list(...)))
+    stop(paste("The argument", sQuote("clipdata"),
+               "is not available for envelope.lpp"))
+  
+  envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame()
+  envir.here <- sys.frame(sys.nframe())
+  
+  if(!is.null(simulate)) {
+    # ...................................................
+    # Simulations are determined by 'simulate' argument
+    # Processing is deferred to envelopeEngine
+    simrecipe <- simulate
+    # Data pattern is argument Y
+    X <- Y
+  } else if(!fix.n && !fix.marks) {
+    # ...................................................
+    # Realisations of complete spatial randomness
+    # Data pattern X is argument Y
+    # Data pattern determines intensity of Poisson process
+    X <- Y
+    nY <- npoints(Y)
+    Yintens <- intensity(unmark(Y))
+    Ymarx <- marks(Y)
+    NETWORK <- Y$domain
+    dont.complain.about(nY, Yintens, NETWORK)
+    ## expression that will be evaluated
+    simexpr <- if(is.null(Ymarx)) {
+      #' unmarked point pattern
+      expression(rpoislpp(Yintens, NETWORK))
+    } else if(is.null(dim(Ymarx))) {
+      #' single column of marks
+      expression({
+        A <- rpoislpp(Yintens, NETWORK);
+        j <- sample(nY, npoints(A), replace=TRUE);
+        A %mark% Ymarx[j]
+      })
+    } else {
+      #' multiple columns of marks
+      expression({
+        A <- rpoislpp(Yintens, NETWORK);
+        j <- sample(nY, npoints(A), replace=TRUE);
+        A %mark% Ymarx[j, , drop=FALSE]
+      })
+    }
+    # evaluate in THIS environment
+    simrecipe <- simulrecipe(type = "csr",
+                             expr = simexpr,
+                             envir = envir.here,
+                             csr   = TRUE)
+  } else if(!fix.marks) {
+    # Fixed number of points, but random locations and marks
+    # Data pattern X is argument Y
+    X <- Y
+    nY <- npoints(Y)
+    Ymarx <- marks(Y)
+    NETWORK <- Y$domain
+    dont.complain.about(nY, NETWORK)
+    # expression that will be evaluated
+    simexpr <- if(is.null(Ymarx)) {
+      ## unmarked
+      expression(runiflpp(nY, NETWORK))
+    } else if(is.null(dim(Ymarx))) {
+      ## single column of marks
+      expression({
+        A <- runiflpp(nY, NETWORK);
+        j <- sample(nY, npoints(A), replace=TRUE);
+        A %mark% Ymarx[j]
+      })
+    } else {
+      ## multiple columns of marks
+      expression({
+        A <- runiflpp(nY, NETWORK);
+        j <- sample(nY, npoints(A), replace=TRUE);
+        A %mark% Ymarx[j, , drop=FALSE]
+      })
+    }
+    # evaluate in THIS environment
+    simrecipe <- simulrecipe(type = "csr",
+                             expr = simexpr,
+                             envir = envir.here,
+                             csr   = TRUE)
+  } else {
+    # ...................................................
+    # Randomised locations only; 
+    # fixed number of points and fixed marks
+    # Data pattern X is argument Y
+    X <- Y
+    nY <- npoints(Y)
+    Ymarx <- marks(Y)
+    NETWORK <- Y$domain
+    # expression that will be evaluated
+    simexpr <- expression(runiflpp(nY, NETWORK) %mark% Ymarx)
+    dont.complain.about(nY, Ymarx, NETWORK)
+    # evaluate in THIS environment
+    simrecipe <- simulrecipe(type = "csr",
+                             expr = simexpr,
+                             envir = envir.here,
+                             csr   = TRUE)
+  }
+  envelopeEngine(X=X, fun=fun, simul=simrecipe,
+                 nsim=nsim, nrank=nrank, ...,
+                 funargs=funargs, funYargs=funYargs,
+                 verbose=verbose, clipdata=FALSE,
+                 transform=transform,
+                 global=global, ginterval=ginterval, use.theory=use.theory,
+                 alternative=alternative, scale=scale, clamp=clamp, 
+                 savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2,
+                 VARIANCE=VARIANCE, nSD=nSD,
+                 Yname=Yname, cl=cl,
+                 envir.user=envir.user, do.pwrong=do.pwrong)
+}
+
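+# Illustrative usage sketch (assumes the 'spiders' point pattern on a
+# linear network, shipped with spatstat):
+if(FALSE) {
+  E <- envelope(spiders, linearK, nsim=19, fix.n=TRUE)
+  plot(E)
+}
+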
+envelope.lppm <-
+  function(Y, fun=linearK, nsim=99, nrank=1, ..., 
+           funargs=list(), funYargs=funargs,
+           simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, 
+           transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL,
+           alternative=c("two.sided", "less", "greater"),
+           scale=NULL, clamp=FALSE, 
+           savefuns=FALSE, savepatterns=FALSE, nsim2=nsim,
+           VARIANCE=FALSE, nSD=2,
+           Yname=NULL, do.pwrong=FALSE, envir.simul=NULL) {
+  cl <- short.deparse(sys.call())
+  if(is.null(Yname)) Yname <- short.deparse(substitute(Y))
+  if(is.null(fun)) fun <- linearK
+
+  if("clipdata" %in% names(list(...)))
+    stop(paste("The argument", sQuote("clipdata"),
+               "is not available for envelope.pp3"))
+
+  envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame()
+  envir.here <- sys.frame(sys.nframe())
+  
+  if(!is.null(simulate)) {
+    # ...................................................
+    # Simulations are determined by 'simulate' argument
+    # Processing is deferred to envelopeEngine
+    simrecipe <- simulate
+    # Data pattern is argument Y
+    X <- Y
+  } else {
+    ## ...................................................
+    ## Simulation of the fitted model Y
+    if(!is.poisson(Y))
+      stop("Simulation of non-Poisson models is not yet implemented")
+    MODEL <- Y
+    X <- Y$X
+    NETWORK <- domain(X)
+    lambdaFit <- predict(MODEL)
+    Xmarx <- marks(X)
+    nX <- if(!is.marked(X)) npoints(X) else table(marks(X))
+    dont.complain.about(NETWORK, Xmarx, nX)
+    #' 
+    if(!fix.n && !fix.marks) {
+      #' Unconstrained simulations
+      LMAX <-
+        if(is.im(lambdaFit)) max(lambdaFit) else sapply(lambdaFit, max)
+      dont.complain.about(LMAX)
+      simexpr <- expression(rpoislpp(lambdaFit, NETWORK, lmax=LMAX))
+    } else if(!fix.marks && is.marked(X)) {
+      #' Fixed total number of points
+      EN <- sapply(lambdaFit, integral)
+      PROB <- EN/sum(EN)
+      dont.complain.about(PROB)
+      simexpr <- expression(
+        rlpp(as.integer(rmultinom(1L, sum(nX), PROB)), lambdaFit)
+      )
+    } else {
+      #' Fixed number of points of each type 
+      simexpr <- expression(rlpp(nX, lambdaFit))
+    }
+    #' evaluate in THIS environment
+    simrecipe <- simulrecipe(type = "lppm",
+                             expr = simexpr,
+                             envir = envir.here,
+                             csr   = FALSE)
+  }
+  envelopeEngine(X=X, fun=fun, simul=simrecipe,
+                 nsim=nsim, nrank=nrank, ...,
+                 funargs=funargs, funYargs=funYargs,
+                 verbose=verbose, clipdata=FALSE,
+                 transform=transform,
+                 global=global, ginterval=ginterval, use.theory=use.theory,
+                 alternative=alternative, scale=scale, clamp=clamp,
+                 savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2,
+                 VARIANCE=VARIANCE, nSD=nSD,
+                 Yname=Yname, cl=cl,
+                 envir.user=envir.user, do.pwrong=do.pwrong)
+}
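+
+# Illustrative usage sketch for the fitted-model method (assumes the
+# 'spiders' dataset shipped with spatstat):
+if(FALSE) {
+  fit <- lppm(spiders ~ x)
+  E <- envelope(fit, linearK, nsim=19)
+  plot(E)
+}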
diff --git a/R/eval.fasp.R b/R/eval.fasp.R
new file mode 100755
index 0000000..e5b71a7
--- /dev/null
+++ b/R/eval.fasp.R
@@ -0,0 +1,95 @@
+#
+#     eval.fasp.R
+#
+#
+#        eval.fasp()             Evaluate expressions involving fasp objects
+#
+#        compatible.fasp()       Check whether two fasp objects are compatible
+#
+#     $Revision: 1.11 $     $Date: 2016/02/11 10:17:12 $
+#
+
+eval.fasp <- local({
+
+  eval.fasp <- function(expr, envir, dotonly=TRUE) {
+    #' convert syntactic expression to 'expression' object
+    e <- as.expression(substitute(expr))
+    #' convert syntactic expression to call
+    ##  elang <- substitute(expr)
+    #' find names of all variables in the expression
+    varnames <- all.vars(e)
+    if(length(varnames) == 0)
+      stop("No variables in this expression")
+    ## get the actual variables
+    if(missing(envir)) {
+      envir <- parent.frame()  # WAS: sys.parent()
+    } else if(is.list(envir)) {
+      envir <- list2env(envir, parent=parent.frame())
+    }
+    vars <- lapply(as.list(varnames), get, envir=envir)
+    names(vars) <- varnames
+    ## find out which ones are fasp objects
+    isfasp <- unlist(lapply(vars, inherits, what="fasp"))
+    if(!any(isfasp))
+      stop("No fasp objects in this expression")
+    fasps <- vars[isfasp]
+    nfasps <- length(fasps)
+    ## test whether the fasp objects are compatible
+    if(nfasps > 1L && !(do.call(compatible, unname(fasps))))
+      stop(paste(if(nfasps > 2) "some of" else NULL,
+                 "the objects",
+                 commasep(sQuote(names(fasps))),
+                 "are not compatible"))
+    ## copy first object as template
+    result <- fasps[[1L]]
+    which <- result$which
+    nr <- nrow(which)
+    nc <- ncol(which)
+    ## create environment for evaluation
+    fenv <- new.env()
+    ## for each [i,j] extract fv objects and evaluate expression
+    for(i in seq_len(nr))
+      for(j in seq_len(nc)) {
+        ## extract fv objects at position [i,j]
+        funs <- lapply(fasps, getpanel, i=i, j=j)
+        ## insert into list of argument values
+        vars[isfasp] <- funs
+        ## assign them into the right environment
+        for(k in seq_along(vars)) 
+          assign(varnames[k], vars[[k]], envir=fenv)
+        ## evaluate
+        resultij <- eval(substitute(eval.fv(ee,ff,dd),
+                                    list(ee=e, ff=fenv, dd=dotonly)))
+        ## insert back into fasp
+        result$fns[[which[i,j] ]] <- resultij
+      }
+    result$title <- paste("Result of eval.fasp(", e, ")", sep="")
+    return(result)
+  }
+
+  getpanel <- function(x, i, j) { as.fv(x[i,j]) }
+
+  eval.fasp
+})
+
+compatible.fasp <- function(A, B, ...) {
+  verifyclass(A, "fasp")
+  if(missing(B)) return(TRUE)
+  verifyclass(B, "fasp")
+  dimA <- dim(A$which)
+  dimB <- dim(B$which)
+  if(!all(dimA == dimB))
+    return(FALSE)
+  for(i in seq_len(dimA[1L])) 
+    for(j in seq_len(dimA[2L])) {
+      Aij <- as.fv(A[i,j])
+      Bij <- as.fv(B[i,j])
+      if(!compatible.fv(Aij, Bij))
+        return(FALSE)
+    }
+  # A and B agree
+  if(length(list(...)) == 0) return(TRUE)
+  # recursion
+  return(compatible.fasp(B, ...))
+}
+
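+# Illustrative usage sketch (assumes the 'amacrine' dataset shipped
+# with spatstat):
+if(FALSE) {
+  K <- alltypes(amacrine, "K")
+  L <- eval.fasp(sqrt(K/pi))   # array of L-functions
+}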
diff --git a/R/eval.fv.R b/R/eval.fv.R
new file mode 100755
index 0000000..6cf3bea
--- /dev/null
+++ b/R/eval.fv.R
@@ -0,0 +1,282 @@
+#
+#     eval.fv.R
+#
+#
+#        eval.fv()             Evaluate expressions involving fv objects
+#
+#        compatible.fv()       Check whether two fv objects are compatible
+#
+#     $Revision: 1.35 $     $Date: 2016/07/26 10:30:13 $
+#
+
+eval.fv <- local({
+
+  # main function
+  eval.fv <- function(expr, envir, dotonly=TRUE, equiv=NULL, relabel=TRUE) {
+    # convert syntactic expression to 'expression' object
+    e <- as.expression(substitute(expr))
+    # convert syntactic expression to call
+    elang <- substitute(expr)
+    # find names of all variables in the expression
+    varnames <- all.vars(e)
+    if(length(varnames) == 0)
+      stop("No variables in this expression")
+    # get the actual variables
+    if(missing(envir)) {
+      envir <- parent.frame()
+    } else if(is.list(envir)) {
+      envir <- list2env(envir, parent=parent.frame())
+    }
+    vars <- lapply(as.list(varnames), get, envir=envir)
+    names(vars) <- varnames
+    # find out which ones are fv objects
+    fvs <- unlist(lapply(vars, is.fv))
+    nfuns <- sum(fvs)
+    if(nfuns == 0)
+      stop("No fv objects in this expression")
+    # extract them
+    funs <- vars[fvs]
+    # restrict to columns identified by 'dotnames'
+    if(dotonly) 
+      funs <- lapply(funs, restrict.to.dot)
+    # map names if instructed
+    if(!is.null(equiv))
+      funs <- lapply(funs, mapnames, map=equiv)
+    # test whether the fv objects are compatible
+    if(nfuns > 1L && !(do.call(compatible, unname(funs)))) {
+      warning(paste(if(nfuns > 2) "some of" else NULL,
+                    "the functions",
+                    commasep(sQuote(names(funs))),
+                    "were not compatible: enforcing compatibility"))
+      funs <- do.call(harmonise, append(funs, list(strict=TRUE)))
+    }
+    # copy first object as template
+    result <- funs[[1L]]
+    labl <- attr(result, "labl")
+    origdotnames   <- fvnames(result, ".")
+    origshadenames <- fvnames(result, ".s")
+    # determine which function estimates are supplied
+    argname <- fvnames(result, ".x")
+    nam <- names(result)
+    ynames <- nam[nam != argname]
+    # for each function estimate, evaluate expression
+    for(yn in ynames) {
+      # extract corresponding estimates from each fv object
+      funvalues <- lapply(funs, "[[", i=yn)
+      # insert into list of argument values
+      vars[fvs] <- funvalues
+      # evaluate
+      result[[yn]] <- eval(e, vars, enclos=envir)
+    }
+    if(!relabel)
+      return(result)
+    # determine mathematical labels.
+    # 'yexp' determines y axis label
+    # 'ylab' determines y label in printing and description
+    # 'fname' is sprintf-ed into 'labl' for legend
+    yexps <- lapply(funs, attr, which="yexp")
+    ylabs <- lapply(funs, attr, which="ylab")
+    fnames <- lapply(funs, getfname)
+    # Repair 'fname' attributes if blank
+    blank <- unlist(lapply(fnames, isblank))
+    if(any(blank)) {
+      # Set function names to be object names as used in the expression
+      for(i in which(blank))
+        attr(funs[[i]], "fname") <- fnames[[i]] <- names(funs)[i]
+    }
+    # Remove duplicated names
+    # Typically occurs when combining several K functions, etc.
+    # Tweak fv objects so their function names are their object names
+    # as used in the expression
+    if(anyDuplicated(fnames)) {
+      newfnames <- names(funs)
+      for(i in 1:nfuns)
+        funs[[i]] <- rebadge.fv(funs[[i]], new.fname=newfnames[i])
+      fnames <- newfnames
+    }
+    if(anyDuplicated(ylabs)) {
+      flatnames <- lapply(funs, flatfname)
+      for(i in 1:nfuns) {
+        new.ylab <- substitute(f(r), list(f=flatnames[[i]]))
+        funs[[i]] <- rebadge.fv(funs[[i]], new.ylab=new.ylab)
+      }
+      ylabs <- lapply(funs, attr, which="ylab")
+    }
+    if(anyDuplicated(yexps)) {
+      newfnames <- names(funs)
+      for(i in 1:nfuns) {
+        new.yexp <- substitute(f(r), list(f=as.name(newfnames[i])))
+        funs[[i]] <- rebadge.fv(funs[[i]], new.yexp=new.yexp)
+      }
+      yexps <- lapply(funs, attr, which="yexp")
+    }
+    # now compute y axis labels for the result
+    attr(result, "yexp") <- eval(substitute(substitute(e, yexps),
+                                            list(e=elang)))
+    attr(result, "ylab") <- eval(substitute(substitute(e, ylabs),
+                                            list(e=elang)))
+    # compute fname equivalent to expression
+    if(nfuns > 1L) {
+      # take original expression
+      the.fname <- paren(flatten(deparse(elang)))
+    } else if(nzchar(oldname <- flatfname(funs[[1L]]))) {
+      # replace object name in expression by its function name
+      namemap <- list(as.name(oldname)) 
+      names(namemap) <- names(funs)[1L]
+      the.fname <- deparse(eval(substitute(substitute(e, namemap),
+                                           list(e=elang))))
+    } else the.fname <- names(funs)[1L]
+    attr(result, "fname") <- the.fname
+    # now compute the [modified] y labels
+    labelmaps <- lapply(funs, fvlabelmap, dot=FALSE)
+    for(yn in ynames) {
+      # labels for corresponding columns of each argument
+      funlabels <- lapply(labelmaps, "[[", i=yn)
+      # form expression involving these columns
+      labl[match(yn, names(result))] <-
+        flatten(deparse(eval(substitute(substitute(e, f),
+                                        list(e=elang, f=funlabels)))))
+    }
+    attr(result, "labl") <- labl
+    # copy dotnames and shade names from template
+    fvnames(result, ".") <- origdotnames[origdotnames %in% names(result)]
+    if(!is.null(origshadenames) && all(origshadenames %in% names(result)))
+      fvnames(result, ".s") <- origshadenames
+    return(result)
+  }
+
+  # helper functions
+  restrict.to.dot <- function(z) {
+    argu <- fvnames(z, ".x")
+    dotn <- fvnames(z, ".")
+    shadn <- fvnames(z, ".s")
+    ok <- colnames(z) %in% unique(c(argu, dotn, shadn))
+    return(z[, ok])
+  }
+  getfname <- function(x) { if(!is.null(y <- attr(x, "fname"))) y else "" }
+  flatten <- function(x) { paste(x, collapse=" ") }
+  mapnames <- function(x, map=NULL) {
+    colnames(x) <- mapstrings(colnames(x), map=map)
+    fvnames(x, ".y") <- mapstrings(fvnames(x, ".y"), map=map)
+    return(x)
+  }
+  isblank <-  function(z) { !any(nzchar(z)) }
+  
+  eval.fv
+})
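+
+# Illustrative usage sketch (assumes the 'cells' dataset shipped
+# with spatstat):
+if(FALSE) {
+  K <- Kest(cells)
+  L <- eval.fv(sqrt(K/pi))   # Besag's L-function, column by column
+}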
+    
+compatible <- function(A, B, ...) {
+  UseMethod("compatible")
+}
+
+compatible.fv <- local({
+
+  approx.equal <- function(x, y) { max(abs(x-y)) <= .Machine$double.eps }
+
+  compatible.fv <- function(A, B, ...) {
+    verifyclass(A, "fv")
+    if(missing(B)) {
+      answer <- if(length(list(...)) == 0) TRUE else compatible(A, ...)
+      return(answer)
+    }
+    verifyclass(B, "fv")
+    ## are columns the same?
+    namesmatch <-
+      identical(all.equal(names(A),names(B)), TRUE) &&
+        (fvnames(A, ".x") == fvnames(B, ".x")) &&
+          (fvnames(A, ".y") == fvnames(B, ".y"))
+    if(!namesmatch)
+      return(FALSE)
+    ## are 'r' values the same ?
+    rA <- with(A, .x)
+    rB <- with(B, .x)
+    rmatch <- (length(rA) == length(rB)) && approx.equal(rA, rB)
+    if(!rmatch)
+      return(FALSE)
+    ## A and B are compatible
+    if(length(list(...)) == 0)
+      return(TRUE)
+    ## recursion
+    return(compatible.fv(B, ...))
+  }
+
+  compatible.fv
+})
+
+
+# force a list of images to be compatible with regard to 'x' values
+
+harmonize <- harmonise <- function(...) {
+  UseMethod("harmonise")
+}
+
+harmonize.fv <- harmonise.fv <- local({
+
+  harmonise.fv <- function(..., strict=FALSE) {
+    argh <- list(...)
+    n <- length(argh)
+    if(n == 0) return(argh)
+    if(n == 1) {
+      a1 <- argh[[1L]]
+      if(is.fv(a1)) return(argh)
+      if(is.list(a1) && all(sapply(a1, is.fv))) {
+        argh <- a1
+        n <- length(argh)
+      }
+    }
+    isfv <- sapply(argh, is.fv)
+    if(!all(isfv))
+      stop("All arguments must be fv objects")
+    if(n == 1) return(argh[[1L]])
+    ## determine range of argument
+    ranges <- lapply(argh, argumentrange)
+    xrange <- c(max(unlist(lapply(ranges, min))),
+                min(unlist(lapply(ranges, max))))
+    if(diff(xrange) < 0)
+      stop("No overlap in ranges of argument")
+    if(strict) {
+      ## find common column names and keep these
+      keepnames <- Reduce(intersect, lapply(argh, colnames))
+      argh <- lapply(argh, "[", j=keepnames)
+    }
+    ## determine finest resolution
+    xsteps <- sapply(argh, argumentstep)
+    finest <- which.min(xsteps)
+    ## extract argument values
+    xx <- with(argh[[finest]], .x)
+    xx <- xx[xrange[1L] <= xx & xx <= xrange[2L]]
+    xrange <- range(xx)
+    ## convert each fv object to a function
+    funs <- lapply(argh, as.function, value="*")
+    ## evaluate at common argument
+    result <- vector(mode="list", length=n)
+    for(i in 1:n) {
+      ai <- argh[[i]]
+      fi <- funs[[i]]
+      xxval <- list(xx=xx)
+      names(xxval) <- fvnames(ai, ".x")
+      starnames <- fvnames(ai, "*")
+      ## ensure they are given in same order as current columns
+      starnames <- colnames(ai)[colnames(ai) %in% starnames]
+      yyval <- lapply(starnames,
+                      function(v,xx,fi) fi(xx, v),
+                      xx=xx, fi=fi)
+      names(yyval) <- starnames
+      ri <- do.call(data.frame, append(xxval, yyval))
+      fva <- .Spatstat.FvAttrib
+      attributes(ri)[fva] <- attributes(ai)[fva]
+      class(ri) <- c("fv", class(ri))
+      attr(ri, "alim") <- intersect.ranges(attr(ai, "alim"), xrange)
+      result[[i]] <- ri
+    }
+    names(result) <- names(argh)
+    return(result)
+  }
+
+  argumentrange <- function(f) { range(with(f, .x)) }
+  
+  argumentstep <- function(f) { mean(diff(with(f, .x))) }
+  
+  harmonise.fv
+})
+
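+# Illustrative usage sketch: two estimates computed on different 'r'
+# grids are interpolated onto a common grid (assumes the 'cells' dataset):
+if(FALSE) {
+  K1 <- Kest(cells, r=seq(0, 0.25, by=0.01))
+  K2 <- Kest(cells)
+  H <- harmonise(K1, K2)
+}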
diff --git a/R/eval.im.R b/R/eval.im.R
new file mode 100755
index 0000000..1714ed2
--- /dev/null
+++ b/R/eval.im.R
@@ -0,0 +1,248 @@
+#
+#     eval.im.R
+#
+#        eval.im()             Evaluate expressions involving images
+#
+#        compatible.im()       Check whether two images are compatible
+#
+#        harmonise.im()       Harmonise images
+#        commonGrid()
+#
+#     $Revision: 1.41 $     $Date: 2016/11/18 08:40:40 $
+#
+
+eval.im <- local({
+
+  eval.im <- function(expr, envir, harmonize=TRUE) {
+    e <- as.expression(substitute(expr))
+    ## get names of all variables in the expression
+    varnames <- all.vars(e)
+    allnames <- all.names(e, unique=TRUE)
+    funnames <- allnames[!(allnames %in% varnames)]
+    if(length(varnames) == 0)
+      stop("No variables in this expression")
+    ## get the values of the variables
+    if(missing(envir)) {
+      envir <- parent.frame() # WAS: sys.parent()
+    } else if(is.list(envir)) {
+      envir <- list2env(envir, parent=parent.frame())
+    }
+    vars <- mget(varnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL))
+    funs <- mget(funnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL))
+    ## WAS: vars <- lapply(as.list(varnames), get, envir=envir)
+    ## WAS: funs <- lapply(as.list(funnames), get, envir=envir)
+    ##
+    ## find out which variables are images
+    ims <- unlist(lapply(vars, is.im))
+    if(!any(ims))
+      stop("No images in this expression")
+    images <- vars[ims]
+    nimages <- length(images)
+    ## test that the images are compatible
+    if(!(do.call(compatible, unname(images)))) {
+      whinge <- paste(if(nimages > 2) "some of" else NULL,
+                      "the images",
+                      commasep(sQuote(names(images))),
+                      if(!harmonize) "are" else "were",
+                      "not compatible")
+      if(!harmonize) {
+        stop(whinge, call.=FALSE)
+      } else {
+        warning(whinge, call.=FALSE)
+        images <- do.call(harmonise.im, images)
+      }
+    }
+    ## trap a common error: using fv object as variable
+    isfun <- unlist(lapply(vars, is.fv))
+    if(any(isfun))
+      stop("Cannot use objects of class fv as variables in eval.im")
+    ## replace each image by its matrix of pixel values, and evaluate
+    imagevalues <- lapply(images, getImValues)
+    template <- images[[1L]]
+    ## This bit has been repaired:
+    vars[ims] <- imagevalues
+    v <- eval(e, append(vars, funs))
+    ##
+    ## reshape, etc
+    result <- im(v,
+                 xcol=template$xcol, yrow=template$yrow,
+                 xrange=template$xrange, yrange=template$yrange, 
+                 unitname=unitname(template))
+    return(result)
+  }
+  
+  ## extract pixel values without destroying type information
+  getImValues <- function(x) {
+    v <- as.matrix(x)
+    dim(v) <- NULL
+    return(v)
+  }
+
+  eval.im
+})
+
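+## Illustrative usage sketch (assumes the 'cells' dataset shipped
+## with spatstat):
+if(FALSE) {
+  D <- density(cells)
+  Z <- eval.im(sqrt(D) + 3)   # pixelwise transformation
+}
+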
+compatible.im <- function(A, B, ..., tol=1e-6) {
+  verifyclass(A, "im")
+  if(missing(B)) return(TRUE)
+  verifyclass(B, "im")
+  if(!all(A$dim == B$dim))
+    return(FALSE)
+  xdiscrep <- max(abs(A$xrange - B$xrange),
+                 abs(A$xstep - B$xstep),
+                 abs(A$xcol - B$xcol))
+  ydiscrep <- max(abs(A$yrange - B$yrange),
+                 abs(A$ystep - B$ystep),
+                 abs(A$yrow - B$yrow))
+  xok <- (xdiscrep < tol * min(A$xstep, B$xstep))
+  yok <- (ydiscrep < tol * min(A$ystep, B$ystep))
+  uok <- compatible.units(unitname(A), unitname(B))
+  if(!(xok && yok && uok))
+    return(FALSE)
+  ## A and B are compatible
+  if(length(list(...)) == 0)
+    return(TRUE)
+  ## recursion
+  return(compatible.im(B, ..., tol=tol))
+}
+
+## force a list of images to be compatible
+
+harmonize.im <- harmonise.im <- function(...) {
+  argz <- list(...)
+  n <- length(argz)
+  if(n < 2) return(argz)
+  result <- vector(mode="list", length=n)
+  isim <- unlist(lapply(argz, is.im))
+  if(!any(isim))
+    stop("No images supplied")
+  imgs <- argz[isim]
+  ## if any windows are present, extract bounding box
+  iswin <- unlist(lapply(argz, is.owin))
+  bb0 <- if(!any(iswin)) NULL else do.call(boundingbox, unname(argz[iswin]))
+  if(length(imgs) == 1L && is.null(bb0)) {
+    ## only one 'true' image: use it as template.
+    result[isim] <- imgs
+    Wtemplate <- imgs[[1L]]
+  } else {
+    ## test for compatible units
+    un <- lapply(imgs, unitname)
+    uok <- unlist(lapply(un, compatible.units, y=un[[1L]]))
+    if(!all(uok))
+      stop("Images have incompatible units of length")
+    ## find the image with the highest resolution
+    xsteps <- unlist(lapply(imgs, getElement, name="xstep"))
+    which.finest <- which.min(xsteps)
+    finest <- imgs[[which.finest]]
+    ## get the bounding box
+    bb <- do.call(boundingbox, lapply(unname(imgs), as.rectangle))
+    if(!is.null(bb0)) bb <- boundingbox(bb, bb0)
+    ## determine new raster coordinates
+    xcol <- prolongseq(finest$xcol, bb$xrange)
+    yrow <- prolongseq(finest$yrow, bb$yrange)
+    xy <- list(x=xcol, y=yrow)
+    ## resample all images on new raster
+    newimgs <- lapply(imgs, as.im, xy=xy)
+    result[isim] <- newimgs
+    Wtemplate <- newimgs[[which.finest]]
+  }
+  ## convert other data to images
+  if(any(notim <- !isim)) 
+    result[notim] <- lapply(argz[notim], as.im, W=as.mask(Wtemplate))
+  names(result) <- names(argz)
+  return(result)
+}
+
+## Return just the corresponding template window
+
+commonGrid <- local({
+  ## auxiliary function
+  gettype <- function(x) {
+    if(is.im(x) || is.mask(x)) "raster" else
+    if(is.owin(x) || is.ppp(x) || is.psp(x)) "spatial" else
+    "none"
+  }
+
+  commonGrid <- function(...) {
+    argz <- list(...)
+    type <- unlist(lapply(argz, gettype))
+    israster <- (type == "raster")
+    haswin   <- (type != "none")
+
+    if(any(israster)) {
+      ## Get raster data
+      rasterlist <- argz[israster]
+    } else {
+      ## No existing raster data - apply default resolution
+      if(!any(haswin))
+        stop("No spatial data supplied")
+      wins <- lapply(argz[haswin], as.owin)
+      rasterlist <- lapply(wins, as.mask)
+    }
+
+    ## Find raster object with finest resolution
+    if(length(rasterlist) == 1L) {
+      ## only one raster object
+      finest <- rasterlist[[1L]]
+    } else {
+      ## test for compatible units
+      un <- lapply(rasterlist, unitname)
+      uok <- unlist(lapply(un, compatible.units, y=un[[1L]]))
+      if(!all(uok))
+        stop("Objects have incompatible units of length")
+      ## find the image/mask with the highest resolution
+      xsteps <- unlist(lapply(rasterlist, getElement, name="xstep"))
+      which.finest <- which.min(xsteps)
+      finest <- rasterlist[[which.finest]]
+    }
+    ## determine the bounding box
+    bb <- do.call(boundingbox, lapply(unname(argz[haswin]), as.rectangle))
+    ## determine new raster coordinates
+    xcol <- prolongseq(finest$xcol, bb$xrange)
+    yrow <- prolongseq(finest$yrow, bb$yrange)
+    xy <- list(x=xcol, y=yrow)
+    ## generate template
+    Wtemplate <- as.mask(bb, xy=xy)
+    return(Wtemplate)
+  }
+
+  commonGrid
+})
+
+im.apply <- local({
+
+  im.apply <- function(X, FUN, ...) {
+    stopifnot(is.list(X))
+    if(!all(sapply(X, is.im)))
+      stop("All elements of X must be pixel images")
+    fun <- if(is.character(FUN)) get(FUN) else
+           if(is.function(FUN)) FUN else stop("Unrecognised format for FUN")
+    ## ensure images are compatible
+    X <- do.call(harmonise.im, X)
+    template <- X[[1L]]
+    ## extract numerical values and convert to matrix with one column per image
+    vlist <- lapply(X, flatten)
+    vals <- matrix(unlist(vlist), ncol=length(X))
+    colnames(vals) <- names(X)
+    ok <- complete.cases(vals)
+    if(!any(ok)) {
+      ## empty result
+      return(as.im(NA, W=template))
+    }
+    ## apply function
+    resultok <- apply(vals[ok,, drop=FALSE], 1L, fun, ...)
+    if(length(resultok) != sum(ok))
+      stop("FUN should yield one value per pixel")
+    ## pack up, with attention to type of data
+    d <- dim(template)
+    resultmat <- matrix(resultok[1L], d[1L], d[2L])
+    resultmat[ok] <- resultok
+    resultmat[!ok] <- NA
+    result <- as.im(resultmat, W=X[[1L]])
+    if(is.factor(resultok)) levels(result) <- levels(resultok)
+    return(result)
+  }
+
+  flatten <- function(z) { as.vector(as.matrix(z)) }
+
+  im.apply
+})
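+
+## Illustrative usage sketch (assumes the 'amacrine' dataset; split()
+## gives one pattern per mark, so density() returns a list of images):
+if(FALSE) {
+  DD <- density(split(amacrine))
+  Z <- im.apply(DD, max)   # pixelwise maximum across the images
+}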
diff --git a/R/evalcovar.R b/R/evalcovar.R
new file mode 100644
index 0000000..dc92bf9
--- /dev/null
+++ b/R/evalcovar.R
@@ -0,0 +1,415 @@
+#'
+#' evalcovar.R
+#'
+#'   evaluate covariate values at data points and at pixels
+#'
+#' $Revision: 1.21 $ $Date: 2017/01/18 07:29:46 $
+#'
+
+evalCovar <- function(model, covariate, ...) {
+  UseMethod("evalCovar")
+}
+
+evalCovar.ppm <- local({
+
+  evalCovar.ppm <- function(model, covariate, ...,
+                            lambdatype=c("cif", "trend", "intensity"),
+                            dimyx=NULL, eps=NULL,
+                            interpolate=TRUE, jitter=TRUE, 
+                            modelname=NULL, covname=NULL,
+                            dataname=NULL) {
+    lambdatype <- match.arg(lambdatype)
+    #' evaluate covariate values at data points and at pixels
+    csr <- is.poisson.ppm(model) && is.stationary.ppm(model)
+    #' determine names
+    if(is.null(modelname))
+      modelname <- if(csr) "CSR" else short.deparse(substitute(model))
+    if(is.null(covname)) {
+      covname <- singlestring(short.deparse(substitute(covariate)))
+      if(is.character(covariate)) covname <- covariate
+    }
+    if(is.null(dataname))
+      dataname <- model$Qname
+    
+    info <-  list(modelname=modelname, covname=covname,
+                  dataname=dataname, csr=csr,
+                  spacename="two dimensions")
+  
+    X <- data.ppm(model)
+    W <- as.owin(model)
+
+    #' explicit control of pixel resolution
+    if(!is.null(dimyx) || !is.null(eps))
+      W <- as.mask(W, dimyx=dimyx, eps=eps)
+
+    #' evaluate covariate 
+    if(is.character(covariate)) {
+      #' One of the characters 'x' or 'y'
+      #' Turn it into a function.
+      ns <- length(covariate)
+      if(ns == 0) stop("covariate is empty")
+      if(ns > 1) stop("more than one covariate specified")
+      covname <- covariate
+      covariate <- switch(covariate,
+                          x=xcoordfun,
+                          y=ycoordfun,
+                          stop(paste("Unrecognised covariate",
+                                     dQuote(covariate))))
+    } 
+  
+    if(!is.marked(model)) {
+      #' ...................  unmarked .......................
+      if(is.im(covariate)) {
+        type <- "im"
+        if(!interpolate) {
+          #' look up covariate values 
+          ZX <- safelookup(covariate, X)
+        } else {
+          #' evaluate at data points by interpolation
+          ZX <- interp.im(covariate, X$x, X$y)
+          #' fix boundary glitches
+          if(any(uhoh <- is.na(ZX)))
+            ZX[uhoh] <- safelookup(covariate, X[uhoh])
+        }
+        #' covariate values for pixels inside window
+        Z <- covariate[W, drop=FALSE]
+        #' corresponding mask
+        W <- as.owin(Z)
+      } else if(is.function(covariate)) {
+        type <- "function"
+        #' evaluate exactly at data points
+        ZX <- covariate(X$x, X$y)
+        if(!all(is.finite(ZX)))
+          warning("covariate function returned NA or Inf values")
+        #' window
+        W <- as.mask(W)
+        #' covariate in window
+        Z <- as.im(covariate, W=W)
+        #' collapse function body to single string
+        covname <- singlestring(covname)
+      } else if(is.null(covariate)) {
+        stop("The covariate is NULL", call.=FALSE)
+      } else stop(paste("The covariate should be",
+                        "an image, a function(x,y)",
+                        "or one of the characters",
+                        sQuote("x"), "or", sQuote("y")),
+                  call.=FALSE)
+      #' values of covariate in window
+      Zvalues <- as.vector(Z[W, drop=TRUE])
+      #' corresponding fitted [conditional] intensity values
+      lambda <- as.vector(predict(model, locations=W,
+                                  type=lambdatype)[W, drop=TRUE])
+      #' pixel area (constant)
+      pixelarea <- with(Z, xstep * ystep)
+    } else {
+      #' ...................  marked .......................
+      if(!is.multitype(model))
+        stop("Only implemented for multitype models (factor marks)")
+      marx <- marks(X, dfok=FALSE)
+      possmarks <- levels(marx)
+      npts <- npoints(X)
+      #' single image: replicate 
+      if(is.im(covariate)) {
+        covariate <- rep(list(covariate), times=length(possmarks))
+        names(covariate) <- as.character(possmarks)
+      }
+      #'
+      if(is.list(covariate) && all(unlist(lapply(covariate, is.im)))) {
+        #' list of images
+        type <- "im"
+        if(length(covariate) != length(possmarks))
+          stop("Number of images does not match number of possible marks")
+        #' evaluate covariate at each data point 
+        ZX <- numeric(npts)
+        for(k in seq_along(possmarks)) {
+          ii <- (marx == possmarks[k])
+          covariate.k <- covariate[[k]]
+          if(!interpolate) {
+            #' look up covariate values 
+            values <- safelookup(covariate.k, X[ii])
+          } else {
+            #' interpolate
+            values <- interp.im(covariate.k, x=X$x[ii], y=X$y[ii])
+            #' fix boundary glitches
+            if(any(uhoh <- is.na(values)))
+              values[uhoh] <- safelookup(covariate.k, X[ii][uhoh])
+          }
+          ZX[ii] <- values
+        }
+        #' restrict covariate images to window 
+        Z <- lapply(covariate, "[", i=W, drop=FALSE)
+        #' extract pixel locations and pixel values
+        Zframes <- lapply(Z, as.data.frame)
+        #' covariate values at each pixel inside window
+        Zvalues <- unlist(lapply(Zframes, getElement, name="value"))
+        #' pixel locations 
+        locn <- lapply(Zframes, getxy)
+        #' tack on mark values
+        for(k in seq_along(possmarks))
+          locn[[k]] <- cbind(locn[[k]], data.frame(marks=possmarks[k]))
+        loc <- do.call(rbind, locn)
+        #' corresponding fitted [conditional] intensity values
+        lambda <- predict(model, locations=loc, type=lambdatype)
+        #' pixel areas
+        pixelarea <- rep(sapply(Z, pixarea), sapply(Z, npixdefined))
+      } else if(is.function(covariate)) {
+        type <- "function"
+        #' evaluate exactly at data points
+        ZX <- covariate(X$x, X$y, marx)
+        #' same window
+        W <- as.mask(W)
+        #' covariate in window
+        Z <- list()
+        for(k in seq_along(possmarks))
+          Z[[k]] <- as.im(functioncaller, m=possmarks[k], f=covariate, W=W)
+        #' functioncaller: function(x,y,m,f) { f(x,y,m) }
+        Zvalues <- unlist(lapply(Z, pixelvalues))
+        #' corresponding fitted [conditional] intensity values
+        lambda <- predict(model, locations=W, type=lambdatype)
+        lambda <- unlist(lapply(lambda, pixelvalues))
+        if(length(lambda) != length(Zvalues))
+          stop("Internal error: length(lambda) != length(Zvalues)")
+        #' collapse function body to single string
+        covname <- singlestring(covname)
+        #' pixel areas
+        pixelarea <- rep(sapply(Z, pixarea), sapply(Z, npixdefined))
+      } else if(is.null(covariate)) {
+        stop("The covariate is NULL", call.=FALSE)
+      } else stop(paste("For a multitype point process model,",
+                        "the covariate should be an image, a list of images,",
+                        "a function(x,y,m)", 
+                        "or one of the characters",
+                        sQuote("x"), "or", sQuote("y")),
+                  call.=FALSE)
+    }    
+    #' ..........................................................
+
+    #' apply jittering to avoid ties
+    if(jitter) {
+      nX <- length(ZX)
+      dZ <- 0.3 * quantile(diff(sort(unique(c(ZX, Zvalues)))), 1/min(20, nX))
+      ZX <- ZX + rnorm(nX, sd=dZ)
+      Zvalues <- Zvalues + rnorm(length(Zvalues), sd=dZ)
+    }
+
+    lambdaname <- if(is.poisson(model)) "intensity" else lambdatype
+    lambdaname <- paste("the fitted", lambdaname)
+    check.finite(lambda, xname=lambdaname, usergiven=FALSE)
+    check.finite(Zvalues, xname="the covariate", usergiven=TRUE)
+    
+    #' wrap up 
+    values <- list(Zimage    = Z,
+                   Zvalues   = Zvalues,
+                   lambda    = lambda,
+                   weights   = pixelarea,
+                   ZX        = ZX,
+                   type      = type)
+    return(list(values=values, info=info))
+  }
+
+  xcoordfun <- function(x,y,m){x}
+  ycoordfun <- function(x,y,m){y}
+
+  pixarea <- function(z) { z$xstep * z$ystep }
+  npixdefined <- function(z) { sum(!is.na(z$v)) }
+  functioncaller <- function(x,y,m,f) { f(x,y,m) }
+  pixelvalues <- function(z) { as.data.frame(z)[,3L] }
+  getxy <- function(z) { z[,c("x","y")] }
+  
+  evalCovar.ppm
+})
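+
+#' evalCovar() is an internal workhorse, reached through user-level tests
+#' such as cdf.test(). Illustrative sketch (assumes the 'cells' dataset
+#' shipped with spatstat):
+if(FALSE) {
+  fit <- ppm(cells ~ x)
+  cdf.test(fit, "y")   # calls evalCovar.ppm on the covariate 'y'
+}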
+
+evalCovar.lppm <- local({
+
+  evalCovar.lppm <- function(model, covariate, ...,
+                             lambdatype=c("cif", "trend", "intensity"),
+                             eps=NULL, nd=1000,
+                             interpolate=TRUE, jitter=TRUE, 
+                             modelname=NULL, covname=NULL,
+                             dataname=NULL) {
+    lambdatype <- match.arg(lambdatype)
+    #' evaluate covariate values at data points and at pixels
+    csr <- is.poisson(model) && is.stationary(model)
+
+    #' determine names
+    if(is.null(modelname))
+      modelname <- if(csr) "CSR" else short.deparse(substitute(model))
+    if(is.null(covname)) {
+      covname <- singlestring(short.deparse(substitute(covariate)))
+      if(is.character(covariate)) covname <- covariate
+    }
+    if(is.null(dataname))
+      dataname <- model$Xname
+    info <-  list(modelname=modelname, covname=covname,
+                  dataname=dataname, csr=csr,
+                  spacename="linear network")
+
+    #' convert character covariate to function
+    if(is.character(covariate)) {
+      #' One of the characters 'x' or 'y'
+      #' Turn it into a function.
+      ns <- length(covariate)
+      if(ns == 0) stop("covariate is empty")
+      if(ns > 1) stop("more than one covariate specified")
+      covname <- covariate
+      covariate <- switch(covariate,
+                          x=xcoordfun,
+                          y=ycoordfun,
+                          stop(paste("Unrecognised covariate",
+                                     dQuote(covariate))))
+    }
+  
+    #' extract model components
+    X <- model$X
+    fit <- model$fit
+    #'
+    L <- as.linnet(X)
+    Q <- quad.ppm(fit)
+    isdat <- is.data(Q)
+    U <- union.quad(Q)
+    wt <- w.quad(Q)
+  
+    #' evaluate covariate
+    if(!is.marked(model)) {
+      #' ...................  unmarked .......................
+      if(is.im(covariate)) {
+        if(inherits(covariate, "linim")) {
+          type <- "linim"
+          Zimage <- covariate
+        } else {
+          type <- "im"
+          Zimage <- as.linim(covariate, L)
+        }
+        if(!interpolate) {
+          #' look up covariate values at quadrature points
+          Zvalues <- safelookup(covariate, U)
+        } else {
+          #' evaluate at quadrature points by interpolation
+          Zvalues <- interp.im(covariate, U$x, U$y)
+          #' fix boundary glitches
+          if(any(uhoh <- is.na(Zvalues)))
+            Zvalues[uhoh] <- safelookup(covariate, U[uhoh])
+        }
+        #' extract data values
+        ZX <- Zvalues[isdat]
+      } else if(is.function(covariate)) {
+        type <- "function"
+        Zimage <- as.linim(covariate, L)
+        #' evaluate exactly at quadrature points
+        Zvalues <- covariate(U$x, U$y)
+        if(!all(is.finite(Zvalues)))
+          warning("covariate function returned NA or Inf values")
+        #' extract data values
+        ZX <- Zvalues[isdat]
+        #' collapse function body to single string
+        covname <- singlestring(covname)
+      } else if(is.null(covariate)) {
+        stop("The covariate is NULL", call.=FALSE)
+      } else stop(paste("The covariate should be",
+                        "an image, a function(x,y)",
+                        "or one of the characters",
+                        sQuote("x"), "or", sQuote("y")),
+                  call.=FALSE)
+      #' corresponding fitted [conditional] intensity values
+      lambda <- as.vector(predict(model, locations=U, type=lambdatype))
+    } else {
+      #' ...................  marked .......................
+      if(!is.multitype(model))
+      stop("Only implemented for multitype models (factor marks)")
+      marx <- marks(U, dfok=FALSE)
+      possmarks <- levels(marx)
+      #' single image: replicate 
+      if(is.im(covariate)) {
+        covariate <- rep(list(covariate), length(possmarks))
+        names(covariate) <- possmarks
+      }
+      #'
+      if(is.list(covariate) && all(unlist(lapply(covariate, is.im)))) {
+        #' list of images
+        if(length(covariate) != length(possmarks))
+          stop("Number of images does not match number of possible marks")
+        #' determine type of data
+        islinim <- unlist(lapply(covariate, inherits, what="linim"))
+        type <- if(all(islinim)) "linim" else "im"
+        Zimage <- covariate
+        Zimage[!islinim] <- lapply(Zimage[!islinim], as.linim, L=L)
+        #' evaluate covariate at each data point by interpolation
+        Zvalues <- numeric(npoints(U))
+        for(k in seq_along(possmarks)) {
+          ii <- (marx == possmarks[k])
+          covariate.k <- covariate[[k]]
+          if(!interpolate) {
+            #' direct lookup
+            values <- safelookup(covariate.k, U[ii])
+          } else {
+            #' interpolation
+            values <- interp.im(covariate.k, x=U$x[ii], y=U$y[ii])
+            #' fix boundary glitches
+            if(any(uhoh <- is.na(values)))
+              values[uhoh] <- safelookup(covariate.k, U[ii][uhoh])
+          }
+          Zvalues[ii] <- values
+        }
+        #' extract data values
+        ZX <- Zvalues[isdat]
+        #' corresponding fitted [conditional] intensity values
+        lambda <- predict(model, locations=U, type=lambdatype)
+        if(length(lambda) != length(Zvalues))
+          stop("Internal error: length(lambda) != length(Zvalues)")
+      } else if(is.function(covariate)) {
+        type <- "function"
+        #' evaluate exactly at quadrature points
+        Zvalues <- covariate(U$x, U$y, marx)
+        #' extract data values
+        ZX <- Zvalues[isdat]
+        #' corresponding fitted [conditional] intensity values
+        lambda <- predict(model, locations=U, type=lambdatype)
+        if(length(lambda) != length(Zvalues))
+          stop("Internal error: length(lambda) != length(Zvalues)")
+        #' images
+        Zimage <- list()
+        for(k in seq_along(possmarks))
+          Zimage[[k]] <- as.linim(functioncaller, L=L, m=possmarks[k],
+                                  f=covariate)
+        #' collapse function body to single string
+        covname <- singlestring(covname)
+      } else if(is.null(covariate)) {
+        stop("The covariate is NULL", call.=FALSE)
+      } else stop(paste("For a multitype point process model,",
+                        "the covariate should be an image, a list of images,",
+                        "a function(x,y,m)", 
+                        "or one of the characters",
+                        sQuote("x"), "or", sQuote("y")),
+                  call.=FALSE)
+    }    
+    #' ..........................................................
+
+    #' apply jittering to avoid ties
+    if(jitter) {
+      nX <- length(ZX)
+      dZ <- 0.3 * quantile(diff(sort(unique(c(ZX, Zvalues)))), 1/min(20, nX))
+      ZX <- ZX + rnorm(nX, sd=dZ)
+      Zvalues <- Zvalues + rnorm(length(Zvalues), sd=dZ)
+    }
+
+    lambdaname <- if(is.poisson(model)) "intensity" else lambdatype
+    lambdaname <- paste("the fitted", lambdaname)
+    check.finite(lambda, xname=lambdaname, usergiven=FALSE)
+    check.finite(Zvalues, xname="the covariate", usergiven=TRUE)
+
+    #' wrap up 
+    values <- list(Zimage    = Zimage,
+                   Zvalues   = Zvalues,
+                   lambda    = lambda,
+                   weights   = wt,
+                   ZX        = ZX,
+                   type      = type)
+    return(list(values=values, info=info))
+  }
+
+  xcoordfun <- function(x,y,m){x}
+  ycoordfun <- function(x,y,m){y}
+  functioncaller <- function(x,y,m,f) { f(x,y,m) }
+
+  evalCovar.lppm
+})
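+
+#' Illustrative sketch of the linear-network counterpart (assumes the
+#' 'spiders' dataset and the cdf.test() method for 'lppm' objects):
+if(FALSE) {
+  fit <- lppm(spiders ~ x)
+  cdf.test(fit, "y")   # calls evalCovar.lppm
+}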
diff --git a/R/ewcdf.R b/R/ewcdf.R
new file mode 100755
index 0000000..7d25ea8
--- /dev/null
+++ b/R/ewcdf.R
@@ -0,0 +1,126 @@
+#
+#     ewcdf.R
+#
+#     $Revision: 1.11 $  $Date: 2017/01/04 07:27:39 $
+#
+#  With contributions from Kevin Ummel
+#
+
+ewcdf <- function(x, weights=rep(1/length(x), length(x)))
+{
+  stopifnot(length(x) == length(weights))
+  # remove NA's together
+  nbg <- is.na(x) 
+  x <- x[!nbg]
+  weights <- weights[!nbg]
+  n <- length(x)
+  if (n < 1)
+    stop("'x' must have 1 or more non-missing values")
+  stopifnot(all(weights >= 0))
+  # sort in increasing order of x value
+  ox <- fave.order(x)
+  x <- x[ox]
+  w <- weights[ox]
+  # find jump locations and match
+  vals <- sort(unique(x))
+  xmatch <- factor(match(x, vals), levels=seq_along(vals))
+  # sum weight in each interval
+  wmatch <- tapply(w, xmatch, sum)
+  wmatch[is.na(wmatch)] <- 0
+  cumwt <- cumsum(wmatch)
+  # make function
+  rval <- approxfun(vals, cumwt,
+                    method = "constant", yleft = 0, yright = sum(wmatch),
+                    f = 0, ties = "ordered")
+  class(rval) <- c("ewcdf", "ecdf", "stepfun", class(rval))
+  assign("w", w, envir=environment(rval))
+  attr(rval, "call") <- sys.call()
+  return(rval)
+}
+
+  # Hacked from stats:::print.ecdf
+print.ewcdf <- function (x, digits = getOption("digits") - 2L, ...) {
+  cat("Weighted empirical CDF \nCall: ")
+  print(attr(x, "call"), ...)
+  env <- environment(x)
+  xx <- get("x", envir=env)
+  ww <- get("w", envir=env)
+  n <- length(xx)
+  i1 <- 1L:min(3L, n)
+  i2 <- if (n >= 4L) max(4L, n - 1L):n else integer()
+  numform <- function(x) paste(formatC(x, digits = digits), collapse = ", ")
+  cat(" x[1:", n, "] = ", numform(xx[i1]), if (n > 3L) 
+      ", ", if (n > 5L) 
+      " ..., ", numform(xx[i2]), "\n", sep = "")
+  cat(" weights[1:", n, "] = ", numform(ww[i1]), if (n > 3L) 
+      ", ", if (n > 5L) 
+      " ..., ", numform(ww[i2]), "\n", sep = "")
+  invisible(x)
+}
+
+quantile.ewcdf <- function(x, probs=seq(0,1,0.25), names=TRUE, ...,
+                           normalise=TRUE, type=1) {
+  trap.extra.arguments(..., .Context="quantile.ewcdf")
+  if(!(type %in% c(1,2)))
+    stop("Only quantiles of type 1 and 2 are implemented", call.=FALSE)
+  env <- environment(x)
+  xx <- get("x", envir=env)
+  n <- length(xx)
+  Fxx <- get("y", envir=env)
+  maxFxx <- max(Fxx)
+  eps <- 100 * .Machine$double.eps
+  if(normalise) {
+    Fxx <- Fxx/maxFxx
+    maxp <- 1
+  } else {
+    maxp <- maxFxx
+  }
+  if(any((p.ok <- !is.na(probs)) &
+         (probs/maxp < -eps | probs/maxp > 1 + eps))) {
+    allowed <- if(normalise) "[0,1]" else
+               paste("permitted range", prange(c(0, maxp)))
+    stop(paste("'probs' outside", allowed), call.=FALSE)
+  }
+  if (na.p <- any(!p.ok)) {
+    o.pr <- probs
+    probs <- probs[p.ok]
+    probs <- pmax(0, pmin(maxp, probs))
+  }
+  np <- length(probs)
+  if (n > 0 && np > 0) {
+    qs <- numeric(np)
+    if(type == 1) {
+      ## right-continuous inverse
+      for(k in 1:np) qs[k] <- xx[min(which(Fxx >= probs[k]))]
+    } else {
+      ## average of left and right continuous
+      for(k in 1:np) {
+        pk <- probs[k]
+        ik <- min(which(Fxx >= probs[k]))
+        qs[k] <- if(Fxx[ik] > pk) (xx[ik] + xx[ik-1L])/2 else xx[ik]
+      }
+    }
+  } else {
+    qs <- rep(NA_real_, np)
+  }
+  if (names && np > 0L) {
+    dig <- max(2L, getOption("digits"))
+    if(normalise) {
+      probnames <-
+        if(np < 100) formatC(100 * probs, format="fg", width=1, digits=dig) else
+        format(100 * probs, trim = TRUE, digits = dig)
+      names(qs) <- paste0(probnames, "%")
+    } else {
+      names(qs) <-
+        if(np < 100) formatC(probs, format="fg", width=1, digits=dig) else
+        format(probs, trim=TRUE, digits=dig)
+    }
+  }
+  if (na.p) {
+    o.pr[p.ok] <- qs
+    names(o.pr) <- rep("", length(o.pr))
+    names(o.pr)[p.ok] <- names(qs)
+    o.pr
+  } else qs
+}
+
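+# Illustrative usage sketch (weights normalised to sum to 1, matching
+# the default):
+if(FALSE) {
+  x <- rnorm(100)
+  w <- runif(100)
+  Fw <- ewcdf(x, w/sum(w))
+  Fw(0)                            # weighted proportion of values <= 0
+  quantile(Fw, c(0.25, 0.5, 0.75))
+}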
diff --git a/R/exactMPLEstrauss.R b/R/exactMPLEstrauss.R
new file mode 100644
index 0000000..8fd57ae
--- /dev/null
+++ b/R/exactMPLEstrauss.R
@@ -0,0 +1,71 @@
+#
+# exactMPLEstrauss.R
+#
+# 'exact' MPLE for stationary Strauss process
+#
+#  $Revision: 1.6 $  $Date: 2014/11/10 07:39:41 $
+#
+
+exactMPLEstrauss <- local({
+
+  # main function
+  exactMPLEstrauss <- function(X, R, ngrid=2048, plotit=FALSE, project=TRUE) {
+#    n <- npoints(X)
+    W <- as.owin(X)
+    # border correction
+    WminR <- erosion(W, R)
+    bR <- (bdist.points(X) >= R)
+    nR <- sum(bR)
+    # evaluate neighbour counts for data points
+    Tcounts <- crosspaircounts(X, X, R) - 1L
+    sumT  <- sum(Tcounts[bR])
+    # determine the coefficients a_k for k = 0, 1, ...
+    Z <- scanmeasure(X, R, dimyx=ngrid)
+    Z <- Z[WminR, drop=FALSE]
+    kcounts <- tabulate(as.vector(Z$v) + 1L)
+    pixarea <- with(Z, xstep * ystep)
+    A <- kcounts * pixarea
+    # find optimal log(gamma)
+    op <- optim(log(0.5), lpl, sco, method="L-BFGS-B",
+                control=list(fnscale=-1),
+                lower=-Inf, upper=if(project) 0 else Inf,
+                A=A, sumT=sumT, nR=nR)
+    loggamma <- op$par
+    # plot?
+    if(plotit) {
+      x <- seq(log(1e-4), if(project) 0 else log(1e4), length=512)
+      plot(x, lpl(x, A, sumT, nR),
+           type="l",
+           xlab=expression(log(gamma)),
+           ylab=expression(log(PL(gamma))))
+      abline(v=loggamma, lty=3)
+    }
+    # derive optimal beta 
+    kmax <- length(A) - 1L
+    polypart <- A %*% exp(outer(0:kmax, loggamma))
+    beta <- nR/polypart
+    logbeta <- log(beta)
+    result <- c(logbeta, loggamma)
+    names(result) <- c("(Intercept)", "Interaction")
+    return(result)
+  }
+
+  # helper functions (vectorised)
+  # log pseudolikelihood
+  lpl <- function(theta, A=A, sumT=sumT, nR=nR) {
+    kmax <- length(A) - 1L
+    polypart <- A %*% exp(outer(0:kmax, theta))
+    nR * (log(nR) - log(polypart) - 1) + theta * sumT
+  }
+  # pseudoscore
+  sco <- function(theta, A=A, sumT=sumT, nR=nR) {
+    kmax <- length(A) - 1L
+    kseq <- 0:kmax
+    mat <- exp(outer(kseq, theta))
+    polypart <- A %*% mat
+    Dpolypart <- (A * kseq) %*% mat
+    sumT - nR * Dpolypart/polypart
+  }
+
+  exactMPLEstrauss
+})
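+
+# Illustrative usage sketch (assumes rStrauss() from spatstat for
+# perfect simulation of a Strauss process):
+if(FALSE) {
+  X <- rStrauss(beta=200, gamma=0.5, R=0.07)
+  exactMPLEstrauss(X, R=0.07, plotit=TRUE)
+}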
diff --git a/R/exactPdt.R b/R/exactPdt.R
new file mode 100755
index 0000000..5eb7c1a
--- /dev/null
+++ b/R/exactPdt.R
@@ -0,0 +1,74 @@
+#
+#	exactPdt.R
+#	R function exactPdt() for exact distance transform of pixel image
+#
+#	$Revision: 4.17 $	$Date: 2017/06/05 10:31:58 $
+#
+
+"exactPdt"<-
+  function(w)
+{
+  verifyclass(w, "owin")
+  if(w$type != "mask")
+    stop(paste("Input must be a window of type", sQuote("mask")))
+#	
+  nr <- w$dim[1L]
+  nc <- w$dim[2L]
+# input image will be padded out with a margin of width 2 on all sides
+  mr <- mc <- 2L
+  # full dimensions of padded image
+  Nnr <- nr + 2 * mr
+  Nnc <- nc + 2 * mc
+  N <- Nnr * Nnc
+  # output image (subset): rows & columns (R indexing)
+  rmin <- mr + 1L
+  rmax <- Nnr - mr
+  cmin <- mc + 1L
+  cmax <- Nnc - mc
+  # do padding
+  x <- matrix(FALSE, nrow=Nnr, ncol=Nnc)
+  x[rmin:rmax, cmin:cmax] <- w$m
+  #
+  res <- .C("ps_exact_dt_R",
+            as.double(w$xrange[1L]),
+            as.double(w$yrange[1L]),
+            as.double(w$xrange[2L]),
+            as.double(w$yrange[2L]),
+            nr = as.integer(nr),
+            nc = as.integer(nc),
+            mr = as.integer(mr),
+            mc = as.integer(mc),
+            inp = as.integer(t(x)),
+            distances = as.double (double(N)),
+            rows      = as.integer(integer(N)),
+            cols      = as.integer(integer(N)),
+            boundary  = as.double (double(N)),
+            PACKAGE = "spatstat")
+  dist <- matrix(res$distances,
+                 ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax]
+  rows <- matrix(res$rows,
+                 ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax]
+  cols <- matrix(res$cols,
+                 ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax]
+  bdist<- matrix(res$boundary,
+                 ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax]
+  # convert from C to R indexing
+  rows <- rows + 1L - as.integer(mr)
+  cols <- cols + 1L - as.integer(mc)
+  return(list(d=dist,row=rows,col=cols,b=bdist, w=w))
+}
+
+project2set <- function(X, W, ...) {
+  stopifnot(is.ppp(X))
+  W <- as.mask(W, ...)
+  eW <- exactPdt(W)
+  ## grid location of X
+  XX <- nearest.raster.point(X$x, X$y, W)
+  ijX <- cbind(XX$row, XX$col)
+  ## look up values of 'eW' at this location 
+  iY <- eW$row[ijX]
+  jY <- eW$col[ijX]
+  ## convert to spatial coordinates
+  Y <- ppp(W$xcol[jY], W$yrow[iY], window=W)
+  return(Y)
+}
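+
+# Illustrative usage sketch for these internal helpers (assumes the
+# 'letterR' window shipped with spatstat):
+if(FALSE) {
+  e <- exactPdt(as.mask(letterR, dimyx=128))
+  X <- runifpoint(10, as.rectangle(letterR))
+  Y <- project2set(X, letterR)   # nearest pixels inside letterR
+}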
diff --git a/R/exactdt.R b/R/exactdt.R
new file mode 100755
index 0000000..e3a498d
--- /dev/null
+++ b/R/exactdt.R
@@ -0,0 +1,75 @@
+#
+#	exactdt.R
+#	R function exactdt() for exact distance transform
+#
+#	$Revision: 4.17 $	$Date: 2017/06/05 10:31:58 $
+#
+
+exactdt <- local({
+
+  die <- function(why) { stop(paste("ppp object format corrupted:", why)) }
+
+  exactdt <- function(X, ...) {
+    verifyclass(X, "ppp")
+    w <- X$window
+    if(spatstat.options("exactdt.checks.data")) {
+      ## check validity of ppp structure 
+      bb <- as.rectangle(w)
+      xr <- bb$xrange
+      yr <- bb$yrange
+      rx <- range(X$x)
+      ry <- range(X$y)
+      if(rx[1L] < xr[1L] || rx[2L] > xr[2L]) die("x-coordinates out of bounds")
+      if(ry[1L] < yr[1L] || ry[2L] > yr[2L]) die("y-coordinates out of bounds")
+      if(length(X$x) != length(X$y))
+        die("x and y vectors have different length")
+      if(length(X$x) != X$n) die("length of x,y vectors does not match n")
+    }
+    w <- as.mask(w, ...)
+    ## dimensions of result
+    nr <- w$dim[1L]
+    nc <- w$dim[2L]
+    ## margins in C array 
+    mr <- 2
+    mc <- 2
+    ## full dimensions of allocated storage
+    Nnr <- nr + 2 * mr
+    Nnc <- nc + 2 * mc
+    N <- Nnr * Nnc
+    ## output rows & columns (R indexing)
+    rmin <- mr + 1
+    rmax <- Nnr - mr
+    cmin <- mc + 1
+    cmax <- Nnc - mc
+    ## go
+    res <- .C("exact_dt_R",
+              as.double(X$x),
+              as.double(X$y),
+              as.integer(X$n),
+              as.double(w$xrange[1L]),
+              as.double(w$yrange[1L]),
+              as.double(w$xrange[2L]),
+              as.double(w$yrange[2L]),
+              nr = as.integer(nr),
+              nc = as.integer(nc),
+              mr = as.integer(mr),
+              mc = as.integer(mc),
+              distances = as.double(double(N)),
+              indices = as.integer(integer(N)),
+              boundary = as.double(double(N)),
+              PACKAGE = "spatstat")
+    ## extract 
+    dist <- matrix(res$distances,
+                   ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax]
+    inde <- matrix(res$indices,
+                   ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax]
+    bdry <- matrix(res$boundary,
+                   ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax]
+    ## convert index from C to R indexing
+    inde <- inde + 1L
+    return(list(d = dist, i = inde, b = bdry, w=w))
+  }
+
+  exactdt
+})
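+
+## Editorial usage sketch (not upstream code): exactdt() is an internal
+## helper; applied to the built-in 'cells' pattern it returns matrices of
+## exact distances (d), nearest-point indices (i) and boundary distances
+## (b) on the pixel grid of the window:
+##   e <- exactdt(cells, dimyx=64)
+##   range(e$d); dim(e$i)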
+
diff --git a/R/factors.R b/R/factors.R
new file mode 100644
index 0000000..090b49f
--- /dev/null
+++ b/R/factors.R
@@ -0,0 +1,64 @@
+#'
+#'   factors.R
+#'
+#'  Tools for manipulating factors and factor-valued things
+#'
+#'  $Revision: 1.4 $  $Date: 2016/04/25 02:34:40 $
+
+relevel.im <- function(x, ref, ...) {
+  if(x$type != "factor")
+    stop("Only valid for factor-valued images")
+  x[] <- relevel(x[], ref, ...)
+  return(x)
+}
+
+relevel.ppp <- relevel.ppx <- function(x, ref, ...) {
+  stopifnot(is.multitype(x))
+  marks(x) <- relevel(marks(x), ref, ...)
+  return(x)
+}
+
+mergeLevels <- function(.f, ...) {
+  if(is.im(.f)) {
+    aa <- mergeLevels(.f[], ...)
+    .f[] <- aa
+    return(.f)
+  }
+  if(is.multitype(.f)) {
+    marks(.f) <- mergeLevels(marks(.f), ...)
+    return(.f)
+  }
+  stopifnot(is.factor(.f))
+  map <- list(...)
+  n <- length(map)
+  if(n == 0) return(.f)
+  # mapping for 'other'
+  if(any(isnul <- (lengths(map) == 0))) {
+    if(sum(isnul) > 1)
+      stop("At most one argument should be NULL or character(0)")
+    otherlevels <- setdiff(levels(.f), unlist(map))
+    map[[which(isnul)]] <- otherlevels
+  }
+  newlevels <- names(map)
+  oldlevels <- levels(.f)
+  mappedlevels <- unlist(map)
+  if(sum(nzchar(newlevels)) != n)
+    stop("Arguments must be in the form name=value")
+  if(!all(mappedlevels %in% oldlevels))
+    stop("Argument values must be levels of .f")
+  ## construct mapping
+  fullmap <- oldlevels
+  for(i in seq_len(n)) {
+    relevant <- oldlevels %in% map[[i]]
+    fullmap[relevant] <- newlevels[i]
+  }
+  ## apply mapping
+  newf <- factor(fullmap[.f], levels=unique(fullmap))
+  return(newf)
+}
+
+levelsAsFactor <- function(x) {
+  lev <- levels(x)
+  if(is.null(lev)) return(NULL)
+  return(factor(lev, levels=lev))
+}
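+
+## Editorial usage sketch (not upstream code), on a plain factor; the
+## new level names 'small' and 'large' are arbitrary:
+##   f <- factor(c("low", "mid", "high", "mid"))
+##   mergeLevels(f, small=c("low", "mid"), large="high")
+##   ## -> small small large small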
diff --git a/R/fardist.R b/R/fardist.R
new file mode 100644
index 0000000..815de81
--- /dev/null
+++ b/R/fardist.R
@@ -0,0 +1,63 @@
+##
+##  fardist.R
+##
+## Farthest distance to boundary
+##
+##  $Revision: 1.11 $ $Date: 2017/06/05 10:31:58 $
+
+fardist <- function(X, ...) {
+  UseMethod("fardist")
+}
+
+fardist.owin <- function(X, ..., squared=FALSE) {
+  verifyclass(X, "owin")
+  M <- as.mask(X, ...)
+  V <- if(is.mask(X)) vertices(M) else vertices(X)
+  nx <- dim(M)[2L]
+  ny <- dim(M)[1L]
+  x0 <- M$xcol[1L]
+  y0 <- M$yrow[1L]
+  xstep <- M$xstep
+  ystep <- M$ystep
+  if(squared) {
+    z <- .C("fardist2grid",
+            nx = as.integer(nx),
+            x0 = as.double(x0),
+            xstep = as.double(xstep),
+            ny = as.integer(ny),
+            y0 = as.double(y0),
+            ystep = as.double(ystep),
+            np = as.integer(length(V$x)),
+            xp = as.double(V$x),
+            yp = as.double(V$y),
+            dfar = as.double(numeric(nx * ny)),
+            PACKAGE = "spatstat")
+  } else {
+    z <- .C("fardistgrid",
+            nx = as.integer(nx),
+            x0 = as.double(x0),
+            xstep = as.double(xstep),
+            ny = as.integer(ny),
+            y0 = as.double(y0),
+            ystep = as.double(ystep),
+            np = as.integer(length(V$x)),
+            xp = as.double(V$x),
+            yp = as.double(V$y),
+            dfar = as.double(numeric(nx * ny)),
+            PACKAGE = "spatstat")
+  }
+  out <- im(z$dfar, xcol=M$xcol, yrow=M$yrow,
+            xrange=M$xrange, yrange=M$yrange, unitname=unitname(M))
+  if(!is.rectangle(X))
+    out <- out[X, drop=FALSE]
+  return(out)
+}
+  
+fardist.ppp <- function(X, ..., squared=FALSE) {
+  verifyclass(X, "ppp")
+  V <- vertices(Window(X))
+  D2 <- crossdist(X$x, X$y, V$x, V$y, squared=TRUE) 
+  D2max <- apply(D2, 1L, max)
+  if(squared) return(D2max) else return(sqrt(D2max))
+}
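+
+## Editorial usage sketch (not upstream code):
+##   fardist(letterR)              # pixel image: distance to farthest vertex
+##   fardist(cells, squared=TRUE)  # one squared distance per data point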
+
diff --git a/R/fasp.R b/R/fasp.R
new file mode 100755
index 0000000..641c873
--- /dev/null
+++ b/R/fasp.R
@@ -0,0 +1,219 @@
+#
+#	fasp.R
+#
+#	$Revision: 1.35 $	$Date: 2017/02/07 07:22:47 $
+#
+#
+#-----------------------------------------------------------------------------
+#
+
+# creator
+fasp <- function(fns, which, formulae=NULL,
+                 dataname=NULL, title=NULL, rowNames=NULL, colNames=NULL,
+                 checkfv=TRUE) {
+  stopifnot(is.list(fns))
+  stopifnot(is.matrix(which))
+  stopifnot(length(fns) == length(which))
+  n   <- length(which)
+
+  if(checkfv)
+    for(i in seq_len(n))
+      if(!is.fv(fns[[i]]))
+        stop(paste("fns[[", i, "]] is not an fv object", sep=""))
+
+  # set row and column labels
+  if(!is.null(rowNames))
+    rownames(which) <- rowNames
+  if(!is.null(colNames))
+    colnames(which) <- colNames
+
+  if(!is.null(formulae)) {
+    # verify format and convert to character vector
+    formulae <- FormatFaspFormulae(formulae, "formulae")
+    # ensure length matches length of "fns"
+    if(length(formulae) == 1L && n > 1L)
+        # single formula - replicate it
+        formulae <- rep.int(formulae, n)
+    else 
+        stopifnot(length(formulae) == length(which))
+  }
+
+  rslt <- list(fns=fns, 
+               which=which, default.formula=formulae,
+               dataname=dataname, title=title)
+  class(rslt) <- "fasp"
+  return(rslt)
+}
+
+# subset extraction operator
+
+"[.fasp" <-
+  function(x, I, J, drop=TRUE, ...) {
+
+        verifyclass(x, "fasp")
+        
+        m <- nrow(x$which)
+        n <- ncol(x$which)
+        
+        if(missing(I)) I <- 1:m
+        if(missing(J)) J <- 1:n
+        if(!is.vector(I) || !is.vector(J))
+          stop("Subset operator is only implemented for vector indices")
+
+        # determine index subset for lists 'fns', 'titles' etc
+        included <- rep.int(FALSE, length(x$fns))
+        w <- as.vector(x$which[I,J])
+        if(length(w) == 0)
+          stop("result is empty")
+        included[w] <- TRUE
+
+        # if only one cell selected, and drop=TRUE:
+        if((sum(included) == 1L) && drop)
+          return(x$fns[included][[1L]])
+        
+        # determine positions in shortened lists
+        whichIJ <- x$which[I,J,drop=FALSE]
+        newk <- cumsum(included)
+        newwhich <- matrix(newk[whichIJ],
+                           ncol=ncol(whichIJ), nrow=nrow(whichIJ))
+        rownames(newwhich) <- rownames(x$which)[I]
+        colnames(newwhich) <- colnames(x$which)[J]
+
+        # default plotting formulae - could be NULL
+        deform <- x$default.formula
+        
+        # create new fasp object
+        Y <- fasp(fns      = x$fns[included],
+                  formulae = if(!is.null(deform)) deform[included] else NULL,
+                  which    = newwhich,
+                  dataname = x$dataname,
+                  title    = x$title)
+        return(Y)
+}
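+
+## Editorial usage sketch (not upstream code): 'fasp' arrays usually come
+## from alltypes(); single-bracket subsetting with drop=TRUE (the default)
+## returns a plain 'fv' object when one cell is selected:
+##   A <- alltypes(amacrine, "K")   # 2 x 2 array of cross-type K functions
+##   dim(A)
+##   A[1, 1]                        # an 'fv' object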
+
+dim.fasp <- function(x) { dim(x$which) }
+
+# print method
+
+print.fasp <- function(x, ...) {
+  verifyclass(x, "fasp")
+  cat(paste("Function array (class", sQuote("fasp"), ")\n"))
+  dim <- dim(x$which)
+  cat(paste("Dimensions: ", dim[1L], "x", dim[2L], "\n"))
+  cat(paste("Title:", if(is.null(x$title)) "(None)" else x$title, "\n"))
+  invisible(NULL)
+}
+
+# other methods
+
+as.fv.fasp <- function(x) do.call(cbind.fv, x$fns)
+
+dimnames.fasp <- function(x) {
+  return(dimnames(x$which))
+}
+
+"dimnames<-.fasp" <- function(x, value) {
+  w <- x$which
+  dimnames(w) <- value
+  x$which <- w
+  return(x)
+}
+
+pool.fasp <- local({
+
+  pool.fasp <- function(...) {
+    Alist <- list(...)
+    Yname <- short.deparse(sys.call())
+    if(nchar(Yname) > 60) Yname <- paste(substr(Yname, 1L, 40L), "[..]")
+    nA <-  length(Alist)
+    if(nA == 0) return(NULL)
+    ## validate....
+    ## All arguments must be fasp objects
+    notfasp <- !unlist(lapply(Alist, inherits, what="fasp"))
+    if(any(notfasp)) {
+      n <- sum(notfasp)
+      why <- paste(ngettext(n, "Argument", "Arguments"),
+                   commasep(which(notfasp)),
+                   ngettext(n, "does not", "do not"),
+                   "belong to the class",
+                   dQuote("fasp"))
+      stop(why)
+    }
+    ## All arguments must have envelopes
+    notenv <- !unlist(lapply(Alist, has.env))
+    if(any(notenv)) {
+      n <- sum(notenv)
+      why <- paste(ngettext(n, "Argument", "Arguments"),
+                   commasep(which(notenv)),
+                   ngettext(n, "does not", "do not"),
+                   "contain envelope data")
+      stop(why)
+    }
+  
+    if(nA == 1L) return(Alist[[1L]])
+  
+    ## All arguments must have the same dimensions
+    witches <- lapply(Alist, getElement, name="which")
+    witch1 <- witches[[1L]]
+    same <- unlist(lapply(witches, identical, y=witch1))
+    if(!all(same))
+      stop("Function arrays do not have the same array dimensions")
+  
+    ## OK.
+    ## Pool envelopes at each position
+    result <- Alist[[1L]]
+    fns <- result$fns
+    for(k in seq_along(fns)) {
+      funks <- lapply(Alist, extractfun, k=k)
+      fnk <- do.call(pool.envelope, funks)
+      attr(fnk, "einfo")$Yname <- Yname
+      fns[[k]] <- fnk
+    }
+    result$fns <- fns
+    return(result)
+  }
+
+  has.env <- function(z) {
+    all(unlist(lapply(z$fns, inherits, what="envelope")))
+  }
+
+  extractfun <- function(z, k) { z$fns[[k]] }
+  
+  pool.fasp
+  
+})
+
+## other functions
+
+FormatFaspFormulae <- local({
+
+  zapit <- function(x, argname) {
+    if(inherits(x, "formula")) deparse(x)
+    else if(is.character(x)) x
+    else stop(paste("The entries of",
+                    sQuote(argname),
+                    "must be formula objects or strings"))
+  }
+
+  FormatFaspFormulae <- function(f, argname) {
+    ## f should be a single formula object, a list of formula objects,
+    ## a character vector, or a list containing formulae and strings.
+    ## It will be converted to a character vector.
+    result <-
+      if(is.character(f))
+        f
+      else if(inherits(f, "formula"))
+        deparse(f)
+      else if(is.list(f))
+        unlist(lapply(f, zapit, argname=argname))
+      else stop(paste(sQuote(argname),
+                      "should be a formula, a list of formulae,",
+                      "or a character vector"))
+
+    return(result)
+  }
+
+  FormatFaspFormulae
+})
+
+
diff --git a/R/fgk3.R b/R/fgk3.R
new file mode 100755
index 0000000..8737d27
--- /dev/null
+++ b/R/fgk3.R
@@ -0,0 +1,497 @@
+#
+#	$Revision: 1.26 $	$Date: 2017/06/05 10:31:58 $
+#
+#	Estimates of F, G and K for three-dimensional point patterns
+#
+#
+#  ............ user interface .............................
+#
+
+K3est <- function(X, ...,
+                  rmax=NULL, nrval=128,
+                  correction=c("translation", "isotropic"),
+                  ratio=FALSE)
+{
+  stopifnot(inherits(X, "pp3"))
+  correction <- pickoption("correction", correction,
+                           c(translation="translation",
+			     trans="translation",
+                             isotropic="isotropic",
+                             iso="isotropic",
+                             best="isotropic"),
+                           multi=TRUE)
+  trap.extra.arguments(..., .Context="In K3est")
+  B <- X$domain
+  if(is.null(rmax))
+    rmax <- diameter(B)/2
+  r <- seq(from=0, to=rmax, length.out=nrval)
+  np <- npoints(X)
+  denom <- np * (np-1)/volume(B)
+  
+  # this will be the output data frame
+  K <- data.frame(r=r, theo= (4/3) * pi * r^3)
+  desc <- c("distance argument r", "theoretical Poisson %s")
+  K <- ratfv(K, NULL, denom,
+             "r", quote(K[3](r)), 
+             "theo", NULL, c(0,rmax/2), c("r","{%s[%s]^{pois}}(r)"), desc,
+             fname=c("K", "3"),
+             ratio=ratio)
+
+  # extract the x,y,z ranges as a vector of length 6
+  flatbox <- unlist(B[1:3])
+
+  # extract coordinates
+  coo <- coords(X)
+  
+  if(any(correction %in% "translation")) {
+    u <- k3engine(coo$x, coo$y, coo$z, flatbox,
+                  rmax=rmax, nrval=nrval, correction="translation")
+    K <- bind.ratfv(K,
+                    data.frame(trans=u$num), u$denom,
+                    "{hat(%s)[%s]^{trans}}(r)",
+                    "translation-corrected estimate of %s",
+                    "trans",
+                    ratio=ratio)
+  }
+  if(any(correction %in% "isotropic")) {
+    u <- k3engine(coo$x, coo$y, coo$z, flatbox,
+                  rmax=rmax, nrval=nrval, correction="isotropic")
+    K <- bind.ratfv(K,
+                    data.frame(iso=u$num), u$denom,
+                    "{hat(%s)[%s]^{iso}}(r)",
+                    "isotropic-corrected estimate of %s",
+                    "iso",
+                    ratio=ratio)
+  }
+  # default is to display them all
+  formula(K) <- . ~ r
+  unitname(K) <- unitname(X)
+  return(K)
+}
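+
+## Editorial usage sketch (not upstream code): the three-dimensional
+## summaries take 'pp3' patterns, e.g. a uniform random pattern:
+##   X <- runifpoint3(150)
+##   plot(K3est(X))
+## G3est(), F3est() and pcf3est() below are called in the same way.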
+                  
+G3est <- function(X, ...,
+                  rmax=NULL, nrval=128,
+                  correction=c("rs", "km", "Hanisch"))
+{
+  stopifnot(inherits(X, "pp3"))
+  correction <- pickoption("correction", correction,
+                           c(rs="rs",
+                             border="rs",
+                             km="km",
+                             KM="km",
+                             Hanisch="han",
+                             hanisch="han",
+                             best="km"),
+                           multi=TRUE)
+  trap.extra.arguments(..., .Context="In G3est")
+  B <- X$domain
+  if(is.null(rmax))
+    rmax <- diameter(B)/2
+  r <- seq(from=0, to=rmax, length.out=nrval)
+
+  coo <- coords(X)
+  lambda <- nrow(coo)/volume(B)
+  
+  # this will be the output data frame
+  G <- data.frame(r=r, theo= 1 - exp( - lambda * (4/3) * pi * r^3))
+  desc <- c("distance argument r", "theoretical Poisson %s")
+  G <- fv(G, "r", substitute(G3(r), NULL),
+          "theo", , c(0,rmax/2), c("r","%s[pois](r)"), desc, fname="G3")
+
+  # extract the x,y,z ranges as a vector of length 6
+  flatbox <- unlist(B[1:3])
+
+  # collect four histograms for censored data
+  u <- g3Cengine(coo$x, coo$y, coo$z, flatbox,
+                 rmax=rmax, nrval=nrval)
+
+  if("rs" %in% correction) 
+    G <- bind.fv(G, data.frame(rs=u$rs), "%s[rs](r)",
+                  "reduced sample estimate of %s",
+                  "rs")
+  if("km" %in% correction)
+    G <- bind.fv(G, data.frame(km=u$km), "%s[km](r)",
+                  "Kaplan-Meier estimate of %s",
+                  "km")
+  if("han" %in% correction) 
+    G <- bind.fv(G, data.frame(han=u$han), "%s[han](r)",
+                  "Normalised Hanisch estimate of %s",
+                  "han")
+  # default is to display them all
+  formula(G) <- . ~ r
+  unitname(G) <- unitname(X)
+  return(G)
+}
+
+F3est <- function(X, ...,
+                  rmax=NULL, nrval=128, vside=NULL,
+                  correction=c("rs", "km", "cs"),
+                  sphere=c("fudge", "ideal", "digital"))
+{
+  stopifnot(inherits(X, "pp3"))
+  sphere <- match.arg(sphere)
+  correction <- pickoption("correction", correction,
+                           c(rs="rs",
+                             border="rs",
+                             km="km",
+                             KM="km",
+                             Kaplan="km",
+                             cs="cs",
+                             CS="cs",
+                             best="km"),
+                           multi=TRUE)
+  trap.extra.arguments(..., .Context="In F3est")
+  B <- X$domain
+  if(is.null(rmax))
+    rmax <- diameter(B)/2
+  r <- seq(from=0, to=rmax, length.out=nrval)
+
+  coo <- coords(X)
+  vol <- volume(B)
+  lambda <- nrow(coo)/vol
+
+  # determine voxel size
+  if(missing(vside)) {
+    voxvol <- vol/spatstat.options("nvoxel")
+    vside <- voxvol^(1/3)
+    # ensure the shortest side is a whole number of voxels
+    s <- shortside(B)
+    m <- ceiling(s/vside)
+    vside <- s/m
+  }
+
+  # compute theoretical value
+  switch(sphere,
+         ideal = {
+           volsph <- (4/3) * pi * r^3
+           spherename <- "ideal sphere"
+         },
+         fudge = {
+           volsph <- 0.78 * (4/3) * pi * r^3
+           spherename <- "approximate sphere"
+         },
+         digital = {
+           volsph <- digital.volume(c(0, rmax), nrval, vside)
+           spherename <- "digital sphere"
+         })
+  theo.desc <- paste("theoretical Poisson %s using", spherename)
+           
+  # this will be the output data frame
+  FF <- data.frame(r     = r,
+                   theo  = 1 - exp( - lambda * volsph))
+  desc <- c("distance argument r", theo.desc)
+  labl <- c("r","%s[pois](r)")
+  FF <- fv(FF, "r", substitute(F3(r), NULL),
+          "theo", , c(0,rmax/2), labl, desc, fname="F3")
+
+  # extract the x,y,z ranges as a vector of length 6
+  flatbox <- unlist(B[1:3])
+
+  # go
+  u <- f3Cengine(coo$x, coo$y, coo$z, flatbox,
+                 rmax=rmax, nrval=nrval, vside=vside)
+
+  if("rs" %in% correction) 
+    FF <- bind.fv(FF, data.frame(rs=u$rs), "%s[rs](r)",
+                  "reduced sample estimate of %s",
+                  "rs")
+  if("km" %in% correction)
+    FF <- bind.fv(FF, data.frame(km=u$km), "%s[km](r)",
+                  "Kaplan-Meier estimate of %s",
+                  "km")
+  if("cs" %in% correction)
+    FF <- bind.fv(FF, data.frame(cs=u$cs), "%s[cs](r)",
+                  "Chiu-Stoyan estimate of %s",
+                  "cs")
+  # default is to display them all
+  formula(FF) <- . ~ r
+  unitname(FF) <- unitname(X)
+  return(FF)
+}
+
+pcf3est <- function(X, ...,
+                    rmax=NULL, nrval=128,
+                    correction=c("translation", "isotropic"),
+                    delta=NULL, adjust=1, biascorrect=TRUE)
+{
+  stopifnot(inherits(X, "pp3"))
+  correction <- pickoption("correction", correction,
+                           c(translation="translation",
+                             trans="translation",
+                             isotropic="isotropic",
+                             iso="isotropic",
+                             best="isotropic"),
+                           multi=TRUE)
+  trap.extra.arguments(..., .Context="In pcf3est")
+  B <- X$domain
+  if(is.null(rmax))
+    rmax <- diameter(B)/2
+  r <- seq(from=0, to=rmax, length.out=nrval)
+
+  if(is.null(delta)) {
+    lambda <- npoints(X)/volume(B)
+    delta <- adjust * 0.26/lambda^(1/3)
+  }
+  if(biascorrect) {
+    # bias correction
+    rondel <- r/delta
+    biasbit <- ifelseAX(rondel > 1, 1, (3/4)*(rondel + 2/3 - (1/3)*rondel^3))
+  }
+
+  # this will be the output data frame
+  g <- data.frame(r=r, theo=rep.int(1, length(r)))
+  desc <- c("distance argument r", "theoretical Poisson %s")
+  g <- fv(g, "r", quote(g[3](r)),
+          "theo", , c(0,rmax/2),
+          c("r", "{%s[%s]^{pois}}(r)"),
+          desc, fname=c("g", "3"))
+
+  # extract the x,y,z ranges as a vector of length 6
+  flatbox <- unlist(B[1:3])
+
+  # extract coordinates
+  coo <- coords(X)
+  
+  if(any(correction %in% "translation")) {
+    u <- pcf3engine(coo$x, coo$y, coo$z, flatbox,
+                  rmax=rmax, nrval=nrval, correction="translation", delta=delta)
+    gt <- u$f
+    if(biascorrect)
+      gt <- gt/biasbit
+    g <- bind.fv(g, data.frame(trans=gt),
+                 "{hat(%s)[%s]^{trans}}(r)",
+                 "translation-corrected estimate of %s",
+                 "trans")
+  }
+  if(any(correction %in% "isotropic")) {
+    u <- pcf3engine(coo$x, coo$y, coo$z, flatbox,
+                  rmax=rmax, nrval=nrval, correction="isotropic", delta=delta)
+    gi <- u$f
+    if(biascorrect)
+      gi <- gi/biasbit
+    g <- bind.fv(g, data.frame(iso=gi), 
+                 "{hat(%s)[%s]^{iso}}(r)",
+                 "isotropic-corrected estimate of %s",
+                 "iso")
+  }
+  # default is to display them all
+  formula(g) <- . ~ r
+  unitname(g) <- unitname(X)
+  attr(g, "delta") <- delta
+  return(g)
+}
+
+#  ............ low level code ..............................
+#
+k3engine <- function(x, y, z, box=c(0,1,0,1,0,1),
+                     rmax=1, nrval=100, correction="translation") 
+{
+  code <- switch(correction, translation=0, isotropic=1)
+  res <- .C("RcallK3",
+            as.double(x), as.double(y), as.double(z), 
+            as.integer(length(x)),
+            as.double(box[1L]), as.double(box[2L]), 
+            as.double(box[3L]), as.double(box[4L]), 
+            as.double(box[5L]), as.double(box[6L]), 
+            as.double(0), as.double(rmax), 
+            as.integer(nrval),
+            f = as.double(numeric(nrval)),
+            num = as.double(numeric(nrval)),
+            denom = as.double(numeric(nrval)),
+            as.integer(code),
+            PACKAGE = "spatstat")
+  return(list(range = c(0,rmax),
+              f = res$f, num=res$num, denom=res$denom, 
+              correction=correction))
+}
+#
+#
+#
+g3engine <- function(x, y, z, box=c(0,1,0,1,0,1), 
+                     rmax=1, nrval=10, correction="Hanisch G3") 
+{
+	code <- switch(correction, "minus sampling"=1, "Hanisch G3"=3)
+	res <- .C("RcallG3",
+		as.double(x), as.double(y), as.double(z), 
+		as.integer(length(x)),
+		as.double(box[1L]), as.double(box[2L]), 
+		as.double(box[3L]), as.double(box[4L]), 
+		as.double(box[5L]), as.double(box[6L]), 
+		as.double(0), as.double(rmax), 
+		as.integer(nrval),
+		f = as.double(numeric(nrval)),
+		num = as.double(numeric(nrval)),
+		denom = as.double(numeric(nrval)),
+		as.integer(code),
+	  PACKAGE = "spatstat")
+	return(list(range = c(0, rmax), f = res$f, num=res$num, denom=res$denom, 
+		correction=correction))
+}
+#
+#
+f3engine <- function(x, y, z, box=c(0,1,0,1,0,1), 
+	vside=0.05, 
+	range=c(0,1.414), nval=25, correction="minus sampling") 
+{
+#
+	code <- switch(correction, "minus sampling"=1, no=0)
+	res <- .C("RcallF3",
+		as.double(x), as.double(y), as.double(z), 
+		as.integer(length(x)),
+		as.double(box[1L]), as.double(box[2L]), 
+		as.double(box[3L]), as.double(box[4L]), 
+		as.double(box[5L]), as.double(box[6L]), 
+		as.double(vside), 
+		as.double(range[1L]), as.double(range[2L]),
+		m=as.integer(nval),
+		num = as.integer(integer(nval)),
+		denom = as.integer(integer(nval)),
+		as.integer(code),
+	  PACKAGE = "spatstat")
+	r <- seq(from=range[1L], to=range[2L], length.out=nval)
+	f <- with(res, ifelseXB(denom > 0, num/denom, 1))
+
+	return(list(r = r, f = f, num=res$num, denom=res$denom, 
+		correction=correction))
+}
+
+f3Cengine <- function(x, y, z, box=c(0,1,0,1,0,1), 
+	vside=0.05, rmax=1, nrval=25)
+{
+#
+  res <- .C("RcallF3cen",
+            as.double(x), as.double(y), as.double(z), 
+            as.integer(length(x)),
+            as.double(box[1L]), as.double(box[2L]), 
+            as.double(box[3L]), as.double(box[4L]), 
+            as.double(box[5L]), as.double(box[6L]), 
+            as.double(vside), 
+            as.double(0), as.double(rmax),
+            m=as.integer(nrval),
+            obs = as.integer(integer(nrval)),
+            nco = as.integer(integer(nrval)),
+            cen = as.integer(integer(nrval)),
+            ncc = as.integer(integer(nrval)),
+            upperobs = as.integer(integer(1L)),
+            uppercen = as.integer(integer(1L)),
+            PACKAGE = "spatstat")
+  r <- seq(from=0, to=rmax, length.out=nrval)
+  #
+  obs <- res$obs
+  nco <- res$nco
+  cen <- res$cen
+  ncc <- res$ncc
+  upperobs <- res$upperobs
+  uppercen <- res$uppercen
+  #
+  breaks <- breakpts.from.r(r)
+  km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs)
+  rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen)
+  #
+  ero <- eroded.volumes(as.box3(box), r)
+  H <- cumsum(nco/ero)
+  cs <- H/max(H[is.finite(H)])
+  #
+  return(list(rs=rs, km=km$km, hazard=km$lambda, cs=cs, r=r))
+}
+
+g3Cengine <- function(x, y, z, box=c(0,1,0,1,0,1), 
+	rmax=1, nrval=25)
+{
+#
+  res <- .C("RcallG3cen",
+            as.double(x), as.double(y), as.double(z), 
+            as.integer(length(x)),
+            as.double(box[1L]), as.double(box[2L]), 
+            as.double(box[3L]), as.double(box[4L]), 
+            as.double(box[5L]), as.double(box[6L]), 
+            as.double(0), as.double(rmax),
+            m=as.integer(nrval),
+            obs = as.integer(integer(nrval)),
+            nco = as.integer(integer(nrval)),
+            cen = as.integer(integer(nrval)),
+            ncc = as.integer(integer(nrval)),
+            upperobs = as.integer(integer(1L)),
+            uppercen = as.integer(integer(1L)),
+            PACKAGE = "spatstat")
+  r <- seq(from=0, to=rmax, length.out=nrval)
+  #
+  obs <- res$obs
+  nco <- res$nco
+  cen <- res$cen
+  ncc <- res$ncc
+  upperobs <- res$upperobs
+  uppercen <- res$uppercen
+  #
+  breaks <- breakpts.from.r(r)
+  km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs)
+  rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen)
+  #
+  ero <- eroded.volumes(as.box3(box), r)
+  H <- cumsum(nco/ero)
+  han <- H/max(H[is.finite(H)])
+  return(list(rs=rs, km=km$km, hazard=km$lambda, han=han, r=r))
+}
+
+pcf3engine <- function(x, y, z, box=c(0,1,0,1,0,1),
+                       rmax=1, nrval=100, correction="translation",
+                       delta=rmax/10) 
+{
+  code <- switch(correction, translation=0, isotropic=1)
+  res <- .C("Rcallpcf3",
+            as.double(x), as.double(y), as.double(z), 
+            as.integer(length(x)),
+            as.double(box[1L]), as.double(box[2L]), 
+            as.double(box[3L]), as.double(box[4L]), 
+            as.double(box[5L]), as.double(box[6L]), 
+            as.double(0), as.double(rmax), 
+            as.integer(nrval),
+            f = as.double(numeric(nrval)),
+            num = as.double(numeric(nrval)),
+            denom = as.double(numeric(nrval)),
+            method=as.integer(code),
+            delta=as.double(delta),
+            PACKAGE = "spatstat")
+	return(list(range = c(0,rmax),
+                    f = res$f, num=res$num, denom=res$denom, 
+                    correction=correction))
+}
+#
+# ------------------------------------------------------------
+# volume of a sphere (exact and approximate)
+#
+
+sphere.volume <- function(range=c(0,1.414), nval=10) 
+{
+  rr <- seq(from=range[1L], to=range[2L], length.out=nval)
+  return( (4/3) * pi * rr^3)
+}
+
+digital.volume <- function(range=c(0, 1.414),  nval=25, vside= 0.05) 
+{
+#	Calculate number of points in digital sphere 
+#	by performing distance transform for a single point
+#	in the middle of a suitably large box
+#
+#	This takes EIGHT TIMES AS LONG as the corresponding empirical F-hat !!!
+#
+	w <- 2 * range[2L] + 2 * vside
+#
+	dvol <- .C("RcallF3",
+                   as.double(w/2), as.double(w/2), as.double(w/2),
+                   as.integer(1L),
+                   as.double(0), as.double(w), 
+                   as.double(0), as.double(w), 
+                   as.double(0), as.double(w), 
+                   as.double(vside),
+                   as.double(range[1L]), as.double(range[2L]),
+                   as.integer(nval),
+                   num = as.integer(integer(nval)),
+                   denom = as.integer(integer(nval)),
+                   as.integer(0),
+	                 PACKAGE = "spatstat")$num
+#	
+        (vside^3) * dvol 
+      }
+
diff --git a/R/fii.R b/R/fii.R
new file mode 100755
index 0000000..b5deb33
--- /dev/null
+++ b/R/fii.R
@@ -0,0 +1,242 @@
+#
+# fii.R
+#
+# Class of fitted interpoint interactions
+#
+#
+fii <- function(interaction=NULL, coefs=numeric(0),
+                Vnames=character(0), IsOffset=NULL) {
+  if(is.null(interaction)) 
+    interaction <- Poisson()
+  stopifnot(is.interact(interaction))
+  if(is.poisson.interact(interaction)) {
+    if(length(Vnames) > 0)
+      stop("Coefficients inappropriate for Poisson process")
+  }
+  if(is.null(IsOffset))
+    IsOffset <- rep.int(FALSE, length(Vnames))
+  else {
+    stopifnot(is.logical(IsOffset))
+    stopifnot(length(IsOffset) == length(Vnames))
+  } 
+  out <- list(interaction=interaction,
+              coefs=coefs,
+              Vnames=Vnames,
+              IsOffset=IsOffset)
+  class(out) <- c("fii", class(out))
+  return(out)
+}
+
+summary.fii <- function(object, ...) {
+  y <- unclass(object)
+  INTERACT <- object$interaction
+  coefs    <- object$coefs
+  Vnames   <- object$Vnames
+  IsOffset <- object$IsOffset
+  y$poisson <- is.poisson.interact(INTERACT)
+  thumbnail <- NULL
+  if(y$poisson) {
+    thumbnail <- "Poisson()"
+  } else {
+    if(!is.null(INTERACT$interpret)) {
+      # invoke auto-interpretation feature
+      sensible <-  
+        if(newstyle.coeff.handling(INTERACT))
+          (INTERACT$interpret)(coefs[Vnames[!IsOffset]], INTERACT)
+        else 
+          (INTERACT$interpret)(coefs, INTERACT)
+      if(!is.null(sensible)) {
+        header <- paste("Fitted", sensible$inames)
+        printable <- sensible$printable
+        # Try to make a thumbnail description
+        param <- sensible$param
+        ipar <- INTERACT$par
+        if(all(lengths(param) == 1) &&
+           all(lengths(ipar) == 1)) {
+          allargs <- append(ipar, param)
+          allargs <- lapply(allargs, signif, digits=4)
+          thumbnail <- fakecallstring(INTERACT$creator, allargs)
+        } 
+      } else {
+        # no fitted interaction parameters (e.g. Hard Core)
+        header <- NULL
+        printable <- NULL
+        thumbnail <- paste0(INTERACT$creator, "()")
+      }
+    } else {
+      # fallback
+      sensible <- NULL
+      VN <- Vnames[!IsOffset]
+      if(length(VN) > 0) {
+        header <- "Fitted interaction terms"
+        icoef <- coefs[VN]
+        printable <-  exp(unlist(icoef))
+        ricoef <- lapply(icoef, signif, digits=4)
+        thumbnail <- fakecallstring(INTERACT$creator, ricoef)
+      } else {
+        header <- NULL
+        printable <- NULL
+        thumbnail <- paste0(INTERACT$creator, "()")
+      }
+    }
+    y <- append(y, list(sensible=sensible,
+                        header=header,
+                        printable=printable,
+                        thumbnail=thumbnail))
+  }
+  class(y) <- c("summary.fii", class(y))
+  return(y)
+}
+
+print.fii <- function(x, ...) {
+  sx <- summary(x)
+  do.call(print.summary.fii,
+          resolve.defaults(list(x=sx, brief=TRUE), list(...)))
+  return(invisible(NULL))
+}
+
+print.summary.fii <- local({
+
+  #'  hide internal arguments
+  print.summary.fii <- function(x, ...) {
+    PrintIt(x, ...)
+  }
+  
+  PrintIt <- function(x, ..., prefix="Interaction: ",
+                      banner=TRUE,
+                      family = waxlyrical('extras'),
+                      brief = !family,
+                      tiny = !waxlyrical('errors')) {
+    if(tiny) {
+      #' use thumbnail if available
+      thumbnail <- x$thumbnail
+      if(!is.null(thumbnail)) {
+        splat(thumbnail)
+        return(invisible(NULL))
+      }
+    }
+    terselevel <- spatstat.options('terse')
+    if(banner && !brief)
+      cat(prefix)
+    if(x$poisson) {
+      splat("Poisson process")
+      parbreak(terselevel)
+    } else {
+      print(x$interaction, family=family, brief=TRUE, banner=banner)
+      if(!is.null(x$printable)) {
+        nvalues <- length(x$printable)
+        nheader <- length(x$header)
+        if(nvalues == 1) {
+          splat(paste(paste0(x$header, ":\t"), x$printable))
+        } else if(nvalues == nheader) {
+          for(i in 1:nheader) {
+            hdi <- x$header[i]
+            xpi <- x$printable[[i]]
+            if(!is.list(xpi) && length(xpi) == 1) {
+              splat(paste0(hdi, ":\t", xpi))
+            } else {
+              splat(paste0(hdi, ":"))
+              print(xpi)
+            }
+          } 
+        } else {
+          splat(x$header)
+          print(x$printable)
+        } 
+      }
+    }
+    if(!brief) {
+      co <- x$coefs[x$Vnames[!x$IsOffset]]
+      if(length(co) > 0) {
+        parbreak(terselevel)
+        splat("Relevant coefficients:")
+        print(co)
+      }
+    }
+    return(invisible(NULL))
+  }
+
+  print.summary.fii
+})
+
+parameters.fii <- function(model, ...) {
+  ss <- summary(model)
+  out <- append(ss$interaction$par, ss$sensible$param)
+  return(out)
+}
+
+coef.summary.fii <- function(object, ...) {
+  object$printable
+}
+
+reach.fii <- function(x, ..., epsilon=0) {
+  inte <- x$interaction
+  coeffs <- x$coefs
+  Vnames <- x$Vnames
+
+  if(is.poisson.interact(inte))
+    return(0)
+
+  # get 'irange' function from interaction object
+  irange <- inte$irange
+
+  if(is.null(irange))
+    return(Inf)
+
+  # apply 'irange' function using fitted coefficients
+  if(newstyle.coeff.handling(inte))
+    ir <- irange(inte, coeffs[Vnames], epsilon=epsilon)
+  else 
+    ir <- irange(inte, coeffs, epsilon=epsilon)
+  
+  if(is.na(ir))
+    ir <- Inf
+
+  return(ir)
+}
+
+plot.fii <- function(x, ...) {
+  inte <- x$interaction
+  if(is.poisson.interact(inte)) {
+    message("Poisson interaction; nothing plotted")
+    return(invisible(NULL))
+  }
+  plfun <- inte$plot %orifnull% inte$family$plot
+  if(is.null(plfun)) 
+    stop("Plotting not implemented for this type of interaction")
+  plfun(x, ...)
+}
+
+
+fitin <- function(object) {
+  UseMethod("fitin")
+}
+
+fitin.ppm <- function(object) {
+  f <- object$fitin
+  if(!is.null(f))
+    return(f)
+  # For compatibility with older versions
+  inte <- object$interaction
+  if(is.null(inte)) 
+    f <- fii() # Poisson
+  else {
+    coefs <- coef(object)
+    Vnames <- object$internal$Vnames
+    IsOffset <- object$internal$IsOffset
+    # Internal names of regressor variables 
+    f <- fii(inte, coefs, Vnames, IsOffset)
+  }
+  unitname(f) <- unitname(data.ppm(object))
+  return(f)
+}
+
+as.interact.fii <- function(object) {
+  verifyclass(object, "fii")
+  return(object$interaction)
+}
+
+coef.fii <- function(object, ...) {
+  verifyclass(object, "fii")
+  return(object$coefs)
+}
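+
+## Editorial usage sketch (not upstream code): 'fii' objects are normally
+## extracted from a fitted model rather than built directly:
+##   fit <- ppm(cells ~ 1, Strauss(r=0.1))
+##   f <- fitin(fit)
+##   f; coef(f); reach(f)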
diff --git a/R/fiksel.R b/R/fiksel.R
new file mode 100755
index 0000000..9a821d8
--- /dev/null
+++ b/R/fiksel.R
@@ -0,0 +1,182 @@
+#
+#
+#    fiksel.R
+#
+#    $Revision: 1.14 $	$Date: 2017/06/05 10:31:58 $
+#
+#    Fiksel interaction 
+#    
+#    See Stoyan, Kendall & Mecke (1987), p. 161
+#
+# -------------------------------------------------------------------
+#	
+
+Fiksel <- local({
+
+  # ......... auxiliary functions ...........
+
+  fikselterms <- function(U, X, r, kappa, EqualPairs=NULL) {
+    answer <- crossfikselterms(U, X, r, kappa)
+    nU <- npoints(U)
+    # subtract contributions from identical pairs (exp(-0) = 1 for each)
+    if(length(EqualPairs) > 0) {
+      idcount <- as.integer(table(factor(EqualPairs[,2L], levels=1:nU)))
+      answer <- answer - idcount
+    }
+    return(answer)
+  }
+
+  crossfikselterms <- function(X, Y, r, kappa) {
+    stopifnot(is.numeric(r))
+    # sort in increasing order of x coordinate
+    oX <- fave.order(X$x)
+    oY <- fave.order(Y$x)
+    Xsort <- X[oX]
+    Ysort <- Y[oY]
+    nX <- npoints(X)
+    nY <- npoints(Y)
+    # call C routine
+    out <- .C("Efiksel",
+            nnsource = as.integer(nX),
+            xsource  = as.double(Xsort$x),
+            ysource  = as.double(Xsort$y),
+            nntarget = as.integer(nY),
+            xtarget  = as.double(Ysort$x),
+            ytarget  = as.double(Ysort$y),
+            rrmax    = as.double(r),
+            kkappa   = as.double(kappa),
+            values   = as.double(double(nX)),
+            PACKAGE = "spatstat")
+    answer <- integer(nX)
+    answer[oX] <- out$values
+    return(answer)
+  }
+
+
+  # ........ template object ..............
+  
+  BlankFiksel <- 
+  list(
+         name   = "Fiksel process",
+         creator = "Fiksel",
+         family  = "pairwise.family",  # evaluated later
+         pot    = function(d, par) {
+           v <- (d <= par$r) * exp( - d * par$kappa)
+           v[ d <= par$hc ] <-  (-Inf)
+           v
+         },
+         par    = list(r = NULL, hc = NULL, kappa=NULL),  # filled in later
+         parnames = c("interaction distance",
+                      "hard core distance",
+                      "rate parameter"), 
+         selfstart = function(X, self) {
+           # self starter for Fiksel
+           nX <- npoints(X)
+           if(nX < 2) {
+             # not enough points to make any decisions
+             return(self)
+           }
+           md <- minnndist(X)
+           if(!is.na(hc <- self$par$hc)) {
+             # value fixed by user or previous invocation
+             # check it
+             if(md < hc)
+               warning(paste("Hard core distance is too large;",
+                             "some data points will have zero probability"))
+             return(self)
+           }
+           if(md == 0) 
+             warning(paste("Pattern contains duplicated points:",
+                           "hard core must be zero"))
+           # take hc = minimum interpoint distance * n/(n+1)
+           hcX <- md * nX/(nX+1)
+           Fiksel(r=self$par$r, hc = hcX, kappa=self$par$kappa)
+         },
+         init   = function(self) {
+           r <- self$par$r
+           hc <- self$par$hc
+           kappa <- self$par$kappa
+           check.1.real(r)
+           check.1.real(kappa)
+           if(!is.na(hc)) {
+             check.1.real(hc)
+             stopifnot(hc > 0)
+             stopifnot(r > hc)
+           } else stopifnot(r > 0)
+         },
+         update = NULL,       # default OK
+         print = NULL,         # default OK
+         interpret =  function(coeffs, self) {
+           a <- as.numeric(coeffs[1L])
+           return(list(param=list(a=a),
+                       inames="interaction strength a",
+                       printable=signif(a)))
+         },
+         valid = function(coeffs, self) {
+           a <- (self$interpret)(coeffs, self)$param$a
+           return(is.finite(a))
+         },
+         project = function(coeffs, self) {
+           if((self$valid)(coeffs, self))
+             return(NULL)
+           hc <- self$par$hc
+           if(hc > 0) return(Hardcore(hc)) else return(Poisson()) 
+         },
+         irange = function(self, coeffs=NA, epsilon=0, ...) {
+           r <- self$par$r
+           hc <- self$par$hc
+           if(anyNA(coeffs))
+             return(r)
+           a <- coeffs[1L]
+           if(abs(a) <= epsilon)
+             return(hc)
+           else
+             return(r)
+         },
+       version=NULL, # evaluated later
+       # fast evaluation is available for the border correction only
+       can.do.fast=function(X,correction,par) {
+         return(all(correction %in% c("border", "none")))
+       },
+       fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) {
+         # fast evaluator for Fiksel interaction
+         if(!all(correction %in% c("border", "none")))
+           return(NULL)
+         if(spatstat.options("fasteval") == "test")
+           message("Using fast eval for Fiksel")
+         r <- potpars$r
+         hc <- potpars$hc
+         kappa <- potpars$kappa
+         hclose <- strausscounts(U, X, hc, EqualPairs)
+         fikselbit <- fikselterms(U, X, r, kappa, EqualPairs)
+         answer <- ifelseXB(hclose == 0, fikselbit, -Inf)
+         return(matrix(answer, ncol=1))
+       },
+       Mayer=function(coeffs, self) {
+         # second Mayer cluster integral
+         a <- as.numeric(coeffs[1L])
+         r     <- self$par$r
+         hc    <- self$par$hc
+         kappa <- self$par$kappa
+         f <- function(x, kappa, a){ 2 * pi * x *
+                                       (1 - exp(a * exp(-x * kappa))) }
+         hardbit <- integrate(f=f, lower=hc, upper=r,
+                              a=a, kappa=kappa)
+         mess <- hardbit[["message"]]
+         if(!identical(mess, "OK")) {
+           warning(mess)
+           return(NA)
+         }
+         return(pi * hc^2 + hardbit$value)
+       }
+  )
+  class(BlankFiksel) <- "interact"
+
+  Fiksel <- function(r, hc=NA, kappa) {
+    instantiate.interact(BlankFiksel, list(r = r, hc = hc, kappa=kappa))
+  }
+
+  Fiksel <- intermaker(Fiksel, BlankFiksel)
+  
+  Fiksel
+})
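+
+## Editorial usage sketch (not upstream code); the parameter values are
+## illustrative only.  With hc=NA the self-starter picks the hard core:
+##   fit <- ppm(unmark(spruces) ~ 1, Fiksel(r=3.5, kappa=1))
+##   summary(fit)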
diff --git a/R/fitted.mppm.R b/R/fitted.mppm.R
new file mode 100755
index 0000000..ab3e294
--- /dev/null
+++ b/R/fitted.mppm.R
@@ -0,0 +1,63 @@
+# 
+#  fitted.mppm.R
+#
+# method for 'fitted' for mppm objects
+#
+#   $Revision: 1.2 $   $Date: 2014/11/10 07:42:09 $
+# 
+
+fitted.mppm <- function(object, ...,
+                        type="lambda", dataonly=FALSE) {
+#  sumry <- summary(object)
+
+  type <- pickoption("type", type, c(lambda="lambda",
+                                     cif   ="lambda",
+                                     trend ="trend"), multi=FALSE, exact=FALSE)
+  # extract fitted model object and data frame
+  glmfit  <- object$Fit$FIT
+  glmdata <- object$Fit$moadf
+  # interaction names
+  Vnames <- unlist(object$Fit$Vnamelist)
+  interacting <- (length(Vnames) > 0)
+    
+  # Modification of `glmdata' may be required
+  if(interacting) 
+    switch(type,
+           trend={
+             # zero the interaction statistics
+             glmdata[ , Vnames] <- 0
+           },
+           lambda={
+             # Find any dummy points with zero conditional intensity
+             forbid <- matrowany(as.matrix(glmdata[, Vnames]) == -Inf)
+             # exclude from predict.glm
+             glmdata <- glmdata[!forbid, ]
+           })
+
+  # Compute predicted [conditional] intensity values
+  values <- predict(glmfit, newdata=glmdata, type="response")
+  # Note: the `newdata' argument is necessary in order to obtain
+  # predictions at all quadrature points. If it is omitted then
+  # we would only get predictions at the quadrature points j
+  # where glmdata$SUBSET[j]=TRUE.
+
+  if(interacting && type=="lambda") {
+   # reinsert zeroes
+    vals <- numeric(length(forbid))
+    vals[forbid] <- 0
+    vals[!forbid] <- values
+    values <- vals
+  }
+
+  names(values) <- NULL
+  
+  id <- glmdata$id
+  if(dataonly) {
+    # extract only data values
+    isdata <- (glmdata$.mpl.Y != 0)
+    values <- values[isdata]
+    id     <- id[isdata]
+  }
+
+  return(split(values, id))
+}
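+
+## Editorial usage sketch (not upstream code), using the built-in
+## 'waterstriders' patterns:
+##   H <- hyperframe(Y = waterstriders)
+##   fit <- mppm(Y ~ 1, data = H)
+##   fitted(fit)   # one vector of fitted intensities per pattern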
diff --git a/R/fitted.ppm.R b/R/fitted.ppm.R
new file mode 100755
index 0000000..a16aab6
--- /dev/null
+++ b/R/fitted.ppm.R
@@ -0,0 +1,134 @@
+# 
+#  fitted.ppm.R
+#
+# method for 'fitted' for ppm objects
+#
+#   $Revision: 1.17 $   $Date: 2017/06/05 10:31:58 $
+# 
+
+fitted.ppm <- function(object, ..., type="lambda", dataonly=FALSE,
+                       new.coef=NULL, leaveoneout=FALSE,
+                       drop=FALSE, check=TRUE, repair=TRUE, dropcoef=FALSE) {
+  verifyclass(object, "ppm")
+
+  if(check && damaged.ppm(object)) {
+    if(!repair)
+      stop("object format corrupted; try update(object, use.internal=TRUE)")
+    message("object format corrupted; repairing it.")
+    object <- update(object, use.internal=TRUE)
+  }
+
+  if(leaveoneout) {
+    ## Leave-one-out calculation for data points only
+    if(missing(dataonly)) dataonly <- TRUE
+    if(!dataonly)
+      stop("Leave-one-out calculation requires dataonly=TRUE")
+    if(!is.null(new.coef))
+      stop("Leave-one-out calculation requires new.coef=NULL")
+  }
+  
+  coeffs <- adaptcoef(new.coef, coef(object), drop=dropcoef)
+  
+  uniform <- is.poisson.ppm(object) && no.trend.ppm(object)
+
+  typelist <- c("lambda", "cif",    "trend", "link")
+  typevalu <- c("lambda", "lambda", "trend", "link")
+  if(is.na(m <- pmatch(type, typelist)))
+    stop(paste("Unrecognised choice of ", sQuote("type"),
+               ": ", sQuote(type), sep=""))
+  type <- typevalu[m]
+  
+  if(uniform) {
+    lambda <- exp(coeffs[[1L]])
+    Q <- quad.ppm(object, drop=drop)
+    lambda <- rep.int(lambda, n.quad(Q))
+  } else {
+    glmdata <- getglmdata(object, drop=drop)
+    glmfit  <- getglmfit(object)
+    Vnames <- object$internal$Vnames
+    interacting <- (length(Vnames) != 0)
+    
+    # Modification of `glmdata' may be required
+    if(interacting) 
+      switch(type,
+           trend={
+             # zero the interaction statistics
+             glmdata[ , Vnames] <- 0
+           },
+           link=,
+           lambda={
+             # Find any dummy points with zero conditional intensity
+             forbid <- matrowany(as.matrix(glmdata[, Vnames]) == -Inf)
+             # exclude from predict.glm
+             glmdata <- glmdata[!forbid, ]
+           })
+
+    # Compute predicted [conditional] intensity values
+    changecoef <- !is.null(new.coef) || (object$method != "mpl")
+    lambda <- GLMpredict(glmfit, glmdata, coeffs, changecoef=changecoef,
+                         type = ifelse(type == "link", "link", "response"))
+
+    # Note: the `newdata' argument is necessary in order to obtain
+    # predictions at all quadrature points. If it is omitted then
+    # we would only get predictions at the quadrature points j
+    # where glmdata$SUBSET[j]=TRUE. Assuming drop=FALSE.
+
+    if(interacting && type=="lambda") {
+     # reinsert zeroes
+      lam <- numeric(length(forbid))
+      lam[forbid] <- 0
+      lam[!forbid] <- lambda
+      lambda <- lam
+    }
+
+  }
+  if(dataonly)
+    lambda <- lambda[is.data(quad.ppm(object))]
+
+  if(leaveoneout) {
+    ## Perform leverage calculation
+    dfb <- dfbetas(object, multitypeOK=TRUE)
+    delta <- with(dfb, 'discrete')[with(dfb, 'is.atom'),,drop=FALSE]
+    ## adjust fitted value
+    mom <- model.matrix(object)[is.data(quad.ppm(object)),,drop=FALSE]
+    if(type == "trend" && !uniform && interacting)
+      mom[, Vnames] <- 0
+    lambda <- lambda * exp(- rowSums(delta * mom))
+  }
+  lambda <- unname(as.vector(lambda))
+  return(lambda)
+}
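+
+## Editorial usage sketch (not upstream code):
+##   fit <- ppm(cells ~ x)
+##   lam  <- fitted(fit)                   # at all quadrature points
+##   lamX <- fitted(fit, dataonly=TRUE)    # at the data points only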
+
+adaptcoef <- function(new.coef, fitcoef, drop=FALSE) {
+  ## a replacement for 'fitcoef' will be extracted from 'new.coef' 
+  if(is.null(new.coef)) {
+    coeffs <- fitcoef
+  } else if(length(new.coef) == length(fitcoef)) {
+    coeffs <- new.coef
+  } else {
+    fitnames <- names(fitcoef)
+    newnames <- names(new.coef)
+    if(is.null(newnames) || is.null(fitnames))
+      stop(paste("Argument new.coef has wrong length",
+                 length(new.coef), ": should be", length(fitcoef)),
+           call.=FALSE)
+    absentnames <- setdiff(fitnames, newnames)
+    excessnames <- setdiff(newnames, fitnames)
+    if((nab <- length(absentnames)) > 0)
+      stop(paste(ngettext(nab, "Coefficient", "Coefficients"),
+                 commasep(sQuote(absentnames)),
+                 ngettext(nab, "is", "are"),
+                 "missing from new.coef"),
+           call.=FALSE)
+    if(!drop && ((nex <- length(excessnames)) > 0)) 
+      stop(paste(ngettext(nex, "Coefficient", "Coefficients"),
+                 commasep(sQuote(excessnames)),
+                 ngettext(nab, "is", "are"),
+                 "present in new.coef but not in coef(object)"),
+           call.=FALSE)
+    #' extract only the relevant coefficients
+    coeffs <- new.coef[fitnames]
+  }
+  return(coeffs)
+}
+
diff --git a/R/flipxy.R b/R/flipxy.R
new file mode 100755
index 0000000..3783c7b
--- /dev/null
+++ b/R/flipxy.R
@@ -0,0 +1,59 @@
+#
+# flipxy.R
+#
+# flip x and y coordinates
+#
+# $Revision: 1.3 $ $Date: 2017/02/07 07:22:47 $ 
+#
+
+flipxy <- function(X) {
+  UseMethod("flipxy")
+}
+
+flipxy.ppp <- function(X) {
+  stopifnot(is.ppp(X))
+  ppp(X$y, X$x, marks=X$marks,
+      window=flipxy(X$window), unitname=unitname(X),
+      check=FALSE)
+}
+
+flipxypolygon <- function(p) {
+  # flip x and y coordinates, and reinstate anticlockwise order
+  oldy <- p$y
+  p$y <- rev(p$x)
+  p$x <- rev(oldy)
+  # area and hole status unchanged
+  return(p)
+}
+
+flipxy.owin <- function(X) {
+  verifyclass(X, "owin")
+  switch(X$type,
+         rectangle={
+           W <- owin(X$yrange, X$xrange, unitname=unitname(X))
+         },
+         polygonal={
+           bdry <- lapply(X$bdry, flipxypolygon)
+           W <- owin(poly=bdry, check=FALSE, unitname=unitname(X))
+         },
+         mask={
+           W <- owin(mask=t(X$m),
+                     xy=list(x=X$yrow, y=X$xcol),
+                     unitname=unitname(X))
+         },
+         stop("Unrecognised window type")
+         )
+  return(W)
+}
+
+flipxy.psp <- function(X) {
+  stopifnot(is.psp(X))
+  flipends <- (X$ends)[, c(2L,1L,4L,3L), drop=FALSE]
+  as.psp(flipends, window=flipxy(X$window), marks=X$marks,
+         unitname=unitname(X), check=FALSE)
+}
+
+flipxy.im <- function(X) {
+  im(t(X$v), xcol=X$yrow, yrow=X$xcol, unitname=unitname(X))
+}
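+
+## Editorial usage sketch (not upstream code):
+##   plot(flipxy(letterR))   # window with x and y interchanged
+##   flipxy(cells)           # same operation on a point pattern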
+
diff --git a/R/fourierbasis.R b/R/fourierbasis.R
new file mode 100644
index 0000000..d51e2a7
--- /dev/null
+++ b/R/fourierbasis.R
@@ -0,0 +1,20 @@
+fourierbasis <- function(x, k, win = boxx(rep(list(0:1), ncol(k)))) {
+  x <- as.matrix(x)
+  k <- as.matrix(k)
+  if (nrow(k) == 0 | nrow(x) == 0) 
+    return(complex())
+  d <- ncol(x)
+  if (ncol(k) != d) 
+    stop("Arguments x and k must have the same number of columns.")
+  win <- as.boxx(win)
+  boxlengths <- as.numeric(win$ranges[2L, ] - win$ranges[1L, ])
+  if (length(boxlengths) != d) 
+    stop("The box dimension differs from the number of columns in x and k")
+  rslt <- exp(2 * pi * (0+1i) * outer(k[, 1L], x[, 1L]/boxlengths[1L]))
+  if (d > 1) {
+    for (i in 2:d) {
+      rslt <- rslt * exp(2 * pi * (0+1i) * outer(k[, i], x[, i]/boxlengths[i]))
+    }
+  }
+  return(rslt/prod(boxlengths))
+}
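+
+## Editorial usage sketch (not upstream code): rows of k index the basis
+## functions and rows of x the evaluation points, so the result is an
+## nrow(k) x nrow(x) complex matrix:
+##   x <- matrix(runif(20), ncol = 2)               # 10 points in [0,1]^2
+##   k <- matrix(c(1, 0, 0, 1), ncol = 2, byrow = TRUE)
+##   dim(fourierbasis(x, k))                        # 2 x 10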
diff --git a/R/fryplot.R b/R/fryplot.R
new file mode 100755
index 0000000..ef32eb7
--- /dev/null
+++ b/R/fryplot.R
@@ -0,0 +1,81 @@
+#
+#  fryplot.R
+#
+#  $Revision: 1.15 $ $Date: 2017/02/07 07:22:47 $
+#
+
+fryplot <- function(X, ..., width=NULL, from=NULL, to=NULL, axes=FALSE) {
+  Xname <- short.deparse(substitute(X))
+  X <- as.ppp(X)
+  b <- as.rectangle(X)
+  halfspan <- with(b, c(diff(xrange), diff(yrange)))
+  if(!is.null(width)) {
+    halfwidth <- ensure2vector(width)/2
+    halfspan <- pmin.int(halfspan, halfwidth)
+  }
+  bb <- owin(c(-1,1) * halfspan[1L], c(-1,1) * halfspan[2L])
+  Y <- frypoints(X, from=from, to=to, dmax=diameter(bb))[bb]
+  do.call(plot.ppp,
+          resolve.defaults(list(x=Y),
+                           list(...),
+                           list(main=paste("Fry plot of", Xname))))
+  if(axes) {
+    ## vertical axis spans the y half-range, horizontal axis the x half-range
+    lines(c(0,0), c(-1,1) * halfspan[2L])
+    lines(c(-1,1) * halfspan[1L], c(0,0))
+  }
+  return(invisible(NULL))
+}
+
+frypoints <- function(X, from=NULL, to=NULL, dmax=Inf) {
+  X <- as.ppp(X)
+  b <- as.rectangle(X)
+  bb <- owin(c(-1,1) * diff(b$xrange), c(-1,1) * diff(b$yrange))
+  n <- X$n
+  xx <- X$x
+  yy <- X$y
+  ## determine (dx, dy) for all relevant pairs
+  if(is.null(from) && is.null(to)) {
+    if(is.infinite(dmax)) {
+      dx <- outer(xx, xx, "-")
+      dy <- outer(yy, yy, "-")
+      notsame <- matrix(TRUE, n, n)
+      diag(notsame) <- FALSE
+      DX <- as.vector(dx[notsame])
+      DY <- as.vector(dy[notsame])
+      I <- row(notsame)[notsame]
+    } else {
+      cl <- closepairs(X, dmax)
+      DX <- cl$dx
+      DY <- cl$dy
+      I  <- cl$j  ## sic: I is the index of the 'TO' element
+    }
+  } else {
+    seqn <- seq_len(n)
+    from <- if(is.null(from)) seqn else seqn[from]
+    to   <- if(is.null(to))   seqn else seqn[to]
+    if(is.infinite(dmax)) {
+      dx <- outer(xx[to], xx[from], "-")
+      dy <- outer(yy[to], yy[from], "-")
+      notsame <- matrix(TRUE, n, n)
+      diag(notsame) <- FALSE
+      notsame <- notsame[to, from, drop=FALSE]
+      DX <- as.vector(dx[notsame])
+      DY <- as.vector(dy[notsame])
+      I <- row(notsame)[notsame]
+    } else {
+      cl <- crosspairs(X[from], X[to], dmax)
+      ok <- with(cl, from[i] != to[j])
+      DX <- cl$dx[ok]
+      DY <- cl$dy[ok]
+      I  <- cl$j[ok]
+    }
+  }
+  ## form into point pattern
+  Fry <- ppp(DX, DY, window=bb, check=FALSE)
+  if(is.marked(X)) {
+    marx <- as.data.frame(marks(X))
+    marxto <- if(is.null(to)) marx else marx[to, ,drop=FALSE]
+    marks(Fry) <- marxto[I, ]
+  }
+  return(Fry)
+}
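+
+## Editorial usage sketch (not upstream code):
+##   fryplot(cells, axes=TRUE)   # all pairwise difference vectors
+##   Y <- frypoints(cells)       # the same Fry points as a 'ppp' object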
diff --git a/R/funxy.R b/R/funxy.R
new file mode 100644
index 0000000..3e42649
--- /dev/null
+++ b/R/funxy.R
@@ -0,0 +1,107 @@
+#
+#   funxy.R
+#
+#   Class of functions of x,y location with a spatial domain
+#
+#   $Revision: 1.14 $   $Date: 2017/06/05 10:31:58 $
+#
+
+spatstat.xy.coords <- function(x,y) {
+  if(missing(y) || is.null(y)) {
+    xy <- if(is.ppp(x) || is.lpp(x)) coords(x) else
+          if(checkfields(x, c("x", "y"))) x else 
+          stop("Argument y is missing", call.=FALSE)
+    x <- xy$x
+    y <- xy$y
+  }
+  xy.coords(x,y)[c("x","y")]
+}
+
+funxy <- function(f, W=NULL) {
+  stopifnot(is.function(f))
+  stopifnot(is.owin(W))
+  if(!identical(names(formals(f))[1:2], c("x", "y")))
+    stop("The first two arguments of f should be named x and y", call.=FALSE)
+  if(is.primitive(f))
+    stop("Not implemented for primitive functions", call.=FALSE)
+  ## copy 'f' including formals, environment, attributes
+  h <- f
+  ## make new function body:
+  ## paste body of 'f' into last line of 'spatstat.xy.coords'
+  b <- body(spatstat.xy.coords)
+  b[[length(b)]] <- body(f)
+  ## transplant the body 
+  body(h) <- b
+  ## reinstate attributes
+  attributes(h) <- attributes(f)
+  ## stamp it
+  class(h) <- c("funxy", class(h))
+  attr(h, "W") <- W
+  attr(h, "f") <- f
+  return(h)  
+}
+
+print.funxy <- function(x, ...) {
+  nama <- names(formals(x))
+  splat(paste0("function", paren(paste(nama,collapse=","))),
+        "of class", sQuote("funxy"))
+  print(as.owin(x))
+  splat("\nOriginal function definition:")
+  print(attr(x, "f"))
+}
+
+summary.funxy <- function(object, ...) { print(object, ...) }
+
+as.owin.funxy <- function(W, ..., fatal=TRUE) {
+  W <- attr(W, "W")
+  as.owin(W, ..., fatal=fatal)
+}
+
+domain.funxy <- Window.funxy <- function(X, ...) { as.owin(X) }
+
+#   Note that 'distfun' (and other classes inheriting from funxy)
+#   has a method for as.owin that takes precedence over as.owin.funxy
+#   and this will affect the behaviour of the following plot methods
+#   because 'distfun' does not have its own plot method.
+
+plot.funxy <- function(x, ...) {
+  xname <- short.deparse(substitute(x))
+  W <- as.owin(x)
+  do.call(do.as.im,
+          resolve.defaults(list(x, action="plot"),
+                           list(...),
+                           list(main=xname, W=W)))
+  invisible(NULL)
+}
+
+contour.funxy <- function(x, ...) {
+  xname <- deparse(substitute(x))
+  W <- as.owin(x)
+  do.call(do.as.im,
+          resolve.defaults(list(x, action="contour"),
+                           list(...),
+                           list(main=xname, W=W)))
+  invisible(NULL)
+}
+
+persp.funxy <- function(x, ...) {
+  xname <- deparse(substitute(x))
+  W <- as.rectangle(as.owin(x))
+  do.call(do.as.im,
+          resolve.defaults(list(x, action="persp"),
+                           list(...),
+                           list(main=xname, W=W)))
+  invisible(NULL)
+}
+
+hist.funxy <- function(x, ..., xname) {
+  if(missing(xname) || is.null(xname)) xname <- short.deparse(substitute(x))
+  a <- do.call.matched(as.im,
+                       list(X=x, ...),
+                       c("X", "W",
+		         "dimyx", "eps", "xy",
+   		         "na.replace", "strict"),
+		       sieve=TRUE)
+  Z <- a$result
+  do.call(hist.im, append(list(x=Z, xname=xname), a$otherargs))
+}
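+
+## Editorial usage sketch (not upstream code): wrap an ordinary function
+## of (x,y) so that it carries a domain and also accepts point patterns:
+##   f <- funxy(function(x, y) { x + y }, square(1))
+##   f(0.2, 0.3)
+##   f(cells)    # evaluated at the points of 'cells'
+##   plot(f)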
diff --git a/R/fv.R b/R/fv.R
new file mode 100755
index 0000000..8ac99e7
--- /dev/null
+++ b/R/fv.R
@@ -0,0 +1,1465 @@
+##
+##
+##    fv.R
+##
+##    class "fv" of function value objects
+##
+##    $Revision: 1.148 $   $Date: 2017/07/13 01:26:02 $
+##
+##
+##    An "fv" object represents one or more related functions
+##    of the same argument, such as different estimates of the K function.
+##
+##    It is a data.frame with additional attributes
+##    
+##         argu       column name of the function argument (typically "r")
+##
+##         valu       column name of the recommended function
+##
+##         ylab       generic label for y axis e.g. K(r)
+##
+##         fmla       default plot formula
+##
+##         alim       recommended range of function argument
+##
+##         labl       recommended xlab/ylab for each column
+##
+##         desc       longer description for each column 
+##
+##         unitname   name of unit of length for 'r'
+##
+##         shade      (optional) column names of upper & lower limits
+##                    of shading - typically a confidence interval
+##
+##    Objects of this class are returned by Kest(), etc
+##
+##################################################################
+## creator
+
+fv <- function(x, argu="r", ylab=NULL, valu, fmla=NULL,
+               alim=NULL, labl=names(x), desc=NULL, unitname=NULL,
+               fname=NULL, yexp=ylab) {
+  stopifnot(is.data.frame(x))
+  ## check arguments
+  stopifnot(is.character(argu))
+  if(!is.null(ylab))
+    stopifnot(is.character(ylab) || is.language(ylab))
+  if(!missing(yexp)) {
+    if(is.null(yexp)) yexp <- ylab
+    else stopifnot(is.language(yexp))
+  }
+  stopifnot(is.character(valu))
+  
+  if(!(argu %in% names(x)))
+    stop(paste(sQuote("argu"), "must be the name of a column of x"))
+
+  if(!(valu %in% names(x)))
+    stop(paste(sQuote("valu"), "must be the name of a column of x"))
+
+  if(is.null(fmla))
+    fmla <- paste(valu, "~", argu)
+  else if(inherits(fmla, "formula")) {
+    ## convert formula to string
+    fmla <- flat.deparse(fmla)
+  } else if(!is.character(fmla))
+    stop(paste(sQuote("fmla"), "should be a formula or a string"))
+
+  if(missing(alim)) {
+    ## Note: if alim is given as NULL, it is not changed.
+    argue <- x[[argu]]
+    alim <- range(argue[is.finite(argue)])
+  } else if(!is.null(alim)) {
+    if(!is.numeric(alim) || length(alim) != 2)
+      stop(paste(sQuote("alim"), "should be a vector of length 2"))
+  }
+  
+  if(!is.character(labl))
+    stop(paste(sQuote("labl"), "should be a vector of strings"))
+  stopifnot(length(labl) == ncol(x))
+  if(is.null(desc))
+    desc <- character(ncol(x))
+  else {
+    stopifnot(is.character(desc))
+    stopifnot(length(desc) == ncol(x))
+    nbg <- is.na(desc)
+    if(any(nbg)) desc[nbg] <- ""
+  }
+  if(!is.null(fname))
+    stopifnot(is.character(fname) && length(fname) %in% 1:2)
+  ## pack attributes
+  attr(x, "argu") <- argu
+  attr(x, "valu") <- valu
+  attr(x, "ylab") <- ylab
+  attr(x, "yexp") <- yexp
+  attr(x, "fmla") <- fmla
+  attr(x, "alim") <- alim
+  attr(x, "labl") <- labl
+  attr(x, "desc") <- desc
+  attr(x, "units") <- as.units(unitname)
+  attr(x, "fname") <- fname
+  attr(x, "dotnames") <- NULL
+  attr(x, "shade") <- NULL
+  ## 
+  class(x) <- c("fv", class(x))
+  return(x)
+}
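+
+## Illustrative sketch (not run): constructing a minimal fv object by hand.
+## The column names 'r' and 'est' and the function name "f" are arbitrary
+## choices for this example.
+##
+##   df <- data.frame(r=seq(0, 1, length.out=101),
+##                    est=seq(0, 1, length.out=101)^2)
+##   z <- fv(df, argu="r", ylab=quote(f(r)), valu="est", fmla=". ~ r",
+##           labl=c("r", "hat(%s)(r)"),
+##           desc=c("distance argument r", "estimate of %s"),
+##           fname="f")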
+
+.Spatstat.FvAttrib <- c(
+                        "argu",
+                        "valu",
+                        "ylab",
+                        "yexp",
+                        "fmla",
+                        "alim",
+                        "labl",
+                        "desc",
+                        "units",
+                        "fname",
+                        "dotnames",
+                        "shade")
+
+as.data.frame.fv <- function(x, ...) {
+  stopifnot(is.fv(x))
+  fva <- .Spatstat.FvAttrib
+  attributes(x)[fva] <- NULL
+  class(x) <- "data.frame"
+  x
+}
+
+is.fv <- function(x) {
+  inherits(x, "fv")
+}
+
+## 
+
+as.fv <- function(x) { UseMethod("as.fv") }
+
+as.fv.fv <- function(x) x
+
+as.fv.data.frame <- function(x) {
+  if(ncol(x) < 2) stop("Need at least 2 columns")
+  return(fv(x, argu=names(x)[1L], valu=names(x)[2L]))
+}
+
+as.fv.matrix <- function(x)  {
+  y <- as.data.frame(x)
+  if(any(bad <- is.na(names(y))))
+    names(y)[bad] <- paste0("V", which(bad))
+  return(as.fv.data.frame(y))
+}
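+
+## Quick sketch (not run): for a plain data frame, the first column becomes
+## the function argument and the second the recommended function values.
+##   as.fv(data.frame(r=1:5, y=(1:5)^2))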
+
+## other methods for as.fv are described in the files for the relevant classes.
+
+vanilla.fv <- function(x) {
+  ## remove everything except basic fv characteristics
+  retain <- c("names", "row.names", .Spatstat.FvAttrib)
+  attributes(x) <- attributes(x)[retain]
+  class(x) <- c("fv", "data.frame")
+  return(x)
+}
+
+print.fv <- local({
+  
+  maxwords <- function(z, m) { max(0, which(cumsum(nchar(z) + 1) <= m+1)) }
+  usewords <- function(z, n) paste(z[1:n], collapse=" ")
+
+  print.fv <- function(x, ..., tight=FALSE) {
+    verifyclass(x, "fv")
+    terselevel <- spatstat.options("terse")
+    showlabels <- waxlyrical('space', terselevel)
+    showextras <- waxlyrical('extras', terselevel)
+    nama <- names(x)
+    a <- attributes(x)
+    if(!is.null(ylab <- a$ylab)) {
+      if(is.language(ylab))
+        ylab <- flat.deparse(ylab)
+    }
+    if(!inherits(x, "envelope")) {
+      splat("Function value object",
+            paren(paste("class", sQuote("fv"))))
+      if(!is.null(ylab)) {
+        xlab <- fvlabels(x, expand=TRUE)[[a$argu]]
+        splat("for the function", xlab, "->", ylab)
+      }
+    }
+    ## Descriptions ..
+    desc <- a$desc
+    ## .. may require insertion of ylab
+    if(!is.null(ylab))
+      desc <- sprintf(desc, ylab)
+    ## Labels ..
+    labl <- fvlabels(x, expand=TRUE)
+    ## Avoid overrunning text margin
+    maxlinewidth <- options('width')[[1L]]
+    key.width <- max(nchar(nama))
+    labl.width <- if(showlabels) max(nchar(labl), nchar("Math.label")) else 0
+    desc.width <- max(nchar(desc), nchar("Description"))
+    fullwidth <- key.width + labl.width + desc.width + 2
+    if(fullwidth > maxlinewidth && tight) {
+      ## try shortening the descriptions so that it all fits on one line
+      spaceleft <- maxlinewidth - (key.width + labl.width + 2)
+      desc <- truncline(desc, spaceleft)
+      desc.width <- max(nchar(desc), nchar("Description"))    
+      fullwidth <- key.width + labl.width + desc.width + 2
+    }
+    spaceleft <- maxlinewidth - (key.width + 1)
+    if(desc.width > spaceleft) {
+      ## Descriptions need to be truncated to max line width
+      desc <- truncline(desc, spaceleft)
+      desc.width <- max(nchar(desc), nchar("Description"))    
+      fullwidth <- key.width + labl.width + desc.width + 2
+    }
+    if(showextras) {
+      fullwidth <- pmin(maxlinewidth, fullwidth)
+      fullline <- paste0(rep(".", fullwidth), collapse="")
+      cat(fullline, fill=TRUE)
+    }
+    df <- data.frame(Math.label=labl,
+                     Description=desc,
+                     row.names=nama,
+                     stringsAsFactors=FALSE)
+    if(!showlabels) df <- df[,-1L,drop=FALSE]
+    print(df, right=FALSE)
+  ##
+    if(showextras) {
+      cat(fullline, fill=TRUE)
+      splat("Default plot formula: ",
+            flat.deparse(as.formula(a$fmla)))
+      splat("where", dQuote("."), "stands for",
+            commasep(sQuote(fvnames(x, ".")), ", "))
+      if(!is.null(a$shade)) 
+        splat("Columns", commasep(sQuote(a$shade)), 
+              "will be plotted as shading (by default)")
+      alim <- a$alim
+      splat("Recommended range of argument",
+            paste0(a$argu, ":"),
+            if(!is.null(alim)) prange(signif(alim, 5)) else "not specified")
+      rang <- signif(range(with(x, .x)), 5)
+      splat("Available range", "of argument",
+            paste0(a$argu, ":"), prange(rang))
+      ledge <- summary(unitname(x))$legend
+      if(!is.null(ledge))
+        splat(ledge)
+    }
+    return(invisible(NULL))
+  }
+
+  print.fv
+})
+
+
+
+## manipulating the names in fv objects
+
+.Spatstat.FvAbbrev <- c(
+                        ".x",
+                        ".y",
+                        ".s",
+                        ".",
+                        "*",
+                        ".a")
+
+fvnames <- function(X, a=".") {
+  verifyclass(X, "fv")
+  if(!is.character(a) || length(a) > 1)
+    stop("argument a must be a character string")
+  switch(a,
+         ".y"={
+           return(attr(X, "valu"))
+         },
+         ".x"={
+           return(attr(X, "argu"))
+         },
+         ".s"={
+           return(attr(X, "shade"))
+         },
+         "." = {
+           ## The specified 'dotnames'
+           dn <- attr(X, "dotnames")
+           if(is.null(dn)) 
+             dn <- fvnames(X, "*")
+           return(dn)
+         },
+         "*"=,
+         ".a"={
+           ## all column names other than the function argument
+           allvars <- names(X)
+           argu <- attr(X, "argu")
+           nam <- allvars[allvars != argu]
+           nam <- rev(nam) ## convention
+           return(nam)
+         },
+         {
+           if(a %in% names(X)) return(a)
+           stop(paste("Unrecognised abbreviation", dQuote(a)))
+         }
+       )
+}
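+
+## Illustrative sketch (not run), for a hypothetical fv object K such as
+## the result of Kest():
+##   fvnames(K, ".x")   # name of the function argument, typically "r"
+##   fvnames(K, ".y")   # name of the recommended estimate, e.g. "iso"
+##   fvnames(K, ".")    # columns used in the default plot
+##   fvnames(K, "*")    # all columns except the function argument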
+
+"fvnames<-" <- function(X, a=".", value) {
+  verifyclass(X, "fv")
+  if(!is.character(a) || length(a) > 1)
+    stop(paste("argument", sQuote("a"), "must be a character string"))
+  ## special cases
+  if(a == "." && length(value) == 0) {
+    ## clear the dotnames
+    attr(X, "dotnames") <- NULL
+    return(X)
+  }
+  if(a == ".a" || a == "*") {
+    warning("Column names unchanged: use names(x) <- value to change them")
+    return(X)
+  }
+
+  ## validate the names
+  switch(a,
+         ".x"=,
+         ".y"={
+           if(!is.character(value) || length(value) != 1)
+             stop("value should be a single string")
+         },
+         ".s"={
+           if(!is.character(value) || length(value) != 2)
+             stop("value should be a vector of 2 character strings")
+         },
+         "."={
+           if(!is.character(value))
+             stop("value should be a character vector")
+         },
+         stop(paste("Unrecognised abbreviation", dQuote(a)))
+       )
+  ## check the names match existing column names
+  tags <- names(X)
+  if(any(nbg <- !(value %in% tags))) 
+    stop(paste(ngettext(sum(nbg), "The string", "The strings"),
+               commasep(dQuote(value[nbg])),
+               ngettext(sum(nbg),
+                        "does not match the name of any column of X", 
+                        "do not match the names of any columns of X")))
+  ## reassign names
+  switch(a,
+         ".x"={
+           attr(X, "argu") <- value
+         },
+         ".y"={
+           attr(X, "valu") <- value
+         },
+         ".s"={
+           attr(X, "shade") <- value
+         },
+         "."={
+           attr(X, "dotnames") <- value
+         })
+  return(X)
+}
+
+"names<-.fv" <- function(x, value) {
+  nama <- colnames(x)
+  indx <- which(nama == fvnames(x, ".x"))
+  indy <- which(nama == fvnames(x, ".y"))
+  inds <- which(nama %in% fvnames(x, ".s"))
+  ind. <- which(nama %in% fvnames(x, "."))
+  ## rename columns of data frame
+  x <- NextMethod("names<-")
+  ## adjust other tags
+  fvnames(x, ".x") <- value[indx]
+  fvnames(x, ".y") <- value[indy]
+  fvnames(x, ".")  <- value[ind.]
+  if(length(inds) > 0)
+    fvnames(x, ".s") <- value[inds]
+  return(x)
+}
+
+fvlabels <- function(x, expand=FALSE) {
+  lab <- attr(x, "labl")
+  if(expand && !is.null(fname <- attr(x, "fname"))) {
+    ## expand plot labels using function name
+    nstrings <- max(substringcount("%s", lab))
+    ## pad with blanks
+    nextra <- nstrings - length(fname)
+    if(nextra > 0) 
+      fname <- c(fname, rep("", nextra))
+    ## render
+    lab <- do.call(sprintf, append(list(lab), as.list(fname)))
+  }
+  ## remove empty space
+  lab <- gsub(" ", "", lab)
+  names(lab) <- names(x)
+  return(lab)
+}
+
+"fvlabels<-" <- function(x, value) {
+  stopifnot(is.fv(x))
+  stopifnot(is.character(value))
+  stopifnot(length(value) == length(fvlabels(x)))
+  attr(x, "labl") <- value
+  return(x)
+}
+
+flatfname <- function(x) {
+  fn <- if(is.character(x)) x else attr(x, "fname")
+  if(length(fn) > 1)
+    fn <- paste0(fn[1L], "[", paste(fn[-1L], collapse=" "), "]")
+  as.name(fn)
+}
+
+makefvlabel <- function(op=NULL, accent=NULL, fname, sub=NULL, argname="r") {
+  ## de facto standardised label
+  a <- "%s"
+  if(!is.null(accent)) 
+    a <- paste0(accent, paren(a))     ## eg hat(%s)
+  if(!is.null(op))
+    a <- paste0("bold", paren(op), "~", a)  ## eg bold(var)~hat(%s)
+  if(is.null(sub)) {
+    if(length(fname) != 1) {
+      a <- paste0(a, "[%s]")
+      a <- paren(a, "{")
+    }
+  } else {
+    if(length(fname) == 1) {
+      a <- paste0(a, paren(sub, "["))
+    } else {
+      a <- paste0(a, paren("%s", "["), "^", paren(sub, "{"))
+      a <- paren(a, "{")
+    }
+  } 
+  a <- paste0(a, paren(argname))
+  return(a)
+}
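+
+## Illustrative output (assuming spatstat's paren() helper used above):
+##   makefvlabel(accent="hat", fname="K")
+##   ## -> "hat(%s)(r)"; fvlabels(..., expand=TRUE) later substitutes the
+##   ## function name for "%s", giving the plotmath label hat(K)(r)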
+
+fvlabelmap <- local({
+  magic <- function(x) {
+    subx <- paste("substitute(", x, ", NULL)")
+    out <- try(eval(parse(text=subx)), silent=TRUE)
+    if(inherits(out, "try-error"))
+      out <- as.name(make.names(subx))
+    out
+  }
+
+  fvlabelmap <- function(x, dot=TRUE) {
+    labl <- fvlabels(x, expand=TRUE)
+    ## construct mapping from identifiers to labels
+    map <- as.list(labl)
+    map <- lapply(map, magic)
+    names(map) <- colnames(x)
+    if(dot) {
+      ## also map "." and ".a" to name of target function
+      if(!is.null(ye <- attr(x, "yexp")))
+        map <- append(map, list("."=ye, ".a"=ye))
+      ## map other fvnames to their corresponding labels
+      map <- append(map, list(".x"=map[[fvnames(x, ".x")]],
+                              ".y"=map[[fvnames(x, ".y")]]))
+      if(!is.null(fvnames(x, ".s"))) {
+        shex <- unname(map[fvnames(x, ".s")])
+        shadexpr <- substitute(c(A,B), list(A=shex[[1L]], B=shex[[2L]]))
+        map <- append(map, list(".s" = shadexpr))
+      }
+    }
+    return(map)
+  }
+
+  fvlabelmap
+})
+
+## map from abbreviations to expressions involving the column names,
+## for use in eval(substitute(...))
+fvexprmap <- function(x) {
+  dotnames <- fvnames(x, ".")
+  u <- if(length(dotnames) == 1) as.name(dotnames) else 
+       as.call(lapply(c("cbind", dotnames), as.name))
+  ux <- as.name(fvnames(x, ".x"))
+  uy <- as.name(fvnames(x, ".y"))
+  umap <- list(.=u, .a=u, .x=ux, .y=uy)
+  if(!is.null(fvnames(x, ".s"))) {
+    shnm <- fvnames(x, ".s")
+    shadexpr <- substitute(cbind(A,B), list(A=as.name(shnm[1L]),
+                                            B=as.name(shnm[2L])))
+    umap <- append(umap, list(.s = shadexpr))
+  }
+  return(umap)
+}
+
+fvlegend <- local({
+
+  fvlegend <- function(object, elang) {
+    ## Compute mathematical legend(s) for column(s) in fv object 
+    ## transformed by language expression 'elang'.
+    ## The expression must already be in 'expanded' form.
+    ## The result is an expression, or expression vector.
+    ## The j-th entry of the vector is an expression for the
+    ## j-th column of function values.
+    ee <- distributecbind(as.expression(elang))
+    map <- fvlabelmap(object, dot = TRUE)
+    eout <- as.expression(lapply(ee, invokemap, map=map))
+    return(eout)
+  }
+
+  invokemap <- function(ei, map) {
+    eval(substitute(substitute(e, mp), list(e = ei, mp = map)))
+  }
+  
+  fvlegend
+})
+
+
+bind.fv <- function(x, y, labl=NULL, desc=NULL, preferred=NULL, clip=FALSE) {
+  verifyclass(x, "fv")
+  ax <- attributes(x)
+  if(is.fv(y)) {
+    ## y is already an fv object
+    ay <- attributes(y)
+    if(!identical(ax$fname, ay$fname)) {
+      ## x and y represent different functions
+      ## expand the labels separately 
+      fvlabels(x) <- fvlabels(x, expand=TRUE)
+      fvlabels(y) <- fvlabels(y, expand=TRUE)
+      ax <- attributes(x)
+      ay <- attributes(y)
+    }
+    ## check compatibility of 'r' values
+    xr <- ax$argu
+    yr <- ay$argu
+    rx <- x[[xr]]
+    ry <- y[[yr]]
+    if(length(rx) != length(ry)) {
+      if(!clip) 
+        stop("fv objects x and y have incompatible domains")
+      # restrict both objects to a common domain
+      ra <- intersect.ranges(range(rx), range(ry))
+      x <- x[inside.range(rx, ra), ]
+      y <- y[inside.range(ry, ra), ]
+      rx <- x[[xr]]
+      ry <- y[[yr]]
+    }
+    if(length(rx) != length(ry) || max(abs(rx-ry)) > .Machine$double.eps)
+      stop("fv objects x and y have incompatible values of r")
+    ## reduce y to data frame and strip off 'r' values
+    ystrip <- as.data.frame(y)
+    yrpos <- which(colnames(ystrip) == yr)
+    ystrip <- ystrip[, -yrpos, drop=FALSE]
+    ## determine descriptors
+    if(is.null(labl)) labl <- attr(y, "labl")[-yrpos]
+    if(is.null(desc)) desc <- attr(y, "desc")[-yrpos]
+    ##
+    y <- ystrip
+  } else {
+    ## y is a matrix or data frame
+    y <- as.data.frame(y)
+  }
+  
+  ## check for duplicated column names
+  allnames <- c(colnames(x), colnames(y))
+  if(any(dup <- duplicated(allnames))) {
+    nbg <- unique(allnames[dup])
+    nn <- length(nbg)
+    warning(paste("The column",
+                  ngettext(nn, "name", "names"),
+                  commasep(sQuote(nbg)),
+                  ngettext(nn, "was", "were"),
+                  "duplicated. Unique names were generated"))
+    allnames <- make.names(allnames, unique=TRUE, allow_ = FALSE)
+    colnames(y) <- allnames[ncol(x) + seq_len(ncol(y))]
+  }
+      
+  if(is.null(labl))
+    labl <- paste("%s[", colnames(y), "](r)", sep="")
+  else if(length(labl) != ncol(y))
+    stop(paste("length of", sQuote("labl"),
+               "does not match number of columns of y"))
+  if(is.null(desc))
+    desc <- character(ncol(y))
+  else if(length(desc) != ncol(y))
+    stop(paste("length of", sQuote("desc"),
+               "does not match number of columns of y"))
+  if(is.null(preferred))
+    preferred <- ax$valu
+
+  xy <- cbind(as.data.frame(x), y)
+  z <- fv(xy, ax$argu, ax$ylab, preferred, ax$fmla, ax$alim,
+          c(ax$labl, labl),
+          c(ax$desc, desc),
+          unitname=unitname(x),
+          fname=ax$fname,
+          yexp=ax$yexp)
+  return(z)
+}
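+
+## Usage sketch (not run): append a column of values 'v' (of length nrow(K))
+## to a hypothetical fv object K.
+##   K2 <- bind.fv(K, data.frame(extra=v),
+##                 labl="%s[extra](r)",
+##                 desc="another estimate of %s")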
+
+cbind.fv <- function(...) {
+  a <- list(...)
+  n <- length(a)
+  if(n == 0)
+    return(NULL)
+  if(n == 1) {
+    ## single argument - extract it
+    a <- a[[1L]]
+    ## could be an fv object 
+    if(is.fv(a))
+      return(a)
+    n <- length(a)
+  }
+  z <- a[[1L]]
+  if(!is.fv(z))
+    stop("First argument should be an object of class fv")
+  if(n > 1)
+    for(i in 2:n) 
+      z <- bind.fv(z, a[[i]])
+  return(z)
+}
+
+collapse.anylist <-
+collapse.fv <- local({
+
+  collapse.fv <- function(object, ..., same=NULL, different=NULL) {
+    if(is.fv(object)) {
+      x <- list(object, ...)
+    } else if(inherits(object, "anylist")) {
+      x <- append(object, list(...))
+    } else if(is.list(object) && all(sapply(object, is.fv))) {
+      x <- append(object, list(...))
+    } else stop("Format not understood")
+    if(!all(unlist(lapply(x, is.fv))))
+      stop("arguments should be objects of class fv")
+    if(is.null(same)) same <- character(0)
+    if(is.null(different)) different <- character(0)
+    if(anyDuplicated(c(same, different)))
+      stop(paste("The arguments", sQuote("same"), "and", sQuote("different"),
+                 "should not have entries in common"))
+    either <- c(same, different)
+    ## validate
+    if(length(either) == 0)
+      stop(paste("At least one column of values must be selected",
+                 "using the arguments", sQuote("same"), "and",
+                 sQuote("different")))
+    nbg <- unique(unlist(lapply(x, missingnames, expected=either)))
+    if((nbad <- length(nbg)) > 0)
+      stop(paste(ngettext(nbad, "The name", "The names"),
+                 commasep(sQuote(nbg)),
+                 ngettext(nbad, "is", "are"),
+                 "not present in the function objects"))
+    ## names for different versions
+    versionnames <- names(x)
+    if(is.null(versionnames))
+      versionnames <- paste("x", seq_along(x), sep="")
+    shortnames <- abbreviate(versionnames, minlength=12)
+    ## extract the common values
+    y <- x[[1L]]
+    xname <- fvnames(y, ".x")
+    yname <- fvnames(y, ".y")
+    if(length(same) == 0) {
+      ## The column of 'preferred values' .y cannot be deleted
+      ## retain .y for now and delete it later.
+      z <- y[, c(xname, yname)]
+    } else {
+      if(!(yname %in% same))
+        fvnames(y, ".y") <- same[1L]
+      z <- y[, c(xname, same)]
+    }
+    dotnames <- same
+    ## now merge the different values
+    for(i in seq_along(x)) {
+      ## extract values for i-th object
+      xi <- x[[i]]
+      wanted <- (names(xi) %in% different)
+      y <- as.data.frame(xi)[, wanted, drop=FALSE]
+      desc <- attr(xi, "desc")[wanted]
+      labl <- attr(xi, "labl")[wanted]
+      ## relabel
+      prefix <- shortnames[i]
+      preamble <- versionnames[i]
+      names(y) <- if(ncol(y) == 1) prefix else paste(prefix,names(y),sep="")
+      dotnames <- c(dotnames, names(y))
+      ## glue onto fv object
+      z <- bind.fv(z, y,
+                   labl=paste(prefix, labl, sep="~"),
+                   desc=paste(preamble, desc))
+    }
+    if(length(same) == 0) {
+      ## remove the second column which was retained earlier
+      fvnames(z, ".y") <- names(z)[3L]
+      z <- z[, -2L]
+    }
+    fvnames(z, ".") <- dotnames
+    return(z)
+  }
+
+  missingnames <- function(z, expected) { expected[!(expected %in% names(z))] }
+  
+  collapse.fv
+})
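+
+## Usage sketch (not run): merge two hypothetical fv objects K1, K2 that
+## share the theoretical column but carry different estimates.
+##   collapse.fv(list(a=K1, b=K2), same="theo", different="iso")
+## The result has columns r, theo, a, b: one column of "iso" values per
+## object, prefixed by the names of the list entries.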
+
+## rename one of the columns of an fv object
+tweak.fv.entry <- function(x, current.tag, new.labl=NULL, new.desc=NULL, new.tag=NULL) {
+  hit <- (names(x) == current.tag)
+  if(!any(hit))
+    return(x)
+  ## update descriptions of column
+  i <- min(which(hit))
+  if(!is.null(new.labl)) attr(x, "labl")[i] <- new.labl
+  if(!is.null(new.desc)) attr(x, "desc")[i] <- new.desc
+  ## adjust column tag
+  if(!is.null(new.tag)) {
+    names(x)[i] <- new.tag
+    ## update dotnames
+    dn <- fvnames(x, ".")
+    if(current.tag %in% dn ) {
+      dn[dn == current.tag] <- new.tag
+      fvnames(x, ".") <- dn
+    }
+    ## if the tweaked column is the preferred value, adjust accordingly
+    if(attr(x, "valu") == current.tag)
+      attr(x, "valu") <- new.tag
+    ## if the tweaked column is the function argument, adjust accordingly
+    if(attr(x, "argu") == current.tag)
+      attr(x, "valu") <- new.tag
+  }
+  return(x)
+}
+
+
+## change some or all of the auxiliary text in an fv object
+rebadge.fv <- function(x, new.ylab, new.fname,
+                       tags, new.desc, new.labl,
+                       new.yexp=new.ylab, new.dotnames,
+                       new.preferred, new.formula, new.tags) {
+  if(!missing(new.ylab)) 
+    attr(x, "ylab") <- new.ylab
+  if(!missing(new.yexp) || !missing(new.ylab))
+    attr(x, "yexp") <- new.yexp
+  if(!missing(new.fname))
+    attr(x, "fname") <- new.fname
+  if(!missing(tags) && !(missing(new.desc) && missing(new.labl) && missing(new.tags))) {
+    nama <- names(x)
+    desc <- attr(x, "desc")
+    labl <- attr(x, "labl")
+    valu <- attr(x, "valu")
+    for(i in seq_along(tags))
+    if(!is.na(m <- match(tags[i], nama))) {
+      if(!missing(new.desc)) desc[m] <- new.desc[i]
+      if(!missing(new.labl)) labl[m] <- new.labl[i]
+      if(!missing(new.tags)) {
+        names(x)[m] <- new.tags[i]
+        if(tags[i] == valu)
+          attr(x, "valu") <- new.tags[i]
+      }
+    }
+    attr(x, "desc") <- desc
+    attr(x, "labl") <- labl
+  }
+  if(!missing(new.dotnames))
+    fvnames(x, ".") <- new.dotnames
+  if(!missing(new.preferred)) {
+    stopifnot(new.preferred %in% names(x))
+    attr(x, "valu") <- new.preferred
+  }
+  if(!missing(new.formula))
+    formula(x) <- new.formula
+  return(x)
+}
+
+## common invocations to label a function like Kdot or Kcross
+rebadge.as.crossfun <- function(x, main, sub=NULL, i, j) {
+  i <- make.parseable(i)
+  j <- make.parseable(j)
+  if(is.null(sub)) {
+    ylab <- substitute(main[i, j](r),
+                       list(main=main, i=i, j=j))
+    fname <- c(main, paste0("list", paren(paste(i, j, sep=","))))
+    yexp <- substitute(main[list(i, j)](r),
+                       list(main=main, i=i, j=j))
+  } else {
+    ylab <- substitute(main[sub, i, j](r),
+                       list(main=main, sub=sub, i=i, j=j))
+    fname <- c(main, paste0("list", paren(paste(sub, i, j, sep=","))))
+    yexp <- substitute(main[list(sub, i, j)](r),
+                       list(main=main, sub=sub, i=i, j=j))
+  }
+  y <- rebadge.fv(x, new.ylab=ylab, new.fname=fname, new.yexp=yexp)
+  return(y)
+}
+
+rebadge.as.dotfun <- function(x, main, sub=NULL, i) {
+  i <- make.parseable(i)
+  if(is.null(sub)) {
+    ylab <- substitute(main[i ~ dot](r),
+                       list(main=main, i=i))
+    fname <- c(main, paste0(i, "~symbol(\"\\267\")"))
+    yexp <- substitute(main[i ~ symbol("\267")](r),
+                       list(main=main, i=i))
+  } else {
+    ylab <- substitute(main[sub, i ~ dot](r),
+                       list(main=main, sub=sub, i=i))
+    fname <- c(main, paste0("list",
+                            paren(paste0(sub, ",",
+                                         i, "~symbol(\"\\267\")"))))
+    yexp <- substitute(main[list(sub, i ~ symbol("\267"))](r),
+                       list(main=main, sub=sub, i=i))
+  }
+  y <- rebadge.fv(x, new.ylab=ylab, new.fname=fname, new.yexp=yexp)
+  return(y)
+}
+
+## even simpler wrapper for rebadge.fv
+rename.fv <- function(x, fname, ylab, yexp=ylab) {
+  stopifnot(is.fv(x))
+  stopifnot(is.character(fname) && (length(fname) %in% 1:2))
+  argu <- fvnames(x, ".x")
+  if(missing(ylab) || is.null(ylab))
+    ylab <- switch(length(fname),
+                   substitute(fn(argu), list(fn=as.name(fname),
+                                             argu=as.name(argu))),
+                   substitute(fn[fsub](argu), list(fn=as.name(fname[1]),
+                                                   fsub=as.name(fname[2]),
+                                                   argu=as.name(argu))))
+  if(missing(yexp) || is.null(yexp))
+    yexp <- ylab
+  y <- rebadge.fv(x, new.fname=fname, new.ylab=ylab, new.yexp=yexp)
+  return(y)
+}
+
+## subset extraction operator
+"[.fv" <-
+  function(x, i, j, ..., drop=FALSE)
+{
+  igiven <- !missing(i)
+  jgiven <- !missing(j)
+  y <- as.data.frame(x)
+  if(igiven && jgiven)
+    z <- y[i, j, drop=drop]
+  else if(igiven)
+    z <- y[i, , drop=drop]
+  else if(jgiven)
+    z <- y[ , j, drop=drop]
+  else z <- y
+
+  ## return only the selected values as a data frame or vector.
+  if(drop) return(z)
+
+  if(!jgiven) 
+    selected <- seq_len(ncol(x))
+  else {
+    nameindices <- seq_along(names(x))
+    names(nameindices) <- names(x)
+    selected <- as.vector(nameindices[j])
+  }
+
+  # validate choice of selected/dropped columns
+  nama <- names(z)
+  argu <- attr(x, "argu")
+  if(!(argu %in% nama))
+    stop(paste("The function argument", sQuote(argu), "must not be removed"))
+  valu <- attr(x, "valu")
+  if(!(valu %in% nama))
+    stop(paste("The default column of function values",
+               sQuote(valu), "must not be removed"))
+
+  # if the plot formula involves explicit mention of dropped columns,
+  # replace it by a generic formula
+  fmla <- as.formula(attr(x, "fmla"))
+  if(!all(variablesinformula(fmla) %in% nama)) 
+    fmla <- as.formula(. ~ .x, env=environment(fmla))
+  
+  ## If range of argument was implicitly changed, adjust "alim"
+  alim <- attr(x, "alim")
+  rang <- range(z[[argu]])
+  alim <- intersect.ranges(alim, rang, fatal=FALSE)
+
+  result <- fv(z, argu=attr(x, "argu"),
+               ylab=attr(x, "ylab"),
+               valu=attr(x, "valu"),
+               fmla=fmla,
+               alim=alim,
+               labl=attr(x, "labl")[selected],
+               desc=attr(x, "desc")[selected],
+               unitname=attr(x, "units"),
+               fname=attr(x,"fname"),
+               yexp=attr(x, "yexp"))
+  
+  ## carry over preferred names, if possible
+  dotn <- fvnames(x, ".")
+  fvnames(result, ".") <- dotn[dotn %in% colnames(result)]
+  shad <- fvnames(x, ".s")
+  if(!is.null(shad) && all(shad %in% colnames(result)))
+    fvnames(result, ".s") <- shad
+  return(result)
+}  
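+
+## Usage sketch (not run): restrict a hypothetical fv object K to r <= 0.1.
+##   Kshort <- K[K$r <= 0.1, ]
+## Columns may also be selected by name, but the function argument and the
+## recommended value column cannot be dropped.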
+
+## Subset and column replacement methods
+## to guard against deletion of columns
+
+"[<-.fv" <- function(x, i, j, value) {
+  if(!missing(j)) {
+    ## check for alterations to structure of object
+    if((is.character(j) && !all(j %in% colnames(x))) ||
+       (is.numeric(j) && any(j > ncol(x))))
+      stop("Use bind.fv to add new columns to an object of class fv")
+    if(is.null(value) && missing(i)) {
+      ## column(s) will be removed
+      co <- seq_len(ncol(x))
+      names(co) <- colnames(x)
+      keepcol <- setdiff(co, co[j])
+      return(x[ , keepcol, drop=FALSE])
+    }
+  }
+  NextMethod("[<-")
+}
+
+"$<-.fv" <- function(x, name, value) {
+  j <- which(colnames(x) == name)
+  if(is.null(value)) {
+    ## column will be removed
+    if(length(j) != 0)
+      return(x[, -j, drop=FALSE])
+    return(x)
+  }
+  if(length(j) == 0) {
+    ## new column
+    df <- data.frame(1:nrow(x), value)[,-1L,drop=FALSE]
+    colnames(df) <- name
+    y <- bind.fv(x, df, desc=paste("Additional variable", sQuote(name)))
+    return(y)
+  }
+  NextMethod("$<-")
+}
+
+## method for 'formula'
+
+formula.fv <- function(x, ...) {
+  attr(x, "fmla")
+}
+
+# new generic
+
+"formula<-" <- function(x, ..., value) {
+  UseMethod("formula<-")
+}
+
+"formula<-.fv" <- function(x, ..., value) {
+  if(is.null(value))
+    value <- paste(fvnames(x, ".y"), "~", fvnames(x, ".x"))
+  else if(inherits(value, "formula")) {
+    ## convert formula to string
+    value <- flat.deparse(value)
+  } else if(!is.character(value))
+    stop("Assignment value should be a formula or a string")
+  attr(x, "fmla") <- value
+  return(x)
+}
+
+##   method for with()
+
+  
+with.fv <- function(data, expr, ..., fun=NULL, enclos=NULL) {
+  if(any(names(list(...)) == "drop"))
+    stop("Outdated argument 'drop' used in with.fv")
+  cl <- short.deparse(sys.call())
+  verifyclass(data, "fv")
+  if(is.null(enclos)) 
+    enclos <- parent.frame()
+  ## convert syntactic expression to call
+  elang <- substitute(expr)
+  ## map "." etc to names of columns of data
+  datanames <- names(data)
+  xname <- fvnames(data, ".x")
+  yname <- fvnames(data, ".y")
+  ux <- as.name(xname)
+  uy <- as.name(yname)
+  dnames <- datanames[datanames %in% fvnames(data, ".")]
+  ud <- as.call(lapply(c("cbind", dnames), as.name))
+  anames <- datanames[datanames %in% fvnames(data, ".a")]
+  ua <- as.call(lapply(c("cbind", anames), as.name))
+  if(!is.null(fvnames(data, ".s"))) {
+    snames <- datanames[datanames %in% fvnames(data, ".s")]
+    us <- as.call(lapply(c("cbind", snames), as.name))
+  } else us <- NULL
+  expandelang <- eval(substitute(substitute(ee,
+                                      list(.=ud, .x=ux, .y=uy, .s=us, .a=ua)),
+                           list(ee=elang)))
+  dont.complain.about(ua, ud, us, ux, uy)
+  evars <- all.vars(expandelang)
+  used.dotnames <- evars[evars %in% dnames]
+  ## evaluate expression
+  datadf <- as.data.frame(data)
+  results <- eval(expandelang, as.list(datadf), enclos=enclos)
+  ## --------------------
+  ## commanded to return numerical values only?
+  if(!is.null(fun) && !fun)
+    return(results)
+
+  if(!is.matrix(results) && !is.data.frame(results)) {
+    ## result is a vector
+    if(is.null(fun)) fun <- FALSE
+    if(!fun || length(results) != nrow(datadf))
+      return(results)
+    results <- matrix(results, ncol=1)
+  } else {
+    ## result is a matrix or data frame
+    if(is.null(fun)) fun <- TRUE
+    if(!fun || nrow(results) != nrow(datadf))
+      return(results)
+  }
+  ## result is a matrix or data frame of the right dimensions
+  ## make a new fv object
+  ## ensure columns of results have names
+  if(is.null(colnames(results)))
+    colnames(results) <- paste("col", seq_len(ncol(results)), sep="")
+  resultnames <- colnames(results)
+  ## get values of function argument
+  xvalues <- datadf[[xname]]
+  ## tack onto result matrix
+  results <- cbind(xvalues, results)
+  colnames(results) <- c(xname, resultnames)
+  results <- data.frame(results)
+  ## check for alteration of column names
+  oldnames <- resultnames
+  resultnames <- colnames(results)[-1L]
+  if(any(resultnames != oldnames))
+    warning("some column names were illegal and have been changed")
+  ## determine mapping (if any) from columns of output to columns of input
+  namemap <- match(colnames(results), names(datadf))
+  okmap <- !is.na(namemap)
+  ## Build up fv object
+  ## decide which of the columns should be the preferred value
+  newyname <- if(yname %in% resultnames) yname else resultnames[1L]
+  ## construct default plot formula
+  fmla <- flat.deparse(as.formula(paste(". ~", xname)))
+  dotnames <- resultnames
+  ## construct description strings
+  desc <- character(ncol(results))
+  desc[okmap] <- attr(data, "desc")[namemap[okmap]]
+  desc[!okmap] <- paste("Computed value", resultnames[!okmap])
+  ## function name (fname) and mathematical expression for function (yexp)
+  oldyexp <- attr(data, "yexp")
+  oldfname <- attr(data, "fname")
+  if(is.null(oldyexp)) {
+    fname <- cl
+    yexp <- substitute(f(xname), list(f=as.name(fname), xname=as.name(xname)))
+  } else {
+    ## map 'cbind(....)' to "." for name of function only
+    cb <- paste("cbind(",
+                paste(used.dotnames, collapse=","),
+                ")", sep="")
+    compresselang <- gsub(cb, ".", flat.deparse(expandelang), fixed=TRUE)
+    compresselang <- as.formula(paste(compresselang, "~1"))[[2L]]
+    ## construct mapping using original function name
+    labmap <- fvlabelmap(data, dot=TRUE)
+    labmap[["."]] <- oldyexp
+    yexp <- eval(substitute(substitute(ee, ff), 
+                            list(ee=compresselang, ff=labmap)))
+    labmap2 <- labmap
+    labmap2[["."]] <- as.name(oldfname)
+    fname <- eval(substitute(substitute(ee, ff), 
+                             list(ee=compresselang,
+                                  ff=labmap2)))
+    fname <- paren(flat.deparse(fname))
+  }
+  ## construct mathematical labels
+  mathlabl <- as.character(fvlegend(data, expandelang))
+  mathlabl <- gsub("[[:space:]]+", " ", mathlabl)
+  labl <- colnames(results)
+  mathmap <- match(labl, used.dotnames)
+  okmath <- !is.na(mathmap)
+  labl[okmath] <- mathlabl[mathmap[okmath]]
+  ## form fv object and return
+  out <- fv(results, argu=xname, valu=newyname, labl=labl,
+            desc=desc, alim=attr(data, "alim"), fmla=fmla,
+            unitname=unitname(data), fname=fname, yexp=yexp, ylab=yexp)
+  fvnames(out, ".") <- dotnames
+  return(out)
+}
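+
+## Usage sketch (not run): transform every estimate in a hypothetical
+## K-function object into the corresponding L-function estimate.
+##   L <- with(K, sqrt(./pi))
+## Here "." expands to cbind() of the default columns of K, so the result
+## is again an fv object with one transformed column per estimate.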
+
+## method for 'range'
+
+range.fv <- local({
+
+  getValues <- function(x) {
+    xdat <- as.matrix(as.data.frame(x))
+    yall <- fvnames(x, ".")
+    vals <- xdat[, yall]
+    return(as.vector(vals))
+  }
+  
+  range.fv <- function(..., na.rm=TRUE, finite=na.rm) {
+    aarg <- list(...)
+    isfun <- sapply(aarg, is.fv)
+    if(any(isfun)) 
+      aarg[isfun] <- lapply(aarg[isfun], getValues)
+    z <- do.call(range, append(aarg, list(na.rm=na.rm, finite=finite)))
+    return(z)
+  }
+
+  range.fv
+})
+
+min.fv <- function(..., na.rm=TRUE, finite=na.rm) {
+  range(..., na.rm=na.rm, finite=finite)[1L]
+}
+
+max.fv <- function(..., na.rm=TRUE, finite=na.rm) {
+  range(..., na.rm=na.rm, finite=finite)[2L]
+}
+
+  
+## stieltjes integration for fv objects
+
+stieltjes <- function(f, M, ...) {
+  ## stieltjes integral of f(x) dM(x)
+  stopifnot(is.function(f))
+  if(is.stepfun(M)) {
+    envM <- environment(M)
+    #' jump locations
+    x <- get("x", envir=envM)
+    #' values of integrand
+    fx <- f(x, ...)
+    #' jump amounts
+    xx <- c(-Inf, (x[-1L] + x[-length(x)])/2, Inf)
+    dM <- diff(M(xx))
+    #' integrate f(x) dM(x)
+    f.dM <- fx * dM
+    result <- sum(f.dM[is.finite(f.dM)])
+    return(list(result))
+  } else if(is.fv(M)) {
+    ## integration variable
+    argu <- attr(M, "argu")
+    x <- M[[argu]]
+    ## values of integrand
+    fx <- f(x, ...)
+    ## estimates of measure
+    valuenames <- names(M) [names(M) != argu]
+    Mother <- as.data.frame(M)[, valuenames]
+    Mother <- as.matrix(Mother, nrow=nrow(M))
+    ## increments of measure
+    dM <- apply(Mother, 2, diff)
+    dM <- rbind(dM, 0)
+    ## integrate f(x) dM(x)
+    f.dM <- fx * dM
+    f.dM[!is.finite(f.dM)] <- 0
+    results <- colSums(f.dM)
+    results <- as.list(results)
+    names(results) <- valuenames
+    return(results)
+  } else stop("M must be an object of class fv or stepfun")
+}
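+
+## Usage sketch (not run): the mean nearest-neighbour distance is the
+## Stieltjes integral of f(x) = x with respect to the cumulative
+## distribution function G, e.g. for a hypothetical G <- Gest(X):
+##   stieltjes(function(x) { x }, G)
+## One value is returned for each column of estimates in G.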
+
+prefixfv <- function(x, tagprefix="", descprefix="", lablprefix=tagprefix,
+                     whichtags=fvnames(x, "*")) {
+  ## attach a prefix to fv information 
+  stopifnot(is.fv(x))
+  att <- attributes(x)
+  relevant <- names(x) %in% whichtags
+  oldtags <- names(x)[relevant]
+  newtags <- paste(tagprefix, oldtags, sep="")
+  newlabl <- paste(lablprefix, att$labl[relevant], sep="")
+  newdesc <- paste(descprefix, att$desc[relevant])
+  y <- rebadge.fv(x, tags=oldtags,
+                  new.desc=newdesc,
+                  new.labl=newlabl,
+                  new.tags=newtags)
+  return(y)
+}
+
+reconcile.fv <- local({
+
+  reconcile.fv <- function(...) {
+    ## reconcile several fv objects by finding the columns they share in common
+    z <- list(...)
+    if(!all(unlist(lapply(z, is.fv)))) {
+      if(length(z) == 1 &&
+         is.list(z[[1L]]) &&
+         all(unlist(lapply(z[[1L]], is.fv))))
+        z <- z[[1L]]
+      else    
+        stop("all arguments should be fv objects")
+    }
+    n <- length(z)
+    if(n <= 1) return(z)
+    ## find columns that are common to all estimates
+    keepcolumns <- names(z[[1L]])
+    keepvalues <- fvnames(z[[1L]], "*")
+    for(i in 2:n) {
+      keepcolumns <- intersect(keepcolumns, names(z[[i]]))
+      keepvalues <- intersect(keepvalues, fvnames(z[[i]], "*"))
+    }
+    if(length(keepvalues) == 0)
+      stop("cannot reconcile fv objects: they have no columns in common")
+    ## determine name of the 'preferred' column
+    prefs <- unlist(lapply(z, fvnames, a=".y"))
+    prefskeep <- prefs[prefs %in% keepvalues]
+    if(length(prefskeep) > 0) {
+      ## pick the most popular
+      chosen <- unique(prefskeep)[which.max(table(prefskeep))]
+    } else {
+      ## drat - pick a value arbitrarily
+      chosen <- keepvalues[1L]
+    }
+    z <- lapply(z, rebadge.fv, new.preferred=chosen)
+    z <- lapply(z, "[.fv", j=keepcolumns)
+    ## also clip to the same r values
+    rmax <- min(sapply(z, maxrval))
+    z <- lapply(z, cliprmax, rmax=rmax)
+    return(z)
+  }
+
+  maxrval <- function(x) { max(with(x, .x)) }
+  cliprmax <- function(x, rmax) { x[ with(x, .x) <= rmax, ] }
+  
+  reconcile.fv
+})
+
+as.function.fv <- function(x, ..., value=".y", extrapolate=FALSE) {
+  trap.extra.arguments(...)
+  value.orig <- value
+  ## extract function argument
+  xx <- with(x, .x)
+  ## extract all function values 
+  yy <- as.data.frame(x)[, fvnames(x, "*"), drop=FALSE]
+  ## determine which value(s) to supply
+  if(!is.character(value))
+    stop("value should be a string or vector specifying columns of x")
+  if(!all(value %in% colnames(yy))) {
+    expandvalue <- try(fvnames(x, value))
+    if(!inherits(expandvalue, "try-error")) {
+      value <- expandvalue
+    } else stop("Unable to determine columns of x")
+  }
+  yy <- yy[,value, drop=FALSE]
+  argname <- fvnames(x, ".x")
+  ## determine extrapolation rule (1=NA, 2=most extreme value)
+  stopifnot(is.logical(extrapolate))
+  stopifnot(length(extrapolate) %in% 1:2)
+  endrule <- 1 + extrapolate
+  ## make function(s)
+  if(length(value) == 1 && !identical(value.orig, "*")) {
+    ## make a single 'approxfun' and return it
+    f <- approxfun(xx, yy[,,drop=TRUE], rule=endrule)
+    ## magic
+    names(formals(f))[1L] <- argname
+    body(f)[[4L]] <- as.name(argname)
+  } else {
+    ## make a list of 'approxfuns' with different function values
+    funs <- lapply(yy, approxfun, x = xx, rule = endrule)
+    ## return a function which selects the appropriate 'approxfun' and executes
+    f <- function(xxxx, what=value) {
+      what <- match.arg(what)
+      funs[[what]](xxxx)
+    }
+    ## recast function definition
+    ## ('any sufficiently advanced technology is
+    ##   indistinguishable from magic' -- Arthur C. Clarke)
+    formals(f)[[2L]] <- value
+    names(formals(f))[1L] <- argname
+    ##    body(f)[[3L]][[2L]] <- as.name(argname)
+    body(f) <- eval(substitute(substitute(z,
+                                          list(xxxx=as.name(argname))),
+                               list(z=body(f))))
+  }
+  class(f) <- c("fvfun", class(f))
+  attr(f, "fname") <- attr(x, "fname")
+  attr(f, "yexp") <- attr(x, "yexp")
+  return(f)
+}
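+
+## Usage sketch (not run): convert a hypothetical fv object K into an
+## ordinary function that interpolates the recommended estimate.
+##   f <- as.function(K)
+##   f(0.1)                      # interpolated value at r = 0.1
+##   g <- as.function(K, value="theo")
+##   g(0.1)                      # interpolate the theoretical column instead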
+
+print.fvfun <- function(x, ...) {
+  y <- args(x)
+  yexp <- as.expression(attr(x, "yexp"))
+  body(y) <- as.name(paste("Returns interpolated value of", yexp))
+  print(y, ...)
+  return(invisible(NULL))
+}
+
+findcbind <- function(root, depth=0, maxdepth=1000) {
+  ## recursive search through a parse tree to find calls to 'cbind'
+  if(depth > maxdepth) stop("Reached maximum depth")
+  if(length(root) == 1) return(NULL)
+  if(identical(as.name(root[[1L]]), as.name("cbind"))) return(list(numeric(0)))
+  out <- NULL
+  for(i in 2:length(root)) {
+    di <- findcbind(root[[i]], depth+1, maxdepth)
+    if(!is.null(di))
+      out <- append(out, lapply(di, append, values=i, after=FALSE))
+  }
+  return(out)
+}
+
+.MathOpNames <- c("+", "-", "*", "/",
+                  "^", "%%", "%/%",
+                  "&", "|", "!",
+                  "==", "!=", "<", "<=", ">=", ">")
+
+distributecbind <- local({
+
+  distributecbind <- function(x) {
+    ## x is an expression involving a call to 'cbind'
+    ## return a vector of expressions, each obtained by replacing 'cbind(...)'
+    ## by one of its arguments in turn.
+    stopifnot(typeof(x) == "expression")
+    xlang <- x[[1L]]
+    locations <- findcbind(xlang)
+    if(length(locations) == 0)
+      return(x)
+    ## cbind might occur more than once
+    ## check that the number of arguments is the same each time
+    narg <- unique(sapply(locations, nargs.in.expr, e=xlang))
+    if(length(narg) > 1) 
+      return(NULL)
+    out <- NULL
+    if(narg > 0) {
+      for(i in 1:narg) {
+        ## make a version of the expression
+        ## in which cbind() is replaced by its i'th argument
+        fakexlang <- xlang
+        for(loc in locations) {
+          if(length(loc) > 0) {
+            ## usual case: 'loc' is integer vector representing nested index
+            cbindcall <- xlang[[loc]]
+            ## extract i-th argument
+            argi <- cbindcall[[i+1]]
+            ## if argument is an expression, enclose it in parentheses
+            if(length(argi) > 1 && paste(argi[[1L]]) %in% .MathOpNames)
+              argi <- substitute((x), list(x=argi))
+            ## replace cbind call by its i-th argument
+            fakexlang[[loc]] <- argi
+          } else {
+            ## special case: 'loc' = integer(0) representing xlang itself
+            cbindcall <- xlang
+            ## extract i-th argument
+            argi <- cbindcall[[i+1L]]
+            ## replace cbind call by its i-th argument
+            fakexlang <- argi
+          }
+        }
+        ## add to final expression
+        out <- c(out, as.expression(fakexlang))
+      }
+    }
+    return(out)
+  }
+
+  nargs.in.expr <- function(loc, e) {
+    n <- if(length(loc) > 0) length(e[[loc]]) else length(e)
+    return(n - 1L)
+  }
+
+  distributecbind
+})
+
+## Form a new 'fv' object as a ratio
+
+ratfv <- function(df, numer, denom, ..., ratio=TRUE) {
+  ## Determine y
+  if(!missing(df)) {
+    y <- fv(df, ...)
+    num <- NULL
+  } else {
+    ## Compute numer/denom
+    ## Numerator must be a data frame
+    num <- fv(numer, ...)    
+    ## Denominator may be a data frame or a constant
+    force(denom)
+    y <- eval.fv(num/denom)
+    ## relabel
+    y <- fv(as.data.frame(y), ...)
+  }
+  if(!ratio)
+    return(y)
+  if(is.null(num)) {
+    ## Compute num = y * denom
+    ## Denominator may be a data frame or a constant
+    force(denom)
+    num <- eval.fv(y * denom)
+    ## ditch labels
+    num <- fv(as.data.frame(num), ...)
+  }
+  ## make denominator an fv object
+  if(is.data.frame(denom)) {
+    den <- fv(denom, ...)
+  } else {
+    ## scalar
+    check.1.real(denom, "Unless it is a data frame,")
+    ## replicate it in all the data columns
+    dendf <- as.data.frame(num)
+    valuecols <- (names(num) != fvnames(num, ".x"))
+    dendf[, valuecols] <- denom
+    den <- fv(dendf, ...)
+  } 
+  ## tweak the descriptions
+  ok <- (names(y) != fvnames(y, ".x"))
+  attr(num, "desc")[ok] <- paste("numerator of",   attr(num, "desc")[ok])
+  attr(den, "desc")[ok] <- paste("denominator of", attr(den, "desc")[ok])
+  ## form ratio object
+  y <- rat(y, num, den, check=FALSE)
+  return(y)
+}
+
+## Tack new column(s) onto a ratio fv object
+
+bind.ratfv <- function(x, numerator=NULL, denominator=NULL, 
+                       labl = NULL, desc = NULL, preferred = NULL,
+                       ratio=TRUE,
+		       quotient=NULL) {
+  if(ratio && !inherits(x, "rat"))
+    stop("ratio=TRUE is set, but x has no ratio information", call.=FALSE)
+  if(is.null(numerator) && !is.null(denominator) && !is.null(quotient))
+    numerator <- quotient * denominator
+  if(is.null(denominator) && inherits(numerator, "rat")) {
+    ## extract numerator & denominator from ratio object
+    both <- numerator
+    denominator <- attr(both, "denominator")
+    usenames <- fvnames(both, ".a")
+    numerator   <- as.data.frame(both)[,usenames]
+    denominator <- as.data.frame(denominator)[,usenames]
+    ##  labels default to those of ratio object
+    if(is.null(labl)) labl <- attr(both, "labl")
+    if(is.null(desc)) desc <- attr(both, "desc")
+  }
+  # calculate ratio
+  #    The argument 'quotient' is rarely needed 
+  #    except to avoid 0/0 or to improve accuracy
+  if(is.null(quotient))
+    quotient <- numerator/denominator
+    
+  # bind new column to x   
+  y <- bind.fv(x, quotient,
+               labl=labl, desc=desc, preferred=preferred)
+  if(!ratio)
+    return(y)
+    
+  ## convert scalar denominator to data frame
+  if(!is.data.frame(denominator)) {
+    if(!is.numeric(denominator) || !is.vector(denominator))
+      stop("Denominator should be a data frame or a numeric vector")
+    nd <- length(denominator)
+    if(nd != 1 && nd != nrow(x))
+      stop("Denominator has wrong length")
+    dvalue <- denominator
+    denominator <- numerator
+    denominator[] <- dvalue
+  }
+  ## Now fuse with x
+  num <- attr(x, "numerator")
+  den <- attr(x, "denominator")
+  num <- bind.fv(num, numerator,
+                 labl=labl, desc=paste("numerator of", desc),
+                 preferred=preferred)
+  den <- bind.fv(den, denominator,
+                 labl=labl, desc=paste("denominator of", desc),
+                 preferred=preferred)
+  y <- rat(y, num, den, check=FALSE)
+  return(y)
+}
+
+conform.ratfv <- function(x) {
+  ## harmonise display properties in components of a ratio
+  stopifnot(inherits(x, "rat"), is.fv(x))
+  num <- attr(x, "numerator")
+  den <- attr(x, "denominator")
+  formula(num) <- formula(den) <- formula(x)
+  fvnames(num, ".") <- fvnames(den, ".") <- fvnames(x, ".")
+  unitname(num)     <- unitname(den)     <- unitname(x)
+  attr(x, "numerator") <- num
+  attr(x, "denominator") <- den
+  return(x)
+}
+
diff --git a/R/geyer.R b/R/geyer.R
new file mode 100755
index 0000000..ce41303
--- /dev/null
+++ b/R/geyer.R
@@ -0,0 +1,365 @@
+#
+#
+#    geyer.S
+#
+#    $Revision: 2.38 $	$Date: 2017/06/05 10:31:58 $
+#
+#    Geyer's saturation process
+#
+#    Geyer()    create an instance of Geyer's saturation process
+#                 [an object of class 'interact']
+#
+#	
+
+Geyer <- local({
+
+  # .......... template ..........
+
+  BlankGeyer <- 
+  list(
+         name     = "Geyer saturation process",
+         creator  = "Geyer",
+         family   = "pairsat.family",  # evaluated later
+         pot      = function(d, par) {
+                         (d <= par$r)  # same as for Strauss
+                    },
+         par      = list(r = NULL, sat=NULL),  # filled in later
+         parnames = c("interaction distance","saturation parameter"),
+         init     = function(self) {
+                      r <- self$par$r
+                      sat <- self$par$sat
+                      if(!is.numeric(r) || length(r) != 1 || r <= 0)
+                       stop("interaction distance r must be a positive number")
+                      if(!is.numeric(sat) || length(sat) != 1 || sat < 0)
+                       stop("saturation parameter sat must be a positive number")
+                    },
+         update = NULL, # default OK
+         print = NULL,    # default OK
+         plot = function(fint, ..., d=NULL, plotit=TRUE) {
+           verifyclass(fint, "fii")
+           inter <- fint$interaction
+           unitz <- unitname(fint)
+           if(!identical(inter$name, "Geyer saturation process"))
+             stop("Tried to plot the wrong kind of interaction")
+           #' fitted interaction coefficient
+           theta <- fint$coefs[fint$Vnames]
+           #' interaction radius
+           r <- inter$par$r
+           sat <- inter$par$sat
+           xlim <- resolve.1.default(list(xlim=c(0, 1.25 * r)), list(...)) 
+           rmax <- max(xlim, d)
+           if(is.null(d)) {
+             d <- seq(from=0, to=rmax, length.out=1024)
+           } else {
+             stopifnot(is.numeric(d) &&
+                       all(is.finite(d)) &&
+                       all(diff(d) > 0))
+           }
+           #' compute interaction between two points at distance d
+           y <- exp(theta * sat * (d <= r))
+           #' compute `fv' object
+           fun <- fv(data.frame(r=d, h=y, one=1),
+                     "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r,
+                     xlim, c("r", "h(r)", "1"),
+                     c("distance argument r",
+                       "maximal interaction h(r)",
+                       "reference value 1"),
+                     unitname=unitz)
+           if(plotit)
+             do.call(plot.fv,
+                     resolve.defaults(list(fun),
+                                      list(...),
+                                      list(ylim=range(0,1,y))))
+           return(invisible(fun))
+         },
+         #' end of function 'plot'
+         interpret =  function(coeffs, self) {
+           loggamma <- as.numeric(coeffs[1L])
+           gamma <- exp(loggamma)
+           return(list(param=list(gamma=gamma),
+                       inames="interaction parameter gamma",
+                       printable=dround(gamma)))
+         },
+         valid = function(coeffs, self) {
+           loggamma <- as.numeric(coeffs[1L])
+           sat <- self$par$sat
+           return(is.finite(loggamma) && (is.finite(sat) || loggamma <= 0))
+         },
+         project = function(coeffs, self) {
+           if((self$valid)(coeffs, self)) return(NULL) else return(Poisson())
+         },
+         irange = function(self, coeffs=NA, epsilon=0, ...) {
+           r <- self$par$r
+           if(any(!is.na(coeffs))) {
+             loggamma <- coeffs[1L]
+             if(!is.na(loggamma) && (abs(loggamma) <= epsilon))
+               return(0)
+           }
+           return(2 * r)
+         },
+       version=NULL, # evaluated later
+       # fast evaluation is available for the border correction only
+       can.do.fast=function(X,correction,par) {
+         return(all(correction %in% c("border", "none")))
+       },
+       fasteval=function(X,U,EqualPairs,pairpot,potpars,correction,
+                         ..., halfway=FALSE, check=TRUE) {
+         # fast evaluator for Geyer interaction
+         if(!all(correction %in% c("border", "none")))
+           return(NULL)
+         if(spatstat.options("fasteval") == "test")
+           message("Using fast eval for Geyer")
+         r   <- potpars$r
+         sat <- potpars$sat
+         # first ensure all data points are in U
+         nX <- npoints(X)
+         nU <- npoints(U)
+         Xseq  <- seq_len(nX)
+         if(length(EqualPairs) == 0) {
+           # no data points currently included 
+           missingdata <- rep.int(TRUE, nX)
+         } else {
+           Xused <- EqualPairs[,1L]
+           missingdata <- !(Xseq %in% Xused)
+         }
+         somemissing <- any(missingdata)
+         if(somemissing) {
+           # add the missing data points
+           nmiss <- sum(missingdata)
+           U <- superimpose(U, X[missingdata], W=X$window, check=check)
+           # correspondingly augment the list of equal pairs
+           originalrows <- seq_len(nU)
+           newXindex <- Xseq[missingdata]
+           newUindex <- nU + seq_len(nmiss)
+           EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex))
+           nU <- nU + nmiss
+         }
+         # determine saturated pair counts
+         counts <- strausscounts(U, X, r, EqualPairs) 
+         satcounts <- pmin.int(sat, counts)
+         satcounts <- matrix(satcounts, ncol=1)
+         if(halfway) {
+           # trapdoor used by suffstat()
+           answer <- satcounts
+         } else if(sat == Inf) {
+           # no saturation: fast code
+           answer <- 2 * satcounts
+         } else {
+           # extract counts for data points
+           Uindex <- EqualPairs[,2L]
+           Xindex <- EqualPairs[,1L]
+           Xcounts <- integer(npoints(X))
+           Xcounts[Xindex] <- counts[Uindex]
+           # evaluate change in saturated counts of other data points
+           change <- geyercounts(U, X, r, sat, Xcounts, EqualPairs)
+           answer <- satcounts + change
+           answer <- matrix(answer, ncol=1)
+         }
+         if(somemissing)
+           answer <- answer[originalrows, , drop=FALSE]
+         return(answer)
+       },
+       delta2 = function(X,inte,correction, ..., sparseOK=FALSE) {
+         # Sufficient statistic for second order conditional intensity
+         # h(X[i] | X) - h(X[i] | X[-j])
+         # Geyer interaction
+         if(!(correction %in% c("border", "none")))
+           return(NULL)
+         r   <- inte$par$r
+         sat <- inte$par$sat
+         result <- geyerdelta2(X, r, sat, sparseOK=sparseOK)
+         return(result)
+       }
+  )
+  class(BlankGeyer) <- "interact"
+  
+  Geyer <- function(r, sat) {
+    instantiate.interact(BlankGeyer, list(r = r, sat=sat))
+  }
+
+  Geyer <- intermaker(Geyer, BlankGeyer)
+  
+  Geyer
+})
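+
+# Usage sketch (not run): fit a Geyer saturation model to a hypothetical
+# point pattern X; the values of r and sat are illustrative only.
+#   fit <- ppm(X ~ 1, Geyer(r=0.07, sat=2))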
+
+  # ........... externally visible auxiliary functions .........
+  
+geyercounts <- function(U, X, r, sat, Xcounts, EqualPairs) {
+  # evaluate effect of adding dummy point or deleting data point
+  # on saturated counts of other data points
+  stopifnot(is.numeric(r))
+  stopifnot(is.numeric(sat))
+  # for C calls we need finite numbers
+  stopifnot(is.finite(r))
+  stopifnot(is.finite(sat))
+  # sort in increasing order of x coordinate
+  oX <- fave.order(X$x)
+  oU <- fave.order(U$x)
+  Xsort <- X[oX]
+  Usort <- U[oU]
+  nX <- npoints(X)
+  nU <- npoints(U)
+  Xcountsort <- Xcounts[oX]
+  # inverse: data point i has sorted position i' = rankX[i]
+  rankX <- integer(nX)
+  rankX[oX] <- seq_len(nX)
+  rankU <- integer(nU)
+  rankU[oU] <- seq_len(nU)
+  # map from quadrature points to data points
+  Uindex <- EqualPairs[,2L]
+  Xindex <- EqualPairs[,1L]
+  Xsortindex <- rankX[Xindex]
+  Usortindex <- rankU[Uindex]
+  Cmap <- rep.int(-1L, nU)
+  Cmap[Usortindex] <- Xsortindex - 1L
+  # call C routine
+  zz <- .C("Egeyer",
+           nnquad = as.integer(nU),
+           xquad  = as.double(Usort$x),
+           yquad  = as.double(Usort$y),
+           quadtodata = as.integer(Cmap),
+           nndata = as.integer(nX),
+           xdata  = as.double(Xsort$x),
+           ydata  = as.double(Xsort$y),
+           tdata  = as.integer(Xcountsort),
+           rrmax  = as.double(r),
+           ssat   = as.double(sat),
+           result = as.double(numeric(nU)),
+           PACKAGE = "spatstat")
+  result <- zz$result[rankU]
+  return(result)
+}
+
+geyerdelta2 <- local({
+
+  geyerdelta2 <- function(X, r, sat, ..., sparseOK=FALSE) {
+    # Sufficient statistic for second order conditional intensity
+    # Geyer model
+    stopifnot(is.numeric(sat) && length(sat) == 1 && sat > 0)
+    # X could be a ppp or quad.
+    if(is.ppp(X)) {
+      # evaluate \Delta_{x_i} \Delta_{x_j} S(x) for data points x_i, x_j
+      # i.e.  h(X[i]|X) - h(X[i]|X[-j]) where h is first order cif statistic
+      return(geydelppp(X, r, sat, sparseOK))
+    } else if(inherits(X, "quad")) {
+      # evaluate \Delta_{u_i} \Delta_{u_j} S(x) for quadrature points u_i, u_j
+      return(geydelquad(X, r, sat, sparseOK))
+    } else stop("Internal error: X should be a ppp or quad object")
+  }
+
+  geydelppp <- function(X, r, sat, sparseOK) {
+    # initialise
+    nX <- npoints(X)
+    result <- if(!sparseOK) matrix(0, nX, nX) else
+              sparseMatrix(i=integer(0), j=integer(0), x=numeric(0),
+                           dims=c(nX, nX))
+    # identify all r-close pairs (ordered pairs i ~ j)
+    a <- closepairs(X, r, what="indices")
+    I <- a$i
+    J <- a$j
+    IJ <- cbind(I,J)
+    # count number of r-neighbours for each point
+    # (consistently with the above)
+    tvals <- table(factor(I, levels=1:nX))
+    # Compute direct part
+    # (arising when i~j) 
+    tI <- tvals[I]
+    tJ <- tvals[J]
+    result[IJ] <-
+      pmin(sat, tI) - pmin(sat, tI - 1) + pmin(sat, tJ) - pmin(sat, tJ - 1)
+    # Compute indirect part
+    # (arising when i~k and j~k for another point k)
+    # First find all such triples 
+    ord <- (I < J)
+    vees <- edges2vees(I[ord], J[ord], nX)
+    # evaluate contribution of (k, i, j)
+    KK <- vees$i
+    tKK <- tvals[KK]
+    contribKK <- pmin(sat, tKK) - 2 * pmin(sat, tKK-1) + pmin(sat, tKK-2)
+    # for each (i, j), sum the contributions over k 
+    II <- vees$j
+    JJ <- vees$k
+    if(!sparseOK) {
+      II <- factor(II, levels=1:nX)
+      JJ <- factor(JJ, levels=1:nX)
+      # was:
+      # delta3 <- tapply(contribKK, list(I=II, J=JJ), sum)
+      # delta3[is.na(delta3)] <- 0
+      delta3 <- tapplysum(contribKK, list(I=II, J=JJ))
+    } else {
+      delta3 <- sparseMatrix(i=II, j=JJ, x=contribKK, dims=c(nX, nX))
+    }
+    # symmetrise and combine
+    result <- result + delta3 + t(delta3)
+    return(result)
+  }
+
+  geydelquad <- function(Q, r, sat, sparseOK) {
+    Z <- is.data(Q)
+    U <- union.quad(Q)
+    nU <- npoints(U)
+    nX <- npoints(Q$data)
+    result <- if(!sparseOK) matrix(0, nU, nU) else
+              sparseMatrix(i=integer(0), j=integer(0), x=numeric(0),
+                           dims=c(nU, nU))
+    # identify all r-close pairs U[i], U[j]
+    a <- closepairs(U, r, what="indices")
+    I <- a$i
+    J <- a$j
+    IJ <- cbind(I, J)
+    # tag which ones are data points
+    zI <- Z[I]
+    zJ <- Z[J]
+    # count t(U[i], X)
+    IzJ <- I[zJ]
+    JzJ <- J[zJ]
+    tvals <- table(factor(IzJ, levels=1:nU))
+    # Compute direct part
+    # (arising when U[i]~U[j]) 
+    tI <- tvals[I]
+    tJ <- tvals[J]
+    tIJ <- tI - zJ
+    tJI <- tJ - zI
+    result[IJ] <-  pmin(sat, tIJ + 1L) - pmin(sat, tIJ) +
+                   pmin(sat, tJI + 1L) - pmin(sat, tJI) 
+    # Compute indirect part
+    # (arising when U[i]~X[k] and U[j]~X[k] for another point X[k])
+    # First find all such triples
+    # Group close pairs X[k] ~ U[j] by index k
+    spl <- split(IzJ, factor(JzJ, levels=1:nX))
+    grlen <- lengths(spl)
+    # Assemble list of triples U[i], X[k], U[j]
+    # by expanding each pair U[i], X[k]
+    JJ <- unlist(spl[JzJ])
+    II <- rep(IzJ, grlen[JzJ])
+    KK <- rep(JzJ, grlen[JzJ])
+    # remove identical pairs i = j
+    ok <- II != JJ
+    II <- II[ok]
+    JJ <- JJ[ok]
+    KK <- KK[ok]
+    # evaluate contribution of each triple
+    tKK <- tvals[KK]
+    zII <- Z[II]
+    zJJ <- Z[JJ]
+    tKIJ <- tKK - zII - zJJ 
+    contribKK <-
+      pmin(sat, tKIJ + 2) - 2 * pmin(sat, tKIJ + 1) + pmin(sat, tKIJ)
+    # for each (i, j), sum the contributions over k
+    if(!sparseOK) {
+      II <- factor(II, levels=1:nU)
+      JJ <- factor(JJ, levels=1:nU)
+      # was:
+      # delta4 <- tapply(contribKK, list(I=II, J=JJ), sum)
+      # delta4[is.na(delta4)] <- 0
+      delta4 <- tapplysum(contribKK, list(I=II, J=JJ))
+    } else {
+      delta4 <- sparseMatrix(i=II, j=JJ, x=contribKK, dims=c(nU, nU))
+    }
+    # combine
+    result <- result + delta4
+    return(result)
+  }
+
+  geyerdelta2
+})
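+
+# Usage sketch (illustrative): for a point pattern X,
+#   D <- geyerdelta2(X, r=0.1, sat=2)
+# returns the matrix with entries D[i,j] = h(X[i]|X) - h(X[i]|X[-j]),
+# the second-order differences described in the comments above.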
diff --git a/R/hackglmm.R b/R/hackglmm.R
new file mode 100755
index 0000000..73f4521
--- /dev/null
+++ b/R/hackglmm.R
@@ -0,0 +1,103 @@
+# hackglmm.R
+#  $Revision: 1.5 $ $Date: 2017/02/07 07:35:32 $
+
+hackglmmPQL <- 
+function (fixed, random, family, data, correlation, weights,
+    control, niter = 10, verbose = TRUE, subset, ..., reltol=1e-3)
+{
+    if (is.character(family))
+        family <- get(family)
+    if (is.function(family))
+        family <- family()
+    if (is.null(family$family)) {
+        print(family)
+        stop("'family' not recognized")
+    }
+    m <- mcall <- Call <- match.call()
+    nm <- names(m)[-1L]
+    keep <- is.element(nm, c("weights", "data", "subset", "na.action"))
+    for (i in nm[!keep]) m[[i]] <- NULL
+    allvars <- if (is.list(random))
+        c(all.vars(fixed), names(random), unlist(lapply(random,
+            function(x) all.vars(formula(x)))))
+    else c(all.vars(fixed), all.vars(random))
+    Terms <- if (missing(data))
+        terms(fixed)
+    else terms(fixed, data = data)
+    off <- attr(Terms, "offset")
+    if (length(off))
+        allvars <- c(allvars, as.character(attr(Terms, "variables"))[off +
+            1])
+    Call$fixed <- eval(fixed)
+    Call$random <- eval(random)
+    m$formula <- as.formula(paste("~", paste(allvars, collapse = "+")))
+    environment(m$formula) <- environment(fixed)
+    m$drop.unused.levels <- TRUE
+    m[[1L]] <- as.name("model.frame")
+    mf <- eval.parent(m)
+    off <- model.offset(mf)
+    if (is.null(off))
+        off <- 0
+    w <- model.weights(mf)
+    if (is.null(w))
+        w <- rep(1, nrow(mf))
+    wts <- mf$wts <- w
+    if(missing(subset)) 
+      fit0 <- glm(formula = fixed, family = family, data = mf,
+                  weights = wts, ...)
+    else {
+    # hack to get around peculiar problem with `subset' argument
+      glmmsubset <- eval(expression(subset), data)
+      if(length(glmmsubset) != nrow(mf)) {
+        if(sum(glmmsubset) != nrow(mf))
+          stop("Internal error: subset vector is wrong length")
+        message("(Fixing subset index..)")
+        glmmsubset <- glmmsubset[glmmsubset]
+      }
+      mf$glmmsubset <- glmmsubset
+      fit0 <- glm(formula = fixed, family = family, data = mf,
+                  weights = wts, subset=glmmsubset, ...)
+    } 
+    w <- fit0$prior.weights
+    eta <- fit0$linear.predictor
+    zz <- eta + fit0$residuals - off
+    wz <- fit0$weights
+    fam <- family
+    nm <- names(mcall)[-1L]
+    keep <- is.element(nm, c("fixed", "random", "data", "subset",
+        "na.action", "control"))
+    for (i in nm[!keep]) mcall[[i]] <- NULL
+    fixed[[2L]] <- quote(zz)
+    mcall[["fixed"]] <- fixed
+    mcall[[1L]] <- as.name("lme")
+    mcall$random <- random
+    mcall$method <- "ML"
+    if (!missing(correlation))
+        mcall$correlation <- correlation
+    mcall$weights <- quote(varFixed(~invwt))
+    mf$zz <- zz
+    mf$invwt <- 1/wz
+    mcall$data <- mf
+    for (i in 1:niter) {
+        if (verbose)
+            cat("iteration", i, "\n")
+        fit <- eval(mcall)
+        etaold <- eta
+        eta <- fitted(fit) + off
+        if (sum((eta - etaold)^2) < (reltol^2) * sum(eta^2))
+            break
+        mu <- fam$linkinv(eta)
+        mu.eta.val <- fam$mu.eta(eta)
+        mf$zz <- eta + (fit0$y - mu)/mu.eta.val - off
+        wz <- w * mu.eta.val^2/fam$variance(mu)
+        mf$invwt <- 1/wz
+        mcall$data <- mf
+    }
+    attributes(fit$logLik) <- NULL
+    fit$call <- Call
+    fit$family <- family
+    fit$logLik <- as.numeric(NA)
+    oldClass(fit) <- c("glmmPQL", oldClass(fit))
+    fit
+}
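+
+# Usage sketch (illustrative; 'df' and 'grp' are placeholders, and the
+# nlme package must be loadable, as for MASS::glmmPQL):
+#   fit <- hackglmmPQL(y ~ x, random = ~1 | grp, family = poisson, data = df)
+# Each iteration above is standard PQL: refit a weighted lme() to the
+# working response zz = eta + (y - mu)/mu'(eta) with weights wz = 1/invwt,
+# until eta stabilises to within 'reltol'.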
+
diff --git a/R/hardcore.R b/R/hardcore.R
new file mode 100755
index 0000000..26f49c9
--- /dev/null
+++ b/R/hardcore.R
@@ -0,0 +1,120 @@
+#
+#
+#    hardcore.S
+#
+#    $Revision: 1.11 $	$Date: 2015/10/21 09:06:57 $
+#
+#    The Hard core process
+#
+#    Hardcore()     create an instance of the Hard Core process
+#                      [an object of class 'interact']
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+Hardcore <- local({
+
+  BlankHardcore <- 
+  list(
+         name   = "Hard core process",
+         creator = "Hardcore",
+         family  = "pairwise.family",  # evaluated later
+         pot    = function(d, par) {
+           v <- 0 * d
+           v[ d <= par$hc ] <-  (-Inf)
+           attr(v, "IsOffset") <- TRUE
+           v
+         },
+         par    = list(hc = NULL),  # filled in later
+         parnames = "hard core distance", 
+         selfstart = function(X, self) {
+           # self starter for Hardcore
+           nX <- npoints(X)
+           if(nX < 2) {
+             # not enough points to make any decisions
+             return(self)
+           }
+           md <- minnndist(X)
+           if(md == 0) {
+             warning(paste("Pattern contains duplicated points:",
+                           "impossible under Hardcore model"))
+             return(self)
+           }
+           if(!is.na(hc <- self$par$hc)) {
+             # value fixed by user or previous invocation
+             # check it
+             if(md < hc)
+               warning(paste("Hard core distance is too large;",
+                             "some data points will have zero probability"))
+             return(self)
+           }
+           # take hc = minimum interpoint distance * n/(n+1)
+           hcX <- md * nX/(nX+1)
+           Hardcore(hc = hcX)
+       },
+         init   = function(self) {
+           hc <- self$par$hc
+           if(length(hc) != 1)
+             stop("hard core distance must be a single value")
+           if(!is.na(hc) && !(is.numeric(hc) && hc > 0))
+             stop("hard core distance hc must be a positive number, or NA")
+         },
+         update = NULL,       # default OK
+         print = NULL,        # default OK
+         interpret =  function(coeffs, self) {
+           return(NULL)
+         },
+         valid = function(coeffs, self) {
+           return(TRUE)
+         },
+         project = function(coeffs, self) {
+           return(NULL)
+         },
+         irange = function(self, coeffs=NA, epsilon=0, ...) {
+           hc <- self$par$hc
+           return(hc)
+         },
+       version=NULL, # evaluated later
+       # fast evaluation is available for the border correction only
+       can.do.fast=function(X,correction,par) {
+         return(all(correction %in% c("border", "none")))
+       },
+       fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) {
+         # fast evaluator for Hardcore interaction
+         if(!all(correction %in% c("border", "none")))
+           return(NULL)
+         if(spatstat.options("fasteval") == "test")
+           message("Using fast eval for Hardcore")
+         hc <- potpars$hc
+         # call evaluator for Strauss process
+         counts <- strausscounts(U, X, hc, EqualPairs)
+         # all counts should be zero
+         v <- matrix(ifelseAB(counts > 0, -Inf, 0), ncol=1)
+         attr(v, "IsOffset") <- TRUE
+         return(v)
+       },
+       Mayer=function(coeffs, self) {
+         # second Mayer cluster integral
+         hc <- self$par$hc
+         return(pi * hc^2)
+       },
+       Percy=function(d, coeffs, par, ...) {
+         ## term used in Percus-Yevick type approximation
+         H <- par$hc
+         t <- abs(d/(2*H))
+         t <- pmin.int(t, 1)
+         y <- 2 * H^2 * (pi - acos(t) + t * sqrt(1 - t^2))
+         return(y)
+       }
+  )
+  class(BlankHardcore) <- "interact"
+  
+  Hardcore <- function(hc=NA) {
+    instantiate.interact(BlankHardcore, list(hc=hc))
+  }
+
+  Hardcore <- intermaker(Hardcore, BlankHardcore)
+  
+  Hardcore
+})
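+
+# Usage sketch (illustrative): the hard core distance may be supplied,
+#   ppm(cells ~ 1, Hardcore(0.05))
+# or omitted, in which case the self-starter above estimates
+# hc = (minimum nearest-neighbour distance) * n/(n+1):
+#   ppm(cells ~ 1, Hardcore())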
diff --git a/R/harmonic.R b/R/harmonic.R
new file mode 100755
index 0000000..7e39d65
--- /dev/null
+++ b/R/harmonic.R
@@ -0,0 +1,58 @@
+#
+#
+#   harmonic.R
+#
+#	$Revision: 1.2 $	$Date: 2004/01/07 08:57:39 $
+#
+#   harmonic()
+#          Analogue of polynom() for harmonic functions only
+#
+# -------------------------------------------------------------------
+#	
+
+harmonic <- function(x,y,n) {
+  if(missing(n))
+    stop("the order n must be specified")
+  n <- as.integer(n)
+  if(is.na(n) || n <= 0)
+    stop("n must be a positive integer")
+
+  if(n > 3)
+    stop("Sorry, harmonic() is not implemented for degree > 3")
+
+  namex <- deparse(substitute(x))
+  namey <- deparse(substitute(y))
+  if(!is.name(substitute(x))) 
+      namex <- paste("(", namex, ")", sep="")
+  if(!is.name(substitute(y))) 
+      namey <- paste("(", namey, ")", sep="")
+  
+  switch(n,
+         {
+           result <- cbind(x, y)
+           names <- c(namex, namey)
+         },
+         {
+           result <- cbind(x, y,
+                           x*y, x^2-y^2)
+           names <- c(namex, namey,
+                      paste("(", namex, ".", namey, ")", sep=""),
+                      paste("(", namex, "^2-", namey, "^2)", sep=""))
+         },
+         {
+           result <- cbind(x, y,
+                           x * y, x^2-y^2, 
+                           x^3 - 3 * x * y^2, y^3 - 3 * x^2 * y)
+           names <- c(namex, namey,
+                      paste("(", namex, ".", namey, ")", sep=""),
+                      paste("(", namex, "^2-", namey, "^2)", sep=""),
+                      paste("(", namex, "^3-3", namex, ".", namey, "^2)",
+                            sep=""),
+                      paste("(", namey, "^3-3", namex, "^2.", namey, ")",
+                            sep="")
+                      )
+         }
+         )
+  dimnames(result) <- list(NULL, names)
+  return(result)
+}
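+
+# Illustrative check: every column returned is a harmonic polynomial
+# (zero Laplacian). For n = 2 the basis is (x, y, x*y, x^2 - y^2), so
+#   harmonic(1, 2, 2)   # the row (1, 2, 2, -3)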
diff --git a/R/hasclose.R b/R/hasclose.R
new file mode 100644
index 0000000..18b540e
--- /dev/null
+++ b/R/hasclose.R
@@ -0,0 +1,189 @@
+#'
+#'     hasclose.R
+#'
+#'    Determine whether each point has a close neighbour
+#'
+#'    $Revision: 1.11 $  $Date: 2017/06/05 10:31:58 $
+
+has.close <- function(X, r, Y=NULL, ...) {
+  UseMethod("has.close")
+}
+
+has.close.default <- function(X, r, Y=NULL, ..., periodic=FALSE) {
+  trap.extra.arguments(...)
+  if(!periodic) {
+    nd <- if(is.null(Y)) nndist(X) else nncross(X, Y, what="dist")
+    return(nd <= r)
+  }
+  if(is.null(Y)) {
+    pd <- pairdist(X, periodic=TRUE)
+    diag(pd) <- Inf
+  } else {
+    pd <- crossdist(X, Y, periodic=TRUE)
+  }
+#  return(apply(pd <= r, 1, any))
+  return(matrowany(pd <= r))
+}
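+
+#' Illustrative example: has.close(cells, 0.1) flags each point of 'cells'
+#' having another point within distance 0.1, and equals nndist(cells) <= 0.1.
+#' The ppp and pp3 methods below return the same logical vector but use
+#' sorted coordinates and dedicated C routines.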
+
+has.close.ppp <- function(X, r, Y=NULL, ..., periodic=FALSE, sorted=FALSE) {
+  trap.extra.arguments(...)
+  nX <- npoints(X)
+  if(nX <= 1) return(logical(nX))
+  #' sort by increasing x coordinate
+  cX <- coords(X)
+  if(!sorted) {
+    oo <- order(cX$x)
+    cX <- cX[oo,,drop=FALSE]
+  }
+  if(is.null(Y)) {
+    if(!periodic) {
+      zz <- .C("hasXclose",
+               n = as.integer(nX),
+               x = as.double(cX$x),
+               y = as.double(cX$y),
+               r = as.double(r),
+               t = as.integer(integer(nX)),
+               PACKAGE = "spatstat")
+    } else {
+      b <- sidelengths(Frame(X))
+      zz <- .C("hasXpclose",
+               n = as.integer(nX),
+               x = as.double(cX$x),
+               y = as.double(cX$y),
+               r = as.double(r),
+               b = as.double(b),
+               t = as.integer(integer(nX)),
+               PACKAGE = "spatstat")
+    }
+  } else {
+    stopifnot(is.ppp(Y))
+    nY <- npoints(Y)
+    if(nY == 0) return(logical(nX))
+    cY <- coords(Y)
+    #' sort Y by increasing x coordinate
+    if(!sorted) {
+      ooY <- order(cY$x)
+      cY <- cY[ooY, , drop=FALSE]
+    }
+    if(!periodic) {
+      zz <- .C("hasXYclose",
+               n1 = as.integer(nX),
+               x1 = as.double(cX$x),
+               y1 = as.double(cX$y),
+               n2 = as.integer(nY),
+               x2 = as.double(cY$x),
+               y2 = as.double(cY$y),
+               r = as.double(r),
+               t = as.integer(integer(nX)),
+               PACKAGE = "spatstat")
+    } else {
+      bX <- sidelengths(Frame(X))
+      bY <- sidelengths(Frame(Y))
+      if(any(bX != bY))
+        warning("Windows are not equal: periodic distance may be erroneous")
+      zz <- .C("hasXYpclose",
+               n1 = as.integer(nX),
+               x1 = as.double(cX$x),
+               y1 = as.double(cX$y),
+               n2 = as.integer(nY),
+               x2 = as.double(cY$x),
+               y2 = as.double(cY$y),
+               r = as.double(r),
+               b = as.double(bX),
+               t = as.integer(integer(nX)),
+               PACKAGE = "spatstat")
+    }
+  }
+  tt <- as.logical(zz$t)
+  if(sorted) return(tt)
+  #' reinstate original order
+  ans <- logical(nX)
+  ans[oo] <- tt
+  return(ans)
+}
+
+has.close.pp3 <- function(X, r, Y=NULL, ..., periodic=FALSE, sorted=FALSE) {
+  trap.extra.arguments(...)
+  nX <- npoints(X)
+  if(nX <= 1) return(logical(nX))
+  cX <- coords(X)
+  if(!sorted) {
+    #' sort by increasing x coordinate
+    oo <- order(cX$x)
+    cX <- cX[oo,,drop=FALSE]
+  }
+  if(is.null(Y)) {
+    if(!periodic) {
+      zz <- .C("hasX3close",
+               n = as.integer(nX),
+               x = as.double(cX$x),
+               y = as.double(cX$y),
+               z = as.double(cX$z),
+               r = as.double(r),
+               t = as.integer(integer(nX)),
+               PACKAGE = "spatstat")
+    } else {
+      b <- sidelengths(as.box3(X))
+      zz <- .C("hasX3pclose",
+               n = as.integer(nX),
+               x = as.double(cX$x),
+               y = as.double(cX$y),
+               z = as.double(cX$z),
+               r = as.double(r),
+               b = as.double(b), 
+               t = as.integer(integer(nX)),
+               PACKAGE = "spatstat")
+    }
+  } else {
+    stopifnot(is.pp3(Y))
+    nY <- npoints(Y)
+    if(nY == 0) return(logical(nX))
+    cY <- coords(Y)
+    if(!sorted) {
+      #' sort Y by increasing x coordinate
+      ooY <- order(cY$x)
+      cY <- cY[ooY, , drop=FALSE]
+    }
+    if(!periodic) {
+      zz <- .C("hasXY3close",
+               n1 = as.integer(nX),
+               x1 = as.double(cX$x),
+               y1 = as.double(cX$y),
+               z1 = as.double(cX$z),
+               n2 = as.integer(nY),
+               x2 = as.double(cY$x),
+               y2 = as.double(cY$y),
+               z2 = as.double(cY$z),
+               r = as.double(r),
+               t = as.integer(integer(nX)),
+               PACKAGE = "spatstat")
+    } else {
+      bX <- sidelengths(as.box3(X))
+      bY <- sidelengths(as.box3(Y))
+      if(any(bX != bY))
+        warning("Domains are not equal: periodic distance may be erroneous")
+      zz <- .C("hasXY3pclose",
+               n1 = as.integer(nX),
+               x1 = as.double(cX$x),
+               y1 = as.double(cX$y),
+               z1 = as.double(cX$z),
+               n2 = as.integer(nY),
+               x2 = as.double(cY$x),
+               y2 = as.double(cY$y),
+               z2 = as.double(cY$z),
+               r = as.double(r),
+               b = as.double(bX),
+               t = as.integer(integer(nX)),
+               PACKAGE = "spatstat")
+    }
+  }
+  tt <- as.logical(zz$t)
+  if(sorted) return(tt)
+  #' reinstate original order
+  ans <- logical(nX)
+  ans[oo] <- tt
+  return(ans)
+}
+
+
+  
diff --git a/R/hasenvelope.R b/R/hasenvelope.R
new file mode 100644
index 0000000..ee7db09
--- /dev/null
+++ b/R/hasenvelope.R
@@ -0,0 +1,27 @@
+#'
+#'    hasenvelope.R
+#'
+#'    A simple class of objects which contain additional envelope data
+#' 
+#'    $Revision: 1.1 $ $Date: 2015/10/05 06:20:31 $
+
+hasenvelope <- function(X, E=NULL) {
+  if(inherits(E, "envelope")) {
+    attr(X, "envelope") <- E
+    class(X) <- c("hasenvelope", class(X))
+  }
+  return(X)
+}
+
+print.hasenvelope <- function(x, ...) {
+  NextMethod("print")
+  splat("[Object contains simulation envelope data]")
+  return(invisible(NULL))
+}
+
+envelope.hasenvelope <- function(Y, ..., Yname=NULL) {
+  if(is.null(Yname)) Yname <- short.deparse(substitute(Y))
+  E <- attr(Y, "envelope")
+  return(envelope(E, ..., Yname=Yname))
+}
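+
+#' Usage sketch (illustrative): if E was computed by envelope(...) with
+#' savefuns=TRUE, then Y <- hasenvelope(X, E) attaches E to X, and a later
+#' envelope(Y, ...) is dispatched here and recycles the saved simulations.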
+  
diff --git a/R/headtail.R b/R/headtail.R
new file mode 100644
index 0000000..1757f36
--- /dev/null
+++ b/R/headtail.R
@@ -0,0 +1,22 @@
+#'
+#'     headtail.R
+#'
+#'   Methods for head() and tail()
+#'
+#'   $Revision: 1.1 $  $Date: 2016/12/20 01:11:29 $
+
+head.tess <- head.psp <- head.ppx <- head.ppp <- function(x, n=6L, ...) {
+  stopifnot(length(n) == 1L)
+  xlen <- nobjects(x)
+  n <- if (n < 0L) max(xlen + n, 0L) else min(n, xlen)
+  x[seq_len(n)]
+}
+
+tail.tess <- tail.psp <- tail.ppx <- tail.ppp <- function (x, n = 6L, ...) {
+  stopifnot(length(n) == 1L)
+  xlen <- nobjects(x)
+  n <- if (n < 0L) max(xlen + n, 0L) else min(n, xlen)
+  x[seq.int(to = xlen, length.out = n)]
+}
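+
+#' Illustrative examples: head(cells, 3) is the pattern of the first three
+#' points of 'cells', and tail(cells, 3) the last three; negative n follows
+#' the utils::head convention, so head(cells, -10) drops the last 10 points.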
+
+
diff --git a/R/hermite.R b/R/hermite.R
new file mode 100644
index 0000000..ad3d6d4
--- /dev/null
+++ b/R/hermite.R
@@ -0,0 +1,76 @@
+##
+##  hermite.R
+##
+##  Gauss-Hermite quadrature
+##
+##  $Revision: 1.5 $  $Date: 2017/02/07 07:35:32 $
+##
+
+HermiteCoefs <- function(order) {
+  ## compute coefficients of Hermite polynomial (unnormalised)
+  x <- 1
+  if(order > 0) 
+    for(n in 1:order)
+      x <- c(0, 2 * x) - c(((0:(n-1)) * x)[-1L], 0, 0)
+  return(x)
+}
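+
+## The loop implements the recurrence H_n(x) = 2x H_{n-1}(x) - H_{n-1}'(x),
+## with coefficients stored in increasing powers of x; for example
+##   HermiteCoefs(2)   # c(-2, 0, 4), i.e. H_2(x) = 4x^2 - 2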
+
+gauss.hermite <- function(f, mu=0, sd=1, ..., order=5) {
+  stopifnot(is.function(f))
+  stopifnot(length(mu) == 1)
+  stopifnot(length(sd) == 1)
+  ## Hermite polynomial coefficients (un-normalised)
+  Hn <- HermiteCoefs(order)
+  Hn1 <- HermiteCoefs(order-1)
+  ## quadrature points
+  x <- sort(Re(polyroot(Hn)))
+  ## weights
+  Hn1x <- matrix(Hn1, nrow=1) %*% t(outer(x, 0:(order-1), "^"))
+  w <- 2^(order-1) * factorial(order) * sqrt(pi)/(order * Hn1x)^2
+  ## adjust
+  ww <- w/sqrt(pi)
+  xx <- mu + sd * sqrt(2) * x
+  ## compute
+  ans <- 0
+  for(i in seq_along(x))
+    ans <- ans + ww[i] * f(xx[i], ...)
+  return(ans)
+}
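+
+## Illustrative check: gauss.hermite(f, mu, sd) approximates E[f(Z)] for
+## Z ~ N(mu, sd^2), and is exact for polynomials of degree <= 2*order - 1:
+##   gauss.hermite(function(x) x^2, mu=1, sd=2)   # 5, since E[Z^2] = 1 + 4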
+
+dmixpois <- local({
+
+  dpoisG <- function(x, ..., k, g) dpois(k, g(x))
+
+  function(x, mu, sd, invlink=exp, GHorder=5) 
+    gauss.hermite(dpoisG, mu=mu, sd=sd, g=invlink, k=x, order=GHorder)
+})
+
+pmixpois <- local({
+  ppoisG <- function(x, ..., q, g, lot) ppois(q, g(x), lower.tail=lot)
+
+  function(q, mu, sd, invlink=exp, lower.tail = TRUE, GHorder=5) 
+    gauss.hermite(ppoisG, mu=mu, sd=sd, g=invlink, q=q, order=GHorder,
+                 lot=lower.tail)
+})
+  
+qmixpois <- function(p, mu, sd, invlink=exp, lower.tail = TRUE, GHorder=5) {
+  ## guess upper and lower limits
+  pmin <- min(p, 1-p)/2
+  lam.hi <- invlink(qnorm(pmin, mean=max(mu), sd=max(sd), lower.tail=FALSE))
+  lam.lo <- invlink(qnorm(pmin, mean=min(mu), sd=max(sd), lower.tail=TRUE))
+  kmin <- qpois(pmin, lam.lo, lower.tail=TRUE)
+  kmax <- qpois(pmin, lam.hi, lower.tail=FALSE)
+  kk <- kmin:kmax
+  pp <- pmixpois(kk, mu, sd, invlink, lower.tail=TRUE, GHorder)
+  ans <- if(lower.tail) kk[findInterval(p, pp, all.inside=TRUE)] else
+         rev(kk)[findInterval(1-p, rev(1-pp), all.inside=TRUE)]
+  return(ans)
+}
+  
+rmixpois <- function(n, mu, sd, invlink=exp) {
+  lam <- invlink(rnorm(n, mean=mu, sd=sd))
+  y <- rpois(n, lam)
+  return(y)
+}
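+
+## Sanity check (illustrative): as sd -> 0 the mixture collapses to a
+## single Poisson with mean invlink(mu), so for example
+##   dmixpois(3, mu=log(2), sd=1e-6)   # approximately dpois(3, 2)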
+
diff --git a/R/hexagons.R b/R/hexagons.R
new file mode 100644
index 0000000..64694ef
--- /dev/null
+++ b/R/hexagons.R
@@ -0,0 +1,90 @@
+## hexagons.R
+## $Revision: 1.6 $ $Date: 2017/02/07 07:35:32 $
+
+hexgrid <- function(W, s, offset=c(0,0), origin=NULL, trim=TRUE) {
+  W <- as.owin(W)
+  check.1.real(s)
+  stopifnot(s > 0)
+  hstep <- 3 * s
+  vstep <- sqrt(3) * s
+  R <- grow.rectangle(as.rectangle(W), hstep)
+  xr <- R$xrange
+  yr <- R$yrange
+  ## initial positions for the 'odd' and 'even' grids
+  p0 <- as2vector(origin %orifnull% centroid.owin(R))
+  p0 <- p0 + as2vector(offset)
+  q0 <- p0 + c(hstep, vstep)/2
+  ## 'even' points
+  p0 <- c(startinrange(p0[1L], hstep, xr),
+          startinrange(p0[2L], vstep, yr))
+  if(!anyNA(p0)) {
+    xeven <- prolongseq(p0[1L], xr, step=hstep)
+    yeven <- prolongseq(p0[2L], yr, step=vstep)
+    xyeven <- expand.grid(x=xeven, y=yeven)
+  } else xyeven <- list(x=numeric(0), y=numeric(0))
+  ## 'odd' points
+  q0 <- c(startinrange(q0[1L], hstep, xr),
+          startinrange(q0[2L], vstep, yr))
+  if(!anyNA(q0)) {
+    xodd <- prolongseq(q0[1L], xr, step=hstep)
+    yodd <- prolongseq(q0[2L], yr, step=vstep)
+    xyodd <- expand.grid(x=xodd, y=yodd)
+  } else xyodd <- list(x=numeric(0), y=numeric(0))
+  ##
+  xy <- concatxy(xyeven, xyodd)
+  XY <- as.ppp(xy, W=R)
+  ##
+  if(trim) return(XY[W])
+  ok <- inside.owin(XY, w=dilation.owin(W, s))
+  return(XY[ok])
+}
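+
+## Geometry note: the centres of a hexagonal lattice of side s form two
+## interleaved rectangular grids with spacings hstep = 3s and
+## vstep = sqrt(3)*s, the second grid offset by (hstep, vstep)/2, which is
+## exactly the construction above. Illustrative call: hexgrid(letterR, 0.2)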
+
+hextess <- function(W, s, offset=c(0,0), origin=NULL, trim=TRUE) {
+  W <- as.owin(W)
+  G <- hexgrid(W=W, s=s, offset=offset, origin=origin, trim=FALSE)
+  if(trim && is.mask(W)) {
+    ## Result is a pixel image tessellation
+    ## Determine pixel resolution by extending 'W' to larger domain of 'G'
+    rasta <- harmonise.im(as.im(1, W), as.owin(G))[[1L]]
+    rasta <- as.mask(rasta)
+    ## Tweak G to have mask window
+    G$window <- rasta
+    ##
+    img <- nnmap(G, what="which")
+    result <- tess(image=img)
+    return(result)
+  }
+  ## Result is a polygonal tessellation
+  Gxy <- as.matrix(as.data.frame(G))
+  n <- nrow(Gxy)
+  ## Hexagon centred at origin
+  hex0 <- disc(npoly=6, radius=s)
+  ## Form hexagons
+  hexes <- vector(mode="list", length=n)
+  for(i in 1:n) 
+    hexes[[i]] <- shift(hex0, Gxy[i,])
+  ## Determine whether tiles intersect window wholly or partly
+  suspect <- rep(TRUE, n)
+  GW <- G[W]
+  GinW <- inside.owin(G, w=W) 
+  suspect[GinW] <- (bdist.points(GW) <= s)
+  ## Compute intersection of tiles with window
+  trimmed <- hexes
+  trimmed[suspect] <- trimmed.suspect <- 
+    lapply(trimmed[suspect], intersect.owin, B=W, fatal=FALSE)
+  nonempty <- rep(TRUE, n)
+  nonempty[suspect] <- !unlist(lapply(trimmed.suspect, is.empty))
+  if(trim) {
+    ## return the tiles intersected with W
+    result <- tess(tiles=trimmed[nonempty], window=W)
+  } else {
+    ## return the tiles that have nonempty intersection with W
+    result <- tess(tiles=hexes[nonempty])
+  }
+  return(result)
+}
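+
+## Usage sketch (illustrative): hextess(letterR, 0.2) clips the hexagonal
+## tiles to the window, hextess(letterR, 0.2, trim=FALSE) keeps each whole
+## hexagon that meets it, and a mask window yields an image tessellation.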
+
+
+  
+  
+  
diff --git a/R/hierarchy.R b/R/hierarchy.R
new file mode 100644
index 0000000..52e7847
--- /dev/null
+++ b/R/hierarchy.R
@@ -0,0 +1,50 @@
+## hierarchy.R
+##
+## Support functions for hierarchical interactions
+##
+##  $Revision: 1.1 $  $Date: 2015/05/26 08:39:56 $
+
+hierarchicalordering <- function(i, s) {
+  s <- as.character(s)
+  if(inherits(i, "hierarchicalordering")) {
+    ## already a hierarchical ordering
+    if(length(s) != length(i$labels))
+      stop("Tried to change the number of types in the hierarchical order")
+    i$labels <- s
+    return(i)
+  }
+  n <- length(s)
+  possible <- if(is.character(i)) s else seq_len(n)
+  j <- match(i, possible)
+  if(any(uhoh <- is.na(j)))
+    stop(paste("Unrecognised",
+               ngettext(sum(uhoh), "level", "levels"),
+               commasep(sQuote(i[uhoh])),
+               "amongst possible levels",
+               commasep(sQuote(s))))
+  if(length(j) < n)
+    stop("Ordering is incomplete")
+  ord <- order(j)
+  m <- matrix(, n, n)
+  rel <- matrix(ord[row(m)] <= ord[col(m)], n, n)
+  dimnames(rel) <- list(s, s)
+  x <- list(indices=j, ordering=ord, labels=s, relation=rel)
+  class(x) <- "hierarchicalordering"
+  x
+}
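+
+## Illustrative example:
+##   h <- hierarchicalordering(c(2, 1), c("a", "b"))
+## declares that type "b" precedes type "a": h$relation["b", "a"] is TRUE,
+## h$relation["a", "b"] is FALSE, and print(h) displays "b ~> a".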
+
+print.hierarchicalordering <- function(x, ...) {
+  splat(x$labels[x$indices], collapse=" ~> ")
+  invisible(NULL)
+}
+                     
+hiermat <- function (x, h) 
+{
+  stopifnot(is.matrix(x))
+  isna <- is.na(x)
+  x[] <- as.character(x)
+  x[isna] <- ""
+  if(inherits(h, "hierarchicalordering")) ## allows h to be NULL, etc
+    x[!(h$relation)] <- ""
+  return(noquote(x))
+}
diff --git a/R/hierhard.R b/R/hierhard.R
new file mode 100644
index 0000000..2e77c1c
--- /dev/null
+++ b/R/hierhard.R
@@ -0,0 +1,196 @@
+##
+##    hierhard.R
+##
+##    $Revision: 1.3 $	$Date: 2017/02/07 07:35:32 $
+##
+##    The hierarchical hard core process
+##
+## -------------------------------------------------------------------
+##	
+
+HierHard <- local({
+
+  # ......... define interaction potential
+
+  HHpotential <- function(d, tx, tu, par) {
+     # arguments:
+     # d[i,j] distance between points X[i] and U[j]
+     # tx[i]  type (mark) of point X[i]
+     # tu[j]  type (mark) of point U[j]
+     #
+     # get matrices of interaction radii
+     h <- par$hradii
+     #
+     # get possible marks and validate
+     if(!is.factor(tx) || !is.factor(tu))
+	stop("marks of data and dummy points must be factor variables")
+     lx <- levels(tx)
+     lu <- levels(tu)
+     if(length(lx) != length(lu) || any(lx != lu))
+	stop("marks of data and dummy points do not have same possible levels")
+
+     if(!identical(lx, par$types))
+        stop("data and model do not have the same possible levels of marks")
+     if(!identical(lu, par$types))
+        stop("dummy points and model do not have the same possible levels of marks")
+     
+     # ensure factor levels are acceptable for column names (etc)
+     lxname <- make.names(lx, unique=TRUE)
+
+     ## list all ordered pairs of types to be checked
+     uptri <- par$archy$relation & !is.na(h)
+     mark1 <- (lx[row(h)])[uptri]
+     mark2 <- (lx[col(h)])[uptri]
+     ## corresponding names
+     mark1name <- (lxname[row(h)])[uptri]
+     mark2name <- (lxname[col(h)])[uptri]
+     vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x")
+     vname <- paste("mark", vname, sep="")
+     npairs <- length(vname)
+     ## create logical array for result
+     z <- array(FALSE, dim=c(dim(d), npairs),
+                dimnames=list(character(0), character(0), vname))
+     # go....
+     if(length(z) > 0) {
+       # apply relevant hard core distance to each pair of points
+       hxu <- h[ tx, tu ]
+       forbid <- (d < hxu)
+       forbid[is.na(forbid)] <- FALSE
+       # form the potential
+       value <- array(0, dim=dim(d))
+       value[forbid] <- -Inf
+       ## score
+       for(i in 1:npairs) {
+         # data points with mark m1
+         Xsub <- (tx == mark1[i])
+         # quadrature points with mark m2
+         Qsub <- (tu == mark2[i])
+         # assign
+         z[Xsub, Qsub, i] <- value[Xsub, Qsub]
+       }
+     }
+     attr(z, "IsOffset") <- TRUE
+     return(z)
+   }
+   #### end of 'pot' function ####
+
+  # Set up basic object except for family and parameters
+  BlankHHobject <- 
+  list(
+    name     = "Hierarchical hard core process",
+    creator  = "HierHard",
+    family   = "hierpair.family", # evaluated later
+    pot      = HHpotential,
+    par      = list(types=NULL, hradii=NULL, archy=NULL), 
+    parnames = c("possible types",
+                 "hardcore distances",
+                 "hierarchical order"),
+    pardesc  = c("vector of possible types",
+                  "matrix of hardcore distances",
+                  "hierarchical order"),
+    selfstart = function(X, self) {
+      types <- self$par$types
+      hradii <- self$par$hradii
+      archy <- self$par$archy
+      if(!is.null(types) && !is.null(hradii) && !is.null(archy)) return(self)
+      if(is.null(types)) types <- levels(marks(X))
+      if(is.null(archy)) 
+        archy <- seq_len(length(types))
+      if(!inherits(archy, "hierarchicalordering"))
+        archy <- hierarchicalordering(archy, types)
+      if(is.null(hradii)) {
+        marx <- marks(X)
+        d <- nndist(X, by=marx)
+        h <- aggregate(d, by=list(from=marx), min)
+        h <- as.matrix(h[, -1L, drop=FALSE])
+        m <- table(marx)
+        mm <- outer(m, m, pmin)
+        hradii <- h * mm/(mm+1)
+        dimnames(hradii) <- list(types, types)
+        hradii[!(archy$relation)] <- NA
+      }
+      HierHard(types=types,hradii=hradii,archy=archy)
+    },
+    init = function(self) {
+      types <- self$par$types
+      hradii <- self$par$hradii
+      ## hradii could be NULL
+      if(!is.null(types)) {
+        if(!is.null(dim(types)))
+          stop(paste("The", sQuote("types"),
+                     "argument should be a vector"))
+        if(length(types) == 0)
+          stop(paste("The", sQuote("types"),"argument should be",
+                     "either NULL or a vector of all possible types"))
+        if(anyNA(types))
+          stop("NA's not allowed in types")
+        if(is.factor(types)) {
+          types <- levels(types)
+        } else {
+          types <- levels(factor(types, levels=types))
+        }
+        nt <- length(types)
+        if(!is.null(hradii))
+          MultiPair.checkmatrix(hradii, nt, sQuote("hradii"), asymmok=TRUE)
+      }
+    },
+    update = NULL, # default OK
+    print = function(self) {
+         hradii <- self$par$hradii
+         types <- self$par$types
+         archy <- self$par$archy
+         if(waxlyrical('gory'))
+           splat(nrow(hradii), "types of points")
+         if(!is.null(types) && !is.null(archy)) {
+           if(waxlyrical('space')) {
+             splat("Possible types and ordering:")
+           } else cat("Hierarchy: ")
+           print(archy)
+         } else if(!is.null(types)) {
+           (if(waxlyrical('space')) splat else cat)("Possible types: ")
+           print(types)
+         } else if(waxlyrical('gory'))
+           splat("Possible types:\t not yet determined")
+         if(!is.null(hradii)) {
+           splat("Hardcore radii:")
+           print(hiermat(dround(hradii), archy))
+         } else splat("Hardcore radii: not yet determined")
+         invisible(NULL)
+       },
+       interpret = function(coeffs, self) {
+        # there are no regular parameters (woo-hoo!)
+         return(NULL)
+       },
+       valid = function(coeffs, self) {
+         return(TRUE)
+       },
+       project  = function(coeffs, self) {
+         return(NULL)
+       },
+       irange = function(self, coeffs=NA, epsilon=0, ...) {
+         h <- self$par$hradii
+         return(max(0, h, na.rm=TRUE))
+       },
+       version=NULL # to be added
+       )
+  class(BlankHHobject) <- "interact"
+
+  # finally create main function
+  HierHard <- function(hradii=NULL, types=NULL, archy=NULL) {
+    if(!is.null(types)) {
+      if(is.null(archy)) archy <- seq_len(length(types))
+      archy <- hierarchicalordering(archy, types)
+    }
+    out <- instantiate.interact(BlankHHobject,
+                                list(types=types,
+                                     hradii=hradii,
+                                     archy=archy))
+    if(!is.null(types) && !is.null(out$par$hradii)) 
+      dimnames(out$par$hradii) <- list(types,types)
+    return(out)
+  }
+
+  HierHard <- intermaker(HierHard, BlankHHobject)
+  
+  HierHard
+})
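+
+## Usage sketch (hypothetical data X with marks "a" and "b"):
+##   h <- matrix(0.05, 2, 2)
+##   ppm(X ~ marks, HierHard(hradii=h, types=c("a", "b"), archy=c(1, 2)))
+## Only ordered pairs permitted by the hierarchical order generate
+## hard core terms; entries of 'hradii' may be NA to omit a pair.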
diff --git a/R/hierpair.family.R b/R/hierpair.family.R
new file mode 100644
index 0000000..19d3e5f
--- /dev/null
+++ b/R/hierpair.family.R
@@ -0,0 +1,321 @@
+#
+#
+#    hierpair.family.R
+#
+#    $Revision: 1.6 $	$Date: 2017/02/07 07:35:32 $
+#
+#    The family of hierarchical pairwise interactions
+#
+#
+# -------------------------------------------------------------------
+#	
+
+hierpair.family <-
+  list(
+       name  = "hierpair",
+       print = function(self) {
+         splat("Hierarchical pairwise interaction family")
+       },
+       plot = function(fint, ..., d=NULL, plotit=TRUE) {
+         verifyclass(fint, "fii")
+         inter <- fint$interaction
+         if(is.null(inter) || is.null(inter$family)
+            || inter$family$name != "hierpair")
+           stop("Tried to plot the wrong kind of interaction")
+         # get fitted coefficients of interaction terms
+         # and set coefficients of offset terms to 1
+         Vnames <- fint$Vnames
+         IsOffset <- fint$IsOffset
+         coeff <- rep.int(1, length(Vnames))
+         names(coeff) <- Vnames
+         coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]]
+         # 
+         pairpot <- inter$pot
+         potpars <- inter$par
+         rmax <- reach(fint, epsilon=1e-3)
+         xlim <- list(...)$xlim
+         if(is.infinite(rmax)) {
+           if(!is.null(xlim))
+             rmax <- max(xlim)
+           else {
+             warning("Reach of interaction is infinite; need xlim to plot it")
+             return(invisible(NULL))
+           }
+         }
+         if(is.null(d)) {
+           dmax <- 1.25 * rmax
+           d <- seq(from=0, to=dmax, length.out=1024)
+         } else {
+           stopifnot(is.numeric(d) &&
+                     all(is.finite(d)) &&
+                     all(diff(d) > 0))
+           dmax <- max(d)
+         }
+         if(is.null(xlim))
+           xlim <- c(0, dmax)
+         types <- potpars$types
+         if(is.null(types))
+           stop("Unable to determine types of points")
+         if(!is.factor(types))
+           types <- factor(types, levels=types)
+         ## compute each potential and store in `fasp' object
+         m <- length(types)
+         nd <- length(d)
+         dd <- matrix(rep.int(d, m), nrow=nd * m, ncol=m)
+         tx <- rep.int(types, rep.int(nd, m))
+         ty <- types
+         p <- pairpot(dd, tx, ty, potpars)
+         if(length(dim(p))==2)
+           p <- array(p, dim=c(dim(p),1), dimnames=NULL)
+         if(dim(p)[3L] != length(coeff))
+           stop("Dimensions of potential do not match coefficient vector")
+         for(k in seq_len(dim(p)[3L]))
+           p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] )
+         y <- exp(apply(p, c(1,2), sum))
+         ylim <- range(0, 1.1, y, finite=TRUE)
+         fns <- vector(m^2, mode="list")
+         which <- matrix(, m, m)
+         for(i in seq_len(m)) {
+           for(j in seq_len(m)) {
+             ## relevant position in matrix
+             ijpos <- i + (j-1L) * m
+             which[i,j] <- ijpos
+             ## extract values of potential
+             yy <- y[tx == types[i], j]
+             ## make fv object
+             fns[[ijpos]] <- fv(data.frame(r=d, h=yy, one=1),
+                                "r", quote(h(r)), "h", cbind(h,one) ~ r,
+                                xlim, c("r", "h(r)", "1"),
+                                c("distance argument r",
+                                  "pairwise interaction term h(r)",
+                                  "reference value 1"))
+           }
+         }
+         funz <- fasp(fns, which=which,
+                      formulae=list(cbind(h, one) ~ r),
+                      title="Fitted pairwise interactions",
+                      rowNames=paste(types), colNames=paste(types))
+         if(plotit)
+           do.call(plot.fasp,
+                   resolve.defaults(list(funz),
+                                    list(...),
+                                    list(ylim=ylim,
+                                         ylab="Pairwise interaction",
+                                         xlab="Distance")))
+         return(invisible(funz))
+       },
+       # end of function `plot'
+       # ----------------------------------------------------
+       eval  = function(X,U,EqualPairs,pairpot,potpars,correction,
+           ..., Reach=NULL, precomputed=NULL, savecomputed=FALSE,
+           pot.only=FALSE) {
+         ##
+         ## This is the eval function for the `hierpair' family.
+         ## 
+
+fop <- names(formals(pairpot))
+if(identical(all.equal(fop, c("d", "par")), TRUE))
+  marx <- FALSE
+else if(identical(all.equal(fop, c("d", "tx", "tu", "par")), TRUE))
+  marx <- TRUE
+else 
+  stop("Formal arguments of pair potential function are not understood")
+
+## edge correction argument
+
+if(length(correction) > 1)
+  stop("Only one edge correction allowed at a time!")
+
+if(!any(correction == c("periodic", "border", "translate", "translation", "isotropic", "Ripley", "none")))
+  stop(paste("Unrecognised edge correction", sQuote(correction)))
+
+#### Compute basic data
+
+   # Decide whether to apply the faster algorithm using 'closepairs'.
+   # (The leading FALSE below deliberately disables this fast path.)
+   use.closepairs <- FALSE &&
+     (correction %in% c("none", "border", "translate", "translation")) &&
+     !is.null(Reach) && is.finite(Reach) &&
+     is.null(precomputed) && !savecomputed
+
+if(!is.null(precomputed)) {
+  # precomputed
+  X <- precomputed$X
+  U <- precomputed$U
+  EqualPairs <- precomputed$E
+  M <- precomputed$M
+} else {
+  U <- as.ppp(U, X$window)   # i.e. X$window is DEFAULT window
+  if(!use.closepairs) 
+    # Form the matrix of distances
+    M <- crossdist(X, U, periodic=(correction=="periodic"))
+}
+
+nX <- npoints(X)
+nU <- npoints(U)
+dimM <- c(nX, nU)
+
+# Evaluate the pairwise potential without edge correction
+
+if(use.closepairs)
+  POT <- evalPairPotential(X,U,EqualPairs,pairpot,potpars,Reach)
+else if(!marx) 
+  POT <- pairpot(M, potpars)
+else
+  POT <- pairpot(M, marks(X), marks(U), potpars)
+
+# Determine whether each column of potential is an offset
+
+  IsOffset <- attr(POT, "IsOffset")
+
+# Check errors and special cases
+
+if(!is.matrix(POT) && !is.array(POT)) {
+  if(length(POT) == 0 && X$n ==  0) # empty pattern
+    POT <- array(POT, dim=c(dimM,1))
+  else
+    stop("Pair potential did not return a matrix or array")
+}
+
+if(length(dim(POT)) == 1 || any(dim(POT)[1:2] != dimM)) {
+        whinge <- paste0(
+           "The pair potential function ",short.deparse(substitute(pairpot)),
+           " must produce a matrix or array with its first two dimensions\n",
+           "the same as the dimensions of its input.\n")
+	stop(whinge)
+}
+
+# make it a 3D array
+if(length(dim(POT))==2)
+        POT <- array(POT, dim=c(dim(POT),1), dimnames=NULL)
+                          
+if(correction == "translate" || correction == "translation") {
+        edgewt <- edge.Trans(X, U)
+        # sanity check ("everybody knows there ain't no...")
+        if(!is.matrix(edgewt))
+          stop("internal error: edge.Trans() did not yield a matrix")
+        if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x))
+          stop("internal error: edge weights matrix returned by edge.Trans() has wrong dimensions")
+        POT <- c(edgewt) * POT
+} else if(correction == "isotropic" || correction == "Ripley") {
+        # weights are required for contributions from QUADRATURE points
+        edgewt <- t(edge.Ripley(U, t(M), X$window))
+        if(!is.matrix(edgewt))
+          stop("internal error: edge.Ripley() did not return a matrix")
+        if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x))
+          stop("internal error: edge weights matrix returned by edge.Ripley() has wrong dimensions")
+        POT <- c(edgewt) * POT
+}
+
+# No pair potential term between a point and itself
+if(length(EqualPairs) > 0) {
+  nplanes <- dim(POT)[3L]
+  for(k in 1:nplanes)
+    POT[cbind(EqualPairs, k)] <- 0
+}
+
+# Return just the pair potential?
+if(pot.only)
+  return(POT)
+
+# Sum the pairwise potentials 
+
+V <- apply(POT, c(2,3), sum)
+
+# attach the original pair potentials
+attr(V, "POT") <- POT
+
+# attach the offset identifier
+attr(V, "IsOffset") <- IsOffset
+
+# pass computed information out the back door
+if(savecomputed)
+  attr(V, "computed") <- list(E=EqualPairs, M=M)
+return(V)
+
+},
+######### end of function $eval
+       suffstat = function(model, X=NULL, callstring="hierpair.family$suffstat") {
+# for hierarchical pairwise models only  (possibly nonstationary)
+  verifyclass(model, "ppm")
+  if(!identical(model$interaction$family$name,"hierpair"))
+    stop("Model is not a hierarchical pairwise interaction process")
+
+  if(is.null(X)) {
+    X <- data.ppm(model)
+    modelX <- model
+  } else {
+    verifyclass(X, "ppp")
+    modelX <- update(model, X, method="mpl")
+  }
+
+  # find data points which do not contribute to pseudolikelihood
+  mplsubset <- getglmdata(modelX)$.mpl.SUBSET
+  mpldata   <- is.data(quad.ppm(modelX))
+  contribute <- mplsubset[mpldata]
+
+  Xin  <- X[contribute]
+  Xout <- X[!contribute]
+  
+  # partial model matrix arising from ordered pairs of data points
+  # which both contribute to the pseudolikelihood
+  Empty <- X[integer(0)]
+  momINxIN <- partialModelMatrix(Xin, Empty, model, "suffstat")
+
+  # partial model matrix at data points which contribute to the pseudolikelihood
+  momIN <-
+    partialModelMatrix(X, Empty, model, "suffstat")[contribute, , drop=FALSE]
+  
+  # partial model matrix arising from ordered pairs of data points
+  # the second of which does not contribute to the pseudolikelihood
+  mom <- partialModelMatrix(Xout, Xin, model, "suffstat")
+  indx <- Xout$n + seq_len(Xin$n)
+  momINxOUT <- mom[indx, , drop=FALSE]
+
+  ## determine which canonical covariates are true second-order terms
+  ## eg 'mark1x1' 
+  typ <- levels(marks(X))
+  vn <- paste0("mark", typ, "x", typ)
+  order2  <- names(coef(model)) %in% vn
+  order1  <- !order2
+
+  result <- 0 * coef(model)
+  
+  if(any(order1)) {
+    # first order contributions (including 'mark1x2' etc)
+    o1terms  <- momIN[ , order1, drop=FALSE]
+    o1sum   <- colSums(o1terms)
+    result[order1] <- o1sum
+  }
+  if(any(order2)) {
+    # adjust for double counting of ordered pairs in INxIN but not INxOUT
+    o2termsINxIN  <- momINxIN[, order2, drop=FALSE]
+    o2termsINxOUT <- momINxOUT[, order2, drop=FALSE]
+    o2sum   <- colSums(o2termsINxIN)/2 + colSums(o2termsINxOUT)
+    result[order2] <- o2sum
+  }
+
+  return(result)
+  },
+######### end of function $suffstat
+  delta2 = function(X, inte, correction, ...) {
+  # Sufficient statistic for second order conditional intensity
+  # for hierarchical pairwise interaction processes
+  # Equivalent to evaluating pair potential.
+    X <- as.ppp(X)
+    nX <- npoints(X)
+    E <- cbind(1:nX, 1:nX)
+    R <- reach(inte)
+    result <- hierpair.family$eval(X,X,E,
+                                   inte$pot,inte$par,
+                                   correction,
+                                   pot.only=TRUE,
+                                   Reach=R)
+    return(result)
+  }
+######### end of function $delta2
+)
+######### end of list
+
+class(hierpair.family) <- "isf"
+
diff --git a/R/hierstrauss.R b/R/hierstrauss.R
new file mode 100644
index 0000000..e6dab48
--- /dev/null
+++ b/R/hierstrauss.R
@@ -0,0 +1,239 @@
+##
+##    hierstrauss.R
+##
+##    $Revision: 1.9 $	$Date: 2016/02/16 01:39:12 $
+##
+##    The hierarchical Strauss process
+##
+##    HierStrauss()    create an instance of the hierarchical Strauss process
+##                 [an object of class 'interact']
+##	
+## -------------------------------------------------------------------
+##	
+
+HierStrauss <- local({
+
+  # ......... define interaction potential
+
+  HSpotential <- function(d, tx, tu, par) {
+     # arguments:
+     # d[i,j] distance between points X[i] and U[j]
+     # tx[i]  type (mark) of point X[i]
+     # tu[j]  type (mark) of point U[j]
+     #
+     # get matrix of interaction radii r[ , ]
+     r <- par$radii
+     #
+     # get possible marks and validate
+     if(!is.factor(tx) || !is.factor(tu))
+	stop("marks of data and dummy points must be factor variables")
+     lx <- levels(tx)
+     lu <- levels(tu)
+     if(length(lx) != length(lu) || any(lx != lu))
+	stop("marks of data and dummy points do not have same possible levels")
+
+     if(!identical(lx, par$types))
+        stop("data and model do not have the same possible levels of marks")
+     if(!identical(lu, par$types))
+        stop("dummy points and model do not have the same possible levels of marks")
+     
+     # ensure factor levels are acceptable for column names (etc)
+     lxname <- make.names(lx, unique=TRUE)
+
+     ## list all ordered pairs of types to be checked
+     uptri <- par$archy$relation & !is.na(r)
+     mark1 <- (lx[row(r)])[uptri]
+     mark2 <- (lx[col(r)])[uptri]
+     ## corresponding names
+     mark1name <- (lxname[row(r)])[uptri]
+     mark2name <- (lxname[col(r)])[uptri]
+     vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x")
+     vname <- paste("mark", vname, sep="")
+     npairs <- length(vname)
+     ## create logical array for result
+     z <- array(FALSE, dim=c(dim(d), npairs),
+                dimnames=list(character(0), character(0), vname))
+     # go....
+     if(length(z) > 0) {
+       ## assemble the relevant interaction distance for each pair of points
+       rxu <- r[ tx, tu ]
+       ## apply relevant threshold to each pair of points
+       str <- (d <= rxu)
+       ## score
+       for(i in 1:npairs) {
+         # data points with mark m1
+         Xsub <- (tx == mark1[i])
+         # quadrature points with mark m2
+         Qsub <- (tu == mark2[i])
+         # assign
+         z[Xsub, Qsub, i] <- str[Xsub, Qsub]
+       }
+     }
+     return(z)
+   }
+   #### end of 'pot' function ####
+
+  # ........ auxiliary functions ..............
+  delHS <- function(which, types, radii, archy) {
+    radii[which] <- NA
+    if(all(is.na(radii))) return(Poisson())
+    return(HierStrauss(types=types, radii=radii, archy=archy))
+  }
+  
+  # Set up basic object except for family and parameters
+  BlankHSobject <- 
+  list(
+       name     = "Hierarchical Strauss process",
+       creator  = "HierStrauss",
+       family   = "hierpair.family", # evaluated later
+       pot      = HSpotential,
+       par      = list(types=NULL, radii=NULL, archy=NULL), # filled in later
+       parnames = c("possible types",
+                    "interaction distances",
+                    "hierarchical order"),
+       selfstart = function(X, self) {
+         if(!is.null(self$par$types) && !is.null(self$par$archy))
+           return(self)
+         types <- self$par$types %orifnull% levels(marks(X))
+         archy <- self$par$archy %orifnull% types
+         HierStrauss(types=types,radii=self$par$radii,archy=archy)
+       },
+       init = function(self) {
+         types <- self$par$types
+         if(!is.null(types)) {
+           if(length(types) == 0)
+             stop(paste("The", sQuote("types"),"argument should be",
+                        "either NULL or a vector of all possible types"))
+           if(anyNA(types))
+             stop("NA's not allowed in types")
+           if(is.factor(types)) {
+             types <- levels(types)
+           } else {
+             types <- levels(factor(types, levels=types))
+           }
+           radii <- self$par$radii
+           nt <- length(types)
+           MultiPair.checkmatrix(radii, nt, sQuote("radii"), asymmok=TRUE)
+         }
+       },
+       update = NULL, # default OK
+       print = function(self) {
+         radii <- self$par$radii
+         types <- self$par$types
+         archy <- self$par$archy
+         if(waxlyrical('gory'))
+           splat(nrow(radii), "types of points")
+         if(!is.null(types) && !is.null(archy)) {
+           if(waxlyrical('space')) {
+             splat("Possible types and ordering:")
+           } else cat("Hierarchy: ")
+           print(archy)
+         } else if(!is.null(types)) {
+           (if(waxlyrical('space')) splat else cat)("Possible types: ")
+           print(types)
+         } else if(waxlyrical('gory'))
+           splat("Possible types:\t not yet determined")
+         splat("Interaction radii:")
+         print(hiermat(radii, self$par$archy))
+         invisible(NULL)
+       },
+       interpret = function(coeffs, self) {
+         # get possible types
+         typ <- self$par$types
+         ntypes <- length(typ)
+         # get matrix of Strauss interaction radii
+         r <- self$par$radii
+         # list all unordered pairs of types
+         uptri <- self$par$archy$relation & !is.na(r)
+         index1 <- (row(r))[uptri]
+         index2 <- (col(r))[uptri]
+         npairs <- length(index1)
+         # extract canonical parameters; shape them into a matrix
+         gammas <- matrix(NA, ntypes, ntypes)
+         dimnames(gammas) <- list(typ, typ)
+         gammas[ cbind(index1, index2) ] <- exp(coeffs)
+         #
+         return(list(param=list(gammas=gammas),
+                     inames="interaction parameters gamma_ij",
+                     printable=hiermat(round(gammas, 4), self$par$archy)))
+       },
+       valid = function(coeffs, self) {
+         # interaction parameters gamma[i,j]
+         gamma <- (self$interpret)(coeffs, self)$param$gammas
+         # interaction radii
+         radii <- self$par$radii
+         # parameters to estimate
+         required <- !is.na(radii) & self$par$archy$relation
+         # all required parameters must be finite
+         if(!all(is.finite(gamma[required]))) return(FALSE)
+         # DIAGONAL interaction parameters must be non-explosive
+         d <- diag(rep(TRUE, nrow(radii)))
+         return(all(gamma[required & d] <= 1))
+       },
+       project  = function(coeffs, self) {
+         # interaction parameters gamma[i,j]
+         gamma <- (self$interpret)(coeffs, self)$param$gammas
+         # interaction radii and types
+         radii <- self$par$radii
+         types <- self$par$types
+         archy <- self$par$archy
+         # problems?
+         uptri <- archy$relation
+         required <- !is.na(radii) & uptri
+         okgamma  <- !uptri | (is.finite(gamma) & (gamma <= 1))
+         naughty  <- required & !okgamma
+         # 
+         if(!any(naughty))  
+           return(NULL)
+         if(spatstat.options("project.fast")) {
+           # remove ALL naughty terms simultaneously
+           return(delHS(naughty, types, radii, archy))
+         } else {
+           # present a list of candidates
+           rn <- row(naughty)
+           cn <- col(naughty)
+           ord <- self$par$archy$ordering
+           uptri <- (ord[rn] <= ord[cn]) 
+           upn <- uptri & naughty
+           rowidx <- as.vector(rn[upn])
+           colidx <- as.vector(cn[upn])
+           mats <- lapply(as.data.frame(rbind(rowidx, colidx)),
+                          matrix, ncol=2)
+           inters <- lapply(mats, delHS, types=types, radii=radii, archy=archy)
+           return(inters)
+         }
+       },
+       irange = function(self, coeffs=NA, epsilon=0, ...) {
+         r <- self$par$radii
+         active <- !is.na(r) & self$par$archy$relation
+         if(any(!is.na(coeffs))) {
+           gamma <- (self$interpret)(coeffs, self)$param$gammas
+           gamma[is.na(gamma)] <- 1
+           active <- active & (abs(log(gamma)) > epsilon)
+         }
+         if(any(active)) return(max(r[active])) else return(0)
+       },
+       version=NULL # to be added
+       )
+  class(BlankHSobject) <- "interact"
+
+  # finally create main function
+  HierStrauss <- function(radii, types=NULL, archy=NULL) {
+    if(!is.null(types)) {
+      if(is.null(archy)) archy <- seq_len(length(types))
+      archy <- hierarchicalordering(archy, types)
+    }
+    radii[radii == 0] <- NA
+    out <- instantiate.interact(BlankHSobject,
+                                list(types=types,
+                                     radii=radii,
+                                     archy=archy))
+    if(!is.null(types))
+      dimnames(out$par$radii) <- list(types, types)
+    return(out)
+  }
+
+  HierStrauss <- intermaker(HierStrauss, BlankHSobject)
+  
+  HierStrauss
+})
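+
+## Usage sketch (hypothetical data X with marks "a" and "b"):
+##   r <- matrix(0.05, 2, 2)
+##   ppm(X ~ marks, HierStrauss(radii=r, types=c("a", "b"), archy=c(1, 2)))
+## yields the canonical terms markaxa, markaxb and markbxb; there is no
+## separate (b,a) term because "a" precedes "b" in the hierarchy.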
diff --git a/R/hierstrhard.R b/R/hierstrhard.R
new file mode 100644
index 0000000..1b7e25b
--- /dev/null
+++ b/R/hierstrhard.R
@@ -0,0 +1,317 @@
+##
+##    hierstrhard.R
+##
+##    $Revision: 1.4 $	$Date: 2017/02/07 07:35:32 $
+##
+##    The hierarchical Strauss-hard core process
+##
+## -------------------------------------------------------------------
+##	
+
+HierStraussHard <- local({
+
+  # ......... define interaction potential
+
+  HSHpotential <- function(d, tx, tu, par) {
+     # arguments:
+     # d[i,j] distance between points X[i] and U[j]
+     # tx[i]  type (mark) of point X[i]
+     # tu[j]  type (mark) of point U[j]
+     #
+     # get matrices of interaction radii
+     r <- par$iradii
+     h <- par$hradii
+     #
+     # get possible marks and validate
+     if(!is.factor(tx) || !is.factor(tu))
+	stop("marks of data and dummy points must be factor variables")
+     lx <- levels(tx)
+     lu <- levels(tu)
+     if(length(lx) != length(lu) || any(lx != lu))
+	stop("marks of data and dummy points do not have same possible levels")
+
+     if(!identical(lx, par$types))
+        stop("data and model do not have the same possible levels of marks")
+     if(!identical(lu, par$types))
+        stop("dummy points and model do not have the same possible levels of marks")
+     
+     # ensure factor levels are acceptable for column names (etc)
+     lxname <- make.names(lx, unique=TRUE)
+
+     ## list all ordered pairs of types to be checked
+     uptri <- par$archy$relation & !is.na(r)
+     mark1 <- (lx[row(r)])[uptri]
+     mark2 <- (lx[col(r)])[uptri]
+     ## corresponding names
+     mark1name <- (lxname[row(r)])[uptri]
+     mark2name <- (lxname[col(r)])[uptri]
+     vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x")
+     vname <- paste("mark", vname, sep="")
+     npairs <- length(vname)
+     ## create logical array for result
+     z <- array(FALSE, dim=c(dim(d), npairs),
+                dimnames=list(character(0), character(0), vname))
+     # go....
+     if(length(z) > 0) {
+       ## assemble the relevant interaction distance for each pair of points
+       rxu <- r[ tx, tu ]
+       ## apply relevant threshold to each pair of points
+       str <- (d <= rxu)
+       # and the relevant hard core distance
+       hxu <- h[ tx, tu ]
+       forbid <- (d < hxu)
+       forbid[is.na(forbid)] <- FALSE
+       # form the potential
+       value <- str
+       value[forbid] <- -Inf
+       ## score
+       for(i in 1:npairs) {
+         # data points with mark m1
+         Xsub <- (tx == mark1[i])
+         # quadrature points with mark m2
+         Qsub <- (tu == mark2[i])
+         # assign
+         z[Xsub, Qsub, i] <- value[Xsub, Qsub]
+       }
+     }
+     return(z)
+   }
+   #### end of 'pot' function ####
+
+  # ........ auxiliary functions ..............
+  delHSH <- function(which, types, iradii, hradii, archy, ihc) {
+    iradii[which] <- NA
+    if(any(!is.na(iradii))) {
+      # some gamma interactions left
+      # return modified HierStraussHard with fewer gamma parameters
+      return(HierStraussHard(types=types, iradii=iradii, hradii=hradii,
+                             archy=archy))
+    } else if(any(!ihc)) {
+      # ihc = inactive hard cores
+      # no gamma interactions left, but some active hard cores
+      return(HierHard(types=types, hradii=hradii, archy=archy))
+    } else return(Poisson())
+  }
+  
+  # Set up basic object except for family and parameters
+  BlankHSHobject <- 
+  list(
+    name     = "Hierarchical Strauss-hard core process",
+    creator  = "HierStraussHard",
+    family   = "hierpair.family", # evaluated later
+    pot      = HSHpotential,
+    par      = list(types=NULL, iradii=NULL, hradii=NULL, archy=NULL), 
+    parnames = c("possible types",
+                 "interaction distances",
+                 "hardcore distances",
+                 "hierarchical order"),
+    pardesc  = c("vector of possible types",
+                  "matrix of interaction distances",
+                  "matrix of hardcore distances",
+                  "hierarchical order"),
+    selfstart = function(X, self) {
+      types <- self$par$types
+      hradii <- self$par$hradii
+      archy <- self$par$archy
+      if(!is.null(types) && !is.null(hradii) && !is.null(archy)) return(self)
+      if(is.null(types)) types <- levels(marks(X))
+      if(is.null(archy)) 
+        archy <- seq_len(length(types))
+      if(!inherits(archy, "hierarchicalordering"))
+        archy <- hierarchicalordering(archy, types)
+      if(is.null(hradii)) {
+        marx <- marks(X)
+        d <- nndist(X, by=marx)
+        h <- aggregate(d, by=list(from=marx), min)
+        h <- as.matrix(h[, -1L, drop=FALSE])
+        m <- table(marx)
+        mm <- outer(m, m, pmin)
+        hradii <- h * mm/(mm+1)
+        dimnames(hradii) <- list(types, types)
+        hradii[!(archy$relation)] <- NA  # only ordered pairs interact
+      }
+      HierStraussHard(types=types,hradii=hradii,
+                      iradii=self$par$iradii, archy=archy)
+    },
+    init = function(self) {
+      types <- self$par$types
+      iradii <- self$par$iradii
+      hradii <- self$par$hradii
+      ## hradii could be NULL
+      if(!is.null(types)) {
+        if(!is.null(dim(types)))
+          stop(paste("The", sQuote("types"),
+                     "argument should be a vector"))
+        if(length(types) == 0)
+          stop(paste("The", sQuote("types"),"argument should be",
+                     "either NULL or a vector of all possible types"))
+        if(anyNA(types))
+          stop("NA's not allowed in types")
+        if(is.factor(types)) {
+          types <- levels(types)
+        } else {
+          types <- levels(factor(types, levels=types))
+        }
+        nt <- length(types)
+        MultiPair.checkmatrix(iradii, nt, sQuote("iradii"), asymmok=TRUE)
+        if(!is.null(hradii))
+          MultiPair.checkmatrix(hradii, nt, sQuote("hradii"), asymmok=TRUE)
+      }
+      ina <- is.na(iradii)
+      if(all(ina))
+        stop(paste("All entries of", sQuote("iradii"), "are NA"))
+      if(!is.null(hradii)) {
+        hna <- is.na(hradii)
+        both <- !ina & !hna
+        if(any(iradii[both] <= hradii[both]))
+          stop("iradii must be larger than hradii")
+      }
+    },
+    update = NULL, # default OK
+    print = function(self) {
+         iradii <- self$par$iradii
+         hradii <- self$par$hradii
+         types <- self$par$types
+         archy <- self$par$archy
+         if(waxlyrical('gory'))
+           splat(nrow(iradii), "types of points")
+         if(!is.null(types) && !is.null(archy)) {
+           if(waxlyrical('space')) {
+             splat("Possible types and ordering:")
+           } else cat("Hierarchy: ")
+           print(archy)
+         } else if(!is.null(types)) {
+           (if(waxlyrical('space')) splat else cat)("Possible types: ")
+           print(types)
+         } else if(waxlyrical('gory'))
+           splat("Possible types:\t not yet determined")
+         splat("Interaction radii:")
+         dig <- getOption("digits")
+         print(hiermat(signif(iradii, dig), archy))
+         if(!is.null(hradii)) {
+           splat("Hardcore radii:")
+           print(hiermat(signif(hradii, dig), archy))
+         } else splat("Hardcore radii: not yet determined")
+         invisible(NULL)
+       },
+       interpret = function(coeffs, self) {
+         # get possible types
+         typ <- self$par$types
+         ntypes <- length(typ)
+         ## get matrices of interaction radii
+         r <- self$par$iradii
+         h <- self$par$hradii
+         ## list all unordered pairs of types
+         uptri <- self$par$archy$relation & !is.na(r)
+         index1 <- (row(r))[uptri]
+         index2 <- (col(r))[uptri]
+         npairs <- length(index1)
+         # extract canonical parameters; shape them into a matrix
+         gammas <- matrix(NA, ntypes, ntypes)
+         dimnames(gammas) <- list(typ, typ)
+         gammas[ cbind(index1, index2) ] <- exp(coeffs)
+         #
+         return(list(param=list(gammas=gammas),
+                     inames="interaction parameters gamma_ij",
+                     printable=hiermat(dround(gammas), self$par$archy)))
+       },
+       valid = function(coeffs, self) {
+         # interaction radii r[i,j]
+         iradii <- self$par$iradii
+         # hard core radii r[i,j]
+         hradii <- self$par$hradii
+         # interaction parameters gamma[i,j]
+         gamma <- (self$interpret)(coeffs, self)$param$gammas
+         # parameters to estimate
+         required <- !is.na(iradii) & self$par$archy$relation
+         # all required parameters must be finite
+         if(!all(is.finite(gamma[required]))) return(FALSE)
+         # DIAGONAL interactions must be non-explosive
+         d <- diag(rep(TRUE, nrow(iradii)))
+         activehard <- !is.na(hradii) & (hradii > 0)
+         return(all(gamma[required & d & !activehard] <= 1))
+       },
+       project  = function(coeffs, self) {
+         # interaction parameters gamma[i,j]
+         gamma <- (self$interpret)(coeffs, self)$param$gammas
+         # interaction radii
+         iradii <- self$par$iradii
+         # hard core radii r[i,j]
+         hradii <- self$par$hradii
+         types <- self$par$types
+         archy <- self$par$archy
+         # active hard cores
+         activehard <- !is.na(hradii) & (hradii > 0)
+         ihc <- !activehard
+         # problems?
+         uptri <- archy$relation
+         required <- !is.na(iradii) & uptri
+         offdiag <- !diag(nrow(iradii))
+         gammavalid <- is.finite(gamma) & (activehard | offdiag | (gamma <= 1))
+         naughty <- required & !gammavalid
+         # 
+         if(!any(naughty))  
+           return(NULL)
+         if(spatstat.options("project.fast")) {
+           # remove ALL naughty terms simultaneously
+           return(delHSH(naughty, types, iradii, hradii, archy, ihc))
+         } else {
+           # present a list of candidates
+           rn <- row(naughty)
+           cn <- col(naughty)
+           ord <- self$par$archy$ordering
+           uptri <- (ord[rn] <= ord[cn]) 
+           upn <- uptri & naughty
+           rowidx <- as.vector(rn[upn])
+           colidx <- as.vector(cn[upn])
+           mats <- lapply(as.data.frame(rbind(rowidx, colidx)),
+                          matrix, ncol=2)
+           inters <- lapply(mats, delHSH, types=types,
+                            iradii=iradii, hradii=hradii,
+                            archy=archy, ihc=ihc)
+           return(inters)
+         }
+       },
+       irange = function(self, coeffs=NA, epsilon=0, ...) {
+         r <- self$par$iradii
+         h <- self$par$hradii
+         ractive <- !is.na(r) & self$par$archy$relation
+         hactive <- !is.na(h) & self$par$archy$relation
+         if(any(!is.na(coeffs))) {
+           gamma <- (self$interpret)(coeffs, self)$param$gammas
+           gamma[is.na(gamma)] <- 1
+           ractive <- ractive & (abs(log(gamma)) > epsilon)
+         }
+         if(!any(c(ractive,hactive)))
+           return(0)
+         else
+           return(max(c(r[ractive],h[hactive])))
+       },
+       version=NULL # to be added
+       )
+  class(BlankHSHobject) <- "interact"
+
+  # finally create main function
+  HierStraussHard <- function(iradii, hradii=NULL, types=NULL, archy=NULL) {
+    if(!is.null(types)) {
+      if(is.null(archy)) archy <- seq_len(length(types))
+      archy <- hierarchicalordering(archy, types)
+    }
+    iradii[iradii == 0] <- NA
+    out <- instantiate.interact(BlankHSHobject,
+                                list(types=types,
+                                     iradii=iradii,
+                                     hradii=hradii,
+                                     archy=archy))
+    if(!is.null(types)) {
+      dn <- list(types, types)
+      dimnames(out$par$iradii) <- dn
+      if(!is.null(out$par$hradii)) dimnames(out$par$hradii) <- dn
+    }
+    return(out)
+  }
+
+  HierStraussHard <- intermaker(HierStraussHard, BlankHSHobject)
+  
+  HierStraussHard
+})
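+
+## Illustrative usage (a hedged sketch, not part of the package code).
+## 'iradii' gives the Strauss interaction distances; 'hradii' may be
+## omitted, in which case the self-starter estimates hard core distances
+## from the data:
+##   r <- matrix(45, nrow=2, ncol=2)
+##   fit <- ppm(ants ~ 1, HierStraussHard(iradii=r, archy=c(2, 1)))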
diff --git a/R/ho.R b/R/ho.R
new file mode 100755
index 0000000..002b49d
--- /dev/null
+++ b/R/ho.R
@@ -0,0 +1,79 @@
+#
+#  ho.R
+#
+#  Huang-Ogata method 
+#
+#  $Revision: 1.17 $ $Date: 2016/03/15 07:42:26 $
+#
+
+ho.engine <- function(model, ..., nsim=100, nrmh=1e5,
+                        start=NULL,
+                        control=list(nrep=nrmh), verb=TRUE) {
+  verifyclass(model, "ppm")
+
+  if(is.null(start)) 
+    start <- list(n.start=data.ppm(model)$n)
+  
+  # check that the model can be simulated
+  if(!valid.ppm(model)) {
+    warning("Fitted model is invalid - cannot be simulated")
+    return(NULL)
+  }
+  
+  # compute the observed value of the sufficient statistic
+  X <- data.ppm(model)
+  sobs <- suffstat(model, X)
+  
+  # generate 'nsim' realisations of the fitted model
+  # and compute the sufficient statistics of the model
+  rmhinfolist <- rmh(model, start, control, preponly=TRUE, verbose=FALSE)
+  if(verb) {
+    cat("Simulating... ")
+    state <- list()
+  }
+  ndone <- 0
+  while(ndone < nsim) {
+    Xi <- rmhEngine(rmhinfolist, verbose=FALSE)
+    v <- try(suffstat(model,Xi))
+    if(!inherits(v, "try-error")) {
+      if(ndone == 0) 
+        svalues <- matrix(, nrow=nsim, ncol=length(v))
+      ndone <- ndone + 1
+      svalues[ndone, ] <- v
+    }
+    if(verb) state <- progressreport(ndone, nsim, state=state)
+  }
+  if(verb) cat("Done.\n\n")
+  # calculate the sample mean and variance of the
+  # sufficient statistic for the simulations
+  smean <- apply(svalues, 2, mean, na.rm=TRUE)
+  svar <- var(svalues, na.rm=TRUE)
+  # value of canonical parameter from MPL fit
+  theta0 <- coef(model)
+  # Newton-Raphson update
+  Vinverse <- solve(svar)
+  theta <- theta0 + as.vector(Vinverse %*% (sobs - smean))
+  ## appropriate names
+  nama <- names(theta0)
+  if(!is.null(nama)) {
+    names(theta) <- nama
+    dimnames(svar) <- dimnames(Vinverse) <- list(nama, nama)
+  }
+  ## update model
+  newmodel <- model
+  newmodel$coef <- theta
+  newmodel$coef.orig <- theta0
+  newmodel$method <- "ho"
+  newmodel$fitter <- "ho"
+  newmodel$fisher <- svar
+  newmodel$varcov <- Vinverse
+  # recompute fitted interaction
+  newmodel$fitin <- NULL
+  newmodel$fitin <- fitin(newmodel)
+  ## update pseudolikelihood value using code in logLik.ppm
+  newmodel$maxlogpl.orig <- model$maxlogpl
+  newmodel$maxlogpl <- logLik(newmodel, new.coef=theta, warn=FALSE)
+  ##
+  return(newmodel)
+}
+
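+## Illustrative usage (a hedged sketch, not part of the package code).
+## ho.engine is internal; users normally reach it through ppm with
+## method="ho", which refits by the Huang-Ogata one-step approximation:
+##   fit <- ppm(cells ~ 1, Strauss(r=0.07), method="ho")
+##   coef(fit)        # Huang-Ogata updated estimates
+##   fit$coef.orig    # original maximum pseudolikelihood estimates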
diff --git a/R/hopskel.R b/R/hopskel.R
new file mode 100644
index 0000000..b79faae
--- /dev/null
+++ b/R/hopskel.R
@@ -0,0 +1,84 @@
+##
+## hopskel.R
+##     Hopkins-Skellam test
+##
+##  $Revision: 1.2 $  $Date: 2014/09/23 08:24:36 $
+
+hopskel <- function(X) {
+  stopifnot(is.ppp(X))
+  n <- npoints(X)
+  if(n < 2) return(NA)
+  dX <- nndist(X)
+  U <- runifpoint(n, Window(X))
+  dU <- nncross(U, X, what="dist")
+  A <- mean(dX^2)/mean(dU^2)
+  return(A)
+}
+
+hopskel.test <- function(X, ..., 
+                         alternative=c("two.sided", "less", "greater",
+                           "clustered", "regular"),
+                         method=c("asymptotic", "MonteCarlo"),
+                         nsim=999
+                         ) {
+  Xname <- short.deparse(substitute(X))
+
+  verifyclass(X, "ppp")
+  W <- Window(X)
+  n <- npoints(X)
+
+  method <- match.arg(method)
+  
+  # alternative hypothesis
+  alternative <- match.arg(alternative)
+  if(alternative == "clustered") alternative <- "less"
+  if(alternative == "regular") alternative <- "greater"
+  altblurb <-
+    switch(alternative,
+           two.sided="two-sided",
+           less="clustered (A < 1)",
+           greater="regular (A > 1)")
+
+  ## compute observed value
+  statistic <- hopskel(X)
+  ## p-value
+  switch(method,
+         asymptotic = {
+           ## F-distribution
+           nn <- 2 * n
+           p.value <-
+             switch(alternative,
+                    less = pf(statistic, nn, nn, lower.tail=TRUE),
+                    greater = pf(statistic, nn, nn, lower.tail=FALSE),
+                    two.sided = 2 *
+                    pf(statistic, nn, nn, lower.tail=(statistic < 1)))
+           pvblurb <- "using F distribution"
+         },
+         MonteCarlo = {
+           ## Monte Carlo p-value
+           sims <- numeric(nsim)
+           for(i in 1:nsim) {
+             Xsim <- runifpoint(n, win=W)
+             sims[i] <- hopskel(Xsim)
+           }
+           ## compute p-values once, after all simulations are done
+           p.upper <- (1 + sum(sims >= statistic))/(1 + nsim)
+           p.lower <- (1 + sum(sims <= statistic))/(1 + nsim)
+           p.value <- switch(alternative,
+                             less=p.lower,
+                             greater=p.upper,
+                             two.sided=2*min(p.lower, p.upper))
+           pvblurb <- paste("Monte Carlo test based on",
+                            nsim, "simulations of CSR with fixed n")
+         })
+
+  statistic <- as.numeric(statistic)
+  names(statistic) <- "A"
+  
+  out <- list(statistic=statistic,
+              p.value=p.value,
+              alternative=altblurb,
+              method=c("Hopkins-Skellam test of CSR", pvblurb),
+              data.name=Xname)
+  class(out) <- "htest"
+  return(out)
+}
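+
+## Illustrative usage (a hedged sketch, not part of the package code).
+## The statistic A compares squared nearest-neighbour distances in the
+## data with those measured from uniform random test points; A < 1
+## suggests clustering, A > 1 suggests regularity:
+##   hopskel(redwood)
+##   hopskel.test(redwood, alternative="clustered")
+##   hopskel.test(redwood, method="MonteCarlo", nsim=199)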
diff --git a/R/hybrid.R b/R/hybrid.R
new file mode 100755
index 0000000..33a7c2e
--- /dev/null
+++ b/R/hybrid.R
@@ -0,0 +1,314 @@
+#
+#
+#    hybrid.R
+#
+#    $Revision: 1.8 $	$Date: 2017/02/07 07:35:32 $
+#
+#    Hybrid of several interactions
+#
+#    Hybrid()    create a hybrid of several interactions
+#                 [an object of class 'interact']
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+Hybrid <- local({
+
+  Hybrid <- function(...) {
+    interlist <- list(...)
+    n <- length(interlist)
+    if(n == 0)
+      stop("No arguments given")
+    #' arguments may be interaction objects or ppm objects
+    isinter <- unlist(lapply(interlist, is.interact))
+    isppm   <- unlist(lapply(interlist, is.ppm))
+    if(any(nbg <- !(isinter | isppm)))
+      stop(paste(ngettext(sum(nbg), "Argument", "Arguments"),
+                 paste(which(nbg), collapse=", "),
+                 ngettext(sum(nbg), "is not an interaction",
+                          "are not interactions")))
+    #' ensure the list contains only interaction objects
+    if(any(isppm))
+      interlist[isppm] <- lapply(interlist[isppm], as.interact)
+    #' recursively expand any components that are themselves hybrids
+    while(any(ishybrid <- unlist(lapply(interlist, is.hybrid)))) {
+      i <- min(which(ishybrid))
+      n <- length(interlist)
+      expandi <- interlist[[i]]$par
+      interlist <- c(if(i > 1) interlist[1:(i-1L)] else NULL,
+                     expandi,
+                     if(i < n) interlist[(i+1L):n] else NULL)
+    }
+    #' 
+    ncomponents <- length(interlist)
+    if(ncomponents == 1) {
+      #' single interaction - return it
+      return(interlist[[1L]])
+    }
+    #' ensure all components have names
+    names(interlist) <- good.names(names(interlist),
+                                   "HybridComponent", 1:ncomponents)
+    out <- 
+      list(
+        name     = "Hybrid interaction",
+        creator  = "Hybrid",
+        family    = hybrid.family,
+        pot      = NULL,
+        par      = interlist,
+        parnames = names(interlist),
+        selfstart = function(X, self) {
+          ilist <- self$par
+          sslist <- lapply(ilist, getElement, name="selfstart")
+          has.ss <- sapply(sslist, is.function)
+          if(any(has.ss)) {
+            ilist[has.ss] <- lapply(ilist[has.ss], invokeSelfStart, Y=X)
+            self$par <- ilist
+          }
+          return(self)
+        },
+        init     = NULL,
+        update = NULL,  # default OK
+        print = function(self, ..., family=FALSE, brief=FALSE) {
+          if(family)
+            print.isf(self$family)
+          ncomponents <- length(self$par)
+          clabs <- self$parnames
+          splat("Hybrid of", ncomponents, "components:",
+                commasep(sQuote(clabs)))
+          for(i in 1:ncomponents) {
+            splat(paste0(clabs[i], ":"))
+            print(self$par[[i]], ..., family=family, brief=brief)
+          }
+          parbreak()
+          return(invisible(NULL))
+        },
+        interpret =  function(coeffs, self) {
+          interlist <- self$par
+          result <- list(param=list(),
+                         inames=character(0),
+                         printable=list())
+          for(i in 1:length(interlist)) {
+            interI <- interlist[[i]]
+            nameI  <- names(interlist)[[i]]
+            nameI. <- paste(nameI, ".", sep="")
+            #' find coefficients with prefix that exactly matches nameI.
+            Cname  <- names(coeffs)
+            prefixlength <- nchar(nameI.)
+            Cprefix <- substr(Cname, 1, prefixlength)
+            relevant <- (Cprefix == nameI.)
+            #' extract them
+            if(any(relevant)) {
+              Crelevant <- coeffs[relevant]
+              names(Crelevant) <-
+                substr(Cname[relevant], prefixlength+1, max(nchar(Cname)))
+              #' invoke the self-interpretation of interI
+              interpretI <- interI$interpret
+              if(is.function(interpretI)) {
+                resultI <- interpretI(Crelevant, interI)
+                paramI  <- resultI$param
+                prinI   <- resultI$printable
+                inamesI <- resultI$inames
+                inamesI <- paste(nameI, inamesI)
+                if(length(prinI) > 0) {
+                  result$param     <- append(result$param, paramI)
+                  result$printable <- append(result$printable, list(prinI))
+                  result$inames <- c(result$inames, inamesI)
+                }
+              }
+            }
+          }
+          return(result)
+        },
+        valid = function(coeffs, self) {
+          #' check validity via mechanism used for 'rmhmodel' 
+          siminfo <- .Spatstat.Rmhinfo[["Hybrid interaction"]]
+          Z <- siminfo(coeffs, self)
+          cifs   <- Z$cif
+          pars   <- Z$par
+          ntypes <- Z$ntypes
+          if((Ncif <- length(cifs)) == 1) {
+            #' single cif
+            pars <- append(pars, list(beta=rep.int(1, ntypes)))
+          } else {
+            for(i in 1:Ncif) 
+              pars[[i]] <- append(pars[[i]], list(beta=rep.int(1, ntypes[i])))
+          }
+          RM <- rmhmodel(cif=cifs, par=pars, types=1:max(ntypes), 
+                         stopinvalid=FALSE)
+          return(RM$integrable)
+        },
+        project = function(coeffs, self) {
+          if((self$valid)(coeffs, self)) return(NULL)
+          #' separate into components
+          spl <- splitHybridInteraction(coeffs, self)
+          interlist <- spl$interlist
+          coeflist  <- spl$coeflist
+          #' compute projection for each component interaction
+          Ncif <- length(interlist)
+          projlist <- vector(mode="list", length=Ncif)
+          nproj    <- integer(Ncif)
+          for(i in 1:Ncif) {
+            coefsI <- coeflist[[i]]
+            interI <- interlist[[i]]
+            if(!is.interact(interI))
+              stop("Internal error: interlist entry is not an interaction")
+            projI <- interI$project
+            if(is.null(projI))
+              stop(paste("Projection is not yet implemented for a",
+                         interI$name))
+            p <- projI(coefsI, interI)
+            #' p can be NULL (indicating no projection required for interI)
+            #' or a single interaction or a list of interactions.
+            if(is.null(p)) {
+              if(Ncif == 1) return(NULL) # no projection required
+              p <- list(NULL)
+              nproj[i] <- 0
+            } else if(is.interact(p)) {
+              p <- list(p)
+              nproj[i] <- 1L
+            } else if(is.list(p) && all(unlist(lapply(p, is.interact)))) {
+              nproj[i] <- length(p)
+            } else
+              stop("Internal error: result of projection had wrong format")
+            projlist[[i]] <- p
+          }
+          #' for interaction i there are nproj[i] **new** interactions to try.
+          if(all(nproj == 0))
+            return(NULL)
+          if(spatstat.options("project.fast")) {
+            #' Single interaction required.
+            #' Extract first entry from each list
+            #' (there should be only one entry, but...)
+            qlist <- lapply(projlist, "[[", i=1L)
+            #' replace NULL entries by corresponding original interactions
+            isnul <- unlist(lapply(qlist, is.null))
+            if(all(isnul))
+              return(NULL)
+            if(any(isnul))
+              qlist[isnul] <- interlist[isnul]
+            names(qlist) <- names(interlist)
+            #' build hybrid and return
+            result <- do.call(Hybrid, qlist)
+            return(result)
+          } 
+          #' Full case
+          result <- list()
+          for(i in which(nproj > 0)) {
+            ntry <- nproj[i]
+            tries <- projlist[[i]]
+            for(j in 1:ntry) {
+              #' assemble list of component interactions for hybrid
+              qlist <- interlist
+              qlist[[i]] <- tries[[j]]
+              #' eliminate Poisson
+              ispois <- unlist(lapply(qlist, is.poisson))
+              if(all(ispois)) {
+                #' collapse to single Poisson
+                h <- Poisson()
+              } else {
+                if(any(ispois)) qlist <- qlist[!ispois]
+                h <- do.call(Hybrid, qlist)
+              }
+              result <- append(result, list(h))
+            }
+          }
+          #' 'result' is a list of interactions, each a hybrid
+          if(length(result) == 1)
+            result <- result[[1L]]
+          return(result)
+        },
+        irange = function(self, coeffs=NA, epsilon=0, ...) {
+          interlist <- self$par
+          answer <- 0
+          for(i in 1:length(interlist)) {
+            interI <- interlist[[i]]
+            nameI  <- names(interlist)[[i]]
+            nameI. <- paste(nameI, ".", sep="")
+            #' find coefficients with prefix that exactly matches nameI.
+            if(all(is.na(coeffs)))
+              Crelevant <- NA
+            else {
+              Cname  <- names(coeffs)
+              prefixlength <- nchar(nameI.)
+              Cprefix <- substr(Cname, 1, prefixlength)
+              relevant <- (Cprefix == nameI.)
+              #' extract them
+              Crelevant <- coeffs[relevant]
+              names(Crelevant) <-
+                substr(Cname[relevant], prefixlength+1, max(nchar(Cname)))
+            }
+            #' compute reach 
+            reachI <- interI$irange
+            if(is.function(reachI)) {
+              resultI <- reachI(interI,
+                                coeffs=Crelevant, epsilon=epsilon, ...)
+              answer <- max(answer, resultI)
+            }
+          }
+          return(answer)
+        },
+        version=versionstring.spatstat()
+        )
+    class(out) <- "interact"
+    return(out)
+  }
+
+  invokeSelfStart <- function(inte, Y) {
+    ss <- inte$selfstart
+    if(!is.function(ss)) return(inte)
+    return(ss(Y, inte))
+  }
+
+  Hybrid
+})
+
+
+is.hybrid <- function(x) { UseMethod("is.hybrid") }
+
+is.hybrid.interact <- function(x) {
+  return(is.interact(x) && (x$name == "Hybrid interaction"))
+}
+
+is.hybrid.ppm <- function(x) {
+  return(is.hybrid(as.interact(x)))
+}
+
+splitHybridInteraction <- function(coeffs, inte) {
+  # For hybrids, $par is a list of the component interactions,
+  # but coeffs is a numeric vector. 
+  # Split the coefficient vector into the relevant coeffs for each interaction
+  interlist <- inte$par
+  N <- length(interlist)
+  coeflist <- vector(mode="list", length=N)
+  for(i in 1:N) {
+    interI <- interlist[[i]]
+    # forbid hybrids-of-hybrids - these should not occur anyway
+    if(interI$name == "Hybrid interaction")
+      stop("A hybrid-of-hybrid interactions is not implemented")
+    # nameI is the tag that identifies I-th component in hybrid
+    nameI  <- names(interlist)[[i]]
+    nameI. <- paste(nameI, ".", sep="")
+    # find coefficients with prefix that exactly matches nameI.
+    Cname  <- names(coeffs)
+    prefixlength <- nchar(nameI.)
+    Cprefix <- substr(Cname, 1, prefixlength)
+    relevant <- (Cprefix == nameI.)
+    # extract coefficients
+    #   (there may be none, if this interaction is Poisson or an 'offset')
+    coeffsI <- coeffs[relevant]
+    # remove the prefix so the coefficients are recognisable to interaction
+    if(any(relevant)) 
+      names(coeffsI) <-
+        substr(Cname[relevant], prefixlength+1, max(nchar(Cname)))
+    # store
+    coeflist[[i]] <- coeffsI
+  }
+  names(coeflist) <- names(interlist)
+  return(list(coeflist=coeflist, interlist=interlist))
+}
+
+Hybrid <- intermaker(Hybrid, list(creator="Hybrid",
+                                  name="general hybrid Gibbs process",
+                                  par=list("..."=42),
+                                  parnames=list("any list of interactions")))
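+
+## Illustrative usage (a hedged sketch, not part of the package code).
+## Hybrid() multiplies the conditional intensities of its components,
+## so a hard core can be combined with a saturation interaction:
+##   H <- Hybrid(Hard=Hardcore(0.02), Sat=Geyer(r=0.1, sat=2))
+##   fit <- ppm(redwood ~ 1, H)
+##   is.hybrid(fit)   # TRUE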
diff --git a/R/hybrid.family.R b/R/hybrid.family.R
new file mode 100755
index 0000000..fccaae3
--- /dev/null
+++ b/R/hybrid.family.R
@@ -0,0 +1,173 @@
+#
+#   hybrid.family.R
+#
+#    $Revision: 1.12 $	$Date: 2017/02/07 07:35:32 $
+#
+#    Hybrid interactions
+#
+#    hybrid.family:      object of class 'isf' defining pairwise interaction
+#	
+# -------------------------------------------------------------------
+#	
+
+hybrid.family <-
+  list(
+       name  = "hybrid",
+       print = function(self) {
+         cat("Hybrid interaction family\n")
+       },
+       plot = function(fint, ..., d=NULL, plotit=TRUE, separate=FALSE) {
+         # plot hybrid interaction if possible
+         verifyclass(fint, "fii")
+         inter <- fint$interaction
+         if(is.null(inter) || is.null(inter$family)
+            || inter$family$name != "hybrid")
+           stop("Tried to plot the wrong kind of interaction")
+         if(is.null(d)) {
+           # compute reach and determine max distance for plots
+           dmax <- 1.25 * reach(inter)
+           if(!is.finite(dmax)) {
+             # interaction has infinite reach
+             # Are plot limits specified?
+             xlim <- resolve.defaults(list(...), list(xlim=c(0, Inf)))$xlim
+             if(all(is.finite(xlim))) dmax <- max(xlim) else
+               stop("Interaction has infinite reach; need to specify xlim or d")
+           }
+           d <- seq(0, dmax, length=256)
+         }
+         # get fitted coefficients of interaction terms
+         # and set coefficients of offset terms to 1         
+         Vnames <- fint$Vnames
+         IsOffset <- fint$IsOffset
+         coeff <- rep.int(1, length(Vnames))
+         names(coeff) <- Vnames
+         coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]]         
+         # extract the component interactions 
+         interlist <- inter$par
+         # check that they are all pairwise interactions
+         families <- unlist(lapply(interlist, interactionfamilyname))
+         if(!separate && !all(families == "pairwise")) {
+           warning(paste("Cannot compute the resultant function;",
+                         "not all components are pairwise interactions;",
+                         "plotting each component separately"))
+           separate <- TRUE
+         }
+         # deal with each interaction
+         ninter <- length(interlist)
+         results <- list()
+         for(i in 1:ninter) {
+           interI <- interlist[[i]]
+           nameI  <- names(interlist)[[i]]
+           nameI. <- paste(nameI, ".", sep="")
+           # find coefficients with prefix that exactly matches nameI.
+           prefixlength <- nchar(nameI.)
+           Vprefix <- substr(Vnames, 1, prefixlength)
+           relevant <- (Vprefix == nameI.)
+           # construct fii object for this component
+           fitinI <- fii(interI,
+                         coeff[relevant], Vnames[relevant], IsOffset[relevant])
+           # convert to fv object
+           a <- plot(fitinI, ..., d=d, plotit=FALSE)
+           aa <- list(a)
+           names(aa) <- nameI
+           results <- append(results, aa)
+         }
+         # computation of resultant is only implemented for fv objects
+         if(!separate && !all(unlist(lapply(results, is.fv)))) {
+           warning(paste("Cannot compute the resultant function;",
+                         "not all interaction components yielded an fv object;",
+                         "plotting separate results for each component"))
+           separate <- TRUE
+         }
+         # return separate 'fv' or 'fasp' objects if required
+         results <- as.anylist(results)
+         if(separate) {
+           if(plotit) {
+             main0 <- "Pairwise interaction components"
+             do.call(plot, resolve.defaults(list(results),
+                                              list(...),
+                                              list(main=main0)))
+           }
+           return(invisible(results))
+         }
+         # multiply together to obtain resultant pairwise interaction
+         ans <- results[[1L]]
+         if(ninter >= 2) {
+           for(i in 2:ninter) {
+             Fi <- results[[i]]
+             ans <- eval.fv(ans * Fi)
+           }
+           copyover <- c("ylab", "yexp", "labl", "desc", "fname")
+           attributes(ans)[copyover] <- attributes(results[[1L]])[copyover]
+         }
+         main0 <- "Resultant pairwise interaction"
+         if(plotit)
+           do.call(plot, resolve.defaults(list(ans),
+                                            list(...),
+                                            list(main=main0)))
+         return(invisible(ans))
+       },
+       eval  = function(X,U,EqualPairs,pot,pars,correction, ...) {
+         # `pot' is ignored; `pars' is a list of interactions
+         nU <- length(U$x)
+         V <- matrix(, nU, 0)
+         IsOffset <- logical(0)
+         for(i in 1:length(pars)) {
+           # extract i-th component interaction
+           interI <- pars[[i]]
+           nameI  <- names(pars)[[i]]
+           # compute potential for i-th component
+           VI <- evalInteraction(X, U, EqualPairs, interI, correction, ...)
+           if(ncol(VI) > 0) {
+             if(ncol(VI) > 1 && is.null(colnames(VI))) # make up names
+               colnames(VI) <- paste("Interaction", seq(ncol(VI)), sep=".")
+             # prefix label with name of i-th component 
+             colnames(VI) <- paste(nameI, dimnames(VI)[[2L]], sep=".")
+             # handle IsOffset
+             offI <- attr(VI, "IsOffset")
+             if(is.null(offI))
+               offI <- rep.int(FALSE, ncol(VI))
+             # tack on
+             IsOffset <- c(IsOffset, offI)
+             # append to matrix V
+             V <- cbind(V, VI)
+           }
+         }
+         if(any(IsOffset))
+           attr(V, "IsOffset") <- IsOffset
+         return(V)
+       },
+       delta2 = function(X, inte, correction, ..., sparseOK=FALSE) {
+         ## Sufficient statistic for second order conditional intensity
+         result <- NULL
+         interlist <- inte$par
+         for(ii in interlist) {
+           v <- NULL
+           ## look for 'delta2' in component interaction 'ii'
+           if(!is.null(delta2 <- ii$delta2) && is.function(delta2)) 
+             v <- delta2(X, ii, correction, sparseOK=sparseOK)
+           ## look for 'delta2' in family of component 'ii'
+           if(is.null(v) &&
+              !is.null(delta2 <- ii$family$delta2) &&
+              is.function(delta2))
+             v <- delta2(X, ii, correction, sparseOK=sparseOK)
+           if(is.null(v)) {
+             ## no special algorithm available: generic algorithm needed
+             return(NULL)
+           }
+           if(is.null(result)) {
+             result <- v
+           } else if(inherits(v, c("sparse3Darray", "sparseMatrix"))) {
+             result <- bind.sparse3Darray(result, v, along=3)
+           } else {
+             result <- abind::abind(as.array(result), v, along=3)
+           }
+         }
+         return(result)
+       },
+       suffstat = NULL
+)
+
+class(hybrid.family) <- "isf"
+
+
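+## Illustrative note (a hedged sketch, not part of the package code):
+## the resultant pair potential of a fitted hybrid of pairwise
+## interactions can be inspected via the fitted interaction object:
+##   fit <- ppm(redwood ~ 1, Hybrid(Strauss(0.05), Strauss(0.1)))
+##   plot(fitin(fit))   # product of the two fitted pair potentials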
diff --git a/R/hyperframe.R b/R/hyperframe.R
new file mode 100755
index 0000000..104ab54
--- /dev/null
+++ b/R/hyperframe.R
@@ -0,0 +1,640 @@
+#
+#  hyperframe.R
+#
+# $Revision: 1.64 $  $Date: 2017/02/07 07:35:32 $
+#
+
+hyperframe <- local({
+
+  hyperframe <- function(...,
+                         row.names=NULL, check.rows=FALSE, check.names=TRUE,
+                         stringsAsFactors=default.stringsAsFactors()) {
+    aarg <- list(...)
+    nama <- names(aarg)
+
+    ## number of columns (= variables)
+    nvars <- length(aarg)
+  
+    if(nvars == 0) {
+      ## zero columns - return
+      result <- list(nvars=0,
+                     ncases=0,
+                     vname=character(0),
+                     vtype=factor(,
+                       levels=c("dfcolumn","hypercolumn","hyperatom")),
+                     vclass=character(0),
+                     df=data.frame(),
+                     hyperatoms=list(),
+                     hypercolumns=list())
+      class(result) <- c("hyperframe", class(result))
+      return(result)
+    }
+
+    ## check column names
+    if(is.null(nama))
+      nama <- paste("V", 1:nvars, sep="")
+    else if(any(unnamed <- (nama == ""))) 
+      nama[unnamed] <- paste("V", seq_len(sum(unnamed)), sep="")
+    nama <- make.names(nama, unique=TRUE)
+    names(aarg) <- nama
+  
+    ## Each argument must be either
+    ##    - a vector suitable as a column in a data frame
+    ##    - a list of objects of the same class
+    ##    - a single object of some class
+  
+    dfcolumns    <- sapply(aarg, is.dfcolumn)
+    hypercolumns <- sapply(aarg, is.hypercolumn)
+    hyperatoms   <- !(dfcolumns | hypercolumns)
+
+    ## Determine number of rows (= cases) 
+    columns <- dfcolumns | hypercolumns
+    if(!any(columns)) {
+      ncases <- 1
+    } else {
+      heights <- rep.int(1, nvars)
+      heights[columns] <-  lengths(aarg[columns])
+      u <- unique(heights)
+      if(length(u) > 1) {
+        u <- u[u != 1]
+        if(length(u) > 1)
+          stop(paste("Column lengths are inconsistent:",
+                     paste(u, collapse=",")))
+      }
+      ncases <- u
+      if(ncases > 1 && all(heights[dfcolumns] == 1)) {
+        ## force the data frame to have 'ncases' rows
+        aarg[dfcolumns] <- lapply(aarg[dfcolumns], rep, ncases)
+        heights[dfcolumns] <- ncases
+      }
+      if(any(stubs <- hypercolumns & (heights != ncases))) {
+        ## hypercolumns of height 1 should be hyperatoms
+        aarg[stubs] <- lapply(aarg[stubs], "[[", i=1L)
+        hypercolumns[stubs] <- FALSE
+        hyperatoms[stubs] <- TRUE
+      }
+    }
+  
+    ## Collect the data frame columns into a data frame
+    if(!any(dfcolumns))
+      df <- as.data.frame(matrix(, ncases, 0), row.names=row.names)
+    else {
+      df <- do.call(data.frame,
+                    append(aarg[dfcolumns],
+                           list(row.names=row.names,
+                                check.rows=check.rows,
+                                check.names=check.names,
+                                stringsAsFactors=stringsAsFactors)))
+      names(df) <- nama[dfcolumns]
+    }
+
+    ## Storage type of each variable
+    vtype <- character(nvars)
+    vtype[dfcolumns] <- "dfcolumn"
+    vtype[hypercolumns] <- "hypercolumn"
+    vtype[hyperatoms] <- "hyperatom"
+    vtype <- factor(vtype, levels=c("dfcolumn","hypercolumn","hyperatom"))
+
+    ## Class of each variable
+    vclass <- character(nvars)
+    if(any(dfcolumns))
+      vclass[dfcolumns] <- unlist(lapply(as.list(df), class1))
+    if(any(hyperatoms))
+      vclass[hyperatoms] <- unlist(lapply(aarg[hyperatoms], class1))
+    if(any(hypercolumns))
+      vclass[hypercolumns] <- unlist(lapply(aarg[hypercolumns], class1of1))
+    ## Put the result together
+    result <- list(nvars=nvars,
+                   ncases=ncases,
+                   vname=nama,
+                   vtype=vtype,
+                   vclass=vclass,
+                   df=df,
+                   hyperatoms=aarg[hyperatoms],
+                   hypercolumns=aarg[hypercolumns])
+    
+    class(result) <- c("hyperframe", class(result))
+    return(result)
+  }
+
+  is.dfcolumn <- function(x) {
+    is.atomic(x) && (is.vector(x) || is.factor(x))
+  }
+  
+  is.hypercolumn <- function(x) {
+    if(!is.list(x))
+      return(FALSE)
+    if(inherits(x, c("listof", "anylist")))
+      return(TRUE)
+    if(length(x) <= 1)
+      return(TRUE)
+    cla <- lapply(x, class)
+    return(length(unique(cla)) == 1)
+  }
+
+  class1 <- function(x) { class(x)[1L] }
+
+  class1of1 <- function(x) { class(x[[1L]])[1L] }
+  
+  hyperframe
+})
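+
+## Illustrative construction (a hedged sketch, not part of the package
+## code). Columns may be ordinary vectors ("dfcolumns"), lists of objects
+## of a single class ("hypercolumns"), or single objects recycled to
+## every row ("hyperatoms"):
+##   H <- hyperframe(id=1:3,
+##                   pattern=list(cells, japanesepines, redwood),
+##                   window=square(1))
+##   H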
+
+
+is.hyperframe <- function(x) inherits(x, "hyperframe")
+
+print.hyperframe <- function(x, ...) {
+  ux <- unclass(x)
+  nvars <- ux$nvars
+  ncases <- ux$ncases
+  if(nvars * ncases == 0) {
+    splat("NULL hyperframe with", ncases,
+          ngettext(ncases, "row (=case)", "rows (=cases)"),
+          "and", nvars,
+          ngettext(nvars, "column (=variable)", "columns (=variables)"))
+  } else {
+    if(waxlyrical('gory')) cat("Hyperframe:\n")
+    print(as.data.frame(x, discard=FALSE), ...)
+  }
+  return(invisible(NULL))
+}
+
+dim.hyperframe <- function(x) {
+  with(unclass(x), c(ncases, nvars))
+}
+
+summary.hyperframe <- function(object, ..., brief=FALSE) {
+  x <- unclass(object)
+  y <- list(
+            nvars = x$nvars,
+            ncases = x$ncases,
+            dim = c(x$ncases, x$nvars),
+            typeframe = data.frame(VariableName=x$vname, Class=x$vclass),
+            storage = x$vtype,
+            col.names = x$vname)
+  classes <- x$vclass
+  names(classes) <- x$vname
+  y$classes <- classes
+  # Ordinary data frame columns
+  df <- x$df
+  y$dfnames <- colnames(df)
+  y$df <- if(length(df) > 0 && !brief) summary(df) else NULL
+  y$row.names <- row.names(df)
+  # insert into full array
+  if(!brief && x$nvars > 0) {
+    isobject <- (x$vtype != "dfcolumn")
+    nobj <- sum(isobject)
+    if(nobj == 0) {
+      allcols <- y$df
+    } else {
+      nas <- rep(list(NA_character_), nobj)
+      names(nas) <- x$vname[isobject]
+      allcols <- do.call(cbind, append(list(y$df), nas))
+      acnames <- c(colnames(df), names(nas))
+      allcols <- allcols[ , match(x$vname, acnames), drop=FALSE]
+    }
+    pclass <- padtowidth(paren(classes), colnames(allcols), justify="right")
+    allcols <- as.table(rbind(class=pclass, as.table(allcols)))
+    row.names(allcols) <- rep("", nrow(allcols))
+    y$allcols <- allcols
+  }
+  class(y) <- c("summary.hyperframe", class(y))
+  return(y)
+}
+
+print.summary.hyperframe <- function(x, ...) {
+  nvars <- x$nvars
+  ncases <- x$ncases
+  splat(if(nvars * ncases == 0) "NULL hyperframe" else "hyperframe",
+        "with", ncases,
+        ngettext(ncases, "row", "rows"),
+        "and", nvars,
+        ngettext(nvars, "column", "columns"))
+  if(nvars == 0)
+    return(invisible(NULL))
+  print(if(any(x$storage == "dfcolumn")) x$allcols else noquote(x$classes))
+  return(invisible(NULL))
+}
+
+names.hyperframe <- function(x) { unclass(x)$vname }
+
+"names<-.hyperframe" <- function(x, value) {
+  x <- unclass(x)
+  stopifnot(is.character(value))
+  value <- make.names(value)
+  if(length(value) != x$nvars)
+    stop("Incorrect length for vector of names")
+  vtype <- x$vtype
+  names(x$df)           <- value[vtype == "dfcolumn"]
+  names(x$hyperatoms)   <- value[vtype == "hyperatom"]
+  names(x$hypercolumns) <- value[vtype == "hypercolumn"]
+  x$vname <- value
+  class(x) <- c("hyperframe", class(x))
+  return(x)
+}
+
+row.names.hyperframe <- function(x) {
+  return(row.names(unclass(x)$df))
+}
+
+"row.names<-.hyperframe" <- function(x, value) {
+  y <- unclass(x)
+  df <- y$df
+  row.names(df) <- value
+  y$df <- df
+  class(y) <- c("hyperframe", class(y))
+  return(y)
+}
+
+
+## conversion to hyperframe
+
+as.hyperframe <- function(x, ...) {
+  UseMethod("as.hyperframe")
+}
+
+as.hyperframe.hyperframe <- function(x, ...) {
+  return(x)
+}
+
+as.hyperframe.data.frame <- function(x, ..., stringsAsFactors=FALSE) {
+  xlist <- if(missing(x)) NULL else as.list(x)
+  do.call(hyperframe,
+          resolve.defaults(
+                           xlist,
+                           list(...),
+                           list(row.names=rownames(x),
+                                stringsAsFactors=stringsAsFactors),
+                           .StripNull=TRUE))
+}
+
+as.hyperframe.anylist <- 
+as.hyperframe.listof <- function(x, ...) {
+  if(!missing(x)) {
+    xname <- sensiblevarname(short.deparse(substitute(x)), "x")
+    xlist <- list(x)
+    names(xlist) <- xname
+  } else xlist <- NULL
+  do.call(hyperframe,
+          resolve.defaults(
+                           xlist,
+                           list(...),
+                           list(row.names=rownames(x)),
+                           .StripNull=TRUE))
+}
+
+as.hyperframe.default <- function(x, ...) {
+  as.hyperframe(as.data.frame(x, ...))
+}
+
+#### conversion to other types
+
+as.data.frame.hyperframe <- function(x, row.names = NULL,
+                                     optional = FALSE, ...,
+                                     discard=TRUE, warn=TRUE) {
+  ux <- unclass(x)
+  if(is.null(row.names))
+    row.names <- row.names(ux$df)
+  vtype <- ux$vtype
+  vclass <- ux$vclass
+  dfcol <- (vtype == "dfcolumn")
+  if(discard) { 
+    nhyper <- sum(!dfcol)
+    if(nhyper > 0 && warn)
+      warning(paste(nhyper, 
+                    ngettext(nhyper, "variable", "variables"),
+                    "discarded in conversion to data frame"))
+    df <- as.data.frame(ux$df, row.names=row.names, optional=optional, ...)
+  } else {
+    lx <- as.list(x)
+    nrows <- ux$ncases
+    vclassstring <- paren(vclass)
+    if(any(!dfcol)) 
+      lx[!dfcol] <- lapply(as.list(vclassstring[!dfcol]),
+                           rep.int, times=nrows)
+    df <- do.call(data.frame, append(lx, list(row.names=row.names)))
+    colnames(df) <- ux$vname
+  }
+  return(df)
+}
+
+as.list.hyperframe <- function(x, ...) {
+  ux <- unclass(x)
+  out <- vector(mode="list", length=ux$nvars)
+  vtype <- ux$vtype
+  df <- ux$df
+  if(any(dfcol <- (vtype == "dfcolumn")))
+    out[dfcol] <- as.list(df)
+  if(any(hypcol <- (vtype == "hypercolumn"))) {
+    hc <- lapply(ux$hypercolumns, as.solist, demote=TRUE)
+    out[hypcol] <- hc
+  }
+  if(any(hatom <- (vtype == "hyperatom"))) {
+    ha <- ux$hyperatoms
+    names(ha) <- NULL
+    hacol <- lapply(ha, list)
+    hacol <- lapply(hacol, rep.int, times=ux$ncases)
+    hacol <- lapply(hacol, as.solist, demote=TRUE)
+    out[hatom] <- hacol
+  }
+  out <- lapply(out, "names<-", value=row.names(df))
+  names(out) <- names(x)
+  return(out)
+}
+
+# evaluation
+
+eval.hyper <- function(e, h, simplify=TRUE, ee=NULL) {
+  .Deprecated("with.hyperframe", package="spatstat")
+  if(is.null(ee))
+    ee <- as.expression(substitute(e))
+  with.hyperframe(h, simplify=simplify, ee=ee)
+}
+
+with.hyperframe <- function(data, expr, ..., simplify=TRUE, ee=NULL,
+                            enclos=NULL) {
+  if(!inherits(data, "hyperframe"))
+    stop("data must be a hyperframe")
+  if(is.null(ee))
+    ee <- as.expression(substitute(expr))
+  if(is.null(enclos))
+    enclos <- parent.frame()
+  n <- nrow(data)
+  out <- vector(mode="list", length=n)
+  datalist <- as.list(data)
+  for(i in 1:n) {
+    rowi <- lapply(datalist, "[[", i=i)  # ensures the result is always a list
+    outi <- eval(ee, rowi, enclos)
+    if(!is.null(outi))
+      out[[i]] <- outi
+  }
+  names(out) <- row.names(data)
+  if(simplify && all(unlist(lapply(out, is.vector)))) {
+    # if all results are atomic vectors of equal length,
+    # return a matrix or vector.
+    lenfs <- lengths(out)
+    if(all(unlist(lapply(out, is.atomic))) &&
+            length(unique(lenfs)) == 1) {
+      out <- t(as.matrix(as.data.frame(out)))
+      row.names(out) <- row.names(data)
+      out <- out[,,drop=TRUE]
+      return(out)
+    }
+  }
+  out <- hyperframe(result=out, row.names=row.names(data))$result
+  return(out)
+}
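+
+## Illustrative usage (a hedged sketch), reusing the hyperframe 'H'
+## constructed in the example above:
+##   with(H, npoints(pattern))   # numeric vector, one entry per row
+##   with(H, distmap(pattern))   # solist of pixel images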
+
+cbind.hyperframe <- function(...) {
+  aarg <- list(...)
+  narg <- length(aarg)
+  if(narg == 0)
+    return(hyperframe())
+  namarg <- names(aarg)
+  if(is.null(namarg))
+    namarg <- rep.int("", narg)
+  ishyper <- unlist(lapply(aarg, inherits, what="hyperframe"))
+  isdf <- unlist(lapply(aarg, inherits, what="data.frame"))
+  columns <- list()
+  for(i in 1:narg) {
+    if(ishyper[i] || isdf[i]){
+      if(ncol(aarg[[i]]) > 0) {
+        newcolumns <- as.list(aarg[[i]])
+        if(namarg[i] != "")
+          names(newcolumns) <- paste(namarg[i], ".", names(newcolumns), sep="")
+        columns <- append(columns, newcolumns)
+      }
+    } else {
+      nextcolumn <- list(aarg[[i]])
+      names(nextcolumn) <- namarg[i]
+      columns <- append(columns, nextcolumn)
+    }
+  }
+  result <- do.call(hyperframe, columns)
+  return(result)
+}
+
+rbind.hyperframe <- function(...) {
+  argh <- list(...)
+  if(length(argh) == 0)
+    return(NULL)
+  # convert them all to hyperframes
+  argh <- lapply(argh, as.hyperframe)
+  #
+  nargh <- length(argh)
+  if(nargh == 1)
+    return(argh[[1L]])
+  # check for compatibility of dimensions & names
+  dfs <- lapply(argh, as.data.frame, discard=FALSE)
+  dfall <- do.call(rbind, dfs)
+  # check that data frame columns also match
+  dfs0 <- lapply(argh, as.data.frame, discard=TRUE, warn=FALSE)
+  df0all <- do.call(rbind, dfs0)
+  # assemble data
+  rslt <- list()
+  nam <- names(dfall) 
+  nam0 <- names(df0all)
+  for(k in seq_along(nam)) {
+    nama <- nam[k]
+    if(nama %in% nam0) {
+      # data frame column: already made
+      rslt[[k]] <- dfall[,k]
+    } else {
+      # hypercolumns or hyperatoms: extract them
+      hdata <- lapply(argh, "[", j=nama, drop=FALSE)
+      hdata <- lapply(lapply(hdata, as.list), getElement, name=nama)
+      # append them
+      hh <- hdata[[1L]]
+      for(j in 2:nargh) {
+        hh <- append(hh, hdata[[j]])
+      }
+      rslt[[k]] <- hh
+    }
+  }
+  # make hyperframe
+  names(rslt) <- nam
+  out <- do.call(hyperframe, append(rslt, list(stringsAsFactors=FALSE)))
+  return(out)
+}
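+
+## Illustrative usage (a hedged sketch): cbind glues columns together and
+## rbind stacks rows; hypercolumns are appended element-wise:
+##   H2 <- cbind(H, extra=letters[1:3])
+##   H4 <- rbind(H2, H2)   # six rows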
+
+plot.hyperframe <-
+  function(x, e, ..., main, arrange=TRUE,
+           nrows=NULL, ncols=NULL,
+           parargs=list(mar=mar * marsize),
+           marsize=1, mar=c(1,1,3,1)) {
+  xname <- short.deparse(substitute(x))
+  main <- if(!missing(main)) main else xname
+  mar <- rep(mar, 4)[1:4]
+  
+  if(missing(e)) {
+    # default: plot first column that contains objects
+    ok <- (summary(x)$storage %in% c("hypercolumn", "hyperatom"))
+    if(any(ok)) {
+      j <- min(which(ok))
+      x <- x[,j, drop=TRUE, strip=FALSE]
+      x <- as.solist(x, demote=TRUE)
+      plot(x, ..., main=main, arrange=arrange, nrows=nrows, ncols=ncols)
+      return(invisible(NULL))
+    } else {
+      # hyperframe does not contain any objects
+      # invoke plot.data.frame
+      x <- as.data.frame(x)
+      plot(x, ..., main=main)
+      return(invisible(NULL))
+    }
+  }
+
+  if(!is.language(e))
+    stop(paste("Argument e should be a call or an expression;",
+               "use quote(...) or expression(...)"))
+  ee <- as.expression(e)
+
+  if(!arrange) {
+    # No arrangement specified: just evaluate the plot expression 'nr' times
+    with(x, ee=ee)
+    return(invisible(NULL))
+  }
+
+  # Arrangement
+  # Decide whether to plot a main header
+  banner <- (sum(nchar(as.character(main))) > 0)
+  if(length(main) > 1)
+    main <- paste(main, collapse="\n")
+  nlines <- if(!is.character(main)) 1 else length(unlist(strsplit(main, "\n")))
+  # determine arrangement of plots
+  # arrange like mfrow(nrows, ncols) plus a banner at the top
+  n <- summary(x)$ncases
+  if(is.null(nrows) && is.null(ncols)) {
+    nrows <- as.integer(floor(sqrt(n)))
+    ncols <- as.integer(ceiling(n/nrows))
+  } else if(!is.null(nrows) && is.null(ncols))
+    ncols <- as.integer(ceiling(n/nrows))
+  else if(is.null(nrows) && !is.null(ncols))
+    nrows <- as.integer(ceiling(n/ncols))
+  else stopifnot(nrows * ncols >= n)
+  nblank <- ncols * nrows - n
+  # declare layout
+  mat <- matrix(c(seq_len(n), numeric(nblank)),
+                byrow=TRUE, ncol=ncols, nrow=nrows)
+  heights <- rep.int(1, nrows)
+  if(banner) {
+    # Increment existing panel numbers
+    # New panel 1 is the banner
+    panels <- (mat > 0)
+    mat[panels] <- mat[panels] + 1L
+    mat <- rbind(rep.int(1,ncols), mat)
+    heights <- c(0.1 * (1 + nlines), heights)
+  }
+  # initialise plot
+  layout(mat, heights=heights)
+  # plot banner
+  if(banner) {
+    opa <- par(mar=rep.int(0,4), xpd=TRUE)
+    plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE,
+         xlim=c(-1,1),ylim=c(-1,1))
+    cex <- resolve.defaults(list(...), list(cex.title=2))$cex.title
+    text(0,0,main, cex=cex)
+  }
+  # plot panels
+  npa <- do.call(par, parargs)
+  if(!banner) opa <- npa
+  with(x, ee=ee)
+  # revert
+  layout(1)
+  par(opa)
+  return(invisible(NULL))
+}
+
+
+str.hyperframe <- function(object, ...) {
+  d <- dim(object)
+  x <- unclass(object)
+  argh <- resolve.defaults(list(...), list(nest.lev=0, indent.str="  .."))
+  cat(paste("'hyperframe':\t",
+            d[1L], ngettext(d[1L], "row", "rows"),
+            "and",
+            d[2L], ngettext(d[2L], "column", "columns"),
+            "\n"))
+  nr <- d[1L]
+  nc <- d[2L]
+  if(nc > 0) {
+    vname <- x$vname
+    vclass <- x$vclass
+    vtype  <- as.character(x$vtype)
+    indentstring <- with(argh, paste(rep.int(indent.str, nest.lev), collapse=""))
+    for(j in 1:nc) {
+      tag <- paste("$", vname[j])
+      switch(vtype[j],
+             dfcolumn={
+               desc <- vclass[j]
+               if(nr > 0) {
+                 vals <- object[1:min(nr,3),j,drop=TRUE]
+                 vals <- paste(paste(format(vals), collapse=" "), "...")
+               } else vals <- ""
+             },
+             hypercolumn=,
+             hyperatom={
+               desc <- "objects of class"
+               vals <- vclass[j]
+             })
+      cat(paste(paste(indentstring, tag, sep=""),
+                ":", desc, vals, "\n"))
+    }
+  }
+  return(invisible(NULL))
+}
+
+subset.hyperframe <- function(x, subset, select, ...) {
+  stopifnot(is.hyperframe(x))
+  r <- if(missing(subset)) {
+    rep_len(TRUE, nrow(x))
+  } else {
+    r <- eval(substitute(
+      with(x, e, enclos=parent.frame()),
+      list(e=substitute(subset))))
+    if(!is.logical(r))
+      stop("'subset' must be logical")
+    r & !is.na(r)
+  }
+  vars <- if(missing(select)) { 
+    TRUE
+  } else {
+    nl <- as.list(seq_len(ncol(x)))
+    names(nl) <- names(x)
+    eval(substitute(select), nl, parent.frame())
+  }
+  nama <- names(x)
+  names(nama) <- nama
+  vars <- nama[vars]
+  z <- x[i=r, j=vars, ...]
+  return(z)
+}
+
+head.hyperframe <- function (x, n = 6L, ...) {
+  stopifnot(length(n) == 1L)
+  n <- if(n < 0L) max(nrow(x) + n, 0L) else min(n, nrow(x))
+  x[seq_len(n), , drop = FALSE]
+}
+
+tail.hyperframe <- function(x, n = 6L, ...) {
+  stopifnot(length(n) == 1L)
+  nrx <- nrow(x)
+  n <- if(n < 0L) max(nrx + n, 0L) else min(n, nrx)
+  sel <- seq.int(to = nrx, length.out = n)
+  x[sel, , drop = FALSE]
+}
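+
+## Illustrative usage (a hedged sketch): subset, head and tail behave
+## much as for data frames, e.g. for the hyperframe 'H' sketched in
+## hyperframe.R:
+##   subset(H, id > 1, select=pattern)
+##   head(H, 2)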
+
+edit.hyperframe <- function(name, ...) {
+  x <- name
+  isdf <- unclass(x)$vtype == "dfcolumn"
+  if(!any(isdf)) {
+    warning("No columns of editable data", call.=FALSE)
+    return(x)
+  }
+  y <- x[,isdf]
+  ynew <- edit(as.data.frame(y), ...)
+  xnew <- x
+  for(na in names(ynew)) xnew[,na] <- ynew[,na]
+  losenames <- setdiff(names(y), names(ynew))
+  for(na in losenames) xnew[,na] <- NULL
+  return(xnew)
+}
diff --git a/R/hypersub.R b/R/hypersub.R
new file mode 100755
index 0000000..597041e
--- /dev/null
+++ b/R/hypersub.R
@@ -0,0 +1,213 @@
+##
+## hypersub.R
+##
+##
+##  subset operations for hyperframes
+##
+##  $Revision: 1.25 $    $Date: 2017/02/07 07:35:32 $
+##
+
+"[.hyperframe" <- function(x, i, j, drop=FALSE, strip=drop, ...) {
+  x <- unclass(x)
+  if(!missing(i)) {
+    y <- x
+    y$df     <- x$df[i, , drop=FALSE]
+    y$ncases <- nrow(y$df)
+    y$hypercolumns <- lapply(x$hypercolumns, "[", i=i)
+    x <- y
+  }
+  if(!missing(j)) {
+    y <- x
+    patsy <- seq_len(y$nvars)
+    names(patsy) <- y$vname
+    jj <- patsy[j]
+    names(jj) <- NULL
+    y$nvars <- length(jj)
+    y$vname <- vname <- x$vname[jj]
+    y$vtype <- vtype <- x$vtype[jj]
+    y$vclass <- x$vclass[jj]
+    if(ncol(x$df) != 0) 
+      y$df    <- x$df[ , vname[vtype == "dfcolumn"], drop=FALSE]
+    y$hyperatoms <- x$hyperatoms[ vname[ vtype == "hyperatom" ]]
+    y$hypercolumns <- x$hypercolumns[ vname [ vtype == "hypercolumn" ] ]
+    x <- y
+  }
+  if(drop) {
+    nrows <- x$ncases
+    ncols <- x$nvars
+    if(nrows == 1 && ncols == 1 && strip) {
+      ## return a single object 
+      y <- switch(as.character(x$vtype),
+                  dfcolumn    = x$df[, , drop=TRUE],
+                  hypercolumn = (x$hypercolumns[[1L]])[[1L]],
+                  hyperatom   = x$hyperatoms[[1L]])
+      return(y)
+    } else if(nrows == 1) {
+      ## return the row as a vector or a list
+      if(strip && all(x$vtype == "dfcolumn"))
+        return(x$df[ , , drop=TRUE])
+      n <- x$nvars
+      y <- vector(mode="list", length=n)
+      names(y) <- nama <- x$vname
+      for(i in seq_len(n)) {
+        nami <- nama[i]
+        y[[i]] <- switch(as.character(x$vtype[i]),
+                         dfcolumn = x$df[ , nami, drop=TRUE],
+                         hyperatom = x$hyperatoms[[nami]],
+                         hypercolumn = (x$hypercolumns[[nami]])[[1L]]
+                         )
+      }
+      return(as.solist(y, demote=TRUE))
+    } else if(ncols == 1) {
+      ## return a column as an 'anylist'/'solist' or a vector
+      switch(as.character(x$vtype),
+             dfcolumn = {
+               return(x$df[, , drop=TRUE])
+             },
+             hypercolumn = {
+               y <- as.solist(x$hypercolumns[[1L]], demote=TRUE)
+               names(y) <- row.names(x$df)
+               return(y)
+             },
+             hyperatom = {
+               ## replicate it to make a hypercolumn
+               ha <- x$hyperatoms[1L]
+               names(ha) <- NULL
+               hc <- rep.int(ha, x$ncases)
+               hc <- as.solist(hc, demote=TRUE)
+               names(hc) <- row.names(x$df)
+               return(hc)
+             }
+           )
+    }
+  }
+  class(x) <- c("hyperframe", class(x))
+  return(x)
+}
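+
+## Illustrative usage (a hedged sketch), assuming a hyperframe 'H' as
+## sketched in hyperframe.R. With drop=TRUE a single column collapses to
+## a vector or solist, and a single cell to the object itself:
+##   H[1, ]                        # one-row hyperframe
+##   H[, "pattern", drop=TRUE]     # solist of point patterns
+##   H[2, "pattern", drop=TRUE]    # a single object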
+
+"$.hyperframe" <- function(x,name) {
+  m <- match(name, unclass(x)$vname)
+  if(is.na(m))
+    return(NULL)
+  return(x[, name, drop=TRUE, strip=FALSE])
+}
+
+"$<-.hyperframe" <- function(x, name, value) {
+  y <- as.list(x)
+  if(is.hyperframe(value)) {
+    if(ncol(value) == 1) {
+      y[name] <- as.list(value)
+    } else {
+      y <- insertinlist(y, name, as.list(value))
+    }
+  } else {
+    dfcol <- is.atomic(value) && (is.vector(value) || is.factor(value))
+    if(!dfcol && !is.null(value))
+      value <- as.list(value)
+    y[[name]] <- value
+  }
+  z <- do.call(hyperframe, append(y, list(row.names=row.names(x),
+                                            stringsAsFactors=FALSE)))
+  return(z)
+}
+
+"[<-.hyperframe" <- 
+function (x, i, j, value)
+{
+  sumry <- summary(x)
+  colnam <- sumry$col.names
+  dimx <- sumry$dim
+  igiven <- !missing(i)
+  jgiven <- !missing(j)
+  if(!igiven) i <- seq_len(dimx[1L])
+  if(!jgiven) j <- seq_len(dimx[2L])
+#  singlerow    <- ((is.integer(i) && length(i) == 1 && i > 0)
+#                   || (is.character(i) && length(i) == 1)
+#                   || (is.logical(i) && sum(i) == 1))
+  singlecolumn <- ((is.integer(j) && length(j) == 1 && j > 0)
+                   || (is.character(j) && length(j) == 1)
+                   || (is.logical(j) && sum(j) == 1))
+  if(!igiven && jgiven) {
+    # x[, j] <- value
+    if(singlecolumn) {
+      # expecting single hypercolumn
+      if(is.logical(j)) j <- names(x)[j]
+      y <- get("$<-.hyperframe")(x, j, value)
+    } else {
+      # expecting hyperframe 
+      xlist <- as.list(x)
+      xlist[j] <- as.list(as.hyperframe(value))
+      # the above construction accepts all indices including extra entries
+      y <- do.call(hyperframe, append(xlist,
+                                        list(row.names=row.names(x))))
+    }
+  } else {
+    ## x[, ] <- value or x[i, ] <- value or x[i,j] <- value 
+    ## convert indices to positive integers
+    rowseq <- seq_len(dimx[1L])
+    colseq <- seq_len(dimx[2L])
+    names(rowseq) <- row.names(x)
+    names(colseq) <- colnam
+    I <- rowseq[i]
+    J <- colseq[j]
+    ## convert to lists 
+    xlist <- as.list(x)
+    hv <- if(is.hyperframe(value)) value else
+          as.hyperframe(as.solist(value, demote=TRUE))
+    vlist <- as.list(hv)
+    nrowV <- dim(hv)[1L]
+    ncolV <- dim(hv)[2L]
+    if(nrowV != length(I)) {
+      if(nrowV == 1) {
+        ## replicate the single row to match the number of target rows
+        vlist <- lapply(vlist, rep, times=length(I))
+      } else stop(paste("Replacement value has wrong number of rows:",
+                        nrowV, "should be", length(I)),
+                  call.=FALSE)
+    }
+    if(ncolV != length(J)) {
+      if(ncolV == 1) {
+        ## replicate the single column to match the number of target columns
+        vlist <- rep(vlist, times=length(J))
+      } else stop(paste("Replacement value has wrong number of columns:",
+                        ncolV, "should be", length(J)),
+                  call.=FALSE)
+    }
+    ## replace entries: the k-th column of the value goes into column J[k]
+    for(k in seq_along(J))
+      xlist[[J[k]]][I] <- vlist[[k]]
+    ## put back together
+    y <- do.call(hyperframe, append(xlist,
+                                      list(row.names=row.names(x))))
+  } 
+  return(y)
+}
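+
+## Replacement sketch (not run), continuing the hyperframe example above:
+##   H$n <- sapply(H$pts, npoints)   # add a plain data-frame column
+##   H[, "id"] <- c(10, 20)          # replace an entire column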
+
+
+split.hyperframe <- local({
+
+  split.hyperframe <- function(x, f, drop=FALSE, ...) {
+    y <- data.frame(id=seq_len(nrow(x)))
+    z <- split(y, f, drop=drop)
+    z <- lapply(z, getElement, name="id")
+    out <- lapply(z, indexi, x=x)
+    return(out)
+  }
+
+  indexi <- function(i, x) x[i,]
+  
+  split.hyperframe
+})
+
+
+"split<-.hyperframe" <- function(x, f, drop=FALSE, ..., value) {
+  ix <- split(seq_len(nrow(x)), f, drop = drop, ...)
+  n <- length(value)
+  j <- 0
+  for (i in ix) {
+    j <- j%%n + 1L
+    x[i, ] <- value[[j]]
+  }
+  x
+}
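+
+## e.g. (not run): split rows of H by a grouping factor,
+## yielding a list of single-row hyperframes:
+##   split(H, factor(c("a", "b")))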
+  
diff --git a/R/idw.R b/R/idw.R
new file mode 100755
index 0000000..c333d6e
--- /dev/null
+++ b/R/idw.R
@@ -0,0 +1,69 @@
+#
+#  idw.R
+#
+#  Inverse-distance weighted smoothing
+#
+#  $Revision: 1.9 $ $Date: 2017/06/05 10:31:58 $
+
+idw <- function(X, power=2, at="pixels", ...) {
+  stopifnot(is.ppp(X) && is.marked(X))
+  marx <- marks(X)
+  if(is.data.frame(marx)) {
+    if(ncol(marx) > 1) {
+      # multiple columns of marks - process one-by-one
+      out <- list()
+      for(j in 1:ncol(marx)) 
+        out[[j]] <- idw(X %mark% marx[,j], power=power, at=at, ...)
+      names(out) <- names(marx)
+      switch(at,
+             pixels = { out <- as.solist(out) },
+             points = { out <- as.data.frame(out) })
+      return(out)
+    } else 
+      marx <- marx[,1L]
+  }
+  if(!is.numeric(marx))
+    stop("Marks must be numeric")
+  check.1.real(power)
+  switch(at,
+         pixels = {
+           # create grid
+           W <- as.mask(as.owin(X), ...)
+           dim <- W$dim
+           npixels <- prod(dim)
+           # call C
+           z <- .C("Cidw",
+                   x = as.double(X$x),
+                   y = as.double(X$y),
+                   v = as.double(marx),
+                   n = as.integer(npoints(X)),
+                   xstart = as.double(W$xcol[1L]),
+                   xstep  = as.double(W$xstep),
+                   nx     = as.integer(dim[2L]),
+                   ystart = as.double(W$yrow[1L]),
+                   ystep  = as.double(W$ystep),
+                   ny     = as.integer(dim[1L]),
+                   power  = as.double(power),
+                   num    = as.double(numeric(npixels)),
+                   den    = as.double(numeric(npixels)),
+                   rat    = as.double(numeric(npixels)),
+                   PACKAGE = "spatstat")
+           out <- as.im(matrix(z$rat, dim[1L], dim[2L]), W=W)
+           out <- out[W, drop=FALSE]
+         },
+         points={
+           npts <- npoints(X)
+           z <- .C("idwloo",
+                   x = as.double(X$x),
+                   y = as.double(X$y),
+                   v = as.double(marx),
+                   n = as.integer(npts),
+                   power  = as.double(power),
+                   num    = as.double(numeric(npts)),
+                   den    = as.double(numeric(npts)),
+                   rat    = as.double(numeric(npts)),
+                   PACKAGE = "spatstat")
+           out <- z$rat
+         })
+  return(out)
+}
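+
+## Usage sketch (not run): interpolate the numeric marks of a marked
+## point pattern such as the 'longleaf' dataset:
+##   Z <- idw(longleaf, power=2, at="pixels")  # pixel image of smoothed marks
+##   v <- idw(longleaf, power=2, at="points")  # leave-one-out values at data points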
diff --git a/R/images.R b/R/images.R
new file mode 100755
index 0000000..faa2762
--- /dev/null
+++ b/R/images.R
@@ -0,0 +1,1179 @@
+#
+#       images.R
+#
+#      $Revision: 1.145 $     $Date: 2017/06/05 10:31:58 $
+#
+#      The class "im" of raster images
+#
+#     im()     object creator
+#
+#     is.im()   tests class membership
+#
+#     rasterx.im(), rastery.im()    
+#                      raster X and Y coordinates
+#
+#     nearest.pixel()   
+#     lookup.im()
+#                      facilities for looking up pixel values
+#
+################################################################
+########   basic support for class "im"
+################################################################
+#
+#   creator 
+
+im <- function(mat, xcol=seq_len(ncol(mat)), yrow=seq_len(nrow(mat)), 
+               xrange=NULL, yrange=NULL,
+               unitname=NULL) {
+
+  typ <- typeof(mat)
+  if(typ == "double")
+    typ <- "real"
+
+  miss.xcol <- missing(xcol)
+  miss.yrow <- missing(yrow)
+  
+  # determine dimensions
+  if(!is.null(dim(mat))) {
+    nr <- nrow(mat)
+    nc <- ncol(mat)
+    if(length(xcol) != nc)
+      stop("Length of xcol does not match ncol(mat)")
+    if(length(yrow) != nr)
+      stop("Length of yrow does not match nrow(mat)")
+  } else {
+    if(miss.xcol || miss.yrow)
+      stop(paste(sQuote("mat"),
+                 "is not a matrix and I can't guess its dimensions"))
+    stopifnot(length(mat) == length(xcol) * length(yrow))
+    nc <- length(xcol)
+    nr <- length(yrow)
+  }
+
+  # deal with factor case
+  if(is.factor(mat)) {
+    typ <- "factor"
+  } else if(!is.null(lev <- levels(mat))) {
+    typ <- "factor"
+    mat <- factor(mat, levels=lev)
+  }
+
+  # Ensure 'mat' is a matrix (without destroying factor information)
+  if(!is.matrix(mat))
+    dim(mat) <- c(nr, nc)
+
+  # set up coordinates
+  if((miss.xcol || length(xcol) <= 1) && !is.null(xrange) ) {
+    # use 'xrange' 
+    xstep <- diff(xrange)/nc
+    xcol <- seq(from=xrange[1L] + xstep/2, to=xrange[2L] - xstep/2, length.out=nc)
+  } else if(length(xcol) > 1) {
+    # use 'xcol'
+    # ensure spacing is constant
+    xcol <- seq(from=min(xcol), to=max(xcol), length.out=length(xcol))
+    xstep <- diff(xcol)[1L]
+    xrange <- range(xcol) + c(-1,1) * xstep/2
+  } else stop("Cannot determine pixel width")
+  
+  if((miss.yrow || length(yrow) <= 1) && !is.null(yrange)) {
+    # use 'yrange'
+    ystep <- diff(yrange)/nr
+    yrow <- seq(from=yrange[1L] + ystep/2, to=yrange[2L] - ystep/2, length.out=nr)
+  } else if(length(yrow) > 1) {
+    # use 'yrow'
+    # ensure spacing is constant
+    yrow <- seq(from=min(yrow), to=max(yrow), length.out=length(yrow))
+    ystep <- diff(yrow)[1L]
+    yrange <- range(yrow) + c(-1,1) * ystep/2
+  }  else stop("Cannot determine pixel height")
+
+  unitname <- as.units(unitname)
+
+  out <- list(v   = mat,
+              dim = c(nr, nc),
+              xrange   = xrange,
+              yrange   = yrange,
+              xstep    = xstep,
+              ystep    = ystep,
+              xcol    = xcol,
+              yrow    = yrow,
+              type    = typ,
+              units   = unitname)
+  class(out) <- "im"
+  return(out)
+}
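+
+## Construction sketch (not run): a 10 x 10 pixel image of random values
+## on the unit square, specified via 'xrange' and 'yrange':
+##   Z <- im(matrix(runif(100), 10, 10), xrange=c(0,1), yrange=c(0,1))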
+
+is.im <- function(x) {
+  inherits(x,"im")
+}
+
+levels.im <- function(x) {
+  levels(x$v)
+}
+
+"levels<-.im" <- function(x, value) {
+  if(x$type != "factor") 
+    stop("image is not factor-valued")
+  levels(x$v) <- value
+  x
+}
+
+################################################################
+########   methods for class "im"
+################################################################
+
+shift.im <- function(X, vec=c(0,0), ..., origin=NULL) {
+  verifyclass(X, "im")
+  if(!is.null(origin)) {
+    stopifnot(is.character(origin))
+    if(!missing(vec))
+      warning("argument vec ignored; overruled by argument origin")
+    origin <- pickoption("origin", origin, c(centroid="centroid",
+                                             midpoint="midpoint",
+                                             bottomleft="bottomleft"))
+    W <- as.owin(X)
+    locn <- switch(origin,
+                   centroid={ unlist(centroid.owin(W)) },
+                   midpoint={ c(mean(W$xrange), mean(W$yrange)) },
+                   bottomleft={ c(W$xrange[1L], W$yrange[1L]) })
+    return(shift(X, -locn))
+  }
+  X$xrange <- X$xrange + vec[1L]
+  X$yrange <- X$yrange + vec[2L]
+  X$xcol <- X$xcol + vec[1L]
+  X$yrow <- X$yrow + vec[2L]
+  attr(X, "lastshift") <- vec
+  return(X)
+}
+
+"Frame<-.im" <- function(X, value) {
+  stopifnot(is.rectangle(value))
+  if(!is.subset.owin(value, Frame(X))) {
+    ## first expand
+    X <- X[value, drop=FALSE]
+  }
+  X[value, drop=TRUE]
+}
+
+
+"[.im" <- local({
+
+  disjoint <- function(r, s) { (r[2L] < s[1L]) || (r[1L] > s[2L])  }
+  clip <- function(r, s) { c(max(r[1L],s[1L]), min(r[2L],s[2L])) }
+  inrange <- function(x, r) { (x >= r[1L]) & (x <= r[2L]) }
+
+  Extract.im <- function(x, i, j, ...,
+                         drop=TRUE, tight=FALSE, raster=NULL,
+                         rescue=is.owin(i)) {
+
+    ## detect 'blank' arguments like second argument in x[i, ] 
+    ngiven <- length(sys.call())
+    nmatched <- length(match.call())
+    nblank <- ngiven - nmatched
+    itype <- if(missing(i)) "missing" else "given"
+    jtype <- if(missing(j)) "missing" else "given"
+    if(nblank == 1) {
+      if(!missing(i)) jtype <- "blank"
+      if(!missing(j)) itype <- "blank"
+    } else if(nblank == 2) {
+      itype <- jtype <- "blank"
+    }
+
+    if(missing(rescue) && itype != "given")
+      rescue <- FALSE
+    
+    if(itype == "missing" && jtype == "missing") {
+      ## no indices: return entire image 
+      out <- if(is.null(raster)) x else as.im(raster)
+      xy <- expand.grid(y=out$yrow,x=out$xcol)
+      if(!is.null(raster)) {
+        ## resample image on new pixel raster
+        values <- lookup.im(x, xy$x, xy$y, naok=TRUE)
+        out <- im(values, out$xcol, out$yrow, unitname=unitname(out))
+      }
+      if(!drop)
+        return(out)
+      else {
+        v <- out$v
+        return(v[!is.na(v)])
+      }
+    }
+
+    if(itype == "given") {
+      ## .................................................................
+      ## Try spatial index
+      ## .................................................................
+      if(verifyclass(i, "owin", fatal=FALSE)) {
+
+        if(jtype == "given")
+          warning("Argument j ignored")
+      
+        ## 'i' is a window
+        ## if drop = FALSE, just set values outside window to NA
+        ## if drop = TRUE, extract values for all pixels inside window
+        ##                 as an image (if 'i' is a rectangle)
+        ##                 or as a vector (otherwise)
+
+        ## determine pixel raster for output
+        if(!is.null(raster)) {
+          out <- as.im(raster)
+          do.resample <- TRUE
+        } else if(is.subset.owin(i, as.owin(x))) {
+          out <- x
+          do.resample <- FALSE
+        } else {
+          ## new window does not contain data window: expand it
+          bb <- boundingbox(as.rectangle(i), as.rectangle(x))
+          rr <- if(is.mask(i)) i else x
+          xcol <- prolongseq(rr$xcol, bb$xrange, rr$xstep)
+          yrow <- prolongseq(rr$yrow, bb$yrange, rr$ystep)
+          out <- list(xcol=xcol, yrow=yrow)
+          do.resample <- TRUE
+        }
+        xy <- expand.grid(y=out$yrow,x=out$xcol)
+        if(do.resample) {
+          ## resample image on new pixel raster
+          values <- lookup.im(x, xy$x, xy$y, naok=TRUE)
+          out <- im(values, out$xcol, out$yrow, unitname=unitname(out))
+        }
+        inside <- inside.owin(xy$x, xy$y, i)
+        if(!drop) {
+          ## set other pixels to NA and return image
+          out$v[!inside] <- NA
+          if(!tight)
+            return(out)
+        } else if(!(rescue && i$type == "rectangle")) {
+          ## return pixel values
+          values <- out$v[inside]
+          return(values)
+        }
+        ## return image in smaller rectangle
+        if(disjoint(i$xrange, x$xrange) || disjoint(i$yrange, x$yrange))
+          ## empty intersection
+          return(numeric(0))
+        xr <- clip(i$xrange, x$xrange)
+        yr <- clip(i$yrange, x$yrange)
+        colsub <- inrange(out$xcol, xr)
+        rowsub <- inrange(out$yrow, yr)
+        ncolsub <- sum(colsub)
+        nrowsub <- sum(rowsub)
+        if(ncolsub == 0 || nrowsub == 0)
+          return(numeric(0))
+        marg <- list(mat=out$v[rowsub, colsub, drop=FALSE],
+                     unitname=unitname(x))
+        xarg <-
+          if(ncolsub > 1) list(xcol = out$xcol[colsub]) else list(xrange=xr)
+        yarg <-
+          if(nrowsub > 1) list(yrow = out$yrow[rowsub]) else list(yrange=yr)
+        result <- do.call(im, c(marg, xarg, yarg))
+        return(result)
+      }
+      if(verifyclass(i, "im", fatal=FALSE)) {
+        if(jtype == "given")
+          warning("Argument j ignored")
+        ## logical images OK
+        if(i$type == "logical") {
+          ## convert to window
+          w <- as.owin(eval.im(ifelse1NA(i)))
+          return(x[w, drop=drop, ..., raster=raster])
+        } else stop("Subset argument \'i\' is an image, but not of logical type")
+      }
+
+      if(inherits(i, "linnet")) {
+        #' linear network
+        if(jtype == "given")
+          warning("Argument j ignored")
+        W <- raster %orifnull% as.owin(x)
+        M <- as.mask.psp(as.psp(i), W=W, ...)
+        xM <- x[M, drop=drop]
+        if(is.im(xM)) xM <- linim(i, xM)
+        return(xM)
+      }
+      
+      if(is.ppp(i)) {
+        ## 'i' is a point pattern 
+        if(jtype == "given")
+          warning("Argument j ignored")
+        ## Look up the greyscale values for the points of the pattern
+        values <- lookup.im(x, i$x, i$y, naok=TRUE)
+        if(drop) 
+          values <- values[!is.na(values)]
+        if(length(values) == 0) 
+          ## ensure the zero-length vector is of the right type
+          values <- 
+            switch(x$type,
+                   factor={ factor(, levels=levels(x)) },
+                   integer = { integer(0) },
+                   logical = { logical(0) },
+                   real = { numeric(0) },
+                   complex = { complex(0) },
+                   character = { character(0) },
+                   { values }
+                   )
+        return(values)
+      }
+    }
+    ## ............... not a spatial index .............................
+
+    ## Try indexing as a matrix
+
+    ## Construct a matrix index call for possible re-use
+    M <- as.matrix(x)
+    ## suppress warnings from code checkers
+    dont.complain.about(M)
+    ##
+    ycall <- switch(itype,
+                    given = {
+                      switch(jtype,
+                             given   = quote(M[i, j, drop=FALSE]),
+                             blank   = quote(M[i,  , drop=FALSE]),
+                             missing = quote(M[i,    drop=FALSE]))
+                    },
+                    blank = {
+                      switch(jtype,
+                             given   = quote(M[ , j, drop=FALSE]),
+                             blank   = quote(M[ ,  , drop=FALSE]),
+                             missing = quote(M[ ,    drop=FALSE]))
+                    },
+                    missing = {
+                      switch(jtype,
+                             given   = quote(M[j=j,  drop=FALSE]),
+                             blank   = quote(M[j= ,  drop=FALSE]),
+                             missing = quote(M[      drop=FALSE]))
+                    })
+    ## try it
+    y <- try(eval(as.call(ycall)), silent=TRUE)
+    if(!inherits(y, "try-error")) {
+      ## valid subset index for a matrix
+      if(rescue) {
+        ## check whether it's a rectangular block, in correct order
+        RR <- row(x$v)
+        CC <- col(x$v)
+        rcall <- ycall
+        rcall[[2L]] <- quote(RR)
+        ccall <- ycall
+        ccall[[2L]] <- quote(CC)
+        rr <- eval(as.call(rcall))
+        cc <- eval(as.call(ccall))
+        rseq <- sort(unique(as.vector(rr)))
+        cseq <- sort(unique(as.vector(cc)))
+        if(all(diff(rseq) == 1) && all(diff(cseq) == 1) &&
+           (length(rr) == length(rseq) * length(cseq)) &&
+           all(rr == RR[rseq, cseq]) && all(cc == CC[rseq,cseq])) {
+          ## yes - make image
+          dim(y) <- c(length(rseq), length(cseq))
+          Y <- x
+          Y$v <- y
+          Y$dim <- dim(y)
+          Y$xcol <- x$xcol[cseq]
+          Y$yrow <- x$yrow[rseq]
+          Y$xrange <- range(Y$xcol) + c(-1,1) * x$xstep/2
+          Y$yrange <- range(Y$yrow) + c(-1,1) * x$ystep/2
+          return(Y)
+        }
+      }
+      ## return pixel values (possibly as matrix)
+      return(y)
+    }
+
+    ## Last chance!
+    if(itype == "given" &&
+       !is.matrix(i) &&
+       !is.null(ip <- as.ppp(i, W=as.owin(x), fatal=FALSE, check=FALSE))) {
+      ## 'i' is convertible to a point pattern 
+      ## Look up the greyscale values for the points of the pattern
+      values <- lookup.im(x, ip$x, ip$y, naok=TRUE)
+      if(drop) 
+        values <- values[!is.na(values)]
+      if(length(values) == 0) 
+        ## ensure the zero-length vector is of the right type
+        values <- 
+          switch(x$type,
+                 factor={ factor(, levels=levels(x)) },
+                 integer = { integer(0) },
+                 logical = { logical(0) },
+                 real = { numeric(0) },
+                 complex = { complex(0) },
+                 character = { character(0) },
+                 { values }
+                 )
+      return(values)
+    }
+  
+    stop("The subset operation is undefined for this type of index")
+  }
+
+  Extract.im
+})
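+
+## Subsetting sketch (not run), using a function image on the unit square:
+##   Z <- as.im(function(x,y) x + y, W=square(1))
+##   Z[square(0.5)]               # rectangle index: 'rescued' as a smaller image
+##   Z[disc(0.3, c(0.5, 0.5))]    # irregular window: vector of pixel values
+##   Z[square(0.5), drop=FALSE]   # same raster, pixels outside set to NA
+##   Z[list(x=0.25, y=0.75)]      # value at a single location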
+
+update.im <- function(object, ...) {
+  ## update internal structure of image after manipulation
+  X <- object
+  mat <- X$v
+  typ <- typeof(mat)
+  if(typ == "double")
+    typ <- "real"
+  ## deal with factor case
+  if(is.factor(mat)) {
+    typ <- "factor"
+  } else if(!is.null(lev <- levels(mat))) {
+    typ <- "factor"
+    X$v <- factor(mat, levels=lev)
+  }
+  X$type <- typ
+  return(X)
+}
+
+"[<-.im" <- function(x, i, j, value) {
+  # detect 'blank' arguments like second argument of x[i, ] 
+  ngiven <- length(sys.call())
+  nmatched <- length(match.call())
+  nblank <- ngiven - nmatched
+  itype <- if(missing(i)) "missing" else "given"
+  jtype <- if(missing(j)) "missing" else "given"
+  if(nblank == 1) {
+    if(!missing(i)) jtype <- "blank"
+    if(!missing(j)) itype <- "blank"
+  } else if(nblank == 2) {
+    itype <- jtype <- "blank"
+  }
+
+  X <- x
+  W <- as.owin(X)
+
+  stopifnot(is.im(value) || is.vector(value) ||
+            is.matrix(value) || is.array(value) || is.factor(value))
+  if(is.im(value)) 
+    value <- value$v
+
+  if(itype == "missing" && jtype == "missing") {
+    # no index provided
+    # set all pixels to 'value'
+    v <- X$v
+    if(!is.factor(value)) {
+      v[!is.na(v)] <- value
+    } else {
+      vnew <- matrix(NA_integer_, nrow(v), ncol(v))
+      vnew[!is.na(v)] <- as.integer(value)
+      v <- factor(vnew, levels=seq_along(levels(value)),
+                  labels=levels(value))
+      dim(v) <- dim(vnew)
+    }
+    X$v <- v
+    return(update(X))
+  }
+  if(itype == "given") {
+    # ..................... Try a spatial index ....................
+    if(verifyclass(i, "owin", fatal=FALSE)) {
+      if(jtype == "given") warning("Index j ignored")
+      # 'i' is a window
+      if(is.empty(i))
+        return(X)
+      rxy <- rasterxy.mask(W)
+      xx <- rxy$x
+      yy <- rxy$y
+      ok <- inside.owin(xx, yy, i)
+      X$v[ok] <- value
+      X$type <- ifelse(is.factor(X$v), "factor", typeof(X$v))
+      return(update(X))
+    }
+    if(verifyclass(i, "im", fatal=FALSE) && i$type == "logical") {
+      if(jtype == "given") warning("Index j ignored")
+      # convert logical vector to window where entries are TRUE
+      i <- as.owin(eval.im(ifelse1NA(i)))
+      # continue as above
+      rxy <- rasterxy.mask(W)
+      xx <- rxy$x
+      yy <- rxy$y
+      ok <- inside.owin(xx, yy, i)
+      X$v[ok] <- value
+      X$type <- ifelse(is.factor(X$v), "factor", typeof(X$v))
+      return(update(X))
+    }
+    if(is.ppp(i)) {
+      # 'i' is a point pattern
+      if(jtype == "given") warning("Index j ignored")
+      nv <- length(value)
+      np <- npoints(i)
+      if(nv != np && nv != 1)
+        stop("Length of replacement value != number of point locations")
+      # test whether all points are inside window FRAME
+      ok <- inside.owin(i$x, i$y, as.rectangle(W))
+      if(any(!ok)) {
+        warning("Some points are outside the outer frame of the image")
+        if(nv == np)
+          value <- value[ok]
+        i <- i[ok]
+      }
+      if(npoints(i) > 0) {
+        # determine row & column positions for each point 
+        loc <- nearest.pixel(i$x, i$y, X)
+        # set values
+        X$v[cbind(loc$row, loc$col)] <- value
+      }
+      X$type <- ifelse(is.factor(X$v), "factor", typeof(X$v))
+      return(update(X))
+    }
+  }
+  # .................. 'i' is not a spatial index ....................
+  
+  # Construct a matrix replacement call 
+  ycall <- switch(itype,
+                  given = {
+                    switch(jtype,
+                           given   = quote(X$v[i, j] <- value),
+                           blank   = quote(X$v[i,  ] <- value),
+                           missing = quote(X$v[i]    <- value))
+                  },
+                  blank = {
+                    switch(jtype,
+                           given   = quote(X$v[ , j] <- value),
+                           blank   = quote(X$v[ ,  ] <- value),
+                           missing = quote(X$v[ ] <- value))
+                  },
+                  missing = {
+                    switch(jtype,
+                           given   = quote(X$v[j=j] <- value),
+                           blank   = quote(X$v[j= ] <- value),
+                           missing = quote(X$v[] <- value))
+                  })
+  # try it
+  litmus <- try(eval(as.call(ycall)), silent=TRUE)
+  if(!inherits(litmus, "try-error")){
+    X$type <- ifelse(is.factor(X$v), "factor", typeof(X$v))
+    return(update(X))
+  }
+  #  Last chance!
+  if(itype == "given" &&
+     !is.matrix(i) &&
+     !is.null(ip <- as.ppp(i, W=W, fatal=FALSE, check=TRUE))) {
+    # 'i' is convertible to a point pattern
+    if(jtype == "given") warning("Index j ignored")
+    nv <- length(value)
+    np <- npoints(ip)
+    if(nv != np && nv != 1)
+      stop("Length of replacement value != number of point locations")
+    # test whether all points are inside window FRAME
+    ok <- inside.owin(ip$x, ip$y, as.rectangle(W))
+    if(any(!ok)) {
+      warning("Some points are outside the outer frame of the image")
+      if(nv == np)
+        value <- value[ok]
+      ip <- ip[ok]
+    }
+    if(npoints(ip) > 0) {
+      # determine row & column positions for each point 
+      loc <- nearest.pixel(ip$x, ip$y, X)
+      # set values
+      X$v[cbind(loc$row, loc$col)] <- value
+    }
+    X$type <- ifelse(is.factor(X$v), "factor", typeof(X$v))
+    return(update(X))
+  }
+
+  stop("The subset operation is undefined for this type of index")
+}
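+
+## Replacement sketch (not run), continuing the image example above:
+##   Z[square(0.5)] <- 0            # reset all pixels inside a subwindow
+##   Z[list(x=0.25, y=0.75)] <- 1   # reset the pixel nearest to a point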
+
+################################################################
+########   other tools
+################################################################
+
+#
+# This function is similar to nearest.raster.point except that
+# the third argument 'Z' is an image or mask, and it uses a different
+# idiom for calculating row & column - which could be used in nearest.raster.point()
+
+nearest.pixel <- function(x,y, Z) {
+  stopifnot(is.im(Z) || is.mask(Z))
+  if(length(x) > 0) {
+    nr <- Z$dim[1L]
+    nc <- Z$dim[2L]
+    cc <- round(1 + (x - Z$xcol[1L])/Z$xstep)
+    rr <- round(1 + (y - Z$yrow[1L])/Z$ystep)
+    cc <- pmax.int(1,pmin.int(cc, nc))
+    rr <- pmax.int(1,pmin.int(rr, nr))
+  } else cc <- rr <- integer(0)
+  return(list(row=rr, col=cc))
+}
+
+# Explores the 3 x 3 neighbourhood of nearest.pixel
+# and finds the nearest pixel that is not NA
+
+nearest.valid.pixel <- function(x, y, Z) {
+  rc <- nearest.pixel(x,y,Z) # checks that Z is an 'im' or 'mask'
+  rr <- rc$row
+  cc <- rc$col
+  # check whether any pixels are outside image domain
+  inside <- as.owin(Z)$m
+  miss <- !inside[cbind(rr, cc)]
+  if(!any(miss))
+    return(rc)
+  # for offending pixels, explore 3 x 3 neighbourhood
+  nr <- Z$dim[1L]
+  nc <- Z$dim[2L]
+  xcol <- Z$xcol
+  yrow <- Z$yrow
+  for(i in which(miss)) {
+    rows <- rr[i] + c(-1L,0L,1L)
+    cols <- cc[i] + c(-1L,0L,1L)
+    rows <- unique(pmax.int(1, pmin.int(rows, nr)))
+    cols <- unique(pmax.int(1, pmin.int(cols, nc)))
+    rcp <- expand.grid(row=rows, col=cols)
+    ok <- inside[as.matrix(rcp)]
+    if(any(ok)) {
+      # At least one of the neighbours is valid
+      # Find the closest one
+      rcp <- rcp[ok,]
+      dsq <- with(rcp, (x[i] - xcol[col])^2 + (y[i] - yrow[row])^2)
+      j <- which.min(dsq)
+      rc$row[i] <- rcp$row[j]
+      rc$col[i] <- rcp$col[j]
+    }
+  }
+  return(rc)
+}
+  
+
+# This function is a generalisation of inside.owin()
+# to images other than binary-valued images.
+
+lookup.im <- function(Z, x, y, naok=FALSE, strict=TRUE) {
+  verifyclass(Z, "im")
+
+  if(Z$type == "factor")
+    Z <- repair.old.factor.image(Z)
+  
+  if(length(x) != length(y))
+    stop("x and y must be numeric vectors of equal length")
+
+  # initialise answer to NA 
+  if(Z$type != "factor") {
+    niets <- NA
+    mode(niets) <- mode(Z$v)
+  } else {
+    niets <- factor(NA, levels=levels(Z))
+  }
+  value <- rep.int(niets, length(x))
+               
+  # test whether inside bounding rectangle
+  xr <- Z$xrange
+  yr <- Z$yrange
+  eps <- sqrt(.Machine$double.eps)
+  frameok <- (x >= xr[1L] - eps) & (x <= xr[2L] + eps) & 
+             (y >= yr[1L] - eps) & (y <= yr[2L] + eps)
+  
+  if(!any(frameok)) {
+    # all points OUTSIDE range - no further work needed
+    if(!naok)
+      warning("Internal error: all values NA")
+    return(value)  # all NA
+  }
+
+  # consider only those points which are inside the frame
+  xf <- x[frameok]
+  yf <- y[frameok]
+  # map locations to raster (row,col) coordinates
+  if(strict)
+    loc <- nearest.pixel(xf,yf,Z)
+  else
+    loc <- nearest.valid.pixel(xf,yf,Z)
+  # look up image values
+  vf <- Z$v[cbind(loc$row, loc$col)]
+  
+  # insert into answer
+  value[frameok] <- vf
+
+  if(!naok && anyNA(value))
+    warning("Internal error: NA's generated")
+
+  return(value)
+}
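+
+## e.g. (not run): look up image values at arbitrary locations,
+## returning NA (without complaint) for points outside the domain:
+##   lookup.im(Z, c(0.2, 1.5), c(0.3, 0.5), naok=TRUE)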
+  
+
+## low level
+
+rasterx.im <- function(x) {
+  verifyclass(x, "im")
+  xx <- x$xcol
+  matrix(xx[col(x)], ncol=ncol(x), nrow=nrow(x))
+}
+
+rastery.im <- function(x) {
+  verifyclass(x, "im")
+  yy <- x$yrow
+  matrix(yy[row(x)], ncol=ncol(x), nrow=nrow(x))
+}
+
+rasterxy.im <- function(x, drop=FALSE) {
+  verifyclass(x, "im")
+  xx <- x$xcol
+  yy <- x$yrow
+  ans <- cbind(x=as.vector(xx[col(x)]),
+               y=as.vector(yy[row(x)]))
+  if(drop) {
+    ok <- as.vector(!is.na(x$v))
+    ans <- ans[ok, , drop=FALSE]
+  }
+  return(ans)
+}
+
+## user interface 
+
+raster.x <- function(w, drop=FALSE) {
+  if(is.owin(w)) return(rasterx.mask(w, drop=drop))
+  if(!is.im(w)) stop("w should be a window or an image")
+  x <- w$xcol[col(w)]
+  x <- if(drop) x[!is.na(w$v), drop=TRUE] else array(x, dim=w$dim)
+  return(x)
+}
+  
+raster.y <- function(w, drop=FALSE) {
+  if(is.owin(w)) return(rastery.mask(w, drop=drop))
+  if(!is.im(w)) stop("w should be a window or an image")
+  y <- w$yrow[row(w)]
+  y <- if(drop) y[!is.na(w$v), drop=TRUE] else array(y, dim=w$dim)
+  return(y)
+}
+
+raster.xy <- function(w, drop=FALSE) {
+  if(is.owin(w)) return(rasterxy.mask(w, drop=drop))
+  if(!is.im(w)) stop("w should be a window or an image")
+  x <- w$xcol[col(w)]
+  y <- w$yrow[row(w)]
+  if(drop) {
+    ok <- !is.na(w$v)
+    x <- x[ok, drop=TRUE]
+    y <- y[ok, drop=TRUE]
+  }
+  return(list(x=as.numeric(x),
+              y=as.numeric(y)))
+}
+
+##############
+
+# methods for other functions
+
+xtfrm.im <- function(x) { as.numeric(as.matrix.im(x)) }
+
+as.matrix.im <- function(x, ...) {
+  return(x$v)
+}
+
+as.array.im <- function(x, ...) {
+  m <- as.matrix(x)
+  a <- do.call(array, resolve.defaults(list(m),
+                                       list(...),
+                                       list(dim=c(dim(m), 1))))
+  return(a)
+}
+
+as.data.frame.im <- function(x, ...) {
+  verifyclass(x, "im")
+  v <- x$v
+  xx <- x$xcol[col(v)]
+  yy <- x$yrow[row(v)]
+  ok <- !is.na(v)
+  xx <- as.vector(xx[ok])
+  yy <- as.vector(yy[ok])
+  # extract pixel values without losing factor info
+  vv <- v[ok]
+  dim(vv) <- NULL
+  # 
+  data.frame(x=xx, y=yy, value=vv, ...)
+}
+
+mean.im <- function(x, trim=0, na.rm=TRUE, ...) {
+  verifyclass(x, "im")
+  xvalues <- x[drop=na.rm]
+  return(mean(xvalues, trim=trim, na.rm=na.rm))
+}
+
+## arguments of generic 'median' will change in R 3.4
+median.im <- if("..." %in% names(formals(median))) {
+   function(x, na.rm=TRUE, ...) {
+    verifyclass(x, "im")
+    xvalues <- x[drop=na.rm]
+    return(median(xvalues, ...))
+  }
+} else {
+   function(x, na.rm=TRUE) {
+    verifyclass(x, "im")
+    xvalues <- x[drop=na.rm]
+    return(median(xvalues))
+  }
+}
+
+where.max <- function(x, first=TRUE) {
+  stopifnot(is.im(x))
+  if(first) { 
+    ## find the first maximum
+    v <- x$v
+    locn <- which.max(as.vector(v))  # ignores NA, NaN
+    locrow <- as.vector(row(v))[locn]
+    loccol <- as.vector(col(v))[locn]
+  } else {
+    ## find all maxima
+    xmax <- max(x)
+    M <- solutionset(x == xmax)
+    loc <- which(M$m, arr.ind=TRUE)
+    locrow <- loc[,1L]
+    loccol <- loc[,2L]
+  }
+  xx <- x$xcol[loccol]
+  yy <- x$yrow[locrow]
+  return(ppp(x=xx, y=yy, window=Window(x)))
+}
+
+where.min <- function(x, first=TRUE) {
+  stopifnot(is.im(x))
+  if(first) { 
+    ## find the first minimum
+    v <- x$v
+    locn <- which.min(as.vector(v))  # ignores NA, NaN
+    locrow <- as.vector(row(v))[locn]
+    loccol <- as.vector(col(v))[locn]
+  } else {
+    ## find all minima
+    xmin <- min(x)
+    M <- solutionset(x == xmin)
+    loc <- which(M$m, arr.ind=TRUE)
+    locrow <- loc[,1L]
+    loccol <- loc[,2L]
+  }
+  xx <- x$xcol[loccol]
+  yy <- x$yrow[locrow]
+  return(ppp(x=xx, y=yy, window=Window(x)))
+}
+
+## the following ensures that 'sd' works
+
+as.double.im <- function(x, ...) { as.double(x[], ...) }
+
+##
+
+hist.im <- function(x, ..., probability=FALSE, xname) {
+  if(missing(xname) || is.null(xname)) xname <- short.deparse(substitute(x))
+  verifyclass(x, "im")
+  main <- paste("Histogram of", xname)
+  # default plot arguments
+  # extract pixel values
+  values <- as.matrix(x)
+  dim(values) <- NULL
+  # barplot or histogram
+  if(x$type %in% c("logical", "factor")) {
+    # barplot
+    tab <- table(values)
+    probs <- tab/sum(tab)
+    if(probability) {
+      heights <- probs
+      ylab <- "Probability"
+    } else {
+      heights <- tab
+      ylab <- "Number of pixels"
+    }
+    mids <- do.call(barplot,
+                   resolve.defaults(list(heights),
+                                    list(...),
+                                    list(xlab=paste("Pixel value"),
+                                         ylab=ylab,
+                                         main=main)))
+    out <- list(counts=tab, probs=probs, heights=heights,
+                mids=mids, xname=xname)
+    class(out) <- "barplotdata"
+  } else {
+    # histogram
+    values <- values[!is.na(values)]
+    plotit <- resolve.defaults(list(...), list(plot=TRUE))$plot
+    if(plotit) {
+      ylab <- if(probability) "Probability density" else "Number of pixels"
+      out <- do.call(hist.default,
+                     resolve.defaults(list(values),
+                                      list(...),
+                                      list(freq=!probability,
+                                           xlab="Pixel value",
+                                           ylab=ylab,
+                                           main=main)))
+      out$xname <- xname
+    } else {
+      # hist.default whinges if 'freq' or 'probability' is given when plot=FALSE
+      out <- do.call(hist.default,
+                   resolve.defaults(list(values),
+                                    list(...)))
+      # hack!
+      out$xname <- xname
+    }
+  }
+  return(invisible(out))
+}
+
+plot.barplotdata <- function(x, ...) {
+  do.call(barplot,
+          resolve.defaults(list(height=x$heights),
+                           list(...),
+                           list(main=paste("Histogram of", x$xname))))
+}
+
+cut.im <- function(x, ...) {
+  verifyclass(x, "im")
+  typ <- x$type
+  if(typ %in% c("factor", "logical", "character")) 
+    stop(paste0("cut.im is not defined for ", typ, "-valued images"),
+         call.=FALSE)
+  vcut <- cut(as.numeric(as.matrix(x)), ...)
+  return(im(vcut,
+            xcol=x$xcol, yrow=x$yrow,
+            xrange=x$xrange, yrange=x$yrange,
+            unitname=unitname(x)))
+}
+
+quantile.im <- function(x, ...) {
+  verifyclass(x, "im")
+  q <- do.call(quantile,
+               resolve.defaults(list(as.numeric(as.matrix(x))),
+                                list(...),
+                                list(na.rm=TRUE)))
+  return(q)
+}
+
+integral <- function(f, domain=NULL, ...) {
+  UseMethod("integral")
+}
+
+integral.im <- function(f, domain=NULL, ...) {
+  verifyclass(f, "im")
+  typ <- f$type
+  if(!any(typ == c("integer", "real", "complex", "logical")))
+    stop(paste("Don't know how to integrate an image of type", sQuote(typ)))
+  if(!is.null(domain)) {
+    if(is.tess(domain)) return(sapply(tiles(domain), integral.im, f=f))
+    f <- f[domain, drop=FALSE, tight=TRUE]
+  }
+  a <- with(f, sum(v, na.rm=TRUE) * xstep * ystep)
+  return(a)
+}
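+
+## Sanity-check sketch (not run): the integral of the constant image 1
+## over the unit square is 1, up to discretisation error:
+##   integral(as.im(function(x,y) 0*x + 1, W=square(1)))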
+
+conform.imagelist <- function(X, Zlist) {
+  # determine points of X where all images in Zlist are defined
+  ok <- rep.int(TRUE, length(X$x))
+  for(i in seq_along(Zlist)) {
+    Zi <- Zlist[[i]]
+    ZiX <- Zi[X, drop=FALSE]
+    ok <- ok & !is.na(ZiX)
+  }
+  return(ok)
+}
+
+split.im <- function(x, f, ..., drop=FALSE) {
+  stopifnot(is.im(x))
+  if(inherits(f, "tess")) 
+    subsets <- tiles(f)
+  else if(is.im(f)) {
+    if(f$type != "factor")
+      f <- eval.im(factor(f))
+    subsets <- tiles(tess(image=f))
+  } else stop("f should be a tessellation or a factor-valued image")
+  if(!is.subset.owin(as.owin(x), as.owin(f)))
+    stop("f does not cover the window of x")
+  n <- length(subsets)
+  out <- vector(mode="list", length=n)
+  names(out) <- names(subsets)
+  for(i in 1:n)
+    out[[i]] <- x[subsets[[i]], drop=drop]
+  if(drop)
+    return(out)
+  else 
+    return(as.solist(out))
+}
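+
+## e.g. (not run): split an image into its four quadrants using a
+## quadrat tessellation of its window:
+##   split(Z, quadrats(Window(Z), nx=2, ny=2))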
+
+by.im <- function(data, INDICES, FUN, ...) {
+  stopifnot(is.im(data))
+  V <- split(data, INDICES)
+  U <- lapply(V, FUN, ...)
+  return(as.solist(U, demote=TRUE))
+}
+
+rebound.im <- function(x, rect) {
+  stopifnot(is.im(x))
+  stopifnot(is.owin(rect))
+  rect <- as.rectangle(rect)
+  stopifnot(is.subset.owin(as.rectangle(x), rect))
+  # compute number of extra rows/columns
+  dx <- x$xstep
+  nleft  <- max(0, floor((x$xrange[1L]-rect$xrange[1L])/dx))
+  nright <- max(0, floor((rect$xrange[2L]-x$xrange[2L])/dx))
+  dy <- x$ystep
+  nbot <- max(0, floor((x$yrange[1L]-rect$yrange[1L])/dy))
+  ntop <- max(0, floor((rect$yrange[2L]-x$yrange[2L])/dy))
+  # determine exact x and y ranges (to preserve original pixel locations)
+  xrange.new <- x$xrange + c(-nleft, nright) * dx
+  yrange.new <- x$yrange + c(-nbot,  ntop) * dy
+  # expand pixel data matrix
+  nr <- x$dim[1L]
+  nc <- x$dim[2L]
+  nrnew <- nbot  + nr + ntop
+  ncnew <- nleft + nc + nright
+  naval <- switch(x$type,
+                  factor=,
+                  integer=NA_integer_,
+                  real=NA_real_,
+                  character=NA_character_,
+                  complex=NA_complex_,
+                  NA)
+  vnew <- matrix(naval, nrnew, ncnew)
+  if(x$type != "factor") {
+    vnew[nbot + (1:nr), nleft + (1:nc)] <- x$v
+  } else {
+    vnew[nbot + (1:nr), nleft + (1:nc)] <- as.integer(x$v)
+    vnew <- factor(vnew, levels=seq_along(levels(x)), labels=levels(x))
+    dim(vnew) <- c(nrnew, ncnew)
+  }
+  # build new image object
+  xnew <- im(vnew,
+             xrange = xrange.new,
+             yrange = yrange.new,
+             unitname = unitname(x))
+  return(xnew)
+}
+
+sort.im <- function(x, ...) {
+  verifyclass(x, "im")
+  sort(as.vector(as.matrix(x)), ...)
+}
+
+dim.im <- function(x) { x$dim }
+
+# colour images
+rgbim <- function(R, G, B, A=NULL, maxColorValue=255, autoscale=FALSE) {
+  if(autoscale) {
+    R <- scaletointerval(R, 0, maxColorValue)
+    G <- scaletointerval(G, 0, maxColorValue)
+    B <- scaletointerval(B, 0, maxColorValue)
+    if(!is.null(A))
+      A <- scaletointerval(A, 0, maxColorValue)
+  }
+  Z <- eval.im(factor(rgbNA(as.vector(R), as.vector(G), as.vector(B),
+                            as.vector(A),
+                            maxColorValue=maxColorValue)))
+  return(Z)
+}
+
+hsvim <- function(H, S, V, A=NULL, autoscale=FALSE) {
+  if(autoscale) {
+    H <- scaletointerval(H, 0, 1)
+    S <- scaletointerval(S, 0, 1)
+    V <- scaletointerval(V, 0, 1)
+    if(!is.null(A))
+      A <- scaletointerval(A, 0, 1)
+  }
+  Z <- eval.im(factor(hsvNA(as.vector(H), as.vector(S), as.vector(V),
+                            as.vector(A))))
+  return(Z)
+}
+
+scaletointerval <- function(x, from=0, to=1, xrange=range(x)) {
+  UseMethod("scaletointerval")
+}
+
+scaletointerval.default <- function(x, from=0, to=1, xrange=range(x)) {
+  x <- as.numeric(x)
+  rr <- if(missing(xrange)) range(x, na.rm=TRUE) else as.numeric(xrange)
+  b <- as.numeric(to - from)/diff(rr)
+  if(is.finite(b)) {
+    y <- from + b * (x - rr[1L])
+  } else {
+    y <- (from+to)/2 + 0 * x
+  }
+  y[] <- pmin(pmax(y[], from), to)
+  return(y)
+}
+
+scaletointerval.im <- function(x, from=0, to=1, xrange=range(x)) {
+  v <- scaletointerval(x$v, from, to, xrange=xrange)
+  y <- im(v, x$xcol, x$yrow, x$xrange, x$yrange, unitname(x))
+  return(y)
+}
+
+zapsmall.im <- function(x, digits) {
+  if(missing(digits))
+    return(eval.im(zapsmall(x)))
+  return(eval.im(zapsmall(x, digits=digits)))
+}
+
+domain.im <- Window.im <- function(X, ...) { as.owin(X) }
+
+"Window<-.im" <- function(X, ..., value) {
+  verifyclass(value, "owin")
+  X[value, drop=FALSE]
+}
+
+padimage <- function(X, value=NA, n=1, W=NULL) {
+  stopifnot(is.im(X))
+  stopifnot(length(value) == 1)
+  if(!missing(n) && !is.null(W)) 
+    stop("Arguments n and W are incompatible", call.=FALSE)
+  padW <- !is.null(W)
+  if(isfac <- (X$type == "factor")) {
+    ## handle factors
+    levX <- levels(X)
+    if(is.factor(value)) {
+      stopifnot(identical(levels(X), levels(value)))
+    } else {
+      value <- factor(value, levels=levX)
+    }
+    X <- eval.im(as.integer(X))
+    value <- as.integer(value)
+  }
+  if(!padW) {
+    ## pad by 'n' pixels
+    nn <- rep(n, 4)
+    nleft   <- nn[1L]
+    nright  <- nn[2L]
+    nbottom <- nn[3L]
+    ntop    <- nn[4L]
+  } else {
+    ## pad out to window W
+    FX <- Frame(X)
+    B <- boundingbox(Frame(W), FX)
+    nleft   <- max(1, round((FX$xrange[1L] - B$xrange[1L])/X$xstep))
+    nright  <- max(1, round((B$xrange[2L] - FX$xrange[2L])/X$xstep))
+    nbottom <- max(1, round((FX$yrange[1L] - B$yrange[1L])/X$ystep))
+    ntop    <- max(1, round((B$yrange[2L] - FX$yrange[2L])/X$ystep))
+  }
+  mX <- as.matrix(X)
+  dd <- dim(mX)
+  mX <- cbind(matrix(value, dd[1L], nleft, byrow=TRUE),
+              as.matrix(X),
+              matrix(value, dd[1L], nright, byrow=TRUE))
+  dd <- dim(mX)
+  mX <- rbind(matrix(rev(value), nbottom, dd[2L]),
+              mX,
+              matrix(value, ntop, dd[2L]))
+  xcol <- with(X,
+               c(xcol[1L]     - (nleft:1) * xstep,
+                 xcol,
+                 xcol[length(xcol)] + (1:nright) * xstep))
+  yrow <- with(X,
+               c(yrow[1L]     - (nbottom:1) * ystep,
+                 yrow,
+                 yrow[length(yrow)] + (1:ntop) * ystep))
+  xr <- with(X, xrange + c(-nleft, nright) * xstep)
+  yr <- with(X, yrange + c(-nbottom, ntop) * ystep)
+  Y <- im(mX,
+          xcol=xcol, yrow=yrow, xrange=xr, yrange=yr,
+          unitname=unitname(X))
+  if(isfac)
+    Y <- eval.im(factor(Y, levels=seq_along(levX), labels=levX))
+  if(padW && !is.rectangle(W)) 
+    Y <- Y[W, drop=FALSE]
+  return(Y)
+}
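+
+## Padding sketch (not run):
+##   padimage(Z, 0, n=2)            # add a 2-pixel border of zeroes
+##   padimage(Z, NA, W=square(2))   # pad with NA out to a larger window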
+
+as.function.im <- function(x, ...) {
+  Z <- x
+  f <- function(x,y) { Z[list(x=x, y=y)] }
+  g <- funxy(f, Window(x))
+  return(g)
+}
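+
+## e.g. (not run): convert an image to a function of (x,y):
+##   f <- as.function.im(Z)
+##   f(0.25, 0.75)    # same as Z[list(x=0.25, y=0.75)]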
+
+anyNA.im <- function(x, recursive=FALSE) {
+  anyNA(x$v)
+}
+
diff --git a/R/indicator.R b/R/indicator.R
new file mode 100644
index 0000000..038c53e
--- /dev/null
+++ b/R/indicator.R
@@ -0,0 +1,20 @@
+#' indicator function for window
+
+as.function.owin <- function(x, ...) {
+  W <- x
+  g <- function(x, y=NULL) {
+    xy <- xy.coords(x, y)
+    inside.owin(xy$x, xy$y, W)
+  }
+  class(g) <- c("indicfun", class(g))
+  return(g)
+}
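+
+## e.g. (not run):
+##   f <- as.function.owin(square(1))
+##   f(0.5, 0.5)    # TRUE: the point lies inside the window
+##   f(2, 2)        # FALSE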
+
+print.indicfun <- function(x, ...) {
+  W <- get("W", envir=environment(x))
+  nama <- names(formals(x))
+  splat(paste0("function", paren(paste(nama, collapse=","))))
+  splat("Indicator function (returns 1 inside window, 0 outside)")
+  print(W)
+  return(invisible(NULL))
+}
diff --git a/R/indices.R b/R/indices.R
new file mode 100644
index 0000000..91f6db1
--- /dev/null
+++ b/R/indices.R
@@ -0,0 +1,239 @@
+#'
+#'   indices.R
+#'
+#'   Code for handling vector/array indices
+#'
+#'   $Revision: 1.7 $  $Date: 2017/02/07 07:47:20 $
+#'
+
+grokIndexVector <- function(ind, len, nama=NULL) {
+  #' Parse any kind of index vector,
+  #' returning
+  #'      a logical index 'lo' (the subset of elements),
+  #'      a positive integer index 'i' ( = which(lo) ),
+  #'      the number 'n' of values required
+  #'      the number 'nind' of values indexed
+  #' and if appropriate
+  #'      a character vector 's' of names
+  #'      a mapping 'map' (matching 'ind' to 'i')
+  #'
+  #' There are two versions:
+  #'    'strict' (confined to specified bounds 1:len and specified names 'nama')
+  #'    'full'   (allowing implied extension of array bounds)
+  named <- !is.null(nama)
+  if(missing(len) && named) len <- length(nama)
+  force(len)
+  # special cases
+  if(is.null(ind)) {
+    #' all entries (implied)
+    return(list(strict=list(lo=rep(TRUE, len),
+                            i=seq_len(len),
+                            n=len,
+                            s=nama,
+                            nind=len,
+                            map=NULL)))
+  }
+  if(length(ind) == 0) {
+    #' no entries
+    return(list(strict=list(lo=logical(len),
+                            i=integer(0),
+                            n=0L,
+                            s=character(0),
+                            nind=0L,
+                            map=NULL)))
+  }
+  #' main cases
+  if(is.logical(ind)) {
+    # logical (subset) index into 1:len
+    lo <- ind
+    m <- length(lo)
+    if(m < len) {
+      #' recycle
+      oldlo <- lo
+      lo <- logical(len)
+      lo[oldlo] <- TRUE
+      m <- len
+    }
+    if(m == len) {
+      n <- sum(lo)
+      result <- list(strict=list(lo=lo, i=which(lo), n=n, s=nama,
+                       nind=n, map=NULL))
+      return(result)
+    }
+    #' new elements implied
+    lostrict <- lo[1:len]
+    newones <- (len+1L):m
+    nstrict <- sum(lostrict)
+    strict <- list(lo=lostrict,
+                   i=which(lostrict),
+                   n=nstrict,
+                   s=nama,
+                   nind=nstrict,
+                   map=NULL)
+    nfull <- sum(lo)
+    full <- list(newones=newones,
+                 fullset=1:m,
+                 lo=lo,
+                 i=which(lo),
+                 n=nfull,
+                 s=if(named) c(nama, rep("", length(newones))) else NULL,
+                 nind=nfull,
+                 map=NULL)
+    return(list(strict=strict, full=full))
+  }
+  if(is.character(ind)) {
+    #' character index into 'nama'
+    #' order is important
+    imap <- match(ind, nama)
+    unknown <- is.na(imap)
+    i <- sort(unique(imap[!unknown]))
+    lo <- logical(len)
+    lo[i] <- TRUE
+    map <- match(imap, i)
+    n <- length(ind)
+    s <- nama[map]
+    nind <- length(ind)
+    if(identical(map, seq_along(map))) map <- NULL
+    strict <- list(lo=lo, i=i, n=n, s=s, nind=nind, map=map)
+    if(!any(unknown)) return(list(strict=strict))
+    # some unrecognised strings
+    newones <- unique(ind[unknown])
+    fullset <- c(nama, newones)
+    imapfull <- match(ind, fullset)
+    ifull <- sort(unique(imapfull))
+    lofull <- logical(length(fullset))
+    lofull[ifull] <- TRUE
+    mapfull <- match(imapfull, ifull)
+    nfull <- length(ind)
+    sfull <- fullset[mapfull]
+    if(identical(mapfull, seq_along(mapfull))) mapfull <- NULL
+    full <- list(newones=newones, fullset=fullset,
+                 lo=lofull, i=ifull, n=nfull, s=sfull, nind=nind, map=mapfull)
+    return(list(strict=strict, full=full))
+  }
+  if(is.numeric(ind)) {
+    if(all(ind > 0)) {
+      #' integer index into 1:len
+      #' order is important
+      ifull <- sort(unique(ind))
+      inside <- (ifull <= len)
+      i <- ifull[inside]
+      map <- match(ind, i)
+      lo <- logical(len)
+      lo[i] <- TRUE
+      n <- length(ind)
+      s <- nama[ind]
+      if(identical(map, seq_along(map))) map <- NULL
+      strict <- list(lo=lo,i=i,n=n,s=s,nind=length(i),map=map)
+      if(all(inside)) return(list(strict=strict))
+      newones <- ifull[!inside]
+      mapfull <- match(ind, ifull)
+      fullset <- 1:max(ifull)
+      lofull <- logical(length(fullset))
+      lofull[ifull] <- TRUE
+      nfull <- length(ind)
+      sfull <- if(named) c(nama, rep("", length(newones)))[ind] else NULL
+      if(identical(mapfull, seq_along(mapfull))) mapfull <- NULL
+      return(list(strict=strict, full=list(newones=newones, fullset=fullset,
+                                           lo=lofull, i=ifull,
+                                           n=nfull, s=sfull,
+                                           nind=nfull, map=mapfull)))
+    }
+    if(all(ind < 0)) {
+      #' exclusion index
+      #' ignore indices outside bounds
+      negind <- -ind
+      negind <- negind[negind <= len]
+      lo <- rep(TRUE, len)
+      lo[negind] <- FALSE
+      i <- which(lo)
+      n <- length(i)
+      map <- seq_len(n)
+      return(list(strict=list(lo=lo, i=i, n=n, s=nama[i], nind=n, map=map)))
+    }
+    stop("An integer index may not contain both negative and positive values",
+         call.=FALSE)
+  }
+  stop("Unrecognised format for index", call.=FALSE)
+}
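+
+## Parsing sketch (not run) for this internal helper:
+##   grokIndexVector(c(2, 4), len=4)$strict$lo   # FALSE TRUE FALSE TRUE
+##   grokIndexVector(-1, len=4)$strict$i         # 2 3 4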
+
+replacementIndex <- function(ii, stuff) {
+  # 'stuff' is predigested information about a subset index.
+  # Find the location in the original array
+  # whose value should be replaced by the 'ii'-th replacement value
+  # according to this info.
+  with(stuff, {
+    if(!is.null(map)) ii <- map[ii]
+    i[ii]
+  })
+}
+
+positiveIndex <- function(i, nama, len=length(nama)) {
+  #' convert any kind of index to a positive integer sequence
+  x <- seq_len(len)
+  if(is.null(i)) return(x)
+  stopifnot(is.vector(i))
+  if(is.numeric(i) && !all(ok <- (abs(i) <= len))) {
+    warning("Index values lie outside array bounds", call.=FALSE)
+    i <- i[ok]
+  }
+  names(x) <- nama
+  y <- x[i]
+  return(unname(y))
+}
+
+logicalIndex <- function(i, nama, len=length(nama)) {
+  #' convert any kind of index to a logical vector
+  if(is.null(i)) return(rep(TRUE, len))
+  stopifnot(is.vector(i))
+  if(is.numeric(i) && !all(ok <- (abs(i) <= len))) {
+    warning("Index values lie outside array bounds", call.=FALSE)
+    i <- i[ok]
+  }
+  x <- logical(len)
+  names(x) <- nama
+  x[i] <- TRUE
+  return(unname(x))
+}
+
+#' convert any appropriate subset index for any kind of point pattern
+#' to a logical vector
+
+ppsubset <- function(X, I, Iname, fatal=FALSE) {
+  if(missing(Iname))
+    Iname <- deparse(substitute(I))
+  # I could be a window or logical image
+  if(is.im(I))
+    I <- solutionset(I)
+  if((is.ppp(X) || is.lpp(X)) && is.owin(I)) {
+    I <- inside.owin(X, w=I)
+    return(I)
+  }
+  if((is.pp3(X) && inherits(I, "box3")) ||
+     (is.ppx(X) && inherits(I, "boxx"))) {
+    I <- inside.boxx(X, w=I)
+    return(I)
+  }
+  # I could be a function to be applied to X
+  if(is.function(I)) {
+    I <- I(X)
+    if(!is.vector(I)) {
+      whinge <- paste("Function", sQuote(Iname), "did not return a vector")
+      if(fatal) stop(whinge, call.=FALSE)
+      warning(whinge, call.=FALSE)
+      return(NULL)
+    }
+  }      
+  # I is now a subset index: convert to logical
+  I <- grokIndexVector(I, npoints(X))$strict$lo
+
+  if(anyNA(I)) {
+    #' illegal entries
+    whinge <- paste("Indices in", sQuote(Iname), "exceed array limits")
+    if(fatal) stop(whinge, call.=FALSE)
+    warning(whinge, call.=FALSE)
+    return(NULL)
+  }
+
+  return(I)
+}
diff --git a/R/infline.R b/R/infline.R
new file mode 100755
index 0000000..afe7a10
--- /dev/null
+++ b/R/infline.R
@@ -0,0 +1,255 @@
+#
+# infline.R
+#
+# Infinite lines
+#
+# $Revision: 1.28 $ $Date: 2017/02/07 07:47:20 $
+#
+
+infline <- function(a=NULL, b=NULL, h=NULL, v=NULL, p=NULL, theta=NULL) {
+  if(is.null(a) != is.null(b))
+    stop("invalid specification of a,b")
+  if(is.null(p) != is.null(theta))
+    stop("invalid specification of p,theta")
+  if(!is.null(h)) 
+    out <- data.frame(a=h, b=0, h=h, v=NA, p=h, theta=pi/2)
+  else if(!is.null(v)) 
+    ## vertical line x = v: theta = 0 and p = v (p may be negative, as for h)
+    out <- data.frame(a=NA, b=NA, h=NA, v=v, p=v, theta=0)
+  else if(!is.null(a)) {
+    # a, b specified
+    z <- data.frame(a=a,b=b)
+    a <- z$a
+    b <- z$b
+    theta <- ifelseAX(b == 0, pi/2, atan(-1/b))
+    theta <- theta %% pi
+    p <- a * sin(theta)
+    out <- data.frame(a=a, b=b,
+                      h=ifelseXB(b==0, a, NA),
+                      v=NA, p=p, theta=theta)
+  } else if(!is.null(p)) {
+    # p, theta specified
+    z <- data.frame(p=p,theta=theta)
+    p <- z$p
+    theta <- z$theta
+    theta <- theta %% (2*pi)
+    if(any(reverse <- (theta >= pi))) {
+      theta[reverse] <- theta[reverse] - pi
+      p[reverse]     <- -p[reverse]
+    }
+    vert <- (theta == 0)
+    horz <- (cos(theta) == 0)
+    gene <- !(vert | horz)
+    v <- ifelseXB(vert, p, NA)
+    h <- ifelseXB(horz, p, NA)
+    a <- ifelseXB(gene, p/sin(theta), NA)
+    b <- ifelseXB(gene, -cos(theta)/sin(theta), NA)
+    out <- data.frame(a=a,b=b,h=h,v=v,p=p,theta=theta)
+  } else stop("No data given!")
+  class(out) <- c("infline", class(out))
+  return(out)
+}
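+
+## Construction sketch (not run): three ways to specify lines.
+##   infline(a=1, b=-1)         # the line y = 1 - x
+##   infline(h=2)               # the horizontal line y = 2
+##   infline(p=1, theta=pi/4)   # the line x*cos(theta) + y*sin(theta) = p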
+
+is.infline <- function(x) { inherits(x, "infline") }
+
+plot.infline <- function(x, ...) {
+  for(i in seq_len(nrow(x))) {
+    xi <- as.list(x[i, 1:4])
+    xi[sapply(xi, is.na)] <- NULL
+    do.call(abline, append(xi, list(...)))
+  }
+  return(invisible(NULL))
+}
+
+print.infline <- function(x, ...) {
+  n <- nrow(x)
+  splat(n, "infinite", ngettext(n, "line", "lines"))
+  print(as.data.frame(x), ...)
+  return(invisible(NULL))
+}
+
+clip.infline <- function(L, win) {
+  # clip a set of infinite straight lines to a window
+  win <- as.owin(win)
+  stopifnot(inherits(L, "infline"))
+  nL <- nrow(L)
+  if(nL == 0)
+    return(psp(numeric(0),numeric(0),numeric(0),numeric(0), window=win))
+  seqL <- seq_len(nL)
+  # determine circumcircle of win
+  xr <- win$xrange
+  yr <- win$yrange
+  xmid <- mean(xr)
+  ymid <- mean(yr)
+  width <- diff(xr)
+  height <- diff(yr)
+  rmax <- sqrt(width^2 + height^2)/2
+  boundbox <- owin(xmid + c(-1,1) * rmax, ymid + c(-1,1) * rmax)
+  # convert line coordinates to origin (xmid, ymid)
+  p <- L$p
+  theta <- L$theta
+  co <- cos(theta)
+  si <- sin(theta)
+  p <- p - xmid * co - ymid * si
+  # compute intersection points with circumcircle 
+  hit <- (abs(p) < rmax)
+  if(!any(hit)) 
+    return(psp(numeric(0),numeric(0),numeric(0),numeric(0), window=win))
+  p <- p[hit]
+  theta <- theta[hit]
+  q <- sqrt(rmax^2 - p^2)
+  co <- co[hit]
+  si <- si[hit]
+  id <- seqL[hit]
+  X <- psp(x0= xmid + p * co + q * si,
+           y0= ymid + p * si - q * co,
+           x1= xmid + p * co - q * si,
+           y1= ymid + p * si + q * co,
+           marks = factor(id, levels=seqL),
+           window=boundbox, check=FALSE)
+  # clip to window
+  X <- X[win]
+  return(X)
+}
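+
+## e.g. (not run): clip two infinite lines to the unit square, giving a
+## 'psp' object whose marks identify the original lines:
+##   L <- infline(p=c(0.2, 0.7), theta=c(pi/4, pi/2))
+##   S <- clip.infline(L, square(1))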
+  
+chop.tess <- function(X, L) {
+  stopifnot(is.infline(L))
+  stopifnot(is.tess(X)||is.owin(X))
+  X <- as.tess(X)
+
+  if(X$type == "image") {
+    Xim <- X$image
+    xr <- Xim$xrange
+    yr <- Xim$yrange
+    # extract matrices of pixel values and x, y coordinates
+    Zmat <- as.integer(as.matrix(Xim))
+    xmat <- rasterx.im(Xim)
+    ymat <- rastery.im(Xim)
+    # process lines
+    for(i in seq_len(nrow(L))) {
+      # line i chops window into two pieces
+      if(!is.na(h <- L[i, "h"])) {
+        # horizontal line
+        if(h > yr[1L] && h < yr[2L]) 
+          Zmat <- 2 * Zmat + (ymat > h)
+      } else if(!is.na(v <- L[i, "v"])) {
+        # vertical line
+        if(v > xr[1L] && v < xr[2L])
+          Zmat <- 2 * Zmat + (xmat < v)
+      } else {
+        # generic line y = a + bx
+        a <- L[i, "a"]
+        b <- L[i, "b"]
+        Zmat <- 2 * Zmat + (ymat > a + b * xmat)
+      }
+    }
+    # Now just put back as factor image
+    Zim <- im(Zmat, xcol=Xim$xcol, yrow=Xim$yrow, unitname=unitname(Xim))
+    Z <- tess(image=Zim)
+    return(Z)
+  }
+
+  #---- polygonal computation --------
+  # get bounding box
+  B <- as.rectangle(as.owin(X))
+  xr <- B$xrange
+  yr <- B$yrange
+
+  # get coordinates
+  for(i in seq_len(nrow(L))) {
+    # line i chops box B into two pieces
+    if(!is.na(h <- L[i, "h"])) {
+      # horizontal line
+      if(h < yr[1L] || h > yr[2L])
+        Z <- NULL
+      else {
+        lower <- owin(xr, c(yr[1L], h))
+        upper <- owin(xr, c(h, yr[2L]))
+        Z <- tess(tiles=list(lower,upper), window=B)
+      }
+    } else if(!is.na(v <- L[i, "v"])) {
+      # vertical line
+      if(v < xr[1L] || v > xr[2L])
+        Z <- NULL
+      else {
+        left <- owin(c(xr[1L], v), yr)
+        right <- owin(c(v, xr[2L]), yr)
+        Z <- tess(tiles=list(left,right), window=B)
+      }
+    } else {
+      # generic line
+      a <- L[i, "a"]
+      b <- L[i, "b"]
+      # Intersect with extended left and right sides of B
+      yleft <- a + b * xr[1L]
+      yright <- a + b * xr[2L]
+      ylo <- min(yleft, yright, yr[1L]) - 1
+      yhi <- max(yleft, yright, yr[2L]) + 1
+      lower <- owin(poly=list(x=xr[c(1L,1L,2L,2L)],
+                              y=c(yleft,ylo,ylo,yright)))
+      upper <- owin(poly=list(x=xr[c(1L,2L,2L,1L)],
+                              y=c(yleft,yright,yhi,yhi)))
+      Bplus <- owin(xr, c(ylo, yhi), unitname=unitname(B))
+      Z <- tess(tiles=list(lower,upper), window=Bplus)
+    }
+    # intersect this simple tessellation with X
+    if(!is.null(Z)) {
+      X <- intersect.tess(X, Z)
+      tilenames(X) <- paste("Tile", seq_len(length(tiles(X))))
+    }
+  }
+  return(X)
+}
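+# Illustrative sketch (editorial note, not part of the upstream source):
+# chop.tess() cuts a window or tessellation along each infinite line in L.
+# Assuming the standard spatstat constructors,
+#   W <- owin(c(0,1), c(0,1))
+#   L <- infline(h = 0.3)      # the horizontal line y = 0.3
+#   Z <- chop.tess(W, L)       # tessellation of W with two rectangular tiles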
+
+whichhalfplane <- function(L, x, y=NULL) {
+  verifyclass(L, "infline")
+  xy <- xy.coords(x, y)
+  x <- xy$x
+  y <- xy$y
+  m <- length(x)
+  n <- nrow(L)
+  Z <- matrix(as.logical(NA_integer_), n, m)
+  for(i in seq_len(n)) {
+    if(!is.na(h <- L[i, "h"])) {
+      #' horizontal line
+      Z[i,] <- (y < h)
+    } else if(!is.na(v <- L[i, "v"])) {
+      #' vertical line
+      Z[i,] <- (x < v)
+    } else {
+      #' generic line y = a + bx
+      a <- L[i, "a"]
+      b <- L[i, "b"]
+      Z[i,] <- (y < a + b * x)
+    }
+  }
+  return(Z)
+}
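+# Illustrative sketch (editorial): whichhalfplane() returns an n x m logical
+# matrix whose [i,j] entry is TRUE when point j falls in the half-plane
+# below/left of line i. For example, assuming infline() as above,
+#   L <- infline(h = c(0.3, 0.7))
+#   whichhalfplane(L, x = c(0.1, 0.5), y = c(0.1, 0.5))
+#   # row 1 (y < 0.3): TRUE FALSE;  row 2 (y < 0.7): TRUE TRUE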
+
+rotate.infline <- function(X, angle=pi/2, ...) {
+  if(nrow(X) == 0) return(X)
+  Y <- with(X, infline(p = p, theta=theta + angle))
+  return(Y)
+}
+
+shift.infline <- function(X, vec=c(0,0), ...) {
+  if(nrow(X) == 0) return(X)
+  vec <- as2vector(vec)
+  Y <- with(X, infline(p = p + vec[1L] * cos(theta) + vec[2L] * sin(theta),
+                       theta=theta))
+  return(Y)
+}
+
+reflect.infline <- function(X) {
+  if(nrow(X) == 0) return(X)
+  Y <- with(X, infline(p = p,
+                       theta=(theta + pi) %% (2 * pi)))
+  return(Y)
+}
+
+flipxy.infline <- function(X) {
+  if(nrow(X) == 0) return(X)
+  Y <- with(X, infline(p = p,
+                       theta=(pi/2 - theta) %% (2 * pi)))
+  return(Y)
+}
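+# Editorial note: a row of an 'infline' object represents the line
+#     x * cos(theta) + y * sin(theta) = p,
+# so rotation simply adds 'angle' to theta, and shifting by (v1, v2) adds the
+# projection v1*cos(theta) + v2*sin(theta) to p, as implemented above.
+# A quick check, assuming the infline() constructor:
+#   L <- infline(v = 1)        # the vertical line x = 1 (theta = 0, p = 1)
+#   shift(L, c(2, 0))          # now p = 3, i.e. the line x = 3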
+
diff --git a/R/inforder.family.R b/R/inforder.family.R
new file mode 100755
index 0000000..24b3f35
--- /dev/null
+++ b/R/inforder.family.R
@@ -0,0 +1,94 @@
+#
+#
+#    inforder.family.R
+#
+#    $Revision: 1.2 $	$Date: 2010/07/10 10:22:09 $
+#
+#    Family of `infinite-order' point process models
+#
+#    inforder.family:      object of class 'isf' 
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+inforder.family <-
+  list(
+       name  = "inforder",
+       print = function(self) {
+         cat("Family of infinite-order interactions\n")
+       },
+       plot = NULL,
+       # ----------------------------------------------------
+       eval  = function(X,U,EqualPairs,pot,pars,correction, ...) {
+  #
+  # This is the eval function for the `inforder' family.
+  # 
+  # This internal function is not meant to be called by the user.
+  # It is called by mpl.prepare() during execution of ppm().
+  #         
+  # The eval functions perform all the manipulations that are common to
+  # a given class of interactions. 
+  #
+  # For the `inforder' family of interactions with infinite order,
+  # there are no structures common to all interactions.
+  # So this function simply invokes the potential 'pot' directly
+  # and expects 'pot' to return the values of the sufficient statistic S(u,X).
+  #
+  # ARGUMENTS:
+  #   All 'eval' functions have the following arguments 
+  #   which are called in sequence (without formal names)
+  #   by mpl.prepare():
+  #       
+  #   X           data point pattern                      'ppp' object
+  #   U           points at which to evaluate potential   list(x,y) suffices
+  #   EqualPairs  two-column matrix of indices i, j such that X[i] == U[j]
+  #               (or NULL, meaning all comparisons are FALSE)
+  #   pot         potential function 
+  #   pars        auxiliary parameters for pot            list(......)
+  #   correction  edge correction type                    (string)
+  #
+  # VALUE:
+  #    All `eval' functions must return a        
+  #    matrix of values of the total potential
+  #    induced by the pattern X at each location given in U.
+  #    The rows of this matrix correspond to the rows of U (the sample points);
+  #    the k columns are the coordinates of the k-dimensional potential.
+  #
+  ##########################################################################
+
+  # POTENTIAL:
+  # In this case the potential function 'pot' should have arguments
+  #    pot(X, U, EqualPairs, pars, correction, ...)
+  #         
+  # It must return a vector with length equal to the number of points in U,
+  # or a matrix with as many rows as there are points in U.
+
+         if(!is.ppp(U))
+           U <- ppp(U$x, U$y, window=X$window)
+         
+         POT <- pot(X, U, EqualPairs, pars, correction, ...)
+
+         if(is.matrix(POT)) {
+           if(nrow(POT) != U$n)
+             stop("Internal error: the potential returned a matrix with the wrong number of rows")
+         } else if(is.array(POT) && length(dim(POT)) > 2) {
+           stop("Internal error: the potential returned an array with more than 2 dimensions")
+         } else if(is.vector(POT)) {
+           if(length(POT) != U$n)
+             stop("Internal error: the potential returned a vector with the wrong length")
+           POT <- matrix(POT, ncol=1)
+         } else {
+           stop("Internal error: the return value from the potential is not understood")
+         }
+
+         return(POT)
+       },
+######### end of function $eval
+       suffstat = NULL
+######### end of function $suffstat
+)
+######### end of list
+
+class(inforder.family) <- "isf"
+
+
diff --git a/R/intensity.R b/R/intensity.R
new file mode 100644
index 0000000..fc6bf57
--- /dev/null
+++ b/R/intensity.R
@@ -0,0 +1,304 @@
+#
+# intensity.R
+#
+# Code related to intensity and intensity approximations
+#
+#  $Revision: 1.20 $ $Date: 2017/06/05 10:31:58 $
+#
+
+intensity <- function(X, ...) {
+  UseMethod("intensity")
+}
+
+intensity.ppp <- function(X, ..., weights=NULL) {
+  n <- npoints(X)
+  a <- area(Window(X))
+  if(is.null(weights)) {
+    ## unweighted case - for efficiency
+    if(is.multitype(X)) {
+      mks <- marks(X)
+      answer <- as.vector(table(mks))/a
+      names(answer) <- levels(mks)
+    } else answer <- n/a
+    return(answer)
+  }
+  ## weighted case 
+  if(is.numeric(weights)) {
+    check.nvector(weights, n)
+  } else if(is.expression(weights)) {
+    # evaluate expression in data frame of coordinates and marks
+    df <- as.data.frame(X)
+    pf <- parent.frame()
+    eval.weights <- try(eval(weights, envir=df, enclos=pf))
+    if(inherits(eval.weights, "try-error"))
+      stop("Unable to evaluate expression for weights", call.=FALSE)
+    if(!check.nvector(eval.weights, n, fatal=FALSE, warn=TRUE))
+      stop("Result of evaluating the expression for weights has wrong format")
+    weights <- eval.weights
+  } else stop("Unrecognised format for argument 'weights'")
+  ##
+  if(is.multitype(X)) {
+    mks <- marks(X)
+    answer <- as.vector(tapply(weights, mks, sum))/a
+    answer[is.na(answer)] <- 0
+    names(answer) <- levels(mks)
+  } else {
+    answer <- sum(weights)/a
+  }
+  return(answer)
+}
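+
+# Illustrative usage (editorial): for a multitype pattern the result is a
+# vector of intensities, one entry per mark level; 'weights' may be a numeric
+# vector or an expression evaluated in the data frame of coordinates and
+# marks. For example, with the 'amacrine' dataset shipped with spatstat,
+#   intensity(amacrine)            # points per unit area, by type
+#   intensity(unmark(amacrine))    # a single overall intensity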
+
+intensity.splitppp <- function(X, ..., weights=NULL) {
+  if(is.null(weights))
+    return(sapply(X, intensity.ppp))
+  if(is.expression(weights))
+    return(sapply(X, intensity.ppp, weights=weights))
+  if(is.numeric(weights)) {
+    fsplit <- attr(X, "fsplit")
+    n <- length(fsplit)
+    check.nvector(weights, n)
+    result <- mapply(intensity.ppp, X, weights=split(weights, fsplit))
+    result <- simplify2array(result, higher=FALSE)
+    return(result)
+  }
+  stop("Unrecognised format for weights")
+}
+
+intensity.ppm <- function(X, ...) {
+  if(!identical(valid.ppm(X), TRUE)) {
+    warning("Model is invalid - projecting it")
+    X <- project.ppm(X)
+  }
+  if(is.poisson(X)) {
+    if(is.stationary(X)) {
+      # stationary univariate/multivariate Poisson
+      sX <- summary(X, quick="no variances")
+      lam <- sX$trend$value
+      if(sX$multitype && sX$no.trend) {
+        ## trend is ~1; lam should be replicated for each mark
+        lev <- levels(marks(data.ppm(X)))
+        lam <- rep(lam, length(lev))
+        names(lam) <- lev
+      }
+      return(lam)
+    }
+    # Nonstationary Poisson
+    return(predict(X, ...))
+  }
+  # Gibbs process
+  if(is.multitype(X))
+    stop("Not yet implemented for multitype Gibbs processes")
+  # Compute first order term
+  if(is.stationary(X)) {
+    ## activity parameter
+    sX <- summary(X, quick="no variances")
+    beta <- sX$trend$value
+  } else {
+    ## activity function (or values of it, depending on '...')
+    beta <- predict(X, ...)
+  }
+  ## apply approximation
+  lambda <- PoisSaddle(beta, fitin(X))
+  return(lambda)
+}
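+
+# Illustrative usage (editorial): for a fitted Gibbs model the value returned
+# is the Poisson-saddlepoint approximation to the intensity. A minimal sketch,
+# assuming the 'cells' dataset and Strauss() from spatstat:
+#   fit <- ppm(cells ~ 1, Strauss(r = 0.1))
+#   intensity(fit)     # approximate intensity of the fitted model
+#   intensity(cells)   # empirical intensity of the data, for comparison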
+
+PoisSaddle <- function(beta, fi) {
+  ## apply Poisson-Saddlepoint approximation
+  ## given first order term and fitted interaction
+  stopifnot(inherits(fi, "fii"))
+  inte <- as.interact(fi)
+  if(identical(inte$family$name, "pairwise"))
+    return(PoisSaddlePairwise(beta, fi))
+  if(identical(inte$name, "Geyer saturation process"))
+    return(PoisSaddleGeyer(beta, fi))
+  if(identical(inte$name, "Area-interaction process"))
+    return(PoisSaddleArea(beta, fi))
+  stop(paste("Intensity approximation is not yet available for",
+             inte$name), call.=FALSE)
+}
+
+PoisSaddlePairwise <- function(beta, fi) {
+  inte <- as.interact(fi)
+  Mayer <- inte$Mayer
+  if(is.null(Mayer))
+    stop(paste("Sorry, not yet implemented for", inte$name))
+  # interaction coefficients
+  co <- with(fi, coefs[Vnames[!IsOffset]])
+  # compute second Mayer cluster integral
+  G <- Mayer(co, inte)
+  if(is.null(G) || !is.finite(G)) 
+    stop("Internal error in computing Mayer cluster integral")
+  if(G < 0)
+    stop(paste("Unable to apply Poisson-saddlepoint approximation:",
+               "Mayer cluster integral is negative"))
+  ## solve
+  if(is.im(beta)) {
+    lambda <- if(G == 0) beta else eval.im(LambertW(G * beta)/G)
+  } else {
+    lambda <- if(G == 0) beta else (LambertW(G * beta)/G)
+    if(length(lambda) == 1) lambda <- unname(lambda)
+  }
+  return(lambda)
+}
+
+
+# Lambert's W-function
+
+LambertW <- local({
+
+  yexpyminusx <- function(y,x){y*exp(y)-x}
+
+  W <- function(x) {
+    result <- rep.int(NA_real_, length(x))
+    ok <- is.finite(x) & (x >= 0)
+    if(requireNamespace("gsl", quietly=TRUE)) {
+      result[ok] <- gsl::lambert_W0(x[ok])
+    } else {
+      for(i in which(ok))
+        result[i] <- uniroot(yexpyminusx, c(0, x[i]), x=x[i])$root
+    }
+    return(result)
+  }
+
+  W
+})
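+
+# Quick numerical check (editorial): W(x) solves y * exp(y) = x for x >= 0, so
+#   w <- LambertW(c(1, 10))
+#   w * exp(w)               # should reproduce (approximately) 1 and 10
+# W(1) is the omega constant, approximately 0.5671433.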
+
+PoisSaddleGeyer <- local({
+
+  PoisSaddleGeyer <- function(beta, fi) {
+    gamma <- summary(fi)$sensible$param$gamma
+    if(gamma == 1) return(beta)
+    inte <- as.interact(fi)
+    sat <- inte$par$sat
+    R   <- inte$par$r
+    #' get probability distribution of Geyer statistic under reference model
+    z <- Spatstat.Geyer.Nulldist # from sysdata
+    if(is.na(m <- match(sat, z$sat)))
+      stop(paste("Sorry, the Poisson-saddlepoint approximation",
+                 "is not implemented for Geyer models with sat =", sat),
+           call.=FALSE)
+    probmatrix <- z$prob[[m]]
+    maxachievable <- max(which(colSums(probmatrix) > 0)) - 1
+    gammarange <- sort(c(1, gamma^maxachievable))
+    #' apply approximation
+    betavalues <- beta[]
+    nvalues <- length(betavalues)
+    lambdavalues <- numeric(nvalues)
+    for(i in seq_len(nvalues)) {
+      beta.i <- betavalues[i]
+      ra <- beta.i * gammarange
+      lambdavalues[i] <- uniroot(diffapproxGeyer, ra, beta=beta.i,
+                                 gamma=gamma, R=R, sat=sat,
+                                 probmatrix=probmatrix)$root
+    }
+    #' return result in same format as 'beta'
+    lambda <- beta
+    lambda[] <- lambdavalues
+    if(length(lambda) == 1) lambda <- unname(lambda)
+    return(lambda)
+  }
+
+  diffapproxGeyer <- function(lambda, beta, gamma, R, sat, probmatrix) {
+    lambda - approxEpoisGeyerT(lambda, beta, gamma, R, sat, probmatrix)
+  }
+  approxEpoisGeyerT <- function(lambda, beta=1, gamma=1, R=1, sat=1,
+                                probmatrix) {
+    #' Compute approximation to E_Pois(lambda) Lambda(0,X) for Geyer
+    #' ('probmatrix' contains the distribution of geyerT(0, Z_n) for each n,
+    #' where 'sat' is given and Z_n is runifdisc(n, radius=2*R).)
+    possT <- 0:(ncol(probmatrix)-1)
+    possN <- 0:(nrow(probmatrix)-1)
+    pN <- dpois(possN, lambda * pi * (2*R)^2)
+    EgamT <- pN %*% probmatrix %*% (gamma^possT)
+    #' assume that, for n > max(possN),
+    #' distribution of T is concentrated on T=sat
+    EgamT <- EgamT + (gamma^sat) * (1-sum(pN))
+    return(beta * EgamT)
+  }
+
+  PoisSaddleGeyer
+})
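+
+# Editorial note: PoisSaddleGeyer() solves the fixed-point equation
+#     lambda = beta * E[ gamma^T(0, X) ],  where X ~ Poisson(lambda),
+# using uniroot() on the bracketing interval
+# beta * range(1, gamma^maxachievable), with the expectation computed from
+# the tabulated null distribution 'probmatrix' in approxEpoisGeyerT().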
+
+PoisSaddleArea <- local({
+
+  PoisSaddleArea <- function(beta, fi) {
+    eta <- summary(fi)$sensible$param$eta
+    if(eta == 1) return(beta)
+    etarange <- range(c(eta^2, 1.1, 0.9))
+    inte <- as.interact(fi)
+    R   <- inte$par$r
+    #' reference distribution of canonical sufficient statistic
+    zeroprob <- Spatstat.Area.Zeroprob
+    areaquant <- Spatstat.Area.Quantiles
+    # expectation of eta^A_n for each n = 0, 1, ....
+    EetaAn <- c(1/eta,
+                zeroprob + (1-zeroprob) * colMeans((eta^(-areaquant))))
+    #' compute approximation
+    betavalues <- beta[]
+    nvalues <- length(betavalues)
+    lambdavalues <- numeric(nvalues)
+    for(i in seq_len(nvalues)) {
+      beta.i <- betavalues[i]
+      ra <- beta.i * etarange
+      lambdavalues[i] <- uniroot(diffapproxArea, ra, beta=beta.i,
+                                 eta=eta, r=R,
+                                 EetaAn=EetaAn)$root
+    }
+    #' return result in same format as 'beta'
+    lambda <- beta
+    lambda[] <- lambdavalues
+    if(length(lambda) == 1) lambda <- unname(lambda)
+    return(lambda)
+  }
+
+  diffapproxArea <- function(lambda, beta, eta, r, EetaAn) {
+    lambda - approxEpoisArea(lambda, beta, eta, r, EetaAn)
+  }
+
+  approxEpoisArea <- function(lambda, beta=1, eta=1, r=1, EetaAn) {
+    #' Compute approximation to E_Pois(lambda) Lambda(0,X) for AreaInter
+    mu <- lambda * pi * (2*r)^2
+    zeta <- pi^2/2 - 1
+    theta <-  -log(eta)
+    zetatheta <- zeta * theta
+
+    #' contribution from tabulated values
+    Nmax <- length(EetaAn) - 1L
+    possN <- 0:Nmax
+    qN <- dpois(possN, mu)
+    # expectation of eta^A when N ~ poisson (truncated)
+    EetaA <- sum(qN * EetaAn)
+
+    #' asymptotics for quite large n
+    Nbig <- qpois(0.999, mu)
+    qn <- 0
+    if(Nbig > Nmax) {
+      n <- (Nmax+1):Nbig
+      #' asymptotic mean uncovered area conditional on this being positive
+      mstarn <- (16/((n+3)^2)) * exp(n * (1/4 - log(4/3)))
+      ztm <- zetatheta * mstarn
+      ok <- (ztm < 1)
+      if(!any(ok)) {
+        Nbig <- Nmax
+        qn <- 0
+      } else {
+        if(!all(ok)) {
+          Nbig <- max(which(!ok)) - 1
+          n <- (Nmax+1):Nbig
+          ztm <- ztm[1:((Nbig-Nmax)+1)]
+        }
+        qn <- dpois(n, mu)
+        #' asymptotic probability of complete coverage
+        pstarn <- 1 - pmin(1, 3 * (1 + n^2/(16*pi)) * exp(-n/4))
+        Estarn <- (1 - ztm)^(-1/zeta)
+        EetaA <- EetaA + sum(qn * (pstarn + (1-pstarn) * Estarn))
+      }
+    }
+    #' for very large n, assume complete coverage, so A = 0
+    EetaA <- EetaA + 1 - sum(qN) - sum(qn)
+    return(beta * eta * EetaA)
+  }
+
+  PoisSaddleArea
+
+})
diff --git a/R/interact.R b/R/interact.R
new file mode 100755
index 0000000..e32321d
--- /dev/null
+++ b/R/interact.R
@@ -0,0 +1,332 @@
+#
+#	interact.R
+#
+#
+#	$Revision: 1.28 $	$Date: 2015/10/21 09:06:57 $
+#
+#	Class 'interact' representing the interpoint interaction
+#               of a point process model
+#              (e.g. Strauss process with a given threshold r)
+#
+#       Class 'isf' representing a generic interaction structure
+#              (e.g. pairwise interactions)
+#
+#	These do NOT specify the "trend" part of the model,
+#	only the "interaction" component.
+#
+#               The analogy is:
+#
+#                       glm()             ppm()
+#
+#                       model formula     trend formula
+#
+#                       family            interaction
+#
+#               That is, the 'systematic' trend part of a point process
+#               model is specified by a 'trend' formula argument to ppm(),
+#               and the interpoint interaction is specified as an 'interact'
+#               object.
+#
+#       You only need to know about these classes if you want to
+#       implement a new point process model.
+#
+#       THE DISTINCTION:
+#       An object of class 'isf' describes an interaction structure
+#       e.g. pairwise interaction, triple interaction,
+#       pairwise-with-saturation, Dirichlet interaction.
+#       Think of it as determining the "order" of interaction
+#       but not the specific interaction potential function.
+#
+#       An object of class 'interact' completely defines the interpoint
+#       interactions in a specific point process model, except for the
+#       regular parameters of the interaction, which are to be estimated
+#       by ppm() or otherwise. An 'interact' object specifies the values
+#       of all the 'nuisance' or 'irregular' parameters. An example
+#       is the Strauss process with a given, fixed threshold r
+#       but with the parameters beta and gamma undetermined.
+#
+#       DETAILS:
+#
+#       An object of class 'isf' contains the following:
+#
+#	     $name               Name of the interaction structure         
+#                                        e.g. "pairwise"
+#
+#	     $print		 How to 'print()' this object
+#				 [A function; invoked by the 'print' method
+#                                 'print.isf()']
+#
+#            $eval               A function which evaluates the canonical
+#                                sufficient statistic for an interaction
+#                                of this general class (e.g. any pairwise
+#                                interaction.)
+#
+#       If lambda(u,X) denotes the conditional intensity at a point u
+#       for the point pattern X, then we assume
+#                  log lambda(u, X) = theta . S(u,X)
+#       where theta is the vector of regular parameters,
+#       and we call S(u,X) the sufficient statistic.
+#
+#       A typical calling sequence for the $eval function is
+#
+#            (f$eval)(X, U, E, potentials, potargs, correction)
+#
+#       where X is the data point pattern, U is the list of points u
+#       at which the sufficient statistic S(u,X) is to be evaluated,
+#       E is a logical matrix equivalent to (X[i] == U[j]),
+#       $potentials defines the specific potential function(s) and
+#       $potargs contains any nuisance/irregular parameters of these
+#       potentials [the $potargs are passed to the $potentials without
+#       needing to be understood by $eval.]
+#       $correction is the name of the edge correction method.
+#
+#
+#       An object of class 'interact' contains the following:
+#
+#
+#            $name               Name of the specific potential
+#                                        e.g. "Strauss"
+#
+#            $family              Object of class "isf" describing
+#                                the interaction structure
+#
+#            $pot	         The interaction potential function(s)
+#                                -- usually a function or list of functions.
+#                                (passed as an argument to $family$eval)
+#
+#            $par                list of any nuisance/irregular parameters
+#                                (passed as an argument to $family$eval)
+#
+#            $parnames           vector of long names/descriptions
+#                                of the parameters in 'par'
+#
+#            $init()             initialisation action
+#                                or NULL indicating none required
+#
+#            $update()           A function to modify $par
+#                                [Invoked by 'update.interact()']
+#                                or NULL indicating a default action
+#
+#	     $print		 How to 'print()' this object
+#				 [Invoked by 'print' method 'print.interact()']
+#                                or NULL indicating a default action
+#
+# --------------------------------------------------------------------------
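+
+# Illustrative sketch (editorial, hypothetical): an 'interact' object could in
+# principle be assembled by hand with the components documented above, e.g.
+#   myinter <- list(name     = "my threshold process",
+#                   family   = pairwise.family,   # an 'isf' object
+#                   pot      = function(d, par) { d < par$r },
+#                   par      = list(r = 0.1),
+#                   parnames = "interaction distance")
+#   class(myinter) <- "interact"
+# In practice one would call a creator function such as Pairwise() instead.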
+
+print.isf <- function(x, ...) {
+  if(is.null(x)) return(invisible(NULL))
+  verifyclass(x, "isf")
+  if(!is.null(x$print))
+    (x$print)(x)
+  invisible(NULL)
+}
+
+print.interact <- function(x, ..., family, brief=FALSE, banner=TRUE) {
+  verifyclass(x, "interact")
+  if(missing(family)) family <- waxlyrical('extras')
+  #' Print name of model
+  if(banner) {
+    if(family && !brief && !is.null(xf <- x$family))
+      print.isf(xf)
+    splat(if(!brief) "Interaction:" else NULL, x$name, sep="")
+  }
+  # Now print the parameters
+  if(!is.null(x$print)) {
+     (x$print)(x)
+  } else {
+    # default
+    # just print the parameter names and their values
+    pwords <- x$parnames
+    parval <- x$par
+    pwords <- paste(toupper(substring(pwords, 1, 1)),
+                    substring(pwords, 2), sep="")
+    isnum <- sapply(parval, is.numeric)
+    parval[isnum] <- lapply(parval[isnum], signif,
+                            digits=getOption("digits"))
+    splat(paste(paste0(pwords, ":\t", parval), collapse="\n"))
+  }
+  invisible(NULL)
+}
+
+is.interact <- function(x) { inherits(x, "interact") }
+
+update.interact <- function(object, ...) {
+  verifyclass(object, "interact")
+  if(!is.null(object$update))
+    (object$update)(object, ...)
+  else {
+    # Default
+    # First update the version
+    if(outdated.interact(object))
+      object <- reincarnate.interact(object)
+    # just match the arguments in "..."
+    # with those in object$par and update them
+    want <- list(...)
+    if(length(want) > 0) {
+      m <- match(names(want),names(object$par))
+      nbg <- is.na(m)
+      if(any(nbg)) {
+        unmatched <- paste(names(want)[nbg], collapse=", ")
+        warning(paste("Arguments not matched:", unmatched))
+      }
+      m <- m[!nbg]
+      object$par[m] <- want
+    }
+    # call object's own initialisation routine
+    if(!is.null(object$init))
+      (object$init)(object)
+    object
+  }    
+}
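+
+# Illustrative usage (editorial): the default method matches the names in
+# '...' against the components of object$par, so (assuming Strauss() from
+# spatstat)
+#   update(Strauss(r = 0.05), r = 0.1)
+# yields a Strauss interaction with the new interaction distance r = 0.1.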
+
+  
+is.poisson.interact <- function(x) {
+  verifyclass(x, "interact")
+  is.null(x$family)
+}
+
+parameters.interact <- function(model, ...) {
+  model$par
+}
+
+# Test whether interact object was made by an older version of spatstat
+
+outdated.interact <- function(object) {
+  ver <- object$version
+  older <- is.null(ver) || (package_version(ver) < versionstring.spatstat())
+  return(older)
+}
+
+
+# Test whether the functions in the interaction object
+# expect the coefficient vector to contain ALL coefficients,
+# or only the interaction coefficients.
+# This change was introduced in 1.11-0, at the same time
+# as interact objects were given version numbers.
+
+newstyle.coeff.handling <- function(object) {
+  stopifnot(inherits(object, "interact"))  
+  ver <- object$version
+  old <- is.null(ver) || (package_version(ver) < "1.11")
+  return(!old)
+}
+
+# ######
+#
+# Re-create an interact object in the current version of spatstat
+#
+# 
+
+reincarnate.interact <- function(object) {
+  # re-creates an interact object in the current version of spatstat
+
+  if(!is.null(object$update)) {
+    newobject <- (object$update)(object)
+    return(newobject)
+  }
+  
+  par <- object$par
+#  pot <- object$pot
+  name <- object$name
+  
+  # get creator function
+  creator <- object$creator
+  if(is.null(creator)) {
+    # old version: look up list
+    creator <- .Spatstat.Old.InteractionList[[name]]
+    if(is.null(creator))
+      stop(paste("Don't know how to update", sQuote(name),
+                 "to current version of spatstat"))
+  }
+  if(is.character(creator))
+    creator <- get(creator)
+  if(!is.function(creator) && !is.expression(creator))
+    stop("Internal error: creator is not a function or expression")
+
+  # call creator
+  if(is.expression(creator)) 
+    newobject <- eval(creator)
+  else {
+    # creator is a function
+  
+    # It's assumed that the creator function's arguments are
+    # either identical to components of 'par' (the usual case)
+    # or to one of the components of the object itself (Ord, Saturated)
+    # or to printfun=object$print (Pairwise).
+  
+    argnames <- names(formals(creator))
+    available <- append(par, object)
+    available <- append(available, list(printfun=object$print))
+    ok <- argnames %in% names(available)
+    if(!all(ok))
+      stop(paste("Internal error:",
+                 ngettext(sum(!ok), "argument", "arguments"),
+                 paste(sQuote(argnames[!ok]), collapse=", "),
+                 "in creator function were not understood"))
+    newobject <- do.call(creator, available[argnames])
+  }
+  
+  if(!inherits(newobject, "interact"))
+    stop("Internal error: creator did not return an object of class interact")
+
+  return(newobject)
+}
+
+
+# This list is necessary to deal with older formats of 'interact' objects
+# which did not include the creator name
+
+.Spatstat.Old.InteractionList <-
+  list("Diggle-Gratton process"    = "DiggleGratton",
+       "Geyer saturation process"  = "Geyer",
+       "Lennard-Jones potential"   = "LennardJones",
+       "Multitype Strauss process" = "MultiStrauss",
+       "Multitype Strauss Hardcore process" = "MultiStraussHard",
+       "Ord process with threshold potential"="OrdThresh",
+       "Piecewise constant pairwise interaction process"="PairPiece",
+       "Poisson process"           = "Poisson",
+       "Strauss process"           = "Strauss",
+       "Strauss - hard core process" = "StraussHard",
+       "Soft core process" = "Softcore",
+       # weird ones:
+       "Ord process with user-defined potential" = expression(Ord(object$pot)),
+       "Saturated process with user-defined potential"
+          =expression(Saturated(object$pot)),
+       "user-defined pairwise interaction process"=
+       expression(
+           Pairwise(object$pot,
+                    par=object$par,
+                    parnames=object$parnames,
+                    printfun=object$print))
+           
+     )
+       
+as.interact <- function(object) {
+  UseMethod("as.interact")
+}
+
+as.interact.interact <- function(object) {
+  verifyclass(object, "interact")
+  return(object)
+}
+
+interactionfamilyname <- function(x) {
+  if(inherits(x, "isf")) return(x$name)
+  x <- as.interact(x)
+  return(x$family$name)
+}
+                                      
+#### internal code for streamlining initialisation of interactions
+#
+#    x should be a partially-completed 'interact' object
+#
+
+instantiate.interact <- function(x, par) {
+  if(is.character(x$family)) x$family <- get(x$family)
+  # set parameter values
+  x$par    <- par
+  # validate parameters
+  x$init(x)
+  x$version <- versionstring.spatstat()
+  return(x)
+}
diff --git a/R/interactions.R b/R/interactions.R
new file mode 100755
index 0000000..37907d8
--- /dev/null
+++ b/R/interactions.R
@@ -0,0 +1,246 @@
+#
+# interactions.R
+#
+# Works out which interaction is in force for a given point pattern
+#
+#  $Revision: 1.25 $  $Date: 2016/04/25 02:34:40 $
+#
+#
+impliedpresence <- function(tags, formula, df, extranames=character(0)) {
+  # Determines, for each row of the data frame df,
+  # whether the variable called tags[j] is required in the formula
+  stopifnot(is.data.frame(df))
+  stopifnot(inherits(formula, "formula"))
+  stopifnot(is.character(tags))
+  stopifnot(is.character(extranames))
+#  allvars <- variablesinformula(formula)
+  if(any(tags %in% names(df)))
+    stop(paste(sQuote("tags"),
+               "conflicts with the name of a column of",
+               sQuote("df")))
+  if(any(extranames %in% names(df)))
+    stop(paste(sQuote("extranames"),
+               "conflicts with the name of a column of",
+               sQuote("df")))
+  # answer is a matrix 
+  nvars <- length(tags)
+  nrows <- nrow(df)
+  answer <- matrix(TRUE, nrows, nvars)
+  # expand data frame with zeroes for each tags and extranames
+  for(v in unique(c(tags, extranames)))
+    df[ , v] <- 0
+  # loop
+  for(i in seq_len(nrow(df))) {
+    # make a fake data frame for the formula
+    # using the data frame entries from row i
+    # (includes 0 values for all other variables)
+    pseudat <- df[i, , drop=FALSE]
+    # use this to construct a fake model matrix
+    mof0 <- model.frame(formula, pseudat)
+    mom0 <- model.matrix(formula, mof0)
+    for(j in seq_len(nvars)) {
+      # Reset the variable called tags[j] to 1
+      pseudatj <- pseudat
+      pseudatj[ , tags[j]] <- 1
+      # Now create the fake model matrix
+      mofj <- model.frame(formula, pseudatj)
+      momj <- model.matrix(formula, mofj)
+      # Compare the two matrices
+      answer[i,j] <- any(momj != mom0)
+    }
+  }
+  return(answer)
+}
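+
+# Illustrative sketch (editorial): impliedpresence() reports, for each row of
+# 'df', whether each tag genuinely enters the model matrix. For example,
+#   df <- data.frame(x = c(0, 1))
+#   impliedpresence("I1", ~ x + I1, df)   # 2 x 1 matrix, TRUE in both rows
+#   impliedpresence("I1", ~ x, df)        # FALSE: the tag is absent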
+
+active.interactions <- function(object) {
+  stopifnot(inherits(object, "mppm"))
+  interaction <- object$Inter$interaction
+  iformula <- object$iformula
+  nenv <- new.env()
+  environment(iformula) <- nenv 
+#%^!ifdef RANDOMEFFECTS
+  random <- object$random
+  if(!is.null(random))
+    environment(random) <- nenv
+#%^!endif  
+
+  itags    <- object$Inter$itags
+# The following are currently unused  
+#  ninter   <- object$Inter$ninter
+#  iused    <- object$Inter$iused
+#  trivial  <- object$Inter$trivial
+
+  # names of variables
+  dat <- object$data
+  datanames <- names(dat)
+  dfnames <- summary(dat)$dfnames
+  nondfnames <- datanames[!(datanames %in% dfnames)]
+  nondfnames <- union(nondfnames, c("x", "y"))
+  
+  # extract data-frame values
+  dfdata <- as.data.frame(dat[, dfnames, drop=FALSE], warn=FALSE)
+  
+  # determine which interaction(s) are in force 
+  answer <- impliedpresence(itags, iformula, dfdata, nondfnames)
+#%^!ifdef RANDOMEFFECTS  
+  if(!is.null(random)) {
+    if("|" %in% all.names(random)) {
+      ## hack since model.matrix doesn't handle "|" as desired
+      rnd <- gsub("|", "/", pasteFormula(random), fixed=TRUE)
+      random <- as.formula(rnd, env=environment(random))
+    }
+    answer2 <- impliedpresence(itags, random, dfdata, nondfnames)
+    answer <- answer | answer2
+  }
+#%^!endif  
+  colnames(answer) <- names(interaction)
+  return(answer)
+}
+
+impliedcoefficients <- function(object, tag) {
+  stopifnot(inherits(object, "mppm"))
+  stopifnot(is.character(tag) && length(tag) == 1)
+  fitobj      <- object$Fit$FIT
+  Vnamelist   <- object$Fit$Vnamelist
+  has.random  <- object$Info$has.random
+# Not currently used:  
+#  fitter      <- object$Fit$fitter
+#  interaction <- object$Inter$interaction
+#  ninteract   <- object$Inter$ninteract
+#  trivial     <- object$Inter$trivial
+#  iused       <- object$Inter$iused
+  itags       <- object$Inter$itags
+  if(!(tag %in% itags))
+    stop(paste("Argument", dQuote("tag"),
+               "is not one of the interaction names"))
+  # (0) Set up
+  # Identify the columns of the glm data frame
+  # that are associated with this interpoint interaction 
+  vnames <- Vnamelist[[tag]]
+  if(!is.character(vnames))
+    stop("Internal error - wrong format for vnames")
+  # Check atomic type of each covariate
+  Moadf <- as.list(object$Fit$moadf)
+  islog <- sapply(Moadf, is.logical)
+  isnum <- sapply(Moadf, is.numeric)
+  isfac <- sapply(Moadf, is.factor)
+  # Interaction variables must be numeric or logical
+  if(any(bad <- !(isnum | islog)[vnames]))
+    stop(paste("Internal error: the",
+               ngettext(sum(bad), "variable", "variables"),
+               commasep(sQuote(vnames[bad])),
+               "should be numeric or logical"),
+         call.=FALSE)
+  # The answer is a matrix of coefficients,
+  # with one row for each point pattern,
+  # and one column for each vname
+  answer <- matrix(, nrow=object$npat, ncol=length(vnames))
+  colnames(answer) <- vnames
+  
+  # (1) make a data frame of covariates
+  # Names of all columns in glm data frame
+  allnames <- names(Moadf)
+  # Extract the design covariates
+  df <- as.data.frame(object$data, warn=FALSE)
+  # Names of all covariates other than design covariates
+  othernames <- allnames[!(allnames %in% names(df))]
+  # Add columns in which all other covariates are set to 0, FALSE, etc
+  for(v in othernames) {
+    df[, v] <- if(isnum[[v]]) 0 else
+               if(islog[[v]]) FALSE else
+               if(isfac[[v]]) {
+                 lev <- levels(Moadf[[v]])
+                 factor(lev[1], levels=lev)
+               } else sort(unique(Moadf[[v]]))[1]
+  }
+  # (2) evaluate linear predictor
+  Coefs <- if(!has.random) coef(fitobj) else fixef(fitobj)
+  opt <- options(warn= -1)
+#  eta0 <- predict(fitobj, newdata=df, type="link")
+  eta0 <- GLMpredict(fitobj, data=df, coefs=Coefs, changecoef=TRUE, type="link")
+  options(opt)
+  
+  # (3) for each vname in turn,
+  # set the value of the vname to 1 and predict again
+  for(j in seq_along(vnames)) {
+    vnj <- vnames[j]
+    df[[vnj]] <- 1
+    opt <- options(warn= -1)
+#    etaj <- predict(fitobj, newdata=df, type="link")
+    etaj <- GLMpredict(fitobj, data=df, coefs=Coefs, changecoef=TRUE, type="link")
+    options(opt)
+    answer[ ,j] <- etaj - eta0
+    # set the value of this vname back to 0
+    df[[vnj]] <- 0
+  }
+  return(answer)
+}
+
+
+
+illegal.iformula <- local({
+
+  illegal.iformula <- function(ifmla, itags, dfvarnames) {
+    ## THIS IS TOO STRINGENT!
+    ## Check validity of the interaction formula.
+    ##  ifmla is the formula.
+    ##  itags is the character vector of interaction names.
+    ## Check whether the occurrences of `itags' in `iformula' are valid:
+    ## e.g. no functions applied to `itags[i]'.
+    ## Returns NULL if legal, otherwise a character string 
+    stopifnot(inherits(ifmla, "formula"))
+    stopifnot(is.character(itags))
+    ## formula must not have a LHS
+    if(length(ifmla) > 2)
+      return("iformula must not have a left-hand side")
+    ## variables in formula must be interaction tags or data frame variables
+    varsinf <- variablesinformula(ifmla)
+    if(!all(ok <- varsinf %in% c(itags, dfvarnames))) 
+      return(paste(
+                   ngettext(sum(!ok), "variable", "variables"),
+                   paste(dQuote(varsinf[!ok]), collapse=", "),
+                   "not allowed in iformula"))
+    ## if formula uses no interaction tags, it's trivial
+    if(!any(itags %in% variablesinformula(ifmla)))
+      return(NULL)
+    ## create terms object
+    tt <- attributes(terms(ifmla))
+    ## extract all variables appearing in the formula
+    vars <- as.list(tt$variables)[-1]
+    ##  nvars <- length(vars)
+    varexprs <- lapply(vars, as.expression)
+    varstrings <- sapply(varexprs, paste)
+    ## Each variable may be a name or an expression
+    v.is.name <- sapply(vars, is.name)
+    ## a term may be an expression like sin(x), poly(x,y,degree=2)
+    v.args <- lapply(varexprs, all.vars)
+    ##  v.n.args <- sapply(v.args, length)
+    v.has.itag <- sapply(lapply(v.args, "%in%", x=itags), any)
+    ## interaction tags may only appear as names, not in functions
+    if(any(nbg <- v.has.itag & !v.is.name))
+      return(paste("interaction tags may not appear inside a function:",
+                   paste(dQuote(varstrings[nbg]), collapse=", ")))
+    ## Interaction between two itags is not defined
+    ## Inspect the higher-order terms
+    fax <- tt$factors
+    if(prod(dim(fax)) == 0)
+      return(NULL)
+    ## rows are first order terms, columns are terms of order >= 1
+    fvars <- rownames(fax)
+    fterms <- colnames(fax)
+    fv.args <- lapply(fvars, variablesintext)
+    ft.args <- lapply(fterms, variables.in.term, 
+                      factors=fax, varnamelist=fv.args)
+    ft.itags <- lapply(ft.args, intersect, y=itags)
+    if(any(lengths(ft.itags) > 1))
+      return("Interaction between itags is not defined")
+    return(NULL)
+  }
+
+  variables.in.term <- function(term, factors, varnamelist) {
+    basis <- (factors[, term] != 0)
+    unlist(varnamelist[basis])
+  }
+  
+  illegal.iformula
+})
diff --git a/R/interp.im.R b/R/interp.im.R
new file mode 100755
index 0000000..31b4b91
--- /dev/null
+++ b/R/interp.im.R
@@ -0,0 +1,61 @@
+#
+# interp.im.R
+#
+#  $Revision: 1.4 $  $Date: 2017/02/07 07:47:20 $
+#
+
+interp.im <- local({
+
+  lukimyu <- function(ccc, rrr, mat, defaults) {
+    dimm <- dim(mat)
+    within <- (rrr >= 1 & rrr <= dimm[1L] & ccc >= 1 & ccc <= dimm[2L])
+    result <- defaults
+    result[within] <- mat[cbind(rrr[within], ccc[within])]
+    result
+  }
+
+  interp.im <- function(Z, x, y=NULL) {
+    stopifnot(is.im(Z))
+    if(!is.null(levels(Z)))
+      stop("Interpolation is undefined for factor-valued images")
+    xy <- xy.coords(x, y)
+    x <- xy$x
+    y <- xy$y
+    ok <- inside.owin(x,y, as.owin(Z))
+    ## get default lookup values (for boundary cases)
+    fallback <- Z[ppp(x[ok], y[ok], window=as.rectangle(Z), check=FALSE)]
+    ## Transform to grid coordinates
+    ## so that pixel centres are at integer points,
+    ## bottom left of image is (0,0)
+    xx <- (x[ok] - Z$xcol[1L])/Z$xstep
+    yy <- (y[ok] - Z$yrow[1L])/Z$ystep
+    ## find grid point to left and below
+    ## (may transgress boundary)
+    xlower <- floor(xx)
+    ylower <- floor(yy)
+    cc <- as.integer(xlower) + 1L
+    rr <- as.integer(ylower) + 1L
+    ## determine whether (x,y) is above or below antidiagonal in square
+    dx <- xx - xlower
+    dy <- yy - ylower
+    below <- (dx + dy <= 1)
+    ## if below, interpolate Z(x,y) = (1-x-y)Z(0,0) + xZ(1,0) + yZ(0,1)
+    ## if above, interpolate Z(x,y) = (x+y-1)Z(1,1) + (1-x)Z(0,1) + (1-y)Z(1,0)
+    V <- Z$v
+    values <- ifelse(below,
+                     ( (1-dx-dy)*lukimyu(cc,rr,V,fallback)
+                      + dx*lukimyu(cc+1,rr,V,fallback)
+                      + dy*lukimyu(cc,rr+1,V,fallback)
+                      ),
+                     ( (dx+dy-1)*lukimyu(cc+1,rr+1,V,fallback)
+                      + (1-dx)*lukimyu(cc,rr+1,V,fallback)
+                      + (1-dy)*lukimyu(cc+1,rr,V,fallback)
+                      ))
+    result <- numeric(length(x))
+    result[ok] <- values
+    result[!ok] <- NA
+    return(result)
+  }
+
+  interp.im
+})
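+
+# Illustrative usage (editorial): interpolate a pixel image at arbitrary
+# locations, assuming spatstat's as.im():
+#   Z <- as.im(function(x, y) { x + y }, owin(c(0,1), c(0,1)))
+#   interp.im(Z, x = c(0.2, 0.5), y = c(0.3, 0.5))
+#   # approximately 0.5 and 1.0, since Z is linear in x and y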
diff --git a/R/iplot.R b/R/iplot.R
new file mode 100755
index 0000000..4eb4e1c
--- /dev/null
+++ b/R/iplot.R
@@ -0,0 +1,347 @@
+#
+# interactive plot for ppp objects using rpanel
+#
+#   $Revision: 1.23 $   $Date: 2017/02/07 07:47:20 $
+#
+#
+
+# Effect:
+# when the user types
+#                 iplot(x)
+# a pop-up panel displays a standard plot of x and
+# buttons allowing control of the plot parameters.
+
+# Coding:
+# The panel 'p' contains the following internal variables
+#      x          Original point pattern
+#      w          Window of point pattern
+#      xname      Name of x (for main title)
+#      mtype      Type of marks of x
+#      bb         frame of x 
+#      bbmid      midpoint of frame
+# The following variables in 'p' are controlled by panel buttons etc
+#      split      Logical: whether to split multitype pattern
+#      pointmap   Plot character, or "marks" indicating that marks are used
+#      zoomfactor Zoom factor 
+#      zoomcentre Centre point for zoom
+#      charsize   Character expansion factor cex
+#      markscale  Mark scale factor markscale
+#      
+
+iplot <- function(x, ...) {
+  UseMethod("iplot")
+}
+
+iplot.ppp <- local({
+
+iplot.ppp <- function(x, ..., xname) {
+  if(missing(xname))
+    xname <- short.deparse(substitute(x))
+  verifyclass(x, "ppp")
+  
+  if(markformat(x) %in% c("hyperframe", "list"))
+    marks(x) <- as.data.frame(as.hyperframe(marks(x)))
+  if(markformat(x) == "dataframe" && ncol(marks(x)) > 1) {
+    warning("Using only the first column of marks")
+    marks(x) <- marks(x)[,1L]
+  }
+  mtype <- if(is.multitype(x)) "multitype" else if(is.marked(x)) "marked" else "unmarked"
+
+  bb <- as.rectangle(as.owin(x))
+  bbmid <- unlist(centroid.owin(bb))
+  ##
+  kraever("rpanel")
+  ##
+  p <- rpanel::rp.control(paste("iplot(", xname, ")", sep=""), 
+                          x=x,
+                          w=as.owin(x),
+                          xname=xname,
+                          mtype=mtype,
+                          bb=bb,
+                          bbmid=bbmid,
+                          split=FALSE,
+                          pointmap=if(is.marked(x)) "marks" else "o",
+                          zoomfactor=1,
+                          zoomcentre=bbmid,
+                          size=c(700, 400))
+
+# Split panel into three
+# Left: plot controls
+# Middle: data
+# Right: navigation/zoom
+  rpanel::rp.grid(p, "gcontrols", pos=list(row=0,column=0))
+  rpanel::rp.grid(p, "gdisplay",  pos=list(row=0,column=1))
+  rpanel::rp.grid(p, "gnavigate", pos=list(row=0,column=2))
+
+#----- Data display ------------
+
+  # This line is to placate the package checker
+  mytkr <- NULL
+
+  # Create data display panel 
+  rpanel::rp.tkrplot(p, mytkr, plotfun=do.iplot.ppp, action=click.iplot.ppp,
+                     pos=list(row=0,column=0,grid="gdisplay"))
+
+  
+#----- Plot controls ------------
+  nextrow <- 0
+  pozzie <- function(n=nextrow, ...)
+    append(list(row=n,column=0,grid="gcontrols"), list(...))
+  
+# main title
+  rpanel::rp.textentry(p, xname, action=redraw.iplot.ppp, title="Plot title",
+                       pos=pozzie(0))
+  nextrow <- 1
+
+# split ?
+  if(mtype == "multitype") {
+    rpanel::rp.checkbox(p, split, initval=FALSE, 
+                        title="Split according to marks",
+                        action=redraw.iplot.ppp,
+                        pos=pozzie(1))
+    nextrow <- 2
+  }
+
+# plot character or mark style
+  ptvalues <- c("o", "bullet", "plus")
+  ptlabels <- c("open circles", "filled circles", "crosshairs")
+  if(is.marked(x)) {
+    ptvalues <- c("marks", ptvalues)
+    ptlabels <- if(mtype == "multitype")
+      c("Symbols depending on mark", ptlabels)
+    else c("Circles proportional to mark", ptlabels)
+  }
+  pointmap <- ptvalues[1L]
+  rpanel::rp.radiogroup(p, pointmap, vals=ptvalues, labels=ptlabels,
+                        title="how to plot points", action=redraw.iplot.ppp,
+                        pos=pozzie(nextrow))
+  nextrow <- nextrow+1
+
+# plot character size
+  charsize <- 1
+  rpanel::rp.slider(p, charsize, 0, 5, action=redraw.iplot.ppp, 
+                    title="symbol expansion factor (cex)",
+                    initval=1, showvalue=TRUE,
+                    pos=pozzie(nextrow, sticky=""))
+  nextrow <- nextrow+1
+  
+# mark scale
+  if(mtype == "marked") {
+    marx <- x$marks
+    marx <- marx[is.finite(marx)]
+    scal <- mark.scale.default(marx, x$window)
+    markscale <- scal
+    rpanel::rp.slider(p, markscale,
+                      from=scal/10, to = 10*scal,
+                      action=redraw.iplot.ppp,
+                      initval=scal,
+                      title="mark scale factor (markscale)",
+                      showvalue=TRUE,
+                      pos=pozzie(nextrow))
+    nextrow <- nextrow+1
+  }
+
+# button to print a summary at console
+  rpanel::rp.button(p, title="Print summary information",
+                    pos=pozzie(nextrow),
+                    action=function(panel) { print(summary(panel$x)); panel} )
+#  
+#----- Navigation controls ------------
+  nextrow <- 0
+  navpos <- function(n=nextrow,cc=0, ...)
+    append(list(row=n,column=cc,grid="gnavigate"), list(...))
+
+  rpanel::rp.button(p, title="Up", pos=navpos(nextrow,1,sticky=""),
+                    action=function(panel) {
+                        zo <- panel$zoomfactor
+                        ce <- panel$zoomcentre
+                        bb <- panel$bb
+                        height <- sidelengths(bb)[2L]
+                        stepsize <- (height/4)/zo
+                        panel$zoomcentre <- ce + c(0, stepsize)
+                        CommitAndRedraw(panel)
+                        return(panel)
+                    })
+  nextrow <- nextrow + 1
+  rpanel::rp.button(p, title="Left", pos=navpos(nextrow,0,sticky="w"),
+                    action=function(panel) {
+                        zo <- panel$zoomfactor
+                        ce <- panel$zoomcentre
+                        bb <- panel$bb
+                        width <- sidelengths(bb)[1L]
+                        stepsize <- (width/4)/zo
+                        panel$zoomcentre <- ce - c(stepsize, 0)
+                        CommitAndRedraw(panel)
+                        return(panel)
+                    })
+  rpanel::rp.button(p, title="Right", pos=navpos(nextrow,2,sticky="e"),
+                    action=function(panel) {
+                        zo <- panel$zoomfactor
+                        ce <- panel$zoomcentre
+                        bb <- panel$bb
+                        width <- sidelengths(bb)[1L]
+                        stepsize <- (width/4)/zo
+                        panel$zoomcentre <- ce + c(stepsize, 0)
+                        CommitAndRedraw(panel)
+                        return(panel)
+                    })
+  nextrow <- nextrow + 1
+  rpanel::rp.button(p, title="Down", pos=navpos(nextrow,1,sticky=""),
+                    action=function(panel) {
+                        zo <- panel$zoomfactor
+                        ce <- panel$zoomcentre
+                        bb <- panel$bb
+                        height <- sidelengths(bb)[2L]
+                        stepsize <- (height/4)/zo
+                        panel$zoomcentre <- ce - c(0, stepsize)
+                        CommitAndRedraw(panel)
+                        return(panel)
+            })
+  nextrow <- nextrow + 1
+
+  rpanel::rp.button(p, title="Zoom In", pos=navpos(nextrow,1,sticky=""),
+                    action=function(panel) {
+                        panel$zoomfactor <- panel$zoomfactor * 2
+                        CommitAndRedraw(panel)
+                        return(panel)
+                    })
+  nextrow <- nextrow + 1
+  rpanel::rp.button(p, title="Zoom Out", pos=navpos(nextrow,1,sticky=""),
+                    action=function(panel) {
+                        panel$zoomfactor <- panel$zoomfactor / 2
+                        CommitAndRedraw(panel)
+                        return(panel)
+                    })
+  nextrow <- nextrow + 1
+  rpanel::rp.button(p, title="Reset", pos=navpos(nextrow,1,sticky=""),
+                    action=function(panel) {
+                        panel$zoomfactor <- 1
+                        panel$zoomcentre <- panel$bbmid
+                        CommitAndRedraw(panel)
+                        return(panel)
+                    })
+  nextrow <- nextrow + 1
+  rpanel::rp.button(p, title="Redraw", pos=navpos(nextrow,1,sticky=""),
+                    action=redraw.iplot.ppp)
+  nextrow <- nextrow+1
+# quit button 
+  rpanel::rp.button(p, title="Quit", quitbutton=TRUE,
+                    pos=navpos(nextrow, 1, sticky=""),
+                    action= function(panel) { panel })
+
+  invisible(NULL)
+}
+
+
+  # Function to redraw the whole shebang
+  redraw.iplot.ppp <- function(panel) {
+    rpanel::rp.tkrreplot(panel, mytkr)
+    panel
+  }
+
+
+# Function executed when data display is clicked
+
+  click.iplot.ppp <- function(panel, x, y) {
+    if(panel$split) {
+      cat("Mouse interaction is not supported when the point pattern is split\n")
+    } else {
+      panel$zoomcentre <- panel$zoomcentre +
+        (c(x,y) - panel$bbmid)/panel$zoomfactor
+      CommitAndRedraw(panel)
+    }
+    return(panel)
+  }
+
+# function that updates the plot when the control panel is operated
+
+do.iplot.ppp <- function(panel) { 
+  use.marks <- TRUE
+  pch <- 16
+  switch(panel$pointmap,
+         marks={
+           use.marks <- TRUE
+           pch <- NULL
+         }, 
+         o = {
+           use.marks <- FALSE
+           pch <- 1
+         }, 
+         bullet = {
+           use.marks <- FALSE
+           pch <- 16
+         },
+         plus = {
+           use.marks <- FALSE
+           pch <- 3
+         })
+  # scale and clip the pattern
+  x <- panel$x
+  w     <- panel$w
+  z     <- panel$zoomfactor
+  if(is.null(z)) z <- 1
+  ce    <- panel$zoomcentre
+  bb    <- panel$bb
+  bbmid <- panel$bbmid
+  scalex <- shift(scalardilate(shift(x, -ce), z), bbmid)
+  scalew <- shift(scalardilate(shift(w, -ce), z), bbmid)
+  scalex <- scalex[, bb]
+  scalew <- intersect.owin(scalew, bb, fatal=FALSE)
+  # determine what is plotted under the clipped pattern
+  blankargs <- list(type="n")
+  dashargs  <- list(lty=3, border="red")
+  panel.begin <- 
+    if(is.null(scalew)) {
+      # empty intersection; just create the plot space
+      layered(bb,          plotargs=list(blankargs))
+    } else if(identical(bb, scalew)) {
+      if(z == 1) {
+        # original state
+        # window is rectangular 
+        # plot the data window as a solid black rectangle
+        layered(bb, scalew,  plotargs=list(blankargs, list(lwd=2)))
+      } else {
+        # zoom view is entirely inside window
+        # plot the clipping region as a red dashed rectangle
+        layered(bb, plotargs=list(dashargs))
+      }
+    } else {
+      # field of view is not a subset of window
+      # plot the clipping region as a red dashed rectangle
+      # Then add the data window
+      layered(bb, scalew, plotargs=list(dashargs, list(invert=TRUE)))
+    }
+
+  # draw it
+#  opa <- par(ask=FALSE)
+  if(panel$mtype == "multitype" && panel$split) {
+    scalex <- split(scalex, un=(panel$pointmap != "marks"))
+    plot(scalex, main=panel$xname, 
+         use.marks=use.marks, pch=pch, cex=panel$charsize,
+         panel.begin=panel.begin)
+  } else {
+    # draw scaled & clipped window
+    plot(panel.begin, main=panel$xname)
+    # add points
+    if(panel$mtype == "marked" && panel$pointmap == "marks") {
+      plot(scalex, add=TRUE, use.marks=use.marks, markscale=panel$markscale)
+    } else {
+      plot(scalex, add=TRUE, use.marks=use.marks, pch=pch, cex=panel$charsize)
+    }
+  }
+#  par(opa)
+  panel
+}
+
+CommitAndRedraw <- function(panel) {
+  # hack to ensure that panel is immediately updated in rpanel
+  kraever("rpanel")
+  ## This is really a triple-colon!
+  rpanel:::rp.control.put(panel$panelname, panel)
+  # now redraw it
+  redraw.iplot.ppp(panel)
+}
+
+iplot.ppp
+})
+
diff --git a/R/iplotlayered.R b/R/iplotlayered.R
new file mode 100644
index 0000000..71d2fa1
--- /dev/null
+++ b/R/iplotlayered.R
@@ -0,0 +1,314 @@
+#
+# interactive plot 
+#
+#   $Revision: 1.13 $   $Date: 2017/02/07 07:47:20 $
+#
+#
+
+iplot.default <- function(x, ..., xname) {
+ if(missing(xname))
+    xname <- short.deparse(substitute(x))
+ x <- as.layered(x)
+ iplot(x, ..., xname=xname)
+}
+
+iplot.layered <- local({
+
+  CommitAndRedraw <- function(panel) {
+    ## hack to ensure that panel is immediately updated in rpanel
+    kraever("rpanel")
+    ## This is really a triple-colon!
+    rpanel:::rp.control.put(panel$panelname, panel)
+    ## now redraw it
+    redraw.iplot.layered(panel)
+  }
+  
+  faster.layers <- function(x, visible) {
+    if(any(islinnet <- unlist(lapply(x, inherits, what="linnet")))) {
+      # convert linnet layers to psp, for efficiency
+      x[islinnet] <- lapply(x[islinnet], as.psp)
+    }
+    repeat{
+      islpp <- unlist(lapply(x, inherits, what="lpp"))
+      if(!any(islpp))
+        break
+      # convert an lpp layer to two layers: psp and ppp, for efficiency
+      ii <- min(which(islpp))
+      pl <- layerplotargs(x)
+      n <- length(x)
+      xpre <- if(ii == 1) NULL else x[1:(ii-1)]
+      xpost <- if(ii == n) NULL else x[(ii+1L):n]
+      ppre <- if(ii == 1) NULL else pl[1:(ii-1)]
+      ppost <- if(ii == n) NULL else pl[(ii+1):n]
+      a <- as.psp(as.linnet(x[[ii]]))
+      b <- as.ppp(x[[ii]])
+      x <- layered(LayerList=c(xpre, list(a, b), xpost),
+                   plotargs=unname(c(ppre, pl[ii], pl[ii], ppost)))
+      visible <- visible[if(ii == 1) c(1, seq_len(n)) else
+                         if(ii == n) c(seq_len(n), n) else
+                         c(1:(ii-1), ii, ii, (ii+1):n)]
+    }
+    attr(x, "visible") <- visible
+    return(x)
+  }
+  
+iplot.layered <- function(x, ..., xname, visible) {
+  if(missing(xname))
+    xname <- short.deparse(substitute(x))
+  verifyclass(x, "layered")
+
+  if(missing(visible) || is.null(visible)) {
+    visible <- rep(TRUE, length(x))
+  } else if(length(visible) == 1) {
+    visible <- rep(visible, length(x))
+  } else stopifnot(length(visible) == length(x))
+
+  kraever("rpanel")
+
+  x <- faster.layers(x, visible)
+  visible <- attr(x, "visible")
+
+  x <- freeze.colourmaps(x)
+  bb <- as.rectangle(as.owin(x))
+  bbmid <- unlist(centroid.owin(bb))
+
+
+  lnames <- names(x)
+  if(sum(nzchar(lnames)) != length(x))
+    lnames <- paste("Layer", seq_len(length(x)))
+  ##
+  p <- rpanel::rp.control(paste("iplot(", xname, ")", sep=""), 
+                          x=x,
+                          w=as.owin(x),
+                          xname=xname,
+                          layernames=lnames,
+                          bb=bb,
+                          bbmid=bbmid,
+                          zoomfactor=1,
+                          zoomcentre=bbmid,
+                          which = visible,
+                          size=c(700, 400))
+
+# Split panel into three
+# Left: plot controls
+# Middle: data
+# Right: navigation/zoom
+  rpanel::rp.grid(p, "gcontrols", pos=list(row=0,column=0))
+  rpanel::rp.grid(p, "gdisplay",  pos=list(row=0,column=1))
+  rpanel::rp.grid(p, "gnavigate", pos=list(row=0,column=2))
+
+#----- Data display ------------
+
+  # This line is to placate the package checker
+  mytkr <- NULL
+
+  # Create data display panel 
+  rpanel::rp.tkrplot(p, mytkr, plotfun=do.iplot.layered,
+                     action=click.iplot.layered,
+                     pos=list(row=0,column=0,grid="gdisplay"))
+
+  
+#----- Plot controls ------------
+  nextrow <- 0
+  pozzie <- function(n=nextrow, ...)
+    append(list(row=n,column=0,grid="gcontrols"), list(...))
+  
+# main title
+  rpanel::rp.textentry(p, xname, action=redraw.iplot.layered,
+                       title="Plot title",
+                       pos=pozzie(0))
+  nextrow <- 1
+
+# select some layers
+  nx <- length(x)
+  which <- rep.int(TRUE, nx)
+  if(nx > 1) {
+    rpanel::rp.checkbox(p, which, labels=lnames,
+                        action=redraw.iplot.layered,
+                        title="Select layers to plot",
+                        pos=pozzie(nextrow), sticky="")
+    nextrow <- nextrow + 1
+  }
+  
+# button to print a summary at console
+  rpanel::rp.button(p, title="Print summary information",
+                    pos=pozzie(nextrow),
+                    action=function(panel) {
+                        lapply(lapply(panel$x, summary), print)
+                        return(panel)
+                    })
+#  
+#----- Navigation controls ------------
+  nextrow <- 0
+  navpos <- function(n=nextrow,cc=0, ...)
+    append(list(row=n,column=cc,grid="gnavigate"), list(...))
+
+  rpanel::rp.button(p, title="Up", pos=navpos(nextrow,1,sticky=""),
+                    action=function(panel) {
+                        zo <- panel$zoomfactor
+                        ce <- panel$zoomcentre
+                        bb <- panel$bb
+                        height <- sidelengths(bb)[2L]
+                        stepsize <- (height/4)/zo
+                        panel$zoomcentre <- ce + c(0, stepsize)
+                        CommitAndRedraw(panel)
+                        return(panel)
+                    })
+  nextrow <- nextrow + 1
+  rpanel::rp.button(p, title="Left", pos=navpos(nextrow,0,sticky="w"),
+                    action=function(panel) {
+                        zo <- panel$zoomfactor
+                        ce <- panel$zoomcentre
+                        bb <- panel$bb
+                        width <- sidelengths(bb)[1L]
+                        stepsize <- (width/4)/zo
+                        panel$zoomcentre <- ce - c(stepsize, 0)
+                        CommitAndRedraw(panel)
+                        return(panel)
+                    })
+  rpanel::rp.button(p, title="Right", pos=navpos(nextrow,2,sticky="e"),
+                    action=function(panel) {
+                        zo <- panel$zoomfactor
+                        ce <- panel$zoomcentre
+                        bb <- panel$bb
+                        width <- sidelengths(bb)[1L]
+                        stepsize <- (width/4)/zo
+                        panel$zoomcentre <- ce + c(stepsize, 0)
+                        CommitAndRedraw(panel)
+                        return(panel)
+                    })
+  nextrow <- nextrow + 1
+  rpanel::rp.button(p, title="Down", pos=navpos(nextrow,1,sticky=""),
+                    action=function(panel) {
+                        zo <- panel$zoomfactor
+                        ce <- panel$zoomcentre
+                        bb <- panel$bb
+                        height <- sidelengths(bb)[2L]
+                        stepsize <- (height/4)/zo
+                        panel$zoomcentre <- ce - c(0, stepsize)
+                        CommitAndRedraw(panel)
+                        return(panel)
+                    })
+  nextrow <- nextrow + 1
+
+  rpanel::rp.button(p, title="Zoom In", pos=navpos(nextrow,1,sticky=""),
+                    action=function(panel) {
+                        panel$zoomfactor <- panel$zoomfactor * 2
+                        CommitAndRedraw(panel)
+                        return(panel)
+                    })
+  nextrow <- nextrow + 1
+  rpanel::rp.button(p, title="Zoom Out", pos=navpos(nextrow,1,sticky=""),
+                    action=function(panel) {
+                        panel$zoomfactor <- panel$zoomfactor / 2
+                        CommitAndRedraw(panel)
+                        return(panel)
+                    })
+  nextrow <- nextrow + 1
+  rpanel::rp.button(p, title="Reset", pos=navpos(nextrow,1,sticky=""),
+                    action=function(panel) {
+                        panel$zoomfactor <- 1
+                        panel$zoomcentre <- panel$bbmid
+                        CommitAndRedraw(panel)
+                        return(panel)
+                    })
+  nextrow <- nextrow + 1
+  rpanel::rp.button(p, title="Redraw", pos=navpos(nextrow,1,sticky=""),
+                    action=redraw.iplot.layered)
+  nextrow <- nextrow+1
+# quit button 
+  rpanel::rp.button(p, title="Quit", quitbutton=TRUE,
+            pos=navpos(nextrow, 1, sticky=""),
+            action= function(panel) { panel })
+
+  invisible(NULL)
+}
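+
+## Usage sketch (interactive; needs the 'rpanel' package; 'cells' is a
+## standard spatstat dataset; not run):
+if(FALSE) {
+  iplot(layered(Window(cells), cells))
+}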
+
+
+  # Function to redraw the entire display
+  redraw.iplot.layered <- function(panel) {
+    rpanel::rp.tkrreplot(panel, mytkr)
+    panel
+  }
+
+
+# Function executed when data display is clicked
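+# (the click position is in display coordinates; its offset from the box
+# midpoint is divided by the zoom factor to obtain the corresponding shift
+# of the zoom centre in data coordinates)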
+
+  click.iplot.layered <- function(panel, x, y) {
+    panel$zoomcentre <- panel$zoomcentre +
+      (c(x,y) - panel$bbmid)/panel$zoomfactor
+    CommitAndRedraw(panel)
+    return(panel)
+  }
+
+# function that updates the plot when the control panel is operated
+
+do.iplot.layered <- function(panel) { 
+  # scale and clip the pattern
+  x <- panel$x[panel$which]
+  w     <- panel$w
+  z     <- panel$zoomfactor
+  if(is.null(z)) z <- 1
+  ce    <- panel$zoomcentre
+  bb    <- panel$bb
+  bbmid <- panel$bbmid
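+  # The displayed view maps a data location u to bbmid + z * (u - ce):
+  # translate the zoom centre to the origin, dilate by the zoom factor,
+  # then translate the origin to the midpoint of the bounding box.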
+  scalex <- shift(scalardilate(shift(x, -ce), z), bbmid)
+  scalew <- shift(scalardilate(shift(w, -ce), z), bbmid)
+  scalex <- scalex[, bb]
+  scalew <- intersect.owin(scalew, bb, fatal=FALSE)
+  # determine what is plotted under the clipped pattern
+  blankargs <- list(type="n")
+  dashargs  <- list(lty=3, border="red")
+  panel.begin <- 
+    if(is.null(scalew)) {
+      # empty intersection; just create the plot space
+      layered(bb,          plotargs=list(blankargs))
+    } else if(identical(bb, scalew)) {
+      if(z == 1) {
+        # original state
+        # window is rectangular 
+        # plot the data window as a solid black rectangle
+        layered(bb, scalew,  plotargs=list(blankargs, list(lwd=2)))
+      } else {
+        # zoom view is entirely inside window
+        # plot the clipping region as a red dashed rectangle
+        layered(bb, plotargs=list(dashargs))
+      }
+    } else {
+      # field of view is not a subset of window
+      # plot the clipping region as a red dashed rectangle
+      # Then add the data window
+      layered(bb, scalew, plotargs=list(dashargs, list(invert=TRUE)))
+    }
+  
+  # draw it
+  opa <- par(ask=FALSE)
+  plot(panel.begin, main=panel$xname)
+  plot(scalex, add=TRUE)
+  par(opa)
+  panel
+}
+
+freeze.colourmaps <- function(x) {
+  # tweak a layered object to ensure that
+  # the colours of image layers don't change with zoom/pan
+  isim <- unlist(lapply(x, is.im))
+  if(any(isim)) {
+    # ensure there are plotargs
+    pl <- attr(x, "plotargs")
+    if(is.null(pl))
+      pl <- rep.int(list(list()), length(x))
+    # make sure the plotargs include 'zlim'
+    for(i in which(isim)) {
+      x.i <- x[[i]]
+      if(x.i$type %in% c("integer", "real")) 
+        pl[[i]] <- resolve.defaults(pl[[i]], list(zlim=range(x.i)))
+    }
+    # put back
+    attr(x, "plotargs") <- pl
+  }
+  return(x) 
+}
+
+iplot.layered
+})
diff --git a/R/ippm.R b/R/ippm.R
new file mode 100755
index 0000000..6a5a3b5
--- /dev/null
+++ b/R/ippm.R
@@ -0,0 +1,257 @@
+#
+# ippm.R
+#
+#   $Revision: 2.24 $   $Date: 2017/07/18 00:38:31 $
+#
+# Fisher scoring algorithm for irregular parameters in ppm trend
+#
+
+ippm <- local({
+
+  chucknames <- c("iScore", "start", "nlm.args", "silent", "warn.unused")
+
+  hasarg <- function(f,a) { a %in% names(formals(f)) }
+  
+  ippm <- function(Q, ...,
+                   iScore=NULL, 
+                   start=list(),
+                   covfunargs=start,
+                   nlm.args=list(stepmax=1/2),
+                   silent=FALSE,
+                   warn.unused=TRUE) {
+    ## remember call
+    cl <- match.call()
+    callframe <- parent.frame()
+    callstring <- short.deparse(sys.call())
+    ##
+    ppmcall <- cl[!(names(cl) %in% chucknames)]
+    ppmcall[[1L]] <- as.name('ppm')
+    ## validate
+    if(!is.list(start))
+      stop("start should be a list of initial values for irregular parameters")
+    if(length(start) == 0) {
+      ppmcall <- ppmcall[names(ppmcall) != "covfunargs"]
+      return(eval(ppmcall, callframe))
+    }
+    if(!is.null(iScore)) {
+      if(!is.list(iScore) || length(iScore) != length(start))
+        stop("iScore should be a list of the same length as start")
+      stopifnot(identical(names(iScore), names(start)))
+      if(!all(sapply(iScore, is.function)))
+        stop("iScore should be a list of functions")
+    }
+    ##
+    smap <- match(names(start), names(covfunargs))
+    if(anyNA(smap))
+      stop("variables in start should be a subset of variables in covfunargs")
+    covfunargs[smap] <- start
+    ## fit the initial model and extract information
+    ppmcall$covfunargs <- covfunargs
+    fit0 <- eval(ppmcall, callframe)
+#    lpl0 <- fit0$maxlogpl
+#    p <- length(coef(fit0))
+    ## examine covariates and trend
+    covariates <- fit0$covariates
+    isfun <- sapply(covariates, is.function)
+    covfuns <- covariates[isfun]
+    ## determine which covariates depend on which irregular parameters
+    pnames <- names(start)
+    depmat <- matrix(FALSE, nrow=length(covfuns), ncol=length(pnames))
+    rownames(depmat) <- names(covfuns)
+    colnames(depmat) <- pnames
+    for(j in 1:length(pnames))
+      depmat[,j] <- sapply(covfuns, hasarg, pnames[j])
+    ## find covariates that depend on ANY irregular parameter 
+    depvar <- rownames(depmat)[apply(depmat, 1L, any)]
+    ## check that these covariates appear only in offset terms
+    covnames.fitted <- model.covariates(fit0, fitted=TRUE,  offset=FALSE)
+    if(any(uhoh <- depvar %in% covnames.fitted))
+      stop(paste(ngettext(sum(uhoh), "The covariate", "The covariates"),
+                 commasep(sQuote(depvar[uhoh])),
+                 "should appear only in offset terms"))
+    ## check that every irregular parameter to be updated appears somewhere 
+    cov.names.offset <- model.covariates(fit0, fitted=FALSE,  offset=TRUE)
+    covfun.names.offset <- intersect(cov.names.offset, names(covfuns))
+    usearg <- apply(depmat[covfun.names.offset, , drop=FALSE], 2L, any)
+    if(!all(usearg)) {
+      if(warn.unused) {
+        nbad <- sum(!usearg)
+        warning(paste("Cannot maximise over the irregular",
+                      ngettext(nbad, "parameter", "parameters"),
+                      commasep(sQuote(names(usearg)[!usearg])),
+                      ngettext(nbad, "because it is", "because they are"),
+                      "not used in any term of the model"))
+      }
+      ## restrict 
+      start <- start[usearg]
+      if(!is.null(iScore)) iScore <- iScore[usearg]
+      pnames <- names(start)
+    }
+    if(length(start) == 0) {
+      ppmcall <- ppmcall[names(ppmcall) != "covfunargs"]
+      return(eval(ppmcall, callframe))
+    }
+    ## parameters for objective function
+    fdata <- list(fit0=fit0,
+                  nreg=length(coef(fit0)),
+                  covfunargs=covfunargs,
+                  smap=smap,
+                  pnames=pnames,
+                  iScore=iScore)
+    ## minimise objective
+    startvec <- unlist(start)
+    typsize <- abs(startvec)
+    typsize <- pmax(typsize, min(typsize[typsize > 0]))
+    g <- do.call(nlm,
+                 resolve.defaults(list(f=objectivefun,
+                                       p=startvec,
+                                       thedata=fdata),
+                                  nlm.args,
+                                  list(typsize=typsize)))
+    popt <- g$estimate
+    ## detect error states
+    icode <- g$code
+    if(!silent && icode > 2) {
+      ## guard against codes outside the table (nlm uses codes 1-5)
+      errmess <- if(icode <= length(nlmcodes)) nlmcodes[[icode]] else NULL
+      if(!is.null(errmess)) warning(errmess) else
+        warning("Unrecognised error code ", icode,
+                " returned from nlm", call.=FALSE)
+    }
+    ## return optimised model
+    covfunargs[smap] <- popt
+    attr(covfunargs, "fitter") <- "ippm"
+    attr(covfunargs, "free") <- names(start)
+    fit <- update(fit0, covfunargs=covfunargs, use.internal=TRUE)
+    fit$dispatched <- fit[c("call", "callstring", "callframe")]
+    fit$call <- cl
+    fit$callstring <- callstring
+    fit$callframe <- callframe
+    fit$iScore <- iScore
+    class(fit) <- c("ippm", class(fit))
+    return(fit)
+  }
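+
+  ## Usage sketch (hypothetical pattern 'X' and covariate 'slope'; the
+  ## irregular parameter 'beta' must enter through an offset term; not run):
+  if(FALSE) {
+    slope <- function(x, y, beta) { exp(beta * x) }
+    fit <- ippm(X ~ offset(log(slope)), start=list(beta=0.5))
+  }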
+
+  ## define objective function
+  objectivefun <- function(param, thedata) {
+    with(thedata, {
+      ## fit model with current irregular parameters
+      param <- as.list(param)
+      names(param) <- pnames
+      covfunargs[smap] <- param
+      fit <- update(fit0, covfunargs=covfunargs, use.internal=TRUE)
+      lpl <- logLik(fit, warn=FALSE)
+      ## return negative logL because nlm performs *minimisation*
+      value <- -as.numeric(lpl)
+      ## compute derivatives
+      stuff <- ppmInfluence(fit, what="score",
+                            iScore=iScore,
+                            iArgs=param)
+      score <- stuff$score
+      if(length(score) == length(coef(fit)) + length(param)) 
+        attr(value, "gradient") <- -score[-(1:nreg), drop=FALSE]
+      ## attr(value, "hessian") <- -hess[-(1:nreg), -(1:nreg), drop=FALSE]
+      return(value)
+    })
+  }
+
+  ## from help(nlm)
+  nlmcodes <- list(c("Relative gradient is close to zero; ",
+                     "current iterate is probably solution"),
+                   c("Successive iterates are within tolerance; ",
+                     "current iterate is probably solution"),
+                   c("Last global step failed to locate a point ",
+                     "lower than current estimate. ",
+                     "Either current estimate is an approximate ",
+                     "local minimum of the function ",
+                     "or 'steptol' is too small"),
+                   "Iteration limit exceeded",
+                   c("Maximum step size 'stepmax' ",
+                     "exceeded five consecutive times. ",
+                     "Either the function is unbounded below, ",
+                     "becomes asymptotic to a finite value ",
+                     "from above in some direction, ",
+                     "or 'stepmax' is too small"))
+
+  ippm
+})
+
+
+update.ippm <- local({
+
+  newformula <- function(old, change, eold, enew) {
+    old <- eval(old, eold)
+    change <- eval(change, enew)
+    old <- as.formula(old, env=eold)
+    change <- as.formula(change, env=enew)
+    update.formula(old, change)
+  }
+
+  update.ippm <- function(object, ..., envir=environment(terms(object))) {
+#    call <- match.call()
+    new.call <- old.call <- object$call
+    old.callframe <- object$callframe
+    Qold <- eval(old.call$Q, as.list(envir), enclos=old.callframe)
+    argh <- list(...)
+    if(any(isfmla <- sapply(argh, inherits, what="formula"))) {
+      if(sum(isfmla) > 1)
+        stop("Syntax not understood: several arguments are formulas")
+      i <- min(which(isfmla))
+      new.fmla <- argh[[i]]
+      argh <- argh[-i]
+      if(inherits(Qold, "formula")) {
+        ## formula will replace 'Q'
+        if(is.null(lhs.of.formula(new.fmla))) {
+          f <- (. ~ x)
+          f[[3L]] <- new.fmla[[2L]]
+          new.fmla <- f
+        }
+        new.call$Q <- newformula(Qold, new.fmla, old.callframe, envir)
+      } else if(inherits(Qold, c("ppp", "quad"))) {
+        ## formula will replace 'trend' and may replace 'Q'
+        new.fmla <- newformula(formula(object), new.fmla, old.callframe, envir)
+        if(!is.null(lhs <- lhs.of.formula(new.fmla))) {
+          newQ <- eval(eval(substitute(substitute(l, list("."=Q)),
+                                       list(l=lhs,
+                                            Q=Qold))),
+                       envir=as.list(envir), enclos=old.callframe)
+          new.call$Q <- newQ
+        }
+        new.fmla <- rhs.of.formula(new.fmla)
+        if("trend" %in% names(old.call)) {
+          new.call$trend <- new.fmla
+        } else {
+          ## find which argument in the original call was a formula
+          wasfmla <- sapply(old.call, formulaic,
+                            envir=as.list(envir),
+                            enclos=old.callframe)
+          if(any(wasfmla)) {
+            new.call[[min(which(wasfmla))]] <- new.fmla
+          } else {
+            new.call$trend <- new.fmla
+          }
+        }
+      }
+    }
+    ## silence the warnings about unused covfunargs (unless overruled)
+    new.call$warn.unused <- FALSE
+    ## other arguments
+    if(length(argh) > 0) {
+      nama <- names(argh)
+      named <- if(is.null(nama)) rep(FALSE, length(argh)) else nzchar(nama)
+      if(any(named))
+        new.call[nama[named]] <- argh[named]
+      if(any(!named))
+        new.call[length(new.call) + 1:sum(!named)] <- argh[!named]
+    }
+    result <- eval(new.call, as.list(envir), enclos=old.callframe)
+    return(result)
+  }
+
+  formulaic <- function(z, envir, enclos) {
+    u <- try(eval(z, envir, enclos))
+    return(inherits(u, "formula"))
+  }
+
+  update.ippm
+})
+
diff --git a/R/is.cadlag.R b/R/is.cadlag.R
new file mode 100755
index 0000000..a799184
--- /dev/null
+++ b/R/is.cadlag.R
@@ -0,0 +1,11 @@
+is.cadlag <- function(s) {
+  # check whether a step function is right-continuous with left limits
+  # (cadlag): its value at each jump point should equal its value
+  # just to the right of that point
+  if(!is.stepfun(s)) stop("s is not a step function")
+  r <- knots(s)
+  h <- s(r)                  # values at the jump points
+  n <- length(r)
+  r1 <- c(r[-1L], r[n] + 1)
+  rm <- (r + r1)/2           # points strictly to the right of each knot
+  hm <- s(rm)
+  identical(all.equal(h, hm), TRUE)
+}
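+
+## Example sketch: stepfun(..., right=FALSE) builds a right-continuous
+## step function, which is cadlag:
+if(FALSE) {
+  s <- stepfun(1:3, c(0, 1, 2, 3), right=FALSE)
+  is.cadlag(s)   # TRUE
+}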
diff --git a/R/is.subset.owin.R b/R/is.subset.owin.R
new file mode 100755
index 0000000..d2f014a
--- /dev/null
+++ b/R/is.subset.owin.R
@@ -0,0 +1,87 @@
+#
+#  is.subset.owin.R
+#
+#  $Revision: 1.14 $   $Date: 2017/02/07 07:47:20 $
+#
+#  Determine whether a window is a subset of another window
+#
+#  is.subset.owin()
+#
+
+is.subset.owin <- local({
+  
+  is.subset.owin <- function(A, B) {
+    A <- as.owin(A)
+    B <- as.owin(B)
+
+    if(identical(A, B))
+      return(TRUE)
+
+    A <- rescue.rectangle(A)
+    B <- rescue.rectangle(B)
+  
+    if(is.rectangle(B)) {
+      # Some cases can be resolved using convexity of B
+    
+      # (1) A is also a rectangle
+      if(is.rectangle(A)) {
+        xx <- A$xrange[c(1L,2L,2L,1L)]
+        yy <- A$yrange[c(1L,1L,2L,2L)]
+        ok <- inside.owin(xx, yy, B)
+        return(all(ok))
+      } 
+      # (2) A is polygonal
+      # Then A is a subset of B iff,
+      # for every constituent polygon of A with positive sign,
+      # the vertices are all in B
+      if(is.polygonal(A)) {
+        ok <- unlist(lapply(A$bdry, okpolygon, B=B))
+        return(all(ok))
+      }
+      # (3) Sufficient condition:
+      # if the bounding box of A is a subset of B
+      # then, a fortiori, A is a subset of B
+      AA <- boundingbox(A)
+      if(is.subset.owin(AA, B))
+        return(TRUE)
+    }
+
+    if(!is.mask(A) && !is.mask(B)) {
+      # rectangles or polygonal domains
+      if(!all(inside.owin(vertices(A), , B)))
+        return(FALSE)
+      # all vertices of A are inside B.
+      if(is.convex(B))
+        return(TRUE)
+      A <- as.polygonal(A)
+      B <- as.polygonal(B)
+      if(length(B$bdry) == 1 && length(A$bdry) == 1) {
+        # two simply-connected sets 
+        # check for boundary crossings
+        bx <- crossing.psp(edges(A), edges(B))
+        return(npoints(bx) == 0)
+      } else {
+        # compare area of intersection with area of A
+        return(overlap.owin(A,B) >= area(A))
+      }
+    }
+  
+   # Discretise
+    a <- as.mask(A)
+    b <- as.mask(B)
+    rxy <- rasterxy.mask(a, drop=TRUE)
+    xx <- rxy$x
+    yy <- rxy$y
+    ok <- inside.owin(xx, yy, b)
+    return(all(ok))
+    
+  }
+
+  okpolygon <- function(a, B) {
+    if(Area.xypolygon(a) < 0) return(TRUE)
+    ok <- inside.owin(a$x, a$y, B)
+    return(all(ok))
+  }
+
+  is.subset.owin
+})
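+
+## Example sketch (square(r) is the spatstat window [0, r]^2):
+if(FALSE) {
+  is.subset.owin(square(0.5), square(1))   # TRUE
+  is.subset.owin(square(1), square(0.5))   # FALSE
+}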
diff --git a/R/istat.R b/R/istat.R
new file mode 100755
index 0000000..137264e
--- /dev/null
+++ b/R/istat.R
@@ -0,0 +1,165 @@
+#
+# interactive analysis of point patterns
+#
+#   $Revision: 1.23 $   $Date: 2015/10/21 09:06:57 $
+#
+#
+
+istat <- function(x, xname) {
+  if(missing(xname))
+    xname <- short.deparse(substitute(x))
+  verifyclass(x, "ppp")
+  kraever("rpanel")
+  # generate simulations of CSR for use in envelopes
+  simx <- envelope(x, fun=NULL, nsim=39, verbose=FALSE,
+                   internal=list(csr=TRUE, eject="patterns"))
+  # initial value of smoothing parameter
+  sigma0 <- with(x$window, min(diff(xrange),diff(yrange)))/8
+  # create panel
+  p <- rpanel::rp.control(title=paste("istat(", xname, ")", sep=""),
+                          panelname="istat",
+                          size=c(600,400),
+                          x=x,           # point pattern
+                          xname=xname,   # name of point pattern
+                          simx=simx,   # simulated realisations of CSR
+                          stat="data",
+                          envel="none",
+                          sigma=sigma0)
+# Split panel into two halves  
+# Left half of panel: display
+# Right half of panel: controls
+  rpanel::rp.grid(p, "gdisplay",
+                  pos=list(row=0,column=0), width=400, height=400)
+  rpanel::rp.grid(p, "gcontrols",
+                  pos=list(row=0,column=1), width=200, height=400)
+
+#----- Display side ------------
+
+  # This line is to placate the package checker
+  mytkr2 <- NULL
+  
+  rpanel::rp.tkrplot(p, mytkr2, do.istat,
+                     pos=list(row=0,column=0,grid="gdisplay"))
+
+  redraw <- function(panel) {
+    rpanel::rp.tkrreplot(panel, mytkr2)
+    panel
+  }
+  
+#----- Control side ------------
+  nextrow <- 0
+  pozzie <- function(n=nextrow,s='w')
+    list(row=n,column=0,grid="gcontrols",sticky=s)
+  
+# choice of summary statistic
+  ftable <- c(data="data",
+              density="kernel smoothed",
+              Kest="K-function",
+              Lest="L-function",
+              pcf="pair correlation",
+              Kinhom="inhomogeneous K",
+              Linhom="inhomogeneous L",
+              Fest="empty space function F",
+              Gest="nearest neighbour function G",
+              Jest="J-function")
+  fvals <- names(ftable)
+  flabs <- as.character(ftable)
+  stat <- NULL
+  rpanel::rp.radiogroup(p, stat, vals=fvals, labels=flabs,
+                        title="statistic", action=redraw,
+                        pos=pozzie(0))
+  nextrow <- 1
+# envelopes?
+  envel <- NULL
+  evals <- c("none", "pointwise", "simultaneous")
+  elabs <- c("No simulation envelopes",
+             "Pointwise envelopes under CSR",
+             "Simultaneous envelopes under CSR")
+  rpanel::rp.radiogroup(p, envel, vals=evals, labels=elabs,
+                        title="Simulation envelopes", action=redraw,
+                        pos=pozzie(nextrow))
+  nextrow <- nextrow + 1
+# smoothing parameters
+  sigma <- NULL
+  rect <- as.rectangle(x$window)
+  winwid  <- min(abs(diff(rect$xrange)), abs(diff(rect$yrange)))
+  rpanel::rp.slider(p, sigma, winwid/80, winwid/2, action=redraw, 
+                    title="sigma",
+                    initval=winwid/8, showvalue=TRUE, pos=pozzie(nextrow, ''))
+  nextrow <- nextrow + 1
+  pcfbw <- pcfbwinit <- 0.15/sqrt(5 * x$n/area(x$window))
+  rpanel::rp.slider(p, pcfbw, pcfbwinit/10, 4 * pcfbwinit, action=redraw, 
+                    title="bw", initval=pcfbwinit,
+                    showvalue=TRUE, pos=pozzie(nextrow, ''))
+  nextrow <- nextrow + 1
+# button to print a summary at console
+  rpanel::rp.button(p, title="Print summary information",
+                    action=function(panel) { print(summary(panel$x)); panel},
+                    pos=pozzie(nextrow))
+  nextrow <- nextrow + 1
+# quit button 
+  rpanel::rp.button(p, title="Quit", quitbutton=TRUE,
+                    action= function(panel) { panel }, pos=pozzie(nextrow))
+
+  invisible(NULL)
+}
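+
+## Usage sketch (interactive; needs the 'rpanel' package; 'cells' is a
+## standard spatstat dataset; not run):
+if(FALSE) istat(cells)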
+
+# function that updates the plot when the control panel is operated
+
+do.istat <- function(panel) { 
+  x     <- panel$x
+  xname <- panel$xname
+  envel <- panel$envel
+  stat  <- panel$stat
+  sigma <- panel$sigma
+  simx  <- panel$simx
+  if(stat=="data") {
+    plot(x, main=xname)
+    return(panel)
+  }
+  out <- 
+    switch(envel,
+           none=switch(stat,
+             density=density(x, sigma=sigma),
+             Kest=Kest(x),
+             Lest=Lest(x), 
+             pcf=pcf(x, bw=panel$pcfbw),
+             Kinhom=Kinhom(x, sigma=sigma),
+             Linhom=Linhom(x, sigma=sigma),
+             Fest=Fest(x),
+             Gest=Gest(x),
+             Jest=Jest(x)),
+           pointwise=switch(stat,
+             density=density(x, sigma=sigma),
+             Kest=envelope(x, Kest, nsim=39, simulate=simx),
+             Lest=envelope(x, Lest, nsim=39, simulate=simx),
+             pcf=envelope(x, pcf, bw=panel$pcfbw, nsim=39, simulate=simx),
+             Kinhom=envelope(x, Kinhom, nsim=39, sigma=sigma, simulate=simx),
+             Linhom=envelope(x, Linhom, nsim=39, sigma=sigma, simulate=simx),
+             Fest=envelope(x, Fest, nsim=39, simulate=simx),
+             Gest=envelope(x, Gest, nsim=39, simulate=simx),
+             Jest=envelope(x, Jest, nsim=39, simulate=simx)),
+           simultaneous=switch(stat,
+             density=density(x, sigma=sigma),
+             Kest=envelope(x, Kest, nsim=19, global=TRUE, simulate=simx),
+             Lest=envelope(x, Lest, nsim=19, global=TRUE, simulate=simx),
+             pcf=envelope(x, pcf, bw=panel$pcfbw, nsim=19, global=TRUE, simulate=simx),
+             Kinhom=envelope(x, Kinhom, nsim=19, sigma=sigma, global=TRUE, simulate=simx),
+             Linhom=envelope(x, Linhom, nsim=19, sigma=sigma, global=TRUE, simulate=simx),
+             Fest=envelope(x, Fest, nsim=19, global=TRUE, simulate=simx),
+             Gest=envelope(x, Gest, nsim=19, global=TRUE, simulate=simx),
+             Jest=envelope(x, Jest, nsim=19, global=TRUE, simulate=simx))
+           )
+  # plot it
+  if(stat %in% c("density", "Kinhom", "Linhom")) {
+    plot(out, main=paste(stat, "(", xname, ", sigma)", sep=""))
+    if(stat == "density")
+      points(x)
+  } else if(stat == "pcf")
+    plot(out, main=paste("pcf(", xname, ", bw)", sep=""))
+  else 
+    plot(out, main=paste(stat, "(", xname, ")", sep=""))
+
+  return(panel)
+}
+
diff --git a/R/kernel2d.R b/R/kernel2d.R
new file mode 100644
index 0000000..0b95db0
--- /dev/null
+++ b/R/kernel2d.R
@@ -0,0 +1,131 @@
+#'
+#'   kernel2d.R
+#'
+#'  Two-dimensional smoothing kernels
+#'
+#'  $Revision: 1.12 $ $Date: 2017/02/07 07:50:52 $
+#'
+
+.Spatstat.2D.KernelTable <- list(
+  #' table entries:
+  #'   d = density of standardised kernel
+  #'   sd = standard deviation of x coordinate, for standardised kernel
+  #'   hw = halfwidth of support of standardised kernel 
+  gaussian=list(
+    d  = function(x,y, ...) { dnorm(x) * dnorm(y) },
+    sd  = 1,
+    hw = 8,
+    symmetric = TRUE),
+  epanechnikov=list(
+    d  = function(x,y, ...) { (2/pi) * pmax(1 - (x^2+y^2), 0) },
+    sd = 1/sqrt(6),
+    hw = 1,
+    symmetric = TRUE),
+  quartic=list(
+    d  = function(x,y, ...) { (3/pi) * pmax(1 - (x^2+y^2), 0)^2 },
+    sd = 1/sqrt(8),
+    hw = 1,
+    symmetric = TRUE),
+  disc=list(
+    d  = function(x,y,...) { (1/pi) * as.numeric(x^2 + y^2 <= 1) },
+    sd = 1/2,
+    hw = 1,
+    symmetric = TRUE)
+)
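+
+#' Consistency note: each standardised density integrates to 1 and 'sd'
+#' is the standard deviation of its x coordinate; e.g. for the
+#' epanechnikov kernel, E[X^2] = (2/pi) * integral of x^2 (1 - x^2 - y^2)
+#' over the unit disc = 1/6, so sd = 1/sqrt(6).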
+
+validate2Dkernel <- function(kernel, fatal=TRUE) {
+  if(is.character(match2DkernelName(kernel))) return(TRUE)
+  if(is.im(kernel) || is.function(kernel)) return(TRUE)
+  if(!fatal) return(FALSE)
+  if(is.character(kernel))
+    stop(paste("Unrecognised choice of kernel", sQuote(kernel),
+               paren(paste("options are",
+                           commasep(sQuote(names(.Spatstat.2D.KernelTable)))))),
+         call.=FALSE)
+  stop(paste("kernel should be a character string,",
+             "a pixel image, or a function (x,y)"),
+       call.=FALSE)
+}
+
+match2DkernelName <- function(kernel) {
+  if(!is.character(kernel) || length(kernel) != 1) return(NULL)
+  nama <- names(.Spatstat.2D.KernelTable)
+  m <- pmatch(kernel, nama)
+  if(is.na(m)) return(NULL)
+  return(nama[m])
+}
+
+lookup2DkernelInfo <- function(kernel) {
+  validate2Dkernel(kernel)
+  kernel <- match2DkernelName(kernel)
+  if(is.null(kernel)) return(NULL)
+  return(.Spatstat.2D.KernelTable[[kernel]])
+}
+
+evaluate2Dkernel <- function(kernel, x, y, sigma=NULL, varcov=NULL, ...,
+                             scalekernel=is.character(kernel)) {
+
+  info <- lookup2DkernelInfo(kernel)
+
+  if(scalekernel) {
+    ## kernel adjustment factor 
+    sdK <- if(is.character(kernel)) info$sd else 1
+    ## transform coordinates to x',y' such that kerfun(x', y')
+    ## yields density k(x,y) at desired bandwidth
+    if(is.null(varcov)) {
+      rr <- sdK/sigma
+      x <- x * rr
+      y <- y * rr
+      const <- rr^2
+    } else {
+      SinvH <- matrixinvsqrt(varcov)
+      rSinvH <- sdK * SinvH
+      XY <- cbind(x, y) %*% rSinvH
+      x <- XY[,1L]
+      y <- XY[,2L]
+      const <- det(rSinvH)
+    }
+  } 
+
+  ## now evaluate kernel
+  
+  if(is.character(kernel)) {
+    kerfun <- info$d
+    result <- kerfun(x, y)
+    if(scalekernel)
+      result <- const * result
+    return(result)
+  }
+
+  if(is.function(kernel)) {
+    argh <- list(...)
+    if(length(argh) > 0)
+      argh <- argh[names(argh) %in% names(formals(kernel))]
+    result <- do.call(kernel, append(list(x, y), argh))
+    if(anyNA(result))
+      stop("NA values returned from kernel function")
+    if(length(result) != length(x))
+      stop("Kernel function returned the wrong number of values")
+    if(scalekernel)
+      result <- const * result
+    return(result)
+  }
+
+  if(is.im(kernel)) {
+    result <- kernel[list(x=x, y=y)]
+    if(anyNA(result))
+      stop("Domain of kernel image is not large enough")
+    if(scalekernel)
+      result <- const * result
+    return(result)
+  } 
+
+  # never reached
+  stop("Unrecognised format for kernel")
+}
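+
+## Consistency sketch (not run): the scaled Gaussian kernel at bandwidth
+## sigma should equal the product of univariate normal densities with
+## sd = sigma:
+if(FALSE) {
+  v1 <- evaluate2Dkernel("gaussian", x=1, y=1, sigma=2)
+  v2 <- dnorm(1, sd=2)^2
+  stopifnot(isTRUE(all.equal(v1, v2)))
+}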
+
diff --git a/R/kernels.R b/R/kernels.R
new file mode 100755
index 0000000..80f05f2
--- /dev/null
+++ b/R/kernels.R
@@ -0,0 +1,275 @@
+#
+# kernels.R
+#
+#  rXXX, dXXX, pXXX and qXXX for kernels
+#
+#  $Revision: 1.17 $  $Date: 2016/07/02 03:36:46 $
+#
+
+match.kernel <- function(kernel) {
+  kernel.map <- c(Gaussian    ="gaussian",
+                  gaussian    ="gaussian",
+                  Normal      ="gaussian",
+                  normal      ="gaussian",
+                  rectangular ="rectangular",
+                  triangular  ="triangular",
+                  Epanechnikov="epanechnikov",
+                  epanechnikov="epanechnikov",
+                  biweight    ="biweight",
+                  cosine      ="cosine",
+                  optcosine   ="optcosine"
+                  )
+  ker <- pickoption("kernel", kernel, kernel.map)
+  return(ker)
+}
+
+kernel.factor <- function(kernel="gaussian") {
+  # This function returns the factor c such that
+  #              h = c * sigma
+  # where sigma is the standard deviation of the kernel, and
+  # h is the corresponding bandwidth parameter as conventionally defined.
+
+  # Conventionally h is defined as a scale factor
+  # relative to the `standard form' of the kernel, namely the 
+  # form with support [-1,1], except in the Gaussian case where
+  # the standard form is N(0,1).
+  
+  # Thus the standard form of the kernel (h=1) has standard deviation 1/c.
+  
+  # The kernel with standard deviation 1 has support [-c,c]
+  # except for gaussian case.
+  
+  kernel <- match.kernel(kernel)
+  switch(kernel,
+         gaussian     = 1,
+         rectangular  = sqrt(3),
+         triangular   = sqrt(6),
+         epanechnikov = sqrt(5),
+         biweight     = sqrt(7),
+         cosine       = 1/sqrt(1/3 - 2/pi^2),
+         optcosine    = 1/sqrt(1 - 8/pi^2))
+}
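+
+# Example: kernel.factor("epanechnikov") is sqrt(5), because the standard
+# form (3/4)(1 - t^2) on [-1,1] has variance 1/5.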
+
+rkernel <- function(n, kernel="gaussian", mean=0, sd=1) {
+  kernel <- match.kernel(kernel)
+  if(kernel == "gaussian")
+    return(rnorm(n, mean=mean, sd=sd))
+  # inverse cdf transformation
+  u <- runif(n)
+  qkernel(u, kernel, mean=mean, sd=sd)
+}
+
+dkernel <- function(x,  kernel="gaussian", mean=0, sd=1) {
+  kernel <- match.kernel(kernel)
+  stopifnot(is.numeric(x))
+  stopifnot(is.numeric(sd) && length(sd) == 1 && sd > 0)
+  a <- sd * kernel.factor(kernel)
+  y <- abs(x-mean)/a
+  dens <-
+    switch(kernel,
+           gaussian       = { dnorm(y) },
+           rectangular    = { ifelse(y < 1, 1/2, 0) },
+           triangular     = { ifelse(y < 1, (1 - y), 0) },
+           epanechnikov   = { ifelse(y < 1, (3/4) * (1 - y^2), 0) },
+           biweight       = { ifelse(y < 1, (15/16) * (1 - y^2)^2, 0) },
+           cosine         = { ifelse(y < 1, (1 + cos(pi * y))/2, 0) },
+           optcosine      = { ifelse(y < 1, (pi/4) * cos(pi * y/2), 0) }
+           )
+  dens/a
+}
+
+pkernel <- function(q, kernel="gaussian", mean=0, sd=1, lower.tail=TRUE){
+  kernel <- match.kernel(kernel)
+  stopifnot(is.numeric(q))
+  stopifnot(is.numeric(sd) && length(sd) == 1 && sd > 0)
+  a <- sd * kernel.factor(kernel)
+  y <- (q-mean)/a
+  switch(kernel,
+         gaussian = {
+           pnorm(y, lower.tail=lower.tail)
+         },
+         rectangular = {
+           punif(y, min=-1, max=1, lower.tail=lower.tail)
+         },
+         triangular = {
+           p <- ifelse(y < -1, 0, ifelse(y > 1, 1,
+                       ifelse(y < 0, y + y^2/2 + 1/2,
+                              y - y^2/2 + 1/2)))
+           if(lower.tail) p else (1 - p)
+         },
+         epanechnikov = {
+           p <- ifelse(y < -1, 0, ifelse(y > 1, 1,
+                        (2 + 3 * y - y^3)/4))
+           if(lower.tail) p else (1 - p)
+         },
+         biweight = {
+           p <- ifelse(y < -1, 0, ifelse(y > 1, 1,
+                       (15 * y - 10 * y^3 + 3 * y^5 + 8)/16))
+           if(lower.tail) p else (1 - p)
+         },
+         cosine = {
+           p <- ifelse(y < -1, 0, ifelse(y > 1, 1,
+                       (y + sin(pi * y)/pi + 1)/2))
+           if(lower.tail) p else (1 - p)
+         },
+         optcosine = {
+           p <- ifelse(y < -1, 0, ifelse(y > 1, 1,
+                       (sin(pi * y/2) + 1)/2))
+           if(lower.tail) p else (1 - p)
+         })
+}
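+
+# Consistency sketch (not run): pkernel is the integral of dkernel, e.g.
+if(FALSE) {
+  integrate(dkernel, -Inf, 0.3, kernel="biweight")$value
+  pkernel(0.3, "biweight")   # should agree to integration tolerance
+}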
+
+qkernel <- function(p, kernel="gaussian", mean=0, sd=1, lower.tail=TRUE) {
+  kernel <- match.kernel(kernel)
+  stopifnot(is.numeric(p))
+  stopifnot(is.numeric(sd) && length(sd) == 1 && sd > 0)
+  a <- sd * kernel.factor(kernel)
+  if(!lower.tail)
+    p <- 1 - p
+  y <-
+    switch(kernel,
+           gaussian = {
+             # p has already been flipped above when lower.tail=FALSE,
+             # so take the lower-tail quantile unconditionally
+             qnorm(p)
+           },
+           rectangular = {
+             qunif(p, min=-1, max=1)
+           },
+           triangular = {
+             ifelse(p < 1/2, sqrt(2 * p) - 1, 1 - sqrt(2 * (1-p)))
+           },
+           epanechnikov = {
+             # solve using `polyroot'
+             yy <- numeric(n <- length(p))
+             yy[p == 0] <- -1
+             yy[p == 1] <-  1
+             inside <- (p != 0) & (p != 1)
+             # coefficients of polynomial (2 + 3 y - y^3)/4
+             z <- c(2, 3, 0, -1)/4
+             for(i in seq(n)[inside]) {
+               sol <- polyroot(z - c(p[i], 0, 0, 0))
+               ok <- abs(Im(sol)) < 1e-6
+               realpart <- Re(sol)
+               ok <- ok & (abs(realpart) <= 1)
+               if(sum(ok) != 1)
+                 stop(paste("Internal error:", sum(ok), "roots of polynomial"))
+               yy[i] <- realpart[ok]
+             }
+             yy
+           },
+           biweight = {
+             # solve using `polyroot'
+             yy <- numeric(n <- length(p))
+             yy[p == 0] <- -1
+             yy[p == 1] <-  1
+             inside <- (p != 0) & (p != 1)
+             # coefficients of polynomial (8 + 15 * y - 10 * y^3 + 3 * y^5)/16
+             z <- c(8, 15, 0, -10, 0, 3)/16
+             for(i in seq(n)[inside]) {
+               sol <- polyroot(z - c(p[i], 0, 0, 0, 0, 0))
+               ok <- abs(Im(sol)) < 1e-6
+               realpart <- Re(sol)
+               ok <- ok & (abs(realpart) <= 1)
+               if(sum(ok) != 1) 
+                 stop(paste("Internal error:", sum(ok), "roots of polynomial"))
+               yy[i] <- realpart[ok]
+             }
+             yy
+           },
+           cosine = {
+             # solve using `uniroot'
+             g <- function(y, pval) { (y + sin(pi * y)/pi + 1)/2 - pval }
+             yy <- numeric(n <- length(p))
+             yy[p == 0] <- -1
+             yy[p == 1] <-  1
+             inside <- (p != 0) & (p != 1)
+             for(i in seq(n)[inside]) 
+               yy[i] <- uniroot(g, c(-1,1), pval=p[i])$root
+             yy
+           },
+           optcosine = {
+             (2/pi) * asin(2 * p - 1)
+           })
+  return(mean + a * y)
+}
+
+# integral of t^m k(t) dt from -Inf to r
+# was:    nukernel(r, m, kernel)
+
+kernel.moment <- local({
+
+  kernel.moment <- function(m, r, kernel="gaussian") {
+    ker <- match.kernel(kernel)
+    if(ker != "gaussian") {
+      r <- pmin(r, 1)
+      r <- pmax(r, -1)
+    }
+    stopifnot(length(m) == 1)
+    if(!(m %in% c(0,1,2)) || (ker %in% c("cosine", "optcosine"))) {
+      ## use generic integration
+      neginf <- if(ker == "gaussian") -10 else -1
+      result <- numeric(length(r))
+      for(i in seq_along(r))
+        result[i] <- integralvalue(kintegrand, lower=neginf, upper=r[i],
+                                   m=m, ker=ker)
+      return(result)
+    }
+    switch(ker,
+           gaussian={
+             if(m == 0) return(pnorm(r)) else
+             if(m == 1) return(-dnorm(r)) else
+             return(pnorm(r) - r * dnorm(r))
+           },
+           rectangular = {
+             if(m == 0) return((r + 1)/2) else
+             if(m == 1) return((r^2 - 1)/4) else
+             return((r^3 + 1)/6)
+           },
+           triangular={
+             m1 <- m+1
+             m2 <- m+2
+             const <- ((-1)^m1)/m1 + ((-1)^m2)/m2
+             answer <- (r^m1)/m1 + ifelse(r < 0, 1, -1) * (r^m2)/m2 - const
+             return(answer)
+           },
+           epanechnikov = {
+             if(m == 0)
+               return((2 + 3*r - r^3)/4)
+             else if(m == 1)
+               return((-3 + 6*r^2 - 3*r^4)/16)
+             else
+               return(( 2 + 5*r^3  - 3* r^5)/20)
+           },
+           biweight = {
+             if(m == 0)
+               return((3*r^5 - 10*r^3 + 15*r + 8)/16)
+             else if(m == 1)
+               return((5*r^6 - 15*r^4 + 15*r^2 -5)/32)
+             else 
+               return((15*r^7 - 42*r^5 + 35*r^3 + 8)/112)
+           },
+           # never reached!
+           cosine={stop("Sorry, not yet implemented for cosine kernel")},
+           optcosine={stop("Sorry, not yet implemented for optcosine kernel")}
+           )
+  }
+
+  integralvalue <- function(...) integrate(...)$value
+  
+  kintegrand <- function(x, m, ker) { x^m * dkernel(x, ker) }
+
+  kernel.moment
+})
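+
+# Sketch (not run): for m = 0 the moment is the CDF of the standard-form
+# kernel (support [-1,1], standard deviation 1/kernel.factor), so
+if(FALSE) {
+  kernel.moment(0, 0.3, "epanechnikov")
+  pkernel(0.3, "epanechnikov", sd=1/kernel.factor("epanechnikov"))  # same
+}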
+
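+# integral of the squared kernel density: returns RK/bw, where RK is
+# int k(t)^2 dt for the kernel standardised to unit standard deviation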
+kernel.squint <- function(kernel="gaussian", bw=1) {
+  kernel <- match.kernel(kernel)
+  check.1.real(bw)
+  RK <- switch(kernel,
+               gaussian = 1/(2 * sqrt(pi)),
+               rectangular = sqrt(3)/6, 
+               triangular = sqrt(6)/9,
+               epanechnikov = 3/(5 * sqrt(5)), 
+               biweight = 5 * sqrt(7)/49,
+               cosine = 3/4 * sqrt(1/3 - 2/pi^2),
+               optcosine = sqrt(1 - 8/pi^2) * pi^2/16)
+  return(RK/bw)
+}
diff --git a/R/kmrs.R b/R/kmrs.R
new file mode 100755
index 0000000..5a19d53
--- /dev/null
+++ b/R/kmrs.R
@@ -0,0 +1,242 @@
+#
+#	kmrs.R
+#
+#	Kaplan-Meier, Reduced Sample and Hanisch
+#	estimates of a distribution function
+#	from _histograms_ of censored data.
+#
+#	kaplan.meier()
+#	reduced.sample()
+#       km.rs()
+#
+#	$Revision: 3.26 $	$Date: 2013/06/27 08:59:16 $
+#
+#	The functions in this file produce vectors `km' and `rs'
+#	where km[k] and rs[k] are estimates of F(breaks[k+1]),
+#	i.e. an estimate of the c.d.f. at the RIGHT endpoint of the interval.
+#
+
+"kaplan.meier" <-
+function(obs, nco, breaks, upperobs=0) {
+#	obs: histogram of all observations : min(T_i,C_i)
+#	nco: histogram of noncensored observations : T_i such that T_i <= C_i
+# 	breaks: breakpoints (vector or 'breakpts' object, see breakpts.R)
+#       upperobs: number of observations beyond rightmost breakpoint
+#  
+        breaks <- as.breakpts(breaks)
+
+	n <- length(obs)
+	if(n != length(nco)) 
+		stop("lengths of histograms do not match")
+	check.hist.lengths(nco, breaks)
+#
+#	
+#   reverse cumulative histogram of observations
+	d <- revcumsum(obs) + upperobs
+#
+#  product integrand
+	s <- ifelseXB(d > 0, 1 - nco/d, 1)
+#
+	km <- 1 - cumprod(s)
+#  km has length n;  km[i] is an estimate of F(r) for r=breaks[i+1]
+#	
+	widths <- diff(breaks$val)
+        lambda <- numeric(n)
+        pos <- (s > 0)
+        lambda[pos] <- -log(s[pos])/widths[pos]
+#  lambda has length n; lambda[i] is an estimate of
+#  the average of \lambda(r) over the interval (breaks[i],breaks[i+1]).
+#	
+	return(list(km=km, lambda=lambda))
+}
+
+"reduced.sample" <-
+function(nco, cen, ncc, show=FALSE, uppercen=0)
+#	nco: histogram of noncensored observations: T_i such that T_i <= C_i
+#	cen: histogram of all censoring times: C_i
+#	ncc: histogram of censoring times for noncensored obs:
+#		C_i such that T_i <= C_i
+#
+#	Then nco[k] = #{i: T_i <= C_i, T_i \in I_k}
+#	     cen[k] = #{i: C_i \in I_k}
+#	     ncc[k] = #{i: T_i <= C_i, C_i \in I_k}.
+#
+#       The intervals I_k must span an interval [0,R] beginning at 0.
+#       If this interval did not include all censoring times,
+#       then `uppercen' must be the number of censoring times
+#       that were not counted in 'cen'.
+{
+	n <- length(nco)
+	if(n != length(cen) || n != length(ncc))
+		stop("histogram lengths do not match")
+#
+#	denominator: reverse cumulative histogram of censoring times
+#		denom(r) = #{i : C_i >= r}
+#	We compute 
+#		cc[k] = #{i: C_i > breaks[k]}	
+#	except that > becomes >= for k=0.
+#
+	cc <- revcumsum(cen) + uppercen
+#
+#
+#	numerator
+#	#{i: T_i <= r <= C_i }
+#	= #{i: T_i <= r, T_i <= C_i} - #{i: C_i < r, T_i <= C_i}
+#	We compute
+#		u[k] = #{i: T_i <= C_i, T_i <= breaks[k+1]}
+#			- #{i: T_i <= C_i, C_i <= breaks[k]}
+#		     = #{i: T_i <= C_i, C_i > breaks[k], T_i <= breaks[k+1]}
+#	this ensures that numerator and denominator are 
+#	comparable, u[k] <= cc[k] always.
+#
+	u <- cumsum(nco) - c(0,cumsum(ncc)[1:(n-1)])
+	rs <- u/cc
+#
+#	Hence rs[k] = u[k]/cc[k] is an estimator of F(r) 
+#	for r = breaks[k+1], i.e. for the right hand end of the interval.
+#
+        if(!show)
+          return(rs)
+        else
+          return(list(rs=rs, numerator=u, denominator=cc))
+}
+
+"km.rs" <-
+function(o, cc, d, breaks) {
+#	o: censored lifetimes min(T_i,C_i)
+#	cc: censoring times C_i
+#	d: censoring indicators 1(T_i <= C_i)
+#	breaks: histogram breakpoints (vector or 'breakpts' object)
+#
+  breaks <- as.breakpts(breaks)
+  bval <- breaks$val
+# compile histograms (breakpoints may not span data)
+  obs <- whist( o,     breaks=bval)
+  nco <- whist( o[d],  breaks=bval)
+  cen <- whist( cc,    breaks=bval)
+  ncc <- whist( cc[d], breaks=bval)
+# number of observations exceeding largest breakpoint
+  upperobs <- attr(obs, "high")
+  uppercen <- attr(cen, "high")
+# go
+  km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs)
+  rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen)
+#
+  return(list(rs=rs, km=km$km, hazard=km$lambda,
+              r=breaks$r, breaks=bval))
+}
+
+"km.rs.opt" <-
+function(o, cc, d, breaks, KM=TRUE, RS=TRUE) {
+#	o: censored lifetimes min(T_i,C_i)
+#	cc: censoring times C_i
+#	d: censoring indicators 1(T_i <= C_i)
+#	breaks: histogram breakpoints (vector or 'breakpts' object)
+#
+  breaks <- as.breakpts(breaks)
+  bval <- breaks$val
+  out <- list(r=breaks$r, breaks=bval)
+  if(KM || RS)
+    nco <- whist( o[d],  breaks=bval)
+  if(KM) {
+    obs <- whist( o,     breaks=bval)
+    upperobs <- attr(obs, "high")
+    km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs)
+    out <- append(list(km=km$km, hazard=km$lambda), out)
+  }
+  if(RS) {
+    cen <- whist( cc,    breaks=bval)
+    ncc <- whist( cc[d], breaks=bval)
+    uppercen <- attr(cen, "high")
+    rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen)
+    out <- append(list(rs=rs), out)
+  }
+  return(out)
+}
+
+
+censtimeCDFest <- function(o, cc, d, breaks, ...,
+                           KM=TRUE, RS=TRUE, HAN=TRUE, RAW=TRUE,
+                           han.denom=NULL, tt=NULL, pmax=0.9) {
+# Histogram-based estimation of cumulative distribution function
+# of lifetimes subject to censoring.
+#	o: censored lifetimes min(T_i,C_i)
+#	cc: censoring times C_i
+#	d: censoring indicators 1(T_i <= C_i)
+#	breaks: histogram breakpoints (vector or 'breakpts' object)
+#       han.denom: denominator (eroded area) for each value of r
+#       tt: uncensored lifetimes T_i, if known  
+  breaks <- as.breakpts(breaks)
+  bval <- breaks$val
+  rval <- breaks$r
+  rmax <- breaks$max
+  # Kaplan-Meier and/or Reduced Sample
+  out <- km.rs.opt(o, cc, d, breaks, KM=KM, RS=RS)
+  # convert to data frame
+  out$breaks <- NULL
+  df <- as.data.frame(out)
+  # Raw ecdf of observed lifetimes if available
+  if(RAW && !is.null(tt)) {
+    h <- whist(tt[tt <= rmax], breaks=bval)
+    df <- cbind(df, data.frame(raw=cumsum(h)/length(tt)))
+  }
+  # Hanisch
+  if(HAN) {
+    if(is.null(han.denom))
+      stop("Internal error: missing denominator for Hanisch estimator")
+    if(length(han.denom) != length(rval))
+      stop(paste("Internal error:",
+                 "length(han.denom) =", length(han.denom),
+                 "!=", length(rval), "= length(rval)"))
+    #  uncensored distances
+    x <- o[d]
+    # calculate Hanisch estimator
+    h <- whist(x[x <= rmax], breaks=bval)
+    H <- cumsum(h/han.denom)
+    df <- cbind(df, data.frame(han=H/max(H[is.finite(H)])))
+  }
+  # determine appropriate plotting range
+  bestest <- if(KM) "km" else if(HAN) "han" else if(RS) "rs" else "raw"
+  alim <- range(df$r[df[[bestest]] <= pmax])
+  # convert to fv object
+  nama <-  c("r",  "km", "hazard", "han", "rs", "raw")
+  avail <- c(TRUE,  KM,  KM,       HAN,   RS,   RAW)
+  iscdf <- c(FALSE, TRUE, FALSE,   TRUE,  TRUE, TRUE)
+  labl <- c("r", "hat(%s)[km](r)", "lambda(r)", "hat(%s)[han](r)",
+            "hat(%s)[bord](r)", "hat(%s)[raw](r)")[avail]
+  desc <- c("distance argument r",
+            "Kaplan-Meier estimate of %s",
+            "Kaplan-Meier estimate of hazard function lambda(r)",
+            "Hanisch estimate of %s",
+            "border corrected estimate of %s",
+            "uncorrected estimate of %s")[avail]
+  df <- df[, nama[avail]]
+  Z <- fv(df, "r", substitute(CDF(r), NULL), bestest, . ~ r, alim, labl, desc,
+          fname="CDF")
+  fvnames(Z, ".") <- nama[iscdf & avail]
+  return(Z)
+}
+
+# simple interface for students and code development
+
+compileCDF <- function(D, B, r, ..., han.denom=NULL, check=TRUE) {
+  han <- !is.null(han.denom)
+  breaks <- breakpts.from.r(r)
+  if(check) {
+    stopifnot(length(D) == length(B) && all(D >= 0) && all(B >= 0))
+    if(han)
+      stopifnot(length(han.denom) == length(r))
+  }
+  D <- as.vector(D)
+  B <- as.vector(B)
+  # observed (censored) lifetimes
+  o <- pmin.int(D, B)
+  # censoring indicators
+  d <- (D <= B)
+  # go
+  result <- censtimeCDFest(o, B, d, breaks,
+                           HAN=han, 
+                           han.denom=han.denom,
+                           RAW=TRUE, tt=D)
+  result <- rebadge.fv(result, new.fname="compileCDF")
+  return(result)
+}
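+
+## Usage sketch (hypothetical lifetimes D censored at times B; not run):
+if(FALSE) {
+  D <- rexp(100)              # true lifetimes
+  B <- runif(100, 0, 2)       # censoring times
+  Z <- compileCDF(D, B, r=seq(0, 2, by=0.01))
+  plot(Z)
+}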
diff --git a/R/kppm.R b/R/kppm.R
new file mode 100755
index 0000000..2cb86f6
--- /dev/null
+++ b/R/kppm.R
@@ -0,0 +1,1726 @@
+#
+# kppm.R
+#
+# Fitting cluster and Cox point process models
+#
+# $Revision: 1.134 $ $Date: 2017/07/19 07:18:54 $
+#
+
+kppm <- function(X, ...) {
+  UseMethod("kppm")
+}
+
+
+kppm.formula <-
+  function(X, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"),
+           ..., data=NULL) {
+  ## remember call
+  callstring <- short.deparse(sys.call())
+  cl <- match.call()
+
+  ########### INTERPRET FORMULA ##############################
+  
+  if(!inherits(X, "formula"))
+    stop(paste("Argument 'X' should be a formula"))
+  formula <- X
+  
+  if(spatstat.options("expand.polynom"))
+    formula <- expand.polynom(formula)
+
+  ## check formula has LHS and RHS. Extract them
+  if(length(formula) < 3)
+    stop(paste("Formula must have a left hand side"))
+  Yexpr <- formula[[2L]]
+  trend <- formula[c(1L,3L)]
+  
+  ## FIT #######################################
+  thecall <- call("kppm", X=Yexpr, trend=trend,
+                  data=data, clusters=clusters)
+  ncall <- length(thecall)
+  argh <- list(...)
+  nargh <- length(argh)
+  if(nargh > 0) {
+    thecall[ncall + 1:nargh] <- argh
+    names(thecall)[ncall + 1:nargh] <- names(argh)
+  }
+#  result <- eval(thecall, 
+#                 envir=if(!is.null(data)) data else parent.frame(),
+#                 enclos=if(!is.null(data)) parent.frame() else baseenv())
+  callenv <- list2env(as.list(data), parent=parent.frame())
+  result <- eval(thecall, envir=callenv, enclos=baseenv())
+
+  result$call <- cl
+  result$callframe <- parent.frame()
+  if(!("callstring" %in% names(list(...))))
+    result$callstring <- callstring
+  
+  return(result)
+}
+
+kppm.ppp <- kppm.quad <-
+  function(X, trend = ~1,
+           clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"),
+           data=NULL,
+           ...,
+           covariates = data,
+           subset, 
+           method = c("mincon", "clik2", "palm"),
+           improve.type = c("none", "clik1", "wclik1", "quasi"),
+           improve.args = list(),
+           weightfun=NULL,
+           control=list(),
+           algorithm="Nelder-Mead",
+           statistic="K",
+           statargs=list(),
+           rmax = NULL,
+           covfunargs=NULL,
+           use.gam=FALSE,
+           nd=NULL, eps=NULL) {
+  cl <- match.call()
+  callstring <- paste(short.deparse(sys.call()), collapse="")
+  Xname <- short.deparse(substitute(X))
+  clusters <- match.arg(clusters)
+  improve.type <- match.arg(improve.type)
+  method <- match.arg(method)
+  if(method == "mincon")
+    statistic <- pickoption("summary statistic", statistic,
+                            c(K="K", g="pcf", pcf="pcf"))
+  ClusterArgs <- list(method = method,
+                      improve.type = improve.type,
+                      improve.args = improve.args,
+                      weightfun=weightfun,
+                      control=control,
+                      algorithm=algorithm,
+                      statistic=statistic,
+                      statargs=statargs,
+                      rmax = rmax)
+  Xenv <- list2env(as.list(covariates), parent=parent.frame())
+  X <- eval(substitute(X), envir=Xenv, enclos=baseenv())
+  isquad <- inherits(X, "quad")
+  if(!is.ppp(X) && !isquad)
+    stop("X should be a point pattern (ppp) or quadrature scheme (quad)")
+  if(is.marked(X))
+    stop("Sorry, cannot handle marked point patterns")
+  if(!missing(subset)) {
+    W <- eval(subset, covariates, parent.frame())
+    if(!is.null(W)) {
+      if(is.im(W)) {
+        W <- solutionset(W)
+      } else if(!is.owin(W)) {
+        stop("Argument 'subset' should yield a window or logical image",
+             call.=FALSE)
+      }
+      X <- X[W]
+    }
+  }
+  po <- ppm(Q=X, trend=trend, covariates=covariates,
+            forcefit=TRUE, rename.intercept=FALSE,
+            covfunargs=covfunargs, use.gam=use.gam, nd=nd, eps=eps)
+  XX <- if(isquad) X$data else X
+  # set default weight function
+  if(is.null(weightfun) && method != "mincon") {
+    RmaxW <- (rmax %orifnull% rmax.rule("K", Window(XX), intensity(XX))) / 2
+    weightfun <- function(d, rr=RmaxW) { as.integer(d <= rr) }
+    formals(weightfun)[[2]] <- RmaxW
+    attr(weightfun, "selfprint") <- paste0("Indicator(distance <= ", RmaxW, ")")
+  }
+  # fit
+  out <- switch(method,
+         mincon = kppmMinCon(X=XX, Xname=Xname, po=po, clusters=clusters,
+                             control=control, statistic=statistic,
+                             statargs=statargs, rmax=rmax,
+                             algorithm=algorithm, ...),
+         clik2   = kppmComLik(X=XX, Xname=Xname, po=po, clusters=clusters,
+                             control=control, weightfun=weightfun, 
+                             rmax=rmax, algorithm=algorithm, ...),
+         palm   = kppmPalmLik(X=XX, Xname=Xname, po=po, clusters=clusters,
+                             control=control, weightfun=weightfun, 
+                             rmax=rmax, algorithm=algorithm, ...))
+  #
+  out <- append(out, list(ClusterArgs=ClusterArgs,
+                          call=cl,
+                          callframe=parent.frame(),
+                          callstring=callstring))
+  # Detect DPPs
+  DPP <- list(...)$DPP
+  class(out) <- c(ifelse(is.null(DPP), "kppm", "dppm"), class(out))
+
+  # Update intensity estimate with improve.kppm if necessary:
+  if(improve.type != "none")
+    out <- do.call(improve.kppm,
+                   append(list(object = out, type = improve.type),
+                          improve.args))
+  return(out)
+}
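+
+## Usage sketch ('redwood' is a standard spatstat dataset; not run):
+if(FALSE) {
+  fit <- kppm(redwood ~ 1, clusters="Thomas")            # minimum contrast
+  fit2 <- kppm(redwood ~ x, clusters="MatClust", method="palm")
+}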
+
+kppmMinCon <- function(X, Xname, po, clusters, control, statistic, statargs,
+                       algorithm="Nelder-Mead", DPP=NULL, ...) {
+  # Minimum contrast fit
+  stationary <- is.stationary(po)
+  # compute intensity
+  if(stationary) {
+    lambda <- summary(po)$trend$value
+  } else {
+    # compute intensity at high resolution if available
+    w <- as.owin(po, from="covariates")
+    if(!is.mask(w)) w <- NULL
+    lambda <- predict(po, locations=w)
+  }
+  # Detect DPP model and change clusters and intensity correspondingly
+  if(!is.null(DPP)){
+    tmp <- dppmFixIntensity(DPP, lambda, po)
+    clusters <- tmp$clusters
+    lambda <- tmp$lambda
+    po <- tmp$po
+  }
+  mcfit <- clusterfit(X, clusters, lambda = lambda,
+                      dataname = Xname, control = control,
+                      statistic = statistic, statargs = statargs,
+                      algorithm=algorithm, ...)
+  fitinfo <- attr(mcfit, "info")
+  attr(mcfit, "info") <- NULL
+  # all info that depends on the fitting method:
+  Fit <- list(method    = "mincon",
+              statistic = statistic,
+              Stat      = fitinfo$Stat,
+              StatFun   = fitinfo$StatFun,
+              StatName  = fitinfo$StatName,
+              FitFun    = fitinfo$FitFun,
+              statargs  = statargs,
+              mcfit     = mcfit)
+  # results
+  if(!is.null(DPP)){
+    clusters <- update(clusters, as.list(mcfit$par))
+    out <- list(Xname      = Xname,
+                X          = X,
+                stationary = stationary,
+                fitted     = clusters,
+                po         = po,
+                Fit        = Fit)
+  } else{
+    out <- list(Xname      = Xname,
+                X          = X,
+                stationary = stationary,
+                clusters   = clusters,
+                modelname  = fitinfo$modelname,
+                isPCP      = fitinfo$isPCP,
+                po         = po,
+                lambda     = lambda,
+                mu         = mcfit$mu,
+                par        = mcfit$par,
+                par.canon  = mcfit$par.canon,
+                clustpar   = mcfit$clustpar,
+                clustargs  = mcfit$clustargs,
+                modelpar   = mcfit$modelpar,
+                covmodel   = mcfit$covmodel,
+                Fit        = Fit)
+  }
+  return(out)
+}
+
+clusterfit <- function(X, clusters, lambda = NULL, startpar = NULL,
+                       q=1/4, p=2, rmin=NULL, rmax=NULL, ...,
+                       statistic = NULL, statargs = NULL,
+                       algorithm="Nelder-Mead"){
+  ## If possible get dataname from dots
+  dataname <- list(...)$dataname
+  ## Cluster info:
+  info <- spatstatClusterModelInfo(clusters)
+  ## Detect DPP usage
+  isDPP <- inherits(clusters, "detpointprocfamily")
+  
+  if(inherits(X, "ppp")){
+      if(is.null(dataname))
+         dataname <- getdataname(short.deparse(substitute(X), 20), ...)
+      if(is.null(statistic))
+          statistic <- "K"
+      # Startpar:
+      if(is.null(startpar))
+          startpar <- info$selfstart(X)
+      stationary <- is.null(lambda) || (is.numeric(lambda) && length(lambda)==1)
+      # compute summary function
+      if(stationary) {
+          if(is.null(lambda)) lambda <- intensity(X)
+          StatFun <- if(statistic == "K") "Kest" else "pcf"
+          StatName <-
+              if(statistic == "K") "K-function" else "pair correlation function"
+          Stat <- do.call(StatFun,
+                          resolve.defaults(list(X=X),
+                                           statargs,
+                                           list(correction="best")))
+      } else {
+          StatFun <- if(statistic == "K") "Kinhom" else "pcfinhom"
+          StatName <- if(statistic == "K") "inhomogeneous K-function" else
+          "inhomogeneous pair correlation function"
+          Stat <- do.call(StatFun,
+                          resolve.defaults(list(X=X, lambda=lambda),
+                                           statargs,
+                                           list(correction="best")))
+      }
+  } else if(inherits(X, "fv")){
+      Stat <- X
+      ## Get statistic type
+      stattype <- attr(Stat, "fname")
+      StatFun <- paste0(stattype)
+      StatName <- NULL
+      if(is.null(statistic)){
+          if(is.null(stattype) || !is.element(stattype[1L], c("K", "pcf")))
+              stop("Cannot infer the type of summary statistic from argument ",
+                   sQuote("X"), " please specify this via argument ",
+                   sQuote("statistic"))
+          statistic <- stattype[1L]
+      }
+      if(stattype[1L]!=statistic)
+          stop("Statistic inferred from ", sQuote("X"),
+               " not equal to supplied argument ",
+               sQuote("statistic"))
+      # Startpar:
+      if(is.null(startpar)){
+          if(isDPP)
+              stop("No rule for starting parameters in this case. Please set ",
+                   sQuote("startpar"), " explicitly.")
+          startpar <- info$checkpar(startpar, old=FALSE)
+          startpar[["scale"]] <- mean(range(Stat[[fvnames(Stat, ".x")]]))
+      }
+  } else{
+      stop("Unrecognised format for argument X")
+  }
+  
+  ## avoid using g(0) as it may be infinite
+  if(statistic=="pcf"){
+      argu <- fvnames(Stat, ".x")
+      rvals <- Stat[[argu]]
+      if(rvals[1L] == 0 && (is.null(rmin) || rmin == 0)) {
+          rmin <- rvals[2L]
+      }
+  }
+
+  ## DPP resolving algorithm and checking startpar
+  changealgorithm <- length(startpar)==1 && algorithm=="Nelder-Mead"
+  if(isDPP){
+    alg <- dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar)
+    algorithm <- alg$algorithm
+  }
+
+  isPCP <- info$isPCP
+  if(isDPP && missing(q)) q <- 1/2
+  dots <- info$resolvedots(..., q = q, p = p, rmin = rmin, rmax = rmax)
+  # determine initial values of parameters
+  startpar <- info$checkpar(startpar)
+  # fit
+  theoret <- info[[statistic]]
+  desc <- paste("minimum contrast fit of", info$descname)
+
+  #' ............ experimental .........................
+  do.adjust <- spatstat.options("kppm.adjusted")
+  if(do.adjust) {
+    W <- Window(X)
+    adjdata <- list(paircorr = info[["pcf"]],
+                    pairWcdf = distcdf(W),
+                    tohuman  = NULL)
+    adjfun <- function(theo, par, auxdata, ...) {
+      with(auxdata, {
+        if(!is.null(tohuman))
+          par <- tohuman(par)
+        a <- as.numeric(stieltjes(paircorr, pairWcdf, par=par, ...))
+        return(theo/a)
+      })
+    }
+    adjustment <- list(fun=adjfun, auxdata=adjdata)
+  } else adjustment <- NULL
+    
+  #' ............ experimental .........................
+  usecanonical <- spatstat.options("kppm.canonical")
+  if(usecanonical) {
+     tocanonical <- info$tocanonical
+     tohuman <- info$tohuman
+     if(is.null(tocanonical) || is.null(tohuman)) {
+       warning("Canonical parameters are not yet supported for this model")
+       usecanonical <- FALSE
+     }
+  }
+  startpar.human <- startpar
+  if(usecanonical) {
+    htheo <- theoret
+    startpar <- tocanonical(startpar)
+    theoret <- function(par, ...) { htheo(tohuman(par), ...) }
+    if(do.adjust)
+      adjustment$auxdata$tohuman <- tohuman   # adjfun reads tohuman from auxdata
+  }
+  #' ...................................................
+  
+  mcargs <- resolve.defaults(list(observed=Stat,
+                                  theoretical=theoret,
+                                  startpar=startpar,
+                                  ctrl=dots$ctrl,
+                                  method=algorithm,
+                                  fvlab=list(label="%s[fit](r)",
+                                      desc=desc),
+                                  explain=list(dataname=dataname,
+                                      fname=statistic,
+                                      modelname=info$modelname),
+                                  margs=dots$margs,
+                                  model=dots$model,
+                                  funaux=info$funaux,
+                                  adjustment=adjustment),
+                             list(...))
+  if(isDPP && algorithm=="Brent" && changealgorithm){
+    mcargs <- resolve.defaults(mcargs, list(lower=alg$lower, upper=alg$upper))
+  }
+  
+  mcfit <- do.call(mincontrast, mcargs)
+  # extract fitted parameters and reshape
+  if(!usecanonical) {
+    optpar.canon <- NULL
+    optpar.human <- mcfit$par
+    names(optpar.human) <- names(startpar.human)
+  } else {
+    optpar.canon <- mcfit$par
+    names(optpar.canon) <- names(startpar)
+    optpar.human <- tohuman(optpar.canon)
+    names(optpar.human) <- names(startpar.human)
+  }
+  mcfit$par       <- optpar.human
+  mcfit$par.canon <- optpar.canon
+  # Return results for DPPs
+  if(isDPP){
+    extra <- list(Stat      = Stat,
+                  StatFun   = StatFun,
+                  StatName  = StatName,
+                  modelname  = info$modelabbrev,
+                  lambda     = lambda)
+    attr(mcfit, "info") <- extra
+    return(mcfit)
+  }
+  ## Extra stuff for ordinary cluster/lgcp models
+  ## imbue with meaning
+  ## infer model parameters
+  mcfit$modelpar <- info$interpret(optpar.human, lambda)
+  mcfit$internal <- list(model=ifelse(isPCP, clusters, "lgcp"))
+  mcfit$covmodel <- dots$covmodel
+  
+  if(isPCP) {
+    # Poisson cluster process: extract parent intensity kappa
+    kappa <- mcfit$par[["kappa"]]
+    # mu = mean cluster size
+    mu <- lambda/kappa
+  } else {
+    # LGCP: extract variance parameter sigma2
+    sigma2 <- mcfit$par[["sigma2"]]
+    # mu = mean of log intensity 
+    mu <- log(lambda) - sigma2/2
+  }
+  ## Parameter values (new format)
+  mcfit$mu <- mu
+  mcfit$clustpar <- info$checkpar(mcfit$par, old=FALSE)
+  mcfit$clustargs <- info$checkclustargs(dots$margs, old=FALSE)
+
+  ## The old fit fun that would have been used (DO WE NEED THIS?)
+  FitFun <- paste0(tolower(clusters), ".est", statistic)
+
+  extra <- list(FitFun    = FitFun,
+                Stat      = Stat,
+                StatFun   = StatFun,
+                StatName  = StatName,
+                modelname  = info$modelabbrev,
+                isPCP      = isPCP,
+                lambda     = lambda)
+  attr(mcfit, "info") <- extra
+  return(mcfit)
+}
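+
+#' Illustrative sketch (not run at package load): the minimum contrast
+#' engine above is normally reached through the user-level kppm() call
+#' with method="mincon"; 'redwood' is a standard spatstat dataset.
+if(FALSE) {
+  fitMC <- kppm(redwood ~ 1, clusters="Thomas",
+                method="mincon", statistic="K")
+  fitMC$clustpar    # fitted cluster parameters
+  fitMC$Fit$mcfit   # the minimum contrast fit object built above
+}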
+
+kppmComLik <- function(X, Xname, po, clusters, control, weightfun, rmax,
+                       algorithm="Nelder-Mead", DPP=NULL, ...) {
+  W <- as.owin(X)
+  if(is.null(rmax))
+    rmax <- rmax.rule("K", W, intensity(X))
+  # identify pairs of points that contribute
+  cl <- closepairs(X, rmax, what="ijd")
+#  I <- cl$i
+#  J <- cl$j
+  dIJ <- cl$d
+  # compute weights for pairs of points
+  if(is.function(weightfun)) {
+    wIJ <- weightfun(dIJ)
+    sumweight <- sum(wIJ)
+  } else {
+    npairs <- length(dIJ)
+    wIJ <- rep.int(1, npairs)
+    sumweight <- npairs
+  }
+  # convert window to mask, saving other arguments for later
+  dcm <- do.call.matched(as.mask,
+                         append(list(w=W), list(...)),
+                         sieve=TRUE)
+  M         <- dcm$result
+  otherargs <- dcm$otherargs
+
+  ## Detect DPP usage
+  isDPP <- inherits(clusters, "detpointprocfamily")
+
+  # compute intensity at pairs of data points
+  # and c.d.f. of interpoint distance in window
+  if(stationary <- is.stationary(po)) {
+    # stationary unmarked Poisson process
+    lambda <- intensity(X)
+#    lambdaIJ <- lambda^2
+    # compute cdf of distance between two uniform random points in W
+    g <- distcdf(W)
+    # scaling constant is (area * intensity)^2 = npoints^2,
+    # since the fitted stationary intensity is npoints/area
+    gscale <- npoints(X)^2
+  } else {
+    # compute fitted intensity at data points and in window
+#    lambdaX <- fitted(po, dataonly=TRUE)
+    lambda <- lambdaM <- predict(po, locations=M)
+    # lambda(x_i) * lambda(x_j)
+#    lambdaIJ <- lambdaX[I] * lambdaX[J]
+    # compute cdf of distance between two random points in W
+    # with density proportional to intensity function
+    g <- distcdf(M, dW=lambdaM)
+    # scaling constant is (integral of intensity)^2
+    gscale <- integral.im(lambdaM)^2
+  }
+
+  # Detect DPP model and change clusters and intensity correspondingly
+  isDPP <- !is.null(DPP)
+  if(isDPP){
+    tmp <- dppmFixIntensity(DPP, lambda, po)
+    clusters <- tmp$clusters
+    lambda <- tmp$lambda
+    po <- tmp$po
+  }
+
+  # trim 'g' to [0, rmax] 
+  g <- g[with(g, .x) <= rmax,]
+  # get pair correlation function (etc) for model
+  info <- spatstatClusterModelInfo(clusters)
+  pcfun      <- info$pcf
+  funaux     <- info$funaux
+  selfstart  <- info$selfstart
+  isPCP      <- info$isPCP
+  parhandler <- info$parhandler
+  modelname  <- info$modelname
+  # Assemble information required for computing pair correlation
+  pcfunargs <- list(funaux=funaux)
+  if(is.function(parhandler)) {
+    # Additional parameters of cluster model are required.
+    # These may be given as individual arguments,
+    # or in a list called 'covmodel'
+    clustargs <- if("covmodel" %in% names(otherargs))
+                 otherargs[["covmodel"]] else otherargs
+    clargs <- do.call(parhandler, clustargs)
+    pcfunargs <- append(clargs, pcfunargs)
+  } else clargs <- NULL
+  # determine starting parameter values
+  startpar <- selfstart(X)
+  #' ............ experimental .........................
+  usecanonical <- spatstat.options("kppm.canonical")
+  if(usecanonical) {
+     tocanonical <- info$tocanonical
+     tohuman <- info$tohuman
+     if(is.null(tocanonical) || is.null(tohuman)) {
+       warning("Canonical parameters are not yet supported for this model")
+       usecanonical <- FALSE
+     }
+  }
+  startpar.human <- startpar
+  if(usecanonical) {
+    pcftheo <- pcfun
+    startpar <- tocanonical(startpar)
+    pcfun <- function(par, ...) { pcftheo(tohuman(par), ...) }
+  } 
+  # .....................................................
+  # create local function to evaluate pair correlation
+  #  (with additional parameters 'pcfunargs' in its environment)
+  paco <- function(d, par) {
+    do.call(pcfun, append(list(par=par, rvals=d), pcfunargs))
+  }
+  # define objective function 
+  if(!is.function(weightfun)) {
+    # pack up necessary information
+    objargs <- list(dIJ=dIJ, sumweight=sumweight, g=g, gscale=gscale, 
+                    envir=environment(paco))
+    # define objective function (with 'paco' in its environment)
+    # Note that this is 1/2 of the log composite likelihood,
+    # minus the constant term 
+    #       sum(log(lambdaIJ)) - npairs * log(gscale)
+    obj <- function(par, objargs) {
+      with(objargs,
+           sum(log(paco(dIJ, par)))
+           - sumweight * log(unlist(stieltjes(paco, g, par=par))),
+           enclos=objargs$envir)
+    }
+  } else {
+    # create local function to evaluate pair correlation(d) * weight(d)
+    #  (with additional parameters 'pcfunargs', 'weightfun' in its environment)
+    force(weightfun)
+    wpaco <- function(d, par) {
+      y <- do.call(pcfun, append(list(par=par, rvals=d), pcfunargs))
+      w <- weightfun(d)
+      return(y * w)
+    }
+    # pack up necessary information
+    objargs <- list(dIJ=dIJ, wIJ=wIJ, sumweight=sumweight, g=g, gscale=gscale, 
+                    envir=environment(wpaco))
+    # define objective function (with 'paco', 'wpaco' in its environment)
+    # Note that this is 1/2 of the log composite likelihood,
+    # minus the constant term 
+    #       sum(wIJ * log(lambdaIJ)) - sumweight * log(gscale)
+    obj <- function(par, objargs) {
+      with(objargs,
+           sum(wIJ * log(paco(dIJ, par)))
+           - sumweight * log(unlist(stieltjes(wpaco, g, par=par))),
+           enclos=objargs$envir)
+    }
+  }
+  # arguments for optimization
+  ctrl <- resolve.defaults(list(fnscale=-1), control, list(trace=0))
+  optargs <- list(par=startpar, fn=obj, objargs=objargs, control=ctrl, method=algorithm)
+  ## DPP resolving algorithm and checking startpar
+  changealgorithm <- length(startpar)==1 && algorithm=="Nelder-Mead"
+  if(isDPP){
+    alg <- dppmFixAlgorithm(algorithm, changealgorithm, clusters,
+                            startpar.human)
+    algorithm <- optargs$method <- alg$algorithm
+    if(algorithm=="Brent" && changealgorithm){
+      optargs$lower <- alg$lower
+      optargs$upper <- alg$upper
+    }
+  }
+  # optimize it
+  opt <- do.call(optim, optargs)
+  # raise warning/error if something went wrong
+  signalStatus(optimStatus(opt), errors.only=TRUE)
+  # fitted parameters
+  if(!usecanonical) {
+    optpar.canon <- NULL
+    optpar.human <- opt$par
+    names(optpar.human) <- names(startpar.human)
+  } else {
+    optpar.canon <- opt$par
+    names(optpar.canon) <- names(startpar)
+    optpar.human <- tohuman(optpar.canon)
+    names(optpar.human) <- names(startpar.human)
+  }
+  opt$par       <- optpar.human
+  opt$par.canon <- optpar.canon
+  # Finish in DPP case
+  if(!is.null(DPP)){
+    # all info that depends on the fitting method:
+    Fit <- list(method    = "clik2",
+                clfit     = opt,
+                weightfun = weightfun,
+                rmax      = rmax,
+                objfun    = obj,
+                objargs   = objargs)
+    # pack up
+    clusters <- update(clusters, as.list(opt$par))
+    result <- list(Xname      = Xname,
+                   X          = X,
+                   stationary = stationary,
+                   fitted     = clusters,
+                   modelname  = modelname,
+                   po         = po,
+                   lambda     = lambda,
+                   Fit        = Fit)
+    return(result)
+  }
+  # meaningful model parameters
+  modelpar <- info$interpret(optpar.human, lambda)
+  # infer parameter 'mu'
+  if(isPCP) {
+    # Poisson cluster process: extract parent intensity kappa
+    kappa <- optpar.human[["kappa"]]
+    # mu = mean cluster size
+    mu <- if(stationary) lambda/kappa else eval.im(lambda/kappa)
+  } else {
+    # LGCP: extract variance parameter sigma2
+    sigma2 <- optpar.human[["sigma2"]]
+    # mu = mean of log intensity 
+    mu <- if(stationary) log(lambda) - sigma2/2 else
+          eval.im(log(lambda) - sigma2/2)    
+  }
+  # all info that depends on the fitting method:
+  Fit <- list(method    = "clik2",
+              clfit     = opt,
+              weightfun = weightfun,
+              rmax      = rmax,
+              objfun    = obj,
+              objargs   = objargs)
+  # pack up
+  result <- list(Xname      = Xname,
+                 X          = X,
+                 stationary = stationary,
+                 clusters   = clusters,
+                 modelname  = modelname,
+                 isPCP      = isPCP,
+                 po         = po,
+                 lambda     = lambda,
+                 mu         = mu,
+                 par        = optpar.human,
+                 par.canon  = optpar.canon,
+                 clustpar   = info$checkpar(par=optpar.human, old=FALSE),
+                 clustargs  = info$checkclustargs(clargs$margs, old=FALSE), #clargs$margs,
+                 modelpar   = modelpar,
+                 covmodel   = clargs,
+                 Fit        = Fit)
+  return(result)
+}
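+
+#' Illustrative sketch (not run at package load): kppmComLik is the
+#' backend of kppm(..., method="clik2"); a user-supplied weightfun
+#' truncates or downweights the pairwise contributions.
+if(FALSE) {
+  fitCL <- kppm(redwood ~ 1, clusters="Thomas", method="clik2",
+                weightfun=function(d) { as.integer(d <= 0.1) })
+  fitCL$Fit$rmax    # pairwise distance cutoff actually used
+  fitCL$modelpar    # parameters on the natural scale
+}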
+
+kppmPalmLik <- function(X, Xname, po, clusters, control, weightfun, rmax,
+                        algorithm="Nelder-Mead", DPP=NULL, ...) {
+  W <- as.owin(X)
+  if(is.null(rmax))
+    rmax <- rmax.rule("K", W, intensity(X))
+  # identify pairs of points that contribute
+  cl <- closepairs(X, rmax)
+#  I <- cl$i
+  J <- cl$j
+  dIJ <- cl$d
+  # compute weights for pairs of points
+  if(is.function(weightfun)) {
+    wIJ <- weightfun(dIJ)
+#    sumweight <- sum(wIJ)
+  } else {
+    npairs <- length(dIJ)
+    wIJ <- rep.int(1, npairs)
+#    sumweight <- npairs
+  }
+  # convert window to mask, saving other arguments for later
+  dcm <- do.call.matched(as.mask,
+                         append(list(w=W), list(...)),
+                         sieve=TRUE)
+  M         <- dcm$result
+  otherargs <- dcm$otherargs
+
+  ## Detect DPP usage
+  isDPP <- inherits(clusters, "detpointprocfamily")
+
+  # compute intensity at data points
+  # and c.d.f. of interpoint distance in window
+  if(stationary <- is.stationary(po)) {
+    # stationary unmarked Poisson process
+    lambda <- intensity(X)
+    lambdaJ <- rep(lambda, length(J))
+    # compute cdf of distance between a uniform random point in W
+    # and a randomly-selected point in X 
+    g <- distcdf(X, M)
+    # scaling constant is (integral of intensity) * (number of points)
+    # = npoints^2, since the fitted stationary intensity integrates to npoints
+    gscale <- npoints(X)^2
+  } else {
+    # compute fitted intensity at data points and in window
+    lambdaX <- fitted(po, dataonly=TRUE)
+    lambda <- lambdaM <- predict(po, locations=M)
+    lambdaJ <- lambdaX[J] 
+    # compute cdf of distance between a uniform random point in X 
+    # and a random point in W with density proportional to intensity function
+    g <- distcdf(X, M, dV=lambdaM)
+    # scaling constant is (integral of intensity) * (number of points)
+    gscale <- integral.im(lambdaM) * npoints(X)
+  }
+
+  # Detect DPP model and change clusters and intensity correspondingly
+  isDPP <- !is.null(DPP)
+  if(isDPP){
+    tmp <- dppmFixIntensity(DPP, lambda, po)
+    clusters <- tmp$clusters
+    lambda <- tmp$lambda
+    po <- tmp$po
+  }
+
+  # trim 'g' to [0, rmax] 
+  g <- g[with(g, .x) <= rmax,]
+  # get pair correlation function (etc) for model
+  info <- spatstatClusterModelInfo(clusters)
+  pcfun      <- info$pcf
+  funaux     <- info$funaux
+  selfstart  <- info$selfstart
+  isPCP      <- info$isPCP
+  parhandler <- info$parhandler
+  modelname  <- info$modelname
+  # Assemble information required for computing pair correlation
+  pcfunargs <- list(funaux=funaux)
+  if(is.function(parhandler)) {
+    # Additional parameters of cluster model are required.
+    # These may be given as individual arguments,
+    # or in a list called 'covmodel'
+    clustargs <- if("covmodel" %in% names(otherargs))
+                 otherargs[["covmodel"]] else otherargs
+    clargs <- do.call(parhandler, clustargs)
+    pcfunargs <- append(clargs, pcfunargs)
+  } else clargs <- NULL
+  # determine starting parameter values
+  startpar <- selfstart(X)
+  #' ............ experimental .........................
+  usecanonical <- spatstat.options("kppm.canonical")
+  if(usecanonical) {
+     tocanonical <- info$tocanonical
+     tohuman <- info$tohuman
+     if(is.null(tocanonical) || is.null(tohuman)) {
+       warning("Canonical parameters are not yet supported for this model")
+       usecanonical <- FALSE
+     }
+  }
+  startpar.human <- startpar
+  if(usecanonical) {
+    pcftheo <- pcfun
+    startpar <- tocanonical(startpar)
+    pcfun <- function(par, ...) { pcftheo(tohuman(par), ...) }
+  }
+  # .....................................................
+  # create local function to evaluate pair correlation
+  #  (with additional parameters 'pcfunargs' in its environment)
+  paco <- function(d, par) {
+    do.call(pcfun, append(list(par=par, rvals=d), pcfunargs))
+  }
+  # define objective function 
+  if(!is.function(weightfun)) {
+    # pack up necessary information
+    objargs <- list(dIJ=dIJ, g=g, gscale=gscale,
+                    sumloglam=sum(log(lambdaJ)),
+                    envir=environment(paco))
+    # define objective function (with 'paco' in its environment)
+    # This is the log Palm likelihood
+    obj <- function(par, objargs) {
+      with(objargs,
+           sumloglam + sum(log(paco(dIJ, par)))
+           - gscale * unlist(stieltjes(paco, g, par=par)),
+           enclos=objargs$envir)
+    }
+  } else {
+    # create local function to evaluate pair correlation(d) * weight(d)
+    #  (with additional parameters 'pcfunargs', 'weightfun' in its environment)
+    force(weightfun)
+    wpaco <- function(d, par) {
+      y <- do.call(pcfun, append(list(par=par, rvals=d), pcfunargs))
+      w <- weightfun(d)
+      return(y * w)
+    }
+    # pack up necessary information
+    objargs <- list(dIJ=dIJ, wIJ=wIJ, g=g, gscale=gscale,
+                    wsumloglam=sum(wIJ * log(lambdaJ)),
+                    envir=environment(wpaco))
+    # define objective function (with 'paco', 'wpaco' in its environment)
+    # This is the log Palm likelihood
+    obj <- function(par, objargs) {
+      with(objargs,
+           wsumloglam + sum(wIJ * log(paco(dIJ, par)))
+           - gscale * unlist(stieltjes(wpaco, g, par=par)),
+           enclos=objargs$envir)
+    }
+  }    
+  # arguments for optimization
+  ctrl <- resolve.defaults(list(fnscale=-1), control, list(trace=0))
+  optargs <- list(par=startpar, fn=obj, objargs=objargs, control=ctrl, method=algorithm)
+  ## DPP resolving algorithm and checking startpar
+  changealgorithm <- length(startpar)==1 && algorithm=="Nelder-Mead"
+  if(isDPP){
+    alg <- dppmFixAlgorithm(algorithm, changealgorithm, clusters,
+                            startpar.human)
+    algorithm <- optargs$method <- alg$algorithm
+    if(algorithm=="Brent" && changealgorithm){
+      optargs$lower <- alg$lower
+      optargs$upper <- alg$upper
+    }
+  }
+  # optimize it
+  opt <- do.call(optim, optargs)
+  # raise warning/error if something went wrong
+  signalStatus(optimStatus(opt), errors.only=TRUE)
+  # Extract optimal values of parameters
+  if(!usecanonical) {
+    optpar.canon <- NULL
+    optpar.human <- opt$par
+    names(optpar.human) <- names(startpar.human)
+  } else {
+    optpar.canon <- opt$par
+    names(optpar.canon) <- names(startpar)
+    optpar.human <- tohuman(optpar.canon)
+    names(optpar.human) <- names(startpar.human)
+  }
+  # Finish in DPP case
+  if(!is.null(DPP)){
+    opt$par <- optpar.human
+    opt$par.canon <- optpar.canon
+    # all info that depends on the fitting method:
+    Fit <- list(method    = "palm",
+                clfit     = opt,
+                weightfun = weightfun,
+                rmax      = rmax,
+                objfun    = obj,
+                objargs   = objargs)
+    # pack up
+    clusters <- update(clusters, as.list(optpar.human))
+    result <- list(Xname      = Xname,
+                   X          = X,
+                   stationary = stationary,
+                   fitted     = clusters,
+                   modelname  = modelname,
+                   po         = po,
+                   lambda     = lambda,
+                   Fit        = Fit)
+    return(result)
+  }
+  # meaningful model parameters
+  modelpar <- info$interpret(optpar.human, lambda)
+  # infer parameter 'mu'
+  if(isPCP) {
+    # Poisson cluster process: extract parent intensity kappa
+    kappa <- optpar.human[["kappa"]]
+    # mu = mean cluster size
+    mu <- if(stationary) lambda/kappa else eval.im(lambda/kappa)
+  } else {
+    # LGCP: extract variance parameter sigma2
+    sigma2 <- optpar.human[["sigma2"]]
+    # mu = mean of log intensity 
+    mu <- if(stationary) log(lambda) - sigma2/2 else
+          eval.im(log(lambda) - sigma2/2)    
+  }
+  # all info that depends on the fitting method:
+  Fit <- list(method    = "palm",
+              clfit     = opt,
+              weightfun = weightfun,
+              rmax      = rmax)
+  # pack up
+  result <- list(Xname      = Xname,
+                 X          = X,
+                 stationary = stationary,
+                 clusters   = clusters,
+                 modelname  = modelname,
+                 isPCP      = isPCP,
+                 po         = po,
+                 lambda     = lambda,
+                 mu         = mu,
+                 par        = optpar.human,
+                 par.canon  = optpar.canon,
+                 clustpar   = info$checkpar(par=optpar.human, old=FALSE),
+                 clustargs  = info$checkclustargs(clargs$margs, old=FALSE), #clargs$margs,
+                 modelpar   = modelpar,
+                 covmodel   = clargs,
+                 Fit        = Fit)
+  return(result)
+}
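+
+#' Illustrative sketch (not run at package load): kppmPalmLik is the
+#' backend of kppm(..., method="palm"); the logLik/AIC methods defined
+#' later in this file read the maximised value from Fit$clfit.
+if(FALSE) {
+  fitP <- kppm(redwood ~ 1, clusters="Thomas", method="palm")
+  fitP$Fit$clfit$value   # maximised log Palm likelihood
+  AIC(fitP)
+}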
+
+improve.kppm <- local({
+
+  fnc <- function(r, eps, g){ (g(r) - 1)/(g(0) - 1) - eps}
+
+  improve.kppm <- function(object, type=c("quasi", "wclik1", "clik1"),
+                           rmax = NULL, eps.rmax = 0.01,
+                           dimyx = 50, maxIter = 100, tolerance = 1e-06,
+                           fast = TRUE, vcov = FALSE, fast.vcov = FALSE,
+                           verbose = FALSE,
+                           save.internals = FALSE) {
+    verifyclass(object, "kppm")
+    type <- match.arg(type)
+    gfun <- pcfmodel(object)
+    X <- object$X
+    win <- as.owin(X)
+    ## simple (rectangular) grid quadrature scheme
+    ## (using pixels with centers inside owin only)
+    mask <- as.mask(win, dimyx = dimyx)
+    wt <- pixellate(win, W = mask)
+    wt <- wt[mask]
+    Uxy <- rasterxy.mask(mask)
+    U <- ppp(Uxy$x, Uxy$y, window = win, check=FALSE)
+    U <- U[mask]
+#    nU <- npoints(U)
+    Yu <- pixellate(X, W = mask)
+    Yu <- Yu[mask]
+    
+    ## covariates at quadrature points
+    po <- object$po
+    Z <- model.images(po, mask)
+    Z <- sapply(Z, "[", i=U)
+
+    ##obtain initial beta estimate using composite likelihood
+    beta0 <- coef(po)
+    
+    ## determining the dependence range
+    if (type != "clik1" && is.null(rmax))
+      {
+        diamwin <- diameter(win)
+        rmax <- if(fnc(diamwin, eps.rmax, gfun) >= 0) diamwin else
+                uniroot(fnc, lower = 0, upper = diameter(win),
+                        eps=eps.rmax, g=gfun)$root
+        if(verbose)
+          splat(paste0("type: ", type, ", ",
+                       "dependence range: ", rmax, ", ",
+                       "dimyx: ", dimyx, ", g(0) - 1: ", gfun(0) - 1))
+      }
+    ## preparing the WCL case
+    if (type == "wclik1")
+      Kmax <- 2*pi * integrate(function(r){r * (gfun(r) - 1)},
+                               lower=0, upper=rmax)$value * exp(c(Z %*% beta0))
+    ## the g()-1 matrix without tapering
+    if (!fast || (vcov && !fast.vcov)){
+      if (verbose)
+        cat("computing the g(u_i,u_j)-1 matrix ...")
+      gminus1 <- matrix(gfun(c(pairdist(U))) - 1, U$n, U$n)
+      if (verbose)
+        cat("..Done.\n")
+    }
+    
+    if((fast && type == "quasi") || fast.vcov) {
+      if (verbose)
+        cat("computing the sparse G-1 matrix ...\n")
+      ## Non-zero gminus1 entries (when using tapering)
+      cp <- crosspairs(U,U,rmax,what="ijd")
+      if (verbose)
+        cat("crosspairs done\n")
+      Gtap <- (gfun(cp$d) - 1)
+      if(vcov){
+        if(fast.vcov){
+          gminus1 <- Matrix::sparseMatrix(i=cp$i, j=cp$j,
+                                          x=Gtap, dims=c(U$n, U$n))
+        } else{
+          if(fast)
+            gminus1 <- matrix(gfun(c(pairdist(U))) - 1, U$n, U$n)
+        }
+      }
+      if (verbose && type != "quasi")
+        cat("..Done.\n")
+    }
+       
+    if (type == "quasi" && fast){
+      mu0 <- exp(c(Z %*% beta0)) * wt
+      mu0root <- sqrt(mu0)
+      sparseG <- Matrix::sparseMatrix(i=cp$i, j=cp$j,
+                                      x=mu0root[cp$i] * mu0root[cp$j] * Gtap,
+                                      dims=c(U$n, U$n))
+      Rroot <- Matrix::Cholesky(sparseG, perm = TRUE, Imult = 1)
+      ##Imult=1 means that we add 1*I
+      if (verbose)
+        cat("..Done.\n")
+    }
+    
+    ## iterative weighted least squares/Fisher scoring
+    bt <- beta0
+    noItr <- 1
+    repeat {
+      mu <- exp(c(Z %*% bt)) * wt
+      mu.root <- sqrt(mu)
+      ## the core of the estimating equation: ff = phi
+      ## for quasi-likelihood, phi = V^{-1} D = V_mu^{-1/2} x,
+      ## where x solves (G + I) x = V_mu^{1/2} Z
+      ff <- switch(type,
+                   clik1 = Z,
+                   wclik1= Z/(1 + Kmax),
+                   quasi = if(fast){
+                     Matrix::solve(Rroot, mu.root * Z)/mu.root
+                   } else{
+                     solve(diag(U$n) + t(gminus1 * mu), Z)
+                   }
+                   )
+      ##alternative
+      ##R=chol(sparseG+sparseMatrix(i=c(1:U$n),j=c(1:U$n),
+      ##                            x=rep(1,U$n),dims=c(U$n,U$n)))
+      ##ff2 <- switch(type,
+      ##              clik1 = Z,
+      ##              wclik1= Z/(1 + Kmax),
+      ##              quasi = if (fast)
+      ##                         solve(R,solve(t(R), mu.root * Z))/mu.root
+      ##                      else solve(diag(U$n) + t(gminus1 * mu), Z))
+      ## print(summary(as.numeric(ff)-as.numeric(ff2)))
+      ## the estimating equation: u_f(\beta)
+      uf <- (Yu - mu) %*% ff
+      ## inverse of minus expectation of Jacobian matrix: I_f
+      Jinv <- solve(t(Z * mu) %*% ff)
+      if(maxIter==0){
+        ## This is a built-in early exit for vcov internal calculations
+        break
+      }
+      deltabt <- as.numeric(uf %*% Jinv)
+      if (any(!is.finite(deltabt))) {
+        warning(paste("Infinite value, NA or NaN appeared",
+                      "in the iterative weighted least squares algorithm.",
+                      "Returning the initial intensity estimate unchanged."),
+                call.=FALSE)
+        return(object)
+      }
+      ## updating the present estimate of \beta
+      bt <- bt + deltabt
+      if (verbose)
+        splat(paste0("itr: ", noItr, ",\nu_f: ", as.numeric(uf),
+                     "\nbeta:", bt, "\ndeltabeta:", deltabt))
+      if (max(abs(deltabt/bt)) <= tolerance || max(abs(uf)) <= tolerance)
+        break
+      if (noItr > maxIter)
+        stop("Maximum number of iterations reached without convergence.")
+      noItr <- noItr + 1
+    }
+    out <- object
+    out$po$coef.orig <- beta0
+    out$po$coef <- bt
+    loc <- if(is.sob(out$lambda)) as.mask(out$lambda) else mask
+    out$lambda <- predict(out$po, locations = loc)
+    out$improve <- list(type = type,
+                        rmax = rmax,
+                        dimyx = dimyx,
+                        fast = fast,
+                        fast.vcov = fast.vcov)
+    if(save.internals){
+      out$improve <- append(out$improve, list(ff=ff, uf=uf, J.inv=Jinv))
+    }
+    if(vcov){
+      if (verbose)
+        cat("computing the asymptotic variance ...\n")
+      ## variance of the estimation equation: Sigma_f = Var(u_f(bt))
+      trans <- if(fast) Matrix::t else t
+      Sig <- trans(ff) %*% (ff * mu) + trans(ff * mu) %*% gminus1 %*% (ff * mu)
+      ## note Abdollah's G does not have mu.root inside...
+      ## the asymptotic variance of \beta:
+      ##         inverse of the Godambe information matrix
+      out$vcov <- as.matrix(Jinv %*% Sig %*% Jinv)
+    }
+    return(out)
+  }
+  improve.kppm
+})
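+
+#' Illustrative sketch (not run at package load): applying the
+#' quasi-likelihood refinement to an inhomogeneous cluster fit;
+#' the same code is reachable via kppm(..., improve.type="quasi").
+if(FALSE) {
+  fit  <- kppm(redwood ~ x, clusters="Thomas")
+  fitQ <- improve.kppm(fit, type="quasi", vcov=TRUE)
+  coef(fitQ)    # refined trend coefficients
+  fitQ$vcov     # inverse Godambe information matrix
+}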
+
+
+is.kppm <- function(x) { inherits(x, "kppm")}
+
+print.kppm <- print.dppm <- function(x, ...) {
+
+  isPCP <- x$isPCP
+  # detect DPP
+  isDPP <- inherits(x, "dppm")
+  # handle outdated objects - which were all cluster processes
+  if(!isDPP && is.null(isPCP)) isPCP <- TRUE
+
+  terselevel <- spatstat.options('terse')
+  digits <- getOption('digits')
+  
+  splat(if(x$stationary) "Stationary" else "Inhomogeneous",
+        if(isDPP) "determinantal" else if(isPCP) "cluster" else "Cox",
+        "point process model")
+
+  if(waxlyrical('extras', terselevel) && nchar(x$Xname) < 20)
+    splat("Fitted to point pattern dataset", sQuote(x$Xname))
+
+  if(waxlyrical('gory', terselevel)) {
+    switch(x$Fit$method,
+           mincon = {
+             splat("Fitted by minimum contrast")
+             splat("\tSummary statistic:", x$Fit$StatName)
+           },
+           clik  =,
+           clik2 = {
+             splat("Fitted by maximum second order composite likelihood")
+             splat("\trmax =", x$Fit$rmax)
+             if(!is.null(wtf <- x$Fit$weightfun)) {
+               a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf)
+               splat("\tweight function:", a)
+             }
+           },
+           palm = {
+             splat("Fitted by maximum Palm likelihood")
+             splat("\trmax =", x$Fit$rmax)
+             if(!is.null(wtf <- x$Fit$weightfun)) {
+               a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf)
+               splat("\tweight function:", a)
+             }
+           },
+           warning(paste("Unrecognised fitting method", sQuote(x$Fit$method)))
+           )
+  }
+
+  parbreak(terselevel)
+  
+  # ............... trend .........................
+
+  if(!(isDPP && is.null(x$fitted$intensity)))
+    print(x$po, what="trend")
+
+  # ..................... clusters ................
+
+  # DPP case
+  if(isDPP){
+      splat("Fitted DPP model:")
+      print(x$fitted)
+      return(invisible(NULL))
+  }
+
+  tableentry <- spatstatClusterModelInfo(x$clusters)
+  
+  splat(if(isPCP) "Cluster" else "Cox",
+        "model:", tableentry$printmodelname(x))
+  cm <- x$covmodel
+  if(!isPCP) {
+    # Covariance model - LGCP only
+    splat("\tCovariance model:", cm$model)
+    margs <- cm$margs
+    if(!is.null(margs)) {
+      nama <- names(margs)
+      tags <- ifelse(nzchar(nama), paste(nama, "="), "")
+      tagvalue <- paste(tags, margs)
+      splat("\tCovariance parameters:",
+            paste(tagvalue, collapse=", "))
+    }
+  }
+  pc <- x$par.canon
+  if(!is.null(pc)) {
+    splat("Fitted canonical parameters:")
+    print(pc, digits=digits)
+  }
+  pa <- x$clustpar
+  if (!is.null(pa)) {
+    splat("Fitted",
+          if(isPCP) "cluster" else "covariance",
+          "parameters:")
+    print(pa, digits=digits)
+  }
+
+  if(!is.null(mu <- x$mu)) {
+    if(isPCP) {
+      splat("Mean cluster size: ",
+            if(!is.im(mu)) paste(signif(mu, digits), "points") else "[pixel image]")
+    } else {
+      splat("Fitted mean of log of random intensity:",
+            if(!is.im(mu)) signif(mu, digits) else "[pixel image]")
+    }
+  }
+  invisible(NULL)
+}
+
+plot.kppm <- local({
+
+  plotem <- function(x, ..., main=dmain, dmain) { plot(x, ..., main=main) }
+  
+  plot.kppm <- function(x, ...,
+                        what=c("intensity", "statistic", "cluster"),
+                        pause=interactive(),
+                        xname) {
+    ## catch objectname from dots if present otherwise deparse x:
+    if(missing(xname)) xname <- short.deparse(substitute(x))
+    nochoice <- missing(what)
+    what <- pickoption("plot type", what,
+                       c(statistic="statistic",
+                         intensity="intensity",
+                         cluster="cluster"),
+                       multi=TRUE)
+    ## handle older objects
+    Fit <- x$Fit
+    if(is.null(Fit)) {
+      warning("kppm object is in outdated format")
+      Fit <- x
+      Fit$method <- "mincon"
+    }
+    ## Catch locations for clusters if given
+    loc <- list(...)$locations
+    inappropriate <- (nochoice & ((what == "intensity") & (x$stationary))) |
+             ((what == "statistic") & (Fit$method != "mincon")) |
+             ((what == "cluster") & (identical(x$isPCP, FALSE))) | 
+             ((what == "cluster") & (!x$stationary) & is.null(loc))
+
+    if(!nochoice && !x$stationary && "cluster" %in% what && is.null(loc))
+      stop("Please specify additional argument ", sQuote("locations"),
+           " which will be passed to the function ",
+           sQuote("clusterfield"), ".")
+
+    if(any(inappropriate)) {
+      what <- what[!inappropriate]
+      if(length(what) == 0){
+        message("Nothing meaningful to plot. Exiting...")
+        return(invisible(NULL))
+      }
+    }
+    pause <- pause && (length(what) > 1)
+    if(pause) opa <- par(ask=TRUE)
+    for(style in what)
+      switch(style,
+             intensity={
+               plotem(x$po, ...,
+                      dmain=c(xname, "Intensity"),
+                      how="image", se=FALSE)
+             },
+             statistic={
+               plotem(Fit$mcfit, ...,
+                      dmain=c(xname, Fit$StatName))
+             },
+             cluster={
+               plotem(clusterfield(x, locations = loc, verbose=FALSE), ...,
+                      dmain=c(xname, "Fitted cluster"))
+             })
+    if(pause) par(opa)
+    return(invisible(NULL))
+  }
+
+  plot.kppm
+})
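+
+#' Illustrative sketch (not run at package load): the panel types
+#' accepted by plot.kppm; "statistic" is only meaningful for minimum
+#' contrast fits, and "cluster" needs 'locations' when the fitted
+#' model is inhomogeneous.
+if(FALSE) {
+  fit <- kppm(redwood ~ 1, clusters="Thomas", method="mincon")
+  plot(fit, what="statistic")   # fitted vs empirical summary function
+  plot(fit, what="cluster")     # fitted cluster field
+}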
+
+
+predict.kppm <- predict.dppm <- function(object, ...) {
+  se <- resolve.1.default(list(se=FALSE), list(...))
+  interval <- resolve.1.default(list(interval="none"), list(...))
+  if(se) warning("Standard error calculation assumes a Poisson process")
+  if(interval != "none")
+    warning(paste(interval, "interval calculation assumes a Poisson process"))
+  predict(as.ppm(object), ...)
+}
+
+fitted.kppm <- fitted.dppm <- function(object, ...) {
+  fitted(as.ppm(object), ...)
+}
+
+residuals.kppm <- residuals.dppm <- function(object, ...) {
+  type <- resolve.1.default(list(type="raw"), list(...))
+  if(type != "raw")
+    warning(paste("calculation of", type, "residuals",
+                  "assumes a Poisson process"))
+  residuals(as.ppm(object), ...)
+}
+
+simulate.kppm <- function(object, nsim=1, seed=NULL, ...,
+                          window=NULL, covariates=NULL,
+                          verbose=TRUE, retry=10,
+                          drop=FALSE) {
+  starttime <- proc.time()
+  verbose <- verbose && (nsim > 1)
+  check.1.real(retry)
+  # .... copied from simulate.lm ....
+  if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE))
+    runif(1)
+  if (is.null(seed))
+    RNGstate <- get(".Random.seed", envir = .GlobalEnv)
+  else {
+    R.seed <- get(".Random.seed", envir = .GlobalEnv)
+    set.seed(seed)
+    RNGstate <- structure(seed, kind = as.list(RNGkind()))
+    on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv))
+  }
+  
+  # ..................................
+  # determine window for simulation results
+  if(!is.null(window)) {
+    stopifnot(is.owin(window))
+    win <- window
+  } else {
+    win <- as.owin(object)
+  }
+  # ..................................
+  # determine parameters
+  mp <- as.list(object$modelpar)
+
+  # parameter 'mu'
+  # = parent intensity of cluster process
+  # = mean log intensity of log-Gaussian Cox process
+  
+  if(is.null(covariates) && (object$stationary || is.null(window))) {
+    # use existing 'mu' (scalar or image)
+    mu <- object$mu
+  } else {
+    # recompute 'mu' using new data
+    switch(object$clusters,
+           Cauchy=,
+           VarGamma=,
+           Thomas=,
+           MatClust={
+             # Poisson cluster process
+             kappa <- mp$kappa
+             lambda <- predict(object, window=win, covariates=covariates)
+             mu <- eval.im(lambda/kappa)
+           },
+           LGCP={
+             # log-Gaussian Cox process
+             sigma2 <- mp$sigma2
+             lambda <- predict(object, window=win, covariates=covariates)
+             mu <- eval.im(log(lambda) - sigma2/2)
+           },
+           stop(paste("Simulation of", sQuote(object$clusters),
+                      "processes is not yet implemented"))
+           )
+  }
+
+  # prepare data for execution
+  out <- list()
+  switch(object$clusters,
+         Thomas={
+           kappa <- mp$kappa
+           sigma <- mp$sigma
+           cmd <- expression(rThomas(kappa,sigma,mu,win))
+           dont.complain.about(kappa, sigma, mu)
+         },
+         MatClust={
+           kappa <- mp$kappa
+           r     <- mp$R
+           cmd   <- expression(rMatClust(kappa,r,mu,win))
+           dont.complain.about(kappa, r)
+         },
+         Cauchy = {
+           kappa <- mp$kappa
+           omega <- mp$omega
+           cmd   <- expression(rCauchy(kappa, omega, mu, win))
+           dont.complain.about(kappa, omega, mu)
+         },
+         VarGamma = {
+           kappa  <- mp$kappa
+           omega  <- mp$omega
+           nu.ker <- object$covmodel$margs$nu.ker
+           cmd    <- expression(rVarGamma(kappa, nu.ker, omega, mu, win))
+           dont.complain.about(kappa, nu.ker, omega, mu)
+         },
+         LGCP={
+           sigma2 <- mp$sigma2
+           alpha  <- mp$alpha
+           cm <- object$covmodel
+           model <- cm$model
+           margs <- cm$margs
+           param <- append(list(var=sigma2, scale=alpha), margs)
+           #' 
+           if(!is.im(mu)) {
+             # model will be simulated in 'win'
+             cmd <- expression(rLGCP(model=model, mu=mu, param=param,
+                               ..., win=win))
+             #' check that RandomFields package recognises parameter format
+             rfmod <- try(rLGCP(model, mu=mu, param=param, win=win,
+                              ..., modelonly=TRUE))
+           } else {
+             # model will be simulated in as.owin(mu), then change window
+             cmd <- expression(rLGCP(model=model, mu=mu, param=param,
+                               ...)[win])
+             #' check that RandomFields package recognises parameter format
+             rfmod <- try(rLGCP(model, mu=mu, param=param, 
+                              ..., modelonly=TRUE))
+           }
+           #' suppress warnings from code checker
+           dont.complain.about(model, mu, param)
+           #' check that model is recognised
+           if(inherits(rfmod, "try-error"))
+             stop(paste("Internal error in simulate.kppm:",
+                        "unable to build Random Fields model",
+                        "for log-Gaussian Cox process"))
+         })
+  
+  # run
+  if(verbose) {
+    cat(paste("Generating", nsim, "simulations... "))
+    state <- list()
+  }
+  for(i in 1:nsim) {
+    out[[i]] <- try(eval(cmd))
+    if(verbose) state <- progressreport(i, nsim, state=state)
+  }
+  # detect failures
+  if(any(bad <- unlist(lapply(out, inherits, what="try-error")))) {
+    nbad <- sum(bad)
+    gripe <- paste(nbad,
+                   ngettext(nbad, "simulation was", "simulations were"),
+                   "unsuccessful")
+    if(verbose) splat(gripe)
+    if(retry <= 0) {
+      fate <- "returned as NULL"
+      out[bad] <- list(NULL)
+    } else {
+      if(verbose) cat("Retrying...")
+      ntried <- 0
+      while(ntried < retry) {
+        ntried <- ntried + 1
+        for(j in which(bad))
+          out[[j]] <- try(eval(cmd))
+        bad <- unlist(lapply(out, inherits, what="try-error"))
+        nbad <- sum(bad)
+        if(nbad == 0) break
+      }
+      if(verbose) cat("Done.\n")
+      fate <- if(nbad == 0) "all recomputed" else
+              paste(nbad, "simulations still unsuccessful")
+      fate <- paste(fate, "after", ntried,
+                    ngettext(ntried, "further try", "further tries"))
+    }
+    warning(paste(gripe, fate, sep=": "))
+  }
+  if(verbose)
+    cat("Done.\n")
+  # pack up
+  if(nsim == 1 && drop) {
+    out <- out[[1L]]
+  } else {
+    out <- as.solist(out)
+    if(nsim > 0)
+      names(out) <- paste("Simulation", 1:nsim)
+  }
+  out <- timed(out, starttime=starttime)
+  attr(out, "seed") <- RNGstate
+  return(out)
+}
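+
+#' Illustrative sketch (not run at package load): simulate.kppm
+#' dispatches on the fitted 'clusters' type; here two realisations
+#' of a fitted Thomas model, with the RNG state saved as in simulate.lm.
+if(FALSE) {
+  fit  <- kppm(redwood ~ 1, clusters="Thomas")
+  Xsim <- simulate(fit, nsim=2, seed=42)
+  plot(Xsim)            # a solist of two point patterns
+  attr(Xsim, "seed")    # RNG state, for exact reproduction
+}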
+
+formula.kppm <- formula.dppm <- function(x, ...) {
+  formula(x$po, ...)
+}
+
+terms.kppm <- terms.dppm <- function(x, ...) {
+  terms(x$po, ...)
+}
+
+labels.kppm <- labels.dppm <- function(object, ...) {
+  labels(object$po, ...)
+}
+
+update.kppm <- function(object, ..., evaluate=TRUE) {
+  argh <- list(...)
+  nama <- names(argh)
+  callframe <- object$callframe
+  envir <- environment(terms(object))
+  #' look for a formula argument
+  fmla <- formula(object)
+  jf <- integer(0)
+  if(!is.null(trend <- argh$trend)) {
+    if(!can.be.formula(trend))
+      stop("Argument \"trend\" should be a formula")
+    fmla <- newformula(formula(object), trend, callframe, envir)
+    jf <- which(nama == "trend")
+  } else if(any(isfo <- sapply(argh, can.be.formula))) {
+    if(sum(isfo) > 1) {
+      if(!is.null(nama)) isfo <- isfo & nzchar(nama)
+      if(sum(isfo) > 1)
+        stop(paste("Arguments not understood:",
+                   "there are two unnamed formula arguments"))
+    }
+    jf <- which(isfo)
+    fmla <- argh[[jf]]
+    fmla <- newformula(formula(object), fmla, callframe, envir)
+  }
+
+  #' look for a point pattern or quadscheme
+  if(!is.null(X <- argh$X)) {
+    if(!inherits(X, c("ppp", "quad")))
+      stop(paste("Argument X should be a formula,",
+                 "a point pattern or a quadrature scheme"))
+    jX <- which(nama == "X")
+  } else if(any(ispp <- sapply(argh, inherits, what=c("ppp", "quad")))) {
+    if(sum(ispp) > 1) {
+      if(!is.null(nama)) ispp <- ispp & nzchar(nama)
+      if(sum(ispp) > 1)
+        stop(paste("Arguments not understood:",
+                   "there are two unnamed point pattern/quadscheme arguments"))
+    }
+    jX <- which(ispp)
+    X <- argh[[jX]]
+  } else {
+    X <- object$X
+    jX <- integer(0)
+  }
+  Xexpr <- if(length(jX) > 0) sys.call()[[2L + jX]] else NULL
+  #' remove arguments just recognised, if any
+  jused <- c(jf, jX)
+  if(length(jused) > 0)
+    argh <- argh[-jused]
+  #' update the matched call
+  thecall <- getCall(object)
+  methodname <- as.character(thecall[[1L]])
+  switch(methodname,
+         kppm.formula = {
+           # original call has X = [formula with lhs]
+           if(!is.null(Xexpr)) {
+             lhs.of.formula(fmla) <- Xexpr
+           } else if(is.null(lhs.of.formula(fmla))) {
+             lhs.of.formula(fmla) <- as.name('.')
+           }
+           oldformula <- as.formula(getCall(object)$X)
+           thecall$X <- newformula(oldformula, fmla, callframe, envir)
+         },
+         {
+           # original call has X = ppp and trend = [formula without lhs]
+           oldformula <- as.formula(getCall(object)$trend)
+           fom <- newformula(oldformula, fmla, callframe, envir)
+           if(!is.null(Xexpr))
+             lhs.of.formula(fom) <- Xexpr
+           if(is.null(lhs.of.formula(fom))) {
+             # new call has same format
+             thecall$trend <- fom
+             if(length(jX) > 0)
+               thecall$X <- X
+           } else {
+             # new call has formula with lhs
+             thecall$trend <- NULL
+             thecall$X <- fom
+           }
+         })
+  knownnames <- union(names(formals(kppm.ppp)), names(formals(mincontrast)))
+  knownnames <- setdiff(knownnames, c("X", "trend", "observed", "theoretical"))
+  ok <- nama %in% knownnames
+  thecall <- replace(thecall, nama[ok], argh[ok])
+  thecall$formula <- NULL # artefact of 'step', etc
+  thecall[[1L]] <- as.name("kppm")
+  if(!evaluate)
+    return(thecall)
+  out <- eval(thecall, envir=parent.frame(), enclos=envir)
+  #' update name of data
+  if(length(jX) == 1) {
+    mc <- match.call()
+    Xlang <- mc[[2L+jX]]
+    out$Xname <- short.deparse(Xlang)
+  }
+  #'
+  return(out)
+}
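+
+#' Illustrative sketch (not run at package load): update.kppm accepts
+#' a new trend formula, a new point pattern or quadscheme, or both,
+#' named or unnamed; 'cells' is another standard spatstat dataset.
+if(FALSE) {
+  fit  <- kppm(redwood ~ 1, clusters="Thomas")
+  fit2 <- update(fit, ~ x)      # change the trend only
+  fit3 <- update(fit, cells)    # refit the same model to other data
+}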
+
+unitname.kppm <- unitname.dppm <- function(x) {
+  return(unitname(x$X))
+}
+
+"unitname<-.kppm" <- "unitname<-.dppm" <- function(x, value) {
+  unitname(x$X) <- value
+  if(!is.null(x$Fit$mcfit)) {
+    unitname(x$Fit$mcfit) <- value
+  } else if(is.null(x$Fit)) {
+    warning("kppm object in outdated format")
+    if(!is.null(x$mcfit))
+      unitname(x$mcfit) <- value
+  }
+  return(x)
+}
+
+as.fv.kppm <- as.fv.dppm <- function(x) {
+  if(x$Fit$method == "mincon")
+    return(as.fv(x$Fit$mcfit))
+  gobs <- pcfinhom(x$X, lambda=x, correction="good", update=FALSE)
+  gfit <- (pcfmodel(x))(gobs$r)
+  g <- bind.fv(gobs,
+               data.frame(fit=gfit), 
+               "%s[fit](r)",
+               "predicted %s for fitted model")
+  return(g)
+}
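+
+#' Illustrative sketch (not run at package load): for minimum contrast
+#' fits as.fv returns the summary function used in fitting; otherwise
+#' it pairs an empirical pcf with the fitted model curve.
+if(FALSE) {
+  fit <- kppm(redwood ~ 1, clusters="Thomas", method="palm")
+  plot(as.fv(fit))   # empirical pcf with the fitted curve overlaid
+}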
+
+coef.kppm <- coef.dppm <- function(object, ...) {
+  return(coef(object$po))
+}
+
+
+Kmodel.kppm <- function(model, ...) {
+  Kpcf.kppm(model, what="K")
+}
+
+pcfmodel.kppm <- function(model, ...) {
+  Kpcf.kppm(model, what="pcf")
+}
+
+Kpcf.kppm <- function(model, what=c("K", "pcf", "kernel")) {
+  what <- match.arg(what)
+  # Extract function definition from internal table
+  clusters <- model$clusters
+  tableentry <- spatstatClusterModelInfo(clusters)
+  if(is.null(tableentry))
+    stop("No information available for", sQuote(clusters), "cluster model")
+  fun <- tableentry[[what]]
+  if(is.null(fun))
+    stop("No expression available for", what, "for", sQuote(clusters),
+         "cluster model")
+  # Extract model parameters
+  par <- model$par
+  # Extract auxiliary definitions (if applicable)
+  funaux <- tableentry$funaux
+  # Extract covariance model (if applicable)
+  cm <- model$covmodel
+  model <- cm$model
+  margs <- cm$margs
+  #
+  f <- function(r) as.numeric(fun(par=par, rvals=r,
+                                  funaux=funaux, model=model, margs=margs))
+  return(f)
+}
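+
+#' Illustrative sketch (not run at package load): Kmodel() and
+#' pcfmodel() return plain functions of distance r, built from the
+#' cluster model table entry as above.
+if(FALSE) {
+  fit <- kppm(redwood ~ 1, clusters="Thomas")
+  g <- pcfmodel(fit)
+  K <- Kmodel(fit)
+  curve(g(x), from=0.01, to=0.25)   # fitted pair correlation function
+  K(0.1)                            # fitted K-function at r = 0.1
+}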
+
+is.stationary.kppm <- is.stationary.dppm <- function(x) {
+  return(x$stationary)
+}
+
+is.poisson.kppm <- function(x) {
+  switch(x$clusters,
+         Cauchy=,
+         VarGamma=,
+         Thomas=,
+         MatClust={
+           # Poisson cluster process
+           mu <- x$mu
+           return(!is.null(mu) && (max(mu) == 0))
+         },
+         LGCP = {
+           # log-Gaussian Cox process
+           sigma2 <- x$par[["sigma2"]]
+           return(sigma2 == 0)
+         },
+         return(FALSE))
+}
+
+# extract ppm component
+
+as.ppm.kppm <- as.ppm.dppm <- function(object) {
+  object$po
+}
+
+# other methods that pass through to 'ppm'
+
+as.owin.kppm <- as.owin.dppm <- function(W, ..., from=c("points", "covariates"), fatal=TRUE) {
+  from <- match.arg(from)
+  as.owin(as.ppm(W), ..., from=from, fatal=fatal)
+}
+
+domain.kppm <- Window.kppm <- domain.dppm <-
+  Window.dppm <- function(X, ..., from=c("points", "covariates")) {
+  from <- match.arg(from)
+  as.owin(X, from=from)
+}
+
+model.images.kppm <-
+  model.images.dppm <- function(object, W=as.owin(object), ...) {
+  model.images(as.ppm(object), W=W, ...)
+}
+
+model.matrix.kppm <-
+  model.matrix.dppm <- function(object,
+                                data=model.frame(object, na.action=NULL), ...,
+                                Q=NULL, 
+                                keepNA=TRUE) {
+  if(missing(data)) data <- NULL
+  model.matrix(as.ppm(object), data=data, ..., Q=Q, keepNA=keepNA)
+}
+
+model.frame.kppm <- model.frame.dppm <- function(formula, ...) {
+  model.frame(as.ppm(formula), ...)
+}
+
+logLik.kppm <- logLik.dppm <- function(object, ...) {
+  pl <- object$Fit$clfit$value
+  if(is.null(pl))
+    stop("logLik is only available for kppm objects fitted with method='palm'",
+         call.=FALSE)
+  ll <- logLik(as.ppm(object)) # to inherit class and d.f.
+  ll[] <- pl
+  return(ll)
+}
+ 
+AIC.kppm <- AIC.dppm <- function(object, ..., k=2) {
+  # extract maximised log composite/Palm likelihood
+  pl <- object$Fit$clfit$value
+  if(is.null(pl))
+    stop(paste("AIC is only available for kppm objects fitted with",
+               "method='palm' or method='clik2'"),
+         call.=FALSE)
+  df <- length(coef(object))
+  return(- 2 * as.numeric(pl) + k * df)
+}
+
+extractAIC.kppm <- extractAIC.dppm <- function (fit, scale = 0, k = 2, ...)
+{
+  edf <- length(coef(fit))
+  aic <- AIC(fit, k=k)
+  c(edf, aic)
+}
+
+nobs.kppm <- nobs.dppm <- function(object, ...) { nobs(as.ppm(object)) }
+
+psib <- function(object) UseMethod("psib")
+
+psib.kppm <- function(object) {
+  clus <- object$clusters
+  info <- spatstatClusterModelInfo(clus)
+  if(!info$isPCP) {
+    warning("The model is not a cluster process")
+    return(NA)
+  }
+  g <- pcfmodel(object)
+  p <- 1 - 1/g(0)
+  return(p)
+}
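+
+#' Illustrative sketch (not run at package load): for a cluster model
+#' the sibling probability p = 1 - 1/g(0) measures the strength of
+#' clustering (p = 0 for a Poisson process).
+if(FALSE) {
+  fit <- kppm(redwood ~ 1, clusters="Thomas")
+  psib(fit)   # probability that a typical point has siblings
+}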
+
diff --git a/R/laslett.R b/R/laslett.R
new file mode 100644
index 0000000..3e6aa02
--- /dev/null
+++ b/R/laslett.R
@@ -0,0 +1,339 @@
+#' Calculating Laslett's transform
+#' Original by Kassel Hingee
+#' Adapted by Adrian Baddeley
+#' Copyright (C) 2016 Kassel Hingee and Adrian Baddeley
+
+# $Revision: 1.8 $  $Date: 2017/02/07 08:12:05 $
+
+laslett <- function(X, ...,
+                    verbose=FALSE, plotit=TRUE,
+                    discretise=FALSE,
+                    type = c("lower", "upper", "left", "right")){
+  #' validate X and convert to a logical matrix
+  type <- match.arg(type)
+  oldX <- X
+
+  if(is.im(X)) {
+    X <- solutionset(X != 0)
+  } else if(!is.owin(X)) 
+    stop("X should be an image or a window", call.=FALSE)
+
+  if(type != "lower") {
+    nrot <- match(type, c("right", "upper", "left"))
+    theta <- nrot * pi/2
+    X <- rotate(X, angle=-theta)
+  }
+  
+  if(!discretise && (is.polygonal(X) || is.rectangle(X))) {
+    result <- polyLaslett(X, ..., oldX=oldX, verbose=verbose, plotit=FALSE)
+  } else {
+    result <- maskLaslett(X, ..., oldX=oldX, verbose=verbose, plotit=FALSE)
+  }
+
+  if(type != "lower") {
+    #' rotate back
+    prods <- c("TanOld", "TanNew", "Rect")
+    result[prods] <- lapply(result[prods], rotate, angle=theta)
+  }
+
+  if(plotit)
+    plot(result, ...)
+
+  result$type <- type
+  
+  return(result)
+}
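+
+#' Illustrative sketch (not run at package load): Laslett's transform
+#' of Diggle's heather data ('heather' ships with spatstat); use
+#' discretise=TRUE to force the pixel-based algorithm below.
+if(FALSE) {
+  L <- laslett(heather$coarse, plotit=FALSE)
+  L$TanNew   # tangent points mapped into the transformed set
+  plot(L)
+}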
+
+maskLaslett <- local({
+  
+  sumtoright <- function(x) { rev(cumsum(rev(x))) - x }
+
+  maskLaslett <- function(X, ...,
+                          eps=NULL, dimyx=NULL, xy=NULL, 
+                          oldX=X, verbose=FALSE, plotit=TRUE) {
+    if(is.null(oldX)) oldX <- X
+    X <- as.mask(X, eps=eps, dimyx=dimyx, xy=xy)
+    unitX <- unitname(X)
+    if(is.empty(X))
+      stop("Empty window!")
+    M <- as.matrix(X)
+    #' ....... Compute transformed set ...................
+    #' Total width of transformed set on each row
+    TotFalse <- rowSums(!M)
+    ## compute transformed set
+    Laz <- (col(M) <= TotFalse[row(M)])
+    Laz <- owin(mask=Laz, xrange=X$xrange, yrange=X$yrange, unitname=unitX)
+    #' Largest sub-rectangle of transformed set
+    width <- min(TotFalse) * X$xstep
+    Rect <- owin(X$xrange[1L] + c(0, width), X$yrange, unitname=unitX)
+    #' Along each horizontal line (row),
+    #' compute a running count of FALSE pixels.
+    #' This is the mapping for the set transform
+    #' (the value at any pixel gives the new column number
+    #' for the transformed pixel)
+    CumulFalse <- t(apply(!M, 1L, cumsum))
+    #' discard one column for consistency with other matrices below
+    CumulFalse <- CumulFalse[,-1L,drop=FALSE]
+
+    #' ....... Find lower tangent points .................
+
+    #' compute discrete gradient in x direction
+    G <- t(apply(M, 1, diff))
+    #' detect entries, exits, changes
+    Exit <- (G == -1)
+    Enter <- (G == 1)
+    Change <- Exit | Enter
+    #' form a running total of the number of pixels inside X
+    #' to the **right** of the current pixel
+    FutureInside <- t(apply(M, 1, sumtoright))[,-1L,drop=FALSE]
+    #' find locations of changes 
+    loc <- which(Change, arr.ind=TRUE)
+    #' don't consider entries/exits in the bottom row
+    ok <- (loc[,"row"] > 1) 
+    loc <- loc[ok, , drop=FALSE]
+    #' corresponding locations on horizontal line below current line
+    below <- cbind(loc[,"row"]-1L, loc[,"col"])
+    #' look up data at these locations
+    df <- data.frame(row=loc[,"row"],
+                     col=loc[,"col"],
+                     newcol=CumulFalse[loc],
+                     Exit=Exit[loc],
+                     Enter=Enter[loc],
+                     InsideBelow=M[below],
+                     FutureInsideBelow=FutureInside[below])
+    #' identify candidates for tangents
+    df$IsCandidate <-
+      with(df, Enter & !InsideBelow & (newcol < TotFalse[row]))
+    #' collect data for each horizontal line (row)
+    #' then sort by increasing x (column) within each line.
+    oo <- with(df, order(row, col))
+    df <- df[oo, , drop=FALSE]
+    #' divide data into one piece for each horizontal line
+    g <- split(df, df$row)
+    #' Initialise empty list of tangent points
+    tangents <- data.frame(row=integer(0), col=integer(0), newcol=integer(0))
+    #' process each horizontal line
+    for(p in g) {
+      tangents <-
+        with(p, {
+          candidates <- which(IsCandidate) # indices are row numbers in 'p'
+          if(verbose) print(p)
+          exits <- which(Exit)
+          for(i in candidates) {
+            if(verbose) cat(paste("candidate", i, "\n"))
+            if(any(found <- (exits > i))) {
+              j <- exits[min(which(found))]
+              if(verbose) cat(paste("next exit:", j, "\n"))
+              #' check no pixels inside X in row below between i and j
+              if(FutureInsideBelow[i] == FutureInsideBelow[j]) {
+                if(verbose)
+                  cat(paste("Tangent (1) at row=", row[i],
+                            "col=", col[i], "\n"))
+                tangents <- rbind(tangents,
+                                  data.frame(row=row[i],
+                                             col=col[i],
+                                             newcol=newcol[i]))
+              }
+            } else {
+              #' no exits on this row
+              if(verbose)
+                cat("no subsequent exit\n")
+              if(FutureInsideBelow[i] == 0) {
+                if(verbose)
+                  cat(paste("Tangent (2) at row=", row[i],
+                            "col=", col[i], "\n"))
+                tangents <- rbind(tangents,
+                                  data.frame(row=row[i],
+                                             col=col[i],
+                                             newcol=newcol[i]))
+              }
+            }
+          }
+          if(verbose) cat("====\n")
+          tangents
+        })
+    }
+    tangents$oldx <- X$xcol[tangents$col]
+    tangents$newx <- X$xcol[tangents$newcol]
+    tangents$y    <- X$yrow[tangents$row]
+    TanOld <- with(tangents, ppp(oldx, y, window=Frame(X), unitname=unitX))
+    TanNew <- with(tangents, ppp(newx, y, window=Laz, unitname=unitX))
+    result <- list(oldX=oldX,
+                   TanOld=TanOld, TanNew=TanNew, Rect=Rect,
+                   df=tangents)
+    class(result) <- c("laslett", class(result))
+    if(plotit)
+      plot(result, ...)
+    return(result)
+  }
+
+  maskLaslett
+})
+
+print.laslett <- function(x, ...) {
+  cat("Laslett Transform\n")
+  cat("\nOriginal object:\n")
+  print(x$oldX)
+  cat("\nTransformed set:\n")
+  W <- Window(x$TanNew)
+  print(W)
+  unitinfo <- summary(unitname(W))
+  cat("\nTransformed area:", area.owin(W),
+      "square", unitinfo$plural, unitinfo$explain,
+      fill=TRUE)
+  cat("\n")
+  type <- x$type %orifnull% "lower"
+  cat(npoints(x$TanNew), type, "tangent points found.", fill=TRUE)
+  return(invisible(NULL))
+}
+  
+plot.laslett <- function(x, ...,
+                         Xpars=list(box=TRUE, col="grey"),
+                         pointpars=list(pch=3, cols="blue"),
+                         rectpars=list(lty=3, border="green")) {
+  Display <- with(x,
+                  solist(Original=
+                         layered(oldX,
+                                 TanOld,
+                                 plotargs=list(Xpars, pointpars)),
+                         Transformed=
+                         layered(TanNew,
+                                 Rect, 
+                                 plotargs=list(pointpars, rectpars))))
+
+  #' ignore arguments intended for as.mask
+  argh <- list(...)
+  if(any(bad <- names(argh) %in% c("eps", "dimyx", "xy")))
+    argh <- argh[!bad]
+
+  do.call(plot,
+          resolve.defaults(list(x=Display),
+                           argh,
+                           list(main="", mar.panel=0, hsep=1,
+                                equal.scales=TRUE)))
+  return(invisible(NULL))
+}
+
+polyLaslett <- function(X, ..., oldX=X, verbose=FALSE, plotit=TRUE) {
+  X <- as.polygonal(X)
+  if(is.empty(X))
+    stop("Empty window!")
+  unitX <- unitname(X)
+  # expand frame slightly
+  B <- Frame(X)
+  B <- grow.rectangle(B, max(sidelengths(B))/8)
+  x0 <- B$xrange[1L]
+  x1 <- B$xrange[2L]
+  # extract vertices
+  v <- vertices(X)
+  nv <- length(v$x)
+  # ..........  compute transformed set .....................
+  # make horizontal segments from each vertex to sides of box
+  left <- with(v, psp(rep(x0,nv), y, x, y, window=B, marks=1:nv, check=FALSE))
+  right <- with(v, psp(x, y, rep(x1,nv), y, window=B, marks=1:nv, check=FALSE))
+  # intersect each horizontal segment with the window
+  if(verbose) cat("Processing", nv, "polygon vertices... ")
+  clipleft <- clip.psp(left, X)
+  clipright <- clip.psp(right, X)
+  if(verbose) cat("Done.\n")
+  # calculate lengths of clipped segments, and group by vertex.
+  # marks indicate which horizontal segment was the parent of each piece.
+  lenleft <- tapply(lengths.psp(clipleft),
+                    factor(marks(clipleft), levels=1:nv),
+                    sum)
+  lenright <- tapply(lengths.psp(clipright),
+                     factor(marks(clipright), levels=1:nv),
+                     sum)
+  lenleft[is.na(lenleft)] <- 0
+  lenright[is.na(lenright)] <- 0
+  emptylenleft <- lengths.psp(left) - lenleft
+  emptylenright <- lengths.psp(right) - lenright
+  # The transformed polygon 
+  isrightmost <- (lenright == 0)
+  yright <- v$y[isrightmost]
+  xright <- x0 + (emptylenleft+emptylenright)[isrightmost]
+  minxright <- min(xright) # right margin of largest rectangle
+  ord <- order(yright)
+  Ty <- yright[ord]
+  Tx <- xright[ord]
+  nT <- length(Ty)
+  if(Tx[nT] > x0) {
+    Ty <- c(Ty, Ty[nT])
+    Tx <- c(Tx, x0)
+  }
+  if(Tx[1L] > x0) {
+    Ty <- c(Ty[1L], Ty)
+    Tx <- c(x0,    Tx)
+  }
+  TX <- owin(B$xrange, B$yrange, poly=list(x=Tx, y=Ty), check=FALSE)
+  TX <- TX[Frame(X)]
+  # ..........  identify lower tangents .....................
+  V <- as.ppp(v, W=Frame(X), unitname=unitX)
+  is.candidate <- is.tangent <- logical(nv)
+  # apply simple criteria for ruling in or out
+  Plist <- X$bdry
+  cumnv <- 0
+  for(i in seq_along(Plist)) {
+    P <- Plist[[i]]
+    xx <- P$x
+    yy <- P$y
+    nn <- length(xx)
+#    xnext <- c(xx[-1L], xx[1L])
+    ynext <- c(yy[-1L], yy[1L])
+#    xprev <- c(xx[nn], xx[-nn])
+    yprev <- c(yy[nn], yy[-nn])
+    is.candidate[cumnv + seq_len(nn)] <- 
+      if(!is.hole.xypolygon(P)) {
+        (yprev > yy & ynext >= yy)
+      } else {
+        (yprev >= yy & ynext > yy)
+      }
+    cumnv <- cumnv + nn
+  }
+  ##  was.candidate <- is.candidate
+  
+  #' reject candidates lying too close to boundary
+  tooclose <- (bdist.points(V[is.candidate]) < diameter(Frame(V))/1000)
+  is.candidate[is.candidate][tooclose] <- FALSE
+  #' evaluate candidate points
+  #' make tiny boxes around vertex
+  candidates <- which(is.candidate)
+  nc <- length(candidates)
+  nnd <- nndist(V)
+  if(verbose) {
+    cat(paste("Processing", nc, "tangent candidates ... "))
+    pstate <- list()
+  }
+  tiny <- .Machine$double.eps
+  for(j in 1:nc) {
+    i <- candidates[j]
+    eps <- nnd[i]/16
+    xi <- v$x[i]
+    yi <- v$y[i]
+    Below <- owin(xi + c(-eps,eps), yi + c(-eps, 0))
+#    Above <- owin(xi + c(-eps, eps), yi + c(0, eps))
+    UpLeft <- owin(xi + c(-eps, 0), yi + c(0, eps))
+    is.tangent[i] <- (overlap.owin(X, Below) <= tiny) &&
+                     (overlap.owin(X, UpLeft) < eps^2)
+    if(verbose)
+      pstate <- progressreport(j, nc, state=pstate)
+  }
+  if(verbose) cat(paste("Found", sum(is.tangent), "tangents\n"))
+  TanOld <- V[is.tangent]
+  ynew <- TanOld$y
+  xnew <- x0 + emptylenleft[is.tangent]
+  TanNew <- ppp(xnew, ynew, window=TX, check=FALSE, unitname=unitX)
+  #  maximal rectangle
+  Rect <- owin(c(X$xrange[1L], minxright), X$yrange, unitname=unitX)
+  #
+  df <- data.frame(xold=TanOld$x, xnew=TanNew$x, y=TanNew$y)
+  #
+  result <- list(oldX=oldX,
+                 TanOld=TanOld, TanNew=TanNew, Rect=Rect,
+                 df=df)
+  class(result) <- c("laslett", class(result))
+  if(plotit)
+    plot(result, ...)
+  return(result)
+}
+
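+#' A minimal usage sketch of polyLaslett (editorial illustration; the
+#' polygonal window below is invented, and any window acceptable to
+#' as.polygonal() would do):
+if(FALSE) {
+  W <- owin(poly=list(x=c(0, 2, 2, 1, 0),
+                      y=c(0, 0, 1, 2, 1)))
+  Y <- polyLaslett(W, plotit=FALSE)
+  Y$Rect    # maximal rectangle inside the transformed set
+  Y$df      # tangent points before (xold) and after (xnew) the transform
+}
+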
diff --git a/R/layered.R b/R/layered.R
new file mode 100755
index 0000000..7f3cdf0
--- /dev/null
+++ b/R/layered.R
@@ -0,0 +1,393 @@
+#
+# layered.R
+#
+# Simple mechanism for layered plotting
+#
+#  $Revision: 1.39 $  $Date: 2017/06/05 10:31:58 $
+#
+
+layered <- function(..., plotargs=NULL, LayerList=NULL) {
+  argh <- list(...)
+  if(length(argh) > 0 && !is.null(LayerList))
+    stop("LayerList is incompatible with other arguments")
+  out <- if(!is.null(LayerList)) LayerList else argh
+  n <- length(out)
+  if(sum(nzchar(names(out))) != n)
+    names(out) <- paste("Layer", seq_len(n))
+  if(is.null(plotargs)) {
+    plotargs <- rep.int(list(list()), n)
+  } else {
+    if(!is.list(plotargs))
+      stop("plotargs should be a list of lists")
+    if(!all(unlist(lapply(plotargs, is.list))))
+      plotargs <- list(plotargs)
+    np <- length(plotargs)
+    if(np == 1) plotargs <- rep(plotargs, n) else if(np != n)
+      stop("plotargs should have one component for each element of the list")
+  }
+  names(plotargs) <- names(out)
+  attr(out, "plotargs") <- plotargs
+  class(out) <- c("layered", class(out))
+  return(out)
+}
+
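+## A brief usage sketch (assumes the standard spatstat dataset 'cells'):
+## a pixel image is drawn first, then the points on top, with separate
+## plot arguments supplied for each layer.
+if(FALSE) {
+  D <- density(cells)
+  L <- layered(D, cells,
+               plotargs=list(list(ribbon=FALSE), list(pch=16)))
+  plot(L)
+}
+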
+print.layered <- function(x, ...) {
+  splat("Layered object")
+  if(length(x) == 0) splat("(no entries)")
+  for(i in seq_along(x)) {
+    cat(paste("\n", names(x)[i], ":\n", sep=""))
+    print(x[[i]])
+  }
+  pl <- layerplotargs(x)
+  hasplot <- (lengths(pl) > 0)
+  if(any(hasplot)) 
+    splat("Includes plot arguments for", commasep(names(pl)[hasplot]))
+  invisible(NULL)
+}
+
+plot.layered <- function(x, ..., which=NULL, plotargs=NULL,
+                         add=FALSE, show.all=!add, main=NULL,
+                         do.plot=TRUE) {
+  if(is.null(main))
+    main <- short.deparse(substitute(x))
+  n <- length(x)
+  if(!is.null(plotargs)) {
+    np <- length(plotargs)
+    if(!(is.list(plotargs) && all(unlist(lapply(plotargs, is.list)))))
+      stop("plotargs should be a list of lists")
+  }
+  ## select layers
+  if(!is.null(which)) {
+    x <- x[which]
+    nw <- length(x)
+    if(!is.null(plotargs)) {
+      if(np == n) plotargs <- plotargs[which] else
+      if(np == 1) plotargs <- rep(plotargs, nw) else
+      if(np != nw) 
+        stop("plotargs should have one component for each layer to be plotted")
+    }
+    n <- nw
+  } else if(!is.null(plotargs)) {
+    if(np == 1) plotargs <- rep(plotargs, n) else
+    if(np != n) stop("plotargs should have one component for each layer")
+  }
+  ## remove null layers
+  if(any(isnul <- unlist(lapply(x, is.null)))) {
+    x <- x[!isnul]
+    if(!is.null(plotargs))
+      plotargs <- plotargs[!isnul]
+    n <- length(x)
+  }
+  ## anything to plot?
+  if(n == 0)
+    return(invisible(NULL))
+  ## Merge plotting arguments
+  xplotargs <- layerplotargs(x)
+  if(is.null(plotargs)) {
+    plotargs <- xplotargs
+  } else if(length(xplotargs) > 0) {
+    for(i in 1:n)
+      plotargs[[i]] <- resolve.defaults(plotargs[[i]], xplotargs[[i]])
+  }
+  ## Determine bounding box
+  a <- plotEachLayer(x, ..., plotargs=plotargs, add=add,
+                     show.all=show.all, do.plot=FALSE)
+  if(!do.plot)
+    return(a)
+  bb <- as.rectangle(as.owin(a))
+  ## Start plotting
+  if(!add && !is.null(bb)) {
+    ## initialise new plot using bounding box
+    pt <- prepareTitle(main)
+    plot(bb, type="n", main=pt$blank)
+    add <- TRUE
+  }
+  # plot the layers
+  out <- plotEachLayer(x, ..., main=main,
+                       plotargs=plotargs, add=add,
+                       show.all=show.all, do.plot=TRUE)
+  return(invisible(out))
+}
+
+plotEachLayer <- function(x, ..., main,
+                          plotargs, add, show.all, do.plot=TRUE) {
+  main.given <- !missing(main)
+  ## do.plot=TRUE    =>   plot the layers 
+  ## do.plot=FALSE   =>   determine bounding boxes
+  out <- boxes <- list()
+  nama <- names(x)
+  firstlayer <- TRUE
+  for(i in seq_along(x)) {
+    xi <- x[[i]]
+    if(length(xi) == 0) {
+      # null layer - no plotting
+      out[[i]] <- boxes[[i]] <- NULL
+    } else {
+      ## plot layer i on top of previous layers if any.
+      ## By default,
+      ##    - show all graphic elements of the first component only;
+      ##    - show the main title on the first component only;
+      ##    - do not show any component names.
+      add.i <- add || !firstlayer
+      if(main.given) {
+        main.i <- if(firstlayer) main else ""
+      } else {
+        show.all.i <- resolve.1.default(list(show.all=FALSE),
+                                         list(...), 
+                                         plotargs[[i]])
+        main.i <- if(show.all.i) nama[i] else ""
+      }
+      dflt <- list(main=main.i,
+                   show.all=show.all && firstlayer)
+      pla.i <- plotargs[[i]]
+      defaultplot <- !(".plot" %in% names(pla.i))
+      ## plot layer i, or just determine bounding box
+      if(defaultplot &&
+         inherits(xi, c("ppp", "psp", "owin",
+                        "lpp", "linnet", 
+                        "im", "msr", "layered"))) {
+        ## plot method for 'xi' has argument 'do.plot'.
+        mplf <-
+          if(inherits(xi, c("ppp", "lpp"))) list(multiplot=FALSE) else list()
+        out[[i]] <- outi <- do.call(plot,
+                                    resolve.defaults(list(x=xi,
+                                                          add=add.i,
+                                                          do.plot=do.plot),
+                                                     list(...),
+                                                     mplf,
+                                                     pla.i,
+                                                     dflt))
+        boxes[[i]] <- as.rectangle(as.owin(outi))
+      } else {
+        ## plot method for 'xi' does not have argument 'do.plot'
+        if(do.plot) {
+          if(defaultplot) {
+            plotfun <- "plot"
+          } else {
+            plotfun <- pla.i[[".plot"]]
+            pla.i <- pla.i[names(pla.i) != ".plot"]
+          }
+          out[[i]] <- outi <- do.call(plotfun,
+                                      resolve.defaults(list(x=xi,
+                                                            add=add.i),
+                                                       list(...),
+                                                       pla.i,
+                                                       dflt))
+        }
+        ## convert layer i to box
+        boxi <- try(as.rectangle(xi), silent=TRUE)
+        boxes[[i]] <- if(!inherits(boxi, "try-error")) boxi else NULL
+      }
+      firstlayer <- FALSE
+    }
+  }
+  ## one box to bound them all
+  if(!all(unlist(lapply(boxes, is.null))))
+    attr(out, "bbox") <- do.call(boundingbox, boxes)
+  return(out)
+}
+
+
+"[.layered" <- function(x, i, j, drop=FALSE, ...) {
+  i.given <- !missing(i) && !is.null(i)
+  j.given <- !missing(j) && !is.null(j)
+  if(!i.given && !j.given)
+    return(x)
+  p <- attr(x, "plotargs")
+  x <- unclass(x)
+  nx <- length(x)
+  if(i.given) {
+    if(is.owin(i)) {
+      #' spatial window subset
+      nonemp <- (lengths(x) != 0)
+      x[nonemp] <- lapply(x[nonemp], "[", i=i, ...)
+    } else {
+      #' vector subset index
+      x <- x[i]
+      p <- p[i]
+      nx <- length(x)
+    }
+  }
+  if(j.given) {
+    nonemp <- (lengths(x) != 0)
+    x[nonemp] <- lapply(x[nonemp], "[", i=j, ...)
+  }
+  if(drop && nx == 1)
+    return(x[[1L]])
+  y <- layered(LayerList=x, plotargs=p)
+  return(y)
+}
+
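+## Sketch of the two forms of subscript handled above: an integer (or
+## logical/character) index selects layers, while a window index clips
+## every layer spatially. Illustrative data only.
+if(FALSE) {
+  L <- layered(density(cells), cells)
+  L[2]                          # keep only the second layer
+  B <- owin(c(0, 0.5), c(0, 0.5))
+  L[B]                          # restrict both layers to the window B
+}
+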
+"[[<-.layered" <- function(x, i, value) {
+  x[i] <- if(!is.null(value)) list(value) else NULL
+  return(x)
+}
+
+"[<-.layered" <- function(x, i, value) {
+  p <- layerplotargs(x)
+  ## invoke list method
+  y <- x
+  class(y) <- "list"
+  y[i] <- value
+  # make it a 'layered' object too
+  class(y) <- c("layered", class(y))
+  # update names and plotargs
+  if(any(blank <- !nzchar(names(y)))) {
+    names(y)[blank] <- paste("Layer", which(blank))
+    pnew <- rep(list(list()), length(y))
+    names(pnew) <- names(y)
+    m <- match(names(y), names(x))
+    mok <- !is.na(m)
+    pnew[mok] <- p[m[mok]]
+    layerplotargs(y) <- pnew
+  } else layerplotargs(y) <- layerplotargs(x)[names(y)]
+  return(y)
+}
+
+layerplotargs <- function(L) {
+  stopifnot(inherits(L, "layered"))
+  attr(L, "plotargs")
+}
+
+"layerplotargs<-" <- function(L, value) {
+  if(!inherits(L, "layered"))
+    L <- layered(L)
+  if(!is.list(value))
+    stop("Replacement value should be a list, or a list-of-lists")
+  n <- length(L)
+  if(!all(unlist(lapply(value, is.list)))) 
+    value <- unname(rep(list(value), n))
+  if(length(value) != n) {
+    if(length(value) == 1) value <- unname(rep(value, n)) else
+    stop("Replacement value is wrong length")
+  }
+  if(is.null(names(value))) names(value) <- names(L) else
+  if(!identical(names(value), names(L)))
+    stop("Mismatch in names of list elements")
+  attr(L, "plotargs") <- value
+  return(L)
+}
+
+applytolayers <- function(L, FUN, ...) {
+  # Apply FUN to each **non-null** layer,
+  # preserving the plot arguments
+  pla <- layerplotargs(L)
+  if(length(L) > 0) {
+    ok <- !unlist(lapply(L, is.null))
+    L[ok] <- lapply(L[ok], FUN, ...)
+  }
+  Z <- layered(LayerList=L, plotargs=pla)
+  return(Z)
+}
+  
+shift.layered <- function(X, vec=c(0,0), ...) {
+  if(length(list(...)) > 0) {
+    if(!missing(vec)) 
+      warning("Argument vec ignored; overridden by other arguments")
+    ## ensure the same shift is applied to all layers
+    s <- shift(X[[1L]], ...)
+    vec <- getlastshift(s)
+  }
+  Y <- applytolayers(X, shift, vec=vec)
+  attr(Y, "lastshift") <- vec
+  return(Y)
+}
+
+affine.layered <- function(X, ...) {
+  applytolayers(X, affine, ...)
+}
+
+rotate.layered <- function(X, ..., centre=NULL) {
+  if(!is.null(centre)) {
+    X <- shift(X, origin=centre)
+    negorigin <- getlastshift(X)
+  } else negorigin <- NULL
+  Y <- applytolayers(X, rotate, ...)
+  if(!is.null(negorigin))
+    Y <- shift(Y, -negorigin)
+  return(Y)
+}
+
+reflect.layered <- function(X) {
+  applytolayers(X, reflect)
+}
+
+flipxy.layered <- function(X) {
+  applytolayers(X, flipxy)
+}
+
+scalardilate.layered <- function(X, ...) {
+  applytolayers(X, scalardilate, ...)
+}
+  
+rescale.layered <- function(X, s, unitname) {
+  if(missing(s)) s <- NULL
+  if(missing(unitname)) unitname <- NULL
+  applytolayers(X, rescale, s=s, unitname=unitname) 
+}
+
+
+as.owin.layered <- local({
+
+  as.owin.layered <- function(W, ..., fatal=TRUE) {
+    if(length(W) == 0) {
+      if(fatal) stop("Layered object is empty: no window data")
+      return(NULL)
+    }
+    ## remove null layers
+    isnul <- unlist(lapply(W, is.null))
+    W <- W[!isnul]
+    if(length(W) == 0) {
+      if(fatal) stop("Layered object has no window data")
+      return(NULL)
+    }
+    Wlist <- lapply(unname(W), as.owin, ..., fatal=fatal)
+    Wlist <- lapply(Wlist, rescue.rectangle)
+    Wlist <- lapply(Wlist, puffbox)
+    Z <- Wlist[[1L]]
+    if(length(Wlist) > 1) {
+      same <- unlist(lapply(Wlist[-1L], identical, y=Z))
+      if(!all(same))
+        Z <- do.call(union.owin, Wlist)
+    }
+    return(Z)
+  }
+
+  puffbox <- function(W) {
+    ## union.owin will delete boxes that have width zero or height zero
+    ## so 'puff' them out slightly
+    ss <- sidelengths(Frame(W))
+    if(ss[1L] == 0) W$xrange <- W$xrange + 1e-6 * c(-1,1) * ss[2L]
+    if(ss[2L] == 0) W$yrange <- W$yrange + 1e-6 * c(-1,1) * ss[1L]
+    return(W)
+  }
+  
+  as.owin.layered
+})
+
+
+domain.layered <- Window.layered <- function(X, ...) { as.owin(X) }
+
+as.layered <- function(X) {
+  UseMethod("as.layered")
+}
+
+as.layered.default <- function(X) {
+  if(is.list(X) && all(sapply(X, is.sob))) layered(LayerList=X) else 
+  layered(X)
+}
+
+as.layered.ppp <- function(X) {
+  if(!is.marked(X)) return(layered(X))
+  if(is.multitype(X)) return(layered(LayerList=split(X)))
+  mX <- marks(X)
+  if(!is.null(d <- dim(mX)) && d[2L] > 1) {
+    mx <- as.data.frame(marks(X))
+    Y <- lapply(mx, setmarks, x=X)
+    return(layered(LayerList=Y))
+  }
+  return(layered(X))
+}
+
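+## Sketch: a multitype pattern is split into one layer per type, while a
+## pattern with several columns of marks gets one layer per column
+## (illustrated with the standard spatstat dataset 'amacrine'):
+if(FALSE) {
+  as.layered(amacrine)    # layers "off" and "on"
+  plot(as.layered(amacrine))
+}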
+
+  
diff --git a/R/lennard.R b/R/lennard.R
new file mode 100755
index 0000000..14ea79b
--- /dev/null
+++ b/R/lennard.R
@@ -0,0 +1,112 @@
+#
+#
+#    lennard.R
+#
+#    $Revision: 1.21 $	$Date: 2017/02/07 08:12:05 $
+#
+#    Lennard-Jones potential
+#
+#
+# -------------------------------------------------------------------
+#	
+
+LennardJones <- local({
+
+  BlankLJ <- 
+    list(
+         name     = "Lennard-Jones process",
+         creator  = "LennardJones",
+         family   = "pairwise.family",  # evaluated later
+         pot      = function(d, par) {
+           sig0 <- par$sigma0
+           if(is.na(sig0)) {
+             d6 <- d^{-6}
+             p <- array(c(-d6^2,d6),dim=c(dim(d),2))
+           } else {
+             # expand around sig0 and set large numbers to Inf
+             drat <- d/sig0
+             d6 <- drat^{-6}
+             p <- array(c(-d6^2,d6),dim=c(dim(d),2))
+             small <- (drat < 1/4)
+             small <- array(c(small, small), dim=c(dim(d), 2))
+             p[small] <- -Inf
+             big <- (drat > 4)
+             big <- array(c(big, big), dim=c(dim(d), 2))
+             p[big] <- 0
+           }
+           return(p)
+         },
+         par      = list(sigma0=NULL),  # filled in later
+         parnames = "Initial approximation to sigma",
+         selfstart = function(X, self) {
+           # self-starter for the Lennard-Jones interaction
+           # attempt to set value of 'sigma0'
+           if(!is.na(self$par$sigma0)) {
+             # value fixed by user or previous invocation
+             return(self)
+           }
+           if(npoints(X) < 2) {
+             # not enough points
+             return(self)
+           }
+           s0 <- minnndist(X)
+           if(s0 == 0) {
+             warning(paste("Pattern contains duplicated points:",
+                           "impossible under Lennard-Jones model"))
+             s0 <- mean(nndist(X))
+             if(s0 == 0)
+               return(self)
+           }
+           LennardJones(s0)           
+         },
+         init     = function(...){}, # do nothing
+         update = NULL, # default OK
+         print = NULL,    # default OK
+         interpret =  function(coeffs, self) {
+           theta1 <- as.numeric(coeffs[1L])
+           theta2 <- as.numeric(coeffs[2L])
+           sig0 <- self$par$sigma0
+           if(is.na(sig0))
+             sig0 <- 1
+           if(sign(theta1) * sign(theta2) == 1) {
+             sigma <- sig0 * (theta1/theta2)^(1/6)
+             epsilon <- (theta2^2)/(4 * theta1)
+           } else {
+             sigma <- NA
+             epsilon <- NA
+           }
+           return(list(param=list(sigma=sigma, epsilon=epsilon),
+                       inames="interaction parameters",
+                       printable=signif(c(sigma=sigma,epsilon=epsilon))))
+         },
+         valid = function(coeffs, self) {
+           p <- unlist(self$interpret(coeffs, self)$param)
+           return(all(is.finite(p) & (p > 0)))
+         },
+         project = function(coeffs, self) {
+           if((self$valid)(coeffs, self)) return(NULL) else return(Poisson())
+         },
+         irange = function(self, coeffs=NA, epsilon=0, ...) {
+           if(anyNA(coeffs) || epsilon == 0)
+             return(Inf)
+           sig0 <- self$par$sigma0
+           if(is.na(sig0)) sig0 <- 1
+           theta1 <- abs(coeffs[1L])
+           theta2 <- abs(coeffs[2L])
+           return(sig0 * max((theta1/epsilon)^(1/12), (theta2/epsilon)^(1/6)))
+         },
+         version=NULL # filled in later
+  )
+  class(BlankLJ) <- "interact"
+
+  LennardJones <- function(sigma0=NA) {
+    if(is.null(sigma0) || !is.finite(sigma0))
+      sigma0 <- NA
+    instantiate.interact(BlankLJ, list(sigma0=sigma0))
+  }
+
+  LennardJones <- intermaker(LennardJones, BlankLJ)
+  
+  LennardJones
+})
+
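+# Fitting sketch: the interaction object is passed to ppm(). The dataset
+# 'cells' is used purely as an illustration here, and the epsilon value
+# is arbitrary.
+if(FALSE) {
+  fit <- ppm(cells ~ 1, LennardJones())
+  fit                      # 'interpret' reports fitted sigma and epsilon
+  reach(fit, epsilon=0.01) # 'irange' gives the effective interaction range
+}
+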
diff --git a/R/levelset.R b/R/levelset.R
new file mode 100755
index 0000000..356aea8
--- /dev/null
+++ b/R/levelset.R
@@ -0,0 +1,41 @@
+# levelset.R
+#
+#  $Revision: 1.5 $  $Date: 2015/01/15 07:10:37 $
+#
+# level set of an image
+
+levelset <- function(X, thresh, compare="<=") {
+  # force X and thresh to be evaluated in this frame
+  verifyclass(X, "im")
+  thresh <- thresh
+  switch(compare,
+         "<"  = { A <- eval.im(X < thresh) },
+         ">"  = { A <- eval.im(X > thresh) },
+         "<=" = { A <- eval.im(X <= thresh) },
+         ">=" = { A <- eval.im(X >= thresh) },
+         "==" = { A <- eval.im(X == thresh) },
+         "!=" = { A <- eval.im(X != thresh) },
+         stop(paste("unrecognised comparison operator", sQuote(compare))))
+  W <- as.owin(eval.im(ifelse1NA(A)))
+  return(W)
+}
+
+# compute owin containing all pixels where image expression is TRUE
+
+solutionset <- function(..., envir) {
+  if(missing(envir))
+    envir <- parent.frame()
+  A <- try(eval.im(..., envir=envir), silent=TRUE)
+  if(inherits(A, "try-error"))
+    A <- try(eval(..., envir=envir), silent=TRUE)
+  if(inherits(A, "try-error"))
+    stop("Unable to evaluate expression")
+  if(!is.im(A))
+    stop("Evaluating the expression did not yield a pixel image")
+  if(A$type != "logical")
+    stop("Evaluating the expression did not yield a logical-valued image")
+  W <- as.owin(eval.im(ifelse1NA(A)))
+  return(W)
+}
+
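+# Sketch of both functions (thresholds arbitrary, data illustrative):
+if(FALSE) {
+  D <- density(cells)
+  W1 <- levelset(D, 50, ">")            # pixels where D > 50
+  W2 <- solutionset(D > 50 & D < 100)   # pixels satisfying the expression
+}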
+
diff --git a/R/leverage.R b/R/leverage.R
new file mode 100755
index 0000000..55c5379
--- /dev/null
+++ b/R/leverage.R
@@ -0,0 +1,883 @@
+#
+#  leverage.R
+#
+#  leverage and influence
+#
+#  $Revision: 1.81 $ $Date: 2017/08/10 02:31:47 $
+#
+
+leverage <- function(model, ...) {
+  UseMethod("leverage")
+}
+
+leverage.ppm <- function(model, ...,
+                         drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL)
+{
+  fitname <- short.deparse(substitute(model))
+  a <- ppmInfluence(model, what="leverage", drop=drop,
+                    iScore=iScore, iHessian=iHessian, iArgs=iArgs,
+                    ...,
+                    fitname=fitname)
+  return(a$leverage)
+}
+
+influence.ppm <- function(model, ...,
+                          drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL)
+{
+  fitname <- short.deparse(substitute(model))
+  a <- ppmInfluence(model, what="influence", drop=drop,
+                    iScore=iScore, iHessian=iHessian, iArgs=iArgs,
+                    ...,
+                    fitname=fitname)
+  return(a$influence)
+}
+
+dfbetas.ppm <- function(model, ...,
+                        drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) {
+  fitname <- short.deparse(substitute(model))
+  a <- ppmInfluence(model, what="dfbetas", drop=drop,
+                         iScore=iScore, iHessian=iHessian, iArgs=iArgs,
+                     ...,
+                    fitname=fitname)
+  return(a$dfbetas)
+}
+
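+#  Usage sketch for the three diagnostics (model purely illustrative):
+if(FALSE) {
+  fit <- ppm(cells ~ x)
+  lev <- leverage(fit)   # leverage function, plotted as an image
+  inf <- influence(fit)  # influence measure at the data points
+  dfb <- dfbetas(fit)    # parameter sensitivity, a vector-valued measure
+  plot(lev)
+  plot(inf)
+}
+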
+ppmInfluence <- function(fit,
+                         what=c("leverage", "influence", "dfbetas"),
+                         ..., 
+                         iScore=NULL, iHessian=NULL, iArgs=NULL,
+                         drop=FALSE,
+                         fitname=NULL) {
+  stuff <- ppmInfluenceEngine(fit, what=what,
+                          ..., 
+                          iScore=iScore, iHessian=iHessian, iArgs=iArgs,
+                          drop=drop, fitname=fitname)
+  fnam <- c("fitname", "fit.is.poisson")
+  result <- list()
+  if("lev" %in% names(stuff)) {
+    lev <- stuff[c(fnam, "lev")]
+    class(lev) <- "leverage.ppm"
+    result$leverage <- lev
+  }
+  if("infl" %in% names(stuff)) {
+    infl <- stuff[c(fnam, "infl")]
+    class(infl) <- "influence.ppm"
+    result$influence <- infl
+  }
+  if(!is.null(dfb <- stuff$dfbetas)) {
+    attr(dfb, "info") <- stuff[fnam]
+    result$dfbetas <- dfb
+  }
+  other <- setdiff(names(stuff), c("lev", "infl", "dfbetas"))
+  result[other] <- stuff[other]
+  return(result)
+}
+
+ppmInfluenceEngine <- function(fit,
+                         what=c("leverage", "influence", "dfbetas",
+                           "score", "derivatives", "increments"),
+                         ...,
+                         iScore=NULL, iHessian=NULL, iArgs=NULL,
+                         drop=FALSE,
+                         method=c("C", "interpreted"),
+                         precomputed=list(),
+                         sparseOK=TRUE,
+                         fitname=NULL,
+                         multitypeOK=FALSE,
+                         entrywise = TRUE,
+                         matrix.action = c("warn", "fatal", "silent"),
+                         geomsmooth = TRUE) {
+  logi <- identical(fit$method, "logi")
+  if(is.null(fitname)) 
+    fitname <- short.deparse(substitute(fit))
+  stopifnot(is.ppm(fit))
+
+  ## type of calculation
+  method <- match.arg(method)
+  what <- match.arg(what, several.ok=TRUE)
+  matrix.action <- match.arg(matrix.action)
+
+  influencecalc <- any(what %in% c("leverage", "influence", "dfbetas"))
+  hesscalc <- influencecalc || any(what == "derivatives")
+  sparse <- sparseOK 
+  target <- paste(what, collapse=",")
+  
+  ## Detect presence of irregular parameters
+  if(is.null(iArgs))
+    iArgs <- fit$covfunargs
+  gotScore <- !is.null(iScore)
+  gotHess <- !is.null(iHessian)
+  needHess <- gotScore && hesscalc  # may be updated later
+  if(!gotHess && needHess)
+    stop("Must supply iHessian", call.=FALSE)
+
+  ## extract values from model, using precomputed values if given
+  theta  <- precomputed$coef   %orifnull% coef(fit)
+  lam    <- precomputed$lambda %orifnull% fitted(fit, check=FALSE)
+  mom    <- precomputed$mom    %orifnull% model.matrix(fit)
+  p <- length(theta)
+  Q <- quad.ppm(fit)
+  w <- w.quad(Q)
+  loc <- union.quad(Q)
+  isdata <- is.data(Q)
+  if(length(w) != length(lam))
+    stop(paste("Internal error: length(w) = ", length(w),
+               "!=", length(lam), "= length(lam)"))
+	       
+  ## extract negative Hessian matrix of regular part of log composite likelihood
+  ##  hess = negative Hessian H
+  ##  fgrad = Fisher-scoring-like gradient G = estimate of E[H]
+  if(logi){
+    ## Intensity of dummy points
+    rho <- fit$Q$param$rho %orifnull% intensity(as.ppp(fit$Q))
+    logiprob <- lam / (lam + rho)
+    vclist <- vcov(fit, what = "internals", matrix.action="silent")
+    hess <- vclist$Slog
+    fgrad <- vclist$fisher
+    invhess <- if(is.null(hess)) NULL else checksolve(hess, "silent")
+    invfgrad <- if(is.null(fgrad)) NULL else checksolve(fgrad, "silent")
+    if(is.null(invhess) || is.null(invfgrad)) {
+      #' use more expensive estimate of variance terms
+      vclist <- vcov(fit, what = "internals", fine=TRUE,
+                     matrix.action=matrix.action)
+      hess <- vclist$Slog
+      fgrad <- vclist$fisher
+      #' try again - exit if really singular
+      invhess <- checksolve(hess, matrix.action, "Hessian", target)
+      invfgrad <- checksolve(fgrad, matrix.action, "gradient matrix", target)
+    }
+#    vc <- invhess %*% (vclist$Sigma1log+vclist$Sigma2log) %*% invhess
+  } else {
+    invfgrad <- vcov(fit, hessian=TRUE, matrix.action="silent")
+    fgrad <- hess <-
+      if(is.null(invfgrad)) NULL else checksolve(invfgrad, "silent")
+    if(is.null(fgrad)) {
+      invfgrad <- vcov(fit, hessian=TRUE, fine=TRUE,
+                       matrix.action=matrix.action)
+      fgrad <- hess <- checksolve(invfgrad, matrix.action, "Hessian", target)
+    }
+  }
+
+  ## evaluate additional (`irregular') components of score, if any
+  iscoremat <- ppmDerivatives(fit, "gradient", iScore, loc, covfunargs=iArgs)
+  gotScore <- !is.null(iscoremat)
+  needHess <- gotScore && hesscalc
+  if(!gotScore) {
+    REG <- 1:ncol(mom)
+  } else {
+    ## count regular and irregular parameters
+    nreg <- ncol(mom)
+    nirr <- ncol(iscoremat)
+    ## add extra columns to model matrix
+    mom <- cbind(mom, iscoremat)
+    REG <- 1:nreg
+    IRR <- nreg + 1:nirr
+    ## evaluate additional (`irregular') entries of Hessian
+    ihessmat <- if(!needHess) NULL else
+                ppmDerivatives(fit, "hessian", iHessian, loc, covfunargs=iArgs)
+    if(gotHess <- !is.null(ihessmat)) {
+      ## recompute negative Hessian of log PL and its mean
+      fgrad <- hessextra <- matrix(0, ncol(mom), ncol(mom))
+    }  
+    if(!logi) {
+      ## pseudolikelihood
+      switch(method,
+             interpreted = {
+               for(i in seq(loc$n)) {
+                 # weight for integrand
+                 wti <- lam[i] * w[i]
+                 if(all(is.finite(wti))) {
+                   # integral of outer product of score 
+                   momi <- mom[i, ]
+                   v1 <- outer(momi, momi, "*") * wti
+                   if(all(is.finite(v1)))
+                     fgrad <- fgrad + v1
+                   # integral of Hessian
+                   # contributions nonzero for irregular parameters
+                   if(gotHess) {
+                     v2 <- matrix(as.numeric(ihessmat[i,]), nirr, nirr) * wti
+                     if(all(is.finite(v2)))
+                       hessextra[IRR, IRR] <- hessextra[IRR, IRR] + v2
+                   }
+                 }
+               }
+               # subtract sum over data points
+               if(gotHess) {
+                 for(i in which(isdata)) {
+                   v2 <- matrix(as.numeric(ihessmat[i,]), nirr, nirr) 
+                   if(all(is.finite(v2)))
+                     hessextra[IRR, IRR] <- hessextra[IRR, IRR] - v2
+                 }
+                 hess <- fgrad + hessextra
+                 invhess <- checksolve(hess, matrix.action, "Hessian", target)
+               } else {
+                 invhess <- hess <- NULL
+               }
+             },
+             C = {
+               wlam <- lam * w
+               fgrad <- sumouter(mom, wlam)
+               if(gotHess) {
+                 # integral term
+                 isfin <- is.finite(wlam) & matrowall(is.finite(ihessmat))
+                 vintegral <-
+                   if(all(isfin)) wlam %*% ihessmat else
+                               wlam[isfin] %*% ihessmat[isfin,, drop=FALSE]
+                 # sum over data points
+                 vdata <- .colSums(ihessmat[isdata, , drop=FALSE],
+                                   sum(isdata), ncol(ihessmat),
+                                   na.rm=TRUE)
+                 vcontrib <- vintegral - vdata
+                 hessextra[IRR, IRR] <-
+                   hessextra[IRR, IRR] + matrix(vcontrib, nirr, nirr)
+                 hess <- fgrad + hessextra
+                 invhess <- checksolve(hess, matrix.action, "Hessian", target)
+               } else {
+                 invhess <- hess <- NULL
+               }
+             })
+    } else {
+      if(!spatstat.options('developer'))
+        stop("Logistic fits are not yet supported")
+      ## logistic fit
+      switch(method,
+             interpreted = {
+	       oweight <- logiprob * (1 - logiprob)
+	       hweight <- ifelse(isdata, -(1 - logiprob), logiprob)
+               for(i in seq(loc$n)) {
+                 ## outer product of score 
+                 momi <- mom[i, ]
+                 v1 <- outer(momi, momi, "*") * oweight[i]
+                 if(all(is.finite(v1)))
+                   fgrad <- fgrad + v1
+		 ## Hessian term
+                 ## contributions nonzero for irregular parameters
+                 if(gotHess) {
+                   v2 <- hweight[i] *
+		         matrix(as.numeric(ihessmat[i,]), nirr, nirr)
+                   if(all(is.finite(v2)))
+                     hessextra[IRR, IRR] <- hessextra[IRR, IRR] + v2
+                 }
+               }
+	       if(gotHess) {
+                 hess <- fgrad + hessextra
+                 invhess <- checksolve(hess, matrix.action, "Hessian", target)
+               } else {
+                 invhess <- hess <- NULL
+	       }
+             },
+             C = {
+	       oweight <- logiprob * (1 - logiprob)
+	       hweight <- ifelse(isdata, -(1 - logiprob), logiprob)
+               fgrad <- sumouter(mom, oweight)
+               if(gotHess) {
+                 # Hessian term
+                 isfin <- is.finite(hweight) & matrowall(is.finite(ihessmat))
+                 vcontrib <-
+                   if(all(isfin)) hweight %*% ihessmat else
+                               hweight[isfin] %*% ihessmat[isfin,, drop=FALSE]
+                 hessextra[IRR, IRR] <-
+                   hessextra[IRR, IRR] + matrix(vcontrib, nirr, nirr)
+                 hess <- fgrad + hessextra
+                 invhess <- checksolve(hess, matrix.action, "Hessian", target)
+               } else {
+                 invhess <- hess <- NULL
+               }
+             })
+    }
+    invfgrad <- checksolve(fgrad, matrix.action, "gradient matrix", target)
+  }
+  
+  if(!needHess) {
+    if(!logi){
+    hess <- fgrad
+    invhess <- invfgrad
+    }
+  }
+  #
+  ok <- NULL
+  if(drop) {
+    ok <- complete.cases(mom)
+    if(all(ok)) {
+      ok <- NULL
+    } else {
+      if((nbad <- sum(isdata[!ok])) > 0)
+        warning(paste("NA value of canonical statistic at",
+                      nbad, ngettext(nbad, "data point", "data points")),
+                call.=FALSE)
+      Q <- Q[ok]
+      mom <- mom[ok, , drop=FALSE]
+      loc <- loc[ok]
+      lam <- lam[ok]
+      w   <- w[ok]
+      isdata <- isdata[ok]
+    }
+  } 
+
+  # ........  start assembling results .....................
+  # 
+  ## start building result
+  result <- list(fitname=fitname, fit.is.poisson=is.poisson(fit))
+  class(result) <- "ppmInfluence"
+
+  if(any(c("score", "derivatives") %in% what)) {
+    ## calculate the composite score
+    rawmean <- if(logi) logiprob else (lam * w)
+    rawresid <- isdata - rawmean
+    score <- matrix(rawresid, nrow=1) %*% mom
+
+    if("score" %in% what)
+      result$score <- score
+    if("derivatives" %in% what) 
+      result$deriv <- list(mom=mom, score=score,
+                           fgrad=fgrad, invfgrad=invfgrad,
+                           hess=hess, invhess=invhess)
+    if(all(what %in% c("score", "derivatives")))
+      return(result)
+  }
+    
+  # compute effect of adding/deleting each quadrature point
+  #    columns index the point being added/deleted
+  #    rows index the points affected
+  #  ........ Poisson case ..................................
+  eff <- mom
+  # ........  Gibbs case ....................................
+  ## second order interaction terms
+  ddS <- ddSintegrand <- NULL
+  if(!is.poisson(fit)) {
+    ## goal is to compute these effect matrices:
+    eff.data <- eff.back  <- matrix(0, nrow(eff), ncol(eff),
+                                    dimnames=dimnames(eff))
+    U <- union.quad(Q)
+    nU <- npoints(U)
+    zerocif <- (lam == 0)
+    anyzerocif <- any(zerocif)
+    ## decide whether to split into blocks
+    nX <- Q$data$n
+    nD <- Q$dummy$n
+    bls <- quadBlockSizes(nX, nD, p, announce=TRUE)
+    nblocks    <- bls$nblocks
+    nperblock  <- bls$nperblock
+    ##
+    if(nblocks > 1 && ("increments" %in% what)) {
+      warning("Oversize quadrature scheme: cannot return array of increments",
+              call.=FALSE)
+      what <- setdiff(what, "increments")
+    }
+    R <- reach(fit)
+    ## indices into original quadrature scheme
+    whichok <- if(!is.null(ok)) which(ok) else seq_len(nX+nD) 
+    whichokdata <- whichok[isdata]
+    whichokdummy <- whichok[!isdata]
+    ## loop 
+    for(iblock in 1:nblocks) {
+      first <- min(nD, (iblock - 1) * nperblock + 1)
+      last  <- min(nD, iblock * nperblock)
+      # corresponding subset of original quadrature scheme
+      if(!is.null(ok) || nblocks > 1) {
+        ## subset for which we will compute the effect
+        current <- c(whichokdata, whichokdummy[first:last])
+        ## find neighbours, needed for calculation
+        other <- setdiff(seq_len(nU), current)
+        crx <- crosspairs(U[current], U[other], R, what="indices")
+        nabers <- other[unique(crx$j)]
+        ## subset actually requested
+        requested <- c(current, nabers)
+        ## corresponding stuff ('B' for block)
+        isdataB <- isdata[requested]
+        zerocifB <- zerocif[requested]
+        anyzerocifB <- any(zerocifB)
+        momB <- mom[requested, , drop=FALSE]
+        lamB <- lam[requested]
+        if(logi) logiprobB <- logiprob[requested]
+        wB <- w[requested]
+        currentB <- seq_along(current)
+      } else {
+        requested <- NULL
+        isdataB <- isdata
+        zerocifB <- zerocif
+        anyzerocifB <- anyzerocif
+        momB <- mom
+        lamB <- lam
+        if(logi) logiprobB <- logiprob
+        wB <- w
+      }
+      ## compute second order terms 
+      ## ddS[i,j, ] = Delta_i Delta_j S(x)
+      ddS <- deltasuffstat(fit, restrict = "first", dataonly=FALSE,
+                           quadsub=requested, sparseOK=sparse)
+      ## 
+      if(is.null(ddS)) {
+        warning("Second order interaction terms are not implemented",
+                " for this model; they are treated as zero", call.=FALSE)
+        break
+      } else {
+        sparse <- inherits(ddS, "sparse3Darray")
+        if(gotScore) {
+          ## add extra planes of zeroes to second-order model matrix
+          ## (zero because the irregular components are part of the trend)
+          paddim <- c(dim(ddS)[1:2], nirr)
+          if(!sparse) {
+            ddS <- abind::abind(ddS, array(0, dim=paddim), along=3)
+          } else {
+            ddS <- bind.sparse3Darray(ddS,
+                                      sparse3Darray(dims=paddim),
+                                      along=3)
+          }
+        }
+      }
+      ## effect of addition/deletion of U[j]
+      ## on score contribution from data points (sum automatically restricted to
+      ## interior for border correction by earlier call to
+      ## deltasuffstat(..., restrict = "first"))
+      ddSX <- ddS[isdataB, , , drop=FALSE]
+      eff.data.B <- marginSums(ddSX, c(2,3))
+      ## check if any quadrature points have zero conditional intensity;
+      ## these do not contribute; the associated values of the sufficient
+      ## statistic may be Infinite and must be explicitly set to zero.
+      if(anyzerocifB)
+        eff.data.B[zerocifB, ] <- 0
+      ## save results for current subset of quadrature points 
+      if(is.null(requested)) {
+        eff.data <- eff.data.B
+      } else {
+        eff.data[current,] <- as.matrix(eff.data.B[currentB,,drop=FALSE])
+      }
+      ## 
+      rm(ddSX, eff.data.B)
+      ## effect of addition/deletion of U[j] on integral term in score
+      changesignB <- ifelse(isdataB, -1, 1)
+      if(!sparse) {
+        if(logi){
+          stop("Non-sparse method is not implemented for method = 'logi'!")
+        } else{
+          ## model matrix after addition/deletion of each U[j]
+          ## mombefore[i,j,] <- mom[i,]
+          di <- dim(ddS)
+          mombefore <- array(apply(momB, 2, rep, times=di[2]), dim=di)
+          momchange <- ddS
+          momchange[ , isdataB, ] <- - momchange[, isdataB, ]
+          momafter <- mombefore + momchange
+          ## effect of addition/deletion of U[j] on lambda(U[i], X)
+          if(gotScore){
+            lamratio <- exp(tensor::tensor(momchange[,,REG,drop=FALSE],
+                                           theta, 3, 1))
+          } else{
+            lamratio <- exp(tensor::tensor(momchange, theta, 3, 1))
+          }
+          lamratio <- array(lamratio, dim=dim(momafter))
+          ddSintegrand <- lamB * (momafter * lamratio - mombefore)
+          rm(lamratio)
+        }
+        rm(momchange, mombefore, momafter)
+        gc()
+      } else {
+        if(logi){
+          ## Delta suff. stat. with sign change for data/dummy (sparse3Darray)
+          momchange <- ddS
+          momchange[ , isdataB, ] <- - momchange[, isdataB, ]
+          ## theta^T %*% ddS (with sign -1/+1 according to data/dummy) as triplet sparse matrix
+          if(gotScore){
+            momchangeeffect <- tenseur(momchange[,,REG,drop=FALSE], theta, 3, 1)
+          } else{
+            momchangeeffect <- tenseur(momchange, theta, 3, 1)
+          }
+          momchangeeffect <- expandSparse(momchangeeffect, n = dim(ddS)[3], across = 3)
+          ijk <- SparseIndices(momchangeeffect)
+          ## Entrywise calculations below
+          momchange <- as.numeric(momchange[ijk])
+          ## Transform to change in probability
+          expchange <- exp(momchangeeffect$x)
+          lamBi <- lamB[ijk$i]
+          logiprobBi <- logiprobB[ijk$i]
+          changesignBj <- changesignB[ijk$j]
+          pchange <- changesignBj*(lamBi * expchange / (lamBi*expchange + rho) - logiprobBi)
+          mombefore <- momB[cbind(ijk$i, ijk$k)]  # indices refer to the block subset
+          ## Note: changesignBj * momchange == as.numeric(ddS[ijk])
+          ddSintegrand <- (mombefore + momchange) * pchange + logiprobBi * changesignBj * momchange
+          ddSintegrand <- sparse3Darray(i = ijk$i, j = ijk$j, k = ijk$k, x = ddSintegrand,
+                                        dims = dim(ddS))
+        } else{
+          if(entrywise){
+            momchange <- ddS
+            momchange[ , isdataB, ] <- - momchange[, isdataB, ]
+            if(gotScore){
+              lamratiominus1 <- expm1(tenseur(momchange[,,REG,drop=FALSE],
+                                              theta, 3, 1))
+            } else{
+              lamratiominus1 <- expm1(tenseur(momchange, theta, 3, 1))
+            }
+            lamratiominus1 <- expandSparse(lamratiominus1, n = dim(ddS)[3], across = 3)
+            ijk <- SparseIndices(lamratiominus1)
+            ## Everything entrywise with ijk now:
+            # lamratiominus1 <- lamratiominus1[cbind(ijk$i, ijk$j)]
+            lamratiominus1 <- as.numeric(lamratiominus1$x)
+            momchange <- as.numeric(momchange[ijk])
+            mombefore <- momB[cbind(ijk$i, ijk$k)]
+            momafter <- mombefore + momchange
+            ## lamarray[i,j,k] <- lam[i]
+            lamarray <- lamB[ijk$i]
+            ddSintegrand <- lamarray * (momafter * lamratiominus1 + momchange)
+            ddSintegrand <- sparse3Darray(i = ijk$i, j = ijk$j, k = ijk$k, x = ddSintegrand,
+                                          dims = dim(ddS))
+          } else{
+            ## Entries are required only for pairs i,j which interact.
+            ## mombefore[i,j,] <- mom[i,]
+            mombefore <- mapSparseEntries(ddS, 1, momB, conform=TRUE, across=3)
+            momchange <- ddS
+            momchange[ , isdataB, ] <- - momchange[, isdataB, ]
+            momafter <- evalSparse3Dentrywise(mombefore + momchange)
+            ## lamarray[i,j,k] <- lam[i]
+            lamarray <- mapSparseEntries(ddS, 1, lamB, conform=TRUE, across=3)
+            if(gotScore){
+              lamratiominus1 <- expm1(tenseur(momchange[,,REG,drop=FALSE],
+                                              theta, 3, 1))
+            } else{
+              lamratiominus1 <- expm1(tenseur(momchange,theta, 3, 1))
+            }
+            lamratiominus1 <- expandSparse(lamratiominus1, n = dim(ddS)[3], across = 3)
+            ddSintegrand <- evalSparse3Dentrywise(lamarray * (momafter* lamratiominus1 + momchange))
+          }
+          rm(lamratiominus1, lamarray, momafter)
+        }
+        rm(momchange, mombefore)
+      }
+      if(anyzerocifB) {
+        ddSintegrand[zerocifB,,] <- 0
+        ddSintegrand[,zerocifB,] <- 0
+      }
+      ## integrate
+      if(logi){
+        # eff.back.B <- tenseur(ddSintegrand, rep(1, length(wB)), 1, 1)
+        eff.back.B <- marginSums(ddSintegrand, c(2,3))
+      } else{
+        eff.back.B <- changesignB * tenseur(ddSintegrand, wB, 1, 1)
+      }
+      ## save contribution
+      if(is.null(requested)) {
+        eff.back <- eff.back.B
+      } else {
+        eff.back[current,] <- as.matrix(eff.back.B[currentB, , drop=FALSE])
+      }
+    }
+    
+    ## total
+    eff <- eff + eff.data - eff.back
+    eff <- as.matrix(eff)
+  }
+  
+  if("increments" %in% what) {
+    result$increm <- list(ddS=ddS,
+                          ddSintegrand=ddSintegrand,
+                          isdata=isdata,
+                          wQ=w)
+  }
+  if(!any(c("leverage", "influence", "dfbetas") %in% what))
+    return(result)
+
+  # ............ compute leverage, influence, dfbetas ..............
+  
+  # compute basic contribution from each quadrature point
+  nloc <- npoints(loc)
+  switch(method,
+         interpreted = {
+           b <- numeric(nloc)
+           for(i in seq(nloc)) {
+             effi <- eff[i,, drop=FALSE]
+             momi <- mom[i,, drop=FALSE]
+             b[i] <- momi %*% invhess %*% t(effi)
+           }
+         },
+         C = {
+           b <- bilinearform(mom, invhess, eff)
+         })
+  
+  # .......... leverage .............
+  
+  if("leverage" %in% what) {
+    ## values of leverage (diagonal) at points of 'loc'
+    h <- b * lam
+    ok <- is.finite(h)
+    if(mt <- is.multitype(loc))
+      h <- data.frame(leverage=h, type=marks(loc))
+    levval <- (loc %mark% h)[ok]
+    levvaldum <- levval[!isdata[ok]]
+    geomsmooth <- geomsmooth && all(marks(levvaldum) >= 0)
+    if(!mt) {
+      levsmo <- Smooth(levvaldum, sigma=maxnndist(loc), geometric=geomsmooth)
+    } else {
+      levsplitdum <- split(levvaldum, reduce=TRUE)
+      levsmo <- Smooth(levsplitdum,
+                       sigma=max(sapply(levsplitdum, maxnndist)),
+                       geometric=geomsmooth)
+    }
+    ## nominal mean level
+    a <- area(Window(loc)) * markspace.integral(loc)
+    levmean <- p/a
+    lev <- list(val=levval, smo=levsmo, ave=levmean)
+    result$lev <- lev
+  }
+  # .......... influence .............
+  if("influence" %in% what) {
+    if(logi){
+      X <- loc
+      effX <- as.numeric(isdata) * eff - mom * logiprob
+    } else{
+      # values of influence at data points
+      X <- loc[isdata]
+      effX <- eff[isdata, ,drop=FALSE]
+    }
+    M <- (1/p) * quadform(effX, invhess)
+    if(logi || is.multitype(X)) {
+      # result will have several columns of marks
+      M <- data.frame(influence=M)
+      if(logi) M$isdata <- factor(isdata, levels = c(TRUE, FALSE), labels = c("data", "dummy"))
+      if(is.multitype(X)) M$type <- marks(X)
+    } 
+    V <- X %mark% M
+    result$infl <- V
+  }
+  # .......... dfbetas .............
+  if("dfbetas" %in% what) {
+    if(logi){
+      M <- as.numeric(isdata) * eff - mom * logiprob
+      M <- t(invhess %*% t(M))
+      Mdum <- M
+      Mdum[isdata,] <- 0
+      Mdum <- Mdum / w.quad(Q)
+      result$dfbetas <- msr(Q, M[isdata, ], Mdum)
+    } else{
+      vex <- invhess %*% t(mom)
+      dex <- invhess %*% t(eff)
+      switch(method,
+             interpreted = {
+               dis <- con <- matrix(0, nloc, ncol(mom))
+               for(i in seq(nloc)) {
+                 vexi <- vex[,i, drop=FALSE]
+                 dexi <- dex[,i, drop=FALSE]
+                 dis[i, ] <- if(isdata[i]) dexi else 0
+                 con[i, ] <- - lam[i] * vexi
+               }
+             },
+             C = {
+               dis <- t(dex)
+               dis[!isdata,] <- 0
+               con <- - lam * t(vex)
+               con[lam == 0,] <- 0
+             })
+      colnames(dis) <- colnames(con) <- colnames(mom)
+      # result is a vector valued measure
+      result$dfbetas <- msr(Q, dis[isdata, ], con)
+    }
+  }
+  return(result)
+}
+
+## extract derivatives from covariate functions
+## WARNING: these are not the score components in general
+
+ppmDerivatives <- function(fit, what=c("gradient", "hessian"),
+                            Dcovfun=NULL, loc, covfunargs=list()) {
+  what <- match.arg(what)
+  if(!is.null(Dcovfun)) {
+    ## use provided function Dcov to compute derivatives
+    Dvalues <- mpl.get.covariates(Dcovfun, loc, covfunargs=covfunargs)
+    result <- as.matrix(as.data.frame(Dvalues))
+    return(result)
+  }
+  ## any irregular parameters?
+  if(length(covfunargs) == 0)
+    return(NULL)
+  ## Try to extract derivatives from covariate functions
+  ## This often works if the functions were created by symbolic differentiation
+  fvalues <- mpl.get.covariates(fit$covariates, loc, covfunargs=covfunargs,
+                                need.deriv=TRUE)
+  Dlist <- attr(fvalues, "derivatives")[[what]]
+  if(length(Dlist) == 0)
+    return(NULL)
+  switch(what,
+         gradient = {
+           result <- do.call(cbind, unname(lapply(Dlist, as.data.frame)))
+           result <- as.matrix(result)
+         },
+         hessian = {
+           ## construct array containing Hessian matrices
+           biga <- do.call(blockdiagarray, Dlist)
+           ## flatten matrices 
+           result <- matrix(biga, nrow=dim(biga)[1L])
+         })
+  return(result)
+}
+
+plot.leverage.ppm <- local({
+
+  plot.leverage.ppm <- function(x, ..., showcut=TRUE, col.cut=par("fg"),
+                                multiplot=TRUE) {
+    fitname <- x$fitname
+    defaultmain <- paste("Leverage for", fitname)
+    y <- x$lev
+    smo <- y$smo
+    ave <- y$ave
+    if(!multiplot && inherits(smo, "imlist")) {
+      ave <- ave * length(smo)
+      smo <- Reduce("+", smo)
+      defaultmain <- c(defaultmain, "(sum over all types of point)")
+    }
+    if(is.im(smo)) {
+      do.call(plot.im,
+              resolve.defaults(list(smo),
+                               list(...),
+                               list(main=defaultmain)))
+      if(showcut)
+        onecontour(0, x=smo, ..., level.cut=ave, col.cut=col.cut)
+    } else if(inherits(smo, "imlist")) {
+      xtra <- list(panel.end=onecontour,
+                   panel.end.args=list(level.cut=ave, col.cut=col.cut))
+      do.call(plot.solist,
+              resolve.defaults(list(smo),
+                               list(...),
+                               list(main=defaultmain),
+                               if(showcut) xtra else list()))
+    } 
+    invisible(NULL)
+  }
+
+  onecontour <- function(i, x, ..., level.cut, col.cut) {
+    if(diff(range(x)) > 0)
+      do.call.matched(contour.im,
+                      resolve.defaults(list(x=x, levels=level.cut,
+                                            add=TRUE, col=col.cut),
+                                       list(...),
+                                       list(drawlabels=FALSE)),
+                      extrargs=c("levels", "drawlabels",
+                        "labcex", "col", "lty", "lwd", "frameplot"))
+  }
+
+  plot.leverage.ppm
+})
+                           
+plot.influence.ppm <- function(x, ..., multiplot=TRUE) {
+  fitname <- x$fitname
+  defaultmain <- paste("Influence for", fitname)
+  y <- x$infl
+  if(multiplot && isTRUE(ncol(marks(y)) > 1)) {
+    # apart from the influence value, there may be additional columns of marks
+    # containing factors: {type of point}, { data vs dummy in logistic case }
+    ma <- as.data.frame(marks(y))
+    fax <- sapply(ma, is.factor)
+    nfax <- sum(fax)
+    if(nfax == 1) {
+      # split on first available factor, and remove this factor
+      y <- split(y, reduce=TRUE)
+    } else if(nfax > 1) {
+      # several factors: split according to them all, and remove them all
+      f.all <- do.call(interaction, ma[fax])
+      z <- y %mark% ma[,!fax]
+      y <- split(z, f.all)
+    }
+  }
+  do.call(plot,
+          resolve.defaults(list(y),
+                           list(...),
+                           list(main=defaultmain,
+                                multiplot=multiplot,
+                                which.marks=1)))
+}
+
+persp.leverage.ppm <- function(x, ..., main) {
+  if(missing(main)) main <- deparse(substitute(x))
+  y <- as.im(x)
+  if(is.im(y)) return(persp(y, main=main, ...))
+  pa <- par(ask=TRUE)
+  lapply(y, persp, main=main, ...)
+  par(pa)
+  return(invisible(NULL))
+}
+  
+as.im.leverage.ppm <- function(X, ...) {
+  return(X$lev$smo) # could be either an image or a list of images
+}
+
+as.function.leverage.ppm <- function(x, ...) {
+  X <- x$lev$val
+  S <- ssf(unmark(X), marks(X))
+  return(as.function(S))
+}
+
+as.ppp.influence.ppm <- function(X, ...) {
+  return(X$infl)
+}
+
+as.owin.leverage.ppm <- function(W, ..., fatal=TRUE) {
+  y <- as.im(W)
+  if(inherits(y, "imlist")) y <- y[[1L]]
+  as.owin(y, ..., fatal=fatal)
+}
+
+as.owin.influence.ppm <- function(W, ..., fatal=TRUE) {
+  as.owin(as.ppp(W), ..., fatal=fatal)
+}
+
+domain.leverage.ppm <- domain.influence.ppm <-
+  Window.leverage.ppm <- Window.influence.ppm <-
+  function(X, ...) { as.owin(X) } 
+
+print.leverage.ppm <- function(x, ...) {
+  splat("Point process leverage function")
+  fitname <- x$fitname
+  splat("for model:", fitname)
+  lev <- x$lev
+  splat("\nExact values:")
+  print(lev$val)
+  splat("\nSmoothed values:")
+  print(lev$smo)
+  ## for compatibility we retain the x$fit usage
+  if(x$fit.is.poisson %orifnull% is.poisson(x$fit))
+    splat("\nAverage value:", lev$ave)
+  return(invisible(NULL))
+}
+
+print.influence.ppm <- function(x, ...) {
+  splat("Point process influence measure")  
+  fitname <- x$fitname
+  splat("for model:", fitname)
+  splat("\nExact values:")
+  print(x$infl)
+  return(invisible(NULL))
+}
+
+"[.leverage.ppm" <- function(x, i, ..., update=TRUE) {
+  if(missing(i)) return(x)
+  y <- x$lev
+  smoi <- if(is.im(y$smo)) y$smo[i, ...] else solapply(y$smo, "[", i=i, ...)
+  if(!inherits(smoi, c("im", "imlist"))) return(smoi)
+  y$smo <- smoi
+  y$val <- y$val[i, ...]
+  if(update) 
+    y$ave <- if(is.im(smoi)) mean(smoi) else mean(sapply(smoi, mean))
+  x$lev <- y
+  return(x)
+}
+
+"[.influence.ppm" <- function(x, i, ...) {
+  if(missing(i)) return(x)
+  y <- x$infl[i, ...]
+  if(!is.ppp(y)) return(y)
+  x$infl <- y
+  return(x)
+}
+
+shift.leverage.ppm <- function(X, ...) {
+  vec <- getlastshift(shift(as.owin(X), ...))
+  X$lev$val <- shift(X$lev$val, vec=vec)
+  smo <- X$lev$smo
+  X$lev$smo <-
+    if(is.im(smo)) shift(smo, vec=vec) else solapply(smo, shift, vec=vec)
+  return(putlastshift(X, vec))
+}
+
+shift.influence.ppm <- function(X, ...) {
+  X$infl <- shift(X$infl, ...)
+  return(putlastshift(X, getlastshift(X$infl)))
+}
+
diff --git a/R/linalg.R b/R/linalg.R
new file mode 100755
index 0000000..938d05e
--- /dev/null
+++ b/R/linalg.R
@@ -0,0 +1,241 @@
+#
+# linalg.R
+#
+#  Linear Algebra
+#
+# $Revision: 1.23 $ $Date: 2017/06/05 10:31:58 $
+#
+
+sumouter <- function(x, w=NULL, y=x) {
+  #' compute matrix sum_i (w[i] * outer(x[i,], y[i,]))
+  stopifnot(is.matrix(x))
+  weighted <- !is.null(w)
+  symmetric <- missing(y) || identical(x,y)
+  if(weighted) {
+    if(length(dim(w)) > 1) stop("w should be a vector")
+    w <- as.numeric(w)
+    check.nvector(w, nrow(x), things="rows of x")
+  }
+  if(!symmetric) {
+    stopifnot(is.matrix(y))
+    stopifnot(nrow(x) == nrow(y))
+  }
+  #' transpose (compute outer squares of columns)
+  tx <- t(x)
+  if(!symmetric) ty <- t(y)
+  #' check for NA etc
+  ok <- apply(is.finite(tx), 2, all)
+  if(!symmetric) ok <- ok & apply(is.finite(ty), 2, all)
+  if(weighted) ok <- ok & is.finite(w)
+  #' remove NA etc
+  if(!all(ok)) {
+    tx <- tx[ , ok, drop=FALSE]
+    if(!symmetric) ty <- ty[ , ok, drop=FALSE]
+    if(weighted) w <- w[ok]
+  }
+  #' call C code
+  if(symmetric) {
+    n <- ncol(tx)
+    p <- nrow(tx)
+    if(is.null(w)) {
+      zz <- .C("Csumouter",
+               x=as.double(tx),
+               n=as.integer(n),
+               p=as.integer(p),
+               y=as.double(numeric(p * p)),
+               PACKAGE = "spatstat")
+    } else {
+      zz <- .C("Cwsumouter",
+               x=as.double(tx),
+               n=as.integer(n),
+               p=as.integer(p),
+               w=as.double(w),
+               y=as.double(numeric(p * p)),
+               PACKAGE = "spatstat")
+    }
+    out <- matrix(zz$y, p, p)
+    if(!is.null(nama <- colnames(x)))
+      dimnames(out) <- list(nama, nama)
+  } else {
+    n <- ncol(tx)
+    px <- nrow(tx)
+    py <- nrow(ty)
+    if(is.null(w)) {
+      zz <- .C("Csum2outer",
+               x=as.double(tx),
+               y=as.double(ty),
+               n=as.integer(n),
+               px=as.integer(px),
+               py=as.integer(py),
+               z=as.double(numeric(px * py)),
+               PACKAGE = "spatstat")
+    } else {
+      zz <- .C("Cwsum2outer",
+               x=as.double(tx),
+               y=as.double(ty),
+               n=as.integer(n),
+               px=as.integer(px),
+               py=as.integer(py),
+               w=as.double(w),
+               z=as.double(numeric(px * py)),
+               PACKAGE = "spatstat")
+    }
+    out <- matrix(zz$z, px, py)
+    namx <- colnames(x)
+    namy <- colnames(y)
+    if(!is.null(namx) || !is.null(namy))
+      dimnames(out) <- list(namx, namy)
+  }
+  return(out)
+}
+
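+#' Consistency sketch: the C routine should agree with a naive R loop
+#' (random inputs, illustrative only).
+if(FALSE) {
+  x <- matrix(rnorm(12), 4, 3)
+  w <- runif(4)
+  naive <- Reduce("+", lapply(1:4, function(i) w[i] * outer(x[i,], x[i,])))
+  all.equal(sumouter(x, w), naive)
+}
+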
+quadform <- function(x, v) {
+  #' compute vector of values y[i] = x[i, ] %*% v %*% t(x[i, ])
+  stopifnot(is.matrix(x))
+  p <- ncol(x)
+  n <- nrow(x)
+  nama <- rownames(x)
+  # transpose (evaluate quadratic form for each column)
+  tx <- t(x)
+  ok <- apply(is.finite(tx), 2, all)
+  allok <- all(ok)
+  if(!allok) {
+    tx <- tx[ , ok, drop=FALSE]
+    n <- ncol(tx)
+  }
+  if(missing(v)) {
+    v <- diag(rep.int(1, p))
+  } else {
+    stopifnot(is.matrix(v))
+    if(nrow(v) != ncol(v)) stop("v should be a square matrix")
+    stopifnot(ncol(x) == nrow(v))
+  }
+  z <- .C("Cquadform",
+          x=as.double(tx),
+          n=as.integer(n),
+          p=as.integer(p),
+          v=as.double(v),
+          y=as.double(numeric(n)),
+          PACKAGE = "spatstat")
+  result <- z$y
+  names(result) <- nama[ok]
+  if(allok)
+    return(result)
+  fullresult <- rep.int(NA_real_, length(ok))
+  fullresult[ok] <- result
+  names(fullresult) <- nama
+  return(fullresult)
+}
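+#' Illustrative sanity check (a sketch, not from the original file):
+#' quadform agrees with the direct row-wise computation.
+#'   x <- matrix(rnorm(12), nrow=4)
+#'   v <- crossprod(matrix(rnorm(9), 3, 3))   # symmetric 3 x 3
+#'   all.equal(quadform(x, v), rowSums((x %*% v) * x))   # TRUE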
+
+bilinearform <- function(x, v, y) {
+  #' compute vector of values z[i] = x[i, ] %*% v %*% t(y[i,])
+  stopifnot(is.matrix(x))
+  stopifnot(is.matrix(y))
+  stopifnot(identical(dim(x), dim(y)))
+  p <- ncol(x)
+  n <- nrow(x)
+  nama <- rownames(x)
+  # transpose (evaluate quadratic form for each column)
+  tx <- t(x)
+  ty <- t(y)
+  ok <- matcolall(is.finite(tx)) & matcolall(is.finite(ty))
+  allok <- all(ok)
+  if(!allok) {
+    tx <- tx[ , ok, drop=FALSE]
+    ty <- ty[ , ok, drop=FALSE]
+    n <- ncol(tx)
+  }
+  if(missing(v)) {
+    v <- diag(rep.int(1, p))
+  } else {
+    stopifnot(is.matrix(v))
+    if(nrow(v) != ncol(v)) stop("v should be a square matrix")
+    stopifnot(ncol(x) == nrow(v))
+  }
+  z <- .C("Cbiform",
+          x=as.double(tx),
+          y=as.double(ty),
+          n=as.integer(n),
+          p=as.integer(p),
+          v=as.double(v),
+          z=as.double(numeric(n)),
+          PACKAGE = "spatstat")
+  result <- z$z
+  names(result) <- nama[ok]
+  if(allok)
+    return(result)
+  fullresult <- rep.int(NA_real_, length(ok))
+  fullresult[ok] <- result
+  names(fullresult) <- nama
+  return(fullresult)
+}
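+#' Illustrative sanity check (a sketch, not from the original file):
+#'   x <- matrix(rnorm(12), nrow=4)
+#'   y <- matrix(rnorm(12), nrow=4)
+#'   v <- diag(3)
+#'   all.equal(bilinearform(x, v, y), rowSums((x %*% v) * y))   # TRUE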
+
+sumsymouter <- function(x, w=NULL) {
+  ## x is a 3D array
+  ## w is a matrix
+  ## Computes the sum of outer(x[,i,j], x[,j,i]) * w[i,j] over all pairs i != j
+  if(inherits(x, c("sparseSlab", "sparse3Darray")) &&
+     (is.null(w) || inherits(w, "sparseMatrix")))
+    return(sumsymouterSparse(x, w))
+  x <- as.array(x)
+  stopifnot(length(dim(x)) == 3)
+  if(dim(x)[2L] != dim(x)[3L])
+    stop("The second and third dimensions of x should be equal")
+  if(!is.null(w)) {
+    w <- as.matrix(w)
+    if(!all(dim(w) == dim(x)[-1L]))
+      stop("Dimensions of w should match the second and third dimensions of x")
+  }
+  p <- dim(x)[1L]
+  n <- dim(x)[2L]
+  if(is.null(w)) {
+    zz <- .C("Csumsymouter",
+             x = as.double(x),
+             p = as.integer(p),
+             n = as.integer(n),
+             y = as.double(numeric(p * p)),
+             PACKAGE = "spatstat")
+  } else {
+    zz <- .C("Cwsumsymouter",
+             x = as.double(x),
+             w = as.double(w),
+             p = as.integer(p),
+             n = as.integer(n),
+             y = as.double(numeric(p * p)),
+             PACKAGE = "spatstat")
+  }
+  matrix(zz$y, p, p)
+}
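+#' Illustrative sanity check (a sketch, not from the original file): the
+#' C code agrees with a naive double loop over ordered pairs i != j.
+#'   p <- 2; n <- 4
+#'   x <- array(rnorm(p * n * n), dim=c(p, n, n))
+#'   ans <- matrix(0, p, p)
+#'   for(i in 1:n) for(j in setdiff(1:n, i))
+#'     ans <- ans + outer(x[, i, j], x[, j, i])
+#'   all.equal(sumsymouter(x), ans)   # TRUE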
+
+checksolve <- function(M, action, descrip, target="") {
+  Mname <- short.deparse(substitute(M))
+  Minv <- try(solve(M), silent=(action=="silent"))
+  if(!inherits(Minv, "try-error"))
+    return(Minv)
+  if(missing(descrip))
+    descrip <- paste("the matrix", sQuote(Mname))
+  whinge <- paste0("Cannot compute ", target, ": ", descrip, " is singular")
+  switch(action,
+         fatal=stop(whinge, call.=FALSE),
+         warn= warning(whinge, call.=FALSE),
+         silent={})
+  return(NULL)
+}
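+#' Illustrative usage (a sketch, not from the original file):
+#'   M <- diag(c(1, 0))                         # singular matrix
+#'   checksolve(M, "warn", target="variance")   # returns NULL with a warning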
+
+check.mat.mul <- function(A, B, Acols="columns of A", Brows="rows of B",
+                          fatal=TRUE) {
+  # check whether A %*% B would be valid: if not, print a useful message
+  if(!is.matrix(A)) A <- matrix(A, nrow=1, dimnames=list(NULL, names(A)))
+  if(!is.matrix(B)) B <- matrix(B, ncol=1, dimnames=list(names(B), NULL))
+  nA <- ncol(A)
+  nB <- nrow(B) 
+  if(nA == nB) return(TRUE)
+  if(!fatal) return(FALSE)
+  if(any(nzchar(Anames <- colnames(A))))
+    message(paste0("Names of ", Acols, ": ", commasep(Anames)))
+  if(any(nzchar(Bnames <- rownames(B))))
+    message(paste0("Names of ", Brows, ": ", commasep(Bnames)))
+  stop(paste("Internal error: number of", Acols, paren(nA),
+             "does not match number of", Brows, paren(nB)),
+       call.=FALSE)
+}
+
diff --git a/R/lindirichlet.R b/R/lindirichlet.R
new file mode 100644
index 0000000..efd8560
--- /dev/null
+++ b/R/lindirichlet.R
@@ -0,0 +1,158 @@
+#'   lindirichlet.R
+#'
+#'   Dirichlet tessellation on a linear network
+#'
+
+lineardirichlet <- function(X) {
+  stopifnot(is.lpp(X))
+  #' unique points, remembering original sequence
+  ii <- which(!duplicated(X))
+  uX <- X[ii]
+  nuX <- npoints(uX)
+  #' local coordinates
+  coUX <- coords(uX)[, c("seg", "tp")]
+  #' add label from original sequence index
+  coUX$lab <- ii
+  #' reorder
+  oo <- with(coUX, order(seg, tp))
+  coUXord <- coUX[oo, , drop=FALSE]
+  seg <- coUXord$seg
+  tp  <- coUXord$tp
+  #' network data
+  L <- domain(X)
+  nv <- nvertices(L)
+  ns <- nsegments(L)
+  seglen <- lengths.psp(as.psp(L))
+  from <- L$from
+  to   <- L$to
+  #' upper bound on interpoint distance
+  huge <- sum(seglen)
+  #' numerical tolerance for nnwhich
+  tol <- max(sqrt(.Machine$double.eps), diameter(Frame(L))/2^20)
+  #' Find data point nearest to each vertex of network
+  from0 <- from - 1L
+  to0   <- to - 1L
+  seg0  <- seg - 1L
+  z <- .C("Clinvwhichdist",
+          np = as.integer(nuX),
+	  sp = as.integer(seg0),
+	  tp = as.double(tp),
+	  nv = as.integer(nv),
+	  ns = as.integer(ns),
+	  from = as.integer(from0),
+	  to   = as.integer(to0),
+	  seglen = as.double(seglen),
+	  huge = as.double(huge),
+	  tol = as.double(tol),
+	  dist = as.double(numeric(nv)),
+	  which = as.integer(integer(nv)),
+	  PACKAGE = "spatstat")
+   vnndist <- z$dist
+   vnnwhich <- z$which + 1L  # index into sorted unique point pattern
+   vnnwhich[vnnwhich == 0] <- NA # possible if network is disconnected
+   vnnlab <- coUXord$lab[vnnwhich] # index into original data pattern
+   #' initialise tessellation data
+   df <- data.frame(seg=integer(0),
+                    t0=numeric(0),
+		    t1=numeric(0),
+		    tile=integer(0))
+   #' split point data by segment, discarding segments which contain no points
+   fseg <- factor(seg, levels=1:ns)
+   blist <- split(coUXord, fseg, drop=TRUE)
+   #' process each segment containing data points
+   for(b in blist) {
+     n <- nrow(b)
+     #' which segment?
+     sygmund <- b$seg[[1L]]
+     lenf <- seglen[sygmund]
+     #' segment endpoints
+     A <- from[sygmund]
+     B <- to[sygmund]
+     #' data points (from X) closest to endpoints
+     jA <- vnnlab[A]
+     jB <- vnnlab[B]
+     dA <- vnndist[A]
+     dB <- vnndist[B]
+     #' data points (along segment) closest to endpoints
+     iA <- b$lab[1L]
+     iB <- b$lab[n]
+     #' splits between consecutive data points
+     btp <- b$tp
+     tcut <- if(n < 2) numeric(0) else (btp[-1] + btp[-n])/2
+     labs <- b$lab
+     #' consider left endpoint
+     if(jA == iA) {
+       #' leftmost data point covers left endpoint
+       tcut <- c(0, tcut)
+     } else {
+       #' cut between left endpoint and leftmost data point
+       dA1 <- lenf * btp[1L]
+       dx <- (dA1 - dA)/2
+       if(dx > 0) {
+         #' expected!
+	 tx <- dx/lenf
+	 tcut <- c(0, tx, tcut)
+	 labs <- c(jA, labs)
+       } else {
+         #' unexpected
+	 tcut <- c(0, tcut)
+       }
+     }
+     #' consider right endpoint
+     if(jB == iB) {
+       #' rightmost data point covers right endpoint
+       tcut <- c(tcut, 1)
+     } else {
+       #' cut between right endpoint and rightmost data point
+       dB1 <- lenf * (1 - btp[n])
+       dx <- (dB1 - dB)/2
+       if(dx > 0) {
+         #' expected!
+	 tx <- 1 - dx/lenf
+	 tcut <- c(tcut, tx, 1)
+	 labs <- c(labs, jB)
+       } else {
+         #' unexpected
+	 tcut <- c(tcut, 1)
+       }
+     }
+     m <- length(tcut)
+     newdf <- data.frame(seg=sygmund, t0=tcut[-m], t1=tcut[-1L], tile=labs)
+     df <- rbind(df, newdf)
+   }
+   #' now deal with segments having no data points
+   unloved <- (table(fseg) == 0)
+   if(any(unloved)) {
+     for(sygmund in which(unloved)) {
+      lenf <- seglen[sygmund]
+      #' segment endpoints
+      A <- from[sygmund]
+      B <- to[sygmund]
+      #' data points (from X) closest to endpoints
+      jA <- vnnlab[A]
+      jB <- vnnlab[B]
+      dA <- vnndist[A]
+      dB <- vnndist[B]
+      if(is.na(jA) || is.na(jB) || jA == jB) {
+        #' entire segment is covered by one tile
+        thetile <- if(is.na(jA)) jB else jA
+	newdf <- data.frame(seg=sygmund, t0=0.0, t1=1.0, tile=thetile)
+      } else {
+        #' split somewhere
+	tx <- (dB - dA + lenf)/(2 * lenf)
+	if(tx >= 0 && tx <= 1) {
+  	  newdf <- data.frame(seg=sygmund,
+	                      t0=c(0,tx), t1=c(tx,1), tile=c(jA, jB))
+	} else if(tx < 0) {
+	  # weird
+	  newdf <- data.frame(seg=sygmund, t0=0.0, t1=1.0, tile=jB)
+	} else {
+	  # weird
+	  newdf <- data.frame(seg=sygmund, t0=0.0, t1=1.0, tile=jA)
+	}
+      }
+      df <- rbind(df, newdf)
+     }
+   }
+   return(lintess(L, df))
+}
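+#' Illustrative usage (a sketch, assuming the 'simplenet' example network
+#' shipped with spatstat and 'runiflpp' for uniform random points on it):
+#'   X <- runiflpp(15, simplenet)
+#'   plot(lineardirichlet(X), main="network Dirichlet tiles")
+#'   points(as.ppp(X), pch=16)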
\ No newline at end of file
diff --git a/R/linearK.R b/R/linearK.R
new file mode 100755
index 0000000..f6dd75b
--- /dev/null
+++ b/R/linearK.R
@@ -0,0 +1,281 @@
+#
+# linearK
+#
+# $Revision: 1.48 $ $Date: 2017/08/09 00:21:51 $
+#
+# K function for point pattern on linear network
+#
+#
+linearK <- function(X, r=NULL, ..., correction="Ang", ratio=FALSE) {
+  stopifnot(inherits(X, "lpp"))
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             Ang="Ang",
+                             best="Ang"),
+                           multi=FALSE)
+  np <- npoints(X)
+  lengthL <- volume(domain(X))
+  denom <- np * (np - 1)/lengthL
+  K <- linearKengine(X, r=r, ..., 
+ 		     denom=denom, correction=correction, ratio=ratio)
+  # set appropriate y axis label
+  switch(correction,
+         Ang  = {
+           ylab <- quote(K[L](r))
+           fname <- c("K", "L")
+         },
+         none = {
+           ylab <- quote(K[net](r))
+           fname <- c("K", "net")
+         })
+  K <- rebadge.fv(K, new.ylab=ylab, new.fname=fname)
+  return(K)
+}
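+# Illustrative usage (a sketch, assuming the 'spiders' lpp dataset shipped
+# with spatstat):
+#   K <- linearK(spiders, correction="Ang")
+#   plot(K)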
+
+linearKinhom <- function(X, lambda=NULL, r=NULL,  ...,
+                         correction="Ang", normalise=TRUE, normpower=1,
+			 update=TRUE, leaveoneout=TRUE,
+			 ratio=FALSE) {
+  stopifnot(inherits(X, "lpp"))
+  loo.given <- !missing(leaveoneout)
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             Ang="Ang",
+                             best="Ang"),
+                           multi=FALSE)
+  if(is.null(lambda))
+    return(linearK(X, r=r, ..., ratio=ratio, correction=correction))
+  if(normalise) {
+    check.1.real(normpower)
+    stopifnot(normpower >= 1)
+  }
+  lambdaX <- getlambda.lpp(lambda, X, ...,
+                           update=update, leaveoneout=leaveoneout,
+                           loo.given=loo.given,
+                           lambdaname="lambda")
+  invlam <- 1/lambdaX
+  invlam2 <- outer(invlam, invlam, "*")
+  lengthL <- volume(domain(X))
+  denom <- if(!normalise) lengthL else
+           if(normpower == 1) sum(invlam) else
+           lengthL * (sum(invlam)/lengthL)^normpower
+
+  K <- linearKengine(X,
+                     reweight=invlam2, denom=denom, 
+  	             r=r, correction=correction, 
+	 	     ratio=ratio, ...)
+		     
+  # set appropriate y axis label
+  switch(correction,
+         Ang  = {
+           ylab <- quote(K[L, inhom](r))
+           yexp <- quote(K[list(L, "inhom")](r))
+           fname <- c("K", "list(L, inhom)")
+         },
+         none = {
+           ylab <- quote(K[net, inhom](r))
+           yexp <- quote(K[list(net, "inhom")](r))
+           fname <- c("K", "list(net, inhom)")
+         })
+  K <- rebadge.fv(K, new.fname=fname, new.ylab=ylab, new.yexp=yexp)
+  attr(K, "dangerous") <- attr(lambdaX, "dangerous")
+  return(K)
+}
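+# Illustrative usage (a sketch, assuming the 'spiders' dataset): pass a fitted
+# intensity model, so that it is re-fitted to X and leave-one-out fitted
+# intensities are used, as in getlambda.lpp below.
+#   fit <- lppm(spiders ~ x)
+#   Ki <- linearKinhom(spiders, lambda=fit)
+#   plot(Ki)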
+
+
+getlambda.lpp <- function(lambda, X, subset=NULL, ...,
+                          update=TRUE, leaveoneout=TRUE,
+                          loo.given=TRUE,
+			  lambdaname) {
+  missup <- missing(update)
+  if(missing(lambdaname)) lambdaname <- deparse(substitute(lambda))
+  Y <- if(is.null(subset)) X else X[subset]
+  danger <- TRUE
+  if(is.ppm(lambda) || is.lppm(lambda)) {
+    ## fitted model
+    if(update) {
+      ## refit the model to the full dataset X
+      lambda <- if(is.lppm(lambda)) update(lambda, X) else
+                update(lambda, as.ppp(X))
+      ## now evaluate
+      lambdaX <- fitted(lambda, dataonly=TRUE, leaveoneout=leaveoneout)
+      ## restrict if required
+      lambdaY <- if(is.null(subset)) lambdaX else lambdaX[subset]
+      ## 
+      danger <- FALSE
+      if(missup)
+        warn.once("lin.inhom.update",
+                  "The behaviour of linearKinhom and similar functions",
+                  "when lambda is an lppm object",
+                  "has changed in spatstat 1.41-0,",
+		  "and again in spatstat 1.52-0.",
+                  "See help(linearKinhom)")
+    } else {
+      if(loo.given && leaveoneout)
+        stop("leave-one-out calculation for fitted models is only available when update=TRUE",
+             call.=FALSE)
+      lambdaY <- predict(lambda, locations=as.data.frame(as.ppp(Y)))
+    }
+  } else {
+    ## lambda is some other kind of object
+    lambdaY <-
+      if(is.vector(lambda)) lambda  else
+      if(inherits(lambda, "linfun")) lambda(Y, ...) else
+      if(inherits(lambda, "linim")) lambda[Y, drop=FALSE] else
+      if(is.function(lambda)) {
+        coo <- coords(Y)
+        do.call.matched(lambda, list(x=coo$x, y=coo$y, ...))
+      } else if(is.im(lambda)) safelookup(lambda, as.ppp(Y)) else 
+      stop(paste(lambdaname, "should be",
+                 "a numeric vector, function, pixel image, or fitted model"))
+  }
+  if(!is.numeric(lambdaY))
+    stop(paste("Values of", lambdaname, "are not numeric"))
+  if((nv <- length(lambdaY)) != (np <- npoints(Y)))
+    stop(paste("Obtained", nv, "values of", lambdaname,
+	   "but point pattern contains", np, "points"))
+  if(any(lambdaY < 0))
+    stop(paste("Negative values of", lambdaname, "obtained"))
+  if(any(lambdaY == 0))
+    stop(paste("Zero values of", lambdaname, "obtained"))
+  if(danger)
+    attr(lambdaY, "dangerous") <- lambdaname
+  return(lambdaY)
+}
+
+linearKengine <- function(X, ..., r=NULL, reweight=NULL, denom=1,
+                          correction="Ang", ratio=FALSE, showworking=FALSE) {
+  # ensure distance information is present
+  X <- as.lpp(X, sparse=FALSE)
+  # extract info about pattern
+  np <- npoints(X)
+  # extract linear network
+  L <- domain(X)
+  W <- Window(L)
+  # determine r values
+  rmaxdefault <- 0.98 * boundingradius(L)
+  breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault)
+  r <- breaks$r
+  rmax <- breaks$max
+  #
+  type <- if(correction == "Ang") "L" else "net"
+  fname <- c("K", type)
+  ylab <- substitute(K[type](r), list(type=type))
+  #
+  if(np < 2) {
+    # no pairs to count: return zero function
+    zeroes <- numeric(length(r))
+    df <- data.frame(r = r, est = zeroes)
+    K <- ratfv(df, NULL, 0,
+            "r", ylab,
+            "est", . ~ r, c(0, rmax),
+            c("r", makefvlabel(NULL, "hat", fname)), 
+            c("distance argument r", "estimated %s"),
+            fname = fname,
+	    ratio=ratio)
+    if(correction == "Ang") {
+      # tack on theoretical value
+      K <- bind.ratfv(K,
+		      quotient    = data.frame(theo=r), 
+                      denominator = 0,
+                      labl = makefvlabel(NULL, NULL, fname, "theo"),
+                      desc = "theoretical Poisson %s",
+		      ratio = ratio)
+    }
+    return(K)
+  }
+  # compute pairwise distances  
+  D <- pairdist(X)
+  #---  compile into K function ---
+  if(correction == "none" && is.null(reweight)) {
+    # no weights (Okabe-Yamada)
+    K <- compileK(D, r, denom=denom, fname=fname, ratio=ratio)
+    K <- rebadge.fv(K, ylab, fname)
+    unitname(K) <- unitname(X)
+    return(K)
+  }
+  if(correction == "none")
+     edgewt <- 1
+  else {
+     # inverse m weights (Ang's correction)
+     # determine tolerance
+     toler <- default.linnet.tolerance(L)
+     # compute m[i,j]
+     m <- matrix(1, np, np)
+     for(j in 1:np) 
+       m[ -j, j] <- countends(L, X[-j], D[-j,j], toler=toler)
+     if(any(uhoh <- (m == 0) & is.finite(D))) {
+       warning("Internal error: disc boundary count equal to zero")
+       m[uhoh] <- 1
+     }
+     edgewt <- 1/m
+  }
+  # compute K
+  wt <- if(!is.null(reweight)) edgewt * reweight else edgewt
+  K <- compileK(D, r, weights=wt, denom=denom, fname=fname, ratio=ratio)
+  # tack on theoretical value
+  if(ratio) {
+    K <- bind.ratfv(K,
+		    quotient = data.frame(theo = r),
+		    denominator = denom,
+                    labl = makefvlabel(NULL, NULL, fname, "theo"),
+                    desc = "theoretical Poisson %s")
+  } else {
+    K <- bind.fv(K, data.frame(theo=r),
+                 makefvlabel(NULL, NULL, fname, "theo"),
+                 "theoretical Poisson %s")
+  }		 
+  K <- rebadge.fv(K, ylab, fname)
+  unitname(K) <- unitname(X)
+  fvnames(K, ".") <- rev(fvnames(K, "."))
+  # show working
+  if(showworking)
+    attr(K, "working") <- list(D=D, wt=wt)
+  attr(K, "correction") <- correction
+  return(K)
+}
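+# Gloss (not from the original file): with the "Ang" correction the estimate
+# computed above is
+#   Khat(r) = (1/denom) * sum over pairs i != j of 1{d[i,j] <= r} / m[i,j]
+# where d[i,j] is the shortest-path distance between points i and j, and
+# m[i,j] = countends(L, X[i], d[i,j]) counts the locations on the network
+# lying exactly distance d[i,j] from X[i].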
+
+ApplyConnected <- function(X, Engine, r=NULL,
+                           ..., rule, auxdata=NULL) {
+  # Apply 'Engine' to each connected component of domain(X)
+  stopifnot(is.function(rule))
+  # Ensure distance information is present
+  X <- as.lpp(X, sparse=FALSE)
+  L <- domain(X)
+  # check network connectivity
+  br <- boundingradius(L)
+  if(disco <- is.infinite(br)) {
+    # disconnected network
+    XX <- connected(X)
+    LL <- lapply(XX, domain)
+    br <- max(sapply(LL, boundingradius))
+  } else XX <- NULL
+  # determine r values
+  rmaxdefault <- 0.98 * br
+  breaks <- handle.r.b.args(r, NULL, Window(L), rmaxdefault=rmaxdefault)
+  r <- breaks$r
+  if(!disco) {
+    # single connected network
+    stuff <- rule(X=X, auxdata=auxdata, ...)
+    result <- do.call(Engine, append(list(X=X, r=r), stuff))
+    return(result)
+  }
+  # disconnected network
+  nsub <- length(XX)
+  results <- anylist()
+  denoms <- numeric(nsub)
+  for(i in seq_len(nsub)) {
+    X.i <- XX[[i]]
+    sub.i <- attr(X.i, "retainpoints") # identifies which points of X
+    aux.i <- if(length(auxdata) == 0) NULL else
+             lapply(auxdata, marksubset, index=sub.i)
+    stuff.i <- rule(X=X.i, auxdata=aux.i, ...)
+    denoms[i] <- stuff.i$denom %orifnull% 1
+    results[[i]] <- do.call(Engine, append(list(X=X.i, r=r), stuff.i))
+  }
+  result <- do.call(pool, append(results,
+                                 list(weights=denoms,
+				      relabel=FALSE, variance=FALSE)))
+  return(result)
+}
+
diff --git a/R/linearKmulti.R b/R/linearKmulti.R
new file mode 100644
index 0000000..ce70f27
--- /dev/null
+++ b/R/linearKmulti.R
@@ -0,0 +1,293 @@
+#
+# linearKmulti
+#
+# $Revision: 1.13 $ $Date: 2017/02/07 08:12:05 $
+#
+# K functions for multitype point pattern on linear network
+#
+#
+
+linearKdot <- function(X, i, r=NULL, ..., correction="Ang") {
+  if(!is.multitype(X, dfok=FALSE)) 
+	stop("Point pattern must be multitype")
+  marx <- marks(X)
+  lev <- levels(marx)
+  if(missing(i)) i <- lev[1L] else
+    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))  
+  I <- (marx == i)
+  J <- rep(TRUE, npoints(X))  # i.e. all points
+  result <- linearKmulti(X, I, J,
+                         r=r, correction=correction, ...)
+  correction <- attr(result, "correction")
+  type <- if(correction == "Ang") "L" else "net"
+  result <- rebadge.as.dotfun(result, "K", type, i)
+  return(result)
+}
+
+linearKcross <- function(X, i, j, r=NULL, ..., correction="Ang") {
+  if(!is.multitype(X, dfok=FALSE)) 
+	stop("Point pattern must be multitype")
+  marx <- marks(X)
+  lev <- levels(marx)
+  if(missing(i)) i <- lev[1L] else
+    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))
+  if(missing(j)) j <- lev[2L] else
+    if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark"))
+  #
+  if(i == j) {
+    result <- linearK(X[marx == i], r=r, correction=correction, ...)
+  } else {
+    I <- (marx == i)
+    J <- (marx == j)
+    result <- linearKmulti(X, I, J, r=r, correction=correction, ...)
+  }
+  # rebrand
+  correction <- attr(result, "correction")
+  type <- if(correction == "Ang") "L" else "net"
+  result <- rebadge.as.crossfun(result, "K", type, i, j)
+  return(result)
+}
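+# Illustrative usage (a sketch, assuming the 'chicago' multitype lpp dataset
+# shipped with spatstat):
+#   plot(linearKcross(chicago, "assault", "robbery"))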
+
+linearKmulti <- function(X, I, J, r=NULL, ..., correction="Ang") {
+  stopifnot(inherits(X, "lpp"))
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             Ang="Ang",
+                             best="Ang"),
+                           multi=FALSE)
+  
+  # extract info about pattern
+  np <- npoints(X)
+  lengthL <- volume(domain(X))
+  # validate I, J
+  if(!is.logical(I) || !is.logical(J))
+    stop("I and J must be logical vectors")
+  if(length(I) != np || length(J) != np)
+    stop(paste("The length of I and J must equal",
+               "the number of points in the pattern"))
+	
+  if(!any(I)) stop("no points satisfy I")
+#  if(!any(J)) stop("no points satisfy J")
+		
+  nI <- sum(I)
+  nJ <- sum(J)
+  nIandJ <- sum(I & J)
+#  lambdaI <- nI/lengthL
+#  lambdaJ <- nJ/lengthL
+  # compute K
+  denom <- (nI * nJ - nIandJ)/lengthL
+  K <- linearKmultiEngine(X, I, J, r=r, denom=denom,
+                          correction=correction, ...)
+  # set appropriate y axis label
+  correction <- attr(K, "correction")
+  type <- if(correction == "Ang") "L" else "net"
+  K <- rebadge.as.crossfun(K, "K", type, "I", "J")
+  return(K)
+}
+
+# ................ inhomogeneous ............................
+
+linearKdot.inhom <- function(X, i, lambdaI, lambdadot,
+                             r=NULL, ..., correction="Ang", normalise=TRUE) {
+  if(!is.multitype(X, dfok=FALSE)) 
+	stop("Point pattern must be multitype")
+  marx <- marks(X)
+  lev <- levels(marx)
+  if(missing(i)) i <- lev[1L] else
+    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))  
+  I <- (marx == i)
+  J <- rep(TRUE, npoints(X))  # i.e. all points
+  # compute
+  result <- linearKmulti.inhom(X, I, J, lambdaI, lambdadot, 
+                               r=r, correction=correction, normalise=normalise,
+                               ...)
+  ## relabel
+  correction <- attr(result, "correction")
+  type <- if(correction == "Ang") "L, inhom" else "net, inhom"
+  result <- rebadge.as.dotfun(result, "K", type, i)
+  return(result)
+}
+
+linearKcross.inhom <- function(X, i, j, lambdaI, lambdaJ,
+                               r=NULL, ...,
+                               correction="Ang", normalise=TRUE) {
+  if(!is.multitype(X, dfok=FALSE)) 
+	stop("Point pattern must be multitype")
+  marx <- marks(X)
+  lev <- levels(marx)
+  if(missing(i)) i <- lev[1L] else
+    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))
+  if(missing(j)) j <- lev[2L] else
+    if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark"))
+  #
+  if(i == j) {
+    I <- (marx == i)
+    result <- linearKinhom(X[I], lambda=lambdaI, r=r,
+                           correction=correction, normalise=normalise, ...)
+  } else {
+    I <- (marx == i)
+    J <- (marx == j)
+    result <- linearKmulti.inhom(X, I, J, lambdaI, lambdaJ,
+                                 r=r, correction=correction,
+                                 normalise=normalise, ...)
+  }
+  # rebrand
+  correction <- attr(result, "correction")
+  type <- if(correction == "Ang") "L, inhom" else "net, inhom"
+  result <- rebadge.as.crossfun(result, "K", type, i, j)
+  return(result)
+}
+
+linearKmulti.inhom <- function(X, I, J, lambdaI, lambdaJ,
+                               r=NULL, ...,
+                               correction="Ang", normalise=TRUE) {
+  stopifnot(inherits(X, "lpp"))
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             Ang="Ang",
+                             best="Ang"),
+                           multi=FALSE)
+  
+  # extract info about pattern
+  np <- npoints(X)
+  lengthL <- volume(domain(X))
+  #
+  # validate I, J
+  if(!is.logical(I) || !is.logical(J))
+    stop("I and J must be logical vectors")
+  if(length(I) != np || length(J) != np)
+    stop(paste("The length of I and J must equal",
+               "the number of points in the pattern"))
+	
+  if(!any(I)) stop("no points satisfy I")
+
+  # validate lambda vectors
+  lambdaI <- getlambda.lpp(lambdaI, X, subset=I, ...)
+  lambdaJ <- getlambda.lpp(lambdaJ, X, subset=J, ...)
+
+  # compute K
+  weightsIJ <- outer(1/lambdaI, 1/lambdaJ, "*")
+  denom <- if(!normalise) lengthL else sum(1/lambdaI) 
+  K <- linearKmultiEngine(X, I, J, r=r,
+                          reweight=weightsIJ, denom=denom,
+                          correction=correction, ...)
+  # set appropriate y axis label
+  correction <- attr(K, "correction")
+  type <- if(correction == "Ang") "L, inhom" else "net, inhom"
+  K <- rebadge.as.crossfun(K, "K", type, "I", "J")
+  # set markers for 'envelope'
+  attr(K, "dangerous") <- union(attr(lambdaI, "dangerous"),
+                                attr(lambdaJ, "dangerous"))
+  return(K)
+}
+
+# .............. internal ...............................
+
+linearKmultiEngine <- function(X, I, J, ..., r=NULL, reweight=NULL, denom=1,
+                          correction="Ang", showworking=FALSE) {
+  # ensure distance information is present
+  X <- as.lpp(X, sparse=FALSE)
+  # extract info about pattern
+  np <- npoints(X)
+  # extract linear network
+  L <- domain(X)
+  W <- Window(L)
+  # determine r values
+  rmaxdefault <- 0.98 * boundingradius(L)
+  breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault)
+  r <- breaks$r
+  rmax <- breaks$max
+  #
+  if(correction == "Ang") {
+    fname <- c("K", "list(L, I, J)")
+    ylab <- quote(K[L,I,J](r))
+  } else {
+    fname <- c("K", "list(net, I, J)")
+    ylab <- quote(K[net,I,J](r))
+  }
+  #
+  if(np < 2) {
+    # no pairs to count: return zero function
+    zeroes <- rep(0, length(r))
+    df <- data.frame(r = r, est = zeroes)
+    K <- fv(df, "r", ylab,
+            "est", . ~ r, c(0, rmax),
+            c("r", makefvlabel(NULL, "hat", fname)),
+            c("distance argument r", "estimated %s"),
+            fname = fname)
+    return(K)
+  }
+  #
+  nI <- sum(I)
+  nJ <- sum(J)
+  whichI <- which(I)
+  whichJ <- which(J)
+  clash <- I & J
+  has.clash <- any(clash)
+  # compute pairwise distances
+  if(exists("crossdist.lpp")) {
+    DIJ <- crossdist(X[I], X[J], check=FALSE)
+    if(has.clash) {
+      # exclude pairs of identical points from consideration
+      Iclash <- which(clash[I])
+      Jclash <- which(clash[J])
+      DIJ[cbind(Iclash,Jclash)] <- Inf
+    }
+  } else {
+    D <- pairdist(X)
+    diag(D) <- Inf
+    DIJ <- D[I, J]
+  }
+  #---  compile into K function ---
+  if(correction == "none" && is.null(reweight)) {
+    # no weights (Okabe-Yamada)
+    K <- compileK(DIJ, r, denom=denom, check=FALSE, fname=fname)
+    K <- rebadge.as.crossfun(K, "K", "net", "I", "J")
+    unitname(K) <- unitname(X)
+    attr(K, "correction") <- correction
+    return(K)
+  }
+  if(correction == "none")
+     edgewt <- 1
+  else {
+     # inverse m weights (Ang's correction)
+     # determine tolerance
+     toler <- default.linnet.tolerance(L)
+     # compute m[i,j]
+     m <- matrix(1, nI, nJ)
+     XI <- X[I]
+     if(!has.clash) {
+       for(k in seq_len(nJ)) {
+         j <- whichJ[k]
+         m[,k] <- countends(L, XI, DIJ[, k], toler=toler)
+       }
+     } else {
+       # don't count identical pairs
+       for(k in seq_len(nJ)) {
+         j <- whichJ[k]
+         inotj <- (whichI != j)
+         m[inotj, k] <- countends(L, XI[inotj], DIJ[inotj, k], toler=toler)
+       }
+     }
+     edgewt <- 1/m
+  }
+  # compute K
+  wt <- if(!is.null(reweight)) edgewt * reweight else edgewt
+  K <- compileK(DIJ, r, weights=wt, denom=denom, check=FALSE, fname=fname)
+  ## rebadge and tweak
+  K <- rebadge.as.crossfun(K, "K", "L", "I", "J")
+  fname <- attr(K, "fname")
+  # tack on theoretical value
+  K <- bind.fv(K, data.frame(theo=r),
+               makefvlabel(NULL, NULL, fname, "pois"),
+               "theoretical Poisson %s")
+  ## 
+  unitname(K) <- unitname(X)
+  fvnames(K, ".") <- rev(fvnames(K, "."))
+  # show working
+  if(showworking)
+    attr(K, "working") <- list(DIJ=DIJ, wt=wt)
+  attr(K, "correction") <- correction
+  return(K)
+}
+
diff --git a/R/lineardisc.R b/R/lineardisc.R
new file mode 100755
index 0000000..7c279c4
--- /dev/null
+++ b/R/lineardisc.R
@@ -0,0 +1,245 @@
+#
+#
+#   lineardisc.R
+#
+#   $Revision: 1.27 $ $Date: 2017/06/05 10:31:58 $
+#
+#   Compute the disc of radius r in a linear network
+#
+#   
+lineardisc <- function(L, x=locator(1), r, plotit=TRUE,
+                       cols=c("blue", "red", "green")) {
+  # L is the linear network (object of class "linnet")
+  # x is the centre point of the disc
+  # r is the radius of the disc
+  #
+  stopifnot(inherits(L, "linnet"))
+  check.1.real(r)
+  if(L$sparse) {
+    message("Converting linear network to non-sparse representation..")
+    L <- as.linnet(L, sparse=FALSE)
+  }
+  lines <- L$lines
+  vertices <- L$vertices
+  lengths <- lengths.psp(lines)
+  win <- L$window
+  #
+  # project x to nearest segment
+  if(missing(x))
+    x <- clickppp(1, win, add=TRUE)
+  else
+    x <- as.ppp(x, win)
+  pro <- project2segment(x, lines)
+  # which segment?
+  startsegment <- pro$mapXY
+  # parametric position of x along this segment
+  startfraction <- pro$tp
+  # vertices at each end of this segment
+  A <- L$from[startsegment]
+  B <- L$to[startsegment]
+  # distances from x to  A and B
+  dxA <- startfraction * lengths[startsegment]
+  dxB <- (1-startfraction) * lengths[startsegment]
+  # is r large enough to reach both A and B?
+  startfilled <- (max(dxA, dxB) <= r)
+  # compute vector of shortest path distances from x to each vertex j,
+  # going through A:
+  dxAv <- dxA + L$dpath[A,]
+  # going through B:
+  dxBv <- dxB + L$dpath[B,]
+  # going either through A or through B:
+  dxv <- pmin.int(dxAv, dxBv)
+  # Thus dxv[j] is the shortest path distance from x to vertex j.
+  #
+  # Determine which vertices are inside the disc of radius r
+  covered <- (dxv <= r)
+  # Thus covered[j] is TRUE if the j-th vertex is inside the disc.
+  #
+  # Determine which line segments are completely inside the disc
+  #
+  from <- L$from
+  to   <- L$to
+  # ( a line segment is inside the disc if the shortest distance
+  #   from x to one of its endpoints, plus the length of the segment,
+  #   is less than r ....
+  allinside <- (dxv[from] + lengths <= r) | (dxv[to] + lengths <= r)
+  #   ... or alternatively, if the sum of the
+  #   two residual distances exceeds the length of the segment )
+  residfrom <- pmax.int(0, r - dxv[from])
+  residto   <- pmax.int(0, r - dxv[to])
+  allinside <- allinside | (residfrom + residto >= lengths)
+  # start segment is special
+  allinside[startsegment] <- startfilled
+  # Thus allinside[k] is TRUE if the k-th segment is inside the disc
+  
+  # Collect all these segments
+  disclines <- lines[allinside]
+  #
+  # Determine which line segments cross the boundary of the disc
+  boundary <- (covered[from] | covered[to]) & !allinside
+  # For each of these, calculate the remaining distance at each end
+  resid.from <- ifelseXB(boundary, pmax.int(r - dxv[from], 0), 0)
+  resid.to   <- ifelseXB(boundary, pmax.int(r - dxv[to],   0), 0)
+  # Where the remaining distance is nonzero, create segment and endpoint
+  okfrom <- (resid.from > 0)
+  okfrom[startsegment] <- FALSE
+  if(any(okfrom)) {
+    v0 <- vertices[from[okfrom]]
+    v1 <- vertices[to[okfrom]]
+    tp <- (resid.from/lengths)[okfrom]
+    vfrom <- ppp((1-tp)*v0$x + tp*v1$x,
+                 (1-tp)*v0$y + tp*v1$y,
+                 window=win)
+    extralinesfrom <- as.psp(from=v0, to=vfrom)
+  } else vfrom <- extralinesfrom <- NULL
+  #
+  okto <- (resid.to > 0)
+  okto[startsegment] <- FALSE
+  if(any(okto)) {
+    v0 <- vertices[to[okto]]
+    v1 <- vertices[from[okto]]
+    tp <- (resid.to/lengths)[okto]
+    vto <- ppp((1-tp)*v0$x + tp*v1$x,
+               (1-tp)*v0$y + tp*v1$y,
+               window=win)
+    extralinesto <- as.psp(from=v0, to=vto)
+  } else vto <- extralinesto <- NULL
+  #
+  # deal with special case where start segment is not fully covered
+  if(!startfilled) {
+    vA <- vertices[A]
+    vB <- vertices[B]
+    rfrac <- r/lengths[startsegment]
+    tleft <- pmax.int(startfraction-rfrac, 0)
+    tright <- pmin.int(startfraction+rfrac, 1)
+    vleft <- ppp((1-tleft) * vA$x + tleft * vB$x,
+                 (1-tleft) * vA$y + tleft * vB$y,
+                 window=win)
+    vright <- ppp((1-tright) * vA$x + tright * vB$x,
+                  (1-tright) * vA$y + tright * vB$y,
+                  window=win)
+    startline <- as.psp(from=vleft, to=vright)
+    startends <- superimpose(if(!covered[A]) vleft else NULL,
+                             if(!covered[B]) vright else NULL)
+  } else startline <- startends <- NULL
+  #
+  # combine all lines
+  disclines <- superimpose(disclines,
+                           extralinesfrom, extralinesto, startline,
+                           W=win, check=FALSE)
+  # combine all disc endpoints
+  discends <- superimpose(vfrom, vto, vertices[dxv == r], startends,
+                          W=win, check=FALSE)
+  #
+  if(plotit) {
+    if(dev.cur() == 1) {
+      # null device - initialise a plot
+      plot(L, main="")
+    }
+    points(x, col=cols[1L], pch=16)
+    plot(disclines, add=TRUE, col=cols[2L], lwd=2)
+    plot(discends, add=TRUE, col=cols[3L], pch=16)
+  }
+  return(list(lines=disclines, endpoints=discends))
+}
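+# Illustrative usage (a sketch, assuming the 'simplenet' example network):
+#   L <- simplenet
+#   centre <- ppp(0.4, 0.6, window=Frame(L))
+#   D <- lineardisc(L, centre, r=0.3, plotit=FALSE)
+#   D$lines      # segments covered by the disc
+#   D$endpoints  # relative boundary of the disc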
+
+countends <- function(L, x=locator(1), r, toler=NULL) {
+  # L is the linear network (object of class "linnet")
+  # x is the centre point of the disc
+  # r is the radius of the disc
+  #
+  stopifnot(inherits(L, "linnet"))
+  # get x
+  if(missing(x))
+    x <- clickppp(1, Window(L), add=TRUE)
+  if(!inherits(x, "lpp"))
+    x <- as.lpp(x, L=L)
+  np <- npoints(x)
+  
+  if(length(r) != np)
+    stop("Length of vector r does not match number of points in x")
+  #
+  if(!is.connected(L)) {
+    #' disconnected network - split into components
+    result <- numeric(np)
+    lab <- connected(L, what="labels")
+    subsets <- split(seq_len(nvertices(L)), factor(lab))
+    for(subi in subsets) {
+      xi <- thinNetwork(x, retainvertices=subi)
+      witch <- which(attr(xi, "retainpoints"))
+      ok <- is.finite(r[witch])
+      witchok <- witch[ok]
+      result[witchok] <-
+        countends(domain(xi), xi[ok], r[witchok], toler=toler)      
+    }
+    return(result)
+  }
+  lines <- L$lines
+  vertices <- L$vertices
+  lengths <- lengths.psp(lines)
+  dpath <- L$dpath
+  nv <- vertices$n
+  ns <- lines$n
+  #
+  if(!spatstat.options("Ccountends")) {
+    #' interpreted code
+    result <- integer(np)
+    for(i in seq_len(np)) 
+      result[i] <- npoints(lineardisc(L, x[i], r[i], plotit=FALSE)$endpoints)
+    return(result)
+  }
+  # extract coordinates
+  coo <- coords(x)
+  #' which segment
+  startsegment <- coo$seg 
+  # parametric position of x along this segment
+  startfraction <- coo$tp
+  # convert indices to C 
+  seg0 <- startsegment - 1L
+  from0 <- L$from - 1L
+  to0   <- L$to - 1L
+  # determine numerical tolerance
+  if(is.null(toler)) {
+    toler <- default.linnet.tolerance(L)
+  } else {
+    check.1.real(toler)
+    stopifnot(toler > 0)
+  }
+  zz <- .C("Ccountends",
+           np = as.integer(np),
+           f = as.double(startfraction),
+           seg = as.integer(seg0),
+           r = as.double(r), 
+           nv = as.integer(nv), 
+           xv = as.double(vertices$x),
+           yv = as.double(vertices$y),  
+           ns = as.integer(ns),
+           from = as.integer(from0),
+           to = as.integer(to0), 
+           dpath = as.double(dpath),
+           lengths = as.double(lengths),
+           toler=as.double(toler),
+           nendpoints = as.integer(integer(np)),
+           PACKAGE = "spatstat")
+  zz$nendpoints
+}
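+# Illustrative check (a sketch, assuming 'simplenet' and 'runiflpp'): for a
+# single point, the C result agrees with counting the endpoints of the
+# corresponding disc explicitly, as in the interpreted branch above.
+#   X <- runiflpp(1, simplenet)
+#   countends(simplenet, X, r=0.2)
+#   npoints(lineardisc(simplenet, as.ppp(X), 0.2, plotit=FALSE)$endpoints)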
+
+default.linnet.tolerance <- function(L) {
+  # L could be a linnet or psp
+  if(!is.null(toler <- L$toler)) return(toler)
+  len2 <- lengths.psp(as.psp(L), squared=TRUE)
+  len2pos <- len2[len2 > 0]
+  toler <- if(length(len2pos) == 0) 0 else (0.001 * sqrt(min(len2pos)))
+  toler <- makeLinnetTolerance(toler)
+  return(toler)
+}
+
+makeLinnetTolerance <- function(toler) {
+  max(sqrt(.Machine$double.xmin),
+      toler[is.finite(toler)], na.rm=TRUE)
+}
+
+
+
+
+  
diff --git a/R/linearmrkcon.R b/R/linearmrkcon.R
new file mode 100644
index 0000000..8c4c230
--- /dev/null
+++ b/R/linearmrkcon.R
@@ -0,0 +1,63 @@
+#
+# linearmrkcon.R
+#
+# mark connection function & mark equality function for linear networks
+#
+# $Revision: 1.4 $ $Date: 2017/02/07 08:12:05 $
+#
+
+linearmarkconnect <- function(X, i, j, r=NULL, ...) {
+  if(!is.multitype(X, dfok=FALSE)) 
+	stop("Point pattern must be multitype")
+  marx <- marks(X)
+  lev <- levels(marx)
+  if(missing(i) || is.null(i)) i <- lev[1L] else
+    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))
+  if(missing(j) || is.null(j)) j <- lev[2L] else
+    if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark"))
+
+  # ensure distance information is present
+  X <- as.lpp(X, sparse=FALSE)
+
+  #
+  pcfij <- linearpcfcross(X, i, j, r=r, ...)
+  pcfall <- linearpcf(X, r=r, ...)
+
+  qi <- mean(marx == i)
+  qj <- mean(marx == j)
+
+  result <- eval.fv(qi * qj * pcfij/pcfall)
+  
+  # rebrand
+  result <- rebadge.as.crossfun(result, "p", "L", i, j)
+  attr(result, "labl") <- attr(pcfij, "labl")
+  return(result)
+}
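+# Illustrative usage (a sketch, assuming the 'chicago' dataset): the mark
+# connection function p[ij](r) = q[i] * q[j] * g[ij](r) / g(r), as computed above.
+#   plot(linearmarkconnect(chicago, "assault", "burglary"))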
+
+linearmarkequal <- local({
+  
+  linearmarkequal <- function(X, r=NULL, ...) {
+    if(!is.multitype(X, dfok=FALSE)) 
+      stop("Point pattern must be multitype")
+  
+    ## ensure distance information is present
+    X <- as.lpp(X, sparse=FALSE)
+
+    lev <- levels(marks(X))
+    v <- list()
+    for(l in lev) v[[l]] <- linearmarkconnect(X, l, l, r=r, ...)
+
+    result <- Reduce(addfuns, v)
+    result <- rebadge.fv(result,
+                        quote(p[L](r)),
+                        new.fname=c("p", "L"))
+    attr(result, "labl") <- attr(v[[1L]], "labl")
+    return(result)
+  }
+
+  addfuns <- function(f1, f2) eval.fv(f1 + f2)
+
+  linearmarkequal
+})
+
+
diff --git a/R/linearpcf.R b/R/linearpcf.R
new file mode 100755
index 0000000..60f0e39
--- /dev/null
+++ b/R/linearpcf.R
@@ -0,0 +1,177 @@
+#
+# linearpcf.R
+#
+# $Revision: 1.26 $ $Date: 2017/08/09 00:21:46 $
+#
+# pair correlation function for point pattern on linear network
+#
+#
+linearpcf <- function(X, r=NULL, ..., correction="Ang", ratio=FALSE) {
+  stopifnot(inherits(X, "lpp"))
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             Ang="Ang",
+                             best="Ang"),
+                           multi=FALSE)
+  # extract info about pattern
+  np <- npoints(X)
+  lengthL <- volume(domain(X))
+  # compute
+  denom <- np * (np - 1)/lengthL
+  g <- linearpcfengine(X, r=r, ...,
+                       denom=denom, correction=correction, ratio=ratio)
+  # extract bandwidth
+  bw <- attr(g, "bw")
+   # set appropriate y axis label
+  switch(correction,
+         Ang  = {
+           ylab <- quote(g[L](r))
+           fname <- c("g", "L")
+         },
+         none = {
+           ylab <- quote(g[net](r))
+           fname <- c("g", "net")
+         })
+  g <- rebadge.fv(g, new.ylab=ylab, new.fname=fname)
+  # reattach bandwidth
+  attr(g, "bw") <- bw
+  return(g)
+}
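+# Illustrative usage (a sketch, assuming the 'spiders' dataset):
+#   g <- linearpcf(spiders)
+#   attr(g, "bw")   # smoothing bandwidth actually used
+#   plot(g)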
+
+linearpcfinhom <- function(X, lambda=NULL, r=NULL,  ...,
+                           correction="Ang", normalise=TRUE, normpower=1,
+			   update=TRUE, leaveoneout=TRUE, ratio=FALSE) {
+  stopifnot(inherits(X, "lpp"))
+  loo.given <- !missing(leaveoneout)
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             Ang="Ang",
+                             best="Ang"),
+                           multi=FALSE)
+  if(is.null(lambda))
+    return(linearpcf(X, r=r, ..., correction=correction, ratio=ratio))
+  if(normalise) {
+    check.1.real(normpower)
+    stopifnot(normpower >= 1)
+  }
+  # extract info about pattern
+  lengthL <- volume(domain(X))
+  #
+  lambdaX <- getlambda.lpp(lambda, X, ...,
+                           update=update, leaveoneout=leaveoneout,
+                           loo.given=loo.given,
+                           lambdaname="lambda")
+  #
+  invlam <- 1/lambdaX
+  invlam2 <- outer(invlam, invlam, "*")
+  denom <- if(!normalise) lengthL else
+           if(normpower == 1) sum(invlam) else
+           lengthL * (sum(invlam)/lengthL)^normpower
+  g <- linearpcfengine(X, ..., r=r,
+                       reweight=invlam2, denom=denom,
+		       correction=correction, ratio=ratio)
+  # extract bandwidth
+  bw <- attr(g, "bw")
+  # set appropriate y axis label
+  switch(correction,
+         Ang  = {
+           ylab <- quote(g[L, inhom](r))
+           fname <- c("g", "list(L, inhom)")
+         },
+         none = {
+           ylab <- quote(g[net, inhom](r))
+           fname <- c("g", "list(net, inhom)")
+         })
+  g <- rebadge.fv(g, new.fname=fname, new.ylab=ylab)
+  # reattach bandwidth
+  attr(g, "bw") <- bw
+  attr(g, "dangerous") <- attr(lambdaX, "dangerous")
+  return(g)
+}
+
+
+linearpcfengine <- function(X, ..., r=NULL,
+                            reweight=NULL, denom=1,
+			    correction="Ang", ratio=FALSE) {
+  # ensure distance information is present
+  X <- as.lpp(X, sparse=FALSE)
+  # extract info about pattern
+  np <- npoints(X)
+  # extract linear network
+  L <- domain(X)
+  W <- Window(L)
+  # determine r values
+  rmaxdefault <- 0.98 * boundingradius(L)
+  breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault)
+  r <- breaks$r
+  rmax <- breaks$max
+  #
+  type <- if(correction == "Ang") "L" else "net"
+  fname <- c("g", type)
+  ylab <- substitute(g[type](r), list(type=type))
+  #  
+  if(np < 2) {
+    # no pairs to count: return zero function
+    zeroes <- numeric(length(r))
+    df <- data.frame(r = r, est = zeroes)
+    g <- ratfv(df, NULL, 0,
+            "r", ylab,
+            "est", . ~ r, c(0, rmax),
+            c("r", makefvlabel(NULL, "hat", fname)), 
+            c("distance argument r", "estimated %s"),
+            fname = fname,
+	    ratio=ratio)
+    if(correction == "Ang") {
+      # tack on theoretical value
+      g <- bind.ratfv(g,
+                      quotient = data.frame(theo=r),
+		      denominator = 0, 
+                      labl = makefvlabel(NULL, NULL, fname, "theo"),
+                      desc = "theoretical Poisson %s",
+   		      ratio=ratio)
+    }
+    return(g)
+  }
+  # compute pairwise distances  
+  D <- pairdist(X)
+  #---  compile into pcf ---
+  if(correction == "none" && is.null(reweight)) {
+    # no weights (Okabe-Yamada)
+    g <- compilepcf(D, r, denom=denom, fname=fname, ratio=ratio)
+    unitname(g) <- unitname(X)
+    attr(g, "correction") <- correction
+    return(g)
+  }
+  if(correction == "none")
+     edgewt <- 1
+  else {
+     # inverse m weights (Ang's correction)
+     # determine tolerance
+     toler <- default.linnet.tolerance(L)
+     # compute m[i,j]
+     m <- matrix(1, np, np)
+     for(j in 1:np) 
+       m[ -j, j] <- countends(L, X[-j], D[-j,j], toler=toler)
+     edgewt <- 1/m
+  }
+  # compute pcf
+  wt <- if(!is.null(reweight)) edgewt * reweight else edgewt
+  g <- compilepcf(D, r, weights=wt, denom=denom, ..., fname=fname, ratio=ratio)
+  # extract bandwidth
+  bw <- attr(g, "bw")
+  # tack on theoretical value
+  g <- bind.ratfv(g,
+                  quotient = data.frame(theo=rep.int(1,length(r))),
+		  denominator = denom,
+                  labl = makefvlabel(NULL, NULL, fname, "pois"),
+                  desc = "theoretical Poisson %s",
+		  ratio = ratio)
+  # tweak
+  unitname(g) <- unitname(X)
+  fvnames(g, ".") <- rev(fvnames(g, "."))
+  # tack on bandwidth again
+  attr(g, "bw") <- bw
+  attr(g, "correction") <- correction
+  return(g)
+}
+
diff --git a/R/linearpcfmulti.R b/R/linearpcfmulti.R
new file mode 100644
index 0000000..bc5e4e9
--- /dev/null
+++ b/R/linearpcfmulti.R
@@ -0,0 +1,294 @@
+#
+# linearpcfmulti.R
+#
+# $Revision: 1.12 $ $Date: 2017/02/07 08:12:05 $
+#
+# pair correlation functions for multitype point pattern on linear network
+#
+#
+
+linearpcfdot <- function(X, i, r=NULL, ..., correction="Ang") {
+  if(!is.multitype(X, dfok=FALSE)) 
+	stop("Point pattern must be multitype")
+  marx <- marks(X)
+  lev <- levels(marx)
+  if(missing(i) || is.null(i)) i <- lev[1L] else
+    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))  
+  I <- (marx == i)
+  J <- rep(TRUE, npoints(X))  # i.e. all points
+  result <- linearpcfmulti(X, I, J,
+                           r=r, correction=correction, ...)
+  correction <- attr(result, "correction")
+  type <- if(correction == "Ang") "L" else "net"
+  result <- rebadge.as.dotfun(result, "g", type, i)
+  return(result)
+}
+
+linearpcfcross <- function(X, i, j, r=NULL, ..., correction="Ang") {
+  if(!is.multitype(X, dfok=FALSE)) 
+	stop("Point pattern must be multitype")
+  marx <- marks(X)
+  lev <- levels(marx)
+  if(missing(i) || is.null(i)) i <- lev[1L] else
+    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))
+  if(missing(j) || is.null(j)) j <- lev[2L] else
+    if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark"))
+  #
+  if(i == j) {
+    result <- linearpcf(X[marx == i], r=r, correction=correction, ...)
+  } else {
+    I <- (marx == i)
+    J <- (marx == j)
+    result <- linearpcfmulti(X, I, J, r=r, correction=correction, ...)
+  }
+  # rebrand
+  correction <- attr(result, "correction")
+  type <- if(correction == "Ang") "L" else "net"
+  result <- rebadge.as.crossfun(result, "g", type, i, j)
+  return(result)
+}
+
+linearpcfmulti <- function(X, I, J, r=NULL, ..., correction="Ang") {
+  stopifnot(inherits(X, "lpp"))
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             Ang="Ang",
+                             best="Ang"),
+                           multi=FALSE)
+  
+  # extract info about pattern
+  np <- npoints(X)
+  lengthL <- volume(domain(X))
+  # validate I, J
+  if(!is.logical(I) || !is.logical(J))
+    stop("I and J must be logical vectors")
+  if(length(I) != np || length(J) != np)
+    stop(paste("The length of I and J must equal",
+               "the number of points in the pattern"))
+	
+  if(!any(I)) stop("no points satisfy I")
+#  if(!any(J)) stop("no points satisfy J")
+		
+  nI <- sum(I)
+  nJ <- sum(J)
+  nIandJ <- sum(I & J)
+#  lambdaI <- nI/lengthL
+#  lambdaJ <- nJ/lengthL
+  # compute pcf
+  denom <- (nI * nJ - nIandJ)/lengthL
+  g <- linearPCFmultiEngine(X, I, J, r=r, denom=denom, correction=correction, ...)
+  # set appropriate y axis label
+  correction <- attr(g, "correction")
+  type <- if(correction == "Ang") "L" else "net"
+  g <- rebadge.as.crossfun(g, "g", type, "I", "J")
+  attr(g, "correction") <- correction
+  return(g)
+}
+
+# ................ inhomogeneous ............................
+
+linearpcfdot.inhom <- function(X, i, lambdaI, lambdadot,
+                             r=NULL, ..., correction="Ang", normalise=TRUE) {
+  if(!is.multitype(X, dfok=FALSE)) 
+	stop("Point pattern must be multitype")
+  marx <- marks(X)
+  lev <- levels(marx)
+  if(missing(i)) i <- lev[1L] else
+    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))  
+  I <- (marx == i)
+  J <- rep(TRUE, npoints(X))  # i.e. all points
+  # compute
+  result <- linearpcfmulti.inhom(X, I, J, lambdaI, lambdadot, 
+                               r=r, correction=correction, normalise=normalise,
+                               ...)
+  correction <- attr(result, "correction")
+  type <- if(correction == "Ang") "L, inhom" else "net, inhom"
+  result <- rebadge.as.dotfun(result, "g", type, i)
+  return(result)
+}
+
+linearpcfcross.inhom <- function(X, i, j, lambdaI, lambdaJ,
+                               r=NULL, ...,
+                               correction="Ang", normalise=TRUE) {
+  if(!is.multitype(X, dfok=FALSE)) 
+	stop("Point pattern must be multitype")
+  marx <- marks(X)
+  lev <- levels(marx)
+  if(missing(i)) i <- lev[1L] else
+    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))
+  if(missing(j)) j <- lev[2L] else
+    if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark"))
+  #
+  if(i == j) {
+    I <- (marx == i)
+    result <- linearpcfinhom(X[I], lambda=lambdaI, r=r,
+                           correction=correction, normalise=normalise, ...)
+  } else {
+    I <- (marx == i)
+    J <- (marx == j)
+    result <- linearpcfmulti.inhom(X, I, J, lambdaI, lambdaJ,
+                                 r=r, correction=correction,
+                                 normalise=normalise, ...)
+  }
+  # rebrand
+  correction <- attr(result, "correction")
+  type <- if(correction == "Ang") "L, inhom" else "net, inhom"
+  result <- rebadge.as.crossfun(result, "g", type, i, j)
+  return(result)
+}
+
+linearpcfmulti.inhom <- function(X, I, J, lambdaI, lambdaJ,
+                               r=NULL, ...,
+                               correction="Ang",
+                               normalise=TRUE) {
+  stopifnot(inherits(X, "lpp"))
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             Ang="Ang",
+                             best="Ang"),
+                           multi=FALSE)
+  
+  # extract info about pattern
+  np <- npoints(X)
+  lengthL <- volume(domain(X))
+  # validate I, J
+  if(!is.logical(I) || !is.logical(J))
+    stop("I and J must be logical vectors")
+  if(length(I) != np || length(J) != np)
+    stop(paste("The length of I and J must equal",
+               "the number of points in the pattern"))
+	
+  if(!any(I)) stop("no points satisfy I")
+
+  # validate lambda vectors
+  lambdaI <- getlambda.lpp(lambdaI, X, subset=I, ...)
+  lambdaJ <- getlambda.lpp(lambdaJ, X, subset=J, ...)
+
+  # compute pcf
+  weightsIJ <- outer(1/lambdaI, 1/lambdaJ, "*")
+  denom <- if(!normalise) lengthL else sum(1/lambdaI) 
+  g <- linearPCFmultiEngine(X, I, J, r=r,
+                            reweight=weightsIJ, denom=denom,
+                            correction=correction, ...)
+  # set appropriate y axis label
+  correction <- attr(g, "correction")
+  type <- if(correction == "Ang") "L, inhom" else "net, inhom"
+  g <- rebadge.as.crossfun(g, "g", type, "I", "J")
+  attr(g, "correction") <- correction
+  attr(g, "dangerous") <- union(attr(lambdaI, "dangerous"),
+                                attr(lambdaJ, "dangerous"))
+  return(g)
+}
+
+# .............. internal ...............................
+
+linearPCFmultiEngine <- function(X, I, J, ..., r=NULL, reweight=NULL, denom=1,
+                          correction="Ang", showworking=FALSE) {
+  # ensure distance information is present
+  X <- as.lpp(X, sparse=FALSE)
+  # extract info about pattern
+  np <- npoints(X)
+  # extract linear network
+  L <- domain(X)
+  W <- Window(L)
+  # determine r values
+  rmaxdefault <- 0.98 * boundingradius(L)
+  breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault)
+  r <- breaks$r
+  rmax <- breaks$max
+  #
+  if(correction == "Ang") {
+    fname <- c("g", "list(L, I, J)")
+    ylab <- quote(g[L,I,J](r))
+  } else {
+    fname <- c("g", "list(net, I, J)")
+    ylab <- quote(g[net,I,J](r))
+  }
+  #
+   if(np < 2) {
+    # no pairs to count: return zero function
+    zeroes <- rep(0, length(r))
+    df <- data.frame(r = r, est = zeroes)
+    g <- fv(df, "r", ylab,
+            "est", . ~ r, c(0, rmax),
+            c("r", makefvlabel(NULL, "hat", fname)), 
+            c("distance argument r", "estimated %s"),
+            fname = fname)
+    unitname(g) <- unitname(X)
+    attr(g, "correction") <- correction
+    return(g)
+  }
+  #
+  nI <- sum(I)
+  nJ <- sum(J)
+  whichI <- which(I)
+  whichJ <- which(J)
+  clash <- I & J
+  has.clash <- any(clash)
+  # compute pairwise distances
+  if(exists("crossdist.lpp")) {
+    DIJ <- crossdist(X[I], X[J], check=FALSE)
+    if(has.clash) {
+      # exclude pairs of identical points from consideration
+      Iclash <- which(clash[I])
+      Jclash <- which(clash[J])
+      DIJ[cbind(Iclash,Jclash)] <- Inf
+    }
+  } else {
+    D <- pairdist(X)
+    diag(D) <- Inf
+    DIJ <- D[I, J]
+  }
+  #---  compile into pair correlation function ---
+  if(correction == "none" && is.null(reweight)) {
+    # no weights (Okabe-Yamada)
+    g <- compilepcf(DIJ, r, denom=denom, check=FALSE, fname=fname)
+    g <- rebadge.as.crossfun(g, "g", "net", "I", "J")    
+    unitname(g) <- unitname(X)
+    attr(g, "correction") <- correction
+    return(g)
+  }
+  if(correction == "none")
+     edgewt <- 1
+  else {
+     # inverse m weights (Ang's correction)
+     # determine tolerance
+     toler <- default.linnet.tolerance(L)
+     # compute m[i,j]
+     m <- matrix(1, nI, nJ)
+     XI <- X[I]
+     if(!has.clash) {
+       for(k in seq_len(nJ)) {
+         j <- whichJ[k]
+         m[,k] <- countends(L, XI, DIJ[, k], toler=toler)
+       }
+     } else {
+       # don't count identical pairs
+       for(k in seq_len(nJ)) {
+         j <- whichJ[k]
+         inotj <- (whichI != j)
+         m[inotj, k] <- countends(L, XI[inotj], DIJ[inotj, k], toler=toler)
+       }
+     }
+     edgewt <- 1/m
+  }
+  # compute pcf
+  wt <- if(!is.null(reweight)) edgewt * reweight else edgewt
+  g <- compilepcf(DIJ, r, weights=wt, denom=denom, check=FALSE, ...,
+                  fname=fname)
+  ## rebadge and tweak
+  g <- rebadge.as.crossfun(g, "g", "L", "I", "J")
+  fname <- attr(g, "fname")
+  # tack on theoretical value
+  g <- bind.fv(g, data.frame(theo=rep(1,length(r))),
+               makefvlabel(NULL, NULL, fname, "pois"),
+               "theoretical Poisson %s")
+  unitname(g) <- unitname(X)
+  fvnames(g, ".") <- rev(fvnames(g, "."))
+  # show working
+  if(showworking)
+    attr(g, "working") <- list(DIJ=DIJ, wt=wt)
+  attr(g, "correction") <- correction
+  return(g)
+}
+
diff --git a/R/linequad.R b/R/linequad.R
new file mode 100755
index 0000000..b88f1b4
--- /dev/null
+++ b/R/linequad.R
@@ -0,0 +1,254 @@
+#
+# linequad.R
+#
+#  $Revision: 1.14 $ $Date: 2017/06/05 10:31:58 $
+#
+# create quadscheme for a pattern of points lying *on* line segments
+
+linequad <- function(X, Y, ..., eps=NULL, nd=1000, random=FALSE) {
+  epsgiven <- !is.null(eps)
+  if(is.lpp(X)) {
+    # extract local coordinates from lpp object
+    coo <- coords(X)
+    mapXY <- coo$seg
+    tp    <- coo$tp
+    Xproj <- as.ppp(X)
+    if(!missing(Y) && !is.null(Y))
+      warning("Argument Y ignored when X is an lpp object")
+    Y <- as.psp(X)
+  } else if(is.ppp(X)) {
+    # project data points onto segments
+    stopifnot(is.psp(Y))
+    v <- project2segment(X, Y)
+    Xproj <- v$Xproj
+    mapXY <- v$mapXY
+    tp    <- v$tp
+  } else stop("X should be an object of class lpp or ppp")
+  
+  # handle multitype
+  ismulti <- is.multitype(X)
+  if(is.marked(X) && !ismulti)
+    stop("Not implemented for marked patterns")
+  if(ismulti) {
+    marx <- marks(X)
+    flev <- factor(levels(marx))
+  }
+  #
+  win <- as.owin(Y)
+  len <- lengths.psp(Y)
+  nseg <- length(len)
+  if(is.null(eps)) {
+    stopifnot(is.numeric(nd) && length(nd) == 1L && is.finite(nd) && nd > 0)
+    eps <- sum(len)/nd
+  } else
+  stopifnot(is.numeric(eps) && length(eps) == 1L && is.finite(eps) && eps > 0)
+  ##
+  if(is.lpp(X) && spatstat.options('Clinequad')) {
+    L <- as.linnet(X)
+    W <- Frame(L)
+    V <- vertices(L)
+    nV <- npoints(V)
+    coordsV <- coords(V)
+    coordsX <- coords(X)
+    nX <- npoints(X)
+    ooX <- order(coordsX$seg)
+    ndumeach <- ceiling(len/eps) + 1L
+    ndummax <- sum(ndumeach)
+    maxdataperseg <- max(table(factor(coordsX$seg, levels=1:nsegments(L))))
+    maxscratch <- max(ndumeach) + maxdataperseg
+    if(!ismulti) {
+      if(!random) {
+        z <- .C("Clinequad",
+                ns    = as.integer(nseg),
+                from  = as.integer(L$from-1L),
+                to    = as.integer(L$to-1L), 
+                nv    = as.integer(nV),
+                xv    = as.double(coordsV$x),
+                yv    = as.double(coordsV$y), 
+                eps   = as.double(eps),
+                ndat  = as.integer(nX),
+                sdat  = as.integer(coordsX$seg[ooX]-1L),
+                tdat  = as.double(coordsX$tp[ooX]),
+                wdat  = as.double(numeric(nX)),
+                ndum  = as.integer(integer(1L)),
+                xdum  = as.double(numeric(ndummax)),
+                ydum  = as.double(numeric(ndummax)),
+                sdum  = as.integer(integer(ndummax)),
+                tdum  = as.double(numeric(ndummax)),
+                wdum  = as.double(numeric(ndummax)),
+                maxscratch = as.integer(maxscratch),
+                PACKAGE = "spatstat")
+      } else {
+        z <- .C("ClineRquad",
+                ns    = as.integer(nseg),
+                from  = as.integer(L$from-1L),
+                to    = as.integer(L$to-1L), 
+                nv    = as.integer(nV),
+                xv    = as.double(coordsV$x),
+                yv    = as.double(coordsV$y), 
+                eps   = as.double(eps),
+                ndat  = as.integer(nX),
+                sdat  = as.integer(coordsX$seg[ooX]-1L),
+                tdat  = as.double(coordsX$tp[ooX]),
+                wdat  = as.double(numeric(nX)),
+                ndum  = as.integer(integer(1L)),
+                xdum  = as.double(numeric(ndummax)),
+                ydum  = as.double(numeric(ndummax)),
+                sdum  = as.integer(integer(ndummax)),
+                tdum  = as.double(numeric(ndummax)),
+                wdum  = as.double(numeric(ndummax)),
+                maxscratch = as.integer(maxscratch),
+                PACKAGE = "spatstat")
+      }
+      seqdum <- seq_len(z$ndum)
+      dum <- with(z, ppp(xdum[seqdum], ydum[seqdum], window=W, check=FALSE))
+      wdum <- z$wdum[seqdum]
+      wdat <- numeric(nX)
+      wdat[ooX] <- z$wdat
+      dat <- as.ppp(X)
+    } else {
+      ntypes <- length(flev)
+      ndummax <- ntypes * (ndummax + nX)
+      maxscratch <- ntypes * maxscratch
+      if(!random) {
+        z <- .C("ClineMquad",
+                ns    = as.integer(nseg),
+                from  = as.integer(L$from-1L),
+                to    = as.integer(L$to-1L), 
+                nv    = as.integer(nV),
+                xv    = as.double(coordsV$x),
+                yv    = as.double(coordsV$y), 
+                eps   = as.double(eps),
+                ntypes = as.integer(ntypes),
+                ndat  = as.integer(nX),
+                xdat  = as.double(coordsX$x),
+                ydat  = as.double(coordsX$y),
+                mdat  = as.integer(as.integer(marx)-1L),
+                sdat  = as.integer(coordsX$seg[ooX]-1L),
+                tdat  = as.double(coordsX$tp[ooX]),
+                wdat  = as.double(numeric(nX)),
+                ndum  = as.integer(integer(1L)),
+                xdum  = as.double(numeric(ndummax)),
+                ydum  = as.double(numeric(ndummax)),
+                mdum  = as.integer(integer(ndummax)),
+                sdum  = as.integer(integer(ndummax)),
+                tdum  = as.double(numeric(ndummax)),
+                wdum  = as.double(numeric(ndummax)),
+                maxscratch = as.integer(maxscratch),
+                PACKAGE = "spatstat")
+      } else {
+        z <- .C("ClineRMquad",
+                ns    = as.integer(nseg),
+                from  = as.integer(L$from-1L),
+                to    = as.integer(L$to-1L), 
+                nv    = as.integer(nV),
+                xv    = as.double(coordsV$x),
+                yv    = as.double(coordsV$y), 
+                eps   = as.double(eps),
+                ntypes = as.integer(ntypes),
+                ndat  = as.integer(nX),
+                xdat  = as.double(coordsX$x),
+                ydat  = as.double(coordsX$y),
+                mdat  = as.integer(as.integer(marx)-1L),
+                sdat  = as.integer(coordsX$seg[ooX]-1L),
+                tdat  = as.double(coordsX$tp[ooX]),
+                wdat  = as.double(numeric(nX)),
+                ndum  = as.integer(integer(1L)),
+                xdum  = as.double(numeric(ndummax)),
+                ydum  = as.double(numeric(ndummax)),
+                mdum  = as.integer(integer(ndummax)),
+                sdum  = as.integer(integer(ndummax)),
+                tdum  = as.double(numeric(ndummax)),
+                wdum  = as.double(numeric(ndummax)),
+                maxscratch = as.integer(maxscratch),
+                PACKAGE = "spatstat")
+      }
+      seqdum <- seq_len(z$ndum)
+      marques <- factor(z$mdum[seqdum] + 1L, labels=flev)
+      dum <- with(z, ppp(xdum[seqdum], ydum[seqdum], marks=marques,
+                         window=W, check=FALSE))
+      wdum <- z$wdum[seqdum]
+      wdat <- numeric(nX)
+      wdat[ooX] <- z$wdat
+      dat <- as.ppp(X)
+    }      
+  } else {
+    ## older, interpreted code
+    ## initialise quad scheme 
+    dat <- dum <- ppp(numeric(0), numeric(0), window=win)
+    wdat <- wdum <- numeric(0)
+    if(ismulti)
+      marks(dat) <- marks(dum) <- marx[integer(0)]
+    ## consider each segment in turn
+    YY    <- as.data.frame(Y)
+    for(i in 1:nseg) {
+      ## divide segment into pieces of length eps
+      ## with shorter bits at each end
+      leni <- len[i]
+      nwhole <- floor(leni/eps)
+      if(leni/eps - nwhole < 0.5 && nwhole > 2)
+        nwhole <- nwhole - 1
+      rump <- (leni - nwhole * eps)/2
+      brks <- c(0, rump + (0:nwhole) * eps, leni)
+      nbrks <- length(brks)
+      ## dummy points at middle of each piece
+      sdum <- (brks[-1L] + brks[-nbrks])/2
+      x <- with(YY, x0[i] + (sdum/leni) * (x1[i]-x0[i]))
+      y <- with(YY, y0[i] + (sdum/leni) * (y1[i]-y0[i]))
+      newdum <- list(x=x, y=y)
+      ndum <- length(sdum)
+      IDdum <- 1:ndum
+      ## relevant data points
+      relevant <- (mapXY == i)
+      newdat <- Xproj[relevant]
+      sdat   <- leni * tp[relevant]
+      IDdat  <- findInterval(sdat, brks,
+                             rightmost.closed=TRUE, all.inside=TRUE)
+      ## determine weights
+      w <- countingweights(id=c(IDdum, IDdat), areas=diff(brks))
+      wnewdum <- w[1:ndum]
+      wnewdat <- w[-(1:ndum)]
+      ##
+      if(!ismulti) {
+        ## unmarked pattern
+        dat <- superimpose(dat, newdat, W=win, check=FALSE)
+        dum <- superimpose(dum, newdum, W=win, check=FALSE)
+        wdat <- c(wdat, wnewdat)
+        wdum <- c(wdum, wnewdum)
+      } else {
+        ## marked point pattern
+        ## attach correct marks to data points
+        marks(newdat) <- marx[relevant]
+        dat <- superimpose(dat, newdat, W=win, check=FALSE)
+        wdat <- c(wdat, wnewdat)
+        newdum <- as.ppp(newdum, W=win, check=FALSE)
+        ## replicate dummy points with each mark
+        ## also add points at data locations with other marks
+        for(k in seq_len(length(flev))) {
+          le <- flev[k]
+          avoid <- (marks(newdat) != le)
+          dum <- superimpose(dum,
+                             newdum %mark% le,
+                             newdat[avoid] %mark% le,
+                             W=win, check=FALSE)
+          wdum <- c(wdum, wnewdum, wnewdat[avoid])
+        }
+      }
+    }
+  }
+  ## save parameters
+  dmethod <- paste("Equally spaced along each segment at spacing eps =",
+                    signif(eps, 4),
+                    summary(unitname(X))$plural)
+  if(!epsgiven)
+    dmethod <- paste0(dmethod, "\nOriginal parameter nd = ", nd)
+  wmethod <- "Counting weights based on segment length"
+  param <- list(dummy = list(method=dmethod),
+                weight = list(method=wmethod))
+  ## make quad scheme
+  Qout <- quad(dat, dum, c(wdat, wdum), param=param)
+  ## silently attach lines
+  attr(Qout, "lines") <- Y
+  return(Qout)
+}
+
diff --git a/R/linfun.R b/R/linfun.R
new file mode 100644
index 0000000..8c8a830
--- /dev/null
+++ b/R/linfun.R
@@ -0,0 +1,140 @@
+#
+#   linfun.R
+#
+#   Class of functions of location on a linear network
+#
+#   $Revision: 1.12 $   $Date: 2017/06/05 10:31:58 $
+#
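+#   Example (a hedged sketch; 'simplenet' is a network dataset in spatstat):
+#     f <- linfun(function(x, y, seg, tp) { x + y }, simplenet)
+#     f(0.2, 0.3)   # evaluates at the network point nearest to (0.2, 0.3)
+#     plot(f)       # rendered via as.linim()
+#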
+
+linfun <- function(f, L) {
+  stopifnot(is.function(f))
+  stopifnot(inherits(L, "linnet"))
+  fargs <- names(formals(f))
+  needargs <- c("x", "y", "seg", "tp")
+  if(!all(needargs %in% fargs))
+    stop(paste("Function must have formal arguments",
+               commasep(sQuote(needargs))),
+         call.=FALSE)
+  otherfargs <- setdiff(fargs, needargs)
+  g <- function(...) {
+    argh <- list(...)
+    extra <- names(argh) %in% otherfargs
+    if(!any(extra)) {
+      X <- as.lpp(..., L=L)
+      value <- do.call(f, as.list(coords(X)))
+    } else {
+      extrargs <- argh[extra]
+      mainargs <- argh[!extra]
+      X <- do.call(as.lpp, append(mainargs, list(L=L)))
+      value <- do.call(f, append(as.list(coords(X)), extrargs))
+    }
+    return(value)
+  }
+  class(g) <- c("linfun", class(g))
+  attr(g, "L") <- L
+  attr(g, "f") <- f
+  return(g)
+}
+
+print.linfun <- function(x, ...) {
+  L <- as.linnet(x)
+  if(!is.null(explain <- attr(x, "explain"))) {
+    explain(x)
+  } else {
+    splat("Function on linear network:")
+    print(attr(x, "f"), ...)
+    splat("Function domain:")
+    print(L)
+  }
+  invisible(NULL)
+}
+
+summary.linfun <- function(object, ...) { print(object, ...) }
+
+as.linim.linfun <- function(X, L=domain(X),
+                            ..., eps = NULL, dimyx = NULL, xy = NULL,
+                                       delta=NULL) {
+  if(is.null(L))
+    L <- domain(X)
+  # create template
+  Y <- as.linim(1, L, eps=eps, dimyx=dimyx, xy=xy, delta=delta)
+  # extract coordinates of sample points along network
+  df <- attr(Y, "df")
+  coo <- df[, c("x", "y", "mapXY", "tp")]
+  colnames(coo)[3L] <- "seg"
+  # evaluate function at sample points
+  vals <- do.call(X, append(as.list(coo), list(...)))
+  # write values in data frame
+  df$values <- vals
+  # overwrite values in pixel array 
+  storage.mode(Y$v) <- typ <- typeof(vals)
+  Y$type <- if(typ == "double") "real" else typ
+  pix <- nearest.raster.point(df$xc, df$yc, Y)
+  Y$v[] <- NA
+  Y$v[cbind(pix$row, pix$col)] <- vals
+  #
+  attr(Y, "df") <- df
+  return(Y)
+}
+
+as.data.frame.linfun <- function(x, ...) {
+  as.data.frame(as.linim(x, ...))
+}
+
+as.linfun.linim <- function(X, ...) {
+  trap.extra.arguments(..., .Context="as.linfun.linim")
+  ## extract info
+  L <- as.linnet(X)
+  df <- attr(X, "df")
+  ## function values and corresponding locations
+  values <- df$values
+  locations <- with(df, as.lpp(x=x, y=y, seg=mapXY, tp=tp, L=L))
+  ## Function that maps any spatial location to the nearest data location 
+  nearestloc <- nnfun(locations)
+  ## Function that reads value at nearest data location
+  f <- function(x, y, seg, tp) {
+    values[nearestloc(x,y,seg,tp)]
+  }
+  g <- linfun(f, L)
+  return(g)
+}
+
+plot.linfun <- function(x, ..., L=NULL, main) {
+  if(missing(main)) main <- short.deparse(substitute(x))
+  if(is.null(L)) L <- as.linnet(x)
+  argh <- list(...)
+  fargnames <- get("otherfargs", envir=environment(x))
+  resolution <- c("eps", "dimyx", "xy", "delta")
+  convert <- names(argh) %in% c(fargnames, resolution)
+  Z <- do.call(as.linim, append(list(x, L=L), argh[convert]))
+  rslt <- do.call(plot.linim, append(list(Z, main=main), argh[!convert]))
+  return(invisible(rslt))
+}
+
+as.owin.linfun <- function(W, ...) {
+  as.owin(as.linnet(W))
+}
+
+domain.linfun <- as.linnet.linfun <- function(X, ...) {
+  attr(X, "L")
+}
+
+as.function.linfun <- function(x, ...) {
+  nax <- names(attributes(x))
+  if(!is.null(nax)) {
+    retain <- (nax == "srcref")
+    attributes(x)[!retain] <- NULL
+  }
+  return(x)
+}
+
+integral.linfun <- function(f, domain=NULL, ..., delta) {
+  if(missing(delta)) delta <- NULL
+  integral(as.linim(f, delta=delta), domain=domain, ...)
+}
+
+as.linfun <- function(X, ...) {
+  UseMethod("as.linfun")
+}
+
+as.linfun.linfun <- function(X, ...) { return(X) }
\ No newline at end of file
diff --git a/R/linim.R b/R/linim.R
new file mode 100755
index 0000000..4cef9ad
--- /dev/null
+++ b/R/linim.R
@@ -0,0 +1,652 @@
+#
+# linim.R
+#
+#  $Revision: 1.35 $   $Date: 2017/07/13 02:43:30 $
+#
+#  Image/function on a linear network
+#
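+#  Example (a hedged sketch; 'simplenet' is a network dataset in spatstat):
+#    Z <- as.linim(function(x, y) { x + y }, L=simplenet)
+#    plot(Z, style="width")   # line width proportional to pixel value
+#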
+
+linim <- function(L, Z, ..., df=NULL) {
+  L <- as.linnet(L)
+  stopifnot(is.im(Z))
+  if(is.null(df)) {
+    # compute the data frame of mapping information
+    xx <- rasterx.im(Z)
+    yy <- rastery.im(Z)
+    mm <- !is.na(Z$v)
+    xx <- as.vector(xx[mm])
+    yy <- as.vector(yy[mm])
+    pixelcentres <- ppp(xx, yy, window=as.rectangle(Z), check=FALSE)
+    pixdf <- data.frame(xc=xx, yc=yy)
+    # project pixel centres onto lines
+    p2s <- project2segment(pixelcentres, as.psp(L))
+    projloc <- as.data.frame(p2s$Xproj)
+    projmap <- as.data.frame(p2s[c("mapXY", "tp")])
+    # extract values
+    values <- Z[pixelcentres]
+    # bundle
+    df <- cbind(pixdf, projloc, projmap, data.frame(values=values))
+  } else {
+    stopifnot(is.data.frame(df))
+    neednames <- c("xc", "yc", "x", "y", "mapXY", "tp", "values")
+    ok <- neednames %in% names(df)
+    if(any(!ok)) {
+      nn <- sum(!ok)
+      stop(paste(ngettext(nn, "A column", "Columns"),
+                 "named", commasep(sQuote(neednames[!ok])),
+                 ngettext(nn, "is", "are"),
+                 "missing from argument", sQuote("df")))
+    }
+  }
+  out <- Z
+  attr(out, "L") <- L
+  attr(out, "df") <- df
+  class(out) <- c("linim", class(out))
+  return(out)
+}
+
+print.linim <- function(x, ...) {
+  splat("Image on linear network")
+  print(attr(x, "L"))
+  NextMethod("print")
+}
+
+summary.linim <- function(object, ...) {
+  y <- NextMethod("summary")
+  if("integral" %in% names(y))
+    y$integral <- integral(object)
+  y$network <- summary(as.linnet(object))
+  class(y) <- c("summary.linim", class(y))
+  return(y)
+}
+
+print.summary.linim <- function(x, ...) {
+  splat(paste0(x$type, "-valued"), "pixel image on a linear network")
+  unitinfo <- summary(x$units)
+  pluralunits <- unitinfo$plural
+  sigdig <- getOption('digits')
+  di <- x$dim
+  win <- x$window
+  splat(di[1L], "x", di[2L], "pixel array (ny, nx)")
+  splat("enclosing rectangle:",
+        prange(signif(win$xrange, sigdig)),
+        "x",
+        prange(signif(win$yrange, sigdig)),
+        unitinfo$plural,
+        unitinfo$explain)
+  splat("dimensions of each pixel:",
+        signif(x$xstep, sigdig), "x", signif(x$ystep, sigdig),
+        pluralunits)
+  splat("Pixel values (on network):")
+  switch(x$type,
+         integer=,
+         real={
+           splat("\trange =", prange(signif(x$range, sigdig)))
+           splat("\tintegral =", signif(x$integral, sigdig))
+           splat("\tmean =", signif(x$mean, sigdig))
+         },
+         factor={
+           print(x$table)
+         },
+         complex={
+           splat("\trange: Real",
+                 prange(signif(x$Re$range, sigdig)),
+                 "Imaginary",
+                 prange(signif(x$Im$range, sigdig)))
+           splat("\tintegral =", signif(x$integral, sigdig))
+           splat("\tmean =", signif(x$mean, sigdig))
+         },
+         {
+           print(x$summary)
+         })
+  splat("Underlying network:")
+  print(x$network)
+  return(invisible(NULL))
+}
+
+
+plot.linim <- function(x, ..., style=c("colour", "width"),
+                       scale, adjust=1,
+		       legend=TRUE,
+                       leg.side=c("right", "left", "bottom", "top"),
+                       leg.sep=0.1,
+                       leg.wid=0.1,
+                       leg.args=list(),
+                       leg.scale=1,
+                       do.plot=TRUE) {
+  xname <- short.deparse(substitute(x))
+  style <- match.arg(style)
+  leg.side <- match.arg(leg.side)
+  ribstuff <- list(ribside  = leg.side,
+                   ribsep   = leg.sep,
+                   ribwid   = leg.wid,
+                   ribargs  = leg.args,
+                   ribscale = leg.scale)
+  # colour style: plot as pixel image
+  if(style == "colour" || !do.plot)
+    return(do.call(plot.im,
+                   resolve.defaults(list(x),
+                                    list(...),
+                                    ribstuff,
+                                    list(main=xname,
+				         legend=legend,
+					 do.plot=do.plot))))
+  # width style
+  L <- attr(x, "L")
+  df <- attr(x, "df")
+  Llines <- as.psp(L)
+  W <- as.owin(L)
+  # plan layout
+  if(legend) {
+    # use layout procedure in plot.im
+    z <- do.call(plot.im,
+		 resolve.defaults(list(x, do.plot=FALSE, legend=TRUE),
+                                  list(...),
+                                  ribstuff,
+                                  list(main=xname)))
+    bb.all <- attr(z, "bbox")
+    bb.leg <- attr(z, "bbox.legend")
+  } else {
+    bb.all <- Frame(W)
+    bb.leg <- NULL
+  }
+  legend <- !is.null(bb.leg)
+  if(legend) {
+    # expand plot region to accommodate text annotation in legend
+    if(leg.side %in% c("left", "right")) {
+      delta <- 2 * sidelengths(bb.leg)[1]
+      xmargin <- if(leg.side == "right") c(0, delta) else c(delta, 0)
+      bb.all <- grow.rectangle(bb.all, xmargin=xmargin)
+    }
+  }
+  # initialise plot
+  bb <- do.call.matched(plot.owin,
+                        resolve.defaults(list(x=bb.all, type="n"),
+                                         list(...), list(main=xname)),
+                        extrargs="type")
+  # resolve graphics parameters for polygons
+  grafpar <- resolve.defaults(list(...), list(border=1, col=1))
+  grafpar <- grafpar[names(grafpar) %in% names(formals(polygon))]
+  # rescale values to a plottable range
+  vr <- range(df$values)
+  vr[1L] <- min(0, vr[1L])
+  if(missing(scale)) {
+    maxsize <- mean(distmap(Llines))/2
+    scale <- maxsize/diff(vr)
+  } 
+  df$values <- adjust * scale * (df$values - vr[1L])/2
+  # split data by segment
+  mapXY <- factor(df$mapXY, levels=seq_len(Llines$n))
+  dfmap <- split(df, mapXY, drop=TRUE)
+  # sort each segment's data by position along segment
+  dfmap <- lapply(dfmap, sortalongsegment)
+  # plot each segment's data
+#  Lends <- Llines$ends
+  Lperp <- angles.psp(Llines) + pi/2
+  Lfrom <- L$from
+  Lto   <- L$to
+  Lvert <- L$vertices
+  Ljoined  <- (vertexdegree(L) > 1)
+  # precompute coordinates of dodecagon
+  dodo <- disc(npoly=12)$bdry[[1L]]
+  #
+  for(i in seq_along(dfmap)) {
+    z <- dfmap[[i]]
+    segid <- unique(z$mapXY)[1L]
+    xx <- z$x
+    yy <- z$y
+    vv <- z$values
+    # add endpoints of segment
+    ileft <- Lfrom[segid]
+    iright <- Lto[segid]
+    leftend <- Lvert[ileft]
+    rightend <- Lvert[iright]
+    xx <- c(leftend$x, xx, rightend$x)
+    yy <- c(leftend$y, yy, rightend$y)
+    vv <- c(vv[1L],     vv, vv[length(vv)])
+    rleft <- vv[1L]
+    rright <- vv[length(vv)]
+    # draw polygon around segment
+    xx <- c(xx, rev(xx))
+    yy <- c(yy, rev(yy))
+    vv <- c(vv, -rev(vv))
+    ang <- Lperp[segid]
+    xx <- xx + cos(ang) * vv
+    yy <- yy + sin(ang) * vv
+    ## first add dodecagonal 'joints'
+    if(Ljoined[ileft] && rleft > 0) 
+      do.call(polygon,
+              append(list(x=rleft * dodo$x + leftend$x,
+                          y=rleft * dodo$y + leftend$y),
+                     grafpar))
+    if(Ljoined[iright] && rright > 0)
+      do.call(polygon,
+              append(list(x=rright * dodo$x + rightend$x,
+                          y=rright * dodo$y + rightend$y),
+                     grafpar))
+    # now draw the main polygon outlining the segment
+    do.call(polygon, append(list(x=xx, y=yy), grafpar))
+  }
+  result <- adjust * scale
+  attr(result, "bbox") <- bb
+  if(legend) {
+    attr(result, "bbox.legend") <- bb.leg
+    ## get graphical arguments
+    grafpar <- resolve.defaults(leg.args, grafpar)
+    grafpar <- grafpar[names(grafpar) %in% names(formals(polygon))]
+    ## set up scale of typical pixel values
+    gvals <- leg.args$at %orifnull% prettyinside(range(x))
+    # corresponding widths
+    wvals <- adjust * scale * gvals
+    # glyph positions
+    ng <- length(gvals)
+    xr <- bb.leg$xrange
+    yr <- bb.leg$yrange
+    switch(leg.side,
+           right = ,
+	   left = {
+	     y <- seq(yr[1], yr[2], length.out=ng+1L)
+	     y <- (y[-1L] + y[-(ng+1L)])/2
+	     for(j in 1:ng) {
+               xx <- xr[c(1L,2L,2L,1L)]
+	       yy <- (y[j] + c(-1,1) * wvals[j]/2)[c(1L,1L,2L,2L)]
+	       do.call(polygon, append(list(xx, yy), grafpar))
+	     }
+	   },
+	   bottom = ,
+	   top = {
+	     x <- seq(xr[1], xr[2], length.out=ng+1L)
+	     x <- (x[-1L] + x[-(ng+1L)])/2
+	     for(j in 1:ng) {
+	       xx <- (x[j] + c(-1,1) * wvals[j]/2)[c(1L,1L,2L,2L)]
+               yy <- yr[c(1L,2L,2L,1L)]
+	       do.call(polygon, append(list(xx, yy), grafpar))
+	     }
+	   })
+     # add text labels
+     check.1.real(leg.scale)
+     glabs <- leg.args$labels %orifnull% signif(leg.scale * gvals, 2)
+     switch(leg.side,
+            right  = text(xr[2], y,     pos=4, labels=glabs),
+            left   = text(xr[1], y,     pos=2, labels=glabs),
+	    bottom = text(x,     yr[1], pos=1, labels=glabs),
+	    top    = text(x,     yr[2], pos=3, labels=glabs))
+  }
+  return(invisible(result))
+}
+
+sortalongsegment <- function(df) {
+  df[fave.order(df$tp), , drop=FALSE]
+}
+
+as.im.linim <- function(X, ...) {
+  attr(X, "L") <- attr(X, "df") <- NULL
+  class(X) <- "im"
+  if(length(list(...)) > 0)
+    X <- as.im(X, ...)
+  return(X)
+}
+
+as.linim <- function(X, ...) {
+  UseMethod("as.linim")
+}
+
+as.linim.default <- function(X, L, ..., eps = NULL, dimyx = NULL, xy = NULL,
+                                        delta = NULL) {
+  stopifnot(inherits(L, "linnet"))
+  Y <- as.im(X, W=as.rectangle(as.owin(L)), ..., eps=eps, dimyx=dimyx, xy=xy)
+  M <- as.mask.psp(as.psp(L), as.owin(Y))
+  Y[complement.owin(M)] <- NA
+  df <- NULL
+  if(!is.null(delta)) {
+    df <- pointsAlongNetwork(L, delta)
+    pix <- nearest.valid.pixel(df$x, df$y, Y)
+    df$xc <- Y$xcol[pix$col]
+    df$yc <- Y$yrow[pix$row]
+    df$values <- Y$v[cbind(pix$row, pix$col)]
+    df <- df[,c("xc", "yc", "x", "y", "seg", "tp", "values")]
+    names(df)[names(df) == "seg"] <- "mapXY"
+  }
+  out <- linim(L, Y, df=df)
+  return(out)
+}
+
+pointsAlongNetwork <- local({
+
+  pointsAlongNetwork <- function(L, delta) {
+    #' sample points evenly spaced along each segment
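+    #' e.g. (a hedged sketch): pointsAlongNetwork(simplenet, delta=0.05)
+    #' returns a data frame with columns x, y, seg, tp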
+    stopifnot(inherits(L, "linnet"))
+    S <- as.psp(L)
+    ns <- nsegments(S)
+    seglen <- lengths.psp(S)
+    ends <- as.data.frame(S)
+    nsample <- pmax(1, ceiling(seglen/delta))
+    df <- NULL
+    x0 <- ends$x0
+    y0 <- ends$y0
+    x1 <- ends$x1
+    y1 <- ends$y1
+    for(i in seq_len(ns)) {
+      nn <- nsample[i] + 1L
+      tcut <- seq(0, 1, length.out=nn)
+      tp <- (tcut[-1] + tcut[-nn])/2
+      x <- x0[i] * (1-tp) + x1[i] * tp
+      y <- y0[i] * (1-tp) + y1[i] * tp
+      df <- rbind(df, data.frame(x=x, y=y, seg=i, tp=tp))
+    }
+    return(df)          
+  }
+
+  pointsAlongNetwork
+})
+
+as.linim.linim <- function(X, ...) {
+  if(length(list(...)) == 0)
+    return(X)
+  Y <- as.linim.default(X, as.linnet(X), ...)
+  return(Y)
+}
+
+# analogue of eval.im
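+# Example (a hedged sketch):
+#   X <- as.linim(function(x, y) { x }, L=simplenet)
+#   Y <- eval.linim(X^2 + 3)   # transforms pixel values and sample points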
+
+eval.linim <- function(expr, envir, harmonize=TRUE) {
+  sc <- sys.call()
+  # Get names of all variables in the expression
+  e <- as.expression(substitute(expr))
+  varnames <- all.vars(e)
+  allnames <- all.names(e, unique=TRUE)
+  funnames <- allnames[!(allnames %in% varnames)]
+  if(length(varnames) == 0)
+    stop("No variables in this expression")
+  # get the values of the variables
+  if(missing(envir)) {
+    envir <- parent.frame() # WAS: sys.parent()
+  } else if(is.list(envir)) {
+    envir <- list2env(envir, parent=parent.frame())
+  }
+  vars <- mget(varnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL))
+  funs <- mget(funnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL))
+  # Find out which variables are (linear) images
+  islinim <- unlist(lapply(vars, inherits, what="linim"))
+  if(!any(islinim))
+    stop("There are no linear images (class linim) in this expression")
+  # ....................................
+  # Evaluate the pixel values using eval.im
+  # ....................................
+  sc[[1L]] <- as.name('eval.im')
+  sc$envir <- envir
+  Y <- eval(sc)
+  # .........................................
+  # Then evaluate data frame entries if feasible
+  # .........................................
+  dfY <- NULL
+  linims <- vars[islinim]
+  nlinims <- length(linims)
+  dframes <- lapply(linims, attr, which="df")
+  nets <- lapply(linims, attr, which="L")
+  isim <- unlist(lapply(vars, is.im))
+  if(!any(isim & !islinim)) {
+    # all images are 'linim' objects
+    # Check that the images refer to the same linear network
+    if(nlinims > 1) {
+      agree <- unlist(lapply(nets[-1L], identical, y=nets[[1L]]))
+      if(!all(agree))
+        stop(paste("Images do not refer to the same linear network"))
+    }
+    dfempty <- unlist(lapply(dframes, is.null))
+    if(!any(dfempty)) {
+      # ensure data frames are compatible
+      if(length(dframes) > 1 && (
+          length(unique(nr <- sapply(dframes, nrow))) > 1   ||
+           !allElementsIdentical(dframes, "seg")            ||
+   	   !allElementsIdentical(dframes, "tp")
+	)) {
+        # find the one with finest spacing
+	imax <- which.max(nr)
+	# resample the others
+	dframes[-imax] <- lapply(dframes[-imax],
+	                         resampleNetworkDataFrame,
+	                         template=dframes[[imax]])
+      }
+      # replace each image variable by its data frame column of values
+      vars[islinim] <- lapply(dframes, getElement, "values")
+      # now evaluate expression
+      Yvalues <- eval(e, append(vars, funs))
+      # pack up
+      dfY <- dframes[[1L]]
+      dfY$values <- Yvalues
+    }
+  }
+  result <- linim(nets[[1L]], Y, df=dfY)
+  return(result)
+}
+
+resampleNetworkDataFrame <- function(df, template) {
+  # resample 'df' at the points of 'template'
+  invalues  <- df$values
+  insegment <- df$mapXY
+  inteepee  <- df$tp
+  out <- template
+  n <- nrow(out)
+  outvalues <- vector(mode = typeof(invalues), length=n)
+  outsegment <- out$mapXY
+  outteepee  <- out$tp
+  for(i in seq_len(n)) {
+    relevant <- which(insegment == outsegment[i])
+    if(length(relevant) > 0) {
+      j <- which.min(abs(inteepee[relevant] - outteepee[i]))
+      outvalues[i] <- invalues[relevant[j]]
+    }
+  }
+  out$values <- outvalues
+  return(out)
+}
+
+as.linnet.linim <- function(X, ...) {
+  attr(X, "L")
+}
+
+"[.linim" <- function(x, i, ..., drop=TRUE) {
+  if(!missing(i) && is.lpp(i)) {
+    n <- npoints(i)
+    result <- vector(mode=typeof(x$v), length=n)
+    if(n == 0) return(result)
+    if(!is.null(df <- attr(x, "df"))) {
+      #' use data frame of sample points along network
+      knownseg <- df$mapXY
+      knowntp  <- df$tp
+      knownval <- df$values
+      #' extract local coordinates of query points
+      coo <- coords(i)
+      queryseg <- coo$seg
+      querytp  <- coo$tp
+      #' match to nearest sample point
+      for(j in 1:n) {
+        relevant <- (knownseg == queryseg[j])
+        if(!any(relevant)) {
+          result[j] <- NA
+        } else {
+          k <- which.min(abs(knowntp[relevant] - querytp[j]))
+          result[j] <- knownval[relevant][k]
+        }
+      }
+      if(drop && anyNA(result))
+        result <- result[!is.na(result)]
+      return(result)
+    }
+    #' give up and use pixel image
+  }
+  #' apply subset method for 'im'
+  y <- NextMethod("[")
+  if(!is.im(y)) return(y) # vector of pixel values
+  #' handle linear network info
+  L <- attr(x, "L")
+  df <- attr(x, "df")
+  #' clip to new window
+  W <- Window(y)
+  attr(y, "L") <- L[W]
+  attr(y, "df") <- df[inside.owin(df$xc, df$yc, W), , drop=FALSE]
+  return(y)
+}
+
+integral.linim <- function(f, domain=NULL, ...){
+  verifyclass(f, "linim")
+  if(!is.null(domain)) 
+    f <- f[domain]
+  #' extract data
+  L <- as.linnet(f)
+  ns <- nsegments(L)
+  df <- attr(f, "df")
+  vals <- df$values
+  seg <- factor(df$mapXY, levels=1:ns)
+  #' ensure each segment has at least one sample point
+  nper <- table(seg)
+  if(any(missed <- (nper == 0))) {
+    missed <- unname(which(missed))
+    xy <- midpoints.psp(as.psp(L)[missed])
+    valxy <- f[xy]
+    seg <- c(seg, factor(missed, levels=1:ns))
+    vals <- c(vals, valxy)
+  }
+  #' take average of data on each segment
+  mu <- as.numeric(by(vals, seg, mean, ..., na.rm=TRUE))
+  mu[is.na(mu)] <- 0
+  #' weighted sum
+  len <- lengths.psp(as.psp(L))
+  if(anyNA(vals)) {
+    p <- as.numeric(by(!is.na(vals), seg, mean, ..., na.rm=TRUE))
+    p[is.na(p)] <- 0
+    len <- len * p
+  }
+  return(sum(mu * len))
+}
+
+mean.linim <- function(x, ...) {
+  trap.extra.arguments(...)
+  integral(x)/sum(lengths.psp(as.psp(as.linnet(x))))
+}
+
+quantile.linim <- function(x, probs = seq(0,1,0.25), ...) {
+  verifyclass(x, "linim")
+  #' extract data
+  df <- attr(x, "df")
+  L <- as.linnet(x)
+  vals <- df$values
+  #' count sample points on each segment
+  seg <- factor(df$mapXY, levels=1:nsegments(L))
+  nvals <- table(seg)
+  #' calculate weights
+  len <- lengths.psp(as.psp(L))
+  iseg <- as.integer(seg)
+  wts <- len[iseg]/nvals[iseg]
+  return(weighted.quantile(vals, wts, probs))
+}
+
+median.linim <- function(x, ...) {
+  trap.extra.arguments(...)
+  return(unname(quantile(x, 0.5)))
+}
+
+shift.linim <- function (X, ...) {
+  verifyclass(X, "linim")
+  Z <- shift(as.im(X), ...)
+  L <- shift(as.linnet(X), ...)
+  v <- getlastshift(L)
+  df <- attr(X, "df")
+  df[,c("xc","yc")] <- shiftxy(df[,c("xc", "yc")], v)
+  df[,c("x","y")]   <- shiftxy(df[,c("x", "y")],   v)
+  Y <- linim(L, Z, df=df)
+  return(putlastshift(Y, v))
+}
+
+affine.linim <- function(X, mat = diag(c(1, 1)), vec = c(0, 0), ...) {
+  Z <- affine(as.im(X), mat=mat, vec=vec, ...)
+  L <- affine(as.linnet(X), mat=mat, vec=vec, ...)
+  df <- attr(X, "df")
+  df[,c("xc","yc")] <- affinexy(df[,c("xc", "yc")], mat=mat, vec=vec)
+  df[,c("x","y")]   <- affinexy(df[,c("x", "y")],   mat=mat, vec=vec)
+  Y <- linim(L, Z, df=df)
+  return(Y)
+}
+
+scalardilate.linim <- function(X, f, ..., origin=NULL) {
+  trap.extra.arguments(..., .Context = "In scalardilate(X,f)")
+  check.1.real(f, "In scalardilate(X,f)")
+  stopifnot(is.finite(f) && f > 0)
+  if (!is.null(origin)) {
+    X <- shift(X, origin = origin)
+    negorig <- getlastshift(X)
+  }
+  else negorig <- c(0, 0)
+  Y <- affine(X, mat = diag(c(f, f)), vec = -negorig)
+  return(Y)
+}
+
+as.data.frame.linim <- function(x, ...) {
+  df <- attr(x, "df")
+  if(!is.na(m <- match("mapXY", colnames(df))))
+    colnames(df)[m] <- "seg"
+  return(df)
+}
+
+pairs.linim <- function(..., plot=TRUE, eps=NULL) {
+  argh <- list(...)
+  ## unpack single argument which is a list of images
+  if(length(argh) == 1) {
+    arg1 <- argh[[1L]]
+    if(is.list(arg1) && all(sapply(arg1, is.im)))
+      argh <- arg1
+  }
+  ## identify which arguments are images
+  isim <- sapply(argh, is.im)
+  nim <- sum(isim)
+  if(nim == 0) 
+    stop("No images provided")
+  ## separate image arguments from others
+  imlist <- argh[isim]
+  rest   <- argh[!isim]
+  ## identify which arguments are images on a network
+  islinim <- sapply(imlist, inherits, what="linim")
+  if(!any(islinim)) # shouldn't be here
+    return(pairs.im(argh, plot=plot))
+  ## adjust names
+  imnames <- names(imlist) %orifnull% rep("", length(imlist))
+  if(any(needsname <- !nzchar(imnames))) 
+    imnames[needsname] <- paste0("V", seq_len(nim)[needsname])
+  names(imlist) <- imnames
+  ## choose resolution
+  if(is.null(eps)) {
+    xstep <- min(sapply(imlist, getElement, name="xstep"))
+    ystep <- min(sapply(imlist, getElement, name="ystep"))
+    eps <- min(xstep, ystep)
+  }
+  ## extract linear network
+  Z1 <- imlist[[min(which(islinim))]]
+  L <- as.linnet(Z1)
+  ## construct equally-spaced sample points
+  X <- pointsOnLines(as.psp(L), eps=eps)
+  ## sample each image
+  pixvals <- lapply(imlist, "[", i=X, drop=FALSE)
+  pixdf <- as.data.frame(pixvals)
+  ## pairs plot
+  if(plot) {
+    if(nim > 1) {
+      do.call(pairs.default, resolve.defaults(list(x=pixdf),
+                                              rest,
+                                              list(labels=imnames, pch=".")))
+      labels <- resolve.defaults(rest, list(labels=imnames))$labels
+      colnames(pixdf) <- labels
+    } else {
+      do.call(hist.default,
+              resolve.defaults(list(x=pixdf[,1L]),
+                               rest,
+                               list(main=paste("Histogram of", imnames[1L]),
+                                    xlab=imnames[1L])))
+    }
+  }
+  class(pixdf) <- unique(c("plotpairsim", class(pixdf)))
+  attr(pixdf, "eps") <- eps
+  return(invisible(pixdf))
+}
diff --git a/R/linnet.R b/R/linnet.R
new file mode 100755
index 0000000..2a8d6e7
--- /dev/null
+++ b/R/linnet.R
@@ -0,0 +1,614 @@
+# 
+# linnet.R
+#    
+#    Linear networks
+#
+#    $Revision: 1.62 $    $Date: 2017/06/05 10:31:58 $
+#
+# An object of class 'linnet' defines a linear network.
+# It includes the following components
+#
+#        vertices     (ppp)      vertices of network
+#
+#        m            (matrix)   adjacency matrix
+#
+#        lines        (psp)      edges of network
+#
+#        dpath        (matrix)   matrix of shortest path distances
+#                                between each pair of vertices
+#
+#        from, to     (vectors)  map from edges to vertices.
+#                                The endpoints of the i-th segment lines[i]
+#                                are vertices[from[i]] and vertices[to[i]]
+#
+#
+#  FUNCTIONS PROVIDED:
+#       linnet        creates an object of class "linnet" from data
+#       print.linnet  print an object of class "linnet"
+#       plot.linnet   plot an object of class "linnet"
+#
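+#
+#  Example (a hedged sketch of the linnet() constructor defined below):
+#    V <- ppp(x=c(0,1,1), y=c(0,0,1), window=owin(c(0,1), c(0,1)))
+#    E <- rbind(c(1,2), c(2,3))   # each row joins a pair of vertex indices
+#    L <- linnet(V, edges=E)      # network with 3 vertices and 2 edges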
+
+# Make an object of class "linnet" from the minimal data
+
+linnet <- function(vertices, m, edges, sparse=FALSE, warn=TRUE) {
+  if(missing(m) && missing(edges))
+    stop("specify either m or edges")
+  if(!missing(m) && !missing(edges))
+    stop("do not specify both m and edges")
+  # validate inputs
+  stopifnot(is.ppp(vertices))
+  nv <- npoints(vertices)
+  if(nv <= 1) {
+    m <- matrix(FALSE, nv, nv)
+  } else if(!missing(m)) {
+    # check logical matrix or logical sparse matrix
+    if(!is.matrix(m) && !inherits(m, c("lgCMatrix", "lgTMatrix")))
+      stop("m should be a matrix or sparse matrix")
+    stopifnot(is.logical(m) && isSymmetric(m))
+    if(nrow(m) != vertices$n)
+      stop("dimensions of matrix m do not match number of vertices")
+    if(any(diag(m))) {
+      warning("diagonal entries of the matrix m should not be TRUE; ignored")
+      diag(m) <- FALSE
+    }
+    sparse <- !is.matrix(m)
+    ## determine 'from' and 'to' vectors
+    ij <- which(m, arr.ind=TRUE)
+    ij <- ij[ ij[,1L] < ij[,2L], , drop=FALSE]
+    from <- ij[,1L]
+    to   <- ij[,2L]
+  } else {
+    # check (from, to) pairs
+    stopifnot(is.matrix(edges) && ncol(edges) == 2)
+    if(any((edges %% 1) != 0))
+      stop("Entries of edges list should be integers")
+    if(any(self <- (edges[,1L] == edges[,2L]))) {
+      warning("edge list should not join a vertex to itself; ignored")
+      edges <- edges[!self, , drop=FALSE]
+    }
+    np <- npoints(vertices)
+    if(any(edges > np))
+      stop("index out-of-bounds in edges list")
+    from <- edges[,1L]
+    to   <- edges[,2L]
+    # convert to adjacency matrix
+    if(!sparse) {
+      m <- matrix(FALSE, np, np)
+      m[edges] <- TRUE
+    } else 
+      m <- sparseMatrix(i=from, j=to, x=TRUE, dims=c(np, np))
+    m <- m | t(m)
+  }
+  # create line segments
+  xx   <- vertices$x
+  yy   <- vertices$y
+  lines <- psp(xx[from], yy[from], xx[to], yy[to], window=vertices$window,
+               check=FALSE)
+  # tolerance
+  toler <- default.linnet.tolerance(lines)
+  ## pack up
+  out <- list(vertices=vertices, m=m, lines=lines, from=from, to=to,
+              sparse=sparse, window=vertices$window,
+              toler=toler)
+  class(out) <- c("linnet", class(out))
+  ## finish ?
+  if(sparse)
+    return(out)
+  # compute matrix of distances between adjacent vertices
+  n <- nrow(m)
+  d <- matrix(Inf, n, n)
+  diag(d) <- 0
+  d[m] <- pairdist(vertices)[m]
+  ## now compute shortest-path distances between each pair of vertices
+  out$dpath <- dpath <- dist2dpath(d)
+  if(warn && any(is.infinite(dpath)))
+    warning("Network is not connected", call.=FALSE)
+  # pre-compute bounding radius 
+  out$boundingradius <- boundingradius(out)
+  return(out)  
+}
+
+print.linnet <- function(x, ...) {
+  nv <- x$vertices$n
+  nl <- x$lines$n
+  splat("Linear network with",
+        nv, ngettext(nv, "vertex", "vertices"), 
+        "and",
+        nl, ngettext(nl, "line", "lines"))
+  if(!is.null(br <- x$boundingradius) && is.infinite(br))
+     splat("[Network is not connected]")
+  print(as.owin(x), prefix="Enclosing window: ")
+  return(invisible(NULL))
+}
+
+summary.linnet <- function(object, ...) {
+  deg <- vertexdegree(object)
+  sparse <- object$sparse %orifnull% is.null(object$dpath)
+  result <- list(nvert = object$vertices$n,
+                 nline = object$lines$n,
+                 nedge = sum(deg)/2,
+                 unitinfo = summary(unitname(object)),
+                 totlength = sum(lengths.psp(object$lines)),
+                 maxdegree = max(deg),
+		 ncomponents = length(levels(connected(object, what="labels"))),
+                 win = as.owin(object),
+                 sparse = sparse)
+  if(!sparse) {
+    result$diam <- diameter(object)
+    result$boundrad <- boundingradius(object)
+  }
+  result$toler <- object$toler
+  class(result) <- c("summary.linnet", class(result))
+  result
+}
+
+print.summary.linnet <- function(x, ...) {
+  dig <- getOption('digits')
+  with(x, {
+    splat("Linear network with",
+          nvert, ngettext(nvert, "vertex", "vertices"), 
+          "and",
+          nline, ngettext(nline, "line", "lines"))
+    splat("Total length", signif(totlength, dig), 
+          unitinfo$plural, unitinfo$explain)
+    splat("Maximum vertex degree:", maxdegree)
+    if(sparse) splat("[Sparse matrix representation]") else
+    	       splat("[Non-sparse matrix representation]")
+    if(ncomponents > 1) {
+      splat("Network is disconnected: ", ncomponents, "connected components")
+    } else {
+      splat("Network is connected")
+      if(!sparse) {
+        splat("Diameter:", signif(diam, dig), unitinfo$plural)
+        splat("Bounding radius:", signif(boundrad, dig), unitinfo$plural)
+      }
+    }
+    if(!is.null(x$toler))
+      splat("Numerical tolerance:", signif(x$toler, dig), unitinfo$plural)
+    print(win, prefix="Enclosing window: ")
+  })
+  return(invisible(NULL))
+}
+
+plot.linnet <- function(x, ..., main=NULL, add=FALSE,
+                        vertices=FALSE, window=FALSE,
+                        do.plot=TRUE) {
+  if(is.null(main))
+    main <- short.deparse(substitute(x))
+  stopifnot(inherits(x, "linnet"))
+  bb <- Frame(x)
+  if(!do.plot) return(invisible(bb))
+  lines <- as.psp(x)
+  if(!add) {
+    # initialise new plot
+    w <- as.owin(lines)
+    if(window)
+      plot(w, ..., main=main)
+    else
+      plot(w, ..., main=main, type="n")
+  }
+  # plot segments and (optionally) vertices
+  do.call(plot,
+          resolve.defaults(list(x=lines,
+                                show.all=FALSE, add=TRUE,
+                                main=main),
+                           list(...)))
+  if(vertices)
+    plot(x$vertices, add=TRUE)
+  return(invisible(bb))
+}
+
+as.psp.linnet <- function(x, ..., fatal=TRUE) {
+  verifyclass(x, "linnet", fatal=fatal)
+  return(x$lines)
+}
+
+vertices.linnet <- function(w) {
+  verifyclass(w, "linnet")
+  return(w$vertices)
+}
+
+nvertices.linnet <- function(x, ...) {
+  verifyclass(x, "linnet")
+  return(x$vertices$n)
+}
+
+nsegments.linnet <- function(x) {
+  return(x$lines$n)
+}
+
+Window.linnet <- function(X, ...) {
+  return(X$window)
+}
+
+"Window<-.linnet" <- function(X, ..., check=TRUE, value) {
+  if(check) {
+    X <- X[value]
+  } else {
+    X$window <- value
+    X$lines$window <- value
+    X$vertices$window <- value
+  }
+  return(X)
+}
+
+as.owin.linnet <- function(W, ...) {
+  return(Window(W))
+}
+
+as.linnet <- function(X, ...) {
+  UseMethod("as.linnet")
+}
+
+as.linnet.linnet <- function(X, ..., sparse) {
+  if(missing(sparse)) return(X)
+  if(is.null(X$sparse)) X$sparse <- is.null(X$dpath)
+  if(sparse && !(X$sparse)) {
+    # delete distance matrix
+    X$dpath <- NULL
+    # convert adjacency matrix to sparse matrix
+    X$m <- as(X$m, "sparseMatrix")
+    X$sparse <- TRUE
+  } else if(!sparse && X$sparse) {
+    # convert adjacency to matrix
+    X$m <- m <- as.matrix(X$m)
+    edges <- which(m, arr.ind=TRUE)
+    from <- edges[,1L]
+    to   <- edges[,2L]
+    # compute distances to one-step neighbours
+    n <- nrow(m)
+    d <- matrix(Inf, n, n)
+    diag(d) <- 0
+    coo <- coords(vertices(X))
+    d[edges] <- sqrt(rowSums((coo[from, 1:2] - coo[to, 1:2])^2))
+    # compute shortest path distance matrix
+    X$dpath <- dist2dpath(d)
+    # compute bounding radius
+    X$boundingradius <- boundingradius(X)
+    X$sparse <- FALSE
+  } else if(!sparse) {
+    # possibly update internals
+    X$boundingradius <- boundingradius(X)
+  }
+  # possibly update internals
+  X$circumradius <- NULL
+  X$toler <- default.linnet.tolerance(X)
+  return(X)
+}
+
+as.linnet.psp <- local({
+  
+  as.linnet.psp <- function(X, ..., eps, sparse=FALSE) {
+    X <- selfcut.psp(X)
+    V <- unique(endpoints.psp(X))
+    if(missing(eps) || is.null(eps)) {
+      eps <- sqrt(.Machine$double.eps) * diameter(Frame(X))
+    } else {
+      check.1.real(eps)
+      stopifnot(eps >= 0)
+    }
+    if(eps > 0 && minnndist(V) <= eps) {
+      gV <- marks(connected(V, eps))
+      xy <- split(coords(V), gV)
+      mxy <- lapply(xy, centro)
+      V <- do.call(superimpose, append(unname(mxy), list(W=Window(X))))
+    }
+    first  <- endpoints.psp(X, "first")
+    second <- endpoints.psp(X, "second")
+    from <- nncross(first, V, what="which")
+    to   <- nncross(second, V, what="which")
+    nontrivial <- (from != to)
+    join <- cbind(from, to)[nontrivial, , drop=FALSE]
+    result <- linnet(V, edges=join, sparse=sparse)
+    if(is.marked(X)) marks(result$lines) <- marks(X[nontrivial])
+    return(result)
+  }
+
+  centro <- function(X) as.list(apply(X, 2, mean))
+  
+  as.linnet.psp
+})
+
+
+unitname.linnet <- function(x) {
+  unitname(x$window)
+}
+
+"unitname<-.linnet" <- function(x, value) {
+  w <- x$window
+  v <- x$vertices
+  l <- x$lines
+  unitname(w) <- unitname(v) <- unitname(l) <- value
+  x$window <- w
+  x$vertices <- v
+  x$lines <- l
+  return(x)
+}
+
+diameter.linnet <- function(x) {
+  stopifnot(inherits(x, "linnet"))
+  dpath <- x$dpath
+  if(is.null(dpath)) return(NULL) else return(max(0, dpath))
+}
+
+volume.linnet <- function(x) {
+  sum(lengths.psp(x$lines))
+}
+
+vertexdegree <- function(x) {
+  verifyclass(x, "linnet")
+  return(rowSums(x$m))
+}
+
+circumradius.linnet <- function(x, ...) {
+  .Deprecated("boundingradius.linnet")
+  boundingradius.linnet(x, ...)
+}
+
+boundingradius.linnet <- function(x, ...) {
+  stopifnot(inherits(x, "linnet"))
+  cr <- x$boundingradius %orifnull% x$circumradius
+  if(!is.null(cr))
+    return(cr)
+  dpath <- x$dpath
+  if(is.null(dpath)) return(NULL)
+  if(any(is.infinite(dpath))) return(Inf)
+  if(nrow(dpath) <= 1)
+    return(max(0,dpath))
+  from  <- x$from
+  to    <- x$to
+  lines <- x$lines
+  nseg  <- lines$n
+  leng  <- lengths.psp(lines)
+  if(spatstat.options("Clinearradius")) {
+    fromC <- from - 1L
+    toC   <- to - 1L
+    nv <- npoints(vertices(x))
+    huge <- sum(leng)
+    z <- .C("linearradius",
+            ns = as.integer(nseg),
+            from = as.integer(fromC),
+            to = as.integer(toC),
+            lengths = as.double(leng),
+            nv = as.integer(nv), 
+            dpath = as.double(dpath), 
+            huge = as.double(huge), 
+            result = as.double(numeric(1)),
+            PACKAGE = "spatstat")
+    return(z$result)
+  }
+  sA <- sB <- matrix(Inf, nseg, nseg)
+  for(i in 1:nseg) {
+    # endpoints of segment i
+    A <- from[i]
+    B <- to[i]
+    AB <- leng[i]
+    sA[i,i] <- sB[i,i] <- AB/2
+    for(j in (1:nseg)[-i]) {
+    # endpoints of segment j
+      C <- from[j]
+      D <- to[j]
+      CD <- leng[j]
+      AC <- dpath[A,C]
+      AD <- dpath[A,D]
+      BC <- dpath[B,C]
+      BD <- dpath[B,D]
+      # max dist from A to any point in segment j
+      sA[i,j] <- if(AD > AC + CD) AC + CD else
+                if(AC > AD + CD) AD + CD else
+                (AC + AD + CD)/2
+      # max dist from B to any point in segment j
+      sB[i,j] <- if(BD > BC + CD) BC + CD else
+                if(BC > BD + CD) BD + CD else
+                (BC + BD + CD)/2
+    }
+  }
+  # max dist from each A to any point in another segment
+  mA <- apply(sA, 1, max)
+  # max dist from each B to any point in another segment
+  mB <- apply(sB, 1, max)
+  # min of these
+  min(mA, mB)
+}
+
+
+
+####################################################
+# affine transformations
+####################################################
+
+scalardilate.linnet <- function(X, f, ...) {
+  trap.extra.arguments(..., .Context="In scalardilate(X,f)")
+  check.1.real(f, "In scalardilate(X,f)")
+  stopifnot(is.finite(f) && f > 0)
+  Y <- X
+  Y$vertices     <- scalardilate(X$vertices, f=f)
+  Y$lines        <- scalardilate(X$lines, f=f)
+  Y$window       <- scalardilate(X$window, f=f)
+  if(!is.null(X$dpath)) {
+    Y$dpath        <- f * X$dpath
+    Y$boundingradius <- f * (X$boundingradius %orifnull% X$circumradius)
+    Y$circumradius <- NULL
+  }
+  if(!is.null(X$toler))
+    Y$toler <- makeLinnetTolerance(f * X$toler)
+  return(Y)
+}
+
+affine.linnet <- function(X,  mat=diag(c(1,1)), vec=c(0,0), ...) {
+  verifyclass(X, "linnet")
+  if(length(unique(eigen(mat)$values)) == 1) {
+    # transformation is an isometry
+    scal <- sqrt(abs(det(mat)))
+    Y <- X
+    Y$vertices     <- affine(X$vertices, mat=mat, vec=vec, ...)
+    Y$lines        <- affine(X$lines,    mat=mat, vec=vec, ...)
+    Y$window       <- affine(X$window,   mat=mat, vec=vec, ...)
+    if(!is.null(X$dpath)) {
+      Y$dpath        <- scal * X$dpath
+      Y$boundingradius <- scal * (X$boundingradius %orifnull% X$circumradius)
+      Y$circumradius <- NULL
+    }
+    if(!is.null(Y$toler))
+      Y$toler <- makeLinnetTolerance(scal * Y$toler)
+  } else {
+    # general case
+    vertices <- affine(X$vertices, mat=mat, vec=vec, ...)
+    Y <- linnet(vertices, edges=cbind(X$from, X$to))
+  }
+  return(Y)
+}
+
+shift.linnet <- function(X, vec=c(0,0), ..., origin=NULL) {
+  verifyclass(X, "linnet")
+  Y <- X
+  Y$window  <- W <- shift(X$window, vec=vec, ..., origin=origin)
+  v <- getlastshift(W)
+  Y$vertices <- shift(X$vertices, vec=v, ...)
+  Y$lines    <- shift(X$lines, vec=v, ...)
+  # tack on shift vector
+  attr(Y, "lastshift") <- v
+  return(Y)
+}
+
+rotate.linnet <- function(X, angle=pi/2, ..., centre=NULL) {
+  verifyclass(X, "linnet")
+  if(!is.null(centre)) {
+    X <- shift(X, origin=centre)
+    negorigin <- getlastshift(X)
+  } else negorigin <- NULL
+  Y <- X
+  Y$vertices <- rotate(X$vertices, angle=angle, ...)
+  Y$lines    <- rotate(X$lines, angle=angle, ...)
+  Y$window   <- rotate(X$window, angle=angle, ...)
+  if(!is.null(negorigin))
+    Y <- shift(Y, -negorigin)
+  return(Y)
+}
+
+rescale.linnet <- function(X, s, unitname) {
+  if(missing(unitname)) unitname <- NULL
+  if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier
+  Y <- scalardilate(X, f=1/s)
+  unitname(Y) <- rescale(unitname(X), s, unitname)
+  return(Y)
+}
+
+"[.linnet" <- function(x, i, ..., snip=TRUE) {
+  if(!is.owin(i))
+    stop("In [.linnet: the index i should be a window", call.=FALSE)
+  w <- i
+  ## Find vertices that lie inside window
+  vertinside <- inside.owin(x$vertices, w=w)
+  from <- x$from
+  to   <- x$to
+  if(snip) {
+    ## For efficiency, first restrict network to relevant segments.
+    ## Find segments EITHER OF whose endpoints lie in 'w'
+    okedge <- vertinside[from] | vertinside[to]
+    ## extract relevant subset of network graph
+    x <- thinNetwork(x, retainedges=okedge)
+    ## Now add vertices at crossing points with boundary of 'w'
+    b <- crossing.psp(as.psp(x), edges(w))
+    x <- insertVertices(x, unique(b))
+    boundarypoints <- attr(x, "id")
+    ## update data
+    from <- x$from
+    to   <- x$to
+    vertinside <- inside.owin(x$vertices, w=w)
+    vertinside[boundarypoints] <- TRUE
+  }
+  ## find segments whose endpoints BOTH lie in 'w'
+  edgeinside <- vertinside[from] & vertinside[to]
+  ## extract relevant subset of network
+  xnew <- thinNetwork(x, retainedges=edgeinside)
+  ## adjust window efficiently
+  Window(xnew, check=FALSE) <- w
+  return(xnew)
+}
+
+#
+# interactive plot for linnet objects
+#
+
+iplot.linnet <- function(x, ..., xname) {
+  if(missing(xname))
+    xname <- short.deparse(substitute(x))
+  if(!inherits(x, "linnet"))
+    stop("x should be a linnet object")
+  ## predigest
+  v <- vertices(x)
+  deg <- vertexdegree(x)
+  dv <- textstring(v, txt=paste(deg))
+  y <- layered(lines=as.psp(x),
+               vertices=v,
+               degree=dv)
+  iplot(y, ..., xname=xname, visible=c(TRUE, FALSE, FALSE))
+}
+
+pixellate.linnet <- function(x, ...) {
+  pixellate(as.psp(x), ...)
+}
+
+connected.linnet <- function(X, ..., what=c("labels", "components")) {
+  verifyclass(X, "linnet")
+  what <- match.arg(what)
+  nv <- npoints(vertices(X))
+  ie <- X$from - 1L
+  je   <- X$to - 1L
+  ne <- length(ie)
+  zz <- .C("cocoGraph",
+           nv = as.integer(nv),
+           ne = as.integer(ne), 
+           ie = as.integer(ie),
+           je = as.integer(je),
+           label = as.integer(integer(nv)), 
+           status = as.integer(integer(1L)),
+           PACKAGE = "spatstat")
+  if (zz$status != 0) 
+    stop("Internal error: connected.linnet did not converge")
+  lab <- zz$label + 1L
+  lab <- as.integer(factor(lab))
+  lab <- factor(lab)
+  if(what == "labels")
+    return(lab)
+  nets <- list()
+  subsets <- split(seq_len(nv), lab)
+  for(i in seq_along(subsets)) 
+    nets[[i]] <- thinNetwork(X, retainvertices=subsets[[i]])
+  return(nets)
+}
+
+is.connected.linnet <- function(X, ...) {
+  if(!is.null(dpath <- X$dpath))
+    return(all(is.finite(dpath)))
+  lab <- connected(X, what="labels")
+  npieces <- length(levels(lab))
+  return(npieces == 1)
+}
+
+crossing.linnet <- function(X, Y) {
+  X <- as.linnet(X)
+  if(!inherits(Y, c("linnet", "infline", "psp")))
+    stop("Y should be an object of class psp, linnet or infline", call.=FALSE)
+  ## convert infinite lines to segments
+  if(inherits(Y, "linnet")) Y <- as.psp(Y)
+  if(inherits(Y, "infline")) {
+    Y <- clip.infline(Y, Frame(X))
+    id <- marks(Y)
+    lev <- levels(id)
+  } else {
+    id <- lev <- seq_len(nsegments(Y))
+  }
+  ## extract segments of network
+  S <- as.psp(X)
+  ## find crossing points
+  SY <- crossing.psp(S, Y, fatal=FALSE, details=TRUE)
+  if(is.null(SY) || npoints(SY) == 0)
+    return(lpp(L=X))
+  SY <- as.data.frame(SY)
+  Z <- with(SY,
+            as.lpp(x=x, y=y, seg=iA, tp=tA, L=X,
+                   marks=factor(id[as.integer(jB)], levels=lev)))
+  return(Z)
+}
diff --git a/R/linnetsurgery.R b/R/linnetsurgery.R
new file mode 100644
index 0000000..53929a6
--- /dev/null
+++ b/R/linnetsurgery.R
@@ -0,0 +1,200 @@
+#'
+#'   linnetsurgery.R
+#'
+#' Surgery on linear networks and related objects
+#'
+#' $Revision: 1.11 $  $Date: 2017/06/05 10:31:58 $
+#'
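+#' Example (a hedged sketch): add a vertex at the midpoint of segment 1
+#' of the 'simplenet' network:
+#'   L2 <- insertVertices(simplenet, seg=1, tp=0.5)
+#'   attr(L2, "id")   # serial numbers of the newly added vertices
+#'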
+
+insertVertices <- function(L, ...) {
+  if(!inherits(L, c("lpp", "linnet")))
+    stop("L should be a linear network (linnet) or point pattern (lpp)",
+         call.=FALSE)
+  if(haspoints <- is.lpp(L)) {
+    X <- L
+    L <- as.linnet(L)
+    cooXnew <- cooXold <- coords(X) 
+    segXold <- cooXold$seg 
+    tpXold  <- cooXold$tp  
+  }
+  ## validate new vertices
+  V <- as.lpp(..., L=L)
+  if(!identical(as.linnet(L, sparse=TRUE), as.linnet(V, sparse=TRUE)))
+    stop("New vertices must lie on exactly the same network as L")
+  if(npoints(V) == 0) {
+    attr(L, "id") <- integer(0)
+    if(!haspoints) {
+      return(L)
+    } else {
+      X$domain <- L
+      return(X)
+    }
+  }
+  ## extract new vertex coordinates
+  co <- coords(V)
+  seg <- co$seg
+  tp <- co$tp
+  ## determine which segments will be split,
+  ## and compute new serial numbers for the un-split segments
+  splitsegments <- sort(unique(seg))
+  notsplit <- rep(TRUE, nsegments(L))
+  notsplit[splitsegments] <- FALSE
+  segmap <- cumsum(notsplit)
+  nunsplit <- sum(notsplit)
+  ## existing vertices
+  v <- L$vertices
+  n <- npoints(v)
+  ## initialise
+  nadd <- 0
+  vadd <- list(x=numeric(0), y=numeric(0))
+  fromadd <- toadd <- id <- integer(0)
+  ## split segments containing new vertices
+  for(theseg in splitsegments) {
+    ## find new vertices lying on segment 'theseg'
+    i <- L$from[theseg]
+    j <- L$to[theseg]
+    those <- (seg == theseg)
+    idthose <- which(those)
+    ## order the new vertices along this segment
+    tt <- tp[those]
+    oo <- order(tt)
+    tt <- tt[oo]
+    idadd <- idthose[oo]
+    ## make new vertices
+    nnew <- length(tt)
+    xnew <- with(v, x[i] + tt * diff(x[c(i,j)]))
+    ynew <- with(v, y[i] + tt * diff(y[c(i,j)]))
+    vnew <- list(x=xnew, y=ynew)
+    ## make new edges
+    kk <- n + nadd + (1:nnew)
+    fromnew <- c(i, kk)
+    tonew   <- c(kk, j)
+    nnewseg <- nnew + 1
+    ## add new vertices and edges to running total
+    nadd <- nadd + nnew
+    vadd <- concatxy(vadd, vnew)
+    fromadd <- c(fromadd, fromnew)
+    toadd <- c(toadd, tonew)
+    id <- c(id, idadd)
+    ## handle data points if any
+    if(haspoints && any(relevant <- (segXold == theseg))) {
+      tx <- tpXold[relevant]
+      ttt <- c(0, tt, 1)
+      m <- findInterval(tx, ttt, rightmost.closed=TRUE, all.inside=TRUE)
+      t0 <- ttt[m]
+      t1 <- ttt[m+1L]
+      tpXnew <- (tx - t0)/(t1-t0)
+      tpXnew <- pmin(1, pmax(0, tpXnew))
+      n0 <- nunsplit + length(fromadd) - nnewseg
+      segXnew <- n0 + m
+      cooXnew$seg[relevant] <- segXnew
+      cooXnew$tp[relevant] <- tpXnew
+    }
+  }
+  newfrom <- c(L$from[-splitsegments], fromadd)
+  newto   <- c(L$to[-splitsegments], toadd)
+  newv <- superimpose(v, vadd, check=FALSE)
+  Lnew <- linnet(newv, edges=cbind(newfrom, newto),
+                 sparse=identical(L$sparse, TRUE))
+  newid <- integer(nadd)
+  newid[id] <- n + 1:nadd
+  attr(Lnew, "id") <- newid
+  if(!haspoints)
+    return(Lnew)
+  ## adjust segment id for data points on segments that were not split
+  Xnotsplit <- notsplit[segXold]
+  cooXnew$seg[Xnotsplit] <- segmap[segXold[Xnotsplit]]
+  Xnew <- lpp(cooXnew, Lnew)
+  marks(Xnew) <- marks(X)
+  attr(Xnew, "id") <- newid
+  return(Xnew)
+}
+
+thinNetwork <- function(X, retainvertices, retainedges) {
+  ## thin a network by retaining only the specified edges and/or vertices 
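+  ## e.g. (a hedged sketch): thinNetwork(simplenet, retainedges=1:3)
+  ## keeps segments 1 to 3 plus the vertices they require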
+  if(!inherits(X, c("linnet", "lpp")))
+    stop("X should be a linnet or lpp object", call.=FALSE)
+  gotvert <- !missing(retainvertices)
+  gotedge <- !missing(retainedges)
+  if(!gotedge && !gotvert)
+    return(X)
+  L <- as.linnet(X)
+  from <- L$from
+  to   <- L$to
+  V <- L$vertices
+  sparse <- identical(L$sparse, TRUE)
+  edgesFALSE <- logical(nsegments(L))
+  verticesFALSE <- logical(npoints(V))
+  if(!gotedge) {
+    retainedges <- edgesFALSE
+  } else if(!is.logical(retainedges)) {
+    z <- edgesFALSE
+    z[retainedges] <- TRUE
+    retainedges <- z
+  }
+  if(!gotvert) {
+    retainvertices <- verticesFALSE
+  } else if(!is.logical(retainvertices)) {
+    z <- verticesFALSE
+    z[retainvertices] <- TRUE
+    retainvertices <- z
+  }
+  if(gotvert && !gotedge) {
+    ## retain all edges between retained vertices
+    retainedges <- retainvertices[from] & retainvertices[to]
+  } else if(gotedge) {
+    ## retain vertices required for the retained edges
+    retainvertices[from[retainedges]] <- TRUE
+    retainvertices[to[retainedges]]   <- TRUE
+  }
+  ## assign new serial numbers to vertices, and recode
+  Vsub <- V[retainvertices]
+  newserial <- cumsum(retainvertices)
+  newfrom <- newserial[from[retainedges]]
+  newto   <- newserial[to[retainedges]]
+  ## extract relevant subset of network
+  Lsub <- linnet(Vsub, edges=cbind(newfrom, newto), sparse=sparse)
+  ## tack on information about subset
+  attr(Lsub, "retainvertices") <- retainvertices
+  attr(Lsub, "retainedges") <- retainedges
+  ## done?
+  if(inherits(X, "linnet"))
+    return(Lsub)
+  ## X is an lpp object
+  ## Find data points that lie on accepted segments
+  dat <- X$data
+  ok <- retainedges[dat$seg]
+  dsub <- dat[ok, , drop=FALSE]
+  ## compute new serial numbers for retained segments
+  segmap <- cumsum(retainedges)
+  dsub$seg <- segmap[as.integer(dsub$seg)]
+  # make new lpp object
+  Y <- ppx(data=dsub, domain=Lsub, coord.type=as.character(X$ctype))
+  class(Y) <- c("lpp", class(Y))
+  ## tack on information about subset
+  attr(Y, "retainpoints") <- ok
+  return(Y)
+}
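+
+## Usage sketch (not run; 'simplenet' is a dataset shipped with spatstat):
+##   X <- runiflpp(20, simplenet)            # random point pattern on a network
+##   Y <- thinNetwork(X, retainedges=1:5)    # keep segments 1..5 and the points on them
+##   attr(Y, "retainpoints")                 # logical: which points of X survived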
+
+validate.lpp.coords <- function(X, fatal=TRUE, context="") {
+  ## check for mangled internal data
+  proj <- project2segment(as.ppp(X), as.psp(as.linnet(X)))
+  seg.claimed <- coords(X)$seg
+  seg.mapped  <- proj$mapXY
+  if(any(seg.claimed != seg.mapped)) {
+    whinge <- paste("Incorrect segment id", context)
+    if(fatal) stop(whinge, call.=FALSE) else warning(whinge, call.=FALSE)
+    return(FALSE)
+  }
+  tp.claimed <- coords(X)$tp
+  tp.mapped  <- proj$tp
+  v <- max(abs(tp.claimed - tp.mapped))
+  if(v > 0.01) {
+    whinge <- paste("Incorrect 'tp' coordinate",
+                    paren(paste("max discrepancy", v)),
+                    context)
+    if(fatal) stop(whinge, call.=FALSE) else warning(whinge, call.=FALSE)
+    return(FALSE)
+  }
+  return(TRUE)
+}
diff --git a/R/lintess.R b/R/lintess.R
new file mode 100644
index 0000000..3590877
--- /dev/null
+++ b/R/lintess.R
@@ -0,0 +1,259 @@
+#'
+#'   lintess.R
+#'
+#'   Tessellations on a Linear Network
+#'
+#'   $Revision: 1.13 $   $Date: 2017/06/05 10:31:58 $
+#'
+
+lintess <- function(L, df) {
+  verifyclass(L, "linnet")
+  if(missing(df) || is.null(df)) {
+    # tessellation consisting of a single tile
+    ns <- nsegments(L)
+    df <- data.frame(seg=seq_len(ns), t0=0, t1=1, tile=factor(1))
+    out <- list(L=L, df=df)
+    class(out) <- c("lintess", class(out))
+    return(out)
+  } 
+  # validate 'df'
+  stopifnot(is.data.frame(df))
+  needed <- c("seg", "t0", "t1", "tile")
+  if(any(bad <- is.na(match(needed, colnames(df)))))
+    stop(paste(ngettext(sum(bad), "Column", "Columns"),
+               commasep(sQuote(needed[bad])),
+               "missing from data frame"),
+         call.=FALSE)
+  df$seg <- as.integer(df$seg)
+  df$tile <- as.factor(df$tile)
+  if(any(reversed <- with(df, t1 < t0)))
+    df[reversed, c("t0", "t1")] <- df[reversed, c("t1", "t0")]
+  with(df, {
+    segU <- sort(unique(seg))
+    segN <- seq_len(nsegments(L))
+    if(length(omitted <- setdiff(segN, segU)) > 0)
+      stop(paste(ngettext(length(omitted), "Segment", "Segments"),
+                 commasep(omitted),
+                 "omitted from data"),
+           call.=FALSE)
+    if(length(unknown <- setdiff(segU, segN)) > 0)
+      stop(paste(ngettext(length(unknown), "Segment", "Segments"),
+                 commasep(unknown),
+                 ngettext(length(unknown), "do not", "does not"),
+                 "exist in the network"),
+           call.=FALSE)
+    pieces <- split(df, seg)
+    for(piece in pieces) {
+      t0 <- piece$t0
+      t1 <- piece$t1
+      thedata <- paste("Data for segment", piece$seg[[1L]])
+      if(!any(t0 == 0))
+        stop(paste(thedata, "do not contain an entry with t0 = 0"),
+             call.=FALSE)
+      if(!any(t1 == 1))
+        stop(paste(thedata, "do not contain an entry with t1 = 1"),
+             call.=FALSE)
+      if(any(t1 < 1 & is.na(match(t1, t0))) ||
+         any(t0 > 0 & is.na(match(t0, t1))))
+        stop(paste(thedata, "are inconsistent"),
+             call.=FALSE)
+    }
+  })
+  out <- list(L=L, df=df)
+  class(out) <- c("lintess", class(out))
+  return(out)
+}
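+
+## Usage sketch (not run): a two-tile tessellation cutting every segment at its midpoint
+##   ns <- nsegments(simplenet)
+##   df <- data.frame(seg=rep(seq_len(ns), each=2),
+##                    t0=rep(c(0, 0.5), ns), t1=rep(c(0.5, 1), ns),
+##                    tile=rep(c("lower", "upper"), ns))
+##   Z <- lintess(simplenet, df)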
+
+print.lintess <- function(x, ...) {
+  splat("Tessellation on a linear network")
+  nt <- length(levels(x$df$tile))
+  splat(nt, "tiles")
+  if(anyNA(x$df$tile)) splat("[An additional tile is labelled NA]")
+  return(invisible(NULL))
+}
+
+summary.lintess <- function(object, ...) {
+  df <- object$df
+  lev <- levels(df$tile)
+  nt <- length(lev)
+  nr <- nrow(df)
+  seglen <- lengths.psp(as.psp(object$L))
+  df$fraglen <- with(df, seglen[seg] * (t1-t0))
+  tilelen <- with(df, tapplysum(fraglen, list(tile)))
+  hasna <- anyNA(df$tile)
+  nalen <- if(hasna) (sum(seglen) - sum(tilelen)) else 0
+  y <- list(nt=nt, nr=nr, lev=lev, seglen=seglen, tilelen=tilelen,
+            hasna=hasna, nalen=nalen)
+  class(y) <- c("summary.lintess", class(y))
+  return(y)
+}
+
+print.summary.lintess <- function(x, ...) {
+  splat("Tessellation on a linear network")
+  with(x, {
+    splat(nt, "tiles")
+    if(hasna) splat("[An additional tile is labelled NA]")
+    if(nt <= 30) {
+      splat("Tile labels:", paste(lev, collapse=" "))
+      splat("Tile lengths:")
+      print(signif(tilelen, 4))
+    } else {
+      splat("Tile lengths (summary):")
+      print(summary(tilelen))
+    }
+    if(hasna) splat("Tile labelled NA has length", nalen)
+  })
+  return(invisible(NULL))
+}
+
+plot.lintess <- function(x, ..., main, add=FALSE,
+                         style=c("segments", "image"),
+                         col=NULL) {
+  if(missing(main)) main <- short.deparse(substitute(x))
+  style <- match.arg(style)
+  if(style == "image") {
+    z <- plot(as.linfun(x), main=main, ..., add=add)
+    return(invisible(z))
+  }
+  #' determine colour map
+  df <- x$df
+  lev <- levels(df$tile)
+  if(is.null(col)) {
+    col <- rainbow(length(lev))
+    cmap <- colourmap(col, inputs=lev)
+  } else if(inherits(col, "colourmap")) {
+    cmap <- col
+    col <- cmap(lev)
+  } else if(is.colour(col)) {
+    if(length(col) == 1) col <- rep(col, length(lev))
+    if(length(col) != length(lev))
+      stop(paste(length(col), "colours provided but",
+                 length(lev), "colours needed"))
+    cmap <- colourmap(col, inputs=lev)
+  } else stop("col should be a vector of colours, or a colourmap object")
+  #' determine segment coordinates
+  L <- as.linnet(x)
+  from <- L$from[df$seg]
+  to   <- L$to[df$seg]
+  V <- vertices(L)
+  vx <- V$x
+  vy <- V$y
+  #' plot
+  if(!add) plot(Frame(x), main=main, type="n")
+  with(df,
+       segments(
+         vx[from] * (1-t0) + vx[to] * t0,
+         vy[from] * (1-t0) + vy[to] * t0,
+         vx[from] * (1-t1) + vx[to] * t1,
+         vy[from] * (1-t1) + vy[to] * t1,
+         col=col[as.integer(tile)],
+         ...)
+       )
+  return(invisible(cmap))
+}
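+
+## Usage sketch (not run; 'Z' is any lintess object, e.g. as built above):
+##   cm <- plot(Z)              # style="segments"; invisibly returns the colourmap
+##   plot(Z, style="image")     # rasterise via as.linfun and plot as a pixel image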
+
+as.owin.lintess <- function(W, ...) { as.owin(as.linnet(W), ...) }
+
+Window.lintess <- function(X, ...) { as.owin(as.linnet(X)) }
+
+domain.lintess <- as.linnet.lintess <- function(X, ...) { X$L }
+
+as.linfun.lintess <- function(X, ..., values, navalue=NA) {
+  L <- X$L
+  df <- X$df
+  if(missing(values) || is.null(values)) {
+    rowvalues <- df$tile
+  } else {
+    if(length(values) != length(levels(df$tile)))
+      stop("Length of 'values' should equal the number of tiles", call.=FALSE)
+    rowvalues <- values[as.integer(df$tile)]    
+  }
+  f <- function(x, y, seg, tp) {
+    result <- rowvalues[integer(0)]
+    for(i in seq_along(seg)) {
+      tpi <- tp[i]
+      segi <- seg[i]
+      j <- which(df$seg == segi)
+      kk <- which(df[j, "t0"] <= tpi & df[j, "t1"] >= tpi)
+      result[i] <- if(length(kk) == 0) navalue else rowvalues[j[min(kk)]]
+    }
+    return(result)
+  }
+  g <- linfun(f, L)
+  return(g)
+}
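+
+## Usage sketch (not run): attach one numeric value to each tile of a lintess 'Z'
+##   f <- as.linfun(Z, values=c(10, 20))   # length(values) = number of tiles
+##   plot(f)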
+
+#' Divide a linear network into tiles demarcated by
+#' the points of a point pattern
+
+divide.linnet <- local({
+  
+  divide.linnet <- function(X) {
+    stopifnot(is.lpp(X))
+    L <- as.linnet(X)
+    coo <- coords(X)
+    #' add identifiers of endpoints
+    coo$from <- L$from[coo$seg]
+    coo$to   <- L$to[coo$seg]
+    #' group data by segment, sort by increasing 'tp'
+    coo <- coo[with(coo, order(seg, tp)), , drop=FALSE]
+    bits <- split(coo, coo$seg)
+    #' expand as a sequence of intervals
+    bits <- lapply(bits, expanddata)
+    #' reassemble as data frame
+    df <- Reduce(rbind, bits)
+    #' find all undivided segments
+    other <- setdiff(seq_len(nsegments(L)), unique(coo$seg))
+    #' add a single line for each undivided segment
+    if(length(other) > 0)
+      df <- rbind(df, data.frame(seg=other, t0=0, t1=1,
+                                 from=L$from[other], to=L$to[other]))
+    #' We now have a tessellation 
+    #' Sort again
+    df <- df[with(df, order(seg, t0)), , drop=FALSE]
+    #' Now identify connected components
+    #' Two intervals are connected if they share an endpoint
+    #' that is a vertex of the network.
+    nvert <- nvertices(L)
+    nbits <- nrow(df)
+    iedge <- jedge <- integer(0)
+    for(iv in seq_len(nvert)) {
+      joined <- with(df, which(from == iv | to == iv))
+      njoin <- length(joined)
+      if(njoin > 1)
+        iedge <- c(iedge, joined[-njoin])
+      jedge <- c(jedge, joined[-1L])
+    }
+    nedge <- length(iedge)
+    zz <- .C("cocoGraph",
+             nv = as.integer(nbits),
+             ne = as.integer(nedge), 
+             ie = as.integer(iedge - 1L),
+             je = as.integer(jedge - 1L),
+             label = as.integer(integer(nbits)), 
+             status = as.integer(integer(1L)),
+             PACKAGE = "spatstat")
+    if (zz$status != 0) 
+      stop("Internal error: connectedness algorithm did not converge")
+    lab <- zz$label + 1L
+    lab <- as.integer(factor(lab))
+    df <- df[,c("seg", "t0", "t1")]
+    df$tile <- lab
+    return(lintess(L, df))
+  }
+
+  expanddata <- function(z) {
+    df <- with(z,
+               data.frame(seg=c(seg[1L], seg),
+                          t0 = c(0, tp),
+                          t1 = c(tp, 1),
+                          from=NA_integer_,
+                          to=NA_integer_))
+    df$from[1L] <- z$from[1L]
+    df$to[nrow(df)] <- z$to[1L]
+    return(df)
+  }
+
+  divide.linnet
+})
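+
+## Usage sketch (not run):
+##   X <- runiflpp(20, simplenet)
+##   Z <- divide.linnet(X)    # tiles are the connected pieces between the points of X
+##   plot(Z)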
+
diff --git a/R/listof.R b/R/listof.R
new file mode 100755
index 0000000..2a205fa
--- /dev/null
+++ b/R/listof.R
@@ -0,0 +1,54 @@
+#
+# listof.R
+#
+# Methods for class `listof'
+#
+# plot.listof is defined in plot.splitppp.R
+#
+
+"[<-.listof" <- function(x, i, value) {
+  # invoke list method
+  class(x) <- "list"
+  x[i] <- value
+  # then make it a 'listof' object too
+  class(x) <- c("listof", class(x))
+  x
+}
+  
+summary.listof <- function(object, ...) {
+  x <- lapply(object, summary, ...)
+  class(x) <- "summary.listof"
+  x
+}
+
+print.summary.listof <- function(x, ...) {
+  class(x) <- "listof"
+  print(x)
+  invisible(NULL)
+}
+
+listof <- function(...) {
+#  warn.once("listof",
+#            "The class listof will be Deprecated",
+#            "in future versions of spatstat.",
+#            "Use anylist or solist")
+  stuff <- list(...)
+  class(stuff) <- c("listof", class(stuff))
+  return(stuff)
+}
+
+as.listof <- function(x) {
+  if(!is.list(x))
+    x <- list(x)
+  if(!inherits(x, "listof"))
+    class(x) <- c("listof", class(x))
+#  warn.once("listof",
+#            "The class listof will be Deprecated",
+#            "in future versions of spatstat.",
+#            "Use anylist or solist")
+  return(x)
+}
+
+as.layered.listof <- function(X) {
+  layered(LayerList=X)
+}
diff --git a/R/lixellate.R b/R/lixellate.R
new file mode 100644
index 0000000..58e58ea
--- /dev/null
+++ b/R/lixellate.R
@@ -0,0 +1,105 @@
+#'
+#'    lixellate.R
+#'
+#'   Divide each segment of a linear network into several pieces
+#' 
+#'     $Revision: 1.5 $  $Date: 2017/06/05 10:31:58 $
+#'
+
+lixellate <- function(X, ..., nsplit, eps, sparse=TRUE) {
+  missn <- missing(nsplit)
+  misse <- missing(eps)
+  if(missn && misse)
+    stop("One of the arguments 'nsplit' or 'eps' must be given")
+  if(!missn && !misse)
+    stop("The arguments 'nsplit' or 'eps' are incompatible")
+  if(!missn)
+    stopifnot(is.numeric(nsplit) && all(nsplit >= 0))
+
+  if(is.lpp(X)) {
+    rtype <- "lpp"
+    np <- npoints(X)
+    L <- as.linnet(X)
+  } else if(inherits(X, "linnet")) {
+    rtype <- "linnet"
+    L <- X
+    X <- runiflpp(1, L)
+    np <- 0
+  } else stop("X should be a linnet or lpp object")
+  
+  if(is.null(sparse))
+    sparse <- identical(L$sparse, TRUE)
+
+  from <- L$from
+  to <- L$to
+  ns <- length(from)
+
+  if(missn) {
+    lenfs <- lengths.psp(as.psp(L))
+    nsplit <- ceiling(lenfs/eps)
+  } else {
+    if(length(nsplit) == 1) {
+      nsplit <- rep(nsplit, ns)
+    } else if(length(nsplit) != ns) {
+      stop(paste("nsplit should be a single number,",
+                 "or a vector of length equal to the number of segments"))
+    }
+  }
+
+  sumN <- sum(nsplit)
+  sumN1 <- sum(nsplit-1)
+
+  V <- vertices(L)
+  nv <- npoints(V)
+  xv <- V$x
+  yv <- V$y
+
+  coordsX <- coords(X)
+  sp <- coordsX$seg
+  tp <- coordsX$tp
+  ## sort data in increasing order of 'sp'
+  oo <- order(sp)
+  
+  z <- .C("Clixellate",
+          ns=as.integer(ns),
+          fromcoarse=as.integer(from-1L),
+          tocoarse = as.integer(to-1L),
+          fromfine=as.integer(integer(sumN)),
+          tofine = as.integer(integer(sumN)),
+          nv = as.integer(nv),
+          xv = as.double(c(xv, numeric(sumN1))),
+          yv = as.double(c(yv, numeric(sumN1))),
+          svcoarse = as.integer(integer(nv + sumN1)),
+          tvcoarse = as.double(numeric(nv + sumN1)),
+          nsplit = as.integer(nsplit),
+          np = as.integer(np),
+          spcoarse = as.integer(sp[oo]-1L),
+          tpcoarse = as.double(tp[oo]),
+          spfine = as.integer(integer(np)),
+          tpfine = as.double(numeric(np)),
+          PACKAGE = "spatstat")
+
+  Lfine <- with(z, {
+    ii <- seq_len(nv)
+    Vnew <- ppp(xv[ii], yv[ii], window=Frame(L), check=FALSE)
+    Lfine <- linnet(Vnew, edges=cbind(fromfine,tofine)+1, sparse=sparse)
+    marks(Lfine$vertices) <- markcbind(marks(Lfine$vertices),
+                                       data.frame(segcoarse=svcoarse+1,
+                                                  tpcoarse=tvcoarse))
+    Lfine
+  })
+  if(rtype == "linnet")
+    return(Lfine)
+
+  ## put coordinates back in original order
+  sp[oo] <- as.integer(z$spfine + 1L)
+  tp[oo] <- z$tpfine
+  coordsX$seg <- sp
+  coordsX$tp <- tp
+  ## make lpp
+  Xfine <- lpp(coordsX, Lfine)
+  marks(Xfine) <- marks(X)
+  
+  return(Xfine)
+}
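+
+## Usage sketch (not run):
+##   L3 <- lixellate(simplenet, nsplit=3)  # cut each segment into 3 equal pieces
+##   Le <- lixellate(simplenet, eps=0.1)   # pieces of length at most 0.1 units
+##   X2 <- lixellate(runiflpp(20, simplenet), nsplit=2)  # data points are carried over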
+
diff --git a/R/localK.R b/R/localK.R
new file mode 100755
index 0000000..462ea05
--- /dev/null
+++ b/R/localK.R
@@ -0,0 +1,223 @@
+#
+#	localK.R		Getis-Franklin neighbourhood density function
+#
+#	$Revision: 1.21 $	$Date: 2015/07/11 08:19:26 $
+#
+#
+
+"localL" <-
+  function(X, ..., correction="Ripley", verbose=TRUE, rvalue=NULL)
+{
+  localK(X, wantL=TRUE,
+         correction=correction, verbose=verbose, rvalue=rvalue)
+}
+
+"localLinhom" <-
+  function(X, lambda=NULL, ..., correction="Ripley", verbose=TRUE, rvalue=NULL,
+           sigma=NULL, varcov=NULL)
+{
+  localKinhom(X, lambda=lambda, wantL=TRUE, ..., 
+              correction=correction, verbose=verbose, rvalue=rvalue,
+              sigma=sigma, varcov=varcov)
+}
+
+"localK" <-
+  function(X, ..., correction="Ripley", verbose=TRUE, rvalue=NULL)
+{
+  verifyclass(X, "ppp")
+  localKengine(X, ..., correction=correction, verbose=verbose, rvalue=rvalue)
+}
+
+"localKinhom" <-
+  function(X, lambda=NULL, ..., correction="Ripley", verbose=TRUE, rvalue=NULL,
+           sigma=NULL, varcov=NULL)
+{
+  verifyclass(X, "ppp")
+
+  if(is.null(lambda)) {
+    # No intensity data provided
+    # Estimate density by leave-one-out kernel smoothing
+    lambda <- density(X, ..., sigma=sigma, varcov=varcov,
+                            at="points", leaveoneout=TRUE)
+    lambda <- as.numeric(lambda)
+  } else {
+    # validate
+    if(is.im(lambda)) 
+      lambda <- safelookup(lambda, X)
+    else if(is.ppm(lambda))
+      lambda <- predict(lambda, locations=X, type="trend")
+    else if(is.function(lambda)) 
+      lambda <- lambda(X$x, X$y)
+    else if(is.numeric(lambda) && is.vector(as.numeric(lambda)))
+      check.nvector(lambda, npoints(X))
+    else stop(paste(sQuote("lambda"),
+                    "should be a numeric vector, a pixel image,",
+                    "a fitted model (ppm), or a function"))
+  }  
+  localKengine(X, lambda=lambda, ...,
+               correction=correction, verbose=verbose, rvalue=rvalue)
+}
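+
+## Usage sketch (not run; 'cells' is a standard spatstat point pattern):
+##   Kloc <- localK(cells)             # one column of estimates per data point
+##   plot(Kloc)                        # overlay of all local K functions
+##   v <- localL(cells, rvalue=0.05)   # numeric vector of local L values at r = 0.05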
+
+"localKengine" <-
+  function(X, ..., wantL=FALSE, lambda=NULL,
+           correction="Ripley", verbose=TRUE, rvalue=NULL)
+{
+  npts <- npoints(X)
+  W <- X$window
+  areaW <- area(W)
+  lambda.ave <- npts/areaW
+  lambda1.ave <- (npts - 1)/areaW
+
+  weighted <- !is.null(lambda)
+
+  if(is.null(rvalue)) 
+    rmaxdefault <- rmax.rule("K", W, lambda.ave)
+  else {
+    stopifnot(is.numeric(rvalue))
+    stopifnot(length(rvalue) == 1)
+    stopifnot(rvalue >= 0)
+    rmaxdefault <- rvalue
+  }
+  breaks <- handle.r.b.args(NULL, NULL, W, rmaxdefault=rmaxdefault)
+  r <- breaks$r
+  rmax <- breaks$max
+  
+  correction.given <- !missing(correction)
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             isotropic="isotropic",
+                             Ripley="isotropic",
+                             trans="translate",
+                             translate="translate",
+                             translation="translate",
+                             best="best"),
+                           multi=FALSE)
+
+  correction <- implemented.for.K(correction, W$type, correction.given)
+
+  # recommended range of r values
+  alim <- c(0, min(rmax, rmaxdefault))
+
+  # identify all close pairs
+  rmax <- max(r)
+  close <- closepairs(X, rmax)
+  DIJ <- close$d
+  XI <- ppp(close$xi, close$yi, window=W, check=FALSE)
+  I <- close$i
+  if(weighted) {
+    J <- close$j
+    lambdaJ <- lambda[J]
+    weightJ <- 1/lambdaJ
+  } 
+  
+  # initialise
+  df <- as.data.frame(matrix(NA, length(r), npts))
+  labl <- desc <- character(npts)
+
+  bkt <- function(x) { paste("[", x, "]", sep="") }
+
+  if(verbose) state <- list()
+  
+  switch(correction,
+         none={
+           # uncorrected! For demonstration purposes only!
+           for(i in 1:npts) {
+             ii <- (I == i)
+             wh <- whist(DIJ[ii], breaks$val,
+                         if(weighted) weightJ[ii] else NULL)  # no edge weights
+             df[,i] <- cumsum(wh)
+             icode <- numalign(i, npts)
+             names(df)[i] <- paste("un", icode, sep="")
+             labl[i] <- paste("%s", bkt(icode), "(r)", sep="")
+             desc[i] <- paste("uncorrected estimate of %s",
+                              "for point", icode)
+             if(verbose) state <- progressreport(i, npts, state=state)
+           }
+           if(!weighted) df <- df/lambda1.ave
+         },
+         translate={
+           # Translation correction
+           XJ <- ppp(close$xj, close$yj, window=W, check=FALSE)
+           edgewt <- edge.Trans(XI, XJ, paired=TRUE)
+           if(weighted)
+             edgewt <- edgewt * weightJ
+           for(i in 1:npts) {
+             ii <- (I == i)
+             wh <- whist(DIJ[ii], breaks$val, edgewt[ii])
+             Ktrans <- cumsum(wh)
+             df[,i] <- Ktrans
+             icode <- numalign(i, npts)
+             names(df)[i] <- paste("trans", icode, sep="")
+             labl[i] <- paste("%s", bkt(icode), "(r)", sep="")
+             desc[i] <- paste("translation-corrected estimate of %s",
+                              "for point", icode)
+             if(verbose) state <- progressreport(i, npts, state=state)
+           }
+           if(!weighted) df <- df/lambda1.ave
+           h <- diameter(W)/2
+           df[r >= h, ] <- NA
+         },
+         isotropic={
+           # Ripley isotropic correction
+           edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1))
+           if(weighted)
+             edgewt <- edgewt * weightJ
+           for(i in 1:npts) {
+             ii <- (I == i)
+             wh <- whist(DIJ[ii], breaks$val, edgewt[ii])
+             Kiso <- cumsum(wh)
+             df[,i] <- Kiso
+             icode <- numalign(i, npts)
+             names(df)[i] <- paste("iso", icode, sep="")
+             labl[i] <- paste("%s", bkt(icode), "(r)", sep="")
+             desc[i] <- paste("Ripley isotropic correction estimate of %s", 
+                              "for point", icode)
+             if(verbose) state <- progressreport(i, npts, state=state)
+           }
+           if(!weighted) df <- df/lambda1.ave
+           h <- diameter(W)/2
+           df[r >= h, ] <- NA
+         })
+  # transform values if L required
+  if(wantL)
+    df <- sqrt(df/pi)
+  
+  # return vector of values at r=rvalue, if desired
+  if(!is.null(rvalue)) {
+    nr <- length(r)
+    if(r[nr] != rvalue)
+      stop("Internal error - rvalue not attained")
+    return(as.numeric(df[nr,]))
+  }
+  # function value table required
+  # add r and theo
+  if(!wantL) {
+    df <- cbind(df, data.frame(r=r, theo=pi * r^2))
+    if(!weighted) {
+      ylab <- quote(K[loc](r))
+      fnam <- "K[loc][',']"
+    } else {
+      ylab <- quote(Kinhom[loc](r))
+      fnam <- "Kinhom[loc][',']"
+    }
+  } else {
+    df <- cbind(df, data.frame(r=r, theo=r))
+    if(!weighted) {
+      ylab <- quote(L[loc](r))
+      fnam <- "L[loc][',']"
+    } else {
+      ylab <- quote(Linhom[loc](r))
+      fnam <- "Linhom[loc][',']"
+    }
+  }
+  desc <- c(desc, c("distance argument r", "theoretical Poisson %s"))
+  labl <- c(labl, c("r", "%s[pois](r)"))
+  # create fv object
+  K <- fv(df, "r", ylab, "theo", , alim, labl, desc, fname=fnam)
+  # default is to display them all
+  formula(K) <- . ~ r
+  unitname(K) <- unitname(X)
+  attr(K, "correction") <- correction
+  return(K)
+}
+
+
diff --git a/R/localpcf.R b/R/localpcf.R
new file mode 100755
index 0000000..e4c1602
--- /dev/null
+++ b/R/localpcf.R
@@ -0,0 +1,206 @@
+#
+#   localpcf.R
+#
+#  $Revision: 1.22 $  $Date: 2017/06/05 10:31:58 $
+#
+#
+
+localpcf <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15) {
+  if(length(list(...)) > 0)
+    warning("Additional arguments ignored")
+  stopifnot(is.ppp(X))
+  localpcfengine(X, delta=delta, rmax=rmax, nr=nr, stoyan=stoyan)
+}
+
+localpcfinhom <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15,
+                     lambda=NULL, sigma=NULL, varcov=NULL) {
+  stopifnot(is.ppp(X))
+  if(is.null(lambda)) {
+    # No intensity data provided
+    # Estimate density by leave-one-out kernel smoothing
+    lambda <- density(X, ..., sigma=sigma, varcov=varcov,
+                            at="points", leaveoneout=TRUE)
+    lambda <- as.numeric(lambda)
+  } else {
+    # validate
+    if(is.im(lambda)) 
+      lambda <- safelookup(lambda, X)
+    else if(is.ppm(lambda))
+      lambda <- predict(lambda, locations=X, type="trend")
+    else if(is.function(lambda)) 
+      lambda <- lambda(X$x, X$y)
+    else if(is.numeric(lambda) && is.vector(as.numeric(lambda)))
+      check.nvector(lambda, npoints(X))
+    else stop(paste(sQuote("lambda"),
+                    "should be a numeric vector, a pixel image,",
+                    "a fitted model (ppm), or a function"))
+  }
+  localpcfengine(X,
+                 delta=delta, rmax=rmax, nr=nr, stoyan=stoyan,
+                 lambda=lambda)
+}
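+
+## Usage sketch (not run):
+##   g <- localpcf(cells, stoyan=0.2)   # slightly wider kernel than the default 0.15
+##   plot(g)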
+ 
+localpcfengine <- function(X, ...,
+                           delta=NULL, rmax=NULL, nr=512, stoyan=0.15,
+                           lambda=NULL) {
+  m <- localpcfmatrix(X, delta=delta, rmax=rmax, nr=nr, stoyan=stoyan,
+                      lambda=lambda)
+  r <- attr(m, "r")
+  delta <- attr(m, "delta")
+  nX <- npoints(X)
+  if(nX == 0) {
+    df <- data.frame(r=r, theo=rep.int(1, length(r)))
+    nama <- desc <- labl <- NULL
+  } else {
+    # border correction
+    dbord <- bdist.points(X)
+    m[r[row(m)] > dbord[col(m)]] <- NA
+    #
+    df <- data.frame(m, r=r, theo=rep.int(1, length(r)))
+    icode <- unlist(lapply(seq_len(nX), numalign, nmax=nX))
+    nama <- paste("est", icode, sep="")
+    desc <- paste("estimate of %s for point", icode)
+    labl <- paste("%s[", icode, "](r)", sep="")
+  }
+  names(df) <- c(nama, "r", "theo")
+  desc <- c(desc, "distance argument r", "theoretical Poisson %s")
+  labl <- c(labl, "r", "%s[pois](r)")
+  # create fv object
+  g <- fv(df, "r", quote(localg(r)),
+          "theo", , c(0, max(r)), labl, desc, fname="localg")
+  # default is to display them all
+  formula(g) <- . ~ r
+  fvnames(g, ".") <- names(df)[names(df) != "r"]
+  unitname(g) <- unitname(X)
+  attr(g, "delta") <- delta  
+  attr(g, "correction") <- "border"
+  return(g)
+}
+
+localpcfmatrix <- function(X, i=seq_len(npoints(X)), ...,
+                           lambda = NULL,
+                           delta=NULL, rmax=NULL,
+                           nr=512, stoyan=0.15) {
+  missi <- missing(i)
+  weighted <- !is.null(lambda)
+  nX <- npoints(X)
+  nY <- if(missi) nX else length(seq_len(nX)[i])
+  W <- as.owin(X)
+  lambda.ave <- nX/area(W)
+  if(is.null(delta)) 
+    delta <- stoyan/sqrt(lambda.ave)
+  if(is.null(rmax)) 
+    rmax <- rmax.rule("K", W, lambda.ave)
+  #
+  if(nX == 0 || nY == 0) {
+    out <- matrix(0, nr, 0)
+  } else {
+    # sort points in increasing order of x coordinate
+    oX <- fave.order(X$x)
+    Xsort <- X[oX]
+    idXsort <- (1:nX)[oX]
+    if(weighted) {
+      lambdaXsort <- lambda[oX]
+      weightXsort <- 1/lambdaXsort
+    }
+    if(missi) {
+      Y <- X
+      oY <- oX
+      Ysort   <- Xsort
+      idYsort <- idXsort
+    } else {
+      # i is some kind of index
+      Y <- X[i]
+      idY <- (1:nX)[i]
+      oY <- fave.order(Y$x)
+      Ysort <- Y[oY]
+      idYsort <- idY[oY]
+    }
+    nY <- npoints(Y)
+    force(nr)
+    # call C
+    if(!weighted) {
+      zz <- .C("locpcfx",
+               nn1 = as.integer(nY),
+               x1  = as.double(Ysort$x),
+               y1  = as.double(Ysort$y),
+               id1 = as.integer(idYsort),
+               nn2 = as.integer(nX),
+               x2  = as.double(Xsort$x),
+               y2  = as.double(Xsort$y),
+               id2 = as.integer(idXsort),
+               nnr = as.integer(nr),
+               rmaxi=as.double(rmax),
+               del=as.double(delta),
+               pcf=as.double(double(nr * nY)),
+               PACKAGE = "spatstat")
+    } else {
+      zz <- .C("locWpcfx",
+               nn1 = as.integer(nY),
+               x1  = as.double(Ysort$x),
+               y1  = as.double(Ysort$y),
+               id1 = as.integer(idYsort),
+               nn2 = as.integer(nX),
+               x2  = as.double(Xsort$x),
+               y2  = as.double(Xsort$y),
+               id2 = as.integer(idXsort),
+               w2  = as.double(weightXsort),
+               nnr = as.integer(nr),
+               rmaxi=as.double(rmax),
+               del=as.double(delta),
+               pcf=as.double(double(nr * nY)),
+               PACKAGE = "spatstat")
+    }
+    out <- matrix(zz$pcf, nr, nY)
+    # reorder columns to match original
+    out[, oY] <- out
+    # rescale
+    out <- out/(2 * pi * if(!weighted) lambda.ave else 1)
+  }
+  # dress up
+  attr(out, "r") <- seq(from=0, to=rmax, length.out=nr)
+  attr(out, "delta") <- delta
+  class(out) <- c("localpcfmatrix", class(out))
+  return(out)
+}
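+
+## Usage sketch (not run):
+##   m <- localpcfmatrix(cells)   # raw matrix, one column per data point
+##   m                            # print method below
+##   plot(m)                      # matplot of all columns against r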
+
+print.localpcfmatrix <- function(x, ...) {
+  cat("Matrix of local pair correlation estimates\n")
+  nc <- ncol(x)
+  nr <- nrow(x)
+  cat(paste("pcf estimates for", nc, ngettext(nc, "point", "points"), "\n"))
+  rval <- attr(x, "r")
+  cat(paste("r values from 0 to", max(rval), "in", nr, "steps\n"))
+  return(invisible(NULL))
+}
+
+plot.localpcfmatrix <- function(x, ...) {
+  xname <- short.deparse(substitute(x))
+  rval <- attr(x, "r")
+  do.call(matplot,
+          resolve.defaults(list(rval, x),
+                           list(...),
+                           list(type="l", main=xname,
+                                xlab="r", ylab="pair correlation")))
+}
+
+"[.localpcfmatrix" <-
+  function(x, i, ...) {
+    r     <- attr(x, "r")
+    delta <- attr(x, "delta")
+    class(x) <- "matrix"
+    if(missing(i)) {
+      x <- x[ , ...]
+    } else {
+      x <- x[i, ...]
+      if(is.matrix(i))
+        return(x)
+      r <- r[i]
+    }
+    if(!is.matrix(x))
+      x <- matrix(x, nrow=length(r))
+    attr(x, "r") <- r
+    attr(x, "delta") <- delta
+    class(x) <- c("localpcfmatrix", class(x))
+    return(x)
+}
+
diff --git a/R/logistic.R b/R/logistic.R
new file mode 100644
index 0000000..90f60b3
--- /dev/null
+++ b/R/logistic.R
@@ -0,0 +1,391 @@
+#
+#  logistic.R
+#
+#   $Revision: 1.23 $  $Date: 2017/02/07 08:12:05 $
+#
+#  Logistic likelihood method - under development
+#
+
+logi.engine <- function(Q,
+                        trend = ~1,
+                        interaction,
+                        ...,
+                        covariates=NULL,
+                        subsetexpr=NULL,
+                        correction="border",
+                        rbord=reach(interaction),
+                        covfunargs=list(),
+                        allcovar=FALSE,
+                        vnamebase=c("Interaction", "Interact."),
+                        vnameprefix=NULL,
+                        justQ = FALSE,
+                        savecomputed = FALSE,
+                        precomputed = NULL,
+                        VB=FALSE
+                        ){
+  if(is.null(trend)) trend <- ~1 
+  if(is.null(interaction)) interaction <- Poisson()
+  want.trend <- !identical.formulae(trend, ~1)
+  want.inter <- !is.poisson(interaction)
+  want.subset <- !is.null(subsetexpr)
+  # validate choice of edge correction
+  correction <- pickoption("correction", correction,
+                           c(border="border",
+                             periodic="periodic",
+                             isotropic="isotropic",
+                             Ripley="isotropic",
+                             trans="translate",
+                             translate="translate",
+                             translation="translate",
+                             none="none"))
+  # rbord applies only to border correction
+  if(correction == "border") {
+    check.1.real(rbord, "In ppm")
+    explain.ifnot(rbord >= 0, "In ppm")
+  } else rbord <- 0
+  # backdoor stuff
+  if(!missing(vnamebase)) {
+    if(length(vnamebase) == 1)
+      vnamebase <- rep.int(vnamebase, 2)
+    if(!is.character(vnamebase) || length(vnamebase) != 2)
+      stop("Internal error: illegal format of vnamebase")
+  }
+  if(!is.null(vnameprefix)) {
+    if(!is.character(vnameprefix) || length(vnameprefix) != 1)
+      stop("Internal error: illegal format of vnameprefix")
+  }
+  # create dummy points
+  if(inherits(Q, "ppp")){
+    Xplus <- Q
+    Q <- quadscheme.logi(Xplus, ...)
+    D <- Q$dummy
+    Dinfo <- Q$param
+  } else if(checkfields(Q, c("data", "dummy"))) {
+    Xplus <- Q$data
+    D <- Q$dummy
+    Dinfo <- Q$param
+    if(is.null(Dinfo)){
+      Dinfo <- list(how="given", rho=npoints(D)/(area(D)*markspace.integral(D)))
+    }
+    Q <- quadscheme.logi(Xplus, D)
+  } else stop("Format of object Q is not understood")
+  if (justQ) 
+    return(Q)
+  ### Dirty way of recording arguments so that the model can be refitted later (should probably be done using call, eval, envir, etc.):
+  extraargs <- list(covfunargs = covfunargs, allcovar = allcovar, vnamebase = vnamebase, vnameprefix = vnameprefix)
+  extraargs <- append(extraargs, list(...))
+  ## Dummy intensity
+  if(correction == "border" && Dinfo$how=="grid"){
+    Dbord <- D[bdist.points(D)>=rbord]
+    Dinfo$rho <- npoints(Dbord)/(eroded.areas(as.owin(Dbord), rbord)*markspace.integral(Dbord))
+  }
+  rho <- Dinfo$rho
+  ##Setting the B from Barker dynamics (relative to dummy intensity)
+  B <- list(...)$Barker
+  if(is.null(B))
+    B <- 1
+  B <- B*rho
+  Dinfo <- append(Dinfo, list(B=B))
+  Dinfo <- append(Dinfo, list(extraargs=extraargs))
+  # 
+  Wplus <- as.owin(Xplus)
+  nXplus <- npoints(Xplus)
+  U <- superimpose(Xplus, D, W=Wplus, check=FALSE)
+#  E <- equalpairs(U, Xplus, marked = is.marked(Xplus))
+  E <- cbind(1:nXplus, 1:nXplus)
+#  
+  computed <- if (savecomputed) list(X = Xplus, Q = Q, U = U) else list()
+  # assemble covariate data frame
+  if(want.trend || want.subset) {
+    tvars <- variablesinformula(trend)
+    if(want.subset)
+      tvars <- union(tvars, all.vars(subsetexpr))
+    if(!is.data.frame(covariates)) {
+      ## resolve 'external' covariates
+      externalvars <- setdiff(tvars, c("x", "y", "marks"))
+      tenv <- environment(trend)
+      covariates <- getdataobjects(externalvars, tenv, covariates, fatal=TRUE)
+    }
+    wantxy <- c("x", "y") %in% tvars
+    wantxy <- wantxy | rep.int(allcovar, 2)
+    cvdf <- data.frame(x=U$x, y=U$y)[, wantxy, drop=FALSE]
+    if(!is.null(covariates)) {
+      df <- mpl.get.covariates(covariates, U, "quadrature points", covfunargs)
+      cvdf <- cbind(cvdf, df)
+    }
+    wantmarks <- "marks" %in% tvars
+    if(wantmarks) cvdf <- cbind(cvdf, marks = marks(U))
+  } else cvdf <- NULL
+  # evaluate interaction sufficient statistics
+  if (!is.null(ss <- interaction$selfstart)) 
+    interaction <- ss(Xplus, interaction)
+  V <- evalInteraction(Xplus, U, E, interaction, correction, precomputed = precomputed, savecomputed = savecomputed)
+  if(!is.matrix(V))
+    stop("evalInteraction did not return a matrix")
+  if (savecomputed) 
+    computed <- append(computed, attr(V, "computed"))
+  IsOffset <- attr(V, "IsOffset")
+  if(is.null(IsOffset)) IsOffset <- rep.int(FALSE, ncol(V))
+  # determine names
+  if(ncol(V) > 0) {
+    Vnames <- colnames(V)
+    if(is.null(Vnames)) {
+      nc <- ncol(V)
+      Vnames <- if(nc == 1) vnamebase[1L] else paste(vnamebase[2L], 1:nc, sep="")
+      colnames(V) <- Vnames
+    } else if(!is.null(vnameprefix)) {
+      Vnames <- paste(vnameprefix, Vnames, sep="")
+      colnames(V) <- Vnames
+    }
+  } else Vnames <- character(0)
+  # combine all data
+  glmdata <- as.data.frame(V)
+  if(!is.null(cvdf)) glmdata <- cbind(glmdata, cvdf)
+  # construct response and weights
+  ok <- if(correction == "border") (bdist.points(U) >= rbord) else rep.int(TRUE, npoints(U))
+  # Keep only those quadrature points for which the
+  # conditional intensity is nonzero.
+  KEEP  <- if(ncol(V)>0) matrowall(V != -Inf) else rep.int(TRUE, npoints(U))
+  ok <- ok & KEEP
+  wei <- c(rep.int(1,npoints(Xplus)),rep.int(B/rho,npoints(D)))
+  resp <- c(rep.int(1,npoints(Xplus)),rep.int(0,npoints(D)))
+  ## User-defined subset:
+  if(!is.null(subsetexpr)) {
+    USERSUBSET <- eval(subsetexpr, glmdata, environment(trend))
+    ok <- ok & USERSUBSET
+  }
+  # add offset, subset and weights to data frame
+  # using reserved names beginning with ".logi."
+  glmdata <- cbind(glmdata,
+                   .logi.Y = resp,
+                   .logi.B = B,
+                   .logi.w = wei,
+                   .logi.ok = ok)
+  # build glm formula 
+  # (reserved names begin with ".logi.")
+  trendpart <- paste(as.character(trend), collapse=" ")
+  fmla <- paste(".logi.Y ", trendpart)
+  # Interaction terms
+  if(want.inter) {
+    VN <- Vnames
+    # enclose offset potentials in 'offset(.)'
+    if(any(IsOffset))
+      VN[IsOffset] <- paste("offset(", VN[IsOffset], ")", sep="")
+    fmla <- paste(c(fmla, VN), collapse="+")
+  }
+  # add offset intrinsic to logistic technique
+  fmla <- paste(fmla, "offset(-log(.logi.B))", sep="+")
+  fmla <- as.formula(fmla)
+  # to satisfy package checker: 
+  .logi.B <- B
+  .logi.w <- wei
+  .logi.ok  <- ok
+  .logi.Y   <- resp
+  # suppress warnings from code checkers
+  dont.complain.about(.logi.B, .logi.w, .logi.ok, .logi.Y)
+  # go
+  ##fit <- glm(fmla, data=glmdata,
+  ##           family=binomial(), subset = .logi.ok, weights = .logi.w)
+  fit <- if(VB) 
+           vblogit.fmla(fmla, data = glmdata, 
+                        subset = .logi.ok, weights = .logi.w, ...)
+         else 
+           glm(fmla, data = glmdata, 
+               family = binomial(), subset = .logi.ok, weights = .logi.w)
+  environment(fit$terms) <- sys.frame(sys.nframe())
+  ## Fitted coeffs
+  co <- coef(fit)
+  fitin <- fii(interaction, co, Vnames, IsOffset)
+
+  ## Saturated log-likelihood:
+  satlogpl <- sum(ok*resp*log(B))
+  ## Max. value of log-likelihood:
+  maxlogpl <- logLik(fit) + satlogpl
+
+  # Stamp with spatstat version number
+  spv <- package_version(versionstring.spatstat())
+  the.version <- list(major=spv$major,
+                      minor=spv$minor,
+                      release=spv$patchlevel,
+                      date="$Date: 2017/02/07 08:12:05 $")
+
+  ## Compile results
+  fit <- list(method      = "logi",
+              fitter      = "glm",
+              projected   = FALSE,
+              coef        = co,
+              trend       = trend,
+              interaction = interaction,
+              Q           = Q,
+              correction  = correction,
+              rbord       = rbord,
+              terms       = terms(trend),
+              version     = the.version,
+              fitin       = fitin,
+              maxlogpl    = maxlogpl,
+              satlogpl    = satlogpl,
+              covariates  = mpl.usable(covariates),
+#              varcov      = if(VB) fit$S else NULL,
+              internal    = list(Vnames  = Vnames,
+                                 IsOffset=IsOffset,
+                                 glmdata = glmdata,
+                                 glmfit = fit,
+                                 logistic = Dinfo,
+                                 computed = computed,
+                                 vnamebase=vnamebase,
+                                 vnameprefix=vnameprefix,
+                                 VB = if(VB) TRUE else NULL,
+                                 priors = if(VB) fit$priors else NULL
+                                 )
+              )
+  class(fit) <- "ppm"
+  return(fit)
+}
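+
+## The engine is normally reached through ppm(); minimal sketch (not run):
+##   fit <- ppm(cells ~ x, method="logi")
+##   coef(fit)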
+
+
+forbid.logi <- function(object) {
+  if(object$method == "logi")
+    stop("Sorry, this is not implemented for method=\'logi\'")
+  return(invisible(NULL))
+}
+
+logi.dummy <- function(X, dummytype = "stratrand", nd = NULL, mark.repeat = FALSE, ...){
+  ## Resolving nd inspired by default.n.tiling
+  if(is.null(nd)){
+    nd <- spatstat.options("ndummy.min")
+    if(inherits(X, "ppp"))
+      nd <- pmax(nd, 10 * ceiling(2 * sqrt(X$n)/10))
+  }
+  nd <- ensure2vector(nd)
+  marx <- is.multitype(X)
+  if(marx)
+    lev <- levels(marks(X))
+  if(marx && mark.repeat){
+    N <- length(lev)
+    Dlist <- inDlist <- vector("list", N)
+  } else{
+    N <- 1
+  }
+  W <- as.owin(X)
+  type <- match.arg(dummytype, c("stratrand", "binomial", "poisson", "grid", "transgrid"))
+  B <- boundingbox(W)
+  rho <- nd[1L]*nd[2L]/area(B)
+  Dinfo <- list(nd=nd, rho=rho, how=type)
+  ## Repeating dummy process for each mark type 1:N (only once if unmarked or mark.repeat = FALSE)
+  for(i in 1:N){
+    switch(type,
+           stratrand={
+             D <- as.ppp(stratrand(B, nd[1L], nd[2L]), W = B)
+             inD <- which(inside.owin(D, w = W))
+             D <- D[W]
+             inD <- paste(i,inD,sep="_")
+           },
+           binomial={
+             D <- runifpoint(nd[1L]*nd[2L], win=B)
+             D <- D[W]
+           },
+           poisson={
+             D <- rpoispp(rho, win = W)
+           },
+           grid={
+             D <- as.ppp(gridcenters(B, nd[1L], nd[2L]), W = B)
+             inD <- which(inside.owin(D, w = W))
+             D <- D[W]
+             inD <- paste(i,inD,sep="_")
+           },
+           transgrid={
+             D <- as.ppp(gridcenters(B, nd[1L], nd[2L]), W = B)
+             dxy <- c(diff(D$window$xrange),diff(D$window$yrange))/(2*nd)
+             coords(D) <- coords(D)+matrix(runif(2,-dxy,dxy),npoints(D),2,byrow=TRUE)
+             inD <- which(inside.owin(D, w = W))
+             D <- D[W]
+             inD <- paste(i,inD,sep="_")
+           },
+         stop("unknown dummy type"))
+    if(marx && mark.repeat){
+      marks(D) <- factor(lev[i], levels = lev)
+      Dlist[[i]] <- D
+      if(type %in% c("stratrand","grid","transgrid"))
+        inDlist[[i]] <- inD
+    }
+  }
+  if(marx && mark.repeat){
+    inD <- Reduce(append, inDlist)
+    D <- Reduce(superimpose, Dlist)
+  }
+  if(type %in% c("stratrand","grid","transgrid"))
+    Dinfo <- append(Dinfo, list(inD=inD))
+  if(marx && !mark.repeat){
+    marks(D) <- sample(factor(lev, levels=lev), npoints(D), replace = TRUE)
+    Dinfo$rho <- Dinfo$rho/length(lev)
+  }
+  attr(D, "dummy.parameters") <- Dinfo
+  return(D)
+}
+
+quadscheme.logi <- function(data, dummy, dummytype = "stratrand", nd = NULL, mark.repeat = FALSE, ...){
+  data <- as.ppp(data)
+  ## If dummy is missing we generate dummy pattern with logi.dummy.
+  if(missing(dummy))
+    dummy <- logi.dummy(data, dummytype, nd, mark.repeat, ...)
+  Dinfo <- attr(dummy, "dummy.parameters")
+  D <- as.ppp(dummy)
+  if(is.null(Dinfo))
+    Dinfo <- list(how="given", rho=npoints(D)/(area(D)*markspace.integral(D)))
+  ## Weights:
+  n <- npoints(data)+npoints(D)
+  w <- area(Window(data))/n
+  Q <- quad(data, D, rep(w,n), param=Dinfo)
+  class(Q) <- c("logiquad", class(Q))
+  return(Q)
+}
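+
+## Usage sketch (not run):
+##   D <- logi.dummy(cells, dummytype="stratrand", nd=32)
+##   Q <- quadscheme.logi(cells, D)
+##   summary(Q)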
+
+summary.logiquad <- function(object, ..., checkdup=FALSE) {
+  verifyclass(object, "logiquad")
+  s <- list(
+       data  = summary.ppp(object$data, checkdup=checkdup),
+       dummy = summary.ppp(object$dummy, checkdup=checkdup),
+       param = object$param)
+  class(s) <- "summary.logiquad"
+  return(s)
+}
+
+print.summary.logiquad <- function(x, ..., dp=3) {
+  cat("Quadrature scheme = data + dummy\n")
+  Dinfo <- x$param
+  if(is.null(Dinfo))
+    cat("created by an unknown function.\n")
+  cat("Data pattern:\n")
+  print(x$data, dp=dp)
+
+  cat("\n\nDummy pattern:\n")
+  # How they were computed
+    switch(Dinfo$how,
+           stratrand={
+             cat(paste("(Stratified random dummy points,",
+                       paste(Dinfo$nd, collapse=" x "),
+                       "grid of cells)\n"))
+           },
+           binomial={
+             cat("(Binomial dummy points)\n")
+           },
+           poisson={
+             cat("(Poisson dummy points)\n")
+           },
+           grid={
+             cat(paste("(Fixed grid of dummy points,",
+                       paste(Dinfo$nd, collapse=" x "),
+                       "grid)\n"))
+           },
+           transgrid={
+             cat(paste("(Random translation of fixed grid of dummy points,",
+                       paste(Dinfo$nd, collapse=" x "),
+                       "grid)\n"))
+           },
+           given=cat("(Dummy points given by user)\n")
+       )
+  # Description of them
+  print(x$dummy, dp=dp)
+
+  return(invisible(NULL))
+}
diff --git a/R/lohboot.R b/R/lohboot.R
new file mode 100644
index 0000000..017affe
--- /dev/null
+++ b/R/lohboot.R
@@ -0,0 +1,118 @@
+#
+#  lohboot.R
+#
+#  $Revision: 1.14 $   $Date: 2017/02/07 08:12:05 $
+#
+#  Loh's bootstrap CIs for local pcf, local K etc.
+#
+
+lohboot <-
+  function(X,
+           fun=c("pcf", "Kest", "Lest", "pcfinhom", "Kinhom", "Linhom"),
+           ..., nsim=200, confidence=0.95, global=FALSE, type=7) {
+  stopifnot(is.ppp(X))
+  fun.name <- short.deparse(substitute(fun))
+  if(is.character(fun)) {
+    fun <- match.arg(fun)
+  } else if(is.function(fun)) {
+    flist <- list(pcf=pcf, Kest=Kest, Lest=Lest,
+                  pcfinhom=pcfinhom, Kinhom=Kinhom, Linhom=Linhom)
+    id <- match(list(fun), flist)
+    if(is.na(id))
+      stop(paste("Loh's bootstrap is not supported for the function",
+                 sQuote(fun.name)))
+    fun <- names(flist)[id]
+  } else stop("Unrecognised format for argument fun")
+  # validate confidence level
+  stopifnot(confidence > 0.5 && confidence < 1)
+  alpha <- 1 - confidence
+  if(!global) {
+    probs <- c(alpha/2, 1-alpha/2)
+    rank <- nsim * probs[2L]
+  } else {
+    probs <- 1-alpha
+    rank <- nsim * probs
+  }
+  if(abs(rank - round(rank)) > 0.001)
+    warning(paste("confidence level", confidence,
+                  "corresponds to a non-integer rank", paren(rank),
+                  "so quantiles will be interpolated"))
+  n <- npoints(X)
+  # compute local functions
+  localfun <- switch(fun,
+                     pcf=localpcf,
+                     Kest=localK,
+                     Lest=localL,
+                     pcfinhom=localpcfinhom,
+                     Kinhom=localKinhom,
+                     Linhom=localLinhom)
+  f <- localfun(X, ...)
+  theo <- f$theo
+  # parse edge correction info
+  correction <- attr(f, "correction")
+  switch(correction,
+         none      = { ctag <- "un";    cadj <- "uncorrected" },
+         border    = { ctag <- "bord";  cadj <- "border-corrected" },
+         translate = { ctag <- "trans"; cadj <- "translation-corrected" },
+         isotropic = { ctag <- "iso";   cadj <- "Ripley isotropic corrected" })
+  # first n columns are the local pcfs (etc) for the n points of X
+  y <- as.matrix(as.data.frame(f))[, 1:n]
+  nr <- nrow(y)
+  # average them
+  ymean <- .rowMeans(y, na.rm=TRUE, nr, n)
+  # resample
+  ystar <- matrix(, nrow=nr, ncol=nsim)
+  for(i in 1:nsim) {
+    # resample n points with replacement
+    ind <- sample(n, replace=TRUE)
+    # average their local pcfs
+    ystar[,i] <- .rowMeans(y[,ind], nr, n, na.rm=TRUE)
+  }
+  # compute quantiles
+  if(!global) {
+    # pointwise quantiles
+    hilo <- apply(ystar, 1, quantile,
+                  probs=probs, na.rm=TRUE, type=type)
+  } else {
+    # quantiles of deviation
+    ydif <- sweep(ystar, 1, ymean)
+    ydev <- apply(abs(ydif), 2, max, na.rm=TRUE)
+    crit <- quantile(ydev, probs=probs, na.rm=TRUE, type=type)
+    hilo <- rbind(ymean - crit, ymean + crit)
+  }
+  # create fv object
+  df <- data.frame(r=f$r,
+                   theo=theo,
+                   ymean,
+                   lo=hilo[1L,],
+                   hi=hilo[2L,])
+  colnames(df)[3L] <- ctag
+  CIlevel <- paste(100 * confidence, "%% confidence", sep="")
+  desc <- c("distance argument r",
+            "theoretical Poisson %s",
+            paste(cadj, "estimate of %s"),
+            paste("lower", CIlevel, "limit for %s"),
+            paste("upper", CIlevel, "limit for %s"))
+  clabl <- paste("hat(%s)[", ctag, "](r)", sep="")
+  labl <- c("r", "%s[pois](r)", clabl, "%s[loCI](r)", "%s[hiCI](r)")
+  switch(fun,
+         pcf={ fname <- "g" ; ylab <- quote(g(r)) },
+         Kest={ fname <- "K" ; ylab <- quote(K(r)) },
+         Lest={ fname <- "L" ; ylab <- quote(L(r)) },
+         pcfinhom={ fname <- "g[inhom]" ; ylab <- quote(g[inhom](r)) },
+         Kinhom={ fname <- "K[inhom]" ; ylab <- quote(K[inhom](r)) },
+         Linhom={ fname <- "L[inhom]" ; ylab <- quote(L[inhom](r)) })
+  g <- fv(df, "r", ylab, ctag, , c(0, max(f$r)), labl, desc, fname=fname)
+  formula(g) <- . ~ r
+  fvnames(g, ".") <- c(ctag, "theo", "hi", "lo")
+  fvnames(g, ".s") <- c("hi", "lo")
+  unitname(g) <- unitname(X)
+  g
+}
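+
+## Usage sketch (not run):
+##   ci <- lohboot(cells, "pcf")                  # pointwise 95% confidence band
+##   plot(ci)
+##   cig <- lohboot(cells, "Kest", global=TRUE)   # simultaneous (global) band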
+
diff --git a/R/lpp.R b/R/lpp.R
new file mode 100755
index 0000000..eb7df93
--- /dev/null
+++ b/R/lpp.R
@@ -0,0 +1,664 @@
+#
+# lpp.R
+#
+#  $Revision: 1.56 $   $Date: 2017/08/08 03:24:35 $
+#
+# Class "lpp" of point patterns on linear networks
+
+lpp <- function(X, L, ...) {
+  stopifnot(inherits(L, "linnet"))
+  if(missing(X) || is.null(X)) {
+    ## empty pattern
+    df <- data.frame(x=numeric(0), y=numeric(0))
+    lo <- data.frame(seg=integer(0), tp=numeric(0))
+  } else {
+    localnames <- c("seg", "tp")
+    spatialnames <- c("x", "y")
+    allcoordnames <- c(spatialnames, localnames)
+    if(is.matrix(X)) X <- as.data.frame(X)
+    if(checkfields(X, localnames)) {
+      #' X includes at least local coordinates
+      X <- as.data.frame(X)
+      #' validate local coordinates
+      if(nrow(X) > 0) {
+        nedge <- nsegments(L)
+        #' vectorised '|' is needed here: '||' would test only the first point
+        if(with(X, any(seg < 1 | seg > nedge)))
+          stop("Segment index coordinate 'seg' outside the valid range")
+        if(with(X, any(tp < 0 | tp > 1)))
+          stop("Local coordinate 'tp' outside [0,1]")
+      }
+      if(!checkfields(X, spatialnames)) {
+        #' data give local coordinates only
+        #' reconstruct x,y coordinates from local coordinates
+        Y <- local2lpp(L, X$seg, X$tp, df.only=TRUE)
+        X[,spatialnames] <- Y[,spatialnames,drop=FALSE]
+      }
+      #' local coordinates
+      lo <- X[ , localnames, drop=FALSE]
+      #' spatial coords and marks
+      marknames <- setdiff(names(X), allcoordnames)
+      df <- X[, c(spatialnames, marknames), drop=FALSE]
+    } else {
+      #' local coordinates must be computed from spatial coordinates
+      if(!is.ppp(X))
+        X <- as.ppp(X, W=L$window, ...)
+      #' project to segment
+      pro <- project2segment(X, as.psp(L))
+      #' projected points (spatial coordinates and marks)
+      df  <- as.data.frame(pro$Xproj)
+      #' local coordinates
+      lo  <- data.frame(seg=pro$mapXY, tp=pro$tp)
+    }
+  }
+  # combine spatial, local, marks
+  nmark <- ncol(df) - 2
+  if(nmark == 0) {
+    df <- cbind(df, lo)
+    ctype <- c(rep("s", 2), rep("l", 2))
+  } else {
+    df <- cbind(df[,1:2], lo, df[, -(1:2), drop=FALSE])
+    ctype <- c(rep("s", 2), rep("l", 2), rep("m", nmark))
+  }
+  out <- ppx(data=df, domain=L, coord.type=ctype)
+  class(out) <- c("lpp", class(out))
+  return(out)
+}
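+
+## Usage sketch (not run): two ways to build a pattern on 'simplenet'
+##   P <- runifpoint(10, as.owin(simplenet))   # (x,y) points, projected onto the network
+##   X1 <- lpp(P, simplenet)
+##   co <- data.frame(seg=c(1, 2), tp=c(0.25, 0.5))
+##   X2 <- lpp(co, simplenet)                  # local coordinates supplied directly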
+
+print.lpp <- function(x, ...) {
+  stopifnot(inherits(x, "lpp"))
+  splat("Point pattern on linear network")
+  sd <- summary(x$data)
+  np <- sd$ncases
+  nama <- sd$col.names
+  splat(np, ngettext(np, "point", "points"))
+  ## check for unusual coordinates
+  ctype <- x$ctype
+  nam.m <- nama[ctype == "mark"]
+  nam.t <- nama[ctype == "temporal"]
+  nam.c <- setdiff(nama[ctype == "spatial"], c("x","y"))
+  nam.l <- setdiff(nama[ctype == "local"], c("seg", "tp"))
+  if(length(nam.c) > 0)
+    splat("Additional spatial coordinates", commasep(sQuote(nam.c)))
+  if(length(nam.l) > 0)
+    splat("Additional local coordinates", commasep(sQuote(nam.l)))
+  if(length(nam.t) > 0)
+    splat("Additional temporal coordinates", commasep(sQuote(nam.t)))
+  if((nmarks <- length(nam.m)) > 0) {
+    if(nmarks > 1) {
+      splat(nmarks, "columns of marks:", commasep(sQuote(nam.m)))
+    } else {
+      marx <- marks(x)
+      if(is.factor(marx)) {
+        exhibitStringList("Multitype, with possible types:", levels(marx))
+      } else splat("Marks of type", sQuote(typeof(marx)))
+    }
+  }
+  print(x$domain, ...)
+  return(invisible(NULL))
+}
+
+plot.lpp <- function(x, ..., main, add=FALSE,
+                     use.marks=TRUE, which.marks=NULL,
+                     show.all=!add, show.window=FALSE,
+                     show.network=TRUE,
+                     do.plot=TRUE, multiplot=TRUE) {
+  if(missing(main))
+    main <- short.deparse(substitute(x))
+  ## Handle multiple columns of marks as separate plots
+  ##  (unless add=TRUE or which.marks selects a single column
+  ##   or multiplot = FALSE)
+  mx <- marks(x)
+  if(use.marks && !is.null(dim(mx))) {
+    implied.all <- is.null(which.marks)
+    want.several <- implied.all || !is.null(dim(mx <- mx[,which.marks]))
+    do.several <- want.several && !add && multiplot
+    if(want.several)
+      mx <- as.data.frame(mx) #' ditch hyperframe columns
+    if(do.several) {
+      ## generate one plot for each column of marks
+      y <- solapply(mx, setmarks, x=x)
+      out <- do.call(plot,
+                     c(list(x=y, main=main, do.plot=do.plot,
+                            show.window=show.window),
+                       list(...)))
+      return(invisible(out))
+    }
+    if(is.null(which.marks)) {
+      which.marks <- 1
+      if(do.plot) message("Plotting the first column of marks")
+    }
+  }
+  ## determine space required, including legend
+  P <- as.ppp(x)
+  a <- plot(P, ..., do.plot=FALSE)
+  if(!do.plot) return(a)
+  ## initialise graphics space
+  if(!add) {
+    if(show.window) {
+      plot(Window(P), main=main, invert=TRUE, ...)
+    } else {
+      b <- attr(a, "bbox")
+      plot(b, type="n", main=main, ..., show.all=FALSE)
+    }
+  }
+  ## plot linear network
+  if(show.network) {
+    L <- as.linnet(x)
+    do.call.matched(plot.linnet,
+                    resolve.defaults(list(x=L, add=TRUE),
+                                     list(...)),
+                    extrargs=c("lty", "lwd", "col"))
+  }
+  ## plot points, legend, title
+  ans <- do.call.matched(plot.ppp,
+                         c(list(x=P, add=TRUE, main=main,
+                                show.all=show.all, show.window=FALSE),
+                           list(...)),
+                         extrargs=c("shape", "size", "pch", "cex",
+                           "fg", "bg", "cols", "lty", "lwd", "etch",
+                           "cex.main", "col.main", "line", "outer", "sub"))
+  return(invisible(ans))
+}
+
+
+summary.lpp <- function(object, ...) {
+  stopifnot(inherits(object, "lpp"))
+  L <- object$domain
+  result <- summary(L)
+  result$npoints <- np <- npoints(object)
+  result$intensity <- np/result$totlength
+  result$is.marked <- is.marked(object)
+  result$is.multitype <- is.multitype(object)
+  if(result$is.marked) {
+    mks <- marks(object)
+    if(result$multiple.marks <- is.data.frame(mks)) {
+      result$marknames <- names(mks)
+      result$is.numeric <- FALSE
+      result$marktype <- "dataframe"
+      result$is.multitype <- FALSE
+    } else {
+      result$is.numeric <- is.numeric(mks)
+      result$marknames <- "marks"
+      result$marktype <- typeof(mks)
+      result$is.multitype <- is.multitype(object)
+    }
+    if(result$is.multitype) {
+      tm <- as.vector(table(mks))
+      tfp <- data.frame(frequency=tm,
+                        proportion=tm/sum(tm),
+                        intensity=tm/result$totlength,
+                        row.names=levels(mks))
+      result$marks <- tfp
+    } else 
+      result$marks <- summary(mks)
+  }
+  class(result) <- "summary.lpp"
+  return(result)
+}
+
+print.summary.lpp <- function(x, ...) {
+  splat("Point pattern on linear network")
+  splat(x$npoints, "points")
+  splat("Linear network with",
+        x$nvert, "vertices and",
+        x$nline, "lines")
+  u <- x$unitinfo
+  dig <- getOption('digits')
+  splat("Total length", signif(x$totlength, dig), u$plural, u$explain)
+  splat("Average intensity", signif(x$intensity, dig),
+        "points per", if(u$vanilla) "unit length" else u$singular)
+  if(x$is.marked) {
+    if(x$multiple.marks) {
+      splat("Mark variables:", commasep(x$marknames, ", "))
+      cat("Summary:\n")
+      print(x$marks)
+    } else if(x$is.multitype) {
+      cat("Multitype:\n")
+      print(signif(x$marks,dig))
+    } else {
+      splat("marks are ",
+            if(x$is.numeric) "numeric, ",
+            "of type ", sQuote(x$marktype),
+            sep="")
+      cat("Summary:\n")
+      print(x$marks)
+    }
+  }
+  print(x$win, prefix="Enclosing window: ")
+  invisible(NULL)
+}
+
+intensity.lpp <- function(X, ...) {
+  len <- sum(lengths.psp(as.psp(as.linnet(X))))
+  if(is.multitype(X)) table(marks(X))/len else npoints(X)/len
+}
+
+is.lpp <- function(x) {
+  inherits(x, "lpp")
+}
+
+is.multitype.lpp <- function(X, na.action="warn", ...) {
+  marx <- marks(X)
+  if(is.null(marx))
+    return(FALSE)
+  if((is.data.frame(marx) || is.hyperframe(marx)) && ncol(marx) > 1)
+    return(FALSE)
+  if(!is.factor(marx))
+    return(FALSE)
+  if((length(marx) > 0) && anyNA(marx))
+    switch(na.action,
+           warn = {
+             warning(paste("some mark values are NA in the point pattern",
+                           short.deparse(substitute(X))))
+           },
+           fatal = {
+             return(FALSE)
+           },
+           ignore = {}
+           )
+  return(TRUE)
+}
+
+as.lpp <- function(x=NULL, y=NULL, seg=NULL, tp=NULL, ...,
+                   marks=NULL, L=NULL, check=FALSE, sparse) {
+  nomore <- is.null(y) && is.null(seg) && is.null(tp)
+  if(inherits(x, "lpp") && nomore) {
+    X <- x
+    if(!missing(sparse) && !is.null(sparse))
+      X$domain <- as.linnet(domain(X), sparse=sparse)
+  } else {
+    if(!inherits(L, "linnet"))
+      stop("L should be a linear network")
+    if(!missing(sparse) && !is.null(sparse))
+      L <- as.linnet(L, sparse=sparse)
+    if(is.ppp(x) && nomore) {
+      X <- lpp(x, L)
+    } else if(is.null(x) && is.null(y) && !is.null(seg) && !is.null(tp)){
+      X <- lpp(data.frame(seg=seg, tp=tp), L=L)
+    } else {
+      if(is.numeric(x) && length(x) == 2 && is.null(y)) {
+        xy <- list(x=x[1L], y=x[2L])
+      } else  {
+        xy <- xy.coords(x,y)[c("x", "y")]
+      }
+      if(!is.null(seg) && !is.null(tp)) {
+        # add segment map information
+        xy <- append(xy, list(seg=seg, tp=tp))
+      } else {
+        # convert to ppp, typically suppressing check mechanism
+        xy <- as.ppp(xy, W=as.owin(L), check=check)
+      }
+      X <- lpp(xy, L)
+    }
+  }
+  if(!is.null(marks))
+    marks(X) <- marks
+  return(X)
+}
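+
+## Reviewer's usage sketch (illustrative, not upstream code): two of the
+## calling conventions accepted by as.lpp, assuming spatstat is attached.
+if(FALSE) {
+  X1 <- as.lpp(x=runif(5), y=runif(5), L=simplenet)         # from (x,y)
+  X2 <- as.lpp(seg=c(1L, 3L), tp=c(0.2, 0.9), L=simplenet)  # from (seg,tp)
+}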
+
+as.ppp.lpp <- function(X, ..., fatal=TRUE) {
+  verifyclass(X, "lpp", fatal=fatal)
+  L <- X$domain
+  Y <- as.ppp(coords(X, temporal=FALSE, local=FALSE),
+              W=L$window, check=FALSE)
+  if(!is.null(marx <- marks(X))) {
+    if(is.hyperframe(marx)) marx <- as.data.frame(marx)
+    marks(Y) <- marx
+  }
+  return(Y)
+}
+
+Window.lpp <- function(X, ...) { as.owin(X) }
+
+"Window<-.lpp" <- function(X, ..., check=TRUE, value) {
+  if(check) {
+    X <- X[value]
+  } else {
+    Window(X$domain, check=FALSE) <- value
+  }
+  return(X)
+}
+
+as.owin.lpp <- function(W,  ..., fatal=TRUE) {
+  as.owin(as.ppp(W, ..., fatal=fatal))
+}
+
+domain.lpp <- function(X, ...) { as.linnet(X) }
+
+as.linnet.lpp <- function(X, ..., fatal=TRUE, sparse) {
+  verifyclass(X, "lpp", fatal=fatal)
+  L <- X$domain
+  if(!missing(sparse))
+    L <- as.linnet(L, sparse=sparse)
+  return(L)
+}
+  
+unitname.lpp <- function(x) {
+  u <- unitname(x$domain)
+  return(u)
+}
+
+"unitname<-.lpp" <- function(x, value) {
+  w <- x$domain
+  unitname(w) <- value
+  x$domain <- w
+  return(x)
+}
+
+"marks<-.lpp" <- function(x, ..., value) {
+  NextMethod("marks<-")
+}
+  
+unmark.lpp <- function(X) {
+  NextMethod("unmark")
+}
+
+as.psp.lpp <- function(x, ..., fatal=TRUE){
+  verifyclass(x, "lpp", fatal=fatal)
+  return(x$domain$lines)
+}
+
+nsegments.lpp <- function(x) {
+  return(x$domain$lines$n)
+}
+
+local2lpp <- function(L, seg, tp, X=NULL, df.only=FALSE) {
+  stopifnot(inherits(L, "linnet"))
+  if(is.null(X)) {
+    # map to (x,y)
+    Ldf <- as.data.frame(L$lines)
+    dx <- with(Ldf, x1-x0)
+    dy <- with(Ldf, y1-y0)
+    x <- with(Ldf, x0[seg] + tp * dx[seg])
+    y <- with(Ldf, y0[seg] + tp * dy[seg])
+  } else {
+    x <- X$x
+    y <- X$y
+  }
+  # compile into data frame
+  data <- data.frame(x=x, y=y, seg=seg, tp=tp)
+  if(df.only) return(data)
+  ctype <- c("s", "s", "l", "l")
+  out <- ppx(data=data, domain=L, coord.type=ctype)
+  class(out) <- c("lpp", class(out))
+  return(out)
+}
+
+####################################################
+# subset extractor
+####################################################
+
+"[.lpp" <- function (x, i, j, drop=FALSE, ..., snip=TRUE) {
+  if(!missing(i) && !is.null(i)) {
+    if(is.owin(i)) {
+      # spatial domain: call code for 'j'
+      xi <- x[,i,snip=snip]
+    } else {
+      # usual row-type index
+      da <- x$data
+      daij <- da[i, , drop=FALSE]
+      xi <- ppx(data=daij, domain=x$domain, coord.type=as.character(x$ctype))
+      if(drop)
+        xi <- xi[drop=TRUE] # call [.ppx to remove unused factor levels
+      class(xi) <- c("lpp", class(xi))
+    }
+    x <- xi
+  } 
+  if(missing(j) || is.null(j))
+    return(x)
+  stopifnot(is.owin(j))
+  w <- j
+  L <- x$domain
+  # Find vertices that lie inside 'w'
+  vertinside <- inside.owin(L$vertices, w=w)
+  from <- L$from
+  to   <- L$to
+  if(snip) {
+    ## For efficiency, first restrict network to relevant segments.
+    ## Find segments with at least one endpoint in 'w'
+    okedge <- vertinside[from] | vertinside[to]
+    ## extract relevant subset of network graph
+    x <- thinNetwork(x, retainedges=okedge)
+    ## Now add vertices at crossing points with boundary of 'w'
+    b <- crossing.psp(as.psp(L), edges(w))
+    x <- insertVertices(x, unique(b))
+    boundarypoints <- attr(x, "id")
+    ## update data
+    L <- x$domain
+    from <- L$from
+    to   <- L$to
+    vertinside <- inside.owin(L$vertices, w=w)
+    vertinside[boundarypoints] <- TRUE
+  }
+  ## find segments whose endpoints BOTH lie in 'w'
+  edgeinside <- vertinside[from] & vertinside[to]
+  ## extract relevant subset of network
+  xnew <- thinNetwork(x, retainedges=edgeinside)
+  ## adjust window without checking
+  Window(xnew, check=FALSE) <- w
+  return(xnew)
+}
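+
+## Reviewer's usage sketch (illustrative, not upstream code): restricting
+## a pattern to a subwindow. With snip=TRUE (the default), segments
+## crossing the boundary are cut at the crossing points; with snip=FALSE
+## only segments lying entirely inside the window are retained.
+if(FALSE) {
+  X <- runiflpp(50, simplenet)
+  W <- owin(c(0.1, 0.7), c(0.1, 0.7))
+  X[W]              # network snipped at the boundary of W
+  X[W, snip=FALSE]  # only segments wholly inside W
+}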
+
+####################################################
+# affine transformations
+####################################################
+
+scalardilate.lpp <- function(X, f, ...) {
+  trap.extra.arguments(..., .Context="In scalardilate(X,f)")
+  check.1.real(f, "In scalardilate(X,f)")
+  stopifnot(is.finite(f) && f > 0)
+  Y <- X
+  Y$data$x <- f * as.numeric(X$data$x)
+  Y$data$y <- f * as.numeric(X$data$y)
+  Y$domain <- scalardilate(X$domain, f)
+  return(Y)
+}
+
+affine.lpp <- function(X,  mat=diag(c(1,1)), vec=c(0,0), ...) {
+  verifyclass(X, "lpp")
+  Y <- X
+  Y$data[, c("x","y")] <- affinexy(X$data[, c("x","y")], mat=mat, vec=vec)
+  Y$domain <- affine(X$domain, mat=mat, vec=vec, ...)
+  return(Y)
+}
+
+shift.lpp <- function(X, vec=c(0,0), ..., origin=NULL) {
+  verifyclass(X, "lpp")
+  Y <- X
+  Y$domain <- shift(X$domain, vec=vec, ..., origin=origin)
+  vec <- getlastshift(Y$domain)
+  Y$data[, c("x","y")] <- shiftxy(X$data[, c("x","y")], vec=vec)
+  # tack on shift vector
+  attr(Y, "lastshift") <- vec
+  return(Y)
+}
+
+rotate.lpp <- function(X, angle=pi/2, ..., centre=NULL) {
+  verifyclass(X, "lpp")
+  if(!is.null(centre)) {
+    X <- shift(X, origin=centre)
+    negorigin <- getlastshift(X)
+  } else negorigin <- NULL
+  Y <- X
+  Y$data[, c("x","y")] <- rotxy(X$data[, c("x","y")], angle=angle)
+  Y$domain <- rotate(X$domain, angle=angle, ...)
+  if(!is.null(negorigin))
+    Y <- shift(Y, -negorigin)
+  return(Y)
+}
+
+rescale.lpp <- function(X, s, unitname) {
+  if(missing(unitname)) unitname <- NULL
+  if(missing(s)) s <- 1/unitname(X)$multiplier
+  Y <- scalardilate(X, f=1/s)
+  unitname(Y) <- rescale(unitname(X), s, unitname)
+  return(Y)
+}
+
+superimpose.lpp <- function(..., L=NULL) {
+  objects <- list(...)
+  if(!is.null(L) && !inherits(L, "linnet"))
+    stop("L should be a linear network")
+  if(length(objects) == 0) {
+    if(is.null(L)) return(NULL)
+    emptyX <- lpp(list(x=numeric(0), y=numeric(0)), L)
+    return(emptyX)
+  }
+  islpp <- unlist(lapply(objects, is.lpp))
+  if(is.null(L) && !any(islpp))
+    stop("Cannot determine linear network: no lpp objects given")
+  nets <- unique(lapply(objects[islpp], as.linnet))
+  if(length(nets) > 1)
+    stop("Point patterns are defined on different linear networks")
+  if(!is.null(L)) {
+    nets <- unique(append(nets, list(L)))
+    if(length(nets) > 1)
+      stop("Argument L is a different linear network")
+  }
+  L <- nets[[1L]]
+  ## convert list(x,y) to linear network, etc
+  if(any(!islpp))
+    objects[!islpp] <- lapply(objects[!islpp], lpp, L=L)
+  ## concatenate coordinates 
+  locns <- do.call(rbind, lapply(objects, coords))
+  ## concatenate marks (or use names of arguments)
+  marx <- superimposeMarks(objects, sapply(objects, npoints))
+  ## make combined pattern
+  Y <- lpp(locns, L)
+  marks(Y) <- marx
+  return(Y)
+}
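+
+## Reviewer's usage sketch (illustrative, not upstream code): the names
+## of the arguments become mark levels, via superimposeMarks.
+if(FALSE) {
+  A <- runiflpp(10, simplenet)
+  B <- runiflpp(15, simplenet)
+  Y <- superimpose(A=A, B=B)  # 25 points marked "A" or "B"
+}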
+
+#
+# interactive plot for lpp objects
+#
+
+iplot.lpp <- function(x, ..., xname) {
+  if(missing(xname))
+    xname <- short.deparse(substitute(x))
+  stopifnot(is.lpp(x))
+  ## predigest
+  L <- domain(x)
+  v <- vertices(L)
+  deg <- vertexdegree(L)
+  dv <- textstring(v, txt=paste(deg))
+  y <- layered(lines=as.psp(L),
+               vertices=v,
+               degree=dv,
+               points=as.ppp(x))
+  iplot(y, ..., xname=xname, visible=c(TRUE, FALSE, FALSE, TRUE))
+}
+
+identify.lpp <- function(x, ...) {
+  verifyclass(x, "lpp")
+  P <- as.ppp(x)
+  id <- identify(P$x, P$y, ...)
+  if(!is.marked(x)) return(id)
+  marks <- as.data.frame(P)[id, -(1:2)]
+  out <- cbind(data.frame(id=id), marks)
+  row.names(out) <- NULL
+  return(out)
+}
+
+cut.lpp <- function(x, z=marks(x), ...) {
+  if(missing(z) || is.null(z)) {
+    z <- marks(x, dfok=TRUE)
+    if(is.null(z))
+      stop("x has no marks to cut")
+  } else {
+    #' special objects
+    if(inherits(z, "linim")) {
+      z <- z[x, drop=FALSE]
+    } else if(inherits(z, "linfun")) {
+      z <- z(x)
+    } else if(inherits(z, "lintess")) {
+      z <- (as.linfun(z))(x)
+    }
+  }
+  #' standard data types
+  if(is.character(z)) {
+    if(length(z) == npoints(x)) {
+      # interpret as a factor
+      z <- factor(z)
+    } else if((length(z) == 1) && (z %in% colnames(df <- as.data.frame(x)))) {
+      # interpret as the name of a column of marks or a coordinate
+      zname <- z
+      z <- df[, zname]
+      if(zname == "seg") z <- factor(z)
+    } else stop("format of argument z not understood") 
+  }
+  if(is.factor(z) || is.vector(z)) {
+    stopifnot(length(z) == npoints(x))
+    g <- if(is.factor(z)) z else if(is.numeric(z)) cut(z, ...) else factor(z)
+    marks(x) <- g
+    return(x)
+  }
+  if(is.data.frame(z) || is.matrix(z)) {
+    stopifnot(nrow(z) == npoints(x))
+    # take first column 
+    z <- z[,1L]
+    g <- if(is.numeric(z)) cut(z, ...) else factor(z)
+    marks(x) <- g
+    return(x)
+  }
+  stop("Format of z not understood")
+}
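+
+## Reviewer's usage sketch (illustrative, not upstream code): numeric
+## marks are binned by cut.default; the column name "seg" is treated
+## specially and converted to a factor rather than binned.
+if(FALSE) {
+  X <- runiflpp(30, simplenet)
+  marks(X) <- runif(30)
+  cut(X, breaks=3)  # marks replaced by a 3-level factor
+  cut(X, z="seg")   # factor indicating the segment carrying each point
+}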
+
+points.lpp <- function(x, ...) {
+  points(coords(x, spatial=TRUE, local=FALSE), ...)
+}
+
+connected.lpp <- function(X, R=Inf, ..., dismantle=TRUE) {
+  if(!dismantle) {
+    if(is.infinite(R)) {
+      Y <- X %mark% factor(1)
+      attr(Y, "retainpoints") <- attr(X, "retainpoints")
+      return(Y)
+    }
+    check.1.real(R)
+    stopifnot(R >= 0)
+    nv <- npoints(X)
+    close <- (pairdist(X) <= R)
+    diag(close) <- FALSE
+    ij <- which(close, arr.ind=TRUE)
+    ie <- ij[,1] - 1L
+    je <- ij[,2] - 1L
+    ne <- length(ie)
+    zz <- .C("cocoGraph",
+           nv=as.integer(nv),
+           ne=as.integer(ne),
+           ie=as.integer(ie),
+           je=as.integer(je),
+           label=as.integer(integer(nv)),
+           status=as.integer(integer(1L)),
+           PACKAGE = "spatstat")
+    if(zz$status != 0)
+      stop("Internal error: connected.ppp did not converge")
+    lab <- zz$label + 1L
+    # Renumber labels sequentially 
+    lab <- as.integer(factor(lab))
+    # Convert labels to factor
+    lab <- factor(lab)
+    # Apply to points
+    Y <- X %mark% lab
+    attr(Y, "retainpoints") <- attr(X, "retainpoints")
+    return(Y)
+  }
+  # first break the *network* into connected components
+  L <- domain(X)
+  lab <- connected(L, what="labels")
+  if(length(levels(lab)) == 1) {
+    XX <- solist(X)
+  } else {
+    subsets <- split(seq_len(nvertices(L)), lab)
+    XX <- solist()
+    for(i in seq_along(subsets)) 
+      XX[[i]] <- thinNetwork(X, retainvertices=subsets[[i]])
+  }
+  # now find R-connected components in each dismantled piece
+  YY <- solapply(XX, connected.lpp, R=R, dismantle=FALSE)
+  if(length(YY) == 1)
+    YY <- YY[[1]]
+  return(YY)
+}
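+
+## Reviewer's usage sketch (illustrative, not upstream code): simplenet
+## is a connected network, so dismantling yields a single piece and the
+## result is X marked by membership of R-connected components.
+if(FALSE) {
+  X <- runiflpp(25, simplenet)
+  Y <- connected(X, R=0.3)
+  table(marks(Y))  # sizes of the components
+}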
diff --git a/R/lppm.R b/R/lppm.R
new file mode 100755
index 0000000..0390837
--- /dev/null
+++ b/R/lppm.R
@@ -0,0 +1,343 @@
+#
+#  lppm.R
+#
+#  Point process models on a linear network
+#
+#  $Revision: 1.39 $   $Date: 2017/06/05 10:31:58 $
+#
+
+lppm <- function(X, ...) {
+  UseMethod("lppm")
+}
+
+
+lppm.formula <- function(X, interaction=NULL, ..., data=NULL) {
+  ## remember call
+  callstring <- paste(short.deparse(sys.call()), collapse = "")
+  cl <- match.call()
+
+  ########### INTERPRET FORMULA ##############################
+  
+  if(!inherits(X, "formula"))
+    stop(paste("Argument 'X' should be a formula"))
+  formula <- X
+  
+  if(spatstat.options("expand.polynom"))
+    formula <- expand.polynom(formula)
+
+  ## check formula has LHS and RHS. Extract them
+  if(length(formula) < 3)
+    stop(paste("Formula must have a left hand side"))
+  Yexpr <- formula[[2L]]
+  trend <- formula[c(1L,3L)]
+  
+  ## FIT #######################################
+  thecall <- call("lppm", X=Yexpr, trend=trend,
+                  data=data, interaction=interaction)
+  ncall <- length(thecall)
+  argh <- list(...)
+  nargh <- length(argh)
+  if(nargh > 0) {
+    thecall[ncall + 1:nargh] <- argh
+    names(thecall)[ncall + 1:nargh] <- names(argh)
+  }
+  result <- eval(thecall, parent.frame())
+
+  result$call <- cl
+  result$callstring <- callstring
+
+  return(result)
+}
+
+lppm.lpp <- function(X, ..., eps=NULL, nd=1000, random=FALSE) {
+  Xname <- short.deparse(substitute(X))
+  callstring <- paste(short.deparse(sys.call()), collapse = "")
+  cl <- match.call()
+  nama <- names(list(...))
+  resv <- c("method", "forcefit")
+  if(any(clash <- resv %in% nama))
+    warning(paste(ngettext(sum(clash), "Argument", "Arguments"),
+                  commasep(sQuote(resv[clash])),
+                  "must not be used"))
+  stopifnot(inherits(X, "lpp"))
+  Q <- linequad(X, eps=eps, nd=nd, random=random)
+  fit <- ppm(Q, ..., method="mpl", forcefit=TRUE)
+  if(!is.poisson.ppm(fit))
+    warning("Non-Poisson models currently use Euclidean distance")
+  out <- list(X=X, fit=fit, Xname=Xname, call=cl, callstring=callstring)
+  class(out) <- "lppm"
+  return(out)
+}
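+
+## Reviewer's usage sketch (illustrative, not upstream code): fitting
+## Poisson models via the formula interface above; 'chicago' is the
+## street-crime dataset shipped with spatstat.
+if(FALSE) {
+  fit0 <- lppm(unmark(chicago) ~ 1)  # homogeneous Poisson
+  fit1 <- lppm(unmark(chicago) ~ x)  # log-linear in the x coordinate
+  anova(fit0, fit1, test="Chi")      # likelihood ratio test
+}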
+
+is.lppm <- function(x) { inherits(x, "lppm") }
+
+# undocumented
+as.ppm.lppm <- function(object) { object$fit }
+
+fitted.lppm <- function(object, ..., dataonly=FALSE, new.coef=NULL,
+                        leaveoneout=FALSE) {
+  pfit <- object$fit
+  v <- fitted(pfit, dataonly=dataonly, new.coef=new.coef,
+              leaveoneout=leaveoneout)
+  return(v)
+}
+  
+predict.lppm <- function(object, ..., 
+                         type="trend", locations=NULL,
+                         new.coef=NULL) {
+  type <- pickoption("type", type,
+                     c(trend="trend", cif="cif", lambda="cif"))
+  X <- object$X
+  fit <- object$fit
+  L <- as.linnet(X)
+
+  if(!is.null(locations)) {
+    # locations given; return a vector of predicted values
+    values <- predict(fit, locations=locations, type=type, new.coef=new.coef)
+    return(values)
+  }
+  
+  # locations not given; want a pixel image
+  # pixellate the lines
+  Llines <- as.psp(L)
+  linemask <- as.mask.psp(Llines, ...)
+  lineimage <- as.im(linemask)
+  # extract pixel centres
+  xx <- rasterx.mask(linemask)
+  yy <- rastery.mask(linemask)
+  mm <- linemask$m
+  xx <- as.vector(xx[mm])
+  yy <- as.vector(yy[mm])
+  pixelcentres <- ppp(xx, yy, window=as.rectangle(linemask), check=FALSE)
+  pixdf <- data.frame(xc=xx, yc=yy)
+  # project pixel centres onto lines
+  p2s <- project2segment(pixelcentres, Llines)
+  projloc <- as.data.frame(p2s$Xproj)
+  projmap <- as.data.frame(p2s[c("mapXY", "tp")])
+  projdata <- cbind(pixdf, projloc, projmap)
+  # predict at the projected points
+  if(!is.multitype(fit)) {
+    values <- predict(fit, locations=projloc, type=type, new.coef=new.coef)
+    # map to nearest pixels
+    Z <- lineimage
+    Z[pixelcentres] <- values
+    # attach exact line position data
+    df <- cbind(projdata, values)
+    out <- linim(L, Z, df=df)
+  } else {
+    # predict for each type
+    lev <- levels(marks(data.ppm(fit)))
+    out <- list()
+    for(k in seq(length(lev))) {
+      markk <- factor(lev[k], levels=lev)
+      locnk <- cbind(projloc, data.frame(marks=markk))
+      values <- predict(fit, locations=locnk, type=type, new.coef=new.coef)
+      Z <- lineimage
+      Z[pixelcentres] <- values
+      df <- cbind(projdata, values)
+      out[[k]] <- linim(L, Z, df=df)
+    }
+    out <- as.solist(out)
+    names(out) <- as.character(lev)
+  }
+  return(out)
+}
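+
+## Reviewer's usage sketch (illustrative, not upstream code): without
+## 'locations', the fitted trend is returned as a pixel image on the
+## network (class "linim"), built by the pixellate-and-project steps
+## above.
+if(FALSE) {
+  fit <- lppm(unmark(chicago) ~ x)
+  lam <- predict(fit, type="trend")
+  plot(lam)
+}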
+
+coef.lppm <- function(object, ...) {
+  coef(object$fit)
+}
+
+print.lppm <- function(x, ...) {
+  splat("Point process model on linear network")
+  print(x$fit)
+  terselevel <- spatstat.options('terse')
+  if(waxlyrical('extras', terselevel))
+    splat("Original data:", x$Xname)
+  if(waxlyrical('gory', terselevel))
+    print(as.linnet(x))
+  return(invisible(NULL))
+}
+
+summary.lppm <- function(object, ...) {
+  splat("Point process model on linear network")
+  print(summary(object$fit))
+  terselevel <- spatstat.options('terse')
+  if(waxlyrical('extras', terselevel))
+    splat("Original data:", object$Xname)
+  if(waxlyrical('gory', terselevel))
+    print(summary(as.linnet(object)))
+  return(invisible(NULL))
+}
+
+plot.lppm <- function(x, ..., type="trend") {
+  xname <- short.deparse(substitute(x))
+  y <- predict(x, type=type)
+  do.call(plot, resolve.defaults(list(y),
+                                   list(...),
+                                   list(main=xname)))
+}
+  
+anova.lppm <- function(object, ..., test=NULL) {
+  stuff <- list(object=object, ...)
+  if(!is.na(hit <- match("override", names(stuff)))) {
+    warning("Argument 'override' is outdated and was ignored")
+    stuff <- stuff[-hit]
+  }
+  #' extract ppm objects where appropriate
+  mod <- sapply(stuff, is.lppm)
+  stuff[mod] <- lapply(stuff[mod], getElement, name="fit")
+  #' analysis of deviance or adjusted composite deviance
+  do.call(anova.ppm, append(stuff, list(test=test)))
+}
+
+update.lppm <- function(object, ...) {
+  stopifnot(inherits(object, "lppm"))
+  X <- object$X
+  fit <- object$fit
+  Xname <- object$Xname
+  callframe <- environment(formula(fit))
+  aargh <- list(...)
+  islpp <- sapply(aargh, is.lpp)
+  if(any(islpp)) {
+    # trap point pattern argument & convert to quadscheme
+    ii <- which(islpp)
+    if((npp <- length(ii)) > 1)
+      stop(paste("Arguments not understood:", npp, "lpp objects given"))
+    X <- aargh[[ii]]
+    aargh[[ii]] <- linequad(X)
+  }
+  isfmla <- sapply(aargh, inherits, what="formula")
+  if(any(isfmla)) {
+    # trap formula argument, update it, evaluate LHS if required
+    jj <- which(isfmla)
+    if((nf <- length(jj)) > 1)
+      stop(paste("Arguments not understood:", nf, "formulae given"))
+    fmla <- aargh[[jj]]
+    fmla <- update(formula(object), fmla)
+    if(!is.null(lhs <- lhs.of.formula(fmla))) {
+      X <- eval(lhs, envir=list2env(list("."=X), parent=callframe))
+      Qpos <- if(any(islpp)) ii else (length(aargh) + 1L)
+      aargh[[Qpos]] <- linequad(X)
+    }
+    aargh[[jj]] <- rhs.of.formula(fmla)
+  }
+  newfit <- do.call(update.ppm,
+                    append(list(fit), aargh),
+                    envir=callframe)
+  if(!is.poisson.ppm(newfit))
+    warning("Non-Poisson models currently use Euclidean distance")
+  out <- list(X=X, fit=newfit, Xname=Xname)
+  class(out) <- "lppm"
+  return(out)
+}
+
+terms.lppm <- function(x, ...) {
+  terms(x$fit, ...)
+}
+
+logLik.lppm <- function(object, ...) {
+  logLik(object$fit, ...)
+}
+
+deviance.lppm <- function(object, ...) {
+  as.numeric(-2 * logLik(object, ...))
+}
+
+pseudoR2.lppm <- function(object, ...) {
+  dres <- deviance(object, ..., warn=FALSE)
+  nullmod <- update(object, . ~ 1)
+  dnul <- deviance(nullmod, warn=FALSE)
+  return(1 - dres/dnul)
+}
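+
+## Reviewer's note (illustrative, not upstream code): pseudoR2 returns
+## 1 - D/D0, where D is the deviance of the fitted model and D0 the
+## deviance of the intercept-only model refitted above; values near 0
+## mean the covariates add little explanatory power.
+if(FALSE) {
+  fit <- lppm(unmark(chicago) ~ x)
+  pseudoR2(fit)
+}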
+
+formula.lppm <- function(x, ...) {
+  formula(x$fit, ...)
+}
+
+extractAIC.lppm <- function(fit, ...) {
+  extractAIC(fit$fit, ...)
+}
+
+as.owin.lppm <- function(W, ..., fatal=TRUE) {
+  stopifnot(inherits(W, "lppm"))
+  as.owin(as.linnet(W), ..., fatal=fatal)
+}
+
+Window.lppm <- function(X, ...) { as.owin(X) }
+
+
+model.images.lppm <- local({
+
+  model.images.lppm <- function(object, L=as.linnet(object), ...) {
+    stopifnot(inherits(object, "lppm"))
+    stopifnot(inherits(L, "linnet"))
+    m <- model.images(object$fit, W=as.rectangle(L), ...)
+    if(length(m) > 0) {
+      ## restrict images to L
+      rasta <- as.mask(m[[1L]])
+      DL <- as.mask.psp(as.psp(L), xy=rasta)
+      ZL <- as.im(DL)
+      if(!is.hyperframe(m)) {
+        ## list of images: convert to list of linims
+        m <- tolinims(m, L=L, imL=ZL)
+      } else {
+        ## hyperframe, each column being a list of images
+        mm <- lapply(as.list(m), tolinims, L=L, imL=ZL)
+        m <- do.call(hyperframe, mm)
+      }
+    }
+    return(m)
+  }
+
+  tolinim <- function(x, L, imL) linim(L, eval.im(x * imL))
+  tolinims <- function(x, L, imL) solapply(x, tolinim, L=L, imL=imL)
+  
+  model.images.lppm
+})
+
+  
+model.matrix.lppm <- function(object,
+                              data=model.frame(object, na.action=NULL),
+                             ..., keepNA=TRUE) {
+  stopifnot(is.lppm(object))
+  if(missing(data)) data <- NULL
+  model.matrix(object$fit, data=data, ..., keepNA=keepNA)
+}
+
+model.frame.lppm <- function(formula, ...) {
+  stopifnot(inherits(formula, "lppm"))
+  model.frame(formula$fit, ...)
+}
+
+domain.lppm <- as.linnet.lppm <- function(X, ...) {
+  as.linnet(X$X, ...)
+}
+
+nobs.lppm <- function(object, ...) {
+  npoints(object$X)
+}
+
+is.poisson.lppm <- function(x) { is.poisson(x$fit) }
+
+is.stationary.lppm <- function(x) { is.stationary(x$fit) }
+
+is.multitype.lppm <- function(X, ...) { is.multitype(X$fit) }
+
+is.marked.lppm <- function(X, ...) { is.marked(X$fit) }
+
+vcov.lppm <- function(object, ...) {
+  if(!is.poisson(object))
+    stop("vcov.lppm is only implemented for Poisson models")
+  vcov(object$fit, ...)
+}
+
+valid.lppm <- function(object, ...) {
+  valid(object$fit, ...)
+}
+
+emend.lppm <- function(object, ...) {
+  object$fit <- emend(object$fit, ...)
+  return(object)
+}
+
+  
diff --git a/R/lurking.R b/R/lurking.R
new file mode 100755
index 0000000..452ba13
--- /dev/null
+++ b/R/lurking.R
@@ -0,0 +1,482 @@
+# Lurking variable plot for arbitrary covariate.
+#
+#
+# $Revision: 1.52 $ $Date: 2017/02/07 08:12:05 $
+#
+
+lurking <- local({
+
+  cumsumna <- function(x) { cumsum(ifelse(is.na(x), 0, x)) }
+
+  ## main function
+  lurking <- function(object, covariate, type="eem",
+                      cumulative=TRUE,
+                      clipwindow=default.clipwindow(object),
+                      rv = NULL,
+                      plot.sd=is.poisson(object), 
+                      envelope=FALSE, nsim=39, nrank=1,
+                      plot.it=TRUE,
+                      typename,
+                      covname, oldstyle=FALSE,
+                      check=TRUE, ..., splineargs=list(spar=0.5),
+                      verbose=TRUE) {
+    cl <- match.call()
+    ## default name for covariate
+    if(missing(covname) || is.null(covname)) {
+      covname <- if(is.name(cl$covariate)) as.character(cl$covariate) else
+                 if(is.expression(cl$covariate)) cl$covariate else NULL
+    }
+
+    if(!identical(envelope, FALSE)) {
+      ## compute simulation envelope
+      Xsim <- NULL
+      if(!identical(envelope, TRUE)) {
+        ## some kind of object
+        Y <- envelope
+        if(is.list(Y) && all(sapply(Y, is.ppp))) {
+          Xsim <- Y
+          envelope <- TRUE
+        } else if(inherits(Y, "envelope")) {
+          Xsim <- attr(Y, "simpatterns")
+          if(is.null(Xsim))
+            stop("envelope does not contain simulated point patterns")
+          envelope <- TRUE
+        } else stop("Unrecognised format of argument: envelope")
+        nXsim <- length(Xsim)
+        if(missing(nsim) && (nXsim < nsim)) {
+          warning(paste("Only", nXsim, "simulated patterns available"))
+          nsim <- nXsim
+        }
+      }
+    }
+    
+    ## validate object
+    if(is.ppp(object)) {
+      X <- object
+      object <- ppm(X ~1, forcefit=TRUE)
+      dont.complain.about(X)
+    } else verifyclass(object, "ppm")
+
+    ## may need to refit the model
+    if(plot.sd && is.null(getglmfit(object)))
+      object <- update(object, forcefit=TRUE, use.internal=TRUE)
+
+    ## match type argument
+    type <- pickoption("type", type,
+                       c(eem="eem",
+                         raw="raw",
+                         inverse="inverse",
+                         pearson="pearson",
+                         Pearson="pearson"))
+    if(missing(typename))
+      typename <- switch(type,
+                         eem="exponential energy weights",
+                         raw="raw residuals",
+                         inverse="inverse-lambda residuals",
+                         pearson="Pearson residuals")
+
+    ## extract spatial locations
+    Q <- quad.ppm(object)
+    datapoints <- Q$data
+    quadpoints <- union.quad(Q)
+    Z <- is.data(Q)
+    wts <- w.quad(Q)
+    ## subset of quadrature points used to fit model
+    subQset <- getglmsubset(object)
+    if(is.null(subQset)) subQset <- rep.int(TRUE, n.quad(Q))
+  
+    #################################################################
+    ## compute the covariate
+    
+    if(is.im(covariate)) {
+      covvalues <- covariate[quadpoints, drop=FALSE]
+    } else if(is.vector(covariate) && is.numeric(covariate)) {
+      covvalues <- covariate
+      if(length(covvalues) != quadpoints$n)
+        stop("Length of covariate vector,", length(covvalues), "!=",
+             quadpoints$n, ", number of quadrature points")
+    } else if(is.expression(covariate)) {
+      ## Expression involving covariates in the model
+      glmdata <- getglmdata(object)
+      ## Fix special cases
+      if(is.null(glmdata)) {
+        ## default 
+        glmdata <- data.frame(x=quadpoints$x, y=quadpoints$y)
+        if(is.marked(quadpoints))
+          glmdata$marks <- marks(quadpoints)
+      }
+      ## ensure x and y are in data frame 
+      if(!all(c("x","y") %in% names(glmdata))) {
+        glmdata$x <- quadpoints$x
+        glmdata$y <- quadpoints$y
+      } 
+      if(!is.null(object$covariates)) {
+        ## Expression may involve an external covariate that's not used in model
+        neednames <- all.vars(covariate)
+        if(!all(neednames %in% colnames(glmdata))) {
+          moredata <- mpl.get.covariates(object$covariates, quadpoints,
+                                         covfunargs=object$covfunargs)
+          use <- !(names(moredata) %in% colnames(glmdata))
+          glmdata <- cbind(glmdata, moredata[,use,drop=FALSE])
+        }
+      }
+      ## Evaluate expression
+      sp <- parent.frame()
+      covvalues <- eval(covariate, envir= glmdata, enclos=sp)
+      if(!is.numeric(covvalues))
+        stop("The evaluated covariate is not numeric")
+    } else 
+      stop(paste("The", sQuote("covariate"), "should be either",
+                 "a pixel image, an expression or a numeric vector"))
+
+    #################################################################
+    ## Validate covariate values
+
+    nbg <- is.na(covvalues)
+    if(any(offending <- nbg & subQset)) {
+      if(is.im(covariate))
+        warning(paste(sum(offending), "out of", length(offending),
+                      "quadrature points discarded because",
+                      ngettext(sum(offending), "it lies", "they lie"),
+                      "outside the domain of the covariate image"))
+      else
+        warning(paste(sum(offending), "out of", length(offending),
+                      "covariate values discarded because",
+                      ngettext(sum(offending), "it is NA", "they are NA")))
+    }
+    ## remove points
+    ok <- !nbg & subQset
+    Q <- Q[ok]
+    covvalues <- covvalues[ok]
+    quadpoints <- quadpoints[ok]
+    ## adjust
+    Z <- is.data(Q)
+    wts <- w.quad(Q)
+    if(any(is.infinite(covvalues) | is.nan(covvalues)))
+      stop("covariate contains Inf or NaN values")
+
+    ## Quadrature points marked by covariate value
+    covq <- quadpoints %mark% as.numeric(covvalues)
+
+    ################################################################
+    ## Residuals/marks attached to appropriate locations.
+    ## Stoyan-Grabarnik weights are attached to the data points only.
+    ## Others (residuals) are attached to all quadrature points.
+
+    resvalues <- 
+      if(!is.null(rv)) rv
+      else if(type=="eem") eem(object, check=check)
+      else residuals.ppm(object, type=type, check=check)
+  
+    if(inherits(resvalues, "msr")) {
+      ## signed or vector-valued measure
+      resvalues <- resvalues$val
+      if(ncol(as.matrix(resvalues)) > 1)
+        stop("Not implemented for vector measures; use [.msr to split into separate components")
+    }
+
+    if(type != "eem")
+      resvalues <- resvalues[ok]
+
+    res <- (if(type == "eem") datapoints else quadpoints) %mark% as.numeric(resvalues)
+
+    ## ... and the same locations marked by the covariate
+    covres <- if(type == "eem") covq[Z] else covq
+
+    ## NAMES OF THINGS
+    ## name of the covariate
+    if(is.null(covname)) 
+      covname <- if(is.expression(covariate)) covariate else "covariate"
+    ## type of residual/mark
+    if(missing(typename)) 
+      typename <- if(!is.null(rv)) "rv" else ""
+    
+    #######################################################################
+    ## START ANALYSIS
+    ## Clip to subwindow if needed
+    clip <-
+      (!is.poisson.ppm(object) || !missing(clipwindow)) &&
+      !is.null(clipwindow)
+    if(clip) {
+      covq <- covq[clipwindow]
+      res <- res[clipwindow]
+      covres <- covres[clipwindow]
+      clipquad <- inside.owin(quadpoints$x, quadpoints$y, clipwindow)
+      wts <- wts[ clipquad ]
+    }
+
+    ## -----------------------------------------------------------------------
+    ## (A) EMPIRICAL CUMULATIVE FUNCTION
+    ## based on data points if type="eem", otherwise on quadrature points
+
+    ## Reorder the data/quad points in order of increasing covariate value
+    ## and then compute the cumulative sum of their residuals/marks
+    markscovres <- marks(covres)
+    o <- fave.order(markscovres)
+    covsort <- markscovres[o]
+    cummark <- cumsumna(marks(res)[o]) 
+    ## we'll plot(covsort, cummark) in the cumulative case
+
+    ## (B) THEORETICAL MEAN CUMULATIVE FUNCTION
+    ## based on all quadrature points
+    
+    ## Range of covariate values
+    covqmarks <- marks(covq)
+    covrange <- range(covqmarks, na.rm=TRUE)
+    ## Suitable breakpoints
+    cvalues <- seq(from=covrange[1L], to=covrange[2L], length.out=100)
+    csmall <- cvalues[1L] - diff(cvalues[1:2])
+    cbreaks <- c(csmall, cvalues)
+    ## cumulative area as function of covariate values
+    covclass <- cut(covqmarks, breaks=cbreaks)
+    increm <- tapply(wts, covclass, sum)
+    cumarea <- cumsumna(increm)
+    ## compute theoretical mean (when model is true)
+    mean0 <- if(type == "eem") cumarea else numeric(length(cumarea))
+    ## we'll plot(cvalues, mean0) in the cumulative case
+
+    ## (A'),(B') DERIVATIVES OF (A) AND (B)
+    ##  Required if cumulative=FALSE  
+    ##  Estimated by spline smoothing (with x values jittered)
+    if(!cumulative) {
+      ## fit smoothing spline to (A) 
+      ss <- do.call(smooth.spline,
+                    append(list(covsort, cummark),
+                           splineargs)
+                    )
+      ## estimate derivative of (A)
+      derivmark <- predict(ss, covsort, deriv=1)$y 
+      ## similarly for (B) 
+      ss <- do.call(smooth.spline,
+                    append(list(cvalues, mean0),
+                           splineargs)
+                    )
+      derivmean <- predict(ss, cvalues, deriv=1)$y
+    }
+  
+    ## -----------------------------------------------------------------------
+    ## Store what will be plotted
+  
+    if(cumulative) {
+      empirical <- data.frame(covariate=covsort, value=cummark)
+      theoretical <- data.frame(covariate=cvalues, mean=mean0)
+    } else {
+      empirical <- data.frame(covariate=covsort, value=derivmark)
+      theoretical <- data.frame(covariate=cvalues, mean=derivmean)
+    }
+
+    ## ------------------------------------------------------------------------
+  
+    ## (C) STANDARD DEVIATION if desired
+    ## (currently implemented only for Poisson)
+    ## (currently implemented only for cumulative case)
+
+    if(plot.sd && !is.poisson.ppm(object))
+      warning(paste("standard deviation is calculated for Poisson model;",
+                    "not valid for this model"))
+
+    if(plot.sd && cumulative) {
+      ## Fitted intensity at quadrature points
+      lambda <- fitted.ppm(object, type="trend", check=check)
+      lambda <- lambda[ok]
+      ## Fisher information for coefficients
+      asymp <- vcov(object,what="internals")
+      Fisher <- asymp$fisher
+      ## Local sufficient statistic at quadrature points
+      suff <- asymp$suff
+      suff <- suff[ok, ,drop=FALSE]
+      ## Clip if required
+      if(clip) {
+        lambda <- lambda[clipquad]
+        suff   <- suff[clipquad, , drop=FALSE]  ## suff is a matrix
+      }
+      ## First term: integral of lambda^(2p+1)
+      switch(type,
+             pearson={
+               varI <- cumarea
+             },
+             raw={
+               ## Compute sum of w*lambda for quadrature points in each interval
+               dvar <- tapply(wts * lambda, covclass, sum)
+               ## tapply() returns NA when the table is empty
+               dvar[is.na(dvar)] <- 0
+               ## Cumulate
+               varI <- cumsum(dvar)
+             },
+             inverse=, ## same as eem
+             eem={
+               ## Compute sum of w/lambda for quadrature points in each interval
+               dvar <- tapply(wts / lambda, covclass, sum)
+               ## tapply() returns NA when the table is empty
+               dvar[is.na(dvar)] <- 0
+               ## Cumulate
+               varI <- cumsum(dvar)
+             })
+
+      ## variance-covariance matrix of coefficients
+      V <- try(solve(Fisher), silent=TRUE)
+      if(inherits(V, "try-error")) {
+        warning("Fisher information is singular; reverting to oldstyle=TRUE")
+        oldstyle <- TRUE
+      }
+      
+      ## Second term: B' V B
+      if(oldstyle) {
+        varII <- 0
+      } else {
+        ## lamp = lambda^(p + 1)
+        lamp <- switch(type,
+                       raw     = lambda, 
+                       pearson = sqrt(lambda),
+                       inverse =,
+                       eem     = as.integer(lambda > 0))
+        ## Compute sum of w * lamp * suff for quad points in intervals
+        Bcontrib <- as.vector(wts * lamp) * suff
+        dB <- matrix(, nrow=length(cumarea), ncol=ncol(Bcontrib))
+        for(j in seq_len(ncol(dB))) 
+          dB[,j] <- tapply(Bcontrib[,j], covclass, sum, na.rm=TRUE)
+        ## tapply() returns NA when the table is empty
+        dB[is.na(dB)] <- 0
+        ## Cumulate columns
+        B <- apply(dB, 2, cumsum)
+        ## compute B' V B for each i 
+        varII <- diag(B %*% V %*% t(B))
+      }
+      ##
+      ## variance of residuals
+      varR <- varI - varII
+      ## trap numerical errors
+      nbg <- (varR < 0)
+      if(any(nbg)) {
+        ran <- range(varR)
+        varR[nbg] <- 0
+        relerr <- abs(ran[1L]/ran[2L])
+        nerr <- sum(nbg)
+        if(relerr > 1e-6) {
+          warning(paste(nerr, "negative",
+                        ngettext(nerr, "value (", "values (min="),
+                        signif(ran[1L], 4), ")",
+                        "of residual variance reset to zero",
+                        "(out of", length(varR), "values)"))
+        }
+      }
+      theoretical$sd <- sqrt(varR)
+    }
+
+    ## 
+    if(envelope) {
+      ## compute envelopes by simulation
+      cl$plot.it <- FALSE
+      cl$envelope <- FALSE
+      cl$rv <- NULL
+      if(is.null(Xsim))
+        Xsim <- simulate(object, nsim=nsim, progress=verbose)
+      values <- NULL
+      if(verbose) {
+        cat("Processing.. ")
+        state <- list()
+      }
+      for(i in seq_len(nsim)) {
+        cl$object <- update(object, Xsim[[i]])
+        result.i <- eval(cl, parent.frame())
+        ## interpolate empirical values onto common sequence
+        f.i <- with(result.i$empirical,
+                    approxfun(covariate, value, rule=2))
+        val.i <- f.i(theoretical$covariate)
+        values <- cbind(values, val.i)
+        if(verbose) state <- progressreport(i, nsim, state=state)
+      }
+      if(verbose) cat("Done.\n")
+      hilo <- if(nrank == 1) apply(values, 1, range) else
+                 apply(values, 1, orderstats, k=c(nrank, nsim-nrank+1))
+      theoretical$lower <- hilo[1L,]
+      theoretical$upper <- hilo[2L,]
+    }
+    ## ----------------  RETURN COORDINATES ----------------------------
+    stuff <- list(empirical=empirical,
+                  theoretical=theoretical)
+    attr(stuff, "info") <- list(typename=typename,
+                                cumulative=cumulative,
+                                covrange=covrange,
+                                covname=covname)
+    class(stuff) <- "lurk"
+    ## ---------------  PLOT THEM  ----------------------------------
+    if(plot.it) 
+      plot(stuff, ...)
+    return(invisible(stuff))
+  }
+
+  lurking
+})
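+
+## Reviewer's usage sketch (illustrative, not upstream code): a lurking
+## variable plot of cumulative raw residuals against the x coordinate,
+## with approximate +/- 2 sd limits for the fitted Poisson model.
+if(FALSE) {
+  fit <- ppm(cells ~ x)
+  lurking(fit, expression(x), type="raw")
+}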
+
+
+# plot a lurk object
+
+
+plot.lurk <- function(x, ..., shade="grey") {
+  xplus <- append(x, attr(x, "info"))
+  with(xplus, {
+    ## work out plot range
+    mr <- range(0, empirical$value, theoretical$mean, na.rm=TRUE)
+    if(!is.null(theoretical$sd))
+      mr <- range(mr,
+                  theoretical$mean + 2 * theoretical$sd,
+                  theoretical$mean - 2 * theoretical$sd,
+                  na.rm=TRUE)
+    if(!is.null(theoretical$upper))
+      mr <- range(mr, theoretical$upper, theoretical$lower, na.rm=TRUE)
+
+    ## start plot
+    vname <- paste(if(cumulative)"cumulative" else "marginal", typename)
+    do.call(plot,
+            resolve.defaults(
+              list(covrange, mr),
+              list(type="n"),
+              list(...),
+              list(xlab=covname, ylab=vname)))
+    ## Envelopes
+    if(!is.null(theoretical$upper)) {
+      Upper <- theoretical$upper
+      Lower <- theoretical$lower
+    } else if(!is.null(theoretical$sd)) {
+      Upper <- with(theoretical, mean+2*sd)
+      Lower <- with(theoretical, mean-2*sd)
+    } else Upper <- Lower <- NULL
+    if(!is.null(Upper) && !is.null(Lower)) {
+      xx <- theoretical$covariate
+      if(!is.null(shade)) {
+        ## shaded envelope region
+        shadecol <- if(is.colour(shade)) shade else "grey"
+        xx <- c(xx,    rev(xx))
+        yy <- c(Upper, rev(Lower))
+        do.call.matched(polygon,
+                        resolve.defaults(list(x=xx, y=yy),
+                                         list(...),
+                                         list(border=shadecol, col=shadecol)))
+      } else {
+        do.call(lines,
+                resolve.defaults(
+                  list(x = xx, y=Upper),
+                  list(...),
+                  list(lty=3)))
+        do.call(lines,
+                resolve.defaults(
+                  list(x = xx, y = Lower),
+                  list(...),
+                  list(lty=3)))
+      }
+    }
+    ## Empirical
+    lines(value ~ covariate, empirical, ...)
+    ## Theoretical mean
+    do.call(lines,
+            resolve.defaults(
+              list(mean ~ covariate, theoretical),
+              list(...),
+              list(lty=2)))
+  })
+  return(invisible(NULL))
+}
+
+
+
diff --git a/R/markcorr.R b/R/markcorr.R
new file mode 100755
index 0000000..7c1da1a
--- /dev/null
+++ b/R/markcorr.R
@@ -0,0 +1,846 @@
+##
+##
+##     markcorr.R
+##
+##     $Revision: 1.77 $ $Date: 2016/02/16 01:39:12 $
+##
+##    Estimate the mark correlation function
+##    and related functions 
+##    
+## ------------------------------------------------------------------------
+
+markvario <- local({
+
+  halfsquarediff <- function(m1, m2) { ((m1-m2)^2)/2 }
+
+  assigntheo <- function(x, value) { x$theo <- value; return(x) }
+  
+  markvario <- 
+    function(X, correction=c("isotropic", "Ripley", "translate"),
+             r=NULL, method="density", ..., normalise=FALSE) {
+      m <- onecolumn(marks(X))
+      if(!is.numeric(m))
+        stop("Marks are not numeric")
+      if(missing(correction))
+        correction <- NULL
+      ## Compute estimates
+      v <- markcorr(X, f=halfsquarediff, 
+                    r=r, correction=correction, method=method,
+                    normalise=normalise, ...)
+      if(is.fv(v)) v <- anylist(v)
+      ## adjust theoretical value and fix labels
+      theoval <- if(normalise) 1 else var(m)
+      for(i in seq_len(length(v))) {
+        v[[i]]$theo <- theoval
+        v[[i]] <- rebadge.fv(v[[i]],
+                             quote(gamma(r)),
+                             "gamma")
+      }
+      if(length(v) == 1) v <- v[[1]]
+      return(v)
+    }
+
+  markvario
+})
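+
+## Reviewer's usage sketch (illustrative, not upstream code): 'spruces'
+## is a spatstat dataset of trees marked by diameter; gamma(r) is half
+## the mean squared difference of marks at distance r.
+if(FALSE) {
+  plot(markvario(spruces))
+}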
+
+markconnect <- local({
+
+  indicateij <- function(m1, m2, i, j) { (m1 == i) & (m2 == j) }
+  
+  markconnect <- function(X, i, j, r=NULL, 
+                          correction=c("isotropic", "Ripley", "translate"),
+                          method="density", ..., normalise=FALSE) {
+    stopifnot(is.ppp(X) && is.multitype(X))
+    if(missing(correction))
+      correction <- NULL
+    marx <- marks(X)
+    lev  <- levels(marx)
+    if(missing(i)) i <- lev[1]
+    if(missing(j)) j <- lev[2]
+    ## compute estimates
+    p <- markcorr(X, f=indicateij, r=r,
+                  correction=correction, method=method,
+                  ...,
+                  fargs=list(i=i, j=j),
+                  normalise=normalise)
+    ## alter theoretical value and fix labels
+    if(!normalise) {
+      pipj <- mean(marx==i) * mean(marx==j) 
+      p$theo <- pipj
+    } else {
+      p$theo <- 1
+    }
+    p <- rebadge.fv(p,
+                    new.ylab=substitute(p[i,j](r), list(i=paste(i),j=paste(j))),
+                    new.fname=c("p", paste0("list(", i, ",", j, ")")),
+                    new.yexp=substitute(p[list(i,j)](r),
+                      list(i=paste(i),j=paste(j))))
+    return(p)
+  }
+  markconnect
+})
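+
+## Reviewer's usage sketch (illustrative, not upstream code): 'amacrine'
+## is a multitype spatstat dataset; p[i,j](r) estimates the probability
+## that two points at distance r have types i and j.
+if(FALSE) {
+  plot(markconnect(amacrine, "on", "off"))
+}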
+
+
+Emark <- local({
+
+  f1 <- function(m1, m2) { m1 }
+
+  Emark <- function(X, r=NULL, 
+                    correction=c("isotropic", "Ripley", "translate"),
+                    method="density", ..., normalise=FALSE) {
+    stopifnot(is.ppp(X) && is.marked(X))
+    marx <- marks(X)
+    isvec <- is.vector(marx) && is.numeric(marx)
+    isdf <- is.data.frame(marx) && all(sapply(as.list(marx), is.numeric))
+    if(!(isvec || isdf))
+      stop("All marks of X should be numeric")
+    if(missing(correction))
+      correction <- NULL
+    E <- markcorr(X, f1, r=r,
+                  correction=correction, method=method,
+                  ..., normalise=normalise)
+    if(isvec) {
+      E <- rebadge.fv(E, quote(E(r)), "E")
+    } else {
+      E[] <- lapply(E, rebadge.fv, new.ylab=quote(E(r)), new.fname="E")
+    }
+    return(E)
+  }
+
+  Emark
+})
+
+Vmark <- local({
+
+  f2 <- function(m1, m2) { m1^2 }
+
+  Vmark <- function(X, r=NULL, 
+                    correction=c("isotropic", "Ripley", "translate"),
+                    method="density", ..., normalise=FALSE) {
+    if(missing(correction))
+      correction <- NULL
+    E <- Emark(X, r=r, correction=correction, method=method, ...,
+             normalise=FALSE)
+    E2 <- markcorr(X, f2, r=E$r,
+                   correction=correction, method=method,
+                   ..., normalise=FALSE)
+    if(normalise)
+      sig2 <- as.matrix(var(marks(X)))  # 1 x 1 matrix if marks are a vector
+    if(is.fv(E)) {
+      E <- list(E)
+      E2 <- list(E2)
+    }
+    V <- list()
+    for(i in seq_along(E)) {
+      Ei <- E[[i]]
+      E2i <- E2[[i]]
+      Vi <- eval.fv(E2i - Ei^2)
+      if(normalise) 
+        Vi <- eval.fv(Vi/sig2[i,i])
+      Vi <- rebadge.fv(Vi, quote(V(r)), "V")
+      attr(Vi, "labl") <- attr(Ei, "labl")
+      V[[i]] <- Vi
+    }
+    if(length(V) == 1) return(V[[1]])
+    V <- as.anylist(V)
+    names(V) <- colnames(marks(X))
+    return(V)
+  }
+
+  Vmark
+})
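+
+## Reviewer's usage sketch (illustrative, not upstream code): E(r) and
+## V(r) are the conditional mean and variance of the mark of a point,
+## given that another point of the pattern lies at distance r.
+if(FALSE) {
+  plot(Emark(spruces))
+  plot(Vmark(spruces))
+}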
+
+############## workhorses 'markcorr' and 'markcorrint' ####################
+
+markcorrint <-
+Kmark <-
+  function(X, f=NULL, r=NULL, 
+           correction=c("isotropic", "Ripley", "translate"), ...,
+           f1=NULL, normalise=TRUE, returnL=FALSE, fargs=NULL) {
+  ## Computes the analogue of Kest(X)
+  ## where each pair (x_i,x_j) is weighted by w(m_i,m_j)
+  ##
+  ## If multiplicative=TRUE then w(u,v) = f(u) f(v)
+  ## If multiplicative=FALSE then w(u,v) = f(u, v)
+  ##
+  stopifnot(is.ppp(X) && is.marked(X))
+  is.marked(X, dfok=FALSE)
+  W <- Window(X)
+  ## 
+  if(identical(sys.call()[[1]], as.name('markcorrint')))
+    warn.once('markcorrint',
+              "markcorrint will be deprecated in future versions of spatstat;",
+              "use the equivalent function Kmark")
+  ## validate test function
+  h <- check.testfun(f, f1, X)
+  f     <- h$f
+  f1    <- h$f1
+  ftype <- h$ftype
+  multiplicative <- ftype %in% c("mul", "product")
+  ## 
+  ## check corrections
+  correction.given <- !missing(correction) && !is.null(correction)
+  if(is.null(correction))
+    correction <- c("isotropic", "Ripley", "translate")
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             border="border",
+                             "bord.modif"="bord.modif",
+                             isotropic="isotropic",
+                             Ripley="isotropic",
+                             trans="translate",
+                             translate="translate",
+                             translation="translate",
+                             best="best"),
+                           multi=TRUE)
+
+  correction <- implemented.for.K(correction, W$type, correction.given)
+
+  isborder  <- correction %in% c("border", "bord.modif")
+  if(any(isborder) && !multiplicative) {
+    whinge <- paste("Border correction is not valid unless",
+                    "test function is of the form f(u,v) = f1(u)*f1(v)")
+    correction <- correction[!isborder]
+    if(length(correction) == 0)
+      stop(whinge)
+    else
+      warning(whinge)
+  }
+  ## estimated intensity
+  lambda <- intensity(X)
+  mX <- marks(X)
+  switch(ftype,
+         mul={
+           wt <- mX/lambda
+           K <- Kinhom(X, r=r, reciplambda=wt, correction=correction,
+                       ..., renormalise=FALSE)
+           Ef2 <- mean(mX)^2
+         },
+         equ={
+           fXX <- outer(mX, mX, "==")
+           wt <- fXX/lambda^2
+           K <- Kinhom(X, r=r, reciplambda2=wt, correction=correction,
+                       ..., renormalise=FALSE)
+           mtable <- table(mX)
+           Ef2 <- sum(mtable^2)/length(mX)^2
+         },
+         product={
+           f1X <- do.call(f1, append(list(mX), fargs))
+           wt <- f1X/lambda
+           K <- Kinhom(X, r=r, reciplambda=wt, correction=correction,
+                       ..., renormalise=FALSE)
+           Ef2 <- mean(f1X)^2
+         },
+         general={
+           fXX <- do.call(outer, append(list(mX, mX, f), fargs))
+           wt <- fXX/lambda^2
+           K <- Kinhom(X, r=r, reciplambda2=wt, correction=correction,
+                       ..., renormalise=FALSE)
+           Ef2 <- mean(fXX)
+         })
+  K$theo <- K$theo * Ef2
+  labl <- attr(K, "labl")
+  if(normalise)
+    K <- eval.fv(K/Ef2)
+  if(returnL)
+    K <- eval.fv(sqrt(K/pi))
+  attr(K, "labl") <- labl
+  if(normalise && !returnL) {
+    ylab <- quote(K[f](r))
+    fnam <- c("K", "f")
+  } else if(normalise && returnL) {
+    ylab <- quote(L[f](r))
+    fnam <- c("L", "f")
+  } else if(!normalise && !returnL) {
+    ylab <- quote(C[f](r))
+    fnam <- c("C", "f")
+  } else {
+    ylab <- quote(sqrt(C[f](r)/pi))
+    fnam <- "sqrt(C[f]/pi)"
+  }
+  K <- rebadge.fv(K, ylab, fnam)
+  return(K)
+}
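+
+## Reviewer's usage sketch (illustrative, not upstream code): with the
+## default multiplicative test function, Kmark weights each pair by the
+## product of its marks; with normalise=TRUE it is comparable to Kest
+## and coincides with it when marks are independent of locations.
+if(FALSE) {
+  plot(Kmark(spruces))
+}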
+
+markcorr <-
+  function(X, f = function(m1, m2) { m1 * m2}, r=NULL, 
+           correction=c("isotropic", "Ripley", "translate"),
+           method="density", ...,
+           weights=NULL, f1=NULL, normalise=TRUE, fargs=NULL)
+{
+  ## mark correlation function with test function f
+  stopifnot(is.ppp(X) && is.marked(X))
+  nX <- npoints(X)
+  
+  ## set defaults to NULL
+  if(missing(f)) f <- NULL
+  if(missing(correction)) correction <- NULL
+
+  ## handle data frame of marks
+  marx <- marks(X, dfok=TRUE)
+  if(is.data.frame(marx)) {
+    nc <- ncol(marx)
+    result <- list()
+    for(j in 1:nc) {
+      Xj <- X %mark% marx[,j]
+      result[[j]] <- markcorr(Xj, f=f, r=r, correction=correction,
+                              method=method, ...,
+                              weights=weights,
+                              f1=f1, normalise=normalise, fargs=fargs)
+    }
+    result <- as.anylist(result)
+    names(result) <- colnames(marx)
+    return(result)
+  }
+  
+  ## weights
+  if(unweighted <- is.null(weights)) {
+    weights <- rep(1, nX)
+  } else {
+    stopifnot(is.numeric(weights))
+    if(length(weights) == 1) {
+      weights <- rep(weights, nX)
+    } else check.nvector(weights, nX)
+    stopifnot(all(weights > 0))
+  }
+  
+  ## validate test function
+  h <- check.testfun(f, f1, X)
+  f     <- h$f
+  f1    <- h$f1
+  ftype <- h$ftype
+  ##
+  ## 
+  npts <- npoints(X)
+  W <- X$window
+  
+  ## determine r values 
+  rmaxdefault <- rmax.rule("K", W, npts/area(W))
+  breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault)
+  r <- breaks$r
+  rmax <- breaks$max
+        
+  if(length(method) > 1)
+    stop("Select only one method, please")
+  if(method=="density" && !breaks$even)
+    stop(paste("Evenly spaced r values are required if method=",
+               sQuote("density"), sep=""))
+        
+  ## available selection of edge corrections depends on window
+  correction.given <- !missing(correction) && !is.null(correction)
+  if(is.null(correction))
+    correction <- c("isotropic", "Ripley", "translate")
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             border="border",
+                             "bord.modif"="bord.modif",
+                             isotropic="isotropic",
+                             Ripley="isotropic",
+                             translate="translate",
+                             translation="translate",
+                             best="best"),
+                           multi=TRUE)
+  
+  correction <- implemented.for.K(correction, W$type, correction.given)
+
+  ## Denominator
+  ## Ef = Ef(M,M') when M, M' are independent
+  ## Apply f to every possible pair of marks, and average
+  Ef <- switch(ftype,
+               mul = {
+                 mean(marx * weights)^2
+               },
+               equ = {
+                 if(unweighted) {
+                   mtable <- table(marx)
+                 } else {
+                   mtable <- tapply(weights, marx, sum)
+                   mtable[is.na(mtable)] <- 0
+                 }
+                 sum(mtable^2)/nX^2
+               },
+               product={
+                 f1m <- do.call(f1, append(list(marx), fargs))
+                 mean(f1m * weights)^2
+               },
+               general = {
+                 mcross <- if(is.null(fargs)) {
+                   outer(marx, marx, f)
+                 } else {
+                   do.call(outer, append(list(marx,marx,f),fargs))
+                 }
+                 if(unweighted) {
+                   mean(mcross)
+                 } else {
+                   wcross <- outer(weights, weights, "*")
+                   mean(mcross * wcross)
+                 }
+               },
+               stop("Internal error: invalid ftype"))
+
+  if(normalise) {
+    theory <- 1
+    Efdenom <- Ef
+  } else {
+    theory <- Ef
+    Efdenom <- 1
+  }
+
+  if(normalise) {
+    ## check validity of denominator
+    if(Efdenom == 0)
+      stop("Cannot normalise the mark correlation; the denominator is zero")
+    else if(Efdenom < 0)
+      warning(paste("Problem when normalising the mark correlation:",
+                    "the denominator is negative"))
+  }
+  
+  ## this will be the output data frame
+  result <- data.frame(r=r, theo= rep.int(theory,length(r)))
+  desc <- c("distance argument r",
+            "theoretical value (independent marks) for %s")
+  alim <- c(0, min(rmax, rmaxdefault))
+  ## determine conventional name of function
+  if(ftype %in% c("mul", "equ")) {
+    if(normalise) {
+      ylab <- quote(k[mm](r))
+      fnam <- c("k", "mm")
+    } else {
+      ylab <- quote(c[mm](r))
+      fnam <- c("c", "mm")
+    }
+  } else {
+    if(normalise) {
+      ylab <- quote(k[f](r))
+      fnam <- c("k", "f")
+    } else {
+      ylab <- quote(c[f](r))
+      fnam <- c("c", "f")
+    }
+  }
+  result <- fv(result, "r", ylab, "theo", , alim,
+               c("r","{%s[%s]^{iid}}(r)"), desc, fname=fnam)
+
+  ## find close pairs of points
+  close <- closepairs(X, rmax)
+  dIJ <- close$d
+  I   <- close$i
+  J   <- close$j
+  XI <- ppp(close$xi, close$yi, window=W, check=FALSE)
+
+  ## apply f to marks of close pairs of points
+  ##
+  mI <- marx[I]
+  mJ <- marx[J]
+  ff <- switch(ftype,
+               mul = mI * mJ,
+               equ = (mI == mJ),
+               product={
+                 if(is.null(fargs)) {
+                   fI <- f1(mI)
+                   fJ <- f1(mJ)
+                 } else {
+                   fI <- do.call(f1, append(list(mI), fargs))
+                   fJ <- do.call(f1, append(list(mJ), fargs))
+                 }
+                 fI * fJ
+               },
+               general={
+                 if(is.null(fargs))
+                   f(marx[I], marx[J])
+                 else
+                   do.call(f, append(list(marx[I], marx[J]), fargs))
+               })
+
+  ## check values of f(M1, M2)
+  
+  if(is.logical(ff))
+    ff <- as.numeric(ff)
+  else if(!is.numeric(ff))
+    stop("function f did not return numeric values")
+
+  if(anyNA(ff)) 
+    switch(ftype,
+           mul=,
+           equ=stop("some marks were NA"),
+           product=,
+           general=stop("function f returned some NA values"))
+    
+  if(any(ff < 0))
+    switch(ftype,
+           mul=,
+           equ=stop("negative marks are not permitted"),
+           product=,
+           general=stop("negative values of function f are not permitted"))
+
+  ## weights
+  if(!unweighted)
+    ff <- ff * weights[I] * weights[J]
+  
+  #### Compute estimates ##############
+        
+  if(any(correction == "translate")) {
+    ## translation correction
+    XJ <- ppp(close$xj, close$yj, window=W, check=FALSE)
+    edgewt <- edge.Trans(XI, XJ, paired=TRUE)
+    ## get smoothed estimate of mark covariance
+    Mtrans <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...)
+    result <- bind.fv(result,
+                      data.frame(trans=Mtrans), "{hat(%s)[%s]^{trans}}(r)",
+                      "translation-corrected estimate of %s",
+                      "trans")
+  }
+  if(any(correction == "isotropic")) {
+    ## Ripley isotropic correction
+    edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1))
+    ## get smoothed estimate of mark covariance
+    Miso <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...)
+    result <- bind.fv(result,
+                      data.frame(iso=Miso), "{hat(%s)[%s]^{iso}}(r)",
+                      "Ripley isotropic correction estimate of %s",
+                      "iso")
+  }
+  ## which corrections have been computed?
+  nama2 <- names(result)
+  corrxns <- rev(nama2[nama2 != "r"])
+
+  ## default is to display them all
+  formula(result) <- (. ~ r)
+  fvnames(result, ".") <- corrxns
+  ##
+  unitname(result) <- unitname(X)
+  return(result)
+}
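+
+## Reviewer's usage sketch (illustrative, not upstream code): the
+## default f(m1,m2) = m1*m2 gives the classical mark correlation
+## k[mm](r); any other test function is handled by the "general"
+## branch of the switch above.
+if(FALSE) {
+  k1 <- markcorr(spruces)                          # default k[mm](r)
+  k2 <- markcorr(spruces, f=function(m1, m2) abs(m1 - m2),
+                 normalise=FALSE)                  # a general f
+  plot(k1)
+}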
+
+## mark cross-correlation function 
+
+markcrosscorr <-
+  function(X, r=NULL, 
+           correction=c("isotropic", "Ripley", "translate"),
+           method="density", ..., normalise=TRUE, Xname=NULL)
+{
+  if(missing(Xname))
+    Xname <- short.deparse(substitute(X))
+
+  stopifnot(is.ppp(X) && is.marked(X))
+  npts <- npoints(X)
+  W <- Window(X)
+
+  ## available selection of edge corrections depends on window
+  correction.given <- !missing(correction) && !is.null(correction)
+  if(is.null(correction))
+    correction <- c("isotropic", "Ripley", "translate")
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             border="border",
+                             "bord.modif"="bord.modif",
+                             isotropic="isotropic",
+                             Ripley="isotropic",
+                             translate="translate",
+                             translation="translate",
+                             best="best"),
+                           multi=TRUE)
+  correction <- implemented.for.K(correction, W$type, correction.given)
+  
+  ## determine r values 
+  rmaxdefault <- rmax.rule("K", W, npts/area(W))
+  breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault)
+  r <- breaks$r
+  rmax <- breaks$max
+        
+  ## find close pairs of points
+  close <- closepairs(X, rmax)
+  dIJ <- close$d
+  I   <- close$i
+  J   <- close$j
+  XI <- ppp(close$xi, close$yi, window=W, check=FALSE)
+
+  ## determine estimation method
+  if(length(method) > 1)
+    stop("Select only one method, please")
+  if(method=="density" && !breaks$even)
+    stop(paste("Evenly spaced r values are required if method=",
+               sQuote("density"), sep=""))
+
+  ## ensure marks are a data frame
+  marx <- marks(X, dfok=TRUE)
+  if(!is.data.frame(marx))
+    marx <- data.frame(marks=marx)
+
+  ## convert factor marks to dummy variables
+  while(any(isfac <- sapply(marx, is.factor))) {
+    i <- min(which(isfac))
+    mari <- marx[,i]
+    levi <- levels(mari)
+    nami <- colnames(marx)[i]
+    dumi <- 1 * outer(mari, levi, "==")
+    colnames(dumi) <- paste0(nami, levi)
+    marx <- as.data.frame(append(marx[,-i,drop=FALSE], list(dumi), after=i-1))
+  }
+  nc <- ncol(marx)
+  nama <- colnames(marx)
+  ## loop over all pairs of columns
+  funs <- list()
+  for(i in 1:nc) {
+    marxi <- marx[,i]
+    namei <- nama[i]
+    for(j in 1:nc) {
+      marxj <- marx[,j]
+      namej <- nama[j]
+      ## Denominator
+      ## Ef = E[M M'] = E[M] E[M']
+      ## where M, M' are independent random marks drawn from columns i and j
+      Ef <- mean(marxi) * mean(marxj)
+      if(normalise) {
+        theory <- 1
+        Efdenom <- Ef
+        ## check validity of denominator
+        if(Efdenom == 0)
+          stop(paste("Cannot normalise the mark correlation for",
+                     namei, "x", namej, "because the denominator is zero"),
+               call.=FALSE)
+        else if(Efdenom < 0)
+          warning(paste("Problem when normalising the mark correlation for",
+                        namei, "x", namej,
+                        "- the denominator is negative"),
+                  call.=FALSE)
+      } else {
+        theory <- Ef
+        Efdenom <- 1
+      }
+      ## this will be the output data frame
+      df.ij <- data.frame(r=r, theo= rep.int(theory,length(r)))
+      desc <- c("distance argument r",
+                "theoretical value (independent marks) for %s")
+      alim <- c(0, min(rmax, rmaxdefault))
+      ## determine conventional name of function
+      mimj <- as.name(paste0(namei,".",namej))
+      if(normalise) {
+        ylab <- substitute(k[mm](r), list(mm=mimj))
+        fnam <- c("k", as.character(mimj))
+      } else {
+        ylab <- substitute(c[mm](r), list(mm=mimj))
+        fnam <- c("c", as.character(mimj))
+      }
+      fun.ij <- fv(df.ij, "r", ylab, "theo", , alim,
+                   c("r","{%s[%s]^{ind}}(r)"), desc, fname=fnam)
+
+      mI <- marxi[I]
+      mJ <- marxj[J]
+      ff <- mI * mJ
+      ## check values of f(M1, M2)
+
+      if(anyNA(ff)) 
+        stop("some marks were NA", call.=FALSE)
+
+      if(any(ff < 0))
+        stop("negative marks are not permitted")
+    
+      ## Compute estimates ##############
+        
+      if(any(correction == "translate")) {
+        ## translation correction
+        XJ <- ppp(close$xj, close$yj, window=W, check=FALSE)
+        edgewt <- edge.Trans(XI, XJ, paired=TRUE)
+        ## get smoothed estimate of mark covariance
+        Mtrans <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...)
+        fun.ij <- bind.fv(fun.ij,
+                          data.frame(trans=Mtrans),
+                          "{hat(%s)[%s]^{trans}}(r)",
+                          "translation-corrected estimate of %s",
+                          "trans")
+      }
+      if(any(correction == "isotropic")) {
+        ## Ripley isotropic correction
+        edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1))
+        ## get smoothed estimate of mark covariance
+        Miso <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...)
+        fun.ij <- bind.fv(fun.ij,
+                          data.frame(iso=Miso), "{hat(%s)[%s]^{iso}}(r)",
+                          "Ripley isotropic correction estimate of %s",
+                          "iso")
+      }
+      ## which corrections have been computed?
+      nama2 <- names(fun.ij)
+      corrxns <- rev(nama2[nama2 != "r"])
+
+      ## default is to display them all
+      formula(fun.ij) <- (. ~ r)
+      fvnames(fun.ij, ".") <- corrxns
+      ##
+      unitname(fun.ij) <- unitname(X)
+      funs <- append(funs, list(fun.ij))
+    }
+  }
+  # matrix mapping array entries to list positions in 'funs'
+  witch <- matrix(1:(nc^2), nc, nc, byrow=TRUE)
+  header <- paste("Mark cross-correlation functions for", Xname)
+  answer <- fasp(funs, witch, 
+                 rowNames=nama, colNames=nama,
+                 title=header, dataname=Xname)
+  return(answer)
+}
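+
+## Editor's illustration (not part of the upstream file; the wrapper name is
+## invented for this note): a minimal call to markcrosscorr(), assuming the
+## spatstat dataset 'finpines', whose marks form a data frame with columns
+## 'diameter' and 'height', so the result is a 2 x 2 array of mark
+## cross-correlation functions, one per ordered pair of mark columns.
+markcrosscorr.demo <- function() {
+  mcc <- markcrosscorr(finpines)
+  plot(mcc)
+}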
+
+sewsmod <- function(d, ff, wt, Ef, rvals, method="smrep", ..., nwtsteps=500) {
+  ## Smooth Estimate of Weighted Second Moment Density
+  ## (engine for computing mark correlations, etc)
+  ## ------
+  ## Vectors containing one entry for each (close) pair of points
+  ## d = interpoint distance
+  ## ff = f(M1, M2) where M1, M2 are marks at the two points
+  ## wt = edge correction weight
+  ## -----
+  ## Ef = E[f(M, M')] where M, M' are independent random marks
+  ## 
+  d <- as.vector(d)
+  ff <- as.vector(ff)
+  wt <- as.vector(wt)
+  switch(method,
+         density={
+           fw <- ff * wt
+           sum.fw <- sum(fw)
+           sum.wt <- sum(wt)
+           ## smooth estimate of kappa_f
+           est <- density(d, weights=fw/sum.fw,
+                          from=min(rvals), to=max(rvals), n=length(rvals),
+                          ...)$y
+           numerator <- est * sum.fw
+           ## smooth estimate of kappa_1
+           est0 <- density(d, weights=wt/sum.wt, 
+                          from=min(rvals), to=max(rvals), n=length(rvals),
+                          ...)$y
+           denominator <- est0 * Ef * sum.wt
+           result <- numerator/denominator
+         },
+         sm={
+           ## This is slow!
+           oldopt <- options(warn=-1)
+           smok <- requireNamespace("sm")
+           options(oldopt)
+           if(!smok)
+             stop(paste("Option method=sm requires package sm,",
+                        "which is not available"))
+
+           ## smooth estimate of kappa_f
+           fw <- ff * wt
+           est <- sm::sm.density(d, weights=fw,
+                                 eval.points=rvals,
+                                 display="none", nbins=0, ...)$estimate
+           numerator <- est * sum(fw)/sum(est)
+           ## smooth estimate of kappa_1
+           est0 <- sm::sm.density(d, weights=wt,
+                                  eval.points=rvals,
+                                  display="none", nbins=0, ...)$estimate
+           denominator <- est0 * (sum(wt)/ sum(est0)) * Ef
+           result <- numerator/denominator
+         },
+         smrep={
+           oldopt <- options(warn=-1)
+           smok <- requireNamespace("sm")
+           options(oldopt)
+           if(!smok)
+             stop(paste("Option method=smrep requires package sm,",
+                  "which is not available"))
+
+           hstuff <- resolve.defaults(list(...), list(hmult=1, h.weights=NA))
+           if(hstuff$hmult == 1 && all(is.na(hstuff$h.weights)))
+             warning("default smoothing parameter may be inappropriate")
+           
+           ## use replication to effect the weights (it's faster)
+           nw <- round(nwtsteps * wt/max(wt))
+           drep.w <- rep.int(d, nw)
+           fw <- ff * wt
+           nfw <- round(nwtsteps * fw/max(fw))
+           drep.fw <- rep.int(d, nfw)
+
+           ## smooth estimate of kappa_f
+           est <- sm::sm.density(drep.fw,
+                                 eval.points=rvals,
+                                 display="none", ...)$estimate
+           numerator <- est * sum(fw)/sum(est)
+           ## smooth estimate of kappa_1
+           est0 <- sm::sm.density(drep.w,
+                                  eval.points=rvals,
+                                  display="none", ...)$estimate
+           denominator <- est0 * (sum(wt)/ sum(est0)) * Ef
+           result <- numerator/denominator
+         },
+         loess = {
+           ## set up data frame
+           df <- data.frame(d=d, ff=ff, wt=wt)
+           ## fit curve to numerator using loess
+           fitobj <- loess(ff ~ d, data=df, weights=wt, ...)
+           ## evaluate fitted curve at desired r values
+           Eff <- predict(fitobj, newdata=data.frame(d=rvals))
+           ## normalise:
+           ## the denominator Ef is an estimate of E(f(M1,M2))
+           ## for independent random marks M1, M2
+           result <- Eff/Ef
+         },
+         )
+  return(result)
+}
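+
+## Editor's sketch (not part of the upstream file; toy data and the wrapper
+## name are invented): the "density" method above is the ratio of two
+## weighted kernel density estimates on the pair distances, estimating
+## kappa_f(r) / (Ef * kappa_1(r)).  With distances unrelated to the marks
+## and Ef = mean(ff), the result should hover near 1.
+sewsmod.toy <- function() {
+  d  <- runif(1000)            # fake pairwise distances
+  ff <- rexp(1000)             # fake values of f(M1, M2)
+  wt <- rep(1, 1000)           # unit edge-correction weights
+  r  <- seq(0.05, 0.95, length.out=128)
+  sewsmod(d, ff, wt, Ef=mean(ff), rvals=r, method="density")
+}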
+
+############## user interface bits ##################################
+
+check.testfun <- local({
+  
+  fmul <- function(m1, m2) { m1 * m2 }
+  fequ <- function(m1, m2) { m1 == m2 }
+  f1id <- function(m) { m }
+
+  check.testfun <- function(f=NULL, f1=NULL, X) {
+    ## Validate f or f1 as a test function for point pattern X
+    ## Determine function type 'ftype'
+    ##      ("mul", "equ", "product" or "general")
+
+    if(is.null(f) && is.null(f1)) {
+      ## no functions given
+      ## default depends on kind of marks
+      if(is.multitype(X)) {
+        f <- fequ
+        ftype <- "equ"
+      } else {
+        f1 <- f1id
+        ftype <- "mul"
+      }
+    } else if(!is.null(f1)) {
+      ## f1 given
+      ## specifies test function of the form f(u,v) = f1(u) f1(v)
+      if(!is.null(f))
+        warning("argument f ignored (overridden by f1)")
+      stopifnot(is.function(f1))
+      ftype <- "product"
+    } else {
+      ## f given 
+      if(is.character(fname <- f)) {
+        switch(fname,
+               "mul"  = {
+                 f1 <- f1id
+                 ftype <- "mul"
+               },
+               "equ" = {
+                 f <- fequ
+                 ftype <- "equ"
+               },
+               {
+                 f <- get(fname)
+                 ftype <- "general"
+               })
+      } else if(is.function(f)) {
+        ftype <- if(isTRUE(all.equal(f, fmul))) "mul" else
+                 if(isTRUE(all.equal(f, fequ))) "equ" else "general"
+        if(ftype == "mul" && is.multitype(X))
+          stop(paste("Inappropriate choice of function f;",
+                     "point pattern is multitype;",
+                     "types cannot be multiplied."))
+      } else
+        stop("Argument f must be a function or the name of a function")
+    }
+    return(list(f=f, f1=f1, ftype=ftype))
+  }
+
+  check.testfun
+})
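+
+## Editor's illustration (not part of the upstream file; the wrapper name is
+## invented): how check.testfun() classifies the test function, assuming the
+## spatstat datasets 'longleaf' (numeric marks) and 'amacrine' (multitype).
+check.testfun.demo <- function() {
+  c(numeric   = check.testfun(X=longleaf)$ftype,   # "mul"
+    multitype = check.testfun(X=amacrine)$ftype,   # "equ"
+    custom    = check.testfun(f=function(m1, m2) abs(m1 - m2),
+                              X=longleaf)$ftype)   # "general"
+}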
+
+
diff --git a/R/marks.R b/R/marks.R
new file mode 100755
index 0000000..634312b
--- /dev/null
+++ b/R/marks.R
@@ -0,0 +1,388 @@
+#
+# marks.R
+#
+#   $Revision: 1.44 $   $Date: 2016/04/25 02:34:40 $
+#
+# stuff for handling marks
+#
+#
+
+marks <- function(x, ...) {
+  UseMethod("marks")
+}
+
+marks.default <- function(x, ...) { NULL }
+
+# The 'dfok' switch is temporary
+# while we convert the code to accept data frames of marks
+
+marks.ppp <- function(x, ..., dfok=TRUE, drop=TRUE) {
+  ma <- x$marks
+  if((is.data.frame(ma) || is.matrix(ma))) {
+    if(!dfok)
+      stop("Sorry, not implemented when the marks are a data frame")
+    if(drop && ncol(ma) == 1)
+      ma <- ma[,1,drop=TRUE]
+  }
+  return(ma)
+}
+
+# ------------------------------------------------------------------
+
+"marks<-" <- function(x, ..., value) {
+  UseMethod("marks<-")
+}
+
+"marks<-.ppp" <- function(x, ..., dfok=TRUE, drop=TRUE, value) {
+  np <- npoints(x)
+  m <- value
+  switch(markformat(m),
+         none = {
+           return(unmark(x))
+         },
+         vector = {
+           # vector of marks
+           if(length(m) == 1) m <- rep.int(m, np)
+           else if(np == 0) m <- rep.int(m, 0) # ensures marked pattern obtained
+           else if(length(m) != np) stop("number of points != number of marks")
+           marx <- m
+         },
+         dataframe = {
+           if(!dfok)
+             stop("Sorry, data frames of marks are not yet implemented")
+           m <- as.data.frame(m)
+           # data frame of marks
+           if(ncol(m) == 0) {
+             # no mark variables
+             marx <- NULL
+           } else {
+             # marks to be attached
+             if(nrow(m) == np) {
+               marx <- m
+             } else {
+               # lengths do not match
+               if(nrow(m) == 1 || np == 0) {
+               # replicate data frame
+                 marx <- as.data.frame(lapply(as.list(m),
+                                              function(x, k) { rep.int(x, k) },
+                                              k=np))
+               } else
+               stop("number of rows of data frame != number of points")
+             }
+             # convert single-column data frame to vector?
+             if(drop && ncol(marx) == 1)
+               marx <- marx[,1,drop=TRUE]
+           }
+         },
+         hyperframe = 
+         stop("Hyperframes of marks are not supported in ppp objects; use ppx"),
+         stop("Format of marks is not understood")
+         )
+  # attach/overwrite marks
+  Y <- ppp(x$x,x$y,window=x$window,marks=marx, check=FALSE, drop=drop)
+  return(Y)
+}
+
+"%mark%" <- setmarks <- function(x,value) {
+  marks(x) <- value
+  return(x)
+}
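+
+## Editor's illustration (not part of the upstream file; the wrapper name is
+## invented): assigning marks by replacement or with the %mark% operator;
+## a length-1 value is recycled to all points.
+marks.demo <- function() {
+  X <- runifpoint(5)
+  marks(X) <- rnorm(5)            # one numeric mark per point
+  Y <- runifpoint(5) %mark% "a"   # single value recycled to all points
+  list(X = X, Y = Y)
+}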
+
+# -------------------------------------------------
+
+markformat <- function(x) {
+  UseMethod("markformat")
+}
+
+markformat.ppp <- function(x) {
+  mf <- x$markformat
+  if(is.null(mf)) 
+    mf <- markformat(marks(x))
+  return(mf)
+}
+
+markformat.default <- function(x) {
+  if(is.null(x)) return("none")
+  if(is.null(dim(x))) {
+    if(is.vector(x) || is.factor(x) || is.atomic(x)) return("vector")
+    if(inherits(x, "POSIXt") || inherits(x, "Date")) return("vector")
+  }
+  if(is.data.frame(x) || is.matrix(x)) return("dataframe")
+  if(is.hyperframe(x)) return("hyperframe")
+  if(inherits(x, c("solist", "anylist", "listof"))) return("list")
+  stop("Mark format not understood")
+}
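+
+## Editor's illustration (not part of the upstream file; the wrapper name is
+## invented): the formats recognised above, for three spatstat datasets.
+markformat.demo <- function() {
+  c(markformat(cells),       # "none"      (unmarked pattern)
+    markformat(longleaf),    # "vector"    (numeric marks)
+    markformat(finpines))    # "dataframe" (two mark columns)
+}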
+
+# ------------------------------------------------------------------
+
+"is.marked" <-
+function(X, ...) {
+  UseMethod("is.marked")
+}
+
+"is.marked.ppp" <-
+function(X, na.action="warn", ...) {
+  marx <- marks(X, ...)
+  if(is.null(marx))
+    return(FALSE)
+  if((length(marx) > 0) && anyNA(marx)) {
+    gripe <- paste("some mark values are NA in the point pattern",
+                   short.deparse(substitute(X)))
+    switch(na.action,
+           warn = warning(gripe, call.=FALSE),
+           fatal = stop(gripe, call.=FALSE),
+           ignore = {}
+           )
+  }
+  return(TRUE)
+}
+
+"is.marked.default" <-
+  function(...) { return(!is.null(marks(...))) }
+
+
+# ------------------------------------------------------------------
+
+is.multitype <- function(X, ...) {
+  UseMethod("is.multitype")
+}
+
+is.multitype.default <- function(X, ...) {
+  m <- marks(X)
+  if(is.null(m))
+    return(FALSE)
+  if(!is.null(dim(m))) {
+    # should have a single column
+    if(dim(m)[2] != 1)
+      return(FALSE)
+    m <- m[,1,drop=TRUE]
+  }
+  return(is.factor(m))
+}
+
+is.multitype.ppp <- function(X, na.action="warn", ...) {
+  marx <- marks(X, dfok=TRUE)
+  if(is.null(marx))
+    return(FALSE)
+  if((is.data.frame(marx) || is.hyperframe(marx)) && ncol(marx) > 1)
+    return(FALSE)
+  if(!is.factor(marx))
+    return(FALSE)
+  if((length(marx) > 0) && anyNA(marx))
+    switch(na.action,
+           warn = {
+             warning(paste("some mark values are NA in the point pattern",
+                           short.deparse(substitute(X))))
+           },
+           fatal = {
+             return(FALSE)
+           },
+           ignore = {}
+           )
+  return(TRUE)
+}
+
+# ------------------------------------------------------------------
+
+unmark <- function(X) {
+  UseMethod("unmark")
+}
+
+unmark.ppp <- function(X) {
+  X$marks <- NULL
+  X$markformat <- "none"
+  return(X)
+}
+
+unmark.splitppp <- function(X) {
+  Y <- lapply(X, unmark.ppp)
+  class(Y) <- c("splitppp", class(Y))
+  return(Y)
+}
+
+##### utility functions for subsetting & combining marks #########
+
+
+marksubset <- function(x, index, format=NULL) {
+  if(is.null(format)) format <- markformat(x)
+  switch(format,
+         none={return(NULL)},
+         list=,
+         vector={return(x[index])},
+         hyperframe=,
+         dataframe={return(x[index,,drop=FALSE])},
+         stop("Internal error: unrecognised format of marks"))
+}
+
+"%msub%" <- marksubsetop <- function(x,i) { marksubset(x, i) }
+
+"%mrep%" <- markreplicateop <- function(x,n) { 
+  format <- markformat(x)
+  switch(format,
+         none={return(NULL)},
+         list=,
+         vector={ return(rep.int(x,n))},
+         dataframe={
+           return(as.data.frame(lapply(x, rep, times=n)))
+         },
+         hyperframe={
+           xcols <- as.list(x)
+           repxcols <- lapply(xcols, rep, times=n)
+           return(do.call(hyperframe, repxcols))
+         },
+         stop("Internal error: unrecognised format of marks"))
+}
+
+"%mapp%" <- markappendop <- function(x,y) { 
+  fx <- markformat(x)
+  fy <- markformat(y)
+  agree <- (fx == fy)
+  if(all(c(fx,fy) %in% c("dataframe", "hyperframe")))
+    agree <- agree && identical(names(x),names(y)) 
+  if(!agree)
+    stop("Attempted to concatenate marks that are not compatible")
+  switch(fx,
+         none   = { return(NULL) },
+         vector = {
+           if(is.factor(x) || is.factor(y))
+             return(cat.factor(x,y))
+           else return(c(x,y))
+         },
+         hyperframe=,
+         dataframe = { return(rbind(x,y)) },
+         list = {
+           z <- append(x,y)
+           z <- as.solist(z, demote=TRUE)
+           return(z)
+         },
+         stop("Internal error: unrecognised format of marks"))
+}
+
+markappend <- function(...) {
+  # combine marks from any number of patterns
+  marxlist <- list(...)
+  # check on compatibility of marks
+  mkfmt <- sapply(marxlist,markformat)
+  if(length(ufm <- unique(mkfmt))>1)
+    stop(paste("Cannot append marks of different formats:",
+               commasep(sQuote(ufm))),
+         call.=FALSE)
+  mkfmt <- mkfmt[1]
+  # combine the marks
+  switch(mkfmt,
+         none = {
+           return(NULL)
+         },
+         vector = {
+           marxlist <- lapply(marxlist,
+                              function(x){as.data.frame.vector(x,nm="v1")})
+           marx <- do.call(rbind, marxlist)[,1]
+           return(marx)
+         },
+         hyperframe =,
+         dataframe = {
+           # check compatibility of data frames
+           # (this is redundant but gives more helpful message)
+           nama <- lapply(marxlist, names)
+           dims <- lengths(nama)
+           if(length(unique(dims)) != 1)
+             stop("Data frames of marks have different column dimensions.")
+           samenames <- unlist(lapply(nama,
+                                      function(x,y) { identical(x,y) },
+                                      y=nama[[1]]))
+           if(!all(samenames))
+             stop("Data frames of marks have different names.\n")
+           marx <- do.call(rbind, marxlist)
+           return(marx)
+         },
+         list = {
+           marx <- do.call(c, marxlist)
+           marx <- as.solist(marx, demote=TRUE) 
+           return(marx)
+         })
+  stop("Unrecognised mark format")
+}
+
+markcbind <- function(...) {
+  # cbind several columns of marks
+  marxlist <- list(...)
+  mkfmt <- unlist(lapply(marxlist, markformat))
+  if(any(vacuous <- (mkfmt == "none"))) {
+    marxlist <- marxlist[!vacuous]
+    mkfmt    <- mkfmt[!vacuous]
+  }
+  if(any(isvec <- (mkfmt == "vector"))) {
+    ## convert vectors to data frames with invented names
+    for(i in which(isvec)) {
+      mi <- as.data.frame(marxlist[i])
+      colnames(mi) <- paste0("V", i)
+      marxlist[[i]] <- mi
+    }
+    mkfmt[isvec] <- "dataframe"
+  }
+  if(all(mkfmt == "dataframe")) {
+    ## result is a data frame
+    marx <- do.call(data.frame, marxlist)
+  } else {
+    ## result is a hyperframe
+    if(!all(ishyp <- (mkfmt == "hyperframe"))) 
+      marxlist[!ishyp] <- lapply(marxlist[!ishyp], as.hyperframe)
+    marx <- do.call(hyperframe, marxlist)
+  }
+  return(marx)
+}
+
+# extract only the columns of (passably) numeric data from a data frame
+numeric.columns <- function(M, logical=TRUE, others=c("discard", "na")) {
+  others <- match.arg(others)
+  M <- as.data.frame(M)
+  if(ncol(M) == 1)
+    colnames(M) <- NULL
+  process <- function(z, logi, other) {
+    if(is.numeric(z)) return(z)
+    if(logi && is.logical(z)) return(as.integer(z))
+    switch(other,
+           na=rep.int(NA_real_, length(z)),
+           discard=NULL,
+           NULL)
+  }
+  Mprocessed <- lapply(M, process, logi=logical, other=others)
+  isnul <- unlist(lapply(Mprocessed, is.null))
+  if(all(isnul)) {
+    # all columns have been removed
+    # return a data frame with no columns
+    return(as.data.frame(matrix(, nrow=nrow(M), ncol=0)))
+  }
+  Mout <- do.call(data.frame, Mprocessed[!isnul])
+  if(ncol(M) == 1 && ncol(Mout) == 1)
+    colnames(Mout) <- NULL
+  return(Mout)
+}
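+
+## Editor's illustration (not part of the upstream file; the wrapper name is
+## invented): logical columns are coerced to integer, while factor columns
+## are discarded or replaced by NA according to 'others'.
+numeric.columns.demo <- function() {
+  df <- data.frame(a=1:3, b=factor(c("x","y","x")), c=c(TRUE, FALSE, TRUE))
+  list(drop = numeric.columns(df),               # keeps columns a and c
+       na   = numeric.columns(df, others="na"))  # b becomes NA_real_
+}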
+
+coerce.marks.numeric <- function(X, warn=TRUE) {
+  marx <- marks(X)
+  if(is.null(dim(marx))) {
+    if(is.factor(marx)) {
+      if(warn) warning("Factor-valued marks were converted to integer codes",
+                       call.=FALSE)
+      marx <- as.integer(marx)
+      return(X %mark% marx)
+    }
+  } else {
+    marx <- as.data.frame(marx)
+    if(any(fax <- unlist(lapply(marx, is.factor)))) {
+      if(warn) {
+        nf <- sum(fax)
+        whinge <- paste("Factor-valued mark",
+                        ngettext(nf, "variable", "variables"),
+                        commasep(sQuote(colnames(marx)[fax])),
+                        ngettext(nf, "was", "were"),
+                        "converted to integer codes")
+        warning(whinge, call.=FALSE)
+      }
+      marx[fax] <- as.data.frame(lapply(marx[fax], as.integer))
+      return(X %mark% marx)
+    }
+  }
+  return(X)
+}
diff --git a/R/marktable.R b/R/marktable.R
new file mode 100755
index 0000000..bfddff6
--- /dev/null
+++ b/R/marktable.R
@@ -0,0 +1,65 @@
+#
+#	marktable.R
+#
+#	Tabulate mark frequencies in neighbourhood of each point 
+#	for multitype point patterns
+#
+#	$Revision: 1.7 $	$Date: 2015/03/25 03:43:35 $
+#
+#       Requested by Ian Robertson <igr at stanford.edu>
+
+
+"marktable" <- 
+function(X, R, N, exclude=TRUE, collapse=FALSE) 
+{
+  verifyclass(X, "ppp")
+  if(!is.marked(X, dfok=FALSE))
+    stop("point pattern has no marks")
+  gotR <- !missing(R) && !is.null(R)
+  gotN <- !missing(N) && !is.null(N)
+  if(gotN == gotR)
+    stop("Exactly one of the arguments N and R should be given")
+  stopifnot(is.logical(exclude) && length(exclude) == 1)
+
+  m <- marks(X)
+  if(!is.factor(m))
+    stop("marks must be a factor")
+
+  if(gotR) {
+    stopifnot(is.numeric(R) && length(R) == 1 && R > 0)
+    #' identify close pairs
+    p <- closepairs(X,R,what="indices")
+    pi <- p$i
+    pj <- p$j
+    if(!exclude) {
+      #' add identical pairs
+      n <- X$n
+      pi <- c(pi, 1:n)
+      pj <- c(pj, 1:n)
+    }
+  } else {
+    stopifnot(is.numeric(N) && length(N) == 1)
+    ii <- seq_len(npoints(X))
+    nn <- nnwhich(X, k=1:N)
+    if(N == 1) nn <- matrix(nn, ncol=1)
+    if(!exclude)
+      nn <- cbind(ii, nn)
+    pi <- as.vector(row(nn))
+    pj <- as.vector(nn)
+  }
+
+  #' tabulate
+  if(!collapse) {
+    ## table for each point
+    i <- factor(pi, levels=seq_len(npoints(X)))
+    mj <- m[pj]
+    mat <- table(point=i, mark=mj)
+  } else {
+    #' table by type
+    mi <- m[pi]
+    mj <- m[pj]
+    mat <- table(point=mi, neighbour=mj)
+  }
+  return(mat)
+}
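+
+## Editor's illustration (not part of the upstream file; the wrapper name is
+## invented), assuming the multitype spatstat dataset 'amacrine':
+marktable.demo <- function() {
+  list(byR = marktable(amacrine, R=0.1),               # types within 0.1
+       byN = marktable(amacrine, N=3, collapse=TRUE))  # 3 nearest, by type
+}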
+
diff --git a/R/matrixpower.R b/R/matrixpower.R
new file mode 100644
index 0000000..848b9e2
--- /dev/null
+++ b/R/matrixpower.R
@@ -0,0 +1,78 @@
+#'
+#'       matrixpower.R
+#'
+#'   $Revision: 1.1 $  $Date: 2016/11/13 01:50:51 $
+#'
+
+matrixsqrt <- function(x, complexOK=TRUE) {
+  ## matrix square root
+  if(length(dim(x)) != 2)
+    stop("x must be a matrix")
+  if(!is.matrix(x))
+    x <- as.matrix(x)
+  if(missing(complexOK) && is.complex(x)) complexOK <- TRUE
+  if(!complexOK) stopifnot(is.numeric(x)) else
+                 stopifnot(is.numeric(x) || is.complex(x))
+  e <- eigen(x)
+  values <- e$values
+  vectors <- e$vectors
+  if(any(values < 0)) {
+    if(complexOK) values <- as.complex(values) else
+    stop("matrix has negative eigenvalues: square root is complex",
+         call.=FALSE)
+  }
+  y <- vectors %*% diag(sqrt(values)) %*% t(vectors)
+  if(!is.null(dn <- dimnames(x)))
+    dimnames(y) <- rev(dn)
+  return(y)
+}
+
+matrixinvsqrt <- function(x, complexOK=TRUE) {
+  ## matrix inverse square root
+  if(length(dim(x)) != 2)
+    stop("x must be a matrix")
+  if(!is.matrix(x))
+    x <- as.matrix(x)
+  if(missing(complexOK) && is.complex(x)) complexOK <- TRUE
+  if(!complexOK) stopifnot(is.numeric(x)) else
+                 stopifnot(is.numeric(x) || is.complex(x))
+  e <- eigen(x)
+  values <- e$values
+  vectors <- e$vectors
+  if(any(values == 0))
+    stop("matrix is singular; cannot compute inverse square root", call.=FALSE)
+  if(any(values < 0)) {
+    if(complexOK) values <- as.complex(values) else
+    stop("matrix has negative eigenvalues: inverse square root is complex",
+         call.=FALSE)
+  }
+  y <- vectors %*% diag(1/sqrt(values)) %*% t(vectors)
+  if(!is.null(dn <- dimnames(x)))
+    dimnames(y) <- rev(dn)
+  return(y)
+}
+
+matrixpower <- function(x, power, complexOK=TRUE) {
+  check.1.real(power)
+  if(length(dim(x)) != 2)
+    stop("x must be a matrix")
+  if(!is.matrix(x))
+    x <- as.matrix(x)
+  if(missing(complexOK) && is.complex(x)) complexOK <- TRUE
+  if(!complexOK) stopifnot(is.numeric(x)) else
+                 stopifnot(is.numeric(x) || is.complex(x))
+  e <- eigen(x)
+  values <- e$values
+  vectors <- e$vectors
+  if(any(values == 0) && power < 0)
+    stop("matrix is singular; cannot compute negative power", call.=FALSE)
+  if(any(values < 0) && (power != ceiling(power))) {
+    if(complexOK) values <- as.complex(values) else
+    stop("matrix has negative eigenvalues: result is complex",
+         call.=FALSE)
+  }
+  y <- vectors %*% diag(values^power) %*% t(vectors)
+  if(!is.null(dn <- dimnames(x)))
+    dimnames(y) <- rev(dn)
+  return(y)
+}
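+
+## Editor's check (not part of the upstream file; the wrapper name is
+## invented): for a symmetric positive definite matrix, matrixsqrt() and
+## matrixpower() invert as expected, so both errors below are effectively
+## zero.
+matrixpower.demo <- function() {
+  A <- matrix(c(2, 1, 1, 2), 2, 2)
+  B <- matrixsqrt(A)
+  c(sqrt.err = max(abs(B %*% B - A)),
+    inv.err  = max(abs(matrixpower(A, -1) %*% A - diag(2))))
+}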
diff --git a/R/measures.R b/R/measures.R
new file mode 100755
index 0000000..f021982
--- /dev/null
+++ b/R/measures.R
@@ -0,0 +1,676 @@
+#
+#   measures.R
+#
+#  signed/vector valued measures with atomic and diffuse components
+#
+#  $Revision: 1.66 $  $Date: 2017/07/10 10:02:34 $
+#
+msr <- function(qscheme, discrete, density, check=TRUE) {
+  if(!inherits(qscheme, "quad"))
+    stop("qscheme should be a quadrature scheme")
+  nquad <- n.quad(qscheme)
+  U <- union.quad(qscheme)
+  wt <- w.quad(qscheme)
+  Z <- is.data(qscheme)
+  ndata <- sum(Z)
+  # ensure conformable vectors/matrices
+  stopifnot(is.numeric(discrete) || is.logical(discrete))
+  stopifnot(is.numeric(density))
+  if(is.vector(discrete) && is.vector(density)) {
+    # handle constants
+    if(length(discrete) == 1)
+      discrete <- rep.int(discrete, ndata)
+    if(length(density) == 1)
+      density <- rep.int(density, nquad)
+    # check lengths
+    if(check) {
+      check.nvector(discrete, ndata, things="data points", naok=TRUE)
+      check.nvector(density,  nquad, things="quadrature points", naok=TRUE)
+    }
+    discretepad <- numeric(nquad)
+    discretepad[Z] <- discrete
+  } else {
+    if(length(discrete) == 1 && is.matrix(density)) {
+      # replicate constant 'discrete' component to matrix of correct size
+      discrete <- matrix(discrete, ndata, ncol(density))
+    } else if(length(density) == 1 && is.matrix(discrete)) {
+      # replicate constant 'density' to matrix of correct size
+      density <- matrix(density, nquad, ncol(discrete))
+    } else {
+      discrete <- as.matrix(discrete)
+      density <- as.matrix(density)
+    }
+    if(check) {
+      # check numbers of rows
+      check.nmatrix(discrete, ndata, things="data points",
+                    naok=TRUE, squarematrix=FALSE)
+      check.nmatrix(density,  nquad, things="quadrature points",
+                    naok=TRUE, squarematrix=FALSE)
+    }
+    nd <- ncol(discrete)
+    nc <- ncol(density)
+    if(nd != nc) {
+      if(nd == 1) {
+        # replicate columns of discrete component
+        discrete <- matrix(rep.int(discrete, nc), ndata, nc)
+        colnames(discrete) <- colnames(density)
+      } else if(nc == 1) {
+        # replicate columns of density component
+        density <- matrix(rep.int(density, nd), nquad, nd)
+        colnames(density) <- colnames(discrete)
+      } else stop(paste("Incompatible numbers of columns in",
+                        sQuote("discrete"), paren(nd), "and",
+                        sQuote("density"), paren(nc)))
+    }
+    discretepad <- matrix(0, nquad, max(nd, nc))
+    discretepad[Z, ] <- discrete
+    colnames(discretepad) <- colnames(density)
+  }
+
+  #
+  #
+  # Discretised measure (value of measure for each quadrature tile)
+  val <- discretepad + wt * density
+  if(is.matrix(density)) colnames(val) <- colnames(density)
+  #
+  out <- list(loc = U,
+              val = val,
+              atoms = Z,
+              discrete = discretepad,
+              density = density,
+              wt = wt)
+  class(out) <- "msr"
+  return(out)
+}
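+
+## Editor's illustration (not part of the upstream file; the wrapper name is
+## invented): building a measure directly.  With discrete mass 1 at each
+## data point and zero density, msr() yields the counting measure of the
+## pattern (here the spatstat dataset 'cells').
+msr.demo <- function() {
+  Q <- quadscheme(cells)
+  m <- msr(Q, discrete=1, density=0)
+  integral(m)    # equals npoints(cells)
+}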
+
+# Translation table for usage of measures
+#
+#           e.g. res <- residuals(fit, ...)
+#
+#     OLD                               NEW           
+#     res[ ]                       res$val[ ]       with(res, "increment")
+#     attr(res, "atoms")           res$atoms        with(res, "is.atom")
+#     attr(res, "discrete")        res$discrete     with(res, "discrete")
+#     attr(res, "continuous")      res$density      with(res, "density")
+#     w.quad(quad.ppm(fit))        res$wt           with(res, "qweights")
+#     union.quad(quad.ppm(fit))    res$loc          with(res, "qlocations")
+# .................................................
+
+with.msr <- function(data, expr, ...) {
+  stopifnot(inherits(data, "msr"))
+  stuff <- list(increment  = data$val,
+                is.atom    = data$atoms,
+                discrete   = data$discrete,
+                density    = data$density,
+                continuous = data$density * data$wt,
+                qweights   = data$wt,
+                qlocations = data$loc,
+                atoms      = data$loc[data$atoms],
+                atommass   = data$wt[data$atoms])
+  y <- eval(substitute(expr), envir=stuff, enclos=parent.frame())
+  if(is.character(y) && length(y) == 1 && y %in% names(stuff))
+    y <- stuff[[y]]
+  return(y)
+}
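+
+## Editor's illustration (not part of the upstream file; the wrapper name is
+## invented): the quoted names in the table above can be passed directly to
+## with(), e.g. for the residual measure of a fitted model.
+with.msr.demo <- function() {
+  fit <- ppm(cells ~ x)
+  res <- residuals(fit)            # an object of class "msr"
+  c(discrete   = sum(with(res, "discrete")),
+    continuous = sum(with(res, "continuous")),
+    total      = sum(with(res, "increment")))
+}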
+
+print.msr <- function(x, ...) {
+  xloc <- x$loc
+  n <- npoints(xloc)
+  d <- ncol(as.matrix(x$val))
+  splat(paste0(if(d == 1) "Scalar" else paste0(d, "-dimensional vector"),
+               "-valued measure"))
+  if(d > 1 && !is.null(cn <- colnames(x$val)) && waxlyrical("space"))
+    splat("vector components:", commasep(sQuote(cn)))
+  if(is.marked(xloc)) {
+    splat("\tDefined on 2-dimensional space x marks")
+    if(is.multitype(xloc))
+      exhibitStringList("\tPossible marks: ", levels(marks(xloc)))
+  } 
+  if(waxlyrical("gory")) {
+    splat("Approximated by", n, "quadrature points")
+    print(as.owin(xloc))
+    splat(sum(x$atoms), "atoms")
+  }
+  if(waxlyrical("extras")) {
+    splat("Total mass:")
+    if(d == 1) {
+      splat("discrete =", signif(sum(with(x, "discrete")), 5),
+            "  continuous =", signif(sum(with(x, "continuous")), 5),
+            "  total =", signif(sum(with(x, "increment")), 5))
+    } else {
+      if(is.null(cn)) cn <- paste("component", 1:d)
+      for(j in 1:d) {
+        splat(paste0(cn[j], ":\t"),
+              "discrete =", signif(sum(with(x, "discrete")[,j]), 5),
+              "  continuous =", signif(sum(with(x, "continuous")[,j]), 5),
+              "  total =", signif(sum(with(x, "increment")[,j]), 5))
+      }
+    }
+  }
+  return(invisible(NULL))
+}
+
+is.multitype.msr <- function(X, ...) {
+  is.multitype(X$loc, ...)
+}
+is.marked.msr <- function(X, ...) {
+  is.marked(X$loc, ...)
+}
+
+split.msr <- function(x, f, drop=FALSE, ...) {
+  xloc <- x$loc
+  ## determine split using rules for split.ppp
+  locsplit <- if(missing(f))
+    split(xloc, drop=drop) else split(xloc, f, drop=drop)
+  ## extract grouping factor 
+  g <- attr(locsplit, "fgroup")
+  ## split contributions to measure
+  atomsplit <- split(x$atoms, g, drop=drop) # hyuk
+  wtsplit <- split(x$wt, g, drop=drop)
+  if(ncol(x) == 1) {
+    ## scalar measure
+    valsplit  <- split(x$val, g, drop=drop)
+    discsplit <- split(x$discrete, g, drop=drop)
+    denssplit <- split(x$density, g, drop=drop)
+  } else {
+    ## vector measure
+    valsplit  <- lapply(split(as.data.frame(x$val), g, drop=drop),
+                        as.matrix)
+    discsplit <- lapply(split(as.data.frame(x$discrete), g, drop=drop),
+                        as.matrix)
+    denssplit <- lapply(split(as.data.frame(x$density), g, drop=drop),
+                        as.matrix)
+  }
+  ## form the component measures
+  result <- mapply(list,
+                   loc=locsplit,
+                   val=valsplit,
+                   atoms=atomsplit,
+                   discrete=discsplit,
+                   density=denssplit,
+                   wt=wtsplit,
+                   SIMPLIFY=FALSE)
+  names(result) <- names(locsplit)
+  result <- lapply(result, "class<-", value="msr")
+  if(drop && any(isnul <- (sapply(locsplit, npoints) == 0)))
+    result[isnul] <- NULL
+  result <- as.solist(result)
+  return(result)
+}
+
+integral.msr <- function(f, domain=NULL, ...) {
+  stopifnot(inherits(f, "msr"))
+  if(!is.null(domain)) {
+    if (is.tess(domain)) 
+      return(sapply(tiles(domain), integral.msr, f = f))
+    f <- f[domain]
+  }
+  y <- with(f, "increment")
+  if(is.matrix(y)) apply(y, 2, sum) else sum(y)
+}
+
+augment.msr <- function(x, ..., sigma) {
+  ## add a pixel image of the smoothed density component
+  stopifnot(inherits(x, "msr"))
+  if(!is.null(attr(x, "smoothdensity"))) return(x)
+  d <- ncol(as.matrix(x$val))
+  xloc <- x$loc
+  W <- as.owin(xloc)
+  if(missing(sigma)) sigma <- maxnndist(xloc, positive=TRUE)
+  if(is.multitype(xloc)) {
+    ## multitype case - split by type, smooth, sum
+    y <- lapply(split(x), augment.msr, sigma=sigma, ...)
+    z <- lapply(y, attr, which="smoothdensity")
+    if((nc <- ncol(x)) == 1) {
+      ## scalar valued
+      smo <- Reduce("+", z)
+    } else {
+      ## vector valued
+      smo <- vector(mode="list", length=nc)
+      for(j in 1:nc) 
+        smo[[j]] <- Reduce("+", lapply(z, "[[", i=j))
+      smo <- as.solist(smo)
+    }
+    attr(x, "smoothdensity") <- smo
+    return(x)
+  }   
+  ## smooth density unless constant
+  xdensity <- as.matrix(x$density)
+  ra <- apply(xdensity, 2, range)
+  varble <- apply(as.matrix(ra), 2, diff) > sqrt(.Machine$double.eps)
+  ##
+  if(d == 1) {
+    smo <- if(!varble) as.im(mean(xdensity), W=W) else
+           do.call(Smooth,
+                   resolve.defaults(list(X=xloc %mark% xdensity),
+                                    list(...),
+                                    list(sigma=sigma)))
+  } else {
+    smo <- vector(mode="list", length=d)
+    names(smo) <- colnames(x)
+    if(any(varble)) 
+      smo[varble] <-
+        do.call(Smooth,
+                resolve.defaults(list(X=xloc %mark% xdensity[,varble, drop=FALSE]),
+                                 list(...),
+                                 list(sigma=sigma)))
+    if(any(!varble)) 
+      smo[!varble] <- lapply(apply(xdensity[, !varble, drop=FALSE], 2, mean),
+                             as.im, W=W)
+    smo <- as.solist(smo)
+  }
+  attr(x, "smoothdensity") <- smo
+  return(x)
+}
+
+plot.msr <- function(x, ..., add=FALSE,
+                     how=c("image", "contour", "imagecontour"),
+                     main=NULL, 
+                     do.plot=TRUE,
+                     multiplot=TRUE,
+                     massthresh=0) {
+  if(is.null(main)) 
+    main <- short.deparse(substitute(x))
+  how <- match.arg(how)
+  
+  if(!multiplot) {
+    ## compress everything to a single panel
+    x$loc <- unmark(x$loc)
+    if(is.matrix(x$val))      x$val <- rowSums(x$val)
+    if(is.matrix(x$discrete)) x$discrete <- rowSums(x$discrete)
+    if(is.matrix(x$density))  x$density <- rowSums(x$density)
+    if(!is.null(smo <- attr(x, "smoothdensity")) && inherits(smo, "solist"))
+      attr(x, "smoothdensity") <- Reduce("+", smo)
+  }
+
+  d <- dim(x)[2]
+  k <- if(is.multitype(x)) length(levels(marks(x$loc))) else 1
+
+  ## multiple plot panels may be generated
+  if(k == 1 && d == 1) {
+    ## single plot
+    y <- solist(x)
+  } else if(k > 1 && d == 1) {
+    ## multitype
+    y <- split(x)
+  } else if(k == 1 && d > 1) {
+    ## vector-valued
+    y <- unstack(x)
+  } else if(k > 1 && d > 1) {
+    ## both multitype and vector-valued
+    y <- split(x)
+    typenames <- names(y)
+    vecnames <- colnames(x$val)
+    y <- as.solist(Reduce(append, lapply(y, unstack)))
+    names(y) <- as.vector(t(outer(typenames, vecnames, paste, sep=".")))
+  } 
+  # ensure image of density is present
+  y <- lapply(y, augment.msr)
+
+  if(length(y) > 1) {
+    ## plot as an array of panels
+    userarg <- list(...)
+    rowcol <- list(nrows=k, ncols=d)
+    if(any(c("nrows", "ncols") %in% names(userarg))) rowcol <- list()
+    result <- do.call(plot.solist, resolve.defaults(list(y),
+                                                    userarg,
+                                                    rowcol,
+                                                    list(how=how,
+                                                         main=main,
+                                                         equal.scales=TRUE)))
+    return(invisible(result))
+  }
+  ## scalar measure
+  x <- y[[1]]
+  ## get atoms
+  xatomic <- (x$loc %mark% x$discrete)[x$atoms]
+  if(length(massthresh) && all(is.finite(massthresh))) {
+    ## ignore atoms with absolute mass <= massthresh
+    check.1.real(massthresh)
+    xatomic <- xatomic[abs(marks(xatomic)) > massthresh]
+  }
+  xtra.im <- graphicsPars("image")
+  xtra.pp <- setdiff(graphicsPars("ppp"), c("box", "col"))
+  xtra.ow <- graphicsPars("owin")
+  smo <- attr(x, "smoothdensity")
+  ##
+  do.image <-  how %in% c("image", "imagecontour")
+  do.contour <-  how %in% c("contour", "imagecontour")
+  ## allocate space for plot and legend using do.plot=FALSE mechanism
+  pdata <- do.call.matched(plot.ppp,
+                           resolve.defaults(list(x=xatomic,
+                                                 do.plot=FALSE,
+                                                 main=main),
+                                            list(...),
+                                            list(show.all=TRUE)),
+                           extrargs=xtra.pp)
+  result <- pdata
+  bb <- attr(pdata, "bbox")
+  if(do.image) {
+    idata <- do.call.matched(plot.im,
+                             resolve.defaults(list(x=smo,
+                                                   main=main,
+                                                   do.plot=FALSE),
+                                              list(...)),
+                             extrargs=xtra.im)
+    result <- idata
+    bb <- boundingbox(bb, attr(idata, "bbox"))
+  }
+  ##
+  attr(result, "bbox") <- bb
+  ##
+  if(do.plot) {
+    if(!add) {
+      blankmain <- prepareTitle(main)$blank
+      ## initialise plot
+      do.call.matched(plot.owin,
+                      resolve.defaults(list(x=bb, type="n", main=blankmain),
+                                       list(...)),
+                      extrargs=xtra.ow)
+    }
+    ## display density
+    if(do.image) 
+      do.call.matched(plot.im,
+                      resolve.defaults(list(x=smo, add=TRUE),
+                                       list(...),
+                                       list(main=main, show.all=TRUE)),
+                      extrargs=xtra.im)
+    if(do.contour) 
+      do.call.matched(contour.im,
+                      resolve.defaults(list(x=smo, add=TRUE),
+                                       list(...),
+                                       list(main=main,
+                                            axes=FALSE, show.all=!do.image)),
+                      extrargs=c("zlim", "labels", "labcex",
+                        ## DO NOT ALLOW 'col' 
+                        "drawlabels", "method", "vfont", "lty", "lwd"))
+    ## display atoms
+    do.call.matched(plot.ppp,
+                    resolve.defaults(list(x=xatomic, add=TRUE, main=""),
+                                     list(...),
+                                     list(show.all=TRUE)),
+                    extrargs=xtra.pp)
+  }
+  return(invisible(result))
+}
+
+"[.msr" <- function(x, i, j, ...) {
+  valu  <- as.matrix(x$val)
+  disc  <- as.matrix(x$discrete)
+  dens  <- as.matrix(x$density)
+  wt    <- x$wt
+  atoms <- x$atoms
+  #
+  if(!missing(j)) {
+    valu <- valu[, j]
+    disc <- disc[, j]
+    dens <- dens[, j]
+  }
+  loc <- x$loc
+  if(!missing(i)) {
+    # use [.ppp to identify which points are retained
+    locn  <- loc %mark% seq_len(npoints(loc))
+    loci  <- locn[i, clip=TRUE]
+    loc   <- unmark(loci)
+    id    <- marks(loci)
+    # extract
+    valu  <- valu[id, ]
+    disc  <- disc[id, ]
+    dens  <- dens[id, ]
+    wt    <- wt[id]
+    atoms <- atoms[id]
+  }
+  out <- list(loc=loc,
+              val=valu,
+              atoms=atoms,
+              discrete=disc,
+              density=dens,
+              wt=wt)
+  class(out) <- "msr"
+  return(out)    
+}
+
+dim.msr <- function(x) { dim(as.matrix(x$val)) }
+
+dimnames.msr <- function(x) { list(NULL, colnames(x$val)) }
+
+smooth.msr <- function(X, ...) {
+  .Deprecated("Smooth.msr", package="spatstat",
+     msg="smooth.msr is deprecated: use the generic Smooth with a capital S")
+  Smooth(X, ...)
+}
+
+Smooth.msr <- function(X, ..., drop=TRUE) {
+  verifyclass(X, "msr")
+  loc <- X$loc
+  val <- X$val
+  result <- density(loc, weights=val, ...)
+  if(!drop && is.im(result))
+    result <- solist(result)
+  return(result)
+}
+
+as.owin.msr <- function(W, ..., fatal=TRUE) {
+  as.owin(W$loc, ..., fatal=fatal)
+}
+
+domain.msr <- Window.msr <- function(X, ...) { as.owin(X) } 
+
+shift.msr <- function(X,  ...) {
+  X$loc <- Xloc <- shift(X$loc, ...)
+  if(!is.null(smo <- attr(X, "smoothdensity")))
+    attr(X, "smoothdensity") <- shift(smo, getlastshift(Xloc))
+  putlastshift(X, getlastshift(Xloc))
+}
+
+as.layered.msr <- local({
+
+  as.layered.msr <- function(X) {
+    nc <- ncol(X)
+    if(nc == 0) return(layered())
+    if(nc == 1) return(layered(X))
+    Y <- lapply(seq_len(nc), pickcol, x=X)
+    names(Y) <- colnames(X)
+    return(layered(LayerList=Y))
+  }
+
+  pickcol <- function(j,x) x[,j]
+  
+  as.layered.msr
+})
+
+
+scalardilate.msr <- function(X, f, ...) {
+  X$loc <- Xloc <- scalardilate(X$loc, f, ...)
+  putlastshift(X, getlastshift(Xloc))
+}
+
+Ops.msr <- function(e1,e2=NULL){
+  vn <- c("val", "discrete", "density")
+  if(nargs() == 1L) {
+    #' unary operator
+    if(!is.element(.Generic, c("+", "-")))
+      stop(paste("Unary operation",
+                 sQuote(paste0(.Generic, "A")),
+                 "is undefined for a measure A."),
+           call.=FALSE)
+    e1 <- unclass(e1)
+    e1[vn] <- lapply(e1[vn], .Generic)
+    class(e1) <- "msr"
+    return(e1)
+  } else {
+    #' binary operator
+    m1 <- inherits(e1, "msr")
+    m2 <- inherits(e2, "msr")
+    if(m1 && m2) {
+      if(!is.element(.Generic, c("+", "-")))
+        stop(paste("Operation", sQuote(paste0("A", .Generic, "B")),
+                   "is undefined for measures A, B"),
+             call.=FALSE)
+      k1 <- dim(e1)[2]
+      k2 <- dim(e2)[2]
+      if(k1 != k2) 
+        stop(paste("Operation", sQuote(paste0("A", .Generic, "B")),
+                   "is undefined because A, B have incompatible dimensions:",
+                   "A is", ngettext(k1, "scalar", paste0(k1, "-vector")),
+                   ", B is", ngettext(k2, "scalar", paste0(k2, "-vector"))),
+             call.=FALSE)
+      if(!identical(e1$loc, e2$loc)) {
+        haha <- harmonise(e1, e2)
+        e1 <- haha[[1L]]
+        e2 <- haha[[2L]]
+      }
+      e1 <- unclass(e1)
+      e2 <- unclass(e2)
+      e1[vn] <- mapply(.Generic, e1[vn], e2[vn],
+                       SIMPLIFY=FALSE)
+      class(e1) <- "msr"
+      return(e1)
+    } else if(m1 && is.numeric(e2)) {
+      if(!is.element(.Generic, c("/", "*")))
+        stop(paste("Operation",
+                   sQuote(paste0("A", .Generic, "z")),
+                   "is undefined for a measure A and numeric z."),
+             call.=FALSE)
+      e1 <- unclass(e1)
+      e1[vn] <- lapply(e1[vn], .Generic, e2=e2)
+      class(e1) <- "msr"
+      return(e1)
+    } else if(m2 && is.numeric(e1)) {
+      if(.Generic != "*") 
+        stop(paste("Operation",
+                   sQuote(paste0("z", .Generic, "A")),
+                   "is undefined for a measure A and numeric z."),
+             call.=FALSE)
+      e2 <- unclass(e2)
+      e2[vn] <- lapply(e2[vn], .Generic, e1=e1)
+      class(e2) <- "msr"
+      return(e2)
+    }
+    stop(paste("Operation", sQuote(paste0("e1", .Generic, "e2")),
+               "is undefined for this kind of data"),
+         call.=FALSE)
+  }
+}
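+
+## Editor's illustration (not part of the upstream file; the wrapper name is
+## invented): measures support linear operations only; a product of two
+## measures would be rejected by the checks above.
+Ops.msr.demo <- function() {
+  res <- residuals(ppm(cells ~ x))
+  a <- 2 * res     # scalar multiple
+  b <- res - res   # difference of measures (the zero measure)
+  d <- -res        # negation
+  list(a, b, d)
+}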
+
+measurePositive <- function(x) {
+  if(!inherits(x, "msr"))
+    stop("x must be a measure", call.=FALSE)
+  y <- x
+  y$discrete <- pmax(0, x$discrete)
+  y$density  <- pmax(0, x$density)
+  y$val      <- y$discrete + y$wt * y$density
+  return(y)
+}
+
+measureNegative <- function(x) {
+  if(!inherits(x, "msr"))
+    stop("x must be a measure", call.=FALSE)
+  y <- x
+  y$discrete <- -pmin(0, x$discrete)
+  y$density  <- -pmin(0, x$density)
+  y$val      <- y$discrete + y$wt * y$density
+  return(y)
+}
+
+measureVariation <- function(x) {
+  if(!inherits(x, "msr"))
+    stop("x must be a measure", call.=FALSE)
+  y <- x
+  y$discrete <- abs(x$discrete)
+  y$density  <- abs(x$density)
+  y$val      <- y$discrete + y$wt * y$density
+  return(y)
+}
+
+totalVariation <- function(x) integral(measureVariation(x))
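+
+## Editor's illustration (not part of the upstream file; the wrapper name is
+## invented): the functions above give the Jordan decomposition, so
+## integral(x) = integral(positive part) - integral(negative part) and
+## totalVariation(x) = integral(positive part) + integral(negative part).
+## Both checks below are effectively zero.
+variation.demo <- function() {
+  res <- residuals(ppm(cells ~ 1))
+  pos <- integral(measurePositive(res))
+  neg <- integral(measureNegative(res))
+  c(check.total = integral(res) - (pos - neg),
+    check.tv    = totalVariation(res) - (pos + neg))
+}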
+  
+harmonise.msr <- local({
+
+  harmonise.msr <- function(...) {
+    argz <- list(...)
+    n <- length(argz)
+    if(n == 0) return(argz)
+    ismeasure <- sapply(argz, inherits, what="msr")
+    if(!any(ismeasure))
+      stop("No measures supplied")
+    if(!all(ismeasure))
+      stop("All arguments should be measures (objects of class msr)")
+    if(n < 2) return(argz)
+    result <- vector(mode="list", length=n)
+    ## extract entries
+    loclist <- lapply(argz, getElement, name="loc")
+    atomlist <- lapply(argz, getElement, name="atoms")
+    masslist <- lapply(argz, getElement, name="discrete")
+    denslist <- lapply(argz, getElement, name="density")
+    ## check for compatible dimensions of measure values
+    dimen <- unique(sapply(argz, ncol))
+    if(length(dimen) > 1)
+      stop("Measures have different dimensions:", commasep(sort(dimen)))
+    ## check for marked points
+    ismarked <- sapply(loclist, is.marked)
+    if(any(ismarked) && !all(ismarked))
+      stop("Some, but not all, quadrature schemes are marked")
+    ismarked <- all(ismarked)
+    ## union of all quadrature points in all measures
+    Uloc <- do.call(superimpose, append(unname(loclist), list(check=FALSE)))
+    Uloc <- unique(Uloc)
+    nU <- npoints(Uloc)
+    ## match each quadrature set to the union
+    ## and find nearest data point to each point in the union
+    if(!ismarked) {
+      matchlist <- lapply(loclist, nncross, Y=Uloc, what="which")
+      nearlist  <- lapply(loclist, ssorcnn, xx=Uloc, what="which")
+    } else {
+      stop("Not yet implemented for marked quadrature schemes")
+    }
+    ## nearest neighbour interpolation of density values of each argument
+    ## onto the common quadrature set
+    Udenslist <- mapply(extract, x=denslist, i=nearlist,
+                        SIMPLIFY=FALSE)
+    ## initialise other bits
+    noatoms  <- logical(nU) 
+    zeromass <- if(dimen == 1) numeric(nU) else matrix(0, nU, dimen)
+    Uatomlist <- rep(list(noatoms), n)  
+    Umasslist <- rep(list(zeromass), n)
+    ## assign atoms in each argument
+    Uatomlist <- mapply(subsetgets, x=Uatomlist, i=matchlist, value=atomlist,
+                        SIMPLIFY=FALSE)
+    Umasslist <- mapply(subsetgets, x=Umasslist, i=matchlist, value=masslist,
+                        SIMPLIFY=FALSE)
+    ## union of atoms
+    isatom <- Reduce("|", Uatomlist)
+    ## masses at atoms
+    Umasslist <- lapply(Umasslist, extract, i=isatom)
+    ## make common quadrature scheme
+    UQ <- quadscheme(Uloc[isatom], Uloc[!isatom])
+    ## reorder density data correspondingly
+    neworder <- c(which(isatom), which(!isatom))
+    Udenslist <- lapply(Udenslist, extract, i=neworder)
+    ## make new measures
+    result <- mapply(msr,
+                     MoreArgs=list(qscheme=UQ),
+                     discrete=Umasslist,
+                     density=Udenslist,
+                     SIMPLIFY=FALSE)
+    names(result) <- names(argz)
+    class(result) <- unique(c("solist", class(result)))
+    return(result)
+  }
+
+  ## wrapper for nncross() ('nncross' reversed) so that lapply() can vary
+  ## the second argument yy while xx is held fixed
+  ssorcnn <- function(xx, yy, what) nncross(xx, yy, what=what)
+  
+  extract <- function(x, i) {
+    if(is.matrix(x)) x[i, , drop=FALSE] else x[i]
+  }
+  subsetgets <- function(x, i, value) {
+    if(is.matrix(x)) {
+      x[i, ] <- value
+    } else {
+      x[i] <- value
+    }
+    return(x)
+  }
+
+  harmonise.msr
+})
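+
+## Editor's illustration (not part of the upstream file; the wrapper name is
+## invented): harmonise.msr() maps measures onto a common quadrature scheme
+## so that they can be added or subtracted.  Different 'nd' values give the
+## two models different quadrature points.
+harmonise.msr.demo <- function() {
+  resA <- residuals(ppm(cells ~ 1, nd=16))
+  resB <- residuals(ppm(cells ~ x, nd=24))
+  hh <- harmonise.msr(resA, resB)   # now on a common quadrature scheme
+  integral(hh[[1]] - hh[[2]])
+}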
diff --git a/R/mincontrast.R b/R/mincontrast.R
new file mode 100755
index 0000000..c7b25f4
--- /dev/null
+++ b/R/mincontrast.R
@@ -0,0 +1,909 @@
+#
+#  mincontrast.R
+#
+#  Functions for estimation by minimum contrast
+#
+
+##################  base ################################
+
+mincontrast <- local({
+
+  ## objective function (in a format that is re-usable by other code)
+  contrast.objective <- function(par, objargs, ...) {
+    with(objargs, {
+      theo <- theoretical(par=par, rvals, ...)
+      if(!is.vector(theo) || !is.numeric(theo))
+        stop("theoretical function did not return a numeric vector")
+      if(length(theo) != nrvals)
+        stop("theoretical function did not return the correct number of values")
+      if(!is.null(adjustment)) {
+        theo <- adjustment$fun(theo=theo, par=par, auxdata=adjustment$auxdata)
+        if(!is.vector(theo) || !is.numeric(theo))
+	  stop("adjustment did not return a numeric vector")
+        if(length(theo) != nrvals)
+          stop("adjustment did not return the correct number of values")
+      }	
+      discrep <- (abs(theo^qq - obsq))^pp
+      value <- mean(discrep)
+      value <- min(value, .Machine$double.xmax)
+      return(value)
+    })
+  }
+
+  mincontrast <- function(observed, theoretical, startpar,
+                          ...,
+                          ctrl=list(q = 1/4, p = 2, rmin=NULL, rmax=NULL),
+                          fvlab=list(label=NULL, desc="minimum contrast fit"),
+                          explain=list(dataname=NULL,
+                            modelname=NULL, fname=NULL),
+			  adjustment=NULL) {
+    verifyclass(observed, "fv")
+
+    stopifnot(is.function(theoretical))
+    if(!any("par" %in% names(formals(theoretical))))
+      stop(paste("Theoretical function does not include an argument called",
+                 sQuote("par")))
+
+    ## enforce defaults
+    ctrl <- resolve.defaults(ctrl, list(q = 1/4, p = 2, rmin=NULL, rmax=NULL))
+    fvlab <- resolve.defaults(fvlab,
+                              list(label=NULL, desc="minimum contrast fit"))
+    explain <- resolve.defaults(explain,
+                                list(dataname=NULL, modelname=NULL, fname=NULL))
+  
+    ## extract vector of r values
+    argu <- fvnames(observed, ".x")
+    rvals <- observed[[argu]]
+    
+    ## determine range of r values
+    rmin <- ctrl$rmin
+    rmax <- ctrl$rmax
+    if(!is.null(rmin) && !is.null(rmax)) 
+      stopifnot(rmin < rmax && rmin >= 0)
+    else {
+      alim <- attr(observed, "alim") %orifnull% range(rvals)
+      if(is.null(rmax)) rmax <- alim[2]
+      if(is.null(rmin)) {
+        rmin <- alim[1]
+        if(rmin == 0 && identical(explain$fname,"g"))
+          rmin <- rmax/1e3 # avoid artefacts at zero in pcf
+      }
+    }
+    ## extract vector of observed values of statistic
+    valu <- fvnames(observed, ".y")
+    obs <- observed[[valu]]
+    ## restrict to [rmin, rmax]
+    if(max(rvals) < rmax)
+      stop(paste("rmax=", signif(rmax,4),
+                 "exceeds the range of available data",
+                 "= [", signif(min(rvals),4), ",", signif(max(rvals),4), "]"))
+    sub <- (rvals >= rmin) & (rvals <= rmax)
+    rvals <- rvals[sub]
+    obs <- obs[sub]
+    ## sanity clause
+    if(!all(ok <- is.finite(obs))) {
+      whinge <- paste("Some values of the empirical function",
+                      sQuote(explain$fname),
+                      "were infinite or NA.")
+      iMAX <- max(which(ok))
+      iMIN <- min(which(!ok)) + 1
+      if(iMAX > iMIN && all(ok[iMIN:iMAX])) {
+        rmin <- rvals[iMIN]
+        rmax <- rvals[iMAX]
+        obs   <- obs[iMIN:iMAX]
+        rvals <- rvals[iMIN:iMAX]
+        sub[sub] <- ok
+        warning(paste(whinge,
+                      "Range of r values was reset to",
+                      prange(c(rmin, rmax))),
+                call.=FALSE)
+      } else stop(paste(whinge, "Please choose a narrower range [rmin, rmax]"),
+                  call.=FALSE)
+    }
+    ## pack data into a list
+    objargs <- list(theoretical = theoretical,
+                    rvals       = rvals,
+                    nrvals      = length(rvals),
+                    obsq        = obs^(ctrl$q),   ## for efficiency
+                    qq          = ctrl$q,
+                    pp          = ctrl$p,
+                    rmin        = rmin,
+                    rmax        = rmax,
+		    adjustment  = adjustment)
+    ## go
+    minimum <- optim(startpar, fn=contrast.objective, objargs=objargs, ...)
+    ## if convergence failed, issue a warning 
+    signalStatus(optimStatus(minimum), errors.only=TRUE)
+    ## evaluate the fitted theoretical curve
+    fittheo <- theoretical(minimum$par, rvals, ...)
+    ## pack it up as an `fv' object
+    label <- fvlab$label %orifnull% "%s[fit](r)"
+    desc  <- fvlab$desc
+    fitfv <- bind.fv(observed[sub, ],
+                     data.frame(fit=fittheo),
+                     label, desc)
+    if(!is.null(adjustment)) {
+      adjtheo <- adjustment$fun(theo=fittheo,
+      	                        par=minimum$par,
+				auxdata=adjustment$auxdata)
+      fitfv <- bind.fv(fitfv,
+                       data.frame(adjfit=adjtheo),
+		       "%s[adjfit](r)",
+		       paste("adjusted", desc))
+    }				
+    result <- list(par      = minimum$par,
+                   fit      = fitfv,
+                   opt      = minimum,
+                   ctrl     = list(p=ctrl$p,q=ctrl$q,rmin=rmin,rmax=rmax),
+                   info     = explain,
+                   startpar = startpar,
+                   objfun   = contrast.objective,
+                   objargs  = objargs,
+                   dotargs  = list(...))
+    class(result) <- c("minconfit", class(result))
+    return(result)
+  }
+
+  mincontrast
+})
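+
+## Editor's sketch (not part of the upstream file; the model and wrapper
+## name are invented): a direct call to mincontrast(), fitting the
+## hypothetical model g(r) = 1 + a*exp(-r/s) to the estimated pair
+## correlation function of the spatstat dataset 'redwood'.  The theoretical
+## function must have an argument named 'par'.
+mincontrast.demo <- function() {
+  g <- pcf(redwood)
+  expmodel <- function(par, r, ...) { 1 + par[1] * exp(-r/par[2]) }
+  fit <- mincontrast(g, expmodel, startpar=c(a=1, s=0.05),
+                     ctrl=list(rmin=0.01, rmax=0.2))
+  fit$par
+}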
+
+print.minconfit <- function(x, ...) {
+  terselevel <- spatstat.options('terse')
+  digits <- getOption('digits')
+  ## explanatory
+  cat(paste("Minimum contrast fit ",
+            "(",
+            "object of class ",
+            dQuote("minconfit"),
+            ")",
+            "\n", sep=""))
+  mo <- x$info$modelname
+  fu <- x$info$fname
+  da <- x$info$dataname
+  cm <- x$covmodel
+  if(!is.null(mo))
+    cat("Model:", mo, fill=TRUE)
+  if(!is.null(cm)) {
+    ## Covariance/kernel model and nuisance parameters 
+    cat("\t", cm$type, "model:", cm$model, fill=TRUE)
+    margs <- cm$margs
+    if(!is.null(margs)) {
+      nama <- names(margs)
+      tags <- ifelse(nzchar(nama), paste(nama, "="), "")
+      tagvalue <- paste(tags, margs)
+      splat("\t", cm$type, "parameters:",
+            paste(tagvalue, collapse=", "))
+    }
+  }
+  if(!is.null(fu) && !is.null(da))
+    splat("Fitted by matching theoretical", fu, "function to", da)
+  else {
+    if(!is.null(fu))
+      splat(" based on", fu)
+    if(!is.null(da))
+      splat(" fitted to", da)
+  }
+
+  if(waxlyrical('space', terselevel))
+      cat("\n")
+  ## Values
+  splat("Internal parameters fitted by minimum contrast ($par):")
+  print(x$par, ...)
+  if(waxlyrical('space', terselevel))
+      cat("\n")
+  
+  ## Handling new parameters
+  isPCP <- x$isPCP %orifnull% x$internal$model!="lgcp"
+  cpar <- x$clustpar
+  if (!is.null(cpar)) {
+    splat("Fitted",
+          if(isPCP) "cluster" else "covariance",
+          "parameters:")
+    print(cpar, digits=digits)
+  } else{
+    ## Old modelpar field if necessary
+    mp <- x$modelpar
+    if(!is.null(mp)) {
+      splat("Derived parameters of",
+            if(!is.null(mo)) mo else "model",
+            "($modelpar):")
+      print(mp)
+    }
+  }
+  if(!is.null(mu <- x$mu)) {
+    if(isPCP) {
+      splat("Mean cluster size:",
+            if(!is.im(mu)) paste(signif(mu, digits), "points") else "[pixel image]")
+    } else {
+      splat("Fitted mean of log of random intensity:",
+            if(!is.im(mu)) signif(mu, digits) else "[pixel image]")
+    }
+  }
+  if(waxlyrical('space', terselevel))
+      cat("\n")
+  ## Diagnostics
+  printStatus(optimStatus(x$opt))
+  ## Starting values
+  if(waxlyrical('gory', terselevel)){
+      cat("\n")
+      splat("Starting values of parameters:")
+      print(x$startpar)
+      ## Algorithm parameters
+      ct <- x$ctrl
+      splat("Domain of integration:",
+            "[",
+            signif(ct$rmin,4),
+            ",",
+            signif(ct$rmax,4),
+            "]")
+      splat("Exponents:",
+            "p=", paste(signif(ct$p, 3), ",",  sep=""),
+            "q=", signif(ct$q,3))
+  }
+  invisible(NULL)
+}
+              
+
+plot.minconfit <- function(x, ...) {
+  xname <- short.deparse(substitute(x))
+  do.call(plot.fv,
+          resolve.defaults(list(x$fit),
+                           list(...),
+                           list(main=xname)))
+}
+
+unitname.minconfit <- function(x) {
+  unitname(x$fit)
+}
+
+"unitname<-.minconfit" <- function(x, value) {
+  unitname(x$fit) <- value
+  return(x)
+}
+
+as.fv.minconfit <- function(x) x$fit
+
+######  convergence status of 'optim' object
+
+optimStatus <- function(x, call=NULL) {
+  cgce <- x$convergence
+  neval <- x$counts[["function"]]
+  switch(paste(cgce),
+         "0" = {
+           simpleMessage(
+                         paste("Converged successfully after", 
+                               neval, "function evaluations"),
+                         call)
+         },
+         "1" = simpleWarning(
+           paste("Iteration limit maxit was reached after",
+                 neval, "function evaluations"),
+           call),
+         "10" = simpleWarning("Nelder-Mead simplex was degenerate", call),
+         "51"= {
+           simpleWarning(
+                         paste("Warning message from L-BFGS-B method:",
+                               sQuote(x$message)),
+                         call)
+         },
+         "52"={
+           simpleError(
+                         paste("Error message from L-BFGS-B method:",
+                               sQuote(x$message)),
+                         call)
+         },
+         simpleWarning(paste("Unrecognised convergence code", cgce), call)
+         )
+}
+
+signalStatus <- function(x, errors.only=FALSE) {
+  stopifnot(inherits(x, "condition"))
+  if(inherits(x, "error")) stop(x)
+  if(inherits(x, "warning")) warning(x) 
+  if(inherits(x, "message") && !errors.only) message(x)
+  return(invisible(NULL))
+}
+
+printStatus <- function(x, errors.only=FALSE) {
+  prefix <-
+    if(inherits(x, "error")) "error: " else 
+    if(inherits(x, "warning")) "warning: " else NULL
+  if(!is.null(prefix) || !errors.only)
+    cat(paste(prefix, conditionMessage(x), "\n", sep=""))
+  return(invisible(NULL))
+}
+
+accumulateStatus <- function(x, stats=NULL) {
+  if(is.null(stats))
+    stats <- list(values=list(), frequencies=integer(0))
+  if(!inherits(x, c("error", "warning", "message")))
+    return(stats)
+  ## Note: assignments inside with(stats, ...) would be lost, because
+  ## they modify a temporary environment; update local copies instead.
+  values <- stats$values
+  frequencies <- stats$frequencies
+  same <- unlist(lapply(values, identical, y=x))
+  if(any(same)) {
+    ## condition seen before: increment its frequency
+    i <- min(which(same))
+    frequencies[i] <- frequencies[i] + 1
+  } else {
+    ## new condition: append it with frequency 1
+    values <- append(values, list(x))
+    frequencies <- c(frequencies, 1)
+  }
+  return(list(values=values, frequencies=frequencies))
+}
+
+printStatusList <- function(stats) {
+  with(stats,
+       {
+         for(i in seq_along(values)) {
+           printStatus(values[[i]])
+           cat(paste("\t", paren(paste(frequencies[i], "times")), "\n"))
+         }
+       }
+       )
+  invisible(NULL)
+}
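+
+## Usage sketch (illustrative): the helpers above form a small pipeline
+## for 'optim' diagnostics -- classify the result, signal or print it,
+## and tally repeated conditions across several runs.
+demo.opt <- optim(c(1, 1), function(p) sum((p - 2)^2))
+printStatus(optimStatus(demo.opt))    # "Converged successfully after ..."
+tally <- NULL
+for(i in 1:3)
+  tally <- accumulateStatus(optimStatus(demo.opt), tally)
+printStatusList(tally)                # same condition, reported "(3 times)"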
+
+  
+############### applications (specific models) ##################
+
+
+getdataname <- function(defaultvalue, ..., dataname=NULL) {
+  if(!is.null(dataname)) dataname else defaultvalue
+}
+  
+thomas.estK <- function(X, startpar=c(kappa=1,scale=1),
+                        lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) {
+
+  dataname <-
+    getdataname(short.deparse(substitute(X), 20), ...)
+
+  if(inherits(X, "fv")) {
+    K <- X
+    if(!identical(attr(K, "fname")[1], "K"))
+      warning("Argument X does not appear to be a K-function")
+  } else if(inherits(X, "ppp")) {
+    K <- Kest(X)
+    dataname <- paste("Kest(", dataname, ")", sep="")
+    if(is.null(lambda))
+      lambda <- summary(X)$intensity
+  } else 
+    stop("Unrecognised format for argument X")
+
+  info <- spatstatClusterModelInfo("Thomas")
+  startpar <- info$checkpar(startpar)
+  theoret <- info$K
+  
+  result <- mincontrast(K, theoret, startpar,
+                        ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax),
+                        fvlab=list(label="%s[fit](r)",
+                          desc="minimum contrast fit of Thomas process"),
+                        explain=list(dataname=dataname,
+                          fname=attr(K, "fname"),
+                          modelname="Thomas process"), ...)
+  ## imbue with meaning
+  par <- result$par
+  names(par) <- c("kappa", "sigma2")
+  result$par <- par
+  ## infer meaningful model parameters
+  result$modelpar <- info$interpret(par, lambda)
+  result$internal <- list(model="Thomas")
+  ## add new parametrisation to object
+  result$clustpar <- info$checkpar(par, old=FALSE)
+  return(result)
+}
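+
+## Usage sketch (mirrors the documented examples; 'redwood' is a
+## standard spatstat dataset):
+fitT <- thomas.estK(redwood)
+fitT$par        # fitted (kappa, sigma2)
+fitT$modelpar   # derived parameters on the natural scale
+plot(fitT)      # observed K-function with the fitted theoretical curve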
+
+lgcp.estK <- function(X, startpar=c(var=1,scale=1),
+                      covmodel=list(model="exponential"), 
+                      lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) {
+
+  dataname <-
+    getdataname(short.deparse(substitute(X), 20), ...)
+  
+  if(inherits(X, "fv")) {
+    K <- X
+    if(!identical(attr(K, "fname")[1], "K"))
+      warning("Argument X does not appear to be a K-function")
+  } else if(inherits(X, "ppp")) {
+    K <- Kest(X)
+    dataname <- paste("Kest(", dataname, ")", sep="")
+    if(is.null(lambda))
+      lambda <- summary(X)$intensity
+  } else 
+    stop("Unrecognised format for argument X")
+
+  info <- spatstatClusterModelInfo("LGCP")
+  startpar <- info$checkpar(startpar)
+
+  ## digest parameters of Covariance model and test validity
+  ph <- info$parhandler
+  cmodel <- do.call(ph, covmodel)
+  
+  theoret <- info$K
+
+  result <- mincontrast(K, theoret, startpar,
+                        ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax),
+                        fvlab=list(label="%s[fit](r)",
+                          desc="minimum contrast fit of LGCP"),
+                        explain=list(dataname=dataname,
+                          fname=attr(K, "fname"),
+                          modelname="log-Gaussian Cox process"),
+                        ...,
+                        model=cmodel$model,
+                        margs=cmodel$margs)
+  ## imbue with meaning
+  par <- result$par
+  names(par) <- c("sigma2", "alpha")
+  result$par <- par
+  result$covmodel <- cmodel
+  ## infer model parameters
+  result$modelpar <- info$interpret(par, lambda)
+  result$internal <- list(model="lgcp")
+  ## add new parametrisation to object
+  result$clustpar <- info$checkpar(par, old=FALSE)
+  result$clustargs <- info$checkclustargs(cmodel$margs, old=FALSE)
+  return(result)
+}
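+
+## Usage sketch ('redwood' again): the default exponential covariance
+## needs no extra covariance arguments, and the digested covariance
+## model is recorded in the result.
+fitL <- lgcp.estK(redwood)
+fitL$par        # fitted (sigma2, alpha)
+fitL$covmodel   # covariance model as digested by the parameter handler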
+
+matclust.estK <- function(X, startpar=c(kappa=1,scale=1),
+                          lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) {
+
+  dataname <-
+    getdataname(short.deparse(substitute(X), 20), ...)
+
+  if(inherits(X, "fv")) {
+    K <- X
+    if(!identical(attr(K, "fname")[1], "K"))
+      warning("Argument X does not appear to be a K-function")
+  } else if(inherits(X, "ppp")) {
+    K <- Kest(X)
+    dataname <- paste("Kest(", dataname, ")", sep="")
+    if(is.null(lambda))
+      lambda <- summary(X)$intensity
+  } else 
+    stop("Unrecognised format for argument X")
+
+  info <- spatstatClusterModelInfo("MatClust")
+  startpar <- info$checkpar(startpar)
+  theoret <- info$K
+  funaux <-  info$funaux
+  
+  result <- mincontrast(K, theoret, startpar,
+                        ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax),
+                        fvlab=list(label="%s[fit](r)",
+                          desc="minimum contrast fit of Matern Cluster process"),
+                        explain=list(dataname=dataname,
+                          fname=attr(K, "fname"),
+                          modelname="Matern Cluster process"),
+                        ...,
+                        funaux=funaux)
+  ## imbue with meaning
+  par <- result$par
+  names(par) <- c("kappa", "R")
+  result$par <- par
+  ## infer model parameters
+  result$modelpar <- info$interpret(par, lambda)
+  result$internal <- list(model="MatClust")
+  ## add new parametrisation to object
+  result$clustpar <- info$checkpar(par, old=FALSE)
+  return(result)
+}
+
+## versions using pcf (suggested by Jan Wild)
+
+thomas.estpcf <- function(X, startpar=c(kappa=1,scale=1),
+                          lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...,
+                          pcfargs=list()){
+
+  dataname <-
+    getdataname(short.deparse(substitute(X), 20), ...)
+
+  if(inherits(X, "fv")) {
+    g <- X
+    if(!identical(attr(g, "fname")[1], "g"))
+      warning("Argument X does not appear to be a pair correlation function")
+  } else if(inherits(X, "ppp")) {
+    g <- do.call(pcf.ppp, append(list(X), pcfargs))
+    dataname <- paste("pcf(", dataname, ")", sep="")
+    if(is.null(lambda))
+      lambda <- summary(X)$intensity
+  } else 
+    stop("Unrecognised format for argument X")
+
+  info <- spatstatClusterModelInfo("Thomas")
+  startpar <- info$checkpar(startpar)
+  theoret <- info$pcf
+  
+  ## avoid using g(0) as it may be infinite
+  argu <- fvnames(g, ".x")
+  rvals <- g[[argu]]
+  if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) {
+    rmin <- rvals[2]
+  }
+  result <- mincontrast(g, theoret, startpar,
+                        ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax),
+                        fvlab=list(
+                          label="%s[fit](r)",
+                          desc="minimum contrast fit of Thomas process"),
+                        explain=list(
+                          dataname=dataname,
+                          fname=attr(g, "fname"),
+                          modelname="Thomas process"), ...)
+  ## imbue with meaning
+  par <- result$par
+  names(par) <- c("kappa", "sigma2")
+  result$par <- par
+  ## infer model parameters
+  result$modelpar <- info$interpret(par, lambda)
+  result$internal <- list(model="Thomas")
+  ## add new parametrisation to object
+  result$clustpar <- info$checkpar(par, old=FALSE)
+  return(result)
+}
+
+matclust.estpcf <- function(X, startpar=c(kappa=1,scale=1),
+                            lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...,
+                            pcfargs=list()){
+
+  dataname <-
+    getdataname(short.deparse(substitute(X), 20), ...)
+
+  if(inherits(X, "fv")) {
+    g <- X
+    if(!identical(attr(g, "fname")[1], "g"))
+      warning("Argument X does not appear to be a pair correlation function")
+  } else if(inherits(X, "ppp")) {
+    g <- do.call(pcf.ppp, append(list(X), pcfargs))
+    dataname <- paste("pcf(", dataname, ")", sep="")
+    if(is.null(lambda))
+      lambda <- summary(X)$intensity
+  } else 
+    stop("Unrecognised format for argument X")
+
+  info <- spatstatClusterModelInfo("MatClust")
+  startpar <- info$checkpar(startpar)
+  theoret <- info$pcf
+  funaux <-  info$funaux
+  
+  ## avoid using g(0) as it may be infinite
+  argu <- fvnames(g, ".x")
+  rvals <- g[[argu]]
+  if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) {
+    rmin <- rvals[2]
+  }
+  result <- mincontrast(g, theoret, startpar,
+                        ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax),
+                        fvlab=list(label="%s[fit](r)",
+                          desc="minimum contrast fit of Matern Cluster process"),
+                        explain=list(dataname=dataname,
+                          fname=attr(g, "fname"),
+                          modelname="Matern Cluster process"),
+                        ...,
+                        funaux=funaux)
+  ## imbue with meaning
+  par <- result$par
+  names(par) <- c("kappa", "R")
+  result$par <- par
+  ## infer model parameters
+  result$modelpar <- info$interpret(par, lambda)
+  result$internal <- list(model="MatClust")
+  ## add new parametrisation to object
+  result$clustpar <- info$checkpar(par, old=FALSE)
+  return(result)
+}
+
+lgcp.estpcf <- function(X, startpar=c(var=1,scale=1),
+                      covmodel=list(model="exponential"), 
+                        lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...,
+                        pcfargs=list()) {
+  
+  dataname <-
+    getdataname(short.deparse(substitute(X), 20), ...)
+  
+  if(inherits(X, "fv")) {
+    g <- X
+    if(!identical(attr(g, "fname")[1], "g"))
+      warning("Argument X does not appear to be a pair correlation function")
+  } else if(inherits(X, "ppp")) {
+    g <- do.call(pcf.ppp, append(list(X), pcfargs))
+    dataname <- paste("pcf(", dataname, ")", sep="")
+    if(is.null(lambda))
+      lambda <- summary(X)$intensity
+  } else 
+    stop("Unrecognised format for argument X")
+
+  info <- spatstatClusterModelInfo("LGCP")
+  startpar <- info$checkpar(startpar)
+
+  ## digest parameters of Covariance model and test validity
+  ph <- info$parhandler
+  cmodel <- do.call(ph, covmodel)
+  
+  theoret <- info$pcf
+  
+  result <- mincontrast(g, theoret, startpar,
+                        ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax),
+                        fvlab=list(label="%s[fit](r)",
+                          desc="minimum contrast fit of LGCP"),
+                        explain=list(dataname=dataname,
+                          fname=attr(g, "fname"),
+                          modelname="log-Gaussian Cox process"),
+                        ...,
+                        model=cmodel$model,
+                        margs=cmodel$margs)
+  ## imbue with meaning
+  par <- result$par
+  names(par) <- c("sigma2", "alpha")
+  result$par <- par
+  result$covmodel <- cmodel
+  ## infer model parameters
+  result$modelpar <- info$interpret(par, lambda)
+  result$internal <- list(model="lgcp")
+  ## add new parametrisation to object
+  result$clustpar <- info$checkpar(par, old=FALSE)
+  result$clustargs <- info$checkclustargs(cmodel$margs, old=FALSE)
+  return(result)
+}
+
+
+cauchy.estK <- function(X, startpar=c(kappa=1,scale=1),
+                        lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) {
+
+## omega: scale parameter of Cauchy kernel function
+## eta: scale parameter of Cauchy pair correlation function
+## eta = 2 * omega
+
+  dataname <-
+    getdataname(short.deparse(substitute(X), 20), ...)
+
+  if(inherits(X, "fv")) {
+    K <- X
+    if(!identical(attr(K, "fname")[1], "K"))
+      warning("Argument X does not appear to be a K-function")
+  } else if(inherits(X, "ppp")) {
+    K <- Kest(X)
+    dataname <- paste("Kest(", dataname, ")", sep="")
+    if(is.null(lambda))
+      lambda <- summary(X)$intensity
+  } else 
+    stop("Unrecognised format for argument X")
+
+  info <- spatstatClusterModelInfo("Cauchy")
+  startpar <- info$checkpar(startpar)
+  theoret <- info$K
+
+  desc <- "minimum contrast fit of Neyman-Scott process with Cauchy kernel"
+  result <- mincontrast(K, theoret, startpar,
+                        ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax),
+                        fvlab=list(label="%s[fit](r)", desc=desc),
+                        explain=list(dataname=dataname,
+                          fname=attr(K, "fname"),
+                          modelname="Cauchy process"), ...)
+  ## imbue with meaning
+  par <- result$par
+  names(par) <- c("kappa", "eta2")
+  result$par <- par
+  ## infer model parameters
+  result$modelpar <- info$interpret(par, lambda)
+  result$internal <- list(model="Cauchy")
+  ## add new parametrisation to object
+  result$clustpar <- info$checkpar(par, old=FALSE)
+  return(result)
+}
+
+
+cauchy.estpcf <- function(X, startpar=c(kappa=1,scale=1),
+                          lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...,
+                          pcfargs=list()) {
+
+## omega: scale parameter of Cauchy kernel function
+## eta: scale parameter of Cauchy pair correlation function
+## eta = 2 * omega
+
+  dataname <-
+    getdataname(short.deparse(substitute(X), 20), ...)
+
+  if(inherits(X, "fv")) {
+    g <- X
+    if(!identical(attr(g, "fname")[1], "g"))
+      warning("Argument X does not appear to be a pair correlation function")
+  } else if(inherits(X, "ppp")) {
+    g <- do.call(pcf.ppp, append(list(X), pcfargs))
+    dataname <- paste("pcf(", dataname, ")", sep="")
+    if(is.null(lambda))
+      lambda <- summary(X)$intensity
+  } else 
+    stop("Unrecognised format for argument X")
+
+  info <- spatstatClusterModelInfo("Cauchy")
+  startpar <- info$checkpar(startpar)
+  theoret <- info$pcf
+
+  ## avoid using g(0) as it may be infinite
+  argu <- fvnames(g, ".x")
+  rvals <- g[[argu]]
+  if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) {
+    rmin <- rvals[2]
+  }
+  
+  desc <- "minimum contrast fit of Neyman-Scott process with Cauchy kernel"
+  result <- mincontrast(g, theoret, startpar,
+                        ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax),
+                        fvlab=list(label="%s[fit](r)", desc=desc),
+                        explain=list(dataname=dataname,
+                          fname=attr(g, "fname"),
+                          modelname="Cauchy process"), ...)
+  ## imbue with meaning
+  par <- result$par
+  names(par) <- c("kappa", "eta2")
+  result$par <- par
+  ## infer model parameters
+  result$modelpar <- info$interpret(par, lambda)
+  result$internal <- list(model="Cauchy")
+  ## add new parametrisation to object
+  result$clustpar <- info$checkpar(par, old=FALSE)
+  return(result)
+}
+
+## user-callable
+resolve.vargamma.shape <- function(..., nu.ker=NULL, nu.pcf=NULL, default = FALSE) {
+  if(is.null(nu.ker) && is.null(nu.pcf)){
+    if(!default)
+        stop("Must specify either nu.ker or nu.pcf", call.=FALSE)
+    nu.ker <- -1/4
+  }
+  if(!is.null(nu.ker) && !is.null(nu.pcf))
+    stop("Only one of nu.ker and nu.pcf should be specified",
+         call.=FALSE)
+  if(!is.null(nu.ker)) {
+    check.1.real(nu.ker)
+    stopifnot(nu.ker > -1/2)
+    nu.pcf <- 2 * nu.ker + 1
+  } else {
+    check.1.real(nu.pcf)
+    stopifnot(nu.pcf > 0)
+    nu.ker <- (nu.pcf - 1)/2
+  }
+  return(list(nu.ker=nu.ker, nu.pcf=nu.pcf))
+}
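+
+## Worked example: the two shape parametrisations are linked by
+## nu.pcf = 2 * nu.ker + 1, so either one determines the other.
+resolve.vargamma.shape(nu.ker=-1/4)  # list(nu.ker=-0.25, nu.pcf=0.5)
+resolve.vargamma.shape(nu.pcf=1/2)   # the same pair, specified the other way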
+
+vargamma.estK <- function(X, startpar=c(kappa=1,scale=1), nu = -1/4,
+                          lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL,
+                          ...) {
+
+## nu.ker: smoothness parameter of Variance Gamma kernel function
+## omega: scale parameter of kernel function
+## nu.pcf: smoothness parameter of Variance Gamma pair correlation function
+## eta: scale parameter of Variance Gamma pair correlation function
+## nu.pcf = 2 * nu.ker + 1    and    eta = omega
+
+  dataname <-
+    getdataname(short.deparse(substitute(X), 20), ...)
+  
+  if(inherits(X, "fv")) {
+    K <- X
+    if(!identical(attr(K, "fname")[1], "K"))
+      warning("Argument X does not appear to be a K-function")
+  } else if(inherits(X, "ppp")) {
+    K <- Kest(X)
+    dataname <- paste("Kest(", dataname, ")", sep="")
+    if(is.null(lambda))
+      lambda <- summary(X)$intensity
+  } else 
+    stop("Unrecognised format for argument X")
+
+  ## Catch old nu.ker/nu.pcf syntax and resolve nu-value.
+  dots <- list(...)
+  if(missing(nu)){
+      nu <- resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf, default = TRUE)$nu.ker
+  }
+  check.1.real(nu)
+  stopifnot(nu > -1/2)
+
+  info <- spatstatClusterModelInfo("VarGamma")
+  startpar <- info$checkpar(startpar)
+  theoret <- info$K
+  
+  ## test validity of parameter nu and digest
+  ph <- info$parhandler
+  cmodel <- ph(nu.ker=nu)
+  margs <- cmodel$margs
+
+  desc <- "minimum contrast fit of Neyman-Scott process with Variance Gamma kernel"
+  result <- mincontrast(K, theoret, startpar,
+                        ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax),
+                        fvlab=list(label="%s[fit](r)", desc=desc),
+                        explain=list(dataname=dataname,
+                          fname=attr(K, "fname"),
+                          modelname="Variance Gamma process"),
+                        margs=margs, ...)
+  ## imbue with meaning
+  par <- result$par
+  names(par) <- c("kappa", "eta")
+  result$par <- par
+  result$covmodel <- cmodel
+  ## infer model parameters
+  result$modelpar <- info$interpret(par, lambda)
+  result$internal <- list(model="VarGamma")
+  ## add new parametrisation to object
+  result$clustpar <- info$checkpar(par, old=FALSE)
+  result$clustargs <- info$checkclustargs(cmodel$margs, old=FALSE)
+  return(result)
+}
+
+
+vargamma.estpcf <- function(X, startpar=c(kappa=1,scale=1), nu=-1/4, 
+                            lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, 
+                            ..., pcfargs=list()) {
+
+## nu.ker: smoothness parameter of Variance Gamma kernel function
+## omega: scale parameter of kernel function
+## nu.pcf: smoothness parameter of Variance Gamma pair correlation function
+## eta: scale parameter of Variance Gamma pair correlation function
+## nu.pcf = 2 * nu.ker + 1    and    eta = omega
+
+  dataname <-
+    getdataname(short.deparse(substitute(X), 20), ...)
+
+  if(inherits(X, "fv")) {
+    g <- X
+    if(!identical(attr(g, "fname")[1], "g"))
+      warning("Argument X does not appear to be a pair correlation function")
+  } else if(inherits(X, "ppp")) {
+    g <- do.call(pcf.ppp, append(list(X), pcfargs))
+    dataname <- paste("pcf(", dataname, ")", sep="")
+    if(is.null(lambda))
+      lambda <- summary(X)$intensity
+  } else 
+    stop("Unrecognised format for argument X")
+  
+  ## Catch old nu.ker/nu.pcf syntax and resolve nu-value.
+  dots <- list(...)
+  if(missing(nu)){
+      ## nutmp <- try(resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf)$nu.ker, silent=TRUE)
+      ## if(!inherits(nutmp, "try-error")) nu <- nutmp
+      nu <- resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf, default = TRUE)$nu.ker
+  }
+  check.1.real(nu)
+  stopifnot(nu > -1/2)
+
+  info <- spatstatClusterModelInfo("VarGamma")
+  startpar <- info$checkpar(startpar)
+  theoret <- info$pcf
+
+  ## test validity of parameter nu and digest 
+  ph <- info$parhandler
+  cmodel <- ph(nu.ker=nu)
+  margs <- cmodel$margs
+  
+  ## avoid using g(0) as it may be infinite
+  argu <- fvnames(g, ".x")
+  rvals <- g[[argu]]
+  if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) {
+    rmin <- rvals[2]
+  }
+  
+  desc <- "minimum contrast fit of Neyman-Scott process with Variance Gamma kernel"
+  result <- mincontrast(g, theoret, startpar,
+                        ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax),
+                        fvlab=list(label="%s[fit](r)", desc=desc),
+                        explain=list(dataname=dataname,
+                          fname=attr(g, "fname"),
+                          modelname="Variance Gamma process"),
+                        margs=margs,
+                        ...)
+  ## imbue with meaning
+  par <- result$par
+  names(par) <- c("kappa", "eta")
+  result$par <- par
+  result$covmodel <- cmodel
+  ## infer model parameters
+  result$modelpar <- info$interpret(par, lambda)
+  result$internal <- list(model="VarGamma")
+  ## add new parametrisation to object
+  result$clustpar <- info$checkpar(par, old=FALSE)
+  result$clustargs <- info$checkclustargs(cmodel$margs, old=FALSE)
+  return(result)
+}
+
diff --git a/R/minkowski.R b/R/minkowski.R
new file mode 100644
index 0000000..29211ab
--- /dev/null
+++ b/R/minkowski.R
@@ -0,0 +1,89 @@
+#'
+#'       minkowski.R
+#' 
+#'  Minkowski Sum and related operations
+#'
+#'  $Revision: 1.7 $ $Date: 2017/06/05 10:31:58 $
+
+
+"%(+)%" <- MinkowskiSum <- local({
+
+  MinkowskiSum <- function(A, B) {
+    if(is.ppp(A)) return(UnionOfShifts(B, A))
+    if(is.ppp(B)) return(UnionOfShifts(A, B))
+    ## extract lists of simply-connected polygons
+    AA <- simplepolygons(A)
+    BB <- simplepolygons(B)
+    ## determine common resolution for polyclip operations
+    eps <- mean(c(sidelengths(Frame(A)), sidelengths(Frame(B))))/2^30
+    ## compute Minkowski sums of pieces
+    pieces <- NULL
+    for(b in BB) 
+      pieces <- append(pieces, lapply(AA, MinkSumConnected, b=b, eps=eps))
+    ## form union in one step, to avoid artefacts
+    result <- union.owin(solapply(pieces, poly2owin))
+    return(result)
+  }
+
+  poly2owin <- function(z) owin(poly=z, check=FALSE)
+
+  MinkSumConnected <- function(a, b, eps) {
+    ## a and b are list(x,y) simply-connected polygons
+    out <- polyclip::polyminkowski(a, b, x0=0, y0=0, eps=eps)
+    if(length(out) == 1) return(out)
+    ispos <- (sapply(out, Area.xypolygon) >= 0)
+    if(sum(ispos) > 1) {
+      stop("Internal error: result of MinkSumConnected is not simply connected",
+           call.=FALSE)
+    }
+    return(out[ispos])
+  }
+
+  simplepolygons <- function(A) {
+    if(is.psp(A)) return(psp2poly(A))
+    ## convert to owin, then polygonal
+    A <- as.polygonal(A)
+    ## separate into simply-connected pieces
+    AA <- break.holes(A)$bdry
+    return(AA)
+  }
+  
+  ## handle segment patterns as well 
+  psp2poly <- function(X) apply(as.matrix(X$ends), 1, seg2poly)
+
+  seg2poly <- function(z) with(as.list(z), list(x=c(x0, x1, x0), y=c(y0,y1,y0)))
+
+  ##
+  UnionOfShifts <- function(X, V) {
+    #' compute the union or superposition of copies of X by vectors in V
+    v <- as.matrix(coords(V))
+    n <- nrow(v)
+    Y <- vector(mode="list", length=n)
+    for(i in seq_len(n)) 
+      Y[[i]] <- shift(X, v[i,])
+    Y <- as.solist(Y)
+    if(is.owin(X)) {
+      Z <- union.owin(Y)
+    } else {
+      #' X is a pattern of objects in a window
+      W <- MinkowskiSum(Window(X), Window(V))
+      Z <- superimpose(Y, W=W)
+    }
+    return(Z)
+  }
+
+  MinkowskiSum
+})
+
+dilationAny <- function(A, B) { MinkowskiSum(A, reflect(B)) }
+
+"%(-)%" <- erosionAny <- function(A, B) {
+  D <- Frame(A)
+  Dplus <- grow.rectangle(D, 0.1 * shortside(D))
+  Ac <- complement.owin(A, Dplus)
+  AcB <- MinkowskiSum(Ac, reflect(B))
+  if(is.subset.owin(D, AcB))
+    return(emptywindow(D))
+  C <- complement.owin(AcB[Dplus], Dplus)[D]
+  return(C)
+}
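+
+## Usage sketch: erosionAny implements the set identity
+##    A (-) B  =  complement( complement(A) (+) reflect(B) ),
+## computed inside a slightly enlarged frame. For a symmetric
+## structuring element such as a disc it agrees with erosion().
+A <- square(1)
+B <- disc(0.2)
+AplusB  <- A %(+)% B   # Minkowski sum: a square with rounded corners
+AminusB <- A %(-)% B   # Minkowski erosion: a smaller interior square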
diff --git a/R/minnndist.R b/R/minnndist.R
new file mode 100644
index 0000000..811c628
--- /dev/null
+++ b/R/minnndist.R
@@ -0,0 +1,64 @@
+##
+##  minnndist.R
+##
+## Fast versions of min(nndist(X)), max(nndist(X))
+##
+##  $Revision: 1.5 $  $Date: 2017/06/05 10:31:58 $
+
+minnndist <- function(X, positive=FALSE) {
+  stopifnot(is.ppp(X))
+  n <- npoints(X)
+  if(n <= 1) return(NA)
+  x <- X$x
+  y <- X$y
+  o <- fave.order(y)
+  big <- sqrt(.Machine$double.xmax)
+  if(positive) {
+      z <- .C("minPnnd2",
+              n = as.integer(n),
+              x = as.double(x[o]),
+              y = as.double(y[o]),
+              as.double(big),
+              result = as.double(numeric(1)),
+              PACKAGE = "spatstat")
+  } else {
+      z <- .C("minnnd2",
+              n = as.integer(n),
+              x = as.double(x[o]),
+              y = as.double(y[o]),
+              as.double(big),
+              result = as.double(numeric(1)),
+              PACKAGE = "spatstat")
+  }
+  return(sqrt(z$result))
+}
+
+maxnndist <- function(X, positive=FALSE) {
+  stopifnot(is.ppp(X))
+  n <- npoints(X)
+  if(n <= 1) return(NA)
+  x <- X$x
+  y <- X$y
+  o <- fave.order(y)
+  big <- sqrt(.Machine$double.xmax)
+  if(positive) {
+      z <- .C("maxPnnd2",
+              n = as.integer(n),
+              x = as.double(x[o]),
+              y = as.double(y[o]),
+              as.double(big),
+              result = as.double(numeric(1)),
+              PACKAGE = "spatstat")
+  } else {
+      z <- .C("maxnnd2",
+              n = as.integer(n),
+              x = as.double(x[o]),
+              y = as.double(y[o]),
+              as.double(big),
+              result = as.double(numeric(1)),
+              PACKAGE = "spatstat")
+  }
+  return(sqrt(z$result))
+}
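+
+## Usage sketch: these agree with the slower idioms min(nndist(X)) and
+## max(nndist(X)), e.g. for the standard 'cells' dataset:
+all.equal(minnndist(cells), min(nndist(cells)))   # TRUE
+all.equal(maxnndist(cells), max(nndist(cells)))   # TRUE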
+
+          
diff --git a/R/model.depends.R b/R/model.depends.R
new file mode 100755
index 0000000..5f61e3c
--- /dev/null
+++ b/R/model.depends.R
@@ -0,0 +1,86 @@
+#
+# Determine which 'canonical variables' depend on a supplied covariate
+#
+#   $Revision: 1.8 $  $Date: 2013/04/25 06:37:43 $
+#
+
+model.depends <- function(object) {
+  # supplied covariates
+  fo <- formula(object)
+  if(length(as.list(fo)) == 3) {
+    # formula has a response: strip it
+    fo <- fo[-2]
+  }
+  covars <- variablesinformula(fo)
+  # canonical covariates 
+  mm <- model.matrix(object)
+  ass <- attr(mm, "assign")
+  # model terms
+  tt <- terms(object)
+  lab <- attr(tt, "term.labels")
+  # 'ass' maps canonical covariates to 'lab'
+  # determine which canonical covariate depends on which supplied covariate
+  depends <- matrix(FALSE, length(ass), length(covars))
+  for(i in seq(along=ass)) {
+    if(ass[i] == 0) # 0 is the intercept term
+      depends[i,] <- FALSE
+    else {
+      turm <- lab[ass[i]]
+      depends[i, ] <- covars %in% all.vars(parse(text=turm))
+    }
+  }
+  rownames(depends) <- colnames(mm)
+  colnames(depends) <- covars
+  # detect offsets
+  if(!is.null(oo <- attr(tt, "offset")) && ((noo <- length(oo)) > 0)) {
+    # entries of 'oo' index the list of variables in terms object
+    vv <- attr(tt, "variables")
+    offdep <- matrix(FALSE, noo, length(covars))
+    offnms <- character(noo)
+    for(i in seq_len(noo)) {
+      offseti <- languageEl(vv, oo[i] + 1)
+      offdep[i, ] <- covars %in% all.vars(offseti)
+      offnms[i] <- deparse(offseti)
+    }
+    rownames(offdep) <- offnms
+    colnames(offdep) <- covars
+    attr(depends, "offset") <- offdep
+  }
+  return(depends)
+}
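+
+# Illustrative sketch with an ordinary linear model: rows are the
+# canonical covariates (columns of the model matrix), columns are the
+# supplied covariates, and entries flag dependence.
+fitlm <- lm(Fertility ~ Agriculture + log(Examination), data=swiss)
+model.depends(fitlm)
+#                   Agriculture Examination
+#  (Intercept)            FALSE       FALSE
+#  Agriculture             TRUE       FALSE
+#  log(Examination)       FALSE        TRUE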
+
+model.is.additive <- function(object) {
+  dep <- model.depends(object)
+  hit <- t(dep) %*% dep
+  diag(hit) <- 0
+  ok <- all(hit == 0)
+  return(ok)
+}
+
+model.covariates <- function(object, fitted=TRUE, offset=TRUE) {
+  md <- model.depends(object)
+  nm <- colnames(md)
+  keep <- rep.int(FALSE, length(nm))
+  # variables used in formula with coefficients
+  if(fitted) keep <- apply(md, 2, any)
+  # variables used in offset
+  if(offset) {
+    oo <- attr(md, "offset")
+    if(!is.null(oo)) 
+      keep <- keep | apply(oo, 2, any)
+  }
+  return(nm[keep])
+}
+
+has.offset.term <- function(object) {
+  # model terms
+  tt <- terms(object)
+  oo  <- attr(tt, "offset")
+  return(!is.null(oo) && (length(oo) > 0))
+}
+
+has.offset <- function(object) {
+  has.offset.term(object) || !is.null(model.offset(model.frame(object)))
+}
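+
+# Illustrative check of the offset detectors above:
+folm <- lm(Fertility ~ Agriculture + offset(log(Examination)), data=swiss)
+has.offset.term(folm)   # TRUE: an offset() term appears in the formula
+has.offset(folm)        # TRUE likewise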
+
+
diff --git a/R/morisita.R b/R/morisita.R
new file mode 100755
index 0000000..0db74b2
--- /dev/null
+++ b/R/morisita.R
@@ -0,0 +1,41 @@
+#
+# morisita.R
+#
+#  $Revision: 1.2 $  $Date: 2016/02/11 10:17:12 $
+#
+
+miplot <- function(X, ...) {
+  Xname <- short.deparse(substitute(X))
+  X <- as.ppp(X)
+  W <- X$window
+  N <- X$n
+  if(W$type != "rectangle")
+    stop("Window of X is not a rectangle - Morisita index undefined")
+  a <- min(diff(W$xrange), diff(W$yrange))
+  maxnquad <- floor(a/mean(nndist(X)))
+  if(maxnquad <= 1)
+    stop("Not enough points for a Morisita plot")
+  mindex <- numeric(maxnquad)
+  for(nquad in 1:maxnquad) {
+    qq <- quadratcount(X, nquad, nquad)
+    tt <- as.vector(as.table(qq))
+    mindex[nquad] <- length(tt) * sum(tt * (tt-1))/(N*(N-1))
+  }
+
+  quadsize <- diameter(W)/(1:maxnquad)
+  ok <- (quadsize <= a)
+  quadsize <- quadsize[ok]
+  mindex   <- mindex[ok]
+  
+  unitinfo <- summary(unitname(W))$axis
+  do.call(plot.default,
+          resolve.defaults(list(quadsize, mindex),
+                           list(...),
+                           list(xlim=c(0,max(quadsize)),
+                                ylim=c(0,max(1, mindex)),
+                                xlab=paste("Diameter of quadrat", unitinfo),
+                                ylab="Morisita index",
+                                main=paste("Morisita plot for", Xname))))
+  abline(h=1, lty=2)
+  return(invisible(NULL))
+}
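+
+# Usage sketch ('japanesepines' is a standard spatstat dataset): the
+# Morisita index is plotted against quadrat diameter; values near the
+# dashed line at 1 are consistent with a homogeneous Poisson process.
+miplot(japanesepines)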
diff --git a/R/morphology.R b/R/morphology.R
new file mode 100755
index 0000000..f060128
--- /dev/null
+++ b/R/morphology.R
@@ -0,0 +1,395 @@
+#
+#  morphology.R
+#
+#  dilation, erosion, opening, closing
+#
+#  generic functions
+#  and methods for owin, psp, ppp
+#
+#  $Revision: 1.30 $   $Date: 2016/07/30 05:13:53 $
+#
+
+# ............ generic  ............................
+
+erosion  <- function(w, r, ...) { UseMethod("erosion") }
+
+dilation <- function(w, r, ...) { UseMethod("dilation") }
+
+closing  <- function(w, r, ...) { UseMethod("closing") }
+
+opening  <- function(w, r, ...) { UseMethod("opening") }
+
+# ............ methods for class 'owin' ............................
+
+
+erode.owin <- function(...) {
+  .Deprecated("erosion.owin", package="spatstat")
+  erosion.owin(...)
+}
+
+erosion.owin <- 
+  function(w, r, shrink.frame=TRUE, ..., strict=FALSE, polygonal=NULL) {
+  verifyclass(w, "owin")
+  validradius(r, "erosion")
+  if(r == 0 && !strict)
+    return(w)
+
+  xr <- w$xrange
+  yr <- w$yrange
+  
+  if(2 * r >= min(diff(xr), diff(yr)))
+    stop("erosion distance r too large for frame of window")
+
+  # compute the dimensions of the eroded frame
+  exr <- xr + c(r, -r)
+  eyr <- yr + c(r, -r)
+  ebox <- list(x=exr[c(1,2,2,1)], y=eyr[c(1,1,2,2)])
+
+  ismask <- is.mask(w)
+  if(is.empty(w))
+    return(emptywindow(ebox))
+
+  # determine type of computation
+  if(is.null(polygonal))
+    polygonal <- !ismask
+  else {
+    stopifnot(is.logical(polygonal))
+    if(polygonal && ismask) {
+      # try to convert
+      w <- as.polygonal(w)
+      if(is.mask(w))
+        polygonal <- FALSE
+    }
+  }
+  
+  if(is.rectangle(w) && polygonal) {
+    # result is a smaller rectangle
+    if(shrink.frame) {
+      return(owin(exr, eyr))  # type 'rectangle' 
+    } else {
+      return(owin(xr, yr, poly=ebox, check=FALSE)) # type 'polygonal'
+    }
+  }
+
+  if(polygonal) {
+    # compute polygonal region using polyclip package
+    pnew <- polyclip::polyoffset(w$bdry, -r, jointype="round")
+    # ensure correct polarity
+    totarea <- sum(unlist(lapply(pnew, Area.xypolygon)))
+    if(totarea < 0)
+      pnew <- lapply(pnew, reverse.xypolygon)
+    if(shrink.frame) {
+      return(owin(poly=pnew, check=FALSE))
+    } else {
+      return(owin( xr,  yr, poly=pnew, check=FALSE))
+    }
+  }
+  
+  # otherwise erode the window in pixel image form
+  if(w$type == "mask") 
+    wnew <- erodemask(w, r, strict=strict)
+  else {
+    D <- distmap(w, invert=TRUE, ...)
+    wnew <- levelset(D, r, if(strict) ">" else ">=")
+  }
+        
+  if(shrink.frame) {
+    # trim off some rows & columns of pixel raster
+    keepcol <- (wnew$xcol >= exr[1] & wnew$xcol <= exr[2])
+    keeprow <- (wnew$yrow >= eyr[1] & wnew$yrow <= eyr[2])
+    wnew$xcol <- wnew$xcol[keepcol]
+    wnew$yrow <- wnew$yrow[keeprow]
+    wnew$dim <- c(sum(keeprow), sum(keepcol))
+    wnew$m <- wnew$m[keeprow, keepcol]
+    wnew$xrange <- exr
+    wnew$yrange <- eyr
+  }
+
+  return(wnew)
+}	
+
+dilate.owin <- function(...) {
+  .Deprecated("dilation.owin", package="spatstat")
+  dilation.owin(...)
+}
+
+dilation.owin <- 
+  function(w, r, ..., polygonal=NULL, tight=TRUE) {
+  verifyclass(w, "owin")
+  validradius(r, "dilation")
+  
+  if(r == 0)
+    return(w)
+
+  ismask <- is.mask(w)
+  if(is.empty(w))
+    return(w)
+
+  # determine type of computation
+  if(is.null(polygonal)) {
+    polygonal <- !ismask
+  } else stopifnot(is.logical(polygonal))
+  
+  if(polygonal) {
+    # convert to polygonal 
+    w <- as.polygonal(w)
+    if(!is.polygonal(w))
+      polygonal <- FALSE
+  }
+  
+  # bounding frame
+  bb <- if(tight) boundingbox(w) else as.rectangle(w)
+  newbox <- grow.rectangle(bb, r)
+
+  # compute dilation
+  if(!polygonal) {
+    # compute pixel approximation
+    epsilon <- sqrt(w$xstep^2 + w$ystep^2)
+    r <- max(r, epsilon)
+    w <- rebound.owin(w, newbox)
+    distant <- distmap(w, ...)
+    dil <- levelset(distant, r, "<=")
+    return(dil)
+  } else {
+    # compute polygonal region using polyclip package
+    pnew <- polyclip::polyoffset(w$bdry, r, jointype="round")
+    # ensure correct polarity
+    totarea <- sum(unlist(lapply(pnew, Area.xypolygon)))
+    if(totarea < 0)
+      pnew <- lapply(pnew, reverse.xypolygon)
+    # determine bounding frame, convert to owin
+    if(tight) {
+      out <- owin(poly=pnew, check=FALSE)
+    } else {
+      out <- owin(newbox$xrange, newbox$yrange, poly=pnew, check=FALSE)
+    }
+    return(out)
+  }
+}
+
+closing.owin <- function(w, r, ..., polygonal=NULL) {
+  if(missing(r))
+    stop("r is required")
+  validradius(r, "closing")
+  wplus <- dilation.owin(w, r, ..., polygonal=polygonal, tight=FALSE)
+  if(is.empty(wplus))
+    return(wplus)
+  wclose <- erosion.owin(wplus, r, strict=TRUE)
+  b <- as.rectangle(w)
+  wclose <- rebound.owin(wclose[b], b)
+  return(wclose)
+}
+
+opening.owin <- function(w, r, ..., polygonal=NULL) {
+  if(missing(r))
+    stop("r is required")
+  validradius(r, "opening")
+  wminus <- erosion.owin(w, r, ..., polygonal=polygonal, shrink.frame=FALSE)
+  if(is.empty(wminus))
+    return(wminus)
+  wopen <- dilation.owin(wminus, r, tight=FALSE)
+  b <- as.rectangle(w)
+  wopen <- rebound.owin(wopen[b], b)
+  return(wopen)
+}
+
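+# Usage sketch ('letterR' is a standard spatstat window): opening with
+# radius r removes protrusions narrower than about 2r, while closing
+# fills gaps narrower than about 2r; both return a window in the
+# original frame.
+Wopen  <- opening(letterR, 0.3)
+Wclose <- closing(letterR, 0.3)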
+
+border <- function(w, r, outside=FALSE, ...) {
+  w <- as.owin(w)
+  if(!outside) {
+    e <- erosion(w, r, ...)
+    b <- setminus.owin(w, e)
+  } else {
+    d <- dilation(w, r, ...)
+    b <- setminus.owin(d, w)
+  }
+  return(b)
+}
+
+# ............ methods for class 'psp' ............................
+
+
+dilation.psp <- function(w, r, ..., polygonal=TRUE, tight=TRUE) {
+  verifyclass(w, "psp")
+  x <- w
+  validradius(r, "dilation")
+  if(r == 0)
+    return(w)
+
+  if(is.empty(x))
+    return(emptywindow(as.owin(w)))
+  
+  # bounding frame
+  bb <- if(tight) boundingbox(x) else as.rectangle(x)
+  newbox <- grow.rectangle(bb, r)
+  
+  # compute dilation
+  if(!polygonal) {
+    x <- rebound.psp(x, newbox)
+    distant <- distmap(x, ...)
+    dil <- levelset(distant, r, "<=")
+    return(dil)
+  } else if(spatstat.options("old.morpho.psp")) {
+    # old code for polygonal case
+    ends   <- x$ends
+    angles <- angles.psp(x, directed=TRUE)
+#    lengths <- lengths.psp(x)
+    out <- NULL
+    # dilate individual segments
+    halfcircle <- seq(from=0, to=pi, length.out=128)[-c(1,128)]
+    for(i in seq_len(x$n)) {
+      seg <- ends[i,]
+      co <- cos(angles[i])
+      si <- sin(angles[i])
+      # draw sausage around i-th segment
+      xx <- c(seg$x0, seg$x1) + r * si
+      yy <- c(seg$y0, seg$y1) - r * co
+      rightcircle <- angles[i] - pi/2 + halfcircle
+      xx <- c(xx, seg$x1 + r * cos(rightcircle))
+      yy <- c(yy, seg$y1 + r * sin(rightcircle))
+      xx <- c(xx, c(seg$x1, seg$x0) - r * si)
+      yy <- c(yy, c(seg$y1, seg$y0) + r * co)
+      leftcircle <- angles[i] + pi/2 + halfcircle
+      xx <- c(xx, seg$x0 + r * cos(leftcircle))
+      yy <- c(yy, seg$y0 + r * sin(leftcircle))
+      sausage <- owin(newbox$xrange, newbox$yrange, poly=list(x=xx, y=yy), check=FALSE)
+      # add to set
+      out <- union.owin(out, sausage, ...)
+    }
+    return(out)
+  } else {
+    # new code using 'polyclip' package
+    # convert to list of list(x,y)
+    ends   <- as.matrix(x$ends)
+    n <- nrow(ends)
+    plines <- vector(mode="list", length=n)
+    for(i in 1:n) plines[[i]] <- list(x=ends[i, c("x0","x1")],
+                                      y=ends[i, c("y0","y1")])
+    # call
+    pnew <- polyclip::polylineoffset(plines, r,
+                                     jointype="round", endtype="openround")
+    # ensure correct polarity
+    totarea <- sum(unlist(lapply(pnew, Area.xypolygon)))
+    if(totarea < 0)
+      pnew <- lapply(pnew, reverse.xypolygon)
+    # convert to owin object
+    out <- if(tight) owin(poly=pnew, check=FALSE) else
+            owin(newbox$xrange, newbox$yrange, poly=pnew, check=FALSE)
+    return(out)
+  }
+}
+
+closing.psp <- function(w, r, ..., polygonal=TRUE) {
+  if(missing(r))
+    stop("r is required")
+  validradius(r, "closing")
+  wplus <- dilation.psp(w, r, ..., polygonal=polygonal, tight=FALSE)
+  if(is.empty(wplus))
+    return(emptywindow(as.owin(w)))
+  wclose <- erosion.owin(wplus, r, strict=TRUE)
+  wclose <- rebound.owin(wclose, as.rectangle(w))
+  return(wclose)
+}
+
+erosion.psp <- function(w, r, ...) {
+  idorempty(w, r, "erosion")
+}
+
+opening.psp <- function(w, r, ...) {
+  idorempty(w, r, "opening")
+}
+
+  
+# ............ methods for class 'ppp' ............................
+
+dilation.ppp <- function(w, r, ..., polygonal=TRUE, tight=TRUE) {
+  verifyclass(w, "ppp")
+  validradius(r, "dilation")
+  x <- w
+  
+  if(r == 0)
+    return(x)
+
+  if(is.empty(w))
+    return(emptywindow(as.owin(w)))
+  
+  # bounding frame
+  bb <- if(tight) boundingbox(x) else as.rectangle(x)
+  newbox <- grow.rectangle(bb, r)
+
+  # compute dilation
+  if(!polygonal) {
+    # compute pixel approximation
+    x <- rebound.ppp(x, newbox)
+    distant <- distmap(x, ...)
+    dil <- levelset(distant, r, "<=")
+    return(dil)
+  } else {
+    # compute polygonal approximation
+    # generate discs
+    coo <- coords(x)
+    nn <- npoints(x)
+    balls <- vector(mode="list", length=nn)
+    ball0 <- disc(r, c(0,0), ...)
+    for(i in seq_len(nn))
+      balls[[i]] <- shift(ball0, vec=coo[i,])
+    class(balls) <- c("solist", class(balls))
+    out <- union.owin(balls)
+    return(out)
+  }
+}
+
+closing.ppp <- function(w, r, ..., polygonal=TRUE) {
+  if(missing(r))
+    stop("r is required")
+  validradius(r, "closing")
+  if(is.empty(w) || w$n <= 3)
+    return(emptywindow(as.owin(w)))
+  # remove `isolated' points
+  ok <- (nndist(w) <= 2 * r)
+  if(sum(ok) <= 3)
+    return(emptywindow(as.owin(w)))
+  w <- w[ok]
+  # dilate
+  wplus <- dilation.ppp(w, r, ..., polygonal=polygonal, tight=FALSE)
+  wclose <- erosion.owin(wplus, r, strict=TRUE)
+  wclose <- rebound.owin(wclose, as.rectangle(w))
+  return(wclose)
+}
+
+erosion.ppp <- function(w, r, ...) {
+  idorempty(w, r, "erosion")
+}
+
+opening.ppp <- function(w, r, ...) {
+  idorempty(w, r, "opening")
+}
+
+# ............ utilities ............................
+
+validradius <- local({
+
+  validradius <- function(r, caller="morphological operator") {
+  #  rname <- short.deparse(substitute(r))
+    if(!is.numeric(r) || length(r) != 1)
+      groan("radius r must be a single number", caller)
+    if(r < 0)
+      groan("radius r must be nonnegative", caller)
+    return(TRUE)
+  }
+
+  groan <- function(whinge, caller) {
+    stop(paste("for", paste(caller, ",", sep=""), whinge), call.=FALSE)
+  }
+
+  validradius
+})
+  
+idorempty <- function(w, r, caller="morphological operator") {
+  validradius(r, caller)
+  if(r == 0)
+    return(w)
+  else
+    return(emptywindow(w))
+}
+           
diff --git a/R/mpl.R b/R/mpl.R
new file mode 100755
index 0000000..b1f2e45
--- /dev/null
+++ b/R/mpl.R
@@ -0,0 +1,1542 @@
+#    mpl.R
+#
+#	$Revision: 5.208 $	$Date: 2017/07/19 06:55:15 $
+#
+#    mpl.engine()
+#          Fit a point process model to a two-dimensional point pattern
+#          by maximum pseudolikelihood
+#
+#    mpl.prepare()
+#          set up data for glm procedure
+#
+# -------------------------------------------------------------------
+#
+
+"mpl" <- function(Q,
+         trend = ~1,
+	 interaction = NULL,
+         data = NULL,
+	 correction="border",
+	 rbord = 0,
+         use.gam=FALSE) {
+   .Deprecated("ppm", package="spatstat")
+   ppm(Q=Q, trend=trend, interaction=interaction,
+       covariates=data, correction=correction, rbord=rbord,
+       use.gam=use.gam, method="mpl")
+}
+
+mpl.engine <- 
+  function(Q,
+           trend = ~1,
+           interaction = NULL,
+           ...,
+           covariates = NULL,
+           subsetexpr = NULL,
+           covfunargs = list(),
+           correction="border",
+           rbord = 0,
+           use.gam=FALSE,
+           gcontrol=list(),
+           GLM=NULL,
+           GLMfamily=NULL,
+           GLMcontrol=NULL,
+           famille=NULL,
+           forcefit=FALSE,
+           nd = NULL,
+           eps = NULL,
+           allcovar=FALSE,
+           callstring="",
+           precomputed=NULL,
+           savecomputed=FALSE,
+           preponly=FALSE,
+           rename.intercept=TRUE,
+           justQ = FALSE,
+           weightfactor = NULL)
+  {
+    GLMname <- if(!missing(GLM)) short.deparse(substitute(GLM)) else NULL
+    ## Extract precomputed data if available
+    if(!is.null(precomputed$Q)) {
+      Q <- precomputed$Q
+      X <- precomputed$X
+      P <- precomputed$U
+    } else {
+      ## Determine quadrature scheme from argument Q
+      if(verifyclass(Q, "quad", fatal=FALSE)) {
+        ## user-supplied quadrature scheme - validate it
+        validate.quad(Q, fatal=TRUE, repair=FALSE, announce=TRUE)
+        ## Extract data points
+        X <- Q$data
+      } else if(verifyclass(Q, "ppp", fatal = FALSE)) {
+        ## point pattern - create default quadrature scheme
+        X <- Q
+        Q <- quadscheme(X, nd=nd, eps=eps, check=FALSE)
+      } else 
+      stop("First argument Q should be a point pattern or a quadrature scheme")
+      ## Data and dummy points together
+      P <- union.quad(Q)
+    }
+    ## secret exit  
+    if(justQ) return(Q)
+    ##  
+    computed <- if(savecomputed) list(X=X, Q=Q, U=P) else NULL
+    ##
+    ## Validate main arguments
+    if(!is.null(trend) && !inherits(trend, "formula"))
+      stop(paste("Argument", sQuote("trend"), "must be a formula"))
+    if(!is.null(interaction) && !inherits(interaction, "interact"))
+      stop(paste("Argument", sQuote("interaction"), "has incorrect format"))
+    ##
+    check.1.real(rbord, "In ppm")
+    explain.ifnot(rbord >= 0, "In ppm")
+    ## rbord applies only to border correction
+    if(correction != "border") rbord <- 0 
+    ##
+    covfunargs <- as.list(covfunargs)
+    ##
+    ## Interpret the call
+    if(is.null(trend)) {
+      trend <- ~1
+      environment(trend) <- parent.frame()
+    }
+    want.trend <- !identical.formulae(trend, ~1)
+    want.inter <- !is.null(interaction) && !is.null(interaction$family)
+
+    ## Stamp with spatstat version number
+    spv <- package_version(versionstring.spatstat())
+    the.version <- list(major=spv$major,
+                        minor=spv$minor,
+                        release=spv$patchlevel,
+                        date="$Date: 2017/07/19 06:55:15 $")
+
+    if(want.inter) {
+      ## ensure we're using the latest version of the interaction object
+      if(outdated.interact(interaction)) 
+        interaction <- update(interaction)
+    }
+
+    ##  
+  
+    if(!want.trend && !want.inter &&
+       !forcefit && !allcovar && is.null(subsetexpr)) {
+      ## the model is the uniform Poisson process
+      ## The MPLE (= MLE) can be evaluated directly
+      npts <- npoints(X)
+      W    <- as.owin(X)
+      if(correction == "border" && rbord > 0) {
+        npts <- sum(bdist.points(X) >= rbord)
+        areaW <- eroded.areas(W, rbord)
+      } else {
+        npts <- npoints(X)
+        areaW <- area(W)
+      }
+      volume <- areaW * markspace.integral(X)
+      lambda <- npts/volume
+      ## fitted canonical coefficient
+      co <- log(lambda)
+      ## asymptotic variance of canonical coefficient
+      varcov <- matrix(1/npts, 1, 1)
+      fisher <- matrix(npts,   1, 1)
+      se <- sqrt(1/npts)
+      ## give names
+      tag <- if(rename.intercept) "log(lambda)" else "(Intercept)"
+      names(co) <- tag
+      dimnames(varcov) <- dimnames(fisher) <- list(tag, tag)
+      ## maximised log likelihood
+      maxlogpl <- if(npts == 0) 0 else npts * (log(lambda) - 1)
+      ##
+      rslt <- list(
+                   method      = "mpl",
+                   fitter      = "exact",
+                   projected   = FALSE,
+                   coef        = co,
+                   trend       = trend,
+                   interaction = NULL,
+                   fitin       = fii(),
+                   Q           = Q,
+                   maxlogpl    = maxlogpl,
+                   satlogpl    = NULL,
+                   internal    = list(computed=computed, se=se),
+                   covariates  = mpl.usable(covariates),
+                                        ## covariates are still retained!
+                   covfunargs  = covfunargs,
+                   subsetexpr  = NULL,
+                   correction  = correction,
+                   rbord       = rbord,
+                   terms       = terms(trend),
+                   fisher      = fisher,
+                   varcov      = varcov,
+                   version     = the.version,
+                   problems    = list())
+      class(rslt) <- "ppm"
+      return(rslt)
+    }
+    #################  P r e p a r e    D a t a   ######################
+
+    prep <- mpl.prepare(Q, X, P, trend, interaction,
+                        covariates, 
+                        want.trend, want.inter, correction, rbord,
+                        "quadrature points", callstring,
+                        subsetexpr=subsetexpr,
+                        allcovar=allcovar,
+                        precomputed=precomputed, savecomputed=savecomputed,
+                        covfunargs=covfunargs,
+                        weightfactor=weightfactor,
+                        ...)
+    ## back door
+    if(preponly) {
+      ## exit now, returning prepared data frame and internal information
+      prep$info <- list(want.trend=want.trend,
+                        want.inter=want.inter,
+                        correction=correction,
+                        rbord=rbord,
+                        interaction=interaction)
+      return(prep)
+    }
+  
+  
+    fmla <- prep$fmla
+    glmdata <- prep$glmdata
+    problems <- prep$problems
+    likelihood.is.zero <- prep$likelihood.is.zero
+    is.identifiable <- prep$is.identifiable
+    computed <- resolve.defaults(prep$computed, computed)
+    IsOffset <- prep$IsOffset
+
+    ## update covariates (if they were resolved from the environment)  
+    if(!is.null(prep$covariates))
+      covariates <- prep$covariates
+  
+    ################# F i t    i t   ####################################
+
+    if(!is.identifiable) 
+      stop(paste("in", callstring, ":", problems$unidentifiable$print),
+           call.=FALSE)
+  
+    ## to avoid problem with package checker  
+    .mpl.W <- glmdata$.mpl.W
+    .mpl.SUBSET <- glmdata$.mpl.SUBSET
+
+    ## determine algorithm control parameters
+    if(is.null(gcontrol)) gcontrol <- list() else stopifnot(is.list(gcontrol))
+    gcontrol <- if(!is.null(GLMcontrol)) do.call(GLMcontrol, gcontrol) else
+                if(use.gam) do.call(mgcv::gam.control, gcontrol) else
+                do.call(stats::glm.control, gcontrol)
+  
+    ## Fit the generalized linear/additive model.
+
+    if(is.null(GLM) && is.null(famille)) {
+      ## the sanctioned technique, using `quasi' family
+      if(want.trend && use.gam) {
+        FIT  <- gam(fmla, family=quasi(link="log", variance="mu"),
+                    weights=.mpl.W,
+                    data=glmdata, subset=.mpl.SUBSET,
+                    control=gcontrol)
+        fittername <- "gam"
+      } else {
+        FIT  <- glm(fmla, family=quasi(link="log", variance="mu"),
+                    weights=.mpl.W,
+                    data=glmdata, subset=.mpl.SUBSET,
+                    control=gcontrol, model=FALSE)
+        fittername <- "glm"
+      }
+    } else if(!is.null(GLM)) {
+      ## alternative GLM fitting function or penalised GLM etc
+      fam <- GLMfamily %orifnull% quasi(link="log", variance="mu")
+      FIT <- GLM(fmla, family=fam,
+                 weights=.mpl.W,
+                 data=glmdata, subset=.mpl.SUBSET,
+                 control=gcontrol)
+      fittername <- GLMname
+    } else {
+      ## experimentation only!
+      if(is.function(famille))
+        famille <- famille()
+      stopifnot(inherits(famille, "family"))
+      if(want.trend && use.gam) {
+        FIT  <- gam(fmla, family=famille, weights=.mpl.W,
+                    data=glmdata, subset=.mpl.SUBSET,
+                    control=gcontrol)
+        fittername <- "experimental"
+      } else {
+        FIT  <- glm(fmla, family=famille, weights=.mpl.W,
+                    data=glmdata, subset=.mpl.SUBSET,
+                    control=gcontrol, model=FALSE)
+        fittername <- "experimental"
+      }
+    }
+    environment(FIT$terms) <- sys.frame(sys.nframe())
+
+  
+    ################  I n t e r p r e t    f i t   #######################
+
+    ## Fitted coefficients
+    co <- FIT$coef
+
+    ## glm covariates
+    W <- glmdata$.mpl.W
+    SUBSET <- glmdata$.mpl.SUBSET        
+    Z <- is.data(Q)
+    Vnames <- prep$Vnames
+
+    ## saturated log pseudolikelihood
+    satlogpl <- - (sum(log(W[Z & SUBSET])) + sum(Z & SUBSET))
+    ## attained value of max log pseudolikelihood
+    maxlogpl <- if(likelihood.is.zero) -Inf else (satlogpl - deviance(FIT)/2)
+
+    ## fitted interaction object
+    fitin <- if(want.inter) fii(interaction, co, Vnames, IsOffset) else fii()
+    unitname(fitin) <- unitname(X)
+    ######################################################################
+    ## Clean up & return 
+
+    rslt <-
+      list(
+           method       = "mpl",
+           fitter       = fittername,
+           projected    = FALSE,
+           coef         = co,
+           trend        = trend,
+           interaction  = if(want.inter) interaction else NULL,
+           fitin        = fitin,
+           Q            = Q,
+           maxlogpl     = maxlogpl,
+           satlogpl     = satlogpl,
+           internal     = list(glmfit=FIT, glmdata=glmdata, Vnames=Vnames,
+                               IsOffset=IsOffset, fmla=fmla, computed=computed,
+                               vnamebase=prep$vnamebase,
+                               vnameprefix=prep$vnameprefix),
+           covariates   = mpl.usable(covariates),
+           covfunargs   = covfunargs,
+           subsetexpr   = subsetexpr,
+           correction   = correction,
+           rbord        = rbord,
+           terms        = terms(trend),
+           version      = the.version,
+           problems     = problems)
+    class(rslt) <- "ppm"
+    return(rslt)
+  }  
+
+
+
+##########################################################################
+### /////////////////////////////////////////////////////////////////////
+##########################################################################
+
+mpl.prepare <- local({
+
+  mpl.prepare <- function(Q, X, P, trend, interaction, covariates, 
+                          want.trend, want.inter, correction, rbord,
+                          Pname="quadrature points", callstring="",
+                          ...,
+                          subsetexpr=NULL,
+                          covfunargs=list(),
+                          allcovar=FALSE,
+                          precomputed=NULL, savecomputed=FALSE,
+                          vnamebase=c("Interaction", "Interact."),
+                          vnameprefix=NULL,
+                          warn.illegal=TRUE,
+                          warn.unidentifiable=TRUE,
+                          weightfactor=NULL,
+                          skip.border=FALSE) {
+    ## Q: quadrature scheme
+    ## X = data.quad(Q)
+    ## P = union.quad(Q)
+  
+    if(missing(want.trend))
+      want.trend <- !is.null(trend) && !identical.formulae(trend, ~1)
+    if(missing(want.inter))
+      want.inter <- !is.null(interaction) && !is.null(interaction$family)
+
+    want.subset <- !is.null(subsetexpr)
+  
+    computed <- list()
+    problems <- list()
+  
+    names.precomputed <- names(precomputed)
+
+    likelihood.is.zero <- FALSE
+    is.identifiable <- TRUE
+  
+    if(!missing(vnamebase)) {
+      if(length(vnamebase) == 1)
+        vnamebase <- rep.int(vnamebase, 2)
+      if(!is.character(vnamebase) || length(vnamebase) != 2)
+        stop("Internal error: illegal format of vnamebase")
+    }
+    if(!is.null(vnameprefix)) {
+      if(!is.character(vnameprefix) || length(vnameprefix) != 1)
+        stop("Internal error: illegal format of vnameprefix")
+    }
+      
+    ################ C o m p u t e     d a t a  ####################
+
+    ## Extract covariate values
+    updatecovariates <- FALSE
+    covariates.df <- NULL
+    if(allcovar || want.trend || want.subset) {
+      if("covariates.df" %in% names.precomputed) {
+        covariates.df <- precomputed$covariates.df
+      } else {
+        if(!is.data.frame(covariates)) {
+          ## names of 'external' covariates to be found
+          covnames <- variablesinformula(trend)
+          if(want.subset)
+            covnames <- union(covnames, all.vars(subsetexpr))
+          if(allcovar)
+            covnames <- union(covnames, names(covariates))
+          covnames <- setdiff(covnames, c("x", "y", "marks"))
+          ## resolve 'external' covariates
+          tenv <- environment(trend)
+          covariates <- getdataobjects(covnames, tenv, covariates, fatal=TRUE)
+          updatecovariates <- any(attr(covariates, "external"))
+        }
+        ## extract values of covariates ('internal' and 'external')
+        covariates.df <- mpl.get.covariates(covariates, P, Pname, covfunargs)
+      }
+      if(savecomputed)
+        computed$covariates.df <- covariates.df
+    } 
+
+    ## Form the weights and the ``response variable''.
+
+    if("dotmplbase" %in% names.precomputed) 
+      .mpl <- precomputed$dotmplbase
+    else {
+      nQ <- n.quad(Q)
+      wQ <- w.quad(Q)
+      mQ <- marks.quad(Q)   ## is NULL for unmarked patterns
+      zQ <- is.data(Q)
+      yQ <- numeric(nQ)
+      yQ[zQ] <- 1/wQ[zQ]
+      zeroes <- attr(wQ, "zeroes")
+      sQ <- if(is.null(zeroes)) rep.int(TRUE, nQ) else !zeroes
+      ## tweak weights ONLY
+      if(!is.null(weightfactor))
+        wQ <- wQ * weightfactor
+      ## pack up
+      .mpl <- list(W      = wQ,
+                   Z      = zQ,
+                   Y      = yQ,
+                   MARKS  = mQ, 
+                   SUBSET = sQ)
+    }
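+    ## Berman-Turner device: the log pseudolikelihood
+    ##    sum_i log lambda(x_i) - integral of lambda over the window
+    ## is approximated by the weighted sum
+    ##    sum_j W_j * (Y_j * log(lambda_j) - lambda_j)
+    ## with Y_j = Z_j/W_j, i.e. a weighted quasi-Poisson log-likelihood.
+    ## This is why Y is 1/w at data points and 0 at dummy points.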
+
+    if(savecomputed)
+      computed$dotmplbase <- .mpl
+  
+    glmdata <- data.frame(.mpl.W = .mpl$W,
+                          .mpl.Y = .mpl$Y)
+
+    ## count data and dummy points in specified subset
+    izdat <- .mpl$Z[.mpl$SUBSET]
+    ndata <- sum(izdat)
+#    ndummy <- sum(!izdat)
+    
+    ## Determine the domain of integration for the pseudolikelihood.
+    if(correction == "border") {
+      bdP <-
+        if("bdP" %in% names.precomputed)
+          precomputed$bdP
+        else
+          bdist.points(P)
+      if(savecomputed)
+        computed$bdP <- bdP
+      .mpl$DOMAIN <- (bdP >= rbord)
+    }
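+    ## (only quadrature points lying at least rbord away from the window
+    ## boundary belong to the domain of integration of the border-corrected
+    ## pseudolikelihood)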
+
+    skip.border <- skip.border && (correction == "border")
+  
+    ####################### T r e n d ##############################
+
+    internal.names <- c(".mpl.W", ".mpl.Y", ".mpl.Z", ".mpl.SUBSET",
+                        "SUBSET", ".mpl")
+
+    reserved.names <- c("x", "y", "marks", internal.names)
+
+    if(allcovar || want.trend || want.subset) {
+      trendvariables <- variablesinformula(trend)
+      ## Check for use of internal names in trend
+      cc <- check.clashes(internal.names, trendvariables, "the model formula")
+      if(cc != "") stop(cc)
+      if(want.subset) {
+        subsetvariables <- all.vars(subsetexpr)
+        cc <- check.clashes(internal.names, subsetvariables,
+                            "the subset expression")
+        if(cc != "") stop(cc)
+        trendvariables <- union(trendvariables, subsetvariables)
+      }
+      ## Standard variables
+      if(allcovar || "x" %in% trendvariables)
+        glmdata <- data.frame(glmdata, x=P$x)
+      if(allcovar || "y" %in% trendvariables)
+        glmdata <- data.frame(glmdata, y=P$y)
+      if(("marks" %in% trendvariables) || !is.null(.mpl$MARKS)) {
+        if(is.null(.mpl$MARKS))
+          stop("Model formula depends on marks, but data do not have marks",
+               call.=FALSE)
+        glmdata <- data.frame(glmdata, marks=.mpl$MARKS)
+      }
+      ##
+      ## Check covariates
+      if(!is.null(covariates.df)) {
+        ## Check for duplication of reserved names
+        cc <- check.clashes(reserved.names, names(covariates),
+                            sQuote("covariates"))
+        if(cc != "") stop(cc)
+        ## Take only those covariates that are named in the trend formula
+        if(!allcovar) 
+          needed <- names(covariates.df) %in% trendvariables
+        else
+          needed <- rep.int(TRUE, ncol(covariates.df))
+        if(any(needed)) {
+          covariates.needed <- covariates.df[, needed, drop=FALSE]
+          ##  Append to `glmdata'
+          glmdata <- data.frame(glmdata,covariates.needed)
+          ##  Ignore any quadrature points that have NA's in the covariates
+          nbg <- is.na(covariates.needed)
+          if(any(nbg)) {
+            offending <- matcolany(nbg)
+            covnames.na <- names(covariates.needed)[offending]
+            quadpoints.na <- matrowany(nbg)
+            n.na <- sum(quadpoints.na)
+            n.tot <- length(quadpoints.na)
+            errate <- n.na/n.tot
+            pcerror <- round(signif(100 * errate, 2), 2)
+            complaint <- paste("Values of the",
+                               ngettext(length(covnames.na),
+                                        "covariate", "covariates"),
+                               paste(sQuote(covnames.na), collapse=", "),
+                               "were NA or undefined at",
+                               paste(pcerror, "%",
+                                     " (", 
+                                     n.na,
+                                     " out of ",
+                                     n.tot,
+                                     ")",
+                                     sep=""),
+                               "of the", Pname)
+            warning(paste(complaint,
+                          ". Occurred while executing: ",
+                          callstring, sep=""),
+                    call. = FALSE)
+            .mpl$SUBSET <-  .mpl$SUBSET & !quadpoints.na
+            details <- list(covnames.na   = covnames.na,
+                            quadpoints.na = quadpoints.na,
+                            print         = complaint)
+            problems <- append(problems,
+                               list(na.covariates=details))
+          }
+        }
+      }
+    }
+
+    ###################### I n t e r a c t i o n ####################
+
+    Vnames <- NULL
+    IsOffset <- NULL
+
+    if(want.inter) {
+      ## Form the matrix of "regression variables" V.
+      ## The rows of V correspond to the rows of P (quadrature points)
+      ## while the column(s) of V are the regression variables (log-potentials)
+
+      E <- precomputed$E %orifnull% equalpairs.quad(Q)
+
+      if(!skip.border) {
+        ## usual case
+        V <- evalInteraction(X, P, E, interaction, correction,
+                             ...,
+                             precomputed=precomputed,
+                             savecomputed=savecomputed)
+      } else {
+        ## evaluate only in eroded domain
+        if(all(c("Esub", "Usub", "Retain") %in% names.precomputed)) {
+          ## use precomputed data
+          Psub <- precomputed$Usub
+          Esub <- precomputed$Esub
+          Retain <- precomputed$Retain
+        } else {
+          Retain <- .mpl$DOMAIN
+          Psub <- P[Retain]
+          ## map serial numbers in 'P[Retain]' to serial numbers in 'Psub'
+          Pmap <- cumsum(Retain)
+          keepE <- Retain[ E[,2] ]
+          ## adjust equal pairs matrix
+          Esub <- E[ keepE, , drop=FALSE]
+          Esub[,2] <- Pmap[Esub[,2]]
+        }
+        ## call evaluator on reduced data
+        ## with 'W=NULL' (currently detected only by AreaInter)
+        if(all(c("X", "Q", "U") %in% names.precomputed)) {
+          subcomputed <- resolve.defaults(list(E=Esub, U=Psub, Q=Q[Retain]),
+                                          precomputed)
+        } else subcomputed <- NULL
+        V <- evalInteraction(X, Psub, Esub, interaction, correction,
+                             ...,
+                             W=NULL,
+                             precomputed=subcomputed,
+                             savecomputed=savecomputed)
+        if(savecomputed) {
+          computed$Usub <- Psub
+          computed$Esub <- Esub
+          computed$Retain <- Retain
+        }
+      }
+      
+      if(!is.matrix(V))
+        stop("interaction evaluator did not return a matrix")
+      
+      ## extract information about offsets
+      IsOffset <- attr(V, "IsOffset")
+      if(is.null(IsOffset)) IsOffset <- FALSE
+      
+      if(skip.border) {
+        ## fill in the values in the border region with zeroes.
+        Vnew <- matrix(0, nrow=npoints(P), ncol=ncol(V))
+        colnames(Vnew) <- colnames(V)
+        Vnew[Retain, ] <- V
+        ## retain attributes
+        attr(Vnew, "IsOffset") <- IsOffset
+        attr(Vnew, "computed") <- attr(V, "computed")
+        attr(Vnew, "POT") <- attr(V, "POT")
+        V <- Vnew
+      }
+    
+      ## extract intermediate computation results 
+      if(savecomputed)
+        computed <- resolve.defaults(attr(V, "computed"), computed)
+
+      ## Augment data frame by appending the regression variables
+      ## for interactions.
+      ##
+      ## First determine the names of the variables
+      ##
+      Vnames <- dimnames(V)[[2]]
+      if(is.null(Vnames)) {
+        ## No names were provided for the columns of V.
+        ## Give them default names.
+        ## In ppm the names will be "Interaction"
+        ##   or "Interact.1", "Interact.2", ...
+        ## In mppm an alternative tag will be specified by vnamebase.
+        nc <- ncol(V)
+        Vnames <- if(nc == 1) vnamebase[1] else paste0(vnamebase[2], 1:nc)
+        dimnames(V) <- list(dimnames(V)[[1]], Vnames)
+      } else if(!is.null(vnameprefix)) {
+        ## Variable names were provided by the evaluator (e.g. MultiStrauss).
+        ## Prefix the variable names by a string
+        ## (typically required by mppm)
+        Vnames <- paste(vnameprefix, Vnames, sep="")
+        dimnames(V) <- list(dimnames(V)[[1]], Vnames)
+      }
+      
+      ## Check the names are valid as column names in a dataframe
+      okVnames <- make.names(Vnames, unique=TRUE)
+      if(any(Vnames != okVnames)) {
+        warning(paste("Names of interaction terms",
+                      "contained illegal characters;",
+                      "names have been repaired."))
+        Vnames <- okVnames
+      }
+    
+      ##   Check for name clashes between the interaction variables
+      ##   and the formula
+      cc <- check.clashes(Vnames, termsinformula(trend), "model formula")
+      if(cc != "") stop(cc)
+      ##   and with the variables in 'covariates'
+      if(!is.null(covariates)) {
+        cc <- check.clashes(Vnames, names(covariates), sQuote("covariates"))
+        if(cc != "") stop(cc)
+      }
+
+      ## OK. append variables.
+      glmdata <- data.frame(glmdata, V)   
+
+      ## check IsOffset matches Vnames
+      if(length(IsOffset) != length(Vnames)) {
+        if(length(IsOffset) == 1)
+          IsOffset <- rep.int(IsOffset, length(Vnames))
+        else
+          stop("Internal error: IsOffset has wrong length", call.=FALSE)
+      }
+  
+      ## Keep only those quadrature points for which the
+      ## conditional intensity is nonzero. 
+
+      ##KEEP  <- apply(V != -Inf, 1, all)
+      .mpl$KEEP  <- matrowall(V != -Inf)
+
+      .mpl$SUBSET <- .mpl$SUBSET & .mpl$KEEP
+
+      ## Check that there are at least some data and dummy points remaining
+      datremain <- .mpl$Z[.mpl$SUBSET]
+      somedat <- any(datremain)
+      somedum <- !all(datremain)
+      if(warn.unidentifiable && !(somedat && somedum)) {
+        ## Model would be unidentifiable if it were fitted.
+        ## Register problem
+        is.identifiable <- FALSE
+        if(ndata == 0) {
+          complaint <- "model is unidentifiable: data pattern is empty"
+        } else {
+          offending <- !c(somedat, somedum)
+          offending <- c("all data points", "all dummy points")[offending]
+          offending <- paste(offending, collapse=" and ")
+          complaint <- paste("model is unidentifiable:",
+                             offending, "have zero conditional intensity")
+        }
+        details <- list(data=!somedat,
+                        dummy=!somedum,
+                        print=complaint)
+        problems <- append(problems, list(unidentifiable=details))
+      }
+
+      ## check whether the model has zero likelihood:
+      ## check whether ANY data points have zero conditional intensity
+      if(any(.mpl$Z & !.mpl$KEEP)) {
+        howmany <- sum(.mpl$Z & !.mpl$KEEP)
+        complaint <- paste(howmany,
+                           "data point(s) are illegal",
+                           "(zero conditional intensity under the model)")
+        details <- list(illegal=howmany,
+                        print=complaint)
+        problems <- append(problems, list(zerolikelihood=details))
+        if(warn.illegal && is.identifiable)
+          warning(paste(complaint,
+                        ". Occurred while executing: ",
+                        callstring, sep=""),
+                  call. = FALSE)
+        likelihood.is.zero <- TRUE
+      }
+    }
+  
+    ##################     S u b s e t   ###################
+
+    if(correction == "border") 
+      .mpl$SUBSET <- .mpl$SUBSET & .mpl$DOMAIN
+  
+    if(!is.null(subsetexpr)) {
+      ## user-defined subset expression
+      USER.SUBSET <- eval(subsetexpr, glmdata, environment(trend))
+      if(is.owin(USER.SUBSET)) {
+        USER.SUBSET <- inside.owin(P$x, P$y, USER.SUBSET)
+      } else if(is.im(USER.SUBSET)) {
+        USER.SUBSET <- as.logical(USER.SUBSET[P, drop=FALSE])
+        if(anyNA(USER.SUBSET)) 
+          USER.SUBSET[is.na(USER.SUBSET)] <- FALSE
+      }
+      if(!(is.logical(USER.SUBSET) || is.numeric(USER.SUBSET)))
+        stop("Argument 'subset' should yield logical values", call.=FALSE)
+      if(anyNA(USER.SUBSET)) {
+        USER.SUBSET[is.na(USER.SUBSET)] <- FALSE
+        warning("NA values in argument 'subset' were changed to FALSE",
+                call.=FALSE)
+      }
+      .mpl$SUBSET <- .mpl$SUBSET & USER.SUBSET
+    }
+                        
+    glmdata <- cbind(glmdata,
+                     data.frame(.mpl.SUBSET=.mpl$SUBSET,
+                                stringsAsFactors=FALSE))
+
+    #################  F o r m u l a   ##################################
+
+    if(!want.trend) trend <- ~1 
+    trendpart <- paste(as.character(trend), collapse=" ")
+    if(!want.inter)
+      rhs <- trendpart
+    else {
+      VN <- Vnames
+      ## enclose offset potentials in 'offset(.)'
+      if(any(IsOffset))
+        VN[IsOffset] <- paste("offset(", VN[IsOffset], ")", sep="")
+      rhs <- paste(c(trendpart, VN), collapse= "+")
+    }
+    fmla <- paste(".mpl.Y ", rhs)
+    fmla <- as.formula(fmla)
+
+    ##  character string of trend formula (without Vnames)
+    trendfmla <- paste(".mpl.Y ", trendpart)
+
+    ####
+    result <- list(fmla=fmla, trendfmla=trendfmla,
+                   covariates=if(updatecovariates) covariates else NULL,
+                   glmdata=glmdata, Vnames=Vnames, IsOffset=IsOffset,
+                   subsetexpr=subsetexpr,
+                   problems=problems,
+                   likelihood.is.zero=likelihood.is.zero,
+                   is.identifiable=is.identifiable,
+                   computed=computed,
+                   vnamebase=vnamebase, vnameprefix=vnameprefix)
+    return(result)
+  }
+
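+  ## Return an error message if any 'forbidden' name occurs among the
+  ## 'offered' names; return the empty string otherwise.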
+  check.clashes <- function(forbidden, offered, where) {
+    name.match <- outer(forbidden, offered, "==")
+    if(any(name.match)) {
+      is.matched <- apply(name.match, 2, any)
+      matched.names <- (offered)[is.matched]
+      if(sum(is.matched) == 1) {
+        return(paste("The variable",sQuote(matched.names),
+                   "in", where, "is a reserved name"))
+      } else {
+        return(paste("The variables",
+                   paste(sQuote(matched.names), collapse=", "),
+                   "in", where, "are reserved names"))
+      }
+    }
+    return("")
+  }
+
+  mpl.prepare
+})
+
+
+
+####################################################################
+####################################################################
+
+mpl.usable <- function(x) {
+  ## silently remove covariates that don't have a recognised format
+  if(length(x) == 0 || is.data.frame(x)) return(x)
+  isim   <- sapply(x, is.im)
+  isfun  <- sapply(x, is.function)
+  iswin  <- sapply(x, is.owin)
+  istess <- sapply(x, is.tess)
+  isnum  <- sapply(x, is.numeric) & (lengths(x) == 1)
+  recognised <- isim | isfun | iswin | istess | isnum
+  if(!all(recognised)) 
+    x <- x[recognised]
+  return(x)
+}
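+
+## A small illustration of mpl.usable (hypothetical covariate list,
+## assuming the 'cells' dataset):
+##   covs <- list(d = distmap(cells), txt = "not a covariate", n = 2)
+##   names(mpl.usable(covs))   ## "d" and "n" are kept; "txt" is dropped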
+
+mpl.get.covariates <- local({
+
+  mpl.get.covariates <- function(covariates, locations, type="locations",
+                                 covfunargs=list(),
+                                 need.deriv=FALSE) {
+    covargname <- sQuote(short.deparse(substitute(covariates)))
+    locargname <- sQuote(short.deparse(substitute(locations)))
+    if(is.null(covfunargs)) covfunargs <- list()
+    ##
+    x <- locations$x
+    y <- locations$y
+    if(is.null(x) || is.null(y)) {
+      xy <- xy.coords(locations)
+      x <- xy$x
+      y <- xy$y
+    }
+    if(is.null(x) || is.null(y))
+      stop(paste("Can't interpret", locargname, "as x,y coordinates"))
+    n <- length(x)
+    if(is.data.frame(covariates)) {
+      if(nrow(covariates) != n)
+        stop(paste("Number of rows in", covargname, 
+                   "does not equal the number of", type))
+      return(covariates)
+    } else if(is.list(covariates)) {
+      if(length(covariates) == 0)
+        return(as.data.frame(matrix(, n, 0)))
+      isim   <- unlist(lapply(covariates, is.im))
+      isfun  <- unlist(lapply(covariates, is.function))
+      iswin  <- unlist(lapply(covariates, is.owin))
+      istess <- unlist(lapply(covariates, is.tess))
+      isnum  <- unlist(lapply(covariates, is.number))
+      if(!all(isim | isfun | isnum | iswin | istess))
+        stop(paste("Each entry in the list", covargname, 
+                   "should be an image, a function,",
+                   "a window, a tessellation or a single number"))
+      if(sum(nzchar(names(covariates))) < length(covariates))
+        stop(paste("Some entries in the list",
+                   covargname, "are un-named"))
+      ## look up values of each covariate at the quadrature points
+      values <- unclass(covariates)
+      values[isim] <- lapply(covariates[isim], lookup.im, x=x, y=y,
+                             naok=TRUE, strict=FALSE)
+      values[isfun] <- vf <- lapply(covariates[isfun], evalfxy, x=x, y=y,
+                                    extra=covfunargs)
+      values[isnum] <- lapply(covariates[isnum], rep, length(x))
+      values[iswin] <- lapply(covariates[iswin], insidexy, x=x, y=y)
+      values[istess] <- lapply(covariates[istess], tileindex, x=x, y=y)
+      result <- as.data.frame(values)
+      if(need.deriv && any(isfun)) {
+        ## check for gradient/hessian attributes of function values
+        grad <- lapply(vf, attr, which="gradient")
+        hess <- lapply(vf, attr, which="hessian")
+        grad <- grad[!unlist(lapply(grad, is.null))]
+        hess <- hess[!unlist(lapply(hess, is.null))]
+        if(length(grad) > 0 || length(hess) > 0)
+          attr(result, "derivatives") <- list(gradient=grad, hessian=hess)
+      }
+      return(result)
+    } 
+    stop(paste(covargname, "must be either a data frame or a list"))
+  }
+
+  ## functions for 'apply'
+  evalfxy <- function(f, x, y, extra) {
+    if(length(extra) == 0)
+      return(f(x,y))
+    ## extra arguments must be matched explicitly by name
+    ok <- names(extra) %in% names(formals(f))
+    z <- do.call(f, append(list(x,y), extra[ok]))
+    return(z)
+  }
+
+  insidexy <- function(w, x, y) { inside.owin(x, y, w) }
+
+  is.number <- function(x) { is.numeric(x) && (length(x) == 1) }
+
+  mpl.get.covariates
+})
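+
+## Sketch of typical use of mpl.get.covariates (assuming 'cells'):
+## look up a distance-map covariate at the quadrature points.
+##   P  <- union.quad(quadscheme(cells))
+##   df <- mpl.get.covariates(list(d = distmap(cells)), P)
+##   ## 'df' has one column 'd' and one row per point of P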
+
+bt.frame <- function(Q, trend=~1, interaction=NULL,
+                      ...,
+                      covariates=NULL,
+                      correction="border", rbord=0,
+                      use.gam=FALSE, allcovar=FALSE) {
+  prep <- mpl.engine(Q=Q, trend=trend, interaction=interaction,
+                     ..., covariates=covariates,
+                     correction=correction, rbord=rbord,
+                     use.gam=use.gam, allcovar=allcovar,
+                     preponly=TRUE, forcefit=TRUE)
+  class(prep) <- c("bt.frame", class(prep))
+  return(prep)
+}
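+
+## Example of building a Berman-Turner frame directly (assuming the
+## 'cells' dataset; the parameter values are illustrative only):
+##   b <- bt.frame(quadscheme(cells), ~x, Strauss(0.07), rbord=0.07)
+##   print(b)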
+
+
+print.bt.frame <- function(x, ...) {
+  cat("Model frame for Berman-Turner device\n")
+  df <- x$glmdata
+  cat(paste("$glmdata: Data frame with", nrow(df), "rows and",
+            ncol(df), "columns\n"))
+  cat("          Column names:\t")
+  cat(paste(paste(names(df),collapse="\t"), "\n"))
+  cat("Complete model formula ($fmla):\t")
+  print(x$fmla)
+  info <- x$info
+  if(info$want.trend) {
+    cat("Trend:\tyes\nTrend formula string ($trendfmla):\t")
+    cat(paste(x$trendfmla, "\n"))
+  } else cat("Trend:\tno\n")
+  cat("Interaction ($info$interaction):\t")
+  inte <- info$interaction
+  if(is.null(inte))
+    inte <- Poisson()
+  print(inte, family=FALSE, brief=TRUE)
+  if(!is.poisson.interact(inte)) {
+    cat("Internal names of interaction variables ($Vnames):\t")
+    cat(paste(x$Vnames, collapse="\t"))
+    cat("\n")
+  }
+  edge <- info$correction
+  cat(paste("Edge correction ($info$correction):\t", sQuote(edge), "\n"))
+  if(edge == "border") 
+    cat(paste("\tBorder width ($info$rbord):\t", info$rbord, "\n"))
+  if(length(x$problems) > 0) {
+    cat("Problems:\n")
+    print(x$problems)
+  }
+  if(length(x$computed) > 0)
+    cat(paste("Frame contains saved computations for",
+              commasep(dQuote(names(x$computed)))))
+  return(invisible(NULL))
+}
+  
+partialModelMatrix <- function(X, D, model, callstring="", ...) {
+  ## X = 'data'
+  ## D = 'dummy'
+  Q <- quad(X,D)
+  P <- union.quad(Q)
+  trend <- model$trend
+  inter <- model$interaction
+  covar <- model$covariates
+  prep  <- mpl.prepare(Q, X, P, trend, inter, covar,
+                       correction=model$correction,
+                       rbord=model$rbord,
+                       Pname="data points", callstring=callstring,
+                       warn.unidentifiable=FALSE,
+                       ...)
+  fmla    <- prep$fmla
+  glmdata <- prep$glmdata
+  mof <- model.frame(fmla, glmdata)
+  mom <- model.matrix(fmla, mof)
+
+  if(!identical(all.equal(colnames(mom), names(coef(model))), TRUE))
+    warning(paste("Internal error: mismatch between",
+                  "column names of model matrix",
+                  "and names of coefficient vector in fitted model"))
+
+  attr(mom, "mplsubset") <- glmdata$.mpl.SUBSET
+  return(mom)
+}
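+
+## Sketch of use (illustrative values): evaluate the canonical covariates
+## of a fitted model at extra dummy points D.
+##   fit <- ppm(cells ~ x, Strauss(0.07))
+##   D   <- runifpoint(20, Window(cells))
+##   mom <- partialModelMatrix(cells, D, fit)
+##   ## rows of 'mom' correspond to the points of union.quad(quad(cells, D))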
+  
+oversize.quad <- function(Q, ..., nU, nX, p=1) {
+  ## Determine whether the quadrature scheme is
+  ## too large to handle in one piece (in mpl)
+  ## for a generic interaction
+  ##    nU = number of quadrature points
+  ##    nX = number of data points
+  ##    p = dimension of statistic 
+  if(missing(nU))
+    nU <- n.quad(Q)
+  if(missing(nX))
+    nX <- npoints(Q$data)
+  nmat <- as.double(nU) * nX
+  nMAX <- spatstat.options("maxmatrix")/p
+  needsplit <- (nmat > nMAX)
+  return(needsplit)
+}
+
+quadBlockSizes <- function(nX, nD, p=1,
+                           nMAX=spatstat.options("maxmatrix")/p,
+                           announce=TRUE) {
+  if(inherits(nX, "quad") && missing(nD)) {
+    nD <- npoints(nX$dummy)
+    nX <- npoints(nX$data)
+  }
+  ## Calculate number of dummy points in largest permissible X * (X+D) matrix 
+  nperblock <- max(1, floor(nMAX/nX - nX))
+  ## determine number of such blocks 
+  nblocks <- ceiling(nD/nperblock)
+  ## make blocks roughly equal (except for the last one)
+  nperblock <- min(nperblock, ceiling(nD/nblocks))
+  ## announce
+  if(announce && nblocks > 1) {
+    msg <- paste("Large quadrature scheme",
+                 "split into blocks to avoid memory size limits;",
+                 nD, "dummy points split into",
+                 nblocks, "blocks,")
+    nfull <- nblocks - 1
+    nlastblock <- nD - nperblock * nfull
+    if(nlastblock == nperblock) {
+      msg <- paste(msg,
+                   "each containing",
+                   nperblock, "dummy points")
+    } else {
+      msg <- paste(msg,
+                   "the first",
+                   ngettext(nfull, "block", paste(nfull, "blocks")),
+                   "containing",
+                   nperblock,
+                   ngettext(nperblock, "dummy point", "dummy points"),
+                   "and the last block containing",
+                   nlastblock, 
+                   ngettext(nlastblock, "dummy point", "dummy points"))
+    }
+    message(msg)
+  } else nlastblock <- nperblock
+  return(list(nblocks=nblocks, nperblock=nperblock, nlastblock=nlastblock))
+}
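+
+## Worked example, assuming the default value 2^24 = 16777216 of
+## spatstat.options("maxmatrix"):
+##   quadBlockSizes(nX=1000, nD=50000, announce=FALSE)
+##   ## nperblock = floor(16777216/1000 - 1000) = 15777
+##   ## nblocks   = ceiling(50000/15777)        = 4
+##   ## equalised nperblock = min(15777, ceiling(50000/4)) = 12500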
+    
+## function that should be called to evaluate interaction terms
+## between quadrature points and data points
+
+evalInteraction <- function(X, P, E = equalpairs(P, X), 
+                            interaction, correction,
+                            ...,
+                            precomputed=NULL,
+                            savecomputed=FALSE) {
+
+  ## evaluate the interaction potential
+  ## (does not assign/touch the variable names)
+
+  verifyclass(interaction, "interact")
+
+  ## handle Poisson case
+  if(is.poisson(interaction)) {
+    out <- matrix(, nrow=npoints(P), ncol=0)
+    attr(out, "IsOffset") <- logical(0)
+    return(out)
+  }
+  
+  ## determine whether to use fast evaluation in C
+  fastok    <- (spatstat.options("fasteval") %in% c("on", "test"))
+  if(fastok) {
+    cando   <- interaction$can.do.fast
+    par     <- interaction$par
+    dofast  <- !is.null(cando) && cando(X, correction, par)
+  } else dofast <- FALSE
+
+  ## determine whether to split quadscheme into blocks
+  if(dofast) {
+    dosplit <- FALSE
+  } else {
+    ## decide whether the quadrature scheme is too large to handle in one piece
+    needsplit <- oversize.quad(nU=npoints(P), nX=npoints(X))
+
+    ## not implemented when savecomputed=TRUE
+    dosplit   <- needsplit && !savecomputed
+    if(needsplit && savecomputed)
+      warning(paste("Oversize quadscheme cannot be split into blocks",
+                    "because savecomputed=TRUE;",
+                    "memory allocation error may occur"))
+  }
+  
+  if(!dosplit) {
+    ## normal case
+    V <- evalInterEngine(X=X, P=P, E=E,
+                         interaction=interaction,
+                         correction=correction,
+                         ...,
+                         precomputed=precomputed,
+                         savecomputed=savecomputed)
+  } else {
+    ## Too many quadrature points: split into blocks
+    nX <- npoints(X)
+    nP <- npoints(P)
+    ## Determine which evaluation points are data points
+    Pdata <- E[,2]
+    ## hence which are dummy points
+    Pall <- seq_len(nP)
+    Pdummy <- if(length(Pdata) > 0) Pall[-Pdata] else Pall
+    nD <- length(Pdummy)
+    ## calculate block sizes
+    bls <- quadBlockSizes(nX, nD, announce=TRUE)
+    nblocks    <- bls$nblocks
+    nperblock  <- bls$nperblock
+    ##
+    seqX <- seq_len(nX)
+    EX <- cbind(seqX, seqX)
+    ##
+    for(iblock in 1:nblocks) {
+      first <- min(nD, (iblock - 1) * nperblock + 1)
+      last  <- min(nD, iblock * nperblock)
+      ## extract dummy points  
+      Di <- P[Pdummy[first:last]]
+      Pi <- superimpose(X, Di, check=FALSE, W=X$window)
+      ## evaluate potential
+      Vi <- evalInterEngine(X=X, P=Pi, E=EX, 
+                            interaction=interaction,
+                            correction=correction,
+                            ...,
+                            savecomputed=FALSE)
+      if(iblock == 1) {
+        V <- Vi
+      } else {
+        ## tack on the glm variables for the extra DUMMY points only
+        V <- rbind(V, Vi[-seqX, , drop=FALSE])
+      }
+    }
+    ## The first 'nX' rows of V contain values for X.
+    ## The remaining rows of V contain values for dummy points.
+    if(length(Pdata) == 0) {
+      ## simply discard rows corresponding to data
+      V <- V[-seqX, , drop=FALSE]
+    } else {
+      ## replace data in correct position
+      ii <- integer(nP)
+      ii[Pdata] <- seqX
+      ii[Pdummy] <- (nX+1):nrow(V)
+      V <- V[ii, , drop=FALSE]
+    }
+  } 
+  return(V)
+}
+
+## workhorse function that actually calls relevant code to evaluate interaction
+
+evalInterEngine <- function(X, P, E, 
+                            interaction, correction,
+                            ...,
+                            Reach = NULL,
+                            precomputed=NULL,
+                            savecomputed=FALSE) {
+
+  ## fast evaluator (C code) may exist
+  fasteval <- interaction$fasteval
+  cando    <- interaction$can.do.fast
+  par      <- interaction$par
+  feopt    <- spatstat.options("fasteval")
+  dofast   <- !is.null(fasteval) &&
+              (is.null(cando) || cando(X, correction, par)) &&
+              (feopt %in% c("on", "test"))
+    
+  V <- NULL
+  if(dofast) {
+    if(feopt == "test")
+      message("Calling fasteval")
+    V <- fasteval(X, P, E,
+                  interaction$pot, interaction$par, correction, ...)
+  }
+  if(is.null(V)) {
+    ## use generic evaluator for family
+    evaluate <- interaction$family$eval
+    if(is.null(Reach)) Reach <- reach(interaction)
+    if("precomputed" %in% names(formals(evaluate))) {
+      ## Use precomputed data
+      ## version 1.9-3 onward (pairwise and pairsat families)
+      V <- evaluate(X, P, E,
+                    interaction$pot,
+                    interaction$par,
+                    correction, ...,
+                    Reach=Reach, 
+                    precomputed=precomputed,
+                    savecomputed=savecomputed)
+    } else {
+      ## Cannot use precomputed data
+      ## Object created by earlier version of ppm
+      ## or not pairwise/pairsat interaction
+      V <- evaluate(X, P, E,
+                    interaction$pot,
+                    interaction$par,
+                    correction, ..., Reach=Reach)
+    }
+  }
+
+  return(V)
+}
+
+deltasuffstat <- local({
+  
+  deltasuffstat <- function(model, ...,
+                            restrict=c("pairs", "first", "none"),
+                            dataonly=TRUE, force=FALSE,
+                            quadsub=NULL,
+                            sparseOK=FALSE) {
+    stopifnot(is.ppm(model))
+    sparsegiven <- !missing(sparseOK)
+    restrict <- match.arg(restrict)
+    
+    if(dataonly) {
+      X <- data.ppm(model)
+      nX <- npoints(X)
+    } else {
+      X <- quad.ppm(model)
+      if(!is.null(quadsub)) {
+        z <- is.data(X)
+        z[quadsub] <- FALSE
+        if(any(z))
+          stop("subset 'quadsub' must include all data points", call.=FALSE)
+        X <- X[quadsub]
+      }
+      nX <- n.quad(X)
+    }
+    ncoef <- length(coef(model))
+    inte <- as.interact(model)
+
+    if(!sparseOK && exceedsMaxArraySize(nX, nX, ncoef)) {
+      if(sparsegiven)
+        stop("Array dimensions too large", call.=FALSE)
+      warning("Switching to sparse array code", call.=FALSE)
+      sparseOK <- TRUE
+    }
+
+    zeroes <- if(!sparseOK) array(0, dim=c(nX, nX, ncoef)) else
+                            sparse3Darray(dims=c(nX, nX, ncoef))
+    
+    if(is.poisson(inte))
+      return(zeroes)
+    
+    ## Get names of interaction terms in model (including offsets)
+    f <- fitin(model)
+    Inames <- f$Vnames
+    IsOffset <- f$IsOffset
+    ## Offset terms do not contribute to sufficient statistic
+    if(all(IsOffset)) 
+      return(zeroes)
+    
+    ## Nontrivial interaction terms must be computed.
+    ## Look for member function $delta2 in the interaction
+    v <- NULL
+    if(!is.null(delta2 <- inte$delta2) && is.function(delta2)) {
+      v <- delta2(X, inte, model$correction, sparseOK=sparseOK)
+    }
+    ## Look for generic $delta2 function for the family
+    if(is.null(v) &&
+       !is.null(delta2 <- inte$family$delta2) &&
+       is.function(delta2))
+      v <- delta2(X, inte, model$correction, sparseOK=sparseOK)
+    ## no luck?
+    if(is.null(v)) {
+      if(!force)
+        return(NULL)
+      ## use brute force algorithm
+      v <- if(dataonly) deltasufX(model, sparseOK) else
+           deltasufQ(model, quadsub, sparseOK)
+    }
+    ## make it a 3D array
+    if(length(dim(v)) != 3) {
+      if(is.matrix(v)) {
+        v <- array(v, dim=c(dim(v), 1))
+      } else if(inherits(v, "sparseMatrix")) {
+        v <- as.sparse3Darray(v)
+      }
+    }
+    if(!sparseOK && inherits(v, "sparse3Darray"))
+      v <- as.array(v)
+
+    if(restrict != "none") {
+      ## kill contributions from points outside the domain of pseudolikelihood
+      ## (e.g. points in the border region)
+      use <- if(dataonly) getppmdatasubset(model) else
+             if(is.null(quadsub)) getglmsubset(model) else
+             getglmsubset(model)[quadsub]
+      if(any(kill <- !use))
+        switch(restrict,
+               pairs = { v[kill,kill,] <- 0 },
+               first = { v[kill,,] <- 0 },
+               none  = {})
+    }
+
+    ## Output array: planes must correspond to model coefficients
+    result <- zeroes
+    ## Planes of 'v' correspond to interaction terms (including offsets)
+    if(length(Inames) != dim(v)[3])
+      stop(paste("Internal error: deltasuffstat:",
+                 "number of planes of v =", dim(v)[3],
+                 "!= number of interaction terms =", length(Inames)),
+           call.=FALSE)
+    ## Offset terms do not contribute to sufficient statistic
+    if(any(IsOffset)) {
+      v <- v[ , , !IsOffset, drop=FALSE]
+      Inames <- Inames[!IsOffset]
+    }
+    ## Map planes of 'v' into coefficients
+    Imap <- match(Inames, names(coef(model)))
+    if(anyNA(Imap))
+      stop(paste("Internal error: deltasuffstat:",
+                 "cannot match interaction coefficients"))
+    if(length(Imap) > 0) {
+      ## insert 'v' into array
+      result[ , , Imap] <- v
+    }
+    return(result)
+  }
+
+  ## compute deltasuffstat using partialModelMatrix
+
+  deltasufX <- function(model, sparseOK=FALSE) {
+    stopifnot(is.ppm(model))
+    X <- data.ppm(model)
+  
+    nX <- npoints(X)
+    p <- length(coef(model))
+
+    isdata <- is.data(quad.ppm(model))
+    m <- model.matrix(model)[isdata, ]
+    ok <- getppmdatasubset(model)
+
+    ## canonical statistic before and after deleting X[j]
+    ## mbefore[ , i, j] = h(X[i] | X)
+    ## mafter[ , i, j] = h(X[i] | X[-j])
+    mafter <- mbefore <- array(t(m), dim=c(p, nX, nX))
+  
+    ## identify close pairs
+    R <- reach(model)
+    if(is.finite(R)) {
+      cl <- closepairs(X, R, what="indices")
+      I <- cl$i
+      J <- cl$j
+      cl2 <- closepairs(X, 2*R, what="indices")
+      I2 <- cl2$i
+      J2 <- cl2$j
+    } else {
+      ## either infinite reach, or something wrong
+      IJ <- expand.grid(I=1:nX, J=1:nX)
+      IJ <- subset(IJ, I != J)
+      I2 <- I <- IJ$I
+      J2 <- J <- IJ$J
+    }
+    ## filter:  I and J must both belong to the nominated subset 
+    okIJ <- ok[I] & ok[J]
+    I <- I[okIJ]
+    J <- J[okIJ]
+    ##
+    if(length(I) > 0 && length(J) > 0) {
+      ## .............. loop over pairs ........................
+      ## The following ensures that 'empty' and 'X' have compatible marks 
+      empty <- X[integer(0)]
+      ## Run through pairs
+      for(i in unique(I)) {
+        ## all points within 2R
+        J2i <- unique(J2[I2==i])
+        ## all points within R
+        Ji  <- unique(J[I==i])
+        nJi <- length(Ji)
+        if(nJi > 0) {
+          Xi <- X[i]
+          ## neighbours of X[i]
+          XJi <- X[Ji]
+          ## replace X[-i] by X[-i] \cap b(0, 2R)
+          X.i <- X[J2i]
+          nX.i <- length(J2i)
+          ## index of XJi in X.i
+          J.i <- match(Ji, J2i)
+          if(anyNA(J.i))
+            stop("Internal error: Ji not a subset of J2i")
+          ## equalpairs matrix
+          E.i <- cbind(J.i, seq_len(nJi))
+          ## values of sufficient statistic 
+          ##    h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)])
+          ## for all j
+          pmj <- partialModelMatrix(X.i, empty, model)[J.i, , drop=FALSE]
+          ## sufficient statistic in reverse order
+          ##    h(X[i] | X[-j]) = h(X[i] | X[-c(i,j)])
+          ## for all j
+          pmi <- matrix(, nJi, p)
+          for(k in 1:nJi) {
+            j <- Ji[k]
+            ## X.ij <- X[-c(i,j)]
+            X.ij <- X.i[-J.i[k]]
+            pmi[k, ] <- partialModelMatrix(X.ij, Xi, model)[nX.i, ]
+          }
+          ##
+          mafter[ , Ji, i] <- t(pmj)
+          mafter[ , i, Ji] <- t(pmi)
+        }
+      }
+    }
+        
+    ##  delta[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j])
+    delta <- mbefore - mafter
+    ## delta[i, j, ] = h(X[i] | X) - h(X[i] | X[-j])
+    delta <- aperm(delta, c(2,3,1))
+    return(delta)
+  }
+
+  deltasufQ <- function(model, quadsub, sparseOK) {
+    stopifnot(is.ppm(model))
+
+    p <- length(coef(model))
+    
+    Q <- quad.ppm(model)
+    m <- model.matrix(model)
+    ok <- getglmsubset(model)
+
+    if(!is.null(quadsub)) {
+      Q <- Q[quadsub]
+      m <- m[quadsub, , drop=FALSE]
+      ok <- ok[quadsub]
+    }
+    
+    X <- Q$data
+    U <- union.quad(Q)
+    nU <- npoints(U)
+    nX <- npoints(X)
+    isdata <- is.data(Q)
+    isdummy <- !isdata
+    m <- m[isdata, ,drop=FALSE]
+    
+    ## canonical statistic before and after adding/deleting U[j]
+    mafter <- mbefore <- array(t(m), dim=c(p, nU, nU))
+    delta <- array(0, dim=dim(mafter))
+    ##   mbefore[ , i, j] = h(U[i] | X)
+    ## For data points X[j]
+    ##   mafter[ , i, j] = h(U[i] | X[-j])
+    ##   delta[ , i, j] = h(U[i] | X) - h(U[i] | X[-j])
+    ## For dummy points X[j]
+    ##   mafter[ , i, j] = h(U[i] | X \cup U[j])
+    ##   delta[ , i, j] = h(U[i] | X \cup U[j]) - h(U[i] | X)
+
+    changesign <- ifelseAB(isdata, -1, 1)
+  
+    ## identify close pairs of quadrature points
+    R <- reach(model)
+    if(is.finite(R)) {
+      cl <- closepairs(U, R, what="indices")
+      I <- cl$i
+      J <- cl$j
+      cl2 <- closepairs(U, 2*R, what="indices")
+      I2 <- cl2$i
+      J2 <- cl2$j
+    } else {
+      ## either infinite reach, or something wrong
+      IJ <- expand.grid(I=1:nU, J=1:nX)
+      IJ <- IJ[ with(IJ, I != J), ]
+      I2 <- I <- IJ$I
+      J2 <- J <- IJ$J
+    }
+
+    ## filter:  I and J must both belong to the nominated subset 
+    okIJ <- ok[I] & ok[J]
+    I <- I[okIJ]
+    J <- J[okIJ]
+    ##
+    if(length(I) > 0 && length(J) > 0) {
+      ## .............. loop over pairs of quadrature points ...............
+      ## Run through pairs
+      uI <- unique(I)
+      zI <- isdata[uI]
+      uIdata <- uI[zI]
+      uIdummy <- uI[!zI]
+      ## Run through pairs i, j where 'i' is a data point
+      for(i in uIdata) {
+        ## all DATA points within 2R of X[i]
+        ## This represents X[-i] 
+        J2i <- unique(J2[I2==i])
+        J2i <- J2i[isdata[J2i]]
+        ## all QUADRATURE points within R of X[i]
+        Ji  <- unique(J[I==i])
+        nJi <- length(Ji)
+        if(nJi > 0) {
+          isd <- isdata[Ji]
+          ## data points which are neighbours of X[i]
+          XJi <- X[Ji[isd]]
+          ## dummy points which are neighbours of X[i]
+          DJi <- U[Ji[!isd]]
+          ## replace X[-i] by X[-i] \cap b(0, 2R)
+          X.i <- X[J2i]
+          nX.i <- length(J2i)
+          ## index of XJi in X.i 
+          J.i <- match(Ji[isd], J2i)
+          if(anyNA(J.i))
+            stop("Internal error: Ji[isd] not a subset of J2i")
+          ## index of DJi in superimpose(X.i, DJi)
+          JDi <- nX.i + seq_len(sum(!isd))
+          ## values of sufficient statistic 
+          ##    h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)])
+          ## for all j
+          pmj <- partialModelMatrix(X.i, DJi, model)[c(J.i, JDi), , drop=FALSE]
+          ##
+          mafter[ , Ji, i] <- t(pmj)
+        }
+      }
+      ## Run through pairs i, j where 'i' is a dummy point
+      for(i in uIdummy) {
+        ## all DATA points within 2R of U[i]
+        J2i <- unique(J2[I2==i])
+        J2i <- J2i[isdata[J2i]]
+        ## all QUADRATURE points within R of U[i]
+        Ji  <- unique(J[I==i])
+        nJi <- length(Ji)
+        if(nJi > 0) {
+          isd <- isdata[Ji]
+          JiData <- Ji[isd]
+          JiDummy <- Ji[!isd]
+          ## data points which are neighbours of U[i]
+          XJi <- X[JiData]
+          ## dummy points which are neighbours of U[i]
+          DJi <- U[JiDummy]
+          ## replace X \cup U[i] by (X \cap b(0, 2R)) \cup U[i]
+          J2Ui <- c(J2i, i)
+          XUi <- U[J2Ui]
+          nXUi <- length(J2Ui)
+          ## index of XJi in X.i 
+          J.i <- match(JiData, J2Ui)
+          if(anyNA(J.i))
+            stop("Internal error: Ji[isd] not a subset of J2i")
+          ## index of DJi in superimpose(X.i, DJi)
+          JDi <- nXUi + seq_len(length(JiDummy))
+          ## values of sufficient statistic 
+          ##    h(X[j] | X \cup U[i]) 
+          ## for all j
+          pmj <- partialModelMatrix(XUi, DJi, model)[c(J.i, JDi), , drop=FALSE]
+          ##
+          mafter[ , c(JiData, JiDummy), i] <- t(pmj)
+        }
+      }
+    }
+        
+    ##  delta[ ,i,j] = h(U[i] | X) - h(U[i] | X[-j])
+    delta[ , , isdata] <- mbefore[, , isdata] - mafter[ , , isdata]
+    ##  delta[ ,i,j] = h(U[i] | X \cup U[j]) - h(U[i] | X)
+    delta[ , , isdummy] <- mafter[, , isdummy] - mbefore[ , , isdummy]
+    ## rearrange: new delta[i,j,] = old delta[, i, j]
+    delta <- aperm(delta, c(2,3,1))
+    return(delta)
+  }
+
+  deltasuffstat
+})
+
diff --git a/R/mppm.R b/R/mppm.R
new file mode 100755
index 0000000..47c56ae
--- /dev/null
+++ b/R/mppm.R
@@ -0,0 +1,659 @@
+#
+# mppm.R
+#
+#  $Revision: 1.81 $   $Date: 2016/12/30 01:44:07 $
+#
+
+mppm <- local({
+
+  mppm <- function(formula, data, interaction=Poisson(), ...,
+                   iformula=NULL,
+#%^!ifdef RANDOMEFFECTS                 
+                   random=NULL,
+#%^!endif                 
+                   use.gam=FALSE,
+#%^!ifdef RANDOMEFFECTS                                    
+                   reltol.pql=1e-3,
+#%^!endif                 
+                   gcontrol=list()
+                   ) {
+    ## remember call
+    cl <- match.call()
+    callstring <- paste(short.deparse(sys.call()), collapse="")
+
+    ## Validate arguments
+    if(!inherits(formula, "formula"))
+      stop(paste("Argument", dQuote("formula"), "should be a formula"))
+    stopifnot(is.hyperframe(data))
+    data.sumry <- summary(data, brief=TRUE)
+    npat <- data.sumry$ncases
+    if(npat == 0)
+      stop(paste("Hyperframe", sQuote("data"), "has zero rows"))
+    if(!is.null(iformula) && !inherits(iformula, "formula"))
+      stop(paste("Argument", sQuote("iformula"), "should be a formula or NULL"))
+#%^!ifdef RANDOMEFFECTS  
+    if(has.random <- !is.null(random)) {
+      if(!inherits(random, "formula"))
+        stop(paste(sQuote("random"), "should be a formula or NULL"))
+      if(use.gam)
+        stop("Sorry, random effects are not available in GAMs")
+    }
+#%^!endif
+    if(! (is.interact(interaction) || is.hyperframe(interaction)))
+      stop(paste("The argument", sQuote("interaction"),
+                 "should be a point process interaction object (class",
+                 dQuote("interact"), 
+                 "), or a hyperframe containing such objects", sep=""))
+
+    backdoor <- list(...)$backdoor
+    if(is.null(backdoor) || !is.logical(backdoor))
+      backdoor <- FALSE
+
+    ############## HANDLE FORMULAS ############################
+  
+    ##------  Trend Formula ------------------
+    
+    ## check all variables in trend formula are recognised
+    checkvars(formula, data.sumry$col.names,
+              extra=c("x","y","id","marks"), bname="data")
+    ## check formula has LHS and RHS. Extract them
+    if(length(formula) < 3)
+      stop(paste("Argument", sQuote("formula"),
+                 "must have a left hand side"))
+    Yname <- formula[[2]]
+    trend <- formula[c(1,3)]
+    if(!is.name(Yname))
+      stop("Left hand side of formula should be a single name")
+    Yname <- paste(Yname)
+    if(!inherits(trend, "formula"))
+      stop("Internal error: failed to extract RHS of formula")
+    allvars <- variablesinformula(trend)
+  
+    ## --- Interaction formula -----
+    
+    ## names of interactions as they may appear in formulae
+    itags <- 
+      if(is.hyperframe(interaction)) names(interaction) else "Interaction"
+    ninteract <- length(itags)
+    ## ensure `iformula' is a formula without a LHS
+    ## and determine which columns of `interaction' are actually used
+    if(is.null(iformula)) {
+      if(ninteract > 1)
+        stop(paste("interaction hyperframe has more than 1 column;",
+                   "you must specify the choice of interaction",
+                   "using argument",  sQuote("iformula")))
+      iused <- TRUE
+      iformula <-  as.formula(paste("~", itags))
+    } else {
+      if(length(iformula) > 2)
+        stop(paste("The interaction formula",
+                   sQuote("iformula"),
+                   "should not have a left hand side"))
+      ## valid variables in `iformula' are interactions and data frame columns
+      permitted <- paste(sQuote("interaction"),
+                         "or permitted name in", sQuote("data"))
+      checkvars(iformula, itags, extra=c(data.sumry$dfnames, "id"),
+                bname=permitted)
+      ivars <- variablesinformula(iformula)
+      ## check which columns of `interaction' are actually used
+      iused <- itags %in% ivars
+      if(sum(iused) == 0)
+        stop("No interaction specified in iformula")
+      ## OK
+      allvars <- c(allvars, ivars)
+    } 
+
+#%^!ifdef RANDOMEFFECTS
+  
+    ## --- Random effects formula ----
+    if(!is.null(random))  {
+      if(length(random) > 2)
+        stop(paste("The random effects formula",
+                   sQuote("random"),
+                   "should not have a left hand side"))
+      checkvars(random, itags, extra=c(data.sumry$col.names, "x", "y", "id"),
+                bname="either data or interaction")
+      allvars <- c(allvars, variablesinformula(random))
+    }
+
+#%^!endif  
+  
+    ## ---- variables required (on RHS of one of the above formulae) -----
+    allvars <- unique(allvars)
+
+  
+    ########  EXTRACT DATA  #####################################
+  
+    ## Insert extra variable 'id'
+    data <- cbind.hyperframe(data, id=factor(1:npat))
+    data.sumry <- summary(data, brief=TRUE)
+    allvars <- unique(c(allvars, "id"))
+
+    ## Extract the list of responses (point pattern/quadscheme)
+    Y <- data[, Yname, drop=TRUE]
+    if(npat == 1) Y <- solist(Y)
+    Yclass <- data.sumry$classes[Yname]
+    if(Yclass == "ppp") {
+      ## convert to quadrature schemes, for efficiency's sake
+      Y <- solapply(Y, quadscheme)
+    } else {
+      if(Yclass != "quad")
+        stop(paste("Column", dQuote(Yname), "of data",
+                   "does not consist of point patterns (class ppp)",
+                   "nor of quadrature schemes (class quad)"))
+      Y <- as.solist(Y)
+    }
+  
+    ## Extract sub-hyperframe of data named in formulae
+    datanames <- names(data)
+    used.cov.names <- allvars[allvars %in% datanames]
+    has.covar <- (length(used.cov.names) > 0) 
+    if(has.covar) {
+      dfvar <- used.cov.names %in% data.sumry$dfnames
+      imvar <- data.sumry$types[used.cov.names] == "im"
+      if(any(nbg <- !(dfvar | imvar)))
+        stop(paste("Inappropriate format for",
+                   ngettext(sum(nbg), "covariate", "covariates"),
+                   paste(sQuote(used.cov.names[nbg]), collapse=", "),
+                   ": should contain image objects or vector/factor"))
+      covariates.hf <- data[, used.cov.names, drop=FALSE]
+      has.design <- any(dfvar)
+      dfvarnames <- used.cov.names[dfvar]
+      datadf <-
+        if(has.design)
+          as.data.frame(covariates.hf, discard=TRUE, warn=FALSE)
+        else NULL
+      if(has.design) {
+        ## check for NA's in design covariates
+#        if(any(nbg <- apply(is.na(datadf), 2, any)))
+        if(any(nbg <- matcolany(is.na(datadf))))
+          stop(paste("There are NA's in the",
+                     ngettext(sum(nbg), "covariate", "covariates"),
+                     commasep(dQuote(names(datadf)[nbg]))))
+      }
+    } else {
+      has.design <- FALSE
+      datadf     <- NULL
+    }
+  
+    ############### INTERACTION ###################################
+    ## ensure `interaction' is a hyperframe of `interact' objects
+    ## with the right number of rows.
+    ## All entries in a column must represent the same process
+    ## (possibly with different values of the irregular parameters).
+    ## Extract the names of the point processes.
+    if(is.interact(interaction)) {
+      ninteract <- 1
+      processes <- list(Interaction=interaction$name)
+      interaction <- hyperframe(Interaction=interaction, id=1:npat)[,1]
+      constant <- c(Interaction=TRUE)
+    } else if(is.hyperframe(interaction)) {
+      inter.sumry <- summary(interaction)
+      ninteract <- inter.sumry$nvars
+      ## ensure it has the same number of rows as 'data'
+      nr <- inter.sumry$ncases
+      if(nr == 1 && npat > 1) {
+        interaction <- cbind.hyperframe(id=1:npat, interaction)[,-1]
+        inter.sumry <- summary(interaction)
+      } else if(nr != npat)
+        stop(paste("Number of rows in", sQuote("interaction"),
+                   "=", nr, "!=", npat, "=",
+                   "number of rows in", sQuote("data")))
+      ## check all columns contain interaction objects
+      ok <- (inter.sumry$classes == "interact")
+      if(!all(ok)) {
+        nbg <- names(interaction)[!ok]
+        nn <- sum(!ok)
+        stop(paste(ngettext(nn, "Column", "Columns"),
+                   paste(sQuote(nbg), collapse=", "),
+                   ngettext(nn, "does", "do"),
+                   "not consist of interaction objects"))
+      }
+      ## all entries in a column must represent the same process type
+      ## (with possibly different values of the irregular parameters)
+      ok <- unlist(lapply(as.list(interaction), consistentname))
+      if(!all(ok)) {
+        nbg <- names(interaction)[!ok]
+        stop(paste("Different interactions may not appear in a single column.",
+                   "Violated by",
+                   paste(sQuote(nbg), collapse=", ")))
+      }
+      processes <- lapply(as.list(interaction), firstname)
+    
+      ## determine whether all entries in a column are EXACTLY the same
+      ## (=> have the same parameters)
+      constant <- (inter.sumry$storage == "hyperatom")
+      if(any(!constant)) {
+        others <- interaction[,!constant]
+        constant[!constant] <- sapply(lapply(as.list(others), unique),
+                                      length) == 1
+      }
+    }
+    ## check for trivial (Poisson) interactions
+    trivial <- unlist(lapply(as.list(interaction), allpoisson))
+    
+    ## check that iformula does not combine two interactions on one row
+    nondfnames <- datanames[!(datanames %in% data.sumry$dfnames)]
+    ip <- impliedpresence(itags, iformula, datadf, nondfnames)
+    if(any(rowSums(ip) > 1))
+      stop("iformula invokes more than one interaction on a single row")
+  
+    ##
+    #################### BERMAN-TURNER DEVICE #########################
+    ##
+    ## set up list to contain the glm variable names for each interaction.
+    Vnamelist <- rep(list(character(0)), ninteract)
+    names(Vnamelist) <- itags
+    ## set up list to contain 'IsOffset'
+    Isoffsetlist <- rep(list(logical(0)), ninteract)
+    names(Isoffsetlist) <- itags
+    ####
+    ## ---------------- L O O P ---------------------------------
+    for(i in 1:npat) {
+      ## extract responses and covariates for presentation to ppm()
+      Yi <- Y[[i]]
+      covariates <-
+        if(has.covar) covariates.hf[i, , drop=TRUE, strip=FALSE] else NULL
+      if(has.design) {
+        ## convert each data frame value to an image
+        covariates[dfvarnames] <-
+          lapply(as.list(as.data.frame(covariates[dfvarnames])),
+                 as.im, W=Yi$data$window)
+      }
+    
+      ## Generate data frame and glm info for this point pattern
+      ## First the trend covariates
+      prep0 <- bt.frame(Yi, trend, Poisson(), ..., covariates=covariates,
+                        allcovar=TRUE, use.gam=use.gam)
+      glmdat <- prep0$glmdata
+    
+      ## now the nontrivial interaction terms
+      for(j in (1:ninteract)[iused & !trivial]) {
+        inter <- interaction[i,j,drop=TRUE]
+        prepj <- bt.frame(Yi, ~1, inter, ..., covariates=covariates,
+                          allcovar=TRUE, use.gam=use.gam,
+                          vnamebase=itags[j], vnameprefix=itags[j])
+        ## store GLM variable names & check consistency
+        vnameij <- prepj$Vnames
+        if(i == 1)
+          Vnamelist[[j]] <- vnameij
+        else if(!identical(vnameij, Vnamelist[[j]]))
+          stop("Internal error: Unexpected conflict in glm variable names")
+        ## store offset indicator vectors
+        isoffset.ij <- prepj$IsOffset
+        if(i == 1)
+          Isoffsetlist[[j]] <- isoffset.ij
+        else if(!identical(isoffset.ij, Isoffsetlist[[j]]))
+          stop("Internal error: Unexpected conflict in offset indicators")
+        ## GLM data frame for this interaction
+        glmdatj <- prepj$glmdata
+        if(nrow(glmdatj) != nrow(glmdat))
+          stop("Internal error: differing numbers of rows in glm data frame")
+        iterms.ij <- glmdatj[vnameij]
+        subset.ij <- glmdatj$.mpl.SUBSET
+        ## tack on columns of interaction terms
+        glmdat <- cbind(glmdat, iterms.ij)
+        ## update subset (quadrature points where cif is positive)
+        glmdat$.mpl.SUBSET <- glmdat$.mpl.SUBSET & subset.ij
+      }
+
+      ## assemble the Mother Of All Data Frames
+      if(i == 1) {
+        moadf <- glmdat
+      } else {
+        ## There may be new or missing columns
+        recognised <- names(glmdat) %in% names(moadf)
+        if(any(!recognised)) {
+          newnames <- names(glmdat)[!recognised]
+          zeroes <- as.data.frame(matrix(0, nrow(moadf), length(newnames)))
+          names(zeroes) <- newnames
+          moadf <- cbind(moadf, zeroes)
+        }
+        provided   <- names(moadf)  %in% names(glmdat)
+        if(any(!provided)) {
+          absentnames <- names(moadf)[!provided]
+          zeroes <- as.data.frame(matrix(0, nrow(glmdat), length(absentnames)))
+          names(zeroes) <- absentnames
+          glmdat <- cbind(glmdat, zeroes)
+        }
+        ## Ensure factor columns are consistent
+        m.isfac <- sapply(as.list(moadf), is.factor)
+        g.isfac <- sapply(as.list(glmdat), is.factor)
+        if(any(uhoh <- (m.isfac != g.isfac)))
+          errorInconsistentRows("values (factor and non-factor)",
+                                colnames(moadf)[uhoh])
+        if(any(m.isfac)) {
+          m.levels <- lapply(as.list(moadf)[m.isfac], levels)
+          g.levels <- lapply(as.list(glmdat)[g.isfac], levels)
+          clash <- !mapply(identical, x=m.levels, y=g.levels)
+          if(any(clash))
+            errorInconsistentRows("factor levels",
+                                  (colnames(moadf)[m.isfac])[clash])
+        }
+        ## Finally they are compatible
+        moadf <- rbind(moadf, glmdat)
+      }
+    }
+    ## ---------------- E N D   o f    L O O P  --------------------------
+    ##
+    ## backdoor exit - Berman-Turner frame only - used by predict.mppm 
+    if(backdoor)
+      return(moadf)
+    ##
+    ##
+    ## --------------------------------------------------------------------
+    ## 
+    ## Construct the glm formula for the Berman-Turner device
+    ## 
+    ## Get trend part from the last-computed prep0
+    fmla  <- prep0$trendfmla
+    ## Tack on the RHS of the interaction formula
+    if(!all(trivial))
+      fmla <- paste(fmla, "+", as.character(iformula)[[2]])
+    ## Make it a formula
+    fmla <- as.formula(fmla)
+
+    ## Ensure that each interaction name is recognised.
+    ##
+    ## To the user, an interaction is identified by its `tag' name
+    ## (default tag: "Interaction")
+    ##
+    ## Internally, an interaction is fitted using its sufficient statistic
+    ## which may be 0, 1 or k-dimensional. 
+    ## The column names of the sufficient statistic are the Vnames
+    ## returned from ppm.
+    ## The Poisson process is a special case: it is 0-dimensional (no Vnames).
+    ##
+    ## For k-dimensional sufficient statistics, we modify the formulae,
+    ## replacing the interaction name by (vname1 + vname2 + .... + vnamek)
+    ## 
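+    ## Illustrative example (hypothetical tag and Vnames): if the tag
+    ## "Interaction" has sufficient statistic columns "markAxA" and
+    ## "markAxB", then  ~x + Interaction  is rewritten below as
+    ## ~x + (markAxA + markAxB)  before fitting.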
+    for(j in (1:ninteract)[iused]) {
+      vnames <- Vnamelist[[j]]
+      tag    <- itags[j]
+      isoffset <- Isoffsetlist[[j]]
+      if(any(isoffset)) {
+        ## enclose names of offset variables in 'offset()'
+        vnames[isoffset] <- paste("offset(", vnames[isoffset], ")", sep="")
+      }
+      if(trivial[j]) 
+        ## Poisson case: add a column of zeroes
+        moadf[[tag]] <- 0
+      else if(!identical(vnames, tag)) {
+        if(length(vnames) == 1) 
+          ## tag to be replaced by vname
+          vn <- paste("~", vnames[1])
+        else 
+          ## tag to be replaced by (vname1 + vname2 + .... + vnamek)
+          vn <- paste("~(", paste(vnames, collapse=" + "), ")")
+        ## pull out formula representation of RHS
+        vnr <- as.formula(vn)[[2]]
+        ## make substitution rule: list(<tag>=<vnr>)
+        vnsub <- list(vnr)
+        names(vnsub) <- tag
+        ## perform substitution in trend formula
+        fmla <- eval(substitute(substitute(fom, vnsub), list(fom=fmla)))
+#%^!ifdef RANDOMEFFECTS      
+        ## perform substitution in random effects formula
+        if(has.random && tag %in% variablesinformula(random))
+          random <- eval(substitute(substitute(fom, vnsub), list(fom=random)))
+#%^!endif      
+      }
+    }
+
+    fmla <- as.formula(fmla)
+    ## Fix scoping problem
+    assign("glmmsubset", moadf$.mpl.SUBSET, envir=environment(fmla))
+    ## Satisfy package checker
+    glmmsubset <- .mpl.SUBSET <- moadf$.mpl.SUBSET
+    .mpl.W      <- moadf$.mpl.W
+  
+    ## ---------------- FIT THE MODEL ------------------------------------
+    want.trend <- prep0$info$want.trend
+    if(want.trend && use.gam) {
+      fitter <- "gam"
+      ctrl <- do.call(gam.control, resolve.defaults(gcontrol, list(maxit=50)))
+      FIT  <- gam(fmla, family=quasi(link=log, variance=mu), weights=.mpl.W,
+                  data=moadf, subset=(.mpl.SUBSET=="TRUE"),
+                  control=ctrl)
+      deviants <- deviance(FIT)
+#%^!ifdef RANDOMEFFECTS    
+    } else if(!is.null(random)) {
+      fitter <- "glmmPQL"
+      ctrl <- do.call(lmeControl, resolve.defaults(gcontrol, list(maxIter=50)))
+      attr(fmla, "ctrl") <- ctrl # very strange way to pass argument
+      fixed <- 42 # to satisfy package checker
+      FIT  <- hackglmmPQL(fmla, random=random,
+                          family=quasi(link=log, variance=mu), weights=.mpl.W,
+                          data=moadf, subset=glmmsubset,
+                          control=attr(fixed, "ctrl"),
+                          reltol=reltol.pql)
+      deviants <-  -2 * logLik(FIT)
+#%^!endif    
+    } else {
+      fitter <- "glm"
+      ctrl <- do.call(glm.control, resolve.defaults(gcontrol, list(maxit=50)))
+      FIT  <- glm(fmla, family=quasi(link="log", variance="mu"), weights=.mpl.W,
+                  data=moadf, subset=(.mpl.SUBSET=="TRUE"),
+                  control=ctrl)
+      deviants <- deviance(FIT)
+    }
+    ## maximised log-pseudolikelihood
+    W <- moadf$.mpl.W
+    SUBSET <- moadf$.mpl.SUBSET
+    Z <- (moadf$.mpl.Y != 0)
+    maxlogpl <- -(deviants/2 + sum(log(W[Z & SUBSET])) + sum(Z & SUBSET))
+    ##
+    ## ---------------- PACK UP THE RESULT --------------------------------
+    ##
+    result <- list(Call = list(callstring=callstring, cl=cl),
+                   Info =
+                   list(
+#%^!ifdef RANDOMEFFECTS                      
+                     has.random=has.random,
+#%^!endif                      
+                     has.covar=has.covar,
+                     has.design=has.design,
+                     Yname=Yname,
+                     used.cov.names=used.cov.names,
+                     allvars=allvars,
+                     names.data=names(data),
+                     is.df.column=(data.sumry$storage == "dfcolumn"),
+                     rownames=row.names(data),
+                     correction=prep0$info$correction,
+                     rbord=prep0$info$rbord
+                     ),
+                   Fit=
+                   list(
+                     fitter=fitter,
+                     use.gam=use.gam,
+                     fmla=fmla,
+                     FIT=FIT,
+                     moadf=moadf,
+                     Vnamelist=Vnamelist
+                     ),
+                   Inter =
+                   list(
+                     ninteract=ninteract,
+                     interaction=interaction,
+                     iformula=iformula,
+                     iused=iused,
+                     itags=itags,
+                     processes=processes,
+                     trivial=trivial,
+                     constant=constant
+                     ),
+                   formula=formula,
+                   trend=trend,
+                   iformula=iformula,
+#%^!ifdef RANDOMEFFECTS                 
+                   random=random,
+#%^!endif                 
+                   npat=npat,
+                   data=data,
+                   Y=Y,
+                   maxlogpl=maxlogpl,
+                   datadf=datadf)
+
+    class(result) <- c("mppm", class(result))
+    return(result)
+  }
+
+  # helper functions
+  checkvars <- function(f, b, extra=NULL, bname=short.deparse(substitute(b))){
+    fname <- short.deparse(substitute(f))
+    fvars <- variablesinformula(f)
+    bvars <- if(is.character(b)) b else names(b)
+    bvars <- c(bvars, extra)
+    nbg <- !(fvars %in% bvars)
+    if(any(nbg)) {
+      nn <- sum(nbg)
+      stop(paste(ngettext(nn, "Variable", "Variables"),
+                 commasep(dQuote(fvars[nbg])),
+                 "in", fname,
+                 ngettext(nn, "is not one of the", "are not among the"),
+                 "names in", bname))
+    }
+    return(NULL)
+  }
+  
+  consistentname <- function(x) {
+    xnames <- unlist(lapply(x, getElement, name="name"))
+    return(length(unique(xnames)) == 1)
+  }
+
+  firstname <- function(z) { z[[1]]$name }
+
+  allpoisson <- function(x) all(sapply(x, is.poisson.interact))
+
+  errorInconsistentRows <- function(what, offending) {
+    stop(paste("There are inconsistent",
+               what,
+               "for the",
+               ngettext(length(offending), "variable", "variables"),
+               commasep(sQuote(offending)),
+               "between different rows of the hyperframe 'data'"),
+         call.=FALSE)
+  }
+    
+  mppm
+})
+
+
+is.mppm <- function(x) {
+  inherits(x, "mppm")
+}
+
+coef.mppm <- function(object, ...) {
+  coef(object$Fit$FIT)
+}
+
+#%^!ifdef RANDOMEFFECTS
+
+fixef.mppm <- function(object, ...) {
+  if(object$Fit$fitter == "glmmPQL")
+    fixef(object$Fit$FIT)
+  else
+    coef(object$Fit$FIT)
+}
+
+ranef.mppm <- function(object, ...) {
+  if(object$Fit$fitter == "glmmPQL")
+    ranef(object$Fit$FIT)
+  else
+    as.data.frame(matrix(, nrow=object$npat, ncol=0))
+}
+
+#%^!endif
+
+print.mppm <- function(x, ...) {
+  print(summary(x, ..., brief=TRUE))
+}
+
+is.poisson.mppm <- function(x) {
+  trivial <- x$Inter$trivial
+  iused <- x$Inter$iused
+  all(trivial[iused])
+}
+
+quad.mppm <- function(x) {
+  as.solist(x$Y)
+}
+
+data.mppm <- function(x) {
+  solapply(x$Y, getElement, name="data")
+}
+
+windows.mppm <- function(x) {
+  solapply(data.mppm(x), Window)
+}
+
+logLik.mppm <- function(object, ..., warn=TRUE) {
+  if(warn && !is.poisson.mppm(object))
+    warning(paste("log likelihood is not available for non-Poisson model;",
+                  "log-pseudolikelihood returned"))
+  ll <- object$maxlogpl
+#%^!ifdef RANDOMEFFECTS  
+  attr(ll, "df") <- length(fixef(object))
+#%^!else  
+#  attr(ll, "df") <- length(coef(object))
+#%^!endif  
+  class(ll) <- "logLik"
+  return(ll)
+}
+
+AIC.mppm <- function(object, ..., k=2, takeuchi=TRUE) {
+  ll <- logLik(object, warn=FALSE)
+  pen <- attr(ll, "df")
+  if(takeuchi && !is.poisson(object)) {
+    vv <- vcov(object, what="all")
+    J  <- vv$fisher
+    H  <- vv$internals$A1
+    ## Takeuchi penalty = trace of J H^{-1} = trace of H^{-1} J
+    JiH <- try(solve(H, J), silent=TRUE)
+    if(!inherits(JiH, "try-error")) 
+      pen <- sum(diag(JiH))
+  } 
+  return(- 2 * as.numeric(ll) + k * pen)
+}
+
+extractAIC.mppm <- function(fit, scale = 0, k = 2, ..., takeuchi = TRUE) 
+{
+  edf <- length(coef(fit))
+  aic <- AIC(fit, k = k, takeuchi = takeuchi)
+  c(edf, aic)
+}
+
+getCall.mppm <- function(x, ...) { x$Call$cl }
+
+terms.mppm <- function(x, ...) { terms(formula(x)) }
+
+nobs.mppm <- function(object, ...) { sum(sapply(data.mppm(object), npoints)) }
+
+simulate.mppm <- function(object, nsim=1, ..., verbose=TRUE) {
+  subs <- subfits(object)
+  nr <- length(subs)
+  sims <- list()
+  if(verbose) {
+    splat("Generating simulated realisations of", nr, "models..")
+    state <- list()
+  }
+  for(irow in seq_len(nr)) {
+    sims[[irow]] <- do.call(simulate,
+                            resolve.defaults(list(object=subs[[irow]],
+                                                  nsim=nsim, drop=FALSE),
+                                             list(...),
+                                             list(progress=FALSE)))
+    if(verbose) state <- progressreport(irow, nr, state=state)
+  }
+  sim1list <- lapply(sims, "[[", i=1)
+  h <- hyperframe("Sim1"=sim1list)
+  if(nsim > 1) {
+    for(j in 2:nsim) {
+      simjlist <- lapply(sims, "[[", i=j)
+      hj <- hyperframe(Sim=simjlist)
+      names(hj) <- paste0("Sim", j)
+      h <- cbind(h, hj)
+    }
+  }
+  return(h)
+}
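A minimal usage sketch for mppm and the methods above (assuming the
'waterstriders' dataset shipped with spatstat; names and values are
illustrative, not part of this file):

    library(spatstat)
    H <- hyperframe(Y = waterstriders)   # hyperframe of three point patterns
    fit <- mppm(Y ~ 1, data = H)         # pooled Poisson model
    coef(fit)                            # fitted coefficients
    logLik(fit)                          # exact log-likelihood (Poisson case)
    sims <- simulate(fit, nsim = 2)      # hyperframe of simulated patterns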
diff --git a/R/multihard.R b/R/multihard.R
new file mode 100755
index 0000000..a430062
--- /dev/null
+++ b/R/multihard.R
@@ -0,0 +1,191 @@
+#
+#
+#    multihard.R
+#
+#    $Revision: 1.17 $	$Date: 2016/02/16 01:39:12 $
+#
+#    The multitype hard core process
+#
+#    MultiHard()    create an instance of the multitype hard core process
+#                      [an object of class 'interact']
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+MultiHard <- local({
+
+  # .... multitype hard core potential
+  
+  MHpotential <- function(d, tx, tu, par) {
+     # arguments:
+     # d[i,j] distance between points X[i] and U[j]
+     # tx[i]  type (mark) of point X[i]
+     # tu[j]  type (mark) of point U[j]
+     #
+     # get matrices of interaction radii
+     h <- par$hradii
+
+     # get possible marks and validate
+     if(!is.factor(tx) || !is.factor(tu))
+	stop("marks of data and dummy points must be factor variables")
+     lx <- levels(tx)
+     lu <- levels(tu)
+     if(length(lx) != length(lu) || any(lx != lu))
+	stop("marks of data and dummy points do not have same possible levels")
+
+     if(!identical(lx, par$types))
+        stop("data and model do not have the same possible levels of marks")
+     if(!identical(lu, par$types))
+        stop("dummy points and model do not have the same possible levels of marks")
+
+     # ensure factor levels are acceptable for column names (etc)
+     lxname <- make.names(lx, unique=TRUE)
+     
+     # list all UNORDERED pairs of types to be checked
+     # (the interaction must be symmetric in type, and scored as such)
+     uptri <- (row(h) <= col(h)) & (!is.na(h))
+     mark1 <- (lx[row(h)])[uptri]
+     mark2 <- (lx[col(h)])[uptri]
+     # corresponding names
+     mark1name <- (lxname[row(h)])[uptri]
+     mark2name <- (lxname[col(h)])[uptri]
+     vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x")
+     vname <- paste("mark", vname, sep="")
+     npairs <- length(vname)
+     # list all ORDERED pairs of types to be checked
+     # (to save writing the same code twice)
+     different <- mark1 != mark2
+     mark1o <- c(mark1, mark2[different])
+     mark2o <- c(mark2, mark1[different])
+     nordpairs <- length(mark1o)
+     # unordered pair corresponding to each ordered pair
+     ucode <- c(1:npairs, (1:npairs)[different])
+     #
+     # create numeric array for result
+     z <- array(0, dim=c(dim(d), npairs),
+                dimnames=list(character(0), character(0), vname))
+     # go....
+     if(length(z) > 0) {
+       # apply the relevant hard core distance to each pair of points
+       hxu <- h[ tx, tu ]
+       forbid <- (d < hxu)
+       forbid[is.na(forbid)] <- FALSE
+       # form the potential 
+       value <- array(0, dim=dim(d))
+       value[forbid] <- -Inf
+       # assign value[i,j] -> z[i,j,k] where k is relevant interaction code
+       for(i in 1:nordpairs) {
+         # data points with mark m1
+         Xsub <- (tx == mark1o[i])
+         # quadrature points with mark m2
+         Qsub <- (tu == mark2o[i])
+         # assign
+         z[Xsub, Qsub, ucode[i]] <- value[Xsub, Qsub]
+       }
+     }
+     attr(z, "IsOffset") <- TRUE
+     return(z)
+   }
+   #### end of 'pot' function ####
+
+  # ............ template object ...................
+  
+  BlankMH <- 
+  list(
+       name     = "Multitype Hardcore process",
+       creator  = "MultiHard",
+       family   = "pairwise.family",  # evaluated later
+       pot      = MHpotential,
+       par      = list(types=NULL, hradii = NULL), # filled in later
+       parnames = c("possible types", "hardcore distances"),
+       pardesc  = c("vector of possible types",
+                    "matrix of hardcore distances"),
+       selfstart = function(X, self) {
+         types <- self$par$types
+         hradii <- self$par$hradii
+         if(!is.null(types) && !is.null(hradii)) return(self)
+         if(is.null(types)) types <- levels(marks(X))
+         if(is.null(hradii)) {
+           marx <- marks(X)
+           d <- nndist(X, by=marx)
+           h <- aggregate(d, by=list(from=marx), min)
+           h <- as.matrix(h[, -1, drop=FALSE])
+           m <- table(marx)
+           mm <- outer(m, m, pmin)
+           hradii <- h * mm/(mm+1)
+           dimnames(hradii) <- list(types, types)
+         }
+         MultiHard(types=types,hradii=hradii)
+       },
+       init     = function(self) {
+         types <- self$par$types
+         if(!is.null(types)) {
+           h <- self$par$hradii
+           nt <- length(types)
+           if(!is.null(h)) MultiPair.checkmatrix(h, nt, sQuote("hradii"))
+           if(length(types) == 0)
+             stop(paste("The", sQuote("types"),
+                        "argument should be",
+                        "either NULL or a vector of all possible types"))
+           if(anyNA(types))
+             stop("NA's not allowed in types")
+           if(is.factor(types)) {
+             types <- levels(types)
+           } else {
+             types <- levels(factor(types, levels=types))
+           }
+         }
+       },
+       update = NULL,  # default OK
+       print = function(self) {
+         h <- self$par$hradii
+         if(waxlyrical('gory')) {
+           if(!is.null(h)) splat(nrow(h), "types of points")
+           types <- self$par$types
+           if(!is.null(types)) {
+             splat("Possible types:")
+             print(noquote(types))
+           } else splat("Possible types:\t not yet determined")
+         }
+         if(!is.null(h)) {
+           splat("Hardcore radii:")
+           print(signif(h, getOption("digits")))
+         } else splat("Hardcore radii:\t not yet determined") 
+         invisible()
+       },
+       interpret = function(coeffs, self) {
+        # there are no regular parameters (woo-hoo!)
+         return(NULL)
+       },
+       valid = function(coeffs, self) {
+         return(TRUE)
+       },
+       project = function(coeffs, self) {
+         return(NULL)
+       },
+       irange = function(self, coeffs=NA, epsilon=0, ...) {
+         h <- self$par$hradii
+         return(max(0, h, na.rm=TRUE))
+       },
+       version=NULL # fix later
+  )
+  class(BlankMH) <- "interact"
+
+  MultiHard <- function(hradii=NULL, types=NULL) {
+    if((missing(hradii) || !is.matrix(hradii)) && is.matrix(types)) {
+      ## old syntax: (types=NULL, hradii)
+      hradii <- types
+      types <- NULL
+    }
+    if(!is.null(hradii)) hradii[hradii == 0] <- NA
+    out <- instantiate.interact(BlankMH, list(types=types, hradii = hradii))
+    if(!is.null(types))
+      dimnames(out$par$hradii) <- list(types, types)
+    return(out)
+  }
+
+  MultiHard <- intermaker(MultiHard, BlankMH)
+  
+  MultiHard
+})
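A minimal sketch of fitting the multitype hard core model defined above
(using the built-in 'amacrine' marked pattern; illustrative only).
When 'hradii' is not supplied, the selfstart rule estimates it from
nearest-neighbour distances:

    library(spatstat)
    fit <- ppm(amacrine ~ marks, MultiHard())
    fit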
diff --git a/R/multipair.util.R b/R/multipair.util.R
new file mode 100755
index 0000000..1aa0d95
--- /dev/null
+++ b/R/multipair.util.R
@@ -0,0 +1,31 @@
+##
+##
+##    multipair.util.R
+##
+##    $Revision: 1.13 $	$Date: 2014/04/29 01:13:35 $
+##
+##    Utilities for multitype pairwise interactions
+##	
+## -------------------------------------------------------------------
+##	
+
+MultiPair.checkmatrix <-
+  function(mat, n, matname, naok=TRUE, zerook=TRUE, asymmok=FALSE) {
+    if(missing(matname))
+      matname <- short.deparse(substitute(mat))
+    if(!is.matrix(mat))
+      stop(paste(matname, "must be a matrix"))
+    if(any(dim(mat) != rep.int(n,2)))
+      stop(paste(matname, "must be a square matrix,",
+                 "of size", n, "x", n))
+    isna <- is.na(mat)
+    if(!naok && any(isna))
+      stop(paste("NA entries not allowed in", matname))
+    if(any(mat[!isna] < 0))
+      stop(paste("Negative entries not allowed in", matname))
+    if(!zerook && any(mat[!isna] == 0))
+      stop(paste("Zero entries not allowed in", matname))
+    if(!asymmok && !isSymmetric(mat))
+      stop(paste(matname, "must be a symmetric matrix"))
+  }
+
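A small sketch of the checks performed by this internal helper
(illustrative only):

    m <- matrix(c(0.1, 0.05, 0.05, 0.1), 2, 2)
    MultiPair.checkmatrix(m, 2, "radii")       # passes silently
    m[1, 2] <- 0.2                             # now asymmetric
    try(MultiPair.checkmatrix(m, 2, "radii"))  # error: must be symmetric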
diff --git a/R/multistrauss.R b/R/multistrauss.R
new file mode 100755
index 0000000..c60400a
--- /dev/null
+++ b/R/multistrauss.R
@@ -0,0 +1,237 @@
+#
+#
+#    multistrauss.R
+#
+#    $Revision: 2.23 $	$Date: 2015/03/31 03:57:11 $
+#
+#    The multitype Strauss process
+#
+#    MultiStrauss()    create an instance of the multitype Strauss process
+#                 [an object of class 'interact']
+#	
+# -------------------------------------------------------------------
+#	
+
+MultiStrauss <- local({
+
+  # ......... define interaction potential
+
+  MSpotential <- function(d, tx, tu, par) {
+     # arguments:
+     # d[i,j] distance between points X[i] and U[j]
+     # tx[i]  type (mark) of point X[i]
+     # tu[j]  type (mark) of point U[j]
+     #
+     # get matrix of interaction radii r[ , ]
+     r <- par$radii
+     #
+     # get possible marks and validate
+     if(!is.factor(tx) || !is.factor(tu))
+	stop("marks of data and dummy points must be factor variables")
+     lx <- levels(tx)
+     lu <- levels(tu)
+     if(length(lx) != length(lu) || any(lx != lu))
+	stop("marks of data and dummy points do not have same possible levels")
+
+     if(!identical(lx, par$types))
+        stop("data and model do not have the same possible levels of marks")
+     if(!identical(lu, par$types))
+        stop("dummy points and model do not have the same possible levels of marks")
+     
+     # ensure factor levels are acceptable for column names (etc)
+     lxname <- make.names(lx, unique=TRUE)
+
+     # list all UNORDERED pairs of types to be checked
+     # (the interaction must be symmetric in type, and scored as such)
+     uptri <- (row(r) <= col(r)) & !is.na(r)
+     mark1 <- (lx[row(r)])[uptri]
+     mark2 <- (lx[col(r)])[uptri]
+     # corresponding names
+     mark1name <- (lxname[row(r)])[uptri]
+     mark2name <- (lxname[col(r)])[uptri]
+     vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x")
+     vname <- paste("mark", vname, sep="")
+     npairs <- length(vname)
+     # list all ORDERED pairs of types to be checked
+     # (to save writing the same code twice)
+     different <- mark1 != mark2
+     mark1o <- c(mark1, mark2[different])
+     mark2o <- c(mark2, mark1[different])
+     nordpairs <- length(mark1o)
+     # unordered pair corresponding to each ordered pair
+     ucode <- c(1:npairs, (1:npairs)[different])
+     #
+     # create logical array for result
+     z <- array(FALSE, dim=c(dim(d), npairs),
+                dimnames=list(character(0), character(0), vname))
+     # go....
+     if(length(z) > 0) {
+       # assemble the relevant interaction distance for each pair of points
+       rxu <- r[ tx, tu ]
+       # apply relevant threshold to each pair of points
+       str <- (d <= rxu)
+       # assign str[i,j] -> z[i,j,k] where k is relevant interaction code
+       for(i in 1:nordpairs) {
+         # data points with mark m1
+         Xsub <- (tx == mark1o[i])
+         # quadrature points with mark m2
+         Qsub <- (tu == mark2o[i])
+         # assign
+         z[Xsub, Qsub, ucode[i]] <- str[Xsub, Qsub]
+       }
+     }
+     return(z)
+   }
+   #### end of 'pot' function ####
+
+  # ........ auxiliary functions ..............
+  delMS <- function(which, types, radii) {
+    radii[which] <- NA
+    if(all(is.na(radii))) return(Poisson())
+    return(MultiStrauss(types, radii))
+  }
+  
+  # Set up basic object except for family and parameters
+  BlankMSobject <- 
+  list(
+       name     = "Multitype Strauss process",
+       creator  = "MultiStrauss",
+       family   = "pairwise.family", # evaluated later
+       pot      = MSpotential,
+       par      = list(types=NULL, radii = NULL), # to be filled in later
+       parnames = c("possible types", "interaction distances"),
+       pardesc  = c("vector of possible types",
+                    "matrix of hardcore distances"),
+       selfstart = function(X, self) {
+         if(!is.null(self$par$types)) return(self)
+         types <- levels(marks(X))
+         MultiStrauss(types=types,radii=self$par$radii)
+       },
+       init = function(self) {
+         types <- self$par$types
+         if(!is.null(types)) {
+           radii <- self$par$radii
+           nt <- length(types)
+           MultiPair.checkmatrix(radii, nt, sQuote("radii"))
+           if(length(types) == 0)
+             stop(paste("The", sQuote("types"),"argument should be",
+                        "either NULL or a vector of all possible types"))
+           if(anyNA(types))
+             stop("NA's not allowed in types")
+           if(is.factor(types)) {
+             types <- levels(types)
+           } else {
+             types <- levels(factor(types, levels=types))
+           }
+         }
+       },
+       update = NULL, # default OK
+       print = function(self) {
+         radii <- self$par$radii
+         types <- self$par$types
+         if(waxlyrical('gory')) {
+           splat(nrow(radii), "types of points")
+           if(!is.null(types)) {
+             splat("Possible types: ")
+             print(noquote(types))
+           } else splat("Possible types:\t not yet determined")
+         }
+         cat("Interaction radii:\n")
+         print(signif(radii, getOption("digits")))
+         invisible()
+       },
+       interpret = function(coeffs, self) {
+         # get possible types
+         typ <- self$par$types
+         ntypes <- length(typ)
+         # get matrix of Strauss interaction radii
+         r <- self$par$radii
+         # list all unordered pairs of types
+         uptri <- (row(r) <= col(r)) & (!is.na(r))
+         index1 <- (row(r))[uptri]
+         index2 <- (col(r))[uptri]
+         npairs <- length(index1)
+         # extract canonical parameters; shape them into a matrix
+         gammas <- matrix(, ntypes, ntypes)
+         dimnames(gammas) <- list(typ, typ)
+         expcoef <- exp(coeffs)
+         gammas[ cbind(index1, index2) ] <- expcoef
+         gammas[ cbind(index2, index1) ] <- expcoef
+         #
+         return(list(param=list(gammas=gammas),
+                     inames="interaction parameters gamma_ij",
+                     printable=dround(gammas)))
+       },
+       valid = function(coeffs, self) {
+         # interaction parameters gamma[i,j]
+         gamma <- (self$interpret)(coeffs, self)$param$gammas
+         # interaction radii
+         radii <- self$par$radii
+         # parameters to estimate
+         required <- !is.na(radii)
+         gr <- gamma[required]
+         return(all(is.finite(gr) & gr <= 1))
+       },
+       project  = function(coeffs, self) {
+         # interaction parameters gamma[i,j]
+         gamma <- (self$interpret)(coeffs, self)$param$gammas
+         # interaction radii and types
+         radii <- self$par$radii
+         types <- self$par$types
+         # problems?
+         required <- !is.na(radii)
+         okgamma  <- is.finite(gamma) & (gamma <= 1)
+         naughty  <- required & !okgamma
+         # 
+         if(!any(naughty))  
+           return(NULL)
+         if(spatstat.options("project.fast")) {
+           # remove ALL naughty terms simultaneously
+           return(delMS(naughty, types, radii))
+         } else {
+           # present a list of candidates
+           rn <- row(naughty)
+           cn <- col(naughty)
+           uptri <- (rn <= cn) 
+           upn <- uptri & naughty
+           rowidx <- as.vector(rn[upn])
+           colidx <- as.vector(cn[upn])
+           matindex <- function(v) { matrix(c(v, rev(v)),
+                                            ncol=2, byrow=TRUE) }
+           mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matindex)
+           inters <- lapply(mats, delMS, types=types, radii=radii)
+           return(inters)
+         }
+       },
+       irange = function(self, coeffs=NA, epsilon=0, ...) {
+         r <- self$par$radii
+         active <- !is.na(r)
+         if(any(!is.na(coeffs))) {
+           gamma <- (self$interpret)(coeffs, self)$param$gammas
+           gamma[is.na(gamma)] <- 1
+           active <- active & (abs(log(gamma)) > epsilon)
+         }
+         if(any(active)) return(max(r[active])) else return(0)
+       },
+       version=NULL # to be added
+       )
+  class(BlankMSobject) <- "interact"
+
+  # finally create main function
+  MultiStrauss <- function(radii, types=NULL) {
+    if((missing(radii) || !is.matrix(radii)) && is.matrix(types)) {
+      ## old syntax: (types=NULL, radii)
+      radii <- types
+      types <- NULL
+    }
+    radii[radii == 0] <- NA
+    out <- instantiate.interact(BlankMSobject, list(types=types, radii = radii))
+    if(!is.null(types))
+      dimnames(out$par$radii) <- list(types, types)
+    return(out)
+  }
+
+  MultiStrauss <- intermaker(MultiStrauss, BlankMSobject)
+  
+  MultiStrauss
+})
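A minimal sketch of fitting the multitype Strauss model (the radii
values are illustrative only):

    library(spatstat)
    r <- matrix(c(0.04, 0.06, 0.06, 0.04), 2, 2)
    fit <- ppm(amacrine ~ marks, MultiStrauss(radii = r))
    fit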
diff --git a/R/multistrhard.R b/R/multistrhard.R
new file mode 100755
index 0000000..fa5828d
--- /dev/null
+++ b/R/multistrhard.R
@@ -0,0 +1,351 @@
+#
+#
+#    multistrhard.R
+#
+#    $Revision: 2.38 $	$Date: 2016/02/16 01:39:12 $
+#
+#    The multitype Strauss/hardcore process
+#
+#    MultiStraussHard()
+#                 create an instance of the multitype Strauss/hardcore
+#                 point process
+#                 [an object of class 'interact']
+#	
+# -------------------------------------------------------------------
+#	
+
+doMultiStraussHard <- local({
+  
+  # ........  define potential ......................
+
+  MSHpotential <- function(d, tx, tu, par) {
+     # arguments:
+     # d[i,j] distance between points X[i] and U[j]
+     # tx[i]  type (mark) of point X[i]
+     # tu[j]  type (mark) of point U[j]
+     #
+     # get matrices of interaction radii
+     r <- par$iradii
+     h <- par$hradii
+
+     # get possible marks and validate
+     if(!is.factor(tx) || !is.factor(tu))
+	stop("marks of data and dummy points must be factor variables")
+     lx <- levels(tx)
+     lu <- levels(tu)
+     if(length(lx) != length(lu) || any(lx != lu))
+	stop("marks of data and dummy points do not have same possible levels")
+
+     if(!identical(lx, par$types))
+        stop("data and model do not have the same possible levels of marks")
+     if(!identical(lu, par$types))
+        stop("dummy points and model do not have the same possible levels of marks")
+                   
+     # ensure factor levels are acceptable for column names (etc)
+     lxname <- make.names(lx, unique=TRUE)
+
+     # list all UNORDERED pairs of types to be counted
+     # (the interaction must be symmetric in type, and scored as such)
+     uptri <- (row(r) <= col(r)) & !is.na(r)
+     mark1 <- (lx[row(r)])[uptri]
+     mark2 <- (lx[col(r)])[uptri]
+     # corresponding names
+     mark1name <- (lxname[row(r)])[uptri]
+     mark2name <- (lxname[col(r)])[uptri]
+     vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x")
+     vname <- paste("mark", vname, sep="")
+     npairs <- length(vname)
+     # list all ORDERED pairs of types to be counted
+     # (to save writing the same code twice)
+     different <- mark1 != mark2
+     mark1o <- c(mark1, mark2[different])
+     mark2o <- c(mark2, mark1[different])
+     nordpairs <- length(mark1o)
+     # unordered pair corresponding to each ordered pair
+     ucode <- c(1:npairs, (1:npairs)[different])
+     #
+     # create numeric array for result
+     z <- array(0, dim=c(dim(d), npairs),
+                dimnames=list(character(0), character(0), vname))
+     # go....
+     if(length(z) > 0) {
+       # apply the relevant interaction distance to each pair of points
+       rxu <- r[ tx, tu ]
+       str <- (d < rxu)
+       str[is.na(str)] <- FALSE
+       # and the relevant hard core distance
+       hxu <- h[ tx, tu ]
+       forbid <- (d < hxu)
+       forbid[is.na(forbid)] <- FALSE
+       # form the potential
+       value <- str
+       value[forbid] <- -Inf
+       # assign value[i,j] -> z[i,j,k] where k is relevant interaction code
+       for(i in 1:nordpairs) {
+         # data points with mark m1
+         Xsub <- (tx == mark1o[i])
+         # quadrature points with mark m2
+         Qsub <- (tu == mark2o[i])
+         # assign
+         z[Xsub, Qsub, ucode[i]] <- value[Xsub, Qsub]
+       }
+     }
+     return(z)
+   }
+  # ............... end of potential function ...................
+
+  # .......... auxiliary functions .................
+  
+  delMSH <- function(which, types, iradii, hradii, ihc) {
+    iradii[which] <- NA
+    if(any(!is.na(iradii))) {
+      # some gamma interactions left
+      # return modified MultiStraussHard with fewer gamma parameters
+      return(MultiStraussHard(types, iradii, hradii))
+    } else if(any(!ihc)) {
+      # no gamma interactions left, but some active hard cores
+      return(MultiHard(types, hradii))
+    } else return(Poisson())
+  }
+
+  # ...........................................................
+  
+  # Set up basic object except for family and parameters
+
+  BlankMSHobject <- 
+    list(
+         name     = "Multitype Strauss Hardcore process",
+         creator  = "MultiStraussHard",
+         family   = "pairwise.family", # evaluated later
+         pot      = MSHpotential,
+         par      = list(types=NULL, iradii=NULL, hradii=NULL),  # to be added
+         parnames = c("possible types",
+                      "interaction distances",
+                      "hardcore distances"),
+         pardesc  = c("vector of possible types",
+                      "matrix of interaction distances",
+                      "matrix of hardcore distances"),
+         selfstart = function(X, self) {
+           types <- self$par$types
+           hradii <- self$par$hradii
+           if(!is.null(types) && !is.null(hradii)) return(self)
+           if(is.null(types)) types <- levels(marks(X))
+           if(is.null(hradii)) {
+             marx <- marks(X)
+             d <- nndist(X, by=marx)
+             h <- aggregate(d, by=list(from=marx), min)
+             h <- as.matrix(h[, -1, drop=FALSE])
+             m <- table(marx)
+             mm <- outer(m, m, pmin)
+             hradii <- h * mm/(mm+1)
+             dimnames(hradii) <- list(types, types)
+           }
+           MultiStraussHard(types=types,hradii=hradii,iradii=self$par$iradii)
+	 },
+         init     = function(self) {
+           types <- self$par$types
+           iradii <- self$par$iradii
+           hradii <- self$par$hradii
+           # hradii could be NULL
+           if(!is.null(types)) {
+             if(!is.null(dim(types)))
+               stop(paste("The", sQuote("types"),
+                          "argument should be a vector"))
+             if(length(types) == 0)
+               stop(paste("The", sQuote("types"),"argument should be",
+                          "either NULL or a vector of all possible types"))
+             if(anyNA(types))
+               stop("NA's not allowed in types")
+             if(is.factor(types)) {
+               types <- levels(types)
+             } else {
+               types <- levels(factor(types, levels=types))
+             }
+             nt <- length(types)
+             MultiPair.checkmatrix(iradii, nt, sQuote("iradii"))
+             if(!is.null(hradii))
+               MultiPair.checkmatrix(hradii, nt, sQuote("hradii"))
+           }
+           ina <- is.na(iradii)
+           if(all(ina))
+             stop(paste("All entries of", sQuote("iradii"),
+                        "are NA"))
+           if(!is.null(hradii)) {
+             hna <- is.na(hradii)
+             both <- !ina & !hna
+             if(any(iradii[both] <= hradii[both]))
+               stop("iradii must be larger than hradii")
+           }
+         },
+         update = NULL,  # default OK
+         print = function(self) {
+           types <- self$par$types
+           iradii <- self$par$iradii
+           hradii <- self$par$hradii
+           nt <- nrow(iradii)
+           if(waxlyrical('gory')) {
+             splat(nt, "types of points")
+             if(!is.null(types)) {
+               splat("Possible types:")
+               print(noquote(types))
+             } else splat("Possible types:\t not yet determined")
+           }
+           splat("Interaction radii:")
+           dig <- getOption("digits")
+           print(signif(iradii, dig))
+           if(!is.null(hradii)) {
+             splat("Hardcore radii:")
+             print(signif(hradii, dig))
+           } else splat("Hardcore radii: not yet determined")
+           invisible()
+         },
+        interpret = function(coeffs, self) {
+          # get possible types
+          typ <- self$par$types
+          ntypes <- length(typ)
+          # get matrices of interaction radii
+          r <- self$par$iradii
+          h <- self$par$hradii
+          # list all relevant unordered pairs of types
+          uptri <- (row(r) <= col(r)) & !is.na(r)
+          index1 <- (row(r))[uptri]
+          index2 <- (col(r))[uptri]
+          npairs <- length(index1)
+          # extract canonical parameters; shape them into a matrix
+          gammas <- matrix(, ntypes, ntypes)
+          dimnames(gammas) <- list(typ, typ)
+          expcoef <- exp(coeffs)
+          gammas[ cbind(index1, index2) ] <- expcoef
+          gammas[ cbind(index2, index1) ] <- expcoef
+          #
+          return(list(param=list(gammas=gammas),
+                      inames="interaction parameters gamma_ij",
+                      printable=dround(gammas)))
+        },
+        valid = function(coeffs, self) {
+           # interaction radii r[i,j]
+           iradii <- self$par$iradii
+           # hard core radii r[i,j]
+           hradii <- self$par$hradii
+           # interaction parameters gamma[i,j]
+           gamma <- (self$interpret)(coeffs, self)$param$gammas
+           # Check that we managed to estimate all required parameters
+           required <- !is.na(iradii)
+           if(!all(is.finite(gamma[required])))
+             return(FALSE)
+           # Check that the model is integrable
+           # inactive hard cores ...
+           ihc <- (is.na(hradii) | hradii == 0)
+           # .. must have gamma <= 1
+           return(all(gamma[required & ihc] <= 1))
+         },
+         project = function(coeffs, self) {
+           # types
+           types <- self$par$types
+           # interaction radii r[i,j]
+           iradii <- self$par$iradii
+           # hard core radii r[i,j]
+           hradii <- self$par$hradii
+           # interaction parameters gamma[i,j]
+           gamma <- (self$interpret)(coeffs, self)$param$gammas
+           # required gamma parameters
+           required <- !is.na(iradii)
+           # active hard cores
+           activehard <- !is.na(hradii) & (hradii > 0)
+           ihc <- !activehard
+           # problems
+           gammavalid <- is.finite(gamma) & (activehard | gamma <= 1)
+           naughty    <- required & !gammavalid
+           if(!any(naughty))
+             return(NULL)
+           #
+           if(spatstat.options("project.fast")) {
+             # remove ALL naughty terms simultaneously
+             return(delMSH(naughty, types, iradii, hradii, ihc))
+           } else {
+             # present a list of candidates
+             rn <- row(naughty)
+             cn <- col(naughty)
+             uptri <- (rn <= cn) 
+             upn <- uptri & naughty
+             rowidx <- as.vector(rn[upn])
+             colidx <- as.vector(cn[upn])
+#             matindex <- function(v) { matrix(c(v, rev(v)),
+#                                              ncol=2, byrow=TRUE) }
+             mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matindex)
+             inters <- lapply(mats, delMSH,
+                              types=types, iradii=iradii,
+                              hradii=hradii, ihc=ihc)
+             return(inters)
+           }
+         },
+         irange = function(self, coeffs=NA, epsilon=0, ...) {
+           r <- self$par$iradii
+           h <- self$par$hradii
+           ractive <- !is.na(r)
+           hactive <- !is.na(h)
+           if(any(!is.na(coeffs))) {
+             gamma <- (self$interpret)(coeffs, self)$param$gammas
+             gamma[is.na(gamma)] <- 1
+             ractive <- ractive & (abs(log(gamma)) > epsilon)
+           }
+           if(!any(c(ractive,hactive)))
+             return(0)
+           else
+             return(max(c(r[ractive],h[hactive])))
+         },
+         version=NULL # to be added
+         )
+  class(BlankMSHobject) <- "interact"
+
+  matindex <- function(v) { matrix(c(v, rev(v)), ncol=2, byrow=TRUE) }
+  
+  # Finally define MultiStraussHard function
+  doMultiStraussHard <- function(iradii, hradii=NULL, types=NULL) {
+    iradii[iradii == 0] <- NA
+    if(!is.null(hradii)) hradii[hradii == 0] <- NA
+    out <- instantiate.interact(BlankMSHobject,
+                                list(types=types,
+                                     iradii = iradii, hradii = hradii))
+    if(!is.null(types)) {
+      dn <- list(types, types)
+      dimnames(out$par$iradii) <- dn
+      if(!is.null(out$par$hradii)) dimnames(out$par$hradii) <- dn
+    }
+    return(out)
+  }
+
+  doMultiStraussHard
+})
+
+
+MultiStraussHard <- local({
+
+  MultiStraussHard <- function(iradii, hradii, types=NULL) {
+    ## try new syntax
+    newcall <- match.call()
+    newcall[[1]] <- as.name('doMultiStraussHard')
+    out <- try(eval(newcall, parent.frame()), silent=TRUE)
+    if(is.interact(out))
+      return(out)
+    ## try old syntax
+    oldcall <- match.call(function(types=NULL, iradii, hradii) {})
+    oldcall[[1]] <- as.name('doMultiStraussHard')
+    out <- try(eval(oldcall, parent.frame()), silent=TRUE)
+    if(is.interact(out))
+      return(out)
+    ## Syntax is wrong: generate error using new syntax rules
+    if(missing(hradii)) hradii <- NULL
+    doMultiStraussHard(iradii=iradii, hradii=hradii, types=types)
+  }
+
+
+  BlankMSHobject <- get("BlankMSHobject",
+                        envir=environment(doMultiStraussHard))
+  
+  MultiStraussHard <- intermaker(MultiStraussHard, BlankMSHobject)
+
+  MultiStraussHard
+})
+
+
+  
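A brief sketch of the two calling syntaxes the wrapper above accepts
(all values illustrative only; note 'hradii' must be strictly smaller
than 'iradii'):

    library(spatstat)
    ir <- matrix(0.10, 2, 2)                       # interaction radii
    hr <- matrix(0.02, 2, 2)                       # hardcore radii
    a <- MultiStraussHard(ir, hr)                  # new syntax (iradii, hradii)
    b <- MultiStraussHard(c("off", "on"), ir, hr)  # old syntax (types, iradii, hradii)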
diff --git a/R/nearestsegment.R b/R/nearestsegment.R
new file mode 100755
index 0000000..413e345
--- /dev/null
+++ b/R/nearestsegment.R
@@ -0,0 +1,79 @@
+#
+#  nearestsegment.R
+#
+#  $Revision: 1.11 $  $Date: 2014/11/10 11:27:12 $
+#
+# Given a point pattern X and a line segment pattern Y,
+# for each point x of X, determine which segment of Y is closest to x
+# and find the point on Y closest to x.
+#
+
+nearestsegment <- function(X,Y) {
+  return(ppllengine(X,Y,"identify"))
+}
+
+project2segment <- function(X, Y) {
+  return(ppllengine(X,Y,"project"))
+}
+  
+ppllengine <- function(X, Y, action="project", check=FALSE) {
+  stopifnot(is.ppp(X))
+  stopifnot(is.psp(Y))
+  stopifnot(action %in% c("distance", "identify", "project"))
+  # deal with empty patterns
+  if(Y$n == 0)
+    stop("Segment pattern Y contains 0 segments; projection undefined")
+  if(X$n == 0) {
+    nowt <- numeric(0)
+    none <- integer(0)
+    switch(action,
+           identify = return(none),
+           distance = return(list(dist=nowt, which=none)),
+           project  = return(list(Xproj=X, mapXY=none, d=nowt, tp=nowt)))
+  }
+  #              
+  XX <- as.matrix(as.data.frame(unmark(X)))
+  YY <- as.matrix(as.data.frame(unmark(Y)))
+  # determine which segment lies closest to each point
+  huge <- max(diameter(as.rectangle(as.owin(X))),
+              diameter(as.rectangle(as.owin(Y))))
+  d <- distppllmin(XX, YY, huge^2)
+  mapXY <- d$min.which
+  if(action == "identify")
+    return(mapXY)
+  else if(action == "distance") 
+    return(data.frame(dist=d$min.d, which=mapXY))
+  
+  # combine relevant rows of data
+  alldata <- as.data.frame(cbind(XX, YY[mapXY, ,drop=FALSE]))
+  colnames(alldata) <- c("x", "y", "x0", "y0", "x1", "y1")
+  # coordinate geometry
+  dx <- with(alldata, x1-x0)
+  dy <- with(alldata, y1-y0)
+  leng <- sqrt(dx^2 + dy^2)
+  # rotation sines & cosines (may include 0/0)
+  co <- dx/leng
+  si <- dy/leng
+  # vector to point from first endpoint of segment
+  xv <- with(alldata, x - x0)
+  yv <- with(alldata, y - y0)
+  # rotate coordinate system so that x axis is parallel to line segment
+  xpr <- xv * co + yv * si
+#  ypr <- - xv * si + yv * co
+  # determine whether projection is an endpoint or interior point of segment
+  ok <- is.finite(xpr)
+  left <- !ok | (xpr <= 0)
+  right <- ok &  (xpr >= leng)
+  # location of projected point in rotated coordinates
+  xr <- with(alldata, ifelseAX(left, 0, ifelseXY(right, leng, xpr)))
+  # back to standard coordinates
+  xproj <- with(alldata, x0 + ifelseXB(ok, xr * co, 0))
+  yproj <- with(alldata, y0 + ifelseXB(ok, xr * si, 0))
+  Xproj <- ppp(xproj, yproj, window=X$window, marks=X$marks, check=check)
+  # parametric coordinates
+  tp <- xr/leng
+  tp[!is.finite(tp)] <- 0
+  # 
+  return(list(Xproj=Xproj, mapXY=mapXY, d=d$min.d, tp=tp))
+}
+
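A minimal sketch of the projection utilities (random data; illustrative
only):

    library(spatstat)
    set.seed(42)
    X <- runifpoint(4)
    Y <- psp(runif(3), runif(3), runif(3), runif(3), window = owin())
    nearestsegment(X, Y)   # index of the closest segment for each point
    v <- project2segment(X, Y)
    v$Xproj                # points projected onto the nearest segments
    v$tp                   # parametric position along each segment (0 to 1)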
diff --git a/R/newformula.R b/R/newformula.R
new file mode 100644
index 0000000..6039b29
--- /dev/null
+++ b/R/newformula.R
@@ -0,0 +1,18 @@
+#'
+#'     newformula.R
+#'
+#'    $Revision: 1.1 $ $Date: 2017/01/02 10:24:14 $
+#' 
+#'   Update formula and expand polynomial
+
+newformula <- function(old, change, eold, enew) {
+  old <- if(is.null(old)) ~1 else eval(old, eold)
+  change <- if(is.null(change)) ~1 else eval(change, enew)
+  old <- as.formula(old, env=eold)
+  change <- as.formula(change, env=enew)
+  answer <- update.formula(old, change)
+  if(spatstat.options("expand.polynom")) 
+    answer <- expand.polynom(answer)
+  return(answer)
+}
+
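A small sketch of what this internal helper computes (update.formula
semantics, plus optional polynomial expansion; illustrative only):

    e <- new.env()
    newformula(y ~ x, ~ . + z, e, e)   # y ~ x + z
    newformula(NULL, ~ ., e, e)        # ~1  (NULL is treated as ~1)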
diff --git a/R/news.R b/R/news.R
new file mode 100755
index 0000000..8ac5901
--- /dev/null
+++ b/R/news.R
@@ -0,0 +1,16 @@
+#
+# news.R
+#
+#  News and warnings
+#
+latest.news <- function(package="spatstat", doBrowse=FALSE) {
+  # get version number
+  v <- read.dcf(file=system.file("DESCRIPTION", package=package),
+                fields="Version")
+  ne <- eval(substitute(news(Version >= v0, package=package), list(v0=v)))
+  page(ne, method="print", doBrowse=doBrowse)
+  return(invisible(ne))
+}
+
+class(latest.news) <- "autoexec"
+
diff --git a/R/nnclean.R b/R/nnclean.R
new file mode 100755
index 0000000..52bfb64
--- /dev/null
+++ b/R/nnclean.R
@@ -0,0 +1,243 @@
+#
+#  nnclean.R
+#
+# Nearest-neighbour clutter removal
+#
+# Adapted from statlib file NNclean.q
+# Authors: Simon Byers and Adrian Raftery
+#
+#  $Revision: 1.16 $   $Date: 2016/02/11 10:17:12 $
+#
+
+nnclean <- function(X, k, ...) {
+  UseMethod("nnclean")
+}
+
+nnclean.pp3 <- function(X, k, ...,
+                        convergence = 0.001, plothist = FALSE,
+                        verbose=TRUE, maxit=50)
+{
+  # Adapted from statlib file NNclean.q
+  # Authors: Simon Byers and Adrian Raftery
+  # Adapted for spatstat by Adrian Baddeley
+
+  Xname <- short.deparse(substitute(X))
+  
+  stopifnot(inherits(X, "pp3"))
+  validposint(k, "nnclean.pp3")
+
+  kthNND <- nndist(X, k=k)  
+  
+  # apply classification algorithm
+  em <- do.call(nncleanEngine,
+                resolve.defaults(list(kthNND, k=k),
+                                 list(...),
+                                 list(d=3, tol=convergence, plothist=plothist,
+                                      verbose=verbose, maxit=maxit,
+                                      Xname=Xname)))
+
+  # tack results onto point pattern as marks
+  pp <- em$probs
+  zz <- factor(em$z, levels=c(0,1))
+  levels(zz) <- c("noise", "feature")
+  mm <- hyperframe(prob=pp, label=zz)
+  marks(X) <- cbind(marks(X), mm)
+  attr(X, "theta") <- em[c("lambda1", "lambda2", "p")]
+  attr(X, "info") <- em[c("d", "niter", "maxit", "converged")]
+  attr(X, "hist") <- em$hist
+  return(X)
+}
+
+nnclean.ppp <-
+  function(X, k, ...,
+           edge.correct = FALSE, wrap = 0.1,
+           convergence = 0.001, plothist = FALSE,
+           verbose=TRUE, maxit=50)
+{
+  # Adapted from statlib file NNclean.q
+  # Authors: Simon Byers and Adrian Raftery
+  # Adapted for spatstat by Adrian Baddeley
+
+  Xname <- short.deparse(substitute(X))
+  
+  validposint(k, "nnclean.ppp")
+
+  if(!edge.correct) {
+    # compute vector of k-th nearest neighbour distances
+    kthNND <- nndist(X, k=k)
+  } else {
+    # replicate data periodically
+    # (ensuring original points are listed first)
+    Xbox <- X[as.rectangle(X)]
+    Xpand <- periodify(Xbox, ix=c(0,-1,1), iy=c(0,-1,1), check=FALSE)
+    # trim to margin
+    W <- expand.owin(X$window, (1+2*wrap)^2)
+    Xpand <- Xpand[W]
+    kthNND <- nndist(Xpand, k=k)
+  }
+
+  # apply classification algorithm
+  em <- do.call(nncleanEngine,
+                resolve.defaults(list(kthNND, k=k),
+                                 list(...),
+                                 list(d=2, tol=convergence, plothist=plothist,
+                                      verbose=verbose, maxit=maxit,
+                                      Xname=Xname)))
+
+  # extract results
+  pp <- em$probs
+  zz <- em$z
+  zz <- factor(zz, levels=c(0,1))
+  levels(zz) <- c("noise", "feature")
+  df <- data.frame(class=zz,prob=pp) 
+
+  if(edge.correct) {
+    # trim back to original point pattern
+    df <- df[seq_len(X$n), ]
+  }
+  
+  # tack on
+  marx <- marks(X, dfok=TRUE)
+  if(is.null(marx))
+    marks(X, dfok=TRUE) <- df
+  else 
+    marks(X, dfok=TRUE) <- cbind(df, marx)
+
+  attr(X, "theta") <- em[c("lambda1", "lambda2", "p")]
+  attr(X, "info") <- em[c("d", "niter", "maxit", "converged")]
+  attr(X, "hist") <- em$hist
+  return(X)
+}
+
+nncleanEngine <-
+  function(kthNND, k, d, ..., 
+           tol = 0.001, maxit = 50,
+           plothist = FALSE, lineargs = list(), 
+           verbose=TRUE, Xname="X")
+{
+  ## Adapted from statlib file NNclean.q
+  ## Authors: Simon Byers and Adrian Raftery
+  ## Adapted for spatstat by Adrian Baddeley
+  
+  n <- length(kthNND)
+
+  ## Undocumented extension by Adrian Baddeley 2014
+  ## Allow different dimensions in feature and noise.
+  ## d[1] is cluster dimension.
+  
+  d <- ensure2vector(d)
+  alpha.d <- (2. * pi^(d/2.))/(d * gamma(d/2.))
+
+  # raise to power d for efficiency
+  kNNDpowd1 <- kthNND^(d[1])
+  kNNDpowd2 <- kthNND^(d[2])
+  
+  #
+  # Now use kthNND in E-M algorithm
+  # First set up starting guesses.
+  #
+  #
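+  # (Model, for orientation: under a homogeneous Poisson process of
+  # intensity lambda in d dimensions, the k-th nearest-neighbour
+  # distance has density dknn(., lambda, k, d).  The E-M iteration
+  # below fits a two-component mixture of such densities, with mixing
+  # weight p for the 'feature' component, following Byers & Raftery.)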
+  probs <- numeric(n)
+  thresh <- (min(kthNND) + diff(range(kthNND))/3.)
+  high <- (kthNND > thresh)
+  delta <- as.integer(high)
+  p <- 0.5
+  lambda1 <- k/(alpha.d[1] * mean(kNNDpowd1[!high]))
+  lambda2 <- k/(alpha.d[2] * mean(kNNDpowd2[ high]))
+  loglik.old <- 0.
+  loglik.new <- 1.
+  #
+  # Iterator starts here, 
+  #
+  # Z flags points whose k-th nearest-neighbour distance is zero
+  # (e.g. duplicated points); these are held out of the E-M updates
+  Z <- !kthNND
+  niter <- 0
+  while(abs(loglik.new - loglik.old)/(1 + abs(loglik.new)) > tol) {
+    if(niter >= maxit) {
+      warning(paste("E-M algorithm failed to converge in",
+                    maxit, ngettext(maxit, "iteration", "iterations")),
+              call.=FALSE)
+      break
+    }
+    niter <- niter + 1
+    # E - step
+    f1 <- dknn(kthNND[!Z], lambda=lambda1, k = k, d = d[1])
+    f2 <- dknn(kthNND[!Z], lambda=lambda2, k = k, d = d[2])
+    delta[!Z] <- (p * f1)/(p * f1 + (1 - p) * f2)
+    delta[Z] <- 0
+    # M - step
+    sumdelta <- sum(delta)
+    negdelta <- 1. - delta
+    p <- sumdelta/n
+    lambda1 <- (k * sumdelta)/(alpha.d[1] * sum(kNNDpowd1 * delta))
+    lambda2 <- (k * (n - sumdelta))/(alpha.d[2] * sum(kNNDpowd2 * negdelta))
+    # evaluate marginal loglikelihood
+    loglik.old <- loglik.new
+    loglik.new <- sum( - p * lambda1 * alpha.d[1] * (kNNDpowd1 * delta)
+                      - (1. - p) * lambda2 * alpha.d[2] * (kNNDpowd2 * negdelta)
+                      + delta * k * log(lambda1 * alpha.d[1]) +
+			negdelta * k * log(lambda2 * alpha.d[2]))
+    if(verbose) 
+      cat(paste("Iteration", niter, "\tlogLik =", loglik.new,
+                "\tp =", signif(p,4), "\n"))
+  }
+  if(plothist) {
+    dotargs <- list(...)
+    if(spatstat.options('monochrome'))
+      dotargs <- col.args.to.grey(dotargs)
+    ## compute plot limits to include both histogram and density
+    xlim <- c(0, max(kthNND))
+    H <- do.call(hist,
+                 resolve.defaults(list(kthNND, plot=FALSE, warn.unused=FALSE),
+                                  dotargs,
+                                  list(nclass=40)))
+    barheights <- H$density
+    support <- seq(from=xlim[1], to=xlim[2], length.out = 200)
+    fittedy <- p * dknn(support, lambda=lambda1, k = k, d = d[1]) +
+      (1 - p) * dknn(support, lambda=lambda2, k = k, d = d[2])
+    ylim <- range(c(0, barheights, fittedy))
+    xlab <- paste("Distance to", ordinal(k), "nearest neighbour")
+    ## now plot it (unless overridden by plot=FALSE)
+    reallyplot <- resolve.1.default("plot", list(...), list(plot=TRUE))
+    H <- do.call(hist,
+                 resolve.defaults(list(kthNND, probability=TRUE),
+                                  dotargs,
+                                  list(plot=TRUE,
+                                       warn.unused=reallyplot,
+                                       nclass=40,
+                                       xlim = xlim, ylim=ylim,
+                                       xlab = xlab,
+                                       ylab = "Probability density",
+                                       axes = TRUE, main="")))
+    H$xname <- xlab
+    if(reallyplot) {
+      box()
+      lineargs <- resolve.defaults(lineargs, list(col="green", lwd=2))
+      if(spatstat.options("monochrome"))
+        lineargs <- col.args.to.grey(lineargs)
+      do.call(lines, append(list(x=support, y=fittedy), lineargs))
+    }
+  }
+  #
+  delta1 <- dknn(kthNND[!Z], lambda=lambda1, k = k, d = d[1])
+  delta2 <- dknn(kthNND[!Z], lambda=lambda2, k = k, d = d[2])
+  probs[!Z] <- delta1/(delta1 + delta2)
+  probs[Z] <- 1
+  #
+  if(verbose) {
+    cat("Estimated parameters:\n")
+    cat(paste("p [cluster] =", signif(p, 5), "\n"))
+    cat(paste("lambda [cluster] =", signif(lambda1, 5), "\n"))
+    cat(paste("lambda [noise]   =", signif(lambda2, 5), "\n"))
+  }
+  #
+  # z will be the classifications. 1= in cluster. 0= in noise. 
+  #
+  return(list(z = round(probs),
+              probs = probs,
+              lambda1 = lambda1, lambda2 = lambda2, p = p,
+              kthNND = kthNND, d=d, n=n, k=k,
+              niter = niter, maxit = maxit,
+              converged = (niter < maxit),
+              hist=if(plothist) H else NULL))
+}
+
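A minimal sketch of the clutter-removal front end (the 'shapley' galaxy
dataset with k = 17 follows the Byers & Raftery illustration;
illustrative only):

    library(spatstat)
    Y <- nnclean(shapley, k = 17)
    ## marks now carry the posterior probability of 'feature'
    ## and the noise/feature classification
    head(as.data.frame(marks(Y)))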
diff --git a/R/nncorr.R b/R/nncorr.R
new file mode 100755
index 0000000..506b460
--- /dev/null
+++ b/R/nncorr.R
@@ -0,0 +1,136 @@
+#
+# nncorr.R
+#
+# $Revision: 1.11 $  $Date: 2015/10/21 09:06:57 $
+#
+
+nnmean <- function(X, k=1) {
+  stopifnot(is.ppp(X) && is.marked(X))
+  if(k %% 1 != 0 || length(k) != 1 || k <= 0)
+    stop("k should be a single integer greater than 0", call.=FALSE)
+  if(k >= npoints(X))
+    stop("Not enough points to compute k-th nearest neighbours")
+  m <- numeric.columns(marks(X), logical=TRUE, others="na")
+  nnid <- nnwhich(X, k=k)
+  ok <- (nndist(X, k=k) <= bdist.points(X))
+  if(!any(ok, na.rm=TRUE))
+    stop("Insufficient data")
+  numer <- unlist(lapply(as.data.frame(m[nnid[ok], ]), mean, na.rm=TRUE))
+  denom <- unlist(lapply(as.data.frame(m),             mean, na.rm=TRUE))
+  ans <- rbind(unnormalised=numer,
+               normalised  =numer/denom)
+  if(ncol(ans) == 1) ans <- ans[,1,drop=TRUE]
+  return(ans)
+}
+
+nnvario <- local({
+
+  nnvario <- function(X, k=1) {
+    stopifnot(is.ppp(X) && is.marked(X))
+    m <- numeric.columns(marks(X), logical=TRUE, others="na")
+    ans <- nncorr(X %mark% m, sqdif, k=k, denominator=diag(var(m)))
+    return(ans)
+  }
+  sqdif <- function(m1,m2) { ((m1-m2)^2)/2 }
+
+  nnvario
+})
+
+
+nncorr <- function(X, f = function(m1,m2) { m1 * m2},
+                   k=1,
+                   ...,
+                   use = "all.obs",
+                   method = c("pearson", "kendall", "spearman"),
+                   denominator=NULL) {
+  stopifnot(is.ppp(X) && is.marked(X))
+  if(k %% 1 != 0 || length(k) != 1 || k <= 0)
+    stop("k should be a single integer greater than 0", call.=FALSE)
+  if(k >= npoints(X))
+    stop("Not enough points to compute k-th nearest neighbours")
+  
+  m <- as.data.frame(marks(X))
+  nv <- ncol(m)
+  if(nv == 1) colnames(m) <- ""
+  #
+  if(missing(method) || is.null(method))
+    method <- "pearson"
+  # 
+  if(missing(f)) f <- NULL
+  if(!is.null(f) && !is.function(f)) {
+    if(nv == 1) stop("f should be a function")
+    # could be a list of functions
+    if(!(is.list(f) && all(unlist(lapply(f, is.function)))))
+      stop("f should be a function or a list of functions")
+    if(length(f) != nv)
+      stop("Length of list f does not match number of mark variables")
+  }
+  # optional denominator(s)
+  if(!is.null(denominator) && !(length(denominator) %in% c(1, nv)))
+    stop("Denominator has incorrect length")
+  # multi-dimensional case
+  if(nv > 1) {
+    # replicate things
+    if(is.function(f)) f <- rep.int(list(f), nv)
+    if(length(denominator) <= 1) denominator <- rep.int(list(denominator), nv)
+    #
+    result <- matrix(NA, nrow=3, ncol=nv)
+    outnames <- c("unnormalised", "normalised", "correlation")
+    dimnames(result) <- list(outnames, colnames(m))
+    for(j in 1:nv) {
+      mj <- m[,j, drop=FALSE]
+      denj <- denominator[[j]]
+      nncj <- nncorr(X %mark% mj, f=f[[j]], k=k, use=use, method=method,
+                     denominator=denj)
+      kj <- length(nncj)
+      result[1:kj,j] <- nncj
+    }
+    if(all(is.na(result[3, ]))) result <- result[1:2, ]
+    return(result)
+  }
+  # one-dimensional
+  m <- m[,1,drop=TRUE]
+  # select 'f' appropriately for X
+  chk <- check.testfun(f, X=X)
+  f     <- chk$f
+  ftype <- chk$ftype
+  # denominator
+  Efmm <-
+    if(!is.null(denominator)) denominator else 
+    switch(ftype,
+           mul={ 
+             mean(m)^2
+           },
+           equ={
+             sum(table(m)^2)/length(m)^2
+           },
+           general={
+             mean(outer(m, m, f, ...))
+           })
+  # border method
+  nn <- nnwhich(X, k=k)
+  ok <- (nndist(X, k=k) <= bdist.points(X))
+  if(!any(ok))
+    stop("Insufficient data")
+  mY <- m[nn[ok]]
+  mX <- m[ok]
+  Efmk <- switch(ftype,
+                 mul = {
+                   mean(mX * mY, ...)
+                 },
+                 equ = {
+                   mean(mX == mY, ...)
+                 }, 
+                 general = {
+                   mean(f(mX, mY, ...))
+                 })
+  #
+  answer <- c(unnormalised=Efmk,
+              normalised=Efmk/Efmm)
+  if(ftype == "mul") {
+    classic <- cor(mX, mY, use=use, method=method)
+    answer <- c(answer, correlation=classic)
+  }
+  return(answer)
+}
+  
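A short sketch of these nearest-neighbour mark summaries (using the
built-in marked pattern 'finpines'; illustrative only):

    library(spatstat)
    nnmean(finpines)    # mean mark at the nearest neighbour
    nnvario(finpines)   # nearest-neighbour variogram index
    nncorr(finpines)    # nearest-neighbour mark correlation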
diff --git a/R/nncross.R b/R/nncross.R
new file mode 100755
index 0000000..e0c7ebb
--- /dev/null
+++ b/R/nncross.R
@@ -0,0 +1,243 @@
+#
+#   nncross.R
+#
+#
+#    $Revision: 1.28 $  $Date: 2017/06/05 10:31:58 $
+#
+#  Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2012
+#  Licence: GNU Public Licence >= 2
+
+nncross <- function(X, Y, ...) {
+  UseMethod("nncross")
+}
+
+nncross.default <- function(X, Y, ...) {
+  X <- as.ppp(X, W=boundingbox)
+  nncross(X, Y, ...)
+}
+
+nncross.ppp <- function(X, Y, iX=NULL, iY=NULL,
+                    what = c("dist", "which"),
+                    ...,
+                    k = 1,
+                    sortby=c("range", "var", "x", "y"),
+                    is.sorted.X = FALSE,
+                    is.sorted.Y = FALSE) {
+  stopifnot(is.ppp(Y) || is.psp(Y))
+  sortby <- match.arg(sortby)
+  what   <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE)
+  want.dist  <- "dist" %in% what 
+  want.which <- "which" %in% what
+  want.both  <- want.dist && want.which
+
+  if(!missing(k)) {
+    # k can be a single integer or an integer vector
+    if(length(k) == 0)
+      stop("k is an empty vector")
+    else if(length(k) == 1) {
+      if(k != round(k) || k <= 0)
+        stop("k is not a positive integer")
+    } else {
+      if(any(k != round(k)) || any(k <= 0))
+        stop(paste("some entries of the vector",
+                   sQuote("k"), "are not positive integers"))
+    }
+  }
+  k <- as.integer(k)
+  kmax <- max(k)
+  nk <- length(k)
+
+  # trivial cases
+  nX <- npoints(X)
+  nY <- nobjects(Y)
+  # deal with null cases
+  if(nX == 0)
+    return(as.data.frame(list(dist=matrix(0, nrow=0, ncol=nk),
+                which=matrix(0L, nrow=0, ncol=nk))[what]))
+  if(nY == 0)
+    return(as.data.frame(list(dist=matrix(Inf, nrow=nX, ncol=nk),
+                             which=matrix(NA, nrow=nX, ncol=nk))[what]))
+  
+  # Y is a line segment pattern 
+  if(is.psp(Y)) {
+    if(!identical(k, 1L))
+      stop("Sorry, the case k > 1 is not yet implemented for psp objects")
+    return(ppllengine(X,Y,"distance")[, what])
+  }
+
+  # Y is a point pattern
+  if(is.null(iX) != is.null(iY))
+    stop("If one of iX, iY is given, then both must be given")
+  exclude <- (!is.null(iX) || !is.null(iY))
+  if(exclude) {
+    stopifnot(is.integer(iX) && is.integer(iY))
+    if(length(iX) != nX)
+      stop("length of iX does not match the number of points in X")
+    if(length(iY) != nY)
+      stop("length of iY does not match the number of points in Y")
+  }
+
+  if((is.sorted.X || is.sorted.Y) && !(sortby %in% c("x", "y")))
+     stop(paste("If data are already sorted,",
+                "the sorting coordinate must be specified explicitly",
+                "using sortby = \"x\" or \"y\""))
+
+  # decide whether to sort on x or y coordinate
+  switch(sortby,
+         range = {
+           WY <- as.owin(Y)
+           sortby.y <- (diff(WY$xrange) < diff(WY$yrange))
+         },
+         var = {
+           sortby.y <- (var(Y$x) < var(Y$y))
+         },
+         x={ sortby.y <- FALSE},
+         y={ sortby.y <- TRUE}
+         )
+
+  # The C code expects points to be sorted by y coordinate.
+  if(sortby.y) {
+    Xx <- X$x
+    Xy <- X$y
+    Yx <- Y$x
+    Yy <- Y$y
+  } else {
+    Xx <- X$y
+    Xy <- X$x
+    Yx <- Y$y
+    Yy <- Y$x
+  }
+  # sort only if needed
+  if(!is.sorted.X){
+    oX <- fave.order(Xy)
+    Xx <- Xx[oX]
+    Xy <- Xy[oX]
+    if(exclude) iX <- iX[oX]
+  }
+  if (!is.sorted.Y){
+    oY <- fave.order(Yy)
+    Yx <- Yx[oY]
+    Yy <- Yy[oY]
+    if(exclude) iY <- iY[oY]
+  }
+
+  # number of neighbours that are well-defined
+  kmaxcalc <- min(nY, kmax)
+  
+  if(kmaxcalc == 1) {
+    # ............... single nearest neighbour ..................
+    # call C code
+    nndv <- if(want.dist) numeric(nX) else numeric(1)
+    nnwh <- if(want.which) integer(nX) else integer(1)
+    if(!exclude) iX <- iY <- integer(1)
+
+    huge <- 1.1 * diameter(boundingbox(as.rectangle(X), as.rectangle(Y)))
+
+    z <- .C("nnXinterface",
+            n1=as.integer(nX),
+            x1=as.double(Xx),
+            y1=as.double(Xy),
+            id1=as.integer(iX),
+            n2=as.integer(nY),
+            x2=as.double(Yx),
+            y2=as.double(Yy),
+            id2=as.integer(iY),
+            exclude = as.integer(exclude),
+            wantdist = as.integer(want.dist),
+            wantwhich = as.integer(want.which),
+            nnd=as.double(nndv),
+            nnwhich=as.integer(nnwh),
+            huge=as.double(huge),
+            PACKAGE = "spatstat")
+
+    if(want.which) {
+      nnwcode <- z$nnwhich #sic. C code now increments by 1
+      if(any(uhoh <- (nnwcode == 0))) {
+        warning("NA's produced in nncross()$which")
+        nnwcode[uhoh] <- NA
+      }
+    }
+  
+    # reinterpret in original ordering
+    if(is.sorted.X){
+      if(want.dist) nndv <- z$nnd
+      if(want.which) nnwh <- if(is.sorted.Y) nnwcode else oY[nnwcode]
+    } else {
+      if(want.dist) nndv[oX] <- z$nnd
+      if(want.which) nnwh[oX] <- if(is.sorted.Y) nnwcode else oY[nnwcode]
+    }
+
+    if(want.both) return(data.frame(dist=nndv, which=nnwh))
+    return(if(want.dist) nndv else nnwh)
+
+  } else {
+    # ............... k nearest neighbours ..................
+    # call C code
+    nndv <- if(want.dist) numeric(nX * kmaxcalc) else numeric(1)
+    nnwh <- if(want.which) integer(nX * kmaxcalc) else integer(1)
+    if(!exclude) iX <- iY <- integer(1)
+
+    huge <- 1.1 * diameter(boundingbox(as.rectangle(X), as.rectangle(Y)))
+  
+    z <- .C("knnXinterface",
+            n1=as.integer(nX),
+            x1=as.double(Xx),
+            y1=as.double(Xy),
+            id1=as.integer(iX),
+            n2=as.integer(nY),
+            x2=as.double(Yx),
+            y2=as.double(Yy),
+            id2=as.integer(iY),
+            kmax=as.integer(kmaxcalc),
+            exclude = as.integer(exclude),
+            wantdist = as.integer(want.dist),
+            wantwhich = as.integer(want.which),
+            nnd=as.double(nndv),
+            nnwhich=as.integer(nnwh),
+            huge=as.double(huge),
+            PACKAGE = "spatstat")
+
+    # extract results
+    nnD <- z$nnd
+    nnW <- z$nnwhich
+    # map 0 to NA
+    if(want.which && any(uhoh <- (nnW == 0))) {
+      nnW[uhoh] <- NA
+      if(want.dist) nnD[uhoh] <- Inf
+    }
+    # reinterpret indices in original ordering
+    if(!is.sorted.Y) nnW <- oY[nnW]
+    # reform as matrices
+    NND <- if(want.dist) matrix(nnD, nrow=nX, ncol=kmaxcalc, byrow=TRUE) else 0
+    NNW <- if(want.which) matrix(nnW, nrow=nX, ncol=kmaxcalc, byrow=TRUE) else 0
+    if(!is.sorted.X){
+      # rearrange rows to correspond to original ordering of points
+      if(want.dist) NND[oX, ] <- NND
+      if(want.which) NNW[oX, ] <- NNW
+    }
+    # the return value should correspond to the original vector k
+    if(kmax > kmaxcalc) {
+      # add columns of NA / Inf
+      kextra <- kmax - kmaxcalc
+      if(want.dist)
+        NND <- cbind(NND, matrix(Inf, nrow=nX, ncol=kextra))
+      if(want.which)
+        NNW <- cbind(NNW, matrix(NA_integer_, nrow=nX, ncol=kextra))
+    }
+    if(length(k) < kmax) {
+      # select only the specified columns
+      if(want.dist)
+        NND <- NND[, k, drop=TRUE]
+      if(want.which)
+        NNW <- NNW[, k, drop=TRUE]
+    }
+
+    result <- as.data.frame(list(dist=NND, which=NNW)[what])
+    colnames(result) <- c(if(want.dist) paste0("dist.", k) else NULL,
+                          if(want.which) paste0("which.",k) else NULL)
+    if(ncol(result) == 1)
+      result <- result[, , drop=TRUE]
+    return(result)
+  }
+}
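+
+## Usage sketch (illustrative; runifpoint() generates uniform random patterns):
+##   X <- runifpoint(40); Y <- runifpoint(25)
+##   nncross(X, Y)                      # data frame with columns dist, which
+##   nncross(X, Y, what="dist", k=1:3)  # distances to the 3 nearest Y-points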
+
diff --git a/R/nncross3D.R b/R/nncross3D.R
new file mode 100644
index 0000000..1ab86dd
--- /dev/null
+++ b/R/nncross3D.R
@@ -0,0 +1,231 @@
+#
+#   nncross3D.R
+#
+#    $Revision: 1.8 $  $Date: 2017/06/05 10:31:58 $
+#
+#  Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013
+#  Licence: GNU Public Licence >= 2
+
+nncross.pp3 <- function(X, Y, iX=NULL, iY=NULL,
+                    what = c("dist", "which"),
+                    ...,
+                    k = 1,
+                    sortby=c("range", "var", "x", "y", "z"),
+                    is.sorted.X = FALSE,
+                    is.sorted.Y = FALSE) {
+  stopifnot(is.pp3(Y))
+  sortby <- match.arg(sortby)
+  what   <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE)
+  want.dist  <- "dist" %in% what 
+  want.which <- "which" %in% what
+  want.both  <- want.dist && want.which
+
+  if(!missing(k)) {
+    # k can be a single integer or an integer vector
+    if(length(k) == 0)
+      stop("k is an empty vector")
+    else if(length(k) == 1) {
+      if(k != round(k) || k <= 0)
+        stop("k is not a positive integer")
+    } else {
+      if(any(k != round(k)) || any(k <= 0))
+        stop(paste("some entries of the vector",
+                   sQuote("k"), "are not positive integers"))
+    }
+  }
+  k <- as.integer(k)
+  kmax <- max(k)
+  nk <- length(k)
+
+  # trivial cases
+  nX <- npoints(X)
+  nY <- nobjects(Y)
+  # deal with null cases
+  if(nX == 0)
+    return(as.data.frame(list(dist=matrix(0, nrow=0, ncol=nk),
+                which=matrix(0L, nrow=0, ncol=nk))[what]))
+  if(nY == 0)
+    return(as.data.frame(list(dist=matrix(Inf, nrow=nX, ncol=nk),
+                             which=matrix(NA, nrow=nX, ncol=nk))[what]))
+  
+  if(is.null(iX) != is.null(iY))
+    stop("If one of iX, iY is given, then both must be given")
+  exclude <- (!is.null(iX) || !is.null(iY))
+  if(exclude) {
+    stopifnot(is.integer(iX) && is.integer(iY))
+    if(length(iX) != nX)
+      stop("length of iX does not match the number of points in X")
+    if(length(iY) != nY)
+      stop("length of iY does not match the number of points in Y")
+  }
+
+  if((is.sorted.X || is.sorted.Y) && !(sortby %in% c("x", "y", "z")))
+     stop(paste("If data are already sorted,",
+                "the sorting coordinate must be specified explicitly",
+                "using sortby = \"x\" or \"y\" or \"z\""))
+
+  # decide which coordinate to sort on
+  switch(sortby,
+         range = {
+           s <- sidelengths(as.box3(Y))
+           sortcoord <- c("x", "y", "z")[which.min(s)]
+         },
+         var = {
+           v <- apply(coords(Y), 2, var)
+           sortcoord <- c("x", "y", "z")[which.min(v)]           
+         },
+         x={ sortcoord <- "x" },
+         y={ sortcoord <- "y" },
+         z={ sortcoord <- "z" }
+         )
+
+  # The C code expects points to be sorted by z coordinate.
+  XX <- coords(X)
+  YY <- coords(Y)
+  switch(sortcoord,
+         x = {
+           # rotate x axis to z axis
+           XX <- XX[, c(3,2,1)]
+           YY <- YY[, c(3,2,1)]
+         },
+         y = {
+           # rotate y axis to z axis
+           XX <- XX[, c(3,1,2)]
+           YY <- YY[, c(3,1,2)]
+         },
+         z = { })
+
+  # sort only if needed
+  if(!is.sorted.X){
+    oX <- fave.order(XX[,3])
+    XX <- XX[oX, , drop=FALSE]
+    if(exclude) iX <- iX[oX]
+  }
+  if (!is.sorted.Y){
+    oY <- fave.order(YY[,3])
+    YY <- YY[oY, , drop=FALSE]
+    if(exclude) iY <- iY[oY]
+  }
+
+  # number of neighbours that are well-defined
+  kmaxcalc <- min(nY, kmax)
+  
+  if(kmaxcalc == 1) {
+    # ............... single nearest neighbour ..................
+    # call C code
+    nndv <- if(want.dist) numeric(nX) else numeric(1)
+    nnwh <- if(want.which) integer(nX) else integer(1)
+    if(!exclude) iX <- iY <- integer(1)
+
+    huge <- 1.1 * diameter(bounding.box3(as.box3(X),as.box3(Y)))
+  
+    z <- .C("nnX3Dinterface",
+            n1=as.integer(nX),
+            x1=as.double(XX[,1]),
+            y1=as.double(XX[,2]),
+            z1=as.double(XX[,3]),
+            id1=as.integer(iX),
+            n2=as.integer(nY),
+            x2=as.double(YY[,1]),
+            y2=as.double(YY[,2]),
+            z2=as.double(YY[,3]),
+            id2=as.integer(iY),
+            exclude = as.integer(exclude),
+            wantdist = as.integer(want.dist),
+            wantwhich = as.integer(want.which),
+            nnd=as.double(nndv),
+            nnwhich=as.integer(nnwh),
+            huge=as.double(huge),
+            PACKAGE = "spatstat")
+
+    if(want.which) {
+      # conversion to R indexing is done in C code
+      nnwcode <- z$nnwhich
+      if(any(uhoh <- (nnwcode == 0))) {
+        warning("Internal error: NA's produced in nncross()$which")
+        nnwcode[uhoh] <- NA
+      }
+    }
+  
+    # reinterpret in original ordering
+    if(is.sorted.X){
+      if(want.dist) nndv <- z$nnd
+      if(want.which) nnwh <- if(is.sorted.Y) nnwcode else oY[nnwcode]
+    } else {
+      if(want.dist) nndv[oX] <- z$nnd
+      if(want.which) nnwh[oX] <- if(is.sorted.Y) nnwcode else oY[nnwcode]
+    }
+
+    if(want.both) return(data.frame(dist=nndv, which=nnwh))
+    return(if(want.dist) nndv else nnwh)
+
+  } else {
+    # ............... k nearest neighbours ..................
+    # call C code
+    nndv <- if(want.dist) numeric(nX * kmaxcalc) else numeric(1)
+    nnwh <- if(want.which) integer(nX * kmaxcalc) else integer(1)
+    if(!exclude) iX <- iY <- integer(1)
+    huge <- 1.1 * diameter(bounding.box3(as.box3(X),as.box3(Y)))
+  
+    z <- .C("knnX3Dinterface",
+            n1=as.integer(nX),
+            x1=as.double(XX[,1]),
+            y1=as.double(XX[,2]),
+            z1=as.double(XX[,3]),
+            id1=as.integer(iX),
+            n2=as.integer(nY),
+            x2=as.double(YY[,1]),
+            y2=as.double(YY[,2]),
+            z2=as.double(YY[,3]),
+            id2=as.integer(iY),
+            kmax=as.integer(kmaxcalc),
+            exclude = as.integer(exclude),
+            wantdist = as.integer(want.dist),
+            wantwhich = as.integer(want.which),
+            nnd=as.double(nndv),
+            nnwhich=as.integer(nnwh),
+            huge=as.double(huge),
+            PACKAGE = "spatstat")
+
+    # extract results
+    nnD <- z$nnd
+    nnW <- z$nnwhich
+    # map 0 to NA
+    if(want.which && any(uhoh <- (nnW == 0))) {
+      nnW[uhoh] <- NA
+      if(want.dist) nnD[uhoh] <- Inf
+    }
+    # reinterpret indices in original ordering
+    if(!is.sorted.Y) nnW <- oY[nnW]
+    # reform as matrices
+    NND <- if(want.dist) matrix(nnD, nrow=nX, ncol=kmaxcalc, byrow=TRUE) else 0
+    NNW <- if(want.which) matrix(nnW, nrow=nX, ncol=kmaxcalc, byrow=TRUE) else 0
+    if(!is.sorted.X){
+      # rearrange rows to correspond to original ordering of points
+      if(want.dist) NND[oX, ] <- NND
+      if(want.which) NNW[oX, ] <- NNW
+    }
+    # the return value should correspond to the original vector k
+    if(kmax > kmaxcalc) {
+      # add columns of NA / Inf
+      kextra <- kmax - kmaxcalc
+      if(want.dist)
+        NND <- cbind(NND, matrix(Inf, nrow=nX, ncol=kextra))
+      if(want.which)
+        NNW <- cbind(NNW, matrix(NA_integer_, nrow=nX, ncol=kextra))
+    }
+    if(length(k) < kmax) {
+      # select only the specified columns
+      if(want.dist)
+        NND <- NND[, k, drop=TRUE]
+      if(want.which)
+        NNW <- NNW[, k, drop=TRUE]
+    }
+
+    result <- as.data.frame(list(dist=NND, which=NNW)[what])
+    if(ncol(result) == 1)
+      result <- result[, , drop=TRUE]
+    return(result)
+  }
+}
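+
+## Usage sketch for the 3D case (illustrative; runifpoint3() generates
+## uniform random patterns in a three-dimensional box):
+##   X <- runifpoint3(40); Y <- runifpoint3(25)
+##   nncross(X, Y, what="which")  # index of nearest Y-point for each X-point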
+
diff --git a/R/nndensity.R b/R/nndensity.R
new file mode 100644
index 0000000..fa4aea9
--- /dev/null
+++ b/R/nndensity.R
@@ -0,0 +1,36 @@
+#
+#  nndensity.R
+#
+#  Density estimation based on nn distance
+#
+#  $Revision: 1.3 $  $Date: 2014/10/24 00:22:30 $
+#
+
+nndensity <- function(x, ...) {
+  UseMethod("nndensity")
+}
+
+nndensity.ppp <- function(x, k, ..., verbose=TRUE) {
+  if(missing(k) || is.null(k)) {
+    k <- round(sqrt(npoints(x)))
+    if(verbose) cat(paste0("k=", k, "\n"))
+  } else if(k == 1) warning("k=1 will produce strange results")
+  # distance to k-th nearest neighbour
+  D <- nnmap(x, k=k, what="dist", ...)
+  # area searched
+  A <- eval.im(pi * D^2)
+  # distance to boundary
+  B <- bdist.pixels(as.owin(D))
+  # handle edge effects
+  edge <- solutionset(B < D)
+  # centres of all pixels where edge effect occurs
+  xy <- rasterxy.mask(edge, drop=TRUE)
+  # corresponding values of distance
+  rr <- D[edge, drop=TRUE]
+  # compute actual search area
+  X <- as.ppp(xy, W=as.owin(x), check=FALSE)
+  A[edge] <- discpartarea(X, matrix(rr, ncol=1))
+  # finally compute intensity estimate
+  L <- eval.im(k/A)
+  return(L)
+}
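+
+## The estimate computed here is lambda(u) = k / area(b(u, D_k(u)) intersect W):
+## the disc whose radius is the distance to the k-th nearest data point,
+## clipped to the window near the boundary. Usage sketch (illustrative):
+##   plot(nndensity(cells, k=5))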
diff --git a/R/nndist.R b/R/nndist.R
new file mode 100644
index 0000000..2f9e799
--- /dev/null
+++ b/R/nndist.R
@@ -0,0 +1,365 @@
+#
+#   nndist.R
+#
+#   nearest neighbour distances (nndist) and identifiers (nnwhich)
+#
+#   $Revision: 1.8 $ $Date: 2017/06/05 10:31:58 $
+#
+
+nndist <- function(X, ...) {
+  UseMethod("nndist")
+}
+
+nndist.ppp <- local({
+
+  nndist.ppp <- function(X, ..., k=1, by=NULL, method="C") {
+    verifyclass(X, "ppp")
+    trap.extra.arguments(..., .Context="In nndist.ppp")
+    if(is.null(by)) # usual case
+      return(nndist.default(X$x, X$y, k=k, by=by, method=method))
+    return(nndistby(X, k=k, by=by))
+  }
+
+  nndistby <- function(X, k, by) {
+    # split by factor 
+    idX <- seq_len(npoints(X))
+    Y <- split(X %mark% idX, f=by, un=FALSE)
+    distY <- lapply(Y, nndistsub, XX=X, iX=idX, k=k)
+    result <- do.call(cbind, distY)
+    return(result)
+  }
+
+  nndistsub <- function(Z, XX, iX, k) {
+    nncross(XX, Z, iX=iX, iY=marks(Z), k=k, what="dist")
+  }
+
+  nndist.ppp
+})
+
+nndist.default <-
+  function(X, Y=NULL, ..., k=1, by=NULL, method="C")
+{
+	#  computes the vector of nearest-neighbour distances 
+	#  for the pattern of points (x[i],y[i])
+	#
+  xy <- xy.coords(X,Y)[c("x","y")]
+  x <- xy$x
+  y <- xy$y
+
+  # validate
+  n <- length(x)
+  if(length(y) != n)
+    stop("lengths of x and y do not match")
+  
+  # other arguments ignored
+  trap.extra.arguments(..., .Context="In nndist.default")
+
+  # split by factor ?
+  if(!is.null(by)) {
+    X <- as.ppp(xy, W=boundingbox)
+    return(nndist(X, by=by, k=k))
+  }
+  
+  # k can be a single integer or an integer vector
+  if(length(k) == 0)
+    stop("k is an empty vector")
+  else if(length(k) == 1) {
+    if(k != round(k) || k <= 0)
+      stop("k is not a positive integer")
+  } else {
+    if(any(k != round(k)) || any(k <= 0))
+      stop(paste("some entries of the vector",
+           sQuote("k"), "are not positive integers"))
+  }
+  k <- as.integer(k)
+  kmax <- max(k)
+
+  # trivial cases
+  if(n <= 1) {
+    # empty pattern => return numeric(0)
+    # or pattern with only 1 point => return Inf
+    nnd <- matrix(Inf, nrow=n, ncol=kmax)
+    nnd <- nnd[,k, drop=TRUE]
+    return(nnd)
+  }
+  
+  # number of neighbours that are well-defined
+  kmaxcalc <- min(n-1, kmax)
+
+  # calculate k-nn distances for k <= kmaxcalc
+  
+  if(kmaxcalc == 1) {
+    # calculate nearest neighbour distance only
+    switch(method,
+         interpreted={
+           #  matrix of squared distances between all pairs of points
+           sq <- function(a, b) { (a-b)^2 }
+           squd <-  outer(x, x, sq) + outer(y, y, sq)
+           #  reset diagonal to a large value so it is excluded from minimum
+           diag(squd) <- Inf
+           #  nearest neighbour distances
+           nnd <- sqrt(apply(squd,1,min))
+         },
+         C={
+           nnd<-numeric(n)
+           o <- fave.order(y)
+           big <- sqrt(.Machine$double.xmax)
+           z<- .C("nndistsort",
+                  n= as.integer(n),
+                  x= as.double(x[o]), y= as.double(y[o]), nnd= as.double(nnd),
+                  as.double(big),
+                  PACKAGE = "spatstat")
+           nnd[o] <- z$nnd
+         },
+         stop(paste("Unrecognised method", sQuote(method)))
+         )
+  } else {
+    # case kmaxcalc > 1
+    switch(method,
+           interpreted={
+             if(n <= 1000) {
+               # form n x n matrix of squared distances
+               D2 <- pairdist.default(x, y, method=method, squared=TRUE)
+               # find k'th smallest squared distance
+               diag(D2) <- Inf
+               NND2 <- t(apply(D2, 1, sort))[, 1:kmaxcalc]
+               nnd <- sqrt(NND2)
+             } else {
+               # avoid creating huge matrix
+               # handle one row of D at a time
+               NND2 <- matrix(numeric(n * kmaxcalc), nrow=n, ncol=kmaxcalc)
+               for(i in seq_len(n)) {
+                 D2i <- (x - x[i])^2 + (y - y[i])^2
+                 D2i[i] <- Inf
+                 NND2[i,] <- sort(D2i)[1:kmaxcalc]
+               }
+               nnd <- sqrt(NND2)
+             }
+           },
+           C={
+             nnd<-numeric(n * kmaxcalc)
+             o <- fave.order(y)
+             big <- sqrt(.Machine$double.xmax)
+             z<- .C("knndsort",
+                    n    = as.integer(n),
+                    kmax = as.integer(kmaxcalc),
+                    x    = as.double(x[o]),
+                    y    = as.double(y[o]),
+                    nnd  = as.double(nnd),
+                    huge = as.double(big),
+                    PACKAGE = "spatstat")
+             nnd <- matrix(nnd, nrow=n, ncol=kmaxcalc)
+             nnd[o, ] <- matrix(z$nnd, nrow=n, ncol=kmaxcalc, byrow=TRUE)
+           },
+           stop(paste("Unrecognised method", sQuote(method)))
+           )
+  }
+
+  # post-processing
+  if(kmax > kmaxcalc) {
+    # add columns of Inf
+    infs <- matrix(Inf, nrow=n, ncol=kmax-kmaxcalc)
+    nnd <- cbind(nnd, infs)
+  }
+
+  if(kmax > 1)
+    colnames(nnd) <- paste0("dist.", 1:kmax)
+  
+  if(length(k) < kmax) {
+    # select only the specified columns
+    nnd <- nnd[, k, drop=TRUE]
+  }
+  
+  return(nnd)
+}
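+
+## Usage sketch (illustrative; 'cells' is a standard spatstat dataset):
+##   nndist(cells)         # nearest-neighbour distance of each point
+##   nndist(cells, k=2)    # distance to the second-nearest neighbour
+##   nndist(cells, k=1:3)  # matrix with columns dist.1, dist.2, dist.3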
+
+
+nnwhich <- function(X, ...) {
+  UseMethod("nnwhich")
+}
+
+nnwhich.ppp <- local({
+
+  nnwhich.ppp <- function(X, ..., k=1, by=NULL, method="C") {
+    verifyclass(X, "ppp")
+    trap.extra.arguments(..., .Context="In nnwhich.ppp")
+    if(is.null(by))
+      return(nnwhich.default(X$x, X$y, k=k, method=method))
+    return(nnwhichby(X, k=k, by=by))
+  }
+
+  nnwhichby <- function(X, k, by) {
+    # split by factor 
+    idX <- seq_len(npoints(X))
+    Y <- split(X %mark% idX, f=by, un=FALSE)
+    whichY <- lapply(Y, nnwhichsub, XX=X, iX=idX, k=k)
+    result <- do.call(cbind, whichY)
+    return(result)
+  }
+
+  nnwhichsub <- function(Z, XX, iX, k) {
+    # marks(Z) gives original serial numbers of subset Z
+    iY <- marks(Z)
+    Zid <- nncross(XX, Z, iX=iX, iY=iY, k=k, what="which")
+    nk <- length(k)
+    if(nk == 1) {
+      Yid <- iY[Zid]
+    } else {
+      Zid <- as.vector(as.matrix(Zid))
+      Yid <- iY[Zid]
+      Yid <- data.frame(which=matrix(Yid, ncol=nk))
+    }
+    return(Yid)
+  }
+
+  nnwhich.ppp
+})
+
+
+nnwhich.default <-
+  function(X, Y=NULL, ..., k=1, by=NULL, method="C")
+{
+	#  identifies nearest neighbour of each point in
+	#  the pattern of points (x[i],y[i])
+	#
+  xy <- xy.coords(X,Y)[c("x","y")]
+  x <- xy$x
+  y <- xy$y
+
+  # validate
+  n <- length(x)
+  if(length(y) != n)
+    stop("lengths of x and y do not match")
+  
+  # other arguments ignored
+  trap.extra.arguments(..., .Context="In nnwhich.default")
+
+  # split by factor ?
+  if(!is.null(by)) {
+    X <- as.ppp(xy, W=boundingbox)
+    return(nnwhich(X, by=by, k=k))
+  }
+  
+  # k can be a single integer or an integer vector
+  if(length(k) == 0)
+    stop("k is an empty vector")
+  else if(length(k) == 1) {
+    if(k != round(k) || k <= 0)
+      stop("k is not a positive integer")
+  } else {
+    if(any(k != round(k)) || any(k <= 0))
+      stop(paste("some entries of the vector",
+           sQuote("k"), "are not positive integers"))
+  }
+  k <- as.integer(k)
+  kmax <- max(k)
+
+  # special cases
+  if(n <= 1) {
+    # empty pattern => return integer(0)
+    # or pattern with only 1 point => return NA
+    nnw <- matrix(as.integer(NA), nrow=n, ncol=kmax)
+    nnw <- nnw[,k, drop=TRUE]
+    return(nnw)
+  }
+
+  # number of neighbours that are well-defined
+  kmaxcalc <- min(n-1, kmax)
+
+  # identify k-nn for k <= kmaxcalc
+
+  if(kmaxcalc == 1) {
+    # identify nearest neighbour only
+    switch(method,
+           interpreted={
+             #  matrix of squared distances between all pairs of points
+             sq <- function(a, b) { (a-b)^2 }
+             squd <-  outer(x, x, sq) + outer(y, y, sq)
+             #  reset diagonal to a large value so it is excluded from minimum
+             diag(squd) <- Inf
+             #  nearest neighbours
+             nnw <- apply(squd,1,which.min)
+           },
+           C={
+             nnw <- integer(n)
+             o <- fave.order(y)
+             big <- sqrt(.Machine$double.xmax)
+             z<- .C("nnwhichsort",
+                    n = as.integer(n),
+                    x = as.double(x[o]),
+                    y = as.double(y[o]),
+                    nnwhich = as.integer(nnw),
+                    huge = as.double(big),
+                    PACKAGE = "spatstat")
+             witch <- z$nnwhich # sic 
+             if(any(witch <= 0))
+               stop("Internal error: non-positive index returned from C code")
+             if(any(witch > n))
+               stop("Internal error: index returned from C code exceeds n")
+             nnw[o] <- o[witch]
+           },
+           stop(paste("Unrecognised method", sQuote(method)))
+           )
+  } else {
+    # case kmaxcalc > 1
+    switch(method,
+           interpreted={
+             if(n <= 1000) {
+               # form n x n matrix of squared distances
+               D2 <- pairdist.default(x, y, method=method, squared=TRUE)
+               # find k'th smallest squared distance
+               diag(D2) <- Inf
+               nnw <- t(apply(D2, 1, fave.order))[, 1:kmaxcalc]
+             } else {
+               # avoid creating huge matrix
+               # handle one row of D at a time
+               nnw <- matrix(as.integer(NA), nrow=n, ncol=kmaxcalc)
+               for(i in seq_len(n)) {
+                 D2i <- (x - x[i])^2 + (y - y[i])^2
+                 D2i[i] <- Inf
+                 nnw[i,] <- fave.order(D2i)[1:kmaxcalc]
+               }      
+             }
+           },
+           C={
+             nnw <- matrix(integer(n * kmaxcalc), nrow=n, ncol=kmaxcalc)
+             o <- fave.order(y)
+             big <- sqrt(.Machine$double.xmax)
+             z<- .C("knnsort",
+                    n = as.integer(n),
+                    kmax = as.integer(kmaxcalc),
+                    x = as.double(x[o]),
+                    y = as.double(y[o]),
+                    nnd = as.double(numeric(n * kmaxcalc)),
+                    nnwhich = as.integer(nnw),
+                    huge = as.double(big),
+                    PACKAGE = "spatstat")
+             witch <- z$nnwhich # sic
+             witch <- matrix(witch, nrow=n, ncol=kmaxcalc, byrow=TRUE)
+             if(any(witch <= 0))
+               stop("Internal error: non-positive index returned from C code")
+             if(any(witch > n))
+               stop("Internal error: index returned from C code exceeds n")
+             # convert back to original ordering
+             nnw[o,] <- matrix(o[witch], nrow=n, ncol=kmaxcalc)
+           },
+           stop(paste("Unrecognised method", sQuote(method)))
+           )
+  }
+  
+  # post-processing
+  if(kmax > kmaxcalc) {
+    # add columns of NA's
+    nas <- matrix(NA_integer_, nrow=n, ncol=kmax-kmaxcalc)
+    nnw <- cbind(nnw, nas)
+  }
+
+  if(kmax > 1)
+    colnames(nnw) <- paste0("which.", 1:kmax)
+
+  if(length(k) < kmax) {
+    # select only the specified columns
+    nnw <- nnw[, k, drop=TRUE]
+  }
+  return(nnw)
+}
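+
+## Usage sketch (illustrative): nnwhich() is consistent with nndist(), in
+## that the reported neighbour attains the reported distance:
+##   w <- nnwhich(cells)
+##   d <- nndist(cells)
+##   all.equal(d, pairdist(cells)[cbind(seq_len(npoints(cells)), w)])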
diff --git a/R/nndistlpp.R b/R/nndistlpp.R
new file mode 100755
index 0000000..1be5772
--- /dev/null
+++ b/R/nndistlpp.R
@@ -0,0 +1,654 @@
+#
+# nndistlpp.R
+#
+#  $Revision: 1.20 $ $Date: 2017/06/05 10:31:58 $
+#
+# Methods for nndist, nnwhich, nncross for linear networks
+#
+# nndist.lpp
+#   Calculates the nearest neighbour distances in the shortest-path metric
+#   for a point pattern on a linear network.
+
+nndist.lpp <- function(X, ..., k=1, method="C") {
+  stopifnot(inherits(X, "lpp"))
+  stopifnot(method %in% c("C", "interpreted"))
+  n <- npoints(X)
+  k <- as.integer(k)
+  stopifnot(all(k > 0))
+  kmax <- max(k)
+
+  L <- as.linnet(X)
+  if(is.null(br <- L$boundingradius) || is.infinite(br)) {
+    # network may be disconnected
+    lab <- connected(L, what="labels")
+    if(length(levels(lab)) > 1L) {
+      # network is disconnected
+      result <- matrix(Inf, n, length(k))
+      # handle each connected component separately
+      subsets <- split(seq_len(nvertices(L)), lab)
+      for(i in seq_along(subsets)) {
+        Xi <- thinNetwork(X, retainvertices=subsets[[i]])
+        relevant <- attr(Xi, "retainpoints")      
+        result[relevant, ] <- nndist.lpp(Xi, k=k, method=method)
+      }
+      return(result)
+    }
+  }
+  
+  toomany <- (kmax >= n-1)
+  if(toomany) {
+    ## not enough points to define kmax nearest neighbours
+    result <- matrix(Inf, nrow=n, ncol=kmax)
+    if(n <= 1) return(result[,k,drop=TRUE])
+    ## reduce kmax to feasible value
+    kmax <- n-1
+    kuse <- k[k <= kmax]
+  } else {
+    kuse <- k
+  }
+  
+  Y <- as.ppp(X)
+  sparse <- identical(L$sparse, TRUE)
+
+  ## find nearest segment for each point
+  ## This is given by local coordinates, if available (spatstat >= 1.28-0)
+  loco <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE)
+  pro <- if(!is.null(seg <- loco$seg)) seg else nearestsegment(X, L$lines)
+  
+  if(method == "interpreted") {
+    ## interpreted code 
+    D <- pairdist(X, method="interpreted")
+    diag(D) <- Inf
+    ans <- if(kmax == 1) apply(D, 1, min) else
+           t(apply(D, 1, orderstats, k=kuse))[,,drop=TRUE]
+  } else if(!sparse && kmax == 1) {
+    # C code for non-sparse network
+    Lseg  <- L$lines
+    Lvert <- L$vertices
+    from  <- L$from
+    to    <- L$to
+    dpath <- L$dpath
+    # convert indices to start at 0
+    from0 <- from - 1L
+    to0   <- to - 1L
+    segmap <- pro - 1L
+    nseg <- length(from0)
+    # upper bound on interpoint distance
+    huge <- max(dpath) + 2 * max(lengths.psp(Lseg))
+    # space for result
+    ans <- double(n)
+    # call C
+    zz <- .C("linnndist",
+             np = as.integer(n),
+             xp = as.double(Y$x),
+             yp = as.double(Y$y),
+             nv = as.integer(Lvert$n),
+             xv = as.double(Lvert$x),
+             yv = as.double(Lvert$y),
+             ns = as.integer(nseg),
+             from = as.integer(from0),
+             to = as.integer(to0),
+             dpath = as.double(dpath),
+             segmap = as.integer(segmap),
+             huge = as.double(huge),
+             answer = as.double(ans),
+             PACKAGE = "spatstat")
+    ans <- zz$answer
+  } else if(spatstat.options('Cnndistlpp')) {
+    ## use new C routine
+    Lseg  <- L$lines
+    Lvert <- L$vertices
+    from  <- L$from
+    to    <- L$to
+    ##
+    nseg <- length(from)
+    seglen <- lengths.psp(Lseg)
+    ## convert indices to start at 0
+    from0 <- from - 1L
+    to0   <- to - 1L
+    segmap <- pro - 1L
+    tp <- loco$tp
+    ## sort by segment index
+    oo <- order(segmap, tp)
+    segmap <- segmap[oo]
+    tp <- tp[oo]
+    # upper bound on interpoint distance
+    huge <- sum(seglen)
+    #' numerical tolerance
+    tol <- max(.Machine$double.eps,
+               diameter(Frame(L))/2^20)
+    #'
+    kmax1 <- kmax + 1L
+    zz <- .C("linknnd",
+             kmax = as.integer(kmax1),
+             np = as.integer(n),
+             sp = as.integer(segmap),
+             tp = as.double(tp), 
+             nv = as.integer(Lvert$n),
+             ns = as.integer(nseg),
+             from = as.integer(from0),
+             to = as.integer(to0),
+             seglen = as.double(seglen),
+             huge = as.double(huge),
+             tol = as.double(tol),
+             nndist = as.double(numeric(n * kmax1)),
+             nnwhich = as.integer(integer(n * kmax1)),
+             PACKAGE = "spatstat")
+    ans <- matrix(, n, kmax1)
+    ans[oo, ] <- matrix(zz$nndist, n, kmax1, byrow=TRUE)
+    # drop first column which is zero corresponding to j = i
+    ans <- ans[, -1, drop=FALSE]
+    colnames(ans) <- paste0("dist.", 1:ncol(ans))
+    ans <- ans[,kuse]
+  } else {    
+    ## use fast code for nncross
+    ans <- nncross(X, X, what="dist", k=kuse+1)
+    if(is.matrix(ans) || is.data.frame(ans))
+      colnames(ans) <- paste0("dist.", kuse)
+  }
+  if(!is.null(dim(ans))) {
+    ans <- as.matrix(ans)
+    rownames(ans) <- NULL
+  } 
+  if(!toomany)
+    return(ans)
+  result[, kuse] <- as.matrix(ans)
+  colnames(result) <- paste0("dist.", 1:ncol(result))
+  return(result[,k])
+}
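+
+## Usage sketch (illustrative; 'simplenet' is a standard spatstat network
+## and runiflpp() generates uniform random points on it):
+##   X <- runiflpp(20, simplenet)
+##   nndist(X)        # shortest-path nearest-neighbour distances
+##   nndist(X, k=2)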
+
+# nnwhich.lpp
+# Identifies the nearest neighbours in the shortest-path metric
+# for a point pattern on a linear network.
+#
+
+nnwhich.lpp <- function(X, ..., k=1, method="C") {
+  stopifnot(inherits(X, "lpp"))
+  stopifnot(method %in% c("C", "interpreted"))
+
+  k <- as.integer(k)
+  stopifnot(all(k > 0))
+  kmax <- max(k)
+
+  n <- npoints(X)
+
+  L <- as.linnet(X)
+  if(is.null(br <- L$boundingradius) || is.infinite(br)) {
+    # network may be disconnected
+    lab <- connected(L, what="labels")
+    if(length(levels(lab)) > 1L) {
+      # network is disconnected
+      result <- matrix(NA_integer_, n, length(k))
+      # handle each connected component separately
+      subsets <- split(seq_len(nvertices(L)), lab)
+      for(i in seq_along(subsets)) {
+        Xi <- thinNetwork(X, retainvertices=subsets[[i]])
+        relevant <- attr(Xi, "retainpoints")      
+        result[relevant, ] <- nnwhich.lpp(Xi, k=k, method=method)
+      }
+      return(result)
+    }
+  }
+  
+  toomany <- (kmax >= n-1)
+  if(toomany) {
+    ## not enough points to define kmax nearest neighbours
+    result <- matrix(NA_integer_, nrow=n, ncol=kmax)
+    if(n <= 1) return(result[,k,drop=TRUE])
+    ## reduce kmax to feasible value
+    kmax <- n-1
+    kuse <- k[k <= kmax]
+  } else {
+    kuse <- k
+  }
+  
+  #
+  Y <- as.ppp(X)
+  sparse <- identical(L$sparse, TRUE)
+  
+  ## find nearest segment for each point
+  ## This is given by local coordinates, if available (spatstat >= 1.28-0)
+  loco <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE)
+  pro <- if(!is.null(seg <- loco$seg)) seg else nearestsegment(X, L$lines)
+
+  if(method == "interpreted") {
+    D <- pairdist(X, method="interpreted")
+    diag(D) <- Inf
+    nnw <- if(kmax == 1) apply(D, 1, which.min) else
+           t(apply(D, 1, orderwhich, k=kuse))[,,drop=TRUE]
+  } else if(!sparse && kmax == 1) {
+    # C code for non-sparse network
+    ##
+    Lseg  <- L$lines
+    Lvert <- L$vertices
+    from  <- L$from
+    to    <- L$to
+    dpath <- L$dpath
+    ## convert indices to start at 0
+    from0 <- from - 1L
+    to0   <- to - 1L
+    segmap <- pro - 1L
+    nseg <- length(from0)
+    # upper bound on interpoint distance
+    huge <- max(dpath) + 2 * max(lengths.psp(Lseg))
+    # space for result
+    nnd <- double(n)
+    nnw <- integer(n)
+    # call C
+    zz <- .C("linnnwhich",
+             np = as.integer(n),
+             xp = as.double(Y$x),
+             yp = as.double(Y$y),
+             nv = as.integer(Lvert$n),
+             xv = as.double(Lvert$x),
+             yv = as.double(Lvert$y),
+             ns = as.integer(nseg),
+             from = as.integer(from0),
+             to = as.integer(to0),
+             dpath = as.double(dpath),
+             segmap = as.integer(segmap),
+             huge = as.double(huge),
+             nndist = as.double(nnd),
+             nnwhich = as.integer(nnw),
+             PACKAGE = "spatstat")
+    # convert C indexing to R indexing
+    nnw <- zz$nnwhich + 1L
+    # any zeroes occur if points have no neighbours.
+    nnw[nnw == 0] <- NA
+  } else if(spatstat.options('Cnndistlpp')) {
+    ## use new C routine
+    Lseg  <- L$lines
+    Lvert <- L$vertices
+    from  <- L$from
+    to    <- L$to
+    ##
+    nseg <- length(from)
+    seglen <- lengths.psp(Lseg)
+    ## convert indices to start at 0
+    from0 <- from - 1L
+    to0   <- to - 1L
+    segmap <- pro - 1L
+    tp <- loco$tp
+    ## sort by segment index
+    oo <- order(segmap, tp)
+    segmap <- segmap[oo]
+    tp <- tp[oo]
+    # upper bound on interpoint distance
+    huge <- sum(seglen)
+    #' numerical tolerance
+    tol <- max(.Machine$double.eps,
+               diameter(Frame(L))/2^20)
+    #'
+    kmax1 <- kmax + 1L
+    zz <- .C("linknnd",
+             kmax = as.integer(kmax1),
+             np = as.integer(n),
+             sp = as.integer(segmap),
+             tp = as.double(tp), 
+             nv = as.integer(Lvert$n),
+             ns = as.integer(nseg),
+             from = as.integer(from0),
+             to = as.integer(to0),
+             seglen = as.double(seglen),
+             huge = as.double(huge),
+             tol = as.double(tol),
+             nndist = as.double(numeric(n * kmax1)),
+             nnwhich = as.integer(integer(n * kmax1)),
+             PACKAGE = "spatstat")
+    nnw <- matrix(, n, kmax1)
+    nnw[oo, ] <- matrix(oo[zz$nnwhich + 1L], n, kmax1, byrow=TRUE)
+    # drop first column which is j = i
+    nnw <- nnw[, -1, drop=FALSE]
+    colnames(nnw) <- paste0("which.", 1:ncol(nnw))
+    nnw <- nnw[,kuse]
+  } else {
+    ## use fast code for nncross
+    nnw <- nncross(X, X, what="which", k=kuse+1)
+    if(is.matrix(nnw) || is.data.frame(nnw))
+      colnames(nnw) <- paste0("which.", kuse)
+  }
+  if(!is.null(dim(nnw))) {
+    nnw <- as.matrix(nnw)
+    rownames(nnw) <- NULL
+  }
+  if(!toomany)
+    return(nnw)
+  result[, kuse] <- as.matrix(nnw)
+  colnames(result) <- paste0("which.", 1:ncol(result))
+  return(result[,k])
+}
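+
+## Usage sketch (illustrative):
+##   X <- runiflpp(20, simplenet)
+##   nnwhich(X)       # neighbour indices in the shortest-path metric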
+
+# nncross.lpp
+# Identifies the nearest neighbours in the shortest-path metric
+# from one point pattern on a linear network to ANOTHER pattern
+# on the SAME network.
+#
+
+nncross.lpp <- local({
+
+  nncross.lpp <- function(X, Y, iX=NULL, iY=NULL,
+                          what = c("dist", "which"), ..., k=1, method="C") {
+  stopifnot(inherits(X, "lpp"))
+  stopifnot(inherits(Y, "lpp"))
+  what   <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE)
+  stopifnot(method %in% c("C", "interpreted"))
+  if(is.null(iX) != is.null(iY))
+    stop("If one of iX, iY is given, then both must be given")
+  exclude <- (!is.null(iX) || !is.null(iY))
+
+  check <- resolve.defaults(list(...), list(check=TRUE))$check
+  if(check && !identical(as.linnet(X, sparse=TRUE),
+                         as.linnet(Y, sparse=TRUE)))
+    stop("X and Y are on different linear networks")
+
+  # internal use only
+  format <- resolve.defaults(list(...), list(format="data.frame"))$format
+
+  nX <- npoints(X)
+  nY <- npoints(Y)
+
+  L <- domain(X)
+  if(is.null(br <- L$boundingradius) || is.infinite(br)) {
+    # network may be disconnected
+    lab <- connected(L, what="labels")
+    if(length(levels(lab)) > 1L) {
+      # network is disconnected
+      # handle each connected component separately
+      subsets <- split(seq_len(nvertices(L)), lab)
+      nndistmat <- if("dist" %in% what) matrix(Inf, nX, length(k)) else NULL
+      nnwhichmat <-
+         if("which" %in% what) matrix(NA_integer_, nX, length(k)) else NULL
+      for(i in seq_along(subsets)) {
+        subi <- subsets[[i]]
+        Xi <- thinNetwork(X, retainvertices=subi)
+        useX <- attr(Xi, "retainpoints")      
+        Yi <- thinNetwork(Y, retainvertices=subi)
+        useY <- attr(Yi, "retainpoints")
+        z <- nncross.lpp(Xi, Yi,
+                         iX=iX[useX], iY=iY[useY],
+                         what=what, k=k, method=method,
+                         format="list")
+        if("dist" %in% what)
+          nndistmat[useX, ] <- z$dist
+        if("which" %in% what)
+          nnwhichmat[useX, ] <- which(useY)[z$which]
+      }
+      result <- list(dist=nndistmat, which=nnwhichmat)[what]
+      if(format == "data.frame")
+        result <- as.data.frame(result)[,,drop=TRUE]
+      return(result)
+    }
+  }
+
+  koriginal <- k <- as.integer(k)
+  stopifnot(all(k > 0))
+  kmax <- max(k)
+  if(exclude) {
+    kmax <- kmax+1
+    k <- 1:kmax
+  }
+  
+  toomany <- (kmax > nY)
+  if(toomany) {
+    paddist <- matrix(Inf, nX, kmax)
+    padwhich <- matrix(NA_integer_, nX, kmax)
+    kmax <- nY
+    kuse <- k[k <= kmax]
+  } else {
+    kuse <- k
+  }
+
+  if(length(kuse) == 0) {
+    # None of the required values are defined
+    nnd <- paddist
+    nnw <- padwhich
+    maxk <- max(k)
+    colnames(nnd) <- paste0("dist.", seq_len(maxk))
+    colnames(nnw) <- paste0("which.", seq_len(maxk))
+    nnd <- nnd[,k,drop=TRUE]
+    nnw <- nnw[,k,drop=TRUE]
+    result <- list(dist=nnd, which=nnw)[what]
+    if(format == "data.frame")
+      result <- as.data.frame(result)[,,drop=TRUE]
+    return(result)
+  }
+  
+  need.dist <- ("dist" %in% what) || exclude
+  need.which <- ("which" %in% what) || exclude
+  
+  fast <- (method == "C") && spatstat.options("Cnncrosslpp")
+
+  if(!fast) {
+    ## require dpath matrix
+    Xsparse <- identical(domain(X)$sparse, TRUE)
+    Ysparse <- identical(domain(Y)$sparse, TRUE)
+    L <- if(!Xsparse && Ysparse) as.linnet(X) else
+         if(Xsparse && !Ysparse) as.linnet(Y) else
+         as.linnet(X, sparse=FALSE)
+  } else L <- as.linnet(X)
+  #
+  nX <- npoints(X)
+  nY <- npoints(Y)
+  P <- as.ppp(X)
+  Q <- as.ppp(Y)
+  #
+  Lvert <- L$vertices
+  from  <- L$from
+  to    <- L$to
+  if(fast) {
+    seglengths <- lengths.psp(as.psp(L))
+  } else {
+    dpath <- L$dpath
+  }
+  
+  # deal with null cases
+  if(nX == 0)
+    return(data.frame(dist=numeric(0), which=integer(0))[, what])
+  if(nY == 0)
+    return(data.frame(dist=rep(Inf, nX), which=rep(NA_integer_, nX))[, what])
+
+  # find nearest segment for each point
+  Xcoords <- coords(X)
+  Ycoords <- coords(Y)
+  Xpro <- Xcoords$seg
+  Ypro <- Ycoords$seg
+
+  # handle serial numbers
+  if(exclude) {
+    stopifnot(is.integer(iX) && is.integer(iY))
+    if(length(iX) != nX)
+      stop("length of iX does not match the number of points in X")
+    if(length(iY) != nY)
+      stop("length of iY does not match the number of points in Y")
+  }
+
+  if(method == "interpreted") {
+    ## interpreted code
+    D <- crossdist(X, Y, method="interpreted")
+    if(exclude)
+      D[outer(iX, iY, "==")] <- Inf
+    nnd <- nnw <- NULL
+    if(need.dist) {
+      nnd <- if(kmax == 1) apply(D, 1, min) else
+             t(apply(D, 1, orderstats, k=kuse))[,,drop=TRUE]
+    }
+    if(need.which) {
+      nnw <- if(kmax == 1) apply(D, 1, which.min) else
+             t(apply(D, 1, orderwhich, k=kuse))[,,drop=TRUE]
+    } 
+  } else {
+    ## C code
+    ## convert indices to start at 0
+    from0 <- from - 1L
+    to0   <- to - 1L
+    nseg <- length(from0)
+    Xsegmap <- Xpro - 1L
+    Ysegmap <- Ypro - 1L
+    ## upper bound on interpoint distance
+    huge <- if(!fast) {
+      max(dpath) + 2 * diameter(Frame(L))
+    } else {
+      sum(seglengths)
+    }
+    ## space for result
+    nnd <- double(nX * kmax)
+    nnw <- integer(nX * kmax)
+    ## call C
+    if(fast) {
+      ## experimental faster code
+      ooX <- order(Xsegmap)
+      ooY <- order(Ysegmap)
+      tol <- max(.Machine$double.eps,
+                 diameter(Frame(L))/2^20)
+      if(kmax > 1) {
+        zz <- .C("linknncross",
+                 kmax = as.integer(kmax),
+                 np = as.integer(nX),
+                 sp = as.integer(Xsegmap[ooX]),
+                 tp = as.double(Xcoords$tp[ooX]),
+                 nq = as.integer(nY),
+                 sq = as.integer(Ysegmap[ooY]),
+                 tq = as.double(Ycoords$tp[ooY]),
+                 nv = as.integer(Lvert$n),
+                 ns = as.integer(nseg),
+                 from = as.integer(from0),
+                 to = as.integer(to0),
+                 seglen = as.double(seglengths), 
+                 huge = as.double(huge),
+                 tol = as.double(tol), 
+                 nndist = as.double(nnd), 
+                 nnwhich = as.integer(nnw),
+                 PACKAGE = "spatstat")
+        zznd <- matrix(zz$nndist, ncol=kmax, byrow=TRUE)
+        zznw <- matrix(zz$nnwhich + 1L, ncol=kmax, byrow=TRUE)
+        if(any(notfound <- (zznw == 0))) {
+          zznd[notfound] <- NA
+          zznw[notfound] <- NA
+        }
+        nnd <- matrix(nnd, nX, kmax)
+        nnw <- matrix(nnw, nX, kmax)
+        nnd[ooX, ] <- zznd
+        nnw[ooX, ] <- ooY[zznw]
+        colnames(nnd) <- colnames(nnw) <- seq_len(kmax)
+        if(!identical(kuse, seq_len(kmax))) {
+          nnd <- nnd[,kuse,drop=FALSE]
+          nnw <- nnw[,kuse,drop=FALSE]
+          if(length(kuse) == 1) {
+            colnames(nnd) <- paste0("dist.", kuse)
+            colnames(nnw) <- paste0("which.", kuse)
+          }
+        }
+      } else {
+        zz <- .C("linSnndwhich",
+                 np = as.integer(nX),
+                 sp = as.integer(Xsegmap[ooX]),
+                 tp = as.double(Xcoords$tp[ooX]),
+                 nq = as.integer(nY),
+                 sq = as.integer(Ysegmap[ooY]),
+                 tq = as.double(Ycoords$tp[ooY]),
+                 nv = as.integer(Lvert$n),
+                 ns = as.integer(nseg),
+                 from = as.integer(from0),
+                 to = as.integer(to0),
+                 seglen = as.double(seglengths), 
+                 huge = as.double(huge),
+                 tol = as.double(tol), 
+                 nndist = as.double(nnd),
+                 nnwhich = as.integer(nnw),
+                 PACKAGE = "spatstat")
+        zznd <- zz$nndist
+        zznw <- zz$nnwhich + 1L
+        if(any(notfound <- (zznw == 0))) {
+          zznd[notfound] <- NA
+          zznw[notfound] <- NA
+        }
+        nnd[ooX] <- zznd
+        nnw[ooX] <- ooY[zznw]
+      }
+    } else if(!exclude) {
+      zz <- .C("linndcross",
+               np = as.integer(nX),
+               xp = as.double(P$x),
+               yp = as.double(P$y),
+               nq = as.integer(nY),
+               xq = as.double(Q$x),
+               yq = as.double(Q$y),
+               nv = as.integer(Lvert$n),
+               xv = as.double(Lvert$x),
+               yv = as.double(Lvert$y),
+               ns = as.integer(nseg),
+               from = as.integer(from0),
+               to = as.integer(to0),
+               dpath = as.double(dpath),
+               psegmap = as.integer(Xsegmap),
+               qsegmap = as.integer(Ysegmap),
+               huge = as.double(huge),
+               nndist = as.double(nnd),
+               nnwhich = as.integer(nnw),
+               PACKAGE = "spatstat")
+      nnd <- zz$nndist
+      nnw <- zz$nnwhich + 1L
+    } else {
+      ## excluding certain pairs
+      zz <- .C("linndxcross",
+               np = as.integer(nX),
+               xp = as.double(P$x),
+               yp = as.double(P$y),
+               nq = as.integer(nY),
+               xq = as.double(Q$x),
+               yq = as.double(Q$y),
+               nv = as.integer(Lvert$n),
+               xv = as.double(Lvert$x),
+               yv = as.double(Lvert$y),
+               ns = as.integer(nseg),
+               from = as.integer(from0),
+               to = as.integer(to0),
+               dpath = as.double(dpath),
+               psegmap = as.integer(Xsegmap),
+               qsegmap = as.integer(Ysegmap),
+               idP = as.integer(iX),
+               idQ = as.integer(iY),
+               huge = as.double(huge),
+               nndist = as.double(nnd),
+               nnwhich = as.integer(nnw),
+               PACKAGE = "spatstat")
+      nnd <- zz$nndist
+      nnw <- zz$nnwhich + 1L
+    }
+    # any zeroes occur if points have no neighbours.
+    nnw[nnw == 0] <- NA
+  }
+  if(toomany) {
+    ## Nearest neighbours were undefined for some large values of k.
+    ## Insert results obtained for valid 'k' back into matrix of NA/Inf
+    if(need.dist) {
+      paddist[,kuse] <- as.matrix(nnd)
+      nnd <- paddist
+    }
+    if(need.which) {
+      padwhich[,kuse] <- as.matrix(nnw)
+      nnw <- padwhich
+    }
+  }
+  if(exclude) {
+    ## now find neighbours that don't have the same id number
+    avoid <- matrix(iX[as.vector(row(nnw))] != iY[as.vector(nnw)],
+                    nrow=nrow(nnw), ncol=ncol(nnw))
+    colind <- apply(avoid, 1, whichcoltrue, m=seq_len(ncol(avoid)-1))
+    colind <- if(is.matrix(colind)) t(colind) else matrix(colind, ncol=1)
+    rowcol <- cbind(as.vector(row(colind)), as.vector(colind))
+    nnd <- matrix(nnd[rowcol], nrow=nX)
+    nnw <- matrix(nnw[rowcol], nrow=nX)
+    nnd <- nnd[,koriginal]
+    nnw <- nnw[,koriginal]
+  }
+  result <- list(dist=nnd, which=nnw)[what]
+  if(format == "data.frame")
+    result <- as.data.frame(result)[,,drop=TRUE]
+  return(result)
+}
+
+  whichcoltrue <- function(x, m) which(x)[m]
+  
+  nncross.lpp
+})
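+
+## Usage sketch (illustrative; both patterns must lie on the same network):
+##   X <- runiflpp(15, simplenet); Y <- runiflpp(10, simplenet)
+##   nncross(X, Y)   # shortest-path distance and index of the nearest Y-point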
diff --git a/R/nnfun.R b/R/nnfun.R
new file mode 100644
index 0000000..c9bd304
--- /dev/null
+++ b/R/nnfun.R
@@ -0,0 +1,77 @@
+#
+#   nnfun.R
+#
+#   nearest neighbour function (returns a function of x,y)
+#
+#   $Revision: 1.5 $   $Date: 2014/10/24 00:22:30 $
+#
+
+nnfun <- function(X, ...) {
+  UseMethod("nnfun")
+}
+
+nnfun.ppp <- function(X, ..., k=1) {
+  # this line forces X to be bound
+  stopifnot(is.ppp(X))
+  if(length(k) != 1) stop("k should be a single integer")
+  g <- function(x,y=NULL) {
+    Y <- xy.coords(x, y)[c("x", "y")]
+    nncross(Y, X, what="which", k=k)
+  }
+  attr(g, "Xclass") <- "ppp"
+  g <- funxy(g, as.rectangle(as.owin(X)))
+  class(g) <- c("nnfun", class(g))
+  return(g)
+}
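+
+## Usage sketch (illustrative): the result is a function of location, so it
+## can be evaluated at arbitrary coordinates or pixellated with as.im():
+##   f <- nnfun(cells)
+##   f(0.5, 0.5)     # index of the data point nearest to (0.5, 0.5)
+##   plot(as.im(f))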
+
+nnfun.psp <- function(X, ...) {
+  # this line forces X to be bound
+  stopifnot(is.psp(X))
+  g <- function(x,y=NULL) {
+    Y <-  xy.coords(x, y)[c("x", "y")]
+    nncross(Y, X, what="which")
+  }
+  attr(g, "Xclass") <- "psp"
+  g <- funxy(g, as.rectangle(as.owin(X)))
+  class(g) <- c("nnfun", class(g))
+  return(g)
+}
+
+as.owin.nnfun <- function(W, ..., fatal=TRUE) {
+  X <- get("X", envir=environment(W))
+  as.owin(X, ..., fatal=fatal)
+}
+
+domain.nnfun <- Window.nnfun <- function(X, ...) { as.owin(X) }
+
+as.im.nnfun <- function(X, W=NULL, ...,
+                           eps=NULL, dimyx=NULL, xy=NULL,
+                           na.replace=NULL) {
+  if(is.null(W)) {
+    env <- environment(X)
+    Xdata  <- get("X", envir=env)
+    k <- mget("k", envir=env, inherits=FALSE, ifnotfound=list(1))[[1]]
+    Z <- nnmap(Xdata, k=k, what="which", eps=eps, dimyx=dimyx, xy=xy)
+    if(!is.null(na.replace))
+      Z$v[is.na(Z$v)] <- na.replace
+    return(Z)
+  }
+  # use as.im.function
+  NextMethod("as.im")
+}
+
+print.nnfun <- function(x, ...) {
+  env <- environment(x)
+  X <- get("X", envir=env)
+  k <- mget("k", envir=env, inherits=FALSE, ifnotfound=list(1))[[1]]
+  xtype <- attr(x, "Xclass")
+  typestring <- switch(xtype,
+                       ppp="point pattern",
+                       psp="line segment pattern",
+                       paste("object of class", sQuote(xtype)))
+  Kth <- if(k == 1) "Nearest" else paste0(ordinal(k), "-Nearest")
+  cat(paste(Kth, "Neighbour Index function for", typestring, "\n"))
+  print(X)
+  return(invisible(NULL))
+}
+
diff --git a/R/nnfunlpp.R b/R/nnfunlpp.R
new file mode 100644
index 0000000..44101c5
--- /dev/null
+++ b/R/nnfunlpp.R
@@ -0,0 +1,43 @@
+#
+# nnfunlpp.R
+#
+#   method for 'nnfun' for class 'lpp'
+#
+#   $Revision: 1.2 $ $Date: 2016/08/21 04:33:47 $
+#
+
+nnfun.lpp <- local({
+
+  nnfun.lpp <- function(X, ..., k=1) {
+    stopifnot(inherits(X, "lpp"))
+    force(X)
+    force(k)
+    L <- as.linnet(X)
+    f <- function(x, y=NULL, seg=NULL, tp=NULL, ...) {
+      # L is part of the environment
+      Y <- as.lpp(x=x, y=y, seg=seg, tp=tp, L=L)
+      i <- nncross.lpp(Y, X, what="which", k=k)
+      return(i)
+    }
+    f <- linfun(f, L)
+    attr(f, "explain") <- uitleggen
+    return(f)
+  }
+
+  uitleggen <- function(x, ...) {
+    env <- environment(attr(x, "f"))
+    X <- get("X", envir=env)
+    k <- get("k", envir=env)
+    if(identical(k, 1)) {
+      cat("Nearest-neighbour function for lpp object\n")
+    } else {
+      cat("k-th nearest neighbour function for lpp object\n")
+      cat(paste("k =", commasep(k), "\n"))
+    }
+    print(X)
+  }
+
+  nnfun.lpp
+})
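+
+## Usage sketch (illustrative): the result is a 'linfun', a function of
+## location on the network:
+##   X <- runiflpp(10, simplenet)
+##   f <- nnfun(X)
+##   plot(f)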
+
+
diff --git a/R/nnmap.R b/R/nnmap.R
new file mode 100644
index 0000000..1ac23ad
--- /dev/null
+++ b/R/nnmap.R
@@ -0,0 +1,219 @@
+#
+#  nnmap.R
+#
+#    nearest or k-th nearest neighbour of each pixel
+#
+#  $Revision: 1.9 $  $Date: 2017/06/05 10:31:58 $
+#
+
+nnmap <- function(X, k=1, what = c("dist", "which"), ...,
+                  W=as.owin(X),
+                  is.sorted.X=FALSE,
+                  sortby=c("range", "var", "x", "y")) {
+  stopifnot(is.ppp(X))
+  sortby <- match.arg(sortby)
+  outputarray <- resolve.1.default("outputarray", ..., outputarray=FALSE)
+  
+  W <- as.owin(W)
+  huge <- 1.1 * diameter(boundingbox(as.rectangle(X), as.rectangle(W)))
+  
+  what   <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE)
+  want.dist  <- "dist" %in% what 
+  want.which <- "which" %in% what
+  want.both  <- want.dist && want.which
+
+  if(!missing(k)) {
+    # k can be a single integer or an integer vector
+    if(length(k) == 0)
+      stop("k is an empty vector")
+    else if(length(k) == 1) {
+      if(k != round(k) || k <= 0)
+        stop("k is not a positive integer")
+    } else {
+      if(any(k != round(k)) || any(k <= 0))
+        stop(paste("some entries of the vector",
+                   sQuote("k"), "are not positive integers"))
+    }
+  }
+  k <- as.integer(k)
+  kmax <- max(k)
+  nk <- length(k)
+
+  # note whether W is `really' a rectangle
+  isrect <- is.rectangle(rescue.rectangle(W))
+
+  # set up pixel array
+  M <- do.call.matched(as.mask,
+                       resolve.defaults(list(...), list(w=W)))
+  Mdim <- M$dim
+  nxcol <- Mdim[2]
+  nyrow <- Mdim[1]
+  npixel <- nxcol * nyrow
+  
+  nX <- npoints(X)
+  if(nX == 0) {
+    # trivial - avoid potential problems in C code
+    NND <- if(want.dist) array(Inf, dim=c(nk, Mdim)) else 0
+    NNW <- if(want.which) array(NA_integer_, dim=c(nk, Mdim)) else 0
+  } else {
+    # usual case 
+    if(is.sorted.X && !(sortby %in% c("x", "y")))
+      stop(paste("If data are already sorted,",
+                 "the sorting coordinate must be specified explicitly",
+                 "using sortby = \"x\" or \"y\""))
+
+    # decide whether to sort on x or y coordinate
+    switch(sortby,
+           range = {
+             s <- sidelengths(as.rectangle(X))
+             sortby.y <- (s[1] < s[2])
+           },
+           var = {
+             sortby.y <- (var(X$x) < var(X$y))
+           },
+           x={ sortby.y <- FALSE},
+           y={ sortby.y <- TRUE}
+           )
+
+    # The C code expects points to be sorted by x coordinate.
+    if(sortby.y) {
+      oldM <- M
+      X <- flipxy(X)
+      W <- flipxy(W)
+      M <- flipxy(M)
+      Mdim <- M$dim
+    }
+    xx <- X$x
+    yy <- X$y
+    # sort only if needed
+    if(!is.sorted.X){
+      oX <- fave.order(xx)
+      xx <- xx[oX]
+      yy <- yy[oX]
+    }
+
+    # number of neighbours that are well-defined
+    kmaxcalc <- min(nX, kmax)
+
+    # prepare to call C code
+    nndv <- if(want.dist) numeric(npixel * kmaxcalc) else numeric(1)
+    nnwh <- if(want.which) integer(npixel * kmaxcalc) else integer(1)
+
+    # ............. call C code ............................
+    
+    if(kmaxcalc == 1) {
+      zz <- .C("nnGinterface",
+               nx = as.integer(nxcol),
+               x0 = as.double(M$xcol[1]),
+               xstep = as.double(M$xstep),
+               ny = as.integer(nyrow),
+               y0 = as.double(M$yrow[1]),
+               ystep = as.double(M$ystep),
+               np = as.integer(nX),
+               xp = as.double(xx),
+               yp = as.double(yy),
+               wantdist = as.integer(want.dist),
+               wantwhich = as.integer(want.which),
+               nnd = as.double(nndv),
+               nnwhich = as.integer(nnwh),
+               huge = as.double(huge),
+               PACKAGE = "spatstat")
+    } else {
+      zz <- .C("knnGinterface",
+               nx = as.integer(nxcol),
+               x0 = as.double(M$xcol[1]),
+               xstep = as.double(M$xstep),
+               ny = as.integer(nyrow),
+               y0 = as.double(M$yrow[1]),
+               ystep = as.double(M$ystep),
+               np = as.integer(nX),
+               xp = as.double(xx),
+               yp = as.double(yy),
+               kmax = as.integer(kmaxcalc),
+               wantdist = as.integer(want.dist),
+               wantwhich = as.integer(want.which),
+               nnd = as.double(nndv),
+               nnwhich = as.integer(nnwh),
+               huge = as.double(huge),
+               PACKAGE = "spatstat")
+    }
+    
+    # extract results
+    nnW <- zz$nnwhich
+    nnD <- zz$nnd
+    # map index 0 to NA
+    if(want.which && any(uhoh <- (nnW == 0))) {
+      nnW[uhoh] <- NA
+      if(want.dist) nnD[uhoh] <- Inf
+    }
+    # reinterpret indices in original ordering
+    if(!is.sorted.X) nnW <- oX[nnW]
+  
+    # reform as arrays 
+    NND <- if(want.dist) array(nnD, dim=c(kmaxcalc, Mdim)) else 0
+    NNW <- if(want.which) array(nnW, dim=c(kmaxcalc, Mdim)) else 0
+    if(sortby.y) {
+      # flip x and y back again
+      if(want.dist) NND <- aperm(NND, c(1, 3, 2))
+      if(want.which) NNW <- aperm(NNW, c(1, 3, 2))
+      M <- oldM
+      Mdim <- dim(M)
+    }
+    
+    # the return value should correspond to the original vector k
+    if(kmax > kmaxcalc) {
+      # pad with NA / Inf
+      if(want.dist) {
+        NNDcalc <- NND
+        NND <- array(Inf, dim=c(kmax, Mdim))
+        NND[1:kmaxcalc, , ] <- NNDcalc
+      }
+      if(want.which) {
+        NNWcalc <- NNW
+        NNW <- array(NA_integer_, dim=c(kmax, Mdim))
+        NNW[1:kmaxcalc, , ] <- NNWcalc
+      }
+    }
+    if(length(k) < kmax) {
+      # select only the specified planes
+      if(want.dist)
+        NND <- NND[k, , , drop=FALSE]
+      if(want.which)
+        NNW <- NNW[k, , , drop=FALSE]
+    }
+  }
+
+  # secret backdoor
+  if(outputarray) {
+    # return result as an array or pair of arrays
+    result <- if(want.both) { list(dist=NND, which=NNW) } else
+              if(want.dist) NND else NNW
+    attr(result, "pixarea") <- with(M, xstep * ystep)
+    return(result)
+  }
+
+  # format result as a list of images
+  result <- list()
+  if(want.dist) {
+    dlist <- list()
+    for(i in 1:nk) {
+      DI <- as.im(NND[i,,], M)
+      if(!isrect) DI <- DI[M, drop=FALSE]
+      dlist[[i]] <- DI
+    }
+    names(dlist) <- k
+    result[["dist"]] <- if(nk > 1) dlist else dlist[[1]]
+  }
+  if(want.which) {
+    wlist <- list()
+    for(i in 1:nk) {
+      WI <- as.im(NNW[i,,], M)
+      if(!isrect) WI <- WI[M, drop=FALSE]
+      wlist[[i]] <- WI
+    }
+    names(wlist) <- k
+    result[["which"]] <- if(nk > 1) wlist else wlist[[1]]
+  }
+  if(!want.both) result <- result[[1]]
+  return(result)
+}
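+
+## Illustrative usage sketch (assuming this is the 'nnmap' function invoked
+## from nnmark.R below; 'cells' is a standard spatstat dataset):
+##   D <- nnmap(cells, k=1, what="dist")    # pixel image of nn distances
+##   W <- nnmap(cells, k=1, what="which")   # pixel image of nn indices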
diff --git a/R/nnmark.R b/R/nnmark.R
new file mode 100644
index 0000000..ad674e5
--- /dev/null
+++ b/R/nnmark.R
@@ -0,0 +1,48 @@
+#
+# nnmark.R
+#
+# $Revision: 1.6 $ $Date: 2015/10/21 09:06:57 $
+
+nnmark <- local({
+
+  nnmark <- function(X, ..., k=1, at=c("pixels", "points")) {
+    stopifnot(is.ppp(X))
+    stopifnot(is.marked(X))
+    at <- match.arg(at)
+    mX <- marks(X)
+    switch(at,
+           pixels = {
+             Y <- nnmap(X, k=k, what="which", ...)
+             switch(markformat(X),
+                    vector={
+                      result <- eval.im(mX[Y])
+                    },
+                    dataframe = {
+                      result <- solapply(mX, lookedup, indeximage=Y)
+                    },
+                    stop("Marks must be a vector or dataframe"))
+           },
+           points = {
+             Y <- nnwhich(X, k=k)
+             switch(markformat(X),
+                    vector={
+                      result <- mX[Y]
+                    },
+                    dataframe = {
+                      result <- mX[Y,, drop=FALSE]
+                      row.names(result) <- NULL
+                    },
+                    stop("Marks must be a vector or dataframe"))
+           })
+    return(result)
+  }
+
+  lookedup <- function(xvals, indeximage) eval.im(xvals[indeximage])
+
+  nnmark
+})
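+
+## Illustrative usage sketch ('spruces' is a standard spatstat dataset with
+## numeric marks):
+##   Z <- nnmark(spruces)               # pixel image of nearest-mark values
+##   v <- nnmark(spruces, at="points")  # mark of nearest other point, per point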
+
+
+
+
+  
diff --git a/R/nnorient.R b/R/nnorient.R
new file mode 100644
index 0000000..5c3d625
--- /dev/null
+++ b/R/nnorient.R
@@ -0,0 +1,139 @@
+##
+## nnorient.R
+##
+## nearest neighbour pair orientation distribution
+##
+## Function \vartheta(phi) defined in
+## Illian et al (2008) equ (4.5.3) page 253
+##
+##  $Revision: 1.3 $ $Date: 2014/12/05 07:31:57 $
+
+nnorient <- function(X, ..., cumulative=FALSE, correction, k = 1,
+                     unit=c("degree", "radian"),
+                     domain=NULL, ratio=FALSE) {
+  stopifnot(is.ppp(X))
+  check.1.integer(k)
+  stopifnot(k>=1)
+  W <- Window(X)
+
+  if(!is.null(domain))
+    stopifnot(is.subset.owin(domain, W))
+
+  unit <- match.arg(unit)
+  switch(unit,
+         degree = {
+           FullCircle <- 360
+           Convert <- 180/pi
+         },
+         radian = {
+           FullCircle <- 2 * pi
+           Convert <- 1
+         })
+
+  ## choose correction(s)
+  correction.given <- !missing(correction) && !is.null(correction)
+  if(!correction.given)
+    correction <- c("bord.modif", "none")
+
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             bord.modif="bord.modif",
+                             good="good",
+                             best="best"),
+                           multi=TRUE)
+  correction[correction %in% c("good", "best")] <- "bord.modif"
+
+  ## process point pattern
+  Xcoord <- coords(X)
+  Ycoord <- Xcoord[nnwhich(X, k=k), ]
+  if(!is.null(domain)) {
+    inD <- inside.owin(Xcoord$x, Xcoord$y, domain)
+    Xcoord <- Xcoord[inD,]
+    Ycoord <- Ycoord[inD,]
+  }
+
+  dYX <- Ycoord - Xcoord
+  ANGLE <- with(dYX, atan2(y, x) * Convert) %% FullCircle
+  nangles <- length(ANGLE)
+  
+  ## initialise output object
+  Nphi <- 512
+  breaks <- make.even.breaks(bmax=FullCircle, npos=Nphi-1)
+  phi <- breaks$r
+  Odf <- data.frame(phi  = phi,
+                    theo = (if(cumulative) phi else 1)/FullCircle)
+  desc <- c("angle argument phi",
+            "theoretical isotropic %s")
+  NOletter <- if(cumulative) "Theta" else "vartheta"
+  NOsymbol <- as.name(NOletter)
+  NNO <- ratfv(Odf, NULL, denom=nangles,
+              argu="phi",
+              ylab=substitute(fn(phi), list(fn=NOsymbol)),
+              valu="theo",
+              fmla = . ~ phi,
+              alim = c(0, FullCircle),
+              c("phi",
+                "{%s[%s]^{pois}}(phi)"),
+              desc,
+              fname=NOletter,
+              yexp=substitute(fn(phi), list(fn=NOsymbol)))
+
+  ## ^^^^^^^^^^^^^^^  Compute edge corrected estimates ^^^^^^^^^^^^^^^^
+  
+  if(any(correction == "none")) {
+    ## uncorrected! For demonstration purposes only!
+    if(cumulative) {
+      wh <- whist(ANGLE, breaks$val)  # no weights
+      num.un <- cumsum(wh)
+    } else {
+      kd <- circdensity(ANGLE, ..., n=Nphi, unit=unit)
+      num.un <- kd$y * nangles
+    }
+    den.un <- nangles
+    ## uncorrected estimate 
+    NNO <- bind.ratfv(NNO,
+                     data.frame(un=num.un), den.un,
+                    "{hat(%s)[%s]^{un}}(phi)",
+                    "uncorrected estimate of %s",
+                    "un",
+                    ratio=ratio)
+  }
+
+  if("bord.modif" %in% correction) {
+    ## border type correction
+    bX <- bdist.points(X)
+    nndX <- nndist(X, k=k)
+    if(!is.null(domain)) {
+      bX <- bX[inD]
+      nndX <- nndX[inD]
+    }
+    ok <- (nndX < bX)
+    nok <- sum(ok)
+    rr <- seq(0, max(bX), length=256)
+    Ar <- eroded.areas(W, rr)
+    Arf <- approxfun(rr, Ar, rule=2)
+    AI <- Arf(bX)
+    edgewt <- ifelse(ok, pmin(area(W)/AI, 100), 0)
+    if(cumulative) {
+      wh <- whist(ANGLE, breaks$val, edgewt)
+      num.bm <- cumsum(wh)/mean(edgewt)
+    } else {
+      w <- edgewt/sum(edgewt)
+      kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit)
+      num.bm <- kd$y * nok
+    }
+    den.bm <- nok
+    NNO <- bind.ratfv(NNO,
+                      data.frame(bordm=num.bm),
+                      den.bm,
+                      "{hat(%s)[%s]^{bordm}}(phi)",
+                      "modified border-corrected estimate of %s",
+                      "bordm",
+                      ratio=ratio)
+  }
+ 
+  unitname(NNO) <- switch(unit,
+                         degree = c("degree", "degrees"),
+                         radian = c("radian", "radians"))
+  return(NNO)
+}
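+
+## Illustrative usage sketch ('redwood' is a standard spatstat dataset):
+##   NO <- nnorient(redwood)   # kernel estimate of the orientation density
+##   plot(NO)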
diff --git a/R/objsurf.R b/R/objsurf.R
new file mode 100644
index 0000000..63b2838
--- /dev/null
+++ b/R/objsurf.R
@@ -0,0 +1,123 @@
+#
+#  objsurf.R
+#
+#  surface of the objective function for an M-estimator
+#
+#  $Revision: 1.5 $ $Date: 2016/02/11 10:17:12 $
+#
+
+objsurf <- function(x, ...) {
+  UseMethod("objsurf")
+}
+
+objsurf.kppm <- objsurf.dppm <- function(x, ..., ngrid=32, ratio=1.5, verbose=TRUE) {
+  Fit <- x$Fit
+  switch(Fit$method,
+         mincon = {
+           result <- objsurf(Fit$mcfit, ...,
+                             ngrid=ngrid, ratio=ratio, verbose=verbose)
+         },
+         clik = {
+           optpar  <- x$par
+           objfun  <- Fit$objfun
+           objargs <- Fit$objargs
+           result  <- objsurfEngine(objfun, optpar, objargs, ...,
+                                    ngrid=ngrid, ratio=ratio, verbose=verbose)
+         })
+  return(result)
+}
+
+objsurf.minconfit <- function(x, ..., ngrid=32, ratio=1.5, verbose=TRUE) {
+  optpar  <- x$par.canon %orifnull% x$par
+  objfun  <- x$objfun
+  objargs <- x$objargs
+  dotargs <- x$dotargs
+  objsurfEngine(objfun, optpar, objargs, ...,
+                dotargs=dotargs,
+                objname="contrast",
+                ngrid=ngrid, ratio=ratio, verbose=verbose)
+}
+
+objsurfEngine <- function(objfun, optpar, objargs, 
+                          ...,
+                          dotargs=list(),
+                          objname="objective", 
+                          ngrid=32, ratio=1.5, verbose=TRUE) {
+  trap.extra.arguments(...)
+  if(!is.function(objfun))
+    stop("Object is in an outdated format and needs to be re-fitted")
+  npar    <- length(optpar)
+  if(npar != 2)
+    stop("Only implemented for functions of 2 arguments")
+  # create grid of parameter values
+  ratio <- ensure2vector(ratio)
+  ngrid <- ensure2vector(ngrid)
+  stopifnot(all(ratio > 1))
+  xgrid <- seq(optpar[1]/ratio[1], optpar[1] * ratio[1], length=ngrid[1])
+  ygrid <- seq(optpar[2]/ratio[2], optpar[2] * ratio[2], length=ngrid[2])
+  pargrid <- expand.grid(xgrid, ygrid)
+  colnames(pargrid) <- names(optpar)
+  # evaluate
+  if(verbose) cat(paste("Evaluating", nrow(pargrid), "function values..."))
+  values <- do.call(apply,
+                    append(list(pargrid, 1, objfun, objargs=objargs), dotargs))
+  if(verbose) cat("Done.\n")
+  result <- list(x=xgrid, y=ygrid, z=matrix(values, ngrid[1], ngrid[2]))
+  attr(result, "optpar") <- optpar
+  attr(result, "objname") <- "contrast"
+  class(result) <- "objsurf"
+  return(result)
+}
+
+print.objsurf <- function(x, ...) {
+  cat("Objective function surface\n")
+  optpar <- attr(x, "optpar")
+  objname <- attr(x, "objname")
+  nama <- names(optpar)
+  cat("Parameter ranges:\n")
+  cat(paste(paste0(nama[1], ":"), prange(range(x$x)), "\n"))
+  cat(paste(paste0(nama[2], ":"), prange(range(x$y)), "\n"))
+  cat(paste("Function value:", objname, "\n"))
+  invisible(NULL)
+}
+
+image.objsurf <- plot.objsurf <- function(x, ...) {
+  xname <- short.deparse(substitute(x))
+  optpar <- attr(x, "optpar")
+  nama <- names(optpar)
+  do.call(image,
+          resolve.defaults(list(x=unclass(x)),
+                           list(...),
+                           list(xlab=nama[1], ylab=nama[2], main=xname)))
+  abline(v=optpar[1], lty=3)
+  abline(h=optpar[2], lty=3)
+  invisible(NULL)
+}
+
+contour.objsurf <- function(x, ...) {
+  xname <- short.deparse(substitute(x))
+  optpar <- attr(x, "optpar")
+  nama <- names(optpar)
+  do.call(contour,
+          resolve.defaults(list(x=unclass(x)),
+                           list(...),
+                           list(xlab=nama[1], ylab=nama[2], main=xname)))
+  abline(v=optpar[1], lty=3)
+  abline(h=optpar[2], lty=3)
+  invisible(NULL)
+}
+
+  
+persp.objsurf <- function(x, ...) {
+  xname <- short.deparse(substitute(x))
+  optpar <- attr(x, "optpar")
+  objname <- attr(x, "objname")
+  nama <- names(optpar)
+  r <- do.call(persp,
+               resolve.defaults(list(x=x$x, y=x$y, z=x$z),
+                                list(...),
+                                list(xlab=nama[1], ylab=nama[2],
+                                     zlab=objname, main=xname)))
+  invisible(r)
+}
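+
+## Illustrative usage sketch ('redwood' is a standard spatstat dataset;
+## kppm fits the Thomas model by minimum contrast by default):
+##   fit <- kppm(redwood ~ 1, "Thomas")
+##   os  <- objsurf(fit)    # contrast surface on a grid around the optimum
+##   plot(os)
+##   persp(os)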
+
+
diff --git a/R/options.R b/R/options.R
new file mode 100755
index 0000000..1f2993d
--- /dev/null
+++ b/R/options.R
@@ -0,0 +1,607 @@
+#
+#     options.R
+#
+#     Spatstat options and other internal states
+#
+#    $Revision: 1.80 $   $Date: 2017/06/05 10:31:58 $
+#
+#
+
+.spEnv <- new.env()
+
+putSpatstatVariable <- function(name, value) {
+  assign(name, value, envir=.spEnv)
+}
+getSpatstatVariable <- function(name) {
+  get(name, envir=.spEnv)
+}
+existsSpatstatVariable <- function(name) {
+  exists(name, envir=.spEnv)
+}
+
+putSpatstatVariable("Spatstat.Options", list())
+putSpatstatVariable("Spatstat.ProgressBar", NULL)
+putSpatstatVariable("Spatstat.ProgressData", NULL)
+putSpatstatVariable("warnedkeys", character(0))
+
+## Kovesi's uniform colour map, row 29, linear 'bmy'
+putSpatstatVariable("DefaultImageColours", 
+c("#000C7D", "#000D7E", "#000D80", "#000E81", "#000E83", "#000E85", 
+"#000F86", "#000F88", "#00108A", "#00108B", "#00118D", "#00118F", 
+"#001190", "#001292", "#001293", "#001295", "#001396", "#001398", 
+"#001399", "#00149A", "#00149C", "#00149D", "#00149E", "#00159F", 
+"#0015A0", "#0015A1", "#0015A2", "#0015A3", "#0015A4", "#0016A5", 
+"#0016A6", "#0016A6", "#0016A7", "#0016A8", "#0016A8", "#0016A8", 
+"#0A16A9", "#1516A9", "#1D15A9", "#2315A9", "#2915A9", "#2F15A8", 
+"#3414A8", "#3914A7", "#3E13A6", "#4313A5", "#4712A4", "#4C12A3", 
+"#5011A2", "#5311A1", "#5710A0", "#5A0F9F", "#5E0F9E", "#610E9E", 
+"#640E9D", "#670D9C", "#6A0D9B", "#6C0C9A", "#6F0B99", "#720B98", 
+"#740A98", "#770A97", "#790996", "#7C0896", "#7E0895", "#800794", 
+"#810794", "#840693", "#860692", "#880692", "#8A0591", "#8C0591", 
+"#8E0490", "#900490", "#92048F", "#94038F", "#96038E", "#98038E", 
+"#9A028D", "#9C028D", "#9E028D", "#A0018C", "#A2018C", "#A4018B", 
+"#A6018B", "#A8008A", "#AA008A", "#AB0089", "#AD0089", "#AF0088", 
+"#B10088", "#B30087", "#B50087", "#B70086", "#B80086", "#BA0086", 
+"#BC0085", "#BE0085", "#C00084", "#C20084", "#C30083", "#C50083", 
+"#C70082", "#C90082", "#CB0081", "#CD0081", "#CE0080", "#D00080", 
+"#D20080", "#D40080", "#D5007F", "#D7007F", "#D9007E", "#DA007E", 
+"#DC007D", "#DD007C", "#DF017C", "#E1027B", "#E2047B", "#E4067A", 
+"#E5087A", "#E70B79", "#E80D78", "#E91078", "#EB1277", "#EC1477", 
+"#ED1676", "#EF1875", "#F01A75", "#F11C74", "#F31E73", "#F42073", 
+"#F52272", "#F62471", "#F72671", "#F82870", "#FA2A6F", "#FB2C6F", 
+"#FC2E6E", "#FD306D", "#FE326C", "#FE346C", "#FE366B", "#FE386A", 
+"#FE3A6A", "#FE3D69", "#FE3F68", "#FE4167", "#FE4366", "#FE4566", 
+"#FE4765", "#FE4964", "#FE4B63", "#FE4D62", "#FE5062", "#FE5261", 
+"#FE5460", "#FE565F", "#FE585E", "#FE5A5D", "#FE5D5C", "#FE5F5B", 
+"#FE615B", "#FE635A", "#FE6559", "#FE6758", "#FE6A57", "#FE6C56", 
+"#FE6E55", "#FE7054", "#FE7253", "#FE7452", "#FE7651", "#FE7850", 
+"#FE7A4E", "#FE7C4D", "#FE7E4C", "#FE7F4B", "#FE804A", "#FE8249", 
+"#FE8448", "#FE8647", "#FE8745", "#FE8944", "#FE8B43", "#FE8D42", 
+"#FE8E40", "#FE903F", "#FE923E", "#FE943C", "#FE953B", "#FE9739", 
+"#FE9938", "#FE9A36", "#FE9C35", "#FE9E33", "#FE9F32", "#FEA130", 
+"#FEA22F", "#FEA42E", "#FEA52C", "#FEA72B", "#FEA82A", "#FEAA29", 
+"#FEAB28", "#FEAD27", "#FEAE26", "#FEB026", "#FEB125", "#FEB324", 
+"#FEB423", "#FEB523", "#FEB722", "#FEB822", "#FEBA21", "#FEBB20", 
+"#FEBC20", "#FEBE1F", "#FEBF1F", "#FEC11F", "#FEC21E", "#FEC31E", 
+"#FEC51E", "#FEC61D", "#FEC71D", "#FEC91D", "#FECA1D", "#FECB1D", 
+"#FECD1D", "#FECE1C", "#FECF1C", "#FED11C", "#FED21C", "#FED31C", 
+"#FED51C", "#FED61D", "#FED71D", "#FED91D", "#FEDA1D", "#FEDB1D", 
+"#FEDD1D", "#FEDE1E", "#FEDF1E", "#FEE11E", "#FEE21E", "#FEE31F", 
+"#FEE51F", "#FEE61F", "#FEE720", "#FEE820", "#FEEA21", "#FEEB21", 
+"#FEEC22", "#FEEE22", "#FEEF23", "#FEF023"))
+
+warn.once <- function(key, ...) {
+  warned <- getSpatstatVariable("warnedkeys")
+  if(!(key %in% warned)) {
+    warning(paste(...), call.=FALSE)
+    putSpatstatVariable("warnedkeys", c(warned, key))
+  }
+  return(invisible(NULL))
+}
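+
+## Illustrative: each key triggers its warning at most once per session, e.g.
+##   warn.once("mykey", "this message is issued only once")  # 'mykey' is made up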
+
+".Spat.Stat.Opt.Table" <-
+  list(
+       checkpolygons = list(
+         ## superseded
+         superseded=TRUE,
+         default=FALSE,
+         check=function(x) {
+           warning("spatstat.options('checkpolygons') will be ignored in future versions of spatstat", call.=FALSE)
+           return(is.logical(x) && length(x) == 1)
+         },
+         valid="a single logical value"
+         ),
+       checksegments = list(
+         ## default value of 'check' for psp objects
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1},
+         valid="a single logical value"
+         ),
+       closepairs.newcode=list(
+         ## use new code for 'closepairs'
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       crossing.psp.useCall=list(
+         ## use new code for 'crossing.psp'
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       crosspairs.newcode=list(
+         ## use new code for 'crosspairs'
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       densityTransform=list(
+         ## use experimental new C routines for 'density.ppp'
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       densityC=list(
+         ## use C routines for 'density.ppp'
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       dpp.maxmatrix=list(
+         ## maximum size of matrix in dppeigen
+         default=2^24, # 16,777,216
+         check=function(x) {
+           is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 1024
+         },
+         valid="a single integer, greater than 1024"
+       ),
+       exactdt.checks.data=list(
+         ## whether 'exactdt' checks validity of return value
+         default=FALSE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       expand=list(
+         ## default area expansion factor
+         default=2,
+         check=function(x) {
+           is.numeric(x) && length(x) == 1 && x > 1
+         },
+         valid="a single numeric value, greater than 1"
+       ),
+       expand.polynom=list(
+         ## whether to expand polynom() in ppm formulae
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       fasteval=list(
+         ## whether to use 'fasteval' code if available
+         default="on",
+         check=function(x) { x %in% c("off", "on", "test") },
+         valid="one of the strings \'off\', \'on\' or \'test\'"
+       ),
+       fastpois=list(
+         # whether to use fast algorithm for rpoispp() when lambda is an image
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       fastthin=list(
+         # whether to use fast C algorithm for rthin() when P is constant
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       fastK.lgcp=list(
+         ## whether to cut a few corners in 'lgcp.estK'
+         default=FALSE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       fixpolygons = list(
+         ## whether to repair polygons automatically
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1},
+         valid="a single logical value"
+         ),
+       gpclib=list(
+         ## defunct!
+         superseded=TRUE, 
+         default=FALSE,
+         check=function(x) {
+           message("gpclib is no longer needed")
+           return(TRUE)
+         },
+         valid="a single logical value"
+         ),
+       huge.npoints=list(
+         ## threshold to trigger a warning from rpoispp 
+         default=1e6,
+         check=function(x) {
+           is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 1024
+         },
+         valid="a single integer, greater than 1024"
+       ),
+       image.colfun=list(
+         ## default colour scheme for plot.im
+#         default=function(n){topo.colors(n)},
+         default=function(n) {
+           z <- getSpatstatVariable("DefaultImageColours")
+           interp.colours(z, n)
+         },
+         check=function(x) {
+           if(!is.function(x) || length(formals(x)) == 0) return(FALSE)
+           y <- x(42)
+           if(length(y) != 42 || !is.character(y)) return(FALSE)
+           z <- try(col2rgb(y), silent=TRUE)
+           return(!inherits(z, "try-error"))
+         },
+         valid="a function f(n) that returns character strings, interpretable as colours"
+         ),
+       Kcom.remove.zeroes=list(
+         ## whether Kcom removes zero distances
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       maxedgewt=list(
+         ## maximum edge correction weight 
+         default=100,
+         check=function(x){
+           is.numeric(x) && length(x) == 1 && is.finite(x) && x >= 1
+         },
+         valid="a finite numeric value, not less than 1"
+       ),
+       maxmatrix=list(
+         ## maximum size of matrix of pairs of points in mpl.R
+         default=2^24, # 16,777,216
+         check=function(x) {
+           is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 1024
+         },
+         valid="a single integer, greater than 1024"
+       ),
+       monochrome = list(
+         ## switch for monochrome colour scheme
+         default=FALSE,
+         check=function(x) { is.logical(x) && length(x) == 1},
+         valid="a single logical value"
+         ),
+       n.bandwidth=list(
+         ## number of values of bandwidth to try in bandwidth selection
+         default=32,
+         check=function(x) {
+           is.numeric(x) && (length(x) == 1) && (x == ceiling(x)) && (x > 2)
+         },
+         valid="a single integer, greater than 2"
+       ),
+       ndummy.min=list(
+         ## minimum grid size for dummy points
+         default=32,
+         check=function(x) {
+           is.numeric(x) && length(x) <= 2 && all(x == ceiling(x)) && all(x > 1)
+         },
+         valid="a single integer or a pair of integers, greater than 1"
+       ),
+       ngrid.disc=list(
+         ## number of grid points used to calculate area in area-interaction
+         default=128,
+         check=function(x) {
+           is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 1
+         },
+         valid="a single integer, greater than 1"
+       ),
+       npixel=list(
+         ## default pixel dimensions
+         default=128,
+         check=function(x){
+           is.numeric(x) && (length(x) %in% c(1,2)) && is.finite(x) &&
+           all(x == ceiling(x)) && all(x > 1) 
+         },
+         valid="an integer, or a pair of integers, greater than 1"
+        ),
+       nvoxel=list(
+         ## default total number of voxels
+         default=2^22,
+         check=function(x) {
+           is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 2^12
+         },
+         valid="a single integer, greater than 2^12"
+       ),
+       old.morpho.psp=list(
+         ## use old code for morphological operations
+         default=FALSE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       par.binary=list(
+         ## default graphics parameters for masks
+         default=list(),
+         check=is.list,
+         valid="a list"
+         ),
+       par.contour=list(
+         ## default graphics parameters for 'contour'
+         default=list(),
+         check=is.list,
+         valid="a list"
+         ),
+       par.fv=list(
+         ## default graphics parameters for 'plot.fv'
+         default=list(),
+         check=is.list,
+         valid="a list"
+         ),
+       par.persp=list(
+         ## default graphics parameters for 'persp' plots
+         default=list(),
+         check=is.list,
+         valid="a list"
+         ),
+       par.points=list(
+         ## default graphics parameters for 'points'
+         default=list(),
+         check=is.list,
+         valid="a list"
+         ),
+       par.pp3=list(
+         ## default graphics parameters for 'plot.pp3'
+         default=list(),
+         check=is.list,
+         valid="a list"
+         ),
+       print.ppm.SE=list(
+         ## under what conditions to print estimated SE in print.ppm
+         default="poisson",
+         check=function(x) { is.character(x) && length(x) == 1 &&
+                             x %in% c("always", "poisson", "never") },
+         valid="one of the strings \'always\', \'poisson\' or \'never\'"
+       ),
+       progress = list(
+         ## how to display progress reports
+         default="tty",
+         check=function(x){ x %in% c("tty", "tk", "txtbar") },
+         valid="one of the strings 'tty', 'tk' or 'txtbar'"
+         ),
+       project.fast=list(
+         ## whether to cut corners when projecting an invalid ppm object
+         default=FALSE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       psstA.ngrid=list(
+         ## size of point grid for computing areas in psstA
+         default=32,
+         check=function(x) {
+           is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x >= 8
+         },
+         valid="a single integer, greater than or equal to 8"
+       ),
+       psstA.nr=list(
+         ## number of 'r' values to consider in psstA
+         default=30,
+         check=function(x) {
+           is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x >= 4
+         },
+         valid="a single integer, greater than or equal to 4"
+       ),
+       psstG.remove.zeroes=list(
+         ## whether to remove zero distances in psstG
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       eroded.intensity=list(
+         ## whether to compute intensity estimate in eroded window
+         ## e.g. for Kcom, Gcom
+         default=FALSE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       rmh.nrep=list(
+         ## default value of parameter 'nrep' in rmh
+         default=5e5, 
+         check=function(x) {
+           is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 0
+         },
+         valid="a single integer, greater than 0"
+       ),
+       rmh.p=list(
+         ## default value of parameter 'p' in rmh
+         default=0.9,
+         check=function(x) { is.numeric(x) && length(x) == 1 &&
+                             x >= 0 && x <= 1 },
+         valid="a single numerical value, between 0 and 1"
+       ),
+       rmh.q=list(
+         ## default value of parameter 'q' in rmh
+         default=0.9,
+         check=function(x) { is.numeric(x) && length(x) == 1 &&
+                             x > 0 && x < 1 },
+         valid="a single numerical value, strictly between 0 and 1"
+       ),
+       scalable = list(
+         ## whether certain calculations in ppm should be scalable
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1},
+         valid="a single logical value"
+         ),
+       selfcrossing.psp.useCall=list(
+         ## whether to use new code in selfcrossing.psp
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       terse = list(
+         ## Level of terseness in printed output (higher => more terse)
+         default=0,
+         check=function(x) { length(x) == 1 && (x %in% 0:4) },
+         valid="an integer between 0 and 4"
+       ),
+       transparent=list(
+         ## whether to allow transparent colours in default colour maps
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       units.paren=list(
+         default="(",
+         check=function(x) {
+           is.character(x) && (length(x) == 1) &&
+             (x %in% c("(", "[", "{", ""))
+         },
+         valid="one of the strings '(', '[', '{' or '' "
+       ),
+       use.Krect=list(
+         ## whether to use function Krect in Kest(X) when window is rectangle
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       Cwhist=list(
+         ## whether to use C code for whist
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       Cbdrymask=list(
+         ## whether to use C code for bdry.mask
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       kppm.canonical=list(
+         ## whether to use 'canonical' parameters in kppm
+         default=FALSE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       kppm.adjusted=list(
+         ## experimental
+         default=FALSE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       check.rpanel.loaded=list(
+         # internal debugging
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       check.RandomFields.loaded=list(
+         # this is working OK so no need to check unless debugging
+         default=FALSE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       check.RandomFieldsUtils.loaded=list(
+         # this is working OK so no need to check unless debugging
+         default=FALSE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       Clinequad = list(
+         # use C code for 'linequad'
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       Ccountends = list(
+         # use C code for 'countends'
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       Clinearradius = list(
+         # use C code for 'boundingradius.linnet'
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       Cnndistlpp = list(
+         # use C code for 'nndist.lpp'/'nnwhich.lpp'
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       Cnncrosslpp = list(
+         # use C code for 'nncross.lpp'
+         default=TRUE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       ),
+       developer = list(
+         # general purpose; user is a developer; use experimental code, etc
+         default=FALSE,
+         check=function(x) { is.logical(x) && length(x) == 1 },
+         valid="a single logical value"
+       )
+    )
+# end of options list
+
+reset.spatstat.options <- function() {
+  Spatstat.Options <- lapply(.Spat.Stat.Opt.Table, getElement, name="default")
+  putSpatstatVariable("Spatstat.Options", Spatstat.Options)
+  invisible(Spatstat.Options)  
+}
+
+reset.spatstat.options()
+
+spatstat.options <- local({
+
+  spatstat.options <- function (...) {
+    Spatstat.Options <- getSpatstatVariable("Spatstat.Options")
+    called <- list(...)    
+
+    if(length(called) == 0) {
+      # return all options, except superseded ones
+      allofem <- .Spat.Stat.Opt.Table[names(Spatstat.Options)]
+      retain <- sapply(lapply(allofem, getElement, name="superseded"), is.null)
+      return(Spatstat.Options[retain])
+    }
+    
+    if(is.null(names(called)) && length(called)==1) {
+      # spatstat.options(x) 
+      x <- called[[1]]
+      if(is.null(x))
+        return(Spatstat.Options)  # spatstat.options(NULL)
+      if(is.list(x))
+        called <- x 
+    }
+    
+    if(is.null(names(called))) {
+        # spatstat.options("par1", "par2", ...)
+	ischar <- unlist(lapply(called, is.character))
+	if(all(ischar)) {
+          choices <- unlist(called)
+          ok <- choices %in% names(Spatstat.Options)
+          if(!all(ok))
+            stop(paste("Unrecognised option(s):", called[!ok]))
+          if(length(called) == 1)
+            return(Spatstat.Options[[choices]])
+          else
+            return(Spatstat.Options[choices])
+	} else {
+	   wrong <- called[!ischar]
+	   offending <- sapply(wrong, ShortDeparse)
+	   offending <- paste(offending, collapse=",")
+           stop(paste("Unrecognised mode of argument(s) [",
+		offending,
+	   "]: should be character string or name=value pair"))
+    	}
+    }
+    ## spatstat.options(name=value, name2=value2,...)
+    assignto <- names(called)
+    if (is.null(assignto) || !all(nzchar(assignto)))
+        stop("options must all be identified by name=value")
+    recog <- assignto %in% names(.Spat.Stat.Opt.Table)
+    if(!all(recog))
+	stop(paste("Unrecognised option(s):", assignto[!recog]))
+    ## validate new values
+    for(i in seq_along(assignto)) {
+      nama <- assignto[i]
+      valo <- called[[i]]
+      entry <- .Spat.Stat.Opt.Table[[nama]]
+      ok <- entry$check(valo)
+      if(!ok)
+        stop(paste("Parameter", dQuote(nama), "should be",
+                   entry$valid))
+    }
+    ## reassign
+    changed <- Spatstat.Options[assignto]
+    Spatstat.Options[assignto] <- called
+    putSpatstatVariable("Spatstat.Options", Spatstat.Options)
+  
+    ## return 
+    invisible(changed)
+  }
+
+  ShortDeparse <- function(x) {
+    y <- x
+    dont.complain.about(y)
+    short.deparse(substitute(y))
+  }
+    
+  spatstat.options
+})
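+
+## Illustrative usage sketch:
+##   spatstat.options("npixel")             # query a single option
+##   old <- spatstat.options(npixel=256)    # set an option, saving old values
+##   spatstat.options(old)                  # restore the previous values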
+
diff --git a/R/ord.R b/R/ord.R
new file mode 100755
index 0000000..22c2fd6
--- /dev/null
+++ b/R/ord.R
@@ -0,0 +1,52 @@
+#
+#
+#    ord.S
+#
+#    $Revision: 1.7 $	$Date: 2015/10/21 09:06:57 $
+#
+#    Ord process with user-supplied potential
+#
+#    Ord()  create an instance of the Ord process
+#                 [an object of class 'interact']
+#                 with user-supplied potential
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+Ord <- local({
+
+  BlankOrd <- 
+  list(
+         name     = "Ord process with user-defined potential",
+         creator  = "Ord",
+         family    = "ord.family",
+         pot      = NULL,
+         par      = NULL,
+         parnames = NULL,
+         init     = NULL,
+         update   = function(self, ...){
+           do.call(Ord,
+                   resolve.defaults(list(...),
+                                    list(pot=self$pot, name=self$name)))
+         } , 
+         print = function(self) {
+           cat("Potential function:\n")
+           print(self$pot)
+           invisible()
+         },
+       version=NULL
+  )
+  class(BlankOrd) <- "interact"
+
+  Ord <- function(pot, name) {
+    out <- instantiate.interact(BlankOrd)
+    out$pot <- pot
+    if(!missing(name)) out$name <- name
+    return(out)
+  }
+
+  Ord <- intermaker(Ord, BlankOrd)
+
+  Ord
+})
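+
+## Illustrative usage sketch (hypothetical potential; 'cells' is a standard
+## spatstat dataset). The potential is evaluated on Dirichlet tile areas:
+##   f <- function(a, par) { a < 0.05 }    # indicator of small tiles
+##   fit <- ppm(cells ~ 1, Ord(pot=f, name="small-tile interaction"))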
+
+
+  
diff --git a/R/ord.family.R b/R/ord.family.R
new file mode 100755
index 0000000..12a2777
--- /dev/null
+++ b/R/ord.family.R
@@ -0,0 +1,130 @@
+#
+#
+#    ord.family.S
+#
+#    $Revision: 1.17 $	$Date: 2015/10/21 09:06:57 $
+#
+#    The Ord model (family of point process models)
+#
+#    ord.family:      object of class 'isf' defining Ord model structure
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+ord.family <-
+  list(
+         name  = "ord",
+         print = function(self) {
+                      cat("Ord model family\n")
+         },
+         eval  = function(X, U, EqualPairs, pot, pars, ...) {
+  #
+  # This auxiliary function is not meant to be called by the user.
+  # It computes the distances between points,
+  # evaluates the pair potential and applies edge corrections.
+  #
+  # Arguments:
+  #   X           data point pattern                      'ppp' object
+  #   U           points at which to evaluate potential   list(x,y) suffices
+  #   EqualPairs  two-column matrix of indices i, j such that X[i] == U[j]
+  #               (or NULL, meaning all comparisons are FALSE)
+  #   pot         potential function                      function(d, p)
+  #   pars        auxiliary parameters for pot            list(......)
+  #   ...         IGNORED                             
+  #
+  # Value:
+  #    matrix of values of the potential
+  #    induced by the pattern X at each location given in U.
+  #    The rows of this matrix correspond to the rows of U (the sample points);
+  #    the k columns are the coordinates of the k-dimensional potential.
+  #
+  # Note:
+  # The potential function 'pot' will be called as
+  #    pot(M, pars)   where M is a vector of tile areas.
+  # It must return a vector of the same length as M
+  # or a matrix with number of rows equal to the length of M
+  ##########################################################################
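+  # For instance (illustrative only; 'amin' is a hypothetical entry of 'pars'):
+  #     pot <- function(M, pars) { M > pars$amin }   # vector in -> vector out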
+
+nX <- npoints(X)
+nU <- length(U$x)       # number of data + dummy points
+
+seqX <- seq_len(nX)
+seqU <- seq_len(nU)
+
+# determine which points in the combined list are data points
+if(length(EqualPairs) > 0)           
+  is.data <- seqU %in% EqualPairs[,2] 
+else
+  is.data <- rep.int(FALSE, nU)
+
+#############################################################################
+# First compute Dirichlet tessellation of data
+# and its total potential (which could be vector-valued)
+#############################################################################
+
+marks(X) <- NULL
+Wdata <- dirichletWeights(X)   # sic - these are the tile areas.
+Pdata <- pot(Wdata, pars)
+summa <- function(P) {
+  if(is.matrix(P))
+    matrowsum(P)
+  else if(is.vector(P) || length(dim(P))==1 )
+    sum(P)
+  else
+    stop("Don't know how to take row sums of this object")
+}
+total.data.potential <- summa(Pdata)
+
+# Initialise V
+
+dimpot <- dim(Pdata)[-1]  # dimension of each value of the potential function
+                          # (= numeric(0) if potential is a scalar)
+
+dimV <- c(nU, dimpot)
+if(length(dimV) == 1)
+  dimV <- c(dimV, 1)
+
+V <- array(0, dim=dimV)
+
+rowV <- array(seqU, dim=dimV)
+
+#################### Next, evaluate V for the data points.  ###############
+# For each data point, compute Dirichlet tessellation
+# of the data with this point removed.
+# Compute difference of total potential.
+#############################################################################
+
+
+for(j in seq_len(nX)) {
+        #  Dirichlet tessellation of data without point j
+  Wminus <- dirichletWeights(X[-j])
+        #  regressor is the difference in total potential
+  V[rowV == j] <- total.data.potential - summa(pot(Wminus, pars))
+}
+
+
+#################### Next, evaluate V for the dummy points   ################
+# For each dummy point, compute Dirichlet tessellation
+# of (data points together with this dummy point) only. 
+# Take difference of total potential.
+#############################################################################
+
+for(j in seqU[!is.data]) {
+  Xplus <- superimpose(X, list(x=U$x[j], y=U$y[j]), W=X$window)
+  #  compute Dirichlet tessellation (of these points only!)
+  Wplus <- dirichletWeights(Xplus)
+  #  regressor is difference in total potential
+  V[rowV == j] <- summa(pot(Wplus, pars)) - total.data.potential
+}
+
+cat("dim(V) = \n")
+print(dim(V))
+
+return(V)
+
+} ######### end of function $eval                            
+
+) ######### end of list
+
+class(ord.family) <- "isf"
diff --git a/R/ordthresh.R b/R/ordthresh.R
new file mode 100755
index 0000000..2d0ed75
--- /dev/null
+++ b/R/ordthresh.R
@@ -0,0 +1,63 @@
+#
+#
+#    ordthresh.S
+#
+#    $Revision: 1.11 $	$Date: 2015/10/21 09:06:57 $
+#
+#    Ord process with threshold potential
+#
+#    OrdThresh()  create an instance of the Ord process
+#                 [an object of class 'interact']
+#                 with threshold potential
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+OrdThresh <- local({
+
+  BlankOrdThresh <- 
+    list(
+      name     = "Ord process with threshold potential",
+      creator  = "OrdThresh",
+      family    = "ord.family",
+      pot      = function(d, par) {
+        (d <= par$r)
+      },
+      par      = list(r = NULL),
+      parnames = "threshold distance",
+      init     = function(self) {
+        r <- self$par$r
+        if(!is.numeric(r) || length(r) != 1 || r <= 0)
+          stop("threshold distance r must be a positive number")
+      },
+      update = NULL,  # default OK
+      print = NULL,    # default OK
+      interpret =  function(coeffs, self) {
+        loggamma <- as.numeric(coeffs[1])
+        gamma <- exp(loggamma)
+        return(list(param=list(gamma=gamma),
+                    inames="interaction parameter gamma",
+                    printable=dround(gamma)))
+      },
+      valid = function(coeffs, self) {
+        loggamma <- as.numeric(coeffs[1])
+        is.finite(loggamma)
+      },
+      project = function(coeffs, self) {
+        if((self$valid)(coeffs, self)) return(NULL) else return(Poisson())
+      },
+      irange = function(...) {
+        return(Inf)
+      },
+      version=NULL
+      )
+  class(BlankOrdThresh) <- "interact"
+
+  OrdThresh <- function(r) { instantiate.interact(BlankOrdThresh, list(r=r)) }
+
+  OrdThresh <- intermaker(OrdThresh, BlankOrdThresh)
+
+  OrdThresh
+})
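+
+## Illustrative usage sketch ('cells' is a standard spatstat dataset):
+##   fit <- ppm(cells ~ 1, OrdThresh(r=0.05))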
+
diff --git a/R/otherpackages.R b/R/otherpackages.R
new file mode 100644
index 0000000..dcfe852
--- /dev/null
+++ b/R/otherpackages.R
@@ -0,0 +1,79 @@
+#'
+#'           otherpackages.R
+#' 
+#'    Dealing with other packages
+#' 
+#'    $Revision: 1.17 $  $Date: 2017/06/05 10:31:58 $
+
+fft2D <- function(z, inverse=FALSE, west=fftwAvailable()) {
+  if(west) return(fftwtools::fftw2d(data=z, inverse=inverse))
+  return(stats::fft(z=z, inverse=inverse))
+}
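+
+## Illustrative: fft2D(matrix(rnorm(16), 4, 4)) delegates to
+## fftwtools::fftw2d when fftwtools is installed, else to stats::fft.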
+
+fftwAvailable <- function() {
+  # including temporary check for recent version
+  ok <- requireNamespace("fftwtools", quietly=TRUE)
+  return(ok)
+}
+
+kraeverRandomFields <- function() {
+  kraever("RandomFieldsUtils")
+  kraever("RandomFields")
+# should no longer be needed:  
+#  capture.output(RandomFieldsUtils:::.onLoad())
+#  capture.output(RandomFields:::.onLoad())
+  return(invisible(NULL))
+}
+
+# require a namespace and optionally check whether it is attached
+kraever <- function(package, fatal=TRUE) {
+  if(!requireNamespace(package, quietly=TRUE)) {
+    if(fatal)
+      stop(paste("The package", sQuote(package), "is required"),
+           call.=FALSE)
+    return(FALSE)
+  }
+  if(spatstat.options(paste("check", package, "loaded", sep=".")) &&
+    !isNamespaceLoaded(package)){
+    if(fatal)
+      stop(paste("The package", sQuote(package),
+                 "must be loaded: please type",
+                 sQuote(paste0("library", paren(package)))),
+           call.=FALSE)
+    return(FALSE)
+  }
+  return(TRUE)
+}
+
+getRandomFieldsModelGen <- function(model) {
+  kraeverRandomFields()
+  if(inherits(model, "RMmodelgenerator"))
+    return(model)
+  if(!is.character(model))
+    stop(paste("'model' should be a character string",
+               "or one of the functions in the RandomFields package",
+               "with a name beginning 'RM'"),
+         call.=FALSE)
+  switch(model,
+         cauchy    = RandomFields::RMcauchy,
+         exponential = ,
+         exp       = RandomFields::RMexp,
+         gencauchy = RandomFields::RMgencauchy,
+         gauss     = RandomFields::RMgauss,
+         gneiting  = RandomFields::RMgneiting,
+         matern    = RandomFields::RMmatern,
+         nugget    = RandomFields::RMnugget,
+         spheric   = RandomFields::RMspheric,
+         stable    = RandomFields::RMstable,
+         whittle   = RandomFields::RMwhittle,
+         {
+           modgen <- try(getExportedValue("RandomFields", 
+                                          paste0("RM", model)),
+                         silent=TRUE)
+           if(inherits(modgen, "try-error") ||
+              !inherits(modgen, "RMmodelgenerator"))
+             stop(paste("Model", sQuote(model), "is not recognised"))
+           modgen
+         })
+}
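+
+## Illustrative: getRandomFieldsModelGen("exp") returns RandomFields::RMexp,
+## while an unlisted name such as "bessel" is looked up as RandomFields::RMbessel.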
+
diff --git a/R/pairdistlpp.R b/R/pairdistlpp.R
new file mode 100755
index 0000000..956aa98
--- /dev/null
+++ b/R/pairdistlpp.R
@@ -0,0 +1,104 @@
+#
+# pairdistlpp.R
+#
+#  $Revision: 1.12 $ $Date: 2017/06/05 10:31:58 $
+#
+#
+#  pairdist.lpp
+#        Calculates the shortest-path distance between each pair of points
+#        in a point pattern on a linear network.
+#
+
+pairdist.lpp <- function(X, ..., method="C") {
+  stopifnot(inherits(X, "lpp"))
+  stopifnot(method %in% c("C", "interpreted"))
+  #
+  n <- npoints(X)
+  pairdistmat <- matrix(Inf, n, n)
+  diag(pairdistmat) <- 0
+  if(n <= 1) return(pairdistmat)  # 0 or 1 points: nothing to compute
+  #
+  L <- as.linnet(X, sparse=FALSE)
+  #
+  if(any(is.infinite(L$dpath))) {
+    #' disconnected network
+    lab <- connected(L, what="labels")
+    subsets <- split(seq_len(nvertices(L)), lab)
+    for(i in seq_along(subsets)) {
+      Xi <- thinNetwork(X, retainvertices=subsets[[i]])
+      witch <- attr(Xi, "retainpoints")      
+      pairdistmat[witch, witch] <- pairdist.lpp(Xi, method=method)
+    }
+    return(pairdistmat)
+  }
+  # 
+  Y <- as.ppp(X)
+  Lvert <- L$vertices
+  from  <- L$from
+  to    <- L$to
+  dpath <- L$dpath
+  
+  # nearest segment for each point
+  pro <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE)$seg
+
+  if(method == "interpreted") {
+    # loop through all pairs of data points
+    for (i in 1:(n-1)) {
+      proi <- pro[i]
+      Xi <- Y[i]
+      nbi1 <- from[proi]
+      nbi2 <- to[proi]
+      vi1 <- Lvert[nbi1]
+      vi2 <- Lvert[nbi2]   
+      dXi1 <- crossdist(Xi, vi1)
+      dXi2 <- crossdist(Xi, vi2)
+      for (j in (i+1):n) {
+        Xj <- Y[j]
+        proj <- pro[j]
+        if(proi == proj) {
+          # points i and j lie on the same segment
+          # use Euclidean distance
+          d <- crossdist(Xi, Xj)
+        } else {
+          # shortest path from i to j passes through ends of segments
+          nbj1 <- from[proj]
+          nbj2 <- to[proj]
+          vj1 <- Lvert[nbj1]
+          vj2 <- Lvert[nbj2]
+          # Calculate shortest of 4 possible paths from i to j
+          d1Xj <- crossdist(vj1,Xj)
+          d2Xj <- crossdist(vj2,Xj)
+          d11 <- dXi1 + dpath[nbi1,nbj1] + d1Xj
+          d12 <- dXi1 + dpath[nbi1,nbj2] + d2Xj
+          d21 <- dXi2 + dpath[nbi2,nbj1] + d1Xj
+          d22 <- dXi2 + dpath[nbi2,nbj2] + d2Xj
+          d <- min(d11,d12,d21,d22)
+        }
+        # store result
+        pairdistmat[i,j] <- pairdistmat[j,i] <- d
+      }
+    }
+  } else {
+    # C code
+    # convert indices to start at 0
+    from0 <- from - 1L
+    to0   <- to - 1L
+    segmap <- pro - 1L
+    zz <- .C("linpairdist",
+             np = as.integer(n),
+             xp = as.double(Y$x),
+             yp = as.double(Y$y),
+             nv = as.integer(Lvert$n),
+             xv = as.double(Lvert$x),
+             yv = as.double(Lvert$y),
+             ns = as.integer(L$lines$n),
+             from = as.integer(from0),
+             to = as.integer(to0),
+             dpath = as.double(dpath),
+             segmap = as.integer(segmap),
+             answer = as.double(numeric(n*n)),
+             PACKAGE = "spatstat")
+    pairdistmat <- matrix(zz$answer, n, n)
+  }
+  return(pairdistmat)
+}
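+
+## Illustrative usage sketch ('simplenet' is a standard spatstat network):
+##   X <- runiflpp(10, simplenet)   # 10 uniform random points on the network
+##   D <- pairdist(X)               # 10 x 10 shortest-path distance matrix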
+
diff --git a/R/pairorient.R b/R/pairorient.R
new file mode 100644
index 0000000..603b189
--- /dev/null
+++ b/R/pairorient.R
@@ -0,0 +1,218 @@
+##
+## pairorient.R
+##
+## point pair orientation distribution
+##
+## Function O_{r1,r2}(phi) defined in
+## Stoyan & Stoyan (1994) equ (14.53) page 271
+##
+##     and its derivative estimated by kernel smoothing
+##
+##  $Revision: 1.9 $ $Date: 2014/12/05 06:59:53 $
+
+pairorient <- function(X, r1, r2, ...,
+                       cumulative=FALSE,
+                       correction, ratio=FALSE,
+                       unit=c("degree", "radian"),
+                       domain=NULL) {
+  stopifnot(is.ppp(X))
+  check.1.real(r1)
+  check.1.real(r2)
+  stopifnot(r1 < r2)
+  W <- Window(X)
+  if(!is.null(domain))
+    stopifnot(is.subset.owin(domain, W))
+  
+  unit <- match.arg(unit)
+  switch(unit,
+         degree = {
+           FullCircle <- 360
+           Convert <- 180/pi
+         },
+         radian = {
+           FullCircle <- 2 * pi
+           Convert <- 1
+         })
+
+  ## choose correction(s)
+  correction.given <- !missing(correction) && !is.null(correction)
+  if(!correction.given)
+    correction <- c("border", "isotropic", "translate")
+  correction <- pickoption("correction", correction,
+                           c(none="none",
+                             border="border",
+                             bord.modif="bord.modif",
+                             isotropic="isotropic",
+                             Ripley="isotropic",
+                             trans="translate",
+                             translate="translate",
+                             translation="translate",
+                             good="good",
+                             best="best"),
+                           multi=TRUE)
+#  best.wanted <- ("best" %in% correction)
+  ## replace 'good' by the optimal choice for this size of dataset
+  if("good" %in% correction)
+    correction[correction == "good"] <- good.correction.K(X)
+  ## retain only corrections that are implemented for the window
+  correction <- implemented.for.K(correction, W$type, correction.given)
+
+  
+  ## Find close pairs in range [r1, r2]
+  close <- as.data.frame(closepairs(X, r2))
+  ok <- with(close, r1 <= d & d <= r2)
+  if(!is.null(domain))
+      ok <- ok & with(close, inside.owin(xi, yi, domain))
+  if(!any(ok)) {
+    warning(paste("There are no pairs of points in the distance range",
+                  prange(c(r1,r2))))
+    return(NULL)
+  }
+  close <- close[ok, , drop=FALSE]
+  ANGLE <- with(close, atan2(dy, dx) * Convert) %% FullCircle
+
+  ## initialise output object
+  Nphi <- 512
+  breaks <- make.even.breaks(bmax=FullCircle, npos=Nphi-1)
+  phi <- breaks$r
+  Odf <- data.frame(phi  = phi,
+                    theo = (if(cumulative) phi else 1)/FullCircle)
+  desc <- c("angle argument phi",
+            "theoretical isotropic %s")
+  Oletter <- if(cumulative) "O" else "o"
+  Osymbol <- as.name(Oletter)
+  OO <- ratfv(Odf, NULL, denom=nrow(close),
+              argu="phi",
+              ylab=substitute(fn[R1,R2](phi), list(R1=r1, R2=r2, fn=Osymbol)),
+              valu="theo",
+              fmla = . ~ phi,
+              alim = c(0, FullCircle),
+              c("phi",
+                "{%s[%s]^{pois}}(phi)"),
+              desc,
+              fname=c(Oletter, paste0("list(", r1, ",", r2, ")")),
+              yexp=substitute(fn[list(R1,R2)](phi),
+                list(R1=r1,R2=r2,fn=Osymbol)))
+
+  ## ^^^^^^^^^^^^^^^  Compute edge corrected estimates ^^^^^^^^^^^^^^^^
+
+  nangles <- length(ANGLE)
+  
+  if(any(correction == "none")) {
+    ## uncorrected! For demonstration purposes only!
+    if(cumulative) {
+      wh <- whist(ANGLE, breaks$val)  # no weights
+      num.un <- cumsum(wh)
+    } else {
+      kd <- circdensity(ANGLE, ..., n=Nphi, unit=unit)
+      num.un <- kd$y * nangles
+    }
+    den.un <- nangles
+    ## uncorrected estimate 
+    OO <- bind.ratfv(OO,
+                     data.frame(un=num.un), den.un,
+                    "{hat(%s)[%s]^{un}}(phi)",
+                    "uncorrected estimate of %s",
+                    "un",
+                    ratio=ratio)
+  }
+
+  if(any(c("border", "bord.modif") %in% correction)) {
+    ## border type corrections
+    bX <- bdist.points(X)
+    bI <- bX[close$i]
+    if("border" %in% correction) {
+      bok <- (bI > r2)
+      ANGLEok <- ANGLE[bok]
+      nok <- length(ANGLEok)
+      if(cumulative) {
+        wh <- whist(ANGLEok, breaks$val)
+        num.bord <- cumsum(wh)
+      } else {
+        kd <- circdensity(ANGLEok, ..., n=Nphi, unit=unit)
+        num.bord <- kd$y * nok
+      }
+      den.bord <- nok
+      OO <- bind.ratfv(OO,
+                       data.frame(border=num.bord),
+                       den.bord,
+                       "{hat(%s)[%s]^{bord}}(phi)",
+                       "border-corrected estimate of %s",
+                       "border",
+                       ratio=ratio)
+    }
+    if("bord.modif" %in% correction) {
+      ok <- (close$d < bI)
+      nok <- sum(ok)
+      inradius <- max(distmap(W, invert=TRUE))
+      rrr <- range(r2, inradius)
+      rr <- seq(rrr[1], rrr[2], length=256)
+      Ar <- eroded.areas(W, rr)
+      Arf <- approxfun(rr, Ar, rule=2)
+      AI <- (Arf(bX))[close$i]
+      edgewt <- ifelse(ok, pmin(area(W)/AI, 100), 0)
+      if(cumulative) {
+        wh <- whist(ANGLE, breaks$val, edgewt)
+        num.bm <- cumsum(wh)/mean(edgewt)
+      } else {
+        w <- edgewt/sum(edgewt)
+        kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit)
+        num.bm <- kd$y * nok
+      }
+      den.bm <- nok
+      OO <- bind.ratfv(OO,
+                       data.frame(bordm=num.bm),
+                       den.bm,
+                       "{hat(%s)[%s]^{bordm}}(phi)",
+                       "modified border-corrected estimate of %s",
+                       "bordm",
+                       ratio=ratio)
+    }
+  }
+  if(any(correction == "translate")) {
+    ## Ohser-Stoyan translation correction
+    edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE)
+    if(cumulative) {
+      wh <- whist(ANGLE, breaks$val, edgewt)
+      num.trans <- cumsum(wh)/mean(edgewt)
+    } else {
+      w <- edgewt/sum(edgewt)
+      kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit)
+      num.trans <- kd$y * nangles
+    }
+    den.trans <- nangles
+    OO <- bind.ratfv(OO,
+                     data.frame(trans=num.trans),
+                     den.trans,
+                     "{hat(%s)[%s]^{trans}}(phi)",
+                     "translation-corrected estimate of %s",
+                     "trans",
+                     ratio=ratio)
+  }
+  if(any(correction == "isotropic")) {
+    ## Ripley isotropic correction
+    XI <- ppp(close$xi, close$yi, window=W, check=FALSE)
+    DIJ <- close$d
+    edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1))
+    if(cumulative) {
+      wh <- whist(ANGLE, breaks$val, edgewt)
+      num.iso <- cumsum(wh)/mean(edgewt)
+    } else {
+      w <- edgewt/sum(edgewt)
+      kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit)
+      num.iso <- kd$y * nangles
+    }
+    den.iso <- nangles
+    OO <- bind.ratfv(OO,
+                     data.frame(iso=num.iso),
+                     den.iso,
+                     "{hat(%s)[%s]^{iso}}(phi)",
+                     "Ripley isotropic-corrected estimate of %s",
+                     "iso",
+                     ratio=ratio)
+  }
+  unitname(OO) <- switch(unit,
+                         degree = c("degree", "degrees"),
+                         radian = c("radian", "radians"))
+  return(OO)
+}
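+
+## Illustrative usage sketch ('redwood' is a standard spatstat dataset):
+##   O <- pairorient(redwood, 0.05, 0.15)  # pairs at distances in [0.05, 0.15]
+##   plot(O)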
diff --git a/R/pairpiece.R b/R/pairpiece.R
new file mode 100755
index 0000000..4cdc298
--- /dev/null
+++ b/R/pairpiece.R
@@ -0,0 +1,130 @@
+#
+#
+#    pairpiece.S
+#
+#    $Revision: 1.22 $	$Date: 2015/10/21 09:06:57 $
+#
+#    A pairwise interaction process with piecewise constant potential
+#
+#    PairPiece()   create an instance of the process
+#                 [an object of class 'interact']
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+PairPiece <- local({
+
+  # .... auxiliary functions ........
+  delP <- function(i, r) {
+    r <- r[-i]
+    nr <- length(r)
+    if(nr == 0) return(Poisson())
+    if(nr == 1) return(Strauss(r))
+    return(PairPiece(r))
+  }
+
+  # ..... template ..........
+
+  BlankPairPiece <- 
+  list(
+         name     = "Piecewise constant pairwise interaction process",
+         creator  = "PairPiece",
+         family   = "pairwise.family", # evaluated later
+         pot      = function(d, par) {
+                       r <- par$r
+                       nr <- length(r)
+                       out <- array(FALSE, dim=c(dim(d), nr))
+                       out[,,1] <-  (d < r[1])
+                       if(nr > 1) {
+                         for(i in 2:nr) 
+                           out[,,i] <- (d >= r[i-1]) & (d < r[i])
+                       }
+                       out
+                     },
+         par      = list(r = NULL), # filled in later
+         parnames = "interaction thresholds",
+         init     = function(self) {
+                      r <- self$par$r
+                      if(!is.numeric(r) || !all(r > 0))
+                       stop("interaction thresholds r must be positive numbers")
+                      if(length(r) > 1 && !all(diff(r) > 0))
+                        stop("interaction thresholds r must be strictly increasing")
+                    },
+         update = NULL,  # default OK
+         print = NULL,    # default OK
+         interpret =  function(coeffs, self) {
+           r <- self$par$r
+           npiece <- length(r)
+           # extract coefficients
+           gammas <- exp(as.numeric(coeffs))
+           # name them
+           gn <- gammas
+           names(gn) <- paste("[", c(0,r[-npiece]),",", r, ")", sep="")
+           #
+           return(list(param=list(gammas=gammas),
+                       inames="interaction parameters gamma_i",
+                       printable=dround(gn)))
+         },
+        valid = function(coeffs, self) {
+           # interaction parameters gamma
+           gamma <- (self$interpret)(coeffs, self)$param$gammas
+           if(!all(is.finite(gamma))) return(FALSE)
+           return(all(gamma <= 1) || gamma[1] == 0)
+        },
+        project = function(coeffs, self){
+           # interaction parameters gamma
+           gamma <- (self$interpret)(coeffs, self)$param$gammas
+           # interaction thresholds r[i]
+           r <- self$par$r
+           # check for NA or Inf
+           bad <- !is.finite(gamma)
+           # gamma > 1 forbidden unless hard core
+           ishard <- is.finite(gamma[1]) && (gamma[1] == 0)
+           if(!ishard)
+             bad <- bad | (gamma > 1)
+           if(!any(bad))
+             return(NULL)
+           if(spatstat.options("project.fast") || sum(bad) == 1) {
+             # remove smallest threshold with an unidentifiable parameter
+             firstbad <- min(which(bad))
+             return(delP(firstbad, r))
+           } else {
+             # consider all candidate submodels
+             subs <- lapply(which(bad), delP, r=r)
+             return(subs)
+           }
+        },
+        irange = function(self, coeffs=NA, epsilon=0, ...) {
+          r <- self$par$r
+          if(all(is.na(coeffs)))
+            return(max(r))
+          gamma <- (self$interpret)(coeffs, self)$param$gammas
+          gamma[is.na(gamma)] <- 1
+          active <- (abs(log(gamma)) > epsilon)
+          if(!any(active))
+            return(0)
+          else return(max(r[active]))
+        },
+       Mayer=function(coeffs, self) {
+         # second Mayer cluster integral
+         r     <- self$par$r
+         gamma <- (self$interpret)(coeffs, self)$param$gammas
+         # areas of annuli between r[i-1], r[i]
+         areas <- pi * diff(c(0,r)^2)
+         return(sum(areas * (1-gamma)))
+       },
+       version=NULL # filled in later
+       )
+  class(BlankPairPiece) <- "interact"
+
+  PairPiece <- function(r) {
+    instantiate.interact(BlankPairPiece, list(r=r))
+  }
+
+  PairPiece <- intermaker(PairPiece, BlankPairPiece)
+  
+  PairPiece
+})
+
+                   
diff --git a/R/pairs.im.R b/R/pairs.im.R
new file mode 100755
index 0000000..a5a1cdc
--- /dev/null
+++ b/R/pairs.im.R
@@ -0,0 +1,134 @@
+#
+#   pairs.im.R
+#
+#   $Revision: 1.11 $   $Date: 2016/11/15 03:47:29 $
+#
+
+pairs.listof <- pairs.solist <- function(..., plot=TRUE) {
+  argh <- expandSpecialLists(list(...), special=c("solist", "listof"))
+  haslines <- any(sapply(argh, inherits, what="linim"))
+  if(haslines) {
+    do.call(pairs.linim, append(argh, list(plot=plot)))
+  } else {
+    do.call(pairs.im, append(argh, list(plot=plot)))
+  }
+}
+
+pairs.im <- function(..., plot=TRUE) {
+  argh <- list(...)
+  cl <- match.call()
+  ## unpack single argument which is a list of images
+  if(length(argh) == 1) {
+    arg1 <- argh[[1]]
+    if(is.list(arg1) && all(unlist(lapply(arg1, is.im))))
+      argh <- arg1
+  }
+  ## identify which arguments are images
+  isim <- unlist(lapply(argh, is.im))
+  nim <- sum(isim)
+  if(nim == 0) 
+    stop("No images provided")
+  ## separate image arguments from others
+  imlist <- argh[isim]
+  rest   <- argh[!isim]
+  ## determine image names for plotting
+  imnames <- names(imlist)
+  backupnames <- paste(cl)[c(FALSE, isim, FALSE)]
+  if(length(backupnames) != nim)
+    backupnames <- paste("V", seq_len(nim), sep="")
+  if(length(imnames) != nim)
+    imnames <- backupnames
+  else if(any(needname <- !nzchar(imnames)))
+    imnames[needname] <- backupnames[needname]
+  ## 
+  if(nim == 1) {
+    ## one image: plot histogram
+    hist(..., plot=plot)
+    ## save pixel values
+    Z <- imlist[[1]]
+    pixvals <- list(Z[])
+    names(pixvals) <- imnames
+  } else {
+    ## extract pixel rasters and reconcile them
+    imwins <- lapply(imlist, as.owin)
+    names(imwins) <- NULL
+    rasta    <- do.call(intersect.owin, imwins)
+    ## extract image pixel values on common raster
+    pixvals <- lapply(imlist, "[.im", i=rasta, raster=rasta, drop=TRUE)
+  }
+  ## combine into data frame
+  pixdf <- do.call(data.frame, pixvals)
+  ## pairs plot
+  if(plot && nim > 1)
+    do.call(pairs, resolve.defaults(list(x=pixdf),
+                                      rest,
+                                      list(labels=imnames, pch=".")))
+  labels <- resolve.defaults(rest, list(labels=imnames))$labels
+  colnames(pixdf) <- labels
+  class(pixdf) <- c("plotpairsim", class(pixdf))
+  return(invisible(pixdf))
+}
+
+plot.plotpairsim <- function(x, ...) {
+  xname <- short.deparse(substitute(x))
+  x <- as.data.frame(x)
+  if(ncol(x) == 1) {
+    do.call(hist.default,
+            resolve.defaults(list(x=x[,1]),
+                             list(...),
+                             list(main=xname)))
+  } else {
+    do.call(pairs.default,
+            resolve.defaults(list(x=x),
+                             list(...),
+                             list(pch=".")))
+  }
+  return(invisible(NULL))
+}
+
+print.plotpairsim <- function(x, ...) {
+  cat("Object of class plotpairsim\n")
+  cat(paste("contains pixel data for", commasep(sQuote(colnames(x))), "\n"))
+  return(invisible(NULL))
+}
+
+panel.image <- function(x, y, ..., sigma=NULL) {
+  usr <- par("usr"); on.exit(par(usr))
+  par(usr = c(0, 1, 0, 1))
+  xx <- scaletointerval(x)
+  yy <- scaletointerval(y)
+  p <- ppp(xx, yy, window=square(1), check=FALSE)
+  plot(density(p, sigma=sigma), add=TRUE, ...)
+}
+
+panel.contour <- function(x, y, ..., sigma=NULL) {
+  usr <- par("usr"); on.exit(par(usr))
+  par(usr = c(0, 1, 0, 1))
+  xx <- scaletointerval(x)
+  yy <- scaletointerval(y)
+  p <- ppp(xx, yy, window=square(1), check=FALSE)
+  Z <- density(p, sigma=sigma)
+  do.call(contour,
+          resolve.defaults(list(x=Z, add=TRUE),
+                           list(...),
+                           list(drawlabels=FALSE)))
+}
+
+panel.histogram <- function(x, ...) {
+  usr <- par("usr"); on.exit(par(usr))
+  par(usr = c(usr[1:2], 0, 1.5) )
+  h <- hist(x, plot = FALSE)
+  breaks <- h$breaks; nB <- length(breaks)
+  y <- h$counts; y <- y/max(y)
+  do.call(rect,
+          resolve.defaults(list(xleft   = breaks[-nB],
+                                ybottom = 0,
+                                xright  = breaks[-1],
+                                ytop    = y),
+                           list(...),
+                           list(col="grey")))
+}
+
+  
+  
+  
diff --git a/R/pairsat.family.R b/R/pairsat.family.R
new file mode 100755
index 0000000..a15e2b8
--- /dev/null
+++ b/R/pairsat.family.R
@@ -0,0 +1,252 @@
+#
+#
+#    pairsat.family.S
+#
+#    $Revision: 1.44 $	$Date: 2016/02/11 09:36:11 $
+#
+#    The saturated pairwise interaction family of point process models
+#
+#    (an extension of Geyer's saturation process to all pairwise interactions)
+#
+#    pairsat.family:         object of class 'isf'
+#                     defining saturated pairwise interaction
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+pairsat.family <-
+  list(
+         name  = "saturated pairwise",
+         print = function(self) {
+                      cat("Saturated pairwise interaction family\n")
+         },
+         eval  = function(X,U,EqualPairs,pairpot,potpars,correction,
+                          ..., Reach=NULL,
+                               precomputed=NULL, savecomputed=FALSE,
+                               halfway=FALSE) {
+  #
+  # This is the eval function for the `pairsat' family.
+  # 
+  # This internal function is not meant to be called by the user.
+  # It is called by mpl.prepare() during execution of ppm().
+  #         
+  # The eval functions perform all the manipulations that are common to
+  # a given class of interactions. 
+  #
+  # For the `pairsat' family of pairwise-interaction processes,
+  # this eval function computes the distances between points,
+  # invokes 'pairpot' to evaluate the potential between each pair of points,
+  # applies edge corrections, and then sums the pair potential terms
+  # applying the saturation threshold.
+  #
+  # ARGUMENTS:
+  #   All 'eval' functions have the following arguments 
+  #   which are called in sequence (without formal names)
+  #   by mpl.prepare():
+  #       
+  #   X           data point pattern                      'ppp' object
+  #   U           points at which to evaluate potential   list(x,y) suffices
+  #   EqualPairs  two-column matrix of indices i, j such that X[i] == U[j]
+  #               (or NULL, meaning all comparisons are FALSE)
+  #   pot         potential function 
+  #   potpars     auxiliary parameters for pot            list(......)
+  #   correction  edge correction type                    (string)
+  #
+  # VALUE:
+  #    All `eval' functions must return a        
+  #    matrix of values of the total potential
+  #    induced by the pattern X at each location given in U.
+  #    The rows of this matrix correspond to the rows of U (the sample points);
+  #    the k columns are the coordinates of the k-dimensional potential.
+  #         
+  ########################################################################
+  #
+  # POTENTIAL:
+  # The pair potential function 'pairpot' will be called as
+  #    pairpot(M, potpars)   where M is a matrix of interpoint distances.
+  # It must return a matrix with the same dimensions as M
+  # or an array with its first two dimensions the same as the dimensions of M.
+  #           
+  # NOTE:
+  #   Note the Geyer saturation threshold must be given in 'potpars$sat'
+
+  ##########################################################################
+
+           
+# coercion should be unnecessary, but this is useful for debugging
+X <- as.ppp(X)
+U <- as.ppp(U, X$window)   # i.e. X$window is DEFAULT window
+
+# saturation parameter(s)
+saturate <- potpars$sat
+
+# interaction distance of corresponding pairwise interaction
+PairReach <- if(!is.null(Reach) && is.finite(Reach)) Reach/2 else NULL
+
+if(is.null(saturate)) {
+  # pairwise interaction 
+  V <- pairwise.family$eval(X, U, EqualPairs,
+                            pairpot, potpars, correction, ...,
+                            Reach=PairReach,
+                            precomputed=precomputed,
+                            savecomputed=savecomputed)
+  return(V)
+}
+
+# first ensure all data points are included in the quadrature points
+nX <- npoints(X)
+nU <- npoints(U)
+Xseq  <- seq_len(nX)
+if(length(EqualPairs) == 0) {
+  # no data points currently included 
+  missingdata <- rep.int(TRUE, nX)
+} else {
+  Xused <- EqualPairs[,1]
+  missingdata <- !(Xseq %in% Xused)
+}
+somemissing <- any(missingdata)
+if(somemissing) {
+  # add the missing data points
+  originalrows <- seq_len(nU)
+  nmiss <- sum(missingdata)
+  U <- superimpose(U, X[missingdata], W=X$window, check=FALSE)
+  # correspondingly augment the list of equal pairs
+  newXindex <- Xseq[missingdata]
+  newUindex <- nU + seq_len(nmiss)
+  EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex))
+  nU <- nU + nmiss
+}
+
+# compute the pair potentials POT and the unsaturated potential sums V
+
+V <- pairwise.family$eval(X, U, EqualPairs, pairpot, potpars, correction,
+                          ..., Reach=PairReach)
+POT <- attr(V, "POT")
+
+computed <- attr(V, "computed")   # could be NULL
+
+#
+# V is a matrix with rows = quadrature points,
+#                    columns = coordinates of potential
+# POT is an array with rows = data points
+#                      columns = quadrature points
+#                      planes = coordinates of potential
+
+#################################################################
+################## saturation part ##############################
+#################################################################
+
+# check dimensions and ensure 'saturate' is a vector
+ns <- length(saturate)
+np <- ncol(V)
+if(ns == 1 && np > 1)
+  saturate <- rep.int(saturate, np)
+else if(ns != np)
+  stop("Length of vector of saturation parameters is incompatible with the pair potential", call.=FALSE)
+
+# replicate as a matrix and as an array
+saturate2 <- array(saturate[slice.index(V, 2)], dim=dim(V))
+saturate3 <- array(saturate[slice.index(POT, 3)], dim=dim(POT))
+#
+# (a) compute SATURATED potential sums
+V.sat <- pmin(V, saturate2)
+
+if(halfway)
+  return(V.sat)
+#
+# (b) compute effect of addition/deletion of dummy/data point j
+# on the UNSATURATED potential sum of each data point i
+#
+# Identify data points
+is.data <- seq_len(npoints(U)) %in% EqualPairs[,2] # logical vector corresp. to rows of V
+
+# Extract potential sums for data points only
+V.data <- V[is.data, , drop=FALSE]
+
+# replicate them so that V.dat.rep[i,j,k] = V.data[i, k]
+V.dat.rep <- aperm(array(V.data, dim=c(dim(V.data), U$n)), c(1,3,2))
+
+# make a logical array   col.is.data[i,j,k] = is.data[j]
+col.is.data <- array(is.data[slice.index(POT, 2)], dim=dim(POT))
+
+# compute value of unsaturated potential sum for each data point i
+# obtained after addition/deletion of each dummy/data point j
+
+if(!(correction %in% c("isotropic", "Ripley"))) {
+  dV <- ifelseNegPos(col.is.data, POT)
+  ##     equivalent to  ifelse(col.is.data, -POT, POT)
+} else {
+  ## Weighted potential is not exactly symmetric
+  dV <- POT
+  dV[col.is.data] <- - aperm(POT[ , is.data, , drop=FALSE], c(2,1,3))
+}
+V.after <- V.dat.rep + dV
+   
+#
+#
+# (c) difference of SATURATED potential sums for each data point i
+# before & after increment/decrement of each dummy/data point j
+#
+# saturated values after increment/decrement
+V.after.sat <- array(pmin.int(saturate3, V.after), dim=dim(V.after))
+# saturated values before
+V.dat.rep.sat <- array(pmin.int(saturate3, V.dat.rep), dim=dim(V.dat.rep))
+# difference
+V.delta <- V.after.sat - V.dat.rep.sat
+V.delta <- ifelseNegPos(col.is.data, V.delta)
+#
+# (d) Sum (c) over all data points i
+V.delta.sum <- apply(V.delta, c(2,3), sum)
+#
+# (e) Result
+V <- V.sat + V.delta.sum
+
+##########################################
+# remove rows corresponding to supplementary points
+if(somemissing)
+      V <- V[originalrows, , drop=FALSE]
+
+### tack on the saved computations from pairwise.family$eval
+if(savecomputed)
+  attr(V, "computed") <- computed
+
+return(V)
+
+},     ######### end of function $eval                            
+suffstat = function(model, X=NULL, callstring="pairsat.family$suffstat") {
+
+# for saturated pairwise models only  (possibly nonstationary)
+  verifyclass(model, "ppm")
+  if(!identical(model$interaction$family$name,"saturated pairwise"))
+    stop("Model is not a saturated pairwise interaction process") 
+
+  if(is.null(X)) {
+    X <- data.ppm(model)
+    modelX <- model
+  } else {
+    verifyclass(X, "ppp")
+    modelX <- update(model, X, method="mpl")
+  }
+
+  # find data points which do not contribute to pseudolikelihood
+  mplsubset <- getglmdata(modelX)$.mpl.SUBSET
+  mpldata   <- is.data(quad.ppm(modelX))
+  contribute <- mplsubset[mpldata]
+  
+  Empty <- X[integer(0)]
+  mom <- partialModelMatrix(X, Empty, model, "suffstat", halfway=TRUE)
+  # halfway=TRUE is passed to pairsat.family$eval
+  # and yields matrix of saturated potential sums 
+
+  # take only those terms that contribute to the pseudolikelihood
+  mom <- mom[contribute, , drop=FALSE]
+  
+  result <- apply(mom, 2, sum)
+  return(result)
+         
+
+} ######### end of function $suffstat
+)     ######### end of list
+
+class(pairsat.family) <- "isf"
diff --git a/R/pairwise.R b/R/pairwise.R
new file mode 100755
index 0000000..d20c652
--- /dev/null
+++ b/R/pairwise.R
@@ -0,0 +1,78 @@
+#
+#
+#    pairwise.S
+#
+#    $Revision: 1.10 $	$Date: 2015/10/21 09:06:57 $
+#
+#    Pairwise()    create a user-defined pairwise interaction process
+#                 [an object of class 'interact']
+#	
+# -------------------------------------------------------------------
+#	
+
+Pairwise <- function(pot, name = "user-defined pairwise interaction process",
+                     par = NULL, parnames=NULL,
+                     printfun) {
+
+  fop <- names(formals(pot))
+  if(!identical(all.equal(fop, c("d", "par")), TRUE)
+     && !identical(all.equal(fop, c("d", "tx", "tu", "par")), TRUE))
+    stop(paste("Formal arguments of pair potential function",
+               sQuote("pot"),
+               "must be either (d, par) or (d, tx, tu, par)"))
+
+  if(!is.null(parnames)) {
+    stopifnot(is.character(parnames))
+    if(is.null(par) || length(par) != length(parnames))
+      stop("par does not match parnames")
+  }
+  if(missing(printfun))
+    printfun <- function(self) {
+           cat("Potential function:\n")
+           print(self$pot)
+           if(!is.null(parnames <- self$parnames)) {
+             for(i in 1:length(parnames)) {
+               cat(paste(parnames[i], ":\t"))
+               pari <- self$par[[i]]
+               if(is.numeric(pari) && length(pari) == 1)
+                 cat(pari, "\n")
+               else 
+                 print(pari)
+             }
+           }
+         }
+
+  out <- 
+  list(
+         name     = name,
+         creator  = "Pairwise",
+         family   = pairwise.family,
+         pot      = pot,
+         par      = par,
+         parnames = parnames,
+         init     = NULL,
+         update   = function(self, ...){
+           do.call(Pairwise,
+                   resolve.defaults(list(...),
+                                    list(pot=self$pot, name=self$name,
+                                         par=self$par, parnames=self$parnames,
+                                         printfun=self$print)))
+         } , 
+         print    = printfun,
+         version  = versionstring.spatstat()
+  )
+  class(out) <- "interact"
+  return(out)
+}
+
+Pairwise <- intermaker(Pairwise,
+                       list(creator="Pairwise",
+                            name="user-defined pairwise interaction process",
+                            par=formals(Pairwise),
+                            parnames=list("the potential",
+                                "the name of the interaction",
+                                "the list of parameters",
+                                "a description of each parameter",
+                                "an optional print function")))
+
+
diff --git a/R/pairwise.family.R b/R/pairwise.family.R
new file mode 100755
index 0000000..fbe070a
--- /dev/null
+++ b/R/pairwise.family.R
@@ -0,0 +1,480 @@
+#
+#
+#    pairwise.family.S
+#
+#    $Revision: 1.64 $	$Date: 2016/07/15 10:22:11 $
+#
+#    The pairwise interaction family of point process models
+#
+#    pairwise.family:      object of class 'isf' defining pairwise interaction
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+pairwise.family <-
+  list(
+       name  = "pairwise",
+       print = function(self) {
+         cat("Pairwise interaction family\n")
+       },
+       plot = function(fint, ..., d=NULL, plotit=TRUE) {
+         verifyclass(fint, "fii")
+         inter <- fint$interaction
+         unitz <- unitname(fint)
+         if(is.null(inter) || is.null(inter$family)
+            || inter$family$name != "pairwise")
+           stop("Tried to plot the wrong kind of interaction")
+         # get fitted coefficients of interaction terms
+         # and set coefficients of offset terms to 1
+         Vnames <- fint$Vnames
+         IsOffset <- fint$IsOffset
+         coeff <- rep.int(1, length(Vnames))
+         names(coeff) <- Vnames
+         coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]]
+         # 
+         pairpot <- inter$pot
+         potpars <- inter$par
+         rmax <- reach(fint, epsilon=1e-3)
+         xlim <- list(...)$xlim
+         if(is.infinite(rmax)) {
+           if(!is.null(xlim))
+             rmax <- max(xlim)
+           else {
+             warning("Reach of interaction is infinite; need xlim to plot it")
+             return(invisible(NULL))
+           }
+         }
+         if(is.null(d)) {
+           dmax <- 1.25 * rmax
+           d <- seq(from=0, to=dmax, length.out=1024)
+         } else {
+           stopifnot(is.numeric(d) &&
+                     all(is.finite(d)) &&
+                     all(diff(d) > 0))
+           dmax <- max(d)
+         }
+         if(is.null(xlim))
+           xlim <- c(0, dmax)
+         types <- potpars$types
+         if(is.null(types)) {
+           # compute potential function as `fv' object
+           dd <- matrix(d, ncol=1)
+           p <- pairpot(dd, potpars)
+           if(length(dim(p))==2)
+             p <- array(p, dim=c(dim(p),1), dimnames=NULL)
+           if(dim(p)[3] != length(coeff))
+             stop("Dimensions of potential do not match coefficient vector")
+           for(k in seq_len(dim(p)[3])) 
+             p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] )
+           y <- exp(apply(p, c(1,2), sum))
+           ylim <- range(0, 1.1, y, finite=TRUE)
+           fun <- fv(data.frame(r=d, h=y, one=1),
+                     "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r,
+                     xlim, c("r", "h(r)", "1"),
+                     c("distance argument r",
+                       "pairwise interaction term h(r)",
+                       "reference value 1"),
+                     unitname=unitz)
+           if(plotit)
+             do.call(plot.fv,
+                     resolve.defaults(list(fun),
+                                      list(...),
+                                      list(ylim=ylim)))
+           return(invisible(fun))
+         } else{
+           # compute each potential and store in `fasp' object
+           if(!is.factor(types))
+             types <- factor(types, levels=types)
+           m <- length(types)
+           nd <- length(d)
+           dd <- matrix(rep.int(d, m), nrow=nd * m, ncol=m)
+           tx <- rep.int(types, rep.int(nd, m))
+           ty <- types
+           p <- pairpot(dd, tx, ty, potpars)
+           if(length(dim(p))==2)
+             p <- array(p, dim=c(dim(p),1), dimnames=NULL)
+           if(dim(p)[3] != length(coeff))
+             stop("Dimensions of potential do not match coefficient vector")
+           for(k in seq_len(dim(p)[3]))
+             p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] )
+           y <- exp(apply(p, c(1,2), sum))
+           ylim <- range(0, 1.1, y, finite=TRUE)
+           fns <- vector(m^2, mode="list")
+           which <- matrix(, m, m)
+           for(i in seq_len(m)) {
+             for(j in seq_len(m)) {
+               # relevant position in matrix
+               ijpos <- i + (j-1) * m
+               which[i,j] <- ijpos
+               # extract values of potential
+               yy <- y[tx == types[i], j]
+               # make fv object
+               fns[[ijpos]] <-
+                   fv(data.frame(r=d, h=yy, one=1),
+                      "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r,
+                      xlim, c("r", "h(r)", "1"),
+                      c("distance argument r",
+                        "pairwise interaction term h(r)",
+                        "reference value 1"),
+                      unitname=unitz)
+               #
+             }
+           }
+           funz <- fasp(fns, which=which,
+                        formulae=list(cbind(h, one) ~ r),
+                        title="Fitted pairwise interactions",
+                        rowNames=paste(types), colNames=paste(types))
+           if(plotit)
+             do.call(plot.fasp,
+                     resolve.defaults(list(funz),
+                                      list(...),
+                                      list(ylim=ylim)))
+           return(invisible(funz))
+         }
+       },
+       # end of function `plot'
+       # ----------------------------------------------------
+       eval  = function(X,U,EqualPairs,pairpot,potpars,correction,
+           ..., Reach=NULL, precomputed=NULL, savecomputed=FALSE,
+           pot.only=FALSE) {
+  #
+  # This is the eval function for the `pairwise' family.
+  # 
+  # This internal function is not meant to be called by the user.
+  # It is called by mpl.prepare() during execution of ppm().
+  #         
+  # The eval functions perform all the manipulations that are common to
+  # a given class of interactions. 
+  #
+  # For the `pairwise' family of pairwise-interaction processes,
+  # this eval function computes the distances between points,
+  # invokes 'pairpot' to evaluate the potential between each pair of points,
+  # applies edge corrections, and then sums the pair potential terms.
+  #
+  # ARGUMENTS:
+  #   All 'eval' functions have the following arguments 
+  #   which are called in sequence (without formal names)
+  #   by mpl.prepare():
+  #       
+  #   X           data point pattern                      'ppp' object
+  #   U           points at which to evaluate potential   list(x,y) suffices
+  #   EqualPairs  two-column matrix of indices i, j such that X[i] == U[j]
+  #               (or NULL, meaning all comparisons are FALSE)
+  #   pot         potential function 
+  #   potpars     auxiliary parameters for pot            list(......)
+  #   correction  edge correction type                    (string)
+  #
+  # VALUE:
+  #    All `eval' functions must return a        
+  #    matrix of values of the total potential
+  #    induced by the pattern X at each location given in U.
+  #    The rows of this matrix correspond to the rows of U (the sample points);
+  #    the k columns are the coordinates of the k-dimensional potential.
+  #
+  ##########################################################################
+
+  # POTENTIAL:
+  #
+  # The pair potential function 'pairpot' should be either
+  #    pairpot(d, par)            [for potentials that don't depend on marks]
+  # or
+  #    pairpot(d, tx, tu, par)    [for potentials that do depend on mark]
+  # where d is a matrix of interpoint distances,
+  # tx is the vector of types for the data points,
+  # tu is the vector of types for all quadrature points          
+  # and
+  #  par is a list of parameters for the potential.
+  #         
+  # It must return a matrix with the same dimensions as d
+  # or an array with its first two dimensions the same as the dimensions of d.
+
+fop <- names(formals(pairpot))
+if(identical(all.equal(fop, c("d", "par")), TRUE))
+  marx <- FALSE
+else if(identical(all.equal(fop, c("d", "tx", "tu", "par")), TRUE))
+  marx <- TRUE
+else 
+  stop("Formal arguments of pair potential function are not understood")
+
+## edge correction argument
+
+if(length(correction) > 1)
+  stop("Only one edge correction allowed at a time!")
+
+if(!any(correction == c("periodic", "border", "translate", "translation", "isotropic", "Ripley", "none")))
+  stop(paste("Unrecognised edge correction", sQuote(correction)))
+
+ no.correction <- 
+
+#### Compute basic data
+
+   # Decide whether to apply faster algorithm using 'closepairs'
+   use.closepairs <-
+     (correction %in% c("none", "border", "translate", "translation")) &&
+     !is.null(Reach) && is.finite(Reach) &&
+     is.null(precomputed) && !savecomputed 
+
+if(!is.null(precomputed)) {
+  # precomputed
+  X <- precomputed$X
+  U <- precomputed$U
+  EqualPairs <- precomputed$E
+  M <- precomputed$M
+} else {
+  U <- as.ppp(U, X$window)   # i.e. X$window is DEFAULT window
+  if(!use.closepairs) 
+    # Form the matrix of distances
+    M <- crossdist(X, U, periodic=(correction=="periodic"))
+}
+
+nX <- npoints(X)
+nU <- npoints(U)
+dimM <- c(nX, nU)
+
+# Evaluate the pairwise potential without edge correction
+
+if(use.closepairs)
+  POT <- evalPairPotential(X,U,EqualPairs,pairpot,potpars,Reach)
+else if(!marx) 
+  POT <- pairpot(M, potpars)
+else
+  POT <- pairpot(M, marks(X), marks(U), potpars)
+
+# Determine whether each column of potential is an offset
+
+  IsOffset <- attr(POT, "IsOffset")
+
+# Check errors and special cases
+
+if(!is.matrix(POT) && !is.array(POT)) {
+  if(length(POT) == 0 && X$n ==  0) # empty pattern
+    POT <- array(POT, dim=c(dimM,1))
+  else
+    stop("Pair potential did not return a matrix or array")
+}
+
+if(length(dim(POT)) == 1 || any(dim(POT)[1:2] != dimM)) {
+        whinge <- paste0(
+           "The pair potential function ",short.deparse(substitute(pairpot)),
+           " must produce a matrix or array with its first two dimensions\n",
+           "the same as the dimensions of its input.\n")
+	stop(whinge)
+}
+
+# make it a 3D array
+if(length(dim(POT))==2)
+        POT <- array(POT, dim=c(dim(POT),1), dimnames=NULL)
+                          
+if(correction == "translate" || correction == "translation") {
+        edgewt <- edge.Trans(X, U)
+        # sanity check ("everybody knows there ain't no...")
+        if(!is.matrix(edgewt))
+          stop("internal error: edge.Trans() did not yield a matrix")
+        if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x))
+          stop("internal error: edge weights matrix returned by edge.Trans() has wrong dimensions")
+        POT <- c(edgewt) * POT
+} else if(correction == "isotropic" || correction == "Ripley") {
+        # weights are required for contributions from QUADRATURE points
+        edgewt <- t(edge.Ripley(U, t(M), X$window))
+        if(!is.matrix(edgewt))
+          stop("internal error: edge.Ripley() did not return a matrix")
+        if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x))
+          stop("internal error: edge weights matrix returned by edge.Ripley() has wrong dimensions")
+        POT <- c(edgewt) * POT
+}
+
+# No pair potential term between a point and itself
+if(length(EqualPairs) > 0) {
+  nplanes <- dim(POT)[3]
+  for(k in 1:nplanes)
+    POT[cbind(EqualPairs, k)] <- 0
+}
+
+# Return just the pair potential?
+if(pot.only)
+  return(POT)
+
+# Sum the pairwise potentials 
+
+V <- apply(POT, c(2,3), sum)
+
+# attach the original pair potentials
+attr(V, "POT") <- POT
+
+# attach the offset identifier
+attr(V, "IsOffset") <- IsOffset
+
+# pass computed information out the back door
+if(savecomputed)
+  attr(V, "computed") <- list(E=EqualPairs, M=M)
+return(V)
+
+},
+######### end of function $eval
+       suffstat = function(model, X=NULL, callstring="pairwise.family$suffstat") {
+# for pairwise models only  (possibly nonstationary)
+  verifyclass(model, "ppm")
+  if(!identical(model$interaction$family$name,"pairwise"))
+    stop("Model is not a pairwise interaction process")
+
+  if(is.null(X)) {
+    X <- data.ppm(model)
+    modelX <- model
+  } else {
+    verifyclass(X, "ppp")
+    modelX <- update(model, X, method="mpl")
+  }
+
+  # find data points which do not contribute to pseudolikelihood
+  mplsubset <- getglmdata(modelX)$.mpl.SUBSET
+  mpldata   <- is.data(quad.ppm(modelX))
+  contribute <- mplsubset[mpldata]
+
+  Xin  <- X[contribute]
+  Xout <- X[!contribute]
+  
+  # partial model matrix arising from ordered pairs of data points
+  # which both contribute to the pseudolikelihood
+  Empty <- X[numeric(0)]
+  momINxIN <- partialModelMatrix(Xin, Empty, model, "suffstat")
+
+  # partial model matrix arising from ordered pairs of data points
+  # the second of which does not contribute to the pseudolikelihood
+  mom <- partialModelMatrix(Xout, Xin, model, "suffstat")
+  indx <- Xout$n + seq_len(Xin$n)
+  momINxOUT <- mom[indx, , drop=FALSE]
+
+  # parameters
+  order2  <- names(coef(model)) %in% model$internal$Vnames
+  order1  <- !order2
+
+  result <- 0 * coef(model)
+  
+  if(any(order1)) {
+    # first order contributions can be determined from INxIN
+    o1terms  <- momINxIN[ , order1, drop=FALSE]
+    o1sum   <- colSums(o1terms)
+    result[order1] <- o1sum
+  }
+  if(any(order2)) {
+    # adjust for double counting of ordered pairs in INxIN but not INxOUT
+    o2termsINxIN  <- momINxIN[, order2, drop=FALSE]
+    o2termsINxOUT <- momINxOUT[, order2, drop=FALSE]
+    o2sum   <- colSums(o2termsINxIN)/2 + colSums(o2termsINxOUT)
+    result[order2] <- o2sum
+  }
+
+  return(result)
+  },
+######### end of function $suffstat
+  delta2 = function(X, inte, correction, ...) {
+  # Sufficient statistic for second order conditional intensity
+  # for pairwise interaction processes
+  # Equivalent to evaluating pair potential.
+    X <- as.ppp(X)
+    seqX <- seq_len(npoints(X))
+    E <- cbind(seqX, seqX)
+    R <- reach(inte)
+    result <- pairwise.family$eval(X,X,E,
+                                 inte$pot,inte$par,
+                                 correction,
+                                 pot.only=TRUE,
+                                 Reach=R)
+  }
+######### end of function $delta2
+)
+######### end of list
+
+class(pairwise.family) <- "isf"
+
+
+# externally visible
+
+evalPairPotential <- function(X, P, E, pairpot, potpars, R) {
+  # Evaluate pair potential without edge correction weights
+  nX <- npoints(X)
+  nP <- npoints(P)
+  stopifnot(is.function(pairpot))
+  fop <- names(formals(pairpot))
+  if(identical(all.equal(fop, c("d", "par")), TRUE)) {
+    unmarked <- TRUE
+  } else if(identical(all.equal(fop, c("d", "tx", "tu", "par")), TRUE)) {
+    unmarked <- FALSE
+  } else 
+  stop("Formal arguments of pair potential function are not understood")
+  # determine dimension of potential, etc
+  fakePOT <- if(unmarked) pairpot(matrix(, 0, 0), potpars) else 
+                          pairpot(matrix(, 0, 0),
+                                  marks(X)[integer(0)],
+                                  marks(P)[integer(0)],
+                                  potpars)
+  IsOffset <- attr(fakePOT, "IsOffset")
+  fakePOT <- ensure3Darray(fakePOT)
+  Vnames <- dimnames(fakePOT)[[3]]
+  p <- dim(fakePOT)[3]
+  # Identify close pairs X[i], P[j]
+  cl <- crosspairs(X, P, R, what="ijd")
+  I <- cl$i
+  J <- cl$j
+  D <- matrix(cl$d, ncol=1)
+  # deal with empty cases
+  if(nX == 0 || nP == 0 || length(I) == 0) {
+    result <- array(0, dim=c(nX, nP, p), dimnames=list(NULL, NULL, Vnames))
+    attr(result, "IsOffset") <- IsOffset
+    return(result)
+  }
+  # evaluate potential for close pairs
+  # POT is a 1-column matrix or array, with rows corresponding to close pairs
+  if(unmarked) {
+    # unmarked
+    POT <- pairpot(D, potpars)
+    IsOffset <- attr(POT, "IsOffset")
+  } else {
+    # marked
+    marX <- marks(X)
+    marP <- marks(P)
+    if(!identical(levels(marX), levels(marP)))
+      stop("Internal error: marks of X and P have different levels")
+    types <- levels(marX)
+    mI <- marX[I]
+    mJ <- marP[J]
+    POT <- NULL
+    # split data by type of P[j]
+    for(k in types) {
+      relevant <- which(mJ == k)
+      if(length(relevant) > 0) {
+        fk <- factor(k, levels=types)
+        POTk <- pairpot(D[relevant,  , drop=FALSE], mI[relevant], fk, potpars)
+        POTk <- ensure3Darray(POTk)
+        if(is.null(POT)) {
+          # use first result of 'pairpot' to determine dimension
+          POT <- array(, dim=c(length(I), 1, dim(POTk)[3]))
+          # capture information about offsets, and names of interaction terms
+          IsOffset <- attr(POTk, "IsOffset")
+          Vnames <- dimnames(POTk)[[3]]
+        }
+        # insert values just computed
+        POT[relevant, , ] <- POTk
+      }
+    }
+  }
+  POT <- ensure3Darray(POT)
+  p <- dim(POT)[3]
+  # create result array
+  result <- array(0, dim=c(npoints(X), npoints(P), p),
+                  dimnames=list(NULL, NULL, Vnames))
+  # insert results
+  II <- rep(I, p)
+  JJ <- rep(J, p)
+  KK <- rep(1:p, each=length(I))
+  result[cbind(II,JJ,KK)] <- POT
+  # finally identify identical pairs and set value to 0
+  if(length(E) > 0) {
+    E.rep <- apply(E, 2, rep, times=p)
+    p.rep <- rep(1:p, each=nrow(E))
+    result[cbind(E.rep, p.rep)] <- 0
+  }
+  attr(result, "IsOffset") <- IsOffset
+  return(result)
+}
diff --git a/R/parameters.R b/R/parameters.R
new file mode 100644
index 0000000..96b6805
--- /dev/null
+++ b/R/parameters.R
@@ -0,0 +1,30 @@
+##
+##    parameters.R
+##
+##   $Revision: 1.2 $ $Date: 2015/05/08 04:27:15 $
+##
+
+parameters <- function(model, ...) {
+  UseMethod("parameters")
+}
+
+parameters.ppm <- function(model, ...) {
+  ss <- summary(model, quick="no variances")
+  out <- c(list(trend=ss$trend$value),
+           ss$covfunargs,
+           ss$interaction$interaction$par,
+           ss$interaction$sensible$param)
+  return(out)
+}
+
+parameters.kppm <- function(model, ...) {
+  ss <- summary(model, quick="no variances")
+  out <- c(list(trend=ss$trend$trend$value),
+           ss$covfunargs,
+           ss$clustpar,
+           ss$clustargs,
+           list(mu=ss$mu))
+  return(out)
+}
+
+
diff --git a/R/parres.R b/R/parres.R
new file mode 100755
index 0000000..288ed18
--- /dev/null
+++ b/R/parres.R
@@ -0,0 +1,592 @@
+#
+# parres.R
+#
+# code to plot transformation diagnostic
+#
+#   $Revision: 1.9 $  $Date: 2016/12/30 01:44:07 $
+#
+
+parres <- function(model, covariate, ...,
+                   smooth.effect=FALSE, subregion=NULL,
+                   bw="nrd0", adjust=1, from=NULL,to=NULL, n=512,
+                   bw.input = c("points", "quad"),
+                   bw.restrict = FALSE,
+                   covname) {  
+
+  modelname <- deparse(substitute(model))
+  if(missing(covname)) 
+    covname <- sensiblevarname(deparse(substitute(covariate)), "X")
+  callstring <- paste(deparse(sys.call()), collapse = "")
+
+  if(is.marked(model))
+    stop("Sorry, this is not yet implemented for marked models")
+      
+  if(!is.null(subregion)) 
+    stopifnot(is.owin(subregion))
+  
+  if(is.null(adjust)) adjust <- 1
+
+  bw.input <- match.arg(bw.input)
+  
+  # validate model
+  stopifnot(is.ppm(model))
+  modelcall <- model$callstring
+  if(is.null(modelcall))
+    modelcall <- model$call
+  if(is.null(getglmfit(model)))
+    model <- update(model, forcefit=TRUE)
+  
+  # extract spatial locations
+  Q <- quad.ppm(model)
+#  datapoints <- Q$data
+  quadpoints <- union.quad(Q)
+  Z <- is.data(Q)
+  wts <- w.quad(Q)
+  nQ <- npoints(quadpoints)
+  # fitted intensity
+  lam <- fitted(model, type="trend")
+  # subset of quadrature points used to fit model
+  subQset <- getglmsubset(model)
+  if(is.null(subQset)) subQset <- rep.int(TRUE, nQ)
+  # restriction to subregion
+  insubregion <- if(!is.null(subregion)) {
+    inside.owin(quadpoints, w=subregion)
+  } else rep.int(TRUE, nQ)
+
+  ################################################################
+  # Inverse lambda residuals
+
+  rx <- residuals(model, type="inverse")
+  resid <- with(rx, "increment")
+
+  #################################################################
+  # identify the covariate
+  #
+  if(length(covariate) == 0)
+    stop("No covariate specified")
+
+  covtype <- "unknown"
+
+  if(!is.character(covariate)) {
+    # Covariate is some kind of data, treated as external covariate
+    covtype <- "external"
+    beta <- 0
+    covvalues <- evalCovariate(covariate, quadpoints)
+  } else {
+    # Argument is name of covariate
+    covname <- covariate
+    if(length(covname) > 1)
+      stop("Must specify only one covariate")
+    # 'original covariates'
+    orig.covars <- variablesinformula(formula(model))
+    # 'canonical covariates'
+    canon.covars <- names(coef(model))
+    # offsets
+    offset.covars <- offsetsinformula(formula(model))
+    # 
+    if(covname %in% orig.covars) {
+      # one of the original covariates
+      covtype <- "original"
+      covvalues <- evalCovariate(findCovariate(covname, model), quadpoints)
+    } else if(covname %in% canon.covars) {
+      # one of the canonical covariates
+      covtype <- "canonical"
+      mm <- model.matrix(model)
+      covvalues <- mm[, covname]
+      ## extract the corresponding coefficient
+      beta <- coef(model)[[covname]]
+    } else if(covname %in% offset.covars) {
+      # an offset term only
+      covtype <- "offset"
+      mf <- model.frame(model, subset=rep.int(TRUE, n.quad(Q)))
+      if(!(covname %in% colnames(mf)))
+        stop(paste("Internal error: offset term", covname,
+                   "not found in model frame"))
+      covvalues <- mf[, covname]
+      ## fixed coefficient (not an estimated parameter)
+      beta <- 1
+    } else{
+      # must be an external covariate (i.e. not used in fitted model)
+      covtype <- "external"
+      beta <- 0
+      covvalues <- evalCovariate(findCovariate(covname, model), quadpoints)
+    }
+  }
+  # validate covvalues
+  #
+  if(is.null(covvalues))
+    stop("Unable to extract covariate values")
+  if(length(covvalues) != npoints(quadpoints))
+    stop(paste("Internal error: number of covariate values =",
+               length(covvalues), "!=", npoints(quadpoints),
+               "= number of quadrature points"))
+  vtype <- typeof(covvalues)
+  switch(vtype,
+         real=,
+         double = { },
+         integer = {
+           warning("Covariate is integer-valued")
+         },
+         stop(paste("Cannot handle covariate of type", sQuote(vtype))))
+  
+  #################################################################
+  # Compute covariate effect
+
+  if(covtype != "original") {
+    effect <- beta * covvalues
+    mediator <- covtype
+    effectfundata <- list(beta=beta)
+    effectFun <- function(x) { (effectfundata$beta) * x }
+    isoffset <- (covtype == "offset")
+    names(isoffset) <- covname
+  } else {
+    ## `original' covariate (passed as argument to ppm)
+    ## may determine one or more canonical covariates and/or offsets
+    origcovdf <- getppmOriginalCovariates(model)[insubregion, , drop=FALSE]
+    isconstant <- lapply(origcovdf,
+                         function(z) { length(unique(z)) == 1 })
+    ##
+    ## Initialise
+    termnames <- character(0)
+    termbetas <- numeric(0)
+    isoffset <- logical(0)
+    mediator <- character(0)
+    effect <- 0
+    effectFun <- function(x) { effectFun.can(x) + effectFun.off(x) }
+    effectFun.can <- effectFun.off <- function(x) { 0 * x }
+    ## Identify relevant canonical covariates
+    dmat <- model.depends(model)
+    if(!(covname %in% colnames(dmat)))
+      stop("Internal error: cannot match covariate names")
+    othercov <- (colnames(dmat) != covname)
+    relevant <- dmat[, covname]
+    if(any(relevant)) {
+      # original covariate determines one or more canonical covariates
+      mediator <- "canonical"
+      # check whether covariate is separable
+      if(any(conflict <- dmat[relevant, othercov, drop=FALSE])) {
+        ## identify entangled covariates
+        entangled <- colnames(conflict)[matcolany(conflict)]
+        ## not problematic if constant
+        ok <- unlist(isconstant[entangled])
+        conflict[ , ok] <- FALSE
+        ## re-test
+        if(any(conflict)) {
+          conflictterms <- matrowany(conflict)
+          conflictcovs  <- matcolany(conflict)
+          stop(paste("The covariate", sQuote(covname),
+                     "cannot be separated from the",
+                     ngettext(sum(conflictcovs), "covariate", "covariates"),
+                     commasep(sQuote(colnames(conflict)[conflictcovs])),
+                     "in the model",
+                     ngettext(sum(conflictterms), "term", "terms"),
+                     commasep(sQuote(rownames(conflict)[conflictterms]))
+                     ))
+        }
+      }
+      # 
+      termnames <- rownames(dmat)[relevant]
+      isoffset <- rep.int(FALSE, length(termnames))
+      names(isoffset) <- termnames
+      # Extract relevant canonical covariates
+      mm <-  model.matrix(model)
+      termvalues <- mm[, relevant, drop=FALSE]
+      # extract corresponding coefficients
+      termbetas <- coef(model)[relevant]
+      # evaluate model effect
+      effect <- as.numeric(termvalues %*% termbetas)
+      # check length
+      if(length(effect) != npoints(quadpoints))
+        stop(paste("Internal error: number of values of fitted effect =",
+                   length(effect), "!=", npoints(quadpoints),
+                   "= number of quadrature points"))
+      # Trap loglinear case
+      if(length(termnames) == 1 && identical(termnames, covname)) {
+        covtype <- "canonical"
+        beta <- termbetas
+      }
+      # construct the corresponding function
+      gd <- getglmdata(model)
+      goodrow <- min(which(complete.cases(gd)))
+      defaultdata <- gd[goodrow, , drop=FALSE]
+      effectfundata.can <- list(covname=covname,
+                            fmla = formula(model),
+                            termbetas = termbetas,
+                            defaultdata = defaultdata,
+                            relevant = relevant,
+                            termnames = termnames)
+      effectFun.can <- function(x) {
+        d <- effectfundata.can
+        # replicate default data to correct length
+        df <- as.data.frame(lapply(d$defaultdata, rep, length(x)))
+        # overwrite value of covariate with new data
+        df[,covname] <- x
+        # construct model matrix 
+        m <- model.matrix(d$fmla, df)
+        # check it conforms to expected structure
+        if(!identical(colnames(m)[d$relevant], d$termnames))
+          stop("Internal error: mismatch in term names in effectFun")
+        me <- m[, d$relevant, drop=FALSE]
+        y <- me %*% as.matrix(d$termbetas, ncol=1) 
+        return(y)
+      }
+    }
+    if(!is.null(offmat <- attr(dmat, "offset")) &&
+       any(relevant <- offmat[, covname])) {
+      # covariate appears in a model offset term
+      mediator <- c(mediator, "offset")
+      # check whether covariate is separable
+      if(any(conflict<- offmat[relevant, othercov, drop=FALSE])) {
+        ## identify entangled covariates
+        entangled <- colnames(conflict)[matcolany(conflict)]
+        ## not problematic if constant
+        ok <- unlist(isconstant[entangled])
+        conflict[ , ok] <- FALSE
+        ## re-test
+        if(any(conflict)) {
+          conflictterms <- matrowany(conflict)
+          conflictcovs  <- matcolany(conflict)
+          stop(paste("The covariate", sQuote(covname),
+                     "cannot be separated from the",
+                     ngettext(sum(conflictcovs), "covariate", "covariates"),
+                     commasep(sQuote(colnames(conflict)[conflictcovs])),
+                     "in the model",
+                     ngettext(sum(conflictterms), "term", "terms"),
+                     commasep(sQuote(rownames(conflict)[conflictterms]))
+                     ))
+        }
+      }
+      # collect information about relevant offset 
+      offnames <- rownames(offmat)[relevant]
+      termnames <- c(termnames, offnames)
+      noff <- length(offnames)
+      termbetas <- c(termbetas, rep.int(1, noff))
+      isoffset  <- c(isoffset, rep.int(TRUE, noff))
+      names(termbetas) <- names(isoffset) <- termnames
+      # extract values of relevant offset 
+      mf <- model.frame(model, subset=rep.int(TRUE, n.quad(Q)))
+      if(any(nbg <- !(offnames %in% colnames(mf))))
+        stop(paste("Internal error:",
+                   ngettext(sum(nbg), "offset term", "offset terms"),
+                   offnames[nbg],
+                   "not found in model frame"))
+      effex <- mf[, offnames, drop=FALSE]
+      effect <- effect + rowSums(effex)
+      #
+      # construct the corresponding function
+      gd <- getglmdata(model)
+      goodrow <- min(which(complete.cases(gd)))
+      defaultdata <- gd[goodrow, , drop=FALSE]
+      effectfundata.off <- list(covname=covname,
+                                fmla = formula(model),
+                                defaultdata = defaultdata,
+                                offnames = offnames)
+      effectFun.off <- function(x) {
+        d <- effectfundata.off
+        # replicate default data to correct length
+        df <- as.data.frame(lapply(d$defaultdata, rep, length(x)))
+        # overwrite value of covariate with new data
+        df[,covname] <- x
+        # construct model FRAME
+        mf <- model.frame(d$fmla, df)
+        # check it conforms to expected structure
+        if(!all(d$offnames %in% colnames(mf))) 
+          stop("Internal error: mismatch in term names in effectFun")
+        moff <- mf[, d$offnames, drop=FALSE]
+        y <- rowSums(moff)
+        return(y)
+      }
+    }
+    if(length(termnames) == 0) {
+      # Sanity clause
+      # (everyone knows there ain't no Sanity Clause...)
+      warning(paste("Internal error: could not find any",
+                    "canonical covariates or offset terms",
+                    "that depended on the covariate", sQuote(covname)))
+      # Assume it's an external covariate (i.e. not used in fitted model)
+      covtype <- "external"
+      beta <- 0
+      effect <- beta * covvalues
+      effectFun <- function(x) { 0 * x }
+      isoffset <- FALSE
+      names(isoffset) <- covname
+    }
+  }
+
+  #### Canonical covariates and coefficients
+  switch(covtype,
+         original={
+           cancovs <- termnames
+           canbeta <- termbetas
+         },
+         offset = ,
+         canonical={
+           cancovs <- covname
+           canbeta <- beta
+         },
+         external={
+           cancovs <- canbeta <- NA
+         })
+  
+  #################################################################
+  # Validate covariate values
+
+  # locations that must have finite values 
+  operative <- if(bw.restrict) insubregion & subQset else subQset
+
+  nbg.cov <- !is.finite(covvalues)
+  if(any(offending <- nbg.cov & operative)) {
+    warning(paste(sum(offending), "out of", length(offending),
+                  "covariate values discarded because",
+                  ngettext(sum(offending), "it is", "they are"),
+                  "NA or infinite"))
+  }
+
+  nbg.eff <- !is.finite(effect)
+  if(any(offending <- nbg.eff & operative)) {
+    warning(paste(sum(offending), "out of", length(offending),
+                  "values of fitted effect discarded because",
+                  ngettext(sum(offending), "it is", "they are"),
+                  "NA or infinite"))
+  }
+  
+  #################################################################
+  # Restrict data to 'operative' points
+  #                            with finite values
+  
+  nbg <- nbg.cov | nbg.eff
+  ok <- !nbg & operative
+  
+  Q           <- Q[ok]
+  covvalues   <- covvalues[ok]
+  quadpoints  <- quadpoints[ok]
+  resid       <- resid[ok]
+  lam         <- lam[ok]
+  effect      <- effect[ok]
+  insubregion <- insubregion[ok]
+  Z           <- Z[ok]
+  wts         <- wts[ok]
+
+  ####################################################
+  # assemble data for smoothing 
+  x <- covvalues
+  y <- resid/wts
+  if(smooth.effect) y <- y + effect 
+  w <- wts
+  #
+  if(makefrom <- is.null(from))
+    from <- min(x)
+  if(maketo <- is.null(to))
+    to   <- max(x)
+
+  ####################################################
+  # determine smoothing bandwidth
+  #     from 'operative' data
+
+  switch(bw.input,
+         quad = {
+           # bandwidth selection from covariate values at all quadrature points
+           numer <- unnormdensity(x, weights=w*y,
+                                  bw=bw, adjust=adjust,
+                                  n=n,from=from,to=to, ...)
+           sigma <- numer$bw
+         },
+         points= {
+           # bandwidth selection from covariate values at data points
+           fake <- unnormdensity(x[Z], weights=1/lam[Z],
+                                 bw=bw, adjust=adjust,
+                                 n=n,from=from,to=to, ...)
+           sigma <- fake$bw
+           numer <- unnormdensity(x, weights=w*y,
+                                  bw=sigma, adjust=1,
+                                  n=n,from=from,to=to, ...)
+         })
+
+
+  ####################################################
+  # Restrict data and recompute numerator if required
+
+  if(!is.null(subregion) && !bw.restrict) {
+    # Bandwidth was computed on all data
+    # Restrict to subregion and recompute numerator
+    x   <- x[insubregion]
+    y   <- y[insubregion]
+    w   <- w[insubregion]
+    Z   <- Z[insubregion]
+    lam <- lam[insubregion]
+    if(makefrom) from <- min(x)
+    if(maketo)     to <- max(x)
+    numer <- unnormdensity(x, weights=w*y,
+                           bw=sigma, adjust=1,
+                           n=n,from=from,to=to, ...)
+  }
+
+  ####################################################
+  # Compute denominator
+
+  denom <- unnormdensity(x, weights=w,
+                         bw=sigma, adjust=1,
+                         n=n,from=from,to=to, ...)
+
+  
+  ####################################################
+  # Determine recommended plot range
+
+  xr <- range(as.vector(x[Z]), finite=TRUE)
+  alim <- xr + 0.1 * diff(xr) * c(-1,1)
+  alim <- intersect.ranges(alim, c(from, to))
+  
+  ####################################################
+  # Compute terms 
+
+  interpolate <- function(x,y) {
+    if(inherits(x, "density") && missing(y))
+      approxfun(x$x, x$y, rule=2)
+    else 
+      approxfun(x, y, rule=2)
+  }
+  numfun <- interpolate(numer)
+  denfun <- interpolate(denom)
+  xxx <- numer$x
+  yyy <- numfun(xxx)/denfun(xxx)
+  # variance estimation
+  # smooth 1/lambda(u) with smaller bandwidth
+  tau   <- sigma/sqrt(2)
+  varnumer <- unnormdensity(x, weights=w/lam,
+                            bw=tau, adjust=1,
+                            n=n,from=from,to=to, ...)
+  varnumfun <- interpolate(varnumer)
+  varestxxx <- varnumfun(xxx)/(2 * sigma * sqrt(pi) * denfun(xxx)^2)
+  sd <- sqrt(varestxxx)
+  # alternative estimate of variance using data points only
+  varXnumer <- unnormdensity(x[Z], weights=1/lam[Z]^2,
+                             bw=tau, adjust=1,
+                             n=n,from=from,to=to, ...)
+  varXnumfun <- interpolate(varXnumer)
+  varXestxxx <- varXnumfun(xxx)/(2 * sigma * sqrt(pi) * denfun(xxx)^2)
+  sdX <- sqrt(varXestxxx)
+  # fitted effect
+  effxxx <- effectFun(xxx)
+  
+  # add fitted effect of covariate, if not added before smoothing
+  if(!smooth.effect)
+    yyy <- yyy + effxxx
+  
+  ####################################################
+  # pack into fv object
+  
+  df <- data.frame(xxx=xxx,
+                   h  =yyy,
+                   varh=varestxxx,
+                   hi=yyy+2*sd,
+                   lo=yyy-2*sd,
+                   hiX=yyy+2*sdX,
+                   loX=yyy-2*sdX,
+                   fit=effxxx)
+  # remove any funny characters in name of covariate (e.g. if it is an offset)
+  Covname <- make.names(covname)
+  names(df)[1] <- Covname
+  desc <- c(paste("covariate", sQuote(covname)),
+            "Smoothed partial residual",
+            "Variance",
+            "Upper limit of pointwise 5%% significance band (integral)",
+            "Lower limit of pointwise 5%% significance band (integral)",
+            "Upper limit of pointwise 5%% significance band (sum)",
+            "Lower limit of pointwise 5%% significance band (sum)",
+            paste("Parametric fitted effect of", sQuote(covname)))
+  rslt <- fv(df,
+             argu=Covname,
+             ylab=substitute(h(X), list(X=as.name(covname))),
+             valu="h",
+             fmla= as.formula(paste(". ~ ", Covname)),
+             alim=alim,
+             labl=c(covname,
+               paste("%s", paren(covname), sep=""),
+               paste("var", paren(covname), sep=""),
+               paste("hi", paren(covname), sep=""),
+               paste("lo", paren(covname), sep=""),
+               paste("hiX", paren(covname), sep=""),
+               paste("loX", paren(covname), sep=""),
+               paste("fit", paren(covname), sep="")),
+             desc=desc,
+             fname="h",
+             yexp=as.expression(substitute(hat(h)(X), list(X=covname))))
+  attr(rslt, "dotnames") <- c("h", "hi", "lo", "fit")
+  fvnames(rslt, ".s") <- c("hi", "lo")
+  # add special class data
+  class(rslt) <- c("parres", class(rslt))
+  attr(rslt, "stuff") <- list(covname       = paste(covname, collapse=""),
+                              covtype       = covtype,
+                              mediator      = mediator,
+                              cancovs       = cancovs,
+                              canbeta       = canbeta,
+                              isoffset      = isoffset,
+                              modelname     = modelname,
+                              modelcall     = modelcall,
+                              callstring    = callstring,
+                              sigma         = sigma,
+                              smooth.effect = smooth.effect,
+                              restricted    = !is.null(subregion),
+                              bw.input      = bw.input)
+  return(rslt)
+}
+
+print.parres <- function(x, ...) {
+  cat("Transformation diagnostic (class parres)\n")
+  s <- attr(x, "stuff")
+  cat(paste("for the", s$covtype, "covariate", sQuote(s$covname),
+            if(s$covtype != "external") "in" else "for",
+            "the fitted model",
+            if(nchar(s$modelcall) < 30) "" else "\n\t",
+            s$modelcall, "\n"))
+  switch(s$covtype,
+         original={
+           cancovs <- s$cancovs
+           med <- s$mediator
+           isoffset <- s$isoffset
+           if(is.null(isoffset)) isoffset <- rep.int(FALSE, length(cancovs))
+           ncc <- length(cancovs)
+           noff <- sum(isoffset)
+           nother <- sum(!isoffset)
+           explain <-
+             paste(ngettext(ncc, "Fitted effect:", "Fitted effect: sum of"),
+                   if(noff == 0) {
+                     paste(paste(med, collapse=" and "),
+                           ngettext(ncc, "term", "terms"),
+                           commasep(dQuote(cancovs)))
+                   } else {
+                     paste(paste(med[med != "offset"], collapse=" and "),
+                           ngettext(nother, "term", "terms"),
+                           commasep(dQuote(cancovs[!isoffset])),
+                           "and offset",
+                           ngettext(noff, "term", "terms"),
+                           commasep(dQuote(cancovs[isoffset])))
+                   })
+           cat(paste(explain, "\n"))
+         },
+         external={
+           cat("Note: effect estimate not justified by delta method\n")
+         },
+         offset={},
+         canonical={})
+  # earlier versions were equivalent to restricted=FALSE
+  if(identical(s$restricted, TRUE))
+    cat("\t--Diagnostic computed for a subregion--\n")
+  cat(paste("Call:", s$callstring, "\n"))
+  cat(paste("Actual smoothing bandwidth sigma =", signif(s$sigma,5), "\n"))
+  # earlier versions were equivalent to smooth.effect=TRUE
+  sme <- !identical(s$smooth.effect, FALSE)
+  if(sme) {
+    cat("Algorithm: smooth(effect + residual)\n\n")
+  } else {
+    cat("Algorithm: effect + smooth(residual)\n\n")
+  }
+  NextMethod("print")
+}
+
+plot.parres <- function(x, ...) {
+  xname <- deparse(substitute(x))
+  do.call(plot.fv, resolve.defaults(list(x), list(...),
+                                      list(main=xname, shade=c("hi", "lo"))))
+}
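+
+## Usage sketch (illustrative comment, not upstream code): a typical
+## workflow for this diagnostic, in the style of the package examples.
+##   X <- rpoispp(function(x,y) exp(3 + x + 2*x^2))
+##   model <- ppm(X ~ x + y)
+##   tra <- parres(model, "x")
+##   plot(tra)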
+
diff --git a/R/pcf.R b/R/pcf.R
new file mode 100755
index 0000000..ac7f890
--- /dev/null
+++ b/R/pcf.R
@@ -0,0 +1,375 @@
+#
+#   pcf.R
+#
+#   $Revision: 1.64 $   $Date: 2017/06/05 10:31:58 $
+#
+#
+#   calculate pair correlation function
+#   from point pattern (pcf.ppp)
+#   or from estimate of K or Kcross (pcf.fv)
+#   or from fasp object
+#
+#
+pcf <- function(X, ...) {
+  UseMethod("pcf")
+}
+
+pcf.ppp <- function(X, ..., r=NULL,
+                    kernel="epanechnikov", bw=NULL, stoyan=0.15,
+                    correction=c("translate", "Ripley"),
+                    divisor=c("r", "d"),
+                    var.approx=FALSE,
+                    domain=NULL, ratio=FALSE,
+                    close=NULL)
+{
+  verifyclass(X, "ppp")
+#  r.override <- !is.null(r)
+
+  win <- Window(X)
+  areaW <- area(win)
+  npts <- npoints(X)
+  lambda <- npts/areaW
+  lambda2area <- areaW * lambda^2
+
+  kernel <- match.kernel(kernel)
+  
+  if(!is.null(domain)) {
+    # estimate based on contributions from a subdomain
+    domain <- as.owin(domain)
+    if(!is.subset.owin(domain, win))
+      stop(paste(dQuote("domain"),
+                 "is not a subset of the window of X"))
+    # trick pcfdot() into doing it
+    indom <- factor(inside.owin(X$x, X$y, domain), levels=c(FALSE,TRUE))
+    g <- pcfdot(X %mark% indom,
+                i="TRUE",
+                r=r,
+                correction=correction, kernel=kernel, bw=bw, stoyan=stoyan,
+                divisor=divisor,
+                ...)
+    if(!ratio) {
+      ## relabel
+      g <- rebadge.fv(g, quote(g(r)), "g")
+    } else {
+      ## construct ratfv object
+      denom <- sum(indom == "TRUE") * lambda
+      g <- ratfv(as.data.frame(g), NULL, denom,
+                 "r", quote(g(r)),
+                 "theo", NULL, attr(g, "alim"),
+                 attr(g, "labl"), attr(g, "desc"), fname="g",
+                 ratio=TRUE)
+    }
+    if(var.approx)
+      warning("var.approx is not implemented when 'domain' is given")
+    return(g)
+  }
+
+  correction.given <- !missing(correction)
+  correction <- pickoption("correction", correction,
+                           c(isotropic="isotropic",
+                             Ripley="isotropic",
+                             trans="translate",
+                             translate="translate",
+                             translation="translate",
+                             best="best"),
+                           multi=TRUE)
+
+  correction <- implemented.for.K(correction, win$type, correction.given)
+
+  divisor <- match.arg(divisor)
+  
+  # bandwidth
+  if(is.null(bw) && (kernel == "epanechnikov")) {
+    # Stoyan & Stoyan 1995, eq (15.16), page 285
+    h <- stoyan/sqrt(lambda)
+    hmax <- h
+    # conversion to standard deviation
+    bw <- h/sqrt(5)
+  } else if(is.numeric(bw)) {
+    # standard deviation of kernel specified
+    # upper bound on half-width
+    hmax <- 3 * bw
+  } else {
+    # data-dependent bandwidth selection: guess upper bound on half-width
+    hmax <- 2 * stoyan/sqrt(lambda)
+  }
+
+  ########## r values ############################
+  # handle arguments r and breaks 
+
+  rmaxdefault <- rmax.rule("K", win, lambda)        
+  breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault)
+  if(!(breaks$even))
+    stop("r values must be evenly spaced")
+  # extract r values
+  r <- breaks$r
+  rmax <- breaks$max
+  # recommended range of r values for plotting
+  alim <- c(0, min(rmax, rmaxdefault))
+
+  # arguments for 'density'
+  denargs <- resolve.defaults(list(kernel=kernel, bw=bw),
+                              list(...),
+                              list(n=length(r), from=0, to=rmax),
+                              .StripNull = TRUE)
+  
+  #################################################
+  
+  # compute pairwise distances
+  if(npts > 1) {
+    needall <- any(correction %in% c("translate", "isotropic"))
+    if(is.null(close)) {
+      what <- if(needall) "all" else "ijd"
+      close <- closepairs(X, rmax + hmax, what=what)
+    } else {
+      #' check 'close' has correct format
+      needed <- if(!needall) c("i", "j", "d") else
+                 c("i", "j", "xi", "yi", "xj", "yj", "dx", "dy", "d")
+      if(any(is.na(match(needed, names(close)))))
+        stop(paste("Argument", sQuote("close"),
+                   "should have components named",
+                   commasep(sQuote(needed))),
+             call.=FALSE)
+    }
+    dIJ <- close$d
+  } else {
+    undefined <- rep(NaN, length(r))
+  }
+
+  # initialise fv object
+  
+  df <- data.frame(r=r, theo=rep.int(1,length(r)))
+  out <- ratfv(df,
+               NULL, lambda2area,
+               "r", quote(g(r)),
+               "theo", NULL,
+               alim,
+               c("r","%s[Pois](r)"),
+               c("distance argument r", "theoretical Poisson %s"),
+               fname="g",
+               ratio=ratio)
+
+  ###### compute #######
+
+  bw.used <- NULL
+  
+  if(any(correction=="translate")) {
+    # translation correction
+    if(npts > 1) {
+      edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=win, paired=TRUE)
+      kdenT <- sewpcf(dIJ, edgewt, denargs, lambda2area, divisor)
+      gT <- kdenT$g
+      bw.used <- attr(kdenT, "bw")
+    } else gT <- undefined
+    if(!ratio) {
+      out <- bind.fv(out,
+                     data.frame(trans=gT),
+                     "hat(%s)[Trans](r)",
+                     "translation-corrected estimate of %s",
+                     "trans")
+    } else {
+      out <- bind.ratfv(out,
+                        data.frame(trans=gT * lambda2area),
+                        lambda2area,
+                        "hat(%s)[Trans](r)",
+                        "translation-corrected estimate of %s",
+                        "trans")
+    }
+  }
+  if(any(correction=="isotropic")) {
+    # Ripley isotropic correction
+    if(npts > 1) {
+      XI <- ppp(close$xi, close$yi, window=win, check=FALSE)
+      edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1))
+      kdenR <- sewpcf(dIJ, edgewt, denargs, lambda2area, divisor)
+      gR <- kdenR$g
+      bw.used <- attr(kdenR, "bw")
+    } else gR <- undefined
+    if(!ratio) {
+      out <- bind.fv(out,
+                     data.frame(iso=gR),
+                     "hat(%s)[Ripley](r)",
+                     "isotropic-corrected estimate of %s",
+                     "iso")
+    } else {
+      out <- bind.ratfv(out,
+                        data.frame(iso=gR * lambda2area),
+                        lambda2area,
+                        "hat(%s)[Ripley](r)",
+                        "isotropic-corrected estimate of %s",
+                        "iso")
+    }
+  }
+  
+  # sanity check
+  if(is.null(out)) {
+    warning("Nothing computed - no edge corrections chosen")
+    return(NULL)
+  }
+
+  ## variance approximation
+  ## Illian et al 2008 p 234 equation 4.3.42
+  if(var.approx) {
+    gr <- if(any(correction == "isotropic")) gR else gT
+    # integral of squared kernel
+    intk2 <- kernel.squint(kernel, bw.used)
+    # isotropised set covariance of window
+    gWbar <- as.function(rotmean(setcov(win), result="fv"))
+    vest <- gr * intk2/(pi * r * gWbar(r) * lambda^2)
+    if(!ratio) {
+      out <- bind.fv(out,
+                     data.frame(v=vest),
+                     "v(r)",
+                     "approximate variance of %s",
+                     "v")
+    } else {
+      vden <- rep((npts-1)^2, length(vest))
+      vnum <- vden * vest
+      out <- bind.ratfv(out,
+                        data.frame(v=vnum),
+                        data.frame(c=vden),
+                        "v(r)", 
+                        "approximate variance of %s",
+                        "v")
+    }
+  }
+
+  ## Finish off
+  ## default is to display all corrections
+  formula(out) <- . ~ r
+  fvnames(out, ".") <- setdiff(rev(colnames(out)), c("r", "v"))
+  ##
+  unitname(out) <- unitname(X)
+  ## copy to other components
+  if(ratio)
+    out <- conform.ratfv(out)
+
+  attr(out, "bw") <- bw.used
+  return(out)
+}
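+
+## Usage sketch (illustrative comment, not upstream code; 'cells' is a
+## standard spatstat dataset):
+##   g <- pcf(cells, divisor="d", correction="translate")
+##   plot(g)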
+
+# Smoothing Estimate of Weighted Pair Correlation
+# d = vector of relevant distances
+# w = vector of edge correction weights (in normal use)
+# denargs = arguments to density.default
+# lambda2area = constant lambda^2 * areaW (in normal use)
+
+sewpcf <- function(d, w, denargs, lambda2area, divisor=c("r","d")) {
+  divisor <- match.arg(divisor)
+  if(divisor == "d") {
+    w <- w/d
+    if(!all(good <- is.finite(w))) {
+      nbad <- sum(!good)
+      warning(paste(nbad, "infinite or NA",
+                    ngettext(nbad, "contribution was", "contributions were"),
+                    "deleted from pcf estimate"))
+      d <- d[good]
+      w <- w[good]
+    }
+  }
+  wtot <- sum(w)
+  kden <- do.call.matched(density.default,
+                  append(list(x=d, weights=w/wtot), denargs))
+  r <- kden$x
+  y <- kden$y * wtot
+  if(divisor == "r")
+    y <- y/r
+  g <- y/(2 * pi * lambda2area)
+  result <- data.frame(r=r,g=g)
+  attr(result, "bw") <- kden$bw
+  return(result)
+}
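+
+## Internal usage sketch (illustrative comment; the values below are
+## hypothetical). pcf.ppp and its relatives call sewpcf with the distances
+## and edge-correction weights of close pairs:
+##   cl <- closepairs(cells, rmax=0.25)
+##   dn <- list(kernel="epanechnikov", bw=0.02, n=512, from=0, to=0.25)
+##   lam2a <- area(Window(cells)) * intensity(cells)^2
+##   est <- sewpcf(cl$d, rep(1, length(cl$d)), dn, lam2a)  # uncorrected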
+
+#
+#---------- OTHER METHODS FOR pcf --------------------
+#
+
+"pcf.fasp" <- function(X, ..., method="c") {
+  verifyclass(X, "fasp")
+  Y <- X
+  Y$title <- paste("Array of pair correlation functions",
+                   if(!is.null(X$dataname)) "for",
+                   X$dataname)
+  # go to work on each function
+  for(i in seq_along(X$fns)) {
+    Xi <- X$fns[[i]]
+    PCFi <- pcf.fv(Xi, ..., method=method)
+    Y$fns[[i]] <- PCFi
+    if(is.fv(PCFi))
+      Y$default.formula[[i]] <- formula(PCFi)
+  }
+  return(Y)
+}
+
+
+pcf.fv <- local({
+
+  callmatched <- function(fun, argue) {
+    formalnames <- names(formals(fun))
+    formalnames <- formalnames[formalnames != "..."]
+    do.call(fun, argue[names(argue) %in% formalnames])
+  }
+
+  pcf.fv <- function(X, ..., method="c") {
+    verifyclass(X, "fv")
+  
+    # extract r and the recommended estimate of K
+    r <- with(X, .x)
+    K <- with(X, .y)
+    alim <- attr(X, "alim")
+
+    # remove NA's
+    ok <- !is.na(K)
+    K <- K[ok]
+    r <- r[ok]
+    switch(method,
+           a = {
+             ss <- callmatched(smooth.spline,
+                               list(x=r, y=K, ...))
+             dK <- predict(ss, r, deriv=1)$y
+             g <- dK/(2 * pi * r)
+           },
+           b = {
+             y <- K/(2 * pi * r)
+             y[!is.finite(y)] <- 0
+             ss <- callmatched(smooth.spline,
+                               list(x=r, y=y, ...))
+             dy <- predict(ss, r, deriv=1)$y
+             g <- dy + y/r
+           },
+           c = {
+             z <- K/(pi * r^2)
+             z[!is.finite(z)] <- 1
+             ss <- callmatched(smooth.spline,
+                               list(x=r, y=z, ...))
+             dz <- predict(ss, r, deriv=1)$y
+             g <- (r/2) * dz + z
+           },
+           d = {
+             z <- sqrt(K)
+             z[!is.finite(z)] <- 0
+             ss <- callmatched(smooth.spline,
+                               list(x=r, y=z, ...))
+             dz <- predict(ss, r, deriv=1)$y
+             g <- z * dz/(pi * r)
+           },
+           stop(paste("unrecognised method", sQuote(method)))
+           )
+
+    # pack result into "fv" data frame
+    Z <- fv(data.frame(r=r,
+                       theo=rep.int(1, length(r)),
+                       pcf=g),
+            "r", substitute(g(r), NULL), "pcf", . ~ r, alim,
+            c("r", "%s[pois](r)", "%s(r)"),
+            c("distance argument r",
+              "theoretical Poisson value of %s",
+              "estimate of %s by numerical differentiation"),
+            fname="g")
+    unitname(Z) <- unitname(X)
+    return(Z)
+  }
+
+  pcf.fv
+})
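+
+## Usage sketch (illustrative comment): deriving g by numerical
+## differentiation of an estimated K function; 'spar' is passed through
+## to smooth.spline.
+##   K <- Kest(cells)
+##   g <- pcf(K, spar=0.5, method="b")
+##   plot(g)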
+
diff --git a/R/pcfinhom.R b/R/pcfinhom.R
new file mode 100755
index 0000000..d2c4a19
--- /dev/null
+++ b/R/pcfinhom.R
@@ -0,0 +1,234 @@
+#
+#   pcfinhom.R
+#
+#   $Revision: 1.21 $   $Date: 2017/06/05 10:31:58 $
+#
+#   inhomogeneous pair correlation function of point pattern 
+#
+#
+
+pcfinhom <- function(X, lambda=NULL, ..., r=NULL,
+                     kernel="epanechnikov", bw=NULL, stoyan=0.15,
+                     correction=c("translate", "Ripley"),
+                     divisor=c("r","d"),
+                     renormalise=TRUE,
+                     normpower=1,
+                     update=TRUE, leaveoneout=TRUE,
+                     reciplambda=NULL, 
+                     sigma=NULL, varcov=NULL, close=NULL)
+{
+  verifyclass(X, "ppp")
+#  r.override <- !is.null(r)
+  miss.update <- missing(update)
+  
+  win <- X$window
+  areaW <- area(win)
+  npts <- npoints(X)
+
+  kernel <- match.kernel(kernel)
+  
+  correction.given <- !missing(correction)
+  correction <- pickoption("correction", correction,
+                           c(isotropic="isotropic",
+                             Ripley="isotropic",
+                             trans="translate",
+                             translate="translate",
+                             translation="translate",
+                             best="best"),
+                           multi=TRUE)
+
+  correction <- implemented.for.K(correction, win$type, correction.given)
+
+  divisor <- match.arg(divisor)
+  
+  if(is.null(bw) && kernel=="epanechnikov") {
+    # Stoyan & Stoyan 1995, eq (15.16), page 285
+    h <- stoyan/sqrt(npts/areaW)
+    hmax <- h
+    # conversion to standard deviation
+    bw <- h/sqrt(5)
+  } else if(is.numeric(bw)) {
+    # standard deviation of kernel specified
+    # upper bound on half-width
+    hmax <- 3 * bw
+  } else {
+    # data-dependent bandwidth selection: guess upper bound on half-width
+    hmax <- 2 * stoyan/sqrt(npts/areaW)
+  }
+
+
+  ########## intensity values #########################
+
+  dangerous <- c("lambda", "reciplambda")
+  danger <- TRUE
+
+  if(npts == 0) {
+    lambda <- reciplambda <- numeric(0)
+    danger <- FALSE
+  } else if(missing(lambda) && is.null(reciplambda)) {
+    # No intensity data provided
+    danger <- FALSE
+    # Estimate density by leave-one-out kernel smoothing
+    lambda <- density(X, ..., sigma=sigma, varcov=varcov,
+                      at="points", leaveoneout=TRUE)
+    lambda <- as.numeric(lambda)
+    reciplambda <- 1/lambda
+  } else if(!is.null(reciplambda)) {
+    # 1/lambda values provided
+    if(is.im(reciplambda)) 
+      reciplambda <- safelookup(reciplambda, X)
+    else if(is.function(reciplambda))
+      reciplambda <- reciplambda(X$x, X$y)
+    else if(is.numeric(reciplambda) && is.vector(as.numeric(reciplambda)))
+      check.nvector(reciplambda, npts)
+    else stop(paste(sQuote("reciplambda"),
+                    "should be a vector, a pixel image, or a function"))
+  } else {
+    # lambda values provided
+    if(is.im(lambda)) 
+      lambda <- safelookup(lambda, X)
+    else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) {
+      model <- lambda
+      if(!update) {
+        ## just use intensity of fitted model
+        lambda <- predict(model, locations=X, type="trend")
+      } else {
+        if(is.ppm(model)) {
+          model <- update(model, Q=X)
+          lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
+        } else if(is.kppm(model)) {
+          model <- update(model, X=X)
+          lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
+        } else {
+          model <- update(model, X=X)
+          lambda <- fitted(model, dataonly=TRUE)
+        }
+        danger <- FALSE
+        if(miss.update) 
+          warn.once(key="pcfinhom.update",
+                    "The behaviour of pcfinhom when lambda is a ppm object",
+                    "has changed (in spatstat 1.45-0 and later).",
+                    "See help(pcfinhom)")
+      }
+    } else if(is.function(lambda)) 
+      lambda <- lambda(X$x, X$y)
+    else if(is.numeric(lambda) && is.vector(as.numeric(lambda)))
+      check.nvector(lambda, npts)
+    else stop(paste(sQuote("lambda"),
+         "should be a vector, a pixel image, a function, or a fitted model"))
+    # evaluate reciprocal
+    reciplambda <- 1/lambda
+  }
+  
+  # renormalise
+  if(renormalise && npts > 0) {
+    check.1.real(normpower)
+    stopifnot(normpower %in% 1:2)
+    renorm.factor <- (areaW/sum(reciplambda))^normpower
+  } 
+  
+  ########## r values ############################
+  # handle arguments r and breaks 
+
+  rmaxdefault <- rmax.rule("K", win, lambda)        
+  breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault)
+  if(!(breaks$even))
+    stop("r values must be evenly spaced")
+  # extract r values
+  r <- breaks$r
+  rmax <- breaks$max
+  # recommended range of r values for plotting
+  alim <- c(0, min(rmax, rmaxdefault))
+
+  ########## smoothing parameters for pcf ############################  
+  # arguments for 'density'
+
+  denargs <- resolve.defaults(list(kernel=kernel, bw=bw),
+                              list(...),
+                              list(n=length(r), from=0, to=rmax))
+  
+  #################################################
+  
+  # compute pairwise distances
+
+  if(npts > 1) {
+    if(is.null(close)) {
+      #' find close pairs
+      close <- closepairs(X, rmax+hmax)
+    } else {
+      #' check 'close' has correct format
+      needed <- c("i", "j", "xi", "yi", "xj", "yj", "dx", "dy", "d")
+      if(any(is.na(match(needed, names(close)))))
+        stop(paste("Argument", sQuote("close"),
+                   "should have components named",
+                   commasep(sQuote(needed))),
+             call.=FALSE)
+    }
+    dIJ <- close$d
+    I <- close$i
+    J <- close$j
+    XI <- ppp(close$xi, close$yi, window=win, check=FALSE)
+    wIJ <- reciplambda[I] * reciplambda[J]
+  } else {
+    undefined <- rep(NaN, length(r))
+  }
+
+  # initialise fv object
+  
+  df <- data.frame(r=r, theo=rep.int(1,length(r)))
+  out <- fv(df, "r",
+            quote(g[inhom](r)), "theo", ,
+            alim,
+            c("r","{%s[%s]^{pois}}(r)"),
+            c("distance argument r", "theoretical Poisson %s"),
+            fname=c("g", "inhom"))
+
+  ###### compute #######
+
+  if(any(correction=="translate")) {
+    # translation correction
+    if(npts > 1) {
+      XJ <- ppp(close$xj, close$yj, window=win, check=FALSE)
+      edgewt <- edge.Trans(XI, XJ, paired=TRUE)
+      gT <- sewpcf(dIJ, edgewt * wIJ, denargs, areaW, divisor)$g
+      if(renormalise) gT <- gT * renorm.factor
+    } else gT <- undefined
+    out <- bind.fv(out,
+                   data.frame(trans=gT),
+                   "{hat(%s)[%s]^{Trans}}(r)",
+                   "translation-corrected estimate of %s",
+                   "trans")
+  }
+  if(any(correction=="isotropic")) {
+    # Ripley isotropic correction
+    if(npts > 1) {
+      edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1))
+      gR <- sewpcf(dIJ, edgewt * wIJ, denargs, areaW, divisor)$g
+      if(renormalise) gR <- gR * renorm.factor
+    } else gR <- undefined
+    out <- bind.fv(out,
+                   data.frame(iso=gR),
+                   "{hat(%s)[%s]^{Ripley}}(r)",
+                   "isotropic-corrected estimate of %s",
+                   "iso")
+  }
+  
+  # sanity check
+  if(is.null(out)) {
+    warning("Nothing computed - no edge corrections chosen")
+    return(NULL)
+  }
+  
+  # which corrections have been computed?
+  corrxns <- rev(setdiff(names(out), "r"))
+
+  # default is to display them all
+  formula(out) <- . ~ r
+  fvnames(out, ".") <- corrxns
+
+  unitname(out) <- unitname(X)
+  if(danger)
+    attr(out, "dangerous") <- dangerous
+  return(out)
+}
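+
+## Usage sketch (illustrative comment): when 'lambda' is omitted, the
+## intensity is estimated by leave-one-out kernel smoothing.
+##   X <- rpoispp(function(x,y) 100 * x, 100)
+##   g <- pcfinhom(X, sigma=0.1)
+##   plot(g)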
+
diff --git a/R/pcfmulti.R b/R/pcfmulti.R
new file mode 100644
index 0000000..38e4c6f
--- /dev/null
+++ b/R/pcfmulti.R
@@ -0,0 +1,253 @@
+#
+#   pcfmulti.R
+#
+#   $Revision: 1.8 $   $Date: 2016/09/21 07:28:58 $
+#
+#   multitype pair correlation functions
+#
+
+pcfcross <- 
+  function(X, i, j, ...,
+         r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15,
+         correction = c("isotropic", "Ripley", "translate"),
+         divisor=c("r","d"))
+{
+  verifyclass(X, "ppp")
+  stopifnot(is.multitype(X))
+  if(missing(correction))
+    correction <- NULL
+  divisor <- match.arg(divisor)
+  ##
+  marx <- marks(X)
+  if(missing(i))
+    i <- levels(marx)[1]
+  if(missing(j))
+    j <- levels(marx)[2]
+  I <- (marx == i)
+  J <- (marx == j)
+  Iname <- paste("points with mark i =", i)
+  Jname <- paste("points with mark j =", j)
+  ##
+  result <- pcfmulti(X, I, J, ...,
+                     r=r, 
+                     kernel=kernel, bw=bw, stoyan=stoyan,
+                     correction=correction,
+                     divisor=divisor,
+                     Iname=Iname, Jname=Jname)
+  ##
+  iname <- make.parseable(paste(i))
+  jname <- make.parseable(paste(j))
+  result <-
+    rebadge.fv(result,
+               substitute(g[i,j](r),
+                          list(i=iname,j=jname)),
+               c("g", paste0("list", paren(paste(iname, jname, sep=",")))),
+               new.yexp=substitute(g[list(i,j)](r),
+                                   list(i=iname,j=jname)))
+  return(result)
+}
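+
+## Usage sketch (illustrative comment; 'amacrine' is a standard spatstat
+## dataset with marks "on" and "off"):
+##   g <- pcfcross(amacrine, "on", "off")
+##   plot(g)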
+
+pcfdot <- 
+function(X, i, ...,
+         r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15,
+         correction = c("isotropic", "Ripley", "translate"),
+         divisor=c("r", "d"))
+{
+  verifyclass(X, "ppp")
+  stopifnot(is.multitype(X))
+  if(missing(correction))
+    correction <- NULL
+  divisor <- match.arg(divisor)
+
+  marx <- marks(X)
+  if(missing(i))
+    i <- levels(marx)[1]
+
+  I <- (marx == i)
+  J <- rep.int(TRUE, X$n)  # i.e. all points
+  Iname <- paste("points with mark i =", i)
+  Jname <- "points"
+	
+  result <- pcfmulti(X, I, J, ...,
+                     r=r, kernel=kernel, bw=bw, stoyan=stoyan,
+                     correction=correction,
+                     divisor=divisor,
+                     Iname=Iname, Jname=Jname)
+
+  iname <- make.parseable(paste(i))
+  result <-
+    rebadge.fv(result,
+               substitute(g[i ~ dot](r), list(i=iname)),
+               c("g", paste0(iname, "~symbol(\"\\267\")")),
+               new.yexp=substitute(g[i ~ symbol("\267")](r),
+                 list(i=iname)))
+  return(result)
+}
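+
+## Usage sketch (illustrative comment):
+##   g <- pcfdot(amacrine, "on")
+##   plot(g)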
+
+
+pcfmulti <- function(X, I, J, ...,
+                     r=NULL, 
+                     kernel="epanechnikov", bw=NULL, stoyan=0.15,
+                     correction=c("translate", "Ripley"),
+                     divisor=c("r","d"),
+                     Iname="points satisfying condition I",
+                     Jname="points satisfying condition J")
+{
+  verifyclass(X, "ppp")
+#  r.override <- !is.null(r)
+  divisor <- match.arg(divisor)
+
+  win <- X$window
+  areaW <- area(win)
+  npts <- npoints(X)
+  
+  correction.given <- !missing(correction) && !is.null(correction)
+  if(is.null(correction))
+    correction <- c("translate", "Ripley")
+  correction <- pickoption("correction", correction,
+                           c(isotropic="isotropic",
+                             Ripley="isotropic",
+                             trans="translate",
+                             translate="translate",
+                             translation="translate",
+                             best="best"),
+                           multi=TRUE)
+
+  correction <- implemented.for.K(correction, win$type, correction.given)
+  
+  ## .......... indices I and J .............................
+  
+  I <- ppsubset(X, I)
+  J <- ppsubset(X, J)
+  if(is.null(I) || is.null(J))
+    stop("I and J must be valid subset indices")
+
+  nI <- sum(I)
+  nJ <- sum(J)
+  if(nI == 0) stop(paste("There are no", Iname))
+  if(nJ == 0) stop(paste("There are no", Jname))
+
+  XI <- X[I]
+  XJ <- X[J]
+
+#  lambdaI <- nI/areaW
+  lambdaJ <- nJ/areaW
+  nIJ <- sum(I & J)
+  lambdaIJarea <- (nI * nJ - nIJ)/areaW
+  
+  ## ...........  kernel bandwidth and support .........................
+  
+  if(is.null(bw) && kernel=="epanechnikov") {
+    # Stoyan & Stoyan 1995, eq (15.16), page 285
+    h <- stoyan/sqrt(lambdaJ)
+    hmax <- h
+    # conversion to standard deviation
+    bw <- h/sqrt(5)
+  } else if(is.numeric(bw)) {
+    # standard deviation of kernel specified
+    # upper bound on half-width
+    hmax <- 3 * bw
+  } else {
+    # data-dependent bandwidth selection: guess upper bound on half-width
+    hmax <- 2 * stoyan/sqrt(lambdaJ)
+  }
+
+
+########## r values ############################
+  # handle argument r 
+
+  rmaxdefault <- rmax.rule("K", win, lambdaJ)
+  breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault)
+  if(!(breaks$even))
+    stop("r values must be evenly spaced")
+  # extract r values
+  r <- breaks$r
+  rmax <- breaks$max
+  # recommended range of r values for plotting
+  alim <- c(0, min(rmax, rmaxdefault))
+
+  # initialise fv object
+  
+  df <- data.frame(r=r, theo=rep.int(1,length(r)))
+  fname <- c("g", "list(I,J)")
+  yexp <- quote(g[list(I,J)](r))
+  out <- fv(df, "r",
+            quote(g[I,J](r)), "theo", ,
+            alim,
+            c("r", makefvlabel(NULL, NULL, fname, "Pois")),
+            c("distance argument r", "theoretical Poisson %s"),
+            fname=fname,
+            yexp=yexp)
+  
+  ########## smoothing parameters for pcf ############################  
+  # arguments for 'density'
+
+  denargs <- resolve.defaults(list(kernel=kernel, bw=bw),
+                              list(...),
+                              list(n=length(r), from=0, to=rmax))
+  
+  #################################################
+  
+  ## compute pairwise distances
+  
+  ## identify close pairs of points
+  what <- if(any(correction == "translate")) "all" else "ijd"
+  close <- crosspairs(XI, XJ, rmax+hmax, what=what)
+  ## map (i,j) to original serial numbers in X
+  orig <- seq_len(npts)
+  imap <- orig[I]
+  jmap <- orig[J]
+  iX <- imap[close$i]
+  jX <- jmap[close$j]
+  ## eliminate any identical pairs
+  if(nIJ > 0) {
+    ok <- (iX != jX)
+    if(!all(ok))
+      close <- as.list(as.data.frame(close)[ok, , drop=FALSE])
+  }
+  ## extract information for these pairs (relative to orderings of XI, XJ)
+  dclose <- close$d
+  icloseI  <- close$i
+#  jcloseJ  <- close$j
+
+  ###### compute #######
+
+  if(any(correction=="translate")) {
+    # translation correction
+    edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=win, paired=TRUE)
+    gT <- sewpcf(dclose, edgewt, denargs, lambdaIJarea, divisor)$g
+    out <- bind.fv(out,
+                   data.frame(trans=gT),
+                   makefvlabel(NULL, "hat", fname, "Trans"),
+                   "translation-corrected estimate of %s",
+                   "trans")
+  }
+  if(any(correction=="isotropic")) {
+    # Ripley isotropic correction
+    edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1))
+    gR <- sewpcf(dclose, edgewt, denargs, lambdaIJarea, divisor)$g
+    out <- bind.fv(out,
+                   data.frame(iso=gR),
+                   makefvlabel(NULL, "hat", fname, "Ripley"),
+                   "isotropic-corrected estimate of %s",
+                   "iso")
+  }
+  
+  ## sanity check
+  if(is.null(out)) {
+    warning("Nothing computed - no edge corrections chosen")
+    return(NULL)
+  }
+  
+  # which corrections have been computed?
+  corrxns <- rev(setdiff(names(out), "r"))
+
+  # default is to display them all
+  formula(out) <- . ~ r
+  fvnames(out, ".") <- corrxns
+
+  # 
+  unitname(out) <- unitname(X)
+  return(out)
+}
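+
+## Usage sketch (illustrative comment): the general form takes logical
+## index vectors selecting the two subsets of points.
+##   I <- marks(amacrine) == "on"
+##   J <- marks(amacrine) == "off"
+##   g <- pcfmulti(amacrine, I, J)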
+
diff --git a/R/pcfmulti.inhom.R b/R/pcfmulti.inhom.R
new file mode 100755
index 0000000..e8e54b1
--- /dev/null
+++ b/R/pcfmulti.inhom.R
@@ -0,0 +1,298 @@
+#
+#   pcfmulti.inhom.R
+#
+#   $Revision: 1.15 $   $Date: 2016/09/21 07:28:42 $
+#
+#   inhomogeneous multitype pair correlation functions
+#
+#
+
+pcfcross.inhom <- 
+  function(X, i, j, lambdaI=NULL, lambdaJ=NULL, ...,
+         r=NULL, breaks=NULL,
+         kernel="epanechnikov", bw=NULL, stoyan=0.15,
+         correction = c("isotropic", "Ripley", "translate"),
+         sigma=NULL, varcov=NULL)
+{
+  verifyclass(X, "ppp")
+  stopifnot(is.multitype(X))
+  if(missing(correction))
+    correction <- NULL
+  marx <- marks(X)
+  if(missing(i))
+    i <- levels(marx)[1]
+  if(missing(j))
+    j <- levels(marx)[2]
+  I <- (marx == i)
+  J <- (marx == j)
+  Iname <- paste("points with mark i =", i)
+  Jname <- paste("points with mark j =", j)
+  g <- pcfmulti.inhom(X, I, J, lambdaI, lambdaJ, ...,
+                      r=r,breaks=breaks,
+                      kernel=kernel, bw=bw, stoyan=stoyan,
+                      correction=correction,
+                      sigma=sigma, varcov=varcov,
+                      Iname=Iname, Jname=Jname)
+  iname <- make.parseable(paste(i))
+  jname <- make.parseable(paste(j))
+  result <-
+    rebadge.fv(g,
+               substitute(g[inhom,i,j](r),
+                          list(i=iname,j=jname)),
+               c("g", paste0("list", paren(paste("inhom", i, j, sep=",")))),
+               new.yexp=substitute(g[list(inhom,i,j)](r),
+                                   list(i=iname,j=jname)))
+  attr(result, "dangerous") <- attr(g, "dangerous")
+  return(result)
+}
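+
+## Usage sketch (illustrative comment): when lambdaI and lambdaJ are
+## omitted they are estimated by leave-one-out smoothing with bandwidth
+## 'sigma'.
+##   g <- pcfcross.inhom(amacrine, "on", "off", sigma=0.15)
+##   plot(g)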
+
+pcfdot.inhom <- 
+function(X, i, lambdaI=NULL, lambdadot=NULL, ...,
+         r=NULL, breaks=NULL,
+         kernel="epanechnikov", bw=NULL, stoyan=0.15,
+         correction = c("isotropic", "Ripley", "translate"),
+         sigma=NULL, varcov=NULL)
+{
+  verifyclass(X, "ppp")
+  stopifnot(is.multitype(X))
+  if(missing(correction))
+    correction <- NULL
+
+  marx <- marks(X)
+  if(missing(i))
+    i <- levels(marx)[1]
+
+  I <- (marx == i)
+  J <- rep.int(TRUE, X$n)  # i.e. all points
+  Iname <- paste("points with mark i =", i)
+  Jname <- "points"
+	
+  g <- pcfmulti.inhom(X, I, J, lambdaI, lambdadot, ...,
+                      r=r,breaks=breaks,
+                      kernel=kernel, bw=bw, stoyan=stoyan,
+                      correction=correction,
+                      sigma=sigma, varcov=varcov,
+                      Iname=Iname, Jname=Jname)
+  iname <- make.parseable(paste(i))
+  result <-
+    rebadge.fv(g,
+               substitute(g[inhom, i ~ dot](r), list(i=iname)),
+               c("g", paste0("list(inhom,", iname, "~symbol(\"\\267\"))")),
+               new.yexp=substitute(g[list(inhom, i ~ symbol("\267"))](r),
+                 list(i=iname)))
+  if(!is.null(dang <- attr(g, "dangerous"))) {
+    dang[dang == "lambdaJ"] <- "lambdadot"
+    dang[dang == "lambdaIJ"] <- "lambdaIdot"
+    attr(result, "dangerous") <- dang
+  }
+  return(result)
+}
+
+
+pcfmulti.inhom <- function(X, I, J, lambdaI=NULL, lambdaJ=NULL, ...,
+                           r=NULL, breaks=NULL, 
+                           kernel="epanechnikov", bw=NULL, stoyan=0.15,
+                           correction=c("translate", "Ripley"),
+                           sigma=NULL, varcov=NULL,
+                           Iname="points satisfying condition I",
+                           Jname="points satisfying condition J")
+{
+  verifyclass(X, "ppp")
+#  r.override <- !is.null(r)
+
+  win <- X$window
+  areaW <- area(win)
+  npts <- npoints(X)
+  
+  correction.given <- !missing(correction) && !is.null(correction)
+  if(is.null(correction))
+    correction <- c("translate", "Ripley")
+  correction <- pickoption("correction", correction,
+                           c(isotropic="isotropic",
+                             Ripley="isotropic",
+                             trans="translate",
+                             translate="translate",
+                             translation="translate",
+                             best="best"),
+                           multi=TRUE)
+
+  correction <- implemented.for.K(correction, win$type, correction.given)
+  
+  # bandwidth  
+  if(is.null(bw) && kernel=="epanechnikov") {
+    # Stoyan & Stoyan 1995, eq (15.16), page 285
+    h <- stoyan/sqrt(npts/areaW)
+    hmax <- h
+    # conversion to standard deviation
+    bw <- h/sqrt(5)
+  } else if(is.numeric(bw)) {
+    # standard deviation of kernel specified
+    # upper bound on half-width
+    hmax <- 3 * bw
+  } else {
+    # data-dependent bandwidth selection: guess upper bound on half-width
+    hmax <- 2 * stoyan/sqrt(npts/areaW)
+  }
+
+  ##########  indices I and J  ########################
+  
+  if(!is.logical(I) || !is.logical(J))
+    stop("I and J must be logical vectors")
+  if(length(I) != npts || length(J) != npts)
+    stop(paste("The length of I and J must equal",
+               "the number of points in the pattern"))
+	
+  nI <- sum(I)
+  nJ <- sum(J)
+  if(nI == 0) stop(paste("There are no", Iname))
+  if(nJ == 0) stop(paste("There are no", Jname))
+
+  XI <- X[I]
+  XJ <- X[J]
+  
+  ########## intensity values #########################
+
+  dangerous <- c("lambdaI", "lambdaJ")
+  dangerI <- dangerJ <- TRUE
+  
+  if(is.null(lambdaI)) {
+      # Estimate density by leave-one-out kernel smoothing
+    dangerI <- FALSE
+    lambdaI <- density(XI, ..., sigma=sigma, varcov=varcov,
+                      at="points", leaveoneout=TRUE)
+  } else {
+    # lambda values provided
+    if(is.vector(lambdaI)) 
+      check.nvector(lambdaI, nI)
+    else if(is.im(lambdaI)) 
+      lambdaI <- safelookup(lambdaI, XI)
+    else if(is.function(lambdaI)) 
+      lambdaI <- lambdaI(XI$x, XI$y)
+    else stop(paste(sQuote("lambdaI"),
+                    "should be a vector, a pixel image, or a function"))
+  }
+
+  if(is.null(lambdaJ)) {
+      # Estimate density by leave-one-out kernel smoothing
+    dangerJ <- FALSE
+    lambdaJ <- density(XJ, ..., sigma=sigma, varcov=varcov,
+                      at="points", leaveoneout=TRUE)
+  } else {
+    # lambda values provided
+    if(is.vector(lambdaJ)) 
+      check.nvector(lambdaJ, nJ)
+    else if(is.im(lambdaJ)) 
+      lambdaJ <- safelookup(lambdaJ, XJ)
+    else if(is.function(lambdaJ)) 
+      lambdaJ <- lambdaJ(XJ$x, XJ$y)
+    else stop(paste(sQuote("lambdaJ"),
+                    "should be a vector, a pixel image, or a function"))
+  }
+
+  danger <- dangerI || dangerJ
+  
+  ########## r values ############################
+  # handle arguments r and breaks 
+
+  rmaxdefault <- rmax.rule("K", win, npts/areaW)        
+  breaks <- handle.r.b.args(r, breaks, win, rmaxdefault=rmaxdefault)
+  if(!(breaks$even))
+    stop("r values must be evenly spaced")
+  # extract r values
+  r <- breaks$r
+  rmax <- breaks$max
+  # recommended range of r values for plotting
+  alim <- c(0, min(rmax, rmaxdefault))
+
+  # initialise fv object
+  
+  df <- data.frame(r=r, theo=rep.int(1,length(r)))
+  fname <- c("g", "list(inhom,I,J)")
+  out <- fv(df, "r",
+            quote(g[inhom,I,J](r)), "theo", ,
+            alim,
+            c("r", makefvlabel(NULL, NULL, fname, "pois")),            
+            c("distance argument r", "theoretical Poisson %s"),
+            fname=fname,
+            yexp=quote(g[list(inhom,I,J)](r)))
+  
+  ########## smoothing parameters for pcf ############################  
+  # arguments for 'density'
+
+  denargs <- resolve.defaults(list(kernel=kernel, bw=bw),
+                              list(...),
+                              list(n=length(r), from=0, to=rmax))
+  
+  #################################################
+  
+  # compute pairwise distances
+  
+# identify close pairs of points
+  close <- crosspairs(XI, XJ, rmax+hmax, what="ijd")
+# map (i,j) to original serial numbers in X
+  orig <- seq_len(npts)
+  imap <- orig[I]
+  jmap <- orig[J]
+  iX <- imap[close$i]
+  jX <- jmap[close$j]
+# eliminate any identical pairs
+  if(any(I & J)) {
+    ok <- (iX != jX)
+    if(!all(ok)) {
+      close$i  <- close$i[ok]
+      close$j  <- close$j[ok]
+      close$d  <- close$d[ok]
+    }
+  }
+# extract information for these pairs (relative to orderings of XI, XJ)
+  dclose <- close$d
+  icloseI  <- close$i
+  jcloseJ  <- close$j
+
+# Form weight for each pair
+  weight <- 1/(lambdaI[icloseI] * lambdaJ[jcloseJ])
+
+  ###### compute #######
+
+  if(any(correction=="translate")) {
+    # translation correction
+    edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE)
+    gT <- sewpcf(dclose, edgewt * weight, denargs, areaW)$g
+    out <- bind.fv(out,
+                   data.frame(trans=gT),
+                   makefvlabel(NULL, "hat", fname, "Trans"),
+                   "translation-corrected estimate of %s",
+                   "trans")
+  }
+  if(any(correction=="isotropic")) {
+    # Ripley isotropic correction
+    edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1))
+    gR <- sewpcf(dclose, edgewt * weight, denargs, areaW)$g
+    out <- bind.fv(out,
+                   data.frame(iso=gR),
+                   makefvlabel(NULL, "hat", fname, "Ripley"),
+                   "isotropic-corrected estimate of %s",
+                   "iso")
+  }
+  
+  # sanity check
+  if(is.null(out)) {
+    warning("Nothing computed - no edge corrections chosen")
+    return(NULL)
+  }
+  
+  # which corrections have been computed?
+  corrxns <- rev(setdiff(names(out), "r"))
+
+  # default is to display them all
+  formula(out) <- . ~ r
+  fvnames(out, ".") <- corrxns
+
+  #
+  unitname(out) <- unitname(X)
+
+  if(danger)
+    attr(out, "dangerous") <- dangerous
+  return(out)
+}
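+
+## Usage sketch (illustrative comment): intensities may also be supplied
+## as pixel images; the bandwidth value below is an example only.
+##   I <- marks(amacrine) == "on"
+##   J <- marks(amacrine) == "off"
+##   lamI <- density(amacrine[I], sigma=0.15)
+##   lamJ <- density(amacrine[J], sigma=0.15)
+##   g <- pcfmulti.inhom(amacrine, I, J, lambdaI=lamI, lambdaJ=lamJ)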
+
diff --git a/R/penttinen.R b/R/penttinen.R
new file mode 100644
index 0000000..d8ec431
--- /dev/null
+++ b/R/penttinen.R
@@ -0,0 +1,79 @@
+#
+#
+#    penttinen.R
+#
+#    $Revision: 1.2 $	$Date: 2016/02/16 01:39:12 $
+#
+#    Penttinen pairwise interaction
+#
+#
+# -------------------------------------------------------------------
+#	
+
+Penttinen <- local({
+
+  # create blank template object without family and pars
+
+  BlankAntti <-
+  list(
+       name     = "Penttinen process",
+       creator  = "Penttinen",
+       family    = "pairwise.family", # evaluated later
+       pot      = function(d, par) {
+         ans <- numeric(length(d))
+         dim(ans) <- dim(d)
+         zz <- d/(2 * par$r)
+         ok <- (zz < 1)
+         z <- zz[ok]
+         ans[ok] <- (2/pi) * (acos(z) - z * sqrt(1-z^2))
+         return(ans)
+       },
+       par      = list(r = NULL), # to be filled in
+       parnames = "circle radius",
+       init     = function(self) {
+         r <- self$par$r
+         if(!is.numeric(r) || length(r) != 1 || r <= 0)
+           stop("interaction distance r must be a positive number")
+       },
+       update = NULL,  # default OK
+       print = NULL,    # default OK
+       interpret =  function(coeffs, self) {
+         theta <- as.numeric(coeffs[1])
+         gamma <- exp(theta)
+         return(list(param=list(gamma=gamma),
+                     inames="interaction parameter gamma",
+                     printable=dround(gamma)))
+       },
+       valid = function(coeffs, self) {
+         theta <- as.numeric(coeffs[1])
+         return(is.finite(theta) && (theta <= 0))
+       },
+       project = function(coeffs, self) {
+         if((self$valid)(coeffs, self)) return(NULL) else return(Poisson())
+       },
+       irange = function(self, coeffs=NA, epsilon=0, ...) {
+         r <- self$par$r
+         if(anyNA(coeffs))
+           return(2 * r)
+         theta <- coeffs[1]
+         if(abs(theta) <= epsilon)
+           return(0)
+         else
+           return(2 * r)
+       },
+       version=NULL # to be filled in 
+       )
+  class(BlankAntti) <- "interact"
+
+
+  # Finally define main function
+  
+  Penttinen <- function(r) {
+    instantiate.interact(BlankAntti, list(r=r))
+  }
+
+  Penttinen <- intermaker(Penttinen, BlankAntti)
+  
+  Penttinen
+})
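+
+## Usage sketch (illustrative comment; the radius is an example value):
+##   fit <- ppm(cells ~ 1, Penttinen(r=0.07))
+##   print(fit)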
+
diff --git a/R/percy.R b/R/percy.R
new file mode 100644
index 0000000..093fd0d
--- /dev/null
+++ b/R/percy.R
@@ -0,0 +1,100 @@
+## percy.R
+##
+## Percus-Yevick style approximations to pcf and K
+##
+##  $Revision: 1.4 $ $Date: 2014/01/31 10:10:19 $
+
+pcfmodel.ppm <- local({
+
+  pcfmodel.ppm <- function(model, ...) {
+    if(is.multitype(model))
+      stop("Not yet implemented for multitype models")
+    if(!is.stationary(model))
+      stop("Model must be stationary")
+    if(is.poisson(model)) return(function(r) rep(1, length(r)))
+    inte <- as.interact(model)
+    if(inte$family$name != "pairwise")
+      stop("Only implemented for pairwise-interaction models")
+    lambda <- intensity(model)
+    beta <- exp(coef(model)[1])
+    par <- inte$par
+    pot <- inte$pot
+    f <- fitin(model)
+    Vcoefs <- f$coefs[f$Vnames]
+    Mayer <- inte$Mayer
+    G <- Mayer(Vcoefs, inte)
+    irange <- reach(inte, epsilon=1e-6)
+    G2fun <- inte$Percy
+    testit <- resolve.1.default(list(testit=FALSE), list(...))
+    if(testit || is.null(G2fun))
+      G2fun <- pairwisePercy
+    fun <- function(r) {
+      pcfapprox(r, beta, lambda, pot, par, Vcoefs, G, G2fun, irange)
+    }
+    return(fun)
+  }
+
+  pcfapprox <- function(r, beta, lambda, pot, par, Vcoefs, G, G2fun, irange) {
+    as.numeric((beta/lambda)^2 *
+               exp(logpairpot(r, pot, par, Vcoefs)
+                   - lambda * G2fun(r, Vcoefs, par, pot=pot,
+                                    irange=irange, G=G)))
+  }
+
+  logpairpot <- function(r, pot, par, Vcoefs) {
+    as.numeric(pot(matrix(r, ncol=1), par) %*% Vcoefs)
+  }
+  
+  negpair <- function(x,y, pot, par, Vcoefs) {
+    ## evaluate 1 - g(x,y)
+    ## where g(x,y) is pair interaction between (0,0) and (x,y)
+    1 - exp(logpairpot(sqrt(x^2+y^2), pot, par, Vcoefs))
+  }
+  
+  pairwisePercy <- function(r, Vcoefs, par, ..., G, pot, irange, dimyx=256) {
+    S <- max(max(r), irange)
+    ng <- as.im(negpair, square(c(-S,S)),
+                  pot=pot, par=par, Vcoefs=Vcoefs,
+                  dimyx=dimyx)
+    ng2 <- convolve.im(ng)
+    rr <- seq(min(r), max(r), length=dimyx[1])
+    yy <- ng2[list(x=rr, y=rep.int(0, dimyx[1]))]
+    zz <- 2 * G - yy
+    z <- approx(rr, zz, r)$y
+    return(z)
+  }
+
+  pcfmodel.ppm
+})
+
+    
+
+Kmodel.ppm <- local({
+  
+  Kmodel.ppm <- function(model, ...) {
+    if(is.poisson(model)) return(function(r) { pi * r^2 })
+    pc <- pcfmodel(model, ...)
+    K <- function(r) pcf2K(r, pc)
+    return(K)
+  }
+
+  pcf2K <- function(r, pc) {
+    ## integrate the pair correlation function to obtain the K-function
+    if(length(r) == 1) {
+      ## definite integral
+      spcfs <- function(s) { s * pc(s) }
+      y <- 2 * pi * integrate(spcfs, lower=0, upper=r)$value
+    } else {
+      ## indefinite integral
+      rr <- seq(0, max(r), length=1025)
+      dr <- max(r)/(length(rr) - 1)
+      ff <- 2 * pi * rr * pc(rr)
+      yy <- dr * cumsum(ff)
+      y <- approx(rr, yy, r)$y
+    }
+    return(y)
+  }
+
+  Kmodel.ppm
+})
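+
+## Usage sketch (illustrative comment): approximate g and K for a fitted
+## pairwise-interaction model such as a Strauss process.
+##   fit <- ppm(cells ~ 1, Strauss(0.1))
+##   g <- pcfmodel(fit)
+##   K <- Kmodel(fit)
+##   curve(g(x), from=0.01, to=0.25)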
+                    
diff --git a/R/periodify.R b/R/periodify.R
new file mode 100755
index 0000000..7ecfaff
--- /dev/null
+++ b/R/periodify.R
@@ -0,0 +1,127 @@
+#
+# periodify.R
+#
+# replicate a pattern periodically
+#
+#  $Revision: 1.3 $  $Date: 2011/04/17 05:52:50 $
+#
+
+periodify <- function(X, ...) {
+  UseMethod("periodify")
+}
+
+periodify.ppp <- function(X, nx=1, ny=1, ...,
+                          combine=TRUE, warn=TRUE, check=TRUE, 
+                          ix=(-nx):nx, iy=(-ny):ny,
+                          ixy=expand.grid(ix=ix,iy=iy)) {
+  # sanity checks
+  if(!missing(nx) || !missing(ny)) {
+    if(is.null(nx)) nx <- 1
+    if(is.null(ny)) ny <- 1
+    if(length(nx) != 1 || length(ny) != 1)
+      stop("nx and ny should be single integers")
+    if(nx != round(nx) || ny != round(ny))
+      stop("nx and ny should be integers")
+  }
+  force(ixy)
+  W <- X$window
+  isrect <- (W$type == "rectangle")
+  if(warn && combine && !isrect)
+    warning("X has a non-rectangular window")
+  else 
+    isrect <- isrect && all(diff(ix) == 1) && all(diff(iy) == 1)
+  width <- diff(W$xrange)
+  height <- diff(W$yrange)
+  shifts <- cbind(ixy[,1] * width, ixy[,2] * height)
+  Xshift <- list()
+  for(i in 1:nrow(shifts))
+    Xshift[[i]] <- shift(X, vec=as.numeric(shifts[i, ]))
+  if(!combine)
+    return(Xshift)
+  Wnew <- if(isrect) {
+    owin(range(range(W$xrange) + range(shifts[,1])),
+         range(range(W$yrange) + range(shifts[,2])))
+  } else NULL
+  Z <- do.call(superimpose, append(Xshift, list(W=Wnew, check=check)))
+  return(Z)
+}
+
+periodify.psp <- function(X, nx=1, ny=1, ...,
+                          combine=TRUE, warn=TRUE, check=TRUE,
+                          ix=(-nx):nx, iy=(-ny):ny,
+                          ixy=expand.grid(ix=ix,iy=iy)) {
+  # sanity checks
+  if(!missing(nx) || !missing(ny)) {
+    if(is.null(nx)) nx <- 1
+    if(is.null(ny)) ny <- 1
+    if(length(nx) != 1 || length(ny) != 1)
+      stop("nx and ny should be single integers")
+    if(nx != round(nx) || ny != round(ny))
+      stop("nx and ny should be integers")
+  }
+  force(ixy)
+  W <- X$window
+  isrect <- (W$type == "rectangle")
+  if(warn && combine && !isrect)
+    warning("X has a non-rectangular window")
+  else 
+    isrect <- isrect && all(diff(ix) == 1) && all(diff(iy) == 1)
+  width <- diff(W$xrange)
+  height <- diff(W$yrange)
+  shifts <- cbind(ixy[,1] * width, ixy[,2] * height)
+  Xshift <- list()
+  for(i in 1:nrow(shifts))
+    Xshift[[i]] <- shift(X, vec=as.numeric(shifts[i, ]))
+  if(!combine)
+    return(Xshift)
+  Wnew <- if(isrect) {
+    owin(range(range(W$xrange) + range(shifts[,1])),
+         range(range(W$yrange) + range(shifts[,2])))
+  } else NULL
+  Z <- do.call(superimpose, append(Xshift, list(W=Wnew, check=check)))
+  return(Z)
+}
+
+periodify.owin <- function(X, nx=1, ny=1, ...,
+                          combine=TRUE, warn=TRUE,
+                          ix=(-nx):nx, iy=(-ny):ny,
+                          ixy=expand.grid(ix=ix,iy=iy)) {
+  # sanity checks
+  if(!missing(nx) || !missing(ny)) {
+    if(is.null(nx)) nx <- 1
+    if(is.null(ny)) ny <- 1
+    if(length(nx) != 1 || length(ny) != 1)
+      stop("nx and ny should be single integers")
+    if(nx != round(nx) || ny != round(ny))
+      stop("nx and ny should be integers")
+  }
+  force(ixy)
+  isrect <- (X$type == "rectangle")
+  if(warn && combine && !isrect)
+    warning("X is not rectangular")
+  else 
+    isrect <- isrect && all(diff(ix) == 1) && all(diff(iy) == 1)
+  width <- diff(X$xrange)
+  height <- diff(X$yrange)
+  shifts <- cbind(ixy[,1] * width, ixy[,2] * height)
+  if(combine) {
+    if(isrect) {
+      # result is a rectangle
+      Y <-  owin(range(range(X$xrange) + range(shifts[,1])),
+                    range(range(X$yrange) + range(shifts[,2])))
+    } else {
+      # result is another type of window
+      for(i in 1:nrow(shifts)) {
+        Xi <- shift(X, vec=as.numeric(shifts[i, ]))
+        Y <- if(i == 1) Xi else union.owin(Y, Xi)
+      }
+    }
+  } else {
+    # result is a list
+    Y <- list()
+    for(i in 1:nrow(shifts))
+      Y[[i]] <- shift(X, vec=as.numeric(shifts[i, ]))
+  }
+  return(Y)
+}
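+
+## Usage sketch (illustrative comment): with nx = ny = 1 the default
+## indices are ix = iy = -1:1, giving a 3 x 3 array of shifted copies.
+##   Y <- periodify(cells, 1)
+##   plot(Y)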
+
diff --git a/R/persp.im.R b/R/persp.im.R
new file mode 100644
index 0000000..0b90d3a
--- /dev/null
+++ b/R/persp.im.R
@@ -0,0 +1,329 @@
+##
+## persp.im.R
+##
+##  'persp' method for image objects
+##      plus annotation
+##  
+##  $Revision: 1.20 $ $Date: 2016/09/01 05:49:42 $
+##
+
+persp.im <- local({
+
+  persp.im <- function(x, ...,
+                       colmap=NULL, colin=x, apron=FALSE,
+                       visible=FALSE) {
+    xname <- deparse(substitute(x))
+    xinfo <- summary(x)
+    if(xinfo$type == "factor")
+      stop("Perspective plot is inappropriate for factor-valued image")
+    ## check whether 'col' was specified when 'colmap' was intended
+    Col <- list(...)$col
+    if(is.null(colmap) && !is.null(Col) && !is.matrix(Col) && length(Col) != 1)
+      warning("Argument col is not a matrix. Did you mean colmap?")
+    if(!missing(colin)) {
+      ## separate image to determine colours
+      verifyclass(colin, "im")
+      if(!compatible(colin, x)) {
+        ## resample 'colin' onto grid of 'x'
+        colin <- as.im(colin, W=x)
+      }
+      if(is.null(colmap))
+        colmap <- spatstat.options("image.colfun")(128)
+    }
+    pop <- spatstat.options("par.persp")
+    ##
+    if(is.function(colmap) && !inherits(colmap, "colourmap")) {
+      ## coerce to a 'colourmap' if possible
+      clim <- range(colin, finite=TRUE)
+      if(names(formals(colmap))[1] == "n") {
+        colval <- colmap(128)
+        colmap <- colourmap(colval, range=clim)
+      } else {
+        ## colour map determined by a rule (e.g. 'beachcolours')
+        colmap <- invokeColourmapRule(colmap, colin,
+                                      zlim=clim, colargs=list(...))
+        if(is.null(colmap))
+          stop("Unrecognised syntax for colour function")
+      }
+    }
+    ## colour map?
+    if(is.null(colmap)) {
+      colinfo <- list(col=NULL)
+    } else if(inherits(colmap, "colourmap")) {
+      ## colour map object
+      ## apply colour function to image data
+      colval <- eval.im(colmap(colin))
+      colval <- t(as.matrix(colval))
+      ## strip one row and column for input to persp.default
+      colval <- colval[-1, -1]
+      ## replace NA by arbitrary value
+      isna <- is.na(colval)
+      if(any(isna)) {
+        stuff <- attr(colmap, "stuff")
+        colvalues <- stuff$outputs
+        colval[isna] <- colvalues[1]
+      }
+      ## pass colour matrix (and suppress lines)
+      colinfo <- list(col=colval, border=NA)
+    } else {
+      ## interpret 'colmap' as colour map
+      if(is.list(colmap) && all(c("breaks", "col") %in% names(colmap))) {
+        breaks <- colmap$breaks
+        colvalues <- colmap$col
+      } else if(is.vector(colmap)) {
+        colvalues <- colmap
+        breaks <- quantile(colin,
+                           seq(from=0,to=1,length.out=length(colvalues)+1))
+        if(!all(ok <- !duplicated(breaks))) {
+          breaks <- breaks[ok]
+          colvalues <- colvalues[ok[-1]]
+        }
+      } else warning("Unrecognised format for colour map")
+      ## apply colour map to image values
+      colid <- cut.im(colin, breaks=breaks, include.lowest=TRUE)
+      colval <- eval.im(colvalues[unclass(colid)])
+      colval <- t(as.matrix(colval))
+#      nr <- nrow(colval)
+#      nc <- ncol(colval)
+      ## strip one row and column for input to persp.default
+      colval <- colval[-1, -1]
+      colval[is.na(colval)] <- colvalues[1]
+      ## pass colour matrix (and suppress lines)
+      colinfo <- list(col=colval, border=NA)
+    }
+
+    if(apron) {
+      ## add an 'apron'
+      zlim <- list(...)$zlim
+      bottom <- if(!is.null(zlim)) zlim[1] else min(x)
+      x <- na.handle.im(x, na.replace=bottom)
+      x <- padimage(x, bottom)
+      xinfo <- summary(x)
+      if(is.matrix(colval <- colinfo$col)) {
+        colval <- matrix(col2hex(colval), nrow(colval), ncol(colval))
+        grijs <- col2hex("lightgrey")
+        colval <- cbind(grijs, rbind(grijs, colval, grijs), grijs)
+        colinfo$col <- colval
+      }
+    }
+
+    if(spatstat.options("monochrome"))
+      colinfo$col <- to.grey(colinfo$col)
+  
+    ## get reasonable z scale while fixing x:y aspect ratio
+    if(xinfo$type %in% c("integer", "real")) {
+      zrange <- xinfo$range
+      if(diff(zrange) > 0) {
+        xbox <- as.rectangle(x)
+        zscale <- 0.5 * mean(diff(xbox$xrange), diff(xbox$yrange))/diff(zrange)
+        zlim <- zrange
+      } else {
+        zscale <- NULL
+        mx <- xinfo$mean
+        zlim <- mx + c(-1,1) * if(mx == 0) 0.1 else min(abs(mx), 1)
+      }
+    } else 
+      zscale <- zlim <- NULL
+
+    dotargs <- list(...)
+    if(spatstat.options("monochrome"))
+      dotargs <- col.args.to.grey(dotargs)
+    
+    yargh <- resolve.defaults(list(x=x$xcol, y=x$yrow, z=t(x$v)),
+                              dotargs,
+                              pop,
+                              colinfo,
+                              list(xlab="x", ylab="y", zlab=xname),
+                              list(scale=FALSE, expand=zscale,
+                                   zlim=zlim),
+                              list(main=xname),
+                              .StripNull=TRUE)
+
+    jawab <- do.call.matched(persp, yargh, 
+                             funargs=graphicsPars("persp"))
+
+    attr(jawab, "expand") <- yargh$expand
+    
+    if(visible)
+      attr(jawab, "visible") <- perspvis(x, M=jawab)
+    
+    return(invisible(jawab))
+  }
+
+  diffit <- function(x) {
+    y <- diff(x)
+    return(c(y[1], y))
+  }
+  
+  perspvis <- function(X, ..., M=NULL) {
+    stopifnot(is.im(X))
+    ## determine perspective matrix
+    if(is.null(M))
+      M <- persp(X, ...)
+    ## project the coordinates
+    ## onto (x,y) plane of plot and z axis pointing out of it
+    xy <- rasterxy.im(X, drop=TRUE)
+    z <- X[drop=TRUE]
+    xyz <- cbind(xy, z)
+    v <- cbind(xyz, 1) %*% M
+    pxyz <- v[,1:3]/v[,4]
+    px <- pxyz[,1]
+    py <- pxyz[,2]
+    pz <- pxyz[,3]
+    ## determine greatest possible difference in 'depth' in one pixel step
+    PZ <- as.matrix(X)
+    ok <- !is.na(PZ)
+    PZ[ok] <- pz
+    maxslip <- max(0, abs(apply(PZ, 1, diff)),
+                      abs(apply(PZ, 2, diff)), na.rm=TRUE)
+    ## determine which pixels are in front
+    d <- ceiling(dim(X)/2)
+    jx <- cut(px, breaks=d[2])
+    iy <- cut(py, breaks=d[1])
+    zmax <- tapply(pz, list(iy,jx), max)
+    isvis <- infront <- (pz > zmax[cbind(iy,jx)] - 2 * maxslip)
+    ##
+    if(TRUE) {
+      ## Additionally check whether unit normal to surface is pointing to viewer
+      Xmat <- as.matrix(X)
+      dzdx <- cbind(0, t(apply(Xmat, 1, diff)))/X$xstep
+      dzdy <- rbind(0, apply(Xmat, 2, diff))/X$ystep
+      dzdx <- as.vector(dzdx[ok])
+      dzdy <- as.vector(dzdy[ok])
+      ## unscaled normal is (-dzdx, -dzdy, 1)
+      if(FALSE) {
+        ## THIS DOESN'T WORK - not sure why.
+        ## rescale so that length is half diameter of pixel
+        fac <- sqrt(X$xstep^2 + X$ystep^2)/(2 * sqrt(dzdx^2+dzdy^2+1))
+        ## add to spatial coordinates
+        xyzplus <- xyz + fac * cbind(-dzdx, -dzdy, 1)
+        ## transform
+        vplus <- cbind(xyzplus, 1) %*% M
+        pplus <- vplus[,1:3]/vplus[,4]
+        ## determine whether normal is pointing toward viewer
+        deltaz <- pplus[,3] - pz
+        isvis <- infront & (deltaz > 0)
+      } else {
+        theta <- atan2(M[2,1],M[1,1]) + pi/2
+        phi <-  - atan2(M[3,3], M[3,2])
+        ## check agreement
+        ## cat(paste("Guess: theta=", theta * 180/pi, "\n"))
+        ## cat(paste("Guess: phi=", phi * 180/pi, "\n"))
+        ## view vector
+        ## '+' must end the first line, otherwise the second term is
+        ## parsed as a separate expression and silently discarded
+        viewer <- cos(phi) * c(cos(theta), sin(theta), 0) +
+                  c(0, 0, sin(phi))
+        ## inner product
+        dotprod <- -dzdx * viewer[1] - dzdy * viewer[2] + viewer[3]
+        isvis <- infront & (dotprod < 0)
+      }
+    }
+    ## put into image
+    Y <- eval.im(X > 0)
+    Y[] <- isvis
+    ## replace 'NA' by 'FALSE'
+    if(anyNA(Y))
+      Y <- as.im(Y, na.replace=FALSE)
+    return(Y)
+  }
+
+  persp.im
+})
+
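+## Illustrative sketch for persp.im (assumes the spatstat dataset
+## 'bei.extra', whose component 'elev' is a pixel image):
+##   persp(bei.extra$elev, theta=-30, phi=20,
+##         colmap=terrain.colors(128), apron=TRUE)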
+
+perspPoints <- function(x, y=NULL, ..., Z, M) {
+  xy <- xy.coords(x, y)
+  stopifnot(is.im(Z))
+  X <- as.ppp(xy, W=Frame(Z))
+  if(!(is.matrix(M) && all(dim(M) == 4)))
+    stop("M should be a 4 x 4 matrix, returned from persp()")
+  V <- attr(M, "visible")
+  if(is.null(V)) {
+    warning(paste("M does not contain visibility information;",
+               "it should be recomputed by persp() with visible=TRUE"))
+  } else {
+    ## restrict to visible points
+    VX <- V[X, drop=FALSE]
+    VX[is.na(VX)] <- FALSE
+    X <- X[VX]
+  }
+  #' determine heights
+  ZX <- Z[X, drop=FALSE] # may contain NA
+  #' transform and plot
+  points(trans3d(X$x, X$y, ZX, M), ...)
+}
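+
+## Illustrative sketch: capture the perspective matrix with visible=TRUE,
+## then superimpose only the visible points (assumes the spatstat
+## datasets 'bei' and 'bei.extra'):
+##   M <- persp(bei.extra$elev, theta=-30, phi=20, visible=TRUE)
+##   perspPoints(bei, Z=bei.extra$elev, M=M, pch=16)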
+
+perspSegments <- local({
+  perspSegments <- function(x0, y0=NULL, x1=NULL, y1=NULL, ..., Z, M) {
+    stopifnot(is.im(Z))
+    if(!(is.matrix(M) && all(dim(M) == 4)))
+      stop("M should be a 4 x 4 matrix, returned from persp()")
+    V <- attr(M, "visible")
+    if(is.null(V))
+      warning(paste("M does not contain visibility information;",
+                 "it should be recomputed by persp() with visible=TRUE"))
+    
+    if(is.psp(X <- x0) && is.null(y0) && is.null(x1) && is.null(y1)) {
+      eX <- X$ends
+#      nX <- nrow(eX)
+    } else {
+#      nX <- length(x0)
+      check.nvector(x0, naok=TRUE)
+      check.nvector(y0, naok=TRUE)
+      check.nvector(x1, naok=TRUE)
+      check.nvector(y1, naok=TRUE)
+      eX <- cbind(x0, y0, x1, y1)
+    }
+    if(is.null(V)) {
+      Y <- eX
+    } else {
+      ## chop segments to length of single pixel
+      eps <- with(Z, min(xstep,ystep))
+      Y <- do.call(rbind, lapply(as.data.frame(t(eX)), chopsegment, eps=eps))
+      ## determine which segments are visible
+      yleft  <- list(x=Y[,1], y=Y[,2])
+      yright <- list(x=Y[,3], y=Y[,4])
+      ok <- V[yleft, drop=FALSE] & V[yright, drop=FALSE]
+      ok[is.na(ok)] <- FALSE
+      Y <- Y[ok, ,drop=FALSE]
+    }
+    if(nrow(Y) == 0) return(invisible(NULL))
+    ## map to projected plane
+    x0y0 <- trans3d(Y[,1], Y[,2], Z[list(x=Y[,1],y=Y[,2]), drop=FALSE], M)
+    x1y1 <- trans3d(Y[,3], Y[,4], Z[list(x=Y[,3],y=Y[,4]), drop=FALSE], M)
+    segments(x0y0$x, x0y0$y, x1y1$x, x1y1$y, ...)
+  }
+
+  chopsegment <- function(x, eps) {
+    len2 <- (x[3] - x[1])^2 + (x[4] - x[2])^2
+    if(len2 <= eps^2) return(x)
+    n <- ceiling(sqrt(len2)/eps)
+    b <- (1:n)/n
+    a <- (0:(n-1))/n
+    return(cbind(x[1] + a * (x[3]-x[1]),
+                 x[2] + a * (x[4]-x[2]),
+                 x[1] + b * (x[3]-x[1]),
+                 x[2] + b * (x[4]-x[2])))
+  }
+      
+  perspSegments
+})
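+
+## Illustrative sketch (assumes Z is a pixel image on the unit square and
+## M was returned by persp(Z, visible=TRUE)):
+##   S <- psp(runif(5), runif(5), runif(5), runif(5), window=square(1))
+##   perspSegments(S, Z=Z, M=M)   # chopped to pixel size, then clipped
+##                                # to the visible part of the surface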
+
+perspLines <- function(x, y=NULL, ..., Z, M) {
+  xy <- xy.coords(x, y)
+  n <- length(xy$x)
+  perspSegments(xy$x[-n], xy$y[-n], xy$x[-1], xy$y[-1], Z=Z, M=M, ...)
+}
+
+perspContour <- function(Z, M, ...,
+                         nlevels=10, levels=pretty(range(Z), nlevels)) {
+  cl <- contourLines(x=Z$xcol,
+                     y=Z$yrow,
+                     z=t(Z$v),
+                     nlevels=nlevels, levels=levels)
+  for(i in seq_along(cl)) {
+    cli <- cl[[i]]
+    perspLines(cli$x, cli$y, ..., Z=Z, M=M)
+  }
+  invisible(NULL)
+}
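+
+## Illustrative sketch (Z and M as in the perspSegments sketch above):
+##   perspContour(Z, M, nlevels=5)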
+
diff --git a/R/pickoption.R b/R/pickoption.R
new file mode 100755
index 0000000..fce7755
--- /dev/null
+++ b/R/pickoption.R
@@ -0,0 +1,51 @@
+#
+#  pickoption.R
+#
+#  $Revision: 1.6 $  $Date: 2016/04/25 02:34:40 $
+#
+
+pickoption <- function(what="option", key, keymap, ...,
+                       exact=FALSE, list.on.err=TRUE, die=TRUE, multi=FALSE,
+                       allow.all=TRUE)
+{
+  keyname <- short.deparse(substitute(key))
+
+  if(!is.character(key))
+    stop(paste(keyname, "must be a character string",
+               if(multi) "or strings" else NULL))
+  if(length(key) == 0)
+    stop(paste("Argument", sQuote(keyname), "has length zero"))
+  key <- unique(key)
+  if(!multi && length(key) > 1)
+    stop(paste("Must specify only one", what, sQuote(keyname)))
+  allow.all <- allow.all && multi
+
+  id <-
+    if(allow.all && identical(key, "all")) {
+      seq_along(keymap)
+    } else if(exact) {
+      match(key, names(keymap), nomatch=NA)
+    } else {
+      pmatch(key, names(keymap), nomatch=NA)
+    }
+  
+  if(any(nbg <- is.na(id))) {
+    # no match
+    whinge <- paste("unrecognised", what,
+                    paste(dQuote(key[nbg]), collapse=", "),
+                    "in argument", sQuote(keyname))
+    if(list.on.err) {
+      cat(paste(whinge, "\n", "Options are:"),
+          paste(dQuote(names(keymap)), collapse=","), "\n")
+    }
+    if(die) 
+      stop(whinge, call.=FALSE)
+    else
+      return(NULL)
+  }
+
+  key <- keymap[id]
+  names(key) <- NULL
+  return(key)
+}
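+
+# Illustrative call (hypothetical keymap): 'key' is matched against the
+# names of 'keymap' (partially, unless exact=TRUE) and the matching
+# values of 'keymap' are returned:
+#
+#   pickoption("edge correction", "trans",
+#              c(trans="translate", iso="isotropic", border="border"))
+#   # -> "translate"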
+
diff --git a/R/pixellate.R b/R/pixellate.R
new file mode 100755
index 0000000..4928747
--- /dev/null
+++ b/R/pixellate.R
@@ -0,0 +1,226 @@
+#
+#           pixellate.R
+#
+#           $Revision: 1.24 $    $Date: 2017/06/05 10:31:58 $
+#
+#     pixellate            convert an object to a pixel image
+#
+#     pixellate.ppp        convert a point pattern to a pixel image
+#                          (pixel value = number of points in pixel)
+#
+#     pixellate.owin       convert a window to a pixel image
+#                          (pixel value = area of intersection with pixel)
+#
+
+pixellate <- function(x, ...) {
+  UseMethod("pixellate")
+}
+
+pixellate.ppp <- function(x, W=NULL, ..., weights=NULL, padzero=FALSE,
+                          fractional=FALSE, preserve=FALSE) {
+  verifyclass(x, "ppp")
+
+  if(is.null(W))
+    W <- Window(x)
+  isrect <- is.rectangle(W)
+  preserve <- preserve && !isrect
+  iscount <- is.null(weights) && !fractional && !preserve
+  
+  W <- do.call.matched(as.mask,
+                       resolve.defaults(list(...),
+                                        list(w=W)))
+
+  nx <- npoints(x)
+  
+  insideW <- W$m
+  dimW   <- W$dim
+  nr <- dimW[1L]
+  nc <- dimW[2L]
+  xcolW <- W$xcol
+  yrowW <- W$yrow
+  xrangeW <- W$xrange
+  yrangeW <- W$yrange
+  unitsW <- unitname(W)
+    
+  # multiple columns of weights?
+  if(is.data.frame(weights) || is.matrix(weights)) {
+    k <- ncol(weights)
+    stopifnot(nrow(weights) == npoints(x))
+    ## coerce via as.matrix so a one-column data frame becomes a plain vector
+    weights <- if(k == 1) as.vector(as.matrix(weights)) else as.data.frame(weights)
+  } else {
+    k <- 1
+    if(length(weights) == 0) weights <- NULL else 
+      stopifnot(length(weights) == npoints(x) || length(weights) == 1)
+    if(length(weights) == 1)
+      weights <- rep(weights, npoints(x))
+  }
+
+  # handle empty point pattern
+  if(nx == 0) {
+    zerovalue <- if(iscount) 0L else as.double(0)
+    zeroimage <- as.im(zerovalue, W)
+    if(padzero) # map NA to 0
+      zeroimage <- na.handle.im(zeroimage, zerovalue)
+    result <- zeroimage
+    if(k > 1) {
+      result <- as.solist(rep(list(zeroimage), k))
+      names(result) <- colnames(weights)
+    }
+    return(result)
+  }
+
+  # map points to pixels 
+  xx <- x$x
+  yy <- x$y
+  if(!fractional) {
+    #' map (x,y) to nearest raster point
+    pixels <- if(preserve) nearest.valid.pixel(xx, yy, W) else 
+              nearest.raster.point(xx, yy, W)
+    rowfac <- factor(pixels$row, levels=1:nr)
+    colfac <- factor(pixels$col, levels=1:nc)
+  } else {
+    #' attribute fractional weights to the 4 pixel centres surrounding (x,y)
+    #' find surrounding pixel centres
+    jj <- findInterval(xx, xcolW, rightmost.closed=TRUE)
+    ii <- findInterval(yy, yrowW, rightmost.closed=TRUE)
+    jleft <- pmax(jj, 1)
+    jright <- pmin(jj + 1, nc)   # jj indexes columns, bounded by nc
+    ibot <- pmax(ii, 1)
+    itop <- pmin(ii + 1, nr)     # ii indexes rows, bounded by nr
+    #' compute fractional weights
+    wleft <- pmin(1, abs(xcolW[jright] - xx)/W$xstep)
+    wright <- 1 - wleft
+    wbot <- pmin(1, abs(yrowW[itop] - yy)/W$ystep)
+    wtop <- 1 - wbot
+    #' pack together
+    ww <- c(wleft * wbot, wleft * wtop, wright * wbot, wright * wtop)
+    rowfac <- factor(c(ibot, itop, ibot, itop), levels=1:nr)
+    colfac <- factor(c(jleft, jleft, jright, jright), levels=1:nc)
+    if(preserve) {
+      #' normalise fractions for each data point to sum to 1 inside window
+      ok <- insideW[cbind(as.integer(rowfac), as.integer(colfac))]
+      wwok <- ww * ok
+      ## ww is laid out in four blocks of length nx, so per-point totals
+      ## are row sums of the nx-by-4 layout, and 'recip' must be recycled
+      ## blockwise to match
+      denom <- .rowSums(wwok, nx, 4, na.rm=TRUE)
+      recip <- ifelse(denom == 0, 1, 1/denom)
+      ww <- wwok * rep(recip, times=4)
+    }
+    #' data weights must be replicated
+    if(is.null(weights)) {
+      weights <- ww
+    } else if(k == 1) {
+      weights <- ww * rep(weights, 4)
+    } else {
+      weights <- ww * apply(weights, 2, rep, times=4)
+    }
+  }
+  
+  #' sum weights
+  if(is.null(weights)) {
+    ta <- table(row = rowfac, col = colfac)
+  } else if(k == 1) {
+    ta <- tapplysum(weights, list(row = rowfac, col=colfac))
+  } else {
+    ta <- list()
+    for(j in 1:k) {
+      ta[[j]] <- tapplysum(weights[,j], list(row = rowfac, col=colfac))
+    }
+  }
+
+  # pack up as image(s)
+  if(k == 1) {
+    # single image
+    # clip to window of data
+    if(!padzero)
+      ta[!insideW] <- NA
+    out <- im(ta,
+              xcol = xcolW, yrow = yrowW,
+              xrange = xrangeW, yrange = yrangeW,
+              unitname=unitsW)
+  } else {
+    # case k > 1
+    # create template image to reduce overhead
+    template <- im(ta[[1L]],
+                   xcol = xcolW, yrow = yrowW,
+                   xrange = xrangeW, yrange = yrangeW,
+                   unitname=unitsW)
+    out <- list()
+    for(j in 1:k) {
+      taj <- ta[[j]]
+      # clip to window of data
+      if(!padzero) 
+        taj[!insideW] <- NA
+      # copy template and reassign pixel values
+      outj <- template
+      outj$v <- taj
+      # store
+      out[[j]] <- outj
+    }
+    out <- as.solist(out)
+    names(out) <- names(weights)
+  }
+  return(out)
+}
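+
+# Illustrative sketch (assumes the spatstat dataset 'cells'):
+#   pixellate(cells, dimyx=32)            # image of pixel counts
+#   pixellate(cells, dimyx=32,            # image of summed weights
+#             weights=runif(npoints(cells)))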
+
+pixellate.owin <- function(x, W=NULL, ...) {
+  stopifnot(is.owin(x))
+  P <- as.polygonal(x)
+  R <- as.rectangle(x)
+  if(is.null(W)) 
+    W <- R
+  else if(!is.subset.owin(R, as.rectangle(W)))
+    stop("W does not cover the domain of x")
+  
+  W <- do.call.matched(as.mask,
+                       resolve.defaults(list(...),
+                                        list(w=W)))
+  ## compute
+  Zmat <- polytileareaEngine(P, W$xrange, W$yrange, nx=W$dim[2L], ny=W$dim[1L])
+  ## convert to image
+  Z <- im(Zmat, xcol=W$xcol, yrow=W$yrow, xrange=W$xrange, yrange=W$yrange,
+          unitname=unitname(W))
+  return(Z)
+}
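+
+# Illustrative sketch (assumes the spatstat window 'letterR'): pixel
+# values are intersection areas, so they sum to the window area:
+#   A <- pixellate(letterR, dimyx=64)
+#   sum(as.matrix(A), na.rm=TRUE)   # approximately area.owin(letterR)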
+
+polytileareaEngine <- function(P, xrange, yrange, nx, ny) {
+  x0 <- xrange[1L]
+  y0 <- yrange[1L]
+  dx <- diff(xrange)/nx
+  dy <- diff(yrange)/ny
+  # process each component polygon
+  Z <- matrix(0.0, ny, nx)
+  B <- P$bdry
+  for(i in seq_along(B)) {
+    PP <- B[[i]]
+    # transform so that pixels become unit squares
+    QQ <- affinexypolygon(PP, vec = c(-x0, -y0))
+    RR <- affinexypolygon(QQ, mat = diag(1/c(dx, dy)))
+    # 
+    xx <- RR$x
+    yy <- RR$y
+    nn <- length(xx)
+    # close polygon
+    xx <- c(xx, xx[1L])
+    yy <- c(yy, yy[1L])
+    nn <- nn+1
+    # call C routine
+    zz <- .C("poly2imA",
+             ncol=as.integer(nx),
+             nrow=as.integer(ny),
+             xpoly=as.double(xx),
+             ypoly=as.double(yy),
+             npoly=as.integer(nn),
+             out=as.double(numeric(nx * ny)),
+             status=as.integer(integer(1L)),
+             PACKAGE = "spatstat")
+    if(zz$status != 0)
+      stop("Internal error")
+    # increment output 
+    Z[] <- Z[] + zz$out
+  }
+  # revert to original scale
+  pixelarea <- dx * dy
+  return(Z * pixelarea)
+}
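+
+# Note: the affine steps above map the pixel in column j, row i to the
+# unit square [j-1, j] x [i-1, i], so the C routine 'poly2imA' works in
+# integer pixel coordinates; multiplying by dx*dy at the end restores
+# the true pixel areas.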
+
+
+  
diff --git a/R/plot.anylist.R b/R/plot.anylist.R
new file mode 100644
index 0000000..559ac6a
--- /dev/null
+++ b/R/plot.anylist.R
@@ -0,0 +1,571 @@
+##
+##  plot.anylist.R
+##
+##  Plotting functions for 'solist', 'anylist', 'imlist'
+##       and legacy class 'listof'
+##
+##  $Revision: 1.24 $ $Date: 2017/08/02 09:50:55 $
+##
+
+plot.anylist <- plot.solist <- plot.listof <-
+  local({
+
+  ## auxiliary functions
+
+  has.multiplot <- function(x) { is.ppp(x) }
+  
+  extraplot <- function(nnn, x, ..., add=FALSE, extrargs=list(),
+                        panel.args=NULL, plotcommand="plot") {
+    argh <- list(...)
+    if(has.multiplot(x) && identical(plotcommand,"plot"))
+      argh <- c(argh, list(multiplot=FALSE))
+    if(!is.null(panel.args)) {
+      xtra <- if(is.function(panel.args)) panel.args(nnn) else panel.args
+      if(!is.list(xtra))
+        stop(paste0("panel.args",
+                    if(is.function(panel.args)) "(i)" else "",
+                    " should be a list"))
+      argh <- resolve.defaults(xtra, argh)
+    }
+    if(length(extrargs) > 0)
+      argh <- resolve.defaults(argh, extrargs)
+    ## some plot commands don't recognise 'add'
+    if(add)
+      argh <- append(argh, list(add=TRUE))
+    do.call(plotcommand, append(list(x=x), argh))
+  }
+
+  exec.or.plot <- function(cmd, i, xi, ..., extrargs=list(), add=FALSE) {
+    if(is.null(cmd)) return(NULL)
+    argh <-
+      resolve.defaults(list(...),
+                       extrargs,
+                       ## some plot commands don't recognise 'add' 
+                       if(add) list(add=TRUE) else NULL,
+                       if(has.multiplot(cmd)) list(multiplot=FALSE) else NULL)
+    if(is.function(cmd)) {
+      do.call(cmd, resolve.defaults(list(i, xi), argh))
+    } else {
+      do.call(plot, resolve.defaults(list(cmd), argh))
+    }
+  }
+
+  exec.or.plotshift <- function(cmd, i, xi, ..., vec=vec,
+                                extrargs=list(), add=FALSE) {
+    if(is.null(cmd)) return(NULL)
+    argh <-
+      resolve.defaults(list(...),
+                       extrargs,
+                       ## some plot commands don't recognise 'add' 
+                       if(add) list(add=TRUE) else NULL,
+                       if(has.multiplot(cmd)) list(multiplot=FALSE) else NULL)
+    if(is.function(cmd)) {
+      do.call(cmd, resolve.defaults(list(i, xi), argh))
+    } else {
+      cmd <- shift(cmd, vec)
+      do.call(plot, resolve.defaults(list(cmd), argh))
+    }
+  }
+
+  classes.with.do.plot <- c("im", "ppp", "psp", "msr", "layered", "tess")
+  
+  ## bounding box, including ribbon for images, legend for point patterns
+  getplotbox <- function(x, ..., do.plot, plotcommand="plot", multiplot) {
+    if(inherits(x, classes.with.do.plot)) {
+      if(identical(plotcommand, "plot")) {
+        y <- if(has.multiplot(x))
+          plot(x, ..., multiplot=FALSE, do.plot=FALSE) else 
+          plot(x, ..., do.plot=FALSE)
+        return(as.owin(y))
+      } else if(identical(plotcommand, "contour")) {
+        y <- contour(x, ..., do.plot=FALSE)      
+        return(as.owin(y))
+      } else {
+        plc <- plotcommand
+        if(is.character(plc)) plc <- get(plc)
+        if(!is.function(plc)) stop("Unrecognised plot function")
+        if("do.plot" %in% names(args(plc)))
+          return(as.owin(do.call(plc, list(x=x, ..., do.plot=FALSE))))
+        return(as.rectangle(x))
+      }
+    }
+    return(try(as.rectangle(x), silent=TRUE))
+  }
+
+  # calculate bounding boxes for each panel using intended arguments!
+  getPlotBoxes <- function(xlist, ..., panel.args=NULL, extrargs=list()) {
+    userargs <- list(...)
+    n <- length(xlist)
+    result <- vector(length=n, mode="list")
+    for(i in seq_len(n)) {
+      pai <- if(is.function(panel.args)) panel.args(i) else list()
+      argh <- resolve.defaults(pai, userargs, extrargs)
+      result[[i]] <- do.call(getplotbox, append(list(x=xlist[[i]]), argh))
+    }
+    return(result)
+  }
+    
+  is.shiftable <- function(x) {
+    if(is.null(x)) return(TRUE)
+    if(is.function(x)) return(FALSE)
+    y <- try(as.rectangle(x), silent=TRUE)
+    return(!inherits(y, "try-error"))
+  }
+
+  maxassigned <- function(i, values) max(-1, values[i[i > 0]])
+  
+  plot.anylist <- function(x, ..., main, arrange=TRUE,
+                            nrows=NULL, ncols=NULL,
+                            main.panel=NULL,
+                            mar.panel=c(2,1,1,2),
+                            hsep = 0,
+                            vsep = 0,
+                            panel.begin=NULL,
+                            panel.end=NULL,
+                            panel.args=NULL,
+                            panel.begin.args=NULL,
+                            panel.end.args=NULL,
+                            plotcommand="plot",
+                            adorn.left=NULL,
+                            adorn.right=NULL,
+                            adorn.top=NULL,
+                            adorn.bottom=NULL,
+                            adorn.size=0.2,
+                            equal.scales=FALSE,
+                            halign=FALSE, valign=FALSE
+                           ) {
+    xname <- short.deparse(substitute(x))
+
+    ## recursively expand entries which are 'anylist' etc
+    while(any(sapply(x, inherits, what="anylist"))) 
+      x <- as.solist(expandSpecialLists(x, "anylist"), demote=TRUE)
+    
+    isSo <- inherits(x, "solist")
+    isIm <- inherits(x, "imlist") || (isSo && all(unlist(lapply(x, is.im))))
+    
+    ## `boomerang despatch'
+    cl <- match.call()
+    if(missing(plotcommand) && isIm) {
+      cl[[1]] <- as.name("image.imlist")
+      parenv <- sys.parent()
+      return(invisible(eval(cl, envir=parenv)))
+    }
+
+    if(isSo) {
+      allfv <- somefv <- FALSE
+    } else {
+      isfv <- unlist(lapply(x, is.fv))
+      allfv <- all(isfv)
+      somefv <- any(isfv)
+    }
+    
+    ## panel margins
+    if(!missing(mar.panel)) {
+      nm <- length(mar.panel)
+      if(nm == 1) mar.panel <- rep(mar.panel, 4) else
+      if(nm == 2) mar.panel <- rep(mar.panel, 2) else
+      if(nm != 4) stop("mar.panel should have length 1, 2 or 4")
+    } else if(somefv) {
+      ## change default
+      mar.panel <- 0.25+c(4,4,2,2)
+    }
+    
+    n <- length(x)
+    names(x) <- good.names(names(x), "Component_", 1:n)
+    if(is.null(main.panel))
+      main.panel <- names(x)
+    else {
+      if(!is.expression(main.panel))
+        main.panel <- as.character(main.panel)
+      nmp <- length(main.panel)
+      if(nmp == 1)
+        main.panel <- rep.int(main.panel, n)
+      else if(nmp != n)
+        stop("Incorrect length for main.panel")
+    }
+
+    if(allfv && equal.scales) {
+      ## all entries are 'fv' objects: determine their plot limits
+      fvlims <- lapply(x, plot, ..., limitsonly=TRUE)
+      ## establish common x,y limits for all panels
+      xlim <- range(unlist(lapply(fvlims, getElement, name="xlim")))
+      ylim <- range(unlist(lapply(fvlims, getElement, name="ylim")))
+      extrargs <- list(xlim=xlim, ylim=ylim)
+    } else extrargs <- list()
+
+    extrargs.begin <- resolve.defaults(panel.begin.args, extrargs)
+    extrargs.end <- resolve.defaults(panel.end.args, extrargs)
+    
+    if(!arrange) {
+      ## sequence of plots
+      result <- vector(mode="list", length=n)
+      for(i in 1:n) {
+        xi <- x[[i]]
+        exec.or.plot(panel.begin, i, xi, main=main.panel[i],
+                     extrargs=extrargs.begin)
+        result[[i]] <-
+          extraplot(i, xi, ...,
+                    add=!is.null(panel.begin),
+                    main=main.panel[i],
+                    panel.args=panel.args, extrargs=extrargs,
+                    plotcommand=plotcommand) %orifnull% list()
+        exec.or.plot(panel.end, i, xi, add=TRUE, extrargs=extrargs.end)
+      }
+      if(!is.null(adorn.left))
+        warning("adorn.left was ignored because arrange=FALSE")
+      if(!is.null(adorn.right))
+        warning("adorn.right was ignored because arrange=FALSE")
+      if(!is.null(adorn.top))
+        warning("adorn.top was ignored because arrange=FALSE")
+      if(!is.null(adorn.bottom))
+        warning("adorn.bottom was ignored because arrange=FALSE")
+      return(invisible(result))
+    }
+
+    ## ARRAY of plots
+    ## decide whether to plot a main header
+    main <- if(!missing(main) && !is.null(main)) main else xname
+    if(!is.character(main)) {
+      ## main title could be an expression
+      nlines <- 1
+      banner <- TRUE
+    } else {
+      ## main title is character string/vector, possibly ""
+      banner <- any(nzchar(main))
+      if(length(main) > 1)
+        main <- paste(main, collapse="\n")
+      nlines <- length(unlist(strsplit(main, "\n")))
+    }
+    ## determine arrangement of plots
+    ## arrange like mfrow(nrows, ncols) plus a banner at the top
+    if(is.null(nrows) && is.null(ncols)) {
+      nrows <- as.integer(floor(sqrt(n)))
+      ncols <- as.integer(ceiling(n/nrows))
+    } else if(!is.null(nrows) && is.null(ncols))
+      ncols <- as.integer(ceiling(n/nrows))
+    else if(is.null(nrows) && !is.null(ncols))
+      nrows <- as.integer(ceiling(n/ncols))
+    else stopifnot(nrows * ncols >= length(x))
+    nblank <- ncols * nrows - n
+    if(allfv || list(plotcommand) %in% list("persp", persp)) {
+      ## Function plots do not have physical 'size'
+      sizes.known <- FALSE
+    } else {
+      ## Determine dimensions of objects
+      ##     (including space for colour ribbons, if they are images)
+      boxes <- getPlotBoxes(x, ..., plotcommand=plotcommand,
+                            panel.args=panel.args, extrargs=extrargs)
+      sizes.known <- !any(sapply(boxes, inherits, what="try-error"))
+      if(sizes.known) {
+        extrargs <- resolve.defaults(extrargs, list(claim.title.space=TRUE))
+        boxes <- getPlotBoxes(x, ..., plotcommand=plotcommand,
+                              panel.args=panel.args, extrargs=extrargs)
+      }
+      if(equal.scales && !sizes.known) {
+        warning("Ignored equal.scales=TRUE; scales could not be determined")
+        equal.scales <- FALSE
+      }
+    }
+    if(sizes.known) {
+      ## determine size of each panel
+      if(equal.scales) {
+        ## do not rescale panels
+        scaledboxes <- boxes
+      } else {
+        ## rescale panels
+        sides <- lapply(boxes, sidelengths)
+        bwidths <- unlist(lapply(sides, "[", 1))
+        bheights <- unlist(lapply(sides, "[", 2))
+        ## Force equal heights, unless there is only one column
+        scales <- if(ncols > 1) 1/bheights else 1/bwidths
+        scaledboxes <- vector(mode="list", length=n)
+        for(i in 1:n)
+          scaledboxes[[i]] <- scalardilate(boxes[[i]], scales[i])
+      }
+    }
+    ## determine whether to display all objects in one enormous plot
+    ## Precondition is that everything has a spatial bounding box
+    single.plot <- equal.scales && sizes.known
+    if(equal.scales && !single.plot && !allfv)
+      warning("equal.scales=TRUE ignored ", "because bounding boxes ",
+              "could not be determined", call.=FALSE)
+    ## enforce alignment by expanding boxes
+    if(halign) {
+      if(!equal.scales)
+        warning("halign=TRUE ignored because equal.scales=FALSE")
+      ## x coordinates align in each column
+      xr <- range(sapply(scaledboxes, getElement, name="xrange"))
+      scaledboxes <- lapply(scaledboxes, "[[<-", i="xrange", value=xr)
+    }
+    if(valign) {
+      if(!equal.scales)
+        warning("valign=TRUE ignored because equal.scales=FALSE")
+      ## y coordinates align in each row
+      yr <- range(sapply(scaledboxes, getElement, name="yrange"))
+      scaledboxes <- lapply(scaledboxes, "[[<-", i="yrange", value=yr)
+    }
+    ## set up layout
+    mat <- matrix(c(seq_len(n), integer(nblank)),
+                  byrow=TRUE, ncol=ncols, nrow=nrows)
+    if(sizes.known) {
+      boxsides <- lapply(scaledboxes, sidelengths)
+      xwidths <- sapply(boxsides, "[", i=1)
+      xheights <- sapply(boxsides, "[", i=2)
+      heights <- apply(mat, 1, maxassigned, values=xheights)
+      widths <- apply(mat, 2, maxassigned, values=xwidths)
+    } else {
+      heights <- rep.int(1, nrows)
+      widths <- rep.int(1, ncols)
+    }
+    #' negative heights/widths arise if a row/column is not used.
+    meanheight <- mean(heights[heights > 0])
+    meanwidth  <- mean(widths[widths > 0])
+    heights[heights <= 0] <- meanheight
+    widths[widths <= 0] <- meanwidth
+    nall <- n
+    ##
+    if(single.plot) {
+      ## .........  create a single plot ..................
+      ## determine sizes
+      ht <- max(heights)
+      wd <- max(widths)
+      marpar <- mar.panel * c(ht, wd, ht, wd)/6
+      vsep <- vsep * ht/6
+      hsep <- hsep * wd/6
+      mainheight <- any(nzchar(main.panel)) * ht/5
+      ewidths <- marpar[2] + widths + marpar[4]
+      eheights <- marpar[1] + heights + marpar[3] + mainheight
+      Width <- sum(ewidths) + hsep * (length(ewidths) - 1)
+      Height <- sum(eheights) + vsep * (length(eheights) - 1)
+      bigbox <- owin(c(0, Width), c(0, Height))
+      ox <- marpar[2] + cumsum(c(0, ewidths + hsep))[1:ncols]
+      oy <- marpar[1] + cumsum(c(0, rev(eheights) + vsep))[nrows:1]
+      panelorigin <- as.matrix(expand.grid(x=ox, y=oy))
+      ## initialise, with banner
+      cex <- resolve.1.default(list(cex.title=1.5), list(...))/par('cex.main')
+      plot(bigbox, type="n", main=main, cex.main=cex)
+      ## plot individual objects
+      result <- vector(mode="list", length=n)
+      for(i in 1:n) {
+        ## determine shift vector that moves bottom left corner of spatial box
+        ## to bottom left corner of target area on plot device
+        vec <- panelorigin[i,] - with(scaledboxes[[i]], c(xrange[1], yrange[1]))
+        ## shift panel contents
+        xi <- x[[i]]
+        xishift <- shift(xi, vec)
+        ## let rip
+        if(!is.null(panel.begin))
+          exec.or.plotshift(panel.begin, i, xishift,
+                            add=TRUE,
+                            main=main.panel[i], show.all=TRUE,
+                            extrargs=extrargs.begin,
+                            vec=vec)
+        result[[i]] <-
+          extraplot(i, xishift, ...,
+                    add=TRUE, show.all=is.null(panel.begin),
+                    main=main.panel[i],
+                    extrargs=extrargs,
+                    panel.args=panel.args,
+                    plotcommand=plotcommand) %orifnull% list()
+        exec.or.plotshift(panel.end, i, xishift, add=TRUE,
+                          extrargs=extrargs.end,
+                          vec=vec)
+      }
+      return(invisible(result))
+    }
+    ## ................. multiple logical plots using 'layout' ..............
+    ## adjust panel margins to accommodate desired extra separation
+    mar.panel <- pmax(0, mar.panel + c(vsep, hsep, vsep, hsep)/2)
+    ## check for adornment
+    if(!is.null(adorn.left)) {
+      ## add margin at left, of width adorn.size * meanwidth
+      nall <- i.left <- n+1
+      mat <- cbind(i.left, mat)
+      widths <- c(adorn.size * meanwidth, widths)
+    } 
+    if(!is.null(adorn.right)) {
+      ## add margin at right, of width adorn.size * meanwidth
+      nall <- i.right <- nall+1
+      mat <- cbind(mat, i.right)
+      widths <- c(widths, adorn.size * meanwidth)
+    } 
+    if(!is.null(adorn.bottom)) {
+      ## add margin at bottom, of height adorn.size * meanheight
+      nall <- i.bottom <- nall+1
+      mat <- rbind(mat, i.bottom)
+      heights <- c(heights, adorn.size * meanheight)
+    } 
+    if(!is.null(adorn.top)) {
+      ## add margin at top, of height adorn.size * meanheight
+      nall <- i.top <- nall + 1
+      mat <- rbind(i.top, mat)
+      heights <- c(adorn.size * meanheight, heights)
+    } 
+    if(banner) {
+      ## Increment existing panel numbers
+      ## New panel 1 is the banner
+      panels <- (mat > 0)
+      mat[panels] <- mat[panels] + 1
+      mat <- rbind(1, mat)
+      heights <- c(0.1 * meanheight * (1 + nlines), heights)
+    }
+    ## declare layout
+    layout(mat, heights=heights, widths=widths, respect=sizes.known)
+    ## start output .....
+    ## .... plot banner
+    if(banner) {
+      opa <- par(mar=rep.int(0,4), xpd=TRUE)
+      plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE,
+           xlim=c(-1,1),ylim=c(-1,1))
+      cex <- resolve.1.default(list(cex.title=1.5), list(...))/par('cex')
+      text(0,0,main, cex=cex)
+    }
+    ## plot panels
+    npa <- par(mar=mar.panel)
+    if(!banner) opa <- npa
+    result <- vector(mode="list", length=n)
+    for(i in 1:n) {
+      xi <- x[[i]]
+      exec.or.plot(panel.begin, i, xi, main=main.panel[i],
+                   extrargs=extrargs.begin)
+      result[[i]] <-
+        extraplot(i, xi, ...,
+                  add = !is.null(panel.begin), 
+                  main = main.panel[i],
+                  extrargs=extrargs,
+                  panel.args=panel.args,
+                  plotcommand=plotcommand) %orifnull% list()
+      exec.or.plot(panel.end, i, xi, add=TRUE, extrargs=extrargs.end)
+    }
+    ## adornments
+    if(nall > n) {
+      par(mar=rep.int(0,4), xpd=TRUE)
+      if(!is.null(adorn.left))
+        adorn.left()
+      if(!is.null(adorn.right))
+        adorn.right()
+      if(!is.null(adorn.bottom))
+        adorn.bottom()
+      if(!is.null(adorn.top))
+        adorn.top()
+    }
+    ## revert
+    layout(1)
+    par(opa)
+    return(invisible(result))
+  }
+
+  plot.anylist
+})
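+
+## Illustrative sketch (assumes spatstat datasets): plot several objects
+## in one layout, optionally on a common physical scale:
+##   plot(anylist(cells, redwood, japanesepines), main="three patterns")
+##   plot(solist(cells, redwood), equal.scales=TRUE)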
+
+
+contour.imlist <- contour.listof <- function(x, ...) {
+  xname <- short.deparse(substitute(x))
+  do.call(plot.solist,
+          resolve.defaults(list(x=x, plotcommand="contour"),
+                           list(...),
+                           list(main=xname)))
+}
+
+plot.imlist <- local({
+
+  plot.imlist <- function(x, ..., plotcommand="image",
+                          equal.ribbon = FALSE, ribmar=NULL) {
+    xname <- short.deparse(substitute(x))
+    if(missing(plotcommand) &&
+       any(sapply(x, inherits, what=c("linim", "linfun"))))
+      plotcommand <- "plot"
+    if(equal.ribbon &&
+       (list(plotcommand) %in% list("image", "plot", image, plot))) {
+      out <- imagecommon(x, ..., xname=xname, ribmar=ribmar)
+    } else {
+      out <- do.call(plot.solist,
+                     resolve.defaults(list(x=x, plotcommand=plotcommand), 
+                                      list(...),
+                                      list(main=xname)))
+    }
+    return(invisible(out))
+  }
+
+  imagecommon <- function(x, ...,
+                          xname,
+                          zlim=NULL,
+                          ribbon=TRUE,
+                          ribside=c("right", "left", "bottom", "top"),
+                          ribsep=NULL, ribwid=0.5, ribn=1024,
+                          ribscale=NULL, ribargs=list(),
+                          ribmar = NULL, mar.panel = c(2,1,1,2)) {
+    if(missing(xname))
+      xname <- short.deparse(substitute(x))
+    ribside <- match.arg(ribside)
+    stopifnot(is.list(ribargs))
+    if(!is.null(ribsep))
+      warning("Argument ribsep is not yet implemented for image arrays")
+    ## determine range of values
+    if(is.null(zlim))
+      zlim <- range(unlist(lapply(x, range)))
+    ## determine common colour map
+    imcolmap <- plot.im(x[[1]], do.plot=FALSE, zlim=zlim, ..., ribn=ribn)
+    ## plot ribbon?
+    if(!ribbon) {
+      ribadorn <- list()
+    } else {
+      ## determine plot arguments for colour ribbon
+      vertical <- (ribside %in% c("right", "left"))
+      scaleinfo <- if(!is.null(ribscale)) list(labelmap=ribscale) else list()
+      sidecode <- match(ribside, c("bottom", "left", "top", "right"))
+      ribstuff <- c(list(x=imcolmap, main="", vertical=vertical),
+                    ribargs,
+                    scaleinfo,
+                    list(side=sidecode))
+      if (is.null(mar.panel)) 
+        mar.panel <- c(2, 1, 1, 2)
+      if (length(mar.panel) != 4) 
+        mar.panel <- rep(mar.panel, 4)[1:4]
+      if (is.null(ribmar)) {
+        ribmar <- mar.panel/2
+        newmar <- c(2, 0)
+        switch(ribside,
+               left   = { ribmar[c(2, 4)] <- newmar },
+               right  = { ribmar[c(4, 2)] <- newmar },
+               bottom = { ribmar[c(1, 3)] <- newmar },
+               top    = { ribmar[c(3, 1)] <- newmar }
+               )
+      }
+      ## bespoke function executed to plot colour ribbon
+      do.ribbon <- function() {
+        opa <- par(mar=ribmar)
+        do.call(plot, ribstuff)
+        par(opa)
+      }
+      ## ribbon plot function encoded as 'adorn' argument
+      ribadorn <- list(adorn=do.ribbon, adorn.size=ribwid)
+      names(ribadorn)[1] <- paste("adorn", ribside, sep=".")
+    }
+    ##
+    result <- do.call(plot.solist,
+                      resolve.defaults(list(x=x, plotcommand="image"),
+                                       list(...),
+                                       list(mar.panel=mar.panel,
+                                            main=xname,
+                                            col=imcolmap, zlim=zlim,
+                                            ribbon=FALSE),
+                                       ribadorn))
+    return(invisible(result))
+  }
+
+  plot.imlist
+})
+
+image.imlist <- image.listof <-
+  function(x, ..., equal.ribbon = FALSE, ribmar=NULL) {
+    plc <- resolve.1.default(list(plotcommand="image"), list(...))
+    if(list(plc) %in% list("image", "plot", image, plot)) {
+      out <- plot.imlist(x, ..., plotcommand="image",
+                         equal.ribbon=equal.ribbon, ribmar=ribmar)
+    } else {
+      out <- plot.solist(x, ..., ribmar=ribmar)
+    }
+    return(invisible(out))
+  }
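+
+## Illustrative sketch (assumes the 'amacrine' dataset): kernel estimates
+## for each mark, drawn with a single shared colour ribbon:
+##   Z <- solapply(split(amacrine), density)
+##   plot(Z, equal.ribbon=TRUE)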
+
diff --git a/R/plot.fasp.R b/R/plot.fasp.R
new file mode 100755
index 0000000..8ff7412
--- /dev/null
+++ b/R/plot.fasp.R
@@ -0,0 +1,183 @@
+#
+#   plot.fasp.R
+#
+#   $Revision: 1.29 $   $Date: 2016/02/11 10:17:12 $
+#
+plot.fasp <- function(x, formule=NULL, ..., subset=NULL,
+                      title=NULL, banner=TRUE,
+                      transpose=FALSE,
+                      samex=FALSE, samey=FALSE,
+                      mar.panel=NULL,
+                      outerlabels=TRUE, cex.outerlabels=1.25,
+                      legend=FALSE) {
+  # plot dimensions
+  which <- x$which
+  if(transpose)
+    which <- t(which)
+  nrows  <- nrow(which)
+  ncols  <- ncol(which)
+  
+# Determine the overall title of the plot
+  if(banner) {
+    if(!is.null(title)) overall <- title
+    else if(!is.null(x$title)) overall <- x$title
+    else {
+      if(prod(dim(which)) > 1)
+        overall <- "Array of diagnostic functions"
+      else
+        overall <- "Diagnostic function"
+      if(is.null(x$dataname)) overall <- paste(overall,".",sep="")
+      else overall <- paste(overall," for ",x$dataname,".",sep="")
+    }
+    if(length(overall) > 1)
+      overall <- paste(overall, collapse="\n")
+    nlines <-
+      if(!is.character(overall)) 1 else length(unlist(strsplit(overall, "\n")))
+  } 
+
+# If no formula is given, look for a default formula in x:
+  defaultplot <- is.null(formule)
+  if(defaultplot && !is.null(x$default.formula))
+    formule <- x$default.formula
+
+  if(!is.null(formule)) {
+    # ensure formulae are given as character strings.
+    formule <- FormatFaspFormulae(formule, "formule")
+    # Number of formulae should match number of functions.
+    nf <- length(formule)
+    nfun <- length(x$fns)
+    if(nf == 1 && nfun > 1)
+      formule <- rep.int(formule, nfun)
+    else if(nf != nfun)
+      stop(paste("Wrong number of entries in", sQuote("formule")))
+  }
+  
+# Check on the length of the subset argument.
+  ns <- length(subset)
+  if(ns > 1) {
+    if(ns != length(x$fns))
+      stop("Wrong number of entries in subset argument.\n")
+    msub <- TRUE
+  } else msub <- FALSE
+
+# compute common x, y axis limits for all plots ?
+  xlim <- ylim <- NULL
+  if(samex || samey) {
+    cat("Computing limits\n")
+    # call plot.fv to determine plot limits of each panel
+    for(i in 1:nrows) {
+      for(j in 1:ncols) {
+        k <- which[i,j]
+        if(!is.na(k)) {
+          fun <- as.fv(x$fns[[k]])
+          fmla <- if(!defaultplot) formule[k] else NULL
+          sub <- if(msub) subset[[k]] else subset
+          lims <- plot(fun, fmla, subset=sub, limitsonly=TRUE)
+          # update the limits
+          if(samex) xlim <- range(xlim, lims$xlim)
+          if(samey) ylim <- range(ylim, lims$ylim)
+        }
+      }
+    }
+  } 
+
+#############################################################  
+# Set up the plot layout
+  n <- nrows * ncols
+# panels 1..n = plot panels
+  codes <- matrix(seq_len(n), byrow=TRUE, ncol=ncols, nrow=nrows)
+  heights <- rep.int(1, nrows)
+  widths  <- rep.int(1, ncols)
+# annotation as chosen
+  if(outerlabels) {
+    # column headings
+    colhead.codes <- max(codes) + (1:ncols)
+    colhead.height <- 0.2
+    codes <- rbind(colhead.codes, codes)
+    heights <- c(colhead.height, heights)
+    # row headings
+    rowhead.codes <- max(codes) + (1:nrows)
+    rowhead.width <- 0.2
+    codes <- cbind(c(0,rowhead.codes), codes)
+    widths <- c(rowhead.width, widths)
+  }
+  if(banner) {
+    # overall banner
+    top.code <- max(codes) + 1
+    top.height <- 0.1 * (1+nlines)
+    codes <- rbind(top.code, codes)
+    heights <- c(top.height, heights)
+  }
+
+# declare layout  
+  layout(codes, widths=widths, heights=heights)
+
+############################################################  
+# Plot the function panels 
+#
+# determine annotation
+  colNames <- colnames(which)
+  rowNames <- rownames(which)
+  nrc <- max(nrows, ncols)
+  ann.def <- par("ann") && (nrc <= 3)
+# determine margin around each panel
+  if(is.null(mar.panel)) 
+    mar.panel <- if(nrc > 3 && outerlabels) rep.int(1/nrc, 4) else par("mar")
+  opa <- par(mar=mar.panel, xpd=TRUE)
+#
+# plot each function  
+  for(i in 1:nrows) {
+    for(j in 1:ncols) {
+      k <- which[i,j]
+      if(is.na(k)) plot(0,0,type='n',xlim=c(0,1),
+                        ylim=c(0,1),axes=FALSE,xlab='',ylab='', ...)
+      else {
+        fun <- as.fv(x$fns[[k]])
+        fmla <- if(!defaultplot) formule[k] else NULL
+        sub <- if(msub) subset[[k]] else subset
+        main <- if(outerlabels) "" else
+                if(nrows == 1) colNames[j] else
+                if(ncols == 1) rowNames[i] else 
+                paren(paste(rowNames[i], colNames[j], sep=","))
+        do.call(plot,
+                resolve.defaults(list(x=fun, fmla=fmla, subset=sub),
+                                 list(...),
+                                 list(xlim=xlim, ylim=ylim,
+                                      main=main, legend=legend),
+                                 list(ann=ann.def, axes=ann.def,
+                                      frame.plot=TRUE)))
+      }
+    }
+  }
+############################################################
+#
+# Annotation as selected
+  if(outerlabels) {
+    par(mar=rep.int(0,4), xpd=TRUE)
+    # Plot the column headers
+    for(j in 1:ncols) {
+      plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE,
+           xlim=c(-1,1),ylim=c(-1,1))
+      text(0,0,colNames[j], cex=cex.outerlabels)
+    }
+    # Plot the row labels
+    for(i in 1:nrows) {
+      plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE,
+           xlim=c(-1,1),ylim=c(-1,1))
+      text(0,0,rowNames[i], srt=90, cex=cex.outerlabels)
+    }
+  }
+  if(banner) {
+    par(mar=rep.int(0,4), xpd=TRUE)
+    # plot the banner
+    plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE,
+         xlim=c(-1,1),ylim=c(-1,1))
+    cex <- resolve.defaults(list(...), list(cex.title=2))$cex.title
+    text(0,0, overall, cex=cex)
+  }
+  
+  # revert
+  layout(1)
+  par(opa)
+  return(invisible(NULL))
+}
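+
+# Illustrative sketch (assumes the 'amacrine' dataset): an array of
+# cross-type K functions plotted on a common y scale:
+#   A <- alltypes(amacrine, "K")
+#   plot(A, samey=TRUE)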
diff --git a/R/plot.fv.R b/R/plot.fv.R
new file mode 100755
index 0000000..8a17bd4
--- /dev/null
+++ b/R/plot.fv.R
@@ -0,0 +1,768 @@
+#
+#       plot.fv.R   (was: conspire.S)
+#
+#  $Revision: 1.128 $    $Date: 2016/12/30 01:44:07 $
+#
+#
+
+conspire <- function(...) {
+  .Deprecated("plot.fv", package="spatstat")
+  plot.fv(...)
+}
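+
+# Illustrative sketches for plot.fv (assume the 'cells' dataset):
+#   K <- Kest(cells)
+#   plot(K, cbind(iso, trans, theo) ~ r)   # selected estimates
+#   E <- envelope(cells, Kest, nsim=19)
+#   plot(E, . ~ r, shade=c("hi", "lo"))    # shaded simulation envelope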
+
+plot.fv <- local({
+
+  hasonlyone <- function(x, amongst) {
+    sum(all.vars(parse(text=x)) %in% amongst) == 1
+  }
+
+  extendifvector <- function(a, n, nmore) {
+    if(is.null(a)) return(a)
+    if(length(a) == 1) return(a)
+    return(c(a, rep(a[1], nmore)))
+  }
+
+  fixit <- function(a, n, a0, a00) {
+    # 'a' is formal argument
+    # 'a0' and 'a00' are default and fallback default
+    # 'n' is number of values required
+    if(is.null(a))
+      a <- if(!is.null(a0)) a0 else a00
+    if(length(a) == 1)
+      return(rep.int(a, n))
+    else if(length(a) != n)
+      stop(paste("Length of", short.deparse(substitute(a)),
+                 "does not match number of curves to be plotted"))
+    else 
+      return(a)
+  }
+
+  pow10 <- function(x) { 10^x }
+
+  clip.to.usr <- function() {
+    usr <- par('usr')
+    clip(usr[1], usr[2], usr[3], usr[4])
+  }
+  
+  plot.fv <- function(x, fmla, ..., subset=NULL, lty=NULL, col=NULL, lwd=NULL,
+                      xlim=NULL, ylim=NULL, xlab=NULL, ylab=NULL,
+                      ylim.covers=NULL, legend=!add, legendpos="topleft",
+                      legendavoid=missing(legendpos),
+                      legendmath=TRUE, legendargs=list(),
+                      shade=fvnames(x, ".s"), shadecol="grey", add=FALSE,
+                      log="",
+                      mathfont=c("italic", "plain", "bold", "bolditalic"), 
+                      limitsonly=FALSE) {
+
+    xname <-
+      if(is.language(substitute(x))) short.deparse(substitute(x)) else ""
+
+    force(legendavoid)
+    if(is.null(legend))
+      legend <- !add
+
+    mathfont <- match.arg(mathfont)
+
+    verifyclass(x, "fv")
+    env.user <- parent.frame()
+
+    indata <- as.data.frame(x)
+
+    xlogscale <- (log %in% c("x", "xy", "yx"))
+    ylogscale <- (log %in% c("y", "xy", "yx"))
+
+    ## ---------------- determine plot formula ----------------
+  
+    defaultplot <- missing(fmla) || is.null(fmla)
+    if(defaultplot) 
+      fmla <- formula(x)
+
+    ## This *is* the last possible moment, so...
+    fmla <- as.formula(fmla, env=env.user)
+
+    ## validate the variable names
+    vars <- variablesinformula(fmla)
+    reserved <- c(".", ".x", ".y", ".a", ".s")
+    external <- !(vars %in% c(colnames(x), reserved))
+    if(any(external)) {
+      sought <- vars[external]
+      found <- unlist(lapply(sought, exists, envir=env.user, mode="numeric"))
+      if(any(!found)) {
+        nnot <- sum(!found)
+        stop(paste(ngettext(nnot, "Variable", "Variables"),
+                   commasep(sQuote(sought[!found])),
+                   ngettext(nnot, "was", "were"),
+                   "not found"))
+      } else {
+        ## validate the found variables
+        externvars <- lapply(sought, get, envir=env.user)
+        isnum <- sapply(externvars, is.numeric)
+        len <- lengths(externvars)
+        ok <- isnum & (len == 1 | len == nrow(x))
+        if(!all(ok)) {
+          nnot <- sum(!ok)
+          stop(paste(ngettext(nnot, "Variable", "Variables"),
+                     commasep(sQuote(sought[!ok])),
+                     ngettext(nnot, "is", "are"),
+                     "not of the right format"))
+        }
+      }
+    }
+  
+    ## Extract left hand side as given
+#    lhs.original <- fmla[[2]]
+    fmla.original <- fmla
+  
+    ## expand "."
+    dotnames <- fvnames(x, ".")
+    starnames <- fvnames(x, "*")
+    umap <- fvexprmap(x)
+    fmla <- eval(substitute(substitute(fom, um), list(fom=fmla, um=umap)))
+
+    ## ------------------- extract data for plot ---------------------
+  
+    ## extract LHS and RHS of formula
+    lhs <- fmla[[2]]
+    rhs <- fmla[[3]]
+
+    ## extract data 
+    lhsdata <- eval(lhs, envir=indata)
+    rhsdata <- eval(rhs, envir=indata)
+
+    ## reformat
+    if(is.vector(lhsdata)) {
+      lhsdata <- matrix(lhsdata, ncol=1)
+      lhsvars <- all.vars(as.expression(lhs))
+      lhsvars <- lhsvars[lhsvars %in% names(x)]
+      colnames(lhsdata) <-
+        if(length(lhsvars) == 1) lhsvars else
+        if(length(starnames) == 1 && (starnames %in% lhsvars)) starnames else 
+        paste(deparse(lhs), collapse="")
+    }
+    ## check lhs names exist
+    lnames <- colnames(lhsdata)
+    nc <- ncol(lhsdata)
+    lnames0 <- paste("V", seq_len(nc), sep="")
+    if(length(lnames) != nc)
+      colnames(lhsdata) <- lnames0
+    else if(any(uhoh <- !nzchar(lnames)))
+      colnames(lhsdata)[uhoh] <- lnames0[uhoh]
+    lhs.names <- colnames(lhsdata)
+
+    ## check whether each lhs column is associated with a single column of 'x'
+    ## that is one of the alternative versions of the function.
+    ##    This may be unreliable, as it depends on the
+    ##    column names assigned to lhsdata by eval()
+    one.star <- unlist(lapply(lhs.names, hasonlyone, amongst=fvnames(x, "*")))
+    one.dot  <- unlist(lapply(lhs.names, hasonlyone, amongst=dotnames))
+    explicit.lhs.names    <- ifelse(one.star, lhs.names, "")
+    explicit.lhs.dotnames <- ifelse(one.star & one.dot, lhs.names, "")
+  
+    ## check rhs data
+    if(is.matrix(rhsdata))
+      stop("rhs of formula should yield a vector")
+    rhsdata <- as.numeric(rhsdata)
+
+    nplots <- ncol(lhsdata)
+    allind <- 1:nplots
+  
+    ## ---------- extra plots may be implied by 'shade' -----------------
+    extrashadevars <- NULL
+  
+    if(!is.null(shade)) {
+      ## select columns by name or number
+      names(allind) <- explicit.lhs.names
+      shind <- try(allind[shade])
+      if(inherits(shind, "try-error")) 
+        stop(paste("The argument shade should be a valid subset index",
+                   "for columns of x"), call.=FALSE)
+      if(any(nbg <- is.na(shind))) {
+        ## columns not included in formula: add them
+        morelhs <- try(as.matrix(indata[ , shade[nbg], drop=FALSE]))
+        if(inherits(morelhs, "try-error")) 
+          stop(paste("The argument shade should be a valid subset index",
+                     "for columns of x"), call.=FALSE)
+        nmore <- ncol(morelhs)
+        extrashadevars <- colnames(morelhs)
+        if(defaultplot) {
+          success <- TRUE
+        } else if("." %in% variablesinformula(fmla.original)) {
+          ## evaluate lhs of formula, expanding "." to shade names
+          u <- if(length(extrashadevars) == 1) as.name(extrashadevars) else {
+            as.call(lapply(c("cbind", extrashadevars), as.name))
+          }
+          ux <- as.name(fvnames(x, ".x"))
+          uy <- as.name(fvnames(x, ".y"))
+          foo <- eval(substitute(substitute(fom, list(.=u, .x=ux, .y=uy)),
+                                 list(fom=fmla.original)))
+          dont.complain.about(u, ux, uy)
+          lhsnew <- foo[[2]]
+          morelhs <- eval(lhsnew, envir=indata)
+          success <- identical(colnames(morelhs), extrashadevars)
+        } else if(is.name(lhs) && as.character(lhs) %in% names(indata)) {
+          ## lhs is the name of a single column in x
+          ## expand the LHS 
+          explicit.lhs.names <- c(explicit.lhs.names, extrashadevars)
+          ff <- paste("cbind",
+                      paren(paste(explicit.lhs.names, collapse=", ")),
+                      "~ 1")
+          lhs <- lhs.of.formula(as.formula(ff))
+          success <- TRUE
+        } else if(length(explicit.lhs.dotnames) > 1) {
+          ## lhs = cbind(...) where ... are dotnames
+          cbound <- paste0("cbind",
+                           paren(paste(explicit.lhs.dotnames, collapse=", ")))
+          if(identical(deparse(lhs), cbound)) {
+            success <- TRUE
+            explicit.lhs.names <- union(explicit.lhs.names, extrashadevars)
+            ff <- paste("cbind",
+                        paren(paste(explicit.lhs.names, collapse=", ")),
+                        "~ 1")
+            lhs <- lhs.of.formula(as.formula(ff))
+          } else success <- FALSE
+        } else success <- FALSE
+        if(success) {
+          ## add these columns to the plotting data
+          lhsdata <- cbind(lhsdata, morelhs)
+          shind[nbg] <- nplots + seq_len(nmore)
+          lty <- extendifvector(lty, nplots, nmore)
+          col <- extendifvector(col, nplots, nmore)
+          lwd <- extendifvector(lwd, nplots, nmore)
+          nplots <- nplots + nmore
+          ## update the names
+          one.star <- unlist(lapply(explicit.lhs.names,
+                                    hasonlyone, amongst=fvnames(x, "*")))
+          one.dot  <- unlist(lapply(explicit.lhs.names,
+                                    hasonlyone, amongst=dotnames))
+          explicit.lhs.names    <- ifelse(one.star, explicit.lhs.names, "")
+          explicit.lhs.dotnames <- ifelse(one.star & one.dot,
+                                          explicit.lhs.names, "")
+        } else {
+          ## cannot add columns
+          warning(paste("Shade",
+                        ngettext(sum(nbg), "column", "columns"),
+                        commasep(sQuote(shade[nbg])),
+                        "were missing from the plot formula, and were omitted"))
+          shade <- NULL
+          extrashadevars <- NULL
+        }
+      }
+    }
+
+    ## -------------------- determine plotting limits ----------------------
+  
+    ## restrict data to subset if desired
+    if(!is.null(subset)) {
+      keep <- if(is.character(subset)) {
+                eval(parse(text=subset), envir=indata)
+              } else eval(subset, envir=indata)
+      lhsdata <- lhsdata[keep, , drop=FALSE]
+      rhsdata <- rhsdata[keep]
+    }
+  
+    ## determine x and y limits and clip data to these limits
+    if(is.null(xlim) && add) {
+      ## x limits are determined by existing plot
+      xlim <- par("usr")[1:2]
+    }
+    if(!is.null(xlim)) {
+      ok <- !is.finite(rhsdata) | (xlim[1] <= rhsdata & rhsdata <= xlim[2])
+      rhsdata <- rhsdata[ok]
+      lhsdata <- lhsdata[ok, , drop=FALSE]
+    } else {
+      ## if we're using the default argument, use its recommended range
+      if(rhs == fvnames(x, ".x")) {
+        xlim <- attr(x, "alim") %orifnull% range(as.vector(rhsdata),
+                                                 finite=TRUE)
+        if(xlogscale && xlim[1] <= 0) 
+          xlim[1] <- min(rhsdata[is.finite(rhsdata) & rhsdata > 0], na.rm=TRUE)
+        ok <- !is.finite(rhsdata) | (rhsdata >= xlim[1] & rhsdata <= xlim[2])
+        rhsdata <- rhsdata[ok]
+        lhsdata <- lhsdata[ok, , drop=FALSE]
+      } else { ## actual range of values to be plotted
+        if(xlogscale) {
+          ok <- is.finite(rhsdata) & (rhsdata > 0) & matrowany(lhsdata > 0)
+          xlim <- range(rhsdata[ok])
+        } else {
+          xlim <- range(rhsdata, na.rm=TRUE)
+        }
+      }
+    }
+
+    if(is.null(ylim)) {
+      yok <- is.finite(lhsdata)
+      if(ylogscale)
+        yok <- yok & (lhsdata > 0)
+      ylim <- range(lhsdata[yok],na.rm=TRUE)
+    }
+    if(!is.null(ylim.covers))
+      ylim <- range(ylim, ylim.covers)
+
+    ## return x, y limits only?
+    if(limitsonly)
+      return(list(xlim=xlim, ylim=ylim))
+
+    ## -------------  work out how to label the plot --------------------
+
+    ## extract plot labels, substituting function name
+    labl <- fvlabels(x, expand=TRUE)
+    ## create plot label map (key -> algebraic expression)
+    map <- fvlabelmap(x) 
+
+    ## ......... label for x axis ..................
+
+    if(is.null(xlab)) {
+      argname <- fvnames(x, ".x")
+      if(as.character(fmla)[3] == argname) {
+        ## The x axis variable is the default function argument.
+        ArgString <- fvlabels(x, expand=TRUE)[[argname]]
+        xexpr <- parse(text=ArgString)
+        ## use specified font
+        xexpr <- fontify(xexpr, mathfont)
+        ## Add name of unit of length?
+        ax <- summary(unitname(x))$axis
+        if(is.null(ax)) {
+          xlab <- xexpr
+        } else {
+          xlab <- expression(VAR ~ COMMENT)
+          xlab[[1]][[2]] <- xexpr[[1]]
+          xlab[[1]][[3]] <- ax
+        }
+      } else {
+        ## map ident to label
+        xlab <- eval(substitute(substitute(rh, mp), list(rh=rhs, mp=map)))
+        ## use specified font
+        xlab <- fontify(xlab, mathfont)
+      }
+    }
+    if(is.language(xlab) && !is.expression(xlab))
+      xlab <- as.expression(xlab)
+
+    ## ......... label for y axis ...................
+
+    leftside <- lhs
+    if(ncol(lhsdata) > 1 || length(dotnames) == 1) {
+      ## For labelling purposes only, simplify the LHS by 
+      ## replacing 'cbind(.....)' by '.'
+      ## even if not all columns are included.
+      leftside <- paste(as.expression(leftside))
+      eln <- explicit.lhs.dotnames
+      eln <- eln[nzchar(eln)]
+      cb <- if(length(eln) == 1) eln else {
+        paste("cbind(",
+              paste(eln, collapse=", "),
+              ")", sep="")
+      }
+      compactleftside <- gsub(cb, ".", leftside, fixed=TRUE)
+      ## Separately expand "." to cbind(.....)
+      ## and ".x", ".y" to their real names
+      dotdot <- c(dotnames, extrashadevars)
+      cball <- if(length(dotdot) == 1) dotdot else {
+        paste("cbind(",
+              paste(dotdot, collapse=", "),
+              ")", sep="")
+      }
+      expandleftside <- gsub(".x", fvnames(x, ".x"), leftside, fixed=TRUE)
+      expandleftside <- gsub(".y", fvnames(x, ".y"), expandleftside, fixed=TRUE)
+      expandleftside <- gsubdot(cball, expandleftside)
+      ## convert back to language
+      compactleftside <- parse(text=compactleftside)[[1]]
+      expandleftside <- parse(text=expandleftside)[[1]]
+    } else {
+      compactleftside <- expandleftside <- leftside
+    }
+
+    ## construct label for y axis
+    if(is.null(ylab)) {
+      yl <- attr(x, "yexp")
+      if(defaultplot && !is.null(yl)) {
+        ylab <- yl
+      } else {
+        ## replace "." and short identifiers by plot labels
+        ylab <- eval(substitute(substitute(le, mp),
+                                list(le=compactleftside, mp=map)))
+      }
+    }
+    if(is.language(ylab)) {
+      ## use specified font
+      ylab <- fontify(ylab, mathfont)
+      ## ensure it's an expression
+      if(!is.expression(ylab))
+        ylab <- as.expression(ylab)
+    }
+
+    ## ------------------ start plotting ---------------------------
+
+    ## create new plot
+    if(!add)
+      do.call(plot.default,
+              resolve.defaults(list(xlim, ylim, type="n", log=log),
+                               list(xlab=xlab, ylab=ylab),
+                               list(...),
+                               list(main=xname)))
+
+    ## handle 'type' = "n" 
+    giventype <- resolve.defaults(list(...), list(type=NA))$type
+    if(identical(giventype, "n"))
+      return(invisible(NULL))
+
+    ## process lty, col, lwd arguments
+
+    opt0 <- spatstat.options("par.fv")
+  
+    lty <- fixit(lty, nplots, opt0$lty, 1:nplots)
+    col <- fixit(col, nplots, opt0$col, 1:nplots)
+    lwd <- fixit(lwd, nplots, opt0$lwd, 1)
+
+    ## convert to greyscale?
+    if(spatstat.options("monochrome"))
+      col <- to.grey(col)
+    
+    if(!is.null(shade)) {
+      ## shade region between critical boundaries
+      ## extract relevant columns for shaded bands
+      shdata <- lhsdata[, shind]
+      if(!is.matrix(shdata) || ncol(shdata) != 2) 
+        stop("The argument shade should select two columns of x")
+      ## truncate infinite values to plot limits
+      if(any(isinf <- is.infinite(shdata))) {
+        if(is.null(ylim)) {
+          warning("Unable to truncate infinite values to the plot area")
+        } else {
+          shdata[isinf & (shdata == Inf)] <- ylim[2]
+          shdata[isinf & (shdata == -Inf)] <- ylim[1]
+        }
+      }
+      ## determine limits of shading
+      shdata1 <- shdata[,1]
+      shdata2 <- shdata[,2]
+      ## plot grey polygon
+      xpoly <- c(rhsdata, rev(rhsdata))
+      ypoly <- c(shdata1, rev(shdata2)) 
+      miss1 <- !is.finite(shdata1)
+      miss2 <- !is.finite(shdata2)
+      if(!any(broken <- (miss1 | miss2))) {
+        ## single polygon
+        clip.to.usr()
+        polygon(xpoly, ypoly, border=shadecol, col=shadecol)
+      } else {
+        ## interrupted
+        dat <- data.frame(rhsdata=rhsdata, shdata1=shdata1, shdata2=shdata2)
+        serial <- cumsum(broken)
+        lapply(split(dat, serial),
+               function(z) {
+                 with(z, {
+                   xp <- c(rhsdata, rev(rhsdata))
+                   yp <- c(shdata1, rev(shdata2))
+                   clip.to.usr()
+                   polygon(xp, yp, border=shadecol, col=shadecol)
+                 })
+               })
+        ## save for use in placing legend
+        okp <- !c(broken, rev(broken))
+        xpoly <- xpoly[okp]
+        ypoly <- ypoly[okp]
+      }
+      ## overwrite graphical parameters
+      lty[shind] <- 1
+      ## try to preserve the same type of colour specification
+      if(is.character(col) && is.character(shadecol)) {
+        ## character representations 
+        col[shind] <- shadecol
+      } else if(is.numeric(col) && !is.na(sc <- paletteindex(shadecol))) {
+        ## indices in colour palette
+        col[shind] <- sc
+      } else {
+        ## convert colours to hexadecimal and edit relevant values
+        col <- col2hex(col)
+        col[shind] <- col2hex(shadecol)
+      }
+      ## remove these columns from further plotting
+      allind <- allind[-shind]
+      ## 
+    } else xpoly <- ypoly <- numeric(0)
+  
+    ## ----------------- plot lines ------------------------------
+
+    for(i in allind) {
+      clip.to.usr()
+      lines(rhsdata, lhsdata[,i], lty=lty[i], col=col[i], lwd=lwd[i])
+    }
+
+    if(nplots == 1)
+      return(invisible(NULL))
+
+    ## ---------------- determine legend -------------------------
+    key <- colnames(lhsdata)
+    mat <- match(key, names(x))
+    keyok <- !is.na(mat)
+    matok <- mat[keyok]
+    legdesc <- rep.int("constructed variable", length(key))
+    legdesc[keyok] <- attr(x, "desc")[matok]
+    leglabl <- lnames0
+    leglabl[keyok] <- labl[matok]
+    ylab <- attr(x, "ylab")
+    if(!is.null(ylab)) {
+      if(is.language(ylab)) 
+        ylab <- flat.deparse(ylab)
+      legdesc <- sprintf(legdesc, ylab)
+    }
+    ## compute legend info
+    legtxt <- key
+    if(legendmath) {
+      legtxt <- leglabl
+      if(defaultplot) {
+        ## try to convert individual labels to expressions
+        fancy <- try(parse(text=leglabl), silent=TRUE)
+        if(!inherits(fancy, "try-error"))
+          legtxt <- fancy
+      } else {
+        ## try to navigate the parse tree
+        fancy <- try(fvlegend(x, expandleftside), silent=TRUE)
+        if(!inherits(fancy, "try-error"))
+          legtxt <- fancy
+      }
+    }
+
+    if(is.expression(legtxt) ||
+       is.language(legtxt) ||
+       all(sapply(legtxt, is.language)))
+      legtxt <- fontify(legtxt, mathfont)
+
+    ## --------------- handle legend plotting  -----------------------------
+    
+    if(identical(legend, TRUE)) {
+      ## legend will be plotted
+      ## Basic parameters of legend
+      legendxpref <- if(identical(legendpos, "float")) NULL else legendpos
+      optparfv <- spatstat.options("par.fv")$legendargs %orifnull% list()
+      legendspec <- resolve.defaults(legendargs,
+                                     list(lty=lty,
+                                          col=col,
+                                          lwd=lwd),
+                                     optparfv,
+                                     list(x=legendxpref,
+                                          legend=legtxt,
+                                          inset=0.05,
+                                          y.intersp=if(legendmath) 1.3 else 1),
+                                     .StripNull=TRUE)
+      tB <- dev.capabilities()$transparentBackground
+      if(!any(names(legendspec) == "bg") &&
+         !is.na(tB) && !identical(tB, "no"))
+        legendspec$bg <- "transparent"
+      
+      if(legendavoid || identical(legendpos, "float")) {
+        ## Automatic determination of legend position
+        ## Assemble data for all plot objects
+        linedata <- list()
+        xmap <- if(xlogscale) log10 else identity
+        ymap <- if(ylogscale) log10 else identity
+        inv.xmap <- if(xlogscale) pow10 else identity
+        inv.ymap <- if(ylogscale) pow10 else identity 
+        for(i in seq_along(allind)) 
+          linedata[[i]] <- list(x=xmap(rhsdata), y=ymap(lhsdata[,i]))
+        polydata <-
+          if(length(xpoly) > 0) list(x=xmap(xpoly), y=ymap(ypoly)) else NULL
+        #' ensure xlim, ylim define a box
+        boxXlim <- if(diff(xlim) > 0) xlim else par('usr')[1:2]
+        boxYlim <- if(diff(ylim) > 0) ylim else par('usr')[3:4]
+        #' 
+        objects <- assemble.plot.objects(xmap(boxXlim), ymap(boxYlim),
+                                         lines=linedata, polygon=polydata)
+        ## find best position to avoid them
+        legendbest <- findbestlegendpos(objects, preference=legendpos,
+                                        legendspec=legendspec)
+        ## handle log scale
+        if((xlogscale || ylogscale) &&
+           checkfields(legendbest, c("x", "xjust", "yjust"))) {
+          ## back-transform x, y coordinates
+          legendbest$x$x <- inv.xmap(legendbest$x$x)
+          legendbest$x$y <- inv.ymap(legendbest$x$y)
+        }
+      } else legendbest <- list()
+    
+      ##  ********** plot legend *************************
+      if(!is.null(legend) && legend) 
+        do.call(graphics::legend,
+                resolve.defaults(legendargs,
+                                 legendbest,
+                                 legendspec,
+                                 .StripNull=TRUE))
+      
+    }
+
+    ## convert labels back to character
+    labl <- paste.expr(legtxt)
+    labl <- gsub(" ", "", labl)
+    ## return legend info
+    df <- data.frame(lty=lty, col=col, key=key, label=labl,
+                     meaning=legdesc, row.names=key)
+    return(invisible(df))
+  }
+  plot.fv
+
+})
+
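+## Typical use of plot.fv, as a sketch kept in comments ('cells' is a
+## standard spatstat dataset; Kest and envelope are spatstat functions):
+##   K <- Kest(cells)                  # summary function, class 'fv'
+##   plot(K, cbind(iso, theo) ~ r)     # plot selected columns against r
+##   E <- envelope(cells, Kest, nsim=39)
+##   plot(E, shade=c("hi", "lo"))      # shaded band between 'hi' and 'lo'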
+
+
+assemble.plot.objects <- function(xlim, ylim, ..., lines=NULL, polygon=NULL) {
+  # Take data that would have been passed to the commands 'lines' and 'polygon'
+  # and form corresponding geometrical objects.
+  objects <- list()
+  if(!is.null(lines)) {
+    if(is.psp(lines)) {
+      objects <- list(lines)
+    } else {
+      if(checkfields(lines, c("x", "y"))) {
+        lines <- list(lines)
+      } else if(!all(unlist(lapply(lines, checkfields, L=c("x", "y")))))
+        stop("lines should be a psp object, a list(x,y) or a list of list(x,y)")
+      W <- owin(xlim, ylim)
+      for(i in seq_along(lines)) {
+        lines.i <- lines[[i]]
+        x.i <- lines.i$x
+        y.i <- lines.i$y
+        n <- length(x.i)
+        if(length(y.i) != n)
+          stop(paste(paste("In lines[[", i, "]]", sep=""),
+                     "the vectors x and y have unequal length"))
+        if(!all(ok <- (is.finite(x.i) & is.finite(y.i)))) {
+          x.i <- x.i[ok]
+          y.i <- y.i[ok]
+          n <- sum(ok)
+        }
+        segs.i <- psp(x.i[-n], y.i[-n], x.i[-1], y.i[-1], W, check=FALSE)
+        objects <- append(objects, list(segs.i))        
+      }
+    }
+  }
+  if(!is.null(polygon)) {
+    # Add filled polygon
+    pol <- polygon[c("x", "y")]
+    ok <- with(pol, is.finite(x) & is.finite(y))
+    if(!all(ok))
+      pol <- with(pol, list(x=x[ok], y=y[ok]))
+    if(Area.xypolygon(pol) < 0) pol <- lapply(pol, rev)
+    P <- try(owin(poly=pol, xrange=xlim, yrange=ylim, check=FALSE))
+    if(!inherits(P, "try-error"))
+      objects <- append(objects, list(P))
+  }
+  return(objects)
+}
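+
+## A minimal sketch of the expected input (comments only; not executed):
+##   objs <- assemble.plot.objects(xlim=c(0, 1), ylim=c(0, 1),
+##                                 lines=list(x=seq(0, 1, length.out=5),
+##                                            y=c(0.2, 0.8, 0.4, 0.9, 0.1)))
+## 'objs' is then a list containing one segment pattern (class 'psp')
+## joining the consecutive (x, y) points inside the box [0,1] x [0,1].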
+
+findbestlegendpos <- local({
+  # Given a list of geometrical objects, find the best position
+  # to avoid them.
+  thefunction <- function(objects, show=FALSE, aspect=1, bdryok=TRUE,
+                          preference="float", verbose=FALSE,
+                          legendspec=NULL) {
+    # find bounding box
+    W <- do.call(boundingbox, lapply(objects, as.rectangle))
+    # convert to common box
+    objects <- lapply(objects, rebound, rect=W)
+    # rescale x and y axes so that bounding box has aspect ratio 'aspect'
+    aspectW <- with(W, diff(yrange)/diff(xrange))
+    s <- aspect/aspectW
+    mat <- diag(c(1, s))
+    invmat <- diag(c(1, 1/s))
+    scaled.objects <- lapply(objects, affine, mat=mat)
+    scaledW <- affine(W, mat=mat)
+    if(verbose) {
+      cat("Scaled space:\n")
+      print(scaledW)
+    }
+    # pixellate the scaled objects
+    pix.scal.objects <- lapply(scaled.objects, asma)
+    # apply distance transforms in scaled space
+    D1 <- distmap(pix.scal.objects[[1]])
+    Dlist <- lapply(pix.scal.objects, distmap, xy=list(x=D1$xcol, y=D1$yrow))
+    # distance transform of superposition
+    D <- im.apply(Dlist, min)
+    if(!bdryok) {
+      # include distance to boundary
+      B <- attr(D1, "bdry")
+      D <- eval.im(pmin.int(D, B))
+    }
+    if(show) {
+      plot(affine(D, mat=invmat), add=TRUE)
+      lapply(lapply(scaled.objects, affine, mat=invmat), plot, add=TRUE)
+    }
+    if(preference != "float") {
+      # evaluate preferred location (check for collision)
+      if(!is.null(legendspec)) {
+        # pretend to plot the legend as specified
+        legout <- do.call(graphics::legend,
+                          append(legendspec, list(plot=FALSE)))
+        # determine bounding box
+        legbox <- with(legout$rect, owin(c(left, left+w), c(top-h, top)))
+        scaledlegbox <- affine(legbox, mat=mat)
+        # check for collision 
+        Dmin <- min(D[scaledlegbox])
+        if(Dmin >= 0.02) {
+          # no collision: stay at preferred location. Exit.
+          return(list(x=preference))
+        }
+        # collision occurred! 
+      } else {
+        # no legend information.
+        # Pretend legend is 15% of plot width and height
+        xr <- scaledW$xrange
+        yr <- scaledW$yrange
+        testloc <- switch(preference,
+                          topleft     = c(xr[1],yr[2]),
+                          top         = c(mean(xr), yr[2]),
+                          topright    = c(xr[2], yr[2]),
+                          right       = c(xr[2], mean(yr)),
+                          bottomright = c(xr[2], yr[1]),
+                          bottom      = c(mean(xr), yr[1]),
+                          bottomleft  = c(xr[1], yr[1]),
+                          left        = c(xr[1], mean(yr)),
+                          center      = c(mean(xr), mean(yr)),
+                          NULL)
+        if(!is.null(testloc)) {
+          # look up distance value at preferred location
+          testpat <- ppp(x=testloc[1], y=testloc[2], xr, yr, check=FALSE)
+          val <- safelookup(D, testpat)
+          crit <- 0.15 * min(diff(xr), diff(yr))
+          if(verbose)
+            cat(paste("val=",val, ", crit=", crit, "\n"))
+          if(val > crit) {
+            # no collision: stay at preferred location. Exit.
+            return(list(x=preference))
+          }
+        # collision occurred! 
+        }
+      }
+      # collision occurred! 
+    }
+    # find location of max
+    locmax <- which(D$v == max(D), arr.ind=TRUE)
+    locmax <- unname(locmax[1,])
+    pos <- list(x=D$xcol[locmax[2]], y=D$yrow[locmax[1]])
+    pos <- affinexy(pos, mat=invmat)
+    if(show) 
+      points(pos)
+    # determine justification of legend relative to this point
+    # to avoid crossing edges of plot
+    xrel <- (pos$x - W$xrange[1])/diff(W$xrange)
+    yrel <- (pos$y - W$yrange[1])/diff(W$yrange)
+    xjust <- if(xrel < 0.1) 0 else if(xrel > 0.9) 1 else 0.5 
+    yjust <- if(yrel < 0.1) 0 else if(yrel > 0.9) 1 else 0.5
+    #
+    out <- list(x=pos, xjust=xjust, yjust=yjust)
+    return(out)
+  }
+
+  asma <- function(z) { if(is.owin(z)) as.mask(z) else
+                        if(is.psp(z)) as.mask.psp(z) else NULL }
+  
+  callit <- function(...) {
+    rslt <- try(thefunction(...))
+    if(!inherits(rslt, "try-error"))
+      return(rslt)
+    return(list())
+  }
+  callit
+})
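+
+## In brief, the strategy above: rescale so the bounding box has the given
+## aspect ratio, pixellate every object, and take pixelwise minima of their
+## distance transforms ('distmap'), so D(p) = min_i d(p, object_i).  The
+## legend is then anchored at the pixel maximising D, i.e. as far as
+## possible from all plotted lines and shading.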
+  
diff --git a/R/plot.im.R b/R/plot.im.R
new file mode 100755
index 0000000..85d1157
--- /dev/null
+++ b/R/plot.im.R
@@ -0,0 +1,772 @@
+#
+#   plot.im.R
+#
+#  $Revision: 1.115 $   $Date: 2016/09/10 10:23:21 $
+#
+#  Plotting code for pixel images
+#
+#  plot.im
+#  image.im
+#  contour.im
+#
+###########################################################################
+
+plot.im <- local({
+
+  ## auxiliary functions
+
+  image.doit <- function(imagedata, ...,
+                         extrargs=graphicsPars("image"), W,
+                         workaround=FALSE) {
+    aarg <- resolve.defaults(...)
+    add      <- resolve.1.default(list(add=FALSE),     aarg)
+    show.all <- resolve.1.default(list(show.all=!add), aarg)
+    if(add && show.all) {
+      ## set up the window space *with* the main title
+      ## using the same code as plot.owin, for consistency
+      do.call.matched(plot.owin,
+                      resolve.defaults(list(x=W, type="n"), aarg), 
+                      extrargs=graphicsPars("owin"))
+    }
+    if(workaround && identical(aarg$useRaster, TRUE)) {
+      #' workaround for bug 16035
+      #' detect reversed coordinates
+      usr <- par('usr')
+      xrev <- (diff(usr[1:2]) < 0) 
+      yrev <- (diff(usr[3:4]) < 0)
+      if(xrev || yrev) {
+        #' flip the matrix of pixel values, because the device driver
+        #' does not flip it when the coordinate axes are reversed
+        z <- imagedata$z
+        d <- dim(z) # z is in the orientation expected for image.default
+        if(xrev) z <- z[d[1]:1,       , drop=FALSE]
+        if(yrev) z <- z[      , d[2]:1, drop=FALSE]
+        imagedata$z <- z
+      }
+    }
+    extrargs <- setdiff(extrargs, c("claim.title.space", "box"))
+    do.call.matched(image.default,
+                    append(imagedata, aarg),
+                    extrargs=extrargs)
+  }
+
+  do.box.etc <- function(bb, add, argh)
+    do.call(box.etc, append(list(bb=bb, add=add), argh))
+  
+  box.etc <- function(bb, ..., add=FALSE, axes=FALSE, box=!add) {
+    # axes for image
+    xr <- bb$xrange
+    yr <- bb$yrange
+    if(box)
+      rect(xr[1], yr[1], xr[2], yr[2])
+    if(axes) {
+      px <- pretty(xr)
+      py <- pretty(yr)
+      do.call.plotfun(graphics::axis,
+                      resolve.defaults(
+                                       list(side=1, at=px), 
+                                       list(...),
+                                       list(pos=yr[1])),
+                      extrargs=graphicsPars("axis"))
+      do.call.plotfun(graphics::axis,
+                      resolve.defaults(
+                                       list(side=2, at=py), 
+                                       list(...),
+                                       list(pos=xr[1])),
+                      extrargs=graphicsPars("axis"))
+    }
+  }
+  
+  
+  clamp <- function(x, v, tol=0.02 * diff(v)) {
+    ok <- (x >= v[1] - tol) & (x <= v[2] + tol)
+    x[ok]
+  }
+  
+  cellbreaks <- function(x, dx) {
+    nx <- length(x)
+    seq(x[1] - dx/2, x[nx] + dx/2, length.out=nx+1)
+  }
+
+  log10orNA <- function(x) {
+    y <- rep(NA_real_, length(x))
+    ok <- !is.na(x) & (x > 0)
+    y[ok] <- log10(x[ok])
+    return(y)
+  }
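+  ## e.g. log10orNA(c(100, 0, -1, NA)) returns c(2, NA, NA, NA):
+  ## nonpositive or missing values map to NA rather than -Inf or NaN.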
+  
+  # main function
+  PlotIm <- function(x, ...,
+                     main, 
+                     add=FALSE, clipwin=NULL,
+                     col=NULL, valuesAreColours=NULL, log=FALSE,
+                     ribbon=show.all, show.all=!add,
+                     ribside=c("right", "left", "bottom", "top"),
+                     ribsep=0.15, ribwid=0.05, ribn=1024,
+                     ribscale=1, ribargs=list(), colargs=list(),
+                     useRaster=NULL, workaround=FALSE,
+                     do.plot=TRUE) {
+    if(missing(main)) main <- short.deparse(substitute(x))
+    verifyclass(x, "im")
+    if(x$type == "complex") {
+      cl <- match.call()
+      cl$x <- solist(Re=Re(x), Im=Im(x), Mod=Mod(x), Arg=Arg(x))
+      cl[[1]] <- as.name('plot')
+      cl$main <- main
+      out <- eval(cl, parent.frame())
+      return(invisible(out))
+    }
+    ribside <- match.arg(ribside)
+    col.given <- !is.null(col)
+    dotargs <- list(...)
+
+    stopifnot(is.list(ribargs))
+    user.ticks <- ribargs$at
+    
+    if(!is.null(clipwin)) {
+      x <- x[as.rectangle(clipwin)]
+      if(!is.rectangle(clipwin)) x <- x[clipwin, drop=FALSE]
+    }
+
+    zlim <- dotargs$zlim
+
+    x <- repair.image.xycoords(x)
+
+    xtype <- x$type
+    xbox <- as.rectangle(x)
+    
+    do.log <- identical(log, TRUE)
+    if(do.log && !(x$type %in% c("real", "integer")))
+      stop(paste("Log transform is undefined for an image of type",
+                 sQuote(xtype)))
+
+    # determine whether pixel values are to be treated as colours
+    if(!is.null(valuesAreColours)) {
+      # argument given - validate
+      stopifnot(is.logical(valuesAreColours))
+      if(valuesAreColours) {
+        # pixel values must be factor or character
+        if(!xtype %in% c("factor", "character")) {
+          warning(paste("Pixel values of type", sQuote(xtype),
+                        "are not interpretable as colours"))
+          valuesAreColours <- FALSE
+        } else if(col.given) {
+          # colour info provided: contradictory
+          warning(paste("Pixel values are taken to be colour values,",
+                        "because valuesAreColours=TRUE;", 
+                        "the colour map (argument col) is ignored"),
+                  call.=FALSE)
+          col <- NULL
+        }
+        if(do.log) 
+          warning(paste("Pixel values are taken to be colour values,",
+                        "because valuesAreColours=TRUE;", 
+                        "the argument log=TRUE is ignored"),
+                  call.=FALSE)
+      }
+    } else if(col.given) {
+      # argument 'col' controls colours
+      valuesAreColours <- FALSE
+    } else if(spatstat.options("monochrome")) {
+      valuesAreColours <- FALSE
+    } else {
+      ## default : determine whether pixel values are colours
+      strings <- switch(xtype,
+                        character = { as.vector(x$v) },
+                        factor    = { levels(x) },
+                        { NULL })
+      valuesAreColours <- is.character(strings) &&
+        !inherits(try(col2rgb(strings), silent=TRUE), "try-error")
+      if(valuesAreColours)
+        cat("Interpreting pixel values as colours\n")
+    }
+    # 
+    if(valuesAreColours) {
+      # colour-valued images are plotted using the code for factor images
+      # with the colour map equal to the levels of the factor
+      switch(xtype,
+             factor = {
+               col <- levels(x)
+             },
+             character = {
+               x <- eval.im(factor(x))
+               xtype <- "factor"
+               col <- levels(x)
+             },
+             {
+               warning(paste("Pixel values of type", sQuote(xtype),
+                             "are not interpretable as colours"))
+             })
+      # colours not suitable for ribbon
+      ribbon <- FALSE
+    } 
+    
+    # transform pixel values to log scale?
+    if(do.log) {
+      rx <- range(x, finite=TRUE)
+      if(all(rx > 0)) {
+        x <- eval.im(log10(x))
+      } else {
+        if(any(rx < 0)) 
+          warning(paste("Negative pixel values",
+                        "omitted from logarithmic colour map;",
+                        "range of values =", prange(rx)),
+                  call.=FALSE)
+        if(!all(rx < 0))
+          warning("Zero pixel values omitted from logarithmic colour map",
+                  call.=FALSE)
+        x <- eval.im(log10orNA(x))
+      } 
+      xtype <- x$type
+      Log <- log10
+      Exp <- function(x) { 10^x }
+    } else {
+      Log <- Exp <- function(x) { x }
+    }
+    
+    imagebreaks <- NULL
+#    ribbonvalues <- ribbonbreaks <- NULL
+    ribbonvalues <- NULL
+
+    ## NOW DETERMINE THE COLOUR MAP
+    colfun <- colmap <- NULL
+    if(valuesAreColours) {
+      ## pixel values are colours; set of colours was determined earlier
+      colmap <- colourmap(col=col, inputs=col)
+    } else if(!col.given) {
+      ## no colour information given: use default
+      colfun <- spatstat.options("image.colfun")
+    } else if(inherits(col, "colourmap")) {
+      ## Bob's your uncle
+      colmap <- col
+    } else if(is.function(col)) {
+      ## Some kind of function determining a colour map
+      if(names(formals(col))[1] == "n") {
+        ## function(n) -> colour values
+        colfun <- col
+      } else {
+        ## colour map determined by a rule (e.g. 'beachcolours')
+        colmap <- invokeColourmapRule(col, x, zlim=zlim, colargs=colargs)
+        if(is.null(colmap))
+          stop("Unrecognised syntax for colour function")
+      }
+    }
+
+    switch(xtype,
+           real    = {
+             vrange <- range(x, finite=TRUE)
+             vrange <- range(zlim, vrange)
+             if(!is.null(colmap)) {
+               # explicit colour map
+               s <- summary(colmap)
+               if(s$discrete)
+                 stop("Discrete colour map is not applicable to real values")
+               imagebreaks <- s$breaks
+               vrange <- range(imagebreaks)
+               col <- s$outputs
+             } 
+             trivial <- (diff(vrange) <= .Machine$double.eps)
+             if(!trivial) {
+               # ribbonvalues: domain of colour map (pixel values)
+               # ribbonrange: (min, max) of pixel values in image
+               # nominalrange: range of values shown on ribbon 
+               # nominalmarks: values shown on ribbon at tick marks
+               # ribbonticks: pixel values of tick marks 
+               # ribbonlabels: text displayed at tick marks
+               ribbonvalues <- seq(from=vrange[1], to=vrange[2],
+                                   length.out=ribn)
+               ribbonrange <- vrange
+               nominalrange <- Log(ribscale * Exp(ribbonrange))
+               nominalmarks <-
+                 user.ticks %orifnull% axisTicks(nominalrange, log=do.log)
+               ribbonticks <- Log(nominalmarks/ribscale)
+               ribbonlabels <- paste(nominalmarks)
+             }
+           },
+           integer = {
+             values <- as.vector(x$v)
+             values <- values[!is.na(values)]
+             uv <- unique(values)
+             vrange <- range(uv, finite=TRUE)
+             vrange <- range(zlim, vrange)
+             nvalues <- length(uv)
+             trivial <- (nvalues < 2)
+             if(!trivial){
+               nominalrange <- Log(ribscale * Exp(vrange))
+               if(!is.null(user.ticks)) {
+                 nominalmarks <- user.ticks
+               } else {
+                 nominalmarks <- axisTicks(nominalrange, log=do.log)
+                 nominalmarks <- nominalmarks[nominalmarks %% 1 == 0]
+               }
+               ribbonticks <- Log(nominalmarks/ribscale)
+               ribbonlabels <- paste(nominalmarks)
+               if(!do.log && identical(all.equal(ribbonticks,
+                                                 vrange[1]:vrange[2]), TRUE)) {
+                 # each possible pixel value will appear in ribbon
+                 ribbonvalues <- vrange[1]:vrange[2]
+                 imagebreaks <- c(ribbonvalues - 0.5, vrange[2] + 0.5)
+                 ribbonrange <- range(imagebreaks)
+                 ribbonticks <- ribbonvalues
+                 ribbonlabels <- paste(ribbonticks * ribscale)
+               } else {
+                 # not all possible values will appear in ribbon
+                 ribn <- min(ribn, diff(vrange)+1)
+                 ribbonvalues <- seq(from=vrange[1], to=vrange[2],
+                                     length.out=ribn)
+                 ribbonrange <- vrange
+               }
+             }
+             if(!is.null(colmap)) {
+               # explicit colour map
+               s <- summary(colmap)
+               imagebreaks <-
+                 if(!s$discrete) s$breaks else
+                 c(s$inputs[1] - 0.5, s$inputs + 0.5)
+               col <- s$outputs
+             }
+           },
+           logical = {
+             values <- as.integer(as.vector(x$v))
+             values <- values[!is.na(values)]
+             uv <- unique(values)
+             trivial <- (length(uv) < 2)
+             vrange <- c(0,1)
+             imagebreaks <- c(-0.5, 0.5, 1.5)
+             ribbonvalues <- c(0,1)
+             ribbonrange <- range(imagebreaks)
+#             ribbonbreaks <- imagebreaks
+             ribbonticks <- user.ticks %orifnull% ribbonvalues
+             ribbonlabels <- c("FALSE", "TRUE")
+             if(!is.null(colmap)) 
+               col <- colmap(c(FALSE,TRUE))
+           },
+           factor  = {
+             lev <- levels(x)
+             nvalues <- length(lev)
+             trivial <- (nvalues < 2)
+             # ensure all factor levels plotted separately
+             fac <- factor(lev, levels=lev)
+             intlev <- as.integer(fac)
+             imagebreaks <- c(intlev - 0.5, max(intlev) + 0.5)
+             ribbonvalues <- intlev
+             ribbonrange <- range(imagebreaks)
+#             ribbonbreaks <- imagebreaks
+             ribbonticks <- user.ticks %orifnull% ribbonvalues
+             ribbonlabels <- paste(lev)
+             vrange <- range(intlev)
+             if(!is.null(colmap) && !valuesAreColours) 
+               col <- colmap(fac)
+           },
+           character  = {
+             x <- eval.im(factor(x))
+             lev <- levels(x)
+             nvalues <- length(lev)
+             trivial <- (nvalues < 2)
+             # ensure all factor levels plotted separately
+             fac <- factor(lev, levels=lev)
+             intlev <- as.integer(fac)
+             imagebreaks <- c(intlev - 0.5, max(intlev) + 0.5)
+             ribbonvalues <- intlev
+             ribbonrange <- range(imagebreaks)
+#             ribbonbreaks <- imagebreaks
+             ribbonticks <- user.ticks %orifnull% ribbonvalues
+             ribbonlabels <- paste(lev)
+             vrange <- range(intlev)
+             if(!is.null(colmap)) 
+               col <- colmap(fac)
+           },
+           stop(paste("Do not know how to plot image of type", sQuote(xtype)))
+           )
+  
+    ## Compute colour values to be passed to image.default
+    if(!is.null(colmap)) {
+      ## Explicit colour map object
+      colourinfo <- list(breaks=imagebreaks, col=col)
+    } else if(!is.null(colfun)) {
+      ## Function colfun(n)
+      colourinfo <- if(is.null(imagebreaks)) list(col=colfun(256)) else
+                    list(breaks=imagebreaks, col=colfun(length(imagebreaks)-1))
+    } else if(col.given) {
+      ## Colour values
+      if(inherits(try(col2rgb(col), silent=TRUE), "try-error"))
+        stop("Unable to interpret argument col as colour values")
+      if(is.null(imagebreaks)) {
+        colourinfo <- list(col=col)
+      } else {
+        nintervals <- length(imagebreaks) - 1
+        colourinfo <- list(breaks=imagebreaks, col=col)
+        if(length(col) != nintervals)
+          stop(paste("Length of argument", dQuote("col"),
+                     paren(paste(length(col))),
+                     "does not match the number of distinct values",
+                     paren(paste(nintervals))))
+      }
+    } else stop("Internal error: unable to determine colour values")
+
+    if(spatstat.options("monochrome")) {
+      ## transform to grey scale
+      colourinfo$col <- to.grey(colourinfo$col)
+    }
+    
+    # colour map to be returned (invisibly)
+    i.col <- colourinfo$col
+    i.bks <- colourinfo$breaks
+    output.colmap <-
+      if(is.null(i.col)) NULL else
+      if(inherits(i.col, "colourmap")) i.col else
+      if(valuesAreColours) colourmap(col=i.col, inputs=i.col) else
+      switch(xtype,
+             integer=,
+             real= {
+               if(!is.null(i.bks)) {
+                 colourmap(col=i.col, breaks=i.bks)
+               } else colourmap(col=i.col, range=vrange)
+             },
+             logical={
+               colourmap(col=i.col, inputs=c(FALSE,TRUE))
+             },
+             character=,
+             factor={
+               colourmap(col=i.col, inputs=lev)
+             },
+             NULL)
+
+    ##  ........ decide whether to use rasterImage .........
+    
+    ## get device capabilities
+    ##      (this will start a graphics device if none is active)
+    rasterable <- dev.capabilities()$rasterImage
+    if(is.null(rasterable)) rasterable <- "no"
+    ##
+    can.use.raster <-
+      switch(rasterable,
+             yes=TRUE,
+             no=FALSE,
+             "non-missing"=!anyNA(x$v),
+             FALSE)
+    if(is.null(useRaster)) {
+      useRaster <- can.use.raster
+    } else if(useRaster && !can.use.raster) {
+      whinge <- "useRaster=TRUE is not supported by the graphics device"
+      if(rasterable == "non-missing")
+        whinge <- paste(whinge, "for images with NA values")
+      warning(whinge, call.=FALSE)
+    } 
+    
+    ## ........ start plotting .................
+
+    if(!identical(ribbon, TRUE) || trivial) {
+      ## no ribbon wanted
+
+      attr(output.colmap, "bbox") <- as.rectangle(x)
+      if(!do.plot)
+        return(output.colmap)
+
+      ## plot image without ribbon
+      image.doit(imagedata=list(x=cellbreaks(x$xcol, x$xstep),
+                                y=cellbreaks(x$yrow, x$ystep),
+                                z=t(x$v)),
+                 W=xbox,
+                 workaround=workaround,
+                 dotargs,
+                 list(useRaster=useRaster, add=add, show.all=show.all),
+                 colourinfo,
+                 list(xlab = "", ylab = ""),
+                 list(asp = 1, main = main, axes=FALSE))
+##      if(add && show.all)
+##        fakemaintitle(x, main, dotargs)
+
+      do.box.etc(Frame(x), add, dotargs)
+      
+      return(invisible(output.colmap))
+    }
+    
+    # determine plot region
+    bb <- owin(x$xrange, x$yrange)
+    Width <- diff(bb$xrange)
+    Height <- diff(bb$yrange)
+    Size <- max(Width, Height)
+    switch(ribside,
+           right={
+             # ribbon to right of image
+             bb.rib <- owin(bb$xrange[2] + c(ribsep, ribsep+ribwid) * Size,
+                            bb$yrange)
+             rib.iside <- 4
+           },
+           left={
+             # ribbon to left of image
+             bb.rib <- owin(bb$xrange[1] - c(ribsep+ribwid, ribsep) * Size,
+                            bb$yrange)
+             rib.iside <- 2
+           },
+           top={
+             # ribbon above image
+             bb.rib <- owin(bb$xrange,
+                            bb$yrange[2] + c(ribsep, ribsep+ribwid) * Size)
+             rib.iside <- 3
+           },
+           bottom={
+             # ribbon below image
+             bb.rib <- owin(bb$xrange,
+                            bb$yrange[1] - c(ribsep+ribwid, ribsep) * Size)
+             rib.iside <- 1
+           })
+    bb.all <- boundingbox(bb.rib, bb)
+
+    attr(output.colmap, "bbox") <- bb.all
+    attr(output.colmap, "bbox.legend") <- bb.rib
+    if(!do.plot)
+      return(output.colmap)
+
+    pt <- prepareTitle(main)
+    
+    if(!add) {
+      ## establish coordinate system
+      do.call.plotfun(plot.owin,
+                      resolve.defaults(list(x=bb.all,
+                                            type="n",
+                                            main=pt$blank),
+                                       dotargs),
+                      extrargs=graphicsPars("owin"))
+    }
+    if(show.all) {
+      ## plot title centred over main image area 'bb'
+      do.call.plotfun(plot.owin,
+                      resolve.defaults(list(x=bb,
+                                            type="n",
+                                            main=main,
+                                            add=TRUE,
+                                            show.all=TRUE),
+                                       dotargs),
+                      extrargs=graphicsPars("owin"))
+      main <- ""
+    }
+    # plot image
+    image.doit(imagedata=list(x=cellbreaks(x$xcol, x$xstep),
+                              y=cellbreaks(x$yrow, x$ystep),
+                              z=t(x$v)),
+               W=xbox,
+               workaround=workaround,
+               list(add=TRUE, show.all=show.all),
+               dotargs,
+               list(useRaster=useRaster),
+               colourinfo,
+               list(xlab = "", ylab = ""),
+               list(asp = 1, main = main))
+
+##    if(add && show.all)
+##      fakemaintitle(bb.all, main, ...)
+    
+    # box or axes for image
+    do.box.etc(bb, add, dotargs)
+
+    # plot ribbon image containing the range of image values
+    rib.npixel <- length(ribbonvalues) + 1
+    switch(ribside,
+           left=,
+           right={
+             # vertical ribbon
+             rib.xcoords <- bb.rib$xrange
+             rib.ycoords <- seq(from=bb.rib$yrange[1],
+                                to=bb.rib$yrange[2],
+                                length.out=rib.npixel)
+             rib.z <- matrix(ribbonvalues, ncol=1)
+             rib.useRaster <- useRaster
+           },
+           top=,
+           bottom={
+             # horizontal ribbon
+             rib.ycoords <- bb.rib$yrange
+             rib.xcoords <- seq(from=bb.rib$xrange[1],
+                                to=bb.rib$xrange[2],
+                                length.out=rib.npixel)
+             rib.z <- matrix(ribbonvalues, nrow=1)
+             # bug workaround
+             rib.useRaster <- FALSE 
+           })
+    image.doit(imagedata=list(x=rib.xcoords,
+                              y=rib.ycoords,
+                              z=t(rib.z)),
+               W=bb.rib,
+               workaround=workaround,
+               list(add=TRUE,
+                    show.all=show.all),
+               ribargs,
+               list(useRaster=rib.useRaster),
+               list(main="", sub=""),
+               dotargs,
+               colourinfo)
+    # box around ribbon?
+    resol <- resolve.defaults(ribargs, dotargs)
+    if(!identical(resol$box, FALSE))
+      plot(as.owin(bb.rib), add=TRUE)
+    # scale axis for ribbon image
+    ribaxis <- !(identical(resol$axes, FALSE) || identical(resol$ann, FALSE))
+    if(ribaxis) {
+      ribaxis.iside <- rib.iside
+      ## check for user-supplied xlim, ylim with reverse order
+      ll <- resolve.defaults(ribargs, dotargs, list(xlim=NULL, ylim=NULL))
+      xlimflip <- is.numeric(ll$xlim) && (diff(ll$xlim) < 0)
+      ylimflip <- is.numeric(ll$ylim) && (diff(ll$ylim) < 0)
+      if(xlimflip) ribaxis.iside <- c(1, 4, 3, 2)[ribaxis.iside] 
+      if(ylimflip) ribaxis.iside <- c(3, 2, 1, 4)[ribaxis.iside]
+      ##
+      axisargs <- list(side=ribaxis.iside, labels=ribbonlabels)
+      switch(ribside,
+             right={
+               scal <- diff(bb.rib$yrange)/diff(ribbonrange)
+               at <- bb.rib$yrange[1] + scal * (ribbonticks - ribbonrange[1])
+               axisargs <- append(axisargs, list(at=at))
+               posargs <- list(pos=bb.rib$xrange[2],
+                               yaxp=c(bb.rib$yrange, length(ribbonticks)))
+             },
+             left={
+               scal <- diff(bb.rib$yrange)/diff(ribbonrange)
+               at <- bb.rib$yrange[1] + scal * (ribbonticks - ribbonrange[1])
+               axisargs <- append(axisargs, list(at=at))
+               posargs <- list(pos=bb.rib$xrange[1],
+                               yaxp=c(bb.rib$yrange, length(ribbonticks)))
+             },
+             top={
+               scal <- diff(bb.rib$xrange)/diff(ribbonrange)
+               at <- bb.rib$xrange[1] + scal * (ribbonticks - ribbonrange[1])
+               axisargs <- append(axisargs, list(at=at))
+               posargs <- list(pos=bb.rib$yrange[2],
+                               xaxp=c(bb.rib$xrange, length(ribbonticks)))
+             },
+             bottom={
+               scal <- diff(bb.rib$xrange)/diff(ribbonrange)
+               at <- bb.rib$xrange[1] + scal * (ribbonticks - ribbonrange[1])
+               axisargs <- append(axisargs, list(at=at))
+               posargs <- list(pos=bb.rib$yrange[1],
+                               xaxp=c(bb.rib$xrange, length(ribbonticks)))
+             })
+      do.call.plotfun(graphics::axis,
+                      resolve.defaults(ribargs,
+                                       axisargs, dotargs,
+                                       posargs),
+                      extrargs=graphicsPars("axis"))
+    }
+    #
+    return(invisible(output.colmap))
+  }
+
+  PlotIm
+})
+
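+## Typical calls, as a sketch (comments only, since top-level code in a
+## package source file would run at build time):
+##   Z <- density(cells)                  # pixel image, class 'im'
+##   plot(Z)                              # image with colour ribbon at right
+##   plot(Z, log=TRUE, ribside="bottom")  # log10 colour scale, ribbon below
+##   plot(Z, ribbon=FALSE, col=grey(seq(0, 1, length.out=128)))
+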
+invokeColourmapRule <- function(colfun, x, ..., zlim=NULL, colargs=list()) {
+  ## utility for handling special functions that generate colour maps
+  ## either 
+  ##        function(... range) -> colourmap
+  ##        function(... inputs) -> colourmap
+  stopifnot(is.im(x))
+  stopifnot(is.function(colfun))
+  colargnames <- names(formals(colfun))
+  ## Convert it to a 'colourmap'
+  colmap <- NULL
+  xtype <- x$type
+  if(xtype %in% c("real", "integer") && "range" %in% colargnames) {
+    ## function(range) -> colourmap
+    vrange <- range(range(x, finite=TRUE), zlim)
+    cvals <- try(do.call.matched(colfun,
+                                 append(list(range=vrange), colargs)),
+                 silent=TRUE)
+    if(!inherits(cvals, "try-error")) {
+      colmap <- if(inherits(cvals, "colourmap")) cvals else
+                if(is.character(cvals)) colourmap(cvals, range=vrange) else NULL
+    }
+  } else if(xtype != "real" && "inputs" %in% colargnames) {
+    ## function(inputs) -> colourmap
+    vpossible <- switch(xtype,
+                        logical = c(FALSE, TRUE),
+                        factor = levels(x),
+                        unique(as.matrix(x)))
+    if(!is.null(vpossible) && length(vpossible) < 256) {
+      cvals <- try(do.call.matched(colfun,
+                                   append(list(inputs=vpossible),
+                                          colargs)),
+                   silent=TRUE)
+      if(!inherits(cvals, "try-error")) {
+        colmap <- if(inherits(cvals, "colourmap")) cvals else
+                  if(is.character(cvals))
+                    colourmap(cvals, inputs=vpossible) else NULL
+      }
+    }
+  }
+  return(colmap)
+}
+
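+## Sketch of a rule accepted above: any function with an argument named
+## 'range' (for numeric images) or 'inputs' (for discrete images) that
+## returns a 'colourmap' or a vector of colour values, e.g.
+##   myrule <- function(range, ...) colourmap(heat.colors(64), range=range)
+##   plot(Z, col=myrule)   # 'Z' a real-valued image; 'myrule' hypothetical
+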
+########################################################################
+
+image.im <- plot.im
+
+######################################################################
+
+contour.im <- function (x, ..., main, axes=FALSE, add=FALSE,
+                        col=par("fg"), 
+                        clipwin=NULL, show.all=!add, do.plot=TRUE)
+{
+  defaultmain <- deparse(substitute(x))
+  ## return value
+  z <- as.rectangle(x)
+  attr(z, "bbox") <- z
+  if(!do.plot) return(z)
+  ## 
+  sop <- spatstat.options("par.contour")
+  if(missing(main)) 
+    main <- resolve.1.default(list(main=defaultmain), sop)
+  if(missing(add)) {
+    force(add) ## use default in formal arguments, unless overridden
+    add <- resolve.1.default(list(add=add), sop)
+  }
+  if(missing(axes)) {
+    force(axes)
+    axes <- resolve.1.default(list(axes=axes), sop)
+  }
+  if(!is.null(clipwin))
+    x <- x[clipwin, drop=FALSE]
+  if(show.all) {
+    col0 <- if(inherits(col, "colourmap")) par("fg") else col
+    if(axes) # with axes
+      do.call.plotfun(plot.default,
+                      resolve.defaults(
+                                       list(x = range(x$xcol),
+                                            y = range(x$yrow),
+                                            type = "n", add=add),
+                                       list(...),
+                                       list(asp = 1,
+                                            xlab = "x",
+                                            ylab = "y",
+                                            col = col0,
+                                            main = main)))
+    else { # box without axes
+      rec <- owin(x$xrange, x$yrange)
+      do.call.matched(plot.owin,
+                      resolve.defaults(list(x=rec, add=add, show.all=TRUE),
+                                       list(...),
+                                       list(col=col0, main=main)))
+    }
+  }
+  if(!inherits(col, "colourmap")) {
+    do.call.plotfun(contour.default,
+                    resolve.defaults(list(x=x$xcol, y=x$yrow, z=t(x$v)),
+                                     list(add=TRUE, col=col),
+                                     list(...)))
+  } else {
+    clin <- do.call.matched(contourLines,
+                            append(list(x=x$xcol, y=x$yrow, z=t(x$v)),
+                                   list(...)))
+    linpar <- graphicsPars("lines")
+    for(i in seq_along(clin)) {
+      lini <- clin[[i]]
+      levi <- lini$level
+      coli <- col(levi)
+      argi <- resolve.defaults(lini[c("x", "y")],
+                               list(...),
+                               list(col=coli))
+      do.call.matched(lines.default, argi, extrargs=linpar)
+    }
+  }
+  return(invisible(z))
+}
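+
+## Example, as a sketch ('cells' is a standard spatstat dataset):
+##   Z <- density(cells)
+##   contour(Z, main="density of cells")
+##   cm <- colourmap(rainbow(8), range=range(Z))
+##   contour(Z, col=cm)    # each contour line coloured by its level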
+
diff --git a/R/plot.mppm.R b/R/plot.mppm.R
new file mode 100755
index 0000000..c6f69a3
--- /dev/null
+++ b/R/plot.mppm.R
@@ -0,0 +1,24 @@
+#
+# plot.mppm.R
+#
+#   $Revision: 1.4 $  $Date: 2016/02/11 10:17:12 $
+#
+#
+
+plot.mppm <- function(x, ..., trend=TRUE, cif=FALSE, se=FALSE,
+                      how=c("image", "contour", "persp")) {
+  xname <- deparse(substitute(x))
+  how <- match.arg(how)
+  subs <- subfits(x)
+  arglist <- resolve.defaults(list(x=subs, how=how),
+                              list(...),
+                              list(main=xname))
+  if(trend) 
+    do.call(plot, c(arglist, list(trend=TRUE, cif=FALSE, se=FALSE)))
+  if(cif) 
+    do.call(plot, c(arglist, list(trend=FALSE, cif=TRUE, se=FALSE)))
+  if(se) 
+    do.call(plot, c(arglist, list(trend=FALSE, cif=FALSE, se=TRUE)))
+  invisible(NULL)
+}
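+
+## Sketch of use, assuming a fitted model of class 'mppm'
+## ('waterstriders' is a standard spatstat dataset):
+##   fit <- mppm(Points ~ 1, data=hyperframe(Points=waterstriders))
+##   plot(fit, trend=TRUE, cif=FALSE, se=FALSE, how="image")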
+
diff --git a/R/plot.owin.R b/R/plot.owin.R
new file mode 100755
index 0000000..632fcbc
--- /dev/null
+++ b/R/plot.owin.R
@@ -0,0 +1,296 @@
+#
+#	plot.owin.S
+#
+#	The 'plot' method for observation windows (class "owin")
+#
+#	$Revision: 1.58 $	$Date: 2016/07/16 06:11:47 $
+#
+#
+#
+
+plot.owin <- function(x, main, add=FALSE, ..., box, edge=0.04,
+                      type = c("w", "n"), show.all=!add,
+                      hatch=FALSE,
+                      hatchargs=list(), 
+                      invert=FALSE, do.plot=TRUE,
+                      claim.title.space=FALSE) 
+{
+#
+# Function plot.owin.  A method for plot.
+#
+  if(missing(main))
+    main <- short.deparse(substitute(x))
+
+  W <- x
+  verifyclass(W, "owin")
+  if(!do.plot) 
+    return(invisible(as.rectangle(W)))
+  
+  type <- match.arg(type)
+
+  if(missing(box) || is.null(box)) {
+    box <- is.mask(W) && show.all
+  } else stopifnot(is.logical(box) && length(box) == 1)
+
+####
+  pt <- prepareTitle(main)
+  main <- pt$main
+  nlines <- pt$nlines
+#########        
+  xlim <- xr <- W$xrange
+  ylim <- yr <- W$yrange
+
+####################################################
+
+  ## graphics parameters that can be overridden by user
+  gparam <- resolve.defaults(list(...), par())
+  ## character expansion factors
+  ##     main title size = 'cex.main' * par(cex.main) * par(cex)
+  ## user's graphics expansion factor (*multiplies* par)
+  cex.main.user <- resolve.1.default(list(cex.main=1), list(...))
+  ## size of main title as multiple of par('cex')
+  cex.main.rela <- cex.main.user * par('cex.main') 
+  ## absolute size
+  cex.main.absol <- cex.main.rela * par('cex')
+    
+  if(!add) {
+    ## new plot
+    if(claim.title.space && nlines > 0) {
+      ## allow space for main title (only in multi-panel plots)
+      guesslinespace <- 0.07 * sqrt(diff(xr)^2 + diff(yr)^2) * cex.main.absol
+      added <- (nlines + 1) * guesslinespace
+      ylim[2] <- ylim[2] + added
+    }
+    ## set up plot with equal scales
+    do.call.plotfun(plot.default,
+                    resolve.defaults(list(x=numeric(0), y=numeric(0),
+                                          type="n"),
+                                     list(...),
+                                     list(xlim=xlim, ylim=ylim,
+                                          ann=FALSE, axes=FALSE,
+                                          asp=1.0,
+                                          xaxs="i", yaxs="i"),
+                                     .MatchNull=FALSE))
+  }
+  if(show.all && nlines > 0) {
+    ## add title 
+    if(claim.title.space) {
+      mainheight <- sum(strheight(main, units="user", cex=cex.main.rela))
+      gapheight <- (strheight("b\nb", units="user", cex=cex.main.rela)
+                    - 2 * strheight("b", units="user", cex=cex.main.rela))
+      if(nlines > 1 && !is.expression(main))
+        main <- paste(main, collapse="\n")
+      text(x=mean(xr), y=yr[2] + mainheight + 0.5 * gapheight, labels=main,
+           cex=cex.main.rela,
+           col=gparam$col.main,
+           font=gparam$font.main)
+    } else {
+      title(main=main,
+            cex=cex.main.rela,
+            col=gparam$col.main,
+            font=gparam$font.main)
+    }
+  }
+  
+# Draw surrounding box
+  if(box)
+    do.call.plotfun(segments,
+                    resolve.defaults(
+                                     list(x0=xr[c(1,2,2,1)],
+                                          y0=yr[c(1,1,2,2)],
+                                          x1=xr[c(2,2,1,1)],
+                                          y1=yr[c(1,2,2,1)]),
+                                     list(...)))
+  
+# If type = "n", do not plot the window.
+    if(type == "n")
+      return(invisible(as.rectangle(W)))
+
+  
+# Draw window
+
+  switch(W$type,
+         rectangle = {
+           Wpoly <- as.polygonal(W)
+           po <- Wpoly$bdry[[1]]
+           do.call.plotfun(polygon,
+                           resolve.defaults(list(x=po),
+                                            list(...)),
+                           extrargs="lwd")
+           if(hatch)
+             do.call(add.texture, append(list(W=W), hatchargs))
+         },
+         polygonal = {
+           p <- W$bdry
+           # Determine whether user wants to fill the interior
+           col.poly <- resolve.defaults(list(...), list(col=NA))$col
+           den.poly <- resolve.defaults(list(...), list(density=NULL))$density
+           no.fill  <- is.null(den.poly) &&
+                       (is.null(col.poly) || is.na(col.poly))
+           # Determine whether we need to triangulate the interior.
+           # If it is required to fill the interior,
+           # this can be done directly using polygon() provided
+           # there are no holes. Otherwise we must triangulate the interior.
+           if(no.fill)
+             triangulate <- FALSE
+           else {
+             # Determine whether there are any holes
+             holes <- unlist(lapply(p, is.hole.xypolygon))
+             triangulate <- any(holes)
+           }
+
+           if(!triangulate) {
+             # No triangulation required;
+             # simply plot the polygons
+             for(i in seq_along(p))
+               do.call.plotfun(polygon,
+                               resolve.defaults(
+                                                list(x=p[[i]]),
+                                                list(...)),
+                               extrargs="lwd")
+           } else {
+              # Try using polypath():
+             lucy <- names(dev.cur())
+             if(!(lucy %in% c("xfig","pictex","X11"))) {
+               ppa <- owin2polypath(W)
+               do.call.plotfun(polypath,
+                               resolve.defaults(ppa,
+                                                list(border=col.poly),
+                                                list(...)))
+             } else {
+               # decompose window into simply-connected pieces
+               broken <- try(break.holes(W))
+               if(inherits(broken, "try-error")) {
+                 warning("Unable to plot filled polygons")
+               } else {
+                 # Fill pieces with colour (and draw border in same colour)
+                 pp <- broken$bdry
+                 for(i in seq_along(pp))
+                   do.call.plotfun(polygon,
+                                   resolve.defaults(list(x=pp[[i]],
+                                                         border=col.poly),
+                                                    list(...)))
+               }
+             }
+             # Now draw polygon boundaries
+             for(i in seq_along(p))
+               do.call.plotfun(polygon,
+                               resolve.defaults(
+                                                list(x=p[[i]]),
+                                                list(density=0, col=NA),
+                                                list(...)),
+                               extrargs="lwd")
+           }
+           if(hatch)
+             do.call(add.texture, append(list(W=W), hatchargs))
+         },
+         mask = {
+           # capture 'col' argument and ensure it's at least 2 values
+           coldefault <- c(par("bg"), par("fg"))
+           col <- resolve.defaults(
+                                   list(...),
+                                   spatstat.options("par.binary"),
+                                   list(col=coldefault)
+                                   )$col
+           if(length(col) == 1) {
+             col <- unique(c(par("bg"), col))
+             if(length(col) == 1) 
+               col <- c(par("fg"), col)
+           }
+           ## invert colours?
+           if(invert)
+             col <- rev(col)
+           ## convert to greyscale?
+           if(spatstat.options("monochrome"))
+             col <- to.grey(col)
+           
+           do.call.matched(image.default,
+                           resolve.defaults(
+                           list(x=W$xcol, y=W$yrow, z=t(W$m), add=TRUE),
+                           list(col=col),       
+                           list(...),
+                           spatstat.options("par.binary"),
+                           list(zlim=c(FALSE, TRUE))))
+           if(hatch)
+             do.call(add.texture, append(list(W=W), hatchargs))
+         },
+         stop(paste("Don't know how to plot window of type", sQuote(W$type)))
+         )
+  return(invisible(as.rectangle(W)))
+}
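+
+## Examples, as a sketch ('letterR' is a standard spatstat window):
+##   plot(Window(cells))               # rectangular window
+##   plot(letterR, col="grey")         # filled polygonal window
+##   plot(letterR, hatch=TRUE)         # filled with line texture instead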
+
+break.holes <- local({
+
+  insect <- function(A, Box) {
+    ## efficient version of intersect.owin which doesn't 'fix' the polygons
+    a <- lapply(A$bdry, reverse.xypolygon)
+    b <- lapply(as.polygonal(Box)$bdry, reverse.xypolygon)
+    ab <- polyclip::polyclip(a, b, "intersection",
+                             fillA="nonzero", fillB="nonzero")
+    if(length(ab)==0)
+      return(emptywindow(Box))
+    # ensure correct polarity
+    totarea <- sum(unlist(lapply(ab, Area.xypolygon)))
+    if(totarea < 0)
+      ab <- lapply(ab, reverse.xypolygon)
+    AB <- owin(Box$xrange, Box$yrange,
+               poly=ab, check=FALSE, strict=FALSE, fix=FALSE,
+               unitname=unitname(A))
+    return(AB)
+  }
+
+  break.holes <- function(x, splitby=NULL, depth=0, maxdepth=100) {
+    if(is.null(splitby)) {
+      ## first call: validate x
+      stopifnot(is.owin(x))
+      splitby <- "x"
+    }
+    if(depth > maxdepth)
+      stop("Unable to divide window into simply-connected pieces")
+    p <- x$bdry
+    holes <- unlist(lapply(p, is.hole.xypolygon))
+    if(!any(holes)) return(x)
+    nholes <- sum(holes)
+    maxdepth <- max(maxdepth, 4 * nholes)
+    i <- min(which(holes))
+    p.i <- p[[i]]
+    b <- as.rectangle(x)
+    xr <- b$xrange
+    yr <- b$yrange
+    switch(splitby,
+           x = {
+             xsplit <- mean(range(p.i$x))
+             left <- c(xr[1], xsplit)
+             right <- c(xsplit, xr[2])
+             xleft <- insect(x, owin(left, yr))
+             xright <- insect(x, owin(right, yr))
+             ## recurse
+             xleft <- break.holes(xleft, splitby="y",
+                                  depth=depth+1, maxdepth=maxdepth)
+             xright <- break.holes(xright, splitby="y",
+                                   depth=depth+1, maxdepth=maxdepth)
+             ## recombine (without fusing polygons again!)
+             result <- owin(xr, yr, poly=c(xleft$bdry, xright$bdry),
+                            check=FALSE, strict=FALSE, fix=FALSE)
+           },
+           y = {
+             ysplit <- mean(range(p.i$y))
+             lower <- c(yr[1], ysplit)
+             upper <- c(ysplit, yr[2])
+             xlower <- insect(x, owin(xr, lower))
+             xupper <- insect(x, owin(xr, upper))
+             ## recurse
+             xlower <- break.holes(xlower, splitby="x",
+                                   depth=depth+1, maxdepth=maxdepth)
+             xupper <- break.holes(xupper, splitby="x",
+                                   depth=depth+1, maxdepth=maxdepth)
+             ## recombine (without fusing polygons again!)
+             result <- owin(xr, yr, poly=c(xlower$bdry, xupper$bdry),
+                            check=FALSE, strict=FALSE, fix=FALSE)
+           })
+    return(result)
+  }
+
+  break.holes
+})
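+
+## Sketch: a square window with a square hole is divided into
+## simply-connected pieces, so that polygon() can fill each piece:
+##   outside <- list(x=c(0,1,1,0), y=c(0,0,1,1))                  # anticlockwise
+##   hole    <- list(x=c(0.4,0.4,0.6,0.6), y=c(0.4,0.6,0.6,0.4))  # clockwise
+##   W <- owin(poly=list(outside, hole))
+##   V <- break.holes(W)   # same region, represented without any holes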
+
diff --git a/R/plot.plotppm.R b/R/plot.plotppm.R
new file mode 100755
index 0000000..b40bde3
--- /dev/null
+++ b/R/plot.plotppm.R
@@ -0,0 +1,144 @@
+#
+# plot.plotppm.R
+#
+# engine of plot method for ppm
+#
+# $Revision: 1.20 $  $Date: 2016/12/30 01:44:07 $
+#
+#
+
+plot.plotppm <- function(x,data=NULL,trend=TRUE,cif=TRUE,se=TRUE,
+                         pause=interactive(),
+                         how=c("persp","image","contour"), ...,
+                         pppargs=list())
+{
+  verifyclass(x,"plotppm")
+  
+  # determine main plotting actions
+  superimposed <- !is.null(data)
+  if(!missing(trend) && (trend & is.null(x[["trend"]])))
+    stop("No trend to plot.\n")
+  trend <- trend & !is.null(x[["trend"]])
+  if(!missing(cif) && (cif & is.null(x[["cif"]])))
+    stop("No cif to plot.\n")
+  cif <- cif & !is.null(x[["cif"]])
+  if(!missing(se) && (se & is.null(x[["se"]])))
+    stop("No SE to plot.\n")
+  se <- se & !is.null(x[["se"]])
+  surftypes <- c("trend", "cif", "se")[c(trend, cif, se)]
+
+  # marked point process?
+  mrkvals <- attr(x,"mrkvals")
+  marked <- (length(mrkvals) > 1)
+  if(marked)
+    data.marks <- marks(data)
+  if(marked & superimposed) {
+    data.types <- levels(data.marks)
+    if(any(sort(data.types) != sort(mrkvals)))
+      stop(paste("Data marks are different from mark",
+                 "values for argument x.\n"))
+  }
+
+  # plotting style
+  howmat <- outer(how, c("persp", "image", "contour"), "==")
+  howmatch <- matrowany(howmat)
+  if (any(!howmatch)) 
+    stop(paste("unrecognised option", how[!howmatch]))
+
+  # no pause required for single display
+  if(missing(pause) || is.null(pause)) {
+    nplots <- length(surftypes) * length(mrkvals)
+    pause <- interactive() && (nplots != 1)
+  }
+  
+  # start plotting
+  if(pause)
+    oldpar <- par(ask = TRUE)
+  on.exit(if(pause) par(oldpar))
+
+  
+  for(ttt in surftypes) {
+    xs <- x[[ttt]]
+    for (i in seq_along(mrkvals)) {
+      level <- mrkvals[i]
+      main <- paste(if(ttt == "se") "Estimated" else "Fitted",
+                    ttt, 
+                    if(marked) paste("\n mark =", level) else NULL)
+      for (style in how) {
+        switch(style,
+               persp = {
+                 do.call(persp,
+                         resolve.defaults(list(xs[[i]]),
+                                          list(...), 
+                                          spatstat.options("par.persp"),
+                                          list(xlab="x", zlab=ttt, main=main)))
+               },
+               image = {
+                 do.call(image,
+                         resolve.defaults(list(xs[[i]]),
+                                          list(...),
+                                          list(main=main)))
+                 if(superimposed) {
+                   X <- if(marked) data[data.marks == level] else data
+                   do.call(plot.ppp, append(list(x=X, add=TRUE), pppargs))
+                 }
+               },
+               contour = {
+                 do.call(contour,
+                         resolve.defaults(list(xs[[i]]),
+                                          list(...),
+                                          list(main=main)))
+                 if(superimposed) {
+                   X <- if(marked) data[data.marks == level] else data
+                   do.call(plot.ppp, append(list(x=X, add=TRUE), pppargs))
+                 }
+               },
+               {
+                 stop(paste("Unrecognised plot style", style))
+               })
+      }
+    }
+  }
+  return(invisible())
+}
+
+print.plotppm <- function(x, ...) {
+  verifyclass(x, "plotppm")
+  trend   <- x$trend
+  cif     <- x$cif
+  mrkvals <- attr(x, "mrkvals")
+  ntypes  <- length(mrkvals)
+  unmarked <- (ntypes == 1 )
+  cat(paste("Object of class", sQuote("plotppm"), "\n"))
+  if(unmarked)
+    cat("Computed for an unmarked point process\n")
+  else {
+    cat("Computed for a marked point process, with mark values:\n")
+    print(mrkvals)
+  }
+  cat("Contains the following components:\n")
+  if(!is.null(trend)) {
+    cat("\n$trend:\tFitted trend.\n")
+    if(unmarked) {
+      cat("A list containing 1 image\n")
+      print(trend[[1]], ...)
+    } else {
+      cat(paste("A list of", ntypes, "images\n"))
+      cat("Typical details:\n")
+      print(trend[[1]], ...)
+    }
+  }
+  if(!is.null(cif)) {
+    cat("\n$cif:\tFitted conditional intensity.\n")
+    if(unmarked) {
+      cat("A list containing 1 image\n")
+      print(cif[[1]], ...)
+    } else {
+      cat(paste("A list of", ntypes, "images\n"))
+      cat("Typical details:\n")
+      print(cif[[1]], ...)
+    }
+  }
+  invisible(NULL)
+}
+
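A "plotppm" object is computed and returned invisibly by plot.ppm (next file
in this diff), so the prediction images can be replotted without refitting.
A minimal sketch using the standard 'cells' dataset:

    library(spatstat)
    fit <- ppm(cells ~ polynom(x, y, 2))   # inhomogeneous Poisson model
    M <- plot(fit, plot.it=FALSE)          # compute predictions, don't plot
    print(M)                               # describe the stored components
    plot(M, how="image", pause=FALSE)      # replot from the stored images
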
diff --git a/R/plot.ppm.R b/R/plot.ppm.R
new file mode 100755
index 0000000..8dfd377
--- /dev/null
+++ b/R/plot.ppm.R
@@ -0,0 +1,112 @@
+#
+#    plot.ppm.R
+#
+#    $Revision: 2.12 $    $Date: 2016/06/11 08:02:17 $
+#
+#    plot.ppm()
+#         Plot a point process model fitted by ppm().
+#        
+#
+#
+plot.ppm <- function(x, ngrid = c(40,40),
+		     superimpose = TRUE,
+                     trend=TRUE, cif=TRUE, se=TRUE, 
+                     pause = interactive(),
+                     how=c("persp","image", "contour"),
+                     plot.it=TRUE,
+                     locations=NULL, covariates=NULL, ...)
+{
+  model <- x
+#       Plot a point process model fitted by ppm().
+#
+  verifyclass(model, "ppm")
+#
+#       find out what kind of model it is
+#
+  mod <- summary(model, quick="entries")
+  stationary <- mod$stationary
+  poisson    <- mod$poisson
+  marked     <- mod$marked
+  multitype  <- mod$multitype
+  data       <- mod$entries$data
+        
+  if(marked) {
+    if(!multitype)
+      stop("Not implemented for general marked point processes")
+    else
+      mrkvals <- levels(marks(data))
+  } else mrkvals <- 1
+#  ntypes <- length(mrkvals)
+        
+#
+#        Interpret options
+#        -----------------
+#        
+#        Whether to plot trend, cif, se
+        
+  if(!trend && !cif && !se) {
+    cat(paste0("Nothing plotted; ", sQuote("trend"), ", ", sQuote("cif"),
+               " and ", sQuote("se"), " are all FALSE\n"))
+    return(invisible(NULL))
+  }
+#        Suppress uninteresting plots
+#        unless explicitly instructed otherwise
+  if(missing(trend))
+    trend <- !stationary
+  if(missing(cif))
+    cif <- !poisson
+  if(missing(se))
+    se <- poisson && !stationary 
+  else if(se && !poisson) {
+      warning(paste("standard error calculation",
+                  "is only implemented for Poisson models"))
+      se <- FALSE
+  }
+  if(!trend && !cif && !se) {
+    cat("Nothing plotted -- all plots selected are flat surfaces.\n")
+    return(invisible(NULL))
+  }
+#
+#  style of plot: suppress pseudo-default
+#  
+    if(missing(how))
+      how <- "image"
+#
+#
+#        Do the prediction
+#        ------------------
+
+  out <- list()
+  surftypes <- c("trend","cif","se")[c(trend,cif,se)]
+  ng <- if(missing(ngrid) && !missing(locations)) NULL else ngrid
+
+  for (ttt in surftypes) {
+    p <- predict(model,
+                 ngrid=ng, locations=locations, covariates=covariates,
+                 type = ttt,
+                 getoutofjail=TRUE)  # permit outdated usage type="se"
+    if(is.im(p))
+      p <- list(p)
+    out[[ttt]] <- p
+  }
+
+#        Make it a plotppm object
+#        ------------------------  
+  
+  class(out) <- "plotppm"
+  attr(out, "mrkvals") <- mrkvals
+
+#        Actually plot it if required
+#        ----------------------------  
+  if(plot.it) {
+    if(!superimpose)
+      data <- NULL
+    if(missing(pause))
+      pause <- NULL
+    plot(out,data=data,trend=trend,cif=cif,se=se,how=how,pause=pause, ...)
+  }
+
+  
+  return(invisible(out)) 
+}
+
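Note the defaulting logic above: when trend, cif and se are not given
explicitly, constant surfaces are suppressed, so a stationary Poisson fit
plots nothing unless asked. A sketch:

    library(spatstat)
    fit0 <- ppm(cells ~ 1)                 # stationary Poisson
    plot(fit0)                             # message: all surfaces are flat
    plot(fit0, trend=TRUE, pause=FALSE)    # force the (flat) trend surface
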
diff --git a/R/plot.ppp.R b/R/plot.ppp.R
new file mode 100755
index 0000000..bd764c6
--- /dev/null
+++ b/R/plot.ppp.R
@@ -0,0 +1,500 @@
+#
+#	plot.ppp.R
+#
+#	$Revision: 1.91 $	$Date: 2017/06/05 10:31:58 $
+#
+#
+#--------------------------------------------------------------------------
+
+plot.ppp <- local({
+
+  transparencyfun <- function(n) {
+    if(n <= 100) 1 else (0.2 + 0.8 * exp(-(n-100)/1000))
+  }
+  
+  ## determine symbol map for marks of points
+  default.symap.points <- function(x, ..., 
+                                  chars=NULL, cols=NULL, 
+                                  maxsize=NULL, meansize=NULL, markscale=NULL) {
+    marx <- marks(x)
+    if(is.null(marx)) {
+      ## null or constant symbol map
+      ## consider using transparent colours
+      if(is.null(cols) &&
+         !any(c("col", "fg", "bg") %in% names(list(...))) &&
+         (nx <- npoints(x)) > 100 &&
+         identical(dev.capabilities()$semiTransparency, TRUE) &&
+         spatstat.options("transparent"))
+        cols <- rgb(0,0,0,transparencyfun(nx))
+      return(symbolmap(..., chars=chars, cols=cols))
+    }
+    if(!is.null(dim(marx)))
+      stop("Internal error: multivariate marks in default.symap.points")
+
+    argnames <- names(list(...))
+    shapegiven <- "shape" %in% argnames
+    chargiven <- (!is.null(chars)) || ("pch" %in% argnames)
+    assumecircles <- !(shapegiven || chargiven)
+    sizegiven <- ("size" %in% argnames) ||
+                 (("cex" %in% argnames) && !shapegiven)
+    
+    if(inherits(marx, c("Date", "POSIXt"))) {
+      ## ......... marks are dates or date/times .....................
+      timerange <- range(marx, na.rm=TRUE)
+      shapedefault <- if(!assumecircles) list() else list(shape="circles")
+      if(sizegiven) {
+        g <- do.call(symbolmap,
+          resolve.defaults(list(range=timerange),
+                           list(...),
+                           shapedefault,
+                           list(chars=chars, cols=cols)))
+        return(g)
+      }
+      ## attempt to determine a scale for the marks 
+      y <- scaletointerval(marx, 0, 1, timerange)
+      y <- y[is.finite(y)]
+      if(length(y) == 0) return(symbolmap(..., chars=chars, cols=cols))
+      scal <- mark.scale.default(y, as.owin(x), 
+                                 markscale=markscale, maxsize=maxsize,
+                                 meansize=meansize, 
+                                 characters=chargiven)
+      if(is.na(scal)) return(symbolmap(..., chars=chars, cols=cols))
+      ## scale determined
+      sizefun <- function(x, scal=1) {
+        (scal/2) * scaletointerval(x, 0, 1, timerange)
+      }
+      formals(sizefun)[[2]] <- scal  ## ensures value of 'scal' is printed
+      ##
+      g <- do.call(symbolmap,
+                   resolve.defaults(list(range=timerange),
+                                    list(...),
+                                    shapedefault,
+                                    list(size=sizefun)))
+      return(g)
+    }
+    if(is.numeric(marx)) {
+      ## ............. marks are numeric values ...................
+      marx <- marx[is.finite(marx)]
+      if(length(marx) == 0)
+        return(symbolmap(..., chars=chars, cols=cols))
+      markrange <- range(marx)
+      ## 
+      if(sizegiven) {
+        g <- do.call(symbolmap,
+          resolve.defaults(list(range=markrange),
+                           list(...),
+                           if(assumecircles) list(shape="circles") else list(),
+                           list(chars=chars, cols=cols)))
+        return(g)
+      }
+      ## attempt to determine a scale for the marks 
+      if(all(markrange == 0))
+        return(symbolmap(..., chars=chars, cols=cols))
+      scal <- mark.scale.default(marx, as.owin(x), 
+                                 markscale=markscale, maxsize=maxsize,
+                                 meansize=meansize,
+                                 characters=chargiven)
+      if(is.na(scal)) return(symbolmap(..., chars=chars, cols=cols))
+      ## scale determined
+      if(markrange[1] >= 0) {
+        ## all marks are nonnegative
+        shapedefault <-
+          if(!assumecircles) list() else list(shape="circles")
+        cexfun <- function(x, scal=1) { scal * x }
+        circfun <- function(x, scal=1) { scal * x }
+        formals(cexfun)[[2]] <- formals(circfun)[[2]] <- scal
+        sizedefault <-
+          if(sizegiven) list() else
+          if(chargiven) list(cex=cexfun) else list(size=circfun)
+      } else {
+        ## some marks are negative
+        shapedefault <-
+          if(!assumecircles) list() else
+          list(shape=function(x) { ifelse(x >= 0, "circles", "squares") })
+        cexfun <- function(x, scal=1) { scal * abs(x) }
+        circfun <- function(x, scal=1) { scal * abs(x) }
+        formals(cexfun)[[2]] <- formals(circfun)[[2]] <- scal
+        sizedefault <-
+          if(sizegiven) list() else
+          if(chargiven) list(cex=cexfun) else list(size=circfun)
+      }
+      g <- do.call(symbolmap,
+                   resolve.defaults(list(range=markrange),
+                                    list(...),
+                                    shapedefault,
+                                    sizedefault,
+                                    list(chars=chars, cols=cols)))
+      return(g)
+    }
+    ##  ...........  non-numeric marks .........................
+    um <- if(is.factor(marx)) levels(marx) else sort(unique(marx))
+    ntypes <- length(um)
+    if(!is.null(cols))
+      cols <- rep.int(cols, ntypes)[1:ntypes]
+    if(shapegiven && sizegiven) {
+      #' values mapped to symbols (shape and size specified)
+      g <- symbolmap(inputs=um, ..., cols=cols)
+    } else if(!shapegiven) {
+      #' values mapped to 'pch'
+      chars <- default.charmap(ntypes, chars)
+      g <- symbolmap(inputs=um, ..., chars=chars, cols=cols)
+    } else {
+      #' values mapped to symbols
+      #' determine size
+      scal <- mark.scale.default(rep(1, npoints(x)),
+                                 Window(x), 
+                                 maxsize=maxsize,
+                                 meansize=meansize,
+                                 characters=FALSE)
+      g <- symbolmap(inputs=um, ..., size=scal, cols=cols)
+    }
+    return(g)
+  }
+                                  
+  default.charmap <- function(n, ch=NULL) {
+    if(!is.null(ch))
+      return(rep.int(ch, n)[1:n])
+    if(n <= 25)
+      return(1:n)
+    ltr <- c(letters, LETTERS)
+    if(n <= 52)
+      return(ltr[1:n])
+    ## wrapped sequence of letters
+    warning("Too many types to display every type as a different character")
+    return(ltr[1 + (0:(n - 1) %% 52)])
+  }
+
+  ## main function
+  plot.ppp <-
+    function(x, main, ..., clipwin=NULL,
+             chars=NULL, cols=NULL, use.marks=TRUE,
+             which.marks=NULL, add=FALSE, type=c("p", "n"), 
+             legend=TRUE, leg.side=c("left", "bottom", "top", "right"),
+             leg.args=list(),
+             symap=NULL, maxsize=NULL, meansize=NULL, markscale=NULL, zap=0.01, 
+             show.window=show.all, show.all=!add, do.plot=TRUE,
+             multiplot=TRUE)
+{
+  if(missing(main))
+    main <- short.deparse(substitute(x))
+
+  type <- match.arg(type)
+  if(missing(legend)) legend <- (type == "p")
+
+  if(!missing(maxsize) || !missing(markscale) || !missing(meansize))
+    warn.once("circlescale",
+              "Interpretation of arguments maxsize and markscale",
+              "has changed (in spatstat version 1.37-0 and later).",
+              "Size of a circle is now measured by its diameter.")
+
+  if(clipped <- !is.null(clipwin)) {
+    stopifnot(is.owin(clipwin))
+    W <- Window(x)
+    clippy <- if(is.mask(W)) intersect.owin(W, clipwin) else edges(W)[clipwin]
+    x <- x[clipwin]
+  } else clippy <- NULL
+  
+  ## sensible default position
+  legend <- legend && show.all
+  if(legend) {
+    leg.side <- match.arg(leg.side)
+    vertical <- (leg.side %in% c("left", "right"))
+  }
+  
+#  if(type == "n" || npoints(x) == 0) {
+#    ## plot the window only
+#    xwindow <- x$window
+#    if(do.plot) 
+#      do.call(plot.owin,
+#              resolve.defaults(list(xwindow),
+#                               list(...),
+#                               list(main=main, invert=TRUE, add=add,
+#                                    type=if(show.window) "w" else "n")))
+#    if(is.null(symap)) symap <- symbolmap()
+#    attr(symap, "bbox") <- as.rectangle(xwindow)
+#    return(invisible(symap))
+#  }
+
+  ## ................................................................
+  ## Handle multiple columns of marks as separate plots
+  ##  (unless add=TRUE or which.marks selects a single column
+  ##   or multipage = FALSE)
+  if(use.marks && is.data.frame(mx <- marks(x))) {
+    implied.all <- is.null(which.marks)
+    want.several <- implied.all || is.data.frame(mx <- mx[,which.marks])
+    do.several <- want.several && !add && multiplot
+    if(do.several) {
+      ## generate one plot for each column of marks
+      y <- solapply(mx, setmarks, x=x)
+      out <- do.call(plot,
+                     resolve.defaults(list(x=y, main=main,
+                                           show.window=show.window && !clipped,
+                                           do.plot=do.plot,
+                                           type=type),
+                                      list(...),
+                                      list(equal.scales=TRUE),
+                                      list(panel.end=clippy),
+                                      list(legend=legend,
+                                           leg.side=leg.side,
+                                           leg.args=leg.args),
+                                      list(chars=chars, cols=cols,
+                                           maxsize=maxsize,
+                                           meansize=meansize,
+                                           markscale=markscale,
+                                           zap=zap)))
+      return(invisible(out))
+    } 
+    if(is.null(which.marks)) {
+      which.marks <- 1
+      if(do.plot) message("Plotting the first column of marks")
+    }
+  }
+  
+  ## ............... unmarked, or single column of marks ....................
+
+  ## Determine symbol map and mark values to be used
+  y <- x
+  if(!is.marked(x, na.action="ignore") || !use.marks) {
+    ## Marks are not mapped.
+    marx <- NULL
+    if(is.null(symap))
+      symap <- default.symap.points(unmark(x), ..., chars=chars, cols=cols)
+  } else {
+    ## Marked point pattern
+    marx <- marks(y, dfok=TRUE)
+    if(is.data.frame(marx)) {
+      ## select column or take first column
+      marx <- marx[, which.marks]
+      y <- setmarks(y, marx)
+    }
+    if(npoints(y) > 0) {
+      ok <- complete.cases(as.data.frame(y))
+      if(!any(ok)) {
+        warning("All mark values are NA; plotting locations only.")
+        if(is.null(symap))
+          symap <- default.symap.points(unmark(x), ..., chars=chars, cols=cols)
+      } else if(any(!ok)) {
+        warning(paste("Some marks are NA;",
+                      "corresponding points are omitted."))
+        x <- x[ok]
+        y <- y[ok]
+        marx <- marks(y)
+      }
+    }
+    ## apply default symbol map
+    if(is.null(symap))
+      symap <- default.symap.points(y, chars=chars, cols=cols, 
+                                    maxsize=maxsize, meansize=meansize,
+                                    markscale=markscale,
+                                    ...)
+  }
+#  gtype <- symbolmaptype(symap)
+
+  ## Determine bounding box for main plot
+  BB <- as.rectangle(x)
+  sick <- inherits(x, "ppp") && !is.null(rejects <- attr(x, "rejects"))
+  if(sick) {
+    ## Get relevant parameters
+    par.direct <- list(main=main, use.marks=use.marks,
+                   maxsize=maxsize, meansize=meansize, markscale=markscale)
+    par.rejects <- resolve.1.default(list(par.rejects=list(pch="+")),
+                                     list(...))
+    par.all <- resolve.defaults(par.rejects, par.direct)
+    rw <- resolve.defaults(list(...), list(rejectwindow=NULL))$rejectwindow
+    ## determine window for rejects
+    rwin <-
+      if(is.null(rw))
+        rejects$window
+      else if(is.logical(rw) && rw)
+        rejects$window
+      else if(inherits(rw, "owin"))
+        rw
+      else if(is.character(rw)) {
+        switch(rw,
+               box={boundingbox(rejects, x)},
+               ripras={ripras(c(rejects$x, x$x), c(rejects$y, x$y))},
+               stop(paste("Unrecognised option: rejectwindow=", rw)))
+      } else stop("Unrecognised format for rejectwindow")
+    if(is.null(rwin))
+      stop("Selected window for rejects pattern is NULL")
+    BB <- boundingbox(BB, as.rectangle(rwin))
+  }
+
+  ## Augment bounding box with space for legend, if appropriate
+  legend <- legend && (symbolmaptype(symap) != "constant") 
+  if(legend) {
+    ## guess maximum size of symbols
+    maxsize <- invoke.symbolmap(symap, marx,
+                                corners(as.rectangle(x)),
+                                add=add, do.plot=FALSE)
+    sizeguess <- if(maxsize <= 0) NULL else (1.5 * maxsize)
+    leg.args <- append(list(side=leg.side, vertical=vertical), leg.args)
+    ## draw up layout
+    legbox <- do.call.matched(plan.legend.layout,
+                              append(list(B=BB, size = sizeguess,
+                                          started=FALSE, map=symap),
+                                     leg.args))
+    ## bounding box for everything
+    BB <- legbox$A
+  }
+
+  ## return now if not plotting
+  attr(symap, "bbox") <- BB
+  if(!do.plot)
+    return(invisible(symap))
+    
+  ## ............. start plotting .......................
+  pt <- prepareTitle(main)
+  main <- pt$main
+  nlines <- pt$nlines
+  blankmain <- if(nlines == 0) "" else rep("  ", nlines)
+  rez <- resolve.defaults(list(...),
+                          list(cex.main=1,
+                               xlim=NULL,
+                               ylim=NULL))
+  plot(BB, type="n", add=add, main=blankmain, show.all=show.all,
+       cex.main=rez$cex.main, xlim=rez$xlim, ylim=rez$ylim)
+
+  if(sick) {
+    if(show.window) {
+      ## plot windows
+      if(!is.null(rw)) {
+        ## plot window for rejects
+        rwinpardefault <- list(lty=2,lwd=1,border=1)
+        rwinpars <-
+          resolve.defaults(par.rejects, rwinpardefault)[names(rwinpardefault)]
+        do.call(plot.owin, append(list(rwin, add=TRUE), rwinpars))
+      }
+      ## plot window of main pattern
+      if(!clipped) {
+        do.call(plot.owin,
+                resolve.defaults(list(x$window, add=TRUE),
+                                 list(...),
+                                 list(invert=TRUE)))
+      } else plot(clippy, add=TRUE, ...)
+    }
+    if(type != "n") {
+      ## plot reject points
+      do.call(plot.ppp, append(list(rejects, add=TRUE), par.all))
+      warning(paste(rejects$n, "illegal points also plotted"))
+    }
+    ## the rest is added
+    add <- TRUE
+  }
+
+  ## Now convert to bona fide point pattern
+  x <- as.ppp(x)
+  xwindow <- x$window
+
+  ## Plot observation window (or at least the main title)
+  do.call(plot.owin,
+          resolve.defaults(list(x=xwindow,
+                                add=TRUE,
+                                main=main,
+                                type=if(show.window && !clipped) "w" else "n",
+                                show.all=show.all),
+                           list(...),
+                           list(invert=TRUE)))
+  ## If clipped, plot visible part of original window
+  if(show.window && clipped)
+    plot(clippy, add=TRUE, ...)
+  # else if(show.all) fakemaintitle(as.rectangle(xwindow), main, ...)
+
+  if(type != "n") {
+    ## plot symbols ##
+    invoke.symbolmap(symap, marx, x, add=TRUE)
+  }
+  
+  ## add legend
+  if(legend) {
+    b <- legbox$b
+    legendmap <- if(length(leg.args) == 0) symap else 
+                 do.call(update, append(list(object=symap), leg.args))
+    do.call(plot,
+            append(list(x=legendmap, main="", add=TRUE,
+                        xlim=b$xrange, ylim=b$yrange),
+                   leg.args))
+  }
+  
+  return(invisible(symap))
+}
+
+plot.ppp
+
+})
+
+
+mark.scale.default <- function(marx, w, markscale=NULL,
+                               maxsize=NULL, meansize=NULL,
+                               characters=FALSE) {
+  ## establish values of markscale, maxsize, meansize
+  ngiven <- (!is.null(markscale)) +
+            (!is.null(maxsize)) +
+            (!is.null(meansize))
+  if(ngiven > 1)
+     stop("Only one of the arguments markscale, maxsize, meansize",
+          " should be given", call.=FALSE)
+  if(ngiven == 0) {
+    ## if ALL are absent, enforce the spatstat defaults
+    ## (which could also be null)
+    pop <- spatstat.options("par.points")
+    markscale <- pop$markscale
+    maxsize   <- pop$maxsize
+    meansize <- pop$meansize
+  }
+  ## Now check whether markscale is fixed
+  if(!is.null(markscale)) {
+    stopifnot(markscale > 0)
+    return(markscale)
+  }
+  # Usual case: markscale is to be determined from maximum/mean physical size
+  if(is.null(maxsize) && is.null(meansize)) {
+    ## compute default value of 'maxsize'
+    ## guess appropriate max physical size of symbols
+    bb <- as.rectangle(w)
+    maxsize <- 1.4/sqrt(pi * length(marx)/area(bb))
+    maxsize <- min(maxsize, diameter(bb) * 0.07)
+    ## updated: maxsize now represents *diameter*
+    maxsize <- 2 * maxsize
+  } else {
+    if(!is.null(maxsize)) stopifnot(maxsize > 0) else stopifnot(meansize > 0)
+  }
+  
+  # Examine mark values
+  absmarx <- abs(marx)
+  maxabs <- max(absmarx)
+  tiny <- (maxabs < 4 * .Machine$double.eps)
+  if(tiny)
+    return(NA)
+
+  ## finally determine physical scale for symbols
+  if(!is.null(maxsize)) {
+    scal <- maxsize/maxabs
+  } else {
+    meanabs <- mean(absmarx)
+    scal <- meansize/meanabs
+  }
+  if(!characters) return(scal)
+
+  ## if using characters ('pch') we need to
+  ## convert physical sizes to 'cex' values
+  charsize <- max(sidelengths(as.rectangle(w)))/40
+  return(scal/charsize)
+}
+
+fakemaintitle <- function(bb, main, ...) {
+  ## Try to imitate effect of 'title(main=main)' above a specified box
+  if(!any(nzchar(main))) return(invisible(NULL))
+  bb <- as.rectangle(bb)
+  x0 <- mean(bb$xrange)
+  y0 <- bb$yrange[2] + length(main) * diff(bb$yrange)/12
+  parnames <- c('cex.main', 'col.main', 'font.main')
+  parlist <- par(parnames)
+  parlist <- resolve.defaults(list(...), parlist)[parnames]
+  names(parlist) <- c('cex', 'col', 'font')
+  do.call.matched(text.default,
+                  resolve.defaults(list(x=x0, y=y0, labels=main),
+                                   parlist,    list(...)),
+                  funargs=graphicsPars("text"))
+  return(invisible(NULL))
+}
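A few sketches of how the symbol-map defaults above behave; all three
datasets ship with spatstat:

    library(spatstat)
    plot(longleaf, markscale=0.05)         # numeric marks: circle diameter
                                           #   = 0.05 * mark value
    plot(amacrine, cols=c("red", "blue"))  # factor marks: symbol per level
    plot(finpines, which.marks="height")   # data-frame marks: pick a column
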
diff --git a/R/plot3d.R b/R/plot3d.R
new file mode 100644
index 0000000..fd1b2ab
--- /dev/null
+++ b/R/plot3d.R
@@ -0,0 +1,197 @@
+#'  perspective plot of 3D point data
+#'
+#'  $Revision: 1.5 $ $Date: 2016/09/23 04:57:43 $
+#'
+
+
+project3Dhom <- local({
+
+  check3dvector <- function(x) {
+    xname <- deparse(substitute(x))
+    if(!(is.numeric(x) && length(x) == 3))
+      stop(paste(xname, "should be a numeric vector of length 3"),
+           call.=FALSE)
+    return(NULL)
+  }
+
+  normalise <- function(x) {
+    len <- sqrt(sum(x^2))
+    if(len == 0) stop("Attempted to normalise a vector of length 0")
+    return(x/len)
+  }
+
+  innerprod <- function(a, b) sum(a*b)
+
+  crossprod <- function(u, v) {
+    c(u[2] * v[3] - u[3] * v[2],
+      -(u[1] * v[3] - u[3] * v[1]),
+      u[1] * v[2] - u[2] * v[1])
+  }
+
+  project3Dhom <- function(xyz, eye=c(0,-3,1), org=c(0,0,0), vert=c(0,0,1)) {
+    ## xyz: data to be projected (matrix n * 3)
+    stopifnot(is.matrix(xyz) && ncol(xyz) == 3)
+    ## eye: eye position (x,y,z)
+    check3dvector(eye)
+    ## org: origin (x,y,z) becomes middle of projection plane
+    check3dvector(org)
+    ## vert: unit vector in direction to become the 'vertical'
+    if(!missing(vert)) {
+      check3dvector(vert)
+      vert <- normalise(vert)
+    }
+    ## vector pointing into screen
+    vin <- normalise(org - eye)
+    ## projection of vertical onto screen
+    vup <- normalise(vert - innerprod(vert, vin) * vin)
+    ## horizontal axis in screen
+    vhoriz <- crossprod(vin, vup)
+    ##
+    dbg <- FALSE
+    if(dbg) {
+      cat("vin=")
+      print(vin)
+      cat("vup=")
+      print(vup)
+      cat("vhoriz=")
+      print(vhoriz)
+    }
+    ## homogeneous coordinates
+    hom <- t(t(xyz) - eye) %*% cbind(vhoriz, vup, vin)
+    colnames(hom) <- c("x", "y", "d")
+    return(hom)
+  }
+
+  project3Dhom
+})
+
+plot3Dpoints <- local({
+
+  plot3Dpoints <- function(xyz, eye=c(2,-3,2), org=c(0,0,0),
+                           ...,
+                           type=c("p", "n", "h"),
+                           xlim=c(0,1), ylim=c(0,1), zlim=c(0,1),
+                           add=FALSE, box=TRUE, 
+                           main, cex=par('cex'), 
+                           box.back=list(col="pink"),
+                           box.front=list(col="blue", lwd=2)
+                           ) {
+    if(missing(main)) main <- short.deparse(substitute(xyz))
+    type <- match.arg(type)
+    #'
+    if(is.null(box.back) || (is.logical(box.back) && box.back))
+      box.back <- list(col="pink")
+    if(is.null(box.front) || (is.logical(box.front) && box.front))
+      box.front <- list(col="blue", lwd=2)
+    stopifnot(is.list(box.back) || is.logical(box.back))
+    stopifnot(is.list(box.front) || is.logical(box.front))
+    #'
+    stopifnot(is.matrix(xyz) && ncol(xyz) == 3)
+    if(nrow(xyz) > 0) {
+      if(missing(xlim)) xlim <- range(pretty(xyz[,1]))
+      if(missing(ylim)) ylim <- range(pretty(xyz[,2]))
+      if(missing(zlim)) zlim <- range(pretty(xyz[,3]))
+      if(missing(org)) org <- c(mean(xlim), mean(ylim), mean(zlim))
+    }
+    if(!add) {
+      #' initialise plot
+      bb <- plot3Dbox(xlim, ylim, zlim, eye=eye, org=org, do.plot=FALSE)
+      plot(bb$xlim, bb$ylim, axes=FALSE, asp=1, type="n",
+           xlab="", ylab="", main=main)
+    }
+    if(is.list(box.back)) {
+      #' plot rear of box
+      do.call(plot3DboxPart,
+              resolve.defaults(list(xlim=xlim,
+                                    ylim=ylim,
+                                    zlim=zlim,
+                                    eye=eye, org=org,
+                                    part="back"),
+                               box.back,
+                               list(...)))
+    }
+    if(type != "n") {
+      #' plot points
+      uv <- project3Dhom(xyz, eye=eye, org=org)
+      uv <- as.data.frame(uv)
+      dord <- order(uv$d, decreasing=TRUE)
+      uv <- uv[dord, , drop=FALSE]
+      if(type == "h") {
+        xy0 <- cbind(xyz[,1:2,drop=FALSE], zlim[1])
+        uv0 <- as.data.frame(project3Dhom(xy0, eye=eye, org=org))
+        uv0 <- uv0[dord, , drop=FALSE]
+        do.call.matched(segments,
+                        list(x0=with(uv0, x/d),
+                             y0=with(uv0, y/d),
+                             x1=with(uv,  x/d),
+                             y1=with(uv,  y/d),
+                             ...))
+      }
+      with(uv, points(x/d, y/d, cex=cex * min(d)/d, ...))
+    }
+    if(is.list(box.front)) 
+      do.call(plot3DboxPart,
+              resolve.defaults(list(xlim=xlim,
+                                    ylim=ylim,
+                                    zlim=zlim,
+                                    eye=eye, org=org,
+                                    part="front"),
+                               box.front,
+                               list(...)))
+    return(invisible(NULL))
+  }
+
+  vertexind <- data.frame(i=rep(1:2,4),
+                          j=rep(rep(1:2,each=2),2),
+                          k=rep(1:2, each=4))
+
+  edgepairs <- data.frame(from=c(1, 1, 2, 3, 1, 2, 5, 3, 5, 4, 6, 7),
+                          to = c(2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8))
+  
+  vertexfrom <- vertexind[edgepairs$from,]
+  vertexto   <- vertexind[edgepairs$to,]
+
+  hamming <- function(a, b) sum(abs(a-b))
+
+  ## determine projected positions of box vertices
+  ## and optionally plot the box
+  plot3Dbox <- function(xlim=c(0,1), ylim=xlim, zlim=ylim,
+                        eye=c(0,-3,1), org=c(0,0,0),
+                        do.plot=TRUE) {
+    fromxyz <- with(vertexfrom, cbind(xlim[i], ylim[j], zlim[k]))
+    toxyz   <- with(vertexto,   cbind(xlim[i], ylim[j], zlim[k]))
+    fromuv <-  project3Dhom(fromxyz, eye=eye, org=org)
+    touv <-  project3Dhom(toxyz, eye=eye, org=org)
+    xfrom <- fromuv[,1]/fromuv[,3]
+    xto   <- touv[,1]/touv[,3]
+    yfrom <- fromuv[,2]/fromuv[,3]
+    yto   <- touv[,2]/touv[,3]
+    if(do.plot) 
+      segments(xfrom, yfrom, xto, yto)
+    return(invisible(list(xlim=range(xfrom, xto), ylim=range(yfrom, yto))))
+  }
+
+  ## plot either back or front of box
+  plot3DboxPart <- function(xlim=c(0,1), ylim=xlim, zlim=ylim,
+                            eye=c(0,-3,1), org=c(0,0,0),
+                            part=c("front", "back"), ...) {
+    part <- match.arg(part)
+    boxvert <- with(vertexind, cbind(xlim[i], ylim[j], zlim[k]))
+    pvert <- project3Dhom(boxvert, eye=eye, org=org)
+    xyvert <- pvert[,c("x","y")]/pvert[,"d"]
+    ## find vertex which is furthest away
+    nback <- which.max(pvert[,"d"])
+    nearback <- with(edgepairs, (from==nback) | (to==nback))
+    ind <- if(part == "back") nearback else !nearback
+    ## draw lines
+    with(edgepairs[ind,],
+         segments(xyvert[from, 1],
+                  xyvert[from, 2],
+                  xyvert[to,   1],
+                  xyvert[to,   2],
+                  ...))
+  }
+
+  plot3Dpoints
+})
+
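project3Dhom returns homogeneous coordinates (x, y, d); screen positions are
x/d and y/d, and point size is scaled by min(d)/d, which produces the
perspective foreshortening. The engine is normally reached through plot.pp3
(see R/pp3.R below), but a direct sketch, using ::: since the function is
internal:

    library(spatstat)
    xyz <- matrix(runif(300), ncol=3)   # 100 random points in the unit cube
    spatstat:::plot3Dpoints(xyz, main="random 3D points", type="h")
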
diff --git a/R/pointsonlines.R b/R/pointsonlines.R
new file mode 100755
index 0000000..dff9e8b
--- /dev/null
+++ b/R/pointsonlines.R
@@ -0,0 +1,50 @@
+#
+#   pointsonlines.R
+#
+# place points at regular intervals along line segments
+#
+#   $Revision: 1.7 $  $Date: 2014/11/10 11:21:02 $
+#
+
+pointsOnLines <- function(X, eps=NULL, np=1000, shortok=TRUE) {
+  stopifnot(is.psp(X))
+  len <- lengths.psp(X)
+  nseg <- length(len)
+  if(is.null(eps)) {
+    stopifnot(is.numeric(np) && length(np) == 1)
+    stopifnot(is.finite(np) && np > 0)
+    eps <- sum(len)/np
+  } else {
+    stopifnot(is.numeric(eps) && length(eps) == 1)
+    stopifnot(is.finite(eps) && eps > 0)
+  }
+  # initialise
+  Xdf    <- as.data.frame(X)
+  xmid <- with(Xdf, (x0+x1)/2)
+  ymid <- with(Xdf, (y0+y1)/2)
+  # handle very short segments
+#  allsegs <- 1:nseg
+  if(any(short <- (len <= eps)) && shortok) {
+    # very short segments: use midpoints
+    Z <- data.frame(x = xmid[short], y = ymid[short])
+  } else Z <- data.frame(x=numeric(0), y=numeric(0))
+  # handle other segments
+  for(i in (1:nseg)[!short]) {
+    # divide segment into pieces of length eps
+    # with shorter bits at each end
+    leni <- len[i]
+    nwhole <- floor(leni/eps)
+    if(leni/eps - nwhole < 0.5 && nwhole > 2)
+      nwhole <- nwhole - 1
+    rump <- (leni - nwhole * eps)/2
+    brks <- c(0, rump + (0:nwhole) * eps, leni)
+    nbrks <- length(brks)
+    # points at middle of each piece
+    ss <- (brks[-1] + brks[-nbrks])/2
+    x <- with(Xdf, x0[i] + (ss/leni) * (x1[i]-x0[i]))
+    y <- with(Xdf, y0[i] + (ss/leni) * (y1[i]-y0[i]))
+    Z <- rbind(Z, data.frame(x=x, y=y))
+  }
+  Z <- as.ppp(Z, W=X$window)
+  return(Z)
+}
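A minimal sketch: place points roughly 0.05 units apart along a random
segment pattern (segments shorter than eps are represented by their
midpoints, since shortok=TRUE by default):

    library(spatstat)
    L <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
    Y <- pointsOnLines(L, eps=0.05)
    plot(L); plot(Y, add=TRUE, pch=16)
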
diff --git a/R/poisson.R b/R/poisson.R
new file mode 100755
index 0000000..58912d0
--- /dev/null
+++ b/R/poisson.R
@@ -0,0 +1,46 @@
+#
+#
+#    poisson.R
+#
+#    $Revision: 1.8 $	$Date: 2015/10/21 09:06:57 $
+#
+#    The Poisson process
+#
+#    Poisson()    create an object of class 'interact' describing
+#                 the (null) interpoint interaction structure
+#                 of the Poisson process.
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+Poisson <- local({
+
+  BlankPoisson <- list(
+    name     = "Poisson process",
+    creator  = "Poisson",
+    family   = NULL,
+    pot      = NULL,
+    par      = NULL,
+    parnames = NULL,
+    init     = function(...) { },
+    update   = function(...) { },
+    print    = function(self) {
+      cat("Poisson process\n")
+      invisible()
+    },
+    valid = function(...) { TRUE },
+    project = function(...) NULL, 
+    irange = function(...) { 0 },
+    version=NULL
+    )
+  
+  class(BlankPoisson) <- "interact"
+
+  Poisson <- function() { BlankPoisson }
+
+  Poisson <- intermaker(Poisson, BlankPoisson)
+
+  Poisson
+})
+                 
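Poisson() is the trivial interaction: every slot describing an interpoint
potential is NULL and its range is 0. It is also the default interaction in
ppm() (see R/ppm.R below), so these two calls should be equivalent:

    library(spatstat)
    fit1 <- ppm(cells ~ x + y)              # Poisson assumed
    fit2 <- ppm(cells ~ x + y, Poisson())   # Poisson stated explicitly
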
diff --git a/R/polygood.R b/R/polygood.R
new file mode 100644
index 0000000..909f801
--- /dev/null
+++ b/R/polygood.R
@@ -0,0 +1,199 @@
+#'
+#'    polygood.R
+#'
+#'   Check validity of polygon data
+#'
+#'  $Revision: 1.2 $  $Date: 2017/06/05 10:31:58 $
+#'
+
+#' check validity of a polygonal owin
+
+owinpolycheck <- function(W, verbose=TRUE) {
+  verifyclass(W, "owin")
+  stopifnot(W$type == "polygonal")
+
+  # extract stuff
+  B <- W$bdry
+  npoly <- length(B)
+  outerframe <- owin(W$xrange, W$yrange)
+  # can't use as.rectangle here; we're still checking validity
+  boxarea.mineps <- area.owin(outerframe) * (1 - 0.00001)
+
+  # detect very large datasets
+  BS <- object.size(B)
+  blowbyblow <- verbose & (BS > 1e4 || npoly > 20)
+  #
+  
+  answer <- TRUE
+  notes <- character(0)
+  err <- character(0)
+  
+  # check for duplicated points, self-intersection, outer frame
+  if(blowbyblow) {
+    cat(paste("Checking", npoly, ngettext(npoly, "polygon...", "polygons...")))
+    pstate <- list()
+  }
+
+  dup <- self <- is.box <- logical(npoly)
+  
+  for(i in 1:npoly) {
+    if(blowbyblow && npoly > 1L)
+      pstate <- progressreport(i, npoly, state=pstate)
+    Bi <- B[[i]]
+    # check for duplicated vertices
+    dup[i] <- as.logical(anyDuplicated(ppp(Bi$x, Bi$y,
+                                           window=outerframe, check=FALSE)))
+    if(dup[i] && blowbyblow)
+      message(paste("Polygon", i, "contains duplicated vertices"))
+    # check for self-intersection
+    self[i] <- xypolyselfint(B[[i]], proper=TRUE, yesorno=TRUE)
+    if(self[i] && blowbyblow)
+      message(paste("Polygon", i, "is self-intersecting"))
+    # check whether one of the current boundary polygons
+    # is the bounding box itself (with + sign)
+    is.box[i] <- (length(Bi$x) == 4) && (Area.xypolygon(Bi) >= boxarea.mineps)
+  }
+  if(blowbyblow)
+    cat("done.\n")
+  
+  if((ndup <- sum(dup)) > 0) {
+    whinge <- paste(ngettext(ndup, "Polygon", "Polygons"),
+                    if(npoly == 1L) NULL else
+                    commasep(which(dup)), 
+                    ngettext(ndup, "contains", "contain"),
+                    "duplicated vertices")
+    notes <- c(notes, whinge)
+    err <- c(err, "duplicated vertices")
+    if(verbose) 
+      message(whinge)
+    answer <- FALSE
+  }
+  
+  if((nself <- sum(self)) > 0) {
+    whinge <-  paste(ngettext(nself, "Polygon", "Polygons"),
+                     if(npoly == 1L) NULL else
+                     commasep(which(self)),
+                     ngettext(nself, "is", "are"),
+                     "self-intersecting")
+    notes <- c(notes, whinge)
+    if(verbose) 
+      message(whinge)
+    err <- c(err, "self-intersection")
+    answer <- FALSE
+  }
+  
+  if(sum(is.box) > 1L) {
+    answer <- FALSE
+    whinge <- paste("Polygons",
+                    commasep(which(is.box)),
+                    "coincide with the outer frame")
+    notes <- c(notes, whinge)
+    err <- c(err, "polygons duplicating the outer frame")
+  }
+  
+  # check for crossings between different polygons
+  cross <- matrix(FALSE, npoly, npoly)
+  if(npoly > 1L) {
+    if(blowbyblow) {
+      cat(paste("Checking for cross-intersection between",
+                npoly, "polygons..."))
+      pstate <- list()
+    }
+    P <- lapply(B, xypolygon2psp, w=outerframe, check=FALSE)
+    for(i in seq_len(npoly-1L)) {
+      if(blowbyblow)
+        pstate <- progressreport(i, npoly-1L, state=pstate)
+      Pi <- P[[i]]
+      for(j in (i+1L):npoly) {
+        crosses <- if(is.box[i] || is.box[j]) FALSE else {
+          anycrossing.psp(Pi, P[[j]])
+        }
+        cross[i,j] <- cross[j,i] <- crosses
+        if(crosses) {
+          answer <- FALSE
+          whinge <- paste("Polygons", i, "and", j, "cross over")
+          notes <- c(notes, whinge)
+          if(verbose) 
+            message(whinge)
+          err <- c(err, "overlaps between polygons")
+        }
+      }
+    }
+    if(blowbyblow)
+      cat("done.\n")
+  }
+
+  err <- unique(err)
+  attr(answer, "notes") <- notes
+  attr(answer, "err") <-  err
+  return(answer)
+}
+
+#' check for self-intersections in an xypolygon
+
+xypolyselfint <- function(p, eps=.Machine$double.eps,
+                          proper=FALSE, yesorno=FALSE, checkinternal=FALSE) {
+  verify.xypolygon(p)
+  n <- length(p$x)
+  verbose <- (n > 1000)
+  if(verbose)
+    cat(paste("[Checking polygon with", n, "edges..."))
+  x0 <- p$x
+  y0 <- p$y
+  dx <- diff(x0[c(1:n,1L)])
+  dy <- diff(y0[c(1:n,1L)])
+  if(yesorno) {
+    # get a yes-or-no answer
+    answer <- .C("xypsi",
+                 n=as.integer(n),
+                 x0=as.double(x0),
+                 y0=as.double(y0),
+                 dx=as.double(dx),
+                 dy=as.double(dy),
+                 xsep=as.double(2 * max(abs(dx))),
+                 ysep=as.double(2 * max(abs(dy))),
+                 eps=as.double(eps),
+                 proper=as.integer(proper),
+                 answer=as.integer(integer(1L)),
+                 PACKAGE = "spatstat")$answer
+    if(verbose)
+      cat("]\n")
+    return(answer != 0)
+  }
+  out <- .C("Cxypolyselfint",
+            n=as.integer(n),
+            x0=as.double(x0),
+            y0=as.double(y0),
+            dx=as.double(dx),
+            dy=as.double(dy), 
+            eps=as.double(eps),
+            xx=as.double(numeric(n^2)),
+            yy=as.double(numeric(n^2)),
+            ti=as.double(numeric(n^2)),
+            tj=as.double(numeric(n^2)),
+            ok=as.integer(integer(n^2)),
+            PACKAGE = "spatstat")
+
+  uhoh <- (matrix(out$ok, n, n) != 0)
+  if(proper) {
+    # ignore cases where two vertices coincide 
+    ti <- matrix(out$ti, n, n)[uhoh]
+    tj <- matrix(out$tj, n, n)[uhoh]
+    i.is.vertex <- (abs(ti) < eps) | (abs(ti - 1) < eps)
+    j.is.vertex <- (abs(tj) < eps) | (abs(tj - 1) < eps)
+    dup <- i.is.vertex & j.is.vertex
+    uhoh[uhoh] <- !dup
+  }
+  if(checkinternal && any(uhoh != t(uhoh)))
+    warning("Internal error: incidence matrix is not symmetric")
+  xx <- matrix(out$xx, n, n)
+  yy <- matrix(out$yy, n, n)
+  uptri <- (row(uhoh) < col(uhoh))
+  xx <- as.vector(xx[uhoh & uptri])
+  yy <- as.vector(yy[uhoh & uptri])
+  result <- list(x=xx, y=yy)
+  if(verbose)
+    cat("]\n")
+  return(result)
+}
+  
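These checks back the validation performed when a polygonal window is
created with owin(poly=..., check=TRUE). A sketch: a "bowtie" whose edges
cross should be caught by the self-intersection test:

    library(spatstat)
    ## edges (0,0)-(1,1) and (1,0)-(0,1) cross at (0.5, 0.5)
    bad <- try(owin(poly=list(x=c(0, 1, 1, 0), y=c(0, 1, 0, 1))))
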
diff --git a/R/polynom.R b/R/polynom.R
new file mode 100644
index 0000000..b0bf089
--- /dev/null
+++ b/R/polynom.R
@@ -0,0 +1,84 @@
+#'
+#'    polynom.R
+#'
+#'   $Revision: 1.1 $  $Date: 2017/01/02 09:48:36 $
+#'
+
+polynom <- function(x, ...) {
+  rest <- list(...)
+  # degree not given
+  if(length(rest) == 0)
+    stop("degree of polynomial must be given")
+  #call with single variable and degree
+  if(length(rest) == 1) {
+    degree <- ..1
+    if((degree %% 1) != 0 || length(degree) != 1 || degree < 1)
+      stop("degree of polynomial should be a positive integer")
+
+    # compute values
+    result <- outer(x, 1:degree, "^")
+
+    # compute column names - the hard part !
+    namex <- deparse(substitute(x))
+    # check whether it needs to be parenthesised
+    if(!is.name(substitute(x))) 
+      namex <- paste("(", namex, ")", sep="")
+    # column names
+    namepowers <- if(degree == 1) namex else 
+                       c(namex, paste(namex, "^", 2:degree, sep=""))
+    namepowers <- paste("[", namepowers, "]", sep="")
+    # stick them on
+    dimnames(result) <- list(NULL, namepowers)
+    return(result)
+  }
+  # call with two variables and degree
+  if(length(rest) == 2) {
+
+    y <- ..1
+    degree <- ..2
+
+    # list of exponents of x and y, in nice order
+    xexp <- yexp <- numeric()
+    for(i in 1:degree) {
+      xexp <- c(xexp, i:0)
+      yexp <- c(yexp, 0:i)
+    }
+    nterms <- length(xexp)
+    
+    # compute 
+
+    result <- matrix(, nrow=length(x), ncol=nterms)
+    for(i in 1:nterms) 
+      result[, i] <- x^xexp[i] * y^yexp[i]
+
+    #  names of these terms
+    
+    namex <- deparse(substitute(x))
+    # namey <- deparse(substitute(..1)) ### seems not to work in R
+    zzz <- as.list(match.call())
+    namey <- deparse(zzz[[3]])
+
+    # check whether they need to be parenthesised
+    # if so, add parentheses
+    if(!is.name(substitute(x))) 
+      namex <- paste("(", namex, ")", sep="")
+    if(!is.name(zzz[[3]])) 
+      namey <- paste("(", namey, ")", sep="")
+
+    nameXexp <- c("", namex, paste(namex, "^", 2:degree, sep=""))
+    nameYexp <- c("", namey, paste(namey, "^", 2:degree, sep=""))
+
+    # make the term names
+       
+    termnames <- paste(nameXexp[xexp + 1],
+                       ifelse(xexp > 0 & yexp > 0, ".", ""),
+                       nameYexp[yexp + 1],
+                       sep="")
+    termnames <- paste("[", termnames, "]", sep="")
+
+    dimnames(result) <- list(NULL, termnames)
+    # 
+    return(result)
+  }
+  stop("Can't deal with more than 2 variables yet")
+}
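A sketch of the design matrices produced, and of the usual idiom in which
polynom() appears in a ppm trend formula (expanded via expand.polynom before
fitting, as seen in R/ppm.R below):

    library(spatstat)
    x <- runif(4); y <- runif(4)
    colnames(polynom(x, 2))        # "[x]" "[x^2]"
    colnames(polynom(x, y, 2))     # "[x]" "[y]" "[x^2]" "[x.y]" "[y^2]"
    fit <- ppm(cells ~ polynom(x, y, 2))   # quadratic trend in coordinates
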
diff --git a/R/pool.R b/R/pool.R
new file mode 100644
index 0000000..f630bfb
--- /dev/null
+++ b/R/pool.R
@@ -0,0 +1,99 @@
+#'
+#'     pool.R
+#'
+#'  $Revision: 1.5 $  $Date: 2017/06/05 10:31:58 $
+
+pool <- function(...) {
+  UseMethod("pool")
+}
+
+pool.fv <- local({
+
+  Square <- function(A) { force(A); eval.fv(A^2, relabel=FALSE) }
+  Add <- function(A,B){ force(A); force(B); eval.fv(A+B, relabel=FALSE) }
+  Cmul <- function(A, f) { force(A); force(f); eval.fv(f * A, relabel=FALSE) }
+
+  pool.fv <- function(..., weights=NULL, relabel=TRUE, variance=TRUE) {
+    argh <- list(...)
+    n <- narg <- length(argh)
+    if(narg == 0) return(NULL)
+    if(narg == 1) return(argh[[1]])
+    ## validate 
+    isfv <- unlist(lapply(argh, is.fv))
+    if(!all(isfv))
+      stop("All arguments must be fv objects")
+    argh <- do.call(harmonise, append(argh, list(strict=TRUE)))
+    template <- vanilla.fv(argh[[1]])
+    ## compute products
+    if(!is.null(weights)) {
+      check.nvector(weights, narg, things="Functions")
+      Y <- Map(Cmul, argh, weights)
+      XY <- Map(Cmul, argh, weights^2)
+      sumX <- sum(weights)
+      sumX2 <- sum(weights^2)
+    } else {
+      ## default: weights=1
+      Y <- XY <- argh
+      sumX <- sumX2 <- narg
+    }
+    ## sum
+    sumY <- Reduce(Add, Y)
+    attributes(sumY) <- attributes(template)
+    ## ratio-of-sums
+    Ratio <- eval.fv(sumY/sumX, relabel=FALSE)
+    if(variance) {
+      ## variance calculation
+      meanX <- sumX/n
+      meanY <- eval.fv(sumY/n, relabel=FALSE)
+      sumY2 <- Reduce(Add, lapply(Y, Square))
+      varX   <- (sumX2 - n * meanX^2)/(n-1)
+      varY   <- eval.fv((sumY2 - n * meanY^2)/(n-1), relabel=FALSE)
+      sumXY <- Reduce(Add, XY)
+      covXY <- eval.fv((sumXY - n * meanX * meanY)/(n-1), relabel=FALSE)
+      ## variance by delta method
+      relvar <- eval.fv(pmax.int(0, varY/meanY^2 + varX/meanX^2
+                                 - 2 * covXY/(meanX * meanY)),
+                        relabel=FALSE)
+      Variance <- eval.fv(Ratio^2 * relvar/n,
+                          relabel=FALSE)
+      ## two sigma CI
+      hiCI <- eval.fv(Ratio + 2 * sqrt(Variance), relabel=FALSE)
+      loCI <- eval.fv(Ratio - 2 * sqrt(Variance), relabel=FALSE)
+    }
+    ## tweak labels of main estimate
+    attributes(Ratio) <- attributes(template)
+    if(relabel)
+      Ratio <- prefixfv(Ratio,
+                        tagprefix="pool",
+                        descprefix="pooled ",
+                        lablprefix="")
+    if(!variance)
+      return(Ratio)
+    ## tweak labels of variance terms
+    attributes(Variance) <- attributes(template)
+    Variance <- prefixfv(Variance,
+                         tagprefix="var",
+                         descprefix="delta-method variance estimate of ",
+                         lablprefix="bold(var)~")
+    attributes(hiCI) <- attributes(loCI) <-  attributes(template)
+    hiCI <- prefixfv(hiCI,
+                     tagprefix="hi",
+                     descprefix="upper limit of two-sigma CI based on ",
+                     lablprefix="bold(hi)~")
+    loCI <- prefixfv(loCI,
+                     tagprefix="lo",
+                     descprefix="lower limit of two-sigma CI based on ",
+                     lablprefix="bold(lo)~")
+    ## glue together
+    result <- Reduce(bind.fv, list(Ratio, Variance, hiCI, loCI))
+    ## don't plot variances, by default
+    fvnames(result, ".") <- setdiff(fvnames(result, "."),
+                                    fvnames(Variance, "."))
+    return(result)
+  }
+
+  pool.fv
+})
+
+
+  
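A minimal sketch: pool K-function estimates from three independent patterns.
The result carries the ratio-of-sums estimate together with the delta-method
variance and two-sigma limits computed above:

    library(spatstat)
    Ks <- lapply(runifpoint(100, nsim=3), Kest)
    K <- do.call(pool, Ks)
    plot(K)
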
diff --git a/R/pp3.R b/R/pp3.R
new file mode 100755
index 0000000..1424e17
--- /dev/null
+++ b/R/pp3.R
@@ -0,0 +1,254 @@
+#
+#   pp3.R
+#
+#  class of three-dimensional point patterns in rectangular boxes
+#
+#  $Revision: 1.26 $  $Date: 2016/09/23 11:02:36 $
+#
+
+box3 <- function(xrange=c(0,1), yrange=xrange, zrange=yrange, unitname=NULL) {
+  stopifnot(is.numeric(xrange) && length(xrange) == 2 && diff(xrange) > 0)
+  stopifnot(is.numeric(yrange) && length(yrange) == 2 && diff(yrange) > 0)
+  stopifnot(is.numeric(zrange) && length(zrange) == 2 && diff(zrange) > 0)
+  out <- list(xrange=xrange, yrange=yrange, zrange=zrange,
+              units=as.units(unitname))
+  class(out) <- "box3"
+  return(out)
+}
+
+as.box3 <- function(...) {
+  a <- list(...)
+  n <- length(a)
+  if(n == 0)
+    stop("No arguments given")
+  if(n == 1) {
+    a <- a[[1]]
+    if(inherits(a, "box3"))
+      return(a)
+    if(inherits(a, "pp3"))
+      return(a$domain)
+    if(inherits(a, "boxx")){
+      if(ncol(a$ranges)==3)
+        return(box3(a$ranges[,1], a$ranges[,2], a$ranges[,3]))
+      stop("Supplied boxx object does not have dimension three")
+    }
+    if(inherits(a, "ppx"))
+      return(as.box3(a$domain))
+    if(is.numeric(a)) {
+      if(length(a) == 6)
+        return(box3(a[1:2], a[3:4], a[5:6]))
+      stop(paste("Don't know how to interpret", length(a), "numbers as a box"))
+    }
+    if(!is.list(a))
+      stop("Don't know how to interpret data as a box")
+  }
+  return(do.call(box3, a))
+}
+
+print.box3 <- function(x, ...) {
+  bracket <- function(z) paste("[",
+                               paste(signif(z, 5), collapse=", "),
+                               "]", sep="")
+  v <- paste(unlist(lapply(x[1:3], bracket)), collapse=" x ")
+  s <- summary(unitname(x))
+  splat("Box:", v, s$plural, s$explain)
+  invisible(NULL)
+}
+
+unitname.box3 <- function(x) { x$units }
+
+"unitname<-.box3" <- function(x, value) {
+  x$units <- as.units(value)
+  return(x)
+}
+
+grow.box3 <- function(W, left, right=left) {
+  as.box3(grow.boxx(as.boxx(W), left, right))
+}
+
+eroded.volumes <- function(x, r) { UseMethod("eroded.volumes") }
+
+eroded.volumes.box3 <- function(x, r) {
+  b <- as.box3(x)
+  ax <- pmax.int(0, diff(b$xrange) - 2 * r)
+  ay <- pmax.int(0, diff(b$yrange) - 2 * r)
+  az <- pmax.int(0, diff(b$zrange) - 2 * r)
+  ax * ay * az
+}
+
+shortside <- function(x) { UseMethod("shortside") }
+
+shortside.box3 <- function(x) {
+  min(sidelengths(x))
+}
+
+sidelengths <- function(x) { UseMethod("sidelengths") }
+
+sidelengths.box3 <- function(x) {
+  with(x, c(diff(xrange), diff(yrange), diff(zrange)))
+}
+
+bounding.box3 <- function(...) {
+  wins <- list(...)
+  boxes <- lapply(wins, as.box3)
+  xr <- range(unlist(lapply(boxes, getElement, name="xrange")))
+  yr <- range(unlist(lapply(boxes, getElement, name="yrange")))
+  zr <- range(unlist(lapply(boxes, getElement, name="zrange")))
+  box3(xr, yr, zr)
+}
+
+pp3 <- function(x, y, z, ...) {
+  stopifnot(is.numeric(x))
+  stopifnot(is.numeric(y))
+  stopifnot(is.numeric(z)) 
+  b <- as.box3(...)
+  out <- ppx(data=data.frame(x=x,y=y,z=z), domain=b)
+  class(out) <- c("pp3", class(out))
+  return(out)
+}
+
+domain.pp3 <- function(X, ...) { X$domain }
+
+is.pp3 <- function(x) { inherits(x, "pp3") }
+
+npoints.pp3 <- function(x) { nrow(x$data) }
+
+print.pp3 <- function(x, ...) {
+  splat("Three-dimensional point pattern")
+  sd <- summary(x$data)
+  np <- sd$ncases
+  splat(np, ngettext(np, "point", "points"))
+  print(x$domain)
+  invisible(NULL)
+}
+
+summary.pp3 <- function(object, ...) {
+  sd <- summary(object$data)
+  np <- sd$ncases
+  dom <- object$domain
+  v <- volume.box3(dom)
+  u <- summary(unitname(dom))
+  intens <- np/v
+  out <-  list(np=np, sumdat=sd, dom=dom, v=v, u=u, intensity=intens)
+  class(out) <- "summary.pp3"
+  return(out)
+}
+
+print.summary.pp3 <- function(x, ...) {
+  splat("Three-dimensional point pattern")
+  splat(x$np, ngettext(x$np, "point", "points"))
+  print(x$dom)
+  u <- x$u
+  v <- x$v
+  splat("Volume", v, "cubic",
+        if(v == 1) u$singular else u$plural,
+        u$explain)
+  splat("Average intensity", x$intensity,
+        "points per cubic", u$singular, u$explain)
+  invisible(NULL)
+}
+
+plot.pp3 <- function(x, ..., eye=NULL, org=NULL, theta=25, phi=15,
+                     type=c("p", "n", "h"),
+                     box.back=list(col="pink"),
+                     box.front=list(col="blue", lwd=2)) {
+  xname <- short.deparse(substitute(x))
+  type <- match.arg(type)
+  # given arguments
+  argh <- list(...)
+  if(!missing(box.front)) argh$box.front <- box.front
+  if(!missing(box.back))  argh$box.back  <- box.back
+  # Now apply formal defaults above
+  formaldefaults <- list(box.front=box.front, box.back=box.back)
+  #'
+  coo <- as.matrix(coords(x))
+  xlim <- x$domain$xrange
+  ylim <- x$domain$yrange
+  zlim <- x$domain$zrange
+  if(is.null(org)) org <- c(mean(xlim), mean(ylim), mean(zlim))
+  if(is.null(eye)) {
+    theta <- theta * pi/180
+    phi   <- phi * pi/180
+    d <- 2 * diameter(x$domain)
+    eye <- org + d * c(cos(phi) * c(sin(theta), -cos(theta)), sin(phi))
+  }
+  deefolts <- spatstat.options('par.pp3')
+  ## determine default eye position and centre of view
+  do.call(plot3Dpoints,
+          resolve.defaults(list(xyz=coo, eye=eye, org=org, type=type),
+                           argh,
+                           deefolts,
+                           formaldefaults,
+                           list(main=xname,
+                                xlim=xlim,
+                                ylim=ylim,
+                                zlim=zlim)))
+}
+
+"[.pp3" <- function(x, i, drop=FALSE, ...) {
+  answer <- NextMethod("[")
+  if(is.ppx(answer))
+    class(answer) <- c("pp3", class(answer))
+  return(answer)
+}
+  
+unitname.pp3 <- function(x) { unitname(x$domain) }
+
+"unitname<-.pp3" <- function(x, value) {
+  d <- x$domain
+  unitname(d) <- value
+  x$domain <- d
+  return(x)
+}
+
+diameter.box3 <- function(x) {
+  stopifnot(inherits(x, "box3"))
+  with(x, sqrt(diff(xrange)^2+diff(yrange)^2+diff(zrange)^2))
+}
+
+volume <- function(x) { UseMethod("volume") }
+
+volume.box3 <- function(x) {
+  stopifnot(inherits(x, "box3"))
+  with(x, prod(diff(xrange), diff(yrange), diff(zrange)))
+}
+
+runifpoint3 <- function(n, domain=box3(), nsim=1, drop=TRUE) {
+  domain <- as.box3(domain)
+  result <- vector(mode="list", length=nsim)
+  dd <- as.list(domain)[c("xrange", "yrange", "zrange")]
+  for(i in 1:nsim) {
+    x <- with(dd, runif(n, min=xrange[1], max=xrange[2]))
+    y <- with(dd, runif(n, min=yrange[1], max=yrange[2]))
+    z <- with(dd, runif(n, min=zrange[1], max=zrange[2]))
+    result[[i]] <- pp3(x,y,z,domain)
+  }
+  if(drop && nsim == 1) return(result[[1]])
+  result <- as.anylist(result)
+  names(result) <- paste("Simulation", 1:nsim)
+  return(result)
+}
+
+rpoispp3 <- function(lambda, domain=box3(), nsim=1, drop=TRUE) {
+  domain <- as.box3(domain)
+  v <- volume(domain)
+  if(!(is.numeric(lambda) && length(lambda) == 1))
+    stop("lambda must be a single numeric value")
+  np <- rpois(nsim, lambda * v)
+  dd <- as.list(domain)[c("xrange", "yrange", "zrange")]
+  result <- vector(mode="list", length=nsim)
+  for(i in 1:nsim) {
+    ni <- np[i]
+    x <- with(dd, runif(ni, min=xrange[1], max=xrange[2]))
+    y <- with(dd, runif(ni, min=yrange[1], max=yrange[2]))
+    z <- with(dd, runif(ni, min=zrange[1], max=zrange[2]))
+    result[[i]] <- pp3(x,y,z,domain)
+  }
+  if(drop && nsim == 1) return(result[[1]])
+  result <- as.anylist(result)
+  names(result) <- paste("Simulation", 1:nsim)
+  return(result)
+}
+
+
+
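A minimal sketch covering the main entry points defined above:

    library(spatstat)
    B <- box3(c(0,1), c(0,1), c(0,2), unitname="metre")
    X <- runifpoint3(100, B)       # 100 uniform random points in B
    summary(X)                     # reports volume and average intensity
    plot(X, type="h", theta=30, phi=20)
    Y <- rpoispp3(50, B)           # Poisson, mean 50 points per unit volume
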
diff --git a/R/ppm.R b/R/ppm.R
new file mode 100755
index 0000000..5b0394c
--- /dev/null
+++ b/R/ppm.R
@@ -0,0 +1,279 @@
+#
+#	$Revision: 1.57 $	$Date: 2017/07/13 02:06:02 $
+#
+#    ppm()
+#          Fit a point process model to a two-dimensional point pattern
+#
+#
+
+ppm <- function(Q, ...) {
+  UseMethod("ppm")
+}
+
+
+ppm.formula <- function(Q, interaction=NULL, ..., data=NULL, subset) {
+  ## remember call
+  callstring <- short.deparse(sys.call())
+  cl <- match.call()
+
+  ## trap a common error to give a more informative message
+  if(is.sob(data) || is.function(data)) 
+    stop(paste("The argument", sQuote("data"),
+               "should not be a spatial object;",
+               "it should be a list of spatial objects"),
+         call.=FALSE)
+  
+  ########### INTERPRET FORMULA ##############################
+  
+  if(!inherits(Q, "formula"))
+    stop(paste("Argument 'Q' should be a formula"))
+  formula <- Q
+  
+  ## check formula has LHS and RHS. Extract them
+  if(length(formula) < 3)
+    stop(paste("Formula must have a left hand side"))
+  Yexpr <- formula[[2]]
+  trend <- formula[c(1,3)]
+  
+  ## FIT #######################################
+  thecall <- if(missing(subset)) {
+    call("ppm", Q=Yexpr, trend=trend, data=data, interaction=interaction)
+  } else {
+    call("ppm", Q=Yexpr, trend=trend, data=data, interaction=interaction,
+         subset=substitute(subset))
+  }
+  ncall <- length(thecall)
+  argh <- list(...)
+  nargh <- length(argh)
+  if(nargh > 0) {
+    thecall[ncall + 1:nargh] <- argh
+    names(thecall)[ncall + 1:nargh] <- names(argh)
+  }
+  callenv <- list2env(as.list(data), parent=parent.frame())
+  result <- eval(thecall, envir=callenv)
+
+  result$call <- cl
+  result$callstring <- callstring
+  result$callframe <- parent.frame()
+  
+  return(result)
+}
+
+
+ppm.quad <- ppm.ppp <- ppm.default <- 
+function(Q,
+         trend = ~1,
+	 interaction = Poisson(),
+         ..., 
+         covariates = data,
+         data = NULL,
+         covfunargs = list(),
+         subset,
+	 correction="border",
+	 rbord = reach(interaction),
+         use.gam=FALSE,
+         method = "mpl",
+         forcefit=FALSE,
+         emend=project,
+         project=FALSE,
+         prior.mean = NULL,
+         prior.var = NULL,
+         nd = NULL,
+         eps = NULL,
+         gcontrol=list(),
+         nsim=100,
+         nrmh=1e5,
+         start=NULL,
+         control=list(nrep=nrmh),
+         verb=TRUE,
+         callstring=NULL
+) {
+  Qname <- short.deparse(substitute(Q))
+
+  subsetexpr <- if(!missing(subset)) substitute(subset) else NULL
+
+  datalistname <- if(missing(covariates)) "data" else "covariates"
+
+  if(!(method %in% c("mpl", "ho", "logi", "VBlogi")))
+      stop(paste("Unrecognised fitting method", sQuote(method)))
+
+  if(!missing(emend) && !missing(project) && emend != project)
+    stop("Conflicting options: emend != project")
+          
+  if(!is.null(prior.mean) || !is.null(prior.var)){
+      if(missing(method))
+          method <- "VBlogi"
+      if(method!="VBlogi")
+          stop("Prior specification only works with method ",
+               sQuote("VBlogi"))
+  }
+  if(method=="VBlogi"){
+      VB <- TRUE
+      method <- "logi"
+  } else{
+      VB <- FALSE
+  }
+
+  if(is.sob(covariates) || is.function(covariates))
+    stop(paste("The argument", sQuote(datalistname),
+               "should not be a spatial object;",
+               "it should be a list of spatial objects"),
+         call.=FALSE)
+    
+  if(inherits(Q, "logiquad")){
+    if(missing(method))
+      method <- "logi"
+    if(method != "logi")
+      stop(paste("Only method =", sQuote("logi"),
+                 "makes sense when Q is of type", sQuote("logiquad")))
+  }
+  cl <- match.call()
+  if(is.null(callstring)) 
+    callstring <- paste(short.deparse(sys.call()), collapse="")
+
+  if(is.ppp(Q) && is.marked(Q) && !is.multitype(Q)) 
+    stop(paste("ppm is not yet implemented for marked point patterns,",
+               "other than multitype patterns."))
+  if(!(is.ppp(Q) ||
+       inherits(Q, "quad") ||
+       checkfields(Q, c("data", "dummy")))) {
+    stop("Argument Q must be a point pattern or a quadrature scheme")
+  }
+  X <- if(is.ppp(Q)) Q else Q$data
+
+  ## Validate interaction
+  if(is.null(interaction)) {
+    interaction <- Poisson()
+  } else if(inherits(interaction, "intermaker")) {
+    ## e.g. 'interaction=Hardcore': invoke it without arguments
+    interaction <- (f <- interaction)()
+    dont.complain.about(f)
+  } else if(!is.interact(interaction))
+    stop("Argument 'interaction' must be an object of class 'interact'")
+  
+  ## Ensure interaction is fully defined  
+  if(!is.null(ss <- interaction$selfstart)) {
+    # invoke selfstart mechanism to fix all parameters
+    interaction <- ss(X, interaction)
+  }
+
+  if(inherits(trend, "formula")) {
+    ## handle "." in formula, representing all variables in 'data'
+    if("." %in% variablesinformula(trend)) {
+      if(is.null(covariates))
+        stop("Cannot expand '.' since 'data' is not present", call.=FALSE)
+      rhs <- paste(names(covariates), collapse=" + ")
+      allmaineffects <- as.formula(paste("~", rhs))
+      environment(allmaineffects) <- environment(trend)
+      trend <- update(allmaineffects, trend)
+    }
+    ## expand polynom() in formula
+    if(spatstat.options("expand.polynom"))
+      trend <- expand.polynom(trend)
+  }
+  
+  # validate choice of edge correction
+  correction <- pickoption("correction", correction,
+                           c(border="border",
+                             periodic="periodic",
+                             isotropic="isotropic",
+                             Ripley="isotropic",
+                             trans="translate",
+                             translate="translate",
+                             translation="translate",
+                             none="none"))
+  
+  # validate rbord 
+  if(correction == "border") {
+    # rbord for border correction
+    rbord.given <- !missing(rbord) && !is.null(rbord)
+    if(is.null(rbord))
+      rbord <- reach(interaction)
+    infin <- is.infinite(rbord)
+    too.large <- infin || (eroded.areas(as.owin(X), rbord) == 0)
+    if(too.large) {
+      whinge <-
+        paste(if(rbord.given) "rbord" else "the reach of this interaction",
+              if(infin) "is infinite or unknown;"
+              else "is too large for this window;",
+              "please specify",
+              if(rbord.given) "a smaller value of",
+              "rbord, or use a different edge correction")
+      stop(whinge)
+    }
+  } else {
+    # rbord must be numeric to satisfy mpl.engine
+    if(is.null(rbord))
+      rbord <- 0
+  }
+
+  if(method == "logi") {
+    fitLOGI <- logi.engine(Q=Q, trend=trend,
+                           interaction=interaction,
+                           covariates=covariates,
+                           covfunargs=covfunargs,
+                           subsetexpr=subsetexpr,
+                           correction=correction,
+                           rbord=rbord,
+                           use.gam=use.gam,
+                           forcefit=forcefit,
+                           nd = nd,
+                           gcontrol=gcontrol,
+                           callstring=callstring,
+                           prior.mean=prior.mean,
+                           prior.var=prior.var,
+                           VB=VB,
+                           ...)
+    fitLOGI$Qname <- Qname
+    fitLOGI$call <- cl
+    fitLOGI$callstring <- callstring
+    fitLOGI$callframe <- parent.frame()
+    if(emend && !valid.ppm(fitLOGI))
+      fitLOGI <- emend.ppm(fitLOGI)
+    return(fitLOGI)
+  }
+  
+  # fit by maximum pseudolikelihood
+  fitMPL <- mpl.engine(Q=Q, trend=trend,
+                       interaction=interaction,
+                       covariates=covariates,
+                       covfunargs=covfunargs,
+                       subsetexpr=subsetexpr,
+                       correction=correction,
+                       rbord=rbord,
+                       use.gam=use.gam,
+                       forcefit=forcefit,
+                       nd = nd,
+                       eps = eps, 
+                       gcontrol=gcontrol,
+                       callstring=callstring,
+                       ...)
+  fitMPL$Qname <- Qname
+
+  if(!is.ppm(fitMPL)) {
+    # internal use only - returns some other data
+    return(fitMPL)
+  }
+  
+  fitMPL$call <- cl
+  fitMPL$callstring <- callstring
+  fitMPL$callframe <- parent.frame()
+
+  if(emend && !valid.ppm(fitMPL))
+    fitMPL <- emend.ppm(fitMPL)
+  
+  if(method == "mpl" || is.poisson.ppm(fitMPL))
+    return(fitMPL)
+
+  fitHO <- ho.engine(fitMPL, nsim=nsim, nrmh=nrmh, start=start,
+                     control=control, verb=verb)
+
+  if(is.null(fitHO))
+    return(fitMPL)
+  
+  if(emend && !valid.ppm(fitHO))
+    fitHO <- emend.ppm(fitHO)
+  
+  return(fitHO)
+}
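+
+## A usage sketch of the default method with a pixel-image covariate
+## (comment only; assumes the 'bei' and 'bei.extra' datasets shipped
+## with spatstat):
+##   fit <- ppm(bei, ~ elev, covariates=list(elev=bei.extra$elev))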
+
diff --git a/R/ppmclass.R b/R/ppmclass.R
new file mode 100755
index 0000000..9321128
--- /dev/null
+++ b/R/ppmclass.R
@@ -0,0 +1,955 @@
+#
+#	ppmclass.R
+#
+#	Class 'ppm' representing fitted point process models.
+#
+#
+#	$Revision: 2.134 $	$Date: 2017/07/13 02:03:11 $
+#
+#       An object of class 'ppm' contains the following:
+#
+#            $method           model-fitting method (currently "mpl")
+#
+#            $coef             vector of fitted regular parameters
+#                              as given by coef(glm(....))
+#
+#            $trend            the trend formula
+#                              or NULL 
+#
+#            $interaction      the interaction family 
+#                              (an object of class 'interact') or NULL
+#
+#            $Q                the quadrature scheme used
+#
+#            $maxlogpl         the maximised value of log pseudolikelihood
+#
+#            $internal         list of internal calculation results
+#
+#            $correction       name of edge correction method used
+#            $rbord            erosion distance for border correction (or NULL)
+#
+#            $the.call         the originating call to ppm()
+#
+#            $the.version      version of mpl() which yielded the fit
+#
+#
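+#       Example (sketch): after fit <- ppm(cells ~ 1),
+#       fit$method is "mpl" and fit$coef equals coef(fit).
+#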
+#------------------------------------------------------------------------
+
+is.ppm <- function(x) { inherits(x, "ppm") }
+
+print.ppm <-
+function(x, ...,
+         what=c("all", "model", "trend", "interaction", "se", "errors")) {
+
+  verifyclass(x, "ppm")
+
+  misswhat <- missing(what) 
+
+  opts <- c("model", "trend", "interaction", "se", "errors")
+  what <- match.arg(what, c("all", opts), several.ok=TRUE)
+  if("all" %in% what) what <- opts
+
+  np <- length(coef(x))
+  terselevel <- spatstat.options("terse")
+  digits <- getOption('digits')
+  
+  # If SE was explicitly requested, calculate it.
+  # Otherwise, do it only if the model is Poisson (by default)
+  do.SE <- force.no.SE <- force.SE <- FALSE
+  if(np == 0) {
+    force.no.SE <- TRUE
+  } else if(!is.null(x$internal$VB)) {
+    force.no.SE <- TRUE
+  } else if(!misswhat && ("se" %in% what)) {
+    force.SE <- TRUE
+  } else switch(spatstat.options("print.ppm.SE"),
+                always = { force.SE <- TRUE }, 
+                never  = { force.no.SE <- TRUE },
+                poisson = {
+                  do.SE <-
+                    is.poisson(x) &&
+                      !identical(x$fitter, "gam") &&
+                        (!is.null(x$varcov) || x$method != "logi") &&
+                          waxlyrical("extras", terselevel)
+                })
+  do.SE <- (do.SE || force.SE) && !force.no.SE
+
+  s <- summary.ppm(x, quick=if(do.SE) FALSE else "no variances")
+        
+  notrend <-    s$no.trend
+#  stationary <- s$stationary
+  poisson <-    s$poisson
+  markeddata <- s$marked
+  multitype  <- s$multitype
+        
+#  markedpoisson <- poisson && markeddata
+  csr <- poisson && notrend && !markeddata
+
+  special <- csr && all(c("model", "trend") %in% what)
+  if(special) {
+    ## ---------- Trivial/special cases -----------------------
+    splat("Stationary Poisson process")
+    cat("Intensity:", signif(s$trend$value, digits), fill=TRUE)
+  } else {
+    ## ----------- Print model type -------------------
+    if("model" %in% what) {
+      splat(s$name)
+      parbreak(terselevel)
+        
+      if(markeddata) mrk <- s$entries$marks
+      if(multitype) {
+        splat(paste("Possible marks:",
+                    commasep(sQuote(levels(mrk)))))
+        parbreak(terselevel)
+      }
+    }
+    ## ----- trend --------------------------
+    if("trend" %in% what) {
+      if(!notrend) {
+        splat("Log",
+              if(poisson) "intensity: " else "trend: ",
+              pasteFormula(s$trend$formula))
+        parbreak(terselevel)
+      }
+
+      if(waxlyrical('space', terselevel) || !do.SE) {
+        ## print trend coefficients, unless redundant and space is tight
+        tv <- s$trend$value
+      
+        if(length(tv) == 0) 
+          splat("[No trend coefficients]")
+        else {
+          thead <- paste0(s$trend$label, ":")
+          if(is.list(tv)) {
+            splat(thead)
+            for(i in seq_along(tv))
+              print(tv[[i]])
+          } else if(is.numeric(tv) && length(tv) == 1) {
+            ## single number: append to end of current line
+            tvn <- names(tv)
+            tveq <- if(is.null(tvn)) "\t" else paste(" ", tvn, "= ")
+            splat(paste0(thead, tveq, signif(tv, digits)))
+          } else {
+            ## some other format 
+            splat(thead)
+            print(tv)
+          }
+        }
+        parbreak(terselevel)
+      }
+    }
+
+    if(waxlyrical("space", terselevel) &&
+       !is.null(cfa <- s$covfunargs) && length(cfa) > 0) {
+      cfafitter <- s$cfafitter
+      if(is.null(cfafitter)) {
+        cat("Covariate", "function", "arguments", "(covfunargs)",
+            "provided:", fill=TRUE)
+      } else {
+        cat("Irregular", "parameters", "(covfunargs)",
+            "fitted", "by", paste0(sQuote(cfafitter), ":"),
+            fill=TRUE)
+      }
+      for(i in seq_along(cfa)) {
+        cat(paste(names(cfa)[i], "= "))
+        cfai <- cfa[[i]]
+        if(is.numeric(cfai) && length(cfai) == 1) {
+          cfai <- signif(cfai, digits)
+          cat(paste(cfai, "\n"))
+        } else print(cfai)
+      }
+    }
+  }
+  
+  # ---- Interaction ----------------------------
+
+  if("interaction" %in% what) {
+    if(!poisson) {
+      print(s$interaction, family=FALSE, banner=FALSE, 
+            brief=!waxlyrical("extras"))
+      parbreak(terselevel)
+    }
+  }
+  
+  # ----- parameter estimates with SE and 95% CI --------------------
+  if(waxlyrical("extras", terselevel) && ("se" %in% what) && (np > 0)) {
+    if(!is.null(cose <- s$coefs.SE.CI)) {
+      print(cose, digits=digits)
+    } else if(do.SE) {
+      # standard error calculation failed
+      splat("Standard errors unavailable; variance-covariance matrix is singular")
+    } else if(!force.no.SE) {
+      # standard error was voluntarily omitted
+      if(waxlyrical('space', terselevel))
+        splat("For standard errors, type coef(summary(x))\n")
+    }
+  }
+  
+  # ---- Warnings issued in mpl.prepare  ---------------------
+
+  if(waxlyrical("errors", terselevel) && "errors" %in% what) {
+    probs <- s$problems
+    if(!is.null(probs) && is.list(probs) && (length(probs) > 0)) 
+      lapply(probs,
+             function(x) {
+               if(is.list(x) && !is.null(p <- x$print))
+                 splat(paste("Problem:\n", p, "\n\n"))
+             })
+    
+    if(s$old)
+      warning(paste("Model fitted by old spatstat version", s$version))
+        
+  # ---- Algorithm status ----------------------------
+
+    fitter <- s$fitter
+    converged <- s$converged
+    if(!is.null(fitter) && fitter %in% c("glm", "gam") && !converged)
+      splat("*** Fitting algorithm for", sQuote(fitter),
+            "did not converge ***")
+  }
+
+  if(waxlyrical("extras", terselevel) && s$projected) {
+    parbreak()
+    splat("Fit was emended to obtain a valid point process model")
+  }
+
+  if(identical(s$valid, FALSE) && waxlyrical("errors", terselevel)) {
+    parbreak()
+    splat("*** Model is not valid ***")
+    if(!all(is.finite(s$entries$coef))) {
+      splat("*** Some coefficients are NA or Inf ***")
+    } else {
+      splat("*** Interaction parameters are outside valid range ***")
+    }
+  } else if(is.na(s$valid) && waxlyrical("extras", terselevel)) {
+    parbreak()
+    splat("[Validity of model could not be checked]")
+  }
+  
+  return(invisible(NULL))
+}
+
+quad.ppm <- function(object, drop=FALSE, clip=FALSE) {
+  if(!is.ppm(object)) {
+    if(is.kppm(object)) object <- object$po else
+    if(is.lppm(object)) object <- object$fit else
+    stop("object is not of class ppm, kppm or lppm")
+  }
+  Q <- object$Q
+  if(is.null(Q))
+    return(Q)
+  if(drop || clip) {
+    ok <- getglmsubset(object)
+    if(!is.null(ok))
+      Q <- Q[ok]
+  }
+  if(clip && object$correction == "border") {
+    Wminus <- erosion(as.owin(object), object$rbord)
+    Q <- Q[Wminus]
+  }
+  return(Q)
+}
+
+data.ppm <- function(object) { 
+  verifyclass(object, "ppm")
+  object$Q$data
+}
+
+dummy.ppm <- function(object, drop=FALSE) { 
+  return(quad.ppm(object, drop=drop)$dummy)
+}
+  
+# method for 'coef'
+coef.ppm <- function(object, ...) {
+  verifyclass(object, "ppm")
+  object$coef
+}
+
+
+getglmfit <- function(object) {
+  verifyclass(object, "ppm")
+  glmfit <- object$internal$glmfit
+  if(is.null(glmfit))
+      return(NULL)
+  if(object$method != "mpl")
+    glmfit$coefficients <- object$coef
+  return(glmfit)
+}
+
+getglmdata <- function(object, drop=FALSE) {
+  verifyclass(object, "ppm")
+  gd <- object$internal$glmdata
+  if(!drop) return(gd)
+  return(gd[getglmsubset(object), , drop=FALSE])
+}
+
+getglmsubset <- function(object) {
+  gd <- object$internal$glmdata
+  if(object$method=="logi")
+    return(gd$.logi.ok)
+  return(gd$.mpl.SUBSET)
+}
+
+getppmdatasubset <- function(object) {
+  # Equivalent to getglmsubset(object)[is.data(quad.ppm(object))]
+  # but also works for models fitted exactly, etc
+  #
+  if(object$method %in% c("mpl", "ho")) {
+    sub <- getglmsubset(object)
+    if(!is.null(sub)) {
+      Z <- is.data(quad.ppm(object))
+      return(sub[Z])
+    }
+  }
+  X <- data.ppm(object)
+  sub <- if(object$correction == "border") {
+    (bdist.points(X) >= object$rbord)
+  } else rep(TRUE, npoints(X))
+  return(sub)
+}
+
+
+getppmOriginalCovariates <- function(object) {
+  df <- as.data.frame(as.ppp(quad.ppm(object)))
+  cova <- object$covariates
+  if(length(cova) > 0) {
+    df2 <- mpl.get.covariates(object$covariates,
+                              union.quad(quad.ppm(object)),
+                              "quadrature points",
+                              object$covfunargs)
+    df <- cbind(df, df2)
+  } 
+  return(df)
+}
+  
+# ??? method for 'effects' ???
+
+valid <- function(object, ...) {
+  UseMethod("valid")
+}
+
+valid.ppm <- function(object, warn=TRUE, ...) {
+  verifyclass(object, "ppm")
+  coeffs <- coef(object)
+  # ensure all coefficients are fitted, and finite
+  if(!all(is.finite(coeffs)))
+    return(FALSE)
+  # inspect interaction
+  inte <- object$interaction
+  if(is.poisson(object))
+    return(TRUE) # Poisson process
+  # extract fitted interaction coefficients
+  Vnames <- object$internal$Vnames
+  IsOffset <- object$internal$IsOffset  
+  Icoeffs <- coeffs[Vnames[!IsOffset]]
+  # check interaction
+  checker <- inte$valid
+  if(is.null(checker) || !newstyle.coeff.handling(inte)) {
+    if(warn) warning("Internal error: unable to check validity of model")
+    return(NA)
+  }
+  answer <- checker(Icoeffs, inte)
+  return(answer)
+}
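+
+## Sketch (comment only): validity typically fails when the fitted
+## interaction parameters fall outside their permitted range, e.g.
+##   fit <- ppm(redwood ~ 1, Strauss(0.05))
+##   valid(fit)  # may be FALSE (fitted gamma can exceed 1 for clustered data)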
+
+emend <- function(object, ...) {
+  UseMethod("emend")
+}
+
+emend.ppm <- project.ppm <- local({
+  tracemessage <- function(depth, ...) {
+    if(depth == 0) return(NULL)
+    spacer <- paste(rep.int("  ", depth), collapse="")
+    marker <- ngettext(depth, "trace", paste("trace", depth))
+    marker <- paren(marker, "[")
+    splat(paste0(spacer, marker, " ", paste(...)))
+  }
+  leaving <- function(depth) {
+    tracemessage(depth, ngettext(depth, "Returning.", "Exiting level."))
+  }
+  emend.ppm <- function(object, ..., fatal=FALSE, trace=FALSE) {
+    verifyclass(object, "ppm")
+    fast <- spatstat.options("project.fast")
+    # user specifies 'trace' as logical
+    # but 'trace' can also be integer representing trace depth
+    td <- as.integer(trace)
+    trace <- (td > 0)
+    tdnext <- if(trace) td+1 else 0
+    if(valid.ppm(object)) {
+      tracemessage(td, "Model is valid.")
+      leaving(td)
+      return(object)
+    }
+    # First ensure trend coefficients are all finite
+    coeffs <- coef(object)
+    # Which coefficients are trend coefficients
+    coefnames  <- names(coeffs)
+    internames <- object$internal$Vnames
+    trendnames <- coefnames[!(coefnames %in% internames)]
+    # Trend terms in trend formula
+    trendterms <- attr(terms(object), "term.labels")
+    # Mapping from coefficients to terms of GLM
+    coef2term  <- attr(model.matrix(object), "assign")
+    istrend <- (coef2term > 0) & (coefnames %in% trendnames)
+    # Identify non-finite trend coefficients
+    bad <- istrend & !is.finite(coeffs)
+    if(!any(bad)) {
+      tracemessage(td, "Trend terms are valid.")
+    } else {
+      nbad <- sum(bad)
+      tracemessage(td,
+                   "Non-finite ",
+                   ngettext(nbad,
+                            "coefficient for term ",
+                            "coefficients for terms "),
+                   commasep(sQuote(trendterms[coef2term[bad]])))
+      if(fast) {
+        # remove first illegal term
+        firstbad <- min(which(bad))
+        badterm <- trendterms[coef2term[firstbad]]
+        # remove this term from model
+        tracemessage(td, "Removing term ", sQuote(badterm))
+        removebad <- as.formula(paste("~ . - ", badterm), env=object$callframe)
+        newobject <- update(object, removebad)
+        if(trace) {
+          tracemessage(td, "Updated model:")
+          print(newobject)
+        }
+        # recurse
+        newobject <- emend.ppm(newobject, fatal=fatal, trace=tdnext)
+        # return
+        leaving(td)
+        return(newobject)
+      } else {
+        # consider all illegal terms
+        bestobject <- NULL
+        for(i in which(bad)) {
+          badterm <- trendterms[coef2term[i]]
+          # remove this term from model
+          tracemessage(td, "Considering removing term ", sQuote(badterm))
+          removebad <- as.formula(paste("~ . - ", badterm),
+                                  env=object$callframe)
+          object.i <- update(object, removebad)
+          if(trace) {
+            tracemessage(td, "Considering updated model:")
+            print(object.i)
+          }
+          # recurse
+          object.i <- emend.ppm(object.i, fatal=fatal, trace=tdnext)
+          # evaluate logPL
+          logPL.i   <- logLik(object.i, warn=FALSE)
+          tracemessage(td, "max log pseudolikelihood = ", logPL.i)
+          # optimise
+          if(is.null(bestobject) || (logLik(bestobject, warn=FALSE) < logPL.i))
+            bestobject <- object.i
+        }
+        if(trace) {
+          tracemessage(td, "Best submodel:")
+          print(bestobject)
+        }
+        # return
+        leaving(td)
+        return(bestobject)
+      }
+    } 
+    # Now handle interaction
+    inte <- object$interaction
+    if(is.null(inte)) {
+      tracemessage(td, "No interaction to check.")
+      leaving(td)
+      return(object)
+    }
+    tracemessage(td, "Inspecting interaction terms.")
+    proj <- inte$project
+    if(is.null(proj)) {
+      whinge <- "Internal error: interaction has no projection operator"
+      if(fatal) stop(whinge) 
+      warning(whinge)
+      leaving(td)
+      return(object)
+    }
+    # ensure the same edge correction is used!
+    correction <- object$correction
+    rbord      <- object$rbord
+    # apply projection 
+    coef.orig <- coeffs <- coef(object)
+    Vnames   <- object$internal$Vnames
+    Icoeffs  <- coeffs[Vnames]
+    change <- proj(Icoeffs, inte)
+    if(is.null(change)) {
+      tracemessage(td, "Interaction does not need updating.")
+      leaving(td)
+      return(object)
+    }
+    tracemessage(td, "Interaction is not valid.")
+    if(is.numeric(change)) {
+      tracemessage(td, "Interaction coefficients updated without re-fitting.")
+      # old style: 'project' returned a vector of updated coefficients
+      Icoeffs <- change
+      # tweak interaction coefficients
+      object$coef[Vnames] <- Icoeffs
+      # recompute fitted interaction
+      object$fitin <- NULL
+      object$fitin <- fitin(object)
+    } else if(is.interact(change)) {
+      # new style: 'project' returns an interaction
+      if(trace) {
+        tracemessage(td, "Interaction changed to:")
+        print(change)
+      }
+      # refit the whole model 
+      #      (using the same edge correction)
+      #      (and the same quadrature scheme)
+      newobject <- update(object, interaction=change,
+                          correction=correction, rbord=rbord,
+                          forcefit=TRUE,
+                          envir=object$callframe)
+      if(trace) {
+        tracemessage(td, "Updated model:")
+        print(newobject)
+      }
+      # recurse
+      newobject <- emend.ppm(newobject, fatal=fatal, trace=tdnext)
+      object <- newobject
+    } else if(is.list(change) && all(unlist(lapply(change, is.interact)))) {
+      # new style: 'project' returns a list of candidate interactions
+      nchange <- length(change)
+      tracemessage(td, "Considering", nchange,
+                   ngettext(nchange, "submodel", "submodels"))
+      bestobject <- NULL
+      for(i in seq_len(nchange)) {
+        change.i <- change[[i]]
+        if(trace) {
+          tracemessage(td,
+                       "Considering", ordinal(i), 
+                       "candidate submodel, with interaction:")
+          print(change.i)
+        }
+        # refit the whole model
+        object.i <- update(object, interaction=change.i,
+                           correction=correction, rbord=rbord,
+                           forcefit=TRUE,
+                           envir=object$callframe)
+        if(trace) {
+          tracemessage(td, "Considering", ordinal(i),
+                       "candidate updated model:")
+          print(object.i)
+        }
+        # recurse
+        object.i <- emend.ppm(object.i, fatal=fatal, trace=tdnext)
+        # evaluate logPL
+        logPL.i   <- logLik(object.i, warn=FALSE)
+        tracemessage(td, "max log pseudolikelihood = ", logPL.i)
+        # optimise
+        if(is.null(bestobject) || (logLik(bestobject, warn=FALSE) < logPL.i))
+          bestobject <- object.i
+      }
+      # end loop through submodels
+      if(trace) {
+        tracemessage(td, "Best submodel:")
+        print(bestobject)
+      }
+      object <- bestobject
+    } else stop("Internal error: unrecognised format of update")
+    object$projected <- TRUE
+    object$coef.orig  <- coef.orig
+    leaving(td)
+    return(object)
+  }
+  emend.ppm
+})
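+
+## Sketch (comment only), continuing the valid() example above:
+##   fit2 <- emend(fit)  # project onto the nearest valid submodel
+##   valid(fit2)         # should now be TRUE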
+
+# more methods
+
+deviance.ppm <- function(object, ...) {
+  satlogpl <- object$satlogpl
+  if(is.null(satlogpl)) {
+    object <- update(object, forcefit=TRUE)
+    satlogpl <- object$satlogpl
+  }
+  if(is.null(satlogpl) || !is.finite(satlogpl))
+    return(NA)
+  ll <- do.call(logLik,
+                resolve.defaults(list(object=object, absolute=FALSE),
+                                 list(...)))
+  ll <- as.numeric(ll)
+  2 * (satlogpl - ll)
+}
+
+logLik.ppm <- function(object, ..., new.coef=NULL, warn=TRUE, absolute=FALSE) {
+  if(!is.poisson.ppm(object) && warn) 
+    warn.once("ppmLogLik",
+              "log likelihood is not available for non-Poisson model;",
+              "log pseudolikelihood returned")
+  ## degrees of freedom
+  nip <- if(!inherits(object, "ippm")) 0 else
+           length(attr(object$covfunargs, "free"))
+  df <- length(coef(object)) + nip
+  ## compute adjustment constant
+  if(absolute && object$method %in% c("exact", "mpl", "ho")) {
+    X <- data.ppm(object)
+    W <- Window(X)
+    areaW <-
+      if(object$correction == "border" && object$rbord > 0) 
+      eroded.areas(W, object$rbord) else area(W)
+    constant <- areaW * markspace.integral(X)
+  } else constant <- 0
+  ##
+  if(is.null(new.coef)) {
+    ## extract from object
+    ll <- object$maxlogpl + constant
+    attr(ll, "df") <- df
+    class(ll) <- "logLik"
+    return(ll)
+  } 
+  ## recompute for new parameter values
+  method <- object$method
+  if(method == "exact")
+    object <- update(object, forcefit=TRUE)
+  Q <- quad.ppm(object, drop=TRUE)
+  Z <- is.data(Q)
+  cif <- fitted(object, type="cif", new.coef=new.coef, drop=TRUE)
+  cifdata <- cif[Z]
+  switch(method,
+         mpl=,
+         exact=,
+         ho = {
+           w <- w.quad(Q)
+           ll <- sum(log(cifdata[cifdata > 0])) - sum(w * cif)
+         },
+         logi=,
+         VBlogi={
+           B <- getglmdata(object, drop=TRUE)$.logi.B
+           p <- cif/(B+cif)
+           ll <- sum(log(p/(1-p))[Z]) + sum(log(1-p)) + sum(log(B[Z]))
+         },
+         stop(paste("Internal error: unrecognised ppm method:",
+                    dQuote(method)))
+         )
+  ll <- ll + constant
+  attr(ll, "df") <- df
+  class(ll) <- "logLik"
+  return(ll)
+}
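+
+## Sketch (comment only): for Poisson models this is an exact
+## log-likelihood; otherwise the log pseudolikelihood is returned,
+## with a one-time warning:
+##   fit <- ppm(cells ~ x)
+##   logLik(fit)
+##   deviance(fit)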
+
+pseudoR2 <- function(object, ...) {
+  UseMethod("pseudoR2")
+}
+
+pseudoR2.ppm <- function(object, ...) {
+  dres <- deviance(object, ..., warn=FALSE)
+  nullmod <- update(object, . ~ 1, forcefit=TRUE)
+  dnul <- deviance(nullmod, warn=FALSE)
+  return(1 - dres/dnul)
+}
+
+formula.ppm <- function(x, ...) {
+  return(x$trend)
+}
+
+terms.ppm <- function(x, ...) {
+  terms(x$terms, ...)
+}
+
+labels.ppm <- function(object, ...) {
+  # extract fitted trend coefficients
+  co <- coef(object)
+  Vnames <- object$internal$Vnames
+  is.trend <- !(names(co) %in% Vnames)
+  # model terms
+  tt <- terms(object)
+  lab <- attr(tt, "term.labels")
+  if(length(lab) == 0)
+    return(character(0))
+  # model matrix
+  mm <- model.matrix(object)
+  ass <- attr(mm, "assign")
+  # 'ass' associates coefficients with model terms
+  # except ass == 0 for the Intercept
+  coef.ok <- is.finite(co)
+  relevant <- (ass > 0) & is.trend
+  okterms <- unique(ass[coef.ok & relevant])
+  return(lab[okterms])
+}
+
+AIC.ppm <- function(object, ..., k=2, takeuchi=TRUE) {
+  ll <- logLik(object, warn=FALSE)
+  pen <- attr(ll, "df")
+  if(takeuchi && !is.poisson(object)) {
+    vv <- vcov(object, what="internals")
+    logi <- (object$method == "logi")
+    J  <- with(vv, if(!logi) Sigma else (Sigma1log+Sigma2log))
+    H  <- with(vv, if(!logi) A1 else Slog)
+    ## Takeuchi penalty = trace of J H^{-1} = trace of H^{-1} J
+    JiH <- try(solve(H, J), silent=TRUE)
+    if(!inherits(JiH, "try-error")) 
+      pen <- sum(diag(JiH))
+  } 
+  return(- 2 * as.numeric(ll) + k * pen)
+}
+
+extractAIC.ppm <- function (fit, scale = 0, k = 2, ..., takeuchi=TRUE)
+{
+  edf <- length(coef(fit))
+  aic <- AIC(fit, k=k, takeuchi=takeuchi)
+  c(edf, aic)
+}
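+
+## Sketch (comment only): extractAIC() is the hook that allows
+## stats::step() to perform stepwise selection on ppm objects:
+##   fit <- ppm(cells ~ polynom(x, y, 2))
+##   step(fit)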
+
+#
+# method for model.frame
+
+model.frame.ppm <- function(formula, ...) {
+  object <- formula
+  gf <- getglmfit(object)
+  if(is.null(gf)) {
+    warning("Model re-fitted with forcefit=TRUE")
+    object <- update(object, forcefit=TRUE)
+    gf <- getglmfit(object)
+  }
+#  gd <- getglmdata(object)
+#  model.frame(gf, data=gd, ...)
+  if(object$fitter == "gam") modelFrameGam(gf, ...) else model.frame(gf, ...)
+}
+
+#' a hacked version of model.frame.glm that works for gam objects (mgcv)
+modelFrameGam <- function(formula, ...) {
+  dots <- list(...)
+  nargs <- dots[match(c("data", "na.action", "subset"), names(dots), 
+                      0L)]
+  if (length(nargs) || is.null(formula$model)) {
+    fcall <- formula$call
+#    fcall$method <- "model.frame"
+    fcall[[1L]] <- quote(mgcv::gam)
+    fcall[names(nargs)] <- nargs
+    env <- environment(formula$terms)
+    if (is.null(env)) 
+      env <- parent.frame()
+    refut <- eval(fcall, env)
+    refut$model
+  } else formula$model
+}
+
+#
+# method for model.matrix
+
+model.matrix.ppm <- function(object,
+                             data=model.frame(object, na.action=NULL),
+                             ..., Q=NULL, keepNA=TRUE) {
+  if(missing(data)) data <- NULL			     
+  PPMmodelmatrix(object, data=data, ..., Q=Q, keepNA=keepNA)
+}
+
+model.matrix.ippm <- function(object,
+                              data=model.frame(object, na.action=NULL),
+                              ..., Q=NULL, keepNA=TRUE, irregular=FALSE) {
+  if(missing(data)) data <- NULL			     
+  PPMmodelmatrix(object, data=data, ...,
+                 Q=Q, keepNA=keepNA, irregular=irregular)
+}
+
+PPMmodelmatrix <- function(object,
+                           data=model.frame(object, na.action=NULL),
+                           ..., Q=NULL, keepNA=TRUE, irregular=FALSE) {
+  # handles ppm and ippm			      
+  data.given <- !is.null(data)
+  irregular <- irregular && inherits(object, "ippm") && !is.null(object$iScore)
+  if(!is.null(Q)) {
+    if(data.given) stop("Arguments Q and data are incompatible")
+    if(!inherits(Q, c("ppp", "quad")))
+      stop("Q should be a point pattern or quadrature scheme")
+    if(is.ppp(Q)) Q <- quad(Q, Q[FALSE])
+    ## construct Berman-Turner frame
+    needed <- c("trend", "interaction", "covariates", "covfunargs",
+                "correction", "rbord")
+    bt <- do.call(bt.frame, append(list(Q), object[needed]))
+    ## compute model matrix
+    mf <- model.frame(bt$fmla, bt$glmdata, ...)
+    mm <- model.matrix(bt$fmla, mf, ...)
+    if(irregular) {
+       ## add irregular score components
+       U <- union.quad(Q)
+       mi <- sapply(object$iScore, do.call,
+                    args=append(list(x=U$x, y=U$y), object$covfunargs),
+		    envir=environment(terms(object)))
+       if(nrow(mi) != nrow(mm))
+         stop("Internal error: incorrect number of rows in iScore")
+       mm <- cbind(mm, mi)
+    }
+    ## remove NA's ?
+    if(!keepNA)
+      mm <- mm[complete.cases(mm), , drop=FALSE]
+    return(mm)
+  }
+  gf <- getglmfit(object)
+  if(is.null(gf)) {
+    warning("Model re-fitted with forcefit=TRUE")
+    object <- update(object, forcefit=TRUE)
+    gf <- getglmfit(object)
+    if(is.null(gf))
+      stop("internal error: unable to extract a glm fit")
+  }
+  
+  if(data.given) {
+    # new data. Must contain the Berman-Turner variables as well.
+    bt <- list(.mpl.Y=1, .mpl.W=1, .mpl.SUBSET=TRUE)
+    if(any(forgot <- !(names(bt) %in% names(data)))) 
+      data <- do.call(cbind, append(list(data), bt[forgot]))
+    mm <- model.matrix(gf, data=data, ...)
+    if(irregular) {
+       ## add irregular score components 
+       mi <- sapply(object$iScore, do.call,
+                    args=append(list(x=data$x, y=data$y), object$covfunargs),
+		    envir=environment(terms(object)))
+       if(nrow(mi) != nrow(mm))
+         stop("Internal error: incorrect number of rows in iScore")
+       mm <- cbind(mm, mi)
+    }
+    if(inherits(gf, "gam")) 
+      attr(mm, "assign") <- gf$assign
+    return(mm)
+  }
+
+  if(!keepNA && !irregular) {
+    # extract model matrix of glm fit object
+    # restricting to its 'subset' 
+    mm <- model.matrix(gf, ...)
+    if(inherits(gf, "gam")) 
+      attr(mm, "assign") <- gf$assign
+    return(mm)
+  }
+  
+  # extract model matrix for all cases
+  mm <- model.matrix(gf, ..., subset=NULL, na.action=NULL)
+  cn <- colnames(mm)
+  gd <- getglmdata(object, drop=FALSE)
+  if(nrow(mm) != nrow(gd)) {
+    # can occur if covariates include NA's or interaction is -Inf
+    insubset <- getglmsubset(object)
+    isna <- is.na(insubset) | !insubset
+    if(sum(isna) + nrow(mm) == nrow(gd)) {
+      # insert rows of NA's
+      mmplus <- matrix(NA, nrow(gd), ncol(mm))
+      mmplus[isna, ] <- NA
+      mmplus[!isna, ] <- mm
+      mm <- mmplus
+    } else 
+    stop("internal error: model matrix does not match glm data frame")
+  }
+  if(irregular) {
+     ## add irregular score components 
+     U <- union.quad(quad.ppm(object, drop=FALSE))
+     mi <- sapply(object$iScore, do.call,
+                  args=append(list(x=U$x, y=U$y), object$covfunargs),
+		  envir=environment(terms(object)))
+     if(nrow(mi) != nrow(mm))
+       stop("Internal error: incorrect number of rows in iScore")
+     mm <- cbind(mm, mi)
+     cn <- c(cn, colnames(mi))
+  }
+  if(!keepNA)
+    mm <- mm[complete.cases(mm), , drop=FALSE]
+  if(inherits(gf, "gam")) 
+    attr(mm, "assign") <- gf$assign
+  colnames(mm) <- cn
+  return(mm)
+}
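+
+## Sketch (comment only): the model matrix has one row per quadrature
+## point and one column per canonical covariate of the fit:
+##   fit <- ppm(cells ~ x + y)
+##   head(model.matrix(fit))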
+
+model.images <- function(object, ...) {
+  UseMethod("model.images")
+}
+
+model.images.ppm <- function(object, W=as.owin(object), ...) {
+  X <- data.ppm(object)
+#  irregular <- resolve.1.default(list(irregular=FALSE), list(...))
+  ## make a quadscheme with a dummy point at every pixel
+  Q <- pixelquad(X, W)
+  ## compute model matrix
+  mm <- model.matrix(object, Q=Q, ...)
+  ## retain only the entries for dummy points (pixels)
+  mm <- mm[!is.data(Q), , drop=FALSE]
+  mm <- as.data.frame(mm)
+  ## create template image
+  Z <- as.im(attr(Q, "M"))
+  ok <- !is.na(Z$v)
+  ## make images
+  imagenames <- colnames(mm)
+  if(!is.multitype(object)) {
+    result <- lapply(as.list(mm), replace, list=ok, x=Z)
+    result <- as.solist(result)
+    names(result) <- imagenames
+  } else {
+    marx <- marks(Q$dummy)
+    mmsplit <- split(mm, marx)
+    result <- vector(mode="list", length=length(mmsplit))
+    for(i in seq_along(mmsplit))
+      result[[i]] <- as.solist(lapply(as.list(mmsplit[[i]]),
+                                      replace, list=ok, x=Z))
+    names(result) <- names(mmsplit)
+    result <- do.call(hyperframe, result)
+    row.names(result) <- imagenames
+  }
+  return(result)
+}
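+
+## Sketch (comment only): pixel images of the canonical covariates,
+## one image per column of the model matrix:
+##   fit <- ppm(cells ~ x + y)
+##   plot(model.images(fit))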
+
+unitname.ppm <- function(x) {
+  return(unitname(x$Q))
+}
+
+"unitname<-.ppm" <- function(x, value) {
+  unitname(x$Q) <- value
+  return(x)
+}
+
+nobs.ppm <- function(object, ...) { npoints(data.ppm(object)) }
+
+as.interact.ppm <- function(object) {
+ verifyclass(object, "ppm")
+ inte <- object$interaction
+ if(is.null(inte))
+   inte <- Poisson()
+ return(inte)
+}
+
+as.ppm <- function(object) {
+  UseMethod("as.ppm")
+}
+
+as.ppm.ppm <- function(object) {
+  object
+}
+
+## method for as.owin
+
+as.owin.ppm <- function(W, ..., from=c("points", "covariates"), fatal=TRUE) {
+  if(!verifyclass(W, "ppm", fatal=fatal))
+    return(NULL)
+  from <- match.arg(from)
+  datawin <- as.owin(data.ppm(W))
+  if(from == "points")
+    return(datawin)
+  covs <- W$covariates
+  isim <- unlist(lapply(covs, is.im))
+  if(!any(isim))
+    return(datawin)
+  cwins <- lapply(covs[isim], as.owin)
+  covwin <- do.call(intersect.owin, unname(cwins))
+  result <- intersect.owin(covwin, datawin)
+  return(result)
+}
+
+domain.ppm <- Window.ppm <- function(X, ..., from=c("points", "covariates")) {
+  from <- match.arg(from)
+  as.owin(X, ..., from=from)
+}
+
+## change the coefficients in a ppm or other model
+
+tweak.coefs <- function(model, new.coef) {
+  if(is.null(new.coef)) return(model)
+  co <- coef(model)
+  check.nvector(new.coef, length(co), things="coefficients")
+  model$coef.orig <- co
+  model$coef <- new.coef
+  return(model)
+}
+
diff --git a/R/ppp.R b/R/ppp.R
new file mode 100755
index 0000000..8864775
--- /dev/null
+++ b/R/ppp.R
@@ -0,0 +1,686 @@
+#
+#	ppp.R
+#
+#	A class 'ppp' to define point patterns
+#	observed in arbitrary windows in two dimensions.
+#
+#	$Revision: 4.111 $	$Date: 2017/06/05 10:31:58 $
+#
+#	A point pattern contains the following entries:	
+#
+#		$window:	an object of class 'owin'
+#				defining the observation window
+#
+#		$n:	the number of points (for efficiency)
+#	
+#		$x:	
+#		$y:	vectors of length n giving the Cartesian
+#			coordinates of the points.
+#
+#	It may also contain the entry:	
+#
+#		$marks:	a vector of length n
+#			whose entries are interpreted as the
+#			'marks' attached to the corresponding points.	
+#	
+#--------------------------------------------------------------------------
+ppp <- function(x, y, ..., window, marks,
+                check=TRUE, checkdup=check, drop=TRUE) {
+  # Constructs an object of class 'ppp'
+  #
+  if(!missing(window))
+    verifyclass(window, "owin")
+  else
+    window <- owin(...)
+
+  if((missing(x) && missing(y)) || (length(x) == 0 && length(y) == 0))
+    x <- y <- numeric(0)
+
+  n <- length(x)
+  if(length(y) != n)
+    stop("coordinate vectors x and y are not of equal length")
+  
+  # validate x, y coordinates
+  stopifnot(is.numeric(x))
+  stopifnot(is.numeric(y))
+  good <- is.finite(x) & is.finite(y)
+  if(naughty <- !all(good)) {
+    #' bad values will be discarded
+    nbad <- sum(!good)
+    nna <- sum(is.na(x) | is.na(y))
+    ninf <- nbad - nna
+    if(nna > 0) 
+      warning(paste(nna,  "out of", n, ngettext(n, "point", "points"),
+                    "had NA or NaN coordinate values, and",
+                    ngettext(nna, "was", "were"), "discarded"))
+    if(ninf > 0) 
+      warning(paste(ninf,  "out of", n, ngettext(n, "point", "points"),
+                    "had infinite coordinate values, and",
+                    ngettext(ninf, "was", "were"), "discarded"))
+    #' chuck out
+    x <- x[good]
+    y <- y[good]
+    n <- sum(good)
+  }
+
+  names(x) <- NULL
+  names(y) <- NULL
+  
+  # check (x,y) points lie inside window
+  if(check && n > 0) {
+    ok <- inside.owin(x, y, window)
+    nout <- sum(!ok)
+    if(nout > 0) {
+      warning(paste(nout,
+                    ngettext(nout, "point was", "points were"),
+                    "rejected as lying outside the specified window"),
+              call.=FALSE)
+      rr <- ripras(x,y)
+      bb <- boundingbox(x,y)
+      bb <- boundingbox(rr, bb, window)
+      rejectwindow <-
+        if(!is.null(rr)) rebound.owin(rr, bb) else bb
+      rejects <- ppp(x[!ok], y[!ok], window=rejectwindow, check=FALSE)
+      # discard illegal points
+      x <- x[ok]
+      y <- y[ok]
+      n <- length(x)
+    }
+  } else nout <- 0
+  # initialise ppp object
+  pp <- list(window=window, n=n, x=x, y=y)
+  # coerce marks to appropriate format
+  if(missing(marks))
+    marks <- NULL
+  if(is.hyperframe(marks)) 
+    stop("Hyperframes of marks are not implemented for ppp objects; use ppx")
+  if(is.matrix(marks)) 
+    marks <- as.data.frame(marks)
+  ## drop dimensions?
+  if(drop && is.data.frame(marks)) {
+    nc <- ncol(marks)
+    if(nc == 0)
+      marks <- NULL
+    else if(nc == 1)
+      marks <- marks[,,drop=TRUE]
+  }
+  # attach marks 
+  if(is.null(marks)) {
+    # no marks
+    pp$markformat <- "none"
+  } else if(is.data.frame(marks)) {
+    # data frame of marks
+    pp$markformat <- "dataframe"
+    if(naughty) {
+      #' remove marks attached to discarded points with non-finite coordinates
+      marks <- marks[good, ]
+    }
+    if(nout > 0) {
+      #' sequester marks of points falling outside window
+      marks(rejects) <- marks[!ok,]
+      marks <- marks[ok, ]
+    }
+    if(nrow(marks) != n)
+      stop("number of rows of marks != length of x and y")
+    pp$marks <- marks
+  } else {
+    # should be a vector or factor
+    # To recognise vector, strip attributes
+    isspecial <- is.factor(marks) ||
+                 inherits(marks, "POSIXt") || inherits(marks, "Date")
+    if(!isspecial)
+      attributes(marks) <- NULL
+    if(!(is.vector(marks) || isspecial))
+      stop("Format of marks not understood")
+    # OK, it's a vector or factor
+    pp$markformat <- "vector"
+    if(naughty) {
+      #' remove marks attached to discarded points with non-finite coordinates
+      marks <- marks[good]
+    }
+    if(nout > 0) {
+      #' sequester marks of points falling outside window
+      marks(rejects) <- marks[!ok]
+      marks <- marks[ok]
+    }
+    if(length(marks) != n)
+      stop("length of marks vector != length of x and y")
+    names(marks) <- NULL
+    pp$marks <- marks
+  }
+  class(pp) <- "ppp"
+  if(checkdup && anyDuplicated(pp))
+    warning("data contain duplicated points", call.=FALSE)
+  if(nout > 0) 
+    attr(pp, "rejects") <- rejects
+  pp
+}
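+
+## Construction sketch (comment only): 20 uniform random points in the
+## unit square, each carrying a numeric mark:
+##   X <- ppp(runif(20), runif(20), window=owin(c(0,1), c(0,1)),
+##            marks=rnorm(20))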
+
+#
+#--------------------------------------------------------------------------
+#
+
+is.ppp <- function(x) { inherits(x, "ppp") }
+
+#
+#--------------------------------------------------------------------------
+#
+
+as.ppp <- function(X, ..., fatal=TRUE) {
+  UseMethod("as.ppp")
+}
+
+as.ppp.ppp <- function(X, ..., fatal=TRUE) {
+  check <- resolve.defaults(list(...), list(check=FALSE))$check
+  return(ppp(X$x, X$y, window=X$window, marks=X$marks, check=check))
+}
+
+as.ppp.quad <- function(X, ..., fatal=TRUE) {
+  return(union.quad(X))
+}
+
+as.ppp.data.frame <- function(X, W = NULL, ..., fatal=TRUE) {
+  X <- as.data.frame(X) #' swim against the tidyverse
+  check <- resolve.defaults(list(...), list(check=TRUE))$check
+  if(ncol(X) < 2) 
+    return(complaining("X must have at least two columns",
+                       fatal, value=NULL))
+
+  if(is.null(W))
+    return(complaining("x,y coords given but no window specified",
+                       fatal, value=NULL))
+
+  # columns 1 and 2 are assumed to be coordinates
+  # marks from other columns
+  marx <- if(ncol(X) > 2) X[, -(1:2)] else NULL
+
+  if(is.function(W))
+    Z <- cobble.xy(X[,1], X[,2], W, fatal, marks=marx, check=check)
+  else {
+    win <- as.owin(W)
+    Z <- ppp(X[,1], X[,2], window = win, marks=marx, check=check)
+  }
+
+  return(Z)
+}
+    
+as.ppp.matrix <- function(X, W = NULL, ..., fatal=TRUE) {
+  check <- resolve.defaults(list(...), list(check=TRUE))$check
+  if(!verifyclass(X, "matrix", fatal=fatal)
+     || !is.numeric(X))
+    return(complaining("X must be a numeric matrix",
+                       fatal, value=NULL))
+
+  if(ncol(X) < 2)
+    return(complaining("X must have at least two columns",
+                       fatal, value=NULL))
+
+  if(is.null(W))
+    return(complaining("x,y coords given but no window specified",
+                       fatal, value=NULL))
+    
+  if(is.function(W))
+    Z <- cobble.xy(X[,1], X[,2], W, fatal)
+  else {
+    win <- as.owin(W)
+    Z <- ppp(X[,1], X[,2], window = win, check=check)
+  }
+
+  # add marks from other columns
+  if(ncol(X) > 2)
+    marks(Z) <- X[, -(1:2)]
+
+  return(Z)
+}
+    
+as.ppp.default <- function(X, W=NULL, ..., fatal=TRUE) {
+	# tries to coerce data X to a point pattern
+	# X may be:
+	#	1. a structure with entries x, y, xl, xu, yl, yu
+	#	2. a structure with entries x, y, area where
+        #                    'area' has entries xl, xu, yl, yu
+	#	3. a structure with entries x, y
+        #       4. a vector of length 2, interpreted as a single point.
+	# The second argument W is coerced to an object of class 'owin' by the 
+	# function "as.owin" in window.S
+        # If X also has an entry X$marks
+        # then this will be interpreted as the marks vector for the pattern.
+	#
+  check <- resolve.defaults(list(...), list(check=TRUE))$check
+  if(checkfields(X, c("x", "y", "xl", "xu", "yl", "yu"))) {
+		xrange <- c(X$xl, X$xu)
+		yrange <- c(X$yl, X$yu)
+		if(is.null(X$marks))
+			Z <- ppp(X$x, X$y, xrange, yrange, check=check)
+		else
+			Z <- ppp(X$x, X$y, xrange, yrange, 
+				marks=X$marks, check=check)
+		return(Z)
+        } else if(checkfields(X, c("x", "y", "area"))
+                  && checkfields(X$area, c("xl", "xu", "yl", "yu"))) {
+                win <- as.owin(X$area)
+                if (is.null(X$marks))
+                  Z <- ppp(X$x, X$y, window=win, check=check)
+                else
+                  Z <- ppp(X$x, X$y, window=win, marks = X$marks, check=check)
+                return(Z)
+	} else if(checkfields(X, c("x", "y"))) {
+                if(is.function(W))
+                  return(cobble.xy(X$x, X$y, W, fatal))
+		if(is.null(W)) {
+                  if(fatal)
+                    stop("x,y coords given but no window specified")
+                  else
+                    return(NULL)
+                }
+		win <- as.owin(W)
+		if(is.null(X$marks))
+                  Z <- ppp(X$x, X$y, window=win, check=check)
+                else
+                  Z <- ppp(X$x, X$y, window=win, marks=X$marks, check=check)
+                return(Z)
+        } else if(is.vector(X) && length(X) == 2) {
+                win <- as.owin(W)
+                Z <- ppp(X[1], X[2], window=win, check=check)
+                return(Z)
+	} else {
+          if(fatal)
+            stop("Can't interpret X as a point pattern")
+          else
+            return(NULL)
+        }
+}
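+
+## Coercion sketch (comment only):
+##   as.ppp(list(x=runif(10), y=runif(10)), W=square(1))
+##   as.ppp(c(0.5, 0.5), W=square(1))  # a single point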
+
+cobble.xy <- function(x, y, f=ripras, fatal=TRUE, ...) {
+  if(!is.function(f))
+    stop("f is not a function")
+  w <- f(x,y)
+  if(!is.owin(w)) {
+    gripe <- "Supplied function f did not return an owin object"
+    if(fatal)
+      stop(gripe)
+    else {
+      warning(gripe)
+      return(NULL)
+    }
+  }
+  return(ppp(x, y, window=w, ...))
+}
+  
+
+# --------------------------------------------------------------
+
+"[.ppp" <-
+  function(x, i, j, drop=FALSE, ..., clip=FALSE) {
+
+        verifyclass(x, "ppp")
+        
+        if(!missing(i)) {
+          if(inherits(i, "owin")) {
+            # i is a window
+            window <- i
+            if(clip) window <- intersect.owin(window, x$window)
+            ok <- inside.owin(x$x, x$y, window)
+            x <- ppp(x$x[ok], x$y[ok], window=window, #SIC
+                     marks=marksubset(x$marks, ok),
+                     check=FALSE)
+          } else if(inherits(i, "im")) {
+            # i is an image
+            if(i$type != "logical")
+              stop(paste("Subset operator X[i] undefined",
+                         "when i is a pixel image",
+                         "unless it has logical values"), call.=FALSE)
+            # convert logical image to window
+            e <- sys.frame(sys.nframe())
+            window <- solutionset(i, e)
+            if(clip) window <- intersect.owin(window, x$window)
+            ok <- inside.owin(x$x, x$y, window)
+            x <- ppp(x$x[ok], x$y[ok], window=window, #SIC
+                     marks=marksubset(x$marks, ok),
+                     check=FALSE)
+          } else {
+            # assume i is a subset index
+            nx <- x$n
+            if(nx == 0)
+              return(x)
+            subset <- seq_len(nx)[i]
+            if(anyNA(subset))
+              stop("Index out of bounds in [.ppp", call.=FALSE)
+            x <- ppp(x$x[subset], x$y[subset], window=x$window,
+                     marks=marksubset(x$marks, subset),
+                     check=FALSE)
+          } 
+        }
+
+        if(!missing(j))
+          x <- x[j]   # invokes code above
+
+        if(drop) {
+          mx <- x$marks
+          switch(markformat(mx),
+                 none = { },
+                 vector = {
+                   if(is.factor(mx))
+                     marks(x) <- factor(mx)
+                 },
+                 dataframe = {
+                   isfac <- sapply(mx, is.factor)
+                   if(any(isfac)) {
+                     mx[isfac] <- lapply(mx[isfac], factor)
+                     marks(x) <- mx
+                   }
+                 },
+                 hyperframe = { })
+        }
+               
+        return(x)
+}
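+
+## Subsetting sketch (comment only): index vectors select points, while
+## windows and logical images clip spatially:
+##   X <- ppp(runif(50), runif(50), window=square(1))
+##   X[1:10]                        # first ten points
+##   X[owin(c(0, 0.5), c(0, 0.5))]  # points in the lower-left quadrant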
+
+
+# ------------------------------------------------------------------
+#
+#
+scanpp <- function(filename, window, header=TRUE, dir="",
+                   factor.marks = NULL, ...) {
+  filename <- if(dir=="") filename else
+              paste(dir, filename, sep=.Platform$file.sep)
+  df <- read.table(filename, header=header,
+                   stringsAsFactors = is.null(factor.marks))
+  if(header) {
+    # check whether there are columns named 'x' and 'y'
+    colnames <- dimnames(df)[[2]]
+    xycolumns <- match(c("x", "y"), colnames, 0)
+    named <- all(xycolumns > 0)
+  } else {
+    named <- FALSE
+  }
+  if(named) {
+    x <- df$x
+    y <- df$y
+  } else {
+    # assume x, y given in columns 1, 2 respectively
+    x <- df[,1]
+    y <- df[,2]
+    xycolumns <- c(1,2)
+  }
+  if(ncol(df) == 2) 
+      X <- ppp(x, y, window=window)
+  else {
+      # Catch old argument "multitype":
+      dots <- list(...)
+      multi <- charmatch(names(dots), "multitype")
+      argindex <- which(!is.na(multi))
+      if(length(argindex)>0){
+          if(missing(factor.marks)){
+              factor.marks <- dots[[argindex]]
+              ignored <- ""
+          } else{
+              ignored <- paste(" and it is ignored since",
+                               sQuote("factor.marks"),
+                               "is also supplied")
+          }
+          warning("It appears you have called scanpp ",
+                  " with (something partially matching) ",
+                  " the deprecated argument ",
+                  paste0(sQuote("multitype"), ignored, "."),
+                  " Please change to the new syntax.")
+      }
+    marks <- df[ , -xycolumns, drop=FALSE]
+    if(any(factor.marks)){
+        # Find indices to convert to factors (recycling to obtain correct length)
+        factorid <- (1:ncol(marks))[factor.marks]
+        # Convert relevant columns to factors
+        marks[,factorid] <- lapply(marks[,factorid,drop=FALSE], factor)
+    }
+    X <- ppp(x, y, window=window, marks = marks)
+  }
+  X
+}
+
+#-------------------------------------------------------------------
+
+"markspace.integral" <-
+  function(X) {
+  verifyclass(X, "ppp")
+  if(!is.marked(X, dfok=TRUE))
+    return(1)
+  if(is.multitype(X))
+    return(length(levels(marks(X))))
+  else
+    stop("Don't know how to compute total mass of mark space")
+}
+
+#-------------------------------------------------------------------
+
+print.ppp <- function(x, ...) {
+  verifyclass(x, "ppp")
+  ism <- is.marked(x, dfok=TRUE)
+  nx <- x$n
+  splat(if(ism) "Marked planar" else "Planar",
+        "point pattern:",
+        nx, ngettext(nx, "point", "points"))
+  if(ism) {
+    mks <- marks(x, dfok=TRUE)
+    if(is.data.frame(mks)) {
+      ## data frame of marks
+      exhibitStringList("Mark variables:", names(mks))
+    } else {
+      ## vector of marks
+      if(is.factor(mks)) {
+        exhibitStringList("Multitype, with levels =", levels(mks))
+      } else {
+        ## Numeric, or could be dates
+        if(inherits(mks, "Date")) {
+          splat("marks are dates, of class", sQuote("Date"))
+        } else if(inherits(mks, "POSIXt")) {
+          splat("marks are dates, of class", sQuote("POSIXt"))
+        } else {
+          splat(paste0("marks are", if(is.numeric(mks)) " numeric," else NULL),
+                "of storage type ", sQuote(typeof(mks)))
+        }
+      }
+    }
+  }
+  print(x$window)
+  terselevel <- spatstat.options('terse')
+  if(waxlyrical('errors', terselevel) &&
+     !is.null(rejects <- attr(x, "rejects"))) {
+    nrejects <- rejects$n
+    splat("***",
+          nrejects,
+          ngettext(nrejects, "illegal point", "illegal points"),
+          "stored in",
+          paste0("attr(,", dQuote("rejects"), ")"),
+          "***")
+  }
+  if(waxlyrical('extras', terselevel) &&
+     !is.null(info <- attr(x, "info")) && inherits(info, "rmhInfoList"))
+    splat("Pattern was generated by",
+          if(is.poisson(info$model)) "Poisson" else "Metropolis-Hastings",
+	  "simulation.")
+  return(invisible(NULL))
+}
+
+
+summary.ppp <- function(object, ..., checkdup=TRUE) {
+  verifyclass(object, "ppp")
+  result <- list()
+  result$is.marked <- is.marked(object, dfok=TRUE)
+  result$n <- object$n
+  result$window <- summary(object$window)
+  result$intensity <- result$n/result$window$area
+  if(checkdup) {
+    result$nduplicated <- sum(duplicated(object))
+    result$rounding <- rounding(object)
+  }
+  if(result$is.marked) {
+    mks <- marks(object, dfok=TRUE)
+    if(result$multiple.marks <- is.data.frame(mks)) {
+      result$marknames <- names(mks)
+      result$is.numeric <- FALSE
+      result$marktype <- "dataframe"
+      result$is.multitype <- FALSE
+    } else {
+      result$is.numeric <- is.numeric(mks)
+      result$marknames <- "marks"
+      result$marktype <- typeof(mks)
+      result$is.multitype <- is.multitype(object)
+    }
+    if(result$is.multitype) {
+      tm <- as.vector(table(mks))
+      tfp <- data.frame(frequency=tm,
+                        proportion=tm/sum(tm),
+                        intensity=tm/result$window$area,
+                        row.names=levels(mks))
+      result$marks <- tfp
+    } else 
+      result$marks <- summary(mks)
+  }
+  class(result) <- "summary.ppp"
+  if(!is.null(rejects <- attr(object, "rejects"))) 
+    result$rejects <- rejects$n
+  if(!is.null(info <- attr(object, "info")) && inherits(info, "rmhInfoList"))
+    result$rmhinfo <- info
+  return(result)
+}
+
+print.summary.ppp <- function(x, ..., dp=getOption("digits")) {
+  verifyclass(x, "summary.ppp")
+  terselevel <- spatstat.options("terse")
+  splat(if(x$is.marked) "Marked planar" else "Planar",
+        "point pattern: ",
+        x$n,
+        "points")
+  oneline <- resolve.defaults(list(...), list(oneline=FALSE))$oneline
+  if(oneline) return(invisible(NULL))
+  unitinfo <- summary(x$window$units)
+  splat("Average intensity",
+        signif(x$intensity,dp),
+        "points per square",
+        unitinfo$singular,
+        unitinfo$explain)
+  ndup <- x$nduplicated
+  if(waxlyrical('extras', terselevel) && !is.null(ndup) && (ndup > 0)) {
+    parbreak(terselevel)
+    splat("*Pattern contains duplicated points*")
+  }
+  rndg <- x$rounding
+  if(waxlyrical('gory', terselevel) && !is.null(rndg)) {
+    cat("\n")
+    if(rndg >= 1) {
+      cat("Coordinates are", "given to",
+          rndg,
+          "decimal", ngettext(rndg, "place", "places"),
+          fill=TRUE)
+      if(rndg <= 3) {
+        cat("i.e. rounded to", "the nearest", "multiple of",
+            10^(-rndg), unitinfo$plural, unitinfo$explain,
+            fill=TRUE)
+      }
+    } else if(rndg == 0) {
+      cat("Coordinates are", "integers", fill=TRUE)
+      cat("i.e. rounded to", "the nearest", unitinfo$singular,
+          unitinfo$explain, 
+          fill=TRUE)
+    } else {
+      cat("Coordinates are", "multiples of",
+          10^(-rndg), unitinfo$plural, unitinfo$explain, 
+          fill=TRUE)
+    }
+    parbreak(terselevel)
+  }
+  if(x$is.marked) {
+    if(x$multiple.marks) {
+      splat("Mark variables:", commasep(x$marknames, ", "))
+      cat("Summary:\n")
+      print(x$marks)
+    } else if(x$is.multitype) {
+      cat("Multitype:\n")
+      print(signif(x$marks,dp))
+    } else {
+      splat("marks are ",
+            if(x$is.numeric) "numeric, ",
+            "of type ", sQuote(x$marktype),
+            sep="")
+      cat("Summary:\n")
+      print(x$marks)
+    }
+    parbreak(terselevel)
+  }
+  if(waxlyrical('extras', terselevel))
+    print(x$window)
+  if(waxlyrical('errors', terselevel) && !is.null(nrejects <- x$rejects)) {
+    parbreak(terselevel)
+    splat("***",
+          nrejects,
+          ngettext(nrejects, "illegal point", "illegal points"),
+          "stored in",
+          paste("attr(,", dQuote("rejects"), ")", sep=""),
+          "***")
+  }
+  if(waxlyrical('gory', terselevel) && !is.null(info <- x$rmhinfo)) {
+    cat("\nPattern was generated by",
+        "Metropolis-Hastings algorithm rmh",
+        fill=TRUE)
+    print(info)
+  }
+  return(invisible(x))
+}
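+
+# Usage sketch ('cells' is a standard point pattern dataset in spatstat):
+#   summary(cells)                   # dispatches to summary.ppp, then print
+#   summary(cells, checkdup=FALSE)   # skip the duplicate-point check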
+
+# ---------------------------------------------------------------
+
+identify.ppp <- function(x, ...) {
+  verifyclass(x, "ppp")
+  id <- identify(x$x, x$y, ...)
+  if(!is.marked(x)) return(id)
+  marks <- as.data.frame(x)[id, -(1:2)]
+  out <- cbind(data.frame(id=id), marks)
+  row.names(out) <- NULL
+  return(out)
+}
+
+rebound <- function(x, rect) {
+  UseMethod("rebound")
+}
+
+rebound.ppp <- function(x, rect) {
+  verifyclass(x, "ppp")
+  x$window <- rebound.owin(x$window, rect)
+  return(x)
+}
+
+as.data.frame.ppp <- function(x, row.names=NULL, ...) {
+  df <- data.frame(x=x$x, y=x$y, row.names=row.names)
+  marx <- marks(x, dfok=TRUE)
+  if(is.null(marx))
+    return(df)
+  if(is.data.frame(marx))
+    df <- cbind(df, marx)
+  else
+    df <- data.frame(df, marks=marx)
+  return(df)
+}
+
+is.empty.ppp <- function(x) { return(x$n == 0) }
+
+npoints <- function(x) {
+  UseMethod("npoints")
+}
+
+nobjects <- function(x) {
+  UseMethod("nobjects")
+}
+
+nobjects.ppp <- npoints.ppp <- function(x) { x$n }
+
+
+domain.ppp <- Window.ppp <- function(X, ...) { as.owin(X) }
+
+"Window<-.ppp" <- function(X, ..., value) {
+  verifyclass(value, "owin")
+  return(X[value])
+}
+
+"Frame<-.ppp" <- function(X, value) {
+  Frame(Window(X)) <- value
+  return(X)
+}
+
diff --git a/R/pppmatch.R b/R/pppmatch.R
new file mode 100755
index 0000000..3663588
--- /dev/null
+++ b/R/pppmatch.R
@@ -0,0 +1,823 @@
+#
+# pppmatch.R
+#
+# $Revision: 1.23 $  $Date: 2017/06/05 10:31:58 $
+#
+# Code by Dominic Schuhmacher
+#
+#
+# -----------------------------------------------------------------
+# The standard functions for the new class pppmatching
+#
+# Objects of class pppmatching consist of two point patterns pp1 and pp2,
+# and either an adjacency matrix ((i,j)-th entry 1 if i-th point of pp1 and j-th
+# point of pp2 are matched, 0 otherwise) for "full point matchings" or
+# a "generalized adjacency matrix" (or flow matrix; positive values are
+# no longer limited to 1, (i,j)-th entry gives the "flow" between
+# the i-th point of pp1 and the j-th point of pp2) for "fractional matchings".
+# Optional elements are the type
+# of the matching, the cutoff value for distances in R^2, the order
+# of averages taken, and the resulting distance for the matching.
+# Currently recognized types are "spa" (subpattern assignment, where dummy
+# points at maximal distance are introduced if the cardinalities differ),
+# "ace" (assignment only if the cardinalities are equal; the distance is
+# maximal if they differ), and "mat" (mass transfer, the fractional matching
+# that belongs to the Wasserstein distance obtained when the point patterns
+# are normalized to probability measures).
+# -----------------------------------------------------------------
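+#
+# A minimal construction sketch (hypothetical patterns; runifpoint is the
+# standard spatstat generator):
+#   X <- runifpoint(3)
+#   Y <- runifpoint(3)
+#   am <- diag(3)      # match the i-th point of X with the i-th point of Y
+#   m <- pppmatching(X, Y, am, type="ace", cutoff=1, q=1)
+#   plot(m)
+# -----------------------------------------------------------------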
+
+pppmatching <- function(X, Y, am, type = NULL, cutoff = NULL,
+   q = NULL, mdist = NULL) {
+   verifyclass(X, "ppp")
+   verifyclass(Y, "ppp")
+   n1 <- X$n
+   n2 <- Y$n
+   am <- as.matrix(am)
+   if (length(am) == 0) {
+      if (min(n1,n2) == 0) 
+         am <- matrix(am, nrow=n1, ncol=n2)
+      else
+         stop("Adjacency matrix does not have the right dimensions")
+   }
+   if (dim(am)[1] != n1 || dim(am)[2] != n2)
+      stop("Adjacency matrix does not have the right dimensions")
+   am <- matrix(as.numeric(am), n1, n2)
+   #am <- apply(am, c(1,2), as.numeric)
+   res <- list("pp1" = X, "pp2" = Y, "matrix" = am, "type" = type, "cutoff" = cutoff, 
+      "q" = q, "distance" = mdist)
+   class(res) <- "pppmatching"
+   res
+}
+
+# currently, for fractional matchings all the flows are plotted the same way
+# irrespective of their weights
+plot.pppmatching <- function(x, addmatch = NULL, main = NULL, ...) {
+   if (is.null(main))
+      main <- short.deparse(substitute(x))
+   pp1 <- x$pp1
+   pp2 <- x$pp2
+   plot.owin(pp1$window, main = main, ...)
+   here <- which((x$matrix > 0), arr.ind = TRUE)
+   if (!is.null(addmatch)) {
+      addhere <- which((addmatch > 0), arr.ind = TRUE)
+      seg <- as.psp(from=pp1[addhere[,1]], to=pp2[addhere[,2]])
+      plot(seg, add=TRUE, lty = 2, col="gray70")
+   }
+   if (length(here) > 0) {
+     seg <- as.psp(from=pp1[here[,1]], to=pp2[here[,2]])
+     plot(seg, add=TRUE, ...)
+   }
+   points(x$pp1, pch=20, col=2, ...)
+   points(x$pp2, pch=20, col=4, ...)
+   return(invisible(NULL))
+}
+
+print.pppmatching <- function(x, ...) {
+   n1 <- x$pp1$n
+   n2 <- x$pp2$n
+   if (is.null(x$type) || is.null(x$q) || is.null(x$cutoff))
+     cat("Generic matching of two planar point patterns \n")
+   else
+     cat(x$type, "-", x$q, " matching of two planar point patterns (cutoff = ",
+       x$cutoff, ") \n", sep = "")
+   cat("pp1:", n1, ngettext(n1, "point", "points"), "\n")
+   cat("pp2:", n2, ngettext(n2, "point", "points"), "\n")
+   print.owin(x$pp1$window)
+   npair <- sum(x$matrix > 0)
+   if (npair == 0)
+     cat("matching is empty \n") 
+   else {
+     if (any(x$matrix != trunc(x$matrix)))
+       cat("fractional matching,", npair, ngettext(npair, "flow", "flows"), "\n")
+     else
+       cat("point matching,", npair, ngettext(npair, "line", "lines"), "\n")
+   }
+   if (!is.null(x$distance))
+     cat("distance:", x$distance, "\n") 
+   return(invisible(NULL))
+}
+
+summary.pppmatching <- function(object, ...) {
+   X <- object$pp1
+   Y <- object$pp2
+   n1 <- X$n
+   n2 <- Y$n
+   if (is.null(object$type) || is.null(object$q) || is.null(object$cutoff))
+     cat("Generic matching of two planar point patterns \n")
+   else
+     cat(object$type, "-", object$q, " matching of two planar point patterns (cutoff = ",
+       object$cutoff, ") \n", sep = "")
+   cat("pp1:", n1, ngettext(n1, "point", "points"), "\n")
+   cat("pp2:", n2, ngettext(n2, "point", "points"), "\n")
+   print.owin(X$window)
+   npair <- sum(object$matrix > 0)
+   if (npair == 0)
+     cat("matching is empty \n") 
+   else {
+     if (any(object$matrix != trunc(object$matrix))) {
+       cat("fractional matching,", npair, ngettext(npair, "flow", "flows"), "\n")
+     }
+     else {
+       cat("point matching,", npair, ngettext(npair, "line", "lines"), "\n")
+       rowsum <- rowSums(object$matrix)
+       colsum <- colSums(object$matrix)
+       lt <- min(rowsum) >= 1
+       ru <- max(rowsum) <= 1
+       rt <- min(colsum) >= 1
+       lu <- max(colsum) <= 1
+       if (lt && ru && rt && lu)
+         cat("matching is 1-1 \n")
+       else if (any(lt, ru, rt, lu)) {
+         cat("matching is",
+                   ifelse(lt, " left-total", ""),
+                   ifelse(lu, " left-unique", ""),
+                   ifelse(rt, " right-total", ""),
+                   ifelse(ru, " right-unique", ""),
+                   "\n", sep="")
+         }
+     }
+   }
+   if (!is.null(object$distance))
+     cat("distance:", object$distance, "\n") 
+   return(invisible(NULL))
+}
+
+
+# -----------------------------------------------------------------
+# matchingdist computes the distance associated with a certain kind of matching.
+# Any of the arguments type, cutoff and q (if supplied) overrides the
+# corresponding element stored in the matching.
+# This function is useful for verifying the distance element of an
+# object of class pppmatching as well as for comparing different
+# (typically non-optimal) matchings.
+# -----------------------------------------------------------------
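+#
+# Usage sketch (assuming 'm' is a pppmatching object as above):
+#   matchingdist(m)                   # uses the type, cutoff and q stored in m
+#   matchingdist(m, type="ace", q=2)  # override the stored values
+# -----------------------------------------------------------------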
+
+matchingdist <- function(matching, type = NULL, cutoff = NULL, q = NULL) {
+  verifyclass(matching, "pppmatching")
+  if (is.null(type))
+    if (is.null(matching$type))
+      stop("Type of matching unknown. Distance cannot be computed")
+    else
+      type <- matching$type
+  if (is.null(cutoff))
+    if (is.null(matching$cutoff))
+      stop("Cutoff value unknown. Distance cannot be computed")
+    else
+      cutoff <- matching$cutoff
+  if (is.null(q))
+    if (is.null(matching$q))
+      stop("Order unknown. Distance cannot be computed")
+    else
+      q <- matching$q
+
+  X <- matching$pp1
+  Y <- matching$pp2
+  n1 <- X$n
+  n2 <- Y$n
+  Lpexpect <- function(x, w, p) {
+    f <- max(x)
+    return(ifelse(f==0, 0, f * sum((x/f)^p * w)^(1/p)))
+  }
+
+  if (type == "spa") {
+    n <- max(n1,n2) # divisor for Lpexpect
+    if (n == 0)
+      return(0)
+    else if (min(n1,n2) == 0)
+      return(cutoff)
+    shortdim <- which.min(c(n1,n2))
+    shortsum <- apply(matching$matrix, shortdim, sum)
+    if (any(shortsum != 1))
+      warning("matching does not attribute mass 1 to each point of point pattern with smaller cardinality")
+#    dfix <- apply(crossdist(X,Y), c(1,2), function(x) { min(x,cutoff) })
+    dfix <- pmin(crossdist(X,Y), cutoff)
+    if (is.finite(q))
+      resdist <- (Lpexpect(dfix, matching$matrix/n, q)^q + abs(n2-n1)/n * cutoff^q)^(1/q)
+    else
+      resdist <- ifelse(n1==n2, max(dfix[matching$matrix > 0]), cutoff)
+  }
+  else if (type == "ace") {
+    n <- n1 # divisor for Lpexpect
+    if (n1 != n2)
+      return(cutoff)
+    if (n == 0)
+      return(0)
+    rowsum <- rowSums(matching$matrix)
+    colsum <- colSums(matching$matrix)
+    if (any(c(rowsum, colsum) != 1))
+      warning("matching is not 1-1")
+#    dfix <- apply(crossdist(X,Y), c(1,2), function(x) { min(x,cutoff) })
+    dfix <- pmin(crossdist(X,Y), cutoff)
+    if (is.finite(q))
+      resdist <- Lpexpect(dfix, matching$matrix/n, q)
+    else
+      resdist <- max(dfix[matching$matrix > 0])
+  }
+  else if (type == "mat") {
+    n <- min(n1,n2) # divisor for Lpexpect
+    if (min(n1,n2) == 0)
+      return(NaN)
+    shortdim <- which.min(c(n1,n2))
+    shortsum <- apply(matching$matrix, shortdim, sum)
+    if (any(shortsum != 1))
+      warning("matching does not attribute mass 1 to each point of point pattern with smaller cardinality")
+#    dfix <- apply(crossdist(X,Y), c(1,2), function(x) { min(x,cutoff) })
+    dfix <- pmin(crossdist(X,Y), cutoff)
+    if (is.finite(q))
+      resdist <- Lpexpect(dfix, matching$matrix/n, q)
+    else
+      resdist <- max(dfix[matching$matrix > 0])
+  }
+  else 
+    stop(paste("Unrecognised type", sQuote(type)))
+  return(resdist)
+}
+
+
+# -----------------------------------------------------------------
+# The main function for computation of distances and finding optimal
+# matchings between point patterns: pppdist
+# -----------------------------------------------------------------
+#
+# pppdist uses several helper functions not normally called by the user 
+#
+# The arguments of pppdist are 
+#
+# X and Y of class ppp (the two point patterns for which we want to compute
+#   a distance)
+# type: the type of distance to be computed; one of "spa" (default), "ace",
+#   "mat". For details of this and the following two arguments see above
+#   (description for class "pppmatching")
+# cutoff and order q of the distance
+# Set matching to TRUE if the full point matching (including distance)
+#   should be returned; otherwise only the distance is returned
+# If ccode is FALSE, R code is used where available. This may be useful if q
+#   is high (say above 10) and severe warning messages pop up. R can
+#   (on most machines) deal with a higher number of significant digits per
+#   number than C (at least with the code used below)
+# precision should only be set by advanced users; empirically reasonable
+#   defaults are used otherwise. As a rule of thumb, if ccode is TRUE,
+#   precision should be the highest value that does not give an error
+#   (typically 9); if ccode is FALSE, precision should be balanced (typically
+#   between 10 and 100) in such a way that the total number of zeroes and
+#   pseudo-zeroes reported in the warning messages is minimal
+# approximation: if q = Inf, the order of the distance by which the true
+#   distance is approximated. If approximation is Inf, brute-force
+#   computation is used, which is practicable only for point patterns with
+#   very few points (see also the remarks just before the pppdist.prohorov
+#   function below)
+# show.rprimal=TRUE shows at each stage of the algorithm the current
+#   restricted primal problem and its solution (the algorithm alternates
+#   between the restricted primal and the dual problem until the solution of
+#   the restricted primal, a partial matching of the point patterns, is a
+#   full matching)
+# timelag gives the number of seconds of pause added each time a solution to
+#   the current restricted primal is found (effective only if
+#   show.rprimal=TRUE)
+# -----------------------------------------------------------------
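+#
+# A hedged usage sketch (hypothetical patterns):
+#   X <- runifpoint(20)
+#   Y <- runifpoint(20)
+#   pppdist(X, Y, type="spa", matching=FALSE)   # distance only
+#   m <- pppdist(X, Y, type="ace", q=2)         # full matching object
+#   matchingdist(m, type="ace", cutoff=1, q=2)  # recompute/verify the distance
+# -----------------------------------------------------------------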
+
+pppdist <- function(X, Y, type = "spa", cutoff = 1, q = 1, matching = TRUE,
+  ccode = TRUE, auction = TRUE, precision = NULL, approximation = 10, show.rprimal = FALSE, timelag = 0) {
+
+  verifyclass(X, "ppp")
+  verifyclass(Y, "ppp")
+  if (!ccode && type == "mat") {
+    warning("R code is not available for type = ", dQuote("mat"), ". C code is
+    used instead")
+    ccode <- TRUE
+  }
+  if (!ccode && is.infinite(q) && is.infinite(approximation)) {
+    warning("R code is not available for q = Inf and approximation = Inf. C code is
+    used instead")
+    ccode <- TRUE
+  }
+  if (ccode && is.infinite(q) && is.infinite(approximation) && type == "spa" && X$n != Y$n) {
+    warning("approximation = Inf not available for type = ",
+        dQuote("spa"), " and point patterns with differing cardinalities")
+    approximation <- 10
+  }
+  if (is.infinite(q) && is.infinite(approximation) && type == "mat") {
+    warning("approximation = Inf not available for type = ",
+        dQuote("mat"))
+    approximation <- 10
+  }
+  if (show.rprimal) {
+    ccode <- FALSE
+    auction <- FALSE
+    if (type != "ace") {
+      warning("show.rprimal = TRUE not available for type = ",
+              dQuote(type), ". Type is changed to ", dQuote("ace"))
+      type <- "ace"
+    }
+  }
+
+  if (is.null(precision)) {
+    if (ccode)
+      precision <- trunc(log10(.Machine$integer.max))
+    else {
+      db <- .Machine$double.base
+      minprec <- trunc(log10(db^.Machine$double.digits))
+      if (is.finite(q))
+        precision <- min(max(minprec,2*q),(.Machine$double.max.exp-1)*log(db)/log(10))
+      else
+        precision <- min(max(minprec,2*approximation),(.Machine$double.max.exp-1)*log(db)/log(10))
+    }
+  }
+
+  if (type == "spa") {
+    if (X$n == 0 && Y$n == 0) {
+      if (!matching)
+        return(0)
+      else {
+        return(pppmatching(X, Y, matrix(0, nrow=0,ncol=0), type, cutoff, q, 0))
+      }
+    }
+    n1 <- X$n
+    n2 <- Y$n
+    n <- max(n1,n2)
+    dfix <- matrix(cutoff,n,n)
+    if (min(n1,n2) > 0)
+      dfix[1:n1,1:n2] <- crossdist(X,Y)
+#    d <- dfix <- apply(dfix, c(1,2), function(x) { min(x,cutoff) })
+    d <- dfix <- pmin(dfix,cutoff)
+    if (is.infinite(q)) {
+      if (n1 == n2 || matching)
+        return(pppdist.prohorov(X, Y, n, d, type, cutoff, matching, ccode,
+        auction, precision, approximation))
+      else
+        return(cutoff)
+      # In the case n1 != n2 the distance is clear (= cutoff), and in a sense
+      # any matching would be correct. If a matching is requested we go the
+      # extra mile and call pppdist.prohorov in order to find (approximately)
+      # the matching that is intuitively most interesting, i.e. the one that
+      # pairs the points of the smaller-cardinality pattern with points of
+      # the larger-cardinality pattern in such a way that the maximal pairing
+      # distance is minimal. (For q < Inf the q-th order pairing distance
+      # before the introduction of dummy points is automatically minimal if
+      # it is minimal after the introduction of dummy points, which would be
+      # the case for the obtained pairing.)
+    }
+  }
+  else if (type == "ace") {
+    if (X$n != Y$n) {
+      if (!matching)
+        return(cutoff)
+      else {
+        return(pppmatching(X, Y, matrix(0, nrow=X$n, ncol=Y$n), type, cutoff, q, cutoff))
+      }
+    }
+    if (X$n == 0) {
+      if (!matching)
+        return(0)
+      else {
+        return(pppmatching(X, Y, matrix(0, nrow=0,ncol=0), type, cutoff, q, 0))
+      }
+    }
+    n <- n1 <- n2 <- X$n
+    dfix <- crossdist(X,Y)
+#    d <- dfix <- apply(dfix, c(1,2), function(x) { min(x,cutoff) })
+    d <- dfix <- pmin(dfix, cutoff)
+    if (is.infinite(q))
+      return(pppdist.prohorov(X, Y, n, d, type, cutoff, matching, ccode,
+      auction, precision, approximation))
+  }
+  else if (type == "mat") {
+    if (!ccode)
+      warning("R code is not available for type = ", dQuote("mat"), ". C code is used instead")
+    if (auction)
+      warning("Auction algorithm is not available for type = ", dQuote("mat"), ". Primal-dual algorithm is used instead")
+    return(pppdist.mat(X, Y, cutoff, q, matching, precision, approximation))
+  }
+  else stop(paste("Unrecognised type", sQuote(type)))
+
+  d <- d/max(d)
+  d <- round((d^q)*(10^precision))
+  nzeroes <- sum(d == 0 & dfix > 0)
+  if(nzeroes > 0)
+    warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"), "introduced, while rounding the q-th powers of distances"))
+  if(ccode & any(d > .Machine$integer.max))
+    stop("integer overflow, while rounding the q-th powers of distances")
+  if(!ccode) {
+    if (any(is.infinite(d)))
+      stop("Inf obtained, while taking the q-th powers of distances")
+    maxd <- max(d)
+    npszeroes <- sum(maxd/d[d>0] >= .Machine$double.base^.Machine$double.digits)
+    if (npszeroes > 0)
+      warning(paste(npszeroes, ngettext(npszeroes, "pseudo-zero", "pseudo-zeroes"), "introduced, while taking the q-th powers of distances"))
+      # a pseudo-zero is a value that is positive but contributes nothing to the
+      # q-th order average because it is too small compared to the other values
+  }
+
+  Lpmean <- function(x, p) {
+    f <- max(x)
+    return(ifelse(f==0, 0, f * mean((x/f)^p)^(1/p)))
+  }
+    
+  if (show.rprimal && type == "ace") {
+    assig <- acedist.show(X, Y, n, d, timelag)
+    am <- matrix(0, n, n)
+    am[cbind(1:n, assig[1:n])] <- 1
+  }
+  else if (ccode) {
+    if (auction) {
+      dupper <- max(d)/10
+      lasteps <- 1/(n+1)
+      epsfac <- 10
+      epsvec <- lasteps
+      # Bertsekas: from dupper/2 to 1/(n+1) divide repeatedly by a constant
+      while (lasteps < dupper) {
+        lasteps <- lasteps*epsfac
+        epsvec <- c(epsvec,lasteps)
+      }
+      epsvec <- rev(epsvec)[-1]
+      neps <- length(epsvec)
+      stopifnot(neps >= 1)
+      d <- max(d)-d
+      # auctionbf uses a "desire matrix"
+      res <- .C("auctionbf",
+                as.integer(d),
+                as.integer(n),
+                pers_to_obj = as.integer(rep(-1,n)),
+                price = as.double(rep(0,n)),
+                profit = as.double(rep(0,n)),
+                as.integer(neps),
+                as.double(epsvec),
+                PACKAGE = "spatstat")
+      am <- matrix(0, n, n)
+      am[cbind(1:n,res$pers_to_obj+1)] <- 1
+    }
+    else {           
+      res <- .C("dwpure",
+             as.integer(d),
+             as.integer(rep.int(1,n)),
+             as.integer(rep.int(1,n)),
+             as.integer(n),
+             as.integer(n),
+             flowmatrix = as.integer(integer(n^2)),
+             PACKAGE = "spatstat")
+      am <- matrix(res$flowmatrix, n, n)
+    }
+  }
+  else {
+    assig <- acedist.noshow(X, Y, n, d)
+    am <- matrix(0, n, n)
+    am[cbind(1:n, assig[1:n])] <- 1
+  }
+  resdist <- Lpmean(dfix[am == 1], q)
+  if (!matching)
+    return(resdist)
+  else {
+    amsmall <- suppressWarnings(matrix(am[1:n1,1:n2], nrow=n1, ncol=n2))
+    # previous line solves various problems associated with min(n1,n2) = 0 or = 1
+    return(pppmatching(X, Y, amsmall, type, cutoff, q, resdist))
+  }
+}   
+
+#
+#
+# ===========================================================
+# ===========================================================
+#                   Anything below:
+#    Internal functions usually not to be called by user
+# ===========================================================
+# ===========================================================
+#
+
+#
+#   Called if show.rprimal is true
+#
+
+acedist.show <- function(X, Y, n, d, timelag = 0) {
+      plot(pppmatching(X, Y, matrix(0, n, n)))
+      # initialization of dual variables
+      u <- apply(d, 1, min)
+      d <- d - u
+      v <- apply(d, 2, min)
+      d <- d - rep(v, each=n)
+      # the main loop
+      feasible <- FALSE
+      while (!feasible) {
+         rpsol <- maxflow(d)  # rpsol = restricted primal, solution
+         am <- matrix(0, n, n)
+         for (i in 1:n) {
+            if (rpsol$assignment[i] > -1) am[i, rpsol$assignment[i]] <- TRUE
+         }
+         Sys.sleep(timelag)
+         channelmat <- (d == 0 & !am)
+         plot(pppmatching(X, Y, am), addmatch = channelmat)
+         # if the solution of the restricted primal is not feasible for  
+         # the original primal, update dual variables
+         if (min(rpsol$assignment) == -1) {
+            w1 <- which(rpsol$fi_rowlab > -1)
+            w2 <- which(rpsol$fi_collab == -1)
+            subtractor <- min(d[w1, w2])
+            d[w1,] <- d[w1,] - subtractor
+            d[,-w2] <- d[,-w2] + subtractor 
+         }
+         # otherwise break the loop
+         else {
+            feasible <- TRUE
+         }   
+      }
+      return(rpsol$assignment)
+}
+
+#
+#   R version of the Hungarian algorithm, without the pictures
+#   (useful if q is large)
+#
+
+acedist.noshow <- function(X, Y, n, d) {
+      # initialization of dual variables
+      u <- apply(d, 1, min)
+      d <- d - u
+      v <- apply(d, 2, min)
+      d <- d - rep(v, each=n)
+      # the main loop
+      feasible <- FALSE
+      while (!feasible) {
+         rpsol <- maxflow(d)  # rpsol = restricted primal, solution
+# ~~~~~~~~~ deleted by AJB ~~~~~~~~~~~~~~~~~
+#         am <- matrix(0, n, n)
+#         for (i in 1:n) {
+#            if (rpsol$assignment[i] > -1) am[i, rpsol$assignment[i]] <- TRUE
+#         }
+#         channelmat <- (d == 0 & !am)
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~         
+         # if the solution of the restricted primal is not feasible for  
+         # the original primal, update dual variables
+         if (min(rpsol$assignment) == -1) {
+            w1 <- which(rpsol$fi_rowlab > -1)
+            w2 <- which(rpsol$fi_collab == -1)
+            subtractor <- min(d[w1, w2])
+            d[w1,] <- d[w1,] - subtractor
+            d[,-w2] <- d[,-w2] + subtractor 
+         }
+         # otherwise break the loop
+         else {
+            feasible <- TRUE
+         }   
+      }
+      return(rpsol$assignment)
+}
+
+#  
+# Solution of restricted primal
+# 
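+# Sketch on a tiny cost matrix (each row contains a zero, as required):
+#   costm <- matrix(c(0, 2, 1, 0), 2, 2)
+#   maxflow(costm)$assignment   # c(1, 2): row i is assigned to column i
+#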
+
+maxflow <- function(costm) {
+  stopifnot(is.matrix(costm))
+  stopifnot(nrow(costm) == ncol(costm))
+  if(!all(apply(costm == 0, 1, any)))
+    stop("Each row of the cost matrix must contain a zero")
+  
+  m <- dim(costm)[1]   # cost matrix is square m * m
+  assignment <- rep.int(-1, m)   # -1 means no pp2-point assigned to i-th pp1-point
+   # initial greedy assignment; where impossible, the row later gets the source label (= 0)
+   for (i in 1:m) {
+      j <- match(0, costm[i,])
+      if (!(j %in% assignment))
+         assignment[i] <- j
+   }
+   newlabelfound <- TRUE
+   while (newlabelfound) {
+     rowlab <- rep.int(-1, m)   # -1 means no label given, 0 stands for source label
+     collab <- rep.int(-1, m)
+     rowlab <- ifelse(assignment == -1, 0, rowlab)
+     # column and row labeling procedure until either breakthrough occurs
+     # (which means that there is a better point assignment, i.e. one that
+     # creates more point pairs than the current one (flow can be increased))
+     # or no more labeling is possible
+     breakthrough <- -1
+     while (newlabelfound && breakthrough == -1) { 
+         newlabelfound <- FALSE
+         for (i in 1:m) {
+            if (rowlab[i] != -1) {
+               for (j in 1:m) {
+                  if (costm[i,j] == 0 && collab[j] == -1) {
+                     collab[j] <- i
+                     newlabelfound <- TRUE
+                     if (!(j %in% assignment) && breakthrough == -1)
+                        breakthrough <- j
+                  }
+               }
+            }
+         }
+         for (j in 1:m) {
+            if (collab[j] != -1) {
+               for (i in 1:m) {
+                  if (assignment[i] == j && rowlab[i] == -1) {
+                     rowlab[i] <- j
+                     newlabelfound <- TRUE
+                  }
+               }
+            }
+         }
+      }
+      # if the while-loop was left due to breakthrough,
+      # reassign points (i.e. redirect flow) and restart labeling procedure
+      if (breakthrough != -1) {
+         l <- breakthrough
+         while (l != 0) {
+            k <- collab[l]
+            assignment[k] <- l
+            l <- rowlab[k] 
+         }
+      }
+   }
+   # the outermost while-loop is left, no more labels can be given; hence
+   # the maximal number of points are paired given the current restriction
+   # (flow is maximal given the current graph)
+   return(list("assignment"=assignment, "fi_rowlab"=rowlab, "fi_collab"=collab))  
+}
+
+# 
+# Prohorov distance computation/approximation (called if q = Inf in pppdist
+#   and type = "spa" or "ace")
+# Exact brute-force computation of the distance if approximation = Inf;
+#   this scales very badly and should not be used for cardinality n larger
+#   than 10-12
+# Approximation by the order-q distance often gives (if the warning messages
+#   are not too extreme) the right matching and therefore the exact Prohorov
+#   distance, but in very rare cases the result can be very wrong. However,
+#   it is always an exact upper bound of the Prohorov distance (since it is
+#   based on *a* pairing, as opposed to an optimal pairing).
+#
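+# Sketch: pppdist(X, Y, type="ace", q=Inf) reaches this function and, by
+#   default, approximates the q = Inf distance by the order-10 distance
+#   (hypothetical patterns X and Y of equal cardinality).
+#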
+
+pppdist.prohorov <- function(X, Y, n, dfix, type, cutoff = 1, matching = TRUE,
+  ccode = TRUE, auction = TRUE, precision = 9, approximation = 10) {
+  n1 <- X$n
+  n2 <- Y$n
+  d <- dfix/max(dfix)
+  if (is.finite(approximation)) {
+      warning(paste("distance with parameter q = Inf is approximated by distance with parameter q =", approximation))
+    d <- round((d^approximation)*(10^precision)) 
+    nzeroes <- sum(d == 0 & dfix > 0)
+    if (nzeroes > 0)
+      warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"), "introduced, while rounding distances"))
+    if (ccode) {
+      if (any(d > .Machine$integer.max))
+        stop("integer overflow, while rounding the q-th powers of distances")
+      if (auction) {
+        dupper <- max(d)/10
+        lasteps <- 1/(n+1)
+        epsfac <- 10
+        epsvec <- lasteps
+        # Bertsekas: from dupper/2 to 1/(n+1) divide repeatedly by a constant
+        while (lasteps < dupper) {
+          lasteps <- lasteps*epsfac
+          epsvec <- c(epsvec,lasteps)
+        }
+        epsvec <- rev(epsvec)[-1]
+        neps <- length(epsvec)
+        stopifnot(neps >= 1)
+        d <- max(d)-d
+        # auctionbf uses a "desire matrix"
+        res <- .C("auctionbf",
+                  as.integer(d),
+                  as.integer(n),
+                  pers_to_obj = as.integer(rep(-1,n)),
+                  price = as.double(rep(0,n)),
+                  profit = as.double(rep(0,n)),
+                  as.integer(neps),
+                  as.double(epsvec),
+                  PACKAGE = "spatstat")
+        am <- matrix(0, n, n)
+        am[cbind(1:n,res$pers_to_obj+1)] <- 1
+      }
+      else {           
+        res <- .C("dwpure",
+                 as.integer(d),
+                 as.integer(rep.int(1,n)),
+                 as.integer(rep.int(1,n)),
+                 as.integer(n),
+                 as.integer(n),
+                 flowmatrix = as.integer(integer(n^2)),
+                 PACKAGE = "spatstat")
+        am <- matrix(res$flowmatrix, n, n)
+      }
+    }
+    else {
+      if (any(is.infinite(d)))
+        stop("Inf obtained, while taking the q-th powers of distances")
+      maxd <- max(d)
+      npszeroes <- sum(maxd/d[d>0] >= .Machine$double.base^.Machine$double.digits)
+      if (npszeroes > 0)
+        warning(paste(npszeroes, ngettext(npszeroes, "pseudo-zero", "pseudo-zeroes"), "introduced, while taking the q-th powers of distances"))
+      assig <- acedist.noshow(X, Y, n, d)
+      am <- matrix(0, n, n)
+      am[cbind(1:n, assig[1:n])] <- 1
+    }
+  }
+  else {
+    d <- round(d*(10^precision))
+    nzeroes <- sum(d == 0 & dfix > 0)
+    if (nzeroes > 0)
+      warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"), "introduced, while rounding distances"))
+    if (any(d > .Machine$integer.max))
+      stop("integer overflow, while rounding the q-th powers of distances")
+    res <- .C("dinfty_R",
+             as.integer(d),
+             as.integer(n),
+             assignment = as.integer(rep.int(-1,n)),
+             PACKAGE = "spatstat")
+    assig <- res$assignment
+    am <- matrix(0, n, n)
+    am[cbind(1:n, assig[1:n])] <- 1
+  }
+  if (n1 == n2)
+    resdist <- max(dfix[am == 1])
+  else
+    resdist <- cutoff
+  if (!matching)
+    return(resdist)
+  else {
+    amsmall <- suppressWarnings(matrix(am[1:n1,1:n2], nrow=n1, ncol=n2))
+    # previous line solves various problems associated with min(n1,n2) = 0 or = 1
+    return(pppmatching(X, Y, amsmall, type, cutoff, Inf, resdist))
+  }
+}   
+
+# 
+# Computation of the "pure Wasserstein distance" for any q (called if
+#   type="mat" in pppdist, whether q is finite or not).
+# If q = Inf, approximation using ccode is enforced
+# (approximation == Inf is not allowed here).
+#
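+# Sketch: pppdist(X, Y, type="mat", cutoff=0.1, q=2) ends up calling
+#   pppdist.mat(X, Y, cutoff=0.1, q=2, ...) (hypothetical argument values).
+#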
+
+pppdist.mat <- function(X, Y, cutoff = 1, q = 1, matching = TRUE, precision = 9,
+  approximation = 10) {
+  n1 <- X$n
+  n2 <- Y$n
+  n <- min(n1,n2)
+  if (n == 0) {
+    if (!matching)
+      return(NaN)
+    else
+      return(pppmatching(X, Y, matrix(0, nrow=0,ncol=0), "mat", cutoff, q, NaN))
+  }
+
+  dfix <- crossdist(X,Y)
+#  d <- dfix <- apply(dfix, c(1,2), function(x) { min(x,cutoff) })
+  d <- dfix <- pmin(dfix, cutoff)
+  d <- d/max(d)
+  if (is.infinite(q)) {
+    if (is.infinite(approximation))
+      stop("approximation = Inf")
+    warning(paste("distance with parameter q = Inf is approximated by distance with parameter q =", approximation))
+    d <- round((d^approximation)*(10^precision)) 
+    nzeroes <- sum(d == 0 & dfix > 0)
+    if (nzeroes > 0)
+      warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"), "introduced, while rounding distances"))
+    if (any(d > .Machine$integer.max))
+      stop("integer overflow, while rounding the q-th powers of distances")
+    gcd <- greatest.common.divisor(n1,n2)
+    mass1 <- n2/gcd
+    mass2 <- n1/gcd
+
+    res <- .C("dwpure",
+             as.integer(d),
+             as.integer(rep.int(mass1,n1)),
+             as.integer(rep.int(mass2,n2)),
+             as.integer(n1),
+             as.integer(n2),
+             flowmatrix = as.integer(integer(n1*n2)),
+             PACKAGE = "spatstat")
+    am <- matrix(res$flowmatrix/(max(n1,n2)/gcd), n1, n2)
+    resdist <- max(dfix[am > 0])
+  }
+  else {
+    d <- round((d^q)*(10^precision))
+    nzeroes <- sum(d == 0 & dfix > 0)
+    if(nzeroes > 0)
+      warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"), "introduced, while rounding the q-th powers of distances"))
+    if(any(d > .Machine$integer.max))
+      stop("integer overflow, while rounding the q-th powers of distances")
+    gcd <- greatest.common.divisor(n1,n2)
+    mass1 <- n2/gcd
+    mass2 <- n1/gcd
+
+    Lpexpect <- function(x, w, p) {
+      f <- max(x)
+      return(ifelse(f==0, 0, f * sum((x/f)^p * w)^(1/p)))
+    }
+
+    res <- .C("dwpure",
+             as.integer(d),
+             as.integer(rep.int(mass1,n1)),
+             as.integer(rep.int(mass2,n2)),
+             as.integer(n1),
+             as.integer(n2),
+             flowmatrix = as.integer(integer(n1*n2)),
+             PACKAGE = "spatstat")
+    am <- matrix(res$flowmatrix/(max(n1,n2)/gcd), n1, n2)
+    # our "adjacency matrix" in this case is standardized to have
+    # rowsum 1 if n1 <= n2 and colsum 1 if n1 >= n2
+    resdist <- Lpexpect(dfix, am/n, q)
+  }
+  if (!matching)
+    return(resdist)
+  else {
+   amsmall <- suppressWarnings(matrix(am[1:n1,1:n2], nrow=n1, ncol=n2))
+   # previous line solves various problems associated with min(n1,n2) = 0 or = 1
+   return(pppmatching(X, Y, amsmall, "mat", cutoff, q, resdist))
+  }
+}
+
diff --git a/R/ppqq.R b/R/ppqq.R
new file mode 100644
index 0000000..c71b87f
--- /dev/null
+++ b/R/ppqq.R
@@ -0,0 +1,114 @@
+##
+##  ppqq.R
+##
+## P-P and Q-Q versions of fv objects
+##
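+##
+## A hedged usage sketch (assuming an estimated K-function):
+##   K <- Kest(cells)
+##   plot(PPversion(K))   # P-P style: function values against 'theo'
+##   plot(QQversion(K))   # Q-Q style: theoretical inverse applied to values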
+
+PPversion <- local({
+
+  PPversion <- function(f, theo="theo", columns=".") {
+    if(!any(colnames(f) == theo))
+      stop(paste(sQuote(theo), "is not the name of a column of f"))
+    ## set up inverse theoretical function f_0: 'theo' |-> 'r'
+    xname <- fvnames(f, ".x")
+    df <- as.data.frame(f)
+    theo.table <- df[,theo]
+    x.table    <- df[,xname]
+    invfun <- approxfun(x=theo.table, y=x.table, rule=1)
+    ## evaluate f_0^{-1}(theo) for evenly-spaced grid of 'theo' values
+    ra <- range(theo.table)
+    theo.seq <- seq(from=ra[1], to=ra[2], length.out=nrow(df))
+    x.vals <- invfun(theo.seq)
+    ## convert f to a function and evaluate at these 'r' values
+    ynames <- setdiff(fvnames(f, columns), theo)
+    ff <- as.function(f, value=ynames)
+    y.vals <- lapply(ynames, evalselected, x=x.vals, f=ff)
+    ## build data frame
+    all.vals <- append(list(theo=theo.seq), y.vals)
+    names(all.vals) <- c(theo, ynames)
+    DF <- as.data.frame(all.vals)
+    ## set up fv object
+    atr <- attributes(f)
+    cnames <- colnames(f)
+    i.theo <- match(theo,   cnames)
+    i.yval <- match(ynames, cnames)
+    ii <- c(i.theo, i.yval)
+    old.best <- fvnames(f, ".y")
+    best <- if(old.best %in% ynames) old.best else ynames[length(ynames)]
+    result <- fv(DF,
+                 argu = theo,
+                 ylab = atr$ylab,
+                 valu = best,
+                 fmla = . ~ .x,
+                 alim = ra,
+                 labl = atr$labl[ii], 
+                 desc = atr$desc[ii],
+                 unitname = NULL,
+                 fname = atr$fname,
+                 yexp = atr$yexp)
+    fvnames(result, ".") <- c(ynames, theo)
+    return(result)
+  }
+
+  evalselected <- function(what, f, x){ f(x, what=what) } 
+
+  PPversion
+})
+
+
+QQversion <- function(f, theo="theo", columns=".") {
+  if(!any(colnames(f) == theo))
+    stop(paste(sQuote(theo), "is not the name of a column of f"))
+  ## extract relevant columns of data
+  xname <- fvnames(f, ".x")
+  ynames <- fvnames(f, columns)
+  df <- as.data.frame(f)
+  theo.table <- df[,theo]
+  x.table    <- df[,xname]
+  y.table    <- df[,ynames, drop=FALSE]
+  ## set up inverse theoretical function f_0: 'theo' |-> 'r'
+  invfun <- approxfun(x=theo.table, y=x.table, rule=1)
+  ## apply f_0^{-1} to tabulated function values
+  z.table <- as.data.frame(lapply(y.table, invfun))
+  ## build data frame
+  DF <- cbind(df[,xname,drop=FALSE], z.table)
+  ## set up fv object
+  atr <- attributes(f)
+  cnames <- colnames(f)
+  i.x <- match(xname,   cnames)
+  i.y <- match(ynames, cnames)
+  ii <- c(i.x, i.y)
+  old.best <- fvnames(f, ".y")
+  best <- if(old.best %in% ynames) old.best else ynames[length(ynames)]
+  if(versionstring.spatstat() < package_version("1.38-2")) {
+    fvl <- fvlabels(f, expand=TRUE)
+    theo.string <- fvl[colnames(f) == theo]
+  } else {
+    theo.string <- fvlabels(f, expand=TRUE)[[theo]]
+  }
+  ## remove '(r)' from outer function
+  theo.string <- sub(paren(xname), "", theo.string, fixed=TRUE)
+  theo.expr <- parse(text=theo.string)
+  theo.lang <- theo.expr[[1]]
+  ylab <- substitute({{THEO}^{-1}}(FUN),
+                     list(FUN=atr$ylab, THEO=theo.lang))
+  yexp <- substitute({{THEO}^{-1}}(FUN),
+                     list(FUN=atr$yexp, THEO=theo.lang))
+  oldlabl <- atr$labl
+  labl.iy <- sprintf("{{%s}^{-1}}(%s)",  theo.string, oldlabl[i.y])
+  labl.ii <- c(oldlabl[i.x], labl.iy)
+  result <- fv(DF,
+               argu = atr$argu,
+               ylab = ylab,
+               valu = best,
+               fmla = . ~ .x,
+               alim = atr$alim,
+               labl = labl.ii,
+               desc = atr$desc[ii],
+               unitname = NULL,
+               fname = atr$fname,
+               yexp = yexp)
+  fvnames(result, ".") <- ynames
+  unitname(result) <- unitname(f)
+  return(result)
+}
diff --git a/R/ppx.R b/R/ppx.R
new file mode 100755
index 0000000..531cd96
--- /dev/null
+++ b/R/ppx.R
@@ -0,0 +1,543 @@
+#
+#   ppx.R
+#
+#  class of general point patterns in any dimension
+#
+#  $Revision: 1.60 $  $Date: 2017/06/05 10:31:58 $
+#
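+#  A minimal sketch (hypothetical coordinates):
+#    df <- data.frame(x=runif(10), y=runif(10), z=runif(10), t=runif(10))
+#    X <- ppx(df, coord.type=c("s", "s", "s", "t"))
+#    coords(X, temporal=FALSE)   # just the three spatial coordinates
+#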
+
+ppx <- local({
+  
+  ctype.table <- c("spatial", "temporal", "local", "mark")
+  ctype.real  <- c(TRUE,      TRUE,       FALSE,   FALSE)
+
+  ppx <- function(data, domain=NULL, coord.type=NULL, simplify=FALSE) {
+    data <- as.hyperframe(data)
+    # columns suitable for spatial coordinates
+    suitable <- with(unclass(data),
+                     vtype == "dfcolumn" &
+                     (vclass == "numeric" | vclass == "integer"))
+    if(is.null(coord.type)) {
+      # assume all suitable columns of data are spatial coordinates
+      # and all other columns are marks.
+      ctype <- ifelse(suitable, "spatial", "mark")
+    } else {
+      stopifnot(is.character(coord.type))
+      stopifnot(length(coord.type) == ncol(data))
+      ctypeid <- pmatch(coord.type, ctype.table, duplicates.ok=TRUE)
+      # validate
+      if(any(uhoh <- is.na(ctypeid)))
+        stop(paste("Unrecognised coordinate",
+                   ngettext(sum(uhoh), "type", "types"),
+                   commasep(sQuote(coord.type[uhoh]))))
+      if(any(uhoh <- (!suitable & ctype.real[ctypeid]))) {
+        nuh <- sum(uhoh)
+        stop(paste(ngettext(nuh, "Coordinate", "Coordinates"),
+                   commasep(sQuote(names(data)[uhoh])),
+                   ngettext(nuh, "does not", "do not"),
+                   "contain real numbers"))
+      }
+      ctype <- ctype.table[ctypeid]
+    }
+    ctype <- factor(ctype, levels=ctype.table)
+    #
+    if(simplify && all(ctype == "spatial")) {
+       # attempt to reduce to ppp or pp3
+      d <- length(ctype)
+      if(d == 2) {
+        ow <- try(as.owin(domain), silent=TRUE)
+        if(!inherits(ow, "try-error")) {
+          X <- try(as.ppp(as.data.frame(data), W=ow))
+          if(!inherits(X, "try-error"))
+            return(X)
+        }
+      } else if(d == 3) {
+        bx <- try(as.box3(domain), silent=TRUE)
+        if(!inherits(bx, "try-error")) {
+          m <- as.matrix(as.data.frame(data))
+          X <- try(pp3(m[,1], m[,2], m[,3], bx))
+          if(!inherits(X, "try-error"))
+            return(X)
+        }
+      }
+    }
+    out <- list(data=data, ctype=ctype, domain=domain)
+    class(out) <- "ppx"
+    return(out)
+  }
+
+  ppx
+})
+
+
+is.ppx <- function(x) { inherits(x, "ppx") }
+
+nobjects.ppx <- npoints.ppx <- function(x) { nrow(x$data) }
+
+print.ppx <- function(x, ...) {
+  cat("Multidimensional point pattern\n")
+  sd <- summary(x$data)
+  np <- sd$ncases
+  nama <- sd$col.names
+  cat(paste(np, ngettext(np, "point", "points"), "\n"))
+  if(any(iscoord <- (x$ctype == "spatial")))
+    cat(paste(sum(iscoord), "-dimensional space coordinates ",
+              paren(paste(nama[iscoord], collapse=",")), "\n", sep=""))
+  if(any(istime <- (x$ctype == "temporal")))
+    cat(paste(sum(istime), "-dimensional time coordinates ",
+              paren(paste(nama[istime], collapse=",")), "\n", sep=""))
+  if(any(islocal <- (x$ctype == "local"))) 
+    cat(paste(sum(islocal), ngettext(sum(islocal), "column", "columns"),
+              "of local coordinates:",
+              commasep(sQuote(nama[islocal])), "\n"))
+  if(any(ismark <- (x$ctype == "mark"))) 
+    cat(paste(sum(ismark), ngettext(sum(ismark), "column", "columns"),
+              "of marks:",
+              commasep(sQuote(nama[ismark])), "\n"))
+  if(!is.null(x$domain)) {
+    cat("Domain:\n\t")
+    print(x$domain)
+  }
+  invisible(NULL)
+}
+
+summary.ppx <- function(object, ...) { object }
+
+plot.ppx <- function(x, ...) {
+  xname <- short.deparse(substitute(x))
+  coo <- coords(x, local=FALSE)
+  dom <- x$domain
+  m <- ncol(coo)
+  if(m == 1) {
+    coo <- coo[,1]
+    ran <- diff(range(coo))
+    ylim <- c(-1,1) * ran/20
+    do.call(plot.default,
+            resolve.defaults(list(coo, numeric(length(coo))),
+                             list(...),
+                             list(asp=1, ylim=ylim,
+                                  axes=FALSE, xlab="", ylab="")))
+    axis(1, pos=ylim[1])
+  } else if(m == 2) {
+    if(is.null(dom)) {
+      # plot x, y coordinates only
+      nama <- names(coo)
+      do.call.matched(plot.default,
+                      resolve.defaults(list(x=coo[,1], y=coo[,2], asp=1),
+                                       list(...),
+                                       list(main=xname),
+                                       list(xlab=nama[1], ylab=nama[2])))
+    } else {
+      add <- resolve.defaults(list(...), list(add=FALSE))$add
+      if(!add) {
+        # plot domain, whatever it is
+        do.call(plot, resolve.defaults(list(dom),
+                                       list(...),
+                                       list(main=xname)))
+      }
+      # convert to ppp
+      x2 <- ppp(coo[,1], coo[,2], window=as.owin(dom),
+                marks=as.data.frame(marks(x)), check=FALSE)
+      # invoke plot.ppp
+      return(do.call(plot, resolve.defaults(list(x2),
+                                              list(add=TRUE),
+                                              list(...))))
+    }
+  } else if(m == 3) {
+    # convert to pp3
+    if(is.null(dom))
+      dom <- box3(range(coo[,1]), range(coo[,2]), range(coo[,3]))
+    x3 <- pp3(coo[,1], coo[,2], coo[,3], dom)
+    # invoke plot.pp3
+    nama <- names(coo)
+    do.call(plot,
+            resolve.defaults(list(x3),
+                             list(...),
+                             list(main=xname),
+                             list(xlab=nama[1], ylab=nama[2], zlab=nama[3])))
+  } else stop(paste("Don't know how to plot a general point pattern in",
+               ncol(coo), "dimensions"))
+  return(invisible(NULL))
+}
+
+"[.ppx" <- function (x, i, drop=FALSE, ...) {
+  da <- x$data
+  dom <- x$domain
+  if(!missing(i)) {
+    if(inherits(i, c("boxx", "box3"))) {
+      dom <- i
+      i <- inside.boxx(da, w=i)
+    }
+    da <- da[i, , drop=FALSE]
+  }
+  out <- list(data=da, ctype=x$ctype, domain=dom)
+  class(out) <- "ppx"
+  if(drop) {
+    # remove unused factor levels
+    mo <- marks(out)
+    switch(markformat(mo),
+           none = { },
+           vector = {
+             if(is.factor(mo))
+               marks(out) <- factor(mo)
+           },
+           dataframe = {
+             isfac <- sapply(mo, is.factor)
+             if(any(isfac))
+               mo[, isfac] <- lapply(mo[, isfac], factor)
+             marks(out) <- mo
+           },
+           hyperframe = {
+             lmo <- as.list(mo)
+             isfac <- sapply(lmo, is.factor)
+             if(any(isfac))
+               mo[, isfac] <- as.hyperframe(lapply(lmo[isfac], factor))
+             marks(out) <- mo
+           })
+  }
+  return(out)
+}
+
+domain <- function(X, ...) { UseMethod("domain") }
+
+domain.ppx <- function(X, ...) { X$domain }
+
+coords <- function(x, ...) {
+  UseMethod("coords")
+}
+
+coords.ppx <- function(x, ..., spatial=TRUE, temporal=TRUE, local=TRUE) {
+  ctype <- x$ctype
+  chosen <- (ctype == "spatial" & spatial) |
+            (ctype == "temporal" & temporal) | 
+            (ctype == "local" & local) 
+  as.data.frame(x$data[, chosen, drop=FALSE])
+}
+
+coords.ppp <- function(x, ...) { data.frame(x=x$x,y=x$y) }
+
+"coords<-" <- function(x, ..., value) {
+  UseMethod("coords<-")
+}
+
+"coords<-.ppp" <- function(x, ..., value) {
+  win <- x$window
+  if(is.null(value)) {
+    # empty pattern
+    return(ppp(window=win))
+  }
+  value <- as.data.frame(value)
+  if(ncol(value) != 2)
+    stop("Expecting a 2-column matrix or data frame, or two vectors")
+  result <- as.ppp(value, win)
+  marks(result) <- marks(x)
+  return(result)
+}
+
+"coords<-.ppx" <- function(x, ..., spatial=TRUE, temporal=TRUE, local=TRUE, value) {
+  ctype <- x$ctype
+  chosen <- (ctype == "spatial" & spatial) |
+            (ctype == "temporal" & temporal) | 
+            (ctype == "local" & local) 
+  x$data[, chosen] <- value
+  return(x)
+}
+
+as.hyperframe.ppx <- function(x, ...) { x$data }
+
+as.data.frame.ppx <- function(x, ...) { as.data.frame(x$data, ...) } 
+
+as.matrix.ppx <- function(x, ...) { as.matrix(as.data.frame(x, ...)) }
+
+marks.ppx <- function(x, ..., drop=TRUE) {
+  ctype <- x$ctype
+  chosen <- (ctype == "mark")
+  if(!any(chosen)) return(NULL)
+  x$data[, chosen, drop=drop]
+}
+
+"marks<-.ppx" <- function(x, ..., value) {
+  ctype <- x$ctype
+  retain <- (ctype != "mark")
+  coorddata <- x$data[, retain, drop=FALSE]
+  if(is.null(value)) {
+    newdata <- coorddata
+    newctype <- ctype[retain]
+  } else {
+    if(is.matrix(value) && nrow(value) == nrow(x$data)) {
+      # assume matrix is to be treated as data frame
+      value <- as.data.frame(value)
+    }
+    if(!is.data.frame(value) && !is.hyperframe(value)) 
+      value <- hyperframe(marks=value)
+    if(is.hyperframe(value) || is.hyperframe(coorddata)) {
+      value <- as.hyperframe(value)
+      coorddata <- as.hyperframe(coorddata)
+    }
+    if(ncol(value) == 0) {
+      newdata <- coorddata
+      newctype <- ctype[retain]
+    } else {
+      if(nrow(coorddata) == 0) 
+        value <- value[integer(0), , drop=FALSE]
+      newdata <- cbind(coorddata, value)
+      newctype <- factor(c(as.character(ctype[retain]),
+                           rep.int("mark", ncol(value))),
+                         levels=levels(ctype))
+    }
+  }
+  out <- list(data=newdata, ctype=newctype, domain=x$domain)
+  class(out) <- class(x)
+  return(out)
+}
+
+unmark.ppx <- function(X) {
+  marks(X) <- NULL
+  return(X)
+}
+
+markformat.ppx <- function(x) {
+  mf <- x$markformat
+  if(is.null(mf)) 
+    mf <- markformat(marks(x))
+  return(mf)
+}
+
+boxx <- function(..., unitname=NULL) {
+  if(length(list(...)) == 0)
+    stop("No data")
+  ranges <- data.frame(...)
+  nama <- names(list(...))
+  if(is.null(nama) || !all(nzchar(nama)))
+    names(ranges) <- paste("x", 1:ncol(ranges),sep="")
+  if(nrow(ranges) != 2)
+    stop("Data should be vectors of length 2")
+  if(any(unlist(lapply(ranges, diff)) <= 0))
+    stop("Illegal range: Second element <= first element")
+  out <- list(ranges=ranges, units=as.units(unitname))
+  class(out) <- "boxx"
+  return(out)
+}
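+
+# e.g. boxx(c(0,1), c(0,2), c(-1,1)) is the 3-dimensional box
+# [0,1] x [0,2] x [-1,1] (a hypothetical illustration)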
+
+as.boxx <- function(..., warn.owin = TRUE) {
+  a <- list(...)
+  n <- length(a)
+  if (n == 0) 
+    stop("No arguments given")
+  if (n == 1) {
+    a <- a[[1]]
+    if (inherits(a, "boxx")) 
+      return(a)
+    if (inherits(a, "box3")) 
+      return(boxx(a$xrange, a$yrange, a$zrange, unitname = a$units))
+    if (inherits(a, "owin")) {
+      if (!is.rectangle(a) && warn.owin) 
+        warning("The owin object does not appear to be rectangular - the bounding box is used!")
+      return(boxx(a$xrange, a$yrange, unitname = a$units))
+    }
+    if (is.numeric(a)) {
+      if ((length(a)%%2) == 0) 
+        return(boxx(split(a, rep(1:(length(a)/2), each = 2))))
+      stop(paste("Don't know how to interpret", length(a), "numbers as a box"))
+    }
+    if (!is.list(a)) 
+      stop("Don't know how to interpret data as a box")
+  }
+  return(do.call(boxx, a))
+}
+
+print.boxx <- function(x, ...) {
+  m <- ncol(x$ranges)
+  cat(paste(m, "-dimensional box:\n", sep=""))
+  bracket <- function(z) paste("[",
+                               paste(signif(z, 5), collapse=", "),
+                               "]", sep="")
+  v <- paste(unlist(lapply(x$ranges, bracket)), collapse=" x ")
+  s <- summary(unitname(x))
+  cat(paste(v, s$plural, s$explain, "\n"))
+  invisible(NULL)
+}
+
+unitname.boxx <- function(x) { x$units }
+
+"unitname<-.boxx" <- function(x, value) {
+  x$units <- as.units(value)
+  return(x)
+}
+
+unitname.ppx <- function(x) { unitname(x$domain) }
+
+"unitname<-.ppx" <- function(x, value) {
+  d <- x$domain
+  unitname(d) <- value
+  x$domain <- d
+  return(x)
+}
+
+as.owin.boxx <- function(W, ..., fatal=TRUE) {
+  ra <- W$ranges
+  if(length(ra) == 2) return(owin(ra[[1]], ra[[2]]))
+  if(fatal) stop(paste("Cannot interpret box of dimension",
+                       length(ra), "as a window"))
+  return(NULL)
+}
+
+sidelengths.boxx <- function(x) {
+  stopifnot(inherits(x, "boxx"))
+  y <- unlist(lapply(x$ranges, diff))
+  return(y)
+}
+  
+volume.boxx <- function(x) {
+  prod(sidelengths(x))
+}
+
+diameter.boxx <- function(x) {
+  d <- sqrt(sum(sidelengths(x)^2))
+  return(d)
+}
+
+shortside.boxx <- function(x) {
+  return(min(sidelengths(x)))
+}
+
+eroded.volumes.boxx <- local({
+
+  eroded.volumes.boxx <- function(x, r) {
+    len <- sidelengths(x)
+    ero <- sapply(as.list(len), erode1side, r=r)
+    apply(ero, 1, prod)
+  }
+
+  erode1side <- function(z, r) { pmax.int(0, z - 2 * r)}
+  
+  eroded.volumes.boxx
+})
+
+
+runifpointx <- function(n, domain, nsim=1, drop=TRUE) {
+  check.1.integer(n)
+  check.1.integer(nsim)
+  stopifnot(inherits(domain, "boxx"))
+  ra <- domain$ranges
+  d <- length(ra)
+  result <- vector(mode="list", length=nsim)
+  for(i in 1:nsim) {
+    if(n == 0) {
+      coo <- matrix(numeric(0), nrow=0, ncol=d)
+    } else {
+      coo <- mapply(runif,
+                    n=rep(n, d),
+                    min=ra[1,],
+                    max=ra[2,])
+      # mapply drops the result to a bare vector when n = 1;
+      # restore the n x d matrix shape so colnames() below works
+      if(!is.matrix(coo)) coo <- matrix(coo, nrow=n, ncol=d)
+    }
+    colnames(coo) <- colnames(ra)
+    df <- as.data.frame(coo)
+    result[[i]] <- ppx(df, domain)
+  }
+  if(nsim == 1 && drop)
+    return(result[[1]])
+  result <- as.anylist(result)
+  names(result) <- paste("Simulation", 1:nsim)
+  return(result)
+}
+
+rpoisppx <- function(lambda, domain, nsim=1, drop=TRUE) {
+  stopifnot(inherits(domain, "boxx"))
+  stopifnot(is.numeric(lambda) && length(lambda) == 1 && lambda >= 0)
+  n <- rpois(nsim, lambda * volume.boxx(domain))
+  result <- vector(mode="list", length=nsim)
+  for(i in 1:nsim) 
+    result[[i]] <- runifpointx(n[i], domain)
+  if(nsim == 1 && drop)
+    return(result[[1]])
+  result <- as.anylist(result)
+  names(result) <- paste("Simulation", 1:nsim)
+  return(result)
+}
+
+unique.ppx <- function(x, ..., warn=FALSE) {
+  dup <- duplicated(x, ...)
+  if(!any(dup)) return(x)
+  if(warn) warning(paste(sum(dup), "duplicated points were removed"),
+                   call.=FALSE)
+  y <- x[!dup]
+  return(y)
+}
+
+duplicated.ppx <- function(x, ...) {
+  dup <- duplicated(as.data.frame(x), ...)
+  return(dup)
+}
+
+anyDuplicated.ppx <- function(x, ...) {
+  anyDuplicated(as.data.frame(x), ...)
+}
+
+
+multiplicity.ppx <- function(x) {
+  mul <- multiplicity(as.data.frame(x))
+  return(mul)
+}
+
+intensity.ppx <- function(X, ...) {
+  if(!is.multitype(X)) {
+    n <- npoints(X)
+  } else {
+    mks <- marks(X)
+    n <- as.vector(table(mks))
+    names(n) <- levels(mks)
+  }
+  v <- volume(domain(X))
+  return(n/v)
+}
+
+grow.boxx <- function(W, left, right = left){
+  W <- as.boxx(W)
+  ra <- W$ranges
+  d <- length(ra)
+  if(any(left < 0) || any(right < 0))
+    stop("values of left and right margin must be nonnegative.")
+  if(length(left)==1) left <- rep(left, d)
+  if(length(right)==1) right <- rep(right, d)
+  if(length(left)!=d || length(right)!=d){
+    stop("left and right margin must be either of length 1 or the dimension of the boxx.")
+  }
+  W$ranges[1,] <- ra[1,]-left
+  W$ranges[2,] <- ra[2,]+right
+  return(W)
+}
+
+inside.boxx <- function(..., w = NULL){
+  if(is.null(w))
+    stop("Please provide a boxx using the named argument w.")
+  w <- as.boxx(w)
+  dat <- list(...)
+  if(length(dat)==1){
+    dat1 <- dat[[1]]
+    if(inherits(dat1, "ppx"))
+      dat <- coords(dat1)
+    if(inherits(dat1, "hyperframe"))
+      dat <- as.data.frame(dat1)
+  }
+  ra <- w$ranges
+  if(length(ra)!=length(dat))
+    stop("Mismatch between dimension of boxx and number of coordinate vectors.")
+  ## Check coord. vectors have equal length
+  n <- length(dat[[1]])
+  if(any(lengths(dat)!=n))
+    stop("Coordinate vectors have unequal length.")
+  index <- rep(TRUE, n)
+  for(i in seq_along(ra)){
+    index <- index & inside.range(dat[[i]], ra[[i]])
+  }
+  return(index)
+}
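+
+# Sketch: with B <- boxx(c(0,1), c(0,1)),
+#   inside.boxx(runif(5), runif(5), w=B)
+# returns a logical vector flagging the points that fall inside B
+# (hypothetical coordinates)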
+
+
+spatdim <- function(X) {
+  if(is.sob(X)) 2L else
+  if(inherits(X, "box3")) 3 else
+  if(inherits(X, "boxx")) length(X$ranges) else 
+  if(is.ppx(X)) as.integer(sum(X$ctype == "spatial")) else NA_integer_
+}
diff --git a/R/predict.ppm.R b/R/predict.ppm.R
new file mode 100755
index 0000000..0698c47
--- /dev/null
+++ b/R/predict.ppm.R
@@ -0,0 +1,793 @@
+#
+#    predict.ppm.R
+#
+#	$Revision: 1.100 $	$Date: 2016/12/19 09:13:07 $
+#
+#    predict.ppm()
+#	   From fitted model obtained by ppm(),	
+#	   evaluate the fitted trend or conditional intensity 
+#	   at a grid/list of other locations 
+#
+#
+# -------------------------------------------------------------------
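+#
+# A hedged usage sketch ('cells' is a built-in dataset):
+#   fit <- ppm(cells ~ x)
+#   predict(fit)                                       # fitted trend image
+#   predict(fit, type="count", interval="confidence")  # CI for total count
+#   predict(fit, se=TRUE)$se                           # standard error
+# -------------------------------------------------------------------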
+
+predict.ppm <- local({
+  ##
+  ##  extract undocumented/outdated arguments, and trap others
+  ##
+  xtract <- function(..., newdata=NULL, sumobj=NULL, E=NULL, total=NULL,
+                     getoutofjail=FALSE) {
+    if(!is.null(newdata))
+      warning(paste("The use of the argument", sQuote("newdata"),
+                    "is out-of-date. See help(predict.ppm)"))
+    if(!is.null(total)) 
+      message(paste("The use of the argument", sQuote("total"),
+                    "is out-of-date. See help(predict.ppm)"))
+    trap.extra.arguments(..., .Context="In predict.ppm")
+    return(list(sumobj=sumobj, E=E, total=total, getoutofjail=getoutofjail))
+  }
+  ##
+  ## confidence/prediction intervals for number of points
+  predconfPois <- function(region, object, level,
+                           what=c("estimate", "se",
+                             "confidence", "prediction")) {
+    what <- match.arg(what)
+    stopifnot(0 < level && level < 1)
+    lam <- predict(object, window=region)
+    mu.hat <- integral.im(lam)
+    if(what == "estimate") return(mu.hat)
+    mo <- model.images(object, W=as.owin(lam))
+    ZL <- unlist(lapply(mo,
+                        function(z, w) integral.im(eval.im(z * w)),
+                        w = lam))
+    ZL <- matrix(ZL, nrow=1)
+    var.muhat <- as.numeric(ZL %*% vcov(object) %*% t(ZL))
+    sd.muhat <- sqrt(var.muhat)
+    if(what == "se") return(sd.muhat)
+    alpha2 <- (1-level)/2
+    pp <- sort(c(alpha2, 1-alpha2))
+    out <- switch(what,
+                  confidence = mu.hat + qnorm(pp) * sd.muhat,
+                  prediction = qmixpois(pp, mu.hat, sd.muhat, I))
+    names(out) <- paste0(signif(100 * pp, 3), "%")
+    out
+  }
+
+  typepublic <- c("trend", "cif", "intensity", "count")
+  typeaccept <- c(typepublic, "lambda", "se", "SE", "covariates")
+  typeuse    <- c(typepublic, "cif",    "se", "se", "covariates")
+  
+  predict.ppm <- function(object, window=NULL, ngrid=NULL, locations=NULL,
+                          covariates=NULL,
+                          type=c("trend", "cif", "intensity", "count"),
+                          se=FALSE,
+                          interval=c("none", "confidence", "prediction"),
+                          level = 0.95,
+                          X=data.ppm(object),
+                          correction,
+                          ..., new.coef=NULL, check=TRUE, repair=TRUE) {
+    interval <- match.arg(interval)
+    ## extract undocumented arguments 
+    xarg <- xtract(...)
+    sumobj <- xarg$sumobj
+    E      <- xarg$E
+    total  <- xarg$total
+    getoutofjail <- xarg$getoutofjail
+    ## match 'type' argument including 'legacy' options
+    seonly <- FALSE
+    if(missing(type)) type <- type[1] else {
+      if(length(type) > 1) stop("Argument 'type' should be a single value")
+      mt <- pmatch(type, typeaccept)
+      if(is.na(mt)) stop(paste("Argument 'type' should be one of",
+                               commasep(sQuote(typepublic), " or ")))
+      type <- typeuse[mt]
+      if(type == "se") {
+        if(!getoutofjail)
+          message(paste("Outdated syntax:",
+                        "type='se' should be replaced by se=TRUE;",
+                        "then the standard error is predict(...)$se"))
+        type <- "trend"
+        se <- TRUE
+        seonly <- TRUE
+      }
+    } 
+    if(!is.null(total)) {
+      message("Outdated argument 'total': use 'window' and set type='count'")
+      type <- "count" 
+      if(!is.logical(total))
+        window <- if(is.tess(total)) total else as.owin(total)
+    }
+    ##
+    model <- object
+    verifyclass(model, "ppm")
+    ##  
+    if(check && damaged.ppm(object)) {
+      if(!repair)
+        stop("object format corrupted; try update(object, use.internal=TRUE)")
+      message("object format corrupted; repairing it.")
+      object <- update(object, use.internal=TRUE)
+    }
+
+    if(missing(correction) || is.null(correction))
+      correction <- object$correction
+  
+    fitcoef <- coef(object)
+    if(!is.null(new.coef)) {
+      ## validate coefs
+      if(length(new.coef) != length(fitcoef))
+        stop(paste("Argument new.coef has wrong length",
+                   length(new.coef), ": should be", length(fitcoef)))
+      coeffs <- new.coef
+    } else {
+      coeffs <- fitcoef
+    }
+
+    ##       find out what kind of model it is
+    if(is.null(sumobj))
+      sumobj <- summary(model, quick="entries")  # undocumented hack!
+#    stationary  <- sumobj$stationary
+    poisson     <- sumobj$poisson
+    marked      <- sumobj$marked
+    multitype   <- sumobj$multitype
+    notrend     <- sumobj$no.trend
+    changedcoef <- sumobj$changedcoef || !is.null(new.coef)
+    trivial     <- poisson && notrend
+  
+    need.covariates <- sumobj$uses.covars
+    covnames.needed <- sumobj$covars.used
+
+    if(sumobj$antiquated)
+      warning("The model was fitted by an out-of-date version of spatstat")  
+
+    ##       determine mark space
+    if(marked) {
+      if(!multitype)
+        stop("Prediction not yet implemented for general marked point processes")
+      else 
+        types <- levels(marks(sumobj$entries$data))
+    }
+
+    ## For Poisson models cif=intensity=trend
+    if(poisson && type %in% c("cif", "intensity"))
+      type <- "trend"
+
+    ## ............. trap un-implemented cases ...................
+    
+    ## Standard errors not yet available for cif, intensity
+    if(se && type %in% c("cif", "intensity"))
+      stop(paste("Standard error for", type, "is not yet implemented"),
+           call.=FALSE)
+
+    ## Intervals are only available for unmarked Poisson models
+    if(type == "count" && interval != "none" && (marked || !poisson)) {
+      stop(paste0(interval, " intervals for counts are only implemented for",
+                  if(marked) " unmarked" else "",
+                  if(!poisson) " Poisson",
+                  " models"),
+           call.=FALSE)
+    }
+
+    if(interval == "prediction" && type != "count")
+      stop("Prediction intervals are only available for type='count'",
+           call.=FALSE)
+    
+    if(interval == "confidence" && type %in% c("intensity", "cif")) 
+      stop(paste("Confidence intervals are not yet available for", type),
+           call.=FALSE)
+
+    estimatename <- if(interval == "none") "estimate" else interval
+    
+    ## ............. start computing .............................
+    
+    ## Total count in a region
+    
+    if(type == "count") {
+      ## point or interval estimate, optionally with SE
+      if(is.null(window)) {
+        ## domain of the original data
+        if(!seonly) est <- predconfPois(NULL, model, level, estimatename)
+        if(se) sem <- predconfPois(NULL, model, level, "se")
+      } else if(is.tess(window)) {
+        ## quadrats
+        tilz <- tiles(window)
+        if(!seonly) {
+          est <- lapply(tilz, predconfPois,
+                        object=model, level=level, what=estimatename)
+          est <- if(interval == "none") unlist(est) else
+                 ## reshape: one row per tile, columns are the two quantiles
+                 matrix(unlist(est), byrow=TRUE, ncol=2,
+                        dimnames=list(names(est), names(est[[1]])))
+        }
+        if(se) sem <- unlist(lapply(tilz, predconfPois,
+                                    object=model, level=level, what="se"))
+      } else {
+        ## window
+        if(!seonly) est <- predconfPois(window, model, level, estimatename)
+        if(se) sem <- predconfPois(window, model, level, "se")
+      }
+      if(!se) return(est)
+      if(seonly) return(sem)
+      result <- list(est, sem)
+      names(result) <- c(estimatename, "se")
+      return(result)
+    }
+
+    ## .....   Predict a spatial function .......
+    
+    if(interval != "none") {
+      ## Prepare for confidence interval 
+      alpha2 <- (1-level)/2
+      pp <- sort(c(alpha2, 1-alpha2))
+      ci.names <- paste0(signif(100 * pp, 3), "%")
+      ci.q <- qnorm(pp)
+    }
+    
+    ##      determine what kind of output is required:
+    ##      (arguments present)    (output)  
+    ##         window, ngrid    ->   image
+    ##         locations (mask) ->   image
+    ##         locations (image) ->   image
+    ##         locations (rectangle) ->  treat locations as 'window'
+    ##         locations (polygonal) ->  treat locations as 'window'
+    ##         locations (other) ->  data frame
+    ##
+
+    if(is.im(locations))
+      locations <- as.owin(locations)
+    
+    if(is.null(window) && is.owin(locations) && !is.mask(locations)) {
+      window <- locations
+      locations <- NULL
+    }
+  
+    if(!is.null(ngrid) && !is.null(locations))
+      stop(paste("Only one of",
+                 sQuote("ngrid"), "and", sQuote("locations"),
+                 "should be specified"))
+
+    if(is.null(ngrid) && is.null(locations)) 
+      ## use regular grid
+      ngrid <- rev(spatstat.options("npixel"))
+    
+    want.image <- is.null(locations) || is.mask(locations)
+    make.grid <- !is.null(ngrid)
+
+    ## ##############   Determine prediction points  #####################
+
+    if(!want.image) {
+      ## (A) list of (x,y) coordinates given by `locations'
+      xpredict <- locations$x
+      ypredict <- locations$y
+      if(is.null(xpredict) || is.null(ypredict)) {
+        xy <- xy.coords(locations)
+        xpredict <- xy$x
+        ypredict <- xy$y
+      }
+      if(is.null(xpredict) || is.null(ypredict))
+        stop(paste("Don't know how to extract x,y coordinates from",
+                   sQuote("locations")))
+      ## marks if required
+      if(marked) {
+        ## extract marks from data frame `locations'
+        mpredict <- locations$marks 
+        if(is.null(mpredict))
+          stop(paste("The argument", sQuote("locations"),
+                     "does not contain a column of marks",
+                     "(required since the fitted model",
+                     "is a marked point process)"))
+        if(is.factor(mpredict)) {
+          ## verify mark levels match those in model
+          if(!identical(all.equal(levels(mpredict), types), TRUE)) {
+            if(all(levels(mpredict) %in% types))
+              mpredict <- factor(mpredict, levels=types)
+            else 
+              stop(paste("The marks in", sQuote("locations"),
+                         "do not have the same levels as",
+                         "the marks in the model"))
+          }
+        } else {
+          ## coerce to factor if possible
+          if(all(mpredict %in% types))
+            mpredict <- factor(mpredict, levels=types)
+          else
+            stop(paste("The marks in", sQuote("locations"),
+                       "do not have the same values as the marks in the model"))
+        }
+      }
+    } else {
+      ## (B) pixel grid of points
+      if(!make.grid) 
+        ##    (B)(i) The grid is given in `locations'
+        masque <- locations
+      else {
+        ##    (B)(ii) We have to make the grid ourselves  
+        ##    Validate ngrid
+        if(!is.null(ngrid)) {
+          if(!is.numeric(ngrid))
+            stop("ngrid should be a numeric vector")
+          ngrid <- ensure2vector(ngrid)
+        }
+        if(is.null(window))
+          window <- sumobj$entries$data$window
+        masque <- as.mask(window, dimyx=ngrid)
+      }
+      ## Hack -----------------------------------------------
+      ## gam with lo() will not allow extrapolation beyond the range of x,y
+      ## values actually used for the fit. Check this:
+      tums <- termsinformula(model$trend)
+      if(any(
+             tums == "lo(x)" |
+             tums == "lo(y)" |
+             tums == "lo(x,y)" |
+             tums == "lo(y,x)")
+         ) {
+        ## determine range of x,y used for fit
+        gg <- model$internal$glmdata
+        gxr <- range(gg$x[gg$SUBSET])
+        gyr <- range(gg$y[gg$SUBSET])
+        ## trim window to this range
+        masque <- intersect.owin(masque, owin(gxr, gyr))
+      }
+      ## ------------------------------------ End Hack
+      ##
+      ## Finally, determine x and y vectors for grid
+      rxy <- rasterxy.mask(masque, drop=TRUE)
+      xpredict <- rxy$x
+      ypredict <- rxy$y 
+    }
+
+    ## ################  CREATE DATA FRAME  ##########################
+    ##                           ... to be passed to predict.glm()  
+    ##
+    ## First the x, y coordinates
+  
+    if(!marked) 
+      newdata <- data.frame(x=xpredict, y=ypredict)
+    else if(!want.image) 
+      newdata <- data.frame(x=xpredict, y=ypredict, marks=mpredict)
+    else {
+      ## replicate
+      nt <- length(types)
+      np <- length(xpredict)
+      xpredict <- rep.int(xpredict,nt)
+      ypredict <- rep.int(ypredict,nt)
+      mpredict <- rep.int(types, rep.int(np, nt))
+      mpredict <- factor(mpredict, levels=types)
+      newdata <- data.frame(x = xpredict,
+                            y = ypredict,
+                            marks=mpredict)
+    }
+
+    ## ## Next the external covariates, if any
+    ##
+    if(need.covariates) {
+      if(is.null(covariates)) {
+        ## Extract covariates from fitted model object
+        ## They have to be images.
+        oldcov <- model$covariates
+        if(is.null(oldcov))
+          stop("External covariates are required, and are not available")
+        if(is.data.frame(oldcov))
+          stop(paste("External covariates are required.",
+                     "Prediction is not possible at new locations"))
+        covariates <- oldcov
+      }
+      ## restrict to covariates actually required for formula
+      covariates <- if(is.data.frame(covariates)) {
+        covariates[,covnames.needed, drop=FALSE]
+      } else covariates[covnames.needed]
+      covfunargs <- model$covfunargs
+      covariates.df <-
+        mpl.get.covariates(covariates,
+                           list(x=xpredict, y=ypredict),
+                           "prediction points",
+                           covfunargs)
+      newdata <- cbind(newdata, covariates.df)
+    }
+
+    ## ###### Set up prediction variables ################################
+    ##
+    ## Provide SUBSET variable
+    ##
+    if(is.null(newdata$SUBSET))
+      newdata$SUBSET <- rep.int(TRUE, nrow(newdata))
+    ##
+    ## Dig out information used in Berman-Turner device 
+    ##        Vnames:     the names for the ``interaction variables''
+    ##        glmdata:    the data frame used for the glm fit
+    ##        glmfit:     the fitted glm object
+    ##
+
+    if(!trivial) {
+      Vnames <- model$internal$Vnames
+      vnameprefix <- model$internal$vnameprefix
+      glmdata <- getglmdata(model)
+      glmfit <- getglmfit(model)
+      if(object$method=="logi")
+        newdata$.logi.B <- rep(glmdata$.logi.B[1], nrow(newdata))
+    }
+
+    ## Undocumented secret exit
+    if(type == "covariates")
+      return(list(newdata=newdata, mask=if(want.image) masque else NULL))
+             
+    ## ##########  COMPUTE PREDICTION ##############################
+    ##
+    ##   Compute the predicted value z[i] for each row of 'newdata'
+    ##   Store in a vector z and reshape it later
+    ##
+    ##
+    ## #############################################################
+
+    needSE <- se || (interval != "none")
+    
+    if(trivial) {
+      ## ###########  UNIFORM POISSON PROCESS #####################
+
+      lambda <- exp(coeffs[[1]])
+      if(needSE) {
+        npts <- nobs(model)
+        se.lambda <- lambda/sqrt(npts)
+      }
+      switch(interval,
+             none = {
+               z <- rep.int(lambda, nrow(newdata))
+             },
+             confidence = {
+               z <- matrix(lambda + se.lambda * ci.q, 
+                           byrow=TRUE,
+                           nrow=nrow(newdata), ncol=2,
+                           dimnames=list(NULL, ci.names))
+             },
+             stop("Internal error: unreached"))
+
+      if(se) 
+        zse <- rep.int(se.lambda, nrow(newdata))
+    
+      ## ##############################################################
+    } else if((type %in% c("trend", "intensity")) || poisson) {
+      ##
+      ## ##########  COMPUTE TREND ###################################
+      ##	
+      ##   set explanatory variables to zero
+      ##	
+      zeroes <- numeric(nrow(newdata))    
+      for(vn in Vnames)    
+        newdata[[vn]] <- zeroes
+      ##
+      ##   predict trend
+      ##
+      z <- lambda <- GLMpredict(glmfit, newdata, coeffs, 
+                                changecoef=changedcoef)
+      ##
+      if(type == "intensity") 
+        z <- PoisSaddle(z, fitin(model))
+      
+      ##
+      if(needSE) {
+        ## extract variance-covariance matrix of parameters
+        vc <- vcov(model)
+        ## compute model matrix
+        fmla <- formula(model)
+#        mf <- model.frame(fmla, newdata, ..., na.action=na.pass)
+#        mm <- model.matrix(fmla, mf, ..., na.action=na.pass)
+        mf <- model.frame(fmla, newdata, na.action=na.pass)
+        mm <- model.matrix(fmla, mf, na.action=na.pass)
+        if(nrow(mm) != nrow(newdata))
+          stop("Internal error: row mismatch in SE calculation")
+        ## compute relative variance = diagonal of quadratic form
+        vv <- quadform(mm, vc)
+        ## standard error
+        SE <- lambda * sqrt(vv)
+        if(se) 
+          zse <- SE
+        if(interval == "confidence") {
+          z <- lambda + outer(SE, ci.q, "*")
+          colnames(z) <- ci.names
+        } 
+      } 
+      
+      ## ############################################################  
+    } else if(type == "cif" || type =="lambda") {
+      ## ####### COMPUTE FITTED CONDITIONAL INTENSITY ################
+      ##
+      ## set up arguments
+      inter <- model$interaction
+      if(!missing(X)) stopifnot(is.ppp(X))
+      W <- as.owin(data.ppm(model))
+      U <- ppp(newdata$x, y=newdata$y, window=W, check=FALSE)
+      if(marked) 
+        marks(U) <- newdata$marks
+      ## determine which prediction points are data points
+      if(is.null(E))
+        E <- equalpairs(U, X, marked)
+    
+      ## evaluate interaction
+      Vnew <- evalInteraction(X, U, E, inter, correction=correction,
+                              check=check)
+
+      ## Negative infinite values signify cif = zero
+      cif.equals.zero <- matrowany(Vnew == -Inf)
+    
+      ## Insert the potential into the relevant column(s) of `newdata'
+      if(ncol(Vnew) == 1) {
+        ## Potential is real valued (Vnew is a column vector)
+        ## Assign values to a column of the same name in newdata
+        newdata[[Vnames]] <- as.vector(Vnew)
+      ##
+      } else if(is.null(avail <- colnames(Vnew))) {
+        ## Potential is vector-valued (Vnew is a matrix)
+        ## with unnamed components.
+        ## Assign the components, in order of their appearance,
+        ## to the columns of newdata labelled Vnames[1], Vnames[2],... 
+        for(i in seq_along(Vnames))
+          newdata[[Vnames[i] ]] <- Vnew[,i]
+        ##
+      } else {
+        ## Potential is vector-valued (Vnew is a matrix)
+        ## with named components.
+        ## Match variables by name
+        if(all(Vnames %in% avail)) {
+          for(vn in Vnames)
+            newdata[[ vn ]] <- Vnew[ , vn]
+        } else if(all(Vnames %in% (Pavail <- paste0(vnameprefix, avail)))) {
+          for(vn in Vnames)
+            newdata[[ vn ]] <- Vnew[ , match(vn, Pavail)]
+        } else
+          stop(paste(
+            "Internal error: unable to match names",
+            "of available interaction terms",
+            commasep(sQuote(avail)),
+            "to required interaction terms",
+            commasep(sQuote(Vnames))
+            ), call.=FALSE)
+      }
+      ## invoke predict.glm or compute prediction
+      z <- GLMpredict(glmfit, newdata, coeffs, 
+                      changecoef=changedcoef)
+    
+      ## reset to zero if potential was zero
+      if(any(cif.equals.zero))
+        z[cif.equals.zero] <- 0
+    
+      ## ###############################################################    
+    } else
+    stop(paste("Unrecognised type", sQuote(type)))
+
+    ## ###############################################################
+    ##
+    ## reshape the result
+    ##
+    if(!want.image) {
+      if(!se) {
+        out <- as.vector(z)
+      } else if(seonly) {
+        out <- as.vector(zse)
+      } else {
+        out <- list(as.vector(z), as.vector(zse))
+        names(out) <- c(estimatename, "se")
+      }
+    }
+    else {
+      ## make an image of the right shape and value
+      imago <- as.im(masque, value=1.0)
+      if(!marked && interval=="none") {
+        ## single image
+        if(!se) {
+          out <- imago
+          ## set entries
+          out[] <- z
+        } else if(seonly) {
+          out <- imago
+          out[] <- zse
+        } else {
+          est <- std <- imago
+          est[] <- z
+          std[] <- zse
+          out <- list(est, std)
+          names(out) <- c(estimatename, "se")
+        }
+      } else if(interval != "none") {
+        ## list of 2 images for CI
+        if(!seonly) {
+          lo <- hi <- imago
+          lo[] <- z[,1]
+          hi[] <- z[,2]
+          est <- solist(lo, hi)
+          names(est) <- ci.names
+        }
+        if(se) {
+          std <- imago
+          std[] <- zse
+        }
+        if(!se) {
+          out <- est
+        } else if(seonly) {
+          out <- std
+        } else {
+          out <- list(est, std)
+          names(out) <- c(estimatename, "se")
+        }
+      } else {
+        ## list of images, one for each level of marks
+        out <- list()
+        for(i in seq_along(types)) {
+          outi <- imago
+          ## set entries
+          outi[] <- z[newdata$marks == types[i]]
+          out[[i]] <- outi
+        }
+        out <- as.solist(out)
+        names(out) <- as.character(types)
+      }
+    }
+    ##  
+    ##  FINISHED
+    ##  
+    return(out)
+  }
+
+  predict.ppm
+})
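+
+## Illustrative usage (a sketch, not executed here; 'cells' is a standard
+## spatstat dataset):
+##   fit <- ppm(cells ~ x)
+##   Z   <- predict(fit)                         # image of fitted trend
+##   SE  <- predict(fit, se=TRUE)$se             # image of standard error
+##   CI  <- predict(fit, interval="confidence")  # two images: 95% limits
+##   N   <- predict(fit, type="count", interval="prediction")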
+
+
+
+####################################################################
+#
+# compute pointwise uncertainty of fitted intensity
+#
+model.se.image <- function(fit, W=as.owin(fit), ..., what="sd") {
+  if(!is.poisson.ppm(fit))
+    stop("Only implemented for Poisson point process models", call.=FALSE)
+  what <- pickoption("option", what,
+                     c(sd="sd", var="var", cv="cv", CV="cv", ce="ce", CE="ce"))
+  W <- as.mask(as.owin(W))
+  # variance-covariance matrix of coefficients
+  vc <- vcov(fit)
+  np <- dim(vc)[1]
+  # extract sufficient statistic for each coefficient
+  mm <- model.images(fit, W, ...)
+  # compute fitted intensity 
+  lam <- predict(fit, locations=W)
+  # initialise resulting image
+  U <- as.im(W)
+  U[] <- 0
+  # compute pointwise matrix product, assuming vc is symmetric
+  for(i in 1:np) {
+    Si <- mm[[i]]
+    aii <- vc[i,i]
+    U <- eval.im(U + aii * Si^2)
+    if(i > 1) {
+      for(j in 1:(i-1)) {
+        Sj <- mm[[j]]
+        aij <- vc[i,j]
+        twoaij <- 2 * aij
+        U <- eval.im(U + twoaij * Si * Sj)
+      }
+    }
+  }
+  # the matrix product is the relative variance (CV)
+  if(what=="cv")
+    return(U)
+  # relative sd
+  if(what=="ce") {
+    U <- eval.im(sqrt(U))
+    return(U)
+  }
+  # multiply by squared intensity to obtain variance
+  U <- eval.im(U * lam^2)
+  # variance
+  if(what=="var")
+    return(U)
+  # compute SD and return
+  U <- eval.im(sqrt(U))
+  return(U)
+}
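+
+## Illustrative usage (sketch): pointwise uncertainty of the fitted intensity
+##   fit <- ppm(cells ~ x)
+##   plot(model.se.image(fit, what="sd"))   # standard deviation image
+##   plot(model.se.image(fit, what="ce"))   # relative error image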
+
+GLMpredict <- function(fit, data, coefs, changecoef=TRUE,
+                       type=c("response", "link")) {
+  ok <- is.finite(coefs)
+  type <- match.arg(type)
+  if(!changecoef && all(ok)) {
+    answer <- predict(fit, newdata=data, type=type)
+  } else {
+    # do it by hand
+    fmla <- formula(fit)
+    data$.mpl.Y <- 1
+    fram <- model.frame(fmla, data=data, na.action=NULL)
+    # linear predictor
+    mm <- model.matrix(fmla, data=fram)
+    # ensure all required coefficients are present
+    coefs <- fill.coefs(coefs, colnames(mm))
+    ok <- is.finite(coefs)
+    #
+    if(all(ok)) {
+      eta <- as.vector(mm %*% coefs)
+    } else {
+      #' ensure 0 * anything = 0
+      eta <- as.vector(mm[ , ok, drop=FALSE] %*% coefs[ok])
+      for(j in which(!ok)) {
+        mmj <- mm[, j]
+        nonzero <- is.na(mmj) | (mmj != 0)
+        if(any(nonzero))
+          eta[nonzero] <- eta[nonzero] + mmj[nonzero] * coefs[j]
+      }
+    }
+    # offset
+    mo <- model.offset(fram)
+    if(!is.null(mo)) {
+      if(is.matrix(mo))
+        mo <- apply(mo, 1, sum)
+      eta <- mo + eta
+    }
+    switch(type,
+           link = {
+             answer <- eta
+           },
+           response = {
+             linkinv <- family(fit)$linkinv
+             answer <- linkinv(eta)
+           })
+  }
+  # Convert from fitted logistic prob. to lambda for logistic fit
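+  # (the logistic device fits p = lambda/(lambda + B), where B = .logi.B is
+  #  the dummy intensity, hence lambda = B * p/(1 - p))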
+  if(type == "response" && family(fit)$family=="binomial")
+    answer <- fit$data$.logi.B[1] * answer/(1-answer)
+  return(answer)
+}
+
+# An 'equalpairs' matrix E is needed in the ppm class
+# to determine which quadrature points and data points are identical
+# (not just which quadrature points are data points).
+# It is a two-column matrix specifying all the identical pairs.
+# The first column gives the index of a data point (in the data pattern X)
+# and the second column gives the corresponding index in U.
+
+# The following function determines the equal pair information
+# from the coordinates (and marks) of U and X alone;
+# it should be used only if we can't figure out this information otherwise.
+
+equalpairs <- function(U, X, marked=FALSE) {
+  nn <- nncross(U, X)
+  coincides <- (nn$dist == 0)
+  Xind <- nn$which[coincides]
+  Uind <- which(coincides)
+  if(marked) {
+    samemarks <- (marks(X)[Xind] == marks(U)[Uind])
+    Xind <- Xind[samemarks]
+    Uind <- Uind[samemarks]
+  }
+  return(cbind(Xind, Uind))
+}
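+
+## Illustrative sketch: if U contains the data pattern X verbatim,
+## equalpairs() recovers the pairing.
+##   X <- runifpoint(10)
+##   U <- superimpose(X, runifpoint(20), W=Window(X))
+##   E <- equalpairs(U, X)   # 10 rows; E[,1] indexes X, E[,2] indexes U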
+
+  
+fill.coefs <- function(coefs, required) {
+  # 'coefs' should contain all the 'required' values
+  coefsname <- deparse(substitute(coefs))
+  nama <- names(coefs)
+  if(is.null(nama)) {
+    #' names cannot be matched
+    if(length(coefs) != length(required))
+      stop(paste("The unnamed argument", sQuote(coefsname),
+                 "has", length(coefs), "entries, but",
+                 length(required), "are required"),
+           call.=FALSE)
+    # blithely assume they match 1-1
+    names(coefs) <- required
+    return(coefs)
+  }
+  stopifnot(is.character(required))
+  if(identical(nama, required)) return(coefs)
+  inject <- match(nama, required)
+  if(any(notneeded <- is.na(inject))) {
+    warning(paste("Internal glitch: some coefficients were not required:",
+                  commasep(sQuote(nama[notneeded]))),
+            call.=FALSE)
+    coefs <- coefs[!notneeded]
+    nama <- names(coefs)
+    inject <- match(nama, required)
+  }
+  y <- numeric(length(required))
+  names(y) <- required
+  y[inject] <- coefs
+  return(y)
+}
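+
+## Illustrative sketch (hypothetical values): entries of 'required' that are
+## missing from 'coefs' are filled with zeroes.
+##   fill.coefs(c(beta1=2), required=c("beta0", "beta1"))
+##   ## -> c(beta0=0, beta1=2)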
+ 
diff --git a/R/predictmppm.R b/R/predictmppm.R
new file mode 100755
index 0000000..e634e7b
--- /dev/null
+++ b/R/predictmppm.R
@@ -0,0 +1,370 @@
+#
+#    predictmppm.R
+#
+#	$Revision: 1.9 $	$Date: 2015/10/21 09:06:57 $
+#
+#
+# -------------------------------------------------------------------
+
+predict.mppm <- local({
+
+  predict.mppm <- function(object, ..., newdata=NULL, type=c("trend", "cif"),
+                           ngrid=40, locations=NULL, verbose=FALSE) {
+    ##
+    ##	'object' is the output of mppm()
+    ##
+    model <- object
+    verifyclass(model, "mppm")
+    ##
+    ##  
+    ##       'type'  
+    type <- pickoption("type", type, c(trend="trend",
+                                       lambda="cif",
+                                       cif="cif"), multi=TRUE)
+    want.trend <- "trend" %in% type
+    want.cif   <- "cif"   %in% type
+    selfcheck <- resolve.defaults(list(...), list(selfcheck=FALSE))$selfcheck
+    ##
+    ##
+    if(verbose)
+      cat("Inspecting arguments...")
+    ##
+    ##       'newdata'
+    use.olddata <- is.null(newdata)
+    if(use.olddata) {
+      newdata <- model$data
+      newdataname <- "Original data"
+    } else {
+      stopifnot(is.data.frame(newdata) || is.hyperframe(newdata))
+      newdataname <- sQuote("newdata")
+    }
+    ##
+    ##
+    ##    Locations for prediction
+    if(is.hyperframe(locations)) 
+      locations <- locations[,1,drop=TRUE]
+    if(is.list(locations))
+      cls <- unique(sapply(locations, class))
+
+    loctype <-
+      if(is.null(locations)) "null" else
+      if(is.data.frame(locations))  "data.frame" else
+      if(is.list(locations)) {
+        if(any(c("ppp", "quad") %in% cls)) "points"
+        else if("owin" %in% cls) {
+          if(all(sapply(locations, is.mask)))
+            "mask"
+          else
+            "window"
+        } else "unknown"
+      } else "unknown"
+
+    need.grid <- switch(loctype,
+                        null      =TRUE,
+                        data.frame=FALSE,
+                        points    =FALSE,
+                        mask      =FALSE,
+                        window    =TRUE,
+                        unknown   =stop("Unrecognised format for locations"))
+    make.image <- need.grid || (loctype == "mask")
+    ##  
+    locationvars <- c("x", "y", "id")
+    ##  
+    ##
+    if(verbose)
+      cat("done.\nDetermining locations for prediction...")
+    if(need.grid) {
+      ## prediction on a grid is required
+      if(is.data.frame(newdata))
+        stop(paste("Cannot predict model on a grid;", newdataname,
+                   "are a data frame"))
+    } else {
+      ## prediction at  `locations' is required
+      if(is.hyperframe(newdata)) {
+        ## check consistency between locations and newdata
+        nloc <- length(locations)
+        nnew <- summary(newdata)$ncases
+        if(nloc != nnew)
+          stop(paste("Length of argument", sQuote("locations"), paren(nloc),
+                     "does not match number of rows in",
+                     newdataname, paren(nnew)))
+      } else {
+        ## newdata is a data frame
+        if(!is.data.frame(locations)) 
+          stop(paste(newdataname,
+                     "is a data frame; locations must be a data frame"))
+        else {
+          stopifnot(nrow(locations) == nrow(newdata))
+          dup <- names(newdata) %in% names(locations)
+          if(any(dup))
+            for(nam in names(newdata)[dup])
+              if(!isTRUE(all.equal(newdata[,nam], locations[,nam])))
+                stop(paste("The data frames newdata and locations",
+                           "both have a column called", sQuote(nam),
+                           "but the entries differ"))
+          nbg <- !(locationvars %in% c(names(newdata),names(locations)))
+          if(any(nbg))
+            stop(paste(ngettext(sum(nbg), "Variable", "Variables"),
+                       commasep(locationvars[nbg]),
+                       "not provided"))
+          ## merge the two data frames
+          newdata <- cbind(newdata[,!dup], locations)
+          locations <- NULL
+        }
+      }
+    }
+    if(verbose)
+      cat("done.\n Constructing data for prediction...")
+    ##  
+    ##
+    ## extract fitted glm/gam/glmm object
+    FIT <- model$Fit$FIT
+    ## extract names of interaction variables
+    Vnamelist <- model$Fit$Vnamelist
+    vnames <- unlist(Vnamelist)
+    ##
+    ##  
+    ## newdata is data frame
+    if(is.data.frame(newdata)) {
+      if(verbose)
+        cat("(data frame)...")
+      if(need.grid)
+        stop("Cannot predict model on a grid; newdata is a data frame")
+      ## use newdata as covariates
+      nbg <- !(locationvars %in% names(newdata))
+      if(any(nbg))
+        stop(paste(ngettext(sum(nbg), "variable", "variables"),
+                   commasep(locationvars[nbg]),
+                   "not provided"))
+      ## create output data frame
+      answer <- as.data.frame(matrix(, nrow=nrow(newdata), ncol=0),
+                              row.names=row.names(newdata))
+      if(want.trend) {
+        ## add interaction components, set to zero (if any)
+        if(length(vnames) > 0)
+          newdata[, vnames] <- 0
+        ## compute fitted values
+        answer$trend <- predict(FIT, newdata=newdata, type="response")
+      }
+      if(want.cif) {
+        warning("Not yet implemented (computation of cif in data frame case)")
+        ## split data frame by 'id'
+        ## compute interaction components using existing point patterns
+        ## compute fitted values
+      }
+      return(answer)
+    }
+  
+    ## newdata is a hyperframe
+    if(verbose)
+      cat("(hyperframe)...")
+    sumry <- summary(newdata)
+    npat.new <- sumry$ncases
+    ## name of response point pattern in model
+    Yname <- model$Info$Yname
+    ##
+    ## Determine response point patterns if known.
+    ## Extract from newdata if available
+    ## Otherwise from the original data if appropriate
+    if(verbose)
+      cat("(responses)...")
+    Y <- if(Yname %in% sumry$col.names) 
+      newdata[, Yname, drop=TRUE, strip=FALSE]
+    else if(npat.new == model$npat)
+      model$data[, Yname, drop=TRUE, strip=FALSE]
+    else NULL
+    ##
+    if(want.cif && is.null(Y))
+      stop(paste("Cannot compute cif:",
+                 "newdata does not contain column", dQuote(Yname),
+                 "of response point patterns"))
+    ##
+    ## Determine windows for prediction 
+    if(verbose)
+      cat("(windows)...")
+    Wins <- if(!need.grid)
+      lapply(locations, as.owin, fatal=FALSE)
+    else if(!is.null(Y))
+      lapply(Y, as.owin, fatal=FALSE)
+    else NULL
+    if(is.null(Wins) || any(sapply(Wins, is.null)))
+      stop("Cannot determine windows where predictions should be made")
+    ##
+    ##
+    if(is.null(Y)) {
+      ## only want trend; empty patterns will do
+      Y <- lapply(Wins, emptypattern)
+    }
+    
+    ## ensure Y contains data points only 
+    if(inherits(Y[[1]], "quad"))
+      Y <- lapply(Y, getElement, name="data")
+
+    ## Determine locations for prediction
+    if(need.grid) {
+      ## Generate grids of dummy locations 
+      if(verbose)
+        cat("(grids)...")
+      Gridded <- lapply(Wins, gridsample, ngrid=ngrid)
+      Dummies   <- lapply(Gridded, getElement, name="D")
+      Templates <- lapply(Gridded, getElement, name="I")
+    } else {
+      ## locations are given somehow
+      if(verbose)
+        cat("(locations)...")
+      if(loctype == "points")
+        Dummies <- locations
+      else if(loctype == "mask") {
+        Dummies <- lapply(locations, punctify)
+        Templates <- lapply(locations, as.im)
+      } else
+        stop("Internal error: illegal loctype")
+    }
+  
+    ## Pack into quadschemes
+    if(verbose)
+      cat("(quadschemes)...")
+    Quads <- list()
+    for(i in seq(npat.new)) 
+      Quads[[i]] <- quad(data=Y[[i]], dummy=Dummies[[i]])
+    ## Insert quadschemes into newdata
+    newdata[, Yname] <- Quads
+    
+    ## Determine interactions to be used
+    if(verbose)
+      cat("(interactions)...")
+    interactions <- model$Inter$interaction
+    ninter <- if(is.hyperframe(interactions)) nrow(interactions) else 1
+    nnew <- nrow(newdata)
+    if(ninter != nnew && ninter != 1) {
+      if(!all(model$Inter$constant))
+        stop(paste("Number of rows of newdata", paren(nnew),
+                   "does not match number of interactions in model",
+                   paren(ninter)))
+      interactions <- interactions[1, ]
+    }
+
+    ## compute the Berman-Turner frame
+    if(verbose)
+      cat("done.\nStarting prediction...(Berman-Turner frame)...")
+    moadf <- mppm(formula     = model$formula,
+                  data        = newdata,
+                  interaction = interactions,
+                  iformula    = model$iformula,
+#%^!ifdef RANDOMEFFECTS                
+                  random      = model$random,
+#%^!endif                
+                  use.gam     = model$Fit$use.gam,
+                  correction  = model$Info$correction,
+                  rbord       = model$Info$rbord,
+                  backdoor    = TRUE)
+    ## compute fitted values
+    if(verbose)
+      cat("(glm prediction)...")
+    values <- moadf[, c("x", "y", "id")]
+    if(want.cif)
+      values$cif <- predict(FIT, newdata=moadf, type="response")
+    if(want.trend) {
+      if(length(vnames) == 0) {
+        ## Poisson model: trend = cif 
+        values$trend <-
+          if(want.cif) values$cif else
+          predict(FIT, newdata=moadf, type="response")
+      } else {
+        ## zero the interaction components
+        moadf[, vnames] <- 0
+        ## compute fitted values
+        values$trend <- predict(FIT, newdata=moadf, type="response")
+      }
+    }
+    if(verbose)
+      cat("done.\nReshaping results...")
+    ##
+    ## Reshape results
+    ## separate answers for each image
+    values <- split(values, values$id)
+    ## 
+    Trends <- list()
+    Lambdas <- list()
+    if(!make.image) {
+      if(verbose)
+        cat("(marked point patterns)...")
+      ## values become marks attached to locations
+      for(i in seq(npat.new)) {
+        Val <- values[[i]]
+        Loc <- Dummies[[i]]
+        isdum <- !is.data(Quads[[i]])
+        if(selfcheck)
+          if(length(isdum) != length(Val$trend))
+            stop("Internal error: mismatch between data frame and locations")
+        if(want.trend)
+          Trends[[i]] <- Loc %mark% (Val$trend[isdum])
+        if(want.cif)
+          Lambdas[[i]] <- Loc %mark% (Val$cif[isdum])
+      }
+    } else {
+      if(verbose)
+        cat("(pixel images)...")
+      ## assign values to pixel images
+      for(i in seq(npat.new)) {
+        values.i <- values[[i]]
+        Q.i <- Quads[[i]]
+        values.i <- values.i[!is.data(Q.i), ]
+        Template.i <- Templates[[i]]
+        ok.i <- !is.na(Template.i$v)
+        if(sum(ok.i) != nrow(values.i))
+          stop("Internal error: mismatch between data frame and image")
+        if(selfcheck) {
+          dx <- rasterx.im(Template.i)[ok.i] - values.i$x
+          dy <- rastery.im(Template.i)[ok.i] - values.i$y
+          cat(paste("i=", i, "range(dx) =", paste(range(dx), collapse=", "),
+                    "range(dy) =", paste(range(dy), collapse=", "), "\n"))
+        }
+        if(want.trend) {
+          Trend.i <- Template.i
+          Trend.i$v[ok.i] <- values.i$trend
+          Trends[[i]] <- Trend.i
+        }
+        if(want.cif) {
+          Lambda.i <- Template.i
+          Lambda.i$v[ok.i] <- values.i$cif
+          Lambdas[[i]] <- Lambda.i
+        }
+      }
+    }
+    if(verbose)
+      cat("done.\n")
+    ## answer is a hyperframe
+    Answer <- hyperframe(id=factor(levels(moadf$id)),
+                         row.names=sumry$row.names)
+    if(want.trend)
+      Answer$trend <- Trends
+    if(want.cif)
+      Answer$cif <- Lambdas
+    return(Answer)
+  }
+
+  ## helper functions
+  emptypattern <- function(w) { ppp(numeric(0), numeric(0), window=w) }
+
+  gridsample <- function(W, ngrid) {
+    masque <- as.mask(W, dimyx=ngrid)
+    xx <- raster.x(masque)
+    yy <- raster.y(masque)
+    xpredict <- xx[masque$m]
+    ypredict <- yy[masque$m]
+    Dummy <- ppp(xpredict, ypredict, window=W)
+    Image <- as.im(masque)
+    return(list(D=Dummy, I=Image))
+  }
+
+  punctify <- function(M) { 
+    xx <- raster.x(M)
+    yy <- raster.y(M)
+    xpredict <- xx[M$m]
+    ypredict <- yy[M$m]
+    return(ppp(xpredict, ypredict, window=M))
+  }
+  
+  predict.mppm
+})
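+
+## Illustrative usage (sketch; 'waterstriders' is a standard spatstat dataset
+## containing three point patterns):
+##   H <- hyperframe(Y=waterstriders)
+##   fit <- mppm(Y ~ 1, data=H)
+##   P <- predict(fit)   # hyperframe with one trend image per pattern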
diff --git a/R/profilepl.R b/R/profilepl.R
new file mode 100755
index 0000000..6402737
--- /dev/null
+++ b/R/profilepl.R
@@ -0,0 +1,367 @@
+#
+# profilepl.R
+#
+#  $Revision: 1.43 $  $Date: 2017/06/05 10:31:58 $
+#
+#  computes profile log pseudolikelihood
+#
+
+profilepl <- local({
+
+  ## Determine edge correction
+  ## with partial matching, avoiding collisions with
+  ## other arguments to ppm that have similar names.
+  getppmcorrection <- function(..., correction = "border",
+           covariates = NULL, covfunargs = NULL, control = NULL) {
+    return(correction)
+  }
+  isSingleNA <- function(x) { length(x) == 1 && is.na(x) }
+  
+  profilepl <- function(s, f, ..., aic=FALSE, rbord=NULL, verbose=TRUE) {
+    callenv <- parent.frame()
+    s <- as.data.frame(s)
+    n <- nrow(s)
+    stopifnot(is.function(f))
+    ## validate 's'
+    parms <- names(s)
+    fargs <- names(formals(f))
+    if(!all(fargs %in% parms)) {
+      bad <- !(fargs %in% parms)
+      forgiven <- sapply(formals(f)[bad], isSingleNA)
+      if(!all(forgiven)) {
+        slecht <- fargs[bad][!forgiven]
+        nsl <- length(slecht)
+        stop(paste(ngettext(nsl, "Argument", "Arguments"),
+                   commasep(sQuote(slecht)),
+                   ngettext(nsl, "is", "are"),
+                   "not provided in the data frame s"))
+      }
+    }
+    ## extra columns in 's' are assumed to be parameters of covariate functions
+    is.farg <- parms %in% fargs
+    pass.cfa <- any(!is.farg)
+    got.cfa <- "covfunargs" %in% names(list(...))
+    if(pass.cfa && got.cfa)
+      stop("Some columns in s are superfluous")
+    ##
+    criterion <- numeric(n)
+    ## make a fake call
+    pseudocall <- match.call()
+    pseudocall[[1]] <- as.symbol("ppm")
+    namcal <- names(pseudocall)
+    ## remove arguments 's' and 'verbose'
+    retain <- !(namcal %in% c("s", "verbose"))
+    pseudocall <- pseudocall[retain]
+    namcal <- namcal[retain]
+    ## place 'f' argument third 
+    np <- length(pseudocall)
+    fpos <- (1:np)[namcal == "f"]
+    indices <- (1:np)[-fpos]
+    if(length(indices) < 3) {
+      indices <- c(indices, fpos)
+    } else {
+      indices <- c(indices[1:3], fpos, indices[-(1:3)])
+    }
+    pseudocall <- pseudocall[indices]
+    namcal <- names(pseudocall)
+    namcal[namcal=="f"] <- "interaction"
+    names(pseudocall) <- namcal
+    ## get correction
+    correction <- getppmcorrection(...)
+    if(correction == "border") {
+      ## determine border correction distance
+      if(is.null(rbord)) {
+        ## compute rbord = max reach of interactions
+        if(verbose) message("(computing rbord)")
+        for(i in 1:n) {
+          fi <- do.call(f, as.list(s[i, is.farg, drop=FALSE]))
+          if(!inherits(fi, "interact"))
+            stop(paste("f did not yield an object of class",
+                       sQuote("interact")))
+          re <- reach(fi)
+          if(is.null(rbord))
+            rbord <- re
+          else if(rbord < re)
+            rbord <- re
+        }
+      }
+    } 
+    ## determine whether computations can be saved
+    if(pass.cfa || got.cfa) {
+      savecomp <- FALSE
+    } else {
+      Q <- do.call(ppm,
+                   append(list(...), list(rbord=rbord, justQ=TRUE)),
+                   envir=callenv)
+      savecomp <- !oversize.quad(Q)
+    }
+    ## go
+    gc()
+    if(verbose) {
+      message(paste("comparing", n, "models..."))
+      pstate <- list()
+    }
+    for(i in 1:n) {
+      if(verbose)
+        pstate <- progressreport(i, n, state=pstate)
+      fi <- do.call(f, as.list(s[i, is.farg, drop=FALSE]))
+      if(!inherits(fi, "interact"))
+        stop(paste("f did not yield an object of class", sQuote("interact")))
+      if(pass.cfa)
+        cfai <- list(covfunargs=as.list(s[i, !is.farg, drop=FALSE])) 
+      ## fit model
+      if(i == 1) {
+        ## fit from scratch
+        arg1 <- list(...,
+                     interaction=fi, 
+                     rbord=rbord, savecomputed=savecomp,
+                     warn.illegal=FALSE,
+                     callstring="",
+                     skip.border=TRUE)
+        if(pass.cfa) arg1 <- append(arg1, cfai)
+        fiti <- do.call(ppm, arg1, envir=callenv)
+        ## save intermediate computations (pairwise distances, etc)
+        precomp <- fiti$internal$computed
+        savedargs <- list(...,
+                          rbord=rbord, precomputed=precomp,
+                          warn.illegal=FALSE,
+                          callstring="",
+                          skip.border=TRUE)
+      } else {
+        ## use precomputed data
+        argi <- append(savedargs, list(interaction=fi))
+        if(pass.cfa) argi <- append(argi, cfai)
+        fiti <- do.call(ppm, argi, envir=callenv)
+      }
+      ## save log pl for each fit
+      criterion[i] <-
+          if(aic) -AIC(fiti) else as.numeric(logLik(fiti, warn=FALSE))
+      ## save fitted coefficients for each fit
+      co <- coef(fiti)
+      if(i == 1) {
+        allcoef <- data.frame(matrix(co, nrow=1))
+        names(allcoef) <- names(co)
+      } else
+        allcoef <- rbind(allcoef, co)
+    }
+    if(verbose) message("fitting optimal model...")
+    opti <- which.max(criterion)
+    gc()
+    optint <- do.call(f, as.list(s[opti, is.farg, drop=FALSE]))
+    optarg <- list(..., interaction=optint, rbord=rbord)
+    if(pass.cfa) {
+      optcfa <- as.list(s[opti, !is.farg, drop=FALSE])
+      attr(optcfa, "fitter") <- "profilepl"
+      optarg <- append(optarg, list(covfunargs=optcfa))
+    }
+    optfit <- do.call(ppm, optarg, envir=callenv)
+    if(verbose) message("done.")
+    critname <- if(aic) "-AIC" else
+                if(is.poisson(optfit)) "log l" else
+                if(optfit$method == "logi") "log CL" else "log PL"
+    result <- list(param=s,
+                   prof=criterion,
+                   critname=critname,
+                   iopt=opti,
+                   fit=optfit,
+                   rbord=rbord,
+                   fname=as.interact(optfit)$name,
+                   allcoef=allcoef,
+                   otherstuff=list(...),
+                   pseudocall=pseudocall)
+    class(result) <- c("profilepl", class(result))
+    return(result)
+  }
+
+  profilepl
+})
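+
+## Illustrative usage (sketch): profile over the Strauss interaction radius
+##   s <- data.frame(r=seq(0.05, 0.15, by=0.01))
+##   ps <- profilepl(s, Strauss, cells ~ 1)
+##   plot(ps)
+##   as.ppm(ps)   # model refitted at the optimal r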
+
+##
+##   print method
+##
+
+print.profilepl <- function(x, ...) {
+  head1 <- "profile log pseudolikelihood"
+  head2 <- "for model: "
+  psc <- paste(unlist(strsplitretain(format(x$pseudocall))),
+               collapse=" ")
+  if(nchar(psc) + nchar(head2) + 1 <= getOption('width')) {
+    splat(head1)
+    splat(head2, psc)
+  } else {
+    splat(head1, head2)
+    splat(psc)
+  }
+  nparm <- ncol(x$param)
+  if(waxlyrical('extras')) {
+    corx <- x$fit$correction
+    if(identical(corx, "border") && !is.null(x$rbord))
+      splat("fitted with rbord =", x$rbord)
+    splat("interaction:", x$fname)
+    splat("irregular",
+          ngettext(nparm, "parameter:", "parameters:\n"),
+          paste(names(x$param),
+                "in",
+                unlist(lapply(lapply(as.list(x$param), range), prange)),
+                collapse="\n"))
+  }
+  popt <- x$param[x$iopt,, drop=FALSE]
+  splat("optimum",
+        ngettext(nparm, "value", "values"),
+        "of irregular",
+        ngettext(nparm, "parameter: ", "parameters:\n"),
+        commasep(paste(names(popt), "=", popt)))
+  invisible(NULL)
+}
+
+##
+##   summary method
+##
+
+summary.profilepl <- function(object, ...) {
+  print(object)
+  cat("\n\noptimal model:\n")
+  print(object$fit)
+}
+
+as.ppm.profilepl <- function(object) {
+  object$fit
+}
+
+predict.profilepl <- function(object, ...) {
+  predict(object$fit, ...)
+}
+
+##
+##  plot method 
+##
+
+plot.profilepl <- local({
+
+  plot.profilepl <- function(x, ..., add=FALSE, main=NULL,
+                             tag=TRUE, coeff=NULL, xvariable=NULL,
+                             col=1, lty=1, lwd=1,
+                             col.opt="green", lty.opt=3, lwd.opt=1) {
+    para <- x$param
+    ## graphics arguments may be expressions involving parameters
+    if(ncol(para) > 1) {
+      col <- eval(substitute(col), para)
+      lwd <- eval(substitute(lwd), para)
+      lty <- eval(substitute(lty), para)
+      px <- cbind(para, col, lwd, lty, stringsAsFactors=FALSE)
+      col <- px$col
+      lwd <- px$lwd
+      lty <- px$lty
+    }
+    ## strip any column that is entirely na
+    nacol <- sapply(para, none.finite)
+    para <- para[, !nacol, drop=FALSE]
+    ## 
+    npara <- ncol(para)
+    ## main header
+    if(is.null(main))
+      main <- short.deparse(x$pseudocall)
+    ## x variable for plot
+    if(is.null(xvariable)) {
+      xvalues <- para[,1]
+      xname <- names(para)[1]
+    } else {
+      stopifnot(is.character(xvariable))
+      if(!(xvariable %in% names(para)))
+        stop("there is no irregular parameter named", sQuote(xvariable))
+      xvalues <- para[[xvariable]]
+      xname <- xvariable
+    }
+    ## y variable for plot                  
+    if(is.null(coeff)) {
+      yvalues <- x$prof
+      ylab <- x$critname %orifnull% "log pl"
+    } else {
+      stopifnot(is.character(coeff))
+      allcoef <- x$allcoef
+      if(!(coeff %in% names(allcoef)))
+        stop(paste("there is no coefficient named", sQuote(coeff),
+                   "in the fitted model"))
+      yvalues <- allcoef[[coeff]]
+      ylab <- paste("coefficient:", coeff)
+    }
+    ## start plot
+    if(!add)
+      do.call.matched(plot.default,
+                      resolve.defaults(list(x=range(xvalues), y=range(yvalues)),
+                                       list(type="n", main=main),
+                                       list(...),
+                                       list(ylab=ylab, xlab=xname)),
+                      extrargs=graphicsPars("plot"))
+
+    linepars <- graphicsPars("lines")
+  
+    if(npara == 1) {
+      ## single curve
+      do.call.matched(lines.default,
+                      resolve.defaults(list(x=xvalues, y=yvalues, ...),
+                                       spatstat.options("par.fv")),
+                      extrargs=linepars)
+    } else {
+      ## multiple curves
+      other <- para[, -1, drop=FALSE]
+      tapply(1:nrow(para),
+             as.list(other),
+             plotslice, 
+             xvalues=xvalues, yvalues=yvalues, other=other,
+             tag=tag, ...,
+             col=col, lwd=lwd, lty=lty,
+             lineargs=linepars)
+    }
+
+    
+    ## show optimal value
+    do.call.matched(abline,
+                    resolve.defaults(list(v = xvalues[x$iopt]),
+                                     list(...),
+                                     list(lty=lty.opt, lwd=lwd.opt,
+                                          col=col.opt)),
+                    extrargs=linepars)
+    return(invisible(NULL))
+  }
+
+  plotslice <- function(z, xvalues, yvalues, other, tag=TRUE, ...,
+                        lty=1, col=1, lwd=1, lineargs) {
+    fz <- xvalues[z]
+    pz <- yvalues[z]
+    n <- length(xvalues)
+    if(length(lty) == n) lty <- unique(lty[z])[1]
+    if(length(col) == n) col <- unique(col[z])[1]
+    if(length(lwd) == n) lwd <- unique(lwd[z])[1]
+    do.call.matched(lines.default,
+                    resolve.defaults(list(x=fz, y=pz,
+                                          col=col, lwd=lwd, lty=lty),
+                                     list(...)),
+                    extrargs=lineargs)
+    if(tag) {
+      oz <- other[z, , drop=FALSE]
+      uniques <- apply(oz, 2, unique)
+      labels <- paste(names(uniques), "=", uniques, sep="")
+      label <- paste(labels, collapse=",")
+      ii <- which.max(pz)
+      do.call.matched(text.default,
+                      list(x=fz[ii], y=pz[ii], labels=label,
+                           col=col, ...),
+                      funargs=graphicsPars("text"))
+    }
+    return(NULL)
+  }
+
+  none.finite <- function(x) all(!is.finite(x))
+  
+  plot.profilepl
+})
+
+
+simulate.profilepl <- function(object, ...) {
+  simulate(as.ppm(object), ...)
+}
+
+parameters.profilepl <- function(model, ...) {
+  parameters(as.ppm(model))
+}
diff --git a/R/progress.R b/R/progress.R
new file mode 100644
index 0000000..eed5dc9
--- /dev/null
+++ b/R/progress.R
@@ -0,0 +1,314 @@
+#
+#   progress.R
+#
+#   $Revision: 1.21 $  $Date: 2016/04/25 02:34:40 $
+#
+#   progress plots (envelope representations)
+#
+
+dclf.progress <- function(X, ...)
+  mctest.progress(X, ..., exponent=2)
+
+mad.progress <- function(X, ...)
+  mctest.progress(X, ..., exponent=Inf)
+
+mctest.progress <- local({
+
+  smoothquantile <- function(z, alpha) {
+    min(quantile(density(z), 1-alpha), max(z))
+  }
+  
+  silentmax <- function(z) {
+    if(all(is.nan(z))) return(NaN)
+    z <- z[is.finite(z)]
+    if(length(z) == 0) return(NA) else return(max(z))
+  }
+
+  mctest.progress <- function(X, fun=Lest, ...,
+                              exponent=1, nrank=1, interpolate=FALSE,
+                              alpha, rmin=0) {
+    check.1.real(exponent)
+    explain.ifnot(exponent >= 0)
+    if(missing(fun) && inherits(X, "envelope"))
+      fun <- NULL
+    Z <- envelopeProgressData(X, fun=fun, ..., rmin=rmin, exponent=exponent)
+    R       <- Z$R
+    devdata <- Z$devdata
+    devsim  <- Z$devsim
+    nsim    <- ncol(devsim)
+    # determine 'alpha' and 'nrank'
+    if(missing(alpha)) {
+      if((nrank %% 1) != 0)
+        stop("nrank must be an integer")
+      alpha   <- nrank/(nsim + 1)
+    } else {
+      check.1.real(alpha)
+      stopifnot(alpha > 0 && alpha < 1)
+      if(!interpolate) {
+        if(!missing(nrank))
+          warning("nrank was ignored because alpha was given", call.=FALSE)
+        nrank <- alpha * (nsim + 1)
+        if(abs(nrank - round(nrank)) > 1e-2)
+          stop("alpha should be a multiple of 1/(nsim + 1)", call.=FALSE)
+        nrank <- as.integer(round(nrank))
+      }
+    }
+    alphastring <- paste(100 * alpha, "%%", sep="")
+    # compute critical values
+    critval <-
+      if(interpolate) apply(devsim, 1, smoothquantile, alpha=alpha) else
+      if(nrank == 1) apply(devsim, 1, silentmax) else
+      apply(devsim, 1, orderstats, k=nrank, decreasing=TRUE)
+    # create fv object
+    fname  <- if(is.infinite(exponent)) "mad" else
+              if(exponent == 2) "T" else paste("D[",exponent,"]", sep="")
+    ylab <- if(is.infinite(exponent)) quote(mad(R)) else
+            if(exponent == 2) quote(T(R)) else
+            eval(substitute(quote(D[p](R)), list(p=exponent)))
+    df <- data.frame(R=R, obs=devdata, crit=critval, zero=0)
+    mcname <- if(interpolate) "interpolated Monte Carlo" else "Monte Carlo"
+    p <- fv(df,
+            argu="R", ylab=ylab, valu="obs", fmla = . ~ R, 
+            desc = c("Interval endpoint R",
+              "observed value of test statistic %s",
+              paste(mcname, alphastring, "critical value for %s"),
+              "zero"),
+            labl=c("R", "%s(R)", "%s[crit](R)", "0"),
+            unitname = unitname(X), fname = fname)
+    fvnames(p, ".") <- c("obs", "crit", "zero")
+    fvnames(p, ".s") <- c("zero", "crit")
+    p <- hasenvelope(p, Z$envelope)  # envelope may be NULL
+    return(p)
+  }
+
+  mctest.progress
+})
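+
+## Illustrative usage (sketch): progress plot of the DCLF test for CSR
+##   plot(dclf.progress(cells, Lest, nsim=19))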
+
+
+# Do not call this function.
+# Performs underlying computations
+
+envelopeProgressData <- local({
+  envelopeProgressData <-
+    function(X, fun=Lest, ..., exponent=1,
+             alternative=c("two.sided", "less", "greater"),
+             leaveout=1, scale=NULL, clamp=FALSE, 
+             normalize=FALSE, deflate=FALSE,
+             rmin=0,
+             save.envelope = savefuns || savepatterns,
+             savefuns = FALSE, 
+             savepatterns = FALSE) {
+    alternative <- match.arg(alternative)
+    if(!(leaveout %in% 0:2))
+      stop("Argument leaveout should equal 0, 1 or 2")
+    ## compute or extract simulated functions
+    X <- envelope(X, fun=fun, ..., alternative=alternative,
+                  savefuns=TRUE, savepatterns=savepatterns)
+    Y <- attr(X, "simfuns")
+    ## extract values
+    R   <- with(X, .x)
+    obs <- with(X, .y)
+    sim <- as.matrix(as.data.frame(Y))[, -1]
+    nsim <- ncol(sim)
+    ## choose function as reference
+    has.theo <- ("theo" %in% names(X))
+    use.theo <- identical(attr(X, "einfo")$use.theory, TRUE)
+    if(use.theo && !has.theo)
+      warning("No theoretical function available; use.theory ignored")
+    if(use.theo && has.theo) {
+#      theo.used <- TRUE
+      reference <- with(X, theo)
+      leaveout <- 0
+    } else {
+#      theo.used <- FALSE
+      if(leaveout == 2) {
+        ## use sample mean of simulations only
+        reference <- with(X, mmean)
+      } else {
+        ## use sample mean of simulations *and* observed 
+        reference <- (nsim * with(X, mmean) + obs)/(nsim + 1)
+      }
+    }
+    ## restrict range
+    if(rmin > 0) {
+      if(sum(R >= rmin) < 2)
+        stop("rmin is too large for the available range of r values")
+      nskip <- sum(R < rmin)
+    } else nskip <- 0
+  
+    ## determine rescaling if any
+    if(is.null(scale)) {
+      scaling <- NULL
+      scr <- 1
+    } else if(is.function(scale)) {
+      scaling <- scale(R)
+      sname <- "scale(r)"
+      ans <- check.nvector(scaling, length(R), things="values of r",
+                           fatal=FALSE, vname=sname)
+      if(!ans)
+        stop(attr(ans, "whinge"), call.=FALSE)
+      if(any(bad <- (scaling <= 0))) {
+        ## issue a warning unless this only happens at r=0
+        if(any(bad[R > 0]))
+          warning(paste("Some values of", sname, "were negative or zero:",
+                        "scale was reset to 1 for these values"),
+                  call.=FALSE)
+        scaling[bad] <- 1
+      }
+      scr <- scaling
+    } else stop("Argument scale should be a function")
+
+    ## compute deviations
+    rawdevDat <- Deviation(obs, reference, leaveout, nsim, sim[,1])
+    rawdevSim <- Deviation(sim, reference, leaveout, nsim)
+    ## evaluate signed/absolute deviation relevant to alternative
+    ddat <- RelevantDeviation(rawdevDat, alternative, clamp, scaling)
+    dsim <- RelevantDeviation(rawdevSim, alternative, clamp, scaling)
+
+    ## compute test statistics
+    if(is.infinite(exponent)) {
+      ## MAD
+      devdata <- cummaxskip(ddat, nskip)
+      devsim <- apply(dsim, 2, cummaxskip, nskip=nskip)
+      if(deflate) {
+        devdata <- scr * devdata
+        devsim <-  scr * devsim
+      }
+      testname <- "Maximum absolute deviation test"
+    } else {
+      dR <- c(0, diff(R))
+      if(clamp || (alternative == "two.sided")) {
+        ## deviations are nonnegative
+        devdata <- cumsumskip(dR * ddat^exponent, nskip)
+        devsim  <- apply(dR * dsim^exponent, 2, cumsumskip, nskip=nskip)
+      } else {
+        ## sign of deviations should be retained
+        devdata <- cumsumskip(dR * sign(ddat) * abs(ddat)^exponent,
+                                  nskip=nskip)
+        devsim  <- apply(dR * sign(dsim) * abs(dsim)^exponent,
+                         2, cumsumskip, nskip=nskip)
+      }
+      if(normalize) {
+        devdata <- devdata/R
+        devsim <- sweep(devsim, 1, R, "/")
+      }
+      if(deflate) {
+        devdata <- scr * sign(devdata) * abs(devdata)^(1/exponent) 
+        devsim <-  scr * sign(devsim) * abs(devsim)^(1/exponent) 
+      }
+      testname <- if(exponent == 2) "Diggle-Cressie-Loosmore-Ford test" else
+                  if(exponent == 1) "Integral absolute deviation test" else
+                  paste("Integrated", ordinal(exponent), "Power Deviation test")
+    }
+    result <- list(R=R, devdata=devdata, devsim=devsim, testname=testname,
+                   scaleR=scr, clamp=clamp)
+    if(save.envelope) 
+      result$envelope <- X
+    return(result)
+  }
+
+  cumsumskip <- function(x, nskip=0) {
+    if(nskip == 0) cumsum(x) else c(rep(NA, nskip), cumsum(x[-seq_len(nskip)]))
+  }
+
+  cummaxskip <- function(x, nskip=0) {
+    if(nskip == 0) cummax(x) else c(rep(NA, nskip), cummax(x[-seq_len(nskip)]))
+  }
+
+  envelopeProgressData
+})
+
+dg.progress <- function(X, fun=Lest, ...,   
+                        exponent=2, nsim=19, nsimsub=nsim-1, nrank=1, alpha, 
+                        leaveout=1, interpolate=FALSE, rmin=0, 
+                        savefuns=FALSE, savepatterns=FALSE,
+                        verbose=TRUE) {
+  env.here <- sys.frame(sys.nframe())
+  if(!missing(nsimsub) && !relatively.prime(nsim, nsimsub))
+    stop("nsim and nsimsub must be relatively prime")
+  ## determine 'alpha' and 'nrank'
+  if(missing(alpha)) {
+    if((nrank %% 1) != 0)
+      stop("nrank must be an integer")
+    alpha   <- nrank/(nsim + 1)
+  } else {
+    check.1.real(alpha)
+    stopifnot(alpha > 0 && alpha < 1)
+    if(!interpolate) {
+      if(!missing(nrank))
+        warning("nrank was ignored because alpha was given", call.=FALSE)
+      nrank <- alpha * (nsim + 1)
+      if(abs(nrank - round(nrank)) > 1e-2)
+        stop("alpha should be a multiple of 1/(nsim + 1)", call.=FALSE)
+      nrank <- as.integer(round(nrank))
+    }
+  }
+  if(verbose)
+    cat("Computing first-level test data...")
+  ## generate or extract simulated patterns and functions
+  E <- envelope(X, fun=fun, ..., nsim=nsim,
+                savepatterns=TRUE, savefuns=TRUE,
+                verbose=FALSE,
+                envir.simul=env.here)
+  ## get progress data
+  PD <- envelopeProgressData(E, fun=fun, ..., rmin=rmin, nsim=nsim,
+                             exponent=exponent, leaveout=leaveout,
+                             verbose=FALSE)
+  ## get first level MC test significance trace
+  T1 <- mctest.sigtrace(E, fun=fun, nsim=nsim, 
+                        exponent=exponent,
+                        leaveout=leaveout,
+                        interpolate=interpolate, rmin=rmin,
+                        confint=FALSE, verbose=FALSE, ...)
+  R    <- T1$R
+  phat <- T1$pest
+  if(verbose) {
+    cat("Done.\nComputing second-level data... ")
+    state <- list()
+  }
+  ## second level traces
+  simpat <- attr(E, "simpatterns")
+  phat2 <- matrix(NA_real_, nrow=length(R), ncol=nsim)
+  for(j in seq_len(nsim)) {
+    simj <- simpat[[j]]
+    sigj <- mctest.sigtrace(simj,
+                            fun=fun, nsim=nsimsub, 
+                            exponent=exponent,
+                            interpolate=interpolate,
+                            leaveout=leaveout,
+                            rmin=rmin,
+                            confint=FALSE, verbose=FALSE, ...)
+    phat2[,j] <- sigj$pest
+    if(verbose) state <- progressreport(j, nsim, state=state)
+  }
+  if(verbose) cat("Done.\n")
+  ## Dao-Genton procedure
+  dgcritrank <- 1 + rowSums(phat > phat2)
+  dgcritrank <- pmin(dgcritrank, nsim)
+  devsim.sort <- t(apply(PD$devsim, 1, sort, decreasing=TRUE, na.last=TRUE))
+  ii <- cbind(seq_along(dgcritrank), dgcritrank)
+  devcrit <- devsim.sort[ii]
+  devdata <- PD$devdata
+  ## create fv object
+  fname  <- if(is.infinite(exponent)) "mad" else
+            if(exponent == 2) "T" else paste("D[",exponent,"]", sep="")
+  ylab <- if(is.infinite(exponent)) quote(mad(R)) else
+          if(exponent == 2) quote(T(R)) else
+          eval(substitute(quote(D[p](R)), list(p=exponent)))
+  df <- data.frame(R=R, obs=devdata, crit=devcrit, zero=0)
+  mcname <- if(interpolate) "interpolated Monte Carlo" else "Monte Carlo"
+  p <- fv(df,
+          argu="R", ylab=ylab, valu="obs", fmla = . ~ R, 
+          desc = c("Interval endpoint R",
+            "observed value of test statistic %s",
+            paste(mcname, paste0(100 * alpha, "%%"), "critical value for %s"),
+            "zero"),
+          labl=c("R", "%s(R)", "%s[crit](R)", "0"),
+          unitname = unitname(X), fname = fname)
+  fvnames(p, ".") <- c("obs", "crit", "zero")
+  fvnames(p, ".s") <- c("zero", "crit")
+  if(savefuns || savepatterns)
+    p <- hasenvelope(p, E)
+  return(p)
+}
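+
+## Editorial sketch, not upstream code: a minimal use of dg.progress,
+## assuming spatstat is attached; 'cells' is a point pattern dataset
+## shipped with spatstat, and fun=Lest, nsim=19 match the defaults above.
+##   p <- dg.progress(cells, Lest, nsim=19, exponent=2)
+##   plot(p)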
+
diff --git a/R/psp.R b/R/psp.R
new file mode 100755
index 0000000..2849b80
--- /dev/null
+++ b/R/psp.R
@@ -0,0 +1,753 @@
+#
+#  psp.R
+#
+#  $Revision: 1.87 $ $Date: 2017/06/05 10:31:58 $
+#
+# Class "psp" of planar line segment patterns
+#
+#
+#################################################
+# creator
+#################################################
+psp <- function(x0, y0, x1, y1, window, marks=NULL,
+                check=spatstat.options("checksegments")) {
+  stopifnot(is.numeric(x0))
+  stopifnot(is.numeric(y0))
+  stopifnot(is.numeric(x1))
+  stopifnot(is.numeric(y1))
+  stopifnot(is.vector(x0))
+  stopifnot(is.vector(y0))
+  stopifnot(is.vector(x1))
+  stopifnot(is.vector(y1))
+  stopifnot(length(x0) == length(y0))
+  stopifnot(length(x1) == length(y1))
+  stopifnot(length(x0) == length(x1))
+  ends <- data.frame(x0=x0,y0=y0,x1=x1,y1=y1)
+  if(!missing(window))
+    verifyclass(window,"owin")
+  if(check) {
+    ok <- inside.owin(x0,y0, window) & inside.owin(x1,y1,window)
+    if((nerr <- sum(!ok)) > 0)
+      stop(paste(nerr, ngettext(nerr, "segment does not", "segments do not"),
+                 "lie entirely inside the window.\n"), call.=FALSE)
+  }
+  out <- list(ends=ends,
+              window=window,
+              n = nrow(ends))
+
+# add marks if any
+  if(!is.null(marks)) {
+    if(is.matrix(marks))
+      marks <- as.data.frame(marks)
+    if(is.data.frame(marks)) {
+      omf <- "dataframe"
+      nmarks <- nrow(marks)
+      rownames(marks) <- seq_len(nmarks)
+      whinge <- "The number of rows of marks"
+    } else {
+      omf <- "vector"
+      names(marks) <- NULL
+      nmarks <- length(marks)
+      whinge <- "The length of the marks vector"
+    }
+    if(nmarks != out$n) stop(paste(whinge, "does not equal the number of segments.\n"))
+    out$marks <- marks
+    out$markformat <- omf
+  } else {
+    out$markformat <- "none"
+  }
+
+  class(out) <- c("psp", class(out))
+  return(out)
+}
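+
+## Editorial sketch, not upstream code: create a pattern of 10 random
+## segments in the unit square (assumes spatstat is attached).
+##   X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+##   plot(X)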
+
+######################################################
+#  conversion
+######################################################
+
+is.psp <- function(x) { inherits(x, "psp") }
+
+as.psp <- function(x, ..., from=NULL, to=NULL) {
+  # special case: two point patterns
+  if(is.null(from) != is.null(to))
+    stop(paste("If one of", sQuote("from"), "and", sQuote("to"),
+               "is specified, then both must be specified.\n"))
+  if(!is.null(from) && !is.null(to)) {
+    verifyclass(from, "ppp")
+    verifyclass(to, "ppp")
+    if(from$n != to$n)
+      stop(paste("The point patterns", sQuote("from"), "and", sQuote("to"),
+                 "have different numbers of points.\n"))
+    uni <- union.owin(from$window, to$window)
+    Y <- do.call(psp,
+                 resolve.defaults(list(from$x, from$y, to$x, to$y),
+                                  list(...),
+                                  list(window=uni)))
+    return(Y)
+  }
+  UseMethod("as.psp")
+}
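+
+## Editorial sketch, not upstream code: join two point patterns of equal
+## size, segment i running from the i-th point of 'from' to the i-th
+## point of 'to'.
+##   S <- as.psp(from=runifpoint(5), to=runifpoint(5))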
+
+as.psp.psp <- function(x, ..., check=FALSE, fatal=TRUE) {
+  if(!verifyclass(x, "psp", fatal=fatal))
+    return(NULL)
+  ends <- x$ends
+  psp(ends$x0, ends$y0, ends$x1, ends$y1, window=x$window,
+      marks=x$marks, check=check)
+}
+
+as.psp.data.frame <- function(x, ..., window=NULL, marks=NULL,
+                              check=spatstat.options("checksegments"), fatal=TRUE) {
+  window <- suppressWarnings(as.owin(window,fatal=FALSE))
+  if(!is.owin(window)) {
+    if(fatal) stop("Cannot interpret \"window\" as an object of class owin.\n")
+    return(NULL)
+  }
+
+  if(checkfields(x,"marks")) {
+    if(is.null(marks)) marks <- x$marks
+    else warning(paste("Column named \"marks\" ignored;\n",
+                       "argument named \"marks\" has precedence.\n",sep=""))
+    x$marks <- NULL
+  }
+
+  if(checkfields(x, c("x0", "y0", "x1", "y1"))) {
+    out <- psp(x$x0, x$y0, x$x1, x$y1, window=window,
+               check=check)
+    x <- x[-match(c("x0","y0","x1","y1"),names(x))]
+  }
+  else if(checkfields(x, c("xmid", "ymid", "length", "angle"))) {
+    rr <- x$length/2
+    dx <- cos(x$angle) * rr
+    dy <- sin(x$angle) * rr
+    bb <- boundingbox(window)
+    rmax <- max(rr)
+    bigbox <- owin(bb$xrange + c(-1,1) * rmax, bb$yrange + c(-1,1) * rmax)
+    pattern <- psp(x$xmid - dx, x$ymid - dy, x$xmid + dx, x$ymid + dy,
+                   window=bigbox,check=FALSE)
+    out <- pattern[window]
+    x <- x[-match(c("xmid","ymid","length","angle"),names(x))]
+  }
+  else if(ncol(x) >= 4) {
+    out <- psp(x[,1], x[,2], x[,3], x[,4], window=window,
+               check=check)
+    x <- x[-(1:4)]
+  }
+  else if(fatal)
+    stop("Unable to interpret x as a line segment pattern.", call.=FALSE)
+  else out <- NULL
+
+  if(!is.null(out)) {
+    if(is.null(marks) && ncol(x) > 0) marks <- x
+    if(is.null(marks)) {
+       out$markformat <- "none"
+    } else {
+       out$marks <- marks
+       out$markformat <- if(is.data.frame(marks)) "dataframe" else "vector"
+       out <- as.psp(out,check=FALSE)
+    }
+  }
+  return(out)
+}
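+
+## Editorial sketch, not upstream code: a data frame with columns
+## x0, y0, x1, y1 is interpreted directly; any remaining columns
+## become marks.
+##   df <- data.frame(x0=runif(4), y0=runif(4), x1=runif(4), y1=runif(4))
+##   X <- as.psp(df, window=owin())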
+
+as.psp.matrix <- function(x, ..., window=NULL, marks=NULL,
+                          check=spatstat.options("checksegments"), fatal=TRUE) {
+   x <- as.data.frame(x)
+   as.psp(x,...,window=window,marks=marks,check=check,fatal=fatal)
+}
+
+as.psp.default <- function(x, ..., window=NULL, marks=NULL,
+                           check=spatstat.options("checksegments"), fatal=TRUE) {
+  if(checkfields(x,"marks")) {
+    if(is.null(marks)) marks <- x$marks
+    else warning(paste("Component of \"x\" named \"marks\" ignored;\n",
+                       "argument named \"marks\" has precedence.\n",sep=""))
+  }
+  if(checkfields(x, c("x0", "y0", "x1", "y1")))
+    return(psp(x$x0, x$y0, x$x1, x$y1, window=window, marks=marks,
+               check=check))
+  else if(checkfields(x, c("xmid", "ymid", "length", "angle"))) {
+    rr <- x$length/2
+    dx <- cos(x$angle) * rr
+    dy <- sin(x$angle) * rr
+    window <- as.owin(window)
+    bb <- boundingbox(window)
+    rmax <- max(rr)
+    bigbox <- owin(bb$xrange + c(-1,1) * rmax, bb$yrange + c(-1,1) * rmax)
+    pattern <- psp(x$xmid - dx, x$ymid - dy, x$xmid + dx, x$ymid + dy,
+                   window=bigbox, marks=marks, check=FALSE)
+    clipped <- pattern[window]
+    return(clipped)
+  }
+  else if(fatal)
+    stop("Unable to interpret x as a line segment pattern")
+  return(NULL)
+}
+
+as.psp.owin <- function(x, ..., window=NULL,
+                        check=spatstat.options("checksegments"), fatal=TRUE) {
+  .Deprecated("edges", package="spatstat")
+  edges(x, ..., window=window, check=check)
+}
+
+
+edges <- function(x, ...,
+                  window=NULL, check=FALSE) {
+  x <- as.owin(x)
+  if(is.null(window)) window <- as.rectangle(x)
+  x <- as.polygonal(x)
+  x0 <- y0 <- x1 <- y1 <- numeric(0)
+  bdry <- x$bdry
+  for(i in seq_along(bdry)) {
+    po <- bdry[[i]]
+    ni <- length(po$x)
+    nxt <- c(2:ni, 1)
+    x0 <- c(x0, po$x)
+    y0 <- c(y0, po$y)
+    x1 <- c(x1, po$x[nxt])
+    y1 <- c(y1, po$y[nxt])
+  }
+  out <- psp(x0, y0, x1, y1,  window=window, check=check)
+  return(out)
+}
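+
+## Editorial sketch, not upstream code: extract the boundary edges of the
+## polygonal window 'letterR' shipped with spatstat.
+##   E <- edges(letterR)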
+
+xypolygon2psp <- function(p, w, check=spatstat.options("checksegments")) {
+  verify.xypolygon(p)
+  n <- length(p$x)
+  nxt <- c(2:n, 1)
+  return(psp(p$x, p$y, p$x[nxt], p$y[nxt], window=w, check=check))
+}
+         
+
+#################
+
+as.data.frame.psp <- function(x, row.names=NULL, ...) {
+  df <- as.data.frame(x$ends, row.names=row.names)
+  if(is.marked(x))
+    df <- cbind(df, if(x$markformat=="dataframe") marks(x)
+                    else data.frame(marks=marks(x)))
+  return(df)
+}
+
+#######  manipulation ##########################
+
+append.psp <- function(A,B) {
+  verifyclass(A, "psp")
+  verifyclass(B, "psp")
+  stopifnot(identical(A$window, B$window))
+  marks <- marks(A) %mapp% marks(B)
+  ends <- rbind(A$ends, B$ends)
+  out  <- as.psp(ends,window=A$window,marks=marks,check=FALSE)
+  return(out)
+}
+
+rebound.psp <- function(x, rect) {
+  verifyclass(x, "psp")
+  x$window <- rebound.owin(x$window, rect)
+  return(x)
+}
+
+
+#################################################
+#  marks
+#################################################
+
+is.marked.psp <- function(X, ...) {
+  marx <- marks(X, ...)
+  return(!is.null(marx))
+}
+
+marks.psp <- function(x, ..., dfok = TRUE) {
+  # data frames of marks have been supported for psp objects since 19 March 2011
+    ma <- x$marks
+    if ((is.data.frame(ma) || is.matrix(ma)) && !dfok) 
+        stop("Sorry, not implemented when the marks are a data frame.\n")
+    return(ma)
+}
+
+"marks<-.psp" <- function(x, ..., value) {
+  stopifnot(is.psp(x))
+  if(is.null(value)) {
+    return(unmark(x))
+  }
+  m <- value
+  if(!(is.vector(m) || is.factor(m) || is.data.frame(m) || is.matrix(m)))
+    stop("Incorrect format for marks")
+
+    if (is.hyperframe(m)) 
+        stop("Hyperframes of marks are not supported in psp objects.\n")
+    nseg <- nsegments(x)
+    if (!is.data.frame(m) && !is.matrix(m)) {
+        if (length(m) == 1) 
+            m <- rep.int(m, nseg)
+        else if (nseg == 0) 
+            m <- rep.int(m, 0)
+        else if (length(m) != nseg) 
+            stop("Number of marks != number of line segments.\n")
+        marx <- m
+    }
+    else {
+        m <- as.data.frame(m)
+        if (ncol(m) == 0) {
+            marx <- NULL
+        }
+        else {
+            if (nrow(m) == nseg) {
+                marx <- m
+            }
+            else {
+                if (nrow(m) == 1 || nseg == 0) {
+                  marx <- as.data.frame(lapply(as.list(m), rep.int, times=nseg))
+                }
+                else stop("Number of rows of data frame != number of line segments.\n")
+            }
+        }
+    }
+    Y <- as.psp(x$ends, window = x$window, marks = marx, check = FALSE)
+    return(Y)
+}
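+
+## Editorial sketch, not upstream code: attach a factor mark to each
+## segment; a single value would be recycled to all segments.
+##   X <- psp(runif(5), runif(5), runif(5), runif(5), window=owin())
+##   marks(X) <- factor(letters[1:5])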
+
+markformat.psp <- function(x) {
+    mf <- x$markformat
+    if(is.null(mf)) 
+      mf <- markformat(marks(x))
+    return(mf)
+}
+
+unmark.psp <- function(X) {
+  X$marks <- NULL
+  X$markformat <- "none"
+  return(X)
+}
+
+#################################################
+#  plot and print methods
+#################################################
+
+plot.psp <- function(x, ..., main, add=FALSE,
+                     show.all=!add, 
+                     show.window=show.all,
+                     which.marks=1,
+                     ribbon=show.all, ribsep=0.15, ribwid=0.05, ribn=1024,
+                     do.plot=TRUE) {
+  if(missing(main) || is.null(main))
+    main <- short.deparse(substitute(x))
+  verifyclass(x, "psp")
+  #
+  n <- nsegments(x)
+  marx <- marks(x)
+  #
+  use.colour <- !is.null(marx) && (n != 0)
+  do.ribbon <- identical(ribbon, TRUE) && use.colour 
+  ##
+  ## ....   initialise plot; draw observation window  ......
+  owinpars <- setdiff(graphicsPars("owin"), "col")
+  if(!do.ribbon) {
+    ## window of x only
+    bb.all <- as.rectangle(as.owin(x))
+    if(do.plot && (!add || show.window))
+      do.call.plotfun(plot.owin, 
+                      resolve.defaults(list(x=x$window,
+                                            main=if(show.all) main else "",
+                                            add=add,
+                                            type = if(show.window) "w" else "n",
+                                            show.all=show.all),
+                                       list(...)),
+                      extrargs=owinpars)
+  } else {
+    ## enlarged window with room for colour ribbon
+    ## x at left, ribbon at right
+    bb <- as.rectangle(as.owin(x))
+    xwidth <- diff(bb$xrange)
+    xheight <- diff(bb$yrange)
+    xsize <- max(xwidth, xheight)
+    bb.rib <- owin(bb$xrange[2] + c(ribsep, ribsep+ribwid) * xsize,
+                   bb$yrange)
+    bb.all <- boundingbox(bb.rib, bb)
+    if(do.plot) {
+      pt <- prepareTitle(main)
+      ## establish coordinate system
+      if(!add)
+        do.call.plotfun(plot.owin,
+                        resolve.defaults(list(x=bb.all,
+                                              type="n",
+                                              main=pt$blank),
+                                         list(...)),
+                        extrargs=owinpars)
+      ## now plot window of x
+      ## with title centred on this window
+      if(show.window) {
+        do.call.plotfun(plot.owin, 
+                        resolve.defaults(list(x=x$window,
+                                              add=TRUE,
+                                              main=main,
+                                              show.all=TRUE),
+                                         list(...)),
+                        extrargs=owinpars)
+        ## title done. 
+        main <- ""
+      }
+    }
+  }
+
+  # plot segments
+  if(n == 0) {
+    result <- symbolmap()
+    attr(result, "bbox") <- bb.all
+    return(invisible(result))
+  }
+  
+  # determine colours if any
+  if(!use.colour) {
+    # black
+    col <- colmap <- NULL
+  } else {
+    # multicoloured 
+    marx <- as.data.frame(marx)[, which.marks]
+    if(is.character(marx) || length(unique(marx)) == 1)
+      marx <- factor(marx)
+    if(is.factor(marx)) {
+      lev <- levels(marx)
+      colmap <- colourmap(col=rainbow(length(lev)), inputs=factor(lev))
+    } else {
+      if(!all(is.finite(marx)))
+        warning("Some mark values are infinite or NaN or NA")
+      colmap <- colourmap(col=rainbow(ribn), range=range(marx, finite=TRUE))
+    }
+    col <- colmap(marx)
+  }
+
+  ## convert to greyscale?
+  if(spatstat.options("monochrome")) {
+    col <- to.grey(col)
+    colmap <- to.grey(colmap)
+  }
+
+  if(do.plot) {
+    ## plot segments
+    do.call.plotfun(segments,
+                    resolve.defaults(as.list(x$ends),
+                                     list(...),
+                                     list(col=col),
+                                     .StripNull=TRUE),
+                    extrargs=names(par()))
+    ## plot ribbon
+    if(do.ribbon) 
+      plot(colmap, vertical=TRUE, add=TRUE,
+           xlim=bb.rib$xrange, ylim=bb.rib$yrange)
+  }
+  
+  # return colour map
+  result <- colmap %orifnull% colourmap()
+  attr(result, "bbox") <- bb.all
+  return(invisible(result))
+}
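+
+## Editorial sketch, not upstream code: numeric marks are mapped to
+## colours and, by default, a colour ribbon is drawn beside the window.
+##   X <- psp(runif(20), runif(20), runif(20), runif(20), window=owin())
+##   marks(X) <- runif(20)
+##   plot(X)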
+
+print.psp <- function(x, ...) {
+  verifyclass(x, "psp")
+  n <- x$n
+  ism <- is.marked(x, dfok = TRUE)
+  splat(if(ism) "marked" else NULL,
+        "planar line segment pattern:",
+        n, ngettext(n, "line segment", "line segments"))
+  if(ism) {
+    mks <- marks(x, dfok = TRUE)
+    if(is.data.frame(mks)) {
+      splat("Mark variables: ",
+            paste(names(mks), collapse = ", "))
+    } else {
+      if(is.factor(mks)) {
+        splat("multitype, with levels =",
+              paste(levels(mks), collapse = "\t"))
+      } else {
+        splat("marks are",
+              if(is.numeric(mks)) "numeric," else NULL,
+              "of type", sQuote(typeof(mks)))
+      }
+    }
+  }
+  print(x$window)
+  return(invisible(NULL))
+}
+
+unitname.psp <- function(x) {
+  return(unitname(x$window))
+}
+
+"unitname<-.psp" <- function(x, value) {
+  w <- x$window
+  unitname(w) <- value
+  x$window <- w
+  return(x)
+}
+
+####################################################
+#    summary information
+####################################################
+
+endpoints.psp <- function(x, which="both") {
+  verifyclass(x, "psp")
+  ends <- x$ends
+  n <- x$n
+  switch(which,
+         both={
+           first <- second <- rep.int(TRUE, n)
+         },
+         first={
+           first <- rep.int(TRUE, n)
+           second <- rep.int(FALSE, n)
+         },
+         second={
+           first <- rep.int(FALSE, n)
+           second <- rep.int(TRUE, n)
+         },
+         left={
+           first <- (ends$x0 < ends$x1)
+           second <- !first
+         },
+         right={
+           first <- (ends$x0 > ends$x1)
+           second <- !first
+         },
+         lower={
+           first <- (ends$y0 < ends$y1)
+           second <- !first
+         },
+         upper={
+           first <- (ends$y0 > ends$y1)
+           second <- !first
+         },
+         stop(paste("Unrecognised option: which=", sQuote(which)))
+         )
+  ok <- rbind(first, second)
+  xmat <- rbind(ends$x0, ends$x1)
+  ymat <- rbind(ends$y0, ends$y1)
+  idmat <- col(ok)
+  xx <- as.vector(xmat[ok])
+  yy <- as.vector(ymat[ok])
+  id <- as.vector(idmat[ok])
+  result <- ppp(xx, yy, window=x$window, check=FALSE)
+  attr(result, "id") <- id
+  return(result)
+}
+
+midpoints.psp <- function(x) {
+  verifyclass(x, "psp")
+  xm <- eval(expression((x0+x1)/2), envir=x$ends)
+  ym <- eval(expression((y0+y1)/2), envir=x$ends)
+  win <- x$window
+  ok <- inside.owin(xm, ym, win)
+  if(any(!ok)) {
+    warning(paste("Some segment midpoints lie outside the original window;",
+                  "window replaced by bounding box"))
+    win <- boundingbox(win)
+  }
+  ppp(x=xm, y=ym, window=win, check=FALSE)
+}
+
+lengths.psp <- function(x, squared=FALSE) {
+  verifyclass(x, "psp")
+  lengths2 <- eval(expression((x1-x0)^2 + (y1-y0)^2), envir=x$ends)
+  return(if(squared) lengths2 else sqrt(lengths2))
+}
+
+angles.psp <- function(x, directed=FALSE) {
+  verifyclass(x, "psp")
+  a <- eval(expression(atan2(y1-y0, x1-x0)), envir=x$ends)
+  if(!directed) 
+    a <- a %% pi
+  return(a)
+}
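+
+## Editorial sketch, not upstream code: the helpers above extract the
+## endpoints, midpoints, lengths and orientations of the segments.
+##   X <- psp(runif(5), runif(5), runif(5), runif(5), window=owin())
+##   endpoints.psp(X); midpoints.psp(X); lengths.psp(X); angles.psp(X)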
+
+summary.psp <- function(object, ...) {
+  verifyclass(object, "psp")
+  len <- lengths.psp(object)
+  out <- list(n = object$n,
+              len = summary(len),
+              totlen = sum(len),
+              ang= summary(angles.psp(object)),
+              w = summary.owin(object$window),
+              marks=if(is.null(object$marks)) NULL else summary(object$marks),
+              unitinfo=summary(unitname(object)))
+  class(out) <- c("summary.psp", class(out))
+  return(out)
+}
+
+print.summary.psp <- function(x, ...) {
+  cat(paste(x$n, "line segments\n"))
+  cat("Lengths:\n")
+  print(x$len)
+  unitblurb <- paste(x$unitinfo$plural, x$unitinfo$explain)
+  cat(paste("Total length:", x$totlen, unitblurb, "\n"))
+  cat(paste("Length per unit area:", x$totlen/x$w$area, "\n"))
+  cat("Angles (radians):\n")
+  print(x$ang)
+  print(x$w)
+  if(!is.null(x$marks)) {
+    cat("Marks:\n")
+    print(x$marks)
+  }
+  return(invisible(NULL))
+}
+
+  
+########################################################
+#  subsets
+########################################################
+
+"[.psp" <-
+  function(x, i, j, drop, ..., fragments=TRUE) {
+
+    verifyclass(x, "psp")
+    
+    if(missing(i) && missing(j))
+      return(x)
+        
+    if(!missing(i)) {
+      style <- if(inherits(i, "owin")) "window" else "index"
+      switch(style,
+             window={
+               x <- clip.psp(x, window=i, check=FALSE, fragments=fragments)
+             },
+             index={
+               enz <- x$ends[i, ]
+               win <- x$window
+               marx <- marksubset(x$marks, i, markformat(x))
+               x <- with(enz, psp(x0, y0, x1, y1, window=win, marks=marx,
+                                  check=FALSE))
+             })
+    }
+
+    if(!missing(j))
+      x <- x[j] # invokes code above
+    
+    return(x)
+ }
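+
+## Editorial sketch, not upstream code: an integer index selects segments,
+## while a window clips the pattern (keeping fragments by default).
+##   X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+##   X[1:3]                         # first three segments
+##   X[owin(c(0, 0.5), c(0, 0.5))]  # clip to a subwindow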
+  
+
+
+####################################################
+# affine transformations
+####################################################
+
+affine.psp <- function(X,  mat=diag(c(1,1)), vec=c(0,0), ...) {
+  verifyclass(X, "psp")
+  W <- affine.owin(X$window, mat=mat, vec=vec, ...)
+  E <- X$ends
+  ends0 <- affinexy(list(x=E$x0,y=E$y0), mat=mat, vec=vec)
+  ends1 <- affinexy(list(x=E$x1,y=E$y1), mat=mat, vec=vec)
+  psp(ends0$x, ends0$y, ends1$x, ends1$y, window=W, marks=marks(X, dfok=TRUE),
+      check=FALSE)
+}
+
+shift.psp <- function(X, vec=c(0,0), ..., origin=NULL) {
+  verifyclass(X, "psp")
+  if(!is.null(origin)) {
+    stopifnot(is.character(origin))
+    if(!missing(vec))
+      warning("Argument vec ignored; argument origin has precedence.\n")
+    origin <- pickoption("origin", origin, c(centroid="centroid",
+                                             midpoint="midpoint",
+                                             bottomleft="bottomleft"))
+    W <- as.owin(X)
+    locn <- switch(origin,
+                   centroid={ unlist(centroid.owin(W)) },
+                   midpoint={ c(mean(W$xrange), mean(W$yrange)) },
+                   bottomleft={ c(W$xrange[1], W$yrange[1]) })
+    return(shift(X, -locn))
+  }
+  # perform shift
+  W <- shift.owin(X$window, vec=vec, ...)
+  E <- X$ends
+  ends0 <- shiftxy(list(x=E$x0,y=E$y0), vec=vec, ...)
+  ends1 <- shiftxy(list(x=E$x1,y=E$y1), vec=vec, ...)
+  Y <- psp(ends0$x, ends0$y, ends1$x, ends1$y,
+           window=W, marks=marks(X, dfok=TRUE),
+           check=FALSE)
+  # tack on shift vector
+  attr(Y, "lastshift") <- vec
+  return(Y)
+}
+
+rotate.psp <- function(X, angle=pi/2, ..., centre=NULL) {
+  verifyclass(X, "psp")
+  if(!is.null(centre)) {
+    X <- shift(X, origin=centre)
+    negorigin <- getlastshift(X)
+  } else negorigin <- NULL
+  W <- rotate.owin(X$window, angle=angle, ...)
+  E <- X$ends
+  ends0 <- rotxy(list(x=E$x0,y=E$y0), angle=angle)
+  ends1 <- rotxy(list(x=E$x1,y=E$y1), angle=angle)
+  Y <- psp(ends0$x, ends0$y, ends1$x, ends1$y,
+           window=W, marks=marks(X, dfok=TRUE),
+           check=FALSE)
+  if(!is.null(negorigin))
+    Y <- shift(Y, -negorigin)
+  return(Y)
+}
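+
+## Editorial sketch, not upstream code: the affine, shift and rotate
+## methods above compose in the usual way.
+##   X <- psp(runif(5), runif(5), runif(5), runif(5), window=owin())
+##   Y <- rotate(shift(X, c(1, 0)), pi/4)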
+
+is.empty.psp <- function(x) { return(x$n == 0) } 
+
+identify.psp <- function(x, ..., labels=seq_len(nsegments(x)), n=nsegments(x), plot=TRUE) {
+  Y <- x
+  W <- as.owin(Y)
+  mids <- midpoints.psp(Y)
+  if(!(is.numeric(n) && (length(n) == 1) && (n %% 1 == 0) && (n >= 0)))
+    stop("n should be a single integer")
+  out <- integer(0)
+  while(length(out) < n) {
+    xy <- locator(1)
+    # check for interrupt exit
+    if(length(xy$x) == 0)
+      return(out)
+    # find nearest segment
+    X <- ppp(xy$x, xy$y, window=W)
+    ident <- project2segment(X, Y)$mapXY
+    # add to list
+    if(ident %in% out) {
+      cat(paste("Segment", ident, "already selected\n"))
+    } else {
+      if(plot) {
+        # Display
+        mi <- mids[ident]
+        li <- labels[ident]
+        text(mi$x, mi$y, labels=li)
+      }
+      out <- c(out, ident)
+    }
+  }
+  # exit if max n reached
+  return(out)
+}
+
+nsegments <- function(x) {
+  UseMethod("nsegments")
+}
+
+nobjects.psp <- nsegments.psp <- function(x) {
+   x$n
+}
+
+as.ppp.psp <- function (X, ..., fatal=TRUE) 
+{
+  Y <- endpoints.psp(X, which="both")
+  m  <- marks(X)
+  marks(Y) <- markappend(m, m)
+  return(Y)
+}
+
+domain.psp <- Window.psp <- function(X, ...) { as.owin(X) }
+
+"Window<-.psp" <- function(X, ..., value) {
+  verifyclass(value, "owin")
+  X[value]
+}
+
+edit.psp <- function(name, ...) {
+  x <- name
+  y <- edit(as.data.frame(x), ...)
+  xnew <- as.psp(y, window=Window(x))
+  return(xnew)
+}
+
diff --git a/R/psp2pix.R b/R/psp2pix.R
new file mode 100755
index 0000000..37ce89e
--- /dev/null
+++ b/R/psp2pix.R
@@ -0,0 +1,132 @@
+#
+# psp2pix.R
+#
+#  $Revision: 1.11 $  $Date: 2017/06/05 10:31:58 $
+#
+#
+
+as.mask.psp <- function(x, W=NULL, ...) {
+  L <- as.psp(x)
+  if(is.null(W))
+    W <- as.owin(L)
+  else
+    W <- as.owin(W)
+
+  W <- do.call.matched(as.mask,
+                       resolve.defaults(list(...),
+                                        list(w=W)))
+
+  ends <- L$ends
+  nseg <- nrow(ends)
+  
+  if(nseg == 0) {
+    # empty
+    W$m[] <- FALSE
+    return(W)
+  }
+    
+  x0 <- (ends$x0 - W$xrange[1])/W$xstep
+  x1 <- (ends$x1 - W$xrange[1])/W$xstep
+  y0 <- (ends$y0 - W$yrange[1])/W$ystep
+  y1 <- (ends$y1 - W$yrange[1])/W$ystep
+  nr <- W$dim[1]
+  nc <- W$dim[2]
+  zz <- .C("seg2pixI",
+           ns=as.integer(nseg),
+           x0=as.double(x0),
+           y0=as.double(y0),
+           x1=as.double(x1),
+           y1=as.double(y1),
+           nx=as.integer(nc),
+           ny=as.integer(nr),
+           out=as.integer(integer(nr * nc)),
+           PACKAGE = "spatstat")
+  mm <- matrix(zz$out, nr, nc)
+  # intersect with existing window
+  W$m <- W$m & mm
+  W
+}
+
+
+pixellate.psp <- function(x, W=NULL, ..., weights=NULL,
+                          what=c("length", "number")) {
+  L <- as.psp(x)
+  what <- match.arg(what)
+  
+  if(is.null(W))
+    W <- as.owin(L)
+  else
+    W <- as.owin(W)
+
+  W <- do.call.matched(as.mask,
+                         resolve.defaults(list(...),
+                                          list(w=W)))
+
+  Z <- as.im(W)
+
+  ends <- L$ends
+  nseg <- nrow(ends)
+
+  if(nseg == 0) {
+    # empty
+    Z$v[] <- 0
+    return(Z)
+  }
+
+  
+  if(is.null(weights))
+    weights <- rep.int(1, nseg)
+  else {
+    if(!is.numeric(weights)) stop("weights must be numeric")
+    if(anyNA(weights)) stop("weights must not be NA")
+    if(!all(is.finite(weights))) stop("weights must not be infinite")
+    if(length(weights) == 1)
+      weights <- rep.int(weights, nseg)
+    else if(length(weights) != nseg)
+      stop(paste("weights vector has length", length(weights),
+                 "but there are", nseg, "line segments"))
+  }
+      
+  x0 <- (ends$x0 - Z$xrange[1])/Z$xstep
+  x1 <- (ends$x1 - Z$xrange[1])/Z$xstep
+  y0 <- (ends$y0 - Z$yrange[1])/Z$ystep
+  y1 <- (ends$y1 - Z$yrange[1])/Z$ystep
+  nr <- Z$dim[1]
+  nc <- Z$dim[2]
+  switch(what,
+         length = {
+           zz <- .C("seg2pixL",
+                    ns=as.integer(nseg),
+                    x0=as.double(x0),
+                    y0=as.double(y0),
+                    x1=as.double(x1),
+                    y1=as.double(y1),
+                    weights=as.double(weights),
+                    pixwidth=as.double(Z$xstep),
+                    pixheight=as.double(Z$ystep),
+                    nx=as.integer(nc),
+                    ny=as.integer(nr),
+                    out=as.double(numeric(nr * nc)),
+                    PACKAGE = "spatstat")
+         },
+         number = {
+           zz <- .C("seg2pixN",
+                    ns=as.integer(nseg),
+                    x0=as.double(x0),
+                    y0=as.double(y0),
+                    x1=as.double(x1),
+                    y1=as.double(y1),
+                    w=as.double(weights),
+                    nx=as.integer(nc),
+                    ny=as.integer(nr),
+                    out=as.double(numeric(nr * nc)),
+                    PACKAGE = "spatstat")
+         })
+  mm <- matrix(zz$out, nr, nc)
+  mm[is.na(Z$v)] <- NA
+  ## intersect with existing window
+  Z$v <- mm
+  Z
+}
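+
+## Editorial sketch, not upstream code: 'eps' is passed through to
+## as.mask to set the pixel size.
+##   X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+##   Z <- pixellate(X, eps=0.05)   # image of total segment length per pixel
+##   M <- as.mask.psp(X, eps=0.05) # logical mask of pixels meeting a segment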
+
+
diff --git a/R/pspcross.R b/R/pspcross.R
new file mode 100755
index 0000000..eb3ca56
--- /dev/null
+++ b/R/pspcross.R
@@ -0,0 +1,328 @@
+#
+#    pspcross.R
+#
+#    Intersections of line segments
+#    
+#    $Revision: 1.23 $   $Date: 2017/06/05 10:31:58 $
+#
+#
+crossing.psp <- function(A,B,fatal=TRUE,details=FALSE) {
+  verifyclass(A, "psp")
+  verifyclass(B, "psp")
+  
+  # first check for intersection of windows
+  ABW <- intersect.owin(A$window, B$window, fatal=fatal)
+  if(is.null(ABW)) 
+    return(NULL)
+  
+  eps <- .Machine$double.eps
+
+  na <- A$n
+  eA <- A$ends
+  x0a <- eA$x0
+  y0a <- eA$y0
+  dxa <- eA$x1 - eA$x0
+  dya <- eA$y1 - eA$y0
+
+  nb <- B$n
+  eB <- B$ends
+  x0b <- eB$x0
+  y0b <- eB$y0
+  dxb <- eB$x1 - eB$x0
+  dyb <- eB$y1 - eB$y0
+
+  useCall <- spatstat.options("crossing.psp.useCall")
+  if(!useCall) {
+    # old C routine
+    out <- .C("xysegint",
+              na=as.integer(na),
+              x0a=as.double(x0a),
+              y0a=as.double(y0a),
+              dxa=as.double(dxa),
+              dya=as.double(dya), 
+              nb=as.integer(nb),
+              x0b=as.double(x0b),
+              y0b=as.double(y0b),
+              dxb=as.double(dxb),
+              dyb=as.double(dyb), 
+              eps=as.double(eps),
+              xx=as.double(numeric(na * nb)),
+              yy=as.double(numeric(na * nb)),
+              ta=as.double(numeric(na * nb)),
+              tb=as.double(numeric(na * nb)),
+              ok=as.integer(integer(na * nb)),
+              PACKAGE = "spatstat")
+    
+    ok <- (matrix(out$ok, na, nb) != 0)
+    xx <- matrix(out$xx, na, nb)
+    yy <- matrix(out$yy, na, nb)
+    xx <- as.vector(xx[ok])
+    yy <- as.vector(yy[ok])
+    if(details) {
+      ia <- as.vector(row(ok)[ok])
+      jb <- as.vector(col(ok)[ok])
+      ta <- as.vector(matrix(out$ta, na, nb)[ok])
+      tb <- as.vector(matrix(out$tb, na, nb)[ok])
+    }
+  } else {
+    # new
+    storage.mode(x0a) <- storage.mode(y0a) <- "double"
+    storage.mode(dxa) <- storage.mode(dya) <- "double"
+    storage.mode(x0b) <- storage.mode(y0b) <- "double"
+    storage.mode(dxb) <- storage.mode(dyb) <- "double"
+    storage.mode(eps) <- "double"
+    out <- .Call("Cxysegint",
+                 x0a, 
+                 y0a, 
+                 dxa, 
+                 dya, 
+                 x0b, 
+                 y0b, 
+                 dxb, 
+                 dyb, 
+                 eps,
+                 PACKAGE="spatstat")
+    xx <- out[[5]]
+    yy <- out[[6]]
+    if(details) {
+      ia <- out[[1L]] + 1L
+      jb <- out[[2L]] + 1L
+      ta <- out[[3L]]
+      tb <- out[[4L]]
+    }
+  }
+  result <- ppp(xx, yy, window=ABW, check=FALSE)
+  if(details)
+    marks(result) <- data.frame(iA=ia, jB=jb, tA=ta, tB=tb)
+  return(result)
+}
+
+test.crossing.psp <- function(A,B) {
+  # return logical matrix specifying whether A[i] and B[j] cross
+  verifyclass(A, "psp")
+  verifyclass(B, "psp")
+  eps <- .Machine$double.eps
+
+  na <- A$n
+  eA <- A$ends
+  x0a <- eA$x0
+  y0a <- eA$y0
+  dxa <- eA$x1 - eA$x0
+  dya <- eA$y1 - eA$y0
+
+  nb <- B$n
+  eB <- B$ends
+  x0b <- eB$x0
+  y0b <- eB$y0
+  dxb <- eB$x1 - eB$x0
+  dyb <- eB$y1 - eB$y0
+
+  out <- .C("xysi",
+            na=as.integer(na),
+            x0a=as.double(x0a),
+            y0a=as.double(y0a),
+            dxa=as.double(dxa),
+            dya=as.double(dya), 
+            nb=as.integer(nb),
+            x0b=as.double(x0b),
+            y0b=as.double(y0b),
+            dxb=as.double(dxb),
+            dyb=as.double(dyb), 
+            eps=as.double(eps),
+            ok=as.integer(integer(na * nb)),
+            PACKAGE = "spatstat")
+
+  hit <- (matrix(out$ok, na, nb) != 0)
+  return(hit)
+}
+
+anycrossing.psp <- function(A,B) {
+  # equivalent to: any(test.crossing.psp(A,B))
+  # Test whether two psp objects have at least one crossing point
+  verifyclass(A, "psp")
+  verifyclass(B, "psp")
+  eps <- .Machine$double.eps
+
+  na <- A$n
+  eA <- A$ends
+  x0a <- eA$x0
+  y0a <- eA$y0
+  dxa <- eA$x1 - eA$x0
+  dya <- eA$y1 - eA$y0
+
+  nb <- B$n
+  eB <- B$ends
+  x0b <- eB$x0
+  y0b <- eB$y0
+  dxb <- eB$x1 - eB$x0
+  dyb <- eB$y1 - eB$y0
+
+  out <- .C("xysiANY",
+            na=as.integer(na),
+            x0a=as.double(x0a),
+            y0a=as.double(y0a),
+            dxa=as.double(dxa),
+            dya=as.double(dya), 
+            nb=as.integer(nb),
+            x0b=as.double(x0b),
+            y0b=as.double(y0b),
+            dxb=as.double(dxb),
+            dyb=as.double(dyb), 
+            eps=as.double(eps),
+            ok=as.integer(integer(1L)),
+            PACKAGE = "spatstat")
+  hit <- (out$ok != 0)
+  return(hit)
+}
+
+selfcrossing.psp <- function(A) {
+  verifyclass(A, "psp")
+  eps <- .Machine$double.eps
+
+  n <- A$n
+  eA <- A$ends
+  x0 <- eA$x0
+  y0 <- eA$y0
+  dx <- eA$x1 - eA$x0
+  dy <- eA$y1 - eA$y0
+
+  useCall <- spatstat.options("selfcrossing.psp.useCall")
+  if(!useCall) {
+    # old C routine
+    out <- .C("xysegXint",
+              n=as.integer(n),
+              x0=as.double(x0),
+              y0=as.double(y0),
+              dx=as.double(dx),
+              dy=as.double(dy), 
+              eps=as.double(eps),
+              xx=as.double(numeric(n^2)),
+              yy=as.double(numeric(n^2)),
+              ti=as.double(numeric(n^2)),
+              tj=as.double(numeric(n^2)),
+              ok=as.integer(integer(n^2)),
+              PACKAGE = "spatstat")
+
+    ok <- (matrix(out$ok, n, n) != 0)
+    xx <- matrix(out$xx, n, n)
+    yy <- matrix(out$yy, n, n)
+    xx <- as.vector(xx[ok])
+    yy <- as.vector(yy[ok])
+  } else {
+    # new
+    storage.mode(x0) <- storage.mode(y0) <- "double"
+    storage.mode(dx) <- storage.mode(dy) <- "double"
+    storage.mode(eps) <- "double"
+    out <- .Call("CxysegXint",
+                 x0, 
+                 y0, 
+                 dx, 
+                 dy, 
+                 eps,
+                 PACKAGE="spatstat")
+    xx <- out[[5L]]
+    yy <- out[[6L]]
+  }
+  result <- ppp(xx, yy, window=A$window, check=FALSE)
+  return(result)
+}
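+
+## Editorial sketch, not upstream code: intersection points are returned
+## as point patterns.
+##   A <- psp(runif(8), runif(8), runif(8), runif(8), window=owin())
+##   B <- psp(runif(8), runif(8), runif(8), runif(8), window=owin())
+##   P <- crossing.psp(A, B)   # crossings between A and B
+##   Q <- selfcrossing.psp(A)  # crossings of A with itself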
+
+
+test.selfcrossing.psp <- function(A) {
+  verifyclass(A, "psp")
+  eps <- .Machine$double.eps
+
+  n <- A$n
+  eA <- A$ends
+  x0 <- eA$x0
+  y0 <- eA$y0
+  dx <- eA$x1 - eA$x0
+  dy <- eA$y1 - eA$y0
+
+  out <- .C("xysxi",
+            na=as.integer(n),
+            x0=as.double(x0),
+            y0=as.double(y0),
+            dx=as.double(dx),
+            dy=as.double(dy), 
+            eps=as.double(eps),
+            ok=as.integer(integer(n*n)),
+            PACKAGE = "spatstat")
+  hit <- (matrix(out$ok, n, n) != 0)
+  return(hit)
+}
+
+selfcut.psp <- function(A, ..., eps) {
+  stopifnot(is.psp(A))
+#  n <- A$n
+  eA <- A$ends
+  x0 <- eA$x0
+  y0 <- eA$y0
+  dx <- eA$x1 - eA$x0
+  dy <- eA$y1 - eA$y0
+  if(missing(eps) || is.null(eps)) {
+    eps <- sqrt(.Machine$double.eps) * diameter(Frame(A))
+  } else {
+    check.1.real(eps)
+    stopifnot(eps >= 0)
+  }
+  ## identify self-crossings, using the tolerance 'eps' determined above
+  storage.mode(x0) <- storage.mode(y0) <- "double"
+  storage.mode(dx) <- storage.mode(dy) <- "double"
+  storage.mode(eps) <- "double"
+  zz <- .Call("CxysegXint",
+              x0, 
+              y0, 
+              dx, 
+              dy, 
+              eps,
+              PACKAGE = "spatstat")
+  if(length(zz[[1]]) == 0)
+    return(A)
+  ##
+  names(zz) <- c("i", "j", "ti", "tj", "x", "y")
+  df <- as.data.frame(zz)
+  df$i <- df$i + 1L
+  df$j <- df$j + 1L
+  ##
+  gone <- with(df, unique(c(i,j)))
+  newends <- as.matrix(eA)
+  newends <- newends[-gone, , drop=FALSE]
+  newmarx <- marx <- marks(A)
+  if(mama <- !is.null(marx)) {
+    marx <- as.data.frame(marx)
+    newmarx <- marx[-gone, ,drop=FALSE]
+  }
+  ## cut each segment using the *provided* values of x,y
+  for(ii in gone) {
+    ## assemble cuts through segment ii
+    imatch <- with(df, which(i == ii))
+    jmatch <- with(df, which(j == ii))
+    df.i <- with(df,
+                 data.frame(t=c(ti[imatch], tj[jmatch]),
+                            x=x[c(imatch, jmatch)],
+                            y=y[c(imatch, jmatch)]))
+    # discard T-junctions
+    ok <- with(df.i, t > 0 & t < 1)
+    df.i <- df.i[ok, ,drop=FALSE]
+    # order the pieces
+    ord <- with(df.i, order(t))
+    df.i <- df.i[ord, , drop=FALSE]
+    ## add endpoints
+    xnew <- c(eA[ii,"x0"], df.i$x, eA[ii,"x1"])
+    ynew <- c(eA[ii,"y0"], df.i$y, eA[ii,"y1"])
+    m <- length(xnew)
+    newsegs <- cbind(xnew[-m], ynew[-m], xnew[-1], ynew[-1])
+    newends <- rbind(newends, newsegs)
+    if(mama)
+      newmarx <- rbind(newmarx, marx[rep(ii, m-1), , drop=FALSE])
+  }
+  Y <- as.psp(newends, window=Window(A), marks=newmarx)
+  if(eps > 0) {
+    ok <- (lengths.psp(Y) > eps)
+    if(any(!ok)) Y <- Y[ok]
+  }
+  return(Y)
+}
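+
+## Editorial sketch, not upstream code: divide the segments of A at the
+## points where they cross each other.
+##   A <- psp(runif(8), runif(8), runif(8), runif(8), window=owin())
+##   Y <- selfcut.psp(A)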
+  
diff --git a/R/psst.R b/R/psst.R
new file mode 100755
index 0000000..317eda6
--- /dev/null
+++ b/R/psst.R
@@ -0,0 +1,206 @@
+#
+#	psst.R
+#
+#	Computes the GNZ contrast of delta-f for any function f
+#
+#	$Revision: 1.9 $	$Date: 2015/07/11 08:19:26 $
+#
+################################################################################
+#
+
+psst <- function(object, fun, r=NULL, breaks=NULL, ...,
+                 model=NULL,
+                 trend=~1, interaction=Poisson(),
+                 rbord=reach(interaction),
+                 truecoef=NULL, hi.res=NULL,
+                 funargs=list(correction="best"),
+                 verbose=TRUE) {
+  if(inherits(object, "ppm")) {
+    fit <- object
+  } else if(is.ppp(object) || inherits(object, "quad")) {
+    if(is.ppp(object)) object <- quadscheme(object, ...)
+    if(!is.null(model)) {
+      fit <- update(model, Q=object, forcefit=TRUE)
+    } else {
+      fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord,
+                 forcefit=TRUE)
+    }
+  } else 
+    stop("object should be a fitted point process model or a point pattern")
+
+#  rfixed <- !is.null(r) || !is.null(breaks)
+  
+  # Extract data and quadrature points
+  Q <- quad.ppm(fit, drop=FALSE)
+  X <- data.ppm(fit)
+  U <- union.quad(Q)
+  Z <- is.data(Q) # indicator data/dummy
+#  E <- equalsfun.quad(Q)
+#  WQ <- w.quad(Q)  # quadrature weights
+
+  # integrals will be restricted to quadrature points
+  # that were actually used in the fit
+#  USED <- getglmsubset(fit)
+  if(fit$correction == "border") {
+    rbord <- fit$rbord
+    b <- bdist.points(U)
+    USED <- (b > rbord)
+  } else USED <- rep.int(TRUE, U$n)
+  
+  # basic statistics
+  Win <- Window(X)
+  npts <- npoints(X)
+  areaW <- area(Win)
+  lambda <- npts/areaW
+
+  # adjustments to account for restricted domain of pseudolikelihood
+#  if(any(!USED) && spatstat.options("eroded.intensity")) {
+#    XUSED <- USED[Z]
+#    npts.used <- sum(Z & USED)
+#    area.used <- sum(WQ[USED])
+#    lambda.used <- npts.used/area.used
+#  } else {
+#    XUSED <- rep.int(TRUE, npts)
+#    npts.used <- npts
+#    area.used <- areaW
+#    lambda.used <- lambda
+#  }
+  
+  #  determine breakpoints for r values
+  rmaxdefault <- rmax.rule("G", Win, lambda)
+  breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault)
+  rvals <- breaks$r
+  rmax  <- breaks$max
+  
+  # residuals
+  resid <- residuals(fit, type="raw",drop=FALSE,
+                    new.coef=truecoef, quad=hi.res)
+  rescts <- with(resid, "continuous")
+  # absolute weight for continuous integrals
+  wc   <- -rescts
+
+  # initialise fv object
+  df <- data.frame(r=rvals, theo=0)
+  desc <- c("distance argument r", "value 0 corresponding to perfect fit")
+  ans <- fv(df, "r", substitute(bold(R)~Delta~S(r), NULL),
+            "theo", . ~ r,
+            alim=c(0, rmax), c("r","%s[theo](r)"), desc,
+            fname="bold(R)~Delta~S")
+
+  # evaluate fun(X) for data
+  fX <- do.call(fun, append(list(X, r=rvals), funargs))
+  fXunits <- unitname(fX)
+  # Extract 'best' estimate only
+  fX <- with(fX, .y)
+  zero <- numeric(length(fX))
+  # sum over all quadrature points
+  iused <- seq(U$n)[USED]
+  nused <- length(iused)
+  if(verbose) cat(paste("\nProcessing", nused, "quadrature points..."))
+  # running sums & integrals
+  sumX <- zero
+  integ <- integ2 <- zero
+  # template for X \cup {u}
+  uX <- superimpose(U[1], X, W=Win, check=FALSE)
+  Ux <- U$x
+  Uy <- U$y
+  #
+  if(verbose) pstate <- list()
+  #
+  for(j in seq(nused)) {
+    i <- iused[j]
+    wi <- wc[i]
+    if(Z[i]) {
+      # data point
+      fXi <- do.call(fun, append(list(X[-i], r=rvals), funargs))
+      fXi <- with(fXi, .y)
+      deltaf <- fX - fXi
+      sumX <- sumX + deltaf
+    } else {
+      # dummy point
+      uX$x[1] <- Ux[i]
+      uX$y[1] <- Uy[i]
+      fuX <- do.call(fun, append(list(uX, r=rvals), funargs))
+      fuX <- with(fuX, .y)
+      deltaf <- fuX - fX
+    }
+    integ <- integ + wi * deltaf
+    integ2 <- integ2 + wi * deltaf^2
+    # 
+    if(j %% 500 == 0) {
+      ## collect garbage periodically to limit memory usage
+      if(verbose) cat("[garbage ")
+      gc()
+      if(verbose) cat("collected]")
+    }
+    if(verbose) pstate <- progressreport(j, nused, state=pstate)
+  }
+
+  sdv <- sqrt(integ2)
+  res <- sumX - integ
+  ans <- bind.fv(ans,
+                 data.frame(dat=sumX,
+                            com=integ,
+                            var=integ2,
+                            sd=sdv,
+                            hi=2*sdv,
+                            lo=-2*sdv,
+                            res=res,
+                            stdres=res/sdv),
+                 c("Sigma~Delta~S(r)",
+                   "bold(C)~Delta~S(r)",
+                   "bold(C)^2~Delta~S(r)",
+                   "sqrt(bold(C)^2~Delta~S(r))",
+                   "%s[hi](r)",
+                   "%s[lo](r)",
+                   "bold(R)~Delta~S(r)",
+                   "bold(T)~Delta~S(r)"),
+               c("data pseudosum (contribution to %s)",
+                 "model compensator (contribution to %s)",
+                 "pseudovariance of %s",
+                 "sqrt(pseudovariance) of %s",
+                 "upper 2 sigma critical band for %s",
+                 "lower 2 sigma critical band for %s",
+                 "pseudoresidual function %s",
+                 "standardised pseudoresidual function %s"),
+               "res")
+
+  fvnames(ans,".") <- c("res", "hi", "lo", "theo")
+  unitname(ans) <- fXunits
+  # 
+  return(ans)
+}
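+
+## Editorial sketch, not upstream code: compute the pseudoscore residual
+## diagnostic for a fitted model, using the cumulative nearest-neighbour
+## count nndcumfun defined later in this file.
+##   fit <- ppm(cells ~ 1)
+##   plot(psst(fit, nndcumfun))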
+
+npfun <- function(X, ..., r) {
+  npts <- npoints(X)
+  # initialise fv object
+  df <- data.frame(r=r, theo=0, npoint=npts)
+  desc <- c("distance argument r",
+            "value 0",
+            "value equal to number of points")
+  ans <- fv(df, "r", substitute(npoints(r), NULL),
+            "npoint", . ~ r,
+            alim=c(0, max(r)), c("r","%s[theo](r)", "%s[obs](r)"),
+            desc, fname="npoints")
+  unitname(ans) <- unitname(X)
+  return(ans)
+}
+
+nndcumfun <- function(X, ..., r) {
+  nn <- nndist(X)
+  bk <- breakpts.from.r(r)
+#  nn <- nn[nn <= bdist.points(X)]
+  h <- whist(nn, bk$val)
+  # initialise fv object
+  df <- data.frame(r=r, theo=0, obs=h)
+  desc <- c("distance argument r",
+            "value 0",
+            "observed count")
+  ans <- fv(df, "r", substitute(nndcount(r), NULL),
+            "obs", . ~ r,
+            alim=c(0, max(r)), c("r","%s[theo](r)", "%s[obs](r)"),
+            desc, fname="nndcount")
+  unitname(ans) <- unitname(X)
+  return(ans)
+}
+
+  
diff --git a/R/psstA.R b/R/psstA.R
new file mode 100755
index 0000000..bba82c5
--- /dev/null
+++ b/R/psstA.R
@@ -0,0 +1,157 @@
+#
+#	psstA.R
+#
+#	Pseudoscore residual for unnormalised F (area-interaction)
+#
+#	$Revision: 1.7 $	$Date: 2014/11/11 02:31:44 $
+#
+################################################################################
+#
+
+psstA <- function(object, r=NULL, breaks=NULL, ...,
+                  model=NULL,
+                  trend=~1, interaction=Poisson(),
+                  rbord=reach(interaction), ppmcorrection="border",
+                  correction="all",
+                  truecoef=NULL, hi.res=NULL,
+                  nr=spatstat.options("psstA.nr"),
+                  ngrid=spatstat.options("psstA.ngrid")) {
+  if(inherits(object, "ppm")) 
+    fit <- object
+  else if(inherits(object, "ppp") || inherits(object, "quad")) {
+    # convert to quadscheme
+    if(inherits(object, "ppp"))
+      object <- quadscheme(object, ...)
+    # fit model
+    if(!is.null(model))
+      fit <- update(model, Q=object, forcefit=TRUE)
+    else if(ppmcorrection == "border")
+      fit <- ppm(object,
+                 trend=trend, interaction=interaction,
+                 rbord=rbord, forcefit=TRUE)
+    else
+      fit <- ppm(object,
+                 trend=trend, interaction=interaction,
+                 correction=ppmcorrection, forcefit=TRUE)
+  } else 
+    stop("object should be a fitted point process model or a point pattern")
+
+  rfixed <- !is.null(r) || !is.null(breaks)
+  
+  # Extract data and quadrature points
+  Q <- quad.ppm(fit, drop=FALSE)
+  X <- data.ppm(fit)
+  U <- union.quad(Q)
+  Z <- is.data(Q) # indicator data/dummy
+#  E <- equalsfun.quad(Q)
+#  WQ <- w.quad(Q)  # quadrature weights
+
+  # integrals will be restricted to quadrature points
+  # that were actually used in the fit
+#  USED <- getglmsubset(fit)
+  if(fit$correction == "border") {
+    rbord <- fit$rbord
+    b <- bdist.points(U)
+    USED <- (b > rbord)
+    bX <- bdist.points(X)
+    USEDX <- (bX > rbord)
+  } else {
+    USED <- rep.int(TRUE, U$n)
+    USEDX <- rep.int(TRUE, X$n)
+  }
+  
+  # basic statistics
+  Win <- Window(X)
+  npts <- npoints(X)
+  areaW <- area(Win)
+  lambda <- npts/areaW
+
+  #  determine breakpoints for r values
+  rmaxdefault <- rmax.rule("F", Win, lambda)
+  if(rfixed) 
+    breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault)
+  else {
+    # create fairly coarse 'r' values
+    r <- seq(0, rmaxdefault, length=nr)
+    breaks <- breakpts.from.r(r)
+  }
+  rvals <- breaks$r
+  rmax  <- breaks$max
+  
+  # residuals
+  res <- residuals(fit, type="raw", drop=FALSE,
+                    new.coef=truecoef, quad=hi.res)
+  # 
+  rescts <- with(res, "continuous")
+  # absolute weight for continuous integrals
+  wc   <- -rescts
+
+  # initialise fv object
+  df <- data.frame(r=rvals, theo=0)
+  desc <- c("distance argument r", "value 0 corresponding to perfect fit")
+  ans <- fv(df, "r", substitute(bold(R)~Delta~V[A](r), NULL),
+            "theo", . ~ r,
+            alim=c(0, rmax), c("r","%s[theo](r)"), desc,
+            fname="bold(R)~Delta~V[A]")
+
+  #
+  # for efficiency, compute the largest value of distance transform
+  Dmax <- 0
+  for(i in seq_len(npts)) {
+    Di <- distmap(X[-i])
+    Dimax <- summary(Di)$max
+    Dmax <- max(Dmax, Dimax)
+  }
+  Rmax <- min(max(rvals), Dmax * 1.1)
+  nontrivial <- (rvals <= Rmax)
+  trivialzeroes <- numeric(sum(!nontrivial))
+  
+  # pseudosum
+  Ax <- areaLoss.grid(X, rvals[nontrivial], subset=USEDX, ngrid=ngrid)
+  C1 <- apply(Ax, 2, sum)
+  C1 <- c(C1, trivialzeroes)
+  # pseudocompensator
+  OK <- USED & !Z
+  Au <- areaGain.grid(U[OK], X, rvals[nontrivial], W=Win, ngrid=ngrid)
+  lamu <- matrix(wc[OK], nrow=nrow(Au), ncol=ncol(Au))
+  C2 <- apply(lamu * Au, 2, sum)
+  C2 <- c(C2, trivialzeroes)
+  # pseudoscore residual
+  Ctot <- C1 - C2
+  # tack on
+  ans <- bind.fv(ans,
+                 data.frame(dat=C1,
+                            com=C2,
+                            res=Ctot),
+                 c("Sigma~Delta~V[A](r)", "bold(C)~Delta~V[A](r)", "%s(r)"),
+                 c("data pseudosum (contribution to %s)",
+                   "model pseudocompensator (contribution to %s)",
+                   "pseudoscore residual %s"),
+               "res")
+  #
+  # pseudovariance
+  #        (skipped if called by envelope() etc)
+  #
+  if(correction == "all") {
+    lamX <- matrix(wc[USED & Z], nrow=nrow(Ax), ncol=ncol(Ax))
+    Var <- apply(lamu * Au^2, 2, sum) + apply(lamX * Ax^2, 2, sum)
+    Var <- c(Var, trivialzeroes)
+    # two-sigma limits
+    TwoSig <- 2 * sqrt(Var)
+    # tack on
+    ans <- bind.fv(ans,
+                   data.frame(var=Var,
+                              up=TwoSig,
+                              lo=-TwoSig),
+                 c("bold(C)^2~Delta~V[A](r)",
+                   "%s[up](r)", "%s[lo](r)"),
+                 c("pseudovariance of %s",
+                   "upper 2sigma critical limit for %s",
+                   "lower 2sigma critical limit for %s"),
+               "res")
+    fvnames(ans, ".") <- c("res", "up", "lo", "theo")
+  }
+  unitname(ans) <- unitname(fit)
+  # 
+  return(ans)
+}
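+
+## Editorial sketch, not upstream code: area-interaction pseudoscore
+## diagnostic for a point pattern (a model is fitted internally).
+##   plot(psstA(cells, nr=20))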
diff --git a/R/psstG.R b/R/psstG.R
new file mode 100755
index 0000000..1084633
--- /dev/null
+++ b/R/psstG.R
@@ -0,0 +1,184 @@
+#
+#	psstG.R
+#
+#	Pseudoscore residual for unnormalised G (saturation process)
+#
+#	$Revision: 1.9 $	$Date: 2015/10/21 09:06:57 $
+#
+################################################################################
+#
+
+psstG <- function(object, r=NULL, breaks=NULL, ...,
+                  model=NULL, 
+                  trend=~1, interaction=Poisson(),
+                  rbord=reach(interaction),
+                  truecoef=NULL, hi.res=NULL) {
+  if(inherits(object, "ppm")) 
+    fit <- object
+  else if(inherits(object, "ppp") || inherits(object, "quad")) {
+    # convert to quadscheme
+    if(inherits(object, "ppp"))
+      object <- quadscheme(object, ...)
+    # fit model
+    if(!is.null(model))
+      fit <- update(model, Q=object, forcefit=TRUE)
+    else 
+      fit <- ppm(object,
+                 trend=trend, interaction=interaction,
+                 rbord=rbord, forcefit=TRUE)
+  } else 
+    stop("object should be a fitted point process model or a point pattern")
+
+#  rfixed <- !is.null(r) || !is.null(breaks)
+  
+  # Extract data and quadrature points
+  Q <- quad.ppm(fit, drop=FALSE)
+  X <- data.ppm(fit)
+  U <- union.quad(Q)
+  Z <- is.data(Q) # indicator data/dummy
+  E <- equalsfun.quad(Q)
+#  WQ <- w.quad(Q)  # quadrature weights
+
+  # integrals will be restricted to quadrature points
+  # that were actually used in the fit
+#  USED <- getglmsubset(fit)
+  if(fit$correction == "border") {
+    rbord <- fit$rbord
+    b <- bdist.points(U)
+    USED <- (b > rbord)
+  } else USED <- rep.int(TRUE, U$n)
+  
+  # basic statistics
+  Win <- Window(X)
+  npts <- npoints(X)
+  areaW <- area(Win)
+  lambda <- npts/areaW
+
+  # adjustments to account for restricted domain of pseudolikelihood
+#  if(any(!USED)) {
+#    npts.used <- sum(Z & USED)
+#    area.used <- sum(WQ[USED])
+#    lambda.used <- npts.used/area.used
+#  } else {
+#    npts.used <- npts
+#    area.used <- areaW
+#    lambda.used <- lambda
+#  }
+  
+  #  determine breakpoints for r values
+  rmaxdefault <- rmax.rule("G", Win, lambda)
+  breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault)
+  rvals <- breaks$r
+  rmax  <- breaks$max
+  
+  # residuals
+  res <- residuals(fit, type="raw",drop=FALSE,
+                    new.coef=truecoef, quad=hi.res)
+#  resval <- with(res, "increment")
+  rescts <- with(res, "continuous")
+  # absolute weight for continuous integrals
+  wc   <- -rescts
+
+  # initialise fv object
+  df <- data.frame(r=rvals, theo=0)
+  desc <- c("distance argument r", "value 0 corresponding to perfect fit")
+  ans <- fv(df, "r", substitute(bold(R)~Delta~V[S](r), NULL),
+            "theo", . ~ r,
+            alim=c(0, rmax), c("r","%s[theo](r)"), desc,
+            fname="bold(R)~Delta~V[S]")
+
+  # First phase: .................................................
+  # nearest neighbours (quadrature point to data point)
+  nn <- nncross(U, X, seq(U$n), seq(X$n)) # excludes identical pairs
+  dIJ <- nn$dist
+  I <- seq(U$n)
+  J <- nn$which
+  DD <- (I <= X$n)  # TRUE for data points
+  wcIJ <- wc
+  okI <- USED[I]
+
+  # histogram of nndist for data points only (without edge correction)
+  Bsum <- cumsum(whist(dIJ[DD & okI], breaks$val))
+  # weighted histogram of nncross (without edge correction)
+  Bint <- cumsum(whist(dIJ[okI], breaks$val, wcIJ[okI]))
+  # residual
+  Bres <- Bsum - Bint
+  # tack on 
+  
+  ans <- bind.fv(ans,
+                 data.frame(dat1=Bsum,
+                            com1=Bint,
+                            res1=Bres),
+                 c("%s[dat1](r)",
+                   "%s[com1](r)",
+                   "%s[res1](r)"),
+                 c("phase 1 pseudosum (contribution to %s)",
+                   "phase 1 pseudocompensator (contribution to %s)",
+                   "phase 1 pseudoresidual (contribution to %s)"))
+  
+  # Second phase: ................................................
+  # close pairs (quadrature point to data point)
+  close <- crosspairs(U, X, rmax, what="ijd")
+  dIJ <- close$d
+  I   <- close$i
+  J   <- close$j
+#  UI <- U[I]
+#  XJ <- X[J]
+  EIJ <- E(I, J) # TRUE if points are identical, U[I[k]] == X[J[k]] 
+  ZI <- Z[I]     # TRUE if U[I[k]] is a data point
+  DD <- ZI & !EIJ  # TRUE for pairs of distinct data points only
+#  nDD <- sum(DD)
+  okI <- USED[I]
+  
+  # residual weights
+#  wIJ <- ifelseXY(EIJ, rescts[I], resval[I])
+  # absolute weight for continuous integrals
+  wc   <- -rescts
+  wcIJ <- -rescts[I]
+  
+  # nearest and second-nearest neighbour distances in X
+  nn1 <- nndist(X)
+  nn2 <- nndist(X, k=2)
+  nn1J <- nn1[J]
+  nn2J <- nn2[J]
+  
+  # weird use of the reduced sample estimator
+  # data sum:
+  RSX <- Kount(dIJ[DD & okI], nn2J[DD & okI], nn2J[ZI & okI], breaks)
+  Csum <- RSX$numerator
+  # integral:
+  if(spatstat.options("psstG.remove.zeroes"))
+    okE <- okI & !EIJ
+  else
+    okE <- okI
+  RSD <- Kwtsum(dIJ[okE], nn1J[okE], wcIJ[okE],
+                  nn1, rep.int(1, length(nn1)), breaks)
+  Cint <- RSD$numerator
+  #
+  Cres <- Bres + Csum - Cint
+  # tack on 
+  ans <- bind.fv(ans,
+                 data.frame(dat2=Csum,
+                            com2=Cint,
+                            res2=Cres,
+                            dat=Bsum+Csum,
+                            com=Bint+Cint,
+                            res=Bres+Cres),
+                 c("%s[dat2](r)",
+                   "%s[com2](r)",
+                   "%s[res2](r)",
+                   "Sigma~Delta~V[S](r)",
+                   "bold(C)~Delta~V[S](r)",
+                   "bold(R)~Delta~V[S](r)"),
+                 c("phase 2 pseudosum (contribution to %s)",
+                   "phase 2 pseudocompensator (contribution to %s)",
+                   "phase 2 pseudoresidual (contribution to %s)",
+                   "pseudosum (contribution to %s)",
+                   "pseudocompensator (contribution to %s)",
+                   "pseudoresidual function %s"),
+                 "res")
+  # restrict choice of curves in default plot
+  fvnames(ans, ".") <- c("dat", "com", "res", "theo")
+  # 
+  return(ans)
+}
diff --git a/R/qqplotppm.R b/R/qqplotppm.R
new file mode 100755
index 0000000..b6e98c9
--- /dev/null
+++ b/R/qqplotppm.R
@@ -0,0 +1,333 @@
+#
+#    QQ plot of smoothed residual field against model
+#
+#  qqplot.ppm()       QQ plot (including simulation)
+#
+#  $Revision: 1.30 $   $Date: 2016/04/25 02:34:40 $
+#
+
+qqplot.ppm <- local({
+
+  ## How to refit the model
+  refit <- function(fit, pattern) {
+    update.ppm(fit, Q=pattern, use.internal=(fit$method != "mppm"))
+  }
+  
+  ## how to compute the residual field
+  residualfield <- function(fit, ...) {
+    d <- diagnose.ppm(fit, which="smooth",
+                      plot.it=FALSE, compute.cts=FALSE, compute.sd=FALSE,
+                      check=FALSE, ...)
+    return(d$smooth$Z$v)
+  }
+
+  qqplot.ppm <-
+    function(fit, nsim=100, expr=NULL, ..., type="raw", style="mean",
+             fast=TRUE, verbose=TRUE, plot.it=TRUE,
+             dimyx=NULL, nrep=if(fast) 5e4 else 1e5,
+             control=update(default.rmhcontrol(fit), nrep=nrep),
+             saveall=FALSE,
+             monochrome=FALSE,
+             limcol=if(monochrome) "black" else "red",
+             maxerr=max(100, ceiling(nsim/10)),
+             check=TRUE, repair=TRUE, envir.expr) {
+    verifyclass(fit, "ppm")
+
+    if(check && damaged.ppm(fit)) {
+      if(!repair)
+        stop("object format corrupted; try update(fit, use.internal=TRUE)")
+      message("object format corrupted; repairing it.")
+      fit <- update(fit, use.internal=TRUE)
+    }
+  
+    if(fast) {
+      oldnpixel <- spatstat.options("npixel")
+      if(is.null(dimyx)) 
+        dimyx <- pmin(40, rev(oldnpixel))
+      spatstat.options(npixel=rev(dimyx))
+    } 
+    
+    ################   How to evaluate residuals ##########################
+  
+    ## Quantiles of the residual field will be computed.
+
+    ## Data values
+    dat <- residualfield(fit, type=type, ..., dimyx=dimyx)
+
+    ##################  How to perform simulations?  #######################
+
+    ## envir.call <- sys.parent()
+    envir.here <- sys.frame(sys.nframe())
+
+    ## extract.from.list <- FALSE
+    inext <- 0 # to placate package checker
+    dont.complain.about(inext)
+    
+    if(is.null(expr)) {
+      ## We will simulate from the fitted model 'nsim' times
+      ## and refit the model to these simulations
+      simsource <- "fit"
+      how.simulating <- "simulating from fitted model" 
+
+      ## prepare rmh arguments
+      rcontrol <- rmhcontrol(control)
+      rmodel   <- rmhmodel(fit,
+                           control=rcontrol, project=FALSE, verbose=verbose)
+      rstart   <- rmhstart(n.start=data.ppm(fit)$n)
+      ## pre-digest arguments
+      rmhinfolist <- rmh(rmodel, rstart, rcontrol, preponly=TRUE, verbose=FALSE)
+    
+      ## expression to be evaluated each time
+      expr <- expression(
+        refit(fit, 
+              rmhEngine(rmhinfolist, verbose=FALSE)))
+      envir.expr <- envir.here
+
+      ## pacify code checkers
+      dont.complain.about(rmhinfolist)
+    } else if(is.expression(expr)) {
+      simsource <- "expr"
+      how.simulating <- paste("evaluating", sQuote("expr"))  
+      if(missing(envir.expr) || is.null(envir.expr))
+        envir.expr <- parent.frame()
+    } else if(inherits(expr, "envelope")) {
+      simpat <- attr(expr, "simpatterns")
+      if(!is.null(simpat) && all(sapply(simpat, is.ppp))) {
+        expr <- expression(simpat[[inext]])
+        envir.expr <- envir.here
+        dont.complain.about(simpat)
+        simsource <- "list"
+        how.simulating <- "extracting point pattern from list"
+      } else stop(paste("Argument", sQuote("expr"),
+                        "is an envelope object,",
+                        "but does not contain point patterns"),
+                  call.=FALSE)
+    } else if(is.list(expr) && all(sapply(expr, is.ppp))) {
+      simpat <- expr
+      expr <- expression(simpat[[inext]])
+      envir.expr <- envir.here
+      dont.complain.about(simpat)
+      simsource <- "list"
+      how.simulating <- "extracting point pattern from list"
+    } else stop(paste(sQuote("expr"),
+                      "should be an expression, or an envelope object,",
+                      "or a list of point patterns"),
+                call.=FALSE)
+
+    exprstring <- if(simsource == "expr") deparse(expr) else NULL
+
+    ######  Perform simulations
+    if(verbose) {
+      cat(paste("Simulating", nsim, "realisations... "))
+      pstate <- list()
+    }
+    simul.sizes <- numeric(nsim)
+    isim <- 0
+    ierr <- 0
+    repeat {
+      inext <- isim + 1
+      ## protect from randomly-generated crashes in gam
+      ei <- try(eval(expr, envir=envir.expr), silent=!verbose)
+      if(inherits(ei, "try-error")) {
+        ## error encountered in evaluating 'expr'
+        ierr <- ierr + 1
+        if(ierr > maxerr) 
+          stop(paste("Exceeded maximum of", maxerr,
+                     "failures in", how.simulating,
+                     "after generating only", isim, "realisations"))
+        # otherwise, simply try again on the next pass through the loop
+      } else {
+        ## simulation successful
+        isim <- isim + 1
+        fiti <- 
+          if(simsource == "fit")
+            ei
+          else if(is.ppm(ei))
+            ei
+          else if(is.ppp(ei))
+            refit(fit, ei)
+          else
+            stop("result of eval(expr) is not a ppm or ppp object")
+        ## diagnostic info
+        simul.sizes[isim] <- data.ppm(fiti)$n
+        ## compute residual field
+        resi <- residualfield(fiti, type=type, ..., dimyx=dimyx)
+        if(isim == 1)
+          sim <- array(, dim=c(dim(resi), nsim))
+        sim[,,isim] <- resi
+        if(verbose) 
+          pstate <- progressreport(isim, nsim, state=pstate)
+        if(isim >= nsim)
+          break
+      }
+    }
+
+    ###### Report diagnostics
+    if(ierr > 0)
+      cat(paste("\n\n**Alert:",
+                ierr, "failures occurred in", how.simulating, "\n\n"))
+    nempty <- sum(simul.sizes == 0)
+    if(nempty > 0)
+      cat(paste("\n\n**Alert:",
+                nempty, "out of", nsim,
+                "simulated patterns were empty.\n\n"))
+    else
+      cat(paste("\nDiagnostic info:\n",
+                "simulated patterns contained an average of",
+                mean(simul.sizes), "points.\n"))
+    if(nempty == nsim)
+      warning("All simulated patterns were empty")
+    ############ Plot them
+    switch(style,
+           classical = {
+             rr <- range(c(dat,sim))
+             result <- qqplot(sim, dat, xlim=rr, ylim=rr, asp=1.0,
+                              xlab="Quantiles of simulation",
+                              ylab="Quantiles of data", plot.it=plot.it)
+             if(plot.it) {
+               title(sub=paste("Residuals:", type))
+               abline(0,1, lty=2)
+             }
+             result <- append(result,
+                              list(data=dat,
+                                   sim=sim,
+                                   xlim=rr,
+                                   ylim=rr,
+                                   xlab="Quantiles of simulation",
+                                   ylab="Quantiles of data",
+                                   rtype=type,
+                                   nsim=nsim,
+                                   fit=fit,
+                                   expr=exprstring,
+                                   simsource = simsource
+                                   )
+                              )
+           },
+           mean = {
+             ## compute quantiles corresponding to probabilities p[i]
+             ## separately in each realisation.
+             if(verbose) cat("Calculating quantiles...")
+             if(fast) {
+               p <- ppoints(min(100,length(dat)), 3/8)
+               qsim <- apply(sim, 3, quantile, probs=p, na.rm=TRUE)
+             } else {
+               qsim <- apply(sim, 3, sort, na.last=TRUE)
+             }
+             if(verbose) cat("averaging...")
+             ## sample mean of each quantile
+             meanq <- apply(qsim, 1, mean, na.rm=TRUE)
+             ## et cetera
+             varq <- apply(qsim, 1, var, na.rm=TRUE)
+             sdq <- sqrt(varq)
+             q.025 <- apply(qsim, 1, quantile, probs=0.025, na.rm=TRUE)
+             q.975 <- apply(qsim, 1, quantile, probs=0.975, na.rm=TRUE)
+  
+             rr <- range(c(meanq,dat), na.rm=TRUE)
+
+             dats <- if(fast) quantile(dat, probs=p, na.rm=TRUE) else
+                              sort(dat, na.last=TRUE)
+
+             if(verbose) cat("..Done.\n")
+             if(plot.it) {
+               plot(meanq, dats,
+                    xlab="Mean quantile of simulations", ylab="data quantile",
+                    xlim=rr, ylim=rr, asp=1.0)
+               abline(0,1)
+               lines(meanq, q.025, lty=2, col=limcol)
+               lines(meanq, q.975, lty=2, col=limcol)
+               title(sub=paste("Residuals:", type))
+             }
+             result <- list(x=meanq, y=dats, sdq=sdq,
+                            q.025=q.025, q.975=q.975,
+                            data=dat, sim=sim,
+                            xlim=rr, ylim=rr,
+                            xlab="Mean quantile of simulations",
+                            ylab="data quantile",
+                            rtype=type,
+                            nsim=nsim,
+                            fit=fit,
+                            expr=exprstring,
+                            simsource=simsource)
+           },
+           stop(paste("Unrecognised option for", sQuote("style")))
+           )
+
+    ## Throw out baggage if not wanted         
+    if(!saveall) {
+      result$fit <- summary(fit, quick=TRUE)
+      result$sim <- NULL
+    }
+         
+    ## reset npixel
+    if(fast)
+      spatstat.options(npixel=oldnpixel)
+    ##
+    class(result) <- c("qqppm", class(result))
+    return(invisible(result))
+  }
+
+  qqplot.ppm
+
+})
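+
+# Usage sketch (illustrative comment only, not executed; assumes the
+# standard 'cells' dataset supplied with the package):
+#     fit <- ppm(cells ~ 1)
+#     qq  <- qqplot.ppm(fit, nsim=20, verbose=FALSE)
+#     plot(qq)   # Q-Q plot with pointwise 2.5%/97.5% simulation limits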
+
+
+plot.qqppm <- local({
+
+  plot.qqppm <- function(x, ..., limits=TRUE,
+                         monochrome=spatstat.options('monochrome'),
+                         limcol=if(monochrome) "black" else "red") {
+    stopifnot(inherits(x, "qqppm"))
+    default.type <- if(length(x$x) > 150) "l" else "p"
+    do.call(myplot,
+            resolve.defaults(list(x, ..., type=default.type,
+                                  limits=limits, limcol=limcol)))
+    return(invisible(x))
+  }
+
+  myplot <- function(object,
+                     xlab = object$xlab, ylab = object$ylab,
+                     xlim = object$xlim, ylim = object$ylim,
+                     asp = 1,
+                     type = default.type,
+                     ..., limits=TRUE, limcol="red") {
+    plot(object$x, object$y, xlab = xlab, ylab = ylab,
+         xlim = xlim, ylim = ylim, asp = asp, type = type, ...)
+    abline(0, 1)
+    
+    if(limits) {
+      if(!is.null(object$q.025))
+        lines(object$x, object$q.025, lty = 2, col=limcol)
+      if(!is.null(object$q.975))
+        lines(object$x, object$q.975, lty = 2, col=limcol)
+    }
+    title(sub=paste("Residuals:", object$rtype))
+  }
+
+  plot.qqppm
+})
+
+
+print.qqppm <- function(x, ...) {
+  stopifnot(inherits(x, "qqppm"))
+  splat("Q-Q plot of point process residuals",
+        "of type", sQuote(x$rtype), "\n",
+        "based on", x$nsim, "simulations")
+  simsource <- x$simsource
+  if(is.null(simsource)) # old version
+    simsource <- if(x$simulate.from.fit) "fit" else "expr"
+  switch(simsource,
+         fit = {
+           fit  <- x$fit
+           sumfit <- if(is.ppm(fit)) summary(fit, quick=TRUE)
+                     else if(inherits(fit, "summary.ppm")) fit
+                     else list(name="(unrecognised format)")
+           splat("\nSimulations from fitted model:", sumfit$name)
+         },
+         expr = {
+           splat("Simulations obtained by evaluating the following expression:")
+           print(x$expr)
+         },
+         list = {
+           splat("Simulated point patterns were provided in a list")
+         })
+  invisible(NULL)
+}
+
diff --git a/R/quadclass.R b/R/quadclass.R
new file mode 100755
index 0000000..3dc1d5c
--- /dev/null
+++ b/R/quadclass.R
@@ -0,0 +1,318 @@
+#
+#	quadclass.S
+#
+#	Class 'quad' to define quadrature schemes
+#	in (rectangular) windows in two dimensions.
+#
+#	$Revision: 4.26 $	$Date: 2016/02/16 01:39:12 $
+#
+# An object of class 'quad' contains the following entries:
+#
+#	$data:	an object of class 'ppp'
+#		defining the OBSERVATION window, 
+#		giving the locations (& marks) of the data points.
+#
+#	$dummy:	object of class 'ppp'
+#		defining the QUADRATURE window, 
+#		giving the locations (& marks) of the dummy points.
+#	
+#	$w: 	vector giving the nonnegative weights for the
+#		data and dummy points (data first, followed by dummy)
+#
+#		w may also have an attribute attr(w, "zeroes")
+#               equivalent to (w == 0). If this is absent
+#               then all points are known to have positive weights.
+#
+#       $param:
+#               parameters that were used to compute the weights
+#               and possibly to create the dummy points (see below).
+#              
+#       The combined (data+dummy) vectors of x, y coordinates of the points, 
+#       and their weights, are extracted using standard functions 
+#       x.quad(), y.quad(), w.quad() etc.
+#
+# ----------------------------------------------------------------------
+#  Note about parameters:
+#
+#       If the quadrature scheme was created by quadscheme(),
+#       then $param contains
+#
+#           $param$weight
+#                list containing the values of all parameters
+#                actually used to compute the weights.
+#
+#           $param$dummy
+#                list containing the values of all parameters
+#                actually used to construct the dummy pattern
+#                via default.dummy();
+#                or NULL if the dummy pattern was provided externally
+#
+#           $param$sourceid
+#                vector mapping the quadrature points to the
+#                original data and dummy points.
+#
+#   If you constructed the quadrature scheme manually, this
+#   structure may not be present.
+#
+#-------------------------------------------------------------
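+#
+# Illustrative sketch (comment only; quadscheme() is defined in quadscheme.R):
+#
+#     X <- runifpoint(42)              # data pattern
+#     Q <- quadscheme(X)               # object of class 'quad'
+#     length(w.quad(Q)) == n.quad(Q)   # one weight per quadrature point
+#     all(is.data(Q)[1:npoints(X)])    # data points come first, then dummy
+#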
+
+quad <- function(data, dummy, w, param=NULL) {
+  
+  data <- as.ppp(data)
+  dummy <- as.ppp(dummy)
+
+  n <- data$n + dummy$n
+	
+  if(missing(w))
+    w <- rep.int(1, n)
+  else {
+    w <- as.vector(w)
+    if(length(w) != n)
+      stop("length of weights vector w is not equal to total number of points")
+  }
+
+  if(is.null(attr(w, "zeroes")) && any( w == 0))
+	attr(w, "zeroes") <- (w == 0)
+
+  Q <- list(data=data, dummy=dummy, w=w, param=param)
+  class(Q) <- "quad"
+
+  invisible(Q)
+}
+
+# ------------------ extractor functions ----------------------
+
+x.quad <- function(Q) {
+  verifyclass(Q, "quad")
+  c(Q$data$x, Q$dummy$x)
+}
+
+y.quad <- function(Q) {
+  verifyclass(Q, "quad")
+  c(Q$data$y, Q$dummy$y)
+}
+
+w.quad <- function(Q) {
+  verifyclass(Q, "quad")
+  Q$w
+}
+
+param.quad <- function(Q) {
+  verifyclass(Q, "quad")
+  Q$param
+}
+ 
+n.quad <- function(Q) {
+  verifyclass(Q, "quad")
+  Q$data$n + Q$dummy$n
+}
+
+marks.quad <- function(x, dfok=FALSE, ...) {
+  verifyclass(x, "quad")
+  dat <- x$data
+  dum <- x$dummy
+  if(dfok) warning("ignored dfok = TRUE; not implemented")
+  mdat <- marks(dat, dfok=FALSE, ...)
+  mdum <- marks(dum, dfok=FALSE, ...)
+  if(is.null(mdat) && is.null(mdum))
+    return(NULL)
+  if(is.null(mdat))
+    mdat <- rep.int(NA_integer_, dat$n)
+  if(is.null(mdum))
+    mdum <- rep.int(NA_integer_, dum$n)
+  if(is.factor(mdat) && is.factor(mdum)) {
+    mall <- cat.factor(mdat, mdum)
+  } else mall <- c(mdat, mdum)
+  return(mall)
+}
+
+is.marked.quad <- function(X, na.action="warn", ...) {
+  marx <- marks(X, ...)
+  if(is.null(marx))
+    return(FALSE)
+  if(anyNA(marx))
+    switch(na.action,
+           warn = {
+             warning(paste("some mark values are NA in the point pattern",
+                           short.deparse(substitute(X))))
+           },
+           fatal = {
+             return(FALSE)
+           },
+           ignore = {}
+           )
+  return(TRUE)
+}
+
+is.multitype.quad <- function(X, na.action="warn", ...) {
+  marx <- marks(X, ...)
+  if(is.null(marx))
+    return(FALSE)
+  if(anyNA(marx))
+    switch(na.action,
+           warn = {
+             warning(paste("some mark values are NA in the point pattern",
+                           short.deparse(substitute(X))))
+           },
+           fatal = {
+             return(FALSE)
+           },
+           ignore = {}
+           )
+  return(!is.data.frame(marx) && is.factor(marx))
+}
+
+is.data <- function(Q) {
+  verifyclass(Q, "quad")
+  return(c(rep.int(TRUE, Q$data$n),
+	   rep.int(FALSE, Q$dummy$n)))
+}
+
+equals.quad <- function(Q) {
+    # return matrix E such that E[i,j] = (X[i] == U[j])
+    # where X = Q$data and U = union.quad(Q)
+    n <- Q$data$n
+    m <- Q$dummy$n
+    E <- matrix(FALSE, nrow=n, ncol=n+m)
+    diag(E) <- TRUE
+    E
+}
+
+equalsfun.quad <- function(Q) {
+  stopifnot(inherits(Q, "quad"))
+  return(function(i,j) { i == j })
+}
+
+equalpairs.quad <- function(Q) {
+  # return two-column matrix E such that
+  #     X[E[i,1]] == U[E[i,2]] for all i
+  # where X = Q$data and U = union.quad(Q)
+  n <- Q$data$n
+  return(matrix(rep.int(seq_len(n),2), ncol=2))
+}
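+
+# Illustrative example (comment only): if Q has 3 data and 2 dummy points,
+# equals.quad(Q) is a 3 x 5 logical matrix with TRUE on the diagonal,
+# and equalpairs.quad(Q) is the matrix rbind(c(1,1), c(2,2), c(3,3)).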
+      
+union.quad <- function(Q) {
+  verifyclass(Q, "quad")
+  ppp(x= c(Q$data$x, Q$dummy$x),
+      y= c(Q$data$y, Q$dummy$y),
+      window=Q$dummy$window,
+      marks=marks.quad(Q),
+      check=FALSE)
+}
+	
+#
+#   Plot a quadrature scheme
+#
+#
+plot.quad <- function(x, ..., main, add=FALSE, dum=list(), tiles=FALSE) {
+  if(missing(main) || is.null(main)) 
+    main <- short.deparse(substitute(x))
+  verifyclass(x, "quad")
+  data <- x$data
+  dummy <- x$dummy
+  # determine plot parameters for dummy points
+  dum <- resolve.defaults(dum, list(pch=".", add=TRUE))
+  tt <- NULL
+  if(tiles) {
+    # show tiles that determined the weights
+    wp <- x$param$weight
+    if(is.null(wp) || is.null(wp$method)) {
+      warning("Tile information is not available")
+    } else {
+      switch(wp$method,
+             grid = {
+               ntile <- wp$ntile
+               tt <- quadrats(as.owin(x), ntile[1], ntile[2])
+             },
+             dirichlet = {
+               U <- union.quad(x)
+               if(wp$exact) {
+                 tt <- dirichlet(U)
+               } else {
+                 win <- as.mask(as.owin(U))
+                 tileid <- im(exactdt(U)$i,
+                              win$xcol, win$yrow, win$xrange, win$yrange)
+                 tt <- tess(image=tileid[win, drop=FALSE])
+               }
+             },
+             warning("Unrecognised 'method' for tile weights")
+             )
+    }
+  }
+  pixeltiles <- !is.null(tt) && tt$type == "image"
+  tileargs <- resolve.defaults(list(x=tt, main=main, add=add),
+                               list(...),
+                               if(!pixeltiles) list(col="grey") else NULL)
+  if(!is.marked(data)) {
+    if(!is.null(tt)) {
+      do.call(plot, tileargs)
+      add <- TRUE
+    }
+    plot(data, main=main, add=add, ...)
+    do.call(plot, append(list(x=dummy), dum))
+  } else if(is.multitype(data) && !add) {
+    oldpar <- par(ask = interactive() &&
+                  (.Device %in% c("X11", "GTK", "windows", "Macintosh")))
+    on.exit(par(oldpar))
+    data.marks <- marks(data)
+    dummy.marks <- marks(dummy)
+    types <- levels(data.marks)
+    for(k in types) {
+      add <- FALSE
+      if(!is.null(tt)) {
+        do.call(plot, tileargs)
+        add <- TRUE
+      }
+      maink <- paste(main, "\n mark = ", k, sep="")
+      plot(unmark(data[data.marks == k]), main=maink, add=add, ...)
+      do.call(plot, append(list(x=unmark(dummy[dummy.marks == k])),
+                             dum))
+    }
+  } else {
+    if(!is.null(tt)) {
+      do.call(plot, tileargs)
+      add <- TRUE
+    }
+    plot(data, ..., main=main, add=add)
+    do.call(plot, append(list(x=dummy), dum))
+  }
+  invisible(NULL)
+}
+
+# subset operator
+
+"[.quad" <- function(x, ...) {
+  U <- union.quad(x)
+  Z <- is.data(x)
+  w <- w.quad(x)
+  # determine serial numbers of points to be included
+  V <- U %mark% seq_len(U$n)
+  i <- marks(V[...])
+  # extract corresponding subsets of vectors
+  Z <- Z[i]
+  w <- w[i]
+  # take subset of points, using any type of subset index
+  U <- U[...]
+  # stick together
+  quad(U[Z], U[!Z], w)
+}
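+
+# Illustrative example (comment only): Q[owin(c(0,0.5), c(0,0.5))]
+# restricts the quadrature scheme to the subwindow, keeping the
+# weights attached to the retained data and dummy points.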
+
+domain.quad <- Window.quad <- function(X, ...) { as.owin(X) }
+
+"Window<-.quad" <- function(X, ..., value) {
+  verifyclass(value, "owin")
+  return(X[value])
+}
+
+unitname.quad <- function(x) {
+  return(unitname(x$data))
+}
+
+"unitname<-.quad" <- function(x, value) {
+  unitname(x$data) <- value
+  unitname(x$dummy) <- value
+  return(x)
+}
+
+
diff --git a/R/quadratcount.R b/R/quadratcount.R
new file mode 100755
index 0000000..b0bc802
--- /dev/null
+++ b/R/quadratcount.R
@@ -0,0 +1,213 @@
+#
+#  quadratcount.R
+#
+#  $Revision: 1.57 $  $Date: 2016/08/15 03:05:15 $
+#
+
+quadratcount <- function(X, ...) {
+  UseMethod("quadratcount")
+}
+
+quadratcount.splitppp <- function(X, ...) {
+  solapply(X, quadratcount, ...)
+}
+
+quadratcount.ppp <- function(X, nx=5, ny=nx, ...,
+                             xbreaks=NULL, ybreaks=NULL,
+                             tess=NULL)  {
+  verifyclass(X, "ppp")
+  W <- X$window
+
+  if(is.null(tess)) {
+    # rectangular boundaries 
+    if(!is.numeric(nx))
+      stop("nx should be numeric")
+    # start with rectangular tessellation
+    tess <- quadrats(as.rectangle(W),
+                     nx=nx, ny=ny, xbreaks=xbreaks, ybreaks=ybreaks)
+    # fast code for counting points in rectangular grid
+    Xcount <- rectquadrat.countEngine(X$x, X$y, tess$xgrid, tess$ygrid)
+    #
+    if(W$type != "rectangle") {
+      # intersections of rectangles with window including empty intersections
+      tess <- quadrats(X,
+                       nx=nx, ny=ny, xbreaks=xbreaks, ybreaks=ybreaks,
+                       keepempty=TRUE)
+      # now delete the empty quadrats and the corresponding counts
+      nonempty <- !tiles.empty(tess)
+#     WAS: nonempty <- !unlist(lapply(tiles(tess), is.empty))
+      if(!any(nonempty))
+        stop("All tiles are empty")
+      if(!all(nonempty)) {
+#        ntiles <- sum(nonempty)
+        tess   <- tess[nonempty]
+        Xcount <- t(Xcount)[nonempty]
+        # matrices and tables are in row-major order,
+        # tiles in a rectangular tessellation are in column-major order
+        Xcount <- array(Xcount,
+                        dimnames=list(tile=tilenames(tess)))
+        class(Xcount) <- "table"
+      }
+    }
+  } else {
+    # user-supplied tessellation
+    if(!inherits(tess, "tess")) {
+      tess <- try(as.tess(tess), silent=TRUE)
+      if(inherits(tess, "try-error"))
+        stop("The argument tess should be a tessellation", call.=FALSE)
+    }
+    if(tess$type == "rect") {
+      # fast code for counting points in rectangular grid
+      Xcount <- rectquadrat.countEngine(X$x, X$y, tess$xgrid, tess$ygrid)
+    } else {
+      # quadrats are another type of tessellation
+      Y <- cut(X, tess)
+      if(anyNA(marks(Y)))
+        warning("Tessellation does not contain all the points of X")
+      Xcount <- table(tile=marks(Y))
+    }
+  }
+  attr(Xcount, "tess") <- tess
+  class(Xcount) <- c("quadratcount", class(Xcount))
+  return(Xcount)
+}
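+
+# Usage sketch (illustrative comment only; assumes the standard
+# 'cells' dataset):
+#     Q <- quadratcount(cells, nx=3, ny=3)   # 3 x 3 table of counts
+#     plot(Q)                                # tiles annotated with counts
+#     intensity(Q)                           # counts divided by tile areas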
+
+plot.quadratcount <- function(x, ...,
+                              add=FALSE, entries=as.vector(t(as.table(x))),
+                              dx=0, dy=0, show.tiles=TRUE,
+                              textargs = list()) {
+  xname <- short.deparse(substitute(x))
+  tess <- attr(x, "tess")
+  # add=FALSE, show.tiles=TRUE  => plot tiles + numbers
+  # add=FALSE, show.tiles=FALSE => plot window (add=FALSE) + numbers
+  # add=TRUE,  show.tiles=TRUE  => plot tiles  (add=TRUE) + numbers
+  # add=TRUE,  show.tiles=FALSE => plot numbers
+  if(show.tiles || !add) {
+    context <- if(show.tiles) tess else as.owin(tess)
+    do.call(plot,
+            resolve.defaults(list(context, add=add),
+                             list(...),
+                             list(main=xname),
+                             .StripNull=TRUE))
+  }
+  if(!is.null(entries)) {
+    labels <- paste(as.vector(entries))
+    til <- tiles(tess)
+    incircles <- lapply(til, incircle)
+    x0 <- sapply(incircles, getElement, name="x")
+    y0 <- sapply(incircles, getElement, name="y")
+    ra <- sapply(incircles, getElement, name="r")
+    do.call.matched(text.default,
+                    resolve.defaults(list(x=x0 + dx * ra, y = y0 + dy * ra),
+                                     list(labels=labels),
+                                     textargs, 
+                                     list(...)),
+                    funargs=graphicsPars("text"))
+  }
+  return(invisible(NULL))
+}
+
+rectquadrat.breaks <- function(xr, yr, nx=5, ny=nx, xbreaks=NULL, ybreaks=NULL) {
+  if(is.null(xbreaks))
+    xbreaks <- seq(from=xr[1], to=xr[2], length.out=nx+1)
+  else if(min(xbreaks) > xr[1] || max(xbreaks) < xr[2])
+    stop("xbreaks do not span the range of x coordinates in the window")
+  if(is.null(ybreaks))
+    ybreaks <- seq(from=yr[1], to=yr[2], length.out=ny+1)
+  else if(min(ybreaks) > yr[1] || max(ybreaks) < yr[2])
+    stop("ybreaks do not span the range of y coordinates in the window")
+  return(list(xbreaks=xbreaks, ybreaks=ybreaks))
+}
+
+rectquadrat.countEngine <- function(x, y, xbreaks, ybreaks, weights) {
+  if(length(x) > 0) {
+    # check validity of breaks
+    if(!all(inside.range(range(x), range(xbreaks))))
+      stop("xbreaks do not span the actual range of x coordinates in data")
+    if(!all(inside.range(range(y), range(ybreaks))))
+      stop("ybreaks do not span the actual range of y coordinates in data")
+  }
+  # WAS: 
+  # xg <- cut(x, breaks=xbreaks, include.lowest=TRUE)
+  # yg <- cut(y, breaks=ybreaks, include.lowest=TRUE)
+  xg <- fastFindInterval(x, xbreaks, labels=TRUE)
+  yg <- fastFindInterval(y, ybreaks, labels=TRUE)
+  if(missing(weights)) {
+    sumz <- table(list(y=yg, x=xg))
+  } else {
+    # was: 
+    # sumz <- tapply(weights, list(y=yg, x=xg), sum)
+    # if(any(nbg <- is.na(sumz)))
+    #  sumz[nbg] <- 0
+    sumz <- tapplysum(weights, list(y=yg, x=xg), do.names=TRUE)
+  }
+  # reverse order of y 
+  sumz <- sumz[rev(seq_len(nrow(sumz))), ]
+  sumz <- as.table(sumz)
+  #
+  attr(sumz, "xbreaks") <- xbreaks
+  attr(sumz, "ybreaks") <- ybreaks
+  return(sumz)
+}
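+
+# Illustrative example (comment only, hypothetical values):
+#     rectquadrat.countEngine(x=c(0.1, 0.9), y=c(0.2, 0.8),
+#                             xbreaks=c(0, 0.5, 1), ybreaks=c(0, 0.5, 1))
+# yields a 2 x 2 table with one point in the bottom-left cell and one in
+# the top-right cell; note that rows are returned in decreasing y order.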
+
+quadrats <- function(X, nx=5, ny=nx, xbreaks = NULL, ybreaks = NULL,
+                     keepempty=FALSE) {
+  W <- as.owin(X)
+  xr <- W$xrange
+  yr <- W$yrange
+  b <- rectquadrat.breaks(xr, yr, nx, ny, xbreaks, ybreaks)
+  # rectangular tiles
+  Z <- tess(xgrid=b$xbreaks, ygrid=b$ybreaks, unitname=unitname(W))
+  if(W$type != "rectangle") {
+    # intersect rectangular tiles with window W
+    if(!keepempty) {
+      Z <- intersect.tess(Z, W)
+    } else {
+      til <- tiles(Z)
+      for(i in seq_along(til))
+        til[[i]] <- intersect.owin(til[[i]], W)
+      Z <- tess(tiles=til, window=W, keepempty=TRUE)
+    }
+  }
+  return(Z)
+}
+
+as.tess.quadratcount <- function(X) {
+  return(attr(X, "tess"))
+}
+
+as.owin.quadratcount <- function(W, ..., fatal=TRUE) {
+  return(as.owin(as.tess(W), ..., fatal=fatal))
+}
+
+domain.quadratcount <- Window.quadratcount <- function(X, ...) { as.owin(X) }
+
+intensity.quadratcount <- function(X, ..., image=FALSE) {
+  Y <- as.tess(X)
+  a <- tile.areas(Y)
+  ## in the rectangular case, tiles are indexed in column-major order
+  if(Y$type == "rect" && length(dim(X)) > 1) 
+    a <- matrix(a, byrow=TRUE, nrow(X), ncol(X))
+  lambda <- X/a
+  if(!image) {
+    trap.extra.arguments(...)
+    class(lambda) <- "table"
+    attr(lambda, "tess") <- NULL
+    return(lambda)
+  }
+  ## again to handle rectangular case
+  lambda <- as.vector(t(lambda))
+  tileid <- as.im(Y, ...)
+  result <- eval.im(lambda[tileid])
+  return(result)
+}
+
+## The shift method is undocumented.
+## It is only needed in plot.listof / plot.solist / plot.layered
+
+shift.quadratcount <- function(X, ...) {
+  attr(X, "tess") <- te <- shift(attr(X, "tess"), ...)
+  attr(X, "lastshift") <- getlastshift(te)
+  return(X)
+}
+
diff --git a/R/quadratmtest.R b/R/quadratmtest.R
new file mode 100755
index 0000000..526e879
--- /dev/null
+++ b/R/quadratmtest.R
@@ -0,0 +1,17 @@
+#
+#   method for 'quadrat.test' for class mppm
+#
+#   $Revision: 1.8 $   $Date: 2015/08/12 07:29:17 $
+#
+quadrat.test.mppm <- function(X, ...) {
+  Xname <- short.deparse(substitute(X))
+  if(!is.poisson.mppm(X))
+    stop("Model is not a Poisson point process")
+  
+  subs <- subfits(X)
+  tests <- anylapply(subs, quadrat.test.ppm, ..., fitname=Xname)
+
+  df.est <- length(coef(X))
+  return(pool.quadrattest(tests, Xname=Xname, df.est=df.est))
+}
+
diff --git a/R/quadratresample.R b/R/quadratresample.R
new file mode 100755
index 0000000..a7e8cb7
--- /dev/null
+++ b/R/quadratresample.R
@@ -0,0 +1,45 @@
+#
+# quadratresample.R
+#
+# resample a point pattern by resampling quadrats
+#
+# $Revision: 1.7 $  $Date: 2015/10/21 09:06:57 $
+#
+
+quadratresample <- function(X, nx, ny=nx, ...,
+                            replace=FALSE, nsamples=1,
+                            verbose=(nsamples > 1)) {
+  stopifnot(is.ppp(X))
+  if(X$window$type != "rectangle")
+    stop("Resampling is only implemented for rectangular windows")
+  # create tessellation
+  A <- quadrats(X, nx=nx, ny=ny)
+  # split data over tessellation
+  B <- split(X, A)
+  nq <- length(B)
+  # determine bottom left corner of each tile
+  V <- lapply(B, framebottomleft)
+  out <- list()
+  if(verbose) {
+    cat("Generating resampled patterns...")
+    pstate <- list()
+  }
+  for(i in 1:nsamples) {
+    # resample tiles
+    ind <- sample(1:nq, nq, replace=replace)
+    Xresampled <- X
+    Bresampled <- B
+    for(j in 1:nq) {
+      k <- ind[j]
+      Bresampled[[j]] <- shift(B[[k]], unlist(V[[j]]) - unlist(V[[k]]))
+    }
+    split(Xresampled, A) <- Bresampled
+    out[[i]] <- Xresampled
+    if(verbose)
+      pstate <- progressreport(i, nsamples, state=pstate)
+  }
+  if(nsamples == 1)
+    return(out[[1]])
+  return(as.solist(out))
+}
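+
+# Usage sketch (illustrative comment only; assumes the standard
+# 'cells' dataset):
+#     Y  <- quadratresample(cells, nx=4, ny=4)       # one resampled pattern
+#     YY <- quadratresample(cells, 4, nsamples=10)   # solist of 10 patterns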
+
diff --git a/R/quadrattest.R b/R/quadrattest.R
new file mode 100755
index 0000000..4564c10
--- /dev/null
+++ b/R/quadrattest.R
@@ -0,0 +1,506 @@
+#
+#   quadrattest.R
+#
+#   $Revision: 1.54 $  $Date: 2016/04/25 02:34:40 $
+#
+
+quadrat.test <- function(X, ...) {
+   UseMethod("quadrat.test")
+}
+
+quadrat.test.ppp <-
+  function(X, nx=5, ny=nx,
+           alternative = c("two.sided", "regular", "clustered"),
+           method = c("Chisq", "MonteCarlo"),
+           conditional=TRUE, CR=1,
+           lambda=NULL, 
+           ...,
+           xbreaks=NULL, ybreaks=NULL,
+           tess=NULL, nsim=1999)
+{
+   Xname <- short.deparse(substitute(X))
+   method <- match.arg(method)
+   alternative <- match.arg(alternative)
+   do.call(quadrat.testEngine,
+          resolve.defaults(list(X, nx=nx, ny=ny,
+                                alternative=alternative,
+                                method=method,
+                                conditional=conditional,
+                                CR=CR,
+                                fit=lambda,
+                                xbreaks=xbreaks, ybreaks=ybreaks,
+                                tess=tess,
+                                nsim=nsim),
+                           list(...), 
+                           list(Xname=Xname, fitname="CSR")))
+}
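+
+# Usage sketch (illustrative comment only; assumes the standard
+# 'cells' dataset):
+#     quadrat.test(cells, 3, 3)                    # chi-squared test of CSR
+#     quadrat.test(cells, 3, method="MonteCarlo")  # Monte Carlo version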
+
+quadrat.test.splitppp <- function(X, ..., df=NULL, df.est=NULL, Xname=NULL)
+{
+  if(is.null(Xname))
+    Xname <- short.deparse(substitute(X))
+  pool.quadrattest(lapply(X, quadrat.test.ppp, ...),
+                   df=df, df.est=df.est, Xname=Xname)
+}
+
+quadrat.test.ppm <-
+  function(X, nx=5, ny=nx,
+           alternative = c("two.sided", "regular", "clustered"),      
+           method=c("Chisq", "MonteCarlo"),
+           conditional=TRUE, CR=1, ...,
+           xbreaks=NULL, ybreaks=NULL,
+           tess=NULL, nsim=1999)
+{
+   fitname <- short.deparse(substitute(X))
+   dataname <- paste("data from", fitname)
+   method <- match.arg(method)
+   alternative <- match.arg(alternative)
+   if(!is.poisson.ppm(X))
+    stop("Test is only defined for Poisson point process models")
+   if(is.marked(X))
+    stop("Sorry, not yet implemented for marked point process models")
+   do.call(quadrat.testEngine,
+          resolve.defaults(list(data.ppm(X), nx=nx, ny=ny,
+                                alternative=alternative,
+                                method=method,
+                                conditional=conditional, CR=CR,
+                                xbreaks=xbreaks, ybreaks=ybreaks,
+                                tess=tess,
+                                nsim=nsim, 
+                                fit=X),
+                           list(...),
+                           list(Xname=dataname, fitname=fitname)))
+}
+
+quadrat.test.quadratcount <-
+  function(X,
+           alternative = c("two.sided", "regular", "clustered"),
+           method=c("Chisq", "MonteCarlo"),
+           conditional=TRUE, CR=1,
+           lambda=NULL, 
+           ...,
+           nsim=1999) {
+   trap.extra.arguments(...)
+   method <- match.arg(method)
+   alternative <- match.arg(alternative)
+   quadrat.testEngine(Xcount=X,
+                      alternative=alternative,
+                      fit=lambda,
+                      method=method, conditional=conditional, CR=CR, nsim=nsim)
+}
+
+quadrat.testEngine <- function(X, nx, ny,
+                               alternative = c("two.sided",
+                                                "regular", "clustered"),
+                               method=c("Chisq", "MonteCarlo"),
+                               conditional=TRUE, CR=1, ...,
+                               nsim=1999,
+                               Xcount=NULL,
+                               xbreaks=NULL, ybreaks=NULL, tess=NULL,
+                               fit=NULL, Xname=NULL, fitname=NULL) {
+  trap.extra.arguments(...)
+  method <- match.arg(method)
+  alternative <- match.arg(alternative)
+  if(method == "MonteCarlo") {
+    check.1.real(nsim)
+    explain.ifnot(nsim > 0)
+  }
+  if(is.null(Xcount))
+    Xcount <- quadratcount(X, nx=nx, ny=ny, xbreaks=xbreaks, ybreaks=ybreaks,
+                           tess=tess)
+  tess <- attr(Xcount, "tess")
+  testname <- switch(method,
+                     Chisq = "Chi-squared test",
+                     MonteCarlo = paste(
+                       if(conditional) "Conditional" else "Unconditional",
+                       "Monte Carlo test")
+                     )
+  # determine expected values under model
+  if(is.null(fit)) {
+    nullname <- "CSR"
+    if(tess$type == "rect") 
+      areas <- outer(diff(tess$xgrid), diff(tess$ygrid), "*")
+    else 
+      areas <- unlist(lapply(tiles(tess), area))
+    fitmeans <- sum(Xcount) * areas/sum(areas)
+    df <- switch(method,
+                 Chisq      = length(fitmeans) - 1,
+                 MonteCarlo = NULL)
+  } else if(is.im(fit) || inherits(fit, "funxy")) {
+    nullname <- "Poisson process with given intensity"
+    fit <- as.im(fit, W=Window(tess))
+    areas <- integral(fit, tess)
+    fitmeans <- sum(Xcount) * areas/sum(areas)
+    df <- switch(method,
+                 Chisq      = length(fitmeans) - 1,
+                 MonteCarlo = NULL)    
+  } else {
+    if(!is.ppm(fit))
+      stop("fit should be a ppm object")
+    if(!is.poisson.ppm(fit))
+      stop("Quadrat test only supported for Poisson point process models")
+    if(is.marked(fit))
+      stop("Sorry, not yet implemented for marked point process models")
+    nullname <- paste("fitted Poisson model", sQuote(fitname))
+    Q <- quad.ppm(fit, drop=TRUE)
+    ww <- w.quad(Q)
+    lambda <- fitted(fit, drop=TRUE)
+    masses <- lambda * ww
+    # sum weights of quadrature points in each tile 
+    if(tess$type == "rect") {
+      xx <- x.quad(Q)
+      yy <- y.quad(Q)
+      xbreaks <- tess$xgrid
+      ybreaks <- tess$ygrid
+      fitmeans <- rectquadrat.countEngine(xx, yy, xbreaks, ybreaks,
+                                          weights=masses)
+      fitmeans <- as.vector(t(fitmeans))
+    } else {
+      U <- as.ppp(Q)
+      V <- marks(cut(U, tess), dfok=FALSE)
+      fitmeans <- tapply(masses, list(tile=V), sum)
+      fitmeans[is.na(fitmeans)] <- 0
+    }
+    switch(method,
+           Chisq = {
+             df <- length(fitmeans) - length(coef(fit))
+             if(df < 1)
+               stop(paste("Not enough quadrats: degrees of freedom df =", df))
+           },
+           MonteCarlo = {
+             df <- NA
+           })
+  }
+  OBS <- as.vector(t(as.table(Xcount)))
+  EXP <- as.vector(fitmeans)
+  testname <- paste(testname, "of", nullname, "using quadrat counts")
+
+  testname <- c(testname, CressieReadName(CR))
+
+  result <- X2testEngine(OBS, EXP,
+                         method=method, df=df, nsim=nsim,
+                         conditional=conditional, CR=CR,
+                         alternative=alternative,
+                         testname=testname, dataname=Xname)
+
+  class(result) <- c("quadrattest", class(result))
+  attr(result, "quadratcount") <- Xcount
+  return(result)
+}
+
+CressieReadStatistic <- function(OBS, EXP, lambda=1) {
+  y <- if(lambda == 1) sum((OBS - EXP)^2/EXP) else
+       if(lambda == 0) 2 * sum(OBS * log(OBS/EXP)) else
+       if(lambda == -1) 2 * sum(EXP * log(EXP/OBS)) else
+       (2/(lambda * (lambda + 1))) * sum(OBS * ((OBS/EXP)^lambda - 1))
+  names(y) <- CressieReadSymbol(lambda)
+  return(y)
+}
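+
+# The Cressie-Read (1984) family of goodness-of-fit statistics:
+# for lambda different from 0 and -1,
+#     CR(lambda) = 2/(lambda*(lambda+1)) * sum( OBS * ((OBS/EXP)^lambda - 1) )
+# with the limits lambda -> 0 (likelihood ratio G2) and lambda -> -1 (GM2).
+# Worked example: lambda = 1 gives Pearson's X2, so OBS = c(3,7) and
+# EXP = c(5,5) yield X2 = (3-5)^2/5 + (7-5)^2/5 = 1.6.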
+
+CressieReadSymbol <- function(lambda) {
+  if(lambda == 1) "X2" else
+  if(lambda == 0) "G2" else
+  if(lambda == -1/2) "T2" else
+  if(lambda == -1) "GM2" else
+  if(lambda == -2) "NM2" else "CR"
+}
+
+CressieReadName <- function(lambda) {
+  if(lambda == 1) "Pearson X2 statistic" else
+  if(lambda == 0) "likelihood ratio test statistic G2" else
+  if(lambda == -1/2) "Freeman-Tukey statistic T2" else
+  if(lambda == -1) "modified likelihood ratio test statistic GM2" else
+  if(lambda == -2) "Neyman modified X2 statistic NM2" else
+  paste("Cressie-Read statistic",
+        paren(paste("lambda =",
+                    if(abs(lambda - 2/3) < 1e-7) "2/3" else lambda)
+              )
+        )
+}
+
+X2testEngine <- function(OBS, EXP, ...,
+                         method=c("Chisq", "MonteCarlo"),
+                         CR=1,
+                         df=NULL, nsim=NULL, 
+                         conditional, alternative, testname, dataname) {
+  method <- match.arg(method)
+  if(method == "Chisq" && any(EXP < 5))
+    warning(paste("Some expected counts are small;",
+                  "chi^2 approximation may be inaccurate"),
+            call.=FALSE)
+  X2 <- CressieReadStatistic(OBS, EXP, CR)
+  # conduct test
+  switch(method,
+         Chisq = {
+           if(!is.null(df))
+             names(df) <- "df"
+           pup <- pchisq(X2, df, lower.tail=FALSE)
+           plo <- pchisq(X2, df, lower.tail=TRUE)
+           PVAL <- switch(alternative,
+                          regular   = plo,
+                          clustered = pup,
+                          two.sided = 2 * min(pup, plo))
+         },
+         MonteCarlo = {
+           nsim <- as.integer(nsim)
+           if(conditional) {
+             npts <- sum(OBS)
+             p <- EXP/sum(EXP)
+             SIM <- rmultinom(n=nsim,size=npts,prob=p)
+           } else {
+             ne <- length(EXP)
+             SIM  <- matrix(rpois(nsim*ne,EXP),nrow=ne)
+           }
+           simstats <- apply(SIM, 2, CressieReadStatistic, EXP=EXP, lambda=CR)
+           if(anyDuplicated(simstats))
+             simstats <- jitter(simstats)
+           phi <- (1 + sum(simstats >= X2))/(1+nsim)
+           plo <- (1 + sum(simstats <= X2))/(1+nsim)
+           PVAL <- switch(alternative,
+                          clustered = phi,
+                          regular   = plo,
+                          two.sided = min(1, 2 * min(phi,plo)))
+         })
+    result <- structure(list(statistic = X2,
+                             parameter = df,
+                             p.value = PVAL,
+                             method = testname,
+                             data.name = dataname,
+                             alternative = alternative,
+                             observed = OBS,
+                             expected = EXP,
+                             residuals = (OBS - EXP)/sqrt(EXP),
+                             CR = CR,
+                             method.key = method),
+                        class = "htest")
+  return(result)
+}
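+
+# Note: the Monte Carlo p-values above use the add-one convention
+# (1 + #{simulated statistics at least as extreme})/(1 + nsim),
+# which is never zero and gives an exact test when the null
+# distribution is simulated exactly.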
+                         
+print.quadrattest <- function(x, ...) {
+   NextMethod("print")
+   single <- is.atomicQtest(x)
+   if(!single)
+     splat("Pooled test")
+   if(waxlyrical('gory')) {
+     if(single) {
+       cat("Quadrats: ")
+     } else {
+       splat("Quadrats of component tests:")
+     }
+     do.call(print,
+             resolve.defaults(list(x=as.tess(x)),
+                              list(...),
+                              list(brief=TRUE)))
+   }
+   return(invisible(NULL))
+}
+
+plot.quadrattest <- local({
+
+  plot.quadrattest <- function(x, ..., textargs=list()) {
+    xname <- short.deparse(substitute(x))
+
+    if(!is.atomicQtest(x)) {
+      # pooled test - plot the original tests
+      tests <- extractAtomicQtests(x)
+      do.call(plot,
+              resolve.defaults(list(x=tests),
+                               list(...),
+                               list(main=xname)))
+      return(invisible(NULL))
+    }
+    Xcount <- attr(x, "quadratcount")
+
+    # plot tessellation
+    tess  <- as.tess(Xcount)
+    do.call(plot.tess,
+            resolve.defaults(list(tess),
+                             list(...),
+                             list(main=xname)))
+    # compute locations for text
+    til <- tiles(tess)
+    ok <- sapply(til, haspositivearea)
+    incircles <- lapply(til[ok], incircle)
+    x0 <- sapply(incircles, getElement, name="x")
+    y0 <- sapply(incircles, getElement, name="y")
+    ra <- sapply(incircles, getElement, name="r")
+    # plot observed counts
+    cos30 <- sqrt(3)/2
+    sin30 <- 1/2
+    f <- 0.4
+    dotext(-f * cos30, f * sin30,
+           as.vector(t(as.table(Xcount)))[ok],
+           x0, y0, ra, textargs, 
+           adj=c(1,0), ...)
+    # plot expected counts
+    dotext(f * cos30, f * sin30,
+           round(x$expected,1)[ok],
+           x0, y0, ra, textargs,
+           adj=c(0,0), ...)
+    # plot Pearson residuals
+    dotext(0, -f,  signif(x$residuals,2)[ok],
+           x0, y0, ra, textargs,
+           ...)
+    return(invisible(NULL))
+  }
+ 
+  dotext <- function(dx, dy, values, x0, y0, ra, textargs, ...) {
+    do.call.matched(text.default,
+                    resolve.defaults(list(x=x0 + dx * ra, y = y0 + dy * ra),
+                                     list(labels=paste(as.vector(values))),
+                                     textargs, 
+                                     list(...)),
+                    funargs=graphicsPars("text"))
+  }
+
+  haspositivearea <- function(x) { !is.null(x) && area(x) > 0 }
+  
+  plot.quadrattest
+})
+
+########  pooling multiple quadrat tests into a quadrat test
+
+pool.quadrattest <- function(...,
+                             df=NULL, df.est=NULL, nsim=1999, Xname=NULL,
+                             CR=NULL) {
+  argh <- list(...)
+  if(!is.null(df) && !is.null(df.est))
+    stop("Arguments df and df.est are incompatible")
+  
+  if(all(unlist(lapply(argh, inherits, what="quadrattest")))) {
+    # Each argument is a quadrattest object
+    tests <- argh
+  } else if(length(argh) == 1 &&
+            is.list(arg1 <- argh[[1]]) &&
+            all(unlist(lapply(arg1, inherits, "quadrattest")))) {
+    # There is just one argument, which is a list of quadrattests
+    tests <- arg1
+  } else stop("Each entry in the list must be a quadrat test")
+
+  # data from all cells in all tests
+  OBS <- unlist(lapply(tests, getElement, name="observed"))
+  EXP <- unlist(lapply(tests, getElement, name="expected"))
+  # RES <- unlist(lapply(tests, getElement, name="residuals"))
+  # STA <- unlist(lapply(tests, getElement, name="statistic"))
+
+  # information about each test
+  Mkey <- unlist(lapply(tests, getElement, name="method.key"))
+  Testname <- lapply(tests, getElement, name="method")
+  Alternative <- unlist(lapply(tests, getElement, name="alternative"))
+  Conditional <- unlist(lapply(tests, getElement, name="conditional"))
+  
+  # name of data
+  if(is.null(Xname)) {
+    Nam <-  unlist(lapply(tests, getElement, name="data.name"))
+    Xname <- commasep(sQuote(Nam))
+  }
+
+  # name of test
+  testname    <- unique(Testname)
+  method.key <- unique(Mkey)
+  if(length(testname) > 1)
+    stop(paste("Cannot combine different types of tests:",
+               commasep(sQuote(method.key))))
+  testname <- testname[[1]]
+
+  # alternative hypothesis
+  alternative <- unique(Alternative)
+  if(length(alternative) > 1)
+    stop(paste("Cannot combine tests with different alternatives:",
+               commasep(sQuote(alternative))))
+
+  # conditional tests
+  conditional <- any(Conditional)
+  if(conditional)
+    stop("Sorry, not implemented for conditional tests")
+
+  # Cressie-Read exponent
+  if(is.null(CR)) {
+    CR <- unlist(lapply(tests, getElement, name="CR"))
+    CR <- unique(CR)
+    if(length(CR) > 1) {
+      warning("Tests used different values of CR; assuming CR=1")
+      CR <- 1
+    }
+  }
+                 
+  if(method.key == "Chisq") {
+    # determine degrees of freedom
+    if(is.null(df)) {
+      if(!is.null(df.est)) {
+        # total number of observations minus number of fitted parameters
+        df <- length(OBS) - df.est
+      } else {
+        # total degrees of freedom of tests
+        # implicitly assumes independence of tests
+        PAR <- unlist(lapply(tests, getElement, name="parameter"))
+        df <- sum(PAR)
+      }
+    }
+    # validate df
+    if(df < 1)
+      stop(paste("Degrees of freedom = ", df))
+    names(df) <- "df"
+  }
+    
+  # perform test
+  result <- X2testEngine(OBS, EXP,
+                         method=method.key, df=df, nsim=nsim,
+                         conditional=conditional, CR=CR,
+                         alternative=alternative,
+                         testname=testname, dataname=Xname)
+  # add info
+  class(result) <- c("quadrattest", class(result))
+  attr(result, "tests") <- as.solist(tests)
+  # there is no quadratcount attribute 
+  return(result)
+}
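+
+# Usage sketch (illustrative comment only; assumes the standard datasets
+# 'cells' and 'japanesepines'):
+#     t1 <- quadrat.test(cells, 3)
+#     t2 <- quadrat.test(japanesepines, 3)
+#     pool(t1, t2)   # pooled chi-squared test; df are added across tests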
+
+is.atomicQtest <- function(x) {
+  inherits(x, "quadrattest") && is.null(attr(x, "tests"))
+}
+
+extractAtomicQtests <- function(x) {
+  if(is.atomicQtest(x))
+    return(list(x))
+  stopifnot(inherits(x, "quadrattest"))
+  tests <- attr(x, "tests")
+  y <- lapply(tests, extractAtomicQtests)
+  z <- do.call(c, y)
+  return(as.solist(z))
+}
+
+as.tess.quadrattest <- function(X) {
+  if(is.atomicQtest(X)) {
+    Y <- attr(X, "quadratcount")
+    return(as.tess(Y))
+  }
+  tests <- extractAtomicQtests(X)
+  return(as.solist(lapply(tests, as.tess.quadrattest)))
+}
+
+as.owin.quadrattest <- function(W, ..., fatal=TRUE) {
+  if(is.atomicQtest(W))
+    return(as.owin(as.tess(W), ..., fatal=fatal))    
+  gezeur <- paste("Cannot convert quadrat test result to a window;",
+                  "it contains data for several windows")
+  if(fatal) stop(gezeur) else warning(gezeur)
+  return(NULL)
+}
+
+domain.quadrattest <- Window.quadrattest <- function(X, ...) { as.owin(X) }
+
+## The shift method is undocumented.
+## It is only needed in plot.listof etc
+
+shift.quadrattest <- function(X, ...) {
+  if(is.atomicQtest(X)) {
+    attr(X, "quadratcount") <- qc <- shift(attr(X, "quadratcount"), ...)
+    attr(X, "lastshift") <- getlastshift(qc)
+  } else {
+    tests <- extractAtomicQtests(X)
+    attr(X, "tests") <- te <- lapply(tests, shift, ...)
+    attr(X, "lastshift") <- getlastshift(te[[1]])
+  }
+  return(X)
+}
diff --git a/R/quadscheme.R b/R/quadscheme.R
new file mode 100755
index 0000000..6c5820d
--- /dev/null
+++ b/R/quadscheme.R
@@ -0,0 +1,340 @@
+#
+#
+#      quadscheme.S
+#
+#      $Revision: 4.35 $    $Date: 2016/02/11 10:17:12 $
+#
+#      quadscheme()    generate a quadrature scheme from 
+#		       data and dummy point patterns.
+#
+#      quadscheme.spatial()    case where both patterns are unmarked
+#
+#      quadscheme.replicated() case where data are multitype
+#
+#
+#---------------------------------------------------------------------
+
+quadscheme <- function(data, dummy, method="grid", ...) {
+        #
+	# generate a quadrature scheme from data and dummy patterns.
+	#
+	# Other arguments control how the quadrature weights are computed
+        #
+
+  data <- as.ppp(data)
+
+  if(missing(dummy)) {
+    # create dummy points
+    dummy <- default.dummy(data, method=method, ...)
+    # extract full set of parameters used to create dummy points
+    dp <- attr(dummy, "dummy.parameters")
+    # extract recommended parameters for computing weights
+    wp <- attr(dummy, "weight.parameters")
+  } else {
+    # user-supplied dummy points
+    if(!is.ppp(dummy)) {
+      # convert to ppp object
+      dummy <- as.ppp(dummy, data$window, check=FALSE)
+      # confine dummy points to data window 
+      dummy <- dummy[data$window]
+      wp <- dp <- list()
+    } else {
+      # if it's already a ppp, it may have been created by default.dummy
+     dp <- attr(dummy, "dummy.parameters")
+     wp <- attr(dummy, "weight.parameters")     
+   }
+  }
+  # arguments supplied directly to quadscheme()
+  # override any arguments passed as attributes
+  wp <- resolve.defaults(list(method=method), list(...), wp)
+  
+  mX <- is.marked(data)
+  mD <- is.marked(dummy)
+
+  if(!mX && !mD)
+    Q <- do.call(quadscheme.spatial,
+                 append(list(data, dummy, check=FALSE), wp))
+  else if(mX && !mD)
+    Q <- do.call(quadscheme.replicated,
+                 append(list(data, dummy, check=FALSE), wp))
+  else if(!mX && mD)
+    stop("dummy points are marked but data are unmarked")
+  else
+    stop("marked data and marked dummy points -- sorry, this case is not implemented")
+
+  # record parameters used to make dummy points
+  Q$param$dummy <- dp
+
+  return(Q)
+}
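+
+# Usage sketch (illustrative comment only; assumes the standard
+# 'cells' dataset):
+#     Q <- quadscheme(cells)                      # default grid weights
+#     Q <- quadscheme(cells, method="dirichlet")  # Dirichlet tile weights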
+
+quadscheme.spatial <-
+  function(data, dummy, method=c("grid", "dirichlet"), ...) {
+        #
+	# generate a quadrature scheme from data and dummy patterns.
+	#
+	# The 'method' may be "grid" or "dirichlet"
+	#
+	# '...' are passed to gridweights() or dirichletWeights()
+        #
+        # quadscheme.spatial:
+        #       for unmarked point patterns.
+        #
+        #       weights are determined only by spatial locations
+        #       (i.e. weight computations ignore any marks)
+	#
+        # No two points should have the same spatial location
+        # 
+
+    check <- resolve.defaults(list(...), list(check=TRUE))$check
+    method <- match.arg(method)
+    
+    data <- as.ppp(data, check=check)
+    dummy <- as.ppp(dummy, data$window, check=check)
+    # note data$window is the DEFAULT quadrature window
+    # applicable when 'dummy' does not contain a window
+
+    if(is.marked(data, dfok=TRUE))
+      warning("marks in data pattern - ignored")
+    if(is.marked(dummy, dfok=TRUE))
+      warning("marks in dummy pattern - ignored")
+    
+    both <- as.ppp(concatxy(data, dummy), dummy$window, check=check)
+    switch(method,
+           grid={
+             w <- gridweights(both, window= dummy$window, ...)
+           },
+           dirichlet = {
+             w <- dirichletWeights(both, window=dummy$window, ...)
+           },
+           { 
+             stop(paste("unrecognised method", sQuote(method)))
+           }
+           )
+    # parameters actually used to make weights
+    wp <- attr(w, "weight.parameters")
+    param <- list(weight = wp, dummy = NULL)
+    
+    Q <- quad(data, dummy, w, param)
+    return(Q)
+  }
+
+"quadscheme.replicated" <-
+  function(data, dummy, method=c("grid", "dirichlet"), ...) {
+    ##
+    ## generate a quadrature scheme from data and dummy patterns.
+    ##
+    ## The 'method' may be "grid" or "dirichlet"
+    ##
+    ## '...' are passed to gridweights() or dirichletWeights()
+    ##
+    ## quadscheme.replicated:
+    ##       for multitype point patterns.
+    ##
+    ## No two points in 'data'+'dummy' should have the same spatial location
+
+    check <- resolve.defaults(list(...), list(check=TRUE))$check
+    method <- match.arg(method)
+    
+    data <- as.ppp(data, check=check)
+    dummy <- as.ppp(dummy, data$window, check=check)
+		## note data$window is the DEFAULT quadrature window
+		## unless otherwise specified in 'dummy'
+    ndata <- data$n
+    ndummy <- dummy$n
+
+    if(!is.marked(data))
+      stop("data pattern does not have marks")
+    if(is.marked(dummy, dfok=TRUE) && npoints(dummy) > 0)
+      warning("dummy points have marks --- ignored")
+
+    ## first, ignore marks and compute spatial weights
+    P <- quadscheme.spatial(unmark(data), dummy, method, ...)
+    W <- w.quad(P)
+    iz <- is.data(P)
+    Wdat <- W[iz]
+    Wdum <- W[!iz]
+
+    ## find the set of all possible marks
+
+    if(!is.multitype(data))
+      stop("data pattern is not multitype")
+    data.marks <- marks(data)
+    markset <- levels(data.marks)
+    nmarks <- length(markset)
+    
+    ## replicate dummy points, one copy for each possible mark
+    ## -> dummy x {1,..,K}
+        
+    dumdum <- cartesian(dummy, markset)
+    Wdumdum <- rep.int(Wdum, nmarks)
+    Idumdum <- rep.int(ndata + seq_len(ndummy), nmarks)
+        
+    ## also make dummy marked points at same locations as data points
+    ## but with different marks
+
+    dumdat <- cartesian(unmark(data), markset)
+    Wdumdat <- rep.int(Wdat, nmarks)
+    Mdumdat <- marks(dumdat)
+    Idumdat <- rep.int(1:ndata, nmarks)
+        
+    Mrepdat <- rep.int(data.marks, nmarks)
+
+    ok <- (Mdumdat != Mrepdat)
+    dumdat <- dumdat[ok,]
+    Wdumdat <- Wdumdat[ok]
+    Idumdat <- Idumdat[ok]
+
+    ## combine the two dummy patterns
+    dumb <- superimpose(dumdum, dumdat, W=dummy$window, check=FALSE)
+    Wdumb <- c(Wdumdum, Wdumdat)
+    Idumb <- c(Idumdum, Idumdat)
+    
+    ## record the quadrature parameters
+    param <- list(weight = P$param$weight,
+                  dummy = NULL,
+                  sourceid=c(1:ndata, Idumb))
+
+    ## wrap up
+    Q <- quad(data, dumb, c(Wdat, Wdumb), param)
+    return(Q)
+}
+
+
+"cartesian" <-
+function(pp, markset, fac=TRUE) {
+  ## given an unmarked point pattern 'pp'
+  ## and a finite set of marks,
+  ## create the marked point pattern which is
+  ## the Cartesian product, consisting of all pairs (u,k)
+  ## where u is a point of 'pp' and k is a mark in 'markset'
+  nmarks <- length(markset)
+  result <- ppp(rep.int(pp$x, nmarks),
+                rep.int(pp$y, nmarks),
+                window=pp$window,
+                check=FALSE)
+  marx <- rep.int(markset, rep.int(pp$n, nmarks))
+  if(fac)
+    marx <- factor(marx, levels=markset)
+  marks(result) <- marx
+  return(result)
+}
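+
+# Illustrative example (comment only): cartesian(runifpoint(3), c("a","b"))
+# gives a multitype pattern of 6 points, the 3 original locations
+# repeated once with mark "a" and once with mark "b".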
+
+
+validate.quad <- function(Q, fatal=FALSE, repair=TRUE, announce=FALSE) {
+  X <- Q$data
+  D <- Q$dummy
+  mX <- is.marked(X)
+  mD <- is.marked(D)
+  nbg <- function(whinge, fatal=FALSE, announce=FALSE) {
+    if(fatal)
+      stop(whinge, call.=FALSE)
+    else {
+      if(announce)
+        warning(whinge, call.=FALSE)
+      return(FALSE)
+    }
+  }
+  if(mX != mD) {
+    whinge <-
+      if(mX)
+        "data points are marked, but dummy points are not"
+      else
+        "dummy points are marked, but data points are not"
+    return(nbg(whinge, fatal, announce))
+  }
+  if(!mX)
+    return(TRUE)
+  # marked points 
+  fX <- is.factor(Xmarx <- marks(X))
+  fD <- is.factor(Dmarx <- marks(D))
+  if(fX != fD) {
+    whinge <-
+      if(fX)
+        "data points are multitype, but dummy points are not"
+      else
+        "dummy points are multitype, but data points are not"
+    return(nbg(whinge, fatal, announce))
+  }
+  if(!fX)
+    return(TRUE)
+  # multitype points
+  lX <- levels(Xmarx)
+  lD <- levels(Dmarx)
+  if(length(lX) != length(lD) || any(lX != lD)) {
+    whinge <- "data and dummy points have different sets of possible marks"
+    return(nbg(whinge, fatal, announce))
+  }
+  return(TRUE)
+}
+
+  
+
+pixelquad <- function(X, W=as.owin(X)) {
+  ## make a quadscheme with a dummy point at every pixel
+  verifyclass(X, "ppp")
+  
+  ## convert window to mask if not already one
+  W <- as.owin(W)
+  M <- as.mask(W)
+  MM <- M$m
+  pixelarea <- M$xstep * M$ystep
+  
+  ## create pixel coordinates and corresponding row, column indices
+  rxy <- rasterxy.mask(M, drop=TRUE)
+  xx <- rxy$x
+  yy <- rxy$y
+  cc <- as.vector(col(MM)[MM])
+  rr <- as.vector(row(MM)[MM])
+  Nr <- M$dim[1]
+  Nc <- M$dim[2]
+  
+  ## dummy point pattern
+  dum <- ppp(xx, yy, window=W, check=FALSE)
+  
+  ## discretise data points
+  ij <- nearest.raster.point(X$x, X$y, M)
+  ijrow <- ij$row
+  ijcol <- ij$col
+
+
+  if(!is.marked(X)) {
+    ## tabulate pixel locations of data points
+    Xtab <- table(row=factor(ijrow, levels=1:Nr),
+                  col=factor(ijcol, levels=1:Nc))
+    ## every pixel contains exactly one dummy point,
+    ## so the total count of quadrature points in each pixel is:
+    Qtab <- Xtab + 1
+    ## compute counting weights for data points
+    wdat <- 1/Qtab[cbind(ijrow, ijcol)]
+    ## compute counting weights for dummy points
+    wdum <- 1/Qtab[cbind(rr, cc)]
+  } else {
+    marx <- marks(X)
+    ## tabulate pixel locations and marks of data points
+    Xtab <- table(row=factor(ijrow, levels=1:Nr),
+                  col=factor(ijcol, levels=1:Nc),
+                  mark=marx)
+    ## replicate dummy points (pixel centres) for each mark
+    dum <- cartesian(dum, levels(marx))
+    ## every marked pixel contains exactly one dummy point,
+    ## so the total count of quadrature points in each marked pixel is:
+    Qtab <- Xtab + 1
+    ## compute counting weights for data points
+    wdat <- 1/Qtab[cbind(ijrow, ijcol, as.integer(marx))]
+    ## compute counting weights for dummy points
+    nm <- length(levels(marx))
+    wdum <- 1/Qtab[cbind(rep.int(rr, nm),
+                         rep.int(cc, nm),
+                         rep(1:nm, each=length(rr)))]
+  }
+  ## create quadrature scheme
+  wboth <- pixelarea * c(wdat, wdum)
+  Q <- quad(X, dum, wboth)
+  
+  attr(Q, "M") <- M
+  return(Q)
+}
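+
+## Example (illustrative sketch, not run):
+##   X <- runifpoint(42)
+##   Q <- pixelquad(X, as.mask(square(1), dimyx=32))
+##   sum(w.quad(Q))    ## counting weights sum to the window area, here 1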
+
+  
+  
diff --git a/R/quantess.R b/R/quantess.R
new file mode 100644
index 0000000..8ed15f4
--- /dev/null
+++ b/R/quantess.R
@@ -0,0 +1,222 @@
+#'     quantess.R
+#' 
+#'     Quantile Tessellation
+#'
+#'   $Revision: 1.12 $  $Date: 2016/02/18 07:26:34 $
+
+quantess <- function(M, Z, n, ...) {
+  UseMethod("quantess")
+}
+
+quantess.owin <- function(M, Z, n, ..., type=2) {
+  W <- as.owin(M)
+  tcross <- MinimalTess(W, ...)
+  force(n)
+  if(!is.character(Z)) {
+    Zim <- as.im(Z, W)
+    Zrange <- range(Zim)
+  } else {
+    if(!(Z %in% c("x", "y")))
+      stop(paste("Unrecognised covariate", dQuote(Z)))
+    if(is.rectangle(W)) {
+      out <- switch(Z,
+                    x={ quadrats(W, nx=n, ny=1) },
+                    y={ quadrats(W, nx=1, ny=n) })
+      if(!is.null(tcross)) out <- intersect.tess(out, tcross)
+      return(out)
+    }
+    switch(Z,
+           x={
+             Zfun <- function(x,y){x}
+             Zrange <- boundingbox(W)$xrange
+           },
+           y={
+             Zfun <- function(x,y){y}
+             Zrange <- boundingbox(W)$yrange
+           })
+    Zim <- as.im(Zfun, W)
+  }
+  qZ <- quantile(Zim, probs=(1:(n-1))/n, type=type)
+  qZ <- c(Zrange[1], qZ, Zrange[2])
+  if(is.polygonal(W) && is.character(Z)) {
+    R <- Frame(W)
+    strips <- switch(Z,
+                     x = tess(xgrid=qZ, ygrid=R$yrange),
+                     y = tess(xgrid=R$xrange, ygrid=qZ))
+    out <- intersect.tess(strips, tess(tiles=list(W)))
+    qzz <- signif(qZ, 3)
+    tilenames(out) <- paste0("[", qzz[1:n], ",",
+                             qzz[-1], c(rep(")", n-1), "]"))
+  } else {
+    ## cut the covariate image (not the raw covariate) into quantile bands
+    ZC <- cut(Zim, breaks=qZ, include.lowest=TRUE, right=FALSE)
+    out <- tess(image=ZC)
+  }
+  if(!is.null(tcross)) out <- intersect.tess(out, tcross)
+  return(out)
+}
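+
+## Example (illustrative sketch, not run):
+##   quantess(square(1), "x", 4)   ## four vertical strips of equal area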
+
+quantess.ppp <- function(M, Z, n, ..., type=2) {
+  W <- as.owin(M)
+  tcross <- MinimalTess(W, ...)
+  force(n)
+  if(!is.character(Z)) {
+    Zim <- as.im(Z, W)
+    ZM <- if(is.function(Z)) Z(M$x, M$y) else Zim[M]
+    Zrange <- range(range(Zim), ZM)
+  } else {
+    if(!(Z %in% c("x", "y")))
+      stop(paste("Unrecognised covariate", dQuote(Z)))
+    if(is.rectangle(W)) {
+      switch(Z,
+             x={
+               qx <- quantile(M$x, probs=(1:(n-1))/n, type=type)
+               qx <- c(W$xrange[1], qx, W$xrange[2])
+               out <- tess(xgrid=qx, ygrid=W$yrange)
+             },
+             y={
+               qy <- quantile(M$y, probs=(1:(n-1))/n, type=type)
+               qy <- c(W$yrange[1], qy, W$yrange[2])
+               out <- tess(xgrid=W$xrange, ygrid=qy)
+             })
+      if(!is.null(tcross)) out <- intersect.tess(out, tcross)
+      return(out)
+    }
+    switch(Z,
+           x={
+             Zfun <- function(x,y){x}
+             ZM <- M$x
+             Zrange <- boundingbox(W)$xrange
+           },
+           y={
+             Zfun <- function(x,y){y}
+             ZM <- M$y
+             Zrange <- boundingbox(W)$yrange
+           })
+    Zim <- as.im(Zfun, W)
+  } 
+  qZ <- quantile(ZM, probs=(1:(n-1))/n, type=type)
+  qZ <- c(Zrange[1], qZ, Zrange[2])
+  if(is.polygonal(W) && is.character(Z)) {
+    R <- Frame(W)
+    strips <- switch(Z,
+                     x = tess(xgrid=qZ, ygrid=R$yrange),
+                     y = tess(xgrid=R$xrange, ygrid=qZ))
+    out <- intersect.tess(strips, tess(tiles=list(W)))
+    qzz <- signif(qZ, 3)
+    tilenames(out) <- paste0("[", qzz[1:n], ",",
+                             qzz[-1], c(rep(")", n-1), "]"))
+  } else {
+    ZC <- cut(Zim, breaks=qZ, include.lowest=TRUE)
+    out <- tess(image=ZC)
+  }
+  if(!is.null(tcross)) out <- intersect.tess(out, tcross)
+  return(out)
+}
+
+quantess.im <- function(M, Z, n, ..., type=2) {
+  W <- Window(M)
+  tcross <- MinimalTess(W, ...)
+  force(n)
+  if(!(type %in% c(1,2)))
+    stop("Only quantiles of type 1 and 2 are implemented for quantess.im")
+  if(is.character(Z)) 
+    Z <- switch(Z,
+                x=function(x,y){x},
+                y=function(x,y){y},
+                stop(paste("Unrecognised covariate", dQuote(Z))))
+  MZ <- harmonise(M=M, Z=Z)
+  M <- MZ$M[W, drop=FALSE]
+  Z <- MZ$Z[W, drop=FALSE]
+  Zrange <- range(Z)
+  Fun <- ewcdf(Z[], weights=M[]/sum(M[]))
+  qZ <- quantile(Fun, probs=(1:(n-1))/n, type=type)
+  qZ <- c(Zrange[1], qZ, Zrange[2])
+  ZC <- cut(Z, breaks=qZ, include.lowest=TRUE)
+  out <- tess(image=ZC)
+  qzz <- signif(qZ, 3)
+  tilenames(out) <- paste0("[", qzz[1:n], ",",
+                           qzz[-1], c(rep(")", n-1), "]"))
+  if(!is.null(tcross)) out <- intersect.tess(out, tcross)
+  return(out)
+}
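+
+## Example (illustrative sketch, not run; 'bei.extra' is a dataset in spatstat):
+##   quantess(bei.extra$grad, "x", 3)  ## three bands of x carrying equal total image mass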
+
+MinimalTess <- function(W, ...) {
+  # find the minimal tessellation of W consistent with the arguments 
+  argh <- list(...)
+  if(length(argh) == 0) return(NULL)
+  nama <- names(argh)
+  if(any(c("nx", "ny") %in% nama)) {
+    fun <- quadrats
+    dflt <- list(nx=1, ny=1)
+  } else if(any(c("xbreaks", "ybreaks") %in% nama)) {
+    fun <- quadrats
+    dflt <- list(xbreaks=W$xrange, ybreaks=W$yrange)
+  } else {
+    fun <- tess
+    dflt <- list(window=W, keepempty=TRUE)
+  }
+  v <- do.call(fun, resolve.defaults(list(W), argh, dflt))
+  return(v)
+}
+
+nestsplit <- function(X, ...) {
+  stopifnot(is.ppp(X))
+  flist <- list(...)
+  cansplit <- sapply(flist, inherits,
+                     what=c("factor", "tess", "owin", "im", "character"))
+  splitted <- lapply(flist[cansplit], split, x=X)
+  splitters <- lapply(splitted, attr, which="fsplit")
+  if(any(!cansplit)) {
+    extra <- do.call(MinimalTess, append(list(W=Window(X)), flist[!cansplit]))
+    pos <- min(which(!cansplit))
+    ns <- length(splitters)
+    if(pos > ns) {
+      splitters <- append(splitters, list(extra))
+    } else {
+      before <- splitters[seq_len(pos-1)]
+      after  <- splitters[pos:ns]
+      splitters <- c(before, list(extra), after)
+    }
+  }
+  ns <- length(splitters)
+  if(ns == 0) return(X)
+  if(ns == 1) return(split(X, splitters[[1]]))
+  if(ns > 2) stop("Nesting depths greater than 2 are not yet implemented")
+  names(splitters) <- good.names(names(splitters), paste0("f", 1:ns))
+  fax1 <- is.factor(sp1 <- splitters[[1]])
+  fax2 <- is.factor(sp2 <- splitters[[2]])
+  lev1 <- if(fax1) levels(sp1) else seq_len(sp1$n)
+  lev2 <- if(fax2) levels(sp2) else seq_len(sp2$n)
+  if(!fax1 && !fax2) {
+    ## two tessellations
+    marks(sp1) <- factor(lev1, levels=lev1)
+    marks(sp2) <- factor(lev2, levels=lev2)
+    sp12 <- intersect.tess(sp1, sp2, keepmarks=TRUE)
+    pats <- split(X, sp12)
+    f1 <- marks(sp12)[,1]
+    f2 <- marks(sp12)[,2]
+  } else {
+    if(fax1 && fax2) {
+      ## two grouping factors
+      Xsp1 <- split(X, sp1)
+      sp2.1 <- split(sp2, sp1)
+      ll <- mapply(split, Xsp1, sp2.1, SIMPLIFY=FALSE)
+    } else if(fax1 && !fax2) {
+      ## grouping factor and tessellation
+      Xsp1 <- split(X, sp1)
+      ll <- lapply(Xsp1, split, f=sp2)
+    } else if(!fax1 && fax2) {
+      ## tessellation and grouping factor
+      Xsp1 <- split(X, sp1)
+      sp2.1 <- split(sp2, attr(Xsp1, "fgroup"))
+      ll <- mapply(split, Xsp1, sp2.1, SIMPLIFY=FALSE)
+    }
+    neach <- lengths(ll)
+    f1 <- rep(factor(lev1, levels=lev1), neach)
+    f2 <- rep(factor(lev2, levels=lev2), length(Xsp1))
+    pats <- do.call(c, unname(ll))
+  }
+  h <- hyperframe(pts=pats, f1=f1, f2=f2)
+  names(h)[2:3] <- names(splitters)
+  return(h)
+}
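+
+## Example (illustrative sketch, not run; 'amacrine' is a dataset in spatstat):
+##   nestsplit(amacrine, marks(amacrine), nx=3)
+##   ## -> hyperframe of sub-patterns: split first by mark, then by a 3 x 1 grid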
diff --git a/R/quantiledensity.R b/R/quantiledensity.R
new file mode 100644
index 0000000..2b89853
--- /dev/null
+++ b/R/quantiledensity.R
@@ -0,0 +1,94 @@
+#'
+#'   quantiledensity.R
+#'
+#'  quantile method for class 'density'
+#'
+#'  Also a CDF from a 'density'
+#' 
+#'  $Revision: 1.3 $ $Date: 2015/09/01 11:53:15 $
+
+quantile.density <- local({
+
+  quantile.density <- function(x, probs = seq(0, 1, 0.25), names = TRUE, ...,
+                               warn=TRUE) {
+    stopifnot(inherits(x, "density"))
+    #' check whether density estimate was restricted to an interval
+    if(warn && is.call(cl <- x$call) && any(c("from", "to") %in% names(cl)))
+      warning(paste("Density was normalised within the computed range",
+                    "of x values", prange(c(cl$from, cl$to))),
+              call.=FALSE)
+    #' validate probs
+    eps <- 100 * .Machine$double.eps
+    if(any((p.ok <- !is.na(probs)) & (probs < -eps | probs > 1 + eps))) 
+      stop("'probs' outside [0,1]")
+    if (na.p <- any(!p.ok)) {
+      o.pr <- probs
+      probs <- probs[p.ok]
+      probs <- pmax(0, pmin(1, probs))
+    }
+    np <- length(probs)
+    qs <- rep(NA_real_, np)
+    if (np > 0) {
+      #' extract density values 
+      xx <- x$x
+      yy <- x$y
+      nn <- length(xx)
+      #' integrate, normalise
+      Fx <- cumsum(yy * c(0, diff(xx)))
+      Fx <- Fx/Fx[nn]
+      #' quantile
+      for(j in 1:np) {
+        ii <- min(which(Fx >= probs[j]))
+        if(!is.na(ii) && ii >= 1 && ii <= nn) 
+          qs[j] <- xx[ii]
+      }
+      if (names && np > 0L) {
+        names(qs) <- format_perc(probs)
+      }
+    }
+    if (na.p) {
+      o.pr[p.ok] <- qs
+      names(o.pr) <- rep("", length(o.pr))
+      names(o.pr)[p.ok] <- names(qs)
+      return(o.pr)
+    } else return(qs)
+  }
+
+  format_perc <- function (x, digits = max(2L, getOption("digits")),
+                           probability = TRUE, use.fC = length(x) < 100, ...) {
+    if (length(x)) {
+      if (probability) x <- 100 * x
+      paste0(if (use.fC) 
+             formatC(x, format = "fg", width = 1, digits = digits)
+      else format(x, trim = TRUE, digits = digits, ...), "%")
+    }
+    else character(0)
+  }
+
+  quantile.density
+})
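+
+## Example (illustrative sketch, not run):
+##   d <- density(rnorm(1000))
+##   quantile(d, probs=c(0.25, 0.5, 0.75))  ## roughly -0.67, 0, 0.67 for this sample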
+
+
+CDF <- function(f, ...) {
+  UseMethod("CDF")
+}
+
+CDF.density <- function(f, ..., warn=TRUE) {
+  stopifnot(inherits(f, "density"))
+  #' check whether density estimate was restricted to an interval
+  if(warn && is.call(cl <- f$call) && any(c("from", "to") %in% names(cl)))
+    warning(paste("Density was normalised within the computed range",
+                  "of x values", prange(c(cl$from, cl$to))),
+            call.=FALSE)
+  #' integrate
+  xx <- f$x
+  yy <- f$y
+  nn <- length(xx)
+  Fx <- cumsum(yy * c(0, diff(xx)))
+  #' normalise
+  Fx <- Fx/Fx[nn]
+  #' 
+  FF <- approxfun(xx, Fx, method="linear", rule=2)
+  return(FF)
+}
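+
+## Example (illustrative sketch, not run):
+##   F <- CDF(density(rnorm(1000)))
+##   F(0)   ## approximately 0.5 for a standard normal sample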
+
diff --git a/R/quasirandom.R b/R/quasirandom.R
new file mode 100644
index 0000000..38dfeab
--- /dev/null
+++ b/R/quasirandom.R
@@ -0,0 +1,55 @@
+##
+##     quasirandom.R
+##
+##  Quasi-random sequence generators
+##
+##  $Revision: 1.6 $   $Date: 2017/06/05 10:31:58 $
+##
+
+vdCorput <- function(n, base) {
+  stopifnot(is.prime(base))
+  z <- .C("Corput",
+          base=as.integer(base),
+          n=as.integer(n),
+          result=as.double(numeric(n)),
+          PACKAGE = "spatstat")
+  return(z$result)
+}
+
+Halton <- function(n, bases=c(2,3), raw=FALSE, simplify=TRUE) {
+  d <- length(bases)
+  if(d==2 && !raw && simplify)
+    return(ppp(vdCorput(n, bases[1]),
+               vdCorput(n, bases[2]),
+               window=owin(), check=FALSE))
+  z <- matrix(NA_real_, nrow=n, ncol=d)
+  for(j in 1:d)
+    z[,j] <- vdCorput(n, bases[j])
+  if(raw || d < 2) return(z)
+  b <- do.call(boxx, rep(list(c(0,1)), d))
+  return(ppx(z, b, simplify=simplify))
+}
+
+Hammersley <- function(n, bases=2, raw=FALSE, simplify=TRUE) {
+  d <- length(bases) + 1
+  z <- cbind(Halton(n, bases, raw=TRUE), (1:n)/n)
+  dimnames(z) <- NULL
+  if(raw || d < 2) return(z)
+  b <- do.call(boxx, rep(list(c(0,1)), d))
+  return(ppx(z, b, simplify=simplify))
+}
+
+rQuasi <- function(n, W, type=c("Halton", "Hammersley"), ...) {
+  R <- as.rectangle(W)
+  type <- match.arg(type)
+  X <- switch(type,
+              Halton=Halton(n, ...),
+              Hammersley=Hammersley(n, ...))
+  Y <- ppp(R$xrange[1] + diff(R$xrange) * X$x,
+           R$yrange[1] + diff(R$yrange) * X$y,
+           window=R, check=FALSE)
+  if(!is.rectangle(W))
+    Y <- Y[W]
+  return(Y)
+}
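+
+## Example (illustrative sketch, not run; 'letterR' is a dataset in spatstat):
+##   plot(Halton(128))           ## 128 quasi-random points in the unit square
+##   plot(rQuasi(256, letterR))  ## clipping may leave fewer than 256 points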
+
diff --git a/R/rLGCP.R b/R/rLGCP.R
new file mode 100755
index 0000000..664d027
--- /dev/null
+++ b/R/rLGCP.R
@@ -0,0 +1,94 @@
+#
+#   rLGCP.R
+#
+#   simulation of log-Gaussian Cox process
+#
+#   original code by Abdollah Jalilian
+#
+#  $Revision: 1.19 $    $Date: 2016/09/12 02:08:18 $
+#
+
+rLGCP <- local({
+
+  rLGCP <- function(model="exp", mu = 0, param = NULL, ...,
+                    win=NULL, saveLambda=TRUE, nsim=1, drop=TRUE) {
+    ## validate
+    if (!(is.numeric(mu) || is.function(mu) || is.im(mu))) 
+      stop(paste(sQuote("mu"), "must be a constant, a function or an image"))
+    if (is.numeric(mu) && !(length(mu) == 1)) 
+      stop(paste(sQuote("mu"), "must be a single number"))
+    ## check for outdated usage
+    if(!all(nzchar(names(param))))
+      stop("Outdated syntax of argument 'param' to rLGCP", call.=FALSE)
+    ## 
+    do.rLGCP(model=model, mu=mu, param=param, ...,
+             win=win, saveLambda=saveLambda, nsim=nsim, drop=drop)
+  }
+
+  do.rLGCP <- function(model="exp", mu = 0, param = NULL, ...,
+                       win=NULL, saveLambda=TRUE,
+                       eps = NULL, dimyx = NULL, xy = NULL,
+                       modelonly=FALSE, nsim=1, drop=TRUE) {
+    ## make RF model object from RandomFields package
+    ## get the 'model generator'
+    modgen <- getRandomFieldsModelGen(model)
+    ## now create a RandomFields 'model' object
+    rfmodel <- do.call(modgen, append(as.list(param), list(...)))
+    if(!inherits(rfmodel, "RMmodel"))
+      stop("Unable to create RandomFields model object", call.=FALSE)
+
+    ## secret exit
+    if(modelonly)
+      return(rfmodel)
+
+    ## simulation window
+    win.given <- !is.null(win)
+    mu.image <- is.im(mu)
+    win <- if(win.given) as.owin(win) else if(mu.image) as.owin(mu) else owin()
+  
+    if(win.given && mu.image && !is.subset.owin(win, as.owin(mu)))
+      stop(paste("The spatial domain of the pixel image", sQuote("mu"),
+                 "does not cover the simulation window", sQuote("win")))
+
+    ## convert win to a mask
+    w <- as.mask(w=win, eps=eps, dimyx=dimyx, xy=xy)
+    xcol <- w$xcol
+    yrow <- w$yrow
+    dim <- w$dim
+    xy <- expand.grid(x=xcol, y=yrow)
+    xx <- xy$x
+    yy <- xy$y
+
+    muxy <- if(is.numeric(mu)) mu else
+            if (is.function(mu)) mu(xx,yy) else
+            lookup.im(mu, xx, yy, naok=TRUE, strict=TRUE)
+    muxy[is.na(muxy)] <- -Inf
+
+    stopifnot(nsim >= 1)
+    result <- vector(mode="list", length=nsim)
+    for(i in 1:nsim) {
+      ## generate zero-mean Gaussian random field
+      spc <- RandomFields::RFoptions()$general$spConform
+      if(spc) RandomFields::RFoptions(spConform=FALSE)
+      z <- RandomFields::RFsimulate(rfmodel, xcol, yrow, grid = TRUE)
+      if(spc) RandomFields::RFoptions(spConform=TRUE)
+
+      ## convert to log-Gaussian image
+      logLambda <- muxy + z
+      Lambda <- matrix(exp(logLambda), nrow=dim[1], ncol=dim[2], byrow=TRUE)
+      Lambda <- as.im(Lambda, W=w)
+      ## generate Poisson points
+      X <- rpoispp(Lambda)[win]
+      ## 
+      if(saveLambda)
+        attr(X, "Lambda") <- Lambda
+      result[[i]] <- X
+    }
+    if(drop && nsim == 1)
+      return(result[[1]])
+    names(result) <- paste("Simulation", 1:nsim)
+    return(as.solist(result))
+  }
+
+  rLGCP
+})
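+
+## Example (illustrative sketch, not run; requires the RandomFields package;
+## 'var' and 'scale' are parameters of the exponential covariance model):
+##   X <- rLGCP("exp", mu=4, var=0.2, scale=0.1)
+##   plot(attr(X, "Lambda")); points(X)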
diff --git a/R/rPerfect.R b/R/rPerfect.R
new file mode 100755
index 0000000..827118d
--- /dev/null
+++ b/R/rPerfect.R
@@ -0,0 +1,411 @@
+#
+#  Perfect Simulation 
+#
+#  $Revision: 1.21 $ $Date: 2017/06/05 10:31:58 $
+#
+#  rStrauss
+#  rHardcore
+#  rStraussHard
+#  rDiggleGratton
+#  rDGS
+#  rPenttinen
+
+rStrauss <- function(beta, gamma=1, R=0, W=owin(), expand=TRUE,
+                     nsim=1, drop=TRUE) {
+
+  if(!missing(W)) 
+    verifyclass(W, "owin")
+
+  check.1.real(beta)
+  check.1.real(gamma)
+  check.1.real(R)
+
+  check.finite(beta)
+  check.finite(gamma)
+  check.finite(R)
+  
+  stopifnot(beta > 0)
+  stopifnot(gamma >= 0)
+  stopifnot(gamma <= 1)
+  stopifnot(R >= 0)
+
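+  ## touch the R random state so that .Random.seed exists
+  ## before the C-level generator is called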
+  runif(1)
+
+  Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*R))
+  xrange <- Wsim$xrange
+  yrange <- Wsim$yrange
+
+  result <- vector(mode="list", length=nsim)
+
+  for(i in 1:nsim) {
+    storage.mode(beta) <- storage.mode(gamma) <- storage.mode(R) <- "double"
+    storage.mode(xrange) <- storage.mode(yrange) <- "double"
+  
+    z <- .Call("PerfectStrauss",
+               beta,
+               gamma,
+               R,
+               xrange,
+               yrange,
+               PACKAGE = "spatstat")
+
+    X <- z[[1]]
+    Y <- z[[2]]
+    nout <- z[[3]]
+    times <- c(start=z[[4]], end=z[[5]])
+    
+    if(nout<0)
+      stop("internal error: copying failed in PerfectStrauss")
+
+    seqn <- seq_len(nout)
+    P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE)
+    if(attr(Wsim, "changed"))
+      P <- P[W]
+    attr(P, "times") <- times
+
+    if(nsim == 1 && drop) return(P)
+    
+    result[[i]] <- P
+  }
+  result <- as.solist(result)
+  names(result) <- paste("Simulation", 1:nsim)
+  return(result)
+}
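+
+## Example (illustrative sketch, not run):
+##   X <- rStrauss(beta=200, gamma=0.2, R=0.07)   ## strongly inhibited pattern
+##   plot(X)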
+
+#  Perfect Simulation of Hardcore process
+
+rHardcore <- function(beta, R=0, W=owin(), expand=TRUE, nsim=1, drop=TRUE) {
+  if(!missing(W)) 
+    verifyclass(W, "owin")
+
+  check.1.real(beta)
+  check.1.real(R)
+
+  check.finite(beta)
+  check.finite(R)
+
+  stopifnot(beta > 0)
+  stopifnot(R    >= 0)
+
+  runif(1)
+
+  Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*R))
+  xrange <- Wsim$xrange
+  yrange <- Wsim$yrange
+
+  result <- vector(mode="list", length=nsim)
+
+  for(i in 1:nsim) {
+    storage.mode(beta) <- storage.mode(R) <- "double"
+    storage.mode(xrange) <- storage.mode(yrange) <- "double"
+  
+    z <- .Call("PerfectHardcore",
+               beta,
+               R,
+               xrange,
+               yrange,
+               PACKAGE = "spatstat")
+
+    X <- z[[1]]
+    Y <- z[[2]]
+    nout <- z[[3]]
+    
+    if(nout<0)
+      stop("internal error: copying failed in PerfectHardcore")
+
+    seqn <- seq_len(nout)
+    P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE)
+    if(attr(Wsim, "changed"))
+      P <- P[W]
+
+    if(nsim == 1 && drop) return(P)
+    result[[i]] <- P
+  }
+  result <- as.solist(result)
+  names(result) <- paste("Simulation", 1:nsim)
+  return(result)
+}
+
+#
+#  Perfect simulation of hybrid Strauss-Hardcore
+#        provided gamma <= 1
+#
+
+rStraussHard <- function(beta, gamma=1, R=0, H=0, W=owin(),
+                         expand=TRUE, nsim=1, drop=TRUE) {
+  if(!missing(W)) 
+    verifyclass(W, "owin")
+
+  check.1.real(beta)
+  check.1.real(gamma)
+  check.1.real(R)
+  check.1.real(H)
+
+  check.finite(beta)
+  check.finite(gamma)
+  check.finite(R)
+  check.finite(H)
+  
+  stopifnot(beta > 0)
+  stopifnot(gamma >= 0)
+  if(gamma > 1)
+    stop("Sorry, perfect simulation is only implemented for gamma <= 1")
+  stopifnot(R >= 0)
+  stopifnot(H >= 0)
+  stopifnot(H <= R)
+
+  runif(1)
+
+  Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*R))
+  xrange <- Wsim$xrange
+  yrange <- Wsim$yrange
+
+  result <- vector(mode="list", length=nsim)
+
+  for(i in 1:nsim) {
+    storage.mode(beta) <- storage.mode(gamma) <-
+      storage.mode(R) <- storage.mode(H) <- "double"
+    storage.mode(xrange) <- storage.mode(yrange) <- "double"
+  
+    z <- .Call("PerfectStraussHard",
+               beta,
+               gamma,
+               R,
+               H,
+               xrange,
+               yrange,
+               PACKAGE = "spatstat")
+
+    X <- z[[1]]
+    Y <- z[[2]]
+    nout <- z[[3]]
+
+    if(nout<0)
+      stop("internal error: copying failed in PerfectStraussHard")
+
+    seqn <- seq_len(nout)
+    P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE)
+    if(attr(Wsim, "changed"))
+      P <- P[W]
+
+    if(nsim == 1 && drop) return(P)
+    result[[i]] <- P
+  }
+  result <- as.solist(result)
+  names(result) <- paste("Simulation", 1:nsim)
+  return(result)
+}
+
+#
+#  Perfect Simulation of Diggle-Gratton process
+#
+
+rDiggleGratton <- function(beta, delta, rho, kappa=1, W=owin(),
+                           expand=TRUE, nsim=1, drop=TRUE) {
+  if(!missing(W)) 
+    verifyclass(W, "owin")
+
+  check.1.real(beta)
+  check.1.real(delta)
+  check.1.real(rho)
+  check.1.real(kappa)
+
+  check.finite(beta)
+  check.finite(delta)
+  check.finite(rho)
+  check.finite(kappa)
+
+  stopifnot(beta > 0)
+  stopifnot(delta >= 0)
+  stopifnot(rho   >= 0)
+  stopifnot(delta <= rho)
+  stopifnot(kappa >= 0)
+
+  runif(1)
+
+  Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*rho))
+  xrange <- Wsim$xrange
+  yrange <- Wsim$yrange
+
+  result <- vector(mode="list", length=nsim)
+
+  for(i in 1:nsim) {
+    storage.mode(beta) <- "double"
+    storage.mode(delta) <- storage.mode(rho) <- storage.mode(kappa) <- "double"
+    storage.mode(xrange) <- storage.mode(yrange) <- "double"
+  
+    z <- .Call("PerfectDiggleGratton",
+               beta,
+               delta,
+               rho,
+               kappa,
+               xrange,
+               yrange,
+               PACKAGE = "spatstat")
+
+    X <- z[[1]]
+    Y <- z[[2]]
+    nout <- z[[3]]
+
+    if(nout<0)
+      stop("internal error: copying failed in PerfectDiggleGratton")
+
+    seqn <- seq_len(nout)
+    P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE)
+    if(attr(Wsim, "changed"))
+      P <- P[W]
+
+    if(nsim == 1 && drop) return(P)
+    result[[i]] <- P
+  }
+  result <- as.solist(result)
+  names(result) <- paste("Simulation", 1:nsim)
+  return(result)
+}
+
+
+#
+#  Perfect Simulation of Diggle-Gates-Stibbard process
+#
+
+rDGS <- function(beta, rho, W=owin(), expand=TRUE, nsim=1, drop=TRUE) {
+  if(!missing(W)) 
+    verifyclass(W, "owin")
+
+  check.1.real(beta)
+  check.1.real(rho)
+
+  check.finite(beta)
+  check.finite(rho)
+
+  stopifnot(beta > 0)
+  stopifnot(rho  >= 0)
+
+  runif(1)
+
+  Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*rho))
+  xrange <- Wsim$xrange
+  yrange <- Wsim$yrange
+
+  result <- vector(mode="list", length=nsim)
+
+  for(i in 1:nsim) {
+    storage.mode(beta) <- "double"
+    storage.mode(rho) <- "double"
+    storage.mode(xrange) <- storage.mode(yrange) <- "double"
+  
+    z <- .Call("PerfectDGS",
+               beta,
+               rho,
+               xrange,
+               yrange,
+               PACKAGE = "spatstat")
+
+    X <- z[[1]]
+    Y <- z[[2]]
+    nout <- z[[3]]
+    
+    if(nout<0)
+      stop("internal error: copying failed in PerfectDGS")
+    
+    seqn <- seq_len(nout)
+    P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE)
+    if(attr(Wsim, "changed"))
+      P <- P[W]
+
+    if(nsim == 1 && drop) return(P)
+    result[[i]] <- P
+  }
+  result <- as.solist(result)
+  names(result) <- paste("Simulation", 1:nsim)
+  return(result)
+}
+
+
+#
+#  Perfect Simulation of Penttinen process
+#
+
+rPenttinen <- function(beta, gamma=1, R, W=owin(),
+                       expand=TRUE, nsim=1, drop=TRUE) {
+  if(!missing(W)) 
+    verifyclass(W, "owin")
+
+  check.1.real(beta)
+  check.1.real(gamma)
+  check.1.real(R)
+
+  check.finite(beta)
+  check.finite(gamma)
+  check.finite(R)
+
+  stopifnot(beta > 0)
+  stopifnot(gamma >= 0)
+  stopifnot(gamma <= 1)
+  stopifnot(R >= 0)
+
+  runif(1)
+
+  Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*R))
+  xrange <- Wsim$xrange
+  yrange <- Wsim$yrange
+
+  result <- vector(mode="list", length=nsim)
+
+  for(i in 1:nsim) {
+    storage.mode(beta) <- storage.mode(gamma) <- storage.mode(R) <- "double"
+    storage.mode(xrange) <- storage.mode(yrange) <- "double"
+  
+    z <- .Call("PerfectPenttinen",
+               beta,
+               gamma,
+               R,
+               xrange,
+               yrange,
+               PACKAGE = "spatstat")
+
+    X <- z[[1]]
+    Y <- z[[2]]
+    nout <- z[[3]]
+    
+    if(nout<0)
+      stop("internal error: copying failed in PerfectPenttinen")
+    
+    seqn <- seq_len(nout)
+    P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE)
+    if(attr(Wsim, "changed"))
+      P <- P[W]
+
+    if(nsim == 1 && drop) return(P)
+    result[[i]] <- P
+  }
+  result <- as.solist(result)
+  names(result) <- paste("Simulation", 1:nsim)
+  return(result)
+}
+
+
+## .......  utilities .................................
+
+expandwinPerfect <- function(W, expand, amount) {
+  ## expand 'W' if expand=TRUE according to default 'amount'
+  ## or expand 'W' using rmhexpand(expand)
+  if(!is.logical(expand)) {
+    amount <- rmhexpand(expand)
+    expand <- TRUE
+  }
+  changed <- FALSE
+  if(expand) {
+    W <- expand.owin(W, amount)
+    changed <- TRUE
+  }
+  if(!is.rectangle(W)) {
+    W <- as.rectangle(W)
+    changed <- TRUE
+    warning(paste("Simulation will be performed in the containing rectangle",
+                  "and clipped to the original window."),
+            call.=FALSE)
+  }
+  attr(W, "changed") <- changed
+  return(W)
+}
diff --git a/R/rags.R b/R/rags.R
new file mode 100644
index 0000000..7056377
--- /dev/null
+++ b/R/rags.R
@@ -0,0 +1,80 @@
+#'
+#'      rags.R
+#'
+#'   Alternating Gibbs Sampler
+#'
+#'      $Revision: 1.6 $  $Date: 2016/11/29 05:01:51 $
+#'
+#' Initial implementation for multitype hard core process
+#' without interaction within types
+
+rags <- function(model, ..., ncycles=100) {
+  if(!is.list(model)) stop("Argument 'model' should be a list")
+  if(!all(c("beta", "hradii") %in% names(model)))
+    stop("Argument 'model' should have entries 'beta' and 'hradii'")
+  do.call(ragsMultiHard, append(model, list(..., ncycles=ncycles)))
+}
+
+ragsMultiHard <- function(beta, hradii, ...,
+                          types=NULL, bmax=NULL,
+                          periodic=FALSE, ncycles=100) {
+  ## validate beta by generating first proposal points
+  Xprop <- rmpoispp(lambda=beta, lmax=bmax, ..., types=types)
+  ntypes <- length(levels(marks(Xprop)))
+  check.nmatrix(hradii, ntypes, things="types of points")
+  if(any(is.finite(dh <- diag(hradii)) & dh > 0))
+    stop("Interaction between points of the same type is not permitted")
+  ## initial state empty
+  X <- Xprop[integer(0)]
+  Y <- split(X)
+  ##
+  for(cycle in 1:ncycles) {
+    if(cycle > 1)
+      Xprop <- rmpoispp(lambda=beta, lmax=bmax, ..., types=types)
+    Xprop <- Xprop[order(coords(Xprop)$x)]
+    Yprop <- split(Xprop)
+    for(i in 1:ntypes) {
+      Xi <- Yprop[[i]]
+      ok <- TRUE
+      for(j in (1:ntypes)[-i]) {
+        if(!any(ok)) break
+        ok <- ok & !has.close(Xi, hradii[i,j], Y[[j]], sorted=TRUE,
+                             periodic=periodic)
+      }
+      Y[[i]] <- Xi[ok]
+    }
+  }
+  Z <- do.call(superimpose, Y)
+  return(Z)
+}
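+
+## Example (illustrative sketch, not run):
+##   ## two types, hard core of 0.05 between types but none within a type
+##   H <- matrix(c(0, 0.05, 0.05, 0), 2, 2)
+##   Y <- ragsMultiHard(beta=c(30, 30), hradii=H, types=1:2, ncycles=10)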
+
+ragsAreaInter <- function(beta, eta, r, ...,
+                          win=NULL, bmax=NULL,
+                          periodic=FALSE, ncycles=100) {
+  check.1.real(eta)
+  check.1.real(r)
+  if(r == 0 || eta == 1) return(rpoispp(beta, win=win, lmax=bmax, ...))
+  if(eta < 1)
+    stop("Alternating Gibbs algorithm requires eta >= 1", call.=FALSE)
+  if(is.function(beta)) {
+    beta <- as.im(beta, W=win, ...)
+  } else if(is.numeric(beta)) {
+    check.1.real(beta)
+    stopifnot(beta >= 0)
+  } else if(!is.im(beta)) {
+    stop("beta should be a number, a pixel image, or a function(x,y)",
+         call.=FALSE)
+  }
+  if(is.im(beta) && is.null(win))
+    win <- as.owin(beta)
+  kappa <- beta * eta
+  loggamma <- log(eta)/(pi * r^2)
+  bmax <- if(is.null(bmax)) NULL else c(max(kappa), loggamma)
+  B <- if(is.numeric(beta)) c(kappa, loggamma) else
+       solist(kappa, as.im(loggamma, W=win))
+  H <- matrix(c(0,r,r,0), 2, 2)
+  Y <- ragsMultiHard(B, H, types=1:2, bmax=bmax, periodic=periodic,
+                     ncycles=ncycles)
+  X <- split(Y)[[1]]
+  return(X)
+}
diff --git a/R/random.R b/R/random.R
new file mode 100755
index 0000000..4755d0e
--- /dev/null
+++ b/R/random.R
@@ -0,0 +1,1084 @@
+##
+##    random.R
+##
+##    Functions for generating random point patterns
+##
+##    $Revision: 4.92 $   $Date: 2017/06/05 10:31:58 $
+##
+##
+##    runifpoint()      n i.i.d. uniform random points ("binomial process")
+##
+##    runifpoispp()     uniform Poisson point process
+##
+##    rpoispp()         general Poisson point process (thinning method)
+##
+##    rpoint()          n independent random points (rejection/pixel list)
+##
+##    rMaternI()        Matern model I
+##    rMaternII()       Matern model II
+##    rSSI()            Simple Sequential Inhibition process
+##
+##    rthin()           independent random thinning
+##    rjitter()         random perturbation
+##
+##    Examples:
+##          u01 <- owin(0:1,0:1)
+##          plot(runifpoispp(100, u01))
+##          X <- rpoispp(function(x,y) {100 * (1-x/2)}, 100, u01)
+##          X <- rpoispp(function(x,y) {ifelse(x < 0.5, 100, 20)}, 100)
+##          plot(X)
+##          plot(rMaternI(100, 0.02))
+##          plot(rMaternII(100, 0.05))
+##
+
+runifrect <- function(n, win=owin(c(0,1),c(0,1)), nsim=1, drop=TRUE)
+{
+  ## no checking
+  xr <- win$xrange
+  yr <- win$yrange
+  result <- vector(mode="list", length=nsim)
+  for(isim in 1:nsim) {
+    x <- runif(n, min=xr[1], max=xr[2])
+    y <- runif(n, min=yr[1], max=yr[2])
+    result[[isim]] <- ppp(x, y, window=win, check=FALSE)
+  }
+  if(nsim == 1 && drop)
+    return(result[[1L]])
+  names(result) <- paste("Simulation", 1:nsim)
+  return(as.ppplist(result))
+}
+
+runifdisc <- function(n, radius=1, centre=c(0,0), ..., nsim=1, drop=TRUE)
+{
+  ## i.i.d. uniform points in the disc of radius r and centre (x,y)
+  check.1.real(radius)
+  stopifnot(radius > 0)
+  if(!missing(nsim)) {
+    check.1.integer(nsim)
+    stopifnot(nsim >= 1)
+  }
+  disque <- disc(centre=centre, radius=radius, ...)
+  twopi <- 2 * pi
+  rad2 <- radius^2
+  result <- vector(mode="list", length=nsim)
+  for(isim in 1:nsim) {
+    theta <- runif(n, min=0, max=twopi)
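+    ## taking the square root of a uniform variate makes
+    ## P(s <= t) = (t/radius)^2, i.e. uniform density over the disc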
+    s <- sqrt(runif(n, min=0, max=rad2))
+    result[[isim]] <- ppp(centre[1] + s * cos(theta),
+                          centre[2] + s * sin(theta),
+                          window=disque, check=FALSE)
+  }
+  if(nsim == 1 && drop)
+    return(result[[1L]])
+  names(result) <- paste("Simulation", 1:nsim)
+  return(as.ppplist(result))
+}
+
+
+runifpoint <- function(n, win=owin(c(0,1),c(0,1)),
+                       giveup=1000, warn=TRUE, ...,
+                       nsim=1, drop=TRUE, ex=NULL)
+{
+  if(!missing(nsim)) {
+    check.1.integer(nsim)
+    stopifnot(nsim >= 1)
+  }
+  
+  if(missing(n) && missing(win) && !is.null(ex)) {
+    stopifnot(is.ppp(ex))
+    n <- npoints(ex)
+    win <- Window(ex)
+  } else {
+    win <- as.owin(win)
+    check.1.integer(n)
+    stopifnot(n >= 0)
+  }
+
+  if(n == 0) {
+    emp <- ppp(numeric(0), numeric(0), window=win)
+    if(nsim == 1 && drop) return(emp)
+    result <- rep(list(emp), nsim)
+    names(result) <- paste("Simulation", 1:nsim)
+    return(as.ppplist(result))
+  }
+
+  if(warn) {
+    nhuge <- spatstat.options("huge.npoints")
+    if(n > nhuge) {
+      whinge <- paste("Attempting to generate", n, "random points")
+      message(whinge)
+      warning(whinge, call.=FALSE)
+    }
+  }
+
+  switch(win$type,
+         rectangle = {
+           return(runifrect(n, win, nsim=nsim, drop=drop))
+         },
+         mask = {
+           dx <- win$xstep
+           dy <- win$ystep
+           ## extract pixel coordinates and probabilities
+           rxy <- rasterxy.mask(win, drop=TRUE)
+           xpix <- rxy$x
+           ypix <- rxy$y
+           ## make a list of nsim point patterns
+           result <- vector(mode="list", length=nsim)
+           for(isim in 1:nsim) {
+             ## select pixels with equal probability
+             id <- sample(seq_along(xpix), n, replace=TRUE)
+             ## extract pixel centres and randomise within pixels
+             x <- xpix[id] + runif(n, min= -dx/2, max=dx/2)
+             y <- ypix[id] + runif(n, min= -dy/2, max=dy/2)
+             result[[isim]] <- ppp(x, y, window=win, check=FALSE)
+           }
+         },
+         polygonal={
+           ## make a list of nsim point patterns
+           result <- vector(mode="list", length=nsim)
+           for(isim in 1:nsim) {
+             ## rejection method
+             ## initialise empty pattern
+             x <- numeric(0)
+             y <- numeric(0)
+             X <- ppp(x, y, window=win)
+             ##
+             ## rectangle in which trial points will be generated
+             box <- boundingbox(win)
+             ## 
+             ntries <- 0
+             repeat {
+               ntries <- ntries + 1
+               ## generate trial points in batches of n
+               qq <- runifrect(n, box) 
+               ## retain those which are inside 'win'
+               qq <- qq[win]
+               ## add them to result
+               X <- superimpose(X, qq, W=win, check=FALSE)
+               ## if we have enough points, exit
+               if(X$n > n) {
+                 result[[isim]] <- X[1:n]
+                 break
+               } else if(X$n == n) {
+                 result[[isim]] <- X
+                 break
+               } else if(ntries >= giveup) {
+                 ## otherwise get bored eventually
+                 stop(paste("Gave up after", giveup * n, "trials,",
+                            X$n, "points accepted"))
+               }
+             }
+           }
+         },
+         stop("Unrecognised window type")
+         )
+  
+  ## list of point patterns produced.
+  if(nsim == 1 && drop)
+    return(result[[1L]])
+  names(result) <- paste("Simulation", 1:nsim)
+  return(as.ppplist(result))
+}
+
+runifpoispp <- function(lambda, win = owin(c(0,1),c(0,1)), ...,
+                        nsim=1, drop=TRUE) {
+  win <- as.owin(win)
+  if(!is.numeric(lambda) || length(lambda) > 1 ||
+     !is.finite(lambda) || lambda < 0)
+    stop("Intensity lambda must be a single finite number >= 0")
+  if(!missing(nsim)) {
+    check.1.integer(nsim)
+    stopifnot(nsim >= 1)
+  }
+
+  if(lambda == 0) {
+    ## return empty pattern
+    emp <- ppp(numeric(0), numeric(0), window=win)
+    if(nsim == 1 && drop) return(emp)
+    result <- rep(list(emp), nsim)
+    names(result) <- paste("Simulation", 1:nsim)
+    return(as.ppplist(result))
+  }
+
+  ## will generate Poisson process in enclosing rectangle and trim it
+  box <- boundingbox(win)
+  meanN <- lambda * area(box)
+  
+  result <- vector(mode="list", length=nsim)
+  for(isim in 1:nsim) {
+    n <- rpois(1, meanN)
+    if(!is.finite(n))
+      stop(paste("Unable to generate Poisson process with a mean of",
+                 meanN, "points"))
+    X <- runifpoint(n, box)
+    ## trim to window
+    if(win$type != "rectangle")
+      X <- X[win]
+    result[[isim]] <- X
+  }
+  if(nsim == 1 && drop)
+    return(result[[1L]])
+  names(result) <- paste("Simulation", 1:nsim)
+  return(as.ppplist(result))
+}
+
+rpoint <- function(n, f, fmax=NULL,
+                   win=unit.square(), ..., giveup=1000,verbose=FALSE,
+                   nsim=1, drop=TRUE) {
+  
+  if(missing(f) || (is.numeric(f) && length(f) == 1))
+    ## uniform distribution
+    return(runifpoint(n, win, giveup, nsim=nsim, drop=drop))
+  
+  ## non-uniform distribution....
+  if(!is.function(f) && !is.im(f))
+    stop(paste(sQuote("f"),
+               "must be either a function or an",
+               sQuote("im"), "object"))
+
+  if(!missing(nsim)) {
+    check.1.integer(nsim)
+    stopifnot(nsim >= 1)
+  }
+  
+  if(is.im(f)) {
+    ## ------------ PIXEL IMAGE ---------------------
+    wf <- as.owin(f)
+    if(n == 0) {
+      ## return empty pattern(s)
+      emp <- ppp(numeric(0), numeric(0), window=wf)
+      if(nsim == 1 && drop) return(emp)
+      result <- rep(list(emp), nsim)
+      names(result) <- paste("Simulation", 1:nsim)
+      return(as.ppplist(result))
+    }
+    w <- as.mask(wf)
+    M <- w$m
+    dx <- w$xstep
+    dy <- w$ystep
+    ## extract pixel coordinates and probabilities
+    rxy <- rasterxy.mask(w, drop=TRUE)
+    xpix <- rxy$x
+    ypix <- rxy$y
+    ppix <- as.vector(f$v[M]) ## not normalised - OK
+    ##
+    result <- vector(mode="list", length=nsim)
+    for(isim in 1:nsim) {
+      ## select pixels
+      id <- sample(length(xpix), n, replace=TRUE, prob=ppix)
+      ## extract pixel centres and randomise within pixels
+      x <- xpix[id] + runif(n, min= -dx/2, max=dx/2)
+      y <- ypix[id] + runif(n, min= -dy/2, max=dy/2)
+      result[[isim]] <- ppp(x, y, window=wf, check=FALSE)
+    }
+    if(nsim == 1 && drop)
+      return(result[[1L]])
+    names(result) <- paste("Simulation", 1:nsim)
+    return(as.ppplist(result))
+  }
+
+  ## ------------ FUNCTION  ---------------------  
+  ## Establish parameters for rejection method
+
+  verifyclass(win, "owin")
+
+  if(n == 0) {
+    ## return empty pattern(s)
+    emp <- ppp(numeric(0), numeric(0), window=win)
+    if(nsim == 1 && drop) return(emp)
+    result <- rep(list(emp), nsim)
+    names(result) <- paste("Simulation", 1:nsim)
+    return(as.ppplist(result))
+  }
+  
+  if(is.null(fmax)) {
+    ## compute approx maximum value of f
+    imag <- as.im(f, win, ...)
+    summ <- summary(imag)
+    fmax <- summ$max + 0.05 * diff(summ$range)
+  }
+  irregular <- (win$type != "rectangle")
+  box <- boundingbox(win)
+
+  result <- vector(mode="list", length=nsim)
+  for(isim in 1:nsim) {
+
+    ## initialise empty pattern
+    X <- ppp(numeric(0), numeric(0), window=win)
+  
+    pbar <- 1
+    nremaining <- n
+    totngen <- 0
+    
+    ## generate uniform random points in batches
+    ## and apply the rejection method.
+    ## Collect any points that are retained in X
+
+    ntries <- 0
+    repeat{
+      ntries <- ntries + 1
+      ## proposal points
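+      ## (batch size scaled by the running acceptance rate 'pbar',
+      ##  plus a safety margin)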
+      ngen <- nremaining/pbar + 10
+      totngen <- totngen + ngen
+      prop <- runifrect(ngen, box)
+      if(irregular)
+        prop <- prop[win]
+      if(prop$n > 0) {
+        fvalues <- f(prop$x, prop$y, ...)
+        paccept <- fvalues/fmax
+        u <- runif(prop$n)
+        ## accepted points
+        Y <- prop[u < paccept]
+        if(Y$n > 0) {
+          ## add to X
+          X <- superimpose(X, Y, W=win, check=FALSE)
+          nX <- X$n
+          pbar <- nX/totngen
+          nremaining <- n - nX
+          if(nremaining <= 0) {
+            ## we have enough!
+            if(verbose)
+              splat("acceptance rate = ", round(100 * pbar, 2), "%")
+            result[[isim]] <- if(nX == n) X else X[1:n]
+            break
+          }
+        }
+      }
+      if(ntries > giveup)
+        stop(paste("Gave up after",giveup * n,"trials with",
+                   X$n, "points accepted"))
+    }
+  }
+  if(nsim == 1 && drop) return(result[[1L]])
+  names(result) <- paste("Simulation", 1:nsim)
+  return(as.ppplist(result))
+}
+
+rpoispp <- function(lambda, lmax=NULL, win = owin(), ...,
+                    nsim=1, drop=TRUE, ex=NULL, warnwin=TRUE) {
+  ## arguments:
+  ##     lambda  intensity: constant, function(x,y,...) or image
+  ##     lmax     maximum possible value of lambda(x,y,...)
+  ##     win     default observation window (of class 'owin')
+  ##   ...       arguments passed to lambda(x, y, ...)
+  ##     nsim    number of replicate simulations
+
+  if(!missing(nsim)) {
+    check.1.integer(nsim)
+    stopifnot(nsim >= 1)
+  }
+  
+  if(missing(lambda) && is.null(lmax) && missing(win) && !is.null(ex)) {
+    lambda <- intensity(unmark(ex))
+    win <- Window(ex)
+  } else {
+    if(!(is.numeric(lambda) || is.function(lambda) || is.im(lambda)))
+      stop(paste(sQuote("lambda"),
+                 "must be a constant, a function or an image"))
+    if(is.numeric(lambda) && !(length(lambda) == 1 && lambda >= 0))
+      stop(paste(sQuote("lambda"),
+                 "must be a single, nonnegative number"))
+    if(!is.null(lmax)) {
+      if(!is.numeric(lmax))
+        stop("lmax should be a number")
+      if(length(lmax) > 1)
+        stop("lmax should be a single number")
+    }
+    if(is.im(lambda)) {
+      if(warnwin && !missing(win))
+        warning("Argument win ignored", call.=FALSE)
+      win <- rescue.rectangle(as.owin(lambda))
+    } else {
+      win <- as.owin(win)
+    }
+  }
+  
+  if(is.numeric(lambda)) 
+    ## uniform Poisson
+    return(runifpoispp(lambda, win, nsim=nsim, drop=drop))
+
+  ## inhomogeneous Poisson
+  ## perform thinning of uniform Poisson
+  ## determine upper bound
+  if(is.null(lmax)) {
+    imag <- as.im(lambda, win, ...)
+    summ <- summary(imag)
+    lmax <- summ$max + 0.05 * diff(summ$range)
+  } 
+
+  if(is.function(lambda)) {
+    ## function lambda
+    #'      runifpoispp checks 'lmax'
+    result <- runifpoispp(lmax, win, nsim=nsim, drop=FALSE)
+    #'      result is a 'ppplist' with appropriate names
+    for(isim in 1:nsim) {
+      X <- result[[isim]]
+      if(X$n > 0) {
+        prob <- lambda(X$x, X$y, ...)/lmax
+        u <- runif(X$n)
+        retain <- (u <= prob)
+        result[[isim]] <- X[retain]
+      }
+    }
+    if(nsim == 1 && drop)
+       result <- result[[1L]]
+    return(result)
+  }
+
+  if(is.im(lambda)) {
+    ## image lambda
+    if(spatstat.options("fastpois")) {
+      ## new code: sample pixels directly
+      mu <- integral(lambda)
+      dx <- lambda$xstep/2
+      dy <- lambda$ystep/2
+      df <- as.data.frame(lambda)
+      npix <- nrow(df)
+      lpix <- df$value
+      result <- vector(mode="list", length=nsim)
+      nn <- rpois(nsim, mu)
+      if(!all(is.finite(nn)))
+        stop(paste("Unable to generate Poisson process with a mean of",
+                   mu, "points"))
+      for(isim in seq_len(nsim)) {
+        ni <- nn[isim]
+        ii <- sample.int(npix, size=ni, replace=TRUE, prob=lpix)
+        xx <- df$x[ii] + runif(ni, -dx, dx)
+        yy <- df$y[ii] + runif(ni, -dy, dy)
+        result[[isim]] <- ppp(xx, yy, window=win, check=FALSE)
+      }
+      if(nsim == 1 && drop) return(result[[1L]])
+      names(result) <- paste("Simulation", 1:nsim)
+      return(as.ppplist(result))
+    } else {
+      ## old code: thinning
+      result <- runifpoispp(lmax, win, nsim=nsim, drop=FALSE)
+      for(isim in 1:nsim) {
+        X <- result[[isim]]
+        if(X$n > 0) {
+          prob <- lambda[X]/lmax
+          u <- runif(X$n)
+          retain <- (u <= prob)
+          result[[isim]] <- X[retain]
+        }
+      }
+      if(nsim == 1 && drop)
+         return(result[[1L]])
+      return(result)
+    }
+  }
+  stop(paste(sQuote("lambda"), "must be a constant, a function or an image"))
+}
+    
+rMaternI <- function(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE,
+                     ..., nsim=1, drop=TRUE)
+{
+  rMaternInhibition(type=1,
+                    kappa=kappa, r=r, win=win, stationary=stationary,
+                    ..., nsim=nsim, drop=drop)
+}
+
+rMaternII <- function(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE,
+                     ..., nsim=1, drop=TRUE)
+{
+  rMaternInhibition(type=2,
+                    kappa=kappa, r=r, win=win, stationary=stationary,
+                    ..., nsim=nsim, drop=drop)
+}
+
+rMaternInhibition <- function(type, 
+                              kappa, r, win = owin(c(0,1),c(0,1)),
+                              stationary=TRUE,
+                              ..., nsim=1, drop=TRUE) {
+  stopifnot(is.numeric(r) && length(r) == 1)
+  stopifnot(type %in% c(1,2))
+  if(!missing(nsim)) {
+    check.1.integer(nsim)
+    stopifnot(nsim >= 1)
+  }
+  ## Resolve window class
+  if(!inherits(win, c("owin", "box3", "boxx"))) {
+    givenwin <- win
+    win <- try(as.owin(givenwin), silent = TRUE)
+    if(inherits(win, "try-error"))
+      win <- try(as.boxx(givenwin), silent = TRUE)
+    if(inherits(win, "try-error"))
+      stop("Could not coerce argument win to a window (owin, box3 or boxx).")
+  }
+  dimen <- spatdim(win)
+  if(dimen == 2) {
+    bigbox <- if(stationary) grow.rectangle(win, r) else win
+    result <- rpoispp(kappa, win = bigbox, nsim = nsim, drop=FALSE)
+  } else if(dimen == 3) {
+    bigbox <- if(stationary) grow.box3(win, r) else win
+    result <- rpoispp3(kappa, domain = bigbox, nsim = nsim, drop=FALSE)
+  } else {
+    bigbox <- if(stationary) grow.boxx(win, r) else win
+    result <- rpoisppx(kappa, domain = bigbox, nsim = nsim, drop=FALSE)
+  }
+  for(isim in 1:nsim) {
+    Y <- result[[isim]]
+    nY <- npoints(Y)
+    if(type == 1) {
+      ## Matern Model I
+      if(nY > 1) {
+        d <- nndist(Y)
+        Y <- Y[d > r]
+      }
+    } else {
+      ## Matern Model II
+      if(nY > 1) {
+        ## matrix of squared pairwise distances
+        d2 <- pairdist(Y, squared=TRUE)
+        close <- (d2 <= r^2)
+        ## random order 1:n
+        age <- sample(seq_len(nY), nY, replace=FALSE)
+        earlier <- outer(age, age, ">")
+        conflict <- close & earlier
+        ## delete <- apply(conflict, 1, any)
+        delete <- matrowany(conflict)
+        Y <- Y[!delete]
+      }
+    }
+    if(stationary)
+      Y <- Y[win]
+    result[[isim]] <- Y
+  }
+  if(nsim == 1 && drop) return(result[[1L]])
+  if(is.owin(win))
+    result <- as.ppplist(result)
+  return(result)
+}
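+
+## Example (illustrative sketch, not run):
+##   plot(rMaternI(kappa=100, r=0.05))   ## every proposal within r of another is deleted
+##   plot(rMaternII(kappa=100, r=0.05))  ## only the younger point of each close pair is deleted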
+
+rSSI <- function(r, n=Inf, win = square(1), 
+                 giveup = 1000, x.init=NULL, ...,
+                 f=NULL, fmax=NULL,
+                 nsim=1, drop=TRUE)
+{
+  win.given <- !missing(win) && !is.null(win)
+  stopifnot(is.numeric(r) && length(r) == 1 && r >= 0)
+  stopifnot(is.numeric(n) && length(n) == 1 && n >= 0)
+  must.reach.n <- is.finite(n)
+  if(!missing(nsim)) {
+    check.1.integer(nsim)
+    stopifnot(nsim >= 1)
+  }
+  ##
+  if(!is.null(f)) {
+    stopifnot(is.numeric(f) || is.im(f) || is.function(f))
+    if(is.null(fmax) && !is.numeric(f))
+      fmax <- if(is.im(f)) max(f) else max(as.im(f, win))
+  }
+  ##
+  result <- vector(mode="list", length=nsim)
+  if(!win.given) win <- square(1)
+  ## validate initial state
+  if(is.null(x.init)) {
+    ## start with empty pattern in specified window
+    win <- as.owin(win)
+    x.init <- ppp(numeric(0),numeric(0), window=win)
+  } else {
+    ## start with specified pattern
+    stopifnot(is.ppp(x.init))
+    if(!win.given) {
+      win <- as.owin(x.init)
+    } else {
+      ## check compatibility of windows
+      if(!identical(win, as.owin(x.init)))
+        warning(paste("Argument", sQuote("win"),
+                      "is not the same as the window of", sQuote("x.init")))
+      x.init.new <- x.init[win]
+      if(npoints(x.init.new) == 0)
+        stop(paste("No points of x.init lie inside the specified window",
+                   sQuote("win")))
+      nlost <- npoints(x.init) - npoints(x.init.new)
+      if(nlost > 0) 
+        warning(paste(nlost, "out of",
+                      npoints(x.init), "points of the pattern x.init",
+                      "lay outside the specified window",
+                      sQuote("win")))
+      x.init <- x.init.new
+    }
+    if(n < npoints(x.init))
+      stop(paste("x.init contains", npoints(x.init), "points",
+                 "but a pattern containing only n =", n, "points", 
+                 "is required"))
+    if(n == npoints(x.init)) {
+      warning(paste("Initial state x.init already contains", n, "points;",
+                    "no further points were added"))
+      if(nsim == 1 && drop)
+         return(x.init)
+      result <- rep(list(x.init), nsim)
+      names(result) <- paste("Simulation", 1:nsim)
+      return(as.ppplist(result))
+    }
+  }
+  #' validate radius
+  r2 <- r^2
+  if(!is.infinite(n) && (n * pi * r2/4  > area(win)))
+      warning(paste("Window is too small to fit", n, "points",
+                    "at minimum separation", r))
+  #' start simulation 		    
+  pstate <- list()
+  for(isim in 1:nsim) {
+    if(nsim > 1) pstate <- progressreport(isim, nsim, state=pstate)
+    ## Simple Sequential Inhibition process
+    ## fixed number of points
+    ## Naive implementation, proposals are uniform
+    X <- x.init
+    ntries <- 0
+    while(ntries < giveup) {
+      ntries <- ntries + 1
+      qq <- if(is.null(f)) runifpoint(1, win) else rpoint(1, f, fmax, win)
+      dx <- qq$x[1] - X$x
+      dy <- qq$y[1] - X$y
+      if(all(dx^2 + dy^2 > r2)) {
+        X <- superimpose(X, qq, W=win, check=FALSE)
+        ntries <- 0
+      }
+      if(X$n >= n)
+        break
+    }
+    if(must.reach.n && X$n < n)
+      warning(paste("Gave up after", giveup,
+                    "attempts with only", X$n, "points placed out of", n))
+    result[[isim]] <- X
+  }
+  if(nsim == 1 && drop)
+    return(result[[1L]])
+  names(result) <- paste("Simulation", 1:nsim)
+  return(as.ppplist(result))
+}
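+
+## Example (illustrative sketch, not run):
+##   plot(rSSI(0.05, 200))   ## up to 200 points at least 0.05 apart in the unit square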
+
+rPoissonCluster <-
+  function(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)), ...,
+           lmax=NULL, nsim=1, drop=TRUE, saveparents=TRUE)
+{
+  ## Generic Poisson cluster process
+  ## Implementation for bounded cluster radius
+  ##
+  ## 'rcluster' is a function(x,y) that takes the coordinates
+  ## (x,y) of the parent point and generates a list(x,y) of offspring
+  ##
+  ## "..." are arguments to be passed to 'rcluster()'
+  ##
+
+  ## Catch old argument name rmax for expand, and allow rmax to be
+  ## passed to rcluster (and then be ignored)
+  if(missing(expand) && !is.null(rmax <- list(...)$rmax)){
+      expand <- rmax
+      f <- rcluster
+      rcluster <- function(..., rmax) f(...)
+  }
+  win <- as.owin(win)
+  
+  if(!missing(nsim)) {
+    check.1.integer(nsim)
+    stopifnot(nsim >= 1)
+  }
+
+  ## Generate parents in dilated window
+  frame <- boundingbox(win)
+  dilated <- owin(frame$xrange + c(-expand, expand),
+                  frame$yrange + c(-expand, expand))
+  if(is.im(kappa) && !is.subset.owin(dilated, as.owin(kappa)))
+    stop(paste("The window in which the image",
+               sQuote("kappa"),
+               "is defined\n",
+               "is not large enough to contain the dilation of the window",
+               sQuote("win")))
+  parentlist <- rpoispp(kappa, lmax=lmax, win=dilated, nsim=nsim)
+  if(nsim == 1) parentlist <- list(parentlist)
+
+  resultlist <- vector(mode="list", length=nsim)
+  for(isim in 1:nsim) {
+    parents <- parentlist[[isim]]
+    result <- NULL
+    ## generate clusters
+    np <- parents$n
+    if(np > 0) {
+      xparent <- parents$x
+      yparent <- parents$y
+      for(i in seq_len(np)) {
+        ## generate random offspring of i-th parent point
+        cluster <- rcluster(xparent[i], yparent[i], ...)
+        if(!inherits(cluster, "ppp"))
+          cluster <- ppp(cluster$x, cluster$y, window=frame, check=FALSE)
+        ## skip if cluster is empty
+        if(cluster$n > 0) {
+          ## trim to window
+          cluster <- cluster[win]
+          if(is.null(result)) {
+            ## initialise offspring pattern and offspring-to-parent map
+            result <- cluster
+            parentid <- rep.int(1, cluster$n)
+          } else {
+            ## add to pattern
+            result <- superimpose(result, cluster, W=win, check=FALSE)
+            ## update offspring-to-parent map
+            parentid <- c(parentid, rep.int(i, cluster$n))
+          }
+        }
+      }
+    } else {
+      ## no parents - empty pattern
+      result <- ppp(numeric(0), numeric(0), window=win)
+      parentid <- integer(0)
+    }
+
+    if(saveparents) {
+      attr(result, "parents") <- parents
+      attr(result, "parentid") <- parentid
+      attr(result, "expand") <- expand
+    }
+    
+    resultlist[[isim]] <- result
+  }
+
+  if(nsim == 1 && drop) return(resultlist[[1]])
+
+  names(resultlist) <- paste("Simulation", 1:nsim)
+  return(as.ppplist(resultlist))
+}  
+
+rGaussPoisson <- local({
+  
+  rGaussPoisson <- function(kappa, r, p2, win=owin(c(0,1), c(0,1)),
+                            ..., nsim=1, drop=TRUE) {
+    ## Gauss-Poisson process
+    result <- rPoissonCluster(kappa, 1.05 * r, oneortwo,
+                              win, radius=r/2, p2=p2, nsim=nsim, drop=drop)
+    return(result)
+  }
+
+  oneortwo <- function(x0, y0, radius, p2) {
+    if(runif(1) > p2) 
+      ## one point
+      return(list(x=x0, y=y0))
+    ## two points
+    theta <- runif(1, min=0, max=2*pi)
+    return(list(x=x0+c(-1,1)*radius*cos(theta),
+                y=y0+c(-1,1)*radius*sin(theta)))
+  }
+
+  rGaussPoisson
+})
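+
+## Example (illustrative sketch, not run):
+##   plot(rGaussPoisson(kappa=30, r=0.07, p2=0.5))  ## clusters of 1 or 2 points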
+
+  
+rstrat <- function(win=square(1), nx, ny=nx, k=1, nsim=1, drop=TRUE) {
+  win <- as.owin(win)
+  stopifnot(nx >= 1 && ny >= 1)
+  stopifnot(k >= 1)
+  if(!missing(nsim)) {
+    check.1.integer(nsim)
+    stopifnot(nsim >= 1)
+  }
+  result <- vector(mode="list", length=nsim)
+  for(isim in 1:nsim) {
+    xy <- stratrand(win, nx, ny, k)
+    Xbox <- ppp(xy$x, xy$y, win$xrange, win$yrange, check=FALSE)
+    result[[isim]] <- Xbox[win]
+  }
+  if(nsim == 1 && drop)
+    return(result[[1L]])
+  names(result) <- paste("Simulation", 1:nsim)
+  return(as.ppplist(result))
+}
+
+xy.grid <- function(xr, yr, nx, ny, dx, dy) {
+  nx.given <- !is.null(nx)
+  ny.given <- !is.null(ny)
+  dx.given <- !is.null(dx)
+  dy.given <- !is.null(dy)
+  if(nx.given && dx.given)
+    stop("Do not give both nx and dx")    
+  if(nx.given) {
+    stopifnot(nx >= 1)
+    x0 <- seq(from=xr[1], to=xr[2], length.out=nx+1)
+    dx <- diff(xr)/nx
+  } else if(dx.given) {
+    stopifnot(dx > 0)
+    x0 <- seq(from=xr[1], to=xr[2], by=dx)
+    nx <- length(x0) - 1
+  } else stop("Need either nx or dx")
+  ## determine y grid
+  if(ny.given && dy.given)
+    stop("Do not give both ny and dy")    
+  if(ny.given) {
+    stopifnot(ny >= 1)
+    y0 <- seq(from=yr[1], to=yr[2], length.out=ny+1)
+    dy <- diff(yr)/ny
+  } else {
+    if(is.null(dy)) dy <- dx
+    stopifnot(dy > 0)
+    y0 <- seq(from=yr[1], to=yr[2], by=dy)
+    ny <- length(y0) - 1
+  }
+  return(list(x0=x0, y0=y0, nx=nx, ny=ny, dx=dx, dy=dy))
+}
+  
+rsyst <- function(win=square(1), nx=NULL, ny=nx, ..., dx=NULL, dy=dx,
+                  nsim=1, drop=TRUE) {
+  if(!missing(nsim)) {
+    check.1.integer(nsim)
+    stopifnot(nsim >= 1)
+  }
+  win <- as.owin(win)
+  xr <- win$xrange
+  yr <- win$yrange
+  ## determine grid coordinates 
+  if(missing(ny)) ny <- NULL
+  if(missing(dy)) dy <- NULL
+  g <- xy.grid(xr, yr, nx, ny, dx, dy)
+  x0 <- g$x0
+  y0 <- g$y0
+  dx <- g$dx
+  dy <- g$dy
+  ## assemble grid and randomise location
+  xy0 <- expand.grid(x=x0, y=y0)
+  result <- vector(mode="list", length=nsim)
+  for(isim in 1:nsim) {
+    x <- xy0$x + runif(1, min = 0, max = dx)
+    y <- xy0$y + runif(1, min = 0, max = dy)
+    Xbox <- ppp(x, y, xr, yr, check=FALSE)
+    ## trim to window
+    result[[isim]] <- Xbox[win]
+  }
+  if(nsim == 1 && drop)
+    return(result[[1L]])
+  names(result) <- paste("Simulation", 1:nsim)
+  return(as.ppplist(result))
+}
+
+rcellnumber <- local({
+
+  rcellnumber <- function(n, N=10, mu=1) {
+    if(missing(mu) || mu == 1) {
+      z <- rCellUnit(n=n, N=N)
+    } else {
+      z <- replicate(n, rCellCumul(x=mu, N=N))
+    }
+    return(z)
+  }
+  
+  rCellUnit <- function(n, N=10) {
+    if(!missing(N)) {
+      if(round(N) != N) stop("N must be an integer")
+      stopifnot(is.finite(N))
+      stopifnot(N > 1)
+    }
+    u <- runif(n, min=0, max=1)
+    p0 <- 1/N
+    pN <- 1/(N * (N-1))
+    k <- ifelse(u < p0, 0, ifelse(u < (1 - pN), 1, N))
+    return(k)
+  }
+  
+  rCellCumul <- function(x, N=10) {
+    check.1.real(x)
+    n <- ceiling(x)
+    if(n <= 0) return(0)
+    y <- rCellUnit(n=n, N=N)
+    if(n == x) return(sum(y))
+    p <- x - (n-1)
+    z <- sum(y[-1]) + rbinom(1, size=y[1], prob=p)
+    return(z)
+  }
+
+  rcellnumber
+})
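+## These counts take the values 0, 1 or N with probabilities 1/N,
+## 1 - 1/N - 1/(N(N-1)) and 1/(N(N-1)) respectively, which gives mean 1
+## and variance 1, mimicking Poisson(1) counts up to second moments.
+## Editor's sketch:
+##   table(rcellnumber(1000, N=10))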
+
+rcell <- function(win=square(1), nx=NULL, ny=nx, ...,
+                  dx=NULL, dy=dx, N=10, nsim=1, drop=TRUE) {
+  if(!missing(nsim)) {
+    check.1.integer(nsim)
+    stopifnot(nsim >= 1)
+  }
+  win <- as.owin(win)
+  xr <- win$xrange
+  yr <- win$yrange
+  ## determine grid coordinates 
+  if(missing(ny)) ny <- NULL
+  if(missing(dy)) dy <- NULL
+  g <- xy.grid(xr, yr, nx, ny, dx, dy)
+  nx <- g$nx
+  ny <- g$ny
+  x0 <- g$x0
+  y0 <- g$y0
+  dx <- g$dx
+  dy <- g$dy
+  ## generate pattern(s)
+  result <- vector(mode="list", length=nsim)
+  for(isim in 1:nsim) {
+    x <- numeric(0)
+    y <- numeric(0)
+    for(ix in seq_len(nx))
+      for(iy in seq_len(ny)) {
+        nij <- rcellnumber(1, N)
+        x <- c(x, x0[ix] + runif(nij, min=0, max=dx))
+        y <- c(y, y0[iy] + runif(nij, min=0, max=dy))
+      }
+    Xbox <- ppp(x, y, xr, yr, check=FALSE)
+    result[[isim]] <- Xbox[win]
+  }
+  if(nsim == 1 && drop) return(result[[1L]])
+  names(result) <- paste("Simulation", 1:nsim)
+  return(as.ppplist(result))
+}
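+## Usage sketch (editor's illustration): a Baddeley-Silverman cell
+## process on an 8 x 8 grid of the unit square:
+##   X <- rcell(square(1), nx=8)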
+
+
+thinjump <- function(n, p) {
+  # equivalent to which(runif(n) < p) for constant p;
+  # when p > 0.5 it returns negative indices (points to drop), which
+  # select the retained points when used for subsetting
+  stopifnot(length(p) == 1)
+  if(p <= 0) return(integer(0))
+  if(p >= 1) return(seq_len(n))
+  if(p > 0.5) return(-thinjump(n, 1-p))
+  guessmaxlength <- ceiling(n * p + 2 * sqrt(n * p * (1-p)))
+  i <- .Call("thinjumpequal",
+             n, p, guessmaxlength,
+             PACKAGE = "spatstat")
+  return(i)
+}
+
+rthin <- function(X, P, ..., nsim=1, drop=TRUE) {
+  stopifnot(is.ppp(X) || is.lpp(X))
+  if(!missing(nsim)) {
+    check.1.integer(nsim)
+    stopifnot(nsim >= 1)
+  }
+  nX <- npoints(X)
+  if(nX == 0) {
+    if(nsim == 1 && drop) return(X)
+    result <- rep(list(X), nsim)
+    names(result) <- paste("Simulation", 1:nsim)
+    result <- if(is.ppp(X)) as.ppplist(result) else as.solist(result)
+    return(result)
+  }
+
+  if(is.numeric(P) && length(P) == 1 && spatstat.options("fastthin")) {
+    # special algorithm for constant probability
+    result <- vector(mode="list", length=nsim)
+    for(isim in 1:nsim) {
+      retain <- thinjump(nX, P)
+      Y <- X[retain]
+      ## also handle offspring-to-parent map if present
+      if(!is.null(parentid <- attr(X, "parentid")))
+        attr(Y, "parentid") <- parentid[retain]
+      result[[isim]] <- Y
+    }
+    if(nsim == 1 && drop)
+      return(result[[1L]])
+    names(result) <- paste("Simulation", 1:nsim)
+    result <- if(is.ppp(X)) as.ppplist(result) else as.solist(result)
+    return(result)
+  }
+
+  if(is.numeric(P)) {
+    ## vector of retention probabilities
+    pX <- P
+    if(length(pX) != nX) {
+      if(length(pX) == 1)
+        pX <- rep.int(pX, nX)
+      else 
+        stop("Length of vector P does not match number of points of X")
+    }
+    if(anyNA(pX))
+      stop("P contains NA's")
+  } else if(is.function(P)) {
+    ## function - evaluate it at points of X
+    pX <- if(inherits(P, c("linfun", "funxy"))) P(X, ...) else P(X$x, X$y, ...)
+    if(length(pX) != nX)
+      stop("Function P returned a vector of incorrect length")
+    if(!is.numeric(pX))
+      stop("Function P returned non-numeric values")
+    if(anyNA(pX))
+      stop("Function P returned some NA values")
+  } else if(is.im(P)) {
+    ## image - look it up
+    if(!(P$type %in% c("integer", "real")))
+      stop("Values of image P should be numeric")
+    pX <- P[X, drop=FALSE]
+    if(anyNA(pX))
+      stop("some points of X lie outside the domain of image P")
+  } else stop("Unrecognised format for P")
+
+  if(min(pX) < 0) stop("some probabilities are negative")
+  if(max(pX) > 1) stop("some probabilities are greater than 1")
+
+  result <- vector(mode="list", length=nsim)
+  for(isim in 1:nsim) {
+    retain <- (runif(length(pX)) < pX)
+    Y <- X[retain]
+    ## also handle offspring-to-parent map if present
+    if(!is.null(parentid <- attr(X, "parentid")))
+      attr(Y, "parentid") <- parentid[retain]
+    result[[isim]] <- Y
+  }
+  if(nsim == 1 && drop)
+    return(result[[1L]])
+  names(result) <- paste("Simulation", 1:nsim)
+  result <- if(is.ppp(X)) as.ppplist(result) else as.solist(result)
+  return(result)
+}
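+## Usage sketch (editor's illustration, using the 'cells' dataset that
+## ships with spatstat): constant and spatially varying thinning:
+##   Y1 <- rthin(cells, 0.5)                   # keep each point w.p. 1/2
+##   Y2 <- rthin(cells, function(x, y) { x })  # retention prob = x coordinate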
+
+
+## rjitter
+
+rjitter <- function(X, radius, retry=TRUE, giveup=10000, ...,
+                    nsim=1, drop=TRUE) {
+  verifyclass(X, "ppp")
+  if(missing(radius) || is.null(radius))
+    radius <- bw.stoyan(X)
+  
+  if(!missing(nsim)) {
+    check.1.integer(nsim)
+    stopifnot(nsim >= 1)
+  }
+  nX <- npoints(X)
+  W <- X$window
+  if(nX == 0) {
+    if(nsim == 1 && drop) return(X)
+    result <- rep(list(X), nsim)
+    names(result) <- paste("Simulation", 1:nsim)
+    return(as.ppplist(result))
+  }
+  result <- vector(mode="list", length=nsim)
+  for(isim in 1:nsim) {
+    if(!retry) {
+      ## points outside window are lost
+      D <- runifdisc(nX, radius=radius)
+      xnew <- X$x + D$x
+      ynew <- X$y + D$y
+      ok <- inside.owin(xnew, ynew, W)
+      result[[isim]] <- ppp(xnew[ok], ynew[ok], window=W, check=FALSE)
+    } else {
+      ## retry = TRUE: condition on points being inside window
+      undone <- rep.int(TRUE, nX)
+      triesleft <- giveup
+      Xshift <- X
+      while(any(undone)) {
+        triesleft <- triesleft - 1
+        if(triesleft <= 0) 
+	  break
+        Y <- Xshift[undone]
+        D <- runifdisc(Y$n, radius=radius)
+        xnew <- Y$x + D$x
+        ynew <- Y$y + D$y
+        ok <- inside.owin(xnew, ynew, W)
+        if(any(ok)) {
+          changed <- which(undone)[ok]
+          Xshift$x[changed] <- xnew[ok]
+          Xshift$y[changed] <- ynew[ok]
+          undone[changed] <- FALSE
+        }
+      }
+      result[[isim]] <- Xshift
+    }
+  }
+  if(nsim == 1 && drop)
+    return(result[[1L]])
+  names(result) <- paste("Simulation", 1:nsim)
+  return(as.ppplist(result))
+}
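+## Usage sketch (editor's illustration): displace each point of 'cells'
+## uniformly within a disc of radius 0.02, re-drawing displacements that
+## would land outside the window:
+##   Y <- rjitter(cells, radius=0.02, retry=TRUE)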
+
diff --git a/R/randomImage.R b/R/randomImage.R
new file mode 100644
index 0000000..7a3185e
--- /dev/null
+++ b/R/randomImage.R
@@ -0,0 +1,21 @@
+#'
+#' randomImage.R
+#'
+#' Functions for generating random images
+#' 
+#'    $Revision: 1.1 $  $Date: 2015/03/23 10:44:04 $
+#'
+#'
+
+rnoise <- function(rgen=runif, w=square(1), ...) {
+  a <- do.call.matched(as.mask, list(w=w, ...), sieve=TRUE)
+  W <- a$result
+  argh <- a$otherargs
+  Z <- as.im(W)
+  n <- sum(W$m)
+  Z[] <- do.call(rgen, append(list(n=n), argh))
+  return(Z)
+}
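+## Usage sketch (editor's illustration): arguments recognised by as.mask
+## (here dimyx) set the pixel raster; the remainder (here sd) are passed
+## to the random generator:
+##   Z <- rnoise(rnorm, square(1), sd=2, dimyx=128)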
+
+  
+  
diff --git a/R/randomNS.R b/R/randomNS.R
new file mode 100644
index 0000000..7601066
--- /dev/null
+++ b/R/randomNS.R
@@ -0,0 +1,410 @@
+##
+##   randomNS.R
+##
+##   simulating from Neyman-Scott processes
+##
+##   $Revision: 1.23 $  $Date: 2015/10/21 09:06:57 $
+##
+##    Original code for rCauchy and rVarGamma by Abdollah Jalilian
+##    Other code and modifications by Adrian Baddeley
+##    Bug fixes by Abdollah, Adrian, and Rolf Turner
+
+rNeymanScott <- 
+  function(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)), ...,
+           lmax=NULL, nsim=1, drop=TRUE, nonempty=TRUE, saveparents=TRUE)
+{
+  ## Generic Neyman-Scott process
+  ## Implementation for bounded cluster radius
+  ##
+
+  ## Catch old argument name rmax for expand
+  if(missing(expand) && !is.null(rmax <- list(...)$rmax))
+    expand <- rmax
+    
+  ## 'rcluster' may be
+  ##
+  ##     (1) a function(x,y, ...) that takes the coordinates
+  ##         (x,y) of the parent point and generates a list(x,y) of offspring
+  ##
+
+  if(is.function(rcluster))
+    return(rPoissonCluster(kappa, expand, rcluster, win, ...,
+                           lmax=lmax, nsim=nsim, drop=drop,
+                           saveparents=saveparents))
+
+  ##     (2) a list(mu, f) where mu is a numeric value, function, or pixel image
+  ##         and f is a function(n, ...) generating n i.i.d. offspring at 0,0
+  
+  if(!(is.list(rcluster) && length(rcluster) == 2))
+    stop("rcluster should be either a function, or a list of two elements")
+  win <- as.owin(win)
+  mu <- rcluster[[1]]
+  rdisplace <- rcluster[[2]]
+  if(is.numeric(mu)) {
+    ## homogeneous
+    if(!(length(mu) == 1 && mu >= 0))
+      stop("rcluster[[1]] should be a single nonnegative number")
+    mumax <- mu
+  } else if(is.im(mu) || is.function(mu)) {
+    ## inhomogeneous
+    if(is.function(mu)) mu <- as.im(mu, W=win, ..., strict=TRUE)
+    mumax <- max(mu)
+  } else stop("rcluster[[1]] should be a number, a function or a pixel image")  
+  if(!is.function(rdisplace))
+    stop("rcluster[[2]] should be a function")
+
+  ## Generate parents in dilated window
+  frame <- boundingbox(win)
+  dilated <- grow.rectangle(frame, expand)
+  if(is.im(kappa) && !is.subset.owin(dilated, as.owin(kappa)))
+    stop(paste("The window in which the image",
+               sQuote("kappa"),
+               "is defined\n",
+               "is not large enough to contain the dilation of the window",
+               sQuote("win")))
+  if(nonempty) {
+    if(is.function(kappa)) {
+      kappa <- as.im(kappa, W=dilated, ..., strict=TRUE)
+      lmax <- NULL
+    }
+    ## intensity of parents with at least one offspring point
+    kappa <- kappa * (1 - exp(-mumax))
+  }
+  ## generate
+  parentlist <- rpoispp(kappa, lmax=lmax, win=dilated, nsim=nsim,
+                        drop=FALSE, warnwin=FALSE)
+
+  resultlist <- vector(mode="list", length=nsim)
+  for(i in 1:nsim) {
+    parents <- parentlist[[i]]
+    
+    np <- npoints(parents)
+    ## generate cluster sizes
+    if(np == 0) {
+      ## no parents - empty pattern
+      result <- ppp(numeric(0), numeric(0), window=win)
+      parentid <- integer(0)
+    } else {
+      if(!nonempty) {
+        ## cluster sizes are Poisson
+        csize <- rpois(np, mumax)
+      } else {
+        ## cluster sizes are Poisson conditional on > 0
+        csize <- qpois(runif(np, min=dpois(0, mumax)), mumax)
+      }
+      noff <- sum(csize)
+      xparent <- parents$x
+      yparent <- parents$y
+      x0 <- rep.int(xparent, csize)
+      y0 <- rep.int(yparent, csize)
+      ## invoke random generator
+      dd <- rdisplace(noff, ...)
+      mm <- if(is.ppp(dd)) marks(dd) else NULL
+      ## validate
+      xy <- xy.coords(dd)
+      dx <- xy$x
+      dy <- xy$y
+      if(!(length(dx) == noff))
+        stop("rcluster returned the wrong number of points")
+      ## create offspring and offspring-to-parent map
+      xoff <- x0 + dx
+      yoff <- y0 + dy
+      parentid <- rep.int(1:np, csize)
+      ## trim to window
+      retain <- inside.owin(xoff, yoff, win)
+      if(is.im(mu))
+        retain[retain] <- inside.owin(xoff[retain], yoff[retain], as.owin(mu))
+      xoff <- xoff[retain]
+      yoff <- yoff[retain]
+      parentid <- parentid[retain]
+      if(!is.null(mm)) mm <- marksubset(mm, retain)
+      ## done
+      result <- ppp(xoff, yoff, window=win, check=FALSE, marks=mm)
+    }
+
+    if(is.im(mu)) {
+      ## inhomogeneously modulated clusters a la Waagepetersen
+      P <- eval.im(mu/mumax)
+      result <- rthin(result, P)
+    }
+
+    if(saveparents) {
+      attr(result, "parents") <- parents
+      attr(result, "parentid") <- parentid
+      attr(result, "expand") <- expand
+    }
+    
+    resultlist[[i]] <- result
+  }
+
+  if(nsim == 1 && drop) return(resultlist[[1]])
+  names(resultlist) <- paste("Simulation", 1:nsim)
+  return(as.solist(resultlist))
+}  
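+## Usage sketch (editor's illustration): clusters of mean size 5 spread
+## uniformly over a square of side 0.1 around each parent; 'expand' must
+## cover the maximum displacement radius, here 0.05*sqrt(2):
+##   off <- function(n) cbind(runif(n, -0.05, 0.05), runif(n, -0.05, 0.05))
+##   X <- rNeymanScott(kappa=10, expand=0.05*sqrt(2), rcluster=list(5, off))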
+
+rMatClust <- local({
+  
+  ## like runifdisc but returns only the coordinates
+  rundisk <- function(n, radius) {
+    R <- radius * sqrt(runif(n, min=0, max=1))
+    Theta <- runif(n, min=0, max=2*pi)
+    cbind(R * cos(Theta), R * sin(Theta))
+  }
+
+  rMatClust <- 
+  function(kappa, scale, mu, win = owin(c(0,1),c(0,1)),
+           nsim=1, drop=TRUE, saveLambda=FALSE, expand = scale, ...,
+           poisthresh=1e-6, saveparents=TRUE) {
+    ## Matern Cluster Process with Poisson (mu) offspring distribution
+    ## Catch old scale syntax (r)
+    if(missing(scale)) scale <- list(...)$r
+    check.1.real(scale)
+    stopifnot(scale > 0)
+
+    ## trap case of large clusters, close to Poisson
+    kok <- is.numeric(kappa) || is.im(kappa)
+    if(kok) {
+      kappamax <- max(kappa)
+    } else {
+      kim <- as.im(kappa, W=win, ..., strict=TRUE)
+      kra <- range(kim)
+      kappamax <- kra[2] + 0.05 * diff(kra)
+    }
+    if(1/(pi * kappamax * scale^2) < poisthresh) {
+      kapmu <- mu * (if(kok) kappa else kim)
+      result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE)
+      return(result)
+    }
+
+    result <- rNeymanScott(kappa, scale, list(mu, rundisk), win, radius=scale,
+                           nsim=nsim, drop=FALSE,
+                           saveparents = saveparents || saveLambda)
+    if(saveLambda){
+      for(i in 1:nsim) {
+        parents <- attr(result[[i]], "parents")
+        Lambda <- clusterfield("MatClust", parents, scale=scale, mu=mu, ...)
+        attr(result[[i]], "Lambda") <- Lambda[win]
+      }
+    }
+    return(if(nsim == 1 && drop) result[[1]] else result)
+  }
+
+  rMatClust
+})
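+## Usage sketch (editor's illustration): a Matern cluster process with
+## 20 parents per unit area, cluster radius 0.05 and mean cluster size 4:
+##   X <- rMatClust(kappa=20, scale=0.05, mu=4)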
+
+                  
+rThomas <- local({
+
+  ## random displacements
+  gaus <- function(n, sigma) {
+    matrix(rnorm(2 * n, mean=0, sd=sigma), ncol=2)
+  }
+
+  ## main function
+  rThomas <-
+      function(kappa, scale, mu, win = owin(c(0,1),c(0,1)), nsim=1, drop=TRUE, 
+               saveLambda=FALSE, expand = 4*scale, ...,
+               poisthresh=1e-6, saveparents=TRUE) {
+      ## Thomas process with Poisson(mu) number of offspring
+      ## at isotropic Normal(0,sigma^2) displacements from parent
+      ##
+      ## Catch old scale syntax (sigma)
+      if(missing(scale)) scale <- list(...)$sigma
+      check.1.real(scale)
+      stopifnot(scale > 0)
+
+      ## trap case of large clusters, close to Poisson
+      kok <- is.numeric(kappa) || is.im(kappa)
+      if(kok) {
+        kappamax <- max(kappa)
+      } else {
+        kim <- as.im(kappa, W=win, ..., strict=TRUE)
+        kra <- range(kim)
+        kappamax <- kra[2] + 0.05 * diff(kra)
+      }
+      if(1/(4*pi * kappamax * scale^2) < poisthresh) {
+        kapmu <- mu * (if(kok) kappa else kim)
+        result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE)
+        return(result)
+      }
+      
+      ## determine the maximum radius of clusters
+      if(missing(expand))
+          expand <- clusterradius("Thomas", scale = scale, ...)
+
+      result <- rNeymanScott(kappa, expand, list(mu, gaus),
+                             win, sigma=scale,
+                             nsim=nsim, drop=FALSE,
+                             saveparents = saveparents || saveLambda)  
+      if(saveLambda){
+        for(i in 1:nsim) {
+          parents <- attr(result[[i]], "parents")
+          Lambda <- clusterfield("Thomas", parents, scale=scale, mu=mu, ...)
+          attr(result[[i]], "Lambda") <- Lambda[win]
+        }
+      }
+      return(if(nsim == 1 && drop) result[[1]] else result)
+    }
+
+  rThomas
+})
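+## Usage sketch (editor's illustration): a Thomas process with Gaussian
+## spread sigma = 0.04; saveLambda=TRUE attaches the driving intensity:
+##   X <- rThomas(kappa=15, scale=0.04, mu=5, saveLambda=TRUE)
+##   L <- attr(X, "Lambda")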
+
+
+## ================================================
+## Neyman-Scott process with Cauchy kernel function
+## ================================================
+
+## scale / omega: scale parameter of Cauchy kernel function
+## eta: scale parameter of Cauchy pair correlation function
+## eta = 2 * omega
+
+rCauchy <- local({
+
+  ## simulate mixture of normals with inverse-gamma distributed variance
+  rnmix.invgam <- function(n = 1, rate) {
+    V <- matrix(rnorm(2 * n, 0, 1), nrow = n, ncol = 2)
+    s <- 1/rgamma(n, shape=1/2, rate=rate)
+    return(sqrt(s) * V)
+  }
+
+  ## main function
+  rCauchy <- function (kappa, scale, mu, win = owin(), thresh = 0.001,
+                       nsim=1, drop=TRUE, saveLambda=FALSE, expand = NULL,
+                       ..., poisthresh=1e-6, saveparents=TRUE) {
+    ## scale / omega: scale parameter of Cauchy kernel function
+    ## eta: scale parameter of Cauchy pair correlation function
+
+    ## Catch old scale syntax (omega)
+    dots <- list(...)
+    if(missing(scale)) scale <- dots$omega
+    
+    ## Catch old name 'eps' for 'thresh':
+    if(missing(thresh))
+        thresh <- dots$eps %orifnull% 0.001
+
+    ## trap case of large clusters, close to Poisson
+    kok <- is.numeric(kappa) || is.im(kappa)
+    if(kok) {
+      kappamax <- max(kappa)
+    } else {
+      kim <- as.im(kappa, W=win, ..., strict=TRUE)
+      kra <- range(kim)
+      kappamax <- kra[2] + 0.05 * diff(kra)
+    }
+    if(1/(pi * kappamax * scale^2) < poisthresh) {
+      kapmu <- mu * (if(kok) kappa else kim)
+      result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE)
+      return(result)
+    }
+    
+    ## determine the maximum radius of clusters
+    if(missing(expand)){
+        expand <- clusterradius("Cauchy", scale = scale, thresh = thresh, ...)
+    } else if(!missing(thresh)){
+        warning("Argument ", sQuote("thresh"), " is ignored when ", sQuote("expand"), " is given")
+    }
+
+    ## simulate
+    result <- rNeymanScott(kappa, expand,
+                           list(mu, rnmix.invgam),
+                           win, rate = scale^2/2, nsim=nsim, drop=FALSE,
+                           saveparents = saveparents || saveLambda)
+    ## correction from Abdollah: the rate is beta = omega^2 / 2 = eta^2 / 8.
+    if(saveLambda){
+      for(i in 1:nsim) {
+        parents <- attr(result[[i]], "parents")
+        Lambda <- clusterfield("Cauchy", parents, scale=scale, mu=mu, ...)
+        attr(result[[i]], "Lambda") <- Lambda[win]
+      }
+    }
+    return(if(nsim == 1 && drop) result[[1]] else result)
+  }
+
+  rCauchy
+})
+
+##    
+## =================================================================
+## Neyman-Scott process with Variance Gamma (Bessel) kernel function
+## =================================================================
+
+## nu.ker: smoothness parameter of Variance Gamma kernel function
+## omega: scale parameter of kernel function
+## nu.pcf: smoothness parameter of Variance Gamma pair correlation function
+## eta: scale parameter of Variance Gamma pair correlation function
+## nu.pcf = 2 * nu.ker + 1    and    eta = omega
+
+rVarGamma <- local({
+  
+  ## simulates mixture of isotropic Normal points in 2D with gamma variances
+  rnmix.gamma <- function(n = 1, shape, rate) {
+    V <- matrix(rnorm(2 * n, 0, 1), nrow = n, ncol = 2)
+    s <- rgamma(n, shape=shape, rate=rate)
+    return(sqrt(s) * V)
+  }
+
+  ## main function
+  rVarGamma <- function (kappa, nu, scale, mu, win = owin(),
+                         thresh = 0.001, nsim=1, drop=TRUE, saveLambda=FALSE,
+                         expand = NULL, ..., poisthresh=1e-6,
+                         saveparents=TRUE) {
+    ## nu / nu.ker: smoothness parameter of Variance Gamma kernel function
+    ## scale / omega: scale parameter of kernel function
+    ## Catch old nu.ker/nu.pcf syntax and resolve nu-value.
+    dots <- list(...)
+    if(missing(nu)){
+        nu <- resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf)$nu.ker
+    } else{
+        check.1.real(nu)
+        stopifnot(nu > -1/2)
+    }
+    ## Catch old scale syntax (omega)
+    if(missing(scale)) scale <- dots$omega
+    
+    ## Catch old name 'eps' for 'thresh':
+    if(missthresh <- missing(thresh))
+        thresh <- dots$eps %orifnull% 0.001
+
+    ## trap case of large clusters, close to Poisson
+    kok <- is.numeric(kappa) || is.im(kappa)
+    if(kok) {
+      kappamax <- max(kappa)
+    } else {
+      kim <- as.im(kappa, W=win, ..., strict=TRUE)
+      kra <- range(kim)
+      kappamax <- kra[2] + 0.05 * diff(kra)
+    }
+    if(1/(4 * pi * kappamax * scale^2) < poisthresh) {
+      kapmu <- mu * (if(kok) kappa else kim)
+      result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE)
+      return(result)
+    }
+    
+    ## determine the maximum radius of clusters
+    if(missing(expand)){
+        expand <- clusterradius("VarGamma", scale = scale, nu = nu,
+                             thresh = thresh, ...)
+    } else if(!missthresh){
+        warning("Argument ", sQuote("thresh"), " is ignored when ", sQuote("expand"), " is given")
+    }
+
+    ## simulate
+    result <- rNeymanScott(kappa, expand,
+                           list(mu, rnmix.gamma), win,
+##                          WAS:  shape = 2 * (nu.ker + 1)
+                           shape = nu + 1,
+                           rate = 1/(2 * scale^2),
+                           nsim=nsim, drop=FALSE,
+                           saveparents = saveparents || saveLambda)
+    if(saveLambda){
+      for(i in 1:nsim) {
+        parents <- attr(result[[i]], "parents")
+        Lambda <- clusterfield("VarGamma", parents, scale=scale,
+                               nu=nu, mu=mu, ...)
+        attr(result[[i]], "Lambda") <- Lambda[win]
+      }
+    }
+    return(if(nsim == 1 && drop) result[[1]] else result)
+  }
+
+  rVarGamma
+})
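+## Usage sketch (editor's illustration): a Variance Gamma cluster process;
+## the smoothness parameter must satisfy nu > -1/2:
+##   X <- rVarGamma(kappa=20, nu=0.5, scale=0.05, mu=5)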
diff --git a/R/randomlpp.R b/R/randomlpp.R
new file mode 100755
index 0000000..815a70d
--- /dev/null
+++ b/R/randomlpp.R
@@ -0,0 +1,101 @@
+#
+#  randomlpp.R
+#
+#  Random point pattern generators for a linear network
+#
+#  $Revision: 1.9 $   $Date: 2016/11/23 07:25:50 $
+#
+
+rpoislpp <- function(lambda, L, ..., nsim=1, drop=TRUE) {
+  if(missing(L) || is.null(L)) {
+    if(!inherits(lambda, c("linim", "linfun")))
+      stop("L is missing", call.=FALSE)
+    L <- as.linnet(lambda)
+  } else verifyclass(L, "linnet")
+  result <- vector(mode="list", length=nsim)
+  S <- as.psp(L)
+  bugout <- (nsim == 1) && drop
+  for(i in seq_len(nsim)) {
+    X <- datagen.rpoisppOnLines(lambda, S, ...)
+    Y <- lpp(X, L)
+    if(bugout) return(Y)
+    result[[i]] <- Y
+  }
+  result <- as.solist(result)
+  if(nsim > 0) names(result) <- paste("Simulation", 1:nsim)
+  return(result)
+}
+
+runiflpp <- function(n, L, nsim=1, drop=TRUE) {
+  verifyclass(L, "linnet")
+  result <- vector(mode="list", length=nsim)
+  S <- as.psp(L)
+  bugout <- (nsim == 1) && drop
+  for(i in seq_len(nsim)) {
+    X <- datagen.runifpointOnLines(n, S)
+    Y <- lpp(X, L)
+    if(bugout) return(Y)
+    result[[i]] <- Y
+  }
+  result <- as.solist(result)
+  if(nsim > 0) names(result) <- paste("Simulation", 1:nsim)
+  return(result)
+}
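+## Usage sketch (editor's illustration, using the 'simplenet' network that
+## ships with spatstat):
+##   X <- runiflpp(20, simplenet)   # 20 uniform random points
+##   Y <- rpoislpp(5, simplenet)    # Poisson, intensity 5 per unit length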
+
+rlpp <- function(n, f, ..., nsim=1, drop=TRUE) {
+  if(inherits(f, "linfun")) 
+    f <- as.linim(f, ...)
+  if(!inherits(f, "linim") && is.list(f) &&
+     all(sapply(f, inherits, what=c("linim", "linfun")))) {
+    #' f is a list of densities for each type of point
+    stopifnot(length(n) == length(f))
+    Y <- mapply(rlpp, n=as.list(n), f=f,
+                MoreArgs=list(nsim=nsim, drop=FALSE, ...),
+                SIMPLIFY=FALSE)
+    Z <- do.call(mapply, c(list(superimpose), Y, list(SIMPLIFY=FALSE)))
+    if(nsim == 1 && drop) return(Z[[1]])
+    return(as.solist(Z))
+  }
+  if(!inherits(f, "linim"))
+    stop("f should be a linfun or linim object")
+  if(length(n) > 1) {
+    flist <- rep(list(f), length(n))
+    return(rlpp(n, flist, nsim=nsim, drop=drop, ...))
+  }
+  check.1.integer(nsim)
+  if(nsim <= 0) return(list())
+  #' extract data
+  L <- as.linnet(f)
+  df <- attr(f, "df")
+  seglen <- lengths.psp(as.psp(L))
+  #' sort into segments, left-to-right within segments
+  df <- df[order(df$mapXY, df$tp), , drop=FALSE]
+  nr <- nrow(df)
+  fvals <- df$values
+  if(anyNA(fvals)) stop("f has some NA values")
+  if(min(fvals) < 0) stop("f has some negative values")
+  #' find interval corresponding to each sample point
+  sameseg <- (diff(df$mapXY) == 0)
+  sharenext     <- c(sameseg, FALSE)
+  shareprevious <- c(FALSE, sameseg)
+  tcur   <- df$tp
+  tnext  <- c(tcur[-1], NA)
+  tprev  <- c(NA, tcur[-nr])
+  tleft  <- ifelse(shareprevious, (tcur + tprev)/2, 0)
+  tright <- ifelse(sharenext,     (tcur + tnext)/2, 1)
+  #' compute probability of each interval
+  probs <- fvals * (tright - tleft) * seglen[df$mapXY]
+  probs <- probs/sum(probs)
+  #' 
+  result <- list()
+  for(isim in 1:nsim) {
+    #' sample intervals and place point uniformly in each interval
+    ii <- sample.int(nr, size=n, replace=TRUE, prob=probs)
+    seg <- df[ii, "mapXY"]
+    tp  <- runif(n, tleft[ii], tright[ii])
+    result[[isim]] <- as.lpp(seg=seg, tp=tp, L=L)
+  }
+  if(nsim == 1 && drop) return(result[[1]])
+  names(result) <- paste("Simulation", 1:nsim)
+  return(as.solist(result))
+}
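+## Usage sketch (editor's illustration): sample from an unnormalised
+## density on a network, here proportional to the x coordinate:
+##   f <- linfun(function(x, y, seg, tp) { x }, simplenet)
+##   X <- rlpp(50, f)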
diff --git a/R/randommk.R b/R/randommk.R
new file mode 100755
index 0000000..066516e
--- /dev/null
+++ b/R/randommk.R
@@ -0,0 +1,477 @@
+#
+#
+#   randommk.R
+#
+#   Random generators for MULTITYPE point processes
+#
+#   $Revision: 1.37 $   $Date: 2017/01/08 06:24:51 $
+#
+#   rmpoispp()   random marked Poisson pp
+#   rmpoint()    n independent random marked points
+#   rmpoint.I.allim()  ... internal
+#   rpoint.multi()   temporary wrapper 
+#
+rmpoispp <- local({
+
+  ## Argument checking
+  is.numvector <- function(x) {is.numeric(x) && is.vector(x)}
+  is.constant <- function(x) {is.numvector(x) && length(x) == 1}
+  checkone <- function(x) {
+    if(is.constant(x)) {
+      if(x >= 0) return(TRUE) else stop("Intensity is negative!")
+    }
+    return(is.function(x) || is.im(x))
+  }
+
+  ## Ensure that m can be passed as a single value to function(x,y,m,...)
+  slice.fun <- function(x,y,fun,mvalue, ...) {
+    m <- if(length(mvalue) == 1) rep.int(mvalue, length(x)) else mvalue
+    result <- fun(x,y,m, ...)
+    return(result)
+  }
+
+  ## Main function
+  rmpoispp <- 
+    function(lambda, lmax=NULL, win = owin(c(0,1),c(0,1)),
+             types, ..., nsim=1, drop=TRUE, warnwin=!missing(win)) {
+      ## arguments:
+      ##     lambda  intensity:
+      ##                constant, function(x,y,m,...), image,
+      ##                vector, list of function(x,y,...) or list of images
+      ##
+      ##     lmax     maximum possible value of lambda
+      ##                constant, vector, or list
+      ##
+      ##     win     default observation window (of class 'owin')
+      ##
+      ##     types    possible types for multitype pattern
+      ##    
+      ##     ...     extra arguments passed to lambda()
+      ##
+
+      if(missing(types)) types <- NULL
+      force(warnwin)
+      
+      if(nsim > 1) {
+        result <- vector(mode="list", length=nsim)
+        for(i in 1:nsim)
+          result[[i]] <- rmpoispp(lambda, lmax, win, types, ...,
+                                  warnwin=warnwin)
+        names(result) <- paste("Simulation", 1:nsim)
+        return(as.solist(result))
+      }
+      
+      ## Validate arguments
+      single.arg <- checkone(lambda)
+      vector.arg <- !single.arg && is.numvector(lambda) 
+      list.arg <- !single.arg && is.list(lambda)
+      if(! (single.arg || vector.arg || list.arg))
+        stop(paste("argument", sQuote("lambda"), "not understood"))
+    
+      if(list.arg && !all(unlist(lapply(lambda, checkone))))
+        stop(paste("Each entry in the list",
+                   sQuote("lambda"),
+                   "must be either a constant, a function or an image"))
+      if(vector.arg && any(lambda < 0))
+        stop(paste("Some entries in the vector",
+                   sQuote("lambda"), "are negative"))
+
+      ## Determine & validate the set of possible types
+      if(is.null(types)) {
+        if(single.arg) {
+          stop(paste(sQuote("types"), "must be given explicitly when",
+                     sQuote("lambda"), "is a constant, a function or an image"))
+        } else if(!is.null(nama <- names(lambda)) &&
+                  sum(nzchar(nama)) == length(lambda)) {
+          types <- nama
+        } else {
+          types <- seq_along(lambda)
+        }
+      } 
+
+      ntypes <- length(types)
+      if(!single.arg && (length(lambda) != ntypes))
+        stop(paste("The lengths of", sQuote("lambda"),
+                   "and", sQuote("types"), "do not match"))
+
+      factortype <- factor(types, levels=types)
+
+      ## Validate `lmax'
+      if(! (is.null(lmax) || is.numvector(lmax) || is.list(lmax) ))
+        stop(paste(sQuote("lmax"),
+                   "should be a constant, a vector, a list or NULL"))
+       
+      ## coerce lmax to a vector, to avoid confusion
+      if(is.null(lmax))
+        maxes <- rep(NULL, ntypes)  ## rep(NULL, k) is NULL; maxes[i] is then NULL
+      else if(is.numvector(lmax) && length(lmax) == 1)
+        maxes <- rep.int(lmax, ntypes)
+      else if(length(lmax) != ntypes)
+        stop(paste("The length of",
+                   sQuote("lmax"),
+                   "does not match the number of possible types"))
+      else if(is.list(lmax))
+        maxes <- unlist(lmax)
+      else maxes <- lmax
+
+      ## coerce lambda to a list, to avoid confusion
+      lam <- if(single.arg) rep(list(lambda), ntypes) else
+             if(vector.arg) as.list(lambda) else lambda
+
+      ## Simulate
+      for(i in 1:ntypes) {
+        if(single.arg && is.function(lambda)) {
+          ## call f(x,y,m, ...)
+          Y <- rpoispp(slice.fun, lmax=maxes[i], win=win,
+                       fun=lambda, mvalue=types[i], ..., warnwin=warnwin)
+        } else {
+          ## call f(x,y, ...) or use other formats
+          Y <- rpoispp(lam[[i]], lmax=maxes[i], win=win, ..., warnwin=warnwin)
+        }
+        Y <- Y %mark% factortype[i]
+        X <- if(i == 1) Y else superimpose(X, Y, W=X$window, check=FALSE)
+      }
+
+      ## Randomly permute, just in case the order is important
+      permu <- sample(X$n)
+      X <- X[permu]
+      return(if(drop) X else solist(X))
+    }
+
+  rmpoispp
+})
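+## Usage sketch (editor's illustration): a two-type Poisson pattern; the
+## names of the intensity vector become the types of the pattern:
+##   X <- rmpoispp(c(A=50, B=20))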
+
+## ------------------------------------------------------------------------
+
+rmpoint <- local({
+
+  ## argument validation
+  is.numvector <- function(x) {is.numeric(x) && is.vector(x)}
+  is.constant <- function(x) {is.numvector(x) && length(x) == 1}
+  checkone <- function(x) {
+    if(is.constant(x)) {
+      if(x >= 0) return(TRUE) else stop("Intensity is negative!")
+    }
+    return(is.function(x) || is.im(x))
+  }
+
+  ## numerically integrate a function of (x,y) over the window
+  integratexy <- function(f, win, ...) {
+    imag <- as.im(f, W=win, ...)
+    integral.im(imag)
+  }
+  ## create a counterpart of f(x,y,m) that works when m is a single value
+  funwithfixedmark <- function(xx, yy, ..., m, fun) {
+    mm <- rep.int(m, length(xx))
+    fun(xx, yy, mm, ...)
+  }
+  integratewithfixedmark <- function(m, fun, win, ...) {
+    integratexy(funwithfixedmark, win=win, m=m, fun=fun, ...)
+  }
+
+  # Main function
+  rmpoint <- function(n, f=1, fmax=NULL, 
+                      win = unit.square(), 
+                      types, ptypes, ...,
+                      giveup = 1000, verbose = FALSE,
+                      nsim = 1, drop=TRUE) {
+    if(!is.numeric(n))
+      stop("n must be a scalar or vector")
+    if(any(ceiling(n) != floor(n)))
+      stop("n must be an integer or integers")
+    if(any(n < 0))
+      stop("n must be non-negative")
+    if(missing(types)) types <- NULL
+    if(missing(ptypes)) ptypes <- NULL
+
+    if(nsim > 1) {
+      result <- vector(mode="list", length=nsim)
+      for(i in 1:nsim)
+        result[[i]] <- rmpoint(n, f, fmax, win, types, ptypes, ...,
+                               giveup=giveup, verbose=verbose)
+      names(result) <- paste("Simulation", 1:nsim)
+      return(as.solist(result))
+    }
+      
+    if(sum(n) == 0) {
+      nopoints <- ppp(x=numeric(0), y=numeric(0), window=win, check=FALSE)
+      if(!is.null(types)) {
+        nomarks <- factor(types[numeric(0)], levels=types)
+        nopoints <- nopoints %mark% nomarks
+      }
+      return(if(drop) nopoints else solist(nopoints))
+    }         
+    #############
+  
+    Model <- if(length(n) == 1) {
+      if(is.null(ptypes)) "I" else "II"
+    } else "III"
+  
+    ##############  Validate f argument
+    single.arg <- checkone(f)
+    vector.arg <- !single.arg && is.numvector(f) 
+    list.arg <- !single.arg && is.list(f)
+    if(! (single.arg || vector.arg || list.arg))
+      stop(paste("argument", sQuote("f"), "not understood"))
+    
+    if(list.arg && !all(unlist(lapply(f, checkone))))
+      stop(paste("Each entry in the list", sQuote("f"),
+                 "must be either a constant, a function or an image"))
+    if(vector.arg && any(f < 0))
+      stop(paste("Some entries in the vector",
+                 sQuote("f"), "are negative"))
+    
+    ## cases where it's known that all types of points 
+    ## have the same conditional density of location (x,y)
+    const.density <- vector.arg ||
+                     (list.arg && all(unlist(lapply(f, is.constant))))
+    same.density <- const.density || (single.arg && !is.function(f))
+
+    ################   Determine & validate the set of possible types
+    if(is.null(types)) {
+      if(single.arg && length(n) == 1)
+        stop(paste(sQuote("types"), "must be given explicitly when",
+                   sQuote("f"),
+                   "is a single number, a function or an image and",
+                   sQuote("n"), "is a single number"))
+      else {
+        basis <- if(single.arg) n else f
+        if(!is.null(nama <- names(basis)) &&
+           sum(nzchar(nama)) == length(basis)) {
+          types <- nama
+        } else {
+          types <- seq_along(basis)
+        }
+      }
+    }
+
+    ntypes <- length(types)
+    if(!single.arg && (length(f) != ntypes))
+      stop(paste("The lengths of",
+                 sQuote("f"), "and", sQuote("types"),
+                 "do not match"))
+    if(length(n) > 1 && ntypes != length(n))
+      stop(paste("The lengths of",
+                 sQuote("n"), "and", sQuote("types"),
+                 "do not match"))
+
+    factortype <- factor(types, levels=types)
+  
+    #######################  Validate `fmax'
+    if(! (is.null(fmax) || is.numvector(fmax) || is.list(fmax) ))
+      stop(paste(sQuote("fmax"),
+                 "should be a constant, a vector, a list or NULL"))
+       
+    ## coerce fmax to a vector, to avoid confusion
+    if(is.null(fmax))
+      maxes <- rep(NULL, ntypes)  ## rep(NULL, k) is NULL; maxes[i] is then NULL
+    else if(is.constant(fmax))
+      maxes <- rep.int(fmax, ntypes)
+    else if(length(fmax) != ntypes)
+      stop(paste("The length of", sQuote("fmax"),
+                 "does not match the number of possible types"))
+    else if(is.list(fmax))
+      maxes <- unlist(fmax)
+    else maxes <- fmax
+
+    ## coerce f to a list, to avoid confusion
+    flist <- if(single.arg) rep(list(f), ntypes) else
+             if(vector.arg) as.list(f) else f
+
+    #################### START ##################################
+
+    ## special algorithm for Model I when all f[[i]] are images
+
+    if(Model == "I" && !same.density && all(unlist(lapply(flist, is.im)))) {
+      X <- rmpoint.I.allim(n, flist, types)
+      return(if(drop) X else solist(X))
+    }
+
+    ## otherwise, first select types, then locations given types
+  
+    if(Model == "I") {
+      ## Compute approximate marginal distribution of type
+      if(vector.arg)
+        ptypes <- f/sum(f)
+      else if(list.arg) {
+        fintegrals <- unlist(lapply(flist, integratexy, win=win, ...))
+        ptypes <- fintegrals/sum(fintegrals)
+      } else {
+        ## single argument
+        if(is.constant(f)) {
+          ptypes <- rep.int(1/ntypes, ntypes)
+        } else {
+          ## f is a function (x,y,m)
+          ## convert to images and integrate
+          fintegrals <- unlist(lapply(types,
+                                      integratewithfixedmark,
+                                      win=win, fun=f, ...))
+          ## normalise
+          ptypes <- fintegrals/sum(fintegrals)
+        }
+      }
+    }
+
+    ## Generate marks 
+
+    if(Model == "I" || Model == "II") {
+      ## i.i.d.: n marks with distribution 'ptypes'
+      marques <- sample(factortype, n, prob=ptypes, replace=TRUE)
+      nn <- table(marques)
+    } else {
+      ## multinomial: fixed number n[i] of types[i]
+      repmarks <- factor(rep.int(types, n), levels=types)
+      marques <- sample(repmarks)
+      nn <- n
+    }
+    ntot <- sum(nn)
+
+    ##############  SIMULATE !!!  #########################
+
+    ## If all types have the same conditional density of location,
+    ## generate the locations using rpoint, and return.
+    if(same.density) {
+      X <- rpoint(ntot, flist[[1]], maxes[[1]], win=win, ...,
+                  giveup=giveup, verbose=verbose)
+      X <- X %mark% marques
+      return(if(drop) X else solist(X))
+    }
+    ## Otherwise invoke rpoint() for each type separately
+    X <- ppp(numeric(ntot), numeric(ntot), window=win, marks=marques,
+              check=FALSE)
+
+    for(i in 1:ntypes) {
+      if(verbose) cat(paste("Type", i, "\n"))
+      if(single.arg && is.function(f)) {
+        ## want to call f(x,y,m, ...)
+        Y <- rpoint(nn[i], funwithfixedmark, fmax=maxes[i], win=win,
+                    ..., m=factortype[i], fun=f, giveup=giveup, verbose=verbose)
+      } else {
+        ## call f(x,y, ...) or use other formats
+        Y <- rpoint(nn[i], flist[[i]], fmax=maxes[i], win=win,
+                    ..., giveup=giveup, verbose=verbose)
+      }
+      Y <- Y %mark% factortype[i]
+      X[marques == factortype[i]] <- Y
+    }
+    return(if(drop) X else solist(X))
+  }
+
+  rmpoint
+})
+
+rmpoint.I.allim <- local({
+
+  ## Extract pixel coordinates and probabilities
+  get.stuff <- function(imag) {
+    w <- as.mask(as.owin(imag))
+    dx <- w$xstep
+    dy <- w$ystep
+    rxy <- rasterxy.mask(w, drop=TRUE)
+    xpix <- rxy$x
+    ypix <- rxy$y
+    ppix <- as.vector(imag$v[w$m]) ## not normalised - OK
+    npix <- length(xpix)
+    return(list(xpix=xpix, ypix=ypix, ppix=ppix,
+                dx=rep.int(dx,npix), dy=rep.int(dy, npix),
+                npix=npix))
+  }
+
+  rmpoint.I.allim <- function(n, f, types) {
+    ## Internal use only!
+    ## Generates random marked points (Model I *only*)
+    ## when all f[[i]] are pixel images.
+    ##
+    stuff <- lapply(f, get.stuff)
+    ## Concatenate into loooong vectors
+    xpix <- unlist(lapply(stuff, getElement, name="xpix"))
+    ypix <- unlist(lapply(stuff, getElement, name="ypix"))
+    ppix <- unlist(lapply(stuff, getElement, name="ppix"))
+    dx   <- unlist(lapply(stuff, getElement, name="dx"))
+    dy   <- unlist(lapply(stuff, getElement, name="dy"))
+    ## replicate types
+    numpix <- unlist(lapply(stuff, getElement, name="npix"))
+    tpix <- rep.int(seq_along(types), numpix)
+    ##
+    ## sample pixels from union of all images
+    ##
+    npix <- sum(numpix)
+    id <- sample(npix, n, replace=TRUE, prob=ppix)
+    ## get pixel centre coordinates and randomise within pixel
+    x <- xpix[id] + (runif(n) - 1/2) * dx[id]
+    y <- ypix[id] + (runif(n) - 1/2) * dy[id]
+    ## compute types
+    marx <- factor(types[tpix[id]],levels=types)
+    ## et voila!
+    return(ppp(x, y, window=as.owin(f[[1]]), marks=marx, check=FALSE))
+  }
+
+  rmpoint.I.allim
+})
+
+##
+##     wrapper for Rolf's function
+##
+rpoint.multi <- function (n, f, fmax=NULL, marks = NULL,
+                          win = unit.square(),
+                          giveup = 1000, verbose = FALSE,
+                          warn=TRUE, nsim=1, drop=TRUE) {
+  if(nsim > 1) {
+    result <- vector(mode="list", length=nsim)
+    for(i in 1:nsim)
+      result[[i]] <- rpoint.multi(n, f, fmax, marks, win, giveup, verbose)
+    names(result) <- paste("Simulation", 1:nsim)
+    return(as.solist(result))
+  }
+  
+  no.marks <- is.null(marks) ||
+               (is.factor(marks) && length(levels(marks)) == 1)
+  if(warn) {
+    nhuge <- spatstat.options("huge.npoints")
+    if(n > nhuge)
+      warning(paste("Attempting to generate", n, "random points"))
+  }
+  ## unmarked case
+  if (no.marks) {
+    X <- if(is.function(f)) {
+      rpoint(n, f, fmax, win, giveup=giveup, verbose=verbose)
+    } else {
+      rpoint(n, f, fmax, giveup=giveup, verbose=verbose)
+    }
+    return(if(drop) X else solist(X))
+  }
+  ## multitype case
+  if(length(marks) != n)
+    stop("length of marks vector != n")
+  if(!is.factor(marks))
+    stop("marks should be a factor")
+  types <- levels(marks)
+  types <- factor(types, levels=types)
+  ## generate required number of points of each type
+  nums <- table(marks)
+  X <- rmpoint(nums, f, fmax, win=win, types=types,
+               giveup=giveup, verbose=verbose)
+  if(any(table(marks(X)) != nums))
+    stop("Internal error: output of rmpoint illegal")
+  ## reorder them to correspond to the desired 'marks' vector
+  Y <- X
+  Xmarks <- marks(X)
+  for(ty in types) {
+    to   <- (marks == ty)
+    from <- (Xmarks == ty)
+    if(sum(to) != sum(from))
+      stop(paste("Internal error: mismatch for mark =", ty))
+    if(any(to)) {
+      Y$x[to] <- X$x[from]
+      Y$y[to] <- X$y[from]
+      Y$marks[to] <- ty
+    }
+  }
+  return(if(drop) Y else solist(Y))
+}
+
+
+  
+  
+
+    
diff --git a/R/randomonlines.R b/R/randomonlines.R
new file mode 100755
index 0000000..0f9c974
--- /dev/null
+++ b/R/randomonlines.R
@@ -0,0 +1,220 @@
+#
+# randomOnLines.R
+#
+# $Revision: 1.8 $  $Date: 2014/11/17 04:40:14 $
+#
+# Generate random points on specified lines
+#
+
+runifpointOnLines <- function(n, L, nsim=1) {
+  if(!is.numeric(n) || any(n < 0) || any(n %% 1 != 0))
+    stop("n should be a nonnegative integer or integers")
+  if(!is.psp(L))
+    L <- as.psp(L)
+  W <- as.owin(L)
+  result <- vector(mode="list", length=nsim)
+  for(i in 1:nsim) {
+    X <- datagen.runifpointOnLines(n, L)
+    Y <- ppp(X$x, X$y, marks=X$marks, window=W, check=FALSE)
+    result[[i]] <- Y
+  }
+  if(nsim == 1) return(result[[1]])
+  names(result) <- paste("Simulation", 1:nsim)
+  return(as.solist(result))
+}
+
+datagen.runifpointOnLines <- function(n, L) {
+  stopifnot(is.psp(L))
+  m <- length(n)
+  ismarked <- (m > 1)
+  if(m == 0 || (m == 1 && n == 0))
+    return(data.frame(x=numeric(0),
+                      y=numeric(0),
+                      seg=integer(0),
+                      tp=numeric(0)))
+  # extract segment information
+  len <- lengths.psp(L)
+  sumlen <- sum(len)
+  cumlen <- cumsum(len)
+  cum0len <- c(0, cumlen)
+  Ldf <- as.data.frame(L)
+  x0 <- with(Ldf, x0)
+  y0 <- with(Ldf, y0)
+  dx <- with(Ldf, x1-x0)
+  dy <- with(Ldf, y1-y0)
+  # determine mark space
+  if(ismarked) {
+    markvalues <- names(n)
+    if(sum(nzchar(markvalues)) < m)
+      markvalues <- paste(1:m)
+  }
+  # initialise output data.frame
+  out <- data.frame(x=numeric(0), y=numeric(0), seg=integer(0), tp=numeric(0))
+  if(ismarked) 
+    out <- cbind(out, data.frame(marks=character(0)))
+  # generate points of each mark in turn
+  for(j in 1:m) {
+    if(n[[j]] > 0) {
+      # generate random positions
+      uu <- runif(n[[j]], min=0, max=sumlen)
+      # identify segment for each point
+      kk <- findInterval(uu, cum0len, rightmost.closed=TRUE, all.inside=TRUE)
+      # parametric position along segment
+      tt <- (uu - cum0len[kk])/len[kk]
+      tt[!is.finite(tt)] <- 0
+      # convert to (x,y)
+      x <- x0[kk] + tt * dx[kk]
+      y <- y0[kk] + tt * dy[kk]
+      # assemble result
+      if(!ismarked) {
+        out <- data.frame(x=x, y=y, seg=kk, tp=tt)
+      } else {
+        outj <- data.frame(x=x, y=y, seg=kk, tp=tt, marks=markvalues[j])
+        out <- rbind(out, outj)
+      }
+    }
+  }
+  if(ismarked) out$marks <- factor(out$marks, levels=markvalues)
+  return(out)
+}
+
+runifpoisppOnLines <- function(lambda, L, nsim=1) {
+  if(!is.numeric(lambda) || !all(is.finite(lambda) && (lambda >= 0)))
+    stop("lambda should be a finite, nonnegative number or numbers")
+  if(!is.psp(L))
+    L <- as.psp(L)
+  W <- as.owin(L)
+  result <- vector(mode="list", length=nsim)
+  for(i in 1:nsim) {
+    X <- datagen.runifpoisppOnLines(lambda, L)
+    Y <- ppp(X$x, X$y, marks=X$marks, window=W, check=FALSE)
+    result[[i]] <- Y
+  }
+  if(nsim == 1) return(result[[1]])
+  names(result) <- paste("Simulation", 1:nsim)
+  return(as.solist(result))
+}
+
+datagen.runifpoisppOnLines <- function(lambda, L) {
+  stopifnot(is.psp(L))
+  mu <- lambda * sum(lengths.psp(L))
+  ## one count per entry of mu: rpois() takes the number of deviates from
+  ## the length of its first argument when that length exceeds one
+  n <- rpois(rep.int(1, length(mu)), mu)
+  if(length(n) > 1)
+    names(n) <- names(lambda)
+  df <- datagen.runifpointOnLines(n, L)
+  return(df)
+}
+
+rpoisppOnLines <- function(lambda, L, lmax=NULL, ..., nsim=1) {
+  if(!is.psp(L))
+    L <- as.psp(L)
+  W <- as.owin(L)
+  result <- vector(mode="list", length=nsim)
+  for(i in 1:nsim) {
+    X <- datagen.rpoisppOnLines(lambda, L, lmax=lmax, ...)
+    Y <- ppp(X$x, X$y, marks=X$marks, window=W, check=FALSE)
+    result[[i]] <- Y
+  }
+  if(nsim == 1) return(result[[1]])
+  names(result) <- paste("Simulation", 1:nsim)
+  return(as.solist(result))
+}
+
+datagen.rpoisppOnLines <- function(lambda, L, lmax=NULL, ..., check=TRUE)  {
+  stopifnot(is.psp(L))
+  if(is.numeric(lambda)) 
+    return(datagen.runifpoisppOnLines(lambda, L))
+  # ensure lambda is a list
+  if(is.function(lambda) || is.im(lambda))
+    lambda <- list(lambda)
+  m <- length(lambda)
+  # determine type of argument
+  argtype <-
+    if(all(unlist(lapply(lambda, is.im)))) "im" else
+    if(all(unlist(lapply(lambda, is.function)))) "function" else
+    stop(paste(sQuote("lambda"),
+               "must be a numeric vector, a function, an image,",
+               "a list of functions, or a list of images"))
+  # check values of lambda
+  if(argtype == "im") {
+    for(j in seq_len(m)) {
+      lamj <- lambda[[j]]
+      if(!(lamj$type %in% c("real", "integer")))
+        stop("lambda must be numeric-valued or integer-valued")
+      lrange <- range(lamj)
+      if(any(is.infinite(lrange)))
+        stop("Infinite pixel values not permitted")
+      if(lrange[1] < 0)
+        stop("Negative pixel values not permitted")
+    }
+  }
+  # determine uniform bound
+  if(!is.null(lmax)) {
+    stopifnot(is.numeric(lmax))
+    if(length(lmax) != m) {
+      if(length(lmax) == 1) {
+        lmax <- rep.int(lmax, m)
+      } else stop("Length of lmax does not match length of lambda")
+    }
+  } else {
+    # compute lmax
+    lmax <- numeric(m)
+    for(j in seq_len(m)) {
+      lamj <- lambda[[j]]
+      if(is.function(lamj)) {
+        X <- pointsOnLines(L, np=10000)
+        lambdaX <- lamj(X$x, X$y, ...)
+        lmax[j] <- max(lambdaX, na.rm=TRUE)
+      } else if(is.im(lamj)) 
+        lmax[j] <- max(lamj)
+    }
+    if(!all(is.finite(lmax)))
+      stop("Infinite values of lambda obtained")
+    if(any(lmax < 0))
+      stop("Negative upper bound for lambda obtained")
+    names(lmax) <- names(lambda)
+  } 
+  # Lewis-Shedler (rejection) method
+  Y <- datagen.runifpoisppOnLines(lmax, L)
+  n <- nrow(Y)
+  if(n == 0)
+    return(Y)
+  # evaluate lambda at each simulated point
+  if(m == 1) {
+    lambda <- lambda[[1]]
+    markindex <- 1
+    if(is.function(lambda)) 
+      lambdaY <- lambda(Y$x, Y$y, ...)
+    else
+      lambdaY <- safelookup(lambda, as.ppp(Y, W=as.owin(L)))
+  } else {
+    lambdaY <- numeric(n)
+    markindex <- as.integer(Y$marks)
+    for(j in seq_len(m)) {
+      lamj <- lambda[[j]]
+      jrows <- (markindex == j)
+      Yj <- Y[jrows, , drop=FALSE]
+      if(is.function(lamj)) 
+        lambdaY[jrows] <- lamj(Yj$x, Yj$y, ...)
+      else
+        lambdaY[jrows] <- safelookup(lamj, as.ppp(Yj, W=as.owin(L)))
+    }
+  }
+  lambdaY[is.na(lambdaY)] <- 0
+  # accept/reject
+  pY <- lambdaY/lmax[markindex]
+  if(check) {
+    if(any(pY < 0))
+      warning("Negative values of lambda obtained")
+    if(any(pY > 1))
+      warning("lmax is not an upper bound for lambda")
+  }
+  retain <- (runif(n) < pY)
+  Y <- Y[retain, , drop=FALSE]
+  return(Y)
+}
+
+      
+  
+  
+  
diff --git a/R/randomseg.R b/R/randomseg.R
new file mode 100755
index 0000000..89628f1
--- /dev/null
+++ b/R/randomseg.R
@@ -0,0 +1,80 @@
+#
+# randomseg.R
+#
+# $Revision: 1.12 $ $Date: 2016/12/01 09:32:41 $
+#
+
+rpoisline <- function(lambda, win=owin()) {
+  win <- as.owin(win)
+  # determine circumcircle
+  xr <- win$xrange
+  yr <- win$yrange
+  xmid <- mean(xr)
+  ymid <- mean(yr)
+  width <- diff(xr)
+  height <- diff(yr)
+  rmax <- sqrt(width^2 + height^2)/2
+  boundbox <- owin(xmid + c(-1,1) * rmax, ymid + c(-1,1) * rmax)
+  # generate Poisson lines through the circumcircle
+  n <- rpois(1, lambda * 2 * pi * rmax)
+  if(n == 0) {
+    X <- psp(numeric(0), numeric(0), numeric(0), numeric(0),
+             marks=integer(0), 
+             window=win)
+    attr(X, "lines") <- infline(p=numeric(0), theta=numeric(0))
+    attr(X, "linemap") <- integer(0)
+    return(X)
+  }
+  theta <- runif(n, max= 2 * pi)
+  p <- runif(n, max=rmax)
+  # compute intersection points with circle
+  q <- sqrt(rmax^2 - p^2)
+  co <- cos(theta)
+  si <- sin(theta)
+  X <- psp(x0= xmid + p * co + q * si,
+           y0= ymid + p * si - q * co,
+           x1= xmid + p * co - q * si,
+           y1= ymid + p * si + q * co,
+           marks = seq_len(n),
+           window=boundbox, check=FALSE)
+  # infinite lines
+  L <- infline(p = p + xmid * co + ymid * si,
+               theta = theta)
+  # clip to window
+  X <- X[win]
+  # append info
+  linemap <- as.integer(marks(X))
+  X <- unmark(X)
+  attr(X, "lines") <- L
+  attr(X, "linemap") <- linemap
+  return(X)
+}
+
+rlinegrid <- function(angle=45, spacing=0.1, win=owin()) {
+  win <- as.owin(win)
+  # determine circumcircle
+  width <- diff(win$xrange)
+  height <- diff(win$yrange)
+  rmax <- sqrt(width^2 + height^2)/2
+  xmid <- mean(win$xrange)
+  ymid <- mean(win$yrange)
+  # generate randomly-displaced grid of lines through circumcircle
+  u <- runif(1, min=0, max=spacing) - rmax
+  if(u >= rmax)   
+    return(psp(numeric(0), numeric(0), numeric(0), numeric(0),
+               window=win, check=FALSE))
+  p <- seq(from=u, to=rmax, by=spacing)
+  # compute intersection points with circle
+  q <- sqrt(rmax^2 - p^2)
+  theta <- pi * ((angle - 90)/180)
+  co <- cos(theta)
+  si <- sin(theta)
+  X <- psp(x0= xmid + p * co + q * si,
+           y0= ymid + p * si - q * co,
+           x1= xmid + p * co - q * si,
+           y1= ymid + p * si + q * co,
+           window=owin(xmid+c(-1,1)*rmax, ymid+c(-1,1)*rmax), check=FALSE)
+  # clip to window
+  X <- X[win]
+  return(X)
+}
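+## Usage sketch (editor's illustration):
+##   X <- rpoisline(lambda=5)               # Poisson line process in unit square
+##   G <- rlinegrid(angle=30, spacing=0.2)  # randomly shifted grid of lines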
diff --git a/R/randomtess.R b/R/randomtess.R
new file mode 100755
index 0000000..1655231
--- /dev/null
+++ b/R/randomtess.R
@@ -0,0 +1,62 @@
+#
+# randomtess.R
+#
+# Random tessellations
+#
+# $Revision: 1.7 $  $Date: 2015/10/21 09:06:57 $
+#
+
+# Poisson line tessellation
+
+rpoislinetess <- function(lambda, win=owin()) {
+  win <- as.owin(win)
+  if(win$type == "mask")
+    stop("Not implemented for masks")
+  # determine circumcircle
+  xr <- win$xrange
+  yr <- win$yrange
+  xmid <- mean(xr)
+  ymid <- mean(yr)
+  width <- diff(xr)
+  height <- diff(yr)
+  rmax <- sqrt(width^2 + height^2)/2
+  boundbox <- owin(xmid + c(-1,1) * rmax, ymid + c(-1,1) * rmax)
+  # generate Poisson lines through the circumcircle
+  n <- rpois(1, lambda * 2 * pi * rmax)
+  if(n == 0)
+    return(tess(tiles=list(win)))
+  theta <- runif(n, max= 2 * pi)
+  p <- runif(n, max=rmax)
+  Y <- infline(p=p, theta=theta)
+  # form the induced tessellation in bounding box
+  Z <- chop.tess(boundbox, Y)
+  # clip to window
+  Z <- intersect.tess(Z, win)
+  attr(Z, "lines") <- Y
+  return(Z)
+}
+
+rMosaicSet <- function(X, p=0.5) {
+  stopifnot(is.tess(X))
+  Y <- tiles(X)
+  Y <- Y[runif(length(Y)) < p]
+  if(length(Y) == 0)
+    return(NULL)
+  Z <- NULL
+  for(i in seq_along(Y))
+    Z <- union.owin(Z, Y[[i]])
+  return(Z)
+}
+
+rMosaicField <- function(X,
+                         rgen=function(n) { sample(0:1, n, replace=TRUE) },
+                         ...,
+                         rgenargs=NULL) {
+  stopifnot(is.tess(X))
+  Y <- as.im(X, ...)
+  ntiles <- length(levels(Y))
+  values <- do.call(rgen, append(list(ntiles),rgenargs))
+  Z <- eval.im(values[as.integer(Y)])
+  return(Z)
+}
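+## Usage sketch (editor's illustration): a random set and a piecewise-
+## constant random field built on Poisson line tessellations:
+##   Z <- rMosaicSet(rpoislinetess(3), p=0.5)
+##   V <- rMosaicField(rpoislinetess(3), rnorm)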
+
diff --git a/R/rat.R b/R/rat.R
new file mode 100644
index 0000000..e16fd79
--- /dev/null
+++ b/R/rat.R
@@ -0,0 +1,166 @@
+#
+#    rat.R
+#
+#   Ratio objects
+#
+#   Numerator and denominator are stored as attributes
+#
+#   $Revision: 1.11 $   $Date: 2017/07/13 08:02:16 $
+#
+
+rat <- function(ratio, numerator, denominator, check=TRUE) {
+  if(check) {
+    stopifnot(compatible(numerator, denominator))
+    stopifnot(compatible(ratio, denominator))
+  }
+  attr(ratio, "numerator") <- numerator
+  attr(ratio, "denominator") <- denominator
+  class(ratio) <- c("rat", class(ratio))
+  return(ratio)
+}
+
+print.rat <- function(x, ...) {
+  NextMethod("print")
+  cat("[Contains ratio information]\n")
+  return(invisible(NULL))
+}
+
+compatible.rat <- function(A, B, ...) {
+  NextMethod("compatible")
+}
+
+pool.rat <- local({
+
+  Add <- function(A,B){ force(A); force(B); eval.fv(A+B, relabel=FALSE) }
+  Square <- function(A) { force(A);         eval.fv(A^2, relabel=FALSE) }
+  Mul <- function(A,B){ force(A); force(B); eval.fv(A*B, relabel=FALSE) }
+
+  pool.rat <- function(..., weights=NULL, relabel=TRUE, variance=TRUE) {
+    argh <- list(...)
+    n <- narg <- length(argh)
+    if(narg == 0) return(NULL)
+    if(narg == 1) return(argh[[1]])
+    ##
+    israt <- unlist(lapply(argh, inherits, what="rat"))
+    if(any(bad <- !israt)) {
+      nbad <- sum(bad)
+      stop(paste(ngettext(nbad, "Argument", "Arguments"),
+                 commasep(which(bad)),
+                 ngettext(nbad, "does not", "do not"),
+                 "contain ratio (numerator/denominator) information"))
+    }
+    isfv <- unlist(lapply(argh, is.fv))
+    if(!all(isfv))
+      stop("All arguments must be fv objects")
+    ## extract
+    template <- vanilla.fv(argh[[1]])
+    Y <- lapply(argh, attr, which="numerator")
+    X <- lapply(argh, attr, which="denominator")
+    X <- do.call(harmonise, X)
+    Y <- do.call(harmonise, Y)
+    templateX <- vanilla.fv(X[[1]])
+    templateY <- vanilla.fv(Y[[1]])
+    ## compute products
+    if(!is.null(weights)) {
+      check.nvector(weights, narg, things="Functions")
+      X <- Map(Mul, X, weights)
+      Y <- Map(Mul, Y, weights)
+    } 
+    ## sum
+    sumX <- Reduce(Add, X)
+    sumY <- Reduce(Add, Y)
+    attributes(sumX) <- attributes(templateX)
+    attributes(sumY) <- attributes(templateY)
+    ## ratio-of-sums
+    Ratio <- eval.fv(sumY/sumX, relabel=FALSE)
+    attributes(Ratio) <- attributes(template)
+    ## variance calculation
+    if(variance) { 
+      meanX <- eval.fv(sumX/n, relabel=FALSE)
+      meanY <- eval.fv(sumY/n, relabel=FALSE)
+      sumX2 <- Reduce(Add, lapply(X, Square))
+      sumY2 <- Reduce(Add, lapply(Y, Square))
+      varX   <- eval.fv((sumX2 - n * meanX^2)/(n-1), relabel=FALSE)
+      varY   <- eval.fv((sumY2 - n * meanY^2)/(n-1), relabel=FALSE)
+      XY <- Map(Mul, X, Y)
+      sumXY <- Reduce(Add, XY)
+      covXY <- eval.fv((sumXY - n * meanX * meanY)/(n-1), relabel=FALSE)
+      ## variance by delta method
+      relvar <- eval.fv(pmax.int(0, varY/meanY^2 + varX/meanX^2
+                                 - 2 * covXY/(meanX * meanY)),
+		        relabel=FALSE)
+      Variance <- eval.fv(Ratio^2 * relvar/n, relabel=FALSE)
+      attributes(Variance) <- attributes(template)
+      ## two sigma CI
+      hiCI <- eval.fv(Ratio + 2 * sqrt(Variance), relabel=FALSE)
+      loCI <- eval.fv(Ratio - 2 * sqrt(Variance), relabel=FALSE)
+      attributes(hiCI) <- attributes(loCI) <-  attributes(template)
+    }
+    ## dress up
+    if(relabel) {
+      Ratio <- prefixfv(Ratio,
+                        tagprefix="pool",
+                        descprefix="pooled ",
+                        lablprefix="")
+      if(variance) {		      
+        Variance <- prefixfv(Variance,
+                             tagprefix="var",
+                             descprefix="delta-method variance estimate of ",
+                             lablprefix="bold(var)~")
+        hiCI <- prefixfv(hiCI,
+                         tagprefix="hi",
+                         descprefix="upper limit of two-sigma CI based on ",
+                         lablprefix="bold(hi)~")
+        loCI <- prefixfv(loCI,
+                         tagprefix="lo",
+                         descprefix="lower limit of two-sigma CI based on ",
+                         lablprefix="bold(lo)~")
+      }
+    }
+    result <- if(!variance) Ratio else
+              Reduce(bind.fv, list(Ratio, Variance, hiCI, loCI))
+    return(result)
+  }
+
+  pool.rat
+  
+})
+
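+## Usage sketch (illustration only, not part of the upstream source):
+## objects of class "rat" are returned by estimators called with ratio=TRUE,
+## and are pooled via the generic pool(), which dispatches to pool.rat.
+##   K1 <- Kest(runifpoint(50), ratio=TRUE)
+##   K2 <- Kest(runifpoint(70), ratio=TRUE)
+##   Kbar <- pool(K1, K2)  # ratio-of-sums estimate with delta-method variance
+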
+adjust.ratfv <- function(f, columns=fvnames(f, "*"), numfactor=1, denfactor=1) {
+  stopifnot(is.fv(f))
+  f[,columns] <- (numfactor/denfactor) * as.data.frame(f)[,columns]
+  if(numfactor != 1 && !is.null(num <- attr(f, "numerator"))) {
+    num[,columns] <- numfactor * as.data.frame(num)[,columns]
+    attr(f, "numerator") <- num
+  }	    
+  if(denfactor != 1 && !is.null(den <- attr(f, "denominator"))) {
+    den[,columns] <- denfactor * as.data.frame(den)[,columns]
+    attr(f, "denominator") <- den
+  }
+  return(f)
+}  
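+## e.g. adjust.ratfv(f, numfactor=2) multiplies the function values and the
+## stored numerator by 2, leaving the denominator unchanged.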
+
+tweak.ratfv.entry <- function(x, ...) {
+  # apply same tweak to function, numerator and denominator.
+  x <- tweak.fv.entry(x, ...)
+  if(!is.null(num <- attr(x, "numerator")))
+    attr(x, "numerator") <- tweak.fv.entry(num, ...)
+  if(!is.null(den <- attr(x, "denominator")))
+    attr(x, "denominator") <- tweak.fv.entry(den, ...)
+  return(x)
+}
+
+"[.rat" <- function(x, ...) {
+   if(!is.fv(x)) stop("Not yet implemented for non-fv ratios")
+   num <- attr(x, "numerator")
+   den <- attr(x, "denominator")
+   class(x) <- "fv"
+   x <- x[...]
+   den <- den[...]
+   num <- num[...]
+   attr(x, "numerator") <- num
+   attr(x, "denominator") <- den
+   class(x) <- c("rat", class(x))
+   return(x)
+}
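+## e.g. if K <- Kest(cells, ratio=TRUE), a subset such as K[1:50, ] is again
+## of class "rat": the numerator and denominator are subsetted in parallel.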
+  
diff --git a/R/reach.R b/R/reach.R
new file mode 100755
index 0000000..803ddf8
--- /dev/null
+++ b/R/reach.R
@@ -0,0 +1,54 @@
+#
+#   reach.R
+#
+#  $Revision: 1.8 $   $Date: 2007/10/24 09:41:15 $
+#
+
+reach <- function(x, ...) {
+  UseMethod("reach")
+}
+
+reach.interact <- function(x, ...) {
+  verifyclass(x, "interact")
+  irange <- x$irange
+  if(is.null(irange))
+    return(Inf)
+  if(!is.function(irange))
+    stop("Internal error - x$irange is not a function")
+  ir <- irange(x)
+  if(is.na(ir))
+    ir <- Inf
+  return(ir)
+}
+
+reach.ppm <- function(x, ..., epsilon=0) {
+  verifyclass(x, "ppm")
+  
+  # Poisson case
+  if(is.poisson.ppm(x))
+    return(0)
+
+  # extract info
+  inte <- x$interaction
+  coeffs <- coef(x)
+
+  if(newstyle.coeff.handling(inte)) {
+    # extract only interaction coefficients
+    Vnames <- x$internal$Vnames
+    coeffs <- coeffs[Vnames]
+  } 
+  
+  # apply 'irange' function
+  irange <- inte$irange
+  if(is.null(irange))
+    return(Inf)
+  ir <- irange(inte, coeffs, epsilon=epsilon)
+
+  if(is.na(ir))
+    ir <- Inf
+
+  return(ir)
+}
+
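+## Usage sketch (illustration only):
+##   reach(Strauss(0.1))                  # 0.1, via reach.interact
+##   fit <- ppm(cells ~ 1, Strauss(0.1))
+##   reach(fit)                           # 0.1, via reach.ppm
+##   reach(ppm(cells ~ 1))                # 0 for a Poisson model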
+
+
diff --git a/R/reduceformula.R b/R/reduceformula.R
new file mode 100755
index 0000000..815046f
--- /dev/null
+++ b/R/reduceformula.R
@@ -0,0 +1,91 @@
+#
+#  reduceformula.R
+#
+#  $Revision: 1.7 $   $Date: 2016/12/30 01:44:07 $
+#
+# delete variable from formula 
+#
+#......................................................
+#
+
+reduceformula <- function(fmla, deletevar, verbose=FALSE) {
+  ## removes the variable `deletevar' from the formula `fmla'
+  ## returns a simplified formula, or NULL if it can't simplify.
+  stopifnot(inherits(fmla, "formula"))
+  stopifnot(is.character(deletevar) && length(deletevar) == 1)
+  if(!(deletevar %in% all.vars(as.expression(fmla)))) {
+    if(verbose)
+      message(paste("The formula does not involve", dQuote(deletevar),
+                    "and is therefore unchanged"))
+    return(fmla)
+  }
+  lhs <- if(length(fmla) < 3) NULL else fmla[[2]]
+  ## create terms object
+  tt <- attributes(terms(fmla))
+         ##  formula.has.intercept <- (tt$intercept == 1)
+  ## extract all variables appearing in the model
+  vars <- as.list(tt$variables)[-1]
+  nvars <- length(vars)
+  varexprs <- lapply(vars, as.expression)
+  varstrings <- sapply(varexprs, paste)
+  ## identify any offsets
+  offs <- tt$offset
+  v.is.offset <- if(!is.null(offs)) (1:nvars) %in% offs else rep(FALSE, nvars)
+  ## remove the response
+  repo <- tt$response
+  if(repo != 0) {
+    vars <- vars[-repo]
+    varstrings <- varstrings[-repo]
+    varexprs <- varexprs[-repo]
+    v.is.offset <- v.is.offset[-repo]
+  }
+  ## a term may be a variable name
+           ##  v.is.name <- sapply(vars, is.name)
+  ## a term may be an expression like sin(x), poly(x,y,degree=2)
+  v.args <- lapply(varexprs, all.vars)
+  matches.delete <- lapply(v.args, "==", deletevar)
+  v.has.delete <- sapply(matches.delete, any)
+  v.has.other <- !sapply(matches.delete, all)
+  v.is.mixed <- v.has.delete & v.has.other
+  ## we can't handle mixed terms like sin(x-d), poly(x,d)
+  ## where d is to be deleted. Handling these would require
+  ## knowledge about the functions sin and poly.
+  if(any(v.is.mixed)) {
+    nmixed <- sum(v.is.mixed)
+    if(verbose)
+      message(paste("Don't know how to reduce the",
+              ngettext(nmixed, "term", "terms"),
+              paste(dQuote(varstrings[v.is.mixed]), collapse=",")))
+    return(NULL)
+  }
+  ## OK. We have identified all first order terms to be deleted.
+  condemned.names <- varstrings[v.has.delete]
+  ## Determine the terms of all orders that include these first order terms
+  ## (1) terms with model coefficients
+  fax <- tt$factors
+  if(prod(dim(fax)) == 0)
+    retained.terms <- character(0)
+  else {
+    ## Rows are first order terms 
+    condemned.row <- rownames(fax) %in% condemned.names
+    ## Columns are the terms of all orders
+    allterms <- colnames(fax)
+    ## Find all columns containing a 1 in a row that is to be deleted
+    if(any(condemned.row)) {
+      condemned.column <- matcolany(fax[condemned.row, , drop=FALSE] != 0)
+      retained.terms <- allterms[!condemned.column]
+    } else retained.terms <- allterms
+  }
+  ## (2) offsets if any
+  if(any(v.is.offset))
+    retained.terms <- c(retained.terms,
+                        varstrings[v.is.offset & !v.has.delete])
+  ## (3) intercept forced?
+  if(length(retained.terms) == 0)
+    retained.terms <- "1"
+  
+  ## OK. Cut-and-paste
+  f <- paste(lhs, "~", paste(retained.terms, collapse=" + "))
+  return(as.formula(f))
+} 
+
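+## Examples (illustration only):
+##   reduceformula(y ~ x + z + x:z, "z")   # y ~ x
+##   reduceformula(y ~ sin(x - z), "z")    # NULL: mixed term, cannot simplify
+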
diff --git a/R/relrisk.R b/R/relrisk.R
new file mode 100755
index 0000000..e60b89d
--- /dev/null
+++ b/R/relrisk.R
@@ -0,0 +1,493 @@
+#
+#    relrisk.R
+#
+#   Estimation of relative risk
+#
+#  $Revision: 1.33 $  $Date: 2017/01/28 06:29:07 $
+#
+
+relrisk <- function(X, ...) UseMethod("relrisk")
+                                      
+relrisk.ppp <- local({
+
+  relrisk.ppp <- function(X, sigma=NULL, ..., varcov=NULL, at="pixels",
+                      relative=FALSE, se=FALSE,
+                      casecontrol=TRUE, control=1, case) {
+    stopifnot(is.ppp(X))
+    stopifnot(is.multitype(X))
+    control.given <- !missing(control)
+    case.given <- !missing(case)
+    if(!relative && (control.given || case.given)) {
+      aa <- c("control", "case")[c(control.given, case.given)]
+      nn <- length(aa)
+      warning(paste(ngettext(nn, "Argument", "Arguments"),
+                    paste(sQuote(aa), collapse=" and "),
+                    ngettext(nn, "was", "were"),
+                    "ignored, because relative=FALSE"))
+    }
+    npts <- npoints(X)
+    Y <- split(X)
+    uX <- unmark(X)
+    types <- names(Y)
+    ntypes <- length(Y)
+    if(ntypes == 1)
+      stop("Data contains only one type of points")
+    marx <- marks(X)
+    imarks <- as.integer(marx)
+    lev <- levels(marx)
+    ## trap arguments
+    dotargs <- list(...)
+    isbwarg <- names(dotargs) %in% c("method", "nh", "hmin", "hmax", "warn")
+    bwargs <- dotargs[isbwarg]
+    dargs  <- dotargs[!isbwarg]
+    ## using edge corrections?
+    edge   <- resolve.1.default(list(edge=TRUE), list(...))
+    diggle <- resolve.1.default(list(diggle=FALSE), list(...))
+    ## bandwidth
+    if(is.null(sigma) && is.null(varcov)) 
+      sigma <- do.call(bw.relrisk, append(list(X), bwargs))
+    SmoothPars <- append(list(sigma=sigma, varcov=varcov, at=at), dargs)
+    if(se) {
+      ## determine other bandwidth for variance estimation
+      if(is.null(varcov)) {
+        varconst <- 1/(4 * pi * prod(sigma))
+        VarPars <- append(list(sigma=sigma/sqrt(2), at=at), dargs)
+      } else {
+        varconst <- 1/(4 * pi * sqrt(det(varcov)))
+        VarPars <- append(list(varcov=varcov/2, at=at), dargs)
+      }
+      if(edge) {
+        ## evaluate edge correction weights
+        edgeim <- second.moment.calc(uX, sigma, what="edge", ...,
+                                     varcov=varcov)
+        if(diggle || at == "points") {
+          edgeX <- safelookup(edgeim, uX, warn=FALSE)
+          diggleX <- 1/edgeX
+          diggleX[!is.finite(diggleX)] <- 0
+        }
+        edgeim <- edgeim[Window(X), drop=FALSE]
+      }
+    }
+    ## .........................................
+    ## compute intensity estimates for each type
+    ## .........................................
+    switch(at,
+           pixels = {
+             ## intensity estimates of each type
+             Deach <- do.call(density.splitppp,
+                              append(list(x=Y), SmoothPars))
+             ## compute intensity estimate for unmarked pattern
+             Dall <- Reduce("+", Deach)
+             ## variance terms
+             if(se) {
+               if(!edge) {
+                 ## no edge correction
+                 Veach <- do.call(density.splitppp,
+                                  append(list(x=Y), VarPars))
+               } else if(!diggle) {
+                 ## edge correction e(u)
+                 Veach <- do.call(density.splitppp,
+                                  append(list(x=Y), VarPars))
+                 Veach <- lapply(Veach, "/", e2=edgeim)
+               } else {
+                 ## Diggle edge correction e(x_i)
+                 Veach <- mapply(density.ppp,
+                                 x=Y,
+                                 weights=split(diggleX, marx),
+                                 MoreArgs=VarPars,
+                                 SIMPLIFY=FALSE)
+               }
+               Veach <- lapply(Veach, "*", varconst)
+               Vall <- Reduce("+", Veach)
+             }
+           },
+           points = {
+             ## intensity estimates of each type **at each data point**
+             ## dummy variable matrix
+             dumm <- matrix(0, npts, ntypes)
+             dumm[cbind(seq_len(npts), imarks)] <- 1
+             colnames(dumm) <- lev
+             Deach <- do.call(density.ppp,
+                              append(list(x=uX, weights=dumm),
+                                     SmoothPars))
+             ## compute intensity estimate for unmarked pattern
+             Dall <- rowSums(Deach)
+             ## variance terms
+             if(se) {
+               if(!edge) {
+                 ## no edge correction
+                 Veach <- do.call(density.ppp,
+                                  append(list(x=uX, weights=dumm),
+                                         VarPars))
+               } else if(!diggle) {
+                 ## edge correction e(u)
+                 Veach <- do.call(density.ppp,
+                                  append(list(x=uX, weights=dumm),
+                                         VarPars))
+                 Veach <- Veach * diggleX
+               } else {
+                 ## Diggle edge correction e(x_i)
+                 Veach <- do.call(density.ppp,
+                                  append(list(x=uX, weights=dumm * diggleX),
+                                         VarPars))
+               }
+               Veach <- Veach * varconst
+               Vall <- rowSums(Veach)
+             }
+           })
+    ## .........................................
+    ## compute probabilities/risks
+    ## .........................................
+    if(ntypes == 2 && casecontrol) {
+      if(control.given || !case.given) {
+        stopifnot(length(control) == 1)
+        if(is.numeric(control)) {
+          icontrol <- control <- as.integer(control)
+          stopifnot(control %in% 1:2)
+        } else if(is.character(control)) {
+          icontrol <- match(control, levels(marks(X)))
+          if(is.na(icontrol)) stop(paste("No points have mark =", control))
+        } else
+          stop(paste("Unrecognised format for argument", sQuote("control")))
+        if(!case.given)
+          icase <- 3 - icontrol
+      }
+      if(case.given) {
+        stopifnot(length(case) == 1)
+        if(is.numeric(case)) {
+          icase <- case <- as.integer(case)
+          stopifnot(case %in% 1:2)
+        } else if(is.character(case)) {
+          icase <- match(case, levels(marks(X)))
+          if(is.na(icase)) stop(paste("No points have mark =", case))
+        } else stop(paste("Unrecognised format for argument", sQuote("case")))
+        if(!control.given) 
+          icontrol <- 3 - icase
+      }
+      ## compute ......
+      switch(at,
+             pixels = {
+               ## compute probability of case
+               pcase <- Deach[[icase]]/Dall
+               ## correct small numerical errors
+               pcase <- clamp01(pcase)
+               ## trap NaN values
+               nbg <- badvalues(pcase)
+               if(any(nbg)) {
+                 ## apply l'Hopital's rule:
+                 ##     p(case) = 1{nearest neighbour is case}
+                 distcase <- distmap(Y[[icase]], xy=pcase)
+                 distcontrol <- distmap(Y[[icontrol]], xy=pcase)
+                 closecase <- eval.im(as.integer(distcase < distcontrol))
+                 pcase[nbg] <- closecase[nbg]
+               }
+               if(!relative) {
+                 if(!se) {
+                   result <- pcase
+                 } else {
+                   Vcase <- Veach[[icase]]
+                   NUM <- eval.im(Vcase * (1-2*pcase) + Vall * pcase^2)
+                   SE <- eval.im(sqrt(pmax(NUM, 0))/Dall)
+                   result <- list(estimate=pcase, SE=SE)
+                 }
+               } else {
+                 rcase <- eval.im(ifelse(pcase < 1, pcase/(1-pcase), NA))
+                 if(!se) {
+                   result <- rcase
+                 } else {
+                   Vcase <- Veach[[icase]]
+                   Vctrl <- Veach[[icontrol]]
+                   Dctrl <- Deach[[icontrol]]
+                   NUM <- eval.im(Vcase + Vctrl * rcase^2)
+                   SE <- eval.im(sqrt(pmax(NUM, 0))/Dctrl)
+                   result <- list(estimate=rcase, SE=SE)
+                 }
+               }
+             },
+             points={
+               ## compute probability of case
+               pcase <- Deach[,icase]/Dall
+               ## correct small numerical errors
+               pcase <- clamp01(pcase)
+               ## trap NaN values
+               if(any(nbg <- badvalues(pcase))) {
+                 ## apply l'Hopital's rule
+                 nntype <- imarks[nnwhich(X)]
+                 pcase[nbg] <- as.integer(nntype[nbg] == icase)
+               }
+               if(!relative) {
+                 if(!se) {
+                   result <- pcase
+                 } else {
+                   NUM <- Veach[,icase] * (1-2*pcase) + Vall * pcase^2
+                   SE <- sqrt(pmax(NUM, 0))/Dall
+                   result <- list(estimate=pcase, SE=SE)
+                 }
+               } else {
+                 rcase <- ifelse(pcase < 1, pcase/(1-pcase), NA)
+                 if(!se) {
+                   result <- rcase
+                 } else {
+                   NUM <- Veach[,icase] + Veach[,icontrol] * rcase^2
+                   SE <- sqrt(pmax(NUM, 0))/Deach[,icontrol]
+                   result <- list(estimate=rcase, SE=SE)
+                 }
+               }
+             })
+    } else {
+      ## several types
+      if(relative) {
+        ## need 'control' type
+        stopifnot(length(control) == 1)
+        if(is.numeric(control)) {
+          icontrol <- control <- as.integer(control)
+          stopifnot(control %in% 1:ntypes)
+        } else if(is.character(control)) {
+          icontrol <- match(control, levels(marks(X)))
+          if(is.na(icontrol)) stop(paste("No points have mark =", control))
+        } else
+          stop(paste("Unrecognised format for argument", sQuote("control")))
+      }
+      switch(at,
+             pixels={
+               probs <- as.solist(lapply(Deach, "/", e2=Dall))
+               ## correct small numerical errors
+               probs <- as.solist(lapply(probs, clamp01))
+               ## trap NaN values
+               nbg <- lapply(probs, badvalues)
+               nbg <- Reduce("|", nbg)
+               if(any(nbg)) {
+                 ## apply l'Hopital's rule
+                 distX <- distmap(X, xy=Dall)
+                 whichnn <- attr(distX, "index")
+                 typenn <- eval.im(imarks[whichnn])
+                 typennsub <- as.matrix(typenn)[nbg]
+                 for(k in seq_along(probs))
+                   probs[[k]][nbg] <- (typennsub == k)
+               }
+               if(!relative) {
+                 if(!se) {
+                   result <- probs
+                 } else {
+                   SE <- list()
+                   for(i in 1:ntypes) {
+                     NUM <- (Veach[[i]] * (1 - 2 * probs[[i]])
+                             + Vall * probs[[i]]^2)
+                     SE[[i]] <- eval.im(sqrt(pmax(NUM, 0))/Dall)
+                   }
+                   SE <- as.solist(SE)
+                   names(SE) <- types
+                   result <- list(estimate=probs, SE=SE)
+                 }
+               } else {
+                 risks <- as.solist(lapply(probs,
+                                           function(z, d) {
+                                             eval.im(ifelse(d > 0, z/d, NA))
+                                           },
+                                           d = probs[[icontrol]]))
+                 if(!se) {
+                   result <- risks
+                 } else {
+                   Vctrl <- Veach[[icontrol]]
+                   Dctrl <- Deach[[icontrol]]
+                   SE <- list()
+                   for(i in 1:ntypes) {
+                     NUM <- Veach[[i]] + Vctrl * risks[[i]]^2
+                     SE[[i]] <- eval.im(sqrt(pmax(NUM, 0))/Dctrl)
+                   }
+                   SE <- as.solist(SE)
+                   names(SE) <- types
+                   result <- list(estimate=risks, SE=SE)
+                   
+                 }
+               }
+             },
+             points = {
+               probs <- Deach/Dall
+               ## correct small numerical errors
+               probs <- clamp01(probs)
+               ## trap NaN values
+               bad <- badvalues(probs)
+               badrow <- matrowany(bad)
+               if(any(badrow)) {
+                 ## apply l'Hopital's rule
+                 typenn <- imarks[nnwhich(X)]
+                 probs[badrow, ] <- (typenn == col(probs))[badrow, ]
+               }
+               if(!relative) {
+                 if(!se) {
+                   result <- probs
+                 } else {
+                   NUM <- Veach * (1-2*probs) + Vall * probs^2
+                   SE <- sqrt(pmax(NUM, 0))/Dall
+                   result <- list(estimate=probs, SE=SE)
+                }
+               } else {
+                 risks <- probs/probs[,icontrol]
+                 if(!se) {
+                   result <- risks
+                 } else {
+                   NUM <- Veach + Veach[,icontrol] * risks^2
+                   NUM[,icontrol] <- 0
+                   SE <- sqrt(pmax(NUM, 0))/Deach[,icontrol]
+                   result <- list(estimate=risks, SE=SE)
+                 }
+               }
+            })
+    }
+    attr(result, "sigma") <- sigma
+    attr(result, "varcov") <- varcov
+    return(result)
+  }
+
+  clamp01 <- function(x) {
+    if(is.im(x)) return(eval.im(pmin(pmax(x, 0), 1)))
+    return(pmin(pmax(x, 0), 1))
+  }
+
+  badvalues <- function(x) {
+    if(is.im(x)) x <- as.matrix(x)
+    return(!(is.finite(x) | is.na(x)))
+  }
+
+  reciprocal <- function(x) 1/x
+  
+  relrisk.ppp
+})
+
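+## Usage sketch (illustration only; 'amacrine' has two types, "off" and "on"):
+##   p <- relrisk(amacrine)            # P(case) as a pixel image; bandwidth
+##                                     # selected automatically by bw.relrisk
+##   ps <- relrisk(amacrine, se=TRUE)  # list(estimate, SE)
+##   r <- relrisk(amacrine, relative=TRUE, control="off")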
+
+bw.stoyan <- function(X, co=0.15) {
+  ## Stoyan's rule of thumb
+  stopifnot(is.ppp(X))
+  n <- npoints(X)
+  W <- Window(X)
+  a <- area(W)
+  stoyan <- co/sqrt(5 * n/a)
+  return(stoyan)
+}
+
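+## e.g. bw.stoyan(cells) returns 0.15/sqrt(5 * npoints(cells)/area(Window(cells)))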
+
+bw.relrisk <- function(X, method="likelihood",
+                       nh=spatstat.options("n.bandwidth"),
+                       hmin=NULL, hmax=NULL, warn=TRUE) {
+  stopifnot(is.ppp(X))
+  stopifnot(is.multitype(X))
+  n <- npoints(X)   # needed below, before the dummy-variable block
+  ## rearrange in ascending order of x-coordinate (for C code)
+  X <- X[fave.order(X$x)]
+  ##
+  Y <- split(X)
+  ntypes <- length(Y)
+  if(ntypes == 1)
+    stop("Data contains only one type of points")
+  marx <- marks(X)
+  method <- pickoption("method", method,
+                       c(likelihood="likelihood",
+                         leastsquares="leastsquares",
+                         ls="leastsquares",
+                         LS="leastsquares",
+                         weightedleastsquares="weightedleastsquares",
+                         wls="weightedleastsquares",
+                         WLS="weightedleastsquares"))
+  ## 
+  if(method != "likelihood") {
+    ## dummy variables for each type
+    imarks <- as.integer(marx)
+    if(ntypes == 2) {
+      ## 1 = control, 2 = case
+      indic <- (imarks == 2)
+      y01   <- as.integer(indic)
+    } else {
+      indic <- matrix(FALSE, n, ntypes)
+      indic[cbind(seq_len(n), imarks)] <- TRUE
+      y01  <- indic * 1
+    }
+    X01 <- X %mark% y01
+  }
+  ## cross-validated bandwidth selection
+  ## determine a range of bandwidth values
+  if(is.null(hmin) || is.null(hmax)) {
+    W <- Window(X)
+    a <- area(W)
+    d <- diameter(as.rectangle(W))
+    ## Stoyan's rule of thumb applied to the least and most common types
+    mcount <- table(marx)
+    nmin <- max(1, min(mcount))
+    nmax <- max(1, max(mcount))
+    stoyan.low <- 0.15/sqrt(nmax/a)
+    stoyan.high <- 0.15/sqrt(nmin/a)
+    if(is.null(hmin)) 
+      hmin <- max(minnndist(unique(X)), stoyan.low/5)
+    if(is.null(hmax)) {
+      hmax <- min(d/4, stoyan.high * 20)
+      hmax <- max(hmax, hmin * 2)
+    }
+  } else stopifnot(hmin < hmax)
+  ##
+  h <- geomseq(from=hmin, to=hmax, length.out=nh)
+  cv <- numeric(nh)
+  ## 
+  ## compute cross-validation criterion
+  switch(method,
+         likelihood={
+           methodname <- "Likelihood"
+           ## for efficiency, only compute the estimate of p_j(x_i)
+           ## when j = m_i = mark of x_i.
+           Dthis <- numeric(n)
+           for(i in seq_len(nh)) {
+             Dall <- density.ppp(X, sigma=h[i], at="points", edge=FALSE,
+                                 sorted=TRUE)
+             Deach <- density.splitppp(Y, sigma=h[i], at="points", edge=FALSE,
+                                       sorted=TRUE)
+             split(Dthis, marx) <- Deach
+             pthis <- Dthis/Dall
+             cv[i] <- -mean(log(pthis))
+           }
+         },
+         leastsquares={
+           methodname <- "Least Squares"
+           for(i in seq_len(nh)) {
+             phat <- Smooth(X01, sigma=h[i], at="points", leaveoneout=TRUE,
+                            sorted=TRUE)
+             cv[i] <- mean((y01 - phat)^2)
+           }
+         },
+         weightedleastsquares={
+           methodname <- "Weighted Least Squares"
+           ## need initial value of h from least squares
+           h0 <- bw.relrisk(X, "leastsquares", nh=ceiling(nh/4))
+           phat0 <- Smooth(X01, sigma=h0, at="points", leaveoneout=TRUE,
+                           sorted=TRUE)
+           var0 <- phat0 * (1-phat0)
+           var0 <- pmax.int(var0, 1e-6)
+           for(i in seq_len(nh)) {
+             phat <- Smooth(X01, sigma=h[i], at="points", leaveoneout=TRUE,
+                            sorted=TRUE)
+             cv[i] <- mean((y01 - phat)^2/var0)
+           }
+         })
+  ## optimize
+  iopt <- which.min(cv)
+  ##
+  if(warn && (iopt == nh || iopt == 1)) 
+    warning(paste("Cross-validation criterion was minimised at",
+                  if(iopt == 1) "left-hand" else "right-hand",
+                  "end of interval",
+                  "[", signif(hmin, 3), ",", signif(hmax, 3), "];",
+                  "use arguments hmin, hmax to specify a wider interval"))
+  ##    
+  result <- bw.optim(cv, h, iopt,
+                     hname="sigma", 
+                     creator="bw.relrisk",
+                     criterion=paste(methodname, "Cross-Validation"),
+                     unitname=unitname(X))
+  return(result)
+}
+
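+## Usage sketch (illustration only):
+##   b <- bw.relrisk(amacrine)    # likelihood cross-validation (the default)
+##   plot(b)                      # criterion curve against candidate bandwidths
+##   relrisk(amacrine, sigma=b)
+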
+which.max.im <- function(x) {
+  .Deprecated("im.apply", "spatstat",
+              "which.max.im(x) is deprecated: use im.apply(x, which.max)")
+  ans <- im.apply(x, which.max)
+  return(ans)
+}
+
diff --git a/R/relrisk.ppm.R b/R/relrisk.ppm.R
new file mode 100644
index 0000000..fb8d634
--- /dev/null
+++ b/R/relrisk.ppm.R
@@ -0,0 +1,386 @@
+##
+##  relrisk.ppm.R
+##
+##  $Revision: 1.7 $ $Date: 2016/07/15 10:21:26 $
+##
+
+relrisk.ppm <- local({
+
+  relrisk.ppm <- function(X, ..., at=c("pixels", "points"),
+                          relative=FALSE, se=FALSE, 
+                          casecontrol=TRUE, control=1, case,
+                          ngrid=NULL, window=NULL) {
+    stopifnot(is.ppm(X))
+    stopifnot(is.multitype(X))
+    control.given <- !missing(control)
+    case.given <- !missing(case)
+    at <- match.arg(at)
+    if(!relative && (control.given || case.given)) {
+      aa <- c("control", "case")[c(control.given, case.given)]
+      nn <- length(aa)
+      warning(paste(ngettext(nn, "Argument", "Arguments"),
+                    paste(sQuote(aa), collapse=" and "),
+                    ngettext(nn, "was", "were"),
+                    "ignored, because relative=FALSE"))
+    }
+    model <- X
+    Y <- data.ppm(model)
+    types <- levels(marks(Y))
+    ntypes <- length(types)
+#    np <- length(coef(model))
+    ## compute probabilities or risks
+    if(ntypes == 2 && casecontrol) {
+      if(control.given || !case.given) {
+        stopifnot(length(control) == 1)
+        if(is.numeric(control)) {
+          icontrol <- control <- as.integer(control)
+          stopifnot(control %in% 1:2)
+        } else if(is.character(control)) {
+          icontrol <- match(control, types)
+          if(is.na(icontrol)) stop(paste("No points have mark =", control))
+        } else
+          stop(paste("Unrecognised format for argument", sQuote("control")))
+        if(!case.given)
+          icase <- 3 - icontrol
+      }
+      if(case.given) {
+        stopifnot(length(case) == 1)
+        if(is.numeric(case)) {
+          icase <- case <- as.integer(case)
+          stopifnot(case %in% 1:2)
+        } else if(is.character(case)) {
+          icase <- match(case, types)
+          if(is.na(icase)) stop(paste("No points have mark =", case))
+        } else stop(paste("Unrecognised format for argument", sQuote("case")))
+        if(!control.given) 
+          icontrol <- 3 - icase
+      }
+      switch(at,
+             pixels= {
+               ## estimate is a single image
+               ## compute images of intensities of each mark
+               lambda.each <- predict(model, ngrid=ngrid, window=window)
+               if(!relative) {
+                 ## compute probabilities..
+                 ## total intensity (image)
+                 lambda.all <- Reduce("+", lambda.each)
+                 if(!se) {
+                   result <- lambda.each[[icase]]/lambda.all
+                   result <- killglitches(result)
+                 } else {
+                   probs <- lapply(lambda.each, "/", e2=lambda.all)
+                   probs <- as.solist(lapply(probs, killglitches))
+                   estimate <- probs[[icase]]
+                   SE <- SEprobPixels(model, probs)[[icase]]
+                   SE <- killglitches(SE)
+                   result <- list(estimate=estimate, SE=SE)
+                 }
+               } else {
+                 ## relative risks
+                 lambda.ctrl <- lambda.each[[icontrol]]
+                 if(!se) {
+                   result <- lambda.each[[icase]]/lambda.ctrl
+                   result <- killglitches(result)
+                 } else {
+                   risks <- lapply(lambda.each, "/", e2=lambda.ctrl)
+                   risks <- as.solist(lapply(risks, killglitches))
+                   estimate <- risks[[icase]]
+                   SE <- SErelriskPixels(model, risks, icontrol)[[icase]]
+                   SE <- killglitches(SE)
+                   result <- list(estimate=estimate, SE=SE)
+                 }
+               }
+             },
+             points={
+               ## compute intensities of each type
+               Ycase <- unmark(Y) %mark% factor(types[icase], levels=types)
+               Yctrl <- unmark(Y) %mark% factor(types[icontrol], levels=types)
+               lambda.case <- predict(model, locations=Ycase)
+               lambda.ctrl <- predict(model, locations=Yctrl)
+               if(!relative) {
+                 ## compute probabilities
+                 ## total intensity
+                 lambda.all  <- lambda.case + lambda.ctrl
+                 prob.case <- lambda.case/lambda.all
+                 if(!se) {
+                   result <- prob.case
+                 } else {
+                   probs <- matrix(, length(prob.case), 2)
+                   probs[,icase] <- prob.case
+                   probs[,icontrol] <- 1 - prob.case
+                   SE <- SEprobPoints(model, probs)[,icase]
+                   result <- list(estimate=prob.case, SE=SE)
+                 }
+               } else {
+                 ## compute relative risks
+                 risk.case <- lambda.case/lambda.ctrl
+                 if(!se) {
+                   result <- risk.case
+                 } else {
+                   risks <- matrix(, length(risk.case), 2)
+                   risks[,icase] <- risk.case
+                   risks[,icontrol] <- 1
+                   SE <- SErelriskPoints(model, risks, icontrol)[,icase]
+                   result <- list(estimate=risk.case, SE=SE)
+                 }
+               }
+             })
+    } else {
+      ## several types
+      if(relative) {
+        ## need 'control' type
+        stopifnot(length(control) == 1)
+        if(is.numeric(control)) {
+          icontrol <- control <- as.integer(control)
+          stopifnot(control %in% 1:ntypes)
+        } else if(is.character(control)) {
+          icontrol <- match(control, types)
+          if(is.na(icontrol)) stop(paste("No points have mark =", control))
+        } else
+          stop(paste("Unrecognised format for argument", sQuote("control")))
+      }
+      switch(at,
+             pixels={
+               ## estimate is a list of images
+               ## Compute images of intensities of each type
+               lambda.each <- predict(model, ngrid=ngrid, window=window)
+               if(!relative) {
+                 ## compute probabilities...
+                 ## image of total intensity
+                 lambda.all <- Reduce("+", lambda.each)
+                 probs <- lapply(lambda.each, "/", e2=lambda.all)
+                 probs <- as.solist(lapply(probs, killglitches))
+                 if(!se) {
+                   result <- probs
+                 } else {
+                   SE <- SEprobPixels(model, probs)
+                   SE <- as.solist(lapply(SE, killglitches))
+                   result <- list(estimate=probs, SE=SE)
+                 }
+               } else {
+                 ## compute relative risks
+                 risks <- lapply(lambda.each, "/",
+                                 e2=lambda.each[[icontrol]])
+                 risks <- as.solist(lapply(risks, killglitches))
+                 if(!se) {
+                   result <- risks
+                 } else {
+                   SE <- SErelriskPixels(model, risks, icontrol)
+                   SE <- as.solist(lapply(SE, killglitches))
+                   result <- list(estimate=risks, SE=SE)
+                 }
+               }
+             },
+             points = {
+               ## matrix of intensities of each type at each point
+               ## rows=locations, cols=types
+               lambda.each <- sapply(types,
+                                     predictfortype, 
+                                     loc=unmark(Y), model=model, types=types)
+               if(!relative) {
+                 ## compute probabilities
+                 lambda.all <- rowSums(lambda.each)
+                 probs <- lambda.each/lambda.all
+                 if(!se) {
+                   result <- probs
+                 } else {
+                   SE <- SEprobPoints(model, probs)
+                   result <- list(estimate=probs, SE=SE)
+                 }
+               } else {
+                 ## compute relative risks
+                 risks <- lambda.each/lambda.each[,icontrol]
+                 if(!se) {
+                   result <- risks
+                 } else {
+                   SE <- SErelriskPoints(model, risks, icontrol)
+                   result <- list(estimate=risks, SE=SE)
+                 }
+               }
+            })
+    }
+    return(result)
+  }
+
+  modmats <- function(model) {
+    # model matrices for data locations for each possible mark
+    QM <- quad.ppm(model)
+    Y <- QM$data
+    QR <- quadscheme.replicated(Y, unmark(Y[FALSE]))
+    sourceid <- QR$param$sourceid
+    ## canonical covariates 
+    mm <- model.matrix(model, Q=QR)
+    ## mm is a matrix with one column for canonical covariate
+    ## and one row for each marked point in QR.
+    mm <- cbind(data.frame(".s"=sourceid, ".m"=marks(QR)), mm)
+    ## Split by marks 
+    ss <- split(mm, mm$.m)
+    ## Reorganise into compatible matrices
+    zz <- lapply(ss, reorg)
+    return(zz)
+  }
+  
+  reorg <- function(x) {
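+      ## rows arrive in quadrature-scheme order; put them back into the
+      ## order of the original data points (.s = source id), then drop the
+      ## bookkeeping columns .s and .m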
+      z <- x
+      rownames(z) <- NULL
+      z[x$.s, ] <- z
+      return(z[,-(1:2), drop=FALSE])
+  }
+
+  SErelriskPoints <- function(model, riskvalues, icontrol) {
+    ## riskvalues is a matrix with rows=data locations, cols=types
+    types <- colnames(riskvalues)
+    ntypes <- length(types)
+    ## 
+    S.um <- modmats(model)
+    S.um <- lapply(S.um, as.matrix)
+    ## S.um is a list of matrices, one for each possible type,
+    ## each matrix having one row per data location 
+    dS.um <- lapply(S.um, "-", e2=S.um[[icontrol]])
+    R.um <- mapply("*",
+                   dS.um,
+                   as.list(as.data.frame(riskvalues)),
+                   SIMPLIFY=FALSE)
+    ## likewise R.um is a list of matrices
+    ##
+    vc <- vcov(model)
+    VAR <- lapply(R.um, quadform, v=vc)
+    VAR <- do.call(cbind, VAR)
+    SE <- sqrt(VAR)
+    colnames(SE) <- types
+    return(SE)
+  }
+
+  msubtract <- function(z1, z2) mapply("-", e1=z1, e2=z2, SIMPLIFY=FALSE)
+
+  mmultiply <- function(z1, z2) solapply(z1, "*", e2=z2)
+  
+  SErelriskPixels <- function(model, riskvalues, icontrol) {
+    ## riskvalues is an imlist
+    types <- names(riskvalues)
+    ntypes <- length(types)
+    ## canonical covariates
+    S.um <- model.images(model)
+    ## S.um is a hyperframe with one column for each mark value
+    ## and one row for each canonical covariate
+    dS.um <- lapply(S.um, msubtract, 
+                    z2=S.um[,icontrol,drop=TRUE])
+    R.um <- mapply(mmultiply,
+                   z1=dS.um,
+                   z2=riskvalues,
+                   SIMPLIFY=FALSE)
+    VAR <- vector(mode="list", length=ntypes)
+    vc <- vcov(model)
+    ncoef <- nrow(vc)
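+    ## delta-method variance of each risk image: the quadratic form
+    ## R(u)' %*% vc %*% R(u), accumulated pixelwise over coefficient pairs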
+    for(type in 1:ntypes) {
+      v <- 0
+      Rum <- R.um[[type]]
+      for(i in 1:ncoef) {
+        for(j in 1:ncoef) {
+          v <- v + Rum[[i]] * vc[i,j] * Rum[[j]]
+        }
+      }
+      VAR[[type]] <- v
+    }
+    names(VAR) <- types
+    VAR <- as.solist(VAR)
+    SE <- as.solist(lapply(VAR, sqrt))
+    return(SE)
+  }
+
+
+  SEprobPixels <- function(model, probvalues) {
+    ## probvalues is an imlist
+    types <- names(probvalues)
+    ntypes <- length(types)
+    ## canonical covariates
+    S.um <- model.images(model)
+    ## S.um is a hyperframe with one column for each mark value
+    ## and one row for each canonical covariate
+    ncoef <- length(coef(model))
+    Sbar.u <- vector(mode="list", length=ncoef)
+    for(k in 1:ncoef)
+      Sbar.u[[k]] <- Reduce("+",
+                            mapply("*", e1=S.um[k,,drop=TRUE], e2=probvalues,
+                                   SIMPLIFY=FALSE))
+    ## Sbar.u is a list of images, one for each canonical covariate
+    Sdif.um <- lapply(as.list(S.um), 
+                      msubtract,
+                      z2=Sbar.u)
+    ## Sdif.um is a list of lists of images.
+    ##   List of length ntypes,
+    ##   each entry being an imlist of length ncoef
+    P.um <- mapply(mmultiply,
+                   Sdif.um, 
+                   probvalues, 
+                   SIMPLIFY=FALSE)
+    ## P.um is same format as Sdif.um
+    vc <- vcov(model)
+    ncoef <- nrow(vc)
+    VAR <- vector(mode="list", length=ntypes)
+    for(m in 1:ntypes) {
+      v <- 0
+      Pum <- P.um[[m]]
+      for(i in 1:ncoef) {
+        for(j in 1:ncoef) {
+          v <- v + Pum[[i]] * vc[i,j] * Pum[[j]]
+        }
+      }
+      VAR[[m]] <- v
+    }
+    names(VAR) <- types
+    VAR <- as.solist(VAR)
+    SE <- as.solist(lapply(VAR, sqrt))
+    return(SE)
+  }
+  
+  SEprobPoints <- function(model, probvalues) {
+    ## probvalues is a matrix with row=location and column=type
+    types <- colnames(probvalues)
+    ntypes <- length(types)
+    ## canonical covariates
+    S.um <- modmats(model)
+    S.um <- lapply(S.um, as.matrix)
+    ## S.um is a list of matrices, one for each possible type,
+    ## each matrix having rows=locations and cols=covariates
+    ## Weight each matrix by its mark probabilities
+    SW <- mapply("*",
+                 e1=S.um,
+                 e2=as.list(as.data.frame(probvalues)),
+                 SIMPLIFY=FALSE)
+    ## average them
+    Sbar.u <- Reduce("+", SW)
+    ## Sbar.u is a matrix with rows=locations and cols=covariates
+    Sdif.um <- lapply(S.um, "-", e2=Sbar.u)
+    ## Sdif.um is a list of matrices like S.um
+    P.um <- mapply("*",
+                   e1=Sdif.um, 
+                   e2=as.list(as.data.frame(probvalues)),
+                   SIMPLIFY=FALSE)
+    ## P.um likewise
+    vc <- vcov(model)
+    VAR <- lapply(P.um, quadform, v=vc)
+    VAR <- do.call(cbind, VAR)
+    SE <- sqrt(VAR)
+    colnames(SE) <- types
+    return(SE)
+  }
+  
+  predictfortype <- function(type, model, types, loc) {
+    predict(model, locations=loc %mark% factor(type, levels=types))
+  }
+
+  killglitches <- function(z, eps=.Machine$double.eps) {
+    ra <- range(z, finite=TRUE)
+    if(max(abs(ra)) < eps) {
+      z[] <- 0
+      return(z)
+    }
+    if(diff(ra) < eps) 
+      z[] <- mean(z, na.rm=TRUE)
+    return(z)
+  }
+
+  relrisk.ppm
+})
+
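+## Usage sketch (illustration only):
+##   fit <- ppm(amacrine ~ marks * x)
+##   plot(relrisk(fit))                    # P(case) as a pixel image
+##   relrisk(fit, at="points", se=TRUE)    # estimates with standard errors
+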
diff --git a/R/replace.ppp.R b/R/replace.ppp.R
new file mode 100755
index 0000000..fd4bcdb
--- /dev/null
+++ b/R/replace.ppp.R
@@ -0,0 +1,74 @@
+#
+# replace.ppp.R
+#
+
+
+"[<-.ppp" <-
+  function(x, i, j, value) {
+    verifyclass(x, "ppp")
+    verifyclass(value, "ppp")
+    
+    if(missing(i) && missing(j))
+      return(value)
+
+    if(missing(i)) {
+      message("The use of argument j in [<-.ppp is deprecated; use argument i")
+      # invoke code below
+      x[j] <- value
+      return(x)
+    }
+
+    xmf <- markformat(x)
+    vmf <- markformat(value)
+    if(xmf != vmf) {
+      if(xmf == "none")
+        stop("Replacement points are marked, but x is not marked")
+      else if(vmf == "none")
+        stop("Replacement points have no marks, but x is marked")
+      else
+        stop("Format of marks in replacement is incompatible with original")
+    }
+    
+    if(inherits(i, "owin")) {
+      win <- i
+      vok <- inside.owin(value$x, value$y, win)
+      if(!all(vok)) {
+        warning("Replacement points outside the specified window were deleted")
+        value <- value[vok]
+      }
+      # convert to vector index
+      i <- inside.owin(x$x, x$y, win)
+    }
+    if(!is.vector(i))
+      stop("Unrecognised format for subset index i")
+    
+    # vector index
+    # determine index subset
+    n <- x$n
+    SUB <- seq_len(n)[i]
+    # anything to replace?
+    if(length(SUB) == 0)
+      return(x)
+    # sanity checks
+    if(anyNA(SUB))
+      stop("Invalid subset: the resulting subscripts include NAs")
+    # exact replacement of this subset?
+    if(value$n == length(SUB)) {
+      x$x[SUB] <- value$x
+      x$y[SUB] <- value$y
+      switch(xmf,
+             none={},
+             list=,
+             vector={ x$marks[SUB] <- value$marks },
+             dataframe={ x$marks[SUB,] <- value$marks })
+    } else 
+      x <- superimpose(x[-SUB], value, W=x$window)
+
+    if(!missing(j)) {
+      warning("The use of argument j in [<-.ppp is deprecated; use argument i")
+      # invoke code above
+      x[j] <- value
+    }
+      
+    return(x)
+}
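+
+## Usage sketch (illustration only):
+##   X <- cells
+##   W <- owin(c(0, 0.5), c(0, 0.5))
+##   X[W] <- runifpoint(10, W)   # replace the points of X that fall in W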
diff --git a/R/rescale.R b/R/rescale.R
new file mode 100755
index 0000000..11298f3
--- /dev/null
+++ b/R/rescale.R
@@ -0,0 +1,65 @@
+#
+#
+#   rescale.R
+#
+#   $Revision: 1.6 $ $Date: 2014/10/24 00:22:30 $
+#
+#
+
+rescale <- function(X, s, unitname) {
+  UseMethod("rescale")
+}
+
+rescale.ppp <- function(X, s, unitname) {
+  if(missing(unitname)) unitname <- NULL
+  if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier
+  Y <- affine.ppp(X, mat=diag(c(1/s,1/s)))
+  unitname(Y) <- rescale(unitname(X), s, unitname)
+  return(Y)
+}
+
+rescale.owin <- function(X, s, unitname) {
+  if(missing(unitname)) unitname <- NULL
+  if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier
+  Y <- affine.owin(X, mat=diag(c(1/s,1/s)))
+  unitname(Y) <- rescale(unitname(X), s, unitname)
+  return(Y)
+}
+
+rescale.im <- function(X, s, unitname) {
+  if(missing(unitname)) unitname <- NULL
+  if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier
+  Y <- X
+  Y$xrange <- X$xrange/s
+  Y$yrange <- X$yrange/s
+  Y$xstep  <- X$xstep/s
+  Y$ystep  <- X$ystep/s
+  Y$xcol   <- X$xcol/s
+  Y$yrow   <- X$yrow/s
+  unitname(Y) <- rescale(unitname(X), s, unitname)
+  return(Y)
+}
+
+rescale.psp <- function(X, s, unitname) {
+  if(missing(unitname)) unitname <- NULL
+  if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier
+  Y <- affine.psp(X, mat=diag(c(1/s,1/s)))
+  unitname(Y) <- rescale(unitname(X), s, unitname)
+  return(Y)
+}
+  
+rescale.units <- function(X, s, unitname) {
+  if(!missing(unitname) && !is.null(unitname)) return(as.units(unitname))
+  if(summary(X)$vanilla)
+    return(X)
+  if(missing(s)) {
+    X$multiplier <- 1
+  } else {
+    if(!is.numeric(s) || length(s) != 1 || s <= 0)
+      stop("s should be a positive number")
+    X$multiplier <- s * X$multiplier
+  }
+  return(X)
+}
+
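+## Usage sketch ('swedishpines' coordinates are recorded in decimetres):
+##   X <- rescale(swedishpines, 10, "metres")   # coordinates divided by 10
+##   unitname(X)                                # "metres"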
+
diff --git a/R/rescue.rectangle.R b/R/rescue.rectangle.R
new file mode 100755
index 0000000..b4656d7
--- /dev/null
+++ b/R/rescue.rectangle.R
@@ -0,0 +1,33 @@
+#
+#    rescue.rectangle.R
+# 
+#    $Revision: 1.6 $   $Date: 2008/06/15 14:53:11 $
+#
+rescue.rectangle <- function(W) {
+  verifyclass(W, "owin")
+
+  if(W$type == "mask" && all(W$m))
+     return(owin(W$xrange, W$yrange, unitname=unitname(W)))
+
+  if(W$type == "polygonal" && length(W$bdry) == 1) {
+    x <- W$bdry[[1]]$x
+    y <- W$bdry[[1]]$y
+    if(length(x) == 4 && length(y) == 4) {
+      # could be a rectangle
+      veryunique <- function(z) {
+        uz <- sort(unique(z))
+        epsilon <- 2 * .Machine$double.eps * diff(range(uz))
+        close <- (diff(uz) <= epsilon)
+        uz <- uz[c(TRUE, !close)]
+        return(uz)
+      }
+      ux <- veryunique(x)
+      uy <- veryunique(y)
+      if(length(ux) == 2 && length(uy) == 2)
+        return(owin(ux,uy, unitname=unitname(W)))
+    }
+  }
+  
+  return(W)
+}
+
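+## Example: a polygonal window that is geometrically a rectangle
+##   W <- owin(poly=list(x=c(0,1,1,0), y=c(0,0,1,1)))
+##   rescue.rectangle(W)$type   # "rectangle"
+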
diff --git a/R/resid4plot.R b/R/resid4plot.R
new file mode 100755
index 0000000..7aeec94
--- /dev/null
+++ b/R/resid4plot.R
@@ -0,0 +1,693 @@
+#
+#
+#   Residual plots:
+#         resid4plot       four panels with matching coordinates
+#         resid1plot       one or more unrelated individual plots 
+#         resid1panel      one panel of resid1plot
+#
+#   $Revision: 1.34 $    $Date: 2016/04/19 00:11:51 $
+#
+#
+
+resid4plot <- local({
+
+  Contour <- function(...,
+                      pch, chars, cols, etch, size,
+                      maxsize, meansize, markscale, symap, zap,
+                      legend, leg.side, leg.args) {
+    ## avoid passing arguments of plot.ppp to contour.default
+    contour(...)
+  }
+
+  do.clean <- function(fun, ..., 
+                       pch, chars, cols, etch, size,
+                       maxsize, meansize, markscale, symap, zap,
+                       legend, leg.side, leg.args,
+                       nlevels, levels, labels, drawlabels, labcex) {
+    ## avoid passing arguments of plot.ppp, contour.default to other functions
+    do.call(fun, list(...)) 
+  }
+
+  do.lines <- function(x, y, defaulty=1, ...) {
+    do.call(lines,
+            resolve.defaults(list(x, y),
+                             list(...),
+                             list(lty=defaulty)))
+  }
+
+  resid4plot <-
+    function(RES,
+             plot.neg=c("image", "discrete", "contour", "imagecontour"),
+             plot.smooth=c("imagecontour", "image", "contour", "persp"),
+             spacing=0.1, outer=3, srange=NULL, monochrome=FALSE, main=NULL,
+             xlab="x coordinate", ylab="y coordinate", rlab,
+             col.neg=NULL, col.smooth=NULL,
+             ...)
+{
+  plot.neg <- match.arg(plot.neg)
+  if(missing(rlab)) rlab <- NULL
+  rlablines <- if(is.null(rlab)) 1 else sum(nzchar(rlab))
+  clip     <- RES$clip
+  Yclip    <- RES$Yclip
+  Z        <- RES$smooth$Z
+  W        <- RES$W
+  Wclip    <- Yclip$window
+  type     <- RES$type
+  typename <- RES$typename
+  Ydens    <- RES$Ydens[Wclip, drop=FALSE]
+  Ymass    <- RES$Ymass[Wclip]
+  # set up 2 x 2 plot with space
+  wide <- diff(W$xrange)
+  high <- diff(W$yrange)
+  space <- spacing * max(wide,high)
+  width <- wide + space + wide
+  height <- high + space + high
+  outerspace <- outer * space
+  outerRspace <- (outer - 1 + rlablines) * space
+  plot(c(0, width) + c(-outerRspace, outerspace),
+       c(0, height) + c(-outerspace, outerRspace),
+       type="n", asp=1.0, axes=FALSE, xlab="", ylab="")
+  # determine colour map for background
+  if(is.null(srange)) {
+    Yrange <- if(!is.null(Ydens)) summary(Ydens)$range else NULL
+    Zrange <- if(!is.null(Z)) summary(Z)$range else NULL
+    srange <- range(c(0, Yrange, Zrange), na.rm=TRUE)
+  } else {
+    stopifnot(is.numeric(srange) && length(srange) == 2)
+    stopifnot(all(is.finite(srange)))
+  }
+  backcols <- beachcolours(srange, if(type=="eem") 1 else 0, monochrome)
+  if(is.null(col.neg)) col.neg <- backcols
+  if(is.null(col.smooth)) col.smooth <- backcols
+  
+  # ------ plot residuals/marks (in top left panel) ------------
+  Xlowleft <- c(W$xrange[1],W$yrange[1])
+  vec <- c(0, high) + c(0, space) - Xlowleft
+  # shift the original window
+  Ws <- shift(W, vec)
+  # shift the residuals 
+  Ys <- shift(Yclip,vec)
+
+  # determine whether pre-plotting the window(s) is redundant
+  redundant <- 
+    (plot.neg == "image") && (type != "eem") && (Yclip$window$type == "mask")
+
+  # pre-plot the window(s)
+  if(!redundant) {
+    if(!clip) 
+      do.clean(plot, Ys$window, add=TRUE, ...)
+    else
+      do.clean(ploterodewin, Ws, Ys$window, add=TRUE, ...)
+  }
+
+  ## adjust position of legend associated with eroded window
+  sep <- if(clip) Wclip$yrange[1] - W$yrange[1] else NULL
+  
+  ## decide whether mark scale should be shown
+  showscale <- (type != "raw")
+  
+  switch(plot.neg,
+         discrete={
+           neg <- (Ys$marks < 0)
+           ## plot negative masses of discretised measure as squares
+           if(any(c("maxsize","meansize","markscale") %in% names(list(...)))) {
+             plot(Ys[neg], add=TRUE, legend=FALSE, ...)
+           } else {
+             hackmax <- 0.5 * sqrt(area(Wclip)/Yclip$n)
+             plot(Ys[neg], add=TRUE, legend=FALSE, maxsize=hackmax, ...)
+           }
+           ## plot positive masses at atoms
+           plot(Ys[!neg], add=TRUE,
+                leg.side="left", leg.args=list(sep=sep),
+                show.all=TRUE, main="",
+                ...)
+         },
+         contour = {
+           Yds <- shift(Ydens, vec)
+           Yms <- shift(Ymass, vec)
+           Contour(Yds, add=TRUE, ...)
+           do.call(plot,
+                   resolve.defaults(list(x=Yms, add=TRUE),
+                                    list(...), 
+                                    list(use.marks=showscale,
+                                         leg.side="left", show.all=TRUE,
+                                         main="", leg.args=list(sep=sep))))
+         },
+         imagecontour=,
+         image={
+           Yds <- shift(Ydens, vec)
+           Yms <- shift(Ymass, vec)
+           if(redundant)
+             do.clean(ploterodeimage,
+                      Ws, Yds, rangeZ=srange, colsZ=col.neg,
+                      ...)
+           else if(type != "eem")
+             do.clean(image,
+                      Yds, add=TRUE, ribbon=FALSE,
+                      col=col.neg, zlim=srange,
+                      ...)
+           if(plot.neg == "imagecontour")
+             Contour(Yds, add=TRUE, ...)
+           ## plot positive masses at atoms
+           do.call(plot,
+                   resolve.defaults(list(x=Yms, add=TRUE),
+                                    list(...),
+                                    list(use.marks=showscale,
+                                         leg.side="left", show.all=TRUE,
+                                         main="", leg.args=list(sep=sep))))
+         }
+         )
+  # --------- plot smoothed surface (in bottom right panel) ------------
+  vec <- c(wide, 0) + c(space, 0) - Xlowleft
+  Zs <- shift.im(Z, vec)
+  switch(plot.smooth,
+         image={
+           do.clean(image,
+                    Zs, add=TRUE, col=col.smooth,
+                    zlim=srange, ribbon=FALSE,
+                    ...)
+         },
+         contour={
+           Contour(Zs, add=TRUE, ...)
+         },
+         persp={ warning("persp not available in 4-panel plot") },
+         imagecontour={
+             do.clean(image,
+                      Zs, add=TRUE, col=col.smooth, zlim=srange, ribbon=FALSE,
+                      ...)
+             Contour(Zs, add=TRUE, ...)
+           }
+         )
+  lines(Zs$xrange[c(1,2,2,1,1)], Zs$yrange[c(1,1,2,2,1)])
+  # -------------- lurking variable plots -----------------------
+  # --------- lurking variable plot for x coordinate ------------------
+  #           (cumulative or marginal)
+  #           in bottom left panel
+  if(!is.null(RES$xmargin)) {
+    a <- RES$xmargin
+    observedV <-    a$xZ
+    observedX <-    a$x
+    theoreticalV <- a$ExZ
+    theoreticalX <- a$x
+    theoreticalSD <- theoreticalHI <- theoreticalLO <- NULL
+    if(is.null(rlab)) rlab <- paste("marginal of", typename)
+  } else if(!is.null(RES$xcumul)) {
+    a <- RES$xcumul
+    observedX <- a$empirical$covariate
+    observedV <- a$empirical$value
+    theoreticalX <- a$theoretical$covariate
+    theoreticalV <- a$theoretical$mean
+    theoreticalSD <- a$theoretical$sd
+    theoreticalHI <- a$theoretical$upper
+    theoreticalLO <- a$theoretical$lower
+    if(is.null(rlab)) rlab <- paste("cumulative sum of", typename)
+  }
+  # pretty axis marks
+  pX <- pretty(theoreticalX)
+  rV <- range(0, observedV, theoreticalV, theoreticalHI, theoreticalLO)
+  if(!is.null(theoreticalSD))
+    rV <- range(rV,
+                theoreticalV+2*theoreticalSD,
+                theoreticalV-2*theoreticalSD)
+  pV <- pretty(rV)
+  # rescale smoothed values
+  rr <- range(c(0, observedV, theoreticalV, pV))
+  yscale <- function(y) { high * (y - rr[1])/diff(rr) }
+  xscale <- function(x) { x - W$xrange[1] }
+  if(!is.null(theoreticalHI)) 
+    do.call.matched(polygon,
+                    resolve.defaults(
+                      list(x=xscale(c(theoreticalX, rev(theoreticalX))),
+                           y=yscale(c(theoreticalHI, rev(theoreticalLO)))),
+                      list(...),
+                      list(col="grey", border=NA)))
+  do.clean(do.lines, xscale(observedX), yscale(observedV), 1, ...)
+  do.clean(do.lines, xscale(theoreticalX), yscale(theoreticalV), 2, ...)
+  if(!is.null(theoreticalSD)) {
+    do.clean(do.lines,
+             xscale(theoreticalX),
+             yscale(theoreticalV + 2 * theoreticalSD),
+             3, ...)
+    do.clean(do.lines,
+             xscale(theoreticalX),
+             yscale(theoreticalV - 2 * theoreticalSD),
+             3, ...)
+  }
+  axis(side=1, pos=0, at=xscale(pX), labels=pX)
+  text(xscale(mean(theoreticalX)), - outerspace, xlab)
+  axis(side=2, pos=0, at=yscale(pV), labels=pV)
+  text(-outerRspace, yscale(mean(pV)), rlab, srt=90)
+  
+  # --------- lurking variable plot for y coordinate ------------------
+  #           (cumulative or marginal)
+  #           in top right panel
+  if(!is.null(RES$ymargin)) {
+    a <- RES$ymargin
+    observedV <-    a$yZ
+    observedY <-    a$y
+    theoreticalV <- a$EyZ
+    theoreticalY <- a$y
+    theoreticalSD <- theoreticalHI <- theoreticalLO <- NULL
+    if(is.null(rlab)) rlab <- paste("marginal of", typename)
+  } else if(!is.null(RES$ycumul)) {
+    a <- RES$ycumul
+    observedV <- a$empirical$value
+    observedY <- a$empirical$covariate
+    theoreticalY <- a$theoretical$covariate
+    theoreticalV <- a$theoretical$mean
+    theoreticalSD <- a$theoretical$sd
+    theoreticalHI <- a$theoretical$upper
+    theoreticalLO <- a$theoretical$lower
+    if(is.null(rlab)) rlab <- paste("cumulative sum of", typename)
+  }
+  # pretty axis marks
+  pY <- pretty(theoreticalY)
+  rV <- range(0, observedV, theoreticalV, theoreticalHI, theoreticalLO)
+  if(!is.null(theoreticalSD))
+    rV <- range(rV,
+                theoreticalV+2*theoreticalSD,
+                theoreticalV-2*theoreticalSD)
+  pV <- pretty(rV)
+  # rescale smoothed values
+  rr <- range(c(0, observedV, theoreticalV, pV))
+  yscale <- function(y) { y - W$yrange[1] + high + space}
+  xscale <- function(x) { wide + space + wide * (rr[2] - x)/diff(rr) }
+  if(!is.null(theoreticalHI)) 
+    do.call.matched(polygon,
+                    resolve.defaults(
+                      list(x=xscale(c(theoreticalHI, rev(theoreticalLO))),
+                           y=yscale(c(theoreticalY,  rev(theoreticalY)))),
+                      list(...),
+                      list(col="grey", border=NA)))
+  do.clean(do.lines, xscale(observedV), yscale(observedY), 1, ...)
+  do.clean(do.lines, xscale(theoreticalV), yscale(theoreticalY), 2, ...)
+  if(!is.null(theoreticalSD)) {
+    do.clean(do.lines,
+             xscale(theoreticalV+2*theoreticalSD),
+             yscale(theoreticalY),
+             3, ...)
+    do.clean(do.lines,
+             xscale(theoreticalV-2*theoreticalSD),
+             yscale(theoreticalY),
+             3, ...)
+  }
+  axis(side=4, pos=width, at=yscale(pY), labels=pY)
+  text(width + outerspace, yscale(mean(theoreticalY)), ylab, srt=90)
+  axis(side=3, pos=height, at=xscale(pV), labels=pV)
+  text(xscale(mean(pV)), height + outerRspace, rlab)
+  #
+  if(!is.null(main))
+    title(main=main)
+  invisible(NULL)
+}
+
+  resid4plot
+})
+
+#
+#
+#   Residual plot: single panel(s)
+#
+#
+
+resid1plot <- local({
+
+  Contour <- function(...,
+                      pch, chars, cols, etch, size,
+                      maxsize, meansize, markscale, symap, zap,
+                      legend, leg.side, leg.args) {
+    ## avoid passing arguments of plot.ppp to contour.default
+    contour(...)
+  }
+
+  do.clean <- function(fun, ..., 
+                       pch, chars, cols, etch, size,
+                       maxsize, meansize, markscale, symap, zap,
+                       legend, leg.side, leg.args,
+                       nlevels, levels, labels, drawlabels, labcex) {
+    ## avoid passing arguments of plot.ppp, contour.default to other functions
+    do.call(fun, list(...)) 
+  }
+
+  resid1plot <- 
+  function(RES, opt,
+           plot.neg=c("image", "discrete", "contour", "imagecontour"),
+           plot.smooth=c("imagecontour", "image", "contour", "persp"),
+           srange=NULL, monochrome=FALSE, main=NULL,
+           add=FALSE, show.all=!add, do.plot=TRUE,
+           col.neg=NULL, col.smooth=NULL, 
+           ...) {
+    plot.neg <- match.arg(plot.neg)
+    plot.smooth <- match.arg(plot.smooth)
+    if(!any(unlist(opt[c("all", "marks", "smooth",
+                         "xmargin", "ymargin", "xcumul", "ycumul")])))
+      return(invisible(NULL))
+    if(!add && do.plot) {
+      ## determine size of plot area by calling again with do.plot=FALSE
+      cl <- match.call()
+      cl$do.plot <- FALSE
+      b <- eval(cl, parent.frame())
+      bb <- as.owin(b, fatal=FALSE)
+      if(is.owin(bb)) {
+        ## initialise plot area
+        plot(bb, type="n", main="")
+        force(show.all)
+        add <- TRUE
+      }
+    }
+    ## extract info
+    clip  <- RES$clip
+    Y     <- RES$Y
+    Yclip <- RES$Yclip
+    Z     <- RES$smooth$Z
+    W     <- RES$W
+    Wclip <- Yclip$window
+    type  <- RES$type
+    Ydens <- RES$Ydens[Wclip, drop=FALSE]
+    Ymass <- RES$Ymass[Wclip]
+    ## determine colour map
+    if(opt$all || opt$marks || opt$smooth) {
+      if(is.null(srange)) {
+        Yrange <- if(!is.null(Ydens)) summary(Ydens)$range else NULL
+        Zrange <- if(!is.null(Z)) summary(Z)$range else NULL
+        srange <- range(c(0, Yrange, Zrange), na.rm=TRUE)
+      }
+      backcols <- beachcolours(srange, if(type=="eem") 1 else 0, monochrome)
+      if(is.null(col.neg)) col.neg <- backcols
+      if(is.null(col.smooth)) col.smooth <- backcols
+    }
+    ## determine main heading
+    if(is.null(main)) {
+      prefix <- if(opt$marks) NULL else
+      if(opt$smooth) "Smoothed" else
+      if(opt$xcumul) "Lurking variable plot for x coordinate\n" else 
+      if(opt$ycumul) "Lurking variable plot for y coordinate\n" else
+      if(opt$xmargin) "Lurking variable plot for x coordinate\n" else
+      if(opt$ymargin) "Lurking variable plot for y coordinate\n" else NULL
+      main <- paste(prefix, RES$typename)
+    }
+    ## ------------- residuals ---------------------------------
+    if(opt$marks) {
+      ## determine whether pre-plotting the window(s) is redundant
+      redundant <- (plot.neg == "image") &&
+                   (type != "eem") && (Yclip$window$type == "mask")
+      ## pre-plot the window(s)
+      if(redundant && !add) {
+        z <- do.clean(plot,
+                      as.rectangle(W), box=FALSE, main="",
+                      do.plot=do.plot, ...)
+      } else {
+        if(!clip) 
+          z <- do.clean(plot,
+                        W, main="",
+                        add=add, show.all=show.all, do.plot=do.plot, ...)
+        else
+          z <- do.clean(ploterodewin,
+                        W, Wclip, main="",
+                        add=add, show.all=show.all, do.plot=do.plot, ...)
+      }
+      bb <- as.owin(z)
+
+      switch(plot.neg,
+             discrete={
+               neg <- (Y$marks < 0)
+               ## plot negative masses of discretised measure as squares
+               if(any(c("maxsize", "markscale") %in% names(list(...)))) {
+                 z <- plot(Y[neg], add=TRUE,
+                          show.all=show.all, do.plot=do.plot, ...)
+               } else {
+                 hackmax <- 0.5 * sqrt(area(Wclip)/Yclip$n)
+                 z <- plot(Y[neg], add=TRUE, maxsize=hackmax,
+                           show.all=show.all, do.plot=do.plot, ...)
+               }
+               ## plot positive masses at atoms
+               zp <- plot(Y[!neg], add=TRUE,
+                          show.all=show.all, do.plot=do.plot, ...)
+               bb <- boundingbox(bb, z, zp)
+           },
+           contour = {
+             z <- Contour(Ydens, add=TRUE, do.plot=do.plot, ...)
+             bb <- boundingbox(bb, z)
+           },
+           imagecontour=,
+           image={
+             if(redundant) {
+               z <- do.clean(ploterodeimage,
+                             W, Ydens, rangeZ=srange, colsZ=col.neg,
+                             add=add, show.all=show.all, main="", 
+                             do.plot=do.plot, ...)
+             } else if(type != "eem") {
+               z <- do.clean(image,
+                             Ydens, col=col.neg, zlim=srange, ribbon=FALSE,
+                             add=TRUE, show.all=show.all, do.plot=do.plot,
+                             main="", ...)
+             }
+             bb <- boundingbox(bb, z)
+             if(plot.neg == "imagecontour") {
+               z <- Contour(Ydens, add=TRUE,
+                            show.all=show.all, do.plot=do.plot, ...)
+               bb <- boundingbox(bb, z)
+             }
+             ## decide whether mark scale should be shown
+             showscale <- (type != "raw")
+             ## plot positive masses at atoms
+             z <- do.call(plot,
+                          resolve.defaults(list(x=Ymass, add=TRUE),
+                                           list(...),
+                                           list(use.marks=showscale,
+                                                do.plot=do.plot)))
+             bb <- boundingbox(bb, z)
+           }
+           )
+    if(do.plot && show.all) title(main=main)
+  }
+  # -------------  smooth -------------------------------------
+  if(opt$smooth) {
+    if(!clip) {
+      switch(plot.smooth,
+           image={
+             z <- do.clean(image,
+                           Z, main="", axes=FALSE, xlab="", ylab="",
+                           col=col.smooth, zlim=srange, ribbon=FALSE,
+                           do.plot=do.plot, add=add, show.all=show.all, ...)
+             bb <- as.owin(z)
+           },
+           contour={
+             z <- Contour(Z, main="", axes=FALSE, xlab="", ylab="",
+                          do.plot=do.plot, add=add, show.all=show.all, ...)
+             bb <- as.owin(z)
+           },
+           persp={
+             if(do.plot)
+               do.clean(persp,
+                        Z, main="", axes=FALSE, xlab="", ylab="", ...)
+             bb <- NULL
+           },
+           imagecontour={
+             z <- do.clean(image,
+                           Z, main="", axes=FALSE, xlab="", ylab="",
+                           col=col.smooth, zlim=srange, ribbon=FALSE,
+                           do.plot=do.plot, add=add, show.all=show.all, ...)
+             Contour(Z, add=TRUE, do.plot=do.plot, ...)
+             bb <- as.owin(z)
+           }
+             )
+      if(do.plot && show.all) title(main=main)             
+    } else {
+      switch(plot.smooth,
+             image={
+               do.clean(plot,
+                        as.rectangle(W), box=FALSE, main=main,
+                        do.plot=do.plot, add=add, ...)
+               z <- do.clean(ploterodeimage,
+                             W, Z, colsZ=col.smooth, rangeZ=srange,
+                             do.plot=do.plot, ...)
+               bb <- boundingbox(as.rectangle(W), z)
+             },
+             contour={
+               do.clean(plot,
+                        W, main=main,
+                        do.plot=do.plot, add=add, show.all=show.all, ...)
+               z <- Contour(Z, add=TRUE,
+                            show.all=show.all, do.plot=do.plot, ...)
+               bb <- as.owin(z)
+             },
+             persp={
+               if(do.plot) 
+                 do.clean(persp,
+                          Z, main=main, axes=FALSE, xlab="", ylab="", ...)
+               bb <- NULL
+             },
+             imagecontour={
+               do.clean(plot,
+                        as.rectangle(W), box=FALSE, main=main,
+                        do.plot=do.plot, add=add, ...)
+               z <- do.clean(ploterodeimage,
+                             W, Z, colsZ=col.smooth, rangeZ=srange,
+                             do.plot=do.plot, ...)
+               Contour(Z, add=TRUE, do.plot=do.plot, ...)
+               bb <- as.owin(z)
+             }
+             )
+    }
+  }
+
+  # ------------  cumulative x -----------------------------------------
+  if(opt$xcumul) {
+    a <- RES$xcumul
+    obs <- a$empirical
+    theo <- a$theoretical
+    do.clean(resid1panel,
+             obs$covariate, obs$value,
+             theo$covariate, theo$mean, theo$sd,
+             "x coordinate", "cumulative mark", main=main,
+             ...,
+             do.plot=do.plot)
+    bb <- NULL
+  }
+  
+  # ------------  cumulative y -----------------------------------------
+  if(opt$ycumul) {
+    a <- RES$ycumul
+    obs <- a$empirical
+    theo <- a$theoretical
+    do.clean(resid1panel,
+             obs$covariate, obs$value,
+             theo$covariate, theo$mean, theo$sd,
+             "y coordinate", "cumulative mark", main=main,
+             ...,
+             do.plot=do.plot)
+    bb <- NULL
+  }
+  ## ------------  x margin -----------------------------------------
+  if(opt$xmargin) {
+    a <- RES$xmargin
+    do.clean(resid1panel,
+             a$x, a$xZ, a$x, a$ExZ, NULL,
+             "x coordinate", "marginal of residuals", main=main,
+             ...,
+             do.plot=do.plot)
+    bb <- NULL
+  }
+  # ------------  y margin -----------------------------------------
+  if(opt$ymargin) {
+    a <- RES$ymargin
+    do.clean(resid1panel,
+             a$y, a$yZ, a$y, a$EyZ, NULL,
+             "y coordinate", "marginal of residuals", main=main,
+             ...,
+             do.plot=do.plot)
+    bb <- NULL
+  }
+
+  attr(bb, "bbox") <- bb  
+  return(invisible(bb))
+}
+
+resid1plot
+})
+
+
+resid1panel <- local({
+
+  do.lines <- function(x, y, defaulty=1, ...) {
+      do.call(lines,
+              resolve.defaults(list(x, y),
+                               list(...),
+                               list(lty=defaulty)))
+  }
+
+resid1panel <- function(observedX, observedV,
+                        theoreticalX, theoreticalV, theoreticalSD, xlab, ylab,
+                        ..., do.plot=TRUE)
+{
+  if(!do.plot) return(NULL)
+  ## work out plot range
+  rX <- range(observedX, theoreticalX)
+  rV <- range(c(0, observedV, theoreticalV))
+  if(!is.null(theoreticalSD))
+    rV <- range(c(rV, theoreticalV + 2*theoreticalSD,
+                  theoreticalV - 2*theoreticalSD))
+  ## argument handling
+  ## start plot
+  plot(rX, rV, type="n", xlab=xlab, ylab=ylab, ...)
+  do.lines(observedX, observedV, 1, ...)
+  do.lines(theoreticalX, theoreticalV, 2, ...)
+  if(!is.null(theoreticalSD)) {
+    do.lines(theoreticalX, theoreticalV + 2 * theoreticalSD, 3, ...)
+    do.lines(theoreticalX, theoreticalV - 2 * theoreticalSD, 3, ...)
+  }
+}
+
+resid1panel
+})
+
+#
+#
+ploterodewin <- function(W1, W2, col.edge=grey(0.75), col.inside=rgb(1,0,0),
+                         do.plot=TRUE, ...) {
+  ## internal use only
+  ## W2 is assumed to be an erosion of W1
+  switch(W1$type,
+         rectangle={
+           z <- plot(W1, ..., do.plot=do.plot)
+           plot(W2, add=TRUE, lty=2, do.plot=do.plot)
+         },
+         polygonal={
+           z <- plot(W1, ..., do.plot=do.plot)
+           plot(W2, add=TRUE, lty=2, do.plot=do.plot)
+         },
+         mask={
+           Z <- as.im(W1)
+           x <- as.vector(rasterx.mask(W1))
+           y <- as.vector(rastery.mask(W1))
+           ok <- inside.owin(x, y, W2)
+           Z$v[ok] <- 2
+           z <- plot(Z, ..., col=c(col.edge, col.inside),
+                     add=TRUE, ribbon=FALSE, do.plot=do.plot)
+         }
+         )
+  return(z)
+}
+
+ploterodeimage <- function(W, Z, ..., Wcol=grey(0.75), rangeZ, colsZ,
+                           do.plot=TRUE) {
+  # Internal use only
+  # Image Z is assumed to live on a subset of mask W
+  # colsZ are the colours for the values in the range 'rangeZ'
+
+  if(!is.mask(W)) {
+    if(do.plot)
+      plot(W, add=TRUE)
+    W <- as.mask(W)
+  }
+  
+  # Extend the colour map to include an extra colour for pixels in W
+  # (1) Add the desired colour of W to the colour map
+  pseudocols <- c(Wcol, colsZ)
+  # (2) Breakpoints
+  bks <- seq(from=rangeZ[1], to=rangeZ[2], length=length(colsZ)+1)
+  dZ <- diff(bks)[1]
+  pseudobreaks <- c(rangeZ[1] - dZ, bks)
+  # (3) Determine a fake value for pixels in W
+  Wvalue <- rangeZ[1] - dZ/2
+
+  # Create composite image on W grid
+  # (with W-pixels initialised to Wvalue)
+  X <- as.im(Wvalue, W)
+  # Look up Z-values of W-pixels
+  xx <- as.vector(rasterx.mask(W))
+  yy <- as.vector(rastery.mask(W))
+  Zvalues <- lookup.im(Z, xx, yy, naok = TRUE, strict=FALSE)
+  # Overwrite pixels in Z
+  inZ <- !is.na(Zvalues)
+  X$v[inZ] <- Zvalues[inZ]
+
+  z <- image(X, ..., add=TRUE, ribbon=FALSE, 
+             col=pseudocols, breaks=pseudobreaks,
+             do.plot=do.plot)
+  out <- list(X, pseudocols, pseudobreaks)
+  attr(out, "bbox") <- as.owin(z)
+  return(out)
+}
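+
+# A standalone sketch (illustrative, with made-up numbers) of the
+# colour-map trick used above: prepend one colour and one breakpoint
+# below the data range, so pixels holding the fake value
+# Wvalue = rangeZ[1] - dZ/2 fall into the extra bin and render in Wcol,
+# while genuine Z values map onto colsZ unchanged:
+#
+#   cols <- c("grey", heat.colors(4))       # Wcol followed by data colours
+#   bks  <- seq(0, 1, length=5)             # breaks for data range [0,1]
+#   pseudobreaks <- c(-diff(bks)[1], bks)   # one extra bin below the range
+#   # image(..., col=cols, breaks=pseudobreaks) paints value -0.125 grey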
+
+
+  
diff --git a/R/residppm.R b/R/residppm.R
new file mode 100755
index 0000000..5db2603
--- /dev/null
+++ b/R/residppm.R
@@ -0,0 +1,119 @@
+#
+#  residppm.R
+#
+# computes residuals for fitted point process model
+#
+#
+# $Revision: 1.24 $ $Date: 2016/06/30 03:29:54 $
+#
+
+residuals.ppm <-
+  function(object, type="raw", ...,
+           check=TRUE, drop=FALSE,
+           fittedvalues = NULL,
+           new.coef=NULL, dropcoef=FALSE,
+           quad=NULL) {
+  
+  verifyclass(object, "ppm")
+  trap.extra.arguments(..., .Context="In residuals.ppm")
+
+  type <- pickoption("type", type,
+                     c(inverse="inverse",
+                       raw="raw",
+                       pearson="pearson",
+                       Pearson="pearson",
+                       score="score"))
+  typenames <- c(inverse="inverse-lambda residuals",
+                 raw="raw residuals",
+                 pearson="Pearson residuals",
+                 score="score residuals")
+  typename <- typenames[[type]]
+
+  given.fitted <- !missing(fittedvalues) && !is.null(fittedvalues)
+
+  # ................. determine fitted values .................
+
+  NewCoef <- NULL
+  if(is.null(new.coef) && is.null(quad)) {
+    # use 'object' without modification
+    # validate 'object'
+    if(check && !given.fitted && damaged.ppm(object)) 
+      stop("object format corrupted; try update(object, use.internal=TRUE)")
+  } else {
+    # determine a new set of model coefficients
+    if(!is.null(new.coef)) {
+      # use specified model parameters
+      NewCoef <- new.coef
+    } else {
+      # estimate model parameters using a (presumably) denser set of dummy pts
+      # Determine new quadrature scheme
+      if(inherits(quad, "quad")) 
+        hi.res.quad <- quad
+      else if(is.ppp(quad))
+        hi.res.quad <- quadscheme(data=data.ppm(object), dummy=quad)
+      else {
+        # assume 'quad' is a list of arguments to 'quadscheme'
+        hi.res.quad <- do.call(quadscheme,
+                               append(list(data.ppm(object)),
+                                      quad))
+      }
+      # refit the model with new quadscheme
+      hi.res.fit <- update(object, hi.res.quad)
+      NewCoef <- coef(hi.res.fit)
+    }
+  }
+  #' now compute fitted values using new coefficients
+  if(!given.fitted) 
+    fittedvalues <- fitted(object, drop=drop, check=check,
+                           new.coef=NewCoef, dropcoef=dropcoef)
+
+  # ..................... compute residuals .....................
+
+  # Extract quadrature points and weights
+  Q <- quad.ppm(object, drop=drop, clip=drop)
+#  U <- union.quad(Q) # quadrature points
+  Z <- is.data(Q) # indicator data/dummy
+#  W <- w.quad(Q) # quadrature weights
+
+  # Compute fitted conditional intensity at quadrature points
+  lambda <- fittedvalues
+
+  # indicator is 1 if lambda > 0
+  # (adjusted for numerical behaviour of predict.glm)
+  indicator <- (lambda > .Machine$double.eps)
+
+  if(type == "score") {
+    # need the covariates
+    X <- model.matrix(object)
+    if(drop) {
+      gs <- getglmsubset(object)
+      ok <- !is.na(gs) & gs
+      X <- X[ok, , drop=FALSE]
+    }
+  }
+      
+  # Evaluate residual measure components
+
+  discrete <- switch(type,
+                     raw     = rep.int(1, sum(Z)), 
+                     inverse = 1/lambda[Z],
+                     pearson = 1/sqrt(lambda[Z]),
+                     score   = X[Z, , drop=FALSE]
+                     )
+
+  density <- switch(type,
+                    raw     = -lambda,
+                    inverse = -indicator,
+                    pearson = -indicator * sqrt(lambda),
+                    score   = -lambda * X)
+
+  # Residual measure (return value)
+  res <- msr(Q, discrete, density)
+
+  # name the residuals
+  attr(res, "type") <- type
+  attr(res, "typename") <- typename
+
+  return(res)
+}
+
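+# Illustrative usage (a sketch, assuming the 'cells' dataset shipped
+# with spatstat):
+#
+#   fit <- ppm(cells ~ x)
+#   res <- residuals(fit, type="pearson")  # signed measure (class 'msr')
+#   plot(res)
+#   integral(res)   # near zero when the model fits well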
diff --git a/R/residuals.mppm.R b/R/residuals.mppm.R
new file mode 100755
index 0000000..1b98fba
--- /dev/null
+++ b/R/residuals.mppm.R
@@ -0,0 +1,90 @@
+#
+#  residuals.mppm.R
+#
+# computes residuals for fitted multiple point process model
+#
+#
+#  $Revision: 1.5 $ $Date: 2015/01/29 06:44:26 $
+#
+
+residuals.mppm <- function(object, type="raw", ..., 
+                          fittedvalues = fitted.mppm(object)) {
+  
+  verifyclass(object, "mppm")
+  userfitted <- !missing(fittedvalues)
+  type <- pickoption("type", type,
+                     c(inverse="inverse",
+                       raw="raw",
+                       pearson="pearson",
+                       Pearson="pearson"))
+  typenames <- c(inverse="inverse-lambda residuals",
+                 raw="raw residuals",
+                 pearson="Pearson residuals")
+  typename <- typenames[[type]]
+  
+  # Extract quadrature points and weights
+  Q <- quad.mppm(object)
+#  U <- lapply(Q, union.quad) # quadrature point patterns
+  Z <- unlist(lapply(Q, is.data)) # indicator data/dummy
+  W <- unlist(lapply(Q, w.quad)) # quadrature weights
+  # total number of quadrature points
+  nquadrature <- length(W)
+  # number of quadrature points in each pattern
+  nU <- unlist(lapply(Q, n.quad))
+  # number of rows of hyperframe
+  npat <- object$npat
+  # attribution of each quadrature point
+  id <- factor(rep(1:npat, nU), levels=1:npat)
+  
+  # Compute fitted conditional intensity at quadrature points
+
+  if(!is.list(fittedvalues) || length(fittedvalues) != npat)
+    stop(paste(sQuote("fittedvalues"), "should be a list of length",
+               npat, "containing vectors of fitted values"))
+  
+  lambda <- unlist(fittedvalues)
+
+  # consistency check
+  if(length(lambda) != nquadrature)
+    stop(paste(if(!userfitted) "internal error:" else NULL,
+               "number of fitted values", paren(length(lambda)),
+               "does not match number of quadrature points",
+               paren(nquadrature)))
+
+  # indicator is 1 if lambda > 0
+  # (adjusted for numerical behaviour of predict.glm)
+  indicator <- (lambda > .Machine$double.eps)
+
+  # Evaluate residual measure components
+  discrete <- ifelse(Z,
+                     switch(type,
+                            raw     = 1,
+                            inverse = 1/lambda,
+                            pearson = 1/sqrt(lambda)
+                            ),
+                     0)
+
+  density <- switch(type,
+                    raw     = -lambda,
+                    inverse = -indicator,
+                    pearson = -indicator * sqrt(lambda))
+
+  atoms <- as.logical(Z)
+  
+  # All components
+  resdf <- data.frame(discrete=discrete,
+                      density=density,
+                      atoms=atoms)
+
+  # Split residual data according to point pattern affiliation
+  splitres <- split(resdf, id)
+  # Associate with quadrature scheme
+  reshf <- hyperframe(R=splitres, Q=Q)
+  # Convert to signed measures
+  answer <- with(reshf, msr(Q, R$discrete[R$atoms], R$density))
+  # tag
+  answer <- lapply(answer, "attr<-", which="type", value=type)
+  answer <- lapply(answer, "attr<-", which="typename", value=typename)
+  return(as.solist(answer))
+}
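+
+# Illustrative usage (a sketch, assuming the 'waterstriders' data shipped
+# with spatstat):
+#
+#   fit <- mppm(Points ~ 1, hyperframe(Points=waterstriders))
+#   res <- residuals(fit)     # list of signed measures, one per pattern
+#   sapply(res, integral)     # each total should be near zero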
+
diff --git a/R/rho2hat.R b/R/rho2hat.R
new file mode 100755
index 0000000..dc983f1
--- /dev/null
+++ b/R/rho2hat.R
@@ -0,0 +1,267 @@
+#
+#   rho2hat.R
+#
+#   Relative risk for pairs of covariate values
+#
+#   $Revision: 1.25 $   $Date: 2016/07/15 10:21:12 $
+#
+
+rho2hat <- function(object, cov1, cov2, ..., method=c("ratio", "reweight")) {
+  cov1name <- short.deparse(substitute(cov1))
+  cov2name <- short.deparse(substitute(cov2))
+  callstring <- short.deparse(sys.call())
+  method <- match.arg(method)
+  # validate model
+  if(is.ppp(object) || inherits(object, "quad")) {
+    model <- ppm(object, ~1, forcefit=TRUE)
+    reference <- "area"
+    modelcall <- NULL
+  } else if(is.ppm(object)) {
+    model <- object
+    reference <- "model"
+    modelcall <- model$call
+    if(is.null(getglmfit(model)))
+      model <- update(model, forcefit=TRUE)
+  } else stop("object should be a point pattern or a point process model")
+
+  # interpret string "x" or "y" as a coordinate function
+  getxyfun <- function(s) {
+    switch(s,
+           x = { function(x,y) { x } },
+           y = { function(x,y) { y } },
+           stop(paste("Unrecognised covariate name", sQuote(s))))
+  }
+  if(is.character(cov1) && length(cov1) == 1) {
+    cov1name <- cov1
+    cov1 <- getxyfun(cov1name)
+  }
+  if(is.character(cov2) && length(cov2) == 1) {
+    cov2name <- cov2
+    cov2 <- getxyfun(cov2name)
+  }
+  if(   (cov1name == "x" && cov2name == "y")
+     || (cov1name == "y" && cov2name == "x")) {
+    # spatial relative risk
+    isxy <- TRUE
+    needflip <- (cov1name == "y" && cov2name == "x")
+    X <- data.ppm(model)
+    if(needflip) X <- flipxy(X)
+    
+    switch(method,
+           ratio = {
+             # ratio of smoothed intensity estimates
+             den <- density(X, ...)
+             sigma <- attr(den, "sigma")
+             varcov <- attr(den, "varcov")
+             W <- as.owin(den)
+             if(!needflip) {
+               lambda <- predict(model, locations=W)
+             } else {
+               lambda <- flipxy(predict(model, locations=flipxy(W)))
+             }
+             rslt <- switch(reference,
+                            area = { den },
+                            model = {
+                              lam <- blur(lambda, sigma=sigma, varcov=varcov,
+                                          normalise=TRUE)
+                              eval.im(den/lam)
+                            })
+           },
+           reweight = {
+             # smoothed point pattern with weights = 1/reference
+             W <- do.call.matched(as.mask,
+                                  append(list(w=as.owin(X)), list(...)))
+             if(!needflip) {
+               lambda <- predict(model, locations=W)
+             } else {
+               lambda <- flipxy(predict(model, locations=flipxy(W)))
+             }
+             gstarX <- switch(reference,
+                              area = {
+                                rep.int(area(W), npoints(X))
+                              },
+                              model = {
+                                lambda[X]
+                              })
+             rslt <- density(X, ..., weights=1/gstarX)
+             sigma <- attr(rslt, "sigma")
+             varcov <- attr(rslt, "varcov")
+           })
+    Z12points <- X
+    r1 <- W$xrange
+    r2 <- W$yrange
+    lambda <- lambda[]
+  } else {
+    # general case
+    isxy <- FALSE
+    # harmonise covariates 
+    if(is.function(cov1) && is.im(cov2)) {
+      cov1 <- as.im(cov1, W=cov2)
+    } else if(is.im(cov1) && is.function(cov2)) {
+      cov2 <- as.im(cov2, W=cov1)
+    }
+    # evaluate each covariate at data points and at pixels
+    stuff1 <- evalCovar(model, cov1)
+    stuff2 <- evalCovar(model, cov2)
+    # unpack
+    values1 <- stuff1$values
+    values2 <- stuff2$values
+    # covariate values at each data point
+    Z1X      <- values1$ZX
+    Z2X      <- values2$ZX
+    # covariate values at each pixel
+    Z1values <- values1$Zvalues
+    Z2values <- values2$Zvalues
+    # model intensity
+    lambda  <- values1$lambda
+    # ranges of each covariate
+    r1 <- range(Z1X, Z1values, finite=TRUE)
+    r2 <- range(Z2X, Z2values, finite=TRUE)
+    scal <- function(x, r) { (x - r[1])/diff(r) }
+    # scatterplot coordinates
+    Z12points <- ppp(scal(Z1X, r1), scal(Z2X, r2), c(0,1), c(0,1))
+    Z12pixels <- ppp(scal(Z1values, r1), scal(Z2values, r2), c(0,1), c(0,1))
+    # normalising constants
+#    nX   <- length(Z1X)
+    npixel <- length(lambda)
+    areaW <- area(Window(model))
+    pixelarea <- areaW/npixel
+    baseline <- if(reference == "area") rep.int(1, npixel) else lambda
+    wts <- baseline * pixelarea
+    switch(method,
+           ratio = {
+             # estimate intensities
+             fhat <- density(Z12points, ...)
+             sigma <- attr(fhat, "sigma")
+             varcov <- attr(fhat, "varcov")
+             ghat <- do.call(density.ppp,
+                             resolve.defaults(list(Z12pixels, weights=wts),
+                                              list(...),
+                                              list(sigma=sigma,
+                                                   varcov=varcov)))
+             # compute ratio of smoothed densities
+             rslt <- eval.im(fhat/ghat)
+           },
+           reweight = {
+             # compute smoothed intensity with weight = 1/reference
+             ghat <- density(Z12pixels, weights=wts, ...)
+             rslt <- density(Z12points, weights=1/ghat[Z12points], ...)
+             sigma <- attr(rslt, "sigma")
+             varcov <- attr(rslt, "varcov")
+           })
+  }
+  # add scale and label info
+  attr(rslt, "stuff") <- list(isxy=isxy,
+                              cov1=cov1,
+                              cov2=cov2,
+                              cov1name=cov1name,
+                              cov2name=cov2name,
+                              r1=r1,
+                              r2=r2,
+                              reference=reference,
+                              lambda=lambda,
+                              modelcall=modelcall,
+                              callstring=callstring,
+                              Z12points=Z12points,
+                              sigma=sigma,
+                              varcov=varcov)
+  class(rslt) <- c("rho2hat", class(rslt))
+  rslt
+}
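+
+# Illustrative usage (a sketch, assuming the 'bei' dataset and its
+# covariate images 'bei.extra' shipped with spatstat):
+#
+#   with(bei.extra, rho2hat(bei, grad, elev))  # risk vs two covariates
+#   rho2hat(bei, "x", "y")                     # spatial relative risk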
+
+plot.rho2hat <- function(x, ..., do.points=FALSE) {
+  xname <- short.deparse(substitute(x))
+  s <- attr(x, "stuff")
+  # resolve "..." arguments
+  rd <- resolve.defaults(list(...),
+                         list(add=FALSE, axes=!s$isxy,
+                              xlab=s$cov1name, ylab=s$cov2name))
+  # plot image
+  plotparams <- graphicsPars("plot")
+  do.call.matched(plot.im,
+                  resolve.defaults(list(x=x, axes=FALSE),
+                                   list(...),
+                                   list(main=xname, ribargs=list(axes=TRUE))),
+                  extrargs=c(plotparams, "add", "zlim", "breaks"))
+  # add axes 
+  if(rd$axes) {
+    axisparams <- graphicsPars("axis")
+    Axis <- function(..., extrargs=axisparams) {
+      do.call.matched(graphics::axis,
+                      resolve.defaults(list(...)), extrargs=extrargs)
+    }
+    if(s$isxy) {
+      # for (x,y) plots the image is at the correct physical scale
+      xr <- x$xrange
+      yr <- x$yrange
+      spak <- 0.05 * max(diff(xr), diff(yr))
+      Axis(side=1, ..., at=pretty(xr), pos=yr[1] - spak)
+      Axis(side=2, ..., at=pretty(yr), pos=xr[1] - spak)
+    } else {
+      # for other plots the image was scaled to the unit square
+      rx <- s$r1
+      ry <- s$r2
+      px <- pretty(rx)
+      py <- pretty(ry)
+      Axis(side=1, labels=px, at=(px - rx[1])/diff(rx), ...)
+      Axis(side=2, labels=py, at=(py - ry[1])/diff(ry), ...)
+    }
+    title(xlab=rd$xlab)
+    title(ylab=rd$ylab)
+  }
+  if(do.points) {
+    do.call.matched(plot.ppp,
+                    resolve.defaults(list(x=s$Z12points, add=TRUE),
+                                     list(...)),
+                    extrargs=c("pch", "col", "cols", "bg", "cex", "lwd", "lty"))
+  }
+  invisible(NULL)
+}
+
+print.rho2hat <- function(x, ...) {
+  s <- attr(x, "stuff")
+  cat("Scatterplot intensity estimate (class rho2hat)\n")
+  cat(paste("for the covariates", s$cov1name, "and", s$cov2name, "\n"))
+  switch(s$reference,
+         area=cat("Function values are absolute intensities\n"),
+         model={
+           cat("Function values are relative to fitted model\n")
+           print(s$modelcall)
+         })
+  cat(paste("Call:", s$callstring, "\n"))
+  if(s$isxy) {
+    cat("Obtained by spatial smoothing of original data\n")
+    cat("Smoothing parameters used by density.ppp:\n")
+  } else {
+    cat("Obtained by transforming to the unit square and smoothing\n")
+    cat("Smoothing parameters (on unit square) used by density.ppp:\n")
+  }
+  if(!is.null(s$sigma)) cat(paste("\tsigma = ", signif(s$sigma, 5), "\n"))
+  if(!is.null(s$varcov)) { cat("\tvarcov =\n") ; print(s$varcov) }
+  cat("Intensity values:\n")
+  NextMethod("print")
+}
+
+predict.rho2hat <- function(object, ..., relative=FALSE) {
+  if(length(list(...)) > 0)
+    warning("Additional arguments ignored in predict.rho2hat")
+  # extract info
+  s <- attr(object, "stuff")
+  reference <- s$reference
+  # extract images of covariate, scaled to [0,1]
+  Z1 <- scaletointerval(s$cov1, xrange=s$r1)
+  Z2 <- scaletointerval(s$cov2, xrange=s$r2)
+  # extract pairs of covariate values
+  ZZ <- pairs(Z1, Z2, plot=FALSE)
+  # apply rho to Z
+  YY <- safelookup(object, ppp(ZZ[,1], ZZ[,2], c(0,1), c(0,1)), warn=FALSE)
+  # reform as image
+  Y <- Z1
+  Y[] <- YY
+  # adjust to reference baseline
+  if(!(relative || reference == "area")) {
+    lambda <- s$lambda
+    Y <- Y * lambda
+  }
+  return(Y)
+}
diff --git a/R/rhohat.R b/R/rhohat.R
new file mode 100755
index 0000000..e4ab63d
--- /dev/null
+++ b/R/rhohat.R
@@ -0,0 +1,623 @@
+#
+#  rhohat.R
+#
+#  $Revision: 1.71 $  $Date: 2017/06/05 10:31:58 $
+#
+#  Non-parametric estimation of a transformation rho(z) determining
+#  the intensity function lambda(u) of a point process in terms of a
+#  spatial covariate Z(u) through lambda(u) = rho(Z(u)).
+#  More generally allows offsets etc.
+
+rhohat <- function(object, covariate, ...) {
+  UseMethod("rhohat")
+}
+
+rhohat.ppp <- rhohat.quad <- 
+  function(object, covariate, ...,
+           baseline=NULL, weights=NULL,
+           method=c("ratio", "reweight", "transform"),
+           horvitz=FALSE,
+           smoother=c("kernel", "local"),
+           dimyx=NULL, eps=NULL,
+           n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, 
+           bwref=bw, covname, confidence=0.95) {
+  callstring <- short.deparse(sys.call())
+  smoother <- match.arg(smoother)
+  method <- match.arg(method)
+  if(missing(covname)) 
+    covname <- sensiblevarname(short.deparse(substitute(covariate)), "X")
+  if(is.null(adjust))
+    adjust <- 1
+  # validate model
+  if(is.null(baseline)) {
+    model <- ppm(object ~1)
+    reference <- "Lebesgue"
+  } else {
+    model <- ppm(object ~ offset(log(baseline)))
+    reference <- "baseline"
+  } 
+  modelcall <- NULL
+
+  if(is.character(covariate) && length(covariate) == 1) {
+    covname <- covariate
+    switch(covname,
+           x={
+             covariate <- function(x,y) { x }
+           }, 
+           y={
+             covariate <- function(x,y) { y }
+           },
+           stop("Unrecognised covariate name")
+         )
+    covunits <- unitname(data.ppm(model))
+  } else {
+    covunits <- NULL
+  }
+
+  areaW <- area(Window(data.ppm(model)))
+  
+  rhohatEngine(model, covariate, reference, areaW, ...,
+               weights=weights,
+               method=method,
+               horvitz=horvitz,
+               smoother=smoother,
+               resolution=list(dimyx=dimyx, eps=eps),
+               n=n, bw=bw, adjust=adjust, from=from, to=to,
+               bwref=bwref, covname=covname, covunits=covunits,
+               confidence=confidence,
+               modelcall=modelcall, callstring=callstring)
+}
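+
+# Illustrative usage (a sketch, assuming the 'bei' dataset and covariate
+# images 'bei.extra' shipped with spatstat):
+#
+#   rho <- rhohat(bei, bei.extra$grad)  # intensity as a function of slope
+#   plot(rho)                           # estimate with confidence bands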
+
+rhohat.ppm <- function(object, covariate, ...,
+                       weights=NULL,
+                       method=c("ratio", "reweight", "transform"),
+                       horvitz=FALSE,
+                       smoother=c("kernel", "local"),
+                       dimyx=NULL, eps=NULL,
+                       n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, 
+                       bwref=bw, covname, confidence=0.95) {
+  callstring <- short.deparse(sys.call())
+  smoother <- match.arg(smoother)
+  method <- match.arg(method)
+  if(missing(covname)) 
+    covname <- sensiblevarname(short.deparse(substitute(covariate)), "X")
+  if(is.null(adjust))
+    adjust <- 1
+
+  if("baseline" %in% names(list(...)))
+    warning("Argument 'baseline' ignored: not available for rhohat.ppm")
+
+  ## validate model
+  model <- object
+  reference <- "model"
+  modelcall <- model$call
+
+  if(is.character(covariate) && length(covariate) == 1) {
+    covname <- covariate
+    switch(covname,
+           x={
+             covariate <- function(x,y) { x }
+           }, 
+           y={
+             covariate <- function(x,y) { y }
+           },
+           stop("Unrecognised covariate name")
+         )
+    covunits <- unitname(data.ppm(model))
+  } else {
+    covunits <- NULL
+  }
+
+  areaW <- area(Window(data.ppm(model)))
+  
+  rhohatEngine(model, covariate, reference, areaW, ...,
+               weights=weights,
+               method=method,
+               horvitz=horvitz,
+               smoother=smoother,
+               resolution=list(dimyx=dimyx, eps=eps),
+               n=n, bw=bw, adjust=adjust, from=from, to=to,
+               bwref=bwref, covname=covname, covunits=covunits,
+               confidence=confidence,
+               modelcall=modelcall, callstring=callstring)
+}
+
+rhohat.lpp <- rhohat.lppm <- 
+  function(object, covariate, ...,
+           weights=NULL,
+           method=c("ratio", "reweight", "transform"),
+           horvitz=FALSE,
+           smoother=c("kernel", "local"),
+           nd=1000, eps=NULL, random=TRUE, 
+           n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, 
+           bwref=bw, covname, confidence=0.95) {
+  callstring <- short.deparse(sys.call())
+  smoother <- match.arg(smoother)
+  method <- match.arg(method)
+  if(missing(covname)) 
+    covname <- sensiblevarname(short.deparse(substitute(covariate)), "X")
+  if(is.null(adjust))
+    adjust <- 1
+  # validate model
+  if(is.lpp(object)) {
+    X <- object
+    model <- lppm(object, ~1, eps=eps, nd=nd, random=random)
+    reference <- "Lebesgue"
+    modelcall <- NULL
+  } else if(inherits(object, "lppm")) {
+    model <- object
+    X <- model$X
+    reference <- "model"
+    modelcall <- model$call
+  } else stop("object should be of class lpp or lppm")
+  
+  if("baseline" %in% names(list(...)))
+    warning("Argument 'baseline' ignored: not available for ",
+            if(is.lpp(object)) "rhohat.lpp" else "rhohat.lppm")
+
+  if(is.character(covariate) && length(covariate) == 1) {
+    covname <- covariate
+    switch(covname,
+           x={
+             covariate <- function(x,y) { x }
+           }, 
+           y={
+             covariate <- function(x,y) { y }
+           },
+           stop("Unrecognised covariate name")
+         )
+    covunits <- unitname(X)
+  } else {
+    covunits <- NULL
+  }
+
+  totlen <- sum(lengths.psp(as.psp(as.linnet(X))))
+  
+  rhohatEngine(model, covariate, reference, totlen, ...,
+               weights=weights,
+               method=method,
+               horvitz=horvitz,
+               smoother=smoother,
+               resolution=list(nd=nd, eps=eps, random=random),
+               n=n, bw=bw, adjust=adjust, from=from, to=to,
+               bwref=bwref, covname=covname, covunits=covunits,
+               confidence=confidence,
+               modelcall=modelcall, callstring=callstring)
+}
+
+rhohatEngine <- function(model, covariate,
+                         reference=c("Lebesgue", "model", "baseline"),
+                         volume,
+                         ...,
+                         weights=NULL,
+                         method=c("ratio", "reweight", "transform"),
+                         horvitz=FALSE,
+                         smoother=c("kernel", "local"),
+                         resolution=list(), 
+                         n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, 
+                         bwref=bw, covname, covunits=NULL, confidence=0.95,
+                         modelcall=NULL, callstring="rhohat") {
+  reference <- match.arg(reference)
+  # evaluate the covariate at data points and at pixels
+  stuff <- do.call(evalCovar,
+                   append(list(model, covariate), resolution))
+  # unpack
+#  info   <- stuff$info
+  values <- stuff$values
+  # values at each data point
+  ZX      <- values$ZX
+  lambdaX <- if(horvitz) fitted(model, dataonly=TRUE) else NULL
+  # values at each pixel
+  Zimage  <- values$Zimage
+  Zvalues <- values$Zvalues
+  lambda  <- values$lambda
+  ## weights
+  if(!is.null(weights)) {
+    X <- data.ppm(model)
+    if(is.im(weights)) 
+      weights <- safelookup(weights, X)
+    else if(is.function(weights))
+      weights <- weights(X$x, X$y)
+    else if(is.numeric(weights) && is.vector(as.numeric(weights))) 
+      check.nvector(weights, npoints(X))
+    else stop(paste(sQuote("weights"),
+                    "should be a vector, a pixel image, or a function"))
+  }
+  # normalising constants
+  denom <- volume * (if(reference == "Lebesgue" || horvitz) 1 else mean(lambda))
+  # info 
+  savestuff <- list(reference  = reference,
+                    horvitz    = horvitz,
+                    Zimage     = Zimage)
+  # calculate rho-hat
+  result <- rhohatCalc(ZX, Zvalues, lambda, denom,
+                       ...,
+                       weights=weights,
+                       lambdaX=lambdaX,
+                       method=method,
+                       horvitz=horvitz,
+                       smoother=smoother,
+                       n=n, bw=bw, adjust=adjust, from=from, to=to,
+                       bwref=bwref, covname=covname, confidence=confidence,
+                       covunits=covunits,
+                       modelcall=modelcall, callstring=callstring,
+                       savestuff=savestuff)
+  return(result)
+}
+
+
+# basic calculation of rhohat from covariate values
+
+rhohatCalc <- local({
+  
+  interpolate <- function(x,y) {
+    if(inherits(x, "density") && missing(y))
+      approxfun(x$x, x$y, rule=2)
+    else 
+      approxfun(x, y, rule=2)
+  }
+
+  ## note: this function normalises the weights, like density.default
+  LocfitRaw <- function(x, ..., weights=NULL) {
+    if(is.null(weights)) weights <- 1
+    requireNamespace("locfit", quietly=TRUE)
+    do.call.matched(locfit::locfit.raw,
+                    append(list(x=x, weights=weights), list(...)))
+  }
+
+  varlog <- function(obj,xx) {
+    ## variance of log f-hat
+    stopifnot(inherits(obj, "locfit"))
+    if(!identical(obj$trans, exp))
+      stop("internal error: locfit object does not have log link")
+    ## the following call should have band="local" but that produces NaN's
+    pred <- predict(obj, newdata=xx,
+                    se.fit=TRUE, what="coef")
+    se <- pred$se.fit
+    return(se^2)
+  }
+
+  rhohatCalc <- function(ZX, Zvalues, lambda, denom, ...,
+                         weights=NULL, lambdaX=NULL,
+                         method=c("ratio", "reweight", "transform"),
+                         horvitz=FALSE, 
+                         smoother=c("kernel", "local"),
+                         n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, 
+                         bwref=bw, covname, confidence=0.95,
+                         covunits = NULL, modelcall=NULL, callstring=NULL,
+                         savestuff=list()) {
+    method <- match.arg(method)
+    smoother <- match.arg(smoother)
+    ## check availability of locfit package
+    if(smoother == "local" && !requireNamespace("locfit", quietly=TRUE)) {
+      warning(paste("In", paste(dQuote(callstring), ":", sep=""),
+                    "package", sQuote("locfit"), "is not available;",
+                    "unable to perform local likelihood smoothing;",
+                    "using kernel smoothing instead"),
+              call.=FALSE)
+      smoother <- "kernel"
+    }
+    ## validate
+    stopifnot(is.numeric(ZX))
+    stopifnot(is.numeric(Zvalues))
+    stopifnot(is.numeric(lambda))
+    stopifnot(length(lambda) == length(Zvalues))
+    stopifnot(all(is.finite(lambda))) 
+    check.1.real(denom)
+    ## 
+    if(horvitz) {
+      ## data points will be weighted by reciprocal of model intensity
+      weights <- (weights %orifnull% 1)/lambdaX
+    }
+    ## normalising constants
+    nX   <- if(is.null(weights)) length(ZX) else sum(weights)
+    kappahat <- nX/denom
+    ## limits
+    Zrange <- range(ZX, Zvalues)
+    if(is.null(from)) from <- Zrange[1] 
+    if(is.null(to))   to   <- Zrange[2]
+    if(from > Zrange[1] || to < Zrange[2])
+      stop("Interval [from, to] = ", prange(c(from,to)), 
+           "does not contain the range of data values =", prange(Zrange))
+    ## critical constant for CI's
+    crit <- qnorm((1+confidence)/2)
+    percentage <- paste(round(100 * confidence), "%%", sep="")
+    CIblurb <- paste("pointwise", percentage, "confidence interval")
+    ## estimate densities   
+    if(smoother == "kernel") {
+      ## ............... kernel smoothing ......................
+      ## reference density (normalised) for calculation
+      ghat <- density(Zvalues,weights=if(horvitz) NULL else lambda/sum(lambda),
+                      bw=bwref,adjust=adjust,n=n,from=from,to=to, ...)
+      xxx <- ghat$x
+      ghatfun <- interpolate(ghat)
+      ## relative density
+      switch(method,
+             ratio={
+               ## compute ratio of smoothed densities
+               fhat <- unnormdensity(ZX,weights=weights,
+                                     bw=bw,adjust=adjust,
+                                     n=n,from=from, to=to, ...)
+               fhatfun <- interpolate(fhat)
+               Ghat.xxx <- denom * ghatfun(xxx)
+               yyy <- fhatfun(xxx)/Ghat.xxx
+               ## compute variance approximation
+               sigma <- fhat$bw
+               weights2 <- if(is.null(weights)) NULL else weights^2
+               fstar <- unnormdensity(ZX,weights=weights2,
+                                      bw=bw,adjust=adjust/sqrt(2),
+                                      n=n,from=from, to=to, ...)
+               fstarfun <- interpolate(fstar)
+               const <- 1/(2 * sigma * sqrt(pi))
+               vvv  <- const * fstarfun(xxx)/Ghat.xxx^2
+             },
+             reweight={
+               ## weight Z values by reciprocal of reference
+               wt <- (weights %orifnull% 1)/(denom * ghatfun(ZX))
+               rhat <- unnormdensity(ZX, weights=wt, bw=bw,adjust=adjust,
+                                     n=n,from=from, to=to, ...)
+               rhatfun <- interpolate(rhat)
+               yyy <- rhatfun(xxx)
+               ## compute variance approximation
+               sigma <- rhat$bw
+               rongstar <- unnormdensity(ZX, weights=wt^2,
+                                         bw=bw,adjust=adjust/sqrt(2),
+                                         n=n,from=from, to=to, ...)
+               rongstarfun <- interpolate(rongstar)
+               const <- 1/(2 * sigma * sqrt(pi))
+               vvv  <- const * rongstarfun(xxx)
+             },
+             transform={
+               ## probability integral transform
+               Gfun <- interpolate(ghat$x, cumsum(ghat$y)/sum(ghat$y))
+               GZX <- Gfun(ZX)
+               ## smooth density on [0,1]
+               qhat <- unnormdensity(GZX,weights=weights,
+                                     bw=bw,adjust=adjust,
+                                     n=n, from=0, to=1, ...)
+               qhatfun <- interpolate(qhat)
+               ## edge effect correction
+               one <- density(seq(from=0,to=1,length.out=512),
+                              bw=qhat$bw, adjust=1,
+                              n=n,from=0, to=1, ...)
+               onefun <- interpolate(one)
+               ## apply to transformed values
+               Gxxx <- Gfun(xxx)
+               Dxxx <- denom * onefun(Gxxx)
+               yyy <- qhatfun(Gxxx)/Dxxx
+               ## compute variance approximation
+               sigma <- qhat$bw
+               weights2 <- if(is.null(weights)) NULL else weights^2
+               qstar <- unnormdensity(GZX,weights=weights2,
+                                      bw=bw,adjust=adjust/sqrt(2),
+                                      n=n,from=0, to=1, ...)
+               qstarfun <- interpolate(qstar)
+               const <- 1/(2 * sigma * sqrt(pi))
+               vvv  <- const * qstarfun(Gxxx)/Dxxx^2
+             })
+      vvvname <- "Variance of estimator"
+      vvvlabel <- paste("bold(Var)~hat(%s)", paren(covname), sep="")
+      sd <- sqrt(vvv)
+      hi <- yyy + crit * sd
+      lo <- yyy - crit * sd
+    } else {
+      ## .................. local likelihood smoothing .......................
+      xlim <- c(from, to)
+      xxx <- seq(from, to, length=n)
+      ## reference density
+      ghat <- LocfitRaw(Zvalues,
+                        weights=if(horvitz) NULL else lambda,
+                        xlim=xlim, ...)
+      ggg <- predict(ghat, xxx)
+      ## relative density
+      switch(method,
+             ratio={
+               ## compute ratio of smoothed densities
+               fhat <- LocfitRaw(ZX, weights=weights, xlim=xlim, ...)
+               fff <- predict(fhat, xxx)
+               yyy <- kappahat * fff/ggg
+               ## compute approximation to variance of log rho-hat
+               varlogN <- 1/nX
+               vvv <- varlog(fhat, xxx) + varlogN
+             },
+             reweight={
+               ## weight Z values by reciprocal of reference
+               wt <- (weights %orifnull% 1)/(denom * predict(ghat,ZX))
+               sumwt <- sum(wt)
+               rhat <- LocfitRaw(ZX, weights=wt, xlim=xlim, ...)
+               rrr <- predict(rhat, xxx)
+               yyy <- sumwt * rrr
+               ## compute approximation to variance of log rho-hat
+               varsumwt <- mean(yyy /(denom * ggg)) * diff(xlim)
+               varlogsumwt <- varsumwt/sumwt^2
+               vvv <- varlog(rhat, xxx) + varlogsumwt
+             },
+             transform={
+               ## probability integral transform
+               Gfun <- approxfun(xxx, cumsum(ggg)/sum(ggg), rule=2)
+               GZX <- Gfun(ZX)
+               ## smooth density on [0,1], end effect corrected
+               qhat <- LocfitRaw(GZX, weights=weights, xlim=c(0,1), ...)
+               ## apply to transformed values
+               Gxxx <- Gfun(xxx)
+               qqq <- predict(qhat, Gxxx)
+               yyy <- kappahat * qqq
+               ## compute approximation to variance of log rho-hat
+               varlogN <- 1/nX
+               vvv <- varlog(qhat, Gxxx) + varlogN
+             })
+      vvvname <- "Variance of log of estimator"
+      vvvlabel <- paste("bold(Var)~log(hat(%s)", paren(covname), ")", sep="")
+      sss <- exp(crit * sqrt(vvv))
+      hi <- yyy * sss
+      lo <- yyy / sss
+    }
+    ## pack into fv object
+    df <- data.frame(xxx=xxx, rho=yyy, var=vvv, hi=hi, lo=lo)
+    names(df)[1] <- covname
+    desc <- c(paste("covariate", covname),
+              "Estimated intensity",
+              vvvname,
+              paste("Upper limit of", CIblurb),
+              paste("Lower limit of", CIblurb))
+    rslt <- fv(df,
+               argu=covname,
+               ylab=substitute(rho(X), list(X=as.name(covname))),
+               valu="rho",
+               fmla= as.formula(paste(". ~ ", covname)),
+               alim=range(ZX),
+               labl=c(covname,
+                 paste("hat(%s)", paren(covname), sep=""),
+                 vvvlabel,
+                 paste("%s[hi]", paren(covname), sep=""),
+                 paste("%s[lo]", paren(covname), sep="")),
+               desc=desc,
+               unitname=covunits,
+               fname="rho",
+               yexp=substitute(rho(X), list(X=as.name(covname))))
+    fvnames(rslt, ".")  <- c("rho", "hi", "lo")
+    fvnames(rslt, ".s") <- c("hi", "lo")
+    ## pack up
+    class(rslt) <- c("rhohat", class(rslt))
+    ## add info
+    stuff <- 
+      list(modelcall  = modelcall, 
+           callstring = callstring,
+           sigma      = switch(smoother, kernel=sigma, local=NULL),
+           covname    = paste(covname, collapse=""),
+           ZX         = ZX,
+           lambda     = lambda,
+           method     = method,
+           smoother   = smoother)
+    attr(rslt, "stuff") <- append(stuff, savestuff)
+    return(rslt)
+  }
+  
+  rhohatCalc
+})
+
+## ........... end of 'rhohatCalc' .................................
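+
+# Note on the variance constant in rhohatCalc: for a Gaussian kernel with
+# bandwidth sigma, the kernel roughness is
+#     integral of K(x)^2 dx  =  1/(2 * sigma * sqrt(pi)),
+# which is the 'const' used in each variance approximation above.
+# Quick numerical check (illustrative):
+#
+#   sigma <- 0.3
+#   integrate(function(x) dnorm(x, sd=sigma)^2, -Inf, Inf)$value
+#   1/(2 * sigma * sqrt(pi))   # agrees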
+
+
+print.rhohat <- function(x, ...) {
+  s <- attr(x, "stuff")
+  splat("Intensity function estimate (class rhohat)",
+        "for the covariate", s$covname)
+  switch(s$reference,
+         Lebesgue=splat("Function values are absolute intensities"),
+         baseline=splat("Function values are relative to baseline"),
+         model={
+           splat("Function values are relative to fitted model")
+           print(s$modelcall)
+         })
+  cat("Estimation method: ")
+  switch(s$method,
+         ratio={
+           splat("ratio of fixed-bandwidth kernel smoothers")
+         },
+         reweight={
+           splat("fixed-bandwidth kernel smoother of weighted data")
+         },
+         transform={
+           splat("probability integral transform,",
+                 "edge-corrected fixed bandwidth kernel smoothing",
+                 "on [0,1]")
+         },
+         cat("UNKNOWN\n"))
+  if(identical(s$horvitz, TRUE))
+    splat("\twith Horvitz-Thompson weight")
+  cat("Smoother: ")
+  switch(s$smoother,
+         kernel={
+           splat("Kernel density estimator")
+           splat("Actual smoothing bandwidth sigma = ",
+                 signif(s$sigma,5))
+         },
+         local ={ splat("Local likelihood density estimator") }
+         )
+  splat("Call:", s$callstring)
+
+  NextMethod("print")
+}
+
+plot.rhohat <- function(x, ..., do.rug=TRUE) {
+  xname <- short.deparse(substitute(x))
+  s <- attr(x, "stuff")
+  covname <- s$covname
+  asked.rug <- !missing(do.rug) && identical(do.rug, TRUE)
+  out <- do.call(plot.fv,
+                 resolve.defaults(list(x=x), list(...),
+                                  list(main=xname, shade=c("hi", "lo"))))
+  if(identical(list(...)$limitsonly, TRUE))
+    return(out)
+  if(do.rug) {
+    rugx <- ZX <- s$ZX
+    # check whether it's the default plot
+    argh <- list(...)
+    isfo <- unlist(lapply(argh, inherits, what="formula"))
+    if(any(isfo)) {
+      # a plot formula was given; inspect RHS
+      fmla <- argh[[min(which(isfo))]]
+      rhs <- rhs.of.formula(fmla)
+      vars <- variablesinformula(rhs)
+      vars <- vars[vars %in% c(colnames(x), ".x", ".y")]
+      if(length(vars) == 1 && vars %in% c(covname, ".x")) {
+        # expression in terms of covariate
+        rhstr <- as.character(rhs)[2]
+        dat <- list(ZX)
+        names(dat) <- vars[1]
+        rugx <- as.numeric(eval(parse(text=rhstr), dat))
+      } else {
+        if(asked.rug) warning("Unable to add rug plot")
+        rugx <- NULL
+      }
+    } 
+    if(!is.null(rugx)) {
+      # restrict to x limits, if given
+      if(!is.null(xlim <- list(...)$xlim))
+        rugx <- rugx[rugx >= xlim[1] & rugx <= xlim[2]]
+      # finally plot the rug
+      if(length(rugx) > 0)
+        rug(rugx)
+    }
+  }
+  invisible(NULL)
+}
+
+predict.rhohat <- function(object, ..., relative=FALSE,
+                    what=c("rho", "lo", "hi", "se")) {
+  trap.extra.arguments(..., .Context="in predict.rhohat")
+  what <- match.arg(what)
+  # extract info
+  s <- attr(object, "stuff")
+  reference <- s$reference
+  # convert to (linearly interpolated) function 
+  x <- with(object, .x)
+  y <- if(what == "se") sqrt(object[["var"]]) else object[[what]]
+  fun <- approxfun(x, y, rule=2)
+  # extract image of covariate
+  Z <- s$Zimage
+  # apply fun to Z
+  Y <- if(inherits(Z, "linim")) eval.linim(fun(Z)) else eval.im(fun(Z))
+  # adjust to reference baseline
+  if(reference != "Lebesgue" && !relative) {
+    lambda <- s$lambda
+    Y[] <- Y[] * lambda
+  }
+  return(Y)
+}
+
+as.function.rhohat <- function(x, ..., value=".y", extrapolate=TRUE) {
+  NextMethod("as.function")
+}
+
+simulate.rhohat <- function(object, nsim=1, ..., drop=TRUE) {
+  trap.extra.arguments(..., .Context="in simulate.rhohat")
+  lambda <- predict(object)
+  if(inherits(lambda, "linim")) {
+    result <- rpoislpp(lambda, nsim=nsim, drop=drop)
+  } else {
+    result <- rpoispp(lambda, nsim=nsim, drop=drop)
+  }
+  return(result)
+}
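+
+# Illustrative chain (a sketch): predict() converts the fitted rho into a
+# pixel image of intensity, which simulate() passes to rpoispp() (or
+# rpoislpp() on a linear network):
+#
+#   rho <- rhohat(bei, bei.extra$grad)
+#   lam <- predict(rho)      # image: rho(grad) evaluated pixelwise
+#   Xsim <- simulate(rho)    # Poisson pattern with that intensity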
diff --git a/R/ripras.R b/R/ripras.R
new file mode 100755
index 0000000..8da44e5
--- /dev/null
+++ b/R/ripras.R
@@ -0,0 +1,62 @@
+#
+#	ripras.S	Ripley-Rasson estimator of domain
+#
+#
+#	$Revision: 1.14 $	$Date: 2014/10/24 00:22:30 $
+#
+#
+#
+#
+#-------------------------------------
+bounding.box.xy <- function(x, y=NULL) {
+  xy <- xy.coords(x,y)
+  if(length(xy$x) == 0)
+    return(NULL)
+  owin(range(xy$x), range(xy$y), check=FALSE)
+}
+
+convexhull.xy <- function(x, y=NULL) {
+  xy <- xy.coords(x, y)
+  x <- xy$x
+  y <- xy$y
+  if(length(x) < 3)
+    return(NULL)
+  h <- rev(chull(x, y))  # must be anticlockwise
+  if(length(h) < 3)
+    return(NULL)
+  w <- owin(poly=list(x=x[h], y=y[h]), check=FALSE)
+  return(w)
+}
+
+ripras <- function(x, y=NULL, shape="convex", f) {
+  xy <- xy.coords(x, y)
+  n <- length(xy$x)
+  w <- switch(shape,
+              convex = convexhull.xy(xy),
+              rectangle = boundingbox(xy),
+              stop(paste("Unrecognised option: shape=", dQuote(shape))))
+  if(is.null(w))
+    return(NULL)
+  # expansion factor
+  if(!missing(f))
+    stopifnot(is.numeric(f) && length(f) == 1 && f >= 1)
+  else switch(shape,
+              convex = {
+                # number of vertices
+                m <- summary(w)$nvertices
+                f <- if(m < n) 1/sqrt(1 - m/n) else 2
+              },
+              rectangle = {
+                f <- (n+1)/(n-1)
+              })
+  # centroid
+  ce <- unlist(centroid.owin(w))
+  # shift centroid to origin
+  W <- shift(w, -ce)
+  # rescale
+  W <- affine(W, mat=diag(c(f,f)))
+  # shift origin to centroid
+  W <- shift(W, ce)
+  return(W)
+}
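+
+# Illustrative usage (a sketch): estimate the domain of an observed
+# point cloud by an inflated convex hull.
+#
+#   X <- runifpoint(50)           # 50 uniform points in the unit square
+#   plot(ripras(X)); points(X)    # hull scaled by f = 1/sqrt(1 - m/n)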
+
diff --git a/R/rknn.R b/R/rknn.R
new file mode 100755
index 0000000..c75cd2d
--- /dev/null
+++ b/R/rknn.R
@@ -0,0 +1,45 @@
+#
+#   rknn.R
+#
+#   Distribution of distance to k-th nearest point in d dimensions
+#   (Poisson process of intensity lambda)
+#
+#   $Revision: 1.2 $  $Date: 2009/12/31 01:33:44 $
+#
+
+dknn <- function(x, k=1, d=2, lambda=1) {
+  validposint(k, "dknn")
+  validposint(d, "dknn")
+  alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.))
+  y <- dgamma(x^d, shape=k, rate=lambda * alpha.d)
+  y <- y * d * x^(d-1)
+  return(y)
+}
+
+pknn <- function(q, k=1, d=2, lambda=1) {
+  validposint(k, "pknn")
+  validposint(d, "pknn")
+  alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.))
+  p <- pgamma(q^d, shape=k, rate=lambda * alpha.d)
+  return(p)
+}
+
+qknn <- function(p, k=1, d=2, lambda=1) {
+  validposint(k, "qknn")
+  validposint(d, "qknn")
+  alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.))
+  y <- qgamma(p, shape=k, rate=lambda * alpha.d)
+  z <- y^(1/d)
+  return(z)
+}
+
+rknn <- function(n, k=1, d=2, lambda=1) {
+  validposint(k, "rknn")
+  validposint(d, "rknn")
+  alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.))  
+  y <- rgamma(n, shape=k, rate=lambda * alpha.d)
+  x <- y^(1/d)
+  return(x)
+}
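+
+# Consistency check (illustrative): rknn() draws G ~ Gamma(k, lambda*alpha.d)
+# and returns G^(1/d), where alpha.d is the volume of the unit d-ball;
+# dknn, pknn and qknn are the matching density, cdf and quantile function.
+#
+#   x <- rknn(10000, k=1, d=2, lambda=100)
+#   mean(x <= qknn(0.5, k=1, d=2, lambda=100))   # approximately 0.5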
+
+  
diff --git a/R/rlabel.R b/R/rlabel.R
new file mode 100755
index 0000000..d30e6d1
--- /dev/null
+++ b/R/rlabel.R
@@ -0,0 +1,27 @@
+#
+#   rlabel.R
+#
+#   random (re)labelling
+#
+#   $Revision: 1.8 $   $Date: 2015/02/11 09:19:10 $
+#
+#
+rlabel <- function(X, labels=marks(X), permute=TRUE) {
+  stopifnot(is.ppp(X) || is.lpp(X) || is.pp3(X) || is.ppx(X))
+  if(is.null(labels))
+    stop("labels not given and marks not present")
+  npts <- npoints(X)
+  if(is.vector(labels) || is.factor(labels)) {
+    nlabels <- length(labels)
+    if(permute && (nlabels != npts))
+      stop("length of labels vector does not match number of points")
+    Y <- X %mark% sample(labels, npts, replace=!permute)
+  } else if(is.data.frame(labels) || is.hyperframe(labels)) {
+    nlabels <- nrow(labels)
+    if(permute && (nlabels != npts))
+      stop("number of rows of data frame does not match number of points")      
+    Y <- X %mark% labels[sample(1:nlabels, npts, replace=!permute), ,drop=FALSE]
+  } else stop("Format of labels argument is not understood")
+  return(Y)
+}
+
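Typical use is in random-labelling tests for a marked point pattern (illustrative; amacrine is a standard spatstat dataset):

    library(spatstat)
    Y <- rlabel(amacrine)     # random permutation of the existing on/off marks
    Z <- rlabel(amacrine, labels=factor(c("a", "b")), permute=FALSE)
    table(marks(Y))           # same mark totals as amacrine
    table(marks(Z))           # i.i.d. marks, so the totals vary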
diff --git a/R/rmh.R b/R/rmh.R
new file mode 100755
index 0000000..49fc4f6
--- /dev/null
+++ b/R/rmh.R
@@ -0,0 +1,7 @@
+#
+# generic rmh
+
+rmh <- function(model, ...){
+     UseMethod("rmh")
+}
+
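Dispatch sketch: a fitted model object goes to rmh.ppm (defined later in this commit), while a plain list or rmhmodel object falls through to rmh.default (illustrative parameter values):

    fit <- ppm(cells ~ 1, Strauss(r=0.1))
    X1 <- rmh(fit, verbose=FALSE)            # dispatches to rmh.ppm
    mod <- list(cif="strauss",
                par=list(beta=100, gamma=0.5, r=0.1),
                w=square(1))
    X2 <- rmh(mod, verbose=FALSE)            # dispatches to rmh.default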
diff --git a/R/rmh.default.R b/R/rmh.default.R
new file mode 100755
index 0000000..7c9e4f2
--- /dev/null
+++ b/R/rmh.default.R
@@ -0,0 +1,1084 @@
+#
+# $Id: rmh.default.R,v 1.108 2017/06/05 10:31:58 adrian Exp adrian $
+#
+rmh.default <- function(model,start=NULL,
+                        control=default.rmhcontrol(model),
+                        ...,
+                        nsim=1, drop=TRUE, saveinfo=TRUE,
+                        verbose=TRUE, snoop=FALSE) {
+#
+# Function rmh.  To simulate realizations of 2-dimensional point
+# patterns, given the conditional intensity function of the 
+# underlying process, via the Metropolis-Hastings algorithm.
+#
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+#
+#     V A L I D A T E
+#
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+  
+  if(verbose)
+    cat("Checking arguments..")
+
+# validate arguments and fill in the defaults
+
+  model <- rmhmodel(model)
+  start <- rmhstart(start)
+  if(is.null(control)) {
+    control <- default.rmhcontrol(model)
+  } else {
+    control <- rmhcontrol(control)
+  }
+  # override 
+  if(length(list(...)) > 0)
+    control <- update(control, ...)
+
+  control <- rmhResolveControl(control, model)
+
+  saveinfo <- as.logical(saveinfo)
+  
+  # retain "..." arguments unrecognised by rmhcontrol
+  # These are assumed to be arguments of functions defining the trend
+  argh <- list(...)
+  known <- names(argh) %in% names(formals(rmhcontrol.default))
+  f.args <- argh[!known]
+
+#### Multitype models
+  
+# Decide whether the model is multitype; if so, find the types.
+
+  types <- rmhResolveTypes(model, start, control)
+  ntypes <- length(types)
+  mtype <- (ntypes > 1)
+
+# If the model is multitype, check that the model parameters agree with types
+# and digest them
+  
+  if(mtype && !is.null(model$check)) {
+    model <- rmhmodel(model, types=types)
+  } else {
+    model$types <- types
+  }
+  
+######## Check for illegal combinations of model, start and control  ########
+
+  # No expansion can be done if we are using x.start
+
+  if(start$given == "x") {
+    if(control$expand$force.exp)
+      stop("Cannot expand window when using x.start.\n", call.=FALSE)
+    control$expand <- .no.expansion
+  }
+
+# Warn about a silly value of fixall:
+  if(control$fixall && ntypes == 1) {
+    warning("control$fixall applies only to multitype processes. Ignored.\n")
+    control$fixall <- FALSE
+    if(control$fixing == "n.each.type")
+      control$fixing <- "n.total"
+  }
+
+  
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+#
+#     M O D E L   P A R A M E T E R S
+#
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+
+#######  Determine windows  ################################
+
+  if(verbose)
+    cat("determining simulation windows...")
+  
+# these may be NULL  
+  w.model <- model$w
+  x.start <- start$x.start
+  trend <- model$trend
+  trendy <- !is.null(trend)
+
+  singletrend <- trendy && (is.im(trend) ||
+                            is.function(trend) ||
+                            (is.numeric(trend) && length(trend) == 1))
+  trendlist <- if(singletrend) list(trend) else trend
+  
+# window implied by trend image, if any
+  
+  w.trend <- 
+    if(is.im(trend))
+      as.owin(trend)
+    else if(is.list(trend) && any(ok <- unlist(lapply(trend, is.im))))
+      as.owin((trend[ok])[[1L]])
+    else NULL
+
+##  Clipping window (for final result)
+  
+  w.clip <-
+    if(!is.null(w.model))
+      w.model
+    else if(!will.expand(control$expand)) {
+      if(start$given == "x" && is.ppp(x.start))
+        x.start$window
+      else if(is.owin(w.trend))
+        w.trend
+    } else NULL
+
+  if(!is.owin(w.clip))
+    stop("Unable to determine window for pattern")
+
+  
+##  Simulation window 
+
+  xpn <- rmhResolveExpansion(w.clip, control, trendlist, "trend")
+  w.sim <- xpn$wsim
+  expanded <- xpn$expanded
+
+## Check the fine print   
+
+  if(expanded) {
+
+    if(control$fixing != "none")
+      stop(paste("If we're conditioning on the number of points,",
+                 "we cannot clip the result to another window.\n"))
+
+    if(!is.subset.owin(w.clip, w.sim))
+      stop("Expanded simulation window does not contain model window")
+  }
+
+
+#######  Trend  ################################
+  
+# Check that the expanded window fits inside the window
+# upon which the trend(s) live if there are trends and
+# if any trend is given by an image.
+
+  if(expanded && !is.null(trend)) {
+    trends <- if(is.im(trend)) list(trend) else trend
+    images <- unlist(lapply(trends, is.im))
+    if(any(images)) {
+      iwindows <- lapply(trends[images], as.owin)
+      nimages <- length(iwindows)
+      misfit <- !sapply(iwindows, is.subset.owin, A=w.sim)
+      nmisfit <- sum(misfit)
+      if(nmisfit > 1) 
+        stop(paste("Expanded simulation window is not contained in",
+                   "several of the trend windows.\n",
+                   "Bailing out.\n"))
+      else if(nmisfit == 1) {
+        warning(paste("Expanded simulation window is not contained in",
+                      if(nimages == 1) "the trend window.\n"
+                      else "one of the trend windows.\n",
+                      "Expanding to this trend window (only).\n"))
+        w.sim <- iwindows[[which(misfit)]]
+      }
+    }
+  }
+
+# Extract the 'beta' parameters
+
+  if(length(model$cif) == 1) {
+    # single interaction
+    beta <- model$C.beta
+    betalist <- list(beta)
+  } else {
+    # hybrid
+    betalist <- model$C.betalist
+    # multiply beta vectors for each component
+    beta <- Reduce("*", betalist)
+  }
+  
+##### .................. CONDITIONAL SIMULATION ...................
+
+#####  
+#||   Determine windows for conditional simulation
+#||
+#||      w.state  = window for the full configuration
+#||  
+#||      w.sim    = window for the 'free' (random) points
+#||
+
+  w.state <- w.sim
+  
+  condtype <- control$condtype
+  x.cond   <- control$x.cond
+#  n.cond   <- control$n.cond
+
+  switch(condtype,
+         none={
+           w.cond <- NULL
+         },
+         window={
+           # conditioning on the realisation inside a subwindow
+           w.cond <- as.owin(x.cond)
+           # subtract from w.sim
+           w.sim <- setminus.owin(w.state, w.cond)
+           if(is.empty(w.sim))
+             stop(paste("Conditional simulation is undefined;",
+                        "the conditioning window",
+                        sQuote("as.owin(control$x.cond)"),
+                        "covers the entire simulation window"))
+         },
+         Palm={
+           # Palm conditioning
+           w.cond <- NULL
+         })
+
+#####  
+#||   Convert conditioning points to appropriate format
+
+
+  x.condpp <- switch(condtype,
+                        none=NULL,
+                        window=x.cond,
+                        Palm=as.ppp(x.cond, w.state))
+
+# validate  
+  if(!is.null(x.condpp)) {
+    if(mtype) {
+      if(!is.marked(x.condpp))
+        stop("Model is multitype, but x.cond is unmarked")
+      if(!identical(all.equal(types, levels(marks(x.condpp))), TRUE))
+        stop("Types of points in x.cond do not match types in model")
+    }
+  }
+  
+
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+#
+#     S T A R T I N G      S T A T E
+#
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+
+###################### Starting state data ############################
+
+# whether the initial state should be thinned
+  
+  thin <- (start$given != "x") && (control$fixing == "none")
+  
+# There must now be a starting state.
+  
+  if(start$given == "none") {
+    # For conditional simulation, the starting state must be given
+    if(condtype != "none")
+      stop("No starting state given")
+    # Determine integral of beta * trend over data window.
+    # This is the expected number of points in the reference Poisson process.
+    area.w.clip <- area(w.clip)
+    if(trendy) {
+      tsummaries <- summarise.trend(trend, w=w.clip, a=area.w.clip)
+      En <- beta * sapply(tsummaries, getElement, name="integral")
+    } else {
+      En <- beta * area.w.clip
+    }
+    # Fix n.start equal to this integral
+    n.start <- if(spatstat.options("scalable")) round(En) else ceiling(En)
+    start <- rmhstart(n.start=n.start)
+  }
+  
+# In the case of conditional simulation, the start data determine
+# the 'free' points (i.e. excluding x.cond) in the initial state.
+
+  switch(start$given,
+         none={
+           stop("No starting state given")
+         },
+         x = {
+           # x.start was given
+           # coerce it to a ppp object
+           if(!is.ppp(x.start))
+             x.start <- as.ppp(x.start, w.state)
+           if(condtype == "window") {
+             # clip to simulation window
+             xs <- x.start[w.sim]
+             nlost <- x.start$n - xs$n
+             if(nlost > 0) 
+               warning(paste(nlost,
+                             ngettext(nlost, "point","points"),
+                             "of x.start",
+                             ngettext(nlost, "was", "were"),
+                             "removed because",
+                             ngettext(nlost, "it", "they"),
+                             "fell in the window of x.cond"))
+             x.start <- xs
+           }
+           npts.free <- x.start$n
+         },
+         n = {
+           # n.start was given
+           n.start <- start$n.start
+           # Adjust the number of points in the starting state in accordance
+           # with the expansion that has occurred.  
+           if(expanded) {
+	     holnum <- if(spatstat.options("scalable")) round else ceiling
+             n.start <- holnum(n.start * area(w.sim)/area(w.clip))
+           }
+           #
+           npts.free <- sum(n.start) # sum() is redundant if n.start is
+                                # scalar, but it is harmless.
+         },
+         stop("Internal error: start$given unrecognized"))
+
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+#
+#     C O N T R O L    P A R A M E T E R S
+#
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+
+###################  Periodic boundary conditions #########################
+
+  periodic <- control$periodic
+  
+  if(is.null(periodic)) {
+    # undecided. Use default rule
+    control$periodic <- periodic <- expanded && is.rectangle(w.state)
+  } else if(periodic && !is.rectangle(w.state)) {
+    # if periodic is TRUE we have to be simulating in a rectangular window.
+    stop("Need rectangular window for periodic simulation.\n")
+  }
+
+# parameter passed to C:  
+  period <-
+    if(periodic)
+      c(diff(w.state$xrange), diff(w.state$yrange))
+    else
+      c(-1,-1)
+
+
+
+#### vector of proposal probabilities 
+
+  if(!mtype) 
+    ptypes <- 1
+  else {
+    ptypes <- control$ptypes
+    if(is.null(ptypes)) {
+      # default proposal probabilities
+      ptypes <- if(start$given == "x" && (nx <- npoints(x.start)) > 0) {
+        table(marks(x.start, dfok=FALSE))/nx
+      } else rep.int(1/ntypes, ntypes)
+    } else {
+      # Validate ptypes
+      if(length(ptypes) != ntypes ||
+         abs(sum(ptypes) - 1) > sqrt(.Machine$double.eps))
+        stop("Argument ptypes is mis-specified.\n")
+    }
+  } 
+
+
+  
+########################################################################
+#  Normalising constant for proposal density
+# 
+# Integral of trend over the expanded window (or area of window):
+# Iota == Integral Of Trend (or) Area.
+
+  area.w.sim <- area(w.sim)
+  if(trendy) {
+    if(verbose)
+      cat("Evaluating trend integral...")
+    tsummaries <- summarise.trend(trend, w=w.sim, a=area.w.sim)
+    mins  <- sapply(tsummaries, getElement, name="min")
+    if(any(mins < 0))
+      stop("Trend has negative values")
+    iota <- sapply(tsummaries, getElement, name="integral")
+    tmax <- sapply(tsummaries, getElement, name="max")
+  } else {
+    iota <- area.w.sim
+    tmax <- NULL
+  }
+
+  
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+#
+#     A.S. EMPTY PROCESS
+#
+#         for conditional simulation, 'empty' means there are no 'free' points
+#  
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+
+  a.s.empty <- FALSE
+  
+#
+#  Empty pattern, simulated conditional on n
+#  
+  if(npts.free == 0 && control$fixing != "none") {
+    a.s.empty <- TRUE
+    if(verbose) {
+      mess <- paste("Initial pattern has 0 random points,",
+                    "and simulation is conditional on the number of points -")
+      if(condtype == "none")
+        warning(paste(mess, "returning an empty pattern\n"))
+      else
+        warning(paste(mess, "returning a pattern with no random points\n"))
+    }
+  }
+
+#
+#  If beta = 0, the process is almost surely empty
+#
+  
+  if(all(beta < .Machine$double.eps)) {
+    if(control$fixing == "none" && condtype == "none") {
+      # return empty pattern
+      if(verbose)
+        warning("beta = 0 implies an empty pattern\n")
+      a.s.empty <- TRUE
+    } else 
+      stop("beta = 0 implies an empty pattern, but we are simulating conditional on a nonzero number of points")
+  }
+
+#
+# If we're conditioning on the contents of a subwindow,
+# and the subwindow covers the clipping region,
+# the result is deterministic.  
+
+  if(condtype == "window" && is.subset.owin(w.clip, w.cond)) {
+    a.s.empty <- TRUE
+    warning(paste("Model window is a subset of conditioning window:",
+              "result is deterministic\n"))
+  }    
+
+#
+#  
+  if(a.s.empty) {
+    # create empty pattern, to be returned
+    if(!is.null(x.condpp)) 
+      empty <- x.condpp[w.clip]
+    else {
+      empty <- ppp(numeric(0), numeric(0), window=w.clip)
+      if(mtype) {
+        vide <- factor(types[integer(0)], levels=types)
+        empty <- empty %mark% vide
+      }
+    }
+  }
+
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+#
+#     PACK UP
+#
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+
+######### Store decisions
+
+  Model <- model
+  Start <- start
+  Control <- control
+
+  Model$w <- w.clip
+  Model$types <- types
+  
+  Control$expand <- if(expanded) rmhexpand(w.state) else .no.expansion
+
+  Control$internal <- list(w.sim=w.sim,
+                           w.state=w.state,
+                           x.condpp=x.condpp,
+                           ptypes=ptypes,
+                           period=period,
+                           thin=thin)
+
+  Model$internal <- list(a.s.empty=a.s.empty,
+                         empty=if(a.s.empty) empty else NULL,
+                         mtype=mtype,
+                         trendy=trendy,
+                         betalist=betalist,
+                         beta=beta,
+                         iota=iota,
+                         tmax=tmax)
+
+  Start$internal <- list(npts.free=npts.free)
+
+  InfoList <- list(model=Model, start=Start, control=Control)
+  class(InfoList) <- c("rmhInfoList", class(InfoList))
+
+  # go
+  if(nsim == 1 && drop) {
+    result <- do.call(rmhEngine,
+                      append(list(InfoList,
+                                  verbose=verbose,
+                                  snoop=snoop,
+                                  kitchensink=saveinfo),
+                             f.args))
+  } else {
+    result <- vector(mode="list", length=nsim)
+    if(verbose) {
+      splat("Generating", nsim, "point patterns...")
+      pstate <- list()
+    }
+    subverb <- verbose && (nsim == 1)
+    for(isim in 1:nsim) {
+      if(verbose) pstate <- progressreport(isim, nsim, state=pstate)
+      result[[isim]] <- do.call(rmhEngine,
+                                append(list(InfoList,
+                                            verbose=subverb,
+                                            snoop=snoop,
+                                            kitchensink=saveinfo),
+                                       f.args))
+    }
+    result <- as.solist(result)
+    if(verbose) splat("Done.\n")
+  }
+  return(result)
+}
+
+print.rmhInfoList <- function(x, ...) {
+  cat("\nPre-digested Metropolis-Hastings algorithm parameters (rmhInfoList)\n")
+  print(as.anylist(x))
+}
+
+#---------------  rmhEngine -------------------------------------------
+#
+# This is the interface to the C code.
+#
+# InfoList is a list of pre-digested, validated arguments
+# obtained from rmh.default.
+#
+# This function is called by rmh.default to generate one simulated
+# realisation of the model.
+# It's called repeatedly by ho.engine and qqplot.ppm to generate multiple
+# realisations (saving time by not repeating the argument checking
+# in rmh.default).
+
+# arguments:  
+# kitchensink: whether to tack InfoList on to the return value as an attribute
+# preponly: whether to just return InfoList without simulating
+#
+#   rmh.default digests arguments and calls rmhEngine with kitchensink=T
+#
+#   qqplot.ppm first gets InfoList by calling rmh.default with preponly=T
+#              (which digests the model arguments and calls rmhEngine
+#               with preponly=T, returning InfoList),
+#              then repeatedly calls rmhEngine(InfoList) to simulate.
+#
+# -------------------------------------------------------
+
+rmhEngine <- function(InfoList, ...,
+                       verbose=FALSE, kitchensink=FALSE,
+                       preponly=FALSE, snoop=FALSE,
+                       overrideXstart=NULL, overrideclip=FALSE) {
+# Internal Use Only!
+# This is the interface to the C code.
+
+  if(!inherits(InfoList, "rmhInfoList"))
+    stop("data not in correct format for internal function rmhEngine")
+
+  
+  if(preponly)
+    return(InfoList)
+
+  model <- InfoList$model
+  start <- InfoList$start
+  control <- InfoList$control
+
+  w.sim <- control$internal$w.sim
+  w.state <- control$internal$w.state
+  w.clip <- model$w
+
+  condtype <- control$condtype
+  x.condpp <- control$internal$x.condpp
+
+  types <- model$types
+  ntypes <- length(types)
+  
+  ptypes <- control$internal$ptypes
+  period <- control$internal$period
+
+  mtype <- model$internal$mtype
+
+  trend <- model$trend
+  trendy <- model$internal$trendy
+#  betalist <- model$internal$betalist
+  beta <- model$internal$beta
+  iota <- model$internal$iota
+  tmax <- model$internal$tmax
+
+  npts.free <- start$internal$npts.free
+
+  n.start <- start$n.start
+  x.start <- start$x.start
+
+  
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+#
+#     E M P T Y   P A T T E R N
+#
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+
+  if(model$internal$a.s.empty) {
+    if(verbose) cat("\n")
+    empty <- model$internal$empty
+    attr(empty, "info") <- InfoList
+    return(empty)
+  }
+  
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+#
+#     S I M U L A T I O N     
+#
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+
+#############################################
+####  
+####  Random number seed: initialisation & capture
+####  
+#############################################  
+  
+  if(!exists(".Random.seed"))
+    runif(1L)
+
+  saved.seed <- .Random.seed
+  
+
+#############################################
+####  
+####  Poisson case
+####  
+#############################################  
+  
+  if(is.poisson.rmhmodel(model)) {
+    if(verbose) cat("\n")
+    intensity <- if(!trendy) beta else model$trend
+    Xsim <-
+      switch(control$fixing,
+             none= {
+               # Poisson process 
+               if(!mtype)
+                 rpoispp(intensity, win=w.sim, ..., warnwin=FALSE)
+               else
+                 rmpoispp(intensity, win=w.sim, types=types, warnwin=FALSE)
+             },
+             n.total = {
+               # Binomial/multinomial process with fixed total number of points
+               if(!mtype) 
+                 rpoint(npts.free, intensity, win=w.sim, verbose=verbose)
+               else
+                 rmpoint(npts.free, intensity, win=w.sim, types=types,
+                         verbose=verbose)
+             },
+             n.each.type = {
+               # Multinomial process with fixed number of points of each type
+               npts.each <-
+                 switch(start$given,
+                        n = n.start,
+                        x = as.integer(table(marks(x.start, dfok=FALSE))),
+  stop("No starting state given; can't condition on fixed number of points"))
+               rmpoint(npts.each, intensity, win=w.sim, types=types,
+                       verbose=verbose)
+             },
+             stop("Internal error: control$fixing unrecognised")
+             )
+    # if conditioning, add fixed points
+    if(condtype != "none")
+      Xsim <- superimpose(Xsim, x.condpp, W=w.state)
+    # clip result to output window
+    Xclip <- if(!overrideclip) Xsim[w.clip] else Xsim
+    attr(Xclip, "info") <- InfoList
+    return(Xclip)
+  }
+
+  
+########################################################################  
+#      M e t r o p o l i s  H a s t i n g s    s i m u l a t i o n
+########################################################################
+
+  if(verbose)
+    cat("Starting simulation.\nInitial state...")
+  
+
+#### Build starting state
+
+  npts.cond  <- if(condtype != "none") x.condpp$n else 0
+#  npts.total <- npts.free + npts.cond
+
+#### FIRST generate the 'free' points
+  
+#### First the marks, if any.
+#### The marks must be integers 0 to (ntypes-1) for passing to C
+
+  Ctypes <- if(mtype) 0:(ntypes-1) else 0
+  
+  Cmarks <-
+    if(!mtype)
+      0
+    else
+      switch(start$given,
+             n = {
+               # n.start given
+               if(control$fixing=="n.each.type")
+                 rep.int(Ctypes,n.start)
+               else
+                 sample(Ctypes,npts.free,TRUE,ptypes)
+             },
+             x = {
+               # x.start given
+               as.integer(marks(x.start, dfok=FALSE))-1L
+             },
+             stop("internal error: start$given unrecognised")
+             )
+#
+# Then the x, y coordinates
+#
+  switch(start$given,
+         x = {
+           x <- x.start$x
+           y <- x.start$y
+         },
+         n = {
+           xy <-
+             if(!trendy)
+               runifpoint(npts.free, w.sim, ...)
+             else
+               rpoint.multi(npts.free, trend, tmax,
+                      factor(Cmarks,levels=Ctypes), w.sim, ...)
+           x <- xy$x
+           y <- xy$y
+         })
+
+## APPEND the free points AFTER the conditioning points
+
+  if(condtype != "none") {
+    x <- c(x.condpp$x, x)
+    y <- c(x.condpp$y, y)
+    if(mtype)
+      Cmarks <- c(as.integer(marks(x.condpp))-1L, Cmarks)
+  }
+
+  if(!is.null(overrideXstart)) {
+    #' override the previous data
+    x <- overrideXstart$x
+    y <- overrideXstart$y
+    if(mtype) Cmarks <- as.integer(marks(overrideXstart))-1L
+  }
+
+# decide whether to activate visual debugger
+  if(snoop) {
+    Xinit <- ppp(x, y, window=w.sim)
+    if(mtype)
+      marks(Xinit) <- Cmarks + 1L
+    if(verbose) cat("\nCreating debugger environment..")
+    snoopenv <- rmhSnoopEnv(Xinit=Xinit, Wclip=w.clip, R=reach(model))
+    if(verbose) cat("Done.\n")
+  } else snoopenv <- "none"
+
+  
+#######################################################################
+#  Set up C call
+######################################################################    
+
+# Determine the name of the cif used in the C code
+
+  C.id <- model$C.id
+  ncif <- length(C.id)
+  
+# Get the parameters in C-ese
+    
+  ipar <- model$C.ipar
+  iparlist <- if(ncif == 1) list(ipar) else model$C.iparlist
+  iparlen <- lengths(iparlist)
+
+  beta <- model$internal$beta
+  
+# Absorb the constants or vectors `iota' and 'ptypes' into the beta parameters
+  beta <- (iota/ptypes) * beta
+  
+# Algorithm control parameters
+
+  p       <- control$p
+  q       <- control$q
+  nrep    <- control$nrep
+#  fixcode <- control$fixcode
+#  fixing  <- control$fixing
+  fixall  <- control$fixall
+  nverb   <- control$nverb
+  saving  <- control$saving
+  nsave   <- control$nsave
+  nburn   <- control$nburn
+  track   <- control$track
+  thin    <- control$internal$thin
+  pstage  <- control$pstage %orifnull% "start"
+  if(pstage == "block" && !saving) pstage <- "start"
+  temper  <- FALSE
+  invertemp <- 1.0
+
+  if(verbose)
+    cat("Ready to simulate. ")
+
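+  # Coerce all arguments to the exact storage modes that the
+  # C routine 'xmethas' (called below) expects to receive.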
+  storage.mode(ncif)   <- "integer"
+  storage.mode(C.id)   <- "character"
+  storage.mode(beta)    <- "double"
+  storage.mode(ipar)    <- "double"
+  storage.mode(iparlen) <- "integer"
+  storage.mode(period) <- "double"
+  storage.mode(ntypes) <- "integer"
+  storage.mode(nrep)   <- "integer"
+  storage.mode(p) <- storage.mode(q) <- "double"
+  storage.mode(nverb)  <- "integer"
+  storage.mode(x) <- storage.mode(y) <- "double"
+  storage.mode(Cmarks) <- "integer"
+  storage.mode(fixall) <- "integer"
+  storage.mode(npts.cond) <- "integer"
+  storage.mode(track) <- "integer"
+  storage.mode(thin) <- "integer"
+  storage.mode(temper) <- "integer"
+  storage.mode(invertemp) <- "double"
+
+  if(pstage == "start" || !saving) {
+    #' generate all proposal points now.
+    if(verbose)
+      cat("Generating proposal points...")
+
+    #' If the pattern is multitype, generate the mark proposals (0 to ntypes-1)
+    Cmprop <- if(mtype) sample(Ctypes,nrep,TRUE,prob=ptypes) else 0
+    storage.mode(Cmprop) <- "integer"
+
+    #' Generate the ``proposal points'' in the expanded window.
+    xy <- if(trendy) {
+      rpoint.multi(nrep,trend,tmax,
+                   factor(Cmprop, levels=Ctypes),
+                   w.sim, ..., warn=FALSE)
+    } else runifpoint(nrep, w.sim, warn=FALSE)
+    xprop <- xy$x
+    yprop <- xy$y
+    storage.mode(xprop)  <- storage.mode(yprop) <- "double"
+  }
+  
+  if(!saving) {
+    # ////////// Single block /////////////////////////////////
+
+    nrep0 <- 0
+    storage.mode(nrep0)  <- "integer"
+
+    # Call the Metropolis-Hastings C code:
+    if(verbose)
+      cat("Running Metropolis-Hastings.\n")
+    out <- .Call("xmethas",
+                 ncif,
+                 C.id,
+                 beta,
+                 ipar,
+                 iparlen,
+                 period,
+                 xprop, yprop, Cmprop,
+                 ntypes,
+                 nrep,
+                 p, q,
+                 nverb,
+                 nrep0,
+                 x, y, Cmarks,
+                 npts.cond,
+                 fixall,
+                 track,
+                 thin,
+                 snoopenv,
+                 temper,
+                 invertemp,
+                 PACKAGE="spatstat")
+  
+    # Extract the point pattern returned from C
+    X <- ppp(x=out[[1L]], y=out[[2L]], window=w.state, check=FALSE)
+    if(mtype) {
+      # convert integer marks from C to R
+      marx <- factor(out[[3L]], levels=0:(ntypes-1))
+      # then restore original type levels
+      levels(marx) <- types
+      # glue to points
+      marks(X) <- marx
+    }
+
+    # Now clip the pattern to the ``clipping'' window:
+    if(!overrideclip && !control$expand$force.noexp)
+      X <- X[w.clip]
+
+    # Extract transition history:
+    if(track) {
+      usedout <- if(mtype) 3 else 2
+      proptype <- factor(out[[usedout+1]], levels=1:3,
+                         labels=c("Birth", "Death", "Shift"))
+      accepted <- as.logical(out[[usedout+2]])
+      History <- data.frame(proposaltype=proptype, accepted=accepted)
+      if(length(out) >= usedout + 4) {
+        # history includes numerator & denominator of Hastings ratio
+        numerator <- as.double(out[[usedout + 3]])
+        denominator <- as.double(out[[usedout + 4]])
+        History <- cbind(History,
+                         data.frame(numerator=numerator,
+                                    denominator=denominator))
+      }
+    }
+  } else {
+    # ////////// Multiple blocks /////////////////////////////////
+    # determine length of each block of simulations
+    nblocks <- as.integer(1L + ceiling((nrep - nburn)/nsave))
+    block <- c(nburn, rep.int(nsave, nblocks-1L))
+    block[nblocks] <- block[nblocks] - (sum(block)-nrep)
+    block <- block[block >= 1L]
+    nblocks <- length(block)
+    blockend <- cumsum(block)
+    # set up list to contain the saved point patterns
+    Xlist <- vector(mode="list", length=nblocks)
+    # Call the Metropolis-Hastings C code repeatedly:
+    xprev <- x
+    yprev <- y
+    Cmarksprev <- Cmarks
+    #
+    thinFALSE <- as.integer(FALSE)
+    storage.mode(thinFALSE) <- "integer"
+    # ................ loop .........................
+    for(I in 1:nblocks) {
+      # number of iterations for this block
+      nrepI <- block[I]
+      storage.mode(nrepI) <- "integer"
+      # number of previous iterations
+      nrep0 <- if(I == 1) 0 else blockend[I-1]
+      storage.mode(nrep0)  <- "integer"
+      # Generate or extract proposals
+      switch(pstage,
+             start = {
+               #' extract proposals from previously-generated vectors
+               if(verbose)
+                 cat("Extracting proposal points...")
+               seqI <- 1:nrepI
+               xpropI <- xprop[seqI]
+               ypropI <- yprop[seqI]
+               CmpropI <- Cmprop[seqI]
+               storage.mode(xpropI) <- storage.mode(ypropI) <- "double"
+               storage.mode(CmpropI) <- "integer"
+             },
+             block = {
+               # generate 'nrepI' random proposals
+               if(verbose)
+                 cat("Generating proposal points...")
+               #' If the pattern is multitype, generate the mark proposals 
+               CmpropI <- if(mtype) sample(Ctypes,nrepI,TRUE,prob=ptypes) else 0
+               storage.mode(CmpropI) <- "integer"
+               #' Generate the ``proposal points'' in the expanded window.
+               xy <- if(trendy) {
+                 rpoint.multi(nrepI,trend,tmax,
+                              factor(CmpropI, levels=Ctypes),
+                              w.sim, ..., warn=FALSE)
+               } else runifpoint(nrepI, w.sim, warn=FALSE)
+               xpropI <- xy$x
+               ypropI <- xy$y
+               storage.mode(xpropI)  <- storage.mode(ypropI) <- "double"
+             })
+      # no thinning in subsequent blocks
+      if(I > 1) thin <- thinFALSE
+      #' call
+      if(verbose)
+        cat("Running Metropolis-Hastings.\n")
+      out <- .Call("xmethas",
+                   ncif,
+                   C.id,
+                   beta,
+                   ipar,
+                   iparlen,
+                   period,
+                   xpropI, ypropI, CmpropI,
+                   ntypes,
+                   nrepI,
+                   p, q,
+                   nverb,
+                   nrep0,
+                   xprev, yprev, Cmarksprev,
+                   npts.cond,
+                   fixall,
+                   track,
+                   thin,
+                   snoopenv,
+                   temper,
+                   invertemp,
+                   PACKAGE = "spatstat")
+      # Extract the point pattern returned from C
+      X <- ppp(x=out[[1L]], y=out[[2L]], window=w.state, check=FALSE)
+      if(mtype) {
+        # convert integer marks from C to R
+        marx <- factor(out[[3L]], levels=0:(ntypes-1))
+        # then restore original type levels
+        levels(marx) <- types
+        # glue to points
+        marks(X) <- marx
+      }
+      
+      # Now clip the pattern to the ``clipping'' window:
+      if(!overrideclip && !control$expand$force.noexp)
+        X <- X[w.clip]
+
+      # commit to list
+      Xlist[[I]] <- X
+      
+      # Extract transition history:
+      if(track) {
+        usedout <- if(mtype) 3 else 2
+        proptype <- factor(out[[usedout+1]], levels=1:3,
+                           labels=c("Birth", "Death", "Shift"))
+        accepted <- as.logical(out[[usedout+2]])
+        HistoryI <- data.frame(proposaltype=proptype, accepted=accepted)
+        if(length(out) >= usedout + 4) {
+          # history includes numerator & denominator of Hastings ratio
+          numerator <- as.double(out[[usedout + 3]])
+          denominator <- as.double(out[[usedout + 4]])
+          HistoryI <- cbind(HistoryI,
+                            data.frame(numerator=numerator,
+                                       denominator=denominator))
+        }
+        # concatenate with histories of previous blocks
+        History <- if(I == 1) HistoryI else rbind(History, HistoryI)
+      }
+
+      # update 'previous state'
+      xprev <- out[[1L]]
+      yprev <- out[[2L]]
+      Cmarksprev <- if(!mtype) 0 else out[[3]]
+      storage.mode(xprev) <- storage.mode(yprev) <- "double"
+      storage.mode(Cmarksprev) <- "integer"
+
+      if(pstage == "start") {
+        #' discard used proposals
+        xprop <- xprop[-seqI]
+        yprop <- yprop[-seqI]
+        Cmprop <- Cmprop[-seqI]
+      }
+    }
+    # .............. end loop ...............................
+    
+    # Result of simulation is final state 'X'
+    # Tack on the list of intermediate states
+    names(Xlist) <- paste("Iteration", as.integer(blockend), sep="_")
+    attr(X, "saved") <- as.solist(Xlist)
+  }
+
+# Append to the result information about how it was generated.
+  if(kitchensink) {
+    attr(X, "info") <- InfoList
+    attr(X, "seed") <- saved.seed
+  }
+  if(track)
+    attr(X, "history") <- History
+  
+  return(X)
+}
+
+
+# helper function
+
+summarise.trend <- local({
+  # main function
+  summarise.trend <- function(trend, w, a=area(w)) {
+    tlist <- if(is.function(trend) || is.im(trend)) list(trend) else trend
+    return(lapply(tlist, summarise1, w=w, a=a))
+  }
+  # 
+  summarise1 <-  function(x, w, a) {
+    if(is.numeric(x)) {
+      mini <- maxi <- x
+      integ <- a*x
+    } else {
+      Z  <- as.im(x, w)[w, drop=FALSE]
+      ran <- range(Z)
+      mini <- ran[1L]
+      maxi <- ran[2L]
+      integ <- integral.im(Z)
+    }
+    return(list(min=mini, max=maxi, integral=integ))
+  }
+  summarise.trend
+})
+  
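End-to-end sketch of the pipeline implemented above, rmhmodel -> rmhstart -> rmhcontrol -> rmhEngine, with illustrative parameter values:

    library(spatstat)
    mod <- rmhmodel(cif="strauss",
                    par=list(beta=200, gamma=0.4, r=0.07),
                    w=square(1))
    X <- rmh(mod, start=rmhstart(n.start=80),
             control=rmhcontrol(nrep=1e5), verbose=FALSE)
    # with nsave/nburn, intermediate states are kept in attr(X, "saved")
    Xs <- rmh(mod, control=rmhcontrol(nrep=1e5, nsave=2e4, nburn=2e4),
              verbose=FALSE)
    names(attr(Xs, "saved"))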
diff --git a/R/rmh.ppm.R b/R/rmh.ppm.R
new file mode 100755
index 0000000..2852daa
--- /dev/null
+++ b/R/rmh.ppm.R
@@ -0,0 +1,167 @@
+#
+# simulation of FITTED model
+#
+#  $Revision: 1.34 $ $Date: 2016/07/31 07:39:32 $
+#
+#
+rmh.ppm <- function(model, start = NULL,
+                    control = default.rmhcontrol(model, w=w),
+                    ...,
+                    w = NULL, project=TRUE,
+                    nsim=1, drop=TRUE, saveinfo=TRUE,
+                    verbose=TRUE,
+                    new.coef=NULL) {
+  verifyclass(model, "ppm")
+  argh <- list(...)
+
+  if(is.null(control)) {
+    control <- default.rmhcontrol(model, w=w)
+  } else {
+    control <- rmhcontrol(control)
+  }
+
+  # override 
+  if(length(list(...)) > 0)
+    control <- update(control, ...)
+  
+  # convert fitted model object to list of parameters for rmh.default
+  X <- rmhmodel(model, w=w, verbose=verbose, project=project, control=control,
+                new.coef=new.coef)
+
+  # set initial state
+
+  if(is.null(start)) {
+    datapattern <- data.ppm(model)
+    start <- rmhstart(n.start=datapattern$n)
+  }
+  
+  # call rmh.default 
+  # passing only arguments unrecognised by rmhcontrol
+  known <- names(argh) %in% names(formals(rmhcontrol.default))
+  fargs <- argh[!known]
+
+  Y <- do.call(rmh.default,
+               append(list(model=X, start=start, control=control,
+                           nsim=nsim, drop=drop, saveinfo=saveinfo,
+                           verbose=verbose),
+                      fargs))
+  return(Y)
+}
+
+simulate.ppm <- function(object, nsim=1, ...,
+                         singlerun=FALSE,
+                         start = NULL,
+                         control = default.rmhcontrol(object, w=w),
+                         w = NULL,
+                         project=TRUE,
+                         new.coef=NULL,
+                         verbose=FALSE,
+                         progress=(nsim > 1),
+                         drop=FALSE) {
+  verifyclass(object, "ppm")
+  argh <- list(...)
+  if(nsim == 0) return(list())
+
+  starttime <- proc.time()
+  
+  # set up control parameters
+  if(missing(control) || is.null(control)) {
+    rcontr <- default.rmhcontrol(object, w=w)
+  } else {
+    rcontr <- rmhcontrol(control)
+  }
+  if(singlerun) {
+    # allow nsave, nburn to determine nrep
+    nsave <- resolve.1.default("nsave", list(...), as.list(rcontr),
+                               .MatchNull=FALSE)
+    nburn <- resolve.1.default("nburn", list(...), as.list(rcontr),
+                               list(nburn=nsave),
+                               .MatchNull=FALSE)
+    if(!is.null(nsave)) {
+      nrep <- nburn + (nsim-1) * nsave
+      rcontr <- update(rcontr, nrep=nrep, nsave=nsave, nburn=nburn)
+    } 
+  }
+  # other overrides
+  if(length(list(...)) > 0)
+    rcontr <- update(rcontr, ...)
+
+  # Set up model parameters for rmh
+  rmodel <- rmhmodel(object, w=w, verbose=FALSE, project=TRUE, control=rcontr,
+                     new.coef=new.coef)
+  if(is.null(start)) {
+    datapattern <- data.ppm(object)
+    start <- rmhstart(n.start=datapattern$n)
+  }
+  rstart <- rmhstart(start)
+
+  #########
+  
+  if(singlerun && nsim > 1) {
+    # //////////////////////////////////////////////////
+    # execute one long run and save every k-th iteration
+    if(is.null(rcontr$nsave)) {
+      # determine spacing between subsamples
+      if(!is.null(rcontr$nburn)) {
+        nsave <- max(1, with(rcontr, floor((nrep - nburn)/(nsim-1))))
+      } else {
+        # assume nburn = 2 * nsave
+        nsave <- max(1, with(rcontr, floor(nrep/(nsim+1))))
+        nburn <- 2 * nsave
+      }
+      rcontr <- update(rcontr, nsave=nsave, nburn=nburn)
+    }
+    # check nrep is enough
+    nrepmin <- with(rcontr, nburn + (nsim-1) * nsave)
+    if(rcontr$nrep < nrepmin)
+      rcontr <- update(rcontr, nrep=nrepmin)
+    # OK, run it
+    if(progress) {
+      cat(paste("Generating", nsim, "simulated patterns in a single run ... ")) 
+      flush.console()
+    }
+    Y <- rmh(rmodel, rstart, rcontr, verbose=verbose)
+    if(progress)
+      cat("Done.\n")
+    # extract sampled states
+    out <- attr(Y, "saved")
+    if(length(out) != nsim)
+      stop(paste("Internal error: wrong number of simulations generated:",
+                 length(out), "!=", nsim))
+  } else {
+    # //////////////////////////////////////////////////
+    # execute 'nsim' independent runs
+    out <- list()
+    # pre-digest arguments
+    rmhinfolist <- rmh(rmodel, rstart, rcontr, preponly=TRUE, verbose=verbose)
+    # go
+    if(nsim > 0) {
+      if(progress) {
+        cat(paste("Generating", nsim, "simulated", 
+                  ngettext(nsim, "pattern", "patterns"),
+                  "..."))
+        flush.console()
+      }
+      # call rmh
+      # passing only arguments unrecognised by rmhcontrol
+      known <- names(argh) %in% names(formals(rmhcontrol.default))
+      fargs <- argh[!known]
+      rmhargs <- append(list(InfoList=rmhinfolist, verbose=verbose), fargs)
+      if(progress)
+        pstate <- list()
+      for(i in 1:nsim) {
+        out[[i]] <- do.call(rmhEngine, rmhargs)
+        if(progress) pstate <- progressreport(i, nsim, state=pstate)
+      }
+    }
+  }
+  if(nsim == 1 && drop) {
+    out <- out[[1]]
+  } else {
+    out <- as.solist(out)
+    if(nsim > 0)
+      names(out) <- paste("Simulation", 1:nsim)
+  }
+  out <- timed(out, starttime=starttime)
+  return(out)
+}  
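Sketch of the two simulation strategies in simulate.ppm above (illustrative; cells is a standard spatstat dataset):

    fit <- ppm(cells ~ 1, Strauss(r=0.1))
    # default: nsim independent Metropolis-Hastings runs
    Xlist <- simulate(fit, nsim=4, nrep=5e4, progress=FALSE)
    # singlerun: one long chain, subsampled every nsave iterations
    Ylist <- simulate(fit, nsim=4, singlerun=TRUE,
                      nsave=1e4, nburn=2e4, progress=FALSE)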
diff --git a/R/rmhResolveTypes.R b/R/rmhResolveTypes.R
new file mode 100755
index 0000000..9c6c658
--- /dev/null
+++ b/R/rmhResolveTypes.R
@@ -0,0 +1,96 @@
+#
+#
+#   rmhResolveTypes.R
+#
+#   $Revision: 1.9 $   $Date: 2009/10/31 01:52:54 $
+#
+#
+rmhResolveTypes <- function(model, start, control) {
+
+# Decide whether a multitype point process is to be simulated.
+# If so, determine the vector of types.
+
+  verifyclass(model, "rmhmodel")
+  verifyclass(start, "rmhstart")
+  verifyclass(control, "rmhcontrol")
+
+# Different ways of specifying types directly
+
+  types.model <- model$types
+  types.start <- if(start$given=="x" && is.marked(x.start <- start$x.start))
+                     levels(marks(x.start, dfok=FALSE)) else NULL
+  
+# Check for inconsistencies  
+  if(!is.null(types.model) && !is.null(types.start))
+    if(!identical(all.equal(types.model, types.start), TRUE))
+      stop("marks in start$x.start do not match model$types")
+  
+  types.given <- if(!is.null(types.model)) types.model else types.start
+  types.given.source <-
+    if(!is.null(types.model)) "model$types" else "marks of x.start"
+  
+# Different ways of implying the number of types
+  
+  ntypes.beta <- length(model$par[["beta"]])
+  ntypes.ptypes <- length(control$ptypes)
+  ntypes.nstart <- if(start$given == "n") length(start$n.start) else 0
+  mot <- model$trend
+  ntypes.trend <-  if(is.null(mot)) 0 else
+                   if(is.im(mot)) 1 else
+                   if(is.list(mot) &&
+                      all(unlist(lapply(mot, is.im))))
+                     length(mot) else 0
+  
+# Check for inconsistencies in implied number of types (only for numbers > 1)
+
+  nty <- c(ntypes.beta, ntypes.ptypes, ntypes.nstart, ntypes.trend)
+  nam <- c("model$par$beta", "control$ptypes", "start$n.start", "model$trend")
+  implied <- (nty > 1)
+  if(!any(implied))
+    ntypes.implied <- 1
+  else {
+    if(length(unique(nty[implied])) > 1)
+      stop(paste("Mismatch in numbers of types implied by",
+                 commasep(sQuote(nam[implied]))))
+    ntypes.implied <- unique(nty[implied])
+    ntypes.implied.source <- (nam[implied])[1]
+  } 
+
+# Check consistency between types.given and ntypes.implied 
+
+  if(!is.null(types.given) && ntypes.implied > 1)
+    if(length(types.given) != ntypes.implied)
+      stop(paste("Mismatch between number of types in",
+                 types.given.source,
+                 "and length of",
+                 ntypes.implied.source))
+
+# Finally determine the types
+  
+  if(model$multitype.interact) {
+    # There MUST be a types vector
+    types <- if(!is.null(types.given)) types.given
+             else if(ntypes.implied > 1) 1:ntypes.implied
+             else stop("Cannot determine types for multitype process")
+  } else {
+    types <- if(!is.null(types.given)) types.given
+             else if(ntypes.implied > 1) 1:ntypes.implied
+             else 1
+  }
+
+  ntypes <- length(types)
+  
+# If we are conditioning on the number of points of each type,
+# make sure the starting state is appropriate
+
+  if(control$fixing == "n.each.type") {
+    if(start$given == "n" && ntypes.nstart != ntypes)
+      stop("Length of start$n.start not equal to number of types.\n")
+    else if(start$given == "x" && length(types.given) != ntypes) 
+      stop("Marks of start$x.start do not match number of types.\n")
+  }
+  
+  return(types)
+}
+
+  
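The resolution rules above can be exercised directly (internal helper; a sketch using a multitype Strauss model with illustrative parameters):

    mod <- rmhmodel(cif="straussm",
                    par=list(beta=c(20, 30),
                             gamma=matrix(0.5, 2, 2),
                             radii=matrix(0.05, 2, 2)),
                    w=square(1))
    rmhResolveTypes(mod, rmhstart(n.start=c(40, 60)), rmhcontrol(NULL))
    # -> 1:2, inferred consistently from the lengths of beta and n.start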
diff --git a/R/rmhcontrol.R b/R/rmhcontrol.R
new file mode 100755
index 0000000..f92d8b6
--- /dev/null
+++ b/R/rmhcontrol.R
@@ -0,0 +1,231 @@
+#
+#
+#   rmhcontrol.R
+#
+#   $Revision: 1.30 $  $Date: 2017/01/29 07:20:51 $
+#
+#
+
+rmhcontrol <- function(...) {
+  UseMethod("rmhcontrol")
+}
+
+rmhcontrol.rmhcontrol <- function(...) {
+  argz <- list(...)
+  if(length(argz) == 1)
+    return(argz[[1]])
+  stop("Arguments not understood")
+}
+
+rmhcontrol.list <- function(...) {
+  argz <- list(...)
+  nama <- names(argz)
+  if(length(argz) == 1 && !any(nzchar(nama)))
+    do.call(rmhcontrol.default, argz[[1]])
+  else
+    do.call.matched(rmhcontrol.default, argz)
+}
+
+rmhcontrol.default <- function(..., p=0.9, q=0.5, nrep=5e5,
+                        expand=NULL, periodic=NULL, ptypes=NULL,
+                        x.cond=NULL, fixall=FALSE, nverb=0,
+                        nsave=NULL, nburn=nsave, track=FALSE,
+                        pstage=c("block", "start"))
+{
+  argh <- list(...)
+  nargh <- length(argh)
+  if(nargh > 0) {
+    # allow rmhcontrol(NULL), otherwise flag an error
+    if(!(nargh == 1 && is.null(argh[[1]])))
+      stop(paste("Unrecognised arguments to rmhcontrol;",
+                 "valid arguments are listed in help(rmhcontrol.default)"))
+  }
+  # impose default values
+  if(missing(p)) p <- spatstat.options("rmh.p")
+  if(missing(q)) q <- spatstat.options("rmh.q")
+  if(missing(nrep)) nrep <- spatstat.options("rmh.nrep")
+  # validate arguments
+  if(!is.numeric(p) || length(p) != 1
+     || p < 0 || p > 1)
+    stop("p should be a number in [0,1]")
+  if(!is.numeric(q) || length(q) != 1
+     || q < 0 || q > 1)
+    stop("q should be a number in [0,1]")
+  if(!is.numeric(nrep) || length(nrep) != 1
+     || nrep < 1)
+    stop("nrep should be an integer >= 1")
+  nrep <- as.integer(nrep)
+  if(!is.numeric(nverb) || length(nverb) != 1
+     || nverb < 0 || nverb > nrep)
+    stop("nverb should be an integer <= nrep")
+  nverb <- as.integer(nverb)
+  if(!is.logical(fixall) || length(fixall) != 1)
+    stop("fixall should be a logical value")
+  if(!is.null(periodic) && (!is.logical(periodic) || length(periodic) != 1))
+    stop(paste(sQuote("periodic"), "should be a logical value or NULL"))
+  if(saving <- !is.null(nsave)) {
+    if(!is.numeric(nsave) || length(nsave) != 1
+       || nsave < 0 || nsave >= nrep)
+      stop("nsave should be an integer < nrep")
+    if(is.null(nburn)) nburn <- min(nsave, nrep-nsave)
+    if(!is.null(nburn)) stopifnot(nburn + nsave <= nrep)
+  }
+  stopifnot(is.logical(track))
+  pstage <- match.arg(pstage) 
+
+#################################################################
+# Conditioning on point configuration
+#
+# condtype = "none": no conditioning
+# condtype = "Palm": conditioning on the presence of specified points
+# condtype = "window": conditioning on the configuration in a subwindow
+#
+  if(is.null(x.cond)) {
+    condtype <- "none"
+    n.cond <- NULL
+  } else if(is.ppp(x.cond)) {
+    condtype <- "window"
+    n.cond <- x.cond$n
+  } else if(is.data.frame(x.cond)) {
+    if(ncol(x.cond) %in% c(2,3)) {
+      condtype <- "Palm"
+      n.cond <- nrow(x.cond)
+    } else stop("Wrong number of columns in data frame x.cond")
+  } else if(is.list(x.cond)) {
+    if(length(x.cond) %in% c(2,3)) {
+      x.cond <- as.data.frame(x.cond)
+      condtype <- "Palm"
+      n.cond <- nrow(x.cond)
+    } else stop("Wrong number of components in list x.cond")
+  } else stop("Unrecognised format for x.cond")
+
+  if(condtype == "Palm" && n.cond == 0) {
+    warning(paste("Ignored empty configuration x.cond;",
+                  "conditional (Palm) simulation given an empty point pattern",
+                  "is equivalent to unconditional simulation"))
+    condtype <- "none"
+    x.cond <- NULL
+    n.cond <- NULL
+  }
+    
+#################################################################
+# Fixing the number of points?
+#  
+# fixcode = 1 <--> no conditioning
+# fixcode = 2 <--> conditioning on n = number of points
+# fixcode = 3 <--> conditioning on the number of points of each type.
+
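+# For example: p < 1 always gives fixcode 1 (no conditioning);
+# p = 1 with fixall = FALSE gives 2; p = 1 with fixall = TRUE gives 3.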
+  fixcode    <- 2 - (p<1) + fixall - fixall*(p<1)
+  fixing <- switch(fixcode, "none", "n.total", "n.each.type")
+  
+# Warn about silly combination
+  if(fixall && p < 1)
+    warning("fixall = TRUE conflicts with p < 1. Ignored.\n")
+
+###############################################################  
+# `expand' determines expansion of the simulation window
+
+  expand <- rmhexpand(expand)
+
+# No expansion is permitted if we are conditioning on the
+# number of points
+  
+  if(fixing != "none") {
+    if(expand$force.exp)
+      stop(paste("When conditioning on the number of points,",
+                 "no expansion may be done.\n"), call.=FALSE)
+    # no expansion
+    expand <- .no.expansion
+  }
+
+###################################################################
+# return augmented list  
+  out <- list(p=p, q=q, 
+              nrep=nrep, nverb=nverb,
+              expand=expand, 
+              periodic=periodic, 
+              ptypes=ptypes,
+              fixall=fixall,
+              fixcode=fixcode,
+              fixing=fixing,
+              condtype=condtype,
+              x.cond=x.cond,
+              saving=saving, nsave=nsave, nburn=nburn,
+              track=track, pstage=pstage)
+  class(out) <- c("rmhcontrol", class(out))
+  return(out)
+}
+
+print.rmhcontrol <- function(x, ...) {
+  verifyclass(x, "rmhcontrol")
+
+  splat("Metropolis-Hastings algorithm control parameters")
+  splat("Probability of shift proposal: p =", x$p)
+  if(x$fixing == "none") {
+    splat("Conditional probability of death proposal: q =", x$q)
+    if(!is.null(x$ptypes)) {
+      splat("Birth proposal probabilities for each type of point:")
+      print(x$ptypes)
+    }
+  }
+  switch(x$fixing,
+         none={},
+         n.total=splat("The total number of points is fixed"),
+         n.each.type=splat("The number of points of each type is fixed"))
+  switch(x$condtype,
+         none={},
+         window={
+           splat("Conditional simulation given the",
+                 "configuration in a subwindow")
+           print(x$x.cond$window)
+         },
+         Palm={
+           splat("Conditional simulation of Palm type")
+         })
+  splat("Number of M-H iterations: nrep =", x$nrep)
+  if(x$saving) 
+    splat("Save point pattern every", x$nsave, "iterations",
+          "after a burn-in of", x$nburn, "iterations.")
+  pstage <- x$pstage %orifnull% "start"
+  hdr <- "Generate random proposal points:"
+  switch(pstage,
+         start = splat(hdr, "at start of simulations."),
+         block = splat(hdr, "before each block of", x$nsave, "iterations."))
+  cat(paste("Track proposal type and acceptance/rejection?",
+            if(x$track) "yes" else "no", "\n"))
+  if(x$nverb > 0)
+    cat(paste("Progress report every nverb=", x$nverb, "iterations\n"))
+  else
+    cat("No progress reports (nverb = 0).\n")
+
+  # invoke print.rmhexpand
+  print(x$expand)
+
+  cat("Periodic edge correction? ")
+  if(is.null(x$periodic)) cat("Not yet determined.\n") else 
+  if(x$periodic) cat("Yes.\n") else cat("No.\n")
+  #
+  
+  return(invisible(NULL))
+}
+
+default.rmhcontrol <- function(model, w=NULL) {
+  # set default for 'expand'
+  return(rmhcontrol(expand=default.expand(model, w=w)))
+}
+
+update.rmhcontrol <- function(object, ...) {
+  do.call.matched(rmhcontrol.default,
+                  resolve.defaults(list(...), as.list(object),
+                                   .StripNull=TRUE))
+}
+
+rmhResolveControl <- function(control, model) {
+  # adjust control information once the model is known
+  stopifnot(inherits(control, "rmhcontrol"))
+  # change *default* expansion rule to something appropriate for model
+  # (applies only if expansion rule is undecided)
+  control$expand <- change.default.expand(control$expand, default.expand(model))
+  return(control)
+}
+
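Usage sketch for the control constructor and its update method (illustrative values):

    co <- rmhcontrol(nrep=2e5, nsave=5e4, nburn=5e4, pstage="block")
    co2 <- update(co, track=TRUE)   # merges new settings into the old control
    print(co2)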
diff --git a/R/rmhexpand.R b/R/rmhexpand.R
new file mode 100644
index 0000000..f590755
--- /dev/null
+++ b/R/rmhexpand.R
@@ -0,0 +1,220 @@
+#
+#   rmhexpand.R
+#
+#   Rules/data for expanding the simulation window in rmh
+#
+#   $Revision: 1.8 $  $Date: 2016/02/11 10:17:12 $
+#
+
+# Establish names and rules for each type of expansion
+
+RmhExpandRule <- local({
+
+  .RmhExpandTable <-
+  list(area=list(descrip ="Area expansion factor",
+                 minval = 1,
+                 expands = function(x) { unname(x) > 1 }),
+       length=list(descrip ="Length expansion factor",
+                   minval = 1,
+                   expands = function(x) { unname(x) > 1 }),
+       distance=list(descrip="Expansion buffer distance",
+                     minval = 0,
+                     expands = function(x) { unname(x) > 0 }))
+  
+  RmhExpandRule <- function(nama) {
+    if(length(nama) == 0) nama <- "area"
+    if(length(nama) > 1)
+      stop("Internal error: too many names in RmhExpandRule", call.=FALSE)
+    if(!(nama %in% names(.RmhExpandTable)))
+      stop(paste("Internal error: unrecognised expansion type",
+                 sQuote(nama)),
+           call.=FALSE)
+    return(.RmhExpandTable[[nama]])
+  }
+  RmhExpandRule
+})
+  
+                        
+rmhexpand <- function(x=NULL, ..., area=NULL, length=NULL, distance=NULL) {
+  trap.extra.arguments(..., .Context="In rmhexpand")
+  # check for incompatibility
+  n <- (!is.null(x)) + (!is.null(area)) +
+       (!is.null(length)) + (!is.null(distance))
+  if(n > 1) stop("Only one argument should be given")
+  # absorb other arguments into 'x'
+  if(is.null(x) && n > 0) {
+      if(!is.null(area)) x <- c(area=area)
+      if(!is.null(length)) x <- c(length=length)
+      if(!is.null(distance)) x <- c(distance=distance)
+  }
+  if(is.null(x)) {
+    # No expansion rule supplied.
+    # Use spatstat default, indicating that the user did not choose it.
+    force.exp <- force.noexp <- FALSE
+    x <- spatstat.options("expand")
+    x <- rmhexpand(x)$expand
+  } else {
+    # process x
+    if(inherits(x, "rmhexpand"))
+      return(x)
+    if(is.owin(x)) {
+      force.exp <- TRUE
+      force.noexp <- FALSE
+    } else {
+      # expecting c(name=value) or list(name=value)
+      if(is.list(x))
+        x <- unlist(x)
+      if(!is.numeric(x))
+        stop(paste("Expansion argument must be either",
+                   "a number, a window, or NULL.\n"))
+      # x is numeric
+      check.1.real(x, "In rmhexpand(x)")
+      explain.ifnot(is.finite(x), "In rmhexpand(x)")
+      # an unlabelled numeric value is interpreted as an area expansion factor
+      if(!any(nzchar(names(x))))
+        names(x) <- "area"
+      # validate
+      rule <- RmhExpandRule(names(x))
+      if(x < rule$minval) {
+        warning(paste(rule$descrip, "<", rule$minval,
+                      "has been reset to", rule$minval),
+                call.=FALSE)
+        x[] <- rule$minval
+      }
+      force.exp <- rule$expands(x)
+      force.noexp <- !force.exp
+    }
+  }
+  result <- list(expand=x, force.exp=force.exp, force.noexp=force.noexp)
+  class(result) <- "rmhexpand"
+  return(result)
+}
+
+.no.expansion <- list(expand=c(area=1), force.exp=FALSE, force.noexp=TRUE)
+class(.no.expansion) <- "rmhexpand"
+
+print.rmhexpand <- function(x, ..., prefix=TRUE) {
+  if(prefix) cat("Expand the simulation window? ")
+  if(x$force.noexp) {
+    cat("No.\n")
+  } else {
+    if(x$force.exp) cat("Yes:\n") else cat("Not determined. Default is:\n")
+
+    y <- x$expand
+    if(is.null(y)) {
+      print(rmhexpand(spatstat.options("expand")), prefix=FALSE)
+    } else if(is.numeric(y)) {
+      descrip <- RmhExpandRule(names(y))$descrip
+      cat(paste("\t", descrip, unname(y), "\n"))
+    } else {
+      print(y)
+    }
+  }
+  return(invisible(NULL))
+}
+
+summary.rmhexpand <- function(object, ...) {
+  decided <- with(object, force.exp || force.noexp)
+  ex <- object$expand
+  if(is.null(ex))
+    ex <- rmhexpand(spatstat.options("expand"))$expand
+  if(is.owin(ex)) {
+    willexpand <- TRUE
+    descrip <- "Window"
+  } else if(is.numeric(ex)) {
+    rule <- RmhExpandRule(names(ex))
+    descrip    <- rule$descrip
+    willexpand <- if(object$force.exp) TRUE else
+                  if(object$force.noexp) FALSE else
+                  (unname(ex) > rule$minval)
+  } else stop("Internal error: unrecognised format in summary.rmhexpand",
+              call.=FALSE)
+              
+  out <- list(rule.decided=decided,
+              window.decided=decided && is.owin(ex), 
+              expand=ex,
+              descrip=descrip,
+              willexpand=willexpand)
+  class(out) <- "summary.rmhexpand"
+  return(out)
+}
+
+print.summary.rmhexpand <- function(x, ...) {
+  cat("Expansion rule\n")
+  ex <- x$expand
+  if(x$window.decided) {
+    cat("Window is decided.\n")
+    print(ex)
+  } else {
+    if(x$rule.decided) {
+      cat("Rule is decided.\n")
+    } else {
+      cat("Rule is not decided.\nDefault is:\n")
+    }
+    if(!x$willexpand) {
+      cat("No expansion\n")
+    } else {
+      if(is.numeric(ex)) cat(paste(x$descrip, ex, "\n")) else print(ex)
+    }
+  }
+  return(invisible(NULL))
+}
+
+expand.owin <- function(W, ...) {
+  ex <- list(...)
+  if(length(ex) > 1) stop("Too many arguments")
+  # get an rmhexpand object
+  if(length(ex) == 1 && inherits(ex[[1]], "rmhexpand")) {
+    ex <- ex[[1]]
+  } else ex <- do.call(rmhexpand, ex)
+  f <- ex$expand
+  if(is.null(f)) return(W)
+  if(is.owin(f)) return(f)
+  if(!is.numeric(f)) stop("Format not understood")
+  switch(names(f),
+         area = {
+           if(f == 1)
+             return(W)
+           bb <- boundingbox(W)
+           xr <- bb$xrange
+           yr <- bb$yrange
+           fff <- (sqrt(f) - 1)/2
+           Wexp <- grow.rectangle(bb, fff * diff(xr), fff * diff(yr))
+         },
+         length = {
+           if(f == 1)
+             return(W)
+           bb <- boundingbox(W)
+           xr <- bb$xrange
+           yr <- bb$yrange
+           fff <- (f - 1)/2
+           Wexp <- grow.rectangle(bb, fff * diff(xr), fff * diff(yr))
+         },
+         distance = {
+           if(f == 0)
+             return(W)
+           Wexp <- if(is.rectangle(W)) grow.rectangle(W, f) else dilation(W, f)
+         },
+         stop("Internal error: unrecognised type")
+         )
+  return(Wexp)
+}
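+
+# Worked example (illustrative): for an area factor f, each margin of the
+# bounding box is padded by (sqrt(f) - 1)/2 times the corresponding side, so
+#   expand.owin(owin(c(0,1), c(0,1)), area=4)
+# pads the unit square by 0.5 on every side, giving [-0.5,1.5] x [-0.5,1.5].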
+
+will.expand <- function(x) {
+  stopifnot(inherits(x, "rmhexpand"))
+  if(x$force.exp) return(TRUE)
+  if(x$force.noexp) return(FALSE)
+  return(summary(x)$willexpand)
+}
+
+is.expandable <- function(x) { UseMethod("is.expandable") }
+
+change.default.expand <- function(x, newdefault) {
+  stopifnot(inherits(x, "rmhexpand"))
+  decided <- with(x, force.exp || force.noexp)
+  if(!decided)
+    x$expand <- rmhexpand(newdefault)$expand
+  return(x)
+}
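+
+# Usage sketch (illustrative): change.default.expand() only affects a rule
+# that the user has not explicitly decided, e.g.
+#   ex <- rmhexpand()                           # undecided; follows default
+#   will.expand(change.default.expand(ex, 1))   # FALSE: default now area=1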
+
+  
diff --git a/R/rmhmodel.R b/R/rmhmodel.R
new file mode 100755
index 0000000..722b453
--- /dev/null
+++ b/R/rmhmodel.R
@@ -0,0 +1,1335 @@
+#
+#
+#   rmhmodel.R
+#
+#   $Revision: 1.74 $  $Date: 2017/06/05 10:31:58 $
+#
+#
+
+rmhmodel <- function(...) {
+  UseMethod("rmhmodel")
+}
+
+
+rmhmodel.rmhmodel <- function(model, ...) {
+  # Check for outdated internal format
+  # C.par was replaced by C.beta and C.ipar in spatstat 1.22-3 
+  if(outdated <- !is.null(model$C.par))
+    warning("Outdated internal format of rmhmodel object; rebuilding it")
+  if(outdated || (length(list(...)) > 0))
+    model <- rmhmodel.list(unclass(model), ...)
+  return(model)
+}
+
+rmhmodel.list <- function(model, ...) {
+  argnames <- c("cif","par","w","trend","types")
+  ok <- argnames %in% names(model)
+  do.call(rmhmodel.default,
+          resolve.defaults(list(...), model[argnames[ok]]))
+}
+
+rmhmodel.default <- local({
+  
+  rmhmodel.default <- function(...,
+    cif=NULL, par=NULL, w=NULL, trend=NULL, types=NULL) {
+    rmhmodelDefault(..., cif=cif, par=par, w=w, trend=trend, types=types)
+  }
+
+  rmhmodelDefault <- function(...,
+    cif=NULL, par=NULL, w=NULL, trend=NULL, types=NULL,
+    stopinvalid=TRUE) {
+
+    if(length(list(...)) > 0)
+      stop(paste("rmhmodel.default: syntax should be", 
+                 "rmhmodel(cif, par, w, trend, types)",
+                 "with arguments given by name if they are present"), 
+           call. = FALSE)
+
+    ## Validate parameters
+    if(is.null(cif)) stop("cif is missing or NULL")
+    if(is.null(par)) stop("par is missing or NULL")
+
+    if(!is.null(w))
+      w <- as.owin(w)
+  
+    if(!is.character(cif))
+      stop("cif should be a character string")
+
+    betamultiplier <- 1
+    
+    Ncif <- length(cif)
+    if(Ncif > 1) {
+      ## hybrid
+      ## check for Poisson components
+      ispois <- (cif == 'poisson')
+      if(any(ispois)) {
+        ## validate Poisson components
+        Npois <- sum(ispois)
+        poismodels <- vector(mode="list", length=Npois)
+        parpois <- par[ispois]
+        for(i in 1:Npois)
+          poismodels[[i]] <- rmhmodel(cif='poisson', par=parpois[[i]],
+                                      w=w, trend=NULL, types=types,
+                                      stopinvalid=FALSE)
+        ## consolidate Poisson intensity parameters
+        poisbetalist <- lapply(poismodels, getElement, name="C.beta")
+        poisbeta <- Reduce("*", poisbetalist)
+        if(all(ispois)) {
+          ## model collapses to a Poisson process
+          cif <- 'poisson'
+          Ncif <- 1
+          par <- list(beta=poisbeta)
+          betamultiplier <- 1
+        } else {
+          ## remove Poisson components
+          cif <- cif[!ispois]
+          Ncif <- sum(!ispois)
+          par <- par[!ispois]
+          if(Ncif == 1) # revert to single-cif format
+            par <- par[[1]]
+          ## absorb beta parameters 
+          betamultiplier <- poisbeta
+        }
+      }
+    }
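+
+    ## Worked example (illustrative): for cif=c('strauss','poisson') with
+    ## strauss beta=10 and poisson beta=5, the Poisson component is removed
+    ## above and its intensity absorbed, so the remaining Strauss cif is
+    ## simulated with effective beta = 10 * 5 = 50.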
+  
+    if(Ncif > 1) {
+      ## genuine hybrid 
+      models <- vector(mode="list", length=Ncif)
+      check <- vector(mode="list", length=Ncif)
+      for(i in 1:Ncif) 
+        models[[i]] <- rmhmodel(cif=cif[i], par=par[[i]],
+                                w=w, trend=NULL, types=types,
+                                stopinvalid=FALSE)
+      C.id  <- unlist(lapply(models, getElement, name="C.id"))
+      C.betalist <- lapply(models, getElement, name="C.beta")
+      C.iparlist <- lapply(models, getElement, name="C.ipar")
+      ## absorb beta multiplier into beta parameter of first component
+      C.betalist[[1]] <- C.betalist[[1]] * betamultiplier
+      ## concatenate for use in C
+      C.beta     <- unlist(C.betalist)
+      C.ipar     <- unlist(C.iparlist)
+      check <- lapply(models, getElement, name="check")
+      maxr <- max(unlist(lapply(models, getElement, name="reach")))
+      ismulti <- unlist(lapply(models, getElement, name="multitype.interact"))
+      multi <- any(ismulti)
+      ## determine whether model exists
+      integ <- unlist(lapply(models, getElement, name="integrable"))
+      stabi <- unlist(lapply(models, getElement, name="stabilising"))
+      integrable <- all(integ) || any(stabi)
+      stabilising <- any(stabi)
+      ## string explanations of conditions for validity
+      expl <- lapply(models, getElement, name="explainvalid")
+      integ.ex <- unlist(lapply(expl, getElement, name="integrable"))
+      stabi.ex <- unlist(lapply(expl, getElement, name="stabilising"))
+      stabi.oper <- !(stabi.ex %in% c("TRUE", "FALSE"))
+      integ.oper <- !(integ.ex %in% c("TRUE", "FALSE"))
+      compnames <- if(!anyDuplicated(C.id)) paste("cif", sQuote(C.id)) else
+                     paste("component", 1:Ncif, paren(sQuote(C.id)))
+      if(!integrable && stopinvalid) {
+        ## model is not integrable: explain why
+        ifail <- !integ & integ.oper
+        ireason <- paste(compnames[ifail], "should satisfy",
+                         paren(integ.ex[ifail], "{"))
+        ireason <- verbalogic(ireason, "and")
+        if(sum(ifail) <= 1) {
+          ## There's only one offending cif, so stability is redundant
+          sreason <- "FALSE"
+        } else {
+          sfail <- !stabi & stabi.oper
+          sreason <- paste(compnames[sfail], "should satisfy",
+                           paren(stabi.ex[sfail], "{"))
+          sreason <- verbalogic(sreason, "or")
+        }
+        reason <- verbalogic(c(ireason, sreason), "or")
+        stop(paste("rmhmodel: hybrid model is not integrable; ", reason),
+             call.=FALSE)
+      } else {
+        ## construct strings summarising conditions for validity
+        if(!any(integ.oper))
+          ireason <- as.character(integrable)
+        else {
+          ireason <- paste(compnames[integ.oper], "should satisfy",
+                           paren(integ.ex[integ.oper], "{"))
+          ireason <- verbalogic(ireason, "and")
+        }
+        if(!any(stabi.oper))
+          sreason <- as.character(stabilising)
+        else {
+          sreason <- paste(compnames[stabi.oper], "should satisfy",
+                           paren(stabi.ex[stabi.oper], "{"))
+          sreason <- verbalogic(sreason, "or")
+        }
+        ireason <- verbalogic(c(ireason, sreason), "or")
+        explainvalid <- list(integrable=ireason,
+                             stabilising=sreason)
+      }
+      
+      out <- list(cif=cif,
+                  par=par,
+                  w=w,
+                  trend=trend,
+                  types=types,
+                  C.id=C.id,
+                  C.beta=C.beta,
+                  C.ipar=C.ipar,
+                  C.betalist=C.betalist,
+                  C.iparlist=C.iparlist,
+                  check=check,
+                  multitype.interact=multi,
+                  integrable=integrable,
+                  stabilising=stabilising,
+                  explainvalid=explainvalid,
+                  reach=maxr)
+      class(out) <- c("rmhmodel", class(out))
+      return(out)
+    }
+
+    ## non-hybrid
+  
+    ## Check that this is a recognised model
+    ## and look up the rules for this model
+    rules <- spatstatRmhInfo(cif)
+  
+    ## Map the name of the cif from R to C
+    ##      (the names are normally identical in R and C,
+    ##      except "poisson" -> NA)
+    C.id <- rules$C.id
+  
+    ## Check that the C name is recognised in C 
+    if(!is.na(C.id)) {
+      z <- .C("knownCif",
+              cifname=as.character(C.id),
+              answer=as.integer(0),
+              PACKAGE = "spatstat")
+      ok <- as.logical(z$answer)
+      if(!ok)
+        stop(paste("Internal error: the cif", sQuote(C.id),
+                   "is not recognised in the C code"))
+    }
+
+    ## Validate the model parameters and reformat them 
+    check <- rules$parhandler
+    checkedpar <-
+      if(!rules$multitype)
+        check(par)
+      else if(!is.null(types))
+        check(par, types)
+      else 
+      ## types vector not given - defer checking
+      NULL
+
+    if(!is.null(checkedpar)) {
+      stopifnot(is.list(checkedpar))
+      stopifnot(!is.null(names(checkedpar)) && all(nzchar(names(checkedpar))))
+      stopifnot(names(checkedpar)[[1]] == "beta")
+      C.beta  <- unlist(checkedpar[[1]])
+      C.beta <- C.beta * betamultiplier
+      C.ipar <- as.numeric(unlist(checkedpar[-1]))
+    } else {
+      C.beta <- C.ipar <- NULL
+    }
+  
+    ## Determine whether model is integrable
+    integrable <- rules$validity(par, "integrable")
+    explainvalid  <- rules$explainvalid
+    
+    if(!integrable && stopinvalid) 
+      stop(paste("rmhmodel: the model is not integrable; it should satisfy",
+                 explainvalid$integrable),
+           call.=FALSE)
+  
+    ## Determine whether cif is stabilising
+    ## (i.e. any hybrid including this cif will be integrable)
+    stabilising <- rules$validity(par, "stabilising")
+
+    ## Calculate reach of model
+    mreach <- rules$reach(par)
+
+    ###################################################################
+    ## return augmented list  
+    out <- list(cif=cif,
+                par=par,
+                w=w,
+                trend=trend,
+                types=types,
+                C.id=C.id,
+                C.beta=C.beta,
+                C.ipar=C.ipar,
+                check= if(is.null(C.ipar)) check else NULL,
+                multitype.interact=rules$multitype,
+                integrable=integrable,
+                stabilising=stabilising,
+                explainvalid=explainvalid,
+                reach=mreach
+                )
+    class(out) <- c("rmhmodel", class(out))
+    return(out)
+  }
+
+  rmhmodel.default
+})
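+
+# Usage sketch (illustrative):
+#   mod <- rmhmodel(cif="strauss",
+#                   par=list(beta=100, gamma=0.5, r=0.1),
+#                   w=square(1))
+# succeeds because gamma <= 1 makes the model integrable; print(mod) and
+# reach(mod) (here 0.1) then report the components assembled above.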
+
+print.rmhmodel <- function(x, ...) {
+  verifyclass(x, "rmhmodel")
+
+  cat("Metropolis-Hastings algorithm, model parameters\n")
+
+  Ncif <- length(x$cif)
+  cat(paste("Conditional intensity:",
+            if(Ncif == 1) "cif=" else "hybrid of cifs",
+            commasep(sQuote(x$cif)), "\n"))
+  
+  if(!is.null(x$types)) {
+    if(length(x$types) == 1)
+      cat("Univariate process.\n")
+    else {
+      cat("Multitype process with types =\n")
+      print(x$types)
+      if(!x$multitype.interact)
+        cat("Interaction does not depend on type\n")
+    }
+  } else if(x$multitype.interact) 
+    cat("Multitype process, types not yet specified.\n")
+  
+  cat("Numerical parameters: par =\n")
+  print(x$par)
+  if(is.null(x$C.ipar))
+    cat("Parameters have not yet been checked for compatibility with types.\n")
+  if(is.owin(x$w)) print(x$w) else cat("Window: not specified.\n")
+  cat("Trend: ")
+  if(!is.null(x$trend)) print(x$trend) else cat("none.\n")
+  if(!is.null(x$integrable) && !x$integrable) {
+    cat("\n*Warning: model is not integrable and cannot be simulated*\n")
+  }
+  invisible(NULL)
+}
+
+reach.rmhmodel <- function(x, ...) {
+  if(length(list(...)) == 0)
+    return(x$reach)
+  # reach must be recomputed 
+  cif <- x$cif
+  Ncif <- length(cif)
+  pars <- if(Ncif == 1) list(x$par) else x$par
+  maxr <- 0
+  for(i in seq_len(Ncif)) {
+    cif.i <- cif[i]
+    par.i <- pars[[i]]
+    rules <- spatstatRmhInfo(cif.i)
+    rchfun  <- rules$reach
+    if(!is.function(rchfun))
+      stop(paste("Internal error: reach is unknown for cif=", sQuote(cif.i)),
+           call.=FALSE)
+    r.i <- rchfun(par.i, ...)
+    maxr <- max(maxr, r.i, na.rm=TRUE)
+  }
+  return(maxr)
+}
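+
+# Example (illustrative): with no extra arguments, reach() returns the value
+# stored when the model was built; supplying arguments, e.g.
+#   reach(mod, epsilon=0.01)
+# forces recomputation from the rules table, which matters for cifs such as
+# 'sftcr' whose reach is infinite unless a tolerance epsilon is given.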
+
+is.poisson.rmhmodel <- function(x) {
+  verifyclass(x, "rmhmodel")
+  identical(x$cif, 'poisson')
+}
+
+is.stationary.rmhmodel <- function(x) {
+  verifyclass(x, "rmhmodel")
+  tren <- x$trend
+  return(is.null(tren) || is.numeric(tren))
+}
+
+as.owin.rmhmodel <- function(W, ..., fatal=FALSE) {
+  # W is the rmhmodel object. It contains a window w
+  ans <- W$w
+  if(is.owin(ans)) return(ans)
+  if(fatal) stop("rmhmodel object does not contain a window")
+  return(NULL)
+}
+
+domain.rmhmodel <- Window.rmhmodel <- function(X, ...) { as.owin(X) }
+
+is.expandable.rmhmodel <- function(x) {
+  tren <- x$trend
+  ok <- function(z) { is.null(z) || is.numeric(z) || is.function(z) }
+  return(if(!is.list(tren)) ok(tren) else all(unlist(lapply(tren, ok))))
+}
+
+  
+#####  Table of rules for handling rmh models ##################
+
+spatstatRmhInfo <- function(cifname) {
+  rules <- .Spatstat.RmhTable[[cifname]]
+  if(is.null(rules))
+    stop(paste("Unrecognised cif:", sQuote(cifname)), call.=FALSE)
+  return(rules)
+}
+  
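+# Each entry of the table below supplies, for one cif:
+#   C.id          name of the cif in the C code (NA for 'poisson')
+#   multitype     whether the interaction depends on the marks
+#   parhandler    validates and reformats the 'par' list
+#   validity      tests whether given parameters are integrable/stabilising
+#   explainvalid  the same conditions, as text, for error messages
+#   reach, hardcore, temper   numerical summaries and transformations
+# Example (illustrative):
+#   spatstatRmhInfo("strauss")$reach(list(beta=10, gamma=0.5, r=0.1))  # 0.1
+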
+.Spatstat.RmhTable <-
+  list(
+#
+# 0. Poisson (special case)
+#
+       'poisson'=
+       list(
+            C.id=NA,
+            multitype=FALSE,
+            parhandler=function(par, ...) {
+              ctxt <- "For the Poisson process"
+              par <- check.named.list(par, "beta", ctxt)
+              with(par, forbidNA(beta, ctxt))
+              with(par, explain.ifnot(all(beta >= 0), ctxt))
+              return(par)
+            },
+            validity=function(par, kind) {
+              switch(kind,
+                     integrable=TRUE,
+                     stabilising=FALSE)
+            },
+            explainvalid=list(integrable="TRUE",stabilising="FALSE"),
+            reach = function(par, ...) { return(0) },
+            hardcore = function(par, ...) { return(0) },
+            # 'par' is a list, so exponentiate its beta component
+            temper = function(par, invtemp) { within(par, beta <- beta^invtemp) }
+            ),
+#       
+# 1. Strauss.
+#       
+       'strauss'=
+       list(
+            C.id="strauss",
+            multitype=FALSE,
+            parhandler=function(par, ...) {
+              ctxt <- "For the strauss cif"
+              par <- check.named.list(par, c("beta","gamma","r"), ctxt)
+              # treat r=NA as absence of interaction
+              par <- within(par, if(is.na(r)) { r <- 0; gamma <- 1 })
+              with(par, check.finite(beta, ctxt))
+              with(par, check.finite(gamma, ctxt))
+              with(par, check.finite(r, ctxt))
+              with(par, check.1.real(gamma, ctxt))
+              with(par, check.1.real(r,     ctxt))
+              with(par, explain.ifnot(all(beta >= 0), ctxt))
+              with(par, explain.ifnot(gamma >= 0, ctxt))
+              with(par, explain.ifnot(r >= 0, ctxt))
+              return(par)
+            },
+            validity=function(par, kind) {
+              gamma <- par$gamma
+              switch(kind,
+                     integrable=(gamma <= 1),
+                     stabilising=(gamma == 0)
+                     )
+            },
+            explainvalid=list(
+              integrable="gamma <= 1",
+              stabilising="gamma == 0"),
+            reach = function(par, ...) {
+              r <- par[["r"]]
+              g <- par[["gamma"]]
+              return(if(g == 1) 0 else r)
+            },
+            hardcore = function(par, ..., epsilon=0) {
+              r <- par[["r"]]
+              g <- par[["gamma"]]
+              return(if(g <= epsilon) r else 0)
+            },
+            temper = function(par, invtemp) {
+              within(par, {
+                beta <- beta^invtemp
+                gamma <- gamma^invtemp
+              })
+            }
+            ),
+#       
+# 2. Strauss with hardcore.
+#       
+       'straush' =
+       list(
+            C.id="straush",
+            multitype=FALSE,
+            parhandler=function(par, ...) {
+              ctxt <- "For the straush cif"
+              par <- check.named.list(par, c("beta","gamma","r","hc"), ctxt)
+              # treat hc=NA as absence of hard core
+              par <- within(par, if(is.na(hc)) { hc <- 0 } )
+              # treat r=NA as absence of interaction
+              par <- within(par, if(is.na(r)) { r <- hc; gamma <- 1 } )
+              with(par, check.finite(beta, ctxt))
+              with(par, check.finite(gamma, ctxt))
+              with(par, check.finite(r, ctxt))
+              with(par, check.finite(hc, ctxt))
+              with(par, check.1.real(gamma, ctxt))
+              with(par, check.1.real(r,     ctxt))
+              with(par, check.1.real(hc,     ctxt))
+              with(par, explain.ifnot(all(beta >= 0), ctxt))
+              with(par, explain.ifnot(gamma >= 0, ctxt))
+              with(par, explain.ifnot(r >= 0, ctxt))
+              with(par, explain.ifnot(hc >= 0, ctxt))
+              with(par, explain.ifnot(hc <= r, ctxt))
+              return(par)
+            },
+            validity=function(par, kind) {
+              hc <- par$hc
+              gamma <- par$gamma
+              switch(kind,
+                     integrable=(hc > 0 || gamma <= 1),
+                     stabilising=(hc > 0)
+                   )
+            },
+            explainvalid=list(
+              integrable="hc > 0 or gamma <= 1",
+              stabilising="hc > 0"),
+            reach = function(par, ...) {
+              h <- par[["hc"]]
+              r <- par[["r"]]
+              g <- par[["gamma"]]
+              return(if(g == 1) h else r)
+            },
+            hardcore = function(par, ..., epsilon=0) {
+              h <- par[["hc"]]
+              r <- par[["r"]]
+              g <- par[["gamma"]]
+              return(if(g <= epsilon) r else h)
+            },
+            temper = function(par, invtemp) {
+              within(par, {
+                beta <- beta^invtemp
+                gamma <- gamma^invtemp
+              })
+            }
+            ),
+#       
+# 3. Softcore.
+#
+       'sftcr' =
+       list(
+            C.id="sftcr",
+            multitype=FALSE,
+            parhandler=function(par, ...) {
+              ctxt <- "For the sftcr cif"
+              par <- check.named.list(par, c("beta","sigma","kappa"), ctxt)
+              with(par, check.finite(beta, ctxt))
+              with(par, check.finite(sigma, ctxt))
+              with(par, check.finite(kappa, ctxt))
+              with(par, check.1.real(sigma, ctxt))
+              with(par, check.1.real(kappa, ctxt))
+              with(par, explain.ifnot(all(beta >= 0), ctxt))
+              with(par, explain.ifnot(sigma >= 0, ctxt))
+              with(par, explain.ifnot(kappa >= 0 && kappa <= 1, ctxt))
+              return(par)
+            },
+            validity=function(par, kind) {
+              switch(kind,
+                     integrable=TRUE,
+                     stabilising=FALSE)
+            },
+            explainvalid=list(integrable="TRUE",stabilising="FALSE"),
+            reach = function(par, ..., epsilon=0) {
+              if(epsilon==0)
+                return(Inf)
+              kappa <- par[["kappa"]]
+              sigma <- par[["sigma"]]
+              return(sigma/(epsilon^(kappa/2)))
+            },
+            hardcore = function(par, ..., epsilon=0) {
+              if(epsilon==0)
+                return(0)
+              kappa <- par[["kappa"]]
+              sigma <- par[["sigma"]]
+              return(sigma/((-log(epsilon))^(kappa/2)))
+            },
+            temper = function(par, invtemp) {
+              within(par, {
+                beta <- beta^invtemp
+                sigma <- sigma * (invtemp^(kappa/2))
+              })
+            }
+            ),
+#       
+# 4. Multitype Strauss.
+#       
+       'straussm' =
+       list(
+            C.id="straussm",
+            multitype=TRUE,
+            parhandler=function(par, types) {
+              ctxt <- "For the straussm cif"
+              par <- check.named.list(par, c("beta","gamma","radii"), ctxt)
+
+              beta <- par$beta
+              gamma <- par$gamma
+              r <- par$radii
+              ntypes <- length(types)
+
+              check.finite(beta, ctxt)
+              check.nvector(beta, ntypes, TRUE, "types")
+
+              MultiPair.checkmatrix(gamma, ntypes, "par$gamma")
+              gamma[is.na(gamma)] <- 1
+              check.finite(gamma, ctxt)
+
+              MultiPair.checkmatrix(r, ntypes, "par$radii")
+              if(any(nar <- is.na(r))) {
+                r[nar] <- 0
+                gamma[nar] <- 1
+              }
+              check.finite(r, ctxt)
+
+              explain.ifnot(all(beta >= 0), ctxt)
+              explain.ifnot(all(gamma >= 0), ctxt)
+              explain.ifnot(all(r >= 0), ctxt)
+
+              par <- list(beta=beta, gamma=gamma, r=r)
+              return(par)
+            }, 
+            validity=function(par, kind) {
+              gamma <- par$gamma
+              radii <- par$radii
+              dg <- diag(gamma)
+              dr <- diag(radii)
+              hard <- !is.na(dg) & (dg == 0) & !is.na(dr) & (dr > 0)
+              operative <- !is.na(gamma) & !is.na(radii) & (radii > 0)
+              switch(kind,
+                     stabilising=all(hard),
+                     integrable=all(hard) || all(gamma[operative] <= 1))
+            },
+            explainvalid=list(
+              integrable=paste(
+                "gamma[i,j] <= 1 for all i and j,",
+                "or gamma[i,i] = 0 for all i"),
+              stabilising="gamma[i,i] = 0 for all i"),
+            reach = function(par, ...) {
+              r <- par$radii
+              g <- par$gamma
+              operative <- ! (is.na(r) | (g == 1))
+              return(max(0, r[operative]))
+            },
+            hardcore = function(par, ..., epsilon=0) {
+              r <- par$radii
+              g <- par$gamma
+              return(max(0, r[!is.na(r) & g <= epsilon]))
+            },
+            temper = function(par, invtemp) {
+              within(par, {
+                beta <- beta^invtemp
+                gamma <- gamma^invtemp
+              })
+            }
+            ),
+#       
+# 5. Multitype Strauss with hardcore.
+#       
+       'straushm' = 
+       list(
+            C.id="straushm",
+            multitype=TRUE,
+            parhandler=function(par, types) {
+              ctxt="For the straushm cif"
+              par <- check.named.list(par,
+                                      c("beta","gamma","iradii","hradii"),
+                                      ctxt)
+
+              beta <- par$beta
+              gamma <- par$gamma
+              iradii <- par$iradii
+              hradii <- par$hradii
+              ntypes <- length(types)
+
+              check.nvector(beta, ntypes, TRUE, "types")
+              check.finite(beta, ctxt)
+              
+              MultiPair.checkmatrix(gamma, ntypes, "par$gamma")
+              gamma[is.na(gamma)] <- 1
+              check.finite(gamma, ctxt)
+
+              MultiPair.checkmatrix(iradii, ntypes, "par$iradii")
+              if(any(nar <- is.na(iradii))) {
+                iradii[nar] <- 0
+                gamma[nar] <- 1
+              }
+              check.finite(iradii, ctxt)
+
+              MultiPair.checkmatrix(hradii, ntypes, "par$hradii")
+              nah <- is.na(hradii)
+              hradii[nah] <- 0
+              check.finite(hradii, ctxt)
+
+              explain.ifnot(all(beta >= 0), ctxt)
+              explain.ifnot(all(gamma >= 0), ctxt)
+              explain.ifnot(all(iradii >= 0), ctxt)
+              explain.ifnot(all(hradii >= 0), ctxt)
+
+              comparable <- !nar & !nah
+              explain.ifnot(all((iradii >= hradii)[comparable]), ctxt)
+
+              par <- list(beta=beta,gamma=gamma,iradii=iradii,hradii=hradii)
+              return(par)
+            },
+            validity=function(par, kind) {
+              gamma <- par$gamma
+              iradii <- par$iradii
+              hradii <- par$hradii
+              dh <- diag(hradii)
+              dg <- diag(gamma)
+              dr <- diag(iradii)
+              hhard <- !is.na(dh) & (dh > 0)
+              ihard <- !is.na(dr) & (dr > 0) & !is.na(dg) & (dg == 0)
+              hard <- hhard | ihard
+              operative <- !is.na(gamma) & !is.na(iradii) & (iradii > 0)
+              switch(kind,
+                     stabilising=all(hard),
+                     integrable={
+                       all(hard) || all(gamma[operative] <= 1)
+                     })
+            },
+            explainvalid=list(
+              integrable=paste(
+                "hradii[i,i] > 0 or gamma[i,i] = 0 for all i, or",
+                "gamma[i,j] <= 1 for all i and j"),
+              stabilising="hradii[i,i] > 0 or gamma[i,i] = 0 for all i"),
+            reach=function(par, ...) {
+              r <- par$iradii
+              h <- par$hradii
+              g <- par$gamma
+              roperative <- ! (is.na(r) | (g == 1))
+              hoperative <- ! is.na(h)
+              return(max(0, r[roperative], h[hoperative]))
+            },
+            hardcore = function(par, ..., epsilon=0) {
+              ## interaction radii for this cif are stored as 'iradii'
+              r <- par$iradii
+              h <- par$hradii
+              g <- par$gamma
+              return(max(0, h[!is.na(h)],
+                         r[!is.na(r) & g <= epsilon]))
+            },
+            temper = function(par, invtemp) {
+              within(par, {
+                beta <- beta^invtemp
+                gamma <- gamma^invtemp
+              })
+            }
+            ),
+#       
+# 6. Diggle-Gates-Stibbard interaction
+#    (function number 1 from Diggle, Gates, and Stibbard)
+       
+       'dgs' = 
+       list(
+            C.id="dgs",
+            multitype=FALSE,
+            parhandler=function(par, ...) {
+              ctxt <- "For the dgs cif"
+              par <- check.named.list(par, c("beta","rho"), ctxt)
+              with(par, check.finite(beta, ctxt))
+              with(par, check.finite(rho, ctxt))
+              with(par, explain.ifnot(all(beta >= 0), ctxt))
+              with(par, check.1.real(rho, ctxt))
+              with(par, explain.ifnot(rho >= 0, ctxt))
+              return(par)
+            },
+            validity=function(par, kind) {
+              switch(kind,
+                     integrable=TRUE,
+                     stabilising=FALSE)
+            },
+            explainvalid=list(integrable="TRUE", stabilising="FALSE"),
+            reach=function(par, ...) {
+              return(par[["rho"]])
+            },
+            hardcore=function(par, ..., epsilon=0) {
+              if(epsilon == 0) return(0)
+              return(par[["rho"]] * (2/pi) * asin(sqrt(epsilon)))
+            },
+            temper = NULL  # not a loglinear model
+            ),
+#
+# 7. Diggle-Gratton interaction 
+#    (function number 2 from Diggle, Gates, and Stibbard).
+
+       'diggra' =
+       list(
+            C.id="diggra",
+            multitype=FALSE,
+            parhandler=function(par, ...) {
+              ctxt <- "For the diggra cif"
+              par <- check.named.list(par, c("beta","kappa","delta","rho"),
+                                      ctxt)
+              with(par, check.finite(beta, ctxt))
+              with(par, check.finite(kappa, ctxt))
+              with(par, check.finite(delta, ctxt))
+              with(par, check.finite(rho, ctxt))
+              with(par, explain.ifnot(all(beta >= 0), ctxt))
+              with(par, check.1.real(kappa, ctxt))
+              with(par, check.1.real(delta, ctxt))
+              with(par, check.1.real(rho,   ctxt))
+              with(par, explain.ifnot(kappa >= 0, ctxt))              
+              with(par, explain.ifnot(delta >= 0, ctxt))              
+              with(par, explain.ifnot(rho >= 0, ctxt))              
+              with(par, explain.ifnot(delta < rho, ctxt))              
+              return(par)
+            },
+            validity=function(par, kind) {
+              switch(kind,
+                     integrable=TRUE,
+                     stabilising=FALSE)
+            },
+            explainvalid=list(integrable="TRUE",stabilising="FALSE"),
+            reach=function(par, ...) {
+              return(par[["rho"]])
+            },
+            hardcore=function(par, ..., epsilon=0) {
+              return(par[["delta"]])
+            },
+            temper = function(par, invtemp) {
+              within(par, {
+                kappa <- kappa * invtemp
+              })
+            }),
+#       
+# 8. Geyer saturation model
+#       
+       'geyer' = 
+       list(
+            C.id="geyer",
+            multitype=FALSE,
+            parhandler=function(par, ...) {
+              ctxt <- "For the geyer cif"
+              par <- check.named.list(par, c("beta","gamma","r","sat"), ctxt)
+              with(par, check.1.real(gamma, ctxt))
+              with(par, check.1.real(r,     ctxt))
+              with(par, check.1.real(sat,   ctxt))
+              par <- within(par, sat <- min(sat, .Machine$integer.max-100))
+              par <- within(par, if(is.na(gamma)) { r <- 0; gamma <- 1 })
+              with(par, check.finite(beta, ctxt))
+              with(par, check.finite(gamma, ctxt))
+              with(par, check.finite(r, ctxt))
+              with(par, check.finite(sat, ctxt))
+              with(par, explain.ifnot(all(beta >= 0), ctxt))
+              return(par)
+            },
+            validity=function(par, kind) {
+              switch(kind,
+                     integrable=TRUE,
+                     stabilising=FALSE)
+            },
+            explainvalid=list(integrable="TRUE", stabilising="FALSE"),
+            reach = function(par, ...) {
+              r <- par[["r"]]
+              g <- par[["gamma"]]
+              return(if(g == 1) 0 else 2 * r)
+            },
+            hardcore = function(par, ..., epsilon=0) {
+              r <- par[["r"]]
+              g <- par[["gamma"]]
+              return(if(g <= epsilon) r else 0)
+            },
+            temper = function(par, invtemp) {
+              within(par, {
+                beta <- beta^invtemp
+                gamma <- gamma^invtemp
+              })
+            }
+            ),
+#       
+# 9. The ``lookup'' device.  This permits simulating, at least
+# approximately, ANY pairwise interaction model whose pair
+# interaction function is isotropic (i.e. depends only on distance).
+# The pair interaction function is supplied as a vector of
+# distances and corresponding function values, which the C code
+# uses as a ``lookup table''.
+#
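+#
+# Example (illustrative): the two equivalent specifications
+#   h <- stepfun(c(0.05, 0.1), c(0, 0.5, 1))   # right-continuous, ends at 1
+#   rmhmodel(cif="lookup", par=list(beta=100, h=h))
+#   rmhmodel(cif="lookup", par=list(beta=100, h=c(0, 0.5), r=c(0.05, 0.1)))
+# both describe a pair interaction equal to 0 up to distance 0.05,
+# 0.5 up to 0.1, and 1 beyond.
+#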
+       'lookup' = 
+       list(
+            C.id="lookup",
+            multitype=FALSE,
+            parhandler=function(par, ...) {
+              ctxt <- "For the lookup cif"
+              par <- check.named.list(par, c("beta","h"), ctxt, "r")
+              with(par, check.finite(beta, ctxt))
+              with(par, explain.ifnot(all(beta >= 0), ctxt))
+              beta   <- par[["beta"]]
+              h.init <- par[["h"]]
+              r      <- par[["r"]]
+              if(is.null(r)) {
+		if(!is.stepfun(h.init))
+                  stop(paste("For cif=lookup, if component r of",
+                             "par is absent then component h must",
+                             "be a stepfun object."))
+		if(!is.cadlag(h.init))
+                  stop(paste("The lookup pairwise interaction step",
+			     "function must be right continuous,\n",
+			     "i.e. built using the default values of the",
+                             sQuote("f"), "and", sQuote("right"),
+                             "arguments for stepfun."))
+		r     <- knots(h.init)
+		h0    <- get("yleft",envir=environment(h.init))
+		h     <- h.init(r)
+		nlook <- length(r)
+		if(!identical(all.equal(h[nlook],1),TRUE))
+                  stop(paste("The lookup interaction step function",
+                             "must be equal to 1 for", dQuote("large"),
+                             "distances."))
+		if(r[1] <= 0)
+                  stop(paste("The first jump point (knot) of the lookup",
+                             "interaction step function must be",
+                             "strictly positive."))
+		h <- c(h0,h)
+              } else {
+		h     <- h.init
+		nlook <- length(r)
+		if(length(h) != nlook)
+                  stop("Mismatch of lengths of h and r lookup vectors.")
+		if(anyNA(r))
+                  stop("Missing values not allowed in r lookup vector.")
+		if(is.unsorted(r))
+                  stop("The r lookup vector must be in increasing order.")
+		if(r[1] <= 0)
+                  stop(paste("The first entry of the lookup vector r",
+                             "should be strictly positive."))
+		h <- c(h,1)
+              }
+              if(any(h < 0))
+		stop(paste("Negative values in the lookup",
+                           "pairwise interaction function."))
+              if(h[1] > 0 && any(h > 1))
+		stop(paste("Lookup pairwise interaction function does",
+                           "not define a valid point process."))
+              rmax   <- r[nlook]
+              r <- c(0,r)
+              nlook <- nlook+1
+              deltar <- mean(diff(r))
+              if(identical(all.equal(diff(r),rep.int(deltar,nlook-1)),TRUE)) {
+		par <- list(beta=beta,nlook=nlook,
+                            equisp=1,
+                            deltar=deltar,rmax=rmax, h=h)
+              } else {
+		par <- list(beta=beta,nlook=nlook,
+                            equisp=0,
+                            deltar=deltar,rmax=rmax, h=h,
+                            r=r)
+              }
+              return(par) 
+            },
+            validity=function(par, kind) {
+              h <- par$h
+              if(is.stepfun(h))
+                h <- eval(expression(c(yleft,y)),envir=environment(h))
+              switch(kind,
+                     integrable={
+                       (h[1] == 0) || all(h <= 1)
+                     },
+                     stabilising={ h[1] == 0 })
+            },
+            explainvalid=list(
+              integrable="h[1] == 0 or h[i] <= 1 for all i",
+              stabilising="h[1] == 0"),
+            reach = function(par, ...) {
+              r <- par[["r"]]
+              h <- par[["h"]]
+              if(is.null(r)) 
+                r <- knots(h)
+              return(max(r))
+            },
+            hardcore = function(par, ..., epsilon=0) {
+              r <- par[["r"]]
+              h <- par[["h"]]
+              if(is.null(r)) 
+                r <- knots(h)
+              return(max(0, r[h <= epsilon]))
+            },
+            temper = function(par, invtemp) {
+              within(par, {
+                beta <- beta^invtemp
+                h <- h^invtemp
+              })
+            }
+            ),
+#       
+# 10. Area interaction
+#       
+       'areaint'=
+       list(
+            C.id="areaint",
+            multitype=FALSE,
+            parhandler=function(par, ...) {
+              ctxt <- "For the areaint cif"
+              par <- check.named.list(par, c("beta","eta","r"), ctxt)
+              par <- within(par, if(is.na(r)) { r <- 0 })
+              with(par, check.finite(beta, ctxt))
+              with(par, explain.ifnot(all(beta >= 0), ctxt))
+              with(par, check.1.real(eta, ctxt))
+              with(par, check.1.real(r,   ctxt))
+              with(par, check.finite(eta, ctxt))
+              with(par, check.finite(r,   ctxt))
+              with(par, explain.ifnot(eta >= 0, ctxt))
+              with(par, explain.ifnot(r >= 0,   ctxt))
+              return(par)
+            },
+            validity=function(par, kind) {
+              switch(kind,
+                     integrable=TRUE,
+                     stabilising=FALSE)
+            },
+            explainvalid=list(integrable="TRUE", stabilising="FALSE"),
+            reach = function(par, ...) {
+              r <- par[["r"]]
+              eta <- par[["eta"]]
+              return(if(eta == 1) 0 else (2 * r))
+            },
+            hardcore = function(par, ..., epsilon=0) {
+              r <- par[["r"]]
+              eta <- par[["eta"]]
+              if(eta > epsilon) return(0)
+              if(eta == 0) return(2 * r)
+              # linear approximation
+              return(2 * r * eta/epsilon)
+            },
+            temper = function(par, invtemp) {
+              within(par, {
+                beta <- beta^invtemp
+                eta  <- eta^invtemp
+              })
+            }
+            ),
+#
+# 11. The ``badgey'' (Baddeley-Geyer) model.
+#
+       'badgey' =
+       list(
+            C.id="badgey",
+            multitype=FALSE,
+            parhandler=function(par, ...) {
+              ctxt <- "For the badgey cif"
+              par <- check.named.list(par, c("beta","gamma","r","sat"), ctxt)
+              par <- within(par, sat <- pmin(sat, .Machine$integer.max-100))
+              par <- within(par, gamma[is.na(gamma) | is.na(r)] <- 1)
+              par <- within(par, r[is.na(r)] <- 0)
+              with(par, check.finite(beta, ctxt))
+              with(par, check.finite(gamma, ctxt))
+              with(par, check.finite(r, ctxt))
+              with(par, check.finite(sat, ctxt))
+              with(par, explain.ifnot(all(beta >= 0), ctxt))
+              with(par, explain.ifnot(all(gamma >= 0), ctxt))
+              with(par, explain.ifnot(all(r >= 0), ctxt))
+              with(par, explain.ifnot(all(sat >= 0), ctxt))
+              with(par, explain.ifnot(length(gamma) == length(r), ctxt)) 
+              gamma <- par[["gamma"]]
+              r     <- par[["r"]]
+              sat   <- par[["sat"]]
+              if(length(sat)==1) sat <- rep.int(sat,length(gamma))
+              else explain.ifnot(length(sat) == length(gamma), ctxt)
+              mmm <- cbind(gamma,r,sat)
+              mmm <- mmm[fave.order(r),]
+              ndisc <- length(r)
+              par <- list(beta=par$beta,ndisc=ndisc,parms=as.vector(t(mmm)))
+              return(par)
+            },
+            validity=function(par, kind) {
+              switch(kind,
+                     integrable=TRUE,
+                     stabilising=FALSE)
+            },
+            explainvalid=list(integrable="TRUE", stabilising="FALSE"),
+            reach = function(par, ...) {
+              r <- par[["r"]]
+              gamma <- par[["gamma"]]
+              operative <- (gamma != 1)
+              return(if(!any(operative)) 0 else (2 * max(r[operative])))
+            },
+            hardcore = function(par, ..., epsilon=0) {
+              r <- par[["r"]]
+              gamma <- par[["gamma"]]
+              return(max(0, r[gamma <= epsilon]))
+            },
+            temper = function(par, invtemp) {
+              within(par, {
+                beta  <- beta^invtemp
+                gamma <- gamma^invtemp
+              })
+            }
+            ),
+#
+# 12. The hard core process
+       'hardcore' =
+       list(
+            C.id="hardcore",
+            multitype=FALSE,
+            parhandler=function(par, ...) {
+              ctxt <- "For the hardcore cif"
+              par <- check.named.list(par, c("beta", "hc"), ctxt)
+              par <- within(par, if(is.na(hc)) { hc <- 0 })
+              with(par, check.finite(beta, ctxt))
+              with(par, check.finite(hc, ctxt))
+              with(par, explain.ifnot(all(beta >= 0), ctxt))
+              with(par, check.1.real(hc, ctxt))
+              with(par, explain.ifnot(hc >= 0, ctxt))
+              return(par)
+            },
+            validity=function(par, kind) {
+              hc <- par$hc
+              switch(kind,
+                     integrable=TRUE,
+                     stabilising=(hc > 0))
+            },
+            explainvalid=list(integrable="TRUE", stabilising="hc > 0"),
+            reach = function(par, ...) {
+              hc <- par[["hc"]]
+              return(hc)
+            },
+            hardcore = function(par, ...) {
+              hc <- par[["hc"]]
+              return(hc)
+            },
+            temper = function(par, invtemp) {
+              within(par, {
+                beta  <- beta^invtemp
+              })
+            }
+            ),
+#
+# Lucky 13. Fiksel process
+       'fiksel' =
+       list(
+            C.id="fiksel",
+            multitype=FALSE,
+            parhandler=function(par, ...) {
+              ctxt <- "For the Fiksel cif"
+              par <- check.named.list(par,
+                                      c("beta", "r", "hc", "kappa", "a"),
+                                      ctxt)
+              with(par, check.finite(beta, ctxt))
+              with(par, check.finite(r, ctxt))
+              with(par, check.finite(hc, ctxt))
+              with(par, check.finite(kappa, ctxt))
+              with(par, check.finite(a, ctxt))
+              with(par, check.1.real(r, ctxt))
+              with(par, check.1.real(hc, ctxt))
+              with(par, check.1.real(kappa, ctxt))
+              with(par, check.1.real(a, ctxt))
+              with(par, explain.ifnot(all(beta >= 0), ctxt))
+              with(par, explain.ifnot(hc >= 0, ctxt))
+              with(par, explain.ifnot(r > hc, ctxt))
+              return(par)
+            },
+            validity=function(par, kind) {
+              hc <- par$hc
+              a  <- par$a
+              switch(kind,
+                     integrable=(hc > 0 || a <= 0),
+                     stabilising=(hc > 0))
+            },
+            explainvalid=list(
+              integrable="hc > 0 or a <= 0",
+              stabilising="hc > 0"),
+            reach = function(par, ...) {
+              r <- par[["r"]]
+              hc <- par[["hc"]]              
+              a <- par[["a"]]
+              return(if(a != 0) r else hc)
+            },
+            hardcore = function(par, ...) {
+              return(par[["hc"]])
+            },
+            temper = function(par, invtemp) {
+              within(par, {
+                beta  <- beta^invtemp
+                a <- a * invtemp
+              })
+            }
+            ),
+#
+# 14. Lennard-Jones
+       'lennard' =
+       list(
+            C.id="lennard",
+            multitype=FALSE,
+            parhandler=function(par, ...) {
+              ctxt <- "For the Lennard-Jones cif"
+              par <- check.named.list(par,
+                                      c("beta", "sigma", "epsilon"),
+                                      ctxt)
+              with(par, check.finite(beta, ctxt))
+              with(par, check.finite(sigma, ctxt))
+              with(par, check.finite(epsilon, ctxt))
+              with(par, explain.ifnot(all(beta >= 0), ctxt))
+              with(par, check.1.real(sigma, ctxt))
+              with(par, check.1.real(epsilon, ctxt))
+              with(par, explain.ifnot(sigma > 0, ctxt))
+              with(par, explain.ifnot(epsilon > 0, ctxt))
+              return(par)
+            },
+            validity=function(par, kind) {
+              switch(kind,
+                     integrable=(par$sigma > 0),
+                     stabilising=FALSE)
+            },
+            explainvalid=list(
+              integrable="sigma > 0",
+              stabilising="FALSE"),
+            reach = function(par, ...) {
+              sigma <- par[["sigma"]]
+              return(2.5 * sigma)
+            },
+            hardcore = function(par, ...) {
+              sigma <- par[["sigma"]]
+              return(sigma/2.5)
+            },
+            temper = function(par, invtemp) {
+              within(par, {
+                beta  <- beta^invtemp
+                epsilon <- epsilon * invtemp
+              })
+            }
+            ),
+#       
+# 15. Multitype hardcore.
+#       
+       'multihard' = 
+       list(
+            C.id="multihard",
+            multitype=TRUE,
+            parhandler=function(par, types) {
+              ctxt="For the multihard cif"
+              par <- check.named.list(par,
+                                      c("beta","hradii"),
+                                      ctxt)
+
+              beta <- par$beta
+              hradii <- par$hradii
+              ntypes <- length(types)
+
+              check.nvector(beta, ntypes, TRUE, "types")
+              check.finite(beta, ctxt)
+              
+              MultiPair.checkmatrix(hradii, ntypes, "par$hradii")
+              hradii[is.na(hradii)] <- 0
+              check.finite(hradii, ctxt)
+
+              explain.ifnot(all(beta >= 0), ctxt)
+              explain.ifnot(all(hradii >= 0), ctxt)
+
+              par <- list(beta=beta,hradii=hradii)
+              return(par)
+            },
+            validity=function(par, kind) {
+              switch(kind,
+                     integrable=return(TRUE),
+                     stabilising={
+                       hself <- diag(par$hradii)
+                       repel <- !is.na(hself) & (hself > 0)
+                       return(all(repel))
+                     })
+            },
+            explainvalid=list(
+              integrable="TRUE",
+              stabilising="hradii[i,i] > 0 for all i"),
+            reach=function(par, ...) {
+              return(max(0, par$hradii, na.rm=TRUE))
+            },
+            hardcore=function(par, ..., epsilon=0) {
+              return(max(0, par$hradii, na.rm=TRUE))
+            },
+            temper = function(par, invtemp) {
+              within(par, {
+                beta  <- beta^invtemp
+              })
+            }
+            ),
+#       
+# 16. Triplets.
+#       
+       'triplets'=
+       list(
+            C.id="triplets",
+            multitype=FALSE,
+            parhandler=function(par, ...) {
+              ctxt <- "For the triplets cif"
+              par <- check.named.list(par, c("beta","gamma","r"), ctxt)
+              # treat r=NA as absence of interaction
+              par <- within(par, if(is.na(r)) { r <- 0; gamma <- 1 })
+              with(par, check.finite(beta, ctxt))
+              with(par, check.finite(gamma, ctxt))
+              with(par, check.finite(r, ctxt))
+              with(par, check.1.real(gamma, ctxt))
+              with(par, check.1.real(r,     ctxt))
+              with(par, explain.ifnot(all(beta >= 0), ctxt))
+              with(par, explain.ifnot(gamma >= 0, ctxt))
+              with(par, explain.ifnot(r >= 0, ctxt))
+              return(par)
+            },
+            validity=function(par, kind) {
+              gamma <- par$gamma
+              switch(kind,
+                     integrable=(gamma <= 1),
+                     stabilising=(gamma == 0)
+                     )
+            },
+            explainvalid=list(
+              integrable="gamma <= 1",
+              stabilising="gamma == 0"),
+            reach = function(par, ...) {
+              r <- par[["r"]]
+              g <- par[["gamma"]]
+              return(if(g == 1) 0 else r)
+            },
+            hardcore = function(par, ...) {
+              return(0)
+            },
+            temper = function(par, invtemp) {
+              within(par, {
+                beta  <- beta^invtemp
+                gamma <- gamma^invtemp
+              })
+            }
+            ),
+#       
+# 17. Penttinen.
+#       
+       'penttinen'=
+       list(
+            C.id="penttinen",
+            multitype=FALSE,
+            parhandler=function(par, ...) {
+              ctxt <- "For the penttinen cif"
+              par <- check.named.list(par, c("beta", "gamma", "r"), ctxt)
+              # treat r=NA as absence of interaction
+              par <- within(par, if(is.na(r)) { r <- 0; gamma <- 1 })
+              with(par, check.finite(beta, ctxt))
+              with(par, check.finite(gamma, ctxt))
+              with(par, check.finite(r, ctxt))
+              with(par, check.1.real(gamma, ctxt))
+              with(par, check.1.real(r, ctxt))
+              with(par, explain.ifnot(all(beta >= 0), ctxt))
+              with(par, explain.ifnot(gamma >= 0, ctxt))
+              with(par, explain.ifnot(r > 0, ctxt))
+              return(par)
+            },
+            validity=function(par, kind) {
+              gamma <- par$gamma
+              switch(kind,
+                     integrable=(gamma <= 1),
+                     stabilising=(gamma == 0)
+                     )
+            },
+            explainvalid=list(
+              integrable="gamma <= 1",
+              stabilising="gamma == 0"),
+            reach = function(par, ...) {
+              r <- par[["r"]]
+              g <- par[["gamma"]]
+              return(if(g == 1) 0 else (2 * r))
+            },
+            hardcore = function(par, ..., epsilon=0) {
+              r <- par[["r"]]
+              g <- par[["gamma"]]
+              return(if(g <= epsilon) (2 * r) else 0)
+            },
+            temper = function(par, invtemp) {
+              within(par, {
+                beta <- beta^invtemp
+                gamma <- gamma^invtemp
+              })
+            }
+            )
+       # end of list '.Spatstat.RmhTable'
+       )
+
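+# Example (illustrative): each 'temper' entry raises the model to an inverse
+# temperature, e.g.
+#   spatstatRmhInfo("strauss")$temper(list(beta=10, gamma=0.5, r=0.1), 2)
+# returns beta=100 and gamma=0.25, with r unchanged.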
+
diff --git a/R/rmhmodel.ppm.R b/R/rmhmodel.ppm.R
new file mode 100755
index 0000000..875f85a
--- /dev/null
+++ b/R/rmhmodel.ppm.R
@@ -0,0 +1,427 @@
+#
+#  rmhmodel.ppm.R
+#
+#   convert ppm object into format palatable to rmh.default
+#
+#  $Revision: 2.64 $   $Date: 2017/06/05 10:31:58 $
+#
+#   .Spatstat.Rmhinfo
+#   rmhmodel.ppm()
+#
+
+.Spatstat.Rmhinfo <-
+list(
+     "Multitype Hardcore process" =
+     function(coeffs, inte) {
+       # hard core radii r[i,j]
+       hradii <- inte$par[["hradii"]]
+       return(list(cif='multihard',
+                   par=list(hradii=hradii),
+                   ntypes=ncol(hradii)))
+     },
+     "Lennard-Jones process" =
+     function(coeffs, inte) {
+       pa <- inte$interpret(coeffs,inte)$param
+       sigma   <- pa[["sigma"]]
+       epsilon <- pa[["epsilon"]]
+       return(list(cif='lennard',
+                   par=list(sigma=sigma, epsilon=epsilon),
+                   ntypes=1))
+     },
+     "Fiksel process" =
+     function(coeffs, inte) {
+       hc <- inte$par[["hc"]]
+       r  <- inte$par[["r"]]
+       kappa <- inte$par[["kappa"]]
+       a <- inte$interpret(coeffs,inte)$param$a
+       return(list(cif='fiksel',
+                   par=list(r=r,hc=hc,kappa=kappa,a=a),
+                   ntypes=1))
+     },
+     "Diggle-Gates-Stibbard process" =
+     function(coeffs, inte) {
+       rho   <- inte$par[["rho"]]
+       return(list(cif='dgs',
+                   par=list(rho=rho),
+                   ntypes=1))
+     },
+     "Diggle-Gratton process" =
+     function(coeffs, inte) {
+       kappa <- inte$interpret(coeffs,inte)$param$kappa
+       delta <- inte$par[["delta"]]
+       rho   <- inte$par[["rho"]]
+       return(list(cif='diggra',
+                   par=list(kappa=kappa,delta=delta,rho=rho),
+                   ntypes=1))
+     },
+     "Hard core process" =
+     function(coeffs, inte) {
+       hc <- inte$par[["hc"]]
+       return(list(cif='hardcore',
+                   par=list(hc=hc),
+                   ntypes=1))
+     },
+     "Geyer saturation process" =
+     function(coeffs, inte) {
+       gamma <- inte$interpret(coeffs,inte)$param$gamma
+       r <- inte$par[["r"]]
+       sat <- inte$par[["sat"]]
+       return(list(cif='geyer',
+                   par=list(gamma=gamma,r=r,sat=sat),
+                   ntypes=1))
+     },
+     "Soft core process" =
+     function(coeffs, inte) {
+       kappa <- inte$par[["kappa"]]
+       sigma <- inte$interpret(coeffs,inte)$param$sigma
+       return(list(cif="sftcr",
+                   par=list(sigma=sigma,kappa=kappa),
+                   ntypes=1))
+     },
+     "Strauss process" =
+     function(coeffs, inte) {
+       gamma <- inte$interpret(coeffs,inte)$param$gamma
+       r <- inte$par[["r"]]
+       return(list(cif = "strauss",
+                   par = list(gamma = gamma, r = r),
+                   ntypes=1))
+     },
+     "Strauss - hard core process" =
+     function(coeffs, inte) {
+       gamma <- inte$interpret(coeffs,inte)$param$gamma
+       r <- inte$par[["r"]]
+       hc <- inte$par[["hc"]]
+       return(list(cif='straush',
+                   par=list(gamma=gamma,r=r,hc=hc),
+                   ntypes=1))
+     },
+     "Triplets process" =
+     function(coeffs, inte) {
+       gamma <- inte$interpret(coeffs,inte)$param$gamma
+       r <- inte$par[["r"]]
+       return(list(cif = "triplets",
+                   par = list(gamma = gamma, r = r),
+                   ntypes=1))
+     },
+     "Penttinen process" =
+     function(coeffs, inte) {
+       gamma <- inte$interpret(coeffs,inte)$param$gamma
+       r   <- inte$par[["r"]]
+       return(list(cif='penttinen',
+                   par=list(gamma=gamma, r=r),
+                   ntypes=1))
+     },
+     "Multitype Strauss process" =
+     function(coeffs, inte) {
+       # interaction radii r[i,j]
+       radii <- inte$par[["radii"]]
+       # interaction parameters gamma[i,j]
+       gamma <- (inte$interpret)(coeffs, inte)$param$gammas
+       return(list(cif='straussm',
+                   par=list(gamma=gamma,radii=radii),
+                   ntypes=ncol(radii)))
+     },
+     "Multitype Strauss Hardcore process" =
+     function(coeffs, inte) {
+       # interaction radii r[i,j]
+       iradii <- inte$par[["iradii"]]
+       # hard core radii r[i,j]
+       hradii <- inte$par[["hradii"]]
+       # interaction parameters gamma[i,j]
+       gamma <- (inte$interpret)(coeffs, inte)$param$gammas
+       return(list(cif='straushm',
+                   par=list(gamma=gamma,iradii=iradii,hradii=hradii),
+                   ntypes=ncol(iradii)))
+     },
+     "Piecewise constant pairwise interaction process" =
+     function(coeffs, inte) {
+       r <- inte$par[["r"]]
+       gamma <- (inte$interpret)(coeffs, inte)$param$gammas
+       h <- stepfun(r, c(gamma, 1))
+       return(list(cif='lookup', par=list(h=h),
+                   ntypes=1))
+     },
+     "Area-interaction process" =
+     function(coeffs, inte) {
+       r <- inte$par[["r"]]
+       eta <- (inte$interpret)(coeffs, inte)$param$eta
+       return(list(cif='areaint', par=list(eta=eta,r=r), ntypes=1))
+     },
+     "hybrid Geyer process" =
+     function(coeffs, inte) {
+       r <- inte$par[["r"]]
+       sat <- inte$par[["sat"]]
+       gamma <- (inte$interpret)(coeffs,inte)$param$gammas
+       return(list(cif='badgey',par=list(gamma=gamma,r=r,sat=sat), ntypes=1))
+     },
+     "Hybrid interaction"=
+     function(coeffs, inte){
+       # for hybrids, $par is a list of the component interactions
+       interlist <- inte$par
+       # check for Poisson components
+       ispois <- unlist(lapply(interlist, is.poisson))
+       if(all(ispois)) {
+         # reduces to Poisson
+         Z <- list(cif='poisson', par=list())
+         return(Z)
+       } else if(any(ispois)) {
+         # remove Poisson components
+         interlist <- interlist[!ispois]
+       }
+       # 
+       N <- length(interlist)
+       cifs <- character(N)
+       pars <- vector(mode="list", length=N)
+       ntyp <- integer(N)
+       for(i in 1:N) {
+         interI <- interlist[[i]]
+         # forbid hybrids-of-hybrids - these should not occur anyway
+         if(interI$name == "Hybrid interaction")
+           stop("Simulation of a hybrid-of-hybrid interaction is not implemented")
+         # get RMH mapping for I-th component
+         siminfoI <- .Spatstat.Rmhinfo[[interI$name]]
+         if(is.null(siminfoI))
+           stop(paste("Simulation of a fitted", sQuote(interI$name),
+                      "has not yet been implemented"),
+                call.=FALSE)
+         # nameI is the tag that identifies I-th component in hybrid
+         nameI  <- names(interlist)[[i]]
+         nameI. <- paste(nameI, ".", sep="")
+         # find coefficients with prefix that exactly matches nameI.
+         Cname  <- names(coeffs)
+         prefixlength <- nchar(nameI.)
+         Cprefix <- substr(Cname, 1, prefixlength)
+         relevant <- (Cprefix == nameI.)
+         # extract coefficients
+         #   (there may be none, if this interaction is an 'offset')
+         coeffsI <- coeffs[relevant]
+         # remove the prefix so the coefficients are recognisable to 'siminfoI'
+         if(any(relevant)) 
+           names(coeffsI) <-
+             substr(Cname[relevant], prefixlength+1, max(nchar(Cname)))
+         # compute RMH info
+         ZI <- siminfoI(coeffsI, interI)
+         cifs[i] <- ZI$cif
+         pars[[i]] <- ZI$par
+         ntyp[i] <- ZI$ntypes
+       }
+       nt <- unique(ntyp[ntyp != 1])
+       if(length(nt) > 1)
+         stop(paste("Hybrid components have different numbers of types:",
+                    commasep(nt)))
+       if(N == 1) {
+         # single cif: revert to original format: par is a list of parameters
+         Z <- list(cif=cifs[1], par=pars[[1]], ntypes=ntyp)
+       } else {
+         # hybrid cif: par is a list of lists of parameters
+         Z <- list(cif=cifs,    par=pars,      ntypes=ntyp)
+       }
+       return(Z)
+     }
+)
+
+
+# OTHER MODELS not yet implemented:
+#
+#
+#      interaction object           rmh.default 
+#      ------------------           -----------
+#
+#           OrdThresh                <none>
+#
+
+
+rmhmodel.ppm <- function(model, w, ...,
+                         verbose=TRUE, project=TRUE,
+                         control=rmhcontrol(),
+                         new.coef=NULL) {
+  ## converts ppm object `model' into format palatable to rmh.default
+  
+  verifyclass(model, "ppm")
+  argh <- list(...)
+
+  if(!is.null(new.coef))
+    model <- tweak.coefs(model, new.coef)
+  
+  ## Ensure the fitted model is valid
+  ## (i.e. exists mathematically as a point process)
+  if(!valid.ppm(model)) {
+    if(project) {
+      if(verbose)
+        cat("Model is invalid - projecting it\n")
+      model <- project.ppm(model, fatal=TRUE)
+    } else stop("The fitted model is not a valid point process")
+  }
+    
+  if(verbose)
+    cat("Extracting model information...")
+    
+  ## Extract essential information
+  Y <- summary(model, quick="no variances")
+
+  if(Y$marked && !Y$multitype)
+    stop("Not implemented for marked point processes other than multitype")
+
+  if(Y$uses.covars && is.data.frame(model$covariates))
+    stop(paste("This model cannot be simulated, because the",
+               "covariate values were given as a data frame."))
+    
+  ## enforce defaults for `control'
+
+  control <- rmhcontrol(control)
+
+  ## adjust to peculiarities of model
+    
+  control <- rmhResolveControl(control, model)
+    
+  ########  Interpoint interaction
+  if(Y$poisson) {
+    Z <- list(cif="poisson",
+              par=list())  # par is filled in later
+  } else {
+    ## First check version number of ppm object
+    if(Y$antiquated) 
+      stop(paste("This model was fitted by a very old version",
+                 "of the package: spatstat", Y$version,
+                 "; simulation is not possible.",
+                 "Re-fit the model using your original code"))
+    else if(Y$old)
+      warning(paste("This model was fitted by an old version",
+                    "of the package: spatstat", Y$version,
+                    ". Re-fit the model using update.ppm",
+                    "or your original code"))
+    ## Extract the interpoint interaction object
+    inte <- Y$entries$interaction
+    ## Determine whether the model can be simulated using rmh
+    siminfo <- .Spatstat.Rmhinfo[[inte$name]]
+    if(is.null(siminfo))
+      stop(paste("Simulation of a fitted", sQuote(inte$name),
+                 "has not yet been implemented"))
+      
+    ## Get fitted model's canonical coefficients
+    coeffs <- Y$entries$coef
+    if(newstyle.coeff.handling(inte)) {
+      ## extract only the interaction coefficients
+      Vnames <- Y$entries$Vnames
+      IsOffset <- Y$entries$IsOffset
+      coeffs <- coeffs[Vnames[!IsOffset]]
+    }
+    ## Translate the model to the format required by rmh.default
+    Z <- siminfo(coeffs, inte)
+    if(is.null(Z))
+      stop("The model cannot be simulated")
+    else if(is.null(Z$cif))
+      stop(paste("Internal error: no cif returned from .Spatstat.Rmhinfo"))
+  }
+
+  ## Don't forget the types
+  if(Y$multitype && is.null(Z$types))
+    Z$types <- levels(Y$entries$marks)
+       
+  ######## Window for result 
+    
+  if(missing(w) || is.null(w)) {
+    ## check for outdated argument name 'win'
+    if(!is.na(m <- match("win", names(argh)))) {
+      warning("Argument 'win' to rmhmodel.ppm is deprecated; use 'w'")
+      w <- argh[[m]]
+      argh <- argh[-m]
+    } else w <- Y$entries$data$window
+  }
+
+  Z$w <- w
+
+  ######## Expanded window for simulation?
+
+  covims <- if(Y$uses.covars) model$covariates[Y$covars.used] else NULL
+    
+  wsim <- rmhResolveExpansion(w, control, covims, "covariate")$wsim
+      
+  ###### Trend or Intensity ############
+
+  if(verbose)
+    cat("Evaluating trend...")
+    
+  if(Y$stationary) {
+    ## first order terms (beta or beta[i]) are carried in Z$par
+    beta <- as.numeric(Y$trend$value)
+    Z$trend <- NULL
+  } else {
+    ## trend terms present
+    ## all first order effects are subsumed in Z$trend
+    beta <- if(!Y$marked) 1 else rep.int(1, length(Z$types))
+    ## predict on window possibly larger than original data window
+    Z$trend <- 
+      if(wsim$type == "mask")
+        predict(model, window=wsim, type="trend", locations=wsim)
+      else 
+        predict(model, window=wsim, type="trend")
+  }
+    
+  Ncif <- length(Z$cif)
+  if(Ncif == 1) {
+    ## single interaction
+    Z$par[["beta"]] <- beta
+  } else {
+    ## hybrid interaction
+    if(all(Z$ntypes == 1)) {
+      ## unmarked model: scalar 'beta' is absorbed in first cif
+      absorb <- 1
+    } else {
+      ## multitype model: vector 'beta' is absorbed in a multitype cif
+      absorb <- min(which(Z$ntypes > 1))
+    }
+    Z$par[[absorb]]$beta <- beta
+    ## other cifs have par$beta = 1 
+    for(i in (1:Ncif)[-absorb])
+      Z$par[[i]]$beta <- rep.int(1, Z$ntypes[i])
+  }
+  if(verbose)
+    cat("done.\n")
+  Z <- do.call(rmhmodel, append(list(Z), argh))
+  return(Z)
+}
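+
+## Illustration (assuming the documented spatstat functions ppm, rmhmodel
+## and rmh, and the 'cells' dataset shipped with the package):
+##
+##    fit <- ppm(cells ~ 1, Strauss(r=0.1))   # fit a Strauss model
+##    mod <- rmhmodel(fit)                    # dispatches to rmhmodel.ppm
+##    X   <- rmh(mod)                         # simulate the fitted model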
+
+rmhResolveExpansion <- function(win, control, imagelist, itype="covariate") {
+  # Determine expansion window for simulation
+  ex <- control$expand
+  
+# The following is redundant because it is implied by !will.expand(ex)  
+#  if(ex$force.noexp) {
+#    # Expansion prohibited
+#    return(list(wsim=win, expanded=FALSE))
+#  }
+  
+  # Is expansion contemplated?
+  if(!will.expand(ex))
+    return(list(wsim=win, expanded=FALSE))
+
+  # Proposed expansion window
+  wexp <- expand.owin(win, ex)
+
+  # Check feasibility
+  isim <- unlist(lapply(imagelist, is.im))
+  imagelist <- imagelist[isim]
+
+  if(length(imagelist) == 0) {
+    # Unlimited expansion is feasible
+    return(list(wsim=wexp, expanded=TRUE))
+  }
+
+  # Expansion is limited to domain of image data
+  # Determine maximum possible expansion window
+  wins <- lapply(imagelist, as.owin)
+  cwin <- do.call(intersect.owin, unname(wins))
+  
+  if(!is.subset.owin(wexp, cwin)) {
+    # Cannot expand to proposed window
+    if(ex$force.exp)
+      stop(paste("Cannot expand the simulation window,",
+                 "because the", itype, "images do not cover",
+                 "the expanded window"), call.=FALSE)
+      # Take largest possible window
+    wexp <- intersect.owin(wexp, cwin)
+  }
+  return(list(wsim=wexp, expanded=TRUE))
+}
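+
+## Illustration (assuming rmhcontrol as documented): expansion is
+## requested through the 'expand' entry of the control object, e.g.
+##
+##    cont <- rmhcontrol(expand=2)   # roughly, an area expansion factor of 2
+##    ## rmhResolveExpansion(win, cont, NULL) then returns the enlarged window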
+
diff --git a/R/rmhsnoop.R b/R/rmhsnoop.R
new file mode 100644
index 0000000..74abb0b
--- /dev/null
+++ b/R/rmhsnoop.R
@@ -0,0 +1,573 @@
+#
+# rmhsnoop.R
+#
+#   visual debug mechanism for rmh
+#
+#   $Revision: 1.26 $  $Date: 2014/10/24 00:22:30 $
+#
+#   When rmh is called in visual debug mode (snooping = TRUE),
+#   it calls e <- rmhSnoopEnv(...) to create an R environment 'e'
+#   containing variables that will represent the current state
+#   of the M-H algorithm with initial state X and model reach R.
+#
+#   The environment 'e' is passed to the C routine xmethas.
+#   This makes it possible for data to be exchanged between
+#   the C and R code.
+#
+#   When xmethas reaches the debugger's stopping time,
+#   the current state of the simulation and the proposal
+#   are copied from C into the R environment 'e'.
+#
+#   Then to execute the visual display, the C code calls
+#   'eval' to execute the R function rmhsnoop().
+#
+#   The function rmhsnoop uses the 'simplepanel' class
+#   to generate a plot showing the state of the simulation
+#   and the proposal, and then wait for point-and-click input using
+#   locator(). 
+#  
+#   When rmhsnoop() exits, it returns an integer giving the
+#   (user-specified) next stopping time. This is read back into
+#   the C code. Then xmethas resumes simulations.
+#
+#   I said it was simple! %^]
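+#
+#   A minimal way to reach this machinery (assuming the 'snoop'
+#   argument of rmh, as documented):
+#
+#       fit <- ppm(cells ~ 1, Strauss(r=0.1))
+#       X <- rmh(fit, snoop=TRUE)   # opens the interactive debugger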
+
+rmhSnoopEnv <- function(Xinit, Wclip, R) {
+  stopifnot(is.ppp(Xinit))
+  # Create an environment that will be accessible to R and C code
+  e <- new.env()
+  # initial state (point pattern)
+  X <- Xinit
+  assign("Wsim",     as.owin(X),      envir=e)
+  assign("xcoords",  coords(X)[,1],   envir=e)
+  assign("ycoords",  coords(X)[,2],   envir=e)
+  if(is.multitype(X)) {
+    mcodes <- as.integer(marks(X)) - 1L
+    mlevels <- levels(marks(X))
+    assign("mcodes",  mcodes,  envir=e)
+    assign("mlevels", mlevels, envir=e)
+  } else {
+    assign("mcodes",  NULL, envir=e)
+    assign("mlevels", NULL, envir=e)
+  }
+  # clipping window
+  assign("Wclip",    Wclip,           envir=e)
+  # reach of model (could be infinite)
+  assign("R",        R,               envir=e)
+  # current iteration number 
+  assign("irep", 0L,             envir=e)
+  # next iteration to be inspected
+  assign("inxt",  1L,             envir=e)
+  # next transition to be inspected
+  assign("tnxt",  1L,             envir=e)
+  # proposal type
+  assign("proptype", NULL,     envir=e)
+  # outcome of proposal
+  assign("itype",    NULL,     envir=e)
+  # proposal location
+  assign("proplocn", NULL,     envir=e)
+  # proposal mark 
+  assign("propmark", NULL,     envir=e)
+  # index of proposal point in existing pattern
+  assign("propindx", NULL,     envir=e)
+  # Hastings ratio
+  assign("numerator",   NULL,  envir=e)
+  assign("denominator", NULL,  envir=e)
+  # Expression actually evaluated to execute visual debug
+  # Expression is evaluated in the environment 'e'
+  snoopexpr <-
+    expression({
+      rslt <- rmhsnoop(Wsim=Wsim, Wclip=Wclip, R=R,
+                       xcoords=xcoords,
+                       ycoords=ycoords,
+                       mlevels=mlevels,
+                       mcodes=mcodes,
+                       irep=irep,
+                       itype=itype,
+                       proptype=proptype,
+                       proplocn=proplocn,
+                       propmark=propmark,
+                       propindx=propindx,
+                       numerator=numerator,
+                       denominator=denominator)
+      inxt <- rslt$inxt
+      tnxt <- rslt$tnxt
+      itype <- if(rslt$accepted) rslt$itype else 0
+      storage.mode(tnxt) <-
+        storage.mode(inxt) <- storage.mode(itype) <- "integer"
+})
+  assign("snoopexpr", snoopexpr,  envir=e)
+  # callback expression
+  assign("callbackexpr", quote(eval(snoopexpr)), envir=e)
+  return(e)
+}
+
+# visual debug display using base graphics
+
+rmhsnoop <- local({
+
+  rmhsnoop <- function(..., Wsim, Wclip, R,
+                       xcoords, ycoords,
+                       mlevels, mcodes,
+                       irep, itype, 
+                       proptype, proplocn, propmark, propindx,
+                       numerator, denominator) {
+    trap.extra.arguments(..., .Context="In rmhsnoop")
+    X <- ppp(xcoords, ycoords, window=Wsim)
+    if(!missing(mlevels) && length(mlevels) > 0)
+      marks(X) <- factor(mlevels[mcodes+1], levels=mlevels)
+    Wclip.orig <- Wclip
+    # determine plot arguments
+    if(is.mask(Wclip)) {
+      parg.Wclip <- list(invert=TRUE, col="grey")
+    } else {
+      Wclip <- edges(Wclip) 
+      parg.Wclip <- list(lty=3, lwd=2, col="grey")
+    }
+    parg.birth <- list(pch=16, cols="green")
+    parg.death <- list(pch=4, cols="red", lwd=2)
+    parg.birthcircle <- list(col="green", lty=3)
+    parg.deathcircle <- list(col="red", lty=3)
+
+    # assemble a layered object representing the state and the proposal
+    if(is.null(proptype)) {
+      # initial state
+      L <- layered(Wsim,
+                   Wclip,
+                   X)
+      layerplotargs(L)$Wclip <- parg.Wclip
+      accepted <- TRUE
+    } else {
+      accepted <- (itype == proptype)
+      # add proposal info
+      switch(decode.proptype(proptype),
+             Reject=
+             {
+               propname <- "rejected"
+               L <- layered(Wsim=Wsim,
+                            Wclip=Wclip,
+                            X=X)
+               layerplotargs(L)$Wclip <- parg.Wclip
+             },
+             Birth = 
+             {
+               propname <- "birth proposal"
+               U <- ppp(proplocn[1], proplocn[2], window=Wsim)
+               D <- if(is.finite(R) && R > 0) {
+                 edges(disc(R, proplocn))[Wsim]
+               } else NULL
+               L <- layered(Wsim=Wsim,
+                            Wclip=Wclip,
+                            PrevState=X,
+                            Reach=D,
+                            NewPoint=U)
+               layerplotargs(L)$Wclip <- parg.Wclip
+               layerplotargs(L)$NewPoint <- parg.birth
+             },
+             Death = 
+             {
+               propname <- "death proposal"
+               # convert from C to R indexing
+               propindx <- propindx + 1
+               XminI <- X[-propindx]
+               XI <- X[propindx]
+               D <- if(is.finite(R) && R > 0) {
+                 edges(disc(R, c(XI$x, XI$y)))[Wsim]
+               } else NULL
+               L <- layered(Wsim=Wsim,
+                            Wclip=Wclip,
+                            RetainedPoints=XminI,
+                            Reach=D,
+                            Deletion=XI)
+               layerplotargs(L)$Wclip    <- parg.Wclip
+               layerplotargs(L)$Reach    <-  parg.deathcircle
+               layerplotargs(L)$Deletion <- parg.death
+             },
+             Shift = 
+             {
+               propname <- "shift proposal"
+               # convert from C to R indexing
+               propindx <- propindx + 1
+               # make objects
+               XminI <- X[-propindx]
+               XI <- X[propindx]
+               U <- ppp(proplocn[1], proplocn[2], window=Wsim)
+               if(is.finite(R) && R > 0) {
+                 DU <- edges(disc(R, proplocn))[Wsim]
+                 DXI <- edges(disc(R, c(XI$x, XI$y)))[Wsim]
+               } else { DU <- DXI <- NULL }
+               # make layers
+               L <- layered(Wsim=Wsim,
+                            Wclip=Wclip,
+                            OtherPoints=XminI,
+                            ReachAfter=DU,
+                            AfterShift=U,
+                            ReachBefore=DXI,
+                            BeforeShift=XI)
+               layerplotargs(L)$Wclip       <- parg.Wclip
+               layerplotargs(L)$ReachAfter  <- parg.birthcircle
+               layerplotargs(L)$AfterShift  <- parg.birth
+               layerplotargs(L)$ReachBefore <- parg.deathcircle
+               layerplotargs(L)$BeforeShift <- parg.death
+             },
+             stop("Unrecognised proposal type")
+             )
+    }
+    header <- c(paste("Iteration", irep),
+                propname,
+                paste("Hastings ratio =",
+                      signif(numerator, 4), "/", signif(denominator, 4)))
+    info <- list(irep=irep,
+                 Wsim=Wsim,
+                 Wclip=Wclip.orig,
+                 X=X,
+                 proptype=proptype,
+                 proplocn=proplocn,
+                 propindx=propindx,
+                 propmark=propmark,
+                 accepted=accepted,
+                 numerator=numerator,
+                 denominator=denominator)
+    inspectProposal(L, info, title=header)
+  }
+
+  decode.proptype <- function(n) {
+    if(n < 0 || n > 3) stop(paste("Unrecognised proposal type:", n))
+    switch(n+1, "Reject", "Birth", "Death", "Shift")
+  }
+  encode.proptype <- function(s) {
+    switch(s, Reject=0, Birth=1, Death=2, Shift=3)
+  }
+  
+  inspectProposal <- function(X, info, ..., title) {
+    if(missing(title)) title <- short.deparse(substitute(X))
+    if(!inherits(X, "layered"))
+      X <- layered(X)
+    lnames <- names(X)
+    if(sum(nzchar(lnames)) != length(X))
+      lnames <- paste("Layer", seq_len(length(X)))
+    # Find window and bounding box (validates X)
+    W <- as.owin(X)
+    BX <- as.rectangle(W)
+    # Initialise environment for state variables etc
+    # This environment is accessible to the panel button functions
+    en <- new.env()
+    assign("X", X, envir=en)
+    assign("W", W, envir=en)
+    assign("BX", BX, envir=en)
+    assign("zoomfactor", 1L, envir=en)
+    midX <- unlist(centroid.owin(BX))
+    assign("midX", midX, envir=en)
+    assign("zoomcentre", midX, envir=en)
+    assign("irep", info$irep, envir=en)
+    assign("inxt", info$irep+1, envir=en) 
+    assign("tnxt", -1, envir=en)
+    assign("accepted", info$accepted, envir=en)
+    assign("proplocn", info$proplocn, envir=en)
+    assign("info", info, envir=en)
+    # Build interactive panel
+    # Start with data panel
+    P <- simplepanel(title,
+                     BX,
+                     list(Data=BX),
+                     list(Data=dataclickfun),
+                     list(Data=dataredrawfun),
+                     snoopexit,
+                     en)
+    # Add pan buttons
+    margin <- max(sidelengths(BX))/4
+    panelwidth <- sidelengths(BX)[1]/2
+    P <- grow.simplepanel(P, "top", margin, navfuns["Up"], aspect=1)
+    P <- grow.simplepanel(P, "bottom", margin, navfuns["Down"], aspect=1)
+    P <- grow.simplepanel(P, "left", margin, navfuns["Left"], aspect=1)
+    P <- grow.simplepanel(P, "right", margin, navfuns["Right"], aspect=1)
+    # Zoom/Pan buttons at right
+    P <- grow.simplepanel(P, "right", panelwidth, zoomfuns)
+    # Accept/reject buttons at top
+    P <- grow.simplepanel(P, "top", margin, accept.clicks, accept.redraws)
+    # Dump/print buttons at bottom 
+    P <- grow.simplepanel(P, "bottom", margin, dumpfuns)
+    # Jump controls at left
+    maxchars <- max(4, nchar(names(jump.clicks)))
+    P <- grow.simplepanel(P, "left", panelwidth * maxchars/6, jump.clicks)
+    # go
+    rslt <- run.simplepanel(P, popup=FALSE)
+    clear.simplepanel(P)
+    rm(en)
+    return(rslt)
+  }
+
+
+# button control functions
+  zoomfuns <- 
+    rev(list(
+             "Zoom In"=function(env, xy) {
+               z <- get("zoomfactor", envir=env)
+               assign("zoomfactor", z * 2, envir=env)
+               return(TRUE)
+             },
+             "Zoom Out"=function(env, xy) {
+               z <- get("zoomfactor", envir=env)
+               assign("zoomfactor", z / 2, envir=env)
+               return(TRUE)
+             },
+             "At Proposal"=function(env, xy) {
+               proplocn <- get("proplocn", envir=env)
+               assign("zoomcentre", proplocn, envir=env)
+               return(TRUE)
+             },
+             Reset=function(env, xy) {
+               assign("zoomfactor", 1L, envir=env)
+               midX <- get("midX", envir=env)
+               assign("zoomcentre", midX, envir=env)
+               return(TRUE)
+             }))
+                           
+  navfuns <-
+    list(
+         Left = function(env, xy) {
+           zoom <- get("zoomfactor", envir=env)
+           ce <- get("zoomcentre", envir=env)
+           BX <- get("BX", envir=env)
+           width <- sidelengths(BX)[1]
+           stepsize <- (width/4)/zoom
+           ce <- ce - c(stepsize, 0)
+           assign("zoomcentre", ce, envir=env)
+           return(TRUE)
+         },
+         Right = function(env, xy) {
+           zoom <- get("zoomfactor", envir=env)
+           ce <- get("zoomcentre", envir=env)
+           BX <- get("BX", envir=env)
+           width <- sidelengths(BX)[1]
+           stepsize <- (width/4)/zoom
+           ce <- ce + c(stepsize, 0)
+           assign("zoomcentre", ce, envir=env)
+           return(TRUE)
+         },
+         Up = function(env, xy) {
+           zoom <- get("zoomfactor", envir=env)
+           ce <- get("zoomcentre", envir=env)
+           BX <- get("BX", envir=env)
+           height <- sidelengths(BX)[2]
+           stepsize <- (height/4)/zoom
+           ce <- ce + c(0, stepsize)
+           assign("zoomcentre", ce, envir=env)
+           return(TRUE)
+         },
+         Down = function(env, xy) {
+           zoom <- get("zoomfactor", envir=env)
+           ce <- get("zoomcentre", envir=env)
+           BX <- get("BX", envir=env)
+           height <- sidelengths(BX)[2]
+           stepsize <- (height/4)/zoom
+           ce <- ce - c(0, stepsize)
+           assign("zoomcentre", ce, envir=env)
+           return(TRUE)
+         })
+
+  accept.clicks <-
+    rev(list(
+             Accept=function(env, xy) {
+               assign("accepted", TRUE, envir=env)
+               return(TRUE)
+             },
+             Reject=function(env, xy) {
+               assign("accepted", FALSE, envir=env)
+               return(TRUE)
+             }))
+
+  accept.redraws <-
+    rev(list(
+             Accept=function(button, name, env) {
+               accepted <- get("accepted", envir=env)
+               if(accepted) {
+                 plot(button, add=TRUE, col="green")
+               } else {
+                 plot(button, add=TRUE)
+               }
+               text(centroid.owin(button), labels=name)
+             },
+             Reject=function(button, name, env) {
+               accepted <- get("accepted", envir=env)
+               if(accepted) {
+                 plot(button, add=TRUE)
+               } else {
+                 plot(button, add=TRUE, col="pink")
+               }
+               text(centroid.owin(button), labels=name)
+             }))
+             
+  jump.clicks <-
+    rev(list(
+             "Next Iteration"=function(env, xy) {
+               irep <- get("irep", envir=env)
+               assign("inxt", irep+1, envir=env)
+               return(FALSE)
+             },
+             "Skip 10"=function(env, xy) {
+               irep <- get("irep", envir=env)
+               assign("inxt", irep+10, envir=env)
+               return(FALSE)
+             },
+             "Skip 100"=function(env, xy) {
+               irep <- get("irep", envir=env)
+               assign("inxt", irep+100, envir=env)
+               return(FALSE)
+             },
+             "Skip 1000"=function(env, xy) {
+               irep <- get("irep", envir=env)
+               assign("inxt", irep+1000, envir=env)
+               return(FALSE)
+             },
+             "Skip 10,000"=function(env, xy) {
+               irep <- get("irep", envir=env)
+               assign("inxt", irep+10000, envir=env)
+               return(FALSE)
+             },
+             "Skip 100,000"=function(env, xy) {
+               irep <- get("irep", envir=env)
+               assign("inxt", irep+100000, envir=env)
+               return(FALSE)
+             },
+             "Next Birth"=function(env, xy) {
+               assign("inxt", -1, envir=env)
+               assign("tnxt", encode.proptype("Birth"), envir=env)
+               return(FALSE)
+             },
+             "Next Death"=function(env, xy) {
+               assign("inxt", -1, envir=env)
+               assign("tnxt", encode.proptype("Death"), envir=env)
+               return(FALSE)
+             },
+             "Next Shift"=function(env, xy) {
+               assign("inxt", -1, envir=env)
+               assign("tnxt", encode.proptype("Shift"), envir=env)
+               return(FALSE)
+             },
+             "Exit Debugger"=function(env, xy) {
+               assign("inxt", -1L, envir=env)
+               return(FALSE)
+             }))
+
+  dataclickfun <- function(env, xy) {
+    # function for handling clicks in the data window
+    z <- get("zoomfactor", envir=env)
+    ce <- get("zoomcentre", envir=env)
+    midX <- get("midX", envir=env)
+    ce <- ce + (unlist(xy) - midX)/z
+    assign("zoomcentre", ce, envir=env)
+    return(TRUE)
+  }
+
+  dataredrawfun <- function(button, name, env) {                             
+    # redraw data window
+    X <- get("X", envir=env)
+    BX <- get("BX", envir=env)
+    W <- get("W", envir=env)
+    midX <- get("midX", envir=env)
+    z <- get("zoomfactor", envir=env)
+    ce <- get("zoomcentre", envir=env)
+    scaleX <- shift(affine(shift(X, -ce), diag(c(z,z))), unlist(midX))
+    scaleW <- shift(affine(shift(W, -ce), diag(c(z,z))), unlist(midX))
+    scaleX <- scaleX[, BX]
+    scaleW <- intersect.owin(scaleW, BX, fatal=FALSE)
+    # redraw data in 'BX' 
+    if(!is.null(scaleW)) {
+      if(z == 1 && is.rectangle(scaleW)) {
+        plot(scaleW, add=TRUE, lwd=2)
+      } else {
+        plot(BX, add=TRUE, lty=3, border="red")
+        if(!identical(BX, scaleW))
+          plot(scaleW, add=TRUE, invert=TRUE)
+      }
+    }
+    if(!is.null(scaleX))
+      plot(scaleX, add=TRUE)
+    invisible(NULL)
+  }
+
+# functions to dump the current state, etc
+  dumpfuns <- list(
+                   "Dump to file"=function(env, xy) {
+                     irep <- get("irep", envir=env)
+                     X <- get("X", envir=env)
+                     xname <- paste("dump", irep, sep="")
+                     assign(xname, X)
+                     fname <- paste(xname, ".rda", sep="")
+                     eval(substitute(save(x, file=y, compress=TRUE), 
+                                     list(x=xname, y=fname)))
+                     cat(paste("Saved to", sQuote(fname), "\n"))
+                     return(TRUE)
+                   },
+                   "Print Info"=function(env, xy) {
+                     info <- get("info", envir=env)
+                     will.accept <- get("accepted", envir=env)
+                     with(info, {
+                       cat(paste("Iteration", irep, "\n"))
+                       cat("Simulation window:\n")
+                       print(Wsim)
+                       cat("Clipping window:\n")
+                       print(Wclip)
+                       cat("Current state:\n")
+                       print(X)
+                       propname <- decode.proptype(proptype)
+                       cat(paste("Proposal type:", propname, "\n"))
+                       prxy <- function(z) paren(paste(z, collapse=", "))
+                       switch(propname,
+                              Reject = { },
+                              Birth = {
+                                cat(paste("Birth of new point at location",
+                                          prxy(proplocn), "\n"))
+                              },
+                              Death = {
+                                Xi <- X[propindx]
+                                cat(paste("Death of data point", propindx,
+                                          "located at",  
+                                          prxy(as.numeric(coords(Xi))),
+                                          "\n"))
+                              },
+                              Shift = {
+                                Xi <- X[propindx]
+                                cat(paste("Shift data point",
+                                          propindx,
+                                          "from current location",
+                                          prxy(as.numeric(coords(Xi))),
+                                          "to new location",
+                                          prxy(proplocn),
+                                          "\n"))
+                              })
+                       cat(paste("Hastings ratio = ",
+                                 numerator, "/", denominator,
+                                 "=", numerator/denominator, "\n"))
+                       cat(paste("Fate of proposal:",
+                                 if(will.accept) "Accepted" else "Rejected",
+                                 "\n"))
+                       return(TRUE)
+                     })
+                   })
+  
+# function to determine return value
+                             
+  snoopexit <- function(env) {
+    ans <- eval(quote(list(inxt=inxt,
+                           tnxt=tnxt,
+                           accepted=accepted)),
+                envir=env)
+    return(ans)
+  }
+                             
+  testit <- function() {
+    rmhsnoop(Wsim=owin(), Wclip=square(0.7), R=0.1,
+             xcoords=runif(40),
+             ycoords=runif(40),
+             mlevels=NULL, mcodes=NULL,
+             irep=3, itype=1,
+             proptype=1, proplocn=c(0.5, 0.5), propmark=0, propindx=0,
+             numerator=42, denominator=24)
+  }
+                             
+  rmhsnoop
+})
+
+
diff --git a/R/rmhstart.R b/R/rmhstart.R
new file mode 100755
index 0000000..30fb69b
--- /dev/null
+++ b/R/rmhstart.R
@@ -0,0 +1,91 @@
+#
+#
+#   rmhstart.R
+#
+#   $Revision: 1.12 $  $Date: 2016/02/11 10:17:12 $
+#
+#
+
+rmhstart <- function(start, ...) {
+  UseMethod("rmhstart")
+}
+
+rmhstart.rmhstart <- function(start, ...) {
+  return(start)
+}
+
+rmhstart.list <- function(start, ...) {
+  st <- do.call.matched(rmhstart.default, start)
+  return(st)
+}
+
+rmhstart.default <- function(start=NULL, ..., n.start=NULL, x.start=NULL)
+{
+  if(!is.null(start) || length(list(...)) > 0)
+    stop("Syntax should be rmhstart(n.start=n) or rmhstart(x.start=x)")
+ 
+  ngiven <- !is.null(n.start)
+  xgiven <- !is.null(x.start)
+  
+  # n.start and x.start are incompatible
+  if(ngiven && xgiven)
+    stop("Give only one of the arguments n.start and x.start")
+
+  given <- if(ngiven) "n" else if(xgiven) "x" else "none"
+
+  # Validate arguments
+  if(ngiven && !is.numeric(n.start))
+      stop("n.start should be numeric")
+  if(xgiven) {
+    # We can't check x.start properly because we don't have the relevant window
+    # Just check that it is INTERPRETABLE as a point pattern  
+    xx <- as.ppp(x.start, W=ripras, fatal=FALSE)
+    if(is.null(xx))
+      stop(paste("x.start should be a point pattern object,",
+                 "or coordinate data in a format recognised by as.ppp"))
+  } else
+     xx <- NULL
+
+###################################################################
+# return augmented list  
+  out <- list(n.start=n.start,
+              x.start=x.start,
+              given=given,
+              xx=xx)
+  class(out) <- c("rmhstart", class(out))
+  return(out)
+}
+
+print.rmhstart <- function(x, ...) {
+  verifyclass(x, "rmhstart")
+
+  cat("Metropolis-Hastings algorithm starting parameters\n")
+  cat("Initial state: ")
+  switch(x$given,
+         none={ cat("not given\n") },
+         x = {
+               cat("given as x.start\n")
+               if(is.ppp(x$x.start)) 
+                 print(x$x.start)
+               else
+                 cat(paste("(x,y) coordinates of", x$xx$n,
+                           "points (window unspecified)\n"))
+               cat("\n")
+             },
+         n = {
+           n.start <- x$n.start
+           nstring <-
+             if(length(n.start) == 1)
+               paste(n.start)
+             else 
+               paste("(", paste(n.start, collapse=","), ")", sep="")
+           cat(paste("number fixed at n.start =", nstring, "\n")) }
+         )
+}
+
+update.rmhstart <- function(object, ...) {
+  do.call.matched(rmhstart.default,
+                  resolve.defaults(list(...), as.list(object),
+                                   .StripNull=TRUE))
+}
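+
+## Illustration of the two calling modes (assuming the 'cells' dataset):
+##
+##    s1 <- rmhstart(n.start=42)      # start from 42 points placed at random
+##    s2 <- rmhstart(x.start=cells)   # start from a given point pattern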
+
diff --git a/R/rmhtemper.R b/R/rmhtemper.R
new file mode 100644
index 0000000..820f94c
--- /dev/null
+++ b/R/rmhtemper.R
@@ -0,0 +1,76 @@
+#'
+#'      rmhtemper.R
+#'
+#'   $Revision: 1.3 $  $Date: 2015/10/21 09:06:57 $
+#'
+
+reheat <- local({
+
+  expon <- function(x, alpha) {
+    if(is.null(x)) return(NULL)
+    if(is.numeric(x)) return(x^alpha)
+    if(is.im(x)) return(x^alpha)
+    if(is.function(x)) {
+      f <- x
+      g <- function(...) { f(...)^alpha }
+      if(!inherits(f, "funxy")) return(g)
+      return(funxy(g, W=as.owin(f)))
+    }
+    if(is.list(x)) return(lapply(x, expon))
+    stop("Unrecognised format for x in x^alpha", call.=FALSE)
+  }
+    
+  reheat <- function(model, invtemp) {
+    model <- rmhmodel(model)
+    cif   <- model$cif
+    par   <- model$par
+    w     <- model$w
+    trend <- model$trend
+    types <- model$types
+
+    newtrend <- expon(trend, invtemp)
+
+    rules <- lapply(cif, spatstatRmhInfo)
+    temperfuns <- lapply(rules, getElement, name="temper")
+    if(any(bad <- sapply(temperfuns, is.null)))
+      stop(paste("reheating the", commasep(sQuote(cif[bad])),
+                 ngettext(sum(bad), "cif", "cifs"),
+                 "is not supported"))
+
+    Ncif <- length(cif)
+    if(Ncif == 1) {
+      newpar <- temperfuns[[1]](par, invtemp)
+    } else {
+      newpar <- par
+      for(i in 1:Ncif) 
+        newpar[[i]] <- temperfuns[[i]](par[[i]], invtemp)
+    }
+    newmodel <- rmhmodel(cif=cif,
+                         par=newpar, trend=newtrend,
+                         w=w, types=types)
+    return(newmodel)
+  } 
+
+  reheat
+  
+})
+
+
+rtemper <- function(model, invtemp, nrep, ..., start=NULL, verbose=FALSE){
+  df <- data.frame(invtemp, nrep)
+  ndf <- nrow(df)
+  X <- NULL
+  for(i in 1:ndf) {
+    if(verbose)
+      cat(paste("Running", nrep[i], "steps",
+                "at inverse temperature", invtemp[i], "... "))
+    model.i <- reheat(model, invtemp[i])
+    X <- rmh(model.i, nrep=nrep[i], ...,
+             start=start,
+             overrideXstart = X,
+             overrideclip   = (i != ndf),
+             verbose=FALSE)
+    if(verbose) cat(" Done.\n")
+  }
+  return(X)
+}
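+
+#' Usage sketch (hypothetical parameter values):
+#'
+#'    mod <- rmhmodel(cif="strauss",
+#'                    par=list(beta=100, gamma=0.5, r=0.05), w=square(1))
+#'    X <- rtemper(mod, invtemp=c(0.5, 1), nrep=c(1e5, 1e5), verbose=TRUE)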
diff --git a/R/rose.R b/R/rose.R
new file mode 100644
index 0000000..1a05fe5
--- /dev/null
+++ b/R/rose.R
@@ -0,0 +1,306 @@
+#'
+#'    rose.R
+#'
+#'   Rose diagrams
+#'
+#'   $Revision: 1.9 $  $Date: 2015/08/25 08:19:19 $
+#'
+
+rose <- function(x, ...) UseMethod("rose")
+
+rose.default <- local({
+
+  rose.default <- function(x, breaks = NULL, ...,
+                           weights=NULL,
+                           nclass=NULL,
+                           unit=c("degree", "radian"),
+                           start=0, clockwise=FALSE,
+                           main) {
+    if(missing(main) || is.null(main))
+      main <- short.deparse(substitute(x))
+    stopifnot(is.numeric(x))
+    if(!is.null(weights))
+      check.nvector(weights, length(x), things="observations")
+    #' determine units
+    missu <- missing(unit)
+    unit <- match.arg(unit)
+    unit <- validate.angles(x, unit, missu)
+    FullCircle <- switch(unit, degree = 360, radian = 2*pi)
+    #' reduce to [0, 2pi]
+    x <- x %% FullCircle
+    #' determine breakpoints strictly inside full circle
+    breaks <- makebreaks(x, c(0, FullCircle), breaks, nclass)
+    #' histogram without weights
+    h <- do.call.matched(hist.default,
+                         list(x=x, breaks=breaks, ..., plot=FALSE),
+                         skipargs=graphicsAargh,
+                         sieve=TRUE)
+    result <- h$result
+    otherargs <- h$otherargs
+    #' redo weights, if given
+    if(!is.null(weights)) {
+      wh <- whist(x=x, breaks=breaks, weights=weights)
+      result$count <- wh
+      result$density <- wh/diff(breaks)
+    }
+    #
+    do.call(rose.histogram,
+            c(list(x=result, main=main,
+                   unit=unit, start=start, clockwise=clockwise),
+              otherargs))
+  }
+
+  graphicsAargh <- c("density", "angle", "col", "border",
+                     "xlim", "ylim", "xlab", "ylab", "axes")
+
+  makebreaks <- function(x, r, breaks=NULL, nclass=NULL) {
+    use.br <- !is.null(breaks)
+    if (use.br) {
+      if (!is.null(nclass)) 
+        warning("'nclass' not used when 'breaks' is specified")
+    } else if (!is.null(nclass) && length(nclass) == 1L) {
+      breaks <- nclass
+    } else breaks <- "Sturges"
+    use.br <- use.br && (nB <- length(breaks)) > 1L
+    if (use.br) 
+      breaks <- sort(breaks)
+    else {
+      if (is.character(breaks)) {
+        breaks <- match.arg(tolower(breaks),
+                            c("sturges", 
+                              "fd",
+                              "freedman-diaconis",
+                              "scott"))
+        breaks <- switch(breaks,
+                         sturges = nclass.Sturges(x), 
+                         `freedman-diaconis` = ,
+                         fd = nclass.FD(x),
+                         scott = nclass.scott(x), 
+                         stop("unknown 'breaks' algorithm"))
+      }
+      else if (is.function(breaks)) {
+        breaks <- breaks(x)
+      }
+      if (length(breaks) == 1) {
+        if (!is.numeric(breaks) || !is.finite(breaks) || 
+            breaks < 1L) 
+          stop("invalid number of 'breaks'")
+        breaks <- seq(r[1], r[2], length.out=breaks)
+      }
+      else {
+        if (!is.numeric(breaks) || length(breaks) <= 1) 
+          stop(gettextf("Invalid breakpoints produced by 'breaks(x)': %s", 
+                        format(breaks)), domain = NA)
+        breaks <- sort(breaks)
+      }
+    }
+    return(breaks)
+  }
+  
+  rose.default
+})
+
+
+rose.histogram <- function(x, ...,
+                           unit=c("degree", "radian"),
+                           start=0, clockwise=FALSE,
+                           main, labels=TRUE, at=NULL, do.plot=TRUE) {
+  if(missing(main) || is.null(main))
+    main <- short.deparse(substitute(x))
+  #' determine units
+  missu <- missing(unit)
+  unit <- match.arg(unit)
+  #' validate
+  bks <- x$breaks
+  unit <- validate.angles(bks, unit, missu)
+#  FullCircle <- switch(unit, degree = 360, radian = 2*pi)
+  #' get sector sizes
+  y <- x$density
+  ymax <- max(y)
+  #' draw disc
+  insideclearance <- 0.1
+  outsidespace <- if(!is.null(at) && length(at) == 0) 0 else
+                  if(identical(labels, FALSE)) 0.1 else 0.25
+  R <- (1+insideclearance) * ymax
+  DD <- disc(R)
+  Rout <- (1 + outsidespace) * R
+  result <- do.call.matched(plot.owin,
+                            resolve.defaults(list(x=disc(Rout),
+                                                  main=main,
+                                                  type="n"), 
+                                             list(...)))
+  do.call.matched(plot.owin,
+                  resolve.defaults(list(x=DD,
+                                        hatch=FALSE,
+                                        add=TRUE),
+                                   list(...)),
+                  extrargs=graphicsPars("owin"),
+                  skipargs="col")
+  if(do.plot) {
+    #' draw sectors
+    ang <- ang2rad(bks, unit=unit, start=start, clockwise=clockwise)
+    eps <- min(diff(ang), pi/128)/2
+    for(i in seq_along(y)) {
+      aa <- seq(ang[i], ang[i+1], by=eps)
+      aa[length(aa)] <- ang[i+1]
+      yi <- y[i]
+      xx <- c(0, yi * cos(aa), 0)
+      yy <- c(0, yi * sin(aa), 0)
+      do.call.matched(polygon, list(x=xx, y=yy, ...))
+    }
+    #' add tick marks
+    circticks(R, at=at, unit=unit, start=start, clockwise=clockwise,
+              labels=labels)
+  }
+  #'
+  return(invisible(result))
+}
+
+rose.density <- function(x, ..., unit=c("degree", "radian"),
+                         start=0, clockwise=FALSE,
+                         main, labels=TRUE, at=NULL, do.plot=TRUE) {
+  if(missing(main) || is.null(main))
+    main <- short.deparse(substitute(x))
+  ang <- x$x
+  rad <- x$y
+  missu <- missing(unit)
+  unit <- match.arg(unit)
+  unit <- validate.angles(ang, unit, missu)
+  #'
+  result <- roseContinuous(ang, rad, unit, ...,
+                           start=start, clockwise=clockwise,
+                           main=main, labels=labels, at=at,
+                           do.plot=do.plot)
+  return(invisible(result))
+}
+
+rose.fv <- function(x, ..., unit=c("degree", "radian"),
+                    start=0, clockwise=FALSE,
+                    main, labels=TRUE, at=NULL, do.plot=TRUE) {
+  if(missing(main) || is.null(main))
+    main <- short.deparse(substitute(x))
+  ang <- with(x, .x)
+  rad <- with(x, .y)
+  missu <- missing(unit)
+  unit <- match.arg(unit)
+  unit <- validate.angles(ang, unit, missu)
+  #'
+  result <- roseContinuous(ang, rad, unit, ...,
+                           start=start, clockwise=clockwise,
+                           main=main, labels=labels, at=at,
+                           do.plot=do.plot)
+  return(invisible(result))
+}
+
+roseContinuous <- function(ang, rad, unit, ...,
+                           start=0, clockwise=FALSE,
+                           main,
+                           labels=TRUE, at=NULL,
+                           do.plot=TRUE) {
+  rmax <- max(rad)
+  #' draw disc
+  insideclearance <- 0.1
+  outsidespace <- if(!is.null(at) && length(at) == 0) 0 else
+                  if(identical(labels, FALSE)) 0.1 else 0.25
+  R <- (1+insideclearance) * rmax
+  DD <- disc(R)
+  Rout <- (1 + outsidespace) * R
+  result <- do.call.matched(plot.owin,
+                            resolve.defaults(list(x=disc(Rout),
+                                                  main=main,
+                                                  type="n"), 
+                                             list(...)))
+  do.call.matched(plot.owin,
+                  resolve.defaults(list(x=DD,
+                                        add=TRUE,
+                                        hatch=FALSE),
+                                   list(...)),
+                  extrargs=graphicsPars("owin"),
+                  skipargs="col")
+  #' draw plot
+  if(do.plot) {
+    ang <- ang2rad(ang, unit=unit, start=start, clockwise=clockwise)
+    xx <- rad * cos(ang)
+    yy <- rad * sin(ang)
+    do.call.matched(polygon, list(x=xx, y=yy, ...), extrargs="lwd")
+    circticks(R, at=at, unit=unit, start=start, clockwise=clockwise,
+              labels=labels)
+  }
+  return(result)
+}
+
+ang2rad <- local({
+
+  compasspoints <- c(E=0,N=90,W=180,S=270)
+  
+  ang2rad <- function(ang, unit=c("degree", "radian"),
+                         start=0, clockwise=FALSE) {
+    unit <- match.arg(unit)
+    clocksign <- if(clockwise) -1 else 1
+    stopifnot(length(start) == 1)
+    if(is.character(start)) {
+      if(is.na(match(toupper(start), names(compasspoints))))
+        stop(paste("Unrecognised compass point", sQuote(start)), call.=FALSE)
+      startdegrees <- compasspoints[[start]]
+      start <- switch(unit,
+                      degree = startdegrees,
+                      radian = pi * (startdegrees/180))
+      # start is measured anticlockwise
+      ang <- start + clocksign * ang
+    } else {
+      stopifnot(is.numeric(start))
+      # start is measured according to value of 'clockwise'
+      ang <- clocksign * (start + ang)
+    }
+    rad <- switch(unit,
+                  degree = pi * (ang/180),
+                  radian = ang)
+    return(rad)
+  }
+
+  ang2rad
+})
+
+
+circticks <- function(R, at=NULL, unit=c("degree", "radian"),
+                      start=0, clockwise=FALSE, labels=TRUE) {
+  unit <- match.arg(unit)
+  FullCircle <- switch(unit, degree = 360, radian = 2*pi)
+  if(is.null(at)) {
+    at <- FullCircle * (0:23)/24
+    major <- ((0:23) %% 6 == 0)
+  } else {
+    if(length(at) == 0) return(invisible(NULL))
+    nat <- (at/FullCircle) * 4
+    major <- abs(nat - round(nat)) < 0.01
+  }
+  atradians <- ang2rad(ang=at, unit=unit, start=start, clockwise=clockwise)
+  tx <- R * cos(atradians)
+  ty <- R * sin(atradians)
+  expan <- ifelse(major, 1.1, 1.05)
+  segments(tx, ty, expan * tx, expan * ty, lwd=major+1)
+  if(!identical(labels, FALSE)) {
+    if(identical(labels, TRUE)) {
+      labels <- switch(unit,
+                       degree=paste(round(at)),
+                       radian=parse(text= simplenumber(at/pi, "pi", "*", 1e-3)))
+    } else stopifnot(is.vector(labels) && length(labels) == length(at))
+    big <- expan + 0.1
+    text(big * tx, big * ty, labels=labels)
+  }
+  invisible(NULL)
+}
+
+validate.angles <- function(angles, unit=c("degree", "radian"), guess=TRUE) {
+  #' validate
+  width <- diff(range(angles))
+  if(missing(unit) && guess && width <= 6.2832) {
+    warning("Very small range of angles: treating them as radian")
+    unit <- "radian"
+  } else unit <- match.arg(unit)
+  FullCircle <- switch(unit, degree = 360, radian = 2*pi)
+  if(width > 1.002 * FullCircle)
+    stop("Range of angles exceeds a full circle")
+  return(unit)
+}
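+
+#' Usage sketch (assumed data):
+#'
+#'    ang <- runif(100, max=360)                 # angles in degrees
+#'    rose(ang)                                  # default histogram breaks
+#'    rose(ang, breaks=seq(0, 360, by=30), col="grey")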
+
diff --git a/R/rotate.R b/R/rotate.R
new file mode 100755
index 0000000..6d25763
--- /dev/null
+++ b/R/rotate.R
@@ -0,0 +1,95 @@
+#
+#	rotate.S
+#
+#	$Revision: 1.21 $	$Date: 2014/10/24 00:22:30 $
+#
+
+rotxy <- function(X, angle=pi/2) {
+  co <- cos(angle)
+  si <- sin(angle)
+  list(x = co * X$x - si * X$y,
+       y = si * X$x + co * X$y)
+}
+
+rotxypolygon <- function(p, angle=pi/2) {
+  p[c("x","y")] <- rotxy(p, angle=angle)
+  # area and hole status are invariant under rotation
+  return(p)
+}
+
+rotate <- function(X, ...) {
+  UseMethod("rotate")
+}
+
+rotate.owin <- function(X, angle=pi/2, ..., rescue=TRUE, centre=NULL) {
+  verifyclass(X, "owin")
+  if(!is.null(centre)) {
+    ## rotation about designated centre
+    X <- shift(X, origin=centre)
+    negorig <- getlastshift(X)
+  } else negorig <- NULL
+  switch(X$type,
+         rectangle={
+           # convert rectangle to polygon
+           P <- owin(X$xrange, X$yrange, poly=
+                     list(x=X$xrange[c(1,2,2,1)],
+                          y=X$yrange[c(1,1,2,2)]),
+                     unitname=unitname(X))
+           # call polygonal case
+           Y <- rotate.owin(P, angle, rescue=rescue)
+         },
+         polygonal={
+           # First rotate the polygonal boundaries
+           bdry <- lapply(X$bdry, rotxypolygon, angle=angle)
+           # wrap up
+           Y <- owin(poly=bdry, check=FALSE, unitname=unitname(X))
+           if(rescue)
+             Y <- rescue.rectangle(Y)
+         },
+         mask={
+           newframe <- boundingbox(rotxy(corners(X), angle))
+           Y <- if(length(list(...)) > 0) as.mask(newframe, ...) else 
+                   as.mask(newframe, eps=with(X, min(xstep, ystep)))
+           pixelxy <- rasterxy.mask(Y)
+           xybefore <- rotxy(pixelxy, -angle)
+           Y$m[] <- with(xybefore, inside.owin(x, y, X))
+           Y <- intersect.owin(Y, boundingbox(Y))
+           if(rescue)
+             Y <- rescue.rectangle(Y)
+           unitname(Y) <- unitname(X)
+         },
+         stop("Unrecognised window type")
+         )
+  if(!is.null(negorig))
+    Y <- shift(Y, -negorig)
+  return(Y)
+}
+
+rotate.ppp <- function(X, angle=pi/2, ..., centre=NULL) {
+  verifyclass(X, "ppp")
+  if(!is.null(centre)) {
+    X <- shift(X, origin=centre)
+    negorigin <- getlastshift(X)
+  } else negorigin <- NULL
+  r <- rotxy(X, angle)
+  w <- rotate.owin(X$window, angle, ...)
+  Y <- ppp(r$x, r$y, window=w, marks=marks(X, dfok=TRUE), check=FALSE)
+  if(!is.null(negorigin))
+    Y <- shift(Y, -negorigin)
+  return(Y)
+}
+
+rotate.im <- function(X, angle=pi/2, ..., centre=NULL) {
+  if(!is.null(centre)) {
+    X <- shift(X, origin=centre)
+    negorigin <- getlastshift(X)
+  } else negorigin <- NULL
+  co <- cos(angle)
+  si <- sin(angle)
+  m <- matrix(c(co,si,-si,co), nrow=2, ncol=2)
+  Y <- affine(X, mat=m)
+  if(!is.null(negorigin))
+    Y <- shift(Y, -negorigin)
+  return(Y)
+}
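+
+# Usage sketch (assuming the 'cells' dataset and the documented
+# 'origin' keywords of shift):
+#
+#    rotate(cells, pi/4)                     # rotate about the origin
+#    rotate(cells, pi/4, centre="centroid")  # rotate about the window centroid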
+
diff --git a/R/rotmean.R b/R/rotmean.R
new file mode 100644
index 0000000..a11ac8a
--- /dev/null
+++ b/R/rotmean.R
@@ -0,0 +1,44 @@
+##
+## rotmean.R
+##
+## rotational average of pixel values
+##
+##  $Revision: 1.9 $ $Date: 2015/06/18 02:45:42 $
+
+rotmean <- function(X, ..., origin, padzero=TRUE, Xname, result=c("fv", "im")) {
+  if(missing(Xname))
+    Xname <- sensiblevarname(short.deparse(substitute(X)), "X")
+  trap.extra.arguments(..., .Context="rotmean")
+  stopifnot(is.im(X))
+  if(!missing(origin))
+    X <- shift(X, origin=origin)
+  result <- match.arg(result)
+  rmax <- with(vertices(Frame(X)), sqrt(max(x^2+y^2)))
+  if(padzero) 
+    X <- padimage(na.handle.im(X, 0), 0, W=square(c(-1,1)*rmax))
+  values <- X[drop=TRUE]
+  radii <- with(as.data.frame(rasterxy.im(X, drop=TRUE)), sqrt(x^2+y^2))
+  ra <- pmin(range(radii), rmax)
+  eps <- sqrt(X$xstep^2 + X$ystep^2)
+  a <- unnormdensity(radii,                 from=ra[1], to=ra[2], bw=eps)
+  b <- unnormdensity(radii, weights=values, from=ra[1], to=ra[2], bw=eps)
+  df <- data.frame(r=a$x, f=b$y/a$y)
+  FUN <- fv(df,
+            argu="r",
+            ylab=substitute(bar(X)(r), list(X=as.name(Xname))),
+            valu="f",
+            fmla=(. ~ r),
+            alim=ra,
+            labl=c("r", "%s(r)"),
+            desc=c("distance argument r",
+                "rotational average"),
+            unitname=unitname(X),
+            fname=paste0("bar", paren(Xname)))
+  attr(FUN, "dotnames") <- "f"
+  if(result == "fv") return(FUN)
+  ## compute image
+  FUN <- as.function(FUN)
+  XX <- as.im(X, na.replace=1)
+  IM <- as.im(function(x,y,FUN){ FUN(sqrt(x^2+y^2)) }, XX, FUN=FUN)
+  return(IM)
+}
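+
+## Usage sketch (assuming density.ppp and the 'cells' dataset):
+##
+##    Z <- density(cells)            # a pixel image
+##    f <- rotmean(Z)                # rotational average, as an "fv" object
+##    g <- rotmean(Z, result="im")   # the same, as a radially symmetric image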
diff --git a/R/round.R b/R/round.R
new file mode 100644
index 0000000..380b30c
--- /dev/null
+++ b/R/round.R
@@ -0,0 +1,44 @@
+#
+#   round.R
+#
+#   discretisation of coordinates
+#
+#   $Revision: 1.5 $  $Date: 2013/01/09 03:13:10 $
+
+round.ppp <- round.pp3 <- round.ppx <- function(x, digits=0) {
+  coords(x) <- round(as.matrix(coords(x)), digits=digits)
+  return(x)
+}
+
+rounding <- function(x) {
+  UseMethod("rounding")
+}
+
+rounding.ppp <- rounding.pp3 <- rounding.ppx <- function(x) {
+  rounding(as.matrix(coords(x)))
+}
+
+rounding.default <- function(x) {
+  # works for numeric, complex, matrix etc
+  if(all(x == 0))
+    return(NULL)
+  if(identical(all.equal(x, round(x)), TRUE)) { 
+    # integers: go up
+    k <- 0
+    smallk <- -log10(.Machine$double.xmax)
+    repeat {
+      if(k < smallk || !identical(all.equal(x, round(x, k-1)), TRUE))
+        return(k)
+      k <- k-1
+    }
+  } else {
+    # not integers: go down
+    k <- 1
+    bigk <- -log10(.Machine$double.eps)
+    repeat {
+      if(k > bigk || identical(all.equal(x, round(x, k)), TRUE))
+        return(k)
+      k <- k+1
+    }
+  }
+}
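+
+# Examples of the convention:
+#
+#    rounding(c(0.1, 0.25))   #  2 : rounded to 2 decimal places
+#    rounding(c(10, 20))      # -1 : rounded to the nearest multiple of 10
+#    rounding(c(0, 0))        # NULL : all values zero, rounding undetermined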
diff --git a/R/rppm.R b/R/rppm.R
new file mode 100644
index 0000000..63ae2c1
--- /dev/null
+++ b/R/rppm.R
@@ -0,0 +1,126 @@
+#'
+#' rppm.R
+#' 
+#'  Recursive Partitioning for Point Process Models
+#'
+#'  $Revision: 1.12 $  $Date: 2017/06/05 10:31:58 $
+
+rppm <- function(..., rpargs=list()) {
+  ## do the equivalent of ppm(...)
+  cl <- match.call()
+  cl[[1]] <- as.name('ppm')
+  if("rpargs" %in% names(cl)) cl$rpargs <- NULL
+  cl$forcefit <- TRUE
+  pfit <- eval(cl, envir=parent.frame())
+  ## 
+  if(!is.poisson(pfit))
+    warning("Interpoint interaction will be ignored", call.=FALSE)
+  df <- getglmdata(pfit)
+  gf <- getglmfit(pfit)
+  sf <- getglmsubset(pfit)
+  rp <- do.call(rpart,
+                resolve.defaults(list(formula=formula(gf),
+                                      data=df,
+                                      subset=sf,
+                                      weights=df$.mpl.W),
+                                 rpargs,
+                                 list(method="poisson")))
+  result <- list(pfit=pfit, rp=rp)
+  class(result) <- c("rppm", class(result))
+  return(result)
+}
+
+# undocumented
+as.ppm.rppm <- function(object) { object$pfit }
+
+print.rppm <- function(x, ...) {
+  splat("Point process model with recursive partitioning")
+  splat("Data:", sQuote(x$pfit$Qname))
+  splat("Covariates:", commasep(sQuote(variablesinformula(formula(x$pfit)))))
+  splat("Regression tree:")
+  print(x$rp)
+  invisible(NULL)
+}
+
+plot.rppm <- local({
+
+  argsPlotRpart <- c("x", "uniform", "branch",
+                     "compress", "margin", "minbranch")
+  argsTextRpart <- c("splits", "label", "FUN", "all", "pretty",
+                     "digits", "use.n", "fancy",
+                     "fwidth", "fheight", "bg", "minlength")
+  
+  plot.rppm <- function(x, ..., what=c("tree", "spatial"), treeplot=NULL) {
+    xname <- short.deparse(substitute(x))
+    what <- match.arg(what)
+    switch(what,
+           tree = {
+             if(is.function(treeplot)) 
+               return(treeplot(x$rp, ...))
+             out <- do.call.matched(plot,
+                                    list(x=x$rp, ...),
+                                    funargs=argsPlotRpart,
+                                    extrargs=graphicsPars("plot"))
+             # note: plot.rpart does not pass arguments to 'lines'
+             do.call.matched(text,
+                             list(x=x$rp, ...),
+                             funargs=argsTextRpart,
+                             extrargs=graphicsPars("text"))
+           },
+           spatial = {
+             p <- predict(x)
+             out <- do.call("plot",
+                            resolve.defaults(list(x=p),
+                                             list(...),
+                                             list(main=xname)))
+           })
+    return(invisible(out))
+  }
+
+  plot.rppm
+
+})
+
+
+#' prune method
+
+prune.rppm <- function(tree, ...) {
+  tree$rp <- rpart::prune(tree$rp, ...)
+  return(tree)
+}
+
+#' predict method
+
+predict.rppm <- function(object, ...) {
+  model <- object$pfit
+  tree  <- object$rp
+  #' assemble covariates for prediction, using rules of predict.ppm
+  co <- predict(model, ..., type="covariates", check=FALSE, repair=FALSE)
+  newdata <- co$newdata
+  masque  <- co$mask
+  #' perform prediction using the tree
+  pred <- predict(tree, newdata=co$newdata)
+  #' pack up appropriately
+  if(is.null(masque))
+    return(pred)
+  imago <- as.im(masque, value=1.0)
+  if(!is.marked(model)) {
+    out <- imago
+    out[] <- pred
+  } else {
+    lev <- levels(marks(data.ppm(model)))
+    nlev <- length(lev)
+    out <- rep(list(imago), nlev)
+    names(out) <- lev
+    splitpred <- split(pred, newdata$marks)
+    for(i in seq_len(nlev))
+      out[[i]][] <- splitpred[[i]]
+    out <- as.solist(out)
+  }
+  return(out)
+}
+    
+fitted.rppm <- function(object, ...) {
+  predict(object, locations=data.ppm(object$pfit))
+}
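+
+#' Usage sketch (assuming the 'bei' dataset and its covariates 'bei.extra'):
+#'
+#'    fit <- rppm(bei ~ elev + grad, data=bei.extra)
+#'    plot(fit)                    # the fitted regression tree
+#'    plot(fit, what="spatial")    # the fitted intensity as an image
+#'    lam <- predict(fit)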
+
diff --git a/R/rshift.R b/R/rshift.R
new file mode 100755
index 0000000..37a4da6
--- /dev/null
+++ b/R/rshift.R
@@ -0,0 +1,175 @@
+#
+#   rshift.R
+#
+#   random shift with optional toroidal boundary
+#
+#   $Revision: 1.17 $   $Date: 2016/02/11 10:17:12 $
+#
+#
+rshift <- function(X, ...) {
+  UseMethod("rshift")
+}
+
+rshift.splitppp <- function(X, ..., which=seq_along(X))
+{
+  verifyclass(X, "splitppp")
+  if("group" %in% names(list(...)))
+    stop(paste("argument", sQuote("group"),
+               "not implemented for splitppp objects"))
+
+  if(is.null(which)) {
+    iwhich <- which <- seq_along(X)
+  } else {
+    id <- seq_along(X)
+    names(id) <- names(X)
+    iwhich <- id[which]
+    if(length(iwhich) == 0)
+      stop(paste("Argument", sQuote("which"), "did not match any marks"))
+  }
+  
+  # validate arguments and determine common clipping window
+  arglist <- handle.rshift.args(X[[1]]$window, ..., edgedefault="torus")
+
+  if(!is.null(clip <- arglist$clip)) {
+    # clip the patterns that are not to be shifted
+    if(length(iwhich) < length(X)) 
+      X[-iwhich] <- lapply(X[-iwhich], "[.ppp", i=clip)
+  }
+  # perform shift on selected patterns
+  # (setting group = NULL ensures each pattern is not split further)
+  shiftXsub <- do.call(lapply, append(list(X[iwhich], rshift.ppp, group=NULL),
+                                        arglist))
+  # put back
+  X[iwhich] <- shiftXsub
+
+  return(X)
+}
+
+rshift.ppp <- function(X, ..., which=NULL, group)
+{
+  verifyclass(X, "ppp")
+  
+  # validate arguments and determine common clipping window
+  arglist <- handle.rshift.args(X$window, ..., edgedefault="torus")
+
+  # default grouping
+  #   (NULL is not the default)
+  #   (NULL means all points shifted in parallel)
+  if(missing(group))
+    group <- if(is.multitype(X)) marks(X) else NULL
+
+  # if no grouping, use of `which' is undefined
+  if(is.null(group) && !is.null(which))
+    stop(paste("Cannot apply argument", sQuote("which"),
+               "; no grouping defined"))
+
+  # if grouping, use split
+  if(!is.null(group)) {
+    Y <- split(X, group)
+    split(X, group) <- do.call(rshift.splitppp,
+                               append(list(Y, which=which),
+                                      arglist))
+    return(X)
+  } 
+    
+  # ungrouped point pattern
+  # shift all points in parallel
+
+  # recover arguments
+  radius <- arglist$radius
+  width  <- arglist$width
+  height <- arglist$height
+  edge   <- arglist$edge
+  clip   <- arglist$clip
+ 
+  W <- X$window
+  W <- rescue.rectangle(W)
+  if(W$type != "rectangle" && edge=="torus")
+    stop("Torus (periodic) boundary is only meaningful for rectangular windows")
+
+  # generate random translation vector
+  
+  if(!is.null(radius)) 
+    jump <- runifdisc(1, radius=radius)
+  else {
+    jump <- list(x=runif(1, min=0, max=width),
+                 y=runif(1, min=0, max=height))
+  }
+
+  # translate points
+  x <- X$x + jump$x
+  y <- X$y + jump$y
+
+  # wrap points
+  if(edge == "torus") {
+    xr <- W$xrange
+    yr <- W$yrange
+    Wide <- diff(xr)
+    High <- diff(yr)
+    x <- xr[1] + (x - xr[1]) %% Wide
+    y <- yr[1] + (y - yr[1]) %% High
+  }
+
+  # put back into point pattern
+  X$x <- x
+  X$y <- y
+
+  # clip to window
+  if(!is.null(clip))
+    X <- X[clip]
+
+  return(X)
+}
+
+
+handle.rshift.args <- function(W, ...,
+                               radius=NULL, width=NULL, height=NULL,
+                               edge=NULL, clip=NULL, edgedefault)
+{
+  verifyclass(W, "owin")
+  W <- rescue.rectangle(W)
+  
+  if(length(aargh <- list(...)) > 0)
+    stop(paste("Unrecognised arguments:",
+               paste(names(aargh), collapse=",")))
+  
+  if(!is.null(radius)) {
+    # radial generator
+    if(!(is.null(width) && is.null(height)))
+      stop(paste(sQuote("radius"), "is incompatible with",
+                 sQuote("width"), "and", sQuote("height")))
+  } else {
+    # rectangular generator
+    if(is.null(width) != is.null(height))
+      stop("Must specify both width and height, if one is specified")
+    if(is.null(width)) width <- diff(W$xrange)
+    if(is.null(height)) height <- diff(W$yrange)
+  }
+  
+  if(is.null(edge))
+    edge <- edgedefault
+  else if(!(edge %in% c("torus", "erode", "none")))
+    stop(paste("Unrecognised option erode=", sQuote(edge)))
+
+  # determine whether clipping window is needed
+  if(is.null(clip))
+    clip <- switch(edge,
+                   torus= NULL,
+                   none= W,
+                   erode={
+                     if(!is.null(radius))
+                       erosion.owin(W, radius)
+                     else if(W$type == "rectangle")
+                       trim.rectangle(W, width, height)
+                     else
+                       erosion.owin(W, max(width, height))
+                   })
+
+  return(list(radius=radius, width=width, height=height,
+              edge=edge, clip=clip))
+}
+
+rtoro <- function(X, which=NULL, radius=NULL, width=NULL, height=NULL) {
+  .Deprecated("rshift", package="spatstat")
+  rshift(X, which=which, radius=radius, width=width, height=height)
+}
diff --git a/R/rshift.psp.R b/R/rshift.psp.R
new file mode 100755
index 0000000..e69c9cf
--- /dev/null
+++ b/R/rshift.psp.R
@@ -0,0 +1,64 @@
+#
+# rshift.psp.R
+#
+#  $Revision: 1.6 $  $Date: 2011/05/18 09:10:12 $
+#
+
+
+rshift.psp <- function(X, ..., group=NULL, which=NULL) {
+  verifyclass(X, "psp")
+  
+  # process arguments
+  W <- rescue.rectangle(X$window)
+  arglist <- handle.rshift.args(W, ..., edgedefault="erode")
+  radius <- arglist$radius
+  width  <- arglist$width
+  height <- arglist$height
+  edge   <- arglist$edge
+  clip   <- arglist$clip
+  if(W$type != "rectangle")
+    stop("Not yet implemented for non-rectangular windows")
+  if(edge != "erode")
+    stop(paste("Only implemented for edge=", dQuote("erode")))
+
+  # split into groups
+  if(is.null(group))
+    Y <- list(X)
+  else {
+    stopifnot(is.factor(group))
+    stopifnot(length(group) == X$n)
+    Y <- lapply(levels(group),
+                function(l, X, group) {X[group == l]},
+                X=X, group=group)
+  }
+
+  ############ loop ################
+  result <- psp(numeric(0), numeric(0), numeric(0), numeric(0),
+                X$window)
+  
+  for(i in seq_along(Y)) {
+    
+    Z <- Y[[i]]
+    
+    # generate random translation vector
+    if(!is.null(radius)) 
+      jump <- runifdisc(1, radius=radius)
+    else {
+      jump <- list(x=runif(1, min=0, max=width),
+                   y=runif(1, min=0, max=height))
+    }
+    # translate segments
+    Zsh <- shift(Z, c(jump$x, jump$y))
+    Zsh$window <- W
+
+    # append to result
+    result <- append.psp(result, Zsh)
+  }
+
+  # clip 
+  if(!is.null(clip))
+   result <- result[clip]
+
+  return(result)
+}
+
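+# Usage sketch (hedged; a small random segment pattern):
+#   L <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+#   Lsh <- rshift(L, radius=0.05)   # edge="erode" is the default for psp
+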
diff --git a/R/satpiece.R b/R/satpiece.R
new file mode 100755
index 0000000..74b84dd
--- /dev/null
+++ b/R/satpiece.R
@@ -0,0 +1,136 @@
+#
+#
+#    satpiece.R
+#
+#    $Revision: 1.16 $	$Date: 2016/02/16 01:39:12 $
+#
+#    Saturated pairwise interaction process with piecewise constant potential
+#
+#    SatPiece()   create an instance of the process
+#                 [an object of class 'interact']
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+SatPiece <- local({
+
+  # ..... auxiliary functions ......
+
+  delSP <- function(i, r, sat) {
+    r   <- r[-i]
+    sat <- sat[-i]
+    nr <- length(r)
+    if(nr == 0) return(Poisson())
+    if(nr == 1) return(Geyer(r, sat))
+    return(SatPiece(r, sat))
+  }
+
+  # ....... template object ..........
+  
+  BlankSatPiece <- 
+    list(
+         name     = "piecewise constant Saturated pairwise interaction process",
+         creator  = "SatPiece",
+         family   = "pairsat.family", # evaluated later
+         pot      = function(d, par) {
+                       r <- par$r
+                       nr <- length(r)
+                       out <- array(FALSE, dim=c(dim(d), nr))
+                       out[,,1] <- (d < r[1])
+                       if(nr > 1) {
+                         for(i in 2:nr) 
+                           out[,,i] <- (d >= r[i-1]) & (d < r[i])
+                       }
+                       out
+                    },
+         par      = list(r = NULL, sat=NULL), # filled in later
+         parnames = c("interaction thresholds", "saturation parameters"),
+         init     = function(self) {
+                      r <- self$par$r
+                      sat <- self$par$sat
+                      if(!is.numeric(r) || !all(r > 0))
+                        stop("interaction thresholds r must be positive numbers")
+                      if(length(r) > 1 && !all(diff(r) > 0))
+                        stop("interaction thresholds r must be strictly increasing")
+                      if(!is.numeric(sat) || any(sat < 0))
+                        stop("saturation parameters must be nonnegative numbers")
+                      if(any(ceiling(sat) != floor(sat)))
+                        warning("saturation parameter has a non-integer value")
+                      if(length(sat) != length(r) && length(sat) != 1)
+                        stop("sat must have the same length as r, or length 1")
+                    },
+         update = NULL,  # default OK
+         print = NULL,    # default OK
+         interpret =  function(coeffs, self) {
+           r <- self$par$r
+           npiece <- length(r)
+           # extract coefficients
+           gammas <- exp(as.numeric(coeffs))
+           # name them
+           gn <- gammas
+           names(gn) <- paste("[", c(0,r[-npiece]),",", r, ")", sep="")
+           #
+           return(list(param=list(gammas=gammas),
+                       inames="interaction parameters gamma_i",
+                       printable=dround(gn)))
+         },
+        valid = function(coeffs, self) {
+           # interaction parameters gamma must be
+           #   non-NA 
+           #   finite, if sat > 0
+           #   less than 1, if sat = Inf
+           gamma <- (self$interpret)(coeffs, self)$param$gammas
+           sat <- self$par$sat
+           if(anyNA(gamma))
+             return(FALSE)
+           return(all((is.finite(gamma) | sat == 0)
+                      & (gamma <= 1 | sat != Inf)))
+        },
+        project = function(coeffs, self){
+          loggammas <- as.numeric(coeffs)
+          sat <- self$par$sat
+          r   <- self$par$r
+          ok <- is.finite(loggammas) & (is.finite(sat) | loggammas <= 0)
+          if(all(ok))
+            return(NULL)
+          if(!any(ok))
+            return(Poisson())
+          bad <- !ok
+          if(spatstat.options("project.fast") || sum(bad) == 1) {
+            # remove smallest threshold with an unidentifiable parameter
+            firstbad <- min(which(bad))
+            return(delSP(firstbad, r, sat))
+          } else {
+            # consider all candidate submodels
+            subs <- lapply(which(bad), delSP, r=r, sat=sat)
+            return(subs)
+          }
+        },
+        irange = function(self, coeffs=NA, epsilon=0, ...) {
+          r <- self$par$r
+          sat <- self$par$sat
+          if(all(is.na(coeffs)))
+            return(2 * max(r))
+          gamma <- (self$interpret)(coeffs, self)$param$gammas
+          gamma[is.na(gamma)] <- 1
+          active <- (abs(log(gamma)) > epsilon) & (sat > 0)
+          if(!any(active))
+            return(0)
+          else return(2 * max(r[active]))
+        },
+       version=NULL # added later
+  )
+  class(BlankSatPiece) <- "interact"
+
+  SatPiece <- function(r, sat) {
+    instantiate.interact(BlankSatPiece, list(r=r, sat=sat))
+  }
+
+  SatPiece <- intermaker(SatPiece, BlankSatPiece)
+  
+  SatPiece
+})
+
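+# Usage sketch (hedged): a two-threshold saturated interaction,
+# suitable as the 'interaction' argument of ppm:
+#   int <- SatPiece(r=c(0.05, 0.1), sat=c(1, 1))
+#   # fit <- ppm(cells ~ 1, interaction=int)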
+
+                  
diff --git a/R/saturated.R b/R/saturated.R
new file mode 100755
index 0000000..e8e1e90
--- /dev/null
+++ b/R/saturated.R
@@ -0,0 +1,59 @@
+#
+#
+#    saturated.R
+#
+#    $Revision: 1.8 $	$Date: 2015/10/21 09:06:57 $
+#
+#    Saturated pairwise process with user-supplied potential
+#
+#    Saturated()  create a saturated pairwise process
+#                 [an object of class 'interact']
+#                 with user-supplied potential
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+Saturated <- function(pot, name) {
+  if(missing(name))
+    name <- "Saturated process with user-defined potential"
+  
+  fop <- names(formals(pot))
+  if(!identical(all.equal(fop, c("d", "par")), TRUE)
+     && !identical(all.equal(fop, c("d", "tx", "tu", "par")), TRUE))
+    stop(paste("Formal arguments of pair potential function",
+               sQuote("pot"),
+               "must be either (d, par) or (d, tx, tu, par)"))
+
+  out <- 
+  list(
+         name     = name,
+         creator  = "Saturated",
+         family    = pairsat.family,
+         pot      = pot,
+         par      = NULL,
+         parnames = NULL,
+         init     = NULL,
+         update   = function(self, ...){
+           do.call(Saturated,
+                   resolve.defaults(list(...),
+                                    list(pot=self$pot, name=self$name)))
+         } , 
+         print = function(self) {
+           cat("Potential function:\n")
+           print(self$pot)
+           invisible()
+         },
+       version=versionstring.spatstat()
+  )
+  class(out) <- "interact"
+  return(out)
+}
+
+Saturated <-
+    intermaker(Saturated,
+               list(creator="Saturated",
+                    name="saturated process with user-defined potential",
+                    par=formals(Saturated),
+                    parnames=list("the potential",
+                        "the name of the interaction")))
diff --git a/R/scanstat.R b/R/scanstat.R
new file mode 100644
index 0000000..4843a10
--- /dev/null
+++ b/R/scanstat.R
@@ -0,0 +1,317 @@
+##
+##  scanstat.R
+##
+##  Spatial scan statistics
+##
+##  $Revision: 1.17 $  $Date: 2017/06/05 10:31:58 $
+##
+
+scanmeasure <- function(X, ...){
+  UseMethod("scanmeasure")
+}
+
+
+scanmeasure.ppp <- function(X, r, ..., method=c("counts", "fft")) {
+  method <- match.arg(method)
+  check.1.real(r)
+  ## enclosing window
+  R <- as.rectangle(as.owin(X))
+  ## determine pixel resolution  
+  M <- as.mask(R, ...)
+  ## expand domain to include centres of all circles intersecting R
+  W <- grow.mask(M, r)
+  ## 
+  switch(method,
+         counts = {
+           ## direct calculation using C code
+           ## get new dimensions
+           dimyx <- W$dim
+           xr <- W$xrange
+           yr <- W$yrange
+           nr <- dimyx[1]
+           nc <- dimyx[2]
+           ##
+           n <- npoints(X)
+           zz <- .C("scantrans",
+                    x=as.double(X$x),
+                    y=as.double(X$y),
+                    n=as.integer(n),
+                    xmin=as.double(xr[1]),
+                    ymin=as.double(yr[1]),
+                    xmax=as.double(xr[2]),
+                    ymax=as.double(yr[2]),
+                    nr=as.integer(nr),
+                    nc=as.integer(nc),
+                    R=as.double(r),
+                    counts=as.integer(numeric(prod(dimyx))),
+                    PACKAGE = "spatstat")
+           zzz <- matrix(zz$counts, nrow=dimyx[1], ncol=dimyx[2], byrow=TRUE)
+           Z <- im(zzz, xrange=xr, yrange=yr, unitname=unitname(X))
+         },
+         fft = {
+           ## Previous version of scanmeasure.ppp had
+           ##    Y <- pixellate(X, ..., padzero=TRUE)
+           ## but this is liable to Gibbs phenomena.
+           ## Instead, convolve with small Gaussian (sd = 1 pixel width)
+           sigma <- with(W, unique(c(xstep, ystep)))
+           Y <- density(X, ..., sigma=sigma)
+           ## invoke scanmeasure.im
+           Z <- scanmeasure(Y, r)
+           Z <- eval.im(as.integer(round(Z)))
+         })
+  return(Z)
+}
+
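+## Usage sketch (hedged): disc counts for the 'cells' data, returned as
+## a pixel image indexed by the disc centre:
+##   Z <- scanmeasure(cells, r=0.1, method="counts")
+##   ## plot(Z)
+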
+scanmeasure.im <- function(X, r, ...) {
+  D <- disc(radius=r)
+  eps <- with(X, c(xstep,ystep))
+  if(any(eps >= 2 * r)) return(eval.im(X * pi * r^2))
+  D <- as.im(as.mask(D, eps=eps))
+  Z <- imcov(X, D)
+  return(Z)
+}
+
+scanPoisLRTS <- function(nZ, nG, muZ, muG, alternative) {
+  nZco <- nG - nZ
+  muZco <- muG - muZ
+  nlogn <- function(n, a) ifelse(n == 0, 0, n * log(n/a))
+  ll <- nlogn(nZ, muZ) + nlogn(nZco, muZco) - nlogn(nG, muG)
+  criterion <- (nZ * muZco - muZ * nZco)
+  switch(alternative,
+         less={
+           ll[criterion > 0] <- 0
+         },
+         greater={
+           ll[criterion < 0] <- 0
+         },
+         two.sided={})
+  return(2 * ll)
+}
+
+scanBinomLRTS <- function(nZ, nG, muZ, muG, alternative) {
+  nZco <- nG - nZ
+  muZco <- muG - muZ
+  nlogn <- function(n, a) ifelse(n == 0, 0, n * log(n/a))
+  logbin <- function(k, n) { nlogn(k, n) + nlogn(n-k, n) }
+  ll <- logbin(nZ, muZ) + logbin(nZco, muZco) - logbin(nG, muG)
+  criterion <- (nZ * muZco - muZ * nZco)
+  switch(alternative,
+         less={
+           ll[criterion > 0] <- 0
+         },
+         greater={
+           ll[criterion < 0] <- 0
+         },
+         two.sided={})
+  return(2 * ll)
+}
+
+scanLRTS <- function(X, r, ...,
+                       method=c("poisson", "binomial"),
+                       baseline=NULL,
+                       case=2,
+                       alternative=c("greater", "less", "two.sided"),
+                       saveopt = FALSE,
+                       Xmask=NULL) {
+  stopifnot(is.ppp(X))
+  stopifnot(check.nvector(r))
+  method <- match.arg(method)
+  alternative <- match.arg(alternative)
+  if(is.null(Xmask)) Xmask <- as.mask(as.owin(X), ...)
+  switch(method,
+         poisson={
+           Y <- X
+           if(is.null(baseline)) {
+             mu <- as.im(Xmask, value=1)
+           } else if(is.ppm(baseline)) {
+             if(is.marked(baseline))
+               stop("baseline is a marked point process: not supported")
+             mu <- predict(baseline, locations=Xmask)
+           } else if(is.im(baseline) || is.function(baseline)) {
+             mu <- as.im(baseline, W=Xmask)
+           } else stop(paste("baseline should be",
+                             "a pixel image, a function, or a fitted model"))
+           nG <- npoints(Y)
+         },
+         binomial={
+           stopifnot(is.multitype(X))
+           lev <- levels(marks(X))
+           if(length(lev) != 2)
+             warning("X should usually be a bivariate (2-type) point pattern")
+           if(!is.null(baseline))
+             stop("baseline is not supported in the binomial case")
+           if(is.character(case) && !(case %in% lev))
+             stop(paste("Unrecognised label for cases:", sQuote(case)))
+           if(is.numeric(case) && !(case %in% seq_along(lev)))
+             stop(paste("Undefined level:", case))
+           Y <- split(X)[[case]]
+           nG <- npoints(Y)
+           mu <- unmark(X)
+         })
+  ## The following line ensures that the same pixel resolution information
+  ## is passed to the two calls to 'scanmeasure' below
+  Y$window <- Xmask
+  ## 
+  nr <- length(r)
+  lrts <- vector(mode="list", length=nr)
+  for(i in 1:nr) {
+    ri <- r[i]
+    nZ <- scanmeasure(Y, ri)
+    muZ <- scanmeasure(mu, ri)
+    if(!compatible.im(nZ, muZ)) {
+      ha <- harmonise.im(nZ, muZ)
+      nZ <- ha[[1]]
+      muZ <- ha[[2]]
+    }
+    switch(method,
+           poisson = {
+             muG <- integral.im(mu)
+             lrts[[i]] <- eval.im(scanPoisLRTS(nZ, nG, muZ, muG, alternative))
+           },
+           binomial = {
+             muG <- npoints(mu)
+             lrts[[i]] <- eval.im(scanBinomLRTS(nZ, nG, muZ, muG, alternative))
+           })
+  }
+  if(length(lrts) == 1) {
+    result <- lrts[[1]]
+  } else {
+    result <- im.apply(lrts, max)
+    if(saveopt)
+      attr(result, "iopt") <- im.apply(lrts, which.max)
+  }
+  return(result)
+}
+
+scan.test <- function(X, r, ...,
+                      method=c("poisson", "binomial"),
+                      nsim = 19,
+                      baseline=NULL,
+                      case = 2,
+                      alternative=c("greater", "less", "two.sided"),
+                      verbose=TRUE) {
+  dataname <- short.deparse(substitute(X))
+  stopifnot(is.ppp(X))
+  method <- match.arg(method)
+  alternative <- match.arg(alternative)
+  stopifnot(is.numeric(r))
+  check.1.real(nsim)
+  if(!(round(nsim) == nsim && nsim > 1))
+    stop("nsim should be an integer > 1")
+  regionname <-
+    paste("circles of radius",
+          if(length(r) == 1) r else paste("between", min(r), "and", max(r)))
+  ##
+  ## compute observed loglikelihood function
+  ## This also validates the arguments.
+  obsLRTS <- scanLRTS(X=X, r=r,
+                          method=method,
+                          alternative=alternative, baseline=baseline,
+                          case=case, ..., saveopt=TRUE)
+  obs <- max(obsLRTS)
+  sim <- numeric(nsim)
+  ## determine how to simulate
+  switch(method,
+         binomial={
+           methodname <- c("Spatial scan test",
+                           "Null hypothesis: constant relative risk",
+                           paste("Candidate cluster regions:", regionname),
+                           "Likelihood: binomial",
+                           paste("Monte Carlo p-value based on",
+                                 nsim, "simulations"))
+
+           lev <- levels(marks(X))
+           names(lev) <- lev
+           casename <- lev[case]
+           counted <- paste("points with mark", sQuote(casename), 
+                            "inside cluster region")
+           simexpr <- expression(rlabel(X))
+         },
+         poisson={
+           counted <- paste("points inside cluster region")
+           X <- unmark(X)
+           Xwin <- as.owin(X)
+           Xmask <- as.mask(Xwin, ...)
+           if(is.null(baseline)) {
+             nullname <- "Complete Spatial Randomness (CSR)"
+             lambda <- intensity(X)
+             simexpr <- expression(runifpoispp(lambda, Xwin))
+             dont.complain.about(lambda)
+           } else if(is.ppm(baseline)) {
+             nullname <- baseline$callstring
+             rmhstuff <- rmh(baseline, preponly=TRUE, verbose=FALSE)
+             simexpr <- expression(rmhEngine(rmhstuff))
+             dont.complain.about(rmhstuff)
+           } else if(is.im(baseline) || is.function(baseline)) {
+             nullname <- "Poisson process with intensity proportional to baseline"
+             base <- as.im(baseline, W=Xmask)
+             alpha <- npoints(X)/integral.im(base)
+             lambda <- eval.im(alpha * base)
+             simexpr <- expression(rpoispp(lambda))
+             dont.complain.about(lambda)
+           } else stop(paste("baseline should be",
+                             "a pixel image, a function, or a fitted model"))
+           methodname <- c("Spatial scan test",
+                           paste("Null hypothesis:", nullname),
+                           paste("Candidate cluster regions:", regionname),
+                           "Likelihood: Poisson",
+                           paste("Monte Carlo p-value based on",
+                                 nsim, "simulations"))
+         })
+  if(verbose) {
+    cat("Simulating...")
+    pstate <- list()
+  }
+  for(i in 1:nsim) {
+    if(verbose) pstate <- progressreport(i, nsim, state=pstate)
+    Xsim <- eval(simexpr)
+    simLRTS <- scanLRTS(X=Xsim, r=r,
+                         method=method, alternative=alternative,
+                         baseline=baseline,
+                         case=case,
+                         ...)
+    sim[i] <- max(simLRTS)
+  }
+  pval <- mean(c(sim,obs) >= obs, na.rm=TRUE)
+  names(obs) <- "maxLRTS"
+  nm.alternative <- switch(alternative,
+                           greater="Excess of",
+                           less="Deficit of",
+                           two.sided="Two-sided: excess or deficit of",
+                           stop("Unknown alternative"))
+  nm.alternative <- paste(nm.alternative, counted)
+  result <- list(statistic = obs,
+                 p.value = pval,
+                 alternative = nm.alternative, 
+                 method = methodname,
+                 data.name = dataname)
+  class(result) <- c("scan.test", "htest")
+  attr(result, "obsLRTS") <- obsLRTS
+  attr(result, "X") <- X
+  attr(result, "r") <- r
+  return(result)
+}
+
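+## Usage sketch (hedged; small nsim for speed):
+##   st <- scan.test(redwood, r=0.1, method="poisson", nsim=19)
+##   st        # prints the htest summary
+##   plot(st)  # image of the scan statistic (see plot.scan.test below)
+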
+plot.scan.test <- function(x, ..., what=c("statistic", "radius"),
+                           do.window=TRUE) {
+  xname <- short.deparse(substitute(x))
+  what <- match.arg(what)
+  Z <- as.im(x, what=what)
+  do.call(plot, resolve.defaults(list(x=Z), list(...), list(main=xname)))
+  if(do.window) {
+    X <- attr(x, "X")
+    plot(as.owin(X), add=TRUE, invert=TRUE)
+  }
+  invisible(NULL)
+}
+
+as.im.scan.test <- function(X, ..., what=c("statistic", "radius")) {
+  Y <- attr(X, "obsLRTS")
+  what <- match.arg(what)
+  if(what == "radius") {
+    iopt <- attr(Y, "iopt")
+    r <- attr(X, "r")
+    Y <- eval.im(r[iopt])
+  }
+  return(as.im(Y, ...))
+}
diff --git a/R/scriptUtils.R b/R/scriptUtils.R
new file mode 100644
index 0000000..43a4093
--- /dev/null
+++ b/R/scriptUtils.R
@@ -0,0 +1,39 @@
+## scriptUtils.R
+##       $Revision: 1.4 $ $Date: 2014/02/07 06:58:43 $
+
+## slick way to use precomputed data
+##    If the named file exists, it is loaded, giving access to the data.
+##    Otherwise, 'expr' is evaluated, and all objects created
+##    are saved in the designated file, for loading next time.
+
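+## Usage sketch (hypothetical file name and objects):
+##   reload.or.compute("Kresults.rda", {
+##     K <- Kest(cells)
+##     fit <- ppm(cells ~ 1)
+##   })
+## First run: evaluates the block and saves 'K' and 'fit' in "Kresults.rda".
+## Later runs: loads the saved objects into the calling frame.
+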
+reload.or.compute <- function(filename, expr, 
+                              objects=NULL,
+                              destination=parent.frame()) {
+  stopifnot(is.character(filename) && length(filename) == 1)
+  if(!file.exists(filename)) {
+    ## evaluate 'expr' in a fresh environment
+    ee <- as.expression(substitute(expr))
+    en <- new.env()
+    local(eval(ee), envir=en)
+    ## default is to save all objects that were created
+    if(is.null(objects))
+      objects <- ls(envir=en)
+    ## save them in the designated file
+    evalq(save(list=objects, file=filename, compress=TRUE), envir=en)
+    ## assign them into the parent frame 
+    for(i in seq_along(objects))
+      assign(objects[i], get(objects[i], envir=en), envir=destination)
+    result <- objects
+  } else {
+    result <- load(filename, envir=destination)
+    if(!all(ok <- (objects %in% result))) {
+      nbad <- sum(!ok)
+      warning(paste(ngettext(nbad, "object", "objects"),
+                    commasep(sQuote(objects[!ok])),
+                    ngettext(nbad, "was", "were"),
+                    "not present in data file", dQuote(filename)),
+              call.=FALSE)
+    }
+  }
+  return(invisible(result))
+}
diff --git a/R/sdr.R b/R/sdr.R
new file mode 100644
index 0000000..40c78c1
--- /dev/null
+++ b/R/sdr.R
@@ -0,0 +1,263 @@
+#'
+#'    sdr.R
+#'
+#'  Sufficient Dimension Reduction
+#'
+#'  Matlab original: Yongtao Guan
+#'  Translated to R by:  Suman Rakshit
+#'  Adapted for spatstat: Adrian Baddeley
+#'
+#'  GNU General Public Licence 2.0 || 3.0
+#'
+#'    $Revision: 1.11 $  $Date: 2016/11/26 07:41:34 $
+#'
+
+sdr <- local({
+
+  sdr <- function(X, covariates, method=c("DR", "NNIR", "SAVE", "SIR", "TSE"),
+                  Dim1=1, Dim2=1, predict=FALSE) {
+    stopifnot(is.ppp(X))
+    method <- match.arg(method)
+    #' ensure 'covariates' is a list of compatible images
+    if(!inherits(covariates, "imlist") && !all(sapply(covariates, is.im)))
+      stop("Argument 'covariates' must be a list of images")
+    nc <- length(covariates)
+    if(nc == 0)
+      stop("Need at least one covariate!")
+    if(nc < Dim1 + (method == "TSE") * Dim2)
+      stop(paste(if(method == "TSE") "Dim1 + Dim2" else "Dim1",
+                 "must not exceed the number of covariates"),
+           call.=FALSE)
+    if(nc > 1 && !do.call(compatible, unname(covariates)))
+      covariates <- do.call(harmonise, covariates)
+    #' extract corresponding pixel values including NA's
+    Ypixval <- sapply(lapply(covariates, as.matrix), as.vector)
+    #' compute sample mean and covariance matrix
+    m <- colMeans(Ypixval, na.rm=TRUE)
+    V <- cov(Ypixval, use="complete")
+    #' evaluate each image at point data locations
+    YX <- sapply(covariates, safelook, Y=X)
+    #' apply precomputed standardisation
+    Zx <- t(t(YX) - m) %*% matrixinvsqrt(V)
+    #' ready
+    coordsX <- coords(X)
+    result <-
+      switch(method,
+             DR   =   calc.DR(COV=V, z=Zx,              Dim=Dim1),
+             NNIR = calc.NNIR(COV=V, z=Zx, pos=coordsX, Dim=Dim1),
+             SAVE = calc.SAVE(COV=V, z=Zx,              Dim=Dim1),
+             SIR  =  calc.SIR(COV=V, z=Zx                      ),
+             TSE  =  calc.TSE(COV=V, z=Zx, pos=coordsX, Dim1=Dim1, Dim2=Dim2)
+             )
+    #'
+    covnames <- names(covariates) %orifnull% paste0("Y", 1:nc)
+    dimnames(result$B) <- list(covnames, paste0("B", 1:ncol(result$B)))
+    if(method == "TSE") {
+      result$M1 <- namez(result$M1)
+      result$M2 <- namez(result$M2)
+    } else {
+      result$M <- namez(result$M)
+    }
+    if(predict) result$Y <- sdrPredict(covariates, result$B)
+    return(result)
+  }
+
+  safelook <- function(Z, Y, ...) { safelookup(Z, Y, ...) }
+
+  namez <- function(M, prefix="Z") {
+    dimnames(M) <- list(paste0(prefix, 1:nrow(M)),
+                     paste0(prefix, 1:ncol(M)))
+    return(M)
+  }
+  
+  sdr
+})
+
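+## Usage sketch (hedged; 'bei' and the covariate image list 'bei.extra'
+## ship with spatstat):
+##   res <- sdr(bei, bei.extra, method="DR", Dim1=1)
+##   res$B    # estimated basis of the central subspace
+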
+sdrPredict <- function(covariates, B) {
+  if(!is.matrix(B)) {
+    if(is.list(B) && is.matrix(BB <- B$B)) B <- BB else
+    stop("B should be a matrix, or the result of a call to sdr()",
+         call.=FALSE)
+  }
+  if(!inherits(covariates, "imlist") && !all(sapply(covariates, is.im)))
+    stop("Argument 'covariates' must be a list of images")
+  stopifnot(nrow(B) == length(covariates))
+  result <- vector(mode="list", length=ncol(B))
+  for(j in seq_along(result)) {
+    cj <- as.list(B[,j])
+    result[[j]] <- Reduce("+", mapply("*", cj, covariates, SIMPLIFY=FALSE))
+  }
+  names(result) <- colnames(B)
+  return(as.solist(result))
+}
+
+##............ DR (Directional Regression) ..........................
+calc.DR <- function(COV, z, Dim){
+  ## Description: Naive Directional Regression Method
+  ## Input:
+  ##   COV - cov{X(s)}
+  ##   z   - standardized X(s) on SPP locations
+  ##   Dim - the CS dimension
+  ## Output:
+  ##   B   - the estimated CS basis
+  ##   M - the kernel matrix
+  ss <- nrow(z)
+  ncov <- ncol(z)
+  M1 <- (t(z) %*% z)/ss - diag(1,ncov)
+  M1 <- M1 %*% M1                             # the SAVE kernel
+  covMean <- matrix(colMeans(z),ncol=1)
+  M2 <- covMean %*% t(covMean)
+  M3 <- M2 * (base::norm(covMean, type="2"))^2      # the SIR kernel
+  M2 <- M2 %*% M2                             # the SIR-2 kernel
+  M <- (M1 + M2 + M3)/3                       # the DR kernel
+  SVD <- svd(M)
+  B <- SVD$u[,1:Dim]
+  B <- matrixinvsqrt(COV) %*% B               # back to original scale
+  return(list(B=B, M=M))
+}
+
+
+## ............ NNIR (Nearest Neighbor Inverse Regression) ...........
+calc.NNIR <- function(COV, z, pos, Dim) {
+  ## Description: Nearest Neighbor Inverse Regression
+  ## Input:
+  ##   COV - cov{X(s)}
+  ##   z   - standardized X(s) on SPP locations
+  ##   pos - the position of SPP events
+  ##   Dim - the CS dimension
+  ## Output:
+  ##   B   - the estimated CS basis
+  ##   M - the kernel matrix
+
+  ss   <- nrow(z)   # sample size
+#  ncov <- ncol(z)   # predictor dimension
+  jj <- nnwhich(pos) # identify nearest neighbour of each point
+  dir <- z - z[jj, , drop=FALSE] # empirical direction
+  IM <- sumouter(dir) # inverse of kernel matrix: sum of outer(dir[i,], dir[i,])
+  M <- solve(IM/ss)   # invert kernel matrix
+  SVD <- svd(M)
+  B <- matrixinvsqrt(COV) %*% SVD$u[, 1:Dim, drop=FALSE]
+  return(list(B=B, M=M))
+}
+
+## ...........  SAVE (Sliced Average Variance Estimation) ...........
+calc.SAVE <- function(COV, z, Dim){
+  ## Description: Sliced Average Variance Estimation (SAVE)
+  ## Input
+  ##   COV - cov{X(s)}
+  ##   z - standardized X(s) on SPP locations
+  ##   Dim - the central space dimension
+  ## Value
+  ##   B - the estimated CS basis
+  ##   M - the kernel matrix
+#  ss <- nrow(z)
+  ncov <- ncol(z)
+  M <- diag(1,ncov) - cov(z)
+  M <- M %*% M
+  SVD <- svd(M)
+  B <- SVD$u[,1:Dim]
+  B <- matrixinvsqrt(COV) %*% B
+  return(list(B=B, M=M))
+}
+
+##..........  SIR (Sliced Inverse Regression) ......................
+calc.SIR <- function(COV, z){
+  ## Description: Sliced Inverse Regression (SIR)
+  ## Input:
+  ##   COV - cov{X(s)}
+  ##   z   - standardized X(s) on SPP locations
+  ## Output:
+  ##   B   - the estimated CS basis
+  ##   M - the kernel matrix
+  covMean <- colMeans(z)
+  B <- matrixinvsqrt(COV) %*% covMean    # do SIR estimation
+  B <- B/sqrt(sum(B^2))                   # normalise to unit length
+  M <- covMean %*% t(covMean)             # create kernel matrix
+  return(list(B=B, M=M))
+}
+
+## .............  TSE (Two-Step Estimation) ....................
+calc.TSE <- function(COV, z, pos, Dim1, Dim2) {
+  ## Description: A Two-Step Method
+  ## Input:
+  ##   COV - cov{X(s)}
+  ##   z   - standardized X(s) on SPP locations
+  ##   Dim1 - the S1 dimension
+  ##   Dim2 - the S2 dimension
+  ## Output:
+  ##   B   - the estimated CS basis. Its first Dim1 columns
+  ##       are estimating S1 and the remaining Dim2 columns are
+  ##       estimating S2. In case of null space, a zero vector is reported.
+  ##   M1  - the kernel matrix of DR
+  ##   M2  - the kernel matrix of NNIR, which might be subject 
+  ##         to some change, depending on the results of M1.
+
+#  ss   <- nrow(z)  # sample size
+  ncov <- ncol(z)  # predictor dimension
+
+  est1 <- calc.DR(COV, z, ncov)           # do DR estimation
+  est2 <- calc.NNIR(COV, z, pos, ncov)  # do NNIR estimation
+  M1 <- est1$M
+  M2 <- est2$M
+
+  if(Dim1 > 0) {
+    U <- svd(M1)$u
+    B1 <- U[ , 1:Dim1, drop=FALSE]  # get S1 estimate
+    Q  <- diag(1, ncov) - B1 %*% solve(t(B1) %*% B1) %*% t(B1)
+                     # projector onto the orthogonal complement of B1
+    M2 <- Q %*% M2 %*% Q  # do constrained NNIR
+  } else {
+    B1 <- matrix(0, ncov, 1)
+  }
+
+  if(Dim2 > 0) {
+    U <- svd(M2)$u   # do SVD for possibly updated M2
+    B2 <- U[ , 1:Dim2, drop=FALSE]  # get basis estimator
+  } else {
+    B2 <- matrix(0, ncov, 1)
+  }
+  B <- matrixinvsqrt(COV) %*% cbind(B1,B2)
+  return(list(B=B, M1=M1, M2=M2))
+}
+
+## //////////////////  ADDITIONAL FUNCTIONS /////////////////////
+
+subspaceDistance <- function(B0,B1) {
+  ## ======================================================== #
+  ## Evaluate the distance between the two linear spaces S(B0) and S(B1). 
+  ## The measure used is the one proposed by Li et al. (2004). 
+  ## ======================================================== #
+  stopifnot(is.matrix(B0))
+  stopifnot(is.matrix(B1))
+  Proj0 <- B0 %*% solve((t(B0) %*% B0)) %*% t(B0)  # Proj matrix on S(B0)
+  lam <- svd(B1) # check whether B1 is singular
+  U <- lam$u
+  D <- lam$d
+#  V <- lam$v
+  B2 <- U[, D > 1e-09] # keep non-singular directions
+  Proj1 <- B2 %*% solve((t(B2) %*% B2)) %*% t(B2) # Proj matrix on S(B.hat)
+  Svd <- svd(Proj0 - Proj1)  # Do svd for P0-P1
+  dist <- max(abs(Svd$d)) # Get the maximum absolute svd value
+  return(dist)
+}
+
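+## Sanity checks (hedged): identical 1-D subspaces are at distance 0,
+## orthogonal ones at distance 1:
+##   subspaceDistance(matrix(c(1,0),2,1), matrix(c(1,0),2,1))  # 0
+##   subspaceDistance(matrix(c(1,0),2,1), matrix(c(0,1),2,1))  # 1
+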
+dimhat <- function(M){
+  #' Description: Maximum Descent Estimator for CS Dim
+  #' Input:
+  #'   M   - the estimated kernel matrix
+  #' Output:
+  #'   dimhat   - the estimated CS dim (assume dim>0)
+  stopifnot(is.matrix(M))
+  ncov <- ncol(M)                       # predictor dimension
+  maxdim <- max((ncov-1), 5)            # maximum structure dimension
+  SVD <- svd(M)                         # svd of kernel matrix
+  lam <- SVD$d
+  eps <- 1e-06
+  lam <- lam + rep(eps,ncov)            # add ridge effect
+  lam1 <- lam[-ncov]
+  lam2 <- lam[-1]
+  dif <- lam1/lam2
+  dif <- dif[1 : maxdim]                # the magnitude of drop
+  retval <- which.max(dif)              # find Maximum Descent estimator
+  return(retval)
+}
diff --git a/R/segtest.R b/R/segtest.R
new file mode 100644
index 0000000..d203f3b
--- /dev/null
+++ b/R/segtest.R
@@ -0,0 +1,62 @@
+#'
+#'      segtest.R
+#'
+#'   Monte Carlo test of segregation for multitype patterns
+#'
+#'    $Revision: 1.3 $ $Date: 2015/07/11 08:19:26 $
+#'
+
+segregation.test <- function(X, ...) {
+  UseMethod("segregation.test")
+}
+
+segregation.test.ppp <- function(X, ..., nsim=19, permute=TRUE,
+                                 verbose=TRUE, Xname) {
+  if(missing(Xname))
+    Xname <- short.deparse(substitute(X))
+  stopifnot(is.ppp(X))
+  stopifnot(is.multitype(X))
+  verboten <- c("at", "relative", "se", "leaveoneout",
+                "casecontrol", "case", "control")
+  if(any(nyet <- (verboten %in% names(list(...)))))
+    stop(paste(ngettext(sum(nyet), "Argument", "Arguments"),
+               commasep(sQuote(verboten[nyet])),
+               "cannot be used"))
+  lam <- intensity(X)
+  pbar <- lam/sum(lam)
+  np <- npoints(X)
+  nt <- length(pbar)
+  pbar <- matrix(pbar, byrow=TRUE, nrow=np, ncol=nt)
+  if(verbose) cat("Computing observed value... ")
+  phat <- relrisk(X, at="points", ...)
+  obs <- mean((phat-pbar)^2)
+  if(verbose) {
+    cat("Done.\nComputing simulated values... ")
+    pstate <- list()
+  }
+  sim <- numeric(nsim)
+  for(i in 1:nsim) {
+    Xsim <- rlabel(X, permute=permute)
+    phatsim <- relrisk(Xsim, at="points", ...)
+    if(permute) pbarsim <- pbar else {
+      lamsim <- intensity(Xsim)
+      pbarsim <- lamsim/sum(lamsim)
+      pbarsim <- matrix(pbarsim, byrow=TRUE, nrow=np, ncol=nt)
+    }
+    sim[i] <- mean((phatsim - pbarsim)^2)
+    if(verbose) pstate <- progressreport(i, nsim, state=pstate)
+  }
+  p.value <- (1+sum(sim >= obs))/(1+nsim)
+  names(obs) <- "T"
+  out <- list(statistic=obs,
+              p.value=p.value,
+              method="Monte Carlo test of spatial segregation of types",
+              data.name=Xname)
+  class(out) <- "htest"
+  return(out)
+}
+
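+#' Usage sketch (hedged; 'amacrine' is a bivariate multitype pattern):
+#'   segregation.test(amacrine, nsim=19)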
+
+
+
+
diff --git a/R/setcov.R b/R/setcov.R
new file mode 100755
index 0000000..a0e188c
--- /dev/null
+++ b/R/setcov.R
@@ -0,0 +1,117 @@
+#
+#
+#     setcov.R
+#
+#     $Revision: 1.15 $ $Date: 2017/06/05 10:31:58 $
+#
+#    Compute the set covariance function of a window
+#    or the (noncentred) spatial covariance function of an image
+#
+
+setcov <- function(W, V=W, ...) {
+  W <- as.owin(W)
+  # pixel approximation
+  mW <- as.mask(W, ...)
+  Z <- as.im(mW, na.replace=0)
+  if(missing(V)) 
+    return(imcov(Z))
+  # cross-covariance
+  V <- as.owin(V)
+  mV <- as.mask(V, ...)
+  Z2 <- as.im(mV, na.replace=0)
+  imcov(Z, Z2)
+}
+
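+# Usage sketch (hedged): set covariance of the window 'letterR':
+#   A <- setcov(letterR)
+#   # A is a pixel image; its value at the origin approximates
+#   # area.owin(letterR), and it decays to zero for large shifts.
+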
+imcov <- function(X, Y=X) {
+  if(missing(Y)) Y <- NULL
+  convolve.im(X, Y, reflectX = FALSE, reflectY=TRUE)
+}
+
+convolve.im <- function(X, Y=X, ..., reflectX=FALSE, reflectY=FALSE) {
+  stopifnot(is.im(X))
+  have.Y <- !missing(Y) && !is.null(Y)
+  crosscov <- have.Y || reflectX || reflectY
+  trap.extra.arguments(..., .Context="In convolve.im")
+  #' Check whether Fastest Fourier Transform in the West is available
+  west <- fftwAvailable()
+  #'
+  if(have.Y) {
+    # cross-covariance 
+    stopifnot(is.im(Y))
+    Xbox <- as.rectangle(X)
+    Ybox <- as.rectangle(Y)
+    # first shift images to common midpoint, to reduce storage
+    Xmid <- centroid.owin(Xbox)
+    Ymid <- centroid.owin(Ybox)
+    svec <- as.numeric(Xmid) - as.numeric(Ymid)
+    Y <- shift(Y, svec)
+    # ensure images are compatible
+    XY <- harmonise.im(X=X, Y=Y)
+    X <- XY$X
+    Y <- XY$Y
+  } else {
+    # Y is missing or NULL
+    Y <- X
+    Xbox <- Ybox <- as.rectangle(X)
+  }
+  M <- X$v
+  M[is.na(M)] <- 0
+  xstep <- X$xstep
+  ystep <- X$ystep
+  # pad with zeroes
+  nr <- nrow(M)
+  nc <- ncol(M)
+  Mpad <- matrix(0, ncol=2*nc, nrow=2*nr)
+  Mpad[1:nr, 1:nc] <- M
+  lengthMpad <- 4 * nc * nr
+  fM <- fft2D(Mpad, west=west)
+  if(!crosscov) {
+    # compute convolution square
+    G <- fft2D(fM^2, inverse=TRUE, west=west)/lengthMpad
+  } else {
+    # compute set cross-covariance or convolution by FFT
+    N <- Y$v
+    N[is.na(N)] <- 0
+    Npad <- matrix(0, ncol=2*nc, nrow=2*nr)
+    Npad[1:nr, 1:nc] <- N
+    fN <- fft2D(Npad, west=west)
+    if(reflectY) fN <- Conj(fN)
+    if(reflectX) fM <- Conj(fM)
+    G <- fft2D(fM * fN, inverse=TRUE, west=west)/lengthMpad
+  }
+#  cat(paste("maximum imaginary part=", max(Im(G)), "\n"))
+  G <- Mod(G) * xstep * ystep
+  if(reflectX != reflectY) {
+    # Currently G[i,j] corresponds to a vector shift of
+    #     dy = (i-1) mod nr, dx = (j-1) mod nc.
+    # Rearrange this periodic function so that 
+    # the origin of translations (0,0) is at matrix position (nr,nc)
+    # NB this introduces an extra row and column
+    G <- G[ ((-nr):nr) %% (2 * nr) + 1, (-nc):nc %% (2*nc) + 1]
+  }
+  # Determine spatial domain of full raster image
+  XB <- as.rectangle(X)
+  YB <- as.rectangle(Y)
+  # undo shift
+  if(have.Y) YB <- shift(YB, -svec)
+  # reflect
+  if(reflectX) XB <- reflect(XB)
+  if(reflectY) YB <- reflect(YB)
+  # Minkowski sum of covering boxes
+  xran <- XB$xrange + YB$xrange
+  yran <- XB$yrange + YB$yrange
+  # Declare spatial domain
+  out <- im(G, xrange = xran, yrange=yran)
+  if(crosscov) {
+    # restrict to actual spatial domain of function
+    if(reflectX) Xbox <- reflect(Xbox)
+    if(reflectY) Ybox <- reflect(Ybox)
+   # Minkowski sum 
+    xran <- Xbox$xrange + Ybox$xrange
+    yran <- Xbox$yrange + Ybox$yrange   
+    XYbox <- owin(xran, yran)
+    out <- out[XYbox, rescue=TRUE]
+  }
+  return(out)
+}
+
diff --git a/R/sharpen.R b/R/sharpen.R
new file mode 100755
index 0000000..c27b2f6
--- /dev/null
+++ b/R/sharpen.R
@@ -0,0 +1,65 @@
+#
+#      sharpen.R
+#
+#      $Revision: 1.6 $  $Date: 2013/08/29 03:52:17 $
+#
+
+sharpen <- function(X, ...) {
+  UseMethod("sharpen")
+}
+
+sharpen.ppp <- function(X, sigma=NULL, ..., varcov=NULL,
+                        edgecorrect=FALSE) {
+  stopifnot(is.ppp(X))
+  Yx <- Smooth(X %mark% X$x,
+               at="points", sigma=sigma, varcov=varcov, edge=TRUE)
+  Yy <- Smooth(X %mark% X$y,
+               at="points", sigma=sigma, varcov=varcov, edge=TRUE)
+  # trap NaN etc
+  nbad <- sum(!(is.finite(Yx) & is.finite(Yy)))
+  if(nbad > 0)
+    stop(paste(nbad,
+               ngettext(nbad, "point is", "points are"),
+               "undefined due to numerical problems;",
+               "smoothing parameter is probably too small"))
+  #
+  W <- as.owin(X)
+  if(edgecorrect) {
+    # convolve x and y coordinate functions with kernel
+    xim <- as.im(function(x,y){x}, W)
+    yim <- as.im(function(x,y){y}, W)
+    xblur <- blur(xim, sigma=sigma, varcov=varcov, normalise=TRUE, ...)
+    yblur <- blur(yim, sigma=sigma, varcov=varcov, normalise=TRUE, ...)
+    # evaluate at data locations 
+    xx <- safelookup(xblur, X, warn=FALSE)
+    yy <- safelookup(yblur, X, warn=FALSE)
+    # estimated vector bias of sharpening procedure
+    xbias <- xx - X$x
+    ybias <- yy - X$y
+    # adjust
+    Yx <- Yx - xbias
+    Yy <- Yy - ybias
+    # check this does not place points outside window
+    if(any(uhoh <- !inside.owin(Yx, Yy, W))) {
+      # determine mass of edge effect
+      edgeim <- blur(as.im(W), sigma=sigma, varcov=varcov, normalise=FALSE, ...)
+      edg <- safelookup(edgeim, X[uhoh], warn=FALSE)
+      # contract bias correction
+      Yx[uhoh] <- (1 - edg) * X$x[uhoh] + edg * Yx[uhoh]
+      Yy[uhoh] <- (1 - edg) * X$y[uhoh] + edg * Yy[uhoh]
+    }
+    # check again
+    if(any(nbg <- !inside.owin(Yx, Yy, W))) {
+      # give up
+      Yx[nbg] <- X$x[nbg]
+      Yy[nbg] <- X$y[nbg]
+    }
+  }
+  # make point pattern
+  Y <- ppp(Yx, Yy, marks=marks(X), window=W)
+  # tack on smoothing information
+  attr(Y, "sigma") <- sigma
+  attr(Y, "varcov") <- varcov
+  attr(Y, "edgecorrected") <- edgecorrect
+  return(Y)
+}
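+
+# Usage sketch (hedged; bandwidth chosen for illustration only):
+#   Y <- sharpen(unmark(shapley), sigma=0.5, edgecorrect=TRUE)
+#   # attr(Y, "sigma") records the bandwidth that was used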
diff --git a/R/sigtrace.R b/R/sigtrace.R
new file mode 100644
index 0000000..a8ea207
--- /dev/null
+++ b/R/sigtrace.R
@@ -0,0 +1,170 @@
+#
+#  sigtrace.R
+#
+#  $Revision: 1.10 $  $Date: 2016/02/11 09:36:11 $
+#
+#  Significance traces 
+#
+
+dclf.sigtrace <- function(X, ...) mctest.sigtrace(X, ..., exponent=2)
+
+mad.sigtrace <- function(X, ...) mctest.sigtrace(X, ..., exponent=Inf)
+
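+#  Usage sketch (hedged; small nsim for speed):
+#    plot(dclf.sigtrace(cells, Lest, nsim=19))
+#  The trace shows how the Monte Carlo p-value evolves as the upper
+#  limit R of the test interval increases.
+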
+mctest.sigtrace <- function(X, fun=Lest, ..., exponent=1,
+                            interpolate=FALSE, alpha=0.05,
+                            confint=TRUE, rmin=0) {
+  check.1.real(exponent)
+  explain.ifnot(exponent >= 0)
+  if(missing(fun) && inherits(X, c("envelope", "hasenvelope")))
+    fun <- NULL
+  Z <- envelopeProgressData(X, fun=fun, ..., rmin=rmin, exponent=exponent)
+  R       <- Z$R
+  devdata <- Z$devdata
+  devsim  <- Z$devsim
+  result <- mctestSigtraceEngine(R, devdata, devsim,
+                                 interpolate=interpolate,
+                                 confint=confint,
+                                 alpha=alpha,
+                                 exponent=exponent,
+                                 unitname=unitname(X))
+  result <- hasenvelope(result, Z$envelope) # envelope may be NULL
+  return(result)
+}
+
+mctestSigtraceEngine <- local({
+
+  mctestSigtraceEngine <- function(R, devdata, devsim, ..., 
+                                   interpolate=FALSE, confint=TRUE,
+                                   alpha=0.05, exponent=2, unitname=NULL) {
+    nsim     <- ncol(devsim)
+    if(!interpolate) {
+      #' Monte Carlo p-value
+      datarank <- apply(devdata < devsim, 1, sum) +
+        apply(devdata == devsim, 1, sum)/2 + 1
+      pvalue <- datarank/(nsim+1)
+    } else {
+      #' interpolated p-value
+      devs <- cbind(devdata, devsim)
+      pvalue <- apply(devs, 1, rowwise.interp.tailprob)
+    }
+    if(!confint) {
+      #' create fv object without confidence interval
+      p <- fv(data.frame(R=R, pest=pvalue, alpha=alpha), 
+              argu="R", ylab = quote(p(R)), valu="pest", fmla = . ~ R, 
+              desc = c("Interval endpoint R",
+                "calculated p-value %s",
+                "threshold for significance"), 
+              labl=c("R", "%s(R)", paste(alpha)), 
+              unitname = unitname, fname = "p")
+      fvnames(p, ".") <- c("pest", "alpha")
+    } else {
+      # confidence interval
+      if(!interpolate) {
+        #' Agresti-Coull confidence interval
+        successes <- datarank - 1
+        trials    <- nsim
+        z <- qnorm(1 - (1-0.95)/2)
+        nplus <- trials + z^2
+        pplus <- (successes + z^2/2)/nplus
+        sigmaplus <- sqrt(pplus * (1-pplus)/nplus)
+        lo <- pplus - z * sigmaplus
+        hi <- pplus + z * sigmaplus
+      } else {
+        #' confidence interval by delta method
+        pSE    <- apply(devs, 1, rowwise.se)
+        z <- qnorm(1 - (1-0.95)/2)
+        lo <- pmax(0, pvalue - z * pSE)
+        hi <- pmin(1, pvalue + z * pSE)
+      }
+      #' create fv object with confidence interval
+      p <- fv(data.frame(R=R, pest=pvalue, alpha=alpha, lo=lo, hi=hi),
+              argu="R", ylab = quote(p(R)), valu="pest", fmla = . ~ R, 
+              desc = c("Interval endpoint R",
+                "calculated p-value %s",
+                "threshold for significance",
+                "lower 95%% limit for p-value",
+                "upper 95%% limit for p-value"),
+              labl=c("R", "%s(R)", paste(alpha), "lo(R)", "hi(R)"),
+              unitname = unitname, fname = "p")
+      fvnames(p, ".") <- c("pest", "alpha", "lo", "hi")
+      fvnames(p, ".s") <- c("lo", "hi")
+    }
+    return(p)
+  }
+
+  ## interpolated p-value
+  interpol.tailprob <- function(x, q) {
+    sigma <- bw.nrd0(x)
+    mean(pnorm(q, mean=x, sd=sigma, lower.tail=FALSE))
+  }
+  rowwise.interp.tailprob <- function(x) {
+    interpol.tailprob(x[-1], x[1])
+  }
+  ## estimated SE of p-value
+  interpol.se <- function(x, q) {
+    sigma <- bw.nrd0(x)
+    z <- density(x, sigma)
+    v <- mean(z$y * pnorm(q, mean=z$x, sd=sigma, lower.tail=FALSE)^2) * diff(range(z$x))
+    sqrt(v)/length(x)
+  }
+  rowwise.se <- function(x) {
+    interpol.se(x[-1], x[1])
+  }
+    
+  mctestSigtraceEngine
+})
+
+
+dg.sigtrace <- function(X, fun=Lest, ...,   
+                        exponent=2, nsim=19, nsimsub=nsim-1,
+                        alternative=c("two.sided", "less", "greater"),
+                        rmin=0, leaveout=1,
+                        interpolate=FALSE, confint=TRUE, alpha=0.05,
+                        savefuns=FALSE, savepatterns=FALSE, verbose=FALSE) {
+  alternative <- match.arg(alternative)
+  env.here <- sys.frame(sys.nframe())
+  if(!missing(nsimsub) && !relatively.prime(nsim, nsimsub))
+    stop("nsim and nsimsub must be relatively prime")
+  ## generate or extract simulated patterns and functions
+  if(verbose) cat("Generating first-level data...")
+  E <- envelope(X, fun=fun, ..., nsim=nsim,
+                savepatterns=TRUE, savefuns=TRUE,
+                verbose=verbose,
+                envir.simul=env.here)
+  ## get first level MC test significance trace
+  if(verbose) cat("Computing significance trace...")
+  T1 <- mctest.sigtrace(E, fun=fun, nsim=nsim, 
+                        exponent=exponent,
+                        rmin=rmin,
+                        alternative=alternative,
+                        leaveout=leaveout,
+                        interpolate=interpolate,
+                        confint=FALSE, verbose=verbose, ...)
+  R    <- T1$R
+  phat <- T1$pest
+  ## second level traces
+  if(verbose) cat(" Done.\nGenerating second-level data... [silently] ..")
+  Pat <- attr(E, "simpatterns")
+  T2list <- lapply(Pat,
+                   mctest.sigtrace,
+                   fun=fun, nsim=nsimsub, 
+                   exponent=exponent,
+                   rmin=rmin,
+                   alternative=alternative,
+                   leaveout=leaveout,
+                   interpolate=interpolate,
+                   confint=FALSE, verbose=FALSE, ...)
+  phati <- sapply(T2list, getElement, name="pest")
+  ## Dao-Genton p-value
+  if(verbose) cat(" Computing significance trace...")
+  result <- mctestSigtraceEngine(R, -phat, -phati,
+                                 interpolate=FALSE, 
+                                 confint=confint,
+                                 exponent=exponent,
+                                 alpha=alpha,
+                                 unitname=unitname(X))
+  if(verbose) cat(" Done.\n")
+  if(savefuns || savepatterns)
+    result <- hasenvelope(result, E)
+  return(result)
+}
diff --git a/R/simplepanel.R b/R/simplepanel.R
new file mode 100644
index 0000000..557150c
--- /dev/null
+++ b/R/simplepanel.R
@@ -0,0 +1,221 @@
+#
+# simplepanel.R
+#
+#  A simple, robust point & click interface
+#     used in rmh visual debugger.
+#
+#  $Revision: 1.14 $  $Date: 2016/04/25 02:34:40 $
+#
+
+simplepanel <- function(title, B, boxes, clicks, redraws=NULL, exit=NULL, env) {
+  stopifnot(is.rectangle(B))
+  stopifnot(is.list(boxes))
+  if(!all(unlist(lapply(boxes, is.rectangle))))
+    stop("some of the boxes are not rectangles")
+  if(!all(unlist(lapply(boxes, is.subset.owin, B=B))))
+    stop("Some boxes do not lie inside the bounding box B")
+  stopifnot(is.list(clicks) && length(clicks) == length(boxes))
+  if(!all(unlist(lapply(clicks, is.function))))
+    stop("clicks must be a list of functions")
+  if(is.null(redraws)) {
+    redraws <- rep.int(list(dflt.redraw), length(boxes))
+  } else {
+    stopifnot(is.list(redraws) && length(redraws) == length(boxes))
+    if(any(isnul <- unlist(lapply(redraws, is.null))))
+      redraws[isnul] <- rep.int(list(dflt.redraw), sum(isnul))
+    if(!all(unlist(lapply(redraws, is.function))))
+      stop("redraws must be a list of functions")
+  }
+  if(is.null(exit)) {
+    exit <- function(...) { NULL}
+  } else stopifnot(is.function(exit))
+  stopifnot(is.environment(env))
+  n <- length(boxes)
+  bnames <- names(boxes) %orifnull% rep("", n)
+  cnames <- names(clicks) %orifnull% rep("", n)
+  dnames <- paste("Button", seq_len(n))
+  nama <- ifelse(nzchar(bnames), bnames, ifelse(nzchar(cnames), cnames, dnames))
+  out <- list(title=title, B=B,
+              nama=nama, boxes=boxes, clicks=clicks, redraws=redraws,
+              exit=exit, env=env)
+  class(out) <- c("simplepanel", class(out))
+  return(out)
+}
+
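+# Usage sketch (hedged; a two-button panel, interactive use only):
+#   B <- owin(c(0,1), c(0,1))
+#   boxes <- layout.boxes(B, 2, horizontal=TRUE)   # see below
+#   clicks <- list(Hello=function(env, xy) { cat("Hello\n"); TRUE },
+#                  Quit =function(env, xy) FALSE)
+#   P <- simplepanel("Demo", B, boxes, clicks, env=new.env())
+#   # run.simplepanel(P)
+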
+grow.simplepanel <- function(P, side=c("right","left","top","bottom"),
+                             len=NULL,
+                             new.clicks, new.redraws=NULL, ..., aspect) {
+  verifyclass(P, "simplepanel")
+  side <- match.arg(side)
+  stopifnot(is.list(new.clicks))
+  if(!all(unlist(lapply(new.clicks, is.function))))
+    stop("new.clicks must be a list of functions")
+  if(is.null(new.redraws)) {
+    new.redraws <- rep.int(list(dflt.redraw), length(new.clicks))
+  } else {
+    stopifnot(is.list(new.redraws) && length(new.redraws) == length(new.clicks))
+    if(any(isnul <- sapply(new.redraws, is.null)))
+      new.redraws[isnul] <- rep.int(list(dflt.redraw), sum(isnul))
+    if(!all(unlist(lapply(new.redraws, is.function))))
+      stop("new.redraws must be a list of functions")
+  }
+  if(missing(aspect) || is.null(aspect)) {
+    # determine aspect ratio from length of longest text string
+    n <- length(new.clicks)
+    nama <- names(new.clicks)
+    if(sum(nzchar(nama)) != n)
+      nama <- names(new.redraws)
+    if(sum(nzchar(nama)) != n)
+      nama <- paste("Box", seq_len(n))
+    aspect <- 3/max(4, nchar(nama))
+  }
+  B <- P$B
+  n <- length(new.clicks)
+  switch(side,
+         right={
+           new.width <- if(!is.null(len)) len else sidelengths(B)[1]/2
+           extraspace <- owin(B$xrange[2] + c(0, new.width), B$yrange)
+           new.boxes <- layout.boxes(extraspace, n, ..., aspect=aspect)
+         },
+         left={
+           new.width <- if(!is.null(len)) len else sidelengths(B)[1]/2
+           extraspace <- owin(B$xrange[1] - c(new.width, 0), B$yrange)
+           new.boxes <- layout.boxes(extraspace, n, ..., aspect=aspect)
+         },
+         top={
+           new.height <- if(!is.null(len)) len else sidelengths(B)[2]/2
+           extraspace <- owin(B$xrange, B$yrange[2] + c(0, new.height))
+           new.boxes <- layout.boxes(extraspace, n, ..., aspect=aspect,
+                                     horizontal=TRUE)
+         },
+         bottom={
+           new.height <- if(!is.null(len)) len else sidelengths(B)[2]/2
+           extraspace <- owin(B$xrange, B$yrange[1] - c(new.height, 0))
+           new.boxes <- layout.boxes(extraspace, n, ..., aspect=aspect,
+                                     horizontal=TRUE)
+         })
+  with(P, simplepanel(title,
+                      boundingbox(B, extraspace),
+                      append(boxes, new.boxes),
+                      append(clicks, new.clicks),
+                      append(redraws, new.redraws),
+                      exit, env))
+}
+
+                             
+redraw.simplepanel <- function(P, verbose=FALSE) {
+  verifyclass(P, "simplepanel")
+  if(verbose)
+    cat("Redrawing entire panel\n")
+  with(P, {
+#    ntitle <- sum(nzchar(title))
+    plot(B, type="n", main=title)
+    for(j in seq_along(nama)) 
+      (redraws[[j]])(boxes[[j]], nama[j], env)
+  })
+  invisible(NULL)
+}
+
+clear.simplepanel <- function(P) {
+  verifyclass(P, "simplepanel")
+  plot(P$B, main="")
+  invisible(NULL)
+}
+                             
+run.simplepanel <- function(P, popup=TRUE, verbose=FALSE) {
+  verifyclass(P, "simplepanel")
+  if(popup) dev.new()
+  ntitle <- sum(nzchar(P$title))
+  opa <- par(mar=c(0,0,ntitle+0.2,0),ask=FALSE)
+  with(P, {
+    # interaction loop
+    more <- TRUE
+    while(more) {
+      redraw.simplepanel(P, verbose=verbose)
+      xy <- locator(1)
+      if(is.null(xy)) {
+        if(verbose) cat("No (x,y) coordinates\n")
+        break
+      }
+      found <- FALSE
+      for(j in seq_along(boxes)) {
+        if(inside.owin(xy$x, xy$y, boxes[[j]])) {
+          found <- TRUE
+          if(verbose) cat(paste("Caught click on", sQuote(nama[j]), "\n"))
+          more <- (clicks[[j]])(env, xy)
+          if(!is.logical(more) || length(more) != 1) {
+            warning(paste("Click function for",
+                          sQuote(nama[j]),
+                          "did not return TRUE/FALSE"))
+            more <- FALSE
+          }
+          if(verbose) cat(if(more) "Continuing\n" else "Terminating\n")
+          break
+        }
+      }
+      if(verbose && !found)
+        cat(paste("Coordinates", paren(paste(xy, collapse=",")),
+                  "not matched to any box\n"))
+    }
+  })
+  if(verbose)
+    cat("Calling exit function\n")
+
+  rslt <- with(P, exit(env))
+  
+  # revert to original graphics parameters
+  par(opa)
+  # close popup window?
+  if(popup) dev.off()
+  
+  # return value of 'exit' function
+  return(rslt)
+}
+
+layout.boxes <- function(B, n, horizontal=FALSE, aspect=0.5, usefrac=0.9){
+  # make n boxes in B
+  stopifnot(is.rectangle(B))
+  stopifnot(n > 0)
+  width <- sidelengths(B)[1]
+  height <- sidelengths(B)[2]
+  if(!horizontal) {
+    heightshare <- height/n
+    useheight <- min(width * aspect, heightshare * usefrac)
+    usewidth <-  min(useheight /aspect, width * usefrac)
+    lostwidth <- width - usewidth
+    lostheightshare <- heightshare - useheight
+    template <- owin(c(0, usewidth), c(0, useheight))
+    boxes <- list()
+    boxes[[1]] <- shift(template,
+                        c(B$xrange[1]+lostwidth/2,
+                          B$yrange[1] + lostheightshare/2))
+    if(n > 1) 
+      for(j in 2:n) 
+        boxes[[j]] <- shift(boxes[[j-1]], c(0, heightshare))
+  } else {
+    boxes <- layout.boxes(flipxy(B), n,
+                            horizontal=FALSE, aspect=1/aspect, usefrac=usefrac)
+    boxes <-  lapply(boxes, flipxy)
+  }
+  return(boxes)
+}
+
+# default redraw function for control buttons
+
+dflt.redraw <- function(button, name, env) {
+  plot(button, add=TRUE, border="pink")
+  text(centroid.owin(button), labels=name)
+}
+
+print.simplepanel <- function(x, ...) {
+  nama <- x$nama
+  cat("simplepanel object\n")
+  cat(paste("\tTitle:", sQuote(x$title), "\n"))
+  cat("\tPanel names:")
+  for(i in seq_along(nama)) {
+    if(i %% 6 == 1) cat("\n\t")
+    cat(paste0(sQuote(nama[i]), "  "))
+  }
+  cat("\n")
+  return(invisible(NULL))
+}
diff --git a/R/simulate.detPPF.R b/R/simulate.detPPF.R
new file mode 100644
index 0000000..5f4e227
--- /dev/null
+++ b/R/simulate.detPPF.R
@@ -0,0 +1,384 @@
+## This file contains functions to simulate DPP models.
+## Two simulation functions are visible:
+## - simulate.detpointprocfamily (most useful)
+## - rdpp (more generic workhorse function -- actually the real workhorse is the locally defined rdppp)
+##
+## Furthermore, the auxiliary function dppeigen is defined here.
+
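+## A typical call (illustrative comment only; dppGauss() is the Gaussian
+## DPP model constructor defined elsewhere in spatstat):
+##   m <- dppGauss(lambda=100, alpha=0.05, d=2)
+##   X <- simulate(m, nsim=1)   # dispatches to simulate.detpointprocfamily
+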
+rdpp <- local({
+
+## Generates an empty point pattern
+emptyppx <- function(W, simplify = TRUE){
+  W <- as.boxx(W)
+  r <- W$ranges
+  d <- ncol(r)
+  if(simplify){
+      if(d==2)
+          return(ppp(numeric(0), numeric(0), window=as.owin(W)))
+      if(d==3)
+          return(pp3(numeric(0), numeric(0), numeric(0), W))
+  }
+  rslt <- replicate(d, numeric(0), simplify=FALSE)
+  names(rslt) <- paste("x",1:d,sep="")
+  rslt <- as.data.frame(rslt)
+  return(ppx(rslt, domain = W, coord.type= rep("spatial", d)))
+}
+
+rdppp <- function(index, basis = "fourierbasis", window = boxx(rep(list(0:1), ncol(index))),
+                  reject_max = 1e4, progress = 0, debug = FALSE, ...){
+  ## Check arguments:
+  if (!(is.logical(debug)))
+    stop(paste(sQuote("debug"), "must be TRUE or FALSE"))
+  if (!is.numeric(reject_max)||reject_max<=1)
+    stop(paste(sQuote("reject_max"), "must be a numeric greater than 1"))
+  if (!is.numeric(progress)||progress<0)
+    stop(paste(sQuote("progress"), "must be a non-negative number"))
+  index <- as.matrix(index)
+  d <- ncol(index)
+  window <- as.boxx(window)
+  ranges <- window$ranges
+  if(ncol(ranges)!=d)
+    stop("The dimension differs from the number of columns in index")
+  basis <- get(basis)
+  if (!(is.function(basis)))
+    stop(paste(sQuote("basis"), "must be a function"))
+  tmp <- basis(ranges[1,,drop=FALSE], index, window)
+  if (!(is.numeric(tmp) || is.complex(tmp)))
+    stop(paste("Output of", sQuote("basis"), "must be numeric or complex"))
+
+  ## Number of points to simulate:
+  n <- nrow(index)
+
+  ## Return empty point pattern if n=0:
+  empty <- emptyppx(window)
+  if (n==0)
+    return(empty)
+
+  ## Initialize debug info:
+  if(debug){
+    debugList <- replicate(n, list(old=empty, accepted=empty, rejected=empty, index=index), simplify=FALSE)
+  }
+
+  # Matrix of coordinates:
+  x <- matrix(0,n,d)
+  colnames(x) <- paste("x",1:d,sep="")
+  x[1,] <- runif(d,as.numeric(ranges[1,]),as.numeric(ranges[2,]))
+  
+  # Debug info:
+  if(debug){
+    debugList[[1]] <- list(old=empty, accepted=ppx(x[1,,drop=FALSE],window,simplify=TRUE), rejected=empty, index=index, estar=rep(1/n,n))
+  }
+  
+  if (n==1)
+    return(ppx(x[1,,drop=FALSE], window, simplify = TRUE))
+  
+  # Initialize matrices for Gram-Schmidt vectors and conj. trans.:
+  e <- matrix(as.complex(0),n,n-1)
+  estar <- matrix(as.complex(0),n-1,n)
+  # First vector of basis-functions evaluated at first point:
+  v <- basis(x[1,,drop=FALSE],index,window)
+  ## Record normalized version in the Gram-Schmidt matrices:
+  e[,1] <- v/sqrt(sum(abs(v)^2))
+  estar[1,] <- Conj(e[,1])
+  if(progress>0)
+    cat(paste("Simulating", n, "points:\n"))
+
+  ## Main for loop over number of points:
+  for(i in (n-1):1){
+    ## Print progress:
+    if(progress>0)
+      progressreport(n-i, n, every=progress)
+    ## Aux. variable to count number of rejection steps:
+    tries <- 0
+    # Debug info:
+    if(debug){
+      rejected <- matrix(NA,reject_max,d)
+    }
+    ## Non-zero vectors of estar matrix:
+    estar2 <- estar[1:(n-i),]
+    repeat{
+      ## Proposed point:
+      newx <- matrix(runif(d,as.numeric(ranges[1,]),as.numeric(ranges[2,])),ncol=d)
+      ## Basis functions eval. at proposed point:
+      v <- basis(newx, index, window)
+      ## Vector of projection weights (has length n-i)
+      wei <- estar2%*%v
+      ## Accept probability:
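+      ## (equal to |W|/n times the squared norm of the part of v orthogonal
+      ## to the span of the basis evaluations at the points already accepted;
+      ## this is the conditional density of the projection DPP relative to
+      ## the uniform proposal)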
+      tmp <- prod(ranges[2,]-ranges[1,])/n*(sum(abs(v)^2)-sum(abs(wei)^2))
+      ## If proposal is accepted the loop is broken:
+      if(runif(1)<as.numeric(abs(tmp))){
+        break
+      }
+      ## If rejected, check that we have not tried too many times:
+      if(tries>reject_max){
+        stop(paste("Rejection sampling failed reject_max =",reject_max,"times in a row"))
+      }
+      ## Increase the count of rejection steps:
+      tries <- tries+1
+      # Debug info:
+      if(debug){
+        rejected[tries,] <- newx
+      }
+    } ## END OF REJECTION LOOP
+
+    # Record the accepted point:
+    x[n-i+1,] <- newx
+
+    # Debug info:
+    if(debug){
+      if(tries==0){
+        rej <- empty
+      } else{
+        rej <- ppx(rejected[1:tries,,drop=FALSE],window, simplify=TRUE)
+      }
+      debugList[[n-i+1]] <- list(
+                old=ppx(x[1:(n-i),,drop=FALSE],window, simplify=TRUE),
+                accepted=ppx(newx,window,simplify=TRUE),
+                rejected=rej, index=index, estar = estar2)
+    }
+
+    ## If it is the last point exit the main loop:
+    if(i==1){break}
+
+    ## Calculate orthogonal vector for Gram-Schmidt procedure:
+    w <- v - rowSums(matrix(wei,n,n-i,byrow=TRUE)*e[,1:(n-i)])
+    ## Record normalized version in the Gram-Schmidt matrices:
+    e[,n-i+1] <- w/sqrt(sum(abs(w)^2))
+    estar[n-i+1,] <- Conj(e[,n-i+1])
+  } ## END OF MAIN FOR LOOP
+  # Save points as point pattern:
+  X <- ppx(x, window, simplify = TRUE)
+  # Debug info:
+  if(debug){
+    attr(X, "dpp") <- list(debug=debugList)
+  }
+  if(progress>0)
+    cat(" Done!\n")
+  return(X)
+}
+
+rdpp <- function(eig, index, basis = "fourierbasis",
+                 window = boxx(rep(list(0:1), ncol(index))), reject_max = 1e4,
+                 progress = 0, debug = FALSE, ...){
+  window2d <- NULL
+  if (is.owin(window)) 
+    window2d <- window
+  sampleindex <- as.matrix(index[rbinom(nrow(index), 1, eig)==1, ])
+  X <- rdppp(sampleindex, basis=basis, window=window, reject_max=reject_max, progress=progress, debug=debug, ...)
+  if(!is.null(window2d))
+    X <- X[window2d]
+  return(X)
+}
+
+rdpp
+}
+)
+
+simulate.dppm <-
+simulate.detpointprocfamily <- function(object, nsim = 1, seed = NULL, ..., W = NULL,
+                              trunc = .99, correction = "periodic", rbord = reach(object)
+                              #  parallel = FALSE
+                              ){
+  
+  # .... copied from simulate.lm ....
+  if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE))
+    runif(1)
+  if (is.null(seed))
+    RNGstate <- get(".Random.seed", envir = .GlobalEnv)
+  else {
+    R.seed <- get(".Random.seed", envir = .GlobalEnv)
+    set.seed(seed)
+    RNGstate <- structure(seed, kind = as.list(RNGkind()))
+    on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv))
+  }
+  # ..................................
+
+  if(inherits(object, "dppm")){
+      if(is.null(W))
+          W <- Window(object$X)
+      object <- object$fitted
+  }
+  if(!inherits(object, "detpointprocfamily"))
+    stop("The model to simulate must be of class detpointprocfamily")
+  if(length(tmp <- object$freepar)>0)
+    stop(paste("The model to simulate must be completely specified. The following parameters are unspecified:", tmp))
+  if(!valid(object))
+    stop("The model is invalid. Please change parameter values to get a valid model")
+  if(!is.numeric(nsim)||nsim<1)
+    stop(paste(sQuote("nsim"), "must be a numeric greater than or equal to 1"))
+  nsim <- floor(nsim)
+  dim <- dim(object)
+  basis <- object$basis
+  ####### BACKDOOR TO SPHERICAL CASE ########
+  if(!is.null(spherefun <- object$sim_engine)){
+      sphereSimEngine <- get(spherefun)
+      rslt <- sphereSimEngine(object, trunc, nsim, ...)
+      attr(rslt, "seed") <- RNGstate
+      return(rslt)
+  }
+  ###########################################
+
+  # Check stationarity, and check window compatibility when both 'W' and 'thin' are present
+  statmodel <- is.null(thin <- object$thin)
+  if(is.null(W)){
+    if(!statmodel) W <- domain(thin)
+  }
+  Wowin <- if(is.owin(W)) W else NULL
+  if(is.null(W)){
+    W <- boxx(rep(list(0:1), dim))
+  } else{
+    W <- as.boxx(W, warn.owin = FALSE)
+  }
+  if(!statmodel){
+    if(!is.subset.owin(Wowin,thin))
+      stop("The window of simulation is not contained in the window of the inhomogeneous intensity.")
+  }
+  r <- W$ranges
+  if(dim!=ncol(r))
+    stop(paste("The dimension of the window:", ncol(r), "is inconsistent with the dimension of the model:", dim))
+  Wscale <- as.numeric(r[2,]-r[1,])
+  if(correction=="border"){
+    if(!is.numeric(rbord)||any(rbord<0))
+      stop(paste(sQuote("rbord"), "must be a non-negative numeric"))
+    borderscale <- pmin((Wscale+2*rbord)/Wscale, 2)
+    Wscale <- borderscale*Wscale
+  }
+  
+  ##  lambda <- intensity(object)
+  tmp <- dppeigen(object, trunc, Wscale)
+  trunc <- tmp$trunc
+  prec <- tmp$prec
+  n <- length(tmp$eig)
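+  ## For each simulation, choose the active basis functions by independent
+  ## Bernoulli sampling: index k is retained with probability tmp$eig[k],
+  ## which reduces the general DPP to a projection DPP of random rank.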
+  indexlist <- replicate(nsim, {x <- as.matrix(tmp$index[rbinom(n, 1, tmp$eig)==1, ]); gc(); x}, simplify = FALSE)
+  rm(tmp)
+  gc()
+  onesim <- function(i, win=NULL){
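+    ## first argument eig=1: the spectrum was already thinned above,
+    ## so every index in indexlist[[i]] is retained with probability 1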
+    X <- rdpp(1, indexlist[[i]], basis = basis, window = boxx(rep(list(c(-.5,.5)), dim)), ...)
+    a <- attr(X, "dpp")
+    a <- c(a, list(prec = prec, trunc = trunc))
+    if(correction=="border"){
+      if(dim!=2)
+        stop("Border correction only implemented for dimension 2 at the moment.")
+      X <- X[affine.owin(as.owin(X), mat = diag(1/borderscale))]
+    }
+    if(is.ppp(X)){
+      X <- affine(X, matrix(c(Wscale[1],0,0,Wscale[2]), 2, 2), c(mean(r[,1]), mean(r[,2])))
+      if(!is.null(win))
+        X <- X[win]
+    } else{
+      X <- ppx(X$data, domain = as.boxx(X$domain), coord.type = rep("spatial", dim))
+      X$data <- as.hyperframe(as.data.frame(X$data)*matrix(Wscale, nrow(X$data), ncol(X$data), byrow = TRUE))
+      X$domain$ranges <- X$domain$ranges*matrix(Wscale, 2, dim, byrow = TRUE)
+      X <- ppx(X$data, X$domain, simplify = TRUE)
+    }
+    attr(X, "dpp") <- a
+    return(X)
+  }
+  if(nsim==1){
+    rslt <- onesim(1,win=Wowin)
+    if(!statmodel)
+        rslt <- rthin(rslt, P=thin)
+  } else{
+######## Old code for parallel simulation #########
+#     if(is.logical(parallel)){
+#       cl.cores <- if(parallel) NULL else 1
+#     } else{
+#       cl.cores <- parallel
+#     }
+#     rslt <- detlapply(1:nsim, onesim, cl.cores=cl.cores, win=Wowin)
+###################################################
+    rslt <- lapply(1:nsim, onesim, win=Wowin)
+    if(!statmodel)
+        rslt <- lapply(rslt, rthin, P=thin)
+    names(rslt) <- paste("Simulation", 1:nsim)
+    rslt <- as.solist(rslt)
+  }
+  attr(rslt, "seed") <- RNGstate
+  return(rslt)
+
+}
+
+dppeigen <- function(model, trunc, Wscale, stationary = FALSE){
+    dim <- dim(model)
+    if(stationary&&dim!=2)
+        stop("Stationarity can only be exploited in dimension 2 at the moment.")
+    
+    ## Calculate expected number of points if the intensity is a parameter
+    expnum <- NULL
+    lambdaname <- model$intensity
+    if(!is.null(lambdaname))
+        expnum <- getElement(model$fixedpar, lambdaname)*prod(Wscale)
+    ## Get the maximal truncation in each dimension
+    maxtrunc <- spatstat.options("dpp.maxmatrix")^(1/dim)
+    ## Extract spectral density
+    specden <- dppspecden(model)
+    truncrange <- dppspecdenrange(model)*max(Wscale)
+    
+    if(trunc>=1){ ## Integer truncation fixed by user.
+        if(stationary){
+             ## Coordinates on axes:
+            index1a <- c(rep(0,trunc),1:trunc)
+            index2a <- c(1:trunc,rep(0,trunc))
+            ## Coordinates of ordinary points:
+            index1 <- rep(1:trunc,trunc)
+            index2 <- rep(1:trunc,each=trunc)
+            ## Spectral densities:
+            eigo <- specden(0)
+            eiga <- specden(sqrt((index1a/Wscale[1])^2+(index2a/Wscale[2])^2))
+            eig <- specden(sqrt((index1/Wscale[1])^2+(index2/Wscale[2])^2))
+            prec <- (eigo+2*sum(eiga)+4*sum(eig))/expnum   
+        } else{
+            trunc <- floor(trunc)
+            index <- do.call(expand.grid, replicate(dim, seq(-trunc,trunc), simplify=FALSE))
+            indexscaled <- index*matrix(1/Wscale, nrow(index), ncol(index), byrow = TRUE)
+            if(model$isotropic){
+                eig <- specden(sqrt(rowSums(indexscaled^2)))
+            } else{
+                eig <- specden(indexscaled)
+            }
+            prec <- sum(eig)/expnum
+        }
+    } else{ ## Integer truncation calculated from user-specified precision.
+        if(is.null(expnum))
+            stop("Cannot calculate truncation adaptively in a model without intensity parameter. Please specify trunc directly as a positive integer.")
+        prec0 <- trunc
+        trunc <- 1
+        prec <- 0
+        ## cat("truncation is being calculated adaptively. Current truncation:\n")
+        while(prec<=prec0 && (2*trunc)<=maxtrunc && trunc<=truncrange){
+            trunc <- 2*trunc
+            if(stationary){
+                ## Coordinates on axes:
+                index1a <- c(rep(0,trunc),1:trunc)
+                index2a <- c(1:trunc,rep(0,trunc))
+                ## Coordinates of ordinary points:
+                index1 <- rep(1:trunc,trunc)
+                index2 <- rep(1:trunc,each=trunc)
+                ## Spectral densities:
+                eigo <- specden(0)
+                eiga <- specden(sqrt((index1a/Wscale[1])^2+(index2a/Wscale[2])^2))
+                eig <- specden(sqrt((index1/Wscale[1])^2+(index2/Wscale[2])^2))
+                prec <- (eigo+2*sum(eiga)+4*sum(eig))/expnum
+            } else{
+                index <- do.call(expand.grid, replicate(dim, seq(-trunc,trunc), simplify=FALSE))
+                indexscaled <- index*matrix(1/Wscale, nrow(index), ncol(index), byrow = TRUE)
+                if(model$isotropic){
+                    eig <- specden(sqrt(rowSums(indexscaled^2)))
+                } else{
+                    eig <- specden(indexscaled)
+                }
+                prec <- sum(eig)/expnum
+            }
+        }
+        ## cat("\n")
+        if(prec<prec0){
+            warning(paste0("Adaptive truncation stopped at ", trunc, ". The precision is only ", prec))
+        }
+    }
+    if(stationary){
+        rslt <- list(eigo=eigo, eiga=eiga, eig=eig, index1a=index1a, index2a=index2a)
+    } else{
+        rslt <- list(eig=eig, index=index)
+    }
+    return(c(rslt, list(prec=prec, trunc=trunc)))
+}
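+
+## Rough usage sketch (comment only; dppGauss() is a model constructor from
+## elsewhere in spatstat, and Wscale gives the window side lengths):
+##   spec <- dppeigen(dppGauss(lambda=100, alpha=0.05, d=2),
+##                    trunc=0.99, Wscale=c(1,1))
+##   spec$trunc   # truncation actually used
+##   spec$prec    # fraction of the expected number of points captured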
diff --git a/R/simulatelppm.R b/R/simulatelppm.R
new file mode 100644
index 0000000..2cdcd61
--- /dev/null
+++ b/R/simulatelppm.R
@@ -0,0 +1,35 @@
+##
+## simulatelppm.R
+##
+##  Simulation of lppm objects
+##
+##  $Revision: 1.6 $  $Date: 2015/07/11 08:19:26 $
+##
+
+simulate.lppm <- function(object, nsim=1, ...,
+                          new.coef=NULL,
+                          progress=(nsim > 1),
+                          drop=FALSE) {
+  starttime <- proc.time()
+  if(!is.poisson(object$fit))
+    stop("Simulation of non-Poisson models is not yet implemented")
+  lambda <- predict(object, ..., new.coef=new.coef)
+  lmax <- if(is.im(lambda)) max(lambda) else unlist(lapply(lambda, max))
+  L <- as.linnet(object)
+  result <- vector(mode="list", length=nsim)
+  pstate <- list()
+  for(i in seq_len(nsim)) {
+    if(progress) pstate <- progressreport(i, nsim, state=pstate)
+    result[[i]] <- rpoislpp(lambda, L, lmax=lmax)
+  }
+  if(nsim == 1 && drop) {
+    result <- result[[1]]
+  } else {
+    result <- as.solist(result)
+    if(nsim > 0)
+      names(result) <- paste("Simulation", 1:nsim)
+  }
+  result <- timed(result, starttime=starttime)
+  return(result)
+}
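+
+## Illustrative use (comment only; 'chicago' is a point pattern on a linear
+## network shipped with spatstat):
+##   fit <- lppm(chicago ~ x)
+##   Xsim <- simulate(fit, nsim=2)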
+
diff --git a/R/slrm.R b/R/slrm.R
new file mode 100755
index 0000000..be11c0a
--- /dev/null
+++ b/R/slrm.R
@@ -0,0 +1,637 @@
+#
+#  slrm.R
+#
+#  Spatial Logistic Regression
+#
+#  $Revision: 1.28 $   $Date: 2016/04/25 02:34:40 $
+#
+
+slrm <- function(formula, ..., data=NULL, offset=TRUE, link="logit",
+                 dataAtPoints=NULL, splitby=NULL) {
+  
+  # remember call
+  CallInfo <- list(callstring = short.deparse(sys.call()),
+                   cl = match.call(),
+                   formula = formula,
+                   offset=offset,
+                   link=link,
+                   splitby=splitby,
+                   dotargs=list(...))
+  if(!(link %in% c("logit", "cloglog")))
+    stop(paste("Unrecognised link", dQuote(link)))
+
+  ########### INTERPRET FORMULA ##############################
+  
+  if(!inherits(formula, "formula"))
+    stop(paste("Argument", dQuote("formula"), "should be a formula"))
+
+  # check formula has LHS and RHS. Extract them
+  if(length(formula) < 3)
+    stop(paste("Argument", sQuote("formula"),
+               "must have a left hand side"))
+  Yname <- formula[[2]]
+  trend <- rhs <- formula[c(1,3)]
+  if(!is.name(Yname))
+    stop("Left hand side of formula should be a single name")
+  Yname <- paste(Yname)
+  if(!inherits(trend, "formula"))
+    stop("Internal error: failed to extract RHS of formula")
+
+  varnames <- unique(variablesinformula(trend))
+  specials <- c("x", "y", "logpixelarea")
+  covnames <- varnames[!(varnames %in% specials)]
+
+  # add 'splitby' to covariate names
+  if(!is.null(splitby)) {
+    if(!is.character(splitby) || length(splitby) != 1)
+      stop("splitby should be a single character string")
+    covnames <- unique(c(covnames, splitby))
+  }
+
+  CallInfo$responsename <- Yname
+  CallInfo$varnames     <- varnames
+  CallInfo$covnames     <- covnames
+  
+  # Parent environment
+  parenv <- environment(formula)
+
+  ########  FIND DATA AND RESHAPE #######################
+
+  Data <- slr.prepare(CallInfo, parenv, data, dataAtPoints, splitby)
+
+#  W  <- Data$W
+  df <- Data$df
+  
+  ########  FIT MODEL ###############################
+
+  dformula <- formula
+  if(offset) {
+    # insert offset term in formula
+    rhs <- paste(as.character(rhs), collapse=" ")
+    rhs <- paste(c(rhs, "offset(logpixelarea)"), collapse="+")
+    dformula <- as.formula(paste(Yname, rhs))
+  }
+
+  linkname <- link
+  FIT  <- glm(dformula, family=binomial(link=linkname),
+              data=df, na.action=na.exclude)
+
+  result <- list(call     = CallInfo$cl,
+                 CallInfo = CallInfo,
+                 Data     = Data,
+                 Fit      = list(FIT=FIT, dformula=dformula),
+                 terms    = terms(formula))
+
+  class(result) <- c("slrm", class(result))
+  return(result)
+}
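+
+## Illustrative use (comment only; 'redwood' is a point pattern dataset
+## shipped with spatstat):
+##   fit <- slrm(redwood ~ x + y)              # logistic link by default
+##   fit2 <- slrm(redwood ~ 1, link="cloglog") # intercept only, cloglog link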
+
+################ UTILITY TO FIND AND RESHAPE DATA #################
+
+slr.prepare <- function(CallInfo, envir, data,
+                        dataAtPoints=NULL, splitby=NULL,
+                        clip=TRUE) {
+  # CallInfo is produced by slrm()
+  # envir is parent environment of model formula
+  # data  is 'data' argument that takes precedence over 'envir'
+  # 'clip' is TRUE if the data should be clipped to the domain of Y
+  Yname    <- CallInfo$responsename
+#  varnames <- CallInfo$varnames
+  covnames <- CallInfo$covnames
+  dotargs  <- CallInfo$dotargs
+  #
+  getobj <- function(nama, env, dat) {
+    if(!is.null(dat) && !is.null(x <- dat[[nama]]))
+      return(x)
+    else return(get(nama, envir=env))
+  }
+  # Get the response point pattern Y 
+  Y <- getobj(Yname, envir, data)
+  if(!is.ppp(Y))
+    stop(paste("The response", sQuote(Yname), "must be a point pattern"))
+  #
+  if(!is.null(dataAtPoints)) {
+    dataAtPoints <- as.data.frame(dataAtPoints)
+    if(nrow(dataAtPoints) != npoints(Y))
+      stop(paste("dataAtPoints should have one row for each point in",
+                 dQuote(Yname)))
+  }
+  # Find the covariates
+  ncov <- length(covnames)
+  covlist <- lapply(as.list(covnames), getobj, env = envir, dat=data)
+  names(covlist) <- covnames
+  # Each covariate should be an image, a window, a function, or a single number
+  if(ncov == 0) {
+    isim <- isowin <- ismask <- isfun <- isnum <- isspatial <- israster <- logical(0)
+  } else {
+    isim  <- sapply(covlist, is.im)
+    isowin  <- sapply(covlist, is.owin)
+    ismask  <- sapply(covlist, is.mask)
+    isfun  <- sapply(covlist, is.function)
+    isspatial <- isim | isowin | isfun
+    israster <- isim | ismask
+    isnum <- sapply(covlist, is.numeric) & (lengths(covlist) == 1)
+  }
+  if(!all(ok <- (isspatial | isnum))) {
+    n <- sum(!ok)
+    stop(paste(ngettext(n, "The argument", "Each of the arguments"),
+               commasep(sQuote(covnames[!ok])),
+               "should be either an image, a window, a function, or a single number"))
+  }
+  # 'splitby' 
+  if(!is.null(splitby)) {
+    splitwin <- covlist[[splitby]]
+    if(!is.owin(splitwin))
+      stop("The splitting covariate must be a window")
+    # ensure it is a polygonal window
+    covlist[[splitby]] <- splitwin <- as.polygonal(splitwin)
+    # delete splitting covariate from lists to be processed
+    issplit <- (covnames == splitby)
+    isspatial[issplit] <- FALSE
+    israster[issplit] <- FALSE
+  }
+  # 
+#  nnum <- sum(isnum)
+#  nspatial <- sum(isspatial)
+  nraster <- sum(israster)
+  #
+  numlist <- covlist[isnum]
+  spatiallist <- covlist[isspatial]
+  rasterlist <- covlist[israster]
+  #
+  numnames <- names(numlist)
+  spatialnames <- names(spatiallist)
+#  rasternames <- names(rasterlist)
+  #
+  
+  ########  CONVERT TO RASTER DATA  ###############################
+
+  convert <- function(x,W) {
+    if(is.im(x) || is.function(x)) return(as.im(x,W))
+    if(is.owin(x)) return(as.im(x, W, value=TRUE, na.replace=FALSE))
+    return(NULL)
+  }
+
+  # determine spatial domain & common resolution: convert all data to it
+  if(length(dotargs) > 0 || nraster == 0) {
+    # Pixel resolution is determined by explicit arguments
+    if(clip) {
+      # Window extent is determined by response point pattern
+      D <- as.owin(Y)
+    } else {
+      # Window extent is union of domains of data
+      domains <- lapply(append(spatiallist, list(Y)), as.owin)
+      D <- do.call(union.owin, domains)
+    }
+    # Create template mask
+    W <- do.call(as.mask, append(list(D), dotargs))
+    # Convert all spatial objects to this resolution
+    spatiallist <- lapply(spatiallist, convert, W=W)
+  } else {
+    # Pixel resolution is determined implicitly by covariate data
+    W <- do.call(commonGrid, rasterlist)
+    if(clip) {
+      # Restrict data to spatial extent of response point pattern
+      W <- intersect.owin(W, as.owin(Y))
+    }
+    # Adjust spatial objects to this resolution
+    spatiallist <- lapply(spatiallist, convert, W=W)
+  }
+  # images containing coordinate values
+  xcoordim <- as.im(function(x,y){x}, W=W)
+  ycoordim <- as.im(function(x,y){y}, W=W)
+  #
+  # create a list of covariate images, with names as in formula
+  covimages <- append(list(x=xcoordim, y=ycoordim), spatiallist)
+
+  basepixelarea <- W$xstep * W$ystep
+
+  ########  ASSEMBLE DATA FRAME  ###############################
+
+  if(is.null(splitby)) {
+    df <- slrAssemblePixelData(Y, Yname, W,
+                               covimages, dataAtPoints, basepixelarea)
+    sumYloga <- Y$n * log(basepixelarea)
+    serial <- attr(df, "serial")
+  } else {
+    # fractional pixel areas
+    pixsplit <- pixellate(splitwin, W)
+    splitpixelarea <- as.vector(as.matrix(pixsplit))
+    # determine which points of Y are inside/outside window
+    ins <- inside.owin(Y$x, Y$y, splitwin)
+    # split processing
+    dfIN <- slrAssemblePixelData(Y[ins], Yname, W, covimages,
+                                 dataAtPoints[ins, ], splitpixelarea)
+    serialIN <- attr(dfIN, "serial")
+    dfIN[[splitby]] <- TRUE
+    dfOUT <- slrAssemblePixelData(Y[!ins], Yname, W, covimages,
+                                  dataAtPoints[!ins, ],
+                                  basepixelarea - splitpixelarea)
+    serialOUT <- attr(dfOUT, "serial")
+    dfOUT[[splitby]] <- FALSE
+    df <- rbind(dfIN, dfOUT)
+    serial <- c(serialIN, serialOUT)
+    # sum of log pixel areas associated with points
+    Ysplit <- pixsplit[Y]
+    sumYloga <- sum(log(ifelseXY(ins, Ysplit, basepixelarea - Ysplit)))
+  }
+  
+  # tack on any numeric values
+  df <- do.call(cbind, append(list(df), numlist))
+  
+  ### RETURN ALL 
+  Data <- list(response=Y,
+               covariates=covlist,
+               spatialnames=spatialnames,
+               numnames=numnames,
+               W=W,
+               df=df,
+               serial=serial,
+               sumYloga=sumYloga,
+               dataAtPoints=dataAtPoints)
+  return(Data)
+}
+
+#  
+slrAssemblePixelData <- function(Y, Yname, W,
+                                 covimages, dataAtPoints, pixelarea) {
+  # pixellate point pattern
+  Z <- pixellate(Y, W=W)
+  Z <- eval.im(as.integer(Z>0))
+  # overwrite pixel entries for data points using exact values
+  # coordinates
+  xcoordim <- covimages[["x"]]
+  ycoordim <- covimages[["y"]]
+  xcoordim[Y] <- Y$x
+  ycoordim[Y] <- Y$y
+  covimages[["x"]] <- xcoordim
+  covimages[["y"]] <- ycoordim
+  # overwrite pixel entries
+  if(!is.null(dataAtPoints)) {
+    enames <- colnames(dataAtPoints)
+    relevant <- enames %in% names(covimages)
+    for(v in enames[relevant]) {
+      cova <- covimages[[v]]
+      cova[Y] <- dataAtPoints[, v, drop=TRUE]
+      covimages[[v]] <- cova
+    }
+  }
+  # assemble list of all images
+  Ylist <- list(Z)
+  names(Ylist) <- Yname
+  allimages <- append(Ylist, covimages)
+  # extract pixel values of each image
+  pixelvalues <-
+    function(z) {
+      v <- as.vector(as.matrix(z))
+      if(z$type != "factor") return(v)
+      lev <- levels(z)
+      return(factor(v, levels=seq_along(lev), labels=lev))
+    }
+  pixdata <- lapply(allimages, pixelvalues)
+  df <- as.data.frame(pixdata)
+  serial <- seq_len(nrow(df))
+  # add log(pixel area) column
+  if(length(pixelarea) == 1) {
+    df <- cbind(df, logpixelarea=log(pixelarea))
+  } else {
+    ok <- (pixelarea > 0)
+    df <- cbind(df[ok, ], logpixelarea=log(pixelarea[ok]))
+    serial <- serial[ok]
+  }
+  attr(df, "serial") <- serial
+  return(df)
+}
+
+is.slrm <- function(x) {
+  inherits(x, "slrm")
+}
+
+coef.slrm <- function(object, ...) {
+  coef(object$Fit$FIT)
+}
+
+print.slrm <- function(x, ...) {
+  lk <- x$CallInfo$link
+  switch(lk,
+         logit= {
+           splat("Fitted spatial logistic regression model")
+         },
+         cloglog= {
+           splat("Fitted spatial regression model (complementary log-log)")
+         },
+         {
+           splat("Fitted spatial regression model")
+           splat("Link =", dQuote(lk))
+         })
+  cat("Formula:\t")
+  print(x$CallInfo$formula)
+  splat("Fitted coefficients:")
+  print(coef(x))
+  return(invisible(NULL))
+}
+
+logLik.slrm <- function(object, ..., adjust=TRUE) {
+  FIT  <- object$Fit$FIT
+  ll <- -deviance(FIT)/2
+  if(adjust) {
+    sumYloga <- object$Data$sumYloga
+    ll <- ll - sumYloga
+  }
+  attr(ll, "df") <- length(coef(object))
+  class(ll) <- "logLik"
+  return(ll)
+}
+
+fitted.slrm <- function(object, ...) {
+  if(length(list(...)) > 0)
+    warning("second argument (and any subsequent arguments) ignored")
+  predict(object, type="probabilities")
+}
+
+predict.slrm <- function(object, ..., type="intensity",
+                         newdata=NULL, window=NULL) {
+  type <- pickoption("type", type,
+                     c(probabilities="probabilities",
+                       link="link",
+                       intensity="intensity",
+                       lambda="intensity"))
+  
+  FIT  <- object$Fit$FIT
+  link <- object$CallInfo$link
+  W    <- object$Data$W
+  df   <- object$Data$df
+  loga <- df$logpixelarea
+
+  if(is.null(newdata) && is.null(window)) {
+    # fitted values from existing fit
+    switch(type,
+           probabilities={
+             values <- fitted(FIT)
+           },
+           link={
+             values <- predict(FIT, type="link")
+           },
+           intensity={
+             # this calculation applies whether an offset was included or not
+             if(link == "cloglog") {
+               linkvalues <- predict(FIT, type="link")
+               values <- exp(linkvalues - loga)
+             } else {
+               probs <- fitted(FIT)
+               values <- -log(1-probs)/exp(loga)
+             }
+           }
+           )
+    out <- im(values, xcol=W$xcol, yrow=W$yrow, unitname=unitname(W))
+    return(out)
+  } else {
+    # prediction using new values
+    # update arguments that may affect pixel resolution
+    CallInfo <- object$CallInfo
+    CallInfo$dotargs <- resolve.defaults(list(...), CallInfo$dotargs)
+    #
+    if(!is.null(window)) {
+      # insert fake response in new window
+      if(is.null(newdata)) newdata <- list()
+      window <- as.owin(window)
+      newdata[[CallInfo$responsename]] <- ppp(numeric(0), numeric(0),
+                                            window=window)
+    }
+    # process new data
+    newData <- slr.prepare(CallInfo, environment(CallInfo$formula), newdata,
+                           clip=!is.null(window))
+    newdf   <- newData$df
+    newW    <- newData$W
+    newloga <- newdf$logpixelarea
+    # avoid NA etc
+    npixel <- nrow(newdf)
+    ok <- complete.cases(newdf)
+    if(!all(ok)) {
+      newdf   <- newdf[ok, , drop=FALSE]
+      newloga <- newloga[ok]
+    }
+    # compute link values
+    linkvalues <- predict(FIT, newdata=newdf, type="link")
+    # transform to desired scale
+    linkinv <- family(FIT)$linkinv
+    switch(type,
+           probabilities={
+             values <- linkinv(linkvalues)
+           },
+           link={
+             values <- linkvalues
+           },
+           intensity={
+             # this calculation applies whether an offset was included or not
+             if(link == "cloglog") {
+               values <- exp(linkvalues - newloga)
+             } else {
+               probs <- linkinv(linkvalues)
+               values <- -log(1-probs)/exp(newloga)
+             }
+           }
+           )
+    # form image
+    v <- rep.int(NA_real_, npixel)
+    v[ok] <- values
+    out <- im(v, xcol=newW$xcol, yrow=newW$yrow, unitname=unitname(W))
+    return(out)
+  }
+}
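+
+## Illustrative use (comment only), continuing the 'redwood' sketch above:
+##   fit <- slrm(redwood ~ x + y)
+##   lam <- predict(fit)                       # intensity image (the default)
+##   p <- predict(fit, type="probabilities")   # per-pixel presence probability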
+
+plot.slrm <- function(x, ..., type="intensity") {
+  xname <- short.deparse(substitute(x))
+  y <- predict(x, type=type)
+  do.call(plot.im, resolve.defaults(list(x=y), list(...), list(main=xname)))
+}
+
+formula.slrm <- function(x, ...) {
+  f <- x$CallInfo$formula
+  return(f)
+}
+
+terms.slrm <- function(x, ...) {
+  terms(formula(x), ...)
+}
+
+labels.slrm <- function(object, ...) {
+  # extract fitted trend coefficients
+  co <- coef(object)
+  # model terms
+  tt <- terms(object)
+  lab <- attr(tt, "term.labels")
+  if(length(lab) == 0)
+    return(character(0))
+  # model matrix
+  mm <- model.matrix(object)
+  ass <- attr(mm, "assign")
+  # 'ass' associates coefficients with model terms
+  # except ass == 0 for the Intercept
+  coef.ok <- is.finite(co)
+  relevant <- (ass > 0) 
+  okterms <- unique(ass[coef.ok & relevant])
+  return(lab[okterms])
+}
+
+extractAIC.slrm <- function (fit, scale = 0, k = 2, ...)
+{
+    edf <- length(coef(fit))
+    aic <- AIC(fit)
+    c(edf, aic + (k - 2) * edf)
+}
+
+model.matrix.slrm <- function(object,..., keepNA=TRUE) {
+  FIT <- object$Fit$FIT
+  mm <- model.matrix(FIT, ...)
+  if(!keepNA)
+    return(mm)
+  df <- object$Data$df
+  comp <- complete.cases(df)
+  if(all(comp))
+    return(mm)
+  if(sum(comp) != nrow(mm))
+      stop("Internal error in patching NA's")
+  mmplus <- matrix(NA, nrow(df), ncol(mm))
+  mmplus[comp, ] <- mm
+  return(mmplus)
+}
+
+model.images.slrm <- function(object, ...) {
+  mm <- model.matrix(object, ...)
+  mm <- as.data.frame(mm)
+  Data <- object$Data
+  W      <- Data$W
+  serial <- Data$serial
+  splitby <- object$CallInfo$splitby
+  blank   <- as.im(NA_real_, W)
+  assignbyserial <- function(values, serial, template) {
+    Z <- template
+    Z$v[serial] <- values
+    return(Z)
+  }
+  if(is.null(splitby)) {
+    result <- lapply(as.list(mm), assignbyserial, serial=serial, template=blank)
+  } else {
+    df <- Data$df
+    IN <- as.logical(df[[splitby]])
+    OUT <- !IN
+    mmIN <- mm[IN, , drop=FALSE]
+    mmOUT <- mm[OUT, , drop=FALSE]
+    resultIN <- lapply(as.list(mmIN), assignbyserial,
+                       serial=serial[IN], template=blank)
+    resultOUT <- lapply(as.list(mmOUT), assignbyserial,
+                       serial=serial[OUT], template=blank)
+    names(resultIN) <- paste(names(resultIN), splitby, "TRUE", sep="")
+    names(resultOUT) <- paste(names(resultOUT), splitby, "FALSE", sep="")
+    result <- c(resultIN, resultOUT)
+  }
+  return(as.solist(result))
+}
+
+update.slrm <- function(object, ..., evaluate=TRUE, env=parent.frame()) {
+  e <- update.default(object, ..., evaluate=FALSE)
+  if(evaluate)
+    e <- eval(e, envir=env)
+  return(e)
+}
+
+anova.slrm <- local({
+
+  anova.slrm <- function(object, ..., test=NULL) {
+    objex <- append(list(object), list(...))
+    if(!all(unlist(lapply(objex, is.slrm))))
+      stop("Some arguments are not of class slrm")
+    fitz <- lapply(objex, getFIT)
+    do.call(anova, append(fitz, list(test=test)))
+  }
+
+  getFIT <- function(z) {z$Fit$FIT}
+
+  anova.slrm
+})
+
+
+vcov.slrm <- function(object, ..., what=c("vcov", "corr", "fisher", "Fisher")) {
+  stopifnot(is.slrm(object))
+  what <- match.arg(what)
+  vc <- vcov(object$Fit$FIT)
+  result <- switch(what,
+                   vcov = vc,
+                   corr = {
+                     sd <- sqrt(diag(vc))
+                     vc / outer(sd, sd, "*")
+                   },
+                   fisher=,
+                   Fisher={
+                     solve(vc)
+                   })
+  return(result)
+}
+
+unitname.slrm <- function(x) {
+  return(unitname(x$Data$response))
+}
+
+"unitname<-.slrm" <- function(x, value) {
+  unitname(x$Data$response) <- value
+  return(x)
+}
+
+is.stationary.slrm <- function(x) {
+  fo <- formula(x)
+  trend <- fo[c(1,3)]
+  return(identical.formulae(trend, ~1))
+}
+
+is.poisson.slrm <- function(x) { TRUE }
+
+
+simulate.slrm <- function(object, nsim=1, seed=NULL, ...,
+                          window=NULL, covariates=NULL, 
+                          verbose=TRUE, drop=FALSE) {
+  # .... copied from simulate.lm ....
+  if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE))
+    runif(1)
+  if (is.null(seed))
+    RNGstate <- get(".Random.seed", envir = .GlobalEnv)
+  else {
+    R.seed <- get(".Random.seed", envir = .GlobalEnv)
+    set.seed(seed)
+    RNGstate <- structure(seed, kind = as.list(RNGkind()))
+    on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv))
+  }
+  starttime <- proc.time()
+  
+  # determine simulation window and compute intensity
+  if(!is.null(window))
+    stopifnot(is.owin(window))
+  lambda <- predict(object, type="intensity", newdata=covariates, window=window)
+
+  # max lambda (for efficiency)
+  summ <- summary(lambda)
+  lmax <- summ$max + 0.05 * diff(summ$range)
+
+  # run
+  out <- list()
+  verbose <- verbose && (nsim > 1)
+  if(verbose) {
+    cat(paste("Generating", nsim, "simulations... "))
+    pstate <- list()
+  }
+  for(i in 1:nsim) {
+    out[[i]] <- rpoispp(lambda, lmax=lmax)
+    if(verbose) pstate <- progressreport(i, nsim, state=pstate)
+  }
+  # pack up
+  if(nsim == 1 && drop) {
+    out <- out[[1]]
+  } else {
+    out <- as.solist(out)
+    if(nsim > 0)
+      names(out) <- paste("Simulation", 1:nsim)
+  }
+  out <- timed(out, starttime=starttime)
+  attr(out, "seed") <- RNGstate
+  return(out)
+}
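+
+## Illustrative use (comment only):
+##   fit <- slrm(redwood ~ x + y)
+##   Xsim <- simulate(fit, nsim=3)   # Poisson patterns with the fitted intensity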
diff --git a/R/smooth.ppp.R b/R/smooth.ppp.R
new file mode 100755
index 0000000..b462f60
--- /dev/null
+++ b/R/smooth.ppp.R
@@ -0,0 +1,818 @@
+#
+#  smooth.ppp.R
+#
+#  Smooth the marks of a point pattern
+# 
+#  $Revision: 1.44 $  $Date: 2017/08/16 05:40:56 $
+#
+
+smooth.ppp <- function(X, ..., weights=rep(1, npoints(X)), at="pixels") {
+  .Deprecated("Smooth.ppp", package="spatstat",
+    msg="smooth.ppp is deprecated: use the generic Smooth with a capital S")
+  Smooth(X, ..., weights=weights, at=at)
+}
+
+Smooth <- function(X, ...) {
+  UseMethod("Smooth")
+}
+
+Smooth.solist <- function(X, ...) {
+  solapply(X, Smooth, ...)
+}
+
+Smooth.ppp <- function(X, sigma=NULL, ...,
+                       weights=rep(1, npoints(X)), at="pixels",
+                       edge=TRUE, diggle=FALSE, geometric=FALSE) {
+  verifyclass(X, "ppp")
+  if(!is.marked(X, dfok=TRUE, na.action="fatal"))
+    stop("X should be a marked point pattern", call.=FALSE)
+  X <- coerce.marks.numeric(X)
+  if(!all(is.finite(as.matrix(marks(X)))))
+    stop("Some mark values are Inf, NaN or NA", call.=FALSE)
+  at <- pickoption("output location type", at,
+                   c(pixels="pixels",
+                     points="points"))
+  ## weights
+  weightsgiven <- !missing(weights) && !is.null(weights) 
+  if(weightsgiven) {
+    # convert to numeric
+    if(is.im(weights)) {
+      weights <- safelookup(weights, X) # includes warning if NA
+    } else if(is.expression(weights)) 
+      weights <- eval(weights, envir=as.data.frame(X), enclos=parent.frame())
+    if(length(weights) == 0)
+      weightsgiven <- FALSE
+  }
+  if(weightsgiven) {
+    check.nvector(weights, npoints(X))
+  } else weights <- NULL
+  ## geometric mean smoothing
+  if(geometric) 
+    return(ExpSmoothLog(X, sigma=sigma, ..., at=at,
+                        weights=weights, edge=edge, diggle=diggle))
+  ## determine smoothing parameters
+  ker <- resolve.2D.kernel(sigma=sigma, ...,
+                           x=X, bwfun=bw.smoothppp, allow.zero=TRUE)
+  sigma <- ker$sigma
+  varcov <- ker$varcov
+  ## Diggle's edge correction?
+  if(diggle && !edge) warning("Option diggle=TRUE overridden by edge=FALSE")
+  diggle <- diggle && edge
+  ## 
+  if(ker$cutoff < minnndist(X)) {
+    # very small bandwidth
+    leaveoneout <- resolve.1.default("leaveoneout",
+                                     list(...), list(leaveoneout=TRUE))
+    if(!leaveoneout && at=="points") {
+      warning(paste("Bandwidth is close to zero:",
+                    "original values returned"))
+      Y <- marks(X)
+    } else {
+      warning(paste("Bandwidth is close to zero:",
+                    "nearest-neighbour interpolation performed"))
+      Y <- nnmark(X, ..., k=1, at=at)
+    }
+    return(Y)
+  }
+
+  if(diggle) {
+    ## absorb Diggle edge correction into weights vector
+    edg <- second.moment.calc(X, sigma, what="edge", ..., varcov=varcov)
+    ei <- safelookup(edg, X, warn=FALSE)
+    weights <- if(weightsgiven) weights/ei else 1/ei
+    weights[!is.finite(weights)] <- 0
+    weightsgiven <- TRUE
+  }
+  ## rescale weights to avoid numerical gremlins
+  if(weightsgiven && ((mw <- median(abs(weights))) > 0))
+    weights <- weights/mw
+
+  ## calculate...
+  marx <- marks(X)
+  if(!is.data.frame(marx)) {
+    # ........ vector of marks ...................
+    values <- marx
+    if(is.factor(values)) {
+      warning("Factor valued marks were converted to integers")
+      values <- as.numeric(values)
+    }
+    ## detect constant values
+    ra <- range(values, na.rm=TRUE)
+    if(diff(ra) == 0) {
+      switch(at,
+             points = {
+               result <- values
+             },
+             pixels = {
+               M <- do.call.matched(as.mask, list(w=as.owin(X), ...))
+               result <- as.im(ra[1], M)
+             })
+    } else {
+      switch(at,
+             points={
+               result <-
+                 do.call(smoothpointsEngine,
+                         resolve.defaults(list(x=X,
+                                               values=values, weights=weights,
+                                               sigma=sigma, varcov=varcov,
+                                               edge=FALSE),
+                                          list(...)))
+             },
+             pixels={
+               values.weights <- if(weightsgiven) values * weights else values
+               numerator <-
+                 do.call(density.ppp,
+                         resolve.defaults(list(x=X,
+                                               at="pixels",
+                                               weights = values.weights,
+                                               sigma=sigma, varcov=varcov,
+                                               edge=FALSE),
+                                          list(...)))
+               denominator <-
+                 do.call(density.ppp,
+                         resolve.defaults(list(x=X,
+                                               at="pixels",
+                                               weights = weights,
+                                               sigma=sigma,
+                                               varcov=varcov,
+                                               edge=FALSE),
+                                          list(...)))
+               result <- eval.im(numerator/denominator)
+               ## trap small values of denominator
+               ## trap NaN and +/- Inf values of result, but not NA
+               eps <- .Machine$double.eps
+               nbg <- eval.im(is.infinite(result)
+                              | is.nan(result)
+                              | (denominator < eps))
+               if(any(as.matrix(nbg), na.rm=TRUE)) {
+                 warning(paste("Numerical underflow detected:",
+                               "sigma is probably too small"))
+                 ## l'Hopital's rule
+                 distX <- distmap(X, xy=numerator)
+                 whichnn <- attr(distX, "index")
+                 nnvalues <- eval.im(values[whichnn])
+                 result[nbg] <- nnvalues[nbg]
+               }
+               attr(result, "warnings") <- attr(numerator, "warnings")
+             })
+    }
+  } else {
+    ## ......... data frame of marks ..................
+    ## detect constant columns
+    ra <- apply(marx, 2, range, na.rm=TRUE)
+    isconst <- (apply(ra, 2, diff) == 0)
+    if(anyisconst <- any(isconst)) {
+      oldmarx <- marx
+#      oldX <- X
+      marx <- marx[, !isconst]
+      X <- X %mark% marx
+    }
+    if(any(!isconst)) {
+      ## compute denominator
+      denominator <-
+        do.call(density.ppp,
+                resolve.defaults(list(x=X,
+                                      at=at,
+                                      weights = weights,
+                                      sigma=sigma, varcov=varcov,
+                                      edge=FALSE),
+                                 list(...)))
+      ## compute numerator for each column of marks
+      marx.weights <- if(weightsgiven) marx * weights else marx
+      numerators <-
+        do.call(density.ppp,
+                resolve.defaults(list(x=X,
+                                      at=at,
+                                      weights = marx.weights,
+                                      sigma=sigma, varcov=varcov,
+                                      edge=FALSE),
+                                 list(...)))
+      uhoh <- attr(numerators, "warnings")
+      ## calculate ratios
+      switch(at,
+             points={
+               if(is.null(uhoh)) {
+                 ## numerators is a matrix (or may have dropped to vector)
+                 if(!is.matrix(numerators))
+                   numerators <- matrix(numerators, ncol=1)
+                 ratio <- numerators/denominator
+                 if(any(badpoints <- matrowany(!is.finite(ratio)))) {
+                   whichnnX <- nnwhich(X)
+                   ratio[badpoints,] <-
+                     as.matrix(marx[whichnnX[badpoints], , drop=FALSE])
+                 }
+               } else {
+                 warning("returning original values")
+                 ratio <- marx
+               }
+               result <- as.data.frame(ratio)
+               colnames(result) <- colnames(marx)
+             },
+             pixels={
+               ## numerators is a list of images (or may have dropped to 'im')
+               if(is.im(numerators))
+                 numerators <- list(numerators)
+               ratio <- lapply(numerators, "/", e2=denominator)
+               if(!is.null(uhoh)) {
+                 ## compute nearest neighbour map on same raster
+                 distX <- distmap(X, xy=denominator)
+                 whichnnX <- attr(distX, "index")
+                 ## fix images
+                 for(j in seq_along(ratio)) {
+                   ratj <- ratio[[j]]
+                   valj <- marx[,j]
+                   ratio[[j]] <-
+                     eval.im(ifelseXY(is.finite(ratj), ratj, valj[whichnnX]))
+                 }
+                 attr(ratio, "warnings") <- uhoh
+               }
+               result <- as.solist(ratio)
+               names(result) <- colnames(marx)
+             })
+    } else result <- NULL 
+    if(anyisconst) {
+      partresult <- result
+      switch(at,
+             points = {
+               nX <- npoints(X)
+               result <- matrix(, nX, ncol(oldmarx))
+               if(length(partresult) > 0)
+                 result[,!isconst] <- as.matrix(partresult)
+               result[,isconst] <- rep(ra[1,isconst], each=nX)
+               colnames(result) <- colnames(oldmarx)
+             },
+             pixels = {
+               result <- vector(mode="list", length=ncol(oldmarx))
+               if(length(partresult) > 0) {
+                 result[!isconst] <- partresult
+                 M <- as.owin(partresult[[1]])
+               } else {
+                 M <- do.call.matched(as.mask, list(w=as.owin(X), ...))
+               }
+               result[isconst] <- lapply(ra[1, isconst], as.im, W=M)
+               result <- as.solist(result)
+               names(result) <- colnames(oldmarx)
+             })
+    }
+  }
+  ## wrap up
+  attr(result, "warnings") <-
+    unlist(lapply(result, attr, which="warnings"))
+  attr(result, "sigma") <- sigma
+  attr(result, "varcov") <- varcov
+  return(result)
+}
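+
+## Illustrative use (comment only; 'longleaf' is a marked point pattern
+## dataset shipped with spatstat):
+##   Z <- Smooth(longleaf, sigma=10)               # image of smoothed marks
+##   v <- Smooth(longleaf, sigma=10, at="points")  # leave-one-out values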
+
+
+smoothpointsEngine <- function(x, values, sigma, ...,
+                               weights=NULL, varcov=NULL,
+                               leaveoneout=TRUE,
+                               sorted=FALSE, cutoff=NULL) {
+  debugging <- spatstat.options("developer")
+  stopifnot(is.logical(leaveoneout))
+  #' detect constant values
+  if(diff(range(values, na.rm=TRUE)) == 0) { 
+    result <- values
+    attr(result, "sigma") <- sigma
+    attr(result, "varcov") <- varcov
+    return(result)
+  }
+  #' Contributions from pairs of distinct points
+  #' closer than 8 standard deviations
+  sd <- if(is.null(varcov)) sigma else sqrt(sum(diag(varcov)))
+  if(is.null(cutoff)) 
+    cutoff <- 8 * sd
+  if(debugging)
+    cat(paste("cutoff=", cutoff, "\n"))
+  
+  ## Handle weights that are meant to be null
+  if(length(weights) == 0 || (!is.null(dim(weights)) && nrow(weights) == 0))
+     weights <- NULL
+     
+  # detect very small bandwidth
+  nnd <- nndist(x)
+  nnrange <- range(nnd)
+  if(cutoff < nnrange[1]) {
+    if(leaveoneout && (npoints(x) > 1)) {
+      warning("Very small bandwidth; values of nearest neighbours returned")
+      result <- values[nnwhich(x)]
+    } else {
+      warning("Very small bandwidth; original values returned")
+      result <- values
+    }
+    attr(result, "sigma") <- sigma
+    attr(result, "varcov") <- varcov
+    attr(result, "warnings") <- "underflow"
+    return(result)
+  }
+  if(leaveoneout) {
+    # ensure cutoff includes at least one point
+    cutoff <- max(1.1 * nnrange[2], cutoff)
+  }
+  if(spatstat.options("densityTransform") && spatstat.options("densityC")) {
+    ## .................. experimental C code .....................
+    if(debugging)
+      cat('Using experimental code!\n')
+    npts <- npoints(x)
+    result <- numeric(npts)
+    ## transform to standard coordinates
+    xx <- x$x
+    yy <- x$y
+    if(is.null(varcov)) {
+      xx <- xx/(sqrt(2) * sigma)
+      yy <- yy/(sqrt(2) * sigma)
+    } else {
+      Sinv <- solve(varcov)
+      xy <- cbind(xx, yy) %*% matrixsqrt(Sinv/2)
+      xx <- xy[,1]
+      yy <- xy[,2]
+      sorted <- FALSE
+    }
+    ## cutoff in standard coordinates
+    cutoff <- cutoff/(sqrt(2) * sd)
+    ## sort into increasing order of x coordinate (required by C code)
+    if(!sorted) {
+      oo <- fave.order(xx)
+      xx <- xx[oo]
+      yy <- yy[oo]
+      vv <- values[oo]
+    } else {
+      vv <- values
+    }
+    if(is.null(weights)) {
+      zz <- .C("Gsmoopt",
+               nxy     = as.integer(npts),
+               x       = as.double(xx),
+               y       = as.double(yy),
+               v       = as.double(vv),
+               self    = as.integer(!leaveoneout),
+               rmaxi   = as.double(cutoff),
+               result  = as.double(double(npts)),
+               PACKAGE = "spatstat")
+      if(sorted) result <- zz$result else result[oo] <- zz$result
+    } else {
+      wtsort <- weights[oo]
+      zz <- .C("Gwtsmoopt",
+               nxy     = as.integer(npts),
+               x       = as.double(xx),
+               y       = as.double(yy),
+               v       = as.double(vv),
+               self    = as.integer(!leaveoneout),
+               rmaxi   = as.double(cutoff),
+               weight  = as.double(wtsort),
+               result  = as.double(double(npts)),
+               PACKAGE = "spatstat")
+      if(sorted) result <- zz$result else result[oo] <- zz$result
+    }
+    if(any(nbg <- (is.infinite(result) | is.nan(result)))) {
+      # NaN or +/-Inf can occur if bandwidth is small
+      # Use mark of nearest neighbour (by l'Hopital's rule)
+      result[nbg] <- values[nnwhich(x)[nbg]]
+    }
+  } else if(spatstat.options("densityC")) {
+    # .................. C code ...........................
+    if(debugging)
+      cat('Using standard code.\n')
+    npts <- npoints(x)
+    result <- numeric(npts)
+    # sort into increasing order of x coordinate (required by C code)
+    if(sorted) {
+      xx <- x$x
+      yy <- x$y
+      vv <- values
+    } else {
+      oo <- fave.order(x$x)
+      xx <- x$x[oo]
+      yy <- x$y[oo]
+      vv <- values[oo]
+    }
+    if(is.null(varcov)) {
+      # isotropic kernel
+      if(is.null(weights)) {
+        zz <- .C("smoopt",
+                 nxy     = as.integer(npts),
+                 x       = as.double(xx),
+                 y       = as.double(yy),
+                 v       = as.double(vv),
+                 self    = as.integer(!leaveoneout),
+                 rmaxi   = as.double(cutoff),
+                 sig     = as.double(sd),
+                 result  = as.double(double(npts)),
+                 PACKAGE = "spatstat")
+        if(sorted) result <- zz$result else result[oo] <- zz$result
+      } else {
+        wtsort <- weights[oo]
+        zz <- .C("wtsmoopt",
+                 nxy     = as.integer(npts),
+                 x       = as.double(xx),
+                 y       = as.double(yy),
+                 v       = as.double(vv),
+                 self    = as.integer(!leaveoneout),
+                 rmaxi   = as.double(cutoff),
+                 sig     = as.double(sd),
+                 weight  = as.double(wtsort),
+                 result  = as.double(double(npts)),
+                 PACKAGE = "spatstat")
+        if(sorted) result <- zz$result else result[oo] <- zz$result
+      }
+    } else {
+      # anisotropic kernel
+      Sinv <- solve(varcov)
+      flatSinv <- as.vector(t(Sinv))
+      if(is.null(weights)) {
+        zz <- .C("asmoopt",
+                 nxy     = as.integer(npts),
+                 x       = as.double(xx),
+                 y       = as.double(yy),
+                 v       = as.double(vv),
+                 self    = as.integer(!leaveoneout),
+                 rmaxi   = as.double(cutoff),
+                 sinv    = as.double(flatSinv),
+                 result  = as.double(double(npts)),
+                 PACKAGE = "spatstat")
+        if(sorted) result <- zz$result else result[oo] <- zz$result
+      } else {
+        wtsort <- weights[oo]
+        zz <- .C("awtsmoopt",
+                 nxy     = as.integer(npts),
+                 x       = as.double(xx),
+                 y       = as.double(yy),
+                 v       = as.double(vv),
+                 self    = as.integer(!leaveoneout),
+                 rmaxi   = as.double(cutoff),
+                 sinv    = as.double(flatSinv),
+                 weight  = as.double(wtsort),
+                 result  = as.double(double(npts)),
+                 PACKAGE = "spatstat")
+        if(sorted) result <- zz$result else result[oo] <- zz$result
+      }
+    }
+    if(any(nbg <- (is.infinite(result) | is.nan(result)))) {
+      # NaN or +/-Inf can occur if bandwidth is small
+      # Use mark of nearest neighbour (by l'Hopital's rule)
+      result[nbg] <- values[nnwhich(x)[nbg]]
+    }
+  } else {
+    # previous, partly interpreted code
+    # compute weighted densities
+    if(is.null(weights)) {
+      # weights are implicitly equal to 1
+      numerator <- do.call(density.ppp,
+                         resolve.defaults(list(x=x, at="points"),
+                                          list(weights = values),
+                                          list(sigma=sigma, varcov=varcov),
+                                          list(leaveoneout=leaveoneout),
+                                          list(sorted=sorted),
+                                          list(...),
+                                          list(edge=FALSE)))
+      denominator <- do.call(density.ppp,
+                             resolve.defaults(list(x=x, at="points"),
+                                              list(sigma=sigma, varcov=varcov),
+                                              list(leaveoneout=leaveoneout),
+                                              list(sorted=sorted),
+                                              list(...),
+                                              list(edge=FALSE)))
+    } else {
+      numerator <- do.call(density.ppp,
+                           resolve.defaults(list(x=x, at="points"),
+                                            list(weights = values * weights),
+                                            list(sigma=sigma, varcov=varcov),
+                                            list(leaveoneout=leaveoneout),
+                                            list(sorted=sorted),
+                                            list(...),
+                                            list(edge=FALSE)))
+      denominator <- do.call(density.ppp,
+                             resolve.defaults(list(x=x, at="points"),
+                                              list(weights = weights),
+                                              list(sigma=sigma, varcov=varcov),
+                                              list(leaveoneout=leaveoneout),
+                                              list(sorted=sorted),
+                                              list(...),
+                                              list(edge=FALSE)))
+    }
+    if(is.null(uhoh <- attr(numerator, "warnings"))) {
+      result <- numerator/denominator
+      result <- ifelseXB(is.finite(result), result, NA)
+    } else {
+      warning("returning original values")
+      result <- values
+      attr(result, "warnings") <- uhoh
+    }
+  }
+  # pack up and return
+  attr(result, "sigma") <- sigma
+  attr(result, "varcov") <- varcov
+  return(result)
+}
+
+
+markmean <- function(X, ...) {
+  stopifnot(is.marked(X))
+  Y <- Smooth(X, ...)
+  return(Y)
+}
+
+markvar  <- function(X, sigma=NULL, ..., weights=NULL, varcov=NULL) {
+  stopifnot(is.marked(X))
+  if(is.expression(weights)) 
+    weights <- eval(weights, envir=as.data.frame(X), enclos=parent.frame())
+  E1 <- Smooth(X, sigma=sigma, varcov=varcov, weights=weights, ...)
+  X2 <- X %mark% marks(X)^2
+  ## ensure smoothing bandwidth is the same!
+  sigma <- attr(E1, "sigma")
+  varcov <- attr(E1, "varcov")
+  E2 <- Smooth(X2, sigma=sigma, varcov=varcov, weights=weights, ...)
+  V <- eval.im(E2 - E1^2)
+  return(V)
+}
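+
+## Illustrative use (comment only):
+##   V <- markvar(longleaf, sigma=10)   # local variance of the tree diameters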
+
+bw.smoothppp <- function(X, nh=spatstat.options("n.bandwidth"),
+                       hmin=NULL, hmax=NULL, warn=TRUE) {
+  stopifnot(is.ppp(X))
+  stopifnot(is.marked(X))
+  X <- coerce.marks.numeric(X)
+  # rearrange in ascending order of x-coordinate (for C code)
+  X <- X[fave.order(X$x)]
+  #
+  marx <- marks(X)
+  dimmarx <- dim(marx)
+  if(!is.null(dimmarx))
+    marx <- as.matrix(as.data.frame(marx))
+  # determine a range of bandwidth values
+#  n <- npoints(X)
+  if(is.null(hmin) || is.null(hmax)) {
+    W <- Window(X)
+#    a <- area(W)
+    d <- diameter(as.rectangle(W))
+    # Stoyan's rule of thumb 
+    stoyan <- bw.stoyan(X)
+    # rule of thumb based on nearest-neighbour distances
+    nnd <- nndist(X)
+    nnd <- nnd[nnd > 0]
+    if(is.null(hmin)) {
+      hmin <- max(1.1 * min(nnd), stoyan/5)
+      hmin <- min(d/8, hmin)
+    }
+    if(is.null(hmax)) {
+      hmax <- max(stoyan * 20, 3 * mean(nnd), hmin * 2)
+      hmax <- min(d/2, hmax)
+    }
+  } else stopifnot(hmin < hmax)
+  #
+  h <- geomseq(from=hmin, to=hmax, length.out=nh)
+  cv <- numeric(nh)
+  # 
+  # compute cross-validation criterion
+  for(i in seq_len(nh)) {
+    yhat <- Smooth(X, sigma=h[i], at="points", leaveoneout=TRUE,
+                   sorted=TRUE)
+    if(!is.null(dimmarx))
+      yhat <- as.matrix(as.data.frame(yhat))
+    cv[i] <- mean((marx - yhat)^2)
+  }
+
+  # optimize
+  iopt <- which.min(cv)
+#  hopt <- h[iopt]
+  #
+  if(warn && (iopt == nh || iopt == 1)) 
+    warning(paste("Cross-validation criterion was minimised at",
+                  if(iopt == 1) "left-hand" else "right-hand",
+                  "end of interval",
+                  paste(prange(signif(c(hmin, hmax), 3)), ";", sep=""),
+                  "use arguments hmin, hmax to specify a wider interval"),
+            call.=FALSE)
+  #
+  result <- bw.optim(cv, h, iopt,
+                     hname="sigma",
+                     creator="bw.smoothppp",
+                     criterion="Least Squares Cross-Validation",
+                     unitname=unitname(X))
+  return(result)
+}
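+
+## Illustrative example (not run): select sigma by leave-one-out
+## least-squares cross-validation, inspect the criterion, then smooth.
+##   b <- bw.smoothppp(longleaf)
+##   plot(b)                        # CV criterion against candidate sigma
+##   Z <- Smooth(longleaf, sigma=b)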
+
+smoothcrossEngine <- function(Xdata, Xquery, values, sigma, ...,
+                              weights=NULL, varcov=NULL,
+                              sorted=FALSE) {
+#  if(is.null(varcov)) {
+#    const <- 1/(2 * pi * sigma^2)
+#  } else {
+#    detSigma <- det(varcov)
+#    Sinv <- solve(varcov)
+#    const <- 1/(2 * pi * sqrt(detSigma))
+#  }
+  if(!is.null(dim(weights)))
+    stop("weights must be a vector")
+
+  if(npoints(Xquery) == 0 || npoints(Xdata) == 0) {
+    if(is.null(dim(values))) return(rep(NA, npoints(Xquery)))
+    nuttin <- matrix(NA, nrow=npoints(Xquery), ncol=ncol(values))
+    colnames(nuttin) <- colnames(values)
+    return(nuttin)
+  }
+  
+  ## Contributions from pairs of distinct points
+  ## closer than 8 standard deviations
+  sd <- if(is.null(varcov)) sigma else sqrt(sum(diag(varcov)))
+  cutoff <- 8 * sd
+
+  ## detect very small bandwidth
+  nnc <- nncross(Xquery, Xdata)
+  if(cutoff < min(nnc$dist)) {
+    if(npoints(Xdata) > 1) {
+      warning("Very small bandwidth; values of nearest neighbours returned")
+      nw <- nnc$which
+      result <- if(is.null(dim(values))) values[nw] else values[nw,,drop=FALSE]
+    } else {
+      warning("Very small bandwidth; original values returned")
+      result <- values
+    }
+    attr(result, "sigma") <- sigma
+    attr(result, "varcov") <- varcov
+    attr(result, "warnings") <- "underflow"
+    return(result)
+  }
+  
+  ## Handle weights that are meant to be null
+  if(length(weights) == 0)
+     weights <- NULL
+     
+  ## handle multiple columns of values
+  if(is.matrix(values) || is.data.frame(values)) {
+    k <- ncol(values)
+    stopifnot(nrow(values) == npoints(Xdata))
+    values <- as.data.frame(values)
+    result <- matrix(, npoints(Xquery), k)
+    colnames(result) <- colnames(values)
+    if(!sorted) {
+      ood <- fave.order(Xdata$x)
+      Xdata <- Xdata[ood]
+      values <- values[ood, , drop=FALSE]
+      if(!is.null(weights)) weights <- weights[ood]
+      ooq <- fave.order(Xquery$x)
+      Xquery <- Xquery[ooq]
+    }
+    for(j in 1:k) 
+      result[,j] <- smoothcrossEngine(Xdata, Xquery, values[,j],
+                                      sigma=sigma, varcov=varcov,
+                                      weights=weights, sorted=TRUE,
+                                      ...)
+    if(!sorted) {
+      sortresult <- result
+      result[ooq,] <- sortresult
+    }
+    attr(result, "sigma") <- sigma
+    attr(result, "varcov") <- varcov
+    return(result)
+  }
+
+  ## values must be a vector
+  stopifnot(length(values) == npoints(Xdata) || length(values) == 1)
+  if(length(values) == 1) values <- rep(values, npoints(Xdata))
+
+  ndata <- npoints(Xdata)
+  nquery <- npoints(Xquery)
+  result <- numeric(nquery) 
+  ## coordinates and values
+  xq <- Xquery$x
+  yq <- Xquery$y
+  xd <- Xdata$x
+  yd <- Xdata$y
+  vd <- values
+  if(!sorted) {
+    ## sort into increasing order of x coordinate (required by C code)
+    ooq <- fave.order(Xquery$x)
+    xq <- xq[ooq]
+    yq <- yq[ooq]
+    ood <- fave.order(Xdata$x)
+    xd <- xd[ood]
+    yd <- yd[ood]
+    vd <- vd[ood] 
+  }
+  if(is.null(varcov)) {
+    ## isotropic kernel
+    if(is.null(weights)) {
+      zz <- .C("crsmoopt",
+               nquery  = as.integer(nquery),
+               xq      = as.double(xq),
+               yq      = as.double(yq),
+               ndata   = as.integer(ndata),
+               xd      = as.double(xd),
+               yd      = as.double(yd),
+               vd      = as.double(vd),
+               rmaxi   = as.double(cutoff),
+               sig     = as.double(sd),
+               result  = as.double(double(nquery)),
+               PACKAGE = "spatstat")
+      if(sorted) result <- zz$result else result[ooq] <- zz$result
+    } else {
+      wtsort <- if(sorted) weights else weights[ood]
+      zz <- .C("wtcrsmoopt",
+               nquery  = as.integer(nquery),
+               xq      = as.double(xq),
+               yq      = as.double(yq),
+               ndata   = as.integer(ndata),
+               xd      = as.double(xd),
+               yd      = as.double(yd),
+               vd      = as.double(vd),
+               wd      = as.double(wtsort),
+               rmaxi   = as.double(cutoff),
+               sig     = as.double(sd),
+               result  = as.double(double(nquery)),
+               PACKAGE = "spatstat")
+      if(sorted) result <- zz$result else result[ooq] <- zz$result
+    }
+  } else {
+    # anisotropic kernel
+    Sinv <- solve(varcov)
+    flatSinv <- as.vector(t(Sinv))
+    if(is.null(weights)) {
+      zz <- .C("acrsmoopt",
+               nquery  = as.integer(nquery),
+               xq      = as.double(xq),
+               yq      = as.double(yq),
+               ndata   = as.integer(ndata),
+               xd      = as.double(xd),
+               yd      = as.double(yd),
+               vd      = as.double(vd),
+               rmaxi   = as.double(cutoff),
+               sinv    = as.double(flatSinv),
+               result  = as.double(double(nquery)),
+               PACKAGE = "spatstat")
+      if(sorted) result <- zz$result else result[ooq] <- zz$result
+    } else {
+      wtsort <- if(sorted) weights else weights[ood]
+      zz <- .C("awtcrsmoopt",
+               nquery  = as.integer(nquery),
+               xq      = as.double(xq),
+               yq      = as.double(yq),
+               ndata   = as.integer(ndata),
+               xd      = as.double(xd),
+               yd      = as.double(yd),
+               vd      = as.double(vd),
+               wd      = as.double(wtsort),
+               rmaxi   = as.double(cutoff),
+               sinv    = as.double(flatSinv),
+               result  = as.double(double(nquery)),
+               PACKAGE = "spatstat")
+      if(sorted) result <- zz$result else result[ooq] <- zz$result
+    }
+  }
+  if(any(nbg <- (is.infinite(result) | is.nan(result)))) {
+    # NaN or +/-Inf can occur if bandwidth is small
+    # Use mark of nearest neighbour (by l'Hopital's rule)
+    result[nbg] <- values[nnc$which[nbg]]
+  }
+  # pack up and return
+  attr(result, "sigma") <- sigma
+  attr(result, "varcov") <- varcov
+  return(result)
+}
+
+ExpSmoothLog <- function(X, ..., at=c("pixels", "points"), weights=NULL) {
+  verifyclass(X, "ppp")
+  at <- match.arg(at)
+  if(!is.null(weights)) 
+    check.nvector(weights, npoints(X))
+  X <- coerce.marks.numeric(X)
+  marx <- marks(X)
+  d <- dim(marx)
+  if(!is.null(d) && d[2] > 1) {
+    switch(at,
+           points = {
+             Z <- lapply(unstack(X), ExpSmoothLog, ...,
+                         at=at, weights=weights)
+             Z <- do.call(data.frame, Z)
+           },
+           pixels = {
+             Z <- solapply(unstack(X), ExpSmoothLog, ...,
+                           at=at, weights=weights)
+           })
+    return(Z)
+  }
+  # vector or single column of numeric marks
+  v <- as.numeric(marx)
+  vmin <- min(v)
+  if(vmin < 0) stop("Negative values in geometric mean smoothing",
+                       call.=FALSE)
+  Y <- X %mark% log(v)
+  if(vmin > 0) {
+    Z <- Smooth(Y, ..., at=at, weights=weights)
+  } else {
+    yok <- is.finite(marks(Y))
+    YOK <- Y[yok]
+    weightsOK <- if(is.null(weights)) NULL else weights[yok]
+    switch(at,
+           points = {
+             Z <- rep(-Inf, npoints(X))
+             Z[yok] <- Smooth(YOK, ..., at=at, weights=weightsOK)
+           },
+           pixels = {
+             isfinite <- nnmark(Y %mark% yok, ...)
+             support <- solutionset(isfinite)
+             Window(YOK) <- support
+             Z <- as.im(-Inf, W=Window(Y), ...)
+             Z[support] <- Smooth(YOK, ..., at=at, weights=weightsOK)[]
+           })
+  }
+  return(exp(Z))
+}
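+
+## Illustrative example (not run): ExpSmoothLog computes the geometric-mean
+## smoother exp(Smooth(log(marks))); marks must be non-negative, and zero
+## marks are handled by the -Inf branch above.
+##   Z <- ExpSmoothLog(longleaf, sigma=10)   # sigma=10 is arbitrary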
diff --git a/R/smoothfun.R b/R/smoothfun.R
new file mode 100644
index 0000000..af93ac7
--- /dev/null
+++ b/R/smoothfun.R
@@ -0,0 +1,59 @@
+##
+## smoothfun.R
+##
+## Exact 'funxy' counterpart of Smooth.ppp
+##
+##  $Revision: 1.2 $ $Date: 2016/02/11 10:17:12 $
+
+
+Smoothfun <- function(X, ...) {
+  UseMethod("Smoothfun")
+}
+
+Smoothfun.ppp <- function(X, sigma=NULL, ...,
+                          weights=NULL, edge=TRUE, diggle=FALSE) {
+  verifyclass(X, "ppp")
+  if(!is.marked(X, dfok=TRUE))
+    stop("X should be a marked point pattern")
+  stuff <- list(X=X, weights=weights, edge=edge, diggle=diggle)
+  X <- coerce.marks.numeric(X)
+  ## determine smoothing parameters
+  ker <- resolve.2D.kernel(sigma=sigma, ...,
+                           x=X, bwfun=bw.smoothppp, allow.zero=TRUE)
+  stuff <- append(stuff, ker[c("sigma", "varcov")])
+  ##
+  g <- function(x, y=NULL) {
+    Y <- xy.coords(x, y)[c("x", "y")]
+    with(stuff,
+         smoothcrossEngine(Xdata=X,
+                           Xquery=as.ppp(Y, X$window),
+                           values=marks(X),
+                           sigma=sigma,
+                           varcov=varcov, 
+                           weights=weights,
+                           edge=edge, diggle=diggle))
+  }
+  g <- funxy(g, as.rectangle(as.owin(X)))
+  class(g) <- c("Smoothfun", class(g))
+  return(g)
+}
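+
+## Illustrative example (not run): a Smoothfun is usable as a function
+## of location; (120, 80) is an arbitrary point in the longleaf window.
+##   f <- Smoothfun(longleaf, sigma=15)
+##   f(120, 80)          # smoothed mark value at that location
+##   plot(as.im(f))      # via the as.im method defined below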
+
+print.Smoothfun <- function(x, ...) {
+  cat("function(x,y)", "which returns",
+      "values", "interpolated from", fill=TRUE)
+  X <- get("X", envir=environment(x))
+  print(X, ...)
+  return(invisible(NULL))
+}
+
+## Method for as.im
+## (enables plot.funxy, persp.funxy, contour.funxy to work for this class)
+
+as.im.Smoothfun <- function(X, W=NULL, ...) {
+  stuff <- get("stuff", envir=environment(X))
+  if(!is.null(W)) stuff$X <- stuff$X[W]
+  do.call(Smooth, resolve.defaults(list(...), stuff))
+}
+
+
+  
diff --git a/R/smoothfv.R b/R/smoothfv.R
new file mode 100755
index 0000000..e08154a
--- /dev/null
+++ b/R/smoothfv.R
@@ -0,0 +1,54 @@
+#
+#  smoothfv.R
+#
+#   $Revision: 1.13 $   $Date: 2014/01/15 10:03:35 $
+#
+  
+smooth.fv <- function(x, which="*", ..., 
+                      method=c("smooth.spline", "loess"),
+                      xinterval=NULL) {
+  .Deprecated("Smooth.fv", package="spatstat",
+     msg="smooth.fv is deprecated: use the generic Smooth with a capital S")
+  Smooth(x, which=which, ..., method=method, xinterval=xinterval)
+}
+  
+Smooth.fv <- function(X, which="*", ..., 
+                      method=c("smooth.spline", "loess"),
+                      xinterval=NULL) {
+  x <- X
+  stopifnot(is.character(which))
+  method <- match.arg(method)
+  if(!is.null(xinterval))
+    check.range(xinterval) 
+  if(length(which) == 1 && which %in% .Spatstat.FvAbbrev) {
+    if(which == ".x")
+      stop("Cannot smooth the function argument")
+    which <- fvnames(x, which)
+  }
+  if(any(nbg <- !(which %in% names(x)))) 
+    stop(paste("Unrecognised column",
+               ngettext(sum(nbg), "name", "names"),
+               commasep(sQuote(which[nbg])), 
+               "in argument", sQuote("which")))
+  xx <- x[[fvnames(x, ".x")]]
+  # process each column of function values
+  for(ynam in which) {
+    yy <- x[[ynam]]
+    ok <- is.finite(yy)
+    if(!is.null(xinterval))
+      ok <- ok & inside.range(xx, xinterval)
+    switch(method,
+           smooth.spline = {
+             ss <- smooth.spline(xx[ok], yy[ok], ...)
+             yhat <- predict(ss, xx[ok])$y
+           },
+           loess = {
+             df <- data.frame(x=xx[ok], y=yy[ok])
+             lo <- loess(y ~ x, df, ...)
+             yhat <- predict(lo, df[,"x", drop=FALSE])
+           })
+    yy[ok] <- yhat
+    x[[ynam]] <- yy
+  }
+  return(x)
+}
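+
+#  Illustrative example (not run): smooth the isotropic-correction
+#  column of an estimated K-function.
+#    K <- Kest(cells)
+#    Ks <- Smooth(K, which="iso", method="loess")
+#    plot(Ks)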
diff --git a/R/softcore.R b/R/softcore.R
new file mode 100755
index 0000000..fa6e788
--- /dev/null
+++ b/R/softcore.R
@@ -0,0 +1,115 @@
+#
+#
+#    softcore.S
+#
+#    $Revision: 2.15 $   $Date: 2016/02/16 01:39:12 $
+#
+#    Soft core processes.
+#
+#    Softcore()    create an instance of a soft core process
+#                 [an object of class 'interact']
+#
+#
+# -------------------------------------------------------------------
+#
+
+Softcore <- local({
+
+  BlankSoftcore <- 
+  list(
+       name     = "Soft core process",
+       creator  = "Softcore",
+       family   = "pairwise.family",  # evaluated later
+       pot      = function(d, par) {
+         sig0 <- par$sigma0
+         if(is.na(sig0)) {
+           p <- -d^(-2/par$kappa)
+         } else {
+           # expand around sigma0 and set large negative numbers to -Inf
+           drat <- d/sig0
+           p <- -drat^(-2/par$kappa)
+           p[p < -25] <- -Inf
+         }
+         return(p)
+       },
+       par      = list(kappa = NULL, sigma0=NA),  # filled in later
+       parnames = c("Exponent kappa", "Initial approximation to sigma"),
+       selfstart = function(X, self) {
+         # self starter for Softcore
+         if(npoints(X) < 2) {
+           # not enough points to make any decisions
+           return(self)
+         }
+         md <- minnndist(X)
+         if(md == 0) {
+           warning(paste("Pattern contains duplicated points:",
+                         "impossible under Softcore model"))
+           return(self)
+         }
+         kappa <- self$par$kappa
+         if(!is.na(sigma0 <- self$par$sigma0)) {
+           # value fixed by user or previous invocation
+           # check it
+           if((md/sigma0)^(-2/kappa) > 25)
+             warning(paste("Initial approximation sigma0 is too large;",
+                           "some data points will have zero probability"))
+           return(self)
+         }
+         # take sigma0 = minimum interpoint distance
+         Softcore(kappa=kappa, sigma0=md)
+       },
+       init     = function(self) {
+         kappa <- self$par$kappa
+         if(!is.numeric(kappa) || length(kappa) != 1 ||
+            kappa <= 0 || kappa >= 1)
+           stop(paste("Exponent kappa must be a",
+                      "positive number less than 1"))
+       },
+       update = NULL,  # default OK
+       print = NULL,    # default OK
+       interpret =  function(coeffs, self) {
+         theta <- as.numeric(coeffs[1])
+         sigma <- theta^(self$par$kappa/2)
+         if(!is.na(sig0 <- self$par$sigma0))
+           sigma <- sigma * sig0
+         return(list(param=list(sigma=sigma),
+                     inames="interaction parameter sigma",
+                     printable=signif(sigma)))
+       },
+       valid = function(coeffs, self) {
+         theta <- coeffs[1]
+         return(is.finite(theta) && (theta >= 0))
+       },
+       project = function(coeffs, self) {
+         if((self$valid)(coeffs, self)) return(NULL) else return(Poisson())
+       },
+       irange = function(self, coeffs=NA, epsilon=0, ...) {
+         # distance d beyond which log(interaction factor) <= epsilon
+         if(anyNA(coeffs) || epsilon == 0)
+           return(Inf)
+         theta <- as.numeric(coeffs[1])
+         kappa <- self$par$kappa
+         sig0  <- self$par$sigma0
+         if(is.na(sig0)) sig0 <- 1
+         return(sig0 * (theta/epsilon)^(kappa/2))
+       },
+       Mayer=function(coeffs, self) {
+         # second Mayer cluster integral
+         kappa <- self$par$kappa
+         sigma <- (self$interpret)(coeffs, self)$param$sigma
+         return(pi * (sigma^2) * gamma(1 - kappa))
+       },
+       version=NULL # filled in later
+  )
+  class(BlankSoftcore) <- "interact"
+
+  Softcore <- function(kappa, sigma0=NA) {
+    instantiate.interact(BlankSoftcore, list(kappa=kappa, sigma0=sigma0))
+  }
+
+  Softcore <- intermaker(Softcore, BlankSoftcore)
+  
+  Softcore
+})
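+
+#  Illustrative example (not run): the interaction object is passed to
+#  ppm(); kappa=0.5 is an arbitrary choice of the exponent.
+#    fit <- ppm(cells ~ 1, Softcore(kappa=0.5))
+#    fit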
+
+                  
diff --git a/R/solist.R b/R/solist.R
new file mode 100644
index 0000000..09ebfa4
--- /dev/null
+++ b/R/solist.R
@@ -0,0 +1,219 @@
+##
+## solist.R
+##
+## Methods for class `solist' (spatial object list)
+##
+##      and related classes 'anylist', 'ppplist', 'imlist'
+##
+## plot.solist is defined in plot.solist.R
+##
+## $Revision: 1.15 $ $Date: 2017/06/05 10:31:58 $
+
+anylist <- function(...) {
+  x <- list(...)
+  class(x) <- c("anylist", "listof", class(x))
+  return(x)
+}
+
+print.anylist <- function (x, ...) {
+  ll <- length(x)
+  if(ll == 0) {
+    splat("(Zero length list)")
+    return(invisible(NULL))
+  }
+  nn <- names(x)
+  if (length(nn) != ll) 
+    nn <- paste("Component", seq.int(ll))
+  spaceok <- waxlyrical('space')
+  for (i in seq_len(ll)) {
+    splat(paste0(nn[i], ":"))
+    print(x[[i]], ...)
+    if(spaceok && i < ll) cat("\n")
+  }
+  return(invisible(NULL))
+}
+
+as.anylist <- function(x) {
+  if(inherits(x, "anylist")) return(x)
+  if(!is.list(x))
+    x <- list(x)
+  class(x) <- c("anylist", "listof", class(x))
+  return(x)
+}
+  
+"[.anylist" <- function(x, i, ...) {
+  cl <- oldClass(x)
+  ## invoke list method
+  y <- NextMethod("[")
+  if(length(y) == 0) return(list())
+  class(y) <- cl
+  return(y)
+}
+
+"[<-.anylist" <- function(x, i, value) {
+  as.anylist(NextMethod("[<-"))
+}
+
+summary.anylist <- function(object, ...) {
+  as.anylist(lapply(object, summary, ...))
+}
+
+pool.anylist <- function(x, ...) {
+  do.call(pool, append(x, list(...)))
+}
+
+## .................... solist .............................
+
+is.sob <- local({
+  ## test whether x is a spatial object suitable for solist
+  sobjectclasses <- c("ppp", "psp", "im", "owin", 
+                      "quad", "tess", "msr",
+                      "quadratcount", "quadrattest", 
+                      "layered",
+                      "funxy", "distfun", "nnfun", 
+                      "lpp", "linnet", "linfun",      
+                      "influence.ppm", "leverage.ppm")
+  # Note 'linim' inherits 'im'
+  #      'dfbetas.ppm' inherits 'msr'
+
+  is.sob <- function(x) { inherits(x, what=sobjectclasses) }
+  is.sob
+})
+  
+solist <- function(..., check=TRUE, promote=TRUE, demote=FALSE) {
+  stuff <- list(...)
+  if((check || demote) && !all(sapply(stuff, is.sob))) {
+    if(demote)
+      return(as.anylist(stuff))
+    stop("Some arguments of solist() are not 2D spatial objects")
+  }
+  class(stuff) <- c("solist", "anylist", "listof", class(stuff))
+  if(promote) {
+    if(all(unlist(lapply(stuff, is.ppp)))) {
+      class(stuff) <- c("ppplist", class(stuff))
+    } else if(all(unlist(lapply(stuff, is.im)))) {
+      class(stuff) <- c("imlist", class(stuff))
+    }
+  }
+  return(stuff)
+}
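+
+## Illustrative examples (not run) of the promotion/demotion rules above:
+##   solist(cells, japanesepines)     # all 'ppp': gains class 'ppplist'
+##   solist(cells, density(cells))    # mixed types: plain 'solist'
+##   solist(1:3, demote=TRUE)         # not spatial: demoted to 'anylist'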
+
+as.solist <- function(x, ...) {
+  if(inherits(x, "solist") && length(list(...)) == 0)
+    return(x)
+  if(!is.list(x) || is.sob(x))
+    x <- list(x)
+  return(do.call(solist, append(x, list(...))))
+}
+
+print.solist <- function (x, ...) {
+  what <- if(inherits(x, "ppplist")) "point patterns" else
+          if(inherits(x, "imlist")) "pixel images" else "spatial objects"
+  splat(paste("List of", what))
+  parbreak()
+  NextMethod("print")
+}
+
+
+"[.solist" <- function(x, i, ...) {
+  cl <- oldClass(x)
+  if(!missing(i) && is.owin(i)) {
+    ## spatial subset
+    y <- lapply(unclass(x), "[", i=i)
+  } else {
+    ## invoke list method
+    y <- NextMethod("[")
+  }
+  if(length(y) == 0) return(list())
+  class(y) <- cl
+  return(y)
+}
+  
+"[<-.solist" <- function(x, i, value) {
+  ## invoke list method
+  y <- NextMethod("[<-")
+  ## check again
+  return(do.call(solist, y))
+}
+  
+summary.solist <- function(object, ...) {
+  x <- lapply(object, summary, ...)
+  attr(x, "otype") <-
+    if(inherits(object, "ppplist")) "ppp" else
+    if(inherits(object, "imlist")) "im" else ""
+  class(x) <- c("summary.solist", "anylist")
+  x
+}
+
+print.summary.solist <- function(x, ...) {
+  what <- switch(attr(x, "otype"),
+                 ppp="point patterns",
+                 im="pixel images",
+                 "spatial objects")
+  splat("Summary of", length(x), what)
+  parbreak()
+  NextMethod("print")
+}
+
+as.layered.solist <- function(X) {
+  layered(LayerList=X)
+}
+
+#'  ----- ppplist and imlist ----------------------------
+#'  for efficiency only
+
+as.ppplist <- function(x, check=TRUE) {
+  if(check) {
+     x <- as.solist(x)
+     if(inherits(x, "ppplist"))
+       return(x)
+     stop("some entries are not point patterns")
+  }
+  class(x) <- unique(c("ppplist", "solist", "anylist", "listof", class(x)))
+  return(x)
+}
+
+as.imlist <- function(x, check=TRUE) {
+  if(check) {
+     x <- as.solist(x)
+     if(inherits(x, "imlist"))
+       return(x)
+     stop("some entries are not images")
+  }
+  class(x) <- unique(c("imlist", "solist", "anylist", "listof", class(x)))
+  return(x)
+}
+
+# --------------- counterparts of 'lapply' --------------------
+
+anylapply <- function(X, FUN, ...) {
+  v <- lapply(X, FUN, ...)
+  return(as.anylist(v))
+}
+
+solapply <- function(X, FUN, ..., check=TRUE, promote=TRUE, demote=FALSE) {
+  v <- lapply(X, FUN, ...)
+  u <- as.solist(v, check=check, promote=promote, demote=demote)
+  return(u)
+}
+
+density.ppplist <- function(x, ..., se=FALSE) {
+  y <- lapply(x, density, ..., se=se)
+  if(!se) return(as.solist(y, demote=TRUE))
+  y.est <- lapply(y, getElement, name="estimate")
+  y.se  <- lapply(y, getElement, name="SE")
+  z <- list(estimate = as.solist(y.est, demote=TRUE),
+            SE       = as.solist(y.se,  demote=TRUE))
+  return(z)
+}
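+
+## Illustrative example (not run): densities of a list of patterns,
+## optionally with standard errors.
+##   density(solist(cells, japanesepines), sigma=0.1)
+##   density(solist(cells), sigma=0.1, se=TRUE)   # list(estimate, SE)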
+
+expandSpecialLists <- function(x, special="solist") {
+  ## x is a list which may include entries which are lists, of class 'special'
+  ## unlist these entries only
+  hit <- sapply(x, inherits, what=special) 
+  if(!any(hit)) return(x)
+  # wrap each *non*-special entry in list()
+  x[!hit] <- lapply(x[!hit], list)
+  # now strip one layer of list() from all entries
+  return(unlist(x, recursive=FALSE))
+}
diff --git a/R/sparse3Darray.R b/R/sparse3Darray.R
new file mode 100644
index 0000000..51206b3
--- /dev/null
+++ b/R/sparse3Darray.R
@@ -0,0 +1,916 @@
+#'
+#' sparse3Darray.R
+#'
+#' Sparse 3D arrays represented as list(i,j,k,x)
+#' 
+#' $Revision: 1.25 $  $Date: 2017/07/13 02:01:19 $
+#'
+
+sparse3Darray <- function(i=integer(0), j=integer(0), k=integer(0),
+                          x=numeric(0),
+                          dims=c(max(i),max(j),max(k)),
+                          dimnames=NULL, strict=FALSE, nonzero=FALSE) {
+  dat <- data.frame(i, j, k, x)
+  if(typeof(x) == "complex")
+    warn.once("sparse.complex",
+              "complex-valued sparse 3D arrays are supported in spatstat,",
+              "but complex-valued sparse matrices",
+              "are not yet supported by the Matrix package")
+  stopifnot(length(dims) == 3)
+  dims <- as.integer(dims)
+  if(!all(inside.range(i, c(1, dims[1])))) stop("indices i are outside range")
+  if(!all(inside.range(j, c(1, dims[2])))) stop("indices j are outside range")
+  if(!all(inside.range(k, c(1, dims[3])))) stop("indices k are outside range")
+  if(!is.null(dimnames)) {
+    stopifnot(is.list(dimnames))
+    stopifnot(length(dimnames) == 3)
+    notnull <- !sapply(dimnames, is.null)
+    dimnames[notnull] <- lapply(dimnames[notnull], as.character)
+  }
+  if(nonzero || strict) {
+    #' drop zeroes
+    ok <- (x != RelevantZero(x))
+    dat <- dat[ok, , drop=FALSE]
+  }
+  if(strict) {
+    #' arrange in 'R order'
+    dat <- dat[with(dat, order(k,j,i)), , drop=FALSE]
+    #' duplicates will be adjacent
+    dup <- with(dat, c(FALSE, diff(i) == 0 & diff(j) == 0 & diff(k) == 0))
+    if(any(dup)) {
+      #' accumulate values at the same array location
+      retain <- !dup
+      newrow <- cumsum(retain)
+      newx <- as(tapply(dat$x, newrow, sum), typeof(dat$x))
+      newdat <- dat[retain,,drop=FALSE]
+      newdat$x <- newx
+      dat <- newdat
+    }
+  }
+  result <- append(as.list(dat),
+                   list(dim=dims, dimnames=dimnames))
+  class(result) <- "sparse3Darray"
+  return(result)
+}
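+
+#' Illustrative example (not run): a 3 x 3 x 2 array with two
+#' nonzero entries.
+#'   M <- sparse3Darray(i=c(1,2), j=c(1,3), k=c(2,2), x=c(5,7),
+#'                      dims=c(3,3,2))
+#'   as.array(M)[2,3,2]    # 7; all unlisted cells are zero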
+
+as.sparse3Darray <- function(x, ...) {
+  if(inherits(x, "sparse3Darray")) {
+    y <- x
+  } else if(inherits(x, c("matrix", "sparseMatrix"))) {
+    z <- as(x, Class="TsparseMatrix")
+    dn <- dimnames(x)
+    dn <- if(is.null(dn)) NULL else c(dn, list(NULL))
+    one <- if(length(z@i) > 0) 1L else integer(0)
+    y <- sparse3Darray(i=z@i + 1L, j=z@j + 1L, k=one, x=z@x,
+                       dims=c(dim(x), 1L), dimnames=dn)
+  } else if(is.array(x)) {
+    stopifnot(length(dim(x)) == 3)
+    dimx <- dim(x)
+    if(prod(dimx) == 0) {
+      y <- sparse3Darray(, dims=dimx, dimnames=dimnames(x))
+    } else {
+      ijk <- which(x != RelevantZero(x), arr.ind=TRUE)
+      ijk <- cbind(as.data.frame(ijk), x[ijk])
+      y <- sparse3Darray(i=ijk[,1L], j=ijk[,2L], k=ijk[,3L], x=ijk[,4L],
+                         dims=dimx, dimnames=dimnames(x))
+    }
+  } else if(inherits(x, "sparseVector")) {
+    one <- if(length(x@i) > 0) 1L else integer(0)
+    y <- sparse3Darray(i=x@i, j=one, k=one, x=x@x,
+                       dims=c(x@length, 1L, 1L))
+  } else if(is.null(dim(x)) && is.atomic(x)) {
+    n <- length(x)
+    dn <- names(x)
+    if(!is.null(dn)) dn <- list(dn, NULL, NULL)
+    one <- if(n > 0) 1L else integer(0)
+    y <- sparse3Darray(i=seq_len(n), j=one, k=one, x=x,
+                       dims=c(n, 1L, 1L), dimnames=dn)
+    
+  } else if(is.list(x) && length(x) > 0) {
+    n <- length(x)
+    if(all(sapply(x, is.matrix))) {
+      z <- Reduce(abind, x)
+      y <- as.sparse3Darray(z)
+    } else if(all(sapply(x, inherits, what="sparseMatrix"))) {
+      dimlist <- unique(lapply(x, dim))
+      if(length(dimlist) > 1) stop("Dimensions of matrices do not match")
+      dimx <- c(dimlist[[1L]], n)
+      dnlist <- lapply(x, dimnames)
+      isnul <- sapply(dnlist, is.null)
+      dnlist <- unique(dnlist[!isnul])
+      if(length(dnlist) > 1) stop("Dimnames of matrices do not match")
+      dn <- if(length(dnlist) == 0) NULL else c(dnlist[[1L]], list(NULL))
+      for(k in seq_len(n)) {
+        mk <- as(x[[k]], "TsparseMatrix")
+        kvalue <- if(length(mk@i) > 0) k else integer(0)
+        dfk <- data.frame(i=mk@i + 1L, j=mk@j + 1L, k=kvalue, x=mk@x)
+        df <- if(k == 1) dfk else rbind(df, dfk)
+      }
+      y <- sparse3Darray(i=df$i, j=df$j, k=df$k, x=df$x,
+                         dims=dimx, dimnames=dn)
+    } else {
+      warning("I don't know how to convert a list to a sparse array")
+      return(NULL)
+    }
+  } else {          
+    warning("I don't know how to convert x to a sparse array")
+    return(NULL)
+  }
+  return(y)
+}
+
+dim.sparse3Darray <- function(x) { x$dim }
+
+"dim<-.sparse3Darray" <- function(x, value) {
+  stopifnot(length(value) == 3)
+  if(!all(inside.range(x$i, c(1, value[1]))))
+    stop("indices i are outside new range")
+  if(!all(inside.range(x$j, c(1, value[2]))))
+    stop("indices j are outside new range")
+  if(!all(inside.range(x$k, c(1, value[3]))))
+    stop("indices k are outside new range")
+  dimx <- dim(x)
+  x$dim <- value
+  if(!is.null(dimnames(x))) {
+    dn <- dimnames(x)
+    for(n in 1:3) {
+      if(value[n] < dimx[n]) dn[[n]] <- dn[[n]][1:value[n]] else
+      if(value[n] > dimx[n]) dn[n] <- list(NULL)
+    }
+    dimnames(x) <- dn
+  }
+  return(x)
+}
+
+dimnames.sparse3Darray <- function(x) { x$dimnames }
+
+"dimnames<-.sparse3Darray" <- function(x, value) {
+  if(!is.list(value)) value <- list(value)
+  if(length(value) == 1) value <- rep(value, 3)
+  x$dimnames <- value
+  return(x)
+}
+
+print.sparse3Darray <- function(x, ...) {
+  dimx <- dim(x)
+  cat("Sparse 3D array of dimensions", paste(dimx, collapse="x"), fill=TRUE)
+  dn <- dimnames(x) %orifnull% rep(list(NULL), 3)
+  d3 <- dimx[3]
+  dn3 <- dn[[3]] %orifnull% as.character(seq_len(d3))
+  df <- data.frame(i=x$i, j=x$j, k=x$k, x=x$x)
+  pieces <- split(df, factor(df$k, levels=1:d3))
+  dim2 <- dimx[1:2]
+  dn2 <- dn[1:2]
+  if(typeof(x$x) == "complex") {
+    splat("\t[Complex-valued sparse matrices are not printable]")
+  } else {
+    for(k in seq_along(pieces)) {
+      cat(paste0("\n\t[ , , ", dn3[k], "]\n\n"))
+      Mi <- with(pieces[[k]],
+                 sparseMatrix(i=i, j=j, x=x, dims=dim2, dimnames=dn2))
+      stuff <- capture.output(eval(Mi))
+      #' Remove 'sparse Matrix' header blurb
+      stuff <- stuff[-1]
+      if(is.blank(stuff[1]))
+        stuff <- stuff[-1]
+      cat(stuff, sep="\n")
+    }
+  }
+  return(invisible(NULL))
+}
+
+aperm.sparse3Darray <- function(a, perm=NULL, resize=TRUE, ...) {
+  if(is.null(perm)) return(a)
+  stopifnot(length(perm) == 3)
+  a <- unclass(a)
+  a[c("i", "j", "k")] <- a[c("i", "j", "k")][perm]
+  if(resize) {
+    a$dim <- a$dim[perm]
+    if(length(a$dimnames)==3) a$dimnames <- a$dimnames[perm]
+  }
+  class(a) <- c("sparse3Darray", class(a))
+  return(a)
+}
+
+as.array.sparse3Darray <- function(x, ...) {
+  zerovalue <- vector(mode=typeof(x$x), length=1L)
+  z <- array(zerovalue, dim=dim(x), dimnames=dimnames(x))
+  z[cbind(x$i,x$j,x$k)] <- x$x
+  return(z)
+}
+
+"[.sparse3Darray" <- local({
+
+  Extract <- function(x, i,j,k, drop=TRUE, ...) {
+    dimx <- dim(x)
+    dn <- dimnames(x) %orifnull% rep(list(NULL), 3)
+    if(!missing(i) && length(dim(i)) == 2) {
+      ## matrix index
+      i <- as.matrix(i)
+      if(!(missing(j) && missing(k)))
+        stop("If i is a matrix, j and k should not be given", call.=FALSE)
+      if(ncol(i) != 3)
+        stop("If i is a matrix, it should have 3 columns", call.=FALSE)
+      ## start with vector of 'zero' answers of the correct type
+      answer <- sparseVector(x=RelevantZero(x$x)[integer(0)],
+                             i=integer(0),
+                             length=nrow(i))
+      ## values outside array return NA
+      if(any(bad <- !inside3Darray(dim(x), i)))
+        answer[bad] <- NA
+      ## if entire array is zero, there is nothing to match
+      if(length(x$x) == 0)
+        return(answer)
+      ## match desired indices to sparse entries
+      varies <- (dimx > 1)
+      nvary <- sum(varies)
+      varying <- which(varies)
+      if(nvary == 3) {
+        ## ---- older code -----
+        ## convert triples of integers to character codes
+        #### icode <- apply(i, 1, paste, collapse=",") << is too slow >>
+        ## icode <- paste(i[,1], i[,2], i[,3], sep=",")
+        ## dcode <- paste(x$i, x$j, x$k, sep=",")
+	## ------------------
+	m <- matchIntegerDataFrames(i, cbind(x$i, x$j, x$k))
+      } else if(nvary == 2) {
+        ## effectively a sparse matrix
+        ## ---- older code -----
+        ## icode <- paste(i[,varying[1]], i[,varying[2]], sep=",")
+        ## ijk <- cbind(x$i, x$j, x$k)
+        ## dcode <- paste(ijk[,varying[1]], ijk[,varying[2]], sep=",")
+	## ------------------
+	ijk <- cbind(x$i, x$j, x$k)
+	m <- matchIntegerDataFrames(i[,varying,drop=FALSE],
+	                            ijk[,varying,drop=FALSE])
+      } else if(nvary == 1) {
+        ## effectively a sparse vector
+        ## ---- older code -----
+        ## icode <- i[,varying]
+        ## dcode <- switch(varying, x$i, x$j, x$k)
+	## ------------------
+	m <- match(i[,varying], switch(varying, x$i, x$j, x$k))
+      } else {
+        ## effectively a single value
+        ## ---- older code -----
+        ## icode <- rep(1, nrow(i))
+        ## dcode <- 1  # since we know length(x$x) > 0
+	m <- 1
+      }
+      ## insert any found elements
+      found <- !is.na(m)
+      answer[found] <- x$x[m[found]]
+      return(answer)
+    }
+    if(!(missing(i) && missing(j) && missing(k))) {
+      I <- grokIndexVector(if(missing(i)) NULL else i, dimx[1], dn[[1]])
+      J <- grokIndexVector(if(missing(j)) NULL else j, dimx[2], dn[[2]])
+      K <- grokIndexVector(if(missing(k)) NULL else k, dimx[3], dn[[3]])
+      IJK <- list(I,J,K)
+      if(!all(sapply(lapply(IJK, getElement, name="full"), is.null))) {
+        ## indices exceed array bounds; result is a full array containing NA's
+        result <- as.array(x)[I$full$i, J$full$j, K$full$k, drop=drop]
+        return(result)
+      }
+      IJK <- lapply(IJK, getElement, name="strict")
+      I <- IJK[[1]]
+      J <- IJK[[2]]
+      K <- IJK[[3]]
+      #' number of values to be returned along each margin
+      newdims <- sapply(IJK, getElement, name="n")
+      #' dimnames of return array
+      newdn <- lapply(IJK, getElement, name="s")
+      #' find all required data (not necessarily in required order)
+      inI <- I$lo
+      inJ <- J$lo
+      inK <- K$lo
+      df <- data.frame(i=x$i, j=x$j, k=x$k, x=x$x)
+      use <- with(df, inI[i] & inJ[j] & inK[k])
+      df <- df[use, ,drop=FALSE]
+      #' contract sub-array to (1:n) * (1:m) * (1:l)
+      df <- transform(df,
+                      i = cumsum(inI)[i],
+                      j = cumsum(inJ)[j],
+                      k = cumsum(inK)[k])
+      Imap <- I$map
+      Jmap <- J$map
+      Kmap <- K$map
+      if(nrow(df) == 0 || (is.null(Imap) && is.null(Jmap) && is.null(Kmap))) {
+        ## return values are already in correct position
+        outdf <- df
+      } else {
+        #' invert map to determine output positions (reorder/repeat entries)
+        imap <- Imap %orifnull% df$i
+        jmap <- Jmap %orifnull% df$j
+        kmap <- Kmap %orifnull% df$k
+        sn <- seq_len(nrow(df))
+        whichi <- split(seq_along(imap), factor(imap, levels=sn))
+        whichj <- split(seq_along(jmap), factor(jmap, levels=sn))
+        whichk <- split(seq_along(kmap), factor(kmap, levels=sn))
+        dat.i <- whichi[df$i]
+        dat.j <- whichj[df$j]
+        dat.k <- whichk[df$k]
+        stuff <- mapply(expandwithdata,
+                        i=dat.i, j=dat.j, k=dat.k, x=df$x)
+        outdf <- rbindCompatibleDataFrames(stuff)
+      }
+      x <- sparse3Darray(i=outdf$i, j=outdf$j, k=outdf$k, x=outdf$x,
+                         dims=newdims, dimnames=newdn)
+      dimx <- newdims
+      dn <- newdn
+    }
+    if(drop) {
+      retain <- (dimx > 1)
+      nretain <- sum(retain)
+      if(nretain == 2) {
+        #' result is a matrix
+        retained <- which(retain)
+        newi <- getElement(x, name=c("i","j","k")[ retained[1] ])
+        newj <- getElement(x, name=c("i","j","k")[ retained[2] ])
+        newdim <- dimx[retain]
+        newdn <- dn[retain]
+        return(sparseMatrix(i=newi, j=newj, x=x$x, dims=newdim, dimnames=newdn))
+      } else if(nretain == 1) {
+        #' sparse vector
+        retained <- which(retain)
+        newi <- getElement(x, name=c("i","j","k")[retained])
+        #' ensure 'strict' 
+        ord <- order(newi)
+        newi <- newi[ord]
+        newx <- x$x[ord]
+        if(any(dup <- c(FALSE, diff(newi) == 0))) {
+          retain <- !dup
+          ii <- cumsum(retain)
+          newi <- newi[retain]
+          newx <- as(tapply(newx, ii, sum), typeof(newx))
+        }
+        x <- sparseVector(x=newx, i=newi, length=dimx[retained])
+      } else if(nretain == 0) {
+        #' single value
+        x <- as.vector(as.array(x))
+      }
+    }
+    return(x)
+  }
+
+  expandwithdata <- function(i, j, k, x) {
+    z <- expand.grid(i=i, j=j, k=k)
+    if(nrow(z) > 0)
+      z$x <- x
+    return(z)
+  }
+
+  Extract
+})
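+
+#' Illustrative example (not run), with M as constructed above:
+#' the two indexing modes handled by Extract.
+#'   M[cbind(2,3,2)]    # matrix index: one value per (i,j,k) row
+#'   M[ , , 2]          # margin index: drops to a 3 x 3 sparseMatrix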
+
+
+rbindCompatibleDataFrames <- function(x) {
+  #' faster version of Reduce(rbind, x) when entries are known to be compatible
+  nama2 <- colnames(x[[1]])
+  y <- vector(mode="list", length=length(nama2))
+  names(y) <- nama2
+  for(nam in nama2)
+    y[[nam]] <- unlist(lapply(x, getElement, name=nam))
+  return(as.data.frame(y))
+}
+
+
+"[<-.sparse3Darray" <- function(x, i, j, k, ..., value) {
+  dimx <- dim(x)
+  dn <- dimnames(x) %orifnull% rep(list(NULL), 3)
+  #' interpret indices
+  if(!missing(i) && length(dim(i)) == 2) {
+    ## matrix index
+    ijk <- as.matrix(i)
+    if(!(missing(j) && missing(k)))
+      stop("If i is a matrix, j and k should not be given", call.=FALSE)
+    if(ncol(ijk) != 3)
+      stop("If i is a matrix, it should have 3 columns", call.=FALSE)
+    if(!all(inside3Darray(dimx, i)))
+      stop("Some indices lie outside array limits", call.=FALSE)
+    if(nrow(ijk) == 0)
+      return(x) # no items to replace
+    ## assemble data frame
+    xdata <- data.frame(i=x$i, j=x$j, k=x$k, x=x$x)
+    ## match xdata into ijk (not necessarily the first match in original order)
+    m <- matchIntegerDataFrames(xdata[,1:3,drop=FALSE], ijk)
+    ## ------- OLDER VERSION: --------
+    ## convert triples of integers to character codes
+    ## icode <- apply(ijk, 1, paste, collapse=",") << is too slow >>
+    ## icode <- paste(ijk[,1], ijk[,2], ijk[,3], sep=",")
+    ## xcode <- paste(x$i, x$j, x$k, sep=",")
+    ##  m <- match(xcode, icode)
+    ## -------------------------------
+    ## remove any matches, retaining only data that do not match 'i'
+    xdata <- xdata[is.na(m), , drop=FALSE]   # sic
+    ## ensure replacement value is vector-like
+    value <- as.vector(value)
+    nv <- length(value)
+    if(nv != nrow(i) && nv != 1)
+      stop(paste("Number of items to replace", paren(nrow(i)),
+                 "does not match number of items given", paren(nv)),
+           call.=FALSE)
+    vdata <- data.frame(i=ijk[,1], j=ijk[,2], k=ijk[,3], x=value)
+    ## combine
+    ydata <- rbind(xdata, vdata)
+    y <- with(ydata, sparse3Darray(i=i,j=j,k=k,x=x,
+                                   dims=dimx, dimnames=dn, strict=TRUE))
+    return(y)
+  }
+  I <- grokIndexVector(if(missing(i)) NULL else i, dimx[1], dn[[1]])
+  J <- grokIndexVector(if(missing(j)) NULL else j, dimx[2], dn[[2]])
+  K <- grokIndexVector(if(missing(k)) NULL else k, dimx[3], dn[[3]])
+  IJK <- list(I,J,K)
+  if(!all(sapply(lapply(IJK, getElement, name="full"), is.null))) {
+    warning("indices exceed array bounds; using full array", call.=FALSE)
+    x <- as.array(x)
+    x[I$full$i, J$full$j, K$full$k] <- value
+    x <- as.sparse3Darray(x)
+    return(x)
+  }
+  IJK <- lapply(IJK, getElement, name="strict")
+  if(all(sapply(IJK, getElement, name="nind") == 0)) {
+    # no elements are indexed
+    return(x)
+  }
+  I <- IJK[[1]]
+  J <- IJK[[2]]
+  K <- IJK[[3]]
+  #' extract current array entries
+  xdata <- data.frame(i=x$i, j=x$j, k=x$k, x=x$x)
+  #' identify data volume that will be overwritten
+  inI <- I$lo
+  inJ <- J$lo
+  inK <- K$lo
+  #' remove data that will be overwritten
+  retain <- !with(xdata, inI[i] & inJ[j] & inK[k])
+  xdata <- xdata[retain,,drop=FALSE]
+  #' expected dimensions of 'value' implied by indices
+  dimVshould <- sapply(IJK, getElement, name="nind")
+  dimV <- dim(value)
+  if(length(dimV) == 3) {
+    #' both source and destination are 3D
+    if(all(dimVshould == dimV)) {
+      #' replace 3D block by 3D block of same dimensions
+      value <- as.sparse3Darray(value)
+      vdata <- data.frame(i=value$i, j=value$j, k=value$k, x=value$x)
+      # determine positions of replacement data in original array
+      vdata <- transform(vdata,
+                         i=replacementIndex(i, I),
+                         j=replacementIndex(j, J),
+                         k=replacementIndex(k, K))
+    } else
+      stop(paste("Replacement value has wrong dimensions:",
+                 paste(dimV, collapse="x"),
+                 "instead of",
+                 paste(dimVshould, collapse="x")),
+           call.=FALSE)
+  } else if(is.null(dimV)) {
+    #' replacement value is a vector or sparseVector
+    value <- as(value, "sparseVector")
+    iv <- value@i
+    xv <- value@x
+    nv <- value@length
+    collapsing <- (dimVshould == 1)
+    realdim <- sum(!collapsing)
+    if(nv == 1) {
+      #' replacement value is a constant
+      value <- as.vector(value[1])
+      if(identical(value, RelevantZero(x$x))) {
+        #' assignment causes relevant entries to be set to zero;
+        #' these entries have already been deleted from 'xdata';
+        #' nothing to add
+        vdata <- data.frame(i=integer(0), j=integer(0), k=integer(0),
+                            x=x$x[integer(0)])
+      } else {
+        #' replicate the constant
+        vdata <- expand.grid(i=I$i, j=J$i, k=K$i, x=as.vector(value[1]))
+      }
+    } else if(realdim == 0) {
+        stop(paste("Replacement value has too many entries:",
+                   nv, "instead of 1"),
+             call.=FALSE)
+    } else if(realdim == 1) {
+      theindex <- which(!collapsing)
+      # target slice is one-dimensional
+      if(nv != dimVshould[theindex]) 
+        stop(paste("Replacement value has wrong number of entries:",
+                   nv, "instead of", dimVshould[theindex]),
+             call.=FALSE)
+      newpos <- replacementIndex(iv, IJK[[theindex]])
+      vdata <- switch(theindex,
+                      data.frame(i=newpos, j=J$i,    k=K$i,     x=xv),
+                      data.frame(i=I$i,    j=newpos, k=K$i,     x=xv),
+                      data.frame(i=I$i,    j=J$i,    k=newpos,  x=xv))
+    } else {
+      # target slice is two-dimensional
+      sdim <- dimVshould[!collapsing]
+      sd1 <- sdim[1]
+      sd2 <- sdim[2]
+      if(nv != sd1)
+        stop(paste("Length of replacement vector", paren(nv),
+                   "does not match dimensions of array subset",
+                   paren(paste(dimVshould, collapse="x"))),
+             call.=FALSE)
+      firstindex <- which(!collapsing)[1]
+      secondindex <- which(!collapsing)[2]
+      pos1 <- replacementIndex(iv, IJK[[firstindex]])
+      pos2 <- replacementIndex(seq_len(sd2), IJK[[secondindex]])
+      npos1 <- length(pos1)
+      xv   <- rep(xv, sd2)
+      pos1 <- rep(pos1, sd2)
+      pos2 <- rep(pos2, each=npos1)
+      pos3 <- if(length(pos1)) IJK[[which(collapsing)]]$i else integer(0)
+      vdata <- data.frame(i=pos3, j=pos3, k=pos3, x=xv)
+      vdata[,firstindex] <- pos1
+      vdata[,secondindex] <- pos2
+    }
+  } else if(identical(dimVshould[dimVshould > 1],  dimV[dimV > 1])) {
+    #' lower dimensional sets of the same dimension
+    value <- value[drop=TRUE]
+    dimV <- dim(value)
+    dropping <- (dimVshould == 1)
+    if(length(dimV) == 2) {
+      value <- as(value, "TsparseMatrix")
+      iv <- value@i + 1L
+      jv <- value@j + 1L
+      xv <- value@x
+      firstindex <- which(!dropping)[1]
+      secondindex <- which(!dropping)[2]
+      pos1 <- replacementIndex(iv, IJK[[firstindex]])
+      pos2 <- replacementIndex(jv, IJK[[secondindex]])
+      pos3 <- if(length(pos1)) IJK[[which(dropping)]]$i else integer(0)
+      vdata <- data.frame(i=pos3, j=pos3, k=pos3, x=xv)
+      vdata[,firstindex] <- pos1
+      vdata[,secondindex] <- pos2
+    } else {
+      value <- as(value, "sparseVector")
+      iv <- value@i
+      xv <- value@x
+      vdata <- data.frame(i=if(dropping[1]) I$i else replacementIndex(iv, I),
+                          j=if(dropping[2]) J$i else replacementIndex(iv, J),
+                          k=if(dropping[3]) K$i else replacementIndex(iv, K),
+                          x=xv)
+    }
+  } else
+    stop(paste("Replacement value has wrong dimensions:",
+               paste(dimV, collapse="x"),
+               "instead of",
+               paste(dimVshould, collapse="x")),
+         call.=FALSE)
+    
+  ## combine
+  if(nrow(vdata) > 0)
+    xdata <- rbind(xdata, vdata)
+  y <- with(xdata, sparse3Darray(i=i,j=j,k=k,x=x,
+                                 dims=dimx, dimnames=dn, strict=TRUE))
+  return(y)
+}
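+
+#' Illustrative example (not run), with M as above: assignment accepts
+#' a matrix index or margin indices, mirroring Extract.
+#'   M[cbind(1,2,1)] <- 9      # set one cell
+#'   M[ , , 1] <- 0            # zero out a slice (entries are dropped)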
+
+bind.sparse3Darray <- function(A,B,along) {
+  A <- as.sparse3Darray(A)
+  B <- as.sparse3Darray(B)
+  check.1.integer(along)
+  stopifnot(along %in% 1:3)
+  dimA <- dim(A)
+  dimB <- dim(B)
+  if(!all(dimA[-along] == dimB[-along]))
+    stop("dimensions of A and B do not match")
+  dimC <- dimA
+  dimC[along] <- dimA[along] + dimB[along]
+  # extract data
+  Adf <- SparseEntries(A)
+  Bdf <- SparseEntries(B)
+  # realign 'B' coordinate
+  Bdf[,along] <- Bdf[,along] + dimA[along]
+  # combine
+  C <- EntriesToSparse(rbind(Adf, Bdf), dimC)
+  # add dimnames
+  dnA <- dimnames(A)
+  dnB <- dimnames(B)
+  if(!is.null(dnA) || !is.null(dnB)) {
+    if(length(dnA) != 3) dnA <- rep(list(NULL), 3)
+    if(length(dnB) != 3) dnB <- rep(list(NULL), 3)
+    dnC <- dnA
+    dnC[[along]] <- c(dnA[[along]] %orifnull% rep("", dimA[along]),
+                      dnB[[along]] %orifnull% rep("", dimB[along]))
+    dimnames(C) <- dnC
+  }
+  return(C)
+}
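+
+#' Illustrative example (not run), with M as above:
+#'   dim(bind.sparse3Darray(M, M, along=3))    # 3 3 4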
+
+
+anyNA.sparse3Darray <- function(x, recursive=FALSE) {
+  anyNA(x$x)
+}
+
+RelevantZero <- function(x) vector(mode=typeof(x), length=1L)
+isRelevantZero <- function(x) identical(x, RelevantZero(x))
+RelevantEmpty <- function(x) vector(mode=typeof(x), length=0L)
+
+unionOfSparseIndices <- function(A, B) {
+  #' A, B are data frames of indices i, j, k
+  ijk <- unique(rbind(A, B))
+  colnames(ijk) <- c("i", "j", "k")
+  return(ijk)
+}
+  
+Ops.sparse3Darray <- function(e1,e2=NULL){
+  if(nargs() == 1L) {
+    switch(.Generic,
+           "!" = {
+             result <- do.call(.Generic, list(as.array(e1)))
+           },
+           "-" = ,
+           "+" = {
+             result <- e1
+             result$x <- do.call(.Generic, list(e1$x))
+           },
+           stop(paste("Unary", sQuote(.Generic),
+                      "is undefined for sparse 3D arrays."), call.=FALSE))
+    return(result)
+  }
+  # binary operation
+  # Decide whether full or sparse
+  elist <- list(e1, e2)
+  isfull <- sapply(elist, inherits, what=c("matrix", "array"))
+  if(any(isfull) &&
+     any(sapply(lapply(elist[isfull], dim), prod) > 1)) {
+    # full array
+    n1 <- length(dim(e1))
+    n2 <- length(dim(e2))
+    e1 <- if(n1 == 3) as.array(e1) else
+          if(n1 == 2) as.matrix(e1) else as.vector(as.matrix(as.array(e1)))
+    e2 <- if(n2 == 3) as.array(e2) else
+          if(n2 == 2) as.matrix(e2) else as.vector(as.matrix(as.array(e2)))
+    result <- do.call(.Generic, list(e1, e2))
+    return(result)
+  }
+  # sparse result (usually)
+  e1 <- as.sparse3Darray(e1)
+  e2 <- as.sparse3Darray(e2)
+  dim1 <- dim(e1)
+  dim2 <- dim(e2)
+  mode1 <- typeof(e1$x)
+  mode2 <- typeof(e2$x)
+  zero1 <- vector(mode=mode1, length=1L)
+  zero2 <- vector(mode=mode2, length=1L)
+  
+  if(prod(dim1) == 1) {
+    ## e1 is constant
+    e1 <- as.vector(as.array(e1))
+    z12 <- do.call(.Generic, list(e1, zero2))
+    if(!isRelevantZero(z12)) {
+      # full matrix/array will be generated
+      result <- do.call(.Generic, list(e1, as.array(e2)[drop=TRUE]))
+    } else {
+      # sparse 
+      result <- e2
+      result$x <- do.call(.Generic, list(e1, e2$x))
+    }
+    return(result)
+  }
+
+  if(prod(dim2) == 1) {
+    ## e2 is constant
+    e2 <- as.vector(as.array(e2))
+    z12 <- do.call(.Generic, list(zero1, e2))
+    if(!isRelevantZero(z12)) {
+      # full matrix/array will be generated
+      result <- do.call(.Generic, list(as.array(e1)[drop=TRUE], e2))
+    } else {
+      # sparse 
+      result <- e1
+      result$x <- do.call(.Generic, list(e1$x, e2))
+    }
+    return(result)
+  }
+  
+  z12 <- do.call(.Generic, list(zero1, zero2))
+  if(!isRelevantZero(z12)) {
+    #' Result is an array
+    e1 <- as.array(e1)
+    e2 <- as.array(e2)
+    result <- do.call(.Generic, list(e1, e2))
+    return(result)
+  }
+
+  # Result is sparse
+  if(identical(dim1, dim2)) {
+    #' extents are identical
+    ijk1 <- SparseIndices(e1)
+    ijk2 <- SparseIndices(e2)
+    if(identical(ijk1, ijk2)) {
+      #' patterns of nonzero entries are identical
+      ijk <- ijk1
+      values <- do.call(.Generic, list(e1$x, e2$x))
+    } else {			   
+      #' different patterns of nonzero entries
+      ijk <- unionOfSparseIndices(ijk1, ijk2)
+      values <- as.vector(do.call(.Generic, list(e1[ijk], e2[ijk])))
+    }			      
+    dn <- dimnames(e1) %orifnull% dimnames(e2)
+    result <- sparse3Darray(i=ijk$i, j=ijk$j, k=ijk$k, x=values,
+                              dims=dim1, dimnames=dn, strict=TRUE)
+    return(result)
+  }
+
+  drop1 <- (dim1 == 1)
+  drop2 <- (dim2 == 1)
+  if(!any(drop1 & !drop2) && identical(dim1[!drop2], dim2[!drop2])) {
+    #' dim2 is a slice of dim1
+    ijk1 <- data.frame(i=e1$i, j=e1$j, k=e1$k)
+    ijk2 <- data.frame(i=e2$i, j=e2$j, k=e2$k)
+    expanding <- which(drop2 & !drop1)
+    if(length(expanding) == 1) {
+      n <- dim1[expanding]
+      m <- nrow(ijk2)
+      ijk2 <- as.data.frame(lapply(ijk2, rep, times=n))
+      ijk2[,expanding] <- rep(seq_len(n), each=m)
+      ijk <- unionOfSparseIndices(ijk1, ijk2)
+      ijkdrop <- ijk
+      if(nrow(ijkdrop) > 0) ijkdrop[,expanding] <- 1
+      xout <- do.call(.Generic, list(e1[ijk], e2[ijkdrop]))
+      result <- sparse3Darray(i=ijk[,1L], j=ijk[,2L], k=ijk[,3L],
+                              x=as.vector(xout),
+                              dims=dim1, dimnames=dimnames(e1), strict=TRUE)
+      return(result)
+    }
+  }
+
+  if(!any(drop2 & !drop1) && identical(dim2[!drop1], dim1[!drop1])) {
+    #' dim1 is a slice of dim2
+    ijk1 <- data.frame(i=e1$i, j=e1$j, k=e1$k)
+    ijk2 <- data.frame(i=e2$i, j=e2$j, k=e2$k)
+    expanding <- which(drop1 & !drop2)
+    if(length(expanding) == 1) {
+      n <- dim2[expanding]
+      m <- nrow(ijk1)
+      ijk1 <- as.data.frame(lapply(ijk1, rep, times=n))
+      ijk1[,expanding] <- rep(seq_len(n), each=m)
+      ijk <- unionOfSparseIndices(ijk1, ijk2)
+      ijkdrop <- ijk
+      if(nrow(ijkdrop) > 0) ijkdrop[,expanding] <- 1L
+      xout <- do.call(.Generic, list(e1[ijkdrop], e2[ijk]))
+      result <- sparse3Darray(i=ijk[,1L], j=ijk[,2L], k=ijk[,3L],
+                              x=as.vector(xout),
+                              dims=dim2, dimnames=dimnames(e2), strict=TRUE)
+      return(result)
+    }
+  }
+
+  if(all(drop1[-1]) && dim1[1L] == dim2[1L]) {
+    #' e1 is a (sparse) vector matching the first extent of e2
+    if(.Generic %in% c("*", "&")) {
+      # result is sparse
+      ijk <- data.frame(i=e2$i, j=e2$j, k=e2$k)
+      ones <- rep(1L, nrow(ijk))
+      i11 <- data.frame(i=e2$i, j=ones, k=ones)
+      xout <- do.call(.Generic, list(e1[i11], e2[ijk]))
+      result <- sparse3Darray(i=ijk[,1L], j=ijk[,2L], k=ijk[,3L],
+                              x=as.vector(xout),
+                              dims=dim2, dimnames=dimnames(e2), strict=TRUE)
+    } else {
+      # result is full array
+      e1 <- as.array(e1)[,,,drop=TRUE]
+      e2 <- as.array(e2)
+      result <- do.call(.Generic, list(e1, e2))
+    }
+    return(result)
+  }
+  
+  stop(paste("Non-conformable arrays:",
+             paste(dim1, collapse="x"), "and", paste(dim2, collapse="x")),
+       call.=FALSE)
+}
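+
+#' Illustrative examples (not run), with M as above: a result stays
+#' sparse only when the operation maps zero to zero, as coded above.
+#'   M * 2     # sparse: 0 * 2 == 0
+#'   M + 1     # full array, because 0 + 1 != 0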
+
+Math.sparse3Darray <- function(x, ...){
+  z <- RelevantZero(x$x)
+  fz <- do.call(.Generic, list(z))
+  if(!isRelevantZero(fz)) {
+    # result is a full array
+    result <- do.call(.Generic, list(as.array(x), ...))
+    return(result)
+  }
+  x$x <- do.call(.Generic, list(x$x))
+  return(x)
+}
+
+Summary.sparse3Darray <- function(..., na.rm=FALSE) {
+  argh <- list(...)
+  is3D <- sapply(argh, inherits, what="sparse3Darray")
+  if(any(is3D)) {
+    xvalues <- lapply(argh[is3D], getElement, name="x")
+    argh[is3D] <- lapply(xvalues, .Generic, na.rm=na.rm)
+    zeroes <- lapply(xvalues, RelevantZero)
+    fzeroes <- lapply(zeroes, .Generic, na.rm=na.rm)
+    argh <- append(argh, fzeroes)
+  }
+  rslt <- do.call(.Generic, append(argh, list(na.rm=na.rm)))
+  return(rslt)
+}
+
+
+SparseIndices <- function(x) {
+  #' extract indices of entries of sparse vector/matrix/array
+  nd <- length(dim(x))
+  if(nd > 3)
+    stop("Arrays of more than 3 dimensions are not supported", call.=FALSE)
+  if(nd == 0 || nd == 1) {
+    x <- as(x, "sparseVector")
+    df <- data.frame(i=x@i)
+  } else if(nd == 2) {
+    x <- as(x, "TsparseMatrix")
+    df <- data.frame(i=x@i + 1L, j=x@j + 1L)
+  } else if(nd == 3) {
+    x <- as.sparse3Darray(x)
+    df <- data.frame(i=x$i, j=x$j, k=x$k)
+  }
+  return(df)
+}
+
+SparseEntries <- function(x) {
+  #' extract entries of sparse vector/matrix/array
+  nd <- length(dim(x))
+  if(nd > 3)
+    stop("Arrays of more than 3 dimensions are not supported", call.=FALSE)
+  if(nd == 0 || nd == 1) {
+    x <- as(x, "sparseVector")
+    df <- data.frame(i=x@i, x=x@x)
+  } else if(nd == 2) {
+    x <- as(x, "TsparseMatrix")
+    df <- data.frame(i=x@i + 1L, j=x@j + 1L, x=x@x)
+  } else if(nd == 3) {
+    x <- as.sparse3Darray(x)
+    df <- data.frame(i=x$i, j=x$j, k=x$k, x=x$x)
+  }
+  return(df)
+}
+
+EntriesToSparse <- function(df, dims) {
+  #' convert data frame of indices and values
+  #' to sparse vector/matrix/array
+  nd <- length(dims)
+  if(nd == 0)
+    return(with(df, as(sum(x), typeof(x))))
+  sn <- seq_len(nd)
+  colnames(df)[sn] <- c("i","j","k")[sn]
+  if(nd == 1) {
+    #' sparse vector: duplicate entries not allowed
+    df <- df[with(df, order(i)), , drop=FALSE]
+    dup <- c(FALSE, with(df, diff(i) == 0))
+    if(any(dup)) {
+      #' accumulate values at the same array location
+      first <- !dup
+      newi <- cumsum(first)
+      newx <- as(tapply(df$x, newi, sum), typeof(df$x))
+      df <- data.frame(i=newi[first], x=newx)
+    }
+    result <- with(df, sparseVector(i=i, x=x, length=dims))
+  } else if(nd == 2) {
+    result <- with(df, sparseMatrix(i=i, j=j, x=x, dims=dims))
+  } else if(nd == 3) {
+    result <- with(df, sparse3Darray(i=i, j=j, k=k, x=x, dims=dims))
+  }
+  return(result)
+}
+
+evalSparse3Dentrywise <- function(expr, envir) {
+  ## DANGER: this assumes all sparse arrays in the expression
+  ##         have the same pattern of nonzero elements!
+  e <- as.expression(substitute(expr))
+  ## get names of all variables in the expression
+  varnames <- all.vars(e)
+  allnames <- all.names(e, unique=TRUE)
+  funnames <- allnames[!(allnames %in% varnames)]
+  if(length(varnames) == 0)
+    stop("No variables in this expression")
+  ## get the values of the variables
+  if(missing(envir)) {
+    envir <- parent.frame() # WAS: sys.parent()
+  } else if(is.list(envir)) {
+    envir <- list2env(envir, parent=parent.frame())
+  }
+  vars <- mget(varnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL))
+  funs <- mget(funnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL))
+  ## find out which variables are sparse3Darray
+  isSpud <- sapply(vars, inherits, what="sparse3Darray")
+  if(!any(isSpud))
+    stop("No sparse 3D arrays in this expression")
+  spuds <- vars[isSpud]
+  template <- spuds[[1L]]
+  ## replace each array by its entries, and evaluate
+  spudvalues <- lapply(spuds, getElement, name="x")
+  ## minimal safety check
+  if(length(unique(lengths(spudvalues))) > 1)
+    stop("Different numbers of sparse entries", call.=FALSE)
+  vars[isSpud] <- spudvalues
+  v <- eval(e, append(vars, funs))
+  ## reshape as 3D array
+  result <- sparse3Darray(x=v,
+                          i=template$i,
+                          j=template$j,
+                          k=template$k,
+                          dims=dim(template),
+                          dimnames=dimnames(template))
+  return(result)
+}
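+
+#' Illustrative example (not run), with M as above: both operands share
+#' M's sparsity pattern, so evaluation touches only the nonzero entries.
+#'   evalSparse3Dentrywise(M + 2 * M)    # entries 15 and 21, same pattern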
diff --git a/R/sparsecommon.R b/R/sparsecommon.R
new file mode 100644
index 0000000..c2b9009
--- /dev/null
+++ b/R/sparsecommon.R
@@ -0,0 +1,232 @@
+#'
+#'    sparsecommon.R
+#'
+#'  Utilities for sparse arrays
+#'
+#'  $Revision: 1.6 $  $Date: 2017/06/05 10:31:58 $
+#'
+
+#'  .............. completely generic ....................
+
+
+inside3Darray <- function(d, i, j, k) {
+  stopifnot(length(d) == 3)
+  if(length(dim(i)) == 2 && missing(j) && missing(k)) {
+    stopifnot(ncol(i) == 3)
+    j <- i[,2]
+    k <- i[,3]
+    i <- i[,1]
+  }
+  ans <- inside.range(i, c(1, d[1])) &
+         inside.range(j, c(1, d[2])) &
+         inside.range(k, c(1, d[3]))
+  return(ans)
+}
+
+#'  .............. depends on Matrix package ................
+
+sparseVectorCumul <- function(x, i, length) {
+  #' extension of 'sparseVector' to allow repeated indices 
+  #'   (the corresponding entries are added)
+  z <- tapply(x, list(factor(i, levels=1:length)), sum)
+  z <- z[!is.na(z)]
+  sparseVector(i=as.integer(names(z)), x=as.numeric(z), length=length)
+}
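+
+#' Illustrative sketch (not part of the upstream code):
+#'   sparseVectorCumul(x=c(1,2,3), i=c(2,5,2), length=6)
+#'   ## entries 4 at position 2 (1+3 accumulated) and 2 at position 5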
+
+#'  .............. code that mentions sparse3Darray ................
+
+expandSparse <- function(x, n, across) {
+  #' x is a sparse vector/matrix; replicate it 'n' times
+  #' and form a sparse matrix/array
+  #' in which each slice along the 'across' dimension is identical to 'x'
+  #' Default is across = length(dim(x)) + 1
+  check.1.integer(n)
+  stopifnot(n >= 1)
+  dimx <- dim(x)
+  if(is.null(dimx)) {
+    if(inherits(x, "sparseVector")) dimx <- x@length else
+    if(is.vector(x)) dimx <- length(x) else
+    stop("Format of x is not understood", call.=FALSE)
+  }
+  nd <- length(dimx)
+  if(missing(across)) across <- nd + 1L else {
+    check.1.integer(across)
+    if(!(across %in% (1:(nd+1L))))
+      stop(paste("Argument 'across' must be an integer from 1 to", nd+1L),
+           call.=FALSE)
+  }
+  if(nd == 1) {
+    if(inherits(x, "sparseVector")) {
+      m <- length(x@x)
+      y <- switch(across,
+                  sparseMatrix(i=rep(1:n, times=m),
+                               j=rep(x@i, each=n),
+                               x=rep(x@x, each=n),
+                               dims=c(n, dimx)),
+                  sparseMatrix(i=rep(x@i, each=n),
+                               j=rep(1:n, times=m),
+                               x=rep(x@x, each=n),
+                               dims=c(dimx, n)))
+    } else {
+      y <- switch(across,
+                  outer(1:n, x, function(a,b) b),
+                  outer(x, 1:n, function(a,b) a))
+    }
+  } else if(nd == 2) {
+    if(inherits(x, "sparseMatrix")) {
+      z <- as(x, "TsparseMatrix")
+      m <- length(z@x)
+      y <- switch(across,
+                  sparse3Darray(i=rep(1:n, times=m),
+                                j=rep(z@i + 1L, each=n),
+                                k=rep(z@j + 1L, each=n),
+                                x=rep(z@x, each=n),
+                                dims=c(n, dimx)),
+                  sparse3Darray(i=rep(z@i + 1L, each=n),
+                                j=rep(1:n, times=m),
+                                k=rep(z@j + 1L, each=n),
+                                x=rep(z@x, each=n),
+                                dims=c(dimx[1], n, dimx[2])),
+                  sparse3Darray(i=rep(z@i + 1L, each=n),
+                                j=rep(z@j + 1L, each=n),
+                                k=rep(1:n, times=m),
+                                x=rep(z@x, each=n),
+                                dims=c(dimx, n)))
+    } else stop("Not yet implemented for full arrays")
+  } else 
+     stop("Not implemented for arrays of more than 2 dimensions", call.=FALSE)
+  return(y)
+}
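+
+#' Illustrative sketch (not part of the upstream code): replicating a sparse
+#' vector into the columns of a sparse matrix,
+#'   v <- sparseVector(x=7, i=3, length=5)
+#'   M <- expandSparse(v, n=4)    ## 5 x 4; every column equals v
+#'   ## so M[3, ] == rep(7, 4) and all other entries are zero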
+
+mapSparseEntries <- function(x, margin, values, conform=TRUE, across) {
+  # replace the NONZERO entries of sparse matrix or array
+  # by values[l] where l is one of the slice indices
+  dimx <- dim(x)
+  if(is.null(dimx)) {
+    if(inherits(x, "sparseVector")) dimx <- x@length else
+    if(is.vector(x)) dimx <- length(x) else
+    stop("Format of x is not understood", call.=FALSE)
+  }
+  if(inherits(x, "sparseMatrix")) {
+    x <- as(x, Class="TsparseMatrix")
+    if(length(x@i) == 0) { # slot access: x is now a TsparseMatrix
+      # no entries
+      return(x)
+    }
+    check.1.integer(margin)
+    stopifnot(margin %in% 1:2)
+    check.nvector(values, dimx[margin], things=c("rows","columns")[margin],
+                  oneok=TRUE)
+    if(length(values) == 1) values <- rep(values, dimx[margin])
+    i <- x@i + 1L
+    j <- x@j + 1L
+    yindex <- switch(margin, i, j)
+    y <- sparseMatrix(i=i, j=j, x=values[yindex],
+                      dims=dimx, dimnames=dimnames(x))
+    return(y)
+  }
+  if(inherits(x, "sparse3Darray")) {
+    if(length(x$i) == 0) {
+      # no entries
+      return(x)
+    }
+    ijk <- cbind(i=x$i, j=x$j, k=x$k)
+    if(conform) {
+      #' ensure common pattern of sparse values
+      #' in each slice on 'across' margin
+      nslice <- dimx[across]
+      #' pick one representative of each equivalence class
+      ## ---- old code ---------
+      ## dup <- duplicated(ijk[,-across,drop=FALSE])
+      ## ijk <- ijk[!dup, , drop=FALSE]
+      ## ---------------------
+      use <- representativeRows(ijk[,-across,drop=FALSE])
+      ijk <- ijk[use, , drop=FALSE]
+      ## 
+      npattern <- nrow(ijk)
+      #' repeat this pattern in each 'across' slice
+      ijk <- apply(ijk, 2, rep, times=nslice)
+      ijk[, across] <- rep(seq_len(nslice), each=npattern)
+    }
+    if(is.vector(values)) {
+      # vector of values matching margin extent
+      check.nvector(values, dimx[margin],
+                    things=c("rows","columns","planes")[margin],
+                    oneok=TRUE)
+      if(length(values) == 1) values <- rep(values, dimx[margin])
+      yindex <- ijk[,margin]
+      y <- sparse3Darray(i=ijk[,1],
+                         j=ijk[,2],
+                         k=ijk[,3],
+                         x=values[yindex],
+                         dims=dimx, dimnames=dimnames(x))
+      return(y)
+    } else if(is.matrix(values)) {
+      #' matrix of values.
+      force(across)
+      stopifnot(across != margin) 
+      #' rows of matrix must match 'margin'
+      if(nrow(values) != dimx[margin])
+        stop(paste("Number of rows of values", paren(nrow(values)),
+                   "does not match array size in margin", paren(dimx[margin])),
+             call.=FALSE)
+      #' columns of matrix must match 'across'
+      if(ncol(values) != dimx[across])
+        stop(paste("Number of columns of values", paren(ncol(values)),
+                   "does not match array size in 'across'",
+                   paren(dimx[across])),
+             call.=FALSE)
+      # map
+      yindex <- ijk[,margin]
+      zindex <- ijk[,across]
+      y <- sparse3Darray(i=ijk[,1], j=ijk[,2], k=ijk[,3],
+                         x=values[cbind(yindex,zindex)],
+                         dims=dimx, dimnames=dimnames(x))
+      return(y)
+    } else stop("Format of values not understood", call.=FALSE)
+  }
+  stop("Format of x not understood", call.=FALSE)
+}
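+
+#' Illustrative sketch (not part of the upstream code): replace each nonzero
+#' entry of a sparse matrix by a value determined by its row index,
+#'   M <- sparseMatrix(i=c(1,3), j=c(2,2), x=c(5,5), dims=c(3,3))
+#'   mapSparseEntries(M, margin=1, values=c(10,20,30))
+#'   ## same nonzero pattern; entries become 10 (row 1) and 30 (row 3)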
+
+
+applySparseEntries <- local({
+
+  applySparseEntries <- function(x, f, ...) {
+    ## apply vectorised function 'f' only to the nonzero entries of 'x'
+    if(inherits(x, "sparseMatrix")) {
+      x <- applytoxslot(x, f, ...)
+    } else if(inherits(x, "sparse3Darray")) {
+      x <- applytoxentry(x, f, ...)
+    } else {
+      x <- f(x, ...)
+    }
+    return(x)
+  }
+
+  applytoxslot <- function(x, f, ...) {
+    xx <- x@x
+    n <- length(xx)
+    xx <- f(xx, ...)
+    if(length(xx) != n)
+      stop(paste("Function f returned the wrong number of values:",
+                 length(xx), "instead of", n),
+           call.=FALSE)
+    x@x <- xx
+    return(x)
+  }
+  
+  applytoxentry <- function(x, f, ...) {
+    xx <- x$x
+    n <- length(xx)
+    xx <- f(xx, ...)
+    if(length(xx) != n)
+      stop(paste("Function f returned the wrong number of values:",
+                 length(xx), "instead of", n),
+           call.=FALSE)
+    x$x <- xx
+    return(x)
+  }
+  
+  applySparseEntries
+})
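+
+#' Illustrative sketch (not part of the upstream code): apply a function to
+#' the stored entries only, so structural zeros stay zero (a naive exp()
+#' would densify the matrix):
+#'   applySparseEntries(sparseMatrix(i=1, j=2, x=3, dims=c(2,2)), exp)
+#'   ## the single stored entry becomes exp(3)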
+
diff --git a/R/sparselinalg.R b/R/sparselinalg.R
new file mode 100644
index 0000000..c6bb7ab
--- /dev/null
+++ b/R/sparselinalg.R
@@ -0,0 +1,259 @@
+#'
+#'    sparselinalg.R
+#'
+#'   Counterpart of linalg.R for sparse matrices/arrays
+#'
+#' 
+#'   $Revision: 1.10 $  $Date: 2017/06/05 10:31:58 $
+
+marginSums <- function(X, MARGIN) {
+  #' equivalent to apply(X, MARGIN, sum)
+  if(length(MARGIN) == 0) return(sum(X))
+  if(is.array(X) || is.matrix(X))
+    return(apply(X, MARGIN, sum))
+  dimX <- dim(X)
+  if(length(MARGIN) == length(dimX)) 
+    return(aperm(X, MARGIN))
+  if(any(huh <- (MARGIN < 0 | MARGIN > length(dimX))))
+    stop(paste(commasep(sQuote(paste0("MARGIN=", MARGIN[huh]))),
+               ngettext(sum(huh), "is", "are"), "not defined"), call.=FALSE)
+  df <- SparseEntries(X)
+  # discard other indices
+  nonmargin <- setdiff(seq_along(dimX), MARGIN)
+  df <- df[ , -nonmargin, drop=FALSE]
+  # implicitly accumulate
+  result <- EntriesToSparse(df, dimX[MARGIN])
+  return(result)
+}
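+
+#' Illustrative sketch (not part of the upstream code): for a sparse3Darray A,
+#'   marginSums(A, MARGIN=c(1,2))
+#' returns a sparse matrix equal to apply(as.array(A), c(1,2), sum);
+#' duplicated (i,j) pairs are accumulated when the result is assembled.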
+
+tensor1x1 <- function(A, B) {
+  ## equivalent of tensor(A, B, 1, 1)
+  ## when A is a vector and B is a sparse array.
+  stopifnot(length(dim(B)) == 3)
+  A <- as.vector(as.matrix(A))
+  stopifnot(length(A) == dim(B)[1])
+  if(is.array(B)) {
+    result <- tensor::tensor(A,B,1,1)
+  } else if(inherits(B, "sparse3Darray")) {
+    result <- sparseMatrix(i=B$j,
+                           j=B$k,
+                           x=B$x * A[B$i], # values for same (i,j) are summed
+                           dims=dim(B)[-1],
+                           dimnames=dimnames(B)[2:3])
+  } else stop("Format of B not understood", call.=FALSE)
+  return(result)
+}
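+
+#' Illustrative sketch (not part of the upstream code): for a weight vector a
+#' and a sparse3Darray B, tensor1x1(a, B) contracts the first index,
+#'   ## result[j,k] = sum_i a[i] * B[i,j,k]
+#' agreeing with tensor::tensor(a, as.array(B), 1, 1).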
+
+tenseur <- local({
+
+  tenseur <- function(A, B, alongA=integer(0), alongB=integer(0)) {
+    #' full arrays?
+    if(isfull(A) && isfull(B))
+      return(tensor::tensor(A=A, B=B, alongA=alongA, alongB=alongB))
+    #' check dimensions
+    dimA <- dim(A) %orifnull% length(A)
+    dnA <- dimnames(A)
+    if(is.null(dnA))
+      dnA <- rep(list(NULL), length(dimA))
+    dimB <- dim(B) %orifnull% length(B)
+    dnB <- dimnames(B)
+    if(is.null(dnB))
+      dnB <- rep(list(NULL), length(dimB))
+    #' check 'along'
+    if (length(alongA) != length(alongB)) 
+      stop("\"along\" vectors must be same length")
+    mtch <- dimA[alongA] == dimB[alongB]
+    if (any(is.na(mtch)) || !all(mtch)) 
+      stop("Mismatch in \"along\" dimensions")
+    #' dimensions of result
+    retainA <- !(seq_along(dimA) %in% alongA)
+    retainB <- !(seq_along(dimB) %in% alongB)
+    dimC <- c(dimA[retainA], dimB[retainB])
+    nC <- length(dimC)
+    if(nC > 3)
+      stop("Sorry, sparse arrays of more than 3 dimensions are not supported",
+           call.=FALSE)
+    #' fast code for special cases
+    if(length(dimA) == 1 && length(alongA) == 1 && !isfull(B)) {
+      BB <- SparseEntries(B)
+      Bx  <- BB[,ncol(BB)]
+      ijk <- BB[,-ncol(BB),drop=FALSE]
+      kalong  <- ijk[,alongB]
+      ABalong <- as.numeric(Bx * A[kalong])
+      ndimB <- ncol(ijk)
+      switch(ndimB,
+             {
+               result <- sum(ABalong)
+             },
+             {
+               iout <- ijk[,-alongB]
+               result <- sparseVectorCumul(i=iout,
+                                           x=ABalong, # values aggregated by i
+                                           length=dimC)
+             },
+             {
+               ijout <- ijk[,-alongB,drop=FALSE]
+               result <- sparseMatrix(i=ijout[,1],
+                                      j=ijout[,2],
+                                      x=ABalong, # values aggregated by (i,j)
+                                      dims=dimC,
+                                      dimnames=dnB[-alongB])
+               result <- drop0(result)
+             })
+      return(result)
+    }
+    if(length(dimB) == 1 && length(alongB) == 1 && !isfull(A)) {
+      AA <- SparseEntries(A)
+      Ax <- AA[,ncol(AA)]
+      ijk <- AA[,-ncol(AA),drop=FALSE]
+      kalong  <- ijk[,alongA]
+      ABalong <- as.numeric(Ax * B[kalong])
+      ndimA <- ncol(ijk)
+      switch(ndimA,
+             {
+               result <- sum(ABalong)
+             },
+             {
+               iout <- ijk[,-alongA]
+               result <- sparseVectorCumul(i=iout,
+                                           x=ABalong, # values aggregated by i
+                                           length=dimC)
+             },
+             {
+               ijout <- ijk[,-alongA,drop=FALSE]
+               result <- sparseMatrix(i=ijout[,1],
+                                      j=ijout[,2],
+                                      x=ABalong, # values aggregated by (i,j)
+                                      dims=dimC,
+                                      dimnames=dnA[-alongA])
+               result <- drop0(result)
+             })
+      return(result)
+    }
+    #' extract indices and values of nonzero entries
+    dfA <- SparseEntries(A)
+    dfB <- SparseEntries(B)
+    #' assemble all tuples which contribute 
+    if(length(alongA) == 0) {
+      #' outer product
+      dfC <- outersparse(dfA, dfB)
+    } else {
+      if(length(alongA) == 1) {
+        Acode <- dfA[,alongA]
+        Bcode <- dfB[,alongB]
+      } else {
+        Along <- unname(as.list(dfA[,alongA, drop=FALSE]))
+        Blong <- unname(as.list(dfB[,alongB, drop=FALSE]))
+        Acode <- do.call(paste, append(Along, list(sep=",")))
+        Bcode <- do.call(paste, append(Blong, list(sep=",")))
+      }
+      lev <- unique(c(Acode,Bcode))
+      Acode <- factor(Acode, levels=lev)
+      Bcode <- factor(Bcode, levels=lev)
+      splitA <- split(dfA, Acode)
+      splitB <- split(dfB, Bcode)
+      splitC <- mapply(outersparse, splitA, splitB, SIMPLIFY=FALSE)
+      dfC <- rbindCompatibleDataFrames(splitC)
+    }
+    #' form product of contributing entries
+    dfC$x <- with(dfC, A.x * B.x)
+    #' retain only appropriate columns
+    retain <- c(retainA, FALSE, retainB, FALSE, TRUE)
+    dfC <- dfC[, retain, drop=FALSE]
+    #' collect result
+    result <- EntriesToSparse(dfC, dimC)
+    return(result)
+  }
+
+  isfull <- function(z) {
+    if(is.array(z) || is.matrix(z) || is.data.frame(z)) return(TRUE)
+    if(inherits(z, c("sparseVector", "sparseMatrix", "sparse3Darray")))
+      return(FALSE)
+    return(TRUE)
+  }
+  
+  outersparse <- function(dfA, dfB) {
+    if(is.null(dfA) || is.null(dfB)) return(NULL)
+    IJ <- expand.grid(I=seq_len(nrow(dfA)),
+                      J=seq_len(nrow(dfB)))
+    dfC <- with(IJ, cbind(A=dfA[I,,drop=FALSE], B=dfB[J,,drop=FALSE]))
+    return(dfC)
+  }
+
+  tenseur
+})
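+
+#' Illustrative sketch (not part of the upstream code): 'tenseur' is a
+#' sparse-aware counterpart of tensor::tensor. For a sparse matrix M and a
+#' numeric vector v of length ncol(M),
+#'   tenseur(M, v, alongA=2, alongB=1)
+#' contracts the second index of M against v, i.e. the matrix-vector
+#' product M %*% v, returned in sparse form.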
+
+sumsymouterSparse <- function(x, w=NULL, dbg=FALSE) {
+  dimx <- dim(x)
+  if(length(dimx) != 3) stop("x should be a 3D array")
+  stopifnot(dim(x)[2] == dim(x)[3])
+  if(!is.null(w)) {
+    stopifnot(inherits(w, "sparseMatrix"))
+    stopifnot(all(dim(w) == dim(x)[2:3]))
+  }
+  m <- dimx[1]
+  n <- dimx[2]
+  if(inherits(x, "sparse3Darray")) {
+    df <- data.frame(i = x$i - 1L,  # need 0-based indices
+                     j = x$j - 1L,
+                     k = x$k - 1L,
+                     value = x$x)
+  } else stop("x is not a recognised kind of sparse array")
+  # trivial?
+  if(nrow(df) < 2) {
+    y <- matrix(0, m, m)
+    dimnames(y) <- rep(dimnames(x)[1], 2)
+    return(y)
+  }
+  # order by increasing j, then k
+  oo <- with(df, order(j, k, i))
+  df <- df[oo, ]
+  # now provide ordering by increasing k then j
+  ff <- with(df, order(k,j,i))
+  #
+  if(dbg) {
+    cat("----------------- Data ---------------------\n")
+    print(df)
+    cat("-------------- Reordered data --------------\n")
+    print(df[ff,])
+    cat("Calling......\n")
+  }
+  if(is.null(w)) {
+    z <- .C("CspaSumSymOut",
+            m = as.integer(m),
+            n = as.integer(n),
+            lenx = as.integer(nrow(df)),
+            ix = as.integer(df$i), # indices are already 0-based
+            jx = as.integer(df$j),
+            kx = as.integer(df$k),
+            x  = as.double(df$value),
+            flip = as.integer(ff - 1L), # convert 1-based to 0-based
+            y  = as.double(numeric(m * m)),
+            PACKAGE = "spatstat")
+  } else {
+    # extract triplet representation of w
+    w <- as(w, Class="TsparseMatrix")
+    dfw <- data.frame(j=w@i, k=w@j, w=w@x)
+    woo <- with(dfw, order(j, k))
+    dfw <- dfw[woo, , drop=FALSE]
+    z <- .C("CspaWtSumSymOut",
+            m = as.integer(m),
+            n = as.integer(n),
+            lenx = as.integer(nrow(df)),
+            ix = as.integer(df$i), # indices are already 0-based
+            jx = as.integer(df$j),
+            kx = as.integer(df$k),
+            x  = as.double(df$value),
+            flip = as.integer(ff - 1L),  # convert 1-based to 0-based
+            lenw = as.integer(nrow(dfw)),
+            jw = as.integer(dfw$j),
+            kw = as.integer(dfw$k),
+            w = as.double(dfw$w),
+            y  = as.double(numeric(m * m)),
+            PACKAGE = "spatstat")
+  }
+  y <- matrix(z$y, m, m)
+  dimnames(y) <- rep(dimnames(x)[1], 2)
+  return(y)
+}
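+
+#' Illustrative sketch (not part of the upstream code): in effect this
+#' computes, for an m x n x n array x and optional n x n weights w,
+#'   y = sum over pairs (j,k) of w[j,k] * (x[,j,k] %o% x[,k,j])
+#' in C, without ever instantiating the full array
+#' (cf. sumsymouter in linalg.R).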
+
diff --git a/R/spatialcdf.R b/R/spatialcdf.R
new file mode 100644
index 0000000..68b6434
--- /dev/null
+++ b/R/spatialcdf.R
@@ -0,0 +1,60 @@
+##
+## spatialcdf.R
+##
+##  $Revision: 1.2 $ $Date: 2014/10/24 00:22:30 $
+##
+
+spatialcdf <- function(Z, weights=NULL, normalise=FALSE, ...,
+                       W=NULL, Zname=NULL) {
+  Zdefaultname <- singlestring(short.deparse(substitute(Z)))
+  if(is.character(Z) && length(Z) == 1) {
+    if(is.null(Zname)) Zname <- Z
+    switch(Zname,
+           x={
+             Z <- function(x,y) { x }
+           }, 
+           y={
+             Z <- function(x,y) { y }
+           },
+           stop("Unrecognised covariate name")
+         )
+  }
+  if(is.null(Zname)) Zname <- Zdefaultname
+  ##
+  if(is.ppm(weights) || is.kppm(weights) || is.dppm(weights)) {
+    Q <- quad.ppm(as.ppm(weights))
+    loc <- as.ppp(Q)
+    df <- mpl.get.covariates(list(Z=Z), loc, covfunargs=list(...))
+    df$wt <- fitted(weights) * w.quad(Q)
+    wtname <- if(normalise) "fraction of points" else "number of points"
+  } else {
+    if(is.null(W)) W <- as.owin(weights, fatal=FALSE)
+    if(is.null(W)) W <- as.owin(Z, fatal=FALSE)
+    if(is.null(W)) stop("No information specifying the spatial window")
+    if(is.null(weights)) weights <- 1
+    M <- as.mask(W, ...)
+    loc <- rasterxy.mask(M, drop=TRUE)
+    df <- mpl.get.covariates(list(Z=Z, weights=weights), loc,
+                             covfunargs=list(...))
+    pixelarea <- with(unclass(M), xstep * ystep)
+    df$wt <- rep(pixelarea, nrow(df))
+    wtname <- if(normalise) "fraction of weight" else "weight"
+  }
+  if(normalise) 
+    df$wt <- with(df, wt/sum(wt))
+  G <- with(df, ewcdf(Z, wt))
+  class(G) <- c("spatialcdf", class(G))
+  attr(G, "call") <- sys.call()
+  attr(G, "Zname") <- Zname
+  attr(G, "ylab") <- paste("Cumulative", wtname)
+  return(G)
+}
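+
+## Illustrative sketch (not part of the upstream code): with the default
+## uniform weights, the spatial CDF of the x coordinate over the unit
+## square is the uniform CDF:
+##   G <- spatialcdf("x", W=square(1))
+##   plot(G)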
+
+plot.spatialcdf <- function(x, ..., xlab, ylab) {
+  if(missing(xlab) || is.null(xlab))
+    xlab <- attr(x, "Zname")
+  if(missing(ylab) || is.null(ylab))
+    ylab <- attr(x, "ylab")
+  plot.ecdf(x, ..., xlab=xlab, ylab=ylab)
+}
+
diff --git a/R/split.ppp.R b/R/split.ppp.R
new file mode 100755
index 0000000..2d6c026
--- /dev/null
+++ b/R/split.ppp.R
@@ -0,0 +1,323 @@
+#
+# split.ppp.R
+#
+# $Revision: 1.32 $ $Date: 2015/08/05 02:50:25 $
+#
+# split.ppp and "split<-.ppp"
+#
+#########################################
+
+split.ppp <- function(x, f = marks(x), drop=FALSE, un=NULL, reduce=FALSE, ...) {
+  verifyclass(x, "ppp")
+  mf <- markformat(x)
+  fgiven <- !missing(f)
+  
+  if(is.null(un)) {
+    un <- !fgiven && (mf != "dataframe")
+  } else un <- as.logical(un)
+
+  if(!fgiven) {
+    # f defaults to marks of x
+    switch(mf,
+           none={
+             stop("f is missing and there are no marks")
+           },
+           vector={
+             if(!is.multitype(x)) 
+               stop("f is missing and the pattern is not multitype")
+             f <- fsplit <- marks(x)
+           },
+           dataframe={
+             f <- fsplit <- firstfactor(marks(x))
+             if(is.null(f))
+               stop("Data frame of marks contains no factors")
+           })
+    splittype <- "factor"
+  } else {
+    # f was given
+    fsplit <- f
+    if(is.factor(f)) {
+      splittype <- "factor"
+    } else if(is.logical(f)) {
+      splittype <- "factor"
+      f <- factor(f)
+    } else if(is.tess(f)) {
+      # f is a tessellation: determine the grouping
+      f <- marks(cut(x, fsplit))
+      splittype <- "tess"
+    } else if(is.owin(f)) {
+      # f is a window: coerce to a tessellation
+      W <- as.owin(x)
+      fsplit <- tess(tiles=list(fsplit, setminus.owin(W, fsplit)),
+                     window=W)
+      f <- marks(cut(x, fsplit))
+      splittype <- "tess"
+    } else if(is.im(f)) {
+      # f is an image: coerce to a tessellation
+      fsplit <- tess(image=f)
+      f <- marks(cut(x, fsplit))
+      splittype <- "tess"
+    } else if(is.character(f) && length(f) == 1) {
+      # f is the name of a column of marks
+      marx <- marks(x)
+      if(is.data.frame(marx) && (f %in% names(marx))) 
+        fsplit <- f <- marx[[f]]
+      else
+        stop(paste("The name", sQuote(f), "does not match any column of marks"))
+      splittype <- "factor"
+    } else 
+      stop(paste("f must be",
+                 "a factor, a logical vector,", 
+                 "a tessellation, a window, an image,",
+                 "or the name of a column of marks"))
+    if(length(f) != npoints(x))
+      stop("length(f) must equal the number of points in x")
+  }
+
+  # At this point
+  # 'f' is a factor that can be used to separate the points
+  # 'fsplit' is the object (either a factor or a tessellation)
+  # that determines the split (and can be "un-split")
+
+  lev <- levels(f)
+  if(drop) {
+    # remove components that don't contain points
+    retain <- (table(f) > 0)
+    lev <- lev[retain]
+    switch(splittype,
+           tess = {
+             # remove tiles that don't contain points
+             fsplit <- fsplit[retain]
+           },
+           factor = {
+             # delete levels that don't occur
+             fsplit <- factor(fsplit, levels=lev)
+           },
+           stop("Internal error: wrong format for fsplit"))
+  }
+
+  ## remove marks that will not be retained
+  if(un && reduce && mf == "dataframe")
+    warning("Incompatible arguments un=TRUE and reduce=TRUE: assumed un=TRUE")
+  if(un) {
+    x <- unmark(x)
+  } else if(reduce && !fgiven && mf == "dataframe") {
+    # remove the column of marks that determined the split
+    j <- findfirstfactor(marks(x))
+    if(!is.null(j))
+      marks(x) <- marks(x)[, -j]
+  }
+  
+  ## split the data
+  out <- list()
+  fok <- !is.na(f)
+  for(l in lev) 
+    out[[paste(l)]] <- x[fok & (f == l)]
+
+  ## 
+  if(splittype == "tess") {
+    til <- tiles(fsplit)
+    for(i in seq_along(out))
+      out[[i]]$window <- til[[i]]
+  }
+  class(out) <- c("splitppp", "ppplist", "solist", class(out))
+  attr(out, "fsplit") <- fsplit
+  attr(out, "fgroup") <- f
+  return(out)
+}
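+
+# Illustrative sketch (not part of the upstream code; 'amacrine' is the
+# multitype amacrine cells dataset shipped with spatstat):
+#   split(amacrine)                            # two patterns, "off" and "on"
+#   split(amacrine, quadrats(amacrine, 2, 2))  # split by a tessellation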
+
+"split<-.ppp" <- function(x, f=marks(x), drop=FALSE, un=missing(f), 
+                          ..., value) {
+  verifyclass(x, "ppp")
+  W <- x$window
+  mf <- markformat(x)
+  # evaluate `un' before assigning value of 'f'
+  force(un)
+
+  # validate assignment value
+  stopifnot(is.list(value))
+  if(!all(unlist(lapply(value, is.ppp))))
+    stop(paste("Each entry of", sQuote("value"),
+               "must be a point pattern"))
+
+  ismark <- unlist(lapply(value, is.marked))
+  if(any(ismark) && !all(ismark))
+    stop(paste("Some entries of",
+               sQuote("value"),
+               "are marked, and others are unmarked"))
+  vmarked <- all(ismark)
+
+  # determine type of splitting
+  if(missing(f)) {
+    # f defaults to marks of x
+    switch(mf,
+           none={
+             stop("f is missing and there are no marks")
+           },
+           vector={
+             if(!is.multitype(x)) 
+               stop("f is missing and the pattern is not multitype")
+             f <- fsplit <- marks(x)
+           },
+           dataframe={
+             f <- fsplit <- firstfactor(marks(x))
+             if(is.null(f))
+               stop("Data frame of marks contains no factors")
+           })
+  } else {
+    # f given
+    fsplit <- f
+    if(is.tess(f)) {
+      # f is a tessellation: determine the grouping
+      f <- marks(cut(x, fsplit))
+    } else if(is.im(f)) {
+      # f is an image: determine the grouping
+      fsplit <- tess(image=f)
+      f <- marks(cut(x, fsplit))
+    } else if(is.character(f) && length(f) == 1) {
+      # f is the name of a column of marks
+      marx <- marks(x)
+      if(is.data.frame(marx) && (f %in% names(marx))) 
+        fsplit <- f <- marx[[f]]
+      else
+        stop(paste("The name", sQuote(f), "does not match any column of marks"))
+    } else if(is.logical(f)) {
+      f <- factor(f)
+    } else if(!is.factor(f))
+      stop(paste("f must be",
+                 "a factor, a logical vector, a tessellation, an image,",
+                 "or the name of a column of marks"))
+    if(length(f) != x$n)
+      stop("length(f) must equal the number of points in x")
+  } 
+  #
+  all.levels <- lev <- levels(f)
+  if(!drop) 
+    levtype <- "levels of f"
+  else {
+    levtype <- "levels which f actually takes"
+    # remove components that don't contain points
+    lev <- lev[table(f) > 0]
+  }
+  if(length(value) != length(lev))
+      stop(paste("length of", sQuote("value"),
+                 "should equal the number of",
+                 levtype))
+
+  # ensure value[[i]] is associated with lev[i]
+  if(!is.null(names(value))) {
+    if(!all(names(value) %in% as.character(lev)))
+      stop(paste("names of", sQuote("value"), "should be levels of f"))
+    value <- value[lev]
+  }
+  names(value) <- NULL
+
+  # restore the marks, if they were discarded
+  if(un && is.marked(x)) {
+    if(vmarked)
+      warning(paste(sQuote("value"), "contains marked point patterns:",
+                    "this is inconsistent with un=TRUE; marks ignored."))
+    for(i in seq_along(value)) 
+      value[[i]] <- value[[i]] %mark% factor(lev[i], levels=all.levels)
+  }
+
+  # handle NA's in splitting factor
+  if(any(isNA <- is.na(f))) {
+    xNA <- x[isNA]
+    if(un && is.marked(x)) 
+      xNA <- xNA %mark% factor(NA, levels=all.levels)
+    value <- append(value, list(xNA))
+  }
+
+  # put Humpty together again
+  if(npoints(x) == length(f) &&
+     length(levels(f)) == length(value) &&
+     all(table(f) == sapply(value, npoints))) {
+    ## exact correspondence
+    out <- x
+    for(i in seq_along(levels(f)))
+      out[ f == lev[i] ] <- value[[i]]
+  } else {
+    out <- do.call(superimpose,c(value,list(W=W)))
+  }
+  return(out)
+}
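+
+# Illustrative sketch (not part of the upstream code): the assignment form
+# reassembles modified sub-patterns, e.g. jittering each type separately:
+#   X <- amacrine
+#   split(X) <- lapply(split(X), rjitter, radius=0.01)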
+
+
+print.splitppp <- function(x, ...) {
+  f <- attr(x, "fsplit")
+  what <- if(is.tess(f)) "tessellation" else
+          if(is.factor(f)) "factor" else "unknown data"
+  cat(paste("Point pattern split by", what, "\n"))
+  nam <- names(x)
+  for(i in seq_along(x)) {
+    cat(paste("\n", nam[i], ":\n", sep=""))
+    print(x[[i]])
+  }
+  return(invisible(NULL))
+}
+
+summary.splitppp <- function(object, ...) {
+  x <- lapply(object, summary, ...)
+  class(x) <- "summary.splitppp"
+  x
+}
+
+print.summary.splitppp <- function(x, ...) {
+  class(x) <- "anylist"
+  print(x)
+  invisible(NULL)
+}
+
+"[.splitppp" <- function(x, ...) {
+  f <- attr(x, "fsplit")
+  # invoke list method on x
+  class(x) <- "list"
+  y <- x[...]
+  # then make it a 'splitppp' object too
+  class(y) <- c("splitppp", class(y))
+  if(is.tess(f)) {
+    fsplit <- f[...]
+  } else if(is.factor(f)) {
+    lev <- levels(f)
+    sublev <- lev[...]
+    subf <- f[f %in% sublev]
+    fsplit <- factor(subf, levels=lev)
+  } else stop("Unknown splitting type")
+  attr(y, "fsplit") <- fsplit
+  y
+}
+
+"[<-.splitppp" <- function(x, ..., value) {
+  if(!all(unlist(lapply(value, is.ppp))))
+    stop("replacement value must be a list of point patterns")
+  f <- attr(x, "fsplit")
+  # invoke list method
+  class(x) <- "list"
+  x[...] <- value
+  # then make it a 'splitppp' object too
+  class(x) <- c("splitppp", class(x))
+  if(is.tess(f)) {
+    fsplit <- f
+  } else if(is.factor(f)) {
+    lev <- levels(f)
+    fsplit <- factor(rep.int(lev, unlist(lapply(x, npoints))), levels=lev)
+  }
+  attr(x, "fsplit") <- fsplit
+  x
+}
+  
+density.splitppp <- function(x, ..., se=FALSE) {
+  density.ppplist(x, ..., se=se)
+}
+
+plot.splitppp <- function(x, ..., main) {
+  if(missing(main)) main <- short.deparse(substitute(x))
+  do.call(plot.solist,
+          resolve.defaults(list(x=x, main=main),
+                           list(...),
+                           list(equal.scales=TRUE)))
+}
+
+as.layered.splitppp <- function(X) { do.call(layered, X) }
+
diff --git a/R/split.ppx.R b/R/split.ppx.R
new file mode 100644
index 0000000..b2856bf
--- /dev/null
+++ b/R/split.ppx.R
@@ -0,0 +1,143 @@
+#
+# split.ppx.R
+#
+# $Revision: 1.5 $ $Date: 2016/03/05 01:33:38 $
+#
+# split.ppx etc
+#
+#########################################
+
+split.ppx <- function(x, f = marks(x), drop=FALSE, un=NULL, ...) {
+  stopifnot(inherits(x, "ppx"))
+  mf <- markformat(x)
+  if(is.null(un))
+    un <- missing(f) && !(mf %in% c("dataframe", "hyperframe"))
+
+  if(missing(f)) {
+    # f defaults to marks of x
+    switch(mf,
+           none={
+             stop("f is missing and there are no marks")
+           },
+           vector={
+             if(!is.multitype(x)) 
+               stop("f is missing and the pattern is not multitype")
+             f <- fsplit <- marks(x)
+           },
+           hyperframe=,
+           dataframe={
+             f <- fsplit <- firstfactor(marks(x))
+             if(is.null(f))
+               stop("Marks do not include a factor")
+           })
+    splittype <- "factor"
+  } else{
+    # f was given
+    fsplit <- f
+    if(is.factor(f)) {
+      splittype <- "factor"
+    } else if(is.character(f) && length(f) == 1) {
+      # f is the name of a column of marks
+      marx <- marks(x)
+      if((is.data.frame(marx) || is.hyperframe(marx))
+         && (f %in% names(marx))) {
+        fsplit <- f <- as.factor(marx[ ,f,drop=TRUE])
+      } else
+        stop(paste("The name", sQuote(f), "does not match any column of marks"))
+      splittype <- "factor"
+    } else 
+      stop(paste("f must be",
+                 "a factor,",
+                 "or the name of a column of marks"))
+    if(length(f) != npoints(x))
+      stop("length(f) must equal the number of points in x")
+  }
+
+  # At this point
+  # 'f' is a factor that can be used to separate the points
+  # 'fsplit' is the object (either a factor or a tessellation)
+  # that determines the split (and can be "un-split")
+
+  lev <- levels(f)
+  if(drop) {
+    # remove components that don't contain points
+    retain <- (table(f) > 0)
+    lev <- lev[retain]
+    switch(splittype,
+           factor = {
+             # delete levels that don't occur
+             fsplit <- factor(fsplit, levels=lev)
+           },
+           stop("Internal error: wrong format for fsplit"))
+  }
+
+  # split the data
+  out <- list()
+  for(l in lev) 
+    out[[paste(l)]] <- x[!is.na(f) & (f == l)]
+  
+  if(un)
+     out <- lapply(out, unmark)
+  class(out) <- c("splitppx", "anylist", class(out))
+  attr(out, "fsplit") <- fsplit
+  return(out)
+}
+
+print.splitppx <- function(x, ...) {
+  f <- attr(x, "fsplit")
+  what <- if(is.factor(f)) "factor" else "unknown data"
+  cat(paste("Multidimensional point pattern split by", what, "\n"))
+  nam <- names(x)
+  for(i in seq_along(x)) {
+    cat(paste("\n", nam[i], ":\n", sep=""))
+    print(x[[i]])
+  }
+  return(invisible(NULL))
+}
+
+summary.splitppx <- function(object, ...) {
+  x <- lapply(object, summary, ...)
+  class(x) <- "summary.splitppx"
+  x
+}
+
+print.summary.splitppx <- function(x, ...) {
+  class(x) <- "anylist"
+  print(x)
+  invisible(NULL)
+}
+
+"[.splitppx" <- function(x, ...) {
+  f <- attr(x, "fsplit")
+  # invoke list method on x
+  class(x) <- "list"
+  y <- x[...]
+  # then make it a 'splitppx' object too
+  class(y) <- c("splitppx", class(y))
+  if(is.factor(f)) {
+    lev <- levels(f)
+    sublev <- lev[...]
+    subf <- f[f %in% sublev]
+    fsplit <- factor(subf, levels=lev)
+  } else stop("Unknown splitting type")
+  attr(y, "fsplit") <- fsplit
+  y
+}
+
+"[<-.splitppx" <- function(x, ..., value) {
+  if(!all(unlist(lapply(value, is.ppx))))
+    stop("replacement value must be a list of point patterns (ppx)")
+  f <- attr(x, "fsplit")
+  # invoke list method
+  class(x) <- "list"
+  x[...] <- value
+  # then make it a 'splitppx' object too
+  class(x) <- c("splitppx", class(x))
+  if(is.factor(f)) {
+    lev <- levels(f)
+    fsplit <- factor(rep.int(lev, unlist(lapply(x, npoints))), levels=lev)
+  }
+  attr(x, "fsplit") <- fsplit
+  x
+}
+  
diff --git a/R/ssf.R b/R/ssf.R
new file mode 100644
index 0000000..5fc7305
--- /dev/null
+++ b/R/ssf.R
@@ -0,0 +1,238 @@
+#
+#   ssf.R
+#
+#  spatially sampled functions
+#
+#  $Revision: 1.17 $  $Date: 2017/01/26 00:55:22 $
+#
+
+ssf <- function(loc, val) {
+  stopifnot(is.ppp(loc))
+  if(is.function(val))
+    val <- val(loc$x, loc$y)
+  if(is.data.frame(val))
+    val <- as.matrix(val)
+  if(!is.matrix(val))
+    val <- matrix(val, ncol=1, dimnames=list(NULL, "value"))
+  if(nrow(val) != npoints(loc))
+    stop("Incompatible lengths")
+  result <- loc %mark% val
+  class(result) <- c("ssf", class(result))
+  attr(result, "ok") <- complete.cases(val)
+  return(result)
+}
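+
+# Illustrative sketch (not part of the upstream code; 'cells' is the
+# spatstat dataset in the unit square):
+#   X <- ssf(cells, nndist(cells))  # nearest-neighbour distance at each point
+#   plot(X, how="smoothed")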
+
+print.ssf <- function(x, ..., brief=FALSE) {
+  if(brief) {
+    cat(paste("Spatial function sampled at", npoints(x), "locations\n"))
+  } else {
+    cat("Spatially sampled function\n")
+    cat("Locations:\n\t")
+    print(unmark(x))
+  }
+  val <- marks(x)
+  if(!is.matrix(val)) {
+    d <- 1
+    warning("Internal format error: val is not a matrix")
+  } else d <- ncol(val) 
+  if(!brief) {
+    type <- if(d == 1) "Scalar" else paste(d, "-vector", sep="")
+    cat(paste(type, "valued function\n"))
+  }
+  if(d > 1 && !is.null(nama <- colnames(val)))
+    cat(paste("Component names:", commasep(sQuote(nama)), "\n"))
+  return(invisible(NULL))
+}
+
+image.ssf <- function(x, ...) {
+  do.call("plot", resolve.defaults(list(x, how="smoothed"), list(...)))
+}
+
+as.im.ssf <- function(X, ...) nnmark(X, ...)
+
+as.function.ssf <- function(x, ...) {
+  X <- x
+  mX <- marks(X)
+  switch(markformat(X),
+         vector = {
+           g <- function(x, y=NULL) {
+             Y <- xy.coords(x,y)[c("x","y")]
+             J <- nncross(Y, X, what="which")
+             result <- mX[J]
+             return(unname(result))
+           }
+         },
+         dataframe = {
+           g <- function(x, y=NULL) {
+             Y <- xy.coords(x,y)[c("x","y")]
+             J <- nncross(Y, X, what="which")
+             result <-  mX[J,,drop=FALSE]
+             row.names(result) <- NULL
+             return(result)
+           }
+         },
+         stop("Marks must be a vector or data.frame"))
+  h <- funxy(g, Frame(X))
+  return(h)
+}
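+
+# Illustrative sketch (not part of the upstream code): the returned function
+# does nearest-sample lookup, so it can be evaluated anywhere in the window:
+#   f <- as.function(ssf(cells, nndist(cells)))
+#   f(0.5, 0.5)   # value at the sample point nearest to (0.5, 0.5)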
+
+plot.ssf <- function(x, ..., how=c("smoothed", "nearest", "points"),
+                     style = c("image", "contour", "imagecontour"),
+                     sigma=NULL, contourargs=list()) {
+  xname <- short.deparse(substitute(x))
+  how <- match.arg(how)
+  style <- match.arg(style)
+  otherargs <- list(...)
+  # convert to images
+  y <- switch(how,
+              points = as.ppp(x),
+              nearest = nnmark(x), 
+              smoothed = Smooth(x, sigma=sigma)
+              )
+  # points plot
+  if(how == "points") {
+    out <- do.call("plot",
+                   resolve.defaults(list(y), otherargs,
+                                    list(main=xname)))
+    if(is.null(out)) return(invisible(NULL))
+    return(out)
+  }
+  # image plot
+  switch(style,
+         image = {
+           out <- do.call("plot",
+                          resolve.defaults(list(y), otherargs,
+                                           list(main=xname)))
+         },
+         contour = {
+           do.call("plot",
+                   resolve.defaults(list(as.owin(x)),
+                                    otherargs, list(main=xname)))
+           do.call("contour",
+                   resolve.defaults(list(y, add=TRUE), contourargs))
+           out <- NULL
+         },
+         imagecontour = {
+           out <- do.call("plot",
+                          resolve.defaults(list(y), otherargs,
+                                           list(main=xname)))
+           do.call("contour",
+                   resolve.defaults(list(y, add=TRUE), contourargs))
+         })
+  return(invisible(out))
+}
+
+contour.ssf <- function(x, ..., main, sigma=NULL) {
+  if(missing(main))
+    main <- short.deparse(substitute(x))
+  y <- Smooth(x, sigma=sigma)
+  contour(y, ..., main=main)
+  return(invisible(NULL))
+}
+
+Smooth.ssf <- function(X, ...) {
+  stopifnot(inherits(X, "ssf"))
+  ok  <- attr(X, "ok")
+  Y   <- as.ppp(X)[ok]
+  argh <- list(...)
+  isnul <- as.logical(unlist(lapply(argh, is.null)))
+  nonnularg <- argh[!isnul]
+  sigma0 <- if(any(c("sigma", "varcov") %in% names(nonnularg)))
+            NULL else 1.4 * max(nndist(X))
+  Z <- do.call("Smooth.ppp",
+               resolve.defaults(list(X = Y),
+                                list(...),
+                                list(sigma=sigma0),
+                                .MatchNull=FALSE))
+                                # don't take NULL for an answer!
+  return(Z)
+}
+
+"[.ssf" <-
+  function(x, i, j, ..., drop) {
+  loc <- unmark(x)
+  val <- marks(x)
+  ok  <- attr(x, "ok")
+  #
+  if(!missing(j)) 
+    val <- val[, j, drop=FALSE]
+  if(!missing(i)) {
+    # use [.ppp to identify which points are retained
+    locn <- loc %mark% seq_len(npoints(loc))
+    loci <- locn[i]
+    loc  <- unmark(loci)
+    id   <- marks(loci)
+    # extract
+    val  <- val[id, , drop=FALSE]
+    ok   <- ok[id]
+  }
+  out <- loc %mark% val
+  class(out) <- c("ssf", class(out))
+  attr(out, "ok") <- ok
+  return(out)    
+}
+
+as.ppp.ssf <- function(X, ...) {
+  class(X) <- "ppp"
+  attr(X, "ok") <- NULL
+  return(X)
+}
+
+marks.ssf <-  function(x, ...) {
+  val <- x$marks
+  if(is.null(dim(val))) val <- matrix(val, ncol=1)
+  if(is.data.frame(val)) val <- as.matrix(val)
+  return(val)
+}
+
+"marks<-.ssf" <- function(x, ..., value) {
+  ssf(unmark(x), value)
+}
+
+unmark.ssf <- function(X) { unmark(as.ppp(X)) }
+
+with.ssf <- function(data, ...) {
+  loc <- as.ppp(data)
+  val <- marks(data)
+  newval <- with(as.data.frame(val), ...)
+  if(length(newval) == npoints(loc) ||
+     (is.matrix(newval) && nrow(newval) == npoints(loc)))
+    return(ssf(loc, newval))
+  return(newval)
+}
+
+apply.ssf <- function(X, ...) {
+  loc <- as.ppp(X)
+  val <- marks(X)
+  newval <- apply(val, ...)
+  if(length(newval) == npoints(loc) ||
+     (is.matrix(newval) && nrow(newval) == npoints(loc)))
+    return(ssf(loc, newval))
+  return(newval)
+}
+
+range.ssf <- function(x, ...) range(marks(x), ...)
+min.ssf <- function(x, ...) min(marks(x), ...)
+max.ssf <- function(x, ...) max(marks(x), ...)
+
+integral.ssf <- function(f, domain=NULL, ..., weights=attr(f, "weights")) {
+  if(!is.null(weights)) {
+    check.nvector(weights, npoints(f), oneok=TRUE)
+    if(length(weights) == 1) weights <- rep(weights, npoints(f))
+  }
+  if(!is.null(domain)) {
+    ok <- inside.owin(f, w=domain)
+    f <- f[ok,]
+    if(!is.null(weights)) weights <- weights[ok]
+  }
+  y <- marks(f)
+  if(is.null(weights)) {
+    z <- if(!is.matrix(y)) mean(y, na.rm=TRUE) else colMeans(y, na.rm=TRUE)
+    a <- area(Window(f))
+  } else {
+    z <- if(!is.matrix(y)) weighted.mean(y, w=weights, na.rm=TRUE) else 
+         apply(y, 2, weighted.mean, w=weights, na.rm=TRUE)
+    a <- sum(weights)
+  }
+  return(z * a)
+}
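+
+# Illustrative sketch (not part of the upstream code): without weights the
+# integral is (sample mean of the values) * (window area), so
+#   integral(ssf(cells, rep(2, npoints(cells))))
+# gives 2, since the window of 'cells' is the unit square.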
diff --git a/R/stienen.R b/R/stienen.R
new file mode 100644
index 0000000..4f4adf2
--- /dev/null
+++ b/R/stienen.R
@@ -0,0 +1,59 @@
+## stienen.R
+##
+##  Stienen diagram with border correction
+##
+##  $Revision: 1.8 $ $Date: 2015/10/21 09:06:57 $
+
+stienen <- function(X, ..., bg="grey", border=list(bg=NULL)) {
+  Xname <- short.deparse(substitute(X))
+  stopifnot(is.ppp(X))
+  if(npoints(X) <= 1) {
+    do.call(plot,
+            resolve.defaults(list(x=Window(X)),
+                             list(...),
+                             list(main=Xname)))
+    return(invisible(NULL))
+  }
+  d <- nndist(X)
+  b <- bdist.points(X)
+  Y <- X %mark% d
+  gp <- union(graphicsPars("symbols"), "lwd")
+  do.call.plotfun(plot.ppp,
+                  resolve.defaults(list(x=Y[b >= d],
+                                        markscale=1),
+                                   list(...),
+                                   list(bg=bg),
+                                   list(main=Xname)),
+                  extrargs=gp)
+  if(!identical(border, FALSE)) {
+    if(!is.list(border)) border <- list()
+    do.call.plotfun(plot.ppp,
+                    resolve.defaults(list(x=Y[b < d],
+                                          markscale=1,
+                                          add=TRUE),
+                                     border,
+                                     list(...),
+                                     list(bg=bg),
+                                     list(cols=grey(0.5), lwd=2)),
+                  extrargs=gp)
+  }
+  return(invisible(NULL))
+}
+
+stienenSet <- function(X, edge=TRUE) {
+  stopifnot(is.ppp(X))
+  nnd <- nndist(X)
+  if(!edge) {
+    ok <- bdist.points(X) >= nnd
+    X <- X[ok]
+    nnd <- nnd[ok]
+  }
+  n <- npoints(X)
+  if(n == 0) return(emptywindow(Window(X)))
+  if(n == 1) return(Window(X))
+  d <- nnd/2
+  delta <- 2 * pi * max(d)/128
+  Z <- disc(d[1], X[1], delta=delta)
+  for(i in 2:n) Z <- union.owin(Z, disc(d[i], X[i], delta=delta))
+  return(Z)
+}
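+
+## Illustrative sketch (not part of the upstream code): each disc has
+## diameter equal to the nearest-neighbour distance of its centre, so the
+## discs may touch but never overlap:
+##   plot(stienenSet(cells))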
diff --git a/R/strauss.R b/R/strauss.R
new file mode 100755
index 0000000..ea61d78
--- /dev/null
+++ b/R/strauss.R
@@ -0,0 +1,199 @@
+#
+#
+#    strauss.R
+#
+#    $Revision: 2.37 $	$Date: 2017/06/05 10:31:58 $
+#
+#    The Strauss process
+#
+#    Strauss()    create an instance of the Strauss process
+#                 [an object of class 'interact']
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+Strauss <- local({
+
+  # create blank template object without family and pars
+
+  BlankStrauss <-
+  list(
+       name     = "Strauss process",
+       creator  = "Strauss",
+       family    = "pairwise.family", # evaluated later
+       pot      = function(d, par) {
+         d <= par$r
+       },
+       par      = list(r = NULL), # to be filled in
+       parnames = "interaction distance",
+       init     = function(self) {
+         r <- self$par$r
+         if(!is.numeric(r) || length(r) != 1 || r <= 0)
+           stop("interaction distance r must be a positive number")
+       },
+       update = NULL,  # default OK
+       print = NULL,    # default OK
+       interpret =  function(coeffs, self) {
+         loggamma <- as.numeric(coeffs[1])
+         gamma <- exp(loggamma)
+         return(list(param=list(gamma=gamma),
+                     inames="interaction parameter gamma",
+                     printable=dround(gamma)))
+       },
+       valid = function(coeffs, self) {
+         loggamma <- as.numeric(coeffs[1])
+         return(is.finite(loggamma) && (loggamma <= 0))
+       },
+       project = function(coeffs, self) {
+         if((self$valid)(coeffs, self)) return(NULL) else return(Poisson())
+       },
+       irange = function(self, coeffs=NA, epsilon=0, ...) {
+         r <- self$par$r
+         if(anyNA(coeffs))
+           return(r)
+         loggamma <- coeffs[1]
+         if(abs(loggamma) <= epsilon)
+           return(0)
+         else
+           return(r)
+       },
+       version=NULL, # to be filled in 
+       # fast evaluation is available for the border correction only
+       can.do.fast=function(X,correction,par) {
+         return(all(correction %in% c("border", "none")))
+       },
+       fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) {
+         # fast evaluator for Strauss interaction
+         if(!all(correction %in% c("border", "none")))
+           return(NULL)
+         if(spatstat.options("fasteval") == "test")
+           message("Using fast eval for Strauss")
+         r <- potpars$r
+         answer <- strausscounts(U, X, r, EqualPairs)
+         return(matrix(answer, ncol=1))
+       },
+       Mayer=function(coeffs, self) {
+         # second Mayer cluster integral
+         gamma <- exp(as.numeric(coeffs[1]))
+         r <- self$par$r
+         return((1-gamma) * pi * r^2)
+       },
+       Percy=function(d, coeffs, par, ...) {
+         ## term used in Percus-Yevick type approximation
+         gamma <- exp(as.numeric(coeffs[1]))
+         R <- par$r
+         t <- abs(d/(2*R))
+         t <- pmin.int(t, 1)
+         y <- 2 * R^2 * (pi * (1-gamma)
+                         - (1-gamma)^2 * (acos(t) - t * sqrt(1 - t^2)))
+         return(y)
+       },
+       delta2 = function(X, inte, correction, ..., sparseOK=FALSE) {
+         r <- inte$par$r
+         X <- as.ppp(X) # algorithm is the same for data and dummy points
+         nX <- npoints(X)
+         switch(correction,
+                none = ,
+                border = {
+                  cl <- closepairs(X, r, what="indices")
+                  weight <- 1
+                },
+                isotropic = ,
+                Ripley = {
+                  cl <- closepairs(X, r, what="ijd")
+                  weight <- edge.Ripley(X[cl$i], cl$d)
+                },
+                translate = {
+                  cl <- closepairs(X, r, what="all")
+                  weight <- edge.Trans(dx = cl$dx,
+                                       dy = cl$dy,
+                                       W = Window(X),
+                                       paired=TRUE)
+                },
+                return(NULL)
+                )
+         v <- sparseMatrix(i=cl$i, j=cl$j, x=as.numeric(weight),
+                           dims=c(nX, nX))
+         if(!sparseOK)
+           v <- as.matrix(v)
+         return(v)
+       }
+       )
+  class(BlankStrauss) <- "interact"
+
+
+  # Finally define main function
+  
+  Strauss <- function(r) {
+    instantiate.interact(BlankStrauss, list(r=r))
+  }
+
+  Strauss <- intermaker(Strauss, BlankStrauss)
+  
+  Strauss
+})
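+
+# Illustrative sketch (not part of the upstream code): the interaction
+# object is passed as the second argument of ppm(), e.g.
+#   fit <- ppm(cells ~ 1, Strauss(r=0.07))
+# with the constraint gamma <= 1 enforced via 'valid' and 'project' above.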
+
+# generally accessible functions
+      
+strausscounts <- function(U, X, r, EqualPairs=NULL) {
+  answer <- crosspaircounts(U,X,r)
+  # subtract counts of identical pairs
+  if(length(EqualPairs) > 0) {
+    nU <- npoints(U)
+    idcount <- as.integer(table(factor(EqualPairs[,2L], levels=1:nU)))
+    answer <- answer - idcount
+  }
+  return(answer)
+}
+
+crosspaircounts <- function(X, Y, r) {
+  stopifnot(is.ppp(X))
+  stopifnot(is.numeric(r) && length(r) == 1)
+  stopifnot(is.finite(r))
+  stopifnot(r >= 0)
+  # sort in increasing order of x coordinate
+  oX <- fave.order(X$x)
+  oY <- fave.order(Y$x)
+  Xsort <- X[oX]
+  Ysort <- Y[oY]
+  nX <- npoints(X)
+  nY <- npoints(Y)
+  # call C routine
+  out <- .C("Ccrosspaircounts",
+            nnsource = as.integer(nX),
+            xsource  = as.double(Xsort$x),
+            ysource  = as.double(Xsort$y),
+            nntarget = as.integer(nY),
+            xtarget  = as.double(Ysort$x),
+            ytarget  = as.double(Ysort$y),
+            rrmax    = as.double(r),
+            counts   = as.integer(integer(nX)),
+            PACKAGE = "spatstat")
+  answer <- integer(nX)
+  answer[oX] <- out$counts
+  return(answer)
+}
+
+closepaircounts <- function(X, r) {
+  stopifnot(is.ppp(X))
+  stopifnot(is.numeric(r) && length(r) == 1)
+  stopifnot(is.finite(r))
+  stopifnot(r >= 0)
+  # sort in increasing order of x coordinate
+  oX <- fave.order(X$x)
+  Xsort <- X[oX]
+  nX <- npoints(X)
+  # call C routine
+  out <- .C("Cclosepaircounts",
+            nxy    = as.integer(nX),
+            x      = as.double(Xsort$x),
+            y      = as.double(Xsort$y),
+            rmaxi  = as.double(r),
+            counts = as.integer(integer(nX)),
+            PACKAGE = "spatstat")
+  answer <- integer(nX)
+  answer[oX] <- out$counts
+  return(answer)
+}
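+
+# Illustrative sketch (not part of the upstream code):
+#   closepaircounts(cells, 0.1)        # per point: other points within 0.1
+#   crosspaircounts(cells, cells, 0.1) # also counts each point itself,
+#                                      # which strausscounts() subtracts off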
+
diff --git a/R/strausshard.R b/R/strausshard.R
new file mode 100755
index 0000000..ca09617
--- /dev/null
+++ b/R/strausshard.R
@@ -0,0 +1,135 @@
+#
+#
+#    strausshard.R
+#
+#    $Revision: 2.22 $	$Date: 2016/02/16 01:39:12 $
+#
+#    The Strauss/hard core process
+#
+#    StraussHard()     create an instance of the Strauss-hardcore process
+#                      [an object of class 'interact']
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+StraussHard <- local({
+
+  BlankStraussHard <- 
+    list(
+         name   = "Strauss - hard core process",
+         creator = "StraussHard",
+         family  = "pairwise.family",  # evaluated later
+         pot    = function(d, par) {
+           v <- 1 * (d <= par$r)
+           v[ d <= par$hc ] <-  (-Inf)
+           v
+         },
+         par    = list(r = NULL, hc = NULL), # filled in later
+         parnames = c("interaction distance",
+                      "hard core distance"), 
+         selfstart = function(X, self) {
+           # self starter for StraussHard
+           nX <- npoints(X)
+           if(nX < 2) {
+             # not enough points to make any decisions
+             return(self)
+           }
+           r <- self$par$r
+           md <- minnndist(X)
+           if(md == 0) {
+             warning(paste("Pattern contains duplicated points:",
+                           "hard core must be zero"))
+             return(StraussHard(r=r, hc=0))
+           }
+           if(!is.na(hc <- self$par$hc)) {
+             # value fixed by user or previous invocation
+             # check it
+             if(md < hc)
+               warning(paste("Hard core distance is too large;",
+                             "some data points will have zero probability"))
+             return(self)
+           }
+           # take hc = minimum interpoint distance * n/(n+1)
+           hcX <- md * nX/(nX+1)
+           StraussHard(r=r, hc = hcX)
+         },
+         init   = function(self) {
+           r <- self$par$r
+           hc <- self$par$hc
+           if(length(hc) != 1)
+             stop("hard core distance must be a single value")
+           if(!is.na(hc)) {
+             if(!is.numeric(hc) || hc <= 0)
+               stop("hard core distance hc must be a positive number, or NA")
+             if(!is.numeric(r) || length(r) != 1 || r <= hc)
+               stop("interaction distance r must be a number greater than hc")
+           }
+         },
+         update = NULL,       # default OK
+         print = NULL,         # default OK
+         interpret =  function(coeffs, self) {
+           loggamma <- as.numeric(coeffs[1])
+           gamma <- exp(loggamma)
+           return(list(param=list(gamma=gamma),
+                       inames="interaction parameter gamma",
+                       printable=dround(gamma)))
+         },
+         valid = function(coeffs, self) {
+           loggamma <- as.numeric(coeffs[1])
+           return(is.finite(loggamma))
+         },
+         project = function(coeffs, self) {
+           loggamma <- as.numeric(coeffs[1])
+           if(is.finite(loggamma))
+             return(NULL)
+           hc <- self$par$hc
+           if(hc > 0) return(Hardcore(hc)) else return(Poisson()) 
+         },
+         irange = function(self, coeffs=NA, epsilon=0, ...) {
+           r <- self$par$r
+           hc <- self$par$hc
+           if(anyNA(coeffs))
+             return(r)
+           loggamma <- coeffs[1]
+           if(abs(loggamma) <= epsilon)
+             return(hc)
+           else
+             return(r)
+         },
+       version=NULL, # evaluated later
+       # fast evaluation is available for the border correction only
+       can.do.fast=function(X,correction,par) {
+         return(all(correction %in% c("border", "none")))
+       },
+       fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) {
+         # fast evaluator for StraussHard interaction
+         if(!all(correction %in% c("border", "none")))
+           return(NULL)
+         if(spatstat.options("fasteval") == "test")
+           message("Using fast eval for StraussHard")
+         r <- potpars$r
+         hc <- potpars$hc
+         hclose <- strausscounts(U, X, hc, EqualPairs)
+         rclose <- strausscounts(U, X, r,  EqualPairs)
+         answer <- ifelseXB(hclose == 0, rclose, -Inf)
+         return(matrix(answer, ncol=1))
+       },
+       Mayer=function(coeffs, self) {
+         # second Mayer cluster integral
+         gamma <- exp(as.numeric(coeffs[1]))
+         r <- self$par$r
+         hc <- self$par$hc
+         return(pi * (hc^2 + (1-gamma) * (r^2 - hc^2)))
+       }
+         )
+  class(BlankStraussHard) <- "interact"
+  
+  StraussHard <- function(r, hc=NA) {
+    instantiate.interact(BlankStraussHard, list(r=r, hc=hc))
+  }
+
+  StraussHard <- intermaker(StraussHard, BlankStraussHard)
+  
+  StraussHard
+})
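+
+# Illustrative sketch (not part of the upstream code): with hc=NA the
+# 'selfstart' component above chooses the hard core from the data, e.g.
+#   fit <- ppm(cells ~ 1, StraussHard(r=0.1))
+# takes hc = (minimum interpoint distance) * n/(n+1).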
diff --git a/R/studpermutest.R b/R/studpermutest.R
new file mode 100644
index 0000000..440cba7
--- /dev/null
+++ b/R/studpermutest.R
@@ -0,0 +1,561 @@
+#'
+#'  studpermutest.R
+#' 
+#'  Original by Ute Hahn 2014
+#'
+#' $Revision: 1.5 $ $Date: 2015/10/21 09:06:57 $
+#' 
+#' Studentized permutation test for comparison of grouped point patterns;
+#' functions to generate these grouped point patterns;
+#' wrapper for test of reweighted second order stationarity.
+#' 
+
+
+#' studpermu.test
+#' studentized permutation test for grouped point patterns
+#' interpreted version, random permutations only. 
+#' A group needs to contain at least two point patterns with at least 'minpoints' points each.
+#
+#' X               the data, may be a list of lists of point patterns, or a hyperframe
+#' formula         if X is a hyperframe, relates point patterns to factor variables that
+#'                 determine the groups. If missing, the first column of X that contains
+#'                 a factor variable is used.
+#' summaryfunction the function used in the test
+#' ...             additional arguments for summaryfunction
+#' rinterval       r-interval where summaryfunction is evaluated. If NULL, the
+#'                 interval is calculated from spatstat defaults 
+#'                 (intersection for all patterns)    
+#' nperm           number of random permutations
+#' use.Tbar        use the alternative test statistic, for summary functions with
+#'                 roughly constant variance, such as K/r or L
+#' minpoints       the minimum number of points a pattern needs to have. Patterns
+#'                 with fewer points are not used.
+#' rsteps          discretization steps of the r-interval
+#' r               arguments at which to evaluate summaryfunction; overrides rinterval.
+#'                 Should normally not be given; use rinterval instead, which
+#'                 allows r_0 > 0. Also, there is no plausibility check for r so far.
+#' arguments.in.data if TRUE, extra arguments to the summary function that vary
+#'                 between patterns are taken from X (which then has to be a
+#'                 hyperframe). Assumes that the first argument of summaryfunction
+#'                 is always the point pattern.
+#'                 This is meant for internal use (automation).
+#
+#' returns an object of classes htest and studpermutest that can be plotted.
+#' The plot shows the summary functions for the groups (and the group means
+#' if requested).
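+#'
+#' Illustrative call (a sketch, not part of the upstream code; 'pyramidal'
+#' is the spatstat hyperframe of pyramidal neuron patterns with columns
+#' 'Neurons' and 'group'):
+#'   res <- studpermu.test(pyramidal, Neurons ~ group, nperm = 199)
+#'   plot(res)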
+
+studpermu.test <- local({
+
+studpermu.test <-
+  function (X, formula, summaryfunction = Kest,
+            ...,
+            rinterval = NULL, nperm = 999,
+            use.Tbar = FALSE, # the alternative statistic, use with K/r or L  
+            minpoints = 20, 
+            rsteps = 128, r = NULL,
+            arguments.in.data = FALSE) {
+    #' ---- the loooong preliminaries -------  
+
+    #' ---- argument checking paranoia ----
+    if (arguments.in.data && !is.hyperframe(X))
+      stop(paste("X needs to be a hyperframe",
+                 "if arguments for summary function are to be retrieved"),
+           call.=FALSE)
+    stopifnot(is.function(summaryfunction))
+    #' there could be more...
+  
+    #' first prepare the data
+    if(is.hyperframe(X)) {
+      if(dim(X)[2] < 2) 
+        stop(paste("Hyperframe X needs to contain at least 2 columns,",
+                   "one for patterns, one indicating groups"),
+             call.=FALSE)
+      data <- X # renaming for later. 
+      Xclass <- unclass(X)$vclass
+      factorcandidate <-
+        Xclass %in% c("integer", "numeric", "character", "factor")
+      ppcandidate <- Xclass == "ppp"
+      names(factorcandidate) <- names(ppcandidate) <-
+        names(Xclass) <- Xnames <- names(X)
+      if(all(!factorcandidate) || all(!ppcandidate))
+        stop(paste("Hyperframe X needs to contain at least a column",
+                   "with point patterns, and one indicating groups"),
+             call.=FALSE)
+      if(!missing(formula)){
+        #' safety precautions ;-)
+        if(!inherits(formula, "formula")) 
+          stop(paste("Argument", dQuote("formula"), "should be a formula"))
+        if (length(formula) < 3) 
+          stop(paste("Argument", sQuote("formula"),
+                     "must have a left hand side"))
+        rhs <- rhs.of.formula(formula)
+        ppname <- formula[[2]]
+        if (!is.name(ppname)) 
+          stop("Left hand side of formula should be a single name")
+        ppname <- paste(ppname)
+        if(!ppcandidate[ppname])
+          stop(paste("Left hand side of formula",
+                     "should be the name of a column of point patterns"),
+               call.=FALSE)
+        groupvars <- all.vars(as.expression(rhs))
+        if(!all(groupvars %in% Xnames) || any(!factorcandidate[groupvars]))
+          stop(paste("Not all variables on right hand side of formula",
+                     "can be interpreted as factors"),
+               call.=FALSE)
+        #' make the groups to be compared
+        group <-
+          interaction(lapply(as.data.frame(data[ , groupvars, drop=FALSE]),
+                             factor))
+        #' rename the point patterns, needs the patch      
+        newnames <- Xnames
+        newnames[Xnames == ppname] <- "pp"
+        names(data) <- newnames
+        data$group <- group
+      } else {
+        #' No formula supplied.
+        #' Choose first ppp column and first factor column to make pp and groups
+        thepp <- which.max(ppcandidate)
+        thegroup <- which.max(factorcandidate)
+        #' fake formula for output of test result
+        formula <- as.formula(paste( Xnames[thepp],"~", Xnames[thegroup]))
+        newnames <- Xnames
+        newnames[thepp] <- "pp"
+        newnames[thegroup] <- "group"
+        names(data) <- newnames
+        data$group <- as.factor(data$group)
+      }
+    } else {
+      #' X is not a hyperframe, but hopefully a list of ppp
+      if(!is.list(X))
+        stop("X should be a hyperframe or a list of lists of point patterns")
+      if (!is.list(X[[1]]) || !is.ppp(X[[1]][[1]]))
+        stop("X is a list, but not a list of lists of point patterns")
+      nams <- names(X)
+      if(is.null(nams)) nams <- paste("group", seq_along(X))
+    
+      pp <- list()
+      group <- NULL
+      for (i in seq_along(X)) {
+        pp <- c(pp, X[[i]])
+        group <- c(group, rep(nams[i], length(X[[i]])))
+      }
+      group <- as.factor(group) 
+      data <-  hyperframe(pp = pp, group = group)
+      ppname <- "pp"
+    }
+  
+    framename <- deparse(substitute(X))
+    fooname <- deparse(substitute(summaryfunction))
+  
+    #' sorting out the patterns that contain too few points
+  
+    OK <- sapply(data$pp, npoints) >= minpoints
+    if((nbad <- sum(!OK)) > 0)
+      warning(paste(nbad,
+                    "patterns have been discarded",
+                    "because they contained fewer than",
+                    minpoints, "points"),
+              call.=FALSE)
+    data <- data[OK, ,drop=FALSE]
+    pp <- data$pp
+
+    #' ---- the groups,
+    #' or what remains after discarding the poor patterns with few points -----
+  
+    #' check if at least two observations in each group
+    groupi <- as.integer(data$group)
+    ngroups <- max(groupi)
+    if(ngroups < 2)
+      stop(paste("Sorry, after discarding patterns with fewer than",
+                 minpoints,
+                 "points,",
+                 if(ngroups < 1) "nothing" else "only one group",
+                 "is left over.",
+                 "\n- nothing to compare, take a break!"),
+           call.=FALSE)
+ 
+    lev <- 1:ngroups
+    m <- as.vector(table(groupi))
+    if (any(m < 3))
+      stop(paste("Data groups need to contain at least two patterns;",
+                 "\nafter discarding those with fewer than",
+                 minpoints,
+                 "points, the remaining group sizes are",
+                 commasep(m)),
+           call.=FALSE)
+    #' check if number of possible outcomes is small
+    npossible <- factorial(sum(m))/prod(factorial(m))/prod(factorial(table(m)))
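+    #' e.g. two groups of 3 patterns each: 6!/(3!*3!)/2! = 10 distinct splits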
+    if (npossible < max(100, nperm)) 
+      warning("Don't expect exact results - group sizes are too small")
+  
+    #' --------- real preliminaries now ------
+    
+    #' get interval for arguments
+
+    if(!is.null(r)){
+      rinterval <- range(r)
+      rsteps <- length(r)
+    } else if (is.null(rinterval)) {
+      foochar <- substr(fooname, 1, 1)
+      if (foochar %in% c("p", "L")) foochar <- "K"
+      if (fooname %in%  c("Kscaled", "Lscaled")) foochar <- "Kscaled"
+      rinterval <-
+        c(0, min(with(data, rmax.rule(foochar, Window(pp), intensity(pp)))))
+    }  
+    
+    ranger <- diff(range(rinterval))
+    #' r sequence needs to start at 0 for Kest and such
+    rr <- r %orifnull% seq(0, rinterval[2], length.out = rsteps + 1) 
+    taker <- rr >= rinterval[1] & rr <= rinterval[2] # used for testing
+ 
+    #' now estimate the summary function, finally...
+    #' TO DO!!!! Match function call of summary function with data columns!
+    #' use arguments.in.data, if applicable. This is for inhomogeneous summary 
+    #' functions
+ 
+    #' --------- retrieve arguments for summary function from data, if any ---
+ 
+    if(arguments.in.data) 
+      fvlist <- multicall(summaryfunction, pp, data, r = rr, ...)
+    else fvlist <- with(data, summaryfunction(pp, r = rr, ...))
+    fvtemplate <- fvlist[[1]]
+ 
+    valu <- attr(fvtemplate, "valu")
+    argu <- attr(fvtemplate, "argu")
+
+    foar <- sapply(lapply(fvlist, "[[", valu),
+                   "[", taker)
+
+    #' --------- the real stuff --------------
+  
+    #' all pairs of groups to be compared
+    combs <- combn(lev, 2)
+
+    #' --------- now do the real real stuff :-)  -------------
+  
+    #' generate "simulated values" from random permutations. 
+    #' possible improvement for future work:
+    #' If the number of all permutations (combis) is small,
+    #' first generate all permutations and then
+    #' sample from them to improve precision
+  
+    predigested <- list(lev=lev,
+                        foar=foar, m=m, combs=combs, rrr=rr[taker],
+                        ranger=ranger)
+    if(use.Tbar) {
+      Tobs <- Tbarstat(groupi, predigested)
+      Tsim <- replicate(nperm, Tbarstat(sample(groupi), predigested))    
+    } else {
+      Tobs <- Tstat(groupi, predigested)
+      Tsim <- replicate(nperm, Tstat(sample(groupi), predigested))
+    }  
+    names(Tobs) <- if(use.Tbar) "Tbar" else "T"
+
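+    #' Monte Carlo p-value, counting the observed statistic as one permutation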
+    pval <- (1 + sum(Tobs < Tsim))/(1 + nperm)
+  
+    #' ----- making a test object -----
+    method <- c("Studentized permutation test for grouped point patterns",
+                if(is.hyperframe(X)) pasteFormula(formula) else NULL,
+                choptext(ngroups, "groups:",
+                         paste(levels(data$group), collapse=", ")),
+                choptext("summary function:",
+                         paste0(fooname, ","), 
+                         "evaluated on r in",
+                         prange(rinterval)),
+                choptext("test statistic:",
+                         if(use.Tbar) "Tbar," else "T,", 
+                         nperm, "random permutations"))
+    fooshort <- switch(fooname, pcf = "pair correlation ",
+                       Kinhom = "inhomogeneous K-",
+                       Linhom = "inhomogeneous L-",
+                       Kscaled = "locally scaled K-",
+                       Lscaled = "locally scaled L-",
+                       paste(substr(fooname, 1, 1),"-",sep=""))
+    alternative <- c(paste("not the same ",fooshort,"function", sep=""))
+  
+    testerg <- list(statistic = Tobs, 
+                    p.value = pval, 
+                    alternative = alternative,            
+                    method = method, 
+                    data.name = framename)
+    class(testerg) <- c("studpermutest", "htest")
+    #' Add things for plotting
+    
+    #' prepare the fvlist, so that it only contains the estimates used,
+    fvs <- lapply(fvlist, "[.fv", j=c(argu, valu))
+    #' with rinterval as alim
+    fvs <- lapply(fvs, "attr<-", which="alim", value=rinterval)
+
+    testerg$curves <- hyperframe(fvs = fvs, groups = data$group)
+  
+    fvtheo <- fvlist[[1]]
+    fvnames(fvtheo, ".y") <- "theo"
+    attr(fvtheo, "alim") <- rinterval
+    testerg$curvtheo <- fvtheo[ , c(argu, "theo")]
+  
+    #' group means
+    grmn <- lapply(lev, splitmean, ind=groupi, f=foar)
+    testerg$groupmeans <- lapply(grmn, makefv, xvals=rr[taker],
+                                 template=fvtheo)
+  
+    return(testerg)
+  }  
+
+  splitmean <- function(l, ind, f) {
+    apply(f[ , ind == l], 1, mean)
+  }
+  splitvarn <- function(l, ind, f, m) {
+    apply(f[ , ind == l], 1, var) / m[l]
+  }
+  studentstat <- function(i, grmean, grvar) {
+    (grmean[, i[1]] - grmean[, i[2]])^2 / (grvar[i[1],] + grvar[i[2], ])
+  }
+    
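+  #' Test statistic: with group means Xbar_i(r) (columns of grmean) and
+  #' scaled variances s_i^2(r)/m_i (rows of grvar, see splitvarn),
+  #'   T = sum_{i<j} integral of (Xbar_i(r) - Xbar_j(r))^2 /
+  #'                             (s_i^2(r)/m_i + s_j^2(r)/m_j)  dr,
+  #' the integral being taken over the r-interval by the trapezoidal rule.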
+  Tstat <- function (ind = groupi, predigested) {
+    #' predigested should be a list with entries lev, foar, m, combs, rrr
+    with(predigested, {
+      grmean <- sapply(lev, splitmean, ind=ind, f=foar)
+      grvar <- t(sapply(lev, splitvarn, ind=ind, f=foar, m=m))
+      y <- apply(combs, 2, studentstat, grmean=grmean, grvar=grvar)
+      sum(apply(y, 2, trapint, x = rrr))
+    })
+  } 
+
+  intstudent <- function(i, rrr, grmean, meangrvar) {
+    trapint(rrr, (grmean[, i[1]] - grmean[, i[2]])^2 / 
+            (meangrvar[i[1]] + meangrvar[i[2]]))
+  }
+    
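+  #' Tbar variant: as T, but with the pointwise variances replaced by their
+  #' averages over the r-interval, meangrvar_i = (integral of s_i^2(r)/m_i dr)
+  #' divided by the interval length 'ranger'.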
+  Tbarstat <- function (ind = groupi, predigested) {
+    #' predigested should be a list
+    #' with entries lev, foar, m, combs, rrr, ranger
+    with(predigested, {
+      grmean <- sapply(lev, splitmean, ind=ind, f=foar)
+      grvar <- t(sapply(lev, splitvarn, ind=ind, f=foar, m=m))
+      meangrvar <- apply(grvar, 1, trapint, x = rrr)/ranger
+      sum(apply(combs, 2, intstudent, 
+                rrr=rrr, grmean=grmean, meangrvar=meangrvar))
+      #' trapint(rr[taker], grvar[i[1],] + grvar[i[2], ]))))
+    })
+  }
+
+  makefv <- function(yvals, xvals, template) {
+    fdf <- data.frame(r = xvals, y = yvals)
+    argu <- fvnames(template, ".x")
+    valu <- fvnames(template, ".y")
+    names(fdf) <- c(argu,valu)
+    fv(fdf, argu = argu, ylab = attr(template, "ylab"), valu = valu, 
+       fmla = attr(template,"fmla"), alim = attr(template, "alim"))
+  }
+
+  #' Trapezoidal rule approximation to integral
+  #' ------- trapezoidal rule, with handling of NAs:
+  #'                  they are simply ignored ----
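+  #' e.g. trapint(c(0, 1, 2), c(0, 1, 4)) = 0.5*((0+1)*1 + (1+4)*1) = 3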
+  trapint <- function(x, y) {
+    nonan <- !is.na(y)
+    nn <- sum(nonan)
+    if(nn < 2L) return(0)
+    Y <- y[nonan]
+    X <- x[nonan]
+    0.5 * sum( (Y[-1] + Y[-nn]) * diff(X))
+  }
+
+  #' call foo(x, further arguments) repeatedly
+  #' further arguments are taken from hyperframe H and ...
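+  #' e.g. (hypothetical) multicall(Kinhom, pp, H, r = rr): columns of the
+  #' hyperframe H whose names partially match formal arguments of Kinhom,
+  #' such as 'lambda', are passed to it row by row.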
+  multicall <- function(foo, x, H, ...){
+    stopifnot(is.hyperframe(H))
+    if (is.hyperframe(x)) {
+      x <- as.list(x)[[1]] 
+    } else if(!is.list(x))
+      stop("in multicall: x should be a hyperframe or list", call.=FALSE)
+  
+    #' check if same length
+    nrows <- dim(H)[1]
+    if (length(x) != nrows)
+      stop(paste("in multicall: x and H need to have",
+                 "the same number of rows or list elements"),
+           call.=FALSE)
+    dotargs <- list(...)
+    hnames <- names(H)
+    argnames <- names(formals(foo)) #' always assume first argument is given
+  
+    ppname <- argnames[1]
+    argnames <- argnames[-1]
+    dotmatch <- pmatch(names(dotargs), argnames)
+    dotmatched <- dotmatch[!is.na(dotmatch)]
+    dotuseargs <- dotargs[!is.na(dotmatch)]
+    restargs <- if(length(dotmatched) > 0) argnames[-dotmatched] else argnames
+    hmatch <- pmatch(hnames, restargs)
+    huse <- !is.na(hmatch)
+    lapply(seq_len(nrows), function (i) 
+           do.call(foo, c(list(x[[i]]),
+                          as.list(H[i, huse, drop=TRUE, strip=FALSE]),
+                          dotargs))) 
+  }
+
+  studpermu.test
+})
+
+
+#' ------------------- plot studpermutest ---------------------------------------
+#
+#' plot.studpermutest
+#' plot the functions that were used in studperm.test
+#' also plot group means, if requested
+#
+#' x               a studpermutest object, the test result
+#' fmla            a plot formula as in plot.fv, should be generic, using "." for values
+#' ...             further plot parameters
+#' col, lty, lwd   parameters (or vectors of parameters, one per group) for
+#'                 plotting the individual summary functions
+#' col.theo, lty.theo, lwd.theo if not all are NULL, the "theo" curve is also plotted
+#' lwd.mean        a multiplier for the line width of the group means;
+#'                 if NULL (the default), group means are not plotted
+#' lty.mean, col.mean  analogous, for the group means
+#' separately      generate a separate plot for each group (then no legends are plotted)
+#' legend          if TRUE, and plots are not separate, plot a legend 
+#' meanonly        do not plot individual summary functions
+#' legendpos       position of the legend, passed to legend()
+#' lbox            if TRUE, draw a box around the legend. Defaults to FALSE
+#' add             if TRUE, add to an existing plot
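+#
+#' Usage sketch (hedged; 'tst' stands for a result of studpermu.test):
+#'   plot(tst, lwd.mean = 2, legendpos = "topright")
+#'   plot(tst, meanonly = TRUE, separately = TRUE)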
+
+plot.studpermutest <- local({
+
+  plot.studpermutest <-
+    function(x, fmla, ..., 
+             lty = NULL, col = NULL, lwd = NULL,
+             lty.theo = NULL, col.theo = NULL, lwd.theo = NULL,
+             lwd.mean = if(meanonly) 1 else NULL,
+             lty.mean = lty, col.mean = col,
+             separately = FALSE, meanonly = FALSE,
+             main = if(meanonly) "group means" else NULL,
+             xlim = NULL, ylim = NULL, ylab = NULL,
+             legend = !add, legendpos = "topleft", lbox=FALSE,
+             add = FALSE) {
+      stopifnot(inherits(x, "studpermutest")) 
+      env.user <- parent.frame()
+      curvlists <- split(x$curves, x$curves$groups)
+      ngroups <- length(curvlists)
+      gnames <- names(curvlists)
+      #' check whether the theoretical function should be plotted
+      plottheo <- !(is.null(lty.theo) && is.null(col.theo) && is.null(lwd.theo))
+      #' prepare plot parameters for groups
+      if (is.null(lty)) lty <- 1:ngroups
+      if (is.null(col)) col <- 1:ngroups
+      if (is.null(lwd)) lwd <- par("lwd")
+      if (is.null(col.mean)) col.mean <- col
+      if (is.null(lty.mean)) lty.mean <- lty
+      lty <- rep(lty, length.out = ngroups)
+      col <- rep(col, length.out = ngroups)
+      lwd <- rep(lwd, length.out = ngroups)
+      col.mean <- rep(col.mean, length.out = ngroups)
+      lty.mean <- rep(lty.mean, length.out = ngroups)
+      if (plottheo){
+        if (is.null(lty.theo)) lty.theo <- ngroups + 1 # par("lty")
+        if (is.null(col.theo)) col.theo <- ngroups + 1 # par("col")
+        if (is.null(lwd.theo)) lwd.theo <- par("lwd")    
+      }
+      #' Transporting the formula in ... unfortunately does not work
+      #' for the axis labels, because the fvs contain only one variable.
+      #' We have to knit the labels ourselves.
+  
+      if (is.null(ylab)) {
+        if (!missing(fmla)) {
+          #' phew. This is going to be a bit of a patch-up job.
+          fmla <- as.formula(fmla, env=env.user)
+          map <- fvlabelmap(x$curvtheo) 
+          lhs <- lhs.of.formula(as.formula(fmla))
+          ylab <- eval(substitute(substitute(le, mp),
+                                  list(le = lhs, mp = map)))   
+        } else ylab <- attr(x$curvtheo, "yexp")
+      } 
+      if (missing(fmla)) fmla <- attr(x$curvtheo, "fmla")
+      if(!is.null(lwd.mean)) lwd.Mean <- lwd.mean*lwd
+      if(separately) {
+        for (i in seq_along(gnames)) {
+          if(!meanonly) 
+            plot.fvlist(curvlists[[i]]$fvs, fmla, ..., 
+                        col = col[i], lwd = lwd[i], lty= lty[i],
+                        xlim = xlim, ylim = ylim, ylab = ylab,
+                        main = gnames[i])
+          if (!is.null(lwd.mean))
+            plot(x$groupmeans[[i]], fmla, ...,
+                 col = col.mean[i], lwd = lwd.Mean[i], lty = lty.mean[i], 
+                 main = gnames[i], add = !meanonly, ylim = ylim)
+          if (plottheo)
+            plot(x$curvtheo, fmla, ..., add = TRUE, 
+                 col = col.theo, lwd = lwd.theo, lty = lty.theo)
+        }
+      } else {  
+        #' ---- TODO SIMPLIFY! they should all have the same x-range,
+        #' just check y-range ----
+        lims <- if (meanonly) {
+          plot.fvlist(x$groupmeans, fmla,..., limitsonly = TRUE)
+        } else {
+          as.data.frame(apply(sapply(curvlists, 
+            function(C) plot.fvlist(C$fvs, fmla,..., limitsonly = TRUE)),
+                              1, range))
+        }
+        if(is.null(xlim)) xlim <- lims$xlim
+        if(is.null(ylim)) ylim <- lims$ylim
+        iadd <- add
+        for (i in seq_along(gnames)) {
+          if(!meanonly)
+            plot.fvlist(curvlists[[i]]$fvs, fmla, ..., 
+                        col = col[i], lwd = lwd[i], lty= lty[i],
+                        xlim = xlim, ylim = ylim, ylab= ylab,
+                        main = main,
+                        add = iadd)
+          iadd <- iadd | !meanonly
+          if (!is.null(lwd.mean))
+            plot(x$groupmeans[[i]], fmla, ...,
+                 col = col.mean[i], lwd = lwd.Mean[i], lty = lty.mean[i],
+                 add = iadd,
+                 xlim = xlim, ylim = ylim, ylab= ylab, main=main)  
+          if (plottheo)
+            plot(x$curvtheo, fmla, ..., add = TRUE, 
+                 col = col.theo, lwd = lwd.theo, lty = lty.theo,
+                 xlim = xlim, ylim = ylim, ylab = ylab, main = main)  
+          iadd <- TRUE
+        } 
+        if(legend) {
+          if(meanonly) {
+            lwd <- lwd.Mean
+            col <- col.mean
+            lty <- lty.mean
+          }
+          if(plottheo){
+            gnames <- c(gnames, "Poisson mean")
+            col <- c(col, col.theo)
+            lty <- c(lty, lty.theo)
+            lwd <- c(lwd, lwd.theo)
+          }
+          legend(legendpos, gnames, col = col, lty = lty, lwd = lwd, 
+                 bty=ifelse(lbox, "o", "n"))
+        }  
+      }
+      return(invisible(NULL))
+    }
+
+  #' ------------------ Helper function----------------
+  #' flist: list of fv, with plot method
+
+  plot.fvlist <- function(x, fmla, ..., xlim=NULL, ylim=NULL, 
+                          add = FALSE, limitsonly = FALSE, main=NULL){
+    #' no safety precautions
+    if (missing(fmla)) fmla <- attr(x[[1]], "fmla")
+    if (!add || limitsonly) {
+      lims <- sapply(x, plot, fmla, ..., limitsonly = TRUE)
+      if(is.null(xlim)) xlim <- range(unlist(lims[1,]))
+      if(is.null(ylim)) ylim <- range(unlist(lims[2,]))
+      lims <- list(xlim=xlim, ylim=ylim)
+      if(limitsonly) return(lims) 
+      plot(x[[1]], fmla, ..., xlim = xlim, ylim = ylim, main = main)
+    } else {
+      plot(x[[1]], fmla, ..., add=TRUE)
+    }
+    for (foo in x[-1]) plot(foo, fmla, ..., add=TRUE)
+  }
+ 
+  plot.studpermutest
+})
+
diff --git a/R/subfits.R b/R/subfits.R
new file mode 100755
index 0000000..9ec159d
--- /dev/null
+++ b/R/subfits.R
@@ -0,0 +1,518 @@
+#
+#
+#  $Revision: 1.48 $   $Date: 2016/04/25 02:34:40 $
+#
+#
+
+subfits.new <- local({
+
+  subfits.new <- function(object, what="models", verbose=FALSE) {
+    stopifnot(inherits(object, "mppm"))
+
+    what <- match.arg(what, c("models", "interactions", "basicmodels"))
+
+    if(what == "interactions")
+      return(subfits.old(object, what, verbose))
+  
+    ## extract stuff
+    announce <- if(verbose) Announce else Ignore
+    
+    announce("Extracting stuff...")
+    fitter   <- object$Fit$fitter
+    FIT      <- object$Fit$FIT
+    trend    <- object$trend
+#%^!ifdef RANDOMEFFECTS  
+    random   <- object$random
+#%^!endif  
+    info     <- object$Info
+    npat     <- object$npat
+    Inter    <- object$Inter
+    interaction <- Inter$interaction
+    itags    <- Inter$itags
+    Vnamelist <- object$Fit$Vnamelist
+    has.design <- info$has.design
+#    has.random <- info$has.random
+    announce("done.\n")
+
+    ## fitted parameters
+    coefs.full <- coef(object)
+    if(is.null(dim(coefs.full))) {
+      ## fixed effects model: replicate vector to matrix
+      coefs.names <- names(coefs.full)
+      coefs.full <- matrix(coefs.full, byrow=TRUE,
+                           nrow=npat, ncol=length(coefs.full),
+                           dimnames=list(NULL, coefs.names))
+    } else {
+      ## random/mixed effects model: coerce to matrix
+      coefs.names <- colnames(coefs.full)
+      coefs.full <- as.matrix(coefs.full)
+    }
+    
+    ## determine which interaction(s) are active on each row
+    announce("Determining active interactions...")
+    active <- active.interactions(object)
+    announce("done.\n")
+
+    ## exceptions
+    if(any(rowSums(active) > 1))
+      stop(paste("subfits() is not implemented for models",
+                 "in which several interpoint interactions",
+                 "are active on the same point pattern"))
+#%^!ifdef RANDOMEFFECTS  
+    if(!is.null(random) && any(variablesinformula(random) %in% itags))
+      stop(paste("subfits() is not yet implemented for models",
+                 "with random effects that involve",
+                 "the interpoint interactions"))
+#%^!endif
+  
+    ## implied coefficients for each active interaction
+    announce("Computing implied coefficients...")
+    implcoef <- list()
+    for(tag in itags) {
+      announce(tag)
+      implcoef[[tag]] <- impliedcoefficients(object, tag)
+      announce(", ")
+    }
+    announce("done.\n")
+
+    ## Fisher information and vcov
+    fisher <- varcov <- NULL
+    if(what == "models") {
+      announce("Fisher information...")
+      fisher   <- vcov(object, what="fisher", err="null")
+      varcov   <- try(solve(fisher), silent=TRUE)
+      if(inherits(varcov, "try-error"))
+        varcov <- NULL
+      announce("done.\n")
+    } 
+  
+    ## Extract data frame 
+    announce("Extracting data...")
+    datadf   <- object$datadf
+    rownames <- object$Info$rownames
+    announce("done.\n")
+
+    ## set up lists for results 
+    models <- rep(list(NULL), npat)
+    interactions <- rep(list(NULL), npat)
+    
+    ## interactions
+    announce("Determining interactions...")
+    pstate <- list()
+    for(i in 1:npat) {
+      if(verbose) pstate <- progressreport(i, npat, state=pstate)
+      ## Find relevant interaction
+      acti <- active[i,]
+      nactive <- sum(acti)
+      interi <- if(nactive == 0) Poisson() else interaction[i, acti, drop=TRUE]
+      tagi <- names(interaction)[acti]
+      ## Find relevant coefficients
+      coefs.avail  <- coefs.full[i,]
+      names(coefs.avail) <- coefs.names
+      if(nactive == 1) {
+        ic <- implcoef[[tagi]]
+        coefs.implied <- ic[i, ,drop=TRUE]
+        names(coefs.implied) <- colnames(ic)
+        ## overwrite any existing values of coefficients; add new ones.
+        coefs.avail[names(coefs.implied)] <- coefs.implied
+      }
+      ## create fitted interaction with these coefficients
+      vni <- if(nactive > 0) Vnamelist[[tagi]] else character(0)
+      interactions[[i]] <- fii(interi, coefs.avail, vni)
+    }
+    announce("Done!\n")
+    names(interactions) <- rownames
+
+    ##
+    if(what=="interactions") 
+      return(interactions)
+  
+    ## Extract data required to reconstruct complete model fits
+    announce("Extracting more data...")
+    data  <- object$data
+    Y     <- object$Y
+    Yname <- info$Yname
+    moadf <- object$Fit$moadf
+    fmla  <- object$Fit$fmla
+    ## deal with older formats of mppm
+    if(is.null(Yname)) Yname <- info$Xname
+    if(is.null(Y)) Y <- data[ , Yname, drop=TRUE]
+    ## 
+    used.cov.names <- info$used.cov.names
+    has.covar <- info$has.covar
+    if(has.covar) {
+      covariates.hf <- data[, used.cov.names, drop=FALSE]
+      dfvar <- used.cov.names %in% names(datadf)
+    }
+    announce("done.\n")
+
+    ## Construct template for fake ppm object
+    spv <- package_version(versionstring.spatstat())
+    fake.version <- list(major=spv$major,
+                         minor=spv$minor,
+                         release=spv$patchlevel,
+                         date="$Date: 2016/04/25 02:34:40 $")
+    fake.call <- call("cannot.update", Q=NULL, trend=trend,
+                      interaction=NULL, covariates=NULL,
+                      correction=object$Info$correction,
+                      rbord     = object$Info$rbord)
+    fakemodel <- list(
+                    method       = "mpl",
+                    fitter       = fitter,
+                    coef         = coef(object),
+                    trend        = object$trend,
+                    interaction  = NULL,
+                    fitin        = NULL,
+                    Q            = NULL,
+                    maxlogpl     = NA,
+                    internal     = list(glmfit = FIT,
+                                        glmdata  = NULL,
+                                        Vnames   = NULL,
+                                        fmla     = fmla,
+                                        computed = list()),
+                    covariates   = NULL,
+                    correction   = object$Info$correction,
+                    rbord        = object$Info$rbord,
+                    version      = fake.version,
+                    problems     = list(),
+                    fisher       = fisher,
+                    varcov       = varcov,
+                    call         = fake.call,
+                    callstring   = "cannot.update()",
+                    fake         = TRUE)
+    class(fakemodel) <- "ppm"
+
+    ## Loop through point patterns
+    announce("Generating models for each row...")
+    pstate <- list()
+    for(i in 1:npat) {
+      if(verbose) pstate <- progressreport(i, npat, state=pstate)
+      Yi <- Y[[i]]
+      Wi <- if(is.ppp(Yi)) Yi$window else Yi$data$window
+      ## assemble relevant covariate images
+      covariates <-
+        if(has.covar) covariates.hf[i, , drop=TRUE, strip=FALSE] else NULL
+      if(has.covar && has.design) 
+        ## Convert each data frame covariate value to an image
+        covariates[dfvar] <- lapply(covariates[dfvar], as.im, W=Wi)
+
+      ## Extract relevant interaction
+      finte <- interactions[[i]]
+      inte  <- finte$interaction
+      if(is.poisson.interact(inte)) inte <- NULL
+      Vnames <- finte$Vnames
+      if(length(Vnames) == 0) Vnames <- NULL
+    
+      ## Construct fake ppm object
+      fakemodel$interaction <- inte
+      fakemodel$fitin       <- finte
+      fakemodel$Q           <- Yi
+      fakemodel$covariates  <- covariates
+      fakemodel$internal$glmdata <- moadf[moadf$id == i, ]
+      fakemodel$internal$Vnames  <- Vnames
+
+      fake.call$Q <- Yi
+      fake.call$covariates <- covariates
+      fakemodel$call <- fake.call
+      fakemodel$callstring <- short.deparse(fake.call)
+      
+      ## store in list
+      models[[i]] <- fakemodel
+    }
+    announce("done.\n")
+    names(models) <- rownames
+    models <- as.anylist(models)
+    return(models)
+  }
+
+  Announce <- function(...) cat(...)
+
+  Ignore <- function(...) { NULL }
+
+  subfits.new
+})
+
+
+
+## /////////////////////////////////////////////////////
+
+subfits <-
+subfits.old <- local({
+    
+  subfits.old <- function(object, what="models", verbose=FALSE) {
+    stopifnot(inherits(object, "mppm"))
+    what <- match.arg(what, c("models","interactions", "basicmodels"))
+    ## extract stuff
+    announce <- if(verbose) Announce else Ignore
+    
+    announce("Extracting stuff...")
+    trend    <- object$trend
+    random   <- object$random
+    use.gam  <- object$Fit$use.gam
+    info     <- object$Info
+    npat     <- object$npat
+    Inter    <- object$Inter
+    interaction <- Inter$interaction
+    itags    <- Inter$itags
+    Vnamelist <- object$Fit$Vnamelist
+    has.design <- info$has.design
+    has.random <- info$has.random
+    moadf    <- object$Fit$moadf
+    announce("done.\n")
+
+    ## levels of any factors
+    levelslist <- lapply(as.list(moadf), levelsAsFactor)
+    isfactor <- !sapply(levelslist, is.null)
+    
+    ## fitted parameters
+    coefs.full <- coef(object)
+    if(is.null(dim(coefs.full))) {
+      ## fixed effects model: replicate vector to matrix
+      coefs.names <- names(coefs.full)
+      coefs.full <- matrix(coefs.full, byrow=TRUE,
+                           nrow=npat, ncol=length(coefs.full),
+                           dimnames=list(NULL, coefs.names))
+    } else {
+      ## random/mixed effects model: coerce to matrix
+      coefs.names <- colnames(coefs.full)
+      coefs.full <- as.matrix(coefs.full)
+    }
+  
+    ## determine which interaction(s) are active on each row
+    announce("Determining active interactions...")
+    active <- active.interactions(object)
+    announce("done.\n")
+
+    ## exceptions
+    if(any(rowSums(active) > 1))
+      stop(paste("subfits() is not implemented for models",
+                 "in which several interpoint interactions",
+                 "are active on the same point pattern"))
+#%^!ifdef RANDOMEFFECTS  
+    if(!is.null(random) && any(variablesinformula(random) %in% itags))
+      stop(paste("subfits() is not yet implemented for models",
+                 "with random effects that involve",
+                 "the interpoint interactions"))
+#%^!endif
+  
+    ## implied coefficients for each active interaction
+    announce("Computing implied coefficients...")
+    implcoef <- list()
+    for(tag in itags) {
+      announce(tag)
+      implcoef[[tag]] <- impliedcoefficients(object, tag)
+      announce(", ")
+    }
+    announce("done.\n")
+
+    ## Fisher information and vcov
+    fisher <- varcov <- NULL
+    if(what == "models") {
+      announce("Fisher information...")
+      fisher   <- vcov(object, what="fisher", err="null")
+      varcov   <- try(solve(fisher), silent=TRUE)
+      if(inherits(varcov, "try-error"))
+        varcov <- NULL
+      announce("done.\n")
+    }
+  
+    ## Extract data frame 
+    announce("Extracting data...")
+    datadf   <- object$datadf
+    rownames <- object$Info$rownames
+    announce("done.\n")
+
+    ## set up list for results 
+    results <- rep(list(NULL), npat)
+  
+    if(what == "interactions") {
+      announce("Determining interactions...")
+      pstate <- list()
+      for(i in 1:npat) {
+        if(verbose) pstate <- progressreport(i, npat, state=pstate)
+        ## Find relevant interaction
+        acti <- active[i,]
+        nactive <- sum(acti)
+        interi <- if(nactive == 0) Poisson() else
+                  interaction[i, acti, drop=TRUE]
+        tagi <- names(interaction)[acti]
+        ## Find relevant coefficients
+        coefs.avail  <- coefs.full[i,]
+        names(coefs.avail) <- coefs.names
+        if(nactive == 1) {
+          ic <- implcoef[[tagi]]
+          coefs.implied <- ic[i, ,drop=TRUE]
+          names(coefs.implied) <- colnames(ic)
+          ## overwrite any existing values of coefficients; add new ones.
+          coefs.avail[names(coefs.implied)] <- coefs.implied
+        }
+        ## create fitted interaction with these coefficients
+        vni <- if(nactive > 0) Vnamelist[[tagi]] else character(0)
+        results[[i]] <- fii(interi, coefs.avail, vni)
+      }
+      announce("Done!\n")
+      names(results) <- rownames
+      return(results)
+    }
+  
+    ## Extract data required to reconstruct complete model fits
+    announce("Extracting more data...")
+    data  <- object$data
+    Y     <- object$Y
+    Yname <- info$Yname
+    ## deal with older formats of mppm
+    if(is.null(Yname)) Yname <- info$Xname
+    if(is.null(Y)) Y <- data[ , Yname, drop=TRUE]
+    ##
+    used.cov.names <- info$used.cov.names
+    has.covar <- info$has.covar
+    if(has.covar) {
+      covariates.hf <- data[, used.cov.names, drop=FALSE]
+      dfvar <- used.cov.names %in% names(datadf)
+    }
+    announce("done.\n")
+  
+    ## Loop through point patterns
+    announce("Looping through rows...")
+    pstate <- list()
+    for(i in 1:npat) {
+      if(verbose) pstate <- progressreport(i, npat, state=pstate)
+      Yi <- Y[[i]]
+      Wi <- if(is.ppp(Yi)) Yi$window else Yi$data$window
+      ## assemble relevant covariate images
+      if(!has.covar) { 
+        covariates <- NULL
+      } else {
+        covariates <- covariates.hf[i, , drop=TRUE, strip=FALSE] 
+        if(has.design) {
+          ## Convert each data frame covariate value to an image
+          imrowi <- lapply(covariates[dfvar], as.im, W=Wi)
+          ## Problem: constant covariate leads to singular fit
+          ## --------------- Hack: ---------------------------
+          ##  Construct fake data by resampling from possible values
+          covar.vals <- lapply(as.list(covariates[dfvar, drop=FALSE]), possible)
+          fake.imrowi <- lapply(covar.vals, scramble, W=Wi, Y=Yi$data)
+          ## insert fake data into covariates 
+          covariates[dfvar] <- fake.imrowi
+          ## ------------------ end hack ----------------------------
+        }
+        ## identify factor-valued spatial covariates
+        spatialfactors <- !dfvar & isfactor[names(covariates)]
+        if(any(spatialfactors)) {
+          ## problem: factor levels may be dropped
+          ## more fakery...
+          spfnames <- names(spatialfactors)[spatialfactors]
+          covariates[spatialfactors] <-
+            lapply(levelslist[spfnames],
+                   scramble, W=Wi, Y=Yi$data)
+        }
+      }
+      ## Fit ppm to data for case i only
+      ## using relevant interaction
+      acti <- active[i,]
+      nactive <- sum(acti)
+      if(nactive == 1){
+        interi <- interaction[i, acti, drop=TRUE] 
+        tagi <- names(interaction)[acti]
+        fiti <- PiPiM(Yi, trend, interi, covariates=covariates,
+                      allcovar=has.random,
+                      use.gam=use.gam,
+                      vnamebase=tagi, vnameprefix=tagi)
+      } else {
+        fiti <- PiPiM(Yi, trend, Poisson(), covariates=covariates,
+                      allcovar=has.random,
+                      use.gam=use.gam)
+      }
+      ## fiti determines which coefficients are required
+      coefi.fitted <- fiti$coef
+      coefnames.wanted <- names(coefi.fitted)
+      ## take the required coefficients from the full mppm fit
+      coefs.avail  <- coefs.full[i,]
+      names(coefs.avail) <- coefs.names
+      if(nactive == 1) {
+        ic <- implcoef[[tagi]]
+        coefs.implied <- ic[i, ,drop=TRUE]
+        names(coefs.implied) <- colnames(ic)
+        ## overwrite any existing values of coefficients; add new ones.
+        coefs.avail[names(coefs.implied)] <- coefs.implied
+      }
+      if(!all(coefnames.wanted %in% names(coefs.avail))) 
+        stop("Internal error: some fitted coefficients not accessible")
+      coefi.new <- coefs.avail[coefnames.wanted]
+      ## reset coefficients
+      fiti$coef.orig <- coefi.fitted ## (detected by summary.ppm, predict.ppm)
+      fiti$theta <- fiti$coef <- coefi.new
+      fiti$method <- "mppm"
+      ## ... and replace fake data by true data
+      if(has.design) {
+        for(nam in names(imrowi)) {
+          fiti$covariates[[nam]] <- imrowi[[nam]]
+          fiti$internal$glmdata[[nam]] <- data[i, nam, drop=TRUE]
+        }
+      }
+      ## Adjust rank of glm fit object
+#      fiti$internal$glmfit$rank <- FIT$rank 
+      fiti$internal$glmfit$rank <- sum(is.finite(fiti$coef))
+      ## Fisher information and variance-covariance if known
+      ## Extract submatrices for relevant parameters
+      if(!is.null(fisher)) 
+        fiti$fisher <- fisher[coefnames.wanted, coefnames.wanted, drop=FALSE]
+      if(!is.null(varcov))
+        fiti$varcov <- varcov[coefnames.wanted, coefnames.wanted, drop=FALSE]
+      ## store in list
+      results[[i]] <- fiti
+    }
+    announce("done.\n")
+    names(results) <- rownames
+    results <- as.anylist(results)
+    return(results)
+  }
+
+  PiPiM <- function(Y, trend, inter, covariates, ...,
+                    allcovar=FALSE, use.gam=FALSE,
+                    vnamebase=c("Interaction", "Interact."),
+                    vnameprefix=NULL) {
+    # This ensures that the model is fitted in a unique environment
+    # so that it can be updated later.
+    force(Y)
+    force(trend)
+    force(inter)
+    force(covariates)
+    force(allcovar)
+    force(use.gam)
+    force(vnamebase)
+    force(vnameprefix)
+    feet <- ppm(Y, trend, inter, covariates=covariates,
+                allcovar=allcovar, use.gam=use.gam,
+                forcefit=TRUE, vnamebase=vnamebase, vnameprefix=vnameprefix)
+    return(feet)
+  }
+  
+  possible <- function(z) {
+    if(!is.factor(z)) unique(z) else factor(levels(z), levels=levels(z))
+  }
+  
+  scramble <- function(vals, W, Y) {
+    W <- as.mask(W)
+    npixels <- prod(W$dim)
+    nvalues <- length(vals)
+    npts <- npoints(Y)
+    ## sample the possible values randomly at the non-data pixels
+    sampled <- sample(vals, npixels, replace=TRUE)
+    Z <- im(sampled, xcol=W$xcol, yrow=W$yrow)
+    ## repeat the possible values cyclically at the data points
+    if(npts >= 1)
+      Z[Y] <- vals[1 + ((1:npts) %% nvalues)]
+    return(Z)
+  }
+
+  Announce <- function(...) cat(...)
+
+  Ignore <- function(...) { NULL }
+
+  subfits.old
+})
+
+cannot.update <- function(...) {
+  stop("This model cannot be updated")
+}
diff --git a/R/subset.R b/R/subset.R
new file mode 100644
index 0000000..a211a3a
--- /dev/null
+++ b/R/subset.R
@@ -0,0 +1,85 @@
+##
+## subset.R
+##
+## Methods for 'subset'
+##
+##   $Revision: 1.5 $  $Date: 2016/03/01 02:07:34 $
+
+subset.ppp <- function(x, subset, select, drop=FALSE, ...) {
+  stopifnot(is.ppp(x))
+  w <- as.owin(x)
+  y <- as.data.frame(x)
+  r <- if (missing(subset)) {
+    rep_len(TRUE, nrow(y))
+  } else {
+    e <- substitute(subset)
+    r <- eval(e, y, parent.frame())
+    if(!is.logical(r))
+      r <- ppsubset(x, r, "subset", fatal=TRUE)
+    r & !is.na(r)
+  }
+  vars <- if (missing(select)) {
+    TRUE
+  } else {
+    ## create an environment in which column names are mapped to their positions
+    nl <- as.list(seq_along(y))
+    names(nl) <- names(y)
+    if(length(nl) > 3) {
+      ## multiple columns of marks: add the name 'marks'
+      nl <- append(nl, list(marks=3:length(nl)))
+    }
+    eval(substitute(select), nl, parent.frame())
+  }
+  ## ensure columns include coordinates
+  nama <- names(y)
+  names(nama) <- nama
+  vars <- union(c("x", "y"), nama[vars])
+  ## take subset
+  z <- y[r, vars, drop = FALSE]
+  ## reinstate as point pattern
+  out <- as.ppp(z, W=w, check=FALSE)
+  if(drop)
+    out <- out[drop=TRUE]
+  return(out)
+}
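+
+## Usage sketch (hedged; 'longleaf' is a marked point pattern shipped with
+## spatstat, whose marks are tree diameters):
+##   subset(longleaf, marks > 30)
+##   subset(longleaf, x > 100, select = marks)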
+
+subset.pp3 <- subset.lpp <- subset.ppx <- function(x, subset, select, drop=FALSE, ...) {
+  y <- as.data.frame(x)
+  r <- if (missing(subset)) 
+    rep_len(TRUE, nrow(y))
+  else {
+    e <- substitute(subset)
+    r <- eval(e, y, parent.frame())
+    if(!is.logical(r))
+      r <- ppsubset(x, r, "subset", fatal=TRUE)
+    r & !is.na(r)
+  }
+  vars <- if (missing(select)) 
+    TRUE
+  else {
+    ## create an environment in which column names are mapped to their positions
+    nl <- as.list(seq_along(y))
+    names(nl) <- names(y)
+    if(!("marks" %in% names(y)) && any(ismark <- (x$ctype == "mark"))) {
+      ## add the symbol 'marks' 
+      nl <- append(nl, list(marks=which(ismark)))
+    }
+    eval(substitute(select), nl, parent.frame())
+  }
+  ## ensure columns include coordinates 
+  nama <- names(y)
+  names(nama) <- nama
+  vars <- union(names(coords(x)), nama[vars])
+  ## take subset
+  z <- y[r, vars, drop = FALSE]
+  ## reinstate as point pattern
+  ctype <- as.character(x$ctype)[match(vars, nama)]
+  out <- ppx(z, domain=x$domain, coord.type=ctype)
+  ## drop unused factor levels
+  if(drop)
+    out <- out[drop=TRUE]
+  ## reinstate class
+  class(out) <- class(x)
+  return(out)
+}
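+
+## Usage sketch (hedged; 'X3' stands for a hypothetical three-dimensional
+## point pattern of class "pp3"):
+##   subset(X3, z > 0.5)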
+
diff --git a/R/suffstat.R b/R/suffstat.R
new file mode 100755
index 0000000..5a53796
--- /dev/null
+++ b/R/suffstat.R
@@ -0,0 +1,120 @@
+#
+#   suffstat.R
+#
+# calculate sufficient statistic
+#
+#  $Revision: 1.17 $  $Date: 2013/04/25 06:37:43 $
+#
+#
+
+suffstat <- function(model, X=data.ppm(model)) {
+  cl <- sys.call()
+  callstring <- short.deparse(cl)
+
+  verifyclass(model, "ppm")
+  if(!missing(X))
+    verifyclass(X, "ppp")
+  else
+    X <- NULL
+
+  inter    <- model$interaction
+
+  func <- if(is.null(inter) || is.poisson(inter)) suffstat.poisson else 
+          if(!is.null(ssinter  <- inter$suffstat)) ssinter else
+          if(!is.null(ssfamily <- inter$family$suffstat)) ssfamily else
+          suffstat.generic
+
+  return(func(model, X, callstring))
+}
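+
+# Usage sketch (hedged; 'swedishpines' and Strauss() are from spatstat):
+#   fit <- ppm(swedishpines ~ 1, Strauss(r = 7))
+#   suffstat(fit)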
+
+suffstat.generic <- function(model, X=NULL, callstring="suffstat.generic") {
+  # This should work for an arbitrary ppm
+  # since it uses the fundamental relation between
+  # conditional intensity and likelihood.
+  # But it is computationally intensive.
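+  # The sufficient statistic decomposes as S(x) = sum_i t(x_i | x_1..x_{i-1});
+  # each term is the last row of the partial model matrix obtained by adding
+  # the points one at a time (see the loop below).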
+
+  verifyclass(model, "ppm")
+  coefnames <- names(coef(model))
+
+  if(is.null(X)) {
+    X <- data.ppm(model)
+    modelX <- model
+  } else {
+    verifyclass(X, "ppp")
+    # refit the model to determine which points are used in pseudolikelihood
+    modelX <- update(model, X, method="mpl")
+  }
+  
+  # find data points which do not contribute to pseudolikelihood
+  mplsubset <- getglmdata(modelX)$.mpl.SUBSET
+  mpldata   <- is.data(quad.ppm(modelX))
+  contribute <- mplsubset[mpldata]
+
+  if(!any(contribute)) 
+    # result is zero vector
+    return(0 * coef(model))
+
+  # Add points one-by-one
+  # If there are points which don't contribute, condition on them
+  use <- which(contribute)   
+  dontuse <- which(!contribute)
+  for(i in seq_along(use)) {
+    prior <- if(i == 1) c() else use[1:(i-1)]
+    prior <- c(dontuse, prior)
+    Xprior <- X[prior]
+    Xcurrent <- X[use[i]]
+    mom <- partialModelMatrix(Xprior, Xcurrent, model, "suffstat")
+    lastrow <- length(prior) + 1
+    momrow <- mom[lastrow, ]
+    if(i == 1)
+      result <- momrow
+    else
+      result <- momrow + result
+  }
+  names(result) <- coefnames
+  attr(result, "mplsubset") <- NULL
+  return(result)
+}
+
+killinteraction <- function(model) {
+  verifyclass(model, "ppm")
+  ispoisson <- summary(model, quick=TRUE)$poisson
+  if(ispoisson)
+    return(model)
+  # surgery required
+  newmodel <- model
+  newmodel$interaction <- NULL
+  if(!is.null(Vnames <- model$internal$Vnames)) {
+    matches <- names(model$coef) %in% Vnames
+    newmodel$coef <- model$coef[!matches]
+    newmodel$internal$Vnames <- NULL
+  }
+  # the other 'internal' stuff may still be wrong (or 'preserved')
+  return(newmodel)
+}
+
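+# For a Poisson model, the sufficient statistic reduces to sum_i Z(x_i),
+# the column sums of the model matrix evaluated at the data points.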
+suffstat.poisson <- function(model, X, callstring="suffstat.poisson") {
+  verifyclass(model, "ppm")
+  if(is.null(X))
+    X <- data.ppm(model)
+  else 
+    verifyclass(X, "ppp")
+  
+  if(!is.poisson(model))
+    stop("Model is not a Poisson process")
+
+  Empty <- X[numeric(0)]
+  mom <- partialModelMatrix(X, Empty, model, "suffstat")
+
+  nmom <- ncol(mom)
+  ncoef <- length(coef(model))
+  if(nmom != ncoef)
+    stop("Internal error: number of columns of model matrix does not match number of coefficients in fitted model")
+  
+  if(nmom > 1 && any(colnames(mom) != names(coef(model))))
+    warning("Internal error: mismatch between column names of model matrix and names of coefficient vector in fitted model")
+     
+  o1sum   <- apply(mom, 2, sum)
+  return(o1sum)
+}
+
diff --git a/R/summary.im.R b/R/summary.im.R
new file mode 100755
index 0000000..25aa0e7
--- /dev/null
+++ b/R/summary.im.R
@@ -0,0 +1,149 @@
+#
+#    summary.im.R
+#
+#    summary() method for class "im"
+#
+#    $Revision: 1.21 $   $Date: 2016/09/01 02:31:52 $
+#
+#    summary.im()
+#    print.summary.im()
+#    print.im()
+#
+summary.im <- function(object, ...) {
+  verifyclass(object, "im")
+
+  x <- object
+
+  y <- unclass(x)[c("dim", "xstep", "ystep")]
+  pixelarea <- y$xstep * y$ystep
+
+  # extract image values
+  v <- x$v
+  inside <- !is.na(v)
+  v <- v[inside]
+
+  # type of values?
+  y$type <- x$type
+  
+  # factor-valued?
+  lev <- levels(x)
+  if(!is.null(lev) && !is.factor(v))
+    v <- factor(v, levels=seq_along(lev), labels=lev)
+
+  switch(x$type,
+         integer=,
+         real={
+           y$mean <- mv <- mean(v)
+           y$integral <- mv * length(v) * pixelarea
+           y$range <- ra <- range(v)
+           y$min <- ra[1]  
+           y$max <- ra[2]
+         },
+         factor={
+           y$levels <- lev
+           y$table <- table(v, dnn="")
+         },
+         complex={
+           y$mean <- mv <- mean(v)
+           y$integral <- mv * length(v) * pixelarea
+           rr <- range(Re(v))
+           y$Re <- list(range=rr, min=rr[1], max=rr[2])
+           ri <- range(Im(v))
+           y$Im <- list(range=ri, min=ri[1], max=ri[2])
+         },
+         {
+           # another unknown type
+           pixelvalues <- v
+           y$summary <- summary(pixelvalues)
+         })
+    
+  # summarise pixel raster
+  win <- as.owin(x)
+  y$window <- summary.owin(win)
+
+  y$fullgrid <- (rescue.rectangle(win)$type == "rectangle")
+
+  y$units <- unitname(x)
+  
+  class(y) <- "summary.im"
+  return(y)
+}
+
+print.summary.im <- function(x, ...) {
+  verifyclass(x, "summary.im")
+  splat(paste0(x$type, "-valued"), "pixel image")
+  unitinfo <- summary(x$units)
+  pluralunits <- unitinfo$plural
+  sigdig <- getOption('digits')
+  di <- x$dim
+  win <- x$window
+  splat(di[1], "x", di[2], "pixel array (ny, nx)")
+  splat("enclosing rectangle:",
+        prange(signif(x$window$xrange, sigdig)),
+        "x",
+        prange(signif(x$window$yrange, sigdig)),
+        unitinfo$plural,
+        unitinfo$explain)
+  splat("dimensions of each pixel:",
+        signif(x$xstep, sigdig), "x", signif(x$ystep, sigdig),
+        pluralunits)
+  if(!is.null(explain <- unitinfo$explain))
+    splat(explain)
+  fullgrid <- x$fullgrid
+  if(fullgrid) {
+    splat("Image is defined on the full rectangular grid")
+    whatpart <- "Frame"
+  } else {
+    splat("Image is defined on a subset of the rectangular grid")
+    whatpart <- "Subset"
+  }
+  splat(whatpart, "area =", win$area, "square", pluralunits)
+  if(!fullgrid) {
+    af <- signif(win$areafraction, min(3, sigdig))
+    splat(whatpart, "area fraction =", af)
+  }
+  if(fullgrid) splat("Pixel values") else
+                 splat("Pixel values (inside window):")
+  switch(x$type,
+         integer=,
+         real={
+           splat("\trange =", prange(signif(x$range, sigdig)))
+           splat("\tintegral =", signif(x$integral, sigdig))
+           splat("\tmean =", signif(x$mean, sigdig))
+         },
+         factor={
+           print(x$table)
+         },
+         complex={
+           splat("\trange: Real",
+                 prange(signif(x$Re$range, sigdig)),
+                 "Imaginary",
+                 prange(signif(x$Im$range, sigdig)))
+           splat("\tintegral =", signif(x$integral, sigdig))
+           splat("\tmean =", signif(x$mean, sigdig))
+         },
+         {
+           print(x$summary)
+         })
+
+  return(invisible(NULL))
+}
+
+print.im <- function(x, ...) {
+  splat(paste0(x$type, "-valued"), "pixel image")
+  if(x$type == "factor") {
+    splat("factor levels:")
+    print(levels(x))
+  }
+  sigdig <- min(5, getOption('digits'))
+  unitinfo <- summary(unitname(x))
+  di <- x$dim
+  splat(di[1], "x", di[2], "pixel array (ny, nx)")
+  splat("enclosing rectangle:",
+        prange(signif(zapsmall(x$xrange), sigdig)),
+        "x",
+        prange(signif(zapsmall(x$yrange), sigdig)),
+        unitinfo$plural,
+        unitinfo$explain)
+  return(invisible(NULL))
+}
diff --git a/R/summary.kppm.R b/R/summary.kppm.R
new file mode 100644
index 0000000..86fe2d8
--- /dev/null
+++ b/R/summary.kppm.R
@@ -0,0 +1,143 @@
+#'
+#'       summary.kppm.R
+#'
+#'   $Revision: 1.5 $  $Date: 2015/05/08 04:25:23 $
+#' 
+
+summary.kppm <- function(object, ..., quick=FALSE) {
+  nama <- names(object)
+  result <- unclass(object)[!(nama %in% c("X", "po", "call", "callframe"))]
+  ## handle old format
+  if(is.null(result$isPCP)) result$isPCP <- TRUE
+  ## summarise trend component
+  result$trend <- summary(as.ppm(object), ..., quick=quick)
+  if(identical(quick, FALSE)) {
+    theta <- coef(object)
+    if(length(theta) > 0) {
+      vc <- vcov(object, matrix.action="warn")
+      se <- NULL
+      if(!is.null(vc)) {
+        se <- if(is.matrix(vc)) sqrt(diag(vc)) else
+              if(length(vc) == 1) sqrt(vc) else NULL
+      }
+      if(!is.null(se)) {
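+        ## two-sided 95% confidence interval: estimate +/- qnorm(0.975) * SE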
+        two <- qnorm(0.975)
+        lo <- theta - two * se
+        hi <- theta + two * se
+        zval <- theta/se
+        pval <- 2 * pnorm(abs(zval), lower.tail=FALSE)
+        psig <- cut(pval, c(0,0.001, 0.01, 0.05, 1),
+                    labels=c("***", "**", "*", "  "),
+                    include.lowest=TRUE)
+        ## table of coefficient estimates with SE and 95% CI
+        result$coefs.SE.CI <- data.frame(Estimate=theta, S.E.=se,
+                                         CI95.lo=lo, CI95.hi=hi,
+                                         Ztest=psig,
+                                         Zval=zval)
+      }
+    }
+  }
+  class(result) <- "summary.kppm"
+  return(result)
+}
+
+coef.summary.kppm <- function(object, ...) {
+  return(object$coefs.SE.CI)
+}
+
+print.summary.kppm <- function(x, ...) {
+  terselevel <- spatstat.options('terse')
+  digits <- getOption('digits')
+  isPCP <- x$isPCP
+  splat(if(x$stationary) "Stationary" else "Inhomogeneous",
+        if(isPCP) "cluster" else "Cox",
+        "point process model")
+
+  if(waxlyrical('extras', terselevel) && nchar(x$Xname) < 20)
+    splat("Fitted to point pattern dataset", sQuote(x$Xname))
+
+  if(waxlyrical('gory', terselevel)) {
+    switch(x$Fit$method,
+           mincon = {
+             splat("Fitted by minimum contrast")
+             splat("\tSummary statistic:", x$Fit$StatName)
+           },
+           clik  =,
+           clik2 = {
+             splat("Fitted by maximum second order composite likelihood")
+             splat("\trmax =", x$Fit$rmax)
+             if(!is.null(wtf <- x$Fit$weightfun)) {
+               cat("\tweight function: ")
+               print(wtf)
+             }
+           },
+           palm = {
+             splat("Fitted by maximum Palm likelihood")
+             splat("\trmax =", x$Fit$rmax)
+             if(!is.null(wtf <- x$Fit$weightfun)) {
+               cat("\tweight function: ")
+               print(wtf)
+             }
+           },
+           warning(paste("Unrecognised fitting method", sQuote(x$Fit$method)))
+           )
+  }
+
+  # ............... trend .........................
+
+  parbreak()
+  splat("----------- TREND MODEL -----")
+  print(x$trend, ...)
+
+  # ..................... clusters ................
+
+  tableentry <- spatstatClusterModelInfo(x$clusters)
+  
+  parbreak()
+  splat("-----------", 
+        if(isPCP) "CLUSTER" else "COX",
+        "MODEL",
+        "-----------")
+  splat("Model:", tableentry$printmodelname(x))
+  parbreak()
+  
+  cm <- x$covmodel
+  if(!isPCP) {
+    # Covariance model - LGCP only
+    splat("\tCovariance model:", cm$model)
+    margs <- cm$margs
+    if(!is.null(margs)) {
+      nama <- names(margs)
+      tags <- ifelse(nzchar(nama), paste(nama, "="), "")
+      tagvalue <- paste(tags, margs)
+      splat("\tCovariance parameters:",
+            paste(tagvalue, collapse=", "))
+    }
+  }
+  pa <- x$clustpar
+  if (!is.null(pa)) {
+    splat("Fitted",
+          if(isPCP) "cluster" else "covariance",
+          "parameters:")
+    print(pa, digits=digits)
+  }
+
+  if(!is.null(mu <- x$mu)) {
+    if(isPCP) {
+      splat("Mean cluster size: ",
+            if(!is.im(mu)) paste(signif(mu, digits), "points") else "[pixel image]")
+    } else {
+      splat("Fitted mean of log of random intensity:",
+            if(!is.im(mu)) signif(mu, digits) else "[pixel image]")
+    }
+  }
+  # table of coefficient estimates with SE and 95% CI
+  if(!is.null(cose <- x$coefs.SE.CI)) {
+    parbreak()
+    splat("Final standard error and CI")
+    splat("(allowing for correlation of",
+          if(isPCP) "cluster" else "Cox",
+          "process):")
+    print(cose)
+  }
+  invisible(NULL)
+}
diff --git a/R/summary.mppm.R b/R/summary.mppm.R
new file mode 100755
index 0000000..8dd5569
--- /dev/null
+++ b/R/summary.mppm.R
@@ -0,0 +1,258 @@
+#
+# summary.mppm.R
+#
+# $Revision: 1.15 $  $Date: 2016/04/25 02:34:40 $
+#
+
+
+summary.mppm <- function(object, ..., brief=FALSE) {
+  # y will be the summary 
+  y <- object[c("Call", "Info", "Inter", "trend", "iformula",
+#%^!ifdef RANDOMEFFECTS                
+                "random",
+#%^!endif                
+                "npat", "maxlogpl")]
+  y$brief <- brief
+
+  Info  <- object$Info
+  Inter <- object$Inter
+  FIT   <- object$Fit$FIT
+  moadf <- object$Fit$moadf
+
+  y$Fit <- object$Fit[c("fitter", "use.gam", "fmla", "Vnamelist")]
+  y$Fit$FIT <- summary(FIT)
+  y$Fit$moadf <- list(nrow=nrow(moadf), colnames=colnames(moadf))
+  
+  ninteract    <- Inter$ninteract
+  interaction  <- Inter$interaction
+  iused        <- Inter$iused
+  itags        <- Inter$itags
+  processnames <- Inter$processes
+  constant     <- Inter$constant
+  trivial      <- Inter$trivial
+
+  npat      <- y$npat
+  iformula  <- y$iformula
+#%^!ifdef RANDOMEFFECTS  
+  random    <- y$random
+#%^!endif  
+  Vnamelist <- y$Fit$Vnamelist
+  allVnames <- unlist(Vnamelist)
+  poistags  <- itags[trivial]
+
+#  rownames  <- y$Info$rownames
+  
+  switch(y$Fit$fitter,
+#%^!ifdef RANDOMEFFECTS         
+         glmmPQL={
+           y$coef <- co <- fixed.effects(FIT)
+           systematic <- !(names(co) %in% c(allVnames, poistags))
+           y$coef.syst <- co[systematic]
+           y$coef.rand <- random.effects(FIT)
+         },
+#%^!endif         
+         gam=,
+         glm={
+           y$coef <- co <- coef(FIT)
+           systematic <- !(names(co) %in% c(allVnames, poistags))
+           y$coef.syst <- co[systematic]
+         })
+
+  # model depends on covariates
+  y$depends.covar <- Info$has.covar && (length(Info$used.cov.names) > 0)
+
+#%^!ifdef RANDOMEFFECTS  
+  # random effects
+  y$ranef <- if(Info$has.random) summary(FIT$modelStruct) else NULL
+#%^!endif  
+
+  ### Interactions 
+  # model is Poisson 
+  y$poisson <- all(trivial[iused])
+  # Determine how complicated the interactions are:
+#%^!ifdef RANDOMEFFECTS  
+  # (0) are there random effects involving the interactions
+  randominteractions <-
+    !is.null(random) && any(variablesinformula(random) %in% itags)
+#%^!endif  
+  # (1) is the interaction formula of the form ~ tag + tag + ... + tag
+  isimple  <- identical(sort(variablesinformula(iformula)),
+                        sort(termsinformula(iformula)))
+  # (2) is it of the form ~tag 
+  trivialformula <- (isimple && ninteract == 1)
+  # (3) is it of the form ~tag where the interaction is the same in each row
+#%^!ifdef RANDOMEFFECTS
+  fixedinteraction <- (trivialformula && constant && !randominteractions)
+#%^!else
+#  fixedinteraction <- trivialformula && constant
+#%^!endif  
+  
+  ### Determine printing of interactions, accordingly ###
+  iprint <- list()
+#%^!ifdef RANDOMEFFECTS  
+  if(randominteractions) {
+    toohard <- TRUE
+    printeachrow <- FALSE
+  } else 
+#%^!endif  
+  if(fixedinteraction) {    
+    # exactly the same interaction for all patterns
+    interaction <- interaction[1,1,drop=TRUE]
+    fi.all <- fii(interaction, co, Vnamelist[[1]]) 
+    iprint <- list("Interaction for all patterns"=fi.all)
+    printeachrow <- FALSE
+    toohard      <- FALSE
+  } else if(trivialformula) {
+    # same type of process for all patterns
+    pname <-  unlist(processnames)[iused]
+    iprint <- list("Interaction for each pattern" = pname)
+    printeachrow <- TRUE
+    toohard      <- FALSE
+  } else if(isimple && all(constant)) {
+    # several interactions involved, each of which is the same for all patterns
+    iprint <- list("Interaction formula"=iformula,
+                   "Interactions defined for each pattern"=NULL)
+    for(j in (1:ninteract)[iused]) {
+      name.j <- paste("Interaction", sQuote(itags[j]))
+      int.j <- Inter$interaction[1,j,drop=TRUE]
+      Vnames.j <- Vnamelist[[j]]
+      fii.j <- fii(int.j, co, Vnames.j)
+      extra.j <- list(fii.j)
+      names(extra.j) <- name.j
+      iprint <- append(iprint, extra.j)
+    }
+    printeachrow <- FALSE
+    toohard      <- FALSE
+  } else {
+    # general case
+    # determine which interaction(s) are active on each row
+    active <- active.interactions(object)
+    if(ninteract > 1 || !all(active)) 
+      iprint <- list("Active interactions"=active)
+    printeachrow <- TRUE
+    toohard <- any(rowSums(active) > 1)
+  }
+
+  y$ikind <- list(
+#%^!ifdef RANDOMEFFECTS                  
+                  randominteractions=randominteractions,
+#%^!endif                  
+                  isimple           =isimple,
+                  trivialformula    =trivialformula,
+                  fixedinteraction  =fixedinteraction,
+                  toohard           =toohard,
+                  printeachrow      =printeachrow)
+
+  if(toohard)
+    iprint <- append(iprint,
+                     list("(Sorry, cannot interpret fitted interactions)"))
+  else if(printeachrow) {
+    subs <- subfits(object, what="interactions")
+    names(subs) <- paste("Interaction", 1:npat)
+    iprint <- append(iprint, subs)
+  }
+
+  y$iprint <- iprint
+
+  class(y) <- c("summary.mppm", class(list))
+  return(y)
+}
+
+
+print.summary.mppm <- function(x, ..., brief=x$brief) {
+  # NB: x is an object of class "summary.mppm"
+  npat <- x$npat
+#  Inter <- x$Inter
+#  ninteract   <- Inter$ninteract
+#  interaction   <- Inter$interaction
+#  iused     <- Inter$iused
+#  constant <- Inter$constant
+#  iformula <- x$iformula
+#  processnames   <- Inter$processes
+#  itags   <- Inter$itags
+#  trivial  <- Inter$trivial
+#%^!ifdef RANDOMEFFECTS  
+#  random   <- x$random
+#%^!endif  
+
+  FIT <- x$Fit$FIT
+#  Vnamelist <- x$Fit$Vnamelist
+  
+#  allVnames <- unlist(Vnamelist)
+#  poistags <- itags[trivial]
+
+  terselevel <- spatstat.options("terse")
+#  rownames <- x$Info$rownames
+
+  splat("Point process model fitted to", npat, "point patterns")
+  if(waxlyrical('gory', terselevel))
+    splat("Call:", x$Call$callstring)
+  splat("Log trend formula:", pasteFormula(x$trend))
+  switch(x$Fit$fitter,
+#%^!ifdef RANDOMEFFECTS         
+         glmmPQL={
+           cat("Fixed effects:\n")
+           print(x$coef.syst)
+           cat("Random effects:\n")
+           print(x$coef.rand)
+           co <- fixed.effects(FIT)
+         },
+#%^!endif         
+         gam=,
+         glm={
+           cat("Fitted trend coefficients:\n")
+           print(x$coef.syst)
+           co <- coef(FIT)
+         })
+
+  if(!brief && waxlyrical('extras', terselevel)) {
+    cat("All fitted coefficients:\n")
+    print(co)
+  }
+    
+  parbreak(terselevel)
+
+#%^!ifdef RANDOMEFFECTS  
+  if(!is.null(x$ranef)) {
+    splat("Random effects summary:")
+    print(x$ranef)
+    parbreak(terselevel)
+  }
+#%^!endif
+
+  ### Print interaction information ###
+  if(waxlyrical('extras', terselevel)) {
+    iprint <- x$iprint 
+    nama <- names(iprint) %orifnull% rep("", length(iprint))
+    for(i in seq_along(iprint)) {
+      nami <- nama[i]
+      vali <- iprint[[i]]
+      if(brief && is.matrix(vali))
+        vali <- paren(paste(nrow(vali), "x", ncol(vali), "matrix"))
+      if(nami != "") {
+        inline <- inherits(vali, "formula") ||
+                  is.character(vali) ||
+                  (brief && inherits(vali, "fii"))
+        if(inline) cat(paste0(nami, ":\t")) else splat(paste0(nami, ":"))
+      }
+      if(!is.null(vali)) {
+        if(inherits(vali, "fii")) {
+          print(vali, tiny=brief)
+        } else if(is.character(vali)) {
+          splat(vali)
+        } else {
+          print(vali)
+        } 
+      }
+      parbreak(terselevel)
+    }
+  }
+
+  if(!brief && waxlyrical('gory', terselevel)) {
+    splat("--- Gory details: ---")
+    splat("Combined data frame has", x$Fit$moadf$nrow, "rows")
+    print(FIT)
+  }
+  invisible(NULL)
+}
+
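+# Usage sketch (editorial example; assumes mppm() and the 'waterstriders'
+# dataset supplied with this package):
+#   fit <- mppm(Points ~ 1, data=hyperframe(Points=waterstriders))
+#   s <- summary(fit)      # invokes summary.mppm()
+#   print(s, brief=TRUE)   # condensed output via print.summary.mppm()
+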
diff --git a/R/summary.ppm.R b/R/summary.ppm.R
new file mode 100755
index 0000000..14dfdb7
--- /dev/null
+++ b/R/summary.ppm.R
@@ -0,0 +1,586 @@
+#
+#    summary.ppm.R
+#
+#    summary() method for class "ppm"
+#
+#    $Revision: 1.77 $   $Date: 2016/12/30 01:44:07 $
+#
+#    summary.ppm()
+#    print.summary.ppm()
+#
+
+summary.ppm <- local({
+  
+  covtype <- function(x) {
+    if(is.im(x)) "im" else
+    if(is.function(x)) "function" else
+    if(is.owin(x)) "owin" else
+    if(is.numeric(x) && length(x) == 1) "number" else
+    if(is.factor(x)) "factor" else
+    if(is.integer(x)) "integer" else
+    if(is.numeric(x)) "numeric" else storage.mode(x)
+  }
+  
+  xargs <- function(f) {
+    ar <- names(formals(f))[-(1:2)]
+    return(ar[ar != "..."])
+  }
+
+  summary.ppm <- function(object, ..., quick=FALSE, fine=FALSE) {
+    verifyclass(object, "ppm")
+
+    x <- object
+    y <- list()
+    class(y) <- "summary.ppm"
+
+    #######  Extract main data components #########################
+
+    QUAD <- object$Q
+    DATA <- QUAD$data
+    TREND <- x$trend
+
+    INTERACT <- x$interaction
+    if(is.null(INTERACT)) INTERACT <- Poisson()
+  
+    #######  Check version #########################
+    
+    mpl.ver <- versionstring.ppm(object)
+    int.ver <- versionstring.interact(INTERACT)
+    current <- versionstring.spatstat()
+
+    virgin <- min(package_version(c(mpl.ver, int.ver)))
+    
+    y$antiquated <- antiquated <- (virgin <= package_version("1.5"))
+    y$old        <- (virgin < majorminorversion(current))
+
+    y$version    <- as.character(virgin)
+    
+    ####### Determine type of model ############################
+  
+    y$entries <- list()
+    y$no.trend <- identical.formulae(TREND, NULL) ||
+                  identical.formulae(TREND, ~1)
+    y$trendvar <- trendvar <- variablesinformula(TREND)
+    y$stationary <- y$no.trend || all(trendvar == "marks")
+
+    y$poisson <- is.poisson.interact(INTERACT)
+
+    y$marked <- is.marked.ppp(DATA)
+    y$multitype <- is.multitype.ppp(DATA)
+    y$marktype <- if(y$multitype) "multitype" else
+                  if(y$marked) "marked" else "unmarked"
+
+    if(y$marked) y$entries$marks <- marks(DATA)
+
+    y$name <- paste(if(y$stationary) "Stationary " else "Nonstationary ",
+                    if(y$poisson) {
+                      if(y$multitype) "multitype "
+                      else if(y$marked) "marked "
+                      else ""
+                    },
+                    INTERACT$name,
+                    sep="")
+
+    ######  Fitting algorithm ########################################
+
+    y$method <- x$method
+
+    y$VB <- x$internal$VB
+    
+    y$problems <- x$problems
+
+    y$fitter <- if(!is.null(x$fitter)) x$fitter else "unknown"
+    if(y$fitter %in% c("glm", "gam"))
+      y$converged <- x$internal$glmfit$converged
+
+    ######  Coefficients were changed after fit? #####################
+  
+    y$projected <- yproj <- identical(x$projected, TRUE)
+    y$changedcoef <- yproj || !is.null(x$coef.orig)
+
+    y$valid <- valid.ppm(x, warn=FALSE)
+      
+    ######  Extract fitted model coefficients #########################
+
+    y$entries$coef <- COEFS <- x$coef
+    y$coef.orig <- x$coef.orig
+
+    y$entries$Vnames <- Vnames <- x$internal$Vnames
+    y$entries$IsOffset <- x$internal$IsOffset
+
+    ###### Extract fitted interaction and summarise  #################
+  
+    FITIN <- fitin(x)
+    y$interaction <- summary(FITIN)
+
+    # Exit here if quick=TRUE
+    
+    if(identical(quick, TRUE)) 
+      return(y)
+
+    ######  Does it have external covariates?  ####################
+
+    # defaults
+    y <- append(y,
+                list(has.covars    = FALSE,
+                     covnames      = character(0),
+                     covars.used   = character(0),
+                     uses.covars   = FALSE,
+                     covars.are.df = FALSE,
+                     expandable    = TRUE,
+                     covar.type    = character(0),
+                     covar.descrip = character(0),
+                     has.funcs     = FALSE,
+                     covfunargs    = NULL,
+                     has.xargs     = FALSE,
+                     xargmap       = NULL))
+    class(y) <- "summary.ppm"
+
+    if(!antiquated) {
+      covars <- x$covariates
+      y$has.covars <- hc <- !is.null(covars) && (length(covars) > 0)
+      if(hc) {
+        y$covnames <- names(covars)
+        used <- (y$trendvar %in% names(covars))
+        y$covars.used <- y$trendvar[used]
+        y$uses.covars <- any(used)
+        y$covars.are.df <- is.data.frame(covars)
+        # describe covariates
+        ctype <- unlist(lapply(covars, covtype))
+        y$expandable <- all(ctype[used] %in% c("function", "number"))
+        names(ctype) <- names(covars)
+        y$covar.type <- ctype
+        y$covar.descrip <- ctype
+        # are there any functions?
+        y$has.funcs <- any(isfun <- (ctype == "function"))
+        # do covariates depend on additional arguments?
+        if(y$has.funcs) {
+          y$covfunargs <- x$covfunargs
+          y$cfafitter <- attr(x$covfunargs, "fitter")
+          funs <- covars[isfun]
+          fdescrip <- function(f) {
+            if(inherits(f, "distfun")) return("distfun")
+            alist <- paste(names(formals(f)), collapse=", ")
+            paste("function(", alist, ")", sep="")
+          }
+          y$covar.descrip[isfun] <- unlist(lapply(funs, fdescrip))
+          # find any extra arguments (after args 1 & 2) explicitly named
+          fargs <- lapply(funs, xargs)
+          nxargs <- lengths(fargs)
+          y$has.xargs <- any(nxargs > 0)
+          if(y$has.xargs) {
+            # identify which function arguments are fixed in the call
+            fmap <- data.frame(Covariate=rep.int(names(funs), nxargs),
+                               Argument=unlist(fargs))
+            fmap$Given <- (fmap$Argument %in% names(y$covfunargs))
+            y$xargmap <- fmap
+          }
+        }
+      } 
+    } else {
+      # Antiquated format
+      # Interpret the function call instead
+      callexpr <- parse(text=x$call)
+      callargs <- names(as.list(callexpr[[1]]))
+      # Data frame of covariates was called 'data' in versions up to 1.4-x
+      y$has.covars <- !is.null(callargs) && !is.na(pmatch("data", callargs))
+      # conservative guess
+      y$uses.covars <- y$has.covars
+      y$covfunargs <- NULL
+    }
+    
+    ######  Arguments in call ####################################
+  
+    y$args <- x[c("call", "correction", "rbord")]
+  
+    #######  Main data components #########################
+
+    y$entries <- append(list(quad=QUAD,
+                             data=DATA,
+                             interaction=INTERACT),
+                        y$entries)
+
+    if(is.character(quick) && (quick == "entries"))
+      return(y)
+  
+    ####### Summarise data ############################
+
+    y$data <- summary(DATA, checkdup=FALSE)
+    y$quad <- summary(QUAD, checkdup=FALSE)
+
+    if(is.character(quick) && (quick == "no prediction"))
+      return(y)
+  
+    ######  Trend component #########################
+
+    y$trend <- list()
+
+    y$trend$name <- if(y$poisson) "Intensity" else "Trend"
+
+    y$trend$formula <- if(y$no.trend) NULL else TREND
+
+    if(y$poisson && y$no.trend) {
+      # uniform Poisson process
+      y$trend$value <- exp(COEFS[[1]])
+      y$trend$label <- switch(y$marktype,
+                              unmarked="Uniform intensity",
+                              multitype="Uniform intensity for each mark level",
+                              marked="Uniform intensity in product space",
+                              "")
+    } else if(y$stationary) {
+      # stationary
+      switch(y$marktype,
+             unmarked={
+               # stationary non-poisson non-marked
+               y$trend$label <- "First order term"
+               y$trend$value <- c(beta=exp(COEFS[[1]]))
+             },
+             multitype={
+               # stationary, multitype
+               mrk <- marks(DATA)
+               y$trend$label <-
+                 if(y$poisson) "Intensities" else "First order terms"
+               # Use predict.ppm to evaluate the fitted intensities
+               lev <- factor(levels(mrk), levels=levels(mrk))
+               nlev <- length(lev)
+               marx <- list(x=rep.int(0, nlev), y=rep.int(0, nlev), marks=lev)
+               betas <- predict(x, locations=marx, type="trend")
+               names(betas) <- paste("beta_", as.character(lev), sep="")
+               y$trend$value <- betas
+             },
+             marked={
+               # stationary, marked
+               y$trend$label <- "Fitted intensity coefficients"
+               y$trend$value <- blankcoefnames(COEFS)
+             })
+    } else {
+      # not stationary 
+      # extract trend terms without trying to understand them much
+      if(is.null(Vnames)) 
+        trendbits <- COEFS
+      else {
+        agree <- outer(names(COEFS), Vnames, "==")
+        whichbits <- matrowall(!agree)
+        trendbits <- COEFS[whichbits]
+      }
+      y$trend$label <- ngettext(length(trendbits),
+                                "Fitted trend coefficient",
+                                "Fitted trend coefficients")
+      y$trend$value <- blankcoefnames(trendbits)
+    }
+  
+    # ----- parameters with SE --------------------------
+
+    if(is.character(quick) && (quick == "no variances"))
+      return(y)
+
+    # Exit before SE for variational Bayes
+    if(!is.null(x$internal$VB))
+      return(y)
+    
+    if(length(COEFS) > 0) {
+      # compute standard errors
+      se <- x$internal$se
+      if(is.null(se)) {
+        vc <- vcov(x, fine=fine, matrix.action="warn")
+        if(!is.null(vc)) {
+          se <- if(is.matrix(vc)) sqrt(diag(vc)) else
+                if(length(vc) == 1) sqrt(vc) else NULL
+        }
+      }
+      if(!is.null(se)) {
+        two <- qnorm(0.975)
+        lo <- COEFS - two * se
+        hi <- COEFS + two * se
+        zval <- COEFS/se
+        pval <- 2 * pnorm(abs(zval), lower.tail=FALSE)
+        psig <- cut(pval, c(0,0.001, 0.01, 0.05, 1),
+                    labels=c("***", "**", "*", "  "),
+                    include.lowest=TRUE)
+        # table of coefficient estimates with SE and 95% CI
+        y$coefs.SE.CI <- data.frame(Estimate=COEFS, S.E.=se,
+                                    CI95.lo=lo, CI95.hi=hi,
+                                    Ztest=psig,
+                                    Zval=zval)
+      }
+    }
+  
+    return(y)
+  }
+  
+  summary.ppm
+})
+
+
+coef.summary.ppm <- function(object, ...) {
+  object$coefs.SE.CI
+}
+
+print.summary.ppm <- function(x, ...) {
+
+  if(x$old)
+    warning("Model was fitted by an older version of spatstat")
+  
+  if(is.null(x$args)) {
+    # this is the quick version
+    splat(x$name)
+    return(invisible(NULL))
+  }
+
+  # otherwise - full details
+  splat("Point process model")
+  fitter <- if(!is.null(x$fitter)) x$fitter else "unknown"
+  methodchosen <-
+    if(is.null(x$method))
+      "unspecified method"
+    else if(fitter == "exact") "maximum likelihood" else 
+      switch(x$method,
+             mpl={
+               if(x$poisson) {
+                 # Poisson process 
+                "maximum likelihood (Berman-Turner approximation)"
+               } else {
+                 "maximum pseudolikelihood (Berman-Turner approximation)"
+               } 
+             },
+             logi={
+               if(is.null(x$VB)){
+                 if(x$poisson) {
+                   # Poisson process
+                   "maximum likelihood (logistic regression approximation)"
+                 } else {
+                   "maximum pseudolikelihood (logistic regression approximation)"
+                 }
+               } else {
+                 "maximum posterior density (variational Bayes approximation)"
+               }
+             },
+             ho="Huang-Ogata method (approximate maximum likelihood)",
+             paste("unrecognised method", sQuote(x$method)))
+  splat("Fitting method:", methodchosen)
+  howfitted <- switch(fitter,
+                      exact= "analytically",
+                      gam  = "using gam()",
+                      glm  = "using glm()",
+                      ho   = NULL,
+                      paste("using unrecognised fitter", sQuote(fitter)))
+  if(!is.null(howfitted)) splat("Model was fitted", howfitted)
+  if(fitter %in% c("glm", "gam")) {
+    if(x$converged) splat("Algorithm converged")
+    else splat("*** Algorithm did not converge ***")
+  }
+  if(x$projected)
+    splat("Fit was projected to obtain a valid point process model")
+
+  cat("Call:\n")
+  print(x$args$call)
+
+  if(x$old) 
+    splat("** Executed by old spatstat version", x$version, " **")
+  
+  splat("Edge correction:", dQuote(x$args$correction))
+  if(x$args$correction == "border")
+    splat("\t[border correction distance r =", x$args$rbord,"]")
+
+  # print summary of quadrature scheme
+  if(is.null(x$quad))
+    return(invisible(NULL))
+  ruletextline()
+  print(x$quad)
+
+
+  ## start printing trend information
+  if(is.null(x$no.trend))
+    return(invisible(NULL))
+
+  ruletextline()
+  splat("FITTED MODEL:")
+  parbreak()
+
+  # This bit is currently identical to print.ppm()
+  # except for a bit more fanfare
+  # and the inclusion of the 'gory details' bit
+  
+  notrend <-    x$no.trend
+#  stationary <- x$stationary
+  poisson <-    x$poisson
+  markeddata <- x$marked
+  multitype  <- x$multitype
+        
+#  markedpoisson <- poisson && markeddata
+
+  # ----------- Print model type -------------------
+        
+  cat(x$name)
+  cat("\n")
+
+  if(markeddata) mrk <- x$entries$marks
+  if(multitype) {
+    splat("Possible marks:")
+    splat(paste(levels(mrk), collapse=", "))
+  }
+
+  # ----- trend --------------------------
+
+  if(length(x$trend) == 0)
+    return(invisible(NULL))
+  
+  parbreak()
+  splat(paste0("---- ", x$trend$name, ": ----"))
+  parbreak()
+
+  if(!notrend) {
+    splat("Log",
+          if(poisson) "intensity:" else "trend:",
+          pasteFormula(x$trend$formula))
+    if(x$uses.covars) 
+      splat("Model depends on external",
+            ngettext(length(x$covars.used), "covariate", "covariates"),
+            commasep(sQuote(x$covars.used)))
+  }
+  if(x$has.covars) {
+    if(notrend || !x$uses.covars)
+      splat("Model object contains external covariates")
+    isdf <- identical(x$covars.are.df, TRUE)
+    if(!is.null(cd <- x$covar.descrip)) {
+      # print description of each covariate
+      splat(paste0("Covariates provided",
+                   if(isdf) " (in data frame)" else NULL,
+                   ":"))
+      namescd <- names(cd)
+      for(i in seq_along(cd))
+        splat(paste0("\t", namescd[i], ": ", cd[i]))
+    }
+    if(!is.null(cfa <- x$covfunargs) && length(cfa) > 0) {
+      splat("Covariate function arguments (covfunargs) provided:")
+      namescfa <- names(cfa)
+      for(i in seq_along(cfa)) {
+        cat(paste(namescfa[i], "= "))
+        cfai <- cfa[[i]]
+        if(is.numeric(cfai) && length(cfai) == 1) {
+          cat(paste(cfai, "\n"))
+        } else print(cfa[[i]])
+      }
+    }
+  }
+
+  parbreak()
+  splat(paste0(x$trend$label, ":"))
+  
+  tv <- x$trend$value
+  if(!is.list(tv))
+    print(tv)
+  else 
+    for(i in seq_along(tv))
+      print(tv[[i]])
+
+  # table of coefficient estimates with SE and 95% CI
+  if(!is.null(cose <- x$coefs.SE.CI)) {
+    cat("\n")
+    print(cose)
+  }
+  
+  # ---- Interaction ----------------------------
+
+  
+  if(!poisson) {
+    parbreak()
+    splat(" ---- Interaction: -----")
+    parbreak()
+    print(x$interaction)
+  }
+
+  ####### Gory details ###################################
+  parbreak()
+  splat("----------- gory details -----")
+  parbreak()
+  COEFS <- x$entries$coef
+  
+  splat("Fitted regular parameters (theta):")
+  print(COEFS)
+
+  parbreak()
+  splat("Fitted exp(theta):")
+  print(exp(unlist(COEFS)))
+
+  ##### Warnings issued #######
+
+  probs <- x$problems
+  if(!is.null(probs) && is.list(probs) && (length(probs) > 0)) 
+    lapply(probs,
+           function(a) {
+             if(is.list(a) && !is.null(p <- a$print))
+               cat(paste("Problem:\n", p, "\n\n"))
+           })
+
+  vali <- x$valid
+  if(identical(vali, FALSE) && waxlyrical("errors")) {
+    parbreak()
+    splat("*** Model is not valid ***")
+    if(!all(is.finite(x$entries$coef))) {
+      splat("*** Some coefficients are NA or Inf ***")
+    } else {
+      splat("*** Interaction parameters are outside valid range ***")
+    }
+  } else if(is.na(vali) && waxlyrical("extras")) {
+    parbreak()
+    splat("[Validity of model could not be checked]")
+  }
+  
+  return(invisible(NULL))
+}
+
+no.trend.ppm <- function(x) {
+  summary.ppm(x, quick=TRUE)$no.trend
+}
+
+is.stationary <- function(x) {
+  UseMethod("is.stationary")
+}
+
+is.poisson <- function(x) {
+  UseMethod("is.poisson")
+}
+
+is.stationary.ppm <- function(x) {
+  TREND <- x$trend
+  if(is.null(TREND) || identical.formulae(TREND, ~1))
+    return(TRUE)
+  if(all(variablesinformula(TREND) == "marks"))
+    return(TRUE)
+  return(FALSE)
+}
+
+is.poisson.ppm <- function(x) {
+  stopifnot(is.ppm(x))
+  y <- x$interaction
+  if(is.null(y)) y <- Poisson()
+  is.poisson.interact(y)
+}
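+
+# Usage sketch (editorial example; assumes ppm() and the 'cells' dataset):
+#   fit <- ppm(cells ~ x)
+#   is.stationary(fit)   # FALSE: the trend depends on the covariate 'x'
+#   is.poisson(fit)      # TRUE: no interpoint interaction was given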
+
+is.marked.ppm <- function(X, ...) {
+  summary.ppm(X, quick=TRUE)$marked
+}
+
+is.multitype.ppm <- function(X, ...) {
+  summary.ppm(X, quick=TRUE)$multitype
+}
+
+is.expandable.ppm <- function(x) {
+  return(identical(summary(x, quick="entries")$expandable, TRUE))
+}
+
+blankcoefnames <- function(x) {
+  # remove name labels from ppm coefficients
+  # First decide whether there are 'labels within labels'
+  unlabelled <- unlist(lapply(x,
+                              function(z) { is.null(names(z)) } ))
+  if(all(unlabelled))
+    value <- unlist(x)
+  else {
+    value <- list()
+    for(i in seq_along(x))
+      value[[i]] <- if(unlabelled[i]) unlist(x[i]) else x[[i]]
+  }
+  return(value)
+} 
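+
+# Usage sketch (editorial example) of the 'quick' exits marked above:
+#   fit <- ppm(cells ~ 1)
+#   summary(fit, quick=TRUE)            # model type only
+#   summary(fit, quick="no variances")  # skip standard errors and CIs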
diff --git a/R/summary.quad.R b/R/summary.quad.R
new file mode 100755
index 0000000..a177606
--- /dev/null
+++ b/R/summary.quad.R
@@ -0,0 +1,162 @@
+#
+# summary.quad.R
+#
+#  summary() method for class "quad"
+#
+#  $Revision: 1.11 $ $Date: 2016/09/23 07:38:07 $
+#
+
+summary.quad <- local({
+
+  sumriz <- function(ww) {
+    if(length(ww) > 0) 
+      return(list(range=range(ww), sum=sum(ww)))
+    else
+      return(NULL)
+  }
+
+  summary.quad <- function(object, ..., checkdup=FALSE) {
+    verifyclass(object, "quad")
+    X <- object$data
+    D <- object$dummy
+    s <- list(
+      data  = summary.ppp(X, checkdup=checkdup),
+      dummy = summary.ppp(D, checkdup=checkdup),
+      param = object$param)
+    ## make description of dummy point arrangement
+    dpar <- object$param$dummy
+    eps.given <- dpar$orig$eps # could be NULL
+    eps.actual <- NULL
+    if(is.null(dpar)) {
+      descrip <- "(provided manually)"
+    } else if(is.character(dmethod <- dpar$method)) {
+      descrip <- dmethod
+    } else if(identical(dpar$quasi, TRUE)) {
+      descrip <- paste(npoints(D), "quasirandom dummy points",
+                       "plus 4 corner points")
+      eps.actual <- 1/(2 * sqrt(intensity(D)))
+    } else if(!is.null(nd <- dpar$nd)) {
+      nd <- ensure2vector(nd)
+      eps.actual <- unique(sidelengths(Frame(D))/nd)
+      if(identical(dpar$random, TRUE)) {
+        descrip <- paste("systematic random dummy points in",
+                         nd[1], "x", nd[2], "grid",
+                         "plus 4 corner points")
+      } else {
+        descrip <- paste(nd[1], "x", nd[2],
+                         "grid of dummy points, plus 4 corner points")
+      }
+    } else descrip <- "(rule for creating dummy points not understood)"
+    
+    if(!is.null(eps.actual)) {
+      uD <- unitname(D)
+      s$resolution <- numberwithunit(eps.actual, uD)
+      if(!is.null(eps.given)) {
+        descrip2 <- paste("dummy spacing:",
+                          format(eps.given %unit% uD), "requested,", 
+                          format(eps.actual %unit% uD), "actual")
+      } else {
+        descrip2 <- paste("dummy spacing:", format(eps.actual %unit% uD))
+      }
+      descrip <- c(descrip, descrip2)
+    }
+    s$descrip <- descrip
+    
+    w <- object$w
+    Z <- is.data(object)
+    s$w <- list(all   = sumriz(w),
+                data  = sumriz(w[Z]),
+                dummy = sumriz(w[!Z]))
+    class(s) <- "summary.quad"
+    return(s)
+  }
+
+  summary.quad
+})
+
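+# Usage sketch (editorial example; assumes quadscheme() and the 'cells'
+# dataset from this package):
+#   Q <- quadscheme(cells, nd=32)
+#   summary(Q)   # reports dummy spacing and quadrature weight totals
+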
+print.summary.quad <- local({
+
+  summariseweights <- function(ww, blah, dp=3) {
+    cat(paste(blah, ":\n\t", sep=""))
+    if(is.null(ww)) {
+      cat("(None)\n")
+      return()
+    }
+    splat(paste0("range: ",
+              "[",
+              paste(signif(ww$range, digits=dp), collapse=", "),
+              "]\t",
+              "total: ",
+              signif(ww$sum, digits=dp)))
+  }
+
+  print.summary.quad <- function(x, ..., dp=3) {
+    splat("Quadrature scheme = data + dummy + weights")
+    pa <- x$param
+    if(is.null(pa))
+      splat("created by an unknown function.")
+
+    parbreak()
+
+    splat("Data pattern:")
+    print(x$data, dp=dp)
+
+    parbreak()
+
+    splat("Dummy quadrature points:")
+    ## How they were computed
+    splat(x$descrip, indent=5)
+    parbreak()
+    ## What arguments were given
+    if(!is.null(orig <- pa$dummy$orig))
+      splat("Original dummy parameters:",
+            paste0(names(orig), "=", orig, collapse=", "))
+    ## Description of the dummy points
+    print(x$dummy, dp=dp)
+
+    splat("Quadrature weights:")
+    ## How they were computed
+    if(!is.null(pa)) {
+      wpar <- pa$weight
+      if(is.null(wpar))
+        splat("(values provided manually)", indent=5)
+      else if(is.character(wmethod <- wpar$method)) {
+        switch(wmethod,
+               grid = {
+                 splat("(counting weights based on",
+                       wpar$ntile[1], "x", wpar$ntile[2],
+                       "array of rectangular tiles)",
+                       indent=5)
+               },
+               dirichlet = {
+                 splat("(Dirichlet tile areas, computed",
+                       if(wpar$exact) "exactly)" else "by pixel approximation)",
+                       indent=5)
+               },
+               splat(wmethod, indent=5)
+               )
+      } else splat("(rule for computing quadrature weights not understood)",
+                   indent=5)
+    }
+    if(waxlyrical('extras')) {
+      summariseweights(x$w$all, "All weights", dp)
+      summariseweights(x$w$data, "Weights on data points", dp)
+      summariseweights(x$w$dummy, "Weights on dummy points", dp)
+    }
+    return(invisible(NULL))
+  }
+
+  print.summary.quad
+})
+
+print.quad <- function(x, ...) {
+  splat("Quadrature scheme")
+  splat(x$data$n, "data points,", x$dummy$n, "dummy points")
+  if(waxlyrical('extras')) {
+    sx <- summary(x)
+    splat(sx$descrip, indent=5)
+  }
+  splat("Total weight", sum(x$w), indent=5)
+  return(invisible(NULL))
+}
+
+
diff --git a/R/superimpose.R b/R/superimpose.R
new file mode 100755
index 0000000..5c42b4d
--- /dev/null
+++ b/R/superimpose.R
@@ -0,0 +1,272 @@
+# superimpose.R
+#
+# $Revision: 1.36 $ $Date: 2016/10/17 06:48:25 $
+#
+#
+############################# 
+
+superimpose <- function(...) {
+  # remove any NULL arguments
+  arglist <- list(...)
+  if(any(isnull <- sapply(arglist, is.null)))
+    return(do.call(superimpose, arglist[!isnull]))
+  UseMethod("superimpose")
+}
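+
+# NULL arguments are silently dropped, so, for example,
+#   superimpose(X, NULL, Y)
+# behaves exactly like superimpose(X, Y).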
+
+superimpose.default <- function(...) {
+  argh <- list(...)
+  #' First expand any arguments which are lists of objects
+  argh <- expandSpecialLists(argh, "solist")
+  #' Now dispatch
+  if(any(sapply(argh, is.lpp)) || any(sapply(argh, inherits, what="linnet")))
+    return(do.call(superimpose.lpp, argh))
+  if(any(sapply(argh, is.psp)))
+    return(do.call(superimpose.psp, argh))
+  #' default
+  return(do.call(superimpose.ppp, argh))
+}
+
+superimpose.ppp <- function(..., W=NULL, check=TRUE) {
+  arglist <- list(...)
+  # Check that all "..." arguments have x, y coordinates
+  hasxy <- unlist(lapply(arglist, checkfields, L=c("x", "y")))
+  if(!all(hasxy)) {
+    nbad <- sum(bad <- !hasxy)
+    stop(paste(ngettext(nbad, "Argument", "Arguments"),
+               commasep(which(bad)),
+               ngettext(nbad, "does not", "do not"),
+               "have components x and y"),
+         call.=FALSE)
+  }
+  
+  # concatenate lists of (x,y) coordinates
+  XY <- do.call(concatxy, arglist)
+  needcheck <- TRUE
+
+  # determine whether there is any window information
+  if(!is.owin(W)) {
+    # we have to compute the final window
+    WXY <- NULL
+    Wppp <- NULL
+    if(any(isppp <- unlist(lapply(arglist, is.ppp)))) {
+      # extract windows from ppp objects
+      wins <- unname(lapply(arglist[isppp], as.owin))
+      # take union
+      Wppp <- if(length(wins) == 1) wins[[1]] else do.call(union.owin, wins)
+    } 
+    if(is.function(W)) {
+      # W is a function like bounding.box.xy or ripras
+      # Apply function to the x,y coordinates; it should return an owin
+      WXY <- W(XY)
+      if(!is.owin(WXY))
+        stop("Function W did not return an owin object", call.=FALSE)
+    }
+    if(is.character(W)) {
+      # character string identifies a function
+      pW <- pmatch(W, c("convex", "rectangle", "bbox", "none"))
+      if(is.na(pW))
+        stop(paste("Unrecognised option W=", sQuote(W)), call.=FALSE)
+      WXY <- switch(pW,
+                    convex=ripras(XY),
+                    rectangle=ripras(XY, shape="rectangle"),
+                    bbox=boundingbox(XY),
+                    none=NULL)
+      # in these cases we don't need to verify that the points are inside.
+      needcheck <- !is.null(WXY)
+    }
+    if(is.null(WXY) && is.null(Wppp)) {
+      # no window information
+      return(XY)
+    }
+    W <- union.owin(WXY, Wppp)
+  }
+  # extract the marks if any
+  nobj <- lengths(lapply(arglist, getElement, name="x"))
+  marx  <- superimposeMarks(arglist, nobj)
+  #
+  ppp(XY$x, XY$y, window=W, marks=marx, check=check & needcheck)
+}
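+
+# Usage sketch (editorial example): combine two point patterns, with
+# W="rectangle" estimating a rectangular window via ripras() as above.
+#   X <- runifpoint(20)
+#   Y <- runifpoint(10)
+#   Z <- superimpose(A=X, B=Y, W="rectangle")
+#   # Z is multitype: marks(Z) records which pattern each point came from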
+
+superimpose.splitppp <- superimpose.ppplist <-
+  function(..., W=NULL, check=TRUE) {
+    arglist <- list(...)
+    while(any(h <- sapply(arglist, inherits, what=c("splitppp", "ppplist")))) {
+      i <- min(which(h))
+      arglist <- insertinlist(arglist, i, arglist[[i]])
+    }
+    do.call(superimpose, append(arglist, list(W=W, check=check)))
+  }
+
+superimpose.psp <- function(..., W=NULL, check=TRUE) {
+  # superimpose any number of line segment patterns
+  arglist <- list(...)
+  misscheck <- missing(check)
+
+  if(!all(sapply(arglist, is.psp)))
+    stop("Patterns to be superimposed must all be psp objects", call.=FALSE)
+
+  # extract segment coordinates
+  matlist <- lapply(lapply(arglist, getElement, name="ends"),
+                    asNumericMatrix)
+  
+  # tack them together
+  mat <- do.call(rbind, matlist)
+
+  # determine whether there is any window information
+  needcheck <- FALSE
+  if(!is.owin(W)) {
+    # we have to compute the final window
+    WXY <- NULL
+#    Wpsp <- NULL
+    if(any(ispsp <- unlist(lapply(arglist, is.psp)))) {
+      # extract windows from psp objects
+      wins <- unname(lapply(arglist[ispsp], as.owin))
+      # take union
+      Wppp <- if(length(wins) == 1) wins[[1]] else do.call(union.owin, wins)
+    }
+    if(is.function(W) || is.character(W)) {
+      # guess window from x, y coordinates
+      XY <- list(x=cbind(mat[,1], mat[,3]),
+                 y=cbind(mat[,2], mat[,4]))
+      if(is.function(W)) {
+        # W is a function like bounding.box.xy or ripras
+        # Apply function to the x,y coordinates; it should return an owin
+        WXY <- W(XY)
+        if(!is.owin(WXY))
+          stop("Function W did not return an owin object", call.=FALSE)
+      }
+      if(is.character(W)) {
+        # character string identifies a function
+        pW <- pmatch(W, c("convex", "rectangle", "bbox", "none"))
+        if(is.na(pW))
+          stop(paste("Unrecognised option W=", sQuote(W)), call.=FALSE)
+        WXY <- switch(pW,
+                      convex=ripras(XY),
+                      rectangle=ripras(XY, shape="rectangle"),
+                      bbox=boundingbox(XY),
+                      none=NULL)
+      # in these cases we don't need to verify that the points are inside.
+        needcheck <- !is.null(WXY)
+      }
+    }
+    W <- union.owin(WXY, Wppp)
+  }
+  
+  # extract marks, if any
+  nobj <- sapply(arglist, nsegments)
+  marx <- superimposeMarks(arglist, nobj)
+
+  if(misscheck && !needcheck) check <- FALSE
+  return(as.psp(mat, window=W, marks=marx, check=check))
+}
+
+superimposeMarks <- function(arglist, nobj) {
+  # combine marks from the objects in the argument list
+  marxlist <- lapply(arglist, marks)
+  marx <- do.call(markappend, unname(marxlist))
+  nama <- names(arglist)
+  if(length(nama) == length(arglist) && all(nzchar(nama))) {
+    # arguments are named: use names as (extra) marks
+    newmarx <- factor(rep.int(nama, nobj), levels=nama)
+    marx <- markcbind(marx, newmarx)
+    if(ncol(marx) == 2) {
+      ## component marks were not named: call them 'origMarks'
+      colnames(marx) <- c("origMarks", "pattern")
+    } else colnames(marx)[ncol(marx)] <- "pattern"
+  }
+  return(marx)
+}
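+
+# Example of the naming rule above (editorial sketch): in
+#   superimpose(A=runifpoint(5), B=runifpoint(5))
+# the argument names A, B become the levels of an extra factor-valued
+# mark column named 'pattern'.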
+
+#==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===
+
+# This function is now deprecated.
+superimposePSP <- function(..., W=NULL, check=TRUE)
+{
+  .Deprecated("superimpose","spatstat")
+  
+  # superimpose any number of line segment patterns
+  arglist <- list(...)
+
+  nargue <- length(arglist)
+  if(nargue == 0)
+    stop("No line segment patterns given", call.=FALSE)
+  
+  # catch possible abuses
+  if(is.null(W) && any(suspicious <- (names(arglist) == "window"))) {
+    id <- min(which(suspicious))
+    Win <- arglist[[id]]
+    if(is.owin(Win) || is.null(Win)) {
+      W <- Win
+      arglist <- arglist[-id]
+      nargue <- length(arglist)
+    }
+  }
+
+  # unpack a list
+  if(nargue == 1) {
+    X <- arglist[[1]]
+    if(!inherits(X, "psp") && inherits(X, "list"))
+      arglist <- X
+  }
+
+  isnull <- unlist(lapply(arglist, is.null))
+  arglist <- arglist[!isnull]
+  
+  if(!all(unlist(lapply(arglist, is.psp))))
+    stop("Some of the arguments are not psp objects", call.=FALSE)
+  
+  # extract segment coordinates
+  matlist <- lapply(arglist, function(x) { as.matrix(x$ends) })
+  # tack them together
+  mat <- do.call(rbind, matlist)
+
+  # extract marks if any
+  marxlist <- lapply(arglist, marks)
+
+  # check on compatibility of marks
+  mkfmt <- sapply(marxlist, markformat)
+  if(length(unique(mkfmt)) > 1)
+    stop(paste("The marks of the point patterns have different formats:",
+               commasep(sQuote(mkfmt))),
+         call.=FALSE)
+  mkfmt <- mkfmt[1]
+  if(mkfmt == "dataframe") {
+    mcnms <- lapply(marxlist, names)
+    cdim  <- lengths(mcnms)
+    OK    <- length(unique(cdim)) == 1
+    if(OK) {
+      allInOne <- sapply(mcnms, paste, collapse="")
+      OK <- length(unique(allInOne)) == 1
+      if(!OK)
+        stop("Data frames of marks have different names", call.=FALSE)
+    } else stop("Data frames of marks have different column dimensions",
+                call.=FALSE)
+  }
+ 
+  # combine the marks
+  marx <- switch(mkfmt,
+                 none = NULL,
+                 vector = {
+                   marxlist <- lapply(marxlist,
+                                      as.data.frame.vector,
+                                      nm="v1")
+                   do.call(rbind, marxlist)[,1]
+                 },
+                 dataframe = do.call(rbind, marxlist))
+
+  # determine window
+  if(!is.null(W))
+    W <- as.owin(W)
+  else {
+    # extract windows from psp objects
+    Wlist <- lapply(arglist, as.owin)
+    # take the union of all the windows
+    W <- NULL
+    for(i in seq_along(Wlist))
+      W <- union.owin(W, Wlist[[i]])
+  }
+
+  return(as.psp(mat, window=W, marks=marx, check=check))
+}
+
diff --git a/R/symbolmap.R b/R/symbolmap.R
new file mode 100644
index 0000000..225bffb
--- /dev/null
+++ b/R/symbolmap.R
@@ -0,0 +1,659 @@
+##
+## symbolmap.R
+##
+##   $Revision: 1.34 $  $Date: 2016/09/12 10:50:51 $
+##
+
+symbolmap <- local({
+
+  known.unknowns <- c("shape", "pch", "chars",
+                      "size", "cex",
+                      "direction", "arrowtype", "headlength", "headangle", 
+                      "col", "cols", "fg", "bg",
+                      "lty", "lwd", "border", "fill",
+                      "etch")
+
+  trycolourmap <- function(...) {
+    try(colourmap(...), silent=TRUE)
+  }
+
+  symbolmap <- function(..., range=NULL, inputs=NULL) {
+    if(!is.null(range) && !is.null(inputs))
+      stop("Arguments range and inputs are incompatible")
+    ## graphics parameters
+    parlist <- list(...)
+    ## remove unrecognised parameters and NULL values 
+    if(length(parlist) > 0) {
+      ok <- names(parlist) %in% known.unknowns
+      ok <- ok & !unlist(lapply(parlist, is.null))
+      parlist <- parlist[ok]
+    }
+    got.pars <- (length(parlist) > 0)
+    parnames <- names(parlist)
+    type <- if(is.null(inputs) && is.null(range)) "constant" else
+            if(!is.null(inputs)) "discrete" else "continuous"
+    if(got.pars) {
+      ## validate parameters
+      if(is.null(parnames) || !all(nzchar(parnames)))
+        stop("All graphics parameters must have names")
+      atomic <- unlist(lapply(parlist, is.atomic))
+      functions <- unlist(lapply(parlist, is.function))
+      lenfs <- lengths(parlist)
+      constants <- atomic & (lenfs == 1)
+      if(any(bad <- !(constants | functions))) {
+        if(type == "discrete" && any(repairable <- atomic[bad])) {
+          ## recycle data to desired length
+          parlist[repairable] <- lapply(parlist[repairable],
+                                        reptolength,
+                                        n=length(inputs))
+          bad[repairable] <- FALSE
+        }
+        if(type == "continuous") {
+          ## look for vectors of colour values
+          iscol <- bad & sapply(parlist, is.colour) &
+            (names(parlist) %in% c("cols", "col", "fg", "bg"))
+          ## convert colour values to colour map
+          if(any(iscol)) {
+            cmap <- lapply(parlist[iscol], trycolourmap, range=range)
+            success <- sapply(cmap, inherits, what="colourmap")
+            iscol[iscol] <- success
+            if(any(iscol)) {
+              parlist[iscol] <- cmap[success]
+              bad[iscol] <- FALSE
+              functions[iscol] <- TRUE
+            }
+          }
+        }
+        nbad <- sum(bad)
+        if(nbad > 0) 
+          stop(paste(ngettext(nbad, "Argument", "Arguments"),
+                     commasep(sQuote(parnames[bad])),
+                     ngettext(nbad, "is neither a function nor a constant",
+                              "are neither functions nor constants")))
+      }
+      if(type == "constant" && any(functions))
+        type <- "continuous"
+    } 
+    switch(type,
+           constant ={
+             ## set of constant graphics parameters defining a single symbol
+             stuff <- list(type=type, parlist=parlist)
+             ConstantValue <- as.data.frame(parlist, stringsAsFactors=FALSE)
+             f <- function(x) ConstantValue
+           },
+           discrete = {
+             ## finite set of inputs mapped to symbols
+             stuff <- list(type=type, inputs=inputs, parlist=parlist)
+             f <- function(x) ApplyDiscreteSymbolMap(x, stuff)
+           },
+           continuous = {
+             got.shape <- "shape" %in% parnames
+             got.size <- "size" %in% parnames
+             got.cha <- any(c("pch", "chars") %in% parnames)
+             ## interval of real line (etc) mapped to symbols or characters
+             if(!got.cha) {
+               ## mapped to symbols
+               if(!got.shape)
+                 parlist$shape <- "circles"
+               if(!got.size)
+                 stop("Parameter 'size' is missing")
+             }
+             rangetype <- if(is.null(range)) "numeric" else
+                          if(inherits(range, "POSIXt")) "datetime" else
+                          if(inherits(range, "Date")) "date" else
+                          if(is.numeric(range)) "numeric" else "unknown"
+             stuff <- list(type=type, range=range, rangetype=rangetype,
+                           parlist=parlist)
+             f <- function(x) ApplyContinuousSymbolMap(x, stuff)
+           })
+    attr(f, "stuff") <- stuff
+    class(f) <- c("symbolmap", class(f))
+    f
+  }
+
+  reptolength <- function(z, n) { rep.int(z, n)[1:n] }
+  
+  MapDiscrete <- function(f, x, i) {
+    if(is.function(f)) f(x) else if(length(f) == 1) rep.int(f, length(x)) else f[i]
+  }
+  
+  MapContinuous <- function(f, x) {
+    if(is.function(f)) f(x) else rep.int(f, length(x))
+  }
+
+  ApplyContinuousSymbolMap <- function(x, stuff) {
+    with(stuff, {
+      y <- as.data.frame(lapply(parlist, MapContinuous, x=x),
+                         stringsAsFactors=FALSE)
+      return(y)
+    })
+  }
+  
+  ApplyDiscreteSymbolMap <- function(x, stuff) {
+    with(stuff, {
+      ii <- match(x, inputs)
+      if(anyNA(ii))
+        stop("Some values do not belong to the domain of the symbol map")
+      y <- as.data.frame(lapply(parlist, MapDiscrete, x=x, i=ii),
+                         stringsAsFactors=FALSE)
+      return(y)
+    })
+  }
+  symbolmap
+})
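+
+## Usage sketches (editorial examples):
+##   ## discrete: map three input values to plotting characters
+##   g1 <- symbolmap(inputs=letters[1:3], pch=1:3)
+##   ## continuous: circle diameter proportional to the value
+##   g2 <- symbolmap(range=c(0, 1), shape="circles",
+##                   size=function(x) 0.1 * x)
+##   g2(0.5)   ## returns a data frame of graphics parameters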
+
+symbolmaptype <- function(x) { attr(x, "stuff")$type }
+
+update.symbolmap <- function(object, ...) {
+  y <- attr(object, "stuff")
+  oldargs <- append(y[["parlist"]], y[c("inputs", "range")])
+  do.call(symbolmap, resolve.defaults(list(...), oldargs))
+}
+
+print.symbolmap <- function(x, ...) {
+  with(attr(x, "stuff"), {
+    switch(type,
+           constant = {
+             if(length(parlist) == 0) {
+               cat("Symbol map", "with no parameters", fill=TRUE)
+             } else {
+               cat("Symbol map", "with constant values", fill=TRUE)
+             }
+           },
+           discrete = {
+             cat("Symbol map", "for discrete inputs:", fill=TRUE)
+             print(inputs)
+           },
+           continuous = {
+             cat("Symbol map", "for",
+                 switch(rangetype,
+                        numeric="real numbers",
+                        date = "dates",
+                        datetime = "date/time values",
+                        unknown = "unrecognised data"),
+                 if(!is.null(range)) paste("in", prange(range)) else NULL,
+                 fill=TRUE)
+           })
+    if(length(parlist) > 0) {
+      for(i in seq_along(parlist)) {
+        cat(paste0(names(parlist)[i], ": "))
+        pari <- parlist[[i]]
+        if(!is.function(pari) && length(pari) == 1)
+          cat(pari, fill=TRUE) else print(pari)
+      }
+    }
+    return(invisible(NULL))
+  })
+}
+
+## Function which actually plots the symbols.
+## Called by plot.ppp and plot.symbolmap
+## Returns maximum size of symbols
+
+invoke.symbolmap <- local({
+
+  ## plot points, handling various arguments
+  do.points <- function(x, y, ...,
+                        cex=size, size=NULL, 
+                        col=cols, pch=chars, cols=NULL, chars=NULL,
+                        lwd=1, etch=FALSE, 
+                        do.plot=TRUE) {
+    if(do.plot) {
+      if(length(cex) == 0) cex <- 1
+      if(length(col) == 0) col <- par("col")
+      if(length(pch) == 0) pch <- 1
+      if(length(lwd) == 0) lwd <- 1
+      n <- length(x)
+      if(length(cex) == 1) cex <- rep(cex, n)
+      if(length(col) == 1) col <- rep(col, n)
+      if(length(pch) == 1) pch <- rep(pch, n)
+      if(length(lwd) == 1) lwd <- rep(lwd, n)
+      if(length(etch) == 1) etch <- rep(etch, n)
+      ## infer which arguments are parallelised
+      other <- append(list(...), list(cex=cex, pch=pch))
+      isvec <- (lengths(other) == n)
+      other.fixed <- other[!isvec]
+      other.vec   <- other[isvec]
+      ##
+      if(any(i <- as.logical(etch))) {
+        anti.col <- complementarycolour(col)
+        anti.lwd <- if(is.numeric(etch)) etch else 2 * lwd
+        do.call.matched(points.default,
+                        resolve.defaults(list(x=x[i], y=y[i]),
+                                         other.fixed,
+                                         lapply(other.vec, "[", i=i),
+                                         list(col=anti.col[i],
+                                              lwd=anti.lwd[i])),
+                        extrargs=c("col", "pch", "type", "bg",
+                                   "cex", "lwd", "lty"))
+      }
+      do.call.matched(points.default,
+                    resolve.defaults(list(x=x, y=y),
+                                     other,
+                                     list(col=col, lwd=lwd)),
+                    extrargs=c("col", "pch", "type", "bg", "cex", "lwd", "lty"))
+    }
+    return(max(cex %orifnull% 1))
+  }
+  ## plot symbols likewise
+  do.symbols <- function(x, y, ..., 
+                         shape,
+                         size=cex, cex=NULL,
+                         fg=col, col=cols, cols=NULL,
+                         lwd=1, etch=FALSE, do.plot=TRUE) {
+    if(do.plot) {
+      ## zap tiny sizes
+      tiny <- (size < (max(size)/1000))
+      size[tiny] <- 0
+      ## collect arguments
+      n <- length(x)
+      if(length(lwd) == 1) lwd <- rep(lwd, n)
+      if(length(etch) == 1) etch <- rep(etch, n)
+      if(length(fg) == 0) fg <- rep(par("col"), n) else
+      if(length(fg) == 1) fg <- rep(fg, n)
+      other <- resolve.defaults(list(...),
+                                list(add=TRUE, inches=FALSE))
+      ## infer which arguments are parallelised
+      isvec <- (lengths(other) == n)
+      other.fixed <- other[!isvec]
+      other.vec   <- other[isvec]
+      ##
+      if(any(as.logical(etch))) {
+        anti.fg <- complementarycolour(fg)
+        anti.lwd <- if(is.numeric(etch)) etch else 2 * lwd
+      }
+      ## plot
+      if(any(i <- (shape == "circles") & as.logical(etch))) 
+        do.call.matched(symbols,
+                        c(list(x=x[i], y=y[i], circles=size[i]/2),
+                          other.fixed,
+                          lapply(other.vec, "[", i=i),
+                          list(lwd=anti.lwd[i], fg=anti.fg[i])),
+                        extrargs=c("lwd", "lty"))
+      if(any(i <- (shape == "circles")))
+        do.call.matched(symbols,
+                        c(list(x=x[i], y=y[i], circles=size[i]/2),
+                          other.fixed,
+                          lapply(other.vec, "[", i=i),
+                          list(lwd=lwd[i], fg=fg[i])),
+                        extrargs=c("lwd", "lty"))
+      if(any(i <- (shape == "squares") & as.logical(etch)))
+        do.call.matched(symbols,
+                        c(list(x=x[i], y=y[i], squares=size[i]),
+                          other.fixed,
+                          lapply(other.vec, "[", i=i),
+                          list(lwd=anti.lwd[i], fg=anti.fg[i])),
+                        extrargs=c("lwd", "lty"))
+      if(any(i <- (shape == "squares"))) 
+        do.call.matched(symbols,
+                        c(list(x=x[i], y=y[i], squares=size[i]),
+                          other.fixed,
+                          lapply(other.vec, "[", i=i),
+                          list(lwd=lwd[i], fg=fg[i])),
+                        extrargs=c("lwd", "lty"))
+      if(any(i <- (shape == "arrows") & as.logical(etch)))
+        do.call.matched(do.arrows,
+                        c(list(x=x[i], y=y[i], len=size[i]),
+                          other.fixed,
+                          lapply(other.vec, "[", i=i),
+                          list(lwd=anti.lwd[i], cols=anti.fg[i])),
+                        extrargs=c("cols", "col", "lwd", "lty"))
+      if(any(i <- (shape == "arrows"))) 
+        do.call.matched(do.arrows,
+                        c(list(x=x[i], y=y[i], len=size[i]),
+                          other.fixed,
+                          lapply(other.vec, "[", i=i),
+                          list(lwd=lwd[i], cols=fg[i])),
+                        extrargs=c("cols", "col", "lwd", "lty"))
+    }
+    return(max(size))
+  }
+
+  do.arrows <- function(x, y, len, direction=0, arrowtype=2, ...,
+                        headlength=len * 0.4, 
+                        headangle=40,
+                        cols=col, col=par('fg'),
+                        lwd=1, lty=1) {
+    #' vectorise all arguments
+    df <- data.frame(x=x, y=y, len=len, direction=direction,
+                     arrowtype=arrowtype, headangle=headangle,
+                     cols=cols, lwd=lwd, lty=lty)
+    with(df, {
+      alpha <- direction * pi/180
+      dx <- len * cos(alpha)/2
+      dy <- len * sin(alpha)/2
+      x0 <- x - dx
+      x1 <- x + dx
+      y0 <- y - dy
+      y1 <- y + dy
+      segments(x0, y0, x1, y1, ..., col=cols, lty=lty, lwd=lwd)
+      if(any(arrowtype != 0)) {
+        halfangle <- (headangle/2) * pi/180
+        beta1 <- alpha + halfangle
+        beta2 <- alpha - halfangle
+        hx1 <- headlength * cos(beta1)
+        hy1 <- headlength * sin(beta1)
+        hx2 <- headlength * cos(beta2)
+        hy2 <- headlength * sin(beta2)
+        if(any(left <- (arrowtype %in% c(1,3)))) {
+          segments(x0[left], y0[left], (x0 + hx1)[left], (y0 + hy1)[left],
+                   ..., col=cols[left], lwd=lwd[left], lty=lty[left])
+          segments(x0[left], y0[left], (x0 + hx2)[left], (y0 + hy2)[left],
+                   ..., col=cols[left], lwd=lwd[left], lty=lty[left])
+        }
+        if(any(right <- (arrowtype %in% c(2,3)))) {
+          segments(x1[right], y1[right], (x1 - hx1)[right], (y1 - hy1)[right],
+                   ..., col=cols[right], lwd=lwd[right], lty=lty[right])
+          segments(x1[right], y1[right], (x1 - hx2)[right], (y1 - hy2)[right],
+                   ..., col=cols[right], lwd=lwd[right], lty=lty[right])
+        }
+      }
+    })
+    return(invisible(NULL))
+  }
+  
+  ## main function
+  invoke.symbolmap <- function(map, values, x=NULL, y=NULL, ...,
+                                 add=FALSE, do.plot=TRUE,
+                                 started = add && do.plot) {
+    if(!inherits(map, "symbolmap"))
+      stop("map should be an object of class 'symbolmap'")
+    if(hasxy <- (!is.null(x) || !is.null(y))) {
+      xy <- xy.coords(x, y)
+      x <- xy$x
+      y <- xy$y
+    } 
+    ## function will return maximum size of symbols plotted.
+    maxsize <- 0
+    if(do.plot && !add) plot(x, y, type="n", ...)
+    force(values)
+    g <- map(values)
+    parnames <- colnames(g)
+    if(do.plot) {
+      xydf <- data.frame(x=x, y=y)
+      if(nrow(xydf) == 0)
+        return(invisible(maxsize))
+      g <- if(prod(dim(g)) == 0) xydf else 
+           do.call(data.frame,
+                   c(as.list(g), as.list(xydf), list(stringsAsFactors=FALSE)))
+    }
+    n <- nrow(g)
+    ## figure out which function does the graphics job
+    need.points <- any(c("pch", "chars") %in% parnames)
+    need.symbols <- "shape" %in% parnames
+    if(need.symbols && need.points) {
+      worker <- with(g, ifelse(!is.na(shape), "symbols", "points"))
+    } else if(need.symbols) {
+      worker <- rep.int("symbols", n)
+    } else {
+      worker <- rep.int("points", n)
+    } 
+    ## split data according to graphics function involved
+    z <- split(g, factor(worker))
+    ## display using 'pch'
+    zpoints <- z[["points"]]
+    if(!is.null(zpoints) && nrow(zpoints) > 0) {
+      ms <- do.call(do.points,
+                    resolve.defaults(as.list(zpoints),
+                                     list(...),
+                                     list(do.plot=do.plot)))
+      ## value is max(cex)
+      ## guess size of one character
+      charsize <- if(started) max(par('cxy')) else
+                  if(hasxy) max(sidelengths(boundingbox(x,y))/40) else 1/40
+      maxsize <- max(maxsize, charsize * ms)
+    }
+    ## display using 'symbols'
+    zsymbols <- z[["symbols"]]
+    if(!is.null(zsymbols) && nrow(zsymbols) > 0) {
+      ms <- do.call(do.symbols,
+                    resolve.defaults(as.list(zsymbols),
+                                     list(...),
+                                     list(do.plot=do.plot)))
+      ## ms value is max physical size.
+      maxsize <- max(maxsize, ms)
+    }
+    return(invisible(maxsize))
+  }
+
+  invoke.symbolmap
+})
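+
+## Editorial sketch: invoke.symbolmap() is normally called by plot.ppp(),
+## but can be used directly (assuming g2 from the sketch above):
+##   X <- runifpoint(10)
+##   plot(Frame(X), main="")
+##   invoke.symbolmap(g2, runif(10), X$x, X$y, add=TRUE)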
+
+
+## Display the symbol map itself (`legend' style)
+
+plot.symbolmap <- function(x, ..., main,
+                           xlim=NULL, ylim=NULL,
+                           vertical=FALSE,
+                           side=c("bottom", "left", "top", "right"),
+                           annotate=TRUE, labelmap=NULL, add=FALSE) {
+  if(missing(main))
+    main <- short.deparse(substitute(x))
+  miss.side <- missing(side)
+  side <- match.arg(side)
+  
+  type <- symbolmaptype(x)
+  map <- x
+  stuff <- attr(map, "stuff")
+
+  if(type == "constant" && length(stuff$parlist) == 0)
+    return(invisible(NULL))
+
+  if(is.null(labelmap)) {
+    labelmap <- function(x) x
+  } else if(type == "continuous" &&
+            is.numeric(labelmap) && length(labelmap) == 1) {
+    labscal <- labelmap
+    labelmap <- function(x) { x * labscal }
+  } else stopifnot(is.function(labelmap))
+
+  ## determine the 'example' input values and their graphical representations
+  switch(type,
+         constant = {
+           vv <- NULL
+         },
+         continuous = {
+           ra <- stuff$range
+           if(is.null(ra))
+             stop("Cannot plot symbolmap with an infinite range")
+           vv <- prettyinside(ra)
+           if(is.numeric(vv))
+             vv <- signif(vv, 4)
+         },
+         discrete = {
+           vv <- prettydiscrete(stuff$inputs)
+           if(vertical) vv <- rev(vv)
+         })
+  nn <- length(vv)
+  ##    gg <- map(vv)
+  ll <- paste(labelmap(vv))
+    
+  ## determine position of plot and symbols
+  if(add) {
+    ## x and y limits must respect existing plot space
+    usr <- par('usr')
+    if(is.null(xlim)) xlim <- usr[1:2]
+    if(is.null(ylim)) ylim <- usr[3:4]
+  } else {
+    ## create new plot
+    maxdiam <- invoke.symbolmap(map, vv, do.plot=FALSE, started=FALSE)
+    zz <- c(0, max(1, maxdiam))
+    if(is.null(xlim) && is.null(ylim)) {
+      if(vertical) {
+        xlim <- zz
+        ylim <- length(vv) * zz
+      } else {
+        xlim <- length(vv) * zz
+        ylim <- zz
+      }
+    } else if(is.null(ylim)) {
+      ylim <- zz
+    } else if(is.null(xlim)) {
+      xlim <- zz
+    }
+  }
+
+  ## .......... initialise plot ...............................
+  if(!add)
+    do.call.matched(plot.default,
+                    resolve.defaults(list(x=xlim, y=ylim,
+                                          type="n", main=main,
+                                          axes=FALSE, xlab="", ylab="",
+                                          asp=1.0),
+                                     list(...)))
+  ## maximum symbol diameter
+  maxdiam <- invoke.symbolmap(map, vv, do.plot=FALSE, started=TRUE)
+
+  ## .......... plot symbols ....................
+  if(type == "constant") {
+    xp <- mean(xlim)
+    yp <- mean(ylim)
+  } else if(vertical) {
+    ## vertical arrangement
+    xp <- rep(mean(xlim), nn)
+    vskip <- 1.1 * max(maxdiam, 3 * max(strheight(labelmap(vv))))
+    if(diff(ylim) > nn * vskip) {
+      yp <- (1:nn) * vskip
+      yp <- yp - mean(yp) + mean(ylim)
+    } else {
+      z <- seq(ylim[1], ylim[2], length=nn+1)
+      yp <- z[-1] - diff(z)/2
+    }
+  } else {
+    ## horizontal arrangement
+    yp <- rep(mean(ylim), nn)
+    hskip <- 1.1 * max(maxdiam, max(strwidth(labelmap(vv))))
+    if(diff(xlim) > nn * hskip) {
+      xp <- (1:nn) * hskip
+      xp <- xp - mean(xp) + mean(xlim)
+    } else {
+      z <- seq(xlim[1], xlim[2], length=nn+1)
+      xp <- z[-1] - diff(z)/2
+    }
+  }
+  invoke.symbolmap(map, vv, xp, yp, ..., add=TRUE)
+
+  ## ................. draw annotation ..................
+  if(annotate && length(ll) > 0) {
+    if(vertical) {
+      ## default axis position is to the right 
+      if(miss.side) side <- "right"
+      sidecode <- match(side, c("bottom", "left", "top", "right"))
+      if(!(sidecode %in% c(2,4)))
+        warning(paste("side =", sQuote(side),
+                      "is not consistent with vertical orientation"))
+      pos <- c(ylim[1], xlim[1], ylim[2], xlim[2])[sidecode]
+      ## draw axis
+      do.call.matched(graphics::axis,
+                      resolve.defaults(list(...),
+                                       list(side=sidecode, pos=pos, at=yp,
+                                            labels=ll, tick=FALSE, las=1)),
+                      extrargs=graphicsPars("axis"))
+    } else {
+      ## default axis position is below 
+      if(miss.side) side <- "bottom"
+      sidecode <- match(side, c("bottom", "left", "top", "right"))
+      if(!(sidecode %in% c(1,3)))
+        warning(paste("side =", sQuote(side),
+                      "is not consistent with horizontal orientation"))
+      pos <- c(ylim[1], xlim[1], ylim[2], xlim[2])[sidecode]
+      ## draw axis
+      do.call.matched(graphics::axis,
+                      resolve.defaults(list(...),
+                                       list(side = sidecode, pos = pos,
+                                            at = xp, labels=ll, tick=FALSE)),
+                      extrargs=graphicsPars("axis"))
+    } 
+  }
+  return(invisible(NULL))
+}
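+
+## Usage sketch (editorial example, continuing g2 from above):
+##   plot(g2, vertical=TRUE, side="right")   ## legend-style display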
+
+plan.legend.layout <- function(B, 
+                               ..., 
+                               side=c("bottom", "left", "top", "right"),
+                               sep=NULL,
+                               size=NULL,
+                               sep.frac=0.05,
+                               size.frac=0.05,
+                               started=FALSE,
+                               map=NULL) {
+  ## Determine size and position of a box containing legend or symbolmap
+  ## attached to a plot in region 'B'.
+  ##   sep, size are absolute distances;
+  ##   sep.frac, size.frac are fractions of the maximum sidelength of B.
+  side <- match.arg(side)
+  B <- as.rectangle(B)
+  Bsize <- max(sidelengths(B))
+  if(is.null(size)) {
+    size <- size.frac * Bsize
+  } else {
+    check.1.real(size)
+    stopifnot(size > 0)
+  }
+  if(is.null(sep)) {
+    sep <- sep.frac * Bsize
+  } else {
+    check.1.real(sep)
+    stopifnot(sep > 0)
+  }
+  if(is.null(map) || !inherits(map, "symbolmap")) {
+    ## no symbol map supplied: assume a nominal label of 8 characters
+    textlength <- 8
+    vv <- paste(rep("m", textlength), collapse="")
+  } else {
+    vv <- with(attr(map, "stuff"),
+               if(type == "discrete") inputs else prettyinside(range))
+    textlength <- max(nchar(paste(vv)))
+  }
+  if(started) {
+    ## plot already initialised: measure the labels in user coordinates
+    textwidth <- max(strwidth(vv))
+    textheight <- max(strheight(vv))
+    charsize <- textheight
+  } else {
+    ## the plot has not been initialised: guess character size
+    charsize <- diff(if(side %in% c("left", "right")) B$yrange else B$xrange)/40
+    textwidth <- charsize * textlength
+    textheight <- charsize
+  }
+  switch(side,
+         right={
+           ## symbols to right of image
+           b <- owin(B$xrange[2] + sep + c(0, size),
+                     B$yrange)
+           ## text to right of symbols
+           tt <- owin(b$xrange[2] + sep + c(0, textwidth),
+                      b$yrange)
+           iside <- 4
+         },
+         left={
+           ## symbols to left of image
+           b <- owin(B$xrange[1] - sep - c(size, 0),
+                     B$yrange)
+           ## text to left of symbols
+           tt <- owin(b$xrange[1] - sep - c(textwidth, 0),
+                      b$yrange)
+           iside <- 2
+         },
+         top={
+           ## symbols above image
+           b <- owin(B$xrange,
+                     B$yrange[2] + sep + c(0, size))
+           ## text above symbols
+           tt <- owin(b$xrange,
+                      b$yrange[2] + 3 * charsize + c(0, textheight))
+           iside <- 3
+         },
+         bottom={
+           ## symbols below image
+           b <- owin(B$xrange,
+                     B$yrange[1] - sep - c(size, 0))
+           ## text below symbols
+           tt <- owin(b$xrange,
+                      b$yrange[1] - 3 * charsize - c(textheight, 0))
+           iside <- 1
+         })
+  A <- boundingbox(B, b, tt)
+  return(list(A=A, B=B, b=b, tt=tt,
+              iside=iside, side=side, size=size, charsize=charsize, sep=sep))
+}
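+
+## Illustrative sketch of using this internal helper
+## (editor's example; assumes spatstat is attached):
+##   lay <- plan.legend.layout(owin(), side="right")
+##   lay$b    ## box in which the legend symbols will be drawn
+##   lay$tt   ## box in which the text labels will be placed
+##   lay$A    ## bounding box enclosing plot region plus legend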
+
+         
+  
+  
diff --git a/R/sysdata.rda b/R/sysdata.rda
new file mode 100644
index 0000000..ec15c49
Binary files /dev/null and b/R/sysdata.rda differ
diff --git a/R/terse.R b/R/terse.R
new file mode 100644
index 0000000..68e8027
--- /dev/null
+++ b/R/terse.R
@@ -0,0 +1,50 @@
+##  terse.R
+##
+##  code to control terseness and layout of printed output
+##
+##  $Revision: 1.11 $  $Date: 2016/09/23 02:07:24 $
+##
+
+
+## paragraph break in long output e.g. ppm
+parbreak <- function(terse = spatstat.options("terse")) {
+  if(waxlyrical('space', terse)) cat("\n")
+  return(invisible(NULL))
+}
+
+waxlyrical <- local({
+
+  ##  Values of spatstat.options('terse'):
+  ##        0    default
+  ##        1    suppress obvious wastage e.g. 'gory details'
+  ##        2    contract space between paragraphs in long output
+  ##        3    suppress extras e.g. standard errors and CI 
+  ##        4    suppress error messages eg failed to converge
+
+  TerseCutoff <- list(gory=1,
+                      space=2,
+                      extras=3,
+                      errors=4)
+
+  waxlyrical <- function(type, terse = spatstat.options("terse")) {
+    if(!(type %in% names(TerseCutoff)))
+      stop(paste("Internal error: unrecognised permission request",
+                 sQuote(type)),
+           call.=TRUE)
+    return(terse < TerseCutoff[[type]])
+  }
+  
+  waxlyrical
+  
+})
+
+ruletextline <- function(ch="-", n=getOption('width'),
+                         terse=spatstat.options('terse')) {
+  if(waxlyrical('space', terse)) {
+    chn <- paste(rep(ch, n), collapse="")
+    chn <- substr(chn, 1, n)
+    cat(chn, fill=TRUE)
+  }
+  return(invisible(NULL))
+}
+  
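+## Illustrative sketch of how the terse level gates output
+## (editor's example; assumes spatstat is attached):
+##   spatstat.options(terse=2)
+##   parbreak()                     ## prints nothing: 'space' suppressed at level 2
+##   waxlyrical("gory", terse=1)    ## FALSE: gory details suppressed at level 1
+##   waxlyrical("errors", terse=1)  ## TRUE: error messages still allowed
+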
diff --git a/R/tess.R b/R/tess.R
new file mode 100755
index 0000000..7291a3f
--- /dev/null
+++ b/R/tess.R
@@ -0,0 +1,892 @@
+#
+# tess.R
+#
+# support for tessellations
+#
+#   $Revision: 1.75 $ $Date: 2016/12/20 04:06:47 $
+#
+tess <- function(..., xgrid=NULL, ygrid=NULL, tiles=NULL, image=NULL,
+                 window=NULL, marks=NULL, keepempty=FALSE,
+                 unitname=NULL, check=TRUE) {
+  uname <- unitname
+  if(!is.null(window)) {
+    window <- as.owin(window)
+    if(is.null(uname)) uname <- unitname(window) 
+  }
+  isrect <- !is.null(xgrid) && !is.null(ygrid)
+  istiled <- !is.null(tiles)
+  isimage <- !is.null(image)
+  if(isrect + istiled + isimage != 1)
+    stop("Must specify either (xgrid, ygrid) or tiles or img")
+  if(isrect) {
+    stopifnot(is.numeric(xgrid) && all(diff(xgrid) > 0))
+    stopifnot(is.numeric(ygrid) && all(diff(ygrid) > 0))
+    if(!is.null(window))
+      warning("Argument 'window' ignored, because xgrid, grid are given")
+    window <- owin(range(xgrid), range(ygrid), unitname=uname)
+    ntiles <- (length(xgrid)-1) * (length(ygrid)-1)
+    out <- list(type="rect", window=window, xgrid=xgrid, ygrid=ygrid, n=ntiles)
+  } else if(istiled) {
+    stopifnot(is.list(tiles))
+    if(check) {
+      if(!all(sapply(tiles, is.owin)))
+        stop("Tiles must be a list of owin objects")
+      if(!is.null(uname)) {
+        ## attach new unit name to each tile
+        tiles <- lapply(tiles, "unitname<-", value=uname)
+      } else {
+        ## extract unit names from tiles, check agreement, use as unitname
+        uu <- unique(lapply(tiles, unitname))
+        uu <- uu[!sapply(uu, is.null)]
+        nun <- length(uu)
+        if(nun > 1)
+          stop("Tiles have inconsistent names for the unit of length")
+        if(nun == 1) {
+          ## use this unit name
+          uname <- uu[[1]]
+          if(!is.null(window))
+            unitname(window) <- uname
+        }
+      }
+    }
+    if(!keepempty && check) {
+      # remove empty tiles
+      isempty <- sapply(tiles, is.empty)
+      if(all(isempty))
+        stop("All tiles are empty")
+      if(any(isempty))
+        tiles <- tiles[!isempty]
+    }
+    ntiles <- length(tiles)
+    nam <- names(tiles)
+    lev <- if(!is.null(nam) && all(nzchar(nam))) nam else 1:ntiles
+    if(is.null(window)) 
+      window <- do.call(union.owin, unname(tiles))
+    if(is.mask(window) || any(unlist(lapply(tiles, is.mask)))) {
+      # convert to pixel image tessellation
+      window <- as.mask(window)
+      ima <- as.im(window)
+      ima$v[] <- NA
+      for(i in 1:ntiles)
+        ima[tiles[[i]]] <- i
+      ima <- ima[window, drop=FALSE]
+      ima <- eval.im(factor(ima, levels=1:ntiles))
+      levels(ima) <- lev
+      out <- list(type="image",
+                  window=window, image=ima, n=length(lev))
+    } else {
+      # tile list
+      window <- rescue.rectangle(window)
+      out <- list(type="tiled", window=window, tiles=tiles, n=length(tiles))
+    }
+  } else if(isimage) {
+    # convert to factor valued image
+    image <- as.im(image)
+    if(!is.null(uname)) unitname(image) <- uname
+    switch(image$type,
+           logical={
+             # convert to factor
+             if(keepempty) 
+               image <- eval.im(factor(image, levels=c(FALSE,TRUE)))
+             else
+               image <- eval.im(factor(image))
+           },
+           factor={
+             # eradicate unused levels
+             if(!keepempty) 
+               image <- eval.im(factor(image))
+           },
+           {
+             # convert to factor
+             image <- eval.im(factor(image))
+           })
+               
+    if(is.null(window)) window <- as.owin(image)
+    out <- list(type="image", window=window, image=image, n=length(levels(image)))
+  } else stop("Internal error: unrecognised format")
+  ## add marks!
+  if(!is.null(marks)) {
+    marks <- as.data.frame(marks)
+    if(nrow(marks) != out$n)
+      stop(paste("wrong number of marks:",
+                 nrow(marks), "should be", out$n),
+           call.=FALSE)
+    out$marks <- marks
+  }
+  class(out) <- c("tess", class(out))
+  return(out)
+}
+
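+## Illustrative sketches of the three formats
+## (editor's examples; assume spatstat is attached):
+##   tess(xgrid=0:4, ygrid=0:3)                   ## 4 by 3 grid of rectangles
+##   tess(tiles=list(owin(c(0,1)), owin(c(1,2), c(0,1))))
+##   tess(image=cut(distmap(cells), breaks=3))    ## tiles = levels of a factor image
+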
+is.tess <- function(x) { inherits(x, "tess") }
+
+print.tess <- function(x, ..., brief=FALSE) {
+  full <- !brief
+  if(full) cat("Tessellation\n")
+  win <- x$window
+  switch(x$type,
+         rect={
+           if(full) {
+             unitinfo <- summary(unitname(win))
+             if(equispaced(x$xgrid) && equispaced(x$ygrid)) 
+               splat("Tiles are equal rectangles, of dimension",
+                     signif(mean(diff(x$xgrid)), 5),
+                     "x",
+                     signif(mean(diff(x$ygrid)), 5),
+                     unitinfo$plural, " ", unitinfo$explain)
+             else
+               splat("Tiles are unequal rectangles")
+           }
+           splat(length(x$xgrid)-1, "by", length(x$ygrid)-1, "grid of tiles")
+         },
+         tiled={
+           if(full) {
+             if(win$type == "polygonal")
+               splat("Tiles are irregular polygons")
+             else
+               splat("Tiles are windows of general type")
+           }
+           splat(length(x$tiles), "tiles (irregular windows)")
+         },
+         image={
+           nlev <- length(levels(x$image))
+           if(full) {
+             splat("Tessellation is determined by a factor-valued image with",
+                   nlev, "levels")
+           } else splat(nlev, "tiles (levels of a pixel image)")
+         })
+  if(!is.null(marx <- x$marks)) {
+    m <- dim(marx)[2] %orifnull% 1
+    if(m == 1) splat("Tessellation is marked") else
+    splat("Tessellation has", m, "columns of marks:",
+          commasep(sQuote(colnames(marx))))
+  }
+  if(full) print(win)
+  invisible(NULL)
+}
+
+unitname.tess <- function(x) unitname(x$window)
+
+"unitname<-.tess" <- function(x, value) {
+  unitname(x$window) <- value
+  switch(x$type,
+         rect={},
+         tiled={
+           x$tiles <- lapply(x$tiles, "unitname<-", value)
+         },
+         image={
+           unitname(x$image) <- value
+         })
+  return(x)
+}
+
+plot.tess <- local({
+
+  plotem <- function(z, ..., col=NULL) {
+    if(is.null(col))
+      plot(z, ..., add=TRUE)
+    else if(z$type != "mask")
+      plot(z, ..., border=col, add=TRUE)
+    else plot(z, ..., col=col, add=TRUE)
+  }
+
+  plotpars <- c("sub", "lty", "lwd",
+                "cex.main", "col.main", "font.main",
+                "cex.sub", "col.sub", "font.sub", "border")
+
+  plot.tess <- function(x, ..., main, add=FALSE, show.all=!add, col=NULL,
+                        do.plot=TRUE,
+                        do.labels=FALSE, labels=tilenames(x),
+                        labelargs=list()) {
+    if(missing(main) || is.null(main))
+      main <- short.deparse(substitute(x))
+    switch(x$type,
+           rect={
+             win <- x$window
+             result <-
+               do.call.matched(plot.owin,
+                               resolve.defaults(list(x=win, main=main,
+                                                     add=add,
+                                                     show.all=show.all,
+                                                     do.plot=do.plot),
+                                                list(...)),
+                               extrargs=plotpars)
+             if(do.plot) {
+               xg <- x$xgrid
+               yg <- x$ygrid
+               do.call.matched(segments,
+                               resolve.defaults(list(x0=xg, y0=win$yrange[1],
+                                                     x1=xg, y1=win$yrange[2]),
+                                                list(col=col),
+                                                list(...),
+                                                .StripNull=TRUE))
+               do.call.matched(segments,
+                               resolve.defaults(list(x0=win$xrange[1], y0=yg,
+                                                     x1=win$xrange[2], y1=yg),
+                                                list(col=col),
+                                                list(...),
+                                                .StripNull=TRUE))
+             }
+           },
+           tiled={
+             result <-
+               do.call.matched(plot.owin,
+                               resolve.defaults(list(x=x$window, main=main,
+                                                     add=add,
+                                                     show.all=show.all,
+                                                     do.plot=do.plot),
+                                                list(...)),
+                               extrargs=plotpars)
+             if(do.plot) {
+               til <- tiles(x)
+               lapply(til, plotem, ..., col=col)
+             }
+           },
+           image={
+             result <-
+               do.call(plot,
+                       resolve.defaults(list(x$image, add=add, main=main,
+                                             show.all=show.all,
+                                             do.plot=do.plot),
+                                        list(...),
+                                        list(valuesAreColours=FALSE)))
+           })
+    if(do.plot && do.labels) {
+      labels <- paste(as.vector(labels))
+      til <- tiles(x)
+      incircles <- lapply(til, incircle)
+      x0 <- sapply(incircles, getElement, name="x")
+      y0 <- sapply(incircles, getElement, name="y")
+      do.call.matched(text.default,
+                      resolve.defaults(list(x=x0, y = y0),
+                                       list(labels=labels),
+                                       labelargs),
+                      funargs=graphicsPars("text"))
+    }
+    return(invisible(result))
+  }
+
+  plot.tess
+})
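+
+## e.g. (editor's illustration; assumes spatstat is attached):
+##   X <- runifpoint(10)
+##   plot(dirichlet(X), do.labels=TRUE)    ## Dirichlet tessellation, tiles labelled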
+
+
+"[<-.tess" <- function(x, i, ..., value) {
+  switch(x$type,
+         rect=,
+         tiled={
+           til <- tiles(x)
+           til[i] <- value
+           ok <- !unlist(lapply(til, is.null))
+           x <- tess(tiles=til[ok])
+         },
+         image={
+           stop("Cannot assign new values to subsets of a pixel image")
+         })
+  return(x)
+}
+  
+"[.tess" <- function(x, i, ...) {
+  trap.extra.arguments(..., .Context="in [.tess")
+  if(missing(i)) return(x)
+  if(is.owin(i))
+    return(intersect.tess(x, i))
+  switch(x$type,
+         rect=,
+         tiled={
+           til <- tiles(x)[i]
+           return(tess(tiles=til))
+         },
+         image={
+           img <- x$image
+           oldlev <- levels(img)
+           newlev <- unique(oldlev[i])
+           img <- eval.im(factor(img, levels=newlev))
+           return(tess(image=img))
+         })
+}
+
+tiles <- function(x) {
+  switch(x$type,
+         rect={
+           out <- list()
+           xg <- x$xgrid
+           yg <- x$ygrid
+           nx <- length(xg) - 1
+           ny <- length(yg) - 1
+           for(j in rev(seq_len(ny)))
+             for(i in seq_len(nx)) {
+               winij <- owin(xg[c(i,i+1)], yg[c(j,j+1)])
+               dout <- list(winij)
+               names(dout) <- paste("Tile row ", ny-j+1, ", col ", i,
+                                    sep="")
+               out <- append(out, dout)
+             }
+         },
+         tiled={
+           out <- x$tiles
+           if(is.null(names(out)))
+             names(out) <- paste("Tile", seq_along(out))
+         },
+         image={
+           out <- list()
+           ima <- x$image
+           lev <- levels(ima)
+           for(i in seq_along(lev))
+             out[[i]] <- solutionset(ima == lev[i])
+           names(out) <- paste(lev)
+         })
+  out <- as.solist(out)
+  return(out)
+}
+
+tiles.empty <- function(x) {
+  stopifnot(is.tess(x))
+  switch(x$type,
+         rect = {
+           nx <- length(x$xgrid) - 1
+           ny <- length(x$ygrid) - 1
+           ans <- rep(FALSE, nx * ny)
+         },
+         tiled = {
+           ans <- sapply(x$tiles, is.empty)
+         },
+         image = {
+           ans <- (table(x$image[]) == 0)
+         })
+  return(ans)
+}
+           
+tilenames <- function(x) {
+  stopifnot(is.tess(x))
+  switch(x$type,
+         rect={
+           if(!is.null(x$tilenames)) {
+             out <- x$tilenames
+           } else {
+             nx <- length(x$xgrid) - 1
+             ny <- length(x$ygrid) - 1
+             ij <- expand.grid(1:nx, 1:ny)
+             out <- paste0("Tile row ", ij[,2], ", col ", ij[,1])
+           }
+         },
+         tiled={
+           out <- names(x$tiles)
+           if(sum(nzchar(out)) != x$n)
+             out <- paste("Tile", seq_len(x$n))
+         },
+         image={
+           out <- levels(x$image)
+         }
+         )
+  return(as.character(out))
+}
+
+"tilenames<-" <- function(x, value) {
+  stopifnot(is.tess(x))
+  if(!is.null(value)) {
+    ## validate length
+    value <- as.character(value)
+    nv <- length(value)
+    switch(x$type,
+           rect = {
+             nx <- length(x$xgrid) - 1
+             ny <- length(x$ygrid) - 1
+             n <- nx * ny
+           },
+           tiled = { n <- length(x$tiles) },
+           image = { n <- length(levels(x$image)) })
+    if(nv != n)
+      stop("Replacement value has wrong length",
+           paren(paste(nv, "instead of", n)))
+  }
+  switch(x$type,
+         rect={
+           x$tilenames <- value
+         },
+         tiled={
+           names(x$tiles) <- value
+         },
+         image={
+           levels(x$image) <- value %orifnull% seq_along(levels(x$image))
+         }
+         )
+  return(x)
+}
+
+marks.tess <- function(x, ...) {
+  stopifnot(is.tess(x))
+  return(x$marks)
+}
+
+"marks<-.tess" <- function(x, ..., value) {
+  stopifnot(is.tess(x))
+  if(!is.null(value)) {
+    value <- as.data.frame(value)
+    if(nrow(value) != x$n)
+      stop(paste("replacement value for marks has wrong length:",
+                 nrow(value), "should be", x$n),
+           call.=FALSE)
+    rownames(value) <- NULL
+    if(ncol(value) == 1) colnames(value) <- "marks"
+  }
+  x$marks <- value
+  return(x)
+}
+
+unmark.tess <- function(X) { marks(X) <- NULL; return(X) }
+
+tile.areas <- function(x) {
+  stopifnot(is.tess(x))
+  switch(x$type,
+         rect={
+           xg <- x$xgrid
+           yg <- x$ygrid
+#           nx <- length(xg) - 1 
+#           ny <- length(yg) - 1
+           a <- outer(rev(diff(yg)), diff(xg), "*")
+           a <- as.vector(t(a))
+           names(a) <- as.vector(t(tilenames(x)))
+         },
+         tiled={
+           a <- unlist(lapply(x$tiles, area))
+         },
+         image={
+           z <- x$image
+           a <- table(z$v) * z$xstep * z$ystep
+         })
+  return(a)
+}
+
+         
+as.im.tess <- function(X, W=NULL, ...,
+                       eps=NULL, dimyx=NULL, xy=NULL,
+                       na.replace=NULL) {
+  # if W is present, it may have to be converted
+  if(!is.null(W)) {
+    stopifnot(is.owin(W))
+    if(W$type != "mask")
+      W <- as.mask(W, eps=eps, dimyx=dimyx, xy=xy)
+  } 
+  switch(X$type,
+         image={
+           out <- as.im(X$image, W=W, eps=eps, dimyx=dimyx, xy=xy,
+                        na.replace=na.replace)
+         },
+         tiled={
+           if(is.null(W))
+             W <- as.mask(as.owin(X), eps=eps, dimyx=dimyx, xy=xy)
+           til <- X$tiles
+           ntil <- length(til)
+           nama <- names(til)
+           if(is.null(nama) || !all(nzchar(nama)))
+             nama <- paste(seq_len(ntil))
+           xy <- list(x=W$xcol, y=W$yrow)
+           for(i in seq_len(ntil)) {
+             indic <- as.mask(til[[i]], xy=xy)
+             tag <- as.im(indic, value=i)
+             if(i == 1) {
+               out <- tag
+               outv <- out$v
+             } else {
+               outv <- pmin.int(outv, tag$v, na.rm=TRUE)
+             }
+           }
+           out <- im(factor(outv, levels=seq_len(ntil), labels=nama),
+                     out$xcol, out$yrow)
+           unitname(out) <- unitname(W)
+         },
+         rect={
+           if(is.null(W))
+             out <- as.im(as.rectangle(X), eps=eps, dimyx=dimyx, xy=xy)
+           else
+             out <- as.im(W)
+           xg <- X$xgrid
+           yg <- X$ygrid
+           nrows <- length(yg) - 1
+           ncols <- length(xg) - 1
+           jx <- findInterval(out$xcol, xg, rightmost.closed=TRUE)
+           iy <- findInterval(out$yrow, yg, rightmost.closed=TRUE)
+           M <- as.matrix(out)
+           Jcol <- jx[col(M)]
+           Irow <- nrows - iy[row(M)] + 1
+           Ktile <- Jcol + ncols * (Irow - 1)
+           Ktile <- factor(Ktile, levels=seq_len(nrows * ncols))
+           out <- im(Ktile, xcol=out$xcol, yrow=out$yrow,
+                     unitname=unitname(X))
+         }
+         )
+  return(out)
+}
+
+nobjects.tess <- function(x) {
+  switch(x$type,
+         image = length(levels(x$image)),
+         rect = (length(x$xgrid)-1) * (length(x$ygrid)-1),
+         tiled = length(x$tiles))
+}
+  
+as.function.tess <- function(x, ..., values=NULL) {
+  V <- x
+  if(is.null(values)) {
+    f <- function(x,y) { tileindex(x,y,V) }
+  } else {
+    if(length(values) != nobjects(x))
+      stop("Length of 'values' should equal the number of tiles", call.=FALSE)
+    f <- function(x,y) { values[as.integer(tileindex(x,y,V))] }
+  }
+  g <- funxy(f, Window(V))
+  return(g)
+}
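+
+## Illustrative sketch (editor's example):
+##   Z <- tess(xgrid=0:2, ygrid=0:2)
+##   f <- as.function(Z, values=c(10, 20, 30, 40))
+##   f(0.5, 1.5)    ## value attached to the tile containing (0.5, 1.5)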
+
+tileindex <- function(x, y, Z) {
+  stopifnot(is.tess(Z))
+  stopifnot(length(x) == length(y))
+  switch(Z$type,
+         rect={
+           jx <- findInterval(x, Z$xgrid, rightmost.closed=TRUE)
+           iy <- findInterval(y, Z$ygrid, rightmost.closed=TRUE)
+           nrows <- length(Z$ygrid) - 1
+           ncols <- length(Z$xgrid) - 1
+           iy[iy < 1 | iy > nrows] <- NA
+           jx[jx < 1 | jx > ncols] <- NA
+           jcol <- jx
+           irow <- nrows - iy + 1
+           ktile <- jcol + ncols * (irow - 1)
+           m <- factor(ktile, levels=seq_len(nrows*ncols))
+           ij <- expand.grid(j=seq_len(ncols),i=seq_len(nrows))
+           levels(m) <- paste("Tile row ", ij$i, ", col ", ij$j, sep="")
+         },
+         tiled={
+           n <- length(x)
+           todo <- seq_len(n)
+           nt <- length(Z$tiles)
+           m <- integer(n)
+           for(i in 1:nt) {
+             ti <- Z$tiles[[i]]
+             hit <- inside.owin(x[todo], y[todo], ti)
+             if(any(hit)) {
+               m[todo[hit]] <- i
+               todo <- todo[!hit]
+             }
+             if(length(todo) == 0)
+               break
+           }
+           m[m == 0] <- NA
+           nama <- names(Z$tiles)
+           lev <- seq_len(nt)
+           lab <- if(!is.null(nama) && all(nzchar(nama))) nama else paste("Tile", lev)
+           m <- factor(m, levels=lev, labels=lab)
+         },
+         image={
+           Zim <- Z$image
+           m <- factor(Zim[list(x=x, y=y), drop=FALSE], levels=levels(Zim))
+         }
+         )
+  return(m)
+}
+  
+as.tess <- function(X) {
+  UseMethod("as.tess")
+}
+
+as.tess.tess <- function(X) {
+  fields <- 
+    switch(X$type,
+           rect={ c("xgrid", "ygrid") },
+           tiled={ "tiles" },
+           image={ "image" },
+           stop(paste("Unrecognised tessellation type", sQuote(X$type))))
+  fields <- c(c("type", "window", "n", "marks"), fields)
+  X <- unclass(X)[fields]
+  class(X) <- c("tess", class(X))
+  return(X)
+}
+
+as.tess.im <- function(X) {
+  return(tess(image = X))
+}
+
+as.tess.list <- function(X) {
+  W <- lapply(X, as.owin)
+  return(tess(tiles=W))
+}
+
+as.tess.owin <- function(X) {
+  return(tess(tiles=list(X)))
+}
+
+domain.tess <- Window.tess <- function(X, ...) { as.owin(X) } 
+
+intersect.tess <- function(X, Y, ..., keepmarks=FALSE) {
+  X <- as.tess(X)
+  if(is.owin(Y) && Y$type == "mask") {
+    # special case
+    # convert to pixel image 
+    result <- as.im(Y)
+    Xtiles <- tiles(X)
+    for(i in seq_along(Xtiles)) {
+      tilei <- Xtiles[[i]]
+      result[tilei] <- i
+    }
+    result <- result[Y, drop=FALSE]
+    out <- tess(image=result, window=Y)
+    if(keepmarks) marks(out) <- marks(X)
+    return(out)
+  }
+  if(is.owin(Y)) {
+    # efficient code when Y is a window, retaining names of tiles of X
+    Ztiles <- lapply(tiles(X), intersect.owin, B=Y, ..., fatal=FALSE)
+    isempty <- sapply(Ztiles, is.empty)
+    Ztiles <- Ztiles[!isempty]
+    Xwin <- as.owin(X)
+    Ywin <- Y
+    if(keepmarks) {
+      marksX <- marks(X)
+      if(!is.null(marksX))
+        marx <- as.data.frame(marksX)[!isempty, ]
+    }
+  } else {
+    # general case
+    Y <- as.tess(Y)
+    Xtiles <- tiles(X)
+    Ytiles <- tiles(Y)
+    Ztiles <- list()
+    namesX <- tilenames(X)
+    namesY <- tilenames(Y)
+    if(keepmarks) {
+      Xmarks <- as.data.frame(marks(X))
+      Ymarks <- as.data.frame(marks(Y))
+      gotXmarks <- (ncol(Xmarks) > 0)
+      gotYmarks <- (ncol(Ymarks) > 0)
+      if(gotXmarks && gotYmarks) {
+        colnames(Xmarks) <- paste0("X", colnames(Xmarks))
+        colnames(Ymarks) <- paste0("Y", colnames(Ymarks))
+      }
+      if(gotXmarks || gotYmarks) {
+        marx <- if(gotXmarks && gotYmarks) {
+          cbind(Xmarks[integer(0), , drop=FALSE],
+                Ymarks[integer(0), , drop=FALSE])
+        } else if(gotXmarks) {
+          Xmarks[integer(0), , drop=FALSE]
+        } else {
+          Ymarks[integer(0), , drop=FALSE]
+        }
+      } else keepmarks <- FALSE
+    }
+    for(i in seq_along(Xtiles)) {
+      Xi <- Xtiles[[i]]
+      Ti <- lapply(Ytiles, intersect.owin, B=Xi, ..., fatal=FALSE)
+      isempty <- sapply(Ti, is.empty)
+      nonempty <- !isempty
+      if(any(nonempty)) {
+        Ti <- Ti[nonempty]
+        names(Ti) <- paste(namesX[i], namesY[nonempty], sep="x")
+        Ztiles <- append(Ztiles, Ti)
+        if(keepmarks) {
+          extra <- if(gotXmarks && gotYmarks) {
+            data.frame(X=Xmarks[i, ,drop=FALSE],
+                       Y=Ymarks[nonempty, ,drop=FALSE],
+                       row.names=NULL)
+          } else if(gotYmarks) {
+            Ymarks[nonempty, ,drop=FALSE]
+          } else {
+            Xmarks[rep(i, sum(nonempty)), ,drop=FALSE]
+          }
+          marx <- rbind(marx, extra)
+        }
+      }
+    }
+    Xwin <- as.owin(X)
+    Ywin <- as.owin(Y)
+  }
+  Zwin <- intersect.owin(Xwin, Ywin)
+  out <- tess(tiles=Ztiles, window=Zwin)
+  if(keepmarks) 
+    marks(out) <- marx
+  return(out)
+}
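+
+## e.g. (editor's illustration):
+##   A <- tess(xgrid=0:3, ygrid=0:3)
+##   B <- disc(radius=1, centre=c(1.5, 1.5))
+##   plot(intersect.tess(A, B))    ## grid tiles clipped to the disc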
+
+
+bdist.tiles <- local({
+
+  vdist <- function(x,w) {
+    z <- as.ppp(vertices(x), W=w, check=FALSE)
+    min(bdist.points(z))
+  }
+  edist <- function(x,b) {
+    xd <- crossdist(edges(x, check=FALSE), b, type="separation")
+    min(xd)
+  }
+
+  bdist.tiles <-  function(X) {
+    if(!is.tess(X))
+      stop("X must be a tessellation")
+    W <- as.owin(X)
+    switch(X$type,
+           rect=,
+           tiled={
+             tt <- tiles(X)
+             if(is.convex(W)) {
+               # distance is minimised at a tile vertex
+               d <- sapply(tt, vdist, w=W)
+             } else {
+               # coerce everything to polygons
+               W  <- as.polygonal(W)
+               tt <- lapply(tt, as.polygonal)
+               # compute min dist from tile edges to window edges
+               d <- sapply(tt, edist, b=edges(W))
+             }
+           },
+           image={
+             Xim <- X$image
+             # compute boundary distance for each pixel
+             bd <- bdist.pixels(as.owin(Xim), style="image")
+             bd <- bd[W, drop=FALSE]
+             # split over tiles
+             bX <- split(bd, X)
+             # compute minimum distance over each level of factor
+             d <- sapply(bX, function(z) { summary(z)$min })
+           }
+           )
+    return(d)
+  }
+  bdist.tiles
+})
+
+
+## ......... geometrical transformations ..................
+
+shift.tess <- function(X, ...) {
+  Y <- X
+  Y$window <- wY <- shift(X$window, ...)
+  vec <- getlastshift(wY)
+  switch(X$type,
+         rect={
+           Y$xgrid <- Y$xgrid + vec[1]
+           Y$ygrid <- Y$ygrid + vec[2]
+         },
+         tiled={
+           Y$tiles <- lapply(Y$tiles, shift, vec=vec)
+         },
+         image = {
+           Y$image <- shift(Y$image, vec)
+         })
+  attr(Y, "lastshift") <- vec
+  return(Y)
+}
+
+affine.tess <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) {
+  Y <- X
+  Y$window <- affine(X$window, mat=mat, vec=vec, ...)
+  switch(Y$type,
+         rect = {
+           if(all(mat == diag(diag(mat)))) {
+             ## result is rectangular
+             Y$xgrid <- sort(mat[1,1] * X$xgrid + vec[1])
+             Y$ygrid <- sort(mat[2,2] * X$ygrid + vec[2])
+           } else {
+             ## shear transformation; treat rectangles as general tiles
+             Y <- tess(tiles=tiles(X), window=Y$window)
+             Y$tiles <- lapply(Y$tiles, affine, mat=mat, vec=vec, ...)
+           }
+         },
+         tiled={
+           Y$tiles <- lapply(Y$tiles, affine, mat=mat, vec=vec, ...)
+         },
+         image = {
+           Y$image <- affine(Y$image, mat=mat, vec=vec, ...)
+         })
+  return(Y)
+}
+
+reflect.tess <- function(X) {
+  Y <- X
+  Y$window <- reflect(Y$window)
+  switch(X$type,
+         rect = {
+           Y$xgrid <- rev(- Y$xgrid)
+           Y$ygrid <- rev(- Y$ygrid)
+         },
+         tiled = {
+           Y$tiles <- lapply(Y$tiles, reflect)
+         },
+         image = {
+           Y$image <- reflect(Y$image)
+         })
+  return(Y)
+}
+
+scalardilate.tess <- function(X, f, ...) {
+  Y <- X
+  Y$window <- scalardilate(X$window, f, ...)
+  switch(X$type,
+         rect = {
+           Y$xgrid <- f * Y$xgrid
+           Y$ygrid <- f * Y$ygrid
+         },
+         tiled = {
+           Y$tiles <- lapply(Y$tiles, scalardilate, f=f, ...)
+         },
+         image = {
+           Y$image <- scalardilate(Y$image, f=f, ...)
+         })
+  return(Y)
+}
+
+rotate.tess <- function(X, angle=pi/2, ..., centre=NULL) {
+  if(angle %% (2 * pi) == 0) return(X)
+  if(!is.null(centre)) {
+    X <- shift(X, origin=centre)
+    negorigin <- getlastshift(X)
+  } else negorigin <- NULL
+  Y <- X
+  Y$window <- rotate(X$window, angle=angle, ...)
+  switch(X$type,
+         rect = {
+           if(angle %% (pi/2) == 0) {
+             ## result is rectangular
+             co <- round(cos(angle))
+             si <- round(sin(angle))
+             Y$xgrid <- sort((if(co == 0) 0 else (co * X$xgrid)) -
+                             (if(si == 0) 0 else (si * X$ygrid)))
+             Y$ygrid <- sort((if(si == 0) 0 else (si * X$xgrid)) +
+                             (if(co == 0) 0 else (co * X$ygrid)))
+           } else {
+             ## general tessellation
+             Y <- tess(tiles=lapply(tiles(X), rotate, angle=angle, ...),
+                       window=Y$window)
+           }
+         },
+         tiled = {
+           Y$tiles <- lapply(X$tiles, rotate, angle=angle, ...)
+         },
+         image = {
+           Y$image <- rotate(X$image, angle=angle, ...)
+         })
+  if(!is.null(negorigin))
+    Y <- shift(Y, -negorigin)
+  return(Y)
+}
+  
+as.data.frame.tess <- function(x, ...) {
+  switch(x$type,
+         rect =,
+         tiled = {
+           y <- lapply(tiles(x), as.data.frame, ...)
+           z <- mapply(assignDFcolumn,
+                       x=y, value=tilenames(x),
+                       MoreArgs=list(name="Tile", ...),
+                       SIMPLIFY=FALSE)
+           z <- do.call(rbind, z)
+           row.names(z) <- NULL
+         },
+         image = {
+           z <- as.data.frame(x$image, ...)
+           if(!is.na(m <- match("value", colnames(z))))
+             colnames(z)[m] <- "Tile"
+         },
+         {
+           z <- NULL
+           warning("Unrecognised type of tessellation")
+         })
+  return(z)
+}
diff --git a/R/texture.R b/R/texture.R
new file mode 100644
index 0000000..5500bbc
--- /dev/null
+++ b/R/texture.R
@@ -0,0 +1,373 @@
+##
+##     texture.R
+##
+##     Texture plots and texture maps
+##
+##  $Revision: 1.15 $ $Date: 2016/02/16 01:39:12 $
+
+### .................. basic graphics .............................
+
+## put hatching in a window
+add.texture <- function(W, texture=4, spacing=NULL, ...) {
+  if(is.data.frame(texture)) {
+    ## texture = f(x) where f is a texturemap
+    out <- do.call(add.texture,
+                   resolve.defaults(list(W=W, spacing=spacing),
+                                    list(...),
+                                    as.list(texture)))
+    return(out)
+  }
+  ## texture should be an integer
+  stopifnot(is.owin(W))
+  stopifnot(texture %in% 1:8)
+  if(is.null(spacing)) {
+    spacing <- diameter(as.rectangle(W))/50
+  } else {
+    check.1.real(spacing)
+    stopifnot(spacing > 0)
+  }
+  P <- L <- NULL
+  switch(texture,
+         {
+           ## texture 1: graveyard
+           P <- rsyst(W, dx=3*spacing)
+         },
+         {
+           ## texture 2: vertical lines
+           L <- rlinegrid(90, spacing, W)[W]
+         },
+         {
+           ## texture 3: horizontal lines
+           L <- rlinegrid(0, spacing, W)[W]
+         },
+         {
+           ## texture 4: forward slashes
+           L <- rlinegrid(45, spacing, W)[W]
+         },
+         {
+           ## texture 5: back slashes
+           L <- rlinegrid(135, spacing, W)[W]
+         },
+         {
+           ## texture 6: horiz/vert grid
+           L0 <- rlinegrid(0, spacing, W)[W]
+           L90 <- rlinegrid(90, spacing, W)[W]
+           L <- superimpose(L0, L90, W=W, check=FALSE)
+         },
+         {
+           ## texture 7: diagonal grid
+           L45 <- rlinegrid(45, spacing, W)[W]
+           L135 <- rlinegrid(135, spacing, W)[W]
+           L <- superimpose(L45, L135, W=W, check=FALSE)
+         },
+         {
+           ## texture 8: hexagons
+           H <- hextess(W, spacing, offset=runifpoint(1, W))
+           H <- intersect.tess(H, W)
+           do.call.matched(plot.tess,
+                           resolve.defaults(list(x=H, add=TRUE),
+                                            list(...)))
+         })
+  if(!is.null(P))
+    do.call.matched(plot.ppp,
+                    resolve.defaults(list(x=P, add=TRUE),
+                                     list(...),
+                                     list(chars=3, cex=0.2)),
+                    extrargs=c("lwd", "col", "cols", "pch"))
+  if(!is.null(L))
+    do.call.matched(plot.psp,
+                    resolve.defaults(list(x=L, add=TRUE),
+                                     list(...)),
+                    extrargs=c("lwd","lty","col"))
+  return(invisible(NULL))
+}
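+
+## e.g. (editor's illustration):
+##   W <- owin(c(0,1), c(0,1))
+##   plot(W, main="")
+##   add.texture(W, texture=4)    ## hatch W with forward slashes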
+
+## .................. texture maps ................................
+
+## create a texture map
+
+texturemap <- function(inputs, textures, ...) {
+  argh <- list(...)
+  if(length(argh) > 0) {
+    isnul <- unlist(lapply(argh, is.null))
+    argh <- argh[!isnul]
+  }
+  if(missing(textures) || is.null(textures)) textures <- seq_along(inputs)
+  df <- do.call(data.frame,
+                append(list(input=inputs, texture=textures), argh))
+  f <- function(x) {
+    df[match(x, df$input), -1, drop=FALSE]
+  }
+  class(f) <- c("texturemap", class(f))
+  attr(f, "df") <- df
+  return(f)
+}
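+
+## e.g. (editor's illustration):
+##   tm <- texturemap(inputs=c("low", "high"), textures=c(2, 5))
+##   tm("high")    ## data frame row giving the texture (and graphics pars) for "high"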
+
+print.texturemap <- function(x, ...) {
+  cat("Texture map\n")
+  print(attr(x, "df"))
+  return(invisible(NULL))
+}
+
+## plot a texture map
+
+plot.texturemap <- local({
+
+  ## recognised additional arguments to axis()
+  axisparams <- c("cex", 
+                  "cex.axis", "cex.lab",
+                  "col.axis", "col.lab",
+                  "font.axis", "font.lab",
+                  "las", "mgp", "xaxp", "yaxp",
+                  "tck", "tcl", "xpd")
+
+  # rules to determine the map dimensions when one dimension is given
+  widthrule <- function(heightrange, separate, n, gap) {
+    if(separate) 1 else diff(heightrange)/10
+  }
+  heightrule <- function(widthrange, separate, n, gap) {
+    (if(separate) (n + (n-1)*gap) else 10) * diff(widthrange) 
+  }
+
+  plot.texturemap <- function(x, ..., main,
+                             xlim=NULL, ylim=NULL, vertical=FALSE, axis=TRUE,
+                             labelmap=NULL, gap=0.25,
+                             spacing=NULL, add=FALSE) {
+    if(missing(main))
+      main <- short.deparse(substitute(x))
+    df <- attr(x, "df")
+#    textures <- df$textures
+    n   <- nrow(df)
+    check.1.real(gap, "In plot.texturemap")
+    explain.ifnot(gap >= 0, "In plot.texturemap")
+    separate <- (gap > 0)
+    if(is.null(labelmap)) {
+      labelmap <- function(x) x
+    } else stopifnot(is.function(labelmap))
+    ## determine rectangular window for display
+    rr <- c(0, n + (n-1)*gap)
+    if(is.null(xlim) && is.null(ylim)) {
+      u <- widthrule(rr, separate, n, gap)
+      if(!vertical) {
+        xlim <- rr
+        ylim <- c(0,u)
+      } else {
+        xlim <- c(0,u)
+        ylim <- rr
+      }
+    } else if(is.null(ylim)) {
+      if(!vertical) 
+        ylim <- c(0, widthrule(xlim, separate, n, gap))
+      else 
+        ylim <- c(0, heightrule(xlim, separate, n, gap))
+    } else if(is.null(xlim)) {
+      if(!vertical) 
+        xlim <- c(0, heightrule(ylim, separate, n, gap))
+      else 
+        xlim <- c(0, widthrule(ylim, separate, n, gap))
+    } 
+    width <- diff(xlim)
+    height <- diff(ylim)
+    ## determine boxes to be filled with textures,
+    if(vertical) {
+      boxheight <- min(width, height/(n + (n-1) * gap))
+      vgap   <- (height - n * boxheight)/(n-1)
+      boxes <- list()
+      for(i in 1:n) boxes[[i]] <-
+        owin(xlim, ylim[1] + c(i-1, i) * boxheight + (i-1) * vgap)
+    } else {
+      boxwidth <- min(height, width/(n + (n-1) * gap))
+      hgap   <- (width - n * boxwidth)/(n-1)
+      boxes <- list()
+      for(i in 1:n) boxes[[i]] <-
+        owin(xlim[1] + c(i-1, i) * boxwidth + (i-1) * hgap, ylim)
+    }
+    boxsize <- shortside(boxes[[1]])
+    if(is.null(spacing))
+      spacing <- 0.1 * boxsize
+    
+    # .......... initialise plot ...............................
+    if(!add)
+      do.call.matched(plot.default,
+                      resolve.defaults(list(x=xlim, y=ylim,
+                                            type="n", main=main,
+                                            axes=FALSE, xlab="", ylab="",
+                                            asp=1.0),
+                                       list(...)))
+    
+    ## ................ plot texture blocks .................
+    for(i in 1:n) {
+      dfi <- df[i,,drop=FALSE]
+      add.texture(W=boxes[[i]], texture=dfi, ..., spacing=spacing)
+      plot(boxes[[i]], add=TRUE)
+    }
+
+    if(axis) {
+      # ................. draw annotation ..................
+      la <- paste(labelmap(df$input))
+      if(!vertical) {
+        ## add horizontal axis/annotation
+        at <- lapply(lapply(boxes, centroid.owin), "getElement", name="x")
+        # default axis position is below the ribbon (side=1)
+        sidecode <- resolve.1.default("side", list(...), list(side=1))
+        if(!(sidecode %in% c(1,3)))
+          warning(paste("side =", sidecode,
+                        "is not consistent with horizontal orientation"))
+        pos <- c(ylim[1], xlim[1], ylim[2], xlim[2])[sidecode]
+        # don't draw axis lines if plotting separate blocks
+        lwd0 <- if(separate) 0 else 1
+        # draw axis
+        do.call.matched(graphics::axis,
+                        resolve.defaults(list(...),
+                                         list(side = 1, pos = pos, at = at),
+                                         list(labels=la, lwd=lwd0)),
+                        extrargs=axisparams)
+      } else {
+        ## add vertical axis
+        at <- lapply(lapply(boxes, centroid.owin), "getElement", name="y")
+        # default axis position is to the right of ribbon (side=4)
+        sidecode <- resolve.1.default("side", list(...), list(side=4))
+        if(!(sidecode %in% c(2,4)))
+          warning(paste("side =", sidecode,
+                        "is not consistent with vertical orientation"))
+        pos <- c(ylim[1], xlim[1], ylim[2], xlim[2])[sidecode]
+        # don't draw axis lines if plotting separate blocks
+        lwd0 <- if(separate) 0 else 1
+        # draw labels horizontally if plotting separate blocks
+        las0 <- if(separate) 1 else 0
+        # draw axis
+        do.call.matched(graphics::axis,
+                        resolve.defaults(list(...),
+                                         list(side=4, pos=pos, at=at),
+                                         list(labels=la, lwd=lwd0, las=las0)),
+                        extrargs=axisparams)
+      }
+    }
+    invisible(NULL)
+  }
+
+  plot.texturemap
+})
+
+## plot a pixel image using textures
+
+textureplot <- local({
+
+  textureplot <- function(x, ..., main, add=FALSE, clipwin=NULL, do.plot=TRUE,
+                          border=NULL, col=NULL, lwd=NULL, lty=NULL,
+                          spacing=NULL, textures=1:8,
+                          legend=TRUE,
+                          leg.side=c("right", "left", "bottom", "top"),
+                          legsep=0.1, legwid=0.2) {
+    if(missing(main))
+      main <- short.deparse(substitute(x))
+    if(!(is.im(x) || is.tess(x))) {
+      x <- try(as.tess(x), silent=TRUE)
+      if(inherits(x, "try-error")) 
+        x <- try(as.im(x), silent=TRUE)
+      if(inherits(x, "try-error")) 
+        stop("x must be a pixel image or a tessellation", call.=FALSE)
+    }
+    leg.side <- match.arg(leg.side)
+    if(!is.null(clipwin))
+      x <- x[clipwin, drop=FALSE]
+    if(is.im(x)) {
+      if(x$type != "factor")
+        x <- eval.im(factor(x))
+      levX <- levels(x)
+    } else {
+      tilX <- tiles(x)
+      levX <- names(tilX)
+    }
+    n <- length(levX)
+    if(n > 8)
+      stop("Too many factor levels or tiles: maximum is 8")
+    ## determine texture map
+    if(inherits(textures, "texturemap")) {
+      tmap <- textures
+    } else {
+      stopifnot(all(textures %in% 1:8))
+      stopifnot(length(textures) >= n)
+      mono <- spatstat.options("monochrome")
+      col <- enforcelength(col, n, if(mono) 1 else 1:8)
+      lwd <- if(is.null(lwd)) NULL else enforcelength(lwd, n, 1)
+      lty <- if(is.null(lty)) NULL else enforcelength(lty, n, 1)
+      tmap <- texturemap(inputs=levX, textures=textures[1:n],
+                         col=col, lwd=lwd, lty=lty)
+    }
+    ## determine plot region
+    bb <- as.rectangle(x)
+    if(!legend) {
+      bb.all <- bb
+    } else {
+      Size <- max(sidelengths(bb))
+      bb.leg <-
+        switch(leg.side,
+               right={
+                 ## legend to right of image
+                 owin(bb$xrange[2] + c(legsep, legsep+legwid) * Size,
+                      bb$yrange)
+               },
+               left={
+                 ## legend to left of image
+                 owin(bb$xrange[1] - c(legsep+legwid, legsep) * Size,
+                      bb$yrange)
+               },
+               top={
+                 ## legend above image
+                 owin(bb$xrange,
+                      bb$yrange[2] + c(legsep, legsep+legwid) * Size)
+               },
+               bottom={
+                 ## legend below image
+                 owin(bb$xrange,
+                      bb$yrange[1] - c(legsep+legwid, legsep) * Size)
+           })
+      iside <- match(leg.side, c("bottom", "left", "top", "right"))
+      bb.all <- boundingbox(bb.leg, bb)
+    }
+    ## 
+    result <- tmap
+    attr(result, "bbox") <- bb
+    ##
+    if(do.plot) {
+      ## Plot textures
+      if(!add) {
+        plot(bb.all, type="n", main="")
+        fakemaintitle(bb, main, ...)
+      }
+      if(is.null(spacing)) spacing <- diameter(as.rectangle(x))/50
+      areas <- if(is.im(x)) table(x$v) else tile.areas(x)
+      for(i in which(areas > 0)) {
+        Zi <- if(is.tess(x)) tilX[[i]] else levelset(x, levX[i], "==")
+        Zi <- as.polygonal(Zi)
+        if(is.null(border) || !is.na(border))
+          plot(Zi, add=TRUE, border=border)
+        add.texture(Zi, texture=tmap(levX[i]), spacing=spacing, ...)
+      }
+      vertical <- leg.side %in% c("left", "right")
+      if(legend)
+        do.call(plot.texturemap,
+                resolve.defaults(list(x=tmap, add=TRUE,
+                                      vertical=vertical,
+                                      side=iside,
+                                      xlim=bb.leg$xrange,
+                                      ylim=bb.leg$yrange,
+                                      spacing=spacing),
+                                 list(...)))
+    }
+    return(invisible(result))
+  }
+
+  enforcelength <- function(x, n, x0) {
+    if(is.null(x)) x <- x0
+    if(length(x) < n) x <- rep(x, n)
+    return(x[1:n])
+  }
+
+  textureplot
+})
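+
+## e.g. (editor's illustration; assumes spatstat is attached):
+##   Z <- cut(distmap(cells), breaks=3)
+##   textureplot(Z)    ## each factor level drawn with a different texture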
+
+
+
+  
diff --git a/R/timed.R b/R/timed.R
new file mode 100644
index 0000000..f64ebb9
--- /dev/null
+++ b/R/timed.R
@@ -0,0 +1,104 @@
+#'
+#'     timed.R
+#'
+#'   Timed objects
+#'
+#'   $Revision: 1.3 $ $Date: 2017/07/31 01:08:55 $
+
+timed <- function(x, ..., starttime=NULL, timetaken=NULL) {
+  if(is.null(starttime) && is.null(timetaken)) # time starts now.
+    starttime <- proc.time()
+  # evaluate expression if any
+  object <- x
+  if(is.null(timetaken))
+    timetaken <- proc.time() - starttime
+  if(!inherits(object, "timed"))
+    class(object) <- c("timed", class(object))
+  attr(object, "timetaken") <- timetaken
+  return(object)
+}
+
+print.timed <- function(x, ...) {
+  # strip the timing information and print the rest.
+  taken <- attr(x, "timetaken")
+  cx <- class(x)
+  attr(x, "timetaken") <- NULL
+  class(x) <- cx[cx != "timed"]
+  NextMethod("print")
+  # Now print the timing info
+  cat(paste("\nTime taken:", codetime(taken), "\n"))
+  return(invisible(NULL))
+}
+
+timeTaken <- function(..., warn=TRUE) {
+  allargs <- list(...)
+  hastime <- sapply(allargs, inherits, what="timed")
+  if(!any(hastime)) {
+    if(warn) warning("Data did not contain timing information", call.=FALSE)
+    return(NULL)
+  }
+  if(warn && !all(hastime))
+    warning("Some arguments did not contain timing information", call.=FALSE)
+  times <- sapply(allargs[hastime], attr, which="timetaken")
+  tottime <- rowSums(times)
+  class(tottime) <- "proc_time"
+  return(tottime)
+}
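+
+#' Illustrative sketch (editor's example):
+#'   st <- proc.time()
+#'   K <- timed(Kest(cells), starttime=st)
+#'   K              ## prints the result, followed by "Time taken: ..."
+#'   timeTaken(K)   ## recover the timing as a 'proc_time' object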
+
+#'  ..............  codetime ....................................
+#'  Basic utility for converting times in seconds to text strings
+
+codetime <- local({
+  uname <- c("min", "hours", "days", "years",
+             "thousand years", "million years", "billion years")
+  u1name <- c("min", "hour", "day", "year",
+             "thousand years", "million years", "billion years")
+  multiple <- c(60, 60, 24, 365, 1e3, 1e3, 1e3)
+  codehms <- function(x) {
+    sgn <- if(x < 0) "-" else ""
+    x <- round(abs(x))
+    hours <- x %/% 3600
+    mins  <- (x %/% 60) %% 60
+    secs  <- x %% 60
+    h <- if(hours > 0) paste(hours, ":", sep="") else ""
+    started <- (hours > 0)
+    m <- if(mins > 0) {
+      paste(if(mins < 10 && started) "0" else "", mins, ":", sep="")
+    } else if(started) "00:" else ""
+    started <- started | (mins > 0)
+    s <- if(secs > 0) {
+      paste(if(secs < 10 && started) "0" else "", secs, sep="")
+    } else if(started) "00" else "0"
+    if(!started) s <- paste(s, "sec")
+    paste(sgn, h, m, s, sep="")
+  }
+  codetime <- function(x, hms=TRUE, what=c("elapsed","user","system")) {
+    if(inherits(x, "proc_time")) {
+      what <- match.arg(what)
+      x <- summary(x)[[match(what, c("user", "system", "elapsed"))]]
+    }
+    if(!is.numeric(x) || length(x) != 1)
+      stop("codetime: x must be a proc_time object or a single number")
+    sgn <- if(x < 0) "-" else ""
+    x <- abs(x)
+    if(x < 60)
+      return(paste(sgn, signif(x, 3), " sec", sep=""))
+    # more than 1 minute: round to whole number of seconds
+    x <- round(x)
+    if(hms && (x < 60 * 60 * 24))
+      return(paste(sgn, codehms(x), sep=""))
+    u <- u1 <- "sec"
+    for(k in seq_along(multiple)) {
+      if(x >= multiple[k]) {
+        x <- x/multiple[k]
+        u <- uname[k]
+        u1 <- u1name[k]
+      } else break
+    }
+    xx <- round(x, 1)
+    ux <- if(xx == 1) u1 else u
+    paste(sgn, xx, " ", ux, sep="")
+  }
+  codetime
+})
+
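+#' e.g. (editor's illustration):
+#'   codetime(30)      ## "30 sec"
+#'   codetime(125)     ## "2:05"
+#'   codetime(3700)    ## "1:01:40"
+#'   codetime(1e9)     ## "31.7 years"
+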
diff --git a/R/transect.R b/R/transect.R
new file mode 100644
index 0000000..9089743
--- /dev/null
+++ b/R/transect.R
@@ -0,0 +1,83 @@
+#
+#  transect.R
+#
+# Line transects of pixel images
+#
+#  $Revision: 1.6 $  $Date: 2013/03/15 01:28:06 $
+#
+
+transect.im <- local({
+
+  specify.location <- function(loc, rect) {
+    lname <- short.deparse(substitute(loc))
+    if(is.numeric(loc) && length(loc) == 2)
+      return(list(x=loc[1], y=loc[2]))
+    if(is.list(loc))
+      return(xy.coords(loc))
+    if(!(is.character(loc) && length(loc) == 1))
+      stop(paste("Unrecognised format for", sQuote(lname)), call.=FALSE)
+    xr <- rect$xrange
+    yr <- rect$yrange
+    switch(loc,
+           bottomleft  = list(x=xr[1],    y=yr[1]),
+           bottom      = list(x=mean(xr), y=yr[1]),
+           bottomright = list(x=xr[2],    y=yr[1]),
+           right       = list(x=xr[2],    y=mean(yr)),
+           topright    = list(x=xr[2],    y=yr[2]),
+           top         = list(x=mean(xr), y=yr[2]),
+           topleft     = list(x=xr[1],    y=yr[2]),
+           left        = list(x=xr[1],    y=mean(yr)),
+           centre=,
+           center      = list(x=mean(xr), y=mean(yr)),
+           stop(paste("Unrecognised location",
+                      sQuote(lname), "=", dQuote(loc)),
+                call.=FALSE)
+           )
+  }
+
+  transect.im <- 
+    function(X, ..., from="bottomleft", to="topright",
+             click=FALSE, add=FALSE) {
+      Xname <- short.deparse(substitute(X))
+      Xname <- sensiblevarname(Xname, "X")
+      stopifnot(is.im(X))
+      # determine transect position
+      if(click) {
+        # interactive
+        if(!add) plot(X)
+        from <- locator(1)
+        points(from)
+        to <- locator(1)
+        points(to)
+        segments(from$x, from$y, to$x, to$y)
+      } else {
+        # data defining a line segment
+        R <- as.rectangle(X)
+        from <- specify.location(from, R)
+        to   <- specify.location(to,   R)
+      }
+      # create sample points along transect
+      if(identical(from,to))
+        stop(paste(sQuote("from"), "and", sQuote("to"),
+                   "must be distinct points"), call.=FALSE)
+      u <- seq(0,1,length=512)
+      x <- from$x + u * (to$x - from$x)
+      y <- from$y + u * (to$y - from$y)
+      leng <- sqrt( (to$x - from$x)^2 +  (to$y - from$y)^2)
+      t <- u * leng
+      # look up pixel values (may be NA)
+      v <- X[list(x=x, y=y), drop=FALSE]
+      # package into fv object
+      df <- data.frame(t=t, v=v)
+      colnames(df)[2] <- Xname
+      fv(df, argu = "t",
+         ylab = substitute(Xname(t), list(Xname=as.name(Xname))),
+         valu=Xname,
+         labl = c("t", "%s(t)"),
+         desc = c("distance along transect",
+           "pixel value of %s"),
+         unitname = unitname(X), fname = Xname)
+    }
+
+  transect.im
+})
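+
+# e.g. (editor's illustration; assumes spatstat is attached):
+#   D <- density(cells)
+#   plot(transect.im(D, from="bottomleft", to="topright"))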
diff --git a/R/transmat.R b/R/transmat.R
new file mode 100644
index 0000000..0736105
--- /dev/null
+++ b/R/transmat.R
@@ -0,0 +1,68 @@
+## transmat.R
+##
+## transform matrices between different spatial indexing conventions
+##
+##  $Revision: 1.1 $  $Date: 2015/03/04 07:13:10 $
+
+transmat <- local({
+
+  euro <- matrix(c(0,-1,1,0), 2, 2)
+  spat <- matrix(c(0,1,1,0), 2, 2)
+  cart <- diag(c(1,1))
+  dimnames(euro) <- dimnames(spat) <- dimnames(cart) <- 
+    list(c("x","y"), c("i","j"))
+
+  known <- list(spatstat=spat,
+                cartesian=cart,
+                Cartesian=cart,
+                european=euro,
+                European=euro)
+
+  cmap <- list(x=c(1,0),
+               y=c(0,1),
+               i=c(1,0),
+               j=c(0,1))
+  
+  maptocoef <- function(s) { 
+    e <- parse(text=s)[[1]]
+    eval(eval(substitute(substitute(f, cmap), list(f=e)))) 
+  }
+
+  
+  as.convention <- function(x) {
+    if(is.character(x) && length(x) == 1) {
+      k <- pmatch(x, names(known))
+      if(is.na(k)) 
+        stop(paste("Unrecognised convention", sQuote(x)), call.=FALSE)
+      return(known[[k]])
+    }
+    if(is.list(x) && is.character(unlist(x))) {
+      xx <- lapply(x, maptocoef)
+      if(all(c("x", "y") %in% names(xx))) z <- rbind(xx$x, xx$y) else
+      if(all(c("i", "j") %in% names(xx))) z <- cbind(xx$x, xx$y) else 
+      stop("entries should be named i,j or x,y", call.=FALSE)
+      dimnames(z) <- list(c("x","y"), c("i","j"))
+      if(!(all(z == 0 | z == 1 | z == -1) && 
+           all(rowSums(abs(z)) == 1) && 
+           all(colSums(abs(z)) == 1)))
+        stop("Illegal convention", call.=FALSE)
+      return(z)
+    }
+    stop("Unrecognised format for spatial convention", call.=FALSE)
+  }  
+
+  transmat <- function(m, from, to) {
+    m <- as.matrix(m)
+    from <- as.convention(from)
+    to <- as.convention(to)
+    conv <- solve(from) %*% to
+    flip <- apply(conv == -1, 2, any)
+    if(flip[["i"]]) m <- m[nrow(m):1, , drop=FALSE]
+    if(flip[["j"]]) m <- m[         , ncol(m):1, drop=FALSE]
+    if(all(diag(conv) == 0))
+       m <- t(m)
+    return(m)
+  }
+  
+  transmat
+})
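+
+## e.g. (editor's illustration):
+##   m <- matrix(1:6, nrow=2, ncol=3)
+##   transmat(m, from="spatstat", to="European")   ## same entries, rows reversed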
diff --git a/R/treebranches.R b/R/treebranches.R
new file mode 100644
index 0000000..ed5e3db
--- /dev/null
+++ b/R/treebranches.R
@@ -0,0 +1,211 @@
+#'
+#'  treebranches.R
+#'
+#'  Label branches in a tree
+#'
+#'  $Revision: 1.4 $ $Date: 2016/07/16 03:14:51 $
+
+#' compute branch labels for each *vertex* in the tree L
+
+treebranchlabels <- local({
+
+  treebranchlabels <- function(L, root=1) {
+    stopifnot(inherits(L, "linnet"))
+    stopifnot(length(root) == 1)
+    V <- L$vertices
+    #'    M <- L$m
+    #' assign label to each vertex
+    e <- rep(NA_character_, npoints(V))
+    #' do root
+    e[root] <- ""
+    #' recurse
+    descendtree(L, root, e)
+  }
+
+  descendtree <- function(L, at, labels, verbose=FALSE) {
+    if(verbose)
+      cat(paste("Descending from node", at, "\n"))
+    below <- which(L$m[at, ] & is.na(labels))
+    while(length(below) == 1) {
+      if(verbose)
+        cat(paste("Line from", at, paren(labels[at]),
+                  "to", below, "\n"))
+      labels[below] <- labels[at]
+      at <- below
+      below <- which(L$m[at, ] & is.na(labels))
+    }
+    if(length(below) == 0) {
+      if(verbose) cat("*\n")
+      return(labels)
+    }
+    if(verbose)
+      cat(paste("split into", length(below), "\n"))
+    if(length(below) > 26)
+      stop("Oops - degree > 27")
+    labels[below] <- paste(labels[at], letters[1:length(below)], sep="")
+    for(b in below)
+      labels <- descendtree(L, b, labels)
+    return(labels)
+  }
+
+  treebranchlabels
+})
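+
+#' Illustrative sketch (editor's example; a hypothetical 4-vertex tree):
+#'   V <- ppp(x=c(0, 0, -1, 1), y=c(0, 1, 2, 2), window=owin(c(-2,2), c(-1,3)))
+#'   L <- linnet(V, edges=matrix(c(1,2, 2,3, 2,4), ncol=2, byrow=TRUE))
+#'   treebranchlabels(L, root=1)   ## c("", "", "a", "b")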
+
+
+#' Function which will return the branch label associated with
+#' any point on the network
+
+branchlabelfun <- function(L, root=1) {
+  L <- as.linnet(L)
+  vertexLabels <- treebranchlabels(L, root=root)
+  labfrom <- vertexLabels[L$from]
+  labto   <- vertexLabels[L$to]
+  segmentLabels <- ifelse(nchar(labfrom) < nchar(labto), labto, labfrom)
+  f <- function(x, y, seg, tp) { segmentLabels[seg] }
+  fL <- linfun(f, L)
+  return(fL)
+}
+
+#' convenience function for use in model formulae
+
+begins <- function(x, firstbit) {
+  stopifnot(is.character(firstbit) && length(firstbit) == 1)
+  n <- nchar(firstbit)
+  if(n == 0) rep(TRUE, length(x)) else (substr(x, 1, n) == firstbit)
+}
+
+#' extract the sub-tree for a particular label
+#' e.g. extractbranch(L, "a") extracts everything whose label begins with 'a'
+
+extractbranch <- function(X, ...) {
+  UseMethod("extractbranch")
+}
+
+extractbranch.linnet <- function(X, code, labels, ..., which=NULL) {
+  L <- X
+  V <- L$vertices
+  if(!is.null(which)) {
+    stopifnot(is.logical(which))
+    if(length(which) != npoints(V))
+      stop("Argument 'which' is the wrong length")
+    vin <- which
+  } else {
+    if(length(labels) != npoints(V))
+      stop("labels vector is the wrong length")
+    #' which vertices are included
+    #'    (a) vertices with the right initial code
+    vin <- (substr(labels, 1, nchar(code)) == code)
+    #'    (b) the apex
+    isneighbour <- (rowSums(L$m[, vin, drop=FALSE]) > 0)
+    apexcode <- if(nchar(code) > 1) substr(code, 1, nchar(code)-1) else ""
+    vin <- vin | (isneighbour & (labels == apexcode))
+  }
+  #' which edges are included
+  ein <- vin[L$from] & vin[L$to]
+  #' new serial numbers for vertices
+  vId <- cumsum(vin)
+  #' pack up
+  sparse <- L$sparse
+  out <- list(vertices=V[vin],
+              m=L$m[vin,vin],
+              lines=L$lines[ein],
+              from=vId[L$from[ein]], to=vId[L$to[ein]],
+              dpath=if(sparse) NULL else L$dpath[vin,vin],
+              sparse=sparse,
+              window=V$window)
+  class(out) <- c("linnet", class(out))
+  #' pre-compute bounding radius
+  if(sparse)
+    out$boundingradius <- boundingradius(out)
+  out$toler <- default.linnet.tolerance(out)
+  attr(out, "which") <- vin
+  return(out)  
+}
+
+extractbranch.lpp <- function(X, code, labels, ..., which=NULL) {
+  L <- as.linnet(X)
+  #' make sub-network
+  if(missing(code)) code <- NULL
+  if(missing(labels)) labels <- NULL
+  Lnew <- extractbranch(L, code, labels, which=which)
+  #' which vertices are included
+  vin <- attr(Lnew, "which")
+  #' which edges are included
+  ein <- vin[L$from] & vin[L$to]
+  #' which data points are included
+  xin <- ein[coords(X)$seg]
+  #' new serial numbers for edges
+  eId <- cumsum(ein)
+  #' construct subset
+  Xnew <- X[xin]
+  Xnew$domain <- Lnew
+  #' apply new serial numbers to segment map
+  coords(Xnew)$seg <- eId[coords(Xnew)$seg]
+  #'
+  return(Xnew)  
+}
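+
+#' Sketch (not run): extract the subtree below label "a", using labels
+#' computed by treebranchlabels():
+#'     lab <- treebranchlabels(L, root=1)
+#'     La  <- extractbranch(L, "a", lab)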
+
+deletebranch <- function(X, ...) {
+  UseMethod("deletebranch")
+}
+
+deletebranch.linnet <- function(X, code, labels, ...) {
+  L <- X
+  V <- L$vertices
+  if(length(labels) != npoints(V))
+    stop("labels vector is the wrong length")
+  #' which vertices are retained
+  vkeep <- (substr(labels, 1, nchar(code)) != code)
+  #' which edges are retained
+  ekeep <- vkeep[L$from] & vkeep[L$to]
+  #' new serial numbers for vertices
+  vId <- cumsum(vkeep)
+  #' pack up
+  sparse <- L$sparse
+  out <- list(vertices=V[vkeep],
+              m=L$m[vkeep,vkeep],
+              lines=L$lines[ekeep],
+              from=vId[L$from[ekeep]], to=vId[L$to[ekeep]],
+              dpath=if(sparse) NULL else L$dpath[vkeep,vkeep],
+              sparse=sparse,
+              window=V$window)
+  class(out) <- c("linnet", class(out))
+  #' recompute bounding radius
+  if(sparse)
+    out$boundingradius <- boundingradius(out)
+  out$toler <- default.linnet.tolerance(out)
+  attr(out, "which") <- vkeep
+  return(out)  
+}
+
+deletebranch.lpp <- function(X, code, labels, ...) {
+  #' make sub-network
+  L <- as.linnet(X)
+  Lnew <- deletebranch(L, code=code, labels=labels)
+  #' which vertices are retained
+  vkeep <- attr(Lnew, "which")
+  #' which edges are retained
+  ekeep <- vkeep[L$from] & vkeep[L$to]
+  #' which data points are retained
+  xin <- ekeep[coords(X)$seg]
+  #' new serial numbers for vertices
+  #        vId <- cumsum(vkeep)
+  #' new serial numbers for edges
+  eId <- cumsum(ekeep)
+  #' construct subset
+  Xnew <- X[xin]
+  Xnew$domain <- Lnew
+  #' apply new serial numbers to segment map
+  coords(Xnew)$seg <- eId[coords(Xnew)$seg]
+  #'
+  return(Xnew)  
+}
+
+treeprune <- function(X, root=1, level=0){
+  ## collect names of branches to be pruned
+  tb <- treebranchlabels(as.linnet(X), root=root)
+  keep <- (nchar(tb) <= level)
+  Y <- extractbranch(X, which=keep)
+  return(Y)
+}
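+
+#' Sketch (not run): keep only the trunk and first-level branches
+#' of a tree-shaped network X:
+#'     Y <- treeprune(X, root=1, level=1)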
+
diff --git a/R/triangulate.R b/R/triangulate.R
new file mode 100644
index 0000000..f9ff6ab
--- /dev/null
+++ b/R/triangulate.R
@@ -0,0 +1,36 @@
+#'
+#'    triangulate.R
+#'
+#'   Decompose a polygon into triangles
+#'
+#'    $Revision: 1.4 $  $Date: 2015/11/21 11:13:00 $
+#'
+
+triangulate.owin <- local({
+
+  is.triangle <- function(p) {
+    return((length(p$bdry) == 1) && (length(p$bdry[[1]]$x) == 3))
+  }
+
+  triangulate.owin <- function(W) {
+    stopifnot(is.owin(W))
+    W <- as.polygonal(W, repair=TRUE)
+    P <- as.ppp(vertices(W), W=Frame(W), check=FALSE)
+    D <- delaunay(P)
+    V <- intersect.tess(W, D)
+    Candidates <- tiles(V)
+    istri <- sapply(Candidates, is.triangle)
+    Accepted <- Candidates[istri]
+    if(any(!istri)) {
+      # recurse
+      Worries <- unname(Candidates[!istri])
+      Fixed <- lapply(Worries, triangulate.owin)
+      Fixed <- do.call(c, lapply(Fixed, tiles))
+      Accepted <- append(Accepted, Fixed)
+    }
+    result <- tess(tiles=Accepted, window=W)
+    return(result)
+  }
+
+  triangulate.owin
+})
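+
+#' Sketch (not run); 'letterR' is a polygonal window shipped with spatstat:
+#'     TR <- triangulate.owin(letterR)
+#'     plot(TR)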
diff --git a/R/triplet.family.R b/R/triplet.family.R
new file mode 100644
index 0000000..85603ed
--- /dev/null
+++ b/R/triplet.family.R
@@ -0,0 +1,93 @@
+#
+#
+#    triplet.family.R
+#
+#    $Revision: 1.1 $	$Date: 2011/11/05 07:18:51 $
+#
+#    Family of `third-order' point process models
+#
+#    triplet.family:      object of class 'isf' 
+#	
+#
+# -------------------------------------------------------------------
+#	
+
+triplet.family <-
+  list(
+       name  = "triplet",
+       print = function(self) {
+         cat("Family of third-order interactions\n")
+       },
+       plot = NULL,
+       # ----------------------------------------------------
+       eval  = function(X,U,EqualPairs,pot,pars,correction, ...) {
+  #
+  # This is the eval function for the `triplet' family.
+  # 
+  # This internal function is not meant to be called by the user.
+  # It is called by mpl.prepare() during execution of ppm().
+  #         
+  # The eval functions perform all the manipulations that are common to
+  # a given class of interactions. 
+  #
+  # This function is currently modelled on 'inforder.family'.
+  # It simply invokes the potential 'pot' directly
+  # and expects 'pot' to return the values of the sufficient statistic S(u,X).
+  #
+  # ARGUMENTS:
+  #   All 'eval' functions have the following arguments 
+  #   which are called in sequence (without formal names)
+  #   by mpl.prepare():
+  #       
+  #   X           data point pattern                      'ppp' object
+  #   U           points at which to evaluate potential   list(x,y) suffices
+  #   EqualPairs  two-column matrix of indices i, j such that X[i] == U[j]
+  #               (or NULL, meaning all comparisons are FALSE)
+  #   pot         potential function 
+  #   pars        auxiliary parameters for the potential   list(......)
+  #   correction  edge correction type                    (string)
+  #
+  # VALUE:
+  #    All `eval' functions must return a        
+  #    matrix of values of the total potential
+  #    induced by the pattern X at each location given in U.
+  #    The rows of this matrix correspond to the rows of U (the quadrature points);
+  #    the k columns are the coordinates of the k-dimensional potential.
+  #
+  ##########################################################################
+
+  # POTENTIAL:
+  # In this case the potential function 'pot' should have arguments
+  #    pot(X, U, EqualPairs, pars, correction, ...)
+  #         
+  # It must return a vector with length equal to the number of points in U,
+  # or a matrix with as many rows as there are points in U.
+
+         if(!is.ppp(U))
+           U <- ppp(U$x, U$y, window=X$window)
+         
+         POT <- pot(X, U, EqualPairs, pars, correction, ...)
+
+         if(is.matrix(POT)) {
+           if(nrow(POT) != U$n)
+             stop("Internal error: the potential returned a matrix with the wrong number of rows")
+         } else if(is.array(POT) && length(dim(POT)) > 2)
+           stop("Internal error: the potential returned an array with more than 2 dimensions")
+         else if(is.vector(POT)) {
+           if(length(POT) != U$n)
+             stop("Internal error: the potential returned a vector with the wrong length")
+           POT <- matrix(POT, ncol=1)
+         } else
+           stop("Internal error: the return value from the potential is not understood")
+
+         return(POT)
+       },
+######### end of function $eval
+       suffstat = NULL
+######### end of function $suffstat
+)
+######### end of list
+
+class(triplet.family) <- "isf"
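+
+# Sketch: any potential usable with this family must conform to
+#     pot(X, U, EqualPairs, pars, correction, ...)
+# returning a numeric vector of length npoints(U), or a matrix with
+# npoints(U) rows; TripletPotential() in triplets.R is the real instance.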
+
+
diff --git a/R/triplets.R b/R/triplets.R
new file mode 100644
index 0000000..015e8d5
--- /dev/null
+++ b/R/triplets.R
@@ -0,0 +1,155 @@
+#
+#
+#    triplets.R
+#
+#    $Revision: 1.17 $	$Date: 2016/12/30 01:44:07 $
+#
+#    The triplets interaction
+#
+#    Triplets()    create an instance of the triplets process
+#                 [an object of class 'interact']
+#	
+# -------------------------------------------------------------------
+#
+
+Triplets <- local({
+
+  DebugTriplets <- FALSE
+  
+  # define triplet potential
+  TripletPotential <- function(X,U,EqualPairs,pars,correction, ...) {
+    if(!all(ok <- correction %in% c("border", "none"))) {
+      nbad <- sum(bad <- !ok)
+      warning(paste(ngettext(nbad, "Correction", "Corrections"),
+                    commasep(sQuote(correction[bad])),
+                    ngettext(nbad,
+                             "is unavailable and was ignored",
+                             "are unavailable and were ignored")))
+    }
+    # check that all points of X are included in U
+    nX <- npoints(X)
+    nU <- npoints(U)
+    XinU <- if(length(EqualPairs) == 0) integer(0) else EqualPairs[,1]
+    missX <- which(table(factor(XinU, levels=1:nX)) == 0)
+    if((nmiss <- length(missX)) > 0) {
+      # add missing points to (the end of) U
+      U <- superimpose(U, X[missX], W=as.owin(X), check=FALSE)
+      EqualPairs <- rbind(EqualPairs, cbind(missX, nU + 1:nmiss))
+      nU <- nU + nmiss
+    }
+    iXX <- EqualPairs[,1]
+    iXU <- EqualPairs[,2]
+    # construct map from X index to U index 
+    mapXU <- integer(nX)
+    mapXU[iXX] <- iXU
+    # construct map from U index to X index 
+    mapUX <- rep.int(NA_integer_, nU)
+    mapUX[iXU] <- iXX
+    # logical vector identifying which quadrature points are in X
+    isdata <- rep.int(FALSE, nU)
+    isdata[iXU] <- TRUE
+    # identify all close pairs u, x
+    r <- pars$r
+    cp <- crosspairs(U, X, r, what="indices")
+    if(DebugTriplets)
+      cat(paste("crosspairs at distance", r, "yields", length(cp$i), "pairs\n"))
+    IU <- cp$i
+    J <- cp$j
+    # map X index to U index
+    JU <- mapXU[J]
+    # Each (Xi, Xj) pair will appear twice - eliminate duplicates
+    dupX <- isdata[IU] & isdata[JU] & (IU > JU)
+    retain <- !dupX
+    IU <- IU[retain]
+    JU <- JU[retain]
+    if(DebugTriplets)
+      cat(paste(sum(dupX), "duplicate pairs removed\n"))
+    # find all triangles
+    tri <- edges2triangles(IU, JU, nU, friendly=isdata)
+    if(DebugTriplets)
+      cat(paste(nrow(tri), "triangles identified\n"))
+    if(nrow(tri) == 0) {
+      # there are no triangles; return vector of zeroes
+      return(rep.int(0, nU-nmiss))
+    }
+    # count triangles containing a given quadrature point
+    tcount <- apply(tri, 2,
+                    function(x, n) { table(factor(x, levels=1:n)) }, n=nU)
+    tcount <- .rowSums(tcount, nrow(tcount), ncol(tcount))
+    # select triangles consisting only of data points
+    triX <- matrix(mapUX[tri], nrow=nrow(tri))
+    isX <- matrowall(!is.na(triX))
+    triX <- triX[isX, , drop=FALSE]
+    #
+    if(nrow(triX) > 0) {
+      # count triangles of data points containing each given data point
+      tXcount <- apply(triX, 2,
+                       function(x, n) { table(factor(x, levels=1:n)) }, n=nX)
+      tXcount <- .rowSums(tXcount, nrow(tXcount), ncol(tXcount))
+    } else {
+      # there are no triangles of data points
+      tXcount <- rep.int(0, nX)
+    }
+    #
+    answer <- tcount
+    answer[iXU] <- tXcount[iXX]
+    if(DebugTriplets)
+      cat(paste("Max suff stat: data ", max(tXcount),
+                ", dummy ", max(tcount[isdata]), "\n", sep=""))
+    # truncate to original size
+    if(nmiss > 0)
+      answer <- answer[-((nU-nmiss+1):nU)]
+    return(answer)
+  }
+  # set up basic 'triplets' object except for family and parameters
+  BlankTripletsObject <- 
+    list(
+         name     = "Triplets process",
+         creator  = "Triplets",
+         family   = "triplet.family", # evaluated later
+         pot      = TripletPotential,
+         par      = list(r=NULL), # filled in later
+         parnames = "interaction distance",
+         init     = function(self) {
+                      r <- self$par$r
+                      if(!is.numeric(r) || length(r) != 1 || r <= 0)
+                       stop("interaction distance r must be a positive number")
+                    },
+         update = NULL,  # default OK
+         print = NULL,    # default OK
+         interpret =  function(coeffs, self) {
+           loggamma <- as.numeric(coeffs[1])
+           gamma <- exp(loggamma)
+           return(list(param=list(gamma=gamma),
+                       inames="interaction parameter gamma",
+                       printable=dround(gamma)))
+         },
+         valid = function(coeffs, self) {
+           gamma <- ((self$interpret)(coeffs, self))$param$gamma
+           return(is.finite(gamma) && (gamma <= 1))
+         },
+         project = function(coeffs, self) {
+           if((self$valid)(coeffs, self)) return(NULL) else return(Poisson())
+         },
+         irange = function(self, coeffs=NA, epsilon=0, ...) {
+           r <- self$par$r
+           if(anyNA(coeffs))
+             return(r)
+           loggamma <- coeffs[1]
+           if(abs(loggamma) <= epsilon)
+             return(0)
+           else
+             return(r)
+         },
+         version=NULL # to be added
+         )
+  class(BlankTripletsObject) <- "interact"
+  # define Triplets function
+  Triplets <- function(r) {
+    instantiate.interact(BlankTripletsObject, list(r=r))
+  }
+  Triplets <- intermaker(Triplets, BlankTripletsObject)
+  
+  Triplets
+})
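+
+# Sketch (not run): fit by maximum pseudolikelihood; 'redwood' is a
+# point pattern dataset shipped with spatstat:
+#     fit <- ppm(redwood ~ 1, Triplets(r=0.1))
+#     coef(fit)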
+
diff --git a/R/unique.ppp.R b/R/unique.ppp.R
new file mode 100755
index 0000000..3b96042
--- /dev/null
+++ b/R/unique.ppp.R
@@ -0,0 +1,197 @@
+#
+#   unique.ppp.R
+#
+# $Revision: 1.32 $  $Date: 2016/04/25 02:34:40 $
+#
+# Methods for 'multiplicity' co-authored by Sebastian Meyer
+# Copyright 2013 Adrian Baddeley and Sebastian Meyer 
+
+unique.ppp <- function(x, ..., warn=FALSE) {
+  verifyclass(x, "ppp")
+  dupe <- duplicated.ppp(x, ...) 
+  if(!any(dupe)) return(x)
+  if(warn) warning(paste(sum(dupe), "duplicated points were removed"),
+                   call.=FALSE)
+  return(x[!dupe])
+}
+
+duplicated.ppp <- function(x, ...,
+                           rule=c("spatstat", "deldir", "unmark")) {
+  verifyclass(x, "ppp")
+  rule <- match.arg(rule)
+  if(rule == "deldir")
+    return(deldir::duplicatedxy(x))
+  if(rule == "unmark")
+    x <- unmark(x)
+  n <- npoints(x)
+  switch(markformat(x),
+         none = {
+           # unmarked points
+           # check for duplication of x and y separately (a necessary condition)
+           xx <- x$x
+           yy <- x$y
+           possible <- duplicated(xx) & duplicated(yy)
+           if(!any(possible))
+             return(possible)
+           # split by x coordinate of duplicated x values
+           result <- possible
+           xvals <- unique(xx[possible])
+           for(xvalue in xvals) {
+             sub <- (xx == xvalue)
+           # compare y values
+             result[sub] <- duplicated(yy[sub])
+           }
+         },
+         vector = {
+           # marked points - split by mark value
+           m <- marks(x)
+           um <- if(is.factor(m)) levels(m) else unique(m)
+           xx <- unmark(x)
+           result <- logical(n)
+           for(i in seq_along(um)) {
+             sub <- (m == um[i])
+             result[sub] <- duplicated.ppp(xx[sub])
+           }
+         },
+         dataframe = {
+           result <- duplicated(as.data.frame(x))
+         },
+         # the following are currently not supported
+         hyperframe = {
+           result <- duplicated(as.data.frame(x))
+         }, 
+         list = {
+           result <- duplicated(as.data.frame(as.hyperframe(x)))
+         },
+         stop(paste("Unknown mark type", sQuote(markformat(x))))
+         )
+  return(result)
+}
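+
+# Sketch (not run): a pattern with one duplicated point
+#     X <- ppp(c(0.2, 0.2, 0.7), c(0.3, 0.3, 0.9), window=square(1))
+#     duplicated(X)        # FALSE  TRUE FALSE
+#     npoints(unique(X))   # 2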
+
+anyDuplicated.ppp <- function(x, ...) {
+  anyDuplicated(as.data.frame(x), ...)
+}
+
+## utility to check whether two rows are identical
+
+IdenticalRows <- local({
+  id <- function(i,j, a, b=a) {
+    ai <- a[i,]
+    bj <- b[j,]
+    row.names(ai) <- row.names(bj) <- NULL
+    identical(ai, bj)
+  }
+  Vectorize(id, c("i", "j"))
+})
+    
+
+multiplicity <- function(x) {
+  UseMethod("multiplicity")
+}
+  
+multiplicity.ppp <- function(x) {
+  verifyclass(x, "ppp")
+  np <- npoints(x)
+  if(np == 0) return(integer(0))
+  cl <- closepairs(x, 0, what="indices")
+  I <- cl$i
+  J <- cl$j
+  if(length(I) == 0)
+    return(rep.int(1L, np))
+  switch(markformat(x),
+         none = { },
+         vector = {
+           marx <- as.data.frame(marks(x))
+           agree <- IdenticalRows(I, J, marx)
+           I <- I[agree]
+           J <- J[agree]
+         },
+         dataframe = {
+           marx <- marks(x)
+           agree <- IdenticalRows(I, J, marx)
+           I <- I[agree]
+           J <- J[agree]
+         },
+         hyperframe = {
+           marx <- as.data.frame(marks(x)) # possibly discards columns
+           agree <- IdenticalRows(I, J, marx)
+           I <- I[agree]
+           J <- J[agree]
+         }, 
+         list = stop("Not implemented for lists of marks")
+         )
+  if(length(I) == 0)
+    return(rep.int(1L, np))
+  JbyI <- split(J, factor(I, levels=1:np))
+  result <- 1 + lengths(JbyI)
+  return(result)
+}
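+
+# Continuing the sketch above: multiplicity() counts, for each point,
+# how many points of X are coincident with it (with identical marks,
+# if any), so multiplicity(X) should give 2 2 1.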
+  
+multiplicity.data.frame <- function (x) {
+  if(all(unlist(lapply(x, is.numeric))))
+    return(multiplicityNumeric(as.matrix(x)))
+  ## result template (vector of 1's)
+  result <- setNames(rep.int(1L, nrow(x)), rownames(x))
+  ## check for duplicates (works for data frames, arrays and vectors)
+  ## CAVE: comparisons are based on a character representation of x
+  if (!any(dup <- duplicated(x)))
+    return(result)
+  ux <- x[!dup, , drop=FALSE]
+  dx <- x[dup,  , drop=FALSE]
+  nu <- nrow(ux)
+  nd <- nrow(dx)
+  hit <- outer(seq_len(nu), seq_len(nd), IdenticalRows, a=ux, b=dx)
+  counts <- as.integer(1L + .rowSums(hit, nu, nd))
+  result[!dup] <- counts
+  dumap <- apply(hit, 2, match, x=TRUE) # equivalent to min(which(z))
+  result[dup] <- counts[dumap]
+  return(result)
+}
+
+### multiplicity method for NUMERIC arrays, data frames, and vectors
+### This implementation is simply based on checking for dist(x)==0
+
+multiplicityNumeric <- function(x)
+{
+  if (anyDuplicated(x)) {
+    distmat <- as.matrix(dist(x, method="manhattan"))  # faster than euclid.
+    as.integer(rowSums(distmat == 0))                  # labels are kept
+  } else {                                             # -> vector of 1's
+    nx <- NROW(x)
+    labels <- if (length(dim(x))) rownames(x) else names(x)
+    if (is.null(labels)) labels <- seq_len(nx)
+    setNames(rep.int(1L, nx), labels)
+  }
+}
+
+### multiplicity method for arrays, data frames, and vectors (including lists)
+### It also works for non-numeric data, since it is based on duplicated().
+
+multiplicity.default <- function (x) {
+  if(is.numeric(x))
+    return(multiplicityNumeric(x))
+  nx <- NROW(x)                   # also works for a vector x
+  ## result template (vector of 1's)
+  labels <- if (length(dim(x))) rownames(x) else names(x)
+  if (is.null(labels)) labels <- seq_len(nx)
+  result <- setNames(rep.int(1L, nx), labels)
+  ## check for duplicates (works for data frames, arrays and vectors)
+  ## CAVE: comparisons are based on a character representation of x
+  if (!any(dup <- duplicated(x)))
+    return(result)
+
+  ## convert x to a matrix for IdenticalRows()
+  x <- as.matrix(x)
+  dimnames(x) <- NULL             # discard any names!
+  ux <- x[!dup, , drop=FALSE]
+  dx <- x[dup,  , drop=FALSE]
+  nu <- nrow(ux)
+  nd <- nrow(dx)
+  hit <- outer(seq_len(nu), seq_len(nd), IdenticalRows, a=ux, b=dx)
+  counts <- as.integer(1L + .rowSums(hit, nu, nd))
+  dumap <- apply(hit, 2, match, x=TRUE) # was: function(z) min(which(z)))
+  result[dup] <- counts[dumap]
+  return(result)
+}
+
+
diff --git a/R/units.R b/R/units.R
new file mode 100755
index 0000000..d6030a2
--- /dev/null
+++ b/R/units.R
@@ -0,0 +1,208 @@
+#
+# Functions for extracting and setting the name of the unit of length
+#
+#   $Revision: 1.23 $   $Date: 2016/09/23 07:42:46 $
+#
+#
+
+unitname <- function(x) {
+  UseMethod("unitname")
+}
+
+unitname.owin <- function(x) {
+  u <- as.units(x$units)
+  return(u)
+}
+
+unitname.ppp <- function(x) {
+  u <- as.units(x$window$units)
+  return(u)
+}
+
+unitname.im <- function(x) {
+  u <- as.units(x$units)
+  return(u)
+}
+
+unitname.default <- function(x) {
+  return(as.units(attr(x, "units")))
+}
+
+"unitname<-" <- function(x, value) {
+  UseMethod("unitname<-")
+}
+
+"unitname<-.owin" <- function(x, value) {
+  x$units <- as.units(value)
+  return(x)
+}
+
+"unitname<-.ppp" <- function(x, value) {
+  w <- x$window
+  unitname(w) <- value
+  x$window <- w
+  return(x)
+}
+
+"unitname<-.im" <- function(x, value) {
+  x$units <- as.units(value)
+  return(x)
+}
+
+"unitname<-.default" <- function(x, value) {
+  if(is.null(x)) return(x)
+  attr(x, "units") <- as.units(value)
+  return(x)
+}
+
+
+###  class 'units'
+
+makeunits <- function(sing="unit", plur="units", mul = 1) {
+  if(!is.character(sing))
+    stop("In unit name, first entry should be a character string")
+  if(!is.character(plur))
+    stop("In unit name, second entry should be a character string")
+  if(!is.numeric(mul)) {
+    mul <- try(as.numeric(mul), silent=TRUE)
+    if(inherits(mul, "try-error"))
+      stop("In unit name, third entry should be a number")
+  }
+  if(length(mul) != 1 || mul <= 0)
+    stop("In unit name, third entry should be a single positive number")
+  u <- list(singular=sing, plural=plur, multiplier=mul)
+  if(mul != 1 && (sing=="unit" || plur=="units"))
+    stop(paste("A multiplier is not allowed",
+               "if the unit does not have a specific name"))
+  class(u) <- "units"
+  return(u)
+}
+  
+as.units <- function(s) {
+  if(inherits(s, "units")) return(s)
+  s <- as.list(s)
+  n <- length(s)
+  if(n > 3)
+    stop(paste("Unit name should be a character string,",
+               "or a vector/list of 2 character strings,",
+               "or a list(character, character, numeric)"))
+  
+  out <- switch(n+1,
+                makeunits(),
+                makeunits(s[[1]], s[[1]]),
+                makeunits(s[[1]], s[[2]]),
+                makeunits(s[[1]], s[[2]], s[[3]]))
+  return(out)
+}
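+
+# Sketch (not run):
+#     as.units("mm")                    # mm / mm
+#     as.units(c("metre", "metres"))    # metre / metres
+#     as.units(list("cm", "cm", 10))    # 10 cm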
+
+print.units <- function(x, ...) {
+  mul <- x$multiplier
+  if(mul == 1)
+    cat(paste(x$singular, "/", x$plural, "\n"))
+  else 
+    cat(paste(mul, x$plural, "\n"))
+  return(invisible(NULL))
+}
+
+as.character.units <- function(x, ...) {
+  mul <- x$multiplier
+  return(if(mul == 1) x$plural else paste(mul, x$plural))
+}
+
+summary.units <- function(object, ...) {
+  x <- object
+  scaled <- (x$multiplier != 1)
+  named  <- (x$singular != "unit")
+  vanilla <- !named && !scaled
+  out <-
+    if(vanilla) {
+      list(legend = NULL,
+           axis   = NULL, 
+           explain = NULL,
+           singular = "unit",
+           plural   = "units")
+    } else if(named & !scaled) {
+      list(legend = paste("Unit of length: 1", x$singular),
+           axis   = paren(x$plural, type=spatstat.options('units.paren')),
+           explain = NULL,
+           singular = x$singular,
+           plural   = x$plural)
+    } else {
+      expanded <- paste(x$multiplier, x$plural)
+      expla <- paren(paste("one unit =", expanded),
+                     type=spatstat.options('units.paren'))
+      list(legend = paste("Unit of length:", expanded),
+           axis   = expla, 
+           explain  = expla,
+           singular = "unit",
+           plural   = "units")
+    }
+  out <- append(out, list(scaled  = scaled,
+                          named   = named,
+                          vanilla = vanilla))
+  class(out) <- "summary.units"
+  return(out)
+}
+
+print.summary.units <- function(x, ...) {
+  if(x$vanilla)
+    cat("Unit of length (unnamed)\n")
+  else
+    cat(paste(x$legend, "\n"))
+  invisible(NULL)
+}
+
+compatible.units <- function(A, B, ..., coerce=TRUE) {
+  stopifnot(inherits(A, "units"))
+  if(missing(B)) return(TRUE)
+  stopifnot(inherits(B, "units"))
+  # check for null units
+  Anull <- summary(A)$vanilla
+  Bnull <- summary(B)$vanilla
+  # `coerce' determines whether `vanilla' units are compatible with other units
+  coerce <- as.logical(coerce)
+  # 
+  agree <- if(!Anull && !Bnull) identical(all.equal(A,B), TRUE) else
+           if(Anull && Bnull) TRUE else coerce 
+  #
+  if(!agree) return(FALSE)
+  # A and B agree
+  if(length(list(...)) == 0) return(TRUE)
+  # recursion
+  return(compatible.units(B, ...))
+}
+
+# class 'numberwithunit':  numeric value(s) with unit of length
+
+numberwithunit <- function(x, u) {
+  u <- as.units(u)
+  x <- as.numeric(x)
+  unitname(x) <- u
+  class(x) <- c(class(x), "numberwithunit")
+  return(x)
+}
+
+"%unit%" <- function(x, u) {
+  numberwithunit(x, u)
+}
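+
+# Sketch (not run):
+#     d <- 42 %unit% c("metre", "metres")
+#     print(d)    # 42 metres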
+
+format.numberwithunit <- function(x, ..., collapse=" x ", modifier=NULL) {
+  u <- summary(unitname(x))
+  uname <- if(all(x == 1)) u$singular else u$plural
+  y <- format(as.numeric(x), ...)
+  z <- pasteN(paste(y, collapse=collapse), 
+              modifier, uname, u$explain)
+  return(z)
+}
+
+as.character.numberwithunit <- function(x, ...) {
+  return(format(x))
+}
+
+print.numberwithunit <- function(x, ...) {
+  cat(format(x, ...), fill=TRUE)
+  return(invisible(NULL))
+}
+
+
+    
diff --git a/R/unnormdensity.R b/R/unnormdensity.R
new file mode 100644
index 0000000..ff88183
--- /dev/null
+++ b/R/unnormdensity.R
@@ -0,0 +1,67 @@
+#
+#  unnormdensity.R
+#
+#  $Revision: 1.5 $  $Date: 2016/02/11 10:17:12 $
+#
+
+unnormdensity <- function(x, ..., weights=NULL) {
+  if(any(!nzchar(names(list(...)))))
+    stop("All arguments must be named (tag=value)")
+  if(is.null(weights)) {
+    out <- do.call.matched(density.default, c(list(x=x), list(...)))
+    out$y <- length(x) * out$y
+  } else if(all(weights == 0)) {
+    # result is zero
+    out <- do.call.matched(density.default, c(list(x=x), list(...)))
+    out$y <- 0 * out$y
+  } else if(all(weights >= 0)) {
+    # all masses are nonnegative
+    w <- weights
+    totmass <- sum(w)
+    out <- do.call.matched(density.default,
+                           c(list(x=x),
+                             list(...),
+                             list(weights=w/totmass)))
+    out$y <- out$y * totmass
+  } else if(all(weights <= 0)) {
+    # all masses are nonpositive
+    w <- (- weights)
+    totmass <- sum(w)
+    out <- do.call.matched(density.default,
+                           c(list(x=x),
+                             list(...),
+                             list(weights=w/totmass)))
+    out$y <- out$y * (- totmass)
+  } else {
+    # mixture of positive and negative masses
+    w <- weights
+    wabs <- abs(w)
+    wpos <- pmax.int(0, w)
+    wneg <- - pmin.int(0, w)
+    # determine bandwidth using absolute masses
+    dabs <- do.call.matched(density.default,
+                            c(list(x=x),
+                              list(...),
+                              list(weights=wabs/sum(wabs))))
+    bw <- dabs$bw
+    # compute densities for positive and negative masses separately
+    outpos <- do.call.matched(density.default,
+                      resolve.defaults(list(x=x),
+                                       list(bw=bw, adjust=1),
+                                       list(weights=wpos/sum(wpos)),
+                                       list(...),
+                                       .StripNull=TRUE))
+    outneg <- do.call.matched(density.default,
+                      resolve.defaults(list(x=x),
+                                       list(bw=bw, adjust=1),
+                                       list(weights=wneg/sum(wneg)),
+                                       list(...),
+                                       .StripNull=TRUE))
+    # combine
+    out <- outpos
+    out$y <- sum(wpos) * outpos$y - sum(wneg) * outneg$y
+  }
+  out$call <- match.call()
+  return(out)
+}
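+
+# Sketch (not run): unlike density.default, the result integrates to
+# sum(weights) rather than 1, and weights may be negative:
+#     x <- rnorm(20)
+#     d <- unnormdensity(x, bw=0.5, weights=rep(c(1, -1), 10))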
+
diff --git a/R/unstack.R b/R/unstack.R
new file mode 100644
index 0000000..227e82a
--- /dev/null
+++ b/R/unstack.R
@@ -0,0 +1,85 @@
+#'
+#'   unstack.R
+#'
+#'   Methods for generic 'unstack'
+#' 
+#'   $Revision: 1.3 $  $Date: 2016/06/28 04:01:40 $
+
+unstack.ppp <- unstack.psp <- unstack.lpp <- function(x, ...) {
+  trap.extra.arguments(...)
+  marx <- marks(x)
+  d <- dim(marx)
+  if(is.null(d)) return(solist(x))
+  y <- rep(list(unmark(x)), d[2])
+  for(j in seq_along(y))
+    marks(y[[j]]) <- marx[,j,drop=FALSE]
+  names(y) <- colnames(marx)
+  return(as.solist(y))
+}
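+
+#' Sketch (not run): 'finpines' is shipped with spatstat and has
+#' data-frame marks (diameter and height), so unstack() yields a
+#' solist of two marked point patterns:
+#'     unstack(finpines)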
+
+
+unstack.msr <- function(x, ...) {
+  trap.extra.arguments(...)
+  d <- dim(x)
+  if(is.null(d)) return(solist(x))
+  smo <- attr(x, "smoothdensity")
+  if(!inherits(smo, "imlist")) smo <- NULL
+  nc <- d[2]
+  y <- vector(mode="list", length=nc)
+  for(j in seq_len(nc)) {
+    xj <- x[,j,drop=FALSE]
+    if(!is.null(smo)) attr(xj, "smoothdensity") <- smo[[j]]
+    y[[j]] <- xj
+  }
+  names(y) <- colnames(x)
+  return(as.solist(y))
+}
+
+unstackFilter <- function(x) {
+  ## deal with a whole swag of classes that do not need to be unstacked
+  nonvectorclasses <- c("im", "owin", "quad", "tess", 
+                        "quadratcount", "quadrattest", 
+                        "funxy", "distfun", "nnfun", 
+                        "linnet", "linfun",
+                        "influence.ppm", "leverage.ppm")
+  y <- if(inherits(x, nonvectorclasses)) solist(x) else unstack(x)
+  return(y)
+}
+
+unstack.solist <- function(x, ...) {
+  trap.extra.arguments(...)
+  as.solist(lapply(x, unstackFilter))
+}
+
+unstack.layered <- function(x, ...) {
+  trap.extra.arguments(...)
+  y <- lapply(x, unstackFilter)
+  ny <- lengths(y)
+  nx <- length(ny)
+  if(all(ny == 1) || nx == 0) return(solist(x))
+  pa <- layerplotargs(x)
+  mm <- indexCartesian(ny)
+  nz <- nrow(mm)
+  z <- vector(mode="list", length=nz)
+  nama <- lapply(y, names)
+  for(i in seq_len(nz)) {
+    ll <- mapply("[[", x=y, i=mm[i,], SIMPLIFY=FALSE)
+    nam <- mapply("[", x=nama, i=mm[i,])
+    nam <- nam[!sapply(nam, is.null)]
+    names(z)[i] <- paste(nam, collapse=".")
+    z[[i]] <- layered(LayerList=ll, plotargs=pa)
+  }
+  z <- as.solist(z)
+  return(z)
+}
+
+
+  
+
+
+
+
+  
+
+
+  
diff --git a/R/update.ppm.R b/R/update.ppm.R
new file mode 100755
index 0000000..e130a4b
--- /dev/null
+++ b/R/update.ppm.R
@@ -0,0 +1,375 @@
+#
+#  update.ppm.R
+#
+#
+#  $Revision: 1.60 $    $Date: 2016/03/08 05:43:46 $
+#
+#
+#
+
+update.ppm <- local({
+
+  ## update point pattern dataset using either data or formula
+  newpattern <- function(oldpattern, lhs, callframe, envir) {
+    eval(eval(substitute(substitute(l, list("."=Q)),
+                         list(l=lhs,
+                              Q=oldpattern)),
+              envir=as.list(envir), enclos=callframe),
+         envir=as.list(envir), enclos=callframe)
+  }
+  
+  update.ppm <- function(object, ...,
+                         fixdummy=TRUE, use.internal=NULL,
+                         envir=environment(terms(object))) {
+    verifyclass(object, "ppm")
+    new.callstring <- short.deparse(sys.call())
+    aargh <- list(...)
+
+    if(inherits(object, "ippm")) {
+      call <- object$dispatched$call
+      callframe <- object$dispatched$callframe
+    } else {
+      call <- getCall(object)
+      if(!is.call(call))
+        stop(paste("Internal error - getCall(object) is not of class",
+                   sQuote("call")))
+      callframe <- object$callframe
+    }
+    
+    callfun <- as.character(call[[1]])
+    newstyle <- (callfun == "ppm.formula")
+    oldstyle <- !newstyle
+
+
+    ## Special cases 
+    ## (1) no new information given
+    if(length(aargh) == 0 && !identical(use.internal, TRUE)) {
+      result <- eval(call, as.list(envir), enclos=callframe)
+      result$callframe <- callframe
+      return(result)
+    }
+
+    ## (2) model can be updated using existing covariate data frame
+    if(!identical(use.internal, FALSE) &&
+       ## single argument which is a formula
+       (length(aargh) == 1) &&
+       inherits(fmla <- aargh[[1]], "formula") &&
+       is.null(lhs.of.formula(fmla)) &&
+       ## not a ppm.formula call
+       oldstyle &&
+       ## fitted by mpl using glm/gam
+       with(object,
+            method == "mpl" &&
+            !is.null(fitter) &&
+            fitter %in% c("gam", "glm"))) {
+      ## This is a dangerous hack! 
+      glmdata <- object$internal$glmdata
+      ## check whether data for new variables are available
+      ## (this doesn't work with things like 'pi')
+      vars.available <- c(colnames(glmdata), names(object$covfunargs))
+      if(all(variablesinformula(fmla) %in% c(".", vars.available))) {
+        ## we can update using internal data
+        FIT <- object$internal$glmfit
+        orig.env <- environment(FIT$terms)
+        ## update formulae using "." rules
+        trend <- newformula(object$trend, fmla, callframe, envir)
+        fmla  <- newformula(formula(FIT), fmla, callframe, envir)
+        ## expand polynom() in formula
+        if(spatstat.options("expand.polynom")) {
+          fmla <- expand.polynom(fmla)
+          trend <- expand.polynom(trend)
+        }
+        ## update GLM/GAM fit 
+        upd.glm.call <- update(FIT, fmla, evaluate=FALSE)
+        FIT <- eval(upd.glm.call, envir=orig.env)
+        environment(FIT$terms) <- orig.env
+        object$internal$glmfit <- FIT
+        ## update entries of object
+        object$trend <- trend
+        object$terms <- terms(fmla)
+        object$coef <- co <- FIT$coef
+        object$callstring <- new.callstring
+        object$internal$fmla <- fmla
+        ##
+        if(is.finite(object$maxlogpl)) {
+          ## Update maxlogpl provided it is finite
+          ## (If the likelihood is infinite, this is due to the interaction;
+          ## if we update the trend, the likelihood will remain infinite.)
+          W <- glmdata$.mpl.W
+          SUBSET <- glmdata$.mpl.SUBSET        
+          Z <- is.data(object$Q)
+          object$maxlogpl <- -(deviance(FIT)/2 +
+                               sum(log(W[Z & SUBSET])) + sum(Z & SUBSET))
+        }
+        ## update the model call
+        upd.call <- call
+        upd.call$trend <- trend
+        object$call <- upd.call
+        ## update fitted interaction (depends on coefficients, if not Poisson)
+        if(!is.null(inter <- object$interaction) && !is.poisson(inter)) 
+          object$fitin <-
+            fii(inter, co, object$internal$Vnames, object$internal$IsOffset)
+        ##
+        if(is.stationary(object) && !is.marked(object)) {
+          ## uniform Poisson
+          if(eval(call$rename.intercept) %orifnull% TRUE) {
+             names(object$coef) <- "log(lambda)"
+          }
+        }
+        return(object)
+      }
+    }
+
+    ## (3) Need to use internal data   
+    if(oldstyle) {
+      ## decide whether to use internal data
+      undecided <- is.null(use.internal) || !is.logical(use.internal)
+      force.int   <- !undecided && use.internal
+      force.ext   <- !undecided && !use.internal
+      if(!force.int) {
+        ## check for validity of format
+        badformat <- damaged.ppm(object)
+      }
+      if(undecided) {
+        use.internal <- badformat
+        if(badformat)
+          message("object format corrupted; repairing it")
+      } else if(force.ext && badformat)
+        warning("object format corrupted; try update(object, use.internal=TRUE)")
+      if(use.internal) {
+        ## reset the main arguments in the call using the internal data
+        call$Q <- quad.ppm(object)
+        namobj <- names(call)
+        if("trend" %in% namobj)
+          call$trend <- newformula(call$trend, object$trend, callframe, envir)
+        if("interaction" %in% namobj) call$interaction <- object$interaction
+        if("covariates" %in% namobj) call$covariates <- object$covariates
+      }
+    }
+
+    ## General case.
+    X.is.new <- FALSE
+    
+    ## First split named and unnamed arguments
+    nama <- names(aargh)
+    named <- if(is.null(nama)) rep.int(FALSE, length(aargh)) else nzchar(nama)
+    namedargs <- aargh[named]
+    unnamedargs <- aargh[!named]
+    nama <- names(namedargs)
+
+    ## Find the argument 'Q' by name or implicitly by class
+    ##   (including detection of conflicts)
+    argQ <- NULL
+    if(n <- sp.foundclasses(c("ppp", "quad"), unnamedargs, "Q", nama)) {
+      argQ <- unnamedargs[[n]]
+      unnamedargs <- unnamedargs[-n]
+    }
+    if("Q" %in% nama) {
+      argQ <- namedargs$Q
+      nama <- setdiff(nama, "Q")
+      namedargs <- namedargs[nama]
+    }
+    ## Deal with argument 'Q' which has several possible forms
+    if(!is.null(argQ)) {
+      X.is.new <- TRUE
+      if(inherits(argQ, "formula")) {
+        ## Q = X ~ trend
+        if(newstyle) {
+          ## update the formula
+          call$Q <- newformula(call$Q, argQ, callframe, envir)
+        } else {
+          ## split into Q = X and trend = ~trend
+          if(!is.null(lhs <- lhs.of.formula(argQ)))
+            call$Q <- newpattern(call$Q, lhs, callframe, envir)
+          call$trend <- newformula(call$trend,
+                                   rhs.of.formula(eval(argQ)),
+                                   callframe, envir)
+        }
+      } else {
+        ## Q = X
+        if(newstyle) {
+          ## convert old call to old style
+          fo <- as.formula(call$Q)
+          Yexpr <- lhs.of.formula(fo)
+          trend <- rhs.of.formula(fo)
+          newcall <- call("ppm", Q=Yexpr, trend=trend)
+          if(length(call) > 2) {
+            whichQ <- which(names(call) == "Q")
+            morecall <- call[-c(1, whichQ)]
+            if((mc <- length(morecall)) > 0) {
+              newcall[3 + 1:mc] <- morecall
+              names(newcall)[3 + 1:mc] <- names(call)[-c(1, whichQ)]
+            }
+          }
+          call <- newcall
+          newstyle <- FALSE
+          oldstyle <- TRUE
+        }
+        ## Now update the dataset
+        call$Q <- argQ
+      }
+    }
+
+    ## Find any formula arguments 
+    ##   (including detection of conflicts)
+    argfmla <- NULL
+    if(n <- sp.foundclass("formula", unnamedargs, "trend", nama)) {
+      argfmla <- unnamedargs[[n]]
+      unnamedargs <- unnamedargs[-n]
+    } else if(n <- sp.foundclass("character", unnamedargs, "trend", nama)) {
+      ## string that might be interpreted as a formula
+      strg <- unnamedargs[[n]]
+      if(!is.na(charmatch("~", strg))) {
+        argfmla <- as.formula(strg)
+        unnamedargs <- unnamedargs[-n]
+      }
+    }
+    if("trend" %in% nama) {
+      argfmla <- namedargs$trend
+      nama <- setdiff(nama, "trend")
+      namedargs <- namedargs[nama]
+    }
+    ## Handle new formula
+    if(!is.null(argfmla)) {
+      lhs <- lhs.of.formula(argfmla)
+      if(newstyle) {
+        ## ppm.formula: update the formula
+        if(is.null(lhs)) {
+          argfmla <- as.formula(paste(".", deparse(argfmla)))
+        } else X.is.new <- TRUE
+        call$Q <- newformula(call$Q, argfmla, callframe, envir)
+      } else {
+        ## ppm.ppp: update the trend and possibly the data
+        if(is.null(lhs)) {
+          ## assign new trend
+          call$trend <- newformula(call$trend, argfmla, callframe, envir)
+        } else {
+          ## split into Q = X and trend = ~trend
+          X.is.new <- TRUE
+          call$Q <- newpattern(call$Q, lhs, callframe, envir)
+          call$trend <- newformula(call$trend,
+                                   rhs.of.formula(argfmla),
+                                   callframe, envir)
+        }
+      } 
+    }
+    
+    if(length(namedargs) > 0) {
+      ## any other named arguments that were also present in the original call
+      ## override their original values.
+      existing <- !is.na(match(nama, names(call)))
+      for (a in nama[existing]) call[[a]] <- aargh[[a]]
+
+      ## add any named arguments not present in the original call
+      if (any(!existing)) {
+        call <- c(as.list(call), namedargs[!existing])
+        call <- as.call(call)
+      }
+    }
+    if(length(unnamedargs) > 0) {
+      ## some further objects identified by their class
+      if(n<- sp.foundclass("interact", unnamedargs, "interaction", nama)) {
+        call$interaction <- unnamedargs[[n]]
+        unnamedargs <- unnamedargs[-n]
+      }
+      if(n <- sp.foundclasses(c("data.frame", "im"),
+                              unnamedargs, "covariates", nama)) {
+        call$covariates <- unnamedargs[[n]]
+        unnamedargs <- unnamedargs[-n]
+      }
+    }
+  
+    ## *************************************************************
+    ## ****** Special action when Q is a point pattern *************
+    ## *************************************************************
+    if(X.is.new && fixdummy && oldstyle &&
+       inherits((X <- eval(call$Q, as.list(envir), enclos=callframe)), "ppp")) {
+      ## Instead of allowing default.dummy(X) to occur,
+      ## explicitly create a quadrature scheme from X,
+      ## using the same dummy points and weight parameters
+      ## as were used in the fitted model 
+      Qold <- quad.ppm(object)
+      if(is.marked(Qold)) {
+        dpar <- Qold$param$dummy
+        wpar <- Qold$param$weight
+        Qnew <- do.call(quadscheme, append(list(X), append(dpar, wpar)))
+      } else {
+        Dum <- Qold$dummy
+        wpar <- Qold$param$weight
+        Qnew <- do.call(quadscheme, append(list(X, Dum), wpar))
+      }
+      ## replace X by new Q
+      call$Q <- Qnew
+    }
+
+    ## finally call ppm
+    call[[1]] <- as.name('ppm')
+    return(eval(call, as.list(envir), enclos=callframe))
+  }
+
+  update.ppm
+})
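+
+# Sketch (not run): refit with a new trend, reusing the original data;
+# 'cells' is a point pattern dataset shipped with spatstat:
+#     fit  <- ppm(cells ~ 1)
+#     fit2 <- update(fit, ~ polynom(x, y, 2))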
+
+sp.foundclass <- function(cname, inlist, formalname, argsgiven) {
+  ok <- unlist(lapply(inlist, inherits, what=cname))
+  nok <- sum(ok)
+  if(nok > 1)
+    stop(paste("I am confused: there are two unnamed arguments",
+               "of class", sQuote(cname)))
+  if(nok == 0) return(0)
+  absent <- !(formalname %in% argsgiven)
+  if(!absent)
+    stop(paste("I am confused: there is an unnamed argument",
+               "of class", sQuote(cname), "which conflicts with the",
+               "named argument", sQuote(formalname)))
+  theposition <- seq_along(ok)[ok]
+  return(theposition)
+}
+
+sp.foundclasses <- function(cnames, inlist, formalname, argsgiven) {
+  ncn <- length(cnames)
+  pozzie <- logical(ncn)
+  for(i in seq_len(ncn))
+    pozzie[i] <- sp.foundclass(cnames[i],  inlist, formalname, argsgiven)
+  found <- (pozzie > 0)
+  nfound <- sum(found)
+  if(nfound == 0)
+    return(0)
+  else if(nfound == 1)
+    return(pozzie[found])
+  else
+    stop(paste("I am confused: there are", nfound,
+               "unnamed arguments of different classes (",
+               paste(sQuote(cnames[found]), collapse=", "),
+               ") which could be interpreted as",
+               sQuote(formalname)))
+}
+    
+
+damaged.ppm <- function(object) {
+  ## guess whether the object format has been damaged
+  ## e.g. by dump/restore
+  gf <- getglmfit(object)
+  badfit <- !is.null(gf) && !inherits(gf$terms, "terms")
+  if(badfit)
+    return(TRUE)
+  ## escape clause for fake models
+  if(identical(object$fake, TRUE))
+    return(FALSE)
+  ## otherwise it was made by ppm 
+  Qcall <- object$call$Q
+  cf <- object$callframe
+  if(is.null(cf)) {
+    ## Old format of ppm objects
+    if(is.name(Qcall) && !exists(paste(Qcall)))
+      return(TRUE)
+    Q <- eval(Qcall)
+  } else {
+    ## New format of ppm objects
+    if(is.name(Qcall) && !exists(paste(Qcall), cf))
+      return(TRUE)
+    Q <- eval(Qcall, cf)
+  }
+  badQ <- is.null(Q) || !(inherits(Q, c("ppp", "quad", "formula")))
+  return(badQ)
+}
diff --git a/R/util.R b/R/util.R
new file mode 100755
index 0000000..e81d901
--- /dev/null
+++ b/R/util.R
@@ -0,0 +1,375 @@
+#
+#    util.R    miscellaneous utilities
+#
+#    $Revision: 1.237 $    $Date: 2017/06/05 10:31:58 $
+#
+
+# common invocation of matrixsample
+
+rastersample <- function(X, Y) {
+  stopifnot(is.im(X) || is.mask(X))
+  stopifnot(is.im(Y) || is.mask(Y))
+  phase <- c((Y$yrow[1] - X$yrow[1])/X$ystep,
+             (Y$xcol[1] - X$xcol[1])/X$xstep)
+  scale <- c(Y$ystep/X$ystep,
+             Y$xstep/X$xstep)
+  if(is.im(X)) {
+    # resample an image
+    if(!is.im(Y))
+      Y <- as.im(Y)
+    Xtype <- X$type
+    Xv    <- X$v
+    # handle factor-valued image as integer
+    if(Xtype == "factor") 
+      Xv <- array(as.integer(Xv), dim=X$dim)
+    # resample
+    naval <- switch(Xtype,
+                 factor=,
+                 integer= NA_integer_, 
+                 logical = as.logical(NA_integer_), 
+                 real = NA_real_, 
+                 complex = NA_complex_, 
+                 character = NA_character_,
+                 NA)
+    Y$v <- matrixsample(Xv, Y$dim, phase=phase, scale=scale, na.value=naval)
+    # inherit pixel data type from X
+    Y$type <- Xtype
+    if(Xtype == "factor") {
+      Y$v <- factor(Y$v, labels=levels(X))
+      dim(Y$v) <- Y$dim
+    }
+  } else {
+    # resample a mask
+    if(!is.mask(Y)) Y <- as.mask(Y)
+    Y$m <- matrixsample(X$m, Y$dim, phase=phase, scale=scale, na.value=FALSE)
+  }
+  return(Y)
+}
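+
+# Sketch (not run): resample a fine image onto a coarser raster:
+#     X <- as.im(function(x, y) { x + y }, square(1), dimyx=128)
+#     M <- as.mask(square(1), dimyx=32)
+#     Y <- rastersample(X, M)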
+
+pointgrid <- function(W, ngrid) {
+  W <- as.owin(W)
+  masque <- as.mask(W, dimyx=ngrid)
+  rxy <- rasterxy.mask(masque, drop=TRUE)
+  xx <- rxy$x
+  yy <- rxy$y
+  return(ppp(xx, yy, W))
+}
+
+onecolumn <- function(m) {
+  switch(markformat(m),
+         none=stop("No marks provided"),
+         vector=m,
+         dataframe=m[,1, drop=TRUE],
+         NA)
+}
+
+
+checkbigmatrix <- function(n, m, fatal=FALSE, silent=FALSE) {
+  if(n * m <= spatstat.options("maxmatrix"))
+    return(TRUE)
+  whinge <- paste("Attempted to create binary mask with",
+                  n, "*", m, "=", n * m, "entries")
+  if(fatal) stop(whinge, call.=FALSE)
+  if(!silent) warning(whinge, call.=FALSE)
+  return(FALSE)
+}
+
+
+## ........... progress reports .....................
+
+progressreport <- local({
+
+  Put <- function(name, value, state) {
+    if(is.null(state)) {
+      putSpatstatVariable(paste0("Spatstat.", name), value)
+    } else {
+      state[[name]] <- value
+    }
+    return(state)
+  }
+  Get <- function(name, state) {
+    if(is.null(state)) {
+      value <- getSpatstatVariable(paste0("Spatstat.", name))
+    } else {
+      value <- state[[name]] 
+    }
+    return(value)
+  }
+
+  IterationsPerLine <- function(charsperline, n, every, tick,
+                                showtime, showevery) {
+    # Calculate number of iterations that triggers a newline.
+    # A dot is printed every 'tick' iterations
+    # Iteration number is printed every 'every' iterations.
+    # If showtime=TRUE, the time is shown every 'showevery' iterations
+    # where showevery \in {1, every, n}.
+    chars.report <- max(1, ceiling(log10(n)))
+    if(showtime) {
+      chars.time <- nchar(' [etd 12:00:00] ')
+      timesperreport <- if(showevery == 1) every else
+                        if(showevery == every) 1 else 0
+      chars.report <- chars.report + timesperreport * chars.time
+    }
+    chars.ticks <- floor((every-1)/tick)
+    chars.block <- chars.report + chars.ticks
+    nblocks <- max(1, floor(charsperline/chars.block))
+    nperline <- nblocks * every
+    leftover <- charsperline - nblocks * chars.block
+    if(leftover > 0)
+      nperline <- nperline + min(leftover * tick, showevery - 1)
+    return(nperline)
+  }
+  
+  progressreport <- function(i, n,
+                             every=min(100,max(1, ceiling(n/100))),
+                             tick=1,
+                             nperline=NULL,
+                             charsperline=getOption("width"),
+                             style=spatstat.options("progress"),
+                             showtime=NULL,
+                             state=NULL) {
+    missevery <- missing(every)
+    nperline.fixed <- !is.null(nperline)
+    showtime.optional <- is.null(showtime)
+    if(showtime.optional) showtime <- FALSE # initialise only
+    if(i > n) {
+      warning(paste("progressreport called with i =", i, "> n =", n))
+      return(invisible(NULL))
+    }
+    if(style == "tk" && !requireNamespace("tcltk")) {
+      warning("tcltk is unavailable; switching to style='txtbar'", call.=FALSE)
+      style <- "txtbar"
+    }
+    if(is.null(state) && style != "tty")
+      stop(paste("Argument 'state' is required when style =",sQuote(style)),
+           call.=FALSE)
+    switch(style,
+           txtbar={
+             if(i == 1) {
+               ## initialise text bar
+               state <- Put("ProgressBar",
+                            txtProgressBar(1, n, 1, style=3),
+                            state)
+             } else {
+               ## get text bar
+               pbar <- Get("ProgressBar", state)
+               ## update 
+               setTxtProgressBar(pbar, i)
+               if(i == n) {
+                 close(pbar)
+                 state <- Put("ProgressBar", NULL, state)
+               } 
+             }
+           },
+           tk={
+             requireNamespace("tcltk")
+             if(i == 1) {
+               ## initialise text bar
+               state <- Put("ProgressBar",
+                            tcltk::tkProgressBar(title="progress",
+                                                 min=0, max=n, width=300),
+                            state)
+             } else {
+               ## get text bar
+               pbar <- Get("ProgressBar", state)
+               ## update 
+               tcltk::setTkProgressBar(pbar, i,
+                                       label=paste0(round(100 * i/n), "%"))
+               if(i == n) {
+                 close(pbar)
+                 state <- Put("ProgressBar", NULL, state)
+               } 
+             }
+           },
+           tty={
+             now <- proc.time()
+             if(i == 1 || is.null(state)) {
+               ## Initialise stuff
+               if(missevery && every > 1 && n > 10) 
+                 every <- niceround(every)
+               showevery <- if(showtime) every else n
+               if(!nperline.fixed) 
+                 nperline <- IterationsPerLine(charsperline, n, every, tick,
+                                               showtime, showevery)
+               state <- Put("ProgressData",
+                            list(every=every,
+                                 tick=tick,
+                                 nperline=nperline,
+                                 starttime=now,
+                                 showtime=showtime,
+                                 showevery=showevery,
+                                 nperline.fixed=nperline.fixed,
+                                 showtime.optional=showtime.optional),
+                            state)
+             } else {
+               pd <- Get("ProgressData", state)
+               if(is.null(pd))
+                 stop(paste("progressreport called with i =", i,
+                            "before i = 1"))
+               every     <- pd$every
+               tick      <- pd$tick
+               nperline  <- pd$nperline
+               showtime  <- pd$showtime
+               showevery <- pd$showevery
+               showtime.optional <- pd$showtime.optional
+               nperline.fixed    <- pd$nperline.fixed
+               if(i < n) {
+                 if(showtime || showtime.optional) {
+                   ## estimate time remaining
+                   starttime <- pd$starttime
+                   elapsed <- now - starttime
+                   elapsed <- unname(elapsed[3])
+                   rate <- elapsed/(i-1)
+                   remaining <- rate * (n-i)
+                   if(!showtime) {
+                     ## show time remaining if..
+                     if(rate > 20) {
+                       ## .. rate is very slow
+                       showtime <- TRUE
+                       showevery <- 1
+                     } else if(remaining > 180) {
+                       ## ... more than 3 minutes remaining
+                       showtime <- TRUE
+                       showevery <- every
+                       aminute <- ceiling(60/rate)
+                       if(aminute < showevery) 
+                         showevery <- min(niceround(aminute), showevery)
+                     }
+                     # update number of iterations per line
+                     if(showtime && !nperline.fixed) 
+                       nperline <- IterationsPerLine(charsperline,
+                                                     n, every, tick,
+                                                     showtime, showevery)
+                   }
+                 }
+                 state <- Put("ProgressData",
+                              list(every=every,
+                                   tick=tick,
+                                   nperline=nperline,
+                                   starttime=starttime,
+                                   showtime=showtime,
+                                   showevery=showevery,
+                                   nperline.fixed=nperline.fixed,
+                                   showtime.optional=showtime.optional),
+                              state)
+               }
+             }
+             if(i == n) 
+               cat(paste(" ", n, ".\n", sep=""))
+             else if(every == 1 || i <= 3)
+               cat(paste(i, ",", if(i %% nperline == 0) "\n" else " ", sep=""))
+             else {
+               if(i %% every == 0) 
+                 cat(i)
+               else if(i %% tick == 0)
+                 cat(".")
+               if(i %% nperline == 0)
+                 cat("\n")
+             }
+             if(i < n && showtime && (i %% showevery == 0)) {
+               st <- paste("etd", codetime(round(remaining)))
+               st <- paren(st, "[")
+               cat(paste("", st, ""))
+             }
+             flush.console()
+           },
+           stop(paste("Unrecognised option for style:", dQuote(style)))
+           )
+    return(invisible(state))
+  }
+
+  progressreport
+})
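+
+# Sketch (not run): a dot per iteration and the count every 10
+# iterations, on a text console (the default 'tty' style):
+#     for(i in 1:100) progressreport(i, 100, every=10)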
+  
+
+
+multiply.only.finite.entries <- function(x, a) {
+  # In ppm a potential value that is -Inf must remain -Inf
+  # and a potential value that is 0 multiplied by NA remains 0
+  y <- x
+  ok <- is.finite(x) & (x != 0)
+  y[ok] <- a * x[ok]
+  return(y)
+}
+
+
+ 
+## print names and version numbers of libraries loaded
+
+sessionLibs <- function() {
+  a <- sessionInfo()
+  b <- unlist(lapply(a$otherPkgs, getElement, name="Version"))
+  g <- rbind(names(b), unname(b))
+  d <- apply(g, 2, paste, collapse=" ")
+  if(length(d) > 0) {
+    cat("Libraries loaded:\n")
+    for(di in d) cat(paste("\t", di, "\n"))
+  } else cat("Libraries loaded: None\n")
+  return(invisible(d))
+}
+
+
+
+# ..................
+
+prepareTitle <- function(main) {
+  ## Count the number of lines in a main title
+  ## Convert title to a form usable by plot.owin
+  if(is.expression(main)) {
+    nlines <- 1
+  } else {
+    main <- paste(main)
+    ## break at newline 
+    main <- unlist(strsplit(main, "\n"))
+    nlines <- if(sum(nchar(main)) == 0) 0 else length(main)
+  }
+  return(list(main=main,
+              nlines=nlines,
+              blank=rep('  ', nlines)))
+}
+
+requireversion <- function(pkg, ver) {
+  pkgname <- deparse(substitute(pkg))
+  v <- read.dcf(file=system.file("DESCRIPTION", package=pkgname), 
+                fields="Version")
+  if(package_version(v) < ver)
+    stop(paste("Package",
+               sQuote(pkgname),
+               "is out of date: version >=",
+               ver,
+               "is needed"),
+         call.=FALSE)
+  invisible(NULL)
+}
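+
+# Sketch (not run):
+#     requireversion(spatstat, "1.52-0")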
+
+spatstatDiagnostic <- function(msg) {
+  cat("-----------------------------\n")
+  cat(paste(" >>> Spatstat Diagnostic: ", msg, "<<<\n"))
+  cat("-----------------------------\n")
+  invisible(NULL)
+}
+
+allElementsIdentical <- function(x, entry=NULL) {
+  if(length(x) <= 1) return(TRUE)
+  if(is.null(entry)) {
+    x1 <- x[[1]]
+    for(i in 2:length(x))
+      if(!identical(x[[i]], x1)) return(FALSE)
+  } else {
+    e1 <- x[[1]][[entry]]
+    for(i in 2:length(x))
+      if(!identical(x[[i]][[entry]], e1)) return(FALSE)
+  }
+  return(TRUE)
+}
+
+representativeRows <- function(x) {
+  # select a unique representative of each equivalence class of rows,
+  # in a numeric matrix or data frame of numeric values.
+  ord <- do.call(order, as.list(as.data.frame(x)))
+  y <- x[ord, , drop=FALSE]
+  dy <- apply(y, 2, diff)
+  answer <- logical(nrow(y))
+  answer[ord] <- c(TRUE, !matrowall(dy == 0))
+  return(answer)
+}
\ No newline at end of file
diff --git a/R/varblock.R b/R/varblock.R
new file mode 100755
index 0000000..8ac7483
--- /dev/null
+++ b/R/varblock.R
@@ -0,0 +1,163 @@
+#
+#   varblock.R
+#
+#   Variance estimation using block subdivision
+#
+#   $Revision: 1.20 $  $Date: 2016/12/30 01:44:50 $
+#
+
+varblock <- local({
+
+  getrvalues <- function(z) { with(z, .x) }
+
+  stepsize <- function(z) { mean(diff(z)) } 
+  
+  dofun <- function(domain, fun, Xpp, ...) { fun(Xpp, ..., domain=domain) }
+
+  varblock <- function(X, fun=Kest,
+                       blocks=quadrats(X, nx=nx, ny=ny), ..., 
+                       nx=3, ny=nx,
+                       confidence=0.95) {
+    stopifnot(is.ppp(X))
+    stopifnot(is.tess(blocks))
+    stopifnot(is.function(fun) || is.character(fun))
+    if(is.character(fun)) 
+      fun <- get(fun, mode="function")
+    ## validate confidence level
+    stopifnot(confidence > 0.5 && confidence < 1)
+    alpha <- 1 - confidence
+    probs <- c(alpha/2, 1-alpha/2)
+    ## determine whether 'fun' has an argument called 'domain'
+    canrestrict <- ("domain" %in% names(formals(fun))) ||
+                   samefunction(fun, pcf) ||
+                   samefunction(fun, Lest)
+    ## check there's at least one point in each block
+    Y <- split(X, blocks)
+    nums <- sapply(Y, npoints)
+    blockok <- (nums > 0)
+    if(some.zeroes <- any(!blockok)) 
+      warning("Some tiles contain no data: they are discarded")
+    if(!canrestrict) {
+      ## divide data into disjoint blocks
+      if(some.zeroes)
+        Y <- Y[blockok]
+      n <- length(Y)
+      if(n <= 1) stop("Need at least 2 blocks")
+      ## apply 'fun' to each block
+      if(any(c("r", "breaks") %in% names(list(...)))) {
+        ## r vector specified
+        fX <- fun(X, ...)
+        z <- lapply(Y, fun, ...)
+      } else {
+        ## need to ensure compatible fv objects
+        z <- lapply(Y, fun, ...)
+        rlist <- lapply(z, getrvalues)
+        rmax <- min(sapply(rlist, max))
+        rstep <- min(sapply(rlist, stepsize))
+        r <- seq(0, rmax, by=rstep)
+        z <- lapply(Y, fun, ..., r=r)
+        fX <- fun(X, ..., r=r)
+      }
+    } else {
+      ## use 'domain' argument of 'fun' to compute contributions from each tile
+      B <- tiles(blocks)
+      if(some.zeroes)
+        B <- B[blockok]
+      n <- length(B)
+      if(any(c("r", "breaks") %in% names(list(...)))) {
+        ## r vector specified
+        fX <- fun(X, ...)
+        z <- lapply(B, dofun, ..., fun=fun, Xpp=X)
+      } else {
+        ## need to ensure compatible fv objects
+        z <- lapply(B, dofun, ..., fun=fun, Xpp=X)
+        rlist <- lapply(z, getrvalues)
+        rmax <- min(sapply(rlist, max))
+        rstep <- min(sapply(rlist, stepsize))
+        r <- seq(0, rmax, by=rstep)
+        z <- lapply(B, dofun, ..., fun=fun, Xpp=X, r=r)
+        fX <- fun(X, ..., r=r)
+      }
+    }
+    ## find columns that are common to all estimates
+    zzz <- reconcile.fv(append(list(fX), z))
+    fX <- zzz[[1]]
+    z <- zzz[-1]
+    ## sample mean
+    m <- meanlistfv(z)
+    ## sample variance
+    sqdev <- lapply(z, sqdev.fv, m=m)
+    v <- meanlistfv(sqdev)
+    v <- eval.fv(v * n/(n-1), dotonly=FALSE)
+    ## sample standard deviation
+    sd <- eval.fv(sqrt(v), dotonly=FALSE)
+    ## upper and lower limits
+    sem <- eval.fv(sd/sqrt(n), dotonly=FALSE)
+    zcrit <- qnorm(probs)
+    lower <- eval.fv(m + zcrit[1] * sem, dotonly=FALSE)
+    upper <- eval.fv(m + zcrit[2] * sem, dotonly=FALSE)
+    ## rebadge
+    fva <- .Spatstat.FvAttrib
+    fva <- fva[fva %in% names(attributes(fX))]
+    attributes(m)[fva] <- attributes(v)[fva] <- attributes(sd)[fva] <- 
+        attributes(upper)[fva] <- attributes(lower)[fva] <- attributes(fX)[fva]
+    m <- prefixfv(m, "mean", "sample mean of", "bold(mean)~")
+    v <- prefixfv(v, "var", "estimated variance of", "bold(var)~")
+    sd <- prefixfv(sd, "sd", "estimated standard deviation of", "bold(sd)~")
+    CItext <- paste(c("lower", "upper"),
+                    paste0(100 * confidence, "%%"),
+                    "CI limit for")
+    lower <- prefixfv(lower, "lo", CItext[1], "bold(lo)~")
+    upper <- prefixfv(upper, "hi", CItext[2], "bold(hi)~")
+    ## tack together 
+    out <- cbind(fX,m,v,sd,upper,lower)
+    ## restrict r domain
+    bad <- matrowall(!is.finite(as.matrix(as.data.frame(out))))
+    rmax <- max(getrvalues(out)[!bad])
+    alim <- c(0, rmax)
+    if(!canrestrict) alim <- intersect.ranges(attr(out, "alim"), alim)
+    attr(out, "alim") <- alim
+    ## sensible default plot formula
+    ybase <- fvnames(fX, ".y")
+    xname <- fvnames(fX, ".x")
+    tname <- intersect("theo", fvnames(fX, "."))
+    fvnames(out, ".y") <- yname <- paste0("mean", ybase)
+    fvnames(out, ".s") <- snames <- paste0(c("lo", "hi"), ybase)
+    fvnames(out, ".") <- c(yname, tname, snames)
+    attr(out, "fmla") <- paste(". ~ ", xname)
+    return(out)
+  }
+  
+  sqdev.fv <- function(x,m){ eval.fv((x-m)^2, dotonly=FALSE) }
+  
+  varblock
+})
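+
+# Usage sketch (uses the standard 'cells' dataset shipped with spatstat;
+# illustrative only): estimate K with pointwise confidence limits
+# obtained from a 3 x 3 block subdivision.
+#   v <- varblock(cells, Kest, nx=3)
+#   plot(v)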
+
+
+meanlistfv <- local({
+
+  getYmatrix <- function(x, yn=ynames) { as.matrix(as.data.frame(x)[,yn]) }
+
+  meanlistfv <- function(z, ...) {
+    ## compute sample mean of a list of fv objects
+    if(!is.list(z) || !all(unlist(lapply(z, is.fv))))
+      stop("z should be a list of fv objects")
+    if(!do.call(compatible, unname(z)))
+      stop("Objects are not compatible")
+    result <- template <- z[[1]]
+    ## extract each object's function values as a matrix
+    ynames <- fvnames(template, "*")
+    matlist <- unname(lapply(z, getYmatrix, yn=ynames))
+    ## stack matrices into an array
+    y <- do.call(abind, append(matlist, list(along=3)))
+    ## take mean 
+    ymean <- apply(y, 1:2, mean, ...)
+    result[,ynames] <- ymean
+    return(result)
+  }
+
+  meanlistfv
+})
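+
+# Usage sketch (illustrative), assuming the two estimates are compatible
+# (same r grid and the same set of edge corrections):
+#   r <- seq(0, 0.25, length=128)
+#   mK <- meanlistfv(list(Kest(cells, r=r), Kest(redwood, r=r)))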
+
+
+  
diff --git a/R/varcount.R b/R/varcount.R
new file mode 100644
index 0000000..9586aca
--- /dev/null
+++ b/R/varcount.R
@@ -0,0 +1,65 @@
+#'
+#'    varcount.R
+#'
+#'   Variance of N(B)
+#'
+#'  $Revision: 1.8 $  $Date: 2015/11/21 07:02:51 $
+#'
+
+varcount <- function(model, B, ..., dimyx=NULL) {
+  stopifnot(is.owin(B) || is.im(B) || is.function(B))
+  g <- pcfmodel(model)
+  if(!is.function(g))
+    stop("Pair correlation function cannot be computed")
+  if(is.owin(B)) {
+    lambdaB <- predict(model, locations=B, ngrid=dimyx, type="intensity")
+    v <- varcountEngine(g, B, lambdaB)
+  } else {
+    f <- if(is.im(B)) B else as.im(B, W=as.owin(model), ..., dimyx=dimyx)
+    B <- as.owin(f)
+    lambdaB <- predict(model, locations=B, type="intensity")
+    v <- varcountEngine(g, B, lambdaB, f)
+  } 
+  return(v)
+}
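+
+#' The quantity returned (for B a window, with intensity lambda and
+#' pair correlation g of the fitted model) is
+#'    Var N(B) = integral_B lambda(u) du
+#'             + integral_B integral_B (g(u-v)-1) lambda(u) lambda(v) du dv
+#' evaluated by varcountEngine below. Usage sketch (model and window
+#' illustrative):
+#'   fit <- kppm(redwood ~ 1, "Thomas")
+#'   varcount(fit, square(0.5))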
+
+varcountEngine <- local({
+
+  varcountEngine <- function(g, B, lambdaB, f=1) {
+    if(missing(f) || identical(f, 1)) {
+      v <- integral(lambdaB) + covterm(g, B, lambdaB)
+    } else if(min(f) >= 0) {
+      ## nonnegative integrand
+      v <- integral(lambdaB * f^2) + covterm(g, B, lambdaB * f)
+    } else if(max(f) <= 0) {
+      ## nonpositive integrand
+      v <- integral(lambdaB * f^2) + covterm(g, B, lambdaB * (-f))
+    } else {
+      ## integrand has both positive and negative parts
+      lamfplus <- eval.im(lambdaB * pmax(0, f))
+      lamfminus <- eval.im(lambdaB * pmax(0, -f))
+      v <- integral(lambdaB * f^2) +
+        (covterm(g, B, lamfplus) + covterm(g, B, lamfminus)
+         - covterm(g, B, lamfplus, lamfminus)
+         - covterm(g, B, lamfminus, lamfplus))
+    }
+    return(v)
+  }
+
+  covterm <- function(g, B, f, f2) {
+    if(missing(f2)) {
+      # \int_B \int_B (g(u-v) - 1) f(u) f(v) du dv
+      H <- distcdf(B, dW=f)
+      a <- integral(f)^2 * (as.numeric(stieltjes(g, H)) - 1)
+    } else {
+      # \int_B \int_B (g(u-v) - 1) f(u) f2(v) du dv
+      H <- distcdf(B, dW=f, dV=f2)
+      a <- integral(f) * integral(f2) * (as.numeric(stieltjes(g, H)) - 1)
+    }
+    return(a)
+  }
+  
+  varcountEngine
+})
+
+
diff --git a/R/vblogistic.R b/R/vblogistic.R
new file mode 100644
index 0000000..9805a39
--- /dev/null
+++ b/R/vblogistic.R
@@ -0,0 +1,281 @@
+#' Variational Bayesian Logistic regression
+#' 
+#' author: Tuomas Rajala < tuomas.rajala a iki.fi >
+#'
+#' Copyright (C) Tuomas Rajala 2014
+#' GNU Public License GPL 2.0 | 3.0
+#' 
+#' Special version for 'spatstat'
+#'
+#'    $Revision: 1.5 $ $Date: 2015/04/02 02:17:19 $
+#' 
+####################################################
+#' Used inside ppm
+vblogit.fmla <- function(formula, offset, data, subset, weights,
+                            verbose=FALSE, epsilon=0.01, ...) {
+  mf <- match.call(expand.dots = FALSE)
+  m <- match(c("formula", "data", "subset", "offset"), names(mf), 0L)
+  mf <- mf[c(1L, m)]
+  mf$drop.unused.levels <- TRUE
+  mf[[1L]] <- quote(stats::model.frame)
+  mf <- eval(mf, parent.frame())
+  mt <- attr(mf, "terms")
+  offset <- model.offset(mf)
+  y <- model.response(mf, "any")
+  X <- model.matrix(mt, mf) 
+  colnames(X)[1] <- "(Intercept)"
+  Vnames <- colnames(X)
+  #' then we fit:
+  fit <- vblogit(y=y, X=X, offset=offset, verb=verbose, eps=epsilon, ...)
+  #'
+  names(fit$coefficients) <- names(fit$coef) <- Vnames
+  #' add some variables to conform to summary.ppm
+  fit$se <- sqrt(diag(as.matrix(fit$S)))
+  fit$call <- match.call(expand.dots=FALSE)
+  fit$formula <- formula
+  fit$method <- "vblogit"
+  fit$model <- mf
+  fit$terms <- mt
+  fit$offset <- offset
+  fit$data <- data
+  fit$xlevels <- .getXlevels(mt, mf)
+  fit
+}
+###################################################
+# the fitting function:
+vblogit <- local({
+
+  ## helper functions needed:
+  lambda <- function(x) { -tanh(x/2)/(4*x) }
+  mygamma <- function(x) { x/2 - log(1+exp(x)) + x*tanh(x/2)/4 }
+  
+  vblogit <- function(y, X, offset, eps=1e-2, m0, S0, S0i, xi0,
+                      verb=FALSE, maxiter=1000, ...) {
+    ## Logistic regression using JJ96 idea. Ormeron00 notation.
+    ## p(y, w, t) = p(y | w) p(w | t) p(t) 
+    ##
+    ## Y ~ Bern( logit^{-1}(Xw + offset) )
+    ## w  ~ N(m0, S0) iid
+    ##
+    ## "*0" are fixed priors.
+    ##
+    cat2 <- if(verb) cat else function(...) NULL
+    varnames <- colnames(data.frame(as.matrix(X[1:2,])))
+  
+    ## Dimensions:
+    N <- length(y)
+    K <- ncol(X)
+    #'
+    #'
+    #' offset
+    if(missing(offset) || is.null(offset)) offset <- 0
+    if(length(offset)<N) offset <- rep(offset, N)[1:N]
+    #'
+    #'
+    #' Priors and initial estimates.
+    if(missing(m0))  m0  <- rep(0, K)
+    if(missing(S0))  S0  <- diag(1e5, K, K)
+    #' Overwrite with priors from ppm if given.
+    if(!is.null(prior.mean <- list(...)$prior.mean))
+        m0 <- prior.mean
+    if(!is.null(prior.var <- list(...)$prior.var))
+        S0 <- prior.var
+    check.nvector(m0, K, things="parameters")
+    stopifnot(is.matrix(S0))
+    if(missing(S0i)) S0i <- solve(S0)
+    #' Constants:
+    oo2 <- offset^2
+    LE_CONST <- as.numeric( -0.5*t(m0)%*%S0i%*%m0
+                           - 0.5*determinant(S0)$mod
+                           + sum((y-0.5)*offset) ) 
+    Sm0 <- S0i%*%m0
+    #' start values for xi:
+    if(missing(xi0))   xi0 <- rep(4, N) # something positive
+    if(length(xi0)!=N) xi0 <- rep(xi0, N)[1:N]
+  
+    est <- list(m=m0, S=S0, Si=S0i, xi=xi0)
+    #'
+    #'
+    #'
+    ## loop
+    le <- -Inf
+    le_hist <- le
+    loop <- TRUE
+    iter <- 0
+    #' initials:
+    la <- lambda(xi0)
+    Si <- S0i - 2 * t(X*la)%*%X
+    S <- solve(Si)
+    m <- S%*%( t(X)%*%( (y-0.5) + 2*la*offset ) + Sm0  )
+    #'
+    #' Main loop:
+    while(loop){
+      old <- le
+      #' update variational parameters
+      M <- S+m%*%t(m)
+      #' Make sure M is symmetric in case of numerical errors:
+      M <- (M+t(M))/2
+      L <- t(chol(M))
+      V <- X%*%L
+      dR <- rowSums(V^2)
+      dO <- 2*offset*c(X%*%m)
+      xi2 <- dR + dO + oo2
+      xi <- sqrt(xi2)
+      la <- lambda(xi)
+      #' update post covariance
+      Si <- S0i - 2 * t(X*la)%*%X
+      S <- solve(Si)
+      #' update post mean
+      m <- S%*%( t(X)%*%( (y-0.5) + 2*la*offset ) + Sm0  )
+      #' compute the log evidence
+      le <-  as.numeric( 0.5*determinant(S)$mod
+                        + sum( mygamma(xi) )
+                        + sum(oo2*la)
+                        + 0.5*t(m)%*%Si%*%m
+                        + LE_CONST)
+      #' check convergence 
+      devi <- le - old
+      if(devi < 0)
+        warning("Log-evidence decreasing; try different starting values for xi.")
+      loop <- abs(devi) > eps & (iter<-iter+1) <= maxiter
+      le_hist <- c(le_hist, le)
+      cat2("diff:", devi, "             \r")
+    }
+    if(iter == maxiter) warning("Maximum iteration limit reached.")
+    cat2("\n")
+    ## done. Compile:
+    est <- list(m=m, S=S, Si=Si, xi=xi, lambda_xi=la)
+    #' Marginal evidence
+    est$logLik <- le
+    #' Compute max logLik with the Bernoulli model;
+    #' this should be what glm gives:
+    est$logLik_ML <- as.numeric( t(y)%*%(X%*%m+offset)
+                                - sum( log( 1 + exp(X%*%m+offset)) ) )
+    #' Max loglik with the approximation
+    est$logLik_ML2 <- as.numeric(  t(y)%*%(X%*%m + offset)
+                                 + t(m)%*%t(X*la)%*%X%*%m
+                                 - 0.5*sum(X%*%m)
+                                 + sum(mygamma(xi))
+                                 + 2*t(offset*la)%*%X%*%m
+                                 + t(offset*la)%*%offset
+                                 - 0.5 * sum(offset)  )
+    #' some additional parts, like in glm output
+    est$coefficients <- est$m[,1]
+    names(est$coefficients) <- varnames
+    est$call <- sys.call()
+    est$converged <- !(maxiter==iter)
+    #' more additional stuff
+    est$logp_hist <- le_hist
+    est$parameters <- list(eps=eps, maxiter=maxiter)
+    est$priors <- list(m=m0, S=S0)
+    est$iterations <- iter
+    class(est) <- "vblogit"
+    ## return
+    est
+  }
+
+  vblogit
+})
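+
+#' Usage sketch (simulated data, illustrative only; run within the
+#' package context since vblogit uses internal helpers):
+#'   set.seed(1)
+#'   X <- cbind(1, rnorm(200))
+#'   y <- rbinom(200, 1, plogis(X %*% c(-1, 2)))
+#'   fit <- vblogit(y, X)
+#'   fit$coefficients    ## posterior means, roughly c(-1, 2)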
+
+
+###################################################
+#' Predict method
+predict.vblogit <- local({
+
+  sigmoid <- function(e) 1/(1+exp(-e))
+
+  predict.vblogit <- function(object, newdata = NULL,
+                              type = c("link", "response", "terms"),
+                              se.fit = FALSE,
+                              dispersion = NULL,
+                              terms = NULL,
+                              na.action = na.pass, 
+                              ...) {
+    type <- match.arg(type)
+    if(type != "response") stop("type not supported.")
+    if(missing(newdata)) {
+      stop("not implemented.")
+    }
+    else{  # newdata
+      #' build the new covariate matrix, inspired by predict.lm
+      tt <- terms(object)
+      Terms <- delete.response(tt)
+      m <- model.frame(Terms, newdata, na.action = na.action, 
+                       xlev = object$xlevels)
+      X <- model.matrix(Terms, m, contrasts.arg = object$contrasts)
+      offset <- rep(0, nrow(X))
+      if (!is.null(off.num <- attr(tt, "offset"))) 
+        for (i in off.num)
+          offset <- offset + eval(attr(tt, "variables")[[i + 1]], newdata)
+      if (!is.null(object$call$offset)) 
+        offset <- offset + eval(object$call$offset, newdata)
+      #' predict using probit approximation to logit-function
+      mu <- object$m
+      S <- object$S
+      mua <- as.numeric(X%*%mu)+offset
+      s2a <- diag(X%*%S%*%t(X) )
+      predictor <- sigmoid( as.numeric( mua/sqrt(1+pi*s2a/8) ) ) 
+      names(predictor) <- rownames(X)
+    }
+    predictor
+  }
+
+  predict.vblogit
+})
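+
+#' The 'response' prediction uses the standard probit approximation to
+#' the logistic sigmoid: for t ~ N(mu, s2),
+#'    E[sigmoid(t)] ~ sigmoid( mu / sqrt(1 + pi*s2/8) ),
+#' which is what the line defining 'predictor' evaluates, with
+#' mu = X m + offset and s2 = diag(X S X').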
+
+
+
+# ###################################################
+# print method
+print.vblogit <- function(x, ...) {
+  splat("Variational Bayes logistic regression fit")
+  cat("\nCall: ")
+  print(x$call)
+  cat("\nCoefficients:\n")
+  print(x$coefficients)
+  cat("\n")
+  splat("Log-likelihood:", x$logLik)
+  splat("Converged:", x$converged)
+  splat("Convergence threshold:", x$parameters$eps)
+  splat("Iterations / max:", x$iterations, "/", x$parameters$maxiter)
+  splat("* Caution: the estimates are conditional on convergence.")
+  invisible(NULL)
+}
+####################################################
+# vblogit family method
+family.vblogit <- function(object, ...) binomial()
+
+####################################################
+#' vblogit fit summary method
+summary.vblogit <- function(object, ...) {
+  splat("Variational Bayes logistic regression fit")
+  cat("\nCall: ")
+  print(object$call)
+  splat("\nCoefficients and posterior 95% central regions:")
+  vna <- names(object$coefficients)
+  s <- sqrt(diag(object$S))
+  q0 <- qnorm(c(0.025, 0.975))
+  m <- as.numeric(object$m)
+  df <- data.frame(estimate=m,
+                   "low 0.05"=m+s*q0[1],
+                   "high 97.5"=m+s*q0[2],
+                   "prior mean"=object$priors$m,
+                   "prior var"=diag(object$priors$S))
+  rownames(df) <- vna
+  print(df)
+  cat("\n")
+  splat("Lower bound for log-likelihood:", object$logLik)
+  invisible(NULL)
+}
+
+####################################################
+# Coef
+coef.vblogit <- function(object, ...) object$coefficients
+
+####################################################
+# Log-evidence
+logLik.vblogit <- function(object, ...) {
+  object$logLik
+}
+
+
diff --git a/R/vcov.kppm.R b/R/vcov.kppm.R
new file mode 100755
index 0000000..bb16aec
--- /dev/null
+++ b/R/vcov.kppm.R
@@ -0,0 +1,157 @@
+#
+# vcov.kppm
+#
+#  vcov method for kppm objects
+#
+#   Original code: Abdollah Jalilian
+#
+#   $Revision: 1.10 $  $Date: 2015/07/11 08:19:26 $
+#
+
+vcov.kppm <- function(object, ...,
+                      what=c("vcov", "corr", "fisher", "internals"),
+                      fast = NULL, rmax = NULL, eps.rmax = 0.01,
+                      verbose = TRUE)
+{
+  what <- match.arg(what)
+  verifyclass(object, "kppm")
+  fast.given <- !is.null(fast)
+  if(is.null(object$improve)) {
+    ## Normal composite likelihood (poisson) case
+    ## extract composite likelihood results
+    po <- object$po
+    ## ensure it was fitted with quadscheme
+    if(is.null(getglmfit(po))) {
+      warning("Re-fitting model with forcefit=TRUE")
+      po <- update(po, forcefit=TRUE)
+    }
+    ## extract quadrature scheme information
+    Q <- quad.ppm(po)
+    U <- union.quad(Q)
+    nU <- npoints(U)
+    wt <- w.quad(Q)
+    ## compute fitted intensity values
+    lambda <- fitted(po, type="lambda")
+    ## extract covariate values
+    Z <- model.matrix(po)
+    ## evaluate integrand
+    ff <- Z * lambda * wt
+    ## extract pcf
+    g <- pcfmodel(object)
+    ## resolve options for algorithm
+    maxmat <- spatstat.options("maxmatrix")
+    if(!fast.given) {
+      fast <- (nU^2 > maxmat)
+    } else stopifnot(is.logical(fast))
+    ## attempt to compute large matrix: pair correlation function minus 1
+    if(!fast) {
+      gminus1 <- there.is.no.try(
+        matrix(g(c(pairdist(U))) - 1, nU, nU)
+        )
+    } else {
+      if(is.null(rmax)){
+        diamwin <- diameter(as.owin(U))
+        fnc <- get("fnc", envir = environment(improve.kppm))
+        rmax <- if(fnc(diamwin, eps.rmax, g) >= 0) diamwin else
+                  uniroot(fnc, lower = 0, upper = diamwin,
+                          eps=eps.rmax, g=g)$root
+      }
+      cp <- there.is.no.try(
+        crosspairs(U,U,rmax,what="ijd")
+        )
+      gminus1 <- if(is.null(cp)) NULL else
+                 sparseMatrix(i=cp$i, j=cp$j,
+                              x=g(cp$d) - 1,
+                              dims=c(nU, nU))
+    }
+    ## compute quadratic form
+    if(!is.null(gminus1)) {
+      E <- t(ff) %*% gminus1 %*% ff
+    } else {
+      ## split calculation of (gminus1 %*% ff) into blocks
+      nrowperblock <- max(1, floor(maxmat/nU))
+      nblocks <- ceiling(nU/nrowperblock)
+      g1ff <- NULL
+      if(verbose) {
+        splat("Splitting large matrix calculation into", nblocks, "blocks")
+        pstate <- list()
+      }
+      if(!fast) {
+        for(k in seq_len(nblocks)) {
+          if(verbose) pstate <- progressreport(k, nblocks, state=pstate)
+          istart <- nrowperblock * (k-1) + 1
+          iend   <- min(nrowperblock * k, nU)
+          ii <- istart:iend
+          gm1 <- matrix(g(c(crossdist(U[ii], U))) - 1, iend-istart+1, nU)
+          g1ff <- rbind(g1ff, gm1 %*% ff)
+        }
+      } else {
+        for(k in seq_len(nblocks)) {
+          if(verbose) pstate <- progressreport(k, nblocks, state=pstate)
+          istart <- nrowperblock * (k-1) + 1
+          iend   <- min(nrowperblock * k, nU)
+          ii <- istart:iend
+          cp <- crosspairs(U[ii], U, rmax, what="ijd")
+          gm1 <- sparseMatrix(i=cp$i, j=cp$j,
+                              x=g(cp$d) - 1,
+                              dims=c(iend-istart+1, nU))
+          g1ff <- rbind(g1ff, as.matrix(gm1 %*% ff))
+        }
+      }
+      E <- t(ff) %*% g1ff
+    }
+    ## asymptotic covariance matrix in the Poisson case
+    J <- t(Z) %*% ff
+    J.inv <- try(solve(J))
+    ## could be singular 
+    if(inherits(J.inv, "try-error")) {
+      if(what == "internals") {
+        return(list(ff=ff, J=J, E=E, J.inv=NULL))
+      } else {
+        return(NULL)
+      }
+    }
+    ## asymptotic covariance matrix in the clustered case
+    vc <- J.inv + J.inv %*% E %*% J.inv
+  } else {
+    ## Case of quasi-likelihood (or other things from improve.kppm)
+    run <- is.null(object$vcov) ||
+      (!is.null(fast) && (fast != object$improve$fast.vcov))
+    if(run){
+      ## Calculate vcov if it hasn't already been so
+      ## or if option fast differs from fast.vcov
+      args <- object$improve
+      internal <- what=="internals"
+      if(!is.null(fast))
+        args$fast.vcov <- fast
+      object <- with(args,
+                     improve.kppm(object, type = type,
+                                  rmax = rmax, dimyx = dimyx,
+                                  fast = fast, vcov = TRUE,
+                                  fast.vcov = fast.vcov,
+                                  maxIter = 0,
+                                  save.internals = internal))
+    }
+    vc <- object$vcov
+  }
+
+  ## Convert from Matrix to ordinary matrix:
+  vc <- as.matrix(vc)
+  
+  switch(what,
+         vcov={ return(vc) },
+         corr={
+           sd <- sqrt(diag(vc))
+           co <- vc/outer(sd, sd, "*")
+           return(co)
+         },
+         fisher={
+           fish <- try(solve(vc))
+           if(inherits(fish, "try-error")) fish <- NULL 
+           return(fish)
+         },
+         internals={
+           return(list(ff=ff, J=J, E=E, J.inv=J.inv, vc=vc))
+         })
+  stop(paste("Unrecognised option: what=", what))
+}
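+
+# Usage sketch (model illustrative):
+#   fit <- kppm(redwood ~ x, "Thomas")
+#   vcov(fit)                 # covariance matrix of trend coefficients
+#   vcov(fit, what="corr")    # corresponding correlation matrix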
diff --git a/R/vcov.mppm.R b/R/vcov.mppm.R
new file mode 100755
index 0000000..4859e90
--- /dev/null
+++ b/R/vcov.mppm.R
@@ -0,0 +1,189 @@
+#  Variance-covariance matrix for mppm objects
+#
+# $Revision: 1.16 $ $Date: 2016/04/25 02:34:40 $
+#
+#
+
+vcov.mppm <- local({
+
+  errhandler <- function(whinge, err) {
+    switch(err,
+           fatal=stop(whinge),
+           warn={
+             warning(whinge)
+             return(NA)
+           },
+           null= return(NULL),
+           stop(paste("Unrecognised option: err=", dQuote(err))))
+  }
+    
+  vcov.mppm <- function(object, ..., what="vcov", err="fatal") {
+
+    what <- match.arg(what,
+                      c("vcov", "corr", "fisher", "Fisher", "internals", "all"))
+    if(what == "Fisher") what <- "fisher"
+
+    if(is.poisson.mppm(object) && object$Fit$fitter == "glm") 
+      return(vcmPois(object, ..., what=what, err=err))
+
+    return(vcmGibbs(object, ..., what=what, err=err))
+  }
+
+  vcmPois <- function(object, ..., what, err) {
+    # legacy algorithm for Poisson case
+    
+    gf <- object$Fit$FIT
+    gd <- object$Fit$moadf
+    wt <- gd$.mpl.W
+    fi <- fitted(gf)
+
+    fo <- object$trend
+    if(is.null(fo)) fo <- (~1)
+
+    mof <- model.frame(fo, gd)
+    mom <- model.matrix(fo, mof)
+    momnames <- dimnames(mom)[[2]]
+
+    fisher <- sumouter(mom, fi * wt)
+    dimnames(fisher) <- list(momnames, momnames)
+
+    switch(what,
+           fisher = { return(fisher) },
+           vcov   = {
+             vc <- try(solve(fisher), silent=(err == "null"))
+             if(inherits(vc, "try-error"))
+               return(errhandler("Fisher information is singular", err))
+             else
+               return(vc)
+           },
+           corr={
+             co <- try(solve(fisher), silent=(err == "null"))
+             if(inherits(co, "try-error"))
+               return(errhandler("Fisher information is singular", err))
+             sd <- sqrt(diag(co))
+             return(co / outer(sd, sd, "*"))
+           })
+  }
+
+  vcmGibbs <- function(object, ..., what, err,
+                       matrix.action=c("warn", "fatal", "silent"),
+                       gam.action=c("warn", "fatal", "silent"),
+                       logi.action=c("warn", "fatal", "silent")
+                       ) {
+    if(!missing(err)) {
+      if(err == "null") err <- "silent" 
+      matrix.action <-
+        if(missing(matrix.action)) err else match.arg(matrix.action)
+      gam.action <- if(missing(gam.action)) err else match.arg(gam.action)
+      logi.action <- if(missing(logi.action)) err else match.arg(logi.action)
+    } else {
+      matrix.action <- match.arg(matrix.action)
+      gam.action <- match.arg(gam.action)
+      logi.action <- match.arg(logi.action)
+    }
+    collectmom <- (what %in% c("internals", "all"))
+    subs <- subfits(object, what="basicmodels")
+    n <- length(subs)
+    guts <- lapply(subs, vcov, what="internals",
+                   matrix.action=matrix.action,
+                   gam.action=gam.action,
+                   logi.action=logi.action,
+                   dropcoef=TRUE,
+                   ...)
+    fish <- lapply(guts, getElement, name="fisher")
+    a1   <- lapply(guts, getElement, name="A1")
+    a2   <- lapply(guts, getElement, name="A2")
+    a3   <- lapply(guts, getElement, name="A3")
+    a1 <- mergeAlternatives(fish, a1)
+    cnames <- unique(unlist(lapply(c(a1, a2, a3), colnames)))
+    if(collectmom) {
+      sufs <- lapply(guts, getElement, name="suff")
+      moms <- lapply(guts, getElement, name="mom")
+      sufs <- mergeAlternatives(sufs, moms)
+      cnames <- unique(c(cnames, unlist(lapply(sufs, colnames))))
+    }
+    nc <- length(cnames)
+    A1 <- A2 <- A3 <- matrix(0, nc, nc, dimnames=list(cnames, cnames))
+    if(collectmom)
+      Mom <- matrix(, 0, nc, dimnames=list(character(0), cnames))
+    for(i in seq_len(n)) {
+      coefnames.i <- names(coef(subs[[i]]))
+      A1 <- addsubmatrix(A1, a1[[i]], coefnames.i)
+      A2 <- addsubmatrix(A2, a2[[i]], coefnames.i)
+      A3 <- addsubmatrix(A3, a3[[i]], coefnames.i)
+      if(collectmom) Mom <- bindsubmatrix(Mom, sufs[[i]])
+    }
+    internals <- list(A1=A1, A2=A2, A3=A3)
+    if(collectmom)
+      internals <- c(internals, list(suff=Mom))
+    if(what %in% c("vcov", "corr", "all")) {
+      #' variance-covariance matrix required
+      U <- checksolve(A1, matrix.action, , "variance")
+      vc <- if(is.null(U)) NULL else (U %*% (A1 + A2 + A3) %*% U)
+    }
+    out <- switch(what,
+                  fisher = A1 + A2 + A3,
+                  vcov   = vc,
+                  corr   = {
+                    if(is.null(vc)) return(NULL)
+                    sd <- sqrt(diag(vc))
+                    vc / outer(sd, sd, "*")
+                  },
+                  internals = internals,
+                  all = list(internals=internals,
+                             fisher=A1+A2+A3,
+                             varcov=vc,
+                             invgrad=A1)
+                  )
+    return(out)
+  }
+
+  addsubmatrix <- function(A, B, guessnames) {
+    if(is.null(B)) return(A)
+    if(is.null(colnames(B)) && !missing(guessnames)) {
+      if(is.character(guessnames))
+        guessnames <- list(guessnames, guessnames)
+      if(all(lengths(guessnames) == dim(B)))
+        colnames(B) <- guessnames
+    }
+    if(is.null(colnames(B))) {
+      if(!all(dim(A) == dim(B))) 
+        stop("Internal error: no column names, and matrices non-conformable")
+      A <- A + B
+      return(A)
+    }
+    j <- match(colnames(B), colnames(A))
+    if(anyNA(j))
+      stop("Internal error: unmatched column name(s)")
+    A[j,j] <- A[j,j] + B
+    return(A)
+  }
+
+  bindsubmatrix <- function(A, B) {
+    if(is.null(B)) return(A)
+    if(is.null(colnames(B))) {
+      if(ncol(A) != ncol(B))
+        stop("Internal error: no column names, and matrices non-conformable")
+      A <- rbind(A, B)
+      return(A)
+    }
+    j <- match(colnames(B), colnames(A))
+    if(anyNA(j))
+      stop("Internal error: unmatched column name(s)")
+    BB <- matrix(0, nrow(B), ncol(A))
+    BB[,j] <- B
+    A <- rbind(A, BB)
+    return(A)
+  }
+
+  mergeAlternatives <- function(A, B) {
+    okA <- !sapply(A, is.null)
+    okB <- !sapply(B, is.null)
+    if(any(override <- !okA & okB))
+      A[override] <- B[override]
+    return(A)
+  }
+
+  vcov.mppm
+  
+})
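+
+# Usage sketch (uses the 'waterstriders' demo data; illustrative).
+# For Gibbs fits the variance is the sandwich  U (A1+A2+A3) U  with
+# U = A1^{-1}, as assembled in vcmGibbs above.
+#   fit <- mppm(Points ~ 1, data=hyperframe(Points=waterstriders))
+#   vcov(fit)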
diff --git a/R/vcov.ppm.R b/R/vcov.ppm.R
new file mode 100755
index 0000000..4d969d0
--- /dev/null
+++ b/R/vcov.ppm.R
@@ -0,0 +1,1674 @@
+##
+## Asymptotic covariance & correlation matrices
+## and Fisher information matrix
+## for ppm objects
+##
+##  $Revision: 1.129 $  $Date: 2017/06/05 10:31:58 $
+##
+
+vcov.ppm <- local({
+
+vcov.ppm <- function(object, ..., what="vcov", verbose=TRUE,
+                     fine=FALSE,
+                     gam.action=c("warn", "fatal", "silent"),
+                     matrix.action=c("warn", "fatal", "silent"),
+                     logi.action=c("warn", "fatal", "silent"),
+                     hessian=FALSE) {
+  verifyclass(object, "ppm")
+  argh <- list(...)
+
+  gam.action <- match.arg(gam.action)
+  matrix.action <- match.arg(matrix.action)
+  logi.action <- match.arg(logi.action)
+
+  if(missing(fine) && ("A1dummy" %in% names(argh))) {
+    message("Argument 'A1dummy' has been replaced by 'fine'")
+    fine <- as.logical(argh$A1dummy)
+  } else fine <- as.logical(fine)
+  
+  stopifnot(length(what) == 1 && is.character(what))
+  what.options <- c("vcov", "corr", "fisher", "Fisher", "internals", "all")
+  what.map     <- c("vcov", "corr", "fisher", "fisher", "internals", "all")
+  if(is.na(m <- pmatch(what, what.options)))
+    stop(paste("Unrecognised option: what=", sQuote(what)))
+  what <- what.map[m]
+
+  ## No vcov for Variational Bayes
+  if(!is.null(object$internal$VB))
+      stop("Variance calculations currently not possible for variational Bayes fit.")
+  ## no parameters, no variance
+  if(length(coef(object)) == 0) {
+    result <- switch(what,
+                     vcov=, corr=, fisher= {
+                       matrix(, 0, 0)
+                     },
+                     internals=, all={
+                       list()
+                     })
+    return(result)
+  }
+  
+  ## nonstandard calculations (hack) 
+  generic.triggers <- c("A1", "new.coef", "matwt", "saveterms", "sparseOK")
+  nonstandard <- any(generic.triggers %in% names(argh)) || fine
+#  saveterms <- identical(resolve.1.default("saveterms", argh), TRUE)
+  
+  ## Fisher information *may* be contained in object
+  fisher <- object$fisher
+  varcov <- object$varcov
+  
+  ## Do we need to go into the guts?
+  needguts <- nonstandard ||
+    (is.null(fisher) && what=="fisher") ||
+    (is.null(varcov) && what %in% c("vcov", "corr")) ||
+    (what %in% c("internals", "all")) 
+
+  ## In general it is not true that varcov = solve(fisher)
+  ## because we might use different estimators,
+  ## or the parameters might be a subset of the canonical parameter
+
+  if(needguts) {
+    ## warn if fitted model was obtained using GAM
+    if(identical(object$fitter, "gam")) {
+      switch(gam.action,
+             fatal={
+               stop(paste("model was fitted by gam();",
+                          "execution halted because fatal=TRUE"),
+                    call.=FALSE)
+             },
+             warn={
+               warning(paste("model was fitted by gam();",
+                             "asymptotic variance calculation ignores this"),
+                       call.=FALSE)
+             },
+             silent={})
+    }
+    ## ++++ perform main calculation ++++
+    if((is.poisson(object) || (hessian && what!="internals")) && object$method != "logi") {
+      ## Poisson model, or Hessian of Gibbs model without internals
+      results <- vcalcPois(object, ..., what=what,
+                           matrix.action=matrix.action,
+                           verbose=verbose, fisher=fisher)
+    } else {
+      ## Gibbs model 
+      results <- vcalcGibbs(object, ..., what=what,
+                            fine=fine,
+                            matrix.action=matrix.action,
+                            hessian = hessian)
+    }
+    varcov <- results$varcov
+    fisher <- results$fisher
+    internals  <- results$internals
+  }
+  
+  if(what %in% c("vcov", "corr") && is.null(varcov)) {
+    ## Need variance-covariance matrix.
+    if(!is.null(fisher) && is.poisson(object)) 
+      ## Derive from Fisher information
+      varcov <- checksolve(fisher, matrix.action,
+                           "Fisher information matrix",
+                           "variance")
+  }
+
+  out <- switch(what,
+                fisher = fisher,
+                vcov   = varcov,
+                corr   = {
+                  if(is.null(varcov)) return(NULL)
+                  sd <- sqrt(diag(varcov))
+                  varcov / outer(sd, sd, "*")
+                },
+                internals = internals,
+                all = results
+                )
+  return(out)
+}
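+
+## Usage sketch (model illustrative):
+##   fit <- ppm(cells ~ x)
+##   vcov(fit)                 ## vcov of fitted coefficients
+##   vcov(fit, what="corr")    ## correlation matrix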
+
+## ................  variance calculation for Poisson models  .............
+
+vcalcPois <- function(object, ...,
+                      what = c("vcov", "corr", "fisher", "internals", "all"),
+                      matrix.action=c("warn", "fatal", "silent"),
+                      method=c("C", "interpreted"),
+                      verbose=TRUE,
+                      fisher=NULL, 
+                      matwt=NULL, new.coef=NULL, dropcoef=FALSE, 
+                      saveterms=FALSE) {
+  ## variance-covariance matrix of Poisson model,
+  ## or Hessian of Gibbs model
+  what <- match.arg(what)
+  method <- match.arg(method)
+  matrix.action <- match.arg(matrix.action)
+  if(reweighting <- !is.null(matwt)) 
+    stopifnot(is.numeric(matwt) && is.vector(matwt))
+  internals <- NULL
+  nonstandard <- reweighting || !is.null(new.coef) || saveterms
+  ## compute Fisher information if not known
+  if(is.null(fisher) || nonstandard) {
+    gf <- getglmfit(object)
+    ## we need a glm or gam
+    if(is.null(gf)) {
+      if(verbose) 
+        warning("Refitting the model using GLM/GAM")
+      object <- update(object, forcefit=TRUE)
+      gf <- getglmfit(object)
+      if(is.null(gf))
+        stop("Internal error - refitting did not yield a glm object")
+    }
+    ## compute fitted intensity and sufficient statistic
+    ltype <- if(is.poisson(object)) "trend" else "lambda"
+    lambda <- fitted(object, type=ltype, new.coef=new.coef,
+                     dropcoef=dropcoef, check=FALSE)
+    mom <- model.matrix(object)
+    nmom <- nrow(mom)
+    Q <- quad.ppm(object)
+    wt <- w.quad(Q)
+    ok <- getglmsubset(object)
+    Z  <- is.data(Q)
+    ## save them
+    if(what == "internals") {
+      internals <-
+        if(!saveterms) list(suff=mom) else
+      list(suff=mom, mom=mom, lambda=lambda, Z=Z, ok=ok)
+    }
+    ## Now restrict all terms to the domain of the pseudolikelihood
+    lambda <- lambda[ok]
+    mom <- mom[ok, , drop=FALSE]
+    wt <- wt[ok]
+    Z <- Z[ok]
+    ## apply weights to rows of model matrix - temporary hack
+    if(reweighting) {
+      nwt <- length(matwt)
+      if(nwt == nmom) {
+        ## matwt matches original quadrature scheme - trim it
+        matwt <- matwt[ok]
+      } else if(nwt != sum(ok))
+        stop("Hack argument matwt has incompatible length")
+      mom.orig <- mom
+      mom <- matwt * mom
+    }
+    ## compute Fisher information
+    switch(method,
+           C = {
+             fisher <- sumouter(mom, lambda * wt)
+             if(reweighting) {
+               gradient <- sumouter(mom.orig, matwt * lambda * wt)
+             }
+           },
+           interpreted = {
+             if(!reweighting) {
+               fisher <- 0
+               for(i in 1:nrow(mom)) {
+                 ro <- mom[i, ]
+                 v <- outer(ro, ro, "*") * lambda[i] * wt[i]
+                 if(!anyNA(v))
+                   fisher <- fisher + v
+               }
+               momnames <- dimnames(mom)[[2]]
+               dimnames(fisher) <- list(momnames, momnames)
+             } else {
+               fisher <- gradient <- 0
+               for(i in 1:nrow(mom)) {
+                 ro <- mom[i, ]
+                 ro0 <- mom.orig[i,]
+                 ldu <- lambda[i] * wt[i]
+                 v <- outer(ro, ro, "*") * ldu
+                 v0 <- outer(ro0, ro0, "*") * matwt[i] * ldu
+                 if(!anyNA(v))
+                   fisher <- fisher + v
+                 if(!anyNA(v0))
+                   gradient <- gradient + v0
+               }
+               momnames <- dimnames(mom)[[2]]
+               dn <- list(momnames, momnames)
+               dimnames(fisher) <- dimnames(gradient) <- dn
+             }
+           })
+  } 
+
+  if(what %in% c("all", "internals")) {
+    ## Internals needed
+    if(is.null(internals))
+      internals <- list(suff = model.matrix(object))
+    internals$fisher <- fisher
+    if(reweighting)
+      internals$gradient <- gradient
+    ilist <- list(internals=internals)
+  }
+
+  if(what %in% c("all", "vcov", "corr")) {
+    ## Variance-covariance matrix needed
+    if(!reweighting) {
+      ## Derive variance-covariance from Fisher info
+      varcov <- checksolve(fisher, matrix.action,
+                           "Fisher information matrix",
+                           "variance")
+      vcovlist <- list(fisher=fisher, varcov=varcov)
+    } else {
+      invgrad <- checksolve(gradient, matrix.action,
+                            "gradient matrix", "variance")
+      varcov <- if(is.null(invgrad)) NULL else
+      invgrad %*% fisher %*% invgrad
+      vcovlist <- list(fisher=fisher, varcov=varcov, invgrad=invgrad)
+    }
+  }
+  result <- switch(what,
+                   fisher    = list(fisher=fisher),
+                   vcov      = vcovlist,
+                   corr      = vcovlist,
+                   internals = ilist,
+                   all       = append(ilist, vcovlist))
+  return(result)
+}
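+
+## In the Poisson case the Fisher information has the explicit form
+##    I(theta) = sum_u h(u) h(u)^T lambda(u) w(u)
+## over quadrature points u; this is what sumouter(mom, lambda * wt)
+## computes above, and the variance-covariance matrix is its inverse.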
+
+
+## ...................... vcov calculation for Gibbs models ....................
+
+vcalcGibbs <- function(fit, ...,
+                       fine=FALSE,
+                       what = c("vcov", "corr", "fisher", "internals", "all"),
+                       generic=FALSE) {
+  what <- match.arg(what)
+
+  if(missing(generic)) {
+    ## Change default to TRUE in certain cases
+    ## For logistic fits, use generic method by default
+    if(fit$method == "logi")
+      generic <- TRUE
+    ## For 'difficult' interactions, use generic method by default
+    fasterbygeneric <- c("Areainter")
+    if(as.interact(fit)$creator %in% fasterbygeneric)
+      generic <- TRUE
+  }
+  
+  ## decide whether to use the generic algorithm
+  generic.triggers <- c("A1", "hessian",
+                        "new.coef", "matwt", "saveterms", "sparseOK")
+  
+  use.generic <-
+    generic || fine ||
+  !is.stationary(fit) ||
+  (fit$method == "logi" && ("marks" %in% variablesinformula(fit$trend))) ||
+  (fit$method != "logi" && has.offset(fit)) ||
+  (fit$method == "logi" && has.offset.term(fit)) ||
+  !(fit$correction == "border" && fit$rbord == reach(fit)) ||
+  any(generic.triggers %in% names(list(...))) ||
+  !identical(options("contrasts")[[1]],
+             c(unordered="contr.treatment",
+               ordered="contr.poly"))
+  
+  ## compute
+  spill <- (what %in% c("all", "internals", "fisher"))
+  spill.vc <- (what == "all")
+  out <- if(use.generic)
+    vcalcGibbsGeneral(fit, ..., fine=fine, spill=spill, spill.vc=spill.vc) else
+    vcalcGibbsSpecial(fit, ..., spill=spill, spill.vc=spill.vc)
+
+  switch(what,
+         vcov = ,
+         corr = {
+           ## out is the variance-covariance matrix; return it
+           return(list(varcov=out))
+         },
+         fisher = {
+           ## out is a list of internal data: extract the Fisher info
+           Fmat <- with(out,
+                        if(fit$method != "logi") Sigma else Sigma1log+Sigma2log)
+           return(list(fisher=Fmat))
+         },
+         internals = {
+           ## out is a list of internal data: return it
+           ## (ensure model matrix is included)
+           if(is.null(out$mom))
+             out$mom <- model.matrix(fit)
+           return(list(internals=out))
+         },
+         all = {
+           ## out is a list(internals, vc): return it
+           ## (ensure model matrix is included)
+           if(is.null(out$internals$mom))
+             out$internals$mom <- model.matrix(fit)
+           ## ensure Fisher info is included
+           if(is.null(out$internals$fisher)) {
+             Fmat <- with(out$internals,
+                     if(fit$method != "logi") Sigma else Sigma1log+Sigma2log)
+             out$internals$fisher <- Fmat
+           }
+           return(out)
+         },
+         )
+  return(NULL)
+}
+
+## ...................... general algorithm ...........................
+
+vcalcGibbsGeneral <- function(model,
+                         ...,
+                         spill = FALSE,
+                         spill.vc = FALSE,
+                         na.action=c("warn", "fatal", "silent"),
+                         matrix.action=c("warn", "fatal", "silent"),
+                         logi.action=c("warn", "fatal", "silent"),
+                         algorithm=c("vectorclip", "vector", "basic"),
+                         A1 = NULL,
+                         fine = FALSE,
+                         hessian = FALSE,
+                         matwt = NULL, new.coef = NULL, dropcoef=FALSE,
+                         saveterms = FALSE,
+                         parallel = TRUE,
+                         sparseOK = FALSE
+                         ) {
+  na.action <- match.arg(na.action)
+  matrix.action <- match.arg(matrix.action)
+  logi.action <- match.arg(logi.action)
+  algorithm <- match.arg(algorithm)
+  if(reweighting <- !is.null(matwt)) 
+    stopifnot(is.numeric(matwt) && is.vector(matwt))
+  spill <- spill || spill.vc
+  saveterms <- spill && saveterms
+  logi <- model$method=="logi"
+  asked.parallel <- !missing(parallel)
+  
+  old.coef <- coef(model)
+  use.coef <- adaptcoef(new.coef, old.coef, drop=dropcoef)
+  p <- length(old.coef)
+  if(p == 0) {
+    ## this probably can't happen
+    if(!spill) return(matrix(, 0, 0)) else return(list())
+  }
+  pnames <- names(old.coef)
+  dnames <- list(pnames, pnames)
+  # (may be revised later)
+  
+  internals <- list()
+  ##
+  sumobj <- summary(model, quick="entries")
+  correction <- model$correction
+  rbord      <- model$rbord
+  R <- reach(model, epsilon=1e-2)
+  Q <- quad.ppm(model)
+  D <- dummy.ppm(model)
+  rho <- model$internal$logistic$rho
+  #### If dummy intensity rho is unknown we estimate it
+  if(is.null(rho))
+     rho <- npoints(D)/(area(D)*markspace.integral(D))
+  X <- data.ppm(model)
+  Z <- is.data(Q)
+  W <- as.owin(model)
+  areaW <- if(correction == "border") eroded.areas(W, rbord) else area(W)
+  ##
+  ## determine which quadrature points contributed to the
+  ## sum/integral in the pseudolikelihood
+  ## (e.g. some points may be excluded by the border correction)
+  okall <- getglmsubset(model)
+  ## conditional intensity lambda(X[i] | X) = lambda(X[i] | X[-i])
+  ## data and dummy:
+  lamall <- fitted(model, check = FALSE, new.coef = new.coef, dropcoef=dropcoef)
+  if(anyNA(lamall)) {
+    whinge <- "Some values of the fitted conditional intensity are NA"
+    switch(na.action,
+           fatal = {
+             stop(whinge, call.=FALSE)
+           },
+           warn = {
+             warning(whinge, call.=FALSE)
+             okall <- okall & !is.na(lamall)
+           },
+           silent = {
+             okall <- okall & !is.na(lamall)
+           })
+  }
+  ## data only:
+  lam <- lamall[Z]
+  ok <- okall[Z]
+  nX <- npoints(X)
+  ## sufficient statistic h(X[i] | X) = h(X[i] | X[-i])
+  ## data and dummy:
+  mall <- model.matrix(model)
+  ## check dimension of canonical statistic 
+  if(ncol(mall) != length(pnames)) {
+    if(!dropcoef)
+      stop(paste("Internal error: dimension of sufficient statistic = ",
+                 ncol(mall), "does not match length of coefficient vector =",
+                 length(pnames)),
+           call.=FALSE)
+    p <- length(pnames)
+    pnames <- colnames(mall)
+    dnames <- list(pnames, pnames)
+  }
+  ## save
+  if(saveterms) 
+    internals <- append(internals,
+                        list(mom=mall, lambda=lamall, Z=Z, ok=okall,
+                             matwt=matwt))
+  if(reweighting) {
+    ## each column of the model matrix is multiplied by 'matwt'
+    check.nvector(matwt, nrow(mall), things="quadrature points")
+    mall.orig <- mall
+    mall      <- mall * matwt
+  }
+  ## subsets of model matrix
+  mokall <- mall[okall, , drop=FALSE]
+  ## data only:
+  m <- mall[Z, , drop=FALSE]
+  mok <- m[ok, , drop=FALSE]
+  ##
+  if(reweighting) {
+    ## save unweighted versions
+    mokall.orig <- mall.orig[okall, , drop=FALSE]
+    m.orig      <- mall.orig[Z, , drop=FALSE]
+    mok.orig    <- m.orig[ok, , drop=FALSE]
+    ##
+    matwtX <- matwt[Z]
+  }
+
+  ## ^^^^^^^^^^^^^^^^ First order (sensitivity) matrices A1, S
+  
+  ## logistic 
+  if(logi){
+    ## Sensitivity matrix S for logistic case
+    Slog <- sumouter(mokall, w = lamall[okall]*rho/(lamall[okall]+rho)^2)
+    dimnames(Slog) <- dnames
+    ## A1 matrix for logistic case
+    A1log <- sumouter(mokall, w = lamall[okall]*rho*rho/(lamall[okall]+rho)^3)
+    dimnames(A1log) <- dnames
+  }
+  ## Sensitivity matrix for MPLE case (= A1)
+  if(is.null(A1) || reweighting) {
+    if(fine){
+      A1 <- sumouter(mokall, w = (lamall * w.quad(Q))[okall])
+      if(reweighting)
+        gradient <- sumouter(mokall.orig, w=(matwt * lamall * w.quad(Q))[okall])
+    } else{
+      A1 <- sumouter(mok)
+      if(reweighting)
+        gradient <- sumouter(mok.orig, w=matwtX)
+    }
+  } else {
+    stopifnot(is.matrix(A1))
+    if(!all(dim(A1) == p))
+      stop(paste("Matrix A1 has wrong dimensions:",
+                 prange(dim(A1)), "!=", prange(c(p, p))))
+  }
+  dimnames(A1) <- dnames
+
+  ## ^^^^^^^^^^ Second order interaction effects A2, A3
+
+  if(hessian) {
+    ## interaction terms suppressed
+    A2 <- A3 <- matrix(0, p, p, dimnames=dnames)
+    if(logi)
+      A2log <- A3log <- matrix(0, p, p, dimnames=dnames)
+  } else {
+    ## ^^^^^^^^^^^^^^^^^^^^ `parallel' evaluation
+    need.loop <- TRUE
+    if(parallel) {
+      ## compute second order difference
+      ##  ddS[i,j,] = h(X[i] | X) - h(X[i] | X[-j])
+      ddS <- deltasuffstat(model, restrict="pairs",
+      	                   force=FALSE, sparseOK=sparseOK)
+      sparse <- inherits(ddS, "sparse3Darray")
+      if(is.null(ddS)) {
+        if(asked.parallel)
+          warning("parallel option not available - reverting to loop")
+      } else {
+        need.loop <- FALSE
+        ## rearrange so that
+        ##  ddS[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j])
+        ddS <- aperm(ddS, c(3,2,1))
+        ## now compute sum_{i,j} for i != j
+        ## outer(ddS[,i,j], ddS[,j,i])
+        ddSok <- ddS[ , ok, ok, drop=FALSE]
+        A3 <- sumsymouter(ddSok)
+        ## compute pairweight and other arrays
+        if(sparse) {
+          ## Entries are only required for pairs i,j which interact.
+          ## mom.array[ ,i,j] = h(X[i] | X)
+          mom.array <- mapSparseEntries(ddS, margin=2, values=m,
+                                        conform=TRUE, across=1)
+          ## momdel[ ,i,j] = h(X[i] | X[-j])
+          momdel <- mom.array - ddS
+          ## pairweight[i,j] = lambda(X[i] | X[-j] )/lambda( X[i] | X ) - 1
+          pairweight <- expm1(tensor1x1(-use.coef, ddS))
+        } else {
+          ## mom.array[ ,i,j] = h(X[i] | X)
+          mom.array <- array(t(m), dim=c(p, nX, nX))
+          ## momdel[ ,i,j] = h(X[i] | X[-j])
+          momdel <- mom.array - ddS
+          ## lamdel[i,j] = lambda(X[i] | X[-j])
+          lamdel <-
+            matrix(lam, nX, nX) * exp(tensor::tensor(-use.coef, ddS, 1, 1))
+          ##  pairweight[i,j] = lamdel[i,j]/lambda[i] - 1 
+          pairweight <- lamdel / lam - 1
+        }
+        ## now compute sum_{i,j} for i != j
+        ## pairweight[i,j] * outer(momdel[,i,j], momdel[,j,i])
+        ## for data points that contributed to the pseudolikelihood
+        momdelok <- momdel[ , ok, ok, drop=FALSE]
+        pwok <- pairweight[ok, ok]
+        if(anyNA(momdelok) || anyNA(pwok))
+          stop("Unable to compute variance: NA values present", call.=FALSE)
+        A2 <- sumsymouter(momdelok, w=pwok)
+        dimnames(A2) <- dimnames(A3) <- dnames
+        if(logi){
+          if(!sparse) {
+            ## lam.array[ ,i,j] = lambda(X[i] | X)
+            lam.array <- array(lam, c(nX,nX,p))
+            lam.array <- aperm(lam.array, c(3,1,2))
+            ## lamdel.array[,i,j] = lambda(X[i] | X[-j])
+            lamdel.array <- array(lamdel, c(nX,nX,p))
+            lamdel.array <- aperm(lamdel.array, c(3,1,2))
+            momdellogi <- rho/(lamdel.array+rho)*momdel
+            ddSlogi <- rho/(lam.array+rho)*mom.array - momdellogi
+          } else {
+            ## lam.array[ ,i,j] = lambda(X[i] | X)
+            lam.array <- mapSparseEntries(ddS, margin=2, lam,
+                                          conform=TRUE, across=1)
+            ## lamdel.array[,i,j] = lambda(X[i] | X[-j])
+            pairweight.array <- aperm(as.sparse3Darray(pairweight), c(3,1,2))
+            lamdel.array <- pairweight.array * lam.array + lam.array
+            lamdel.logi <- applySparseEntries(lamdel.array,
+                                              function(y,rho) { rho/(rho+y) },
+                                              rho=rho)
+            lam.logi <- applySparseEntries(lam.array,
+                                          function(y,rho) { rho/(rho+y) },
+                                          rho=rho)
+            momdellogi <- momdel * lamdel.logi
+            ddSlogi <-    mom.array * lam.logi - momdellogi
+          }
+          momdellogiok <- momdellogi[ , ok, ok, drop=FALSE]
+          A2log <- sumsymouter(momdellogiok, w=pwok)
+          ddSlogiok <- ddSlogi[ , ok, ok, drop=FALSE]
+          A3log <- sumsymouter(ddSlogiok)
+          dimnames(A2log) <- dimnames(A3log) <- dnames
+        }
+      }
+    }
+  
+    ## ^^^^^^^^^^^^^^^^^^^^ loop evaluation
+    if(need.loop) {
+    
+      A2 <- A3 <- matrix(0, p, p, dimnames=dnames)
+      if(logi)
+        A2log <- A3log <- matrix(0, p, p, dimnames=dnames)
+    
+      if(saveterms) {
+        ## *initialise* matrices 
+        ##  lamdel[i,j] = lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)])
+        lamdel <- matrix(lam, nX, nX)
+        ##  momdel[ ,i,j] = h(X[i] | X[-j]) = h(X[i] | X[-c(i,j)])
+        momdel <- array(t(m), dim=c(p, nX, nX))
+      }
+  
+      ## identify close pairs
+      if(is.finite(R)) {
+        cl <- closepairs(X, R, what="indices")
+        I <- cl$i
+        J <- cl$j
+        if(algorithm == "vectorclip") {
+          cl2 <- closepairs(X, 2*R, what="indices")
+          I2 <- cl2$i
+          J2 <- cl2$j
+        }
+      } else {
+        ## either infinite reach, or something wrong
+        IJ <- expand.grid(I=1:nX, J=1:nX)
+        IJ <- subset(IJ, I != J)
+        I2 <- I <- IJ$I
+        J2 <- J <- IJ$J
+      }
+      ## filter:  I and J must both belong to the nominated subset 
+      okIJ <- ok[I] & ok[J]
+      I <- I[okIJ]
+      J <- J[okIJ]
+      ##
+      if(length(I) > 0 && length(J) > 0) {
+        ## .............. loop over pairs ........................
+        ## The following ensures that 'empty' and 'X' have compatible marks 
+        empty <- X[integer(0)]
+        ## make an empty 'equalpairs' matrix
+        nonE <- matrix(, nrow=0, ncol=2)
+        ## Run through pairs
+        switch(algorithm,
+               basic={
+                 for(i in unique(I)) {
+                   Xi <- X[i]
+                   Ji <- unique(J[I==i])
+                   if((nJi <- length(Ji)) > 0) {
+                     for(k in 1:nJi) {
+                       j <- Ji[k]
+                       X.ij <- X[-c(i,j)]
+                       ## compute conditional intensity
+                       ##    lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)]
+                       plamj.i <- predict(model, type="cif",
+                                          locations=X[j], X=X.ij,
+                                          check = FALSE,
+                                          new.coef = new.coef,
+                                          sumobj = sumobj, E=nonE)
+                       ## corresponding values of sufficient statistic 
+                       ##    h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)]
+                       pmj.i <- partialModelMatrix(X.ij, X[j], model)[nX-1, ]
+                       ## conditional intensity and sufficient statistic
+                       ## in reverse order
+                       ##    lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)]
+                       plami.j <- predict(model, type="cif",
+                                          locations=X[i], X=X.ij,
+                                          check = FALSE,
+                                          new.coef = new.coef,
+                                          sumobj = sumobj, E=nonE)
+                       pmi.j <- partialModelMatrix(X.ij, Xi, model)[nX-1, ]
+                       ## 
+                       if(reweighting) {
+                         pmj.i <- pmj.i * matwtX[j]
+                         pmi.j <- pmi.j * matwtX[i]
+                       }
+                       if(saveterms) {
+                         lamdel[i,j] <- plami.j
+                         momdel[ , i, j] <- pmi.j
+                         lamdel[j,i] <- plamj.i
+                         momdel[ , j, i] <- pmj.i
+                       }
+                       ## increment A2, A3
+                       wt <- plami.j / lam[i] - 1
+                       A2 <- A2 + wt * outer(pmi.j, pmj.i)
+                       if(logi)
+                         A2log <- A2log +
+                           wt * rho/(plami.j+rho) *
+                             rho/(plamj.i+rho) * outer(pmi.j, pmj.i)
+                       ## delta sufficient statistic
+                       ## delta_i h(X[j] | X[-c(i,j)])
+                       ## = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)])
+                       ## = h(X[j] | X) - h(X[j] | X[-i])
+                       ## delta_j h(X[i] | X[-c(i,j)])
+                       ## = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)])
+                       ## = h(X[i] | X) - h(X[i] | X[-j])
+                       deltaiSj <- m[j, ] - pmj.i
+                       deltajSi <- m[i, ] - pmi.j
+                       A3 <- A3 + outer(deltaiSj, deltajSi)
+                       if(logi){
+                         deltaiSjlog <- rho*(m[j, ]/
+                                             (lam[j]+rho) - pmj.i/(plamj.i+rho))
+                         deltajSilog <- rho*(m[i, ]/
+                                             (lam[i]+rho) - pmi.j/(plami.j+rho))
+                         A3log <- A3log + outer(deltaiSjlog, deltajSilog)
+                       }
+                     }
+                   }
+                 }
+               },
+               vector={
+                 ## --------- faster algorithm using vector functions --------
+                 for(i in unique(I)) {
+                   Ji <- unique(J[I==i])
+                   nJi <- length(Ji)
+                   if(nJi > 0) {
+                     Xi <- X[i]
+                     ## neighbours of X[i]
+                     XJi <- X[Ji]
+                     ## all points other than X[i]
+                     X.i <- X[-i]
+                     ## index of XJi in X.i
+                     J.i <- Ji - (Ji > i)
+                     ## equalpairs matrix
+                     E.i <- cbind(J.i, seq_len(nJi))
+                     ## compute conditional intensity
+                     ##   lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)]
+                     ## for all j
+                     plamj <- predict(model, type="cif",
+                                      locations=XJi, X=X.i,
+                                      check = FALSE,
+                                      new.coef = new.coef,
+                                      sumobj=sumobj, E=E.i)
+                     ## corresponding values of sufficient statistic 
+                     ##    h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)])
+                     ## for all j
+                     pmj <-
+                       partialModelMatrix(X.i, empty, model)[J.i, , drop=FALSE]
+                     ##
+                     ## conditional intensity & sufficient statistic
+                     ## in reverse order
+                     ##    lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)])
+                     ## for all j
+                     plami <- numeric(nJi)
+                     pmi <- matrix(NA_real_, nJi, p)
+                     for(k in 1:nJi) {
+                       j <- Ji[k]
+                       X.ij <- X[-c(i,j)]
+                       plami[k] <- predict(model, type="cif",
+                                           locations=Xi, X=X.ij,
+                                           check = FALSE,
+                                           new.coef = new.coef,
+                                           sumobj = sumobj, E=nonE)
+                       pmi[k, ] <- partialModelMatrix(X.ij, Xi, model)[nX-1, ]
+                     }
+                     ##
+                     if(reweighting) {
+                       pmj <- pmj * matwtX[Ji]
+                       pmi <- pmi * matwtX[i]
+                     }
+                     if(saveterms) {
+                       lamdel[Ji, i] <- plamj
+                       momdel[ , Ji, i] <- t(pmj)
+                       lamdel[i,Ji] <- plami
+                       momdel[ , i, Ji] <- t(pmi)
+                     }
+                     ## increment A2, A3
+                     wt <- plami / lam[i] - 1
+                     for(k in 1:nJi) {
+                       j <- Ji[k]
+                       A2 <- A2 + wt[k] * outer(pmi[k,], pmj[k,])
+                       if(logi)
+                         A2log <- A2log + wt[k] * rho/(plami[k]+rho) *
+                           rho/(plamj[k]+rho) * outer(pmi[k,], pmj[k,])
+                       ## delta sufficient statistic
+                       ## delta_i h(X[j] | X[-c(i,j)])
+                       ## = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)])
+                       ## = h(X[j] | X) - h(X[j] | X[-i])
+                       ## delta_j h(X[i] | X[-c(i,j)])
+                       ## = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)])
+                       ## = h(X[i] | X) - h(X[i] | X[-j])
+                       deltaiSj <- m[j, ] - pmj[k,]
+                       deltajSi <- m[i, ] - pmi[k,]
+                       A3 <- A3 + outer(deltaiSj, deltajSi)
+                       if(logi){
+                         deltaiSjlog <- rho*(m[j, ]/(lam[j]+rho) -
+                                             pmj[k,]/(plamj[k]+rho))
+                         deltajSilog <- rho*(m[i, ]/(lam[i]+rho) -
+                                             pmi[k,]/(plami[k]+rho))
+                         A3log <- A3log + outer(deltaiSjlog, deltajSilog)
+                       }
+                     }
+                   }
+                 }
+               },
+               vectorclip={
+                 ## --------- faster version of 'vector' algorithm
+                 ## --------  by removing non-interacting points of X
+                 for(i in unique(I)) {
+                   ## all points within 2R
+                   J2i <- unique(J2[I2==i])
+                   ## all points within R
+                   Ji  <- unique(J[I==i])
+                   nJi <- length(Ji)
+                   if(nJi > 0) {
+                     Xi <- X[i]
+                     ## neighbours of X[i]
+                     XJi <- X[Ji]
+                     ## replace X[-i] by X[-i] \cap b(X[i], 2R)
+                     X.i <- X[J2i]
+                     nX.i <- length(J2i)
+                     ## index of XJi in X.i
+                     J.i <- match(Ji, J2i)
+                     if(anyNA(J.i))
+                       stop("Internal error: Ji not a subset of J2i")
+                     ## equalpairs matrix
+                     E.i <- cbind(J.i, seq_len(nJi))
+                     ## compute conditional intensity
+                     ##   lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)])
+                     ## for all j
+                     plamj <- predict(model, type="cif",
+                                      locations=XJi, X=X.i,
+                                      check = FALSE,
+                                      new.coef = new.coef,
+                                      sumobj = sumobj, E=E.i)
+                     ## corresponding values of sufficient statistic 
+                     ##    h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)])
+                     ## for all j
+                     pmj <-
+                       partialModelMatrix(X.i, empty, model)[J.i, , drop=FALSE]
+                     ##
+                     ## conditional intensity & sufficient statistic
+                     ##  in reverse order
+                     ##    lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)])
+                     ## for all j
+                     plami <- numeric(nJi)
+                     pmi <- matrix(NA_real_, nJi, p)
+                     for(k in 1:nJi) {
+                       j <- Ji[k]
+                       ## X.ij <- X[-c(i,j)]
+                       X.ij <- X.i[-J.i[k]]
+                       plami[k] <- predict(model, type="cif",
+                                           locations=Xi, X=X.ij,
+                                           check = FALSE,
+                                           new.coef = new.coef,
+                                           sumobj = sumobj, E=nonE)
+                       pmi[k, ] <- partialModelMatrix(X.ij, Xi, model)[nX.i, ]
+                     }
+                     ##
+                     if(reweighting) {
+                       pmj <- pmj * matwtX[Ji]
+                       pmi <- pmi * matwtX[i]
+                     }
+                     if(saveterms) {
+                       lamdel[Ji, i] <- plamj
+                       momdel[ , Ji, i] <- t(pmj)
+                       lamdel[i,Ji] <- plami
+                       momdel[ , i, Ji] <- t(pmi)
+                     }
+                     ## increment A2, A3
+                     wt <- plami / lam[i] - 1
+                     for(k in 1:nJi) {
+                       j <- Ji[k]
+                       A2 <- A2 + wt[k] * outer(pmi[k,], pmj[k,])
+                       if(logi)
+                         A2log <- A2log + wt[k] * rho/(plami[k]+rho) *
+                           rho/(plamj[k]+rho) * outer(pmi[k,], pmj[k,])
+                       ## delta sufficient statistic
+                       ## delta_i h(X[j] | X[-c(i,j)])
+                       ## = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)])
+                       ## = h(X[j] | X) - h(X[j] | X[-i])
+                       ## delta_j h(X[i] | X[-c(i,j)])
+                       ## = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)])
+                       ## = h(X[i] | X) - h(X[i] | X[-j])
+                       deltaiSj <- m[j, ] - pmj[k,]
+                       deltajSi <- m[i, ] - pmi[k,]
+                       A3 <- A3 + outer(deltaiSj, deltajSi)
+                       if(logi){
+                         deltaiSjlog <- rho*(m[j, ]/(lam[j]+rho) -
+                                             pmj[k,]/(plamj[k]+rho))
+                         deltajSilog <- rho*(m[i, ]/(lam[i]+rho) -
+                                             pmi[k,]/(plami[k]+rho))
+                         A3log <- A3log + outer(deltaiSjlog, deltajSilog)
+                       }
+                     }
+                   }
+                 }
+               })
+      }
+    }
+    ## ......... end of loop computation ...............
+  }
+
+  #### Matrix Sigma 
+  Sigma <- A1+A2+A3
+  
+  if(spill) {
+    ## save internal data (with matrices unnormalised) 
+    internals <-
+      c(internals,
+        list(A1=A1, A2=A2, A3=A3, Sigma=Sigma, areaW=areaW),
+        if(logi)
+           list(A1log=A1log, A2log=A2log, A3log=A3log, Slog=Slog) else NULL,
+        if(reweighting) list(gradient=gradient) else NULL,
+        list(hessian = if(reweighting) gradient else if(logi) Slog else A1,
+             fisher = Sigma),
+        if(saveterms) list(lamdel=lamdel, momdel=momdel) else NULL)
+    ## return internal data if no further calculation needed
+    if(!spill.vc && !logi)
+      return(internals)
+  }
+    
+  ## ........... calculate variance/covariance matrix for MPL .........
+
+  if(!reweighting) {
+    ## Normalise
+    A1 <- A1/areaW
+    Sigma <- Sigma/areaW
+    ## Enforce exact symmetry 
+    A1 <- (A1 + t(A1))/2
+    Sigma <- (Sigma + t(Sigma))/2
+    ## calculate inverse negative Hessian
+    U <- checksolve(A1, matrix.action, , "variance")
+  } else {
+    ## Normalise
+    gradient <- gradient/areaW
+    Sigma <- Sigma/areaW
+    ## Enforce exact symmetry
+    gradient <- (gradient + t(gradient))/2
+    Sigma <- (Sigma + t(Sigma))/2
+    ## calculate inverse negative Hessian
+    U <- checksolve(gradient, matrix.action, , "variance")
+  }
+  
+  ## compute variance-covariance
+  vc.mpl <- if(is.null(U)) matrix(NA, p, p) else 
+              U %*% Sigma %*% U / areaW
+  dimnames(vc.mpl) <- dnames
+
+  ## return variance-covariance matrix, if model was fitted by MPL
+  if(!logi) {
+    if(spill.vc) return(list(varcov=vc.mpl, internals=internals))
+    return(vc.mpl)
+  }
+  
+  ###### Everything below is only computed for logistic fits #######
+
+  ## Matrix Sigma1log (A1log+A2log+A3log):
+  Sigma1log <- A1log+A2log+A3log
+  ## Resolving the dummy process type
+  how <- model$internal$logistic$how
+  if(how %in% c("given", "grid", "transgrid")){
+    whinge <- paste("vcov is not implemented for dummy type", sQuote(how))
+    if(logi.action=="fatal")
+      stop(whinge)
+    how <- if(how=="given") "poisson" else "stratrand"
+    if(logi.action=="warn")
+      warning(paste(whinge,"- using", sQuote(how), "formula"), call.=FALSE)
+  }
+  ## Matrix Sigma2log (depends on dummy process type)
+  switch(how,
+         poisson={
+           Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3)
+         },
+         binomial={
+           Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3)
+           A1vec <- t(mokall) %*% (rho*lamall[okall]/(lamall[okall]+rho)^2)
+           Sigma2log <- Sigma2log - A1vec%*%t(A1vec)/rho*1/sum(1/(lamall[okall]+rho))
+         },
+         stratrand={
+           ## Dirty way of refitting model with new dummy pattern (should probably be done using call, eval, envir, etc.):
+           ## Changed by ER 2013/06/14 to use the new quadscheme.logi
+           ## D2 <- logi.dummy(X = X, type = "stratrand", nd = model$internal$logistic$args)
+           ## Q2 <- quad(data=X, dummy=D2)
+           ## Q2$dummy$Dinfo <- D2$Dinfo
+           Q2 <- quadscheme.logi(data=X, dummytype = "stratrand",
+                                 nd = model$internal$logistic$nd)
+           D2 <- Q2$dummy
+           Q2$dummy$Dinfo <- D2$Dinfo
+           Z2 <- is.data(Q2)
+           arglist <- list(Q=Q2, trend=model$trend, interaction = model$interaction, method = model$method,
+                           correction = model$correction, rbord = model$rbord, covariates = model$covariates)
+           arglist <- append(arglist, model$internal$logistic$extraargs)
+           model2 <- do.call(ppm, args = arglist)
+
+           ## New cif
+           lamall2 <- fitted(model2, check = FALSE,
+                             new.coef = new.coef, dropcoef=dropcoef)
+           ## New model matrix
+           mall2 <- model.matrix(model2)
+           okall2 <- getglmsubset(model2)
+
+           ## index vectors of stratrand cell indices of dummy points 
+           inD <- model$internal$logistic$inD
+           inD2 <- model2$internal$logistic$inD
+
+           ## Dummy points inside eroded window (for border correction)
+           if(is.finite(R) && (correction == "border")){
+             ii <- (bdist.points(D) >= R)
+             ii2 <- (bdist.points(D2) >= R)
+           } else{
+             ii <- rep.int(TRUE, npoints(D))
+             ii2 <- rep.int(TRUE, npoints(D2))
+           }
+           ## OK points of dummy pattern 1 with a valid point of dummy pattern 2 in same stratrand cell (and vice versa)
+           okdum <- okall[!Z]
+           okdum2 <- okall2[!Z2]
+           ok1 <- okdum & ii & is.element(inD, inD2[okdum2 & ii2])
+           ok2 <- okdum2 & ii2 & is.element(inD2, inD[okdum & ii])
+           ## ok1 <- okdum & okdum2 & ii & is.element(inD, inD2[ii2])
+           ## ok2 <- okdum2 & okdum1 & ii2 & is.element(inD2, inD[ii])
+           ## ok1 <- ii & is.element(inD, inD2[ii2])
+           ## ok2 <- ii2 & is.element(inD2, inD[ii])
+
+           ## cif and suff. stat. for valid points in dummy patterns 1 and 2
+           lamdum <- lamall[!Z][ok1]
+           lamdum2 <- lamall2[!Z2][ok2]
+           mdum <- mall[!Z,,drop=FALSE][ok1,]
+           mdum2 <- mall2[!Z2,,drop=FALSE][ok2,]
+
+           ## finally calculation of Sigma2
+           wlam <- mdum * rho*lamdum/(lamdum+rho)
+           wlam2 <- mdum2 * rho*lamdum2/(lamdum2+rho)
+           Sigma2log <- t(wlam-wlam2)%*%(wlam-wlam2)/(2*rho*rho)
+         },
+         stop("sorry - unrecognized dummy process in logistic fit")
+         )
+  ## Attaching to Sigma2log calculated above
+  dimnames(Sigma2log) <- dnames
+
+  
+  if(spill) {
+    ## return internal data only (with matrices unnormalised)
+    internals <- c(internals, 
+                   list(Sigma1log=Sigma1log, Sigma2log=Sigma2log, mple=vc.mpl))
+    if(!spill.vc)
+      return(internals)
+  }
+
+  ## .. Calculate variance-covariance matrix for logistic fit ...........
+  ## normalise
+  Slog <- Slog/areaW
+  Sigma1log <- Sigma1log/areaW
+  Sigma2log <- Sigma2log/areaW
+  ## evaluate
+  Ulog <- checksolve(Slog, matrix.action, , "variance")
+  vc.logi <- if(is.null(Ulog)) matrix(NA, p, p) else 
+             Ulog %*% (Sigma1log+Sigma2log) %*% Ulog / areaW
+  dimnames(vc.logi) <- dnames
+  ##
+  if(spill.vc) return(list(varcov=vc.logi, internals=internals))
+  return(vc.logi)
+}
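+
+## Illustrative entry point for the calculations above (a sketch, not part
+## of the original source; assumes the spatstat package is attached):
+##   fit <- ppm(cells ~ 1, Strauss(r = 0.1))
+##   vcov(fit)                      # variance-covariance of coef(fit)
+##   vcov(fit, what = "internals")  # unnormalised A1, A2, A3, Sigma, ...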
+
+## vcalcGibbs from Ege Rubak and J-F Coeurjolly
+## 2013/06/14, modified by Ege to handle logistic case as well
+
+vcalcGibbsSpecial <- function(fit, ...,
+                              spill=FALSE,
+                              spill.vc=FALSE,
+                              special.alg = TRUE,
+                              matrix.action=c("warn", "fatal", "silent"),
+                              logi.action=c("warn", "fatal", "silent")) {
+  matrix.action <- match.arg(matrix.action)
+  logi.action <- match.arg(logi.action)
+  spill <- spill || spill.vc
+  
+  ## Interaction name:
+  iname <- fit$interaction$name
+  
+  ## Does the model have marks which are in the trend?
+  marx <- is.marked(fit) && ("marks" %in% variablesinformula(fit$trend))
+
+  ## The full data and window:
+  Xplus <- data.ppm(fit)
+  Wplus <- as.owin(Xplus)
+
+  ## Fitted parameters and the parameter dimension p (later consisting of p1 trend parameters and p2 interaction parameters):
+  theta <- coef(fit)
+  p <- length(theta)
+
+  ## Number of points:
+  n <- npoints(Xplus)
+
+  ## Using the faster algorithms for special cases
+  if(special.alg && fit$method != "logi"){
+    param <- coef(fit)
+    switch(iname,
+      "Strauss process"={
+        ## Only implemented for non-marked case:
+        if(!marx)
+	  return(vcovPairPiece(Xplus,
+                               reach(fit$interaction),
+                               exp(coef(fit)[2]),
+                               matrix.action,
+                               spill=spill,
+                               spill.vc=spill.vc))
+      },
+           
+      "Piecewise constant pairwise interaction process"={
+        ## Only implemented for non-marked case:
+        if(!marx)
+          return(vcovPairPiece(Xplus,
+                               fit$interaction$par$r,
+                               exp(coef(fit)[-1]),
+                               matrix.action,
+                               spill=spill,
+                               spill.vc=spill.vc))
+      },
+
+      "Multitype Strauss process"={
+	matR <- fit$interaction$par$radii
+        R <- c(matR[1,1], matR[1,2], matR[2,2])
+        ## Only implemented for 2 types with equal interaction range:
+        if(ncol(matR)==2 && marx){
+          n <- length(theta)
+          res <- vcovMultiStrauss(Xplus, R, exp(theta[c(n-2,n-1,n)]),
+                                  matrix.action,spill=spill,spill.vc=spill.vc)
+          if(!spill) {
+            res <- contrastmatrix(res, 2)
+            dimnames(res) <- list(names(theta), names(theta))
+          }
+          return(res)
+        }
+      }
+    )
+  }
+  
+  ## Matrix specifying equal points in the two patterns in the call to eval below:
+  E <- matrix(rep.int(1:n, 2), ncol = 2)
+
+  ## Eval. the interaction potential difference at all points (internal spatstat function):
+#  V1 <- fit$interaction$family$eval(Xplus, Xplus, E, fit$interaction$pot, fit$interaction$par, fit$correction)
+  oldopt <- NULL
+  if(fit$interaction$family$name=="pairwise"){
+      oldopt <- spatstat.options(fasteval = "off")
+  }
+  V1 <- evalInteraction(Xplus, Xplus, E, as.interact(fit), fit$correction)
+  spatstat.options(oldopt)
+
+  ## Calculate parameter dimensions and correct the contrast type parameters:
+  p2 <- ncol(V1)
+  p1 <- p-p2
+  if(p1>1)
+    theta[2:p1] <- theta[2:p1] + theta[1]
+  ## V1 <- evalInteraction(Q, Xplus, union.quad(Q), fit$interaction, fit$correction)
+  POT <- attr(V1, "POT")
+  attr(V1, "POT") <- NULL
+  ## Adding the constant potential as first column (one column per type for multitype):
+  if(!marx){
+    V1 <- cbind(1, V1)
+    colnames(V1) <- names(theta)
+  }
+  else{
+    lev <- levels(marks(Xplus))
+    ## Indicator matrix for mark type attached to V1:
+    tmp <- matrix(marks(Xplus), nrow(V1), p1)==matrix(lev, nrow(V1), p-ncol(V1), byrow=TRUE)
+    colnames(tmp) <- lev
+    V1 <- cbind(tmp,V1)
+  }
+
+  ## Matrices for differences of potentials:
+  E <- matrix(rep.int(1:(n-1), 2), ncol = 2)
+  dV <- V2 <- array(0,dim=c(n,n,p))
+
+  for(k in 1:p1){
+    V2[,,k] <- matrix(V1[,k], n, n, byrow = FALSE)
+  }
+  for(k in (p1+1):p){
+    diag(V2[,,k]) <- V1[,k]
+  }
+  for(j in 1:n){
+    ## Fast evaluation for pairwise interaction processes:
+    if(fit$interaction$family$name=="pairwise" && !is.null(POT)){
+      V2[-j,j,-(1:p1)] <- V1[-j,-(1:p1)]-POT[-j,j,]
+    }
+    else{
+      V2[-j,j,-(1:p1)] <- fit$interaction$family$eval(Xplus[-j], Xplus[-j], E, fit$interaction$pot, fit$interaction$par, fit$correction)
+      ## Q <- quadscheme(Xplus[-j],emptyppp)
+      ## V2[-j,j,-1] <- evalInteraction(Q, Xplus[-j], Xplus[-j], fit$interaction, fit$correction)
+    }
+    for(k in 1:p){
+      dV[,j,k] <- V1[,k] - V2[,j,k]
+    }
+  }
+  ## Ratio of first and second order Papangelou - 1:
+  frac <- 0*dV[,,1]
+  for(k in (p1+1):p){
+    frac <- frac + dV[,,k]*theta[k]
+  }
+  frac <- exp(-frac)-1
+
+  ## In the rest we restrict attention to points in the interior:
+  
+  ## The interaction range:
+  R <- reach(fit$interaction)
+
+  ## The reduced window, area and point pattern:
+  W<-erosion.owin(Wplus,R)
+  areaW <- area(W)
+
+  ## Interior points determined by bdist.points:
+  IntPoints <- bdist.points(Xplus)>=R  
+  X <- Xplus[IntPoints]
+  
+  ## Making a logical matrix, I, indicating R-close pairs which are in the interior:
+  D <- pairdist(Xplus)
+  diag(D) <- Inf
+  I <- (D<=R) & outer(IntPoints,IntPoints, "&")
+  
+  ## Matrix A1:
+  A1 <- t(V1[IntPoints,])%*%V1[IntPoints,]
+
+  ## Matrix A2:
+  A2 <- matrix(0,p,p)
+  for(k in 1:p){
+    for(l in k:p){
+      A2[k,l] <- A2[l,k] <- sum(I*V2[,,k]*frac*t(V2[,,l]))
+    }
+  }
+  
+  ## Matrix A3:
+  A3 <- matrix(0,p,p)
+  for(k in 1:p){
+    for(l in k:p){
+      A3[k,l] <- A3[l,k] <- sum(I*dV[,,k]*t(dV[,,l]))
+    }
+  }
+
+  ## Matrix Sigma (A1+A2+A3):
+  Sigma<-A1+A2+A3
+
+  if(spill) {
+    # save internal data (with matrices unnormalised)
+    dimnames(A1) <- dimnames(A2) <-
+      dimnames(A3) <- list(names(theta), names(theta))
+    internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma, areaW=areaW)
+    # return internal data, if model fitted by MPL
+    if(!spill.vc && fit$method != "logi")
+      return(internals)
+  }
+
+  # ......... Calculate variance-covariance matrix for MPL ........
+  
+  # normalise
+  A1 <- A1/areaW
+  Sigma <- Sigma/areaW
+  # evaluate
+  U <- checksolve(A1, matrix.action, , "variance")
+  vc.mpl <- if(is.null(U)) matrix(NA, p, p) else U %*% Sigma %*% U / areaW
+  ## Convert to treatment contrasts
+  if(marx)
+    vc.mpl <- contrastmatrix(vc.mpl, p1)
+  dimnames(vc.mpl) <- list(names(theta), names(theta))
+  
+  # Return result for standard ppm method:
+  if(fit$method!="logi") {
+    if(spill.vc) return(list(varcov=vc.mpl, internals=internals))
+    return(vc.mpl)
+  }
+  
+  ########################################################################
+  ###### The remainder is only executed when the method is logistic ######
+  ########################################################################
+
+  ### Most of this is copy/pasted from vcalcGibbsGeneral
+  correction <- fit$correction
+  Q <- quad.ppm(fit)
+  D <- dummy.ppm(fit)
+  rho <- fit$internal$logistic$rho
+  ## If dummy intensity rho is unknown we estimate it
+  if(is.null(rho))
+     rho <- npoints(D)/(area(D)*markspace.integral(D))
+  X <- data.ppm(fit)
+  Z <- is.data(Q)
+
+  # determine which data points entered into the sum in the pseudolikelihood
+  # (border correction, nonzero cif)
+  # data and dummy:
+  okall <- getglmsubset(fit)
+  ## # data only:
+  ## ok <- okall[Z]
+
+  # conditional intensity lambda(X[i] | X) = lambda(X[i] | X[-i])
+  # data and dummy:
+  lamall <- fitted(fit, check = FALSE)
+  ## # data only:
+  ## lam <- lamall[Z]
+
+  # sufficient statistic h(X[i] | X) = h(X[i] | X[-i])
+  # data and dummy:
+  mall <- model.matrix(fit)
+  mokall <- mall[okall, , drop=FALSE]
+  ## # data only:
+  ## m <- mall[Z, , drop=FALSE]
+  ## mok <- m[ok, , drop=FALSE]
+
+  # Sensitivity matrix S and A1 matrix for logistic case
+  Slog <- sumouter(mokall, w = lamall[okall]*rho/(lamall[okall]+rho)^2)
+  A1log <- sumouter(mokall, w = lamall[okall]*rho*rho/(lamall[okall]+rho)^3)
+
+  ## Define W1, W2 and dW for the logistic method based on V1, V2 and dV (frac is unchanged)
+  lambda1 <- exp(.rowSums(matrix(theta,n,p,byrow=TRUE)*V1, n, p))
+  W1 <- V1*rho/(lambda1+rho)
+  lambda2 <- exp(apply(array(rep(theta,each=n*n),dim=c(n,n,p))*V2, c(1,2), sum))
+  W2 <- V2
+  dW <- dV
+  for(k in 1:p){
+    W2[,,k] <- V2[,,k] * rho/(lambda2+rho)
+    for(j in 1:n){
+      dW[,j,k] <- W1[,k] - W2[,j,k]
+    }
+  }
+  ## Matrices A2log and A3log for the first component Sigma1log of the variance:
+  A2log <- A3log <- matrix(0,p,p)
+  for(k in 1:p){
+    for(l in k:p){
+      A2log[k,l] <- A2log[l,k] <- sum(I*W2[,,k]*frac*t(W2[,,l]))
+      A3log[k,l] <- A3log[l,k] <- sum(I*dW[,,k]*t(dW[,,l]))
+    }
+  }
+  
+  ## First variance component Sigma1log (A1log+A2log+A3log):
+  Sigma1log <- A1log+A2log+A3log
+
+  ## Resolving the dummy process type
+  how <- fit$internal$logistic$how
+  if(how %in% c("given", "grid", "transgrid")){
+    whinge <- paste("vcov is not implemented for dummy type", sQuote(how))
+    if(logi.action=="fatal")
+      stop(whinge)
+    how <- if(how=="given") "poisson" else "stratrand"
+    if(logi.action=="warn")
+      warning(paste(whinge,"- using", sQuote(how), "formula"), call.=FALSE)
+  }
+
+  ## Matrix Sigma2log (depends on dummy process type)
+  switch(how,
+         poisson={
+           Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3)
+         },
+         binomial={
+           Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3)
+           A1vec <- t(mokall) %*% (rho*lamall[okall]/(lamall[okall]+rho)^2)
+           Sigma2log <- Sigma2log - A1vec%*%t(A1vec)/rho*1/sum(1/(lamall[okall]+rho))
+         },
+         stratrand={
+           ### Dirty way of refitting model with new dummy pattern (should probably be done using call, eval, envir, etc.):
+           ## D2 <- logi.dummy(X = X, type = "stratrand", nd = model$internal$logistic$args)
+           ## Q2 <- quad(data=X, dummy=D2)
+           ## Q2$dummy$Dinfo <- D2$Dinfo
+           Q2 <- quadscheme.logi(data=X, dummytype = "stratrand", nd = fit$internal$logistic$nd)
+           D2 <- Q2$dummy
+           Z2 <- is.data(Q2)
+           arglist <- list(Q=Q2, trend=fit$trend, interaction = fit$interaction, method = fit$method,
+                           correction = fit$correction, rbord = fit$rbord, covariates = fit$covariates)
+           arglist <- append(arglist, fit$internal$logistic$extraargs)
+           fit2 <- do.call(ppm, args = arglist)
+
+           ## New cif
+           lamall2 <- fitted(fit2, check=FALSE)
+           ## New model matrix
+           mall2 <- model.matrix(fit2)
+           okall2 <- getglmsubset(fit2)
+
+           # index vectors of stratrand cell indices of dummy points 
+           inD <- fit$internal$logistic$inD
+           inD2 <- fit2$internal$logistic$inD
+
+           # Dummy points inside eroded window (for border correction)
+           if(is.finite(R) && (correction == "border")){
+             ii <- inside.owin(D, w = W)
+             ii2 <- inside.owin(D2, w = W)
+           } else{
+             ii <- rep.int(TRUE, npoints(D))
+             ii2 <- rep.int(TRUE, npoints(D2))
+           }
+           # OK points of dummy pattern 1 with a valid point of dummy pattern 2 in same stratrand cell (and vice versa)
+           okdum <- okall[!Z]
+           okdum2 <- okall2[!Z2]
+           ok1 <- okdum & ii & is.element(inD, inD2[okdum2 & ii2])
+           ok2 <- okdum2 & ii2 & is.element(inD2, inD[okdum & ii])
+           ## ok1 <- okdum & okdum2 & ii & is.element(inD, inD2[ii2])
+           ## ok2 <- okdum2 & okdum1 & ii2 & is.element(inD2, inD[ii])
+           ## ok1 <- ii & is.element(inD, inD2[ii2])
+           ## ok2 <- ii2 & is.element(inD2, inD[ii])
+
+           # cif and suff. stat. for valid points in dummy patterns 1 and 2
+           lamdum <- lamall[!Z][ok1]
+           lamdum2 <- lamall2[!Z2][ok2]
+           mdum <- mall[!Z,,drop=FALSE][ok1,]
+           mdum2 <- mall2[!Z2,,drop=FALSE][ok2,]
+
+           # finally calculation of Sigma2
+           wlam <- mdum * rho*lamdum/(lamdum+rho)
+           wlam2 <- mdum2 * rho*lamdum2/(lamdum2+rho)
+           Sigma2log <- t(wlam-wlam2)%*%(wlam-wlam2)/(2*rho*rho)
+         },
+         stop("sorry - unrecognized dummy process in logistic fit")
+         )
+
+
+  if(spill) {
+    ## Attach dimnames to all matrices
+    dimnames(Sigma2log) <- dimnames(Slog) <-
+      dimnames(Sigma1log) <- dimnames(A1log) <-
+        dimnames(A2log) <- dimnames(A3log) <-
+          list(names(theta),names(theta))
+    # return internal data (with matrices unnormalised)
+    internals <- c(internals,
+                   list(A1log=A1log, A2log=A2log, A3log=A3log, Slog=Slog,
+                        Sigma1log=Sigma1log, Sigma2log=Sigma2log, mple=vc.mpl))
+    if(!spill.vc)
+      return(internals)
+  }
+
+  # ....... Compute variance-covariance for logistic fit .............
+  # Normalise
+  Slog <- Slog/areaW
+  Sigma1log <- Sigma1log/areaW
+  Sigma2log <- Sigma2log/areaW
+  ## Finally the result is calculated:
+  Ulog <- checksolve(Slog, matrix.action, , "variance")
+  vc.logi <- if(is.null(Ulog)) matrix(NA, p, p) else 
+             Ulog %*% (Sigma1log+Sigma2log) %*% Ulog / areaW
+  #
+  dimnames(vc.logi) <- list(names(theta), names(theta))
+  if(spill.vc) return(list(varcov=vc.logi, internals=internals))
+  return(vc.logi)
+}
+
+vcovPairPiece <- function(Xplus, R, Gam, matrix.action,
+                          spill=FALSE, spill.vc=FALSE){
+  ## R is the vector of breaks (R[length(R)] = range of the interaction).
+  ## Gam is the vector of interaction weights.
+  Rmax <- R[length(R)]
+  
+  ## Xplus : point process observed in W+R
+  ## Extracting the window and calculating area:
+  Wplus<-as.owin(Xplus)
+  W<-erosion.owin(Wplus,Rmax)
+  areaW <- area(W)
+
+  ## Interior points determined by bdist.points:
+  IntPoints <- bdist.points(Xplus)>=Rmax
+  X <- Xplus[IntPoints]
+
+  nX <- npoints(X)
+  nXplus <- npoints(Xplus)
+  ## Matrix D with pairwise distances between points and infinite distance
+  ## between a point and itself:
+  
+  Dplus<-pairdist(Xplus)
+  D <- pairdist(X)
+  diag(D) <- diag(Dplus) <- Inf
+  ## logical matrix, I, indicating R-close pairs:
+  p<-length(R)
+  Tplus<-T<-matrix(0,X$n,p)
+  I<-Iplus<-list()
+  for (i in 1:p){
+     if (i==1){
+	Iplus[[1]]<- Dplus <=R[1]
+	I[[1]] <- D<=R[1]
+     } else {
+	Iplus[[i]]<- ((Dplus>R[i-1]) & (Dplus <=R[i]))
+	I[[i]] <- ((D>R[i-1]) & (D <=R[i]))
+     }
+     ## Vector T with the number of $R$-close neighbours to each point:
+     Tplus[,i]<-  .colSums(Iplus[[i]], nXplus, nXplus)[IntPoints]
+     T[,i] <-  .colSums(I[[i]], nX, nX)
+  }
+  ## Matrices A1, A2 and A3 are initialized to zero:
+  A1 <- A2 <- A3 <- matrix(0,p+1,p+1)
+  ## A1 and A3:
+  A1[1,1] <- npoints(X)
+  
+  for (j in (2:(p+1))){
+    A1[1,j]<-A1[j,1]<-sum(Tplus[,j-1])
+    A3[j,j]<-sum(T[,j-1])
+    for (k in (2:(p+1))){
+      A1[j,k]<-sum(Tplus[,j-1] * Tplus[,k-1])
+    }
+  }
+  ## A2:
+  for (j in (2:(p+1))){
+    A2[1,1]<-A2[1,1]+(Gam[j-1]^(-1)-1)*sum(T[,j-1])
+    for (l in (2:(p+1))){
+      if (l==j) vj<-Tplus[,j-1]-1 else vj<-Tplus[,j-1]
+      A2[1,j]<-A2[1,j]+(Gam[l-1]^(-1)-1)*sum(T[,l-1]*vj)
+    }
+    A2[j,1]<-A2[1,j]
+    for (k in (2:(p+1))){
+      for (l in (2:(p+1))){
+	if (l==j) vj<-Tplus[,j-1]-1 else vj<-Tplus[,j-1]
+	if (l==k) vk<-Tplus[,k-1]-1 else vk<-Tplus[,k-1]
+
+	A2[j,k]<-A2[j,k]+ (Gam[l-1]^(-1)-1)*sum(I[[l-1]]*outer(vj,vk))
+      }
+    }
+
+  }
+
+  Sigma<-A1+A2+A3
+
+  nam <- c("(Intercept)", names(Gam))
+  dnam <- list(nam, nam)
+  
+  if(spill) {
+    # return internal data (with matrices unnormalised)
+    dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- dimnames(Sigma) <- dnam
+    internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma)
+    if(!spill.vc) return(internals)
+  }
+           
+  ## Calculate variance-covariance
+  # Normalise:
+  A1    <- A1/areaW
+  Sigma <- Sigma/areaW
+  U <- checksolve(A1, matrix.action, , "variance")
+  mat <- if(is.null(U)) matrix(NA, length(nam), length(nam)) else U%*%Sigma%*%U / areaW
+  dimnames(mat) <- dnam
+
+  if(spill.vc) return(list(varcov=mat, internals=internals))
+  return(mat)
+}
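+
+## Illustrative entry point (a sketch, not part of the original source):
+## a piecewise constant pairwise interaction fit reaches vcovPairPiece
+## through the special-case branch of vcalcGibbsSpecial, e.g.
+##   fit <- ppm(cells ~ 1, PairPiece(r = c(0.05, 0.1)))
+##   vcov(fit)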
+
+vcovMultiStrauss <- function(Xplus, vecR, vecg, matrix.action,
+                             spill=FALSE, spill.vc=FALSE){
+  ## Xplus : marked Strauss point process 
+  ## with two types 
+  ## observed in W+R (R=max(R11,R12,R22))
+
+  ## vecg = estimated interaction parameters,
+  ##	    ordered as in the output of ppm, i.e. vecg=(g11,g12,g22)
+  ## vecR = interaction ranges of the three Strauss terms, ordered as vecg: (R11,R12,R22)
+
+  R <- max(vecR)
+  R11<-vecR[1];R12<-vecR[2];R22<-vecR[3]
+  ## Extracting the window and calculating area:
+  Wplus<-as.owin(Xplus)
+  W<-erosion.owin(Wplus,R)
+  areaW <- area(W)
+  X1plus<-Xplus[Xplus$marks==levels(Xplus$marks)[1]]
+  X2plus<-Xplus[Xplus$marks==levels(Xplus$marks)[2]]
+
+  ## Interior points determined by bdist.points:
+  IntPoints1 <- bdist.points(X1plus)>=R
+  IntPoints2 <- bdist.points(X2plus)>=R
+  X1 <- X1plus[IntPoints1]
+  X2 <- X2plus[IntPoints2]
+
+  nX1 <- npoints(X1)
+  nX2 <- npoints(X2)
+  nX1plus <- npoints(X1plus)
+  nX2plus <- npoints(X2plus)
+  
+  ## Matrix D with pairwise distances between points and infinite distance
+  ## between a point and itself:
+
+  D1plus<-pairdist(X1plus)
+  D1 <- pairdist(X1)
+  diag(D1) <- diag(D1plus) <- Inf
+  
+  D2plus<-pairdist(X2plus)
+  D2 <- pairdist(X2)
+  diag(D2) <- diag(D2plus) <- Inf
+  
+  D12plus<-crossdist(X1,X2plus)  
+  T12plus<-  .rowSums(D12plus<=R12, nX1, nX2plus)
+  D21plus<-crossdist(X2,X1plus) 
+  T21plus<-  .rowSums(D21plus<=R12, nX2, nX1plus)
+  
+  I12<-crossdist(X1,X2)<=R12
+  I21<-crossdist(X2,X1)<=R12
+  T12<-   .rowSums(I12, nX1, nX2)
+  T21<-   .rowSums(I21, nX2, nX1)
+  ## logical matrix, I, indicating R-close pairs:
+  I1plus<- D1plus <=R11
+  I1 <- D1<=R11
+  I2plus<- D2plus <=R22
+  I2 <- D2<=R22
+  ## Vector T with the number of $R$-close neighbours to each point:
+  T1plus<-  .colSums(I1plus, nX1plus, nX1plus)[IntPoints1]
+  T1 <-     .colSums(I1,     nX1,     nX1)
+  T2plus<-  .colSums(I2plus, nX2plus, nX2plus)[IntPoints2]
+  T2 <-     .colSums(I2,     nX2,     nX2)
+
+  ## Matrices A1, A2 and A3 are initialized to zero:
+  A1 <- A2 <- A3 <- matrix(0,5,5)
+  ## A1 is filled:
+  A1[1,1]<-npoints(X1)
+  A1[1,3]<-A1[3,1]<-sum(T1plus)
+  A1[1,4]<-A1[4,1]<-sum(T12plus)
+  A1[2,2]<-npoints(X2)
+  A1[2,5]<-A1[5,2]<-sum(T2plus)
+  A1[2,4]<-A1[4,2]<-sum(T21plus)
+  A1[3,3]<-sum(T1plus*T1plus)
+  A1[3,4]<-A1[4,3]<-sum(T1plus*T12plus)
+  A1[5,5]<-sum(T2plus*T2plus)
+  A1[4,5]<-A1[5,4]<-sum(T2plus*T21plus)
+  A1[4,4]<-sum(T12plus*T12plus)+sum(T21plus*T21plus)
+
+  ## A3 is filled:
+  A3[3,3]<-sum(T1)
+  A3[5,5]<-sum(T2)
+  A3[4,4]<-sum(T12)+sum(T21)
+   
+
+  ## A2 is filled:
+  gamInv<-vecg^(-1)-1
+  gi1<-gamInv[1];gi12<-gamInv[2];gi2<-gamInv[3]
+  A2[1,1]<-sum(T1)*gi1
+  A2[1,2]<-A2[2,1]<-sum(T12)*gi12
+  A2[1,3]<-A2[3,1]<-sum(T1*(T1plus-1))*gi1
+  A2[1,5]<-A2[5,1]<-sum(T21*T2plus)*gi12
+  A2[1,4]<-A2[4,1]<-gi1*sum(T1*(T12plus))+gi12*sum(T21*(T21plus-1))
+  A2[2,2]<-sum(T2)*gi2
+  A2[2,3]<-A2[3,2]<-sum(T12*T1plus)*gi12
+  A2[2,5]<-A2[5,2]<-sum(T2*(T2plus-1))*gi2
+  A2[2,4]<-A2[4,2]<-gi2*sum(T2*(T21plus))+gi12*sum(T12*(T12plus-1))
+
+  A2[3,3]<-gi1*sum(I1*outer(T1plus-1,T1plus-1))
+  
+  A2[3,5]<-A2[5,3]<- gi12*sum(I12*outer(T1plus,T2plus))
+  A2[3,4]<-A2[4,3]<-gi1*sum(I1*outer(T1plus-1,T12plus))+gi12*sum(I12*outer(T1plus,T21plus-1))
+  
+  A2[5,5]<-gi2*sum(I2*outer(T2plus-1,T2plus-1))
+  A2[4,5]<-A2[5,4]<-gi2*sum(I2*outer(T2plus-1,T21plus))+gi12*sum(I21*outer(T2plus,T12plus-1))
+  
+  A2[4,4]<-gi1*sum(I1*outer(T12plus,T12plus))+gi2*sum(I2*outer(T21plus,T21plus))+ gi12*sum(I12*outer(T12plus-1,T21plus-1))+gi12*sum(I21*outer(T21plus-1,T12plus-1))
+  
+  Sigma<-A1+A2+A3
+  nam <- c(levels(marks(Xplus)), names(vecg))
+  dnam <- list(nam, nam)
+  
+  if(spill) {
+    # return internal data (with matrices unnormalised)
+    dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- dimnames(Sigma) <- dnam
+    internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma)
+    if(!spill.vc) return(internals)
+  }
+           
+  ## Calculate variance-covariance
+  # Normalise:
+  A1    <- A1/areaW
+  Sigma <- Sigma/areaW
+  U <- checksolve(A1, matrix.action, , "variance")
+  mat <- if(is.null(U)) matrix(NA, length(nam), length(nam)) else U%*%Sigma%*%U / areaW
+  dimnames(mat) <- dnam
+
+  if(spill.vc) return(list(varcov=mat, internals=internals))
+  return(mat)
+}
+
+# Convert the first p rows & columns of variance matrix x
+# to variances of treatment contrasts
+contrastmatrix <- function(x,p){
+  mat <- x
+  ## Correct column and row 1:
+  for(i in 2:p){
+    mat[1,i] <- mat[i,1] <- x[1,i]-x[1,1]
+  }
+  ## Correct columns and rows 2,...,p:
+  for(i in 2:p){
+    for(j in 2:p){
+      mat[i,j] <- x[1,1]-x[1,i]-x[1,j]+x[i,j]
+    }
+    for(j in (p+1):ncol(x)){
+      mat[i,j] <- mat[j,i] <- x[i,j]-x[1,j]
+    }
+  }
+  mat
+}
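+
+## Worked example (a sketch, not part of the original source): if the first
+## two coefficients are type means (mu1, mu2), the contrast version replaces
+## the second by mu2 - mu1, so entry [2,2] of
+##   contrastmatrix(V, 2)
+## equals V[1,1] - 2*V[1,2] + V[2,2], the variance of mu2 - mu1, and entry
+## [1,2] equals V[1,2] - V[1,1], the covariance of mu1 and mu2 - mu1.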
+
+vcov.ppm
+}
+)
+
+suffloc <- function(object) {
+  verifyclass(object, "ppm")
+  if(!is.poisson(object))
+    stop("Internals not available for Gibbs models")
+  return(vcov(object, what="internals")$suff)
+}
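+
+## Illustrative usage (a sketch, not part of the original source):
+##   fit <- ppm(cells ~ x)    # Poisson model
+##   suffloc(fit)             # 'suff' component of the variance internals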
diff --git a/R/versions.R b/R/versions.R
new file mode 100755
index 0000000..bcfda70
--- /dev/null
+++ b/R/versions.R
@@ -0,0 +1,57 @@
+#
+# versions.R
+#
+# version numbers
+#
+# $Revision: 1.11 $  $Date: 2016/02/09 04:41:31 $
+#
+#####################
+
+
+# Extract version string from ppm object
+
+versionstring.ppm <- function(object) {
+  verifyclass(object, "ppm")
+  v <- object$version
+  if(is.null(v) || !is.list(v))
+    v <- list(major=1, minor=3, release=4)
+  vs <- paste(v$major, ".", v$minor, "-", v$release, sep="")
+  return(vs)
+}
+
+# Extract version string from interact object
+
+versionstring.interact <- function(object) {
+  verifyclass(object, "interact")
+  v <- object$version
+  return(v)  # NULL before 1.11-0
+}
+
+# Get version number of current spatstat installation
+# This is now saved in the spatstat cache environment
+# rather than read from file every time
+
+versionstring.spatstat <- function() {
+  if(!existsSpatstatVariable("SpatstatVersion"))
+    store.versionstring.spatstat()    
+  getSpatstatVariable("SpatstatVersion")
+}
+
+store.versionstring.spatstat <- function() {
+  vs <- read.dcf(file=system.file("DESCRIPTION", package="spatstat"),
+                 fields="Version")
+  vs <- as.character(vs)
+  putSpatstatVariable("SpatstatVersion", vs)
+}
+
+
+# Extract major and minor versions only.
+
+majorminorversion <- function(v) {
+  vp <- package_version(v)
+  return(package_version(paste(vp$major, vp$minor, sep=".")))
+}
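+
+# e.g. majorminorversion("1.52-1") returns the version "1.52"
+# (a sketch, not part of the original source)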
+
+# legacy function
+
+RandomFieldsSafe <- function() { TRUE }
diff --git a/R/weightedStats.R b/R/weightedStats.R
new file mode 100644
index 0000000..d01ae3a
--- /dev/null
+++ b/R/weightedStats.R
@@ -0,0 +1,103 @@
+#'
+#'     weightedStats.R
+#'
+#'   weighted versions of hist, var, median, quantile
+#'
+#'  $Revision: 1.3 $  $Date: 2017/06/05 10:31:58 $
+#'
+
+
+#'
+#'    whist      weighted histogram
+#'
+
+whist <- function(x, breaks, weights=NULL) {
+    N <- length(breaks)
+    if(length(x) == 0) 
+      h <- numeric(N+1)
+    else {
+      # classify data into histogram cells (breaks need not span range of data)
+      cell <- findInterval(x, breaks, rightmost.closed=TRUE)
+      # values of 'cell' range from 0 to N.
+      nb <- N + 1L
+      if(is.null(weights)) {
+        ## histogram
+        h <- tabulate(cell+1L, nbins=nb)
+      } else {
+        ##  weighted histogram
+        if(!spatstat.options("Cwhist")) {
+          cell <- factor(cell, levels=0:N)
+          h <- unlist(lapply(split(weights, cell), sum, na.rm=TRUE))
+        } else {
+          h <- .Call("Cwhist",
+                     as.integer(cell), as.double(weights), as.integer(nb),
+                     PACKAGE = "spatstat")
+        }
+      }
+    }
+    h <- as.numeric(h)
+    y <- h[2:N]
+    attr(y, "low") <- h[1]
+    attr(y, "high") <- h[N+1]
+    return(y)
+}
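+
+#' Worked example (a sketch, not part of the original source):
+#'   whist(x = c(0.2, 0.7, 1.5, 2.9), breaks = c(0, 1, 2))
+#' returns c(2, 1): two values in [0,1) and one in [1,2).
+#' Attributes "low" = 0 and "high" = 1 count the values falling
+#' below the first break and above the last break respectively.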
+
+#' wrapper for computing weighted variance of a vector
+#' Note: this includes a factor 1 - sum(v^2) in the denominator
+#' where v = w/sum(w). See help(cov.wt)
+
+weighted.var <- function(x, w, na.rm=TRUE) {
+  bad <- is.na(w) | is.na(x)
+  if(any(bad)) {
+    if(!na.rm) return(NA_real_)
+    ok <- !bad
+    x <- x[ok]
+    w <- w[ok]
+  }
+  cov.wt(matrix(x, ncol=1),w)$cov[]
+}
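+
+#' e.g. weighted.var(c(1, 2, 3), w = c(1, 1, 2)) gives 1.1, agreeing with
+#' cov.wt(matrix(1:3, ncol = 1), wt = c(1, 1, 2))$cov
+#' (a sketch, not part of the original source)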
+
+#' weighted median
+
+weighted.median <- function(x, w, na.rm=TRUE) {
+  unname(weighted.quantile(x, probs=0.5, w=w, na.rm=na.rm))
+}
+
+#' weighted quantile
+
+weighted.quantile <- function(x, w, probs=seq(0,1,0.25), na.rm=TRUE) {
+  x <- as.numeric(as.vector(x))
+  w <- as.numeric(as.vector(w))
+  if(anyNA(x) || anyNA(w)) {
+    ok <- !(is.na(x) | is.na(w))
+    x <- x[ok]
+    w <- w[ok]
+  }
+  stopifnot(all(w >= 0))
+  if(all(w == 0)) stop("All weights are zero", call.=FALSE)
+  #'
+  oo <- order(x)
+  x <- x[oo]
+  w <- w[oo]
+  Fx <- cumsum(w)/sum(w)
+  #'
+  result <- numeric(length(probs))
+  for(i in seq_along(result)) {
+    p <- probs[i]
+    lefties <- which(Fx <= p)
+    if(length(lefties) == 0) {
+      result[i] <- x[1]
+    } else {
+      left <- max(lefties)
+      result[i] <- x[left]
+      if(Fx[left] < p && left < length(x)) {
+        right <- left+1
+        y <- x[left] + (x[right]-x[left]) * (p-Fx[left])/(Fx[right]-Fx[left])
+        if(is.finite(y)) result[i] <- y
+      }
+    }
+  }
+  names(result) <- paste0(format(100 * probs, trim = TRUE), "%")
+  return(result)
+}
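+
+#' Worked example (a sketch, not part of the original source):
+#'   weighted.quantile(c(1, 2, 3), w = c(1, 1, 2))
+#' gives 1, 1, 2, 2.5, 3 at probs 0, 0.25, 0.5, 0.75, 1: the cumulative
+#' weight fractions are (0.25, 0.5, 1), so the weighted median is 2 and
+#' the 75% quantile interpolates between 2 and 3. Correspondingly
+#'   weighted.median(c(1, 2, 3), w = c(1, 1, 2))   # 2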
+
diff --git a/R/weights.R b/R/weights.R
new file mode 100755
index 0000000..9e1fe89
--- /dev/null
+++ b/R/weights.R
@@ -0,0 +1,301 @@
+#
+#	weights.S
+#
+#	Utilities for computing quadrature weights
+#
+#	$Revision: 4.39 $	$Date: 2017/01/18 06:28:17 $
+#
+#
+# Main functions:
+		
+#	gridweights()	    Divide the window frame into a regular nx * ny
+#			    grid of rectangular tiles. Given an arbitrary
+#			    pattern of (data + dummy) points derive the
+#			    'counting weights'.
+#
+#	dirichletWeights()  Compute the areas of the tiles of the
+#			    Dirichlet tessellation generated by the 
+#			    given pattern of (data+dummy) points,
+#			    restricted to the window.
+#	
+# Auxiliary functions:	
+#			
+#       countingweights()   compute the counting weights
+#                           for a GENERIC tiling scheme and an arbitrary
+#			    pattern of (data + dummy) points,
+#			    given the tile areas and the information
+#			    that point number k belongs to tile number id[k]. 
+#
+#
+#	gridindex()	    Divide the window frame into a regular nx * ny
+#			    grid of rectangular tiles. 
+#			    Compute tile membership for arbitrary x,y.
+#				    
+#       grid1index()        1-dimensional analogue of gridindex()
+#
+#
+#-------------------------------------------------------------------
+	
+countingweights <- function(id, areas, check=TRUE) {
+	#
+	# id:        cell indices of n points
+	#                     (length n, values in 1:k)
+	#
+	# areas:     areas of k cells 
+	#                     (length k)
+	#
+  id <- as.integer(id)
+  fid <- factor(id, levels=seq_along(areas))
+  counts <- table(fid)
+  w <- areas[id] / counts[id]     # ensures denominator > 0
+  w <- as.vector(w)
+#	
+# that's it; but check for funny business
+#
+  if(check) {
+    zerocount <- (counts == 0)
+    zeroarea <- (areas == 0)
+    if(any(!zeroarea & zerocount)) {
+      lostfrac <- 1 - sum(w)/sum(areas)
+      lostpc <- round(100 * lostfrac, 1)
+      if(lostpc >= 1) 
+        warning(paste("some tiles with positive area",
+                      "do not contain any quadrature points:",
+                      "relative error =",
+                      paste0(lostpc, "%")))
+    }
+    if(any(!zerocount & zeroarea)) {
+	warning("Some tiles with zero area contain quadrature points")
+	warning("Some weights are zero")
+	attr(w, "zeroes") <- zeroarea[id]
+    }
+  }
+#
+  names(w) <- NULL
+  return(w)
+}
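+
+# Worked example (a sketch, not part of the original source): with three
+# points falling into two tiles of areas 2 and 3,
+#   countingweights(id = c(1, 1, 2), areas = c(2, 3))
+# returns c(1, 1, 3): each tile's area is divided equally among
+# the quadrature points it contains.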
+
+gridindex <- function(x, y, xrange, yrange, nx, ny) {
+	#
+	# The box with dimensions xrange, yrange is divided
+	# into nx * ny cells.
+	#
+	# For each point (x[i], y[i]) compute the index (ix, iy)
+	# of the cell containing the point.
+	# 
+	ix <- grid1index(x, xrange, nx)
+	iy <- grid1index(y, yrange, ny)
+	#
+	return(list(ix=ix, iy=iy, index=as.integer((iy-1) * nx + ix)))
+}
+
+grid1index <- function(x, xrange, nx) {
+	i <- ceiling( nx * (x - xrange[1])/diff(xrange))
+	i <- pmax.int(1, i)
+	i <- pmin.int(i, nx)
+	i
+}
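+
+# Worked example (a sketch, not part of the original source): on a 4 x 4
+# grid over the unit square,
+#   grid1index(c(0.05, 0.95), c(0, 1), 4)                 # -> c(1, 4)
+#   gridindex(0.05, 0.95, c(0, 1), c(0, 1), 4, 4)$index   # -> 13
+# since column 1, row 4 gives index (4 - 1) * 4 + 1 = 13.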
+
+gridweights <- function(X, ntile=NULL, ..., window=NULL, verbose=FALSE,
+                        npix=NULL, areas=NULL) {
+	#
+	# Compute counting weights based on a regular tessellation of the
+	# window frame into ntile[1] * ntile[2] rectangular tiles.
+	#
+	# Arguments X and (optionally) 'window' are interpreted as a
+	# point pattern.
+	#
+	# The window frame is divided into a regular ntile[1] * ntile[2] grid
+	# of rectangular tiles. The counting weights based on this tessellation
+	# are computed for the points (x, y) of the pattern.
+	#
+        # 'npix' determines the dimensions of the pixel raster used to
+        # approximate tile areas.
+	
+	X <- as.ppp(X, window)
+	x <- X$x
+	y <- X$y
+        win <- X$window
+
+        # determine number of tiles
+        if(is.null(ntile))
+          ntile <- default.ntile(X)
+        if(length(ntile) == 1)
+          ntile <- rep.int(ntile, 2)
+        nx <- ntile[1]
+        ny <- ntile[2]
+
+        if(verbose) 
+          cat(paste("grid weights for a", nx, "x", ny, "grid of tiles\n"))
+
+        ## determine pixel resolution in case it is required
+        if(!is.null(npix)) {
+          npix <- ensure2vector(npix)
+        } else {
+          npix <- pmax(rev(spatstat.options("npixel")),
+                       c(nx, ny))
+          if(is.mask(win))
+            npix <- pmax(npix, rev(dim(win)))
+        }
+        
+        if(is.null(areas)) {
+  	  # compute tile areas
+          switch(win$type,
+                 rectangle = {
+                   nxy <- nx * ny
+                   tilearea <- area(win)/nxy
+                   areas <- rep.int(tilearea, nxy)
+                   zeroareas <- rep(FALSE, nxy)
+                 },
+                 polygonal = {
+                   areamat <- polytileareaEngine(win,
+                                                 win$xrange, win$yrange,
+                                                 nx, ny)
+                   ## convert from 'im' to 'gridindex' ordering
+                   areas <- as.vector(t(areamat))
+                   zeroareas <- (areas == 0)
+                   if(verbose)
+                     splat("Split polygonal window of area", area(win),
+                           "into", nx, "x", ny, "grid of tiles",
+                           "of total area", sum(areas))
+                 },
+                 mask = {
+                   win <- as.mask(win, dimyx=rev(npix))
+                   if(verbose) 
+                     splat("Converting mask dimensions to",
+                           npix[1], "x", npix[2], "pixels")
+                   ## extract pixel coordinates inside window
+                   rxy <- rasterxy.mask(win, drop=TRUE)
+                   xx <- rxy$x
+                   yy <- rxy$y
+                   
+                   ## classify all pixels into tiles
+                   pixelid <- gridindex(xx, yy, 
+                                        win$xrange, win$yrange, nx, ny)$index
+                   pixelid <- factor(pixelid, levels=seq_len(nx * ny))
+                                
+                   ## compute digital areas of tiles
+                   tilepixels <- as.vector(table(pixelid))
+                   pixelarea <- win$xstep * win$ystep
+                   areas <- tilepixels * pixelarea
+                   zeroareas <- (tilepixels == 0)
+                 }
+               )
+        } else zeroareas <- (areas == 0)
+        
+        id <- gridindex(x, y, win$xrange, win$yrange, nx, ny)$index
+
+        if(win$type != "rectangle" && any(uhoh <- zeroareas[id])) {
+          # this can happen: the tile has digital area zero
+          # but contains a data/dummy point
+          slivers <- unique(id[uhoh])
+          switch(win$type,
+                 mask = {
+                   offence <- "digital area zero"
+                   epsa <- pixelarea/2
+                 },
+                 polygonal = {
+                   offence <- "very small area"
+                   epsa <- min(areas[!zeroareas])/10
+                 })
+          areas[slivers] <- epsa
+          nsliver <- length(slivers)
+          extraarea <- nsliver * epsa
+          extrafrac <- extraarea/area(win)
+          if(verbose || extrafrac > 0.01) {
+            splat(nsliver, ngettext(nsliver, "tile", "tiles"),
+                  "of", offence,
+                  ngettext(nsliver, "was", "were"),
+                  "given nominal area", signif(epsa, 3),
+                  "increasing the total area by",
+                  signif(extraarea, 5), "square units or",
+                  paste0(round(100 * extrafrac, 1), "% of total area"))
+            if(extrafrac > 0.01)
+              warning(paste("Repairing tiles with", offence,
+                            "caused a",
+                            paste0(round(100 * extrafrac), "%"),
+                            "change in total area"),
+                      call.=FALSE)
+          }
+        }
+ 
+	# compute counting weights 
+	w <- countingweights(id, areas)
+
+        # attach information about weight construction parameters
+        attr(w, "weight.parameters") <-
+          list(method="grid", ntile=ntile, npix=npix, areas=areas)
+        
+	return(w)
+}
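+
+# Illustrative usage (a sketch, not part of the original source):
+#   w <- gridweights(runifpoint(100), ntile = 8)
+#   sum(w)   # close to the window area, here 1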
+
+
+dirichlet.weights <- function(...) {
+  .Deprecated("dirichletWeights", package="spatstat")
+  dirichletWeights(...)
+}
+
+dirichletWeights <- function(X, window = NULL, exact=TRUE, ...) {
+  #'
+  #' Compute weights based on Dirichlet tessellation of the window 
+  #' induced by the point pattern X. 
+  #' The weights are just the tile areas.
+  #'
+  #' NOTE:	X should contain both data and dummy points,
+  #' if you need these weights for the B-T-B method.
+  #'
+  #' Arguments X and (optionally) 'window' are interpreted as a
+  #' point pattern.
+  #'
+  #' If the window is a rectangle, we invoke Rolf Turner's "deldir"
+  #' package to compute the areas of the tiles of the Dirichlet
+  #' tessellation of the window frame induced by the points.
+  #' [NOTE: the functionality of deldir to create dummy points
+  #' is NOT used. ]
+  #'	if exact=TRUE	compute the exact areas, using "deldir"
+  #'	if exact=FALSE      compute the digital areas using exactdt()
+  #' 
+  #' If the window is a mask, we compute the digital area of
+  #' each tile of the Dirichlet tessellation by counting pixels.
+  #'
+  #'
+  #' 
+  X <- as.ppp(X, window)
+
+  if(!exact && is.polygonal(Window(X)))
+    Window(X) <- as.mask(Window(X))
+  
+  #' compute tile areas
+  w <- dirichletAreas(X)
+
+  #' zero areas can occur due to discretisation or weird geometry
+  zeroes <- (w == 0)
+  if(any(zeroes)) {
+    #' compute weights for subset
+    nX <- npoints(X)
+    Xnew <- X[!zeroes]
+    wnew <- dirichletAreas(Xnew)
+    w    <- numeric(nX)
+    w[!zeroes] <- wnew
+    #' map deleted points to nearest retained point
+    jj <- nncross(X[zeroes], Xnew, what="which")
+    #' map retained points to themselves
+    ii <- Xseq <- seq_len(nX)
+    ii[zeroes] <- (ii[!zeroes])[jj]
+    #' redistribute weights
+    nshare <- table(factor(ii, levels=Xseq))
+    w <- w[ii]/nshare[ii]
+  }
+  #' attach information about weight construction parameters
+  attr(w, "weight.parameters") <- list(method="dirichlet", exact=exact)
+  return(w)
+}
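+
+#' Illustrative usage (a sketch, not part of the original source):
+#'   w <- dirichletWeights(runifpoint(50))
+#'   sum(w)   # the tile areas add up to the window area, here 1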
+
+default.ntile <- function(X) { 
+	# default number of tiles (n x n) for tile weights
+        # when data and dummy points are X
+  X <- as.ppp(X)
+  guess.ngrid <- 10 * floor(sqrt(X$n)/10)
+  max(5, guess.ngrid/2)
+}
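+
+# e.g. for 400 points: guess.ngrid = 10 * floor(sqrt(400)/10) = 20,
+# so default.ntile returns max(5, 20/2) = 10
+# (a sketch, not part of the original source)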
+
diff --git a/R/window.R b/R/window.R
new file mode 100755
index 0000000..112b69e
--- /dev/null
+++ b/R/window.R
@@ -0,0 +1,1173 @@
+#
+#	window.S
+#
+#	A class 'owin' to define the "observation window"
+#
+#	$Revision: 4.175 $	$Date: 2016/09/01 05:52:38 $
+#
+#
+#	A window may be either
+#
+#		- rectangular:
+#                       a rectangle in R^2
+#                       (with sides parallel to the coordinate axes)
+#
+#		- polygonal:
+#			delineated by 0, 1 or more non-self-intersecting
+#                       polygons, possibly including polygonal holes.
+#	
+#		- digital mask:
+#			defined by a binary image
+#			whose pixel values are TRUE wherever the pixel
+#                       is inside the window
+#
+#	Any window is an object of class 'owin', 
+#       containing at least the following entries:	
+#
+#		$type:	a string ("rectangle", "polygonal" or "mask")
+#
+#		$xrange   
+#		$yrange
+#			vectors of length 2 giving the real dimensions 
+#			of the enclosing box
+#               $units
+#                       name of the unit of length
+#
+#	The 'rectangle' type has only these entries.
+#
+#       The 'polygonal' type has an additional entry
+#
+#               $bdry
+#                       a list of polygons.
+#                       Each entry bdry[[i]] determines a closed polygon.
+#
+#                       bdry[[i]] has components $x and $y which are
+#                       the cartesian coordinates of the vertices of
+#                       the i-th boundary polygon (without repetition of
+#                       the first vertex, i.e. same convention as in the
+#                       plotting function polygon().)
+#
+#
+#	The 'mask' type has entries
+#
+#		$m		logical matrix
+#		$dim		its dimension array
+#		$xstep,ystep	x and y dimensions of a pixel
+#		$xcol	        vector of x values for each column
+#               $yrow           vector of y values for each row
+#	
+#	(the row index corresponds to increasing y coordinate; 
+#	 the column index corresponds to increasing x coordinate.)
+#
+#
+#-----------------------------------------------------------------------------
+#
+
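+# Illustrative sketch (not part of the original source): the three window
+# types described above can be constructed as
+#   owin(c(0, 2), c(0, 1))                                   # rectangle
+#   owin(poly = list(x = c(0, 1, 1, 0), y = c(0, 0, 1, 1)))  # polygonal
+#   owin(mask = matrix(TRUE, 10, 10))                        # binary mask
+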
+.Spatstat.Image.Warning <-
+  c("Row index corresponds to increasing y coordinate; column to increasing x",
+    "Transpose matrices to get the standard presentation in R",
+    "Example: image(result$xcol,result$yrow,t(result$d))")
+
+owin <- local({
+
+  isxy <- function(x) { (is.matrix(x) || is.data.frame(x)) && ncol(x) == 2 }
+  asxy <- function(xy) { list(x=xy[,1], y=xy[,2]) }
+
+  owin <- function(xrange=c(0,1), yrange=c(0,1),
+                 ..., poly=NULL, mask=NULL, unitname=NULL, xy=NULL) {
+
+  # trap a common abuse of syntax
+  if(nargs() == 1 && !missing(xrange) && is.owin(xrange))
+    return(xrange)
+  
+  unitname <- as.units(unitname)
+
+  ## Exterminate ambiguities
+  poly.given <- !is.null(poly)
+  mask.given <- !is.null(mask)
+  if(poly.given  && mask.given)
+     stop("Ambiguous -- both polygonal boundary and digital mask supplied")
+
+  if(!is.null(xy) && !mask.given)
+    warning("Argument xy ignored: it is only applicable when a mask is given")
+     
+  if(missing(xrange) != missing(yrange))
+    stop("If one of xrange, yrange is specified then both must be.")
+
+  # convert data frames to vanilla lists
+  if(poly.given) {
+    if(is.data.frame(poly))
+      poly <- as.list(poly)
+    else if(is.list(poly) && any(unlist(lapply(poly, is.data.frame))))
+      poly <- lapply(poly, as.list)
+  }
+  
+  ## Hidden options controlling how much checking is performed
+  check <- resolve.1.default(list(check=TRUE), list(...))
+  calculate <- resolve.1.default(list(calculate=check), list(...))
+  strict <- resolve.1.default(list(strict=spatstat.options("checkpolygons")),
+                              list(...))
+  fix <- resolve.1.default(list(fix=spatstat.options("fixpolygons")),
+                           list(...))
+
+  if(!poly.given && !mask.given) {
+    ######### rectangle #################
+    if(check) {
+      if(!is.vector(xrange) || length(xrange) != 2 || xrange[2L] < xrange[1L])
+        stop("xrange should be a vector of length 2 giving (xmin, xmax)")
+      if(!is.vector(yrange) || length(yrange) != 2 || yrange[2L] < yrange[1L])
+        stop("yrange should be a vector of length 2 giving (ymin, ymax)")
+    }
+    w <- list(type="rectangle", xrange=xrange, yrange=yrange, units=unitname)
+    class(w) <- "owin"
+    return(w)
+  } else if(poly.given) {
+    ######### polygonal boundary ########
+    #
+    if(length(poly) == 0) {
+      # empty polygon
+      if(check) {
+        if(!is.vector(xrange) || length(xrange) != 2 || xrange[2L] < xrange[1L])
+          stop("xrange should be a vector of length 2 giving (xmin, xmax)")
+        if(!is.vector(yrange) || length(yrange) != 2 || yrange[2L] < yrange[1L])
+          stop("yrange should be a vector of length 2 giving (ymin, ymax)")
+      }
+      w <- list(type="polygonal", xrange=xrange, yrange=yrange,
+                bdry=list(), units=unitname)
+      class(w) <- "owin"
+      return(w)
+    }
+    # convert matrix or data frame to list(x,y)
+    if(isxy(poly)) {
+      poly <- asxy(poly)
+    } else if(is.list(poly) && all(unlist(lapply(poly, isxy)))) {
+      poly <- lapply(poly, asxy)
+    }
+    # nonempty polygon  
+    # test whether it's a single polygon or multiple polygons
+    if(verify.xypolygon(poly, fatal=FALSE))
+      psingle <- TRUE
+    else if(all(unlist(lapply(poly, verify.xypolygon, fatal=FALSE))))
+      psingle <- FALSE
+    else
+      stop("poly must be either a list(x,y) or a list of list(x,y)")
+
+    w.area <- NULL
+    
+    if(psingle) {
+      # single boundary polygon
+      bdry <- list(poly)
+      if(check || calculate) {
+        w.area <- Area.xypolygon(poly)
+        if(w.area < 0)
+          stop(paste("Area of polygon is negative -",
+                     "maybe traversed in wrong direction?"))
+      }
+    } else {
+      # multiple boundary polygons
+      bdry <- poly
+      if(check || calculate) {
+        w.area <- unlist(lapply(poly, Area.xypolygon))
+        if(sum(w.area) < 0)
+          stop(paste("Area of window is negative;\n",
+                     "check that all polygons were traversed",
+                     "in the right direction"))
+      }
+    }
+
+    actual.xrange <- range(unlist(lapply(bdry, getElement, name="x")))
+    if(missing(xrange))
+      xrange <- actual.xrange
+    else if(check) {
+      if(!is.vector(xrange) || length(xrange) != 2 || xrange[2L] <= xrange[1L])
+        stop("xrange should be a vector of length 2 giving (xmin, xmax)")
+      if(!all(xrange == range(c(xrange, actual.xrange))))
+        stop("polygon's x coordinates outside xrange")
+    }
+    
+    actual.yrange <- range(unlist(lapply(bdry, getElement, name="y")))
+    if(missing(yrange))
+      yrange <- actual.yrange
+    else if(check) {
+      if(!is.vector(yrange) || length(yrange) != 2 || yrange[2L] <= yrange[1L])
+        stop("yrange should be a vector of length 2 giving (ymin, ymax)")
+      if(!all(yrange == range(c(yrange, actual.yrange))))
+      stop("polygon's y coordinates outside yrange")
+    }
+
+    if(!is.null(w.area)) {
+      # tack on area and hole data
+      holes <- (w.area < 0)
+      for(i in seq_along(bdry)) 
+        bdry[[i]] <- append(bdry[[i]], list(area=w.area[i], hole=holes[i]))
+    }
+    
+    w <- list(type="polygonal",
+              xrange=xrange, yrange=yrange, bdry=bdry, units=unitname)
+    class(w) <- "owin"
+
+    if(check && strict) { 
+      ## strict checks on geometry (self-intersection etc)
+      ok <- owinpolycheck(w)
+      if(!ok) {
+        errors <- attr(ok, "err")
+        stop(paste("Polygon data contain", commasep(errors)))
+      }
+    }
+    if(check && fix) {
+      if(length(bdry) == 1 &&
+         length(bx <- bdry[[1L]]$x) == 4 &&
+         length(unique(bx)) == 2 &&
+         length(unique(bdry[[1L]]$y)) == 2) {
+        ## it's really a rectangle
+        if(Area.xypolygon(bdry[[1L]]) < 0)
+          w$bdry <- lapply(bdry, reverse.xypolygon)
+      } else {
+        ## repair polygon data by invoking polyclip
+        ##        to intersect polygon with larger-than-bounding rectangle
+        ##        (Streamlined version of intersect.owin)
+        ww <- lapply(bdry, reverse.xypolygon)
+        xrplus <- mean(xrange) + c(-1,1) * diff(xrange)
+        yrplus <- mean(yrange) + c(-1,1) * diff(yrange)
+        bignum <- (.Machine$integer.max^2)/2
+        epsclip <- max(diff(xrange), diff(yrange))/bignum
+        rr <- list(list(x=xrplus[c(1,2,2,1)], y=yrplus[c(2,2,1,1)]))
+        bb <- polyclip::polyclip(ww, rr, "intersection",
+                                 fillA="nonzero", fillB="nonzero", eps=epsclip)
+        ## ensure correct polarity
+        totarea <- sum(unlist(lapply(bb, Area.xypolygon)))
+        if(totarea < 0)
+          bb <- lapply(bb, reverse.xypolygon)
+        w$bdry <- bb
+      }
+    }
+    return(w)
+  } else if(mask.given) {
+    ######### digital mask #####################
+
+    if(is.data.frame(mask) &&
+       ncol(mask) %in% c(2,3) &&
+       sum(sapply(mask, is.numeric)) == 2) {
+      # data frame with 2 columns of coordinates
+      return(as.owin(W=mask, xy=xy))
+    }
+      
+    if(!is.matrix(mask))
+      stop(paste(sQuote("mask"), "must be a matrix"))
+    if(!is.logical(mask))
+      stop(paste("The entries of", sQuote("mask"), "must be logical"))
+    
+    nc <- ncol(mask)
+    nr <- nrow(mask)
+
+    if(!is.null(xy)) {
+      # pixel coordinates given explicitly
+      # validate dimensions
+      if(!is.list(xy) || !checkfields(xy, c("x","y")))
+        stop("xy should be a list with entries x and y")
+      xcol <- xy$x
+      yrow <- xy$y
+      if(length(xcol) != nc)
+        stop(paste("length of xy$x =", length(xcol),
+                   "!=", nc, "= number of columns of mask"))
+      if(length(yrow) != nr)
+        stop(paste("length of xy$y =", length(yrow),
+                   "!=", nr, "= number of rows of mask"))
+      # x and y should be evenly spaced
+      if(!evenly.spaced(xcol))
+        stop("xy$x is not evenly spaced")
+      if(!evenly.spaced(yrow))
+        stop("xy$y is not evenly spaced")
+      # determine other parameters
+      xstep <- diff(xcol)[1L]
+      ystep <- diff(yrow)[1L]
+      if(missing(xrange) && missing(yrange)) {
+        xrange <- range(xcol) + c(-1,1) * xstep/2
+        yrange <- range(yrow) + c(-1,1) * ystep/2
+      }
+    } else {
+      # determine pixel coordinates from xrange, yrange
+      if(missing(xrange) && missing(yrange)) {
+        # take pixels to be 1 x 1 unit
+        xrange <- c(0,nc)
+        yrange <- c(0,nr)
+      } else if(check) {
+        if(!is.vector(xrange) || length(xrange) != 2 || xrange[2L] <= xrange[1L])
+          stop("xrange should be a vector of length 2 giving (xmin, xmax)")
+        if(!is.vector(yrange) || length(yrange) != 2 || yrange[2L] <= yrange[1L])
+          stop("yrange should be a vector of length 2 giving (ymin, ymax)")
+      }
+      xstep <- diff(xrange)/nc
+      ystep <- diff(yrange)/nr
+      xcol  <- seq(from=xrange[1L]+xstep/2, to=xrange[2L]-xstep/2, length.out=nc)
+      yrow  <- seq(from=yrange[1L]+ystep/2, to=yrange[2L]-ystep/2, length.out=nr)
+    }
+
+    out <- list(type     = "mask",
+                xrange   = xrange,
+                yrange   = yrange,
+                dim      = c(nr, nc),
+                xstep    = xstep,
+                ystep    = ystep,
+                warnings = .Spatstat.Image.Warning,
+                xcol    = xcol, 
+                yrow    = yrow,
+                m       = mask,
+                units   = unitname)
+    class(out) <- "owin"
+    return(out)
+  }
+  # never reached
+  NULL
+}
+
+  owin
+})
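+
+## A minimal usage sketch (not run; illustrative only). It shows the three
+## window types the constructor accepts; all coordinates are hypothetical.
+##   R <- owin(c(0,10), c(0,5))                        # rectangle
+##   P <- owin(poly=list(x=c(0,4,2), y=c(0,0,3)))      # anticlockwise triangle
+##   M <- owin(mask=matrix(runif(200) > 0.5, 10, 20))  # 10 x 20 logical mask
+## Holes are supplied as clockwise polygons within a list of boundary polygons.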
+
+#
+#-----------------------------------------------------------------------------
+#
+
+is.owin <- function(x) { inherits(x, "owin") }
+
+#
+#-----------------------------------------------------------------------------
+#
+
+as.owin <- function(W, ..., fatal=TRUE) {
+  UseMethod("as.owin")
+}
+
+as.owin.owin <- function(W, ..., fatal=TRUE) {
+  if(verifyclass(W, "owin", fatal=fatal)) 
+    return(owin(W$xrange, W$yrange, poly=W$bdry, mask=W$m, unitname=unitname(W), check=FALSE))
+  else
+    return(NULL)
+}
+
+as.owin.ppp <- function(W, ..., fatal=TRUE) {
+  if(verifyclass(W, "ppp", fatal=fatal))
+    return(W$window)
+  else
+    return(NULL)
+}
+
+as.owin.quad <- function(W, ..., fatal=TRUE) {
+  if(verifyclass(W, "quad", fatal=fatal))
+    return(W$data$window)
+  else
+    return(NULL)
+}
+
+as.owin.im <- function(W, ..., fatal=TRUE) {
+  if(!verifyclass(W, "im", fatal=fatal))
+    return(NULL)
+  out <- list(type     = "mask",
+              xrange   = W$xrange,
+              yrange   = W$yrange,
+              dim      = W$dim,
+              xstep    = W$xstep,
+              ystep    = W$ystep,
+              warnings = .Spatstat.Image.Warning,
+              xcol    = W$xcol,
+              yrow    = W$yrow,
+              m       = !is.na(W$v),
+              units   = unitname(W))
+  class(out) <- "owin"
+  return(out)
+}
+
+as.owin.psp <- function(W, ..., fatal=TRUE) {
+  if(!verifyclass(W, "psp", fatal=fatal))
+    return(NULL)
+  return(W$window)
+}
+
+as.owin.tess <- function(W, ..., fatal=TRUE) {
+  if(!verifyclass(W, "tess", fatal=fatal))
+    return(NULL)
+  return(W$window)
+}
+
+as.owin.data.frame <- function(W, ..., step, fatal=TRUE) {
+  if(!verifyclass(W, "data.frame", fatal=fatal))
+    return(NULL)
+  if(missing(step)) {
+    xstep <- ystep <- NULL
+  } else {
+    step <- ensure2vector(step)
+    xstep <- step[1L]
+    ystep <- step[2L]
+  }
+  if(!(ncol(W) %in% c(2,3))) {
+    whinge <- "need exactly 2 or 3 columns of data"
+    if(fatal) stop(whinge)
+    warning(whinge)
+    return(NULL)
+  }
+  if(twocol <- (ncol(W) == 2)) {
+    # assume data is a list of TRUE pixels
+    W <- cbind(W, TRUE)
+  } 
+  mch <- matchNameOrPosition(c("x", "y", "z"), names(W))
+  ix <- mch[1L]
+  iy <- mch[2L]
+  iz <- mch[3L]
+  df <- data.frame(x=W[,ix], y=W[,iy], z=as.logical(W[,iz]))
+  with(df, {
+    xx <- sort(unique(x))
+    yy <- sort(unique(y))
+    jj <- match(x, xx)
+    ii <- match(y, yy)
+    ## make logical matrix (for incomplete x, y sequence)
+    ok <- checkbigmatrix(length(xx), length(yy), fatal=fatal)
+    if(!ok) return(NULL)
+    mm <- matrix(FALSE, length(yy), length(xx))
+    mm[cbind(ii,jj)] <- z
+    ## ensure xx and yy are complete equally-spaced sequences
+    fx <- fillseq(xx, step=xstep)
+    fy <- fillseq(yy, step=ystep)
+    xcol <- fx[[1L]]
+    yrow <- fy[[1L]]
+    ## trap very large matrices
+    ok <- checkbigmatrix(length(xcol), length(yrow), fatal=fatal)
+    if(!ok) return(NULL)
+    ## mapping from xx to xcol, yy to yrow
+    jjj <- fx[[2L]]
+    iii <- fy[[2L]]
+    ## make logical matrix for full sequence
+    m <- matrix(FALSE, length(yrow), length(xcol))
+    m[iii,jjj] <- mm
+    ## make binary mask
+    out <- owin(mask=m, xy=list(x=xcol, y=yrow))
+    ## warn if area fraction is small: may be a misuse of as.owin
+    if(twocol) {
+      pcarea <- 100 * nrow(df)/prod(dim(m))
+      if(pcarea < 1) 
+       warning(paste("Window occupies only",
+                     paste0(signif(pcarea, 2), "%"),
+                     "of frame area. Did you mean owin(poly=df) ?"),
+               call.=FALSE)
+    }
+    return(out)
+  })
+}
+
+as.owin.default <- function(W, ..., fatal=TRUE) {
+  ## Tries to interpret data as an object of class 'owin'
+  ## W may be
+  ##	a structure with entries xrange, yrange
+  ##	a four-element vector (interpreted xmin, xmax, ymin, ymax)
+  ##	a structure with entries xl, xu, yl, yu
+  ##	an object with attribute "bbox"
+
+  if(inherits(W, "box3")) {
+    #' cannot be flattened
+    if(fatal)
+      stop("3D box cannot be converted to a 2D window")
+    return(NULL)
+  }
+  
+  if(checkfields(W, c("xrange", "yrange"))) {
+    Z <- owin(W$xrange, W$yrange)
+    return(Z)
+  } else if(is.vector(W) && is.numeric(W) && length(W) == 4) {
+    Z <- owin(W[1:2], W[3:4])
+    return(Z)
+  } else if(checkfields(W, c("xl", "xu", "yl", "yu"))) {
+    W <- as.list(W)
+    Z <- owin(c(W$xl, W$xu),c(W$yl, W$yu))
+    return(Z)
+  } else if(checkfields(W, c("x", "y", "area"))
+            && checkfields(W$area, c("xl", "xu", "yl", "yu"))) {
+    V <- as.list(W$area)
+    Z <- owin(c(V$xl, V$xu),c(V$yl, V$yu))
+    return(Z)
+  } else if(!is.null(Z <- attr(W, "bbox"))) {
+    return(as.owin(Z, ..., fatal=fatal))
+  } else if(fatal)
+    stop("Can't interpret W as a window")
+  else return(NULL)
+}
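+
+## Sketch (not run): besides the structures listed in the comments above, the
+## default method accepts a plain numeric vector c(xmin, xmax, ymin, ymax):
+##   as.owin(c(0, 1, 0, 1))   # the unit square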
+
+#
+#-----------------------------------------------------------------------------
+#
+#
+Frame <- function(X) { UseMethod("Frame") }
+
+"Frame<-" <- function(X, value) { UseMethod("Frame<-") }
+
+Frame.default <- function(X) { as.rectangle(X) }
+
+## .........................................................
+
+as.rectangle <- function(w, ...) {
+  if(inherits(w, "owin"))
+    return(owin(w$xrange, w$yrange, unitname=unitname(w)))
+  else if(inherits(w, "im"))
+    return(owin(w$xrange, w$yrange, unitname=unitname(w)))
+  else if(inherits(w, "layered")) 
+    return(do.call(boundingbox, unname(lapply(w, as.rectangle))))
+  else {
+    w <- as.owin(w, ...)
+    return(owin(w$xrange, w$yrange, unitname=unitname(w)))
+  }
+}
+
+#
+#-----------------------------------------------------------------------------
+#
+as.mask <- function(w, eps=NULL, dimyx=NULL, xy=NULL) {
+#	eps:		   grid mesh (pixel) size
+#	dimyx:		   dimensions of pixel raster
+#       xy:                coordinates of pixel raster
+  nonamedargs <- is.null(eps) && is.null(dimyx) && is.null(xy)
+  uname <- as.units(NULL)
+  if(!missing(w) && !is.null(w)) {
+    if(is.data.frame(w)) return(owin(mask=w, xy=xy))
+    if(is.matrix(w)) {
+      w <- as.data.frame(w)
+      colnames(w) <- c("x", "y")
+      return(owin(mask=w, xy=xy))
+    }
+    w <- as.owin(w)
+    uname <- unitname(w)
+  } else {
+    if(is.null(xy)) 
+      stop("If w is missing, xy is required")
+  }
+  # If it's already a mask, and no other arguments specified,
+  # just return it. (Guard against w=NULL, which is allowed when xy is given.)
+  if(!missing(w) && !is.null(w) && w$type == "mask" && nonamedargs)
+    return(w)
+  
+##########################
+#  First determine pixel coordinates
+##########################
+  if(is.null(xy)) {
+# Pixel coordinates to be computed from other dimensions
+# First determine row & column dimensions
+    if(!is.null(dimyx)) {
+      dimyx <- ensure2vector(dimyx)
+      nr <- dimyx[1L]
+      nc <- dimyx[2L]
+    } else {
+    # use pixel size 'eps'
+      if(!is.null(eps)) {
+        eps <- ensure2vector(eps)
+        nc <- diff(w$xrange)/eps[1L]
+        nr <- diff(w$yrange)/eps[2L]
+        if(nr < 1 || nc < 1)
+          warning("pixel size parameter eps > size of window")
+        nr <- ceiling(nr)
+        nc <- ceiling(nc)
+      } else {
+    # use spatstat defaults
+        np <- spatstat.options("npixel")
+        if(length(np) == 1)
+          nr <- nc <- np[1L]
+        else {
+          nr <- np[2L]  
+          nc <- np[1L]
+        }
+      }
+    }
+    if((mpix <- (nr * nc)/1048576) >= 10) {
+      whinge <- paste("Creating",
+                      articlebeforenumber(mpix),
+                      paste0(round(mpix, 1), "-megapixel"),
+                      "window mask")
+      message(whinge)
+      warning(whinge, call.=FALSE)
+    }
+    # Initialise mask with all entries TRUE
+    rasta <- owin(w$xrange, w$yrange, mask=matrix(TRUE, nr, nc))
+  } else {
+# 
+# Pixel coordinates given explicitly:
+#    xy is an image, a mask, or a list(x,y)
+#
+    if(is.im(xy)) {
+      rasta <- as.owin(xy)
+      rasta$m[] <- TRUE
+    } else if(is.owin(xy)) {
+      if(xy$type != "mask")
+        stop("argument xy does not contain raster coordinates.")
+      rasta <- xy
+      rasta$m[] <- TRUE
+    } else {
+      if(!checkfields(xy, c("x", "y")))
+        stop(paste(sQuote("xy"),
+                   "should be a list containing two vectors x and y"))
+      x <- sort(unique(xy$x))
+      y <- sort(unique(xy$y))
+      # derive other parameters
+      nr <- length(y)
+      nc <- length(x)
+      # check size
+      if((mpix <- (nr * nc)/1048576) >= 10) {
+        whinge <- paste("Creating",
+                        articlebeforenumber(mpix),
+                        paste0(round(mpix, 1), "-megapixel"),
+                        "window mask")
+        message(whinge)
+        warning(whinge, call.=FALSE)
+      }
+      # x and y pixel sizes
+      dx <- diff(x)
+      if(diff(range(dx)) > 0.01 * mean(dx))
+        stop("x coordinates must be evenly spaced")
+      xstep <- mean(dx)
+      dy <- diff(y)
+      if(diff(range(dy)) > 0.01 * mean(dy))
+        stop("y coordinates must be evenly spaced")
+      ystep <- mean(dy)
+      xr <- range(x)
+      yr <- range(y)
+      xrange <-  xr + xstep * c(-1,1)/2
+      yrange <-  yr + ystep * c(-1,1)/2
+      # initialise mask with all entries TRUE
+      rasta <- list(type     = "mask",
+                    xrange   = xrange,
+                    yrange   = yrange,
+                    dim      = c(nr, nc),
+                    xstep    = xstep,
+                    ystep    = ystep,
+                    warnings = .Spatstat.Image.Warning,
+                    xcol    = seq(from=xr[1L], to=xr[2L], length.out=nc),
+                    yrow    = seq(from=yr[1L], to=yr[2L], length.out=nr),
+                    m       = matrix(TRUE, nr, nc),
+                    units   = uname)
+      class(rasta) <- "owin"
+    }
+    if(missing(w) || is.null(w)) {
+      # No more window information
+      out <- rasta
+      # only applicable when xy was a list(x,y): images and masks carry
+      # their raster in xcol/yrow, so xy$x is NULL for them
+      if(!is.null(xy$x) && !(identical(x, xy$x) && identical(y, xy$y))) {
+        ## xy is an enumeration of the TRUE pixels
+        out$m[] <- FALSE
+        ij <- cbind(i=match(xy$y, y), j=match(xy$x, x))
+        out$m[ij] <- TRUE
+      }
+      return(out)
+    }
+  }
+
+################################  
+# Second, mask pixel raster with existing window
+################################  
+  switch(w$type,
+         rectangle = {
+           out <- rasta
+           if(!all(w$xrange == rasta$xrange)
+              || !all(w$yrange == rasta$yrange)) {
+             xcol <- rasta$xcol
+             yrow <- rasta$yrow
+             wx <- w$xrange
+             wy <- w$yrange
+             badrow <- which(yrow > wy[2L] | yrow < wy[1L])
+             badcol <- which(xcol > wx[2L] | xcol < wx[1L])
+             out$m[badrow , ] <- FALSE
+             out$m[ , badcol] <- FALSE
+           }
+         },
+         mask = {
+           # resample existing mask on new raster
+           out <- rastersample(w, rasta)
+         },
+         polygonal = {
+           # use C code
+           out <- owinpoly2mask(w, rasta, FALSE)
+         })
+
+  unitname(out) <- uname
+  return(out)
+}
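+
+## Sketch (not run): discretising a polygonal window. 'letterR' is assumed
+## here to be the polygonal sample window shipped with spatstat.
+##   M1 <- as.mask(letterR, dimyx=128)   # 128 x 128 pixel raster
+##   M2 <- as.mask(letterR, eps=0.05)    # pixels 0.05 units wide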
+
+as.matrix.owin <- function(x, ...) {
+  m <- as.mask(x, ...)
+  return(m$m)
+}
+
+#
+#
+#-----------------------------------------------------------------------------
+#
+as.polygonal <- function(W, repair=FALSE) {
+  verifyclass(W, "owin")
+  switch(W$type,
+         rectangle = {
+           xr <- W$xrange
+           yr <- W$yrange
+           return(owin(xr, yr, poly=list(x=xr[c(1,2,2,1)],y=yr[c(1,1,2,2)]),
+                       unitname=unitname(W),
+                       check=FALSE))
+         },
+         polygonal = {
+           if(repair)
+             W <- owin(poly=W$bdry, unitname=unitname(W))
+           return(W)
+         },
+         mask = {
+           # This could take a while
+           M <- W$m
+           nr <- nrow(M)
+           notM <- !M
+           out <- NULL
+           xcol <- W$xcol
+           yrow <- W$yrow
+           xbracket <- 1.1 * c(-1,1) * W$xstep/2
+           ybracket <- 1.1 * c(-1,1) * W$ystep/2
+           # identify runs of TRUE entries in each column
+           start <- M & rbind(TRUE, notM[-nr, ])
+           finish <- M & rbind(notM[-1, ], TRUE)
+           for(j in 1:ncol(M)) {
+             xj <- xcol[j]
+             # identify start and end positions in column j
+             starts <- which(start[,j])
+             finishes <- which(finish[,j])
+             ns <- length(starts)
+             nf <- length(finishes)
+             if(ns != nf)
+               stop(paste("Internal error: length(starts)=", ns,
+                          ", length(finishes)=", nf))
+             if(ns > 0) {
+               for(k in 1:ns) {
+                 yfrom <- yrow[starts[k]]
+                 yto   <- yrow[finishes[k]]
+                 yk <- sort(c(yfrom,yto))
+                 # make rectangle
+                 recto <- owin(xj+xbracket,yk+ybracket)
+                 # add to result
+                 out <- union.owin(out, recto)
+               }
+               unitname(out) <- unitname(W)
+             }
+           }
+           return(out)
+         }
+         )
+}
+
+#
+# ----------------------------------------------------------------------
+
+is.polygonal <- function(w) {
+  return(inherits(w, "owin") && (w$type == "polygonal"))
+}
+
+is.rectangle <- function(w) {
+  return(inherits(w, "owin") && (w$type == "rectangle"))
+}
+
+is.mask <- function(w) {
+  return(inherits(w, "owin") && (w$type == "mask"))
+}
+
+validate.mask <- function(w, fatal=TRUE) {
+  verifyclass(w, "owin", fatal=fatal)
+  if(w$type == "mask")
+    return(TRUE)
+  if(fatal)
+      stop(paste(short.deparse(substitute(w)), "is not a binary mask"))
+  else {
+      warning(paste(short.deparse(substitute(w)), "is not a binary mask"))
+      return(FALSE)
+  }
+}
+
+dim.owin <- function(x) { return(x$dim) } ## NULL unless it's a mask
+
+## internal use only:
+
+rasterx.mask <- function(w, drop=FALSE) {
+  validate.mask(w)
+  x <- w$xcol[col(w)]
+  x <- if(drop) x[w$m, drop=TRUE] else array(x, dim=w$dim)
+  return(x)
+}
+
+rastery.mask <- function(w, drop=FALSE) {
+  validate.mask(w)
+  y <- w$yrow[row(w)]
+  y <- if(drop) y[w$m, drop=TRUE] else array(y, dim=w$dim)
+  return(y)
+}
+
+rasterxy.mask <- function(w, drop=FALSE) {
+  validate.mask(w)
+  x <- w$xcol[col(w)]
+  y <- w$yrow[row(w)]
+  if(drop) {
+    m <- w$m
+    x <- x[m, drop=TRUE] 
+    y <- y[m, drop=TRUE]
+  }
+  return(list(x=as.numeric(x),
+              y=as.numeric(y)))
+}
+
+
+nearest.raster.point <- function(x,y,w, indices=TRUE) {
+  stopifnot(is.mask(w) || is.im(w))
+  nr <- w$dim[1L]
+  nc <- w$dim[2L]
+  if(length(x) == 0) {
+    cc <- rr <- integer(0)
+  } else {
+    cc <- 1 + round((x - w$xcol[1L])/w$xstep)
+    rr <- 1 + round((y - w$yrow[1L])/w$ystep)
+    cc <- pmax.int(1,pmin.int(cc, nc))
+    rr <- pmax.int(1,pmin.int(rr, nr))
+  }
+  if(indices) 
+    return(list(row=rr, col=cc))
+  else
+    return(list(x=w$xcol[cc], y=w$yrow[rr]))
+}
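+
+## Sketch (not run): mapping continuous coordinates to the (row, col) indices
+## of a hypothetical mask M, then looking up the corresponding pixel:
+##   ij <- nearest.raster.point(0.3, 0.7, M)
+##   M$m[ij$row, ij$col]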
+
+mask2df <- function(w) {
+  stopifnot(is.owin(w) && w$type == "mask")
+  xx <- raster.x(w)
+  yy <- raster.y(w)
+  ok <- w$m
+  xx <- as.vector(xx[ok])
+  yy <- as.vector(yy[ok])
+  return(data.frame(x=xx, y=yy))
+}
+
+#------------------------------------------------------------------
+		
+complement.owin <- function(w, frame=as.rectangle(w)) {
+  w <- as.owin(w)
+
+  if(reframe <- !missing(frame)) {
+    verifyclass(frame, "owin")
+    w <- rebound.owin(w, frame)
+    # if w was a rectangle, it's now polygonal
+  }
+
+  switch(w$type,
+         mask = {
+           w$m <- !(w$m)
+         },
+         rectangle = {
+           # return empty window
+           return(emptywindow(w))
+         },
+         polygonal = {
+           bdry <- w$bdry
+           if(length(bdry) == 0) {
+             # w is empty
+             return(frame)
+           }
+           # bounding box, in anticlockwise order
+           box <- list(x=w$xrange[c(1,2,2,1)],
+                       y=w$yrange[c(1,1,2,2)])
+           boxarea <- Area.xypolygon(box)
+                 
+           # first check whether one of the current boundary polygons
+           # is the bounding box itself (with + sign)
+           if(reframe)
+             is.box <- rep.int(FALSE, length(bdry))
+           else {
+             nvert <- lengths(lapply(bdry, getElement, name="x"))
+             areas <- sapply(bdry, Area.xypolygon)
+             boxarea.mineps <- boxarea * (0.99999)
+             is.box <- (nvert == 4 & areas >= boxarea.mineps)
+             if(sum(is.box) > 1)
+               stop("Internal error: multiple copies of bounding box")
+             if(all(is.box)) {
+               return(emptywindow(box))
+             }
+           }
+                 
+           # if box is present (with + sign), remove it
+           if(any(is.box))
+             bdry <- bdry[!is.box]
+                 
+           # now reverse the direction of each polygon
+           bdry <- lapply(bdry, reverse.xypolygon, adjust=TRUE)
+
+           # if box was absent, add it
+           if(!any(is.box))
+             bdry <- c(bdry, list(box))   # sic
+
+           # put back into w
+           w$bdry <- bdry
+         },
+         stop("unrecognised window type", w$type)
+         )
+  return(w)
+}
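+
+## Sketch (not run): complementing within the window's own frame; per the
+## code above, each boundary polygon is reversed and the frame is appended.
+##   H <- complement.owin(owin(poly=list(x=c(0,4,2), y=c(0,0,3))))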
+
+#-----------------------------------------------------------
+
+inside.owin <- function(x, y, w) {
+  # test whether (x,y) is inside window w
+  # x, y may be vectors 
+
+  if(missing(y) && all(c("x", "y") %in% names(x)))
+    return(inside.owin(x$x, x$y, w))
+
+  w <- as.owin(w)
+
+  if(length(x)==0)
+    return(logical(0))
+  
+  # test whether inside bounding rectangle
+  xr <- w$xrange
+  yr <- w$yrange
+  eps <- sqrt(.Machine$double.eps)
+  frameok <- (x >= xr[1L] - eps) & (x <= xr[2L] + eps) & 
+             (y >= yr[1L] - eps) & (y <= yr[2L] + eps)
+ 
+  if(!any(frameok))  # all points OUTSIDE window - no further work needed
+    return(frameok)
+
+  ok <- frameok
+  switch(w$type,
+         rectangle = {
+           return(ok)
+         },
+         polygonal = {
+           ## check scale
+           framesize <- max(diff(xr), diff(yr))
+           if(framesize > 1e6 || framesize < 1e-6) {
+             ## rescale to avoid numerical overflow
+             scalefac <- framesize/100
+             w <- rescale(w, scalefac)
+             x <- x/scalefac
+             y <- y/scalefac
+           }
+           xy <- list(x=x,y=y)
+           bdry <- w$bdry
+           total <- numeric(length(x))
+           on.bdry <- rep.int(FALSE, length(x))
+           for(i in seq_along(bdry)) {
+             score <- inside.xypolygon(xy, bdry[[i]], test01=FALSE)
+             total <- total + score
+             on.bdry <- on.bdry | attr(score, "on.boundary")
+           }
+           # any points identified as belonging to the boundary get score 1
+           total[on.bdry] <- 1
+           # sanity check: each point's total score should be exactly 0 or 1
+           uhoh <- (total * (1-total) != 0)
+           if(any(uhoh)) {
+             nuh <- sum(uhoh)
+             warning(paste("point-in-polygon test had difficulty with",
+                           nuh,
+                           ngettext(nuh, "point", "points"),
+                           "(total score not 0 or 1)"),
+                     call.=FALSE)
+             total[uhoh] <- 0
+           }
+           return(ok & (total != 0))
+         },
+         mask = {
+           # consider only those points which are inside the frame
+           xf <- x[frameok]
+           yf <- y[frameok]
+           # map locations to raster (row,col) coordinates
+           loc <- nearest.raster.point(xf,yf,w)
+           # look up mask values
+           okf <- (w$m)[cbind(loc$row, loc$col)]
+           # insert into 'ok' vector
+           ok[frameok] <- okf
+           return(ok)
+         },
+         stop("unrecognised window type", w$type)
+         )
+}
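+
+## Sketch (not run): the test is vectorised over (x, y):
+##   inside.owin(c(0.5, 2), c(0.5, 2), owin(c(0,1), c(0,1)))
+##   ## TRUE FALSE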
+
+#-------------------------------------------------------------------------
+  
+print.owin <- function(x, ..., prefix="window: ") {
+  verifyclass(x, "owin")
+  unitinfo <- summary(unitname(x))
+  switch(x$type,
+         rectangle={
+           rectname <- paste0(prefix, "rectangle =")
+         },
+         polygonal={
+           splat(paste0(prefix, "polygonal boundary"))
+           if(length(x$bdry) == 0)
+             splat("window is empty")
+           rectname <- "enclosing rectangle:"
+         },
+         mask={
+           splat(paste0(prefix, "binary image mask"))
+           di <- x$dim
+           splat(di[1L], "x", di[2L], "pixel array (ny, nx)")
+           rectname <- "enclosing rectangle:"
+         }
+         )
+  splat(rectname,
+        prange(zapsmall(x$xrange)),
+        "x",
+        prange(zapsmall(x$yrange)),
+        unitinfo$plural,
+        unitinfo$explain)
+  invisible(NULL)
+}
+
+summary.owin <- function(object, ...) {
+  verifyclass(object, "owin")
+  result <- list(xrange=object$xrange,
+                 yrange=object$yrange,
+                 type=object$type,
+                 area=area(object),
+                 units=unitname(object))
+  result$areafraction <- with(result, area/(diff(xrange) * diff(yrange)))
+  switch(object$type,
+         rectangle={
+         },
+         polygonal={
+           poly <- object$bdry
+           result$npoly <- npoly <- length(poly)
+           if(npoly == 0) {
+             result$areas <- result$nvertices <- numeric(0)
+           } else if(npoly == 1) {
+             result$areas <- Area.xypolygon(poly[[1L]])
+             result$nvertices <- length(poly[[1L]]$x)
+           } else {
+             result$areas <- unlist(lapply(poly, Area.xypolygon))
+             result$nvertices <- lengths(lapply(poly, getElement, name="x"))
+           }
+           result$nhole <- sum(result$areas < 0)
+         },
+         mask={
+           result$npixels <- object$dim
+           result$xstep <- object$xstep
+           result$ystep <- object$ystep
+         }
+         )
+  class(result) <- "summary.owin"
+  result
+}
+
+print.summary.owin <- function(x, ...) {
+  verifyclass(x, "summary.owin")
+  unitinfo <- summary(x$units)
+  pluralunits <- unitinfo$plural
+  singularunits <- unitinfo$singular
+  switch(x$type,
+         rectangle={
+           rectname <- "Window: rectangle ="
+         },
+         polygonal={
+           np <- x$npoly
+           splat("Window: polygonal boundary")
+           if(np == 0) {
+             splat("window is empty")
+           } else if(np == 1) {
+             splat("single connected closed polygon with",
+                   x$nvertices, "vertices")
+           } else {
+             nh <- x$nhole
+             holy <- if(nh == 0) "(no holes)" else
+                     if(nh == 1) "(1 hole)" else
+                     paren(paste(nh, "holes"))
+             splat(np, "separate polygons", holy)
+             if(np > 0)
+               print(data.frame(vertices=x$nvertices,
+                                area=signif(x$areas, 6),
+                                relative.area=signif(x$areas/x$area,3),
+                                row.names=paste("polygon",
+                                  1:np,
+                                  ifelse(x$areas < 0, "(hole)", "")
+                                  )))
+           }
+           rectname <- "enclosing rectangle:"
+         },
+         mask={
+           splat("binary image mask")
+           di <- x$npixels
+           splat(di[1L], "x", di[2L], "pixel array (ny, nx)")
+           splat("pixel size:",
+                 signif(x$xstep,3), "by", signif(x$ystep,3),
+                 pluralunits)
+           rectname <- "enclosing rectangle:"
+         }
+         )
+  splat(rectname,
+        prange(zapsmall(x$xrange)),
+        "x",
+        prange(zapsmall(x$yrange)),
+        pluralunits)
+  Area <- signif(x$area, 6)
+  splat("Window area =", Area, "square",
+        if(Area == 1) singularunits else pluralunits)
+  if(!is.null(ledge <- unitinfo$legend))
+    splat(ledge)
+  if(x$type != "rectangle")
+    splat("Fraction of frame area:", signif(x$areafraction, 3))
+  return(invisible(x))
+}
+
+as.data.frame.owin <- function(x, ..., drop=TRUE) {
+  stopifnot(is.owin(x))
+  switch(x$type,
+         rectangle = { x <- as.polygonal(x) },
+         polygonal = { },
+         mask = {
+           xy <- rasterxy.mask(x, drop=drop)
+           if(!drop) xy <- append(xy, list(inside=as.vector(x$m)))
+           return(as.data.frame(xy, ...))
+         })
+  b <- x$bdry
+  ishole <- sapply(b, is.hole.xypolygon)
+  sign <- (-1)^ishole
+  b <- lapply(b, as.data.frame, ...)
+  nb <- length(b)
+  if(nb == 1)
+    return(b[[1L]])
+  dfs <- mapply(cbind, b, id=as.list(seq_len(nb)), sign=as.list(sign),
+                SIMPLIFY=FALSE)
+  df <- do.call(rbind, dfs)
+  return(df)
+}
+
+discretise <- function(X, eps=NULL, dimyx=NULL, xy=NULL) {
+  ## move the window of the point pattern X onto a pixel raster, switching
+  ## on extra pixels where needed so that no data point falls outside
+  verifyclass(X, "ppp")
+  W <- X$window
+  ok <- inside.owin(X$x, X$y, W)
+  if(!all(ok))
+    stop("There are points of X outside the window of X")
+  all.null <- is.null(eps) && is.null(dimyx) && is.null(xy)
+  if(W$type == "mask" && all.null) return(X)
+  WM  <- as.mask(W, eps=eps, dimyx=dimyx, xy=xy)
+  nok <- !inside.owin(X$x, X$y, WM)
+  if(any(nok)) {
+    ## switch on the pixels nearest to the excluded data points
+    ifix <- nearest.raster.point(X$x[nok], X$y[nok], WM)
+    ifix <- cbind(ifix$row, ifix$col)
+    WM$m[ifix] <- TRUE
+  }
+  X$window <- WM
+  X
+}
+
+pixelcentres <- function (X, W=NULL,...) {
+  X <- as.mask(as.owin(X), ...)
+  if(is.null(W)) W <- as.rectangle(X)
+  Y <- as.ppp(raster.xy(X,drop=TRUE),W=W)
+  return(Y)
+}
+
+owin2polypath <- function(w) {
+  w <- as.polygonal(w)
+  b <- w$bdry
+  xvectors <- lapply(b, getElement, name="x")
+  yvectors <- lapply(b, getElement, name="y")
+  xx <- unlist(lapply(xvectors, append, values=NA, after=FALSE))[-1]
+  yy <- unlist(lapply(yvectors, append, values=NA, after=FALSE))[-1]
+  return(list(x=xx, y=yy))
+}
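+
+## Sketch (not run): the NA-separated coordinates match what
+## graphics::polypath() expects, so holes are rendered correctly:
+##   pp <- owin2polypath(W)   # W: any hypothetical window
+##   polypath(pp, col="grey", rule="evenodd")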
+
+## generics which extract and assign the window of some object
+
+Window <- function(X, ...) { UseMethod("Window") }
+
+"Window<-" <- function(X, ..., value) { UseMethod("Window<-") }
+
diff --git a/R/wingeom.R b/R/wingeom.R
new file mode 100755
index 0000000..d9ba495
--- /dev/null
+++ b/R/wingeom.R
@@ -0,0 +1,1102 @@
+#
+#	wingeom.R	Various geometrical computations in windows
+#
+#	$Revision: 4.122 $	$Date: 2017/06/05 10:31:58 $
+#
+
+volume.owin <- function(x) { area.owin(x) }
+
+area <- function(w) UseMethod("area")
+
+area.default <- function(w) area.owin(as.owin(w))
+
+area.owin <- function(w) {
+  stopifnot(is.owin(w))
+  switch(w$type,
+         rectangle = {
+           width <- abs(diff(w$xrange))
+           height <- abs(diff(w$yrange))
+           area <- width * height
+         },
+         polygonal = {
+           area <- sum(unlist(lapply(w$bdry, Area.xypolygon)))
+         },
+         mask = {
+           pixelarea <- abs(w$xstep * w$ystep)
+           npixels <- sum(w$m)
+           area <- pixelarea * npixels
+         },
+         stop("Unrecognised window type")
+         )
+  return(area)
+}
+
+perimeter <- function(w) {
+  w <- as.owin(w)
+  switch(w$type,
+         rectangle = {
+           return(2*(diff(w$xrange)+diff(w$yrange)))
+         },
+         polygonal={
+           return(sum(lengths.psp(edges(w))))
+         },
+         mask={
+           p <- as.polygonal(w)
+           if(is.null(p)) return(NA)
+           delta <- sqrt(w$xstep^2 + w$ystep^2)
+           p <- simplify.owin(p, delta * 1.15)
+           return(sum(lengths.psp(edges(p))))
+         })
+  return(NA)
+}
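+
+## Sketch (not run): both quantities are exact for rectangles:
+##   area(unit.square())        # 1
+##   perimeter(unit.square())   # 4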
+
+framebottomleft <- function(w) {
+  f <- Frame(w)
+  c(f$xrange[1L], f$yrange[1L])
+}
+
+sidelengths.owin <- function(x) {
+  if(x$type != "rectangle")
+    warning("Computing the side lengths of a non-rectangular window")
+  with(x, c(diff(xrange), diff(yrange)))
+}
+
+shortside.owin <- function(x) { min(sidelengths(x)) }
+
+eroded.areas <- function(w, r, subset=NULL) {
+  w <- as.owin(w)
+  if(!is.null(subset) && !is.mask(w))
+    w <- as.mask(w)
+  switch(w$type,
+         rectangle = {
+           width <- abs(diff(w$xrange))
+           height <- abs(diff(w$yrange))
+           areas <- pmax(width - 2 * r, 0) * pmax(height - 2 * r, 0)
+         },
+         polygonal = {
+           ## warning("Approximating polygonal window by digital image")
+           w <- as.mask(w)
+           areas <- eroded.areas(w, r)
+         },
+         mask = {
+           ## distances from each pixel to window boundary
+           b <- if(is.null(subset)) bdist.pixels(w, style="matrix") else 
+                bdist.pixels(w)[subset, drop=TRUE, rescue=FALSE]
+           ## histogram breaks to satisfy hist()
+           Bmax <- max(b, r)
+           breaks <- c(-1,r,Bmax+1)
+           ## histogram of boundary distances
+           h <- hist(b, breaks=breaks, plot=FALSE)$counts
+           ## reverse cumulative histogram
+           H <- revcumsum(h)
+           ## drop first entry corresponding to r=-1
+           H <- H[-1]
+           ## convert count to area
+           pixarea <- w$xstep * w$ystep
+           areas <- pixarea * H
+         },
+         stop("unrecognised window type")
+         )
+  areas
+}
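+
+## Sketch (not run): for a rectangle this reduces to the closed formula
+## pmax(width - 2r, 0) * pmax(height - 2r, 0):
+##   eroded.areas(unit.square(), r=c(0.1, 0.25))   # 0.64 0.25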
+
+even.breaks.owin <- function(w) {
+  verifyclass(w, "owin")
+  Rmax <- diameter(w)
+  make.even.breaks(Rmax, Rmax/(100 * sqrt(2)))
+}
+
+unit.square <- function() { owin(c(0,1),c(0,1)) }
+
+square <- function(r=1, unitname=NULL) {
+  stopifnot(is.numeric(r))
+  if(!all(is.finite(r)))
+    stop("argument r is NA or infinite")
+  if(length(r) == 1) {
+    stopifnot(r > 0)
+    r <- c(0,r)
+  } else if(length(r) == 2) {
+    stopifnot(r[1L] < r[2L])
+  } else stop("argument r must be a single number, or a vector of length 2")
+  owin(r,r, unitname=unitname)
+}
+
+# convert polygonal window to mask window 
+owinpoly2mask <- function(w, rasta, check=TRUE) {
+  if(check) {
+    verifyclass(w, "owin")
+    stopifnot(w$type == "polygonal")
+    verifyclass(rasta, "owin")
+    stopifnot(rasta$type == "mask")
+  }
+  
+  bdry <- w$bdry
+
+  x0    <- rasta$xcol[1L]
+  y0    <- rasta$yrow[1L]
+  xstep <- rasta$xstep
+  ystep <- rasta$ystep
+  dimyx <- rasta$dim
+  nx    <- dimyx[2L]
+  ny    <- dimyx[1L]
+
+  epsilon <- with(.Machine, double.base^floor(double.ulp.digits/2))
+
+  score <- numeric(nx*ny)
+  
+  for(i in seq_along(bdry)) {
+    p <- bdry[[i]]
+    xp <- p$x
+    yp <- p$y
+    np <- length(p$x)
+    # repeat last vertex
+    xp <- c(xp, xp[1L])
+    yp <- c(yp, yp[1L])
+    np <- np + 1
+    # rescale coordinates so that pixels are at integer locations
+    xp <- (xp - x0)/xstep
+    yp <- (yp - y0)/ystep
+    # avoid exact integer locations for vertices
+    whole <- (ceiling(xp) == floor(xp))
+    xp[whole] <-  xp[whole] + epsilon
+    whole <- (ceiling(yp) == floor(yp))
+    yp[whole] <-  yp[whole] + epsilon
+    # call C
+    z <- .C("poly2imI",
+            xp=as.double(xp),
+            yp=as.double(yp),
+            np=as.integer(np),
+            nx=as.integer(nx),
+            ny=as.integer(ny),
+            out=as.integer(integer(nx * ny)),
+            PACKAGE="spatstat")
+    if(i == 1)
+      score <- z$out
+    else 
+      score <- score + z$out
+  }
+  status <- (score != 0)
+  out <- owin(rasta$xrange, rasta$yrange, mask=matrix(status, ny, nx))
+  return(out)
+}
+
+
+
+overlap.owin <- function(A, B) {
+  # compute the area of overlap between two windows
+  
+  # check units
+  if(!compatible.units(unitname(A), unitname(B)))
+    warning("The two windows have incompatible units of length")
+  
+  At <- A$type
+  Bt <- B$type
+  if(At=="rectangle" && Bt=="rectangle") {
+    xmin <- max(A$xrange[1L],B$xrange[1L])
+    xmax <- min(A$xrange[2L],B$xrange[2L])
+    if(xmax <= xmin) return(0)
+    ymin <- max(A$yrange[1L],B$yrange[1L])
+    ymax <- min(A$yrange[2L],B$yrange[2L])
+    if(ymax <= ymin) return(0)
+    return((xmax-xmin) * (ymax-ymin))
+  }
+  if((At=="rectangle" && Bt=="polygonal")
+     || (At=="polygonal" && Bt=="rectangle")
+     || (At=="polygonal" && Bt=="polygonal"))
+  {
+    AA <- as.polygonal(A)$bdry
+    BB <- as.polygonal(B)$bdry
+    area <- 0
+    for(i in seq_along(AA))
+      for(j in seq_along(BB))
+        area <- area + overlap.xypolygon(AA[[i]], BB[[j]])
+    # small negative numbers can occur due to numerical error
+    return(max(0, area))
+  }
+  if(At=="mask") {
+    # count pixels in A that belong to B
+    pixelarea <- abs(A$xstep * A$ystep)
+    rxy <- rasterxy.mask(A, drop=TRUE)
+    x <- rxy$x
+    y <- rxy$y
+    ok <- inside.owin(x, y, B) 
+    return(pixelarea * sum(ok))
+  }
+  if(Bt== "mask") {
+    # count pixels in B that belong to A
+    pixelarea <- abs(B$xstep * B$ystep)
+    rxy <- rasterxy.mask(B, drop=TRUE)
+    x <- rxy$x
+    y <- rxy$y
+    ok <- inside.owin(x, y, A)
+    return(pixelarea * sum(ok))
+  }
+  stop("Internal error")
+}
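+
+## Sketch (not run): two unit squares offset by 0.5 in x share area 0.5:
+##   overlap.owin(owin(c(0,1), c(0,1)), owin(c(0.5,1.5), c(0,1)))   # 0.5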
+
+#
+#  subset operator for window
+#
+
+"[.owin" <- 
+function(x, i, ...) {
+  if(!missing(i) && !is.null(i)) {
+    if(is.im(i) && i$type == "logical") {
+      # convert to window
+      i <- as.owin(eval.im(ifelse1NA(i)))
+    } else stopifnot(is.owin(i))
+    x <- intersect.owin(x, i, fatal=FALSE)
+  }
+  return(x)
+}
+    
+#
+#
+#  Intersection and union of windows
+#
+#
+
+intersect.owin <- function(..., fatal=TRUE, p) {
+  argh <- list(...)
+  ## p is a list of arguments to polyclip::polyclip
+  if(missing(p) || is.null(p)) p <- list()
+  ## handle 'solist' objects
+  argh <- expandSpecialLists(argh, "solist")
+  rasterinfo <- list()
+  if(length(argh) > 0) {
+    # explicit arguments controlling raster info
+    israster <- names(argh) %in% names(formals(as.mask))
+    if(any(israster)) {
+      rasterinfo <- argh[israster]
+      ## remaining arguments
+      argh <- argh[!israster]
+    }
+  }
+  ## look for window arguments
+  isowin <- sapply(argh, is.owin)
+  if(any(!isowin))
+    warning("Some arguments were not windows")
+  argh <- argh[isowin]
+  nwin <- length(argh)
+
+  if(nwin == 0) {
+    warning("No windows were given")
+    return(NULL)
+  }
+
+  ## at least one window
+  A <- argh[[1L]]
+  if(nwin == 1) return(A)
+  ## at least two windows
+  B <- argh[[2L]]
+  
+  if(nwin > 2) {
+    ## handle intersection of more than two windows
+    windows <- argh[-c(1,2)]
+    ## determine a common set of parameters for polyclip
+    p <- commonPolyclipArgs(A, B, do.call(boundingbox, windows), p=p)
+    ## absorb all windows into B
+    for(i in seq_along(windows))
+      B <- do.call(intersect.owin,
+                   append(list(B, windows[[i]], p=p),
+                            rasterinfo))
+  }
+
+  ## There are now only two windows
+  if(is.empty(A)) return(A)
+  if(is.empty(B)) return(B)
+
+  if(identical(A, B))
+    return(A)
+
+  # check units
+  if(!compatible(unitname(A), unitname(B)))
+    warning("The two windows have incompatible units of length")
+
+  # determine intersection of x and y ranges
+  xr <- intersect.ranges(A$xrange, B$xrange, fatal=fatal)
+  yr <- intersect.ranges(A$yrange, B$yrange, fatal=fatal)
+  if(!fatal && (is.null(xr) || is.null(yr)))
+    return(NULL)
+  C <- owin(xr, yr, unitname=unitname(A))
+
+  if(is.empty(A) || is.empty(B))
+    return(emptywindow(C))
+           
+  # Determine type of intersection
+  
+  Arect <- is.rectangle(A)
+  Brect <- is.rectangle(B)
+#  Apoly <- is.polygonal(A)
+#  Bpoly <- is.polygonal(B)
+  Amask <- is.mask(A)
+  Bmask <- is.mask(B)
+  
+  # Rectangular case
+  if(Arect && Brect)
+    return(C)
+
+  if(!Amask && !Bmask) {
+    ####### Result is polygonal ############
+    a <- lapply(as.polygonal(A)$bdry, reverse.xypolygon)
+    b <- lapply(as.polygonal(B)$bdry, reverse.xypolygon)
+    ab <- do.call(polyclip::polyclip,
+                  append(list(a, b, "intersection",
+                              fillA="nonzero", fillB="nonzero"),
+                         p))
+    if(length(ab)==0)
+      return(emptywindow(C))
+    # ensure correct polarity
+    totarea <- sum(unlist(lapply(ab, Area.xypolygon)))
+    if(totarea < 0)
+      ab <- lapply(ab, reverse.xypolygon)
+    AB <- owin(poly=ab, check=FALSE, unitname=unitname(A))
+    AB <- rescue.rectangle(AB)
+    return(AB)
+  }
+
+  ######### Result is a mask ##############
+  
+  # Restrict domain where possible 
+  if(Arect)
+    A <- C
+  if(Brect)
+    B <- C
+  if(Amask)
+    A <- trim.mask(A, C)
+  if(Bmask)
+    B <- trim.mask(B, C)
+
+  # Did the user specify the pixel raster?
+  if(length(rasterinfo) > 0) {
+    # convert to masks with specified parameters, and intersect
+    if(Amask) {
+      A <- do.call(as.mask, append(list(A), rasterinfo))
+      return(restrict.mask(A, B))
+    } else {
+      B <- do.call(as.mask, append(list(B), rasterinfo))
+      return(restrict.mask(B, A))
+    }
+  } 
+  
+  # One mask and one rectangle?
+  if(Arect && Bmask)
+      return(B)
+  if(Amask && Brect)
+      return(A)
+
+  # One mask and one polygon?
+  if(Amask && !Bmask) 
+    return(restrict.mask(A, B))
+  if(!Amask && Bmask)
+    return(restrict.mask(B, A))
+
+  # Two existing masks?
+  if(Amask && Bmask) {
+    # choose the finer one
+    if(A$xstep <= B$xstep)
+      return(restrict.mask(A, B))
+    else
+      return(restrict.mask(B, A))
+  }
+
+  # No existing masks. No clipping applied so far.
+  # Convert one window to a mask with default pixel raster, and intersect.
+  if(Arect) {
+    A <- as.mask(A)
+    return(restrict.mask(A, B))
+  } else {
+    B <- as.mask(B)
+    return(restrict.mask(B, A))
+  }
+}
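+
+## Sketch (not run): intersecting two hypothetical rectangles; rectangular
+## results are returned directly as rectangles:
+##   A <- owin(c(0,2), c(0,2)); B <- owin(c(1,3), c(1,3))
+##   intersect.owin(A, B)   # the rectangle [1,2] x [1,2]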
+
+
+union.owin <- function(..., p) {
+  argh <- list(...)
+  ## weed out NULL arguments
+  argh <- argh[!sapply(argh, is.null)]
+  ## p is a list of arguments to polyclip::polyclip
+  if(missing(p) || is.null(p)) p <- list()
+  ## handle 'solist' objects
+  argh <- expandSpecialLists(argh, "solist")
+  rasterinfo <- list()
+  if(length(argh) > 0) {
+    ## arguments controlling raster info
+    israster <- names(argh) %in% names(formals(as.mask))
+    if(any(israster)) {
+      rasterinfo <- argh[israster]
+      ## remaining arguments
+      argh <- argh[!israster]
+    }
+  }
+  ## look for window arguments
+  isowin <- sapply(argh, is.owin)
+  if(any(!isowin))
+    warning("Some arguments were not windows")
+  argh <- argh[isowin]
+  ## 
+  nwin <- length(argh)
+  if(nwin == 0) {
+    warning("No windows were given")
+    return(NULL)
+  }
+  ## find non-empty ones
+  if(any(isemp <- sapply(argh, is.empty)))
+    argh <- argh[!isemp]
+  nwin <- length(argh)
+  if(nwin == 0) {
+    warning("All windows were empty")
+    return(NULL)
+  }
+  ## at least one window
+  A <- argh[[1L]]
+  if(nwin == 1) return(A)
+
+  ## more than two windows
+  if(nwin > 2) {
+    ## check if we need polyclip
+    somepoly <- !all(sapply(argh, is.mask))
+    if(somepoly) {
+      ## determine a common set of parameters for polyclip
+      p <- commonPolyclipArgs(do.call(boundingbox, argh), p=p)
+      ## apply these parameters now to avoid numerical errors
+      argh <- applyPolyclipArgs(argh, p=p)
+      A <- argh[[1L]]
+    } 
+    ## absorb all windows into A without rescaling
+    nullp <- list(eps=1, x0=0, y0=0)
+    for(i in 2:nwin) 
+      A <- do.call(union.owin,
+                   append(list(A, argh[[i]], p=nullp),
+                          rasterinfo))
+    if(somepoly) {
+      ## undo rescaling
+      A <- reversePolyclipArgs(A, p=p)
+    }
+    return(A)
+  }
+
+  ## Exactly two windows
+  B <- argh[[2L]]
+  if(identical(A, B))
+    return(A)
+
+  ## check units
+  if(!compatible(unitname(A), unitname(B)))
+    warning("The two windows have incompatible units of length")
+  
+  ## Determine type of intersection
+  
+##  Arect <- is.rectangle(A)
+##  Brect <- is.rectangle(B)
+##  Apoly <- is.polygonal(A)
+##  Bpoly <- is.polygonal(B)
+  Amask <- is.mask(A)
+  Bmask <- is.mask(B)
+
+  ## Create a rectangle to contain the result
+  
+  C <- owin(range(A$xrange, B$xrange),
+            range(A$yrange, B$yrange),
+            unitname=unitname(A))
+
+  if(!Amask && !Bmask) {
+    ####### Result is polygonal (or rectangular) ############
+    a <- lapply(as.polygonal(A)$bdry, reverse.xypolygon)
+    b <- lapply(as.polygonal(B)$bdry, reverse.xypolygon)
+    ab <- do.call(polyclip::polyclip,
+                  append(list(a, b, "union",
+                              fillA="nonzero", fillB="nonzero"),
+                         p))
+    if(length(ab) == 0)
+      return(emptywindow(C))
+    ## ensure correct polarity
+    totarea <- sum(unlist(lapply(ab, Area.xypolygon)))
+    if(totarea < 0)
+      ab <- lapply(ab, reverse.xypolygon)
+    AB <- owin(poly=ab, check=FALSE, unitname=unitname(A))
+    AB <- rescue.rectangle(AB)
+    return(AB)
+  }
+
+  ####### Result is a mask ############
+
+  ## Determine pixel raster parameters
+  if(length(rasterinfo) == 0) {
+    rasterinfo <-
+      if(Amask)
+        list(xy=list(x=as.numeric(prolongseq(A$xcol, C$xrange)),
+               y=as.numeric(prolongseq(A$yrow, C$yrange))))
+      else if(Bmask)
+        list(xy=list(x=as.numeric(prolongseq(B$xcol, C$xrange)),
+               y=as.numeric(prolongseq(B$yrow, C$yrange))))
+      else
+        list()
+  }
+
+  ## Convert C to mask
+  C <- do.call(as.mask, append(list(w=C), rasterinfo))
+
+  rxy <- rasterxy.mask(C)
+  x <- rxy$x
+  y <- rxy$y
+  ok <- inside.owin(x, y, A) | inside.owin(x, y, B)
+
+  if(all(ok)) {
+    ## result is a rectangle
+    C <- as.rectangle(C)
+  } else {
+    ## result is a mask
+    C$m[] <- ok
+  }
+  return(C)
+}
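+
+## Sketch (not run): the union of the overlapping rectangles A and B from the
+## sketch above is not itself a rectangle, so the result is polygonal:
+##   union.owin(A, B)$type   # "polygonal"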
+
+setminus.owin <- function(A, B, ..., p) {
+  if(is.null(B)) return(A)
+  verifyclass(B, "owin")
+  if(is.null(A)) return(emptywindow(Frame(B)))
+  verifyclass(A, "owin")
+  if(is.empty(A) || is.empty(B)) return(A)
+  if(identical(A, B))
+    return(emptywindow(Frame(A)))
+
+  ## p is a list of arguments to polyclip::polyclip
+  if(missing(p) || is.null(p)) p <- list()
+
+  ## check units
+  if(!compatible(unitname(A), unitname(B)))
+    warning("The two windows have incompatible units of length")
+  
+  ## Determine type of arguments
+  
+  Arect <- is.rectangle(A)
+  Brect <- is.rectangle(B)
+##  Apoly <- is.polygonal(A)
+##  Bpoly <- is.polygonal(B)
+  Amask <- is.mask(A)
+  Bmask <- is.mask(B)
+
+  ## Case where A and B are both rectangular
+  if(Arect && Brect) {
+    if(is.subset.owin(A, B))
+      return(emptywindow(B))
+    C <- intersect.owin(A, B, fatal=FALSE)
+    if(is.null(C) || is.empty(C)) return(A)
+    return(complement.owin(C, A))
+  }
+    
+  ## Polygonal case
+
+  if(!Amask && !Bmask) {
+    ####### Result is polygonal ############
+    a <- lapply(as.polygonal(A)$bdry, reverse.xypolygon)
+    b <- lapply(as.polygonal(B)$bdry, reverse.xypolygon)
+    ab <- do.call(polyclip::polyclip,
+                  append(list(a, b, "minus",
+                              fillA="nonzero", fillB="nonzero"),
+                         p))
+    if(length(ab) == 0)
+      return(emptywindow(B))
+    ## ensure correct polarity
+    totarea <- sum(unlist(lapply(ab, Area.xypolygon)))
+    if(totarea < 0)
+      ab <- lapply(ab, reverse.xypolygon)
+    AB <- owin(poly=ab, check=FALSE, unitname=unitname(A))
+    AB <- rescue.rectangle(AB)
+    return(AB)
+  }
+
+  ####### Result is a mask ############
+
+  ## Determine pixel raster parameters
+  rasterinfo <- 
+    if((length(list(...)) > 0))
+      list(...)
+    else if(Amask)
+      list(xy=list(x=A$xcol,
+                   y=A$yrow))
+    else if(Bmask)
+      list(xy=list(x=B$xcol,
+                   y=B$yrow))
+    else
+      list()
+
+  ## Convert A to mask
+  AB <- do.call(as.mask, append(list(w=A), rasterinfo))
+
+  rxy <- rasterxy.mask(AB)
+  x <- rxy$x
+  y <- rxy$y
+  ok <- inside.owin(x, y, A) & !inside.owin(x, y, B)
+
+  if(!all(ok))
+    AB$m[] <- ok
+  else
+    AB <- rescue.rectangle(AB)
+
+  return(AB)
+}
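+
+## Sketch (not run): set difference, continuing the same hypothetical A and B:
+##   setminus.owin(A, B)   # A with the square [1,2] x [1,2] removed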
+
+## auxiliary functions
+
+commonPolyclipArgs <- function(..., p=NULL) {
+  # compute a common resolution for polyclip operations
+  # on several windows
+  if(!is.null(p) && !is.null(p$eps) && !is.null(p$x0) && !is.null(p$y0))
+    return(p)
+  bb <- boundingbox(...)
+  xr <- bb$xrange
+  yr <- bb$yrange
+  ## parentheses matter here: %orifnull% binds more tightly than /, so
+  ## without them a user-supplied p$eps would itself be divided by 2^31
+  eps <- p$eps %orifnull% (max(diff(xr), diff(yr))/(2^31))
+  x0  <- p$x0  %orifnull% mean(xr)
+  y0  <- p$y0  %orifnull% mean(yr)
+  return(list(eps=eps, x0=x0, y0=y0))
+}
+
+applyPolyclipArgs <- function(x, p=NULL) {
+  if(is.null(p)) return(x)
+  y <- lapply(x, shift, vec=-c(p$x0, p$y0))
+  z <- lapply(y, scalardilate, f=1/p$eps)
+  return(z)
+}
+
+reversePolyclipArgs <- function(x, p=NULL) {
+  if(is.null(p)) return(x)
+  y <- scalardilate(x, f=p$eps)
+  z <- shift(y, vec=c(p$x0, p$y0))
+  return(z)
+}
+
+trim.mask <- function(M, R, tolerant=TRUE) {
+    ## M is a mask,
+    ## R is a rectangle
+
+    ## Ensure R is a subset of bounding rectangle of M
+    R <- owin(intersect.ranges(M$xrange, R$xrange),
+              intersect.ranges(M$yrange, R$yrange))
+    
+    ## Deal with very thin rectangles
+    if(tolerant) {
+      R$xrange <- adjustthinrange(R$xrange, M$xstep, M$xrange)
+      R$yrange <- adjustthinrange(R$yrange, M$ystep, M$yrange)
+    }
+
+    ## Extract subset of image grid
+    yrowok <- inside.range(M$yrow, R$yrange)
+    xcolok <- inside.range(M$xcol, R$xrange)
+    if((ny <- sum(yrowok)) == 0 || (nx <- sum(xcolok)) == 0) 
+      return(emptywindow(R))
+    Z <- M
+    Z$xrange <- R$xrange
+    Z$yrange <- R$yrange
+    Z$yrow <- M$yrow[yrowok]
+    Z$xcol <- M$xcol[xcolok]
+    Z$m <- M$m[yrowok, xcolok]
+    if(ny < 2 || nx < 2)
+      Z$m <- matrix(Z$m, nrow=ny, ncol=nx)
+    Z$dim <- dim(Z$m)
+    return(Z)
+}
+
+restrict.mask <- function(M, W) {
+  ## M is a mask, W is any window
+  stopifnot(is.mask(M))
+  stopifnot(inherits(W, "owin"))
+  if(is.rectangle(W))
+    return(trim.mask(M, W))
+  M <- trim.mask(M, as.rectangle(W))
+  ## Determine which pixels of M are inside W
+  rxy <- rasterxy.mask(M, drop=TRUE)
+  x <- rxy$x
+  y <- rxy$y
+  ok <- inside.owin(x, y, W)
+  Mm <- M$m
+  Mm[Mm] <- ok
+  M$m <- Mm
+  return(M)
+}
+
+# SUBSUMED IN rmhexpand.R
+# expand.owin <- function(W, f=1) {
+#
+#  # expand bounding box of 'win'
+#  # by factor 'f' in **area**
+#  if(f <= 0)
+#    stop("f must be > 0")
+#  if(f == 1)
+#    return(W)
+#  bb <- boundingbox(W)
+#  xr <- bb$xrange
+#  yr <- bb$yrange
+#  fff <- (sqrt(f) - 1)/2
+#  Wexp <- owin(xr + fff * c(-1,1) * diff(xr),
+#               yr + fff * c(-1,1) * diff(yr),
+#               unitname=unitname(W))
+#  return(Wexp)
+#}
+
+trim.rectangle <- function(W, xmargin=0, ymargin=xmargin) {
+  if(!is.rectangle(W))
+    stop("Internal error: tried to trim margin off non-rectangular window")
+  xmargin <- ensure2vector(xmargin)
+  ymargin <- ensure2vector(ymargin)
+  if(any(xmargin < 0) || any(ymargin < 0))
+    stop("values of xmargin, ymargin must be nonnegative")
+  if(sum(xmargin) > diff(W$xrange))
+    stop("window is too small to cut off margins of the width specified")
+  if(sum(ymargin) > diff(W$yrange))
+    stop("window is too small to cut off margins of the height specified")
+  owin(W$xrange + c(1,-1) * xmargin,
+       W$yrange + c(1,-1) * ymargin,
+       unitname=unitname(W))
+}
+
+grow.rectangle <- function(W, xmargin=0, ymargin=xmargin, fraction=NULL) {
+  if(!is.null(fraction)) {
+    fraction <- ensure2vector(fraction)
+    if(any(fraction < 0)) stop("fraction must be non-negative")
+    if(missing(xmargin)) xmargin <- fraction[1L] * diff(W$xrange)
+    if(missing(ymargin)) ymargin <- fraction[2L] * diff(W$yrange)
+  }
+  xmargin <- ensure2vector(xmargin)
+  ymargin <- ensure2vector(ymargin)
+  if(any(xmargin < 0) || any(ymargin < 0))
+    stop("values of xmargin, ymargin must be nonnegative")
+  owin(W$xrange + c(-1,1) * xmargin,
+       W$yrange + c(-1,1) * ymargin,
+       unitname=unitname(W))
+}
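+
+## Sketch (not run): trim.rectangle and grow.rectangle are inverse margin
+## operations on frames:
+##   grow.rectangle(unit.square(), 0.1)                       # [-0.1,1.1]^2
+##   trim.rectangle(grow.rectangle(unit.square(), 0.1), 0.1)  # back to [0,1]^2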
+
+grow.mask <- function(M, xmargin=0, ymargin=xmargin) {
+  stopifnot(is.mask(M))
+  m <- as.matrix(M)
+  Rplus <- grow.rectangle(as.rectangle(M), xmargin, ymargin)
+  ## extend the raster
+  xcolplus <- prolongseq(M$xcol, Rplus$xrange)
+  yrowplus <- prolongseq(M$yrow, Rplus$yrange)
+  mplus <- matrix(FALSE, length(yrowplus), length(xcolplus))
+  ## pad out the mask entries
+  nleft <- attr(xcolplus, "nleft")
+  nright <- attr(xcolplus, "nright")
+  nbot <- attr(yrowplus, "nleft")
+  ntop <- attr(yrowplus, "nright")
+  mplus[ (nbot+1):(length(yrowplus)-ntop),
+         (nleft+1):(length(xcolplus)-nright) ] <- m
+  ## pack up
+  result <- owin(xrange=Rplus$xrange,
+                 yrange=Rplus$yrange,
+                 xcol=as.numeric(xcolplus),
+                 yrow=as.numeric(yrowplus),
+                 mask=mplus,
+                 unitname=unitname(M))
+  return(result)
+}
+  
+bdry.mask <- function(W) {
+  verifyclass(W, "owin")
+  W <- as.mask(W)
+  m <- W$m
+  nr <- nrow(m)
+  nc <- ncol(m)
+  if(!spatstat.options('Cbdrymask')) {
+    ## old interpreted code
+    b <-     (m != rbind(FALSE,       m[-nr, ]))
+    b <- b | (m != rbind(m[-1, ], FALSE))
+    b <- b | (m != cbind(FALSE,       m[, -nc]))
+    b <- b | (m != cbind(m[, -1], FALSE))
+  } else {
+    b <- integer(nr * nc)
+    z <- .C("bdrymask",
+            nx = as.integer(nc),
+            ny = as.integer(nr),
+            m = as.integer(m),
+            b = as.integer(b),
+            PACKAGE = "spatstat")
+    b <- matrix(as.logical(z$b), nr, nc)
+  }
+  W$m <- b
+  return(W)
+}
+
+nvertices <- function(x, ...) {
+  UseMethod("nvertices")
+}
+
+nvertices.default <- function(x, ...) {
+  v <- vertices(x)
+  vx <- v$x
+  n <- if(is.null(vx)) NA else length(vx)
+  return(n)
+}
+
+nvertices.owin <- function(x, ...) {
+  if(is.empty(x))
+    return(0)
+  n <- switch(x$type,
+              rectangle=4,
+              polygonal=sum(lengths(lapply(x$bdry, getElement, name="x"))),
+              mask=sum(bdry.mask(x)$m))
+  return(n)
+}
+
+vertices <- function(w) {
+  UseMethod("vertices")
+}
+
+vertices.owin <- function(w) {
+  verifyclass(w, "owin")
+  if(is.empty(w))
+    return(NULL)
+  switch(w$type,
+         rectangle={
+           xr <- w$xrange
+           yr <- w$yrange
+           vert <- list(x=xr[c(1,2,2,1)], y=yr[c(1,1,2,2)])
+         },
+         polygonal={
+           vert <- do.call(concatxy,w$bdry)
+         },
+         mask={
+           bm <- bdry.mask(w)$m
+           rxy <- rasterxy.mask(w)
+           xx <- rxy$x
+           yy <- rxy$y
+           vert <- list(x=as.vector(xx[bm]),
+                        y=as.vector(yy[bm]))
+         })
+  return(vert)
+}
+
+diameter <- function(x) { UseMethod("diameter") }
+
+diameter.owin <- function(x) {
+  w <- as.owin(x)
+  if(is.empty(w))
+    return(NULL)
+  vert <- vertices(w)
+  if(length(vert$x) > 3) {
+    # extract convex hull
+    h <- with(vert, chull(x, y))
+    vert <- with(vert, list(x=x[h], y=y[h]))
+  }
+  d <- pairdist(vert, squared=TRUE)
+  return(sqrt(max(d)))
+}
+
+##    radius of inscribed circle
+
+inradius <- function(W) {
+  stopifnot(is.owin(W))
+  if(W$type == "rectangle") diameter(W)/2 else max(distmap(W, invert=TRUE))
+}
+
+  
+incircle <- function(W) {
+  # computes the largest circle contained in W
+  verifyclass(W, "owin")
+  if(is.empty(W))
+    return(NULL)
+  if(is.rectangle(W)) {
+    xr <- W$xrange
+    yr <- W$yrange
+    x0 <- mean(xr)
+    y0 <- mean(yr)
+    radius <- min(diff(xr), diff(yr))/2
+    return(list(x=x0, y=y0, r=radius))
+  }
+  # compute distance to boundary
+  D <- distmap(W, invert=TRUE)
+  D <- D[W, drop=FALSE]
+  # find maximum distance
+  v <- D$v
+  ok <- !is.na(v)
+  Dvalues <- as.vector(v[ok])
+  Dmax <- max(Dvalues)
+  # find location of maximum
+  locn <- which.max(Dvalues)
+  locrow <- as.vector(row(v)[ok])[locn]
+  loccol <- as.vector(col(v)[ok])[locn]
+  x0 <- D$xcol[loccol]
+  y0 <- D$yrow[locrow]
+  if(is.mask(W)) {
+    # radius could be one pixel diameter shorter than Dmax
+    Dpixel <- sqrt(D$xstep^2 + D$ystep^2)
+    radius <- max(0, Dmax - Dpixel)
+  } else radius <- Dmax
+  return(list(x=x0, y=y0, r=radius))
+}
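+
+## Worked example (illustrative): for the unit square the largest inscribed
+## circle is centred at the midpoint, with radius half the shorter side,
+## consistent with inradius(square(1)) = 0.5.
+##   incircle(square(1))   # list(x=0.5, y=0.5, r=0.5)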
+
+inpoint <- function(W) {
+  # selects a point that is always inside the window.
+  verifyclass(W, "owin")
+  if(is.empty(W))
+    return(NULL)
+  if(is.rectangle(W))
+    return(c(mean(W$xrange), mean(W$yrange)))
+  if(is.polygonal(W)) {
+    xy <- centroid.owin(W)
+    if(inside.owin(xy$x, xy$y, W))
+      return(xy)
+  }
+  W <- as.mask(W)
+  Mm <- W$m
+  Mrow <- as.vector(row(Mm)[Mm])
+  Mcol <- as.vector(col(Mm)[Mm])
+  selectmiddle <- function(x) { x[ceiling(length(x)/2)] }
+  midcol <- selectmiddle(Mcol)
+  midrow <- selectmiddle(Mrow[Mcol==midcol])
+  x <- W$xcol[midcol]
+  y <- W$yrow[midrow]
+  return(c(x,y))
+}
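+
+## e.g. inpoint(square(1)) is c(0.5, 0.5); for an irregular window the
+## centroid is used if it lies inside, otherwise a middle mask pixel.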
+
+simplify.owin <- function(W, dmin) {
+  verifyclass(W, "owin")
+  if(is.empty(W))
+    return(W)
+  W <- as.polygonal(W)
+  W$bdry <- lapply(W$bdry, simplify.xypolygon, dmin=dmin)
+  return(W)
+}
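+
+## e.g. (illustrative) simplify.owin(as.owin(chorley), 2) yields a simpler
+## polygon whose boundary deviates from the original by roughly dmin = 2 units.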
+
+  
+is.convex <- function(x) {
+  verifyclass(x, "owin")
+  if(is.empty(x))
+    return(TRUE)
+  switch(x$type,
+         rectangle={return(TRUE)},
+         polygonal={
+           b <- x$bdry
+           if(length(b) > 1)
+             return(FALSE)
+           b <- b[[1L]]
+           xx <- b$x
+           yy <- b$y
+           ch <- chull(xx,yy)
+           return(length(ch) == length(xx))
+         },
+         mask={
+           v <- vertices(x)
+           v <- as.ppp(v, W=as.rectangle(x))
+           ch <- convexhull.xy(v)
+           edg <- edges(ch)
+           edgedist <- nncross(v, edg, what="dist")
+           pixdiam <- sqrt(x$xstep^2 + x$ystep^2)
+           return(all(edgedist <= pixdiam))
+         })
+  return(as.logical(NA))
+}
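+
+## e.g. is.convex(square(1)) is TRUE; is.convex(letterR) is FALSE, since the
+## convex hull of the boundary vertices drops the reflex vertices.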
+
+convexhull <- function(x) {
+  if(inherits(x, "owin")) 
+    v <- vertices(x)
+  else if(inherits(x, "psp"))
+    v <- endpoints.psp(x)
+  else if(inherits(x, "ppp"))
+    v <- x
+  else {
+    x <- as.owin(x)
+    v <- vertices(x)
+  }
+  b <- as.rectangle(x)
+  if(is.empty(x))
+    return(emptywindow(b))
+  ch <- convexhull.xy(v)
+  out <- rebound.owin(ch, b)
+  return(out)
+}
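+
+## e.g. convexhull(letterR) is the convex polygonal window spanned by the
+## vertices of letterR, bounded by letterR's enclosing rectangle.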
+
+  
+is.empty <- function(x) { UseMethod("is.empty") }
+
+is.empty.default <- function(x) { length(x) == 0 }
+
+is.empty.owin <- function(x) {
+  switch(x$type,
+         rectangle=return(FALSE),
+         polygonal=return(length(x$bdry) == 0),
+         mask=return(!any(x$m)))
+  return(NA)
+}
+
+emptywindow <- function(w) {
+  w <- as.owin(w)
+  out <- owin(w$xrange, w$yrange, poly=list(), unitname=unitname(w))
+  return(out)
+}
+
+discs <- function(centres, radii=marks(centres)/2, ..., 
+                  separate=FALSE, mask=FALSE, trim=TRUE,
+                  delta=NULL, npoly=NULL) {
+  stopifnot(is.ppp(centres))
+  n <- npoints(centres)
+  if(n == 0) return(emptywindow(Frame(centres)))
+  check.nvector(radii, npoints(centres), oneok=TRUE)
+  stopifnot(all(radii > 0))
+  
+  if(sameradius <- (length(radii) == 1)) 
+    radii <- rep(radii, npoints(centres))
+
+  if(!separate && mask) {
+    #' compute pixel approximation
+    M <- as.mask(Window(centres), ...)
+    z <- .C("discs2grid",
+            nx    = as.integer(M$dim[2L]),
+            x0    = as.double(M$xcol[1L]),
+            xstep = as.double(M$xstep),  
+            ny    = as.integer(M$dim[1L]),
+            y0    = as.double(M$yrow[1L]),
+            ystep = as.double(M$ystep), 
+            nd    = as.integer(n),
+            xd    = as.double(centres$x),
+            yd    = as.double(centres$y),
+            rd    = as.double(radii), 
+            out   = as.integer(integer(prod(M$dim))),
+            PACKAGE = "spatstat")
+    M$m[] <- as.logical(z$out)
+    return(M)
+  }
+  #' construct a list of discs
+  D <- list()
+  if(!sameradius && length(unique(radii)) > 1) {
+    if(is.null(delta) && is.null(npoly)) {
+      ra <- range(radii)
+      rr <- ra[2L]/ra[1L]
+      mm <- ceiling(128/rr)
+      mm <- max(16, mm) ## equals 16 unless ra[2]/ra[1] < 8 
+      delta <- 2 * pi * ra[1L]/mm
+    }
+    for(i in 1:n)
+      D[[i]] <- disc(centre=centres[i], radius=radii[i],
+                     delta=delta, npoly=npoly)
+  } else {
+    #' congruent discs -- use 'shift'
+    W0 <- disc(centre=c(0,0), radius=radii[1L],
+               delta=delta, npoly=npoly)
+    for(i in 1:n) 
+      D[[i]] <- shift(W0, vec=centres[i])
+  } 
+  D <- as.solist(D)
+  #' return list of discs?
+  if(separate)
+    return(D)
+  #' return union of discs
+  W <- union.owin(D)
+  if(trim) W <- intersect.owin(W, Window(centres))
+  return(W)
+}
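+
+#' Illustrative usage (a sketch, not definitive):
+#'   X <- runifpoint(5)
+#'   plot(discs(X, 0.1))                 # union of discs, clipped to the window
+#'   D <- discs(X, 0.1, separate=TRUE)   # list of five polygonal discs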
+
+harmonise.owin <- harmonize.owin <- function(...) {
+  argz <- list(...)
+  wins <- solapply(argz, as.owin)
+  if(length(wins) < 2L) return(wins)
+  ismask <- sapply(wins, is.mask)
+  if(!any(ismask)) return(wins)
+  comgrid <- do.call(commonGrid, lapply(argz, as.owin))
+  result <- solapply(argz, "[", i=comgrid, drop=FALSE)
+  return(result)
+}
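+
+## e.g. harmonise.owin(as.mask(letterR, eps=0.1), square(4)) returns the two
+## windows rasterised onto a single common pixel grid; when no argument is a
+## mask, the windows are returned unchanged.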
+
diff --git a/R/zclustermodel.R b/R/zclustermodel.R
new file mode 100644
index 0000000..43f516e
--- /dev/null
+++ b/R/zclustermodel.R
@@ -0,0 +1,89 @@
+#'
+#'   zclustermodel.R
+#'
+#' Experimental
+#'
+
+zclustermodel <- function(name="Thomas", ..., mu, kappa, scale) {
+  if(missing(kappa)) stop("The parent intensity kappa must be given")
+  if(missing(mu)) stop("The mean cluster size mu must be given")
+  if(missing(scale)) stop("The cluster scale must be given")
+  rules <- spatstatClusterModelInfo(name)
+  argh <- list(startpar=c(kappa=kappa, scale=scale), ...)
+  argh <- do.call(rules$resolvedots, argh)
+  par <- argh$startpar
+  other <- argh[names(argh) != "startpar"]
+  clustargs <- rules$checkclustargs(other$margs, old=FALSE)
+  out <- list(name=name, rules=rules, par=par, mu=mu, clustargs=clustargs,
+              other=other)
+  class(out) <- "zclustermodel"
+  return(out)
+}
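+
+#' Illustrative usage (a sketch; as noted above, this code is experimental):
+#'   m <- zclustermodel("Thomas", kappa=10, mu=5, scale=0.1)
+#'   m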
+
+print.zclustermodel <- local({
+
+  print.zclustermodel <- function(x, ...) {
+    with(x, {
+      splat(rules$printmodelname(list(clustargs=clustargs)))
+      newpar <- rules$checkpar(par, old=FALSE)
+      splat("Parent intensity kappa =", blurb("kappa", newpar["kappa"]))
+      splat("Cluster scale = ", newpar["scale"])
+      splat("Mean cluster size mu =", blurb("mu", mu))
+      if(length(clustargs) > 0) {
+        hdr <- paste("Cluster shape",
+                     ngettext(length(clustargs), "parameter:", "parameters:"))
+        if(is.list(clustargs) &&
+           all(sapply(clustargs, is.numeric)) &&
+           all(lengths(clustargs) == 1)) {
+          splat(hdr,
+                paste(names(clustargs), as.numeric(clustargs),
+                      sep="=",
+                      collapse=", "))
+        } else {
+          splat(hdr)
+          print(clustargs)
+        }
+      }
+    })
+    return(invisible(NULL))
+  }
+
+  blurb <- function(name, value) {
+    if(is.numeric(value)) as.character(value) else
+    if(is.im(value)) "[image]" else "[unrecognized format]"
+  }
+
+  print.zclustermodel
+})
+
+                             
+pcfmodel.zclustermodel <- function(model, ...) {
+  p <- model$rules$pcf
+  mpar <- model$par
+  other <- model$other
+  f <- function(r) {
+    do.call(p, c(list(par=mpar, rvals=r), other, model$rules["funaux"]))
+  }
+  return(f)
+}
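+
+#' e.g. (illustrative) g <- pcfmodel(m); g(seq(0.01, 0.5, by=0.01)) evaluates
+#' the pair correlation function of the model 'm' sketched above.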
+
+predict.zclustermodel <- function(object, ...,
+                                  locations,
+                                  type="intensity",
+                                  ngrid=NULL) {
+  ## limited use!!!
+  if(!identical(type, "intensity"))
+    stop("Sorry, only type='intensity' is implemented")
+  lambda <- object$par["kappa"] * object$mu
+  if(is.numeric(lambda)) {
+    if(is.ppp(locations))
+      return(rep(lambda, npoints(locations)))
+    W <- as.owin(locations)
+    if(!is.mask(W))
+      W <- as.mask(W, dimyx=ngrid, ...)
+    return(as.im(lambda, W=W))
+  }
+  return(lambda[locations])
+}
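+
+#' The intensity of a cluster process is kappa * mu, so the model 'm' sketched
+#' above has intensity 10 * 5 = 50 points per unit area. Illustrative call:
+#'   predict(m, locations=square(1))   # constant pixel image with value 50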
+
+
diff --git a/build/vignette.rds b/build/vignette.rds
new file mode 100644
index 0000000..4c8de7c
Binary files /dev/null and b/build/vignette.rds differ
diff --git a/data/Kovesi.rda b/data/Kovesi.rda
new file mode 100644
index 0000000..2115326
Binary files /dev/null and b/data/Kovesi.rda differ
diff --git a/data/amacrine.rda b/data/amacrine.rda
new file mode 100644
index 0000000..c3a6862
Binary files /dev/null and b/data/amacrine.rda differ
diff --git a/data/anemones.rda b/data/anemones.rda
new file mode 100644
index 0000000..a6c00af
Binary files /dev/null and b/data/anemones.rda differ
diff --git a/data/ants.rda b/data/ants.rda
new file mode 100644
index 0000000..94717d5
Binary files /dev/null and b/data/ants.rda differ
diff --git a/data/austates.rda b/data/austates.rda
new file mode 100644
index 0000000..3770216
Binary files /dev/null and b/data/austates.rda differ
diff --git a/data/bdspots.rda b/data/bdspots.rda
new file mode 100644
index 0000000..e543cb7
Binary files /dev/null and b/data/bdspots.rda differ
diff --git a/data/bei.rda b/data/bei.rda
new file mode 100644
index 0000000..7fcaaef
Binary files /dev/null and b/data/bei.rda differ
diff --git a/data/betacells.rda b/data/betacells.rda
new file mode 100644
index 0000000..92d9a32
Binary files /dev/null and b/data/betacells.rda differ
diff --git a/data/bramblecanes.rda b/data/bramblecanes.rda
new file mode 100644
index 0000000..5c0a3ea
Binary files /dev/null and b/data/bramblecanes.rda differ
diff --git a/data/bronzefilter.rda b/data/bronzefilter.rda
new file mode 100644
index 0000000..2f2dee4
Binary files /dev/null and b/data/bronzefilter.rda differ
diff --git a/data/cells.rda b/data/cells.rda
new file mode 100644
index 0000000..62f023e
Binary files /dev/null and b/data/cells.rda differ
diff --git a/data/chicago.rda b/data/chicago.rda
new file mode 100644
index 0000000..d0ff365
Binary files /dev/null and b/data/chicago.rda differ
diff --git a/data/chorley.rda b/data/chorley.rda
new file mode 100644
index 0000000..564d632
Binary files /dev/null and b/data/chorley.rda differ
diff --git a/data/clmfires.rda b/data/clmfires.rda
new file mode 100644
index 0000000..ee710f4
Binary files /dev/null and b/data/clmfires.rda differ
diff --git a/data/copper.rda b/data/copper.rda
new file mode 100644
index 0000000..3d67493
Binary files /dev/null and b/data/copper.rda differ
diff --git a/data/datalist b/data/datalist
new file mode 100644
index 0000000..53d903d
--- /dev/null
+++ b/data/datalist
@@ -0,0 +1,55 @@
+Kovesi
+amacrine
+anemones
+ants: ants ants.extra
+austates
+bdspots
+bei: bei bei.extra
+betacells
+bramblecanes
+bronzefilter
+cells
+chicago
+chorley: chorley chorley.extra
+clmfires: clmfires clmfires.extra
+copper
+demohyper
+demopat
+dendrite
+finpines
+flu
+ganglia
+gordon
+gorillas: gorillas gorillas.extra
+hamster
+heather
+humberside: humberside humberside.convex
+hyytiala
+japanesepines
+lansing
+letterR
+longleaf
+mucosa: mucosa mucosa.subwin
+murchison
+nbfires: nbfires nbfires.extra nbw.rect
+nztrees
+osteo
+paracou
+ponderosa: ponderosa ponderosa.extra
+pyramidal
+redwood
+redwood3
+redwoodfull: redwoodfull redwoodfull.extra
+residualspaper
+shapley: shapley shapley.extra
+simba
+simdat
+simplenet
+spiders
+sporophores
+spruces
+swedishpines
+urkiola
+vesicles: vesicles vesicles.extra
+waka
+waterstriders
diff --git a/data/demohyper.rda b/data/demohyper.rda
new file mode 100644
index 0000000..39c5b20
Binary files /dev/null and b/data/demohyper.rda differ
diff --git a/data/demopat.rda b/data/demopat.rda
new file mode 100644
index 0000000..8e02777
Binary files /dev/null and b/data/demopat.rda differ
diff --git a/data/dendrite.rda b/data/dendrite.rda
new file mode 100644
index 0000000..f3ec109
Binary files /dev/null and b/data/dendrite.rda differ
diff --git a/data/finpines.rda b/data/finpines.rda
new file mode 100644
index 0000000..000cea8
Binary files /dev/null and b/data/finpines.rda differ
diff --git a/data/flu.rda b/data/flu.rda
new file mode 100644
index 0000000..d21ea25
Binary files /dev/null and b/data/flu.rda differ
diff --git a/data/ganglia.rda b/data/ganglia.rda
new file mode 100644
index 0000000..44670d8
Binary files /dev/null and b/data/ganglia.rda differ
diff --git a/data/gordon.rda b/data/gordon.rda
new file mode 100644
index 0000000..b06d8c3
Binary files /dev/null and b/data/gordon.rda differ
diff --git a/data/gorillas.rda b/data/gorillas.rda
new file mode 100644
index 0000000..b8da31c
Binary files /dev/null and b/data/gorillas.rda differ
diff --git a/data/hamster.rda b/data/hamster.rda
new file mode 100644
index 0000000..96c1272
Binary files /dev/null and b/data/hamster.rda differ
diff --git a/data/heather.rda b/data/heather.rda
new file mode 100644
index 0000000..9fc5a05
Binary files /dev/null and b/data/heather.rda differ
diff --git a/data/humberside.rda b/data/humberside.rda
new file mode 100644
index 0000000..c88c142
Binary files /dev/null and b/data/humberside.rda differ
diff --git a/data/hyytiala.rda b/data/hyytiala.rda
new file mode 100644
index 0000000..a163fda
Binary files /dev/null and b/data/hyytiala.rda differ
diff --git a/data/japanesepines.rda b/data/japanesepines.rda
new file mode 100644
index 0000000..544d884
Binary files /dev/null and b/data/japanesepines.rda differ
diff --git a/data/lansing.rda b/data/lansing.rda
new file mode 100644
index 0000000..034bd93
Binary files /dev/null and b/data/lansing.rda differ
diff --git a/data/letterR.rda b/data/letterR.rda
new file mode 100644
index 0000000..0bf2b5c
Binary files /dev/null and b/data/letterR.rda differ
diff --git a/data/longleaf.rda b/data/longleaf.rda
new file mode 100644
index 0000000..42731d3
Binary files /dev/null and b/data/longleaf.rda differ
diff --git a/data/mucosa.rda b/data/mucosa.rda
new file mode 100644
index 0000000..e4b277f
Binary files /dev/null and b/data/mucosa.rda differ
diff --git a/data/murchison.rda b/data/murchison.rda
new file mode 100644
index 0000000..edb9151
Binary files /dev/null and b/data/murchison.rda differ
diff --git a/data/nbfires.rda b/data/nbfires.rda
new file mode 100644
index 0000000..d1acc3b
Binary files /dev/null and b/data/nbfires.rda differ
diff --git a/data/nztrees.rda b/data/nztrees.rda
new file mode 100644
index 0000000..f5a8786
Binary files /dev/null and b/data/nztrees.rda differ
diff --git a/data/osteo.rda b/data/osteo.rda
new file mode 100644
index 0000000..c795e70
Binary files /dev/null and b/data/osteo.rda differ
diff --git a/data/paracou.rda b/data/paracou.rda
new file mode 100644
index 0000000..67f268e
Binary files /dev/null and b/data/paracou.rda differ
diff --git a/data/ponderosa.rda b/data/ponderosa.rda
new file mode 100644
index 0000000..f7733db
Binary files /dev/null and b/data/ponderosa.rda differ
diff --git a/data/pyramidal.rda b/data/pyramidal.rda
new file mode 100644
index 0000000..5b6c243
Binary files /dev/null and b/data/pyramidal.rda differ
diff --git a/data/redwood.rda b/data/redwood.rda
new file mode 100644
index 0000000..8098a01
Binary files /dev/null and b/data/redwood.rda differ
diff --git a/data/redwood3.rda b/data/redwood3.rda
new file mode 100644
index 0000000..30d8adc
Binary files /dev/null and b/data/redwood3.rda differ
diff --git a/data/redwoodfull.rda b/data/redwoodfull.rda
new file mode 100644
index 0000000..3bb72f9
Binary files /dev/null and b/data/redwoodfull.rda differ
diff --git a/data/residualspaper.rda b/data/residualspaper.rda
new file mode 100644
index 0000000..840fd3c
Binary files /dev/null and b/data/residualspaper.rda differ
diff --git a/data/shapley.rda b/data/shapley.rda
new file mode 100644
index 0000000..dfa83c7
Binary files /dev/null and b/data/shapley.rda differ
diff --git a/data/simba.rda b/data/simba.rda
new file mode 100644
index 0000000..e4e9791
Binary files /dev/null and b/data/simba.rda differ
diff --git a/data/simdat.rda b/data/simdat.rda
new file mode 100644
index 0000000..f62c084
Binary files /dev/null and b/data/simdat.rda differ
diff --git a/data/simplenet.rda b/data/simplenet.rda
new file mode 100644
index 0000000..fcf22d4
Binary files /dev/null and b/data/simplenet.rda differ
diff --git a/data/spiders.rda b/data/spiders.rda
new file mode 100644
index 0000000..6406ff1
Binary files /dev/null and b/data/spiders.rda differ
diff --git a/data/sporophores.rda b/data/sporophores.rda
new file mode 100644
index 0000000..f063bdf
Binary files /dev/null and b/data/sporophores.rda differ
diff --git a/data/spruces.rda b/data/spruces.rda
new file mode 100644
index 0000000..974d52a
Binary files /dev/null and b/data/spruces.rda differ
diff --git a/data/swedishpines.rda b/data/swedishpines.rda
new file mode 100644
index 0000000..2433487
Binary files /dev/null and b/data/swedishpines.rda differ
diff --git a/data/urkiola.rda b/data/urkiola.rda
new file mode 100644
index 0000000..58f7ff0
Binary files /dev/null and b/data/urkiola.rda differ
diff --git a/data/vesicles.rda b/data/vesicles.rda
new file mode 100644
index 0000000..fce431b
Binary files /dev/null and b/data/vesicles.rda differ
diff --git a/data/waka.rda b/data/waka.rda
new file mode 100644
index 0000000..481b400
Binary files /dev/null and b/data/waka.rda differ
diff --git a/data/waterstriders.rda b/data/waterstriders.rda
new file mode 100644
index 0000000..f724c07
Binary files /dev/null and b/data/waterstriders.rda differ
diff --git a/debian/README.source b/debian/README.source
deleted file mode 100644
index 660ca7c..0000000
--- a/debian/README.source
+++ /dev/null
@@ -1,19 +0,0 @@
-Explanation for binary files inside source package according to
-  http://lists.debian.org/debian-devel/2013/09/msg00332.html
-
-Files: data/*.rda
-  Each data file has its corresponding documentation file in
-       man/<name>.Rd
-  which describes the datafile extensively.
-
-Files: vignettes/getstart.Rnw
-  This file is documented in inst/doc/getstart.R
-
-Files: vignettes/replicated.Rnw
-  This file will be created using a method in R/quadscheme.R
-
-Files: vignettes/shapefiles.Rnw
-  This file is documented in man/spatstat-package.Rd
-
- -- Andreas Tille <tille at debian.org>  Fri, 18 Oct 2013 11:20:02 +0200
-
diff --git a/debian/README.test b/debian/README.test
deleted file mode 100644
index de1e453..0000000
--- a/debian/README.test
+++ /dev/null
@@ -1,13 +0,0 @@
-Notes on how this package can be tested.
-────────────────────────────────────────
-
-To run the unit tests provided by the package you can do
-
-   sh run-unit-test
-
-in this directory.  Make sure you have installed the two suggested packages
-
-   r-cran-gsl r-cran-randomfields
-
-to run the full test suite.
-
diff --git a/debian/changelog b/debian/changelog
deleted file mode 100644
index 9f38d54..0000000
--- a/debian/changelog
+++ /dev/null
@@ -1,50 +0,0 @@
-r-cran-spatstat (1.52-1-2) unstable; urgency=medium
-
-  * New upstream version
-  * Standards-Version: 4.1.0 (no changes needed)
-  * New Build-Depends: r-cran-spatstat.utils
-
- -- Andreas Tille <tille at debian.org>  Thu, 07 Sep 2017 13:16:52 +0200
-
-r-cran-spatstat (1.48-0-1) unstable; urgency=medium
-
-  * New upstream version
-  * debhelper 10
-
- -- Andreas Tille <tille at debian.org>  Thu, 05 Jan 2017 17:45:22 +0100
-
-r-cran-spatstat (1.47-0-1) unstable; urgency=medium
-
-  * New upstream version
-  * Convert to dh-r
-  * Canonical homepage for CRAN
-  * d/watch: version=4
-  * New Build-Depends: r-cran-rpart
-
- -- Andreas Tille <tille at debian.org>  Mon, 14 Nov 2016 11:20:57 +0100
-
-r-cran-spatstat (1.45-0-1) unstable; urgency=medium
-
-  * New upstream version
-    Closes: #767884
-  * cme fix dpkg-control
-  * r-cran-goftest now available
-  * DEP5 fix
-
- -- Andreas Tille <tille at debian.org>  Thu, 14 Apr 2016 22:46:12 +0200
-
-r-cran-spatstat (1.37-0-1) unstable; urgency=medium
-
-  * New upstream version
-  * Enhance autopkgtest
-  * versioned (Build-)Depends r-cran-polyclip (>= 1.3.0)
-  * Moved debian/upstream to debian/upstream/metadata
-  * Suggests r-cran-gsl, r-cran-randomfields which are used in test suite
-
- -- Andreas Tille <tille at debian.org>  Sun, 22 Jun 2014 22:58:36 +0200
-
-r-cran-spatstat (1.35-0-1) unstable; urgency=low
-
-  * Initial release (Closes: #709650).
-
- -- Andreas Tille <tille at debian.org>  Tue, 14 Jan 2014 16:30:53 +0100
diff --git a/debian/compat b/debian/compat
deleted file mode 100644
index f599e28..0000000
--- a/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-10
diff --git a/debian/control b/debian/control
deleted file mode 100644
index 94cd71e..0000000
--- a/debian/control
+++ /dev/null
@@ -1,43 +0,0 @@
-Source: r-cran-spatstat
-Maintainer: Debian Science Team <debian-science-maintainers at lists.alioth.debian.org>
-Uploaders: Andreas Tille <tille at debian.org>
-Section: gnu-r
-Priority: optional
-Build-Depends: debhelper (>= 10),
-               dh-r,
-               r-base-dev,
-               r-cran-mgcv,
-               r-cran-deldir,
-               r-cran-abind,
-               r-cran-tensor,
-               r-cran-polyclip,
-               r-cran-goftest,
-               r-cran-rpart,
-               r-cran-spatstat.utils
-Standards-Version: 4.1.0
-Vcs-Browser: https://anonscm.debian.org/viewvc/debian-science/packages/R/trunk/packages/r-cran-spatstat/trunk/
-Vcs-Svn: svn://anonscm.debian.org/debian-science/packages/R/trunk/packages/r-cran-spatstat/trunk/
-Homepage: https://cran.r-project.org/package=spatstat
-
-Package: r-cran-spatstat
-Architecture: any
-Depends: ${misc:Depends},
-         ${shlibs:Depends},
-         ${R:Depends}
-Recommends: ${R:Recommends}
-Suggests: ${R:Suggests}
-Description: GNU R Spatial Point Pattern analysis, model-fitting, simulation, tests
- A GNU R package for analysing spatial data, mainly Spatial Point Patterns,
- including multitype/marked points and spatial covariates, in any
- two-dimensional spatial  region.  Contains functions for plotting spatial
- data, exploratory data analysis, model-fitting, simulation, spatial sampling,
- model diagnostics, and formal inference. Data types include point patterns,
- line segment patterns, spatial windows, and pixel images. Point process
- models can be fitted to point pattern data.  Cluster type models are fitted
- by the method of minimum contrast. Very general Gibbs point process models
- can be fitted to point pattern data using a function ppm similar to lm or glm.
- Models may include dependence on covariates, interpoint interaction and
- dependence on marks. Fitted models can be simulated automatically.  Also
- provides facilities for formal inference (such as chi-squared tests) and model
- diagnostics (including simulation envelopes, residuals, residual plots and Q-Q
- plots).
diff --git a/debian/copyright b/debian/copyright
deleted file mode 100644
index 42770e7..0000000
--- a/debian/copyright
+++ /dev/null
@@ -1,68 +0,0 @@
-Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
-Upstream-Name: spatstat
-Upstream-Contact: Adrian Baddeley <adrian.baddeley at uwa.edu.au>
-Source: https://cran.r-project.org/package=spatstat
-
-Files: *
-Copyright: (C) 2005 - 2016 Adrian Baddeley and Rolf Turner, 
-           Marie-Colette van Lieshout, Rasmus Waagepetersen, Kasper Klitgaard
-           Berthelsen, Dominic Schuhmacher, Ang Qi Wei, C. Beale, B. Biggerstaff,
-           R. Bivand, F. Bonneu, J.B. Chen, Y.C. Chin, M. de la Cruz, P.J. Diggle,
-           S. Eglen, A. Gault, M. Genton, P. Grabarnik, C. Graf, J. Franklin,
-           U. Hahn, M. Hering, M.B. Hansen, M. Hazelton, J. Heikkinen, K. Hornik,
-           R. Ihaka, R. John-Chandran, D. Johnson, J. Laake, J. Mateu, P. McCullagh,
-           X.C. Mi, J. Moller, L.S. Nielsen, E. Parilov, J. Picka, M. Reiter,
-           B.D. Ripley, B. Rowlingson, J. Rudge, A. Sarkka, K. Schladitz,
-           B.T. Scott, I.-M. Sintorn, M. Spiess, M. Stevenson, P. Surovy,
-           B. Turlach, A. van Burgel, H. Wang and S. Wong.
-License: GPL-2+
-Comment: Regarding Files data/*.rda
- Date: Mon, 27 May 2013 09:27:10 +0800
- From: Adrian Baddeley <adrian.baddeley at uwa.edu.au>
- Subject: Re: Demo data sets in spatstat R package
- .
- Yes, I could provide original data for each of the datasets in spatstat,
- but may I suggest it would be easier and more reliable to use R 
- to convert the compressed binary *.rda files back into readable text files. 
- .
- Example (in the R interpreter)
-            a <- load("amacrine.rda")
-            dump(a, file="amacrine.R")
- .
- Here 'a' is a vector of strings containing the names of the R objects
- that were in the data file.
- .
- 'amacrine.R' is a text file that can replace 'amacrine.rda' in the data/
- subdirectory.
- .
- I guess there may even be a way to do this automatically using the R package
- builder;  for that you'd have to ask cran at r-project.org. Currently the package
- builder/checker forces all data files into the compressed .rda format so
- presumably this could be undone.
- .
- All the datasets in spatstat are covered by GPL >= 2
- .
- regards
- Adrian
-
-Files: debian/*
-Copyright: 2013-2016 Andreas Tille <tille at debian.org>
-License: GPL-2+
-
-License: GPL-2+
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
- .
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
- .
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- .
- On Debian systems, the complete text of the GNU General Public
- License can be found in `/usr/share/common-licenses/GPL-2'.
diff --git a/debian/docs b/debian/docs
deleted file mode 100644
index 3adf0d6..0000000
--- a/debian/docs
+++ /dev/null
@@ -1,3 +0,0 @@
-debian/README.test
-debian/tests/run-unit-test
-tests
diff --git a/debian/lintian-overrides b/debian/lintian-overrides
deleted file mode 100644
index 7a9a517..0000000
--- a/debian/lintian-overrides
+++ /dev/null
@@ -1,2 +0,0 @@
-#  This is a real name and no spelling error
-r-cran-spatstat: spelling-error-in-copyright Ang And
diff --git a/debian/rules b/debian/rules
deleted file mode 100755
index cc8abc1..0000000
--- a/debian/rules
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/make -f
-
-%:
-	dh $@ --buildsystem R
-
-override_dh_install:
-	dh_install
-	find debian -type d -name ratfor | xargs rm -rf
-
-override_dh_fixperms:
-	dh_fixperms
-	find debian -name CITATION -exec chmod -x \{\} \;
-	find debian -name "*.txt" -exec chmod -x \{\} \;
-	find debian -name "*.Rnw" -exec chmod -x \{\} \;
diff --git a/debian/source/format b/debian/source/format
deleted file mode 100644
index 163aaf8..0000000
--- a/debian/source/format
+++ /dev/null
@@ -1 +0,0 @@
-3.0 (quilt)
diff --git a/debian/tests/control b/debian/tests/control
deleted file mode 100644
index b0de1be..0000000
--- a/debian/tests/control
+++ /dev/null
@@ -1,3 +0,0 @@
-Tests: run-unit-test
-Depends: @, r-cran-gsl, r-cran-randomfields
-Restrictions: allow-stderr
diff --git a/debian/tests/run-unit-test b/debian/tests/run-unit-test
deleted file mode 100644
index 070c0ec..0000000
--- a/debian/tests/run-unit-test
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/sh -e
-
-pkg=r-cran-spatstat
-if [ "$ADTTMP" = "" ] ; then
-  ADTTMP=`mktemp -d /tmp/${pkg}-test.XXXXXX`
-fi
-cd $ADTTMP
-cp -a /usr/share/doc/${pkg}/tests/* $ADTTMP
-gunzip -r *.gz
-for runtest in `ls *.R` ; do
-   LC_ALL=C R --no-save < $runtest >/dev/null
-   if [ ! $? ] ; then
-     echo "Test ${runtest} failed"
-     exit 1
-   else
-     echo "Test ${runtest} passed"
-   fi
-done
-rm -rf $ADTTMP/*
diff --git a/debian/upstream/metadata b/debian/upstream/metadata
deleted file mode 100644
index e2f885d..0000000
--- a/debian/upstream/metadata
+++ /dev/null
@@ -1,10 +0,0 @@
-Reference:
-  Author: Adrian Baddeley and Rolf Turner
-  Title: "Spatstat: an {R} package for analyzing spatial point patterns"
-  Journal: Journal of Statistical Software
-  Year: 2005
-  Volume: 12
-  Number: 6
-  Pages: 1--42
-  ISSN: 1548-7660
-  URL: http://www.jstatsoft.org
diff --git a/debian/watch b/debian/watch
deleted file mode 100644
index 88cfc9a..0000000
--- a/debian/watch
+++ /dev/null
@@ -1,2 +0,0 @@
-version=4
-http://cran.r-project.org/src/contrib/spatstat_([-\d.]*)\.tar\.gz
diff --git a/demo/00Index b/demo/00Index
new file mode 100755
index 0000000..cfcfac8
--- /dev/null
+++ b/demo/00Index
@@ -0,0 +1,4 @@
+spatstat	Demonstration of spatstat library
+diagnose	Demonstration of diagnostic capabilities for models in spatstat
+data		Datasets in spatstat
+sumfun		Demonstration of nonparametric summary functions in spatstat
diff --git a/demo/data.R b/demo/data.R
new file mode 100755
index 0000000..8d5eb31
--- /dev/null
+++ b/demo/data.R
@@ -0,0 +1,137 @@
+if(dev.cur() <= 1) {
+  dd <- getOption("device")
+  if(is.character(dd)) dd <- get(dd)
+  dd()
+}
+
+oldpar <- par(ask = interactive() && dev.interactive(orNone=TRUE))
+oldoptions <- options(warn=-1)
+
+plot(amacrine)
+
+plot(anemones, markscale=1)
+
+ants.extra$plotit()
+
+plot(austates)
+
+plot(bei.extra$elev, main="Beilschmiedia")
+plot(bei, add=TRUE, pch=16, cex=0.3)
+
+plot(betacells)
+
+plot(bramblecanes, cols=1:3)
+plot(split(bramblecanes))
+
+plot(bronzefilter,markscale=2)
+
+plot(cells)
+
+plot(chicago, main="Chicago Street Crimes", col="grey",
+     cols=c("red", "blue", "black", "blue", "red", "blue", "blue"),
+     chars=c(16,2,22,17,24,15,6), leg.side="left", show.window=FALSE)
+
+chorley.extra$plotit()
+
+plot(clmfires, which.marks="cause", cols=2:5, cex=0.25,
+     main="Castilla-La Mancha forest fires")
+plot(clmfires.extra$clmcov200, main="Covariates for forest fires")
+
+plot(copper$Points, main="Copper")
+plot(copper$Lines, add=TRUE)
+
+plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }),
+      parargs=list(mar=rep(1,4)))
+
+plot(dendrite, leg.side="bottom", main="", cex=0.75, cols=2:4)
+
+plot(demopat)
+
+plot(finpines, main="Finnish pines")
+
+wildM1 <- with(flu, virustype == "wt" & stain == "M2-M1")
+plot(flu[wildM1, 1, drop=TRUE],
+     main=c("flu data", "wild type virus, M2-M1 stain"),
+     chars=c(16,3), cex=0.4, cols=2:3)
+
+plot(gordon, main="People in Gordon Square", pch=16)
+
+plot(gorillas, which.marks=1, chars=c(1,3), cols=2:3, main="Gorilla nest sites")
+
+plot(hamster, cols=c(2,4))
+
+plot(heather)
+
+plot(humberside)
+
+plot(hyytiala, cols=2:5)
+
+plot(japanesepines)
+
+plot(lansing)
+plot(split(lansing))
+
+plot(longleaf)
+
+plot(mucosa, chars=c(1,3), cols=c("red", "green"))
+plot(mucosa.subwin, add=TRUE, lty=3)
+
+plot(murchison, main="Murchison data")
+
+plot(murchison$greenstone, main="Murchison data", col="lightgreen")
+plot(murchison$gold, add=TRUE, pch=3, col="blue")
+plot(murchison$faults, add=TRUE, col="red")
+
+plot(nbfires, use.marks=FALSE, pch=".")
+plot(split(nbfires), use.marks=FALSE, chars=".")
+plot(split(nbfires)$"2000", which.marks="fire.type",
+     main=c("New Brunswick fires 2000", "by fire type"),
+     cols=c("blue", "green", "red", "cyan"),
+     leg.side="left")
+
+plot(nztrees)
+plot(trim.rectangle(as.owin(nztrees), c(0,5), 0), add=TRUE, lty=3)
+
+plot(osteo[1:10,], tick.marks=FALSE, xlab="", ylab="", zlab="")
+
+plot(paracou, cols=2:3, chars=c(16,3))
+
+ponderosa.extra$plotit()
+
+pyr <- pyramidal
+pyr$grp <- abbreviate(pyramidal$group, minlength=7)
+plot(pyr, quote(plot(Neurons, pch=16, main=grp)), main="Pyramidal Neurons")
+rm(pyr)
+
+plot(redwood)
+plot(redwood3, add=TRUE, pch=20)
+
+redwoodfull.extra$plotit()
+
+plot(residualspaper$Fig1)
+plot(residualspaper$Fig4a)
+plot(residualspaper$Fig4b)
+plot(residualspaper$Fig4c)
+
+shapley.extra$plotit(main="Shapley")
+
+plot(simdat)
+
+plot(spiders, pch=16, show.window=FALSE)
+
+plot(sporophores, chars=c(16,1,2), cex=0.6)
+points(0,0,pch=16, cex=2)
+text(15,8,"Tree", cex=0.75)
+
+plot(spruces, maxsize=min(nndist(spruces)))
+
+plot(swedishpines)
+
+plot(urkiola, cex=0.5, cols=2:3)
+
+plot(waka, markscale=0.04, main=c("Waka national park", "tree diameters"))
+
+plot(waterstriders)
+
+par(oldpar)
+options(oldoptions)
diff --git a/demo/diagnose.R b/demo/diagnose.R
new file mode 100755
index 0000000..01595e0
--- /dev/null
+++ b/demo/diagnose.R
@@ -0,0 +1,161 @@
+if(dev.cur() <= 1) {
+  dd <- getOption("device")
+  if(is.character(dd)) dd <- get(dd)
+  dd()
+}
+
+oldpar <- par(ask = interactive() &&
+              (.Device %in% c("X11", "GTK", "windows", "Macintosh")))
+par(mfrow=c(1,1))
+oldoptions <- options(warn = -1)
+
+# 
+#######################################################
+#
+
+X <- rpoispp(function(x,y) { 1000 * exp(- 4 * x)}, 1000)
+plot(X, main="Inhomogeneous Poisson pattern")
+
+fit.hom <- ppm(X ~1, Poisson())
+fit.inhom <- ppm(X ~x, Poisson())
+
+diagnose.ppm(fit.inhom, which="marks", type="Pearson",
+             main=c("Mark plot",
+               "Circles for positive residual mass",
+               "Colour for negative residual density"))
+
+par(mfrow=c(1,2))
+diagnose.ppm(fit.hom, which="marks", 
+             main=c("Wrong model", "(homogeneous Poisson)", "raw residuals"))
+diagnose.ppm(fit.inhom, which="marks", 
+             main=c("Right model", "(inhomogeneous Poisson)", "raw residuals"))
+par(mfrow=c(1,1))
+
+diagnose.ppm(fit.inhom, which="smooth", main="Smoothed residual field")
+
+par(mfrow=c(1,2))
+diagnose.ppm(fit.hom, which="smooth",
+             main=c("Wrong model", "(homogeneous Poisson)",
+                    "Smoothed residual field"))
+diagnose.ppm(fit.inhom, which="smooth",
+             main=c("Right model", "(inhomogeneous Poisson)",
+                    "Smoothed residual field"))
+
+par(mfrow=c(1,1))
+diagnose.ppm(fit.inhom, which="x")
+
+par(mfrow=c(1,2))
+diagnose.ppm(fit.hom, which="x",
+             main=c("Wrong model", "(homogeneous Poisson)",
+                    "lurking variable plot for x"))
+diagnose.ppm(fit.inhom, which="x",
+             main=c("Right model", "(inhomogeneous Poisson)",
+                    "lurking variable plot for x"))
+
+par(mfrow=c(1,1))
+diagnose.ppm(fit.hom, type="Pearson",main="standard diagnostic plots")
+
+par(mfrow=c(1,2))
+diagnose.ppm(fit.hom, main=c("Wrong model", "(homogeneous Poisson)"))
+diagnose.ppm(fit.inhom,  main=c("Right model", "(inhomogeneous Poisson)"))
+par(mfrow=c(1,1))
+
+
+# 
+#######################################################
+#  LEVERAGE/INFLUENCE
+
+plot(leverage(fit.inhom))
+
+plot(influence(fit.inhom))
+
+plot(dfbetas(fit.inhom))
+
+# 
+#######################################################
+#  COMPENSATORS
+
+## Takes a long time...
+CF <- compareFit(listof(hom=fit.hom, inhom=fit.inhom),
+                 Kcom, same="iso", different="icom")
+plot(CF, main="model compensators", legend=FALSE)
+legend("topleft",
+       legend=c("empirical K function", "compensator of CSR",
+         "compensator of inhomogeneous Poisson"), lty=1:3, col=1:3)
+
+# 
+#######################################################
+#  Q - Q  PLOTS
+#
+qqplot.ppm(fit.hom, 40) 
+# conclusion: homogeneous Poisson model is not correct
+title(main="Q-Q plot of smoothed residuals")
+
+qqplot.ppm(fit.inhom, 40) # TAKES A WHILE...
+title(main=c("Right model", "(inhomogeneous Poisson)",
+             "Q-Q plot of smoothed residuals"))
+# conclusion: fitted inhomogeneous Poisson model looks OK
+# 
+#######################################################
+#
+plot(cells)
+fitPoisson <- ppm(cells ~1, Poisson())
+diagnose.ppm(fitPoisson, 
+             main=c("CSR fitted to cells data",
+                    "Raw residuals",
+                    "No suggestion of departure from CSR"))
+diagnose.ppm(fitPoisson, type="pearson",
+             main=c("CSR fitted to cells data",
+                    "Pearson residuals",
+                    "No suggestion of departure from CSR"))
+# These diagnostic plots do NOT show evidence of departure from uniform Poisson
+
+plot(Kcom(fitPoisson), cbind(iso, icom) ~ r)
+plot(Gcom(fitPoisson), cbind(han, hcom) ~ r)
+
+# K compensator DOES show strong evidence of departure from uniform Poisson
+
+qqplot.ppm(fitPoisson, 40)
+title(main=c("CSR fitted to cells data",
+        "Q-Q plot of smoothed raw residuals",
+        "Strong suggestion of departure from CSR"))
+           
+# Q-Q plot DOES show strong evidence of departure from uniform Poisson.
+#
+fitStrauss <- ppm(cells ~1, Strauss(r=0.1))
+diagnose.ppm(fitStrauss, 
+             main=c("Strauss model fitted to cells data",
+                    "Raw residuals"))
+diagnose.ppm(fitStrauss, type="pearson",
+             main=c("Strauss model fitted to cells data",
+                    "Pearson residuals"))
+
+plot(Kcom(fitStrauss), cbind(iso, icom) ~ r)
+plot(Gcom(fitStrauss), cbind(han, hcom) ~ r)
+
+# next line takes a LOOONG time ...
+qqplot.ppm(fitStrauss, 40, type="pearson")
+title(main=c("Strauss model fitted to cells data",
+        "Q-Q plot of smoothed Pearson residuals",
+        "Suggests adequate fit")) 
+# Conclusion: Strauss model seems OK
+# 
+#######################################################
+#
+plot(nztrees)
+fit <- ppm(nztrees ~1, Poisson())
+diagnose.ppm(fit, type="pearson")
+title(main=c("CSR fitted to NZ trees",
+             "Pearson residuals"))
+diagnose.ppm(fit, type="pearson", cumulative=FALSE)
+title(main=c("CSR fitted to NZ trees",
+             "Pearson residuals (non-cumulative)"))
+lurking(fit, expression(x), type="pearson", cumulative=FALSE,
+        splineargs=list(spar=0.3))
+# Sharp peak at right is suspicious
+qqplot.ppm(fit, 40, type="pearson")
+title(main=c("CSR fitted to NZ trees",
+        "Q-Q plot of smoothed Pearson residuals"))
+# Slight suggestion of departure from Poisson at top right of pattern.
+par(oldpar)
+options(oldoptions)
diff --git a/demo/spatstat.R b/demo/spatstat.R
new file mode 100755
index 0000000..dd37dcb
--- /dev/null
+++ b/demo/spatstat.R
@@ -0,0 +1,726 @@
+if(dev.cur() <= 1) {
+  dd <- getOption("device")
+  if(is.character(dd)) dd <- get(dd)
+  dd()
+}
+
+oldpar <- par(ask = interactive() && dev.interactive(orNone=TRUE))
+oldoptions <- options(warn=-1)
+
+fanfare <- function(stuff) {
+  plot(c(0,1),c(0,1),type="n",axes=FALSE, xlab="", ylab="")
+  text(0.5,0.5, stuff, cex=2.5)
+}
+
+par(mar=c(1,1,2,1)+0.1)
+
+fanfare("Spatstat demonstration")
+
+fanfare("I. Types of data")
+plot(swedishpines, main="Point pattern")
+
+plot(demopat, cols=c("green", "blue"), main="Multitype point pattern")
+
+plot(longleaf, fg="blue", main="Marked point pattern")
+
+plot(finpines, main="Point pattern with multivariate marks")
+
+a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin())
+plot(a, main="Line segment pattern")
+marks(a) <- sample(letters[1:4], 20, replace=TRUE)
+plot(a, main="Multitype line segment pattern")
+marks(a) <- runif(20)
+plot(a, main="Marked line segment pattern")
+
+plot(owin(), main="Rectangular window")
+plot(letterR, main="Polygonal window")
+plot(as.mask(letterR), main="Binary mask window")
+
+Z <- as.im(function(x,y){ sqrt((x - 1)^2 + (y-1)^2)}, square(2))
+plot(Z, main="Pixel image")
+
+X <- runifpoint(42)
+plot(dirichlet(X), main="Tessellation")
+
+plot(rpoispp3(100), main="Three-dimensional point pattern")
+
+plot(simplenet, main="Linear network (linnet)")
+
+X <- rpoislpp(20, simplenet)
+plot(X,
+     main="Point pattern on linear network (lpp)",
+     show.window=FALSE)
+
+fanfare("II. Graphics")
+
+plot(letterR, col="green", border="red", lwd=2, main="Polygonal window with colour fill")
+plot(letterR, hatch=TRUE, spacing=0.15, angle=30, main="Polygonal window with line shading")
+plot(letterR, hatch=TRUE, hatchargs=list(texture=8, spacing=0.12),
+     main="Polygonal window with texture fill")
+
+plot(amacrine, chars=c(1,16),
+     main="plot(X, chars = c(1,16))")
+plot(amacrine, cols=c("red","blue"), chars=16,
+     main="plot(X, cols=c(\"red\", \"blue\"))")
+
+opa <- par(mfrow=c(1,2))
+plot(longleaf, markscale=0.03, main="markscale=0.03")
+plot(longleaf, markscale=0.09, main="markscale=0.09")           
+par(opa)
+
+plot(longleaf, pch=21, cex=1,
+     bg=colourmap(terrain.colors(128), range=c(0,80)),
+     main="colourmap for numeric mark values")
+
+Z <- as.im(function(x,y) { r <- sqrt(x^2+y^2); r * exp(-r) },
+           owin(c(-5,5),c(-5,5)))
+plot(Z, main="pixel image: image plot")
+plot(Z, main="pixel image: image plot (heat colours)", col=heat.colors(256))
+plot(Z, main="pixel image: logarithmic colour map", 
+     log=TRUE, col=rainbow(128, end=5/6))
+contour(Z, main="pixel image: contour plot", axes=FALSE)
+plot(Z, main="pixel image: image + contour plot")
+contour(Z, add=TRUE)
+persp(Z, colmap=terrain.colors(128), shade=0.3, phi=30,theta=100,
+      main="pixel image: perspective plot")
+
+ct <- colourmap(rainbow(20), breaks=seq(-1,1,length=21))
+plot(ct, main="Colour map for real numbers")
+
+ca <- colourmap(rainbow(8), inputs=letters[1:8])
+plot(ca, main="Colour map for discrete values")
+
+Z <- as.im(nnfun(runifpoint(8)))
+plot(Z, main="colour image for discrete values")
+textureplot(Z, main="texture plot for discrete values")
+
+W <- owin(c(1,5),c(0,4.5))
+Lout <- scaletointerval(distmap(rebound.owin(letterR, W)))
+Lin <- scaletointerval(distmap(complement.owin(letterR, W)))
+L <- scaletointerval(eval.im(Lin-Lout))
+D <- scaletointerval(density(runifpoint(30, W), adjust=0.3))
+X <- scaletointerval(as.im(function(x,y){ x }, W=W))
+plot(listof(L=L, D=D, X=X), main="Multiple images")
+pairs(L, D, X, main="Multiple images: pairs plot")
+persp(L, colin=D,
+      theta=-24, phi=35, box=FALSE, apron=TRUE, 
+      main="Two images:\nperspective + colours",
+      shade=0.4, ltheta=225, lphi=10)
+plot(rgbim(D,X,L,maxColorValue=1), valuesAreColours=TRUE,
+     main="Three images: RGB display")
+plot(hsvim(D,L,X), valuesAreColours=TRUE,
+     main="Three images: HSV display")
+
+fanfare("III. Conversion between types")
+
+W <- as.owin(chorley)
+plot(W, "window W")
+
+plot(as.mask(W))
+plot(as.mask(W, dimyx=1000))
+
+plot(as.im(W, value=3))
+plot(as.im(W, value=3, na.replace=0), ribbon=TRUE)
+
+plot(as.im(function(x,y) {x^2 + y}, W=square(1)),
+     main="as.im(function(x,y){x^2+y})")
+
+V <- delaunay(runifpoint(12))
+plot(V, main="Tessellation V")
+plot(as.im(V, dimyx=256), main="as.im(V)")
+plot(as.owin(V))
+
+X <- swedishpines
+plot(X, "point pattern X")
+
+plot(as.im(X), col=c("white","red"), ribbon=FALSE, xlab="", ylab="")
+plot(as.owin(X), add=TRUE)
+
+fanfare("IV. Subsetting and splitting data")
+
+plot(X, "point pattern X")
+subset <- 1:20
+plot(X[subset], main="subset operation: X[subset]")
+subwindow <- owin(poly=list(x=c(0,96,96,40,40),y=c(0,0,100,100,50)))
+plot(X[subwindow], main="subset operation: X[subwindow]")
+
+plot(lansing, "Lansing Woods data")
+plot(split(lansing),
+     main="split operation: split(X)",
+     mar.panel=c(0,0,2,0), hsep=1, pch=3)
+
+plot(longleaf, main="Longleaf Pines data")
+plot(cut(longleaf, breaks=3),
+     main=c("cut operation", "cut(longleaf, breaks=3)"))
+
+Z <- dirichlet(runifpoint(16))
+X <- runifpoint(100)
+
+plot(cut(X,Z), main="points cut by tessellation", leg.side="left")
+plot(Z, add=TRUE)
+
+plot(split(X, Z),
+     main="points split by tessellation",
+     mar.panel=c(0,0,2,2), hsep=1)
+
+W <- square(1)
+X <- as.im(function(x,y){sqrt(x^2+y^2)}, W)
+Y <- dirichlet(runifpoint(12, W))
+plot(split(X,Y), main="image split by tessellation")
+
+fanfare("V. Exploratory data analysis")
+
+par(mar=c(3,3,3,2)+0.1)
+
+plot(swedishpines, main="Quadrat counts", pch="+")
+tab <- quadratcount(swedishpines, 4)
+plot(tab, add=TRUE, lty=2, cex=2, col="blue")
+
+par(mar=c(5,3,3,2)+0.1)
+
+plot(swedishpines, main="", pch="+")
+title(main=expression(chi^2 * " test"), cex.main=2)
+tes <- quadrat.test(swedishpines, 3)
+tes
+plot(tes, add=TRUE, col="red", cex=1.5, lty=2, lwd=3)
+title(sub=paste("p-value =", signif(tes$p.value,3)), cex.sub=1.4)
+
+par(mar=c(4,4,3,2)+0.1)
+
+tesk <- cdf.test(nztrees, "x")
+tesk
+plot(tesk)
+
+
+mur <- lapply(murchison, rescale, s=1000)
+mur <- lapply(mur, "unitname<-", value="km")
+X <- mur$gold
+D <- distfun(mur$faults)
+plot(X, main="Murchison gold deposits", cols="blue")
+plot(mur$faults, add=TRUE, col="red")
+rh <- rhohat(X,D)
+plot(rh,
+     main="Smoothed rate estimate",
+     xlab="Distance to nearest fault (km)",
+     legend=FALSE)
+plot(predict(rh), main="predict(rhohat(X,D))")
+
+Z <- density(cells, 0.07)
+plot(Z, main="Kernel smoothed intensity of point pattern")
+plot(cells, add=TRUE)
+
+plot(redwood, main="Redwood data")
+te <- scan.test(redwood, 0.1, method="poisson")
+plot(te, main=c("Scan Statistic for redwood data",
+              paste("p-value =", signif(te$p.value,3))))
+plot(redwood, add=TRUE)
+te
+
+X <- unique(unmark(shapley))
+plot(X, "Shapley galaxy concentration", pch=".")
+coco <- colourmap(rev(rainbow(128, end=2/3)), range=c(0,1))
+pa <- function(i, ...) {
+  if(i == 1) list(chars=c(".", "+"), cols=1:2) else
+             list(size=0.5, pch=16, col=coco)
+}
+plot(nnclean(X, k=17), panel.args=pa,
+     mar.panel=c(0,1,1,0), nrows=2,
+     main="Byers-Raftery nearest neighbour cleaning",
+     cex.title=1.2)
+Y <- sharpen(X, sigma=0.5, edgecorrect=TRUE)
+plot(Y, main="Choi-Hall data sharpening", pch=".")
+
+owpa <- par(mfrow=c(1,2))
+W <- grow.rectangle(as.rectangle(letterR), 1)
+X <- superimpose(runifpoint(300, letterR),
+                 runifpoint(50, W), W=W)
+plot(W, main="clusterset(X, 'm')")
+plot(clusterset(X, 'marks', fast=TRUE), add=TRUE, chars=c("o", "+"), cols=1:2)
+plot(letterR, add=TRUE)
+plot(W, main="clusterset(X, 'd')")
+plot(clusterset(X, 'domain', exact=FALSE), add=TRUE)
+plot(letterR, add=TRUE)
+par(owpa)
+
+D <- density(a, sigma=0.05)
+plot(D, main="Kernel smoothed intensity of line segment pattern")
+plot(a, add=TRUE)
+
+X <- runifpoint(42)
+plot(dirichlet(X))
+plot(X, add=TRUE)
+
+plot(delaunay(X))
+plot(X, add=TRUE)
+
+parsave <- par(mfrow=c(1,1), mar=0.2+c(0,1,3,1))
+plot(listof("Longleaf Pines data"=longleaf,
+            "Nearest mark"=nnmark(longleaf),
+            "Kernel smoothing of marks"=Smooth(longleaf,10),
+            "Inverse distance weighted\nsmoothing of marks"=idw(longleaf)),
+     equal.scales=TRUE, halign=TRUE, valign=TRUE,
+     main="", mar.panel=0.2+c(0,0,2,2))
+par(parsave)
+
+fryplot(cells, main=c("Fry plot","cells data"), pch="+")
+miplot(longleaf, main="Morishita Index plot", pch=16, col="blue")
+
+plot(swedishpines, main="Swedish Pines data")
+K <- Kest(swedishpines)
+plot(K, main="K function for Swedish Pines", legendmath=TRUE)
+
+en <- envelope(swedishpines, fun=Kest, nsim=10, correction="translate")
+plot(en, main="Envelopes of K function based on CSR", shade=c("hi", "lo"))
+
+pc <- pcf(swedishpines)
+plot(pc, main="Pair correlation function")
+
+plot(swedishpines, main="nearest neighbours")
+m <- nnwhich(swedishpines)
+b <- swedishpines[m]
+arrows(swedishpines$x, swedishpines$y, b$x, b$y,
+       angle=12, length=0.1, col="red")
+
+plot(swedishpines %mark% nndist(swedishpines),
+     markscale=1, main="Stienen diagram", legend=FALSE, fg="blue")
+
+plot(Gest(swedishpines),
+     main=c("Nearest neighbour distance function G", "Gest(swedishpines)"),
+     legendmath=TRUE)
+
+Z <- distmap(swedishpines, dimyx=512)
+plot(swedishpines$window, main="Distance map")
+plot(Z, add=TRUE)
+points(swedishpines)
+
+plot(Fest(swedishpines),
+     main=c("Empty space function F", "Fest(swedishpines)"),
+     legendmath=TRUE)
+
+W <- rebound.owin(letterR, square(5))
+plot(distmap(W), main="Distance map")
+plot(W, add=TRUE)
+
+a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin())
+contour(distmap(a), main="Distance map")
+plot(a, add=TRUE,col="red")
+
+plot(Jest(swedishpines), main=c("J-function", "J(r)=(1-G(r))/(1-F(r))"))
+
+X <- swedishpines
+X <- X[sample(1:npoints(X))]
+Z <- nnfun(X)
+plot(as.owin(X), main="Nearest neighbour map")
+plot(Z, add=TRUE)
+points(X)
+
+plot(allstats(swedishpines))
+
+Fig4b <- residualspaper$Fig4b
+
+plot(Fig4b, main="Inhomogeneous point pattern")
+plot(Kinhom(Fig4b), main="Inhomogeneous K-function")
+plot(pcfinhom(Fig4b, stoyan=0.1), main="Inhomogeneous pair correlation")
+plot(Ginhom(Fig4b, sigma=0.06), main="Inhomogeneous G-function")
+plot(Jinhom(Fig4b, sigma=0.06), main="Inhomogeneous J-function")
+
+X <- unmark(bronzefilter)
+plot(X, "Bronze filter data")
+lam <- predict(ppm(X ~x))
+plot(Kscaled(X, lam), xlim=c(0, 1.5), main="Locally-scaled K function")
+
+plot(urkiola)
+plot(split(urkiola), cex=0.5)
+plot(density(split(urkiola)))
+contour(density(split(urkiola)), panel.begin=as.owin(urkiola))
+plot(relrisk(urkiola), main="Relative risk (cross-validated)")
+
+plot(bramblecanes)
+br <- rescale(bramblecanes)
+plot(alltypes(br, "K"), mar.panel=c(4,5,2,2)+0.1)
+
+ama <- rescale(amacrine)
+plot(alltypes(ama, Lcross, envelope=TRUE, nsim=9), . - r ~ r, ylim=c(-25, 5))
+
+ponderosa.extra$plotit(main="Ponderosa Pines")
+
+L <- localL(ponderosa)
+pL <- plot(L, lty=1, col=1, legend=FALSE,
+           main=c("neighbourhood density functions",
+             "for Ponderosa Pines"), cex.main=0.8)
+
+parsave <- par(mfrow=c(1,2))
+ponderosa.extra$plotit()
+par(pty="s")
+plot(L, iso007 ~ r, main="point B")
+
+par(mar=0.2+c(1,1,3,1))
+ponderosa.extra$plotit()
+L12 <- localL(ponderosa, rvalue=12)
+P12 <- ponderosa %mark% L12
+Z12 <- Smooth(P12, sigma=5, dimyx=128)
+plot(Z12, col=topo.colors(128),
+     main=c("smoothed", "neighbourhood density"),
+     cex.main=0.8)
+contour(Z12, add=TRUE)
+points(ponderosa, pch=16, cex=0.5)
+
+plot(amacrine, main="Amacrine cells data", cex.main=0.8)
+par(pty="s")
+mkc <- markcorr(amacrine, 
+                correction="translate", method="density",
+                kernel="epanechnikov")
+plot(mkc, main="Mark correlation function", legend=FALSE, cex.main=0.8)
+par(parsave)
+
+par(mar=0.2+c(4,4,3,1))
+plot(alltypes(amacrine, markconnect), 
+     title="Mark connection functions for amacrine cells")
+
+parsave <- par(mfrow=c(1,2))
+
+parspruce2 <- par(mar=0.2+c(0,2,2,0))
+plot(spruces, cex.main=0.8, markscale=10)
+par(pty="s", mar=0.2+c(2,3,2,0))
+plot(markcorr(spruces), main="Mark correlation", legendpos="bottomright")
+
+par(parspruce2)
+plot(spruces, cex.main=0.8, markscale=10)
+par(pty="s", mar=0.2+c(2,3,2,0))
+plot(markvario(spruces), main="Mark variogram", legendpos="topright")
+par(parsave)
+
+plot(listof("Emark(spruces)"=Emark(spruces),
+            "Vmark(spruces)"=Vmark(spruces)),
+     main="Independence diagnostics", ylim.covers=0,
+     legendpos="bottom")
+
+par3 <- par(mfrow=c(1,2))
+X <- rpoispp3(100)
+plot(X, main="3D point pattern X")
+plot(K3est(X), main="K-function in 3D")
+plot(X, main="3D point pattern X")
+plot(G3est(X), main="G-function in 3D", legendpos="bottomright")
+par(par3)
+
+par(mfrow=c(1,3))
+X <- unmark(chicago)
+plot(X, col="green", cols="red", pch=16,
+     main="Chicago Street Crimes", cex.main=0.75,
+     show.window=FALSE)
+plot(linearK(X, correction="none"), main="Network K-function", cex.main=0.75)
+plot(linearK(X, correction="Ang"), main="Corrected K-function", cex.main=0.75)
+
+par(mfrow=c(1,1))
+
+fanfare("VI. Model-fitting")
+
+parsave <- par(mar=0.2+c(1,1,3,2))
+plot(japanesepines)
+fit <- ppm(japanesepines ~1)
+print(fit)
+fit <- ppm(japanesepines ~polynom(x,y,2))
+print(fit)
+plot(fit, how="image", se=FALSE, main=c("Inhomogeneous Poisson model",
+                               "fit by maximum likelihood",
+                               "Fitted intensity"))
+plot(fit, how="image", trend=FALSE,
+     main=c("Standard error", "of fitted intensity"))
+
+plot(leverage(fit))
+plot(influence(fit))
+
+plot(mur$gold, main="Murchison gold deposits", cols="blue")
+plot(mur$faults, add=TRUE, col="red")
+fit <- ppm(mur$gold ~D, covariates=list(D=distfun(mur$faults)))
+par(mar=0.2+c(4,4,4,2))
+plot(parres(fit, "D"),
+     main="Partial residuals from loglinear Poisson model",
+     xlab="Distance to nearest fault (km)",
+     ylab="log intensity of gold", legend=FALSE)
+legend("bottomleft", legend=c("partial residual", "loglinear fit"),
+       col=c(1,4), lty=c(1,4))
+
+par(mar=rep(0.2, 4), mfrow=c(1,1))
+fitT <- kppm(redwood ~1, clusters="Thomas")
+simT <- simulate(fitT)[[1]]
+plot(listof(redwood, simT),
+     main.panel=c("Redwood", "simulation from\nfitted Thomas model"),
+     main="", mar.panel=0.2, equal.scales=TRUE)
+
+mop <- par(mfrow=c(1,2), pty="s", mar=rep(4.4, 4))
+plot(fitT, xname=c("Thomas model", "minimum contrast fit"), pause=FALSE)
+par(mop)
+
+oop <- par(pty="s", mar=0.2+c(4,4,4,2))
+os <- objsurf(fitT)
+plot(os, main="Minimum contrast objective function", col=terrain.colors(128))
+contour(os, add=TRUE)
+par(oop)
+
+parra <- par(mfrow=c(1,2), mar=0.2+c(3,3,4,2))
+plot(swedishpines)
+fit <- ppm(swedishpines ~1, Strauss(r=7))
+print(fit)
+plot(fit, how="image", main=c("Strauss model",
+                               "fit by maximum pseudolikelihood",
+                               "Conditional intensity plot"))
+# fitted interaction
+plot(swedishpines)
+fit <- ppm(swedishpines ~1, PairPiece(c(3,5,7,9,11,13)))
+plot(fitin(fit), legend=FALSE,
+     main=c("Pairwise interaction model",
+            "fit by maximum pseudolikelihood"))
+
+# simulation
+par(mfrow=c(1,1), mar=0.5+c(0,0,2,0))
+Xsim <- rmh(model=fit,
+            start=list(n.start=80),
+            control=list(nrep=100))
+plot(listof(swedishpines, Xsim),
+     main="",
+     main.panel=c("Swedish Pines",
+       "Simulation from\nfitted Strauss model"),
+     mar.panel=c(0,0,3,0),hsep=1,equal.scales=TRUE)
+
+# model compensator
+par(parra)
+par(mar=0.2+c(4,4,3,1))
+plot(swedishpines)
+fit <- ppm(swedishpines ~1, Strauss(r=7))
+plot(Kcom(fit), cbind(iso, icom, pois) ~ r,
+     legend=FALSE, main="model compensators")
+legend("topleft", legend=c("empirical K function",
+                    "Strauss model compensator of K",
+                    "Poisson theoretical K"), lty=1:3, col=1:3, inset=0.05)
+
+par(parsave)
+
+# Multitype data
+dpat <- rescale(demopat, 8)
+unitname(dpat) <- c("mile", "miles")
+dpat
+
+plot(dpat, cols=c("red", "blue"))
+fit <- ppm(dpat ~marks + polynom(x,y,2), Poisson())
+plot(fit, trend=TRUE, se=TRUE)
+
+fanfare("VII. Simulation")
+
+plot(letterR, main="Poisson random points")
+lambda <- 10/area.owin(letterR)
+points(rpoispp(lambda, win=letterR))
+points(rpoispp(9 * lambda, win=letterR))
+points(rpoispp(90 * lambda, win=letterR))
+plot(rpoispp(100))
+plot(rpoispp(function(x,y){1000 * exp(-3*x)}, 1000),
+     main="rpoispp(function)")
+
+plot(rMaternII(200, 0.05))
+plot(rSSI(0.05, 200))
+plot(rThomas(10, 0.2, 5))
+plot(rMatClust(10, 0.05, 4))
+plot(rCauchy(30, 0.01, 5))
+plot(rVarGamma(30, 2, 0.02, 5))
+plot(rGaussPoisson(30, 0.05, 0.5))
+
+if(require(RandomFields) && RandomFieldsSafe()) {
+  X <- rLGCP("exp", 4, var=0.2, scale=0.1)
+  plot(attr(X, "Lambda"), main="log-Gaussian Cox process")
+  plot(X, add=TRUE, pch=16)
+}
+
+plot(rStrauss(200, 0.3, 0.07))
+plot(rDiggleGratton(200,0.03,0.08))
+plot(rDGS(300, 0.05))
+
+plot(redwood, main="random thinning - rthin()")
+points(rthin(redwood, 0.5), col="green", cex=1.4)
+
+plot(rcell(nx=15))
+
+plot(rsyst(nx=5))
+abline(h=(1:4)/5, lty=2)
+abline(v=(1:4)/5, lty=2)
+
+plot(rstrat(nx=5))
+abline(h=(1:4)/5, lty=2)
+abline(v=(1:4)/5, lty=2)
+
+X <- rsyst(nx=10)
+plot(rjitter(X, 0.02))
+
+Xg <- rmh(list(cif="geyer", par=list(beta=1.25, gamma=1.6, r=0.2, sat=4.5),
+               w=c(0,10,0,10)),
+          control=list(nrep=1e4), start=list(n.start=200))
+plot(Xg, main=paste("Geyer saturation process\n",
+                    "rmh() with cif=\"geyer\""))
+
+L <- as.psp(matrix(runif(20), 5, 4), window=square(1))
+plot(L, main="runifpointOnLines(30, L)")
+plot(runifpointOnLines(30, L), add=TRUE, pch="+")
+
+plot(L, main="rpoisppOnLines(3, L)")
+plot(rpoisppOnLines(3, L), add=TRUE, pch="+")
+
+plot(runiflpp(20, simplenet))
+plot(rpoislpp(5, simplenet))
+
+plot(rpoisline(10))
+
+plot(rlinegrid(30, 0.1))
+
+spatstat.options(npixel=256)
+X <- dirichlet(runifpoint(30))
+plot(rMosaicSet(X, 0.4), col="green", border=NA)
+plot(X, add=TRUE)
+plot(rMosaicField(X, runif))
+plot(rMosaicSet(rpoislinetess(3), 0.5), col="green", border=NA, main="Switzer's random set")
+spatstat.options(npixel=100)
+
+plot(Halton(512, c(2,3)), main="quasirandom pattern")
+plot(Halton(16384, c(2,3)), main="quasirandom pattern", pch=".")
+
+fanfare("VIII. Geometry")
+
+A <- letterR
+
+B <- shift(letterR, c(0.2,0.1))
+plot(boundingbox(A,B), main="shift", type="n")
+plot(A, add=TRUE)
+plot(B, add=TRUE, border="red")
+
+B <- rotate(letterR, 0.2)
+plot(boundingbox(A,B), main="rotate", type="n")
+plot(A, add=TRUE)
+plot(B, add=TRUE, border="red")
+
+mat <- matrix(c(1.1, 0, 0.3, 1), 2, 2)
+B <- affine(letterR, mat=mat, vec=c(0.2,-0.1))
+plot(boundingbox(A,B), main="affine", type="n")
+plot(A, add=TRUE)
+plot(B, add=TRUE, border="red")
+
+par1x2 <- par(mfrow=c(1,2))
+L <- rpoisline(10, owin(c(1.5,4.5),c(0.2,3.6)))
+plot(L, main="Line segment pattern")
+plot(L$window, main="L[letterR]", type="n")
+plot(L[letterR], add=TRUE)
+plot(letterR, add=TRUE, border="red")
+par(par1x2)
+
+a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin())
+plot(a, main="Self-crossing points")
+plot(selfcrossing.psp(a), add=TRUE, col="red")
+
+a <- as.psp(matrix(runif(20), 5, 4), window=square(1))
+b <- rstrat(square(1), 5)
+plot(a, lwd=3, col="green", main="project points to segments")
+plot(b, add=TRUE, col="red", pch=16)
+v <- project2segment(b, a)
+Xproj <- v$Xproj
+plot(Xproj, add=TRUE, pch=16)
+arrows(b$x, b$y, Xproj$x, Xproj$y, angle=10, length=0.15, col="red")
+
+plot(a, main="pointsOnLines(L)")
+plot(pointsOnLines(a, np=100), add=TRUE, pch="+")
+
+parry <- par(mfrow=c(1,3), mar=0.3+c(1,1,3,1))
+X <- tess(xgrid=seq(2, 4, length=10), ygrid=seq(0, 3.5, length=8))
+plot(X, cex.main=0.75)
+plot(letterR, cex.main=0.75)
+plot(intersect.tess(X, letterR), cex.main=0.75)
+
+X <- dirichlet(runifpoint(10))
+plot(X)
+L <- infline(0.3,0.5)
+plot(owin(), main="L", cex.main=0.75)
+plot(L, col="red", lwd=2, cex.main=0.75)
+plot(chop.tess(X,L), cex.main=0.75)
+par(parry)
+
+W <- chorley$window
+plot(W, main="simplify.owin")
+WS <- simplify.owin(W, 2)
+plot(WS, add=TRUE, border="green")
+
+nopa <- par(mfrow=c(2,2))
+Rbox <- grow.rectangle(as.rectangle(letterR), 0.3)
+
+v <- erode.owin(letterR, 0.25)
+plot(Rbox, type="n", main="erode.owin", cex.main=0.75)
+plot(letterR, add=TRUE, col="red", cex.main=0.75)
+plot(v, add=TRUE, col="blue")
+
+v <- dilate.owin(letterR, 0.25)
+plot(Rbox, type="n", main="dilate.owin", cex.main=0.75)
+plot(v, add=TRUE, col="blue")
+plot(letterR, add=TRUE, col="red")
+
+v <- closing.owin(letterR, 0.3)
+plot(Rbox, type="n", main="closing.owin", cex.main=0.75)
+plot(v, add=TRUE, col="blue")
+plot(letterR, add=TRUE, col="red")
+
+v <- opening.owin(letterR, 0.3)
+plot(Rbox, type="n", main="opening.owin", cex.main=0.75)
+plot(letterR, add=TRUE, col="red")
+plot(v, add=TRUE, col="blue")
+par(nopa)
+
+laslett(heather$fine, main="Laslett's Transform")
+
+fanfare("IX. Operations on pixel images")
+
+Z <- distmap(swedishpines, dimyx=512)
+plot(Z, main="An image Z")
+plot(levelset(Z, 4))
+plot(cut(Z, 5))
+plot(eval.im(sqrt(Z) - 3))
+plot(solutionset(abs(Z - 6) <= 1))
+nopa <- par(mfrow=c(1,2))
+plot(Z)
+segments(0,0,96,100,lwd=2)
+plot(transect.im(Z))
+par(nopa)
+
+d <- distmap(cells, dimyx=256)
+W <- levelset(d, 0.06)
+nopa <- par(mfrow=c(1,2))
+plot(W)
+plot(connected(W))
+par(nopa)
+
+Z <- as.im(function(x,y) { 4 * x^2 + 3 * y }, letterR)
+plot(Z)
+plot(letterR, add=TRUE)
+
+plot(blur(Z, 0.3, bleed=TRUE))
+plot(letterR, add=TRUE)
+
+plot(blur(Z, 0.3, bleed=FALSE))
+plot(letterR, add=TRUE)
+
+fanfare("X. Programming tools")
+
+## called by applynbd() for each data point: redraws the full pattern,
+## highlights the current point and its neighbourhood of radius 'rad',
+## and displays the number of neighbours
+showoffK <- function(Y, current, ..., fullpicture, rad) {
+  plot(fullpicture,
+       main=c("Animation using `applynbd'", "explaining the K function"))
+  points(Y, cex=2)                    ## the neighbouring points
+  u <- current
+  points(u[1], u[2], pch="+", cex=3)  ## the current point
+  theta <- seq(0, 2*pi, length=100)
+  polygon(u[1] + rad*cos(theta), u[2] + rad*sin(theta))  ## circle of radius rad
+  text(u[1] + rad/3, u[2] + rad/2, Y$n, cex=3)           ## neighbour count
+  if(runif(1) < 0.2) Sys.sleep(runif(1, max=0.4))        ## brief random pause
+  return(npoints(Y))
+}
+par(ask=FALSE)
+applynbd(redwood, R=0.2, showoffK, fullpicture=redwood, rad=0.2, exclude=TRUE)
+
+par(oldpar)
+options(oldoptions)
+
diff --git a/demo/sumfun.R b/demo/sumfun.R
new file mode 100644
index 0000000..221035d
--- /dev/null
+++ b/demo/sumfun.R
@@ -0,0 +1,169 @@
+## demonstration of all summary functions
+
+opa <- par(mfrow=c(1,1))
+
+## Ripley's K-function 
+plot(swedishpines)
+plot(Kest(swedishpines))
+
+## Besag's transformation
+plot(Lest(swedishpines))
+
+## pair correlation function
+plot(pcf(swedishpines))
+
+par(mfrow=c(2,3))
+## Showing the utility of the K-function
+plot(cells)
+plot(nztrees)
+plot(redwood)
+plot(Kest(cells))
+plot(Kest(nztrees))
+plot(Kest(redwood))
+## Showing the utility of the pair correlation function
+plot(cells)
+plot(nztrees)
+plot(redwood)
+plot(pcf(cells))
+plot(pcf(nztrees))
+plot(pcf(redwood))
+##
+par(mfrow=c(1,1))
+
+## Analogues for inhomogeneous patterns
+## Reweighted K-function
+plot(japanesepines)
+fit <- ppm(japanesepines ~ polynom(x,y,2))
+plot(predict(fit))
+plot(Kinhom(japanesepines, fit))
+plot(pcfinhom(japanesepines, fit))
+plot(Linhom(japanesepines))
+
+## Rescaled K-function
+plot(unmark(bronzefilter))
+plot(Kscaled(bronzefilter))
+fit <- ppm(unmark(bronzefilter) ~ x)
+plot(predict(fit))
+plot(unmark(bronzefilter), add=TRUE)
+plot(Kscaled(bronzefilter, fit))
+plot(Lscaled(bronzefilter, fit))
+
+## Local indicators of spatial association
+plot(localL(swedishpines))
+plot(localK(swedishpines))
+
+## anisotropic
+plot(Ksector(redwood, 0, 90))
+plot(Rf <- pairorient(redwood, 0.05, 0.15))
+plot(Df <- deriv(Rf, spar=0.6, Dperiodic=TRUE))
+
+##
+par(mfrow=c(2,3))
+## Empty space function F
+plot(cells)
+plot(nztrees)
+plot(redwood)
+plot(Fest(cells))
+plot(Fest(nztrees))
+plot(Fest(redwood))
+## Nearest neighbour distance function G
+plot(cells)
+plot(nztrees)
+plot(redwood)
+plot(Gest(cells))
+plot(Gest(nztrees))
+plot(Gest(redwood))
+## J-function
+plot(cells)
+plot(nztrees)
+plot(redwood)
+plot(Jest(cells))
+plot(Jest(nztrees))
+plot(Jest(redwood))
+par(mfrow=c(1,1))
+
+## versions for inhomogeneous patterns
+plot(Finhom(japanesepines))
+plot(Ginhom(japanesepines))
+plot(Jinhom(japanesepines))
+
+## Display F,G,J,K
+plot(allstats(swedishpines))
+
+## Multitype patterns
+plot(amacrine)
+plot(Kcross(amacrine))
+plot(Kdot(amacrine))
+I <- (marks(amacrine) == "on")
+J <- (marks(amacrine) == "off")
+plot(Kmulti(amacrine, I, J))
+
+plot(alltypes(amacrine, "K"))
+
+plot(Lcross(amacrine))
+plot(Ldot(amacrine))
+
+plot(pcfcross(amacrine))
+plot(pcfdot(amacrine))
+plot(pcfmulti(amacrine, I, J))
+
+plot(Gcross(amacrine))
+plot(Gdot(amacrine))
+plot(Gmulti(amacrine, I, J))
+plot(alltypes(amacrine, "G"))
+
+plot(Jcross(amacrine))
+plot(Jdot(amacrine))
+plot(Jmulti(amacrine,I,J))
+plot(alltypes(amacrine, "J"))
+
+plot(alltypes(amacrine, "F"))
+
+plot(Iest(amacrine))
+
+plot(markconnect(amacrine))
+
+## Multitype, inhomogeneous
+plot(Kcross.inhom(amacrine))
+plot(Kdot.inhom(amacrine))
+plot(Kmulti.inhom(amacrine, I, J))
+plot(Lcross.inhom(amacrine))
+plot(Ldot.inhom(amacrine))
+
+plot(pcfcross.inhom(amacrine))
+plot(pcfdot.inhom(amacrine))
+plot(pcfmulti.inhom(amacrine, I, J))	
+
+## Numerical marks
+plot(markcorr(longleaf))
+plot(markvario(longleaf))
+plot(Emark(longleaf))
+plot(Vmark(longleaf))
+
+## Linear networks
+plot(chicago)
+plot(linearK(chicago))
+plot(linearKcross(chicago))
+plot(linearKdot(chicago))
+plot(linearpcf(chicago))
+plot(linearpcfcross(chicago))
+plot(linearpcfdot(chicago))
+
+## constant intensity estimates, replicated to one value per point,
+## as required by the inhomogeneous linear network summary functions
+lam <- rep(intensity(unmark(chicago)), npoints(chicago))
+A <- split(chicago)$assault
+B <- split(chicago)$burglary
+lamA <- rep(intensity(A), npoints(A))
+lamB <- rep(intensity(B), npoints(B))
+plot(linearKinhom(chicago, lam))
+plot(linearKcross.inhom(chicago, "assault", "burglary", lamA, lamB))
+plot(linearKdot.inhom(chicago, "assault", lamA, lam))
+plot(linearpcfinhom(chicago, lam))
+plot(linearpcfcross.inhom(chicago, "assault", "burglary", lamA, lamB))
+plot(linearpcfdot.inhom(chicago, "assault", lamA, lam))
+
+plot(linearmarkconnect(chicago))
+plot(linearmarkequal(chicago))
+
+rm(I,J,fit)
+
+par(opa)
diff --git a/inst/CITATION b/inst/CITATION
new file mode 100755
index 0000000..6c60531
--- /dev/null
+++ b/inst/CITATION
@@ -0,0 +1,60 @@
+citHeader("To cite spatstat in publications use:")
+
+citEntry(entry = "Book",
+  title        = "Spatial Point Patterns: Methodology and Applications with {R}",
+  author       = personList(as.person("Adrian Baddeley"),
+                            as.person("Ege Rubak"),
+                            as.person("Rolf Turner")),
+  year         = "2015",
+  publisher    = "Chapman and Hall/CRC Press",
+  address      = "London",
+  url="http://www.crcpress.com/Spatial-Point-Patterns-Methodology-and-Applications-with-R/Baddeley-Rubak-Turner/9781482210200/",
+  textVersion  =
+  paste("Adrian Baddeley, Ege Rubak, Rolf Turner (2015).",
+        "Spatial Point Patterns: Methodology and Applications with R.",
+        "London: Chapman and Hall/CRC Press, 2015.",
+        "URL http://www.crcpress.com/Spatial-Point-Patterns-Methodology-and-Applications-with-R/Baddeley-Rubak-Turner/9781482210200/")
+)
+
+citEntry(entry = "Article",
+  title        = "Hybrids of Gibbs Point Process Models and Their Implementation",
+  author       = personList(as.person("Adrian Baddeley"),
+                   as.person("Rolf Turner"),
+                   as.person("Jorge Mateu"),
+                   as.person("Andrew Bevan")),
+  journal      = "Journal of Statistical Software",
+  year         = "2013",
+  volume       = "55",
+  number       = "11",
+  pages        = "1--43",
+  url          = "http://www.jstatsoft.org/v55/i11/",
+
+  textVersion  =
+  paste("Adrian Baddeley, Rolf Turner, Jorge Mateu, Andrew Bevan (2013).",
+        "Hybrids of Gibbs Point Process Models and Their Implementation.",
+        "Journal of Statistical Software, 55(11), 1-43.",
+        "URL http://www.jstatsoft.org/v55/i11/."),
+
+  header       = "If you use hybrid models, please also cite:"
+)
+
+citEntry(entry = "Article",
+  title        = "{spatstat}: An {R} Package for Analyzing Spatial Point Patterns",
+  author       = personList(as.person("Adrian Baddeley"),
+                   as.person("Rolf Turner")),
+  journal      = "Journal of Statistical Software",
+  year         = "2005",
+  volume       = "12",
+  number       = "6",
+  pages        = "1--42",
+  url          = "http://www.jstatsoft.org/v12/i06/",
+
+  textVersion  =
+  paste("Adrian Baddeley, Rolf Turner (2005).",
+        "spatstat: An R Package for Analyzing Spatial Point Patterns.",
+        "Journal of Statistical Software 12(6), 1-42.",
+        "URL http://www.jstatsoft.org/v12/i06/."),
+         
+  header       = "In survey articles, please cite the original paper on spatstat:"
+)
+
diff --git a/inst/doc/BEGINNER.txt b/inst/doc/BEGINNER.txt
new file mode 100644
index 0000000..25b68c3
--- /dev/null
+++ b/inst/doc/BEGINNER.txt
@@ -0,0 +1,37 @@
+        -== Welcome to the 'spatstat' package! ==-
+
+For a friendly introduction to spatstat, type the command
+     vignette('getstart')
+which displays the document "Getting Started with Spatstat".
+
+For an overview of all capabilities, type 
+    help(spatstat)
+
+View the documentation for any command/function 'foo' by typing 
+     help(foo)
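+For example, help(Kest) displays the documentation for the K-function.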
+
+Activate the graphical help interface by typing
+     help.start()
+
+To handle spatial data in the 'shapefile' format, see the document
+"Handling shapefiles in the spatstat package", by typing
+     vignette('shapefiles')
+
+For a complete course on spatstat, see the book
+   "Spatial Point Patterns: Methodology and Applications with R"
+by Baddeley, Rubak and Turner, Chapman and Hall/CRC Press, December 2015.
+For a summary of changes to spatstat since the book was finished, type
+     vignette('updates')
+Visit the website
+        www.spatstat.org
+for updates and free chapters.
+
+For news about the very latest version of spatstat, type
+     latest.news()
+
+[[[Press 'Q' to exit, on some computers]]]
+
+
+
+
+
diff --git a/inst/doc/datasets.R b/inst/doc/datasets.R
new file mode 100644
index 0000000..3f3afef
--- /dev/null
+++ b/inst/doc/datasets.R
@@ -0,0 +1,517 @@
+### R code from vignette source 'datasets.Rnw'
+
+###################################################
+### code chunk number 1: datasets.Rnw:5-6
+###################################################
+options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1))))
+
+
+###################################################
+### code chunk number 2: datasets.Rnw:25-32
+###################################################
+library(spatstat)
+sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Date")
+sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Version")
+spatstat.options(transparent=FALSE)
+options(useFancyQuotes=FALSE)
+
+
+###################################################
+### code chunk number 3: datasets.Rnw:202-220
+###################################################
+opa <- par()
+## How to set all margins to zero and eliminate all outer spaces
+zeromargins <- function() {
+  par(
+      mar=rep(0,4),
+      omd=c(0,1,0,1),
+      xaxs="i",
+      yaxs="i"
+  )
+  invisible(NULL)
+}
+## Set 'mar'
+setmargins <- function(...) {
+  x <- c(...)
+  x <- rep(x, 4)[1:4]
+  par(mar=x)
+  invisible(NULL)
+}
+
+
+###################################################
+### code chunk number 4: datasets.Rnw:229-230 (eval = FALSE)
+###################################################
+## plot(amacrine)
+
+
+###################################################
+### code chunk number 5: datasets.Rnw:232-234
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+setmargins(0,1,2,0)
+plot(amacrine)
+
+
+###################################################
+### code chunk number 6: datasets.Rnw:243-244 (eval = FALSE)
+###################################################
+## plot(anemones, markscale=1)
+
+
+###################################################
+### code chunk number 7: datasets.Rnw:246-248
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+setmargins(0,0,2,0)
+plot(anemones, markscale=1)
+
+
+###################################################
+### code chunk number 8: datasets.Rnw:261-262 (eval = FALSE)
+###################################################
+## ants.extra$plotit()
+
+
+###################################################
+### code chunk number 9: datasets.Rnw:264-266
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+setmargins(0,0,1,0)
+ants.extra$plotit()
+
+
+###################################################
+### code chunk number 10: datasets.Rnw:274-275
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(austates)
+
+
+###################################################
+### code chunk number 11: datasets.Rnw:285-287 (eval = FALSE)
+###################################################
+## plot(bdspots, equal.scales=TRUE, pch="+", 
+##      panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i]))
+
+
+###################################################
+### code chunk number 12: datasets.Rnw:289-293
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+zeromargins()
+plot(bdspots, equal.scales=TRUE, pch="+", main="",
+     mar.panel=0, hsep=1,
+     panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i]))
+
+
+###################################################
+### code chunk number 13: datasets.Rnw:303-305 (eval = FALSE)
+###################################################
+## plot(bei.extra$elev, main="Beilschmiedia")
+## plot(bei, add=TRUE, pch=16, cex=0.3)
+
+
+###################################################
+### code chunk number 14: datasets.Rnw:307-310
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+setmargins(0,0,2,0)
+plot(bei.extra$elev, main="Beilschmiedia")
+plot(bei, add=TRUE, pch=16, cex=0.3)
+
+
+###################################################
+### code chunk number 15: datasets.Rnw:313-319
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+M <- persp(bei.extra$elev, 
+           theta=-45, phi=18, expand=7,
+           border=NA, apron=TRUE, shade=0.3, 
+           box=FALSE, visible=TRUE,
+           main="")
+perspPoints(bei, Z=bei.extra$elev, M=M, pch=16, cex=0.3)
+
+
+###################################################
+### code chunk number 16: datasets.Rnw:328-329
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(betacells)
+
+
+###################################################
+### code chunk number 17: datasets.Rnw:334-335
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(bramblecanes, cols=1:3)
+
+
+###################################################
+### code chunk number 18: datasets.Rnw:338-339
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(split(bramblecanes))
+
+
+###################################################
+### code chunk number 19: datasets.Rnw:349-350
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(bronzefilter,markscale=2)
+
+
+###################################################
+### code chunk number 20: datasets.Rnw:359-360
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(cells)
+
+
+###################################################
+### code chunk number 21: datasets.Rnw:369-372
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(chicago, main="Chicago Crimes", col="grey",
+     cols=c("red", "blue", "black", "blue", "red", "blue", "blue"),
+     chars=c(16,2,22,17,24,15,6), leg.side="left", show.window=FALSE)
+
+
+###################################################
+### code chunk number 22: datasets.Rnw:382-383
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+chorley.extra$plotit()
+
+
+###################################################
+### code chunk number 23: datasets.Rnw:399-401
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(clmfires, which.marks="cause", cols=2:5, cex=0.25,
+     main="Castilla-La Mancha forest fires")
+
+
+###################################################
+### code chunk number 24: datasets.Rnw:411-412
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(clmfires.extra$clmcov200, main="Covariates for forest fires")
+
+
+###################################################
+### code chunk number 25: datasets.Rnw:423-425
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(copper$Points, main="Copper")
+plot(copper$Lines, add=TRUE)
+
+
+###################################################
+### code chunk number 26: datasets.Rnw:432-434
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }),
+      parargs=list(mar=rep(1,4)))
+
+
+###################################################
+### code chunk number 27: datasets.Rnw:441-442
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(demopat)
+
+
+###################################################
+### code chunk number 28: datasets.Rnw:456-457
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(dendrite, leg.side="bottom", main="", cex=0.75, cols=2:4)
+
+
+###################################################
+### code chunk number 29: datasets.Rnw:465-466
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(finpines, main="Finnish pines")
+
+
+###################################################
+### code chunk number 30: datasets.Rnw:479-483
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+wildM1 <- with(flu, virustype == "wt" & stain == "M2-M1")
+plot(flu[wildM1, 1, drop=TRUE],
+     main=c("flu data", "wild type virus, M2-M1 stain"),
+     chars=c(16,3), cex=0.4, cols=2:3)
+
+
+###################################################
+### code chunk number 31: datasets.Rnw:491-492
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(gordon, main="People in Gordon Square", pch=16)
+
+
+###################################################
+### code chunk number 32: datasets.Rnw:507-508
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(gorillas, which.marks=1, chars=c(1,3), cols=2:3, main="Gorilla nest sites")
+
+
+###################################################
+### code chunk number 33: datasets.Rnw:512-513 (eval = FALSE)
+###################################################
+## system.file("rawdata/gorillas/vegetation.asc", package="spatstat")
+
+
+###################################################
+### code chunk number 34: datasets.Rnw:522-523
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(hamster, cols=c(2,4))
+
+
+###################################################
+### code chunk number 35: datasets.Rnw:533-534
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(heather)
+
+
+###################################################
+### code chunk number 36: datasets.Rnw:544-545
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(humberside)
+
+
+###################################################
+### code chunk number 37: datasets.Rnw:557-558
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(hyytiala, cols=2:5)
+
+
+###################################################
+### code chunk number 38: datasets.Rnw:567-568
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(japanesepines)
+
+
+###################################################
+### code chunk number 39: datasets.Rnw:577-578
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(lansing)
+
+
+###################################################
+### code chunk number 40: datasets.Rnw:581-582
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(split(lansing))
+
+
+###################################################
+### code chunk number 41: datasets.Rnw:589-590
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(longleaf)
+
+
+###################################################
+### code chunk number 42: datasets.Rnw:599-601
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(mucosa, chars=c(1,3), cols=c("red", "green"))
+plot(mucosa.subwin, add=TRUE, lty=3)
+
+
+###################################################
+### code chunk number 43: datasets.Rnw:615-618
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(murchison$greenstone, main="Murchison data", col="lightgreen")
+plot(murchison$gold, add=TRUE, pch=3, col="blue")
+plot(murchison$faults, add=TRUE, col="red")
+
+
+###################################################
+### code chunk number 44: datasets.Rnw:626-627
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(nbfires, use.marks=FALSE, pch=".")
+
+
+###################################################
+### code chunk number 45: datasets.Rnw:630-631
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(split(nbfires), use.marks=FALSE, chars=".")
+
+
+###################################################
+### code chunk number 46: datasets.Rnw:634-639
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+par(mar=c(0,0,2,0))
+plot(split(nbfires)$"2000", which.marks="fire.type",
+     main=c("New Brunswick fires 2000", "by fire type"),
+     cols=c("blue", "green", "red", "cyan"),
+     leg.side="left")
+
+
+###################################################
+### code chunk number 47: datasets.Rnw:647-649
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(nztrees)
+plot(trim.rectangle(as.owin(nztrees), c(0,5), 0), add=TRUE, lty=3)
+
+
+###################################################
+### code chunk number 48: datasets.Rnw:662-663
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(osteo[1:10,], main.panel="", pch=21, bg='white')
+
+
+###################################################
+### code chunk number 49: datasets.Rnw:669-670 (eval = FALSE)
+###################################################
+## system.file("rawdata/osteo/osteo36.txt", package="spatstat")
+
+
+###################################################
+### code chunk number 50: datasets.Rnw:679-680
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(paracou, cols=2:3, chars=c(16,3))
+
+
+###################################################
+### code chunk number 51: datasets.Rnw:688-689
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+ponderosa.extra$plotit()
+
+
+###################################################
+### code chunk number 52: datasets.Rnw:700-703
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+pyr <- pyramidal
+pyr$grp <- abbreviate(pyramidal$group, minlength=7)
+plot(pyr, quote(plot(Neurons, pch=16, main=grp)), main="Pyramidal Neurons")
+
+
+###################################################
+### code chunk number 53: datasets.Rnw:723-725
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(redwood)
+plot(redwood3, add=TRUE, pch=20)
+
+
+###################################################
+### code chunk number 54: datasets.Rnw:728-729
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+redwoodfull.extra$plotit()
+
+
+###################################################
+### code chunk number 55: datasets.Rnw:743-745
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(as.listof(residualspaper[c("Fig1", "Fig4a", "Fig4b", "Fig4c")]), 
+     main="")
+
+
+###################################################
+### code chunk number 56: datasets.Rnw:753-754
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+shapley.extra$plotit(main="Shapley")
+
+
+###################################################
+### code chunk number 57: datasets.Rnw:761-762
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(simdat)
+
+
+###################################################
+### code chunk number 58: datasets.Rnw:770-771
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(spiders, pch=16, show.window=FALSE)
+
+
+###################################################
+### code chunk number 59: datasets.Rnw:778-781
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(sporophores, chars=c(16,1,2), cex=0.6)
+points(0,0,pch=16, cex=2)
+text(15,8,"Tree", cex=0.75)
+
+
+###################################################
+### code chunk number 60: datasets.Rnw:790-791
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(spruces, maxsize=min(nndist(spruces)))
+
+
+###################################################
+### code chunk number 61: datasets.Rnw:800-801
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(swedishpines)
+
+
+###################################################
+### code chunk number 62: datasets.Rnw:810-811
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(urkiola, cex=0.5, cols=2:3)
+
+
+###################################################
+### code chunk number 63: datasets.Rnw:818-820
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+par(mar=c(0,0,2,0))
+plot(waka, markscale=0.04, main=c("Waka national park", "tree diameters"))
+
+
+###################################################
+### code chunk number 64: datasets.Rnw:827-831
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+v <- rotate(vesicles, pi/2)
+ve <- lapply(vesicles.extra, rotate, pi/2)
+plot(v, main="Vesicles")
+plot(ve$activezone, add=TRUE, lwd=3)
+
+
+###################################################
+### code chunk number 65: datasets.Rnw:856-857 (eval = FALSE)
+###################################################
+## system.file("rawdata/vesicles/mitochondria.txt", package="spatstat")
+
+
+###################################################
+### code chunk number 66: datasets.Rnw:865-866
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(waterstriders)
+
+
diff --git a/inst/doc/datasets.Rnw b/inst/doc/datasets.Rnw
new file mode 100644
index 0000000..a530329
--- /dev/null
+++ b/inst/doc/datasets.Rnw
@@ -0,0 +1,870 @@
+\documentclass[11pt]{article}
+
+% \VignetteIndexEntry{Datasets Provided in Spatstat}
+
+<<echo=FALSE,results=hide,fig=FALSE>>=
+options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1))))
+@ 
+
+\usepackage{graphicx}
+\usepackage{anysize}
+\marginsize{2cm}{2cm}{2cm}{2cm}
+
+\newcommand{\pkg}[1]{\texttt{#1}}
+\newcommand{\bold}[1]{{\textbf {#1}}}
+\newcommand{\R}{{\sf R}}
+\newcommand{\spst}{\pkg{spatstat}}
+\newcommand{\Spst}{\pkg{Spatstat}}
+
+\begin{document}
+\bibliographystyle{plain}
+\thispagestyle{empty}
+\SweaveOpts{eps=TRUE}
+\setkeys{Gin}{width=0.6\textwidth}
+
+<<echo=FALSE,results=hide>>=
+library(spatstat)
+sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Date")
+sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Version")
+spatstat.options(transparent=FALSE)
+options(useFancyQuotes=FALSE)
+@ 
+
+\title{Datasets provided in \spst}
+\author{Adrian Baddeley, Rolf Turner and Ege Rubak}
+\date{For \spst\ version \texttt{\Sexpr{sversion}}}
+\maketitle
+
+This document is an overview of the spatial datasets
+that are provided in the \spst\ package. 
+
+To flick through a nice display of all the data sets that come with
+\spst, type \texttt{demo(data)}.  To see information about a given
+data set, type \texttt{help({\em name})} where \emph{name} is the
+name of the data set.  To plot a given data set,
+type \texttt{plot({\em name})}.
+
+Datasets in \spst\ are ``lazy-loaded'', which means that they can
+be accessed simply by typing their name. Not all packages do this;
+in some packages you have to type \texttt{data({\em name})} in
+order to access a data set.
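+
+For example, the \texttt{cells} dataset can be inspected and plotted
+immediately, with no call to \texttt{data}:
+<<eval=FALSE>>=
+help(cells)   ## documentation for the 'cells' dataset
+cells         ## lazy-loaded: just type its name
+plot(cells)   ## plot the point pattern
+@ 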
+
+\section{List of datasets}
+
+\subsection{Point patterns in 2D}
+
+Here is a list of the standard point pattern data sets 
+that are supplied with the current installation of \spst:
+
+\newcommand{\recto}{\framebox{\hphantom{re}\vphantom{re}}}
+\newcommand{\irregpoly}{\includegraphics*[width=6mm]{irregpoly}}
+\newcommand{\convpoly}{\includegraphics*[width=4mm]{hexagon}}
+\newcommand{\disc}{$\bigcirc$}
+\newcommand{\nomarks}{$\cdot$}
+\newcommand{\nocov}{$\cdot$}
+
+\begin{tabular}{l|l|ccc}
+{\sf name} & {\sf description} &
+            {\sf marks} & {\sf covariates} & {\sf window} \\ \hline
+{\tt amacrine} & rabbit amacrine cells &
+            cell type & \nocov & \recto \\  
+{\tt anemones} & sea anemones  & 
+            diameter & \nocov & \recto \\
+{\tt ants} & ant nests& 
+            species & zones  & \convpoly \\
+{\tt bdspots} & breakdown spots & 
+           \nomarks           & \nocov & \disc \\
+{\tt bei} & rainforest trees & 
+           \nomarks           & topography & \recto \\
+{\tt betacells} & cat retinal ganglia & 
+            cell type, area & \nocov & \recto \\
+{\tt bramblecanes} & bramble canes & 
+            age & \nocov & \recto \\
+{\tt bronzefilter} & bronze particles & 
+            diameter & \nocov & \recto \\
+{\tt cells} & biological cells &
+             \nomarks &\nocov & \recto \\
+{\tt chorley} & cancers & 
+            case/control &\nocov  & \irregpoly \\
+{\tt clmfires} & forest fires & 
+            cause, size, date & 
+            \shortstack[c]{elevation, orientation,\\ slope, land use}  
+            & \irregpoly \\
+{\tt copper} & copper deposits & 
+             \nomarks & fault lines & \recto  \\
+{\tt demopat} & artificial data & 
+             type & \nocov & \irregpoly \\
+{\tt finpines} & trees & 
+             diam, height & \nocov & \recto  \\
+{\tt gordon} & people in a park & 
+             \nomarks & \nocov & \irregpoly  \\
+{\tt gorillas} & gorilla nest sites & 
+             group, season & 
+            \shortstack[c]{terrain, vegetation,\\ heat, water} & 
+             \irregpoly  \\
+{\tt hamster} & hamster tumour cells & 
+              cell type &\nocov  & \recto \\
+{\tt humberside} & child leukaemia & 
+              case/control & \nocov & \irregpoly\\
+{\tt hyytiala} & mixed forest & 
+              species &\nocov  & \recto \\
+{\tt japanesepines} & Japanese pines & \nomarks &\nocov & \recto \\
+{\tt lansing} & mixed forest &
+               species & \nocov & \recto \\
+{\tt longleaf} & trees & 
+              diameter & \nocov &  \recto \\
+{\tt mucosa}   & gastric mucosa cells & 
+              cell type & \nocov &  \recto \\
+{\tt murchison} & gold deposits & \nomarks & faults, rock type & \irregpoly \\
+{\tt nbfires} & wildfires & several & \nocov & \irregpoly \\
+{\tt nztrees} & trees & \nomarks & \nocov & \recto \\
+{\tt paracou} & trees & adult/juvenile & \nocov & \recto \\
+{\tt ponderosa} & trees & \nomarks & \nocov & \recto \\
+{\tt redwood} & saplings & \nomarks & \nocov & \recto \\
+{\tt redwood3} & saplings & \nomarks & \nocov & \recto \\
+{\tt redwoodfull} & saplings & 
+              \nomarks & zones & \recto \\
+{\tt shapley} & galaxies & magnitude, recession, SE & \nocov & \convpoly \\
+{\tt simdat} & simulated pattern & \nomarks & \nocov & \recto \\
+{\tt sporophores} & fungi & species & \nocov &  \disc \\
+{\tt spruces} & trees & diameter & \nocov &  \recto \\
+{\tt swedishpines} & trees & \nomarks & \nocov & \recto \\
+{\tt urkiola} & mixed forest & species & \nocov & \irregpoly \\
+{\tt vesicles} & synaptic vesicles & \nomarks & zones & \irregpoly \\
+{\tt waka} & trees & diameter & \nocov & \recto \\
+\hline
+\end{tabular}
+
+\bigskip
+\noindent
+The shape of the window containing the point pattern
+is indicated by the symbols \recto\ (rectangle), 
+\disc\ (disc), \convpoly\ (convex polygon) and \irregpoly\ (irregular polygon).
+
+Additional information about the data set \texttt{\em name}
+may be stored in a separate list \texttt{{\em name}.extra}.
+Currently these are the available options:
+
+\begin{tabular}[!h]{ll}
+  {\sc Name} & {\sc Contents} \\ 
+  \hline
+  {\tt ants.extra} & field and scrub subregions; \\
+                   & additional map elements; plotting function \\
+  {\tt bei.extra} & covariate images \\
+  {\tt chorley.extra} & incinerator location; plotting function \\
+  {\tt gorillas.extra} & covariate images\\
+  {\tt nbfires.extra} & inscribed rectangle \\
+  {\tt ponderosa.extra} & data points of interest; plotting function\\
+  {\tt redwoodfull.extra} & subregions; plotting function \\
+  {\tt shapley.extra} & individual survey fields; plotting function \\
+  {\tt vesicles.extra} & anatomical regions \\
+  \hline
+\end{tabular}
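+
+For example, the covariate images accompanying the \texttt{bei}
+data can be listed by typing
+<<eval=FALSE>>=
+names(bei.extra)
+@ 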
+
+For demonstration and instruction purposes, 
+raw data files are available for the datasets 
+\texttt{vesicles}, \texttt{gorillas} and \texttt{osteo}.
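+These files are installed in the \texttt{rawdata} folder of the
+\spst\ package; its location can be found by typing
+<<eval=FALSE>>=
+system.file("rawdata", package="spatstat")
+@ 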
+
+\subsection{Other Data Types}
+
+There are also the following spatial data sets which are not 2D point patterns:
+
+\begin{tabular}[c]{l|l|l}
+{\sf name} & {\sf description} & {\sf format} \\ \hline
+{\tt austates} & Australian states & tessellation \\
+{\tt chicago} & crimes & point pattern on linear network \\
+{\tt dendrite} & dendritic spines & point pattern on linear network \\
+{\tt spiders} & spider webs & point pattern on linear network \\
+{\tt flu} & virus proteins & replicated 2D point patterns \\
+{\tt heather} & heather mosaic & binary image (three versions) \\
+{\tt demohyper} & simulated data & replicated 2D point patterns with covariates\\
+{\tt osteo} & osteocyte lacunae & replicated 3D point patterns with covariates\\
+{\tt pyramidal} & pyramidal neurons & replicated 2D point patterns in 3 groups\\
+{\tt residualspaper} 
+                & data \& code from Baddeley et al (2005) &  
+                       2D point patterns, \R\ function \\
+{\tt simba} & simulated data & replicated 2D point patterns in 2 groups\\
+{\tt waterstriders} & insects on water & replicated 2D point patterns\\
+\hline
+\end{tabular}
+
+Additionally there is a dataset \texttt{Kovesi} containing
+several colour maps with perceptually uniform contrast. 
+
+\section{Information on each dataset}
+
+Here we give basic information about each dataset.
+For further information, consult the help file for the 
+particular dataset.
+
+<<echo=FALSE>>=
+opa <- par()
+## How to set all margins to zero and eliminate all outer spaces
+zeromargins <- function() {
+  par(
+      mar=rep(0,4),
+      omd=c(0,1,0,1),
+      xaxs="i",
+      yaxs="i"
+  )
+  invisible(NULL)
+}
+## Set 'mar'
+setmargins <- function(...) {
+  x <- c(...)
+  x <- rep(x, 4)[1:4]
+  par(mar=x)
+  invisible(NULL)
+}
+@ 
+
+\subsubsection*{\texttt{amacrine}: Amacrine cells}
+
+Locations of displaced amacrine cells in the retina of a rabbit.
+There are two types of points, ``on'' and ``off''.
+
+\SweaveOpts{width=5.5,height=3}\setkeys{Gin}{width=0.8\textwidth}
+<<eval=FALSE>>=
+plot(amacrine)
+@ 
+<<fig=TRUE,echo=FALSE,results=hide>>=
+setmargins(0,1,2,0)
+plot(amacrine)
+@ 
+
+\subsubsection*{\texttt{anemones}: Sea Anemones}
+
+These data give the spatial locations and diameters
+of sea anemones on a boulder near sea level.
+
+\SweaveOpts{width=7,height=4.5}\setkeys{Gin}{width=0.8\textwidth}
+<<eval=FALSE>>=
+plot(anemones, markscale=1)
+@ 
+<<fig=TRUE,echo=FALSE,results=hide>>=
+setmargins(0,0,2,0)
+plot(anemones, markscale=1)
+@ 
+
+\subsubsection*{\texttt{ants}: Ants' nests}
+
+Spatial locations of nests of two species of
+ants at a site in Greece.
+The full dataset (supplied here) has an irregular polygonal boundary,
+while most analyses have been confined to two rectangular
+subsets of the pattern (also supplied here).
+
+% Parameters for Ants data with key at right
+\SweaveOpts{width=6.3,height=4}\setkeys{Gin}{width=0.7\textwidth}
+<<eval=FALSE>>=
+ants.extra$plotit()
+@ %$
+<<fig=TRUE,echo=FALSE,results=hide>>=
+setmargins(0,0,1,0)
+ants.extra$plotit()
+@ %$
+
+\subsubsection*{\texttt{austates}: Australian states}
+
+  The states and large mainland territories of Australia are
+  represented as polygonal regions forming a tessellation.
+
+<<fig=TRUE>>=
+plot(austates)
+@   
+
+\subsubsection*{\texttt{bdspots}: Breakdown spots}
+
+A list of three point patterns, each giving the locations of
+electrical breakdown spots on a circular electrode in
+a microelectronic capacitor.
+
+\SweaveOpts{width=12,height=6}\setkeys{Gin}{width=\textwidth}
+<<eval=FALSE>>=
+plot(bdspots, equal.scales=TRUE, pch="+", 
+     panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i]))
+@   
+<<fig=TRUE,echo=FALSE>>=
+zeromargins()
+plot(bdspots, equal.scales=TRUE, pch="+", main="",
+     mar.panel=0, hsep=1,
+     panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i]))
+@   
+
+\subsubsection*{\texttt{bei}: Beilschmiedia data}
+
+Locations of 3605 trees in a tropical rain forest.
+Accompanied by covariate data giving the elevation (altitude)
+and slope of elevation in the study region.
+  
+\SweaveOpts{width=12,height=6}\setkeys{Gin}{width=0.8\textwidth}
+<<eval=FALSE>>=
+plot(bei.extra$elev, main="Beilschmiedia")
+plot(bei, add=TRUE, pch=16, cex=0.3)
+@ 
+<<fig=TRUE,echo=FALSE,results=hide>>=
+setmargins(0,0,2,0)
+plot(bei.extra$elev, main="Beilschmiedia")
+plot(bei, add=TRUE, pch=16, cex=0.3)
+@ 
+
+<<fig=TRUE>>=
+M <- persp(bei.extra$elev, 
+           theta=-45, phi=18, expand=7,
+           border=NA, apron=TRUE, shade=0.3, 
+           box=FALSE, visible=TRUE,
+           main="")
+perspPoints(bei, Z=bei.extra$elev, M=M, pch=16, cex=0.3)
+@ 
+
+\subsubsection*{\texttt{betacells}: Beta ganglion cells}
+
+Locations of beta ganglion cells in cat retina,
+each cell classified as `on' or `off'
+and also labelled with the cell profile area.
+  
+<<fig=TRUE>>=
+plot(betacells)
+@ 
+
+\subsubsection*{\texttt{bramblecanes}: Bramble canes}
+
+<<fig=TRUE>>=
+plot(bramblecanes, cols=1:3)
+@ 
+
+<<fig=TRUE>>=
+plot(split(bramblecanes))
+@ 
+
+\subsubsection*{\texttt{bronzefilter}: Bronze filter section profiles}
+
+Spatially inhomogeneous pattern of
+circular section profiles of particles, observed in a
+longitudinal plane section through a gradient sinter
+filter made from bronze powder.
+
+<<fig=TRUE>>=
+plot(bronzefilter,markscale=2)
+@ 
+
+\subsubsection*{\texttt{cells}: Biological cells}
+
+Locations of the centres of 42 biological cells
+observed under optical microscopy in a histological section.
+Often used as a demonstration example.
+
+<<fig=TRUE>>=
+plot(cells)
+@ 
+
+\subsubsection*{\texttt{chicago}: Chicago crimes}
+
+Locations (street addresses) of crimes reported in a two-week period
+in an area close to the University of Chicago.
+A multitype point pattern on a linear network.
+
+<<fig=TRUE>>=
+plot(chicago, main="Chicago Crimes", col="grey",
+     cols=c("red", "blue", "black", "blue", "red", "blue", "blue"),
+     chars=c(16,2,22,17,24,15,6), leg.side="left", show.window=FALSE)
+@ 
+
+\subsubsection*{\texttt{chorley}: Chorley-Ribble cancer data}
+
+Spatial locations of cases of cancer of the larynx
+and cancer of the lung, and the location of a disused industrial
+incinerator. A marked point pattern, with an irregular window
+and a simple covariate.
+
+<<fig=TRUE>>=
+chorley.extra$plotit()
+@ %$
+
+\subsubsection*{\texttt{clmfires}: Castilla-La Mancha Fires}
+
+Forest fires in the Castilla-La Mancha
+region of Spain between 1998 and 2007.
+A point pattern with 4 columns of marks:
+
+\begin{tabular}{ll}
+  \texttt{cause} & cause of fire\\ 
+  \texttt{burnt.area} & total area burned, in hectares \\
+  \texttt{date} & date of fire \\
+  \texttt{julian.date} & date of fire in days since 1.1.1998
+\end{tabular}
+  
+<<fig=TRUE>>=
+plot(clmfires, which.marks="cause", cols=2:5, cex=0.25,
+     main="Castilla-La Mancha forest fires")
+@ 
+
+The accompanying dataset \texttt{clmfires.extra} is a list
+of two items \texttt{clmcov100} and \texttt{clmcov200} containing covariate
+information for the entire Castilla-La Mancha region. Each
+of these two elements is a list of four pixel images 
+named \texttt{elevation}, \texttt{orientation},
+\texttt{slope} and \texttt{landuse}. 
+
+<<fig=TRUE>>=
+plot(clmfires.extra$clmcov200, main="Covariates for forest fires")
+@ %$ 
+
+\subsubsection*{\texttt{copper}: Queensland copper data}
+
+These data come from an intensive geological survey 
+in central Queensland, Australia.
+They consist of 67 points representing copper ore deposits,
+and 146 line segments representing geological `lineaments',
+mostly faults. 
+
+<<fig=TRUE>>=
+plot(copper$Points, main="Copper")
+plot(copper$Lines, add=TRUE)
+@ 
+
+\subsubsection*{\texttt{demohyper}}
+
+A synthetic example of a \texttt{hyperframe} for demonstration purposes.
+
+<<fig=TRUE>>=
+plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }),
+      parargs=list(mar=rep(1,4)))
+@ 
+
+\subsubsection*{\texttt{demopat}}
+
+A synthetic example of a point pattern for demonstration purposes.
+
+<<fig=TRUE>>=
+plot(demopat)
+@ 
+
+\subsubsection*{\texttt{dendrite}}
+
+  Dendrites are branching filaments which extend from the
+  main body of a neuron (nerve cell) to propagate electrochemical
+  signals. Spines are small protrusions on the dendrites.
+
+  This dataset gives the locations of 566 spines
+  observed on one branch of the dendritic tree of a rat neuron.
+  The spines are classified according to their shape into three types:
+  mushroom, stubby or thin.
+
+<<fig=TRUE>>=
+plot(dendrite, leg.side="bottom", main="", cex=0.75, cols=2:4)
+@ 
+
+\subsubsection*{\texttt{finpines}: Finnish pine saplings}
+
+Locations of 126 pine saplings
+in a Finnish forest, their heights and their diameters.
+
+<<fig=TRUE>>=
+plot(finpines, main="Finnish pines")
+@ 
+
+\subsubsection*{\texttt{flu}: Influenza virus proteins}
+
+  The \texttt{flu} dataset contains
+  replicated spatial point patterns giving the locations of two
+  different virus proteins on the membranes of cells infected with
+  influenza virus.
+  
+  It is a \texttt{hyperframe} containing
+  point patterns and explanatory variables.
+  
+<<fig=TRUE>>=
+wildM1 <- with(flu, virustype == "wt" & stain == "M2-M1")
+plot(flu[wildM1, 1, drop=TRUE],
+     main=c("flu data", "wild type virus, M2-M1 stain"),
+     chars=c(16,3), cex=0.4, cols=2:3)
+@ 
+
+\subsubsection*{\texttt{gordon}: People in Gordon Square}
+
+Locations of people sitting on a grass patch on a sunny afternoon.
+
+  
+<<fig=TRUE>>=
+plot(gordon, main="People in Gordon Square", pch=16)
+@ 
+
+\subsubsection*{\texttt{gorillas}: Gorilla nesting sites}
+
+ Locations of nesting sites of gorillas, and associated covariates,
+  in a National Park in Cameroon.  
+  
+  \texttt{gorillas} is a marked point pattern (object
+  of class \texttt{"ppp"}) representing nest site locations.
+
+  \texttt{gorillas.extra} is a named list of 7 pixel images (objects of
+  class \texttt{"im"}) containing spatial covariates.
+  It also belongs to the class \texttt{"listof"}.
+  
+<<fig=TRUE>>=
+plot(gorillas, which.marks=1, chars=c(1,3), cols=2:3, main="Gorilla nest sites")
+@ 
+
+The \texttt{vegetation} covariate is also available as a raw ASCII format file,
+<<eval=FALSE>>=
+system.file("rawdata/gorillas/vegetation.asc", package="spatstat")
+@ 
+
+\subsubsection*{\texttt{hamster}: Hamster kidney cells}
+
+ Cell nuclei in hamster kidney, each nucleus classified as
+ either `dividing' or `pyknotic'.
+ A multitype point pattern.
+ 
+<<fig=TRUE>>=
+plot(hamster, cols=c(2,4))
+@ 
+
+\subsubsection*{\texttt{heather}: Heather mosaic}
+
+The spatial mosaic of vegetation of the heather plant,
+recorded in a 10 by 20 metre sampling plot in Sweden.
+A list with three entries, representing the same data at
+different spatial resolutions.
+  
+<<fig=TRUE>>=
+plot(heather)
+@ 
+
+\subsubsection*{\texttt{humberside}: Childhood Leukemia and Lymphoma}
+
+Spatial locations of cases of childhood leukaemia
+and lymphoma, and randomly-selected controls,
+in North Humberside.
+A marked point pattern.
+
+<<fig=TRUE>>=
+plot(humberside)
+@ 
+
+The dataset \texttt{humberside.convex} is an object of the
+same format, representing the same point pattern data,
+but contained in a larger, 5-sided convex polygon.
+
+\subsubsection*{\texttt{hyytiala}: Mixed forest}
+
+Spatial locations and species classification for
+trees in a Finnish forest.
+
+<<fig=TRUE>>=
+plot(hyytiala, cols=2:5)
+@ 
+
+\subsubsection*{\texttt{japanesepines}: Japanese black pine saplings}
+
+Locations of Japanese black pine saplings
+in a square sampling region in a natural forest.
+Often used as a standard example.
+
+<<fig=TRUE>>=
+plot(japanesepines)
+@ 
+
+\subsubsection*{\texttt{lansing}: Lansing Woods}
+
+Locations and botanical classification of trees in a forest.
+A multitype point pattern with 6 different types of points.
+Includes duplicated points.
+
+<<fig=TRUE>>=
+plot(lansing)
+@ 
+
+<<fig=TRUE>>=
+plot(split(lansing))
+@ 
+
+\subsubsection*{\texttt{longleaf}: Longleaf Pines}
+
+Locations and diameters of Longleaf pine trees.
+  
+<<fig=TRUE>>=
+plot(longleaf)
+@ 
+
+\subsubsection*{\texttt{mucosa}: Gastric Mucosa Cells}
+
+A bivariate inhomogeneous point pattern, giving the locations of
+the centres of two types of cells in a cross-section of the
+gastric mucosa of a rat.
+  
+<<fig=TRUE>>=
+plot(mucosa, chars=c(1,3), cols=c("red", "green"))
+plot(mucosa.subwin, add=TRUE, lty=3)
+@ 
+
+\subsubsection*{\texttt{murchison}: Murchison Gold Deposits}
+
+Spatial locations of gold deposits and associated
+geological features in the Murchison area of Western Australia.
+A list of three elements:
+\begin{itemize}
+\item \texttt{gold}, the point pattern of gold deposits;
+\item \texttt{faults}, the line segment pattern of geological faults;
+\item \texttt{greenstone}, the subregion of greenstone outcrop.
+\end{itemize}
+
+<<fig=TRUE>>=
+plot(murchison$greenstone, main="Murchison data", col="lightgreen")
+plot(murchison$gold, add=TRUE, pch=3, col="blue")
+plot(murchison$faults, add=TRUE, col="red")
+@ 
+
+\subsubsection*{\texttt{nbfires}: New Brunswick Fires}
+
+Fires in New Brunswick (Canada) 
+with marks giving information about each fire.
+
+<<fig=TRUE>>=
+plot(nbfires, use.marks=FALSE, pch=".")
+@ 
+
+<<fig=TRUE>>=
+plot(split(nbfires), use.marks=FALSE, chars=".")
+@ 
+
+<<fig=TRUE>>=
+par(mar=c(0,0,2,0))
+plot(split(nbfires)$"2000", which.marks="fire.type",
+     main=c("New Brunswick fires 2000", "by fire type"),
+     cols=c("blue", "green", "red", "cyan"),
+     leg.side="left")
+@ 
+
+\subsubsection*{\texttt{nztrees}: New Zealand Trees}
+
+Locations of trees in a forest plot in New Zealand.
+Often used as a demonstration example.
+
+<<fig=TRUE>>=
+plot(nztrees)
+plot(trim.rectangle(as.owin(nztrees), c(0,5), 0), add=TRUE, lty=3)
+@ 
+
+\subsubsection*{\texttt{osteo}: Osteocyte Lacunae}
+
+Replicated three-dimensional point patterns:
+the three-dimensional locations of 
+  osteocyte lacunae observed in rectangular volumes of
+  solid bone using a confocal microscope.
+A \texttt{hyperframe} containing 3D point patterns
+and explanatory variables.
+  
+  
+<<fig=TRUE>>=
+plot(osteo[1:10,], main.panel="", pch=21, bg='white')
+@ 
+
+For demonstration and instruction purposes, the 
+raw data from the 36th point pattern are available in a plain ASCII file in the
+\texttt{spatstat} installation,
+<<eval=FALSE>>=
+system.file("rawdata/osteo/osteo36.txt", package="spatstat")
+@ 
+
+\subsubsection*{\texttt{paracou}: Kimboto trees}
+
+Point pattern of adult and juvenile Kimboto trees
+recorded at Paracou in French Guiana.
+A bivariate point pattern.
+
+<<fig=TRUE>>=
+plot(paracou, cols=2:3, chars=c(16,3))
+@ 
+
+\subsubsection*{\texttt{ponderosa}: Ponderosa Pines}
+
+Locations of Ponderosa Pine trees in a forest.
+Several special points are identified.
+
+<<fig=TRUE>>=
+ponderosa.extra$plotit()
+@  %$
+
+\subsubsection*{\texttt{pyramidal}: Pyramidal Neurons in Brain}
+
+Locations of pyramidal neurons in sections of human brain.
+There is one point pattern from each of 31 human subjects.
+The subjects are divided into three groups:
+controls (12 subjects), schizoaffective (9  subjects)
+and schizophrenic (10 subjects).
+
+<<fig=TRUE>>=
+pyr <- pyramidal
+pyr$grp <- abbreviate(pyramidal$group, minlength=7)
+plot(pyr, quote(plot(Neurons, pch=16, main=grp)), main="Pyramidal Neurons")
+@ 
+
+\subsubsection*{\texttt{redwood}, \texttt{redwood3}, \texttt{redwoodfull}: Redwood seedlings and saplings}
+
+California Redwood seedlings and saplings in a forest.
+There are two versions of this dataset:
+\texttt{redwood} and \texttt{redwoodfull}.
+
+The \texttt{redwoodfull} dataset is the full data.
+It is spatially inhomogeneous in density and spacing of points.
+
+The \texttt{redwood} dataset is a subset of the full data,
+selected because it is apparently homogeneous, and has often 
+been used as a demonstration example. This comes in two versions
+commonly used in the literature:
+\texttt{redwood} (coordinates given to 2 decimal places)
+and \texttt{redwood3} (coordinates given to 3 decimal places).
+
+
+<<fig=TRUE>>=
+plot(redwood)
+plot(redwood3, add=TRUE, pch=20)
+@ 
+
+<<fig=TRUE>>=
+redwoodfull.extra$plotit()
+@ %$
+
+\subsubsection*{\texttt{residualspaper}: Data from residuals paper}
+
+Contains the point patterns used as examples in 
+\begin{quote}
+  A. Baddeley, R. Turner, J. M{\o}ller and M. Hazelton (2005)
+  Residual analysis for spatial point processes.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \textbf{67}, 617--666
+\end{quote}
+along with {\sf R} code.
+
+<<fig=TRUE>>=
+plot(as.listof(residualspaper[c("Fig1", "Fig4a", "Fig4b", "Fig4c")]), 
+     main="")
+@ 
+
+\subsubsection*{\texttt{shapley}: Shapley Galaxy Concentration}
+
+Sky positions of 4215 galaxies in the Shapley Supercluster
+(mapped by radioastronomy).
+
+<<fig=TRUE>>=
+shapley.extra$plotit(main="Shapley")
+@  %$
+
+\subsubsection*{\texttt{simdat}: Simulated data}
+
+Another simulated dataset used for demonstration purposes.
+
+<<fig=TRUE>>=
+plot(simdat)
+@ 
+
+\subsubsection*{\texttt{spiders}: Spider webs}
+
+Spider webs across the mortar lines of a brick wall. 
+A point pattern on a linear network.
+
+<<fig=TRUE>>=
+plot(spiders, pch=16, show.window=FALSE)
+@ 
+
+\subsubsection*{\texttt{sporophores}: Sporophores}
+
+Sporophores of three species of fungi around a tree.
+
+<<fig=TRUE>>=
+plot(sporophores, chars=c(16,1,2), cex=0.6)
+points(0,0,pch=16, cex=2)
+text(15,8,"Tree", cex=0.75)
+@ 
+
+\subsubsection*{\texttt{spruces}: Spruces in Saxony}
+
+Locations of Norway spruce trees
+in a natural forest stand in Saxony, Germany.
+Each tree is marked with its diameter at breast height.
+ 
+<<fig=TRUE>>=
+plot(spruces, maxsize=min(nndist(spruces)))
+@ 
+
+\subsubsection*{\texttt{swedishpines}: Swedish Pines}
+
+Locations of pine saplings
+in a Swedish forest.
+Often used as a demonstration example.
+
+<<fig=TRUE>>=
+plot(swedishpines)
+@ 
+
+\subsubsection*{\texttt{urkiola}: trees in a wood}
+
+Locations of birch and oak trees  in a secondary wood in
+Urkiola Natural Park (Basque country, northern Spain). 
+Irregular window, bivariate point pattern.
+
+<<fig=TRUE>>=
+plot(urkiola, cex=0.5, cols=2:3)
+@ 
+
+\subsubsection*{\texttt{waka}: trees in Waka National Park}
+
+Spatial coordinates of each tree, marked by the tree diameter at breast height.
+    
+<<fig=TRUE>>=
+par(mar=c(0,0,2,0))
+plot(waka, markscale=0.04, main=c("Waka national park", "tree diameters"))
+@ 
+
+\subsubsection*{\texttt{vesicles}: synaptic vesicles}
+
+Point pattern of synaptic vesicles observed in rat brain tissue.
+
+<<fig=TRUE>>=
+v <- rotate(vesicles, pi/2)
+ve <- lapply(vesicles.extra, rotate, pi/2)
+plot(v, main="Vesicles")
+plot(ve$activezone, add=TRUE, lwd=3)
+@ 
+
+The auxiliary dataset \texttt{vesicles.extra} is a list with entries\\ 
+\begin{tabular}{ll}
+  \texttt{presynapse} & outer polygonal boundary of presynapse \\
+  \texttt{mitochondria} & polygonal boundary of mitochondria \\
+  \texttt{mask} & binary mask representation of vesicles window \\
+  \texttt{activezone} & line segment pattern representing the active zone.
+\end{tabular}
+
+For demonstration and training purposes,
+the raw data files for this dataset are also
+provided in the \pkg{spatstat} package installation:\\ 
+\begin{tabular}{ll}
+  \texttt{vesicles.txt} &  spatial locations of vesicles \\
+  \texttt{presynapse.txt} &  vertices of \texttt{presynapse} \\
+  \texttt{mitochondria.txt}  &  vertices of \texttt{mitochondria} \\
+  \texttt{vesiclesimage.tif}  &  greyscale microscope image \\
+  \texttt{vesiclesmask.tif}  &  binary image of \texttt{mask} \\
+  \texttt{activezone.txt}  &  coordinates of \texttt{activezone} 
+\end{tabular}
+The files are in the folder \texttt{rawdata/vesicles} in the
+\texttt{spatstat} installation directory. The precise location of the
+files can be obtained using \texttt{system.file}, for example
+<<eval=FALSE>>=
+system.file("rawdata/vesicles/mitochondria.txt", package="spatstat")
+@ 
+
+\subsubsection*{\texttt{waterstriders}: Insects on a pond}
+
+Three independent replications of a point pattern
+formed by insects on the surface of a pond.
+  
+<<fig=TRUE>>=
+plot(waterstriders)
+@ 
+
+\end{document}
+
diff --git a/inst/doc/datasets.pdf b/inst/doc/datasets.pdf
new file mode 100644
index 0000000..8510c43
Binary files /dev/null and b/inst/doc/datasets.pdf differ
diff --git a/inst/doc/getstart.R b/inst/doc/getstart.R
new file mode 100644
index 0000000..be581a9
--- /dev/null
+++ b/inst/doc/getstart.R
@@ -0,0 +1,151 @@
+### R code from vignette source 'getstart.Rnw'
+
+###################################################
+### code chunk number 1: getstart.Rnw:5-6
+###################################################
+options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1))))
+
+
+###################################################
+### code chunk number 2: getstart.Rnw:25-32
+###################################################
+library(spatstat)
+spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) })
+sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Date")
+sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Version")
+options(useFancyQuotes=FALSE)
+
+
+###################################################
+### code chunk number 3: getstart.Rnw:56-58
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+data(redwood)
+plot(redwood, pch=16, main="")
+
+
+###################################################
+### code chunk number 4: getstart.Rnw:79-81
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+data(longleaf)
+plot(longleaf, main="")
+
+
+###################################################
+### code chunk number 5: getstart.Rnw:138-141
+###################################################
+data(finpines)
+mypattern <- unmark(finpines)
+mydata <- round(as.data.frame(finpines), 2)
+
+
+###################################################
+### code chunk number 6: getstart.Rnw:156-157 (eval = FALSE)
+###################################################
+## mydata <- read.csv("myfile.csv")
+
+
+###################################################
+### code chunk number 7: getstart.Rnw:167-168
+###################################################
+head(mydata)
+
+
+###################################################
+### code chunk number 8: getstart.Rnw:183-184 (eval = FALSE)
+###################################################
+##   mypattern <- ppp(mydata[,3], mydata[,7], c(100,200), c(10,90))
+
+
+###################################################
+### code chunk number 9: getstart.Rnw:187-188 (eval = FALSE)
+###################################################
+## ppp(x.coordinates, y.coordinates, x.range, y.range)
+
+
+###################################################
+### code chunk number 10: getstart.Rnw:197-198
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(mypattern)
+
+
+###################################################
+### code chunk number 11: getstart.Rnw:205-206 (eval = FALSE)
+###################################################
+## summary(mypattern)
+
+
+###################################################
+### code chunk number 12: getstart.Rnw:210-211
+###################################################
+options(SweaveHooks=list(fig=function() par(mar=rep(4,4)+0.1)))
+
+
+###################################################
+### code chunk number 13: getstart.Rnw:213-214
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(Kest(mypattern))
+
+
+###################################################
+### code chunk number 14: getstart.Rnw:220-221 (eval = FALSE)
+###################################################
+## plot(envelope(mypattern,Kest))
+
+
+###################################################
+### code chunk number 15: getstart.Rnw:223-224
+###################################################
+env <- envelope(mypattern,Kest, nsim=39)
+
+
+###################################################
+### code chunk number 16: getstart.Rnw:226-227
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(env, main="envelope(mypattern, Kest)")
+
+
+###################################################
+### code chunk number 17: getstart.Rnw:229-230
+###################################################
+options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1))))
+
+
+###################################################
+### code chunk number 18: getstart.Rnw:236-237
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(density(mypattern))
+
+
+###################################################
+### code chunk number 19: getstart.Rnw:247-248 (eval = FALSE)
+###################################################
+## marks(mypattern) <- mydata[, c(5,9)]
+
+
+###################################################
+### code chunk number 20: getstart.Rnw:250-251
+###################################################
+mypattern <- finpines
+
+
+###################################################
+### code chunk number 21: getstart.Rnw:254-255 (eval = FALSE)
+###################################################
+## plot(Smooth(mypattern))
+
+
+###################################################
+### code chunk number 22: getstart.Rnw:258-259
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(Smooth(mypattern, sigma=1.2), main="Smooth(mypattern)")
+
+
diff --git a/inst/doc/getstart.Rnw b/inst/doc/getstart.Rnw
new file mode 100644
index 0000000..5c34e53
--- /dev/null
+++ b/inst/doc/getstart.Rnw
@@ -0,0 +1,397 @@
+\documentclass[11pt]{article}
+
+% \VignetteIndexEntry{Getting Started with Spatstat}
+
+<<echo=FALSE,results=hide,fig=FALSE>>=
+options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1))))
+@ 
+
+\usepackage{graphicx}
+\usepackage{anysize}
+\marginsize{2cm}{2cm}{2cm}{2cm}
+
+\newcommand{\pkg}[1]{\texttt{#1}}
+\newcommand{\bold}[1]{{\textbf {#1}}}
+\newcommand{\R}{{\sf R}}
+\newcommand{\spst}{\pkg{spatstat}}
+\newcommand{\Spst}{\pkg{Spatstat}}
+
+\begin{document}
+\bibliographystyle{plain}
+\thispagestyle{empty}
+\SweaveOpts{eps=TRUE}
+\setkeys{Gin}{width=0.6\textwidth}
+
+<<echo=FALSE,results=hide>>=
+library(spatstat)
+spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) })
+sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Date")
+sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Version")
+options(useFancyQuotes=FALSE)
+@ 
+
+\title{Getting started with \texttt{spatstat}}
+\author{Adrian Baddeley, Rolf Turner and Ege Rubak}
+\date{For \spst\ version \texttt{\Sexpr{sversion}}}
+\maketitle
+
+Welcome to \spst, a package in the \R\ language
+for analysing spatial point patterns.
+
+This document will help you to get started with \spst.
+It gives you a quick overview of \spst, and some cookbook
+recipes for doing basic calculations. 
+
+\section*{What kind of data does \spst\ handle?}
+
+\Spst\ is mainly designed for analysing \emph{spatial point patterns}.
+For example, suppose you are an ecologist studying plant seedlings. 
+You have pegged out a $10 \times 10$ metre rectangle for your survey.
+Inside the rectangle you identify all the seedlings of the species
+you want, and record their $(x,y)$ locations. You can plot the 
+$(x,y)$ locations:
+
+<<fig=TRUE,echo=FALSE,results=hide>>=
+data(redwood)
+plot(redwood, pch=16, main="")
+@ 
+
+This is a \emph{spatial point pattern} dataset. 
+
+Methods for
+analysing this kind of data are summarised in the
+highly recommended book by Diggle \cite{digg03} and other references
+in the bibliography. \nocite{handbook10,bivapebegome08}
+
+Alternatively the points could be locations in one dimension
+(such as road accidents recorded on a road network) or 
+in three dimensions (such as cells observed in 3D microscopy).
+
+You might also have recorded additional information about each seedling,
+such as its height, or the number of fronds. Such information, attached to
+each point in the point pattern, is called a \emph{mark} variable. For example,
+here is a stand of pine trees, with each tree marked by its
+diameter at breast height (dbh). The circle radii represent the dbh values
+(not to scale).
+
+<<fig=TRUE,echo=FALSE,results=hide>>=
+data(longleaf)
+plot(longleaf, main="")
+@ 
+
+You might also have recorded supplementary data, 
+such as the terrain elevation, which might serve as explanatory variables.
+These data can be in any format. \Spst\ does not usually provide 
+capabilities for analysing such data in their own right, but 
+\spst\ does allow such explanatory data to be taken into account
+in the analysis of a spatial point pattern. 
+
+\Spst\ is \underline{\bf not} designed to handle point data where
+the $(x,y)$ locations are fixed (e.g.\ temperature records 
+from the state capital cities in Australia) or where the different
+$(x,y)$ points represent the same object at different times (e.g.\ 
+hourly locations of a tiger shark with a GPS tag). These are different 
+statistical problems, for which you need different methodology.
+  
+\section*{What can \spst\ do?}
+
+\Spst\ supports a very wide range of popular techniques 
+for the statistical analysis of spatial point patterns,
+for example 
+\begin{itemize}
+\item kernel estimation of density/intensity
+\item quadrat counting and clustering indices (see the sketch below)
+\item detection of clustering using Ripley's $K$-function
+\item spatial logistic regression
+\item model-fitting
+\item Monte Carlo tests
+\end{itemize}
+as well as some advanced statistical techniques.
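+
+For instance, quadrat counting and a quadrat test take one line each
+(a minimal sketch, using the \texttt{redwood} data shown earlier):
+<<eval=FALSE>>=
+quadratcount(redwood, nx=3, ny=3) # counts in a 3 x 3 grid of quadrats
+quadrat.test(redwood, nx=3, ny=3) # chi-squared test of complete spatial randomness
+@ 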
+
+\Spst\ is one of the largest packages available for \R,
+containing over 1000 commands. It is the product of 15 years of software 
+development by leading researchers in spatial statistics.
+
+\section*{How do I start using \spst?}
+
+\begin{enumerate}
+\item Install \R\ on your computer
+  \begin{quote}
+  Go to \texttt{r-project.org} and follow the installation
+  instructions.
+  \end{quote}
+\item Install the \spst\ package in your \R\ system
+  \begin{quote}
+  Start \R\ and type \verb!install.packages("spatstat")!. 
+  If that doesn't work, go to \texttt{r-project.org} to learn how
+  to install Contributed Packages. 
+  \end{quote}
+\item Start \R\
+\item Type \texttt{library(spatstat)} to load the package.
+\item Type \texttt{help(spatstat)} for information.
+\end{enumerate}
+
+\section*{How do I get my data into \spst?}
+
+<<echo=FALSE,results=hide>>=
+data(finpines)
+mypattern <- unmark(finpines)
+mydata <- round(as.data.frame(finpines), 2)
+@ 
+
+Here is a cookbook example. Suppose you've recorded the 
+$(x,y)$ locations of seedlings, in an Excel spreadsheet.
+You should also have recorded the dimensions of the survey area
+in which the seedlings were mapped. 
+
+\begin{enumerate}
+\item In Excel, save the spreadsheet into a comma-separated values (CSV) file.
+\item Start \R\ 
+\item Read your data into \R\ using \texttt{read.csv}.
+  \begin{quote}
+    If your CSV file is called \texttt{myfile.csv} then you could 
+    type something like 
+<<eval=FALSE>>=
+mydata <- read.csv("myfile.csv")
+@ 
+    to read the data from the file and save them in an object called 
+    \texttt{mydata} (or whatever you want to call it).
+    You may need to set various options to get this to work
+    for your file format: type \texttt{help(read.csv)} for information.
+  \end{quote}
+\item Check that \texttt{mydata} contains the data you expect. 
+  \begin{quote}
+  For example, to see the first few rows of data from the spreadsheet, type
+<<>>=
+head(mydata)
+@ 
+  To select a particular column of data, you can type
+  \texttt{mydata[,3]} to extract the third column, or
+  \verb!mydata$x! to extract the column labelled \texttt{x}.
+  \end{quote}
+\item Type \texttt{library(spatstat)} to load the \spst\ package
+\item Now convert the data to a point pattern object using the
+  \spst\ command \texttt{ppp}. 
+  \begin{quote}
+    Suppose that the \texttt{x} and \texttt{y} coordinates were 
+    stored in columns 3 and 7 of the spreadsheet. Suppose that the
+    sampling plot was a rectangle, with the $x$ coordinates ranging
+    from 100 to 200, and the $y$ coordinates ranging from 10 to 90. Then
+    you would type
+<<eval=FALSE>>=
+  mypattern <- ppp(mydata[,3], mydata[,7], c(100,200), c(10,90))
+@ 
+   The general form is 
+<<eval=FALSE>>=
+ppp(x.coordinates, y.coordinates, x.range, y.range)
+@ 
+
+  Note that this only stores the seedling locations. 
+  If you have additional columns of data
+  (such as seedling height, seedling sex, etc) these can be added as
+  \emph{marks}, later.
+  \end{quote}
+\item Check that the point pattern looks right by plotting it:
+<<fig=TRUE,results=hide>>=
+plot(mypattern)
+@ 
+\item Now you are ready to do some statistical analysis. 
+  Try the following:
+  \begin{itemize}
+  \item 
+    Basic summary of data: type 
+<<eval=FALSE>>=
+summary(mypattern)
+@ 
+  \item 
+  Ripley's $K$-function:
+<<echo=FALSE,results=hide,fig=FALSE>>=
+options(SweaveHooks=list(fig=function() par(mar=rep(4,4)+0.1)))
+@ 
+<<fig=TRUE,results=hide>>=
+plot(Kest(mypattern))
+@ 
+
+For more information, type \texttt{help(Kest)}
+  \item 
+  Envelopes of $K$-function:
+<<eval=FALSE>>=
+plot(envelope(mypattern,Kest))
+@ 
+<<echo=FALSE,results=hide>>=
+env <- envelope(mypattern,Kest, nsim=39)
+@ 
+<<fig=TRUE,echo=FALSE,results=hide>>=
+plot(env, main="envelope(mypattern, Kest)")
+@ 
+<<echo=FALSE,results=hide,fig=FALSE>>=
+options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1))))
+@ 
+
+  For more information, type \texttt{help(envelope)}
+  \item 
+  Kernel smoother of point density:
+<<fig=TRUE,results=hide>>=
+plot(density(mypattern))
+@ 
+
+For more information, type \texttt{help(density.ppp)}
+\end{itemize}
+\item 
+  Next if you have additional columns of data
+  recording (for example) the seedling height and seedling sex,
+  you can add these data as \emph{marks}. Suppose that columns 
+  5 and 9 of the spreadsheet contained such values. Then do something like
+<<eval=FALSE>>=
+marks(mypattern) <- mydata[, c(5,9)]
+@ 
+<<echo=FALSE,results=hide>>=
+mypattern <- finpines
+@ 
+Now you can try things like the kernel smoother of mark values:
+<<eval=FALSE>>=
+plot(Smooth(mypattern))
+@ 
+\setkeys{Gin}{width=0.8\textwidth}
+<<fig=TRUE,echo=FALSE,results=hide>>=
+plot(Smooth(mypattern, sigma=1.2), main="Smooth(mypattern)")
+@ 
+\setkeys{Gin}{width=0.4\textwidth}
+\item
+  You are airborne!
+  Now look at the book \cite{baddrubaturn15} for more hints.
+\end{enumerate}
+
+\section*{How do I find out which command to use?}
+
+Information sources for \spst\ include:
+\begin{itemize}
+\item the Quick Reference guide: a list of the most useful commands.
+  \begin{quote}
+    To view the quick reference guide, 
+    start \R, then type \texttt{library(spatstat)}
+    and then \texttt{help(spatstat)}.
+    Alternatively you can download a pdf of the Quick Reference
+    guide from the website \texttt{www.spatstat.org}
+  \end{quote}
+\item online help:
+  \begin{quote}
+    The online help files are useful --- they
+    give detailed information and advice about each command.
+    They are available when you are running \spst. 
+    To get help about a particular command \texttt{blah}, 
+    type \texttt{help(blah)}.
+    There is a graphical help interface, 
+    which you can start by typing \texttt{help.start()}.
+    Alternatively you can download a pdf of the entire manual (1000 pages!)
+    from the website \texttt{www.spatstat.org}. 
+  \end{quote}
+\item vignettes:
+  \begin{quote}
+    \Spst\ comes installed with several `vignettes' (introductory documents
+    with examples) which can be accessed using the graphical help interface.
+    They include a document about \texttt{Handling shapefiles}.
+  \end{quote}
+\item workshop notes:
+  \begin{quote}
+    The notes from a two-day workshop 
+    on \spst\ are available online \cite{badd10wshop}.
+    These are now rather out-of-date, but still useful.
+  \end{quote}
+\item book:
+  \begin{quote}
+    The book \cite{baddrubaturn15}
+    contains a complete course on \texttt{spatstat}.
+  \end{quote}
+\item website:
+  \begin{quote}
+    Visit the \spst\ package website \texttt{www.spatstat.org}.
+  \end{quote}
+\item forums:
+  \begin{quote}
+    Join the forum \texttt{R-sig-geo} by visiting \texttt{r-project.org}.
+    Then email your questions to the forum. 
+    Alternatively you can ask the authors of the \spst\ package
+    (their email addresses are given in the package documentation).
+  \end{quote}
+\end{itemize}
+
+\begin{thebibliography}{10}
+\bibitem{badd10wshop}
+A. Baddeley.
+\newblock Analysing spatial point patterns in {{R}}.
+\newblock Technical report, CSIRO, 2010.
+\newblock Version 4.
+\newblock URL \texttt{https://research.csiro.au/software/r-workshop-notes/}
+
+\bibitem{baddrubaturn15}
+A. Baddeley, E. Rubak, and R. Turner.
+\newblock {\em Spatial Point Patterns: Methodology and Applications with {{R}}}.
+\newblock Chapman \& Hall/CRC Press, 2015.
+
+\bibitem{bivapebegome08}
+R. Bivand, E.J. Pebesma, and V. G{\'{o}}mez-Rubio.
+\newblock {\em Applied spatial data analysis with {R}}.
+\newblock Springer, 2008.
+
+\bibitem{cres93}
+N.A.C. Cressie.
+\newblock {\em Statistics for Spatial Data}.
+\newblock {John Wiley and Sons}, {New York}, second edition, 1993.
+
+\bibitem{digg03}
+P.J. Diggle.
+\newblock {\em Statistical Analysis of Spatial Point Patterns}.
+\newblock Hodder Arnold, London, second edition, 2003.
+
+\bibitem{fortdale05}
+M.J. Fortin and M.R.T. Dale.
+\newblock {\em Spatial analysis: a guide for ecologists}.
+\newblock Cambridge University Press, Cambridge, UK, 2005.
+
+\bibitem{fothroge09handbook}
+A.S. Fotheringham and P.A. Rogerson, editors.
+\newblock {\em The {SAGE} {H}andbook of {S}patial {A}nalysis}.
+\newblock SAGE Publications, London, 2009.
+
+\bibitem{gaetguyo09}
+C. Gaetan and X. Guyon.
+\newblock {\em Spatial statistics and modeling}.
+\newblock Springer, 2009.
+\newblock Translated by Kevin Bleakley.
+
+\bibitem{handbook10}
+A.E. Gelfand, P.J. Diggle, M. Fuentes, and P. Guttorp, editors.
+\newblock {\em Handbook of Spatial Statistics}.
+\newblock CRC Press, 2010.
+
+\bibitem{illietal08}
+J. Illian, A. Penttinen, H. Stoyan, and D. Stoyan.
+\newblock {\em Statistical Analysis and Modelling of Spatial Point Patterns}.
+\newblock John Wiley and Sons, Chichester, 2008.
+
+\bibitem{mollwaag04}
+J. M{\o}ller and R.P. Waagepetersen.
+\newblock {\em Statistical Inference and Simulation for Spatial Point
+  Processes}.
+\newblock Chapman and Hall/CRC, Boca Raton, 2004.
+
+\bibitem{pfeietal08}
+D.U. Pfeiffer, T. Robinson, M. Stevenson, K. Stevens, D. Rogers, and
+  A. Clements.
+\newblock {\em Spatial analysis in epidemiology}.
+\newblock Oxford University Press, Oxford, UK, 2008.
+
+\bibitem{wallgotw04}
+L.A. Waller and C.A. Gotway.
+\newblock {\em Applied spatial statistics for public health data}.
+\newblock Wiley, 2004.
+
+\end{thebibliography}
+
+
+
+\end{document}
+
diff --git a/inst/doc/getstart.pdf b/inst/doc/getstart.pdf
new file mode 100644
index 0000000..46f141f
Binary files /dev/null and b/inst/doc/getstart.pdf differ
diff --git a/inst/doc/packagesizes.txt b/inst/doc/packagesizes.txt
new file mode 100644
index 0000000..2070e1b
--- /dev/null
+++ b/inst/doc/packagesizes.txt
@@ -0,0 +1,192 @@
+date version nhelpfiles nobjects ndatasets Rlines srclines
+"2001-08-08" "1.0-1" 109 196 0 706 1370
+"2002-05-17" "1.1-3" 116 220 0 1140 1370
+"2002-08-06" "1.2-1" 129 237 0 1786 1474
+"2003-03-12" "1.3-1" 134 242 0 1955 1474
+"2003-05-05" "1.3-2" 148 257 0 2024 1474
+"2003-07-28" "1.3-3" 148 266 0 2034 1474
+"2003-11-12" "1.3-4" 148 261 0 2033 1474
+"2004-01-27" "1.4-3" 166 296 0 3641 1437
+"2004-02-11" "1.4-4" 166 296 0 3641 1437
+"2004-03-25" "1.4-5" 166 296 0 3646 1439
+"2004-05-23" "1.4-6" 166 296 0 3689 1514
+"2004-06-17" "1.5-1" 166 300 0 4255 1514
+"2004-09-01" "1.5-3" 171 311 0 4636 1514
+"2004-09-24" "1.5-4" 174 315 0 4642 1514
+"2004-10-21" "1.5-5" 180 319 0 4686 1514
+"2004-11-15" "1.5-6" 180 319 0 4686 1512
+"2004-11-27" "1.5-7" 180 319 0 4687 1512
+"2005-01-25" "1.5-8" 182 320 0 4770 1512
+"2005-01-27" "1.5-9" 182 321 0 4805 1512
+"2005-02-16" "1.5-10" 182 321 0 4805 1512
+"2005-03-14" "1.6-1" 188 345 0 5597 1517
+"2005-03-30" "1.6-2" 188 345 0 5600 1450
+"2005-04-08" "1.6-3" 189 352 0 5715 1474
+"2005-04-14" "1.6-4" 194 358 0 6056 1544
+"2005-04-21" "1.6-5" 194 358 0 6056 1544
+"2005-05-09" "1.6-6" 195 373 0 6385 1592
+"2005-05-25" "1.6-7" 201 392 0 7727 1644
+"2005-06-07" "1.6-8" 206 400 0 8003 1644
+"2005-07-01" "1.6-9" 207 402 0 8025 1644
+"2005-07-26" "1.7-11" 212 406 0 8213 1643
+"2005-08-10" "1.7-12" 213 407 0 8279 1643
+"2005-10-27" "1.7-13" 215 410 0 8531 1643
+"2005-11-24" "1.8-1" 215 418 0 8539 1643
+"2005-12-05" "1.8-2" 229 440 0 9031 1643
+"2005-12-21" "1.8-3" 237 446 0 9175 1643
+"2006-01-09" "1.8-4" 237 446 0 9207 1643
+"2006-01-18" "1.8-5" 237 446 0 9225 1643
+"2006-02-23" "1.8-6" 241 449 0 9315 1643
+"2006-03-02" "1.8-7" 247 457 0 9627 1643
+"2006-03-30" "1.8-8" 248 459 0 9662 1643
+"2006-04-18" "1.8-9" 259 446 21 10144 1832
+"2006-05-03" "1.9-0" 259 447 21 10396 1817
+"2006-05-26" "1.9-1" 266 466 21 10861 3069
+"2006-06-05" "1.9-2" 268 473 21 11409 3487
+"2006-06-20" "1.9-3" 268 479 21 11941 4140
+"2006-08-03" "1.9-4" 273 490 22 12435 5619
+"2006-08-22" "1.9-5" 274 490 22 12493 5560
+"2006-09-27" "1.9-6" 277 494 22 12573 5601
+"2006-10-19" "1.10-1" 283 529 22 13124 5601
+"2006-10-19" "1.10-1" 283 529 22 13124 5171
+"2006-11-06" "1.10-2" 283 529 22 13194 5601
+"2006-11-20" "1.10-3" 287 540 22 13425 5684
+"2007-01-08" "1.10-4" 291 554 22 13591 5684
+"2007-01-08" "1.10-4" 291 554 22 13591 5684
+"2007-01-12" "1.11-0" 291 562 22 13728 5684
+"2007-02-01" "1.11-1" 294 564 23 13614 5684
+"2007-03-10" "1.11-2" 301 574 24 13860 5684
+"2007-03-16" "1.11-3" 305 580 24 14106 5819
+"2007-03-19" "1.11-4" 307 589 24 14316 5868
+"2007-05-08" "1.11-5" 307 591 24 14373 5940
+"2007-05-18" "1.11-6" 308 592 24 14390 5940
+"2007-06-09" "1.11-7" 311 595 24 14506 5940
+"2007-07-26" "1.11-8" 312 596 24 14552 6055
+"2007-08-20" "1.12-0" 319 619 25 15246 6055
+"2007-09-22" "1.12-1" 319 619 25 15250 6055
+"2007-10-26" "1.12-2" 322 623 25 15684 6188
+"2007-11-02" "1.12-3" 322 626 25 15767 6188
+"2007-12-18" "1.12-4" 322 626 25 15814 6188
+"2008-01-07" "1.12-5" 322 630 25 15891 6238
+"2008-02-04" "1.12-6" 328 638 25 16334 6446
+"2008-02-26" "1.12-8" 328 639 25 16405 6718
+"2008-03-18" "1.12-9" 331 644 25 16606 6718
+"2008-04-02" "1.12-10" 331 644 25 16649 6771
+"2008-04-11" "1.13-0" 332 645 25 16753 6771
+"2008-04-23" "1.13-1" 333 647 25 16812 6840
+"2008-05-14" "1.13-2" 339 654 25 17057 6840
+"2008-06-24" "1.13-3" 340 657 25 17182 6840
+"2008-07-18" "1.13-4" 348 672 26 17527 6840
+"2008-07-22" "1.14-0" 354 681 26 17923 7131
+"2008-07-22" "1.14-1" 356 684 26 18052 7131
+"2008-09-08" "1.14-2" 360 688 27 18087 7185
+"2008-09-26" "1.14-3" 362 693 27 18194 7185
+"2008-10-16" "1.14-4" 366 707 27 18427 7185
+"2008-10-23" "1.14-5" 368 715 27 18493 7185
+"2008-11-07" "1.14-6" 372 726 27 18657 7185
+"2008-11-17" "1.14-7" 374 730 27 18671 7185
+"2008-12-10" "1.14-8" 377 734 27 18766 7185
+"2008-12-16" "1.14-9" 377 734 27 18772 7185
+"2009-01-30" "1.14-10" 381 741 27 18949 7186
+"2009-03-02" "1.15-0" 384 750 27 19212 7362
+"2009-03-31" "1.15-1" 386 752 28 19292 7439
+"2009-04-14" "1.15-2" 396 772 28 19880 7436
+"2009-05-13" "1.15-3" 398 777 29 20141 7524
+"2009-06-11" "1.15-4" 399 776 29 20176 7524
+"2009-07-01" "1.16-0" 405 787 29 20774 7524
+"2009-07-27" "1.16-1" 411 814 29 21433 7524
+"2009-08-22" "1.16-2" 417 821 29 21863 7937
+"2009-08-28" "1.16-3" 419 831 29 22060 7941
+"2009-10-22" "1.17-0" 420 833 30 21881 8705
+"2009-11-04" "1.17-1" 437 875 30 22900 10614
+"2009-11-10" "1.17-2" 439 880 30 22943 10606
+"2009-12-15" "1.17-3" 442 885 30 23193 10606
+"2009-12-15" "1.17-4" 445 890 30 23640 10606
+"2010-01-06" "1.17-5" 451 906 30 24283 12003
+"2010-02-08" "1.17-6" 456 921 30 24795 12003
+"2010-03-10" "1.18-0" 459 931 30 25073 12333
+"2010-03-19" "1.18-1" 462 945 30 25464 12439
+"2010-04-09" "1.18-2" 463 950 30 25631 12475
+"2010-04-19" "1.18-3" 464 953 30 25720 12475
+"2010-05-02" "1.18-4" 475 980 30 26093 13417
+"2010-05-07" "1.18-5" 475 981 30 26117 13417
+"2010-05-14" "1.19-0" 476 982 30 26205 13417
+"2010-05-22" "1.19-1" 479 984 31 26286 13556
+"2010-06-09" "1.19-2" 481 996 31 26653 13667
+"2010-06-16" "1.19-3" 483 1003 31 26733 13667
+"2010-07-15" "1.20-0" 483 1017 31 26926 14009
+"2010-07-26" "1.20-1" 484 1020 31 27107 14263
+"2010-08-10" "1.20-2" 489 1028 31 27728 14466
+"2010-08-23" "1.20-3" 489 1033 31 27869 14564
+"2010-10-21" "1.20-4" 493 1040 31 28237 14805
+"2010-10-25" "1.20-5" 494 1043 31 28377 15160
+"2010-11-05" "1.21-0" 504 1067 31 41301 15160
+"2010-11-11" "1.21-1" 507 1075 31 41714 15554
+"2011-01-17" "1.21-3" 515 1103 31 42975 15747
+"2011-01-20" "1.21-4" 515 1103 31 42985 15747
+"2011-02-10" "1.21-5" 515 1103 31 43037 15747
+"2011-04-25" "1.21-6" 517 1107 31 43211 15747
+"2011-04-28" "1.22-0" 526 1148 32 44006 15831
+"2011-05-19" "1.22-1" 528 1154 32 44235 15820
+"2011-06-13" "1.22-2" 537 1188 32 45006 16282
+"2011-06-17" "1.22-3" 539 1197 32 45153 16269
+"2011-07-07" "1.22-4" 550 1218 33 46696 16269
+"2011-07-24" "1.23-0" 562 1244 34 47694 16496
+"2011-08-01" "1.23-1" 564 1252 34 48014 16658
+"2011-08-11" "1.23-2" 566 1260 34 48313 17035
+"2011-08-12" "1.23-3" 566 1260 34 48319 17035
+"2011-09-09" "1.23-4" 571 1269 34 48747 17243
+"2011-09-23" "1.23-5" 575 1274 34 49128 17141
+"2011-10-11" "1.23-6" 579 1286 34 49508 17141
+"2011-10-22" "1.24-1" 585 1308 34 50154 17141
+"2011-11-11" "1.24-2" 588 1312 34 50604 17839
+"2011-12-06" "1.25-0" 602 1334 34 52015 18351
+"2011-12-21" "1.25-1" 609 1339 35 52235 19088
+"2012-01-19" "1.25-2" 610 1338 35 52774 19120
+"2012-02-05" "1.25-3" 613 1345 35 53004 19120
+"2012-02-29" "1.25-4" 614 1347 35 53302 19423
+"2012-03-14" "1.25-5" 616 1351 35 53720 19506
+"2012-04-08" "1.26-0" 616 1356 35 53816 19169
+"2012-04-19" "1.26-1" 617 1358 35 54498 19261
+"2012-05-16" "1.27-0" 630 1393 35 55787 19363
+"2012-06-11" "1.28-0" 632 1417 35 56384 19363
+"2012-08-23" "1.28-2" 640 1438 36 58566 19372
+"2012-10-14" "1.29-0" 651 1470 36 59711 19457
+"2012-12-23" "1.30-0" 666 1499 41 61344 19806
+"2013-01-17" "1.31-0" 668 1507 41 61446 20094
+"2013-03-01" "1.31-1" 678 1562 41 63783 20536
+"2013-04-25" "1.31-2" 682 1581 41 64501 21117
+"2013-05-27" "1.31-3" 685 1600 41 65545 21773
+"2013-08-13" "1.32-0" 695 1625 41 67120 22151
+"2013-09-05" "1.33-0" 701 1630 43 67397 22218
+"2013-10-24" "1.34-0" 720 1666 43 69219 22867
+"2013-11-03" "1.34-1" 720 1666 43 69180 23340
+"2013-12-12" "1.35-0" 745 1717 47 72110 23491
+"2014-02-18" "1.36-0" 757 1753 47 73946 24042
+"2014-05-09" "1.37-0" 781 1841 47 77585 24633
+"2014-08-15" "1.38-0" 803 1963 48 80709 25191
+"2014-08-27" "1.38-1" 803 1965 48 80833 25191
+"2014-10-23" "1.39-0" 824 2015 49 82274 25554
+"2014-10-24" "1.39-1" 824 2015 49 81990 25554
+"2014-12-31" "1.40-0" 839 2071 51 85832 25637
+"2015-02-26" "1.41-0" 861 2135 53 88407 25650
+"2015-02-27" "1.41-1" 861 2135 53 88407 25650
+"2015-05-27" "1.42-0" 888 2222 53 91600 25650
+"2015-06-05" "1.42-1" 888 2225 53 91658 25650
+"2015-06-28" "1.42-2" 890 2232 53 91985 25650
+"2015-10-07" "1.43-0" 939 2342 54 95950 25802
+"2015-12-22" "1.44-0" 949 2378 54 97522 27569
+"2015-12-29" "1.44-1" 951 2385 54 97745 27569
+"2016-03-10" "1.45-0" 961 2456 54 100964 28122
+"2016-05-08" "1.45-1" 977 2478 54 101981 28124
+"2016-05-09" "1.45-2" 977 2478 54 101981 28124
+"2016-07-06" "1.46-0" 981 2490 54 102484 28310
+"2016-07-08" "1.46-1" 981 2491 54 102573 28310
+"2016-10-12" "1.47-0" 988 2533 54 103848 28679
+"2016-12-22" "1.48-0" 1017 2611 54 105733 29466
+"2017-02-08" "1.49-0" 1024 2629 54 106522 31029
+"2017-02-08" "1.49-0" 1024 2629 54 106522 31029
+"2017-03-22" "1.50-0" 1025 2476 54 104021 29413
+"2017-05-04" "1.51-0" 1029 2501 54 105229 29430
+"2017-08-10" "1.52-0" 1035 2518 54 106162 29416
+"2017-08-16" "1.52-1" 1035 2518 54 106170 29416
diff --git a/inst/doc/replicated.R b/inst/doc/replicated.R
new file mode 100644
index 0000000..c39b0f7
--- /dev/null
+++ b/inst/doc/replicated.R
@@ -0,0 +1,528 @@
+### R code from vignette source 'replicated.Rnw'
+
+###################################################
+### code chunk number 1: replicated.Rnw:29-30
+###################################################
+options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1))))
+
+
+###################################################
+### code chunk number 2: replicated.Rnw:35-42
+###################################################
+library(spatstat)
+spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) })
+sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Date")
+sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Version")
+options(useFancyQuotes=FALSE)
+
+
+###################################################
+### code chunk number 3: replicated.Rnw:180-181
+###################################################
+waterstriders
+
+
+###################################################
+### code chunk number 4: replicated.Rnw:199-200
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(waterstriders, main="")
+
+
+###################################################
+### code chunk number 5: replicated.Rnw:207-208
+###################################################
+summary(waterstriders)
+
+
+###################################################
+### code chunk number 6: replicated.Rnw:216-217
+###################################################
+X <- listof(rpoispp(100), rpoispp(100), rpoispp(100))
+
+
+###################################################
+### code chunk number 7: replicated.Rnw:222-224
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(X)
+X
+
+
+###################################################
+### code chunk number 8: replicated.Rnw:253-254 (eval = FALSE)
+###################################################
+## hyperframe(...)
+
+
+###################################################
+### code chunk number 9: replicated.Rnw:279-281
+###################################################
+H <- hyperframe(X=1:3, Y=list(sin,cos,tan))
+H
+
+
+###################################################
+### code chunk number 10: replicated.Rnw:289-294
+###################################################
+G <- hyperframe(X=1:3, Y=letters[1:3], Z=factor(letters[1:3]),
+                W=list(rpoispp(100),rpoispp(100), rpoispp(100)),
+                U=42,
+                V=rpoispp(100), stringsAsFactors=FALSE)
+G
+
+
+###################################################
+### code chunk number 11: replicated.Rnw:323-324
+###################################################
+simba
+
+
+###################################################
+### code chunk number 12: replicated.Rnw:337-338
+###################################################
+pyramidal
+
+
+###################################################
+### code chunk number 13: replicated.Rnw:344-345
+###################################################
+ws <- hyperframe(Striders=waterstriders)
+
+
+###################################################
+### code chunk number 14: replicated.Rnw:352-354
+###################################################
+H$X
+H$Y
+
+
+###################################################
+### code chunk number 15: replicated.Rnw:364-366
+###################################################
+H$U <- letters[1:3]
+H
+
+
+###################################################
+### code chunk number 16: replicated.Rnw:371-375
+###################################################
+G <- hyperframe()
+G$X <- waterstriders
+G$Y <- 1:3
+G
+
+
+###################################################
+### code chunk number 17: replicated.Rnw:383-387
+###################################################
+H[,1]
+H[2,]
+H[2:3, ]
+H[1,1]
+
+
+###################################################
+### code chunk number 18: replicated.Rnw:393-396
+###################################################
+H[,1,drop=TRUE]
+H[1,1,drop=TRUE]
+H[1,2,drop=TRUE]
+
+
+###################################################
+### code chunk number 19: replicated.Rnw:409-410 (eval = FALSE)
+###################################################
+## plot.listof(x, ..., main, arrange = TRUE, nrows = NULL, ncols = NULL)
+
+
+###################################################
+### code chunk number 20: replicated.Rnw:425-426
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(waterstriders, pch=16, nrows=1)
+
+
+###################################################
+### code chunk number 21: replicated.Rnw:441-442
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(simba)
+
+
+###################################################
+### code chunk number 22: replicated.Rnw:454-456
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+H <- hyperframe(X=1:3, Y=list(sin,cos,tan))
+plot(H$Y)
+
+
+###################################################
+### code chunk number 23: replicated.Rnw:468-469 (eval = FALSE)
+###################################################
+## plot(h, e)
+
+
+###################################################
+### code chunk number 24: replicated.Rnw:478-479
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }))
+
+
+###################################################
+### code chunk number 25: replicated.Rnw:491-493
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+H <- hyperframe(Bugs=waterstriders)
+plot(H, quote(plot(Kest(Bugs))), marsize=1)
+
+
+###################################################
+### code chunk number 26: replicated.Rnw:506-508
+###################################################
+df <- data.frame(A=1:10, B=10:1)
+with(df, A-B)
+
+
+###################################################
+### code chunk number 27: replicated.Rnw:521-522 (eval = FALSE)
+###################################################
+## with(h,e)
+
+
+###################################################
+### code chunk number 28: replicated.Rnw:532-535
+###################################################
+H <- hyperframe(Bugs=waterstriders)
+with(H, npoints(Bugs))
+with(H, distmap(Bugs))
+
+
+###################################################
+### code chunk number 29: replicated.Rnw:558-559
+###################################################
+with(simba, npoints(Points))
+
+
+###################################################
+### code chunk number 30: replicated.Rnw:566-568
+###################################################
+H <- hyperframe(Bugs=waterstriders)
+K <- with(H, Kest(Bugs))
+
+
+###################################################
+### code chunk number 31: replicated.Rnw:576-577
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(K)
+
+
+###################################################
+### code chunk number 32: replicated.Rnw:582-584
+###################################################
+H <- hyperframe(Bugs=waterstriders)
+with(H, nndist(Bugs))
+
+
+###################################################
+### code chunk number 33: replicated.Rnw:590-591
+###################################################
+with(H, min(nndist(Bugs)))
+
+
+###################################################
+### code chunk number 34: replicated.Rnw:603-604
+###################################################
+simba$Dist <- with(simba, distmap(Points))
+
+
+###################################################
+### code chunk number 35: replicated.Rnw:617-621
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+lambda <- rexp(6, rate=1/50)
+H <- hyperframe(lambda=lambda)
+H$Points <- with(H, rpoispp(lambda))
+plot(H, quote(plot(Points, main=paste("lambda=", signif(lambda, 4)))))
+
+
+###################################################
+### code chunk number 36: replicated.Rnw:627-628
+###################################################
+H$X <- with(H, rpoispp(50))
+
+
+###################################################
+### code chunk number 37: replicated.Rnw:657-658
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(simba, quote(plot(density(Points), main="")), nrows=2)
+
+
+###################################################
+### code chunk number 38: replicated.Rnw:677-679
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+rhos <- with(demohyper, rhohat(Points, Image))
+plot(rhos)
+
+
+###################################################
+### code chunk number 39: replicated.Rnw:696-697 (eval = FALSE)
+###################################################
+## mppm(formula, data, interaction, ...)
+
+
+###################################################
+### code chunk number 40: replicated.Rnw:707-708 (eval = FALSE)
+###################################################
+## mppm(Points ~ group, simba, Poisson())
+
+
+###################################################
+### code chunk number 41: replicated.Rnw:741-742
+###################################################
+mppm(Points ~ 1, simba)
+
+
+###################################################
+### code chunk number 42: replicated.Rnw:749-750
+###################################################
+mppm(Points ~ group, simba)
+
+
+###################################################
+### code chunk number 43: replicated.Rnw:756-757
+###################################################
+mppm(Points ~ id, simba)
+
+
+###################################################
+### code chunk number 44: replicated.Rnw:767-768
+###################################################
+mppm(Points ~ Image, data=demohyper)
+
+
+###################################################
+### code chunk number 45: replicated.Rnw:786-787 (eval = FALSE)
+###################################################
+## mppm(Points ~ offset(log(Image)), data=demohyper)
+
+
+###################################################
+### code chunk number 46: replicated.Rnw:799-800 (eval = FALSE)
+###################################################
+## mppm(Points ~ log(Image), data=demop)
+
+
+###################################################
+### code chunk number 47: replicated.Rnw:817-818 (eval = FALSE)
+###################################################
+## mppm(formula, data, interaction, ..., iformula=NULL)
+
+
+###################################################
+### code chunk number 48: replicated.Rnw:868-869
+###################################################
+radii <- with(simba, mean(nndist(Points)))
+
+
+###################################################
+### code chunk number 49: replicated.Rnw:876-878
+###################################################
+Rad <- hyperframe(R=radii)
+Str <- with(Rad, Strauss(R))
+
+
+###################################################
+### code chunk number 50: replicated.Rnw:883-885
+###################################################
+Int <- hyperframe(str=Str)
+mppm(Points ~ 1, simba, interaction=Int)
+
+
+###################################################
+### code chunk number 51: replicated.Rnw:912-915
+###################################################
+h <- hyperframe(Y=waterstriders)
+g <- hyperframe(po=Poisson(), str4 = Strauss(4), str7= Strauss(7))
+mppm(Y ~ 1, data=h, interaction=g, iformula=~str4)
+
+
+###################################################
+### code chunk number 52: replicated.Rnw:926-927
+###################################################
+fit <- mppm(Points ~ 1, simba, Strauss(0.07), iformula = ~Interaction*group)
+
+
+###################################################
+### code chunk number 53: replicated.Rnw:945-946
+###################################################
+fit
+
+
+###################################################
+### code chunk number 54: replicated.Rnw:949-951
+###################################################
+co <- coef(fit)
+si <- function(x) { signif(x, 4) }
+
+
+###################################################
+### code chunk number 55: replicated.Rnw:962-963
+###################################################
+coef(fit)
+
+
+###################################################
+### code chunk number 56: replicated.Rnw:1020-1021 (eval = FALSE)
+###################################################
+## interaction=hyperframe(po=Poisson(), str=Strauss(0.07))
+
+
+###################################################
+### code chunk number 57: replicated.Rnw:1026-1027 (eval = FALSE)
+###################################################
+## iformula=~ifelse(group=="control", po, str)
+
+
+###################################################
+### code chunk number 58: replicated.Rnw:1037-1038 (eval = FALSE)
+###################################################
+## iformula=~I((group=="control")*po) + I((group=="treatment") * str)
+
+
+###################################################
+### code chunk number 59: replicated.Rnw:1048-1053
+###################################################
+g <- hyperframe(po=Poisson(), str=Strauss(0.07))
+fit2 <- mppm(Points ~ 1, simba, g, 
+             iformula=~I((group=="control")*po) 
+                     + I((group=="treatment") * str))
+fit2
+
+
+###################################################
+### code chunk number 60: replicated.Rnw:1176-1178
+###################################################
+H <- hyperframe(P=waterstriders)
+mppm(P ~ 1, H, random=~1|id)
+
+
+###################################################
+### code chunk number 61: replicated.Rnw:1185-1186 (eval = FALSE)
+###################################################
+## mppm(Neurons ~ AstroIm, random=~AstroIm|WellNumber)
+
+
+###################################################
+### code chunk number 62: replicated.Rnw:1209-1212
+###################################################
+H <- hyperframe(W=waterstriders)
+fit <- mppm(W ~ 1, H)
+subfits(fit)
+
+
+###################################################
+### code chunk number 63: replicated.Rnw:1233-1234 (eval = FALSE)
+###################################################
+## subfits <- subfits.new
+
+
+###################################################
+### code chunk number 64: replicated.Rnw:1246-1248
+###################################################
+H <- hyperframe(W=waterstriders)
+with(H, ppm(W))
+
+
+###################################################
+### code chunk number 65: replicated.Rnw:1271-1273
+###################################################
+fit <- mppm(P ~ x, hyperframe(P=waterstriders))
+res <- residuals(fit)
+
+
+###################################################
+### code chunk number 66: replicated.Rnw:1283-1284
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+plot(res)
+
+
+###################################################
+### code chunk number 67: replicated.Rnw:1289-1291
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+smor <- with(hyperframe(res=res), Smooth(res, sigma=4))
+plot(smor)
+
+
+###################################################
+### code chunk number 68: replicated.Rnw:1303-1306
+###################################################
+fit <- mppm(P ~ x, hyperframe(P=waterstriders))
+res <- residuals(fit)
+totres <- sapply(res, integral.msr)
+
+
+###################################################
+### code chunk number 69: replicated.Rnw:1312-1319
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+fit <- mppm(Points~Image, data=demohyper)
+resids <- residuals(fit, type="Pearson")
+totres <- sapply(resids, integral.msr)
+areas <- with(demohyper, area.owin(as.owin(Points)))
+df <- as.data.frame(demohyper[, "Group"])
+df$resids <- totres/areas
+plot(resids~Group, df)
+
+
+###################################################
+### code chunk number 70: replicated.Rnw:1340-1343
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+fit <- mppm(P ~ 1, hyperframe(P=waterstriders))
+sub <- hyperframe(Model=subfits(fit))
+plot(sub, quote(diagnose.ppm(Model)))
+
+
+###################################################
+### code chunk number 71: replicated.Rnw:1356-1364
+###################################################
+H <- hyperframe(P = waterstriders)
+fitall <- mppm(P ~ 1, H)
+together <- subfits(fitall)
+separate <- with(H, ppm(P))
+Fits <- hyperframe(Together=together, Separate=separate)
+dr <- with(Fits, unlist(coef(Separate)) - unlist(coef(Together)))
+dr
+exp(dr)
+
+
+###################################################
+### code chunk number 72: replicated.Rnw:1381-1390
+###################################################
+H <- hyperframe(X=waterstriders)
+
+# Poisson with constant intensity for all patterns
+fit1 <- mppm(X~1, H)
+quadrat.test(fit1, nx=2)
+
+# uniform Poisson with different intensity for each pattern
+fit2 <- mppm(X ~ id, H)
+quadrat.test(fit2, nx=2)
+
+
+###################################################
+### code chunk number 73: replicated.Rnw:1419-1420 (eval = FALSE)
+###################################################
+## kstest.mppm(model, covariate)
+
+
diff --git a/inst/doc/replicated.Rnw b/inst/doc/replicated.Rnw
new file mode 100644
index 0000000..3b32240
--- /dev/null
+++ b/inst/doc/replicated.Rnw
@@ -0,0 +1,1525 @@
+\documentclass[11pt]{article}
+
+% \VignetteIndexEntry{Analysing Replicated Point Patterns in Spatstat}
+
+\usepackage{graphicx}
+\usepackage{Sweave}
+\usepackage{bm}
+\usepackage{anysize}
+
+\marginsize{2cm}{2cm}{2cm}{2cm}
+
+\newcommand{\pkg}[1]{\texttt{#1}}
+\newcommand{\code}[1]{\texttt{#1}}
+\newcommand{\R}{{\sf R}}
+\newcommand{\spst}{\pkg{spatstat}}
+\newcommand{\Spst}{\pkg{Spatstat}}
+
+\newcommand{\bold}[1]{{\textbf {#1}}}
+
+\newcommand{\indicate}[1]{\boldmaths{1}\{ {#1} \}}
+\newcommand{\dee}[1]{\, {\rm d}{#1}}
+\newcommand{\boldmaths}[1]{{\ensuremath\boldsymbol{#1}}}
+\newcommand{\xx}{\boldmaths{x}}
+
+\begin{document}
+\bibliographystyle{plain}
+\thispagestyle{empty}
+
+<<echo=FALSE,results=hide,fig=FALSE>>=
+options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1))))
+@ 
+\SweaveOpts{eps=TRUE}
+\setkeys{Gin}{width=0.6\textwidth}
+
+<<echo=FALSE,results=hide>>=
+library(spatstat)
+spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) })
+sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Date")
+sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Version")
+options(useFancyQuotes=FALSE)
+@ 
+
+\title{Analysing replicated point patterns in \texttt{spatstat}}
+\author{Adrian Baddeley}
+\date{For \spst\ version \texttt{\Sexpr{sversion}}}
+\maketitle
+
+\begin{abstract}
+This document describes \spst's capabilities for
+fitting models to replicated point patterns.
+More generally it applies to data from a designed experiment
+in which the response from each unit is a spatial point pattern.
+\end{abstract}
+
+\tableofcontents
+\newpage
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Introduction}
+
+`Replicated point patterns' are datasets consisting of 
+several point patterns which can be 
+regarded as independent repetitions of the same experiment. For example,
+three point patterns taken from micrographs of three pipette samples of the
+same jug of milk could be assumed to be replicated observations.
+
+More generally we could have several experimental groups, with 
+replicated point pattern data in each group. For example there may be
+two jugs of milk that were treated differently, and we take three
+pipette samples from each jug.
+
+Even more generally our point patterns could be the result
+of a designed experiment involving
+control and treatment groups, covariates such as temperature,
+and even spatial covariates (such as image data). 
+
+This document describes some capabilities available in the \spst\ package
+for analysing such data. 
+\textbf{For further detail, see Chapter 16 of the spatstat book \cite{TheBook}.}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Overview of software}
+
+The main components needed are:
+
+\begin{itemize}
+\item the model-fitting function \texttt{mppm}, an extension of the
+  \texttt{spatstat} function \texttt{ppm}, which fits Gibbs point process
+  models to multiple point pattern datasets;
+\item support for the class \texttt{"mppm"} of point process models
+   fitted by \texttt{mppm} (e.g.\ functions to print and plot the fitted
+   model, and analysis of deviance for Poisson models);
+\item some tools for exploratory data analysis;
+\item basic support for the data from such experiments
+  by storing the data in a \emph{``hyperframe''}. A hyperframe is like
+  a data frame, except that each entry in a column can be a point pattern
+  or a pixel image, as well as a single number or categorical value.
+\item four example datasets.
+\end{itemize}
+
+\section{Formulating the problem}
+
+We view the experiment as involving a series of
+{\em `units'\/}.
+Each unit is subjected to a known set of experimental conditions 
+(described by the values of the {\em covariates\/}), and
+each unit yields a {\em response\/} which is a spatial point pattern.
+The value of a particular covariate for each unit can be
+either a single value (numerical, logical or factor),
+or a pixel image.
+
+Three important cases are:
+\begin{description}
+\item[independent replicates:]
+We observe $n$
+different point patterns that can be regarded as independent replicates,
+i.e.\ independent realisations of the same point process.
+The `responses' are the point patterns; there are no covariates.
+\item[replication in groups:]
+there are $K$ different experimental groups (e.g. control, aspirin,
+nurofen). In group $k$ ($k=1,\ldots,K$) we observe $n_k$
+point patterns which can be regarded as independent replicates within
+this group. We regard this as an experiment with $n = \sum_k n_k$ 
+units. The responses are the point patterns; there is one covariate
+which is a factor (categorical variable) identifying which group 
+each point pattern belongs to.
+\item[general case:] 
+there are covariates other than factors that influence
+the response. The point patterns are assumed to be independent,
+but their distributions may differ from one pattern to another.
+\end{description}
+
+Examples of these three cases are given in the 
+datasets \texttt{waterstriders}, \texttt{pyramidal} and \texttt{demohyper}
+respectively, which are installed in \spst.
+
+\section{Installed datasets}
+
+The following datasets are currently installed in \spst.
+
+\begin{itemize}
+\item \texttt{waterstriders}: Penttinen's \cite{pent84} waterstriders data
+recording the locations of insect larvae on a pond in 3 independent
+experiments.
+\item \texttt{pyramidal}: data from Diggle, Lange and Benes 
+\cite{digglangbene91} on the locations of pyramidal neurons in 
+human brain, 31 human subjects grouped into 3 groups (controls,
+schizoaffective and schizophrenic).
+\item \texttt{flu}: data from Chen et al \cite{chenetal08}
+giving the locations of two different virus proteins 
+on the membranes of cells infected with influenza virus;
+41 multitype point patterns divided into two virus types
+(wild and mutant) and two stain types. 
+\item \texttt{simba}: simulated data from an experiment with two groups
+and 5 replicate point patterns per group.
+\item \texttt{demohyper}: simulated data from an experiment with two 
+  groups in which each experimental unit has a point pattern response
+  and a pixel image covariate. 
+\end{itemize}
+
+\section{Lists of point patterns}
+
+First we need a convenient way to store the \emph{responses}
+from all the units in an experiment.
+
+An individual point pattern is stored as an object of class \verb!"ppp"!. 
+The easiest way to store all the responses is to form a list
+of \verb!"ppp"! objects.
+
+\subsection{Waterstriders data}
+
+The \texttt{waterstriders} data are an example of this type.
+The data consist of 3 independent point patterns representing the 
+locations of insect larvae on a pond. See \texttt{help(waterstriders)}.
+
+<<>>=
+waterstriders
+@ 
+
+The \texttt{waterstriders} dataset is a list, each of whose entries
+is a point pattern (an object of class \verb!"ppp"!).
+Note that the observation windows of the
+three point patterns are \emph{not} identical. 
+
+\subsection{The class \texttt{listof}}
+
+For convenience, the \texttt{waterstriders} dataset also belongs to the
+class \verb!"listof"!. This is a simple mechanism to allow us to
+handle the list neatly --- for example, we can provide
+special methods for printing, plotting and summarising the list.
+
+\SweaveOpts{width=6,height=2}
+\setkeys{Gin}{width=0.9\textwidth}
+
+<<fig=TRUE>>=
+plot(waterstriders, main="")
+@ 
+
+Notice that the 
+plot method displays each entry of the list in a separate panel.
+There's also the summary method:
+
+<<>>=
+summary(waterstriders)
+@ 
+
+\subsection{Creating a \texttt{listof} object}
+
+For example, here is a simulated dataset containing three
+independent realisations of the Poisson process with intensity 100.
+
+<<>>=
+X <- listof(rpoispp(100), rpoispp(100), rpoispp(100))
+@ 
+
+Then it can be printed and plotted.
+
+<<fig=TRUE>>=
+plot(X)
+X
+@ 
+
+To convert an existing list to the class \code{listof}, use
+\code{as.listof}.
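+For example (a minimal sketch):
+<<eval=FALSE>>=
+Y <- list(rpoispp(50), rpoispp(50), rpoispp(50))
+Y <- as.listof(Y)   # Y now prints and plots like waterstriders
+@ 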
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Hyperframes}
+
+A \emph{hyperframe} is like a data frame, except that its entries
+can be objects of any kind.
+It is effectively a two-dimensional array
+in which each column consists either of
+atomic values of one type (as in a data frame)
+or of objects of one class.
+
+The entries in a hyperframe can be point patterns, pixel images,
+windows, or any other objects. 
+
+To analyse an experiment, we will store {\bf all} the data from the experiment
+in a single hyperframe. The rows of the hyperframe will correspond 
+to different experimental units,
+while the columns represent different variables 
+(response variables or covariates).
+
+\subsection{Creating hyperframes}
+
+The function \texttt{hyperframe} will create a hyperframe.
+
+<<eval=FALSE>>=
+hyperframe(...)
+@ 
+
+The arguments \verb!...! are any number of arguments of
+the form \texttt{tag=value}. Each \texttt{value} will
+become a column of the array. The \texttt{tag} determines the name
+of the column.
+
+Each \texttt{value} can be either
+\begin{itemize}
+\item an atomic vector or factor
+  (i.e. numeric vector, integer vector, character vector, logical
+  vector, complex vector or factor)
+\item a list of objects which are all of the same class
+\item one atomic value, which will be replicated to make an atomic
+  vector or factor
+\item one object, which will be replicated to make a list of identical objects.
+\end{itemize}
+    
+All columns (vectors, factors and lists) must be of the same length,
+if their length is greater than 1. 
+
+For example, here is a hyperframe containing a column of
+numbers and a column of \emph{functions}:
+
+<<>>=
+H <- hyperframe(X=1:3, Y=list(sin,cos,tan))
+H
+@ 
+
+Note that a column of character strings will be converted to a factor, 
+unless you set \texttt{stringsAsFactors=FALSE} in the call to
+\code{hyperframe}. This is the same behaviour as for the function 
+\code{data.frame}.
+
+<<>>=
+G <- hyperframe(X=1:3, Y=letters[1:3], Z=factor(letters[1:3]),
+                W=list(rpoispp(100),rpoispp(100), rpoispp(100)),
+                U=42,
+                V=rpoispp(100), stringsAsFactors=FALSE)
+G
+@ 
+
+This hyperframe has 3 rows. The columns named \texttt{U} and \texttt{V}
+are constant (all entries in a column are the same). The column named
+\texttt{Y} is a character vector.
+
+\subsection{Hyperframes of data}
+
+As explained above, all the data from an experiment are stored in
+a single hyperframe, with one row for each experimental unit
+and one column for each variable (response or covariate).
+
+Several examples of hyperframes are provided with the package,
+including \texttt{demohyper}, \texttt{flu}, \texttt{simba}
+and \texttt{pyramidal}, described above.
+
+The \texttt{simba} dataset contains simulated data from an
+experiment with a `control' group and a `treatment' group, each 
+group containing 5 experimental units. The responses in the control group are 
+independent Poisson point patterns with intensity 80.
+The responses in the treatment group are independent realisations of
+a Strauss process (see \texttt{help(simba)} for details).
+The \texttt{simba} dataset is a hyperframe with 10 rows and 2 columns:
+\texttt{Points} (the point patterns) and \texttt{group} (a factor 
+with levels \texttt{control} and \texttt{treatment}).
+
+<<>>=
+simba
+@ 
+
+The \texttt{pyramidal} dataset contains data from Diggle, Lange and Benes 
+\cite{digglangbene91} on the locations of pyramidal neurons in 
+the human brain. One point pattern was observed in each of 31 human subjects.
+The subjects were classified
+into 3 groups (controls, schizoaffective and schizophrenic).
+The \texttt{pyramidal} dataset is a hyperframe with 31 rows
+and 2 columns: \code{Neurons} (the point patterns) and \code{group}
+(a factor with levels \texttt{control}, \texttt{schizoaffective} 
+and \texttt{schizophrenic}).
+
+<<>>=
+pyramidal
+@ 
+
+The \texttt{waterstriders} dataset is not a hyperframe; it's just a 
+list of point patterns. It can easily be converted into a hyperframe:
+
+<<>>=
+ws <- hyperframe(Striders=waterstriders)
+@ 
+
+\subsection{Columns of a hyperframe}
+
+Individual columns of a hyperframe can be extracted using \verb!$!:
+
+<<>>=
+H$X
+H$Y
+@ 
+
+The result of \verb!$! is a vector or factor if the column contains
+atomic values; otherwise it is a list of objects (with class \texttt{"listof"}
+to make it easier to print and plot).
+
+Individual columns can also be assigned (overwritten or created)
+using \verb!$<-!:
+
+<<>>=
+H$U <- letters[1:3]
+H
+@ 
+
+This can be used to build up a hyperframe column-by-column:
+
+<<>>=
+G <- hyperframe()
+G$X <- waterstriders
+G$Y <- 1:3
+G
+@ 
+
+\subsection{Subsets of a hyperframe}
+
+Other subsets of a hyperframe
+can be extracted with \verb![!:
+
+<<>>=
+H[,1]
+H[2,]
+H[2:3, ]
+H[1,1]
+@ 
+
+The result of \verb![! is a hyperframe, unless you set \verb!drop=TRUE!
+and the subset consists of only one element or one column:
+
+<<>>=
+H[,1,drop=TRUE]
+H[1,1,drop=TRUE]
+H[1,2,drop=TRUE]
+@ 
+
+Currently there is no method for \verb![<-! that would allow
+you to assign values to a subset of a hyperframe.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Plotting}
+
+\subsection{Plotting a \code{listof} object}
+
+The plot method for \code{listof} objects has formal arguments
+
+<<eval=FALSE>>=
+plot.listof(x, ..., main, arrange = TRUE, nrows = NULL, ncols = NULL)
+@ 
+
+where \code{main} is a title for the entire page.
+
+If \code{arrange=TRUE} then the entries of the list are displayed  
+in separate panels on the same page (with \code{nrows} rows and
+\code{ncols} columns of panels), while if \code{arrange=FALSE} then 
+the entries are just plotted as a series of plot frames.
+
+The extra arguments \verb!...! control the individual plot panels.
+These arguments will be passed to the plot method
+that displays each entry of the list. Suitable arguments depend on the
+type of entries.
+
+<<fig=TRUE>>= 
+plot(waterstriders, pch=16, nrows=1)
+@ 
+
+\subsection{Plotting a hyperframe}
+
+\subsubsection{Plotting one column}
+
+If \code{h} is a hyperframe, then the default action of
+\code{plot(h)} is to extract the first column
+of \code{h} and plot each of the entries in a separate panel on 
+one page (actually using the plot method for class \verb!"listof"!). 
+
+\SweaveOpts{width=7,height=5}
+\setkeys{Gin}{width=0.9\textwidth}
+
+<<fig=TRUE>>=
+plot(simba)
+@ 
+
+This only works if the entries in the first column are objects 
+for which a plot method is defined (for example, point patterns, images,
+windows). 
+
+To select a different column, use \verb!$! or \verb![!:
+
+\SweaveOpts{width=6,height=2}
+\setkeys{Gin}{width=0.9\textwidth}
+
+<<fig=TRUE>>=
+H <- hyperframe(X=1:3, Y=list(sin,cos,tan))
+plot(H$Y)
+@ 
+
+The plot can be controlled using the arguments for \code{plot.listof}
+(and, in this case, \code{plot.function}, since \verb!H$Y! consists of 
+functions).
+
+\subsubsection{Complex plots}
+
+More generally, we can display any kind of higher-order plot
+involving one or more columns of a hyperframe:
+
+<<eval=FALSE>>=
+plot(h, e)
+@ 
+
+where \code{h} is a hyperframe and \code{e} is an \R\ language call 
+or expression that must be evaluated in each row to generate each plot panel.
+
+\SweaveOpts{width=9,height=5}
+\setkeys{Gin}{width=0.9\textwidth}
+
+<<fig=TRUE>>=
+plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }))
+@ 
+
+Note the use of \code{quote}, which prevents the code
+inside the braces from being evaluated immediately.
+
+To plot the $K$-functions of each of the patterns in the
+\code{waterstriders} dataset,
+
+\SweaveOpts{width=6,height=2}
+\setkeys{Gin}{width=0.9\textwidth}
+
+<<fig=TRUE>>=
+H <- hyperframe(Bugs=waterstriders)
+plot(H, quote(plot(Kest(Bugs))), marsize=1)
+@ 
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Data analysis}
+
+\subsection{Computing with hyperframes}
+
+Often we want to perform some computation on each row of
+a hyperframe. 
+
+In a data frame, this can be done using the command \code{with}:
+
+<<>>=
+df <- data.frame(A=1:10, B=10:1)
+with(df, A-B)
+@ 
+
+In this example, the expression \code{A-B} is evaluated
+in each row of the data frame, and the result is a vector
+containing the computed values for each row.
+The function \code{with} is generic, and has a method for data frames,
+\code{with.data.frame}. The computation above
+was executed by \code{with.data.frame}. 
+
+The same syntax is available for hyperframes
+using the method \code{with.hyperframe}:
+
+<<eval=FALSE>>=
+with(h,e)
+@ 
+
+Here \code{h} is a hyperframe, and 
+\code{e} is an {\sf R} language construct involving the names
+of columns in \code{h}.
+For each row of \code{h}, the expression \code{e} will be evaluated
+in such a way that each entry in the row is identified by its
+column name. 
+
+<<>>=
+H <- hyperframe(Bugs=waterstriders)
+with(H, npoints(Bugs))
+with(H, distmap(Bugs))
+@
+
+The result of \code{with.hyperframe} 
+is a list of objects (of class \verb!"listof"!),
+or a vector or factor if appropriate.
+
+Notice that (unlike the situation for data frames)
+the operations in the expression \code{e} do not have to
+be vectorised. For example, \code{distmap} expects 
+a single point pattern, and is not vectorised to deal with 
+a list of point patterns. Instead, the expression \code{distmap(Bugs)}
+is evaluated separately in each row of the hyperframe.
+
+\subsection{Summary statistics}
+
+One application of \code{with.hyperframe} is to calculate summary statistics
+for each row of a hyperframe.
+
+For example, the number of points in a point pattern \code{X}
+is returned by \code{npoints(X)}. To calculate this for each of the 
+responses in the \code{simba} dataset,
+
+<<>>=
+with(simba, npoints(Points))
+@ 
+
+The summary statistic can be any kind of object. For example, to
+compute the empirical $K$-functions for each of the 
+patterns in the \code{waterstriders} dataset,
+
+<<>>=
+H <- hyperframe(Bugs=waterstriders)
+K <- with(H, Kest(Bugs))
+@ 
+
+To plot these $K$-functions you can then just type 
+
+\SweaveOpts{width=6,height=2}
+\setkeys{Gin}{width=0.9\textwidth}
+
+<<fig=TRUE>>=
+plot(K)
+@ 
+
+The summary statistic for each row could be a numeric vector:
+
+<<>>=
+H <- hyperframe(Bugs=waterstriders)
+with(H, nndist(Bugs))
+@ 
+
+The result is a list, each entry being a vector of nearest neighbour distances.
+To find the minimum interpoint distance in each pattern:
+
+<<>>=
+with(H, min(nndist(Bugs)))
+@ 
+
+\subsection{Generating new columns}
+
+New columns of a hyperframe can be created by computation
+from the existing columns. 
+
+For example, I can add a new column to the \code{simba} dataset
+that contains pixel images of the distance maps for each of the
+point pattern responses. 
+
+<<fig=FALSE>>=
+simba$Dist <- with(simba, distmap(Points))
+@ 
+
+\subsection{Simulation}
+
+Generating new columns by computation can also be used for simulation.
+For example, to generate Poisson point patterns with different
+intensities, where the intensities are given by a numeric vector
+\code{lambda}:
+
+\SweaveOpts{width=6,height=6}
+\setkeys{Gin}{width=0.7\textwidth}
+
+<<fig=TRUE>>=
+lambda <- rexp(6, rate=1/50)
+H <- hyperframe(lambda=lambda)
+H$Points <- with(H, rpoispp(lambda))
+plot(H, quote(plot(Points, main=paste("lambda=", signif(lambda, 4)))))
+@ 
+
+It's even simpler to generate six independent Poisson point patterns
+(one per row of \code{H}) with the \emph{same} intensity 50, say:
+
+<<fig=FALSE>>=
+H$X <- with(H, rpoispp(50))
+@ 
+
+(the expression \code{rpoispp(50)} is evaluated once in each row,
+yielding a different point pattern in each row because of the
+randomness).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Exploratory data analysis}
+
+Before fitting models to the data, it is prudent to explore 
+the data to detect unusual features and to suggest appropriate 
+models. 
+
+\subsection{Exploring spatial trend and covariate effects}
+
+Points may be distributed non-uniformly either because they are
+intrinsically non-uniform (``spatial trend'') or because their abundance
+depends on a spatial covariate (``covariate effects''). 
+
+Non-uniformity of a point pattern can be investigated using 
+the kernel smoothed intensity. This is the convolution of the point pattern
+with a smooth density called the kernel. Effectively each point
+in the pattern is replaced by a copy of the kernel, and the sum of all
+copies of the kernel is the kernel-smoothed intensity function.
+It is computed by \texttt{density.ppp} separately for each point pattern.
+
+<<fig=TRUE>>=
+plot(simba, quote(plot(density(Points), main="")), nrows=2)
+@ 
+
+Covariate effects due to a real-valued spatial covariate (a real-valued
+pixel image) can be investigated
+using the command \code{rhohat}. This uses a kernel smoothing
+technique to fit a model of the form 
+\[
+           \lambda(u) = \rho(Z(u))
+\]
+where $\lambda(u)$ is the point process intensity at a location $u$,
+and $Z(u)$ is the value of the spatial covariate at that location.
+Here $\rho$ is an unknown, smooth function which is to be estimated.
+The function $\rho$ expresses the effect of the
+spatial covariate on the point process intensity. If $\rho$ turns out to
+be constant, then the covariate has no effect on point process intensity
+(and the constant value of $\rho$ is the constant intensity of the
+point process). 
+
+<<fig=TRUE>>=
+rhos <- with(demohyper, rhohat(Points, Image))
+plot(rhos)
+@ 
+
+\SweaveOpts{width=6,height=4}
+\setkeys{Gin}{width=0.9\textwidth}
+
+\subsection{Exploring interpoint interaction}
+
+Still to be written.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Fitting models of spatial trend}
+
+The command \code{mppm} fits models to multiple point patterns.
+Its syntax is very similar to that of \code{lm} and \code{glm}:
+
+<<eval=FALSE>>=
+mppm(formula, data, interaction, ...)
+@ 
+
+where \code{formula} is a formula describing the systematic trend
+part of the model, \code{data} is a hyperframe containing all the data 
+(responses and covariates), and \code{interaction} determines the
+stochastic interpoint interaction part of the model.
+
+For example:
+
+<<eval=FALSE>>=
+mppm(Points ~ group, simba, Poisson())
+@ 
+
+Note that the formula has a left hand side, which identifies
+the response. This should be the name of a column of \code{data}.
+
+\subsection{Trend formula}
+
+The right-hand side of \code{formula} is an expression for the
+linear predictor (effectively the {\bf logarithm} 
+of the spatial trend). 
+
+The variables appearing on the right-hand side
+of \code{formula} should be either
+\begin{itemize}
+\item names of columns in \code{data}
+\item objects in the {\sf R} global
+environment (such as \code{pi} and \code{log})
+\item the reserved names \code{x}, \code{y} 
+(representing Cartesian coordinates), \code{marks} (representing mark values
+attached to points) or \code{id} (a factor representing the row number
+in the hyperframe).
+\end{itemize}
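+
+For example, a log-linear trend in the Cartesian coordinates could be
+specified using the reserved names \code{x} and \code{y}
+(a sketch, not evaluated here):
+
+<<eval=FALSE>>=
+mppm(Points ~ x + y, simba)
+@ 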
+
+\subsubsection{Design covariates}
+
+The variables in the trend could be `design covariates'.
+
+For example, to fit a model to the \code{simba} dataset
+in which all patterns are independent replicates of the
+same uniform Poisson process, with the same constant
+intensity:
+
+<<>>=
+mppm(Points ~ 1, simba)
+@ 
+
+To fit a model in which the two groups of patterns (control and treatment
+groups) each consist of independent replicates of a uniform Poisson process,
+but with possibly different intensity in each group:
+
+<<>>=
+mppm(Points ~ group, simba)
+@ 
+
+To fit a uniform Poisson process to each pattern, with 
+different intensity for each pattern:
+
+<<>>=
+mppm(Points ~ id, simba)
+@ 
+
+\subsubsection{Spatial covariates}
+
+The variables in the trend could be `spatial covariates'.
+
+For example, the \code{demohyper} dataset has a column \code{Image}
+containing pixel images. 
+
+<<>>=
+mppm(Points ~ Image, data=demohyper)
+@ 
+
+This model postulates that each pattern is a Poisson process
+with intensity of the form
+\[
+      \lambda(u) = \exp(\beta_0 + \beta_1 Z(u))
+\]
+at location $u$, where $\beta_0, \beta_1$ are coefficients
+to be estimated, and $Z(u)$ is the value of the pixel image
+\code{Image} at location $u$.
+
+It may or may not be appropriate to assume that the intensity of the points
+is an exponential function of the image pixel value $Z$.
+If instead 
+we wanted the intensity $\lambda(u)$ to be \emph{proportional} to $Z(u)$,
+the appropriate model is 
+
+<<eval=FALSE>>=
+mppm(Points ~ offset(log(Image)), data=demohyper)
+@ 
+
+which corresponds to an intensity proportional to \code{Image},
+\[
+      \lambda(u) = \exp(\beta_0 + \log Z(u)) = e^{\beta_0} \; Z(u).
+\]
+The \code{offset} indicates that there is no coefficient in front
+of $\log Z(u)$. 
+
+Alternatively we could allow a coefficient:
+
+<<eval=FALSE>>=
+mppm(Points ~ log(Image), data=demohyper)
+@ 
+
+which corresponds to a gamma transformation of \code{Image},
+\[
+      \lambda(u) = \exp(\beta_0 + \beta_1 \log Z(u)) 
+      = e^{\beta_0} \; Z(u)^{\beta_1}.
+\]
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Interpoint interaction}
+
+The stochastic interpoint interaction in a point process model
+is specified by the arguments \code{interaction} and (optionally)
+\code{iformula} in 
+
+<<eval=FALSE>>=
+mppm(formula, data, interaction, ..., iformula=NULL)
+@ 
+
+\subsection{Same interaction for all patterns}
+
+In the simplest case,
+the argument \texttt{interaction} is one of the familiar 
+objects that describe the point process interaction structure.
+It is an object of class \texttt{"interact"}
+created by calling one of the functions
+
+\begin{center}
+  \begin{tabular}{rl}
+    \texttt{Poisson()}  & the Poisson point process\\     
+    \texttt{Hardcore()}  & the hard core process \\ 
+    \texttt{Strauss()}  & the Strauss process \\ 
+    \texttt{StraussHard()}  & the Strauss/hard core point process\\ 
+    \texttt{Softcore()}  & pairwise interaction, soft core potential\\  
+    \texttt{PairPiece()}  & pairwise interaction, piecewise constant \\ 
+    \texttt{DiggleGatesStibbard() }  & Diggle-Gates-Stibbard pair potential \\ 
+    \texttt{DiggleGratton() }  & Diggle-Gratton pair potential \\ 
+    \texttt{Fiksel() }  & Fiksel pair potential \\ 
+    \texttt{LennardJones() }  & Lennard-Jones pair potential \\ 
+    \texttt{Pairwise()}  &	pairwise interaction, user-supplied potential\\ 
+    \texttt{AreaInter()}  &		area-interaction potential\\ 
+    \texttt{Geyer()}	  & Geyer's saturation process\\ 
+    \texttt{BadGey()}	  & multiscale Geyer saturation process\\ 
+    \texttt{Saturated()}  &	Saturated pair model, user-supplied potential\\ 
+    \texttt{OrdThresh()}  &		Ord process, threshold potential\\ 
+    \texttt{Ord()}  & 		        Ord model, user-supplied potential \\ 
+    \texttt{MultiStrauss()}  & 		multitype Strauss process \\ 
+    \texttt{MultiStraussHard()}  & 	multitype Strauss/hard core process \\ 
+    \texttt{Concom()}  &	connected component interaction \\ 
+    \texttt{Hybrid()}  &	hybrid of several interactions \\ 
+  \end{tabular}
+\end{center}
+
+In this `simple' usage of \texttt{mppm}, the point process model
+assumes that all point patterns have exactly the same interpoint
+interaction (with the same interaction parameters), and differ only
+in their spatial trend.
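+
+For example, to fit a Strauss process with the same interaction radius
+to every pattern in the \code{simba} dataset (a sketch; the radius
+\code{0.07} is just an illustrative value):
+
+<<eval=FALSE>>=
+mppm(Points ~ group, simba, interaction=Strauss(0.07))
+@ 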
+
+\subsection{Hyperframe of interactions}
+
+More generally the argument \code{interaction} can be a hyperframe
+containing objects of class \texttt{"interact"}. 
+
+For example, we
+might want to fit a Strauss process to each point pattern, but with
+a different Strauss interaction radius for each pattern. 
+
+<<echo=FALSE,results=hide>>=
+radii <- with(simba, mean(nndist(Points)))
+@ 
+
+Suppose that \code{radii} is a vector of numbers which we could use
+as the values of the interaction radius for each case. 
+First we need to make the interaction objects:
+
+<<>>=
+Rad <- hyperframe(R=radii)
+Str <- with(Rad, Strauss(R))
+@ 
+
+Then we put them into a hyperframe and fit the model:
+
+<<>>=
+Int <- hyperframe(str=Str)
+mppm(Points ~ 1, simba, interaction=Int)
+@ 
+
+An important constraint is that all of the interaction objects 
+in one column must be \emph{instances of the same process} (e.g. Strauss)
+albeit possibly having different parameter values. For example, you cannot
+put Poisson and Strauss processes in the same column. 
+
+\subsection{Interaction formula}
+
+If \code{interaction} is a hyperframe, then 
+the additional argument \code{iformula} may be used to
+fully specify the interaction.
+
+(An \code{iformula} is also required if \code{interaction}
+has more than one column.)
+
+The \code{iformula} should be a formula without a left hand side.
+Variables on the right hand side are typically the names of
+columns in \code{interaction}.
+
+\subsubsection{Selecting one column}
+
+If the right hand side of \code{iformula} is a single name,
+then this identifies the column in \code{interaction}
+to be used as the interpoint interaction structure.
+
+<<>>=
+h <- hyperframe(Y=waterstriders)
+g <- hyperframe(po=Poisson(), str4 = Strauss(4), str7= Strauss(7))
+mppm(Y ~ 1, data=h, interaction=g, iformula=~str4)
+@ 
+
+\subsubsection{Interaction depending on design} 
+
+The \code{iformula} can also involve columns of \code{data}, but
+only those columns that are vectors or factors. This allows us to
+specify an interaction that depends on the experimental design.
+[This feature is {\bf experimental}.]
+For example
+
+<<>>=
+fit <- mppm(Points ~ 1, simba, Strauss(0.07), iformula = ~Interaction*group)
+@ 
+
+Since \code{Strauss(0.07)} is not a hyperframe, it is first converted
+to a hyperframe with a single column named \code{Interaction}.
+
+The \code{iformula = ~Interaction*group} specifies (since \code{group}
+is a factor) that the interpoint interaction shall have a different 
+coefficient in each experimental group. That is, we fit a model
+which has two different values for the Strauss interaction parameter $\gamma$,
+one for the control group and one for the treatment group.
+
+When you print the result of such a fit, 
+the package tries to 
+do `automatic interpretation' of the fitted model (translating the
+fitted interaction coefficients into meaningful numbers like $\gamma$). 
+This will be successful in \emph{most} cases:
+
+<<>>=
+fit
+@ 
+
+<<echo=FALSE,results=hide>>=
+co <- coef(fit)
+si <- function(x) { signif(x, 4) }
+@ 
+
+Thus we see that the estimate of the Strauss parameter $\gamma$ 
+for the control group is \Sexpr{si(exp(co[2]))}, and 
+for the treatment group \Sexpr{si(exp(sum(co[c(2,4)])))}
+(the correct values in this simulated dataset were $1$ and $0.5$).
+
+The fitted model can also be interpreted directly from the fitted
+canonical coefficients:
+
+<<>>=
+coef(fit)
+@ 
+
+The last output shows all the coefficients $\beta_j$
+in the linear predictor for the (log) conditional intensity. 
+
+The interpretation of the model coefficients, for any fitted model in \R,
+depends on the \emph{contrasts} which were applicable when the model was
+fitted. This is part of the core {\sf R} system: see \code{help(contrasts)}
+or \code{options(contrasts)}. If you did not specify otherwise, 
+the default is to use \emph{treatment contrasts}. This means that,
+for an explanatory variable which is a \texttt{factor} with $N$ levels, 
+the first level of the factor is used as a baseline, and the
+fitted model coefficients represent the factor levels $2, 3, \ldots, N$
+relative to this baseline. 
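+
+If you want to check which contrasts are in force, the current setting
+can be inspected directly; this is base {\sf R}, not part of
+\code{spatstat}:
+
+<<eval=FALSE>>=
+getOption("contrasts")
+@ 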
+
+In the output above, there is a coefficient for \code{(Intercept)} 
+and one for \code{grouptreatment}. These are coefficients related to
+the \code{group} factor. According to the ``treatment contrasts'' rule, 
+the \code{(Intercept)} coefficient is
+the estimated effect for the control group, and the
+\code{grouptreatment} coefficient is the estimated difference between
+the treatment and control groups. Thus the fitted first order
+trend is $\exp(\Sexpr{si(co[1])}) = \Sexpr{si(exp(co[1]))}$ 
+for the control group
+and $\exp(\Sexpr{si(co[1])} + \Sexpr{si(co[3])}) 
+ = \Sexpr{si(exp(sum(co[c(1,3)])))}$ for the treatment group.
+The correct values in this simulated dataset were 
+$80$ and $100$.
+
+The remaining coefficients in the output are \code{Interaction}
+and \code{Interaction:grouptreatment}. Recall that the Strauss process
+interaction term 
+is $\gamma^{t(u,\xx)} = \exp(t(u,\xx) \log\gamma)$
+at a spatial location $u$, for a point pattern $\xx$. 
+Since we're using treatment contrasts, the coefficient 
+\code{Interaction} is the estimate of 
+$\log\gamma$ for the control group.
+The coefficient  \code{Interaction:grouptreatment} is the 
+estimate of the difference in $\log\gamma$ between the 
+treatment and control groups. Thus the estimated Strauss interaction
+parameter $\gamma$ is $\exp(\Sexpr{si(co[2])}) = \Sexpr{si(exp(co[2]))}$ 
+for the control group and 
+$\exp(\Sexpr{si(co[2])} + (\Sexpr{si(co[4])})) = \Sexpr{si(exp(co[2]+co[4]))}$ 
+for the treatment group.
+The correct values were $1$ and $0.5$.
+
+\subsubsection{Completely different interactions for different cases} 
+
+In the previous example, when we fitted a Strauss model to all 
+point patterns in the \code{simba} dataset, the fitted model for the
+patterns in the control group was close to Poisson ($\gamma \approx 1$).
+Suppose we now want to fit a model which {\it is}
+Poisson in the control group, and Strauss in the treatment group. 
+The Poisson and Strauss interactions must be given as separate columns
+in a hyperframe of interactions:
+
+<<eval=FALSE>>=
+interaction=hyperframe(po=Poisson(), str=Strauss(0.07))
+@ 
+
+What do we write for the 
+\code{iformula}? The following \emph{will not} work:
+<<eval=FALSE>>=
+iformula=~ifelse(group=="control", po, str)
+@ 
+This does not work because the Poisson and Strauss models are `incompatible'
+inside such expressions. The canonical sufficient statistics for
+the Poisson and Strauss processes do not have the same dimension. 
+Internally in \code{mppm} we translate the symbols \code{po} and \code{str}
+into matrices; the dimensions of these matrices are different,
+so the \code{ifelse} expression cannot be evaluated.
+
+Instead we need something like the following:
+<<eval=FALSE>>=
+iformula=~I((group=="control")*po) + I((group=="treatment") * str)
+@ 
+The function \code{I} is a standard {\sf R} function that prevents the
+operators in its argument from being interpreted as formula operators
+(thus the \code{*} is interpreted as multiplication instead of a model
+interaction). The expression \code{(group=="control")} is logical,
+and when multiplied by the matrix \code{po}, yields a matrix.
+
+So the following does work:
+
+<<>>=
+g <- hyperframe(po=Poisson(), str=Strauss(0.07))
+fit2 <- mppm(Points ~ 1, simba, g, 
+             iformula=~I((group=="control")*po) 
+                     + I((group=="treatment") * str))
+fit2
+@ 
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%#%^!ifdef RANDOMEFFECTS
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Random effects}
+
+\subsection{Mixed effects models}
+
+It is also possible to fit models that include `random effects'.
+Effectively, some of the coefficients
+in the model are assumed to be Normally-distributed random variables
+instead of constants.
+
+\subsubsection{Mixed Poisson model}
+
+Consider the simplest model of a uniform Poisson process which we fitted to
+the 3 point patterns of waterstriders. It might be sensible to assume that
+each pattern is a realisation of a Poisson process, but with 
+{\em random intensity\/}. In each realisation the intensity $\lambda$
+is constant across different locations, but it
+is a different, random value in different realisations.
+This example is called a `mixed Poisson process' and belongs to the
+class of `Cox processes' (Poisson processes with random intensity 
+functions).
+Let's assume further that the log-intensity 
+is a Normal random variable.
+Then the model is a (very degenerate) special case of
+a `log-Gaussian Cox process'. 
+
+To fit such a model we use the standard techniques of mixed effects
+models \cite{lairware82,davigilt95,pinhbate00}.
+The mixed Poisson process which we discussed above would
+be written in standard form
+\begin{equation}
+\label{mixPois}
+   \lambda_i(u) = \exp(\mu + Z_i)
+\end{equation}
+for the $i$th point pattern, where $\mu$ is a parameter to be estimated 
+(the `fixed effect')
+and $Z_i \sim N(0, \sigma^2)$ is a zero-mean Normal random variable
+(the `random effect' for point pattern $i$). In the simplest case we
+would assume that $Z_1, \ldots, Z_n$ are independent.
+The variance $\sigma^2$ of the random effects
+would be estimated. One can also estimate the individual realised values
+$z_i$ of the random effects for each point pattern, although these are
+usually not of such great interest. Since the model includes both
+fixed and random effects, it is called a ``mixed-effects'' model.
+
+\subsubsection{Dependence structure}
+
+When we formulate a random-effects or mixed-effects model, we must 
+specify the dependence structure of the random effects. In the model above
+we assumed that the $Z_i$ are independent for all point patterns $i$. 
+If the experiment consists of two groups, we could alternatively assume
+that $Z_i = Z_j$ whenever $i$ and $j$ belong to the same group. In other words
+all the patterns in one group have the same value of the random effect.
+So the random effect is associated with the group rather than with
+individual patterns. This could be appropriate if, for example, 
+the groups represent different
+batches of a chemical. Each batch is prepared under slightly different 
+conditions so we believe that there are random variations between batches,
+but within a batch we believe that the chemical is well-mixed.
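+
+For the \code{simba} dataset, for example, this group-level dependence
+structure could be requested by a random intercept for each level of
+the factor \code{group} (a sketch; the \code{random} argument is
+explained below):
+
+<<eval=FALSE>>=
+mppm(Points ~ 1, simba, random=~1|group)
+@ 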
+
+\subsubsection{Random effects are coefficients}
+
+In the mixed Poisson model (\ref{mixPois}), 
+the random effect is an additive constant
+(with a random value) in the log-intensity. In general, a
+random effect is a \emph{coefficient} of one of the covariates.
+For example if $v$ is a real-valued design covariate (e.g. `temperature'), 
+with value $v_i$ for the $i$th point pattern, then we could assume
+\begin{equation}
+\label{ranef2}
+   \lambda_i(u) = \exp(\mu + Z_i v_i)
+\end{equation}
+where $Z_i \sim N(0, \sigma^2)$ are independent for different $i$.
+This model has a random effect in the dependence on $v$.
+
+We could also have a random effect for a spatial covariate $V$.
+Suppose $V_i$ is a real-valued image for the $i$th pattern 
+(so that $V_i(u)$ is the value of some covariate at the location $u$
+for the $i$th case). Then we could assume
+\begin{equation}
+\label{ranef3}
+   \lambda_i(u) = \exp(\mu + Z_i V_i(u))
+\end{equation}
+where $Z_i \sim N(0, \sigma^2)$ are independent for different $i$.
+This kind of random effect would be appropriate if, for example, the 
+images $V_i$ are not `normalised' or `standardised' relative to each 
+other (e.g.\ they are images taken under different illumination). Then the
+coefficients $Z_i$ effectively include the rescaling necessary to standardise
+the images.
+
+\subsection{Fitting a mixed-effects model}
+
+The call to \texttt{mppm} can also include the argument
+\texttt{random}. This should be a formula (with no left-hand side) 
+describing the structure of random effects. 
+
+The formula for random effects
+must be recognisable to \texttt{lme}. It is typically of the form
+\begin{verbatim}
+      ~x1 + ... + xn | g
+\end{verbatim}
+or
+\begin{verbatim}
+      ~x1 + ... + xn | g1/.../gm
+\end{verbatim}
+where \verb!x1 + ... + xn! specifies the covariates for the random effects
+and \texttt{g} or \verb!g1/.../gm! determines the grouping (dependence) 
+structure. Here \code{g} or \code{g1, \ldots, gm} should be factors.
+
+To fit the mixed Poisson model (\ref{mixPois}) to the waterstriders,
+we want to have a random intercept coefficient
+(so \texttt{x} is \texttt{1}) that varies for different point patterns
+(so \texttt{g} is \texttt{id}). 
+The reserved name \code{id} is a factor referring to the individual
+point pattern. Thus
+
+<<>>=
+H <- hyperframe(P=waterstriders)
+mppm(P ~ 1, H, random=~1|id)
+@ 
+
+To fit the mixed effects model (\ref{ranef2}) to the coculture data
+with the \code{AstroIm} covariate, with a random effect associated
+with each well,
+
+<<eval=FALSE>>=
+mppm(Neurons ~ AstroIm, random=~AstroIm|WellNumber)
+@ 
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%#%^!endif
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Studying the fitted model}
+
+Fitted models produced by \code{mppm}
+can be examined and validated in many ways.
+
+\subsection{Fits for each pattern}
+
+
+\subsubsection{Subfits}
+
+The command \code{subfits} takes an \code{mppm} object
+and extracts, for each individual point pattern, 
+the fitted point process model for that pattern
+\emph{that is implied by the overall fit}. It returns a list of 
+objects of class \code{ppm}. 
+
+<<>>=
+H <- hyperframe(W=waterstriders)
+fit <- mppm(W ~ 1, H)
+subfits(fit)
+@ 
+
+In this example the result is a list of three \code{ppm} objects
+representing the implied fits for each of the three point patterns
+in the \code{waterstriders} dataset.
+Notice that {\bf the fitted coefficients are the same} in all three 
+models. 
+
+Note that there are some unresolved difficulties with the implementation of
+\code{subfits}. Two completely different implementations are supplied
+in the package; they are called \code{subfits.old} 
+%(used in versions 0.1--1 and earlier) 
+and \code{subfits.new}.% (introduced in 0.1--2).
+The old version would occasionally crash. 
+Unfortunately the newer version \code{subfits.new} is quite memory-hungry
+and sometimes causes R to hang.
+We're still working on this problem. So for the time being,
+\code{subfits} is the same as \code{subfits.old}. You can change this
+simply by reassigning, e.g.
+
+<<eval=FALSE>>=
+subfits <- subfits.new
+@ 
+
+\subsubsection{Fitting separately to each pattern} 
+
+For comparison, we could fit a point process model separately
+to each point pattern dataset using \code{ppm}. The easy way to do this is 
+with \code{with.hyperframe}.
+
+To fit a \emph{separate} uniform Poisson 
+point process to each of the three waterstriders patterns,
+
+<<>>=
+H <- hyperframe(W=waterstriders)
+with(H, ppm(W))
+@ 
+
+The result is again a list of three fitted point process models
+(objects of class \code{ppm}), but now the fitted coefficients
+are different.
+
+\subsection{Residuals}
+
+One standard way to check a fitted model
+is to examine  the residuals. 
+
+\subsubsection{Point process residuals}
+
+Some recent papers \cite{baddetal05,baddmollpake08} have defined
+residuals for a fitted point process model (fitted to a \emph{single}
+point pattern). These residuals are implemented in \code{spatstat}
+as \code{residuals.ppm} and apply to an object of class \code{ppm}, 
+that is, a model fitted to a \emph{single} point pattern. 
+
+The command \code{residuals.mppm} computes the point process residuals
+for an \code{mppm} object. 
+
+<<>>=
+fit <- mppm(P ~ x, hyperframe(P=waterstriders))
+res <- residuals(fit)
+@ 
+
+The result is a list, with one entry for each of the point pattern
+datasets. Each list entry contains the point process residuals 
+for the corresponding point pattern dataset.
+Each entry in the list is a signed measure (object of class \code{"msr"}),
+as explained in the help for \code{residuals.ppm}.
+It can be plotted:
+
+<<fig=TRUE>>=
+plot(res)
+@ 
+
+You probably want the smoothed residual field:
+
+<<fig=TRUE>>=
+smor <- with(hyperframe(res=res), Smooth(res, sigma=4))
+plot(smor)
+@ 
+
+\subsubsection{Sums of residuals}
+
+It would be useful to have a residual that is a single value
+for each point pattern (representing how much that point pattern
+departs from the model fitted to all the point patterns).
+
+That can be computed by \emph{integrating} the residual measures
+using the function \code{integral.msr}:
+
+<<>>=
+fit <- mppm(P ~ x, hyperframe(P=waterstriders))
+res <- residuals(fit)
+totres <- sapply(res, integral.msr)
+@ 
+
+In designed experiments we can plot these total residuals against
+the design covariates:
+
+<<fig=TRUE>>=
+fit <- mppm(Points~Image, data=demohyper)
+resids <- residuals(fit, type="Pearson")
+totres <- sapply(resids, integral.msr)
+areas <- with(demohyper, area.owin(as.owin(Points)))
+df <- as.data.frame(demohyper[, "Group"])
+df$resids <- totres/areas
+plot(resids~Group, df)
+@ 
+
+\subsubsection{Four-panel diagnostic plots} 
+
+Sometimes a more useful tool is the function \code{diagnose.ppm}
+which produces a four-panel diagnostic plot based on the 
+point process residuals. However, it is only available for 
+\code{ppm} objects.
+
+To obtain a four-panel diagnostic plot for each of the 
+point patterns, do the following:
+
+\begin{enumerate}
+\item fit a model to multiple point patterns using \code{mppm}.
+\item extract the individual fits using \code{subfits}.
+\item plot the residuals of the individual fits.
+\end{enumerate}
+
+For example:
+
+<<fig=TRUE>>=
+fit <- mppm(P ~ 1, hyperframe(P=waterstriders))
+sub <- hyperframe(Model=subfits(fit))
+plot(sub, quote(diagnose.ppm(Model)))
+@ 
+
+(One could also do this for models fitted separately to the 
+individual point patterns.)
+
+\subsubsection{Residuals of the parameter estimates}
+
+We can also compare the parameter estimates obtained 
+by fitting the model simultaneously to all patterns (using \code{mppm})
+with those obtained by fitting the model separately to each 
+pattern (using \code{ppm}). 
+
+<<>>=
+H <- hyperframe(P = waterstriders)
+fitall <- mppm(P ~ 1, H)
+together <- subfits(fitall)
+separate <- with(H, ppm(P))
+Fits <- hyperframe(Together=together, Separate=separate)
+dr <- with(Fits, unlist(coef(Separate)) - unlist(coef(Together)))
+dr
+exp(dr)
+@ 
+
+One could also try deletion residuals, etc.
+
+\subsection{Goodness-of-fit tests} 
+
+\subsubsection{Quadrat count test}
+
+The $\chi^2$ goodness-of-fit test based on quadrat counts is implemented
+for objects of class \code{ppm} (in \code{quadrat.test.ppm})
+and also for objects of class \code{mppm} (in \code{quadrat.test.mppm}).
+
+This is a goodness-of-fit test for a fitted {\bf Poisson} point process
+model only. The model could be uniform or non-uniform and the intensity
+might depend on covariates. 
+
+<<>>=
+H <- hyperframe(X=waterstriders)
+
+# Poisson with constant intensity for all patterns
+fit1 <- mppm(X~1, H)
+quadrat.test(fit1, nx=2)
+
+# uniform Poisson with different intensity for each pattern
+fit2 <- mppm(X ~ id, H)
+quadrat.test(fit2, nx=2)
+@ 
+
+See the help for \code{quadrat.test.ppm} and \code{quadrat.test.mppm}
+for further details.
+
+\subsubsection{Kolmogorov-Smirnov test}
+
+The Kolmogorov-Smirnov test of goodness-of-fit of a Poisson
+point process model compares the observed and predicted
+distributions of the values of a spatial covariate.
+
+We want to test the null hypothesis $H_0$ that the observed point pattern
+${\mathbf x}$ is a realisation from the Poisson process with intensity 
+function $\lambda(u)$ (for locations $u$ in the window $W$).
+Let $Z(u)$ be a given, real-valued covariate defined at each spatial location
+$u$. Under $H_0$, the \emph{observed} values of $Z$ at the 
+data points, $Z(x_i)$ for each $x_i \in {\mathbf x}$, are independent
+random variables with common probability distribution function
+\[
+    F_0(z) = \frac{\int_W \lambda(u) \indicate{Z(u) \le z} \dee u}
+                {\int_W \lambda(u)                       \dee u}.
+\]
+We can therefore apply the Kolmogorov-Smirnov test 
+of goodness-of-fit. This compares the empirical cumulative distribution of
+the observed values $Z(x_i)$ to the predicted c.d.f. $F_0$.
+
+The test is implemented for \code{mppm} objects as \code{kstest.mppm}.
+The syntax is 
+
+<<eval=FALSE>>=
+kstest.mppm(model, covariate)
+@ 
+
+where \code{model} is a fitted model (of class \texttt{"mppm"})
+and \code{covariate} is either 
+\begin{itemize}
+\item a \code{function(x,y)} making it possible to compute the value 
+of the covariate at any location \code{(x,y)}
+\item a pixel image containing the covariate values 
+\item a list of functions, one for each row of the hyperframe of
+original data
+\item a list of pixel images, one for each row of the hyperframe of 
+original data
+\item a hyperframe with one column containing either functions or 
+pixel images.
+\end{itemize}
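+
+For example, to test a fitted model against the $x$ coordinate used as
+a covariate (a minimal sketch, not evaluated here):
+
+<<eval=FALSE>>=
+fit <- mppm(X ~ 1, hyperframe(X=waterstriders))
+kstest.mppm(fit, function(x, y) { x })
+@ 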
+
+\newpage
+
+\addcontentsline{toc}{section}{Bibliography}
+
+%\bibliography{%
+%extra,%
+%extra2,%
+%biblio/badd,%
+%biblio/bioscience,%
+%biblio/censoring,%
+%biblio/mcmc,%
+%biblio/spatstat,%
+%biblio/stat,%
+%biblio/stochgeom%
+%}
+
+\begin{thebibliography}{1}
+
+\bibitem{baddmollpake08}
+A. Baddeley, J. M{\o}ller, and A.G. Pakes.
+\newblock Properties of residuals for spatial point processes.
+\newblock {\em Annals of the Institute of Statistical Mathematics},
+  60:627--649, 2008.
+
+\bibitem{TheBook}
+A. Baddeley, E. Rubak, and R. Turner.
+\newblock {\em Spatial Point Patterns: Methodology and Applications with R}.
+\newblock Chapman \& Hall/CRC Press, 2015.
+
+\bibitem{statpaper}
+A. Baddeley, I. Sintorn, L. Bischof, R. Turner, and S. Heggarty.
+\newblock Analysing designed experiments where the response is a spatial point
+  pattern.
+\newblock In preparation.
+
+\bibitem{baddetal05}
+A. Baddeley, R. Turner, J. M{\o}ller, and M. Hazelton.
+\newblock Residual analysis for spatial point processes (with discussion).
+\newblock {\em Journal of the Royal Statistical Society, series B},
+  67(5):617--666, 2005.
+
+\bibitem{chenetal08}
+B.J. Chen, G.P. Leser, D. Jackson, and R.A. Lamb.
+\newblock The influenza virus {M2} protein cytoplasmic tail interacts with the
+  {M1} protein and influences virus assembly at the site of virus budding.
+\newblock {\em Journal of Virology}, 82:10059--10070, 2008.
+
+%#%^!ifdef RANDOMEFFECTS  
+\bibitem{davigilt95}
+M. Davidian and D.M. Giltinan.
+\newblock {\em Nonlinear Mixed Effects Models for Repeated Measurement Data}.
+\newblock Chapman and Hall, 1995.
+%#%^!endif
+
+\bibitem{digglangbene91}
+P.J. Diggle, N. Lange, and F. M. Benes.
+\newblock Analysis of variance for replicated spatial point patterns in
+  clinical neuroanatomy.
+\newblock {\em Journal of the {A}merican {S}tatistical {A}ssociation},
+  86:618--625, 1991.
+
+%#%^!ifdef RANDOMEFFECTS  
+\bibitem{lairware82}
+N.M. Laird and J.H. Ware.
+\newblock Random-effects models for longitudinal data.
+\newblock {\em Biometrics}, 38:963--974, 1982.
+%#%^!endif
+
+\bibitem{pent84}
+A. Penttinen.
+\newblock {\em Modelling Interaction in Spatial Point Patterns: Parameter
+  Estimation by the Maximum Likelihood Method}.
+\newblock Number 7 in {Jyv\"askyl\"a} Studies in Computer Science, Economics
+  and Statistics. University of {Jyv\"askyl\"a}, 1984.
+
+%#%^!ifdef RANDOMEFFECTS  
+\bibitem{pinhbate00}
+J.C. Pinheiro and D.M. Bates.
+\newblock {\em Mixed-Effects Models in {S} and {S-PLUS}}.
+\newblock Springer, 2000.
+%#%^!endif
+
+\end{thebibliography}
+
+%\addcontentsline{toc}{section}{Index}
+
+%\printindex
+
+\end{document}
diff --git a/inst/doc/replicated.pdf b/inst/doc/replicated.pdf
new file mode 100644
index 0000000..e1dbbcf
Binary files /dev/null and b/inst/doc/replicated.pdf differ
diff --git a/inst/doc/shapefiles.R b/inst/doc/shapefiles.R
new file mode 100644
index 0000000..48c454b
--- /dev/null
+++ b/inst/doc/shapefiles.R
@@ -0,0 +1,162 @@
+### R code from vignette source 'shapefiles.Rnw'
+
+###################################################
+### code chunk number 1: shapefiles.Rnw:7-8
+###################################################
+options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1))))
+
+
+###################################################
+### code chunk number 2: shapefiles.Rnw:25-31
+###################################################
+library(spatstat)
+options(useFancyQuotes=FALSE)
+sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Date")
+sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Version")
+
+
+###################################################
+### code chunk number 3: shapefiles.Rnw:106-107 (eval = FALSE)
+###################################################
+## library(maptools)
+
+
+###################################################
+### code chunk number 4: shapefiles.Rnw:111-112 (eval = FALSE)
+###################################################
+## x <- readShapeSpatial("mydata.shp")
+
+
+###################################################
+### code chunk number 5: shapefiles.Rnw:117-118 (eval = FALSE)
+###################################################
+## class(x)
+
+
+###################################################
+### code chunk number 6: shapefiles.Rnw:135-139
+###################################################
+baltim <- columbus <- fylk <- list()
+class(baltim) <- "SpatialPointsDataFrame"
+class(columbus) <- "SpatialPolygonsDataFrame"
+class(fylk) <- "SpatialLinesDataFrame"
+
+
+###################################################
+### code chunk number 7: shapefiles.Rnw:141-145 (eval = FALSE)
+###################################################
+## setwd(system.file("shapes", package="maptools"))
+## baltim   <- readShapeSpatial("baltim.shp")
+## columbus <- readShapeSpatial("columbus.shp")
+## fylk     <- readShapeSpatial("fylk-val.shp")
+
+
+###################################################
+### code chunk number 8: shapefiles.Rnw:147-150
+###################################################
+class(baltim)
+class(columbus)
+class(fylk)
+
+
+###################################################
+### code chunk number 9: shapefiles.Rnw:178-179 (eval = FALSE)
+###################################################
+## X <- X[W]
+
+
+###################################################
+### code chunk number 10: shapefiles.Rnw:196-197 (eval = FALSE)
+###################################################
+## y <- as(x, "ppp")
+
+
+###################################################
+### code chunk number 11: shapefiles.Rnw:211-213 (eval = FALSE)
+###################################################
+## balt <- as(baltim, "ppp")
+## bdata <- slot(baltim, "data")
+
+
+###################################################
+### code chunk number 12: shapefiles.Rnw:261-262 (eval = FALSE)
+###################################################
+## out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) })
+
+
+###################################################
+### code chunk number 13: shapefiles.Rnw:271-272 (eval = FALSE)
+###################################################
+## curvegroup <- lapply(out, function(z) { do.call("superimpose", z)})
+
+
+###################################################
+### code chunk number 14: shapefiles.Rnw:315-319 (eval = FALSE)
+###################################################
+## out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) })
+## dat <- x@data
+## for(i in seq(nrow(dat))) 
+##   out[[i]] <- lapply(out[[i]], "marks<-", value=dat[i, , drop=FALSE])
+
+
+###################################################
+### code chunk number 15: shapefiles.Rnw:340-342
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+data(chorley)
+plot(as.owin(chorley), lwd=3, main="polygon")
+
+
+###################################################
+### code chunk number 16: shapefiles.Rnw:355-357
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+data(demopat)
+plot(as.owin(demopat), col="blue", main="polygonal region")
+
+
+###################################################
+### code chunk number 17: shapefiles.Rnw:393-396 (eval = FALSE)
+###################################################
+## regions <- slot(x, "polygons")
+## regions <- lapply(regions, function(x) { SpatialPolygons(list(x)) })
+## windows <- lapply(regions, as.owin)
+
+
+###################################################
+### code chunk number 18: shapefiles.Rnw:401-402 (eval = FALSE)
+###################################################
+## te <- tess(tiles=windows)
+
+
+###################################################
+### code chunk number 19: shapefiles.Rnw:438-439 (eval = FALSE)
+###################################################
+## y <- as(x, "SpatialPolygons")
+
+
+###################################################
+### code chunk number 20: shapefiles.Rnw:449-453 (eval = FALSE)
+###################################################
+## cp      <- as(columbus, "SpatialPolygons")
+## cregions <- slot(cp, "polygons")
+## cregions <- lapply(cregions, function(x) { SpatialPolygons(list(x)) })
+## cwindows <- lapply(cregions, as.owin)
+
+
+###################################################
+### code chunk number 21: shapefiles.Rnw:463-465 (eval = FALSE)
+###################################################
+## ch <- hyperframe(window=cwindows)
+## ch <- cbind.hyperframe(ch, columbus@data)
+
+
+###################################################
+### code chunk number 22: shapefiles.Rnw:485-487 (eval = FALSE)
+###################################################
+##   y <- as(x, "im")
+##   ylist <- lapply(slot(x, "data"), function(z, y) { y[,] <- z; y }, y=y)
+
+
diff --git a/inst/doc/shapefiles.Rnw b/inst/doc/shapefiles.Rnw
new file mode 100755
index 0000000..a90b716
--- /dev/null
+++ b/inst/doc/shapefiles.Rnw
@@ -0,0 +1,497 @@
+\documentclass[twoside,11pt]{article}
+
+% \VignetteIndexEntry{Handling shapefiles in the spatstat package}
+
+\SweaveOpts{eps=TRUE}
+
+<<echo=FALSE,results=hide,fig=FALSE>>=
+options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1))))
+@ 
+
+\usepackage{graphicx}
+\usepackage[colorlinks=true,urlcolor=blue]{hyperref}
+\usepackage{color}
+\usepackage{anysize}
+\marginsize{2cm}{2cm}{2cm}{2cm}
+
+\newcommand{\pkg}[1]{\texttt{#1}}
+\newcommand{\bold}[1]{{\textbf {#1}}}
+\newcommand{\R}{{\sf R}}
+
+\begin{document}
+%\bibliographystyle{plain}
+\thispagestyle{empty}
+
+<<echo=FALSE,results=hide>>=
+library(spatstat)
+options(useFancyQuotes=FALSE)
+sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Date")
+sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Version")
+@ 
+
+\title{Handling shapefiles in the \texttt{spatstat} package}
+\author{Adrian Baddeley, Rolf Turner and Ege Rubak}
+\date{
+\Sexpr{sdate} \\ 
+\pkg{spatstat} version \texttt{\Sexpr{sversion}}
+}
+\maketitle
+
+This vignette explains how to read data into the \pkg{spatstat} package
+from files in the popular `shapefile' format. 
+
+This vignette is part of the documentation included in 
+\pkg{spatstat} version \texttt{\Sexpr{sversion}}.
+The information applies to 
+\pkg{spatstat} versions \texttt{1.36-0} and above.
+
+\section{Shapefiles}
+
+A shapefile represents a list of spatial objects 
+--- a list of points, a list of lines, or a list of polygonal regions --- 
+and each object in the list may have additional variables attached to it.
+
+A dataset stored in shapefile format is actually stored in a
+collection of text files, for example
+\begin{verbatim}
+     mydata.shp
+     mydata.prj
+     mydata.sbn
+     mydata.dbf
+\end{verbatim}
+which all have the same base name \texttt{mydata} but different file extensions.
+To refer to this collection you will always use the filename with the 
+extension \texttt{shp}, for example \texttt{mydata.shp}. 
+
+\section{Helper packages}
+\label{S:helpers}
+
+We'll use two other packages%
+\footnote{In previous versions of \pkg{spatstat},
+  the package \pkg{gpclib} was also needed for some tasks.
+  This is no longer required.}
+ to handle shapefile data.
+
+The \pkg{maptools} package is designed specifically for handling
+file formats for spatial data. It contains 
+facilities for reading and writing files in shapefile format.
+
+The \pkg{sp} package supports a standard set of spatial data types
+in \R. These standard data types can be handled by many other packages,
+so it is useful to convert your spatial data into one of the 
+data types supported by \pkg{sp}.
+
+\section{How to read shapefiles into \pkg{spatstat}} 
+
+To read shapefile data into \pkg{spatstat}, you follow two steps:
+\begin{enumerate}
+\item 
+  using the facilities of \pkg{maptools}, read the shapefiles
+  and store the data in one of the standard formats supported by \pkg{sp}.
+\item
+  convert the \pkg{sp} data type into one of the data types
+  supported by \pkg{spatstat}.
+\end{enumerate}
+
+\subsection{Read shapefiles using \pkg{maptools}} 
+
+Here's how to read shapefile data.
+
+\begin{enumerate}
+\item ensure that the package \pkg{maptools} is installed. You will need
+version \texttt{0.7-16} or later.
+\item start R and load the package: 
+<<eval=FALSE>>=
+library(maptools)
+@ 
+\item read the shapefile into an object in the \pkg{sp} package
+using \texttt{readShapeSpatial}, for example
+<<eval=FALSE>>=
+x <- readShapeSpatial("mydata.shp")
+@ 
+\item 
+To find out what kind of spatial objects are represented by the dataset,
+inspect its class:
+<<eval=FALSE>>=
+class(x)
+@ 
+The class may be either \texttt{SpatialPoints} indicating a point pattern, 
+\texttt{SpatialLines} indicating a list of polygonal lines, or
+\texttt{SpatialPolygons} indicating a list of polygons. It may also be
+\texttt{SpatialPointsDataFrame},
+\texttt{SpatialLinesDataFrame} or 
+\texttt{SpatialPolygonsDataFrame} indicating that, in addition to the 
+spatial objects, there is a data frame of additional variables.
+The classes \texttt{SpatialPixelsDataFrame} and \texttt{SpatialGridDataFrame}
+represent pixel image data.
+\end{enumerate}
+
+Here are some examples, using the example shapefiles supplied in the
+\pkg{maptools} package itself.
+
+% fake data because we don't want spatstat to depend on maptools
+<<echo=FALSE,results=hide>>=
+baltim <- columbus <- fylk <- list()
+class(baltim) <- "SpatialPointsDataFrame"
+class(columbus) <- "SpatialPolygonsDataFrame"
+class(fylk) <- "SpatialLinesDataFrame"
+@ 
+<<eval=FALSE>>=
+setwd(system.file("shapes", package="maptools"))
+baltim   <- readShapeSpatial("baltim.shp")
+columbus <- readShapeSpatial("columbus.shp")
+fylk     <- readShapeSpatial("fylk-val.shp")
+@ 
+<<>>=
+class(baltim)
+class(columbus)
+class(fylk)
+@ 
+
+\subsection{Convert data to \pkg{spatstat} format}
+
+To convert the dataset to an object in the
+\pkg{spatstat} package, the procedure depends on the 
+type of data, as explained below. 
+
+Both packages \pkg{maptools} and \pkg{spatstat} must be loaded
+in order to convert the data.
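+
+That is, both of the following calls should have been issued before
+attempting any of the conversions below:
+
+<<eval=FALSE>>=
+library(maptools)
+library(spatstat)
+@ 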
+
+\subsubsection{Objects of class \texttt{SpatialPoints}}
+
+An object \texttt{x} of class \texttt{SpatialPoints}
+represents a spatial point pattern.
+Use \verb!as(x, "ppp")! or \texttt{as.ppp(x)} to convert it to a 
+spatial point pattern in \pkg{spatstat}.
+
+(The conversion is performed by \texttt{as.ppp.SpatialPoints},
+a function in \pkg{maptools}.)
+
+The window for the point pattern will be taken from
+the bounding box of the points. You will probably wish to change this window,
+usually by taking another dataset to provide the window information.
+Use \verb![.ppp! to change the window: if \texttt{X} is a point pattern
+object of class \verb!"ppp"! and \texttt{W} is a window object of class
+\verb!"owin"!, type
+<<eval=FALSE>>=
+X <- X[W]
+@ 
+
+\subsubsection{Objects of class \texttt{SpatialPointsDataFrame}}
+
+An object \texttt{x} of class \texttt{SpatialPointsDataFrame}
+represents a pattern of points with additional variables (`marks') attached to
+each point. It includes an object of class \texttt{SpatialPoints} 
+giving the point locations, and a data frame containing the
+additional variables attached to the points. 
+
+Use \verb!as(x, "ppp")! or \texttt{as.ppp(x)} to convert an
+object \texttt{x} of class \texttt{SpatialPointsDataFrame} to a 
+spatial point pattern in \pkg{spatstat}. In this conversion,
+the data frame of additional variables in \texttt{x} will 
+become the \texttt{marks} of the point pattern \texttt{y}.
+
+<<eval=FALSE>>=
+y <- as(x, "ppp")
+@ 
+
+(The conversion is performed by \texttt{as.ppp.SpatialPointsDataFrame},
+a function in \pkg{maptools}.)
+
+Before the conversion you can extract the
+data frame of auxiliary data by 
+\verb!df <- x@data! or \verb!df <- slot(x, "data")!.
+After the conversion you can extract these data by
+\verb!df <- marks(y)!. 
+
+For example:
+
+<<eval=FALSE>>=
+balt <- as(baltim, "ppp")
+bdata <- slot(baltim, "data")
+@ 
+
+\subsubsection{Objects of class \texttt{SpatialLines}}
+\label{spatiallines.2.psp}
+
+A ``line segment'' is the straight line between two points in the plane.
+
+In the \pkg{spatstat} package, an object of class \texttt{psp}
+(``planar segment pattern'')
+represents a pattern of line segments, which may or may not be
+connected to each other (like matches which have fallen at random
+on the ground). 
+
+In the \pkg{sp} package, an object of class \texttt{SpatialLines}
+represents a \textbf{list of lists} of \textbf{connected curves}, 
+each curve consisting of a sequence of straight 
+line segments that are joined together (like
+several pieces of a broken bicycle chain).
+
+So these two data types do not correspond exactly.
+
+The list-of-lists hierarchy in a \texttt{SpatialLines} object 
+is useful when representing internal divisions in a country.
+For example, if \texttt{USA} is an object of class \texttt{SpatialLines}
+representing the borders of the United States
+of America, then \verb!USA@lines! might be a list of length 52, with 
+\verb!USA@lines[[i]]! representing the borders of the \texttt{i}-th State. 
+The borders of each State consist of several different curved lines. Thus 
+\verb!USA@lines[[i]]@Lines[[j]]! would represent the \texttt{j}-th 
+piece of the boundary of the \texttt{i}-th State.
+
+If \texttt{x} is an object of class \texttt{SpatialLines},
+there are several things that you might want to do:
+\begin{enumerate}
+\item 
+  collect together all the line segments (all the segments that make up all the
+  connected curves) and store them as a single object of class \texttt{psp}.
+\begin{quote}
+  To do this, 
+  use \verb!as(x, "psp")! or \texttt{as.psp(x)} to convert it to a 
+  spatial line segment pattern. 
+\end{quote}
+\item 
+  convert each connected curve to an object of class \texttt{psp},
+  keeping different connected curves separate.
+
+  To do this, type something like the following:
+<<eval=FALSE>>=
+out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) })
+@ 
+
+The result will be a \textbf{list of lists} of objects of class \texttt{psp}.
+Each one of these objects represents a connected curve,
+although the \pkg{spatstat} package does not know that.
+The list structure will reflect the list structure of the original
+\texttt{SpatialLines} object \texttt{x}. If that's not what you want,
+then use \verb!curvelist <- do.call("c", out)! or
+<<eval=FALSE>>=
+curvegroup <- lapply(out, function(z) { do.call("superimpose", z)})
+@ 
+to collapse the list-of-lists-of-\texttt{psp}'s 
+into a list-of-\texttt{psp}'s. In the first case, \texttt{curvelist[[i]]}
+is a \texttt{psp} object representing the \texttt{i}-th connected curve. 
+In the second case, \texttt{curvegroup[[i]]}
+is a \texttt{psp} object containing all the line segments in
+the \texttt{i}-th group of connected curves (for example the 
+\texttt{i}-th State in the \texttt{USA} example).
+\end{enumerate}
+
+The window for the spatial line segment pattern can be specified
+as an argument \texttt{window} to the function \texttt{as.psp}.
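+
+For example (a sketch; the window coordinates are purely illustrative):
+
+<<eval=FALSE>>=
+W <- owin(c(0,1), c(0,1))
+L <- as.psp(x, window=W)
+@ 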
+
+(The conversion is performed by \texttt{as.psp.SpatialLines}
+or \texttt{as.psp.Lines}, which are functions in \pkg{maptools}.)
+
+\subsubsection{Objects of class \texttt{SpatialLinesDataFrame}}
+
+An object \texttt{x} of class \texttt{SpatialLinesDataFrame}
+is a \texttt{SpatialLines} object with additional data.
+The additional data is stored as a data frame \verb!x@data!
+with one row for each entry in \verb!x@lines!, that is,
+one row for each group of connected curves. 
+
+In the \pkg{spatstat} package, an object of class \texttt{psp}
+(representing a collection of line segments)
+may have a data frame of marks. Note that each \emph{line segment}
+in a \texttt{psp} object may have different mark values. 
+
+If \texttt{x} is an object of class \texttt{SpatialLinesDataFrame},
+there are two things that you might want to do:
+\begin{enumerate}
+\item collect together all the line segments that make up all the
+connected lines, and store them as a single object of class \texttt{psp}.
+\begin{quote}
+  To do this, 
+  use \verb!as(x, "psp")! or \texttt{as.psp(x)} to convert it to a 
+  marked spatial line segment pattern. 
+\end{quote}
+\item keep each connected curve separate, and convert each connected
+curve to an object of class \texttt{psp}.
+To do this, type something like the following:
+<<eval=FALSE>>=
+out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) })
+dat <- x@data
+for(i in seq_len(nrow(dat))) 
+  out[[i]] <- lapply(out[[i]], "marks<-", value=dat[i, , drop=FALSE])
+@ 
+The result is a list-of-lists-of-\texttt{psp}'s. 
+See the previous subsection for an explanation of how to 
+change this using \texttt{c()} or \texttt{superimpose}.
+\end{enumerate}
+
+In either case, 
+the mark variables attached to a particular \emph{group of connected lines}
+in the \texttt{SpatialLinesDataFrame} object will be duplicated
+and attached to each \emph{line segment} in the resulting \texttt{psp} object.
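+
+For example (a sketch, assuming \texttt{out} was computed as above),
+the marked list-of-lists can be collapsed so that each group of
+connected curves becomes a single marked \texttt{psp} object:
+
+<<eval=FALSE>>=
+curvegroup <- lapply(out, function(z) { do.call("superimpose", z) })
+@ 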
+
+\subsubsection{Objects of class \texttt{SpatialPolygons}}
+
+First, so that we don't go completely crazy, let's introduce some terminology.
+A \emph{polygon} is a closed curve that is composed of 
+straight line segments. You can draw a polygon
+without lifting your pen from the paper. 
+
+\setkeys{Gin}{width=0.4\textwidth}
+\begin{center}
+<<echo=FALSE,results=hide,fig=TRUE>>=
+data(chorley)
+plot(as.owin(chorley), lwd=3, main="polygon")
+@ 
+\end{center}
+
+A \emph{polygonal region}
+is a region in space whose boundary is composed of straight line segments.
+A polygonal region may consist of several unconnected pieces, and each piece
+may have holes. The boundary of a polygonal region
+consists of one or more polygons. To draw the boundary of a polygonal 
+region, you may need to lift and drop the pen several times.
+
+\setkeys{Gin}{width=0.4\textwidth}
+\begin{center}
+<<echo=FALSE,results=hide,fig=TRUE>>=
+data(demopat)
+plot(as.owin(demopat), col="blue", main="polygonal region")
+@ 
+\end{center}
+
+An object of class \texttt{owin} in \pkg{spatstat}
+represents a polygonal region. It is a region of space that is delimited
+by boundaries made of lines.
+
+An object \texttt{x} of class \texttt{SpatialPolygons}
+represents a \textbf{list of polygonal regions}. For example,
+a single object of class \texttt{SpatialPolygons} could 
+store information about every State in the United States of America
+(or the United States of Malaysia). Each State would be a separate
+polygonal region (and it might contain holes such as lakes).
+
+There are two things
+that you might want to do with an object of class \texttt{SpatialPolygons}:
+\begin{enumerate}
+\item 
+  combine all the polygonal regions together into a single
+  polygonal region, and convert this to a single object of class \texttt{owin}.
+  \begin{quote}
+    For example, you could combine all the States of the USA together
+    and obtain a single object that represents the territory of the USA. 
+
+    To do this, use \verb!as(x, "owin")! or \texttt{as.owin(x)}.
+    The result is a single window (object of class \texttt{"owin"}) 
+    in the \pkg{spatstat} package.
+  \end{quote}
+\item keep the different polygonal regions separate; convert each 
+  one of the polygonal regions to an object of class \texttt{owin}.
+  \begin{quote}
+    For example, you could keep the States of the USA separate,
+    and convert each State to an object of class \texttt{owin}.
+  \end{quote}
+  To do this, type the following:
+<<eval=FALSE>>=
+regions <- slot(x, "polygons")
+regions <- lapply(regions, function(x) { SpatialPolygons(list(x)) })
+windows <- lapply(regions, as.owin)
+@ 
+  The result is a list of objects of class \texttt{owin}. 
+  Often it would make sense to convert this to a 
+  tessellation object, by typing
+<<eval=FALSE>>=
+te <- tess(tiles=windows)
+@ 
+\end{enumerate}
+
+{\bf The following is different from what happened in
+  previous versions of \pkg{spatstat}} (prior to version \texttt{1.36-0}).
+
+During the conversion process, the geometry of the polygons
+will be automatically ``repaired'' if needed.
+Polygon data from shapefiles often contain geometrical inconsistencies 
+such as self-intersecting boundaries and overlapping pieces. 
+For example, these can arise from small errors in curve-tracing.
+Geometrical inconsistencies are tolerated in 
+an object of class \texttt{SpatialPolygons} which
+is a list of lists of polygonal curves.
+However, they are not tolerated in an object of class \texttt{owin},
+because an \texttt{owin} must specify a well-defined region of space.
+These data inconsistencies must be repaired to prevent technical problems. 
+\pkg{Spatstat} uses polygon-clipping code to automatically convert 
+polygonal lines into valid polygon boundaries. 
+The repair process changes the number of vertices in each polygon,
+and the number of polygons (if you chose option 1).
+To disable the repair process, set 
+\texttt{spatstat.options(fixpolygons=FALSE)}.
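+
+For example (a minimal sketch; \texttt{fixpolygons=TRUE} is the default):
+
+<<eval=FALSE>>=
+spatstat.options(fixpolygons=FALSE)  # disable automatic polygon repair
+spatstat.options(fixpolygons=TRUE)   # restore the default behaviour
+@ 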
+
+\subsubsection{Objects of class \texttt{SpatialPolygonsDataFrame}}
+
+What a mouthful!
+
+An object \texttt{x} of class \texttt{SpatialPolygonsDataFrame}
+represents a list of polygonal regions,
+with additional variables attached to
+each region. It includes an object of class \texttt{SpatialPolygons} 
+giving the spatial regions, and a data frame containing the
+additional variables attached to the regions.
+The regions are extracted by
+<<eval=FALSE>>=
+y <- as(x, "SpatialPolygons")
+@ 
+and you then proceed as above to convert the curves to
+\pkg{spatstat} format.
+
+The data frame of auxiliary data is extracted by 
+\verb!df <- x@data! or \verb!df <- slot(x, "data")!.
+
+For example:
+
+<<eval=FALSE>>=
+cp      <- as(columbus, "SpatialPolygons")
+cregions <- slot(cp, "polygons")
+cregions <- lapply(cregions, function(x) { SpatialPolygons(list(x)) })
+cwindows <- lapply(cregions, as.owin)
+@ 
+
+There is currently no facility in \pkg{spatstat} for attaching
+marks to an \texttt{owin} object directly. 
+
+However, \pkg{spatstat} supports objects called \textbf{hyperframes},
+which are like data frames except that the entries can be any type of object.
+Thus we can represent the \texttt{columbus} data in \pkg{spatstat} as
+follows:
+<<eval=FALSE>>=
+ch <- hyperframe(window=cwindows)
+ch <- cbind.hyperframe(ch, columbus@data)
+@ 
+
+Then \texttt{ch} is a hyperframe containing a column of \texttt{owin}
+objects followed by the columns of auxiliary data.
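+
+As an illustration of what can be done next (a sketch, assuming the
+conversion above succeeded), each row of the hyperframe can be processed
+with \texttt{with.hyperframe}, for example to compute the area of each
+polygonal region:
+
+<<eval=FALSE>>=
+areas <- with(ch, area.owin(window))  # one area per region
+@ 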
+
+\subsubsection{Objects of class \texttt{SpatialGridDataFrame}
+  and \texttt{SpatialPixelsDataFrame}}
+
+An object \texttt{x} of class \texttt{SpatialGridDataFrame} represents
+a pixel image on a rectangular grid. It includes a \texttt{SpatialGrid}
+object \texttt{slot(x, "grid")} defining the full rectangular grid of pixels, 
+and a data frame \texttt{slot(x, "data")} containing the pixel values
+(which may include \texttt{NA} values).
+
+The command \texttt{as(x, "im")} converts \texttt{x} to a pixel image
+of class \texttt{"im"}, taking the pixel values from the \emph{first column}
+of the data frame. If the data frame has multiple columns, these 
+have to be converted to separate pixel images in \pkg{spatstat}.
+For example
+<<eval=FALSE>>=
+  y <- as(x, "im")
+  ylist <- lapply(slot(x, "data"), function(z, y) { y[,] <- z; y }, y=y)
+@ 
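+
+If desired (a sketch, assuming the loop above succeeded), the resulting
+list of images can be converted to an object of class \texttt{solist},
+so that it can be plotted and manipulated as a unit:
+
+<<eval=FALSE>>=
+ylist <- as.solist(ylist)
+@ 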
+
+An object \texttt{x} of class  \texttt{SpatialPixelsDataFrame} 
+represents a \emph{subset} of a pixel image. 
+To convert this to a \pkg{spatstat} object, it should first be converted to
+a \texttt{SpatialGridDataFrame} by \texttt{as(x, "SpatialGridDataFrame")},
+then handled as described above.
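+
+For example, combining the two steps just described:
+
+<<eval=FALSE>>=
+xg <- as(x, "SpatialGridDataFrame")
+y  <- as(xg, "im")
+@ 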
+
+\end{document}
+
diff --git a/inst/doc/shapefiles.pdf b/inst/doc/shapefiles.pdf
new file mode 100644
index 0000000..3383206
Binary files /dev/null and b/inst/doc/shapefiles.pdf differ
diff --git a/inst/doc/spatstatlocalsize.txt b/inst/doc/spatstatlocalsize.txt
new file mode 100644
index 0000000..3bbb936
--- /dev/null
+++ b/inst/doc/spatstatlocalsize.txt
@@ -0,0 +1,2 @@
+date version nhelpfiles nobjects ndatasets Rlines srclines
+"2017-03-30" "3.5-6" 21 85 0 4677 0
diff --git a/inst/doc/updates.R b/inst/doc/updates.R
new file mode 100644
index 0000000..32532d0
--- /dev/null
+++ b/inst/doc/updates.R
@@ -0,0 +1,81 @@
+### R code from vignette source 'updates.Rnw'
+
+###################################################
+### code chunk number 1: updates.Rnw:20-24
+###################################################
+library(spatstat)
+sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Version")
+options(useFancyQuotes=FALSE)
+
+
+###################################################
+### code chunk number 2: updates.Rnw:41-70
+###################################################
+readSizeTable <- function(fname) {
+  if(is.null(fname) || !file.exists(fname)) return(NULL)
+  a <- read.table(fname, header=TRUE)
+  a$date <- as.Date(a$date)
+  return(a)
+}
+getSizeTable <- function(packagename="spatstat", tablename="packagesizes.txt") {
+  fname <- system.file("doc", tablename, package=packagename)
+  readSizeTable(fname)
+}
+counts <- c("nhelpfiles", "nobjects", "ndatasets", "Rlines", "srclines")
+mergeSizeTables <- function(a, b) {
+  if(is.null(b)) return(a)
+  for(i in seq_len(nrow(a))) {
+    j <- which(b$date <= a$date[i])
+    if(length(j) > 0) 
+      a[i,counts] <- a[i,counts] + b[max(j), counts]
+  }
+  return(a)
+}
+z <- getSizeTable()
+zutils <- getSizeTable("spatstat.utils")
+zlocal <- getSizeTable("spatstat", "spatstatlocalsize.txt")
+z <- mergeSizeTables(z, zutils)
+z <- mergeSizeTables(z, zlocal)
+#
+changes <- z[nrow(z), ] - z[z$version == "1.42-0", ]
+newobj <- changes[["nobjects"]]
+newdat <- changes[["ndatasets"]] + 1  # counting rule doesn't detect redwood3
+
+
+###################################################
+### code chunk number 3: updates.Rnw:80-85
+###################################################
+options(SweaveHooks=list(fig=function() par(mar=0.2+c(2,4,2,0))))
+Plot <- function(fmla, ..., dat=z) {
+  yvals <- eval(as.expression(fmla[[2]]), envir=dat)
+  plot(fmla, ..., data=dat, type="l", xlab="", lwd=2, ylim=c(0, max(yvals)))
+}
+
+
+###################################################
+### code chunk number 4: updates.Rnw:91-96
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+Plot((Rlines + srclines)/1000 ~ date, ylab="Lines of code (x 1000)", 
+     main="Spatstat growth")
+lines(srclines/1000 ~ date, data=z)
+text(as.Date("2015-01-01"), 9.5, "C code")
+text(as.Date("2015-01-01"), 60, "R code")
+
+
+###################################################
+### code chunk number 5: updates.Rnw:1789-1793
+###################################################
+nbugs <- nrow(news(grepl("^BUG", Category), 
+                   package="spatstat"))
+nbugssince <- nrow(news(Version > "1.42-0" & grepl("^BUG", Category), 
+                   package="spatstat"))
+
+
+###################################################
+### code chunk number 6: updates.Rnw:1799-1800 (eval = FALSE)
+###################################################
+## news(grepl("^BUG", Category), package="spatstat")
+
+
diff --git a/inst/doc/updates.Rnw b/inst/doc/updates.Rnw
new file mode 100644
index 0000000..620a03a
--- /dev/null
+++ b/inst/doc/updates.Rnw
@@ -0,0 +1,2197 @@
+\documentclass[11pt]{article}
+\usepackage{graphicx}
+\usepackage{Sweave}
+\usepackage{bm}
+\usepackage{anysize}
+
+\marginsize{2cm}{2cm}{2cm}{2cm}
+
+% \VignetteIndexEntry{Summary of Recent Updates to Spatstat}
+
+\newcommand{\pkg}[1]{\texttt{#1}}
+\newcommand{\code}[1]{\texttt{#1}}
+\newcommand{\R}{{\sf R}}
+\newcommand{\spst}{\pkg{spatstat}}
+\newcommand{\Spst}{\pkg{Spatstat}}
+
+\begin{document}
+\bibliographystyle{plain}
+
+<<echo=FALSE,results=hide>>=
+library(spatstat)
+sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Version")
+options(useFancyQuotes=FALSE)
+@ 
+
+\title{Summary of recent updates to \spst}
+\author{Adrian Baddeley, Rolf Turner and Ege Rubak}
+\date{For \spst\ version \texttt{\Sexpr{sversion}}}
+\maketitle
+
+\thispagestyle{empty}
+
+This is a summary of changes that have been made 
+to the \spst\ package since the publication of the 
+accompanying book \cite{baddrubaturn15}.
+The book, published in December 2015,
+covers everything in \spst\ up to version \texttt{1.42-0}, 
+released in May 2015.
+
+<<echo=FALSE,results=hide>>=
+readSizeTable <- function(fname) {
+  if(is.null(fname) || !file.exists(fname)) return(NULL)
+  a <- read.table(fname, header=TRUE)
+  a$date <- as.Date(a$date)
+  return(a)
+}
+getSizeTable <- function(packagename="spatstat", tablename="packagesizes.txt") {
+  fname <- system.file("doc", tablename, package=packagename)
+  readSizeTable(fname)
+}
+counts <- c("nhelpfiles", "nobjects", "ndatasets", "Rlines", "srclines")
+mergeSizeTables <- function(a, b) {
+  if(is.null(b)) return(a)
+  for(i in seq_len(nrow(a))) {
+    j <- which(b$date <= a$date[i])
+    if(length(j) > 0) 
+      a[i,counts] <- a[i,counts] + b[max(j), counts]
+  }
+  return(a)
+}
+z <- getSizeTable()
+zutils <- getSizeTable("spatstat.utils")
+zlocal <- getSizeTable("spatstat", "spatstatlocalsize.txt")
+z <- mergeSizeTables(z, zutils)
+z <- mergeSizeTables(z, zlocal)
+#
+changes <- z[nrow(z), ] - z[z$version == "1.42-0", ]
+newobj <- changes[["nobjects"]]
+newdat <- changes[["ndatasets"]] + 1  # counting rule doesn't detect redwood3
+@ %$
+
+The current version of \spst\ is \texttt{\Sexpr{sversion}}.
+It contains \Sexpr{newobj} new functions
+and \Sexpr{newdat} new datasets
+introduced after May 2015. This document summarises the most important changes.
+
+This document also lists all \emph{important} bugs detected \emph{since 2010}.
+
+<<echo=FALSE,results=hide,fig=FALSE>>=
+options(SweaveHooks=list(fig=function() par(mar=0.2+c(2,4,2,0))))
+Plot <- function(fmla, ..., dat=z) {
+  yvals <- eval(as.expression(fmla[[2]]), envir=dat)
+  plot(fmla, ..., data=dat, type="l", xlab="", lwd=2, ylim=c(0, max(yvals)))
+}
+@ 
+\SweaveOpts{eps=TRUE}
+\setkeys{Gin}{width=0.45\textwidth}
+
+\centerline{
+<<fig=TRUE,echo=FALSE,results=hide>>=
+Plot((Rlines + srclines)/1000 ~ date, ylab="Lines of code (x 1000)", 
+     main="Spatstat growth")
+lines(srclines/1000 ~ date, data=z)
+text(as.Date("2015-01-01"), 9.5, "C code")
+text(as.Date("2015-01-01"), 60, "R code")
+@ 
+}
+
+\tableofcontents
+
+\newpage
+
+\section{\pkg{spatstat} is splitting into parts}
+
+    \pkg{spatstat} is being split into several sub-packages, to satisfy
+    the requirements of CRAN. This should not affect the user:
+    existing code will continue to work in the same way.
+
+    Currently there are two sub-packages, called \pkg{spatstat.utils}
+    and \pkg{spatstat}.
+
+    Typing \code{library(spatstat)} will load the familiar
+    \pkg{spatstat} package which can be used as before, and will silently
+    import the \pkg{spatstat.utils} package.
+
+    The \pkg{spatstat.utils} package
+    contains utility functions that were originally written for \pkg{spatstat}:
+    they were undocumented internal functions in \pkg{spatstat}, but are now
+    documented and accessible in a separate package because they may be
+    useful for other purposes. To access these functions, you need to
+    type \code{library(spatstat.utils)}. 
+
+\section{Precis of all changes}
+
+Here is the text from the `overview' sections of 
+the News and Release Notes for each update. 
+
+\begin{itemize}
+
+  \item \spst\ now Imports the package \pkg{spatstat.utils}.
+
+  \item \spst\ now suggests the package \pkg{fftwtools}.
+
+  \item Now handles disconnected linear networks.
+    
+  \item Effect function is now available for all types of fitted model.
+
+  \item Geometric-mean smoothing.
+
+  \item A model can be fitted or re-fitted to a sub-region of data.
+    
+  \item New fast algorithm for kernel smoothing on a linear network.
+
+  \item Leverage and influence diagnostics extended to Poisson/Gibbs models
+        fitted by logistic composite likelihood.
+
+  \item Two-stage Monte Carlo test.
+  
+  \item Dirichlet/Voronoi tessellation on a linear network.
+
+  \item Thinning of point patterns on a linear network.
+
+  \item More support for functions and tessellations on a linear network.
+
+  \item Bandwidth selection for pair correlation function.
+  
+  \item Pooling operations improved. 
+
+  \item Operations on signed measures.
+
+   \item Operations on lists of pixel images.
+
+   \item Improved pixellation of point patterns.
+
+   \item Stieltjes integral extended.
+
+   \item Subset operators extended.
+
+   \item Greatly accelerated \texttt{rmh} when using \texttt{nsave}.
+
+  \item Sufficient Dimension Reduction for point processes.
+
+  \item Alternating Gibbs Sampler for point process simulation.
+
+  \item New class of spatially sampled functions.
+
+  \item ROC and AUC extended to other types of point patterns and models.
+
+  \item More support for linear networks.
+
+  \item More support for infinite straight lines.
+
+  \item \spst\ now depends on the packages \pkg{nlme} and \pkg{rpart}.
+
+  \item Important bug fix in \code{linearK}, \code{linearpcf}.
+
+  \item Changed internal format of \code{linnet} and \code{lpp} objects.
+
+  \item Faster computation in linear networks.
+
+  \item Bias correction techniques.
+  
+  \item Bounding circle of a spatial object.
+
+  \item Option to plot marked points as arrows.
+
+  \item Kernel smoothing accelerated.
+
+  \item Workaround for bug in some graphics drivers affecting image orientation.
+
+  \item Non-Gaussian smoothing kernels.
+
+   \item Improvements to inhomogeneous multitype $K$ and $L$ functions.
+
+   \item Variance approximation for pair correlation function.
+
+   \item Leverage and influence for multitype point process models.
+
+   \item Functions for extracting components of vector-valued objects.
+
+  \item Recursive-partition point process models.
+
+   \item Minkowski sum, morphological dilation and erosion with any shape.
+
+  \item Minkowski sum also applicable to point patterns and line segment patterns.
+    
+   \item Important bug fix in \texttt{Smooth.ppp}.
+ 
+   \item Important bug fix in spatial CDF tests.
+
+   \item  More bug fixes for replicated patterns.
+
+   \item Simulate a model fitted to replicated point patterns.
+
+   \item Inhomogeneous multitype $F$ and $G$ functions.
+
+   \item Summary functions recognise \texttt{correction="all"}.
+
+   \item Leverage and influence code handles bigger datasets.
+
+   \item More support for pixel images.
+
+   \item Improved progress reports.
+
+   \item New dataset \texttt{redwood3}.
+
+    \item Fixed namespace problems arising when spatstat is not loaded.
+
+   \item Important bug fix in leverage/influence diagnostics for Gibbs models.
+
+   \item Surgery with linear networks.
+   
+   \item Tessellations on a linear network.
+
+   \item Laslett's Transform.
+
+   \item Colour maps for point patterns with continuous marks
+     are easier to define.
+
+   \item Pair correlation function estimates can be pooled.
+
+   \item Stipulate a particular version of a package.
+
+   \item More support for replicated point patterns.
+     
+   \item More support for tessellations.
+
+  \item More support for multidimensional point patterns and point processes.
+
+   \item More options for one-sided envelopes.
+
+   \item More support for model comparison.
+
+   \item Convexifying operation.
+
+   \item Subdivide a linear network.
+
+   \item Penttinen process can be simulated (by Metropolis-Hastings or CFTP).
+
+   \item Calculate the predicted variance of number of points.
+
+   \item Accelerated algorithms for linear networks.
+
+   \item Quadrat counting accelerated, in some cases.
+
+   \item Simulation algorithms have been accelerated; simulation outcomes 
+   are \emph{not} identical to those obtained from previous versions of \spst. 
+
+   \item Determinantal point process models.
+
+   \item Random-effects and mixed-effects models for replicated patterns.
+
+   \item Dao-Genton test, and corresponding simulation envelopes.
+
+   \item Simulated annealing and simulated tempering.
+
+   \item spatstat colour tools now handle transparent colours.
+
+   \item Improvements to \verb![! and \texttt{subset} methods.
+
+   \item Extensions to kernel smoothing on a linear network.
+
+   \item Support for one-dimensional smoothing kernels.
+
+   \item Mark correlation function may include weights.
+
+   \item Cross-correlation version of the mark correlation function.
+
+   \item Penttinen pairwise interaction model.
+
+   \item Improvements to simulation of Neyman-Scott processes.
+
+   \item Improvements to fitting of Neyman-Scott models.
+
+   \item Extended functionality for pixel images.
+
+   \item Fitted intensity on linear network.
+
+   \item Triangulation of windows.
+
+   \item  Corrected an edge correction.
+   \end{itemize}  
+   
+\section{New datasets}  
+
+The following datasets have been added to the package.
+
+\begin{itemize}
+\item \texttt{austates}: The states and large mainland territories of Australia
+   represented as polygonal regions forming a tessellation.
+ \item \texttt{redwood3}: a more accurate version of the \texttt{redwood} data.
+\end{itemize}
+
+\section{New classes}
+
+\begin{itemize}
+\item \texttt{ssf}:
+  Class of spatially sampled functions.
+\end{itemize}
+
+\section{New functions}
+
+Following is a list of all the functions that have been added.
+
+\begin{itemize}
+    \item \texttt{as.data.frame.envelope}:
+    Extract function data from an envelope object,
+    including the functions for the simulated data ('simfuns')
+    if they were saved.
+
+    \item \texttt{is.connected}, \texttt{is.connected.default}, 
+      \texttt{is.connected.linnet}:
+    Determines whether a spatial object consists of
+    one topologically connected piece, or several pieces.
+
+    \item \texttt{is.connected.ppp}:
+    Determines whether a point pattern is connected after
+    all pairs of points closer than distance \texttt{R} are joined.
+
+    \item \texttt{hist.funxy}:
+    Histogram of values of a spatial function.
+
+    \item \texttt{model.matrix.ippm}:
+    Method for \texttt{model.matrix} which allows computation of
+    regular and irregular score components.
+
+    \item \texttt{harmonise.msr}:
+    Convert several measures (objects of class \texttt{msr})
+    to a common quadrature scheme.
+    
+    \item \texttt{bits.test}:
+    Balanced Independent Two-Stage Monte Carlo test,
+    an improvement on the Dao-Genton test.
+    
+    \item \texttt{lineardirichlet}:
+    Computes the Dirichlet-Voronoi tessellation associated with a
+    point pattern on a linear network.
+
+    \item \texttt{domain.lintess}, \texttt{domain.linfun}:
+    Extract the linear network from a
+    \texttt{lintess} or \texttt{linfun} object.
+
+    \item \texttt{summary.lintess}:
+    Summary of a tessellation on a linear network.
+
+    \item \texttt{clicklpp}:
+    Interactively add points on a linear network.
+    
+    \item \texttt{envelopeArray}:
+    Generate an array of envelopes
+    using a function that returns \texttt{fasp} objects.
+
+    \item \texttt{bw.pcf}:
+      Bandwidth selection for pair correlation function.
+
+    \item \texttt{grow.box3}:
+    Expand a three-dimensional box.
+    
+    \item \texttt{hexagon}, \texttt{regularpolygon}:
+    Create regular polygons.
+
+    \item \texttt{Ops.msr}:
+    Arithmetic operations for measures.
+
+    \item \texttt{Math.imlist}, \texttt{Ops.imlist}, 
+      \texttt{Summary.imlist}, \texttt{Complex.imlist}:
+    Arithmetic operations for lists of pixel images.
+
+    \item \texttt{measurePositive}, \texttt{measureNegative}, 
+      \texttt{measureVariation}, \texttt{totalVariation}:
+    Positive and negative parts of a measure, and variation of a measure.
+
+    \item \texttt{as.function.owin}:
+    Convert a spatial window to a \texttt{function(x,y)}, 
+    the indicator function.
+
+    \item \texttt{as.function.ssf}:
+    Convert an object of class \texttt{ssf} to a \texttt{function(x,y)}.
+
+    \item \texttt{as.function.leverage.ppm}:
+    Convert an object of class \texttt{leverage.ppm} to a
+    \texttt{function(x,y)}.
+
+  \item \texttt{sdr}, \texttt{dimhat}:
+    Sufficient Dimension Reduction for point processes.
+
+    \item \texttt{simulate.rhohat}:
+    Simulate a Poisson point process with the
+    intensity estimated by \texttt{rhohat}.
+
+    \item \texttt{rlpp}:
+    Random points on a linear network with a specified probability density.
+
+    \item \texttt{cut.lpp}:
+    Method for \texttt{cut} for point patterns on a linear network.
+
+    \item \texttt{has.close}:
+    Faster way to check whether a point has a close neighbour.
+
+    \item \texttt{psib}:
+    Sibling probability (index of clustering strength in a cluster process).
+    
+    \item \texttt{rags}, \texttt{ragsAreaInter}, \texttt{ragsMultiHard}:
+    Alternating Gibbs Sampler for point processes.
+
+    \item \texttt{bugfixes}:
+      List all bug fixes in recent versions of a package.
+      
+    \item \texttt{ssf}:
+    Create a spatially sampled function.
+
+    \item \texttt{print.ssf}, \texttt{plot.ssf}, \texttt{contour.ssf}, 
+      \texttt{image.ssf}:
+    Display a spatially sampled function.
+
+    \item \texttt{as.im.ssf}, \texttt{as.ppp.ssf}, \texttt{marks.ssf}, 
+      \verb!marks<-.ssf!, \texttt{unmark.ssf}, \verb![.ssf!, \texttt{with.ssf}:
+    Manipulate data in a spatially sampled function.
+
+    \item \texttt{Smooth.ssf}:
+    Smooth a spatially sampled function.
+
+    \item \texttt{integral.ssf}:
+    Approximate integral of a spatially sampled function.
+
+    \item \texttt{roc.kppm}, \texttt{roc.lppm}, \texttt{roc.lpp}:
+      Methods for \texttt{roc} for fitted models of class \texttt{"kppm"} and 
+      \texttt{"lppm"} and point patterns of class \texttt{"lpp"}
+
+    \item \texttt{auc.kppm}, \texttt{auc.lppm}, \texttt{auc.lpp}:
+      Methods for \texttt{auc} for fitted models of class \texttt{"kppm"} and 
+      \texttt{"lppm"} and point patterns of class \texttt{"lpp"}
+      
+    \item \texttt{timeTaken}:
+    Extract the timing data from a \texttt{"timed"} object or objects.
+
+    \item \texttt{rotate.infline}, 
+      \texttt{shift.infline}, \texttt{reflect.infline}, 
+      \texttt{flipxy.infline}:
+    Geometrical transformations for infinite straight lines.
+
+    \item \texttt{whichhalfplane}:
+    Determine which side of an infinite line a point lies on.
+
+    \item \texttt{matrixpower}, \texttt{matrixsqrt}, \texttt{matrixinvsqrt}:
+    Raise a matrix to any power.
+
+    \item \texttt{points.lpp}:
+    Method for \texttt{points} for point patterns on a linear network.
+
+    \item \texttt{pairs.linim}:
+    Pairs plot for images on a linear network.
+
+    \item \texttt{closetriples}:
+    Find close triples of points.
+    
+    \item \texttt{anyNA.im}:
+    Method for \texttt{anyNA} for pixel images.
+
+    \item \texttt{bc}:
+    Bias correction (Newton-Raphson) for fitted model parameters. 
+
+    \item \texttt{rex}:
+    Richardson extrapolation for numerical integrals and 
+    statistical model parameter estimates. 
+
+    \item \texttt{boundingcircle}, \texttt{boundingcentre}:
+    Find the smallest circle enclosing a window or point pattern.
+
+    \item \verb![.linim! : 
+    Subset operator for pixel images on a linear network.
+
+    \item \texttt{mean.linim}, \texttt{median.linim}, \texttt{quantile.linim}:
+    The mean, median, or quantiles of pixel values in a 
+    pixel image on a linear network.
+
+  \item \texttt{weighted.median}, \texttt{weighted.quantile}:
+    Median or quantile of numerical data with associated weights.
+
+  \item \verb!"[.linim"!:
+    Subset operator for pixel images on a linear network.
+
+  \item \texttt{mean.linim}, \texttt{median.linim}, \texttt{quantile.linim}:
+    The mean, median, or quantiles of pixel values in a 
+    pixel image on a linear network.
+  
+  \item \texttt{boundingcircle}, \texttt{boundingcentre}:
+   Smallest circle enclosing a spatial object.
+
+ \item \texttt{split.msr}:
+    Decompose a measure into parts.
+
+  \item \texttt{unstack.msr}:
+    Decompose a vector-valued measure into its component measures.
+
+  \item \texttt{unstack.ppp}, \texttt{unstack.psp}, \texttt{unstack.lpp}:
+    Given a spatial pattern with several columns of marks,
+    separate the columns and return a list of spatial patterns, 
+    each having only one column of marks.
+
+ \item \texttt{kernel.squint}:
+    Integral of squared kernel, for the kernels used in density estimation.
+
+ \item \texttt{as.im.data.frame}:
+  Build a pixel image from a data frame of coordinates and pixel values.
+
+\item \texttt{covering}:
+    Cover a window using discs of a given radius.
+
+\item \texttt{dilationAny}, \texttt{erosionAny}, \verb!%(-)%! :
+    Morphological dilation and erosion by any shape.
+
+\item \texttt{FmultiInhom}, \texttt{GmultiInhom}:
+  Inhomogeneous multitype/marked versions of the summary functions 
+  \texttt{Fest}, \texttt{Gest}.
+
+\item \texttt{kernel.moment}:
+    Moment or incomplete moment of smoothing kernel.
+
+\item \texttt{MinkowskiSum}, \verb!%(+)%!: 
+    Minkowski sum of two windows: \verb!A %(+)% B!, 
+    or \texttt{MinkowskiSum(A,B)}.
+
+\item \texttt{nobjects}:
+  New generic function for counting the number of 'things' in a dataset.
+  There are methods for \texttt{ppp}, \texttt{ppx}, \texttt{psp}, \texttt{tess}.
+
+ \item \texttt{parameters.interact}, \texttt{parameters.fii}:
+    Extract parameters from interpoint interactions.
+    (These existing functions are now documented.)
+
+ \item \texttt{ppmInfluence}:
+  Calculate \texttt{leverage.ppm}, \texttt{influence.ppm} and 
+  \texttt{dfbetas.ppm} efficiently.
+
+  \item \texttt{rppm}, \texttt{plot.rppm}, \texttt{predict.rppm}, 
+    \texttt{prune.rppm}:
+    Recursive-partition point process models.
+
+ \item \texttt{simulate.mppm}:
+   Simulate a point process model fitted to replicated point patterns.
+
+ \item \texttt{update.interact}:
+    Update the parameters of an interpoint interaction.
+    [This existing function is now documented.]
+
+ \item \texttt{where.max}, \texttt{where.min}:
+    Find the spatial location(s) where a pixel image achieves its
+    maximum or minimum value.
+    
+ \item \texttt{compileK}, \texttt{compilepcf}:
+   Make a $K$ function or pair correlation function
+   given the pairwise distances and their weights.
+   [These existing internal functions are now documented.]
+    
+  \item \texttt{laslett}:
+  Laslett's Transform.
+
+\item \texttt{lintess}: 
+  Tessellation on a linear network.
+
+\item \texttt{divide.linnet}:
+  Divide a linear network into pieces demarcated by a point pattern.
+
+\item \texttt{insertVertices}:
+  Insert new vertices in a linear network.
+
+\item \texttt{thinNetwork}:
+  Remove vertices and/or segments from a linear network etc.
+
+\item \texttt{connected.linnet}:
+  Find connected components of a linear network.
+
+\item \texttt{nvertices}, \texttt{nvertices.linnet}, \texttt{nvertices.owin}:
+  Count the number of vertices in a linear network 
+  or vertices of the boundary of a window.
+
+\item \texttt{as.data.frame.linim}, \texttt{as.data.frame.linfun}:
+  Extract a data frame of spatial locations and function values
+  from an object of class \texttt{linim} or \texttt{linfun}.
+
+\item \texttt{as.linfun}, \texttt{as.linfun.linim}, \texttt{as.linfun.lintess}:
+  Convert other kinds of data to a \texttt{linfun} object.
+
+\item \texttt{requireversion}:
+    Require a particular version of a package
+    (for use in stand-alone R scripts).
+
+  \item \texttt{as.function.tess}:
+   Convert a tessellation to a \texttt{function(x,y)}. The function value
+   indicates which tile of the tessellation contains the point $(x,y)$.
+
+   \item \texttt{tileindex}:
+   Determine which tile of a tessellation contains a given point $(x,y)$.
+
+   \item \texttt{persp.leverage.ppm}:
+   Method for \texttt{persp} plots for objects of class \texttt{leverage.ppm}.
+
+   \item \texttt{AIC.mppm}, \texttt{extractAIC.mppm}:
+   AIC for point process models fitted to replicated point patterns.
+
+   \item \texttt{nobs.mppm}, \texttt{terms.mppm}, \texttt{getCall.mppm}:
+   Methods for point process models fitted to replicated point patterns.
+
+  \item \texttt{rPenttinen}:
+    Simulate the Penttinen process using perfect simulation.
+
+  \item \texttt{varcount}:
+    Given a point process model, compute the predicted variance
+    of the number of points falling in a window.
+
+  \item \texttt{inside.boxx}:
+    Test whether multidimensional points lie inside a specified 
+    multidimensional box.
+  \item \texttt{lixellate}:
+    Divide each segment of a linear network into smaller segments.
+
+  \item \texttt{nsegments.linnet}, \texttt{nsegments.lpp}:
+     Count the number of line segments in a linear network.
+
+  \item \texttt{grow.boxx}:
+     Expand a multidimensional box.
+
+   \item \texttt{deviance.ppm}, \texttt{deviance.lppm}:
+     Deviance for a fitted point process model.
+
+   \item \texttt{pseudoR2}:
+     Pseudo-R-squared for a fitted point process model.
+
+   \item \texttt{tiles.empty}:
+     Checks whether each tile of a tessellation is empty or nonempty.
+
+   \item \texttt{summary.linim}:
+     Summary for a pixel image on a linear network.
+     
+\item Determinantal Point Process models:
+  \begin{itemize}
+  \item \texttt{dppm}:
+    Fit a determinantal point process model.
+  \item \texttt{fitted.dppm}, \texttt{predict.dppm}, \texttt{intensity.dppm}:
+    Prediction for a fitted determinantal point process model.
+  \item 
+    \texttt{Kmodel.dppm}, \texttt{pcfmodel.dppm}: 
+    Second moments of a determinantal point process model.
+  \item
+    \texttt{rdpp}, \texttt{simulate.dppm}:
+    Simulation of a determinantal point process model.
+  \item \texttt{logLik.dppm}, \texttt{AIC.dppm}, \texttt{extractAIC.dppm}, 
+    \texttt{nobs.dppm}: Likelihood and AIC for 
+    a fitted determinantal point process model.
+  \item
+    \texttt{print.dppm}, \texttt{reach.dppm}, \texttt{valid.dppm}: 
+    Basic information about a \texttt{dpp} model.
+  \item \texttt{coef.dppm}, \texttt{formula.dppm}, 
+    \texttt{terms.dppm}, \texttt{labels.dppm},
+    \texttt{model.frame.dppm}, \texttt{model.matrix.dppm}, 
+    \texttt{model.images.dppm}, \texttt{is.stationary.dppm}, 
+    \texttt{unitname.dppm}, \verb!unitname<-.dppm!, 
+    \texttt{Window.dppm}: Various methods for \texttt{dppm} objects.
+  \item \texttt{parameters.dppm}: Extract meaningful list of model parameters.
+  \item \texttt{objsurf.dppm}: Objective function surface of 
+    a \texttt{dppm} object. 
+  \item \texttt{residuals.dppm}: Residual measure for a \texttt{dppm} object.
+  \end{itemize}
+\item Determinantal Point Process model families:
+  \begin{itemize}
+  \item \texttt{dppBessel}, \texttt{dppCauchy}, 
+    \texttt{dppGauss}, \texttt{dppMatern}, \texttt{dppPowerExp}:
+    Determinantal Point Process family functions.
+  \item \texttt{detpointprocfamilyfun}:
+    Create a family function.
+  \item    
+    \texttt{update.detpointprocfamily}: Set parameter values in a
+    determinantal point process model family.
+  \item
+    \texttt{simulate.dppm}:   Simulation.
+  \item \texttt{is.stationary.detpointprocfamily}, 
+   \texttt{intensity.detpointprocfamily}, \texttt{Kmodel.detpointprocfamily}, 
+   \texttt{pcfmodel.detpointprocfamily}: Moments.
+ \item \texttt{dim.detpointprocfamily}, \texttt{dppapproxkernel}, 
+   \texttt{dppapproxpcf}, \texttt{dppeigen}, 
+   \texttt{dppkernel}, \texttt{dppparbounds}, \texttt{dppspecdenrange}, 
+   \texttt{dppspecden}:
+   Helper functions.
+ \end{itemize}
+
+ \item \texttt{dg.envelope}:
+   Simulation envelopes corresponding to Dao-Genton test.
+
+ \item \texttt{dg.progress}:
+   Progress plot (envelope representation) for the Dao-Genton test.
+
+ \item \texttt{dg.sigtrace}: Significance trace for the Dao-Genton test.
+
+ \item \texttt{markcrosscorr}:
+   Mark cross-correlation function for point patterns with
+   several columns of marks.
+
+ \item \texttt{rtemper}:
+   Simulated annealing or simulated tempering.
+
+ \item \texttt{rgb2hsva}:
+   Convert RGB to HSV data, like \texttt{rgb2hsv}, but preserving transparency.
+
+ \item \texttt{superimpose.ppplist}, \texttt{superimpose.splitppp}:
+   New methods for \texttt{superimpose} for lists of point patterns.
+
+ \item \texttt{dkernel}, \texttt{pkernel}, \texttt{qkernel}, \texttt{rkernel}:
+   Probability density, cumulative probability, quantiles
+   and random generation from distributions used in basic one-dimensional
+   kernel smoothing.
+
+ \item \texttt{kernel.factor}:
+   Auxiliary calculations for one-dimensional kernel smoothing.
+
+ \item \texttt{spatdim}:
+   Spatial dimension of any object in the \spst\ package.
+
+ \item \texttt{as.boxx}:
+   Convert data to a multi-dimensional box.
+
+ \item \texttt{intensity.ppx}:
+   Method for \texttt{intensity} for multi-dimensional
+   space-time point patterns.
+
+ \item \texttt{fourierbasis}:
+   Evaluate Fourier basis functions in any number of dimensions.
+
+ \item \texttt{valid}:
+   New generic function, with methods 
+   \texttt{valid.ppm}, \texttt{valid.lppm}, \texttt{valid.dppm}.
+   
+ \item \texttt{emend}, \texttt{emend.ppm}, \texttt{emend.lppm}:
+   New generic function with methods for \texttt{ppm} and \texttt{lppm}.
+   \texttt{emend.ppm} is equivalent to \texttt{project.ppm}.
+
+ \item \texttt{Penttinen}:
+   New pairwise interaction model.
+
+  \item \texttt{quantile.density}:
+   Calculates quantiles from kernel density estimates.
+
+  \item \texttt{CDF.density}:
+   Calculates cumulative distribution function from kernel density estimates.
+
+\item \texttt{triangulate.owin}: Decompose a spatial window into triangles.
+\item \texttt{fitted.lppm}: Fitted intensity values for a point process
+  on a linear network.
+  
+   \item \texttt{parameters}:
+   Extract all parameters from a fitted model.
+
+ \end{itemize}
+ 
+ 
+\section{Alphabetical list of changes}
+
+Here is a list of all changes made to existing functions,
+listed alphabetically.
+
+\begin{itemize}
+%%A
+\item \texttt{affine.owin}:
+  Allows transformation matrix to be singular, if the window is polygonal.
+
+\item \texttt{anova.mppm}: Now handles Gibbs models,
+  and performs the adjusted composite likelihood ratio test.
+  New argument \texttt{fine}.
+  
+ \item \texttt{as.function.tess}:
+    New argument \texttt{values} specifies the function values.
+
+  \item \texttt{as.im.distfun}:
+  New argument \texttt{approx} specifies the choice of algorithm.
+
+  \item \texttt{as.im.function}:
+  New argument \texttt{strict}.
+
+\item \texttt{as.layered}:
+    Default method now handles a (vanilla) list of spatial objects.
+
+\item \texttt{as.linfun.lintess}:
+\begin{itemize}
+\item New argument \texttt{values} specifies the function value for each tile.
+\item New argument \texttt{navalue}.
+\end{itemize}
+
+\item \texttt{as.linim.default}:
+   New argument \texttt{delta} controls spacing of sample points
+   in internal data.
+
+  \item \texttt{as.linnet.psp}:
+If the line segment pattern has marks, then the resulting linear network
+also carries these marks in the \verb!$lines! component.
+
+\item \texttt{as.owin}:
+    Now refuses to convert a \code{box3} to a two-dimensional window.
+
+  \item \texttt{as.owin.data.frame}:
+    New argument \texttt{step}.
+
+  \item \texttt{as.polygonal}:
+  Can now repair errors in polygon data, if \texttt{repair=TRUE}.
+
+  \item \texttt{as.solist}:
+    The argument \texttt{x} can now be a spatial object;
+    \texttt{as.solist(cells)} is the same as \texttt{solist(cells)}.
+
+%%B
+ \item \texttt{bdist.pixels}:
+   Accelerated for polygonal windows. New argument \texttt{method}.
+   
+ \item \texttt{bind.fv}:
+   New argument \texttt{clip}.
+   
+ \item \texttt{bw.ppl}:
+   New arguments \texttt{weights} and \texttt{sigma}.
+   
+ \item \texttt{bw.diggle}, \texttt{bw.ppl}, \texttt{bw.relrisk}, 
+   \texttt{bw.smoothppp}:
+    These functions now extract and store the name of the unit of length
+    from the point pattern dataset. When the bandwidth selection criterion
+    is plotted, the name of the unit of length is shown on the x-axis.
+   
+%%C
+ \item \texttt{cdf.test}:
+   \begin{itemize}
+   \item    Calculations are more robust against numerical rounding effects.
+   \item The methods for classes \texttt{ppp}, \texttt{ppm}, \texttt{lpp}, 
+     \texttt{lppm}, \texttt{slrm} have a new argument \texttt{interpolate}.
+   \end{itemize}
+
+   
+ \item \texttt{cdf.test.mppm}:
+   \begin{itemize}
+   \item     Now handles Gibbs models.
+   \item     Now recognises \texttt{covariate="x"} or \texttt{"y"}.
+   \end{itemize}
+    
+ \item \texttt{clarkevans}:
+    The argument \texttt{correction="all"} is now recognised: it selects
+    all the available options. [This is also the default.]
+
+  \item \texttt{clickpoly}:
+    The polygon is now drawn progressively as the user clicks new vertices.
+
+ \item \texttt{closepairs.ppp}, \texttt{closepairs.pp3}:
+   \begin{itemize}
+   \item New arguments \texttt{distinct} and \texttt{neat} allow more options.
+   \item Argument \texttt{ordered} has been replaced by \texttt{twice}
+    (but \texttt{ordered} is still accepted, with a warning).
+  \item  
+    Performance improved (computation time and memory requirements reduced).
+    This should improve the performance of many functions in \texttt{spatstat}.
+   \end{itemize}
+
+ \item \texttt{clusterset}:
+   Improved behaviour.
+
+ \item \texttt{clusterfit}:
+   New argument \texttt{algorithm} specifies the choice 
+   of optimisation algorithm.
+
+\item \texttt{collapse.fv}:
+   This is now treated as a method for the \texttt{nlme} 
+   generic \texttt{collapse}.
+   Its syntax has been adjusted slightly.
+
+\item \texttt{connected.im}:
+    Now handles a logical-valued image properly.
+    Arguments \texttt{...} now determine pixel resolution.
+    
+\item \texttt{connected.owin}:    
+    Arguments \texttt{...} now determine pixel resolution.
+
+  \item \texttt{contour.im}:
+   New argument \texttt{col} specifies the colour of the contour lines.
+   If \texttt{col} is a colour map, then the contours are drawn 
+   in different colours.
+
+ \item \texttt{crossing.psp}:
+    New argument \texttt{details} gives more information about the intersections
+    between the segments.
+
+ \item \texttt{cut.ppp}:
+    Argument \texttt{z} can be \texttt{"x"} or \texttt{"y"}
+    indicating one of the spatial coordinates.    
+    
+%%D
+    
+  \item \texttt{dclf.test, mad.test, dclf.progress, mad.progress,} 
+   \texttt{dclf.sigtrace, mad.sigtrace}, 
+   \texttt{dg.progress, dg.sigtrace}:
+   \begin{itemize}
+   \item 
+     New argument \texttt{clamp} determines the test statistic 
+     for one-sided tests.
+   \item 
+     New argument \texttt{rmin} determines the left endpoint
+     of the test interval.    
+   \item 
+     New argument \texttt{leaveout} specifies how to calculate
+     discrepancy between observed and simulated function values.
+   \item
+     New argument \texttt{scale} allows summary function values to be rescaled
+     before the comparison is performed.
+   \item
+     New argument \texttt{interpolate} supports interpolation of the
+     $p$-value and of the critical value of the test.
+   \end{itemize}
+ 
+
+ \item \texttt{default.rmhcontrol, default.rmhexpand}:
+   New argument \texttt{w}.
+
+   
+ \item \texttt{density.lpp}:
+   \begin{itemize}
+   \item
+    New fast algorithm (up to 1000 times faster) for the default case
+    where \texttt{kernel="gaussian"} and \texttt{continuous=TRUE}.
+    Generously contributed by Greg McSwiggan.
+   \item 
+     New argument \texttt{kernel} specifies the smoothing kernel.
+     Any of the standard one-dimensional smoothing kernels can be used.
+   \item 
+     Now supports both the `equal-split continuous' and 
+     `equal-split discontinuous' smoothers. New argument \texttt{continuous} 
+     determines the choice of smoother.
+   \item 
+     New arguments \texttt{weights} and \texttt{old}.
+   \end{itemize}
+   
+ \item \texttt{density.ppp}:
+   \begin{itemize}
+   \item A non-Gaussian kernel can now be specified
+   using the argument \texttt{kernel}.
+ \item Argument \texttt{weights} can now be a pixel image.
+   \item
+     Accelerated by about 30\% when \texttt{at="pixels"}.
+   \item Accelerated by about 15\%
+     in the case where \texttt{at="points"}
+     and \texttt{kernel="gaussian"}.
+  \item 
+     Accelerated in the cases where weights are given or \texttt{diggle=TRUE}.
+   \item New argument \texttt{verbose}.
+   \end{itemize}
+
+ \item \texttt{density.psp}:
+   \begin{itemize}
+   \item New argument \texttt{method}.
+   \item Accelerated by 1 to 2 orders of magnitude.
+   \end{itemize}
+
+ \item \texttt{dfbetas.ppm}:
+    For Gibbs models, memory usage has been dramatically reduced, 
+    so the code can handle larger datasets and finer quadrature schemes.
+
+    
+  \item \texttt{diagnose.ppm}:
+    Infinite values of \texttt{rbord} are now ignored and treated as zero.
+    This ensures that \texttt{diagnose.ppm} has a sensible default
+    when the fitted model has infinite reach.
+  
+  \item \texttt{diagnose.ppm, plot.diagppm}:
+   New arguments \texttt{col.neg, col.smooth} control the colour maps.
+
+  \item \texttt{dilation.ppp}:
+    Improved geometrical accuracy.
+    Now accepts arguments to control resolution of polygonal approximation.
+
+ \item \texttt{discs}:
+   \begin{itemize}
+   \item     Now accepts a single numeric value for \texttt{radii}.
+   \item New argument \texttt{npoly}.
+   \item Accelerated in some cases. 
+   \end{itemize}
+
+
+  \item \texttt{distfun}:
+    When the user calls a distance function 
+    that was created by \texttt{distfun},
+    the user may now give a \texttt{ppp} or \texttt{lpp}
+    object for the argument \texttt{x},
+    instead of giving two coordinate vectors \texttt{x} and \texttt{y}.
+
+%%E
+    
+  \item \texttt{edge.Trans}:
+    New argument \texttt{gW} for efficiency.
+
+  \item \texttt{effectfun}:
+    Now works for \texttt{ppm}, \texttt{kppm}, 
+    \texttt{lppm}, \texttt{dppm}, \texttt{rppm} and \texttt{profilepl} objects.
+    
+ \item \texttt{envelope}:
+   \begin{itemize}
+   \item 
+     New argument \texttt{clamp} gives greater control
+     over one-sided envelopes.
+   \item  New argument \texttt{funargs}
+   \item 
+     New argument \texttt{scale} allows global envelopes to have 
+     width proportional to a specified function of $r$,
+     rather than constant width.
+   \item 
+     New argument \texttt{funYargs} contains arguments to the summary function
+     when applied to the data pattern only.
+   \end{itemize}
+
+ \item \texttt{envelope.lpp}, \texttt{envelope.lppm}:
+    New arguments \texttt{fix.n} and \texttt{fix.marks}
+    allow envelopes to be computed
+    using simulations conditional on the observed number of points.   
+    
+%%F
+   
+ \item \texttt{Fest}:
+   Additional checks for errors in input data.
+
+ \item \texttt{fitted.lppm}:
+  New argument \texttt{leaveoneout}
+  allows leave-one-out computation of fitted value.
+  
+ \item \texttt{fitted.ppm}:
+  New option, \texttt{type="link"}.
+  
+  \item \texttt{funxy}:
+    When the user calls a function that was created by \texttt{funxy},
+    the user may now give a \texttt{ppp} or \texttt{lpp}
+    object for the argument \texttt{x},
+    instead of giving two coordinate vectors \texttt{x} and \texttt{y}.
+
+%%G
+    
+  \item \texttt{Geyer}:
+   The saturation parameter \texttt{sat} can now be less than 1.
+   
+   \item \texttt{grow.rectangle}:
+     New argument \texttt{fraction}.
+%%H
+   
+ \item \texttt{Hest}:
+   \begin{itemize}
+   \item Argument \texttt{X} can now be a pixel image with logical values.
+   \item New argument \texttt{W}. [Based on code by Kassel Hingee.]
+   \item Additional checks for errors in input data.
+   \end{itemize}
+   
+ \item \texttt{hist.im}: New argument \texttt{xname}.
+   
+%%I
+ \item \texttt{influence.ppm}:
+    For Gibbs models, memory usage has been dramatically reduced, 
+    so the code can handle larger datasets and finer quadrature schemes.
+
+ \item \texttt{integral.linfun}:
+   New argument \texttt{delta} controls step length of
+   approximation to integral.
+
+\item \texttt{intensity.ppm}:
+   Intensity approximation is now implemented for
+   area-interaction model, and Geyer saturation model.
+
+ \item \texttt{ippm}:
+   \begin{itemize}
+   \item Accelerated.
+   \item 
+   The internal format of the result has been extended slightly.
+   \item Improved defaults for numerical algorithm parameters.
+   \end{itemize}
+
+   
+%%J
+%%K
+
+ \item \texttt{Kcross.inhom}, \texttt{Kdot.inhom}, \texttt{Kmulti.inhom}:
+    These functions now allow intensity values to be given by
+    a fitted point process model.
+    New arguments \texttt{update}, \texttt{leaveoneout}, \texttt{lambdaX}.
+
+ \item \texttt{Kest}:
+    Accelerated computation (for translation and rigid corrections)
+    when window is an irregular shape.
+    
+ \item \texttt{Kest.fft}:
+   Now has \verb!...! arguments allowing control of spatial resolution.
+
+ \item \texttt{Kinhom}:
+   New argument \texttt{ratio}.
+    
+ \item \texttt{kppm}:
+   \begin{itemize}
+   \item 
+     Fitting a model with \texttt{clusters="LGCP"} no longer requires the
+     package \pkg{RandomFields} to be loaded explicitly.
+   \item
+     New argument \texttt{algorithm} specifies the choice 
+     of optimisation algorithm.
+     \item 
+       Left hand side of formula can now involve entries 
+       in the list \texttt{data}.
+     \item Refuses to fit a log-Gaussian Cox model with anisotropic covariance.
+     \item 
+    A warning about infinite values of the summary function 
+    no longer occurs when the default settings are used.
+    Also affects \texttt{mincontrast}, 
+    \texttt{cauchy.estpcf}, \texttt{lgcp.estpcf}, \texttt{matclust.estpcf},
+    \texttt{thomas.estpcf}, \texttt{vargamma.estpcf}.
+    \item
+    Improved printed output.
+  \end{itemize}
+  
+%%L
+   
+ \item \texttt{Lcross.inhom}, \texttt{Ldot.inhom}:
+    These functions now allow intensity values to be given by
+    a fitted point process model.
+    New arguments \texttt{update}, \texttt{leaveoneout}, \texttt{lambdaX}.
+
+ \item \texttt{lengths.psp}: New argument \texttt{squared}.
+   
+ \item \texttt{leverage.ppm}:
+    For Gibbs models, memory usage has been dramatically reduced, 
+    so the code can handle larger datasets and finer quadrature schemes.
+
+ \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}:
+    These methods now work for models that were fitted by logistic
+    composite likelihood (\texttt{method='logi'}).
+    
+
+ \item \texttt{linearK}, \texttt{linearpcf} and relatives:
+   \begin{itemize}
+     \item Substantially accelerated.
+     \item Ratio calculations are now supported.
+     \item New argument \texttt{ratio}.
+   \end{itemize}
+     
+ \item \texttt{linearKinhom}:
+   New argument \texttt{normpower}.
+   
+ \item \texttt{linearKinhom}, \texttt{linearpcfinhom}:
+   \begin{itemize}
+   \item Changed behaviour when \texttt{lambda} is a fitted model.
+   \item New arguments \texttt{update} and \texttt{leaveoneout}.
+   \end{itemize}
+   
+ \item \texttt{linearpcf}:
+   New argument \texttt{normpower}.
+   
+ \item \texttt{linnet}:
+   \begin{itemize}
+   \item
+     The internal format of a \texttt{linnet} (linear network) object
+    has been changed. Existing datasets of class \texttt{linnet} 
+    are still supported. However, computation will be faster if they
+    are converted to the new format. To convert a linnet object \texttt{L}
+    to the new format, use \verb!L <- as.linnet(L)!.
+   \item 
+    If the argument \texttt{edges} is given, then this argument
+    now determines the
+    ordering of the sequence of line segments. For example, the \texttt{i}-th
+    row of \texttt{edges} specifies the \texttt{i}-th line segment in 
+    \texttt{as.psp(L)}.
+  \item New argument \texttt{warn}.
+   \end{itemize}
+
+  \item \texttt{lintess}:
+    Argument \texttt{df} can be missing or \texttt{NULL}, 
+    resulting in a tessellation with only one tile.
+  
+  \item \texttt{logLik.ppm}:
+    \begin{itemize}
+    \item  New argument \texttt{absolute}.
+    \item The warning about pseudolikelihood (`log likelihood not available')
+    is given only once, and is not repeated in subsequent calls,
+    within a spatstat session.
+    \end{itemize}
+
+ \item \texttt{logLik.mppm}: new argument \texttt{warn}.
+   
+ \item \texttt{lpp}:
+   \begin{itemize}
+   \item 
+    The internal format of an \texttt{lpp} object
+    has been changed. Existing datasets of class \texttt{lpp} 
+    are still supported. However, computation will be faster if they
+    are converted to the new format. To convert an \texttt{lpp} 
+    object \texttt{X} to the new format, use \verb!X <- as.lpp(X)!.
+   \item
+     \texttt{X} can be missing or \texttt{NULL}, 
+     resulting in an empty point pattern.
+   \end{itemize}
+
+  \item \texttt{lpp}, \texttt{as.lpp}: 
+   These functions now handle the case where coordinates
+   \texttt{seg} and \texttt{tp} are given
+    but \texttt{x} and \texttt{y} are missing.
+
+ \item \texttt{lppm}:
+   \begin{itemize}
+   \item New argument \texttt{random} controls placement of dummy points.
+   \item    Computation accelerated.
+   \end{itemize}
+
+%%M
+   
+ \item \texttt{markcorr}:
+   New argument \texttt{weights} allows computation of the weighted version
+   of the mark correlation function.
+
+ \item \texttt{mppm}:
+   \begin{itemize}
+   \item 
+     Now handles models with a random effect component.
+     (This is covered in \cite[Chap.\ 16]{baddrubaturn15}.)
+   \item 
+     New argument \texttt{random} is a formula specifying the random effect.
+     (This is covered in \cite[Chap.\ 16]{baddrubaturn15}.)
+   \item 
+     Performs more checks for consistency of the input data.
+   \item 
+     New arguments \texttt{gcontrol} and \texttt{reltol.pql} control 
+     the fitting algorithm. 
+   \end{itemize}
+
+%%N
+   
+   \item \texttt{nbfires}:
+     The unit of length for the coordinates is now specified in this dataset.
+   
+ \item \texttt{nndist.lpp, nnwhich.lpp, nncross.lpp, distfun.lpp}:
+   New argument \texttt{k} allows computation of $k$-th nearest point.
+   Computation accelerated.
+
+ \item \texttt{nnfun.lpp}: New argument \texttt{k}.
+%%O
+%%P
+   
+ \item \texttt{padimage}:
+   New argument \texttt{W} allows an image to be padded out to fill any window.
+
+ \item \texttt{pcf.ppp}:
+   \begin{itemize}
+   \item
+	New argument \code{close} for advanced use.
+   \item 
+     New argument \texttt{ratio} allows several estimates of pcf to be pooled.
+   \item 
+     Now calculates an analytic approximation to the variance of
+     the estimate of the pair correlation function 
+     (when \texttt{var.approx=TRUE}).
+   \item 
+     Now returns the smoothing bandwidth used, as an attribute of the result.
+   \end{itemize}
+
+
+   
+ \item \texttt{pcfinhom}:
+   \begin{itemize}
+   \item
+	New argument \code{close} for advanced use.
+   \item 
+    Default behaviour is changed when \texttt{lambda} is a fitted model.
+    The default is now to re-fit the model to the data before computing pcf.
+    New arguments \texttt{update} and \texttt{leaveoneout} control this.
+   \end{itemize}
+
+  \item \texttt{pixellate.ppp}:
+    \begin{itemize}
+    \item If the pattern is empty, the result is an integer-valued image
+    (by default) for consistency with the results for non-empty patterns.
+    \item      Accelerated in the case where weights are given.
+    \item New arguments \texttt{fractional} and \texttt{preserve}
+      for more accurate discretisation.
+    \end{itemize}
+
+
+ \item \texttt{plot.anylist}:
+   \begin{itemize}
+   \item 
+   If a list entry \verb!x[[i]]! 
+   belongs to class \texttt{"anylist"}, it will be expanded
+   so that each entry \verb!x[[i]][[j]]! will be plotted as a separate panel.
+   \item 
+     New arguments \texttt{panel.begin.args}, \texttt{panel.end.args}
+   \item  Result is now an (invisible) list containing the result
+    from executing the plot of each panel.
+   \end{itemize}
+
+
+ \item \texttt{plot.im}:
+   \begin{itemize}
+   \item  Now handles complex-valued images.
+   \item  New argument \texttt{workaround} to avoid a bug in some MacOS
+     device drivers that causes the image to be displayed
+     in the wrong spatial orientation.
+   \end{itemize}
+
+ \item \texttt{plot.imlist}:
+   Result is now an (invisible) list containing the result
+   from executing the plot of each panel.
+
+ \item \texttt{plot.influence.ppm}:
+   New argument \texttt{multiplot}.
+
+ \item \texttt{plot.kppm}:
+   \begin{itemize}
+   \item 
+   New arguments \texttt{pause} and \texttt{xname}.
+ \item 
+    The argument \texttt{what="all"} is now recognised: it selects
+    all the available options. [This is also the default.]
+   \end{itemize}
+
+ \item \texttt{plot.leverage.ppm}:
+   New argument \texttt{multiplot}.
+
+ \item \texttt{plot.linfun}:
+   \begin{itemize}
+   \item  Now passes arguments to the function being plotted.
+   \item A scale bar is now plotted when \texttt{style="width"}.
+   \item New argument \texttt{legend}.
+   \item The return value has a different format.
+  \end{itemize}
+
+ \item \texttt{plot.linim}:
+   \begin{itemize}
+   \item A scale bar is now plotted when \texttt{style="width"}.
+   \item  The return value has a different format.
+   \end{itemize}
+
+ \item \texttt{plot.lintess}:
+   Improved plot method, with more options.
+
+   \item \texttt{plot.lpp}:
+     \begin{itemize}
+     \item New argument \texttt{show.network}.
+     \item 
+     For a point pattern with continuous marks (``real numbers'')
+    the colour arguments \texttt{cols}, \texttt{fg}, \texttt{bg} can now 
+    be vectors of colour values, and will be used to determine
+    the default colour map for the marks.
+     \end{itemize}
+
+   \item \texttt{plot.mppm}:
+   New argument \texttt{se}.
+   
+   \item \texttt{plot.msr}:
+     \begin{itemize}
+     \item  Now handles multitype measures.
+     \item New argument \texttt{multiplot}.
+     \item New argument \texttt{massthresh}.
+     \end{itemize}
+
+
+  \item \texttt{plot.pp3}:
+    New arguments \texttt{box.front}, \texttt{box.back} 
+    control plotting of the box.
+    
+   \item \texttt{plot.ppp}:
+     \begin{itemize}
+     \item  The default colour for the points is now a transparent grey,
+       if this is supported by the plot device.
+     \item For a point pattern with continuous marks (``real numbers'')
+    the colour arguments \texttt{cols}, \texttt{fg}, \texttt{bg} can now 
+    be vectors of colour values, and will be used to determine
+    the default colour map for the marks.
+  \item Now recognises graphics parameters for text, such as 
+    \texttt{family} and \texttt{srt}.
+  \item 
+    When \texttt{clipwin} is given, any parts of the boundary 
+    of the window of \texttt{x} that lie inside \texttt{clipwin} 
+    will also be plotted.
+  \end{itemize}
+
+   \item \texttt{plot.profilepl}, \texttt{plot.quadratcount}, 
+     \texttt{plot.quadrattest}, \texttt{plot.tess}:
+     Now recognise graphics parameters for text, such as 
+    \texttt{family} and \texttt{srt}.
+
+    \item \texttt{plot.solist}:
+      \begin{itemize}
+      \item 
+        New arguments \texttt{panel.begin.args}, \texttt{panel.end.args}
+      \item 
+        Result is now an (invisible) list containing the result
+        from executing the plot of each panel.
+      \end{itemize}
+
+   \item \code{ponderosa}:
+    In this installed dataset, the function \code{ponderosa.extra\$plotit}
+    has changed slightly (to accommodate the
+    dependence on the package \pkg{spatstat.utils}).
+   \item \texttt{polynom}: This function now has a help file.
+     
+   \item \texttt{pool.fv}:
+     \begin{itemize}
+     \item 
+    The default plot of the pooled function no longer includes 
+    the variance curves.
+  \item  New arguments \texttt{relabel} and \texttt{variance}.
+     \end{itemize}
+
+   \item \texttt{pool.rat}:
+     New arguments \texttt{weights}, \texttt{relabel} and \texttt{variance}.
+
+  \item \texttt{ppm}:
+    \begin{itemize}
+    \item 
+      Argument \code{interaction} can now be a function that makes an interaction,
+      such as \code{Poisson}, \code{Hardcore}, \code{MultiHard}.
+    \item 
+      Argument \texttt{subset} can now be a window (class \texttt{"owin"})
+      specifying the sub-region of data to which the model should be fitted
+      (see the sketch below).
+    \end{itemize}
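+    A minimal sketch combining both features, using the installed
+    dataset \texttt{cells} (the choice of window is arbitrary):
+<<eval=FALSE>>=
+fit <- ppm(cells ~ x, Hardcore, subset=square(0.5))
+@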
+
+  \item \texttt{ppm.ppp, ppm.quad}:
+   New argument \texttt{emend}, equivalent to \texttt{project}.
+
+ \item \texttt{ppp}:
+       \begin{itemize}
+       \item New argument \texttt{checkdup}.
+       \item 
+       If the coordinate vectors \code{x} and \code{y} contain \code{NA},
+       \code{NaN} or infinite values,
+       these points are deleted with a warning,
+       instead of causing a fatal error.
+       \end{itemize}
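+       For example (a sketch; the coordinates are arbitrary):
+<<eval=FALSE>>=
+X <- ppp(c(0.2, NA, 0.7), c(0.3, 0.5, NaN), window=square(1))
+# the two points with invalid coordinates are deleted, with a warning
+@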
+
+  \item \texttt{predict.kppm, residuals.kppm}:
+   These functions now issue a warning when the calculation ignores the 
+   cluster/Cox component and treats the model as if it were Poisson.
+   (This currently happens in \texttt{predict.kppm} when \texttt{se=TRUE}
+   or \texttt{interval != "none"}, and in \texttt{residuals.kppm}
+   when \texttt{type != "raw"}.)
+
+  \item \texttt{predict.mppm}:
+    The argument \texttt{type="all"} is now recognised: it selects
+    all the available options. [This is also the default.]
+
+  \item \texttt{predict.rhohat}:    
+     New argument \texttt{what} determines which value should be calculated:
+    the function estimate, the upper/lower confidence limits, or the
+    standard error.
+
+  \item \texttt{print.quad}: More information is printed.
+    
+  \item \texttt{progressreport}:
+    \begin{itemize}
+    \item Behaviour improved. 
+    \item New arguments \texttt{state}, \texttt{tick}, \texttt{showtime}.
+    \item New option: \verb!style="tk"!
+    \end{itemize}
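+    Typical usage (a sketch):
+<<eval=FALSE>>=
+n <- 1000
+for(i in 1:n) {
+  ## ... some long computation ...
+  progressreport(i, n)
+}
+@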
+
+
+%%Q
+   
+ \item \texttt{quadratcount.ppp}:
+   Computation accelerated in some cases.
+
+ \item \texttt{quadrat.test.ppm}:
+   Computation accelerated in some cases.
+
+ \item \texttt{quantile.ewcdf}:
+    The function is now normalised to the range \verb![0,1]!
+    before the quantiles are computed. 
+    This can be suppressed by setting \texttt{normalise=FALSE}.
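+    For example (a sketch with arbitrary weights, so that the total
+    mass of the weighted CDF is not 1):
+<<eval=FALSE>>=
+f <- ewcdf(runif(100), weights=runif(100))
+quantile(f, 0.5)                    # normalised to [0,1] first
+quantile(f, 0.5, normalise=FALSE)   # previous behaviour
+@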
+
+ \item \texttt{qqplot.ppm}:
+    Argument \texttt{expr} can now be a list of point patterns,
+    or an envelope object containing a list of point patterns.
+    
+%%R
+   
+   \item \texttt{rcellnumber}:
+     New argument \texttt{mu}.
+
+   \item \texttt{rgbim, hsvim}:
+   New argument \texttt{A} controls the alpha (transparency) channel.
+
+   \item \texttt{rgb2hex, col2hex, paletteindex, is.colour, samecolour,}
+   \texttt{complementarycolour, is.grey, to.grey}:
+   These colour tools now handle transparent colours.
+
+   \item \texttt{rgb2hex}:
+   New argument \texttt{maxColorValue}
+
+   \item \texttt{rhohat.lpp}:
+   New argument \texttt{random} controls placement of dummy points.
+
+ \item \texttt{rLGCP}:
+    This function no longer requires the package \pkg{RandomFields}
+   to be loaded explicitly. 
+
+  \item \texttt{rMaternI, rMaternII}:
+    These functions can now generate random patterns in 
+    three dimensions and higher dimensions, when the argument
+    \texttt{win} is of class \texttt{box3} or \texttt{boxx}.
+
+  \item \texttt{rmh}:
+    Accelerated, in the case where multiple patterns are saved 
+    using \texttt{nsave}.
+  
+  \item \texttt{rmh.ppm, rmhmodel.ppm, simulate.ppm}:
+   A model fitted using the \texttt{Penttinen} interaction can now be simulated.
+
+ \item \texttt{rmh.default, rmhmodel.default}:
+   \begin{itemize}
+   \item 
+     These functions now recognise \verb!cif='penttinen'!
+     for the Penttinen interaction.
+   \item 
+     New arguments \texttt{nsim}, \texttt{saveinfo}.
+   \end{itemize}
+
+  \item \texttt{rmhcontrol}:
+    New parameter \texttt{pstage} determines when to generate
+    random proposal points.
+    
+ \item \texttt{rose.default}
+   New argument \texttt{weights}.
+
+   \item \texttt{rose}
+   New arguments \texttt{start} and \texttt{clockwise} specify the convention
+   for measuring and plotting angles.
+
+ \item \texttt{rotmean}:
+   New argument \texttt{padzero}. 
+   Default behaviour has changed.
+
+   \item \texttt{rpoispp}:
+   Accelerated, when \texttt{lambda} is a pixel image.
+
+   \item \texttt{rpoisppx}:
+   	New argument \code{drop}.
+	
+   \item \texttt{rpoisline}:
+     Also returns information about the original infinite random lines.
+
+ \item \texttt{rStrauss, rHardcore, rStraussHard, rDiggleGratton, rDGS, rPenttinen}:
+   New argument \texttt{drop}.
+
+   \item \texttt{rthin}
+   \begin{itemize}
+      \item	
+      Accelerated, when \texttt{P} is a single number.
+      \item
+       \texttt{X} can now be a point pattern on a linear network
+     (class \texttt{lpp}).
+   \end{itemize}
+
+   \item \texttt{rThomas, rMatClust, rCauchy, rVarGamma}:
+     \begin{itemize}
+     \item 
+       When the model is approximately Poisson, it is simulated using 
+       \texttt{rpoispp}. 
+       This avoids computations which would require huge amounts of memory. 
+       New argument \texttt{poisthresh} controls this behaviour.
+     \item 
+       New argument \texttt{saveparents}.
+     \end{itemize}
+
+   \item \texttt{runifpointx}:
+   	New argument \code{drop}.
+	
+%%S   
+   \item Simulation:
+   Several basic simulation algorithms have been accelerated.
+   Consequently, simulation outcomes are not identical to 
+   those obtained with previous versions of \spst, even when the
+   same random seed is used. To ensure compatibility with previous
+   versions of \spst, revert to the slower code by setting
+   \texttt{spatstat.options(fastthin=FALSE, fastpois=FALSE)}.
+
+   \item \code{shapley}:
+    In this installed dataset, the function \code{shapley.extra\$plotit}
+    has changed slightly (to accommodate the
+    dependence on the package \pkg{spatstat.utils}).
+   
+   \item \texttt{simulate.ppm}:
+   New argument \texttt{w} controls the window of the simulated patterns.
+   New argument \texttt{verbose}.
+   
+ \item \texttt{Smooth.ppp}:
+   \begin{itemize}
+   \item A non-Gaussian kernel can now be specified
+   using the argument \texttt{kernel}.
+   \item Argument \texttt{weights} can now be a pixel image.
+   \item Accelerated by about 30\% in the case where \texttt{at="pixels"}.
+   \item Accelerated by about 15\% in the case where \texttt{at="points"}
+     and \texttt{kernel="gaussian"}.
+   \item Now exits gracefully if any mark values are \texttt{NA}, \texttt{NaN}
+     or \texttt{Inf}.
+   \item New argument \texttt{geometric} supports geometric-mean smoothing.
+   \end{itemize}
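+   For example (a sketch using the installed dataset \texttt{longleaf}):
+<<eval=FALSE>>=
+Smooth(longleaf, sigma=10, kernel="epanechnikov")
+Smooth(longleaf, sigma=10, geometric=TRUE)  # geometric-mean smoothing
+@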
+
+ 
+   \item \texttt{spatstat.options}:
+   New options \texttt{fastthin} and \texttt{fastpois} 
+   enable fast simulation algorithms.
+   Set these options to \texttt{FALSE} to reproduce results obtained with
+   previous versions of \spst.
+
+   \item \texttt{split.ppp}:
+   The splitting variable \texttt{f} can now be a logical vector.
+
+   \item \texttt{step}: now works for models of class \texttt{"mppm"}.
+
+   \item \texttt{stieltjes}:
+    Argument \texttt{M} can now be a \texttt{stepfun} object
+    (such as an empirical CDF).
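+    For example (a sketch), since an empirical CDF is a \texttt{stepfun}
+    object:
+<<eval=FALSE>>=
+e <- ecdf(rexp(100))
+stieltjes(function(x) x, e)   # the sample mean, as a Stieltjes integral
+@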
+
+  \item \texttt{subset.ppp}, \texttt{subset.lpp}, \texttt{subset.pp3}, 
+     \texttt{subset.ppx}:
+    The argument \texttt{subset} can now be any argument acceptable to
+    the \verb!"["! method.
+
+  \item summary functions:
+    The argument \texttt{correction="all"} is now recognised: it selects
+    all the available options. 
+    \begin{quote}
+        This applies to
+        \texttt{Fest}, \texttt{F3est}, \texttt{Gest}, 
+        \texttt{Gcross}, \texttt{Gdot}, \texttt{Gmulti}, \texttt{G3est},
+        \texttt{Gfox}, \texttt{Gcom}, \texttt{Gres}, \texttt{Hest}, 
+        \texttt{Jest}, \texttt{Jmulti}, \texttt{Jcross}, \texttt{Jdot}, 
+        \texttt{Jfox}, \texttt{Kest}, \texttt{Kinhom},
+        \texttt{Kmulti}, \texttt{Kcross}, \texttt{Kdot}, \texttt{Kcom}, 
+        \texttt{Kres}, 
+	\texttt{Kmulti.inhom}, \texttt{Kcross.inhom}, 
+        \texttt{Kdot.inhom}, \texttt{Kscaled}, \texttt{Ksector}, 
+        \texttt{Kmark}, \texttt{K3est}, \texttt{Lscaled}, \texttt{markcorr}, 
+        \texttt{markcrosscorr},
+	\texttt{nnorient}, \texttt{pairorient}, \texttt{pcfinhom}, 
+        \texttt{pcfcross.inhom}, \texttt{pcfcross}, \texttt{pcf}, 
+        \texttt{Tstat}.
+    \end{quote}
+    
+   \item \texttt{summary.ppm}:
+    New argument \texttt{fine} selects the algorithm for variance estimation.
+
+   \item \texttt{summary.owin}, \texttt{summary.im}:
+    The fraction of frame area that is occupied by the window/image
+    is now reported.
+
+  \item \texttt{sumouter}:
+    New argument \texttt{y} allows computation of asymmetric outer products.
+  
+  \item \texttt{symbolmap}:
+    \begin{itemize}
+    \item 
+     Now accepts a vector of colour values for the arguments \texttt{col}, 
+     \texttt{cols}, \texttt{fg}, \texttt{bg} if the argument \texttt{range}
+     is given.
+   \item New option: \texttt{shape="arrows"}.
+    \end{itemize}
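+    For example (a sketch of the first feature; the range and colours
+    are arbitrary):
+<<eval=FALSE>>=
+g <- symbolmap(range=c(0,100), size=3,
+               cols=c("blue", "green", "yellow", "red"))
+plot(g)
+@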
+
+
+%%T
+     
+   \item \texttt{tess}:
+   Argument \texttt{window} is ignored when \texttt{xgrid} and
+   \texttt{ygrid} are given.
+
+  \item \texttt{texturemap}:
+    Argument \texttt{textures} can be missing or \texttt{NULL}.
+    
+   \item \texttt{textureplot}: 
+     Argument \texttt{x} can now be something acceptable to \texttt{as.im}.
+     
+   \item \texttt{to.grey}:
+   New argument \texttt{transparent}.
+
+%%U
+   
+  \item \texttt{union.owin}:
+   Improved behaviour when there are more than 2 windows.
+
+   \item \texttt{update}: now works for models of class \texttt{"mppm"}.
+     
+   \item \texttt{update.kppm}:
+   \begin{itemize}
+   \item New argument \texttt{evaluate}.
+   \item Now handles additional arguments in any order, with or without names.
+   \item Changed arguments.
+   \item Improved behaviour.
+   \end{itemize}
+
+%%V
+   
+   \item \texttt{valid.ppm}:
+   This is now a method for the generic function \texttt{valid}.
+
+   \item \texttt{vcov.mppm}:
+     Now handles models with Gibbs interactions.
+
+   \item \texttt{vcov.ppm}:
+     Performance slightly improved, for Gibbs models.
+
+%%W
+%%X
+%%Y
+%%Z
+   
+ \item \verb![<-.im!:
+  Accepts an array for \texttt{value}.
+
+ \item \verb![.im!:
+   The subset index \texttt{i} can now be a linear network.
+   Then the result of \verb!x[i, drop=FALSE]! is a pixel image of
+   class \texttt{linim}.
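+   For example (a sketch using the installed network \texttt{simplenet}):
+<<eval=FALSE>>=
+Z <- as.im(function(x,y) { x + y }, as.owin(simplenet))
+Y <- Z[simplenet, drop=FALSE]   # a pixel image of class "linim"
+@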
+
+ \item \verb![.layered!:
+   \begin{itemize}
+   \item 
+       Subset index \texttt{i} can now be an \texttt{owin} object.
+     \item Additional arguments \verb!...! are now passed to other methods.
+   \end{itemize}
+
+       
+ \item \verb![.leverage.ppm!:
+   New argument \texttt{update}.
+       
+ \item \verb![.linnet!, \verb![.lpp!:
+    New argument \texttt{snip} determines what to do with segments 
+    of the network that cross the boundary of the window. 
+    Default behaviour has changed.
+
+\item \verb![.ppx!:
+  The subset index \texttt{i} may now be a spatial domain
+  of class \texttt{boxx} or \texttt{box3}.
+
+   \item \verb![.ppp!:
+   New argument \texttt{clip} determines whether the window is clipped.
+
+   \item \verb![.ppp!:
+   The previously-unused argument \texttt{drop} now determines whether 
+   to remove unused levels of a factor.
+
+   \item \verb![.pp3!, \verb![.lpp!, \verb![.ppx!, 
+     \texttt{subset.ppp, subset.pp3, subset.lpp, subset.ppx}:
+   These methods now have an argument \texttt{drop} which determines
+   whether to remove unused levels of a factor.
+
+   \item \verb![.psp!:
+   New argument \texttt{fragments} specifies whether to keep fragments of
+    line segments that are cut by the new window, or only to retain
+    segments that lie entirely inside the window.
+     
+ \item \verb![.solist!:
+       Subset index \texttt{i} can now be an \texttt{owin} object.
+ \end{itemize}
+ 
+\section{Serious Bugs Fixed}
+
+<<echo=FALSE,results=hide>>=
+nbugs <- nrow(news(grepl("^BUG", Category), 
+                   package="spatstat"))
+nbugssince <- nrow(news(Version > "1.42-0" & grepl("^BUG", Category), 
+                   package="spatstat"))
+@ 
+
+Hundreds of bugs have been detected and fixed in \spst.
+Bugs that may have affected the user are listed in the 
+package \texttt{NEWS} file. To read all these bug reports, type
+<<eval=FALSE>>=
+news(grepl("^BUG", Category), package="spatstat")
+@ 
+which currently produces a list of \Sexpr{nbugs} bugs,
+of which \Sexpr{nbugssince} were detected after publication of the
+book \cite{baddrubaturn15}.
+
+Following is a list of the {\bf most serious bugs} only, in order
+of potential impact.
+
+\newcommand\bugger[4]{%
+  \\  {} %
+  {\small (Bug introduced in \texttt{spatstat {#1}}, {#2}; %
+    fixed in \texttt{spatstat {#3}}, {#4})}%
+}
+  
+\begin{itemize}
+  
+  %% LEVEL 1: always completely wrong, broad impact
+  
+\item \texttt{nncross.ppp}:
+  Results were completely incorrect if $k > 1$.
+  \bugger{1.31-2}{april 2013}{1.35-0}{december 2013}
+
+\item \texttt{nncross.pp3}:
+  Results were completely incorrect in some cases.
+  \bugger{1.32-0}{august 2013}{1.34-0}{october 2013}
+
+ \item \texttt{cdf.test.ppm}:
+    Calculation of $p$-values was incorrect for Gibbs models: 
+    $1-p$ was computed instead of $p$.
+  \bugger{1.40-0}{december 2014}{1.45-2}{may 2016}
+
+\item \texttt{Smooth.ppp}:
+  Results of \verb!Smooth(X, at="points", leaveoneout=FALSE)!
+  were completely incorrect.
+  \bugger{1.20-5}{august 2010}{1.46-0}{july 2016}
+  
+\item \texttt{rmh}:
+  
+   \begin{itemize}
+   \item Simulation was completely incorrect in the case of 
+     a multitype point process with an interaction that does not depend
+     on the marks, such as \verb!ppm(betacells, ~marks, Strauss(60))!
+     due to a coding error in the \texttt{C} interface.
+     \bugger{1.22-3}{march 2010}{1.22-3}{june 2011}
+   \item 
+     Simulation of the Area-Interaction model was completely incorrect.
+     \bugger{1.23-6}{october 2011}{1.31-0}{january 2013}
+   \item 
+     Simulation of the Geyer saturation process was completely incorrect.
+     \bugger{1.31-0}{january 2013}{1.31-1}{march 2013}
+   \item 
+     Simulation of the Strauss-Hard Core process was partially incorrect,
+     giving point patterns with a slightly lower intensity.
+     \bugger{1.31-0}{january 2013}{1.37-0}{may 2014}
+   \item
+   The result of simulating a model with a hard core
+   did not necessarily respect the hard core constraint,
+   and simulation of a model with strong inhibition
+   did not necessarily converge. 
+   This only happened if the first order trend was large,
+   the starting state (\texttt{n.start} or \texttt{x.start}) was not given,
+   and the number of iterations \texttt{nrep} was not very large.
+   It occurred because of a poor choice for the default starting state.
+   {\small (Bug was present since about 2010.  
+     Fixed in \texttt{spatstat 1.40-0}, december 2014)}
+   \item 
+     Simulation was incorrect in the case of an inhomogeneous multitype model
+     with \texttt{fixall=TRUE} (i.e.\ with a fixed number of points 
+     of each type) if the model was segregated (i.e.\ if different types
+     of points had different first order trend). 
+     The effect of the error was that all types of points
+     had the same first order trend.
+     {\small (Bug was present since about 2010.
+       Fixed in \texttt{spatstat 1.43-0}, september 2015)}
+  \item 
+     Simulation of the Geyer saturation process was 
+     incorrectly initialised, so that the results of a short run 
+     (i.e. small value of \texttt{nrep}) were incorrect, 
+     while long runs were correct.
+     \bugger{1.17-0}{october 2009}{1.31-1}{march 2013}
+   \end{itemize}
+
+ \item \texttt{rVarGamma}:
+   Simulations were incorrect; they were generated using the wrong value
+   of the parameter \texttt{nu.ker}.  
+   \bugger{1.25-0}{december 2011}{1.35-0}{december 2013}
+
+ \item \texttt{rCauchy}:
+   Simulations were incorrect; they were generated using the wrong value
+   of the parameter \texttt{omega}.
+   \bugger{1.25-0}{december 2011}{1.25-2}{january 2012}
+   
+ \item \texttt{lppm}:
+   For multitype patterns, the fitted model was completely incorrect
+   due to an error in constructing the quadrature scheme.
+   \bugger{1.23-0}{july 2011}{1.30-0}{december 2012}
+   
+ \item \verb![.lpp!:
+   The local coordinate \texttt{seg} was completely incorrect,
+   when \texttt{i} was a window.
+   \bugger{1.31-2}{april 2013}{1.45-0}{march 2016}
+   
+ \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}:
+   Results were incorrect for non-Poisson processes
+   due to a mathematical error.
+   \bugger{1.25-0}{december 2011}{1.51-0}{may 2017}
+
+   %% LEVEL 2: often completely wrong, moderate impact
+   
+ \item \texttt{bw.pcf}:
+   Results were totally incorrect due to a typo.
+   \bugger{1.51-0}{may 2017}{1.52-0}{august 2017}   
+   
+ \item \texttt{predict.rho2hat}:
+   Results were incorrect for a \texttt{rho2hat} object computed
+   from a point pattern.
+   \bugger{1.42-0}{may 2015}{1.52-0}{august 2017}   
+   
+\item \texttt{envelope.ppm}:
+   If the model was an inhomogeneous Poisson process, 
+   the resulting envelope object was incorrect
+   (the simulations were correct, but the envelopes were calculated
+   assuming the model was CSR). 
+   \bugger{1.23-5}{september 2011}{1.23-6}{october 2011}
+
+ \item \texttt{linearK}, \texttt{linearpcf},
+   \texttt{linearKinhom}, \texttt{linearpcfinhom}
+   and multitype versions:
+    The values of these functions were sometimes greatly underestimated
+    when the network had segments shorter than 10 coordinate units.
+   \bugger{1.44-0}{december 2015}{1.46-2}{july 2016}
+
+ \item \texttt{nncross}, \texttt{distfun}, \texttt{AreaInter}:
+  Results of \texttt{nncross} were possibly incorrect 
+  when \code{X} and \code{Y} did not have the same window. 
+  This bug affected values of \texttt{distfun} and may also 
+  have affected ppm objects with interaction \texttt{AreaInter}.
+  \bugger{1.9-4}{june 2006}{1.25-2}{january 2012}
+
+ \item \texttt{update.kppm}:
+  If the call to \texttt{update} did not include a formula argument
+   or a point pattern argument, then all arguments were ignored. 
+   Example: \texttt{update(fit, improve.type="quasi")} was identical to 
+   \texttt{fit}.
+   \bugger{1.42-2}{june 2015}{1.45-0}{march 2016}
+
+  \item \texttt{markcorrint}:
+   Results were completely incorrect.
+   \bugger{1.39-0}{october 2014}{1.40-0}{december 2014}
+
+  %% LEVEL 3: substantially incorrect, moderate impact
+  
+\item \texttt{density.ppp}:
+  Values of \verb!density(X, at="points")!
+  and \verb!Smooth(X, at="points")!
+  were sometimes incorrect, due to omission of
+  the contribution from the data point with the smallest $x$ coordinate.
+  \bugger{1.26-0}{april 2012}{1.46-1}{july 2016}
+  
+ \item \texttt{update.ppm}:
+   If the argument \texttt{Q} was given,
+   the results were usually incorrect, or an error was generated.
+   \bugger{1.38-0}{august 2014}{1.38-1}{august 2014}
+
+\item \texttt{subfits}:
+    The interaction coefficients of the submodels were incorrect
+    for Gibbs models with a multitype interaction (\texttt{MultiStrauss}, etc).
+   \bugger{1.35-0}{december 2013}{1.45-2}{may 2016}
+   
+\item \texttt{F3est}:
+   Estimates of $F(r)$ for the largest value of $r$ 
+   were wildly incorrect. 
+     {\small (Bug was present since about 2010.
+       Fixed in \texttt{spatstat 1.48-0}, december 2016)}
+   
+ \item \texttt{kppm}, \texttt{matclust.estpcf}, \texttt{pcfmodel}:
+    The pair correlation function of the M\'atern Cluster Process
+    was evaluated incorrectly at distances close to 0.
+    This could have affected the fitted parameters 
+    in \texttt{matclust.estpcf()} or \texttt{kppm(clusters="MatClust")}.
+    \bugger{1.20-2}{august 2010}{1.33-0}{september 2013}
+    
+ \item \texttt{ppm}:
+   Results were incorrect for the Geyer saturation model
+   with a non-integer value of the saturation parameter \texttt{sat}.
+   \bugger{1.20-0}{july 2010}{1.31-2}{april 2013}
+   
+ \item \texttt{clip.infline}: 
+   Results were incorrect unless the midpoint of the window
+   was the coordinate origin.
+   \bugger{1.15-1}{april 2009}{1.48-0}{december 2016}
+   
+ \item \texttt{intensity.ppm}:
+   Result was incorrect for Gibbs models if the model was 
+    exactly equivalent to a Poisson process (i.e. if all interaction
+    coefficients were exactly zero).
+   \bugger{1.28-1}{june 2012}{1.47-0}{october 2016}
+   
+ \item \texttt{funxy}: 
+   Did not correctly handle one-line functions. 
+   The resulting objects evaluated the wrong function in some cases.
+   \bugger{1.45-0}{march 2016}{1.46-0}{july 2016}   
+
+%% LEVEL 4: partially incorrect
+   
+\item \texttt{density.ppp}:
+  If the smoothing bandwidth \texttt{sigma} was very small 
+  (e.g.\ less than the width of a pixel), 
+  results were inaccurate if the default resolution was used,
+  and completely incorrect if a user-specified resolution was given.
+  \bugger{1.26-0}{april 2012}{1.52-0}{august 2017}
+  
+ \item \texttt{selfcrossing.psp}:
+   $y$ coordinate values were incorrect.   
+   \bugger{1.23-2}{august 2011}{1.25-3}{february 2012}
+     
+ \item \texttt{Geyer}:
+   For point process models with the \texttt{Geyer} interaction, 
+   \texttt{vcov.ppm} and \texttt{suffstat} sometimes gave incorrect answers.
+   \bugger{1.27-0}{may 2012}{1.30-0}{december 2012}
+   
+ \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}:
+    Calculations were incorrect for a Geyer model fitted using
+    an edge correction other than \texttt{"border"} or \texttt{"none"}.
+   \bugger{1.25-0}{december 2011}{1.51-0}{may 2017}
+
+ \item \texttt{vcov.ppm}, \texttt{suffstat}:
+   These functions sometimes gave incorrect values 
+   for marked point process models.   
+   \bugger{1.27-0}{may 2012}{1.29-0}{october 2012}
+   
+ \item \texttt{diagnose.ppm}:
+   When applied to a model obtained from \texttt{subfits()}, 
+   in the default case (\texttt{oldstyle=FALSE}) 
+   the variance calculations were incorrect.
+   Consequently the dotted lines representing significance bands were 
+   incorrect. An error or warning about negative variances occurred sometimes.
+   However, calculations with \texttt{oldstyle=TRUE} were correct.
+   The default has now been changed to \texttt{oldstyle=TRUE} for such models.
+   \bugger{1.35-0}{december 2013}{1.45-0}{march 2016}
+
+ \item \texttt{Smooth.ppp}:
+   Results for \verb!at="points"! were garbled, for some values of 
+   \texttt{sigma}, if \texttt{X} had more than one column of marks.
+   \bugger{1.38-0}{october 2014}{1.46-0}{july 2016}
+    
+ \item \texttt{linearK}, \texttt{linearKinhom}:
+   If any data points were located exactly at a vertex of the 
+   linear network, the weights for Ang's correction were incorrect, 
+   due to numerical error. This sometimes produced infinite 
+   or \texttt{NA} values of the linear $K$ function.  
+   \bugger{1.23-0}{july 2011}{1.27-0}{may 2012}
+
+ \item \texttt{Kinhom}, \texttt{Linhom}:
+   the results were not renormalised (even if \texttt{renormalise=TRUE})
+   in some cases.
+   \bugger{1.21-0}{december 2010}{1.37-0}{may 2014}
+     
+ \item \texttt{Kinhom}, \texttt{Linhom}:
+   Ignored argument \texttt{reciplambda2} in some cases.
+   \bugger{1.39-0}{october 2014}{1.40-0}{december 2014}
+
+ \item \texttt{Kinhom}, \texttt{Linhom}:
+   Calculations were incorrect if \texttt{lambda} was a fitted point
+   process model.
+   \bugger{1.38-0}{august 2014}{1.38-1}{august 2014}
+     
+ \item \texttt{integral.linim}, \texttt{integral.linfun}:
+   \begin{itemize}
+   \item 
+   results were inaccurate because of a bias in the distribution of
+   sample points.
+   \bugger{1.41-0}{february 2015}{1.47-0}{october 2016}
+   \item 
+   results were inaccurate if many of the segment lengths were
+   shorter than the width of a pixel.
+   \bugger{1.41-0}{february 2015}{1.48-0}{december 2016}
+   \end{itemize}
+   
+ \item \texttt{predict.ppm}:
+   Calculation of the conditional intensity omitted the edge correction
+   if \texttt{correction='translate'} or \texttt{correction='periodic'}. 
+   \bugger{1.17-0}{october 2009}{1.31-3}{may 2013}
+
+ \item \texttt{varblock}:
+   Calculations were incorrect if more than one column of 
+   edge corrections was computed. 
+   \bugger{1.21-1}{november 2010}{1.39-0}{october 2014}
+   
+ \item \texttt{scan.test}:
+   Results were sometimes incorrect due to numerical instability
+   (a `Gibbs phenomenon').
+   \bugger{1.24-1}{october 2011}{1.26-1}{april 2012}
+
+ \item \texttt{relrisk}:
+   When \verb!at="pixels"!, a small fraction of pixel values were sometimes
+   wildly inaccurate, due to numerical errors. This affected the 
+   range of values in the result, and therefore the appearance of plots.
+   {\small (Bug fixed in \texttt{spatstat 1.40-0}, december 2014)}
+
+ \item \texttt{predict.slrm}:
+   Results of \texttt{predict(object, newdata)} were incorrect 
+   if the spatial domain of \texttt{newdata}
+   was larger than the original domain.
+   \bugger{1.21-0}{november 2010}{1.25-3}{february 2012}
+   
+ \item \texttt{Lest}:
+   The variance approximations (Lotwick-Silverman and Ripley)
+   obtained with \texttt{var.approx=TRUE} were incorrect for \texttt{Lest}
+   (although they were correct for \texttt{Kest}) due to a coding error.
+   \bugger{1.24-1}{october 2011}{1.24-2}{november 2011}
+ 
+ \item \texttt{bw.diggle}:
+   Bandwidth was too large by a factor of 2.
+   \bugger{1.23-4}{september 2011}{1.23-5}{september 2011}
+ 
+ \item pair correlation functions (\texttt{pcf.ppp}, \texttt{pcfdot}, 
+    \texttt{pcfcross}, etc.):
+    The result had a negative bias at the maximum $r$ value,
+    because contributions to the pcf estimate from interpoint distances
+    greater than \texttt{max(r)} were mistakenly omitted. 
+    {\small (Bugs fixed in \texttt{spatstat 1.35-0}, december 2013)}
+    
+ \item \texttt{Kest}, \texttt{Lest}:
+   Gave incorrect values in very large datasets, due to numerical overflow.
+   `Very large' typically means about 1 million points in a random pattern, 
+   or 100,000 points in a tightly clustered pattern.
+   [Overflow cannot occur unless there are at least 46,341 points.]
+   
+ \item \texttt{bw.relrisk}:
+    Implementation of \texttt{method="weightedleastsquares"} was incorrect
+    and was equivalent to \texttt{method="leastsquares"}.
+    \bugger{1.21-0}{november 2010}{1.23-4}{september 2011}
+    
+\item \texttt{triangulate.owin}:
+   Results were incorrect in some special cases.
+   \bugger{1.42-2}{june 2015}{1.44-0}{december 2015}
+  
+\item \texttt{crosspairs}:
+   If \texttt{X} and \texttt{Y} were identical point patterns,
+   the result was not necessarily symmetric
+   (on some machines) due to numerical artifacts.
+   \bugger{1.35-0}{december 2013}{1.44-0}{december 2015}
+
+ \item \texttt{bdist.tiles}:
+   Values were incorrect in some cases due to numerical error.
+    {\small (Bug fixed in \texttt{spatstat 1.29-0}, october 2012)}
+   
+\item \texttt{Kest.fft}:
+  Result was incorrectly normalised.
+   \bugger{1.21-2}{january 2011}{1.44-0}{december 2015}
+  
+\item \texttt{crossdist.ppp}:
+  Ignored argument \texttt{squared} if \texttt{periodic=FALSE}.
+    {\small (Bug fixed in \texttt{spatstat 1.38-0}, july 2014)}
+
+\item polygon geometry:
+    The point-in-polygon test gave the wrong answer in some boundary cases.
+    {\small (Bug fixed in \texttt{spatstat 1.23-2}, august 2011)}
+
+\item \texttt{MultiStraussHard}:
+    If a fitted model with \texttt{MultiStraussHard} interaction was invalid,
+    \texttt{project.ppm} sometimes yielded a model that was still invalid.
+    {\small (Bug fixed in \texttt{spatstat 1.42-0}, may 2015)}
+    
+\item \texttt{pool.envelope}:  
+  Did not always respect the value of \texttt{use.theory}.
+    \bugger{1.23-5}{september 2011}{1.43-0}{september 2015}
+
+\item \texttt{nncross.lpp}, \texttt{nnwhich.lpp}, \texttt{distfun.lpp}:
+  Sometimes caused a segmentation fault.
+    \bugger{1.44-0}{december 2015}{1.44-1}{december 2015}
+
+\item \texttt{anova.ppm}:
+  If a single \texttt{object} was given, and it was a Gibbs model,
+  then \texttt{adjust} was effectively set to \texttt{FALSE}.
+  \bugger{1.39-0}{october 2014}{1.44-1}{december 2015}
+
+\end{itemize}
+
+
+\begin{thebibliography}{1}
+\bibitem{badd10wshop}
+A.~Baddeley.
+\newblock Analysing spatial point patterns in {{R}}.
+\newblock Technical report, CSIRO, 2010.
+\newblock Version 4.
+\newblock URL \texttt{https://research.csiro.au/software/r-workshop-notes/}
+
+\bibitem{baddrubaturn15}
+A. Baddeley, E. Rubak, and R. Turner.
+\newblock {\em Spatial Point Patterns: Methodology and Applications with {{R}}}.
+\newblock Chapman \& Hall/CRC Press, 2015.
+
+\end{thebibliography}
+ 
+\end{document}
diff --git a/inst/doc/updates.pdf b/inst/doc/updates.pdf
new file mode 100644
index 0000000..dc1453e
Binary files /dev/null and b/inst/doc/updates.pdf differ
diff --git a/inst/ratfor/Makefile b/inst/ratfor/Makefile
new file mode 100755
index 0000000..f91d65f
--- /dev/null
+++ b/inst/ratfor/Makefile
@@ -0,0 +1,58 @@
+
+RATFOR = /home/adrian/bin/ratfor77
+#RATFOR = /usr/local/bin/ratfor
+CPP = /usr/bin/cpp 
+
+##########################################################
+# Sources actually written by humans:
+
+RAT_SRC = dppll.r inxypOld.r 
+
+C_DOMINIC = dinfty.c dwpure.c 
+
+C_MISC = raster.h areadiff.c closepair.c connect.c corrections.c \
+       discarea.c distances.c distmapbin.c distseg.c \
+       exactdist.c  exactPdist.c  \
+       massdisthack.c poly2im.c trigraf.c utils.c xyseg.c 
+
+C_MH = methas.h dist2.h areaint.c badgey.c dgs.c \
+	diggra.c dist2.c fexitc.c getcif.c geyer.c \
+	lookup.c methas.c stfcr.c \
+	straush.c straushm.c strauss.c straussm.c
+
+C_KEST = Kloop.h Kborder.c
+
+C_SRC = $(C_DOMINIC) $(C_MISC) $(C_MH) $(C_KEST)
+
+CC_SRC = PerfectStrauss.cc
+
+HUMAN = $(RAT_SRC) $(C_SRC) $(CC_SRC)  Makefile
+
+##########################################################
+# Source to be generated automatically:
+
+RAT_FOR = dppll.f inxypOld.f 
+
+GENERATED = $(RAT_FOR) 
+
+
+######################################################
+###########  TARGETS ################################
+
+target:	$(GENERATED)
+	@echo -- Done -------
+
+tar:
+	tar cvf src.tar $(HUMAN)
+
+clean:
+	rm $(GENERATED)
+	-rm src.tar
+
+#######################################################
+#########    RULES   ##################################
+
+
+.r.f:
+	$(RATFOR) -o $@ $?
+
diff --git a/inst/ratfor/dppll.r b/inst/ratfor/dppll.r
new file mode 100755
index 0000000..07b5c82
--- /dev/null
+++ b/inst/ratfor/dppll.r
@@ -0,0 +1,55 @@
+subroutine dppll(x,y,l1,l2,l3,l4,np,nl,eps,mint,rslt,xmin,jmin)
+implicit double precision(a-h,o-z)
+dimension x(np), y(np), rslt(np,nl), xmin(np), jmin(np)
+double precision l1(nl), l2(nl), l3(nl), l4(nl)
+one = 1.d0
+zero = 0.d0
+do j = 1,nl {
+	dx = l3(j) - l1(j)
+	dy = l4(j) - l2(j)
+	alen = sqrt(dx**2 + dy**2)
+	if(alen .gt. eps) {
+		co = dx/alen
+		si = dy/alen
+	} else {
+          co = 0.5
+          si = 0.5
+        }
+	do  i = 1, np {
+		xpx1 = x(i) - l1(j)
+		ypy1 = y(i) - l2(j)
+		xpx2 = x(i) - l3(j)
+		ypy2 = y(i) - l4(j)
+		d1 = xpx1**2 + ypy1**2
+		d2 = xpx2**2 + ypy2**2
+		dd = min(d1,d2)
+		if(alen .gt. eps) {
+			xpr = xpx1*co + ypy1*si
+			if(xpr .lt. zero .or. xpr .gt. alen) {
+				d3 = -one
+			}
+			else {
+				ypr = - xpx1*si + ypy1*co
+				d3 = ypr**2
+			}
+		}
+		else {
+				d3 = -one
+		}
+		if(d3 .ge. zero) {
+			dd = min(dd,d3)
+		}
+		sd =sqrt(dd)
+		rslt(i,j) = sd
+		if(mint.gt.0) {
+			if(sd .lt. xmin(i)) {
+				xmin(i) = sd
+				if(mint.gt.1) {
+					jmin(i) = j
+				}
+			}
+		}
+	}
+}
+return
+end
diff --git a/inst/ratfor/inxypOld.r b/inst/ratfor/inxypOld.r
new file mode 100755
index 0000000..f63af05
--- /dev/null
+++ b/inst/ratfor/inxypOld.r
@@ -0,0 +1,49 @@
+subroutine inxyp(x,y,xp,yp,npts,nedges,score,onbndry)
+implicit double precision(a-h,o-z)
+dimension x(npts), y(npts), xp(nedges), yp(nedges), score(npts)
+logical first, onbndry(npts)
+zero = 0.0d0
+half = 0.5d0
+one  = 1.0d0
+do i = 1,nedges {
+  x0 = xp(i)
+  y0 = yp(i)
+  if(i == nedges) {
+    x1 = xp(1)
+    y1 = yp(1)
+  } else {
+    x1 = xp(i+1)
+    y1 = yp(i+1)
+  }
+  dx = x1 - x0
+  dy = y1 - y0
+  do j = 1,npts {
+    xcrit = (x(j) - x0)*(x(j) - x1)
+    if(xcrit <= zero) {
+      if(xcrit == zero) {
+        contrib = half
+      } else {
+        contrib = one
+      }
+      ycrit = y(j)*dx - x(j)*dy + x0*dy - y0*dx
+      if(dx < 0) {
+        if(ycrit >= zero) {
+          score(j) = score(j) + contrib
+        }
+        onbndry(j) = onbndry(j) | (ycrit == zero)
+      } else if(dx > zero) {
+        if(ycrit < zero) {
+          score(j) = score(j) - contrib
+        }
+        onbndry(j) = onbndry(j) | (ycrit == zero)
+      } else {
+        if(x(j) == x0) {
+          ycrit = (y(j) - y0)*(y(j) - y1)
+        }
+        onbndry(j) = onbndry(j) | (ycrit <= zero)
+      }
+    }
+  }
+}
+return
+end
diff --git a/inst/rawdata/amacrine/amacrine.txt b/inst/rawdata/amacrine/amacrine.txt
new file mode 100755
index 0000000..36baeaf
--- /dev/null
+++ b/inst/rawdata/amacrine/amacrine.txt
@@ -0,0 +1,295 @@
+      x      y  on
+  .0224  .0243	1
+  .0243  .1028	1
+  .1626  .1477	1
+  .1215  .0729	1
+  .2411  .0486	1
+  .0766  .1776	1
+  .1047  .2579	1
+  .0430  .3645	1
+  .1084  .4000	1
+  .1981  .2841	1
+  .2505  .2776	1
+  .2215  .1617	1
+  .3421  .1963	1
+  .2953  .0729	1
+  .3953  .0579	1
+  .4121  .1439	1
+  .3449  .2841	1
+  .3121  .3514	1
+  .0701  .5215	1
+  .0972  .6570	1
+  .0757  .7355	1
+  .0299  .8720	1
+  .0393  .9869	1
+  .0757  .8252	1
+  .1972  .8617	1
+  .1561  .9411	1
+  .2159  .7757	1
+  .1935  .6533	1
+  .2084  .5458	1
+  .2280  .4280	1
+  .3383  .4776	1
+  .3150  .5832	1
+  .3467  .5636	1
+  .3449  .3832	1
+  .4318  .3262	1
+  .4804  .2542	1
+  .5243  .1925	1
+  .5215  .1159	1
+  .5075  .0234	1
+  .5991  .0252	1
+  .6393  .1196	1
+  .6402  .2579	1
+  .6262  .3523	1
+  .5748  .3897	1
+  .4617  .4271	1
+  .4813  .4897	1
+  .4720  .6822	1
+  .3636  .7551	1
+  .3505  .6953	1
+  .3000  .8112	1
+  .2738  .9084	1
+  .2673  .9813	1
+  .3804  .8785	1
+  .4327  .8402	1
+  .5037  .8813	1
+  .5477  .9308	1
+  .5645  .8028	1
+  .5271  .5907	1
+  .6103  .6757	1
+  .6598  .7813	1
+  .6542  .8318	1
+  .6411  .9720	1
+  .7084  .9626	1
+  .7421  .8981	1
+  .7869  .7645	1
+  .7467  .6355	1
+  .6748  .6019	1
+  .6477  .5579	1
+  .6140  .4720	1
+  .6710  .1841	1
+  .7495  .2523	1
+  .7495  .0963	1
+  .7654  .0299	1
+  .9056  .1514	1
+  .9093  .2206	1
+  .9355  .2019	1
+  .9056  .3093	1
+  .9860  .3299	1
+  .9430  .4280	1
+  .7486  .4047	1
+  .7832  .4084	1
+  .7935  .3234	1
+  .7869  .4953	1
+  .9056  .5150	1
+  .8673  .5720	1
+  .8636  .6374	1
+  .8065  .7093	1
+  .8636  .7486	1
+  .8533  .8495	1
+  .8561  .9579	1
+  .9346  .9009	1
+  .9991  .9888	1
+ 1.0645  .9262	1
+ 1.0262  .7916	1
+  .9822  .6794	1
+ 1.0290  .5271	1
+ 1.0673  .4729	1
+ 1.0869  .5598	1
+ 1.0981  .6953	1
+ 1.1607  .7383	1
+ 1.1093  .8252	1
+ 1.1617  .9224	1
+ 1.2832  .8514	1
+ 1.3103  .9766	1
+ 1.4234  .9112	1
+ 1.4738  .8290	1
+ 1.4869  .9916	1
+ 1.5570  .9374	1
+ 1.5972  .9449	1
+ 1.5766  .8327	1
+ 1.5860  .7729	1
+ 1.4804  .7121	1
+ 1.4234  .7981	1
+ 1.3355  .7570	1
+ 1.2206  .7626	1
+ 1.1402  .6495	1
+ 1.2477  .6523	1
+ 1.3645  .6523	1
+ 1.3776  .5598	1
+ 1.5467  .6037	1
+ 1.5794  .6280	1
+ 1.5907  .4598	1
+ 1.4907  .4963	1
+ 1.4393  .4477	1
+ 1.3187  .4766	1
+ 1.3280  .5505	1
+ 1.2159  .5299	1
+ 1.1850  .4168	1
+ 1.1766  .3047	1
+ 1.2561  .3832	1
+ 1.2850  .2589	1
+ 1.3449  .3421	1
+ 1.3748  .3607	1
+ 1.3570  .2262	1
+ 1.4944  .3467	1
+ 1.5439  .2589	1
+ 1.5421  .1626	1
+ 1.4037  .1841	1
+ 1.4766  .1318	1
+ 1.4421  .0318	1
+ 1.5196  .0393	1
+ 1.3271  .0579	1
+ 1.3075  .1570	1
+ 1.1935  .1617	1
+ 1.1972  .0822	1
+ 1.1925  .0084	1
+ 1.2682  .0140	1
+ 1.0318  .0486	1
+ 1.0785  .2196	1
+ 1.0458  .2374	1
+ 1.0467  .2907	1
+ 1.0495  .3804	1
+  .0720  .0215	0
+  .0766  .1692	0
+  .0944  .2692	0
+  .1523  .3308	0
+  .2065  .3505	0
+  .2486  .2206	0
+  .2355  .1327	0
+  .2112  .0617	0
+  .1589  .0916	0
+  .3280  .0206	0
+  .3449  .0785	0
+  .4009  .1121	0
+  .3748  .2140	0
+  .3112  .3065	0
+  .0439  .4589	0
+  .1262  .5664	0
+  .1888  .4514	0
+  .3084  .4131	0
+  .2822  .5252	0
+  .2140  .5841	0
+  .2822  .5991	0
+  .2280  .7103	0
+  .1262  .6626	0
+  .0112  .8168	0
+  .0346  .9645	0
+  .1262  .8561	0
+  .1598  .9654	0
+  .1916  .8121	0
+  .2738  .8682	0
+  .3449  .9047	0
+  .4084  .8944	0
+  .3832  .7720	0
+  .3355  .7327	0
+  .4065  .6692	0
+  .4168  .6028	0
+  .4645  .6991	0
+  .5112  .7187	0
+  .5346  .7682	0
+  .5570  .8093	0
+  .4645  .9486	0
+  .5421  .9710	0
+  .6336  .8645	0
+  .7075  .9374	0
+  .7645  .9710	0
+  .8299  .8486	0
+  .7533  .8271	0
+  .7262  .7383	0
+  .6308  .6869	0
+  .5308  .6093	0
+  .5243  .5617	0
+  .4383  .4785	0
+  .5271  .3832	0
+  .4187  .3505	0
+  .5374  .2589	0
+  .5112  .1748	0
+  .4953  .1019	0
+  .5037  .0785	0
+  .5598  .0841	0
+  .6336  .0318	0
+  .6121  .1355	0
+  .6159  .3093	0
+  .6355  .4206	0
+  .6187  .5065	0
+  .6748  .5710	0
+  .7710  .6028	0
+  .8009  .5243	0
+  .7505  .4486	0
+  .7121  .3336	0
+  .7065  .2374	0
+  .7495  .1523	0
+  .7308  .0215	0
+  .8972  .0430	0
+  .8514  .1383	0
+  .8215  .2327	0
+  .8168  .3103	0
+  .9075  .2626	0
+  .8598  .3776	0
+  .9009  .4598	0
+  .8981  .5579	0
+  .7935  .6664	0
+  .8925  .7150	0
+  .9009  .9514	0
+  .9953  .8860	0
+  .9430  .8121	0
+  .9991  .7636	0
+  .9981  .6822	0
+  .9664  .6084	0
+ 1.0037  .5542	0
+  .9925  .4336	0
+  .9720  .3075	0
+ 1.0421  .1505	0
+ 1.0047  .0645	0
+ 1.0701  .2308	0
+ 1.1009  .2551	0
+ 1.0523  .3271	0
+ 1.1318  .3701	0
+ 1.0953  .4636	0
+ 1.1299  .5794	0
+ 1.1215  .7579	0
+ 1.1224  .8486	0
+ 1.0617  .9542	0
+ 1.2178  .1168	0
+ 1.2234  .0037	0
+ 1.3738  .0318	0
+ 1.3290  .1224	0
+ 1.4495  .0757	0
+ 1.5439  .0542	0
+ 1.5841  .1449	0
+ 1.4262  .1916	0
+ 1.3252  .2243	0
+ 1.3944  .2804	0
+ 1.5028  .2439	0
+ 1.5178  .3308	0
+ 1.4430  .3841	0
+ 1.4009  .3486	0
+ 1.2598  .3037	0
+ 1.2271  .2252	0
+ 1.2505  .3364	0
+ 1.1944  .4421	0
+ 1.3327  .4150	0
+ 1.4579  .4757	0
+ 1.5785  .5290	0
+ 1.5935  .6252	0
+ 1.4318  .5533	0
+ 1.3822  .6252	0
+ 1.3178  .5720	0
+ 1.2850  .6514	0
+ 1.2682  .6925	0
+ 1.2019  .6589	0
+ 1.1925  .5822	0
+ 1.2701  .7505	0
+ 1.2159  .8654	0
+ 1.2346  .9374	0
+ 1.3673  .9411	0
+ 1.3271  .8374	0
+ 1.4271  .8748	0
+ 1.4935  .8346	0
+ 1.5234  .9953	0
+ 1.5766  .8187	0
+ 1.5729  .7290	0
+ 1.4570  .6822	0
+ 1.4168  .7374	0
diff --git a/inst/rawdata/finpines/finpines.txt b/inst/rawdata/finpines/finpines.txt
new file mode 100755
index 0000000..65b968d
--- /dev/null
+++ b/inst/rawdata/finpines/finpines.txt
@@ -0,0 +1,127 @@
+      x             y           diameter         height
+ -1.993875      0.9297642       1  	     1.70    
+ -1.019901      0.4120694       1  	     1.70
+ -4.914071       1.985425       1  	     1.60
+ -4.469962       1.452390       5  	     4.10
+ -4.303847      0.9148214       3  	     3.10
+ -3.814774      0.8108644       4  	     4.30
+ -3.423515      0.7276988       2  	     2.30
+ -3.130071      0.6653246       4  	     4.00
+ -4.431633      0.7814268       7  	     5.40
+-0.9000000      2.0670868E-06   2  	     1.80
+ -3.899406      6.8073884E-02   3  	     3.20
+ -1.315571     -0.4788248       1  	     1.30
+ -2.499282     -0.7166504       5  	     3.80
+ -2.704594     -0.7246863       1  	     1.50
+ -4.510529      -1.641685       2  	     2.10
+ -4.134652      -1.504878       3  	     3.30
+ -4.107759      -1.576808       3  	     3.00
+ -4.040682      -1.470676       4  	     3.20
+ -3.174177      -1.218442       1  	     1.50
+ -3.150349      -2.124924       1  	     1.60
+ -4.948165      -3.213357       2  	     2.10
+-0.5656871     -0.5656838       1  	     1.40
+ -3.217394      -2.699698       3  	     2.80
+ -2.877367      -2.778625       4  	     4.00
+ -4.027665      -4.973723       1  	     1.40
+ -4.847309      -7.464154       1  	     1.60
+ -4.738381      -7.296420       3  	     4.00
+-0.3914369     -0.5803250       1  	     1.90
+ -3.444492      -5.512302       7  	     4.80
+ -1.960709      -3.019208       1  	     1.60
+ -2.225672      -3.561795       3  	     3.00
+-0.9000050      -1.558843       3  	     3.10
+ -2.450014      -4.243516       6  	     4.40
+ -3.102800      -5.597556       1  	     2.10
+ -3.248244      -5.859941       2  	     3.00
+ -3.400019      -5.888962       1  	     2.40
+ -3.286322      -6.180622       1  	     1.90
+ -3.393688      -6.122327       2  	     2.30
+ -3.700021      -6.408576       6  	     4.80
+ -4.600026      -7.967419       1  	     1.80
+ -1.436497      -3.946704       1  	     2.00
+ -2.701983      -7.423563       3  	     3.50
+ -1.941167      -7.244437       3  	     3.30
+ -1.967049      -7.341030       2  	     2.80
+ -1.669284      -6.695035       1  	     1.65
+ -1.475744      -5.918799       2  	     2.40
+ -1.268229      -4.733033       3  	     3.50
+-0.6531979      -2.619796       0  	     1.00
+-0.7119707      -4.037709       3  	     3.00
+ -1.007178      -5.711882       1  	     2.40
+-0.9506087      -7.741857       3  	     3.40
+-0.7212717      -6.862198       0  	     0.80
+-2.3614140E-05  -7.100000       2  	     2.30
+-2.2616361E-05  -6.800000       7  	     5.00
+-1.9623018E-05  -5.900000       4  	     3.60
+ 0.3820261      -7.289998       3  	     3.40
+ 0.3506266      -6.690819       2  	     4.00
+ 0.5939180      -6.674507       1  	     1.50
+ 0.5839180      -6.684507       4  	     4.70
+ 0.5752028      -6.574887       6  	     5.00
+ 0.4254918      -6.085142       2  	     2.80
+ 0.5229116      -5.977170       1  	     1.40
+ 0.7839378      -7.458917       2  	     2.90
+ 0.7734853      -7.359465       5  	     4.60
+ 0.6376027      -6.066586       1  	     1.60
+ 0.6271502      -5.967134       1  	     2.30
+ 0.6166977      -5.867682       3  	     4.40
+ 0.5957927      -5.668777       2  	     3.60
+ 0.6580755      -5.359752       6  	     4.90
+ 0.3149265      -1.364119       1  	     2.10
+ 0.7741384      -3.104949       1  	     1.50
+ 0.8709057      -3.493068       3  	     3.00
+  1.204589      -3.498423       3  	     3.00
+  1.530154      -4.443943       3  	     3.20
+  1.953639      -4.602531       3  	     3.20
+  1.992711      -4.694582       2  	     3.00
+  2.031784      -4.786633       2  	     3.00
+  2.198075      -5.154835       1  	     1.90
+  2.188075      -5.154835       3  	     3.60
+  2.237032      -5.024508       3  	     3.30
+  2.318379      -5.207218       1  	     2.40
+  3.634487      -7.794261       7  	     4.90
+  2.323348      -4.763618       2  	     2.50
+  2.411021      -4.943377       1  	     1.60
+  4.302621      -6.625516       7  	     3.90
+  4.106847      -6.572390       4  	     4.00
+  2.384621      -3.816226       4  	     4.20
+  1.539145      -1.970033       3  	     3.10
+  4.719881      -5.828613       1  	     1.40
+  3.583619      -3.341807       4  	     3.80
+  2.120918      -1.977804       1  	     1.80
+  4.457258      -2.678217       2  	     2.90
+  3.221761      -1.367573       1  	     2.20
+  3.129711      -1.328499       3  	     3.50
+  1.933057     -0.8205435       3  	     3.20
+  4.079437     -0.4996840       1  	     2.10
+  4.069437     -0.5096840       6  	     5.20
+  2.757459     -0.4862275       3  	     3.50
+  2.673722     -0.3757801       5  	     3.90
+  2.382109     -0.2924980       1  	     1.50
+  2.696299     -0.1413200       0  	     0.90
+  3.305476     -0.1727244       2  	     2.80
+  3.295476     -0.1627244       1  	     1.50
+  3.595065     -0.1884266       1  	     2.30
+  3.894654     -0.2041288       4  	     4.00
+  4.397319     -0.1535792       0  	     0.90
+  2.490487      0.2178892       2  	     2.30
+  4.881354      0.4270628       1  	     1.20
+  3.366912      0.4731882       2  	     2.50
+  3.862046      0.5427747       3  	     3.70
+  4.159126      0.5845266       1  	     2.00
+  4.037712      0.7119570       4  	     3.80
+  3.325702      0.7068992       0  	     1.30
+  2.934443      0.6237346       3  	     3.20
+  4.733037       1.268212       2  	     2.70
+  4.698464       1.710100       5  	     4.80
+  3.296725       1.679764       2  	     2.30
+ 0.6709366      0.4357109       6  	     4.50
+  2.289731       1.430781       0  	     1.30
+ 0.7372370      0.5162184       0  	     1.30
+  2.374739       1.992640       3  	     3.10
+  1.198207       1.845074       5  	     4.30
+ 0.8714234       1.341872       1  	     2.10
+ 0.6180466       1.028600       6  	     4.80
+ 0.2079127      0.9781474       2  	     2.50
+-0.1395106       1.995128       0            1.20    
diff --git a/inst/rawdata/gorillas/vegetation.asc b/inst/rawdata/gorillas/vegetation.asc
new file mode 100644
index 0000000..d6b490d
--- /dev/null
+++ b/inst/rawdata/gorillas/vegetation.asc
@@ -0,0 +1,155 @@
+ncols         181
+nrows         149
+xllcorner     580440.38505253
+yllcorner     674156.51146465
+cellsize      30.70932052048
+NODATA_value  -9999
+-9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 1 1 1 1 1 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9 [...]
+-9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 1 1 1 1 1 1 1 1 1 1 1 1 1 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -999 [...]
+-9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -999 [...]
+-9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9 [...]
+-9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9 [...]
+-9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9 [...]
+-9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9 [...]
+[ASCII raster grid data: NODATA value -9999, integer category codes 1-6; rows truncated by the list archiver ("[...]") -- see the full grid in the upstream source]
+-9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 1 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -9999 -999 [...]
diff --git a/inst/rawdata/osteo/osteo36.txt b/inst/rawdata/osteo/osteo36.txt
new file mode 100644
index 0000000..64e3d1e
--- /dev/null
+++ b/inst/rawdata/osteo/osteo36.txt
@@ -0,0 +1,30 @@
+"x" "y" "z"
+6.36363636363636 76.3636363636364 -4
+58.1818181818182 72.7272727272727 -10
+66.3636363636364 49.0909090909091 -13
+28.1818181818182 12.7272727272727 -15
+41.8181818181818 50 -15
+24.5454545454545 56.3636363636364 -22
+2.72727272727273 61.8181818181818 -11
+43.6363636363636 31.8181818181818 -25
+74.5454545454545 16.3636363636364 -23
+15.4545454545455 19.0909090909091 -24
+50 95.4545454545455 -26
+80.9090909090909 95.4545454545455 -25
+22.7272727272727 93.6363636363636 -42
+56.3636363636364 77.2727272727273 -38
+38.1818181818182 51.8181818181818 -46
+79.0909090909091 56.3636363636364 -46
+81.8181818181818 30.9090909090909 -47
+75.4545454545455 80.9090909090909 -53
+33.6363636363636 79.0909090909091 -64
+57.2727272727273 59.0909090909091 -62
+7.27272727272727 31.8181818181818 -64
+70.9090909090909 22.7272727272727 -68
+56.3636363636364 8.18181818181818 -69
+27.2727272727273 1.81818181818182 -69
+32.7272727272727 35.4545454545455 -75
+12.7272727272727 14.5454545454545 -80
+52.7272727272727 44.5454545454545 -90
+31.8181818181818 64.5454545454545 -94
+9.09090909090909 97.2727272727273 -94
diff --git a/inst/rawdata/sandholes/sandholes.jpg b/inst/rawdata/sandholes/sandholes.jpg
new file mode 100644
index 0000000..09eca0b
Binary files /dev/null and b/inst/rawdata/sandholes/sandholes.jpg differ
diff --git a/inst/rawdata/vesicles/activezone.txt b/inst/rawdata/vesicles/activezone.txt
new file mode 100644
index 0000000..e6dda8d
--- /dev/null
+++ b/inst/rawdata/vesicles/activezone.txt
@@ -0,0 +1,10 @@
+"x0" "y0" "x1" "y1"
+404.701192457016 288.331701251585 412.829312257349 310.006687385806
+412.829312257349 310.006687385806 412.829312257349 334.391046786805
+412.829312257349 334.391046786805 429.085551858014 383.159765588801
+429.085551858014 383.159765588801 442.632418191902 418.38161805691
+442.632418191902 418.38161805691 461.598031059346 461.731590325351
+461.598031059346 461.731590325351 480.563643926789 486.11594972635
+480.563643926789 486.11594972635 502.238630061009 524.04717546124
+502.238630061009 524.04717546124 526.622989462008 553.85028139579
+526.622989462008 553.85028139579 548.297975596229 572.81589426323
diff --git a/inst/rawdata/vesicles/mitochondria.txt b/inst/rawdata/vesicles/mitochondria.txt
new file mode 100644
index 0000000..294627a
--- /dev/null
+++ b/inst/rawdata/vesicles/mitochondria.txt
@@ -0,0 +1,24 @@
+"x" "y"
+274.651275651692 543.01278832868
+301.745008319468 559.26902792934
+312.582501386578 580.94401406357
+315.291874653356 608.03774673134
+312.582501386578 643.25959919945
+307.163754853023 673.062705134
+288.198141985579 700.15643780178
+258.395036051026 727.25017046956
+225.882556849695 746.215783337
+206.916943982252 765.18139620444
+171.695091514143 781.43763580511
+144.601358846367 773.30951600477
+109.379506378258 757.05327640411
+98.542013311148 738.08766353667
+90.413893510815 719.12205066922
+98.542013311148 697.447064535
+98.542013311148 675.77207840078
+103.960759844703 637.84085266589
+112.088879645036 597.20025366423
+128.345119245702 570.10652099645
+155.438851913478 556.55965466257
+196.079450915141 534.88466852835
+236.720049916805 532.17529526157
diff --git a/inst/rawdata/vesicles/presynapse.txt b/inst/rawdata/vesicles/presynapse.txt
new file mode 100644
index 0000000..1a7fbeb
--- /dev/null
+++ b/inst/rawdata/vesicles/presynapse.txt
@@ -0,0 +1,70 @@
+"x" "y"
+282.779395452024 14.685001307049
+309.8731281198 36.359987441269
+331.548114254021 66.163093375823
+336.966860787576 87.8380795100441
+347.804353854687 106.803692377487
+345.094980587909 133.897425045263
+364.060593455352 155.572411179483
+369.479339988907 185.375517114037
+385.735579589573 223.306742848923
+393.863699389906 253.109848783477
+401.991819190239 280.203581451253
+410.119938990571 307.297314119028
+412.829312257349 339.80979332036
+429.085551858014 383.159765588801
+442.632418191902 418.38161805691
+464.307404326123 456.312843791796
+480.563643926789 491.534696259905
+504.948003327787 518.62842892768
+526.622989462008 548.43153486223
+548.297975596229 572.81589426323
+569.972961730449 597.20025366423
+586.229201331115 618.87523979845
+580.81045479756 651.38771899978
+578.101081530782 686.60957146789
+572.682334997227 735.37829026989
+542.879229062673 781.43763580511
+523.91361619523 846.46259420777
+496.819883527454 914.19692587721
+467.016777592901 971.09376447954
+450.760537992235 998.18749714731
+437.213671658347 1017.15311001476
+401.991819190239 1017.15311001476
+369.479339988907 1027.99060308187
+345.094980587909 1030.69997634865
+309.8731281198 1022.57185654831
+274.651275651692 995.47812388054
+242.13879645036 968.38439121276
+220.46381031614 946.70940507854
+193.370077648364 925.03441894432
+166.276344980588 906.06880607688
+141.89198557959 884.39381994266
+125.635745978924 876.26570014232
+93.123266777593 868.13758034199
+82.285773710483 846.46259420777
+63.320160843039 805.82199520611
+49.773294509151 765.18139620444
+33.517054908486 751.63452987055
+22.679561841376 724.54079720278
+36.226428175264 692.02831800145
+57.901414309484 656.80646553334
+68.738907376595 632.42210613234
+79.576400443705 599.90962693101
+87.704520244038 572.81589426323
+112.088879645036 545.72216159546
+125.635745978924 526.75654872801
+133.763865779257 491.534696259905
+150.020105379922 459.022217058574
+166.276344980588 431.928484390798
+177.113838047698 393.997258655912
+187.951331114809 353.356659654248
+196.079450915141 310.006687385806
+201.498197448697 269.366088384142
+201.498197448697 204.34112998148
+190.660704381586 185.375517114037
+185.241957848031 152.863037912706
+182.532584581254 106.803692377487
+193.370077648364 63.453720109045
+223.173183582917 33.650614174492
+255.685662784248 11.975628040271
diff --git a/inst/rawdata/vesicles/vesicles.csv b/inst/rawdata/vesicles/vesicles.csv
new file mode 100644
index 0000000..7969fda
--- /dev/null
+++ b/inst/rawdata/vesicles/vesicles.csv
@@ -0,0 +1,38 @@
+"","x","y"
+"1",467.016777592901,776.01888927155
+"2",445.34179145868,827.49698134033
+"3",364.060593455352,911.48755261043
+"4",323.419994453688,914.19692587721
+"5",339.676234054354,957.54689814565
+"6",345.094980587909,873.55632687555
+"7",323.419994453688,838.33447440744
+"8",282.779395452024,838.33447440744
+"9",383.026206322795,781.43763580511
+"10",404.701192457016,762.47202293766
+"11",437.213671658347,740.79703680344
+"12",464.307404326123,705.57518433533
+"13",496.819883527454,729.95954373633
+"14",510.366749861342,651.38771899978
+"15",461.598031059346,673.062705134
+"16",399.282445923461,694.73769126822
+"17",412.829312257349,645.96897246623
+"18",372.188713255685,643.25959919945
+"19",393.863699389906,602.61900019779
+"20",472.435524126456,599.90962693101
+"21",480.563643926789,637.84085266589
+"22",445.34179145868,575.52526753001
+"23",461.598031059346,510.50030912735
+"24",504.948003327787,561.97840119612
+"25",437.213671658347,450.894097258241
+"26",423.666805324459,491.534696259905
+"27",377.60745978924,440.056604191131
+"28",347.804353854687,404.834751723022
+"29",380.316833056018,383.159765588801
+"30",415.538685524126,412.962871523355
+"31",388.444952856351,307.297314119028
+"32",334.257487520799,342.519166587137
+"33",339.676234054354,293.750447785141
+"34",353.223100388242,258.528595317032
+"35",328.838740987244,188.084890380814
+"36",277.360648918469,55.325600308712
+"37",548.297975596229,627.00335959878
diff --git a/inst/rawdata/vesicles/vesicles.txt b/inst/rawdata/vesicles/vesicles.txt
new file mode 100644
index 0000000..f593850
--- /dev/null
+++ b/inst/rawdata/vesicles/vesicles.txt
@@ -0,0 +1,38 @@
+"x" "y"
+467.016777592901 776.01888927155
+445.34179145868 827.49698134033
+364.060593455352 911.48755261043
+323.419994453688 914.19692587721
+339.676234054354 957.54689814565
+345.094980587909 873.55632687555
+323.419994453688 838.33447440744
+282.779395452024 838.33447440744
+383.026206322795 781.43763580511
+404.701192457016 762.47202293766
+437.213671658347 740.79703680344
+464.307404326123 705.57518433533
+496.819883527454 729.95954373633
+510.366749861342 651.38771899978
+461.598031059346 673.062705134
+399.282445923461 694.73769126822
+412.829312257349 645.96897246623
+372.188713255685 643.25959919945
+393.863699389906 602.61900019779
+472.435524126456 599.90962693101
+480.563643926789 637.84085266589
+445.34179145868 575.52526753001
+461.598031059346 510.50030912735
+504.948003327787 561.97840119612
+437.213671658347 450.894097258241
+423.666805324459 491.534696259905
+377.60745978924 440.056604191131
+347.804353854687 404.834751723022
+380.316833056018 383.159765588801
+415.538685524126 412.962871523355
+388.444952856351 307.297314119028
+334.257487520799 342.519166587137
+339.676234054354 293.750447785141
+353.223100388242 258.528595317032
+328.838740987244 188.084890380814
+277.360648918469 55.325600308712
+548.297975596229 627.00335959878
diff --git a/inst/rawdata/vesicles/vesiclesimage.tif b/inst/rawdata/vesicles/vesiclesimage.tif
new file mode 100644
index 0000000..fa12d49
Binary files /dev/null and b/inst/rawdata/vesicles/vesiclesimage.tif differ
diff --git a/inst/rawdata/vesicles/vesiclesmask.tif b/inst/rawdata/vesicles/vesiclesmask.tif
new file mode 100644
index 0000000..251cf70
Binary files /dev/null and b/inst/rawdata/vesicles/vesiclesmask.tif differ
diff --git a/inst/rawdata/vesicles/vesicleswindow.csv b/inst/rawdata/vesicles/vesicleswindow.csv
new file mode 100644
index 0000000..9ade1f6
--- /dev/null
+++ b/inst/rawdata/vesicles/vesicleswindow.csv
@@ -0,0 +1,93 @@
+"","x","y","id"
+"1",282.779396210649,14.6850018489232,1
+"2",309.873127166101,36.3599882432439,1
+"3",331.548113560422,66.1630940260726,1
+"4",336.966860159002,87.8380804203933,1
+"5",347.804353356162,106.803693006062,1
+"6",345.094980566235,133.897425998963,1
+"7",364.060593151903,155.572411374559,1
+"8",369.479339750483,185.375517157388,1
+"9",385.735579546224,223.306743347449,1
+"10",393.863698934732,253.109849130277,1
+"11",401.99181832324,280.203582123178,1
+"12",410.119938730472,307.297315116079,1
+"13",412.8293115204,339.809793688836,1
+"14",429.085551316141,383.159766477477,1
+"15",442.632417303229,418.381618858886,1
+"16",464.307403697549,456.312844030223,1
+"17",480.56364349329,491.534696411632,1
+"18",504.948002677539,518.628429404533,1
+"19",526.622989071859,548.431534168637,1
+"20",548.29797546618,572.815893352885,1
+"21",569.972960841776,597.200253555858,1
+"22",586.229200637517,618.875238931455,1
+"23",580.810454038937,651.387718522936,1
+"24",578.101081249009,686.609570904345,1
+"25",572.682334650428,735.378289272842,1
+"26",542.8792288676,781.437634851411,1
+"27",523.913615263207,846.462594034373,1
+"28",496.81988328903,914.196924988539,1
+"29",467.016777506202,971.093763764268,1
+"30",450.760537710461,998.187496757169,1
+"31",437.213670704649,1017.15310934284,1
+"32",401.99181832324,1017.15310934284,1
+"33",369.479339750483,1027.99060254,1
+"34",345.094980566235,1030.69997532993,1
+"35",309.873127166101,1022.57185594142,1
+"36",274.651275803417,995.478122948517,1
+"37",242.13879723066,968.38439097434,1
+"38",220.463810836339,946.70940458002,1
+"39",193.370077843439,925.034418185699,1
+"40",166.276345869262,906.06880560003,1
+"41",141.891985666289,884.39381920571,1
+"42",125.635746889273,876.265699817202,1
+"43",93.123267297792,868.137579409969,1
+"44",82.2857741006317,846.462594034373,1
+"45",63.3201615149633,805.821995054384,1
+"46",49.7732955278751,765.181396074395,1
+"47",33.5170557321346,751.634529068582,1
+"48",22.6795625349742,724.540797094406,1
+"49",36.2264285220625,692.028317502925,1
+"50",57.9014149163831,656.806465121516,1
+"51",68.7389081135435,632.422105937267,1
+"52",79.5764013107038,599.909626345786,1
+"53",87.7045206992119,572.815893352885,1
+"54",112.08887988346,545.722161378709,1
+"55",125.635746889273,526.756547774316,1
+"56",133.763866277781,491.534696411632,1
+"57",150.020106073522,459.022217838875,1
+"58",166.276345869262,431.928484845974,1
+"59",177.113838047698,393.997258655913,1
+"60",187.951331244858,353.356659675924,1
+"61",196.079451652091,310.006687906007,1
+"62",201.498198250671,269.366088926018,1
+"63",201.498198250671,204.34113076178,1
+"64",190.660705053511,185.375517157388,1
+"65",185.241958454931,152.863038584631,1
+"66",182.532584646278,106.803693006062,1
+"67",193.370077843439,63.4537202174204,1
+"68",223.173183626267,33.6506144345916,1
+"69",255.685663217748,11.9756290589953,1
+"70",196.079451652091,534.884668181549,2
+"71",155.438852672102,556.559654575869,2
+"72",128.345119679201,570.106520562958,2
+"73",112.08887988346,597.200253555858,2
+"74",103.960760494952,637.840852535847,2
+"75",98.5420138963722,675.772077707184,2
+"76",98.5420138963722,697.447064101505,2
+"77",90.4138945078641,719.122050495826,2
+"78",98.5420138963722,738.087663081494,2
+"79",109.379507093533,757.053275667162,2
+"80",144.601359474941,773.309515462903,2
+"81",171.695092467842,781.437634851411,2
+"82",206.916944849251,765.181396074395,2
+"83",225.88255743492,746.215782470002,2
+"84",258.395037026401,727.250169884334,2
+"85",288.198142809229,700.156436891433,2
+"86",307.163754376173,673.062704917256,2
+"87",312.582500974754,643.259599134428,2
+"88",315.291873764681,608.037745734294,2
+"89",312.582500974754,580.944013760118,2
+"90",301.745008796318,559.269027365797,2
+"91",274.651275803417,543.012787570057,2
+"92",236.72005063208,532.175294372896,2
diff --git a/inst/rawdata/vesicles/vesicleswindow.txt b/inst/rawdata/vesicles/vesicleswindow.txt
new file mode 100644
index 0000000..ba4cf7c
--- /dev/null
+++ b/inst/rawdata/vesicles/vesicleswindow.txt
@@ -0,0 +1,93 @@
+"x" "y" "id"
+282.779396210649 14.6850018489232 1
+309.873127166101 36.3599882432439 1
+331.548113560422 66.1630940260726 1
+336.966860159002 87.8380804203933 1
+347.804353356162 106.803693006062 1
+345.094980566235 133.897425998963 1
+364.060593151903 155.572411374559 1
+369.479339750483 185.375517157388 1
+385.735579546224 223.306743347449 1
+393.863698934732 253.109849130277 1
+401.99181832324 280.203582123178 1
+410.119938730472 307.297315116079 1
+412.8293115204 339.809793688836 1
+429.085551316141 383.159766477477 1
+442.632417303229 418.381618858886 1
+464.307403697549 456.312844030223 1
+480.56364349329 491.534696411632 1
+504.948002677539 518.628429404533 1
+526.622989071859 548.431534168637 1
+548.29797546618 572.815893352885 1
+569.972960841776 597.200253555858 1
+586.229200637517 618.875238931455 1
+580.810454038937 651.387718522936 1
+578.101081249009 686.609570904345 1
+572.682334650428 735.378289272842 1
+542.8792288676 781.437634851411 1
+523.913615263207 846.462594034373 1
+496.81988328903 914.196924988539 1
+467.016777506202 971.093763764268 1
+450.760537710461 998.187496757169 1
+437.213670704649 1017.15310934284 1
+401.99181832324 1017.15310934284 1
+369.479339750483 1027.99060254 1
+345.094980566235 1030.69997532993 1
+309.873127166101 1022.57185594142 1
+274.651275803417 995.478122948517 1
+242.13879723066 968.38439097434 1
+220.463810836339 946.70940458002 1
+193.370077843439 925.034418185699 1
+166.276345869262 906.06880560003 1
+141.891985666289 884.39381920571 1
+125.635746889273 876.265699817202 1
+93.123267297792 868.137579409969 1
+82.2857741006317 846.462594034373 1
+63.3201615149633 805.821995054384 1
+49.7732955278751 765.181396074395 1
+33.5170557321346 751.634529068582 1
+22.6795625349742 724.540797094406 1
+36.2264285220625 692.028317502925 1
+57.9014149163831 656.806465121516 1
+68.7389081135435 632.422105937267 1
+79.5764013107038 599.909626345786 1
+87.7045206992119 572.815893352885 1
+112.08887988346 545.722161378709 1
+125.635746889273 526.756547774316 1
+133.763866277781 491.534696411632 1
+150.020106073522 459.022217838875 1
+166.276345869262 431.928484845974 1
+177.113838047698 393.997258655913 1
+187.951331244858 353.356659675924 1
+196.079451652091 310.006687906007 1
+201.498198250671 269.366088926018 1
+201.498198250671 204.34113076178 1
+190.660705053511 185.375517157388 1
+185.241958454931 152.863038584631 1
+182.532584646278 106.803693006062 1
+193.370077843439 63.4537202174204 1
+223.173183626267 33.6506144345916 1
+255.685663217748 11.9756290589953 1
+196.079451652091 534.884668181549 2
+155.438852672102 556.559654575869 2
+128.345119679201 570.106520562958 2
+112.08887988346 597.200253555858 2
+103.960760494952 637.840852535847 2
+98.5420138963722 675.772077707184 2
+98.5420138963722 697.447064101505 2
+90.4138945078641 719.122050495826 2
+98.5420138963722 738.087663081494 2
+109.379507093533 757.053275667162 2
+144.601359474941 773.309515462903 2
+171.695092467842 781.437634851411 2
+206.916944849251 765.181396074395 2
+225.88255743492 746.215782470002 2
+258.395037026401 727.250169884334 2
+288.198142809229 700.156436891433 2
+307.163754376173 673.062704917256 2
+312.582500974754 643.259599134428 2
+315.291873764681 608.037745734294 2
+312.582500974754 580.944013760118 2
+301.745008796318 559.269027365797 2
+274.651275803417 543.012787570057 2
+236.72005063208 532.175294372896 2
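
The vesicle coordinates and the two boundary traverses added above underpin the vesicles dataset shipped with the package. A minimal sketch of how such raw files could be assembled into spatstat objects (an assumption-laden illustration: it takes the id=1 traverse as the outer boundary and id=2 as a hole, assumes the files sit in the working directory, and the vertex order may need reversing since spatstat expects anticlockwise outer boundaries and clockwise holes):

    library(spatstat)
    ves <- read.table("vesicles.txt", header=TRUE)
    win <- read.table("vesicleswindow.txt", header=TRUE)
    # one data frame of (x,y) vertices per boundary traverse
    bdry <- split(win[, c("x", "y")], win$id)
    W <- owin(poly=bdry)              # polygonal window with a hole
    X <- ppp(ves$x, ves$y, window=W)  # vesicle centres in that window
    plot(X)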
diff --git a/man/AreaInter.Rd b/man/AreaInter.Rd
new file mode 100644
index 0000000..a10177b
--- /dev/null
+++ b/man/AreaInter.Rd
@@ -0,0 +1,208 @@
+\name{AreaInter}
+\alias{AreaInter}
+\title{The Area Interaction Point Process Model}
+\description{
+  Creates an instance of the Area Interaction point process model
+  (Widom-Rowlinson penetrable spheres model) 
+  which can then be fitted to point pattern data.
+}
+\usage{
+  AreaInter(r)
+}
+\arguments{
+  \item{r}{The radius of the discs in the area interaction process}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of the area-interaction process with disc radius \eqn{r}.
+}
+\details{
+  This function defines the interpoint interaction structure of a point
+  process called the Widom-Rowlinson penetrable sphere model
+  or area-interaction process. It can be used to fit this model to
+  point pattern data.
+
+  The function \code{\link{ppm}()}, which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the area interaction structure is
+  yielded by the function \code{AreaInter()}. See the examples below.
+
+  In \bold{standard form}, the area-interaction process
+  (Widom and Rowlinson, 1970;
+  Baddeley and Van Lieshout, 1995) with disc radius \eqn{r},
+  intensity parameter \eqn{\kappa}{\kappa} and interaction parameter
+  \eqn{\gamma}{\gamma} is a point process with probability density
+  \deqn{
+    f(x_1,\ldots,x_n) =
+    \alpha \kappa^{n(x)} \gamma^{-A(x)}
+  }{
+    f(x[1],\ldots,x[n]) =
+    \alpha . \kappa^n(x) . \gamma^(-A(x))
+  }
+  for a point pattern \eqn{x}, where 
+  \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the 
+  points of the pattern, \eqn{n(x)} is the number of points in the
+  pattern, and \eqn{A(x)} is the area of the region formed by
+  the union of discs of radius \eqn{r} centred at the points
+  \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]}.
+  Here \eqn{\alpha}{\alpha} is a normalising constant.
+
+  The interaction parameter \eqn{\gamma}{\gamma} can be any positive number.
+  If \eqn{\gamma = 1}{\gamma = 1} then the model reduces to a Poisson
+  process with intensity \eqn{\kappa}{\kappa}.
+  If \eqn{\gamma < 1}{\gamma < 1} then the process is regular,
+  while if \eqn{\gamma > 1}{\gamma > 1} the process is clustered.
+  Thus, an area interaction process can be used to model either
+  clustered or regular point patterns. Two points interact if the
+  distance between them is less than \eqn{2r}{2 * r}.
+
+  The standard form of the model, shown above, is a little
+  complicated to interpret in practical applications.
+  For example, each isolated point of the pattern \eqn{x} contributes a factor
+  \eqn{\kappa \gamma^{-\pi r^2}}{\kappa * \gamma^(-\pi * r^2)}
+  to the probability density. 
+  
+  In \pkg{spatstat}, the model is parametrised in a different form,
+  which is easier to interpret.
+  In \bold{canonical scale-free form}, the probability density is rewritten as
+  \deqn{
+    f(x_1,\ldots,x_n) =
+    \alpha \beta^{n(x)} \eta^{-C(x)}
+  }{
+    f(x[1],\ldots,x[n]) =
+    \alpha . \beta^n(x) . \eta^(-C(x))
+  }
+  where \eqn{\beta}{\beta} is the new intensity parameter,
+  \eqn{\eta}{\eta} is the new interaction parameter, and
+  \eqn{C(x) = B(x) - n(x)} is the interaction potential. Here
+  \deqn{
+    B(x) = \frac{A(x)}{\pi r^2}
+  }{
+    B(x) = A(x)/(\pi * r^2)
+  }
+  is the normalised area (so that the discs have unit area).
+  In this formulation, each isolated point of the pattern contributes a
+  factor \eqn{\beta}{\beta} to the probability density (so the
+  first order trend is \eqn{\beta}{\beta}). The quantity 
+  \eqn{C(x)} is a true interaction potential, in the sense that
+  \eqn{C(x) = 0} if the point pattern \eqn{x} does not contain any
+  points that lie close together (closer than \eqn{2r}{2*r} units
+  apart).
+
+  When a new point \eqn{u} is added to an existing point pattern
+  \eqn{x}, the rescaled potential \eqn{-C(x)} increases by
+  a value between 0 and 1. 
+  The increase is zero if \eqn{u} is not close to any point of \eqn{x}.
+  The increase is 1 if the disc of radius \eqn{r} centred at \eqn{u}
+  is completely contained in the union of discs of radius \eqn{r}
+  centred at the data points \eqn{x_i}{x[i]}. Thus, the increase in
+  potential is a measure of how close the new point \eqn{u} is to the
+  existing pattern \eqn{x}. Addition of the point
+  \eqn{u} contributes a factor \eqn{\beta \eta^\delta}{\beta * \eta^\delta}
+  to the probability density, where \eqn{\delta}{\delta} is the
+  increase in potential. 
+
+  The old parameters \eqn{\kappa,\gamma}{\kappa,\gamma} of the
+  standard form are related to
+  the new parameters \eqn{\beta,\eta}{\beta,\eta} of the canonical
+  scale-free form, by
+  \deqn{
+    \beta = \kappa \gamma^{-\pi r^2} = \kappa / \eta
+  }{
+    \beta = \kappa * \gamma^(-\pi * r^2) = \kappa / \eta
+  }
+  and
+  \deqn{
+    \eta = \gamma^{\pi r^2}
+  }{
+    \eta = \gamma^(\pi * r^2)
+  }
+  provided \eqn{\gamma}{\gamma} and \eqn{\kappa}{\kappa} are positive and finite.
+
+  In the canonical scale-free form, the parameter \eqn{\eta}{\eta}
+  can take any nonnegative value. The value \eqn{\eta = 1}{\eta = 1}
+  again corresponds to a Poisson process, with intensity \eqn{\beta}{\beta}.
+  If \eqn{\eta < 1}{\eta < 1} then the process is regular,
+  while if \eqn{\eta > 1}{\eta > 1} the process is clustered.
+  The value \eqn{\eta = 0}{\eta = 0} corresponds to a hard core process
+  with hard core radius \eqn{r} (interaction distance \eqn{2r}).
+  
+  The \emph{nonstationary} area interaction process is similar except that 
+  the contribution of each individual point \eqn{x_i}{x[i]}
+  is a function \eqn{\beta(x_i)}{\beta(x[i])}
+  of location, rather than a constant beta. 
+ 
+  Note that the only argument of \code{AreaInter()} is the disc radius \code{r}.
+  When \code{r} is fixed, the model becomes an exponential family.
+  The canonical parameters \eqn{\log(\beta)}{log(\beta)}
+  and \eqn{\log(\eta)}{log(\eta)}
+  are estimated by \code{\link{ppm}()}, not fixed in
+  \code{AreaInter()}.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairwise.family}},
+  \code{\link{ppm.object}}
+
+  \code{\link{ragsAreaInter}} and \code{\link{rmh}} for simulation
+  of area-interaction models.
+}
+\section{Warnings}{
+  The interaction distance of this process is equal to
+  \code{2 * r}. Two discs of radius \code{r} overlap if their centres
+  are closer than \code{2 * r} units apart.
+
+  The estimate of the interaction parameter \eqn{\eta}{\eta}
+  is unreliable if the interaction radius \code{r} is too small
+  or too large. In these situations the model is approximately Poisson
+  so that \eqn{\eta}{\eta} is unidentifiable.
+  As a rule of thumb, one can inspect the empty space function
+  of the data, computed by \code{\link{Fest}}. The value \eqn{F(r)}
+  of the empty space function at the interaction radius \code{r} should
+  be between 0.2 and 0.8. 
+}
+\examples{
+   \testonly{op <- spatstat.options(ngrid.disc=8)}
+
+   # prints a sensible description of itself
+   AreaInter(r=0.1)
+
+   # Note the reach is twice the radius
+   reach(AreaInter(r=1))
+
+   # Fit the stationary area interaction process to Swedish Pines data
+   data(swedishpines)
+   ppm(swedishpines, ~1, AreaInter(r=7))
+
+   # Fit the stationary area interaction process to `cells'
+   data(cells) 
+   ppm(cells, ~1, AreaInter(r=0.06))
+   # eta=0 indicates hard core process.
+
+   # Fit a nonstationary area interaction with log-cubic polynomial trend
+   \dontrun{
+   ppm(swedishpines, ~polynom(x/10,y/10,3), AreaInter(r=7))
+   }
+   \testonly{spatstat.options(op)}
+}
+\references{
+  Baddeley, A.J. and Van Lieshout, M.N.M. (1995).
+  Area-interaction point processes.
+  \emph{Annals of the Institute of Statistical Mathematics}
+  \bold{47} (1995) 601--619.
+
+  Widom, B. and Rowlinson, J.S. (1970).
+  New model for the study of liquid-vapor phase transitions.
+  \emph{The Journal of Chemical Physics}
+  \bold{52} (1970) 1670--1684.
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{models}
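
To make the two parametrisations in AreaInter.Rd concrete, a hedged sketch of recovering the standard-form parameters from a fitted model (it assumes spatstat is attached, relies only on the relations beta = kappa/eta and eta = gamma^(pi r^2) stated in the help text, and assumes the usual ppm coefficient order of intercept first):

    library(spatstat)
    r <- 7
    fit <- ppm(swedishpines, ~1, AreaInter(r=r))
    co <- coef(fit)              # log(beta), log(eta) on the canonical scale
    beta <- exp(co[1])
    eta  <- exp(co[2])
    gamma <- eta^(1/(pi * r^2))  # invert eta = gamma^(pi r^2)
    kappa <- beta * eta          # invert beta = kappa/eta
    c(beta=beta, eta=eta, gamma=gamma, kappa=kappa)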
diff --git a/man/BadGey.Rd b/man/BadGey.Rd
new file mode 100644
index 0000000..7556611
--- /dev/null
+++ b/man/BadGey.Rd
@@ -0,0 +1,120 @@
+\name{BadGey}
+\alias{BadGey}
+\title{Hybrid Geyer Point Process Model}
+\description{
+  Creates an instance of the Baddeley-Geyer point process model, defined
+  as a hybrid of several Geyer interactions. The model
+  can then be fitted to point pattern data.
+}
+\usage{
+  BadGey(r, sat)
+}
+\arguments{
+  \item{r}{vector of interaction radii}
+  \item{sat}{
+    vector of saturation parameters,
+    or a single common value of saturation parameter
+  }
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of a point process. 
+}
+\details{
+  This is Baddeley's generalisation of the
+  Geyer saturation point process model,
+  described in \code{\link{Geyer}}, to a process with multiple interaction
+  distances. 
+
+  The BadGey point process with interaction radii
+  \eqn{r_1,\ldots,r_k}{r[1], \ldots, r[k]},
+  saturation thresholds \eqn{s_1,\ldots,s_k}{s[1],\ldots,s[k]},
+  intensity parameter \eqn{\beta}{\beta} and
+  interaction parameters
+  \eqn{\gamma_1,\ldots,\gamma_k}{\gamma[1], \ldots, \gamma[k]},
+  is the point process
+  in which each point
+  \eqn{x_i}{x[i]} in the pattern \eqn{X}
+  contributes a factor
+  \deqn{
+    \beta \gamma_1^{v_1(x_i, X)} \ldots \gamma_k^{v_k(x_i,X)}
+  }{
+    \beta \gamma[1]^v(1, x[i], X) \ldots \gamma[k]^v(k, x[i], X)
+  }
+  to the probability density of the point pattern,
+  where
+  \deqn{
+    v_j(x_i, X) = \min( s_j, t_j(x_i,X) )
+  }{
+    v(j, x[i], X) = min(s[j], t(j, x[i], X))
+  }
+  where \eqn{t_j(x_i, X)}{t(j,x[i],X)} denotes the
+  number of points in the pattern \eqn{X} which lie
+  within a distance \eqn{r_j}{r[j]}
+  from the point \eqn{x_i}{x[i]}. 
+
+  \code{BadGey} is used to fit this model to data.
+  The function \code{\link{ppm}()}, which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the piecewise-constant saturated pairwise
+  interaction is yielded by the function \code{BadGey()}.
+  See the examples below.
+
+  The argument \code{r} specifies the vector of interaction distances.
+  The entries of \code{r} must be strictly increasing, positive numbers.
+
+  The argument \code{sat} specifies the vector of saturation parameters
+  that are applied to the point counts \eqn{t_j(x_i, X)}{t(j,x[i],X)}.
+  It should be a vector of the same length as \code{r}, and its entries
+  should be nonnegative numbers. Thus \code{sat[1]} is applied to the
+  count of points within a distance \code{r[1]}, and \code{sat[2]} to the
+  count of points within a distance \code{r[2]}, etc.
+  Alternatively \code{sat} may be a single number, and this saturation
+  value will be applied to every count.
+
+  Infinite values of the
+  saturation parameters are also permitted; in this case
+  \eqn{v_j(x_i,X) = t_j(x_i,X)}{v(j, x[i], X) = t(j, x[i], X)}
+  and there is effectively no `saturation' for the distance range in
+  question. If all the saturation parameters are set to \code{Inf} then
+  the model is effectively a pairwise interaction process, equivalent to
+  \code{\link{PairPiece}} (however the interaction parameters
+  \eqn{\gamma}{\gamma} obtained from \code{\link{BadGey}}
+  have a complicated relationship to the interaction
+  parameters \eqn{\gamma}{\gamma} obtained from \code{\link{PairPiece}}).
+   
+  If \code{r} is a single number, this model is virtually equivalent to the 
+  Geyer process, see \code{\link{Geyer}}. 
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairsat.family}},
+  \code{\link{Geyer}},
+  \code{\link{PairPiece}},
+  \code{\link{SatPiece}}
+}
+\examples{
+   BadGey(c(0.1,0.2), c(1,1))
+   # prints a sensible description of itself
+   BadGey(c(0.1,0.2), 1)
+   data(cells) 
+
+   # fit a stationary Baddeley-Geyer model
+   ppm(cells, ~1, BadGey(c(0.07, 0.1, 0.13), 2))
+
+   # nonstationary process with log-cubic polynomial trend
+   \dontrun{
+   ppm(cells, ~polynom(x,y,3), BadGey(c(0.07, 0.1, 0.13), 2))
+   }
+}
+\author{
+  \adrian
+  and
+  \rolf
+  in collaboration with Hao Wang and Jeff Picka
+}
+\keyword{spatial}
+\keyword{models}
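
A short sketch of reading off the fitted interaction parameters gamma[j] of the BadGey model (hedged: it assumes, as usual for ppm, that the gamma[j] are exp() of the canonical interaction coefficients, with the intercept excluded):

    library(spatstat)
    fit <- ppm(cells, ~1, BadGey(r=c(0.07, 0.1, 0.13), sat=2))
    exp(coef(fit)[-1])   # fitted gamma[1], gamma[2], gamma[3]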
diff --git a/man/CDF.Rd b/man/CDF.Rd
new file mode 100644
index 0000000..2d9c4bd
--- /dev/null
+++ b/man/CDF.Rd
@@ -0,0 +1,56 @@
+\name{CDF}
+\alias{CDF}
+\alias{CDF.density}
+\title{
+  Cumulative Distribution Function From Kernel Density Estimate
+}
+\description{
+  Given a kernel estimate of a probability density,
+  compute the corresponding cumulative distribution function.
+}
+\usage{
+CDF(f, \dots)
+
+\method{CDF}{density}(f, \dots, warn = TRUE)
+}
+\arguments{
+  \item{f}{
+    Density estimate (object of class \code{"density"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{warn}{
+    Logical value indicating whether to issue a warning if the
+    density estimate \code{f} had to be renormalised because it
+    was computed in a restricted interval.
+  }
+}
+\details{
+  \code{CDF} is generic, with a method for class \code{"density"}.
+  
+  This calculates the cumulative distribution function
+  whose probability density has been estimated and stored in the object
+  \code{f}. The object \code{f} must belong to the class \code{"density"},
+  and would typically have been obtained from a call to the function
+  \code{\link[stats]{density}}.
+}
+\value{
+  A function, which can be applied to any numeric value or vector of
+  values.
+}
+\author{
+  \spatstatAuthors
+}
+\seealso{
+  \code{\link[stats]{density}},
+  \code{\link{quantile.density}}
+}
+\examples{
+   b <- density(runif(10))
+   f <- CDF(b)
+   f(0.5)
+   plot(f)
+}
+\keyword{nonparametric}
+\keyword{univar}
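
As a quick illustration of CDF.Rd above, the returned function can be checked against the empirical CDF of the same sample (a minimal sketch; close agreement is expected up to smoothing):

    library(spatstat)
    x <- runif(100)
    f <- CDF(density(x))
    q <- c(0.25, 0.5, 0.75)
    cbind(smoothed = f(q), empirical = ecdf(x)(q))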
diff --git a/man/Concom.Rd b/man/Concom.Rd
new file mode 100644
index 0000000..21b967f
--- /dev/null
+++ b/man/Concom.Rd
@@ -0,0 +1,148 @@
+\name{Concom}
+\alias{Concom}
+\title{The Connected Component Process Model}
+\description{
+  Creates an instance of the Connected Component point process model
+  which can then be fitted to point pattern data.
+}
+\usage{
+  Concom(r)
+}
+\arguments{
+  \item{r}{Threshold distance}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of the connected component process with disc radius \eqn{r}.
+}
+\details{
+  This function defines the interpoint interaction structure of a point
+  process called the connected component process.
+  It can be used to fit this model to point pattern data.
+
+  The function \code{\link{ppm}()}, which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the connected component interaction is
+  yielded by the function \code{Concom()}. See the examples below.
+
+  In \bold{standard form}, the connected component process
+  (Baddeley and \ifelse{latex}{\out{M\o ller}}{Moller}, 1989) with disc radius \eqn{r},
+  intensity parameter \eqn{\kappa}{\kappa} and interaction parameter
+  \eqn{\gamma}{\gamma} is a point process with probability density
+  \deqn{
+    f(x_1,\ldots,x_n) =
+    \alpha \kappa^{n(x)} \gamma^{-C(x)}
+  }{
+    f(x[1],\ldots,x[n]) =
+    \alpha . \kappa^n(x) . \gamma^(-C(x))
+  }
+  for a point pattern \eqn{x}, where 
+  \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the 
+  points of the pattern, \eqn{n(x)} is the number of points in the
+  pattern, and \eqn{C(x)} is defined below.
+  Here \eqn{\alpha}{\alpha} is a normalising constant.
+
+  To define the term \eqn{C(x)}, suppose that we construct a planar
+  graph by drawing an edge between 
+  each pair of points \eqn{x_i,x_j}{x[i],x[j]} which are less than
+  \eqn{r} units apart. Two points belong to the same connected component
+  of this graph if they are joined by a path in the graph.
+  Then \eqn{C(x)} is the number of connected components of the graph.
+
+  The interaction parameter \eqn{\gamma}{\gamma} can be any positive number.
+  If \eqn{\gamma = 1}{\gamma = 1} then the model reduces to a Poisson
+  process with intensity \eqn{\kappa}{\kappa}.
+  If \eqn{\gamma < 1}{\gamma < 1} then the process is regular,
+  while if \eqn{\gamma > 1}{\gamma > 1} the process is clustered.
+  Thus, a connected-component interaction process can be used to model either
+  clustered or regular point patterns.
+  
+  In \pkg{spatstat}, the model is parametrised in a different form,
+  which is easier to interpret.
+  In \bold{canonical form}, the probability density is rewritten as
+  \deqn{
+    f(x_1,\ldots,x_n) =
+    \alpha \beta^{n(x)} \gamma^{-U(x)}
+  }{
+    f(x[1],\ldots,x[n]) =
+    \alpha . \beta^n(x) . \gamma^(-U(x))
+  }
+  where \eqn{\beta}{\beta} is the new intensity parameter and
+  \eqn{U(x) = C(x) - n(x)} is the interaction potential. 
+  In this formulation, each isolated point of the pattern contributes a
+  factor \eqn{\beta}{\beta} to the probability density (so the
+  first order trend is \eqn{\beta}{\beta}). The quantity 
+  \eqn{U(x)} is a true interaction potential, in the sense that
+  \eqn{U(x) = 0} if the point pattern \eqn{x} does not contain any
+  points that lie close together.
+
+  When a new point \eqn{u} is added to an existing point pattern
+  \eqn{x}, the rescaled potential \eqn{-U(x)} increases by
+  zero or a positive integer.
+  The increase is zero if \eqn{u} is not close to any point of \eqn{x}.
+  The increase is a positive integer \eqn{k} if there are
+  \eqn{k} different connected components of \eqn{x} that lie close to \eqn{u}.
+  Addition of the point
+  \eqn{u} contributes a factor \eqn{\beta \gamma^\delta}{\beta * \gamma^\delta}
+  to the probability density, where \eqn{\delta}{\delta} is the
+  increase in potential.
+
+  If desired, the original parameter \eqn{\kappa}{\kappa} can be recovered from
+  the canonical parameter by \eqn{\kappa = \beta\gamma}{\kappa = \beta * \gamma}.
+
+  The \emph{nonstationary} connected component process is similar except that 
+  the contribution of each individual point \eqn{x_i}{x[i]}
+  is a function \eqn{\beta(x_i)}{\beta(x[i])}
+  of location, rather than a constant beta. 
+ 
+  Note that the only argument of \code{Concom()} is the threshold distance \code{r}.
+  When \code{r} is fixed, the model becomes an exponential family.
+  The canonical parameters \eqn{\log(\beta)}{log(\beta)}
+  and \eqn{\log(\gamma)}{log(\gamma)}
+  are estimated by \code{\link{ppm}()}, not fixed in
+  \code{Concom()}.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairwise.family}},
+  \code{\link{ppm.object}}
+}
+\section{Edge correction}{
+  The interaction distance of this process is infinite.
+  There are no well-established procedures for edge correction
+  for fitting such models, and accordingly the model-fitting function
+  \code{\link{ppm}} will give an error message saying that the user must
+  specify an edge correction. A reasonable solution is
+  to use the border correction at the same distance \code{r}, as shown in the
+  Examples.
+}
+\examples{
+   # prints a sensible description of itself
+   Concom(r=0.1)
+
+   # Fit the stationary connected component process to redwood data
+   ppm(redwood, ~1, Concom(r=0.07), rbord=0.07)
+
+   # Fit the stationary connected component process to `cells' data
+   ppm(cells, ~1, Concom(r=0.06), rbord=0.06)
+   # gamma=0 would indicate a hard core process.
+
+   # Fit a nonstationary connected component model
+   # with log-cubic polynomial trend
+   \dontrun{
+   ppm(swedishpines, ~polynom(x/10,y/10,3), Concom(r=7), rbord=7)
+   }
+}
+\references{
+  Baddeley, A.J. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (1989)
+  Nearest-neighbour Markov point processes and random sets.
+  \emph{International Statistical Review} \bold{57}, 89--121.
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{models}
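
The recovery of the original parameter kappa described in Concom.Rd, as a hedged sketch (it assumes the canonical coefficients are reported in the order log(beta), log(gamma)):

    library(spatstat)
    fit <- ppm(redwood, ~1, Concom(r=0.07), rbord=0.07)
    co <- coef(fit)
    beta  <- exp(co[1])
    gamma <- exp(co[2])
    c(beta=beta, gamma=gamma, kappa=beta * gamma)  # kappa = beta * gamma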
diff --git a/man/DiggleGatesStibbard.Rd b/man/DiggleGatesStibbard.Rd
new file mode 100644
index 0000000..150da04
--- /dev/null
+++ b/man/DiggleGatesStibbard.Rd
@@ -0,0 +1,86 @@
+\name{DiggleGatesStibbard}
+\alias{DiggleGatesStibbard}
+\title{Diggle-Gates-Stibbard Point Process Model}
+\description{
+Creates an instance of the Diggle-Gates-Stibbard point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+  DiggleGatesStibbard(rho)
+}
+\arguments{
+  \item{rho}{Interaction range}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of the Diggle-Gates-Stibbard
+  process with interaction range \code{rho}.
+}
+\details{
+  Diggle, Gates and Stibbard (1987) proposed a pairwise interaction
+  point process in which each pair of points separated by
+  a distance \eqn{d} contributes a factor \eqn{e(d)} to the
+  probability density, where
+  \deqn{
+    e(d) = \sin^2\left(\frac{\pi d}{2\rho}\right)
+  }{
+    e(d) = sin^2((\pi * d)/(2 * \rho))
+  }
+  for \eqn{d < \rho}{d < \rho}, and \eqn{e(d)} is equal to 1
+  for \eqn{d \ge \rho}{d \ge \rho}.
+
+  The function \code{\link{ppm}()}, which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the Diggle-Gates-Stibbard 
+  pairwise interaction is 
+  yielded by the function \code{DiggleGatesStibbard()}.
+  See the examples below.
+
+  Note that this model does not have any regular parameters
+  (as explained in the section on Interaction Parameters
+  in the help file for \code{\link{ppm}}).
+  The parameter \eqn{\rho} is not estimated by \code{\link{ppm}}.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairwise.family}},
+  \code{\link{DiggleGratton}},
+  \code{\link{rDGS}},
+  \code{\link{ppm.object}}
+}
+\references{
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42}, 283--322.
+
+  Ripley, B.D. (1981) 
+  \emph{Spatial statistics}.
+  John Wiley and Sons.
+  
+  Diggle, P.J., Gates, D.J. and Stibbard, A. (1987)
+  A nonparametric estimator for pairwise-interaction point processes.
+  \emph{Biometrika} \bold{74}, 763 -- 770.
+}
+\examples{
+   DiggleGatesStibbard(0.02)
+   # prints a sensible description of itself
+
+   \dontrun{
+   ppm(cells ~1, DiggleGatesStibbard(0.05))
+   # fit the stationary D-G-S process to `cells'
+   }
+
+   ppm(cells ~ polynom(x,y,3), DiggleGatesStibbard(0.05))
+   # fit a nonstationary D-G-S process
+   # with log-cubic polynomial trend
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{models}
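
The interaction term e(d) of the Diggle-Gates-Stibbard model is simple enough to tabulate directly, and rDGS (cited in the help page) generates simulated realisations; a sketch, with beta=300 chosen arbitrarily:

    library(spatstat)
    rho <- 0.05
    e <- function(d, rho) ifelse(d < rho, sin((pi * d)/(2 * rho))^2, 1)
    curve(e(x, rho=rho), from=0, to=0.1, xlab="d", ylab="e(d)")
    # simulate the model and refit it, as a consistency check
    X <- rDGS(beta=300, rho=rho)
    ppm(X ~ 1, DiggleGatesStibbard(rho=rho))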
diff --git a/man/DiggleGratton.Rd b/man/DiggleGratton.Rd
new file mode 100644
index 0000000..02d124b
--- /dev/null
+++ b/man/DiggleGratton.Rd
@@ -0,0 +1,84 @@
+\name{DiggleGratton}
+\alias{DiggleGratton}
+\title{Diggle-Gratton model}
+\description{
+  Creates an instance of the Diggle-Gratton pairwise interaction
+  point process model, which can then be fitted to point pattern data.
+}
+\usage{
+  DiggleGratton(delta=NA, rho)
+}
+\arguments{
+  \item{delta}{lower threshold \eqn{\delta}{\delta}}
+  \item{rho}{upper threshold \eqn{\rho}{\rho}}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of a point process. 
+}
+\details{
+  Diggle and Gratton (1984, pages 208-210)
+  introduced the pairwise interaction point
+  process with pair potential \eqn{h(t)} of the form
+  \deqn{
+    h(t) = \left( \frac{t-\delta}{\rho-\delta} \right)^\kappa
+    \quad\quad \mbox{  if  } \delta \le t \le \rho
+  }{
+    h(t) = ((t - \delta)/(\rho - \delta))^\kappa, {    } \delta \le t \le \rho
+  }
+  with \eqn{h(t) = 0} for \eqn{t < \delta}{t < \delta}
+  and  \eqn{h(t) = 1} for \eqn{t > \rho}{t > \rho}.
+  Here \eqn{\delta}{\delta}, \eqn{\rho}{\rho} and \eqn{\kappa}{\kappa}
+  are parameters.
+
+  Note that we use the symbol \eqn{\kappa}{\kappa}
+  where Diggle and Gratton (1984) and Diggle, Gates and Stibbard (1987)
+  use \eqn{\beta}{\beta}, since in \pkg{spatstat} we reserve the symbol
+  \eqn{\beta}{\beta} for an intensity parameter.
+
+  The parameters must all be nonnegative,
+  and must satisfy \eqn{\delta \le \rho}{\delta \le \rho}.
+
+  The potential is inhibitory, i.e.\ this model is only appropriate for
+  regular point patterns. The strength of inhibition increases with
+  \eqn{\kappa}{\kappa}. For \eqn{\kappa=0}{\kappa=0} the model is
+  a hard core process with hard core radius \eqn{\delta}{\delta}.
+  For \eqn{\kappa=\infty}{\kappa=Inf} the model is a hard core
+  process with hard core radius \eqn{\rho}{\rho}.
+
+  The irregular parameters 
+  \eqn{\delta, \rho}{\delta, \rho} must be given in the call to
+  \code{DiggleGratton}, while the
+  regular parameter \eqn{\kappa}{\kappa} will be estimated.
+
+  If the lower threshold \code{delta} is missing or \code{NA},
+  it will be estimated from the data when \code{\link{ppm}} is called.
+  The estimated value of \code{delta} is the minimum nearest neighbour distance
+  multiplied by \eqn{n/(n+1)}, where \eqn{n} is the
+  number of data points.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{ppm.object}},
+  \code{\link{Pairwise}}
+}
+\examples{
+   ppm(cells ~1, DiggleGratton(0.05, 0.1))
+}
+\references{
+  Diggle, P.J., Gates, D.J. and Stibbard, A. (1987)
+  A nonparametric estimator for pairwise-interaction point processes.
+  \emph{Biometrika} \bold{74}, 763 -- 770.
+
+  Diggle, P.J. and Gratton, R.J. (1984)
+  Monte Carlo methods of inference for implicit statistical models.
+  \emph{Journal of the Royal Statistical Society, series B}
+  \bold{46}, 193 -- 212.
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{models}
+
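
The Diggle-Gratton pair potential h(t) defined above, written out as a plain R function so the effect of kappa can be seen (a sketch in base R only):

    h <- function(t, delta, rho, kappa) {
      ifelse(t < delta, 0,
             ifelse(t > rho, 1, ((t - delta)/(rho - delta))^kappa))
    }
    # inhibition strengthens as kappa increases
    curve(h(x, delta=0.05, rho=0.1, kappa=1), from=0, to=0.15,
          xlab="t", ylab="h(t)")
    curve(h(x, delta=0.05, rho=0.1, kappa=4), add=TRUE, lty=2)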
diff --git a/man/Emark.Rd b/man/Emark.Rd
new file mode 100644
index 0000000..d0e4ce6
--- /dev/null
+++ b/man/Emark.Rd
@@ -0,0 +1,186 @@
+\name{Emark}
+\alias{Emark}
+\alias{Vmark}
+\title{
+  Diagnostics for random marking
+}
+\description{
+  Estimate the summary functions \eqn{E(r)} and \eqn{V(r)} for 
+  a marked point pattern, proposed by Schlather et al (2004) as diagnostics 
+  for dependence between the points and the marks.
+}
+\usage{
+Emark(X, r=NULL,
+         correction=c("isotropic", "Ripley", "translate"),
+         method="density", \dots, normalise=FALSE)
+Vmark(X, r=NULL,
+         correction=c("isotropic", "Ripley", "translate"),
+         method="density", \dots, normalise=FALSE)
+}
+\arguments{
+  \item{X}{The observed point pattern.
+    An object of class \code{"ppp"} or something acceptable to
+    \code{\link{as.ppp}}. The pattern should have numeric marks.
+  }
+  \item{r}{Optional. Numeric vector. The values of the argument \eqn{r}
+    at which the function \eqn{E(r)} or \eqn{V(r)} should be evaluated.
+    There is a sensible default.
+  }
+  \item{correction}{
+    A character vector containing any selection of the
+    options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}.
+    It specifies the edge correction(s) to be applied.
+  }
+  \item{method}{
+    A character vector indicating the user's choice of
+    density estimation technique to be used. Options are
+    \code{"density"}, 
+    \code{"loess"},
+    \code{"sm"} and \code{"smrep"}.
+  }
+  \item{\dots}{
+    Arguments passed to the density estimation routine
+    (\code{\link{density}}, \code{\link{loess}} or \code{sm.density})
+    selected by \code{method}.
+  }
+  \item{normalise}{
+    If \code{TRUE}, normalise the estimate of \eqn{E(r)} or \eqn{V(r)}
+    so that it would have value equal to 1 if the marks are independent
+    of the points.
+  }
+}
+\value{
+  If \code{marks(X)} is a numeric vector, the result is 
+  an object of class \code{"fv"} (see \code{\link{fv.object}}).
+  If \code{marks(X)} is a data frame, the result is
+  a list of objects of class \code{"fv"}, one for each column of marks.
+
+  An object of class \code{"fv"} is essentially
+  a data frame containing numeric columns 
+  \item{r}{the values of the argument \eqn{r} 
+    at which the function \eqn{E(r)} or \eqn{V(r)}
+    has been estimated
+  }
+  \item{theo}{the theoretical, constant value of \eqn{E(r)} or \eqn{V(r)}
+    when the marks attached to different points are independent
+  }
+  together with a column or columns named 
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the function \eqn{E(r)} or \eqn{V(r)}
+  obtained by the edge corrections named.
+}
+\details{
+  For a marked point process,
+  Schlather et al (2004) defined the functions
+  \eqn{E(r)} and \eqn{V(r)} to be the conditional mean
+  and conditional variance of the mark attached to a
+  typical random point, given that there exists another random
+  point at a distance \eqn{r} away from it.
+
+  More formally,
+  \deqn{
+    E(r) = E_{0u}[M(0)]
+  }{
+    E(r) = E[0u] M(0)
+  }
+  and
+  \deqn{
+    V(r) = E_{0u}[(M(0)-E(u))^2]
+  }{
+    V(r) = E[0u]((M(0)-E(u))^2)
+  }
+  where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation
+  given that there are points of the process at the locations
+  \eqn{0} and \eqn{u} separated by a distance \eqn{r},
+  and where \eqn{M(0)} denotes the mark attached to the point \eqn{0}. 
+
+  These functions may serve as diagnostics for dependence
+  between the points and the marks. If the points and marks are
+  independent, then \eqn{E(r)} and \eqn{V(r)} should be
+  constant (not depending on \eqn{r}). See Schlather et al (2004).
+
+  The argument \code{X} must be a point pattern (object of class
+  \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
+  It must be a marked point pattern with numeric marks.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{E(r)} and \eqn{V(r)} are estimated.
+
+  This algorithm assumes that \code{X} can be treated
+  as a realisation of a stationary (spatially homogeneous) 
+  random spatial point process in the plane, observed through
+  a bounded window.
+  The window (which is specified in \code{X} as \code{Window(X)})
+  may have arbitrary shape.
+
+  Biases due to edge effects are
+  treated in the same manner as in \code{\link{Kest}}.
+  The edge corrections implemented here are
+  \describe{
+    \item{isotropic/Ripley}{Ripley's isotropic correction
+      (see Ripley, 1988; Ohser, 1983).
+      This is implemented only for rectangular and polygonal windows
+      (not for binary masks).
+    }
+    \item{translate}{Translation correction (Ohser, 1983).
+      Implemented for all window geometries, but slow for
+      complex windows. 
+    }
+  }
+  Note that the estimator assumes the process is stationary (spatially
+  homogeneous). 
+
+  The conditional mean and conditional variance defined
+  above are estimated using density estimation
+  techniques. The user can choose between
+  \describe{
+    \item{\code{"density"}}{
+      which uses the standard kernel
+      density estimation routine \code{\link{density}}, and
+      works only for evenly-spaced \code{r} values;
+    }
+    \item{\code{"loess"}}{
+      which uses the function \code{loess} in the
+      package \pkg{modreg};
+    }
+    \item{\code{"sm"}}{
+      which uses the function \code{sm.density} in the
+      package \pkg{sm} and is extremely slow;
+    }
+    \item{\code{"smrep"}}{
+      which uses the function \code{sm.density} in the
+      package \pkg{sm} and is relatively fast, but may require manual
+      control of the smoothing parameter \code{hmult}.
+    }
+  }
+}
+\references{
+  Schlather, M. and Ribeiro, P. and Diggle, P. (2004)
+  Detecting dependence between marks and locations of
+  marked point processes.
+  \emph{Journal of the Royal Statistical Society, series B}
+  \bold{66} (2004) 79--83.
+}
+\seealso{
+  Mark correlation \code{\link{markcorr}},
+  mark variogram \code{\link{markvario}} for numeric marks.
+  
+  Mark connection function \code{\link{markconnect}} and 
+  multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}}
+  for factor-valued marks.
+}
+\examples{
+    plot(Emark(spruces))
+    E <- Emark(spruces, method="density", kernel="epanechnikov")
+    plot(Vmark(spruces))
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
+
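
Since E(r) and V(r) should be flat when marks are independent of locations, a natural check is to compare the data curve with one computed after random relabelling; a hedged sketch using rlabel(), which permutes the marks at random:

    library(spatstat)
    E.obs  <- Emark(spruces, normalise=TRUE)
    E.null <- Emark(rlabel(spruces), normalise=TRUE)
    plot(E.obs, main="Emark: data vs random relabelling")
    plot(E.null, add=TRUE, lty=2)   # should hover near the constant 1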
diff --git a/man/Extract.anylist.Rd b/man/Extract.anylist.Rd
new file mode 100644
index 0000000..ebeb85f
--- /dev/null
+++ b/man/Extract.anylist.Rd
@@ -0,0 +1,52 @@
+\name{Extract.anylist}
+\alias{[.anylist}
+\alias{[<-.anylist}
+\title{Extract or Replace Subset of a List of Things}
+\description{
+  Extract or replace a subset of a list of things.
+}
+\usage{
+  \method{[}{anylist}(x, i, \dots)
+
+  \method{[}{anylist}(x, i) <- value
+}
+\arguments{
+  \item{x}{
+    An object of class \code{"anylist"} representing a list of things.
+  }
+  \item{i}{
+    Subset index. Any valid subset index in the usual \R sense.
+  }
+  \item{value}{
+    Replacement value for the subset. 
+  }
+  \item{\dots}{Ignored.}
+}
+\value{
+  Another object of class \code{"anylist"}.
+}
+\details{
+  These are the methods for extracting and replacing subsets 
+  for the class \code{"anylist"}.
+
+  The argument \code{x} should be an object of class \code{"anylist"}
+  representing a list of things. See \code{\link{anylist}}.
+
+  The method replaces a designated
+  subset of \code{x}, and returns an object of class \code{"anylist"}.
+}
+\seealso{
+  \code{\link{anylist}},
+  \code{\link{plot.anylist}},
+  \code{\link{summary.anylist}}
+}
+\examples{
+   x <- anylist(A=runif(10), B=runif(10), C=runif(10))
+   x[1] <- list(A=rnorm(10))
+ }
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{list}
+\keyword{manip}
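
A brief usage note on the subset method above (a sketch; single-bracket subsetting preserves the class, while double-bracket access is assumed to follow ordinary list semantics):

    library(spatstat)
    x <- anylist(A=runif(10), B=runif(10), C=runif(10))
    y <- x[c("A", "C")]
    class(y)               # still inherits from "anylist"
    x[["B"]] <- rnorm(5)   # replace one component, list-style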
diff --git a/man/Extract.fasp.Rd b/man/Extract.fasp.Rd
new file mode 100644
index 0000000..c753d73
--- /dev/null
+++ b/man/Extract.fasp.Rd
@@ -0,0 +1,69 @@
+\name{Extract.fasp}
+\alias{[.fasp}
+\title{Extract Subset of Function Array}
+\description{
+  Extract a subset of a function array (an object of class
+  \code{"fasp"}).
+}
+\usage{
+  \method{[}{fasp}(x, I, J, drop=TRUE,\dots)
+}
+\arguments{
+  \item{x}{
+    A function array. An object of class \code{"fasp"}.
+  }
+  \item{I}{
+    any valid expression for a subset of the row indices of the array.
+  }
+  \item{J}{
+    any valid expression for a subset of the column indices of the array.
+  }
+  \item{drop}{
+      Logical.
+      When the selected subset consists of only one cell of the array,
+      if \code{drop=FALSE} the result is still returned as a
+      \eqn{1 \times 1}{1 * 1} array of functions (class \code{"fasp"})
+      while if \code{drop=TRUE} it is returned as a
+      function (class \code{"fv"}).
+  }
+  \item{\dots}{Ignored.}
+} 
+\value{
+  A function array (of class \code{"fasp"}).
+  Exceptionally, if the array has only one cell, and
+  if \code{drop=TRUE}, then the result is a function value table
+  (class \code{"fv"}).
+}
+\details{
+  A function array can be regarded as a matrix whose entries
+  are functions. See \code{\link{fasp.object}} for an explanation of
+  function arrays.
+
+  This routine extracts a sub-array according to the usual
+  conventions for matrix indexing.
+}
+\seealso{
+  \code{\link{fasp.object}}
+}
+\examples{
+ # Lansing woods data - multitype points with 6 types
+ woods <- lansing
+ \testonly{
+ # smaller dataset
+   woods <- woods[ seq(1,npoints(woods),by=45)]
+ }
+ # compute 6 x 6 array of all cross-type K functions
+ a <- alltypes(woods, "K")
+
+ # extract first three marks only
+ b <- a[1:3,1:3]
+ \dontrun{plot(b)}
+ # subset of array pertaining to hickories
+ h <- a[levels(marks(woods)) == "hickory", ]
+ \dontrun{plot(h)}
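+ # a single cell is returned as an "fv" by default (drop=TRUE),
+ # or as a 1 x 1 "fasp" with drop=FALSE (a sketch of the behaviour above)
+ K23 <- a[2, 3]
+ K23arr <- a[2, 3, drop=FALSE]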
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.fv.Rd b/man/Extract.fv.Rd
new file mode 100644
index 0000000..7f816ac
--- /dev/null
+++ b/man/Extract.fv.Rd
@@ -0,0 +1,110 @@
+\name{Extract.fv}
+\alias{[.fv}
+\alias{[<-.fv}
+\alias{$<-.fv}
+\title{Extract or Replace Subset of Function Values}
+\description{
+  Extract or replace a subset of an object of class \code{"fv"}.
+}
+\usage{
+  \method{[}{fv}(x, i, j, \dots, drop=FALSE)
+
+  \method{[}{fv}(x, i, j) <- value
+
+  \method{$}{fv}(x, name) <- value
+}
+\arguments{
+  \item{x}{
+    a function value object,
+    of class \code{"fv"} (see \code{\link{fv.object}}).
+    Essentially a data frame.
+  }
+  \item{i}{
+    any appropriate subset index.
+    Selects a subset of the rows of the data frame, i.e.
+    a subset of the domain of the function(s) represented by \code{x}.
+  }
+  \item{j}{
+    any appropriate subset index for the columns of the data frame.
+    Selects some of the functions present in \code{x}.
+  }
+  \item{name}{
+    the name of a column of the data frame.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{drop}{
+     Logical. If \code{TRUE}, the result is a data frame or vector
+     containing the selected rows and columns of data.
+     If \code{FALSE} (the default),
+     the result is another object of class \code{"fv"}.
+  }
+  \item{value}{
+     Replacement value for the column or columns selected by
+     \code{name} or \code{j}.
+  }
+} 
+\value{
+  The result of \code{[.fv} with \code{drop=TRUE}
+  is a data frame or vector. 
+
+  Otherwise, the result is another object of class \code{"fv"}.
+}
+\details{
+  These functions extract a designated subset of an object of class
+  \code{"fv"}, or replace the designated subset with other data,
+  or delete the designated subset.
+
+  The subset is specified by the 
+  row index \code{i} and column index \code{j}, or
+  by the column name \code{name}. Either \code{i} or \code{j}
+  may be missing, or both may be missing.
+
+  The function \code{[.fv} is a method for the generic operator
+  \code{\link{[}} for the class \code{"fv"}. It extracts the
+  designated subset of \code{x}, and returns it as
+  another object of class \code{"fv"} (if \code{drop=FALSE})
+  or as a data frame or vector (if \code{drop=TRUE}).
+
+  The function \code{[<-.fv} is a method for the generic operator
+  \code{\link{[<-}} for the class \code{"fv"}.
+  If \code{value} is \code{NULL}, the designated subset of \code{x} will be
+  deleted from \code{x}.
+  Otherwise, the designated subset of \code{x} will be
+  replaced by the data contained in \code{value}.
+  The return value is the modified object \code{x}.
+
+  The function \code{$<-.fv} is a method for the generic operator
+  \code{\link{$<-}} for the class \code{"fv"}.
+  If \code{value} is \code{NULL}, the designated column of \code{x} will be
+  deleted from \code{x}.
+  Otherwise, the designated column of \code{x} will be
+  replaced by the data contained in \code{value}.
+  The return value is the modified object \code{x}.
+}
+\seealso{
+  \code{\link{fv.object}}
+}
+\examples{
+ K <- Kest(cells)
+
+ # discard the estimates of K(r) for r > 0.1
+ Ksub <- K[K$r <= 0.1, ]
+
+ # extract the border method estimates
+ bor <- K[ , "border", drop=TRUE]
+ # or equivalently
+ bor <- K$border
+
+ # remove the border-method estimates
+ K$border <- NULL
+ K
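+
+ # rows extracted as a plain data frame with drop=TRUE (a sketch)
+ df <- K[K$r <= 0.1, , drop=TRUE]
+ head(df)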
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.hyperframe.Rd b/man/Extract.hyperframe.Rd
new file mode 100644
index 0000000..ecc68dd
--- /dev/null
+++ b/man/Extract.hyperframe.Rd
@@ -0,0 +1,122 @@
+\name{Extract.hyperframe}
+\alias{[.hyperframe}
+\alias{[<-.hyperframe}
+\alias{$.hyperframe}
+\alias{$<-.hyperframe}
+\title{Extract or Replace Subset of Hyperframe}
+\description{
+  Extract or replace a subset of a hyperframe.
+}
+\usage{
+  \method{[}{hyperframe}(x, i, j, drop, strip=drop, \dots)
+  \method{[}{hyperframe}(x, i, j) <- value
+  \method{$}{hyperframe}(x, name)
+  \method{$}{hyperframe}(x, name) <- value
+}
+\arguments{
+  \item{x}{
+    A hyperframe (object of class \code{"hyperframe"}).
+  }
+  \item{i,j}{
+    Row and column indices.
+  }
+  \item{drop,strip}{
+     Logical values indicating what to do when the
+     hyperframe has only one row or column.
+     See Details.
+  }
+  \item{\dots}{
+    Ignored. 
+  }
+  \item{name}{
+    Name of a column of the hyperframe.
+  }
+  \item{value}{
+    Replacement value for the subset. A hyperframe
+    or (if the subset is a single column) a list or an atomic vector.
+  }
+}
+\value{
+  A hyperframe (of class \code{"hyperframe"}).
+}
+\details{
+  These functions extract a designated subset of a hyperframe,
+  or replace the designated subset with another hyperframe.
+
+  The function \code{[.hyperframe} is a method for the subset operator
+  \code{\link{[}} for the
+  class \code{"hyperframe"}. It extracts the subset of \code{x}
+  specified by the row index \code{i} and column index \code{j}.
+
+  The argument \code{drop} determines whether the array structure
+  will be discarded if possible. The argument \code{strip} determines
+  whether the list structure in a row or column or cell will be discarded if
+  possible.
+  If \code{drop=FALSE} (the default), the return value is always
+  a hyperframe or data frame.
+  If \code{drop=TRUE}, and if the selected subset has only one row,
+  or only one column, or both, then
+  \itemize{
+    \item{
+       if \code{strip=FALSE}, the result is a
+       list, with one entry for each array cell that was selected.
+    }
+    \item{
+       if \code{strip=TRUE},
+       \itemize{
+         \item if the subset has one row containing several columns,
+         the result is a list or (if possible) an atomic vector;
+         \item if the subset has one column containing several rows,
+         the result is a list or (if possible) an atomic vector;
+         \item if the subset has exactly one row and exactly one column,
+         the result is the object (or atomic value)
+         contained in this row and column.
+       }
+    }
+  }
+  
+  The function \code{[<-.hyperframe} is a method for the
+  subset replacement operator \code{\link{[<-}} for the
+  class \code{"hyperframe"}. It replaces the designated
+  subset with the hyperframe \code{value}.
+  The subset of \code{x} to be replaced is designated by
+  the arguments \code{i} and \code{j} as above.
+  The replacement \code{value} should be a hyperframe with the
+  appropriate dimensions, or (if the specified subset is a single
+  column) a list of the appropriate length.
+
+  The function \code{$.hyperframe} is a method for \code{\link{$}}
+  for hyperframes. It extracts the relevant column of the hyperframe.
+  The result is always a list (i.e. equivalent to using
+  \code{[.hyperframe} with \code{strip=FALSE}).
+
+  The function \code{$<-.hyperframe} is a method for \code{\link{$<-}}
+  for hyperframes. It replaces the relevant column of the hyperframe.
+  The replacement value should be a list of the appropriate length.
+}
+\seealso{
+  \code{\link{hyperframe}}
+}
+\examples{
+  h <- hyperframe(X=list(square(1), square(2)), Y=list(sin, cos))
+  h
+  h[1, ]
+  h[1, ,drop=TRUE]
+  h[ , 1]
+  h[ , 1, drop=TRUE]
+  h[1,1]
+  h[1,1,drop=TRUE]
+  h[1,1,drop=TRUE,strip=FALSE]
+  h[1,1] <- list(square(3))
+  # extract column
+  h$X
+  # replace existing column
+  h$Y <- list(cells, cells)
+  # add new column
+  h$Z <- list(cells, cells)
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.im.Rd b/man/Extract.im.Rd
new file mode 100644
index 0000000..f33c354
--- /dev/null
+++ b/man/Extract.im.Rd
@@ -0,0 +1,213 @@
+\name{Extract.im}
+\alias{[.im}
+\title{Extract Subset of Image}
+\description{
+  Extract a subset or subregion of a pixel image.
+}
+\usage{
+  \method{[}{im}(x, i, j, \dots, drop=TRUE, tight=FALSE,
+                                 raster=NULL, rescue=is.owin(i))
+}
+\arguments{
+  \item{x}{
+    A two-dimensional pixel image.
+    An object of class \code{"im"}.
+  }
+  \item{i}{
+    Object defining the subregion or subset to be extracted.
+    Either a spatial window (an object of class \code{"owin"}), or a
+    pixel image with logical values, or a linear network (object of
+    class \code{"linnet"}) or a point pattern (an object
+    of class \code{"ppp"}), or any type of index that applies to a
+    matrix, or something that can be converted to a point pattern
+    by \code{\link{as.ppp}} (using the window of \code{x}).
+  }
+  \item{j}{
+    An integer or logical vector serving as the column index if
+    matrix indexing is being used. Ignored if \code{i} is a spatial object.
+  }
+  \item{\dots}{Ignored.}
+  \item{drop}{
+    Logical value. 
+    Locations in \code{i} that lie outside the spatial domain of the
+    image \code{x} return a pixel value of \code{NA} if
+    \code{drop=FALSE}, and are omitted if \code{drop=TRUE}.
+  }
+  \item{tight}{
+    Logical value. If \code{tight=TRUE}, and if the result of the
+    subset operation is an image, the image will be trimmed
+    to the smallest possible rectangle.
+  }
+  \item{raster}{
+     Optional. An object of class \code{"owin"} or \code{"im"}
+     determining a pixel grid.
+  }
+  \item{rescue}{
+   Logical value indicating whether rectangular blocks of data
+   should always be returned as pixel images.
+  }
+}
+\value{
+  Either a pixel image or a vector of pixel values. See Details.
+}
+\details{
+  This function extracts a subset of the pixel values in a
+  pixel image. (To reassign the pixel values, see \code{\link{[<-.im}}).
+
+  The image \code{x} must be an object of class
+  \code{"im"} representing a pixel image defined inside a
+  rectangle in two-dimensional space (see \code{\link{im.object}}).
+
+  The subset to be extracted is determined by the arguments \code{i,j}
+  according to the following rules (which are checked in this order):
+  \enumerate{
+    \item
+    \code{i} is a spatial object such as a window,
+    a pixel image with logical values,
+    a linear network, or a point pattern;  or
+    \item
+    \code{i,j} are indices for the matrix \code{as.matrix(x)}; or
+    \item
+    \code{i} can be converted to a point pattern
+    by \code{\link{as.ppp}(i, W=Window(x))},
+    and \code{i} is not a matrix.
+  }
+    
+  If \code{i} is a spatial window (an object of class \code{"owin"}),
+  the values of the image inside this window are extracted
+  (after first clipping the window to the spatial domain of the image
+  if necessary).
+
+  If \code{i} is a linear network (object of class \code{"linnet"}),
+  the values of the image on this network are extracted.
+
+  If \code{i} is a pixel image with logical values,
+  it is interpreted as a spatial window (with \code{TRUE} values
+  inside the window and \code{FALSE} outside).
+
+  If \code{i} is a point pattern (an object of class
+  \code{"ppp"}), then the values of the pixel image at the points of
+  this pattern are extracted. This is a simple way to read the
+  pixel values at a given spatial location.
+
+  At locations outside the spatial domain of the image, the pixel
+  value is undefined, and is taken to be \code{NA}.  The logical
+  argument \code{drop} determines whether such \code{NA} values
+  will be returned or omitted.  It also influences the format of
+  the return value.
+
+  If \code{i} is a point pattern (or something that can be converted
+  to a point pattern), then \code{X[i, drop=FALSE]} is a numeric
+  vector containing the pixel values at each of the points of
+  the pattern.  Its length is equal to the number of points in the
+  pattern \code{i}.  It may contain \code{NA}s corresponding to
+  points which lie outside the spatial domain of the image \code{x}.
+  By contrast, \code{X[i]} or \code{X[i, drop=TRUE]} contains only
+  those pixel values which are not \code{NA}. It may be shorter.
+
+  If \code{i} is a spatial window then \code{X[i, drop=FALSE]} is
+  another pixel image of the same dimensions as \code{X} obtained
+  by setting all pixels outside the window \code{i} to have value
+  \code{NA}. When the result is displayed by \code{\link{plot.im}}
+  the effect is that the pixel image \code{x} is clipped to the
+  window \code{i}.
+
+  If \code{i} is a linear network (object of class \code{"linnet"})
+  then \code{X[i, drop=FALSE]} is another pixel image of the same
+  dimensions as \code{X} obtained by restricting the pixel image
+  \code{X} to the linear network. The result also belongs to the
+  class \code{"linim"} (pixel image on a linear network). 
+
+  If \code{i} is a spatial window then \code{X[i, drop=TRUE]}
+  is either:
+  \itemize{
+    \item a numeric vector containing the pixel values for all pixels
+          that lie inside the window \code{i}.
+          This happens if \code{i} is \emph{not} a rectangle
+          (i.e. \code{i$type != "rectangle"})
+          or if \code{rescue=FALSE}.
+    \item  a pixel image.
+          This happens only if
+          \code{i} is a rectangle (\code{i$type == "rectangle"})
+          and \code{rescue=TRUE} (the default).
+  }
+
+  If the optional argument \code{raster} is given, then it should
+  be a binary image mask or a pixel image. Then
+  \code{x} will first be converted to an image defined on the
+  pixel grid implied by \code{raster}, before the subset operation
+  is carried out.
+  In particular, \code{x[i, raster=i, drop=FALSE]} will return
+  an image defined on the same pixel array as the object \code{i}.
+
+  If \code{i} does not satisfy any of the conditions above, then
+  the algorithm attempts to interpret \code{i} and \code{j}
+  as indices for the matrix \code{as.matrix(x)}.
+  Either \code{i} or \code{j} may be missing or blank.
+  The result is usually a vector or matrix of pixel values.
+  Exceptionally the result is a pixel image if \code{i,j} determines
+  a rectangular subset of the pixel grid, and if the user specifies
+  \code{rescue=TRUE}.
+
+  Finally, if none of the above conditions is met,
+  the object \code{i} may also be a data frame or list of \code{x,y}
+  coordinates which will be converted to a point pattern, taking the
+  observation window to be \code{Window(x)}. Then the pixel values
+  at these points will be extracted as a vector.
+}
+\section{Warnings}{
+  If you have a 2-column matrix containing the \eqn{x,y} coordinates
+  of point locations, then to prevent this being interpreted as an
+  array index, you should convert it to a \code{data.frame}
+  or to a point pattern.
+
+  If \code{W} is a window or a pixel image, then \code{x[W, drop=FALSE]} 
+  will return an image defined on the same pixel array
+  as the original image \code{x}. If you want to obtain an image
+  whose pixel dimensions agree with those of \code{W}, use the
+  \code{raster} argument, \code{x[W, raster=W, drop=FALSE]}.
+}
+\seealso{
+  \code{\link{im.object}},
+  \code{\link{[<-.im}},
+  \code{\link{ppp.object}},
+  \code{\link{as.ppp}},
+  \code{\link{owin.object}},
+  \code{\link{plot.im}}
+}
+\examples{
+ # make up an image
+ X <- setcov(unit.square())
+ plot(X)
+
+ # a rectangular subset
+ W <- owin(c(0,0.5),c(0.2,0.8))
+ Y <- X[W]
+ plot(Y)
+
+ # a polygonal subset
+ R <- affine(letterR, diag(c(1,1)/2), c(-2,-0.7))
+ plot(X[R, drop=FALSE])
+ plot(X[R, drop=FALSE, tight=TRUE])
+
+ # a point pattern
+ P <- rpoispp(20)
+ Y <- X[P]
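+ # with drop=FALSE, points outside the domain of X would give NA (a sketch)
+ Yfull <- X[P, drop=FALSE]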
+
+ # look up a specified location
+ X[list(x=0.1,y=0.2)]
+
+ # 10 x 10 pixel array
+ X <- as.im(function(x,y) { x + y }, owin(c(-1,1),c(-1,1)), dimyx=10)
+ # 100 x 100 
+ W <- as.mask(disc(1, c(0,0)), dimyx=100)
+ # 10 x 10 raster
+ X[W,drop=FALSE]
+ # 100 x 100 raster
+ X[W, raster=W, drop=FALSE]
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.influence.ppm.Rd b/man/Extract.influence.ppm.Rd
new file mode 100644
index 0000000..a271eb3
--- /dev/null
+++ b/man/Extract.influence.ppm.Rd
@@ -0,0 +1,64 @@
+\name{Extract.influence.ppm}
+\alias{[.influence.ppm}
+\title{Extract Subset of Influence Object}
+\description{
+  Extract a subset of an influence object, 
+  or extract the influence values at specified locations.
+}
+\usage{
+  \method{[}{influence.ppm}(x, i, ...)
+}
+\arguments{
+  \item{x}{
+    An influence object (of class \code{"influence.ppm"})
+    computed by \code{\link{influence.ppm}}.
+  }
+  \item{i}{
+    Subset index (passed to \code{\link{[.ppp}}).
+    Either a spatial window (object of class \code{"owin"})
+    or an integer index.
+  }
+  \item{\dots}{
+     Ignored.
+  }
+}
+\value{
+  Another object of class \code{"influence.ppm"}.
+}
+\details{
+  An object of class \code{"influence.ppm"} contains the values of the
+  likelihood influence for a point process model, computed by
+  \code{\link{influence.ppm}}. This is effectively a marked point
+  pattern obtained by marking each of the original data points
+  with its likelihood influence.
+  
+  This function extracts a designated subset of the influence values
+  and returns it as another influence object.
+
+  The function \code{[.influence.ppm} is a method for \code{\link{[}} for the
+  class \code{"influence.ppm"}. The argument \code{i} should be
+  an index applicable to a point pattern. It may be either
+  a spatial window (object of class \code{"owin"})
+  or a sequence index.
+  The result will be another influence object
+  (of class \code{"influence.ppm"}).
+
+  To extract the influence values as a numeric vector,
+  use \code{marks(as.ppp(x))}.
+}
+\seealso{
+  \code{\link{influence.ppm}}.
+}
+\examples{
+   fit <- ppm(cells, ~x)
+   infl <- influence(fit)
+   b <- owin(c(0.1, 0.3), c(0.2, 0.4))
+   infl[b]
+   infl[1:5]
+   marks(as.ppp(infl))[1:3]
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.layered.Rd b/man/Extract.layered.Rd
new file mode 100644
index 0000000..08197a0
--- /dev/null
+++ b/man/Extract.layered.Rd
@@ -0,0 +1,88 @@
+\name{Extract.layered}
+\alias{[.layered}
+\alias{[<-.layered}
+\alias{[[<-.layered}
+\title{Extract or Replace Subset of a Layered Object}
+\description{
+  Extract or replace some or all of the layers of a layered object,
+  or extract a spatial subset of each layer.
+}
+\usage{
+  \method{[}{layered}(x, i, j, drop=FALSE, ...)
+
+  \method{[}{layered}(x, i) <- value
+
+  \method{[[}{layered}(x, i) <- value
+}
+\arguments{
+  \item{x}{
+    A layered object (class \code{"layered"}).
+  }
+  \item{i}{
+    Subset index for the list of layers.
+    A logical vector, integer vector or character vector
+    specifying which layers are to be extracted or replaced.
+  }
+  \item{j}{
+    Subset index to be applied to the data in each layer.
+    Typically a spatial window (class \code{"owin"}).
+  }
+  \item{drop}{
+    Logical. If \code{i} specifies only a single layer
+    and \code{drop=TRUE}, then the contents of this layer
+    will be returned.
+  }
+  \item{\dots}{
+     Additional arguments, passed to other subset methods
+     if the subset index is a window.
+  }
+  \item{value}{List of objects which shall replace the designated
+    subset, or an object which shall replace the designated element.
+  }
+}
+\value{
+  Usually an object of class \code{"layered"}.
+}
+\details{
+  A layered object represents data that should be plotted in
+  successive layers, for example, a background and a foreground.
+  See \code{\link{layered}}.
+
+  The function \code{[.layered}
+  extracts a designated subset of a layered object.
+  It is a method for \code{\link{[}} for the
+  class \code{"layered"}.
+
+  The functions \code{[<-.layered} and \code{[[<-.layered}
+  replace a designated subset or designated entry of the object by new
+  values. They are methods for \code{\link{[<-}} and \code{\link{[[<-}}
+  for the \code{"layered"} class.
+
+  The index \code{i} specifies which layers will be retained.
+  It should be a valid subset index for the list of layers.
+
+  The index \code{j} will be applied to each layer. It is typically
+  a spatial window (class \code{"owin"}) so that each of the layers
+  will be restricted to the same spatial region.
+  Alternatively \code{j} may be any subset index
+  which is permissible for the \code{"["} method for each of the layers.
+}
+\seealso{
+  \code{\link{layered}}
+}
+\examples{
+ D <- distmap(cells)
+ L <- layered(D, cells,
+              plotargs=list(list(ribbon=FALSE), list(pch=16)))
+
+ L[-2]
+ L[, square(0.5)]
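+
+ # contents of a single layer via drop=TRUE, as described above (a sketch)
+ L[2, drop=TRUE]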
+
+ L[[3]] <- japanesepines
+ L
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.leverage.ppm.Rd b/man/Extract.leverage.ppm.Rd
new file mode 100644
index 0000000..2d6fe66
--- /dev/null
+++ b/man/Extract.leverage.ppm.Rd
@@ -0,0 +1,74 @@
+\name{Extract.leverage.ppm}
+\alias{[.leverage.ppm}
+\title{Extract Subset of Leverage Object}
+\description{
+  Extract a subset of a leverage map, 
+  or extract the leverage values at specified locations.
+}
+\usage{
+  \method{[}{leverage.ppm}(x, i, \dots, update=TRUE)
+}
+\arguments{
+  \item{x}{
+    A leverage object (of class \code{"leverage.ppm"})
+    computed by \code{\link{leverage.ppm}}.
+  }
+  \item{i}{
+    Subset index (passed to \code{\link{[.im}}).
+    Either a spatial window (object of class \code{"owin"})
+    or a spatial point pattern (object of class \code{"ppp"}).
+  }
+  \item{\dots}{
+    Further arguments passed to \code{\link{[.im}},
+    especially the argument \code{drop}.
+  }
+  \item{update}{
+    Logical value indicating whether to update the internally-stored value
+    of the mean leverage, by averaging over the specified subset.
+  }    
+}
+\value{
+  Another object of class \code{"leverage.ppm"},
+  or a vector of numeric values of leverage.
+}
+\details{
+  An object of class \code{"leverage.ppm"} contains the values of the
+  leverage function for a point process model, computed by
+  \code{\link{leverage.ppm}}.
+
+  This function extracts a designated subset of the leverage values,
+  either as another leverage object, or as a vector of numeric values.
+
+  The function \code{[.leverage.ppm} is a method for \code{\link{[}} for the
+  class \code{"leverage.ppm"}. The argument \code{i} should be either
+  \itemize{
+    \item 
+     a spatial window (object of class \code{"owin"})
+     determining a region where the leverage map is required.
+     The result will typically be another leverage map
+     (object of class \code{"leverage.ppm"}).
+   \item 
+     a spatial point pattern (object of class \code{"ppp"})
+     specifying locations at which the leverage values are required.
+     The result will be a numeric vector.
+  }
+  The subset operator for images, \code{\link{[.im}}, is applied to
+  the leverage map. If this yields a pixel image, then the result of
+  \code{\link{[.leverage.ppm}} is another leverage object. Otherwise,
+  a vector containing the numeric values of leverage is returned.
+}
+\seealso{
+  \code{\link{leverage.ppm}}.
+}
+\examples{
+   fit <- ppm(cells ~x)
+   lev <- leverage(fit)
+   b <- owin(c(0.1, 0.3), c(0.2, 0.4))
+   lev[b]
+   lev[cells]
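+   # update=FALSE retains the original mean leverage value (a sketch)
+   lev[b, update=FALSE]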
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.linim.Rd b/man/Extract.linim.Rd
new file mode 100644
index 0000000..928a2fa
--- /dev/null
+++ b/man/Extract.linim.Rd
@@ -0,0 +1,62 @@
+\name{Extract.linim}
+\alias{[.linim}
+\title{Extract Subset of Pixel Image on Linear Network}
+\description{
+  Extract a subset of a pixel image on a linear network.
+}
+\usage{
+  \method{[}{linim}(x, i, \dots, drop=TRUE)
+}
+\arguments{
+  \item{x}{
+    A pixel image on a linear network (object of class \code{"linim"}).
+  }
+  \item{i}{
+    Spatial window defining the subregion.
+    Either a spatial window (an object of class \code{"owin"}), or a
+    logical-valued pixel image, 
+    or any type of index that applies to a matrix,
+    or a point pattern (an object of class \code{"lpp"} or \code{"ppp"}),
+    or something that can be converted to a point pattern
+    by \code{\link{as.lpp}} (using the network on which \code{x} is defined).
+  }
+  \item{\dots}{Additional arguments passed to \code{[.im}.}
+  \item{drop}{Logical value indicating whether \code{NA} values should
+  be omitted from the result.}
+}
+\value{
+  Another pixel image on a linear network (object of class \code{"linim"})
+  or a vector of pixel values.
+}
+\details{
+  This function is a method for the subset operator \code{"["} for
+  pixel images on linear networks (objects of class \code{"linim"}). 
+
+  The pixel image \code{x} will be restricted to the 
+  domain specified by \code{i}.
+
+  Pixels outside the domain of \code{x} are assigned the value \code{NA};
+  if \code{drop=TRUE} (the default) such \code{NA} values are deleted
+  from the result; if \code{drop=FALSE}, then \code{NA} values are retained.
+
+  If \code{i} is a window (or a logical-valued pixel image)
+  then \code{x[i]} is another pixel image of class \code{"linim"},
+  representing the restriction of \code{x} to the spatial domain
+  specified by \code{i}.
+
+  If \code{i} is a point pattern, then \code{x[i]} is the vector of
+  pixel values of \code{x} at the locations specified by \code{i}.
+}
+\examples{
+  M <- as.mask.psp(as.psp(simplenet))
+  Z <- as.im(function(x,y){x}, W=M)
+  Y <- linim(simplenet, Z)
+  X <- runiflpp(4, simplenet)
+  Y[X]
+  Y[square(c(0.3, 0.6))]
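+  # with drop=FALSE, NA values outside the subwindow are retained (a sketch)
+  Y[square(c(0.3, 0.6)), drop=FALSE]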
+}
+\author{
+  \adrian
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.linnet.Rd b/man/Extract.linnet.Rd
new file mode 100644
index 0000000..33a2924
--- /dev/null
+++ b/man/Extract.linnet.Rd
@@ -0,0 +1,66 @@
+\name{Extract.linnet}
+\alias{[.linnet}
+\title{Extract Subset of Linear Network}
+\description{
+  Extract a subset of a linear network.
+}
+\usage{
+  \method{[}{linnet}(x, i, \dots, snip=TRUE)
+}
+\arguments{
+  \item{x}{
+    A linear network (object of class \code{"linnet"}).
+  }
+  \item{i}{
+    Spatial window defining the subregion.
+    An object of class \code{"owin"}.
+  }
+  \item{snip}{
+    Logical. If \code{TRUE} (the default), segments of \code{x}
+    which cross the boundary of \code{i} will be cut by the boundary.
+    If \code{FALSE}, these segments will be deleted.
+  }
+  \item{\dots}{Ignored.}
+}
+\value{
+  Another linear network (object of class \code{"linnet"}).
+}
+\details{
+  This function computes the intersection
+  between the linear network \code{x} and the domain specified by \code{i}.
+  
+  This function is a method for the subset operator \code{"["} for
+  linear networks (objects of class \code{"linnet"}). It is provided
+  mainly for completeness.
+
+  The index \code{i} should be a window.
+
+  The argument \code{snip} specifies what to do with segments of \code{x}
+  which cross the boundary of \code{i}. 
+  If \code{snip=FALSE}, such segments are simply deleted.  
+  If \code{snip=TRUE} (the default), such segments are cut into pieces by the
+  boundary of \code{i}, and those pieces which lie inside the window
+  \code{i} are included in the resulting network. 
+}
+\examples{
+  p <- par(mfrow=c(1,2), mar=0.2+c(0,0,1,0))
+  B <- owin(c(0.1,0.7),c(0.19,0.6))
+
+  plot(simplenet, main="x[w, snip=TRUE]")
+  plot(simplenet[B], add=TRUE, col="green", lwd=3)
+  plot(B, add=TRUE, border="red", lty=3)
+
+  plot(simplenet, main="x[w, snip=FALSE]")
+  plot(simplenet[B, snip=FALSE], add=TRUE, col="green", lwd=3)
+  plot(B, add=TRUE, border="red", lty=3)
+
+  par(p)
+}
+\author{
+  \adrian,
+  \rolf,
+  \ege
+  and Suman Rakshit.
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.listof.Rd b/man/Extract.listof.Rd
new file mode 100644
index 0000000..fb07263
--- /dev/null
+++ b/man/Extract.listof.Rd
@@ -0,0 +1,47 @@
+\name{Extract.listof}
+\alias{[<-.listof}
+\title{Replace Subset of a List of Things}
+\description{
+  Replace a subset of a list of things.
+}
+\usage{
+  \method{[}{listof}(x, i) <- value
+}
+\arguments{
+  \item{x}{
+    An object of class \code{"listof"} representing a list of things
+    which all belong to one class.
+  }
+  \item{i}{
+    Subset index. Any valid subset index in the usual \R sense.
+  }
+  \item{value}{
+    Replacement value for the subset. 
+  }
+}
+\value{
+  Another object of class \code{"listof"}.
+}
+\details{
+  This is a subset replacement method for the class \code{"listof"}.
+
+  The argument \code{x} should be an object of class \code{"listof"}
+  representing a list of things that all belong to one class.
+
+  The method replaces a designated
+  subset of \code{x}, and returns an object of class \code{"listof"}.
+}
+\seealso{
+  \code{\link{plot.listof}},
+  \code{\link{summary.listof}}
+}
+\examples{
+   x <- list(A=runif(10), B=runif(10), C=runif(10))
+   class(x) <- c("listof", class(x))
+   x[1] <- list(A=rnorm(10))
+ }
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.lpp.Rd b/man/Extract.lpp.Rd
new file mode 100644
index 0000000..998d798
--- /dev/null
+++ b/man/Extract.lpp.Rd
@@ -0,0 +1,101 @@
+\name{Extract.lpp}
+\alias{[.lpp}
+\title{Extract Subset of Point Pattern on Linear Network}
+\description{
+  Extract a subset of a point pattern on a linear network.
+}
+\usage{
+  \method{[}{lpp}(x, i, j, drop=FALSE, \dots, snip=TRUE)
+}
+\arguments{
+  \item{x}{
+    A point pattern on a linear network (object of class \code{"lpp"}).
+  }
+  \item{i}{
+   Subset index. A valid subset index in the usual \R sense,
+   indicating which points should be retained.
+  }
+  \item{j}{
+    Spatial window
+   (object of class \code{"owin"}) delineating the region that should
+   be retained.
+  }
+  \item{drop}{
+    Logical value indicating whether to remove unused levels
+    of the marks, if the marks are a factor.
+  }
+  \item{snip}{
+    Logical. If \code{TRUE} (the default), segments of the network
+    which cross the boundary of the window \code{j} will be cut by the boundary.
+    If \code{FALSE}, these segments will be deleted.
+  }
+  \item{\dots}{
+    Ignored. 
+  }
+}
+\value{
+  A point pattern on a linear network (of class \code{"lpp"}).
+}
+\details{
+  This function extracts a designated subset of a point pattern
+  on a linear network.
+
+  The function \code{[.lpp} is a method for \code{\link{[}} for the
+  class \code{"lpp"}. It extracts a designated subset of a point pattern.
+  The argument \code{i} should be a subset index in the usual \R sense:
+  either a numeric vector
+  of positive indices (identifying the points to be retained),
+  a numeric vector of negative indices (identifying the points
+  to be deleted) or a logical vector of length equal to the number of
+  points in the point pattern \code{x}. In the latter case,
+  the points for which \code{i} is \code{TRUE}
+  will be retained, and the others will be deleted.
+
+  The argument \code{j}, if present, should be a spatial window.
+  The points inside this region will be retained.
+  Segments of the network that cross the boundary of the window
+  are either cut or deleted, as determined by the argument \code{snip}.
+
+  The argument \code{drop} determines whether to remove
+  unused levels of a factor, if the point pattern is multitype
+  (i.e. the marks are a factor) or if the marks are a data frame or hyperframe
+  in which some of the columns are factors.
+
+  The argument \code{snip} specifies what to do with segments of
+  the network which cross the boundary of the window \code{j}. 
+  If \code{snip=FALSE}, such segments are simply deleted.  
+  If \code{snip=TRUE} (the default), such segments are cut into pieces by the
+  boundary of \code{j}, and those pieces which lie inside the window
+  \code{j} are included in the resulting network. 
+
+  Use \code{\link{unmark}} to remove all the marks in a marked point
+  pattern, and \code{\link{subset.lpp}} to remove only some columns of marks. 
+}
+\seealso{
+  \code{\link{lpp}},
+  \code{\link{subset.lpp}}
+}
+\examples{
+  # Chicago crimes data - remove cases of assault
+  chicago[marks(chicago) != "assault"]
+  # equivalent to subset(chicago, marks != "assault")
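+
+  # thinning by index, as for ordinary point patterns (a sketch)
+  chicago[1:20]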
+
+  # spatial window subset
+  B <- owin(c(350, 700), c(600, 1000))
+  plot(chicago)
+  plot(B, add=TRUE, lty=2, border="red", lwd=3)
+  op <- par(mfrow=c(1,2), mar=0.6+c(0,0,1,0))
+  plot(B, main="chicago[B, snip=FALSE]", lty=3, border="red")
+  plot(chicago[, B, snip=FALSE], add=TRUE)
+  plot(B, main="chicago[B, snip=TRUE]", lty=3, border="red")
+  plot(chicago[, B, snip=TRUE], add=TRUE)
+  par(op)
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.msr.Rd b/man/Extract.msr.Rd
new file mode 100644
index 0000000..0037234
--- /dev/null
+++ b/man/Extract.msr.Rd
@@ -0,0 +1,51 @@
+\name{Extract.msr}
+\alias{[.msr}
+\title{Extract Subset of Signed or Vector Measure}
+\description{
+  Extract a subset of a signed measure or vector-valued measure.
+}
+\usage{
+\method{[}{msr}(x, i, j, \dots)
+}
+\arguments{
+  \item{x}{
+    A signed or vector measure.
+    An object of class \code{"msr"} (see \code{\link{msr}}).
+  }
+  \item{i}{
+    Object defining the subregion or subset to be extracted.
+    Either a spatial window (an object of class \code{"owin"}),
+    or a pixel image with logical values,
+    or any type of index that applies to a matrix.
+  }
+  \item{j}{
+    Subset index selecting the vector coordinates to be extracted,
+    if \code{x} is a vector-valued measure.
+  }
+  \item{\dots}{Ignored.}
+} 
+\value{
+   An object of class \code{"msr"}.
+}
+\details{
+  This operator extracts a subset of
+  the data which determines the signed measure
+  or vector-valued measure \code{x}. The result is another measure.
+}
+\seealso{
+  \code{\link{msr}}
+}
+\examples{
+   X <- rpoispp(function(x,y) { exp(3+3*x) })
+   fit <- ppm(X ~x+y)
+   rp <- residuals(fit, type="pearson")
+   rs <- residuals(fit, type="score")
+
+   rp[square(0.5)]
+   rs[ , 2:3]
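+
+   # restrict to a window and select one vector coordinate at once (a sketch)
+   rs[square(0.5), 1]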
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.owin.Rd b/man/Extract.owin.Rd
new file mode 100644
index 0000000..c97eadb
--- /dev/null
+++ b/man/Extract.owin.Rd
@@ -0,0 +1,51 @@
+\name{Extract.owin}
+\alias{[.owin}
+\title{Extract Subset of Window}
+\description{
+  Extract a subset of a window.
+}
+\usage{
+  \method{[}{owin}(x, i, \dots)
+}
+\arguments{
+  \item{x}{
+    A spatial window (object of class \code{"owin"}).
+  }
+  \item{i}{
+    Object defining the subregion.
+    Either a spatial window, or a
+    pixel image with logical values.
+  }
+  \item{\dots}{Ignored.}
+}
+\value{
+  Another spatial window (object of class \code{"owin"}).
+}
+\details{
+  This function computes the intersection
+  between the window \code{x} and the domain specified by \code{i},
+  using \code{\link{intersect.owin}}.
+  
+  This function is a method for the subset operator \code{"["} for
+  spatial windows (objects of class \code{"owin"}). It is provided
+  mainly for completeness.
+
+  The index \code{i} may be either a window, or a pixel image with
+  logical values (the \code{TRUE} values of the
+  image specify the spatial domain).
+}
+\seealso{
+  \code{\link{intersect.owin}}
+}
+\examples{
+ W <- owin(c(2.5, 3.2), c(1.4, 2.9))
+ plot(letterR)
+ plot(letterR[W], add=TRUE, col="red")
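+
+ # a logical-valued pixel image can also serve as the index (a sketch)
+ D <- distmap(cells)
+ Window(cells)[D > 0.05]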
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.ppp.Rd b/man/Extract.ppp.Rd
new file mode 100644
index 0000000..1014b0b
--- /dev/null
+++ b/man/Extract.ppp.Rd
@@ -0,0 +1,206 @@
+\name{Extract.ppp}
+\alias{[.ppp}
+\alias{[<-.ppp}
+\title{Extract or Replace Subset of Point Pattern}
+\description{
+  Extract or replace a subset of a point pattern.
+  Extraction of a subset has the effect of thinning the 
+  points and/or trimming the window.
+}
+\usage{
+  \method{[}{ppp}(x, i, j, drop=FALSE, \dots, clip=FALSE)
+  \method{[}{ppp}(x, i, j) <- value
+}
+\arguments{
+  \item{x}{
+    A two-dimensional point pattern.
+    An object of class \code{"ppp"}.
+  }
+  \item{i}{
+   Subset index. Either a valid subset index in the usual \R sense,
+   indicating which points should be retained, or a window
+   (an object of class \code{"owin"}) 
+   delineating a subset of the original observation window,
+   or a pixel image with logical values defining a subset of the
+   original observation window.
+  }
+  \item{value}{
+    Replacement value for the subset. A point pattern.
+  }
+  \item{j}{
+    Redundant. Included for backward compatibility.
+  }
+  \item{drop}{
+    Logical value indicating whether to remove unused levels
+    of the marks, if the marks are a factor.
+  }
+  \item{clip}{
+    Logical value indicating how to form the window of the resulting
+    point pattern, when \code{i} is a window. 
+    If \code{clip=FALSE} (the default), the result has window
+    equal to \code{i}. If \code{clip=TRUE}, the resulting window
+    is the intersection between the window of \code{x} and the
+    window \code{i}.
+  }
+  \item{\dots}{
+    Ignored. This argument is required for compatibility
+    with the generic function.
+  }
+}
+\value{
+  A point pattern (of class \code{"ppp"}).
+}
+\details{
+  These functions extract a designated subset of a point pattern,
+  or replace the designated subset with another point pattern.
+
+  The function \code{[.ppp} is a method for \code{\link{[}} for the
+  class \code{"ppp"}. It extracts a designated subset of a point pattern,
+  either by ``\emph{thinning}''
+  (retaining/deleting some points of a point pattern)
+  or ``\emph{trimming}'' (reducing the window of observation
+  to a smaller subregion and retaining only
+  those points which lie in the subregion) or both.
+
+  The pattern will be ``thinned''
+  if \code{i} is a subset index in the usual \R sense:
+  either a numeric vector
+  of positive indices (identifying the points to be retained),
+  a numeric vector of negative indices (identifying the points
+  to be deleted) or a logical vector of length equal to the number of
+  points in the point pattern \code{x}. In the latter case,
+  the points for which \code{i} is \code{TRUE}
+  will be retained, and the others will be deleted.
+ 
+  The pattern will be ``trimmed''
+  if \code{i} is an object of class 
+  \code{"owin"} specifying a window of observation.
+  The points of \code{x} lying inside the new
+  window \code{i} will be retained. Alternatively \code{i} may be a
+  pixel image (object of class \code{"im"}) with logical values;
+  the pixels with the value \code{TRUE} will be interpreted as a window.
+
+  The argument \code{drop} determines whether to remove
+  unused levels of a factor, if the point pattern is multitype
+  (i.e. the marks are a factor) or if the marks are a data frame
+  in which some of the columns are factors.
+
+  The function \code{[<-.ppp} is a method for \code{\link{[<-}} for the
+  class \code{"ppp"}. It replaces the designated
+  subset with the point pattern \code{value}.
+  The subset of \code{x} to be replaced is designated by
+  the argument \code{i} as above.
+
+  The replacement point pattern \code{value} must lie inside the
+  window of the original pattern \code{x}.
+  The ordering of points in \code{x} will be preserved
+  if the replacement pattern \code{value} has the same number of points
+  as the subset to be replaced.  Otherwise the ordering is
+  unpredictable.
+
+  If the original pattern \code{x} has marks, then the replacement
+  pattern \code{value} must also have marks, of the same type.
+
+  Use the function \code{\link{unmark}} to remove marks from a
+  marked point pattern.
+
+  Use the function \code{\link{split.ppp}} to select those points
+  in a marked point pattern which have a specified mark.
+}
+\seealso{
+  \code{\link{subset.ppp}}.
+
+  \code{\link{ppp.object}},
+  \code{\link{owin.object}},
+  \code{\link{unmark}},
+  \code{\link{split.ppp}},
+  \code{\link{cut.ppp}}
+}
+\section{Warnings}{
+  The function does not check whether \code{i} is a subset of
+  \code{Window(x)}. Nor does it check whether \code{value} lies
+  inside \code{Window(x)}.
+}
+\examples{
+ # Longleaf pines data
+ lon <- longleaf
+ \dontrun{
+ plot(lon)
+ }
+ \testonly{lon <- lon[seq(1,npoints(lon),by=10)]}
+
+ # adult trees defined to have diameter at least 30 cm
+ longadult <- subset(lon, marks >= 30)
+ \dontrun{
+ plot(longadult)
+ }
+ # note that the marks are still retained.
+ # Use unmark(longadult) to remove the marks
+ 
+ # New Zealand trees data
+ \dontrun{
+ plot(nztrees)          # plot shows a line of trees at the far right
+ abline(v=148, lty=2)   # cut along this line
+ }
+ nzw <- owin(c(0,148),c(0,95)) # the subwindow
+ # trim dataset to this subwindow
+ nzsub <- nztrees[nzw]
+ \dontrun{
+ plot(nzsub)
+ }
+
+ # Redwood data
+ \dontrun{
+ plot(redwood)
+ }
+ # Random thinning: delete 60\% of data
+ retain <- (runif(npoints(redwood)) < 0.4)
+ thinred <- redwood[retain]
+ \dontrun{
+ plot(thinred)
+ }
+
+ # Scramble 60\% of data
+ X <- redwood
+ modif <- (runif(npoints(X)) < 0.6)
+ X[modif] <- runifpoint(ex=X[modif])
+
+ # Lansing woods data - multitype points
+ lan <- lansing
+ \testonly{
+    lan <- lan[seq(1, npoints(lan), length=100)]
+ }
+
+ # Hickory trees
+  hicks <- split(lansing)$hickory
+
+ # Trees in subwindow
+  win <- owin(c(0.3, 0.6),c(0.2, 0.5))
+  lsub <- lan[win]
+
+ # Scramble the locations of trees in subwindow, retaining their marks
+  lan[win] <- runifpoint(ex=lsub) \%mark\% marks(lsub)
+
+ # Extract oaks only
+ oaknames <- c("redoak", "whiteoak", "blackoak")
+ oak <- lan[marks(lan) \%in\% oaknames, drop=TRUE]
+ # equivalently
+ oak <- subset(lan, marks \%in\% oaknames, drop=TRUE)
+
+ # To clip or not to clip
+ X <- runifpoint(25, letterR)
+ B <- owin(c(2.2, 3.9), c(2, 3.5))
+ opa <- par(mfrow=c(1,2))
+ plot(X, main="X[B]")
+ plot(X[B], border="red", cols="red", add=TRUE, show.all=TRUE, main="")
+ plot(X, main="X[B, clip=TRUE]")
+ plot(B, add=TRUE, lty=2)
+ plot(X[B, clip=TRUE], border="blue", cols="blue", add=TRUE,
+      show.all=TRUE, main="")
+ par(opa)
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.ppx.Rd b/man/Extract.ppx.Rd
new file mode 100644
index 0000000..ddbb3fd
--- /dev/null
+++ b/man/Extract.ppx.Rd
@@ -0,0 +1,71 @@
+\name{Extract.ppx}
+\alias{[.ppx}
+\title{Extract Subset of Multidimensional Point Pattern}
+\description{
+  Extract a subset of a multidimensional point pattern.
+}
+\usage{
+  \method{[}{ppx}(x, i, drop=FALSE, ...)
+}
+\arguments{
+  \item{x}{
+    A multidimensional point pattern (object of class \code{"ppx"}).
+  }
+  \item{i}{
+   Subset index. A valid subset index in the usual \R sense,
+   indicating which points should be retained;
+   or a spatial domain of class \code{"boxx"} or \code{"box3"}.
+  }
+  \item{drop}{
+    Logical value indicating whether to remove unused levels
+    of the marks, if the marks are a factor.
+  }
+  \item{\dots}{
+    Ignored. 
+  }
+}
+\value{
+  A multidimensional point pattern (of class \code{"ppx"}).
+}
+\details{
+  This function extracts a designated subset of a multidimensional
+  point pattern.
+
+  The function \code{[.ppx} is a method for \code{\link{[}} for the
+  class \code{"ppx"}. It extracts a designated subset of a point pattern.
+  The argument \code{i} may be either
+  \itemize{
+    \item a subset index in the usual \R sense:
+    either a numeric vector
+    of positive indices (identifying the points to be retained),
+    a numeric vector of negative indices (identifying the points
+    to be deleted) or a logical vector of length equal to the number of
+    points in the point pattern \code{x}. In the latter case,
+    the points for which \code{i} is \code{TRUE}
+    will be retained, and the others will be deleted.
+  \item
+    a spatial domain of class \code{"boxx"} or \code{"box3"}.
+    Points falling inside this region will be retained.
+  }
+  The argument \code{drop} determines whether to remove
+  unused levels of a factor, if the point pattern is multitype
+  (i.e. the marks are a factor) or if the marks are a data frame or hyperframe
+  in which some of the columns are factors.
+
+  Use the function \code{\link{unmark}} to remove marks from a
+  marked point pattern.
+}
+\seealso{
+  \code{\link{ppx}}
+}
+\examples{
+   df <- data.frame(x=runif(4),y=runif(4),z=runif(4))
+   X <- ppx(data=df, coord.type=c("s","s","t"))
+   X[-2]
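+   # restriction to a spatial domain; a sketch assuming all coordinates
+   # are spatial, so that a three-dimensional "boxx" is appropriate
+   Y <- ppx(data=data.frame(x=runif(8), y=runif(8), z=runif(8)))
+   Y[boxx(c(0,0.5), c(0,0.5), c(0,1))]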
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.psp.Rd b/man/Extract.psp.Rd
new file mode 100644
index 0000000..271ba63
--- /dev/null
+++ b/man/Extract.psp.Rd
@@ -0,0 +1,98 @@
+\name{Extract.psp}
+\alias{[.psp}
+\title{Extract Subset of Line Segment Pattern}
+\description{
+  Extract a subset of a line segment pattern.
+}
+\usage{
+  \method{[}{psp}(x, i, j, drop, \dots, fragments=TRUE)
+}
+\arguments{
+  \item{x}{
+    A two-dimensional line segment pattern.
+    An object of class \code{"psp"}.
+  }
+  \item{i}{
+    Subset index. Either a valid subset index in the usual \R sense,
+   indicating which segments should be retained, or a window
+   (an object of class \code{"owin"})
+    delineating a subset of the original observation window. 
+  }
+  \item{j}{
+   Redundant - included for backward compatibility.
+  }
+  \item{drop}{
+    Ignored. Required for compatibility with generic function.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{fragments}{
+    Logical value indicating whether to retain all pieces of line segments
+    that intersect the new window (\code{fragments=TRUE}, the default)
+    or to retain only those line segments
+    that lie entirely inside the new window (\code{fragments=FALSE}).
+  }
+} 
+\value{
+  A line segment pattern (of class \code{"psp"}).
+}
+\details{
+  These functions extract a designated subset of a line segment pattern.
+
+  The function \code{[.psp} is a method for \code{\link{[}} for the
+  class \code{"psp"}. It extracts a designated subset of a line segment pattern,
+  either by ``\emph{thinning}''
+  (retaining/deleting some line segments of a line segment pattern)
+  or ``\emph{trimming}'' (reducing the window of observation
+  to a smaller subregion and clipping the line segments to
+  this boundary) or both.
+ 
+  The pattern will be ``thinned''
+  if \code{i} is a subset index in the usual \R sense.
+  The line segments designated by \code{i} will be retained.
+  Here \code{i} can be a numeric vector
+  of positive indices (identifying the line segments to be retained),
+  a numeric vector of negative indices (identifying the line segments
+  to be deleted) or a logical vector of length equal to the number
+  of line segments in the line segment pattern \code{x}. In the latter case,
+  the line segments for which \code{i} is \code{TRUE}
+  will be retained, and the others will be deleted.
+ 
+  The pattern will be ``trimmed''
+  if \code{i} is an object of class \code{"owin"}
+  specifying a window of observation
+  to which the line segment pattern \code{x} will be trimmed.
+  Line segments of \code{x} lying entirely inside the new
+  window will be retained unchanged. Line segments lying
+  partially inside the new window and partially outside it
+  will, by default, be clipped so that they lie entirely inside the window;
+  but if \code{fragments=FALSE}, such segments will be removed.
+ 
+  Both ``thinning'' and ``trimming'' can be performed together.
+}
+\seealso{
+  \code{\link{psp.object}},
+  \code{\link{owin.object}}
+}
+\examples{
+    a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin())
+    plot(a)
+  # thinning
+    id <- sample(c(TRUE, FALSE), 20, replace=TRUE)
+    b <- a[id]
+    plot(b, add=TRUE, lwd=3)
+ # trimming
+    plot(a)
+    w <- owin(c(0.1,0.7), c(0.2, 0.8))
+    b <- a[w]
+    plot(b, add=TRUE, col="red", lwd=2)
+    plot(w, add=TRUE)
+    u <- a[w, fragments=FALSE]
+    plot(u, add=TRUE, col="blue", lwd=3)
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.quad.Rd b/man/Extract.quad.Rd
new file mode 100644
index 0000000..2f00d7c
--- /dev/null
+++ b/man/Extract.quad.Rd
@@ -0,0 +1,50 @@
+\name{Extract.quad}
+\alias{[.quad}
+\title{Subset of Quadrature Scheme}
+\description{
+  Extract a subset of a quadrature scheme.
+}
+\usage{
+  \method{[}{quad}(x, ...)
+}
+\arguments{
+  \item{x}{
+    A quadrature scheme (object of class \code{"quad"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{[.ppp}} to determine the
+    subset.
+  }
+}
+\value{
+  A quadrature scheme (object of class \code{"quad"}).
+}
+\details{
+  This function extracts a designated subset of a quadrature scheme.
+
+  The function \code{[.quad} is a method for \code{\link{[}} for the
+  class \code{"quad"}. It extracts a designated subset of a quadrature
+  scheme.
+
+  The subset to be extracted is determined by the arguments \code{\dots} which
+  are interpreted by \code{\link{[.ppp}}. Thus it is possible to take the
+  subset consisting of all quadrature points that lie inside a
+  given region, or a subset of quadrature points identified by
+  numeric indices.
+}
+\seealso{
+  \code{\link{quad.object}},
+  \code{\link{[.ppp}}.
+}
+\examples{
+ Q <- quadscheme(nztrees)
+ W <- owin(c(0,148),c(0,95)) # a subwindow
+ Q[W]
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.solist.Rd b/man/Extract.solist.Rd
new file mode 100644
index 0000000..0bb69c3
--- /dev/null
+++ b/man/Extract.solist.Rd
@@ -0,0 +1,63 @@
+\name{Extract.solist}
+\alias{[.solist}
+\alias{[<-.solist}
+\title{Extract or Replace Subset of a List of Spatial Objects}
+\description{
+  Extract or replace some entries in a list of spatial objects,
+  or extract a designated sub-region in each object.
+}
+\usage{
+  \method{[}{solist}(x, i, \dots)
+
+  \method{[}{solist}(x, i) <- value
+}
+\arguments{
+  \item{x}{
+    An object of class \code{"solist"} representing a list of
+    two-dimensional spatial objects.
+  }
+  \item{i}{
+    Subset index. Any valid subset index for vectors in the usual \R sense,
+    or a window (object of class \code{"owin"}).
+  }
+  \item{value}{
+    Replacement value for the subset. 
+  }
+  \item{\dots}{Ignored.}
+}
+\value{
+  Another object of the same class as \code{x}.
+}
+\details{
+  These are methods for extracting and replacing subsets
+  for the class \code{"solist"}.
+
+  The argument \code{x} should be an object of class \code{"solist"}
+  representing a list of two-dimensional spatial objects.
+  See \code{\link{solist}}.
+
+  For the subset method, the subset index \code{i} can be either
+  a vector index (specifying some elements of the list)
+  or a spatial window (specifying a spatial sub-region). 
+
+  For the replacement method,
+  \code{i} must be a vector index: the designated elements will be
+  replaced.
+}
+\seealso{
+  \code{\link{solist}},
+  \code{\link{plot.solist}},
+  \code{\link{summary.solist}}
+}
+\examples{
+   x <- solist(japanesepines, cells, redwood)
+   x[2:3]
+   x[square(0.5)]
+   x[1] <- list(finpines)
+ }
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{list}
+\keyword{manip}
diff --git a/man/Extract.splitppp.Rd b/man/Extract.splitppp.Rd
new file mode 100644
index 0000000..733048e
--- /dev/null
+++ b/man/Extract.splitppp.Rd
@@ -0,0 +1,55 @@
+\name{Extract.splitppp}
+\alias{[.splitppp}
+\alias{[<-.splitppp}
+\title{Extract or Replace Sub-Patterns}
+\description{
+  Extract or replace some of the sub-patterns in a split point pattern.
+}
+\usage{
+  \method{[}{splitppp}(x, ...)
+  \method{[}{splitppp}(x, ...) <- value
+}
+\arguments{
+  \item{x}{
+    An object of class \code{"splitppp"}, representing a point pattern
+    separated into a list of sub-patterns.
+  }
+  \item{\dots}{
+    Subset index. Any valid subset index in the usual \R sense.
+  }
+  \item{value}{
+    Replacement value for the subset. A list of point patterns.
+  }
+}
+\value{
+  Another object of class \code{"splitppp"}.
+}
+\details{
+  These are subset methods for the class \code{"splitppp"}.
+
+  The argument \code{x} should be an object of class \code{"splitppp"},
+  representing a point pattern that has been separated into a
+  list of sub-patterns. It is created by \code{\link{split.ppp}}.
+  
+  The methods extract or replace a designated
+  subset of the list \code{x}, and return an object of class \code{"splitppp"}.
+}
+\seealso{
+  \code{\link{split.ppp}},
+  \code{\link{plot.splitppp}},
+  \code{\link{summary.splitppp}}
+}
+\examples{
+  # amacrine is a multitype point pattern
+  y <- split(amacrine)
+  y[1]
+  y["off"]
+  y[1] <- list(runifpoint(42, Window(amacrine)))
+ }
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.ssf.Rd b/man/Extract.ssf.Rd
new file mode 100644
index 0000000..9fbb7e6
--- /dev/null
+++ b/man/Extract.ssf.Rd
@@ -0,0 +1,46 @@
+\name{[.ssf}
+\alias{[.ssf}
+\title{
+  Subset of spatially sampled function
+}
+\description{
+  Extract a subset of the data for a spatially sampled function.
+}
+\usage{
+ \method{[}{ssf}(x, i, j, ..., drop)
+}
+\arguments{
+  \item{x}{
+    Object of class \code{"ssf"}.
+  }  
+  \item{i}{
+    Subset index applying to the locations where the function is sampled.
+  }
+  \item{j}{
+   Subset index applying to the columns (variables) measured at each location.
+  }
+  \item{\dots, drop}{
+   Ignored.
+ }
+}
+\details{
+   This is the subset operator for the class \code{"ssf"}.
+}
+\value{
+  Another object of class \code{"ssf"}.
+}
+\author{
+\adrian.
+}
+\seealso{
+\code{\link{ssf}}, 
+\code{\link{with.ssf}}
+}
+\examples{
+  f <- ssf(cells, data.frame(d=nndist(cells), i=1:42))
+  f
+  f[1:10,]
+  f[ ,1]
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/Extract.tess.Rd b/man/Extract.tess.Rd
new file mode 100644
index 0000000..73149c5
--- /dev/null
+++ b/man/Extract.tess.Rd
@@ -0,0 +1,69 @@
+\name{Extract.tess}
+\alias{[.tess}
+\alias{[<-.tess}
+\title{Extract or Replace Subset of Tessellation}
+\description{
+  Extract, change or delete a subset of the tiles of a tessellation,
+  to make a new tessellation.
+}
+\usage{
+  \method{[}{tess}(x, i, \dots)
+  \method{[}{tess}(x, i, \dots) <- value
+}
+\arguments{
+  \item{x}{A tessellation (object of class \code{"tess"}).}
+  \item{i}{
+     Subset index for the tiles of the tessellation.
+     Alternatively a window (object of class \code{"owin"}).
+  }
+  \item{\dots}{
+     Ignored.
+  }
+  \item{value}{
+     Replacement value for the selected tiles of the tessellation.
+     A list of windows (objects of class \code{"owin"}) or \code{NULL}. 
+  }
+}
+\details{
+  A tessellation (object of class \code{"tess"}, see \code{\link{tess}})
+  is effectively a list of tiles (spatial regions) that cover a spatial region.
+  The subset operator \code{[.tess} extracts some
+  of these tiles and forms a new tessellation, which of course covers a
+  smaller region than the original. 
+
+  For \code{[.tess} only, the subset index can also be a window
+  (object of class \code{"owin"}). The tessellation \code{x}
+  is then intersected with the window.
+
+  The replacement operator changes the selected tiles. The replacement
+  \code{value} may be either \code{NULL} (which causes the selected tiles
+  to be removed from \code{x}) or a list of the same length as
+  the selected subset. The entries of \code{value} may be windows
+  (objects of class \code{"owin"}) or \code{NULL} to indicate that the
+  corresponding tile should be deleted.
+
+  Generally it does not make sense to replace a tile in a tessellation
+  with a completely different tile, because the tiles are expected to
+  fit together. However this facility is sometimes useful for making
+  small adjustments to polygonal tiles.
+}
+\value{
+  A tessellation (object of class \code{"tess"}).
+}
+\seealso{
+  \code{\link{tess}}, \code{\link{tiles}}, \code{\link{intersect.tess}}.
+}
+\examples{
+   \testonly{op <- spatstat.options(npixel=10)}
+   A <- tess(xgrid=0:4, ygrid=0:3)
+   B <- A[c(1, 3, 7)]
+   E <- A[-1]
+   A[c(2, 5, 11)] <- NULL
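+   # the subset index can also be a window (a sketch):
+   # intersect the tessellation with a rectangle
+   W <- owin(c(0.5, 3.5), c(0.5, 2.5))
+   D <- A[W]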
+   \testonly{spatstat.options(op)}
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/F3est.Rd b/man/F3est.Rd
new file mode 100644
index 0000000..776c53d
--- /dev/null
+++ b/man/F3est.Rd
@@ -0,0 +1,162 @@
+\name{F3est}
+\Rdversion{1.1}
+\alias{F3est}
+\title{
+  Empty Space Function of a Three-Dimensional Point Pattern
+}
+\description{
+  Estimates the empty space function \eqn{F_3(r)}{F3(r)} from 
+  a three-dimensional point pattern.
+}
+\usage{
+F3est(X, ..., rmax = NULL, nrval = 128, vside = NULL,
+              correction = c("rs", "km", "cs"),
+              sphere = c("fudge", "ideal", "digital"))
+}
+\arguments{
+  \item{X}{
+    Three-dimensional point pattern (object of class \code{"pp3"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{rmax}{
+    Optional. Maximum value of argument \eqn{r} for which
+    \eqn{F_3(r)}{F3(r)} will be estimated. 
+  }
+  \item{nrval}{
+    Optional. Number of values of \eqn{r} for which
+    \eqn{F_3(r)}{F3(r)} will be estimated. A large value of \code{nrval}
+    is required to avoid discretisation effects.
+  }
+  \item{vside}{
+    Optional. 
+    Side length of the voxels in the discrete approximation.
+  }
+  \item{correction}{
+    Optional. Character vector specifying the edge correction(s)
+    to be applied. See Details.
+  }
+  \item{sphere}{
+    Optional. Character string specifying how to calculate the
+    theoretical value of \eqn{F_3(r)}{F3(r)} for a Poisson
+    process. See Details.
+  }
+}
+\details{
+  For a stationary point process \eqn{\Phi}{Phi} in three-dimensional
+  space, the empty space function is
+  \deqn{
+    F_3(r) = P(d(0,\Phi) \le r)
+  }{
+    F3(r) = P(d(0,Phi) <= r)
+  }
+  where \eqn{d(0,\Phi)}{d(0,Phi)} denotes the distance from a fixed
+  origin \eqn{0} to the nearest point of \eqn{\Phi}{Phi}.
+  
+  The three-dimensional point pattern \code{X} is assumed to be a
+  partial realisation of a stationary point process \eqn{\Phi}{Phi}.
+  The empty space function of \eqn{\Phi}{Phi} can then be estimated using
+  techniques described in the References.
+
+  The box containing the point
+  pattern is discretised into cubic voxels of side length \code{vside}.
+  The distance function \eqn{d(u,\Phi)}{d(u,Phi)} is computed for
+  every voxel centre point
+  \eqn{u} using a three-dimensional version of the distance transform
+  algorithm (Borgefors, 1986). The empirical cumulative distribution
+  function of these values, with appropriate edge corrections, is the
+  estimate of \eqn{F_3(r)}{F3(r)}.
+
+  The available edge corrections are:
+  \describe{
+    \item{\code{"rs"}:}{
+      the reduced sample (aka minus sampling, border correction)
+      estimator (Baddeley et al, 1993)
+    }
+    \item{\code{"km"}:}{
+      the three-dimensional version of the
+      Kaplan-Meier estimator (Baddeley and Gill, 1997)
+    }
+    \item{\code{"cs"}:}{
+      the three-dimensional generalisation of
+      the Chiu-Stoyan or Hanisch estimator (Chiu and Stoyan, 1998).
+    }
+  }
+  Alternatively \code{correction="all"} selects all options.
+
+  The result includes a column \code{theo} giving the 
+  theoretical value of \eqn{F_3(r)}{F3(r)} for
+  a uniform Poisson process (Complete Spatial Randomness).
+  This value depends on the volume of the sphere of radius \code{r}
+  measured in the discretised distance metric.
+  The argument \code{sphere} determines how this will be calculated.
+  \itemize{
+    \item
+    If \code{sphere="ideal"} the calculation will use the
+    volume of an ideal sphere of radius \eqn{r} namely
+    \eqn{(4/3) \pi r^3}{(4/3) * pi * r^3}. This is not recommended
+    because the theoretical values of \eqn{F_3(r)}{F3(r)}
+    are inaccurate.
+    \item
+    If \code{sphere="fudge"} then the volume of the ideal sphere will
+    be multiplied by 0.78, which gives the approximate volume
+    of the sphere in the discretised distance metric.
+    \item
+    If \code{sphere="digital"} then the volume of the sphere in the
+    discretised distance metric is computed exactly using another
+    distance transform. This takes longer to compute, but is exact.
+  }
+}
+\value{
+  A function value table (object of class \code{"fv"}) that can be
+  plotted, printed or coerced to a data frame containing the function values.
+}
+\references{
+  Baddeley, A.J., Moyeed, R.A., Howard, C.V. and Boyde, A.
+  Analysis of a three-dimensional point pattern with replication.
+  \emph{Applied Statistics} \bold{42} (1993) 641--668.
+
+  Baddeley, A.J. and Gill, R.D. (1997)
+  Kaplan-Meier estimators of interpoint distance
+  distributions for spatial point processes.
+  \emph{Annals of Statistics} \bold{25}, 263--292.
+
+  Borgefors, G. (1986)
+  Distance transformations in digital images.
+  \emph{Computer Vision, Graphics and Image Processing}
+  \bold{34}, 344--371.
+
+  Chiu, S.N. and Stoyan, D. (1998)
+  Estimators of distance distributions for spatial patterns.
+  \emph{Statistica Neerlandica} \bold{52}, 239--246.
+}
+\author{
+  \adrian
+  
+  
+  and Rana Moyeed.
+}
+\section{Warnings}{
+  A small value of \code{vside} and a large value of \code{nrval}
+  are required for reasonable accuracy. 
+
+  The default value of \code{vside} ensures that the total number of
+  voxels is \code{2^22} or about 4 million.
+  To change the default number of voxels, see
+  \code{\link{spatstat.options}("nvoxel")}.
+}
+\seealso{
+  \code{\link{G3est}},
+  \code{\link{K3est}},
+  \code{\link{pcf3est}}.
+}
+\examples{
+  \testonly{op <- spatstat.options(nvoxel=2^18)}
+  X <- rpoispp3(42)
+  Z <- F3est(X)
+  if(interactive()) plot(Z)
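+  # a sketch: compare the reduced-sample estimate with the exact
+  # 'digital' sphere calculation (slower, but exact; see Details)
+  if(interactive()) {
+    Z2 <- F3est(X, correction="rs", sphere="digital")
+    plot(Z2)
+  }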
+  \testonly{spatstat.options(op)}
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Fest.Rd b/man/Fest.Rd
new file mode 100644
index 0000000..56687cf
--- /dev/null
+++ b/man/Fest.Rd
@@ -0,0 +1,311 @@
+\name{Fest}
+\alias{Fest}
+\alias{Fhazard}
+\title{Estimate the Empty Space Function or its Hazard Rate}
+\description{
+  Estimates the empty space function \eqn{F(r)}
+  or its hazard rate \eqn{h(r)} from a point pattern in a 
+  window of arbitrary shape.
+}
+\usage{
+Fest(X, \dots, eps, r=NULL, breaks=NULL,
+     correction=c("rs", "km", "cs"),
+     domain=NULL)
+
+Fhazard(X, \dots) 
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of \eqn{F(r)} will be computed.
+    An object of class \code{ppp}, or data
+    in any format acceptable to \code{\link{as.ppp}()}.
+  }
+  \item{\dots}{
+    Extra arguments, passed from \code{Fhazard} to \code{Fest}.
+    Extra arguments to \code{Fest} are ignored.
+  }
+  \item{eps}{Optional. A positive number.
+    The resolution of the discrete approximation to Euclidean
+    distance (see below). There is a sensible default.
+  }
+  \item{r}{Optional. Numeric vector. The values of the argument \eqn{r}
+    at which \eqn{F(r)} should be evaluated. There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{breaks}{
+    This argument is for internal use only.
+  }
+  \item{correction}{
+    Optional.
+    The edge correction(s) to be used to estimate \eqn{F(r)}.
+    A vector of character strings selected from
+    \code{"none"}, \code{"rs"}, \code{"km"}, \code{"cs"}
+    and \code{"best"}.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{domain}{
+    Optional. Calculations will be restricted to this subset
+    of the window. See Details.
+  }
+}
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+
+  The result of \code{Fest} is 
+  essentially a data frame containing up to seven columns:
+  \item{r}{the values of the argument \eqn{r} 
+    at which the function \eqn{F(r)} has been  estimated
+  }
+  \item{rs}{the ``reduced sample'' or ``border correction''
+    estimator of \eqn{F(r)}
+  }
+  \item{km}{the spatial Kaplan-Meier estimator of \eqn{F(r)}
+  }
+  \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)}
+    of \eqn{F(r)} by the spatial Kaplan-Meier method
+  }
+  \item{cs}{the Chiu-Stoyan estimator of \eqn{F(r)}
+  }
+  \item{raw}{the uncorrected estimate of \eqn{F(r)},
+  i.e. the empirical distribution of the distance from 
+  a random point in the window to the nearest point of
+  the data pattern \code{X}
+  }
+  \item{theo}{the theoretical value of \eqn{F(r)}
+  for a stationary Poisson process of the same estimated intensity.
+  }
+
+  The result of \code{Fhazard} contains only three columns
+  \item{r}{the values of the argument \eqn{r} 
+    at which the hazard rate \eqn{h(r)} has been  estimated
+  }
+  \item{hazard}{the spatial Kaplan-Meier estimate of the
+    hazard rate \eqn{h(r)}}
+  \item{theo}{
+    the theoretical value of \eqn{h(r)}
+    for a stationary Poisson process of the same estimated intensity.
+  }
+}
+\details{
+  \code{Fest} computes an estimate of the empty space function \eqn{F(r)},
+  and \code{Fhazard} computes an estimate of its hazard rate \eqn{h(r)}.
+  
+  The empty space function 
+  (also called the ``\emph{spherical contact distribution}''
+  or the ``\emph{point-to-nearest-event}'' distribution)
+  of a stationary point process \eqn{X}
+  is the cumulative distribution function \eqn{F} of the distance
+  from a fixed point in space to the nearest point of \eqn{X}.
+
+  An estimate of \eqn{F} derived from a spatial point pattern dataset
+  can be used in exploratory data analysis and formal inference
+  about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1988).
+  In exploratory analyses, the estimate of \eqn{F} is a useful statistic 
+  summarising the sizes of gaps in the pattern.
+  For inferential purposes, the estimate of \eqn{F} is usually compared to the 
+  true value of \eqn{F} for a completely random (Poisson) point process,
+  which is
+  \deqn{F(r) = 1 - e^{ - \lambda \pi r^2}}{%
+    F(r) = 1 - exp( - lambda * pi * r^2)      %
+  }
+  where \eqn{\lambda}{lambda}
+  is the intensity (expected number of points per unit area).
+  Deviations between the empirical and theoretical \eqn{F} curves
+  may suggest spatial clustering or spatial regularity.
+
+  This algorithm estimates the empty space function \eqn{F} 
+  from the point pattern \code{X}. It assumes that \code{X} can be treated
+  as a realisation of a stationary (spatially homogeneous) 
+  random spatial point process in the plane, observed through
+  a bounded window. 
+  The window (which is specified in \code{X}) may have arbitrary shape. 
+
+  The argument \code{X} is interpreted as a point pattern object 
+  (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can
+  be supplied in any of the formats recognised
+  by \code{\link{as.ppp}}.
+
+  The algorithm uses two discrete approximations which are controlled
+  by the parameter \code{eps} and by the spacing of values of \code{r}
+  respectively. (See below for details.)
+  First-time users are strongly advised not to specify these arguments.
+
+  The estimation of \eqn{F} is hampered by edge effects arising from 
+  the unobservability of points of the random pattern outside the window. 
+  An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). 
+  The edge corrections implemented here are the border method or
+  "\emph{reduced sample}" estimator, the spatial Kaplan-Meier estimator
+  (Baddeley and Gill, 1997) and the Chiu-Stoyan estimator (Chiu and
+  Stoyan, 1998). 
+
+  Our implementation makes essential use of the distance transform algorithm
+  of image processing (Borgefors, 1986). A fine grid of pixels is 
+  created in the observation window. The Euclidean distance between two pixels
+  is approximated by the length of the shortest path joining them in the grid,
+  where a path is a sequence of steps between adjacent pixels, and 
+  horizontal, vertical and diagonal steps have length
+  \eqn{1}, \eqn{1} and \eqn{\sqrt 2}{sqrt(2)}
+  respectively in pixel units. If the pixel grid is sufficiently fine then
+  this is an accurate approximation. 
+
+  The parameter \code{eps}
+  is the pixel width of the rectangular raster
+  used to compute the distance transform (see below). It must not be too
+  large: the absolute error in distance values due to discretisation is bounded
+  by \code{eps}.
+
+  If \code{eps} is not specified, the function
+  checks whether the window \code{Window(X)} contains pixel raster
+  information. If so, then \code{eps} is set equal to the 
+  pixel width of the raster; otherwise, \code{eps}
+  defaults to 1/100 of the width of the observation window.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{F(r)} should be evaluated. 
+  It is also used to determine the breakpoints
+  (in the sense of \code{\link{hist}})
+  for the computation of histograms of distances. The 
+  estimators are computed from histogram counts. 
+  This introduces a discretisation
+  error which is controlled by the fineness of the breakpoints.
+
+  First-time users would be strongly advised not to specify \code{r}.
+  However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, 
+  and \code{max(r)} must be larger than the radius of the largest disc 
+  contained in the window. Furthermore, the spacing of successive
+  \code{r} values must be very fine (ideally not greater than \code{eps/4}).
+
+  The algorithm also returns an estimate of the hazard rate function, 
+  \eqn{h(r)} of \eqn{F(r)}. The hazard rate is
+  defined by
+  \deqn{h(r) = - \frac{d}{dr} \log(1 - F(r))}{%
+    h(r) = - (d/dr) log(1 - F(r)) %
+  }
+  The hazard rate of \eqn{F} has been proposed as a useful
+  exploratory statistic (Baddeley and Gill, 1994).
+  The estimate of \eqn{h(r)} given here
+  is a discrete approximation to the hazard rate of the 
+  Kaplan-Meier estimator of \eqn{F}. Note that \eqn{F} is 
+  absolutely continuous (for any stationary point process \eqn{X}), 
+  so the hazard function always exists (Baddeley and Gill, 1997). 
+
+  If the argument \code{domain} is given, the estimate of \eqn{F(r)}
+  will be based only on the empty space distances
+  measured from locations inside \code{domain} (although their
+  nearest data points may lie outside \code{domain}).
+  This is useful in bootstrap techniques. The argument \code{domain}
+  should be a window (object of class \code{"owin"}) or something acceptable to
+  \code{\link{as.owin}}. It must be a subset of the
+  window of the point pattern \code{X}.
+
+  The naive empirical distribution of distances from each location
+  in the window to the nearest point of the data pattern, is a biased
+  estimate of \eqn{F}. However this is also returned by the algorithm
+  (if \code{correction="none"}),
+  as it is sometimes useful in other contexts.
+  Care should be taken not to use the uncorrected
+  empirical \eqn{F} as if it were an unbiased estimator of \eqn{F}.
+}
+\note{
+  Sizeable amounts of memory may be needed during the calculation.
+}
+\references{
+  Baddeley, A.J. Spatial sampling and censoring.
+     In O.E. Barndorff-Nielsen, W.S. Kendall and
+     M.N.M. van Lieshout (eds) 
+     \emph{Stochastic Geometry: Likelihood and Computation}.
+     Chapman and Hall, 1998.
+     Chapter 2, pages 37-78.
+  
+  Baddeley, A.J. and Gill, R.D. 
+    The empty space hazard of a spatial pattern.
+    Research Report 1994/3, Department of Mathematics,
+    University of Western Australia, May 1994.
+
+  Baddeley, A.J. and Gill, R.D.
+     Kaplan-Meier estimators of interpoint distance
+     distributions for spatial point processes.
+     \emph{Annals of Statistics} \bold{25} (1997) 263-292.
+
+  Borgefors, G.
+     Distance transformations in digital images.
+     \emph{Computer Vision, Graphics and Image Processing}
+     \bold{34} (1986) 344-371.
+
+  Chiu, S.N. and Stoyan, D. (1998)
+  Estimators of distance distributions for spatial patterns.
+  \emph{Statistica Neerlandica} \bold{52}, 239--246.
+  
+  Cressie, N.A.C. \emph{Statistics for spatial data}.
+    John Wiley and Sons, 1991.
+
+  Diggle, P.J. \emph{Statistical analysis of spatial point patterns}.
+    Academic Press, 1983.
+
+  Ripley, B.D. \emph{Statistical inference for spatial processes}.
+  Cambridge University Press, 1988.
+
+  Stoyan, D., Kendall, W.S. and Mecke, J.
+  \emph{Stochastic geometry and its applications}.
+  2nd edition. Springer Verlag, 1995.
+}
+\section{Warnings}{
+  The reduced sample (border method)
+  estimator of \eqn{F} is pointwise approximately 
+  unbiased, but need not be a valid distribution function; it may 
+  not be a nondecreasing function of \eqn{r}. Its range is always 
+  within \eqn{[0,1]}.
+
+  The spatial Kaplan-Meier estimator of \eqn{F} is always nondecreasing
+  but its maximum value may be less than \eqn{1}.
+
+  The estimate of hazard rate \eqn{h(r)}
+  returned by the algorithm is an approximately
+  unbiased estimate for the integral of \eqn{h()}
+  over the corresponding histogram cell.
+  It may exhibit oscillations due to discretisation effects.
+  We recommend modest smoothing, such as kernel smoothing with 
+  kernel width equal to the width of a histogram cell,
+  using \code{\link{Smooth.fv}}.
+}
+\seealso{
+  \code{\link{Gest}},
+  \code{\link{Jest}},
+  \code{\link{Kest}},
+  \code{\link{km.rs}},
+  \code{\link{reduced.sample}},
+  \code{\link{kaplan.meier}}
+}
+\examples{
+   Fc <- Fest(cells, 0.01)
+
+   # Tip: don't use F for the left hand side!
+   # That's an abbreviation for FALSE
+
+   plot(Fc)
+
+   # P-P style plot
+   plot(Fc, cbind(km, theo) ~ theo)
+
+   # The empirical F is above the Poisson F
+   # indicating an inhibited pattern
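+
+   # the 'theo' column should follow the Poisson formula
+   # F(r) = 1 - exp(-lambda * pi * r^2) (a sketch):
+   lam <- intensity(cells)
+   head(cbind(Fc$theo, 1 - exp(-lam * pi * Fc$r^2)))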
+
+   \dontrun{
+   plot(Fc, . ~ theo)
+   plot(Fc, asin(sqrt(.)) ~ asin(sqrt(theo)))
+   }
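+
+   # hazard rate of F; modest smoothing is recommended (see Warnings)
+   if(interactive()) {
+     Fh <- Fhazard(cells)
+     plot(Smooth(Fh))
+   }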
+   \testonly{
+   Fh <- Fhazard(cells)
+   }
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/Fiksel.Rd b/man/Fiksel.Rd
new file mode 100644
index 0000000..bfbd490
--- /dev/null
+++ b/man/Fiksel.Rd
@@ -0,0 +1,114 @@
+\name{Fiksel}
+\alias{Fiksel}
+\title{The Fiksel Interaction}
+\description{
+  Creates an instance of Fiksel's double exponential
+  pairwise interaction point process model,
+  which can then be fitted to point pattern data.
+}
+\usage{
+  Fiksel(r, hc=NA, kappa)
+}
+\arguments{
+  \item{r}{The interaction radius of the Fiksel model.}
+  \item{hc}{The hard core distance.}
+  \item{kappa}{The rate parameter.}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of the Fiksel
+  process with interaction radius \eqn{r},
+  hard core distance \code{hc} and
+  rate parameter \code{kappa}.
+}
+\details{
+  Fiksel (1984) introduced a pairwise interaction point process
+  with the following interaction function \eqn{c}.
+  For two points \eqn{u} and \eqn{v} separated by a distance
+  \eqn{d=||u-v||}, the interaction
+  \eqn{c(u,v)} is equal to \eqn{0} if \eqn{d < h},
+  equal to \eqn{1} if \eqn{d > r}, and
+  equal to
+  \deqn{ \exp(a \exp(-\kappa d))}{exp(a * exp(-kappa * d))}
+  if \eqn{h \le d \le r}{h <= d <= r}, where
+  \eqn{h,r,\kappa,a}{h,r,kappa,a} are parameters.
+  
+  A graph of this interaction function is shown in the Examples.
+  The interpretation of the parameters is as follows.
+  \itemize{
+    \item \eqn{h} is the hard core distance: distinct points are
+    not permitted to come closer than a distance \eqn{h} apart.
+    \item \eqn{r} is the interaction range: points further than
+    this distance do not interact.
+    \item \eqn{\kappa}{kappa} is the rate or slope parameter,
+    controlling the decay of the interaction as distance increases.
+    \item \eqn{a} is the interaction strength parameter,
+    controlling the strength and type of interaction.
+    If \eqn{a} is zero, the process is Poisson. If \eqn{a} is positive,
+    the process is clustered. If \eqn{a} is negative, the process is
+    inhibited (regular).
+  }
+  
+  The function \code{\link{ppm}()}, which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the Fiksel
+  pairwise interaction is
+  provided by the function \code{Fiksel()}. See the examples below.
+  
+  The parameters \eqn{h}, \eqn{r} and \eqn{\kappa}{kappa} must be
+  fixed and given in the call to \code{Fiksel}, while the canonical
+  parameter \eqn{a} is estimated by \code{\link{ppm}()}.
+  
+  To estimate \eqn{h}, \eqn{r} and \eqn{\kappa}{kappa}
+  it is possible to use \code{\link{profilepl}}. The maximum likelihood
+  estimator of \eqn{h} is the minimum interpoint distance.
+
+  If the hard core distance argument \code{hc} is missing or \code{NA},
+  it will be estimated from the data when \code{\link{ppm}} is called.
+  The estimated value of \code{hc} is the minimum nearest neighbour distance
+  multiplied by \eqn{n/(n+1)}, where \eqn{n} is the
+  number of data points.
+  
+  See also Stoyan, Kendall and Mecke (1987) page 161.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairwise.family}},
+  \code{\link{ppm.object}},
+  \code{\link{StraussHard}}
+}
+\references{
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42}, 283--322.
+
+  Fiksel, T. (1984)
+  Estimation of parameterized pair potentials
+  of marked and non-marked Gibbsian point processes.
+  \emph{Elektronische Informationsverarbeitung und Kybernetik}
+  \bold{20}, 270--278.
+
+  Stoyan, D., Kendall, W.S. and Mecke, J. (1987)
+  \emph{Stochastic geometry and its applications}. Wiley.
+}
+\examples{
+   Fiksel(r=1, hc=0.02, kappa=2)
+   # prints a sensible description of itself
+
+   data(spruces)
+   X <- unmark(spruces)
+
+   fit <- ppm(X ~ 1, Fiksel(r=3.5, kappa=1))
+   plot(fitin(fit))
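+
+   # a sketch of the interaction function c(d) itself, using
+   # hypothetical (not fitted) parameter values a, kappa, h, r:
+   a <- -1; kappa <- 2; h <- 0.02; r <- 1
+   d <- seq(0, 1.5, length=256)
+   cd <- ifelse(d < h, 0, ifelse(d > r, 1, exp(a * exp(-kappa * d))))
+   plot(d, cd, type="l", xlab="distance d", ylab="c(d)")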
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Finhom.Rd b/man/Finhom.Rd
new file mode 100644
index 0000000..f54a42e
--- /dev/null
+++ b/man/Finhom.Rd
@@ -0,0 +1,178 @@
+\name{Finhom}
+\alias{Finhom}
+\title{
+  Inhomogeneous Empty Space Function
+}
+\description{
+  Estimates the inhomogeneous empty space function of
+  a non-stationary point pattern.
+}
+\usage{
+  Finhom(X, lambda = NULL, lmin = NULL, ...,
+        sigma = NULL, varcov = NULL,
+        r = NULL, breaks = NULL, ratio = FALSE, update = TRUE)
+}
+\arguments{
+  \item{X}{
+    The observed data point pattern,
+    from which an estimate of the inhomogeneous \eqn{F} function
+    will be computed.
+    An object of class \code{"ppp"},
+    or data in any format recognised by \code{\link{as.ppp}()}.
+  }
+  \item{lambda}{
+    Optional.
+    Values of the estimated intensity function.
+    Either a vector giving the intensity values
+    at the points of the pattern \code{X},
+    a pixel image (object of class \code{"im"}) giving the
+    intensity values at all locations, a fitted point process model
+    (object of class \code{"ppm"}) or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+  }
+  \item{lmin}{
+    Optional. The minimum possible value of the intensity
+    over the spatial domain. A positive numerical value.
+  }
+  \item{sigma,varcov}{
+    Optional arguments passed to  \code{\link{density.ppp}}
+    to control the smoothing bandwidth, when \code{lambda} is
+    estimated by kernel smoothing.
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{\link{as.mask}} to control
+    the pixel resolution, or passed to \code{\link{density.ppp}}
+    to control the smoothing bandwidth.
+  }
+  \item{r}{
+    vector of values for the argument \eqn{r} at which
+    the inhomogeneous \eqn{K} function
+    should be evaluated. Not normally given by the user;
+    there is a sensible default.
+  }
+  \item{breaks}{
+    This argument is for internal use only.
+  }
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    the estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+  \item{update}{
+    Logical. If \code{lambda} is a fitted model
+    (class \code{"ppm"} or \code{"kppm"})
+    and \code{update=TRUE} (the default),
+    the model will first be refitted to the data \code{X}
+    (using \code{\link{update.ppm}} or \code{\link{update.kppm}})
+    before the fitted intensity is computed.
+    If \code{update=FALSE}, the fitted intensity of the
+    model will be computed without fitting it to \code{X}.
+  }
+}
+\details{
+  This command computes estimates of the 
+  inhomogeneous \eqn{F}-function (van Lieshout, 2010)
+  of a point pattern. It is the counterpart, for inhomogeneous
+  spatial point patterns, of the empty space function \eqn{F} 
+  for homogeneous point patterns computed by \code{\link{Fest}}.
+
+  The argument \code{X} should be a point pattern
+  (object of class \code{"ppp"}).
+
+  The inhomogeneous \eqn{F} function is computed
+  using the border correction, equation (6) in Van Lieshout (2010).
+  
+  The argument \code{lambda} should supply the
+  (estimated) values of the intensity function \eqn{\lambda}{lambda}
+  of the point process. It may be either
+  \describe{
+    \item{a numeric vector}{
+      containing the values
+      of the intensity function at the points of the pattern \code{X}.
+    }
+    \item{a pixel image}{
+      (object of class \code{"im"})
+      assumed to contain the values of the intensity function
+      at all locations in the window. 
+    }
+    \item{a fitted point process model}{
+      (object of class \code{"ppm"} or \code{"kppm"})
+      whose fitted \emph{trend} can be used as the fitted intensity.
+      (If \code{update=TRUE} the model will first be refitted to the
+      data \code{X} before the trend is computed.)
+    }
+    \item{a function}{
+      which can be evaluated to give values of the intensity at
+      any locations.
+    }
+    \item{omitted:}{
+      if \code{lambda} is omitted, then it will be estimated using
+      a `leave-one-out' kernel smoother.
+    }
+  }
+  If \code{lambda} is a numeric vector, then its length should
+  be equal to the number of points in the pattern \code{X}.
+  The value \code{lambda[i]} is assumed to be the 
+  (estimated) value of the intensity
+  \eqn{\lambda(x_i)}{lambda(x[i])} for
+  the point \eqn{x_i}{x[i]} of the pattern \eqn{X}.
+  Each value must be a positive number; \code{NA}'s are not allowed.
+
+  If \code{lambda} is a pixel image, the domain of the image should
+  cover the entire window of the point pattern. If it does not (which
+  may occur near the boundary because of discretisation error),
+  then the missing pixel values 
+  will be obtained by applying a Gaussian blur to \code{lambda} using
+  \code{\link{blur}}, then looking up the values of this blurred image
+  for the missing locations. 
+  (A warning will be issued in this case.)
+
+  If \code{lambda} is a function, then it will be evaluated in the
+  form \code{lambda(x,y)} where \code{x} and \code{y} are vectors
+  of coordinates of the points of \code{X}. It should return a numeric
+  vector with length equal to the number of points in \code{X}.
+
+  If \code{lambda} is omitted, then it will be estimated using
+  a `leave-one-out' kernel smoother, as described in Baddeley, \ifelse{latex}{\out{M\o ller}}{Moller}
+  and Waagepetersen (2000).  The estimate \code{lambda[i]} for the
+  point \code{X[i]} is computed by removing \code{X[i]} from the
+  point pattern, applying kernel smoothing to the remaining points using
+  \code{\link{density.ppp}}, and evaluating the smoothed intensity
+  at the point \code{X[i]}. The smoothing kernel bandwidth is controlled
+  by the arguments \code{sigma} and \code{varcov}, which are passed to
+  \code{\link{density.ppp}} along with any extra arguments.
+}
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+}
+\references{
+  Baddeley, A.J., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and
+  Waagepetersen, R. (2000)
+  Non- and semiparametric estimation of interaction
+  in inhomogeneous point patterns.
+  \emph{Statistica Neerlandica} \bold{54}, 329--350.
+
+  Van Lieshout, M.N.M. and Baddeley, A.J. (1996)
+  A nonparametric measure of spatial interaction in point patterns.
+  \emph{Statistica Neerlandica} \bold{50}, 344--361.
+
+  Van Lieshout, M.N.M. (2010)
+  A J-function for inhomogeneous point processes.
+  \emph{Statistica Neerlandica} \bold{65}, 183--201.
+}
+\seealso{
+  \code{\link{Ginhom}},
+  \code{\link{Jinhom}},
+  \code{\link{Fest}}
+}
+\examples{
+  \dontrun{
+  plot(Finhom(swedishpines, sigma=bw.diggle, adjust=2))
+  }
+  plot(Finhom(swedishpines, sigma=10))
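+  # a sketch using a fitted intensity model as 'lambda'
+  # (assuming a log-quadratic trend is adequate here)
+  if(interactive()) {
+    fit <- ppm(swedishpines ~ polynom(x, y, 2))
+    plot(Finhom(swedishpines, lambda=fit))
+  }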
+}
+\author{
+  Original code by Marie-Colette van Lieshout.
+  C implementation and R adaptation by \adrian
+  
+  
+  and \ege.
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/FmultiInhom.Rd b/man/FmultiInhom.Rd
new file mode 100644
index 0000000..21d3e11
--- /dev/null
+++ b/man/FmultiInhom.Rd
@@ -0,0 +1,80 @@
+\name{FmultiInhom}
+\alias{FmultiInhom}
+\title{
+  Inhomogeneous Marked F-Function
+}
+\description{
+  For a marked point pattern, 
+  estimate the inhomogeneous version of the multitype \eqn{F} function,
+  effectively the cumulative distribution function of the distance from
+  a fixed point to the nearest point in subset \eqn{J},
+  adjusted for spatially varying intensity.
+}
+\usage{
+  FmultiInhom(X, J,
+              lambda = NULL, lambdaJ = NULL, lambdamin = NULL,
+              \dots,
+              r = NULL)
+}
+\arguments{
+  \item{X}{
+    A spatial point pattern (object of class \code{"ppp"}).
+  }
+  \item{J}{
+    A subset index specifying the subset of points to which
+    distances are measured. Any kind of subset index acceptable
+    to \code{\link{[.ppp}}.
+  }
+  \item{lambda}{
+    Intensity estimates for each point of \code{X}.
+    A numeric vector of length equal to \code{npoints(X)}.
+    Incompatible with \code{lambdaJ}.
+  }
+  \item{lambdaJ}{
+    Intensity estimates for each point of \code{X[J]}.
+    A numeric vector of length equal to \code{npoints(X[J])}.
+    Incompatible with \code{lambda}.
+  }
+  \item{lambdamin}{
+    A lower bound for the intensity,
+    or at least a lower bound for the values in \code{lambdaJ}
+    or \code{lambda[J]}.
+  }
+  \item{\dots}{
+    Ignored. 
+  }
+  \item{r}{
+    Vector of distance values at which the inhomogeneous \eqn{F}
+    function should be estimated. There is a sensible default.
+  }
+}
+\details{
+  See Cronie and Van Lieshout (2015) for the definition of the
+  inhomogeneous multitype \eqn{F} function estimated here.
+}
+\value{
+  Object of class \code{"fv"} containing the estimate of the
+  inhomogeneous multitype \eqn{F} function.
+}
+\references{
+  Cronie, O. and Van Lieshout, M.N.M. (2015)
+  Summary statistics for inhomogeneous marked point processes.
+  \emph{Annals of the Institute of Statistical Mathematics}
+  DOI: 10.1007/s10463-015-0515-z
+}
+\author{
+  Ottmar Cronie and Marie-Colette van Lieshout.
+  Rewritten for \pkg{spatstat} by \adrian.
+}
+\seealso{
+  \code{\link{Finhom}}
+}
+\examples{
+  X <- amacrine
+  J <- (marks(X) == "off")           # measure distances to the "off" points
+  mod <- ppm(X ~ marks * x)          # intensity model with spatial trend
+  lam <- fitted(mod, dataonly=TRUE)  # fitted intensity at the data points
+  lmin <- min(predict(mod)[["off"]]) * 0.9   # lower bound for the intensity
+  plot(FmultiInhom(X, J, lambda=lam, lambdamin=lmin))
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Frame.Rd b/man/Frame.Rd
new file mode 100644
index 0000000..58178b8
--- /dev/null
+++ b/man/Frame.Rd
@@ -0,0 +1,74 @@
+\name{Frame}
+\alias{Frame}
+\alias{Frame<-}
+\alias{Frame.default}
+\alias{Frame<-.owin}
+\alias{Frame<-.ppp}
+\alias{Frame<-.im}
+\title{
+  Extract or Change the Containing Rectangle of a Spatial Object
+}
+\description{
+  Given a spatial object (such as a point pattern or pixel image)
+  in two dimensions, these functions extract or change the
+  containing rectangle inside which the object is defined.
+}
+\usage{
+   Frame(X)
+
+   \method{Frame}{default}(X)
+
+   Frame(X) <- value
+
+   \method{Frame}{owin}(X) <- value
+
+   \method{Frame}{ppp}(X) <- value
+
+   \method{Frame}{im}(X) <- value
+}
+\arguments{
+  \item{X}{
+    A spatial object such as a point pattern, line segment pattern
+    or pixel image.
+  }
+  \item{value}{
+    A rectangular window (object of class \code{"owin"}
+    of type \code{"rectangle"}) to be used as the new containing
+    rectangle for \code{X}.
+  }
+}
+\details{
+  The functions \code{Frame} and \code{Frame<-} are generic.
+
+  \code{Frame(X)} extracts the rectangle inside which \code{X} is
+  defined. 
+
+  \code{Frame(X) <- R} changes the rectangle inside which \code{X} is defined
+  to the new rectangle \code{R}.
+}
+\value{
+  The result of \code{Frame} is a rectangular window (object of class
+  \code{"owin"} of type \code{"rectangle"}).
+
+  The result of \code{Frame<-} is the updated object \code{X},
+  of the same class as \code{X}.
+}
+\author{\adrian,
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{Window}}
+}
+\examples{
+   Frame(cells)
+   X <- demopat
+   Frame(X)
+   Frame(X) <- owin(c(0, 11000), c(400, 8000))
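+   Frame(X)   # shows the new frame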
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/G3est.Rd b/man/G3est.Rd
new file mode 100644
index 0000000..1858989
--- /dev/null
+++ b/man/G3est.Rd
@@ -0,0 +1,118 @@
+\name{G3est}
+\Rdversion{1.1}
+\alias{G3est}
+\title{
+  Nearest Neighbour Distance Distribution Function
+  of a Three-Dimensional Point Pattern
+}
+\description{
+  Estimates the nearest-neighbour distance distribution function
+  \eqn{G_3(r)}{G3(r)} from a three-dimensional point pattern.
+}
+\usage{
+G3est(X, ..., rmax = NULL, nrval = 128, correction = c("rs", "km", "Hanisch"))
+}
+\arguments{
+  \item{X}{
+    Three-dimensional point pattern (object of class \code{"pp3"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{rmax}{
+    Optional. Maximum value of argument \eqn{r} for which
+    \eqn{G_3(r)}{G3(r)} will be estimated. 
+  }
+  \item{nrval}{
+    Optional. Number of values of \eqn{r} for which
+    \eqn{G_3(r)}{G3(r)} will be estimated. A large value of \code{nrval}
+    is required to avoid discretisation effects.
+  }
+  \item{correction}{
+    Optional. Character vector specifying the edge correction(s)
+    to be applied. See Details.
+  }
+}
+\details{
+  For a stationary point process \eqn{\Phi}{Phi} in three-dimensional
+  space, the nearest-neighbour function
+  is
+  \deqn{
+    G_3(r) = P(d^\ast(x,\Phi) \le r \mid x \in \Phi)
+  }{
+    G3(r) = P(d*(x,Phi) <= r | x in Phi)
+  }
+  the cumulative distribution function of the distance 
+  \eqn{d^\ast(x,\Phi)}{d*(x,Phi)} from a typical point \eqn{x}
+  in  \eqn{\Phi}{Phi} to its nearest neighbour, i.e.
+  to the nearest \emph{other} point of \eqn{\Phi}{Phi}.
+  
+  The three-dimensional point pattern \code{X} is assumed to be a
+  partial realisation of a stationary point process \eqn{\Phi}{Phi}.
+  The nearest neighbour function of \eqn{\Phi}{Phi} can then be estimated using
+  techniques described in the References. For each data point, the
+  distance to the nearest neighbour is computed.
+  The empirical cumulative distribution
+  function of these values, with appropriate edge corrections, is the
+  estimate of \eqn{G_3(r)}{G3(r)}.
+
+  The available edge corrections are:
+  \describe{
+    \item{\code{"rs"}:}{
+      the reduced sample (aka minus sampling, border correction)
+      estimator (Baddeley et al, 1993)
+    }
+    \item{\code{"km"}:}{
+      the three-dimensional version of the
+      Kaplan-Meier estimator (Baddeley and Gill, 1997)
+    }
+    \item{\code{"Hanisch"}:}{
+      the three-dimensional generalisation of
+      the Hanisch estimator (Hanisch, 1984).
+    }
+  }
+  Alternatively \code{correction="all"} selects all options.
+}
+\value{
+  A function value table (object of class \code{"fv"}) that can be
+  plotted, printed or coerced to a data frame containing the function values.
+}
+\references{
+  Baddeley, A.J., Moyeed, R.A., Howard, C.V. and Boyde, A. (1993)
+  Analysis of a three-dimensional point pattern with replication.
+  \emph{Applied Statistics} \bold{42}, 641--668.
+
+  Baddeley, A.J. and Gill, R.D. (1997)
+  Kaplan-Meier estimators of interpoint distance
+  distributions for spatial point processes.
+  \emph{Annals of Statistics} \bold{25}, 263--292.
+
+  Hanisch, K.-H. (1984) 
+  Some remarks on estimators of the distribution function
+  of nearest neighbour distance in stationary spatial point patterns.
+  \emph{Mathematische Operationsforschung und Statistik, series Statistics}
+  \bold{15}, 409--412.
+}
+\author{
+  \adrian
+  
+  
+  and Rana Moyeed.
+}
+\section{Warnings}{
+  A large value of \code{nrval} is required in order to avoid
+  discretisation effects (due to the use of histograms in the
+  calculation).
+}
+\seealso{
+  \code{\link{F3est}},
+  \code{\link{K3est}},
+  \code{\link{pcf3est}}
+}
+\examples{
+  X <- rpoispp3(42)
+  Z <- G3est(X)
+  if(interactive()) plot(Z)
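+  # the estimate is built from nearest-neighbour distances in 3D;
+  # these can be inspected directly (a sketch):
+  summary(nndist(X))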
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Gcom.Rd b/man/Gcom.Rd
new file mode 100644
index 0000000..649c278
--- /dev/null
+++ b/man/Gcom.Rd
@@ -0,0 +1,273 @@
+\name{Gcom}
+\Rdversion{1.1}
+\alias{Gcom}
+\title{
+  Model Compensator of Nearest Neighbour Function
+}
+\description{
+  Given a point process model fitted to a point pattern dataset,
+  this function computes the \emph{compensator} 
+  of the nearest neighbour distance distribution function \eqn{G}
+  based on the fitted model 
+  (as well as the usual nonparametric estimates
+  of \eqn{G} based on the data alone).
+  Comparison between the nonparametric and model-compensated \eqn{G}
+  functions serves as a diagnostic for the model.
+}
+\usage{
+Gcom(object, r = NULL, breaks = NULL, ...,
+     correction = c("border", "Hanisch"),
+     conditional = !is.poisson(object),
+     restrict=FALSE,
+     model=NULL,
+     trend = ~1, interaction = Poisson(),
+     rbord = reach(interaction),
+     ppmcorrection="border",
+     truecoef = NULL, hi.res = NULL)
+}
+\arguments{
+  \item{object}{
+    Object to be analysed.
+    Either a fitted point process model (object of class \code{"ppm"})
+    or a point pattern (object of class \code{"ppp"})
+    or quadrature scheme (object of class \code{"quad"}).
+  }
+  \item{r}{
+    Optional. 
+    Vector of values of the argument \eqn{r} at which the
+    function \eqn{G(r)} should be computed.
+    This argument is usually not specified. There is a sensible default.
+  }
+  \item{breaks}{
+    This argument is for internal use only.
+  }
+  \item{correction}{
+    Edge correction(s) to be employed in calculating the compensator.
+    Options are \code{"border"}, \code{"Hanisch"} and \code{"best"}.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{conditional}{
+    Optional. Logical value indicating whether to 
+    compute the estimates for the conditional case. See Details.
+  }
+  \item{restrict}{
+    Logical value indicating whether to compute
+    the restriction estimator (\code{restrict=TRUE}) or
+    the reweighting estimator (\code{restrict=FALSE}, the default).
+    Applies only if \code{conditional=TRUE}.  See Details.
+  }
+  \item{model}{
+    Optional. A fitted point process model (object of
+    class \code{"ppm"}) to be re-fitted to the data
+    using \code{\link{update.ppm}}, if \code{object} is a point pattern.
+    Overrides the arguments \code{trend,interaction,rbord,ppmcorrection}.
+  }
+  \item{trend,interaction,rbord}{
+    Optional. Arguments passed to \code{\link{ppm}}
+    to fit a point process model to the data,
+    if \code{object} is a point pattern.
+    See \code{\link{ppm}} for details.
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{\link{ppm}}.
+  }
+  \item{ppmcorrection}{
+    The \code{correction} argument to \code{\link{ppm}}.
+  }
+  \item{truecoef}{
+    Optional. Numeric vector. If present, this will be treated as 
+    if it were the true coefficient vector of the point process model,
+    in calculating the diagnostic. Incompatible with \code{hi.res}.
+  }
+  \item{hi.res}{
+    Optional. List of parameters passed to \code{\link{quadscheme}}.
+    If this argument is present, the model will be
+    re-fitted at high resolution as specified by these parameters.
+    The coefficients
+    of the resulting fitted model will be taken as the true coefficients.
+    Then the diagnostic will be computed for the default
+    quadrature scheme, but using the high resolution coefficients.
+  }
+}
+\details{
+  This command provides a diagnostic for the goodness-of-fit of
+  a point process model fitted to a point pattern dataset.
+  It computes different estimates of the nearest neighbour distance
+  distribution function \eqn{G} of the
+  dataset, which should be approximately equal if the model is a good
+  fit to the data.
+
+  The first argument, \code{object}, is usually a fitted point process model
+  (object of class \code{"ppm"}), obtained from the
+  model-fitting function \code{\link{ppm}}.
+
+  For convenience, \code{object} can also be a point pattern
+  (object of class \code{"ppp"}).
+  In that case, a point process
+  model will be fitted to it,
+  by calling \code{\link{ppm}} using the arguments
+  \code{trend} (for the first order trend),
+  \code{interaction} (for the interpoint interaction)
+  and \code{rbord} (for the erosion distance in the border correction
+  for the pseudolikelihood). See \code{\link{ppm}} for details
+  of these arguments.
+  
+  The algorithm first extracts the original point pattern dataset
+  (to which the model was fitted) and computes the 
+  standard nonparametric estimates of the \eqn{G} function.
+  It then also computes the \emph{model-compensated} 
+  \eqn{G} function. The different functions are returned
+  as columns in a data frame (of class \code{"fv"}).
+  The interpretation of the columns is as follows
+  (ignoring edge corrections):
+  \describe{
+    \item{\code{bord}:}{ 
+      the nonparametric border-correction estimate of \eqn{G(r)},
+      \deqn{
+	\hat G(r) = \frac{\sum_i I\{ d_i \le r\} I\{ b_i > r \}}{\sum_i I\{
+	  b_i > r\}}
+      }{
+	G(r) = (sum[i] I(d[i] <= r) I(b[i] > r))/(sum[i] I(b[i] > r))
+      }
+      where \eqn{d_i}{d[i]} is the distance from the \eqn{i}-th data point
+      to its nearest neighbour, and \eqn{b_i}{b[i]} is the distance from the
+      \eqn{i}-th data point to the boundary of the window \eqn{W}.
+    }
+    \item{\code{bcom}:}{
+      the model compensator of the border-correction estimate
+      \deqn{
+	{\bf C}\,  \hat G(r) = \frac{\int \lambda(u,x) I\{ b(u) > r\} I\{ d(u,x)
+	  \le r\}}{ 1 + \sum_i I\{ b_i > r\} }
+      }{
+	C G(r) = (integral[u] lambda(u,x) I(b(u) > r) I( d(u,x) <= r ))/(1
+	+ sum[i] I(b[i] > r))
+      }
+      where 
+      \eqn{\lambda(u,x)}{lambda(u,x)} denotes the conditional intensity
+      of the model at the location \eqn{u}, and \eqn{d(u,x)} denotes the
+      distance from \eqn{u} to the nearest point in \eqn{x}, while
+      \eqn{b(u)} denotes the distance from \eqn{u} to the boundary of the
+      window \eqn{W}.
+    }
+    \item{\code{han}:}{
+      the nonparametric Hanisch estimate of \eqn{G(r)}
+      \deqn{
+	\hat G(r) = \frac{D(r)}{D(\infty)}
+      }{
+	G(r) = D(r)/D(infty)
+      }
+      where
+      \deqn{
+	D(r) = \sum_i 
+	\frac{ I\{x_i \in W_{\ominus d_i}\} I\{d_i \le r\}
+	}{
+	  \mbox{area}(W_{\ominus d_i})
+	}
+      }{
+	D(r) = sum[i] I(x[i] in W[-d[i]]) I(d[i] <= r)/area(W[-d[i]])
+      }
+      in which \eqn{W_{\ominus r}}{W[-r]} denotes the erosion of the window
+      \eqn{W} by a distance \eqn{r}.
+    }
+    \item{\code{hcom}:}{
+      the corresponding model-compensated function 
+      \deqn{
+	{\bf C} \, G(r) = \int_W 
+	\frac{
+	  \lambda(u,x) I(u \in W_{\ominus d(u)}) I(d(u) \le r)
+	}{ 
+	  \hat D(\infty) \mbox{area}(W_{\ominus d(u)}) + 1
+	}
+      }{
+	C G(r) = integral[u] lambda(u,x) I(u in W[-d(u)]) I(d(u) <= r)/
+	(1 + D(infty) area(W[-d(u)]))
+      }
+      where \eqn{d(u) = d(u, x)} is the (`empty space') 
+      distance from location \eqn{u} to the nearest point of \eqn{x}.
+    }
+  }
+  
+  If the fitted model is a Poisson point process, then the formulae above
+  are exactly what is computed. If the fitted model is not Poisson, the 
+  formulae above are modified slightly to handle edge effects.
+
+  The modification is determined by the arguments
+  \code{conditional} and \code{restrict}.
+  The value of \code{conditional} defaults to \code{FALSE} for Poisson models
+  and \code{TRUE} for non-Poisson models.
+  If \code{conditional=FALSE} then the formulae above are not modified.
+  If \code{conditional=TRUE}, then the algorithm calculates
+  the \emph{restriction estimator} if \code{restrict=TRUE},
+  and calculates the \emph{reweighting estimator} if \code{restrict=FALSE}.
+  See Appendix E of Baddeley, Rubak
+  and \ifelse{latex}{\out{M\o ller}}{Moller} (2011).
+  See also \code{\link{spatstat.options}('eroded.intensity')}.
+  Thus, by default, the reweighting estimator is computed
+  for non-Poisson models.
+
+  The border-corrected and Hanisch-corrected estimates of \eqn{G(r)} are
+  approximately unbiased estimates of the \eqn{G}-function,
+  assuming the point process is
+  stationary. The model-compensated functions are unbiased estimates
+  \emph{of the mean value of the corresponding nonparametric estimate},
+  assuming the model is true. Thus, if the model is a good fit, the mean value
+  of the difference between the nonparametric and model-compensated
+  estimates is approximately zero.
+
+  To compute the difference between the nonparametric and model-compensated
+  functions, use \code{\link{Gres}}.
+}
+\value{
+  A function value table (object of class \code{"fv"}),
+  essentially a data frame of function values.
+  There is a plot method for this class. See \code{\link{fv.object}}.
+}
+\references{
+  Baddeley, A., Rubak, E. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2011)
+  Score, pseudo-score and residual
+  diagnostics for spatial point process models.
+  \emph{Statistical Science} \bold{26}, 613--646.
+}
+\author{
+  \adrian
+  
+  
+  \ege and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}.
+}
+\seealso{
+  Related functions:
+  \code{\link{Gest}},
+  \code{\link{Gres}}.
+
+  Alternative functions:
+  \code{\link{Kcom}}, 
+  \code{\link{psstA}}, 
+  \code{\link{psstG}}, 
+  \code{\link{psst}}.
+  
+  Model fitting: \code{\link{ppm}}.
+}
+\examples{
+    data(cells)
+    fit0 <- ppm(cells, ~1) # uniform Poisson
+    G0 <- Gcom(fit0)
+    G0
+    plot(G0)
+# uniform Poisson is clearly not correct
+
+# Hanisch estimates only
+    plot(Gcom(fit0), cbind(han, hcom) ~ r)
+
+    fit1 <- ppm(cells, ~1, Strauss(0.08))
+    plot(Gcom(fit1), cbind(han, hcom) ~ r)
+
+# Try adjusting interaction distance
+
+    fit2 <- update(fit1, Strauss(0.10))
+    plot(Gcom(fit2), cbind(han, hcom) ~ r)
+
+    G3 <- Gcom(cells, interaction=Strauss(0.12))
+    plot(G3, cbind(han, hcom) ~ r)
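+
+# The difference between the nonparametric and compensated estimates
+# can be computed directly with Gres:
+    plot(Gres(fit1))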
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Gcross.Rd b/man/Gcross.Rd
new file mode 100644
index 0000000..6e10ffe
--- /dev/null
+++ b/man/Gcross.Rd
@@ -0,0 +1,246 @@
+\name{Gcross}
+\alias{Gcross}
+\title{
+  Multitype Nearest Neighbour Distance Function (i-to-j)
+}
+\description{
+  For a multitype point pattern, 
+  estimate the distribution of the distance
+  from a point of type \eqn{i}
+  to the nearest point of type \eqn{j}.
+}
+\usage{
+Gcross(X, i, j, r=NULL, breaks=NULL, \dots, correction=c("rs", "km", "han"))
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the cross type distance distribution function
+    \eqn{G_{ij}(r)}{Gij(r)} will be computed.
+    It must be a multitype point pattern (a marked point pattern
+    whose marks are a factor). See under Details.
+  }
+  \item{i}{The type (mark value)
+    of the points in \code{X} from which distances are measured.
+    A character string (or something that will be converted to a
+    character string).
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{j}{The type (mark value)
+    of the points in \code{X} to which distances are measured.
+    A character string (or something that will be
+    converted to a character string).
+    Defaults to the second level of \code{marks(X)}.
+  }
+  \item{r}{Optional. Numeric vector. The values of the argument \eqn{r}
+    at which the distribution function
+    \eqn{G_{ij}(r)}{Gij(r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{breaks}{
+    This argument is for internal use only.
+  }
+  \item{\dots}{
+  	Ignored.
+  }
+  \item{correction}{
+    Optional. Character string specifying the edge correction(s)
+    to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"},
+    \code{"hanisch"} and \code{"best"}.
+    Alternatively \code{correction="all"} selects all options.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+  
+  Essentially a data frame containing six numeric columns 
+  \item{r}{the values of the argument \eqn{r} 
+    at which the function \eqn{G_{ij}(r)}{Gij(r)} has been  estimated
+  }
+  \item{rs}{the ``reduced sample'' or ``border correction''
+    estimator of \eqn{G_{ij}(r)}{Gij(r)}
+  }
+  \item{han}{the Hanisch-style estimator of \eqn{G_{ij}(r)}{Gij(r)}
+  }
+  \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{ij}(r)}{Gij(r)}
+  }
+  \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)}
+    of \eqn{G_{ij}(r)}{Gij(r)} by the spatial Kaplan-Meier method
+  }
+  \item{raw}{the uncorrected estimate of \eqn{G_{ij}(r)}{Gij(r)},
+  i.e. the empirical distribution of the distances from 
+  each point of type \eqn{i} to the nearest point of type \eqn{j}
+  }
+  \item{theo}{the theoretical value of \eqn{G_{ij}(r)}{Gij(r)}
+    for a marked Poisson process with the same estimated intensity
+    (see below).
+  }
+}
+\details{
+  This function \code{Gcross} and its companions
+  \code{\link{Gdot}} and \code{\link{Gmulti}}
+  are generalisations of the function \code{\link{Gest}}
+  to multitype point patterns. 
+
+  A multitype point pattern is a spatial pattern of
+  points classified into a finite number of possible
+  ``colours'' or ``types''. In the \pkg{spatstat} package,
+  a multitype pattern is represented as a single 
+  point pattern object in which the points carry marks,
+  and the mark value attached to each point
+  determines the type of that point.
+  
+  The argument \code{X} must be a point pattern (object of class
+  \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
+  It must be a marked point pattern, and the mark vector
+  \code{X$marks} must be a factor.
+  The arguments \code{i} and \code{j} will be interpreted as
+  levels of the factor \code{X$marks}. (Warning: this means that
+  an integer value \code{i=3} will be interpreted as
+  the number 3, \bold{not} the 3rd smallest level). 
+  
+  The ``cross-type'' (type \eqn{i} to type \eqn{j})
+  nearest neighbour distance distribution function 
+  of a multitype point process 
+  is the cumulative distribution function \eqn{G_{ij}(r)}{Gij(r)}
+  of the distance from a typical random point of the process with type \eqn{i}
+  to the nearest point of type \eqn{j}. 
+
+  An estimate of \eqn{G_{ij}(r)}{Gij(r)}
+  is a useful summary statistic in exploratory data analysis
+  of a multitype point pattern.
+  If the process of type \eqn{i} points
+  were independent of the process of type \eqn{j} points,
+  then \eqn{G_{ij}(r)}{Gij(r)} would equal \eqn{F_j(r)}{Fj(r)},
+  the empty space function of the type \eqn{j} points.
+  For a multitype Poisson point process where the type \eqn{i} points
+  have intensity \eqn{\lambda_i}{lambda[i]}, we have
+  \deqn{G_{ij}(r) = 1 - e^{ - \lambda_j \pi r^2} }{%
+    Gij(r) = 1 - exp( - lambda[j] * pi * r^2)}
+  Deviations between the empirical and theoretical \eqn{G_{ij}}{Gij} curves
+  may suggest dependence between the points of types \eqn{i} and \eqn{j}.
+
+  This algorithm estimates the distribution function \eqn{G_{ij}(r)}{Gij(r)} 
+  from the point pattern \code{X}. It assumes that \code{X} can be treated
+  as a realisation of a stationary (spatially homogeneous) 
+  random spatial point process in the plane, observed through
+  a bounded window.
+  The window (which is specified in \code{X} as \code{Window(X)})
+  may have arbitrary shape.
+  Biases due to edge effects are
+  treated in the same manner as in \code{\link{Gest}}.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{G_{ij}(r)}{Gij(r)} should be evaluated. 
+  It is also used to determine the breakpoints
+  (in the sense of \code{\link{hist}})
+  for the computation of histograms of distances. The reduced-sample and
+  Kaplan-Meier estimators are computed from histogram counts. 
+  In the case of the Kaplan-Meier estimator this introduces a discretisation
+  error which is controlled by the fineness of the breakpoints.
+
+  First-time users would be strongly advised not to specify \code{r}.
+  However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, 
+  and \code{max(r)} must be larger than the radius of the largest disc 
+  contained in the window. Furthermore, the successive entries of \code{r}
+  must be finely spaced.
+
+  The algorithm also returns an estimate of the hazard rate function, 
+  \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{ij}(r)}{Gij(r)}. 
+  This estimate should be used with caution as \eqn{G_{ij}(r)}{Gij(r)}
+  is not necessarily differentiable.
+
+  The naive empirical distribution of distances from each point of
+  the pattern \code{X} to the nearest other point of the pattern, 
+  is a biased estimate of \eqn{G_{ij}}{Gij}.
+  However this is also returned by the algorithm, as it is sometimes 
+  useful in other contexts. Care should be taken not to use the uncorrected
+  empirical \eqn{G_{ij}}{Gij} as if it were an unbiased estimator of
+  \eqn{G_{ij}}{Gij}.
+}
+\references{
+  Cressie, N.A.C. \emph{Statistics for spatial data}.
+    John Wiley and Sons, 1991.
+
+  Diggle, P.J. \emph{Statistical analysis of spatial point patterns}.
+    Academic Press, 1983.
+
+  Diggle, P. J. (1986).
+  Displaced amacrine cells in the retina of a
+  rabbit: analysis of a bivariate spatial point pattern. 
+  \emph{J. Neurosci. Meth.} \bold{18}, 115--125.
+ 
+  Harkness, R.D. and Isham, V. (1983)
+  A bivariate spatial point pattern of ants' nests.
+  \emph{Applied Statistics} \bold{32}, 293--303.
+ 
+  Lotwick, H. W. and Silverman, B. W. (1982).
+  Methods for analysing spatial processes of several types of points.
+  \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413.
+
+  Ripley, B.D. \emph{Statistical inference for spatial processes}.
+  Cambridge University Press, 1988.
+
+  Stoyan, D., Kendall, W.S. and Mecke, J.
+  \emph{Stochastic geometry and its applications}.
+  2nd edition. Springer Verlag, 1995.
+
+  Van Lieshout, M.N.M. and Baddeley, A.J. (1999)
+  Indices of dependence between types in multivariate point patterns.
+  \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532.
+
+}
+\section{Warnings}{
+  The arguments \code{i} and \code{j} are always interpreted as
+  levels of the factor \code{X$marks}. They are converted to character
+  strings if they are not already character strings.
+  The value \code{i=1} does \bold{not}
+  refer to the first level of the factor.
+
+  The function \eqn{G_{ij}}{Gij} does not necessarily have a density. 
+
+  The reduced sample estimator of \eqn{G_{ij}}{Gij} is pointwise approximately 
+  unbiased, but need not be a valid distribution function; it may 
+  not be a nondecreasing function of \eqn{r}. Its range is always 
+  within \eqn{[0,1]}.
+
+  The spatial Kaplan-Meier estimator of \eqn{G_{ij}}{Gij}
+  is always nondecreasing
+  but its maximum value may be less than \eqn{1}.
+}
+\seealso{
+ \code{\link{Gdot}},
+ \code{\link{Gest}},
+ \code{\link{Gmulti}}
+}
+\examples{
+    # amacrine cells data
+    G01 <- Gcross(amacrine)
+
+    # equivalent to:
+    \dontrun{
+    G01 <- Gcross(amacrine, "off", "on")
+    }
+
+    plot(G01)
+
+    # empty space function of `on' points
+    \dontrun{
+       F1 <- Fest(split(amacrine)$on, r = G01$r)
+       lines(F1$r, F1$km, lty=3)
+    }
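+
+    # theoretical value for a marked Poisson process (a sketch):
+    # the intensity of the type j = "on" points enters the formula,
+    # so G01$theo should match 1 - exp(-lam.on * pi * r^2)
+    lam.on <- intensity(split(amacrine)$on)
+    head(cbind(G01$theo, 1 - exp(-lam.on * pi * G01$r^2)))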
+
+    # synthetic example    
+    pp <- runifpoispp(30)
+    pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE))
+    G <- Gcross(pp, "0", "1")   # note: "0" not 0
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Gdot.Rd b/man/Gdot.Rd
new file mode 100644
index 0000000..c430486
--- /dev/null
+++ b/man/Gdot.Rd
@@ -0,0 +1,237 @@
+\name{Gdot}
+\alias{Gdot}
+\title{
+  Multitype Nearest Neighbour Distance Function (i-to-any)
+}
+\description{
+  For a multitype point pattern, 
+  estimate the distribution of the distance
+  from a point of type \eqn{i}
+  to the nearest other point of any type.
+}
+\usage{
+Gdot(X, i, r=NULL, breaks=NULL, \dots, correction=c("km", "rs", "han"))
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the 
+    distance distribution function
+    \eqn{G_{i\bullet}(r)}{Gi.(r)} will be computed.
+    It must be a multitype point pattern (a marked point pattern
+    whose marks are a factor). See under Details.
+  }
+  \item{i}{The type (mark value)
+    of the points in \code{X} from which distances are measured.
+    A character string (or something that will be converted to a
+    character string).
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{r}{Optional. Numeric vector. The values of the argument \eqn{r}
+    at which the distribution function
+    \eqn{G_{i\bullet}(r)}{Gi.(r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{breaks}{
+    This argument is for internal use only.
+  }
+  \item{\dots}{Ignored.}
+  \item{correction}{
+    Optional. Character string specifying the edge correction(s)
+    to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"},
+    \code{"hanisch"} and \code{"best"}.
+    Alternatively \code{correction="all"} selects all options.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+  
+  Essentially a data frame containing six numeric columns 
+  \item{r}{the values of the argument \eqn{r} 
+    at which the function \eqn{G_{i\bullet}(r)}{Gi.(r)} has been  estimated
+  }
+  \item{rs}{the ``reduced sample'' or ``border correction''
+    estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)}
+  }
+  \item{han}{the Hanisch-style estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)}
+  }
+  \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)}
+  }
+  \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)}
+    of \eqn{G_{i\bullet}(r)}{Gi.(r)} by the spatial Kaplan-Meier method
+  }
+  \item{raw}{the uncorrected estimate of \eqn{G_{i\bullet}(r)}{Gi.(r)},
+  i.e. the empirical distribution of the distances from 
+  each point of type \eqn{i} to the nearest other point of any type.
+  }
+  \item{theo}{the theoretical value of \eqn{G_{i\bullet}(r)}{Gi.(r)}
+    for a marked Poisson process with the same estimated intensity
+    (see below).
+  }
+}
+\details{
+  This function \code{Gdot} and its companions
+  \code{\link{Gcross}} and \code{\link{Gmulti}}
+  are generalisations of the function \code{\link{Gest}}
+  to multitype point patterns.
+
+  A multitype point pattern is a spatial pattern of
+  points classified into a finite number of possible
+  ``colours'' or ``types''. In the \pkg{spatstat} package,
+  a multitype pattern is represented as a single 
+  point pattern object in which the points carry marks,
+  and the mark value attached to each point
+  determines the type of that point.
+  
+  The argument \code{X} must be a point pattern (object of class
+  \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
+  It must be a marked point pattern, and the mark vector
+  \code{X$marks} must be a factor.
+  The argument \code{i} will be interpreted as a
+  level of the factor \code{X$marks}. (Warning: this means that
+  an integer value \code{i=3} will be interpreted as the number 3,
+  \bold{not} the 3rd smallest level.)
+  
+  The ``dot-type'' (type \eqn{i} to any type)
+  nearest neighbour distance distribution function 
+  of a multitype point process 
+  is the cumulative distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)}
+  of the distance from a typical random point of the process with type \eqn{i}
+  to the nearest other point of the process, regardless of type.
+
+  An estimate of \eqn{G_{i\bullet}(r)}{Gi.(r)}
+  is a useful summary statistic in exploratory data analysis
+  of a multitype point pattern.
+  If the type \eqn{i} points
+  were independent of all other points,
+  then \eqn{G_{i\bullet}(r)}{Gi.(r)} would equal \eqn{G_{ii}(r)}{Gii(r)},
+  the nearest neighbour distance distribution function of the type
+  \eqn{i} points alone.
+  For a multitype Poisson point process with total intensity
+  \eqn{\lambda}{lambda}, we have
+  \deqn{G_{i\bullet}(r) = 1 - e^{ - \lambda \pi r^2} }{%
+    Gi.(r) = 1 - exp( - lambda * pi * r^2)}
+  Deviations between the empirical and theoretical
+  \eqn{G_{i\bullet}}{Gi.} curves
+  may suggest dependence of the type \eqn{i} points on the other points.
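+
+  For example, the following sketch (an informal check, not a formal
+  test) compares the Kaplan-Meier estimates of
+  \eqn{G_{i\bullet}}{Gi.} and \eqn{G_{ii}}{Gii} for the
+  \code{amacrine} data:
+  \preformatted{
+    Gd  <- Gdot(amacrine, "off")       # "off" points to points of any type
+    Gii <- Gest(split(amacrine)$off)   # "off" points to other "off" points
+    plot(Gd$r, Gd$km, type="l", xlab="r", ylab="G(r)")
+    lines(Gii$r, Gii$km, lty=2)        # similar curves are consistent with independence
+  }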
+
+  This algorithm estimates the distribution function
+  \eqn{G_{i\bullet}(r)}{Gi.(r)} 
+  from the point pattern \code{X}. It assumes that \code{X} can be treated
+  as a realisation of a stationary (spatially homogeneous) 
+  random spatial point process in the plane, observed through
+  a bounded window.
+  The window (which is specified in \code{X} as \code{Window(X)})
+  may have arbitrary shape.
+  Biases due to edge effects are
+  treated in the same manner as in \code{\link{Gest}}.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{G_{i\bullet}(r)}{Gi.(r)} should be evaluated. 
+  It is also used to determine the breakpoints
+  (in the sense of \code{\link{hist}})
+  for the computation of histograms of distances. The reduced-sample and
+  Kaplan-Meier estimators are computed from histogram counts. 
+  In the case of the Kaplan-Meier estimator this introduces a discretisation
+  error which is controlled by the fineness of the breakpoints.
+
+  First-time users are strongly advised not to specify \code{r}.
+  However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, 
+  and \code{max(r)} must be larger than the radius of the largest disc 
+  contained in the window. Furthermore, the successive entries of \code{r}
+  must be finely spaced.
+
+  The algorithm also returns an estimate of the hazard rate function, 
+  \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{i\bullet}(r)}{Gi.(r)}. 
+  This estimate should be used with caution as
+  \eqn{G_{i\bullet}(r)}{Gi.(r)}
+  is not necessarily differentiable.
+
+  The naive empirical distribution of distances from each point of
+  type \eqn{i} in the pattern \code{X} to the nearest other point of
+  any type is a biased estimate of \eqn{G_{i\bullet}}{Gi.}.
+  However this is also returned by the algorithm, as it is sometimes 
+  useful in other contexts. Care should be taken not to use the uncorrected
+  empirical \eqn{G_{i\bullet}}{Gi.} as if it were an unbiased estimator of
+  \eqn{G_{i\bullet}}{Gi.}.
+}
+\references{
+  Cressie, N.A.C. \emph{Statistics for spatial data}.
+    John Wiley and Sons, 1991.
+
+  Diggle, P.J. \emph{Statistical analysis of spatial point patterns}.
+    Academic Press, 1983.
+
+  Diggle, P. J. (1986).
+  Displaced amacrine cells in the retina of a
+  rabbit: analysis of a bivariate spatial point pattern. 
+  \emph{J. Neurosci. Meth.} \bold{18}, 115--125.
+ 
+  Harkness, R.D. and Isham, V. (1983)
+  A bivariate spatial point pattern of ants' nests.
+  \emph{Applied Statistics} \bold{32}, 293--303.
+ 
+  Lotwick, H. W. and Silverman, B. W. (1982).
+  Methods for analysing spatial processes of several types of points.
+  \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413.
+
+  Ripley, B.D. \emph{Statistical inference for spatial processes}.
+  Cambridge University Press, 1988.
+
+  Stoyan, D., Kendall, W.S. and Mecke, J.
+  \emph{Stochastic geometry and its applications}.
+  2nd edition. Springer Verlag, 1995.
+
+  Van Lieshout, M.N.M. and Baddeley, A.J. (1999)
+  Indices of dependence between types in multivariate point patterns.
+  \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532.
+
+}
+\section{Warnings}{
+  The argument \code{i} is interpreted as
+  a level of the factor \code{X$marks}. It is converted to a character
+  string if it is not already a character string.
+  The value \code{i=1} does \bold{not}
+  refer to the first level of the factor.
+
+  The function \eqn{G_{i\bullet}}{Gi.} does not necessarily have a density. 
+
+  The reduced sample estimator of \eqn{G_{i\bullet}}{Gi.}
+  is pointwise approximately 
+  unbiased, but need not be a valid distribution function; it may 
+  not be a nondecreasing function of \eqn{r}. Its range is always 
+  within \eqn{[0,1]}.
+
+  The spatial Kaplan-Meier estimator of \eqn{G_{i\bullet}}{Gi.}
+  is always nondecreasing
+  but its maximum value may be less than \eqn{1}.
+}
+\seealso{
+ \code{\link{Gcross}},
+ \code{\link{Gest}},
+ \code{\link{Gmulti}}
+}
+\examples{
+    # amacrine cells data
+    G0. <- Gdot(amacrine, "off") 
+    plot(G0.)
+
+    # synthetic example    
+    pp <- runifpoispp(30)
+    pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE))
+    G <- Gdot(pp, "0")
+    G <- Gdot(pp, 0) # equivalent
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
+
+
+
diff --git a/man/Gest.Rd b/man/Gest.Rd
new file mode 100644
index 0000000..f194d51
--- /dev/null
+++ b/man/Gest.Rd
@@ -0,0 +1,240 @@
+\name{Gest}
+\alias{Gest}
+\alias{nearest.neighbour}
+\title{
+  Nearest Neighbour Distance Function G
+}
+\description{
+Estimates the nearest neighbour distance distribution
+function \eqn{G(r)} from a point pattern in a 
+window of arbitrary shape.
+}
+\usage{
+Gest(X, r=NULL, breaks=NULL, \dots,
+     correction=c("rs", "km", "han"),
+     domain=NULL)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of \eqn{G(r)} will be computed.
+    An object of class \code{ppp}, or data
+    in any format acceptable to \code{\link{as.ppp}()}.
+  }
+  \item{r}{Optional. Numeric vector. The values of the argument \eqn{r}
+    at which \eqn{G(r)} should be evaluated. There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{breaks}{
+    This argument is for internal use only.
+  }
+  \item{\dots}{Ignored.}
+  \item{correction}{
+    Optional.
+    The edge correction(s) to be used to estimate \eqn{G(r)}.
+    A vector of character strings selected from
+    \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"}
+    and \code{"best"}.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{domain}{
+    Optional. Calculations will be restricted to this subset
+    of the window. See Details.
+  }
+ }
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+
+  Essentially a data frame containing some or all of the following
+  columns:
+  
+  \item{r}{the values of the argument \eqn{r} 
+    at which the function \eqn{G(r)} has been  estimated
+  }
+  \item{rs}{the ``reduced sample'' or ``border correction''
+    estimator of \eqn{G(r)}
+  }
+  \item{km}{the spatial Kaplan-Meier estimator of \eqn{G(r)}
+  }
+  \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)}
+    of \eqn{G(r)} by the spatial Kaplan-Meier method
+  }
+  \item{raw}{the uncorrected estimate of \eqn{G(r)},
+  i.e. the empirical distribution of the distances from 
+  each point in the pattern \code{X} to the nearest other point of
+  the pattern
+  }
+  \item{han}{the Hanisch correction estimator of \eqn{G(r)}
+  }
+  \item{theo}{the theoretical value of \eqn{G(r)}
+  for a stationary Poisson process of the same estimated intensity.
+  }
+}
+\details{
+  The nearest neighbour distance distribution function 
+  (also called the ``\emph{event-to-event}'' or
+  ``\emph{inter-event}'' distribution)
+  of a point process \eqn{X}
+  is the cumulative distribution function \eqn{G} of the distance
+  from a typical random point of \eqn{X} to
+  the nearest other point of \eqn{X}.
+
+  An estimate of \eqn{G} derived from a spatial point pattern dataset
+  can be used in exploratory data analysis and formal inference
+  about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1988).
+  In exploratory analyses, the estimate of \eqn{G} is a useful statistic 
+  summarising one aspect of the ``clustering'' of points.
+  For inferential purposes, the estimate of \eqn{G} is usually compared to the 
+  true value of \eqn{G} for a completely random (Poisson) point process,
+  which is
+  \deqn{G(r) = 1 - e^{ - \lambda \pi r^2} }{%
+    G(r) = 1 - exp( - lambda * pi * r^2)}
+  where \eqn{\lambda}{lambda} is the intensity
+  (expected number of points per unit area).
+  Deviations between the empirical and theoretical \eqn{G} curves
+  may suggest spatial clustering or spatial regularity.
+
+  This algorithm estimates the nearest neighbour distance distribution
+  function \eqn{G}
+  from the point pattern \code{X}. It assumes that \code{X} can be treated
+  as a realisation of a stationary (spatially homogeneous) 
+  random spatial point process in the plane, observed through
+  a bounded window.
+  The window (which is specified in \code{X} as \code{Window(X)})
+  may have arbitrary shape. 
+
+  The argument \code{X} is interpreted as a point pattern object 
+  (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can
+  be supplied in any of the formats recognised
+  by \code{\link{as.ppp}()}. 
+
+  The estimation of \eqn{G} is hampered by edge effects arising from 
+  the unobservability of points of the random pattern outside the window. 
+  An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). 
+  The edge corrections implemented here are the border method or
+  ``\emph{reduced sample}'' estimator, the spatial Kaplan-Meier estimator
+  (Baddeley and Gill, 1997) and the Hanisch estimator (Hanisch, 1984).
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{G(r)} should be evaluated. 
+  It is also used to determine the breakpoints
+  (in the sense of \code{\link{hist}})
+  for the computation of histograms of distances. The 
+  estimators are computed from histogram counts. 
+  This introduces a discretisation
+  error which is controlled by the fineness of the breakpoints.
+
+  First-time users are strongly advised not to specify \code{r}.
+  However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, 
+  and \code{max(r)} must be larger than the radius of the largest disc 
+  contained in the window. Furthermore, the successive entries of \code{r}
+  must be finely spaced.
+
+  The algorithm also returns an estimate of the hazard rate function, 
+  \eqn{\lambda(r)}{lambda(r)}, of \eqn{G(r)}. The hazard rate is
+  defined as the derivative
+  \deqn{\lambda(r) = - \frac{d}{dr} \log (1 - G(r))}{%
+    lambda(r) = - (d/dr) log(1 - G(r))}
+  This estimate should be used with caution as \eqn{G} is not necessarily
+  differentiable.
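+
+  A minimal sketch of extracting and plotting the hazard estimate
+  (the \code{hazard} column is present when the \code{"km"} correction
+  is computed):
+  \preformatted{
+    G <- Gest(cells)
+    plot(G$r, G$hazard, type="l", xlab="r", ylab="hazard")
+  }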
+
+  If the argument \code{domain} is given, the estimate of \eqn{G(r)}
+  will be based only on the nearest neighbour distances
+  measured from points falling inside \code{domain} (although their
+  nearest neighbours may lie outside \code{domain}).
+  This is useful in bootstrap techniques. The argument \code{domain}
+  should be a window (object of class \code{"owin"}) or something acceptable to
+  \code{\link{as.owin}}. It must be a subset of the
+  window of the point pattern \code{X}.
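+
+  For instance, the calculation might be restricted to an eroded
+  sub-window (a sketch; the erosion distance 0.1 is an arbitrary choice):
+  \preformatted{
+    W <- erosion(Window(cells), 0.1)   # sub-window of the observation window
+    Gsub <- Gest(cells, domain=W)      # distances measured only from points in W
+  }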
+
+  The naive empirical distribution of distances from each point of
+  the pattern \code{X} to the nearest other point of the pattern, 
+  is a biased estimate of \eqn{G}. However it is sometimes useful.
+  It can be returned by the algorithm, by selecting \code{correction="none"}.
+  Care should be taken not to use the uncorrected
+  empirical \eqn{G} as if it were an unbiased estimator of  \eqn{G}.
+
+  To simply compute the nearest neighbour distance for each point in the
+  pattern, use \code{\link{nndist}}. To determine which point is the
+  nearest neighbour of a given point, use \code{\link{nnwhich}}.
+}
+\references{
+    Baddeley, A.J. Spatial sampling and censoring.
+     In O.E. Barndorff-Nielsen, W.S. Kendall and
+     M.N.M. van Lieshout (eds) 
+     \emph{Stochastic Geometry: Likelihood and Computation}.
+     Chapman and Hall, 1998.
+     Chapter 2, pages 37--78.
+  
+  Baddeley, A.J. and Gill, R.D.
+     Kaplan-Meier estimators of interpoint distance
+     distributions for spatial point processes.
+     \emph{Annals of Statistics} \bold{25} (1997) 263--292.
+
+  Cressie, N.A.C. \emph{Statistics for spatial data}.
+    John Wiley and Sons, 1991.
+
+  Diggle, P.J. \emph{Statistical analysis of spatial point patterns}.
+    Academic Press, 1983.
+
+  Hanisch, K.-H. (1984) Some remarks on estimators of the distribution
+  function of nearest-neighbour distance in stationary spatial point
+  patterns. \emph{Mathematische Operationsforschung und Statistik,
+    series Statistics} \bold{15}, 409--412.
+  
+  Ripley, B.D. \emph{Statistical inference for spatial processes}.
+  Cambridge University Press, 1988.
+
+  Stoyan, D., Kendall, W.S. and Mecke, J.
+  \emph{Stochastic geometry and its applications}.
+  2nd edition. Springer Verlag, 1995.
+}
+\section{Warnings}{
+  The function \eqn{G} does not necessarily have a density. 
+  Any valid c.d.f. may appear as the nearest neighbour distance
+  distribution function of a stationary point process.
+
+  The reduced sample estimator of \eqn{G} is pointwise approximately 
+  unbiased, but need not be a valid distribution function; it may 
+  not be a nondecreasing function of \eqn{r}. Its range is always 
+  within \eqn{[0,1]}.
+
+  The spatial Kaplan-Meier estimator of \eqn{G} is always nondecreasing
+  but its maximum value may be less than \eqn{1}.
+}
+\seealso{
+  \code{\link{nndist}},
+  \code{\link{nnwhich}},
+  \code{\link{Fest}},
+  \code{\link{Jest}},
+  \code{\link{Kest}},
+  \code{\link{km.rs}},
+  \code{\link{reduced.sample}},
+  \code{\link{kaplan.meier}}
+}
+\examples{
+  data(cells)
+  G <- Gest(cells)
+  plot(G)
+
+  # P-P style plot
+  plot(G, cbind(km,theo) ~ theo)
+
+  # the empirical G is below the Poisson G,
+  # indicating an inhibited pattern
+
+  \dontrun{
+     plot(G, . ~ r)
+     plot(G, . ~ theo)
+     plot(G, asin(sqrt(.)) ~ asin(sqrt(theo)))
+  }
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Geyer.Rd b/man/Geyer.Rd
new file mode 100644
index 0000000..a924ee2
--- /dev/null
+++ b/man/Geyer.Rd
@@ -0,0 +1,124 @@
+\name{Geyer}
+\alias{Geyer}
+\title{Geyer's Saturation Point Process Model}
+\description{
+Creates an instance of Geyer's saturation point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+  Geyer(r,sat)
+}
+\arguments{
+  \item{r}{Interaction radius. A positive real number.}
+  \item{sat}{Saturation threshold. A non-negative real number.}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of Geyer's saturation point process
+  with interaction radius \eqn{r} and saturation threshold \code{sat}.
+}
+\details{
+  Geyer (1999) introduced the \dQuote{saturation process},
+  a modification of the Strauss process (see \code{\link{Strauss}})
+  in which the total contribution
+  to the potential  from each point (from its pairwise interaction with all 
+  other points) is trimmed to a maximum value \eqn{s}. 
+  The interaction structure of this
+  model is implemented in the function \code{\link{Geyer}()}.
+
+  The saturation point process with interaction radius \eqn{r},
+  saturation threshold \eqn{s}, and 
+  parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma},
+  is the point process
+  in which each point
+  \eqn{x_i}{x[i]} in the pattern \eqn{X}
+  contributes a factor
+  \deqn{
+    \beta \gamma^{\min(s, t(x_i, X))}
+  }{
+    beta gamma^min(s, t(x[i],X))
+  }
+  to the probability density of the point pattern,
+  where \eqn{t(x_i, X)}{t(x[i],X)} denotes the
+  number of \sQuote{close neighbours} of \eqn{x_i}{x[i]} in the pattern
+  \eqn{X}. A close neighbour of \eqn{x_i}{x[i]} is a point
+  \eqn{x_j}{x[j]} with \eqn{j \neq i}{j != i}
+  such that the distance between 
+  \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]} is less than or equal to \eqn{r}.
+
+  If the saturation threshold \eqn{s} is set to infinity,
+  this model reduces to the Strauss process (see \code{\link{Strauss}})
+  with interaction parameter \eqn{\gamma^2}{gamma^2}.
+  If \eqn{s = 0}, the model reduces to the Poisson point process.
+  If \eqn{s} is a finite positive number, then the interaction parameter
+  \eqn{\gamma}{gamma} may take any positive value (unlike the case
+  of the Strauss process), with
+  values \eqn{\gamma < 1}{gamma < 1}
+  describing an \sQuote{ordered} or \sQuote{inhibitive} pattern,
+  and 
+  values \eqn{\gamma > 1}{gamma > 1}
+  describing a \sQuote{clustered} or \sQuote{attractive} pattern.
+ 
+  The nonstationary saturation process is similar except that 
+  the value \eqn{\beta}{beta} 
+  is replaced by a function \eqn{\beta(x_i)}{beta(x[i])}
+  of location.
+ 
+  The function \code{\link{ppm}()}, which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the saturation process interaction is
+  yielded by \code{Geyer(r, sat)} where the
+  arguments \code{r} and \code{sat} specify
+  the Strauss interaction radius \eqn{r} and the saturation threshold
+  \eqn{s}, respectively. See the examples below.
+ 
+  Note that the only arguments are the interaction radius \code{r}
+  and the saturation threshold \code{sat}.
+  When \code{r} and \code{sat} are fixed,
+  the model becomes an exponential family.
+  The canonical parameters \eqn{\log(\beta)}{log(beta)}
+  and \eqn{\log(\gamma)}{log(gamma)}
+  are estimated by \code{\link{ppm}()}, not fixed in
+  \code{Geyer()}. 
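+
+  For example, the fitted canonical parameters can be inspected as
+  follows (a sketch; the coefficient names depend on the model formula):
+  \preformatted{
+    fit <- ppm(cells, ~1, Geyer(r=0.07, sat=2))
+    exp(coef(fit))   # estimates of beta (intercept) and gamma (interaction)
+  }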
+}
+\section{Zero saturation}{
+  The value \code{sat=0} is permitted by \code{Geyer},
+  but this is not very useful.
+  For technical reasons, when \code{\link{ppm}} fits a
+  Geyer model with \code{sat=0}, the default behaviour is to return
+  an \dQuote{invalid} fitted model in which the estimate of
+  \eqn{\gamma}{gamma} is \code{NA}.  In order to get a Poisson
+  process model returned when \code{sat=0},
+  you would need to set \code{emend=TRUE} in
+  the call to \code{\link{ppm}}. 
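+
+  A sketch of this usage:
+  \preformatted{
+    # returns a Poisson model instead of an invalid Geyer fit
+    fit0 <- ppm(cells, ~1, Geyer(r=0.07, sat=0), emend=TRUE)
+  }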
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairwise.family}},
+  \code{\link{ppm.object}},
+  \code{\link{Strauss}},
+  \code{\link{SatPiece}}
+}
+\references{
+Geyer, C.J. (1999)
+Likelihood Inference for Spatial Point Processes.
+Chapter 3 in 
+O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds)
+\emph{Stochastic Geometry: Likelihood and Computation},
+Chapman and Hall / CRC, 
+Monographs on Statistics and Applied Probability, number 80.
+Pages 79--140.
+}
+
+\examples{
+   ppm(cells, ~1, Geyer(r=0.07, sat=2))
+   # fit the stationary saturation process to `cells'
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Gfox.Rd b/man/Gfox.Rd
new file mode 100644
index 0000000..3de2a12
--- /dev/null
+++ b/man/Gfox.Rd
@@ -0,0 +1,118 @@
+\name{Gfox}
+\alias{Gfox}
+\alias{Jfox}
+\title{
+  Foxall's Distance Functions
+}
+\description{
+  Given a point pattern \code{X} and a spatial object \code{Y},
+  compute estimates of Foxall's  \eqn{G} and \eqn{J} functions.
+}
+\usage{
+Gfox(X, Y, r = NULL, breaks = NULL, correction = c("km", "rs", "han"), ...)
+Jfox(X, Y, r = NULL, breaks = NULL, correction = c("km", "rs", "han"), ...)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"})
+    from which distances will be measured.
+  }
+  \item{Y}{
+    An object of class \code{"ppp"}, \code{"psp"} or \code{"owin"}
+    to which distances will be measured.
+  }
+  \item{r}{Optional. Numeric vector. The values of the argument \eqn{r}
+    at which \eqn{Gfox(r)} or \eqn{Jfox(r)}
+    should be evaluated. There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{breaks}{
+    This argument is for internal use only.
+  }
+  \item{correction}{
+    Optional.
+    The edge correction(s) to be used to estimate
+    \eqn{Gfox(r)} or \eqn{Jfox(r)}.
+    A vector of character strings selected from
+    \code{"none"}, \code{"rs"}, \code{"km"}, \code{"cs"}
+    and \code{"best"}.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{\dots}{
+    Extra arguments affecting the discretisation of distances.
+    These arguments are ignored by \code{Gfox}, but
+    \code{Jfox} passes them to \code{\link{Hest}} to determine
+    the discretisation of the spatial domain.
+  }
+}
+\details{
+  Given a point pattern \code{X} and another spatial object \code{Y},
+  these functions compute two nonparametric measures of association
+  between \code{X} and \code{Y}, introduced by Foxall
+  (Foxall and Baddeley, 2002).
+  
+  Let the random variable \eqn{R} be the distance from a typical point
+  of \code{X} to the object \code{Y}.
+  Foxall's \eqn{G}-function is the cumulative distribution function
+  of \eqn{R}:
+  \deqn{G(r) = P(R \le r)}{G(r) = P(R <= r)}
+  
+  Let the random variable \eqn{S} be the distance from a \emph{fixed} point
+  in space to the object \code{Y}. The cumulative distribution function
+  of \eqn{S} is the (unconditional) spherical contact distribution
+  function
+  \deqn{H(r) = P(S \le r)}{H(r) = P(S <= r)}
+  which is computed by \code{\link{Hest}}.
+
+  Foxall's \eqn{J}-function is the ratio
+  \deqn{
+    J(r) = \frac{1-G(r)}{1-H(r)}
+  }{
+    J(r) = (1-G(r))/(1-H(r))
+  }
+  For further interpretation, see Foxall and Baddeley (2002).
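+
+  For intuition only, the ratio can be formed by hand on a common
+  distance grid (a rough sketch; \code{Jfox} itself handles the
+  discretisation and edge corrections consistently and should be
+  preferred in practice):
+  \preformatted{
+    X <- copper$SouthPoints
+    Y <- copper$SouthLines
+    G <- Gfox(X, Y)
+    H <- Hest(Y, r=G$r, W=Window(X))
+    Jhand <- (1 - G$km) / (1 - H$km)   # compare with Jfox(X, Y)
+  }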
+  
+  Accuracy of \code{Jfox} depends on the pixel resolution,
+  which is controlled by the
+  arguments \code{eps}, \code{dimyx} and \code{xy} passed to
+  \code{\link{as.mask}}. For example, use \code{eps=0.1} to specify
+  square pixels of side 0.1 units, and \code{dimyx=256} to specify a
+  256 by 256 grid of pixels.
+}
+\value{
+  A function value table (object of class \code{"fv"})
+  which can be printed, plotted, or converted to a data frame of values.
+}
+\references{
+  Foxall, R. and Baddeley, A. (2002)
+  Nonparametric measures of association between a
+  spatial point process and a random set, with
+  geological applications. \emph{Applied Statistics} \bold{51}, 165--182.
+}
+\seealso{
+  \code{\link{Gest}}, 
+  \code{\link{Hest}}, 
+  \code{\link{Jest}}, 
+  \code{\link{Fest}}
+}
+\examples{
+  data(copper)
+  X <- copper$SouthPoints
+  Y <- copper$SouthLines
+  G <- Gfox(X,Y)
+  J <- Jfox(X,Y, correction="km")
+  \testonly{
+  J <- Jfox(X,Y, correction="km", eps=1)
+  }
+  \dontrun{
+  J <- Jfox(X,Y, correction="km", eps=0.25)
+  }
+}
+\author{Rob Foxall and
+  \adrian
+  
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Ginhom.Rd b/man/Ginhom.Rd
new file mode 100644
index 0000000..aa34423
--- /dev/null
+++ b/man/Ginhom.Rd
@@ -0,0 +1,186 @@
+\name{Ginhom}
+\alias{Ginhom}
+\title{
+  Inhomogeneous Nearest Neighbour Function
+}
+\description{
+  Estimates the inhomogeneous nearest neighbour function \eqn{G} of
+  a non-stationary point pattern.
+}
+\usage{
+  Ginhom(X, lambda = NULL, lmin = NULL, ...,
+        sigma = NULL, varcov = NULL,
+        r = NULL, breaks = NULL, ratio = FALSE, update = TRUE)
+}
+\arguments{
+  \item{X}{
+    The observed data point pattern,
+    from which an estimate of the inhomogeneous \eqn{G} function
+    will be computed.
+    An object of class \code{"ppp"}
+    or in a format recognised by \code{\link{as.ppp}()}.
+  }
+  \item{lambda}{
+    Optional.
+    Values of the estimated intensity function.
+    Either a vector giving the intensity values
+    at the points of the pattern \code{X},
+    a pixel image (object of class \code{"im"}) giving the
+    intensity values at all locations, a fitted point process model
+    (object of class \code{"ppm"}) or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+  }
+  \item{lmin}{
+    Optional. The minimum possible value of the intensity
+    over the spatial domain. A positive numerical value.
+  }
+  \item{sigma,varcov}{
+    Optional arguments passed to  \code{\link{density.ppp}}
+    to control the smoothing bandwidth, when \code{lambda} is
+    estimated by kernel smoothing.
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{\link{as.mask}} to control
+    the pixel resolution, or passed to \code{\link{density.ppp}}
+    to control the smoothing bandwidth.
+}
+  \item{r}{
+    Vector of values for the argument \eqn{r} at which
+    the inhomogeneous \eqn{G} function
+    should be evaluated. Not normally given by the user;
+    there is a sensible default.
+  }
+  \item{breaks}{
+    This argument is for internal use only.
+  }
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    the estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+  \item{update}{
+    Logical. If \code{lambda} is a fitted model
+    (class \code{"ppm"} or \code{"kppm"})
+    and \code{update=TRUE} (the default),
+    the model will first be refitted to the data \code{X}
+    (using \code{\link{update.ppm}} or \code{\link{update.kppm}})
+    before the fitted intensity is computed.
+    If \code{update=FALSE}, the fitted intensity of the
+    model will be computed without fitting it to \code{X}.
+  }
+}
+\details{
+  This command computes estimates of the 
+  inhomogeneous \eqn{G}-function (van Lieshout, 2010)
+  of a point pattern. It is the counterpart, for inhomogeneous
+  spatial point patterns, of the nearest-neighbour distance
+  distribution function \eqn{G} 
+  for homogeneous point patterns computed by \code{\link{Gest}}.
+
+  The argument \code{X} should be a point pattern
+  (object of class \code{"ppp"}).
+
+  The inhomogeneous \eqn{G} function is computed
+  using the border correction, equation (7) in Van Lieshout (2010).
+  
+  The argument \code{lambda} should supply the
+  (estimated) values of the intensity function \eqn{\lambda}{lambda}
+  of the point process. It may be either
+  \describe{
+    \item{a numeric vector}{
+      containing the values
+      of the intensity function at the points of the pattern \code{X}.
+    }
+    \item{a pixel image}{
+      (object of class \code{"im"})
+      assumed to contain the values of the intensity function
+      at all locations in the window. 
+    }
+    \item{a fitted point process model}{
+      (object of class \code{"ppm"} or \code{"kppm"})
+      whose fitted \emph{trend} can be used as the fitted intensity.
+      (If \code{update=TRUE} the model will first be refitted to the
+      data \code{X} before the trend is computed.)
+    }
+    \item{a function}{
+      which can be evaluated to give values of the intensity at
+      any locations.
+    }
+    \item{omitted:}{
+      if \code{lambda} is omitted, then it will be estimated using
+      a `leave-one-out' kernel smoother.
+    }
+  }
+  If \code{lambda} is a numeric vector, then its length should
+  be equal to the number of points in the pattern \code{X}.
+  The value \code{lambda[i]} is assumed to be the
+  (estimated) value of the intensity
+  \eqn{\lambda(x_i)}{lambda(x[i])} for
+  the point \eqn{x_i}{x[i]} of the pattern \eqn{X}.
+  Each value must be a positive number; \code{NA}'s are not allowed.
+
+  If \code{lambda} is a pixel image, the domain of the image should
+  cover the entire window of the point pattern. If it does not (which
+  may occur near the boundary because of discretisation error),
+  then the missing pixel values 
+  will be obtained by applying a Gaussian blur to \code{lambda} using
+  \code{\link{blur}}, then looking up the values of this blurred image
+  for the missing locations. 
+  (A warning will be issued in this case.)
+
+  If \code{lambda} is a function, then it will be evaluated in the
+  form \code{lambda(x,y)} where \code{x} and \code{y} are vectors
+  of coordinates of the points of \code{X}. It should return a numeric
+  vector with length equal to the number of points in \code{X}.
+
+  If \code{lambda} is omitted, then it will be estimated using
+  a `leave-one-out' kernel smoother,
+  as described in Baddeley, \ifelse{latex}{\out{M\o ller}}{Moller}
+  and Waagepetersen (2000).  The estimate \code{lambda[i]} for the
+  point \code{X[i]} is computed by removing \code{X[i]} from the
+  point pattern, applying kernel smoothing to the remaining points using
+  \code{\link{density.ppp}}, and evaluating the smoothed intensity
+  at the point \code{X[i]}. The smoothing kernel bandwidth is controlled
+  by the arguments \code{sigma} and \code{varcov}, which are passed to
+  \code{\link{density.ppp}} along with any extra arguments.
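+
+  Equivalently, this leave-one-out estimate can be computed explicitly
+  and passed in (a sketch; the bandwidth \code{sigma=10} and the factor
+  \code{0.9} in \code{lmin} are illustrative assumptions):
+  \preformatted{
+    lam <- density(swedishpines, sigma=10, at="points", leaveoneout=TRUE)
+    G <- Ginhom(swedishpines, lambda=lam, lmin=min(lam) * 0.9)
+  }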
+}
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+}
+\references{
+  Baddeley, A.,
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2000)
+  Non- and semiparametric estimation of interaction in
+  inhomogeneous point patterns.
+  \emph{Statistica Neerlandica} \bold{54}, 329--350.
+
+  Van Lieshout, M.N.M. and Baddeley, A.J. (1996)
+  A nonparametric measure of spatial interaction in point patterns.
+  \emph{Statistica Neerlandica} \bold{50}, 344--361.
+
+  Van Lieshout, M.N.M. (2010)
+  A J-function for inhomogeneous point processes.
+  \emph{Statistica Neerlandica} \bold{65}, 183--201.
+}
+\seealso{
+  \code{\link{Finhom}},
+  \code{\link{Jinhom}},
+  \code{\link{Gest}}
+}
+\examples{
+  \dontrun{
+    plot(Ginhom(swedishpines, sigma=bw.diggle, adjust=2))
+  }
+  plot(Ginhom(swedishpines, sigma=10))
+}
+\author{
+  Original code by Marie-Colette van Lieshout.
+  C implementation and R adaptation by \adrian
+  
+  
+  and \ege.
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Gmulti.Rd b/man/Gmulti.Rd
new file mode 100644
index 0000000..d3eb04c
--- /dev/null
+++ b/man/Gmulti.Rd
@@ -0,0 +1,206 @@
+\name{Gmulti}
+\alias{Gmulti}
+\title{
+  Marked Nearest Neighbour Distance Function
+}
+\description{
+  For a marked point pattern, 
+  estimate the distribution of the distance
+  from a typical point in subset \code{I}
+  to the nearest point of subset \code{J}.
+}
+\usage{
+Gmulti(X, I, J, r=NULL, breaks=NULL, \dots,
+        disjoint=NULL, correction=c("rs", "km", "han"))
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the multitype distance distribution function
+    \eqn{G_{IJ}(r)}{GIJ(r)} will be computed.
+    It must be a marked point pattern.
+    See under Details.
+  }
+  \item{I}{Subset of points of \code{X} from which distances are
+    measured. 
+  }
+  \item{J}{Subset of points in \code{X} to which distances are measured.
+  }
+  \item{r}{Optional. Numeric vector. The values of the argument \eqn{r}
+    at which the distribution function
+    \eqn{G_{IJ}(r)}{GIJ(r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{breaks}{
+	This argument is for internal use only.
+  }
+  \item{\dots}{Ignored.}
+  \item{disjoint}{Optional flag indicating whether
+    the subsets \code{I} and \code{J} are disjoint.
+    If missing, this value will be computed by inspecting the
+    vectors \code{I} and \code{J}.
+  }
+  \item{correction}{
+    Optional. Character string specifying the edge correction(s)
+    to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"},
+    \code{"hanisch"} and \code{"best"}.
+    Alternatively \code{correction="all"} selects all options.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+  
+  Essentially a data frame containing six numeric columns 
+  \item{r}{the values of the argument \eqn{r} 
+    at which the function \eqn{G_{IJ}(r)}{GIJ(r)} has been  estimated
+  }
+  \item{rs}{the ``reduced sample'' or ``border correction''
+    estimator of \eqn{G_{IJ}(r)}{GIJ(r)}
+  }
+  \item{han}{the Hanisch-style estimator of \eqn{G_{IJ}(r)}{GIJ(r)}
+  }
+  \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{IJ}(r)}{GIJ(r)}
+  }
+  \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)}
+    of \eqn{G_{IJ}(r)}{GIJ(r)} by the spatial Kaplan-Meier method
+  }
+  \item{raw}{the uncorrected estimate of \eqn{G_{IJ}(r)}{GIJ(r)},
+  i.e. the empirical distribution of the distances from 
+  each point in subset \code{I} to the nearest point in subset \code{J}
+  }
+  \item{theo}{the theoretical value of \eqn{G_{IJ}(r)}{GIJ(r)}
+    for a marked Poisson process with the same estimated intensity
+  }
+}
+\details{
+  The function \code{Gmulti}
+  generalises \code{\link{Gest}} (for unmarked point
+  patterns) and \code{\link{Gdot}} and \code{\link{Gcross}} (for
+  multitype point patterns) to arbitrary marked point patterns.
+
+  Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly
+  overlapping, of a marked point process. This function computes an
+  estimate of the cumulative
+  distribution function \eqn{G_{IJ}(r)}{GIJ(r)} of the distance
+  from a typical point of  \eqn{X_I}{X[I]} to the nearest distinct point of
+  \eqn{X_J}{X[J]}. 
+
+  The argument \code{X} must be a point pattern (object of class
+  \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
+
+  The arguments \code{I} and \code{J} specify two subsets of the
+  point pattern. They may be any type of subset indices, for example,
+  logical vectors of length equal to \code{npoints(X)},
+  or integer vectors with entries in the range 1 to
+  \code{npoints(X)}, or negative integer vectors.
+
+  Alternatively, \code{I} and \code{J} may be \bold{functions}
+  that will be applied to the point pattern \code{X} to obtain
+  index vectors. If \code{I} is a function, then evaluating
+  \code{I(X)} should yield a valid subset index. This option
+  is useful when generating simulation envelopes using
+  \code{\link{envelope}}.
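+
+  For example, simulation envelopes might be generated as follows
+  (a sketch; \code{nsim} is kept small purely for speed):
+  \preformatted{
+    Ifun <- function(X) marks(X) <= 15
+    Jfun <- function(X) marks(X) >= 25
+    E <- envelope(longleaf, Gmulti, I=Ifun, J=Jfun, nsim=19)
+  }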
+
+  This algorithm estimates the distribution function \eqn{G_{IJ}(r)}{GIJ(r)} 
+  from the point pattern \code{X}. It assumes that \code{X} can be treated
+  as a realisation of a stationary (spatially homogeneous) 
+  random spatial point process in the plane, observed through
+  a bounded window.
+  The window (which is specified in \code{X} as \code{Window(X)})
+  may have arbitrary shape.
+  Biases due to edge effects are
+  treated in the same manner as in \code{\link{Gest}}.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{G_{IJ}(r)}{GIJ(r)} should be evaluated. 
+  It is also used to determine the breakpoints
+  (in the sense of \code{\link{hist}})
+  for the computation of histograms of distances. The reduced-sample and
+  Kaplan-Meier estimators are computed from histogram counts. 
+  In the case of the Kaplan-Meier estimator this introduces a discretisation
+  error which is controlled by the fineness of the breakpoints.
+
+  First-time users are strongly advised not to specify \code{r}.
+  However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, 
+  and \code{max(r)} must be larger than the radius of the largest disc 
+  contained in the window. Furthermore, the successive entries of \code{r}
+  must be finely spaced.
+
+  The algorithm also returns an estimate of the hazard rate function, 
+  \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{IJ}(r)}{GIJ(r)}. 
+  This estimate should be used with caution as \eqn{G_{IJ}(r)}{GIJ(r)}
+  is not necessarily differentiable.
+
+  The naive empirical distribution of distances from each point in
+  subset \code{I} to the nearest distinct point in subset \code{J}
+  is a biased estimate of \eqn{G_{IJ}}{GIJ}.
+  However this is also returned by the algorithm, as it is sometimes 
+  useful in other contexts. Care should be taken not to use the uncorrected
+  empirical \eqn{G_{IJ}}{GIJ} as if it were an unbiased estimator of
+  \eqn{G_{IJ}}{GIJ}.
+}
+\references{
+  Cressie, N.A.C. \emph{Statistics for spatial data}.
+    John Wiley and Sons, 1991.
+
+  Diggle, P.J. \emph{Statistical analysis of spatial point patterns}.
+    Academic Press, 1983.
+
+  Diggle, P. J. (1986).
+  Displaced amacrine cells in the retina of a
+  rabbit: analysis of a bivariate spatial point pattern. 
+  \emph{J. Neurosci. Meth.} \bold{18}, 115--125.
+ 
+  Harkness, R.D. and Isham, V. (1983)
+  A bivariate spatial point pattern of ants' nests.
+  \emph{Applied Statistics} \bold{32}, 293--303.
+ 
+  Lotwick, H. W. and Silverman, B. W. (1982).
+  Methods for analysing spatial processes of several types of points.
+  \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413.
+
+  Ripley, B.D. \emph{Statistical inference for spatial processes}.
+  Cambridge University Press, 1988.
+
+  Stoyan, D., Kendall, W.S. and Mecke, J.
+  \emph{Stochastic geometry and its applications}.
+  2nd edition. Springer Verlag, 1995.
+
+  Van Lieshout, M.N.M. and Baddeley, A.J. (1999)
+  Indices of dependence between types in multivariate point patterns.
+  \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532.
+
+}
+\section{Warnings}{
+  The function \eqn{G_{IJ}}{GIJ} does not necessarily have a density. 
+
+  The reduced sample estimator of \eqn{G_{IJ}}{GIJ} is pointwise approximately 
+  unbiased, but need not be a valid distribution function; it may 
+  not be a nondecreasing function of \eqn{r}. Its range is always 
+  within \eqn{[0,1]}.
+
+  The spatial Kaplan-Meier estimator of \eqn{G_{IJ}}{GIJ}
+  is always nondecreasing
+  but its maximum value may be less than \eqn{1}.
+}
+\seealso{
+ \code{\link{Gcross}},
+ \code{\link{Gdot}},
+ \code{\link{Gest}}
+}
+\examples{
+    trees <- longleaf
+    # Longleaf Pine data: marks represent diameter
+    \testonly{
+      trees <- trees[seq(1, npoints(trees), by=50), ]
+    }
+    Gm <- Gmulti(trees, marks(trees) <= 15, marks(trees) >= 25)
+    plot(Gm)
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/GmultiInhom.Rd b/man/GmultiInhom.Rd
new file mode 100644
index 0000000..5736d03
--- /dev/null
+++ b/man/GmultiInhom.Rd
@@ -0,0 +1,100 @@
+\name{GmultiInhom}
+\alias{GmultiInhom}
+\title{
+  Inhomogeneous Marked G-Function
+}
+\description{
+  For a marked point pattern, 
+  estimate the inhomogeneous version of the multitype \eqn{G} function,
+  effectively the cumulative distribution function of the distance from
+  a point in subset \eqn{I} to the nearest point in subset \eqn{J},
+  adjusted for spatially varying intensity.
+}
+\usage{
+  GmultiInhom(X, I, J,
+             lambda = NULL, lambdaI = NULL, lambdaJ = NULL,
+             lambdamin = NULL, \dots,
+             r = NULL,
+             ReferenceMeasureMarkSetI = NULL,
+             ratio = FALSE)
+}
+\arguments{
+  \item{X}{
+    A spatial point pattern (object of class \code{"ppp"}).
+  }
+  \item{I}{
+    A subset index specifying the subset of points \emph{from} which
+    distances are measured. Any kind of subset index acceptable
+    to \code{\link{[.ppp}}.
+  }
+  \item{J}{
+    A subset index specifying the subset of points \emph{to} which
+    distances are measured. Any kind of subset index acceptable
+    to \code{\link{[.ppp}}.
+  }
+  \item{lambda}{
+    Intensity estimates for each point of \code{X}.
+    A numeric vector of length equal to \code{npoints(X)}.
+    Incompatible with \code{lambdaI,lambdaJ}.
+  }
+  \item{lambdaI}{
+    Intensity estimates for each point of \code{X[I]}.
+    A numeric vector of length equal to \code{npoints(X[I])}.
+    Incompatible with \code{lambda}.
+  }
+  \item{lambdaJ}{
+    Intensity estimates for each point of \code{X[J]}.
+    A numeric vector of length equal to \code{npoints(X[J])}.
+    Incompatible with \code{lambda}.
+  }
+  \item{lambdamin}{
+    A lower bound for the intensity,
+    or at least a lower bound for the values in \code{lambdaJ}
+    or \code{lambda[J]}.
+  }
+  \item{\dots}{
+    Ignored. 
+  }
+  \item{r}{
+    Vector of distance values at which the inhomogeneous \eqn{G}
+    function should be estimated. There is a sensible default.
+  }
+  \item{ReferenceMeasureMarkSetI}{
+    Optional. The total measure of the mark set. A positive number.
+  }
+  \item{ratio}{
+    Logical value indicating whether to save ratio information.
+  }
+}
+\details{
+  See Cronie and Van Lieshout (2015).
+}
+\value{
+  Object of class \code{"fv"} containing the estimate of the
+  inhomogeneous multitype \eqn{G} function.
+}
+\references{
+  Cronie, O. and Van Lieshout, M.N.M. (2015)
+  Summary statistics for inhomogeneous marked point processes.
+  \emph{Annals of the Institute of Statistical Mathematics}
+  DOI: 10.1007/s10463-015-0515-z
+}
+\author{
+  Ottmar Cronie and Marie-Colette van Lieshout.
+  Rewritten for \pkg{spatstat} by \adrian.
+}
+\seealso{
+  \code{\link{Ginhom}},
+  \code{\link{Gmulti}}
+}
+\examples{
+  X <- amacrine
+  I <- (marks(X) == "on")
+  J <- (marks(X) == "off")
+  mod <- ppm(X ~ marks * x)
+  lam <- fitted(mod, dataonly=TRUE)
+  lmin <- min(predict(mod)[["off"]]) * 0.9
+  plot(GmultiInhom(X, I, J, lambda=lam, lambdamin=lmin))
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Gres.Rd b/man/Gres.Rd
new file mode 100644
index 0000000..bcf576d
--- /dev/null
+++ b/man/Gres.Rd
@@ -0,0 +1,102 @@
+\name{Gres}
+\Rdversion{1.1}
+\alias{Gres}
+\title{
+  Residual G Function
+}
+\description{
+  Given a point process model fitted to a point pattern dataset,
+  this function computes the residual \eqn{G} function,
+  which serves as a diagnostic for goodness-of-fit of the model.
+}
+\usage{
+   Gres(object, ...)
+}
+\arguments{
+  \item{object}{
+    Object to be analysed.
+    Either a fitted point process model (object of class \code{"ppm"}),
+    a point pattern (object of class \code{"ppp"}),
+    a quadrature scheme (object of class \code{"quad"}),
+    or the value returned by a previous call to \code{\link{Gcom}}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{Gcom}}.
+  }
+}
+\details{
+  This command provides a diagnostic for the goodness-of-fit of
+  a point process model fitted to a point pattern dataset.
+  It computes a residual version of the \eqn{G} function of the
+  dataset, which should be approximately zero if the model is a good
+  fit to the data.
+
+  In normal use, \code{object} is a fitted point process model
+  or a point pattern. Then \code{Gres} first calls \code{\link{Gcom}}
+  to compute both the nonparametric estimate of the \eqn{G} function
+  and its model compensator. Then \code{Gres} computes the
+  difference between them, which is the residual \eqn{G}-function.
+  
+  Alternatively, \code{object} may be a function value table
+  (object of class \code{"fv"}) that was returned by
+  a previous call to \code{\link{Gcom}}. Then \code{Gres} computes the
+  residual from this object. 
+}
+\value{
+  A function value table (object of class \code{"fv"}),
+  essentially a data frame of function values.
+  There is a plot method for this class. See \code{\link{fv.object}}.
+}
+\references{
+  Baddeley, A., Rubak, E. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2011)
+  Score, pseudo-score and residual
+  diagnostics for spatial point process models.
+  \emph{Statistical Science} \bold{26}, 613--646.
+}
+\author{
+  \adrian,
+  
+  
+  \ege and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}.
+}
+\seealso{
+  Related functions:
+  \code{\link{Gcom}}, 
+  \code{\link{Gest}}.
+
+  Alternative functions:
+  \code{\link{Kres}}, 
+  \code{\link{psstA}}, 
+  \code{\link{psstG}}, 
+  \code{\link{psst}}.
+
+  Model-fitting:
+  \code{\link{ppm}}.
+}
+\examples{
+    data(cells)
+    fit0 <- ppm(cells, ~1) # uniform Poisson
+    G0 <- Gres(fit0)
+    plot(G0)
+# Hanisch correction estimate
+    plot(G0, hres ~ r)
+# uniform Poisson is clearly not correct
+
+    fit1 <- ppm(cells, ~1, Strauss(0.08))
+    plot(Gres(fit1), hres ~ r)
+# fit looks approximately OK; try adjusting interaction distance
+
+    plot(Gres(cells, interaction=Strauss(0.12)))
+
+# How to make envelopes
+    \dontrun{
+    E <- envelope(fit1, Gres, model=fit1, nsim=39)
+    plot(E)
+    }
+# For computational efficiency
+    Gc <- Gcom(fit1)
+    G1 <- Gres(Gc)
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/Hardcore.Rd b/man/Hardcore.Rd
new file mode 100644
index 0000000..f18785d
--- /dev/null
+++ b/man/Hardcore.Rd
@@ -0,0 +1,95 @@
+\name{Hardcore}
+\alias{Hardcore}
+\title{The Hard Core Point Process Model}
+\description{
+Creates an instance of the hard core point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+  Hardcore(hc=NA)
+}
+\arguments{
+  \item{hc}{The hard core distance}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of the hard core
+  process with hard core distance \code{hc}.
+}
+\details{
+  A hard core process with 
+  hard core distance \eqn{h} and abundance
+  parameter \eqn{\beta}{beta} 
+  is a pairwise interaction point process
+  in which distinct points are not allowed to come closer
+  than a distance \eqn{h} apart.
+
+  The probability density is zero if any pair of points
+  is closer than \eqn{h} units apart, and otherwise equals
+  \deqn{
+    f(x_1,\ldots,x_n) =
+    \alpha \beta^{n(x)} 
+  }{
+    f(x_1,\ldots,x_n) =
+    alpha . beta^n(x) 
+  }
+  where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the 
+  points of the pattern, \eqn{n(x)} is the number of points in the
+  pattern, and \eqn{\alpha}{alpha} is the normalising constant.
+
+  The function \code{\link{ppm}()}, which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the hard core process
+  pairwise interaction is 
+  yielded by the function \code{Hardcore()}. See the examples below.
+
+  If the hard core distance argument \code{hc} is missing or \code{NA},
+  it will be estimated from the data when \code{\link{ppm}} is called.
+  The estimated value of \code{hc} is the minimum nearest neighbour distance
+  multiplied by \eqn{n/(n+1)}, where \eqn{n} is the
+  number of data points.
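+
+  This self-starting estimate can be reproduced directly from the rule
+  stated above:
+  \preformatted{
+    n <- npoints(cells)
+    min(nndist(cells)) * n/(n + 1)   # default estimate of hc for these data
+  }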
+}
+\seealso{
+  \code{\link{Strauss}},
+  \code{\link{StraussHard}},
+  \code{\link{MultiHard}},
+  \code{\link{ppm}},
+  \code{\link{pairwise.family}},
+  \code{\link{ppm.object}}
+}
+\references{
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42}, 283--322.
+
+  Ripley, B.D. (1981) 
+  \emph{Spatial statistics}.
+  John Wiley and Sons.
+}
+\examples{
+   Hardcore(0.02)
+   # prints a sensible description of itself
+
+   \dontrun{
+   ppm(cells, ~1, Hardcore(0.05))
+   # fit the stationary hard core process to `cells'
+   }
+
+   # estimate hard core radius from data
+   ppm(cells, ~1, Hardcore())
+   ppm(cells, ~1, Hardcore)
+
+   ppm(cells, ~ polynom(x,y,3), Hardcore(0.05))
+   # fit a nonstationary hard core process
+   # with log-cubic polynomial trend
+}
+\author{
+  \adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Hest.Rd b/man/Hest.Rd
new file mode 100644
index 0000000..f9436a7
--- /dev/null
+++ b/man/Hest.Rd
@@ -0,0 +1,166 @@
+\name{Hest}
+\alias{Hest}
+\title{Spherical Contact Distribution Function}
+\description{
+  Estimates the spherical contact distribution function of a
+  random set.
+}
+\usage{
+Hest(X, r=NULL, breaks=NULL, ...,
+     W,
+     correction=c("km", "rs", "han"),
+     conditional=TRUE)
+}
+\arguments{
+  \item{X}{The observed random set.
+    An object of class \code{"ppp"}, \code{"psp"} or \code{"owin"}.
+    Alternatively a pixel image (class \code{"im"}) with logical values.
+  }
+  \item{r}{
+    Optional. Vector of values for the argument \eqn{r} at which \eqn{H(r)} 
+    should be evaluated. Users are advised \emph{not} to specify this
+    argument; there is a sensible default.
+  }
+  \item{breaks}{
+	This argument is for internal use only.
+  }
+  \item{\dots}{Arguments passed to \code{\link{as.mask}}
+    to control the discretisation.
+  }
+  \item{W}{
+    Optional. A window (object of class \code{"owin"})
+    to be taken as the window of observation.
+    The contact distribution function will be estimated
+    from values of the contact distance inside \code{W}.
+  }
+  \item{correction}{
+   Optional.
+    The edge correction(s) to be used to estimate \eqn{H(r)}.
+    A vector of character strings selected from
+    \code{"none"}, \code{"rs"}, \code{"km"}, \code{"han"}
+    and \code{"best"}.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{conditional}{
+    Logical value indicating whether to compute the
+    conditional or unconditional distribution. See Details.
+  }
+}
+\details{
+  The spherical contact distribution function
+  of a stationary random set \eqn{X}
+  is the cumulative distribution function \eqn{H} of the distance
+  from a fixed point in space to the nearest point of \eqn{X},
+  given that the point lies outside \eqn{X}.
+  That is, \eqn{H(r)} equals
+  the probability that \code{X} lies closer than \eqn{r} units away
+  from the fixed point \eqn{x}, given that \code{X} does not cover \eqn{x}.
+
+  Let \eqn{D = d(x,X)} be the shortest distance from an arbitrary
+  point \eqn{x} to the set \code{X}. Then the spherical contact
+  distribution function is
+  \deqn{H(r) = P(D \le r \mid D > 0)}{H(r) = P(D <= r | D > 0)}
+  For a point process, the spherical contact distribution function
+  is the same as the empty space function \eqn{F} discussed
+  in \code{\link{Fest}}. 
+
+  The argument \code{X} may be a point pattern
+  (object of class \code{"ppp"}), a line segment pattern
+  (object of class \code{"psp"}) or a window (object of class
+  \code{"owin"}). It is assumed to be a realisation of a stationary
+  random set.
+
+  The algorithm first calls \code{\link{distmap}} to compute the
+  distance transform of \code{X}, then computes the Kaplan-Meier
+  and reduced-sample estimates of the cumulative distribution
+  following Hansen et al (1999).
+  If \code{conditional=TRUE} (the default) the algorithm
+  returns an estimate of the spherical contact function
+  \eqn{H(r)} as defined above. 
+  If \code{conditional=FALSE}, it instead returns an estimate of the
+  cumulative distribution function
+  \eqn{H^\ast(r) = P(D \le r)}{H*(r) = P(D <= r)}
+  which includes a jump at \eqn{r=0} if \code{X} has nonzero area.
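+
+  The two variants can be compared directly (a brief sketch):
+  \preformatted{
+    X  <- heather$coarse
+    Hc <- Hest(X)                      # conditional: H(r) = P(D <= r | D > 0)
+    Hu <- Hest(X, conditional=FALSE)   # unconditional: H*(r) = P(D <= r)
+  }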
+
+  Accuracy depends on the pixel resolution, which is controlled by the
+  arguments \code{eps}, \code{dimyx} and \code{xy} passed to
+  \code{\link{as.mask}}. For example, use \code{eps=0.1} to specify
+  square pixels of side 0.1 units, and \code{dimyx=256} to specify a
+  256 by 256 grid of pixels.
+}
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+
+  Essentially a data frame containing up to six columns:
+  \item{r}{the values of the argument \eqn{r} 
+    at which the function \eqn{H(r)} has been  estimated
+  }
+  \item{rs}{the ``reduced sample'' or ``border correction''
+    estimator of \eqn{H(r)}
+  }
+  \item{km}{the spatial Kaplan-Meier estimator of \eqn{H(r)}
+  }
+  \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)}
+    of \eqn{H(r)} by the spatial Kaplan-Meier method
+  }
+  \item{han}{the spatial Hanisch-Chiu-Stoyan estimator of \eqn{H(r)}
+  }
+  \item{raw}{the uncorrected estimate of \eqn{H(r)},
+  i.e. the empirical distribution of the distance from 
+  a fixed point in the window to the nearest point of \code{X}
+  }
+}
+\references{
+  Baddeley, A.J. Spatial sampling and censoring.
+     In O.E. Barndorff-Nielsen, W.S. Kendall and
+     M.N.M. van Lieshout (eds) 
+     \emph{Stochastic Geometry: Likelihood and Computation}.
+     Chapman and Hall, 1998.
+     Chapter 2, pages 37--78.
+  
+  Baddeley, A.J. and Gill, R.D. 
+    The empty space hazard of a spatial pattern.
+    Research Report 1994/3, Department of Mathematics,
+    University of Western Australia, May 1994.
+
+  Hansen, M.B., Baddeley, A.J. and Gill, R.D.
+  First contact distributions for spatial patterns:
+  regularity and estimation.
+  \emph{Advances in Applied Probability} \bold{31} (1999) 15--33.
+
+  Ripley, B.D. \emph{Statistical inference for spatial processes}.
+  Cambridge University Press, 1988.
+
+  Stoyan, D., Kendall, W.S. and Mecke, J.
+  \emph{Stochastic geometry and its applications}.
+  2nd edition. Springer Verlag, 1995.
+}
+\seealso{\code{\link{Fest}}}
+\examples{
+   X <- runifpoint(42)
+   H <- Hest(X)
+   Y <- rpoisline(10)
+   H <- Hest(Y)
+   H <- Hest(Y, dimyx=256)
+   X <- heather$coarse
+   plot(Hest(X))
+   H <- Hest(X, conditional=FALSE)
+
+   P <- owin(poly=list(x=c(5.3, 8.5, 8.3, 3.7, 1.3, 3.7),
+                       y=c(9.7, 10.0, 13.6, 14.4, 10.7, 7.2)))
+   plot(X)
+   plot(P, add=TRUE, col="red")
+   H <- Hest(X, W=P)
+   Z <- as.im(FALSE, Frame(X))
+   Z[X] <- TRUE
+   Z <- Z[P, drop=FALSE]
+   plot(Z)
+   H <- Hest(Z)
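+   # A sketch of inspecting the estimates directly: the returned object
+   # is essentially a data frame with the columns listed under Value,
+   # so the different corrections can be extracted and compared.
+   H <- Hest(heather$coarse)
+   head(as.data.frame(H))
+   plot(H, cbind(km, rs) ~ r)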
+}
+\author{
+  \spatstatAuthors
+  with contributions from Kassel Hingee.
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/HierHard.Rd b/man/HierHard.Rd
new file mode 100644
index 0000000..319ce82
--- /dev/null
+++ b/man/HierHard.Rd
@@ -0,0 +1,120 @@
+\name{HierHard}
+\alias{HierHard}
+\title{The Hierarchical Hard Core Point Process Model}
+\description{
+Creates an instance of the hierarchical hard core point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+  HierHard(hradii=NULL, types=NULL, archy=NULL)
+}
+\arguments{
+  \item{hradii}{Optional matrix of hard core distances}
+  \item{types}{Optional; vector of all possible types (i.e. the possible levels
+    of the \code{marks} variable in the data)}
+  \item{archy}{Optional: the hierarchical order. See Details.}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of the hierarchical hard core process with
+  hard core distances \eqn{hradii[i,j]}.
+}
+\details{
+  This is a hierarchical point process model
+  for a multitype point pattern
+  (\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander} and 
+  \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, 1999;
+  Grabarnik and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, 2009).
+  It is appropriate for analysing multitype point pattern data
+  in which the types are ordered so that
+  the points of type \eqn{j} depend on the points of type
+  \eqn{1,2,\ldots,j-1}{1,2,...,j-1}.
+  
+  The hierarchical version of the (stationary) 
+   hard core process with \eqn{m} types, with
+  hard core distances \eqn{h_{ij}}{h[i,j]} and 
+  parameters \eqn{\beta_j}{beta[j]}, is a point process
+  in which each point of type \eqn{j}
+  contributes a factor \eqn{\beta_j}{beta[j]} to the 
+  probability density of the point pattern.
+  If any pair of points
+  of types \eqn{i} and \eqn{j} lies closer than \eqn{h_{ij}}{h[i,j]}
+  units apart, the configuration of points is impossible (probability
+  density zero).
+  
+  The nonstationary hierarchical hard core
+  process is similar except that 
+  the contribution of each individual point \eqn{x_i}{x[i]}
+  is a function \eqn{\beta(x_i)}{beta(x[i])}
+  of location and type, rather than a constant beta. 
+ 
+  The function \code{\link{ppm}()},
+  which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the hierarchical
+  hard core process pairwise interaction is
+  yielded by the function \code{HierHard()}. See the examples below.
+
+  The argument \code{types} need not be specified in normal use.
+  It will be determined automatically from the point pattern data set
+  to which the HierHard interaction is applied,
+  when the user calls \code{\link{ppm}}. 
+  However, the user should be confident that
+  the ordering of types in the dataset corresponds to the ordering of
+  rows and columns in the matrix \code{hradii}.
+
+  The argument \code{archy} can be used to specify a hierarchical
+  ordering of the types. It can be either a vector of integers
+  or a character vector matching the possible types.
+  The default is the sequence
+  \eqn{1,2, \ldots, m}{1,2, ..., m} meaning that type \eqn{j}
+  depends on types \eqn{1,2, \ldots, j-1}{1,2, ..., j-1}.
+  
+  The matrix \code{hradii} must be square, with entries
+  which are either positive numbers, or zero or \code{NA}. 
+  A value of zero or \code{NA} indicates that no hard core interaction term
+  should be included for this combination of types.
+  
+  Note that only the hard core distances are
+  specified in \code{HierHard}.  The canonical
+  parameters \eqn{\log(\beta_j)}{log(beta[j])} 
+  are estimated by
+  \code{\link{ppm}()}, not fixed in \code{HierHard()}.
+}
+\seealso{
+  \code{\link{MultiHard}} for the corresponding
+  symmetrical interaction.
+
+  \code{\link{HierStrauss}},
+  \code{\link{HierStraussHard}}.
+}
+\examples{
+   h <- matrix(c(4, NA, 10, 15), 2, 2)
+   HierHard(h)
+   # prints a sensible description of itself
+   ppm(ants ~1, HierHard(h))
+   # fit the stationary hierarchical hard core process to ants data
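+   # A sketch of giving the hierarchical order explicitly via 'archy';
+   # "Messor" and "Cataglyphis" are the two types in the ants data
+   HierHard(h, archy=c("Messor", "Cataglyphis"))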
+}
+\author{\adrian, \rolf and \ege.}
+\references{
+  Grabarnik, P. and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, A. (2009)
+  Modelling the spatial structure of forest stands by
+  multivariate point processes with hierarchical interactions.
+  \emph{Ecological Modelling} \bold{220}, 1232--1240.
+
+  \ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}, H. and 
+  \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, A. (1999)
+  Multitype spatial point patterns with hierarchical interactions.
+  \emph{Biometrics} \bold{55}, 1051--1058.
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/HierStrauss.Rd b/man/HierStrauss.Rd
new file mode 100644
index 0000000..c491fb5
--- /dev/null
+++ b/man/HierStrauss.Rd
@@ -0,0 +1,120 @@
+\name{HierStrauss}
+\alias{HierStrauss}
+\title{The Hierarchical Strauss Point Process Model}
+\description{
+Creates an instance of the hierarchical Strauss point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+  HierStrauss(radii, types=NULL, archy=NULL)
+}
+\arguments{
+  \item{radii}{Matrix of interaction radii}
+  \item{types}{Optional; vector of all possible types (i.e. the possible levels
+    of the \code{marks} variable in the data)}
+  \item{archy}{Optional: the hierarchical order. See Details.}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of the hierarchical Strauss process with
+  interaction radii \eqn{radii[i,j]}.
+}
+\details{
+  This is a hierarchical point process model
+  for a multitype point pattern
+  (\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander} and 
+  \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, 1999;
+  Grabarnik and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, 2009).
+  It is appropriate for analysing multitype point pattern data
+  in which the types are ordered so that
+  the points of type \eqn{j} depend on the points of type
+  \eqn{1,2,\ldots,j-1}{1,2,...,j-1}.
+  
+  The hierarchical version of the (stationary) 
+  Strauss process with \eqn{m} types, with interaction radii
+  \eqn{r_{ij}}{r[i,j]} and 
+  parameters \eqn{\beta_j}{beta[j]} and \eqn{\gamma_{ij}}{gamma[i,j]}
+  is a point process
+  in which each point of type \eqn{j}
+  contributes a factor \eqn{\beta_j}{beta[j]} to the 
+  probability density of the point pattern, and a pair of points
+  of types \eqn{i} and \eqn{j} closer than \eqn{r_{ij}}{r[i,j]}
+  units apart contributes a factor
+  \eqn{\gamma_{ij}}{gamma[i,j]} to the density
+  \bold{provided} \eqn{i \le j}{i <= j}. 
+  
+  The nonstationary hierarchical Strauss process is similar except that 
+  the contribution of each individual point \eqn{x_i}{x[i]}
+  is a function \eqn{\beta(x_i)}{beta(x[i])}
+  of location and type, rather than a constant beta. 
+ 
+  The function \code{\link{ppm}()},
+  which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the hierarchical
+  Strauss process pairwise interaction is
+  yielded by the function \code{HierStrauss()}. See the examples below.
+
+  The argument \code{types} need not be specified in normal use.
+  It will be determined automatically from the point pattern data set
+  to which the HierStrauss interaction is applied,
+  when the user calls \code{\link{ppm}}. 
+  However, the user should be confident that
+  the ordering of types in the dataset corresponds to the ordering of
+  rows and columns in the matrix \code{radii}.
+
+  The argument \code{archy} can be used to specify a hierarchical
+  ordering of the types. It can be either a vector of integers
+  or a character vector matching the possible types.
+  The default is the sequence
+  \eqn{1,2, \ldots, m}{1,2, ..., m} meaning that type \eqn{j}
+  depends on types \eqn{1,2, \ldots, j-1}{1,2, ..., j-1}.
+  
+  The matrix \code{radii} must be symmetric, with entries
+  which are either positive numbers or \code{NA}. 
+  A value of \code{NA} indicates that no interaction term should be included
+  for this combination of types.
+  
+  Note that only the interaction radii are
+  specified in \code{HierStrauss}.  The canonical
+  parameters \eqn{\log(\beta_j)}{log(beta[j])} and
+  \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by
+  \code{\link{ppm}()}, not fixed in \code{HierStrauss()}.
+}
+\seealso{
+  \code{\link{MultiStrauss}} for the corresponding
+  symmetrical interaction.
+
+  \code{\link{HierHard}},
+  \code{\link{HierStraussHard}}.
+}
+\examples{
+   r <- matrix(10 * c(3,4,4,3), nrow=2,ncol=2)
+   HierStrauss(r)
+   # prints a sensible description of itself
+   ppm(ants ~1, HierStrauss(r, archy=c("Messor", "Cataglyphis")))
+   # fit the stationary hierarchical Strauss process to ants data
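+   # A sketch: the canonical parameters log(beta[j]) and log(gamma[i,j])
+   # are estimated by ppm and can be inspected with coef()
+   fit <- ppm(ants ~1, HierStrauss(r))
+   coef(fit)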
+}
+\author{\adrian, \rolf and \ege.}
+\references{
+  Grabarnik, P. and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, A. (2009)
+  Modelling the spatial structure of forest stands by
+  multivariate point processes with hierarchical interactions.
+  \emph{Ecological Modelling} \bold{220}, 1232--1240.
+
+  \ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}, H. and 
+  \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, A. (1999)
+  Multitype spatial point patterns with hierarchical interactions.
+  \emph{Biometrics} \bold{55}, 1051--1058.
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/HierStraussHard.Rd b/man/HierStraussHard.Rd
new file mode 100644
index 0000000..2267c37
--- /dev/null
+++ b/man/HierStraussHard.Rd
@@ -0,0 +1,127 @@
+\name{HierStraussHard}
+\alias{HierStraussHard}
+\title{The Hierarchical Strauss Hard Core Point Process Model}
+\description{
+Creates an instance of the hierarchical Strauss-hard core point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+  HierStraussHard(iradii, hradii=NULL, types=NULL, archy=NULL)
+}
+\arguments{
+  \item{iradii}{Matrix of interaction radii}
+  \item{hradii}{Optional matrix of hard core distances}
+  \item{types}{Optional; vector of all possible types (i.e. the possible levels
+    of the \code{marks} variable in the data)}
+  \item{archy}{Optional: the hierarchical order. See Details.}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of the hierarchical Strauss-hard core process with
+  interaction radii \eqn{iradii[i,j]} and hard core distances
+  \eqn{hradii[i,j]}.
+}
+\details{
+  This is a hierarchical point process model
+  for a multitype point pattern
+  (\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander} and 
+  \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, 1999;
+  Grabarnik and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, 2009).
+  It is appropriate for analysing multitype point pattern data
+  in which the types are ordered so that
+  the points of type \eqn{j} depend on the points of type
+  \eqn{1,2,\ldots,j-1}{1,2,...,j-1}.
+  
+  The hierarchical version of the (stationary) 
+  Strauss hard core process with \eqn{m} types, with interaction radii
+  \eqn{r_{ij}}{r[i,j]}, hard core distances \eqn{h_{ij}}{h[i,j]} and 
+  parameters \eqn{\beta_j}{beta[j]} and \eqn{\gamma_{ij}}{gamma[i,j]}
+  is a point process
+  in which each point of type \eqn{j}
+  contributes a factor \eqn{\beta_j}{beta[j]} to the 
+  probability density of the point pattern, and a pair of points
+  of types \eqn{i} and \eqn{j} closer than \eqn{r_{ij}}{r[i,j]}
+  units apart contributes a factor
+  \eqn{\gamma_{ij}}{gamma[i,j]} to the density
+  \bold{provided} \eqn{i \le j}{i <= j}. If any pair of points
+  of types \eqn{i} and \eqn{j} lies closer than \eqn{h_{ij}}{h[i,j]}
+  units apart, the configuration of points is impossible (probability
+  density zero).
+  
+  The nonstationary hierarchical Strauss hard core
+  process is similar except that 
+  the contribution of each individual point \eqn{x_i}{x[i]}
+  is a function \eqn{\beta(x_i)}{beta(x[i])}
+  of location and type, rather than a constant beta. 
+ 
+  The function \code{\link{ppm}()},
+  which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the hierarchical
+  Strauss hard core process pairwise interaction is
+  yielded by the function \code{HierStraussHard()}. See the examples below.
+
+  The argument \code{types} need not be specified in normal use.
+  It will be determined automatically from the point pattern data set
+  to which the HierStraussHard interaction is applied,
+  when the user calls \code{\link{ppm}}. 
+  However, the user should be confident that
+  the ordering of types in the dataset corresponds to the ordering of
+  rows and columns in the matrices \code{iradii} and \code{hradii}.
+
+  The argument \code{archy} can be used to specify a hierarchical
+  ordering of the types. It can be either a vector of integers
+  or a character vector matching the possible types.
+  The default is the sequence
+  \eqn{1,2, \ldots, m}{1,2, ..., m} meaning that type \eqn{j}
+  depends on types \eqn{1,2, \ldots, j-1}{1,2, ..., j-1}.
+  
+  The matrices \code{iradii} and \code{hradii} must be square, with entries
+  which are either positive numbers or zero or \code{NA}. 
+  A value of zero or \code{NA} indicates that no interaction term
+  should be included for this combination of types.
+  
+  Note that only the interaction radii and hard core distances are
+  specified in \code{HierStraussHard}.  The canonical
+  parameters \eqn{\log(\beta_j)}{log(beta[j])} and
+  \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by
+  \code{\link{ppm}()}, not fixed in \code{HierStraussHard()}.
+}
+\seealso{
+  \code{\link{MultiStraussHard}} for the corresponding
+  symmetrical interaction.
+
+  \code{\link{HierHard}},
+  \code{\link{HierStrauss}}.
+}
+\examples{
+   r <- matrix(c(30, NA, 40, 30), nrow=2,ncol=2)
+   h <- matrix(c(4, NA, 10, 15), 2, 2)
+   HierStraussHard(r, h)
+   # prints a sensible description of itself
+   ppm(ants ~1, HierStraussHard(r, h))
+   # fit the stationary hierarchical Strauss-hard core process to ants data
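+   # A sketch combining the matrices above with an explicit
+   # hierarchical order ("Messor" and "Cataglyphis" are the ants types)
+   HierStraussHard(r, h, archy=c("Messor", "Cataglyphis"))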
+}
+\author{\adrian, \rolf and \ege.}
+\references{
+  Grabarnik, P. and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, A. (2009)
+  Modelling the spatial structure of forest stands by
+  multivariate point processes with hierarchical interactions.
+  \emph{Ecological Modelling} \bold{220}, 1232--1240.
+
+  \ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}, H. and 
+  \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, A. (1999)
+  Multitype spatial point patterns with hierarchical interactions.
+  \emph{Biometrics} \bold{55}, 1051--1058.
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Hybrid.Rd b/man/Hybrid.Rd
new file mode 100644
index 0000000..8c547b7
--- /dev/null
+++ b/man/Hybrid.Rd
@@ -0,0 +1,101 @@
+\name{Hybrid}
+\alias{Hybrid}
+\title{
+  Hybrid Interaction Point Process Model
+}
+\description{
+  Creates an instance of a hybrid point process model
+  which can then be fitted to point pattern data.
+}
+\usage{
+Hybrid(...)
+}
+\arguments{
+  \item{\dots}{
+    Two or more interactions (objects of class \code{"interact"})
+    or objects which can be converted to interactions.
+    See Details.
+  }
+}
+\details{
+  A \emph{hybrid} (Baddeley, Turner, Mateu and Bevan, 2013)
+  is a point process model created by combining two or more
+  point process models, or an interpoint interaction created by combining
+  two or more interpoint interactions.
+  
+  The \emph{hybrid} of two point processes, with probability densities
+  \eqn{f(x)} and \eqn{g(x)} respectively,
+  is the point process with probability density 
+  \deqn{h(x) = c \, f(x) \, g(x)}{h(x) = c * f(x) * g(x)}
+  where \eqn{c} is a normalising constant.
+
+  Equivalently, the hybrid of two point processes with conditional intensities
+  \eqn{\lambda(u,x)}{lambda(u,x)} and \eqn{\kappa(u,x)}{kappa(u,x)}
+  is the point process with conditional intensity
+  \deqn{
+    \phi(u,x) = \lambda(u,x) \, \kappa(u,x).
+  }{
+    phi(u,x) = lambda(u,x) * kappa(u,x).
+  }
+  The hybrid of \eqn{m > 2} point processes is defined in a similar way.
+  
+  The function \code{\link{ppm}}, which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of a hybrid interaction is
+  yielded by the function \code{Hybrid()}. 
+  
+  The arguments \code{\dots} will be interpreted as interpoint interactions
+  (objects of class \code{"interact"}) and the result will be the hybrid
+  of these interactions. Each argument must either be an
+  interpoint interaction (object of class \code{"interact"}),
+  or a point process model (object of class \code{"ppm"}) from which the
+  interpoint interaction will be extracted.
+
+  The arguments \code{\dots} may also be given in the form
+  \code{name=value}. This is purely cosmetic: it can be used to attach
+  simple mnemonic names to the component interactions, and makes the
+  printed output from \code{\link{print.ppm}} neater.
+}
+\value{
+  An object of class \code{"interact"}
+  describing an interpoint interaction structure.
+}
+\references{
+  Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013)
+  Hybrids of Gibbs point process models and their implementation.
+  \emph{Journal of Statistical Software} \bold{55}:11, 1--43.
+  \url{http://www.jstatsoft.org/v55/i11/}
+}
+\seealso{
+  \code{\link{ppm}}
+}
+\examples{
+  Hybrid(Strauss(0.1), Geyer(0.2, 3))
+
+  Hybrid(Ha=Hardcore(0.05), St=Strauss(0.1), Ge=Geyer(0.2, 3))
+
+  fit <- ppm(redwood, ~1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2)))
+  fit
+
+  ctr <- rmhcontrol(nrep=5e4, expand=1)
+  plot(simulate(fit, control=ctr))
+
+  # hybrid components can be models (including hybrid models)
+  Hybrid(fit, S=Softcore(0.5))
+
+  # plot.fii only works if every component is a pairwise interaction
+  data(swedishpines)
+  fit2 <- ppm(swedishpines, ~1, Hybrid(DG=DiggleGratton(2,10), S=Strauss(5)))
+  plot(fitin(fit2))
+  plot(fitin(fit2), separate=TRUE, mar.panel=rep(4,4))
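+  # a hybrid interaction is itself an object of class "interact",
+  # so (as a sketch) it can in turn be a component of another hybrid
+  Hybrid(Hybrid(Strauss(0.1), Hardcore(0.02)), Geyer(0.2, 3))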
+}
+\author{\adrian and \rolf.}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Iest.Rd b/man/Iest.Rd
new file mode 100644
index 0000000..c9c5c84
--- /dev/null
+++ b/man/Iest.Rd
@@ -0,0 +1,145 @@
+\name{Iest}
+\alias{Iest}
+\title{Estimate the I-function}
+\description{
+  Estimates the summary function \eqn{I(r)} for a multitype point pattern.
+}
+\usage{
+  Iest(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of \eqn{I(r)} will be computed.
+    An object of class \code{"ppp"}, or data
+    in any format acceptable to \code{\link{as.ppp}()}.
+  }
+  \item{\dots}{Ignored.}
+  \item{eps}{
+    the resolution of the discrete approximation to Euclidean distance
+    (see below). There is a sensible default.
+  }
+  \item{r}{Optional. Numeric vector of values for the argument \eqn{r}
+  	at which \eqn{I(r)} 
+    should be evaluated. There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \code{r}.
+  }
+  \item{breaks}{
+	This argument is for internal use only.
+  }
+  \item{correction}{
+    Optional. Vector of character strings specifying the edge correction(s)
+    to be used by \code{\link{Jest}}.
+  }
+}
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+
+  Essentially a data frame containing
+  \item{r}{the vector of values of the argument \eqn{r} 
+    at which the function \eqn{I} has been  estimated}
+  \item{rs}{the ``reduced sample'' or ``border correction''
+              estimator of \eqn{I(r)} computed from
+	      the border-corrected estimates of \eqn{J} functions}
+  \item{km}{the spatial Kaplan-Meier estimator of \eqn{I(r)} computed from
+    the Kaplan-Meier estimates of \eqn{J} functions}
+  \item{han}{the Hanisch-style estimator of \eqn{I(r)} computed from
+    the Hanisch-style estimates of \eqn{J} functions}
+  \item{un}{the uncorrected estimate of \eqn{I(r)}
+             computed from the uncorrected estimates of \eqn{J}
+  }
+  \item{theo}{the theoretical value of \eqn{I(r)}
+    for a stationary Poisson process: identically equal to \eqn{0}
+  }
+}
+\note{
+  Sizeable amounts of memory may be needed during the calculation.
+}
+\details{
+  The \eqn{I} function 
+  summarises the dependence between types in a multitype point process
+  (Van Lieshout and Baddeley, 1999).
+  It is based on the concept of the \eqn{J} function for an
+  unmarked point process (Van Lieshout and Baddeley, 1996).
+  See \code{\link{Jest}} for information about the \eqn{J} function.
+  
+  The \eqn{I} function is defined as 
+  \deqn{ %
+    I(r) = \sum_{i=1}^m p_i J_{ii}(r) %
+    - J_{\bullet\bullet}(r)}{ %
+    I(r) = (sum p[i] Jii(r)) - J(r)
+  }
+  where \eqn{J_{\bullet\bullet}}{J} is the \eqn{J} function for
+  the entire point process ignoring the marks, while 
+  \eqn{J_{ii}}{Jii} is the \eqn{J} function for the
+  process consisting of points of type \eqn{i} only,
+  and \eqn{p_i}{p[i]} is the proportion of points which are of type \eqn{i}.
+
+  The \eqn{I} function is designed to measure dependence between
+  points of different types, even if the points are
+  not Poisson. Let \eqn{X} be a stationary multitype point process,
+  and write \eqn{X_i}{X[i]} for the process of points of type \eqn{i}.
+  If the processes \eqn{X_i}{X[i]} are independent of each other,
+  then the \eqn{I}-function is identically equal to \eqn{0}. 
+  Deviations \eqn{I(r) < 0} or \eqn{I(r) > 0}
+  typically indicate negative and positive association, respectively,
+  between types.
+  See Van Lieshout and Baddeley (1999)
+  for further information.
+
+  An estimate of \eqn{I} derived from a multitype spatial point pattern dataset
+  can be used in exploratory data analysis and formal inference
+  about the pattern. The estimate of \eqn{I(r)} is compared against the 
+  constant function \eqn{0}.
+  Deviations \eqn{I(r) < 0} or \eqn{I(r) > 0}
+  may suggest negative and positive association, respectively.
+
+  This algorithm estimates the \eqn{I}-function
+  from the multitype point pattern \code{X}.
+  It assumes that \code{X} can be treated
+  as a realisation of a stationary (spatially homogeneous) 
+  random spatial marked point process in the plane, observed through
+  a bounded window. 
+
+  The argument \code{X} is interpreted as a point pattern object 
+  (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can
+  be supplied in any of the formats recognised by
+  \code{\link{as.ppp}()}. It must be a multitype point pattern
+  (it must have a \code{marks} vector which is a \code{factor}).
+
+  The function \code{\link{Jest}} is called to 
+  compute estimates of the \eqn{J} functions in the formula above.
+  In fact three different estimates are computed
+  using different edge corrections. See \code{\link{Jest}} for
+  information.
+}
+\references{
+  Van Lieshout, M.N.M. and Baddeley, A.J. (1996)
+  A nonparametric measure of spatial interaction in point patterns.
+  \emph{Statistica Neerlandica} \bold{50}, 344--361.
+
+  Van Lieshout, M.N.M. and Baddeley, A.J. (1999)
+  Indices of dependence between types in multivariate point patterns.
+  \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532.
+
+}
+\seealso{
+  \code{\link{Jest}}
+}
+\examples{
+   data(amacrine)
+   Ic <- Iest(amacrine)
+   plot(Ic, main="Amacrine Cells data")
+   # values are below I = 0, suggesting negative association
+   # between 'on' and 'off' cells.
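+   # A sketch comparing the Kaplan-Meier estimate against the
+   # theoretical value 0 for independent types
+   plot(Ic, cbind(km, theo) ~ r)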
+}
+\author{\adrian and \rolf.}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/Jcross.Rd b/man/Jcross.Rd
new file mode 100644
index 0000000..169b957
--- /dev/null
+++ b/man/Jcross.Rd
@@ -0,0 +1,193 @@
+\name{Jcross}
+\alias{Jcross}
+\title{
+  Multitype J Function (i-to-j)
+}
+\description{
+  For a multitype point pattern, 
+  estimate the multitype \eqn{J} function 
+  summarising the interpoint dependence between
+  points of type \eqn{i} and of type \eqn{j}.
+}
+\usage{
+Jcross(X, i, j, eps=NULL, r=NULL, breaks=NULL, \dots, correction=NULL)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the multitype \eqn{J} function
+    \eqn{J_{ij}(r)}{Jij(r)} will be computed.
+    It must be a multitype point pattern (a marked point pattern
+    whose marks are a factor). See under Details.
+  }
+  \item{i}{The type (mark value)
+    of the points in \code{X} from which distances are measured.
+    A character string (or something that will be converted to a
+    character string).
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{j}{The type (mark value)
+    of the points in \code{X} to which distances are measured.
+    A character string (or something that will be
+    converted to a character string).
+    Defaults to the second level of \code{marks(X)}.
+  }
+  \item{eps}{A positive number.
+    The resolution of the discrete approximation to Euclidean
+    distance (see below). There is a sensible default.
+  }
+  \item{r}{Optional. Numeric vector. The values of the argument \eqn{r}
+    at which the function
+    \eqn{J_{ij}(r)}{Jij(r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{breaks}{
+	This argument is for internal use only.
+  }
+  \item{\dots}{Ignored.}
+  \item{correction}{
+    Optional. Character string specifying the edge correction(s)
+    to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"},
+    \code{"Hanisch"} and \code{"best"}.
+    Alternatively \code{correction="all"} selects all options.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+
+  Essentially a data frame containing six numeric columns 
+  \item{J}{the recommended
+    estimator of \eqn{J_{ij}(r)}{Jij(r)},
+    currently the Kaplan-Meier estimator.
+  }
+  \item{r}{the values of the argument \eqn{r} 
+    at which the function \eqn{J_{ij}(r)}{Jij(r)} has been  estimated
+  }
+  \item{km}{the Kaplan-Meier 
+    estimator of \eqn{J_{ij}(r)}{Jij(r)}
+  }
+  \item{rs}{the ``reduced sample'' or ``border correction''
+    estimator of \eqn{J_{ij}(r)}{Jij(r)}
+  }
+  \item{han}{the Hanisch-style
+    estimator of \eqn{J_{ij}(r)}{Jij(r)}
+  }
+  \item{un}{the ``uncorrected'' 
+    estimator of \eqn{J_{ij}(r)}{Jij(r)}
+    formed by taking the ratio of uncorrected empirical estimators
+    of \eqn{1 - G_{ij}(r)}{1 - Gij(r)}
+    and \eqn{1 - F_{j}(r)}{1 - Fj(r)}, see
+    \code{\link{Gcross}} and \code{\link{Fest}}.
+  }
+  \item{theo}{the theoretical value of  \eqn{J_{ij}(r)}{Jij(r)}
+    for a marked Poisson process, namely 1.
+  }
+  The result also has two attributes \code{"G"} and \code{"F"}
+  which are respectively the outputs of \code{\link{Gcross}}
+  and \code{\link{Fest}} for the point pattern.
+}
+\details{
+  This function \code{Jcross} and its companions
+  \code{\link{Jdot}} and \code{\link{Jmulti}}
+  are generalisations of the function \code{\link{Jest}}
+  to multitype point patterns. 
+
+  A multitype point pattern is a spatial pattern of
+  points classified into a finite number of possible
+  ``colours'' or ``types''. In the \pkg{spatstat} package,
+  a multitype pattern is represented as a single 
+  point pattern object in which the points carry marks,
+  and the mark value attached to each point
+  determines the type of that point.
+  
+  The argument \code{X} must be a point pattern (object of class
+  \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
+  It must be a marked point pattern, and the mark vector
+  \code{X$marks} must be a factor.
+  The argument \code{i} will be interpreted as a
+  level of the factor \code{X$marks}. (Warning: this means that
+  an integer value \code{i=3} will be interpreted as the number 3,
+  \bold{not} the 3rd smallest level).
+  
+  The ``type \eqn{i} to type \eqn{j}'' multitype \eqn{J} function 
+  of a stationary multitype point process \eqn{X}
+  was introduced by Van Lieshout and Baddeley (1999). It is defined by
+  \deqn{J_{ij}(r) = \frac{1 - G_{ij}(r)}{1 -
+      F_{j}(r)}}{Jij(r) = (1 - Gij(r))/(1-Fj(r))}
+  where \eqn{G_{ij}(r)}{Gij(r)} is the distribution function of
+  the distance from a type \eqn{i} point to the nearest point of type \eqn{j},
+  and \eqn{F_{j}(r)}{Fj(r)} is the distribution
+  function of the distance from a fixed point in space to the nearest
+  point of type \eqn{j} in the pattern.
+
+  An estimate of \eqn{J_{ij}(r)}{Jij(r)}
+  is a useful summary statistic in exploratory data analysis
+  of a multitype point pattern. 
+  If the subprocess of type \eqn{i} points is independent
+  of the subprocess of points of type \eqn{j},
+  then \eqn{J_{ij}(r) \equiv 1}{Jij(r) = 1}.
+  Hence deviations of the empirical estimate of
+  \eqn{J_{ij}}{Jij} from the value 1
+  may suggest dependence between types.
+
+  This algorithm estimates \eqn{J_{ij}(r)}{Jij(r)} 
+  from the point pattern \code{X}. It assumes that \code{X} can be treated
+  as a realisation of a stationary (spatially homogeneous) 
+  random spatial point process in the plane, observed through
+  a bounded window.
+  The window (which is specified in \code{X} as \code{Window(X)})
+  may have arbitrary shape.
+  Biases due to edge effects are
+  treated in the same manner as in \code{\link{Jest}},
+  using the Kaplan-Meier and border corrections.
+  The main work is done by \code{\link{Gmulti}} and \code{\link{Fest}}.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{J_{ij}(r)}{Jij(r)} should be evaluated. 
+  The values of \eqn{r} must be increasing nonnegative numbers
+  and the maximum \eqn{r} value must not exceed the radius of the
+  largest disc contained in the window.
+}
+\references{
+  Van Lieshout, M.N.M. and Baddeley, A.J. (1996)
+  A nonparametric measure of spatial interaction in point patterns.
+  \emph{Statistica Neerlandica} \bold{50}, 344--361.
+
+  Van Lieshout, M.N.M. and Baddeley, A.J. (1999)
+  Indices of dependence between types in multivariate point patterns.
+  \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532.
+
+}
+\section{Warnings}{
+  The arguments \code{i} and \code{j} are always interpreted as
+  levels of the factor \code{X$marks}. They are converted to character
+  strings if they are not already character strings.
+  The value \code{i=1} does \bold{not}
+  refer to the first level of the factor.
+}
+\seealso{
+ \code{\link{Jdot}},
+ \code{\link{Jest}},
+ \code{\link{Jmulti}}
+}
+\examples{
+    # Lansing woods data: 6 types of trees
+    woods <- lansing
+    \testonly{
+       woods <- woods[seq(1,npoints(woods), by=30)]
+    }
+    Jhm <- Jcross(woods, "hickory", "maple")
+    # diagnostic plot for independence between hickories and maples
+    plot(Jhm)
+
+    # synthetic example with two types "a" and "b"
+    pp <- runifpoint(30) \%mark\% factor(sample(c("a","b"), 30, replace=TRUE))
+    J <- Jcross(pp)
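+    # the marks of 'pp' form a factor with levels "a" and "b", the
+    # defaults for i and j, so this call is equivalent (a sketch):
+    J2 <- Jcross(pp, "a", "b")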
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/Jdot.Rd b/man/Jdot.Rd
new file mode 100644
index 0000000..4489a1d
--- /dev/null
+++ b/man/Jdot.Rd
@@ -0,0 +1,200 @@
+\name{Jdot}
+\alias{Jdot}
+\title{
+  Multitype J Function (i-to-any)
+}
+\description{
+  For a multitype point pattern, 
+  estimate the multitype \eqn{J} function 
+  summarising the interpoint dependence between
+  the type \eqn{i} points and the points of any type.
+}
+\usage{
+Jdot(X, i, eps=NULL, r=NULL, breaks=NULL, \dots, correction=NULL)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the multitype \eqn{J} function
+    \eqn{J_{i\bullet}(r)}{Ji.(r)} will be computed.
+    It must be a multitype point pattern (a marked point pattern
+    whose marks are a factor). See under Details.
+  }
+  \item{i}{The type (mark value)
+    of the points in \code{X} from which distances are measured.
+    A character string (or something that will be converted to a
+    character string).
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{eps}{A positive number.
+    The resolution of the discrete approximation to Euclidean
+    distance (see below). There is a sensible default.
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the function
+    \eqn{J_{i\bullet}(r)}{Ji.(r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{breaks}{
+	This argument is for internal use only.
+  }
+  \item{\dots}{Ignored.}
+  \item{correction}{
+    Optional. Character string specifying the edge correction(s)
+    to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"},
+    \code{"Hanisch"} and \code{"best"}.
+    Alternatively \code{correction="all"} selects all options.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+
+  Essentially a data frame containing six numeric columns 
+  \item{J}{the recommended
+    estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)},
+    currently the Kaplan-Meier estimator.
+  }
+  \item{r}{the values of the argument \eqn{r} 
+    at which the function \eqn{J_{i\bullet}(r)}{Ji.(r)} has been  estimated
+  }
+  \item{km}{the Kaplan-Meier 
+    estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)}
+  }
+  \item{rs}{the ``reduced sample'' or ``border correction''
+    estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)}
+  }
+  \item{han}{the Hanisch-style
+    estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)}
+  }
+  \item{un}{the ``uncorrected'' 
+    estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)}
+    formed by taking the ratio of uncorrected empirical estimators
+    of \eqn{1 - G_{i\bullet}(r)}{1 - Gi.(r)}
+    and \eqn{1 - F_{\bullet}(r)}{1 - F.(r)}, see
+    \code{\link{Gdot}} and \code{\link{Fest}}.
+  }
+  \item{theo}{the theoretical value of  \eqn{J_{i\bullet}(r)}{Ji.(r)}
+    for a marked Poisson process, namely 1.
+  }
+  The result also has two attributes \code{"G"} and \code{"F"}
+  which are respectively the outputs of \code{\link{Gdot}}
+  and \code{\link{Fest}} for the point pattern.
+}
+\details{
+  This function \code{Jdot} and its companions
+  \code{\link{Jcross}} and \code{\link{Jmulti}}
+  are generalisations of the function \code{\link{Jest}}
+  to multitype point patterns. 
+
+  A multitype point pattern is a spatial pattern of
+  points classified into a finite number of possible
+  ``colours'' or ``types''. In the \pkg{spatstat} package,
+  a multitype pattern is represented as a single 
+  point pattern object in which the points carry marks,
+  and the mark value attached to each point
+  determines the type of that point.
+  
+  The argument \code{X} must be a point pattern (object of class
+  \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
+  It must be a marked point pattern, and the mark vector
+  \code{X$marks} must be a factor.
+  The argument \code{i} will be interpreted as a
+  level of the factor \code{X$marks}. (Warning: this means that
+  an integer value \code{i=3} will be interpreted as the number 3,
+  \bold{not} the 3rd smallest level.)
+  
+  The ``type \eqn{i} to any type'' multitype \eqn{J} function 
+  of a stationary multitype point process \eqn{X}
+  was introduced by Van Lieshout and Baddeley (1999). It is defined by
+  \deqn{J_{i\bullet}(r) = \frac{1 - G_{i\bullet}(r)}{1 -
+      F_{\bullet}(r)}}{Ji.(r) = (1 - Gi.(r))/(1-F.(r))}
+  where \eqn{G_{i\bullet}(r)}{Gi.(r)} is the distribution function of
+  the distance from a type \eqn{i} point to the nearest other point
+  of the pattern, and \eqn{F_{\bullet}(r)}{F.(r)} is the distribution
+  function of the distance from a fixed point in space to the nearest
+  point of the pattern.
+
+  An estimate of \eqn{J_{i\bullet}(r)}{Ji.(r)}
+  is a useful summary statistic in exploratory data analysis
+  of a multitype point pattern. If the pattern is 
+  a marked Poisson point process, then
+  \eqn{J_{i\bullet}(r) \equiv 1}{Ji.(r) = 1}.
+  If the subprocess of type \eqn{i} points is independent
+  of the subprocess of points of all types not equal to \eqn{i},
+  then \eqn{J_{i\bullet}(r)}{Ji.(r)} equals
+  \eqn{J_{ii}(r)}{Jii(r)}, the ordinary \eqn{J} function
+  (see \code{\link{Jest}} and Van Lieshout and Baddeley (1996))
+  of the points of type \eqn{i}. 
+  Hence deviations from zero of the empirical estimate of
+  \eqn{J_{i\bullet} - J_{ii}}{Ji.-Jii} 
+  may suggest dependence between types.
+
+  This algorithm estimates \eqn{J_{i\bullet}(r)}{Ji.(r)} 
+  from the point pattern \code{X}. It assumes that \code{X} can be treated
+  as a realisation of a stationary (spatially homogeneous) 
+  random spatial point process in the plane, observed through
+  a bounded window.
+  The window (which is specified in \code{X} as \code{Window(X)})
+  may have arbitrary shape.
+  Biases due to edge effects are
+  treated in the same manner as in \code{\link{Jest}},
+  using the Kaplan-Meier and border corrections.
+  The main work is done by \code{\link{Gmulti}} and \code{\link{Fest}}.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{J_{i\bullet}(r)}{Ji.(r)} should be evaluated. 
+  The values of \eqn{r} must be increasing nonnegative numbers
+  and the maximum \eqn{r} value must not exceed the radius of the
+  largest disc contained in the window.
+}
+\references{
+  Van Lieshout, M.N.M. and Baddeley, A.J. (1996)
+  A nonparametric measure of spatial interaction in point patterns.
+  \emph{Statistica Neerlandica} \bold{50}, 344--361.
+
+  Van Lieshout, M.N.M. and Baddeley, A.J. (1999)
+  Indices of dependence between types in multivariate point patterns.
+  \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532.
+
+}
+\section{Warnings}{
+  The argument \code{i} is interpreted as
+  a level of the factor \code{X$marks}. It is converted to a character
+  string if it is not already a character string.
+  The value \code{i=1} does \bold{not}
+  refer to the first level of the factor.
+}
+\seealso{
+ \code{\link{Jcross}},
+ \code{\link{Jest}},
+ \code{\link{Jmulti}}
+}
+\examples{
+    # Lansing woods data: 6 types of trees
+    woods <- lansing
+
+    \testonly{
+        woods <- woods[seq(1,npoints(woods), by=30), ]
+    }
+    Jh. <- Jdot(woods, "hickory")
+    plot(Jh.)
+    # diagnostic plot for independence between hickories and other trees
+    Jhh <- Jest(split(woods)$hickory)
+    plot(Jhh, add=TRUE, legendpos="bottom")
+
+    \dontrun{
+    # synthetic example with two marks "a" and "b"
+    pp <- runifpoint(30) \%mark\% factor(sample(c("a","b"), 30, replace=TRUE))
+    J <- Jdot(pp, "a")
+    }
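+    # A sketch: the "G" and "F" attributes documented under Value hold
+    # the underlying estimates from which the ratio was formed
+    plot(attr(Jh., "G"))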
+}
+\author{\adrian and \rolf.}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/Jest.Rd b/man/Jest.Rd
new file mode 100644
index 0000000..23602da
--- /dev/null
+++ b/man/Jest.Rd
@@ -0,0 +1,250 @@
+\name{Jest}
+\alias{Jest}
+\title{Estimate the J-function}
+\description{
+  Estimates the summary function \eqn{J(r)} for a point pattern in a 
+  window of arbitrary shape.
+}
+\usage{
+  Jest(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of \eqn{J(r)} will be computed.
+    An object of class \code{"ppp"}, or data
+    in any format acceptable to \code{\link{as.ppp}()}.
+  }
+  \item{\dots}{Ignored.}
+  \item{eps}{
+    the resolution of the discrete approximation to Euclidean distance
+    (see below). There is a sensible default.
+  }
+  \item{r}{vector of values for the argument \eqn{r} at which \eqn{J(r)} 
+    should be evaluated. There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \code{r}.
+  }
+  \item{breaks}{
+	This argument is for internal use only.
+  }
+  \item{correction}{
+    Optional. Character string specifying the choice of edge
+    correction(s) in \code{\link{Fest}} and \code{\link{Gest}}.
+    See Details.
+  }
+}
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+
+  Essentially a data frame containing
+  \item{r}{the vector of values of the argument \eqn{r} 
+    at which the function \eqn{J} has been  estimated}
+  \item{rs}{the ``reduced sample'' or ``border correction''
+              estimator of \eqn{J(r)} computed from
+	      the border-corrected estimates of \eqn{F} and \eqn{G} }
+  \item{km}{the spatial Kaplan-Meier estimator of \eqn{J(r)} computed from
+    the Kaplan-Meier estimates of \eqn{F} and \eqn{G} }
+  \item{han}{the Hanisch-style estimator of \eqn{J(r)} computed from
+    the Hanisch estimate of \eqn{G} and the Chiu-Stoyan estimate of
+    \eqn{F}
+  }
+  \item{un}{the uncorrected estimate of \eqn{J(r)}
+             computed from the uncorrected estimates of \eqn{F} and
+	     \eqn{G}
+  }
+  \item{theo}{the theoretical value of \eqn{J(r)}
+    for a stationary Poisson process: identically equal to \eqn{1}
+  }
+  The data frame also has \bold{attributes}
+  \item{F}{
+    the output of \code{\link{Fest}} for this point pattern,
+    containing three estimates of the empty space function \eqn{F(r)}
+    and an estimate of its hazard function
+  }
+  \item{G}{
+    the output of \code{\link{Gest}} for this point pattern,
+    containing three estimates of the nearest neighbour distance distribution
+    function \eqn{G(r)} and an estimate of its hazard function
+  }
+}
+\note{
+  Sizeable amounts of memory may be needed during the calculation.
+}
+\details{
+  The \eqn{J} function (Van Lieshout and Baddeley, 1996)
+  of a stationary point process is defined as 
+  \deqn{J(r) = \frac{1-G(r)}{1-F(r)} }{ %
+    J(r) = (1-G(r))/(1-F(r))}
+  where \eqn{G(r)} is the nearest neighbour distance distribution
+  function of the point process (see \code{\link{Gest}}) 
+  and \eqn{F(r)} is its empty space function (see \code{\link{Fest}}).
+
+  For a completely random (uniform Poisson) point process,
+  the \eqn{J}-function is identically equal to \eqn{1}. 
+  Deviations \eqn{J(r) < 1} or \eqn{J(r) > 1}
+  typically indicate spatial clustering or spatial regularity, respectively.
+  The \eqn{J}-function is one of the few characteristics that can be
+  computed explicitly for a wide range of point processes. 
+  See Van Lieshout and Baddeley (1996), Baddeley et al (2000)
+  and Thonnes and Van Lieshout (1999) for further information.
+
+  An estimate of \eqn{J} derived from a spatial point pattern dataset
+  can be used in exploratory data analysis and formal inference
+  about the pattern. The estimate of \eqn{J(r)} is compared against the 
+  constant function \eqn{1}.
+  Deviations \eqn{J(r) < 1} or \eqn{J(r) > 1}
+  may suggest spatial clustering or spatial regularity, respectively.
+
+  This algorithm estimates the \eqn{J}-function
+  from the point pattern \code{X}. It assumes that \code{X} can be treated
+  as a realisation of a stationary (spatially homogeneous) 
+  random spatial point process in the plane, observed through
+  a bounded window. 
+  The window (which is specified in \code{X} as \code{Window(X)})
+  may have arbitrary shape. 
+
+  The argument \code{X} is interpreted as a point pattern object 
+  (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can
+  be supplied in any of the formats recognised by
+  \code{\link{as.ppp}()}. 
+
+  The functions \code{\link{Fest}} and \code{\link{Gest}} are called to 
+  compute estimates of \eqn{F(r)} and \eqn{G(r)} respectively.
+  These estimates are then combined by simply taking the ratio
+  \eqn{J(r) = (1-G(r))/(1-F(r))}.
+
+  In fact several different estimates are computed
+  using different edge corrections (Baddeley, 1998).
+
+  The Kaplan-Meier estimate (returned as \code{km}) is the ratio 
+  \code{J = (1-G)/(1-F)} of the Kaplan-Meier estimates of
+  \eqn{1-F} and \eqn{1-G} computed by
+  \code{\link{Fest}} and \code{\link{Gest}} respectively.
+  This is computed if \code{correction=NULL} or if \code{correction}
+  includes \code{"km"}.
+
+  The Hanisch-style estimate (returned as \code{han}) is the ratio 
+  \code{J = (1-G)/(1-F)} where \code{F} is the Chiu-Stoyan estimate of
+  \eqn{F} and \code{G} is the Hanisch estimate of \eqn{G}.
+  This is computed if \code{correction=NULL} or if \code{correction}
+  includes \code{"cs"} or \code{"han"}.
+
+  The reduced-sample or border corrected estimate
+  (returned as \code{rs}) is
+  the same ratio \code{J = (1-G)/(1-F)}
+  of the border corrected estimates. 
+  This is computed if \code{correction=NULL} or if \code{correction}
+  includes \code{"rs"} or \code{"border"}.
+
+  These edge-corrected estimators are slightly biased for \eqn{J}, 
+  since they are ratios
+  of approximately unbiased estimators.
+  The logarithm of the
+  Kaplan-Meier estimate is exactly unbiased for \eqn{\log J}{log J}.
+
+  The uncorrected estimate (returned as \code{un}
+  and computed only if \code{correction} includes \code{"none"})
+  is the ratio \code{J = (1-G)/(1-F)}
+  of the uncorrected (``raw'') estimates of the survival functions
+  of \eqn{F} and \eqn{G},
+  which are the empirical distribution functions of the 
+  empty space distances \code{Fest(X,\dots)$raw}
+  and of the nearest neighbour distances 
+  \code{Gest(X,\dots)$raw}. The uncorrected estimates
+  of \eqn{F} and \eqn{G} are severely biased.
+  However the uncorrected estimate of \eqn{J}
+  is approximately unbiased (if the process is close to Poisson);
+  it is insensitive to edge effects, and should be used when
+  edge effects are severe (see Baddeley et al, 2000).
+  
+  The algorithm for \code{\link{Fest}}
+  uses two discrete approximations which are controlled
+  by the parameter \code{eps} and by the spacing of values of \code{r}
+  respectively. See \code{\link{Fest}} for details.
+  First-time users are strongly advised not to specify these arguments.
+
+  Note that the value returned by \code{Jest} includes 
+  the output of \code{\link{Fest}} and \code{\link{Gest}}
+  as attributes (see the last example below).
+  If the user is intending to compute the \code{F,G} and \code{J}
+  functions for the point pattern, it is only necessary to
+  call \code{Jest}.
+}
+\references{
+  Baddeley, A.J. Spatial sampling and censoring.
+     In O.E. Barndorff-Nielsen, W.S. Kendall and
+     M.N.M. van Lieshout (eds) 
+     \emph{Stochastic Geometry: Likelihood and Computation}.
+     Chapman and Hall, 1998.
+     Chapter 2, pages 37--78.
+  
+  Baddeley, A.J. and Gill, R.D. 
+    The empty space hazard of a spatial pattern.
+    Research Report 1994/3, Department of Mathematics,
+    University of Western Australia, May 1994.
+
+  Baddeley, A.J. and Gill, R.D.
+     Kaplan-Meier estimators of interpoint distance
+		distributions for spatial point processes.
+     \emph{Annals of Statistics} \bold{25} (1997) 263--292.
+
+  Baddeley, A., Kerscher, M., Schladitz, K. and Scott, B.T.
+  Estimating the \emph{J} function without edge correction.
+  \emph{Statistica Neerlandica} \bold{54} (2000) 315--328.
+
+  Borgefors, G.
+     Distance transformations in digital images.
+     \emph{Computer Vision, Graphics and Image Processing}
+     \bold{34} (1986) 344--371.
+
+  Cressie, N.A.C. \emph{Statistics for spatial data}.
+    John Wiley and Sons, 1991.
+
+  Diggle, P.J. \emph{Statistical analysis of spatial point patterns}.
+    Academic Press, 1983.
+
+  Ripley, B.D. \emph{Statistical inference for spatial processes}.
+  Cambridge University Press, 1988.
+
+  Stoyan, D., Kendall, W.S. and Mecke, J.
+  \emph{Stochastic geometry and its applications}.
+  2nd edition. Springer Verlag, 1995.
+
+  Thonnes, E. and Van Lieshout, M.N.M.
+  A comparative study on the power of Van Lieshout and Baddeley's J-function.
+  \emph{Biometrical Journal} \bold{41} (1999) 721--734.
+
+  Van Lieshout, M.N.M. and Baddeley, A.J.
+  A nonparametric measure of spatial interaction in point patterns.
+  \emph{Statistica Neerlandica} \bold{50} (1996) 344--361.
+}
+\seealso{
+  \code{\link{Jinhom}},
+  \code{\link{Fest}},
+  \code{\link{Gest}},
+  \code{\link{Kest}},
+  \code{\link{km.rs}},
+  \code{\link{reduced.sample}},
+  \code{\link{kaplan.meier}}
+}
+\examples{
+   data(cells)
+   J <- Jest(cells, eps=0.01)
+   plot(J, main="cells data")
+   # values are far above J = 1, indicating regular pattern
+
+   data(redwood)
+   J <- Jest(redwood, eps=0.01)
+   plot(J, main="redwood data", legendpos="center")
+   # values are below J = 1, indicating clustered pattern
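+   # the outputs of Fest and Gest are attached as attributes,
+   # as noted in Details (a sketch)
+   plot(attr(J, "G"), main="G function of redwood data")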
+}
+\author{\adrian and \rolf.}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/Jinhom.Rd b/man/Jinhom.Rd
new file mode 100644
index 0000000..6c8901b
--- /dev/null
+++ b/man/Jinhom.Rd
@@ -0,0 +1,182 @@
+\name{Jinhom}
+\alias{Jinhom}
+\title{
+  Inhomogeneous J-function
+}
+\description{
+  Estimates the inhomogeneous \eqn{J} function of
+  a non-stationary point pattern.
+}
+\usage{
+  Jinhom(X, lambda = NULL, lmin = NULL, ...,
+        sigma = NULL, varcov = NULL,
+        r = NULL, breaks = NULL, update = TRUE)
+}
+\arguments{
+  \item{X}{
+    The observed data point pattern,
+    from which an estimate of the inhomogeneous \eqn{J} function
+    will be computed.
+    An object of class \code{"ppp"}
+    or in a format recognised by \code{\link{as.ppp}()}
+  }
+  \item{lambda}{
+    Optional.
+    Values of the estimated intensity function.
+    Either a vector giving the intensity values
+    at the points of the pattern \code{X},
+    a pixel image (object of class \code{"im"}) giving the
+    intensity values at all locations, a fitted point process model
+    (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+  }
+  \item{lmin}{
+    Optional. The minimum possible value of the intensity
+    over the spatial domain. A positive numerical value.
+  }
+  \item{sigma,varcov}{
+    Optional arguments passed to  \code{\link{density.ppp}}
+    to control the smoothing bandwidth, when \code{lambda} is
+    estimated by kernel smoothing.
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{\link{as.mask}} to control
+    the pixel resolution, or passed to \code{\link{density.ppp}}
+    to control the smoothing bandwidth.
+  }
+  \item{r}{
+    vector of values for the argument \eqn{r} at which
+    the inhomogeneous \eqn{J} function
+    should be evaluated. Not normally given by the user;
+    there is a sensible default.
+  }
+  \item{breaks}{
+	This argument is for internal use only.
+  }
+  \item{update}{
+    Logical. If \code{lambda} is a fitted model
+    (class \code{"ppm"} or \code{"kppm"})
+    and \code{update=TRUE} (the default),
+    the model will first be refitted to the data \code{X}
+    (using \code{\link{update.ppm}} or \code{\link{update.kppm}})
+    before the fitted intensity is computed.
+    If \code{update=FALSE}, the fitted intensity of the
+    model will be computed without fitting it to \code{X}.
+  }
+}
+\details{
+  This command computes estimates of the 
+  inhomogeneous \eqn{J}-function (Van Lieshout, 2010)
+  of a point pattern. It is the counterpart, for inhomogeneous
+  spatial point patterns, of the \eqn{J} function
+  for homogeneous point patterns computed by \code{\link{Jest}}.
+
+  The argument \code{X} should be a point pattern
+  (object of class \code{"ppp"}).
+
+  The inhomogeneous \eqn{J} function is computed as
+  \eqn{Jinhom(r) = (1 - Ginhom(r))/(1-Finhom(r))}
+  where \eqn{Ginhom, Finhom} are the inhomogeneous \eqn{G} and \eqn{F}
+  functions computed using the border correction
+  (equations (7) and (6) respectively in Van Lieshout, 2010).
+  
+  The argument \code{lambda} should supply the
+  (estimated) values of the intensity function \eqn{\lambda}{lambda}
+  of the point process. It may be either
+  \describe{
+    \item{a numeric vector}{
+      containing the values
+      of the intensity function at the points of the pattern \code{X}.
+    }
+    \item{a pixel image}{
+      (object of class \code{"im"})
+      assumed to contain the values of the intensity function
+      at all locations in the window. 
+    }
+    \item{a fitted point process model}{
+      (object of class \code{"ppm"} or \code{"kppm"})
+      whose fitted \emph{trend} can be used as the fitted intensity.
+      (If \code{update=TRUE} the model will first be refitted to the
+      data \code{X} before the trend is computed.)
+    }
+    \item{a function}{
+      which can be evaluated to give values of the intensity at
+      any locations.
+    }
+    \item{omitted:}{
+      if \code{lambda} is omitted, then it will be estimated using
+      a `leave-one-out' kernel smoother.
+    }
+  }
+  If \code{lambda} is a numeric vector, then its length should
+  be equal to the number of points in the pattern \code{X}.
+  The value \code{lambda[i]} is assumed to be the
+  (estimated) value of the intensity
+  \eqn{\lambda(x_i)}{lambda(x[i])} for
+  the point \eqn{x_i}{x[i]} of the pattern \eqn{X}.
+  Each value must be a positive number; \code{NA}'s are not allowed.
+
+  If \code{lambda} is a pixel image, the domain of the image should
+  cover the entire window of the point pattern. If it does not (which
+  may occur near the boundary because of discretisation error),
+  then the missing pixel values 
+  will be obtained by applying a Gaussian blur to \code{lambda} using
+  \code{\link{blur}}, then looking up the values of this blurred image
+  for the missing locations. 
+  (A warning will be issued in this case.)
+
+  If \code{lambda} is a function, then it will be evaluated in the
+  form \code{lambda(x,y)} where \code{x} and \code{y} are vectors
+  of coordinates of the points of \code{X}. It should return a numeric
+  vector with length equal to the number of points in \code{X}.
+
+  If \code{lambda} is omitted, then it will be estimated using
+  a `leave-one-out' kernel smoother,
+  as described in Baddeley, \ifelse{latex}{\out{M\o ller}}{Moller}
+  and Waagepetersen (2000).  The estimate \code{lambda[i]} for the
+  point \code{X[i]} is computed by removing \code{X[i]} from the
+  point pattern, applying kernel smoothing to the remaining points using
+  \code{\link{density.ppp}}, and evaluating the smoothed intensity
+  at the point \code{X[i]}. The smoothing kernel bandwidth is controlled
+  by the arguments \code{sigma} and \code{varcov}, which are passed to
+  \code{\link{density.ppp}} along with any extra arguments.
+}
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+}
+\references{
+  Baddeley, A.,
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2000)
+  Non- and semiparametric estimation of interaction in
+  inhomogeneous point patterns.
+  \emph{Statistica Neerlandica} \bold{54}, 329--350.
+
+  Van Lieshout, M.N.M. and Baddeley, A.J. (1996)
+  A nonparametric measure of spatial interaction in point patterns.
+  \emph{Statistica Neerlandica} \bold{50}, 344--361.
+
+  Van Lieshout, M.N.M. (2010)
+  A J-function for inhomogeneous point processes.
+  \emph{Statistica Neerlandica} \bold{65}, 183--201.
+}
+\seealso{
+  \code{\link{Ginhom}},
+  \code{\link{Finhom}},
+  \code{\link{Jest}}
+}
+\examples{
+  \dontrun{
+    plot(Jinhom(swedishpines, sigma=bw.diggle, adjust=2))
+  }
+  plot(Jinhom(swedishpines, sigma=10))
+}
+\author{
+  Original code by Marie-Colette van Lieshout.
+  C implementation and R adaptation by \adrian
+  
+  
+  and \ege.
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Jmulti.Rd b/man/Jmulti.Rd
new file mode 100644
index 0000000..20c3bd7
--- /dev/null
+++ b/man/Jmulti.Rd
@@ -0,0 +1,164 @@
+\name{Jmulti}
+\alias{Jmulti}
+\title{
+  Marked J Function
+}
+\description{
+  For a marked point pattern, 
+  estimate the multitype \eqn{J} function
+  summarising dependence between the
+  points in subset \eqn{I}
+  and those in subset \eqn{J}.
+}
+\usage{
+  Jmulti(X, I, J, eps=NULL, r=NULL, breaks=NULL, \dots, disjoint=NULL,
+         correction=NULL)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the multitype distance distribution function
+    \eqn{J_{IJ}(r)}{J[IJ](r)} will be computed.
+    It must be a marked point pattern.
+    See under Details.
+  }
+  \item{I}{Subset of points of \code{X} from which distances are
+    measured. See Details.
+  }
+  \item{J}{Subset of points in \code{X} to which distances are measured.
+    See Details.
+  }
+  \item{eps}{A positive number.
+    The pixel resolution of the discrete approximation to Euclidean
+    distance (see \code{\link{Jest}}). There is a sensible default.
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the distribution function
+    \eqn{J_{IJ}(r)}{J[IJ](r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{breaks}{
+	This argument is for internal use only.
+  }
+  \item{\dots}{Ignored.}
+  \item{disjoint}{Optional flag indicating whether
+    the subsets \code{I} and \code{J} are disjoint.
+    If missing, this value will be computed by inspecting the
+    vectors \code{I} and \code{J}.
+  }
+  \item{correction}{
+    Optional. Character string specifying the edge correction(s)
+    to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"},
+    \code{"Hanisch"} and \code{"best"}.
+    Alternatively \code{correction="all"} selects all options.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+
+  Essentially a data frame containing six numeric columns 
+  \item{r}{the values of the argument \eqn{r} 
+    at which the function \eqn{J_{IJ}(r)}{J[IJ](r)} has been  estimated
+  }
+  \item{rs}{the ``reduced sample'' or ``border correction''
+    estimator of \eqn{J_{IJ}(r)}{J[IJ](r)}
+  }
+  \item{km}{the spatial Kaplan-Meier estimator of \eqn{J_{IJ}(r)}{J[IJ](r)}
+  }
+  \item{han}{the Hanisch-style estimator of \eqn{J_{IJ}(r)}{J[IJ](r)}
+  }
+  \item{un}{the uncorrected estimate of \eqn{J_{IJ}(r)}{J[IJ](r)},
+    formed by taking the ratio of uncorrected empirical estimators
+    of \eqn{1 - G_{IJ}(r)}{1 - G[IJ](r)}
+    and \eqn{1 - F_{J}(r)}{1 - F[J](r)}; see
+    \code{\link{Gmulti}} and \code{\link{Fest}}.
+  }
+  \item{theo}{the theoretical value of \eqn{J_{IJ}(r)}{J[IJ](r)}
+    for a marked Poisson process with the same estimated intensity,
+    namely 1.
+  }
+}
+\details{
+  The function \code{Jmulti}
+  generalises \code{\link{Jest}} (for unmarked point
+  patterns) and \code{\link{Jdot}} and \code{\link{Jcross}} (for
+  multitype point patterns) to arbitrary marked point patterns.
+
+  Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly
+  overlapping, of a marked point process. Define
+  \deqn{J_{IJ}(r) = \frac{1 - G_{IJ}(r)}{1 - F_J(r)}}{
+    J[IJ](r) = (1 - G[IJ](r))/(1 - F[J](r))}
+  where \eqn{F_J(r)}{F[J](r)} is the cumulative distribution function of
+  the distance from a fixed location to the nearest point
+  of \eqn{X_J}{X[J]}, and \eqn{G_{IJ}(r)}{G[IJ](r)}
+  is the distribution function of the distance
+  from a typical point of  \eqn{X_I}{X[I]} to the nearest distinct point of
+  \eqn{X_J}{X[J]}. 
+
+  The argument \code{X} must be a point pattern (object of class
+  \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
+
+  The arguments \code{I} and \code{J} specify two subsets of the
+  point pattern. They may be any type of subset indices, for example,
+  logical vectors of length equal to \code{npoints(X)},
+  or integer vectors with entries in the range 1 to
+  \code{npoints(X)}, or negative integer vectors.
+
+  Alternatively, \code{I} and \code{J} may be \bold{functions}
+  that will be applied to the point pattern \code{X} to obtain
+  index vectors. If \code{I} is a function, then evaluating
+  \code{I(X)} should yield a valid subset index. This option
+  is useful when generating simulation envelopes using
+  \code{\link{envelope}}.
+
+  It is assumed that \code{X} can be treated
+  as a realisation of a stationary (spatially homogeneous) 
+  random spatial point process in the plane, observed through
+  a bounded window.
+  The window (which is specified in \code{X} as \code{Window(X)})
+  may have arbitrary shape.
+  Biases due to edge effects are
+  treated in the same manner as in \code{\link{Jest}}.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{J_{IJ}(r)}{J[IJ](r)} should be evaluated. 
+  It is also used to determine the breakpoints
+  (in the sense of \code{\link{hist}})
+  for the computation of histograms of distances. The reduced-sample and
+  Kaplan-Meier estimators are computed from histogram counts. 
+  In the case of the Kaplan-Meier estimator this introduces a discretisation
+  error which is controlled by the fineness of the breakpoints.
+
+  First-time users are strongly advised not to specify \code{r}.
+  However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, 
+  and \code{max(r)} must be larger than the radius of the largest disc 
+  contained in the window. Furthermore, the successive entries of \code{r}
+  must be finely spaced.
+}
+\references{
+  Van Lieshout, M.N.M. and Baddeley, A.J. (1999)
+  Indices of dependence between types in multivariate point patterns.
+  \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532.
+
+}
+\seealso{
+ \code{\link{Jcross}},
+ \code{\link{Jdot}},
+ \code{\link{Jest}}
+}
+\examples{
+    trees <- longleaf
+    # Longleaf Pine data: marks represent diameter
+    \testonly{
+        trees <- trees[seq(1,npoints(trees), by=50)]
+    }
+    Jm <- Jmulti(trees, marks(trees) <= 15, marks(trees) >= 25)
+    plot(Jm)
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/K3est.Rd b/man/K3est.Rd
new file mode 100644
index 0000000..c0cc760
--- /dev/null
+++ b/man/K3est.Rd
@@ -0,0 +1,116 @@
+\name{K3est}
+\Rdversion{1.1}
+\alias{K3est}
+\title{
+  K-function of a Three-Dimensional Point Pattern
+}
+\description{
+  Estimates the \eqn{K}-function from a three-dimensional point pattern.
+}
+\usage{
+  K3est(X, \dots,
+        rmax = NULL, nrval = 128,
+        correction = c("translation", "isotropic"),
+        ratio=FALSE)
+}
+\arguments{
+  \item{X}{
+    Three-dimensional point pattern (object of class \code{"pp3"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{rmax}{
+    Optional. Maximum value of argument \eqn{r} for which
+    \eqn{K_3(r)}{K3(r)} will be estimated. 
+  }
+  \item{nrval}{
+    Optional. Number of values of \eqn{r} for which
+    \eqn{K_3(r)}{K3(r)} will be estimated. A large value of \code{nrval}
+    is required to avoid discretisation effects.
+  }
+  \item{correction}{
+    Optional. Character vector specifying the edge correction(s)
+    to be applied. See Details.
+  }
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    each edge-corrected estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+}
+\details{
+  For a stationary point process \eqn{\Phi}{Phi} in three-dimensional
+  space, the three-dimensional \eqn{K} function
+  is
+  \deqn{
+    K_3(r) = \frac 1 \lambda E(N(\Phi, x, r) \mid x \in \Phi)
+  }{
+    K3(r) = (1/lambda) E(N(Phi,x,r) | x in Phi)
+  }
+  where \eqn{\lambda}{lambda} is the intensity of the process
+  (the expected number of points per unit volume) and
+  \eqn{N(\Phi,x,r)}{N(Phi,x,r)} is the number of points of
+  \eqn{\Phi}{Phi}, other than \eqn{x} itself, which fall within a
+  distance \eqn{r} of \eqn{x}. This is the three-dimensional
+  generalisation of Ripley's \eqn{K} function for two-dimensional
+  point processes (Ripley, 1977).
+  
+  The three-dimensional point pattern \code{X} is assumed to be a
+  partial realisation of a stationary point process \eqn{\Phi}{Phi}.
+  The distance between each pair of distinct points is computed.
+  The empirical cumulative distribution
+  function of these values, with appropriate edge corrections, is
+  renormalised to give the estimate of \eqn{K_3(r)}{K3(r)}.
+
+  The available edge corrections are:
+  \describe{
+    \item{\code{"translation"}:}{
+      the Ohser translation correction estimator
+      (Ohser, 1983; Baddeley et al, 1993)
+    }
+    \item{\code{"isotropic"}:}{
+      the three-dimensional counterpart of
+      Ripley's isotropic edge correction (Ripley, 1977; Baddeley et al, 1993).
+    }
+  }
+  Alternatively \code{correction="all"} selects all options.
+}
+\value{
+  A function value table (object of class \code{"fv"}) that can be
+  plotted, printed or coerced to a data frame containing the function values.
+}
+\references{
+  Baddeley, A.J., Moyeed, R.A., Howard, C.V. and Boyde, A. (1993)
+  Analysis of a three-dimensional point pattern with replication.
+  \emph{Applied Statistics} \bold{42}, 641--668.
+
+  Ohser, J. (1983)
+  On estimators for the reduced second moment measure of
+  point processes. \emph{Mathematische Operationsforschung und
+  Statistik, series Statistics}, \bold{14}, 63--71.
+
+  Ripley, B.D. (1977)
+  Modelling spatial patterns (with discussion).
+  \emph{Journal of the Royal Statistical Society, Series B},
+  \bold{39}, 172--212.
+}
+\author{
+  \adrian
+  
+  
+  and Rana Moyeed.
+}
+\seealso{
+  \code{\link{F3est}},
+  \code{\link{G3est}},
+  \code{\link{pcf3est}}
+}
+\examples{
+  X <- rpoispp3(42)
+  Z <- K3est(X)
+  if(interactive()) plot(Z)
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Kcom.Rd b/man/Kcom.Rd
new file mode 100644
index 0000000..493f6f9
--- /dev/null
+++ b/man/Kcom.Rd
@@ -0,0 +1,245 @@
+\name{Kcom}
+\Rdversion{1.1}
+\alias{Kcom}
+\title{
+  Model Compensator of K Function
+}
+\description{
+  Given a point process model fitted to a point pattern dataset,
+  this function computes the \emph{compensator} 
+  of the \eqn{K} function based on the fitted model 
+  (as well as the usual nonparametric estimates
+  of \eqn{K} based on the data alone).
+  Comparison between the nonparametric and model-compensated \eqn{K}
+  functions serves as a diagnostic for the model.
+}
+\usage{
+Kcom(object, r = NULL, breaks = NULL, ...,
+     correction = c("border", "isotropic", "translate"),
+     conditional = !is.poisson(object),
+     restrict = FALSE,
+     model = NULL,
+     trend = ~1, interaction = Poisson(), rbord = reach(interaction),
+     compute.var = TRUE,
+     truecoef = NULL, hi.res = NULL)
+}
+\arguments{
+  \item{object}{
+    Object to be analysed.
+    Either a fitted point process model (object of class \code{"ppm"})
+    or a point pattern (object of class \code{"ppp"})
+    or quadrature scheme (object of class \code{"quad"}).
+}
+  \item{r}{
+    Optional. 
+    Vector of values of the argument \eqn{r} at which the
+    function \eqn{K(r)} should be computed.
+    This argument is usually not specified. There is a sensible default.
+  }
+  \item{breaks}{
+	This argument is for advanced use only.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{correction}{
+    Optional vector of character strings specifying the edge
+    correction(s) to be used. See \code{\link{Kest}} for options.
+  }
+  \item{conditional}{
+    Optional. Logical value indicating whether to 
+    compute the estimates for the conditional case. See Details.
+  }
+  \item{restrict}{
+    Logical value indicating
+    whether to compute the restriction estimator (\code{restrict=TRUE}) or
+    the reweighting estimator (\code{restrict=FALSE}, the default).
+    Applies only if \code{conditional=TRUE}.
+    See Details.
+  }
+  \item{model}{
+    Optional. A fitted point process model (object of
+    class \code{"ppm"}) to be re-fitted to the data
+    using \code{\link{update.ppm}}, if \code{object} is a point pattern.
+    Overrides the arguments \code{trend}, \code{interaction} and \code{rbord}.
+  }
+  \item{trend,interaction,rbord}{
+    Optional. Arguments passed to \code{\link{ppm}}
+    to fit a point process model to the data,
+    if \code{object} is a point pattern.
+    See \code{\link{ppm}} for details.
+  }
+  \item{compute.var}{
+    Logical value indicating whether to compute the
+    Poincare variance bound for the residual \eqn{K} function
+    (calculation is only implemented for the isotropic correction).
+  }
+  \item{truecoef}{
+    Optional. Numeric vector. If present, this will be treated as 
+    if it were the true coefficient vector of the point process model,
+    in calculating the diagnostic. Incompatible with \code{hi.res}.
+  }
+  \item{hi.res}{
+    Optional. List of parameters passed to \code{\link{quadscheme}}.
+    If this argument is present, the model will be
+    re-fitted at high resolution as specified by these parameters.
+    The coefficients
+    of the resulting fitted model will be taken as the true coefficients.
+    Then the diagnostic will be computed for the default
+    quadrature scheme, but using the high resolution coefficients.
+  }
+}
+\details{
+  This command provides a diagnostic for the goodness-of-fit of
+  a point process model fitted to a point pattern dataset.
+  It computes an estimate of the \eqn{K} function of the
+  dataset, together with a \emph{model compensator} of the
+  \eqn{K} function. The two functions should be approximately
+  equal if the model is a good fit to the data.
+
+  The first argument, \code{object}, is usually a fitted point process model
+  (object of class \code{"ppm"}), obtained from the
+  model-fitting function \code{\link{ppm}}.
+
+  For convenience, \code{object} can also be a point pattern
+  (object of class \code{"ppp"}). In that case, a point process
+  model will be fitted to it, by calling \code{\link{ppm}} using the arguments
+  \code{trend} (for the first order trend),
+  \code{interaction} (for the interpoint interaction)
+  and \code{rbord} (for the erosion distance in the border correction
+  for the pseudolikelihood). See \code{\link{ppm}} for details
+  of these arguments.
+
+  The algorithm first extracts the original point pattern dataset
+  (to which the model was fitted) and computes the 
+  standard nonparametric estimates of the \eqn{K} function.
+  It then also computes the \emph{model compensator} of the
+  \eqn{K} function. The different function estimates are returned
+  as columns in a data frame (of class \code{"fv"}).
+
+  The argument \code{correction} determines the edge correction(s)
+  to be applied. See \code{\link{Kest}} for explanation of the principle
+  of edge corrections. The following table gives the options
+  for the \code{correction} argument, and the corresponding
+  column names in the result:
+
+  \tabular{llll}{
+    \code{correction} \tab
+    \bold{description of correction} \tab
+    \bold{nonparametric} \tab \bold{compensator} \cr
+    \code{"isotropic"} \tab Ripley isotropic correction
+    \tab \code{iso} \tab \code{icom} \cr
+    \code{"translate"} \tab Ohser-Stoyan translation correction
+    \tab \code{trans} \tab \code{tcom} \cr
+    \code{"border"} \tab border correction
+    \tab \code{border} \tab \code{bcom}
+  }
+
+  The nonparametric estimates can all be expressed in the form
+  \deqn{
+    \hat K(r) = \sum_i \sum_{j < i} e(x_i,x_j,r,x) I\{ d(x_i,x_j) \le r \}
+  }{
+    K(r) = sum[i] sum[j < i] e(x[i], x[j], r, x) I( d(x[i],x[j]) <= r )
+  }
+  where \eqn{x_i}{x[i]} is the \eqn{i}-th data point,
+  \eqn{d(x_i,x_j)}{d(x[i],x[j])} is the distance between \eqn{x_i}{x[i]} and
+  \eqn{x_j}{x[j]}, and \eqn{e(x_i,x_j,r,x)}{e(x[i],x[j],r,x)} is
+  a term that serves to correct edge effects and to re-normalise the
+  sum. The corresponding model compensator is
+  \deqn{
+    {\bf C} \, \tilde K(r) = \int_W \lambda(u,x) \sum_j e(u,x_j,r,x \cup u) I\{ d(u,x_j) \le r\} \, {\rm d}u
+  }{
+    C K(r) = integral[u] lambda(u,x) sum[j] e(u, x[j], r, x+u) I( d(u,x[j]) <= r )
+  }
+  where the integral is over all locations \eqn{u} in
+  the observation window, 
+  \eqn{\lambda(u,x)}{lambda(u,x)} denotes the conditional intensity
+  of the model at the location \eqn{u}, and \eqn{x \cup u}{x+u} denotes the
+  data point pattern \eqn{x} augmented by adding the extra point \eqn{u}.
+  
+  If the fitted model is a Poisson point process, then the formulae above
+  are exactly what is computed. If the fitted model is not Poisson, the 
+  formulae above are modified slightly to handle edge effects.
+
+  The modification is determined by the arguments
+  \code{conditional} and \code{restrict}.
+  The value of \code{conditional} defaults to \code{FALSE} for Poisson models
+  and \code{TRUE} for non-Poisson models.
+  If \code{conditional=FALSE} then the formulae above are not modified.
+  If \code{conditional=TRUE}, then the algorithm calculates
+  the \emph{restriction estimator} if \code{restrict=TRUE},
+  and calculates the \emph{reweighting estimator} if \code{restrict=FALSE}.
+  See Appendix D of Baddeley, Rubak and \ifelse{latex}{\out{M\o ller}}{Moller} (2011).
+  Thus, by default, the reweighting estimator is computed
+  for non-Poisson models.
+  
+  The nonparametric estimates of \eqn{K(r)} are approximately unbiased
+  estimates of the \eqn{K}-function, assuming the point process is
+  stationary. The model compensators are unbiased estimates
+  \emph{of the mean values of the corresponding nonparametric estimates},
+  assuming the model is true. Thus, if the model is a good fit, the mean value
+  of the difference between the nonparametric estimates and model compensators
+  is approximately zero.
+}
+\value{
+  A function value table (object of class \code{"fv"}),
+  essentially a data frame of function values.
+  There is a plot method for this class. See \code{\link{fv.object}}.
+}
+\references{
+  Baddeley, A., Rubak, E. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2011)
+  Score, pseudo-score and residual
+  diagnostics for spatial point process models.
+  \emph{Statistical Science} \bold{26}, 613--646.
+}
+\author{
+  \adrian
+  
+  
+  \ege and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}.
+}
+\seealso{
+  Related functions: 
+  \code{\link{Kres}},
+  \code{\link{Kest}}.
+
+  Alternative functions:
+  \code{\link{Gcom}},
+  \code{\link{psstG}}, \code{\link{psstA}},  \code{\link{psst}}.
+
+  Point process models: \code{\link{ppm}}.
+}
+\examples{
+    fit0 <- ppm(cells, ~1) # uniform Poisson
+    \testonly{fit0 <- ppm(cells, ~1, nd=16)}
+
+    if(interactive()) {
+      plot(Kcom(fit0))
+# compare the isotropic-correction estimates
+      plot(Kcom(fit0), cbind(iso, icom) ~ r)
+# uniform Poisson is clearly not correct
+    }
+
+    fit1 <- ppm(cells, ~1, Strauss(0.08))
+    \testonly{fit1 <- ppm(cells, ~1, Strauss(0.08), nd=16)}
+    K1 <- Kcom(fit1)
+    K1
+    if(interactive()) {
+      plot(K1)
+      plot(K1, cbind(iso, icom) ~ r)
+      plot(K1, cbind(trans, tcom) ~ r)
+# how to plot the difference between nonparametric estimates and compensators
+      plot(K1, iso - icom ~ r)
+# fit looks approximately OK; try adjusting interaction distance
+    }
+    fit2 <- ppm(cells, ~1, Strauss(0.12))
+    \testonly{fit2 <- ppm(cells, ~1, Strauss(0.12), nd=16)}
+    K2 <- Kcom(fit2)
+    if(interactive()) {
+      plot(K2)
+      plot(K2, cbind(iso, icom) ~ r)
+      plot(K2, iso - icom ~ r)
+    }
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Kcross.Rd b/man/Kcross.Rd
new file mode 100644
index 0000000..a248f7e
--- /dev/null
+++ b/man/Kcross.Rd
@@ -0,0 +1,217 @@
+\name{Kcross}
+\alias{Kcross}
+\title{
+  Multitype K Function (Cross-type)
+}
+\description{
+  For a multitype point pattern, 
+  estimate the multitype \eqn{K} function
+  which counts the expected number of points of type \eqn{j}
+  within a given distance of a point of type \eqn{i}.
+}
+\usage{
+Kcross(X, i, j, r=NULL, breaks=NULL, correction,
+       \dots, ratio=FALSE, from, to )
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the cross type \eqn{K} function
+    \eqn{K_{ij}(r)}{Kij(r)} will be computed.
+    It must be a multitype point pattern (a marked point pattern
+    whose marks are a factor). See under Details.
+  }
+  \item{i}{The type (mark value)
+    of the points in \code{X} from which distances are measured.
+    A character string (or something that will be converted to a
+    character string).
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{j}{The type (mark value)
+    of the points in \code{X} to which distances are measured.
+    A character string (or something that will be
+    converted to a character string).
+    Defaults to the second level of \code{marks(X)}.
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the distribution function
+    \eqn{K_{ij}(r)}{Kij(r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{breaks}{
+	This argument is for internal use only.
+  }
+  \item{correction}{
+    A character vector containing any selection of the
+    options \code{"border"}, \code{"bord.modif"},
+    \code{"isotropic"}, \code{"Ripley"}, \code{"translate"},
+    \code{"translation"},
+    \code{"none"} or \code{"best"}.
+    It specifies the edge correction(s) to be applied.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{\dots}{Ignored.}
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    each edge-corrected estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+  \item{from,to}{
+    An alternative way to specify \code{i} and \code{j} respectively.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+
+  Essentially a data frame containing numeric columns 
+  \item{r}{the values of the argument \eqn{r} 
+    at which the function \eqn{K_{ij}(r)}{Kij(r)} has been  estimated
+  }
+  \item{theo}{the theoretical value of  \eqn{K_{ij}(r)}{Kij(r)}
+    for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2}
+  }
+  together with a column or columns named 
+  \code{"border"}, \code{"bord.modif"},
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the function \eqn{K_{ij}(r)}{Kij(r)}
+  obtained by the edge corrections named.
+
+  If \code{ratio=TRUE} then the return value also has two
+  attributes called \code{"numerator"} and \code{"denominator"}
+  which are \code{"fv"} objects
+  containing the numerators and denominators of each
+  estimate of \eqn{K(r)}. 
+}
+\details{
+  This function \code{Kcross} and its companions
+  \code{\link{Kdot}} and \code{\link{Kmulti}}
+  are generalisations of the function \code{\link{Kest}}
+  to multitype point patterns. 
+
+  A multitype point pattern is a spatial pattern of
+  points classified into a finite number of possible
+  ``colours'' or ``types''. In the \pkg{spatstat} package,
+  a multitype pattern is represented as a single 
+  point pattern object in which the points carry marks,
+  and the mark value attached to each point
+  determines the type of that point.
+  
+  The argument \code{X} must be a point pattern (object of class
+  \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
+  It must be a marked point pattern, and the mark vector
+  \code{X$marks} must be a factor.
+
+  The arguments \code{i} and \code{j} will be interpreted as
+  levels of the factor \code{X$marks}. 
+  If \code{i} and \code{j} are missing, they default to the first
+  and second level of the marks factor, respectively.
+  
+  The ``cross-type'' (type \eqn{i} to type \eqn{j})
+  \eqn{K} function 
+  of a stationary multitype point process \eqn{X} is defined so that
+  \eqn{\lambda_j K_{ij}(r)}{lambda[j] Kij(r)} equals the expected number of
+  additional random points of type \eqn{j}
+  within a distance \eqn{r} of a
+  typical point of type \eqn{i} in the process \eqn{X}.
+  Here \eqn{\lambda_j}{lambda[j]}
+  is the intensity of the type \eqn{j} points,
+  i.e. the expected number of points of type \eqn{j} per unit area.
+  The function \eqn{K_{ij}}{Kij} is determined by the 
+  second order moment properties of \eqn{X}.
+
+  An estimate of \eqn{K_{ij}(r)}{Kij(r)}
+  is a useful summary statistic in exploratory data analysis
+  of a multitype point pattern.
+  If the process of type \eqn{i} points
+  were independent of the process of type \eqn{j} points,
+  then \eqn{K_{ij}(r)}{Kij(r)} would equal \eqn{\pi r^2}{pi * r^2}.
+  Deviations between the empirical \eqn{K_{ij}}{Kij} curve
+  and the theoretical curve \eqn{\pi r^2}{pi * r^2} 
+  may suggest dependence between the points of types \eqn{i} and \eqn{j}.
+
+  This algorithm estimates the distribution function \eqn{K_{ij}(r)}{Kij(r)} 
+  from the point pattern \code{X}. It assumes that \code{X} can be treated
+  as a realisation of a stationary (spatially homogeneous) 
+  random spatial point process in the plane, observed through
+  a bounded window.
+  The window (which is specified in \code{X} as \code{Window(X)})
+  may have arbitrary shape.
+  Biases due to edge effects are
+  treated in the same manner as in \code{\link{Kest}},
+  using the border correction.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. 
+  The values of \eqn{r} must be increasing nonnegative numbers
+  and the maximum \eqn{r} value must not exceed the radius of the
+  largest disc contained in the window.
+
+  The pair correlation function can also be applied to the
+  result of \code{Kcross}; see \code{\link{pcf}}.
+}
+\references{
+  Cressie, N.A.C. \emph{Statistics for spatial data}.
+    John Wiley and Sons, 1991.
+
+  Diggle, P.J. \emph{Statistical analysis of spatial point patterns}.
+    Academic Press, 1983.
+
+  Harkness, R.D. and Isham, V. (1983)
+  A bivariate spatial point pattern of ants' nests.
+  \emph{Applied Statistics} \bold{32}, 293--303.
+ 
+  Lotwick, H. W. and Silverman, B. W. (1982).
+  Methods for analysing spatial processes of several types of points.
+  \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413.
+
+  Ripley, B.D. \emph{Statistical inference for spatial processes}.
+  Cambridge University Press, 1988.
+
+  Stoyan, D., Kendall, W.S. and Mecke, J.
+  \emph{Stochastic geometry and its applications}.
+  2nd edition. Springer Verlag, 1995.
+
+}
+\section{Warnings}{
+  The arguments \code{i} and \code{j} are always interpreted as
+  levels of the factor \code{X$marks}. They are converted to character
+  strings if they are not already character strings.
+  The value \code{i=1} does \bold{not}
+  refer to the first level of the factor.
+}
+\seealso{
+ \code{\link{Kdot}},
+ \code{\link{Kest}},
+ \code{\link{Kmulti}},
+ \code{\link{pcf}}
+}
+\examples{
+    # amacrine cells data
+    K01 <- Kcross(amacrine, "off", "on") 
+    plot(K01)
+
+    \testonly{
+    K01 <- Kcross(amacrine, "off", "on", ratio=TRUE) 
+    }
+    \dontrun{
+    K10 <- Kcross(amacrine, "on", "off")
+
+    # synthetic example: point pattern with marks 0 and 1
+    pp <- runifpoispp(50)
+    pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE))
+    K <- Kcross(pp, "0", "1")
+    K <- Kcross(pp, 0, 1) # equivalent
+    }
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/Kcross.inhom.Rd b/man/Kcross.inhom.Rd
new file mode 100644
index 0000000..8dc4bbd
--- /dev/null
+++ b/man/Kcross.inhom.Rd
@@ -0,0 +1,321 @@
+\name{Kcross.inhom}
+\alias{Kcross.inhom}
+\title{
+  Inhomogeneous Cross K Function
+}
+\description{
+  For a multitype point pattern, 
+  estimate the inhomogeneous version of the cross \eqn{K} function,
+  which counts the expected number of points of type \eqn{j}
+  within a given distance of a point of type \eqn{i},
+  adjusted for spatially varying intensity.
+}
+\usage{
+Kcross.inhom(X, i, j, lambdaI=NULL, lambdaJ=NULL, \dots,  r=NULL, breaks=NULL,
+         correction = c("border", "isotropic", "Ripley", "translate"),
+         sigma=NULL, varcov=NULL,
+         lambdaIJ=NULL,
+         lambdaX=NULL, update=TRUE, leaveoneout=TRUE)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the inhomogeneous cross type \eqn{K} function
+    \eqn{K_{ij}(r)}{Kij(r)} will be computed.
+    It must be a multitype point pattern (a marked point pattern
+    whose marks are a factor). See under Details.
+  }
+  \item{i}{The type (mark value)
+    of the points in \code{X} from which distances are measured.
+    A character string (or something that will be converted to a
+    character string).
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{j}{The type (mark value)
+    of the points in \code{X} to which distances are measured.
+    A character string (or something that will be
+    converted to a character string).
+    Defaults to the second level of \code{marks(X)}.
+  }
+  \item{lambdaI}{
+    Optional.
+    Values of the estimated intensity of the sub-process of
+    points of type \code{i}.
+    Either a pixel image (object of class \code{"im"}),
+    a numeric vector containing the intensity values
+    at each of the type \code{i} points in \code{X},
+    a fitted point process model
+    (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}),
+    or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+  }
+  \item{lambdaJ}{
+    Optional.
+    Values of the estimated intensity of the sub-process of
+    points of type \code{j}.
+    Either a pixel image (object of class \code{"im"}),
+    a numeric vector containing the intensity values
+    at each of the type \code{j} points in \code{X},
+    a fitted point process model
+    (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}),
+    or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location. 
+  }
+  \item{r}{
+      Optional. Numeric vector giving the values of the argument \eqn{r}
+      at which the cross K function
+      \eqn{K_{ij}(r)}{Kij(r)} should be evaluated.
+      There is a sensible default.
+      First-time users are strongly advised not to specify this argument.
+      See below for important conditions on \eqn{r}.
+  }
+  \item{breaks}{
+	This argument is for advanced use only.
+  }
+  \item{correction}{
+    A character vector containing any selection of the
+    options \code{"border"}, \code{"bord.modif"},
+    \code{"isotropic"}, \code{"Ripley"} ,\code{"translate"},
+    \code{"translation"},
+    \code{"none"} or \code{"best"}.
+    It specifies the edge correction(s) to be applied.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{sigma}{
+    Standard deviation of isotropic Gaussian smoothing kernel,
+    used in computing leave-one-out kernel estimates of
+    \code{lambdaI}, \code{lambdaJ} if they are omitted.
+  }
+  \item{varcov}{
+    Variance-covariance matrix of anisotropic Gaussian kernel,
+    used in computing leave-one-out kernel estimates of
+    \code{lambdaI}, \code{lambdaJ} if they are omitted.
+    Incompatible with \code{sigma}.
+  }
+  \item{lambdaIJ}{
+    Optional. A matrix containing estimates of the
+    product of the intensities \code{lambdaI} and \code{lambdaJ}
+    for each pair of points of types \code{i} and \code{j} respectively.
+  }
+  \item{lambdaX}{
+    Optional. Values of the intensity for all points of \code{X}.
+    Either a pixel image (object of class \code{"im"}),
+    a numeric vector containing the intensity values
+    at each of the points in \code{X},
+    a fitted point process model
+    (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}),
+    or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+    If present, this argument overrides both \code{lambdaI} and
+    \code{lambdaJ}.
+  }
+  \item{update}{
+    Logical value indicating what to do when
+    \code{lambdaI}, \code{lambdaJ} or \code{lambdaX}
+    is a fitted point process model
+    (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}).
+    If \code{update=TRUE} (the default),
+    the model will first be refitted to the data \code{X}
+    (using \code{\link{update.ppm}} or \code{\link{update.kppm}})
+    before the fitted intensity is computed.
+    If \code{update=FALSE}, the fitted intensity of the
+    model will be computed without re-fitting it to \code{X}.
+  }
+  \item{leaveoneout}{
+    Logical value (passed to \code{\link{density.ppp}} or
+    \code{\link{fitted.ppm}}) specifying whether to use a
+    leave-one-out rule when calculating the intensity.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+
+  Essentially a data frame containing numeric columns 
+  \item{r}{the values of the argument \eqn{r} 
+    at which the function \eqn{K_{ij}(r)}{Kij(r)} has been  estimated
+  }
+  \item{theo}{the theoretical value of  \eqn{K_{ij}(r)}{Kij(r)}
+    for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2}
+  }
+  together with a column or columns named 
+  \code{"border"}, \code{"bord.modif"},
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the function \eqn{K_{ij}(r)}{Kij(r)}
+  obtained by the edge corrections named.
+}
+\details{
+  This is a generalisation of the function \code{\link{Kcross}}
+  to include an adjustment for spatially inhomogeneous intensity,
+  in a manner similar to the function \code{\link{Kinhom}}.
+
+  The inhomogeneous cross-type \eqn{K} function is described by
+  \ifelse{latex}{\out{M\o ller}}{Moller}
+  and Waagepetersen (2003, pages 48--49 and 51--53).
+  
+  Briefly, given a multitype point process, suppose the sub-process
+  of points of type \eqn{j} has intensity function
+  \eqn{\lambda_j(u)}{lambda[j](u)} at spatial locations \eqn{u}.
+  Suppose we place a mass of \eqn{1/\lambda_j(\zeta)}{1/lambda[j](z)}
+  at each point \eqn{\zeta}{z} of type \eqn{j}. Then the expected total
+  mass per unit area is 1. The
+  inhomogeneous ``cross-type'' \eqn{K} function 
+  \eqn{K_{ij}^{\mbox{inhom}}(r)}{K[ij]inhom(r)} equals the expected
+  total mass within a radius \eqn{r} of a point of the process
+  of type \eqn{i}. 
+  
+  If the process of type \eqn{i} points
+  were independent of the process of type \eqn{j} points,
+  then \eqn{K_{ij}^{\mbox{inhom}}(r)}{K[ij]inhom(r)}
+  would equal \eqn{\pi r^2}{pi * r^2}.
+  Deviations between the empirical \eqn{K_{ij}}{Kij} curve
+  and the theoretical curve \eqn{\pi r^2}{pi * r^2} 
+  suggest dependence between the points of types \eqn{i} and \eqn{j}.
+
+  The argument \code{X} must be a point pattern (object of class
+  \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
+  It must be a marked point pattern, and the mark vector
+  \code{X$marks} must be a factor.
+
+  The arguments \code{i} and \code{j} will be interpreted as
+  levels of the factor \code{X$marks}. (Warning: this means that
+  an integer value \code{i=3} will be interpreted as the number 3,
+  \bold{not} the 3rd smallest level).
+  If \code{i} and \code{j} are missing, they default to the first
+  and second level of the marks factor, respectively.
+
+  The argument \code{lambdaI} supplies the values
+  of the intensity of the sub-process of points of type \code{i}.
+  It may be either
+  \describe{
+    \item{a pixel image}{(object of class \code{"im"}) which
+      gives the values of the type \code{i} intensity
+      at all locations in the window containing \code{X};
+    }
+    \item{a numeric vector}{containing the values of the
+      type \code{i} intensity evaluated only
+      at the data points of type \code{i}. The length of this vector
+      must equal the number of type \code{i} points in \code{X}.
+    }
+    \item{a function}{
+      which can be evaluated to give values of the intensity at
+      any locations.
+    }
+     \item{a fitted point process model}{
+      (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"})
+      whose fitted \emph{trend} can be used as the fitted intensity.
+      (If \code{update=TRUE} the model will first be refitted to the
+      data \code{X} before the trend is computed.)
+    }
+    \item{omitted:}{
+      if \code{lambdaI} is omitted then it will be estimated
+      using a leave-one-out kernel smoother. 
+    }
+  }
+  If \code{lambdaI} is omitted, then it will be estimated using
+  a `leave-one-out' kernel smoother,
+  as described in Baddeley, \Moller 
+  and Waagepetersen (2000).  The estimate of \code{lambdaI} for a given
+  point is computed by removing the point from the
+  point pattern, applying kernel smoothing to the remaining points using
+  \code{\link{density.ppp}}, and evaluating the smoothed intensity
+  at the point in question. The smoothing kernel bandwidth is controlled
+  by the arguments \code{sigma} and \code{varcov}, which are passed to
+  \code{\link{density.ppp}} along with any extra arguments.
+
+  Similarly \code{lambdaJ} should contain
+  estimated values of the intensity of the sub-process of points of
+  type \code{j}. It may be either a pixel image, a function,
+  a numeric vector, or omitted.
+  
+  Alternatively if the argument \code{lambdaX} is given, then it specifies
+  the intensity values for all points of \code{X}, and the
+  arguments \code{lambdaI}, \code{lambdaJ} will be ignored.
+  
+  The optional argument \code{lambdaIJ} is for advanced use only.
+  It is a matrix containing estimated
+  values of the products of these two intensities for each pair of
+  data points of types \code{i} and \code{j} respectively.
+  
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. 
+  The values of \eqn{r} must be increasing nonnegative numbers
+  and the maximum \eqn{r} value must not exceed the radius of the
+  largest disc contained in the window.
+
+  The argument \code{correction} chooses the edge correction
+  as explained e.g. in \code{\link{Kest}}.
+
+  The pair correlation function can also be applied to the
+  result of \code{Kcross.inhom}; see \code{\link{pcf}}.
+}
+\references{
+  Baddeley, A.,
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2000)
+  Non- and semiparametric estimation of interaction in
+  inhomogeneous point patterns.
+  \emph{Statistica Neerlandica} \bold{54}, 329--350.
+
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003)
+  \emph{Statistical inference and simulation for spatial point processes}.
+  Chapman and Hall/CRC, Boca Raton.
+}
+\section{Warnings}{
+  The arguments \code{i} and \code{j} are always interpreted as
+  levels of the factor \code{X$marks}. They are converted to character
+  strings if they are not already character strings.
+  The value \code{i=1} does \bold{not}
+  refer to the first level of the factor.
+}
+\seealso{
+ \code{\link{Kcross}},
+ \code{\link{Kinhom}},
+ \code{\link{Kdot.inhom}},
+ \code{\link{Kmulti.inhom}},
+ \code{\link{pcf}}
+}
+\examples{
+    # Lansing Woods data
+    woods <- lansing
+    \testonly{woods <- woods[seq(1,npoints(woods), by=10)]}
+    ma <- split(woods)$maple
+    wh <- split(woods)$whiteoak
+
+    # method (1): estimate intensities by nonparametric smoothing
+    lambdaM <- density.ppp(ma, sigma=0.15, at="points")
+    lambdaW <- density.ppp(wh, sigma=0.15, at="points")
+    K <- Kcross.inhom(woods, "whiteoak", "maple", lambdaW, lambdaM)
+
+    # method (2): leave-one-out
+    K <- Kcross.inhom(woods, "whiteoak", "maple", sigma=0.15)
+
+    # method (3): fit parametric intensity model
+    fit <- ppm(woods ~marks * polynom(x,y,2))
+    # alternative (a): use fitted model as 'lambda' argument
+    K <- Kcross.inhom(woods, "whiteoak", "maple",
+                      lambdaI=fit, lambdaJ=fit, update=FALSE)
+    K <- Kcross.inhom(woods, "whiteoak", "maple",
+                      lambdaX=fit, update=FALSE)
+    # alternative (b): evaluate fitted intensities at data points
+    # (these are the intensities of the sub-processes of each type)
+    inten <- fitted(fit, dataonly=TRUE)
+    # split according to types of points
+    lambda <- split(inten, marks(woods))
+    K <- Kcross.inhom(woods, "whiteoak", "maple",
+              lambda$whiteoak, lambda$maple)
+    
+    # synthetic example: type A points have intensity 50,
+    #                    type B points have intensity 50 + 100 * x
+    lamB <- as.im(function(x,y){50 + 100 * x}, owin())
+    X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB))
+    K <- Kcross.inhom(X, "A", "B",
+        lambdaI=as.im(50, Window(X)), lambdaJ=lamB)
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Kdot.Rd b/man/Kdot.Rd
new file mode 100644
index 0000000..4884666
--- /dev/null
+++ b/man/Kdot.Rd
@@ -0,0 +1,211 @@
+\name{Kdot}
+\alias{Kdot}
+\title{
+  Multitype K Function (i-to-any)
+}
+\description{
+  For a multitype point pattern, 
+  estimate the multitype \eqn{K} function
+  which counts the expected number of other points of the process
+  within a given distance of a point of type \eqn{i}.
+}
+\usage{
+Kdot(X, i, r=NULL, breaks=NULL, correction, ..., ratio=FALSE, from)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the multitype \eqn{K} function
+    \eqn{K_{i\bullet}(r)}{Ki.(r)} will be computed.
+    It must be a multitype point pattern (a marked point pattern
+    whose marks are a factor). See under Details.
+  }
+  \item{i}{The type (mark value)
+    of the points in \code{X} from which distances are measured.
+    A character string (or something that will be converted to a
+    character string).
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the distribution function
+    \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{breaks}{
+	This argument is for internal use only.
+  }
+  \item{correction}{
+    A character vector containing any selection of the
+    options \code{"border"}, \code{"bord.modif"},
+    \code{"isotropic"}, \code{"Ripley"}, \code{"translate"},
+    \code{"translation"},
+    \code{"none"} or \code{"best"}.
+    It specifies the edge correction(s) to be applied.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{\dots}{Ignored.}
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    each edge-corrected estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+  \item{from}{An alternative way to specify \code{i}.}
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+
+  Essentially a data frame containing numeric columns 
+  \item{r}{the values of the argument \eqn{r} 
+    at which the function \eqn{K_{i\bullet}(r)}{Ki.(r)} has been  estimated
+  }
+  \item{theo}{the theoretical value of  \eqn{K_{i\bullet}(r)}{Ki.(r)}
+    for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2}
+  }
+  together with a column or columns named 
+  \code{"border"}, \code{"bord.modif"},
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the function \eqn{K_{i\bullet}(r)}{Ki.(r)}
+  obtained by the edge corrections named.
+
+  If \code{ratio=TRUE} then the return value also has two
+  attributes called \code{"numerator"} and \code{"denominator"}
+  which are \code{"fv"} objects
+  containing the numerators and denominators of each
+  estimate of \eqn{K(r)}. 
+}
+\details{
+  This function \code{Kdot} and its companions
+  \code{\link{Kcross}} and \code{\link{Kmulti}}
+  are generalisations of the function \code{\link{Kest}}
+  to multitype point patterns. 
+
+  A multitype point pattern is a spatial pattern of
+  points classified into a finite number of possible
+  ``colours'' or ``types''. In the \pkg{spatstat} package,
+  a multitype pattern is represented as a single 
+  point pattern object in which the points carry marks,
+  and the mark value attached to each point
+  determines the type of that point.
+  
+  The argument \code{X} must be a point pattern (object of class
+  \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
+  It must be a marked point pattern, and the mark vector
+  \code{X$marks} must be a factor.
+
+  The argument \code{i} will be interpreted as a
+  level of the factor \code{X$marks}. 
+  If \code{i} is missing, it defaults to the first
+  level of the marks factor, \code{i = levels(X$marks)[1]}.
+  
+  The ``type \eqn{i} to any type'' multitype \eqn{K} function 
+  of a stationary multitype point process \eqn{X} is defined so that
+  \eqn{\lambda K_{i\bullet}(r)}{lambda Ki.(r)}
+  equals the expected number of
+  additional random points within a distance \eqn{r} of a
+  typical point of type \eqn{i} in the process \eqn{X}.
+  Here \eqn{\lambda}{lambda}
+  is the intensity of the process,
+  i.e. the expected number of points of \eqn{X} per unit area.
+  The function \eqn{K_{i\bullet}}{Ki.} is determined by the 
+  second order moment properties of \eqn{X}.
+
+  An estimate of \eqn{K_{i\bullet}(r)}{Ki.(r)}
+  is a useful summary statistic in exploratory data analysis
+  of a multitype point pattern.
+  If the subprocess of type \eqn{i} points were independent
+  of the subprocess of points of all types not equal to \eqn{i},
+  then \eqn{K_{i\bullet}(r)}{Ki.(r)} would equal \eqn{\pi r^2}{pi * r^2}.
+  Deviations between the empirical \eqn{K_{i\bullet}}{Ki.} curve
+  and the theoretical curve \eqn{\pi r^2}{pi * r^2} 
+  may suggest dependence between types.
+
+  This algorithm estimates the distribution function \eqn{K_{i\bullet}(r)}{Ki.(r)} 
+  from the point pattern \code{X}. It assumes that \code{X} can be treated
+  as a realisation of a stationary (spatially homogeneous) 
+  random spatial point process in the plane, observed through
+  a bounded window.
+  The window (which is specified in \code{X} as \code{Window(X)})
+  may have arbitrary shape.
+  Biases due to edge effects are
+  treated in the same manner as in \code{\link{Kest}},
+  using the border correction.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. 
+  The values of \eqn{r} must be increasing nonnegative numbers
+  and the maximum \eqn{r} value must not exceed the radius of the
+  largest disc contained in the window.
+
+  The pair correlation function can also be applied to the
+  result of \code{Kdot}; see \code{\link{pcf}}.
+}
+\references{
+  Cressie, N.A.C. \emph{Statistics for spatial data}.
+    John Wiley and Sons, 1991.
+
+  Diggle, P.J. \emph{Statistical analysis of spatial point patterns}.
+    Academic Press, 1983.
+
+  Harkness, R.D. and Isham, V. (1983)
+  A bivariate spatial point pattern of ants' nests.
+  \emph{Applied Statistics} \bold{32}, 293--303.
+ 
+  Lotwick, H. W. and Silverman, B. W. (1982).
+  Methods for analysing spatial processes of several types of points.
+  \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413.
+
+  Ripley, B.D. \emph{Statistical inference for spatial processes}.
+  Cambridge University Press, 1988.
+
+  Stoyan, D., Kendall, W.S. and Mecke, J.
+  \emph{Stochastic geometry and its applications}.
+  2nd edition. Springer Verlag, 1995.
+
+}
+\section{Warnings}{
+  The argument \code{i} is interpreted as
+  a level of the factor \code{X$marks}. It is converted to a character
+  string if it is not already a character string.
+  The value \code{i=1} does \bold{not}
+  refer to the first level of the factor.
+
+  The reduced sample estimator of \eqn{K_{i\bullet}}{Ki.} is pointwise
+  approximately unbiased, but the resulting estimate need not be a
+  nondecreasing function of \eqn{r}, although the true
+  \eqn{K_{i\bullet}}{Ki.} is nondecreasing.
+}
+\seealso{
 \code{\link{Kcross}},
+ \code{\link{Kest}},
+ \code{\link{Kmulti}},
+ \code{\link{pcf}}
+}
+\examples{
+     # Lansing woods data: 6 types of trees
+    woods <- lansing
+
+    \testonly{woods <- woods[seq(1, npoints(woods), by=80)]}
+
+    Kh. <- Kdot(woods, "hickory") 
+    # diagnostic plot for independence between hickories and other trees
+    plot(Kh.)
+
+    \dontrun{
+    # synthetic example with two marks "a" and "b"
+    pp <- runifpoispp(50)
+    pp <- pp \%mark\% factor(sample(c("a","b"), npoints(pp), replace=TRUE))
+    K <- Kdot(pp, "a")
+    }
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/Kdot.inhom.Rd b/man/Kdot.inhom.Rd
new file mode 100644
index 0000000..5c4946e
--- /dev/null
+++ b/man/Kdot.inhom.Rd
@@ -0,0 +1,301 @@
+\name{Kdot.inhom}
+\alias{Kdot.inhom}
+\title{
+  Inhomogeneous Multitype K Dot Function
+}
+\description{
+  For a multitype point pattern, 
+  estimate the inhomogeneous version of the dot \eqn{K} function,
+  which counts the expected number of points of any type
+  within a given distance of a point of type \eqn{i},
+  adjusted for spatially varying intensity.
+}
+\usage{
+Kdot.inhom(X, i, lambdaI=NULL, lambdadot=NULL, \dots, r=NULL, breaks=NULL,
+         correction = c("border", "isotropic", "Ripley", "translate"),
+         sigma=NULL, varcov=NULL, lambdaIdot=NULL,
+         lambdaX=NULL, update=TRUE, leaveoneout=TRUE)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the inhomogeneous dot type \eqn{K} function
+    \eqn{K_{i\bullet}(r)}{Ki.(r)} will be computed.
+    It must be a multitype point pattern (a marked point pattern
+    whose marks are a factor). See under Details.
+  }
+  \item{i}{The type (mark value)
+    of the points in \code{X} from which distances are measured.
+    A character string (or something that will be converted to a
+    character string).
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{lambdaI}{
+    Optional.
+    Values of the estimated intensity of the sub-process of
+    points of type \code{i}.
+    Either a pixel image (object of class \code{"im"}),
+    a numeric vector containing the intensity values
+    at each of the type \code{i} points in \code{X},
+    a fitted point process model
+    (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}),
+    or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+  }
+  \item{lambdadot}{
+    Optional.
+    Values of the estimated intensity of the entire point process.
+    Either a pixel image (object of class \code{"im"}),
+    a numeric vector containing the intensity values at each of the 
+    points in \code{X}, a fitted point process model
+    (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}),
+    or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{r}{
+      Optional. Numeric vector giving the values of the argument \eqn{r}
+      at which the dot K function
+      \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated.
+      There is a sensible default.
+      First-time users are strongly advised not to specify this argument.
+      See below for important conditions on \eqn{r}.
+  }
+  \item{breaks}{
+	This argument is for internal use only.
+  }
+  \item{correction}{
+    A character vector containing any selection of the
+    options \code{"border"}, \code{"bord.modif"},
+    \code{"isotropic"}, \code{"Ripley"}, \code{"translate"},
+    \code{"translation"},
+    \code{"none"} or \code{"best"}.
+    It specifies the edge correction(s) to be applied.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{sigma}{
+    Standard deviation of isotropic Gaussian smoothing kernel,
+    used in computing leave-one-out kernel estimates of
+    \code{lambdaI}, \code{lambdadot} if they are omitted.
+  }
+  \item{varcov}{
+    Variance-covariance matrix of anisotropic Gaussian kernel,
+    used in computing leave-one-out kernel estimates of
+    \code{lambdaI}, \code{lambdadot} if they are omitted.
+    Incompatible with \code{sigma}.
+  }
+  \item{lambdaIdot}{
+    Optional. A matrix containing estimates of the
+    product of the intensities \code{lambdaI} and \code{lambdadot}
+    for each pair of points, the first point of type \code{i} and
+    the second of any type.
+  }
+  \item{lambdaX}{
+    Optional. Values of the intensity for all points of \code{X}.
+    Either a pixel image (object of class \code{"im"}),
+    a numeric vector containing the intensity values
+    at each of the points in \code{X},
+    a fitted point process model
+    (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}),
+    or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+    If present, this argument overrides both \code{lambdaI} and
+    \code{lambdadot}.
+  }
+  \item{update}{
+    Logical value indicating what to do when
+    \code{lambdaI}, \code{lambdadot} or \code{lambdaX}
+    is a fitted point process model
+    (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}).
+    If \code{update=TRUE} (the default),
+    the model will first be refitted to the data \code{X}
+    (using \code{\link{update.ppm}} or \code{\link{update.kppm}})
+    before the fitted intensity is computed.
+    If \code{update=FALSE}, the fitted intensity of the
+    model will be computed without re-fitting it to \code{X}.
+  }
+  \item{leaveoneout}{
+    Logical value (passed to \code{\link{density.ppp}} or
+    \code{\link{fitted.ppm}}) specifying whether to use a
+    leave-one-out rule when calculating the intensity.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+
+  Essentially a data frame containing numeric columns 
+  \item{r}{the values of the argument \eqn{r} 
+    at which the function \eqn{K_{i\bullet}(r)}{Ki.(r)} has been  estimated
+  }
+  \item{theo}{the theoretical value of  \eqn{K_{i\bullet}(r)}{Ki.(r)}
+    for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2}
+  }
+  together with a column or columns named 
+  \code{"border"}, \code{"bord.modif"},
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the function \eqn{K_{i\bullet}(r)}{Ki.(r)}
+  obtained by the edge corrections named.
+}
+\details{
+  This is a generalisation of the function \code{\link{Kdot}}
+  to include an adjustment for spatially inhomogeneous intensity,
+  in a manner similar to the function \code{\link{Kinhom}}.
+
+  Briefly, given a multitype point process, consider the points without
+  their types, and suppose this unmarked point process 
+  has intensity function
+  \eqn{\lambda(u)}{lambda(u)} at spatial locations \eqn{u}.
+  Suppose we place a mass of \eqn{1/\lambda(\zeta)}{1/lambda(z)}
+  at each point \eqn{\zeta}{z} of the process. Then the expected total
+  mass per unit area is 1. The
+  inhomogeneous ``dot-type'' \eqn{K} function 
+  \eqn{K_{i\bullet}^{\mbox{inhom}}(r)}{K[i.]inhom(r)} equals the expected
+  total mass within a radius \eqn{r} of a point of the process
+  of type \eqn{i}, discounting this point itself.
+  
+  If the process of type \eqn{i} points
+  were independent of the points of other types,
+  then \eqn{K_{i\bullet}^{\mbox{inhom}}(r)}{K[i.]inhom(r)}
+  would equal \eqn{\pi r^2}{pi * r^2}.
+  Deviations between the empirical \eqn{K_{i\bullet}}{Ki.} curve
+  and the theoretical curve \eqn{\pi r^2}{pi * r^2} 
+  suggest dependence between the points of types \eqn{i} and \eqn{j} for
+  \eqn{j\neq i}{j != i}.
+
+  The argument \code{X} must be a point pattern (object of class
+  \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
+  It must be a marked point pattern, and the mark vector
+  \code{X$marks} must be a factor.
+
+  The argument \code{i} will be interpreted as a
+  level of the factor \code{X$marks}. (Warning: this means that
+  an integer value \code{i=3} will be interpreted as the number 3,
+  \bold{not} the 3rd smallest level).
+  If \code{i} is missing, it defaults to the first
+  level of the marks factor, \code{i = levels(X$marks)[1]}.
+  
+  The argument \code{lambdaI} supplies the values
+  of the intensity of the sub-process of points of type \code{i}.
+  It may be either
+  \describe{
+    \item{a pixel image}{(object of class \code{"im"}) which
+      gives the values of the type \code{i} intensity
+      at all locations in the window containing \code{X};
+    }
+    \item{a numeric vector}{containing the values of the
+      type \code{i} intensity evaluated only
+      at the data points of type \code{i}. The length of this vector
+      must equal the number of type \code{i} points in \code{X}.
+    }
+    \item{a function}{
+      of the form \code{function(x,y)}
+      which can be evaluated to give values of the intensity at
+      any locations.
+    }
+     \item{a fitted point process model}{
+      (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"})
+      whose fitted \emph{trend} can be used as the fitted intensity.
+      (If \code{update=TRUE} the model will first be refitted to the
+      data \code{X} before the trend is computed.)
+    }
+   \item{omitted:}{
+      if \code{lambdaI} is omitted then it will be estimated
+      using a leave-one-out kernel smoother. 
+    }
+  }
+  If \code{lambdaI} is omitted, then it will be estimated using
+  a `leave-one-out' kernel smoother, as described in Baddeley,
+  \Moller 
+  and Waagepetersen (2000).  The estimate of \code{lambdaI} for a given
+  point is computed by removing the point from the
+  point pattern, applying kernel smoothing to the remaining points using
+  \code{\link{density.ppp}}, and evaluating the smoothed intensity
+  at the point in question. The smoothing kernel bandwidth is controlled
+  by the arguments \code{sigma} and \code{varcov}, which are passed to
+  \code{\link{density.ppp}} along with any extra arguments.
+
+  Similarly the argument \code{lambdadot} should contain
+  estimated values of the intensity of the entire point process.
+  It may be either a pixel image, a numeric vector of length equal
+  to the number of points in \code{X}, a function, or omitted.
+
+  Alternatively if the argument \code{lambdaX} is given, then it specifies
+  the intensity values for all points of \code{X}, and the
+  arguments \code{lambdaI}, \code{lambdadot} will be ignored.
+  (The two arguments \code{lambdaI}, \code{lambdadot} allow the user
+  to specify two different methods for calculating the intensities of
+  the two kinds of points, while \code{lambdaX} ensures that the same
+  method is used for both kinds of points.)
+  
+  For advanced use only, the optional argument \code{lambdaIdot}
+  is a matrix containing estimated
+  values of the products of these two intensities for each pair of
+  points, the first point of type \code{i} and the second of any type.
+  
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. 
+  The values of \eqn{r} must be increasing nonnegative numbers
+  and the maximum \eqn{r} value must not exceed the radius of the
+  largest disc contained in the window.
+
+  The argument \code{correction} chooses the edge correction
+  as explained e.g. in \code{\link{Kest}}.
+
+  The pair correlation function can also be applied to the
+  result of \code{Kdot.inhom}; see \code{\link{pcf}}.
+}
+\references{
+  Baddeley, A.,
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2000)
+  Non- and semiparametric estimation of interaction in
+  inhomogeneous point patterns.
+  \emph{Statistica Neerlandica} \bold{54}, 329--350.
+
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003)
+  \emph{Statistical Inference and Simulation for Spatial Point Processes}.
+  Chapman and Hall/CRC, Boca Raton.
+}
+\section{Warnings}{
+  The argument \code{i} is interpreted as
+  a level of the factor \code{X$marks}. It is converted to a character
+  string if it is not already a character string.
+  The value \code{i=1} does \bold{not}
+  refer to the first level of the factor.
+}
+\seealso{
+ \code{\link{Kdot}},
+ \code{\link{Kinhom}},
+ \code{\link{Kcross.inhom}},
+ \code{\link{Kmulti.inhom}},
+ \code{\link{pcf}}
+}
+\examples{
+    # Lansing Woods data
+    woods <- lansing
+    woods <- woods[seq(1,npoints(woods), by=10)]
+    ma <- split(woods)$maple
+    lg <- unmark(woods)
+
+    # Estimate intensities by nonparametric smoothing
+    lambdaM <- density.ppp(ma, sigma=0.15, at="points")
+    lambdadot <- density.ppp(lg, sigma=0.15, at="points")
+    K <- Kdot.inhom(woods, "maple", lambdaI=lambdaM,
+                                      lambdadot=lambdadot)
+
+    # Equivalent
+    K <- Kdot.inhom(woods, "maple", sigma=0.15)
+
+    # Fit model
+    fit <- ppm(woods ~ marks * polynom(x,y,2))
+    K <- Kdot.inhom(woods, "maple", lambdaX=fit, update=FALSE)
+    
+    # synthetic example: type A points have intensity 50,
+    #                    type B points have intensity 50 + 100 * x
+    lamB <- as.im(function(x,y){50 + 100 * x}, owin())
+    lamdot <- as.im(function(x,y) { 100 + 100 * x}, owin())
+    X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB))
+    K <- Kdot.inhom(X, "B",  lambdaI=lamB,     lambdadot=lamdot)
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/Kest.Rd b/man/Kest.Rd
new file mode 100644
index 0000000..c88e39f
--- /dev/null
+++ b/man/Kest.Rd
@@ -0,0 +1,336 @@
+\name{Kest}
+\alias{Kest}
+\title{K-function}
+\description{
+Estimates Ripley's reduced second moment function \eqn{K(r)} 
+from a point pattern in a window of arbitrary shape.
+}
+\usage{
+  Kest(X, \dots, r=NULL, rmax=NULL, breaks=NULL,
+       correction=c("border", "isotropic", "Ripley", "translate"),
+       nlarge=3000, domain=NULL, var.approx=FALSE, ratio=FALSE)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of \eqn{K(r)} will be computed.
+    An object of class \code{"ppp"}, or data
+    in any format acceptable to \code{\link{as.ppp}()}.
+  }
+  \item{\dots}{Ignored.}
+  \item{r}{
+    Optional. Vector of values for the argument \eqn{r} at which \eqn{K(r)} 
+    should be evaluated. Users are advised \emph{not} to specify this
+    argument; there is a sensible default. If necessary, specify \code{rmax}.
+  }
+  \item{rmax}{
+    Optional. Maximum desired value of the argument \eqn{r}.
+  }
+  \item{breaks}{
+    This argument is for internal use only.
+  }
+  \item{correction}{
+    Optional. A character vector containing any selection of the
+    options \code{"none"}, \code{"border"}, \code{"bord.modif"},
+    \code{"isotropic"}, \code{"Ripley"}, \code{"translate"},
+    \code{"translation"}, \code{"rigid"},
+    \code{"none"}, \code{"good"} or \code{"best"}.
+    It specifies the edge correction(s) to be applied.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{nlarge}{
+    Optional. Efficiency threshold.
+    If the number of points exceeds \code{nlarge}, then only the
+    border correction will be computed (by default), using a fast algorithm.
+  }
+  \item{domain}{
+    Optional. Calculations will be restricted to this subset
+    of the window. See Details.
+  }
+  \item{var.approx}{Logical. If \code{TRUE}, the approximate
+    variance of \eqn{\hat K(r)}{Kest(r)} under CSR
+    will also be computed.
+  }
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    each edge-corrected estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+}
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+
+  Essentially a data frame containing columns
+  \item{r}{the vector of values of the argument \eqn{r} 
+    at which the function \eqn{K} has been  estimated
+  }
+  \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}
+    for a stationary Poisson process
+  }
+  together with columns named 
+  \code{"border"}, \code{"bord.modif"},
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the function \eqn{K(r)} obtained by the edge corrections
+  named.
+
+  If \code{var.approx=TRUE} then the return value
+  also has columns \code{rip} and \code{ls} containing approximations
+  to the variance of \eqn{\hat K(r)}{Kest(r)} under CSR.
+
+  If \code{ratio=TRUE} then the return value also has two
+  attributes called \code{"numerator"} and \code{"denominator"}
+  which are \code{"fv"} objects
+  containing the numerators and denominators of each
+  estimate of \eqn{K(r)}. 
+}
+\details{
+  The \eqn{K} function (variously called ``Ripley's K-function''
+  and the ``reduced second moment function'')
+  of a stationary point process \eqn{X} is defined so that
+  \eqn{\lambda K(r)}{lambda K(r)} equals the expected number of
+  additional random points within a distance \eqn{r} of a
+  typical random point of \eqn{X}. Here \eqn{\lambda}{lambda}
+  is the intensity of the process,
+  i.e. the expected number of points of \eqn{X} per unit area.
+  The \eqn{K} function is determined by the 
+  second order moment properties of \eqn{X}.
+ 
+  An estimate of \eqn{K} derived from a spatial point pattern dataset
+  can be used in exploratory data analysis and formal inference
+  about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1977, 1988).
+  In exploratory analyses, the estimate of \eqn{K} is a useful statistic 
+  summarising aspects of inter-point ``dependence'' and ``clustering''.
+  For inferential purposes, the estimate of \eqn{K} is usually compared to the 
+  true value of \eqn{K} for a completely random (Poisson) point process,
+  which is \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}.
+  Deviations between the empirical and theoretical \eqn{K} curves
+  may suggest spatial clustering or spatial regularity.
+ 
+  This routine \code{Kest} estimates the \eqn{K} function
+  of a stationary point process, given observation of the process
+  inside a known, bounded window. 
+  The argument \code{X} is interpreted as a point pattern object 
+  (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can
+  be supplied in any of the formats recognised by
+  \code{\link{as.ppp}()}.
+
+  The estimation of \eqn{K} is hampered by edge effects arising from 
+  the unobservability of points of the random pattern outside the window. 
+  An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). 
+  The corrections implemented here are
+  \describe{
+    \item{border}{the border method or
+      ``reduced sample'' estimator (see Ripley, 1988). This is
+      the least efficient (statistically) and the fastest to compute.
+      It can be computed for a window of arbitrary shape.
+    }
+    \item{isotropic/Ripley}{Ripley's isotropic correction
+      (see Ripley, 1988; Ohser, 1983).
+      This is implemented for rectangular and polygonal windows
+      (not for binary masks).
+    }
+    \item{translate/translation}{Translation correction (Ohser, 1983).
+      Implemented for all window geometries, but slow for
+      complex windows. 
+    }
+    \item{rigid}{Rigid motion correction (Ohser and Stoyan, 1981).
+      Implemented for all window geometries, but slow for
+      complex windows. 
+    }
+    \item{none}{
+      Uncorrected estimate.
+      An estimate of the K function \emph{without} edge correction
+      (i.e. setting \eqn{e_{ij} = 1}{e[i,j] = 1} in the equation below).
+      This estimate is \bold{biased} and should
+      not be used for data analysis, \emph{unless} you have
+      an extremely large point pattern (more than 100,000 points).
+    }
+    \item{best}{
+      Selects the best edge correction
+      that is available for the geometry of the window. Currently
+      this is Ripley's isotropic correction for a rectangular
+      or polygonal window, and the translation correction for masks.
+    }
+    \item{good}{
+      Selects the best edge correction
+      that can be computed in a reasonable time.
+      This is the same as \code{"best"} for datasets with fewer
+      than 3000 points; otherwise the selected edge correction
+      is \code{"border"}, unless there are more than 100,000 points, when 
+      it is \code{"none"}.
+    }
+  }
+  
+  The estimates of \eqn{K(r)} are of the form
+  \deqn{
+    \hat K(r) = \frac a {n (n-1) } \sum_i \sum_j I(d_{ij}\le r) e_{ij}
+  }{
+    Kest(r) = (a/(n * (n-1))) * sum[i,j] I(d[i,j] <= r) e[i,j]
+  }
+  where \eqn{a} is the area of the window, \eqn{n} is the number of
+  data points, and the sum is taken over all ordered pairs of
+  distinct points \eqn{i} and \eqn{j} in \code{X}.
+  Here \eqn{d_{ij}}{d[i,j]} is the distance between the two points,
+  and \eqn{I(d_{ij} \le r)}{I(d[i,j] <= r)} is the indicator
+  that equals 1 if the distance is less than or equal to \eqn{r}.
+  The term \eqn{e_{ij}}{e[i,j]} is the edge correction weight (which
+  depends on the choice of edge correction listed above).
+
+  Note that this estimator assumes the process is stationary (spatially
+  homogeneous). For inhomogeneous point patterns, see
+  \code{\link{Kinhom}}.
+
+  If the point pattern \code{X} contains more than about 3000 points,
+  the isotropic and translation edge corrections can be computationally
+  prohibitive. The computations for the border method are much faster,
+  and are statistically efficient when there are large numbers of
+  points. Accordingly, if the number of points in \code{X} exceeds
+  the threshold \code{nlarge}, then only the border correction will be
+  computed. Setting \code{nlarge=Inf} or \code{correction="best"}
+  will prevent this from happening.
+  Setting \code{nlarge=0} is equivalent to selecting only the border
+  correction with \code{correction="border"}.
+
+  If \code{X} contains more than about 100,000 points,
+  even the border correction is time-consuming. You may want to consider
+  setting \code{correction="none"} in this case.
+  There is an even faster algorithm for the uncorrected estimate.
+
+  Approximations to the variance of \eqn{\hat K(r)}{Kest(r)}
+  are available, for the case of the isotropic edge correction estimator,
+  \bold{assuming complete spatial randomness}
+  (Ripley, 1988; Lotwick and Silverman, 1982; Diggle, 2003, pp 51-53).
+  If \code{var.approx=TRUE}, then the result of
+  \code{Kest} also has a column named \code{rip} 
+  giving values of Ripley's (1988) approximation to
+  \eqn{\mbox{var}(\hat K(r))}{var(Kest(r))},
+  and (if the window is a rectangle) a column named \code{ls} giving
+  values of Lotwick and Silverman's (1982) approximation.
+  
+  If the argument \code{domain} is given, the calculations will
+  be restricted to a subset of the data. In the formula for \eqn{K(r)} above,
+  the \emph{first} point \eqn{i} will be restricted to lie inside
+  \code{domain}. The result is an approximately unbiased estimate
+  of \eqn{K(r)} based on pairs of points in which the first point lies
+  inside \code{domain} and the second point is unrestricted.
+  This is useful in bootstrap techniques. The argument \code{domain}
+  should be a window (object of class \code{"owin"}) or something acceptable to
+  \code{\link{as.owin}}. It must be a subset of the
+  window of the point pattern \code{X}.
+
+  The estimator \code{Kest} ignores marks.
+  Its counterparts for multitype point patterns
+  are \code{\link{Kcross}}, \code{\link{Kdot}},
+  and for general marked point patterns
+  see \code{\link{Kmulti}}. 
+
+  Some writers, particularly Stoyan (1994, 1995), advocate the use of
+  the ``pair correlation function''
+  \deqn{
+    g(r) = \frac{K'(r)}{2\pi r}
+  }{
+    g(r) = K'(r)/ ( 2 * pi * r) 
+  }
+  where \eqn{K'(r)} is the derivative of \eqn{K(r)}.
+  See \code{\link{pcf}} on how to estimate this function.
+}
+\section{Envelopes, significance bands and confidence intervals}{
+  To compute simulation envelopes for the \eqn{K}-function
+  under CSR, use \code{\link{envelope}}. 
+  
+  To compute a confidence interval for the true \eqn{K}-function,
+  use \code{\link{varblock}} or \code{\link{lohboot}}.
+}
+\references{
+  Baddeley, A.J. Spatial sampling and censoring.
+  In O.E. Barndorff-Nielsen, W.S. Kendall and
+  M.N.M. van Lieshout (eds)
+  \emph{Stochastic Geometry: Likelihood and Computation}.
+  Chapman and Hall, 1998.
+  Chapter 2, pages 37--78.
+
+  Cressie, N.A.C. \emph{Statistics for spatial data}.
+  John Wiley and Sons, 1991.
+
+  Diggle, P.J. \emph{Statistical analysis of spatial point patterns}.
+  Academic Press, 1983.
+
+  Diggle, P.J. \emph{Statistical analysis of spatial point patterns}.
+  Second edition. Arnold, 2003.
+
+  Lotwick, H.W. and Silverman, B.W. (1982)
+  Methods for analysing spatial processes of several types of points.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{44}, 406--413.
+
+  Ohser, J. (1983)
+  On estimators for the reduced second moment measure of
+  point processes. \emph{Mathematische Operationsforschung und
+  Statistik, series Statistics}, \bold{14}, 63--71.
+
+  Ohser, J. and Stoyan, D. (1981)
+  On the second-order and orientation analysis of
+  planar stationary point processes.
+  \emph{Biometrical Journal} \bold{23}, 523--533.
+
+  Ripley, B.D. (1977)
+  Modelling spatial patterns (with discussion).
+  \emph{Journal of the Royal Statistical Society, Series B},
+  \bold{39}, 172--212.
+
+  Ripley, B.D. \emph{Statistical inference for spatial processes}.
+  Cambridge University Press, 1988.
+
+  Stoyan, D., Kendall, W.S. and Mecke, J. (1995)
+  \emph{Stochastic geometry and its applications}.
+  2nd edition. Springer Verlag.
+
+  Stoyan, D. and Stoyan, H. (1994)
+  \emph{Fractals, random shapes and point fields:
+  methods of geometrical statistics}.
+  John Wiley and Sons.
+}
+\section{Warnings}{
+  The estimator of \eqn{K(r)} is approximately unbiased for each fixed \eqn{r}.
+  Bias increases with \eqn{r} and depends on the window geometry.
+  For a rectangular window it is prudent to restrict the \eqn{r} values to
+  a maximum of \eqn{1/4} of the smaller side length of the rectangle.
+  Bias may become appreciable for point patterns consisting of 
+  fewer than 15 points.
+ 
+  While \eqn{K(r)} is always a non-decreasing function, the estimator 
+  of \eqn{K} is not guaranteed to be non-decreasing. This is rarely 
+  a problem in practice.
+}
+\seealso{
+  \code{\link{localK}} to extract individual summands in the \eqn{K}
+  function.
+
+  \code{\link{pcf}} for the pair correlation.
+
+  \code{\link{Fest}},
+  \code{\link{Gest}},
+  \code{\link{Jest}}
+  for alternative summary functions.
+  
+  \code{\link{Kcross}},
+  \code{\link{Kdot}},
+  \code{\link{Kinhom}},
+  \code{\link{Kmulti}} for counterparts of the \eqn{K} function
+  for multitype point patterns.
+  
+  \code{\link{reduced.sample}} for the calculation of reduced sample
+  estimators.
+}
+\examples{
+ X <- runifpoint(50)
+ K <- Kest(X)
+ K <- Kest(cells, correction="isotropic")
+ plot(K)
+ plot(K, main="K function for cells")
+ # plot the L function
+ plot(K, sqrt(iso/pi) ~ r)
+ plot(K, sqrt(./pi) ~ r, ylab="L(r)", main="L function for cells")
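+
+ # optional arguments (sketches): approximate variance under CSR,
+ # and restriction of the first point of each pair to a subwindow
+ Kv <- Kest(cells, var.approx=TRUE)
+ Kd <- Kest(cells, domain=square(0.5))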
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
+ 
+ 
diff --git a/man/Kest.fft.Rd b/man/Kest.fft.Rd
new file mode 100644
index 0000000..cc26313
--- /dev/null
+++ b/man/Kest.fft.Rd
@@ -0,0 +1,107 @@
+\name{Kest.fft}
+\alias{Kest.fft}
+\title{K-function using FFT}
+\description{
+Estimates the reduced second moment function \eqn{K(r)} 
+from a point pattern in a window of arbitrary shape,
+using the Fast Fourier Transform.
+}
+\usage{
+  Kest.fft(X, sigma, r=NULL, \dots, breaks=NULL)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of \eqn{K(r)} will be computed.
+    An object of class \code{"ppp"}, or data
+    in any format acceptable to \code{\link{as.ppp}()}.
+  }
+  \item{sigma}{
+    Standard deviation of the isotropic Gaussian
+    smoothing kernel.
+  }
+  \item{r}{
+    Optional. Vector of values for the argument \eqn{r} at which \eqn{K(r)} 
+    should be evaluated. There is a sensible default.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}} determining the
+    spatial resolution for the FFT calculation.
+  }
+  \item{breaks}{
+    This argument is for internal use only.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+  
+  Essentially a data frame containing columns
+  \item{r}{the vector of values of the argument \eqn{r} 
+    at which the function \eqn{K} has been  estimated
+  }
+  \item{border}{the estimates of \eqn{K(r)} for these values of \eqn{r}
+  }
+  \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}
+    for a stationary Poisson process
+  }
+}
+\details{
+  This is an alternative to the function \code{\link{Kest}}
+  for estimating the \eqn{K} function. It may be useful for
+  very large patterns of points.
+
+  Whereas \code{\link{Kest}} computes the distance between
+  each pair of points analytically, this function discretises the
+  point pattern onto a rectangular pixel raster and applies
+  Fast Fourier Transform techniques to estimate \eqn{K(r)}.
+  The hard work is done by the function \code{\link{Kmeasure}}.
+
+  The result is an approximation whose accuracy depends on the
+  resolution of the pixel raster. The resolution is controlled
+  by the arguments \code{\dots}, or by setting the parameter \code{npixel} in 
+  \code{\link{spatstat.options}}.
+}
+\references{
+  Cressie, N.A.C. \emph{Statistics for spatial data}.
+    John Wiley and Sons, 1991.
+
+  Diggle, P.J. \emph{Statistical analysis of spatial point patterns}.
+  Academic Press, 1983.
+
+  Ohser, J. (1983)
+  On estimators for the reduced second moment measure of
+  point processes. \emph{Mathematische Operationsforschung und
+  Statistik, series Statistics}, \bold{14}, 63--71.
+    
+  Ripley, B.D. \emph{Statistical inference for spatial processes}.
+  Cambridge University Press, 1988.
+
+  Stoyan, D., Kendall, W.S. and Mecke, J. (1995)
+  \emph{Stochastic geometry and its applications}.
+  2nd edition. Springer Verlag.
+
+  Stoyan, D. and Stoyan, H. (1994)
+  \emph{Fractals, random shapes and point fields:
+  methods of geometrical statistics}.
+  John Wiley and Sons.
+} 
+\seealso{
+  \code{\link{Kest}},
+  \code{\link{Kmeasure}},
+  \code{\link{spatstat.options}}
+}
+\examples{
+ pp <- runifpoint(10000)
+ \testonly{
+  op <- spatstat.options(npixel=125)
+ }
+ Kpp <- Kest.fft(pp, 0.01)
+ plot(Kpp)
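+ # pixel resolution may be refined via arguments passed to as.mask,
+ # e.g. eps (a sketch; finer grids are slower but more accurate)
+ Kpp2 <- Kest.fft(pp, 0.01, eps=0.005)
+ plot(Kpp2)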
+ \testonly{spatstat.options(op)}
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{nonparametric}
+ 
+ 
diff --git a/man/Kinhom.Rd b/man/Kinhom.Rd
new file mode 100644
index 0000000..ab91e77
--- /dev/null
+++ b/man/Kinhom.Rd
@@ -0,0 +1,408 @@
+\name{Kinhom}
+\alias{Kinhom}
+\title{Inhomogeneous K-function}
+\description{
+  Estimates the inhomogeneous \eqn{K} function of
+  a non-stationary point pattern.
+}
+\usage{
+  Kinhom(X, lambda=NULL, \dots, r = NULL, breaks = NULL,
+    correction=c("border", "bord.modif", "isotropic", "translate"),
+    renormalise=TRUE,
+    normpower=1,
+    update=TRUE,
+    leaveoneout=TRUE,
+    nlarge = 1000,
+    lambda2=NULL, reciplambda=NULL, reciplambda2=NULL,
+    diagonal=TRUE,
+    sigma=NULL, varcov=NULL,
+    ratio=FALSE)
+}
+\arguments{
+  \item{X}{
+    The observed data point pattern,
+    from which an estimate of the inhomogeneous \eqn{K} function
+    will be computed.
+    An object of class \code{"ppp"}
+    or in a format recognised by \code{\link{as.ppp}()}
+  }
+  \item{lambda}{
+    Optional.
+    Values of the estimated intensity function.
+    Either a vector giving the intensity values
+    at the points of the pattern \code{X},
+    a pixel image (object of class \code{"im"}) giving the
+    intensity values at all locations, a fitted point process model
+    (object of class \code{"ppm"} or \code{"kppm"})
+    or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+  }
+  \item{\dots}{
+    Extra arguments. Ignored if \code{lambda} is present.
+    Passed to \code{\link{density.ppp}} if \code{lambda} is omitted.
+  }
+  \item{r}{
+    vector of values for the argument \eqn{r} at which
+    the inhomogeneous \eqn{K} function
+    should be evaluated. Not normally given by the user;
+    there is a sensible default.
+  }
+  \item{breaks}{
+    This argument is for internal use only.
+  }
+  \item{correction}{
+    A character vector containing any selection of the
+    options \code{"border"}, \code{"bord.modif"},
+    \code{"isotropic"}, \code{"Ripley"}, \code{"translate"},
+    \code{"translation"},
+    \code{"none"} or \code{"best"}.
+    It specifies the edge correction(s) to be applied.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{renormalise}{
+    Logical. Whether to renormalise the estimate. See Details.
+  }
+  \item{normpower}{
+    Integer (usually either 1 or 2).
+    Normalisation power. See Details.
+  }
+  \item{update}{
+    Logical value indicating what to do when \code{lambda} is a fitted model
+    (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}).
+    If \code{update=TRUE} (the default),
+    the model will first be refitted to the data \code{X}
+    (using \code{\link{update.ppm}} or \code{\link{update.kppm}})
+    before the fitted intensity is computed.
+    If \code{update=FALSE}, the fitted intensity of the
+    model will be computed without re-fitting it to \code{X}.
+  }
+  \item{leaveoneout}{
+    Logical value (passed to \code{\link{density.ppp}} or
+    \code{\link{fitted.ppm}}) specifying whether to use a
+    leave-one-out rule when calculating the intensity.
+  }
+  \item{nlarge}{
+    Optional. Efficiency threshold.
+    If the number of points exceeds \code{nlarge}, then only the
+    border correction will be computed, using a fast algorithm.
+  }
+  \item{lambda2}{
+    Advanced use only.
+    Matrix containing estimates of the products
+    \eqn{\lambda(x_i)\lambda(x_j)}{lambda(x[i]) * lambda(x[j])}
+    of the intensities at each pair of data points 
+    \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}. 
+  }
+  \item{reciplambda}{
+    Alternative to \code{lambda}.
+    Values of the estimated \emph{reciprocal} \eqn{1/\lambda}{1/lambda}
+    of the intensity function.
+    Either a vector giving the reciprocal intensity values
+    at the points of the pattern \code{X},
+    a pixel image (object of class \code{"im"}) giving the
+    reciprocal intensity values at all locations,
+    or a \code{function(x,y)} which can be evaluated to give the
+    reciprocal intensity value at any location.
+  }
+  \item{reciplambda2}{
+    Advanced use only. Alternative to \code{lambda2}.
+    A matrix giving values of the estimated \emph{reciprocal products}
+    \eqn{1/\lambda(x_i)\lambda(x_j)}{1/(lambda(x[i]) * lambda(x[j]))}
+    of the intensities at each pair of data points 
+    \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}. 
+  }
+  \item{diagonal}{
+    Do not use this argument.
+  }
+  \item{sigma,varcov}{
+    Optional arguments passed to  \code{\link{density.ppp}}
+    to control the smoothing bandwidth, when \code{lambda} is
+    estimated by kernel smoothing.
+  }
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    each edge-corrected estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+  
+  Essentially a data frame containing at least the following columns,
+  \item{r}{the vector of values of the argument \eqn{r} 
+    at which \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} has been estimated
+  }
+  \item{theo}{vector of values of \eqn{\pi r^2}{pi * r^2},
+    the theoretical value of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)}
+    for an inhomogeneous Poisson process
+  }
+  and containing additional columns
+  according to the choice specified in the \code{correction}
+  argument. The additional columns are named
+  \code{border}, \code{trans} and \code{iso}
+  and give the estimated values of 
+  \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)}
+  using the border correction, translation correction,
+  and Ripley isotropic correction, respectively.
+
+  If \code{ratio=TRUE} then the return value also has two
+  attributes called \code{"numerator"} and \code{"denominator"}
+  which are \code{"fv"} objects
+  containing the numerators and denominators of each
+  estimate of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)}.
+}
+\details{
+  This computes a generalisation of the \eqn{K} function
+  for inhomogeneous point patterns, proposed by
+  Baddeley, \ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen (2000).
+  
+  The ``ordinary'' \eqn{K} function
+  (variously known as the reduced second order moment function
+  and Ripley's \eqn{K} function), is
+  described under \code{\link{Kest}}. It is defined only
+  for stationary point processes.
+  
+  The inhomogeneous \eqn{K} function
+  \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)}
+  is a direct generalisation to nonstationary point processes.
+  Suppose \eqn{x} is a point process with non-constant intensity
+  \eqn{\lambda(u)}{lambda(u)} at each location \eqn{u}.
+  Define \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} to be the expected
+  value, given that \eqn{u} is a point of \eqn{x},
+  of the sum of all terms
+  \eqn{1/\lambda(x_j)}{1/lambda(x[j])}
+  over all points \eqn{x_j}{x[j]}
+  in the process separated from \eqn{u} by a distance less than \eqn{r}.
+  This reduces to the ordinary \eqn{K} function if
+  \eqn{\lambda()}{lambda()} is constant.
+  If \eqn{x} is an inhomogeneous Poisson process with intensity
+  function \eqn{\lambda(u)}{lambda(u)}, then
+  \eqn{K_{\mbox{\scriptsize\rm inhom}}(r) = \pi r^2}{Kinhom(r) = pi * r^2}.
+  
+  Given a point pattern dataset, the 
+  inhomogeneous \eqn{K} function can be estimated
+  essentially by summing the values
+  \eqn{1/(\lambda(x_i)\lambda(x_j))}{1/(lambda(x[i]) * lambda(x[j]))}
+  for all pairs of points \eqn{x_i, x_j}{x[i], x[j]}
+  separated by a distance less than \eqn{r}.
+
+  This allows us to inspect a point pattern for evidence of 
+  interpoint interactions after allowing for spatial inhomogeneity
+  of the pattern. Values 
+  \eqn{K_{\mbox{\scriptsize\rm inhom}}(r) > \pi r^2}{Kinhom(r) > pi * r^2}
+  are suggestive of clustering.
+
+  The argument \code{lambda} should supply the
+  (estimated) values of the intensity function \eqn{\lambda}{lambda}.
+  It may be either
+  \describe{
+    \item{a numeric vector}{
+      containing the values
+      of the intensity function at the points of the pattern \code{X}.
+    }
+    \item{a pixel image}{
+      (object of class \code{"im"})
+      assumed to contain the values of the intensity function
+      at all locations in the window. 
+    }
+    \item{a fitted point process model}{
+      (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"})
+      whose fitted \emph{trend} can be used as the fitted intensity.
+      (If \code{update=TRUE} the model will first be refitted to the
+      data \code{X} before the trend is computed.)
+    }
+    \item{a function}{
+      which can be evaluated to give values of the intensity at
+      any locations.
+    }
+    \item{omitted:}{
+      if \code{lambda} is omitted, then it will be estimated using
+      a `leave-one-out' kernel smoother.
+    }
+  }
+  If \code{lambda} is a numeric vector, then its length should
+  be equal to the number of points in the pattern \code{X}.
+  The value \code{lambda[i]} is assumed to be the
+  (estimated) value of the intensity
+  \eqn{\lambda(x_i)}{lambda(x[i])} for
+  the point \eqn{x_i}{x[i]} of the pattern \eqn{X}.
+  Each value must be a positive number; \code{NA}'s are not allowed.
+
+  If \code{lambda} is a pixel image, the domain of the image should
+  cover the entire window of the point pattern. If it does not (which
+  may occur near the boundary because of discretisation error),
+  then the missing pixel values 
+  will be obtained by applying a Gaussian blur to \code{lambda} using
+  \code{\link{blur}}, then looking up the values of this blurred image
+  for the missing locations. 
+  (A warning will be issued in this case.)
+
+  If \code{lambda} is a function, then it will be evaluated in the
+  form \code{lambda(x,y)} where \code{x} and \code{y} are vectors
+  of coordinates of the points of \code{X}. It should return a numeric
+  vector with length equal to the number of points in \code{X}.
+
+  If \code{lambda} is omitted, then it will be estimated using
+  a `leave-one-out' kernel smoother,
+  as described in Baddeley, \ifelse{latex}{\out{M\o ller}}{Moller}
+  and Waagepetersen (2000).  The estimate \code{lambda[i]} for the
+  point \code{X[i]} is computed by removing \code{X[i]} from the
+  point pattern, applying kernel smoothing to the remaining points using
+  \code{\link{density.ppp}}, and evaluating the smoothed intensity
+  at the point \code{X[i]}. The smoothing kernel bandwidth is controlled
+  by the arguments \code{sigma} and \code{varcov}, which are passed to
+  \code{\link{density.ppp}} along with any extra arguments.
+  
+  Edge corrections are used to correct bias in the estimation
+  of \eqn{K_{\mbox{\scriptsize\rm inhom}}}{Kinhom}.
+  Each edge-corrected estimate of
+  \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} is
+  of the form
+  \deqn{
+    \widehat K_{\mbox{\scriptsize\rm inhom}}(r) = (1/A)
+    \sum_i \sum_j \frac{1\{d_{ij} \le r\}
+      e(x_i,x_j,r)}{\lambda(x_i)\lambda(x_j)}
+  }{
+    K^inhom(r)= (1/A) sum[i] sum[j] 1(d[i,j] <= r) * 
+    e(x[i],x[j],r)/(lambda(x[i]) * lambda(x[j]))
+  }
+  where \eqn{A} is a constant denominator,
+  \eqn{d_{ij}}{d[i,j]} is the distance between points
+  \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}, and
+  \eqn{e(x_i,x_j,r)}{e(x[i],x[j],r)} is
+  an edge correction factor. For the `border' correction,
+  \deqn{
+    e(x_i,x_j,r) =
+    \frac{1(b_i > r)}{\sum_j 1(b_j > r)/\lambda(x_j)}
+  }{
+    1(b[i] > r)/(sum[j] 1(b[j] > r)/lambda(x[j]))
+  }
+  where \eqn{b_i}{b[i]} is the distance from \eqn{x_i}{x[i]}
+  to the boundary of the window. For the `modified border'
+  correction, 
+  \deqn{
+    e(x_i,x_j,r) =
+    \frac{1(b_i > r)}{\mbox{area}(W \ominus r)}
+  }{
+    1(b[i] > r)/area(W [-] r)
+  }
+  where \eqn{W \ominus r}{W [-] r} is the eroded window obtained
+  by trimming a margin of width \eqn{r} from the border of the original
+  window.
+  For the `translation' correction,
+  \deqn{
+    e(x_i,x_j,r) =
+    \frac 1 {\mbox{area}(W \cap (W + (x_j - x_i)))}
+  }{
+    1/area(W intersect (W + x[j]-x[i]))
+  }
+  and for the `isotropic' correction,
+  \deqn{
+    e(x_i,x_j,r) =
+    \frac 1 {\mbox{area}(W) g(x_i,x_j)}
+  }{
+    1/(area(W) g(x[i],x[j]))
+  }
+  where \eqn{g(x_i,x_j)}{g(x[i],x[j])} is the fraction of the
+  circumference of the circle with centre \eqn{x_i}{x[i]} and radius
+  \eqn{||x_i - x_j||}{||x[i]-x[j]||} which lies inside the window.
+
+  If \code{renormalise=TRUE} (the default), then the estimates
+  described above 
+  are multiplied by \eqn{c^{\mbox{normpower}}}{c^normpower} where 
+  \eqn{
+    c = \mbox{area}(W)/\sum (1/\lambda(x_i)).
+  }{
+    c = area(W)/sum[i] (1/lambda(x[i])).
+  }
+  This rescaling reduces the variability and bias of the estimate
+  in small samples and in cases of very strong inhomogeneity.
+  The default value of \code{normpower} is 1 (for consistency with
+  previous versions of \pkg{spatstat})
+  but the most sensible value is 2, which would correspond to rescaling
+  the \code{lambda} values so that
+  \eqn{
+    \sum (1/\lambda(x_i)) = \mbox{area}(W).
+  }{
+    sum[i] (1/lambda(x[i])) = area(W).
+  }
+  
+  If the point pattern \code{X} contains more than about 1000 points,
+  the isotropic and translation edge corrections can be computationally
+  prohibitive. The computations for the border method are much faster,
+  and are statistically efficient when there are large numbers of
+  points. Accordingly, if the number of points in \code{X} exceeds
+  the threshold \code{nlarge}, then only the border correction will be
+  computed. Setting \code{nlarge=Inf} or \code{correction="best"}
+  will prevent this from happening.
+  Setting \code{nlarge=0} is equivalent to selecting only the border
+  correction with \code{correction="border"}.
+
+  The pair correlation function can also be applied to the
+  result of \code{Kinhom}; see \code{\link{pcf}}.
+}
+\references{
+  Baddeley, A.,
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2000)
+  Non- and semiparametric estimation of interaction in
+  inhomogeneous point patterns.
+  \emph{Statistica Neerlandica} \bold{54}, 329--350.
+}
+\seealso{
+  \code{\link{Kest}},
+  \code{\link{pcf}}
+}
+\examples{
+  # inhomogeneous pattern of maples
+  X <- unmark(split(lansing)$maple)
+  \testonly{
+     sub <- sample(c(TRUE,FALSE), npoints(X), replace=TRUE, prob=c(0.1,0.9))
+     X <- X[sub]
+  }
+
+  # (1) intensity function estimated by model-fitting
+  # Fit spatial trend: polynomial in x and y coordinates
+  fit <- ppm(X, ~ polynom(x,y,2), Poisson())
+  # (a) predict intensity values at points themselves,
+  #     obtaining a vector of lambda values
+  lambda <- predict(fit, locations=X, type="trend")
+  # inhomogeneous K function
+  Ki <- Kinhom(X, lambda)
+  plot(Ki)
+  # (b) predict intensity at all locations,
+  #     obtaining a pixel image
+  lambda <- predict(fit, type="trend")
+  Ki <- Kinhom(X, lambda)
+  plot(Ki)
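+
+  # (c) the fitted model itself may be supplied as lambda;
+  #     update=FALSE uses its fitted intensity without refitting
+  Ki <- Kinhom(X, fit, update=FALSE)
+  plot(Ki)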
+
+  # (2) intensity function estimated by heavy smoothing
+  Ki <- Kinhom(X, sigma=0.1)
+  plot(Ki)
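+
+  # renormalised estimate with normpower=2 (see Details)
+  Ki <- Kinhom(X, sigma=0.1, normpower=2)
+  plot(Ki)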
+
+  # (3) simulated data: known intensity function
+  lamfun <- function(x,y) { 50 + 100 * x }
+  # inhomogeneous Poisson process
+  Y <- rpoispp(lamfun, 150, owin())
+  # inhomogeneous K function
+  Ki <- Kinhom(Y, lamfun)
+  plot(Ki)
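+
+  # the reciprocal intensity may be supplied instead, via reciplambda
+  Ki <- Kinhom(Y, reciplambda=function(x,y) { 1/(50 + 100 * x) })
+  plot(Ki)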
+
+  # How to make simulation envelopes:
+  #      Example shows method (2)
+  \dontrun{
+  smo <- density.ppp(X, sigma=0.1)
+  Ken <- envelope(X, Kinhom, nsim=99,
+                  simulate=expression(rpoispp(smo)),
+                  sigma=0.1, correction="trans")
+  plot(Ken)
+  }
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Kmark.Rd b/man/Kmark.Rd
new file mode 100644
index 0000000..43cc50d
--- /dev/null
+++ b/man/Kmark.Rd
@@ -0,0 +1,191 @@
+\name{Kmark}
+\alias{Kmark}
+\alias{markcorrint}
+\title{Mark-Weighted K Function}
+\description{
+  Estimates the mark-weighted \eqn{K} function
+  of a marked point pattern.
+}
+\usage{
+  Kmark(X, f = NULL, r = NULL,
+        correction = c("isotropic", "Ripley", "translate"), ...,
+        f1 = NULL, normalise = TRUE, returnL = FALSE, fargs = NULL)
+
+  markcorrint(X, f = NULL, r = NULL,
+              correction = c("isotropic", "Ripley", "translate"), ...,
+              f1 = NULL, normalise = TRUE, returnL = FALSE, fargs = NULL)
+}
+\arguments{
+  \item{X}{The observed point pattern.
+    An object of class \code{"ppp"} or something acceptable to
+    \code{\link{as.ppp}}. 
+  }
+  \item{f}{Optional. Test function \eqn{f} used in the definition of the
+    mark correlation function. An \R function with at least two
+    arguments. There is a sensible default.
+  }
+  \item{r}{Optional. Numeric vector. The values of the argument \eqn{r}
+    at which the mark correlation function 
+    \eqn{k_f(r)}{k[f](r)} should be evaluated.
+    There is a sensible default.
+  }
+  \item{correction}{
+    A character vector containing any selection of the
+    options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}.
+    It specifies the edge correction(s) to be applied.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{f1}{
+    An alternative to \code{f}. If this argument is given,
+    then \eqn{f} is assumed to take the form
+    \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)}.
+  }
+  \item{normalise}{
+    If \code{normalise=FALSE},
+    compute only the numerator of the expression for the
+    mark correlation.
+  }
+  \item{returnL}{
+    Compute the analogue of the K-function if \code{returnL=FALSE}
+    or the analogue of the L-function if \code{returnL=TRUE}.
+  }
+  \item{fargs}{
+    Optional. A list of extra arguments to be passed to the function
+    \code{f} or \code{f1}.
+  }
+}
+\details{
+  The functions \code{Kmark} and \code{markcorrint} are identical.
+  (Eventually \code{markcorrint} will be deprecated.)
+  
+  The \emph{mark-weighted \eqn{K} function} \eqn{K_f(r)}{K[f](r)}
+  of a marked point process (Penttinen et al, 1992)
+  is a generalisation of Ripley's \eqn{K} function, in which the contribution
+  from each pair of points is weighted by a function of their marks.
+  If the marks of the two points are \eqn{m_1, m_2}{m1, m2} then
+  the weight is proportional to \eqn{f(m_1, m_2)}{f(m1, m2)} where
+  \eqn{f} is a specified \emph{test function}.
+
+  The mark-weighted \eqn{K} function is defined so that
+  \deqn{
+    \lambda K_f(r) = \frac{C_f(r)}{E[ f(M_1, M_2) ]}
+  }{
+    lambda * K_f(r) = C[f](r)/E[f(M1, M2)]
+  }
+  where 
+  \deqn{
+    C_f(r) = 
+    E \left[
+    \sum_{x \in X}
+    f(m(u), m(x))
+    1(0 < ||u - x|| \le r)
+    \;  \big| \;
+    u \in X
+    \right]
+  }{
+    C[f](r) = E[ sum[x in X] f(m(u), m(x)) 1(0 < d(u,x) <= r) | u in X]
+  }
+  for any spatial location \eqn{u} taken to be a typical point of
+  the point process \eqn{X}. Here \eqn{||u-x||}{d(u,x)} is the
+  euclidean distance between \eqn{u} and \eqn{x}, so that the sum
+  is taken over all random points \eqn{x} that lie within a distance
+  \eqn{r} of the point \eqn{u}. The function \eqn{C_f(r)}{C[f](r)} is
+  the \emph{unnormalised} mark-weighted \eqn{K} function.
+  To obtain \eqn{K_f(r)}{K[f](r)} we standardise \eqn{C_f(r)}{C[f](r)}
+  by dividing by \eqn{E[f(M_1,M_2)]}{E[f(M1,M2)]}, the expected value of
+  \eqn{f(M_1,M_2)}{f(M1,M2)} when \eqn{M_1}{M1} and \eqn{M_2}{M2} are
+  independent random marks with the same distribution as the marks in
+  the point process. 
+
+  Under the hypothesis of random labelling, the
+  mark-weighted \eqn{K} function 
+  is equal to Ripley's \eqn{K} function,
+  \eqn{K_f(r) = K(r)}{K[f](r) = K(r)}.
+
+  The mark-weighted \eqn{K} function is sometimes called the 
+  \emph{mark correlation integral} because it is related to the
+  mark correlation function \eqn{k_f(r)}{k[f](r)}
+  and the pair correlation function \eqn{g(r)} by
+  \deqn{
+    K_f(r) = 2 \pi \int_0^r s k_f(s) \, g(s) \, {\rm d}s
+  }{
+    K[f](r) = 2 * pi * integral[0,r] (s * k[f](s) * g(s) ) ds
+  }
+  See \code{\link{markcorr}} for a definition of the
+  mark correlation function.
+
+  Given a marked point pattern \code{X},
+  this command computes edge-corrected estimates
+  of the mark-weighted \eqn{K} function.
+  If \code{returnL=FALSE} then the estimated
+  function \eqn{K_f(r)}{K[f](r)} is returned;
+  otherwise the function
+  \deqn{
+    L_f(r) = \sqrt{K_f(r)/\pi}
+  }{
+    L[f](r) = sqrt(K[f](r)/pi)
+  }
+  is returned.
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+  
+  Essentially a data frame containing numeric columns 
+  \item{r}{the values of the argument \eqn{r} 
+    at which the mark correlation integral \eqn{K_f(r)}{K[f](r)}
+    has been  estimated
+  }
+  \item{theo}{the theoretical value of \eqn{K_f(r)}{K[f](r)}
+    when the marks attached to different points are independent,
+    namely \eqn{\pi r^2}{pi * r^2}
+  }
+  together with a column or columns named 
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the mark-weighted \eqn{K} function \eqn{K_f(r)}{K[f](r)}
+  obtained by the edge corrections named (if \code{returnL=FALSE}).
+}
+\references{
+  Penttinen, A., Stoyan, D. and Henttonen, H. M. (1992)
+  Marked point processes in forest statistics.
+  \emph{Forest Science} \bold{38}, 806--824.
+
+  Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008)
+  \emph{Statistical analysis and modelling of spatial point patterns}.
+  Chichester: John Wiley.
+}
+\seealso{
+  \code{\link{markcorr}} to estimate the mark correlation function.
+}
+\examples{
+    # CONTINUOUS-VALUED MARKS:
+    # (1) Spruces
+    # marks represent tree diameter
+    # mark-weighted K function
+    ms <- Kmark(spruces)
+    plot(ms)
+
+    # (2) simulated data with independent marks
+    X <- rpoispp(100)
+    X <- X \%mark\% runif(npoints(X))
+    Xc <- Kmark(X)
+    plot(Xc)
+    
+    # MULTITYPE DATA:
+    # Hughes' amacrine data
+    # Cells marked as 'on'/'off'
+    M <- Kmark(amacrine, function(m1,m2) {m1==m2},
+                         correction="translate")
+    plot(M)
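+
+    # f1 form: f is taken as f(m1,m2) = f1(m1) * f1(m2);
+    # a sketch with f1 the identity, so f(m1,m2) = m1 * m2
+    ms2 <- Kmark(spruces, f1=function(m) { m })
+    plot(ms2)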
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Kmeasure.Rd b/man/Kmeasure.Rd
new file mode 100644
index 0000000..33c6c6c
--- /dev/null
+++ b/man/Kmeasure.Rd
@@ -0,0 +1,180 @@
+\name{Kmeasure}
+\alias{Kmeasure}
+\title{Reduced Second Moment Measure}
+\description{
+Estimates the reduced second moment measure \eqn{\kappa}{Kappa} 
+from a point pattern in a window of arbitrary shape.
+}
+\usage{
+  Kmeasure(X, sigma, edge=TRUE, \dots, varcov=NULL)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of \eqn{\kappa}{Kappa} will be computed.
+    An object of class \code{"ppp"}, or data
+    in any format acceptable to \code{\link{as.ppp}()}.
+    }
+    \item{sigma}{
+      Standard deviation \eqn{\sigma}{sigma} of the Gaussian
+      smoothing kernel. Incompatible with \code{varcov}.
+    }
+    \item{edge}{
+      Logical value indicating whether an edge correction
+      should be applied.
+    }
+    \item{\dots}{
+      Arguments passed to \code{\link{as.mask}} controlling
+      the pixel resolution.
+    }
+    \item{varcov}{
+      Variance-covariance matrix of the Gaussian smoothing kernel.
+      Incompatible with \code{sigma}.
+    }
+}
+\value{
+  A real-valued pixel image (an object of class \code{"im"},
+  see \code{\link{im.object}}) whose pixel values are estimates
+  of the density of the reduced second moment measure
+  at each location.
+}
+\details{
+  Given a point pattern dataset, 
+  this command computes an estimate of the reduced second moment
+  measure \eqn{\kappa}{Kappa} of the point process.
+  The result is a pixel image whose pixel values are estimates of
+  the density of the reduced second moment measure. 
+
+  The reduced second moment measure \eqn{\kappa}{Kappa}
+  can be regarded as a generalisation of the more familiar
+  \eqn{K}-function.
+  An estimate of \eqn{\kappa}{Kappa} derived from a spatial point
+  pattern dataset can be useful in exploratory data analysis.
+  Its advantage over the \eqn{K}-function is that it is also sensitive
+  to anisotropy and directional effects. 
+  
+  In a nutshell, the command \code{Kmeasure} computes a smoothed version
+  of the \emph{Fry plot}. 
+  As explained under \code{\link{fryplot}}, the Fry plot is a scatterplot of the
+  vectors joining all pairs of points in the pattern.
+  The reduced second moment measure is (essentially) defined as
+  the average of the Fry plot over different realisations of the point
+  process. The command \code{Kmeasure} effectively smooths the Fry plot
+  of a dataset to obtain an estimate of the reduced second moment measure.
+
+  In formal terms, the reduced second moment measure \eqn{\kappa}{Kappa}
+  of a stationary point process \eqn{X} is a measure defined on the
+  two-dimensional plane such that,
+  for a `typical' point \eqn{x} of the process, 
+  the expected number of other points \eqn{y} of the process
+  such that the vector \eqn{y - x} lies in a region \eqn{A},
+  equals \eqn{\lambda \kappa(A)}{lambda * Kappa(A)}.
+  Here \eqn{\lambda}{lambda}
+  is the intensity of the process,
+  i.e. the expected number of points of \eqn{X} per unit area.
+
+  The \eqn{K}-function is a special case. The function value \eqn{K(t)} is
+  the value of the reduced second moment measure
+  for the disc of radius \eqn{t} centred at the origin; that is,
+  \eqn{K(t) = \kappa(b(0,t))}{K(t) = Kappa(b(0,t))}.
+  
+  The command \code{Kmeasure} computes an estimate of \eqn{\kappa}{Kappa}
+  from a point pattern dataset \code{X},
+  which is assumed to be a realisation of a stationary point process,
+  observed inside a known, bounded window. Marks are ignored.
+
+  The algorithm approximates the point pattern and its window by binary pixel
+  images, introduces a Gaussian smoothing kernel
+  and uses the Fast Fourier Transform \code{\link{fft}}
+  to form a density estimate of \eqn{\kappa}{Kappa}. The calculation
+  corresponds to the edge correction known as the ``translation
+  correction''.
+
+  The Gaussian smoothing kernel may be specified by either of the
+  arguments \code{sigma} or \code{varcov}. If \code{sigma} is a single
+  number, this specifies an isotropic Gaussian kernel
+  with standard deviation \code{sigma} on each coordinate axis.
+  If \code{sigma} is a vector of two numbers, this specifies a Gaussian
+  kernel with standard deviation \code{sigma[1]} on the \eqn{x} axis,
+  standard deviation \code{sigma[2]} on the \eqn{y} axis, and zero
+  correlation between the \eqn{x} and \eqn{y} axes. If \code{varcov} is
+  given, this specifies the variance-covariance matrix of the
+  Gaussian kernel. There do not seem to be any well-established rules
+  for selecting the smoothing kernel in this context.
+  
+  The density estimate of \eqn{\kappa}{Kappa}
+  is returned in the form of a real-valued pixel image.
+  Pixel values are estimates of the normalised
+  second moment density at the centre of the pixel.
+  (The uniform Poisson process would have values identically equal to
+  \eqn{1}.)
+  The image \code{x} and \code{y}
+  coordinates are on the same scale as vector displacements in the
+  original point pattern window. The point \code{x=0, y=0} corresponds
+  to the `typical point'.
+  A peak in the image near \code{(0,0)} suggests clustering;
+  a dip in the image near \code{(0,0)} suggests inhibition;
+  peaks or dips at other positions suggest possible periodicity.
+  
+  If desired, the value of \eqn{\kappa(A)}{Kappa(A)} for a region
+  \eqn{A} can be estimated by computing the integral of the pixel image
+  over the domain \eqn{A}, i.e.\ summing the pixel values and
+  multiplying by pixel area, using \code{\link{integral.im}}.
+  One possible application is to compute anisotropic counterparts of the
+  \eqn{K}-function (in which the disc of radius \eqn{t} is replaced
+  by another shape). See Examples.
+}
+\section{Warning}{
+  Some writers use the term \emph{reduced second moment measure}
+  when they mean the \eqn{K}-function. This has caused
+  confusion. 
+ 
+  As originally defined, the
+  reduced second moment measure is a measure, obtained by modifying
+  the second moment measure, while the \eqn{K}-function is a function
+  obtained by evaluating this measure for discs of increasing radius.
+  In \pkg{spatstat}, the \eqn{K}-function is computed by
+  \code{\link{Kest}} and the reduced second moment measure is computed
+  by \code{Kmeasure}.
+}
+\references{
+  Stoyan, D., Kendall, W.S. and Mecke, J. (1995)
+  \emph{Stochastic geometry and its applications}.
+  2nd edition. Springer Verlag.
+
+  Stoyan, D. and Stoyan, H. (1994)
+  \emph{Fractals, random shapes and point fields:
+  methods of geometrical statistics}.
+  John Wiley and Sons.
+} 
+\seealso{
+  \code{\link{Kest}},
+  \code{\link{fryplot}},
+  \code{\link{spatstat.options}},
+  \code{\link{integral.im}},
+  \code{\link{im.object}}
+}
+\examples{
+ data(cells)
+ plot(Kmeasure(cells, 0.05))
+ # shows pronounced dip around origin consistent with strong inhibition
+ data(redwood)
+ plot(Kmeasure(redwood, 0.03), col=grey(seq(1,0,length=32)))
+ # shows peaks at several places, reflecting clustering and ?periodicity
+ M <- Kmeasure(cells, 0.05)
+ # evaluate measure on a sector
+ W <- Window(M)
+ ang <- as.im(atan2, W)
+ rad <- as.im(function(x,y){sqrt(x^2+y^2)}, W)
+ sector <- solutionset(ang > 0 & ang < 1 & rad < 0.6)
+ integral.im(M[sector, drop=FALSE])
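+ # since K(t) = Kappa(b(0,t)), integrating over a disc of radius t
+ # recovers the K-function value (a sketch)
+ disc <- solutionset(rad < 0.06)
+ integral.im(M[disc, drop=FALSE])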
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{nonparametric}
+ 
+ 
diff --git a/man/Kmodel.Rd b/man/Kmodel.Rd
new file mode 100644
index 0000000..eb3fa97
--- /dev/null
+++ b/man/Kmodel.Rd
@@ -0,0 +1,62 @@
+\name{Kmodel}
+\alias{Kmodel}
+\alias{pcfmodel}
+\title{K Function or Pair Correlation Function of a Point Process Model}
+\description{
+  Returns the theoretical \eqn{K} function or the pair correlation function
+  of a point process model.
+}
+\usage{
+   Kmodel(model, \dots)
+
+   pcfmodel(model, \dots)
+}
+\arguments{
+  \item{model}{
+    A fitted point process model of some kind.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\value{
+  A \code{function} in the \R language,
+  which takes one argument \code{r}.
+}
+\details{
+  For certain types of point process models, it is possible to
+  write down a mathematical expression for the \eqn{K} function
+  or the pair correlation function of the model. 
+  
+  The functions \code{Kmodel} and \code{pcfmodel} give the
+  theoretical \eqn{K}-function and the theoretical pair correlation
+  function for a point process model that has been fitted to data.
+  
+  The functions \code{Kmodel} and \code{pcfmodel} are generic,
+  with methods for the classes \code{"kppm"} (cluster processes
+  and Cox processes) and \code{"ppm"} (Gibbs processes).
+  
+  The return value is a \code{function} in the \R language,
+  which takes one argument \code{r}.
+  Evaluation of this function, on a numeric vector \code{r},
+  yields values of the desired \eqn{K} function or pair correlation
+  function at these distance values.
+}
+\seealso{
+  \code{\link{Kest}} or \code{\link{pcf}}
+  to estimate the \eqn{K} function or pair correlation function
+  nonparametrically from data.
+
+  \code{\link{Kmodel.kppm}} for the method for cluster processes
+  and Cox processes.
+  
+  \code{\link{Kmodel.ppm}} for the method for Gibbs processes.
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Kmodel.dppm.Rd b/man/Kmodel.dppm.Rd
new file mode 100644
index 0000000..355833f
--- /dev/null
+++ b/man/Kmodel.dppm.Rd
@@ -0,0 +1,42 @@
+\name{Kmodel.dppm}
+\alias{Kmodel.detpointprocfamily}
+\alias{pcfmodel.detpointprocfamily}
+\alias{Kmodel.dppm}
+\alias{pcfmodel.dppm}
+\title{
+  K-function or Pair Correlation Function of a
+  Determinantal Point Process Model
+}
+\description{Returns the theoretical \eqn{K}-function
+  or theoretical pair correlation function of a determinantal point
+  process model as a function of one argument \eqn{r}.
+}
+\usage{
+   \method{Kmodel}{dppm}(model, \dots)
+
+   \method{pcfmodel}{dppm}(model, \dots)
+
+   \method{Kmodel}{detpointprocfamily}(model, \dots)
+
+   \method{pcfmodel}{detpointprocfamily}(model, \dots)
+}
+\arguments{
+  \item{model}{Model of class \code{"detpointprocfamily"} or \code{"dppm"}.}
+  \item{\dots}{Ignored by the user; reserved for some undocumented internal use.}
+}
+\author{
+  \adrian,
+  \rolf
+  and \ege
+}
+\examples{
+  model <- dppMatern(lambda=100, alpha=.01, nu=1, d=2)
+  KMatern <- Kmodel(model)
+  pcfMatern <- pcfmodel(model)
+  plot(KMatern, xlim = c(0,0.05))
+  plot(pcfMatern, xlim = c(0,0.05))
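+  # the returned functions accept a numeric vector of distances r
+  KMatern(c(0.01, 0.03))
+  pcfMatern(0.02)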
+}
diff --git a/man/Kmodel.kppm.Rd b/man/Kmodel.kppm.Rd
new file mode 100644
index 0000000..d9be3dd
--- /dev/null
+++ b/man/Kmodel.kppm.Rd
@@ -0,0 +1,69 @@
+\name{Kmodel.kppm}
+\alias{Kmodel.kppm}
+\alias{pcfmodel.kppm}
+\title{K Function or Pair Correlation Function of Cluster Model or Cox model}
+\description{
+  Returns the theoretical \eqn{K} function or the pair correlation function
+  of a cluster point process model or Cox point process model.
+}
+\usage{
+   \method{Kmodel}{kppm}(model, \dots)
+
+   \method{pcfmodel}{kppm}(model, \dots)
+}
+\arguments{
+  \item{model}{
+    A fitted cluster point process model (object of
+    class \code{"kppm"}) typically obtained from
+    the model-fitting algorithm \code{\link{kppm}}. 
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\value{
+  A \code{function} in the \R language,
+  which takes one argument \code{r}.
+}
+\details{
+  For certain types of point process models, it is possible to
+  write down a mathematical expression for the \eqn{K} function
+  or the pair correlation function of the model. In particular this
+  is possible for a fitted cluster point process model 
+  (object of class \code{"kppm"} obtained from \code{\link{kppm}}).
+  
+  The functions \code{\link{Kmodel}} and \code{\link{pcfmodel}} are generic.
+  The functions documented here are the methods for the class \code{"kppm"}.
+  
+  The return value is a \code{function} in the \R language,
+  which takes one argument \code{r}.
+  Evaluation of this function, on a numeric vector \code{r},
+  yields values of the desired \eqn{K} function or pair correlation
+  function at these distance values.
+}
+\seealso{
+  \code{\link{Kest}} or \code{\link{pcf}}
+  to estimate the \eqn{K} function or pair correlation function
+  nonparametrically from data.
+
+  \code{\link{kppm}} to fit cluster models.
+
+  \code{\link{Kmodel}} for the generic functions.
+
+  \code{\link{Kmodel.ppm}} for the method for Gibbs processes.
+}
+\examples{
+  data(redwood)
+  fit <- kppm(redwood, ~x, "MatClust")
+  K <- Kmodel(fit)
+  K(c(0.1, 0.2))
+  curve(K(x), from=0, to=0.25)
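+  ## the corresponding theoretical pair correlation function (a sketch)
+  g <- pcfmodel(fit)
+  g(c(0.1, 0.2))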
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Kmodel.ppm.Rd b/man/Kmodel.ppm.Rd
new file mode 100644
index 0000000..3744ef2
--- /dev/null
+++ b/man/Kmodel.ppm.Rd
@@ -0,0 +1,83 @@
+\name{Kmodel.ppm}
+\alias{Kmodel.ppm}
+\alias{pcfmodel.ppm}
+\title{K Function or Pair Correlation Function of Gibbs Point Process Model}
+\description{
+  Returns the theoretical \eqn{K} function or the pair correlation function
+  of a fitted Gibbs point process model.
+}
+\usage{
+   \method{Kmodel}{ppm}(model, \dots)
+
+   \method{pcfmodel}{ppm}(model, \dots)
+}
+\arguments{
+  \item{model}{
+    A fitted Poisson or Gibbs point process model (object of
+    class \code{"ppm"}) typically obtained from
+    the model-fitting algorithm \code{\link{ppm}}. 
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\value{
+  A \code{function} in the \R language,
+  which takes one argument \code{r}.
+}
+\details{
+  This function computes an \emph{approximation} to the \eqn{K} function
+  or the pair correlation function of a Gibbs point process.
+  
+  The functions \code{\link{Kmodel}} and \code{\link{pcfmodel}} are generic.
+  The functions documented here are the methods for the class
+  \code{"ppm"}.
+  
+  The approximation is only available for stationary 
+  pairwise-interaction models.
+  It uses the second order Poisson-saddlepoint approximation
+  (Baddeley and Nair, 2012b) which is a combination of
+  the Poisson-Boltzmann-Emden and Percus-Yevick approximations.
+
+  The return value is a \code{function} in the \R language,
+  which takes one argument \code{r}.
+  Evaluation of this function, on a numeric vector \code{r},
+  yields values of the desired \eqn{K} function or pair correlation
+  function at these distance values.
+}
+\seealso{
+ \code{\link{Kest}} or \code{\link{pcf}}
+  to estimate the \eqn{K} function or pair correlation function
+  nonparametrically from data.
+
+  \code{\link{ppm}} to fit Gibbs models.
+
+  \code{\link{Kmodel}} for the generic functions.
+
+  \code{\link{Kmodel.kppm}} for the method for cluster/Cox processes.
+}
+\examples{
+  fit <- ppm(swedishpines, ~1, Strauss(8))
+  p <- pcfmodel(fit)
+  K <- Kmodel(fit)
+  p(6)
+  K(8)
+  curve(K(x), from=0, to=15)
+}
+\references{
+  Baddeley, A. and Nair, G. (2012a) 
+  Fast approximation of the intensity of Gibbs point processes.
+  \emph{Electronic Journal of Statistics} \bold{6} 1155--1169.
+  
+  Baddeley, A. and Nair, G. (2012b)
+  Approximating the moments of a spatial point process.
+  \emph{Stat} \bold{1}, 1, 18--30.
+  doi: 10.1002/sta4.5
+}
+\author{\adrian
+  
+  
+  and Gopalan Nair.
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Kmulti.Rd b/man/Kmulti.Rd
new file mode 100644
index 0000000..c855961
--- /dev/null
+++ b/man/Kmulti.Rd
@@ -0,0 +1,223 @@
+\name{Kmulti}
+\alias{Kmulti}
+\title{
+Marked K-Function
+}
+\description{
+For a marked point pattern, 
+estimate the multitype \eqn{K} function
+which counts the expected number of points of subset \eqn{J}
+within a given distance from a typical point in subset \eqn{I}.
+}
+\usage{
+Kmulti(X, I, J, r=NULL, breaks=NULL, correction, \dots, ratio=FALSE)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the multitype \eqn{K} function
+    \eqn{K_{IJ}(r)}{KIJ(r)} will be computed.
+    It must be a marked point pattern.
+    See under Details.
+  }
+  \item{I}{Subset index specifying the points of \code{X}
+    from which distances are measured. See Details.
+  }
+  \item{J}{Subset index specifying the points in \code{X} to which
+    distances are measured. See Details.
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the multitype \eqn{K} function
+    \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{breaks}{
+	This argument is for internal use only.
+  }
+  \item{correction}{
+    A character vector containing any selection of the
+    options \code{"border"}, \code{"bord.modif"},
+    \code{"isotropic"}, \code{"Ripley"}, \code{"translate"},
+    \code{"translation"},
+    \code{"none"} or \code{"best"}.
+    It specifies the edge correction(s) to be applied.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{\dots}{Ignored.}
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    each edge-corrected estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+}
+\value{
+An object of class \code{"fv"} (see \code{\link{fv.object}}).
+
+Essentially a data frame containing numeric columns 
+\item{r}{the values of the argument \eqn{r} 
+at which the function \eqn{K_{IJ}(r)}{KIJ(r)} has been  estimated
+}
+\item{theo}{the theoretical value of  \eqn{K_{IJ}(r)}{KIJ(r)}
+for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2}
+}
+together with a column or columns named 
+\code{"border"}, \code{"bord.modif"},
+\code{"iso"} and/or \code{"trans"},
+according to the selected edge corrections. These columns contain
+estimates of the function \eqn{K_{IJ}(r)}{KIJ(r)}
+obtained by the edge corrections named.
+
+  If \code{ratio=TRUE} then the return value also has two
+  attributes called \code{"numerator"} and \code{"denominator"}
+  which are \code{"fv"} objects
+  containing the numerators and denominators of each
+  estimate of \eqn{K(r)}. 
+}
+\details{
+The function \code{Kmulti}
+generalises \code{\link{Kest}} (for unmarked point
+patterns) and \code{\link{Kdot}} and \code{\link{Kcross}} (for
+multitype point patterns) to arbitrary marked point patterns.
+
+Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly
+overlapping, of a marked point process.
+The multitype \eqn{K} function 
+is defined so that
+\eqn{\lambda_J K_{IJ}(r)}{lambda[J] KIJ(r)} equals the expected number of
+additional random points of \eqn{X_J}{X[J]} 
+within a distance \eqn{r} of a
+typical point of  \eqn{X_I}{X[I]}.
+Here \eqn{\lambda_J}{lambda[J]}
+is the intensity of \eqn{X_J}{X[J]},
+i.e. the expected number of points of \eqn{X_J}{X[J]} per unit area.
+The function \eqn{K_{IJ}}{KIJ} is determined by the 
+second order moment properties of \eqn{X}.
+
+The argument \code{X} must be a point pattern (object of class
+\code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
+
+The arguments \code{I} and \code{J} specify two subsets of the
+point pattern. They may be any type of subset indices, for example,
+logical vectors of length equal to \code{npoints(X)},
+or integer vectors with entries in the range 1 to
+\code{npoints(X)}, or negative integer vectors.
+
+Alternatively, \code{I} and \code{J} may be \bold{functions}
+that will be applied to the point pattern \code{X} to obtain
+index vectors. If \code{I} is a function, then evaluating
+\code{I(X)} should yield a valid subset index. This option
+is useful when generating simulation envelopes using
+\code{\link{envelope}}.
+
+The argument \code{r} is the vector of values for the
+distance \eqn{r} at which \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. 
+It is also used to determine the breakpoints
+(in the sense of \code{\link{hist}})
+for the computation of histograms of distances.
+
+First-time users are strongly advised not to specify \code{r}.
+However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, 
+and \code{max(r)} must be larger than the radius of the largest disc 
+contained in the window. 
+
+This algorithm assumes that \code{X} can be treated
+as a realisation of a stationary (spatially homogeneous) 
+random spatial point process in the plane, observed through
+a bounded window.
+The window (which is specified in \code{X} as \code{Window(X)})
+may have arbitrary shape.
+
+Biases due to edge effects are
+treated in the same manner as in \code{\link{Kest}}.
+The edge corrections implemented here are
+\describe{
+\item{border}{the border method or
+``reduced sample'' estimator (see Ripley, 1988). This is
+the least efficient (statistically) and the fastest to compute.
+It can be computed for a window of arbitrary shape.
+}
+\item{isotropic/Ripley}{Ripley's isotropic correction
+(see Ripley, 1988; Ohser, 1983).
+This is currently implemented only for rectangular and polygonal windows.
+}
+\item{translate}{Translation correction (Ohser, 1983).
+Implemented for all window geometries.
+}
+  }
+
+  The pair correlation function \code{\link{pcf}} can also be applied to the
+  result of \code{Kmulti}.
+
+}
+\references{
+  Cressie, N.A.C. \emph{Statistics for spatial data}.
+    John Wiley and Sons, 1991.
+
+  Diggle, P.J. \emph{Statistical analysis of spatial point patterns}.
+    Academic Press, 1983.
+
+  Diggle, P. J. (1986).
+  Displaced amacrine cells in the retina of a
+  rabbit: analysis of a bivariate spatial point pattern.
+  \emph{J. Neurosci. Meth.} \bold{18}, 115--125.
+ 
+  Harkness, R.D. and Isham, V. (1983)
+  A bivariate spatial point pattern of ants' nests.
+  \emph{Applied Statistics} \bold{32}, 293--303.
+ 
+  Lotwick, H. W. and Silverman, B. W. (1982).
+  Methods for analysing spatial processes of several types of points.
+  \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413.
+
+  Ripley, B.D. \emph{Statistical inference for spatial processes}.
+  Cambridge University Press, 1988.
+
+  Stoyan, D., Kendall, W.S. and Mecke, J.
+  \emph{Stochastic geometry and its applications}.
+  2nd edition. Springer Verlag, 1995.
+
+  Van Lieshout, M.N.M. and Baddeley, A.J. (1999)
+  Indices of dependence between types in multivariate point patterns.
+  \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532.
+
+}
+\section{Warnings}{
+  The function \eqn{K_{IJ}}{KIJ} is not necessarily differentiable.
+
+  The border correction (reduced sample) estimator of
+  \eqn{K_{IJ}}{KIJ} used here is pointwise approximately 
+  unbiased, but need not be a nondecreasing function of \eqn{r},
+  while the true  \eqn{K_{IJ}}{KIJ} must be nondecreasing.
+}
+\seealso{
+ \code{\link{Kcross}},
+ \code{\link{Kdot}},
+ \code{\link{Kest}},
+ \code{\link{pcf}}
+}
+\examples{
+     # Longleaf Pine data: marks represent diameter
+    trees <- longleaf
+    \testonly{
+        trees <- trees[seq(1,npoints(trees), by=50), ]
+    }
+    K <- Kmulti(trees, marks(trees) <= 15, marks(trees) >= 25)
+    plot(K)
+    # functions determining subsets
+    f1 <- function(X) { marks(X) <= 15 }
+    f2 <- function(X) { marks(X) >= 25 }
+    K <- Kmulti(trees, f1, f2)
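+    ## sketch: simulation envelopes under random labelling,
+    ## as mentioned in Details (commented out; slow)
+    # E <- envelope(trees, Kmulti, I=f1, J=f2,
+    #               simulate=expression(rlabel(trees)), nsim=19)
+    # plot(E)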
+   \testonly{
+        rm(trees)
+    }
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
+
+
diff --git a/man/Kmulti.inhom.Rd b/man/Kmulti.inhom.Rd
new file mode 100644
index 0000000..ad37c78
--- /dev/null
+++ b/man/Kmulti.inhom.Rd
@@ -0,0 +1,285 @@
+\name{Kmulti.inhom}
+\alias{Kmulti.inhom}
+\title{
+  Inhomogeneous Marked K-Function
+}
+\description{
+  For a marked point pattern, 
+  estimate the inhomogeneous version of the multitype \eqn{K} function
+  which counts the expected number of points of subset \eqn{J}
+  within a given distance from a typical point in subset \eqn{I},
+  adjusted for spatially varying intensity.
+}
+\usage{
+  Kmulti.inhom(X, I, J, lambdaI=NULL, lambdaJ=NULL,
+          \dots,
+          r=NULL, breaks=NULL,
+          correction=c("border", "isotropic", "Ripley", "translate"),
+          lambdaIJ=NULL,
+          sigma=NULL, varcov=NULL,
+          lambdaX=NULL, update=TRUE, leaveoneout=TRUE)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the inhomogeneous multitype \eqn{K} function
+    \eqn{K_{IJ}(r)}{KIJ(r)} will be computed.
+    It must be a marked point pattern.
+    See under Details.
+  }
+  \item{I}{Subset index specifying the points of \code{X}
+    from which distances are measured. See Details.
+  }
+  \item{J}{Subset index specifying the points in \code{X} to which
+    distances are measured. See Details.
+  }
+ \item{lambdaI}{
+    Optional.
+    Values of the estimated intensity of the sub-process \code{X[I]}.
+    Either a pixel image (object of class \code{"im"}),
+    a numeric vector containing the intensity values
+    at each of the points in \code{X[I]},
+    a fitted point process model
+    (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}),
+    or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+  }
+ \item{lambdaJ}{
+    Optional.
+    Values of the estimated intensity of the sub-process \code{X[J]}.
+    Either a pixel image (object of class \code{"im"}),
+    a numeric vector containing the intensity values
+    at each of the points in \code{X[J]},
+    a fitted point process model
+    (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}),
+    or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+  }
+  \item{\dots}{Ignored.}
+  \item{r}{Optional. Numeric vector. The values of the argument \eqn{r}
+    at which the multitype \eqn{K} function
+    \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{breaks}{
+	This argument is for internal use only.
+  }
+  \item{correction}{
+    A character vector containing any selection of the
+    options \code{"border"}, \code{"bord.modif"},
+    \code{"isotropic"}, \code{"Ripley"}, \code{"translate"},
+    \code{"none"} or \code{"best"}.
+    It specifies the edge correction(s) to be applied.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{lambdaIJ}{
+    Optional. A matrix containing estimates of
+    the product of the intensities \code{lambdaI} and \code{lambdaJ}
+    for each pair of points, the first point belonging to subset
+    \code{I} and the second point to subset \code{J}.
+  }
+  \item{sigma,varcov}{
+    Optional arguments passed to  \code{\link{density.ppp}}
+    to control the smoothing bandwidth, when \code{lambda} is
+    estimated by kernel smoothing.
+  }
+  \item{lambdaX}{
+    Optional. Values of the intensity for all points of \code{X}.
+    Either a pixel image (object of class \code{"im"}),
+    a numeric vector containing the intensity values
+    at each of the points in \code{X},
+    a fitted point process model
+    (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}),
+    or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+    If present, this argument overrides both \code{lambdaI} and
+    \code{lambdaJ}.
+  }
+  \item{update}{
+    Logical value indicating what to do when
+    \code{lambdaI}, \code{lambdaJ} or \code{lambdaX}
+    is a fitted point process model
+    (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}).
+    If \code{update=TRUE} (the default),
+    the model will first be refitted to the data \code{X}
+    (using \code{\link{update.ppm}} or \code{\link{update.kppm}})
+    before the fitted intensity is computed.
+    If \code{update=FALSE}, the fitted intensity of the
+    model will be computed without re-fitting it to \code{X}.
+  }
+  \item{leaveoneout}{
+    Logical value (passed to \code{\link{density.ppp}} or
+    \code{\link{fitted.ppm}}) specifying whether to use a
+    leave-one-out rule when calculating the intensity.
+  }
+}
+\value{
+An object of class \code{"fv"} (see \code{\link{fv.object}}).
+
+Essentially a data frame containing numeric columns 
+\item{r}{the values of the argument \eqn{r} 
+at which the function \eqn{K_{IJ}(r)}{KIJ(r)} has been  estimated
+}
+\item{theo}{the theoretical value of  \eqn{K_{IJ}(r)}{KIJ(r)}
+for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2}
+}
+together with a column or columns named 
+\code{"border"}, \code{"bord.modif"},
+\code{"iso"} and/or \code{"trans"},
+according to the selected edge corrections. These columns contain
+estimates of the function \eqn{K_{IJ}(r)}{KIJ(r)}
+obtained by the edge corrections named.
+}
+\details{
+  The function \code{Kmulti.inhom}
+  is the counterpart, for spatially-inhomogeneous marked point patterns,
+  of the multitype \eqn{K} function \code{\link{Kmulti}}.
+
+  Suppose \eqn{X} is a marked point process, with marks of any kind.
+  Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are two sub-processes, possibly
+  overlapping. Typically \eqn{X_I}{X[I]} would consist of those points
+  of \eqn{X} whose marks lie in a specified range of mark values,
+  and similarly for \eqn{X_J}{X[J]}. Suppose that
+  \eqn{\lambda_I(u)}{lambdaI(u)}, \eqn{\lambda_J(u)}{lambdaJ(u)} are the
+  spatially-varying intensity functions of \eqn{X_I}{X[I]} and
+  \eqn{X_J}{X[J]} respectively. Consider all the pairs of points
+  \eqn{(u,v)} in the point process \eqn{X} such that the first point
+  \eqn{u} belongs to \eqn{X_I}{X[I]}, the second point \eqn{v}
+  belongs to \eqn{X_J}{X[J]}, and the distance between \eqn{u} and \eqn{v}
+  is less than a specified distance \eqn{r}. Give this pair \eqn{(u,v)}
+  the numerical weight
+  \eqn{1/(\lambda_I(u)\lambda_J(v))}{1/(lambdaI(u) lambdaJ(v))}.
+  Calculate the sum of these weights over all pairs of points as
+  described. This sum (after appropriate edge-correction and
+  normalisation) is the estimated inhomogeneous multitype \eqn{K} function.
+
+  The argument \code{X} must be a point pattern (object of class
+  \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
+
+  The arguments \code{I} and \code{J} specify two subsets of the
+  point pattern. They may be any type of subset indices, for example,
+  logical vectors of length equal to \code{npoints(X)},
+  or integer vectors with entries in the range 1 to
+  \code{npoints(X)}, or negative integer vectors.
+
+  Alternatively, \code{I} and \code{J} may be \bold{functions}
+  that will be applied to the point pattern \code{X} to obtain
+  index vectors. If \code{I} is a function, then evaluating
+  \code{I(X)} should yield a valid subset index. This option
+  is useful when generating simulation envelopes using
+  \code{\link{envelope}}.
+
+  The argument \code{lambdaI} supplies the values
+  of the intensity of the sub-process identified by index \code{I}.
+  It may be either
+  \describe{
+    \item{a pixel image}{(object of class \code{"im"}) which
+      gives the values of the intensity of \code{X[I]}
+      at all locations in the window containing \code{X};
+    }
+    \item{a numeric vector}{containing the values of the
+      intensity of \code{X[I]} evaluated only
+      at the data points of \code{X[I]}. The length of this vector
+      must equal the number of points in \code{X[I]}.
+    }
+    \item{a function}{
+      of the form \code{function(x,y)}
+      which can be evaluated to give values of the intensity at
+      any locations.
+    }
+    \item{a fitted point process model}{
+      (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"})
+      whose fitted \emph{trend} can be used as the fitted intensity.
+      (If \code{update=TRUE} the model will first be refitted to the
+      data \code{X} before the trend is computed.)
+    }
+    \item{omitted:}{
+      if \code{lambdaI} is omitted then it will be estimated
+      using a leave-one-out kernel smoother. 
+    }
+  }
+  If \code{lambdaI} is omitted, then it will be estimated using
+  a `leave-one-out' kernel smoother, as described in Baddeley,
+  \Moller 
+  and Waagepetersen (2000).  The estimate of \code{lambdaI} for a given
+  point is computed by removing the point from the
+  point pattern, applying kernel smoothing to the remaining points using
+  \code{\link{density.ppp}}, and evaluating the smoothed intensity
+  at the point in question. The smoothing kernel bandwidth is controlled
+  by the arguments \code{sigma} and \code{varcov}, which are passed to
+  \code{\link{density.ppp}} along with any extra arguments.
+
+  Similarly \code{lambdaJ} supplies the values
+  of the intensity of the sub-process identified by index \code{J}.
+
+  Alternatively if the argument \code{lambdaX} is given, then it specifies
+  the intensity values for all points of \code{X}, and the
+  arguments \code{lambdaI}, \code{lambdaJ} will be ignored.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. 
+  It is also used to determine the breakpoints
+  (in the sense of \code{\link{hist}})
+  for the computation of histograms of distances.
+
+  First-time users are strongly advised not to specify \code{r}.
+  However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, 
+  and \code{max(r)} must be larger than the radius of the largest disc 
+  contained in the window. 
+
+  Biases due to edge effects are
+  treated in the same manner as in \code{\link{Kinhom}}.
+  The edge corrections implemented here are
+  \describe{
+    \item{border}{the border method or
+      ``reduced sample'' estimator (see Ripley, 1988). This is
+      the least efficient (statistically) and the fastest to compute.
+      It can be computed for a window of arbitrary shape.
+    }
+    \item{isotropic/Ripley}{Ripley's isotropic correction
+      (see Ripley, 1988; Ohser, 1983).
+      This is currently implemented only for rectangular windows.
+    }
+    \item{translate}{Translation correction (Ohser, 1983).
+      Implemented for all window geometries.
+    }
+  }
+  The pair correlation function \code{\link{pcf}} can also be applied to the
+  result of \code{Kmulti.inhom}.
+}
+\references{
+  Baddeley, A., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2000)
+  Non- and semiparametric estimation of interaction in
+  inhomogeneous point patterns.
+  \emph{Statistica Neerlandica} \bold{54}, 329--350.
+}
+\seealso{
+ \code{\link{Kmulti}},
+ \code{\link{Kdot.inhom}},
+ \code{\link{Kcross.inhom}},
+ \code{\link{pcf}}
+}
+\examples{
+    # Finnish Pines data: marked by diameter and height
+    plot(finpines, which.marks="height")
+    II <- (marks(finpines)$height <= 2)
+    JJ <- (marks(finpines)$height > 3)
+    K <- Kmulti.inhom(finpines, II, JJ)
+    plot(K)
+    # functions determining subsets
+    f1 <- function(X) { marks(X)$height <= 2 }
+    f2 <- function(X) { marks(X)$height > 3 }
+    K <- Kmulti.inhom(finpines, f1, f2)
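+    ## sketch: a single intensity estimate for all points, passed as lambdaX
+    ## (the bandwidth sigma=1.5 is an illustrative choice)
+    lamX <- density(unmark(finpines), sigma=1.5, at="points")
+    K2 <- Kmulti.inhom(finpines, II, JJ, lambdaX=lamX)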
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
+
+
diff --git a/man/Kovesi.Rd b/man/Kovesi.Rd
new file mode 100644
index 0000000..f4ef162
--- /dev/null
+++ b/man/Kovesi.Rd
@@ -0,0 +1,88 @@
+\name{Kovesi}
+\alias{Kovesi}
+\docType{data}
+\title{
+  Colour Sequences with Uniform Perceptual Contrast 
+}
+\description{
+  A collection of 41 different sequences of colours,
+  each sequence having a uniform perceptual contrast over its whole
+  range. These sequences make very good colour maps which avoid
+  introducing artefacts when displaying image data.
+}
+\usage{data(Kovesi)}
+\format{
+  A \code{\link{hyperframe}} with the following columns:
+
+  \tabular{ll}{
+    \code{linear} \tab Logical: whether the sequence is linear. \cr
+    \code{diverging} \tab Logical: whether the sequence is diverging. \cr
+    \code{rainbow} \tab Logical: whether the sequence is a rainbow. \cr
+    \code{cyclic} \tab Logical: whether the sequence is cyclic. \cr
+    \code{isoluminant} \tab Logical: whether the sequence is isoluminant. \cr
+    \code{ternary} \tab Logical: whether the sequence is ternary. \cr
+    \code{colsig} \tab Character: colour signature (see Details) \cr
+    \code{l1}, \code{l2} \tab Numeric: lightness parameters \cr
+    \code{chro} \tab Numeric: average chroma (percent) \cr
+    \code{n} \tab Numeric: length of colour sequence \cr
+    \code{cycsh} \tab Numeric: cyclic shift (percent) \cr
+    \code{values} \tab Character: the colour values.
+  }
+}
+\details{
+  Kovesi (2014, 2015) presented a collection of colour sequences
+  that have uniform perceptual contrast over their whole range.
+
+  The dataset \code{Kovesi} provides these data. It is a
+  \code{hyperframe} with 41 rows, in which each row provides information
+  about one colour sequence.
+
+  Additional information in each row specifies whether the
+  colour sequence is \sQuote{linear}, \sQuote{diverging},
+  \sQuote{rainbow}, \sQuote{cyclic}, \sQuote{isoluminant}
+  and/or \sQuote{ternary} as defined by Kovesi (2014, 2015).
+
+  The \sQuote{colour signature} is a string composed of letters
+  representing the successive hues, using the following code:
+  \tabular{ll}{
+    r \tab red \cr
+    g \tab green\cr
+    b \tab blue \cr
+    c \tab cyan \cr
+    m \tab magenta \cr
+    y \tab yellow \cr
+    o \tab orange \cr
+    v \tab violet \cr
+    k \tab black \cr
+    w \tab white \cr
+    j \tab grey (j rhymes with grey)
+  }
+  For example \code{kryw} is the sequence from black to red to yellow to
+  white.
+
+  The column \code{values} contains the colour data themselves.
+  The \code{i}th colour sequence is \code{Kovesi$values[[i]]},
+  a character vector of length 256.
+}
+\source{
+  Dr Peter Kovesi, Centre for Exploration Targeting,
+  University of Western Australia.
+}
+\references{
+  Kovesi, P. (2014)
+  Website \emph{CET Uniform Perceptual Contrast Colour Maps}
+  \url{www.peterkovesi.com/projects/colourmaps/}
+
+  Kovesi, P. (2015)
+  Designing colour maps with uniform perceptual contrast.
+  Manuscript submitted for publication.
+}
+\examples{
+  Kovesi
+  LinearBMW <- Kovesi$values[[28]]
+  plot(colourmap(LinearBMW, range=c(0,1)))
+
+  ## The following would be suitable for spatstat.options(image.colfun)
+  BMWfun <- function(n) { interp.colours(LinearBMW, n) }
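+  ## e.g. to make it the default colour map (not run):
+  # spatstat.options(image.colfun=BMWfun)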
+}
+\keyword{datasets}
diff --git a/man/Kres.Rd b/man/Kres.Rd
new file mode 100644
index 0000000..e6786f9
--- /dev/null
+++ b/man/Kres.Rd
@@ -0,0 +1,106 @@
+\name{Kres}
+\Rdversion{1.1}
+\alias{Kres}
+\title{
+  Residual K Function
+}
+\description{
+  Given a point process model fitted to a point pattern dataset,
+  this function computes the residual \eqn{K} function,
+  which serves as a diagnostic for goodness-of-fit of the model.
+}
+\usage{
+   Kres(object, ...)
+}
+\arguments{
+  \item{object}{
+    Object to be analysed.
+    Either a fitted point process model (object of class \code{"ppm"}),
+    a point pattern (object of class \code{"ppp"}),
+    a quadrature scheme (object of class \code{"quad"}),
+    or the value returned by a previous call to \code{\link{Kcom}}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{Kcom}}.
+  }
+}
+\details{
+  This command provides a diagnostic for the goodness-of-fit of
+  a point process model fitted to a point pattern dataset.
+  It computes a residual version of the \eqn{K} function of the
+  dataset, which should be approximately zero if the model is a good
+  fit to the data.
+
+  In normal use, \code{object} is a fitted point process model
+  or a point pattern. Then \code{Kres} first calls \code{\link{Kcom}}
+  to compute both the nonparametric estimate of the \eqn{K} function
+  and its model compensator. Then \code{Kres} computes the
+  difference between them, which is the residual \eqn{K}-function.
+  
+  Alternatively, \code{object} may be a function value table
+  (object of class \code{"fv"}) that was returned by
+  a previous call to \code{\link{Kcom}}. Then \code{Kres} computes the
+  residual from this object. 
+}
+\value{
+  A function value table (object of class \code{"fv"}),
+  essentially a data frame of function values.
+  There is a plot method for this class. See \code{\link{fv.object}}.
+}
+\references{
+  Baddeley, A., Rubak, E. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2011)
+  Score, pseudo-score and residual
+  diagnostics for spatial point process models.
+  \emph{Statistical Science} \bold{26}, 613--646.
+}
+\author{
+  \adrian
+  
+  
+  \ege and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}.
+}
+\seealso{
+  Related functions:
+  \code{\link{Kcom}}, 
+  \code{\link{Kest}}.
+  
+  Alternative functions:
+  \code{\link{Gres}},
+  \code{\link{psstG}}, \code{\link{psstA}},  \code{\link{psst}}.
+
+  Point process models: \code{\link{ppm}}.
+}
+\examples{
+    data(cells)
+    fit0 <- ppm(cells, ~1) # uniform Poisson
+    \testonly{    fit0 <- ppm(cells, ~1, nd=16)}
+    K0 <- Kres(fit0)
+    K0
+    plot(K0)
+# isotropic-correction estimate
+    plot(K0, ires ~ r)
+# uniform Poisson is clearly not correct
+
+    fit1 <- ppm(cells, ~1, Strauss(0.08))
+    \testonly{fit1 <- ppm(cells, ~1, Strauss(0.08), nd=16)}
+    K1 <- Kres(fit1)
+
+    if(interactive()) {
+      plot(K1, ires ~ r)
+   # fit looks approximately OK; try adjusting interaction distance
+      plot(Kres(cells, interaction=Strauss(0.12)))
+    }
+
+# How to make envelopes
+    \dontrun{
+    E <- envelope(fit1, Kres, model=fit1, nsim=19)
+    plot(E)
+    }
+
+# For computational efficiency
+    Kc <- Kcom(fit1)
+    K1 <- Kres(Kc)
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/Kscaled.Rd b/man/Kscaled.Rd
new file mode 100644
index 0000000..93db014
--- /dev/null
+++ b/man/Kscaled.Rd
@@ -0,0 +1,243 @@
+\name{Kscaled}
+\alias{Kscaled}
+\alias{Lscaled}
+\title{Locally Scaled K-function}
+\description{
+  Estimates the locally-rescaled \eqn{K}-function of
+  a point process.
+}
+\usage{
+  Kscaled(X, lambda=NULL, \dots, r = NULL, breaks = NULL,
+    rmax = 2.5, 
+    correction=c("border", "isotropic", "translate"),
+    renormalise=FALSE, normpower=1,
+    sigma=NULL, varcov=NULL)
+
+  Lscaled(\dots)
+}
+\arguments{
+  \item{X}{
+    The observed data point pattern,
+    from which an estimate of the locally scaled \eqn{K} function
+    will be computed.
+    An object of class \code{"ppp"}
+    or in a format recognised by \code{\link{as.ppp}()}.
+  }
+  \item{lambda}{
+    Optional.
+    Values of the estimated intensity function.
+    Either a vector giving the intensity values
+    at the points of the pattern \code{X},
+    a pixel image (object of class \code{"im"}) giving the
+    intensity values at all locations, a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location,
+    or a fitted point process model (object of class \code{"ppm"}).
+  }
+  \item{\dots}{
+    Arguments passed from \code{Lscaled} to \code{Kscaled}
+    and from \code{Kscaled} to \code{\link{density.ppp}}
+    if \code{lambda} is omitted.
+  }
+  \item{r}{
+    vector of values for the argument \eqn{r} at which
+    the locally scaled \eqn{K} function
+    should be evaluated. (These are rescaled distances.)
+    Not normally given by the user; there is a sensible default.
+  }
+  \item{breaks}{
+    This argument is for internal use only.
+  }
+  \item{rmax}{
+    maximum value of the argument \eqn{r} that should be used.
+    (This is the rescaled distance.)
+  }
+  \item{correction}{
+    A character vector containing any selection of the
+    options \code{"border"}, 
+    \code{"isotropic"}, \code{"Ripley"},
+    \code{"translate"}, \code{"translation"},
+    \code{"none"} or \code{"best"}.
+    It specifies the edge correction(s) to be applied.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{renormalise}{
+    Logical. Whether to renormalise the estimate. See Details.
+  }
+  \item{normpower}{
+    Integer (usually either 1 or 2).
+    Normalisation power. See Details.
+  }
+  \item{sigma,varcov}{
+    Optional arguments passed to  \code{\link{density.ppp}}
+    to control the smoothing bandwidth, when \code{lambda} is
+    estimated by kernel smoothing.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+  
+  Essentially a data frame containing at least the following columns,
+  \item{r}{the vector of values of the argument \eqn{r}
+    at which \eqn{K_{\rm scaled}(r)}{Kscaled(r)} has been estimated
+  }
+  \item{theo}{vector of values of \eqn{\pi r^2}{pi * r^2},
+    the theoretical value of \eqn{K_{\rm scaled}(r)}{Kscaled(r)}
+    for an inhomogeneous Poisson process
+  }
+  and containing additional columns
+  according to the choice specified in the \code{correction}
+  argument. The additional columns are named
+  \code{border}, \code{trans} and \code{iso}
+  and give the estimated values of 
+  \eqn{K_{\rm scaled}(r)}{Kscaled(r)}
+  using the border correction, translation correction,
+  and Ripley isotropic correction, respectively.
+}
+\details{
+  \code{Kscaled} computes an estimate of the \eqn{K} function
+  for a locally scaled point process.
+  \code{Lscaled} computes the corresponding \eqn{L} function
+  \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}.
+
+  Locally scaled point processes are a class of models
+  for inhomogeneous point patterns, introduced by Hahn et al (2003).
+  They include inhomogeneous Poisson processes, and many other models.
+
+  The template \eqn{K} function of a locally-scaled process is a counterpart
+  of the ``ordinary'' Ripley \eqn{K} function, in which
+  the distances between points of the process are measured
+  on a spatially-varying scale (such that the locally rescaled
+  process has unit intensity).
+
+  The template \eqn{K} function is an indicator of interaction
+  between the points. For an inhomogeneous Poisson process, the
+  theoretical template \eqn{K} function is approximately equal
+  to \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}.
+  Values \eqn{K_{\rm scaled}(r) > \pi r^2}{Kscaled(r) > pi * r^2}
+  are suggestive of clustering.
+
+  \code{Kscaled} computes an estimate of the template \eqn{K} function
+  and \code{Lscaled} computes the corresponding \eqn{L} function
+  \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}.
+  
+  The locally scaled interpoint distances are computed
+  using an approximation proposed by Hahn (2007). The Euclidean
+  distance between two points is multiplied by the average of the
+  square roots of the intensity values at the two points.
+
+  The argument \code{lambda} should supply the
+  (estimated) values of the intensity function \eqn{\lambda}{lambda}.
+  It may be either
+  \describe{
+    \item{a numeric vector}{
+      containing the values
+      of the intensity function at the points of the pattern \code{X}.
+    }
+    \item{a pixel image}{
+      (object of class \code{"im"})
+      assumed to contain the values of the intensity function
+      at all locations in the window. 
+    }
+    \item{a function}{
+      which can be evaluated to give values of the intensity at
+      any locations.
+    }
+    \item{omitted:}{
+      if \code{lambda} is omitted, then it will be estimated using
+      a `leave-one-out' kernel smoother.
+    }
+  }
+  If \code{lambda} is a numeric vector, then its length should
+  be equal to the number of points in the pattern \code{X}.
+  The value \code{lambda[i]} is assumed to be the
+  (estimated) value of the intensity
+  \eqn{\lambda(x_i)}{lambda(x[i])} for
+  the point \eqn{x_i}{x[i]} of the pattern \eqn{X}.
+  Each value must be a positive number; \code{NA}'s are not allowed.
+
+  If \code{lambda} is a pixel image, the domain of the image should
+  cover the entire window of the point pattern. If it does not (which
+  may occur near the boundary because of discretisation error),
+  then the missing pixel values 
+  will be obtained by applying a Gaussian blur to \code{lambda} using
+  \code{\link{blur}}, then looking up the values of this blurred image
+  for the missing locations. 
+  (A warning will be issued in this case.)
+
+  If \code{lambda} is a function, then it will be evaluated in the
+  form \code{lambda(x,y)} where \code{x} and \code{y} are vectors
+  of coordinates of the points of \code{X}. It should return a numeric
+  vector with length equal to the number of points in \code{X}.
+
+  If \code{lambda} is omitted, then it will be estimated using
+  a `leave-one-out' kernel smoother,
+  as described in Baddeley, \Moller
+  and Waagepetersen (2000).  The estimate \code{lambda[i]} for the
+  point \code{X[i]} is computed by removing \code{X[i]} from the
+  point pattern, applying kernel smoothing to the remaining points using
+  \code{\link{density.ppp}}, and evaluating the smoothed intensity
+  at the point \code{X[i]}. The smoothing kernel bandwidth is controlled
+  by the arguments \code{sigma} and \code{varcov}, which are passed to
+  \code{\link{density.ppp}} along with any extra arguments.
+  
+  If \code{renormalise=TRUE}, the estimated intensity \code{lambda}
+  is multiplied by \eqn{c^{normpower/2}}{c^(normpower/2)}
+  before performing other calculations, where
+  \eqn{c = \mbox{area}(W)/\sum_i (1/\lambda(x_i))}{c = area(W)/sum[i] (1/lambda(x[i]))}. This
+  renormalisation has about the same effect as in \code{\link{Kinhom}},
+  reducing the variability and bias of the estimate
+  in small samples and in cases of very strong inhomogeneity.
+
+  Edge corrections are used to correct bias in the estimation
+  of \eqn{K_{\rm scaled}}{Kscaled}. First the interpoint distances are 
+  rescaled, and then edge corrections are applied as in \code{\link{Kest}}.
+  See \code{\link{Kest}} for details of the edge corrections
+  and the options for the argument \code{correction}.
+  
+  The pair correlation function can also be applied to the
+  result of \code{Kscaled}; see \code{\link{pcf}} and \code{\link{pcf.fv}}.
+}
+\references{
+  Baddeley, A.,
+  \Moller, J. and Waagepetersen, R. (2000)
+  Non- and semiparametric estimation of interaction in
+  inhomogeneous point patterns.
+  \emph{Statistica Neerlandica} \bold{54}, 329--350.
+
+  Hahn, U. (2007)
+  \emph{Global and Local Scaling in the
+    Statistics of Spatial Point Processes}. Habilitationsschrift,
+  Universitaet Augsburg.
+  
+  Hahn, U., Jensen, E.B.V., van Lieshout, M.N.M. and Nielsen, L.S. (2003)
+  Inhomogeneous spatial point processes by location-dependent scaling.
+  \emph{Advances in Applied Probability} \bold{35}, 319--336.
+
+  \Prokesova, M.,
+  Hahn, U. and Vedel Jensen, E.B. (2006)
+  Statistics for locally scaled point patterns.
+  In A. Baddeley, P. Gregori, J. Mateu, R. Stoica and D. Stoyan (eds.)
+  \emph{Case Studies in Spatial Point Pattern Modelling}.
+  Lecture Notes in Statistics 185. New York: Springer Verlag.
+  Pages 99--123.
+}
+\seealso{
+  \code{\link{Kest}},
+  \code{\link{pcf}}
+}
+\examples{
+  data(bronzefilter)
+  X <- unmark(bronzefilter)
+  K <- Kscaled(X)
+  fit <- ppm(X, ~x)
+  lam <- predict(fit)
+  K <- Kscaled(X, lam)
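+  ## the corresponding locally scaled L function (a sketch)
+  L <- Lscaled(X, lam)
+  plot(L)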
+}
+\author{Ute Hahn,
+  \adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Ksector.Rd b/man/Ksector.Rd
new file mode 100644
index 0000000..b5bdc3e
--- /dev/null
+++ b/man/Ksector.Rd
@@ -0,0 +1,95 @@
+\name{Ksector}
+\alias{Ksector}
+\title{Sector K-function}
+\description{
+  A directional counterpart of Ripley's \eqn{K} function,
+  in which pairs of points are counted only when the
+  vector joining the pair happens to
+  lie in a particular range of angles.
+}
+\usage{
+Ksector(X, begin = 0, end = 360, \dots,
+        units = c("degrees", "radians"), 
+        r = NULL, breaks = NULL,
+        correction = c("border", "isotropic", "Ripley", "translate"),
+        domain=NULL, ratio = FALSE, verbose=TRUE)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of \eqn{K(r)} will be computed.
+    An object of class \code{"ppp"}, or data
+    in any format acceptable to \code{\link{as.ppp}()}.
+  }
+  \item{begin,end}{
+    Numeric values giving the range of angles inside which
+    points will be counted. Angles are measured in degrees
+    (if \code{units="degrees"}, the default) or radians
+    (if \code{units="radians"}) anti-clockwise from the positive \eqn{x}-axis.
+  }
+  \item{\dots}{Ignored.}
+  \item{units}{
+    Units in which the angles \code{begin} and \code{end} are expressed.
+  }
+  \item{r}{
+    Optional. Vector of values for the argument \eqn{r} at which \eqn{K(r)} 
+    should be evaluated. Users are advised \emph{not} to specify this
+    argument; there is a sensible default.
+  }
+  \item{breaks}{
+    This argument is for internal use only.
+  }
+  \item{correction}{
+    Optional. A character vector containing any selection of the
+    options \code{"none"}, \code{"border"}, \code{"bord.modif"},
+    \code{"isotropic"}, \code{"Ripley"}, \code{"translate"},
+    \code{"translation"}, \code{"none"}, \code{"good"} or \code{"best"}.
+    It specifies the edge correction(s) to be applied.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{domain}{
+    Optional window. The first point \eqn{x_i}{x[i]} of each pair of points
+    will be constrained to lie in \code{domain}.
+  }
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    each edge-corrected estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+  \item{verbose}{
+    Logical value indicating whether to print progress reports
+    and warnings.
+  }
+}
+\details{
+  This is a directional counterpart of Ripley's \eqn{K} function
+  (see \code{\link{Kest}}) in which, instead of counting all
+  pairs of points within a specified distance \eqn{r}, we
+  count only the pairs \eqn{(x_i, x_j)}{x[i], x[j]}
+  for which the vector \eqn{x_j - x_i}{x[j] - x[i]}
+  falls in a particular range of angles.
+
+  This can be used to evaluate evidence for anisotropy
+  in the point pattern \code{X}.
+}
+\value{
+  An object of class \code{"fv"} containing the estimated
+  function.
+}
+\seealso{
+  \code{\link{Kest}}
+}
+\examples{
+ K <- Ksector(swedishpines, 0, 90)
+ plot(K)
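+ ## a simple anisotropy check: compare with the perpendicular sector
+ K2 <- Ksector(swedishpines, 90, 180)
+ plot(K2)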
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/LambertW.Rd b/man/LambertW.Rd
new file mode 100644
index 0000000..60976ff
--- /dev/null
+++ b/man/LambertW.Rd
@@ -0,0 +1,61 @@
+\name{LambertW}
+\alias{LambertW}
+\title{
+  Lambert's W Function
+}
+\description{
+  Computes Lambert's W-function.
+}
+\usage{
+LambertW(x)
+}
+\arguments{
+  \item{x}{
+    Vector of nonnegative numbers.
+  }
+}
+\details{
+  Lambert's W-function is the inverse function of
+  \eqn{f(y) = y e^y}{f(y) = y * exp(y)}.
+  That is, \eqn{W} is the function such that
+  \deqn{
+    W(x) e^{W(x)} = x
+  }{
+    W(x) * exp(W(x)) = x
+  }
+
+  This command \code{LambertW} computes \eqn{W(x)} for each entry
+  in the argument \code{x}. 
+  If the package \pkg{gsl} is installed, then the function
+  \code{lambert_W0} in that package is invoked. Otherwise,
+  values of the W-function are computed by root-finding, using the
+  function \code{\link[stats]{uniroot}}.
+
+  Computation using \pkg{gsl} is about 100 times faster.
+
+  If any entries of \code{x} are infinite or \code{NA}, the corresponding
+  results are \code{NA}.
+}
+\value{
+  Numeric vector.
+}
+\references{
+  Corless, R., Gonnet, G., Hare, D., Jeffrey, D. and Knuth, D. (1996)
+  On the Lambert W function.
+  \emph{Advances in Computational Mathematics} \bold{5}, 329--359.
+  
+  Roy, R. and Olver, F. (2010)
+  Lambert W function. In Olver, F., Lozier, D. and Boisvert, R. (eds.),
+  \emph{{NIST} Handbook of Mathematical Functions},
+  Cambridge University Press.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\examples{
+   LambertW(exp(1))
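+   ## sanity check of the defining identity W(x) * exp(W(x)) = x
+   w <- LambertW(2)
+   w * exp(w)   # should return 2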
+}
+\keyword{math}
diff --git a/man/Lcross.Rd b/man/Lcross.Rd
new file mode 100644
index 0000000..36420cb
--- /dev/null
+++ b/man/Lcross.Rd
@@ -0,0 +1,93 @@
+\name{Lcross}
+\alias{Lcross}
+\title{Multitype L-function (cross-type)}
+\description{
+  Calculates an estimate of the cross-type L-function
+  for a multitype point pattern.
+}
+\usage{
+  Lcross(X, i, j, ..., from, to)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the cross-type \eqn{L} function
+    \eqn{L_{ij}(r)}{Lij(r)} will be computed.
+    It must be a multitype point pattern (a marked point pattern
+    whose marks are a factor). See under Details.
+  }
+  \item{i}{The type (mark value)
+    of the points in \code{X} from which distances are measured.
+    A character string (or something that will be converted to a
+    character string).
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{j}{The type (mark value)
+    of the points in \code{X} to which distances are measured.
+    A character string (or something that will be
+    converted to a character string).
+    Defaults to the second level of \code{marks(X)}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{Kcross}}.
+  }
+  \item{from,to}{
+    An alternative way to specify \code{i} and \code{j} respectively.
+  }
+}
+\details{
+  The cross-type L-function is a transformation of the cross-type K-function,
+  \deqn{L_{ij}(r) = \sqrt{\frac{K_{ij}(r)}{\pi}}}{Lij(r) = sqrt(Kij(r)/pi)}
+  where \eqn{K_{ij}(r)}{Kij(r)} is the cross-type K-function
+  from type \code{i} to type \code{j}.
+  See \code{\link{Kcross}} for information
+  about the cross-type K-function.
+
+  The command \code{Lcross} first calls
+  \code{\link{Kcross}} to compute the estimate of the cross-type K-function,
+  and then applies the square root transformation.
+
+  For a marked point pattern in which the points of type \code{i}
+  are independent of the points of type \code{j},
+  the theoretical value of the L-function is
+  \eqn{L_{ij}(r) = r}{Lij(r) = r}.
+  The square root also has the effect of stabilising
+  the variance of the estimator, so that \eqn{L_{ij}}{Lij} is more appropriate
+  for use in simulation envelopes and hypothesis tests.
+}
+
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+
+  Essentially a data frame containing columns
+  \item{r}{the vector of values of the argument \eqn{r} 
+    at which the function \eqn{L_{ij}}{Lij} has been  estimated
+  }
+  \item{theo}{the theoretical value \eqn{L_{ij}(r) = r}{Lij(r) = r}
+    for a stationary Poisson process
+  }
+  together with columns named 
+  \code{"border"}, \code{"bord.modif"},
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the function \eqn{L_{ij}}{Lij} obtained by the edge corrections
+  named.
+}
+\seealso{
+  \code{\link{Kcross}},
+  \code{\link{Ldot}},
+  \code{\link{Lest}}
+}
+\examples{
+ data(amacrine)
+ L <- Lcross(amacrine, "off", "on")
+ plot(L)
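+ ## relation to the K-function: Lij(r) = sqrt(Kij(r)/pi) (a sketch)
+ Kc <- Kcross(amacrine, "off", "on")
+ all.equal(sqrt(Kc$iso / pi), L$iso)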
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Lcross.inhom.Rd b/man/Lcross.inhom.Rd
new file mode 100644
index 0000000..9f8bc78
--- /dev/null
+++ b/man/Lcross.inhom.Rd
@@ -0,0 +1,118 @@
+\name{Lcross.inhom}
+\alias{Lcross.inhom}
+\title{
+  Inhomogeneous Cross Type L Function
+}
+\description{
+  For a multitype point pattern, 
+  estimate the inhomogeneous version of the cross-type \eqn{L} function.
+}
+\usage{
+Lcross.inhom(X, i, j, \dots)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the inhomogeneous cross type \eqn{L} function
+    \eqn{L_{ij}(r)}{Lij(r)} will be computed.
+    It must be a multitype point pattern (a marked point pattern
+    whose marks are a factor). See under Details.
+  }
+  \item{i}{The type (mark value)
+    of the points in \code{X} from which distances are measured.
+    A character string (or something that will be converted to a
+    character string).
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{j}{The type (mark value)
+    of the points in \code{X} to which distances are measured.
+    A character string (or something that will be
+    converted to a character string).
+    Defaults to the second level of \code{marks(X)}.
+  }
+  \item{\dots}{
+    Other arguments passed to \code{\link{Kcross.inhom}}.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+
+  Essentially a data frame containing numeric columns 
+  \item{r}{the values of the argument \eqn{r} 
+    at which the function \eqn{L_{ij}(r)}{Lij(r)} has been  estimated
+  }
+  \item{theo}{the theoretical value of  \eqn{L_{ij}(r)}{Lij(r)}
+    for a marked Poisson process, identically equal to \code{r}
+  }
+  together with a column or columns named 
+  \code{"border"}, \code{"bord.modif"},
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the function \eqn{L_{ij}(r)}{Lij(r)}
+  obtained by the edge corrections named.
+}
+\details{
+  This is a generalisation of the function \code{\link{Lcross}}
+  to include an adjustment for spatially inhomogeneous intensity,
+  in a manner similar to the function \code{\link{Linhom}}.
+
+  All the arguments are passed to \code{\link{Kcross.inhom}}, which 
+  estimates the inhomogeneous multitype K function
+  \eqn{K_{ij}(r)}{Kij(r)} for the point pattern.
+  The resulting values are then
+  transformed by taking \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}.
+}
+\references{
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003)
+  \emph{Statistical Inference and Simulation for Spatial Point Processes}.
+  Chapman and Hall/CRC, Boca Raton.
+}
+\section{Warnings}{
+  The arguments \code{i} and \code{j} are always interpreted as
+  levels of the factor \code{X$marks}. They are converted to character
+  strings if they are not already character strings.
+  The value \code{i=1} does \bold{not}
+  refer to the first level of the factor.
+}
+\seealso{
+ \code{\link{Lcross}},
+ \code{\link{Linhom}},
+ \code{\link{Kcross.inhom}}
+}
+\examples{
+    # Lansing Woods data
+    woods <- lansing
+    \testonly{woods <- woods[seq(1,npoints(woods), by=10)]}
+    ma <- split(woods)$maple
+    wh <- split(woods)$whiteoak
+
+    # method (1): estimate intensities by nonparametric smoothing
+    lambdaM <- density.ppp(ma, sigma=0.15, at="points")
+    lambdaW <- density.ppp(wh, sigma=0.15, at="points")
+    L <- Lcross.inhom(woods, "whiteoak", "maple", lambdaW, lambdaM)
+
+    # method (2): fit parametric intensity model
+    fit <- ppm(woods ~marks * polynom(x,y,2))
+    # evaluate fitted intensities at data points
+    # (these are the intensities of the sub-processes of each type)
+    inten <- fitted(fit, dataonly=TRUE)
+    # split according to types of points
+    lambda <- split(inten, marks(woods))
+    L <- Lcross.inhom(woods, "whiteoak", "maple",
+              lambda$whiteoak, lambda$maple)
+    
+    # synthetic example: type A points have intensity 50,
+    #                    type B points have intensity 50 + 100 * x
+    lamB <- as.im(function(x,y){50 + 100 * x}, owin())
+    X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB))
+    L <- Lcross.inhom(X, "A", "B",
+        lambdaI=as.im(50, Window(X)), lambdaJ=lamB)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Ldot.Rd b/man/Ldot.Rd
new file mode 100644
index 0000000..80681b2
--- /dev/null
+++ b/man/Ldot.Rd
@@ -0,0 +1,86 @@
+\name{Ldot}
+\alias{Ldot}
+\title{Multitype L-function (i-to-any)}
+\description{
+  Calculates an estimate of the multitype L-function
+  (from type \code{i} to any type)
+  for a multitype point pattern.
+}
+\usage{
+  Ldot(X, i, ..., from)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the dot-type \eqn{L} function
+    \eqn{L_{ij}(r)}{Lij(r)} will be computed.
+    It must be a multitype point pattern (a marked point pattern
+    whose marks are a factor). See under Details.
+  }
+  \item{i}{The type (mark value)
+    of the points in \code{X} from which distances are measured.
+    A character string (or something that will be converted to a
+    character string).
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{Kdot}}.
+  }
+  \item{from}{An alternative way to specify \code{i}.}
+}
+\details{
+  This command computes 
+  \deqn{L_{i\bullet}(r) = \sqrt{\frac{K_{i\bullet}(r)}{\pi}}}{Li.(r) = sqrt(Ki.(r)/pi)}
+  where \eqn{K_{i\bullet}(r)}{Ki.(r)} is the multitype \eqn{K}-function
+  from points of type \code{i} to points of any type.
+  See \code{\link{Kdot}} for information
+  about \eqn{K_{i\bullet}(r)}{Ki.(r)}.
+
+  The command \code{Ldot} first calls
+  \code{\link{Kdot}} to compute the estimate of the \code{i}-to-any
+  \eqn{K}-function, and then applies the square root transformation.
+
+  For a marked Poisson point process,
+  the theoretical value of the L-function is
+  \eqn{L_{i\bullet}(r) = r}{Li.(r) = r}.
+  The square root also has the effect of stabilising
+  the variance of the estimator, so that \eqn{L_{i\bullet}}{Li.}
+  is more appropriate
+  for use in simulation envelopes and hypothesis tests.
+}
+
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+
+  Essentially a data frame containing columns
+  \item{r}{the vector of values of the argument \eqn{r} 
+    at which the function \eqn{L_{i\bullet}}{Li.} has been  estimated
+  }
+  \item{theo}{the theoretical value \eqn{L_{i\bullet}(r) = r}{Li.(r) = r}
+    for a stationary Poisson process
+  }
+  together with columns named 
+  \code{"border"}, \code{"bord.modif"},
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the function \eqn{L_{i\bullet}}{Li.}
+  obtained by the edge corrections named.
+}
+\seealso{
+  \code{\link{Kdot}},
+  \code{\link{Lcross}},
+  \code{\link{Lest}}
+}
+\examples{
+ data(amacrine)
+ L <- Ldot(amacrine, "off")
+ plot(L)
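+ ## sketch: envelopes under random labelling for a hypothesis test
+ ## (commented out; slow)
+ # E <- envelope(amacrine, Ldot, i="off",
+ #               simulate=expression(rlabel(amacrine)), nsim=19)
+ # plot(E)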
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Ldot.inhom.Rd b/man/Ldot.inhom.Rd
new file mode 100644
index 0000000..b720707
--- /dev/null
+++ b/man/Ldot.inhom.Rd
@@ -0,0 +1,103 @@
+\name{Ldot.inhom}
+\alias{Ldot.inhom}
+\title{
+  Inhomogeneous Multitype L Dot Function
+}
+\description{
+  For a multitype point pattern, 
+  estimate the inhomogeneous version of the dot \eqn{L} function.
+}
+\usage{
+Ldot.inhom(X, i, \dots)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the inhomogeneous cross type \eqn{L} function
+    \eqn{L_{i\bullet}(r)}{Li.(r)} will be computed.
+    It must be a multitype point pattern (a marked point pattern
+    whose marks are a factor). See under Details.
+  }
+  \item{i}{The type (mark value)
+    of the points in \code{X} from which distances are measured.
+    A character string (or something that will be converted to a
+    character string).
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{\dots}{
+    Other arguments passed to \code{\link{Kdot.inhom}}.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+
+  Essentially a data frame containing numeric columns 
+  \item{r}{the values of the argument \eqn{r} 
+    at which the function \eqn{L_{i\bullet}(r)}{Li.(r)} has been  estimated
+  }
+  \item{theo}{the theoretical value of  \eqn{L_{i\bullet}(r)}{Li.(r)}
+    for a marked Poisson process, identical to \eqn{r}.
+  }
+  together with a column or columns named 
+  \code{"border"}, \code{"bord.modif"},
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the function \eqn{L_{i\bullet}(r)}{Li.(r)}
+  obtained by the edge corrections named.
+}
+\details{
+  This is a generalisation of the function \code{\link{Ldot}}
+  to include an adjustment for spatially inhomogeneous intensity,
+  in a manner similar to the function \code{\link{Linhom}}.
+
+  All the arguments are passed to \code{\link{Kdot.inhom}}, which 
+  estimates the inhomogeneous multitype K function
+  \eqn{K_{i\bullet}(r)}{Ki.(r)} for the point pattern.
+  The resulting values are then
+  transformed by taking \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}.
+}
+\references{
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003)
+  \emph{Statistical Inference and Simulation for Spatial Point Processes.}
+  Chapman and Hall/CRC, Boca Raton.
+}
+\section{Warnings}{
+  The argument \code{i} is interpreted as
+  a level of the factor \code{X$marks}. It is converted to a character
+  string if it is not already a character string.
+  The value \code{i=1} does \bold{not}
+  refer to the first level of the factor.
+}
+\seealso{
+ \code{\link{Ldot}},
+ \code{\link{Linhom}},
+ \code{\link{Kdot.inhom}},
+ \code{\link{Lcross.inhom}}.
+}
+\examples{
+    # Lansing Woods data
+    lan <- lansing
+    lan <- lan[seq(1,npoints(lan), by=10)]
+    ma <- split(lan)$maple
+    lg <- unmark(lan)
+
+    # Estimate intensities by nonparametric smoothing
+    lambdaM <- density.ppp(ma, sigma=0.15, at="points")
+    lambdadot <- density.ppp(lg, sigma=0.15, at="points")
+    L <- Ldot.inhom(lan, "maple", lambdaI=lambdaM,
+                                  lambdadot=lambdadot)
+
+
+    # synthetic example: type A points have intensity 50,
+    #                    type B points have intensity 50 + 100 * x
+    lamB <- as.im(function(x,y){50 + 100 * x}, owin())
+    lamdot <- as.im(function(x,y) { 100 + 100 * x}, owin())
+    X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB))
+    L <- Ldot.inhom(X, "B",  lambdaI=lamB,     lambdadot=lamdot)
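+
+    # 'i' must be a level of marks(X); the value 1 would NOT mean
+    # the first level (see Warnings), so check the levels first:
+    levels(marks(lan))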
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/LennardJones.Rd b/man/LennardJones.Rd
new file mode 100644
index 0000000..c8eb3c1
--- /dev/null
+++ b/man/LennardJones.Rd
@@ -0,0 +1,142 @@
+\name{LennardJones}
+\alias{LennardJones}
+\title{The Lennard-Jones Potential}
+\description{
+   Creates the Lennard-Jones pairwise interaction structure
+   which can then be fitted to point pattern data.
+}
+\usage{
+  LennardJones(sigma0=NA)
+}
+\value{
+  An object of class \code{"interact"}
+  describing the Lennard-Jones interpoint interaction
+  structure.
+}
+\arguments{
+  \item{sigma0}{
+    Optional. Initial estimate of the parameter \eqn{\sigma}{sigma}.
+    A positive number.
+  }
+}
+\details{
+  In a pairwise interaction point process with the
+  Lennard-Jones pair potential (Lennard-Jones, 1924)
+  each pair of points in the point pattern,
+  a distance \eqn{d} apart,
+  contributes a factor
+  \deqn{
+    v(d) = \exp \left\{
+    -
+    4\epsilon
+    \left[
+       \left(
+          \frac{\sigma}{d}
+       \right)^{12}
+       -
+       \left(
+          \frac{\sigma}{d}
+       \right)^6
+    \right]
+    \right\}
+  }{
+    v(d) = exp( - 4 * epsilon * ((sigma/d)^12 - (sigma/d)^6))
+  }
+  to the probability density,
+  where \eqn{\sigma}{sigma} and \eqn{\epsilon}{epsilon} are
+  positive parameters to be estimated.
+  
+  See \bold{Examples} for a plot of this expression.
+  
+  This potential causes very strong inhibition between points at short
+  range, and attraction between points at medium range.
+  The parameter  \eqn{\sigma}{sigma} is called the
+  \emph{characteristic diameter} and controls the scale of interaction.
+  The parameter \eqn{\epsilon}{epsilon} is called the \emph{well depth}
+  and determines the strength of attraction.
+  The potential switches from inhibition to attraction at
+  \eqn{d=\sigma}{d=sigma}.
+  The maximum value of the pair potential is
+  \eqn{\exp(\epsilon)}{exp(epsilon)}
+  occurring at distance
+  \eqn{d = 2^{1/6} \sigma}{d = 2^(1/6) * sigma}.
+  Interaction is usually considered to be negligible for distances
+  \eqn{d > 2.5 \sigma \max\{1,\epsilon^{1/6}\}}{d > 2.5 * sigma * max(1, epsilon^(1/6))}.
+
+  This potential is used 
+  to model interactions between uncharged molecules in statistical physics.
+  
+  The function \code{\link{ppm}()}, which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the Lennard-Jones pairwise interaction is
+  yielded by the function \code{LennardJones()}.
+  See the examples below.
+}
+\section{Rescaling}{
+  To avoid numerical instability,
+  the interpoint distances \code{d} are rescaled
+  when fitting the model.
+  
+  Distances are rescaled by dividing by \code{sigma0}.
+  In the formula for \eqn{v(d)} above,
+  the interpoint distance \eqn{d} will be replaced by \code{d/sigma0}.
+
+  The rescaling happens automatically by default.
+  If the argument \code{sigma0} is missing or \code{NA} (the default),
+  then \code{sigma0} is taken to be the minimum
+  nearest-neighbour distance in the data point pattern (in the
+  call to \code{\link{ppm}}). 
+  
+  If the argument \code{sigma0} is given, it should be a positive
+  number, and it should be a rough estimate of the
+  parameter \eqn{\sigma}{sigma}. 
+  
+  The ``canonical regular parameters'' estimated by \code{\link{ppm}} are
+  \eqn{\theta_1 = 4 \epsilon (\sigma/\sigma_0)^{12}}{theta1 = 4 * epsilon * (sigma/sigma0)^12}
+  and 
+  \eqn{\theta_2 = 4 \epsilon (\sigma/\sigma_0)^6}{theta2 = 4 * epsilon * (sigma/sigma0)^6}.
+}
+\section{Warnings and Errors}{
+  Fitting the Lennard-Jones model is extremely unstable, because
+  of the strong dependence between the functions \eqn{d^{-12}}{d^(-12)}
+  and \eqn{d^{-6}}{d^(-6)}. The fitting algorithm often fails to
+  converge. Try increasing the number of
+  iterations of the GLM fitting algorithm, by setting
+  \code{gcontrol=list(maxit=1e3)} in the call to \code{\link{ppm}}.
+  
+  Errors are likely to occur if this model is fitted to a point pattern dataset
+  which does not exhibit both short-range inhibition and
+  medium-range attraction between points.  The values of the parameters
+  \eqn{\sigma}{sigma} and \eqn{\epsilon}{epsilon} may be \code{NA}
+  (because the fitted canonical parameters have opposite sign, which
+  usually occurs when the pattern is completely random).
+
+  An absence of warnings does not mean that the fitted model is sensible.
+  A negative value of \eqn{\epsilon}{epsilon} may be obtained (usually when
+  the pattern is strongly clustered); this does not correspond
+  to a valid point process model, but the software does not issue a warning.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairwise.family}},
+  \code{\link{ppm.object}}
+}
+\examples{
+   fit <- ppm(cells ~1, LennardJones(), rbord=0.1)
+   fit
+   plot(fitin(fit))
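+
+   # the shape of the pair potential itself can be sketched with curve();
+   # sigma=0.1 and epsilon=1 are illustrative values, not fitted estimates:
+   curve(exp(-4 * ((0.1/d)^12 - (0.1/d)^6)), from=0.05, to=0.3,
+         xname="d", ylab="v(d)")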
+}
+\references{
+  Lennard-Jones, J.E. (1924) On the determination of molecular fields.
+  \emph{Proc Royal Soc London A} \bold{106}, 463--477.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Lest.Rd b/man/Lest.Rd
new file mode 100644
index 0000000..4f38f5e
--- /dev/null
+++ b/man/Lest.Rd
@@ -0,0 +1,95 @@
+\name{Lest}
+\alias{Lest}
+\title{L-function}
+\description{
+  Calculates an estimate of the \eqn{L}-function (Besag's
+  transformation of Ripley's \eqn{K}-function)
+  for a spatial point pattern.
+}
+\usage{
+  Lest(X, ...)
+}
+\arguments{
+  \item{X}{
+    The observed point pattern, 
+    from which an estimate of \eqn{L(r)} will be computed.
+    An object of class \code{"ppp"}, or data
+    in any format acceptable to \code{\link{as.ppp}()}.
+  }
+  \item{\dots}{
+    Other arguments passed to \code{\link{Kest}}
+    to control the estimation procedure.
+  }
+}
+\details{
+  This command computes an estimate of the \eqn{L}-function
+  for the spatial point pattern \code{X}.
+  The \eqn{L}-function is a transformation of Ripley's \eqn{K}-function,
+  \deqn{L(r) = \sqrt{\frac{K(r)}{\pi}}}{L(r) = sqrt(K(r)/pi)}
+  where \eqn{K(r)} is the \eqn{K}-function.
+
+  See \code{\link{Kest}} for information
+  about Ripley's \eqn{K}-function. The transformation to \eqn{L} was
+  proposed by Besag (1977).
+
+  The command \code{Lest} first calls
+  \code{\link{Kest}} to compute the estimate of the \eqn{K}-function,
+  and then applies the square root transformation.
+
+  For a completely random (uniform Poisson) point pattern,
+  the theoretical value of the \eqn{L}-function is \eqn{L(r) = r}.
+  The square root also has the effect of stabilising
+  the variance of the estimator, so that \eqn{L(r)} is more appropriate
+  for use in simulation envelopes and hypothesis tests.
+
+  See \code{\link{Kest}} for the list of arguments.
+}
+\section{Variance approximations}{
+  If the argument \code{var.approx=TRUE} is given, the return value
+  includes columns \code{rip} and \code{ls} containing approximations
+  to the variance of \eqn{\hat L(r)}{Lest(r)} under CSR.
+  These are obtained by the delta method from the variance
+  approximations described in \code{\link{Kest}}.
+}
+
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+
+  Essentially a data frame containing columns
+  \item{r}{the vector of values of the argument \eqn{r} 
+    at which the function \eqn{L} has been  estimated
+  }
+  \item{theo}{the theoretical value \eqn{L(r) = r}
+    for a stationary Poisson process
+  }
+  together with columns named 
+  \code{"border"}, \code{"bord.modif"},
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the function \eqn{L(r)} obtained by the edge corrections
+  named.
+}
+\references{
+  Besag, J. (1977) 
+  Discussion of Dr Ripley's paper.
+  \emph{Journal of the Royal Statistical Society, Series B},
+  \bold{39}, 193--195.
+}
+\seealso{
+  \code{\link{Kest}},
+  \code{\link{pcf}}
+}
+\examples{
+ data(cells)
+ L <- Lest(cells)
+ plot(L, main="L function for cells")
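+ # the variance approximations described above add columns
+ # 'rip' and 'ls' to the result:
+ Lv <- Lest(cells, var.approx=TRUE)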
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Linhom.Rd b/man/Linhom.Rd
new file mode 100644
index 0000000..8346ee0
--- /dev/null
+++ b/man/Linhom.Rd
@@ -0,0 +1,88 @@
+\name{Linhom}
+\alias{Linhom}
+\title{Inhomogeneous L-function}
+\description{
+  Calculates an estimate of the inhomogeneous version of
+  the \eqn{L}-function (Besag's transformation of Ripley's \eqn{K}-function)
+  for a spatial point pattern.
+}
+\usage{
+  Linhom(...)
+}
+\arguments{
+  \item{\dots}{
+    Arguments passed to \code{\link{Kinhom}}
+    to estimate the inhomogeneous K-function.
+  }
+}
+\details{
+  This command computes an estimate of the inhomogeneous version of
+  the \eqn{L}-function for a spatial point pattern.
+
+  The original \eqn{L}-function is a transformation
+  (proposed by Besag) of Ripley's \eqn{K}-function,
+  \deqn{L(r) = \sqrt{\frac{K(r)}{\pi}}}{L(r) = sqrt(K(r)/pi)}
+  where \eqn{K(r)} is the Ripley \eqn{K}-function of a spatially homogeneous
+  point pattern, estimated by \code{\link{Kest}}.
+
+  The inhomogeneous \eqn{L}-function is the corresponding transformation
+  of the inhomogeneous \eqn{K}-function, estimated by \code{\link{Kinhom}}.
+  It is appropriate when the point pattern clearly does not have a
+  homogeneous intensity of points. It was proposed by
+  Baddeley, \ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen (2000).
+
+  The command \code{Linhom} first calls
+  \code{\link{Kinhom}} to compute the estimate of the inhomogeneous K-function,
+  and then applies the square root transformation.
+
+  For a Poisson point pattern (homogeneous or inhomogeneous),
+  the theoretical value of the inhomogeneous \eqn{L}-function is \eqn{L(r) = r}.
+  The square root also has the effect of stabilising
+  the variance of the estimator, so that \eqn{L} is more appropriate
+  for use in simulation envelopes and hypothesis tests.
+}
+
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+
+  Essentially a data frame containing columns
+  \item{r}{the vector of values of the argument \eqn{r} 
+    at which the function \eqn{L} has been  estimated
+  }
+  \item{theo}{the theoretical value \eqn{L(r) = r}
+    for a stationary Poisson process
+  }
+  together with columns named 
+  \code{"border"}, \code{"bord.modif"},
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the function \eqn{L(r)} obtained by the edge corrections
+  named.
+}
+\references{
+  Baddeley, A., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2000)
+  Non- and semiparametric estimation of interaction in
+  inhomogeneous point patterns.
+  \emph{Statistica Neerlandica} \bold{54}, 329--350.
+}
+\seealso{
+  \code{\link{Kest}},
+  \code{\link{Lest}},
+  \code{\link{Kinhom}},
+  \code{\link{pcf}}
+}
+\examples{
+ data(japanesepines)
+ X <- japanesepines
+ L <- Linhom(X, sigma=0.1)
+ plot(L, main="Inhomogeneous L function for Japanese Pines")
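+ # alternatively, a precomputed intensity image can be supplied, since
+ # all arguments are passed to Kinhom (a sketch; values may differ
+ # slightly from the leave-one-out estimate above):
+ lam <- density(X, sigma=0.1)
+ L2 <- Linhom(X, lambda=lam)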
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Math.im.Rd b/man/Math.im.Rd
new file mode 100644
index 0000000..a2103e0
--- /dev/null
+++ b/man/Math.im.Rd
@@ -0,0 +1,116 @@
+\name{Math.im}
+\alias{Math.im}
+\alias{Ops.im}
+\alias{Complex.im}
+\alias{Summary.im}
+\title{S3 Group Generic methods for images}
+\description{
+  These are group generic methods for images of class \code{"im"}, which
+  allow the usual mathematical functions and operators to be applied
+  directly to images. See Details for a list of implemented functions.
+}
+\usage{
+## S3 methods for group generics have prototypes:
+\special{Math(x, \dots)}
+\special{Ops(e1, e2)}
+\special{Complex(z)}
+\special{Summary(\dots, na.rm=FALSE, drop=TRUE)}
+%NAMESPACE S3method("Math", "im")
+%NAMESPACE S3method("Ops", "im")
+%NAMESPACE S3method("Complex", "im")
+%NAMESPACE S3method("Summary", "im")
+}
+\arguments{
+  \item{x, z, e1, e2}{objects of class \code{"im"}.}
+  \item{\dots}{further arguments passed to methods.}
+  \item{na.rm,drop}{
+    Logical values specifying whether missing values
+    should be removed. This will happen if either
+    \code{na.rm=TRUE} or \code{drop=TRUE}. See Details.
+  }
+}
+
+\details{
+  Below is a list of mathematical functions and operators which are
+  defined for images. Not all functions will make sense for all
+  types of images. For example, none of the functions in the \code{"Math"} group
+  make sense for character-valued images. Note that the \code{"Ops"}
+  group methods are implemented using \code{\link{eval.im}}, which tries
+  to harmonise images via \code{\link{harmonise.im}} if they aren't
+  compatible to begin with.
+
+  \enumerate{
+    \item Group \code{"Math"}:
+    \itemize{
+      \item
+      \code{abs}, \code{sign}, \code{sqrt},\cr
+      \code{floor}, \code{ceiling}, \code{trunc},\cr
+      \code{round}, \code{signif}
+
+      \item
+      \code{exp}, \code{log},  \code{expm1}, \code{log1p},\cr
+      \code{cos}, \code{sin}, \code{tan},\cr
+      \code{cospi}, \code{sinpi}, \code{tanpi},\cr
+      \code{acos}, \code{asin}, \code{atan}
+
+      \code{cosh}, \code{sinh}, \code{tanh},\cr
+      \code{acosh}, \code{asinh}, \code{atanh}
+
+      \item
+      \code{lgamma}, \code{gamma}, \code{digamma}, \code{trigamma}
+      \item \code{cumsum}, \code{cumprod}, \code{cummax}, \code{cummin}
+    }
+
+    \item Group \code{"Ops"}:
+    \itemize{
+      \item
+      \code{"+"}, \code{"-"}, \code{"*"}, \code{"/"},
+      \code{"^"}, \code{"\%\%"}, \code{"\%/\%"}
+
+      \item \code{"&"}, \code{"|"}, \code{"!"}
+
+      \item \code{"=="}, \code{"!="},
+      \code{"<"}, \code{"<="}, \code{">="}, \code{">"}
+    }
+
+    \item Group \code{"Summary"}:
+    \itemize{
+      \item \code{all}, \code{any}
+      \item \code{sum}, \code{prod}
+      \item \code{min}, \code{max}
+      \item \code{range}
+    }
+
+    \item Group \code{"Complex"}:
+    \itemize{
+      \item \code{Arg}, \code{Conj}, \code{Im}, \code{Mod}, \code{Re}
+    }
+  }
+
+  For the \code{Summary} group, the generic has an argument
+  \code{na.rm=FALSE}, but for pixel images it makes
+  sense to set \code{na.rm=TRUE} so that pixels outside the domain of
+  the image are ignored. To enable this, we added the argument
+  \code{drop}. Pixel values that are \code{NA} are removed
+  if \code{drop=TRUE} or if \code{na.rm=TRUE}.
+}
+\seealso{
+  \code{\link{eval.im}} for evaluating expressions involving images.
+}
+\examples{
+  ## Convert gradient values to angle of inclination:
+  V <- atan(bei.extra$grad) * 180/pi
+  ## Make logical image which is TRUE when heat equals 'Moderate': 
+  A <- (gorillas.extra$heat == "Moderate")
+  ## Summary:
+  any(A)
+  ## Complex:
+  Z <- exp(1 + V * 1i)
+  Z
+  Re(Z)
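+  ## Summary group: with drop=TRUE (the default) NA pixels outside the
+  ## window are ignored, so this returns a finite range:
+  range(V)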
+}
+\author{
+  \spatstatAuthors and Kassel Hingee.
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/Math.imlist.Rd b/man/Math.imlist.Rd
new file mode 100644
index 0000000..a75ecb3
--- /dev/null
+++ b/man/Math.imlist.Rd
@@ -0,0 +1,105 @@
+\name{Math.imlist}
+\alias{Math.imlist}
+\alias{Ops.imlist}
+\alias{Complex.imlist}
+\alias{Summary.imlist}
+\title{S3 Group Generic methods for List of Images}
+\description{
+  These are group generic methods for the class \code{"imlist"}
+  of lists of images. These methods 
+  allows the usual mathematical functions and operators to be applied
+  directly to lists of images. See Details for a list of implemented functions.
+}
+\usage{
+## S3 methods for group generics have prototypes:
+\special{Math(x, \dots)}
+\special{Ops(e1, e2)}
+\special{Complex(z)}
+\special{Summary(\dots, na.rm = TRUE)}
+%NAMESPACE S3method("Math", "imlist")
+%NAMESPACE S3method("Ops", "imlist")
+%NAMESPACE S3method("Complex", "imlist")
+%NAMESPACE S3method("Summary", "imlist")
+}
+\arguments{
+  \item{x, z, e1, e2}{objects of class \code{"imlist"}.}
+  \item{\dots}{further arguments passed to methods.}
+  \item{na.rm}{logical: should missing values be removed?}
+}
+\details{
+  Below is a list of mathematical functions and operators which are
+  defined for lists of images. Not all functions will make sense for all
+  types of images. For example, none of the functions in the \code{"Math"} group
+  make sense for character-valued images. Note that the \code{"Ops"}
+  group methods are implemented using \code{\link{eval.im}}, which tries
+  to harmonise images via \code{\link{harmonise.im}} if they aren't
+  compatible to begin with.
+
+  \enumerate{
+    \item Group \code{"Math"}:
+    \itemize{
+      \item
+      \code{abs}, \code{sign}, \code{sqrt},\cr
+      \code{floor}, \code{ceiling}, \code{trunc},\cr
+      \code{round}, \code{signif}
+
+      \item
+      \code{exp}, \code{log},  \code{expm1}, \code{log1p},\cr
+      \code{cos}, \code{sin}, \code{tan},\cr
+      \code{cospi}, \code{sinpi}, \code{tanpi},\cr
+      \code{acos}, \code{asin}, \code{atan}
+
+      \code{cosh}, \code{sinh}, \code{tanh},\cr
+      \code{acosh}, \code{asinh}, \code{atanh}
+
+      \item
+      \code{lgamma}, \code{gamma}, \code{digamma}, \code{trigamma}
+      \item \code{cumsum}, \code{cumprod}, \code{cummax}, \code{cummin}
+    }
+
+    \item Group \code{"Ops"}:
+    \itemize{
+      \item
+      \code{"+"}, \code{"-"}, \code{"*"}, \code{"/"},
+      \code{"^"}, \code{"\%\%"}, \code{"\%/\%"}
+
+      \item \code{"&"}, \code{"|"}, \code{"!"}
+
+      \item \code{"=="}, \code{"!="},
+      \code{"<"}, \code{"<="}, \code{">="}, \code{">"}
+    }
+
+    \item Group \code{"Summary"}:
+    \itemize{
+      \item \code{all}, \code{any}
+      \item \code{sum}, \code{prod}
+      \item \code{min}, \code{max}
+      \item \code{range}
+    }
+
+    \item Group \code{"Complex"}:
+    \itemize{
+      \item \code{Arg}, \code{Conj}, \code{Im}, \code{Mod}, \code{Re}
+    }
+  }
+}
+\value{
+  The result of \code{"Math"}, \code{"Ops"} and \code{"Complex"} group
+  operations is another list of images.
+  The result of \code{"Summary"} group operations is a numeric vector of
+  length 1 or 2.
+}
+\seealso{
+  \code{\link{Math.im}} or \code{\link{eval.im}}
+  for evaluating expressions involving images.
+}
+\examples{
+  a <- Smooth(finpines, 2)
+  log(a)/2 - sqrt(a)
+  range(a)
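+  ## Ops group also combines two compatible image lists entrywise
+  ## (a sketch):
+  b <- a + a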
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/Math.linim.Rd b/man/Math.linim.Rd
new file mode 100644
index 0000000..5209a50
--- /dev/null
+++ b/man/Math.linim.Rd
@@ -0,0 +1,108 @@
+\name{Math.linim}
+\alias{Math.linim}
+\alias{Ops.linim}
+\alias{Summary.linim}
+\alias{Complex.linim}
+\title{S3 Group Generic Methods for Images on a Linear Network}
+\description{
+  These are group generic methods for images of class \code{"linim"}, which
+  allow the usual mathematical functions and operators to be applied
+  directly to pixel images on a linear network.
+  See Details for a list of implemented functions.
+}
+\usage{
+## S3 methods for group generics have prototypes:
+\special{Math(x, \dots)}
+\special{Ops(e1, e2)}
+\special{Complex(z)}
+\special{Summary(\dots, na.rm = FALSE)}
+%NAMESPACE S3method("Math", "linim")
+%NAMESPACE S3method("Ops", "linim")
+%NAMESPACE S3method("Complex", "linim")
+%NAMESPACE S3method("Summary", "linim")
+}
+\arguments{
+  \item{x, z, e1, e2}{objects of class \code{"linim"}.}
+  \item{\dots}{further arguments passed to methods.}
+  \item{na.rm}{logical: should missing values be removed?}
+}
+\details{
+  An object of class \code{"linim"} represents a pixel image on
+  a linear network. See \code{\link{linim}}.
+  
+  Below is a list of mathematical functions and operators which are
+  defined for these images. Not all functions will make sense for all
+  types of images. For example, none of the functions in the \code{"Math"} group
+  make sense for character-valued images. Note that the \code{"Ops"}
+  group methods are implemented using \code{\link{eval.linim}}.
+
+  \enumerate{
+    \item Group \code{"Math"}:
+    \itemize{
+      \item
+      \code{abs}, \code{sign}, \code{sqrt},\cr
+      \code{floor}, \code{ceiling}, \code{trunc},\cr
+      \code{round}, \code{signif}
+
+      \item
+      \code{exp}, \code{log},  \code{expm1}, \code{log1p},\cr
+      \code{cos}, \code{sin}, \code{tan},\cr
+      \code{cospi}, \code{sinpi}, \code{tanpi},\cr
+      \code{acos}, \code{asin}, \code{atan}
+
+      \code{cosh}, \code{sinh}, \code{tanh},\cr
+      \code{acosh}, \code{asinh}, \code{atanh}
+
+      \item
+      \code{lgamma}, \code{gamma}, \code{digamma}, \code{trigamma}
+      \item \code{cumsum}, \code{cumprod}, \code{cummax}, \code{cummin}
+    }
+
+    \item Group \code{"Ops"}:
+    \itemize{
+      \item
+      \code{"+"}, \code{"-"}, \code{"*"}, \code{"/"},
+      \code{"^"}, \code{"\%\%"}, \code{"\%/\%"}
+
+      \item \code{"&"}, \code{"|"}, \code{"!"}
+
+      \item \code{"=="}, \code{"!="},
+      \code{"<"}, \code{"<="}, \code{">="}, \code{">"}
+    }
+
+    \item Group \code{"Summary"}:
+    \itemize{
+      \item \code{all}, \code{any}
+      \item \code{sum}, \code{prod}
+      \item \code{min}, \code{max}
+      \item \code{range}
+    }
+
+    \item Group \code{"Complex"}:
+    \itemize{
+      \item \code{Arg}, \code{Conj}, \code{Im}, \code{Mod}, \code{Re}
+    }
+  }
+}
+\seealso{
+  \code{\link{eval.linim}} for evaluating expressions involving images.
+}
+\examples{
+  fx <- function(x,y,seg,tp) { (x - y)^2 }
+  fL <- linfun(fx, simplenet)
+  Z <- as.linim(fL)
+  A <- Z+2
+  A <- -Z
+  A <- sqrt(Z)
+  A <- !(Z > 0.1)
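+  ## Summary group: the range of pixel values over the network
+  range(Z)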
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/MinkowskiSum.Rd b/man/MinkowskiSum.Rd
new file mode 100644
index 0000000..0cbde80
--- /dev/null
+++ b/man/MinkowskiSum.Rd
@@ -0,0 +1,112 @@
+\name{MinkowskiSum}
+\alias{MinkowskiSum}
+\alias{\%(+)\%}  %DoNotExport 
+%NAMESPACE export("%(+)%")
+\alias{dilationAny}
+\title{Minkowski Sum of Windows}
+\description{
+  Compute the Minkowski sum of two spatial windows.
+}
+\usage{
+MinkowskiSum(A, B)
+
+A \%(+)\% B
+
+dilationAny(A, B)
+}
+\arguments{
+  \item{A,B}{
+    Windows (objects of class \code{"owin"}),
+    point patterns (objects of class \code{"ppp"})
+    or line segment patterns (objects of class \code{"psp"})
+    in any combination.
+  }
+}
+\value{
+  A window (object of class \code{"owin"}) except that
+  if \code{A} is a point pattern, then the result is an object of the same
+  type as \code{B} (and vice versa).
+}
+\details{
+  The operator \code{A \%(+)\% B} and function \code{MinkowskiSum(A,B)}
+  are synonymous: they both compute the
+  Minkowski sum of the windows \code{A} and \code{B}.
+  The function \code{dilationAny} computes the Minkowski dilation
+  \code{A \%(+)\% reflect(B)}.
+  
+  The Minkowski sum 
+  of two spatial regions \eqn{A} and \eqn{B}
+  is another region, formed by taking all possible pairs of points,
+  one in \eqn{A} and one in \eqn{B}, and adding them as vectors.
+  The Minkowski Sum \eqn{A \oplus B}{A \%(+)\% B}
+  is the set of all points \eqn{a+b} where \eqn{a} is in \eqn{A}
+  and \eqn{b} is in \eqn{B}.
+  A few common facts about the Minkowski sum are:
+  \itemize{
+    \item
+    The sum is symmetric:
+    \eqn{A \oplus B = B \oplus A}{A \%(+)\% B = B \%(+)\% A}.
+    \item
+    If \eqn{B} is a single point, then \eqn{A \oplus B}{A \%(+)\% B}
+    is a shifted copy of \eqn{A}.
+    \item
+    If \eqn{A} is a square of side length \eqn{a},
+    and \eqn{B} is a square of side length \eqn{b},
+    with sides that are parallel to the coordinate axes,
+    then \eqn{A \oplus B}{A \%(+)\% B} is a square of side length \eqn{a+b}.
+    \item
+    If \eqn{A} and \eqn{B} are discs of radius \eqn{r} and \eqn{s}
+    respectively, then \eqn{A \oplus B}{A \%(+)\% B} is a disc
+    of radius \eqn{r+s}.
+    \item
+    If \eqn{B} is a disc of radius \eqn{r} centred at the origin,
+    then \eqn{A \oplus B}{A \%(+)\% B} is equivalent to the
+    \emph{morphological dilation} of \eqn{A} by distance \eqn{r}.
+    See \code{\link{dilation}}.
+  }
+
+  The Minkowski dilation is the closely-related region
+  \eqn{A \oplus (-B)}{A \%(+)\% (-B)}
+  where \eqn{(-B)} is the reflection of \eqn{B} through the origin.
+  The Minkowski dilation is the set of all vectors \eqn{z}
+  such that, if \eqn{B} is shifted by \eqn{z}, the resulting set
+  \eqn{B+z} has nonempty intersection with \eqn{A}.
+  
+  The algorithm currently computes the result as a polygonal
+  window using the \pkg{polyclip} library. 
+  It will be quite slow if applied to binary mask windows.
+
+  The arguments \code{A} and \code{B} can also be point patterns or line
+  segment patterns. These are interpreted as spatial regions, the
+  Minkowski sum is computed, and the result is returned as an object of
+  the most appropriate type. The Minkowski sum of two point patterns is
+  another point pattern. The Minkowski sum of a point pattern and a line
+  segment pattern is another line segment pattern.
+}
+\seealso{
+  \code{\link{dilation}},
+  \code{\link{erosionAny}}
+}
+\examples{
+  B <- square(0.2)
+  RplusB <- letterR \%(+)\% B
+
+  opa <- par(mfrow=c(1,2))
+  FR <- grow.rectangle(Frame(letterR), 0.3)
+  plot(FR, main="")
+  plot(letterR, add=TRUE, lwd=2, hatch=TRUE, hatchargs=list(texture=5))
+  plot(shift(B, vec=c(3.675, 3)),
+       add=TRUE, border="red", lwd=2)
+  plot(FR, main="")
+  plot(letterR, add=TRUE, lwd=2, hatch=TRUE, hatchargs=list(texture=5))
+  plot(RplusB, add=TRUE, border="blue", lwd=2,
+         hatch=TRUE, hatchargs=list(col="blue"))
+  par(opa)
+
+  plot(cells \%(+)\% square(0.1))
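+
+  ## Minkowski dilation, equivalent to letterR \%(+)\% reflect(B):
+  plot(dilationAny(letterR, B))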
+}
+\author{
+  \adrian
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/MultiHard.Rd b/man/MultiHard.Rd
new file mode 100644
index 0000000..a706bd0
--- /dev/null
+++ b/man/MultiHard.Rd
@@ -0,0 +1,88 @@
+\name{MultiHard}
+\alias{MultiHard}
+\title{The Multitype Hard Core Point Process Model}
+\description{
+Creates an instance of the multitype hard core point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+  MultiHard(hradii, types=NULL)
+}
+\arguments{
+  \item{hradii}{Matrix of hard core radii}
+  \item{types}{Optional; vector of all possible types (i.e. the possible levels
+    of the \code{marks} variable in the data)}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of the multitype hard core process with
+  hard core radii \eqn{hradii[i,j]}.
+}
+\details{
+  This is a multitype version of the hard core process.
+  A pair of points
+  of types \eqn{i} and \eqn{j}
+  must not lie closer than \eqn{h_{ij}}{h[i,j]} units apart.
+
+  The argument \code{types} need not be specified in normal use.
+  It will be determined automatically from the point pattern data set
+  to which the MultiHard interaction is applied,
+  when the user calls \code{\link{ppm}}. 
+  However, the user should be confident that
+  the ordering of types in the dataset corresponds to the ordering of
+  rows and columns in the matrix \code{hradii}.
+
+  The matrix \code{hradii}
+  must be symmetric, with entries
+  which are either positive numbers or \code{NA}. 
+  A value of \code{NA} indicates that no distance constraint should be applied
+  for this combination of types.
+  
+  Note that only the hardcore radii
+  are specified in \code{MultiHard}.
+  The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])}
+  are estimated by \code{\link{ppm}()}, not fixed in
+  \code{MultiHard()}. 
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairwise.family}},
+  \code{\link{ppm.object}},
+  \code{\link{MultiStrauss}},
+  \code{\link{MultiStraussHard}},
+  \code{\link{Strauss}}.
+
+  See \code{\link{ragsMultiHard}} and \code{\link{rmh}} for
+  simulation.
+}
+\examples{
+   h <- matrix(c(1,2,2,1), nrow=2,ncol=2)
+
+   # prints a sensible description of itself
+   MultiHard(h)
+
+   # Fit the stationary multitype hardcore process to `amacrine'
+   # with hard core operating only between cells of the same type.
+   h <- 0.02 * matrix(c(1, NA, NA, 1), nrow=2,ncol=2)
+   ppm(amacrine ~1, MultiHard(h))
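+
+   # a fitted model can be simulated with rmh() (a sketch):
+   \dontrun{
+     fit <- ppm(amacrine ~1, MultiHard(h))
+     plot(rmh(fit))
+   }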
+}
+\section{Warnings}{
+  In order that \code{\link{ppm}} can fit the multitype hard core
+  model correctly to a point pattern \code{X}, this pattern must
+  be marked, with \code{markformat} equal to \code{vector} and the
+  mark vector \code{marks(X)} must be a factor.  If the argument
+  \code{types} is specified it is interpreted as a set of factor
+  levels and this set must equal \code{levels(marks(X))}.
+}
+\section{Changed Syntax}{
+  Before \pkg{spatstat} version \code{1.37-0},
+  the syntax of this function was different:
+  \code{MultiHard(types=NULL, hradii)}.
+  The new code attempts to handle the old syntax as well.
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/MultiStrauss.Rd b/man/MultiStrauss.Rd
new file mode 100644
index 0000000..5ddff82
--- /dev/null
+++ b/man/MultiStrauss.Rd
@@ -0,0 +1,113 @@
+\name{MultiStrauss}
+\alias{MultiStrauss}
+\title{The Multitype Strauss Point Process Model}
+\description{
+Creates an instance of the multitype Strauss point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+  MultiStrauss(radii, types=NULL)
+}
+\arguments{
+  \item{radii}{Matrix of interaction radii}
+  \item{types}{Optional; vector of all possible types (i.e. the possible levels
+    of the \code{marks} variable in the data)}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of the multitype Strauss process with
+  interaction radii \eqn{radii[i,j]}.
+}
+\details{
+  The (stationary) multitype
+  Strauss process with \eqn{m} types, with interaction radii
+  \eqn{r_{ij}}{r[i,j]} and 
+  parameters \eqn{\beta_j}{beta[j]} and \eqn{\gamma_{ij}}{gamma[i,j]}
+  is the pairwise interaction point process
+  in which each point of type \eqn{j}
+  contributes a factor \eqn{\beta_j}{beta[j]} to the 
+  probability density of the point pattern, and a pair of points
+  of types \eqn{i} and \eqn{j} closer than \eqn{r_{ij}}{r[i,j]}
+  units apart contributes a factor
+  \eqn{\gamma_{ij}}{gamma[i,j]} to the density.
+
+  The nonstationary multitype Strauss process is similar except that 
+  the contribution of each individual point \eqn{x_i}{x[i]}
+  is a function \eqn{\beta(x_i)}{beta(x[i])}
+  of location and type, rather than a constant beta. 
+ 
+  The function \code{\link{ppm}()}, which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the multitype
+  Strauss process pairwise interaction is
+  yielded by the function \code{MultiStrauss()}. See the examples below.
+
+  The argument \code{types} need not be specified in normal use.
+  It will be determined automatically from the point pattern data set
+  to which the MultiStrauss interaction is applied,
+  when the user calls \code{\link{ppm}}. 
+  However, the user should be confident that
+  the ordering of types in the dataset corresponds to the ordering of
+  rows and columns in the matrix \code{radii}.
+
+  The matrix \code{radii} must be symmetric, with entries
+  which are either positive numbers or \code{NA}. 
+  A value of \code{NA} indicates that no interaction term should be included
+  for this combination of types.
+  
+  Note that only the interaction radii are
+  specified in \code{MultiStrauss}.  The canonical
+  parameters \eqn{\log(\beta_j)}{log(beta[j])} and
+  \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by
+  \code{\link{ppm}()}, not fixed in \code{MultiStrauss()}.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairwise.family}},
+  \code{\link{ppm.object}},
+  \code{\link{Strauss}},
+  \code{\link{MultiHard}}
+}
+\examples{
+   r <- matrix(c(1,2,2,1), nrow=2,ncol=2)
+   MultiStrauss(r)
+   # prints a sensible description of itself
+   r <- 0.03 * matrix(c(1,2,2,1), nrow=2,ncol=2)
+   X <- amacrine
+   \testonly{
+      X <- X[ owin(c(0, 0.8), c(0, 1)) ]
+   }
+   ppm(X ~1, MultiStrauss(r))
+   # fit the stationary multitype Strauss process to `amacrine'
+
+   \dontrun{
+   ppm(X ~polynom(x,y,3), MultiStrauss(r, c("off","on")))
+   # fit a nonstationary multitype Strauss process with log-cubic trend
+   }
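+
+   # rows and columns of 'r' must follow the order of the mark levels:
+   levels(marks(X))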
+}
+\section{Warnings}{
+  In order that \code{\link{ppm}} can fit the multitype Strauss
+  model correctly to a point pattern \code{X}, this pattern must
+  be marked, with \code{markformat} equal to \code{vector} and the
+  mark vector \code{marks(X)} must be a factor.  If the argument
+  \code{types} is specified it is interpreted as a set of factor
+  levels and this set must equal \code{levels(marks(X))}.
+}
+\section{Changed Syntax}{
+  Before \pkg{spatstat} version \code{1.37-0},
+  the syntax of this function was different:
+  \code{MultiStrauss(types=NULL, radii)}.
+  The new code attempts to handle the old syntax as well.
+}
+\author{\adrian,
+  \rolf
+  and \ege
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/MultiStraussHard.Rd b/man/MultiStraussHard.Rd
new file mode 100644
index 0000000..ccaa959
--- /dev/null
+++ b/man/MultiStraussHard.Rd
@@ -0,0 +1,101 @@
+\name{MultiStraussHard}
+\alias{MultiStraussHard}
+\title{The Multitype/Hard Core Strauss Point Process Model}
+\description{
+Creates an instance of the multitype/hard core Strauss point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+  MultiStraussHard(iradii, hradii, types=NULL)
+}
+\arguments{
+  \item{iradii}{Matrix of interaction radii}
+  \item{hradii}{Matrix of hard core radii}
+  \item{types}{Optional; vector of all possible types (i.e. the possible levels
+    of the \code{marks} variable in the data)}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of the multitype/hard core Strauss process with
+  interaction radii \eqn{iradii[i,j]}
+  and hard core radii \eqn{hradii[i,j]}.
+}
+\details{
+  This is a hybrid of the multitype Strauss process
+  (see \code{\link{MultiStrauss}}) and the hard core process
+  (case \eqn{\gamma=0}{gamma = 0} of the Strauss process).
+  A pair of points
+  of types \eqn{i} and \eqn{j}
+  must not lie closer than \eqn{h_{ij}}{h[i,j]} units apart;
+  if the pair lies more than \eqn{h_{ij}}{h[i,j]} and less than 
+  \eqn{r_{ij}}{r[i,j]} units apart, it
+  contributes a factor
+  \eqn{\gamma_{ij}}{gamma[i,j]} to the probability density.
+
+  The argument \code{types} need not be specified in normal use.
+  It will be determined automatically from the point pattern data set
+  to which the MultiStraussHard interaction is applied,
+  when the user calls \code{\link{ppm}}. 
+  However, the user should be confident that
+  the ordering of types in the dataset corresponds to the ordering of
+  rows and columns in the matrices \code{iradii} and \code{hradii}.
+
+  The matrices \code{iradii} and \code{hradii}
+  must be symmetric, with entries
+  which are either positive numbers or \code{NA}. 
+  A value of \code{NA} indicates that no interaction term should be included
+  for this combination of types.
+  
+  Note that only the interaction radii and hardcore radii
+  are specified in \code{MultiStraussHard}.
+  The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])}
+  and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])}
+  are estimated by \code{\link{ppm}()}, not fixed in
+  \code{MultiStraussHard()}. 
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairwise.family}},
+  \code{\link{ppm.object}},
+  \code{\link{MultiStrauss}},
+  \code{\link{MultiHard}},
+  \code{\link{Strauss}}
+}
+\examples{
+   r <- matrix(3, nrow=2,ncol=2)
+   h <- matrix(c(1,2,2,1), nrow=2,ncol=2)
+   MultiStraussHard(r,h)
+   # prints a sensible description of itself
+   r <- 0.04 * matrix(c(1,2,2,1), nrow=2,ncol=2)
+   h <- 0.02 * matrix(c(1,NA,NA,1), nrow=2,ncol=2)
+   X <- amacrine
+   \testonly{
+       X <- X[owin(c(0,0.8), c(0,1))]
+   }
+   fit <- ppm(X ~1, MultiStraussHard(r,h))
+   # fit stationary multitype hardcore Strauss process to `amacrine'
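+
+   # the fitted pairwise interactions can be inspected (a sketch):
+   \dontrun{
+     plot(fitin(fit))
+   }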
+}
+\section{Warnings}{
+  In order that \code{\link{ppm}} can fit the multitype/hard core
+  Strauss model correctly to a point pattern \code{X}, this pattern
+  must be marked, with \code{markformat} equal to \code{vector}
+  and the mark vector \code{marks(X)} must be a factor.  If the
+  argument \code{types} is specified it is interpreted as a set of
+  factor levels and this set must equal \code{levels(marks(X))}.
+}
+\section{Changed Syntax}{
+  Before \pkg{spatstat} version \code{1.37-0},
+  the syntax of this function was different:
+  \code{MultiStraussHard(types=NULL, iradii, hradii)}.
+  The new code attempts to handle the old syntax as well.
+}
+\author{\adrian,
+  \rolf
+  and \ege
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Ops.msr.Rd b/man/Ops.msr.Rd
new file mode 100644
index 0000000..2a92f20
--- /dev/null
+++ b/man/Ops.msr.Rd
@@ -0,0 +1,60 @@
+\name{Ops.msr}
+\alias{Ops.msr}
+\title{Arithmetic Operations on Measures}
+\description{
+  These group generic methods for the class \code{"msr"}
+  allow the arithmetic operators
+  \code{+}, \code{-}, \code{*} and \code{/} to be applied
+  directly to measures. 
+}
+\usage{
+## S3 methods for group generics have prototypes:
+\special{Ops(e1, e2)}
+%NAMESPACE S3method("Ops", "msr")
+}
+\arguments{
+  \item{e1, e2}{objects of class \code{"msr"}.}
+}
+\details{
+  Arithmetic operators on a measure \code{A} are only defined
+  in some cases. The arithmetic operator is effectively applied
+  to the value of \code{A(W)} for every spatial domain \code{W}.
+  If the result is a measure, then this operation is valid.
+  
+  If \code{A} is a measure (object of class \code{"msr"})
+  then the operations \code{-A} and \code{+A} are defined.
+  
+  If \code{A} and \code{B} are measures with the same dimension
+  (i.e. both are scalar-valued, or both are \code{k}-dimensional vector-valued)
+  then \code{A + B} and \code{A - B} are defined.
+
+  If \code{A} is a measure and \code{z} is a numeric value,
+  then \code{A * z} and \code{A / z} are defined,
+  and \code{z * A} is defined.
+}
+\value{
+  Another measure (object of class \code{"msr"}).
+}
+\seealso{
+  \code{\link{with.msr}}
+}
+\examples{
+   X <- rpoispp(function(x,y) { exp(3+3*x) })
+   fit <- ppm(X, ~x+y)
+   rp <- residuals(fit, type="pearson")
+   rp
+
+   -rp
+   2 * rp
+   rp /2
+
+   rp - rp
+
+   rr <- residuals(fit, type="raw")
+   rp - rr
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/Ord.Rd b/man/Ord.Rd
new file mode 100644
index 0000000..e5afe49
--- /dev/null
+++ b/man/Ord.Rd
@@ -0,0 +1,70 @@
+\name{Ord}
+\alias{Ord}
+\title{Generic Ord Interaction model}
+\description{
+Creates an instance of an Ord-type interaction point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+  Ord(pot, name)
+}
+\arguments{
+  \item{pot}{An S language function giving the user-supplied
+    interaction potential.}
+  \item{name}{Character string.}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of a point process. 
+}
+\details{
+  Ord's point process model (Ord, 1977) is a Gibbs point process
+  of infinite order. Each point \eqn{x_i}{x[i]} in the point pattern
+  \eqn{x} contributes a factor \eqn{g(a_i)}{g(a[i])} where
+  \eqn{a_i = a(x_i, x)}{a[i] = a(x[i], x)} is the area of the
+  tile associated with \eqn{x_i}{x[i]}
+  in the Dirichlet tessellation of \eqn{x}.
+
+  Ord (1977) proposed fitting this model to forestry data
+  when \eqn{g(a)} has a simple ``threshold'' form. That model is
+  implemented in our function \code{\link{OrdThresh}}.
+  The present function \code{Ord} implements the case of a
+  completely general Ord potential \eqn{g(a)}
+  specified as an S language function \code{pot}.
+
+  This is experimental. 
+}
+\references{
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42}, 283--322.
+
+  Ord, J.K. (1977) 
+  Contribution to the discussion of Ripley (1977).
+
+  Ord, J.K. (1978) 
+  How many trees in a forest?
+  \emph{Mathematical Scientist} \bold{3}, 23--33.
+
+  Ripley, B.D. (1977)
+  Modelling spatial patterns (with discussion).
+  \emph{Journal of the Royal Statistical Society, Series B},
+  \bold{39}, 172 -- 212.
+
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{ppm.object}},
+  \code{\link{OrdThresh}}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/OrdThresh.Rd b/man/OrdThresh.Rd
new file mode 100644
index 0000000..0cb4ac7
--- /dev/null
+++ b/man/OrdThresh.Rd
@@ -0,0 +1,66 @@
+\name{OrdThresh}
+\alias{OrdThresh}
+\title{Ord's Interaction model}
+\description{
+Creates an instance of Ord's point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+  OrdThresh(r)
+}
+\arguments{
+  \item{r}{Positive number giving the threshold value
+    for Ord's model.}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of a point process. 
+}
+\details{
+  Ord's point process model (Ord, 1977) is a Gibbs point process
+  of infinite order. Each point \eqn{x_i}{x[i]} in the point pattern
+  \eqn{x} contributes a factor \eqn{g(a_i)}{g(a[i])} where
+  \eqn{a_i = a(x_i, x)}{a[i] = a(x[i], x)} is the area of the
+  tile associated with \eqn{x_i}{x[i]}
+  in the Dirichlet tessellation of \eqn{x}. The function \eqn{g} is
+  simply \eqn{g(a) = 1} if \eqn{a \ge r}{a >= r} and
+  \eqn{g(a) = \gamma < 1}{g(a) = gamma < 1} if \eqn{a < r}{a < r},
+  where \eqn{r} is called the threshold value.
+
+  This function creates an instance of Ord's model with a given
+  value of \eqn{r}. It can then be fitted to point process data
+  using \code{\link{ppm}}. 
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{ppm.object}}
+}
+\references{
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42}, 283--322.
+
+  Ord, J.K. (1977) 
+  Contribution to the discussion of Ripley (1977).
+
+  Ord, J.K. (1978) 
+  How many trees in a forest?
+  \emph{Mathematical Scientist} \bold{3}, 23--33.
+
+  Ripley, B.D. (1977)
+  Modelling spatial patterns (with discussion).
+  \emph{Journal of the Royal Statistical Society, Series B},
+  \bold{39}, 172 -- 212.
+
+}
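+\examples{
+   # a minimal sketch; the threshold r refers to tile AREA, and the
+   # value 0.02 is illustrative for the unit-square 'cells' data:
+   \dontrun{
+     ppm(cells ~1, OrdThresh(0.02))
+   }
+}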
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/PPversion.Rd b/man/PPversion.Rd
new file mode 100644
index 0000000..0bb434f
--- /dev/null
+++ b/man/PPversion.Rd
@@ -0,0 +1,90 @@
+\name{PPversion}
+\alias{PPversion}
+\alias{QQversion}
+\title{
+  Transform a Function into its P-P or Q-Q Version
+}
+\description{
+  Given a function object \code{f} containing both the estimated
+  and theoretical versions of a summary function, these operations
+  combine the estimated and theoretical functions into a new function.
+  When plotted, the new function gives either the P-P plot or Q-Q plot
+  of the original \code{f}.
+}
+\usage{
+PPversion(f, theo = "theo", columns = ".")
+
+QQversion(f, theo = "theo", columns = ".")
+}
+\arguments{
+  \item{f}{
+    The function to be transformed. An object of class \code{"fv"}.
+  }
+  \item{theo}{
+    The name of the column of \code{f} that should be treated as the
+    theoretical value of the function.
+  }
+  \item{columns}{
+    Character vector, specifying the columns of \code{f}
+    to which the transformation will be applied.
+    Either a vector of names of columns of \code{f},
+    or one of the abbreviations recognised by \code{\link{fvnames}}.
+  }
+}
+\details{
+  The argument \code{f} should be an object of class \code{"fv"},
+  containing both empirical estimates \eqn{\widehat f(r)}{fhat(r)}
+  and a theoretical value \eqn{f_0(r)}{f0(r)} for a summary function.
+
+  The \emph{P--P version} of \code{f} is the function
+  \eqn{g(x) = \widehat f (f_0^{-1}(x))}{g(x) = fhat(f0^(-1)(x))}
+  where \eqn{f_0^{-1}}{f0^(-1)} is the inverse function of
+  \eqn{f_0}{f0}.
+  A plot of \eqn{g(x)} against \eqn{x} 
+  is equivalent to a plot of \eqn{\widehat f(r)}{fhat(r)} against
+  \eqn{f_0(r)}{f0(r)} for all \eqn{r}.
+  If \code{f} is a cumulative distribution function (such as the
+  result of \code{\link{Fest}} or \code{\link{Gest}}) then
+  this is a P--P plot, a plot of the observed versus theoretical
+  probabilities for the distribution.
+  The diagonal line \eqn{y=x}
+  corresponds to perfect agreement between observed and theoretical
+  distribution.
+
+  The \emph{Q--Q version} of \code{f} is the function
+  \eqn{h(x) = f_0^{-1}(\widehat f(x))}{f0^(-1)(fhat(x))}.
+  If \code{f} is a cumulative distribution function,
+  a plot of \eqn{h(x)} against \eqn{x}
+  is a Q--Q plot, a plot of the observed versus theoretical
+  quantiles of the distribution.
+  The diagonal line \eqn{y=x}
+  corresponds to perfect agreement between observed and theoretical
+  distribution.
+  Another straight line corresponds to the situation where the
+  observed variable is a linear transformation of the theoretical variable.
+  For a point pattern \code{X}, the Q--Q version of \code{Kest(X)} is
+  essentially equivalent to \code{Lest(X)}.
+}
+\value{
+  Another object of class \code{"fv"}.
+}
+\author{
+  Tom Lawrence
+  and Adrian Baddeley.
+  
+  Implemented by
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{plot.fv}}
+}
+\examples{
+  opa <- par(mar=0.1+c(5,5,4,2))
+  G <- Gest(redwoodfull)
+  plot(PPversion(G))
+  plot(QQversion(G))
+  par(opa)
+}
+\keyword{spatial}
+\keyword{nonparametric}
+\keyword{manip}
diff --git a/man/PairPiece.Rd b/man/PairPiece.Rd
new file mode 100644
index 0000000..e2762fb
--- /dev/null
+++ b/man/PairPiece.Rd
@@ -0,0 +1,108 @@
+\name{PairPiece}
+\alias{PairPiece}
+\title{The Piecewise Constant Pairwise Interaction Point Process Model}
+\description{
+  Creates an instance of a pairwise interaction point process model
+  with piecewise constant potential function. The model
+  can then be fitted to point pattern data.
+}
+\usage{
+  PairPiece(r)
+}
+\arguments{
+  \item{r}{vector of jump points for the potential function}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of a point process. The process is a pairwise interaction process,
+  whose interaction potential is piecewise constant, with jumps
+  at the distances given in the vector \eqn{r}.
+}
+\details{
+  A pairwise interaction point process in a bounded region
+  is a stochastic point process with probability density of the form
+  \deqn{
+    f(x_1,\ldots,x_n) =
+    \alpha \prod_i b(x_i) \prod_{i < j} h(x_i, x_j)
+  }{
+    f(x_1,\ldots,x_n) =
+    alpha . product { b(x[i]) } product { h(x_i, x_j) }
+  }
+  where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the 
+  points of the pattern. The first product on the right hand side is
+  over all points of the pattern; the second product is over all
+  unordered pairs of points of the pattern.
+
+  Thus each point \eqn{x_i}{x[i]} of the pattern contributes a factor 
+  \eqn{b(x_i)}{b(x[i])} to the probability density, and each pair of
+  points \eqn{x_i, x_j}{x[i], x[j]} contributes a factor
+  \eqn{h(x_i,x_j)}{h(x[i], x[j])} to the density.
+
+  The pairwise interaction term \eqn{h(u, v)} is called
+  \emph{piecewise constant}
+  if it depends only on the distance between \eqn{u} and \eqn{v},
+  say \eqn{h(u,v) = H(||u-v||)}, and \eqn{H} is a piecewise constant
+  function (a function which is constant except for jumps at a finite
+  number of places). The use of piecewise constant interaction terms
+  was first suggested by Takacs (1986).
+ 
+  The function \code{\link{ppm}()}, which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the piecewise constant pairwise
+  interaction is yielded by the function \code{PairPiece()}.
+  See the examples below.
+
+  The entries of \code{r} must be strictly increasing, positive numbers.
+  They are interpreted as the points of discontinuity of \eqn{H}.
+  It is assumed that \eqn{H(s) =1} for all \eqn{s > r_{max}}{s > rmax}
+  where \eqn{r_{max}}{rmax} is the maximum value in \code{r}. Thus the
+  model has as many regular parameters (see \code{\link{ppm}}) 
+  as there are entries in \code{r}. The \eqn{i}-th regular parameter
+  \eqn{\theta_i}{theta[i]} is the logarithm of the value of the
+  interaction function \eqn{H} on the interval
+  \eqn{[r_{i-1},r_i)}{[r[i-1],r[i])}. 
+
+  If \code{r} is a single number, this model is similar to the 
+  Strauss process, see \code{\link{Strauss}}. The difference is that
+  in \code{PairPiece} the interaction function is continuous on the
+  right, while in \code{\link{Strauss}} it is continuous on the left.
+
+  The analogue of this model for multitype point processes
+  has not yet been implemented.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairwise.family}},
+  \code{\link{ppm.object}},
+  \code{\link{Strauss}}
+  \code{\link{rmh.ppm}}
+}
+\examples{
+   PairPiece(c(0.1,0.2))
+   # prints a sensible description of itself
+   data(cells) 
+
+   \dontrun{
+   ppm(cells, ~1, PairPiece(r = c(0.05, 0.1, 0.2)))
+   # fit a stationary piecewise constant pairwise interaction process
+   }
+
+   ppm(cells, ~polynom(x,y,3), PairPiece(c(0.05, 0.1)))
+   # nonstationary process with log-cubic polynomial trend
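+
+   # the fitted step function H can be plotted via fitin() (a sketch):
+   \dontrun{
+     fit <- ppm(cells ~1, PairPiece(c(0.05, 0.1)))
+     plot(fitin(fit))
+   }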
+}
+\references{
+  Takacs, R. (1986)
+  Estimator for the pair potential of a Gibbsian point process.
+  \emph{Statistics} \bold{17}, 429--433.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Pairwise.Rd b/man/Pairwise.Rd
new file mode 100644
index 0000000..2f192b6
--- /dev/null
+++ b/man/Pairwise.Rd
@@ -0,0 +1,107 @@
+\name{Pairwise}
+\alias{Pairwise}
+\title{Generic Pairwise Interaction model}
+\description{
+Creates an instance of a pairwise interaction point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+  Pairwise(pot, name, par, parnames, printfun)
+}
+\arguments{
+  \item{pot}{An R language function giving the user-supplied
+    pairwise interaction potential.}
+  \item{name}{Character string.}
+  \item{par}{List of numerical values for irregular parameters}
+  \item{parnames}{Vector of names of irregular parameters}
+  \item{printfun}{Do not specify this argument: for internal use only.}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of a point process. 
+}
+\details{
+  This code constructs a member of the
+  pairwise interaction family \code{\link{pairwise.family}}
+  with arbitrary pairwise interaction potential given by
+  the user.
+
+  Each pair of points in the point pattern contributes a factor
+  \eqn{h(d)} to the probability density, where \eqn{d} is the distance
+  between the two points. The factor term \eqn{h(d)} is
+  \deqn{h(d) = \exp(-\theta \mbox{pot}(d))}{h(d) = exp(-theta * pot(d))}
+  provided \eqn{\mbox{pot}(d)}{pot(d)} is finite, 
+  where \eqn{\theta}{theta} is the coefficient vector in the model.  
+
+  The function \code{pot} must take as its first argument
+  a matrix of interpoint distances, and evaluate the
+  potential for each of these distances. The result must be
+  either a matrix with the same dimensions as its input,
+  or an array with its first two dimensions the same as its input
+  (the latter case corresponds to a vector-valued potential).
+
+  If irregular parameters are present, then the second argument
+  to \code{pot} should be a vector of the same type as \code{par}
+  giving those parameter values.
+
+  The values returned by \code{pot} may be finite numeric values,
+  or \code{-Inf} indicating a hard core (that is, the corresponding
+  interpoint distance is forbidden). We define
+  \eqn{h(d) = 0} if \eqn{\mbox{pot}(d) = -\infty}{pot(d) = -Inf}.
+  Thus, a potential value of minus infinity is \emph{always} interpreted
+  as corresponding to \eqn{h(d) = 0}, regardless of the sign
+  and magnitude of \eqn{\theta}{theta}.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairwise.family}},
+  \code{\link{ppm.object}}
+}
+\examples{
+   #This is the same as StraussHard(r=0.7,h=0.05)
+   strpot <- function(d,par) {
+         r <- par$r
+         h <- par$h
+         value <- (d <= r)
+         value[d < h] <- -Inf
+         value
+   }
+   mySH <- Pairwise(strpot, "StraussHard process", list(r=0.7,h=0.05),
+           c("interaction distance r", "hard core distance h"))
+   data(cells)
+   ppm(cells, ~ 1, mySH, correction="isotropic")
+
+   # Fiksel (1984) double exponential interaction
+   # see Stoyan, Kendall, Mecke 1987 p 161
+
+   fikspot <- function(d, par) {
+      r <- par$r
+      h <- par$h
+      zeta <- par$zeta
+      value <- exp(-zeta * d)
+      value[d < h] <- -Inf
+      value[d > r] <- 0
+      value
+   }
+   Fiksel <- Pairwise(fikspot, "Fiksel double exponential process",
+                      list(r=3.5, h=1, zeta=1),
+                      c("interaction distance r",
+                        "hard core distance h",
+                        "exponential coefficient zeta"))
+   data(spruces)
+   fit <- ppm(unmark(spruces), ~1, Fiksel, rbord=3.5)
+   fit
+   plot(fitin(fit), xlim=c(0,4))
+   coef(fit)
+   # corresponding values obtained by Fiksel (1984) were -1.9 and -6.0
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/Penttinen.Rd b/man/Penttinen.Rd
new file mode 100644
index 0000000..a41ad34
--- /dev/null
+++ b/man/Penttinen.Rd
@@ -0,0 +1,84 @@
+\name{Penttinen}
+\alias{Penttinen}
+\title{Penttinen Interaction}
+\description{
+  Creates an instance of the Penttinen pairwise interaction
+  point process model, which can then be fitted to point pattern data.
+}
+\usage{
+  Penttinen(r)
+}
+\arguments{
+  \item{r}{circle radius}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of a point process. 
+}
+\details{
+  Penttinen (1984, Example 2.1, page 18), citing Cormack (1979),
+  described the pairwise interaction point process with interaction factor
+  \deqn{
+    h(d) = e^{\theta A(d)} = \gamma^{A(d)}
+  }{
+    h(d) = exp(theta * A(d)) = gamma^(A(d))
+  }
+  between each pair of points separated by a distance \eqn{d}.
+  Here \eqn{A(d)} is the area of intersection between two discs
+  of radius \eqn{r} separated by a distance \eqn{d}, normalised so that
+  \eqn{A(0) = 1}.
+
+  The scale of interaction is controlled by the disc radius \eqn{r}:
+  two points interact if they are closer than \eqn{2 r}{2 * r} apart.
+  The strength of interaction is controlled by the
+  canonical parameter \eqn{\theta}{theta}, which 
+  must be less than or equal to zero, or equivalently by the
+  parameter \eqn{\gamma = e^\theta}{gamma = exp(theta)},
+  which must lie between 0 and 1.
+
+  The potential is inhibitory, i.e.\ this model is only appropriate for
+  regular point patterns. 
+  For \eqn{\gamma=0}{gamma=0} the model is
+  a hard core process with hard core diameter \eqn{2 r}{2 * r}.
+  For \eqn{\gamma=1}{gamma=1} the model is a Poisson process.
+
+  The irregular parameter
+  \eqn{r} must be given in the call to
+  \code{Penttinen}, while the
+  regular parameter \eqn{\theta}{theta} will be estimated.
+
+  This model can be considered as a pairwise approximation
+  to the area-interaction model \code{\link{AreaInter}}.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{ppm.object}},
+  \code{\link{Pairwise}},
+  \code{\link{AreaInter}}.
+}
+\examples{
+   fit <- ppm(cells ~ 1, Penttinen(0.07))
+   fit
+   reach(fit) # interaction range is circle DIAMETER
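+   # a hedged extra check: plot the fitted pairwise interaction;
+   # it should vanish beyond the circle diameter 2 * 0.07
+   plot(fitin(fit))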
+}
+\references{
+  Cormack, R.M. (1979)
+  Spatial aspects of competition between individuals.
+  Pages 151--212 in \emph{Spatial and Temporal Analysis in Ecology},
+  eds. R.M. Cormack and J.K. Ord, International Co-operative
+  Publishing House, Fairland, MD, USA. 
+
+  Penttinen, A. (1984) 
+  \emph{Modelling Interaction in Spatial Point Patterns:
+  Parameter Estimation by the Maximum Likelihood Method.}
+  \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}
+  Studies in Computer Science, Economics and Statistics \bold{7},
+  University of \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}, Finland.
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/Poisson.Rd b/man/Poisson.Rd
new file mode 100644
index 0000000..98d81e1
--- /dev/null
+++ b/man/Poisson.Rd
@@ -0,0 +1,72 @@
+\name{Poisson}
+\alias{Poisson}
+\title{Poisson Point Process Model}
+\description{
+Creates an instance of the Poisson point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+ Poisson()
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction 
+  structure of the Poisson point process
+  (namely, there are no interactions).
+}
+\details{
+  The function \code{\link{ppm}}, which fits point process models to 
+  point pattern data, requires an argument \code{interaction}
+  of class \code{"interact"}
+  describing the interpoint interaction structure
+  of the model to be fitted. 
+  The appropriate description of the Poisson process is 
+  provided by the value of the function \code{Poisson}.
+
+  This works for all types of Poisson processes including
+  multitype and nonstationary Poisson processes.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{Strauss}}
+}
+\examples{
+ ppm(nztrees ~1, Poisson())
+ # fit the stationary Poisson process to 'nztrees'
+ # no edge correction needed
+
+ lon <- longleaf
+ \testonly{
+   lon <- lon[seq(1, npoints(lon), by=50)]
+ }
+ longadult <- unmark(subset(lon, marks >= 30))
+ ppm(longadult ~ x, Poisson())
+ # fit the nonstationary Poisson process 
+ # with intensity lambda(x,y) = exp( a + bx)
+
+ # trees marked by species
+ lans <- lansing
+ \testonly{
+     lans <- lans[seq(1, npoints(lans), by=30)]
+ }
+ ppm(lans ~ marks, Poisson())
+ # fit stationary marked Poisson process
+ # with different intensity for each species
+
+\dontrun{
+ ppm(lansing ~ marks * polynom(x,y,3), Poisson())
+}
+ # fit nonstationary marked Poisson process
+ # with different log-cubic trend for each species
+\testonly{
+   # equivalent functionality - smaller dataset
+   ppm(amacrine ~ marks * polynom(x,y,2), Poisson())
+}
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{models}
+ 
+ 
diff --git a/man/Replace.im.Rd b/man/Replace.im.Rd
new file mode 100644
index 0000000..a60e1b7
--- /dev/null
+++ b/man/Replace.im.Rd
@@ -0,0 +1,133 @@
+\name{Replace.im}
+\alias{[<-.im}
+\title{Reset Values in Subset of Image}
+\description{
+  Reset the values in a subset of a pixel image.
+}
+\usage{
+  \method{[}{im}(x, i, j) <- value
+}
+\arguments{
+  \item{x}{
+    A two-dimensional pixel image.
+    An object of class \code{"im"}.
+  }
+  \item{i}{
+    Object defining the subregion or subset to be replaced.
+    Either a spatial window (an object of class \code{"owin"}), or a
+    pixel image with logical values, or a point pattern (an object
+    of class \code{"ppp"}), or any type of index that applies to a
+    matrix, or something that can be converted to a point pattern
+    by \code{\link{as.ppp}} (using the window of \code{x}).
+  }
+  \item{j}{
+    An integer or logical vector serving as the column index
+    if matrix indexing is being used.  Ignored if \code{i} is
+    appropriate to some sort of replacement \emph{other than}
+    matrix indexing.
+  }
+  \item{value}{
+     Vector, matrix, factor or pixel image
+     containing the replacement values.
+     Short vectors will be recycled.
+  }
+} 
+\value{
+  The image \code{x} with the values replaced.
+}
+\details{
+  This function changes some of the pixel values in a
+  pixel image. The image \code{x} must be an object of class
+  \code{"im"} representing a pixel image defined inside a
+  rectangle in two-dimensional space (see \code{\link{im.object}}).
+
+  The subset to be changed is determined by the arguments \code{i,j}
+  according to the following rules (which are checked in this order):
+  \enumerate{
+    \item
+    \code{i} is a spatial object such as a window,
+    a pixel image with logical values, or a point pattern;  or
+    \item
+    \code{i,j} are indices for the matrix \code{as.matrix(x)}; or
+    \item
+    \code{i} can be converted to a point pattern
+    by \code{\link{as.ppp}(i, W=Window(x))},
+    and \code{i} is not a matrix.
+  }
+
+  If \code{i} is a spatial window (an object of class \code{"owin"}),
+  the values of the image inside this window are changed.
+
+  If \code{i} is a point pattern (an object of class
+  \code{"ppp"}), then the values of the pixel image at the points of
+  this pattern are changed.
+
+  If \code{i} does not satisfy any of the conditions above, then
+  the algorithm tries to interpret \code{i,j} as indices for the matrix
+  \code{as.matrix(x)}. Either \code{i} or \code{j} may be missing or blank.
+
+  If none of the conditions above are met, and if \code{i} is not
+  a matrix, then \code{i} is converted into a point pattern
+  by \code{\link{as.ppp}(i, W=Window(x))}.
+  Again the values of the pixel image at the points of
+  this pattern are changed.
+}
+\section{Warning}{
+  If you have a 2-column matrix containing the \eqn{x,y} coordinates
+  of point locations, then to prevent this being interpreted as an
+  array index, you should convert it to a \code{data.frame}
+  or to a point pattern.
+}
+\seealso{
+  \code{\link{im.object}},
+  \code{\link{[.im}},
+  \code{\link{[}},
+  \code{\link{ppp.object}},
+  \code{\link{as.ppp}},
+  \code{\link{owin.object}}
+}
+\examples{
+ # make up an image
+ X <- setcov(unit.square())
+ plot(X)
+
+ # a rectangular subset
+ W <- owin(c(0,0.5),c(0.2,0.8))
+ X[W] <- 2
+ plot(X)
+
+ # a polygonal subset
+ data(letterR)
+ R <- affine(letterR, diag(c(1,1)/2), c(-2,-0.7))
+ X[R] <- 3
+ plot(X)
+
+ # a point pattern
+ P <- rpoispp(20)
+ X[P] <- 10
+ plot(X)
+
+ # change pixel value at a specific location
+ X[list(x=0.1,y=0.2)] <- 7
+
+ # matrix indexing --- single vector index
+ X[1:2570] <- 10
+ plot(X)
+
+ # matrix indexing using double indices
+ X[1:257,1:10] <- 5
+ plot(X)
+
+ # matrix indexing using a matrix of indices
+ X[cbind(1:257,1:257)] <- 10
+ X[cbind(257:1,1:257)] <- 10
+ plot(X)
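+
+ # a hedged sketch of the Warning section: a 2-column matrix of
+ # coordinates would be read as a matrix index, so convert it to a
+ # data frame, which is interpreted as a point pattern
+ df <- data.frame(x=runif(5), y=runif(5))
+ X[df] <- 42
+ plot(X)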
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/SatPiece.Rd b/man/SatPiece.Rd
new file mode 100644
index 0000000..28a7918
--- /dev/null
+++ b/man/SatPiece.Rd
@@ -0,0 +1,127 @@
+\name{SatPiece}
+\alias{SatPiece}
+\title{Piecewise Constant Saturated Pairwise Interaction Point Process Model}
+\description{
+  Creates an instance of a saturated pairwise interaction point process model
+  with piecewise constant potential function. The model
+  can then be fitted to point pattern data.
+}
+\usage{
+  SatPiece(r, sat)
+}
+\arguments{
+  \item{r}{vector of jump points for the potential function}
+  \item{sat}{
+    vector of saturation values,
+    or a single saturation value
+  }
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of a point process. 
+}
+\details{
+  This is a generalisation of the Geyer saturation point process model,
+  described in \code{\link{Geyer}}, to the case of multiple interaction
+  distances. It can also be described as the saturated analogue of a
+  pairwise interaction process with piecewise-constant pair potential,
+  described in \code{\link{PairPiece}}.
+
+  The saturated point process with interaction radii
+  \eqn{r_1,\ldots,r_k}{r[1], ..., r[k]},
+  saturation thresholds \eqn{s_1,\ldots,s_k}{s[1],...,s[k]},
+  intensity parameter \eqn{\beta}{beta} and
+  interaction parameters
+  \eqn{\gamma_1,\ldots,\gamma_k}{gamma[1], ..., gamma[k]},
+  is the point process
+  in which each point
+  \eqn{x_i}{x[i]} in the pattern \eqn{X}
+  contributes a factor
+  \deqn{
+    \beta \gamma_1^{v_1(x_i, X)} \ldots \gamma_k^{v_k(x_i,X)}
+  }{
+    beta gamma[1]^v(1, x[i], X) ... gamma[k]^v(k, x[i], X)
+  }
+  to the probability density of the point pattern,
+  where
+  \deqn{
+    v_j(x_i, X) = \min( s_j, t_j(x_i,X) )
+  }{
+    v(j, x[i], X) = min(s[j], t(j, x[i], X))
+  }
+  where \eqn{t_j(x_i, X)}{t(j,x[i],X)} denotes the
+  number of points in the pattern \eqn{X} which lie
+  at a distance between \eqn{r_{j-1}}{r[j-1]} and \eqn{r_j}{r[j]}
+  from the point \eqn{x_i}{x[i]}. We take \eqn{r_0 = 0}{r[0] = 0}
+  so that \eqn{t_1(x_i,X)}{t(1, x[i], X)} is the number of points of
+  \eqn{X} that lie within a distance \eqn{r_1}{r[1]} of the point
+  \eqn{x_i}{x[i]}. 
+
+  \code{SatPiece} is used to fit this model to data.
+  The function \code{\link{ppm}()}, which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the piecewise-constant saturated pairwise
+  interaction is yielded by the function \code{SatPiece()}.
+  See the examples below.
+
+  Simulation of this point process model is not yet implemented.
+  This model is not locally stable (the conditional intensity is
+  unbounded).
+  
+  The argument \code{r} specifies the vector of interaction distances.
+  The entries of \code{r} must be strictly increasing, positive numbers.
+
+  The argument \code{sat} specifies the vector of saturation parameters.
+  It should be a vector of the same length as \code{r}, and its entries
+  should be nonnegative numbers. Thus \code{sat[1]} corresponds to the
+  distance range from \code{0} to \code{r[1]}, and \code{sat[2]} to the
+  distance range from \code{r[1]} to \code{r[2]}, etc.
+  Alternatively \code{sat} may be a single number, and this saturation
+  value will be applied to every distance range.
+
+  Infinite values of the
+  saturation parameters are also permitted; in this case
+  \eqn{v_j(x_i,X) = t_j(x_i,X)}{v(j, x[i], X) = t(j, x[i], X)}
+  and there is effectively no `saturation' for the distance range in
+  question. If all the saturation parameters are set to \code{Inf} then
+  the model is effectively a pairwise interaction process, equivalent to
+  \code{\link{PairPiece}} (however the interaction parameters
+  \eqn{\gamma}{gamma} obtained from \code{\link{SatPiece}} are the
+  square roots of the parameters \eqn{\gamma}{gamma}
+  obtained from \code{\link{PairPiece}}).
+   
+  If \code{r} is a single number, this model is virtually equivalent to the 
+  Geyer process, see \code{\link{Geyer}}. 
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairsat.family}},
+  \code{\link{Geyer}},
+  \code{\link{PairPiece}},
+  \code{\link{BadGey}}.
+}
+\examples{
+   SatPiece(c(0.1,0.2), c(1,1))
+   # prints a sensible description of itself
+   SatPiece(c(0.1,0.2), 1)
+   data(cells) 
+   ppm(cells, ~1, SatPiece(c(0.07, 0.1, 0.13), 2))
+   # fit a stationary piecewise-constant saturated pairwise interaction process
+
+   \dontrun{
+   ppm(cells, ~polynom(x,y,3), SatPiece(c(0.07, 0.1, 0.13), 2))
+   # nonstationary process with log-cubic polynomial trend
+   }
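+
+   # a hedged sketch of the PairPiece correspondence noted above:
+   # with infinite saturation the fitted interaction coefficients
+   # (log gammas) should be about half those from PairPiece
+   fitS <- ppm(cells, ~1, SatPiece(c(0.07, 0.1), Inf))
+   fitP <- ppm(cells, ~1, PairPiece(c(0.07, 0.1)))
+   coef(fitS)
+   coef(fitP)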
+}
+\author{\adrian and \rolf, in collaboration with Hao Wang and Jeff Picka}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Saturated.Rd b/man/Saturated.Rd
new file mode 100644
index 0000000..065e1c9
--- /dev/null
+++ b/man/Saturated.Rd
@@ -0,0 +1,40 @@
+\name{Saturated}
+\alias{Saturated}
+\title{Saturated Pairwise Interaction model}
+\description{
+  Experimental.
+}
+\usage{
+  Saturated(pot, name)
+}
+\arguments{
+  \item{pot}{An S language function giving the user-supplied
+    pairwise interaction potential.}
+  \item{name}{Character string.}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of a point process. 
+}
+\details{
+  This is experimental. It constructs a member of the
+  ``saturated pairwise'' family \code{\link{pairsat.family}}.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairsat.family}},
+  \code{\link{Geyer}},
+  \code{\link{SatPiece}},
+  \code{\link{ppm.object}}
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{models}
+
+
diff --git a/man/Smooth.Rd b/man/Smooth.Rd
new file mode 100644
index 0000000..b3d85ee
--- /dev/null
+++ b/man/Smooth.Rd
@@ -0,0 +1,39 @@
+\name{Smooth}
+\alias{Smooth}
+\title{Spatial smoothing of data}
+\description{
+  Generic function to perform spatial smoothing of spatial data.
+}
+\usage{
+  Smooth(X, ...)
+}
+\arguments{
+  \item{X}{Some kind of spatial data}
+  \item{\dots}{Arguments passed to methods.}
+}
+\details{
+  This generic function calls an appropriate method
+  to perform spatial smoothing on the spatial dataset \code{X}.
+
+  Methods for this function include
+  \itemize{
+    \item \code{\link{Smooth.ppp}} for point patterns
+    \item \code{\link{Smooth.im}} for pixel images
+    \item \code{\link{Smooth.msr}} for measures
+    \item \code{\link{Smooth.fv}} for function value tables
+  }
+}
+\seealso{
+  \code{\link{Smooth.ppp}},
+  \code{\link{Smooth.im}},
+  \code{\link{Smooth.msr}},
+  \code{\link{Smooth.fv}}.
+}
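+\examples{
+  # a minimal sketch: Smooth dispatches on the class of X,
+  # here to Smooth.ppp for a marked point pattern
+  plot(Smooth(longleaf, sigma=10))
+}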
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/Smooth.fv.Rd b/man/Smooth.fv.Rd
new file mode 100644
index 0000000..a87f673
--- /dev/null
+++ b/man/Smooth.fv.Rd
@@ -0,0 +1,98 @@
+\name{Smooth.fv}
+\alias{Smooth.fv}
+\title{
+  Apply Smoothing to Function Values
+}
+\description{
+  Applies smoothing to the values
+  in selected columns of a function value table.
+}
+\usage{
+\method{Smooth}{fv}(X, which = "*", ...,
+          method=c("smooth.spline", "loess"),
+          xinterval=NULL)
+}
+\arguments{
+  \item{X}{
+    Values to be smoothed.
+    A function value table (object of class \code{"fv"},
+    see \code{\link{fv.object}}).
+  }
+  \item{which}{
+    Character vector identifying which columns of the table
+    should be smoothed. Either a vector containing names
+    of columns, or one of the wildcard strings \code{"*"} or \code{"."}
+    explained below.
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{\link[stats]{smooth.spline}}
+    or \code{\link[stats]{loess}} to control the smoothing.
+  }
+  \item{method}{
+    Smoothing algorithm. A character string, partially matched
+    to either \code{"smooth.spline"} or \code{"loess"}.
+  }
+  \item{xinterval}{
+    Optional. Numeric vector of length 2 specifying a range of
+    \eqn{x} values. Smoothing will be performed only on the part of the
+    function corresponding to this range.
+  }
+}
+\details{
+  The command \code{Smooth.fv}
+  applies smoothing to the function values in
+  a function value table (object of class \code{"fv"}).
+
+  \code{Smooth.fv} is a method for the generic function
+  \code{\link{Smooth}}.
+  
+  The smoothing is performed either by 
+  \code{\link[stats]{smooth.spline}} or by
+  \code{\link[stats]{loess}}.
+
+  Smoothing is applied to every column
+  (or to each of the selected columns) of function values in turn,
+  using the function argument as the \eqn{x} coordinate
+  and the selected column as the \eqn{y} coordinate.
+  The original function values are then replaced by the corresponding
+  smooth interpolated function values.
+
+  The optional argument \code{which} specifies which of the
+  columns of function values in \code{X} will be smoothed.
+  The default (indicated by the wildcard \code{which="*"})
+  is to smooth all function values, i.e.\ all columns except the
+  function argument. Alternatively \code{which="."} designates
+  the subset of function values that are displayed in the default plot.
+  Alternatively \code{which} can be a character vector containing the
+  names of columns of \code{X}.
+
+  If the argument \code{xinterval} is given, then
+  smoothing will be performed only in the specified range 
+  of \eqn{x} values. 
+}
+\value{
+  Another function value table (object of class \code{"fv"})
+  of the same format.
+}
+\author{\adrian and \rolf}
+\seealso{
+  \code{\link{Smooth}},
+  \code{\link{with.fv}},
+  \code{\link{fv.object}},
+  \code{\link[stats]{smooth.spline}},
+  \code{\link[stats]{loess}}
+}
+\examples{
+   data(cells)
+   G <- Gest(cells)
+   plot(G)
+   plot(Smooth(G, df=9), add=TRUE)
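+   # a hedged variant: smooth only the Kaplan-Meier estimate
+   # (column "km", name assumed), restricted to r below 0.1
+   plot(Smooth(G, which="km", df=9, xinterval=c(0, 0.1)))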
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/Smooth.msr.Rd b/man/Smooth.msr.Rd
new file mode 100644
index 0000000..a1f29bc
--- /dev/null
+++ b/man/Smooth.msr.Rd
@@ -0,0 +1,75 @@
+\name{Smooth.msr}
+\alias{Smooth.msr}
+\title{
+  Smooth a Signed or Vector-Valued Measure
+}
+\description{
+  Apply kernel smoothing to a signed measure or vector-valued measure.
+}
+\usage{
+ \method{Smooth}{msr}(X, ..., drop=TRUE)
+}
+\arguments{
+  \item{X}{
+    Object of class \code{"msr"} representing a
+    signed measure or vector-valued measure.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{density.ppp}} controlling the
+    smoothing bandwidth and the pixel resolution.
+  }
+  \item{drop}{
+    Logical. If \code{TRUE} (the default), the result of smoothing
+    a scalar-valued measure is a pixel image. If \code{FALSE}, the
+    result of smoothing a scalar-valued measure is a list
+    containing one pixel image.
+  }
+}
+\details{
+  This function applies kernel smoothing to a signed measure or
+  vector-valued measure \code{X}. The Gaussian kernel is used.
+
+  The object \code{X} would typically have been created by
+  \code{\link{residuals.ppm}} or \code{\link{msr}}.
+}
+\value{
+  A pixel image or a list of pixel images.
+  For scalar-valued measures, a pixel image (object of class
+  \code{"im"}) provided \code{drop=TRUE}.
+  For vector-valued measures (or if \code{drop=FALSE}),
+  a list of pixel images; the list also
+  belongs to the class \code{"solist"} so that it can be printed and plotted.
+}
+\references{
+  Baddeley, A., Turner, R.,
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005)
+  Residual analysis for spatial point processes.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{67}, 617--666.
+
+  Baddeley, A., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Pakes, A.G. (2008) 
+  Properties of residuals for spatial point processes.
+  \emph{Annals of the Institute of Statistical Mathematics}
+  \bold{60}, 627--649.
+}
+\author{\adrian}
+\seealso{
+  \code{\link{Smooth}},
+  \code{\link{msr}},
+  \code{\link{plot.msr}}
+}
+\examples{
+   X <- rpoispp(function(x,y) { exp(3+3*x) })
+   fit <- ppm(X, ~x+y)
+   rp <- residuals(fit, type="pearson")
+   rs <- residuals(fit, type="score")
+
+   plot(Smooth(rp))
+   plot(Smooth(rs))
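+
+   # with drop=FALSE, smoothing the scalar-valued measure gives a
+   # list containing one pixel image (a sketch of the drop argument)
+   Smooth(rp, drop=FALSE)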
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Smooth.ppp.Rd b/man/Smooth.ppp.Rd
new file mode 100644
index 0000000..174b09d
--- /dev/null
+++ b/man/Smooth.ppp.Rd
@@ -0,0 +1,209 @@
+\name{Smooth.ppp}
+\alias{Smooth.ppp}
+\alias{markmean}
+\alias{markvar}
+\title{Spatial smoothing of observations at irregular points}
+\description{
+  Performs spatial smoothing of numeric values observed
+  at a set of irregular locations. Uses Gaussian kernel smoothing
+  and least-squares cross-validated bandwidth selection.
+}
+\usage{
+\method{Smooth}{ppp}(X, sigma=NULL,
+                     ...,
+                     weights = rep(1, npoints(X)),
+                     at="pixels",
+                     edge=TRUE, diggle=FALSE, geometric=FALSE)
+
+markmean(X, ...)
+
+markvar(X, sigma=NULL, ..., weights=NULL, varcov=NULL)
+}
+\arguments{
+  \item{X}{A marked point pattern (object of class \code{"ppp"}).}
+  \item{sigma}{
+    Smoothing bandwidth.
+    A single positive number, a numeric vector of length 2,
+    or a function that selects the bandwidth automatically.
+    See \code{\link{density.ppp}}.
+  }
+  \item{\dots}{
+    Further arguments passed to
+    \code{\link{bw.smoothppp}} and \code{\link{density.ppp}}
+    to control the kernel smoothing and
+    the pixel resolution of the result.
+  }
+  \item{weights}{
+    Optional weights attached to the observations.
+    A numeric vector, numeric matrix, an \code{expression}
+    or a pixel image.
+    See \code{\link{density.ppp}}.
+  }
+  \item{at}{
+    String specifying whether to compute the smoothed values
+    at a grid of pixel locations (\code{at="pixels"}) or
+    only at the points of \code{X} (\code{at="points"}).
+  }
+  \item{edge,diggle}{
+    Arguments passed to \code{\link{density.ppp}} to
+    determine the edge correction.
+  }
+  \item{varcov}{
+    Variance-covariance matrix. An alternative
+    to \code{sigma}. See \code{\link{density.ppp}}.
+  }
+  \item{geometric}{
+    Logical value indicating whether to perform geometric mean
+    smoothing instead of arithmetic mean smoothing. See Details.
+  }
+}
+\details{
+  The function \code{Smooth.ppp}
+  performs spatial smoothing of numeric values
+  observed at a set of irregular locations. The functions
+  \code{markmean} and \code{markvar} are wrappers for \code{Smooth.ppp}
+  which compute the spatially-varying mean and variance of the marks of
+  a point pattern.
+
+  \code{Smooth.ppp} is a method for the generic function
+  \code{\link{Smooth}} for the class \code{"ppp"} of point patterns.
+  Thus you can type simply \code{Smooth(X)}.
+  
+  Smoothing is performed by Gaussian kernel weighting. If the
+  observed values are \eqn{v_1,\ldots,v_n}{v[1],...,v[n]}
+  at locations \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} respectively,
+  then the smoothed value at a location \eqn{u} is
+  (ignoring edge corrections)
+  \deqn{
+    g(u) = \frac{\sum_i k(u-x_i) v_i}{\sum_i k(u-x_i)}
+  }{
+    g(u) = (sum of k(u-x[i]) v[i])/(sum of k(u-x[i]))
+  }
+  where \eqn{k} is a Gaussian kernel. This is known as the 
+  Nadaraya-Watson smoother (Nadaraya, 1964, 1989; Watson, 1964).
+  By default, the smoothing kernel bandwidth is chosen by
+  least squares cross-validation (see below).
+  
+  The argument \code{X} must be a marked point pattern (object
+  of class \code{"ppp"}, see \code{\link{ppp.object}}).
+  The points of the pattern are taken to be the
+  observation locations \eqn{x_i}{x[i]}, and the marks of the pattern
+  are taken to be the numeric values \eqn{v_i}{v[i]} observed at these
+  locations.
+
+  The marks are allowed to be a data frame (in
+  \code{Smooth.ppp}
+  and \code{markmean}). Then the smoothing procedure is applied to each
+  column of marks. 
+  
+  The numerator and denominator are computed by \code{\link{density.ppp}}.
+  The arguments \code{...} control the smoothing kernel parameters
+  and determine whether edge correction is applied.
+  The smoothing kernel bandwidth can be specified by either of the arguments
+  \code{sigma} or \code{varcov} which are passed to \code{\link{density.ppp}}.
+  If neither of these arguments is present, then by default the
+  bandwidth is selected by least squares cross-validation,
+  using \code{\link{bw.smoothppp}}. 
+
+  The optional argument \code{weights} allows numerical weights to
+  be applied to the data. If a weight \eqn{w_i}{w[i]}
+  is associated with location \eqn{x_i}{x[i]}, then the smoothed
+  function is 
+  (ignoring edge corrections)
+  \deqn{
+    g(u) = \frac{\sum_i k(u-x_i) v_i w_i}{\sum_i k(u-x_i) w_i}
+  }{
+    g(u) = (sum of k(u-x[i]) v[i] w[i])/(sum of k(u-x[i]) w[i])
+  }
+
+  If \code{geometric=TRUE} then geometric mean smoothing
+  is performed instead of arithmetic mean smoothing.
+  The mark values must be non-negative numbers.
+  The logarithm of the mark values is computed; these logarithmic values are
+  kernel-smoothed as described above; then the exponential function
+  is applied to the smoothed values.
+  
+  An alternative to kernel smoothing is inverse-distance weighting,
+  which is performed by \code{\link{idw}}.
+}
+\section{Very small bandwidth}{
+  If the chosen bandwidth \code{sigma} is very small,
+  kernel smoothing is mathematically equivalent
+  to nearest-neighbour interpolation, and the result will
+  be computed by \code{\link{nnmark}}, except when
+  \code{at="points"} and \code{leaveoneout=FALSE},
+  in which case the original mark values are returned.
+}
+\value{
+  \emph{If \code{X} has a single column of marks:}
+  \itemize{
+    \item 
+    If \code{at="pixels"} (the default), the result is
+    a pixel image (object of class \code{"im"}). 
+    Pixel values are values of the interpolated function.
+    \item
+    If \code{at="points"}, the result is a numeric vector
+    of length equal to the number of points in \code{X}.
+    Entries are values of the interpolated function at the points of \code{X}.
+  }
+  \emph{If \code{X} has a data frame of marks:}
+  \itemize{
+    \item 
+    If \code{at="pixels"} (the default), the result is a named list of 
+    pixel images (object of class \code{"im"}). There is one
+    image for each column of marks. This list also belongs to
+    the class \code{"solist"}, for which there is a plot method.
+    \item
+    If \code{at="points"}, the result is a data frame
+    with one row for each point of \code{X},
+    and one column for each column of marks. 
+    Entries are values of the interpolated function at the points of \code{X}.
+  }
+  The return value has attributes
+  \code{"sigma"} and \code{"varcov"} which report the smoothing
+  bandwidth that was used.
+}
+\seealso{
+  \code{\link{Smooth}},
+  
+  \code{\link{density.ppp}},
+  \code{\link{bw.smoothppp}},
+  \code{\link{nnmark}},
+  \code{\link{ppp.object}},
+  \code{\link{im.object}}.
+
+  See \code{\link{idw}} for inverse-distance weighted smoothing.
+  
+  To perform interpolation, see also the \code{akima} package.
+}
+\examples{
+   # Longleaf data - tree locations, marked by tree diameter
+   # Local smoothing of tree diameter (automatic bandwidth selection)
+   Z <- Smooth(longleaf)
+   # Kernel bandwidth sigma=5
+   plot(Smooth(longleaf, 5))
+   # mark variance
+   plot(markvar(longleaf, sigma=5))
+   # data frame of marks: trees marked by diameter and height
+   plot(Smooth(finpines, sigma=2))
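+   # hedged extras: smoothed values at the data points only, and
+   # geometric-mean smoothing (marks must be non-negative)
+   v <- Smooth(longleaf, sigma=5, at="points")
+   plot(Smooth(longleaf, sigma=5, geometric=TRUE))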
+}
+\author{
+  \spatstatAuthors.
+}
+\references{
+  Nadaraya, E.A. (1964) On estimating regression.
+  \emph{Theory of Probability and its Applications}
+  \bold{9}, 141--142.
+
+  Nadaraya, E.A. (1989) 
+  \emph{Nonparametric estimation of probability densities
+  and regression curves}.
+  Kluwer, Dordrecht.
+
+  Watson, G.S. (1964)
+  Smooth regression analysis.
+  \emph{Sankhya A} \bold{26}, 359--372.
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/Smooth.ssf.Rd b/man/Smooth.ssf.Rd
new file mode 100644
index 0000000..6d4336b
--- /dev/null
+++ b/man/Smooth.ssf.Rd
@@ -0,0 +1,45 @@
+\name{Smooth.ssf}
+\alias{Smooth.ssf}
+\title{
+  Smooth a Spatially Sampled Function
+}
+\description{
+  Applies kernel smoothing to a spatially sampled function.
+}
+\usage{
+ \method{Smooth}{ssf}(X, \dots)
+}
+\arguments{
+  \item{X}{
+    Object of class \code{"ssf"}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link[spatstat]{Smooth.ppp}}
+    to control the smoothing.
+  }
+}
+\details{
+  An object of class \code{"ssf"} represents a real-valued or
+  vector-valued function that has been evaluated or sampled at an
+  irregular set of points.
+
+  The function values will be smoothed using a Gaussian
+  kernel.
+}
+\value{
+  A pixel image or a list of pixel images.
+}
+\author{
+  \adrian.
+}
+\seealso{
+  \code{\link{ssf}},
+  \code{\link{Smooth.ppp}}
+}
+\examples{
+   f <- ssf(redwood, nndist(redwood))
+   Smooth(f, sigma=0.1)
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/Smoothfun.ppp.Rd b/man/Smoothfun.ppp.Rd
new file mode 100644
index 0000000..30060d4
--- /dev/null
+++ b/man/Smoothfun.ppp.Rd
@@ -0,0 +1,73 @@
+\name{Smoothfun.ppp}
+\alias{Smoothfun}
+\alias{Smoothfun.ppp}
+\title{
+  Smooth Interpolation of Marks as a Spatial Function
+}
+\description{
+  Perform spatial smoothing of numeric values observed
+  at a set of irregular locations, and return the result
+  as a function of spatial location.  
+}
+\usage{
+Smoothfun(X, \dots)
+
+\method{Smoothfun}{ppp}(X, sigma = NULL, \dots,
+                        weights = NULL, edge = TRUE, diggle = FALSE)
+}
+\arguments{
+  \item{X}{
+    Marked point pattern (object of class \code{"ppp"}).
+  }
+  \item{sigma}{
+    Smoothing bandwidth, or bandwidth selection function,
+    passed to \code{\link{Smooth.ppp}}.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link{Smooth.ppp}}.
+  }
+  \item{weights}{
+    Optional vector of weights associated with the points of \code{X}.
+  }
+  \item{edge,diggle}{
+    Logical arguments controlling the edge correction.
+    Arguments passed to \code{\link{Smooth.ppp}}.
+  }
+}
+\details{
+  The commands \code{Smoothfun} and \code{\link{Smooth}}
+  both perform kernel-smoothed spatial interpolation
+  of numeric values observed at irregular spatial locations.
+  The difference is that \code{\link{Smooth}} returns a pixel image,
+  containing the interpolated values at a grid of locations, while
+  \code{Smoothfun} returns a \code{function(x,y)} which can be used
+  to compute the interpolated value at \emph{any} spatial location.
+  For purposes such as model-fitting it is more accurate to
+  use \code{Smoothfun} to interpolate data.
+}
+\value{
+  A \code{function} with arguments \code{x,y}.
+  The function also belongs to the class \code{"Smoothfun"} which has
+  methods for \code{print} and \code{\link{as.im}}.
+  It also belongs to the class \code{"funxy"} which has methods
+  for \code{plot}, \code{contour} and \code{persp}.
+}
+\seealso{
+  \code{\link{Smooth}}
+}
+\examples{
+  f <- Smoothfun(longleaf)
+  f
+  f(120, 80)
+  plot(f)
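+  # the result can be converted to a pixel image by as.im()
+  as.im(f)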
+}
+\author{\adrian, \rolf and \ege.}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/Softcore.Rd b/man/Softcore.Rd
new file mode 100644
index 0000000..4610cdf
--- /dev/null
+++ b/man/Softcore.Rd
@@ -0,0 +1,128 @@
+\name{Softcore}
+\alias{Softcore}
+\title{The Soft Core Point Process Model}
+\description{
+Creates an instance of the Soft Core point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+  Softcore(kappa, sigma0=NA)
+}
+\arguments{
+  \item{kappa}{The exponent \eqn{\kappa}{kappa} of the Soft Core
+    interaction}
+  \item{sigma0}{
+    Optional. Initial estimate of the parameter \eqn{\sigma}{sigma}.
+    A positive number.
+  }
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of the Soft Core process with exponent \eqn{\kappa}{kappa}.
+}
+\details{
+  The (stationary)
+  Soft Core point process with parameters \eqn{\beta}{beta} and
+  \eqn{\sigma}{sigma} and exponent \eqn{\kappa}{kappa}
+  is the pairwise interaction point process in which 
+  each point contributes a factor \eqn{\beta}{beta} to the 
+  probability density of the point pattern, and each pair of points
+  contributes a factor
+  \deqn{
+    \exp \left\{ - \left( \frac{\sigma}{d} \right)^{2/\kappa} \right\}
+  }{
+    exp( - (sigma/d)^(2/kappa) )
+  }
+  to the density, where \eqn{d} is the distance between the two points.
+
+  Thus the process has probability density
+  \deqn{
+    f(x_1,\ldots,x_n) =
+    \alpha \beta^{n(x)}
+    \exp \left\{ - \sum_{i < j} \left(
+                 \frac{\sigma}{||x_i-x_j||}
+    \right)^{2/\kappa} \right\}
+  }{
+    f(x_1,\ldots,x_n) =
+    alpha . beta^n(x) exp( - sum (sigma/||x[i]-x[j]||)^(2/kappa))
+  }
+  where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the 
+  points of the pattern, \eqn{n(x)} is the number of points in the
+  pattern, \eqn{\alpha}{alpha} is the normalising constant,
+  and the sum on the right hand side is
+  over all unordered pairs of points of the pattern. 
+
+  This model describes an ``ordered'' or ``inhibitive'' process,
+  with the interpoint interaction decreasing smoothly with distance.
+  The strength of interaction is controlled by the
+  parameter \eqn{\sigma}{sigma}, a positive real number,
+  with larger values corresponding
+  to stronger interaction; and by the exponent \eqn{\kappa}{kappa}
+  in the range \eqn{(0,1)}, with larger values corresponding to
+  weaker interaction.
+  If \eqn{\sigma = 0}{sigma = 0}
+  the model reduces to the Poisson point process.
+  If \eqn{\sigma > 0}{sigma > 0},
+  the process is well-defined only for \eqn{\kappa}{kappa} in \eqn{(0,1)}.
+  The limit of the model as \eqn{\kappa \to 0}{kappa -> 0} is the
+  hard core process with hard core distance \eqn{h=\sigma}{h=sigma}.
+ 
+  The nonstationary Soft Core process is similar except that 
+  the contribution of each individual point \eqn{x_i}{x[i]}
+  is a function \eqn{\beta(x_i)}{beta(x[i])}
+  of location, rather than a constant beta. 
+ 
+  The function \code{\link{ppm}()}, which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the Soft Core process pairwise interaction is
+  yielded by the function \code{Softcore()}. See the examples below.
+ 
+  The main argument is the exponent  \code{kappa}.
+  When \code{kappa} is fixed, the model becomes an exponential family
+  with canonical parameters \eqn{\log \beta}{log(beta)}
+  and \deqn{
+    \log \gamma = \frac{2}{\kappa} \log\sigma
+  }{
+    log(gamma) = (2/kappa) log(sigma)
+  }
+  The canonical parameters are estimated by \code{\link{ppm}()}, not fixed in
+  \code{Softcore()}. 
+
+  The optional argument \code{sigma0} can be used to improve
+  numerical stability. If \code{sigma0} is given, it should be a positive
+  number, and it should be a rough estimate of the
+  parameter \eqn{\sigma}{sigma}. 
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairwise.family}},
+  \code{\link{ppm.object}}
+}
+\references{
+  Ogata, Y, and Tanemura, M. (1981).
+  Estimation of interaction potentials of spatial point patterns
+  through the maximum likelihood procedure.
+  \emph{Annals of the Institute of Statistical Mathematics}, B
+  \bold{33}, 315--338.
+
+  Ogata, Y, and Tanemura, M. (1984).
+  Likelihood analysis of spatial point patterns.
+  \emph{Journal of the Royal Statistical Society, series B}
+  \bold{46}, 496--518.
+}
+\examples{
+   data(cells) 
+   ppm(cells, ~1, Softcore(kappa=0.5), correction="isotropic")
+   # fit the stationary Soft Core process to `cells'
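+
+   # a hedged sketch: recover sigma from the fitted canonical
+   # parameter via log(gamma) = (2/kappa) * log(sigma);
+   # the coefficient name "Interaction" is an assumption
+   fit <- ppm(cells, ~1, Softcore(kappa=0.5), correction="isotropic")
+   exp((0.5/2) * coef(fit)[["Interaction"]])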
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Strauss.Rd b/man/Strauss.Rd
new file mode 100644
index 0000000..56fc54b
--- /dev/null
+++ b/man/Strauss.Rd
@@ -0,0 +1,100 @@
+\name{Strauss}
+\alias{Strauss}
+\title{The Strauss Point Process Model}
+\description{
+Creates an instance of the Strauss point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+  Strauss(r)
+}
+\arguments{
+  \item{r}{The interaction radius of the Strauss process}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of the Strauss process with interaction radius \eqn{r}.
+}
+\details{
+  The (stationary) Strauss process with interaction radius \eqn{r} and 
+  parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}
+  is the pairwise interaction point process
+  in which each point contributes a factor \eqn{\beta}{beta} to the 
+  probability density of the point pattern, and each pair of points
+  closer than \eqn{r} units apart contributes a factor
+  \eqn{\gamma}{gamma} to the density.
+
+  Thus the probability density is
+  \deqn{
+    f(x_1,\ldots,x_n) =
+    \alpha \beta^{n(x)} \gamma^{s(x)}
+  }{
+    f(x_1,\ldots,x_n) =
+    alpha . beta^n(x) gamma^s(x)
+  }
+  where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the 
+  points of the pattern, \eqn{n(x)} is the number of points in the
+  pattern, \eqn{s(x)} is the number of distinct unordered pairs of
+  points that are closer than \eqn{r} units apart,
+  and \eqn{\alpha}{alpha} is the normalising constant.
+
+  The interaction parameter \eqn{\gamma}{gamma} must be less than
+  or equal to \eqn{1}
+  so that this model describes an ``ordered'' or ``inhibitive'' pattern.
+ 
+  The nonstationary Strauss process is similar except that 
+  the contribution of each individual point \eqn{x_i}{x[i]}
+  is a function \eqn{\beta(x_i)}{beta(x[i])}
+  of location, rather than a constant beta. 
+ 
+  The function \code{\link{ppm}()}, which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the Strauss process pairwise interaction is
+  yielded by the function \code{Strauss()}. See the examples below.
+ 
+  Note the only argument is the interaction radius \code{r}.
+  When \code{r} is fixed, the model becomes an exponential family.
+  The canonical parameters \eqn{\log(\beta)}{log(beta)}
+  and \eqn{\log(\gamma)}{log(gamma)}
+  are estimated by \code{\link{ppm}()}, not fixed in
+  \code{Strauss()}. 
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairwise.family}},
+  \code{\link{ppm.object}}
+}
+\references{
+ Kelly, F.P. and Ripley, B.D. (1976)
+ On Strauss's model for clustering.
+ \emph{Biometrika} \bold{63}, 357--360.
+
+ Strauss, D.J. (1975)
+ A model for clustering.
+ \emph{Biometrika} \bold{62}, 467--475.
+}
+\examples{
+   Strauss(r=0.1)
+   # prints a sensible description of itself
+   data(cells)
+
+   \dontrun{
+   ppm(cells, ~1, Strauss(r=0.07))
+   # fit the stationary Strauss process to `cells'
+   }
+
+
+   ppm(cells, ~polynom(x,y,3), Strauss(r=0.07))
+   # fit a nonstationary Strauss process with log-cubic polynomial trend
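+
+   # a hedged sketch: the fitted interaction parameter gamma is the
+   # exponential of the canonical coefficient (name "Interaction" assumed)
+   fit <- ppm(cells, ~1, Strauss(r=0.07))
+   exp(coef(fit)[["Interaction"]])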
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/StraussHard.Rd b/man/StraussHard.Rd
new file mode 100644
index 0000000..38f052c
--- /dev/null
+++ b/man/StraussHard.Rd
@@ -0,0 +1,124 @@
+\name{StraussHard}
+\alias{StraussHard}
+\title{The Strauss / Hard Core Point Process Model}
+\description{
+Creates an instance of the ``Strauss/hard core'' point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+  StraussHard(r, hc=NA)
+}
+\arguments{
+  \item{r}{The interaction radius of the Strauss interaction}
+  \item{hc}{The hard core distance. Optional.}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of the ``Strauss/hard core''
+  process with Strauss interaction radius \eqn{r}
+  and hard core distance \code{hc}.
+}
+\details{
+  A Strauss/hard core process with interaction radius \eqn{r},
+  hard core distance \eqn{h < r}, and 
+  parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma},
+  is a pairwise interaction point process
+  in which
+  \itemize{
+    \item distinct points are not allowed to come closer
+      than a distance \eqn{h} apart
+    \item each pair of points closer than \eqn{r} units apart
+      contributes a factor \eqn{\gamma}{gamma} to the probability density.
+  }
+  This is a hybrid of the Strauss process and the hard core process.
+
+  The probability density is zero if any pair of points
+  is closer than \eqn{h} units apart, and otherwise equals
+  \deqn{
+    f(x_1,\ldots,x_n) =
+    \alpha \beta^{n(x)} \gamma^{s(x)}
+  }{
+    f(x_1,\ldots,x_n) =
+    alpha . beta^n(x) gamma^s(x)
+  }
+  where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the 
+  points of the pattern, \eqn{n(x)} is the number of points in the
+  pattern, \eqn{s(x)} is the number of distinct unordered pairs of
+  points that are closer than \eqn{r} units apart,
+  and \eqn{\alpha}{alpha} is the normalising constant.
+
+  The interaction parameter \eqn{\gamma}{gamma} may take any
+  positive value (unlike the case for the Strauss process).
+  If \eqn{\gamma < 1}{gamma < 1},
+  the model describes an ``ordered'' or ``inhibitive'' pattern.
+  If \eqn{\gamma > 1}{gamma > 1},
+  the model is ``ordered'' or ``inhibitive'' up to the distance
+  \eqn{h}, but has an ``attraction'' between points lying at
+  distances in the range between \eqn{h} and \eqn{r}.
+
+  If \eqn{\gamma = 1}{gamma = 1}, the process reduces to a classical
+  hard core process with hard core distance \eqn{h}.
+  If \eqn{\gamma = 0}{gamma = 0}, the process reduces to a classical
+  hard core process with hard core distance \eqn{r}.
+  
+  The function \code{\link{ppm}()}, which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the Strauss/hard core process
+  pairwise interaction is
+  yielded by the function \code{StraussHard()}. See the examples below.
+ 
+  The canonical parameter \eqn{\log(\gamma)}{log(gamma)}
+  is estimated by \code{\link{ppm}()}, not fixed in
+  \code{StraussHard()}. 
+
+  If the hard core distance argument \code{hc} is missing or \code{NA},
+  it will be estimated from the data when \code{\link{ppm}} is called.
+  The estimated value of \code{hc} is the minimum nearest neighbour distance
+  multiplied by \eqn{n/(n+1)}, where \eqn{n} is the
+  number of data points.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{pairwise.family}},
+  \code{\link{ppm.object}}
+}
+\references{
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42}, 283--322.
+
+  Ripley, B.D. (1981) 
+  \emph{Spatial statistics}.
+  John Wiley and Sons.
+
+  Strauss, D.J. (1975)
+  A model for clustering.
+  \emph{Biometrika} \bold{62}, 467--475.
+}
+\examples{
+   StraussHard(r=1,hc=0.02)
+   # prints a sensible description of itself
+
+   data(cells)
+
+   \dontrun{
+   ppm(cells, ~1, StraussHard(r=0.1, hc=0.05))
+   # fit the stationary Strauss/hard core process to `cells'
+   }
+
+   ppm(cells, ~ polynom(x,y,3), StraussHard(r=0.1, hc=0.05))
+   # fit a nonstationary Strauss/hard core process
+   # with log-cubic polynomial trend
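+
+   # a hedged sketch of the default hard core estimate described in
+   # Details: minimum nearest-neighbour distance times n/(n+1)
+   n <- npoints(cells)
+   min(nndist(cells)) * n/(n+1)
+   ppm(cells, ~1, StraussHard(r=0.1))   # hc estimated from data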
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Triplets.Rd b/man/Triplets.Rd
new file mode 100644
index 0000000..40f744a
--- /dev/null
+++ b/man/Triplets.Rd
@@ -0,0 +1,102 @@
+\name{Triplets}
+\alias{Triplets}
+\title{The Triplet Point Process Model}
+\description{
+Creates an instance of Geyer's triplet interaction point process model
+which can then be fitted to point pattern data.
+}
+\usage{
+  Triplets(r)
+}
+\arguments{
+  \item{r}{The interaction radius of the Triplets process}
+}
+\value{
+  An object of class \code{"interact"}
+  describing the interpoint interaction
+  structure of the Triplets process with interaction radius \eqn{r}.
+}
+\details{
+  The (stationary) Geyer triplet process (Geyer, 1999)
+  with interaction radius \eqn{r} and 
+  parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}
+  is the point process
+  in which each point contributes a factor \eqn{\beta}{beta} to the 
+  probability density of the point pattern, and each triplet of close points
+  contributes a factor \eqn{\gamma}{gamma} to the density.
+  A triplet of close points is a group of 3 points,
+  each pair of which is closer than \eqn{r} units
+  apart.
+
+  Thus the probability density is
+  \deqn{
+    f(x_1,\ldots,x_n) =
+    \alpha \beta^{n(x)} \gamma^{s(x)}
+  }{
+    f(x_1,\ldots,x_n) =
+    alpha . beta^n(x) gamma^s(x)
+  }
+  where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the 
+  points of the pattern, \eqn{n(x)} is the number of points in the
+  pattern, \eqn{s(x)} is the number of unordered triples of
+  points that are closer than \eqn{r} units apart,
+  and \eqn{\alpha}{alpha} is the normalising constant.
+
+  The interaction parameter \eqn{\gamma}{gamma} must be less than
+  or equal to \eqn{1}
+  so that this model describes an ``ordered'' or ``inhibitive'' pattern.
+ 
+  The nonstationary Triplets process is similar except that 
+  the contribution of each individual point \eqn{x_i}{x[i]}
+  is a function \eqn{\beta(x_i)}{beta(x[i])}
+  of location, rather than a constant beta. 
+ 
+  The function \code{\link{ppm}()}, which fits point process models to 
+  point pattern data, requires an argument 
+  of class \code{"interact"} describing the interpoint interaction
+  structure of the model to be fitted. 
+  The appropriate description of the Triplets process interaction is
+  yielded by the function \code{Triplets()}. See the examples below.
+ 
+  Note the only argument is the interaction radius \code{r}.
+  When \code{r} is fixed, the model becomes an exponential family.
+  The canonical parameters \eqn{\log(\beta)}{log(beta)}
+  and \eqn{\log(\gamma)}{log(gamma)}
+  are estimated by \code{\link{ppm}()}, not fixed in
+  \code{Triplets()}.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{triplet.family}},
+  \code{\link{ppm.object}}
+}
+\references{
+  Geyer, C.J. (1999)
+  Likelihood Inference for Spatial Point Processes.
+  Chapter 3 in 
+  O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds)
+  \emph{Stochastic Geometry: Likelihood and Computation},
+  Chapman and Hall / CRC, 
+  Monographs on Statistics and Applied Probability, number 80.
+  Pages 79--140.
+}
+\examples{
+   Triplets(r=0.1)
+   # prints a sensible description of itself
+
+   \dontrun{
+   ppm(cells, ~1, Triplets(r=0.2))
+   # fit the stationary Triplets process to `cells'
+   }
+
+   ppm(cells, ~polynom(x,y,3), Triplets(r=0.2))
+   # fit a nonstationary Triplets process with log-cubic polynomial trend
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/Tstat.Rd b/man/Tstat.Rd
new file mode 100644
index 0000000..742dacf
--- /dev/null
+++ b/man/Tstat.Rd
@@ -0,0 +1,91 @@
+\name{Tstat}
+\alias{Tstat}
+\title{
+  Third order summary statistic
+}
+\description{
+  Computes the third order summary statistic \eqn{T(r)}
+  of a spatial point pattern.
+}
+\usage{
+Tstat(X, ..., r = NULL, rmax = NULL,
+    correction = c("border", "translate"), ratio = FALSE, verbose=TRUE)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of \eqn{T(r)} will be computed.
+    An object of class \code{"ppp"}, or data
+    in any format acceptable to \code{\link{as.ppp}()}.
+  }
+  \item{\dots}{Ignored.}
+  \item{r}{
+    Optional. Vector of values for the argument \eqn{r} at which \eqn{T(r)} 
+    should be evaluated. Users are advised \emph{not} to specify this
+    argument; there is a sensible default.
+  }
+  \item{rmax}{
+    Optional. Numeric. The maximum value of \eqn{r} for which
+    \eqn{T(r)} should be estimated.
+  }
+  \item{correction}{
+    Optional. A character vector containing any selection of the
+    options \code{"none"}, \code{"border"}, \code{"bord.modif"},
+    \code{"translate"}, \code{"translation"}, or \code{"best"}.
+    It specifies the edge correction(s) to be applied.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    each edge-corrected estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+  \item{verbose}{
+    Logical. If \code{TRUE}, an estimate of the computation time
+    is printed.
+  }
+}
+\details{
+  This command calculates the 
+  third-order summary statistic \eqn{T(r)} for a spatial point pattern,
+  defined by Schladitz and Baddeley (2000).
+
+  The definition of \eqn{T(r)} is similar to the definition of Ripley's
+  \eqn{K} function \eqn{K(r)}, except that \eqn{K(r)} counts pairs of
+  points while \eqn{T(r)} counts triples of points. 
+  Essentially \eqn{T(r)} is a rescaled cumulative
+  distribution function of the diameters of triangles in the
+  point pattern. The diameter of a triangle is the length of its
+  longest side.
+}
+\section{Computation time}{
+  If the number of points is large, the algorithm can take a very long time
+  to inspect all possible triangles. A rough estimate
+  of the total computation time will be printed at the beginning
+  of the calculation. If this estimate seems very large,
+  stop the calculation using the user interrupt signal, and
+  call \code{Tstat} again, using \code{rmax} to restrict the
+  range of \code{r} values,
+  thus reducing the number of triangles to be inspected.
+}
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+}
+\references{
+  Schladitz, K. and Baddeley, A. (2000)
+  A third order point process characteristic.
+  \emph{Scandinavian Journal of Statistics} \bold{27}, 657--671.
+}
+\seealso{
+  \code{\link{Kest}}
+}
+\examples{
+  plot(Tstat(redwood))
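+  # restricting rmax reduces the number of triangles inspected
+  plot(Tstat(redwood, rmax=0.1))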
+}
+\author{\adrian}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/Window.Rd b/man/Window.Rd
new file mode 100644
index 0000000..e6b314b
--- /dev/null
+++ b/man/Window.Rd
@@ -0,0 +1,110 @@
+\name{Window}
+\alias{Window}
+\alias{Window<-}
+\alias{Window.ppp}
+\alias{Window<-.ppp}
+\alias{Window.psp}
+\alias{Window<-.psp}
+\alias{Window.im}
+\alias{Window<-.im}
+\title{
+  Extract or Change the Window of a Spatial Object
+}
+\description{
+  Given a spatial object (such as a point pattern or pixel image)
+  in two dimensions, these functions extract or change the window
+  in which the object is defined.
+}
+\usage{
+   Window(X, \dots)
+
+   Window(X, \dots) <- value
+
+   \method{Window}{ppp}(X, \dots)
+
+   \method{Window}{ppp}(X, \dots) <- value
+
+   \method{Window}{psp}(X, \dots)
+
+   \method{Window}{psp}(X, \dots) <- value
+
+   \method{Window}{im}(X, \dots)
+
+   \method{Window}{im}(X, \dots) <- value
+}
+\arguments{
+  \item{X}{
+    A spatial object such as a point pattern, line segment pattern
+    or pixel image.
+  }
+  \item{\dots}{
+    Extra arguments. They are ignored by all the methods listed here.
+  }
+  \item{value}{
+    Another window (object of class \code{"owin"}) to be used as the
+    window for \code{X}.
+  }
+}
+\details{
+  The functions \code{Window} and \code{Window<-} are generic.
+
+  \code{Window(X)} extracts the spatial window in which \code{X} is
+  defined.
+
+  \code{Window(X) <- W} changes the window in which \code{X} is defined
+  to the new window \code{W}, and \emph{discards any data outside} \code{W}.
+  In particular:
+  \itemize{
+    \item If \code{X} is a point pattern (object of class \code{"ppp"})
+    then \code{Window(X) <- W} discards any points of \code{X} which
+    fall outside \code{W}.
+    \item If \code{X} is a line segment pattern (object of class
+    \code{"psp"}) then \code{Window(X) <- W} clips the segments of \code{X}
+    to the boundaries of \code{W}.
+    \item If \code{X} is a pixel image (object of class \code{"im"})
+    then \code{Window(X) <- W} has the effect that pixels
+    lying outside \code{W} are retained but their pixel values
+    are set to \code{NA}.
+  }
+
+  Many other classes of spatial object have a method
+  for \code{Window}, but not \code{Window<-}.
+  See \code{\link{Window.ppm}}.
+}
+\value{
+  The result of \code{Window} is a window (object of class
+  \code{"owin"}).
+
+  The result of \code{Window<-} is the updated object \code{X},
+  of the same class as \code{X}.
+}
+\author{\adrian, \rolf and \ege}
+\seealso{
+  \code{\link{Window.ppm}}
+}
+\examples{
+   ## point patterns
+   Window(cells)
+   X <- demopat
+   Window(X)
+   Window(X) <- as.rectangle(Window(X))
+
+   ## line segment patterns
+   X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+   Window(X)
+   Window(X) <- square(0.5)
+
+   ## images
+   Z <- setcov(owin())
+   Window(Z)
+   Window(Z) <- square(0.5)
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/WindowOnly.Rd b/man/WindowOnly.Rd
new file mode 100644
index 0000000..18e4b7c
--- /dev/null
+++ b/man/WindowOnly.Rd
@@ -0,0 +1,104 @@
+\name{WindowOnly}
+\alias{Window.ppm}
+\alias{Window.kppm}
+\alias{Window.dppm}
+\alias{Window.lpp}
+\alias{Window.lppm}
+\alias{Window.msr}
+\alias{Window.quad}
+\alias{Window.quadratcount}
+\alias{Window.quadrattest}
+\alias{Window.tess}
+\alias{Window.layered}
+\alias{Window.distfun}
+\alias{Window.nnfun}
+\alias{Window.funxy}
+\alias{Window.rmhmodel}
+\alias{Window.leverage.ppm}
+\alias{Window.influence.ppm}
+\title{Extract Window of Spatial Object}
+\description{
+  Given a spatial object (such as a point pattern or pixel image)
+  in two dimensions, these functions extract the window
+  in which the object is defined.
+}
+\usage{
+ \method{Window}{ppm}(X, \dots, from=c("points", "covariates"))
+
+ \method{Window}{kppm}(X, \dots, from=c("points", "covariates"))
+
+ \method{Window}{dppm}(X, \dots, from=c("points", "covariates"))
+
+ \method{Window}{lpp}(X, \dots)
+
+ \method{Window}{lppm}(X, \dots)
+
+ \method{Window}{msr}(X, \dots)
+
+ \method{Window}{quad}(X, \dots)
+
+ \method{Window}{quadratcount}(X, \dots)
+
+ \method{Window}{quadrattest}(X, \dots)
+
+ \method{Window}{tess}(X, \dots)
+
+ \method{Window}{layered}(X, \dots)
+
+ \method{Window}{distfun}(X, \dots)
+
+ \method{Window}{nnfun}(X, \dots)
+
+ \method{Window}{funxy}(X, \dots)
+
+ \method{Window}{rmhmodel}(X, \dots)
+
+ \method{Window}{leverage.ppm}(X, \dots)
+
+ \method{Window}{influence.ppm}(X, \dots)
+}
+\arguments{
+  \item{X}{A spatial object.}
+  \item{\dots}{Ignored.}
+  \item{from}{Character string. See Details.}
+}
+\value{
+  An object of class \code{"owin"} (see \code{\link{owin.object}})
+  specifying an observation window.
+}
+\details{
+  These are methods for the generic function \code{\link{Window}}
+  which extract the spatial window in which the object \code{X}
+  is defined. 
+
+  The argument \code{from} applies when \code{X} is a fitted 
+  point process model
+  (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}).
+  If \code{from="data"} (the default),
+  \code{Window} extracts the window of the original point
+  pattern data to which the model was fitted.
+  If \code{from="covariates"} then \code{Window} returns the
+  window in which the spatial covariates of the model were provided.
+}
+\seealso{
+  \code{\link{Window}},
+  \code{\link{Window.ppp}},
+  \code{\link{Window.psp}}.
+  
+  \code{\link{owin.object}}
+}
+\examples{
+   X <- quadratcount(cells, 4)
+   Window(X)
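+
+   # for a fitted model, 'from' selects which window is returned;
+   # with no covariates both forms should agree (a hedged sketch)
+   fit <- ppm(cells ~ 1)
+   Window(fit)
+   Window(fit, from="covariates")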
+}
+\author{\adrian, \rolf and \ege}
+\keyword{spatial}
+\keyword{manip}
+ 
+ 
diff --git a/man/adaptive.density.Rd b/man/adaptive.density.Rd
new file mode 100644
index 0000000..0016866
--- /dev/null
+++ b/man/adaptive.density.Rd
@@ -0,0 +1,101 @@
+\name{adaptive.density}
+\alias{adaptive.density}
+\title{Intensity Estimate of Point Pattern Using Tessellation}
+\description{
+  Computes an adaptive estimate of the intensity function of a point
+  pattern.
+}
+\usage{
+adaptive.density(X, f = 0.1, ..., nrep = 1, verbose=TRUE)
+}
+\arguments{
+  \item{X}{Point pattern dataset (object of class \code{"ppp"}).}
+  \item{f}{Fraction (between 0 and 1 inclusive) of the data points that will be
+    removed from the data and used to determine a tessellation for the
+    intensity estimate.
+  }
+  \item{\dots}{Arguments passed to \code{\link{as.im}} determining the
+    pixel resolution of the result.
+  }
+  \item{nrep}{Number of independent repetitions of the randomised
+    procedure.}
+  \item{verbose}{Logical value indicating whether to print
+    progress reports.}
+}
+\details{
+  This function is an alternative to \code{\link{density.ppp}}. It
+  computes an estimate of the intensity function of a point pattern
+  dataset. The result is a pixel image giving the estimated intensity.
+
+  If \code{f=1}, the Voronoi estimate (Barr and Schoenberg, 2010)
+  is computed: the point pattern \code{X} is used to construct
+  a Voronoi/Dirichlet tessellation (see \code{\link{dirichlet}});
+  the areas of the Dirichlet tiles are computed; the estimated intensity
+  in each tile is the reciprocal of the tile area.
+
+  If \code{f=0}, the intensity estimate at every location is
+  equal to the average intensity (number of points divided by window area).
+  
+  If \code{f} is strictly between 0 and 1, 
+  the dataset \code{X} is randomly split into two patterns \code{A} and
+  \code{B} containing a fraction \code{f} and \code{1-f}, respectively,
+  of the original data. The subpattern \code{A} is used to construct a
+  Dirichlet tessellation, while the subpattern
+  \code{B} is retained for counting. For each tile of the Dirichlet
+  tessellation, we count the number of points of \code{B} falling in the
+  tile, and divide by the area of the same tile, to obtain an estimate
+  of the intensity of the pattern \code{B} in the tile.
+  This estimate is divided by \code{1-f} to obtain an estimate
+  of the intensity of \code{X} in the tile. The result is a pixel image
+  of intensity estimates which are constant on each tile of the tessellation.
+
+  If \code{nrep} is greater than 1, this randomised procedure is
+  repeated \code{nrep} times, and the results are averaged.
+
+  This technique has been used by Ogata et al. (2003), Ogata (2004)
+  and Baddeley (2007).
+}
+\value{
+  A pixel image (object of class \code{"im"}) whose values are
+  estimates of the intensity of \code{X}.
+}
+\seealso{
+  \code{\link{density.ppp}},
+  \code{\link{dirichlet}},
+  \code{\link{im.object}}.
+}
+\references{
+  Baddeley, A. (2007)
+  Validation of statistical models for spatial point patterns.
+  In J.G. Babu and E.D. Feigelson (eds.)
+  \emph{SCMA IV: Statistical Challenges in Modern Astronomy IV},
+  volume 317 of Astronomical Society of the Pacific Conference Series,
+  San Francisco, California, USA. Pages 22--38.
+
+  Barr, C., and Schoenberg, F.P. (2010).
+  On the Voronoi estimator for the intensity of an inhomogeneous
+  planar Poisson process. \emph{Biometrika} \bold{97} (4), 977--984.
+
+  Ogata, Y. (2004)
+  Space-time model for regional seismicity and detection of crustal
+  stress changes.
+  \emph{Journal of Geophysical Research}, \bold{109}.
+
+  Ogata, Y., Katsura, K. and Tanemura, M. (2003).
+  Modelling heterogeneous space-time occurrences of earthquakes and its
+  residual analysis.
+  \emph{Applied Statistics} \bold{52} 499--509.
+}
+\examples{
+  plot(adaptive.density(nztrees, 1), main="Voronoi estimate")
+  nr <- if(interactive()) 100 else 5
+  plot(adaptive.density(nztrees, nrep=nr), main="Adaptive estimate")
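+  # illustrative: larger f gives a finer tessellation but leaves
+  # fewer points for counting
+  plot(adaptive.density(nztrees, f=0.5, nrep=nr), main="f = 0.5")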
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/add.texture.Rd b/man/add.texture.Rd
new file mode 100644
index 0000000..6d57413
--- /dev/null
+++ b/man/add.texture.Rd
@@ -0,0 +1,76 @@
+\name{add.texture}
+\alias{add.texture}
+\title{
+  Fill Plot With Texture
+}
+\description{
+  Draws a simple texture inside a region on the plot.
+}
+\usage{
+add.texture(W, texture = 4, spacing = NULL, ...)
+}
+\arguments{
+  \item{W}{
+    Window (object of class \code{"owin"}) inside which the
+    texture should be drawn.
+  }
+  \item{texture}{
+    Integer from 1 to 8 identifying the type of texture. See Details.
+  }
+  \item{spacing}{
+    Spacing between elements of the texture, in units of the
+    current plot.
+  }
+  \item{\dots}{
+    Further arguments controlling the plot colour, line width etc.
+  }
+}
+\details{
+  The chosen texture, confined to the window \code{W},
+  will be added to the current plot.
+  The available textures are:
+  \describe{
+    \item{texture=1:}{
+      Small crosses arranged in a square grid.
+    }
+    \item{texture=2:}{
+      Parallel vertical lines.
+    }
+    \item{texture=3:}{
+      Parallel horizontal lines.
+    }
+    \item{texture=4:}{ 
+      Parallel diagonal lines at 45 degrees from the horizontal.
+   }
+    \item{texture=5:}{
+      Parallel diagonal lines at 135 degrees from the horizontal.
+    }
+    \item{texture=6:}{
+      Grid of horizontal and vertical lines.
+    }
+    \item{texture=7:}{
+      Grid of diagonal lines at 45 and 135 degrees from the horizontal.
+    }
+    \item{texture=8:}{
+      Grid of hexagons.
+    }
+  }
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\seealso{
+  \code{\link{owin}}, 
+  \code{\link{plot.owin}},
+  \code{\link{textureplot}},
+  \code{\link{texturemap}}.
+}
+\examples{
+  W <- Window(chorley)
+  plot(W, main="")
+  add.texture(W, 7)
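+  # illustrative: spacing and graphics arguments can be tuned
+  plot(W, main="")
+  add.texture(W, texture=4, spacing=1, col="grey")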
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/addvar.Rd b/man/addvar.Rd
new file mode 100644
index 0000000..01f2119
--- /dev/null
+++ b/man/addvar.Rd
@@ -0,0 +1,183 @@
+\name{addvar}
+\alias{addvar}
+\title{
+  Added Variable Plot for Point Process Model
+}
+\description{
+  Computes the coordinates for an Added Variable Plot
+  for a fitted point process model.
+}
+\usage{
+addvar(model, covariate, ...,
+       subregion=NULL,
+       bw="nrd0", adjust=1,
+       from=NULL, to=NULL, n=512,
+       bw.input = c("points", "quad"),
+       bw.restrict = FALSE,
+       covname, crosscheck=FALSE)
+}
+\arguments{
+  \item{model}{
+    Fitted point process model (object of class \code{"ppm"}).
+  }
+  \item{covariate}{
+    The covariate to be added to the model. Either a
+    pixel image, a \code{function(x,y)}, or a character string
+    giving the name of a covariate that was supplied when
+    the model was fitted.
+  }
+  \item{subregion}{
+    Optional.  A window (object of class \code{"owin"})
+    specifying a subset of the spatial domain of the data.
+    The calculation will be confined to the data in this subregion.
+  }
+  \item{bw}{
+    Smoothing bandwidth or bandwidth rule
+    (passed to \code{\link[stats]{density.default}}).
+  }
+  \item{adjust}{
+    Smoothing bandwidth adjustment factor
+    (passed to \code{\link[stats]{density.default}}).
+  }
+  \item{n, from, to}{
+    Arguments passed to \code{\link[stats]{density.default}} to
+    control the number and range of values at which the function
+    will be estimated.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link[stats]{density.default}}.
+  }
+  \item{bw.input}{
+    Character string specifying the input data used for automatic
+    bandwidth selection. 
+  }
+  \item{bw.restrict}{
+    Logical value, specifying whether bandwidth selection is performed using
+    data from the entire spatial domain or from the \code{subregion}.
+  }
+  \item{covname}{
+    Optional. Character string to use as the name of the covariate.
+  }
+  \item{crosscheck}{
+    For developers only.
+    Logical value indicating whether to perform
+    cross-checks on the validity of the calculation.
+  }
+}
+\details{
+  This command generates the plot coordinates for an Added Variable Plot
+  for a spatial point process model.
+  
+  Added Variable Plots (Cox, 1958, sec 4.5; Wang, 1985)
+  are commonly used in linear models and generalized linear
+  models, to decide whether a model with response \eqn{y} and predictors \eqn{x}
+  would be improved by including another predictor \eqn{z}.
+  
+  In a (generalised) linear model 
+  with response \eqn{y} and predictors \eqn{x},
+  the Added Variable Plot for a new covariate \eqn{z} 
+  is a plot of the smoothed Pearson residuals from the original model
+  against the scaled residuals from a weighted linear
+  regression of \eqn{z} on \eqn{x}.
+  If this plot has nonzero slope, then the new covariate \eqn{z} is
+  needed. For general advice see Cook and Weisberg (1999) and Harrell (2001).
+  
+  Essentially the same technique can be used for a spatial point process
+  model (Baddeley et al, 2013).
+
+  The argument \code{model} should be a fitted spatial point process
+  model (object of class \code{"ppm"}). 
+
+  The argument \code{covariate}
+  identifies the covariate that is to be considered for addition to
+  the model. It should be either a pixel image (object of class
+  \code{"im"}) or a \code{function(x,y)} giving the values of the
+  covariate at any spatial location. Alternatively \code{covariate}
+  may be a character string, giving the name of a covariate that was
+  supplied (in the \code{covariates} argument to \code{\link{ppm}})
+  when the model was fitted, but was not used in the model.
+
+  The result of \code{addvar(model, covariate)} is an object belonging
+  to the classes \code{"addvar"} and \code{"fv"}. Plot this object to
+  generate the added variable plot. 
+  
+  Note that the plot method shows the pointwise significance bands
+  for a test of the \emph{null} model, i.e. the null hypothesis
+  that the new covariate has no effect.
+
+  The smoothing bandwidth is controlled by the arguments
+  \code{bw}, \code{adjust}, \code{bw.input} and \code{bw.restrict}.
+  If \code{bw} is a numeric value, then
+  the bandwidth is taken to be \code{adjust * bw}.
+  If \code{bw} is a string representing a bandwidth selection rule
+  (recognised by \code{\link[stats]{density.default}})
+  then the bandwidth is selected by this rule.
+
+  The data used for automatic bandwidth selection are
+  specified by \code{bw.input} and \code{bw.restrict}.
+  If \code{bw.input="points"}  (the default) then bandwidth selection is
+  based on the covariate values at the points of the original point
+  pattern dataset to which the model was fitted.
+  If \code{bw.input="quad"} then bandwidth selection is
+  based on the covariate values at every quadrature point used to
+  fit the model.
+  If \code{bw.restrict=TRUE} then the bandwidth selection is performed
+  using only data from inside the \code{subregion}.
+}
+\section{Slow computation}{
+  In a large dataset, computation can be very slow if the default
+  settings are used, because the smoothing bandwidth is selected
+  automatically. To avoid this, specify a numerical value
+  for the bandwidth \code{bw}. One strategy is to use a coarser
+  subset of the data to select \code{bw} automatically.
+  The selected bandwidth can be read off the print output for
+  \code{addvar}.  
+}
+\value{
+  An object of class \code{"addvar"} containing the coordinates
+  for the added variable plot. There is a \code{plot} method.
+}
+\section{Internal data}{
+  The return value has an attribute \code{"spatial"} which contains
+  the internal data: the computed values of the residuals,
+  and of all relevant covariates,
+  at each quadrature point of the model. It is an object of class
+  \code{"ppp"} with a data frame of marks.
+}
+\references{
+  Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2013)
+  Residual diagnostics for covariate effects in
+  spatial point process models.
+  \emph{Journal of Computational and Graphical Statistics},
+  \bold{22}, 886--905.
+  
+  Cook, R.D. and Weisberg, S. (1999)
+  \emph{Applied regression, including computing and graphics}.
+  New York: Wiley.
+  
+  Cox, D.R. (1958) \emph{Planning of Experiments}. New York: Wiley.
+
+  Harrell, F. (2001) \emph{Regression Modeling Strategies}. New York: Springer.
+
+  Wang, P. (1985) Adding a variable in generalized linear models.
+  \emph{Technometrics} \bold{27}, 273--276.
+}
+\author{
+  \adrian,
+  \rolf,
+  Ya-Mei Chang and Yong Song.
+}
+\seealso{
+  \code{\link{parres}},
+  \code{\link{rhohat}},
+  \code{\link{rho2hat}}.
+}
+\examples{
+  X <- rpoispp(function(x,y){exp(3+3*x)})
+  model <- ppm(X, ~y)
+  adv <- addvar(model, "x")
+  plot(adv)
+  adv <- addvar(model, "x", subregion=square(0.5))
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/affine.Rd b/man/affine.Rd
new file mode 100644
index 0000000..38b087d
--- /dev/null
+++ b/man/affine.Rd
@@ -0,0 +1,47 @@
+\name{affine}
+\alias{affine}
+\title{Apply Affine Transformation}
+\description{
+  Applies any affine transformation of the plane (linear transformation
+  plus vector shift) to a plane geometrical object,
+  such as a point pattern or a window. 
+}
+\usage{
+  affine(X, \dots)
+}
+\arguments{
+  \item{X}{Any suitable dataset representing a two-dimensional
+    object, such as a point pattern (object of class \code{"ppp"}),
+    a line segment pattern (object of class \code{"psp"}),
+    a window (object of class \code{"owin"}) or a pixel image
+    (object of class \code{"im"}).
+  }
+  \item{\dots}{Arguments determining the affine transformation.}
+}
+\value{
+  Another object of the same type, representing the
+  result of applying the affine transformation.
+}
+\details{
+  This is generic. Methods are provided for
+  point patterns (\code{\link{affine.ppp}})
+  and windows (\code{\link{affine.owin}}).
+}
+\seealso{
+  \code{\link{affine.ppp}},
+  \code{\link{affine.psp}},
+  \code{\link{affine.owin}},
+  \code{\link{affine.im}},
+  \code{\link{flipxy}},
+  \code{\link{reflect}},
+  \code{\link{rotate}},
+  \code{\link{shift}}
+}
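+\examples{
+  # a minimal sketch: the generic dispatches to the method for the
+  # class of X (here a point pattern; the matrix is illustrative)
+  Y <- affine(cells, mat=diag(c(2,1)))
+}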
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/affine.im.Rd b/man/affine.im.Rd
new file mode 100644
index 0000000..9e6ef99
--- /dev/null
+++ b/man/affine.im.Rd
@@ -0,0 +1,56 @@
+\name{affine.im}
+\alias{affine.im} 
+\title{Apply Affine Transformation To Pixel Image}
+\description{
+  Applies any affine transformation of the plane (linear transformation
+  plus vector shift) to a pixel image. 
+}
+\usage{
+  \method{affine}{im}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) 
+}
+\arguments{
+  \item{X}{Pixel image (object of class \code{"im"}).}
+  \item{mat}{Matrix representing a linear transformation.}
+  \item{vec}{Vector of length 2 representing a translation.}
+  \item{\dots}{
+    Optional arguments passed to \code{\link{as.mask}}
+    controlling the pixel resolution of the transformed image.
+  }
+}
+\value{
+  Another pixel image (of class \code{"im"}) representing the
+  result of applying the affine transformation.
+}
+\details{
+  The image is subjected first to the linear transformation represented by
+  \code{mat} (multiplying on the left by \code{mat}),
+  and then the result is translated by the vector \code{vec}.
+  
+  The argument \code{mat} must be a nonsingular \eqn{2 \times 2}{2 * 2}
+  matrix.
+
+  This is a method for the generic function \code{\link{affine}}.
+}
+\seealso{
+  \code{\link{affine}},
+  \code{\link{affine.ppp}},
+  \code{\link{affine.psp}},
+  \code{\link{affine.owin}},
+  \code{\link{rotate}},
+  \code{\link{shift}}
+}
+\examples{
+  X <- setcov(owin())
+  stretch <- diag(c(2,3))
+  Y <- affine(X, mat=stretch)
+  shear <- matrix(c(1,0,0.6,1),ncol=2, nrow=2)
+  Z <- affine(X, mat=shear)
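+  # illustrative: combine the linear map with a translation
+  W <- affine(X, mat=shear, vec=c(0.5, 0))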
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/affine.linnet.Rd b/man/affine.linnet.Rd
new file mode 100644
index 0000000..f88bdcd
--- /dev/null
+++ b/man/affine.linnet.Rd
@@ -0,0 +1,94 @@
+\name{affine.linnet} 
+\alias{affine.linnet} 
+\alias{shift.linnet}
+\alias{rotate.linnet}
+\alias{rescale.linnet}
+\alias{scalardilate.linnet}
+\title{Apply Geometrical Transformations to a Linear Network}
+\description{
+  Apply geometrical transformations
+  to a linear network.
+}
+\usage{
+  \method{affine}{linnet}(X, mat=diag(c(1,1)), vec=c(0,0), \dots)
+
+  \method{shift}{linnet}(X, vec=c(0,0), \dots, origin=NULL)
+
+  \method{rotate}{linnet}(X, angle=pi/2, \dots, centre=NULL)
+
+  \method{scalardilate}{linnet}(X, f, \dots)
+
+  \method{rescale}{linnet}(X, s, unitname)
+}
+\arguments{
+  \item{X}{Linear network (object of class \code{"linnet"}).}
+  \item{mat}{Matrix representing a linear transformation.}
+  \item{vec}{Vector of length 2 representing a translation.}
+  \item{angle}{Rotation angle in radians.}
+  \item{f}{Scalar dilation factor.}
+  \item{s}{
+    Unit conversion factor: the new units are \code{s} times the old units.
+  }
+  \item{\dots}{
+    Arguments passed to other methods.
+  }
+  \item{origin}{
+    Character string determining a location
+    that will be shifted to the origin. Options are
+    \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}.
+    Partially matched. 
+  }
+  \item{centre}{
+    Centre of rotation.
+    Either a vector of length 2, or a character string
+    (partially matched to \code{"centroid"}, \code{"midpoint"}
+    or \code{"bottomleft"}).
+    The default is the coordinate origin \code{c(0,0)}.
+  }
+  \item{unitname}{
+    Optional. New name for the unit of length.
+    A value acceptable to the function \code{\link{unitname<-}}
+  }
+}
+\value{
+  Another linear network (of class \code{"linnet"}) representing the
+  result of applying the geometrical transformation.
+}
+\details{
+  These functions are methods for the generic functions
+  \code{\link{affine}},
+  \code{\link{shift}},
+  \code{\link{rotate}},
+  \code{\link{rescale}} and
+  \code{\link{scalardilate}}
+  applicable to objects of class \code{"linnet"}.
+
+  All of these functions
+  perform geometrical transformations on the object \code{X},
+  except for \code{rescale}, which simply rescales the units of length.
+}
+\seealso{
+  \code{\link{linnet}} and \code{\link{as.linnet}}.
+  
+  Generic functions
+  \code{\link{affine}},
+  \code{\link{shift}},
+  \code{\link{rotate}},
+  \code{\link{scalardilate}},
+  \code{\link{rescale}}.
+}
+\examples{
+  U <- rotate(simplenet, pi)
+  stretch <- diag(c(2,3))
+  Y <- affine(simplenet, mat=stretch)
+  shear <- matrix(c(1,0,0.6,1),ncol=2, nrow=2)
+  Z <- affine(simplenet, mat=shear, vec=c(0, 1))
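+  # illustrative: shift and scalar dilation of the same network
+  V <- shift(simplenet, vec=c(1, 1))
+  W <- scalardilate(simplenet, 2)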
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/affine.lpp.Rd b/man/affine.lpp.Rd
new file mode 100644
index 0000000..99c6b01
--- /dev/null
+++ b/man/affine.lpp.Rd
@@ -0,0 +1,96 @@
+\name{affine.lpp} 
+\alias{affine.lpp} 
+\alias{shift.lpp} 
+\alias{rotate.lpp}
+\alias{rescale.lpp}
+\alias{scalardilate.lpp}
+\title{Apply Geometrical Transformations to Point Pattern on a Linear Network}
+\description{
+  Apply geometrical transformations to a point pattern on a linear network.
+}
+\usage{
+  \method{affine}{lpp}(X, mat=diag(c(1,1)), vec=c(0,0), \dots)
+
+  \method{shift}{lpp}(X, vec=c(0,0), \dots, origin=NULL)
+
+  \method{rotate}{lpp}(X, angle=pi/2, \dots, centre=NULL)
+
+  \method{scalardilate}{lpp}(X, f, \dots)
+
+  \method{rescale}{lpp}(X, s, unitname)
+}
+\arguments{
+  \item{X}{Point pattern on a linear network (object of class \code{"lpp"}).}
+  \item{mat}{Matrix representing a linear transformation.}
+  \item{vec}{Vector of length 2 representing a translation.}
+  \item{angle}{Rotation angle in radians.}
+  \item{f}{Scalar dilation factor.}
+  \item{s}{
+    Unit conversion factor: the new units are \code{s} times the old units.
+  }
+  \item{\dots}{
+    Arguments passed to other methods.
+  }
+  \item{origin}{
+    Character string determining a location
+    that will be shifted to the origin. Options are
+    \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}.
+    Partially matched. 
+  }
+  \item{centre}{
+    Centre of rotation.
+    Either a vector of length 2, or a character string
+    (partially matched to \code{"centroid"}, \code{"midpoint"}
+    or \code{"bottomleft"}).
+    The default is the coordinate origin \code{c(0,0)}.
+  }
+  \item{unitname}{
+    Optional. New name for the unit of length.
+    A value acceptable to the function \code{\link{unitname<-}}
+  }
+}
+\value{
+  Another point pattern on a linear network (object of class
+  \code{"lpp"}) 
+  representing the
+  result of applying the geometrical transformation.
+}
+\details{
+  These functions are methods for the generic functions
+  \code{\link{affine}},
+  \code{\link{shift}},
+  \code{\link{rotate}},
+  \code{\link{rescale}} and
+  \code{\link{scalardilate}}
+  applicable to objects of class \code{"lpp"}.
+
+  All of these functions
+  perform geometrical transformations on the object \code{X},
+  except for \code{rescale}, which simply rescales the units of length.
+}
+\seealso{
+  \code{\link{lpp}}.
+  
+  Generic functions
+  \code{\link{affine}},
+  \code{\link{shift}},
+  \code{\link{rotate}},
+  \code{\link{scalardilate}},
+  \code{\link{rescale}}.
+}
+\examples{
+  X <- rpoislpp(2, simplenet)
+  U <- rotate(X, pi)
+  stretch <- diag(c(2,3))
+  Y <- affine(X, mat=stretch)
+  shear <- matrix(c(1,0,0.6,1),ncol=2, nrow=2)
+  Z <- affine(X, mat=shear, vec=c(0, 1))
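+  # illustrative: rescale changes the unit of length by factor s
+  Xr <- rescale(X, 2)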
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/affine.owin.Rd b/man/affine.owin.Rd
new file mode 100644
index 0000000..86d7bd3
--- /dev/null
+++ b/man/affine.owin.Rd
@@ -0,0 +1,65 @@
+\name{affine.owin} 
+\alias{affine.owin} 
+\title{Apply Affine Transformation To Window}
+\description{
+  Applies any affine transformation of the plane (linear transformation
+  plus vector shift) to a window. 
+}
+\usage{
+  \method{affine}{owin}(X, mat=diag(c(1,1)), vec=c(0,0), \dots, rescue=TRUE)
+}
+\arguments{
+  \item{X}{Window (object of class \code{"owin"}).}
+  \item{mat}{Matrix representing a linear transformation.}
+  \item{vec}{Vector of length 2 representing a translation.}
+  \item{rescue}{
+    Logical. If \code{TRUE}, the transformed window
+    will be processed by \code{\link{rescue.rectangle}}.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link{as.mask}}
+    controlling the pixel resolution of the transformed window,
+    if \code{X} is a binary pixel mask.
+  }
+}
+\value{
+  Another window (of class \code{"owin"}) representing the
+  result of applying the affine transformation.
+}
+\details{
+  The window is subjected first to the linear transformation represented by
+  \code{mat} (multiplying on the left by \code{mat}),
+  and then the result is translated by the vector \code{vec}.
+  
+  The argument \code{mat} must be a nonsingular \eqn{2 \times 2}{2 * 2}
+  matrix.
+
+  This is a method for the generic function \code{\link{affine}}.
+}
+\seealso{
+  \code{\link{affine}},
+  \code{\link{affine.ppp}},
+  \code{\link{affine.psp}},
+  \code{\link{affine.im}},
+  \code{\link{rotate}},
+  \code{\link{shift}}
+}
+\examples{
+  # shear transformation
+  shear <- matrix(c(1,0,0.6,1),ncol=2)
+  X <- affine(owin(), shear)
+  \dontrun{
+  plot(X)
+  }
+  data(letterR)
+  affine(letterR, shear, c(0, 0.5))
+  affine(as.mask(letterR), shear, c(0, 0.5))
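+  # illustrative: pixel resolution of a transformed mask can be
+  # controlled via arguments passed to as.mask, such as dimyx
+  affine(as.mask(letterR), shear, c(0, 0.5), dimyx=256)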
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/affine.ppp.Rd b/man/affine.ppp.Rd
new file mode 100644
index 0000000..7377dc7
--- /dev/null
+++ b/man/affine.ppp.Rd
@@ -0,0 +1,61 @@
+\name{affine.ppp} 
+\alias{affine.ppp} 
+\title{Apply Affine Transformation To Point Pattern}
+\description{
+  Applies any affine transformation of the plane (linear transformation
+  plus vector shift) to a point pattern. 
+}
+\usage{
+ \method{affine}{ppp}(X, mat=diag(c(1,1)), vec=c(0,0), \dots)
+}
+\arguments{
+  \item{X}{Point pattern (object of class \code{"ppp"}).}
+  \item{mat}{Matrix representing a linear transformation.}
+  \item{vec}{Vector of length 2 representing a translation.}
+  \item{\dots}{Arguments passed to \code{\link{affine.owin}} affecting
+    the handling of the observation window, if it is a binary pixel
+    mask.
+  }
+}
+\value{
+  Another point pattern (of class \code{"ppp"}) representing the
+  result of applying the affine transformation.
+}
+\details{
+  The point pattern, and its window, are subjected first to the
+  linear transformation represented by
+  \code{mat} (multiplying on the left by \code{mat}),
+  and are then translated by the vector \code{vec}.
+  
+  The argument \code{mat} must be a nonsingular \eqn{2 \times 2}{2 * 2}
+  matrix.
+
+  This is a method for the generic function \code{\link{affine}}.
+}
+\seealso{
+  \code{\link{affine}},
+  \code{\link{affine.owin}},
+  \code{\link{affine.psp}},
+  \code{\link{affine.im}},
+  \code{\link{flipxy}},
+  \code{\link{rotate}},
+  \code{\link{shift}}
+}
+\examples{
+  data(cells)
+  # shear transformation
+  X <- affine(cells, matrix(c(1,0,0.6,1),ncol=2))
+  \dontrun{
+  plot(X)
+  # rescale y coordinates by factor 1.3
+  plot(affine(cells, diag(c(1,1.3))))
+  }
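+  # illustrative: apply a translation as well as a linear map
+  Y <- affine(cells, mat=diag(c(1,1.3)), vec=c(0, -0.1))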
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/affine.psp.Rd b/man/affine.psp.Rd
new file mode 100644
index 0000000..b42e3d7
--- /dev/null
+++ b/man/affine.psp.Rd
@@ -0,0 +1,63 @@
+\name{affine.psp}  
+\alias{affine.psp}
+\title{Apply Affine Transformation To Line Segment Pattern}
+\description{
+  Applies any affine transformation of the plane (linear transformation
+  plus vector shift) to a line segment pattern. 
+}
+\usage{
+ \method{affine}{psp}(X, mat=diag(c(1,1)), vec=c(0,0), \dots)
+}
+\arguments{
+  \item{X}{Line Segment pattern (object of class \code{"psp"}).}
+  \item{mat}{Matrix representing a linear transformation.}
+  \item{vec}{Vector of length 2 representing a translation.}
+  \item{\dots}{Arguments passed to \code{\link{affine.owin}} affecting
+    the handling of the observation window, if it is a binary pixel
+    mask.
+  }
+}
+\value{
+  Another line segment pattern (of class \code{"psp"}) representing the
+  result of applying the affine transformation.
+}
+\details{
+  The line segment pattern, and its window, are subjected first to the
+  linear transformation represented by
+  \code{mat} (multiplying on the left by \code{mat}),
+  and are then translated by the vector \code{vec}.
+  
+  The argument \code{mat} must be a nonsingular \eqn{2 \times 2}{2 * 2}
+  matrix.
+
+  This is a method for the generic function \code{\link{affine}}.
+}
+\seealso{
+  \code{\link{affine}},
+  \code{\link{affine.owin}},
+  \code{\link{affine.ppp}},
+  \code{\link{affine.im}},
+  \code{\link{flipxy}},
+  \code{\link{rotate}},
+  \code{\link{shift}}
+}
+\examples{
+  oldpar <- par(mfrow=c(2,1))
+  X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  plot(X, main="original")
+  # shear transformation
+  Y <- affine(X, matrix(c(1,0,0.6,1),ncol=2))
+  plot(Y, main="transformed")
+  par(oldpar)
+  # 
+  # rescale y coordinates by factor 0.2
+  affine(X, diag(c(1,0.2)))
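+  # illustrative: translation only (mat defaults to the identity)
+  affine(X, vec=c(0.1, 0))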
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/affine.tess.Rd b/man/affine.tess.Rd
new file mode 100644
index 0000000..f01fb27
--- /dev/null
+++ b/man/affine.tess.Rd
@@ -0,0 +1,109 @@
+\name{affine.tess} 
+\alias{reflect.tess} 
+\alias{shift.tess} 
+\alias{rotate.tess} 
+\alias{scalardilate.tess} 
+\alias{affine.tess} 
+\title{Apply Geometrical Transformation To Tessellation}
+\description{
+  Apply various geometrical transformations of the plane
+  to each tile in a tessellation.
+}
+\usage{
+  \method{reflect}{tess}(X)
+
+  \method{shift}{tess}(X, \dots)
+
+  \method{rotate}{tess}(X, angle=pi/2, \dots, centre=NULL)
+
+  \method{scalardilate}{tess}(X, f, \dots)
+
+  \method{affine}{tess}(X, mat=diag(c(1,1)), vec=c(0,0), \dots)
+}
+\arguments{
+  \item{X}{Tessellation (object of class \code{"tess"}).}
+  \item{angle}{
+    Rotation angle in radians (positive values represent
+    anticlockwise rotations).
+  }
+  \item{mat}{Matrix representing a linear transformation.}
+  \item{vec}{Vector of length 2 representing a translation.}
+  \item{f}{Positive number giving scale factor.}
+  \item{\dots}{Arguments passed to other methods.}
+  \item{centre}{
+    Centre of rotation.
+    Either a vector of length 2, or a character string
+    (partially matched to \code{"centroid"}, \code{"midpoint"}
+    or \code{"bottomleft"}).
+    The default is the coordinate origin \code{c(0,0)}.
+  }
+}
+\value{
+  Another tessellation (of class \code{"tess"}) representing the
+  result of applying the geometrical transformation.
+}
+\details{
+  These are methods for the generic functions 
+  \code{\link{reflect}},
+  \code{\link{shift}},
+  \code{\link{rotate}}, 
+  \code{\link{scalardilate}} and
+  \code{\link{affine}}
+  for tessellations (objects of class \code{"tess"}).
+
+  The individual tiles of the tessellation, and the window
+  containing the tessellation, are all subjected to the same
+  geometrical transformation.
+
+  The transformations are performed by the corresponding method
+  for windows (class \code{"owin"}) or images (class \code{"im"}),
+  depending on the type of tessellation.
+
+  If the argument \code{origin} is used in \code{shift.tess},
+  it is interpreted as applying to the window containing the
+  tessellation. All tiles are then shifted by the same vector.
+}
+\seealso{
+  Generic functions
+  \code{\link{reflect}},
+  \code{\link{shift}},
+  \code{\link{rotate}}, 
+  \code{\link{scalardilate}},
+  \code{\link{affine}}.
+
+  Methods for windows:
+  \code{\link{reflect.default}},
+  \code{\link{shift.owin}},
+  \code{\link{rotate.owin}}, 
+  \code{\link{scalardilate.owin}},
+  \code{\link{affine.owin}}.
+
+  Methods for images:
+  \code{\link{reflect.im}},
+  \code{\link{shift.im}},
+  \code{\link{rotate.im}}, 
+  \code{\link{scalardilate.im}},
+  \code{\link{affine.im}}.
+}
+\examples{
+  live <- interactive()
+  if(live) {
+    H <- hextess(letterR, 0.2)
+    plot(H)
+    plot(reflect(H))
+    plot(rotate(H, pi/3))
+  } else H <- hextess(letterR, 0.6)
+
+  # shear transformation
+  shear <- matrix(c(1,0,0.6,1),2,2)
+  sH <- affine(H, shear)
+  if(live) plot(sH)
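+  # illustrative: scalar dilation and shift of the same tessellation
+  H2 <- scalardilate(H, 2)
+  H3 <- shift(H, vec=c(1, 0))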
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/allstats.Rd b/man/allstats.Rd
new file mode 100644
index 0000000..69cc9fa
--- /dev/null
+++ b/man/allstats.Rd
@@ -0,0 +1,95 @@
+\name{allstats}
+\alias{allstats}
+\title{Calculate Four Standard Summary Functions of a Point Pattern}
+\description{
+  Calculates the \eqn{F}, \eqn{G}, \eqn{J}, and \eqn{K}
+  summary functions for an unmarked point pattern.
+  Returns them as a function array (of class \code{"fasp"}, see
+  \code{\link{fasp.object}}).
+}
+\usage{
+  allstats(pp, \dots, dataname=NULL, verb=FALSE)
+}
+\arguments{
+  \item{pp}{The observed point pattern, for which summary function
+    estimates are required.  An object of class \code{"ppp"}.
+    It must not be marked.
+  }
+  \item{\dots}{
+    Optional arguments passed to the summary functions
+    \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}}
+    and \code{\link{Kest}}.
+  }
+  \item{dataname}{A character string giving an optional (alternative)
+    name for the point pattern.
+  }
+  \item{verb}{A logical value meaning ``verbose''. If \code{TRUE},
+    progress reports are printed during calculation.
+  }
+}
+\details{
+  This computes four standard summary statistics for a
+  point pattern: the empty space function \eqn{F(r)},
+  nearest neighbour distance distribution function \eqn{G(r)},
+  van Lieshout-Baddeley function \eqn{J(r)}
+  and Ripley's function \eqn{K(r)}.
+  The real work is done by 
+  \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} and
+  \code{\link{Kest}} respectively. Consult the help files for these functions
+  for further information about the statistical interpretation
+  of \eqn{F}, \eqn{G}, \eqn{J} and \eqn{K}.
+
+  If \code{verb} is \code{TRUE}, then ``progress reports''
+  (just indications of completion) are printed out when the
+  calculations are finished for each of the four function types.
+
+  The overall title of the array of four functions
+  (for plotting by \code{\link{plot.fasp}})
+  will be formed from the argument \code{dataname}.
+  If this is not given, it defaults to the expression
+  for \code{pp} given in the call to \code{allstats}.
+}
+\value{
+  A list of length 4 containing the \eqn{F}, \eqn{G}, \eqn{J} and
+  \eqn{K} functions respectively. 
+
+  The list can be plotted directly using \code{plot} (which dispatches to
+  \code{\link{plot.solist}}). 
+
+  Each list entry retains the format
+  of the output of the relevant estimating routine
+  \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} or
+  \code{\link{Kest}}. Thus each entry in the list is
+  a function value table (object of class \code{"fv"},
+  see \code{\link{fv.object}}).
+
+  The default formulae for plotting these functions are 
+  \code{cbind(km,theo) ~ r} for F, G, and J, and
+  \code{cbind(trans,theo) ~ r} for K.
+}
+\author{\adrian and \rolf}
+\seealso{
+         \code{\link{plot.solist}},
+         \code{\link{plot.fv}},
+         \code{\link{fv.object}},
+         \code{\link{Fest}},
+         \code{\link{Gest}},
+         \code{\link{Jest}},
+         \code{\link{Kest}}
+}
+
+\examples{
+  data(swedishpines)
+  a <- allstats(swedishpines, dataname="Swedish Pines")
+  \dontrun{
+    plot(a)
+    plot(a, subset=list("r<=15","r<=15","r<=15","r<=50"))
+  }
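+  # illustrative: optional arguments are passed to the four
+  # estimators, e.g. a common vector of distances r
+  a2 <- allstats(swedishpines, r=seq(0, 9.5, length=256))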
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/alltypes.Rd b/man/alltypes.Rd
new file mode 100644
index 0000000..38e186e
--- /dev/null
+++ b/man/alltypes.Rd
@@ -0,0 +1,252 @@
+\name{alltypes}
+\alias{alltypes}
+\title{Calculate Summary Statistic for All Types in a Multitype Point Pattern}
+\description{
+  Given a marked point pattern, this computes the estimates of
+  a selected summary function (\eqn{F},\eqn{G}, \eqn{J}, \eqn{K} etc)
+  of the pattern, for all possible combinations of marks,
+  and returns these functions in an array.
+}
+
+\usage{
+  alltypes(X, fun="K", \dots,
+           dataname=NULL, verb=FALSE, envelope=FALSE, reuse=TRUE)
+}
+
+\arguments{
+  \item{X}{The observed point pattern, for which summary function
+    estimates are required.  An object of class \code{"ppp"} or \code{"lpp"}.
+  }
+
+  \item{fun}{The summary function. Either an \R function,
+    or a character string indicating the summary function
+    required.  Options for strings are
+    \code{"F"}, \code{"G"}, \code{"J"}, \code{"K"}, \code{"L"}, \code{"pcf"},
+    \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"}, \code{"Lcross"},
+    \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"}, \code{"Ldot"}.
+  }
+  \item{\dots}{
+    Arguments passed to the summary function
+    (and to the function \code{\link{envelope}} if appropriate)
+  }
+
+  \item{dataname}{Character string giving an optional (alternative)
+    name to the point pattern, different from what is given
+    in the call.  This name, if supplied, may be used by
+    \code{\link{plot.fasp}()} in forming the title of the plot.
+    If not supplied, it defaults to the deparsed form of the argument
+    supplied as \code{X} in the call.
+  }
+
+  \item{verb}{
+    Logical value.  If \code{verb} is
+    true then terse ``progress reports'' (just the values of the
+    mark indices) are printed out when the calculations for each
+    combination of marks are completed. 
+  }
+  \item{envelope}{
+    Logical value. If \code{envelope} is true, then simulation envelopes
+    of the summary function will also be computed. See Details.
+  }
+  \item{reuse}{
+    Logical value indicating whether the envelopes in each panel
+    should be based on the same set of simulated patterns
+    (\code{reuse=TRUE}) or on different, independent sets of simulated
+    patterns (\code{reuse=FALSE}).
+  }
+}
+
+\details{
+  This routine is a convenient way to analyse the dependence between
+  types in a multitype point pattern.
+  It computes the estimates of a selected summary function of the
+  pattern, for all possible combinations of marks.
+  It returns these functions in an array
+  (an object of class \code{"fasp"}) amenable to plotting
+  by \code{\link{plot.fasp}()}.
+
+  The argument \code{fun} specifies the summary function that will
+  be evaluated for each type of point, or for each pair of types.
+  It may be either an \R function or a character string.
+  
+  Suppose that the points have possible types \eqn{1,2,\ldots,m}
+  and let \eqn{X_i}{X[i]} denote the pattern of points of type \eqn{i} only.
+
+  If \code{fun="F"} then this routine
+  calculates, for each possible type \eqn{i},
+  an estimate of the Empty Space Function \eqn{F_i(r)}{F[i](r)} of
+  \eqn{X_i}{X[i]}. See \code{\link{Fest}}
+  for explanation of the empty space function.
+  The estimate is computed by applying \code{\link{Fest}}
+  to \eqn{X_i}{X[i]} with the optional arguments \code{\dots}.
+
+  If \code{fun} is
+  \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"} or \code{"Lcross"},
+  the routine calculates, for each pair of types \eqn{(i,j)},
+  an estimate of the ``\code{i}-to-\code{j}'' cross-type function
+  \eqn{G_{ij}(r)}{G[i,j](r)},
+  \eqn{J_{ij}(r)}{J[i,j](r)},
+  \eqn{K_{ij}(r)}{K[i,j](r)} or
+  \eqn{L_{ij}(r)}{L[i,j](r)} respectively describing the
+  dependence between 
+  \eqn{X_i}{X[i]} and \eqn{X_j}{X[j]}.
+  See \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}}
+  or \code{\link{Lcross}} respectively for explanation of these
+  functions.
+  The estimate is computed by applying the relevant function
+  (\code{\link{Gcross}} etc)
+  to \code{X} using each possible value of the arguments \code{i,j},
+  together with the optional arguments \code{\dots}.
+  
+  If \code{fun} is \code{"pcf"} the routine calculates
+  the cross-type pair correlation function \code{\link{pcfcross}}
+  between each pair of types.
+
+  If \code{fun} is 
+  \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"} or \code{"Ldot"},
+  the routine calculates, for each type \eqn{i},
+  an estimate of the ``\code{i}-to-any'' dot-type function
+  \eqn{G_{i\bullet}(r)}{G[i.](r)},
+  \eqn{J_{i\bullet}(r)}{J[i.](r)},
+  \eqn{K_{i\bullet}(r)}{K[i.](r)} or
+  \eqn{L_{i\bullet}(r)}{L[i.](r)} respectively describing the
+  dependence between \eqn{X_i}{X[i]} and \eqn{X}{X}.
+  See \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}}
+  or \code{\link{Ldot}} respectively for explanation of these functions.
+  The estimate is computed by applying the relevant function
+  (\code{\link{Gdot}} etc)
+  to \code{X} using each possible value of the argument \code{i},
+  together with the optional arguments \code{\dots}.
+
+  The letters \code{"G"}, \code{"J"}, \code{"K"} and \code{"L"}
+  are interpreted as abbreviations for \code{\link{Gcross}},
+  \code{\link{Jcross}}, \code{\link{Kcross}} and \code{\link{Lcross}}
+  respectively, assuming the point pattern is
+  marked. If the point pattern is unmarked, the appropriate
+  function \code{\link{Gest}}, \code{\link{Jest}},
+  \code{\link{Kest}} or \code{\link{Lest}} is invoked instead.
+
+  If \code{envelope=TRUE}, then as well as computing the value of the
+  summary function for each combination of types, the algorithm also
+  computes simulation envelopes of the summary function for each
+  combination of types. The arguments \code{\dots} are passed to the function
+  \code{\link{envelope}} to control the number of
+  simulations, the random process generating the simulations,
+  the construction of envelopes, and so on. 
+}
+\value{
+  A function array (an object of class \code{"fasp"},
+  see \code{\link{fasp.object}}). This can be plotted
+  using \code{\link{plot.fasp}}.
+
+  If the pattern is not marked, the resulting ``array'' has dimensions
+  \eqn{1 \times 1}{1 x 1}. Otherwise the following is true:
+
+  If \code{fun="F"},
+  the function array has dimensions \eqn{m \times 1}{m * 1}
+  where \eqn{m} is the number of different marks in the point pattern.
+  The entry at position \code{[i,1]} in this array
+  is the result of applying \code{\link{Fest}} to the
+  points of type \code{i} only.
+
+  If \code{fun} is \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"}
+  or \code{"Ldot"}, the function array
+  again has dimensions \eqn{m \times 1}{m * 1}.
+  The entry at position \code{[i,1]} in this array
+  is the result of \code{Gdot(X, i)}, \code{Jdot(X, i)},
+  \code{Kdot(X, i)} or \code{Ldot(X, i)} respectively.
+
+  If \code{fun} is \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"}
+  or \code{"Lcross"} 
+  (or their abbreviations \code{"G"}, \code{"J"}, \code{"K"} or \code{"L"}),
+  the function array has dimensions \eqn{m \times m}{m * m}.
+  The \code{[i,j]} entry of the function array
+  (for \eqn{i \neq j}{i != j}) is the
+  result of applying the function \code{\link{Gcross}},
+  \code{\link{Jcross}}, \code{\link{Kcross}} or \code{\link{Lcross}} to
+  the pair of types \code{(i,j)}. The diagonal
+  \code{[i,i]} entry of the function array is the result of
+  applying the univariate function \code{\link{Gest}},
+  \code{\link{Jest}}, \code{\link{Kest}} or \code{\link{Lest}} to the
+  points of type \code{i} only.
+
+  If \code{envelope=FALSE}, then
+  each function entry \code{fns[[i]]} retains the format
+  of the output of the relevant estimating routine
+  \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}},
+  \code{\link{Kest}},  \code{\link{Lest}}, \code{\link{Gcross}},
+  \code{\link{Jcross}}, \code{\link{Kcross}}, \code{\link{Lcross}},
+  \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}} or
+  \code{\link{Ldot}}.
+  The default formulae for plotting these functions are 
+  \code{cbind(km,theo) ~ r} for F, G, and J functions, and
+  \code{cbind(trans,theo) ~ r} for K and L functions.
+
+  If \code{envelope=TRUE}, then each function entry \code{fns[[i]]}
+  has the same format as the output of the \code{\link{envelope}} command.
+}
+\note{
+  Sizeable amounts of memory may be needed during the calculation.
+}
+
+\seealso{
+  \code{\link{plot.fasp}},
+  \code{\link{fasp.object}},
+  \code{\link{Fest}},
+  \code{\link{Gest}},
+  \code{\link{Jest}},
+  \code{\link{Kest}},
+  \code{\link{Lest}},
+  \code{\link{Gcross}},
+  \code{\link{Jcross}},
+  \code{\link{Kcross}},
+  \code{\link{Lcross}},
+  \code{\link{Gdot}},
+  \code{\link{Jdot}},
+  \code{\link{Kdot}},
+  \code{\link{envelope}}.
+}
+\examples{
+   # bramblecanes (3 marks).
+   bram <- bramblecanes
+   \testonly{
+      bram <- bram[c(seq(1, 744, by=20), seq(745, 823, by=4))]
+   }
+   bF <- alltypes(bram,"F",verb=TRUE)
+   plot(bF)
+   if(interactive()) {
+     plot(alltypes(bram,"G"))
+     plot(alltypes(bram,"Gdot"))
+   }
+   
+   # Swedishpines (unmarked).
+   swed <- swedishpines
+   \testonly{
+     swed <- swed[1:25]
+   }
+   plot(alltypes(swed,"K"))
+
+   plot(alltypes(amacrine, "pcf"), ylim=c(0,1.3))
+
+   # A setting where you might REALLY want to use dataname:
+   \dontrun{
+   xxx <- alltypes(ppp(Melvin$x,Melvin$y,
+                window=as.owin(c(5,20,15,50)),marks=clyde),
+                fun="F",verb=TRUE,dataname="Melvin")
+   }
+
+   # envelopes
+   bKE <- alltypes(bram,"K",envelope=TRUE,nsim=19)
+   \dontrun{
+   bFE <- alltypes(bram,"F",envelope=TRUE,nsim=19,global=TRUE)
+   }
+
+   # extract one entry
+   as.fv(bKE[1,1])
+   
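+   # illustrative: reuse=FALSE would generate independent
+   # simulations for each panel (slower)
+   \dontrun{
+   bKI <- alltypes(bram, "K", envelope=TRUE, nsim=19, reuse=FALSE)
+   }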
+}
+\author{\adrian
+  and \rolf.
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/amacrine.Rd b/man/amacrine.Rd
new file mode 100644
index 0000000..f7494dc
--- /dev/null
+++ b/man/amacrine.Rd
@@ -0,0 +1,43 @@
+\name{amacrine}
+\alias{amacrine}
+\docType{data}
+\title{Hughes' Amacrine Cell Data}
+\description{
+Austin Hughes' data: a point pattern 
+of displaced amacrine cells in the retina of a rabbit.
+A marked point pattern.
+} 
+\format{
+  An object of class \code{"ppp"}
+  representing the point pattern of cell locations.
+  Entries include
+  \tabular{ll}{
+    \code{x} \tab Cartesian \eqn{x}-coordinate of cell \cr
+    \code{y} \tab Cartesian \eqn{y}-coordinate of cell \cr
+    \code{marks} \tab factor with levels \code{off} and \code{on} \cr
+                 \tab indicating ``off'' and ``on'' cells
+    }
+  See \code{\link{ppp.object}} for details of the format.
+}
+\usage{data(amacrine)}
+\source{Peter Diggle, personal communication}
+\section{Notes}{
+Austin Hughes' data: a point pattern 
+of displaced amacrine cells in the retina of a rabbit.
+152 ``on'' cells and 142 ``off'' cells in a rectangular sampling frame.
+
+The true dimensions of the rectangle are 1060 by 662 microns.
+The coordinates here are scaled to a rectangle of height 1 and width
+\eqn{1060/662 = 1.601} so the unit of measurement is approximately 662 microns.
+
+The data were analysed by Diggle (1986).
+}
+\references{
+Diggle, P. J. (1986).
+Displaced amacrine cells in the retina of a
+rabbit: analysis of a bivariate spatial point pattern. 
+\emph{J. Neurosci. Meth.} \bold{18}, 115--125.
+} 
+\keyword{datasets}
+\keyword{spatial}
+
diff --git a/man/anemones.Rd b/man/anemones.Rd
new file mode 100644
index 0000000..d487846
--- /dev/null
+++ b/man/anemones.Rd
@@ -0,0 +1,70 @@
+\name{anemones}
+\alias{anemones}
+\docType{data}
+\title{
+  Beadlet Anemones Data
+}
+\description{
+  These data give the spatial locations and diameters
+  of sea anemones (beadlet anemone
+  \emph{Actinia equina}) in a sample plot 
+  on the north face of a boulder,
+  well above low tide level, at Quiberon (Bretagne, France) in May 1976.
+
+  The data were originally described and discussed by Kooijman (1979a).
+  Kooijman (1979b) shows a hand-drawn plot of the original data.
+  The data are discussed  by Upton and Fingleton (1985)
+  as Example 1.8 on pages 64--67. 
+
+  The \code{anemones} dataset is taken directly from Table 1.11
+  of Upton and Fingleton (1985). The coordinates and
+  diameters are integer multiples of an idiosyncratic unit of length.
+  The boundary is a rectangle 280 by 180 units.
+}
+\section{Units}{
+  There is some confusion about the correct physical scale for these
+  data. According to Upton and Fingleton (1985), one unit in the dataset 
+  is approximately 0.475 cm. According to Kooijman (1979a, 1979b)
+  and also quoted by Upton and Fingleton (1985), the
+  physical size of the sample plot was 14.5 by 9.75 decimetres
+  (145 by 97.5 centimetres). 
+  However if the data are plotted at this scale, they are too small for
+  a rectangle of this size, and the appearance of the plot
+  does not match the original hand-drawn plot in Kooijman (1979b).
+  To avoid confusion, we have not assigned a unit scale to this
+  dataset.
+} 
+\format{
+  \code{anemones} is an object of class \code{"ppp"}
+  representing the point pattern of anemone locations.
+  It is a marked point pattern with numeric marks representing
+  anemone diameter.
+  See \code{\link{ppp.object}} for details of the format.
+}
+\usage{data(anemones)}
+\examples{
+  data(anemones)
+  # plot diameters on same scale as x, y
+  plot(anemones, markscale=1)
+}
+\source{
+  Table 1.11 on pages 62--63 of Upton and Fingleton (1985),
+  who acknowledge Kooijman (1979a) as the source.
+}
+\references{
+ Kooijman, S.A.L.M. (1979a)
+ The description of point patterns. 
+ In \emph{Spatial and temporal analysis in ecology} (ed. R.M. Cormack
+ and J.K. Ord), International Cooperative Publishing House, 
+ Fairland, Maryland, USA. Pages 305--332.
+
+ Kooijman, S.A.L.M. (1979b)
+ Inference about dispersal patterns. 
+ \emph{Acta Biotheoretica} \bold{28}, 149--189.
+
+  Upton, G.J.G. and Fingleton, B. (1985)
+  \emph{Spatial data analysis by example}. Volume 1: Point pattern
+  and quantitative data. John Wiley and Sons, Chichester.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/angles.psp.Rd b/man/angles.psp.Rd
new file mode 100644
index 0000000..7bd47da
--- /dev/null
+++ b/man/angles.psp.Rd
@@ -0,0 +1,59 @@
+\name{angles.psp}
+\alias{angles.psp}
+\title{Orientation Angles of Line Segments}
+\description{
+  Computes the orientation angle of each line segment
+  in a line segment pattern.
+}
+\usage{
+  angles.psp(x, directed=FALSE)
+}
+\arguments{
+  \item{x}{
+    A line segment pattern (object of class \code{"psp"}).
+  }
+  \item{directed}{
+    Logical flag. See details.
+  }
+}
+\value{
+  Numeric vector.
+}
+\details{
+  For each line segment,  the angle of inclination to the \eqn{x}-axis
+  (in radians) is computed,
+  and the angles are returned as a numeric vector.
+
+  If \code{directed=TRUE}, the directed angle of orientation
+  is computed. The angle respects the
+  sense of direction from \code{(x0,y0)} to \code{(x1,y1)}.
+  The values returned are angles in the full range from \eqn{-\pi}{-pi}
+  to \eqn{\pi}{pi}. The angle is computed as 
+  \code{atan2(y1-y0,x1-x0)}. See \code{\link{atan2}}.
+
+  If \code{directed=FALSE}, the undirected angle of orientation
+  is computed. Angles differing by \eqn{\pi}{pi} are
+  regarded as equivalent. The values returned are angles
+  in the range from \eqn{0} to \eqn{\pi}{pi}. These angles are
+  computed by first computing the directed angle,
+  then adding \eqn{\pi}{pi} to any negative angles.
+}
+\seealso{
+  \code{\link{summary.psp}},
+  \code{\link{midpoints.psp}},
+  \code{\link{lengths.psp}}
+}
+\examples{
+  a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  b <- angles.psp(a)   
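+  # illustrative: directed angles lie in the range (-pi, pi]
+  bd <- angles.psp(a, directed=TRUE)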
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/anova.lppm.Rd b/man/anova.lppm.Rd
new file mode 100644
index 0000000..b2ae01b
--- /dev/null
+++ b/man/anova.lppm.Rd
@@ -0,0 +1,113 @@
+\name{anova.lppm}
+\alias{anova.lppm}
+\title{ANOVA for Fitted Point Process Models on Linear Network}
+\description{
+Performs analysis of deviance for two or more fitted point process
+models on a linear network.
+}
+\usage{
+  \method{anova}{lppm}(object, \dots, test=NULL)
+}
+\arguments{
+  \item{object}{A fitted point process model on a linear network
+    (object of class \code{"lppm"}).
+    }
+    \item{\dots}{
+      One or more fitted point process models on the same
+      linear network.
+    }
+    \item{test}{
+      Character string, partially matching one of
+      \code{"Chisq"}, \code{"F"} or \code{"Cp"}.
+    }
+}
+\value{
+  An object of class \code{"anova"}, or \code{NULL}.
+}
+\details{
+  This is a method for \code{\link{anova}} for 
+  fitted point process models on a linear network
+  (objects of class \code{"lppm"},
+  usually generated by the model-fitting function \code{\link{lppm}}).
+
+  If the fitted models are all Poisson point processes,
+  then this function performs an Analysis of Deviance of
+  the fitted models. The output shows the deviance differences
+  (i.e. 2 times log likelihood ratio),
+  the difference in degrees of freedom, and (if \code{test="Chi"})
+  the two-sided p-values for the chi-squared tests. Their interpretation
+  is very similar to that in \code{\link{anova.glm}}.
+
+  If some of the fitted models are \emph{not} Poisson point processes,
+  then the deviance difference is replaced by the
+  adjusted composite likelihood ratio (Pace et al, 2011;
+  Baddeley et al, 2015). 
+}
+\section{Errors and warnings}{
+  \describe{
+    \item{models not nested:}{
+      There may be an error message that the models are not \dQuote{nested}.
+      For an Analysis of Deviance the models must be nested, i.e. one model
+      must be a special case of the other. For example the point process
+      model with formula \code{~x} is a special case of the model with
+      formula \code{~x+y}, so these models are nested. However
+      the two point process
+      models with formulae \code{~x} and \code{~y} are not nested.
+      
+      If you get this error message and you believe that the models should
+      be nested, the problem may be the inability of \R to recognise that
+      the two formulae are nested. Try modifying the formulae to make
+      their relationship more obvious.
+    }
+    \item{different sizes of dataset:}{
+      There may be an error message from \code{anova.glmlist} that
+      \dQuote{models were not all fitted to the same size of dataset}.
+      This generally occurs when the point process models
+      are fitted on different linear networks.
+    }
+  }
+}
+\seealso{
+  \code{\link{lppm}}
+}
+\examples{
+ X <- runiflpp(10, simplenet)
+ mod0 <- lppm(X ~1)
+ modx <- lppm(X ~x)
+ anova(mod0, modx, test="Chi")
+}
+\author{\adrian}
+\references{
+  Ang, Q.W. (2010)
+  \emph{Statistical methodology for events on a network}.
+  Master's thesis, School of Mathematics and Statistics, University of
+  Western Australia.
+  
+  Ang, Q.W., Baddeley, A. and Nair, G. (2012)
+  Geometrically corrected second-order analysis of 
+  events on a linear network, with applications to
+  ecology and criminology.
+  \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617.
+
+  Baddeley, A., Turner, R. and Rubak, E. (2015)
+  Adjusted composite likelihood ratio test for Gibbs point processes.
+  \emph{Journal of Statistical Computation and Simulation}
+  \bold{86} (5) 922--941.
+   DOI: 10.1080/00949655.2015.1044530.
+
+  McSwiggan, G., Nair, M.G. and Baddeley, A. (2012)
+  Fitting Poisson point process models to events 
+  on a linear network. Manuscript in preparation.
+
+  Pace, L., Salvan, A. and Sartori, N. (2011)
+  Adjusting composite likelihood ratio statistics.
+  \emph{Statistica Sinica} \bold{21}, 129--148.
+}
+\keyword{spatial}
+\keyword{models}
+\keyword{methods}
+ 
+ 
diff --git a/man/anova.mppm.Rd b/man/anova.mppm.Rd
new file mode 100644
index 0000000..6fde597
--- /dev/null
+++ b/man/anova.mppm.Rd
@@ -0,0 +1,131 @@
+\name{anova.mppm}
+\alias{anova.mppm}
+\title{ANOVA for Fitted Point Process Models for Replicated Patterns}
+\description{
+  Performs analysis of deviance for one or more
+  point process models fitted to replicated point pattern data.
+}
+\usage{
+  \method{anova}{mppm}(object, \dots,
+                  test=NULL, adjust=TRUE,
+                  fine=FALSE, warn=TRUE)
+}
+\arguments{
+  \item{object}{
+    Object of class \code{"mppm"} representing a
+    point process model that was fitted to replicated point patterns.
+  }
+  \item{\dots}{
+    Optional. Additional objects of class \code{"mppm"}.
+  }
+  \item{test}{
+    Type of hypothesis test to perform. 
+    A character string, partially matching one of
+    \code{"Chisq"}, \code{"LRT"},
+    \code{"Rao"}, \code{"score"}, \code{"F"} or \code{"Cp"},
+    or \code{NULL} indicating that no test should be performed.
+  }
+  \item{adjust}{
+    Logical value indicating whether to correct the
+    pseudolikelihood ratio when some of the models are not Poisson
+    processes.
+  }
+  \item{fine}{
+    Logical value passed to \code{\link{vcov.ppm}}
+    indicating whether to use a quick estimate 
+    (\code{fine=FALSE}, the default) or a slower, more accurate
+    estimate (\code{fine=TRUE}) of the variance of the fitted
+    coefficients of each model. 
+    Relevant only when some of the models are not Poisson
+    and \code{adjust=TRUE}.
+  }
+  \item{warn}{
+    Logical value indicating whether to issue warnings
+    if problems arise.
+  }
+}
+\value{
+  An object of class \code{"anova"}, or \code{NULL}.
+}
+\details{
+  This is a method for \code{\link{anova}} for comparing several
+  fitted point process models of class \code{"mppm"},
+  usually generated by the model-fitting function \code{\link{mppm}}).
+
+  If the fitted models are all Poisson point processes,
+  then this function performs an Analysis of Deviance of
+  the fitted models. The output shows the deviance differences
+  (i.e. 2 times log likelihood ratio),
+  the difference in degrees of freedom, and (if \code{test="Chi"})
+  the two-sided p-values for the chi-squared tests. Their interpretation
+  is very similar to that in \code{\link{anova.glm}}.
+
+  If some of the fitted models are \emph{not} Poisson point processes,
+  the `deviance' differences in this table are
+  'pseudo-deviances' equal to 2 times the differences
+  in the maximised values of the log pseudolikelihood (see
+  \code{\link{ppm}}). It is not valid to compare these
+  values to the chi-squared distribution. In this case,
+  if \code{adjust=TRUE} (the default), the
+  pseudo-deviances will be adjusted using the method of Pace et al
+  (2011) and Baddeley, Turner and Rubak (2015)
+  so that the chi-squared test is valid.
+  It is strongly advisable to perform this adjustment.
+
+  The argument \code{test} determines which hypothesis test, if any, will
+  be performed to compare the models. The argument \code{test}
+  should be a character string, partially matching one of
+  \code{"Chisq"}, \code{"F"} or \code{"Cp"},
+  or \code{NULL}. The first option \code{"Chisq"} gives
+  the likelihood ratio test based on the asymptotic chi-squared
+  distribution of the deviance difference.
+  The meaning of the other options is explained in
+  \code{\link{anova.glm}}.
+  For random effects models, only \code{"Chisq"} is
+  available, and again gives the likelihood ratio test.
+}
+\section{Error messages}{
+  An error message that reports
+  \emph{system is computationally singular} indicates that the
+  determinant of the Fisher information matrix of one of the models
+  was either too large or too small for reliable numerical calculation.
+  See \code{\link{vcov.ppm}} for suggestions on how to handle this.
+}
+\seealso{
+  \code{\link{mppm}}
+}
+\examples{
+ H <- hyperframe(X=waterstriders)
+ mod0 <- mppm(X~1, data=H, Poisson())
+ modx <- mppm(X~x, data=H, Poisson())
+ anova(mod0, modx, test="Chi")
+
+ mod0S <- mppm(X~1, data=H, Strauss(2))
+ modxS <- mppm(X~x, data=H, Strauss(2))
+ anova(mod0S, modxS, test="Chi")
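+
+ # a hedged sketch: adjust=FALSE gives the raw pseudolikelihood
+ # ratio test, which is not valid against the chi-squared
+ # distribution (see Details)
+ anova(mod0S, modxS, test="Chi", adjust=FALSE)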
+}
+\references{
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  London: Chapman and Hall/CRC Press. 
+
+  Baddeley, A., Turner, R. and Rubak, E. (2015)
+  Adjusted composite likelihood ratio test for Gibbs point processes.
+  \emph{Journal of Statistical Computation and Simulation}
+  \bold{86} (5) 922--941.
+  DOI: 10.1080/00949655.2015.1044530.
+
+  Pace, L., Salvan, A. and Sartori, N. (2011)
+  Adjusting composite likelihood ratio statistics.
+  \emph{Statistica Sinica} \bold{21}, 129--148.
+}
+\author{
+  Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff.
+  Implemented by 
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{models}
+\keyword{methods}
+ 
+ 
diff --git a/man/anova.ppm.Rd b/man/anova.ppm.Rd
new file mode 100644
index 0000000..cb5924d
--- /dev/null
+++ b/man/anova.ppm.Rd
@@ -0,0 +1,175 @@
+\name{anova.ppm}
+\alias{anova.ppm}
+\title{ANOVA for Fitted Point Process Models}
+\description{
+Performs analysis of deviance for one or more fitted point process models.
+}
+\usage{
+  \method{anova}{ppm}(object, \dots, test=NULL,
+                      adjust=TRUE, warn=TRUE, fine=FALSE)
+}
+\arguments{
+  \item{object}{
+    A fitted point process model
+    (object of class \code{"ppm"}).
+  }
+  \item{\dots}{
+    Optional. Additional objects of class \code{"ppm"}.
+  }
+  \item{test}{
+    Character string, partially matching one of
+    \code{"Chisq"}, \code{"LRT"},
+    \code{"Rao"}, \code{"score"}, \code{"F"} or \code{"Cp"},
+    or \code{NULL} indicating that no test should be performed.
+  }
+  \item{adjust}{
+    Logical value indicating whether to correct the
+    pseudolikelihood ratio when some of the models are not Poisson
+    processes.
+  }
+  \item{warn}{
+    Logical value indicating whether to issue warnings
+    if problems arise.
+  }
+  \item{fine}{
+    Logical value, passed to \code{\link{vcov.ppm}},
+    indicating whether to use a quick estimate 
+    (\code{fine=FALSE}, the default) or a slower, more accurate
+    estimate (\code{fine=TRUE}) of variance terms.
+    Relevant only when some of the models are not Poisson
+    and \code{adjust=TRUE}.
+  }
+}
+\value{
+  An object of class \code{"anova"}, or \code{NULL}.
+}
+\details{
+  This is a method for \code{\link[stats]{anova}} for 
+  fitted point process models (objects of class \code{"ppm"},
+  usually generated by the model-fitting function \code{\link{ppm}}).
+
+  If the fitted models are all Poisson point processes,
+  then by default, this function performs an Analysis of Deviance of
+  the fitted models. The output shows the deviance differences
+  (i.e. 2 times log likelihood ratio),
+  the difference in degrees of freedom, and (if \code{test="Chi"}
+  or \code{test="LRT"})
+  the two-sided p-values for the chi-squared tests. Their interpretation
+  is very similar to that in \code{\link[stats]{anova.glm}}.
+  If \code{test="Rao"} or \code{test="score"},
+  the \emph{score test} (Rao, 1948) is performed instead.
+
+  If some of the fitted models are \emph{not} Poisson point processes,
+  the `deviance' differences in this table are
+  `pseudo-deviances' equal to 2 times the differences
+  in the maximised values of the log pseudolikelihood (see
+  \code{\link{ppm}}). It is not valid to compare these
+  values to the chi-squared distribution. In this case,
+  if \code{adjust=TRUE} (the default), the
+  pseudo-deviances will be adjusted using the method of Pace et al
+  (2011) and Baddeley et al (2015) so that the chi-squared test is valid.
+  It is strongly advisable to perform this adjustment.
+}
+\section{Errors and warnings}{
+  \describe{
+    \item{models not nested:}{
+      There may be an error message that the models are not \dQuote{nested}.
+      For an Analysis of Deviance the models must be nested, i.e. one model
+      must be a special case of the other. For example the point process
+      model with formula \code{~x} is a special case of the model with
+      formula \code{~x+y}, so these models are nested. However
+      the two point process
+      models with formulae \code{~x} and \code{~y} are not nested.
+      
+      If you get this error message and you believe that the models should
+      be nested, the problem may be the inability of \R to recognise that
+      the two formulae are nested. Try modifying the formulae to make
+      their relationship more obvious.
+    }
+    \item{different sizes of dataset:}{
+      There may be an error message from \code{anova.glmlist} that
+      \dQuote{models were not all fitted to the same size of dataset}.
+      This implies that the models were fitted using different
+      quadrature schemes (see \code{\link{quadscheme}}) and/or
+      with different edge corrections or different values of the
+      border edge correction distance \code{rbord}.
+
+      To ensure that models are comparable, check the following:
+      \itemize{
+	\item The models must all have been fitted to the same
+	point pattern dataset, in the same window.
+	\item All models must have been fitted by the same
+	fitting method as specified by the argument \code{method} in
+	\code{\link{ppm}}.
+	\item If some of the models depend on covariates, then
+	they should all have been fitted using the same list of
+	covariates, and using \code{allcovar=TRUE} to ensure that the
+	same quadrature scheme is used.
+	\item All models must have been fitted using the same edge
+	correction as specified by the arguments \code{correction}
+	and \code{rbord}. If you did not specify the value of
+	\code{rbord}, then it may have
+	taken a different value for different models. The default value of
+	\code{rbord} is equal to zero for a Poisson model,
+	and otherwise equals the reach (interaction distance) of the
+	interaction term (see \code{\link{reach}}).
+	To ensure that the models are comparable, set \code{rbord} to
+	equal the maximum reach of the interactions that you
+	are fitting.
+      }
+    }
+  }
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{vcov.ppm}}
+}
+\section{Error messages}{
+  An error message that reports
+  \emph{system is computationally singular} indicates that the
+  determinant of the Fisher information matrix of one of the models
+  was either too large or too small for reliable numerical calculation.
+  See \code{\link{vcov.ppm}} for suggestions on how to handle this.
+}
+\examples{
+ mod0 <- ppm(swedishpines ~1)
+ modx <- ppm(swedishpines ~x)
+ # Likelihood ratio test
+ anova(mod0, modx, test="Chi")
+ # Score test
+ anova(mod0, modx, test="Rao")
+
+ # Single argument
+ modxy <- ppm(swedishpines ~x + y)
+ anova(modxy, test="Chi")
+
+ # Adjusted composite likelihood ratio test
+ modP <- ppm(swedishpines ~1, rbord=9)
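+ # (rbord=9 makes the Poisson fit use the same border correction
+ # distance as the Strauss model, whose reach is 9; see the section
+ # on errors and warnings)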
+ modS <- ppm(swedishpines ~1, Strauss(9))
+ anova(modP, modS, test="Chi")
+}
+\references{
+  Baddeley, A., Turner, R. and Rubak, E. (2015)
+  Adjusted composite likelihood ratio test for Gibbs point processes.
+  \emph{Journal of Statistical Computation and Simulation}
+  \bold{86} (5) 922--941.
+   DOI: 10.1080/00949655.2015.1044530.
+
+  Pace, L., Salvan, A. and Sartori, N. (2011)
+  Adjusting composite likelihood ratio statistics.
+  \emph{Statistica Sinica} \bold{21}, 129--148.
+
+  Rao, C.R. (1948) 
+  Large sample tests of statistical hypotheses concerning
+  several parameters with applications to problems of
+  estimation. \emph{Proceedings of the Cambridge Philosophical Society}
+  \bold{44}, 50--57.
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{models}
+\keyword{methods}
+ 
+ 
diff --git a/man/anova.slrm.Rd b/man/anova.slrm.Rd
new file mode 100644
index 0000000..b2cd545
--- /dev/null
+++ b/man/anova.slrm.Rd
@@ -0,0 +1,61 @@
+\name{anova.slrm}
+\Rdversion{1.1}
+\alias{anova.slrm}
+\title{
+  Analysis of Deviance for Spatial Logistic Regression Models
+}
+\description{
+  Performs Analysis of Deviance for two or more fitted Spatial Logistic
+  Regression models.
+}
+\usage{
+  \method{anova}{slrm}(object, ..., test = NULL)
+}
+\arguments{
+  \item{object}{
+    a fitted spatial logistic regression model.
+    An object of class \code{"slrm"}.
+  }
+  \item{\dots}{
+    additional objects of the same type (optional).
+  }
+  \item{test}{
+    a character string (partially) matching one of
+    \code{"Chisq"}, \code{"F"} or \code{"Cp"}, indicating the
+    reference distribution that should be used to compute
+    \eqn{p}-values.
+  }
+}
+\details{
+  This is a method for \code{\link[stats]{anova}} for fitted spatial logistic
+  regression models (objects of class \code{"slrm"}, usually obtained
+  from the function \code{\link{slrm}}).
+
+  The output shows the deviance differences (i.e. 2 times log
+  likelihood ratio), the difference in degrees of freedom, and (if
+  \code{test="Chi"}) the two-sided \eqn{p}-values for the chi-squared tests.
+  Their interpretation is very similar to that
+  in \code{\link[stats]{anova.glm}}.
+}
+\value{
+  An object of class \code{"anova"}, inheriting from
+  class \code{"data.frame"}, representing the analysis of deviance table.
+}
+\seealso{
+  \code{\link{slrm}}
+}
+\examples{
+  X <- rpoispp(42)
+  fit0 <- slrm(X ~ 1)
+  fit1 <- slrm(X ~ x+y)
+  anova(fit0, fit1, test="Chi")
+}
+\author{\adrian
+  \email{adrian@maths.uwa.edu.au}
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
+\keyword{methods}
diff --git a/man/ants.Rd b/man/ants.Rd
new file mode 100644
index 0000000..e879f91
--- /dev/null
+++ b/man/ants.Rd
@@ -0,0 +1,169 @@
+\name{ants}
+\alias{ants}
+\alias{ants.extra}
+\docType{data}
+\title{
+  Harkness-Isham ants' nests data 
+}
+\description{
+  These data give the spatial locations of nests of two species of
+  ants, \emph{Messor wasmanni} and \emph{Cataglyphis bicolor},
+  recorded by Professor R.D. Harkness at a site in northern Greece,
+  and described in Harkness \& Isham (1983). 
+  The full dataset (supplied here) has an irregular polygonal boundary,
+  while most analyses have been confined to two rectangular
+  subsets of the pattern (also supplied here).
+
+  The harvester ant \emph{M. wasmanni} 
+  collects seeds for food and builds a nest composed mainly of
+  seed husks. \emph{C. bicolor} is a heat-tolerant desert foraging ant which
+  eats dead insects and other arthropods. Interest focuses on whether
+  there is evidence in the data for intra-species competition
+  between \emph{Messor} nests (i.e. competition for resources) and for
+  preferential placement of \emph{Cataglyphis} nests in the vicinity of
+  \emph{Messor} nests.   
+
+  The full dataset is displayed in Figure 1 of Harkness \& Isham (1983).
+  See \bold{Examples} below to produce a comparable plot.
+  It comprises 97 nests (68 Messor and 29 Cataglyphis)
+  inside an irregular convex polygonal boundary, together with
+  annotations showing a foot track through the region,
+  the boundary between field and scrub areas inside the
+  region, and indicating the two rectangular subregions
+  A and B used in their analysis.
+  
+  Rectangular subsets of the data were analysed by
+  Harkness \& Isham (1983), Isham (1984), Takacs \& Fiksel
+  (1986), \ifelse{latex}{\out{S\"arkk\"a}}{Sarkka} (1993, section 5.3),
+  \ifelse{latex}{\out{H\"ogmander}}{Hogmander} and
+  \ifelse{latex}{\out{S\"arkk\"a}}{Sarkka} (1999) and Baddeley \& Turner (2000).
+  The full dataset (inside its irregular boundary) was first analysed 
+  by Baddeley \& Turner (2005b).
+  
+  The dataset \code{ants} is the full point pattern
+  enclosed by the irregular polygonal boundary.
+  The \eqn{x} and \eqn{y} coordinates are eastings (E-W) and northings (N-S)
+  scaled so that 1 unit equals 0.5 feet. 
+  This is a multitype point pattern object, each point carrying a mark
+  indicating the ant species (with levels \code{Cataglyphis}
+  and \code{Messor}).
+
+  The dataset \code{ants.extra} is a list of auxiliary
+  information:
+  \describe{
+    \item{\code{A} and \code{B}}{The subsets
+      of the pattern within the rectangles A and B
+      demarcated in Figure 1 of Harkness \& Isham (1983).
+      These are multitype point pattern objects.
+    }
+    \item{\code{trackNE} and \code{trackSW}}{
+      coordinates of two straight lines bounding the foot track.
+    }
+    \item{\code{fieldscrub}}{The endpoints of a straight line
+      separating the regions of `field' and `scrub': 
+      scrub to the North and field to the South.
+    }
+    \item{\code{side}}{
+      A \code{function(x,y)} that determines whether the location
+      \code{(x,y)} is in the scrub or the field. The function can be applied
+      to numeric vectors \code{x} and \code{y}, and returns a factor
+      with levels \code{"scrub"} and \code{"field"}.
+      This function is useful as a spatial covariate.
+    }
+    \item{\code{plotit}}{A function which
+      produces a plot of the full dataset.
+    }
+  }
+} 
+\format{
+  \code{ants} is an object of class \code{"ppp"}
+  representing the full point pattern of ants' nests.
+  See \code{\link{ppp.object}} for details of the format.
+  The coordinates are scaled so that 1 unit equals 0.5 feet.
+  The points are marked by species (with levels \code{Cataglyphis}
+  and \code{Messor}).
+
+  \code{ants.extra} is a list with entries
+  \describe{
+    \item{A}{point pattern of class \code{"ppp"}}
+    \item{B}{point pattern of class \code{"ppp"}}
+    \item{trackNE}{data in format \code{list(x=numeric(2),y=numeric(2))}
+      giving the two endpoints of line markings}
+    \item{trackSW}{data in format \code{list(x=numeric(2),y=numeric(2))}
+      giving the two endpoints of line markings}
+    \item{fieldscrub}{data in format \code{list(x=numeric(2),y=numeric(2))}
+      giving the two endpoints of line markings}
+    \item{side}{Function with arguments \code{x,y}}
+    \item{plotit}{Function}
+  }
+}
+\usage{data(ants)}
+\examples{
+
+  # Equivalent to Figure 1 of Harkness and Isham (1983)
+
+  data(ants)
+  ants.extra$plotit()
+
+  # Data in subrectangle A, rotated 
+  # Approximate data used by Sarkka (1993)
+
+  angle <- atan(diff(ants.extra$fieldscrub$y)/diff(ants.extra$fieldscrub$x))
+  plot(rotate(ants.extra$A, -angle))
+
+  # Approximate window used by Takacs and Fiksel (1986)
+
+  tfwindow <- boundingbox(Window(ants))
+  antsTF <- ppp(ants$x, ants$y, window=tfwindow)
+  plot(antsTF)
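+
+  # a hedged sketch: evaluate the 'side' covariate at the nest
+  # locations and tabulate nests by field/scrub
+  table(ants.extra$side(ants$x, ants$y))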
+}
+\source{Harkness and Isham (1983).
+  Nest coordinates kindly provided by Prof Valerie Isham.
+  Polygon coordinates digitised by \adrian from
+  a reprint of Harkness \& Isham (1983).
+}
+\references{
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42}, 283--322.
+
+  Baddeley, A. and Turner, R. (2005a)
+  Spatstat: an R package for analyzing spatial point patterns.
+  \emph{Journal of Statistical Software} \bold{12}:6, 1--42.
+  URL: \code{www.jstatsoft.org}, ISSN: 1548-7660.
+
+  Baddeley, A. and Turner, R. (2005b)
+  Modelling spatial point patterns in R.
+  In: A. Baddeley, P. Gregori, J. Mateu, R. Stoica, and D. Stoyan,
+  editors, \emph{Case Studies in Spatial Point Pattern Modelling},
+  Lecture Notes in Statistics number 185. Pages 23--74.
+  Springer-Verlag, New York, 2006.
+  ISBN: 0-387-28311-0.
+
+  Harkness, R.D. and Isham, V. (1983)
+  A bivariate spatial point pattern of ants' nests.
+  \emph{Applied Statistics} \bold{32}, 293--303.
+
+  \ifelse{latex}{\out{H\"ogmander}}{Hogmander}, H. and
+  \ifelse{latex}{\out{S\"arkk\"a}}{Sarkka}, A. (1999)
+  Multitype spatial point patterns with hierarchical interactions.
+  \emph{Biometrics} \bold{55}, 1051--1058.
+
+  Isham, V.S. (1984)
+  Multitype Markov point processes: some approximations.
+  \emph{Proceedings of the Royal Society of London, Series A}
+  \bold{391}, 39--53.
+
+  \ifelse{latex}{\out{S\"arkk\"a}}{Sarkka}, A. (1993)
+  \emph{Pseudo-likelihood approach for pair potential
+  estimation of Gibbs processes}.
+  Number 22 in \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}
+  Studies in Computer Science, Economics and Statistics.
+  University of \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}, Finland.
+
+  Takacs, R. and Fiksel, T. (1986)
+  Interaction pair-potentials for a system of ants' nests.
+  \emph{Biometrical Journal} \bold{28}, 1007--1013.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/anyNA.im.Rd b/man/anyNA.im.Rd
new file mode 100644
index 0000000..7c81b9a
--- /dev/null
+++ b/man/anyNA.im.Rd
@@ -0,0 +1,46 @@
+\name{anyNA.im}
+\alias{anyNA.im}
+\title{
+  Check Whether Image Contains NA Values
+}
+\description{
+  Checks whether any pixel values in a pixel image are \code{NA}
+  (meaning that the pixel lies outside the domain of definition of the
+  image). 
+}
+\usage{
+  \method{anyNA}{im}(x, recursive = FALSE)
+}
+\arguments{
+  \item{x}{
+    A pixel image (object of class \code{"im"}).
+  }
+  \item{recursive}{
+    Ignored.
+  }
+}
+\details{
+  The function \code{\link{anyNA}} is generic: \code{anyNA(x)} is a
+  faster alternative to \code{any(is.na(x))}.
+
+  This function \code{anyNA.im} is a method for the generic \code{anyNA}
+  defined for pixel images. It returns the value \code{TRUE} if any of the pixel
+    values in \code{x} are \code{NA},
+    and otherwise returns \code{FALSE}.
+}
+\value{
+  A single logical value.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{im.object}}
+}
+\examples{
+  anyNA(as.im(letterR))
+}
+\keyword{spatial}
+\keyword{methods}
+
+
diff --git a/man/anylist.Rd b/man/anylist.Rd
new file mode 100644
index 0000000..53df090
--- /dev/null
+++ b/man/anylist.Rd
@@ -0,0 +1,57 @@
+\name{anylist}
+\alias{anylist}
+\alias{as.anylist}
+\title{
+  List of Objects
+}
+\description{
+  Make a list of objects of any type.
+}
+\usage{
+anylist(\dots)
+as.anylist(x)
+}
+\arguments{
+  \item{\dots}{
+    Any number of arguments of any type.
+  }
+  \item{x}{
+    A list.
+  }
+}
+\details{
+  An object of class \code{"anylist"} is
+  a list of objects that the user intends to treat in a similar fashion.
+
+  For example it may be desired to plot each of the objects
+  side-by-side: this can be done using the function
+  \code{\link{plot.anylist}}.
+  
+  The objects can belong to any class;
+  they may or may not all belong to the same class.
+
+  In the \pkg{spatstat} package, various functions produce
+  an object of class \code{"anylist"}.
+}
+\value{
+  A list, belonging to the class \code{"anylist"},
+  containing the original objects.
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{solist}},
+  \code{\link{as.solist}},
+  \code{\link{anylapply}}.
+}
+\examples{
+  anylist(cells, intensity(cells), Kest(cells))
+}
+\keyword{list}
+\keyword{manip}
diff --git a/man/append.psp.Rd b/man/append.psp.Rd
new file mode 100644
index 0000000..6a9e74d
--- /dev/null
+++ b/man/append.psp.Rd
@@ -0,0 +1,47 @@
+\name{append.psp}
+\alias{append.psp}
+\title{Combine Two Line Segment Patterns}
+\description{
+  Combine two line segment patterns into a single pattern.
+}
+\usage{
+  append.psp(A, B)
+}
+\arguments{
+  \item{A,B}{
+    Line segment patterns (objects of class \code{"psp"}).
+  }
+}
+\value{
+  Another line segment pattern (object of class \code{"psp"}).
+}
+\details{
+  This function is used to superimpose two line segment patterns
+  \code{A} and \code{B}.
+  
+  The two patterns must have \bold{identical} windows. If one
+  pattern has marks, then the other must also have marks
+  of the same type.  If the marks are data frames, then the
+  number of columns of these data frames, and the names of
+  the columns, must be identical.
+
+  (To combine two point patterns, see \code{\link{superimpose}}.)
+}
+\seealso{
+  \code{\link{psp}},
+  \code{\link{as.psp}},
+  \code{\link{superimpose}}
+}
+\examples{
+  X <- psp(runif(20), runif(20), runif(20), runif(20),  window=owin())
+  Y <- psp(runif(5), runif(5), runif(5), runif(5),  window=owin())
+  append.psp(X,Y)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/applynbd.Rd b/man/applynbd.Rd
new file mode 100644
index 0000000..583f54b
--- /dev/null
+++ b/man/applynbd.Rd
@@ -0,0 +1,222 @@
+\name{applynbd}
+\alias{applynbd}
+\title{Apply Function to Every Neighbourhood in a Point Pattern}
+\description{
+  Visit each point in a point pattern, find the neighbouring points,
+  and apply a given function to them.
+}
+\usage{
+   applynbd(X, FUN, N=NULL, R=NULL, criterion=NULL, exclude=FALSE, \dots)
+}
+\arguments{
+  \item{X}{
+    Point pattern. 
+    An object of class \code{"ppp"},
+    or data which can be converted into 
+    this format by \code{\link{as.ppp}}.
+  }
+  \item{FUN}{
+    Function to be applied to each neighbourhood.
+    The arguments of \code{FUN} are described under \bold{Details}.
+  }
+  \item{N}{
+    Integer. If this argument is present,
+    the neighbourhood of a point of \code{X} is defined to consist of the
+    \code{N} points of \code{X} which are closest to it.
+  }
+  \item{R}{
+    Nonnegative numeric value. If this argument is present,
+    the neighbourhood of a point of \code{X} is defined to consist of
+    all points of \code{X} which lie within a distance \code{R}
+    of it.
+  }
+  \item{criterion}{
+    Function. If this argument is present, 
+    the neighbourhood of a point of \code{X} is determined by
+    evaluating this function. See under \bold{Details}.
+  }
+  \item{exclude}{
+    Logical. If \code{TRUE} then the point currently being visited
+    is excluded from its own neighbourhood.
+  }
+  \item{\dots}{
+    extra arguments passed to the function \code{FUN}.
+    They must be given in the form \code{name=value}.
+  }
+}
+\value{
+  Similar to the result of \code{\link{apply}}.
+  If each call to \code{FUN} returns a single numeric value,
+  the result is a vector of dimension \code{npoints(X)}, the number of points
+  in \code{X}.
+  If each call to \code{FUN} returns a vector of the same length
+  \code{m}, then the result is a matrix of dimensions \code{c(m,n)};
+  note the transposition of the indices, as usual for the family of
+  \code{apply} functions.
+  If the calls to \code{FUN} return vectors of different lengths,
+  the result is a list of length \code{npoints(X)}. 
+}
+\details{
+  This is an analogue of \code{\link{apply}}
+  for point patterns. It visits each point in the point pattern \code{X},
+  determines which points of \code{X} are ``neighbours'' of the current
+  point, applies the function \code{FUN} to this neighbourhood,
+  and collects the values returned by \code{FUN}.
+
+  The definition of ``neighbours'' depends on the arguments
+  \code{N}, \code{R} and \code{criterion}.
+  Also the argument \code{exclude} determines whether
+  the current point is excluded from its own neighbourhood.
+
+  \itemize{
+    \item 
+    If \code{N} is given, then the neighbours of the current
+    point are the \code{N} points of \code{X} which are closest to
+    the current point (including the current point itself
+    unless \code{exclude=TRUE}).
+    \item
+    If \code{R} is given, then the neighbourhood of the current point
+    consists of all points of \code{X} which lie closer than a distance \code{R}
+    from the current point.
+    \item 
+    If \code{criterion} is given, then it must be a function
+    with two arguments \code{dist} and \code{drank} which will be
+    vectors of equal length.
+    The interpretation is that \code{dist[i]} will be the
+    distance of a point from the current point, and
+    \code{drank[i]} will be the rank of that distance (the three points
+    closest to the current point will have rank 1, 2 and 3).
+    This function must return a logical vector of the same length
+    as \code{dist} and \code{drank} whose \code{i}-th entry is
+    \code{TRUE} if the corresponding point should be included in
+    the neighbourhood. See the examples below.
+    \item
+    If more than one of the arguments \code{N}, \code{R} and
+    \code{criterion} is given, the neighbourhood is defined as
+    the \emph{intersection} of the neighbourhoods specified by these arguments.
+    For example if \code{N=3} and \code{R=5} then the neighbourhood
+    is formed by finding the 3 nearest neighbours of the current point,
+    and retaining only those neighbours which lie closer than 5 units
+    from the current point.
+  }
+
+  When \code{applynbd} is executed, 
+  each point of \code{X} is visited, and the following happens
+  for each point:
+  \itemize{
+    \item
+    the neighbourhood of the current point is determined according
+    to the chosen rule, and stored as a point pattern \code{Y};
+    \item
+    the function \code{FUN} is called as:
+  
+    \code{FUN(Y=Y, current=current, dists=dists, dranks=dranks, \dots)}
+
+    where \code{current} is the location of the current point
+    (in a format explained below),
+    \code{dists} is a vector of distances from the current
+    point to each of the points in \code{Y}, 
+    \code{dranks} is a vector of the ranks of these distances
+    with respect to the full point pattern \code{X},
+    and \code{\dots} are the arguments passed from the call to
+    \code{applynbd};
+    \item
+    The result of the call to \code{FUN} is stored.
+  }
+  The results of each call to \code{FUN} are collected and returned
+  according to the usual rules for \code{\link{apply}} and its
+  relatives. See the \bold{Value} section of this help file.
+
+  The format of the argument \code{current} is as follows.
+  If \code{X} is an unmarked point pattern, then \code{current} is a
+  vector of length 2 containing the coordinates of the current point.
+  If \code{X} is marked, then \code{current} is a point pattern
+  containing exactly one point, so that \code{current$x} is its
+  \eqn{x}-coordinate and \code{current$marks} is its mark value.
+  In either case, the coordinates of the current point can be referred to as
+  \code{current$x} and \code{current$y}.
+
+  Note that \code{FUN} will be called exactly as described above,
+  with each argument named explicitly. Care is required when writing the
+  function \code{FUN} to ensure that
+  the arguments will match up. See the Examples.
+
+  See \code{\link{markstat}} for a common use of this function.
+
+  To simply tabulate the marks in every \code{R}-neighbourhood, use
+  \code{\link{marktable}}.
+}
+\seealso{
+  \code{\link{ppp.object}},
+  \code{\link{apply}},
+  \code{\link{markstat}},
+  \code{\link{marktable}}
+}
+\examples{
+  redwood
+  # count the number of points within radius 0.2 of each point of X
+  nneighbours <- applynbd(redwood, R=0.2, function(Y, ...){npoints(Y)-1})
+  # equivalent to:
+  nneighbours <- applynbd(redwood, R=0.2, function(Y, ...){npoints(Y)}, exclude=TRUE)
+
+  # compute the distance to the second nearest neighbour of each point
+  secondnndist <- applynbd(redwood, N = 2,
+                           function(dists, ...){max(dists)},
+                           exclude=TRUE)
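+
+  # a hedged sketch illustrating the Value section: a FUN returning
+  # a vector of fixed length m yields an m x npoints(X) matrix
+  nndists3 <- applynbd(redwood, N=3, function(dists, ...){dists},
+                       exclude=TRUE)
+  dim(nndists3)    # c(3, npoints(redwood))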
+
+  # marked point pattern
+  trees <- longleaf
+  \testonly{
+	# smaller dataset
+	trees <- trees[seq(1, npoints(trees), by=80)]
+  }
+  # compute the median of the marks of all neighbours of a point
+  # (see also 'markstat')
+  dbh.med <- applynbd(trees, R=90, exclude=TRUE,
+                 function(Y, ...) { median(marks(Y))})
+
+
+  # ANIMATION explaining the definition of the K function
+  # (arguments `fullpicture' and 'rad' are passed to FUN)
+
+  if(interactive()) {
+  showoffK <- function(Y, current, dists, dranks, fullpicture,rad) { 
+	plot(fullpicture, main="")
+	points(Y, cex=2)
+        ux <- current[["x"]]
+        uy <- current[["y"]]
+	points(ux, uy, pch="+",cex=3)
+	theta <- seq(0,2*pi,length=100)
+	polygon(ux + rad * cos(theta), uy+rad*sin(theta))
+	text(ux + rad/3, uy + rad/2,npoints(Y),cex=3)
+	if(interactive()) Sys.sleep(if(runif(1) < 0.1) 1.5 else 0.3)
+	return(npoints(Y))
+  }
+  applynbd(redwood, R=0.2, showoffK, fullpicture=redwood, rad=0.2, exclude=TRUE)
+
+  # animation explaining the definition of the G function
+
+  showoffG <- function(Y, current, dists, dranks, fullpicture) { 
+	plot(fullpicture, main="")
+	points(Y, cex=2)
+        u <- current
+	points(u[1],u[2],pch="+",cex=3)
+	v <- c(Y$x[1],Y$y[1])
+	segments(u[1],u[2],v[1],v[2],lwd=2)
+	w <- (u + v)/2
+	nnd <- dists[1]
+	text(w[1],w[2],round(nnd,3),cex=2)
+	if(interactive()) Sys.sleep(if(runif(1) < 0.1) 1.5 else 0.3)
+	return(nnd)
+  }
+
+  applynbd(cells, N=1, showoffG, exclude=TRUE, fullpicture=cells)
+  }
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{programming}
+\keyword{iteration}
+ 
diff --git a/man/area.owin.Rd b/man/area.owin.Rd
new file mode 100644
index 0000000..7c68da7
--- /dev/null
+++ b/man/area.owin.Rd
@@ -0,0 +1,75 @@
+\name{area.owin}
+\alias{area}
+\alias{area.owin}
+\alias{area.default}
+\alias{volume.owin}
+\title{Area of a Window}
+\description{
+  Computes the area of a window 
+}
+\usage{
+ area(w)
+
+ \method{area}{owin}(w)
+
+ \method{area}{default}(w)
+
+ \method{volume}{owin}(x)
+}
+\arguments{
+  \item{w}{A window, whose area will be computed.
+    This should be an object of class \code{\link{owin}},
+    or can be given in any format acceptable to \code{\link{as.owin}()}.
+  }
+  \item{x}{Object of class \code{\link{owin}}}
+}
+\value{
+  A numerical value giving the area of the window. 
+}
+\details{
+  If the window \code{w} is of type \code{"rectangle"} or \code{"polygonal"},
+  the area of the window is computed by analytic geometry.
+  If \code{w} is of type \code{"mask"},
+  the area of the discrete raster approximation of the window is
+  computed by summing the binary image values and adjusting for
+  pixel size.
+
+  The function \code{volume.owin} is identical to \code{area.owin}
+  except for the argument name. It is a method for the generic function
+  \code{volume}.
+}
+\seealso{
+  \code{\link{perimeter}},
+  \code{\link{diameter.owin}},
+  \code{\link{owin.object}},
+  \code{\link{as.owin}}
+}
+\examples{
+  w <- unit.square()
+  area(w)
+       # returns 1.00000
+
+  k <- 6
+  theta <- 2 * pi * (0:(k-1))/k
+  co <- cos(theta)
+  si <- sin(theta)
+  mas <- owin(c(-1,1), c(-1,1), poly=list(x=co, y=si))
+  area(mas)
+      # returns approx area of k-gon
+  
+  mas <- as.mask(square(2), eps=0.01)
+  X <- raster.x(mas)
+  Y <- raster.y(mas)
+  mas$m <- ((X - 1)^2 + (Y - 1)^2 <= 1)
+  area(mas)
+       # returns 3.14 approx     
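+
+  # a sketch of the mask computation described in Details:
+  # count of TRUE pixels multiplied by the pixel area
+  sum(mas$m) * mas$xstep * mas$ystep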
+
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/areaGain.Rd b/man/areaGain.Rd
new file mode 100644
index 0000000..b826df5
--- /dev/null
+++ b/man/areaGain.Rd
@@ -0,0 +1,80 @@
+\name{areaGain}
+\alias{areaGain}
+\title{Difference of Disc Areas}
+\description{
+  Computes the area of that part of a disc
+  that is not covered by other discs.
+}
+\usage{
+   areaGain(u, X, r, ..., W=as.owin(X), exact=FALSE,
+                     ngrid=spatstat.options("ngrid.disc"))
+}
+\arguments{
+  \item{u}{
+    Coordinates of the centre of the disc of interest.
+    A vector of length 2.
+    Alternatively, a point pattern (object of class \code{"ppp"}).
+  }
+  \item{X}{
+    Locations of the centres of other discs.
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{r}{
+    Disc radius, or vector of disc radii.
+  }
+  \item{\dots}{Ignored.}
+  \item{W}{
+    Window (object of class \code{"owin"})
+    in which the area should be computed.
+  }
+  \item{exact}{
+    Choice of algorithm.
+    If \code{exact=TRUE}, areas are computed exactly using
+    analytic geometry. If \code{exact=FALSE} then a faster algorithm
+    is used to compute a discrete approximation to the areas.
+  }
+  \item{ngrid}{
+    Integer. Number of points in the square grid used to compute
+    the discrete approximation, when \code{exact=FALSE}.
+  }
+}
+\value{
+  A matrix with one row for each point in \code{u}
+  and one column for each value in \code{r}. 
+}
+\details{
+  This function computes the area of that part of
+  the disc of radius \code{r} centred at the location \code{u}
+  that is \emph{not} covered by any of the discs of radius \code{r}
+  centred at the points of the pattern \code{X}.
+  This area is important in some calculations related to
+  the area-interaction model \code{\link{AreaInter}}.
+
+  If \code{u} is a point pattern and \code{r} is a vector,
+  the result is a matrix, with one row for each point in \code{u}
+  and one column for each entry of \code{r}. The \code{[i,j]} entry
+  in the matrix is the area of that part of the disc of radius
+  \code{r[j]} centred at the location \code{u[i]} that is
+  \emph{not} covered by any of the discs of radius \code{r[j]}
+  centred at the points of the pattern \code{X}.
+
+  If \code{W} is not \code{NULL}, then the areas are computed only
+  inside the window \code{W}. 
+}
+\seealso{
+  \code{\link{AreaInter}},
+  \code{\link{areaLoss}}
+}
+\examples{
+   data(cells)
+   u <- c(0.5,0.5)
+   areaGain(u, cells, 0.1)
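+
+   # a hedged sketch: a point pattern 'u' and a vector of radii
+   # give a matrix, one row per point and one column per radius
+   areaGain(cells[1:3], cells, r=c(0.05, 0.1))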
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/areaLoss.Rd b/man/areaLoss.Rd
new file mode 100644
index 0000000..ed381c4
--- /dev/null
+++ b/man/areaLoss.Rd
@@ -0,0 +1,74 @@
+\name{areaLoss}
+\alias{areaLoss}
+\title{Difference of Disc Areas}
+\description{
+  Computes the area of that part of a disc
+  that is not covered by other discs.
+}
+\usage{
+   areaLoss(X, r, ..., W=as.owin(X), subset=NULL,
+                 exact=FALSE,
+                 ngrid=spatstat.options("ngrid.disc"))
+}
+\arguments{
+  \item{X}{
+    Locations of the centres of discs.
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{r}{
+    Disc radius, or vector of disc radii.
+  }
+  \item{\dots}{Ignored.}
+  \item{W}{
+    Optional. Window (object of class \code{"owin"})
+    inside which the area should be calculated.
+  }
+  \item{subset}{
+    Optional. Index identifying a subset of the points of \code{X}
+    for which the area difference should be computed.
+  }
+  \item{exact}{
+    Choice of algorithm.
+    If \code{exact=TRUE}, areas are computed exactly using
+    analytic geometry. If \code{exact=FALSE} then a faster algorithm
+    is used to compute a discrete approximation to the areas.
+  }
+  \item{ngrid}{
+    Integer. Number of points in the square grid used to compute
+    the discrete approximation, when \code{exact=FALSE}.
+  }
+}
+\value{
+  A matrix with one row for each point in \code{X} (or \code{X[subset]})
+  and one column for each value in \code{r}. 
+}
+\details{
+  This function computes, for each point \code{X[i]} in \code{X}
+  and for each radius \code{r},
+  the area of that part of the disc of radius \code{r} centred at the
+  location \code{X[i]} that is \emph{not} covered by any of the
+  other discs of radius \code{r} centred at the points \code{X[j]}
+  for \code{j} not equal to \code{i}.
+  This area is important in some calculations related to
+  the area-interaction model \code{\link{AreaInter}}.
+
+  The result is a matrix, with one row for each point in \code{X}
+  and one column for each entry of \code{r}. 
+}
+\seealso{
+  \code{\link{AreaInter}},
+  \code{\link{areaGain}},
+  \code{\link{dilated.areas}}
+}
+\examples{
+   data(cells)
+   areaLoss(cells, 0.1)
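+
+   # a hedged sketch: two radii and a subset of three points
+   # give a 3 x 2 matrix
+   areaLoss(cells, r=c(0.05, 0.1), subset=1:3)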
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/as.box3.Rd b/man/as.box3.Rd
new file mode 100644
index 0000000..91409fd
--- /dev/null
+++ b/man/as.box3.Rd
@@ -0,0 +1,52 @@
+\name{as.box3}
+\Rdversion{1.1}
+\alias{as.box3}
+\title{
+  Convert Data to Three-Dimensional Box
+}
+\description{
+  Interprets data as the dimensions of a three-dimensional box.
+}
+\usage{
+as.box3(...)
+}
+\arguments{
+  \item{\dots}{
+    Data that can be interpreted as giving the dimensions of a
+    three-dimensional box. See Details.
+  }
+}
+\details{
+  This function converts data in various formats to
+  an object of class \code{"box3"} representing a three-dimensional
+  box (see \code{\link{box3}}). The arguments \code{\dots} may be
+  \itemize{
+    \item an object of class \code{"box3"}
+    \item arguments acceptable to \code{box3}
+    \item a numeric vector of length 6, interpreted as
+    \code{c(xrange[1],xrange[2],yrange[1],yrange[2],zrange[1],zrange[2])}
+    \item an object of class \code{"pp3"} representing a
+    three-dimensional point pattern contained in a box.
+  }
+}
+\value{
+  Object of class \code{"box3"}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{box3}}, 
+  \code{\link{pp3}}
+}
+\examples{
+    X <- c(0,10,0,10,0,5)
+    as.box3(X)
+    X <- pp3(runif(42),runif(42),runif(42), box3(c(0,1)))
+    as.box3(X)
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.boxx.Rd b/man/as.boxx.Rd
new file mode 100644
index 0000000..69b7908
--- /dev/null
+++ b/man/as.boxx.Rd
@@ -0,0 +1,47 @@
+\name{as.boxx}
+\alias{as.boxx}
+\title{Convert Data to Multi-Dimensional Box}
+\description{Interprets data as the dimensions of a multi-dimensional box.}
+\usage{
+  as.boxx(\dots, warn.owin = TRUE)
+}
+\arguments{
+  \item{\dots}{
+    Data that can be interpreted as giving the dimensions of a
+    multi-dimensional box. See Details.
+  }
+  \item{warn.owin}{
+    Logical value indicating whether to print a warning
+    if a non-rectangular window (object of class \code{"owin"})
+    is supplied.
+  }
+}
+\details{
+  Either a single argument should be provided which is one
+  of the following:
+  \itemize{
+    \item an object of class \code{"boxx"}
+    \item an object of class \code{"box3"}
+    \item an object of class \code{"owin"}
+    \item a numeric vector of even length, specifying the corners
+    of the box. See Examples.
+  }
+  or a list of arguments acceptable to \code{\link{boxx}}.
+}
+\value{A \code{"boxx"} object.}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\examples{
+ # Convert unit square to two dimensional box.
+ W <- owin()
+ as.boxx(W)
+ # Make three dimensional box [0,1]x[0,1]x[0,1] from numeric vector
+ as.boxx(c(0,1,0,1,0,1))
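+ # A hedged sketch: an object of class "box3" is also accepted
+ as.boxx(as.box3(c(0,10,0,10,0,5)))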
+}
diff --git a/man/as.data.frame.envelope.Rd b/man/as.data.frame.envelope.Rd
new file mode 100644
index 0000000..60c391d
--- /dev/null
+++ b/man/as.data.frame.envelope.Rd
@@ -0,0 +1,46 @@
+\name{as.data.frame.envelope}
+\alias{as.data.frame.envelope}
+\title{Coerce Envelope to Data Frame}
+\description{
+  Converts an envelope object to a data frame.
+}
+\usage{
+\method{as.data.frame}{envelope}(x, \dots, simfuns=FALSE)
+}
+\arguments{
+  \item{x}{Envelope object (class \code{"envelope"}).}
+  \item{\dots}{Ignored.}
+  \item{simfuns}{Logical value indicating whether the result should
+    include the values of the simulated functions
+    that were used to build the envelope. 
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{as.data.frame}}
+  for the class of envelopes (see \code{\link{envelope}}).
+
+  The result is a data frame with columns
+  containing the values of the function argument
+  (usually named \code{r}), the function estimate for the original
+  point pattern data (\code{obs}),
+  the upper and lower envelope limits (\code{hi} and \code{lo}),
+  and possibly additional columns.
+  
+  If \code{simfuns=TRUE}, the result also includes columns of values
+  of the simulated functions that were used to compute the envelope.
+  This is possible only when the envelope was computed with the
+  argument \code{savefuns=TRUE} in the call to \code{\link{envelope}}.
+}
+\value{
+  A data frame.
+}
+\examples{
+  E <- envelope(cells, nsim=5, savefuns=TRUE)
+  tail(as.data.frame(E))
+  tail(as.data.frame(E, simfuns=TRUE))
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.data.frame.hyperframe.Rd b/man/as.data.frame.hyperframe.Rd
new file mode 100644
index 0000000..2e6519e
--- /dev/null
+++ b/man/as.data.frame.hyperframe.Rd
@@ -0,0 +1,49 @@
+\name{as.data.frame.hyperframe}
+\alias{as.data.frame.hyperframe}
+\title{Coerce Hyperframe to Data Frame}
+\description{
+  Converts a hyperframe to a data frame.
+}
+\usage{
+\method{as.data.frame}{hyperframe}(x, row.names = NULL,
+                                  optional = FALSE, ..., 
+                                  discard=TRUE, warn=TRUE)
+}
+\arguments{
+  \item{x}{Hyperframe (object of class \code{"hyperframe"}).}
+  \item{row.names}{Optional character vector of row names.}
+  \item{optional}{Argument passed to \code{\link{as.data.frame}}
+    controlling what happens to row names.}
+  \item{\dots}{Ignored.}
+  \item{discard}{Logical. Whether to discard columns of the hyperframe
+    that do not contain atomic data. See Details.
+  }
+  \item{warn}{Logical. Whether to issue a warning when columns are discarded.}
+}
+\details{
+  This is a method for the generic function \code{\link{as.data.frame}}
+  for the class of hyperframes (see \code{\link{hyperframe}}).
+
+  If \code{discard=TRUE}, any columns of the hyperframe that
+  do not contain atomic data will be removed (and a warning will
+  be issued if \code{warn=TRUE}). 
+  If \code{discard=FALSE}, then such columns are converted to
+  strings indicating what class of data
+  they originally contained.
+}
+\value{
+  A data frame.
+}
+\examples{
+  h <- hyperframe(X=1:3, Y=letters[1:3], f=list(sin, cos, tan))
+  as.data.frame(h, discard=TRUE, warn=FALSE)
+  as.data.frame(h, discard=FALSE)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.data.frame.im.Rd b/man/as.data.frame.im.Rd
new file mode 100644
index 0000000..2993114
--- /dev/null
+++ b/man/as.data.frame.im.Rd
@@ -0,0 +1,40 @@
+\name{as.data.frame.im}
+\alias{as.data.frame.im}
+\title{Convert Pixel Image to Data Frame}
+\description{
+  Convert a pixel image to a data frame
+}
+\usage{
+  \method{as.data.frame}{im}(x, ...)
+}
+\arguments{
+  \item{x}{A pixel image (object of class \code{"im"}).}
+  \item{\dots}{Further arguments passed to
+    \code{\link[base:as.data.frame]{as.data.frame.default}}
+    to determine the row names and other features.}
+}
+\details{
+  This function takes the pixel image \code{x}
+  and returns a data frame with three columns
+  containing the pixel coordinates and the pixel values.
+
+  The data frame entries are automatically sorted in increasing order of
+  the \code{x} coordinate (and in increasing order of \code{y} within
+  \code{x}). 
+}
+\value{
+  A data frame.
+}
+\examples{
+   # artificial image
+   Z <- setcov(square(1))
+
+   Y <- as.data.frame(Z)
+
+   head(Y)
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/as.data.frame.owin.Rd b/man/as.data.frame.owin.Rd
new file mode 100644
index 0000000..f011e4f
--- /dev/null
+++ b/man/as.data.frame.owin.Rd
@@ -0,0 +1,63 @@
+\name{as.data.frame.owin}
+\alias{as.data.frame.owin}
+\title{Convert Window to Data Frame}
+\description{
+  Converts a window object to a data frame.
+}
+\usage{
+\method{as.data.frame}{owin}(x, \dots, drop=TRUE)
+}
+\arguments{
+  \item{x}{
+    Window (object of class \code{"owin"}).
+  }
+  \item{\dots}{Further arguments passed to
+    \code{\link[base:as.data.frame]{as.data.frame.default}}
+    to determine the row names and other features.
+  }
+  \item{drop}{
+    Logical value indicating whether to discard pixels that are
+    outside the window, when \code{x} is a binary mask.
+  }
+}
+\details{
+  This function returns a data frame specifying the coordinates of the
+  window.
+  
+  If \code{x} is a binary mask window,
+  the result is a data frame with columns \code{x} and \code{y}
+  containing the spatial coordinates of each \emph{pixel}.
+  If \code{drop=TRUE} (the default), only pixels inside the window are retained.
+  If \code{drop=FALSE}, all pixels are retained, and the data frame has
+  an extra column \code{inside} containing the logical value of each pixel
+  (\code{TRUE} for pixels inside the window, \code{FALSE} for outside).
+  
+  If \code{x} is a rectangle or a polygonal window,
+  the result is a data frame with columns \code{x} and \code{y}
+  containing the spatial coordinates of the \emph{vertices} of the
+  window. If the boundary consists of several polygons, the data frame
+  has additional columns \code{id}, identifying which polygon is being
+  traced, and \code{sign}, indicating whether the polygon is an
+  outer or inner boundary (\code{sign=1} and \code{sign=-1} respectively).
+}
+\value{
+  A data frame with columns named \code{x} and \code{y},
+  and possibly other columns.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{as.data.frame.im}}
+}
+\examples{
+   as.data.frame(square(1))
+
+   holey <- owin(poly=list(
+                        list(x=c(0,10,0), y=c(0,0,10)),
+                        list(x=c(2,2,4,4), y=c(2,4,4,2))))
+   as.data.frame(holey)
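+
+   # a hedged sketch of the binary mask case with drop=FALSE:
+   # an extra logical column 'inside' is reported for every pixel
+   head(as.data.frame(as.mask(square(1), eps=0.5), drop=FALSE))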
+}
+\keyword{spatial}
+\keyword{methods}
+
diff --git a/man/as.data.frame.ppp.Rd b/man/as.data.frame.ppp.Rd
new file mode 100644
index 0000000..3e26410
--- /dev/null
+++ b/man/as.data.frame.ppp.Rd
@@ -0,0 +1,40 @@
+\name{as.data.frame.ppp}
+\alias{as.data.frame.ppp}
+\title{Coerce Point Pattern to a Data Frame}
+\description{
+  Extracts the coordinates of the points in a point pattern,
+  and their marks if any, and returns them in a data frame.
+}
+\usage{
+\method{as.data.frame}{ppp}(x, row.names = NULL, ...)
+}
+\arguments{
+  \item{x}{Point pattern (object of class \code{"ppp"}).}
+  \item{row.names}{Optional character vector of row names.}
+  \item{\dots}{Ignored.}
+}
+\details{
+  This is a method for the generic function \code{\link{as.data.frame}}
+  for the class \code{"ppp"} of point patterns.
+
+  It extracts the coordinates of the points in the point pattern, and
+  returns them as columns named \code{x} and \code{y} in a data frame.
+  If the points were marked, the marks are returned as a column
+  named \code{marks} with the same type as in the point pattern dataset.
+}
+\value{
+  A data frame.
+}
+\examples{
+  data(amacrine)
+  df <- as.data.frame(amacrine)
+  df[1:5,]
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.data.frame.psp.Rd b/man/as.data.frame.psp.Rd
new file mode 100644
index 0000000..cf8e978
--- /dev/null
+++ b/man/as.data.frame.psp.Rd
@@ -0,0 +1,45 @@
+\name{as.data.frame.psp}
+\alias{as.data.frame.psp}
+\title{Coerce Line Segment Pattern to a Data Frame}
+\description{
+  Extracts the coordinates of the endpoints in a line segment pattern,
+  and their marks if any, and returns them in a data frame.
+}
+\usage{
+\method{as.data.frame}{psp}(x, row.names = NULL, ...)
+}
+\arguments{
+  \item{x}{Line segment pattern (object of class \code{"psp"}).}
+  \item{row.names}{Optional character vector of row names.}
+  \item{\dots}{Ignored.}
+}
+\details{
+  This is a method for the generic function \code{\link{as.data.frame}}
+  for the class \code{"psp"} of line segment patterns.
+
+  It extracts the coordinates of the endpoints of the line segments,
+  and returns them as columns named \code{x0}, \code{y0}, \code{x1}
+  and \code{y1} in a data frame. If the line segments were marked,
+  the marks are appended as an extra column or columns to the
+  data frame which is returned.  If the marks are a vector then a
+  single column named \code{marks} is appended. in the data frame,
+  with the same type as in the line segment pattern dataset.  If the
+  marks are a data frame, then the columns of this data frame are
+  appended (retaining their names).
+
+}
+\value{
+  A data frame with 4 or 5 columns.
+}
+\examples{
+  data(copper)
+  df <- as.data.frame(copper$Lines)
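+
+  # a hedged sketch: marked segments gain a 'marks' column
+  S <- psp(runif(4), runif(4), runif(4), runif(4), window=owin(),
+           marks=factor(letters[1:4]))
+  as.data.frame(S)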
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.data.frame.tess.Rd b/man/as.data.frame.tess.Rd
new file mode 100644
index 0000000..3200491
--- /dev/null
+++ b/man/as.data.frame.tess.Rd
@@ -0,0 +1,55 @@
+\name{as.data.frame.tess}
+\alias{as.data.frame.tess}
+\title{Convert Tessellation to Data Frame}
+\description{
+  Converts a spatial tessellation object to a data frame.
+}
+\usage{
+\method{as.data.frame}{tess}(x, \dots)
+}
+\arguments{
+  \item{x}{
+    Tessellation (object of class \code{"tess"}).
+  }
+  \item{\dots}{Further arguments passed to
+    \code{\link{as.data.frame.owin}} or
+    \code{\link{as.data.frame.im}} and ultimately to
+    \code{\link[base:as.data.frame]{as.data.frame.default}}
+    to determine the row names and other features.
+  }
+}
+\details{
+  This function converts the tessellation \code{x} to a data frame.
+
+  If \code{x} is a pixel image tessellation (a pixel image with factor
+  values specifying the tile membership of each pixel) then this
+  pixel image is converted to a data frame by
+  \code{\link{as.data.frame.im}}. The result is a data frame with
+  columns \code{x} and \code{y} giving the pixel coordinates,
+  and \code{Tile} identifying the tile containing the pixel.
+
+  If \code{x} is a tessellation consisting of a rectangular grid of tiles
+  or a list of polygonal tiles, then each tile is converted to a
+  data frame by \code{\link{as.data.frame.owin}}, and these data frames
+  are joined together, yielding a single large data frame containing
+  columns \code{x}, \code{y} giving the coordinates of vertices of the
+  polygons, and \code{Tile} identifying the tile.
+}
+\value{
+  A data frame with columns named \code{x}, \code{y}, \code{Tile},
+  and possibly other columns.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{as.data.frame.owin}},
+  \code{\link{as.data.frame.im}}
+}
+\examples{
+  Z <- as.data.frame(dirichlet(cells))
+  head(Z, 10)
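+
+  # a hedged sketch of the pixel image case described in Details:
+  # each pixel is reported with its tile membership
+  V <- tess(image=cut(distmap(cells), breaks=3))
+  head(as.data.frame(V))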
+}
+\keyword{spatial}
+\keyword{methods}
+
diff --git a/man/as.function.fv.Rd b/man/as.function.fv.Rd
new file mode 100644
index 0000000..0b734d6
--- /dev/null
+++ b/man/as.function.fv.Rd
@@ -0,0 +1,118 @@
+\name{as.function.fv}
+\alias{as.function.fv}
+\alias{as.function.rhohat}
+\title{
+  Convert Function Value Table to Function
+}
+\description{
+  Converts an object of class \code{"fv"} to an \R language function.
+}
+\usage{
+  \method{as.function}{fv}(x, ..., value=".y", extrapolate=FALSE)
+
+  \method{as.function}{rhohat}(x, ..., value=".y", extrapolate=TRUE)
+}
+\arguments{
+  \item{x}{
+    Object of class \code{"fv"} or \code{"rhohat"}.
+}
+  \item{\dots}{
+    Ignored.
+}
+  \item{value}{
+    Optional. Character string or character vector selecting
+    one or more of the columns of \code{x}
+    for use as the function value. See Details.
+  }
+  \item{extrapolate}{
+    Logical, indicating whether to extrapolate the function
+    outside the domain of \code{x}. See Details.
+  }
+}
+\details{
+  A function value table (object of class \code{"fv"})
+  is a convenient way of storing and plotting
+  several different estimates of the same function.
+  Objects of this class are returned 
+  by many commands in \pkg{spatstat}, such as \code{\link{Kest}}
+  which returns an estimate of Ripley's \eqn{K}-function
+  for a point pattern dataset.
+
+  Sometimes it is useful to convert the function value table
+  to a \code{function} in the \R language. This is done by
+  \code{as.function.fv}. It converts an object \code{x} of class \code{"fv"}
+  to an \R function \code{f}.
+
+  If \code{f <- as.function(x)} then \code{f} is an \R function
+  that accepts a numeric argument and returns a corresponding value
+  for the summary function by linear interpolation between the values
+  in the table \code{x}. 
+
+  Argument values lying outside the range of the table
+  yield an \code{NA} value (if \code{extrapolate=FALSE})
+  or the function value at the nearest endpoint of the range
+  (if \code{extrapolate = TRUE}).
+  To apply different rules to the left and right extremes,
+  use \code{extrapolate=c(TRUE,FALSE)} and so on.
+  
+  Typically the table \code{x} contains several columns of
+  function values corresponding to different edge corrections.
+  Auxiliary information for the table identifies one of these
+  columns as the \emph{recommended value}. 
+  By default, the values of the function \code{f <- as.function(x)}
+  are taken from this column of recommended values. 
+  This default can be changed using the argument \code{value},
+  which can be a character string or character vector of names of
+  columns of \code{x}. Alternatively \code{value} can be one of
+  the abbreviations used by \code{\link{fvnames}}.
+
+  If \code{value} specifies a single column of the table,
+  then the result is a function \code{f(r)} with a single numeric
+  argument \code{r} (with the same name as the original argument
+  of the function table).
+
+  If \code{value} specifies several columns of the table,
+  then the result is a function \code{f(r,what)} 
+  where \code{r} is the numeric argument and 
+  \code{what} is a character string identifying the column of values
+  to be used.
+
+  The formal arguments of the resulting function
+  are \code{f(r, what=value)}, which
+  means that in a call to this function \code{f}, the permissible values
+  of \code{what} are the entries of the original vector \code{value};
+  the default value of \code{what} is the first entry of \code{value}.
+  
+  The command \code{as.function.fv} is a method for the generic command
+  \code{\link{as.function}}. 
+}
+\value{
+  A \code{function(r)} or \code{function(r,what)}
+  where \code{r} is the name of the original argument of the function table.
+}
+\author{
+  \adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+   \code{\link{fv}}, 
+   \code{\link{fv.object}}, 
+   \code{\link{fvnames}}, 
+   \code{\link{plot.fv}}, 
+   \code{\link{Kest}}
+}
+\examples{
+  K <- Kest(cells)
+  f <- as.function(K)
+  f
+  f(0.1)
+  g <- as.function(K, value=c("iso", "trans"))
+  g
+  g(0.1, "trans")
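+
+  # a hedged sketch of the 'extrapolate' rule: outside the domain
+  # the value at the nearest endpoint is returned instead of NA
+  h <- as.function(K, extrapolate=TRUE)
+  h(2 * max(K$r))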
+}
+\keyword{spatial}
+\keyword{methods}
+
diff --git a/man/as.function.im.Rd b/man/as.function.im.Rd
new file mode 100644
index 0000000..a4e1e7e
--- /dev/null
+++ b/man/as.function.im.Rd
@@ -0,0 +1,50 @@
+\name{as.function.im}
+\alias{as.function.im}
+\title{
+  Convert Pixel Image to Function of Coordinates
+}
+\description{
+  Converts a pixel image to a function of the
+  \eqn{x} and \eqn{y} coordinates.
+}
+\usage{
+ \method{as.function}{im}(x, ...)
+}
+\arguments{
+  \item{x}{
+    Pixel image (object of class \code{"im"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This command converts a pixel image
+  (object of class \code{"im"}) to a \code{function(x,y)}
+  where the arguments \code{x} and \code{y} are (vectors of) spatial
+  coordinates. This function returns the pixel values at the
+  specified locations.
+}
+\value{
+  A function in the \R language, also belonging to the
+  class \code{"funxy"}.
+}
+\author{
+\adrian
+
+
+\rolf
+
+and \ege
+
+}
+\seealso{
+  \code{\link{[.im}}
+}
+\examples{
+  d <- density(cells)
+  f <- as.function(d)
+  f(0.1, 0.3)
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.function.leverage.ppm.Rd b/man/as.function.leverage.ppm.Rd
new file mode 100644
index 0000000..2d1a42e
--- /dev/null
+++ b/man/as.function.leverage.ppm.Rd
@@ -0,0 +1,51 @@
+\name{as.function.leverage.ppm}
+\alias{as.function.leverage.ppm}
+\title{
+  Convert Leverage Object to Function of Coordinates
+}
+\description{
+  Converts an object of class \code{"leverage.ppm"} to a function of the
+  \eqn{x} and \eqn{y} coordinates.
+}
+\usage{
+ \method{as.function}{leverage.ppm}(x, ...)
+}
+\arguments{
+  \item{x}{
+    Object of class \code{"leverage.ppm"}
+    produced by \code{\link{leverage.ppm}}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  An object of class \code{"leverage.ppm"} represents the leverage
+  function of a fitted point process model. This command converts the object
+  to a \code{function(x,y)}
+  where the arguments \code{x} and \code{y} are (vectors of) spatial
+  coordinates. This function returns the leverage values at the
+  specified locations (calculated by referring to the nearest location
+  where the leverage has been computed).
+}
+\value{
+  A function in the \R language, also belonging to the
+  class \code{"funxy"}.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{as.im.leverage.ppm}}
+}
+\examples{
+  X <- rpoispp(function(x,y) { exp(3+3*x) })
+  fit <- ppm(X ~x+y)
+  lev <- leverage(fit)
+  f <- as.function(lev)
+  
+  f(0.2, 0.3)  # evaluate at (x,y) coordinates
+  y <- f(X)    # evaluate at a point pattern
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.function.owin.Rd b/man/as.function.owin.Rd
new file mode 100644
index 0000000..b10c1cd
--- /dev/null
+++ b/man/as.function.owin.Rd
@@ -0,0 +1,48 @@
+\name{as.function.owin}
+\alias{as.function.owin}
+\title{
+  Convert Window to Indicator Function
+}
+\description{
+  Converts a spatial window to a function of the
+  \eqn{x} and \eqn{y} coordinates returning the value 1 inside the
+  window and 0 outside.
+}
+\usage{
+ \method{as.function}{owin}(x, \dots)
+}
+\arguments{
+  \item{x}{
+    Window (object of class \code{"owin"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This command converts a spatial window
+  (object of class \code{"owin"}) to a \code{function(x,y)}
+  where the arguments \code{x} and \code{y} are (vectors of) spatial
+  coordinates. This is the indicator function of the window:
+  it returns the value 1 for locations inside the window,
+  and the value 0 for locations outside the window.
+}
+\value{
+  A function in the \R language.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{as.im.owin}}
+}
+\examples{
+  W <- Window(humberside)
+  f <- as.function(W)
+  f(5000, 4500)
+  f(123456, 78910)
+  X <- runifpoint(5, Frame(humberside))
+  f(X)
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.function.tess.Rd b/man/as.function.tess.Rd
new file mode 100644
index 0000000..8f3fc79
--- /dev/null
+++ b/man/as.function.tess.Rd
@@ -0,0 +1,60 @@
+\name{as.function.tess}
+\alias{as.function.tess}
+\title{
+  Convert a Tessellation to a Function
+}
+\description{
+  Convert a tessellation into a function of the \eqn{x} and \eqn{y} coordinates.
+  The default function values are factor levels specifying which tile of the
+  tessellation contains the point \eqn{(x,y)}.
+}
+\usage{
+  \method{as.function}{tess}(x,\dots,values=NULL)
+}
+\arguments{
+  \item{x}{
+    A tessellation (object of class \code{"tess"}).
+  }
+  \item{values}{
+    Optional. A vector giving the values of the function for each tile
+    of \code{x}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This command converts a tessellation
+  (object of class \code{"tess"}) to a \code{function(x,y)}
+  where the arguments \code{x} and \code{y} are (vectors of) spatial
+  coordinates. The corresponding function values are factor levels
+  identifying which tile of the tessellation contains each point.
+  Values are \code{NA} if the corresponding point lies outside the
+  tessellation.
+
+  If the argument \code{values} is given, then it determines the value
+  of the function in each tile of \code{x}.
+}
+\value{
+  A function in the \R language, also belonging to the
+  class \code{"funxy"}.
+}
+\author{
+  \spatstatAuthors
+}
+\seealso{
+  \code{\link{tileindex}} for the low-level calculation of tile index.
+
+  \code{\link{cut.ppp}} and \code{\link{split.ppp}} to
+  divide up the points of a point pattern according to
+  a tessellation.
+}
+\examples{
+  X <- runifpoint(7)
+  V <- dirichlet(X)
+  f <- as.function(V)
+  f(0.1, 0.4)
+  plot(f)
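+  # supply a numerical value for each of the 7 tiles, via 'values'
+  g <- as.function(V, values=runif(7))
+  g(0.1, 0.4)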
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.fv.Rd b/man/as.fv.Rd
new file mode 100644
index 0000000..2a610f1
--- /dev/null
+++ b/man/as.fv.Rd
@@ -0,0 +1,108 @@
+\name{as.fv} 
+\alias{as.fv}
+\alias{as.fv.fv}
+\alias{as.fv.fasp}
+\alias{as.fv.data.frame}
+\alias{as.fv.matrix}
+\alias{as.fv.minconfit}
+\alias{as.fv.dppm}
+\alias{as.fv.kppm}
+\alias{as.fv.bw.optim}
+\title{Convert Data To Class fv}
+\description{
+  Converts data into a function table (an object of class \code{"fv"}).
+}
+\usage{
+  as.fv(x)
+
+  \method{as.fv}{fv}(x)
+
+  \method{as.fv}{data.frame}(x)
+
+  \method{as.fv}{matrix}(x)
+
+  \method{as.fv}{fasp}(x)
+
+  \method{as.fv}{minconfit}(x)
+
+  \method{as.fv}{dppm}(x)
+
+  \method{as.fv}{kppm}(x)
+
+  \method{as.fv}{bw.optim}(x)
+}
+\arguments{
+  \item{x}{Data which will be converted into a function table}
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+}
+\details{
+  This command converts data \code{x}, that
+  could be interpreted as the values of a function,
+  into a function value table (object of the class \code{"fv"}
+  as described in \code{\link{fv.object}}). This object can then
+  be plotted easily using \code{\link{plot.fv}}.
+
+  The dataset \code{x} may be any of the following:
+  \itemize{
+    \item
+    an object of class \code{"fv"};
+    \item
+    a matrix or data frame with at least two columns;
+    \item
+    an object of class \code{"fasp"}, representing an array of
+    \code{"fv"} objects.
+    \item
+    an object of class \code{"minconfit"}, giving the results
+    of a minimum contrast fit by the command \code{\link{mincontrast}}.
+    The 
+    \item
+    an object of class \code{"kppm"}, representing a fitted
+    Cox or cluster point process model, obtained from the
+    model-fitting command \code{\link{kppm}};
+    \item
+    an object of class \code{"dppm"}, representing a fitted
+    determinantal point process model, obtained from the
+    model-fitting command \code{\link{dppm}};
+    \item
+    an object of class \code{"bw.optim"}, representing an optimal
+    choice of smoothing bandwidth by a cross-validation method, obtained
+    from commands like \code{\link{bw.diggle}}.
+  }
+  The function \code{as.fv} is generic, with methods for each of the
+  classes listed above. The behaviour is as follows:
+  \itemize{
+    \item
+    If \code{x} is an object of class \code{"fv"}, it is
+    returned unchanged.
+    \item
+    If \code{x} is a matrix or data frame, 
+    the first column is interpreted
+    as the function argument, and subsequent columns are interpreted as
+    values of the function computed by different methods.
+    \item
+    If \code{x} is an object of class \code{"fasp"}
+    representing an array of \code{"fv"} objects, 
+    these are combined into a single \code{"fv"} object.
+    \item
+    If \code{x} is an object of class \code{"minconfit"},
+    or an object of class \code{"kppm"} or \code{"dppm"},
+    the result is a function table containing the
+    observed summary function and the best fit summary function.
+    \item
+    If \code{x} is an object of class \code{"bw.optim"},
+    the result is a function table of the optimisation criterion
+    as a function of the smoothing bandwidth.
+  }
+}
+\examples{
+  r <- seq(0, 1, length=101)
+  x <- data.frame(r=r, y=r^2)
+  as.fv(x)
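+  # convert a bandwidth selection object (class "bw.optim")
+  b <- bw.diggle(cells)
+  as.fv(b)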
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.hyperframe.Rd b/man/as.hyperframe.Rd
new file mode 100644
index 0000000..7fc01bf
--- /dev/null
+++ b/man/as.hyperframe.Rd
@@ -0,0 +1,95 @@
+\name{as.hyperframe} 
+\Rdversion{1.1}
+\alias{as.hyperframe}
+\alias{as.hyperframe.default}
+\alias{as.hyperframe.data.frame}
+\alias{as.hyperframe.hyperframe}
+\alias{as.hyperframe.listof}
+\alias{as.hyperframe.anylist}
+\title{
+  Convert Data to Hyperframe
+}
+\description{
+  Converts data from any suitable format into a hyperframe.
+}
+\usage{
+as.hyperframe(x, \dots)
+
+\method{as.hyperframe}{default}(x, \dots)
+
+\method{as.hyperframe}{data.frame}(x, \dots, stringsAsFactors=FALSE)
+
+\method{as.hyperframe}{hyperframe}(x, \dots)
+
+\method{as.hyperframe}{listof}(x, \dots)
+
+\method{as.hyperframe}{anylist}(x, \dots)
+}
+\arguments{
+  \item{x}{
+    Data in some other format.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link{hyperframe}}.
+  }
+  \item{stringsAsFactors}{
+    Logical. If \code{TRUE}, any column of the data frame \code{x} that
+    contains character strings will be converted to a \code{factor}.
+    If \code{FALSE}, no such conversion will occur.
+  }
+}
+\details{
+  A hyperframe is like a data frame, except that its entries
+  can be objects of any kind.
+
+  The generic function \code{as.hyperframe} converts any suitable kind
+  of data into a hyperframe.
+
+  There are methods for the classes
+  \code{data.frame}, \code{listof}, \code{anylist} and a default method,
+  all of which convert data that is like a hyperframe into
+  a hyperframe object. (The methods for the classes \code{listof}
+  and \code{anylist} convert a list of objects, of
+  arbitrary type, into a hyperframe with one column.)
+  These methods do not discard any information.
+
+  There are also methods for other classes
+  (see \code{\link{as.hyperframe.ppx}}) which extract
+  the coordinates from a spatial dataset. These methods
+  do discard some information.
+}
+\section{Conversion of Strings to Factors}{
+  Note that \code{as.hyperframe.default}
+  will convert a character vector to a factor.
+  It behaves like \code{\link{as.data.frame}}.
+
+  However \code{as.hyperframe.data.frame} does not convert strings to
+  factors; it respects the structure of the data frame \code{x}.
+
+  The behaviour can be changed using the argument \code{stringsAsFactors}.
+}
+\value{
+  An object of class \code{"hyperframe"} created by
+  \code{\link{hyperframe}}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{hyperframe}},
+  \code{\link{as.hyperframe.ppx}}
+}
+\examples{
+   df <- data.frame(x=runif(4),y=letters[1:4])
+   as.hyperframe(df)
+
+   sims <- list()
+   for(i in 1:3) sims[[i]] <- rpoispp(42)
+   as.hyperframe(as.listof(sims))
+   as.hyperframe(as.solist(sims))
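+
+   # request conversion of character columns to factors
+   df2 <- data.frame(x=1:3, y=c("a","b","c"), stringsAsFactors=FALSE)
+   as.hyperframe(df2, stringsAsFactors=TRUE)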
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.hyperframe.ppx.Rd b/man/as.hyperframe.ppx.Rd
new file mode 100644
index 0000000..6fa44c6
--- /dev/null
+++ b/man/as.hyperframe.ppx.Rd
@@ -0,0 +1,80 @@
+\name{as.hyperframe.ppx}
+\Rdversion{1.1}
+\alias{as.hyperframe.ppx}
+\alias{as.data.frame.ppx}
+\alias{as.matrix.ppx}
+\title{
+  Extract coordinates and marks of multidimensional point pattern
+}
+\description{
+  Given any kind of spatial or space-time point pattern,
+  extract the coordinates and marks of the points.
+}
+\usage{
+\method{as.hyperframe}{ppx}(x, ...)
+\method{as.data.frame}{ppx}(x, ...)
+\method{as.matrix}{ppx}(x, ...)
+}
+\arguments{
+  \item{x}{
+    A general multidimensional space-time point pattern 
+    (object of class \code{"ppx"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  An object of class \code{"ppx"} (see \code{\link{ppx}})
+  represents a marked point pattern
+  in multidimensional space and/or time. There may be any
+  number of spatial coordinates, any number of temporal coordinates,
+  and any number of mark variables. The individual marks may be
+  atomic (numeric values, factor values, etc) or objects of any kind.
+
+  The function \code{as.hyperframe.ppx} extracts the coordinates
+  and the marks as a \code{"hyperframe"} (see
+  \code{\link{hyperframe}}) with one row of data for each
+  point in the pattern. This is a method for the generic
+  function \code{\link{as.hyperframe}}.
+
+  The function \code{as.data.frame.ppx} discards those mark variables
+  which are not atomic values, and extracts the coordinates
+  and the remaining marks as a \code{data.frame}
+  with one row of data for each
+  point in the pattern. This is a method for the generic
+  function \code{\link{as.data.frame}}.
+
+  Finally \code{as.matrix(x)} is equivalent to
+  \code{as.matrix(as.data.frame(x))} for an object of class
+  \code{"ppx"}. Be warned that, if there are any columns of non-numeric
+  data (i.e. if there are mark variables that are factors),
+  the result will be a matrix of character values.
+}
+\value{
+  A \code{hyperframe}, \code{data.frame} or \code{matrix} as appropriate.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{ppx}},
+  \code{\link{hyperframe}},
+  \code{\link{as.hyperframe}}.
+}
+\examples{
+   df <- data.frame(x=runif(4),y=runif(4),t=runif(4))
+   X <- ppx(data=df, coord.type=c("s","s","t"))
+   as.data.frame(X)
+   val <- runif(4)
+   E <- lapply(val, function(s) { rpoispp(s) })
+   hf <- hyperframe(t=val, e=as.listof(E))
+   Z <- ppx(data=hf, domain=c(0,1))
+   as.hyperframe(Z)
+   as.data.frame(Z)
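+   # as.matrix flattens the atomic columns to a matrix
+   as.matrix(X)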
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.im.Rd b/man/as.im.Rd
new file mode 100644
index 0000000..5184c4b
--- /dev/null
+++ b/man/as.im.Rd
@@ -0,0 +1,272 @@
+\name{as.im}  
+\alias{as.im}
+\alias{as.im.im}
+\alias{as.im.leverage.ppm}
+\alias{as.im.owin}
+\alias{as.im.matrix}
+\alias{as.im.tess}
+\alias{as.im.function}
+\alias{as.im.funxy}
+\alias{as.im.distfun}
+\alias{as.im.nnfun}
+\alias{as.im.Smoothfun}
+\alias{as.im.data.frame}
+\alias{as.im.default}
+\title{Convert to Pixel Image}
+\description{
+  Converts various kinds of data to a pixel image
+}
+\usage{
+  as.im(X, \dots)
+
+  \method{as.im}{im}(X, W=NULL, \dots,
+        eps=NULL, dimyx=NULL, xy=NULL,
+        na.replace=NULL)
+
+  \method{as.im}{owin}(X, W=NULL, \dots,
+        eps=NULL, dimyx=NULL, xy=NULL,
+        na.replace=NULL, value=1)
+
+  \method{as.im}{matrix}(X, W=NULL, \dots)
+
+  \method{as.im}{tess}(X, W=NULL, \dots, 
+        eps=NULL, dimyx=NULL, xy=NULL,
+        na.replace=NULL)
+
+  \method{as.im}{function}(X, W=NULL, \dots,
+        eps=NULL, dimyx=NULL, xy=NULL,
+        na.replace=NULL, strict=FALSE)
+
+  \method{as.im}{funxy}(X, W=Window(X), \dots)
+
+  \method{as.im}{distfun}(X, W=NULL, \dots,
+        eps=NULL, dimyx=NULL, xy=NULL,
+        na.replace=NULL, approx=TRUE)
+
+  \method{as.im}{nnfun}(X, W=NULL, \dots,
+        eps=NULL, dimyx=NULL, xy=NULL,
+        na.replace=NULL)
+
+  \method{as.im}{Smoothfun}(X, W=NULL, \dots)
+
+  \method{as.im}{leverage.ppm}(X, \dots)
+
+  \method{as.im}{data.frame}(X, \dots, step, fatal=TRUE, drop=TRUE)
+
+  \method{as.im}{default}(X, W=NULL, \dots,
+        eps=NULL, dimyx=NULL, xy=NULL,
+        na.replace=NULL)
+}
+\arguments{
+  \item{X}{Data to be converted to a pixel image.}
+  \item{W}{Window object which determines the spatial domain
+    and pixel array geometry.
+  }
+  \item{\dots}{Additional arguments passed to \code{X} when \code{X}
+    is a function.}
+  \item{eps,dimyx,xy}{
+    Optional parameters passed to \code{\link{as.mask}}
+    which determine the pixel array geometry.
+    See \code{\link{as.mask}}.
+  }
+  \item{na.replace}{Optional value to replace \code{NA} entries in the
+    output image.
+  }
+  \item{value}{Optional.
+    The value to be assigned to pixels inside the window,
+    if \code{X} is a window.
+  }
+  \item{strict}{
+    Logical value indicating whether to match formal arguments
+    of \code{X} when \code{X} is a function. If \code{strict=FALSE}
+    (the default), all the \code{\dots} arguments are passed to
+    \code{X}. If \code{strict=TRUE}, only named arguments
+    are passed, and only if they match the names of formal arguments of
+    \code{X}. 
+  }
+  \item{step}{
+    Optional. A single number, or numeric vector of length 2,
+    giving the grid step lengths
+    in the \eqn{x} and \eqn{y} directions.
+  }
+  \item{fatal}{
+    Logical value indicating what to do if the resulting image
+    would be too large for available memory. If \code{fatal=TRUE} (the
+    default), an error occurs. If \code{fatal=FALSE}, a warning is
+    issued and \code{NULL} is returned.
+  }
+  \item{drop}{
+    Logical value indicating what to do when \code{X}
+    is a data frame with 3 columns. If \code{drop=TRUE} (the default),
+    the result is a pixel image. If \code{drop=FALSE}, the result is
+    a list containing one image.
+  }
+  \item{approx}{
+   Logical value indicating whether to compute an approximate result
+   at faster speed, by using \code{\link{distmap}}, when \code{X} is
+   a distance function.
+  }
+}
+\details{
+  This function converts the data \code{X} into a pixel image
+  object of class \code{"im"} (see \code{\link{im.object}}).
+  The function \code{as.im} is generic, with methods for the classes
+  listed above.
+
+  Currently \code{X} may be any of the following:
+  \itemize{
+    \item
+    a pixel image object, of class \code{"im"}.
+    \item
+    a window object, of class \code{"owin"} (see
+    \code{\link{owin.object}}). The result is an image
+    with all pixel entries equal to \code{value} inside the window \code{X},
+    and \code{NA} outside.
+    \item
+    a matrix. 
+    \item
+    a tessellation (object of class \code{"tess"}).
+    The result is a factor-valued image, with one factor level
+    corresponding to each tile of the tessellation. Pixels are classified
+    according to the tile of the tessellation into which they fall.
+    \item
+    a single number (or a single logical, complex, factor or character
+    value). The result is an image
+    with all pixel entries equal to this constant value
+    inside the window \code{W} (and \code{NA} outside, unless the
+    argument \code{na.replace} is given).
+    Argument \code{W} is required.
+    \item
+    a function of the form \code{function(x, y, ...)}
+    which is to be evaluated to yield the image pixel values.
+    In this case, the additional argument \code{W} must be present.
+    This window will be converted to
+    a binary image mask. Then the function \code{X} will be evaluated
+    in the form \code{X(x, y, ...)} where \code{x} and \code{y} are
+    \bold{vectors} containing the \eqn{x} and \eqn{y} coordinates
+    of all the pixels in the image mask, and \code{...} are any extra
+    arguments given. This function must return a
+    vector or factor of the same length as the input vectors,
+    giving the pixel values.
+    \item
+    an object of class \code{"funxy"} representing a \code{function(x,y,...)}
+    \item
+    an object of class \code{"distfun"} representing a distance function
+    (created by the command \code{\link{distfun}}).
+    \item
+    an object of class \code{"nnfun"} representing a nearest neighbour function
+    (created by the command \code{\link{nnfun}}).
+    \item
+    a list with entries \code{x, y, z} in the format expected by
+    the standard \code{R} functions
+    \code{\link{image.default}} and \code{\link{contour.default}}.
+    That is, \code{z} is a matrix of pixel values, \code{x} and \code{y}
+    are vectors of \eqn{x} and \eqn{y} coordinates respectively,
+    and \code{z[i,j]} is the pixel value for the location
+    \code{(x[i],y[j])}.
+    \item
+    a point pattern (object of class \code{"ppp"}).
+    See the separate documentation for \code{\link{as.im.ppp}}.
+    \item
+    A data frame with at least three columns.
+    Columns named \code{x}, \code{y} and \code{z}, if present,
+    will be assumed to contain the spatial coordinates and the pixel
+    values, respectively. Otherwise the \code{x} and \code{y}
+    coordinates will be taken from the first two columns of the
+    data frame, and any remaining columns will be interpreted as
+    pixel values.
+  }
+
+  The spatial domain (enclosing rectangle) of the pixel image
+  is determined by the argument \code{W}. If \code{W} is absent, 
+  the spatial domain is determined by \code{X}.
+  When \code{X} is a function, a matrix, or a single numerical value,
+  \code{W} is required.
+  
+  The pixel array dimensions of the final resulting image are determined
+  by (in priority order)
+  \itemize{
+    \item the argument \code{eps}, \code{dimyx} or \code{xy} if present;
+    \item the pixel dimensions of the window \code{W}, if it is
+    present and if it is a binary mask;
+    \item the pixel dimensions of \code{X} if it is an image,
+    a binary mask, or a \code{list(x,y,z)};
+    \item the default pixel dimensions, 
+    controlled by \code{\link{spatstat.options}}.
+  }
+
+  Note that if \code{eps}, \code{dimyx} or \code{xy}
+  is given, this will override
+  the pixel dimensions of \code{X} if it has them.
+  Thus, \code{as.im} can be used to change an image's pixel dimensions.
+
+  If the argument \code{na.replace} is given, then all \code{NA} entries
+  in the image will be replaced by this value.  The resulting image is
+  then defined everywhere on the full rectangular domain, instead of a
+  smaller window. Here \code{na.replace} should be a single value,
+  of the same type as the other entries in the image.
+
+  If \code{X} is a pixel image that was created by an older version
+  of \pkg{spatstat}, the command \code{X <- as.im(X)} will
+  repair the internal format of \code{X} so that it conforms to the
+  current version of \pkg{spatstat}.
+
+  If \code{X} is a data frame with \code{m} columns,
+  then \code{m-2} columns of data are interpreted as pixel values,
+  yielding \code{m-2} pixel images. The result of
+  \code{as.im.data.frame} is a list of pixel
+  images, belonging to the class \code{"imlist"}.
+  If \code{m = 3} and \code{drop=TRUE} (the default), then the
+  result is a pixel image rather than a list containing this image.
+}
+\value{
+  A pixel image (object of class \code{"im"}),
+  or a list of pixel images, 
+  or \code{NULL} if the conversion failed.
+}
+\seealso{
+  Separate documentation for \code{\link{as.im.ppp}}
+}
+\examples{
+  data(demopat)
+  # window object
+  W <- Window(demopat)
+  plot(W)
+  Z <- as.im(W)
+  image(Z)
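+  # assign 0 to the pixels outside the window, instead of NA
+  Zfull <- as.im(W, value=1, na.replace=0)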
+  # function
+  Z <- as.im(function(x,y) {x^2 + y^2}, unit.square())
+  image(Z)
+  # function with extra arguments
+  f <- function(x, y, x0, y0) {
+      sqrt((x - x0)^2 + (y-y0)^2)
+  }
+  Z <- as.im(f, unit.square(), x0=0.5, y0=0.5)
+  image(Z)
+  # Revisit the Sixties
+  data(letterR)
+  Z <- as.im(f, letterR, x0=2.5, y0=2)
+  image(Z)
+  # usual convention in S
+  stuff <- list(x=1:10, y=1:10, z=matrix(1:100, nrow=10))
+  Z <- as.im(stuff)
+  # convert to finer grid
+  Z <- as.im(Z, dimyx=256)
+
+  # pixellate the Dirichlet tessellation
+  Di <- dirichlet(runifpoint(10))
+  plot(as.im(Di))
+  plot(Di, add=TRUE)
+
+  # as.im.data.frame is the reverse of as.data.frame.im
+  grad <- bei.extra$grad
+  slopedata <- as.data.frame(grad)
+  slope <- as.im(slopedata)
+  unitname(slope) <- c("metre","metres")
+  all.equal(slope, grad) # TRUE
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.interact.Rd b/man/as.interact.Rd
new file mode 100644
index 0000000..fb4ce7d
--- /dev/null
+++ b/man/as.interact.Rd
@@ -0,0 +1,66 @@
+\name{as.interact}
+\alias{as.interact}
+\alias{as.interact.fii}
+\alias{as.interact.interact}
+\alias{as.interact.ppm}
+\title{Extract Interaction Structure}
+\description{
+  Extracts the interpoint interaction structure from
+  a point pattern model.
+}
+\usage{
+as.interact(object)
+\method{as.interact}{fii}(object)
+\method{as.interact}{interact}(object)
+\method{as.interact}{ppm}(object)
+}
+\arguments{
+  \item{object}{A fitted point process model (object of class
+    \code{"ppm"}) or an interpoint interaction structure
+    (object of class \code{"interact"}).
+  }
+}
+\details{
+  The function \code{as.interact} extracts 
+  the interpoint interaction structure from a suitable object.
+  
+  An object of class \code{"interact"} describes an interpoint
+  interaction structure, before it has been fitted to point pattern
+  data. The irregular parameters of the interaction (such as the
+  interaction range) are fixed, but the regular parameters
+  (such as interaction strength) are undetermined.
+  Objects of this class are created by the functions
+  \code{\link{Poisson}}, \code{\link{Strauss}} and so on.
+  The main use of such objects is in a call to \code{\link{ppm}}.
+  
+  The function \code{as.interact} is generic, with methods for the classes
+  \code{"ppm"}, \code{"fii"} and \code{"interact"}. 
+  The result is an object of class \code{"interact"} which can be printed.
+}
+\section{Note on parameters}{
+  This function does \bold{not} extract the fitted coefficients
+  of the interaction. To extract the fitted interaction including
+  the fitted coefficients, use \code{\link{fitin}}.
+}
+\value{
+  An object of class \code{"interact"} representing the 
+  interpoint interaction. This object can be printed and plotted.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{fitin}},
+  \code{\link{ppm}}.
+}
+\examples{
+   data(cells)
+   model <- ppm(cells, ~1, Strauss(0.07))
+   f <- as.interact(model)
+   f
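+   # by contrast, fitin() extracts the interaction together with
+   # its fitted coefficients
+   fitin(model)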
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/as.layered.Rd b/man/as.layered.Rd
new file mode 100644
index 0000000..69763e2
--- /dev/null
+++ b/man/as.layered.Rd
@@ -0,0 +1,86 @@
+\name{as.layered}
+\alias{as.layered}
+\alias{as.layered.default}
+\alias{as.layered.ppp}
+\alias{as.layered.splitppp}
+\alias{as.layered.solist}
+\alias{as.layered.listof}
+\alias{as.layered.msr}
+\title{Convert Data To Layered Object}
+\description{
+  Converts spatial data into a layered object.
+}
+\usage{
+ as.layered(X)
+
+ \method{as.layered}{default}(X)
+
+ \method{as.layered}{ppp}(X)
+
+ \method{as.layered}{splitppp}(X)
+
+ \method{as.layered}{solist}(X)
+
+ \method{as.layered}{listof}(X)
+
+ \method{as.layered}{msr}(X)
+}
+\arguments{
+  \item{X}{
+    Some kind of spatial data.
+  }
+}
+\value{
+  An object of class \code{"layered"} (see \code{\link{layered}}).
+}
+\details{
+  This function converts the object \code{X}
+  into an object of class \code{"layered"}.
+
+  The argument \code{X} should contain some kind of spatial data
+  such as a point pattern, window, or pixel image.
+
+  If \code{X} is a simple object then it will be converted into
+  a \code{layered} object containing only one layer which is equivalent
+  to \code{X}.
+
+  If \code{X} can be interpreted as consisting of
+  multiple layers of data, then the result will be a \code{layered}
+  object consisting of these separate layers of data.
+  \itemize{
+    \item if \code{X} is a list of class \code{"listof"} or \code{"solist"},
+    then \code{as.layered(X)} consists of several layers,
+    one for each entry in the list \code{X};
+    \item if \code{X} is a multitype point pattern,
+    then \code{as.layered(X)} consists of several layers,
+    each containing the sub-pattern consisting of points of one type;
+    \item if \code{X} is a vector-valued measure,
+    then \code{as.layered(X)} consists of several layers,
+    each containing a scalar-valued measure.
+  }
+}
+\seealso{
+  \code{\link{layered}},
+  \code{\link{split.ppp}}
+}
+\examples{
+   as.layered(cells)
+   as.layered(amacrine)
+
+   P <- rpoispp(100)
+   fit <- ppm(P ~ x+y)
+   rs <- residuals(fit, type="score")
+   as.layered(rs)
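+
+   # a list of spatial objects gives one layer per list entry
+   as.layered(solist(cells, japanesepines))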
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{manip}
+ 
+ 
diff --git a/man/as.linfun.Rd b/man/as.linfun.Rd
new file mode 100644
index 0000000..ddfe116
--- /dev/null
+++ b/man/as.linfun.Rd
@@ -0,0 +1,80 @@
+\name{as.linfun} 
+\alias{as.linfun}
+\alias{as.linfun.linim}
+\alias{as.linfun.lintess}
+\title{
+  Convert Data to a Function on a Linear Network
+}
+\description{
+  Convert some kind of data to an object of class \code{"linfun"}
+  representing a function on a linear network.
+}
+\usage{
+  as.linfun(X, \dots)
+
+  \method{as.linfun}{linim}(X, \dots)
+
+  \method{as.linfun}{lintess}(X, \dots, values, navalue=NA)
+}
+\arguments{
+  \item{X}{
+    Some kind of data to be converted.
+  }
+  \item{\dots}{
+    Other arguments passed to methods.
+  }
+  \item{values}{
+    Optional. Vector of function values,
+    one entry associated with each tile of the tessellation.
+  }
+  \item{navalue}{
+    Optional. Function value associated with locations that
+    do not belong to a tile of the tessellation.
+  }
+}
+\details{
+  An object of class \code{"linfun"} represents
+  a function defined on a linear network.
+
+  The function \code{as.linfun} is generic. The method \code{as.linfun.linim}
+  converts objects of class \code{"linim"} (pixel images on a linear
+  network) to functions on the network.
+
+  The method \code{as.linfun.lintess} converts a tessellation
+  on a linear network into a function with a different value
+  on each tile of the tessellation.
+  If the argument \code{values} is missing or null,
+  then the function returns factor values identifying which tile
+  contains each given point. If \code{values} is given,
+  it should be a vector with one entry for each tile of the tessellation:
+  any point lying in tile number \code{i} will return the value \code{values[i]}.
+}
+\value{
+  Object of class \code{"linfun"}.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{linfun}}
+}
+\examples{
+   X <- runiflpp(2, simplenet)
+   Y <- runiflpp(5, simplenet)
+
+   # image on network
+   D <- density(Y, 0.1, verbose=FALSE)
+
+   f <- as.linfun(D)
+   f
+   f(X)
+
+   # tessellation on network
+   Z <- lineardirichlet(Y)
+   g <- as.linfun(Z)
+   g(X)
+   h <- as.linfun(Z, values = runif(5))
+   h(X)
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.linim.Rd b/man/as.linim.Rd
new file mode 100644
index 0000000..441747d
--- /dev/null
+++ b/man/as.linim.Rd
@@ -0,0 +1,97 @@
+\name{as.linim} 
+\alias{as.linim}
+\alias{as.linim.linim}
+\alias{as.linim.linfun}
+\alias{as.linim.default}
+\title{Convert to Pixel Image on Linear Network}
+\description{
+  Converts various kinds of data to a pixel image on a linear network.
+}
+\usage{
+  as.linim(X, \dots)
+
+  \method{as.linim}{linim}(X, \dots)
+
+  \method{as.linim}{default}(X, L, \dots,
+                             eps = NULL, dimyx = NULL, xy = NULL,
+                             delta=NULL)
+
+  \method{as.linim}{linfun}(X, L=domain(X), \dots,
+                            eps = NULL, dimyx = NULL, xy = NULL,
+                            delta=NULL)
+}
+\arguments{
+  \item{X}{
+    Data to be converted to a pixel image on a linear network.
+  }
+  \item{L}{
+    Linear network (object of class \code{"linnet"}).
+  }
+  \item{\dots}{Additional arguments passed to \code{X} when \code{X}
+    is a function.
+  }
+  \item{eps,dimyx,xy}{
+    Optional arguments passed to \code{\link{as.mask}} to control
+    the pixel resolution.
+  }
+  \item{delta}{
+    Optional. Numeric value giving the approximate distance
+    (in coordinate units) between successive
+    sample points along each segment of the network.
+  }
+}
+\details{
+  This function converts the data \code{X} into a pixel image
+  on a linear network, an object of class \code{"linim"}
+  (see \code{\link{linim}}). 
+
+  The argument \code{X} may be any of the following:
+  \itemize{
+    \item
+    a function on a linear network, an object of class \code{"linfun"}.
+    \item
+    a pixel image on a linear network, an object of class
+    \code{"linim"}.
+    \item
+    a pixel image, an object of class \code{"im"}.
+    \item
+    any type of data acceptable to \code{\link{as.im}},
+    such as a function, numeric value, or window.
+  }
+
+  First \code{X} is converted to a pixel image object \code{Y}
+  (object of class \code{"im"}).
+  The conversion is performed by \code{\link{as.im}}.
+  The arguments \code{eps}, \code{dimyx} and \code{xy}
+  determine the pixel resolution.
+
+  Next \code{Y} is converted to a pixel image on a linear network
+  using \code{\link{linim}}. The argument \code{L} determines the
+  linear network. If \code{L} is missing or \code{NULL},
+  then \code{X} should be an object of class \code{"linim"},
+  and \code{L} defaults to the linear network on which \code{X} is defined.
+
+  In addition to converting the
+  function to a pixel image, the algorithm also generates a fine grid of
+  sample points evenly spaced along each segment of the network
+  (with spacing at most \code{delta} coordinate units). The function values
+  at these sample points are stored in the resulting object as a data frame
+  (the argument \code{df} of \code{\link{linim}}). This mechanism allows
+  greater accuracy for some calculations (such as
+  \code{\link{integral.linim}}).
+}
+\value{
+  An image object on a linear network; an object of class \code{"linim"}.
+}
+\seealso{
+  \code{\link{as.im}}
+}
+\examples{
+  f <- function(x,y){ x + y }
+  plot(as.linim(f, simplenet))
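+  # denser sample points along each segment (argument 'delta')
+  g <- as.linim(f, simplenet, delta=0.05)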
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.linnet.linim.Rd b/man/as.linnet.linim.Rd
new file mode 100644
index 0000000..2f76d4a
--- /dev/null
+++ b/man/as.linnet.linim.Rd
@@ -0,0 +1,68 @@
+\name{as.linnet.linim}
+\alias{as.linnet.lpp}
+\alias{as.linnet.linim}
+\alias{as.linnet.linfun}
+\alias{as.linnet.lintess}
+\title{
+  Extract Linear Network from Data on a Linear Network
+}
+\description{
+  Given some kind of data on a linear network,
+  the command \code{as.linnet} extracts the linear network itself.
+}
+\usage{
+ \method{as.linnet}{linim}(X, \dots)
+
+ \method{as.linnet}{linfun}(X, \dots)
+
+ \method{as.linnet}{lintess}(X, \dots)
+
+ \method{as.linnet}{lpp}(X, \dots, fatal=TRUE, sparse)
+}
+\arguments{
+  \item{X}{
+    Data on a linear network.
+    A point pattern (class \code{"lpp"}),
+    pixel image (class \code{"linim"}), function (class
+    \code{"linfun"}) or tessellation (class \code{"lintess"})
+    on a linear network.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{fatal}{
+    Logical value indicating whether data in the wrong format
+    should lead to an error (\code{fatal=TRUE}) or a warning
+    (\code{fatal=FALSE}).
+  }
+  \item{sparse}{
+    Logical value indicating whether to use a sparse matrix
+    representation, as explained in \code{\link{linnet}}.
+    Default is to keep the same representation as in \code{X}.
+  }
+}
+\details{
+  These are methods for the generic \code{\link{as.linnet}}
+  for various classes.
+  
+  The network on which the data are defined is extracted.
+}
+\value{
+  A linear network (object of class \code{"linnet"}).
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{linnet}}, 
+  \code{\link{methods.linnet}}.
+}
+\examples{
+  # make some data
+  xcoord <- linfun(function(x,y,seg,tp) { x }, simplenet)
+  as.linnet(xcoord)
+  X <- as.linim(xcoord)
+  as.linnet(X)
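+  # a tessellation on a network also carries its network
+  Z <- lineardirichlet(runiflpp(4, simplenet))
+  as.linnet(Z)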
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.linnet.psp.Rd b/man/as.linnet.psp.Rd
new file mode 100644
index 0000000..6f4ff0b
--- /dev/null
+++ b/man/as.linnet.psp.Rd
@@ -0,0 +1,79 @@
+\name{as.linnet.psp}
+\alias{as.linnet.psp}
+\title{
+  Convert Line Segment Pattern to Linear Network
+}
+\description{
+  Converts a line segment pattern to a linear network.
+}
+\usage{
+ \method{as.linnet}{psp}(X, \dots, eps, sparse=FALSE)
+}
+\arguments{
+  \item{X}{
+    Line segment pattern (object of class \code{"psp"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{eps}{
+    Optional. Distance threshold. If two segment endpoints
+    are closer than \code{eps} units apart, they will be treated
+    as the same point, and will become a single vertex in the
+    linear network.
+  }
+  \item{sparse}{
+    Logical value indicating whether to use a sparse matrix
+    representation, as explained in \code{\link{linnet}}.
+  }
+}
+\details{
+  This command converts any collection of line segments into a linear
+  network by guessing the connectivity of the network,
+  using the distance threshold \code{eps}.
+
+  If any segments in \code{X} cross over each other, they are first
+  cut into pieces using \code{\link{selfcut.psp}}.
+
+  Then any pair of segment endpoints lying closer than \code{eps}
+  units apart is treated as a single vertex. The linear network
+  is then constructed using \code{\link{linnet}}.
+
+  It would be wise to check the result by plotting the degree
+  of each vertex, as shown in the Examples.
+
+  If \code{X} has marks, then these are stored in
+  the resulting linear network \code{Y <- as.linnet(X)},
+  and can be extracted as \code{marks(as.psp(Y))} or \code{marks(Y$lines)}.
+}
+\value{
+A linear network (object of class \code{"linnet"}).
+}
+\author{
+\adrian
+
+
+\rolf
+
+and \ege
+
+}
+\seealso{
+  \code{\link{linnet}}, \code{\link{selfcut.psp}},
+  \code{\link{methods.linnet}}.
+}
+\examples{
+  # make some data
+  A <- psp(0.09, 0.55, 0.79, 0.80, window=owin())
+  B <- superimpose(A, as.psp(simplenet))
+
+  # convert to a linear network
+  D <- as.linnet(B)
+
+  # check validity
+  D
+  plot(D)
+  text(vertices(D), labels=vertexdegree(D))
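+
+  # merge endpoints lying closer than 0.05 units apart
+  D2 <- as.linnet(B, eps=0.05)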
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.lpp.Rd b/man/as.lpp.Rd
new file mode 100644
index 0000000..be47281
--- /dev/null
+++ b/man/as.lpp.Rd
@@ -0,0 +1,95 @@
+\name{as.lpp}
+\Rdversion{1.1}
+\alias{as.lpp}
+\title{
+  Convert Data to a Point Pattern on a Linear Network
+}
+\description{
+  Convert various kinds of data to a point pattern on a linear network.
+}
+\usage{
+  as.lpp(x=NULL, y=NULL, seg=NULL, tp=NULL, \dots,
+         marks=NULL, L=NULL, check=FALSE, sparse)
+}
+\arguments{
+  \item{x,y}{
+    Vectors of cartesian coordinates, or any data
+    acceptable to \code{\link[grDevices]{xy.coords}}.
+    Alternatively \code{x} can be a point pattern
+    on a linear network (object of class \code{"lpp"})
+    or a planar point pattern (object of class \code{"ppp"}).
+  }
+  \item{seg,tp}{
+    Optional local coordinates. Vectors of the same length
+    as \code{x,y}. See Details.
+  }
+  \item{\dots}{Ignored.}
+  \item{marks}{
+    Optional marks for the point pattern.
+    A vector or factor with one entry for each point,
+    or a data frame or hyperframe with one row for each point.
+  }
+  \item{L}{
+    Linear network (object of class \code{"linnet"})
+    on which the points lie.
+  }
+  \item{check}{
+    Logical. Whether to check the validity of the spatial coordinates.
+  }
+  \item{sparse}{
+    Optional logical value indicating whether to store the
+    linear network data in a sparse matrix representation or not.
+    See \code{\link{linnet}}.
+  }
+}
+\details{
+  This function converts data in various formats into a point pattern
+  on a linear network  (object of class \code{"lpp"}).
+  
+  The possible formats are:
+  \itemize{
+    \item
+    \code{x} is already a point pattern on a linear network
+    (object of class \code{"lpp"}).
+    Then \code{x} is returned unchanged.
+    \item
+    \code{x} is a planar point pattern (object of class \code{"ppp"}).
+    Then \code{x} is converted to a point pattern on the linear network
+    \code{L} using \code{\link{lpp}}.
+    \item
+    \code{x,y,seg,tp} are vectors of equal length.
+    These specify that the \code{i}th point has Cartesian coordinates
+    \code{(x[i],y[i])}, and lies on segment number \code{seg[i]} of the
+    network \code{L}, at a fractional position \code{tp[i]} along that
+    segment (with \code{tp=0} representing one endpoint and
+    \code{tp=1} the other endpoint of the segment).
+    \item
+    \code{x,y} are missing and \code{seg,tp} are vectors of equal length
+    as described above.
+    \item
+    \code{seg,tp} are \code{NULL}, and \code{x,y} are data in a format
+    acceptable to \code{\link[grDevices]{xy.coords}} specifying the
+    Cartesian coordinates.
+  }
+}
+\value{
+  A point pattern
+  on a linear network (object of class \code{"lpp"}).
+}
+\seealso{
+  \code{\link{lpp}}.
+}
+\examples{
+   A <- as.psp(simplenet)
+   X <- runifpointOnLines(10, A)
+   is.ppp(X)
+   Y <- as.lpp(X, L=simplenet)
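+   # local coordinates: segment number and fractional position
+   Z <- as.lpp(seg=c(1,2), tp=c(0.3,0.7), L=simplenet)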
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/as.mask.Rd b/man/as.mask.Rd
new file mode 100644
index 0000000..eb46003
--- /dev/null
+++ b/man/as.mask.Rd
@@ -0,0 +1,101 @@
+\name{as.mask}
+\alias{as.mask}
+\title{Pixel Image Approximation of a Window}
+\description{
+  Obtain a discrete (pixel image) approximation of a given window
+}
+\usage{
+ as.mask(w, eps=NULL, dimyx=NULL, xy=NULL)
+}
+\arguments{
+  \item{w}{A window (object of class \code{"owin"}) or data acceptable
+    to \code{\link{as.owin}}.}
+  \item{eps}{(optional) width and height of pixels.}
+  \item{dimyx}{(optional) pixel array dimensions}
+  \item{xy}{(optional) data containing pixel coordinates}
+}
+\value{
+  A window (object of class \code{"owin"})
+  of type \code{"mask"} representing a binary pixel image.
+}
+\details{
+  This function generates a rectangular grid of locations in the plane,
+  tests whether each of these locations lies inside the
+  window \code{w}, and stores the results as a binary pixel image
+  or `mask' (an object of class \code{"owin"}, see \code{\link{owin.object}}).
+
+  The most common use of this function is to approximate the shape
+  of another window \code{w} by a binary pixel image. In this case,
+  we will usually want to have a very fine grid of pixels.
+
+  This function can also be used to generate a coarsely-spaced grid of
+  locations inside a window, for purposes such as subsampling
+  and prediction.
+
+  The grid spacing and location are controlled by the
+  arguments \code{eps}, \code{dimyx} and \code{xy},
+  which are mutually incompatible.
+
+  If \code{eps} is given, then it determines the grid spacing.
+  If \code{eps} is a single number,
+  then the grid spacing will be approximately \code{eps}
+  in both the \eqn{x} and \eqn{y} directions. If \code{eps} is a
+  vector of length 2, then the grid spacing will be approximately
+  \code{eps[1]} in the \eqn{x} direction and 
+  \code{eps[2]} in the \eqn{y} direction.
+
+  If \code{dimyx} is given, then the pixel grid will be an
+  \eqn{m \times n}{m * n} rectangular grid
+  where \eqn{m, n} are given by \code{dimyx[2]}, \code{dimyx[1]}
+  respectively. \bold{Warning:} \code{dimyx[1]} is the number of
+  pixels in the \eqn{y} direction, and \code{dimyx[2]} is the number
+  in the \eqn{x} direction. 
+
+  If \code{xy} is given, then this should be some kind of
+  data specifying the coordinates of a pixel grid. It may be
+  \itemize{
+    \item 
+    a list or structure containing elements \code{x} and \code{y}
+    which are numeric vectors of equal length. These will be taken as
+    \eqn{x} and \eqn{y} coordinates of the margins
+    of the grid. The pixel coordinates will be generated
+    from these two vectors.
+    \item
+    a pixel image (object of class \code{"im"}).
+    \item
+    a window (object of class \code{"owin"}) which is
+    of type \code{"mask"} so that it contains pixel coordinates.
+  }
+  If \code{xy} is given, \code{w} may be omitted.
+  
+  If neither \code{eps} nor \code{dimyx} nor \code{xy} is given,
+  the pixel raster dimensions are obtained from
+  \code{\link{spatstat.options}("npixel")}.
+
+  There is no inverse of this function. However, the function
+  \code{\link{as.polygonal}} will compute a polygonal approximation
+  of a binary mask.
+}
+\seealso{
+  \code{\link{owin.object}},
+  \code{\link{as.rectangle}},
+  \code{\link{as.polygonal}},
+  \code{\link{spatstat.options}}
+}
+\examples{
+  w <- owin(c(0,10),c(0,10), poly=list(x=c(1,2,3,2,1), y=c(2,3,4,6,7)))
+  \dontrun{plot(w)}
+  m <- as.mask(w)
+  \dontrun{plot(m)}
+  x <- 1:9
+  y <- seq(0.25, 9.75, by=0.5)
+  m <- as.mask(w, xy=list(x=x, y=y))
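+  # specify the pixel array dimensions (y by x)
+  m <- as.mask(w, dimyx=c(50,100))
+  # specify the pixel size
+  m <- as.mask(w, eps=0.5)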
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.mask.psp.Rd b/man/as.mask.psp.Rd
new file mode 100644
index 0000000..5a10499
--- /dev/null
+++ b/man/as.mask.psp.Rd
@@ -0,0 +1,64 @@
+\name{as.mask.psp}
+\alias{as.mask.psp}
+\title{
+  Convert Line Segment Pattern to Binary Pixel Mask
+}
+\description{
+  Converts a line segment pattern to a binary pixel mask
+  by determining which pixels intersect the lines.
+}
+\usage{
+as.mask.psp(x, W=NULL, ...)
+}
+
+\arguments{
+  \item{x}{
+    Line segment pattern (object of class \code{"psp"}).
+  }
+  \item{W}{
+    Optional window (object of class \code{"owin"})
+    determining the pixel raster.
+  }
+  \item{\dots}{
+    Optional extra arguments passed to \code{\link{as.mask}}
+    to determine the pixel resolution.
+  }
+}
+\details{
+  This function converts a line segment pattern to a binary
+  pixel mask by determining which pixels intersect the lines.
+
+  The pixel raster is determined by \code{W}
+  and the optional arguments \code{\dots}.
+  If \code{W} is missing or \code{NULL}, it defaults to the window
+  containing \code{x}.
+  Then \code{W} is converted to a
+  binary pixel mask using \code{\link{as.mask}}. The arguments
+  \code{\dots} are passed to \code{\link{as.mask}} to
+  control the pixel resolution.
+}
+\value{
+  A window (object of class \code{"owin"})
+  which is a binary pixel mask (type \code{"mask"}).
+}
+\seealso{
+  \code{\link{pixellate.psp}},
+  \code{\link{as.mask}}.
+
+  Use \code{\link{pixellate.psp}} if you want to measure the
+  length of line in each pixel.
+}
+\examples{
+  X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  plot(as.mask.psp(X))
+  plot(X, add=TRUE, col="red")
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
+
diff --git a/man/as.matrix.im.Rd b/man/as.matrix.im.Rd
new file mode 100644
index 0000000..67e0bfe
--- /dev/null
+++ b/man/as.matrix.im.Rd
@@ -0,0 +1,55 @@
+\name{as.matrix.im}
+\alias{as.matrix.im}
+\alias{as.array.im}
+\title{Convert Pixel Image to Matrix or Array}
+\description{
+  Converts a pixel image to a matrix or an array.
+}
+\usage{
+  \method{as.matrix}{im}(x, ...)
+  \method{as.array}{im}(x, ...)
+}
+\arguments{
+  \item{x}{A pixel image (object of class \code{"im"}).}
+  \item{\dots}{See below.}
+}
+\details{
+  The function \code{as.matrix.im} converts the pixel image \code{x}
+  into a matrix containing the pixel values.
+  It is handy when you want to extract a summary of the pixel values.
+  See the Examples.
+  
+  The function \code{as.array.im} converts the pixel image to an array.
+  By default this is a three-dimensional array of dimension \eqn{n} by
+  \eqn{m} by \eqn{1}. 
+  If the extra arguments \code{\dots} are given, they will be passed
+  to \code{\link{array}}, and they may change the dimensions of the
+  array.
+}
+\value{
+  A matrix or array.
+}
+\seealso{
+  \code{\link{as.matrix.owin}}
+}
+\examples{
+   # artificial image
+   Z <- setcov(square(1))
+
+   M <- as.matrix(Z)
+
+   median(M)
+   
+   \dontrun{
+   # plot the cumulative distribution function of pixel values
+   plot(ecdf(as.matrix(Z)))
+   }
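+
+   # as.array gives an n x m x 1 array by default
+   a <- as.array(Z)
+   dim(a)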
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/as.matrix.owin.Rd b/man/as.matrix.owin.Rd
new file mode 100644
index 0000000..9b6628b
--- /dev/null
+++ b/man/as.matrix.owin.Rd
@@ -0,0 +1,50 @@
+\name{as.matrix.owin}
+\alias{as.matrix.owin}
+\title{Convert Pixel Image to Matrix}
+\description{
+  Converts a pixel image to a matrix.
+}
+\usage{
+  \method{as.matrix}{owin}(x, ...)
+}
+\arguments{
+  \item{x}{A window (object of class \code{"owin"}).}
+  \item{\dots}{Arguments passed to \code{\link{as.mask}} to control the
+    pixel resolution.}
+}
+\details{
+  The function \code{as.matrix.owin} converts a window to a
+  logical matrix.
+
+  It first converts the window \code{x} into a binary pixel mask
+  using \code{\link{as.mask}}. It then extracts the pixel entries
+  as a logical matrix.
+
+  The resulting matrix has entries that are
+  \code{TRUE} if the corresponding pixel is inside the window,
+  and \code{FALSE} if it is outside.
+  
+  The function \code{as.matrix} is generic. The function
+  \code{as.matrix.owin}
+  is the method for windows (objects of class \code{"owin"}).
+
+  Use \code{\link{as.im}} to convert a window to a pixel image.
+}
+\value{
+  A logical matrix.
+}
+\examples{
+  m <- as.matrix(letterR)
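+  # control the pixel resolution via arguments passed to as.mask
+  m64 <- as.matrix(letterR, dimyx=64)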
+}
+\seealso{
+  \code{\link{as.matrix.im}},
+  \code{\link{as.im}}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/as.owin.Rd b/man/as.owin.Rd
new file mode 100644
index 0000000..8e11453
--- /dev/null
+++ b/man/as.owin.Rd
@@ -0,0 +1,241 @@
+\name{as.owin}
+\alias{as.owin}
+\alias{as.owin.owin}
+\alias{as.owin.ppp}
+\alias{as.owin.ppm}
+\alias{as.owin.kppm}
+\alias{as.owin.dppm}
+\alias{as.owin.lpp}
+\alias{as.owin.lppm}
+\alias{as.owin.msr}
+\alias{as.owin.psp}
+\alias{as.owin.quad}
+\alias{as.owin.quadratcount}
+\alias{as.owin.quadrattest}
+\alias{as.owin.tess}
+\alias{as.owin.im}
+\alias{as.owin.layered}
+\alias{as.owin.data.frame}
+\alias{as.owin.distfun}
+\alias{as.owin.nnfun}
+\alias{as.owin.funxy}
+\alias{as.owin.boxx}
+\alias{as.owin.rmhmodel}
+\alias{as.owin.leverage.ppm}
+\alias{as.owin.influence.ppm}
+\alias{as.owin.default}
+\title{Convert Data To Class owin}
+\description{
+Converts data specifying an observation window
+in any of several formats, into an object of class \code{"owin"}.
+}
+\usage{
+ as.owin(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{owin}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{ppp}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{ppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE)
+
+ \method{as.owin}{kppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE)
+
+ \method{as.owin}{dppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE)
+
+ \method{as.owin}{lpp}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{lppm}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{msr}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{psp}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{quad}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{quadratcount}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{quadrattest}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{tess}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{im}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{layered}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{data.frame}(W, \dots, step, fatal=TRUE)
+
+ \method{as.owin}{distfun}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{nnfun}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{funxy}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{boxx}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{rmhmodel}(W, \dots, fatal=FALSE)
+
+ \method{as.owin}{leverage.ppm}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{influence.ppm}(W, \dots, fatal=TRUE)
+
+ \method{as.owin}{default}(W, \dots, fatal=TRUE)
+}
+\arguments{
+  \item{W}{Data specifying an observation window, in any of several formats
+    described under \emph{Details} below.}
+  \item{fatal}{Logical flag determining what to do
+    if the data cannot be converted to an observation window.
+    See Details.
+  }
+  \item{\dots}{Ignored.}
+  \item{from}{Character string. See Details.}
+  \item{step}{
+    Optional. A single number, or numeric vector of length 2,
+    giving the grid step lengths
+    in the \eqn{x} and \eqn{y} directions.
+  }
+}
+\value{
+  An object of class \code{"owin"} (see \code{\link{owin.object}})
+  specifying an observation window.
+}
+\details{
+  The class \code{"owin"} is a way of specifying the observation window
+  for a point pattern. See \code{\link{owin.object}} for an overview.
+ 
+  This function converts data in any of several formats 
+  into an object of class \code{"owin"} for use by the \pkg{spatstat}
+  package. The function \code{as.owin} is generic, with methods
+  for different classes of objects, and a default method.
+
+  The argument \code{W} may be
+  \itemize{
+    \item
+    an object of class \code{"owin"}
+    \item
+    a structure with entries \code{xrange}, \code{yrange} specifying the 
+    \eqn{x} and \eqn{y} dimensions of a rectangle
+    \item
+    a four-element vector
+    (interpreted as \code{(xmin, xmax, ymin, ymax)})
+    specifying the \eqn{x} and \eqn{y} dimensions of a rectangle
+    \item
+    a structure with entries \code{xl}, \code{xu}, \code{yl}, \code{yu}
+    specifying the \eqn{x} and \eqn{y} dimensions of a rectangle
+    as \code{(xmin, xmax) = (xl, xu)} and 
+    \code{(ymin, ymax) = (yl, yu)}. This will accept objects of
+    class \code{spp} used in the Venables and Ripley \pkg{spatial}
+    library.
+    \item
+    an object of class \code{"ppp"} representing a point pattern.
+    In this case, the object's \code{window} structure will be
+    extracted.
+    \item
+    an object of class \code{"psp"} representing a line segment pattern.
+    In this case, the object's \code{window} structure will be
+    extracted.
+    \item
+    an object of class \code{"tess"} representing a tessellation.
+    In this case, the object's \code{window} structure will be
+    extracted.
+    \item
+    an object of class \code{"quad"} representing a quadrature scheme.
+    In this case, the window of the \code{data} component will be
+    extracted.
+    \item
+    an object of class \code{"im"} representing a pixel image.
+    In this case, a window of type \code{"mask"} will be returned,
+    with the same pixel raster coordinates as the image.
+    An image pixel value of \code{NA}, signifying that the pixel
+    lies outside the window, is transformed into the logical value
+    \code{FALSE}, which is the corresponding convention for window masks.
+    \item
+    an object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}
+    representing a fitted point process
+    model. In this case, if \code{from="points"} (the default),
+    \code{as.owin} extracts the  original point
+    pattern data to which the model was fitted, and returns the
+    observation window of this point pattern. If
+    \code{from="covariates"} then \code{as.owin} extracts the
+    covariate images to which the model was fitted,
+    and returns a binary mask window that specifies the pixel locations.
+    \item
+    an object of class \code{"lpp"}
+    representing a point pattern on a linear network.
+    In this case, \code{as.owin} extracts the linear network
+    and returns a window containing this network. 
+    \item
+    an object of class \code{"lppm"}
+    representing a fitted point process model on a linear network.
+    In this case, \code{as.owin} extracts the linear network
+    and returns a window containing this network. 
+    \item
+    A \code{data.frame} with exactly three columns. Each row of the
+    data frame corresponds to one pixel. Each row contains the
+    \eqn{x} and \eqn{y} coordinates of a pixel, and a logical value
+    indicating whether the pixel lies inside the window.   
+    \item
+    A \code{data.frame} with exactly two columns. Each row of the
+    data frame contains the \eqn{x} and \eqn{y} coordinates of a pixel
+    that lies inside the window.
+    \item
+    an object of class \code{"distfun"}, \code{"nnfun"}
+    or \code{"funxy"} representing a function of spatial location,
+    defined on a spatial domain. The spatial domain of the function will be
+    extracted.
+    \item
+    an object of class \code{"rmhmodel"} representing a
+    point process model that can be simulated using \code{\link{rmh}}.
+    The window (spatial domain) of the model will be extracted.
+    The window may be \code{NULL} in some circumstances (indicating that the
+    simulation window has not yet been determined). This is not treated
+    as an error, because the argument \code{fatal} defaults to
+    \code{FALSE} for this method.
+    \item
+    an object of class \code{"layered"} representing a
+    list of spatial objects. See \code{\link{layered}}.
+    In this case, \code{as.owin} will be applied to each
+    of the objects in the list, and the union of these windows
+    will be returned.
+  }
+  If the argument \code{W} is not in one of these formats
+  and cannot be converted to a window, then an error will
+  be generated (if \code{fatal=TRUE}) or a value of \code{NULL}
+  will be returned (if \code{fatal=FALSE}).
+
+  When \code{W} is a data frame, the argument \code{step}
+  can be used to specify the pixel grid spacing; otherwise, the spacing
+  will be guessed from the data.
+}
+\seealso{
+  \code{\link{owin.object}},
+  \code{\link{owin}}
+}
+\examples{
+ w <- as.owin(c(0,1,0,1))
+ w <- as.owin(list(xrange=c(0,5),yrange=c(0,10)))
+ # point pattern
+ data(demopat)
+ w <- as.owin(demopat)
+ # image
+ Z <- as.im(function(x,y) { x + 3}, unit.square())
+ w <- as.owin(Z)
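+
+ # two-column data frame of pixel (x,y) coordinates, on a unit grid
+ xy <- expand.grid(x=seq(0.5, 4.5, by=1), y=seq(0.5, 9.5, by=1))
+ w <- as.owin(xy)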
+
+ # Venables & Ripley 'spatial' package
+ require(spatial)
+ towns <- ppinit("towns.dat")
+ w <- as.owin(towns)
+ detach(package:spatial)
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{manip}
+ 
+ 
diff --git a/man/as.polygonal.Rd b/man/as.polygonal.Rd
new file mode 100644
index 0000000..4505eef
--- /dev/null
+++ b/man/as.polygonal.Rd
@@ -0,0 +1,69 @@
+\name{as.polygonal}
+\Rdversion{1.1}
+\alias{as.polygonal}
+\title{
+Convert a Window to a Polygonal Window
+}
+\description{
+  Given a window \code{W} of any geometric type (rectangular,
+  polygonal or binary mask),  this function returns a polygonal window
+  that represents the same spatial domain.
+}
+\usage{
+as.polygonal(W, repair=FALSE)
+}
+\arguments{
+  \item{W}{
+    A window (object of class \code{"owin"}).
+  }
+  \item{repair}{
+    Logical value indicating whether to check the validity
+    of the polygon data and repair it,
+    if \code{W} is already a polygonal window.
+  }
+}
+\details{
+  Given a window \code{W} of any geometric type (rectangular,
+  polygonal or binary mask),  this function returns a polygonal window
+  that represents the same spatial domain.
+
+  If \code{W} is a rectangle, it is converted to a polygon with
+  4 vertices.
+
+  If \code{W} is already polygonal, it is returned unchanged, by default.  
+  However if \code{repair=TRUE} then the validity of the polygonal coordinates
+  will be checked (for example to check the boundary is not self-intersecting)
+  and repaired if necessary, so that the result could be different
+  from \code{W}.
+
+  If \code{W} is a binary mask, then each pixel in the
+  mask is replaced by a small square or rectangle, and the union of these
+  squares or rectangles is computed. The result is a polygonal window
+  that has only horizontal and vertical edges. 
+  (Use \code{\link{simplify.owin}} to remove the staircase
+  appearance, if desired).
+}
+\value{
+  A polygonal window (object of class \code{"owin"}
+  and of type \code{"polygonal"}). 
+}
+\author{
+  \spatstatAuthors
+}
+\seealso{
+  \code{\link{owin}},
+  \code{\link{as.owin}},
+  \code{\link{as.mask}},
+  \code{\link{simplify.owin}}
+}
+\examples{
+   data(letterR)
+   m <- as.mask(letterR, dimyx=32)
+   p <- as.polygonal(m)
+   if(interactive()) {
+      plot(m)
+      plot(p, add=TRUE, lwd=2)
+   }
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.ppm.Rd b/man/as.ppm.Rd
new file mode 100644
index 0000000..f09a350
--- /dev/null
+++ b/man/as.ppm.Rd
@@ -0,0 +1,75 @@
+\name{as.ppm}
+\alias{as.ppm}
+\alias{as.ppm.ppm}
+\alias{as.ppm.profilepl}
+\alias{as.ppm.kppm}
+\alias{as.ppm.dppm}
+\title{Extract Fitted Point Process Model}
+\description{
+  Extracts the fitted point process model from
+  some kind of fitted model.
+}
+\usage{
+as.ppm(object)
+
+\method{as.ppm}{ppm}(object)
+
+\method{as.ppm}{profilepl}(object)
+
+\method{as.ppm}{kppm}(object)
+
+\method{as.ppm}{dppm}(object)
+}
+\arguments{
+  \item{object}{An object that includes a
+    fitted Poisson or Gibbs point process model.
+    An object of class \code{"ppm"}, \code{"profilepl"}, \code{"kppm"} or \code{"dppm"}
+    or possibly other classes.
+  }
+}
+\details{
+  The function \code{as.ppm} extracts 
+  the fitted point process model (of class \code{"ppm"})
+  from a suitable object.
+  
+  The function \code{as.ppm} is generic, with methods for the classes
+  \code{"ppm"}, \code{"profilepl"}, \code{"kppm"} and \code{"dppm"},
+  and possibly for other classes.
+
+  For the class \code{"profilepl"} of models fitted by maximum profile
+  pseudolikelihood, the method \code{as.ppm.profilepl} extracts the
+  fitted point process model (with the optimal values of the
+  irregular parameters).
+
+  For the class \code{"kppm"} of models fitted by minimum contrast (or Palm or composite likelihood)
+  using Waagepetersen's two-step estimation procedure
+  (see \code{\link{kppm}}), the method \code{as.ppm.kppm}
+  extracts the Poisson point process model that is fitted in the
+  first stage of the procedure.
+
+  The behaviour for the class \code{"dppm"} is analogous to the \code{"kppm"} case above.
+}
+\value{
+  An object of class \code{"ppm"}.
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{profilepl}}.
+}
+\examples{
+   # fit a model by profile maximum pseudolikelihood
+   rvals <- data.frame(r=(1:10)/100)
+   pfit <- profilepl(rvals, Strauss, cells, ~1)
+   # extract the fitted model
+   fit <- as.ppm(pfit)
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/as.ppp.Rd b/man/as.ppp.Rd
new file mode 100644
index 0000000..a9c1793
--- /dev/null
+++ b/man/as.ppp.Rd
@@ -0,0 +1,154 @@
+\name{as.ppp}
+\alias{as.ppp}
+\alias{as.ppp.ppp}
+\alias{as.ppp.psp}
+\alias{as.ppp.quad}
+\alias{as.ppp.matrix}
+\alias{as.ppp.data.frame}
+\alias{as.ppp.influence.ppm}
+\alias{as.ppp.default}
+\title{Convert Data To Class ppp}
+\description{
+  Tries to coerce any reasonable kind of data to a spatial point pattern
+  (an object of class \code{"ppp"})
+  for use by the \pkg{spatstat} package).
+}
+\usage{
+  as.ppp(X, \dots, fatal=TRUE)
+
+  \method{as.ppp}{ppp}(X, \dots, fatal=TRUE)
+
+  \method{as.ppp}{psp}(X, \dots, fatal=TRUE)
+
+  \method{as.ppp}{quad}(X, \dots, fatal=TRUE)
+
+  \method{as.ppp}{matrix}(X, W=NULL, \dots, fatal=TRUE)
+
+  \method{as.ppp}{data.frame}(X, W=NULL, \dots, fatal=TRUE)
+
+  \method{as.ppp}{influence.ppm}(X, \dots)
+
+  \method{as.ppp}{default}(X, W=NULL, \dots, fatal=TRUE)
+}
+\arguments{
+  \item{X}{Data which will be converted into a point pattern}
+  \item{W}{
+    Data which define a window for the pattern,
+    when \code{X} does not contain a window.
+    (Ignored if \code{X} contains window information.)
+  }
+  \item{\dots}{Ignored.}
+  \item{fatal}{
+    Logical value specifying what to do if the
+    data cannot be converted.
+    See Details.
+  }
+}
+\value{
+  An object of class \code{"ppp"} (see \code{\link{ppp.object}})
+  describing the point pattern and its window of observation.
+  The value \code{NULL} may also be returned; see Details.
+}
+\details{
+  Converts the dataset \code{X} to a point pattern
+  (an object of class \code{"ppp"}; see \code{\link{ppp.object}} for
+  an overview).
+
+  This function is normally used to convert an existing point pattern
+  dataset, stored in another format, to the \code{"ppp"} format.
+  To create a new point pattern from raw data such as \eqn{x,y}
+  coordinates, it is normally easier to use the creator function
+  \code{\link{ppp}}.
+
+  The function \code{as.ppp} is generic, with methods for the
+  classes \code{"ppp"}, \code{"psp"}, \code{"quad"}, \code{"matrix"},
+  \code{"data.frame"} and a default method.
+
+  The dataset \code{X} may be:
+  \itemize{
+    \item
+    an object of class \code{"ppp"}  
+    \item
+    an object of class \code{"psp"}
+    \item
+    a point pattern object created by the \pkg{spatial} library
+    \item
+    an object of class \code{"quad"} representing a quadrature scheme
+    (see \code{\link{quad.object}})
+    \item
+    a matrix or data frame with at least two columns
+    \item
+    a structure with entries \code{x}, \code{y} which are numeric vectors
+    of equal length
+    \item
+    a numeric vector of length 2, interpreted as the coordinates
+    of a single point.
+  }
+  In the last three cases, we need the second argument \code{W}
+  which is converted to a window object
+  by the function \code{\link{as.owin}}.
+  In the first four cases, \code{W} will be ignored.
+
+  If \code{X} is a line segment pattern (an object of class \code{psp})
+  the point pattern returned consists of the endpoints of the segments.
+  If \code{X} is marked then the point pattern returned will also be
+  marked, the mark associated with a point being the mark of the segment
+  of which that point was an endpoint.
+
+  If \code{X} is a matrix or data frame, the first and second columns will
+  be interpreted as the \eqn{x} and \eqn{y} coordinates respectively.
+  Any additional columns will be interpreted as marks.
+  
+  The argument \code{fatal} indicates what to do when 
+  \code{W} is missing and \code{X} contains no
+  information about the window. If \code{fatal=TRUE}, a fatal error
+  will be generated; if \code{fatal=FALSE}, the
+  value \code{NULL} is returned.
+
+  In the \pkg{spatial} library, a point pattern is represented
+  in either of the following formats:
+  \itemize{
+    \item
+    (in \pkg{spatial} versions 1 to 6)
+    a structure with entries \code{x}, \code{y}
+    \code{xl}, \code{xu}, \code{yl}, \code{yu}
+    \item
+    (in \pkg{spatial} version 7)
+    a structure with entries
+    \code{x}, \code{y} and \code{area},
+    where \code{area} is a structure with entries
+    \code{xl}, \code{xu}, \code{yl}, \code{yu}
+  }
+  where \code{x} and \code{y} are vectors of equal length
+  giving the point coordinates, and \code{xl}, \code{xu}, \code{yl},
+  \code{yu} are numbers giving the dimensions of a rectangular window.
+
+  Point pattern datasets can also be created by the function
+  \code{\link{ppp}}.
+}
+\seealso{
+  \code{\link{ppp}}, \code{\link{ppp.object}}, \code{\link{as.owin}},
+  \code{\link{owin.object}}
+}
+\examples{
+ xy <- matrix(runif(40), ncol=2)
+ pp <- as.ppp(xy, c(0,1,0,1))
+
+ # Venables-Ripley format
+ # check for 'spatial' package
+ spatialpath <- system.file(package="spatial")
+ if(nchar(spatialpath) > 0) {
+   require(spatial)
+   towns <- ppinit("towns.dat")
+   pp <- as.ppp(towns) # converted to our format
+   detach(package:spatial)
+ }
+
+ xyzt <- matrix(runif(40), ncol=4)
+ Z <- as.ppp(xyzt, square(1))
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.psp.Rd b/man/as.psp.Rd
new file mode 100644
index 0000000..9ce0baf
--- /dev/null
+++ b/man/as.psp.Rd
@@ -0,0 +1,170 @@
+\name{as.psp}
+\alias{as.psp}
+\alias{as.psp.psp}
+\alias{as.psp.data.frame}
+\alias{as.psp.matrix}
+\alias{as.psp.default}
+\title{Convert Data To Class psp}
+\description{
+  Tries to coerce any reasonable kind of data object to a line segment pattern
+  (an object of class \code{"psp"})
+  for use by the \pkg{spatstat} package.
+}
+\usage{
+  as.psp(x, \dots, from=NULL, to=NULL)
+
+  \method{as.psp}{psp}(x, \dots, check=FALSE, fatal=TRUE)
+
+  \method{as.psp}{data.frame}(x, \dots, window=NULL, marks=NULL,
+      check=spatstat.options("checksegments"), fatal=TRUE)
+
+  \method{as.psp}{matrix}(x, \dots, window=NULL, marks=NULL,
+       check=spatstat.options("checksegments"), fatal=TRUE)
+
+  \method{as.psp}{default}(x, \dots, window=NULL, marks=NULL,
+       check=spatstat.options("checksegments"), fatal=TRUE)
+}
+\arguments{
+  \item{x}{Data which will be converted into a line segment pattern}
+  \item{window}{Data which define a window for the pattern.}
+  \item{\dots}{Ignored.}
+  \item{marks}{(Optional) vector or data frame of marks for the pattern}
+  \item{check}{
+    Logical value indicating whether to check the validity of the data,
+    e.g. to check that the line segments lie inside the window.
+  }
+  \item{fatal}{Logical value. See Details.}
+  \item{from,to}{Point patterns (object of class \code{"ppp"})
+    containing the first and second endpoints (respectively) of each
+    segment. Incompatible with \code{x}.
+  }
+}
+\value{
+  An object of class \code{"psp"} (see \code{\link{psp.object}})
+  describing the line segment pattern and its window of observation.
+  The value \code{NULL} may also be returned; see Details.
+}
+\details{
+  Converts the dataset \code{x} to a line segment pattern
+  (an object of class \code{"psp"}; see \code{\link{psp.object}} for
+  an overview).
+
+  This function is normally used to convert an existing line segment pattern
+  dataset, stored in another format, to the \code{"psp"} format.
+  To create a new point pattern from raw data such as \eqn{x,y}
+  coordinates, it is normally easier to use the creator function
+  \code{\link{psp}}.
+
+  The dataset \code{x} may be:
+  \itemize{
+    \item
+    an object of class \code{"psp"}  
+    \item
+    a data frame with at least 4 columns
+    \item a structure (list) with elements named  \code{x0, y0,
+    x1, y1} or elements named \code{xmid, ymid, length, angle}
+    and possibly a fifth element named \code{marks}
+  }
+  If \code{x} is a data frame the interpretation of its columns is
+  as follows:
+  \itemize{
+    \item
+    If there are columns named \code{x0, y0, x1, y1} then these
+    will be interpreted as the coordinates of the endpoints of
+    the segments and used to form the \code{ends} component of
+    the \code{psp} object to be returned.
+    \item
+    If there are columns named \code{xmid, ymid, length, angle}
+    then these will be interpreted as the coordinates of the segment
+    midpoints, the lengths of the segments, and the orientations
+    of the segments in radians and used to form the \code{ends}
+    component of the \code{psp} object to be returned.
+    \item
+    If there is a column named \code{marks} then this will
+    be interpreted as the marks of the pattern provided that
+    the argument \code{marks} of this function is \code{NULL}.
+    If argument \code{marks} is not \code{NULL} then the value
+    of this argument is taken to be the marks of the pattern and
+    the column named \code{marks} is ignored (with a warning).
+    In either case the column named marks is deleted and omitted
+    from further consideration.
+    \item
+    If there is no column named \code{marks} and if the \code{marks}
+    argument of this function is \code{NULL}, and if after
+    interpreting 4 columns of \code{x} as determining the \code{ends}
+    component of the \code{psp} object to be returned, there remain
+    other columns of \code{x}, then these remaining columns will be
+    taken to form a data frame of marks for the \code{psp} object
+    to be returned.
+  }
+
+  If \code{x} is a structure (list) with elements named \code{x0,
+  y0, x1, y1, marks} or \code{xmid, ymid, length, angle, marks},
+  then the element named \code{marks} will be interpreted as the
+  marks of the pattern provide that the argument \code{marks} of
+  this function is \code{NULL}.  If this argument is non-\code{NULL}
+  then it is interpreted as the marks of the pattern and the element
+  \code{marks} of \code{x} is ignored --- with a warning.
+
+  Alternatively, you may specify two point patterns
+  \code{from} and \code{to} containing the first and second endpoints
+  of the line segments.
+  
+  The argument \code{window} is converted to a window object
+  by the function \code{\link{as.owin}}.
+
+  The argument \code{fatal} indicates what to do when
+  the data cannot be converted to a line segment pattern.
+  If \code{fatal=TRUE}, a fatal error
+  will be generated; if \code{fatal=FALSE}, the
+  value \code{NULL} is returned.
+
+  The function \code{as.psp} is generic, with methods for the
+  classes \code{"psp"}, \code{"data.frame"}, \code{"matrix"}
+  and a default method.
+  
+  Point pattern datasets can also be created by the function
+  \code{\link{psp}}.
+}
+\section{Warnings}{
+If only a proper subset of the names \code{x0,y0,x1,y1} or
+\code{xmid,ymid,length,angle} appear amongst the names of the
+columns of \code{x} where \code{x} is a data frame, then these
+special names are ignored.
+
+For example if the names of the columns were
+\code{xmid,ymid,length,degrees}, then these columns would be
+interpreted as if the represented \code{x0,y0,x1,y1} in that order.
+
+Whether it gets used or not, column named \code{marks} is
+\emph{always} removed from \code{x} before any attempt to form the
+\code{ends} component of the \code{psp} object that is returned.
+}
+\seealso{
+  \code{\link{psp}},
+  \code{\link{psp.object}},
+  \code{\link{as.owin}},
+  \code{\link{owin.object}}.
+
+  See \code{\link{edges}} for extracting the edges of a polygonal window
+  as a \code{"psp"} object.
+}
+\examples{
+   mat <- matrix(runif(40), ncol=4)
+   mx <- data.frame(v1=sample(1:4,10,TRUE),
+                    v2=factor(sample(letters[1:4],10,TRUE),levels=letters[1:4]))
+   a <- as.psp(mat, window=owin(),marks=mx)
+   mat <- cbind(as.data.frame(mat),mx)
+   b <- as.psp(mat, window=owin()) # a and b are identical.
+   stuff <- list(xmid=runif(10),
+                 ymid=runif(10),
+                 length=rep(0.1, 10),
+                 angle=runif(10, 0, 2 * pi))
+   a <- as.psp(stuff, window=owin())
+   b <- as.psp(from=runifpoint(10), to=runifpoint(10))
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.rectangle.Rd b/man/as.rectangle.Rd
new file mode 100644
index 0000000..8b4e5da
--- /dev/null
+++ b/man/as.rectangle.Rd
@@ -0,0 +1,67 @@
+\name{as.rectangle}
+\alias{as.rectangle}
+\title{Window Frame}
+\description{
+  Extract the window frame of a window
+  or other spatial dataset
+}
+\usage{
+ as.rectangle(w, \dots)
+}
+\arguments{
+  \item{w}{A window, or a dataset that has a window.
+    Either a window (object of class \code{"owin"}),
+    a pixel image (object of class \code{"im"})
+    or other data determining such a window.
+  }
+  \item{\dots}{
+    Optional. Auxiliary data to help determine the window. If
+    \code{w} does not belong to a recognised class,
+    the arguments \code{w} and \code{\dots} 
+    are passed to \code{\link{as.owin}} to determine the window.
+  }
+}
+\value{
+  A window (object of class \code{"owin"})
+  of type \code{"rectangle"} representing a rectangle.
+}
+\details{
+  This function is the quickest way to determine a bounding rectangle
+  for a spatial dataset.
+
+  If \code{w} is a window, the function
+  just extracts the outer bounding rectangle
+  of \code{w} as given by its elements \code{xrange,yrange}.
+
+  The function can also be applied to any spatial dataset that has a window:
+  for example, a point pattern (object of class \code{"ppp"}) or
+  a line segment pattern (object of class \code{"psp"}).
+  The bounding rectangle of the window of the dataset is extracted.
+
+  Use the function \code{\link{boundingbox}} to compute the \emph{smallest}
+  bounding rectangle of a dataset.
+}
+\seealso{
+  \code{\link{owin}},
+  \code{\link{as.owin}},
+  \code{\link{boundingbox}}
+}
+\examples{
+  w <- owin(c(0,10),c(0,10), poly=list(x=c(1,2,3,2,1), y=c(2,3,4,6,7)))
+  r <- as.rectangle(w)
+  # returns a 10 x 10 rectangle
+
+  data(lansing)
+  as.rectangle(lansing)
+
+  data(copper)
+  as.rectangle(copper$SouthLines)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/as.solist.Rd b/man/as.solist.Rd
new file mode 100644
index 0000000..69b42ae
--- /dev/null
+++ b/man/as.solist.Rd
@@ -0,0 +1,53 @@
+\name{as.solist}
+\alias{as.solist}
+\title{
+  Convert List of Two-Dimensional Spatial Objects
+}
+\description{
+  Given a list of two-dimensional spatial objects,
+  convert it to the class \code{"solist"}.
+}
+\usage{
+as.solist(x, \dots)
+}
+\arguments{
+  \item{x}{
+    A list of objects, each representing a two-dimensional
+    spatial dataset.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link{solist}}.
+  }
+}
+\details{
+  This command makes the list \code{x} into an object of class \code{"solist"}
+  (spatial object list).
+  See \code{\link{solist}} for details.
+
+  The entries in the list \code{x} should be
+  two-dimensional spatial datasets
+  (not necessarily of the same class).
+}
+\value{
+  A list, usually of class \code{"solist"}.
+}
+\seealso{
+  \code{\link{solist}},
+  \code{\link{as.anylist}},
+  \code{\link{solapply}}.
+}
+\examples{
+  x <- list(cells, density(cells))
+  y <- as.solist(x)
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{list}
+\keyword{manip}
diff --git a/man/as.tess.Rd b/man/as.tess.Rd
new file mode 100644
index 0000000..adbba23
--- /dev/null
+++ b/man/as.tess.Rd
@@ -0,0 +1,91 @@
+\name{as.tess}
+\alias{as.tess}
+\alias{as.tess.tess}
+\alias{as.tess.im}
+\alias{as.tess.owin}
+\alias{as.tess.quadratcount}
+\alias{as.tess.quadrattest}
+\alias{as.tess.list}
+\title{Convert Data To Tessellation}
+\description{
+Converts data specifying a tessellation,
+in any of several formats, into an object of class \code{"tess"}.
+}
+\usage{
+ as.tess(X)
+ \method{as.tess}{tess}(X)
+ \method{as.tess}{im}(X)
+ \method{as.tess}{owin}(X)
+ \method{as.tess}{quadratcount}(X)
+ \method{as.tess}{quadrattest}(X)
+ \method{as.tess}{list}(X)
+}
+\arguments{
+  \item{X}{Data to be converted to a tessellation.}
+}
+\value{
+  An object of class \code{"tess"} specifying a tessellation.
+}
+\details{
+  A tessellation is a collection of disjoint spatial regions
+  (called \emph{tiles}) that fit together to form a larger spatial
+  region. This command creates an object of class \code{"tess"} that
+  represents a tessellation. 
+
+  This function converts data in any of several formats 
+  into an object of class \code{"tess"} for use by the \pkg{spatstat}
+  package.  The argument \code{X} may be
+  \itemize{
+    \item
+    an object of class \code{"tess"}.
+    The object will be stripped of any extraneous attributes
+    and returned.
+    \item
+    a pixel image (object of class \code{"im"}) with pixel values that
+    are logical or factor values. Each level of the factor will
+    determine a tile of the tessellation.
+    \item
+    a window (object of class \code{"owin"}). The result will be a
+    tessellation consisting of a single tile.
+    \item
+    a set of quadrat counts (object of class \code{"quadratcount"})
+    returned by the command \code{\link{quadratcount}}.
+    The quadrats
+    used to generate the counts will be extracted and returned as a
+    tessellation.
+    \item
+    a quadrat test (object of class \code{"quadrattest"})
+    returned by the command \code{\link{quadrat.test}}. 
+    The quadrats
+    used to perform the test will be extracted and returned as a
+    tessellation.
+    \item
+    a list of windows (objects of class \code{"owin"})
+    giving the tiles of the tessellation.
+  }
+  The function \code{as.tess} is generic, with methods for
+  various classes, as listed above.
+}
+\seealso{
+  \code{\link{tess}}
+}
+\examples{
+ # pixel image
+ v <- as.im(function(x,y){factor(round(5 * (x^2 + y^2)))}, W=owin())
+ levels(v) <- letters[seq(length(levels(v)))]
+ as.tess(v)
+ # quadrat counts
+ data(nztrees)
+ qNZ <- quadratcount(nztrees, nx=4, ny=3)
+ as.tess(qNZ)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
+ 
+ 
diff --git a/man/auc.Rd b/man/auc.Rd
new file mode 100644
index 0000000..31c9059
--- /dev/null
+++ b/man/auc.Rd
@@ -0,0 +1,110 @@
+\name{auc}
+\alias{auc}
+\alias{auc.ppp}
+\alias{auc.lpp}
+\alias{auc.ppm}
+\alias{auc.kppm}
+\alias{auc.lppm}
+\title{
+  Area Under ROC Curve
+}
+\description{
+  Compute the AUC (area under the Receiver Operating Characteristic
+  curve) for a fitted point process model.
+}
+\usage{
+auc(X, \dots)
+
+\method{auc}{ppp}(X, covariate, \dots, high = TRUE)
+
+\method{auc}{ppm}(X, \dots)
+
+\method{auc}{kppm}(X, \dots)
+
+\method{auc}{lpp}(X, covariate, \dots, high = TRUE)
+
+\method{auc}{lppm}(X, \dots)
+}
+\arguments{
+  \item{X}{
+    Point pattern (object of class \code{"ppp"} or \code{"lpp"})
+    or fitted point process model (object of class \code{"ppm"}
+    or \code{"kppm"} or \code{"lppm"}).
+  }
+  \item{covariate}{
+    Spatial covariate. Either a \code{function(x,y)},
+    a pixel image (object of class \code{"im"}), or
+    one of the strings \code{"x"} or \code{"y"} indicating the
+    Cartesian coordinates.    
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}} controlling the
+    pixel resolution for calculations.
+  }
+  \item{high}{
+    Logical value indicating whether the threshold operation
+    should favour high or low values of the covariate.
+  }
+}
+\details{
+  This command computes the AUC, the area under the Receiver Operating
+  Characteristic curve. The ROC itself is computed by \code{\link{roc}}.
+
+  For a point pattern \code{X} and a covariate \code{Z}, the
+  AUC is a numerical index that measures the ability of the 
+  covariate to separate the spatial domain
+  into areas of high and low density of points.
+  Let \eqn{x_i}{x[i]} be a randomly-chosen data point from \code{X}
+  and \eqn{U} a randomly-selected location in the study region.
+  The AUC is the probability that
+  \eqn{Z(x_i) > Z(U)}{Z(x[i]) > Z(U)}
+  assuming \code{high=TRUE}.
+  That is, AUC is the probability that a randomly-selected data point
+  has a higher value of the covariate \code{Z} than does a
+  randomly-selected spatial location. The AUC is a number between 0 and 1. 
+  A value of 0.5 indicates a complete lack of discriminatory power.
+  
+  For a fitted point process model \code{X},
+  the AUC measures the ability of the
+  fitted model intensity to separate the spatial domain
+  into areas of high and low density of points.
+  Suppose \eqn{\lambda(u)}{\lambda(u)} is the intensity function of the model.
+  The AUC is the probability that
+  \eqn{\lambda(x_i) > \lambda(U)}{\lambda(x[i]) > \lambda(U)}.
+  That is, AUC is the probability that a randomly-selected data point
+  has higher predicted intensity than does a randomly-selected spatial
+  location.
+  The AUC is \bold{not} a measure of the goodness-of-fit of the model
+  (Lobo et al, 2007).
+}
+\value{
+  A numeric vector of length 2 giving the AUC value 
+  and the theoretically expected AUC value for this model.
+}
+\references{
+  Lobo, J.M.,
+  \ifelse{latex}{\out{Jim{\'e}nez}}{Jimenez}-Valverde, A.
+  and Real, R. (2007)
+  AUC: a misleading measure of the performance of predictive
+  distribution models.
+  \emph{Global Ecology and Biogeography} \bold{17}(2) 145--151.
+
+  Nam, B.-H. and D'Agostino, R. (2002)
+  Discrimination index, the area under the {ROC} curve.
+  Pages 267--279 in 
+  Huber-Carol, C., Balakrishnan, N., Nikulin, M.S. 
+  and Mesbah, M., \emph{Goodness-of-fit tests and model validity},
+  \ifelse{latex}{\out{Birkh{\"a}user}}{Birkhauser}, Basel.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{roc}}
+}
+\examples{
+  fit <- ppm(swedishpines ~ x+y)
+  auc(fit)
+  auc(swedishpines, "x")
+}
+\keyword{spatial}
diff --git a/man/austates.Rd b/man/austates.Rd
new file mode 100644
index 0000000..3bdf599
--- /dev/null
+++ b/man/austates.Rd
@@ -0,0 +1,36 @@
+\name{austates}
+\alias{austates}
+\docType{data}
+\title{
+  Australian States and Mainland Territories
+}
+\description{
+  The states and large mainland territories of Australia are
+  represented as polygonal regions forming a tessellation.
+}
+\usage{data(austates)}
+\format{
+  Object of class \code{"tess"}.
+}
+\details{
+  Western Australia, South Australia, Queensland,
+  New South Wales, Victoria and Tasmania (which are states of Australia)
+  and the Northern Territory (which is a `territory' of Australia)
+  are represented as polygonal regions.
+
+  Offshore territories, and smaller mainland territories, are not shown.
+
+  The dataset \code{austates} is a tessellation object (class \code{"tess"})
+  whose tiles are the states and territories.
+
+  The coordinates are latitude and
+  longitude in degrees, so the space is effectively a Mercator
+  projection of the earth.
+}
+\source{
+  Obtained from the \pkg{oz} package and reformatted.
+}
+\examples{
+data(austates); plot(austates)
+}
+\keyword{datasets}
diff --git a/man/bc.ppm.Rd b/man/bc.ppm.Rd
new file mode 100644
index 0000000..8b45abb
--- /dev/null
+++ b/man/bc.ppm.Rd
@@ -0,0 +1,75 @@
+\name{bc.ppm}
+\alias{bc}
+\alias{bc.ppm}
+\title{
+  Bias Correction for Fitted Model
+}
+\description{
+  Applies a first-order bias correction to a fitted model.
+}
+\usage{
+  bc(fit, \dots)
+
+  \method{bc}{ppm}(fit, \dots, nfine = 256)
+}
+\arguments{
+  \item{fit}{
+    A fitted point process model (object of class \code{"ppm"})
+    or a model of some other class.
+  }
+  \item{\dots}{
+    Additional arguments are currently ignored.
+  }
+  \item{nfine}{
+    Grid dimensions for fine grid of locations.
+    An integer, or a pair of integers. See Details.
+  }
+}
+\details{
+  This command applies the first order Newton-Raphson bias correction method of
+  Baddeley and Turner (2014, sec 4.2) to a fitted model.
+  The function \code{bc} is generic, with a method for fitted point
+  process models of class \code{"ppm"}.
+
+  A fine grid of locations, of dimensions \code{nfine * nfine} or
+  \code{nfine[2] * nfine[1]}, is created over the original window of the
+  data, and the intensity or conditional intensity of the fitted model is
+  calculated on this grid. The result is used to update the fitted
+  model parameters once by a Newton-Raphson update.
+
+  This is only useful if the quadrature points used to fit the original
+  model \code{fit} are coarser than the grid of points specified by
+  \code{nfine}. 
+}
+\value{
+  A numeric vector, of the same length as \code{coef(fit)}, giving updated
+  values for the fitted model coefficients.
+}
+\references{
+  Baddeley, A. and Turner, R. (2014)
+  Bias correction for parameter estimates of spatial point process models.
+  \emph{Journal of Statistical Computation and Simulation}
+  \bold{84}, 1621--1643.
+  DOI: 10.1080/00949655.2012.755976
+}
+\author{
+  \adrian
+  and
+  \rolf.
+}
+\seealso{
+  \code{\link{rex}}
+}
+\examples{
+  fit <- ppm(cells ~ x, Strauss(0.07))
+  coef(fit)
+  if(!interactive()) {
+    bc(fit, nfine=64)
+  } else {
+    bc(fit)
+  }
+}
+\keyword{spatial}
+\keyword{models}
+\keyword{math}
+\keyword{optimize}
diff --git a/man/bdist.pixels.Rd b/man/bdist.pixels.Rd
new file mode 100644
index 0000000..29db4a1
--- /dev/null
+++ b/man/bdist.pixels.Rd
@@ -0,0 +1,84 @@
+\name{bdist.pixels}
+\alias{bdist.pixels}
+\title{Distance to Boundary of Window}
+\description{
+  Computes the distances 
+  from each pixel in a window to the boundary of the window.
+}
+\usage{
+ bdist.pixels(w, \dots, style="image", method=c("C", "interpreted"))
+}
+\arguments{
+  \item{w}{A window (object of class \code{"owin"}).}
+  \item{\dots}{Arguments passed to \code{\link{as.mask}} to determine
+    the pixel resolution.}
+  \item{style}{Character string determining the format of
+    the output: either \code{"matrix"}, \code{"coords"} or
+    \code{"image"}.
+  }
+  \item{method}{Choice of algorithm to use when \code{w} is polygonal.}
+}
+\value{
+  If \code{style="image"}, a pixel image (object of class \code{"im"})
+  containing the distances from each pixel in the image raster
+  to the boundary of the window.
+
+  If \code{style="matrix"},
+  a matrix giving the distances from each pixel in the image raster
+  to the boundary of the window. Rows of this matrix correspond to
+  the \eqn{y} coordinate and columns to the \eqn{x} coordinate.
+  
+  If \code{style="coords"}, a list with three components
+  \code{x,y,z}, where \code{x,y} are vectors of length \eqn{m,n}
+  giving the \eqn{x} and \eqn{y} coordinates respectively,
+  and \code{z} is an \eqn{m \times n}{m x n} matrix such that
+  \code{z[i,j]} is the distance from \code{(x[i],y[j])} to the
+  boundary of the window. Rows of this matrix correspond to the
+  \eqn{x} coordinate and columns to the \eqn{y} coordinate.
+  This result can be plotted with \code{persp}, \code{image}
+  or \code{contour}.
+}
+\details{
+  This function computes, for each pixel \eqn{u}
+  in the window \code{w}, the shortest distance
+  \eqn{d(u, W^c)}{dist(u, W')} from \eqn{u}
+  to the boundary of \eqn{W}.
+
+  If the window is a binary mask then the distance from each pixel
+  to the boundary is computed using the distance transform algorithm
+  \code{\link{distmap.owin}}. The result is equivalent to
+  \code{distmap(W, invert=TRUE)}.
+
+  If the window is a rectangle or a polygonal region,
+  the grid of pixels is determined by the arguments \code{"\dots"} 
+  passed to \code{\link{as.mask}}. The distance from each pixel to the
+  boundary is calculated exactly, using analytic geometry.
+  This is slower but more accurate than in the case of a binary mask.
+
+  For software testing purposes, there are two implementations
+  available when \code{w} is a polygon: the default is \code{method="C"}
+  which is much faster than \code{method="interpreted"}.
+}
+\seealso{
+  \code{\link{owin.object}},
+  \code{\link{erosion}},
+  \code{\link{bdist.points}},
+  \code{\link{bdist.tiles}},
+  \code{\link{distmap.owin}}.
+}
+\examples{
+  u <- owin(c(0,1),c(0,1))
+  d <- bdist.pixels(u, eps=0.01)
+  image(d)
+  d <- bdist.pixels(u, eps=0.01, style="matrix")
+  mean(d >= 0.1)
+  # value is approx (1 - 2 * 0.1)^2 = 0.64
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/bdist.points.Rd b/man/bdist.points.Rd
new file mode 100644
index 0000000..8b23d04
--- /dev/null
+++ b/man/bdist.points.Rd
@@ -0,0 +1,50 @@
+\name{bdist.points}
+\alias{bdist.points}
+\title{Distance to Boundary of Window}
+\description{
+  Computes the distances 
+  from each point of a point pattern
+  to the boundary of the window.
+}
+\usage{
+ bdist.points(X)
+}
+\arguments{
+  \item{X}{A point pattern (object of class \code{"ppp"}).}
+}
+\value{
+  A numeric vector, giving the distances from each point of the pattern
+  to the boundary of the window.
+}
+\details{
+  This function computes, for each point \eqn{x_i}{x[i]}
+  in the point pattern \code{X}, the shortest distance
+  \eqn{d(x_i, W^c)}{dist(x[i], W')} from \eqn{x_i}{x[i]}
+  to the boundary of the window \eqn{W} of observation.
+
+  If the window \code{Window(X)} is of type \code{"rectangle"}
+  or \code{"polygonal"}, then these distances are computed by
+  analytic geometry and are exact, up to rounding errors.
+  If the window is of type \code{"mask"} then the distances
+  are computed using the real-valued distance transform,
+  which is an approximation with maximum error equal to the width
+  of one pixel in the mask.
+}
+\seealso{
+  \code{\link{bdist.pixels}},
+  \code{\link{bdist.tiles}},
+  \code{\link{ppp.object}},
+  \code{\link{erosion}}
+}
+\examples{
+  data(cells)
+  d <- bdist.points(cells)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/bdist.tiles.Rd b/man/bdist.tiles.Rd
new file mode 100644
index 0000000..2e0fa2b
--- /dev/null
+++ b/man/bdist.tiles.Rd
@@ -0,0 +1,47 @@
+\name{bdist.tiles}
+\alias{bdist.tiles}
+\title{Distance to Boundary of Window}
+\description{
+  Computes the shortest distances 
+  from each tile in a tessellation
+  to the boundary of the window.
+}
+\usage{
+ bdist.tiles(X)
+}
+\arguments{
+  \item{X}{A tessellation (object of class \code{"tess"}).}
+}
+\value{
+  A numeric vector, 
+  giving the shortest distance from each tile in the tessellation
+  to the boundary of the window.
+  Entries of the vector correspond to the entries of \code{tiles(X)}.
+}
+\details{
+  This function computes, for each tile \eqn{s_i}{s[[i]]}
+  in the tessellation \code{X}, the shortest distance
+  from \eqn{s_i}{s[[i]]}
+  to the boundary of the window \eqn{W} containing the tessellation.
+}
+\seealso{
+  \code{\link{tess}},
+  \code{\link{bdist.points}},
+  \code{\link{bdist.pixels}}
+}
+\examples{
+  P <- runifpoint(15)
+  X <- dirichlet(P)
+  plot(X, col="red")
+  B <- bdist.tiles(X)
+  # identify tiles that do not touch the boundary
+  plot(X[B > 0], add=TRUE, col="green", lwd=3)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/bdspots.Rd b/man/bdspots.Rd
new file mode 100644
index 0000000..c36d7f8
--- /dev/null
+++ b/man/bdspots.Rd
@@ -0,0 +1,87 @@
+\name{bdspots}
+\alias{bdspots}
+\docType{data}
+\title{
+  Breakdown Spots in Microelectronic Materials
+}
+\description{
+  A list of three point patterns, each giving the locations of
+  electrical breakdown spots on a circular electrode in
+  a microelectronic capacitor.
+}
+\usage{data(bdspots)}
+\format{
+  A list (of class \code{"listof"}) of three spatial point patterns,
+  each representing the spatial locations of breakdown spots on an
+  electrode. The three electrodes are circular discs, of radii
+  169, 282 and 423 microns respectively. Spatial coordinates are
+  given in microns.  
+}
+\details{
+  The application of successive voltage sweeps to the metal gate electrode
+  of a microelectronic capacitor generates multiple breakdown spots
+  on the electrode.
+  The spatial distribution of these breakdown spots
+  in MIM (metal-insulator-metal) and MIS (metal-insulator-semiconductor)
+  structures was observed and analysed by 
+  Miranda et al (2010, 2013) and Saura et al (2013a, 2013b, 2014).
+
+  The data given here are the breakdown spot patterns for three circular
+  electrodes of different radii, 169, 282 and 423 microns respectively,
+  in MIM structures analysed in Saura et al (2013a).
+}
+\source{
+  Professor Enrique Miranda, Departament d'Enginyeria
+  \ifelse{latex}{\out{Electr{\`o}nica}}{Electronica},
+  Escola d'Enginyeria, Universitat
+  \ifelse{latex}{\out{Aut{\`o}noma}}{Autonoma}
+  de Barcelona, Barcelona, Spain.
+}
+\references{
+  Miranda, E. and O'Connor, E. and Hurley, P.K. (2010)
+  Simulation of the breakdown spots spatial
+  distribution in high-\emph{K} dielectrics and model
+  validation using the \pkg{spatstat} package for \emph{R} language.
+  \emph{ECS Transactions} \bold{33} (3) 557--562.
+
+  Miranda, E.,
+  \ifelse{latex}{\out{Jim{\'e}nez}}{Jimenez}, D.,
+  \ifelse{latex}{\out{Su{\~n}{\'e}}}{Sune}, J.,
+  O'Connor, E.,
+  Monaghan, S.,
+  Povey, I.,
+  Cherkaoui, K. and Hurley, P. K. (2013)
+  Nonhomogeneous spatial distribution of filamentary leakage current
+  paths in circular area Pt/HfO2/Pt capacitors.
+  \emph{J. Vac. Sci. Technol. B} \bold{31}, 01A107.
+
+  Saura, X.,
+  \ifelse{latex}{\out{Su{\~n}{\'e}}}{Sune}, J.,
+  Monaghan, S., Hurley, P.K. and Miranda, E. (2013a)
+  Analysis of the breakdown spot spatial distribution in Pt/HfO2/Pt
+  capacitors using nearest neighbor statistics.
+  \emph{J. Appl. Phys.} \bold{114}, 154112.
+
+  Saura, X., Moix, D.,
+  \ifelse{latex}{\out{Su{\~n}{\'e}}}{Sune}, J.,
+  Hurley, P.K. and Miranda, E. (2013b)
+  Direct observation of the generation of breakdown spots in MIM
+  structures under constant voltage stress.
+  \emph{Microelectronics Reliability} \bold{53}, 1257--1260.
+
+  Saura, X.,
+  Monaghan, S.,
+  Hurley, P.K.,
+  \ifelse{latex}{\out{Su{\~n}{\'e}}}{Sune}, J.
+  and Miranda, E. (2014)
+  Failure analysis of MIM and MIS structures using point-to-event
+  distance and angular probability distributions. 
+  \emph{IEEE Transactions on Devices and Materials Reliability} 
+  \bold{14} (4) 1080--1090.
+}
+\examples{
+data(bdspots)
+plot(bdspots, equal.scales=TRUE)
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/beachcolours.Rd b/man/beachcolours.Rd
new file mode 100644
index 0000000..2261a87
--- /dev/null
+++ b/man/beachcolours.Rd
@@ -0,0 +1,94 @@
+\name{beachcolours}
+\alias{beachcolours}
+\alias{beachcolourmap}
+\title{
+  Create Colour Scheme for a Range of Numbers
+}
+\description{
+  Given a range of numerical values, this command creates a
+  colour scheme that would be appropriate if the numbers were
+  altitudes (elevation above or below sea level).
+}
+\usage{
+beachcolours(range, sealevel = 0, monochrome = FALSE,
+             ncolours = if (monochrome) 16 else 64,
+             nbeach = 1)
+beachcolourmap(range, ...) 
+}
+\arguments{
+  \item{range}{
+    Range of numerical values to be mapped.
+    A numeric vector of length 2.
+  }
+  \item{sealevel}{
+    Value that should be treated as zero.
+    A single number,
+    lying between \code{range[1]} and \code{range[2]}.
+  }
+  \item{monochrome}{
+    Logical. If \code{TRUE} then a greyscale colour map is
+    constructed. 
+  }
+  \item{ncolours}{
+    Number of distinct colours to use.
+  }
+  \item{nbeach}{
+    Number of colours that will be yellow.
+  }
+  \item{\dots}{Arguments passed to \code{beachcolours}.}
+}
+\details{
+  Given a range of numerical values, these commands create a
+  colour scheme that would be appropriate if the numbers were
+  altitudes (elevation above or below sea level).
+
+  Numerical values close to zero are portrayed in green (representing
+  the waterline). Negative values are blue (representing water)
+  and positive values are yellow to red (representing land).
+  At least, these are the colours of land and sea in Western Australia.
+  This colour scheme was proposed by Baddeley et al (2005).
+
+  The function \code{beachcolours} returns these colours
+  as a character vector, while \code{beachcolourmap}
+  returns a colourmap object.
+
+  The argument \code{range} should be a numeric vector of
+  length 2 giving a range of numerical values.
+
+  The argument \code{sealevel} specifies the height value that will
+  be treated as zero, and mapped to the colour green.
+  A vector of \code{ncolours} colours will be created,
+  of which \code{nbeach} colours will be green.
+
+  The argument \code{monochrome} is included
+  for convenience when preparing publications.
+  If \code{monochrome=TRUE} the colour map will be 
+  a simple grey scale containing \code{ncolours}
+  shades from black to white.
+}
+\value{
+  For \code{beachcolours}, 
+  a character vector of length \code{ncolours} specifying colour values.
+  For \code{beachcolourmap}, a colour map (object of class \code{"colourmap"}).
+}
+\references{
+  Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005)
+  Residual analysis for spatial point processes.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{67}, 617--666.
+}
+\seealso{
+  \code{\link{colourmap}}, 
+  \code{\link[spatstat:colourtools]{colourtools}}.
+}
+\examples{
+  plot(beachcolourmap(c(-2,2)))
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{color}
diff --git a/man/beginner.Rd b/man/beginner.Rd
new file mode 100644
index 0000000..c609ced
--- /dev/null
+++ b/man/beginner.Rd
@@ -0,0 +1,44 @@
+\name{beginner}
+\alias{beginner}
+\title{
+  Print Introduction For Beginners
+}
+\description{
+  Prints an introduction for beginners to the \code{spatstat} package,
+  or another specified package.
+}
+\usage{
+beginner(package = "spatstat")
+}
+\arguments{
+  \item{package}{
+    Name of package.
+  }
+}
+\details{
+  This function prints an introduction for beginners
+  to the \pkg{spatstat} package.
+
+  The function can be executed simply by typing \code{beginner}
+  without parentheses.
+
+  If the argument \code{package} is given, then the function prints the
+  beginner's help file \code{BEGINNER.txt} from the specified package
+  (if it has one).
+}
+\value{
+  Null.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{latest.news}}
+}
+\examples{
+  beginner
+}
+\keyword{documentation}
diff --git a/man/begins.Rd b/man/begins.Rd
new file mode 100644
index 0000000..a0e4eff
--- /dev/null
+++ b/man/begins.Rd
@@ -0,0 +1,42 @@
+\name{begins}
+\alias{begins}
+\title{
+  Check Start of Character String 
+}
+\description{
+  Checks whether a character string begins with a particular prefix.
+}
+\usage{
+begins(x, firstbit)
+}
+\arguments{
+  \item{x}{
+    Character string, or vector of character strings, to be tested.
+  }
+  \item{firstbit}{
+    A single character string.
+  }
+}
+\details{
+  This simple wrapper function checks whether (each entry in) \code{x}
+  begins with the string \code{firstbit}, and returns a logical value
+  or logical vector with one entry for each entry of \code{x}.
+  This function is useful mainly for reducing complexity in model formulae.
+}
+\value{
+  Logical vector of the same length as \code{x}.
+}
+\author{
+\adrian
+
+
+\rolf
+
+and \ege
+
+}
+\examples{
+  begins(c("Hello", "Goodbye"), "Hell")
+  begins("anything", "")
+}
+\keyword{character}
diff --git a/man/bei.Rd b/man/bei.Rd
new file mode 100644
index 0000000..fac4e02
--- /dev/null
+++ b/man/bei.Rd
@@ -0,0 +1,71 @@
+\name{bei}
+\alias{bei}
+\alias{bei.extra}
+\docType{data}
+\title{Tropical rain forest trees}
+\description{
+  A point pattern giving the locations of 3605
+  trees in a tropical rain forest.
+  Accompanied by covariate data giving the elevation (altitude)
+  and slope of elevation in the study region.
+} 
+\format{
+  \code{bei} is an object of class \code{"ppp"}
+  representing the point pattern of tree locations.
+  See \code{\link{ppp.object}} for details of the format.
+
+  \code{bei.extra} is a list containing
+  two pixel images, \code{elev} (elevation in metres) and
+  \code{grad} (norm of elevation gradient). These pixel images are objects
+  of class \code{"im"}, see \code{\link{im.object}}.
+}
+\usage{data(bei)}
+\source{
+  Hubbell and Foster (1983), Condit, Hubbell and Foster (1996)
+  and Condit (1998).
+  Data files kindly supplied by Rasmus Waagepetersen.
+  The data were collected in the forest dynamics plot of Barro Colorado
+  Island. The study was made possible through the generous support of
+  the U.S. National Science Foundation, the John D. and Catherine
+  T. MacArthur Foundation, and the Smithsonian Tropical Research Institute.
+}
+\section{Notes}{
+  The dataset \code{bei} gives the positions of 3605 trees
+  of the species \emph{Beilschmiedia pendula} (Lauraceae)
+  in a 1000 by 500 metre rectangular sampling region
+  in the tropical rainforest of Barro Colorado Island.
+
+  The accompanying dataset  \code{bei.extra} gives information
+  about the altitude (elevation) in the study region. It is a list
+  containing two pixel images, \code{elev} (elevation in metres) and
+  \code{grad} (norm of elevation gradient).
+
+  These data are part of a much larger dataset containing the positions of
+  hundreds of thousands of trees belong to thousands of species;
+  see Hubbell and Foster (1983), Condit, Hubbell and Foster (1996)
+  and Condit (1998).
+
+  The present data were analysed by \ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen (2007).
+}
+\references{
+  Condit, R. (1998) \emph{Tropical Forest Census Plots}.
+  Springer-Verlag, Berlin and R.G. Landes Company, Georgetown, Texas.
+
+  Condit, R., Hubbell, S.P and Foster, R.B. (1996)
+  Changes in tree species abundance in a neotropical forest: impact of
+  climate change. \emph{Journal of Tropical Ecology} \bold{12},
+  231--256.
+
+  Hubbell, S.P and Foster, R.B. (1983)
+  Diversity of canopy trees in a neotropical forest and implications for
+  conservation. In: \emph{Tropical Rain Forest: Ecology and Management}
+  (eds. S.L. Sutton, T.C. Whitmore and A.C. Chadwick),
+  Blackwell Scientific Publications, Oxford, 25--41.
+
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R.P. (2007)
+  Modern spatial point process modelling and inference (with discussion).
+  \emph{Scandinavian Journal of Statistics} \bold{34}, 643--711.
+} 
+\keyword{datasets}
+\keyword{spatial}
+
diff --git a/man/berman.test.Rd b/man/berman.test.Rd
new file mode 100644
index 0000000..2bdf423
--- /dev/null
+++ b/man/berman.test.Rd
@@ -0,0 +1,194 @@
+\name{berman.test} 
+\alias{berman.test}
+\alias{berman.test.ppm}
+\alias{berman.test.ppp}
+\alias{berman.test.lppm}
+\alias{berman.test.lpp}
+\title{Berman's Tests for Point Process Model} 
+\description{
+  Tests the goodness-of-fit of a Poisson point process model
+  using methods of Berman (1986).
+}
+\usage{
+berman.test(...)
+
+\method{berman.test}{ppp}(X, covariate,
+                         which = c("Z1", "Z2"),
+        alternative = c("two.sided", "less", "greater"), ...)
+
+\method{berman.test}{ppm}(model, covariate,
+                         which = c("Z1", "Z2"),
+               alternative = c("two.sided", "less", "greater"), ...)
+
+\method{berman.test}{lpp}(X, covariate,
+                         which = c("Z1", "Z2"),
+        alternative = c("two.sided", "less", "greater"), ...)
+
+\method{berman.test}{lppm}(model, covariate,
+                         which = c("Z1", "Z2"),
+               alternative = c("two.sided", "less", "greater"), ...)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"} or \code{"lpp"}).
+  }
+  \item{model}{
+    A fitted point process model (object of class \code{"ppm"} or
+    \code{"lppm"}).
+  }
+  \item{covariate}{
+    The spatial covariate on which the test will be based.
+    An image (object of class \code{"im"})
+    or a function.
+  }
+  \item{which}{
+    Character string specifying the choice of test.
+  }
+  \item{alternative}{
+    Character string specifying the alternative hypothesis.
+  }
+  \item{\dots}{
+    Additional arguments controlling the pixel resolution
+    (arguments \code{dimyx} and \code{eps} passed to
+    \code{\link{as.mask}})
+    or other undocumented features.
+  }
+}
+\details{
+  These functions perform a goodness-of-fit test of a Poisson point
+  process model fitted to point pattern data. The observed distribution
+  of the values of a spatial covariate at the data points,
+  and the predicted distribution of the same values under the model,
+  are compared using either of two test statistics
+  \eqn{Z_1}{Z[1]} and \eqn{Z_2}{Z[2]} proposed by Berman (1986).
+  The \eqn{Z_1}{Z[1]} test is also known as the
+  Lawson-Waller test.
+
+  The function \code{berman.test} is generic, with methods for
+  point patterns (\code{"ppp"} or \code{"lpp"})
+  and point process models (\code{"ppm"} or \code{"lppm"}).
+  \itemize{
+    \item 
+    If \code{X} is a point pattern dataset (object of class
+    \code{"ppp"} or \code{"lpp"}), then
+    \code{berman.test(X, ...)} performs a goodness-of-fit test of the
+    uniform Poisson point process (Complete Spatial Randomness, CSR)
+    for this dataset.
+    \item
+    If \code{model} is a fitted point process model
+    (object of class \code{"ppm"} or \code{"lppm"})
+    then \code{berman.test(model, ...)} performs
+    a test of goodness-of-fit for this fitted model. In this case,
+    \code{model} should be a Poisson point process.
+  }
+  
+  The test is performed by comparing the observed distribution
+  of the values of a spatial covariate at the data points,
+  and the predicted distribution of the same covariate under the model.
+  Thus, you must nominate a spatial covariate for this test.
+  
+  The argument \code{covariate} should be either a \code{function(x,y)}
+  or a pixel image (object of class \code{"im"} containing the values
+  of a spatial function.
+  If \code{covariate} is an image, it should have numeric values,
+  and its domain should cover the observation window of the
+  \code{model}. If \code{covariate} is a function, it should expect
+  two arguments \code{x} and \code{y} which are vectors of coordinates,
+  and it should return a numeric vector of the same length
+  as \code{x} and \code{y}.  
+
+  First the original data point pattern is extracted from \code{model}.
+  The values of the \code{covariate} at these data points are
+  collected.
+
+  Next the values of the \code{covariate} at all locations in the
+  observation window are evaluated. The point process intensity
+  of the fitted model is also evaluated at all locations in the window.
+
+  \itemize{
+    \item If \code{which="Z1"},
+    the test statistic \eqn{Z_1}{Z[1]} is computed as follows.
+    The sum \eqn{S} of the covariate values at all data
+    points is evaluated. The predicted mean \eqn{\mu}{\mu} and variance
+    \eqn{\sigma^2}{\sigma^2} of \eqn{S} are computed
+    from the values of the covariate at all locations in the window.
+    Then we compute \eqn{Z_1 = (S-\mu)/\sigma}{Z[1]=(S-\mu)/\sigma}.
+    Closely-related tests were proposed independently 
+    by Waller et al (1993) and Lawson (1993)
+    so this test is often termed the 
+    Lawson-Waller test in epidemiological literature.
+    \item If \code{which="Z2"},
+    the test statistic \eqn{Z_2}{Z[2]} is computed as follows.
+    The values of the \code{covariate} at all locations in the
+    observation window, weighted by the point process intensity,
+    are compiled into a cumulative distribution function \eqn{F}.
+    The probability integral transformation is then applied:
+    the values of the \code{covariate} at the original data points
+    are transformed by the predicted cumulative distribution function
+    \eqn{F} into numbers between 0 and 1. If the model is correct,
+    these numbers are i.i.d. uniform random numbers.
+    The standardised sample mean of these numbers is the
+    statistic \eqn{Z_2}{Z[2]}. 
+  }
+  In both cases the null distribution of the test statistic
+  is the standard normal distribution, approximately.
+
+  The return value is an object of class \code{"htest"} containing the
+  results of the hypothesis test. The print method for this class
+  gives an informative summary of the test outcome.
+}
+\value{
+  An object of class \code{"htest"} (hypothesis test)
+  and also of class \code{"bermantest"},
+  containing the results of the test. The return value can be
+  plotted (by \code{\link{plot.bermantest}}) or printed
+  to give an informative summary of the test.
+}
+\section{Warning}{
+  The meaning of a one-sided test must be carefully scrutinised: see
+  the printed output.
+}
+\author{\adrian
+  
+  ,
+  \rolf
+  
+  and \ege.
+}
+\seealso{
+  \code{\link{cdf.test}},
+  \code{\link{quadrat.test}},
+  \code{\link{ppm}}
+}
+\references{
+  Berman, M. (1986)
+  Testing for spatial association between a point process
+  and another stochastic process.
+  \emph{Applied Statistics} \bold{35}, 54--62.
+
+  Lawson, A.B. (1993)
+  On the analysis of mortality events around a
+  prespecified fixed point.
+  \emph{Journal of the Royal Statistical Society, Series A}
+  \bold{156} (3) 363--377.
+
+  Waller, L., Turnbull, B., Clark, L.C. and Nasca, P. (1992)
+  Chronic Disease Surveillance and testing of
+  clustering of disease and exposure: Application to
+  leukaemia incidence and TCE-contaminated dumpsites
+  in upstate New York.
+  \emph{Environmetrics} \bold{3}, 281--300.
+}
+\examples{
+   # Berman's data
+   data(copper)
+   X <- copper$SouthPoints
+   L <- copper$SouthLines
+   D <- distmap(L, eps=1)
+   # test of CSR
+   berman.test(X, D)
+   berman.test(X, D, "Z2")
+}
+\keyword{htest}
+\keyword{spatial}
+
diff --git a/man/betacells.Rd b/man/betacells.Rd
new file mode 100644
index 0000000..9f90460
--- /dev/null
+++ b/man/betacells.Rd
@@ -0,0 +1,101 @@
+\name{betacells}
+\alias{betacells}
+\docType{data}
+\title{Beta Ganglion Cells in Cat Retina} 
+\description{
+  Point pattern of cells in the retina, each cell classified as `on' or
+  `off' and labelled with the cell profile area.
+} 
+\format{
+  \code{betacells} is an object of class \code{"ppp"}
+  representing the point pattern of cell locations.
+  Entries include
+  \tabular{ll}{
+    \code{x} \tab Cartesian \eqn{x}-coordinate of cell \cr
+    \code{y} \tab Cartesian \eqn{y}-coordinate of cell \cr
+    \code{marks} \tab data frame of marks
+  }
+  Cartesian coordinates are given in microns.
+
+  The data frame of marks has two columns:
+  \tabular{ll}{
+    \code{type} \tab factor with levels \code{off} and \code{on} \cr
+                \tab indicating ``off'' and ``on'' cells\cr
+    \code{area} \tab numeric vector giving the \cr
+		\tab areas of cell profiles (in square microns)
+  }
+  See \code{\link{ppp.object}} for details of the format.
+}
+\usage{data(betacells)}
+\source{
+  W\"assle et al (1981), Figure 6(a),
+  scanned and processed by Stephen Eglen
+  \email{S.J.Eglen at damtp.cam.ac.uk}
+}
+\section{Notes}{
+  This is a new, corrected version of the old dataset
+  \code{\link{ganglia}}. See below.
+  
+  These data represent a pattern of beta-type ganglion cells
+  in the retina of a cat recorded by W\"assle et al. (1981).
+  Beta cells are associated
+  with the resolution of fine detail in the cat's visual system.
+  They can be classified anatomically as ``on'' or ``off''.
+ 
+  Statistical independence of the arrangement of the
+  ``on''- and ``off''-components
+  would strengthen the evidence for Hering's (1878) `opponent theory'
+  that there are two separate channels for sensing
+  ``brightness'' and ``darkness''.
+  See W\"assle et al (1981). There is considerable current interest
+  in the arrangement of cell mosaics in the retina, see
+  Rockhill et al (2000).
+
+  The dataset is a marked point pattern giving the locations,
+  types (``on'' or ``off''), and profile areas of beta cells observed
+  in a rectangle of dimensions \eqn{750 \times 990}{750 x 990} microns.
+  Coordinates are given in microns (thousandths of a millimetre)
+  and areas are given in square microns.
+  
+  The original source is Figure 6 of W\"assle et al (1981),
+  which is a manual drawing of the beta mosaic observed
+  in a microscope field-of-view of a whole mount of the retina.
+  Thus, all beta cells in the retina were effectively projected onto the same
+  two-dimensional plane.
+  
+  The data were scanned in 2004 by Stephen Eglen from
+  Figure 6(a) of W\"assle et al (1981).
+  Image analysis software was used to identify the soma (cell
+  body). The \eqn{x,y} location of each cell was taken to be the
+  centroid of the soma. The type of each cell (``on'' or `off'')
+  was identified by referring to Figures 6(b) and 6(d).
+  The area of each soma (in square microns) was also computed.
+
+  Note that this is a corrected version of the \code{\link{ganglia}}  
+  dataset provided in earlier versions of \pkg{spatstat}.
+  The earlier data \code{\link{ganglia}} were not faithful to the scale
+  in the original paper and contain some scanning errors.
+}
+\examples{
+   plot(betacells)
+   area <- marks(betacells)$area
+   plot(betacells \%mark\% sqrt(area/pi), markscale=1)
+}
+\references{
+Hering, E. (1878) Zur Lehre von Lichtsinn. Vienna.
+
+Van Lieshout, M.N.M. and Baddeley, A.J. (1999)
+Indices of dependence between types in multivariate point patterns.
+\emph{Scandinavian Journal of Statistics} \bold{26}, 511--532.
+
+Rockhill, R.L., Euler, T. and Masland, R.H. (2000)
+Spatial order within but not between types of retinal neurons.
+\emph{Proc. Nat. Acad. Sci. USA} \bold{97}(5), 2303--2307.
+
+W\"assle, H., Boycott, B. B. & Illing, R.-B. (1981).
+Morphology and mosaic of on- and off-beta cells in the cat retina and
+some functional considerations.
+\emph{Proc. Roy. Soc. London Ser. B} \bold{212}, 177--195.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/bind.fv.Rd b/man/bind.fv.Rd
new file mode 100644
index 0000000..6d1d1bb
--- /dev/null
+++ b/man/bind.fv.Rd
@@ -0,0 +1,114 @@
+\name{bind.fv}
+\alias{bind.fv}
+\alias{cbind.fv}
+\title{
+  Combine Function Value Tables
+}
+\description{
+  Advanced Use Only.
+  Combine objects of class \code{"fv"},
+  or glue extra columns of data onto an existing \code{"fv"} object.
+}
+\usage{
+\method{cbind}{fv}(...)
+bind.fv(x, y, labl = NULL, desc = NULL, preferred = NULL, clip=FALSE)
+}
+\arguments{
+  \item{\dots}{
+    Any number of arguments, which are objects of class \code{"fv"}.
+  }
+  \item{x}{
+    An object of class \code{"fv"}.
+  }
+  \item{y}{
+    Either a data frame or an object of class \code{"fv"}.
+  }
+  \item{labl}{
+    Plot labels (see \code{\link{fv}}) for columns of \code{y}.
+    A character vector. 
+  }
+  \item{desc}{
+    Descriptions (see \code{\link{fv}})
+    for columns of \code{y}. A character vector.
+  }
+  \item{preferred}{
+    Character string specifying the column which is to be the
+    new recommended value of the function.
+  }
+  \item{clip}{
+    Logical value indicating whether each object must have exactly the
+    same domain, that is, the same sequence of values of the function argument
+    (\code{clip=FALSE}, the default) or whether objects with different
+    domains are permissible and will be restricted
+    to a common domain (\code{clip=TRUE}).
+  }  
+}
+\details{
+  This documentation is provided
+  for experienced programmers who want to modify the internal
+  behaviour of \pkg{spatstat}.
+
+  The function \code{cbind.fv} is a method for the generic
+  \R function \code{\link{cbind}}. It combines any number of
+  objects of class \code{"fv"} into a single object of
+  class \code{"fv"}. The objects must be compatible, in the sense
+  that they have identical values of the function argument.
+  
+  The function \code{bind.fv} is a lower level
+  utility which glues additional columns onto an
+  existing object \code{x} of class \code{"fv"}.
+  It has two modes of use:
+  \itemize{
+    \item 
+    If the additional dataset \code{y} is an object of class \code{"fv"}, then
+    \code{x} and \code{y} must be compatible as described above.
+    Then the columns of \code{y} that contain function values
+    will be appended to the object \code{x}.
+    \item 
+    Alternatively if \code{y} is a data frame, then \code{y} must have the
+    same number of rows as \code{x}. All columns of \code{y} will be
+    appended to \code{x}.
+  }
+  The arguments \code{labl} and \code{desc} provide
+  plot labels and description strings (as described in \code{\link{fv}})
+  for the \emph{new} columns. If \code{y} is an object of class
+  \code{"fv"} then \code{labl} and \code{desc} are optional, and
+  default to the relevant entries in the object \code{y}. 
+  If \code{y} is a data frame then
+  \code{labl} and \code{desc} must be provided.
+}
+\value{
+  An object of class \code{"fv"}.
+}
+\author{
+  \spatstatAuthors.
+}
+\examples{
+   data(cells)
+   K1 <- Kest(cells, correction="border")
+   K2 <- Kest(cells, correction="iso")
+   # remove column 'theo' to avoid duplication
+   K2 <- K2[, names(K2) != "theo"]
+
+   cbind(K1, K2)
+
+   bind.fv(K1, K2, preferred="iso")
+
+   # constrain border estimate to be monotonically increasing
+   bm <- cumsum(c(0, pmax(0, diff(K1$border))))
+   bind.fv(K1, data.frame(bmono=bm),
+               "\%s[bmo](r)",
+               "monotone border-corrected estimate of \%s",
+               "bmono") 
+}
+\seealso{
+  \code{\link{fv}}, 
+  \code{\link{with.fv}}.
+
+  \emph{Undocumented} functions for modifying an \code{"fv"} object
+  include \code{fvnames}, \code{fvnames<-},
+  \code{tweak.fv.entry} and \code{rebadge.fv}.
+}
+\keyword{spatial}
+\keyword{attribute}
+
diff --git a/man/bits.test.Rd b/man/bits.test.Rd
new file mode 100644
index 0000000..a36a7bb
--- /dev/null
+++ b/man/bits.test.Rd
@@ -0,0 +1,137 @@
+\name{bits.test}
+\alias{bits.test}
+\title{
+  Balanced Independent Two-Stage Monte Carlo Test
+}
+\description{
+  Performs a Balanced Independent Two-Stage Monte Carlo test
+  of goodness-of-fit for spatial pattern.
+}
+\usage{
+bits.test(X, \dots,
+        exponent = 2, nsim=19, 
+        alternative=c("two.sided", "less", "greater"),
+        leaveout=1, interpolate = FALSE,
+        savefuns=FALSE, savepatterns=FALSE,
+        verbose = TRUE)
+}
+\arguments{
+  \item{X}{
+    Either a point pattern dataset (object of class \code{"ppp"},
+    \code{"lpp"} or \code{"pp3"}) or a fitted point process model
+    (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"}
+    or \code{"slrm"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{dclf.test}} or
+    \code{\link{mad.test}} or \code{\link{envelope}} to
+    control the conduct of the test.
+    Useful arguments include \code{fun} to determine the summary
+    function, \code{rinterval} to determine the range of
+    \eqn{r} values used in the test, 
+    and \code{use.theory} described under Details. 
+  }
+  \item{exponent}{
+    Exponent used in the test statistic. Use \code{exponent=2}
+    for the Diggle-Cressie-Loosmore-Ford test, and \code{exponent=Inf}
+    for the Maximum Absolute Deviation test.
+  }
+  \item{nsim}{
+    Number of replicates in each stage of the test.
+    A total of \code{nsim * (nsim + 1)} simulated point patterns will be
+    generated, and the \eqn{p}-value will be a multiple of \code{1/(nsim+1)}.
+  }
+  \item{alternative}{
+    Character string specifying the alternative hypothesis.
+    The default (\code{alternative="two.sided"}) is that the
+    true value of the summary function is not equal to the theoretical
+    value postulated under the null hypothesis.
+    If \code{alternative="less"} the alternative hypothesis is that the
+    true value of the summary function is lower than the theoretical value.
+  }
+  \item{leaveout}{
+    Optional integer 0, 1 or 2 indicating how to calculate the
+    deviation between the observed summary function and the
+    nominal reference value, when the reference value must be estimated
+    by simulation. See Details.
+  }
+  \item{interpolate}{
+    Logical value indicating whether to interpolate the distribution of
+    the test statistic by kernel smoothing, as described in
+    Dao and Genton (2014, Section 5).
+  }
+  \item{savefuns}{
+    Logical flag indicating whether to save the simulated
+    function values (from the first stage).
+  }
+  \item{savepatterns}{
+    Logical flag indicating whether to save the simulated
+    point patterns (from the first stage). 
+  }
+  \item{verbose}{
+    Logical value indicating whether to print progress reports.
+  }
+}
+\details{
+  Performs the Balanced Independent Two-Stage Monte Carlo test
+  proposed by Baddeley et al (2017),
+  an improvement of the Dao-Genton (2014) test.
+  
+  If \code{X} is a point pattern, the null hypothesis is CSR.
+
+  If \code{X} is a fitted model, the null hypothesis is that model.
+
+  The argument \code{use.theory} passed to \code{\link{envelope}}
+  determines whether to compare the summary function for the data
+  to its theoretical value for CSR (\code{use.theory=TRUE})
+  or to the sample mean of simulations from CSR
+  (\code{use.theory=FALSE}).
+
+  The argument \code{leaveout} specifies how to calculate the
+  discrepancy between the summary function for the data and the
+  nominal reference value, when the reference value must be estimated
+  by simulation. The values \code{leaveout=0} and
+  \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014,
+  Appendix) to computing the difference \code{observed - reference}
+  where the \code{reference} is the mean of simulated values.
+  The value \code{leaveout=2} gives the leave-two-out discrepancy
+  proposed by Dao and Genton (2014).
+}
+\value{
+  A hypothesis test (object of class \code{"htest"})
+  which can be printed to show the outcome of the test.
+}
+\references{
+  Dao, N.A. and Genton, M. (2014)
+  A Monte Carlo adjusted goodness-of-fit test for
+  parametric models describing spatial point patterns.
+  \emph{Journal of Computational and Graphical Statistics}
+  \bold{23}, 497--517.
+
+  Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne,
+  R.K. and Nair, G. (2014) On tests of spatial pattern based on
+  simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489.
+  
+  Baddeley, A., Hardegen, A., Lawrence, L., 
+  Milne, R.K., Nair, G.M. and Rakshit, S. (2017)
+  On two-stage Monte Carlo tests of composite hypotheses.
+  \emph{Computational Statistics and Data Analysis}, in press.
+}
+\author{
+  Adrian Baddeley, Andrew Hardegen, Tom Lawrence,
+  Robin Milne, Gopalan Nair and Suman Rakshit.
+  Implemented by \spatstatAuthors.
+}
+\seealso{
+  \code{\link{dg.test}},
+  \code{\link{dclf.test}},
+  \code{\link{mad.test}}
+}
+\examples{
+ ns <- if(interactive()) 19 else 4
+ bits.test(cells, nsim=ns)
+ bits.test(cells, alternative="less", nsim=ns)
+ bits.test(cells, nsim=ns, interpolate=TRUE)
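+ # a sketch, assuming a fitted Poisson model as the null hypothesis
+ fit <- ppm(cells ~ x)
+ bits.test(fit, nsim=ns)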
+}
+\keyword{spatial}
+\keyword{htest}
diff --git a/man/blur.Rd b/man/blur.Rd
new file mode 100644
index 0000000..4b9b5a3
--- /dev/null
+++ b/man/blur.Rd
@@ -0,0 +1,103 @@
+\name{blur}
+\alias{blur}
+\alias{Smooth.im}
+\title{Apply Gaussian Blur to a Pixel Image}
+\description{
+  Applies a Gaussian blur to a pixel image.
+}
+\usage{
+blur(x, sigma = NULL, ..., normalise=FALSE, bleed = TRUE, varcov=NULL)
+
+\method{Smooth}{im}(X, sigma = NULL, ...,
+                    normalise=FALSE, bleed = TRUE, varcov=NULL)
+}
+\arguments{
+  \item{x,X}{The pixel image. An object of class \code{"im"}.}
+  \item{sigma}{
+    Standard deviation of isotropic Gaussian smoothing kernel.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{normalise}{
+    Logical flag indicating whether the output values should be divided
+    by the corresponding blurred image of the window itself. See Details.
+  }
+  \item{bleed}{
+    Logical flag indicating whether to allow blur to extend outside the
+    original domain of the image. See Details.
+  }
+  \item{varcov}{
+    Variance-covariance matrix of anisotropic Gaussian kernel.
+    Incompatible with \code{sigma}.
+  }
+}
+\details{
+  This command applies a Gaussian blur to the pixel image \code{x}.
+
+  \code{Smooth.im} is a method for the generic \code{\link{Smooth}}
+  for pixel images. It is currently identical to \code{blur},
+  apart from the name of the first argument.
+  
+  The blurring kernel is the isotropic Gaussian kernel with standard
+  deviation \code{sigma}, or the anisotropic Gaussian kernel with
+  variance-covariance matrix \code{varcov}.
+  The arguments \code{sigma} and \code{varcov} are incompatible.
+  Also \code{sigma} may be a vector of length 2 giving the
+  standard deviations of two independent Gaussian coordinates,
+  thus equivalent to \code{varcov = diag(sigma^2)}.
+
+  If the pixel values of \code{x} include some \code{NA} values
+  (meaning that the image domain does not completely fill
+  the rectangular frame) then these \code{NA} values are first reset to zero.
+
+  The algorithm then computes the convolution \eqn{x \ast G}{x * G}
+  of the (zero-padded) pixel
+  image \eqn{x} with the specified Gaussian kernel \eqn{G}.
+  
+  If \code{normalise=FALSE}, then this convolution \eqn{x\ast G}{x * G}
+  is returned.
+  If \code{normalise=TRUE}, then the convolution \eqn{x \ast G}{x * G}
+  is normalised by
+  dividing it by the convolution \eqn{w \ast G}{w * G} of the image
+  domain \code{w}
+  with the same Gaussian kernel. Normalisation ensures that the result
+  can be interpreted as a weighted average of input pixel values,
+  without edge effects due to the shape of the domain.
+
+  If \code{bleed=FALSE}, then pixel values outside the original image
+  domain are set to \code{NA}. Thus the output is a pixel image with the
+  same domain as the input. If \code{bleed=TRUE}, then no such
+  alteration is performed, and the result is a pixel image defined
+  everywhere in the rectangular frame containing the input image.
+  
+  Computation is performed using the Fast Fourier Transform.
+}
+\value{
+  A pixel image with the same pixel array as the input image \code{x}.
+}
+\seealso{
+  \code{\link{interp.im}} for interpolating a pixel image to a finer resolution,
+  \code{\link{density.ppp}} for blurring a point pattern,
+  \code{\link{Smooth.ppp}} for interpolating marks attached to points.
+}
+\examples{
+   data(letterR)
+   Z <- as.im(function(x,y) { 4 * x^2 + 3 * y }, letterR)
+   par(mfrow=c(1,3))
+   plot(Z)
+   plot(letterR, add=TRUE)
+   plot(blur(Z, 0.3, bleed=TRUE))
+   plot(letterR, add=TRUE)
+   plot(blur(Z, 0.3, bleed=FALSE))
+   plot(letterR, add=TRUE)
+   par(mfrow=c(1,1))
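+   # a sketch: normalised blur gives a weighted average of pixel values,
+   # avoiding edge effects due to the shape of the domain
+   plot(blur(Z, 0.3, normalise=TRUE))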
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/border.Rd b/man/border.Rd
new file mode 100644
index 0000000..e4afb57
--- /dev/null
+++ b/man/border.Rd
@@ -0,0 +1,67 @@
+\name{border}
+\alias{border}
+\title{Border Region of a Window}
+\description{
+  Computes the border region of a window,
+  that is, the region lying within a specified distance of the boundary
+  of a window.
+}
+\usage{
+border(w, r, outside=FALSE, ...)
+}
+\arguments{
+  \item{w}{A window (object of class \code{"owin"})
+    or something acceptable to \code{\link{as.owin}}.
+  }
+  \item{r}{Numerical value.}
+  \item{outside}{Logical value determining whether to compute the
+    border outside or inside \code{w}.}
+  \item{\dots}{
+    Optional arguments passed to \code{\link{erosion}}
+    (if \code{outside=FALSE}) or to \code{\link{dilation}}
+    (if \code{outside=TRUE}).
+  }
+}
+\value{
+  A window (object of class \code{"owin"}).
+}
+\details{
+  By default (if \code{outside=FALSE}),
+  the border region is the subset of \code{w}
+  lying within a distance \code{r} of the boundary of \code{w}.
+  It is computed by eroding \code{w} by the distance \code{r} (using
+  \code{\link{erosion}}) and
+  subtracting this eroded window from the original window \code{w}.
+
+  If \code{outside=TRUE}, the border region is the set of locations
+  outside \code{w} lying within a distance \code{r} of \code{w}.
+  It is computed by dilating \code{w} by the distance \code{r}
+  (using \code{\link{dilation}}) and
+  subtracting the original window \code{w} from the dilated window.
+}
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{erosion}},
+  \code{\link{dilation}}
+}
+\examples{
+# rectangle
+   u <- unit.square()
+   border(u, 0.1)
+   border(u, 0.1, outside=TRUE)
+# polygon
+   \testonly{opa <- spatstat.options(npixel=32)}
+   data(letterR)
+   plot(letterR)
+   plot(border(letterR, 0.1), add=TRUE)
+   plot(border(letterR, 0.1, outside=TRUE), add=TRUE)
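+   # a sketch: fraction of the window area within distance 0.1 of its boundary
+   area.owin(border(letterR, 0.1))/area.owin(letterR)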
+   \testonly{spatstat.options(opa)}
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/bounding.box.xy.Rd b/man/bounding.box.xy.Rd
new file mode 100644
index 0000000..6dd8564
--- /dev/null
+++ b/man/bounding.box.xy.Rd
@@ -0,0 +1,58 @@
+\name{bounding.box.xy}
+\alias{bounding.box.xy}
+\title{Bounding Box of Points}
+\description{
+  Computes the smallest rectangle containing a set of points.
+}
+\usage{
+bounding.box.xy(x, y=NULL)
+}
+\arguments{
+  \item{x}{
+    vector of \code{x} coordinates of observed points,
+    or a 2-column matrix giving \code{x,y} coordinates,
+    or a list with components \code{x,y} giving coordinates
+    (such as a point pattern object of class \code{"ppp"}).
+  }
+  \item{y}{(optional) vector of \code{y} coordinates of observed points,
+    if \code{x} is a vector.}
+}
+\value{
+  A window (an object of class \code{"owin"}).
+}
+\details{
+  Given an observed pattern of points with coordinates 
+  given by \code{x} and \code{y}, this function finds the smallest
+  rectangle, with sides parallel to the coordinate axes, that contains
+  all the points, and returns it as a window.
+}
+\seealso{
+  \code{\link{owin}},
+  \code{\link{as.owin}},
+  \code{\link{convexhull.xy}},
+  \code{\link{ripras}}
+}
+\examples{
+  x <- runif(30)
+  y <- runif(30)
+  w <- bounding.box.xy(x,y)
+  plot(owin(), main="bounding.box.xy(x,y)")
+  plot(w, add=TRUE)
+  points(x,y)
+
+  X <- rpoispp(30)
+  plot(X, main="bounding.box.xy(X)")
+  plot(bounding.box.xy(X), add=TRUE)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{utilities}
+
+
+
+
diff --git a/man/boundingbox.Rd b/man/boundingbox.Rd
new file mode 100644
index 0000000..1b39c79
--- /dev/null
+++ b/man/boundingbox.Rd
@@ -0,0 +1,77 @@
+\name{boundingbox}
+\alias{boundingbox}
+\alias{boundingbox.default}
+\alias{boundingbox.im}
+\alias{boundingbox.owin}
+\alias{boundingbox.ppp}
+\alias{boundingbox.solist}
+\title{
+  Bounding Box of a Window, Image, or Point Pattern
+}
+\description{
+  Find the smallest rectangle containing a given window(s),
+  image(s) or point pattern(s).
+}
+\usage{
+boundingbox(\dots)
+
+\method{boundingbox}{default}(\dots)
+
+\method{boundingbox}{im}(\dots)
+
+\method{boundingbox}{owin}(\dots)
+
+\method{boundingbox}{ppp}(\dots)
+
+\method{boundingbox}{solist}(\dots)
+}
+\arguments{
+  \item{\dots}{One or more windows (objects of class \code{"owin"}),
+    pixel images (objects of class \code{"im"}) or
+    point patterns (objects of class \code{"ppp"}).
+    Alternatively, the argument may be a list of such objects,
+    of class \code{"solist"}.
+  }
+}
+\details{
+  This function finds the smallest rectangle (with sides parallel to
+  the coordinate axes) that contains all the given objects.
+
+  For a window (object of class \code{"owin"}), the bounding box
+  is the smallest rectangle that contains all the vertices of the
+  window (this is generally smaller than the enclosing frame,
+  which is returned by \code{\link{as.rectangle}}).
+
+  For a point pattern (object of class \code{"ppp"}), the bounding box
+  is the smallest rectangle that contains all the points of the pattern.
+
+  For a pixel image (object of class \code{"im"}), the image will
+  be converted to a window using \code{\link{as.owin}},
+  and the bounding box of this window is obtained.
+  
+  If the argument is a list of several objects, then
+  this function finds the smallest rectangle that contains
+  all the bounding boxes of the objects.
+}
+\value{
+  A window (object of class \code{"owin"}) of type \code{"rectangle"}
+  representing the bounding box.
+}
+\seealso{
+  \code{\link{owin}},
+  \code{\link{as.owin}},
+  \code{\link{as.rectangle}}
+}
+\examples{
+  w <- owin(c(0,10),c(0,10), poly=list(x=c(1,2,3,2,1), y=c(2,3,4,6,7)))
+  r <- boundingbox(w)
+  # returns rectangle [1,3] x [2,7]
+
+  w2 <- unit.square()
+  r <- boundingbox(w, w2)
+  # returns rectangle [0,3] x [0,7]
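+
+  # a sketch: bounding box of a point pattern
+  X <- runifpoint(42, square(2))
+  boundingbox(X)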
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{utilities}
diff --git a/man/boundingcircle.Rd b/man/boundingcircle.Rd
new file mode 100644
index 0000000..6d597cd
--- /dev/null
+++ b/man/boundingcircle.Rd
@@ -0,0 +1,85 @@
+\name{boundingcircle}
+\alias{boundingradius}
+\alias{boundingradius.owin}
+\alias{boundingradius.ppp}
+\alias{boundingcentre}
+\alias{boundingcircle}
+\alias{boundingcentre.owin}
+\alias{boundingcircle.owin}
+\alias{boundingcentre.ppp}
+\alias{boundingcircle.ppp}
+\title{
+  Smallest Enclosing Circle
+}
+\description{
+  Find the smallest circle enclosing a spatial window
+  or other object. Return its radius, or the location of its centre,
+  or the circle itself.
+}
+\usage{
+boundingradius(x, \dots)
+
+boundingcentre(x, \dots)
+
+boundingcircle(x, \dots)
+
+\method{boundingradius}{owin}(x, \dots)
+
+\method{boundingcentre}{owin}(x, \dots)
+
+\method{boundingcircle}{owin}(x, \dots)
+
+\method{boundingradius}{ppp}(x, \dots)
+
+\method{boundingcentre}{ppp}(x, \dots)
+
+\method{boundingcircle}{ppp}(x, \dots)
+}
+\arguments{
+  \item{x}{
+    A window (object of class \code{"owin"}), or another spatial object. 
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}} to determine the
+    pixel resolution for the calculation.
+  }
+}
+\details{
+  The \code{boundingcircle} of a spatial region \eqn{W} is the smallest circle
+  that contains \eqn{W}. The \code{boundingradius} is the radius of this circle,
+  and the \code{boundingcentre} is the centre of the circle.
+  
+  The functions \code{boundingcircle}, \code{boundingcentre} and
+  \code{boundingradius} are generic. There are methods
+  for objects of class \code{"owin"}, \code{"ppp"} and \code{"linnet"}.
+}
+\value{
+  The result of \code{boundingradius} is a single numeric value.
+
+  The result of \code{boundingcentre} is a point pattern containing a
+  single point.
+
+  The result of \code{boundingcircle} is a window (object of class
+  \code{"owin"}) representing the bounding circle.
+}
+\author{
+  \adrian 
+}
+\seealso{
+ \code{\link{boundingradius.linnet}}
+}
+\examples{
+  boundingradius(letterR)
+
+  plot(grow.rectangle(Frame(letterR), 0.2), main="", type="n")
+  plot(letterR, add=TRUE, col="grey")
+  plot(boundingcircle(letterR), add=TRUE, border="green", lwd=2)
+  plot(boundingcentre(letterR), pch="+", cex=2, col="blue", add=TRUE)
+
+  X <- runifpoint(5)
+  plot(X)
+  plot(boundingcircle(X), add=TRUE)
+  plot(boundingcentre(X), pch="+", cex=2, col="blue", add=TRUE)
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/box3.Rd b/man/box3.Rd
new file mode 100644
index 0000000..de0a97d
--- /dev/null
+++ b/man/box3.Rd
@@ -0,0 +1,61 @@
+\name{box3}
+\Rdversion{1.1}
+\alias{box3}
+\title{
+  Three-Dimensional Box
+}
+\description{
+  Creates an object representing a three-dimensional box.
+}
+\usage{
+box3(xrange = c(0, 1), yrange = xrange, zrange = yrange, unitname = NULL)
+}
+\arguments{
+  \item{xrange, yrange, zrange}{
+    Dimensions of the box in the \eqn{x,y,z} directions.
+    Each of these arguments should be a numeric vector of length 2.
+  }
+  \item{unitname}{
+    Optional. Name of the unit of length. See Details.
+}
+}
+\details{
+  This function creates an object representing
+  a three-dimensional rectangular parallelepiped (box)
+  with sides parallel to the coordinate axes.
+
+  The object can be used to specify the domain of a three-dimensional
+  point pattern (see \code{\link{pp3}}) and in various
+  geometrical calculations (see \code{\link{volume.box3}},
+  \code{\link{diameter.box3}}, \code{\link{eroded.volumes}}). 
+  
+  The optional argument \code{unitname} specifies the name
+  of the unit of length. See \code{\link{unitname}}
+  for valid formats.
+
+  The function \code{\link{as.box3}} can be used to convert other kinds
+  of data to this format.
+}
+\value{
+  An object of class \code{"box3"}. There is a print method for this class.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{as.box3}}, 
+  \code{\link{pp3}},
+  \code{\link{volume.box3}},
+  \code{\link{diameter.box3}},
+  \code{\link{eroded.volumes}}.
+}
+\examples{
+    box3()
+    box3(c(0,10),c(0,10),c(0,5), unitname=c("metre","metres"))
+    box3(c(-1,1))
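+    # a sketch of geometrical calculations with a box
+    B <- box3(c(0,10), c(0,10), c(0,5))
+    volume(B)
+    diameter(B)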
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/boxx.Rd b/man/boxx.Rd
new file mode 100644
index 0000000..11233a0
--- /dev/null
+++ b/man/boxx.Rd
@@ -0,0 +1,51 @@
+\name{boxx}
+\Rdversion{1.1}
+\alias{boxx}
+\title{
+  Multi-Dimensional Box
+}
+\description{
+  Creates an object representing a multi-dimensional box.
+}
+\usage{
+boxx(..., unitname = NULL)
+}
+\arguments{
+  \item{\dots}{
+    Dimensions of the box. Vectors of length 2.
+  }
+  \item{unitname}{
+    Optional. Name of the unit of length. See Details.
+  }
+}
+\details{
+  This function creates an object representing
+  a multi-dimensional rectangular parallelepiped (box)
+  with sides parallel to the coordinate axes.
+
+  The object can be used to specify the domain of a multi-dimensional
+  point pattern (see \code{\link{ppx}}) and in various
+  geometrical calculations (see \code{\link{volume.boxx}},
+  \code{\link{diameter.boxx}}, \code{\link{eroded.volumes}}). 
+  
+  The optional argument \code{unitname} specifies the name
+  of the unit of length. See \code{\link{unitname}}
+  for valid formats.
+}
+\value{
+  An object of class \code{"boxx"}. There is a print method for this class.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{ppx}},
+  \code{\link{volume.boxx}},
+  \code{\link{diameter.boxx}},
+  \code{\link{eroded.volumes.boxx}}.
+}
+\examples{
+    boxx(c(0,10),c(0,10),c(0,5),c(0,1), unitname=c("metre","metres"))
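+    # a sketch: geometrical calculations for a four-dimensional box
+    B <- boxx(c(0,1), c(0,1), c(0,1), c(0,1))
+    volume(B)
+    diameter(B)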
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/bramblecanes.Rd b/man/bramblecanes.Rd
new file mode 100644
index 0000000..85c4e83
--- /dev/null
+++ b/man/bramblecanes.Rd
@@ -0,0 +1,66 @@
+\name{bramblecanes}
+\alias{bramblecanes}
+\docType{data}
+\title{Hutchings' Bramble Canes data}
+\description{
+  Data giving the locations and ages of bramble canes in a field.
+  A marked point pattern. 
+} 
+\format{
+  An object of class \code{"ppp"} 
+  representing the point pattern of plant locations.
+  Entries include
+  \tabular{ll}{
+    \code{x} \tab Cartesian \eqn{x}-coordinate of plant \cr
+    \code{y} \tab Cartesian \eqn{y}-coordinate of plant \cr
+    \code{marks} \tab factor with levels 0,1, 2 indicating age
+  }
+  See \code{\link{ppp.object}} for details of the format.
+}
+\usage{data(bramblecanes)}
+\source{Hutchings (1979), data published in Diggle (1983)}
+\section{Notes}{
+These data record the \eqn{(x,y)} locations and ages of bramble canes in a 
+field \eqn{9} metres square, rescaled to the unit square. 
+The canes were classified according to age as either newly emergent, 
+one or two years old. These are encoded as marks 0, 1 and 2 respectively
+in the dataset.
+
+The data were recorded and analysed by Hutchings (1979)
+and further analysed by Diggle (1981a, 1981b, 1983),
+Diggle and Milne (1983), and Van Lieshout and Baddeley (1999).
+All analyses found that the pattern of newly emergent canes
+exhibits clustering, which Hutchings attributes to ``vigorous
+vegetative reproduction''. 
+}
+\references{
+  Diggle, P. J. (1981a)
+  Some graphical methods in the analysis of spatial point patterns.
+  In \emph{Interpreting multivariate data}, V. Barnett (Ed.)
+  John Wiley and Sons. 
+ 
+  Diggle, P. J. (1981b).
+  Statistical analysis of spatial point patterns.
+  \emph{N.Z. Statist.} \bold{16}, 22--41.
+ 
+  Diggle, P.J. (1983)
+  \emph{Statistical analysis of spatial point patterns}.
+  Academic Press.
+
+  Diggle, P. J. and Milne, R. K. (1983)
+  Bivariate Cox processes: some models for bivariate spatial point patterns.
+  \emph{Journal of the Royal Statistical Soc. Series B} 
+  \bold{45}, 11--21.
+ 
+  Hutchings, M. J. (1979)
+  Standing crop and pattern in pure stands of Mercurialis
+  perennis and Rubus fruticosus in mixed deciduous woodland.
+  \emph{Oikos} \bold{31}, 351--357.
+ 
+  Van Lieshout, M.N.M. and Baddeley, A.J. (1999)
+  Indices of dependence between types in multivariate point patterns.
+  \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532.
+}
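+\examples{
+  # a minimal sketch: plot the pattern, and the sub-patterns split by age
+  plot(bramblecanes)
+  plot(split(bramblecanes))
+}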
+\keyword{datasets}
+\keyword{spatial}
+ 
diff --git a/man/branchlabelfun.Rd b/man/branchlabelfun.Rd
new file mode 100644
index 0000000..c98e6da
--- /dev/null
+++ b/man/branchlabelfun.Rd
@@ -0,0 +1,69 @@
+\name{branchlabelfun}
+\alias{branchlabelfun}
+\title{
+  Tree Branch Membership Labelling Function
+}
+\description{
+  Creates a function which returns the tree branch membership label
+  for any location on a linear network.
+}
+\usage{
+  branchlabelfun(L, root = 1)
+}
+\arguments{
+  \item{L}{
+    Linear network (object of class \code{"linnet"}).
+    The network must have no loops.
+  }
+  \item{root}{
+    Root of the tree. An integer index identifying
+    which point in \code{vertices(L)} is the root of the tree.
+  }
+}
+\details{
+  The linear network \code{L}
+  must be an acyclic graph (i.e. must not contain any loops) so that it
+  can be interpreted as a tree. 
+  
+  The result of \code{f <- branchlabelfun(L, root)} is
+  a function \code{f} which gives,
+  for each location on the linear network \code{L},
+  the tree branch label at that location.
+
+  Tree branch labels are explained in \code{\link{treebranchlabels}}.
+  
+  The result \code{f} also belongs to the class \code{"linfun"}.
+  It can be called using several different kinds of data,
+  as explained in the help for \code{\link{linfun}}.
+  The values of the function are character strings.
+}
+\value{
+  A function (of class \code{"linfun"}).
+}
+\author{
+\adrian,
+
+
+\rolf
+
+and \ege
+
+}
+\seealso{
+  \code{\link{treebranchlabels}},
+  \code{\link{linfun}}
+}
+\examples{
+  # make a simple tree
+  m <- simplenet$m
+  m[8,10] <- m[10,8] <- FALSE
+  L <- linnet(vertices(simplenet), m)
+  # make function
+  f <- branchlabelfun(L, 1)
+  plot(f)
+  X <- runiflpp(5, L)
+  f(X)
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/bronzefilter.Rd b/man/bronzefilter.Rd
new file mode 100644
index 0000000..e4df831
--- /dev/null
+++ b/man/bronzefilter.Rd
@@ -0,0 +1,55 @@
+\name{bronzefilter}
+\alias{bronzefilter}
+\docType{data}
+\title{Bronze gradient filter data}
+\description{
+  These data represent a spatially inhomogeneous pattern of
+  circular section profiles of particles, observed in a
+  longitudinal plane section through a gradient sinter
+  filter made from bronze powder, prepared by Ricardo Bernhardt, Dresden. 
+
+  The material was produced by sedimentation of bronze powder with varying 
+  grain diameter and subsequent sintering,
+  as described in Bernhardt et al. (1997). 
+
+  The data are supplied as a marked point pattern of circle centres marked by
+  circle radii.
+  The coordinates of the centres and the radii are recorded in mm.
+  The field of view is an \eqn{18 \times 7}{18 * 7} mm rectangle.
+
+  The data were first analysed by Hahn et al. (1999).
+} 
+\format{
+  An object of class \code{"ppp"}
+  representing the point pattern of cell locations.
+  Entries include
+  \tabular{ll}{
+    \code{x} \tab Cartesian \eqn{x}-coordinate of bronze grain profile centre\cr
+    \code{y} \tab Cartesian \eqn{y}-coordinate of bronze grain profile centre\cr
+    \code{marks} \tab radius of bronze grain profile
+  }
+  See \code{\link{ppp.object}} for details of the format.
+  All coordinates are recorded in mm.
+}
+\usage{data(bronzefilter)}
+\examples{
+  data(bronzefilter)
+  plot(bronzefilter, markscale=2)
+}
+\source{
+  R.\ Bernhardt (section image), H.\ Wendrock (coordinate measurement). 
+  Adjusted, formatted and communicated by U.\ Hahn.
+}
+\references{
+  Bernhardt, R., Meyer-Olbersleben, F. and Kieback, B. (1997)
+  Fundamental investigation on the preparation of gradient structures
+  by sedimentation of different powder fractions under gravity.
+  \emph{Proc. of the 4th Int. Conf. On Composite Engineering,
+    July 6--12 1997, ICCE/4}, Hawaii, Ed. David Hui, 147--148.
+
+  Hahn, U., Micheletti, A., Pohlink, R., Stoyan, D. and Wendrock, H. (1999)
+  Stereological analysis and modelling of gradient structures. 
+  \emph{Journal of Microscopy}, \bold{195}, 113--124.
+} 
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/bugfixes.Rd b/man/bugfixes.Rd
new file mode 100644
index 0000000..da2c615
--- /dev/null
+++ b/man/bugfixes.Rd
@@ -0,0 +1,67 @@
+\name{bugfixes}
+\alias{bugfixes}
+\title{
+  List Recent Bug Fixes
+}
+\description{
+  List all bug fixes in a package, starting from a certain date or
+  version of the package. Fixes are sorted alphabetically by the name of the
+  affected function. The default is to list bug fixes in the latest
+  version of the \pkg{spatstat} package.  
+}
+\usage{
+  bugfixes(sinceversion = NULL, sincedate = NULL,
+           package = "spatstat", show = TRUE)
+}
+\arguments{
+  \item{sinceversion}{
+    Earliest version of \code{package}
+    for which bugs should be listed.
+    The default is the current installed version.
+  }
+  \item{sincedate}{
+    Earliest release date of \code{package}
+    for which bugs should be listed.
+    A character string or a date-time object.
+  }
+  \item{package}{
+    Character string. The name of the package for which bugs are to be listed.
+  }
+  \item{show}{
+    Logical value indicating whether to display the bug table
+    on the terminal.
+  }
+}
+\details{
+  Bug reports are extracted from the NEWS file
+  of the specified \code{package}.
+  Only those after a specified date, or after a specified version
+  of the package, are retained.
+  The bug reports are then sorted alphabetically, so that all bugs affecting a
+  particular function are listed consecutively. Finally the table of bug
+  reports is displayed (if \code{show=TRUE}) and returned invisibly.
+  
+  The argument \code{sinceversion} should be a character string
+  like \code{"1.2-3"}. The default is the current installed version of the
+  package. The argument \code{sincedate} should be a 
+  character string like \code{"2015-05-27"}, or a date-time object.
+
+  Typing \code{bugfixes} without parentheses will display a table of
+  all bug fixes in the current installed version of \pkg{spatstat}.
+}
+\value{
+  A data frame, belonging to the class \code{"bugtable"},
+  which has its own print method. 
+}
+\author{
+  \adrian.
+}
+\seealso{
+  \code{\link{latest.news}},
+  \code{\link[utils]{news}}.  
+}
+\examples{
+   # show all bugs reported after publication of the spatstat book
+   if(interactive()) bugfixes("1.42-0")
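+   # a sketch using the 'sincedate' argument (date chosen arbitrarily)
+   if(interactive()) bugfixes(sincedate="2017-01-01")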
+}
+\keyword{documentation}
diff --git a/man/bw.diggle.Rd b/man/bw.diggle.Rd
new file mode 100644
index 0000000..75df9f7
--- /dev/null
+++ b/man/bw.diggle.Rd
@@ -0,0 +1,114 @@
+\name{bw.diggle}
+\alias{bw.diggle}
+\title{
+  Cross Validated Bandwidth Selection for Kernel Density
+}
+\description{
+  Uses cross-validation to select a smoothing bandwidth
+  for the kernel estimation of point process intensity.
+}
+\usage{
+   bw.diggle(X, ..., correction="good", hmax=NULL, nr=512)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{\dots}{Ignored.}
+  \item{correction}{
+    Character string passed to \code{\link{Kest}}
+    determining the edge correction to be used to
+    calculate the \eqn{K} function.
+  }
+  \item{hmax}{
+    Numeric. Maximum value of bandwidth that should be considered.
+  }
+  \item{nr}{
+    Integer. Number of steps in the distance value \eqn{r} to use in computing
+    numerical integrals. 
+  }
+}
+\details{
+  This function selects an appropriate bandwidth \code{sigma}
+  for the kernel estimator of point process intensity
+  computed by \code{\link{density.ppp}}.
+
+  The bandwidth \eqn{\sigma}{\sigma} is chosen to 
+  minimise the mean-square error criterion defined by Diggle (1985).
+  The algorithm uses the method of Berman and Diggle (1989) to
+  compute the quantity
+  \deqn{
+    M(\sigma) = \frac{\mbox{MSE}(\sigma)}{\lambda^2} - g(0)
+  }{
+    M(\sigma) = MSE(\sigma)/\lambda^2 - g(0)
+  }
+  as a function of bandwidth \eqn{\sigma}{\sigma},
+  where \eqn{\mbox{MSE}(\sigma)}{MSE(\sigma)} is the
+  mean squared error at bandwidth \eqn{\sigma}{\sigma},
+  while \eqn{\lambda}{\lambda} is the mean intensity,
+  and \eqn{g} is the pair correlation function. 
+  See Diggle (2003, pages 115--118) for a summary of this method.
+
+  The result is a numerical value giving the selected bandwidth.
+  The result also belongs to the class \code{"bw.optim"}
+  which can be plotted to show the (rescaled) mean-square error
+  as a function of \code{sigma}.
+}
+\section{Definition of bandwidth}{
+  The smoothing parameter \code{sigma} returned by \code{bw.diggle}
+  (and displayed on the horizontal axis of the plot)
+  corresponds to \code{h/2}, where \code{h} is the smoothing
+  parameter described in Diggle (2003, pages 116--118) and
+  Berman and Diggle (1989).
+  In those references, the smoothing kernel 
+  is the uniform density on the disc of radius \code{h}. In
+  \code{\link{density.ppp}}, the smoothing kernel is the
+  isotropic Gaussian density with standard deviation \code{sigma}.
+  When replacing one kernel by another, the usual
+  practice is to adjust the bandwidths so that the kernels have equal
+  variance (cf. Diggle 2003, page 118). This implies that \code{sigma = h/2}.
+}
+\value{
+  A numerical value giving the selected bandwidth.
+  The result also belongs to the class \code{"bw.optim"}
+  which can be plotted.
+}
+\seealso{
+  \code{\link{density.ppp}},
+  \code{\link{bw.ppl}},
+  \code{\link{bw.scott}}
+}
+\examples{
+  data(lansing)
+  attach(split(lansing))
+  b <- bw.diggle(hickory)
+  plot(b, ylim=c(-2, 0), main="Cross validation for hickories")
+  \donttest{
+   plot(density(hickory, b))
+  }
+}
+\references{
+  Berman, M. and Diggle, P. (1989)
+  Estimating weighted integrals of the
+  second-order intensity of a spatial point process.
+  \emph{Journal of the Royal Statistical Society, series B}
+  \bold{51}, 81--92.
+
+  Diggle, P.J. (1985)
+  A kernel method for smoothing point process data.
+  \emph{Applied Statistics} (Journal of the Royal Statistical Society,
+  Series C) \bold{34}, 138--147.
+
+  Diggle, P.J. (2003)
+  \emph{Statistical analysis of spatial point patterns},
+  Second edition. Arnold.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/bw.frac.Rd b/man/bw.frac.Rd
new file mode 100644
index 0000000..c8fb8dc
--- /dev/null
+++ b/man/bw.frac.Rd
@@ -0,0 +1,79 @@
+\name{bw.frac}
+\alias{bw.frac}
+\title{
+  Bandwidth Selection Based on Window Geometry
+}
+\description{
+  Select a smoothing bandwidth for smoothing a point pattern,
+  based only on the geometry of the spatial window.
+  The bandwidth is a specified quantile of the distance
+  between two independent random points in the window.
+}
+\usage{
+   bw.frac(X, \dots, f=1/4)
+}
+\arguments{
+  \item{X}{
+    A window (object of class \code{"owin"}) or
+    point pattern (object of class \code{"ppp"})
+    or other data which can be converted to a window
+    using \code{\link{as.owin}}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{distcdf}}.
+  }
+  \item{f}{
+    Probability value (between 0 and 1)
+    determining the quantile of the distribution.
+  }
+}
+\details{
+  This function selects an appropriate bandwidth \code{sigma}
+  for the kernel estimator of point process intensity
+  computed by \code{\link{density.ppp}}.
+
+  The bandwidth \eqn{\sigma}{\sigma} is computed as a 
+  quantile of the distance between two independent random points
+  in the window. The default is the lower quartile of this
+  distribution.
+
+  If \eqn{F(r)} is the cumulative distribution function of the
+  distance between two independent random points uniformly distributed
+  in the window, then the value returned is the quantile
+  with probability \eqn{f}. That is, the bandwidth is 
+  the value \eqn{r} such that \eqn{F(r) = f}.
+
+  The cumulative distribution function  \eqn{F(r)} is
+  computed using \code{\link{distcdf}}. We then
+  compute the smallest number \eqn{r}
+  such that \eqn{F(r) \ge f}{F(r) >= f}.
+}
+\value{
+  A numerical value giving the selected bandwidth.
+  The result also belongs to the class \code{"bw.frac"}
+  which can be plotted to show the cumulative distribution function
+  and the selected quantile.
+}
+\seealso{
+  \code{\link{density.ppp}},
+  \code{\link{bw.diggle}},
+  \code{\link{bw.ppl}},
+  \code{\link{bw.relrisk}},
+  \code{\link{bw.scott}},
+  \code{\link{bw.smoothppp}},
+  \code{\link{bw.stoyan}}
+}
+\examples{
+  h <- bw.frac(letterR)
+  h
+  plot(h, main="bw.frac(letterR)")
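+  # a sketch: use the median interpoint distance instead of the lower quartile
+  bw.frac(letterR, f=1/2)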
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/bw.pcf.Rd b/man/bw.pcf.Rd
new file mode 100644
index 0000000..c768306
--- /dev/null
+++ b/man/bw.pcf.Rd
@@ -0,0 +1,137 @@
+\name{bw.pcf}
+\alias{bw.pcf}
+\title{
+  Cross Validated Bandwidth Selection for Pair Correlation Function
+}
+\description{
+  Uses composite likelihood or generalized least squares 
+  cross-validation to select a smoothing bandwidth
+  for the kernel estimation of pair correlation function.
+}
+\usage{
+  bw.pcf(X, rmax=NULL, lambda=NULL, divisor="r", 
+         kernel="epanechnikov", nr=10000, bias.correct=TRUE, 
+         cv.method=c("compLik", "leastSQ"), simple=TRUE, srange=NULL,
+	 \dots, verbose=FALSE)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{rmax}{
+    Numeric. Maximum value of the spatial lag distance \eqn{r} 
+    for which \eqn{g(r)} should be evaluated.
+  }
+  \item{lambda}{
+    Optional.
+    Values of the estimated intensity function.
+    A vector giving the intensity values
+    at the points of the pattern \code{X}.
+  }
+  \item{divisor}{
+    Choice of divisor in the estimation formula:
+    either \code{"r"} (the default) or \code{"d"}. 
+    See \code{pcf.ppp}.
+  }
+  \item{kernel}{
+    Choice of smoothing kernel, passed to \code{density}; 
+    see \code{\link{pcf}} and \code{\link{pcfinhom}}.
+  }
+  \item{nr}{
+    Integer. Number of subintervals for discretization of 
+    [0, rmax] to use in computing numerical integrals.
+  }
+  \item{bias.correct}{
+    Logical. Whether to use bias corrected version of the kernel 
+    estimate. See Details.
+  }
+  \item{cv.method}{
+    Choice of cross validation method: either
+    \code{"compLik"} or \code{"leastSQ"} (partially matched).
+  }
+  \item{simple}{
+    Logical. Whether to use simple removal of spatial lag 
+    distances. See Details.
+  }
+  \item{srange}{
+    Optional. Numeric vector of length 2 giving the range of
+    bandwidth values that should be searched to find the optimum
+    bandwidth.
+  }
+  \item{\dots}{
+    Other arguments, passed to \code{\link{pcf}} or 
+    \code{\link{pcfinhom}}.
+  }
+  \item{verbose}{
+    Logical value indicating whether to print progress reports
+    during the optimization procedure.
+  }
+}
+\details{
+  This function selects an appropriate bandwidth \code{bw}
+  for the kernel estimator of the pair correlation function 
+  of a point process intensity computed by \code{\link{pcf.ppp}} 
+  (homogeneous case) or \code{\link{pcfinhom}} 
+  (inhomogeneous case).
+
+  With \code{cv.method="leastSQ"}, the bandwidth 
+  \eqn{h} is chosen to minimise an unbiased 
+  estimate of the integrated mean-square error criterion 
+  \eqn{M(h)} defined in equation (4) in Guan (2007b).
+  
+  With \code{cv.method="compLik"}, the bandwidth 
+  \eqn{h} is chosen to maximise a likelihood 
+  cross-validation criterion \eqn{CV(h)} defined in 
+  equation (6) of Guan (2007a).
+  
+  Here the least-squares criterion takes the form
+  \deqn{
+    M(h) = \int_0^{r_{\max}} \hat{g}^2(r;h) \, r \, dr - \sum_{u,v} \dots
+  }{
+    M(h) = integral[0,rmax] ghat^2(r;h) r dr - sum[u,v] ...
+  }
+
+  The result is a numerical value giving the selected bandwidth.
+}
+\section{Definition of bandwidth}{
+  The bandwidth \code{bw} returned by \code{bw.pcf}
+  corresponds to the standard deviation of the smoothing
+  kernel. As mentioned in the documentation of 
+  \code{\link{density.default}} and \code{\link{pcf.ppp}}, 
+  this differs from the scale parameter \code{h} of 
+  the smoothing kernel which is often considered in the 
+  literature as the bandwidth of the kernel function.
+  For example, for the Epanechnikov kernel, \code{bw=h/sqrt(5)}.
+}
+\value{
+  A numerical value giving the selected bandwidth.
+  The result also belongs to the class \code{"bw.optim"}
+  which can be plotted.
+}
+\seealso{
+  \code{\link{pcf.ppp}},
+  \code{\link{pcfinhom}}
+}
+\examples{
+  b <- bw.pcf(redwood)
+  plot(pcf(redwood, bw=b))
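+  # a sketch of the least-squares cross-validation variant
+  b2 <- bw.pcf(redwood, cv.method="leastSQ")
+  b2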
+}
+\references{
+  Guan, Y. (2007a). 
+  A composite likelihood cross-validation approach in selecting 
+  bandwidth for the estimation of the pair correlation function. 
+  \emph{Scandinavian Journal of Statistics}, 
+  \bold{34}(2), 336--346.
+  
+  Guan, Y. (2007b). 
+  A least-squares cross-validation bandwidth selection approach 
+  in pair correlation function estimations. 
+  \emph{Statistics & Probability Letters}, 
+  \bold{77}(18), 1722--1729.
+}
+\author{
+  Rasmus Waagepetersen and Abdollah Jalilian. 
+  Adapted for \pkg{spatstat} by \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/bw.ppl.Rd b/man/bw.ppl.Rd
new file mode 100644
index 0000000..3f2f1ca
--- /dev/null
+++ b/man/bw.ppl.Rd
@@ -0,0 +1,100 @@
+\name{bw.ppl}
+\alias{bw.ppl}
+\title{
+  Likelihood Cross Validation Bandwidth Selection for Kernel Density
+}
+\description{
+  Uses likelihood cross-validation to select a smoothing bandwidth
+  for the kernel estimation of point process intensity.
+}
+\usage{
+   bw.ppl(X, \dots, srange=NULL, ns=16, sigma=NULL, weights=NULL)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{\dots}{Ignored.}
+  \item{srange}{
+    Optional numeric vector of length 2 giving the
+    range of values of bandwidth to be searched.
+  }
+  \item{ns}{
+    Optional integer giving the number of values of
+    bandwidth to search.
+  }
+  \item{sigma}{
+    Optional. Vector of values of the bandwidth to be searched.
+    Overrides the values of \code{ns} and \code{srange}.
+  }
+  \item{weights}{
+    Optional. Numeric vector of weights for the points of \code{X}.
+    Argument passed to \code{\link{density.ppp}}.
+  }
+}
+\details{
+  This function selects an appropriate bandwidth \code{sigma}
+  for the kernel estimator of point process intensity
+  computed by \code{\link{density.ppp}}.
+
+  The bandwidth \eqn{\sigma}{\sigma} is chosen to 
+  maximise the point process likelihood cross-validation criterion
+  \deqn{
+    \mbox{LCV}(\sigma) =
+    \sum_i \log\hat\lambda_{-i}(x_i) - \int_W \hat\lambda(u) \, {\rm d}u
+  }{
+    LCV(\sigma) = sum[i] log(\lambda[-i](x[i])) - integral[W] \lambda(u) du
+  }
+  where the sum is taken over all the data points \eqn{x_i}{x[i]},
+  where \eqn{\hat\lambda_{-i}(x_i)}{\lambda[-i](x_i)} is the
+  leave-one-out kernel-smoothing estimate of the intensity at
+  \eqn{x_i}{x[i]} with smoothing bandwidth \eqn{\sigma}{\sigma},
+  and \eqn{\hat\lambda(u)}{\lambda(u)} is the kernel-smoothing estimate
+  of the intensity at a spatial location \eqn{u} with smoothing
+  bandwidth \eqn{\sigma}{\sigma}.
+  See Loader(1999, Section 5.3).
+
+  The value of \eqn{\mbox{LCV}(\sigma)}{LCV(\sigma)} is computed
+  directly, using \code{\link{density.ppp}}, 
+  for \code{ns} different values of \eqn{\sigma}{\sigma}
+  between \code{srange[1]} and \code{srange[2]}.
+
+  The result is a numerical value giving the selected bandwidth.
+  The result also belongs to the class \code{"bw.optim"}
+  which can be plotted to show the (rescaled) mean-square error
+  as a function of \code{sigma}.
+}
+\value{
+  A numerical value giving the selected bandwidth.
+  The result also belongs to the class \code{"bw.optim"}
+  which can be plotted.
+}
+\seealso{
+  \code{\link{density.ppp}},
+  \code{\link{bw.diggle}},
+  \code{\link{bw.scott}}
+}
+\examples{
+  \donttest{
+    b <- bw.ppl(redwood)
+    plot(b, main="Likelihood cross validation for redwoods")
+    plot(density(redwood, b))
+  }
+  \testonly{
+    b <- bw.ppl(redwood, srange=c(0.03, 0.07), ns=2)
+  }
+}
+\references{
+  Loader, C. (1999)
+  \emph{Local Regression and Likelihood}.
+  Springer, New York.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/bw.relrisk.Rd b/man/bw.relrisk.Rd
new file mode 100644
index 0000000..8938853
--- /dev/null
+++ b/man/bw.relrisk.Rd
@@ -0,0 +1,112 @@
+\name{bw.relrisk}
+\alias{bw.relrisk}
+\title{
+  Cross Validated Bandwidth Selection for Relative Risk Estimation
+}
+\description{
+  Uses cross-validation to select a smoothing bandwidth
+  for the estimation of relative risk.
+}
+\usage{
+   bw.relrisk(X, method = "likelihood", nh = spatstat.options("n.bandwidth"),
+   hmin=NULL, hmax=NULL, warn=TRUE)
+}
+
+\arguments{
+  \item{X}{
+    A multitype point pattern (object of class \code{"ppp"}
+    which has factor valued marks).
+  }
+  \item{method}{
+    Character string determining the cross-validation method.
+    Current options are \code{"likelihood"},
+    \code{"leastsquares"} or
+    \code{"weightedleastsquares"}.
+  }
+  \item{nh}{
+    Number of trial values of smoothing bandwidth \code{sigma}
+    to consider. The default is 32.
+  }
+  \item{hmin, hmax}{
+    Optional. Numeric values.
+    Range of trial values of smoothing bandwidth \code{sigma}
+    to consider. There is a sensible default.
+  }
+  \item{warn}{
+    Logical. If \code{TRUE}, issue a warning if the minimum of
+    the cross-validation criterion occurs at one of the ends of the
+    search interval.
+  }
+}
+\details{
+  This function selects an appropriate bandwidth for the nonparametric
+  estimation of relative risk using \code{\link{relrisk}}.
+  
+  Consider the indicators \eqn{y_{ij}}{y[i,j]} which equal \eqn{1} when
+  data point \eqn{x_i}{x[i]} belongs to type \eqn{j}, and equal \eqn{0}
+  otherwise.
+  For a particular value of smoothing bandwidth,
+  let \eqn{\hat p_j(u)}{p*[j](u)} be the estimated
+  probabilities that a point at location \eqn{u} will belong to
+  type \eqn{j}. 
+  Then the bandwidth is chosen to minimise either the likelihood,
+  the squared error, or the approximately standardised squared error, of the
+  indicators \eqn{y_{ij}}{y[i,j]} relative to the fitted
+  values  \eqn{\hat p_j(x_i)}{p*[j](x[i])}. See Diggle (2003).
+
+  The result is a numerical value giving the selected bandwidth \code{sigma}.
+  The result also belongs to the class \code{"bw.optim"}
+  allowing it to be printed and plotted. The plot shows the cross-validation
+  criterion as a function of bandwidth.  
+  
+  The range of values for the smoothing bandwidth \code{sigma}
+  is set by the arguments \code{hmin, hmax}. There is a sensible default,
+  based on multiples of Stoyan's rule of thumb \code{\link{bw.stoyan}}.
+  
+  If the optimal bandwidth is achieved at an endpoint of the
+  interval \code{[hmin, hmax]}, the algorithm will issue a warning
+  (unless \code{warn=FALSE}). If this occurs, then it is probably advisable
+  to expand the interval by changing the arguments \code{hmin, hmax}.
+
+  Computation time depends on the number \code{nh} of trial values
+  considered, and also on the range \code{[hmin, hmax]} of values
+  considered, because larger values of \code{sigma} require
+  calculations involving more pairs of data points.
+}
+\value{
+  A numerical value giving the selected bandwidth.
+  The result also belongs to the class \code{"bw.optim"}
+  which can be plotted.
+}
+\seealso{
+  \code{\link{relrisk}},
+  \code{\link{bw.stoyan}}
+}
+\examples{
+  data(urkiola)
+  \testonly{op <- spatstat.options(n.bandwidth=8)}
+  b <- bw.relrisk(urkiola)
+  b
+  plot(b)
+  b <- bw.relrisk(urkiola, hmax=20)
+  plot(b)
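+  # a sketch of an alternative cross-validation criterion
+  b2 <- bw.relrisk(urkiola, method="leastsquares")
+  b2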
+  \testonly{spatstat.options(op)}
+}
+\references{
+  Diggle, P.J. (2003)
+  \emph{Statistical analysis of spatial point patterns},
+  Second edition. Arnold.
+  
+  Kelsall, J.E. and Diggle, P.J. (1995)
+  Kernel estimation of relative risk.
+  \emph{Bernoulli} \bold{1}, 3--16.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/bw.scott.Rd b/man/bw.scott.Rd
new file mode 100644
index 0000000..ed9ef16
--- /dev/null
+++ b/man/bw.scott.Rd
@@ -0,0 +1,64 @@
+\name{bw.scott}
+\alias{bw.scott}
+\title{
+  Scott's Rule for Bandwidth Selection for Kernel Density
+}
+\description{
+  Use Scott's rule of thumb to determine the smoothing bandwidth
+  for the kernel estimation of point process intensity.
+}
+\usage{
+   bw.scott(X)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+}
+\details{
+  This function selects a bandwidth \code{sigma}
+  for the kernel estimator of point process intensity
+  computed by \code{\link{density.ppp}}.
+
+  The bandwidth \eqn{\sigma}{\sigma} is computed by the rule of thumb
+  of Scott (1992, page 152). It is very fast to compute.
+
+  This rule is designed for density
+  estimation, and typically produces a larger bandwidth
+  than \code{\link{bw.diggle}}. It is useful for estimating
+  gradual trend. 
+}
+\value{
+  A numerical vector of two elements giving the selected
+  bandwidths in the \code{x} and \code{y} directions.
+}
+\seealso{
+  \code{\link{density.ppp}},
+  \code{\link{bw.diggle}},
+  \code{\link{bw.ppl}},
+  \code{\link{bw.frac}}.
+}
+\examples{
+  data(lansing)
+  attach(split(lansing))
+  b <- bw.scott(hickory)
+  b
+  \donttest{
+   plot(density(hickory, b))
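+   # a sketch: compare with the (typically smaller) cross-validated bandwidth
+   bw.diggle(hickory)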
+  }
+}
+\references{
+  Scott, D.W. (1992)
+  \emph{Multivariate Density Estimation. Theory, Practice and
+    Visualization}. 
+  New York: Wiley.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/bw.smoothppp.Rd b/man/bw.smoothppp.Rd
new file mode 100644
index 0000000..db54579
--- /dev/null
+++ b/man/bw.smoothppp.Rd
@@ -0,0 +1,93 @@
+\name{bw.smoothppp}
+\alias{bw.smoothppp}
+\title{
+  Cross Validated Bandwidth Selection for Spatial Smoothing
+}
+\description{
+  Uses least-squares cross-validation to select a smoothing bandwidth
+  for spatial smoothing of marks.
+}
+\usage{
+   bw.smoothppp(X, nh = spatstat.options("n.bandwidth"),
+   hmin=NULL, hmax=NULL, warn=TRUE)
+}
+
+\arguments{
+  \item{X}{
+    A marked point pattern with numeric marks.
+  }
+  \item{nh}{
+    Number of trial values of smoothing bandwidth \code{sigma}
+    to consider. The default is 32.
+  }
+  \item{hmin, hmax}{
+    Optional. Numeric values.
+    Range of trial values of smoothing bandwidth \code{sigma}
+    to consider. There is a sensible default.
+  }
+  \item{warn}{
+    Logical. If \code{TRUE}, issue a warning if the minimum of
+    the cross-validation criterion occurs at one of the ends of the
+    search interval.
+  }
+}
+\details{
+  This function selects an appropriate bandwidth for the nonparametric
+  smoothing of mark values using \code{\link{Smooth.ppp}}.
+  
+  The argument \code{X} must be a marked point pattern
+  with a vector or data frame of marks. All mark values must be numeric.
+  
+  The bandwidth is selected by least-squares cross-validation.
+  Let \eqn{y_i}{y[i]} be the mark value at the \eqn{i}th data point.
+  For a particular choice of smoothing bandwidth,
+  let \eqn{\hat y_i}{y*[i]} be the smoothed value at the \eqn{i}th data point.
+  Then the bandwidth is chosen to minimise 
+  the squared error of the smoothed values
+  \eqn{\sum_i (y_i - \hat y_i)^2}{sum (y[i] - y*[i])^2}.
+
+  The result of \code{bw.smoothppp}
+  is a numerical value giving the selected bandwidth \code{sigma}.
+  The result also belongs to the class \code{"bw.optim"}
+  allowing it to be printed and plotted. The plot shows the cross-validation
+  criterion as a function of bandwidth.  
+  
+  The range of values for the smoothing bandwidth \code{sigma}
+  is set by the arguments \code{hmin, hmax}. There is a sensible default,
+  based on the nearest neighbour distances.
+  
+  If the optimal bandwidth is achieved at an endpoint of the
+  interval \code{[hmin, hmax]}, the algorithm will issue a warning
+  (unless \code{warn=FALSE}). If this occurs, then it is probably advisable
+  to expand the interval by changing the arguments \code{hmin, hmax}.
+
+  Computation time depends on the number \code{nh} of trial values
+  considered, and also on the range \code{[hmin, hmax]} of values
+  considered, because larger values of \code{sigma} require
+  calculations involving more pairs of data points.
+}
+\value{
+  A numerical value giving the selected bandwidth.
+  The result also belongs to the class \code{"bw.optim"}
+  which can be plotted.
+}
+\seealso{
+  \code{\link{Smooth.ppp}}
+}
+\examples{
+  data(longleaf)
+  \testonly{op <- spatstat.options(n.bandwidth=8)}
+  b <- bw.smoothppp(longleaf)
+  b
+  plot(b)
+  \testonly{spatstat.options(op)}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/bw.stoyan.Rd b/man/bw.stoyan.Rd
new file mode 100644
index 0000000..190ac4e
--- /dev/null
+++ b/man/bw.stoyan.Rd
@@ -0,0 +1,69 @@
+\name{bw.stoyan}
+\alias{bw.stoyan}
+\title{
+  Stoyan's Rule of Thumb for Bandwidth Selection
+}
+\description{
+  Computes a rough estimate of the appropriate bandwidth
+  for kernel smoothing estimators of the pair correlation function
+  and other quantities.
+}
+\usage{
+bw.stoyan(X, co=0.15)
+}
+
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{co}{
+    Coefficient appearing in the rule of thumb. See Details.
+  }
+}
+\details{
+  Estimation of the pair correlation function and other quantities
+  by smoothing methods requires a choice of the smoothing bandwidth.
+  Stoyan and Stoyan (1995, equation (15.16), page 285) proposed a
+  rule of thumb for choosing the smoothing bandwidth.
+  
+  For the Epanechnikov kernel, the rule of thumb is to set
+  the kernel's half-width \eqn{h} to
+  \eqn{0.15/\sqrt{\lambda}}{0.15/sqrt(\lambda)} where
+  \eqn{\lambda}{\lambda} is the estimated intensity of the point pattern,
+  typically computed as the number of points of \code{X} divided by the
+  area of the window containing \code{X}.
+
+  For a general kernel, the corresponding rule is to set the
+  standard deviation of the kernel to
+  \eqn{\sigma = 0.15/\sqrt{5\lambda}}{\sigma = 0.15/sqrt(5 * \lambda)}.
+  
+  The coefficient \eqn{0.15} can be tweaked using the
+  argument \code{co}. 
+}
+\value{
+  A numerical value giving the selected bandwidth (the standard
+  deviation of the smoothing kernel).
+}
+\seealso{
+  \code{\link{pcf}},
+  \code{\link{bw.relrisk}}
+}
+\examples{
+  data(shapley)
+  bw.stoyan(shapley)
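+  # a hedged check of the rule of thumb by hand, assuming the default
+  # intensity estimate lambda = npoints/area described in Details
+  lam <- npoints(shapley)/area.owin(Window(shapley))
+  0.15/sqrt(5 * lam)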
+}
+\references{
+  Stoyan, D. and Stoyan, H. (1995)
+  Fractals, random shapes and point fields:
+  methods of geometrical statistics.
+  John Wiley and Sons.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/by.im.Rd b/man/by.im.Rd
new file mode 100644
index 0000000..bb09a05
--- /dev/null
+++ b/man/by.im.Rd
@@ -0,0 +1,62 @@
+\name{by.im}
+\alias{by.im}
+\title{Apply Function to Image Broken Down by Factor}
+\description{
+  Splits a pixel image into sub-images and applies a function to each
+  sub-image.
+}
+\usage{
+\method{by}{im}(data, INDICES, FUN, ...)
+}
+\arguments{
+  \item{data}{A pixel image (object of class \code{"im"}).}
+  \item{INDICES}{Grouping variable.
+    Either a tessellation (object of class \code{"tess"}) or
+    a factor-valued pixel image.
+  }
+  \item{FUN}{Function to be applied to each sub-image of \code{data}.}
+  \item{\dots}{Extra arguments passed to \code{FUN}.}
+}
+\details{
+  This is a method for the generic function \code{\link{by}} for
+  pixel images (class \code{"im"}).
+
+  The pixel image \code{data} is first divided into sub-images according
+  to \code{INDICES}. Then the function \code{FUN} is applied to each subset.
+  The results of each computation are returned in a list.
+
+  The grouping variable \code{INDICES} may be either
+  \itemize{
+    \item a tessellation (object of class \code{"tess"}). Each tile of
+    the tessellation delineates a subset of the spatial domain.
+    \item a pixel image (object of class \code{"im"}) with factor
+    values. The levels of the factor determine subsets of the spatial
+    domain.
+  }
+}
+\value{
+  A list containing the results of each evaluation of \code{FUN}.
+}
+\seealso{
+  \code{\link{split.im}},
+  \code{\link{tess}},
+  \code{\link{im}}
+}
+\examples{
+  W <- square(1)
+  X <- as.im(function(x,y){sqrt(x^2+y^2)}, W)
+  Y <- dirichlet(runifpoint(12, W))
+  # mean pixel value in each subset
+  unlist(by(X, Y, mean))
+  # trimmed mean
+  unlist(by(X, Y, mean, trim=0.05))
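+  # any summary function can be used; here the range of pixel
+  # values in each tile (illustrative)
+  by(X, Y, range)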
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{manip}
diff --git a/man/by.ppp.Rd b/man/by.ppp.Rd
new file mode 100644
index 0000000..a56ee2b
--- /dev/null
+++ b/man/by.ppp.Rd
@@ -0,0 +1,84 @@
+\name{by.ppp}
+\alias{by.ppp}
+\title{Apply a Function to a Point Pattern Broken Down by Factor}
+\description{
+  Splits a point pattern into sub-patterns, and applies the function to each
+  sub-pattern.
+}
+\usage{
+ \method{by}{ppp}(data, INDICES=marks(data), FUN, ...)
+}
+\arguments{
+  \item{data}{Point pattern (object of class \code{"ppp"}).}
+  \item{INDICES}{Grouping variable. Either a factor, a pixel image with
+    factor values, or a tessellation.}
+  \item{FUN}{Function to be applied to subsets of \code{data}.}
+  \item{\dots}{Additional arguments to \code{FUN}.}
+}
+\details{
+  This is a method for the generic function \code{\link{by}}
+  for point patterns (class \code{"ppp"}).
+
+  The point pattern \code{data} is first divided into subsets
+  according to \code{INDICES}. Then the function \code{FUN}
+  is applied to each subset. The results of each computation are
+  returned in a list.
+
+  The argument \code{INDICES} may be
+  \itemize{
+    \item
+    a factor, of length equal to the number of points in \code{data}.
+    The levels of \code{INDICES}
+    determine the destination of each point in \code{data}.
+    The \code{i}th point of \code{data} will be placed in the sub-pattern
+    \code{split.ppp(data)$l} where \code{l = INDICES[i]}.
+    \item
+    a pixel image (object of class \code{"im"}) with factor values.
+    The pixel value of \code{INDICES}
+    at each point of \code{data} will be used as the classifying variable.
+    \item
+    a tessellation (object of class \code{"tess"}).
+    Each point of \code{data} will be classified according to
+    the tile of the tessellation into which it falls.
+  }
+  If \code{INDICES} is missing, then \code{data} must be a multitype point pattern
+  (a marked point pattern whose marks vector is a factor).
+  Then the effect is that the points of each type
+  are separated into different point patterns.
+}
+\value{
+  A list (also of class \code{"anylist"} or \code{"solist"} as
+  appropriate) containing the results returned
+  from \code{FUN} for each of the subpatterns.
+}
+\seealso{
+  \code{\link{ppp}},
+  \code{\link{split.ppp}},
+  \code{\link{cut.ppp}},
+  \code{\link{tess}},
+  \code{\link{im}}.
+}
+\examples{
+  # multitype point pattern, broken down by type
+  data(amacrine)
+  by(amacrine, FUN=density)
+  by(amacrine, FUN=function(x) { min(nndist(x)) } )
+
+  # how to pass additional arguments to FUN
+  by(amacrine, FUN=clarkevans, correction=c("Donnelly","cdf"))
+
+  # point pattern broken down by tessellation
+  data(swedishpines)
+  tes <- quadrats(swedishpines, 5, 5)
+  B <- by(swedishpines, tes, clarkevans, correction="Donnelly")
+  unlist(lapply(B, as.numeric))
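+  # grouping by a factor-valued pixel image (an illustrative sketch:
+  # points are classified by a binary cut of the x coordinate)
+  Z <- cut(as.im(function(x,y){x}, Window(swedishpines)), breaks=2)
+  by(swedishpines, Z, npoints)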
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{manip}
diff --git a/man/cauchy.estK.Rd b/man/cauchy.estK.Rd
new file mode 100644
index 0000000..c96af76
--- /dev/null
+++ b/man/cauchy.estK.Rd
@@ -0,0 +1,152 @@
+\name{cauchy.estK}
+\alias{cauchy.estK}
+\title{Fit the Neyman-Scott cluster process with Cauchy kernel}
+\description{
+  Fits the Neyman-Scott Cluster point process with Cauchy kernel
+  to a point pattern dataset by the Method of Minimum Contrast.
+}
+\usage{
+cauchy.estK(X, startpar=c(kappa=1,scale=1), lambda=NULL,
+            q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...)
+}
+\arguments{
+  \item{X}{
+    Data to which the model will be fitted.
+    Either a point pattern or a summary statistic.
+    See Details.
+  }
+  \item{startpar}{
+    Vector of starting values for the parameters of the model.
+  }
+  \item{lambda}{
+    Optional. An estimate of the intensity of the point process.
+  }
+  \item{q,p}{
+    Optional. Exponents for the contrast criterion.
+  }
+  \item{rmin, rmax}{
+    Optional. The interval of \eqn{r} values for the contrast criterion.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link[stats]{optim}}
+    to control the optimisation algorithm. See Details.
+  }
+}
+\details{
+  This algorithm fits the Neyman-Scott cluster point process model
+  with Cauchy kernel to a point pattern dataset
+  by the Method of Minimum Contrast, using the \eqn{K} function.
+
+  The argument \code{X} can be either
+  \describe{
+    \item{a point pattern:}{An object of class \code{"ppp"}
+      representing a point pattern dataset. 
+      The \eqn{K} function of the point pattern will be computed
+      using \code{\link{Kest}}, and the method of minimum contrast
+      will be applied to this.
+    }
+    \item{a summary statistic:}{An object of class \code{"fv"} containing
+      the values of a summary statistic, computed for a point pattern
+      dataset. The summary statistic should be the \eqn{K} function,
+      and this object should have been obtained by a call to
+      \code{\link{Kest}} or one of its relatives.
+    }
+  }
+
+  The algorithm fits the Neyman-Scott cluster point process
+  with Cauchy kernel to \code{X},
+  by finding the parameters of the Cauchy model
+  which give the closest match between the
+  theoretical \eqn{K} function of the Cauchy process
+  and the observed \eqn{K} function.
+  For a more detailed explanation of the Method of Minimum Contrast,
+  see \code{\link{mincontrast}}.
+  
+  The model is described in Jalilian et al (2013).
+  It is a cluster process formed by taking a 
+  pattern of parent points, generated according to a Poisson process
+  with intensity \eqn{\kappa}{\kappa}, and around each parent point,
+  generating a random number of offspring points, such that the
+  number of offspring of each parent is a Poisson random variable with mean
+  \eqn{\mu}{\mu}, and the locations of the offspring points of one parent
+  follow a common distribution described in Jalilian et al (2013).
+
+  If the argument \code{lambda} is provided, then this is used
+  as the value of the point process intensity \eqn{\lambda}{\lambda}.
+  Otherwise, if \code{X} is a
+  point pattern, then  \eqn{\lambda}{\lambda}
+  will be estimated from \code{X}. 
+  If \code{X} is a summary statistic and \code{lambda} is missing,
+  then the intensity \eqn{\lambda}{\lambda} cannot be estimated, and
+  the parameter \eqn{\mu}{\mu} will be returned as \code{NA}.
+
+  The remaining arguments \code{rmin,rmax,q,p} control the
+  method of minimum contrast; see \code{\link{mincontrast}}.
+
+  The corresponding model can be simulated using \code{\link{rCauchy}}.
+
+  For computational reasons, the optimisation procedure uses the parameter 
+  \code{eta2}, which is equivalent to \code{4 * scale^2}
+  where \code{scale} is the scale parameter for the model
+  as used in \code{\link{rCauchy}}.
+  
+  Homogeneous or inhomogeneous Neyman-Scott/Cauchy models can also be
+  fitted using the function \code{\link{kppm}} and the fitted models
+  can be simulated using \code{\link{simulate.kppm}}.
+
+  The optimisation algorithm can be controlled through the
+  additional arguments \code{"..."} which are passed to the
+  optimisation function \code{\link[stats]{optim}}. For example,
+  to constrain the parameter values to a certain range,
+  use the argument \code{method="L-BFGS-B"} to select an optimisation
+  algorithm that respects box constraints, and use the arguments
+  \code{lower} and \code{upper} to specify (vectors of) minimum and
+  maximum values for each parameter.
+}
+\value{
+  An object of class \code{"minconfit"}. There are methods for printing
+  and plotting this object. It contains the following main components:
+  \item{par }{Vector of fitted parameter values.}
+  \item{fit }{Function value table (object of class \code{"fv"})
+    containing the observed values of the summary statistic
+    (\code{observed}) and the theoretical values of the summary
+    statistic computed from the fitted model parameters.
+  }
+}
+\references{
+  Ghorbani, M. (2012) Cauchy cluster process.
+  \emph{Metrika}, to appear.
+
+  Jalilian, A., Guan, Y. and Waagepetersen, R. (2013)
+  Decomposition of variance for spatial Cox processes.
+  \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137.
+
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\author{Abdollah Jalilian and Rasmus Waagepetersen.
+  Adapted for \pkg{spatstat} by \adrian
+  
+  
+}
+\seealso{
+  \code{\link{kppm}},
+  \code{\link{cauchy.estpcf}},
+  \code{\link{lgcp.estK}},
+  \code{\link{thomas.estK}},
+  \code{\link{vargamma.estK}},
+  \code{\link{mincontrast}},
+  \code{\link{Kest}},
+  \code{\link{Kmodel}}.
+
+  \code{\link{rCauchy}} to simulate the model.
+}
+\examples{
+    u <- cauchy.estK(redwood)
+    u
+    plot(u)
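+    # a hedged sketch of box-constrained optimisation as described in
+    # Details; the limits on (kappa, eta2) are illustrative only
+    u2 <- cauchy.estK(redwood, method="L-BFGS-B",
+                      lower=c(0.1, 0.001), upper=c(100, 1))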
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/cauchy.estpcf.Rd b/man/cauchy.estpcf.Rd
new file mode 100644
index 0000000..7c429eb
--- /dev/null
+++ b/man/cauchy.estpcf.Rd
@@ -0,0 +1,159 @@
+\name{cauchy.estpcf}
+\alias{cauchy.estpcf}
+\title{Fit the Neyman-Scott cluster process with Cauchy kernel}
+\description{
+  Fits the Neyman-Scott Cluster point process with Cauchy kernel
+  to a point pattern dataset by the Method of Minimum Contrast,
+  using the pair correlation function.
+}
+\usage{
+cauchy.estpcf(X, startpar=c(kappa=1,scale=1), lambda=NULL,
+            q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...,
+            pcfargs = list())
+}
+\arguments{
+  \item{X}{
+    Data to which the model will be fitted.
+    Either a point pattern or a summary statistic.
+    See Details.
+  }
+  \item{startpar}{
+    Vector of starting values for the parameters of the model.
+  }
+  \item{lambda}{
+    Optional. An estimate of the intensity of the point process.
+  }
+  \item{q,p}{
+    Optional. Exponents for the contrast criterion.
+  }
+  \item{rmin, rmax}{
+    Optional. The interval of \eqn{r} values for the contrast criterion.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link[stats]{optim}}
+    to control the optimisation algorithm. See Details.
+  }
+  \item{pcfargs}{
+    Optional list containing arguments passed to \code{\link{pcf.ppp}}
+    to control the smoothing in the estimation of the
+    pair correlation function.
+  }
+}
+\details{
+  This algorithm fits the Neyman-Scott cluster point process model
+  with Cauchy kernel to a point pattern dataset
+  by the Method of Minimum Contrast, using the pair correlation function.
+
+  The argument \code{X} can be either
+  \describe{
+    \item{a point pattern:}{An object of class \code{"ppp"}
+      representing a point pattern dataset. 
+      The pair correlation function of the point pattern will be computed
+      using \code{\link{pcf}}, and the method of minimum contrast
+      will be applied to this.
+    }
+    \item{a summary statistic:}{An object of class \code{"fv"} containing
+      the values of a summary statistic, computed for a point pattern
+      dataset. The summary statistic should be the pair correlation function,
+      and this object should have been obtained by a call to
+      \code{\link{pcf}} or one of its relatives.
+    }
+  }
+
+  The algorithm fits the Neyman-Scott cluster point process
+  with Cauchy kernel to \code{X},
+  by finding the parameters of the Cauchy model
+  which give the closest match between the
+  theoretical pair correlation function of the Cauchy process
+  and the observed pair correlation function.
+  For a more detailed explanation of the Method of Minimum Contrast,
+  see \code{\link{mincontrast}}.
+  
+  The model is described in Jalilian et al (2013).
+  It is a cluster process formed by taking a 
+  pattern of parent points, generated according to a Poisson process
+  with intensity \eqn{\kappa}{\kappa}, and around each parent point,
+  generating a random number of offspring points, such that the
+  number of offspring of each parent is a Poisson random variable with mean
+  \eqn{\mu}{\mu}, and the locations of the offspring points of one parent
+  follow a common distribution described in Jalilian et al (2013).
+
+  If the argument \code{lambda} is provided, then this is used
+  as the value of the point process intensity \eqn{\lambda}{\lambda}.
+  Otherwise, if \code{X} is a
+  point pattern, then  \eqn{\lambda}{\lambda}
+  will be estimated from \code{X}. 
+  If \code{X} is a summary statistic and \code{lambda} is missing,
+  then the intensity \eqn{\lambda}{\lambda} cannot be estimated, and
+  the parameter \eqn{\mu}{\mu} will be returned as \code{NA}.
+
+  The remaining arguments \code{rmin,rmax,q,p} control the
+  method of minimum contrast; see \code{\link{mincontrast}}.
+
+  The corresponding model can be simulated using \code{\link{rCauchy}}.
+  
+  For computational reasons, the optimisation procedure internally uses
+  the parameter \code{eta2}, which is equivalent to \code{4 * scale^2}
+  where \code{scale} is the scale parameter for the model as used in
+  \code{\link{rCauchy}}.
+  
+  Homogeneous or inhomogeneous Neyman-Scott/Cauchy models can also be
+  fitted using the function \code{\link{kppm}} and the fitted models
+  can be simulated using \code{\link{simulate.kppm}}.
+
+  The optimisation algorithm can be controlled through the
+  additional arguments \code{"..."} which are passed to the
+  optimisation function \code{\link[stats]{optim}}. For example,
+  to constrain the parameter values to a certain range,
+  use the argument \code{method="L-BFGS-B"} to select an optimisation
+  algorithm that respects box constraints, and use the arguments
+  \code{lower} and \code{upper} to specify (vectors of) minimum and
+  maximum values for each parameter.
+}
+\value{
+  An object of class \code{"minconfit"}. There are methods for printing
+  and plotting this object. It contains the following main components:
+  \item{par }{Vector of fitted parameter values.}
+  \item{fit }{Function value table (object of class \code{"fv"})
+    containing the observed values of the summary statistic
+    (\code{observed}) and the theoretical values of the summary
+    statistic computed from the fitted model parameters.
+  }
+}
+\references{
+  Ghorbani, M. (2012) Cauchy cluster process.
+  \emph{Metrika}, to appear.
+
+  Jalilian, A., Guan, Y. and Waagepetersen, R. (2013)
+  Decomposition of variance for spatial Cox processes.
+  \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137.
+
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\author{Abdollah Jalilian and Rasmus Waagepetersen.
+  Adapted for \pkg{spatstat} by \adrian
+  
+  
+}
+\seealso{
+  \code{\link{kppm}},
+  \code{\link{cauchy.estK}},
+  \code{\link{lgcp.estpcf}},
+  \code{\link{thomas.estpcf}},
+  \code{\link{vargamma.estpcf}},
+  \code{\link{mincontrast}},
+  \code{\link{pcf}},
+  \code{\link{pcfmodel}}.
+
+  \code{\link{rCauchy}} to simulate the model.
+}
+\examples{
+    u <- cauchy.estpcf(redwood)
+    u
+    plot(u, legendpos="topright")
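+    # a hedged sketch: pcfargs passes smoothing arguments to pcf.ppp;
+    # the value of 'stoyan' here is illustrative only
+    u2 <- cauchy.estpcf(redwood, pcfargs=list(stoyan=0.2))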
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/cbind.hyperframe.Rd b/man/cbind.hyperframe.Rd
new file mode 100644
index 0000000..3444e7e
--- /dev/null
+++ b/man/cbind.hyperframe.Rd
@@ -0,0 +1,63 @@
+\name{cbind.hyperframe}
+\alias{cbind.hyperframe}
+\alias{rbind.hyperframe}
+\title{
+  Combine Hyperframes by Rows or by Columns
+}
+\description{
+  Methods for \code{cbind} and \code{rbind} for hyperframes.
+}
+\usage{
+\method{cbind}{hyperframe}(...)
+\method{rbind}{hyperframe}(...)
+}
+\arguments{
+  \item{\dots}{
+    Any number of hyperframes (objects of class \code{\link{hyperframe}}).
+  }
+}
+\details{
+  These are methods for \code{\link{cbind}}
+  and \code{\link{rbind}} for hyperframes.
+  
+  Note that \emph{all} the arguments must be hyperframes (because of
+  the peculiar dispatch rules of \code{\link{cbind}} and
+  \code{\link{rbind}}).
+
+  To combine a hyperframe with a data frame, one should either 
+  convert the data frame to a hyperframe using
+  \code{\link{as.hyperframe}}, or explicitly invoke the
+  function \code{cbind.hyperframe} or \code{rbind.hyperframe}.
+  
+  In other words: if \code{h} is a hyperframe and \code{d} is a data frame,
+  the result of \code{cbind(h,d)} will be the same as
+  \code{cbind(as.data.frame(h), d)}, so that all hypercolumns
+  of \code{h} will be deleted (and a warning will be issued).
+  To combine \code{h} with \code{d}
+  so that all columns of \code{h} are retained,
+  type either \code{cbind(h, as.hyperframe(d))} or
+  \code{cbind.hyperframe(h,d)}.
+}
+\value{
+  Another hyperframe.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{hyperframe}},
+  \code{\link{as.hyperframe}}
+}
+\examples{
+  lambda <- runif(5, min=10, max=30)
+  X <- lapply(as.list(lambda), function(x) { rpoispp(x) })
+  h <- hyperframe(lambda=lambda, X=X)
+  g <- hyperframe(id=letters[1:5], Y=rev(X))
+  gh <- cbind(h, g)
+  hh <- rbind(h, h)
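+  # combining a hyperframe with a data frame: invoke the method
+  # explicitly so the hypercolumns of h are retained (see Details)
+  d <- data.frame(group=letters[1:5])
+  hd <- cbind.hyperframe(h, d)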
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/cdf.test.Rd b/man/cdf.test.Rd
new file mode 100644
index 0000000..820be40
--- /dev/null
+++ b/man/cdf.test.Rd
@@ -0,0 +1,291 @@
+\name{cdf.test} 
+\alias{cdf.test}
+\alias{cdf.test.ppm}
+\alias{cdf.test.lppm}
+\alias{cdf.test.lpp}
+\alias{cdf.test.ppp}
+\alias{cdf.test.slrm}
+\title{Spatial Distribution Test for Point Pattern or Point Process Model} 
+\description{
+  Performs a test of goodness-of-fit of a point process model.
+  The observed and predicted distributions
+  of the values of a spatial covariate are compared using either the
+  Kolmogorov-Smirnov test,
+  \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test
+  or Anderson-Darling test.
+  For non-Poisson models, a Monte Carlo test is used.
+}
+\usage{
+cdf.test(...)
+
+\method{cdf.test}{ppp}(X, covariate, test=c("ks", "cvm", "ad"), \dots,
+                       interpolate=TRUE, jitter=TRUE)
+
+\method{cdf.test}{ppm}(model, covariate,  test=c("ks", "cvm", "ad"), \dots,
+          interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE)
+
+\method{cdf.test}{lpp}(X, covariate,  test=c("ks", "cvm", "ad"), \dots,
+        interpolate=TRUE, jitter=TRUE)
+
+\method{cdf.test}{lppm}(model, covariate, test=c("ks", "cvm", "ad"), \dots,
+          interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE)
+
+\method{cdf.test}{slrm}(model, covariate,  test=c("ks", "cvm", "ad"), \dots, modelname=NULL, covname=NULL)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"} or \code{"lpp"}).
+  }
+  \item{model}{
+    A fitted point process model (object of class \code{"ppm"} or \code{"lppm"})
+    or fitted spatial logistic regression (object of class \code{"slrm"}).
+  }
+  \item{covariate}{
+    The spatial covariate on which the test will be based.
+    A function, a pixel image (object of class \code{"im"}),
+    a list of pixel images, or one of the characters
+    \code{"x"} or \code{"y"} indicating the Cartesian coordinates.
+  }
+  \item{test}{
+    Character string identifying the test to be performed:
+    \code{"ks"} for Kolmogorov-Smirnov test,
+    \code{"cvm"} for \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test
+    or \code{"ad"} for Anderson-Darling test.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link[stats]{ks.test}}
+    (from the \pkg{stats} package) or 
+    \code{\link[goftest]{cvm.test}} or
+    \code{\link[goftest]{ad.test}} (from the \pkg{goftest} package)
+    to control the test.
+  }
+  \item{interpolate}{
+    Logical flag indicating whether to interpolate pixel images.
+    If \code{interpolate=TRUE}, the value of the covariate
+    at each point of \code{X} will be approximated by interpolating
+    the nearby pixel values.
+    If \code{interpolate=FALSE}, the nearest pixel value will be used.
+  }
+  \item{jitter}{
+    Logical flag. If \code{jitter=TRUE}, values of the covariate
+    will be slightly perturbed at random, to avoid tied values in the test.
+  }
+  \item{modelname,covname}{
+    Character strings giving alternative names for \code{model}
+    and \code{covariate} to be used in labelling plot axes.
+  }
+  \item{nsim}{
+    Number of simulated realisations from the \code{model} to be used
+    for the Monte Carlo test, when \code{model} is not a Poisson process.
+  }
+  \item{verbose}{
+    Logical value indicating whether to print progress reports when
+    performing a Monte Carlo test.
+  }
+}
+\details{
+  These functions perform a goodness-of-fit test of a Poisson or Gibbs point
+  process model fitted to point pattern data. The observed distribution
+  of the values of a spatial covariate at the data points,
+  and the predicted distribution of the same values under the model,
+  are compared using the Kolmogorov-Smirnov test,
+  the \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test
+  or the Anderson-Darling test. For Gibbs models, a Monte Carlo test is
+  performed using these test statistics.
+
+  The function \code{cdf.test} is generic, with methods for
+  point patterns (\code{"ppp"} or \code{"lpp"}),
+  point process models (\code{"ppm"} or \code{"lppm"})
+  and spatial logistic regression models (\code{"slrm"}).
+  \itemize{
+    \item 
+    If \code{X} is a point pattern dataset (object of class
+    \code{"ppp"}), then \code{cdf.test(X, \dots)}
+    performs a goodness-of-fit test of the
+    uniform Poisson point process (Complete Spatial Randomness, CSR)
+    for this dataset.
+    For a multitype point pattern, the uniform intensity
+    is assumed to depend on the type of point (sometimes called
+    Complete Spatial Randomness and Independence, CSRI).
+    \item
+    If \code{model} is a fitted point process model
+    (object of class \code{"ppm"} or \code{"lppm"})
+    then \code{cdf.test(model, \dots)} performs
+    a test of goodness-of-fit for this fitted model. 
+    \item
+    If \code{model} is a fitted spatial logistic regression
+    (object of class \code{"slrm"}) then \code{cdf.test(model, \dots)} performs
+    a test of goodness-of-fit for this fitted model. 
+  }
+  The test is performed by comparing the observed distribution
+  of the values of a spatial covariate at the data points,
+  and the predicted distribution of the same covariate under the model,
+  using a classical goodness-of-fit test.
+  Thus, you must nominate
+  a spatial covariate for this test.
+  
+  If \code{X} is a point pattern that does not have marks,
+  the argument \code{covariate} should be either a \code{function(x,y)}
+  or a pixel image (object of class \code{"im"}) containing the values
+  of a spatial function, or one of the characters \code{"x"} or
+  \code{"y"} indicating the Cartesian coordinates.
+  If \code{covariate} is an image, it should have numeric values,
+  and its domain should cover the observation window of the
+  \code{model}. If \code{covariate} is a function, it should expect
+  two arguments \code{x} and \code{y} which are vectors of coordinates,
+  and it should return a numeric vector of the same length
+  as \code{x} and \code{y}.
+  
+  If \code{X} is a multitype point pattern, the argument \code{covariate}
+  can be either a \code{function(x,y,marks)},
+  or a pixel image, or a list of pixel images corresponding to
+  each possible mark value, or one of the characters \code{"x"} or
+  \code{"y"} indicating the Cartesian coordinates. 
+  
+  First the original data point pattern is extracted from \code{model}.
+  The values of the \code{covariate} at these data points are
+  collected. 
+
+  The predicted distribution of the values of the \code{covariate}
+  under the fitted \code{model} is computed as follows.
+  The values of the \code{covariate} at all locations in the
+  observation window are evaluated,
+  weighted according to the point process intensity of the fitted model,
+  and compiled into a cumulative distribution function \eqn{F} using
+  \code{\link{ewcdf}}.
+
+  The probability integral transformation is then applied:
+  the values of the \code{covariate} at the original data points
+  are transformed by the predicted cumulative distribution function
+  \eqn{F} into numbers between 0 and 1. If the model is correct,
+  these numbers are i.i.d. uniform random numbers.
+  A goodness-of-fit test of the uniform distribution is applied
+  to these numbers using \code{stats::\link[stats]{ks.test}},
+  \code{goftest::\link[goftest]{cvm.test}} or
+  \code{goftest::\link[goftest]{ad.test}}.
+
+  This test was apparently first described (in the context of
+  spatial data, and using Kolmogorov-Smirnov) by Berman (1986).
+  See also Baddeley et al (2005).
+
+  If \code{model} is not a Poisson process, then
+  a Monte Carlo test is performed, by generating \code{nsim}
+  point patterns which are simulated realisations of the \code{model},
+  re-fitting the model to each simulated point pattern, 
+  and calculating the test statistic for each fitted model.
+  The Monte Carlo \eqn{p} value is determined by comparing
+  the simulated values of the test statistic 
+  with the value for the original data.
+  
+  The return value is an object of class \code{"htest"} containing the
+  results of the hypothesis test. The print method for this class
+  gives an informative summary of the test outcome.
+
+  The return value also belongs to the class \code{"cdftest"}
+  for which there is a plot method \code{\link{plot.cdftest}}.
+  The plot method displays the empirical cumulative distribution
+  function of the covariate at the data points, and the predicted
+  cumulative distribution function of the covariate under the model,
+  plotted against the value of the covariate.
+
+  The argument \code{jitter} controls whether covariate values are
+  randomly perturbed, in order to avoid ties.
+  If the original data contains any ties in the covariate (i.e. points
+  with equal values of the covariate), and if \code{jitter=FALSE}, then 
+  the Kolmogorov-Smirnov test implemented in \code{\link[stats]{ks.test}}
+  will issue a warning that it cannot calculate the exact \eqn{p}-value.
+  To avoid this, if \code{jitter=TRUE} each value of the covariate will
+  be perturbed by adding a small random value. The perturbations are
+  normally distributed with standard deviation equal to one hundredth of
+  the range of values of the covariate. This prevents ties, 
+  and the \eqn{p}-value is still correct. There is
+  a very slight loss of power.
+}
+\value{
+  An object of class \code{"htest"} containing the results of the
+  test. See \code{\link[stats]{ks.test}} for details. The return value can be
+  printed to give an informative summary of the test.
+
+  The value also belongs to the class \code{"cdftest"} for which there is
+  a plot method.
+}
+\section{Warning}{
+  The outcome of the test involves a small amount of random variability,
+  because (by default) the covariate values are randomly perturbed to
+  avoid tied values. Hence, if \code{cdf.test} is executed twice, the
+  \eqn{p}-values will not be exactly the same. To avoid this behaviour,
+  set \code{jitter=FALSE}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{plot.cdftest}},
+  \code{\link{quadrat.test}},
+  \code{\link{berman.test}},
+  \code{\link[stats]{ks.test}},
+  \code{\link[goftest]{cvm.test}},
+  \code{\link[goftest]{ad.test}},
+  \code{\link{ppm}}
+}
+\references{
+  Baddeley, A., Turner, R.,
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005)
+  Residual analysis for spatial point processes.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{67}, 617--666.
+
+  Berman, M. (1986)
+  Testing for spatial association between a point process
+  and another stochastic process.
+  \emph{Applied Statistics} \bold{35}, 54--62.
+}
+\examples{
+   op <- options(useFancyQuotes=FALSE)
+
+   # test of CSR using x coordinate
+   cdf.test(nztrees, "x")
+   cdf.test(nztrees, "x", "cvm")
+   cdf.test(nztrees, "x", "ad")
+
+   # test of CSR using a function of x and y
+   fun <- function(x,y){2* x + y}
+   cdf.test(nztrees, fun)
+
+   # test of CSR using an image covariate
+   funimage <- as.im(fun, W=Window(nztrees))
+   cdf.test(nztrees, funimage)
+
+   # fit inhomogeneous Poisson model and test
+   model <- ppm(nztrees ~x)
+   cdf.test(model, "x")
+
+   if(interactive()) {
+     # synthetic data: nonuniform Poisson process
+     X <- rpoispp(function(x,y) { 100 * exp(x) }, win=square(1))
+
+     # fit uniform Poisson process
+     fit0 <- ppm(X ~1)
+     # fit correct nonuniform Poisson process
+     fit1 <- ppm(X ~x)
+
+     # test wrong model
+     cdf.test(fit0, "x")
+     # test right model
+     cdf.test(fit1, "x")
+   }
+
+   # multitype point pattern
+   cdf.test(amacrine, "x")
+   yimage <- as.im(function(x,y){y}, W=Window(amacrine))
+   cdf.test(ppm(amacrine ~marks+y), yimage)
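+   # suppress the random perturbation of tied covariate values
+   # (the 'jitter' argument described in Details)
+   cdf.test(nztrees, "x", jitter=FALSE)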
+
+   options(op)
+}
+\keyword{htest}
+\keyword{spatial}
+
diff --git a/man/cdf.test.mppm.Rd b/man/cdf.test.mppm.Rd
new file mode 100644
index 0000000..7003241
--- /dev/null
+++ b/man/cdf.test.mppm.Rd
@@ -0,0 +1,207 @@
+\name{cdf.test.mppm}
+\alias{cdf.test.mppm}
+\title{Spatial Distribution Test for Multiple Point Process Model} 
+\description{
+  Performs a spatial distribution test
+  of a point process model fitted to multiple spatial point
+  patterns.  The test compares the observed
+  and predicted distributions of the values of a spatial covariate,
+  using either the Kolmogorov-Smirnov,
+  \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises 
+  or Anderson-Darling test of goodness-of-fit. 
+}
+\usage{
+\method{cdf.test}{mppm}(model, covariate, test=c("ks", "cvm", "ad"), ...,
+            nsim=19, verbose=TRUE, interpolate=FALSE, fast=TRUE, jitter=TRUE)
+}
+\arguments{
+  \item{model}{
+    An object of class \code{"mppm"} representing a point process model
+    fitted to multiple spatial point patterns.
+  }
+  \item{covariate}{
+    The spatial covariate on which the test will be based.
+    A function, a pixel image, a list of functions, a list of pixel
+    images, a hyperframe, a character string containing the name
+    of one of the covariates in \code{model}, or one of the strings
+    \code{"x"} or \code{"y"}.
+  }
+  \item{test}{
+    Character string identifying the test to be performed:
+    \code{"ks"} for Kolmogorov-Smirnov test,
+    \code{"cvm"} for \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test
+    or \code{"ad"} for Anderson-Darling test.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{cdf.test}}
+    to control the test.
+  }
+  \item{nsim}{
+    Number of simulated realisations which should be generated,
+    if a Monte Carlo test is required.
+  }
+  \item{verbose}{Logical flag indicating whether to print
+    progress reports.
+  }
+  \item{interpolate}{
+    Logical flag indicating whether to interpolate between
+    pixel values when \code{covariate} is a pixel image.
+    See \emph{Details}.
+  }
+  \item{fast}{
+    Logical flag. If \code{TRUE}, values of the covariate
+    are only sampled at the original quadrature points used to
+    fit the model. If \code{FALSE}, values of the covariate
+    are sampled at all pixels, which can be slower by three orders of
+    magnitude.
+  }
+  \item{jitter}{
+    Logical flag. If \code{TRUE}, observed values of the covariate
+    are perturbed by adding small random values, to avoid
+    tied observations.
+  }
+}
+\details{
+  This function is a method for the generic function
+  \code{\link{cdf.test}} for the class \code{mppm}.
+
+  This function performs a goodness-of-fit test of
+  a point process model that has been fitted to multiple point patterns.
+  The observed distribution
+  of the values of a spatial covariate at the data points,
+  and the predicted distribution of the same values under the model,
+  are compared using the Kolmogorov-Smirnov,
+  \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises 
+  or Anderson-Darling test of goodness-of-fit.
+  These are exact tests if the model is Poisson;
+  otherwise, for a Gibbs model, a Monte Carlo p-value is computed by
+  generating simulated realisations of the model and applying the
+  selected goodness-of-fit test to each simulation.
+  
+  The argument \code{model} should be a fitted point process model
+  fitted to multiple point patterns
+  (object of class \code{"mppm"}). 
+  
+  The argument \code{covariate} contains the values of a spatial
+  function. It can be
+  \itemize{
+    \item a \code{function(x,y)}
+    \item a pixel image (object of class \code{"im"})
+    \item a list of \code{function(x,y)}, one for each point pattern
+    \item a list of pixel images, one for each point pattern
+    \item a hyperframe (see \code{\link{hyperframe}})
+    of which the first
+    column will be taken as containing the covariate
+    \item a character string giving the name of one of the covariates
+    in \code{model}
+    \item one of the character strings \code{"x"} or \code{"y"},
+    indicating the spatial coordinates.
+  }
+  If \code{covariate} is an image, it should have numeric values,
+  and its domain should cover the observation window of the
+  \code{model}. If \code{covariate} is a function, it should expect
+  two arguments \code{x} and \code{y} which are vectors of coordinates,
+  and it should return a numeric vector of the same length
+  as \code{x} and \code{y}.  
+
+  First the original data point pattern is extracted from \code{model}.
+  The values of the \code{covariate} at these data points are
+  collected. 
+
+  The predicted distribution of the values of the \code{covariate}
+  under the fitted \code{model} is computed as follows.
+  The values of the \code{covariate} at all locations in the
+  observation window are evaluated,
+  weighted according to the point process intensity of the fitted model,
+  and compiled into a cumulative distribution function \eqn{F} using
+  \code{\link{ewcdf}}.
+
+  The probability integral transformation is then applied:
+  the values of the \code{covariate} at the original data points
+  are transformed by the predicted cumulative distribution function
+  \eqn{F} into numbers between 0 and 1. If the model is correct,
+  these numbers are i.i.d. uniform random numbers. 
+  A goodness-of-fit test of the uniform distribution is applied
+  to these numbers using \code{\link[stats]{ks.test}},
+  \code{\link[goftest]{cvm.test}} or \code{\link[goftest]{ad.test}}.
+
+  The argument \code{interpolate} determines 
+  how pixel values will be handled when \code{covariate} is a pixel image.
+  The value of the covariate at a data point is obtained
+  by looking up the value of the nearest pixel if
+  \code{interpolate=FALSE}, or by linearly interpolating
+  between the values of the four nearest pixels 
+  if \code{interpolate=TRUE}. Linear interpolation is slower,
+  but is sometimes necessary to avoid tied values of the covariate
+  arising when the pixel grid is coarse.
+
+  If \code{model} is a Poisson point process, then the 
+  Kolmogorov-Smirnov,
+  \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises 
+  and Anderson-Darling tests are theoretically exact.
+  This test was apparently first described (in the context of
+  spatial data, and for Kolmogorov-Smirnov) by Berman (1986).
+  See also Baddeley et al (2005).
+
+  If \code{model} is not a Poisson point process, then the
+  Kolmogorov-Smirnov,
+  \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises 
+  and Anderson-Darling tests are biased.
+  Instead they are used as the basis of a Monte Carlo test.
+  First \code{nsim} simulated realisations of the model will be generated.
+  Each simulated realisation consists of a list of simulated point
+  patterns, one for each of the original data patterns. This
+  can take a very long time. The model is then re-fitted to each
+  simulation, and the refitted model is subjected to the goodness-of-fit
+  test described above. A Monte Carlo p-value is then computed by
+  comparing the p-value of the original test with the
+  p-values obtained from the simulations.
+}
+\value{
+  An object of class \code{"cdftest"} and \code{"htest"}
+  containing the results of the
+  test. See \code{\link{cdf.test}} for details.
+}
+\seealso{
+  \code{\link{cdf.test}},
+  \code{\link{quadrat.test}},
+  \code{\link{mppm}}
+}
+\references{
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  London: Chapman and Hall/CRC Press. 
+
+  Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005)
+  Residual analysis for spatial point processes.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{67}, 617--666.
+
+  Berman, M. (1986)
+  Testing for spatial association between a point process
+  and another stochastic process.
+  \emph{Applied Statistics} \bold{35}, 54--62.
+}
+\author{
+  \adrian, Ida-Maria Sintorn and Leanne Bischoff.
+  Implemented by 
+  \spatstatAuthors.
+}
+\examples{
+   # three i.i.d. realisations of nonuniform Poisson process
+   lambda <- as.im(function(x,y) { 300 * exp(x) }, square(1))
+   dat <- hyperframe(X=list(rpoispp(lambda), rpoispp(lambda), rpoispp(lambda)))
+
+   # fit uniform Poisson process
+   fit0 <- mppm(X~1, dat)
+   # fit correct nonuniform Poisson process
+   fit1 <- mppm(X~x, dat)
+
+   # test wrong model
+   cdf.test(fit0, "x")
+   # test right model
+   cdf.test(fit1, "x")
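+   # other goodness-of-fit statistics are selected via 'test'
+   # (illustrative)
+   cdf.test(fit1, "x", test="cvm")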
+}
+\keyword{htest}
+\keyword{spatial}
+
diff --git a/man/cells.Rd b/man/cells.Rd
new file mode 100644
index 0000000..3693872
--- /dev/null
+++ b/man/cells.Rd
@@ -0,0 +1,39 @@
+\name{cells}
+\alias{cells}
+\docType{data}
+\title{
+  Biological Cells Point Pattern
+}
+\description{
+The data record the locations of the centres of 42 biological cells
+observed under optical microscopy in a histological section. 
+The microscope field-of-view has been rescaled to the unit square.
+
+The data were recorded by F.H.C. Crick and B.D. Ripley,
+and analysed in Ripley (1977, 1981) and Diggle (1983).
+They are often used as a canonical example of an `ordered'
+point pattern.
+} 
+\format{
+  An object of class \code{"ppp"}
+  representing the point pattern of cell centres.
+  See \code{\link{ppp.object}} for details of the format.
+}
+\usage{data(cells)}
+\source{Crick and Ripley, see Ripley (1977)}
+\references{
+  Diggle, P.J. (1983)
+  \emph{Statistical analysis of spatial point patterns}.
+  Academic Press.
+
+  Ripley, B.D. (1977)
+  Modelling spatial patterns (with discussion).
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{39}, 172--212.
+
+  Ripley, B.D. (1981) 
+  \emph{Spatial statistics}.
+  John Wiley and Sons.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/centroid.owin.Rd b/man/centroid.owin.Rd
new file mode 100644
index 0000000..bd2dd93
--- /dev/null
+++ b/man/centroid.owin.Rd
@@ -0,0 +1,83 @@
+\name{centroid.owin}
+\alias{centroid.owin}
+\title{Centroid of a window}
+\description{
+  Computes the centroid (centre of mass) of a window 
+}
+\usage{
+ centroid.owin(w, as.ppp = FALSE)
+}
+\arguments{
+  \item{w}{A window}
+  \item{as.ppp}{Logical flag indicating whether to return the centroid
+    as a point pattern (\code{ppp} object)}
+}
+\value{
+  Either a list with components \code{x, y}, or a point pattern (of class
+  \code{ppp}) consisting of a single point, giving the coordinates of the
+  centroid of the window \code{w}.
+}
+\details{
+  The centroid of the window \code{w} is computed.
+  The centroid (``centre of mass'') 
+  is the point whose \eqn{x} and \eqn{y} coordinates 
+  are the mean values of the \eqn{x} and \eqn{y} coordinates
+  of all points in the window.
+
+  The argument \code{w} should be a window (an object of class
+  \code{"owin"}, see \code{\link{owin.object}} for details)
+  or can be given in any format acceptable to \code{\link{as.owin}()}.
+
+  The calculation uses an exact analytic formula for the case
+  of polygonal windows.
+
+  Note that the centroid of a window is not necessarily inside 
+  the window, unless the window is convex.
+  If  \code{as.ppp=TRUE} and
+  the centroid of \code{w} lies outside \code{w},
+  then the window of the returned point pattern
+  will be a rectangle containing the
+    original window (using \code{\link{as.rectangle}}).
+}
+\seealso{
+  \code{\link{owin}},
+  \code{\link{as.owin}}
+}
+\examples{
+  w <- owin(c(0,1),c(0,1))
+  centroid.owin(w)
+  # returns 0.5, 0.5
+
+  data(demopat)
+  w <- Window(demopat)
+  # an irregular window
+  cent <- centroid.owin(w, as.ppp = TRUE)
+  \dontrun{
+  plot(cent)
+  # plot the window and its centroid
+  }
+
+  wapprox <- as.mask(w)
+  # pixel approximation of window
+  \dontrun{
+  points(centroid.owin(wapprox))
+  # should be indistinguishable 
+  }
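+  # a hedged numerical check: the centroid is the mean of the
+  # coordinates of all points inside the window (pixel approximation;
+  # raster.xy extracts the pixel coordinates of the mask)
+  xy <- raster.xy(as.mask(w), drop=TRUE)
+  c(mean(xy$x), mean(xy$y))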
+  \testonly{
+  centroid.owin(w)
+  centroid.owin(wapprox)
+  }	
+}
+\author{\adrian
+  
+  ,
+  \rolf
+  
+  and \ege
+}
+\keyword{spatial}
+\keyword{math}
+
+
+
+
diff --git a/man/chicago.Rd b/man/chicago.Rd
new file mode 100644
index 0000000..bcfd1dc
--- /dev/null
+++ b/man/chicago.Rd
@@ -0,0 +1,66 @@
+\name{chicago}
+\alias{chicago}
+\docType{data}
+\title{
+  Chicago Crime Data
+}
+\description{
+  This dataset is a record of spatial locations of crimes
+  reported in the period 25 April to 8 May 2002,
+  in an area of Chicago (Illinois, USA)
+  close to the University of Chicago.
+  The original crime map was published in the
+  Chicago Weekly News in 2002. 
+  
+  The data give the spatial location (street address) of each crime report,
+  and the type of crime. The type labels are interpreted as follows:
+  \tabular{ll}{
+    \code{assault} \tab battery/assault \cr
+    \code{burglary} \tab burglary \cr
+    \code{cartheft} \tab motor vehicle theft \cr
+    \code{damage} \tab criminal damage \cr
+    \code{robbery} \tab robbery \cr
+    \code{theft} \tab theft \cr
+    \code{trespass} \tab criminal trespass
+  }
+  All crimes occurred on or near a street. The data give the
+  coordinates of all streets in the survey area, and their connectivity.
+
+  The dataset \code{chicago} is an object of class \code{"lpp"}
+  representing a point pattern on a linear network.
+  See \code{\link{lpp}} for further information on the format.
+
+  These data were published and analysed in
+  Ang, Baddeley and Nair (2012).
+} 
+\format{
+  Object of class \code{"lpp"}. 
+  See \code{\link{lpp}}.
+}
+\usage{data(chicago)}
+\examples{
+data(chicago)
+plot(chicago)
+plot(as.linnet(chicago), main="Chicago Street Crimes", col="green")
+plot(as.ppp(chicago), add=TRUE, col="red", chars=c(16,2,22,17,24,15,6))
+}
+\source{
+  Chicago Weekly News, 2002.
+  Manually digitised by \adrian.
+}
+\references{
+  Ang, Q.W. (2010)
+  \emph{Statistical methodology for events on a network}.
+  Master's thesis, School of Mathematics and Statistics, University of
+  Western Australia.
+  
+  Ang, Q.W., Baddeley, A. and Nair, G. (2012)
+  Geometrically corrected second-order analysis of 
+  events on a linear network, with applications to
+  ecology and criminology.
+  \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617.
+
+  Chicago Weekly News website: \url{www.chicagoweeklynews.com}
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/chop.tess.Rd b/man/chop.tess.Rd
new file mode 100644
index 0000000..a738569
--- /dev/null
+++ b/man/chop.tess.Rd
@@ -0,0 +1,60 @@
+\name{chop.tess}
+\alias{chop.tess}
+\title{Subdivide a Window or Tessellation using a Set of Lines}
+\description{
+  Divide a given window into tiles
+  delineated by a set of infinite straight lines, obtaining
+  a tessellation of the window.
+  Alternatively, given a tessellation, divide each tile of the
+  tessellation into sub-tiles delineated by the lines.
+}
+\usage{
+chop.tess(X, L)
+}
+\arguments{
+  \item{X}{
+    A window (object of class \code{"owin"}) or tessellation
+    (object of class \code{"tess"}) to be subdivided by lines.
+  }
+  \item{L}{
+    A set of infinite straight lines (object of class \code{"infline"}).
+  }
+}
+\details{
+  The argument \code{L} should be a set of infinite straight lines in the plane
+  (stored in an object \code{L} of class \code{"infline"} created by the
+  function \code{\link{infline}}).
+  
+  If \code{X} is a window, then it is divided into tiles
+  delineated by the lines in \code{L}.
+
+  If \code{X} is a tessellation, then each tile of \code{X} is
+  subdivided into sub-tiles delineated by the lines in \code{L}.
+
+  The result is a tessellation. 
+}
+\section{Warning}{
+  If \code{X} is a non-convex window, or a tessellation containing
+  non-convex tiles, then \code{chop.tess(X,L)} may contain a tile
+  which consists of several unconnected pieces. 
+}
+\value{
+  A tessellation (object of class \code{"tess"}).
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{infline}},
+  \code{\link{clip.infline}}
+}
+\examples{
+  L <- infline(p=1:3, theta=pi/4)
+  W <- square(4)
+  chop.tess(W, L)
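+  # subdividing each tile of a tessellation, rather than a window
+  # (an illustrative sketch using a grid tessellation of W)
+  Te <- tess(xgrid=0:4, ygrid=0:4)
+  chop.tess(Te, L)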
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/chorley.Rd b/man/chorley.Rd
new file mode 100644
index 0000000..d9dfab0
--- /dev/null
+++ b/man/chorley.Rd
@@ -0,0 +1,104 @@
+\name{chorley}
+\alias{chorley}
+\alias{chorley.extra}
+\docType{data}
+\title{Chorley-Ribble Cancer Data}
+\description{
+  Spatial locations of cases of cancer of the larynx
+  and cancer of the lung, and the location of a disused industrial
+  incinerator. A marked point pattern.
+} 
+\format{
+  The dataset \code{chorley} is
+  an object of class \code{"ppp"}
+  representing a marked point pattern.
+  Entries include
+  \tabular{ll}{
+    \code{x} \tab Cartesian \eqn{x}-coordinate of home address \cr
+    \code{y} \tab Cartesian \eqn{y}-coordinate of home address \cr
+    \code{marks} \tab factor with levels \code{larynx} and \code{lung} \cr
+    \tab indicating whether this is a case of cancer of the larynx\cr
+    \tab or cancer of the lung.
+  }
+  See \code{\link{ppp.object}} for details of the format.
+
+  The dataset \code{chorley.extra} is a list with two components.
+  The first component \code{plotit} is a function which will 
+  plot the data in a sensible fashion. The second
+  component \code{incin} is a list with entries \code{x} and \code{y}
+  giving the location of the industrial incinerator.
+
+  Coordinates are given in kilometres,
+  and the resolution is 100 metres (0.1 km)
+}
+\usage{data(chorley)}
+\examples{
+    chorley
+    summary(chorley)
+    chorley.extra$plotit()
+}
+\source{
+  Coordinates of cases were provided by the
+  Chorley and South Ribble Health Authority, and were
+  kindly supplied by Professor Peter Diggle.
+  Region boundary was digitised by \adrian, 2005, from
+  a photograph of an Ordnance Survey map.
+}
+\section{Notes}{
+  The data give the precise domicile addresses of new cases 
+  of cancer of the larynx (58 cases)
+  and cancer of the lung (978 cases),
+  recorded in the Chorley and South Ribble Health Authority
+  of Lancashire (England) between 1974 and 1983.
+  The supplementary data give the location of a disused industrial
+  incinerator.
+
+  The data were first presented and analysed by Diggle (1990).
+  They have subsequently been analysed by Diggle and Rowlingson (1994)
+  and Baddeley et al. (2005).
+  
+  The aim is to assess evidence for an increase in
+  the incidence of cancer of the larynx in the vicinity of the
+  now-disused industrial incinerator. The lung cancer cases serve as a
+  surrogate for the spatially-varying density of the susceptible
+  population.
+
+  The data are represented as a marked point pattern,
+  with the points giving the spatial location of each individual's home address
+  and the marks identifying whether each point is a case of
+  laryngeal cancer or lung cancer.
+
+  Coordinates are in kilometres, and the resolution is
+  100 metres (0.1 km).
+
+  The dataset \code{chorley} has a polygonal window with 132 edges
+  which closely approximates the boundary of the Chorley and South
+  Ribble Health Authority. 
+
+  Note that, due to the rounding of spatial coordinates,
+  the data contain duplicated points (two points at the
+  same location). To determine which points are duplicates,
+  use \code{\link{duplicated.ppp}}.
+  To remove the duplication, use \code{\link{unique.ppp}}.
+}
+\references{
+  Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005)
+  Residual analysis for spatial point processes.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{67}, 617--666.
+
+  Diggle, P. (1990) A point process modelling approach to  
+  raised incidence of a rare phenomenon in the vicinity
+  of a prespecified point.
+  \emph{Journal of the Royal Statistical Society, Series A}
+  \bold{153}, 349--362.
+
+  Diggle, P. and Rowlingson, B. (1994) A conditional approach
+  to point process modelling of elevated risk.
+  \emph{Journal of the Royal Statistical Society, Series A}
+  \bold{157}, 433--440.
+   
+} 
+\keyword{datasets}
+\keyword{spatial}
+
diff --git a/man/circdensity.Rd b/man/circdensity.Rd
new file mode 100644
index 0000000..0d83dda
--- /dev/null
+++ b/man/circdensity.Rd
@@ -0,0 +1,62 @@
+\name{circdensity}
+\alias{circdensity}
+\title{
+  Density Estimation for Circular Data
+}
+\description{
+  Computes a kernel smoothed estimate of the probability density
+  for angular data.
+}
+\usage{
+circdensity(x, sigma = "nrd0", \dots,
+               bw = NULL,
+               weights=NULL, unit = c("degree", "radian"))
+}
+\arguments{
+  \item{x}{
+    Numeric vector, containing angular data.
+  }
+  \item{sigma}{
+    Smoothing bandwidth, or bandwidth selection rule, passed to
+    \code{\link[stats]{density.default}}.
+  }
+  \item{bw}{Alternative to \code{sigma} for consistency with other functions.}
+  \item{\dots}{
+    Additional arguments passed to
+    \code{\link[stats]{density.default}},
+    such as \code{kernel} and \code{weights}.
+  }
+  \item{weights}{
+    Optional numeric vector of weights for the data in \code{x}.
+  }
+  \item{unit}{
+    The unit of angle in which \code{x} is expressed.
+  }
+}
+\details{
+  The angular values \code{x} are smoothed using
+  (by default) the wrapped Gaussian kernel with standard deviation \code{sigma}.
+}
+\value{
+  An object of class \code{"density"} (produced by
+  \code{\link[stats]{density.default}}) which can be plotted
+  by \code{plot} or by \code{\link{rose}}.
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link[stats]{density.default}},
+  \code{\link{rose}}.
+}
+\examples{
+  ang <- runif(1000, max=360)
+  rose(circdensity(ang, 12))
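+  # a larger bandwidth gives a smoother estimate
+  # (sigma is in degrees here, by the default unit)
+  rose(circdensity(ang, 30))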
+}
+\keyword{nonparametric}
+\keyword{smooth}
diff --git a/man/clarkevans.Rd b/man/clarkevans.Rd
new file mode 100644
index 0000000..6c94088
--- /dev/null
+++ b/man/clarkevans.Rd
@@ -0,0 +1,135 @@
+\name{clarkevans}
+\alias{clarkevans}
+\title{Clark and Evans Aggregation Index}
+\description{
+  Computes the Clark and Evans aggregation index
+  \eqn{R} for a spatial point pattern.
+}
+\usage{
+clarkevans(X, correction=c("none", "Donnelly", "cdf"),
+              clipregion=NULL)
+}
+\arguments{
+  \item{X}{
+    A spatial point pattern (object of class \code{"ppp"}).
+  }
+  \item{correction}{
+    Character vector.
+    The type of edge correction(s) to be applied.
+  }
+  \item{clipregion}{
+    Clipping region for the guard area correction.
+    A window (object of class \code{"owin"}).
+    See Details.
+  }
+}
+\details{
+  The Clark and Evans (1954) aggregation index \eqn{R} is a crude
+  measure of clustering or ordering of a point pattern.
+  It is the ratio of the observed mean nearest neighbour distance
+  in the pattern to that expected for a Poisson point process
+  of the same intensity.
+  A value \eqn{R>1} suggests ordering, while \eqn{R<1} suggests
+  clustering.
+
+  Without correction for edge effects, the value of \code{R} will be
+  positively biased. Edge effects arise because, for a point of \code{X}
+  close to the edge of the window, the true nearest neighbour may
+  actually lie outside the window. Hence observed nearest neighbour
+  distances tend to be larger than the true nearest neighbour distances.
+
+  The argument \code{correction} specifies an edge correction
+  or several edge corrections to be applied. It is a character vector
+  containing one or more of the options
+  \code{"none"}, \code{"Donnelly"}, \code{"guard"} and \code{"cdf"}
+  (which are recognised by partial matching).
+  These edge corrections are:
+  \describe{
+    \item{"none":}{
+      No edge correction is applied. 
+    }
+    \item{"Donnelly":}{
+      Edge correction of Donnelly (1978), available for rectangular
+      windows only.
+      The theoretical expected value of mean nearest neighbour distance
+      under a Poisson process is adjusted for edge effects by the 
+      edge correction of Donnelly (1978). The value of \eqn{R} is the
+      ratio of the observed mean nearest neighbour distance to this
+      adjusted theoretical mean.
+    }
+    \item{"guard":}{
+      Guard region or buffer area method.
+      The observed mean nearest neighbour distance
+      for the point pattern \code{X}
+      is re-defined by averaging only over those points of \code{X}
+      that fall inside the sub-window \code{clipregion}.
+    }
+    \item{"cdf":}{
+      Cumulative Distribution Function method.
+      The nearest neighbour distance distribution function \eqn{G(r)}
+      of the stationary point process is estimated by \code{\link{Gest}}
+      using the Kaplan-Meier type edge correction. Then the mean of the
+      distribution is calculated from the cdf. 
+    }
+  }
+  Alternatively \code{correction="all"} selects all options.
+
+  If the argument \code{clipregion} is given, then the selected
+  edge corrections will be assumed to include \code{correction="guard"}.
+
+  To perform a test based on the Clark-Evans index,
+  see \code{\link{clarkevans.test}}.
+}
+\value{
+  A numeric value, or a numeric vector with named components
+  \item{naive}{\eqn{R} without edge correction}
+  \item{Donnelly}{\eqn{R} using Donnelly edge correction}
+  \item{guard}{\eqn{R} using guard region}
+  \item{cdf}{\eqn{R} using cdf method}
+  (as selected by \code{correction}). The value of the \code{Donnelly}
+  component will be \code{NA} if the window of \code{X} is not a rectangle.
+}
+\references{
+  Clark, P.J. and Evans, F.C. (1954)
+  Distance to nearest neighbour as a measure of spatial
+  relationships in populations. \emph{Ecology} \bold{35},
+  445--453.
+
+  Donnelly, K. (1978) Simulations to determine the variance
+  and edge-effect of total nearest neighbour distance.
+  In I. Hodder (ed.) \emph{Simulation studies in archaeology},
+  Cambridge/New York: Cambridge University Press, pp 91--95.
+}
+\author{
+  John Rudge
+  \email{rudge@esc.cam.ac.uk}
+  with modifications by \adrian.
+}
+\seealso{
+  \code{\link{clarkevans.test}},
+  \code{\link{hopskel}},
+  \code{\link{nndist}},
+  \code{\link{Gest}}
+}
+\examples{
+  # Example of a clustered pattern
+  clarkevans(redwood)
+
+  # Example of an ordered pattern
+  clarkevans(cells)
+
+  # Random pattern
+  X <- rpoispp(100)
+  clarkevans(X)
+
+  # How to specify a clipping region
+  clip1 <- owin(c(0.1,0.9),c(0.1,0.9))
+  clip2 <- erosion(Window(cells), 0.1)
+  clarkevans(cells, clipregion=clip1)
+  clarkevans(cells, clipregion=clip2)
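+
+  # A hedged extra (not in the original examples): request all
+  # corrections at once, supplying a guard region as documented above
+  clarkevans(cells, correction="all", clipregion=clip2)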
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/clarkevans.test.Rd b/man/clarkevans.test.Rd
new file mode 100644
index 0000000..565c33c
--- /dev/null
+++ b/man/clarkevans.test.Rd
@@ -0,0 +1,108 @@
+\name{clarkevans.test}
+\alias{clarkevans.test}
+\title{Clark and Evans Test}
+\description{
+  Performs the Clark-Evans test of aggregation
+  for a spatial point pattern.
+}
+\usage{
+clarkevans.test(X, ...,
+               correction="none",
+               clipregion=NULL,
+               alternative=c("two.sided", "less", "greater",
+                             "clustered", "regular"),
+               nsim=999)
+}
+\arguments{
+  \item{X}{
+    A spatial point pattern (object of class \code{"ppp"}).
+  }
+  \item{\dots}{Ignored.}
+  \item{correction}{
+    Character string.
+    The type of edge correction to be applied.
+    See \code{\link{clarkevans}}.
+  }
+  \item{clipregion}{
+    Clipping region for the guard area correction.
+    A window (object of class \code{"owin"}).
+    See \code{\link{clarkevans}}.
+  }
+  \item{alternative}{
+    String indicating the type of alternative for the
+    hypothesis test. Partially matched.
+  }
+  \item{nsim}{
+    Number of Monte Carlo simulations to perform, if a Monte Carlo
+    p-value is required.
+  }
+}
+\details{
+  This command uses the Clark and Evans (1954) aggregation index \eqn{R}
+  as the basis for a crude test of clustering or ordering of a point pattern.
+  
+  The Clark-Evans index is computed by the function
+  \code{\link{clarkevans}}. See the help for \code{\link{clarkevans}}
+  for information about the Clark-Evans index \eqn{R} and about
+  the arguments \code{correction} and \code{clipregion}.
+
+  This command performs a hypothesis test of clustering or ordering of
+  the point pattern \code{X}. The null hypothesis is Complete
+  Spatial Randomness, i.e.\ a uniform Poisson process. The alternative
+  hypothesis is specified by the argument \code{alternative}:
+  \itemize{
+    \item \code{alternative="less"} or \code{alternative="clustered"}:
+    the alternative hypothesis
+    is that \eqn{R < 1} corresponding to a clustered point pattern;
+    \item \code{alternative="greater"} or \code{alternative="regular"}:
+    the alternative hypothesis
+    is that \eqn{R > 1} corresponding to a regular or ordered point pattern;
+    \item \code{alternative="two.sided"}:
+    the alternative hypothesis is that \eqn{R \neq 1}{R != 1}
+    corresponding to a clustered or regular pattern.
+  }
+  
+  The Clark-Evans index \eqn{R} is computed for the data
+  as described in \code{\link{clarkevans}}.
+
+  If \code{correction="none"} and \code{nsim} is missing,
+  the \eqn{p}-value for the test is computed by standardising
+  \eqn{R} as proposed by Clark and Evans (1954) and referring the
+  statistic to the standard Normal distribution.
+
+  Otherwise, the \eqn{p}-value for the test is computed
+  by Monte Carlo simulation of \code{nsim} realisations of
+  Complete Spatial Randomness conditional on the
+  observed number of points.
+}
+\value{
+  An object of class \code{"htest"} representing the result of the test.
+}
+\references{
+  Clark, P.J. and Evans, F.C. (1954)
+  Distance to nearest neighbour as a measure of spatial
+  relationships in populations. \emph{Ecology} \bold{35},
+  445--453.
+  
+  Donnelly, K. (1978) Simulations to determine the variance
+  and edge-effect of total nearest neighbour distance.
+  In I. Hodder (ed.) \emph{Simulation studies in archaeology},
+  Cambridge/New York: Cambridge University Press, pp 91--95.
+}
+\author{
+  \adrian.
+}
+\seealso{
+  \code{\link{clarkevans}},
+  \code{\link{hopskel.test}}
+}
+\examples{
+  # Redwood data - clustered
+  clarkevans.test(redwood)
+  clarkevans.test(redwood, alternative="clustered")
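+
+  # A hedged sketch: supplying nsim forces a Monte Carlo p-value;
+  # the small nsim here is only to keep the example fast
+  clarkevans.test(redwood, correction="cdf", nsim=39)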
+}
+\keyword{spatial}
+\keyword{nonparametric}
+\keyword{htest}
diff --git a/man/clickbox.Rd b/man/clickbox.Rd
new file mode 100644
index 0000000..9f24f0f
--- /dev/null
+++ b/man/clickbox.Rd
@@ -0,0 +1,50 @@
+\name{clickbox}
+\alias{clickbox}
+\title{Interactively Define a Rectangle}
+\description{
+  Allows the user to specify a rectangle by
+  point-and-click in the display.
+}
+\usage{
+  clickbox(add=TRUE, \dots)
+}
+\arguments{
+  \item{add}{
+    Logical value indicating whether to create a new plot
+    (\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}).
+  }
+  \item{\dots}{
+    Graphics arguments passed to \code{\link[graphics]{polygon}} to plot the
+    box.
+  }
+}
+\value{
+  A window (object of class \code{"owin"}) representing the
+  selected rectangle.
+}
+\details{
+  This function allows the user to create a rectangular window 
+  by interactively clicking on the screen display.
+
+  The user is prompted to point the mouse at any desired locations
+  for two corners of the rectangle,
+  and click the left mouse button to add each point.
+
+  The return value is a window (object of class \code{"owin"})
+  representing the rectangle.
+
+  This function uses the \R command \code{\link[graphics]{locator}} to
+  input the mouse clicks. It only works on screen devices such as
+  \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. 
+}
+\seealso{
+  \code{\link{clickpoly}},
+  \code{\link{clickppp}},
+  \code{\link{clickdist}},
+  \code{\link[graphics]{locator}}
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{iplot}
diff --git a/man/clickdist.Rd b/man/clickdist.Rd
new file mode 100644
index 0000000..a34ddc2
--- /dev/null
+++ b/man/clickdist.Rd
@@ -0,0 +1,38 @@
+\name{clickdist}
+\alias{clickdist}
+\title{Interactively Measure Distance}
+\description{
+  Measures the distance between two points
+  which the user has clicked on.
+}
+\usage{
+  clickdist()
+}
+\value{
+  A single nonnegative number.
+}
+\details{
+  This function allows the user to measure the distance
+  between two spatial locations, interactively,
+  by clicking on the screen display.
+
+  When \code{clickdist()} is called, the user is expected to
+  click two points in the current graphics device. The distance
+  between these points will be returned.
+
+  This function uses the \R{} command \code{\link[graphics]{locator}} to
+  input the mouse clicks. It only works on screen devices such as
+  \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}.
+}
+\seealso{
+  \code{\link[graphics]{locator}},
+  \code{\link{clickppp}},
+  \code{\link{clicklpp}},
+  \code{\link{clickpoly}},
+  \code{\link{clickbox}}
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{iplot}
diff --git a/man/clickjoin.Rd b/man/clickjoin.Rd
new file mode 100644
index 0000000..5d02cf6
--- /dev/null
+++ b/man/clickjoin.Rd
@@ -0,0 +1,72 @@
+\name{clickjoin}
+\alias{clickjoin}
+\title{
+  Interactively join vertices on a plot
+}
+\description{
+  Given a point pattern representing a set of vertices,
+  this command gives a point-and-click interface
+  allowing the user to join pairs of selected vertices by edges.
+}
+\usage{
+  clickjoin(X, \dots, add = TRUE, m = NULL, join = TRUE)
+}
+\arguments{
+  \item{X}{
+    Point pattern of vertices. An object of class \code{"ppp"}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{segments}} to control
+    the plotting of the new edges.
+  }
+  \item{add}{
+    Logical. Whether the point pattern \code{X} should be added to the
+    existing plot (\code{add=TRUE}) or a new plot should be created
+    (\code{add=FALSE}).
+  }
+  \item{m}{
+    Optional. Logical matrix specifying an initial
+    set of edges. There is an edge between vertices \code{i} and
+    \code{j} if \code{m[i,j] = TRUE}.
+  }
+  \item{join}{
+    Optional. If \code{TRUE}, then each user click will join a pair of
+    vertices. If \code{FALSE}, then each user click will delete an
+    existing edge. This is only relevant if \code{m} is supplied.
+  }
+}
+\details{
+  This function makes it easier for the user to create a
+  linear network or a planar graph, given a set of vertices.
+  
+  The function first displays the point pattern \code{X},
+  then repeatedly prompts the user to click on a pair of points in \code{X}.
+  Each selected pair of points will be joined
+  by an edge. The function returns a
+  logical matrix which has entries equal to \code{TRUE} for each
+  pair of vertices joined by an edge.
+
+  The selection of points is performed
+  using \code{\link{identify.ppp}} which typically expects the user to
+  click the left mouse button.  This point-and-click interaction
+  continues until the user terminates it, 
+  by pressing the middle mouse button,
+  or pressing the right mouse button and selecting \code{stop}.
+
+  The return value can be used in \code{\link{linnet}}
+  to create a linear network.
+}
+\value{
+  Logical matrix \code{m} with value \code{m[i,j] = TRUE} for every
+  pair of vertices \code{X[i]} and \code{X[j]} that should be joined by
+  an edge.
+}
+\author{
+  \adrian.
+}
+\seealso{
+  \code{\link{linnet}},
+  \code{\link{clickppp}}
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/clicklpp.Rd b/man/clicklpp.Rd
new file mode 100644
index 0000000..9d3113e
--- /dev/null
+++ b/man/clicklpp.Rd
@@ -0,0 +1,83 @@
+\name{clicklpp}
+\alias{clicklpp}
+\title{Interactively Add Points on a Linear Network}
+\description{
+  Allows the user to create a point pattern on a linear network by
+  point-and-click in the display.
+}
+\usage{
+  clicklpp(L, n=NULL, types=NULL, \dots,
+           add=FALSE, main=NULL, hook=NULL)
+}
+\arguments{
+  \item{L}{
+     Linear network on which the points will be placed.
+     An object of class \code{"linnet"}.
+  }
+  \item{n}{
+    Number of points to be added (if this is predetermined).
+  }
+  \item{types}{
+    Vector of types, when creating a multitype point pattern.
+  }
+  \item{\dots}{
+    Optional extra arguments to be passed to \code{\link[graphics]{locator}}
+    to control the display.
+  }
+  \item{add}{
+    Logical value indicating whether to create a new plot
+    (\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}).
+  }
+  \item{main}{
+    Main heading for plot.
+  }
+  \item{hook}{For internal use only. Do not use this argument.}
+}
+\value{
+  A point pattern (object of class \code{"lpp"}).
+}
+\details{
+  This function allows the user to create a point pattern
+  on a linear network
+  by interactively clicking on the screen display.
+
+  First the linear network \code{L} is plotted on the current screen device.
+  Then the user is prompted to point the mouse at any desired locations
+  and click the left mouse button to add each point.
+  Interactive input stops after \code{n} clicks (if \code{n} was given)
+  or when the middle mouse button is pressed.
+
+  The return value is a point pattern on the network \code{L},
+  containing the locations of all the clicked points,
+  after they have been projected onto the network \code{L}.
+  Any points that were clicked outside the bounding window of the network
+  will be ignored.
+
+  If the argument \code{types} is given, then a multitype point pattern
+  will be created. The user is
+  prompted to input the locations of points of type \code{types[i]},
+  for each successive index \code{i}. (If the argument \code{n} was
+  given, there will be \code{n} points of \emph{each} type.)
+  The return value is a multitype point pattern on a linear network.
+  
+  This function uses the \R{} command \code{\link[graphics]{locator}} to
+  input the mouse clicks. It only works on screen devices such as
+  \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. Arguments that can be
+  passed to \code{\link[graphics]{locator}} through \code{\dots} include
+  \code{pch} (plotting character), \code{cex} (character expansion
+  factor) and \code{col} (colour). See \code{\link[graphics]{locator}}
+  and \code{\link[graphics]{par}}.
+}
+\seealso{
+  \code{\link{clickppp}},
+  \code{\link{identify.lpp}},
+  \code{\link[graphics]{locator}},
+  \code{\link{clickpoly}},
+  \code{\link{clickbox}},
+  \code{\link{clickdist}}
+}
+\author{
+  \spatstatAuthors, based on an idea by Dominic Schuhmacher.  
+}
+\keyword{spatial}
+\keyword{iplot}
diff --git a/man/clickpoly.Rd b/man/clickpoly.Rd
new file mode 100644
index 0000000..e7af647
--- /dev/null
+++ b/man/clickpoly.Rd
@@ -0,0 +1,68 @@
+\name{clickpoly}
+\alias{clickpoly}
+\title{Interactively Define a Polygon}
+\description{
+  Allows the user to create a polygon by
+  point-and-click in the display.
+}
+\usage{
+  clickpoly(add=FALSE, nv=NULL, np=1, \dots)
+}
+\arguments{
+  \item{add}{
+    Logical value indicating whether to create a new plot
+    (\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}).
+  }
+  \item{nv}{
+    Number of vertices of the polygon (if this is predetermined).
+  }
+  \item{np}{
+    Number of polygons to create.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link[graphics]{locator}} to control the
+    interactive plot, and to \code{\link[graphics]{polygon}} to plot the
+    polygons.
+  }
+}
+\value{
+  A window (object of class \code{"owin"}) representing the polygon.
+}
+\details{
+  This function allows the user to create a polygonal window 
+  by interactively clicking on the screen display.
+
+  The user is prompted to point the mouse at any desired locations
+  for the polygon vertices,
+  and click the left mouse button to add each point.
+  Interactive input stops after \code{nv} clicks (if \code{nv} was given)
+  or when the middle mouse button is pressed.
+
+  The return value is a window (object of class \code{"owin"})
+  representing the polygon.
+
+  This function uses the \R command \code{\link[graphics]{locator}} to
+  input the mouse clicks. It only works on screen devices such as
+  \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. Arguments that can be
+  passed to \code{\link[graphics]{locator}} through \code{\dots} include
+  \code{pch} (plotting character), \code{cex} (character expansion
+  factor) and \code{col} (colour). See \code{\link[graphics]{locator}}
+  and \code{\link[graphics]{par}}.
+
+  Multiple polygons can also be drawn, by specifying
+  \code{np > 1}. The polygons must be disjoint. The result is 
+  a single window object consisting of all the polygons.
+}
+\seealso{
+  \code{\link{identify.ppp}},
+  \code{\link{clickbox}},
+  \code{\link{clickppp}},
+  \code{\link{clickdist}},
+  \code{\link[graphics]{locator}}
+}
+\author{
+  \adrian
+  and \rolf.
+}
+\keyword{spatial}
+\keyword{iplot}
diff --git a/man/clickppp.Rd b/man/clickppp.Rd
new file mode 100644
index 0000000..080b0a2
--- /dev/null
+++ b/man/clickppp.Rd
@@ -0,0 +1,83 @@
+\name{clickppp}
+\alias{clickppp}
+\title{Interactively Add Points}
+\description{
+  Allows the user to create a point pattern by
+  point-and-click in the display.
+}
+\usage{
+  clickppp(n=NULL, win=square(1), types=NULL, \dots, add=FALSE,
+    main=NULL, hook=NULL)
+}
+\arguments{
+  \item{n}{
+    Number of points to be added (if this is predetermined).
+  }
+  \item{win}{
+    Window in which to create the point pattern.
+    An object of class \code{"owin"}.
+  }
+  \item{types}{
+    Vector of types, when creating a multitype point pattern.
+  }
+  \item{\dots}{
+    Optional extra arguments to be passed to \code{\link[graphics]{locator}}
+    to control the display.
+  }
+  \item{add}{
+    Logical value indicating whether to create a new plot
+    (\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}).
+  }
+  \item{main}{
+    Main heading for plot.
+  }
+  \item{hook}{For internal use only. Do not use this argument.}
+}
+\value{
+  A point pattern (object of class \code{"ppp"}).
+}
+\details{
+  This function allows the user to create a point pattern
+  by interactively clicking on the screen display.
+
+  First the window \code{win} is plotted on the current screen device.
+  Then the user is prompted to point the mouse at any desired locations
+  and click the left mouse button to add each point.
+  Interactive input stops after \code{n} clicks (if \code{n} was given)
+  or when the middle mouse button is pressed.
+
+  The return value is a point pattern
+  containing the locations of all the clicked points
+  inside the original window \code{win},
+  provided that all of the clicked locations were
+  inside this window. Otherwise, the window is expanded to a box
+  large enough to contain all the points (as well as containing
+  the original window).
+
+  If the argument \code{types} is given, then a multitype point pattern
+  will be created. The user is
+  prompted to input the locations of points of type \code{types[i]},
+  for each successive index \code{i}. (If the argument \code{n} was
+  given, there will be \code{n} points of \emph{each} type.)
+  The return value is a multitype point pattern.
+  
+  This function uses the \R{} command \code{\link[graphics]{locator}} to
+  input the mouse clicks. It only works on screen devices such as
+  \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. Arguments that can be
+  passed to \code{\link[graphics]{locator}} through \code{\dots} include
+  \code{pch} (plotting character), \code{cex} (character expansion
+  factor) and \code{col} (colour). See \code{\link[graphics]{locator}}
+  and \code{\link[graphics]{par}}.
+}
+\seealso{
+  \code{\link{identify.ppp}},
+  \code{\link[graphics]{locator}},
+  \code{\link{clickpoly}},
+  \code{\link{clickbox}},
+  \code{\link{clickdist}}
+}
+\author{Original by Dominic Schuhmacher.
+  Adapted by \adrian and \rolf.
+}
+\keyword{spatial}
+\keyword{iplot}
diff --git a/man/clip.infline.Rd b/man/clip.infline.Rd
new file mode 100644
index 0000000..523b3be
--- /dev/null
+++ b/man/clip.infline.Rd
@@ -0,0 +1,48 @@
+\name{clip.infline}
+\alias{clip.infline}
+\title{Intersect Infinite Straight Lines with a Window}
+\description{
+  Take the intersection between a set of infinite straight lines
+  and a window, yielding a set of line segments.
+}
+\usage{
+clip.infline(L, win)
+}
+\arguments{
+  \item{L}{
+    Object of class \code{"infline"} specifying a set of infinite
+    straight lines in the plane.
+  }
+  \item{win}{
+    Window (object of class \code{"owin"}).
+  }
+}
+\details{
+  This function computes the intersection between
+  a set of infinite straight lines in the plane
+  (stored in an object \code{L} of class \code{"infline"} created by the
+  function \code{\link{infline}}) and a window \code{win}.
+  The result is a pattern of line segments. Each line segment carries a
+  mark indicating which line it belongs to.
+}
+\value{
+  A line segment pattern (object of class \code{"psp"})
+  with a single column of marks.
+}
+\author{
+  \adrian
+  and \rolf.
+}
+\seealso{
+  \code{\link{infline}}, \code{\link{psp}}.
+
+  To divide a window into pieces using infinite lines,
+  use \code{\link{chop.tess}}.
+}
+\examples{
+  L <- infline(p=1:3, theta=pi/4)
+  W <- square(4)
+  clip.infline(L, W)
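+
+  # A hedged illustration: plot the clipped segments over the window
+  plot(W, main="")
+  plot(clip.infline(L, W), add=TRUE)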
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/clmfires.Rd b/man/clmfires.Rd
new file mode 100644
index 0000000..f8191ea
--- /dev/null
+++ b/man/clmfires.Rd
@@ -0,0 +1,102 @@
+\name{clmfires}
+\alias{clmfires}
+\alias{clmfires.extra}
+\docType{data}
+\title{
+  Castilla-La Mancha Forest Fires
+}
+\description{
+  This dataset is a record of forest fires in the Castilla-La Mancha
+  region of Spain between 1998 and 2007.  This region is approximately
+  400 by 400 kilometres.  The coordinates are recorded in kilometres.
+
+  The dataset \code{clmfires} is a point pattern (object of class
+  \code{"ppp"}) containing the spatial coordinates of each fire,
+  with marks containing information about each fire.  There are 4
+  columns of marks:
+  \tabular{ll}{
+    \code{cause} \tab cause of fire (see below) \cr
+    \code{burnt.area} \tab total area burned, in hectares \cr
+    \code{date} \tab the date of fire, as a value of class \code{Date} \cr
+    \code{julian.date} \tab number of days elapsed since 1 January 1998 \cr
+  }
+  The \code{cause} of the fire is a factor with the levels
+  \code{lightning}, \code{accident} (for accidents or negligence),
+  \code{intentional} (for intentionally started fires) and
+  \code{other} (for other causes including unknown cause).
+
+  The format of \code{date} is \dQuote{Year-month-day}, e.g.
+  \dQuote{2005-07-14} means 14 July, 2005.
+
+  The accompanying dataset \code{clmfires.extra} is a list
+  of two items \code{clmcov100} and \code{clmcov200} containing covariate
+  information for the entire Castilla-La Mancha region. Each
+  of these two elements is a list of four images (objects of
+  class \code{"im"}) named \code{elevation}, \code{orientation},
+  \code{slope} and \code{landuse}.  The \code{landuse} image is
+  factor-valued with the factor having levels \code{urban},
+  \code{farm} (for farms or orchards), \code{meadow},
+  \code{denseforest} (for dense forest), \code{conifer} (for conifer
+  forest or plantation), \code{mixedforest}, \code{grassland},
+  \code{bush}, \code{scrub} and \code{artifgreen} (for artificial
+  greens such as golf courses).
+
+  These images (effectively) provide values for the four
+  covariates at every location in the study area. The images in
+  \code{clmcov100} are 100 by 100 pixels in size, while those in
+  \code{clmcov200} are 200 by 200 pixels.  For easy handling,
+  \code{clmcov100} and \code{clmcov200} also belong to the
+  class \code{"listof"} so that they can be plotted and printed
+  immediately.
+} 
+\format{
+  \code{clmfires} is a marked point pattern (object of class \code{"ppp"}). 
+  See \code{\link{ppp.object}}.
+
+  \code{clmfires.extra} is a list with two components, named
+  \code{clmcov100} and \code{clmcov200}, which are lists of pixel images
+  (objects of class \code{"im"}).
+}
+\section{Remark}{
+The precision with which the coordinates of the locations of the
+fires were recorded changed between 2003 and 2004.  From 1998 to 2003 many of
+the locations were recorded as the centroid of the corresponding
+\dQuote{district unit}; the rest were recorded as exact UTM
+coordinates of the centroids of the fires.  In 2004 the system
+changed and the exact UTM coordinates of the centroids of the fires
+were used for \emph{all} fires.  There is thus a strongly apparent
+\dQuote{gridlike} quality to the fire locations for the years 1998
+to 2003.
+
+There is however no actual duplication of points in the 1998 to 2003
+patterns due to \dQuote{jittering} having been applied in order to
+avoid such duplication.  It is not clear just \emph{how} the fire
+locations were jittered.  It seems unlikely that the jittering was
+done using the \code{jitter()} function from \code{R} or the
+\pkg{spatstat} function \code{\link{rjitter}}.
+
+Of course there are many sets of points which are \emph{virtually}
+identical, being separated by distances induced by the jittering.
+Typically these distances are of the order of 40 metres which
+is unlikely to be meaningful on the scale at which forest fires
+are observed.
+
+Caution should therefore be exercised in any analyses of the patterns
+for the years 1998 to 2003.
+}
+
+\usage{data(clmfires)}
+\examples{
+plot(clmfires, which.marks="cause", cols=2:5, cex=0.25)
+plot(clmfires.extra$clmcov100)
+# Split the clmfires pattern by year and plot the first and last years:
+yr  <- factor(format(marks(clmfires)$date,format="\%Y"))
+X   <- split(clmfires,f=yr)
+fAl <- c("1998","2007")
+plot(X[fAl],use.marks=FALSE,main.panel=fAl,main="")
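+# A hedged extra: tabulate the fires by the 'cause' mark described above
+table(marks(clmfires)$cause)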
+}
+\source{
+  Professor Jorge Mateu.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/closepairs.Rd b/man/closepairs.Rd
new file mode 100644
index 0000000..112db84
--- /dev/null
+++ b/man/closepairs.Rd
@@ -0,0 +1,159 @@
+\name{closepairs}
+\alias{closepairs}
+\alias{crosspairs}
+\alias{closepairs.ppp}
+\alias{crosspairs.ppp}
+\alias{closepaircounts}
+\alias{crosspaircounts}
+\title{
+  Close Pairs of Points
+}
+\description{
+  Low-level functions to find all close pairs of points.
+}
+\usage{
+closepaircounts(X, r)
+
+crosspaircounts(X, Y, r)
+
+closepairs(X, rmax, \dots)
+
+\method{closepairs}{ppp}(X, rmax, twice=TRUE,
+                         what=c("all","indices","ijd"),
+                         distinct=TRUE, neat=TRUE, \dots)
+
+crosspairs(X, Y, rmax, \dots)
+
+\method{crosspairs}{ppp}(X, Y, rmax, what=c("all", "indices", "ijd"), \dots)
+}
+\arguments{
+  \item{X,Y}{
+    Point patterns (objects of class \code{"ppp"}).
+  }
+  \item{r,rmax}{
+    Maximum distance between pairs of points to be counted as close pairs.
+  }
+  \item{twice}{
+    Logical value indicating whether all ordered pairs of close points
+    should be returned. If \code{twice=TRUE} (the default),
+    each pair will appear twice in the output, as \code{(i,j)}
+    and again as \code{(j,i)}. If \code{twice=FALSE},
+    then each pair will appear only once, as the pair \code{(i,j)}
+    with \code{i < j}.
+  }
+  \item{what}{
+    String specifying the data to be returned for each close pair of points.
+    If \code{what="all"} (the default) then the
+    returned information includes the indices \code{i,j} of each pair,
+    their \code{x,y} coordinates, and the distance between them.
+    If \code{what="indices"} then only the indices \code{i,j} are
+    returned.
+    If \code{what="ijd"} then the indices \code{i,j} and the
+    distance \code{d} are returned.
+  }
+  \item{distinct}{
+    Logical value indicating whether to return only the
+    pairs of points with different indices \code{i} and \code{j}
+    (\code{distinct=TRUE}, the default) or to also include
+    the pairs where \code{i=j} (\code{distinct=FALSE}).
+  }
+  \item{neat}{
+    Logical value indicating whether to ensure that \code{i < j}
+    in each output pair, when \code{twice=FALSE}. 
+  }
+  \item{\dots}{Extra arguments, ignored by methods.}
+}
+\details{
+  These are the efficient low-level functions used by \pkg{spatstat}
+  to find all close pairs of points in a point pattern
+  or all close pairs between two point patterns. 
+
+  \code{closepaircounts(X,r)} counts the number of neighbours for
+  each point in the pattern \code{X}. That is, for each point
+  \code{X[i]}, it counts the number of other points \code{X[j]}
+  with \code{j != i} such that \code{d(X[i],X[j]) <= r} where
+  \code{d} denotes Euclidean distance. The result is an integer vector
+  \code{v} such that \code{v[i]} is the number of neighbours of
+  \code{X[i]}.
+
+  \code{crosspaircounts(X,Y,r)} counts, for each point 
+  in the pattern \code{X}, the number of neighbours in the pattern
+  \code{Y}. That is, for each point
+  \code{X[i]}, it counts the number of points \code{Y[j]}
+  such that \code{d(X[i],Y[j]) <= r}. The result is an integer vector
+  \code{v} such that \code{v[i]} is the number of neighbours of
+  \code{X[i]} in the pattern \code{Y}.
+
+  \code{closepairs(X,rmax)} identifies all pairs of distinct neighbours 
+  in the pattern \code{X} and returns them. The result is
+  a list with the following components:
+  \describe{
+    \item{i}{Integer vector of indices of the first point in each pair.}
+    \item{j}{Integer vector of indices of the second point in each pair.}
+    \item{xi,yi}{Coordinates of the first point in each pair.}
+    \item{xj,yj}{Coordinates of the second point in each pair.}
+    \item{dx}{Equal to \code{xj-xi}.}
+    \item{dy}{Equal to \code{yj-yi}.}
+    \item{d}{Euclidean distance between each pair of points.}
+  }
+  If \code{what="indices"} then only the components \code{i} and
+  \code{j} are returned. This is slightly faster and uses less memory.
+
+  \code{crosspairs(X,Y,rmax)} identifies all pairs of neighbours
+  \code{(X[i], Y[j])} between the patterns \code{X} and \code{Y},
+  and returns them. The result is
+  a list with the same format as for \code{closepairs}.
+}
+\section{Warning about accuracy}{
+  The results of these functions may not agree exactly with
+  the correct answer (as calculated by a human) and may not
+  be consistent between different computers and different installations
+  of \R. The discrepancies arise in marginal cases where the interpoint
+  distance is equal to, or very close to, the threshold \code{rmax}.
+
+  Floating-point numbers in a computer
+  are not mathematical Real Numbers: they are approximations using
+  finite-precision binary arithmetic.
+  The approximation is accurate to a tolerance of about
+  \code{.Machine$double.eps}.
+
+  If the true interpoint distance \eqn{d} and the threshold \code{rmax}
+  are equal, or if their difference is no more than \code{.Machine$double.eps},
+  the result may be incorrect.
+}
+\value{
+  For \code{closepaircounts} and \code{crosspaircounts}, an integer
+  vector of length equal to the number of points in \code{X}.
+
+  For \code{closepairs} and \code{crosspairs}, 
+  a list with components \code{i} and \code{j},
+  and possibly other components as described under Details.
+}
+\author{\adrian and \rolf.}
+\seealso{
+  \code{\link{closepairs.pp3}} for the corresponding
+  functions for 3D point patterns.
+  
+  \code{\link{Kest}}, \code{\link{Kcross}},
+  \code{\link{nndist}}, \code{\link{nncross}},
+  \code{\link{applynbd}}, \code{\link{markstat}}
+  for functions which use these capabilities.
+}
+\examples{
+   a <- closepaircounts(cells, 0.1)
+   sum(a)
+
+   Y <- split(amacrine)
+   b <- crosspaircounts(Y$on, Y$off, 0.1)
+
+   d <- closepairs(cells, 0.1)
+   e <- crosspairs(Y$on, Y$off, 0.1)
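+
+   # a hedged extra: what="indices" returns only i and j,
+   # which is slightly faster and uses less memory
+   ij <- closepairs(cells, 0.1, what="indices")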
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/closepairs.pp3.Rd b/man/closepairs.pp3.Rd
new file mode 100644
index 0000000..80ed460
--- /dev/null
+++ b/man/closepairs.pp3.Rd
@@ -0,0 +1,115 @@
+\name{closepairs.pp3}
+\alias{closepairs.pp3}
+\alias{crosspairs.pp3}
+\title{
+  Close Pairs of Points in 3 Dimensions
+}
+\description{
+  Low-level functions to find all close pairs of points
+  in three-dimensional point patterns.
+}
+\usage{
+\method{closepairs}{pp3}(X, rmax, twice=TRUE,
+                         what=c("all", "indices"),
+                         distinct=TRUE, neat=TRUE, \dots)
+
+\method{crosspairs}{pp3}(X, Y, rmax, what=c("all", "indices"), \dots) 
+}
+\arguments{
+  \item{X,Y}{
+    Point patterns in three dimensions (objects of class \code{"pp3"}).
+  }
+  \item{rmax}{
+    Maximum distance between pairs of points to be counted as close pairs.
+  }
+  \item{twice}{
+    Logical value indicating whether all ordered pairs of close points
+    should be returned. If \code{twice=TRUE}, each pair will appear twice
+    in the output, as \code{(i,j)} and again as \code{(j,i)}. If
+    \code{twice=FALSE}, then each pair will appear only once,
+    as the pair \code{(i,j)} such that \code{i < j}.
+  }
+  \item{what}{
+    String specifying the data to be returned for each close pair of points.
+    If \code{what="all"} (the default) then the
+    returned information includes the indices \code{i,j} of each pair,
+    their \code{x,y,z} coordinates, and the distance between them.
+    If \code{what="indices"} then only the indices \code{i,j} are returned.
+  }
+  \item{distinct}{
+    Logical value indicating whether to return only the
+    pairs of points with different indices \code{i} and \code{j}
+    (\code{distinct=TRUE}, the default) or to also include
+    the pairs where \code{i=j} (\code{distinct=FALSE}).
+  }
+  \item{neat}{
+    Logical value indicating whether to ensure that \code{i < j}
+    in each output pair, when \code{twice=FALSE}. 
+  }
+  \item{\dots}{Ignored.}
+}
+\details{
+  These are the efficient low-level functions used by \pkg{spatstat}
+  to find all close pairs of points in a three-dimensional point pattern
+  or all close pairs between two point patterns in three dimensions.
+
+  \code{closepairs(X,rmax)} identifies all pairs of neighbours 
+  in the pattern \code{X} and returns them. The result is
+  a list with the following components:
+  \describe{
+    \item{i}{Integer vector of indices of the first point in each pair.}
+    \item{j}{Integer vector of indices of the second point in each pair.}
+    \item{xi,yi,zi}{Coordinates of the first point in each pair.}
+    \item{xj,yj,zj}{Coordinates of the second point in each pair.}
+    \item{dx}{Equal to \code{xj-xi}.}
+    \item{dy}{Equal to \code{yj-yi}.}
+    \item{dz}{Equal to \code{zj-zi}.}
+    \item{d}{Euclidean distance between each pair of points.}
+  }
+  If \code{what="indices"} then only the components \code{i} and
+  \code{j} are returned. This is slightly faster.
+
+  \code{crosspairs(X,Y,rmax)} identifies all pairs of neighbours
+  \code{(X[i], Y[j])} between the patterns \code{X} and \code{Y},
+  and returns them. The result is
+  a list with the same format as for \code{closepairs}.
+}
+\section{Warning about accuracy}{
+  The results of these functions may not agree exactly with
+  the correct answer (as calculated by a human) and may not
+  be consistent between different computers and different installations
+  of \R. The discrepancies arise in marginal cases where the interpoint
+  distance is equal to, or very close to, the threshold \code{rmax}.
+
+  Floating-point numbers in a computer
+  are not mathematical Real Numbers: they are approximations using
+  finite-precision binary arithmetic.
+  The approximation is accurate to a tolerance of about
+  \code{.Machine$double.eps}.
+
+  If the true interpoint distance \eqn{d} and the threshold \code{rmax}
+  are equal, or if their difference is no more than \code{.Machine$double.eps},
+  the result may be incorrect.
+}
+\value{
+  A list with components \code{i} and \code{j},
+  and possibly other components as described under Details.
+}
+\author{\adrian, \rolf and \ege.}
+\seealso{
+  \code{\link{closepairs}}
+}
+\examples{
+   X <- pp3(runif(10), runif(10), runif(10), box3(c(0,1)))
+   Y <- pp3(runif(10), runif(10), runif(10), box3(c(0,1)))
+   a <- closepairs(X, 0.1)
+   b <- crosspairs(X, Y, 0.1)
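+
+   # a hedged extra: indices only, as described in Details
+   ij <- closepairs(X, 0.1, what="indices")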
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/closetriples.Rd b/man/closetriples.Rd
new file mode 100644
index 0000000..ab52dfe
--- /dev/null
+++ b/man/closetriples.Rd
@@ -0,0 +1,42 @@
+\name{closetriples}
+\alias{closetriples}
+\title{
+  Close Triples of Points
+}
+\description{
+  Low-level function to find all close triples of points.
+}
+\usage{
+closetriples(X, rmax)
+}
+\arguments{
+  \item{X}{
+    Point pattern (object of class \code{"ppp"} or \code{"pp3"}).
+  }
+  \item{rmax}{
+    Maximum distance between each pair of points in a triple.
+  }
+}
+\details{
+  This low-level function
+  finds all triples of points in a point pattern
+  in which each pair lies closer than \code{rmax}.
+}
+\value{
+  A data frame with columns \code{i,j,k} giving the indices of the
+  points in each triple, and a column \code{diam} giving the diameter
+  (maximum pairwise distance) in the triple.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{closepairs}},
+  \code{\link{Tstat}}.
+}
+\examples{
+   closetriples(redwoodfull, 0.02)
+   closetriples(redwoodfull, 0.005)
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/closing.Rd b/man/closing.Rd
new file mode 100644
index 0000000..64fd94d
--- /dev/null
+++ b/man/closing.Rd
@@ -0,0 +1,84 @@
+\name{closing} 
+\alias{closing}
+\alias{closing.owin}
+\alias{closing.ppp}
+\alias{closing.psp}
+\title{Morphological Closing}
+\description{
+  Perform morphological closing of a window, a line segment pattern
+  or a point pattern.
+}
+\usage{
+ closing(w, r, \dots)
+
+ \method{closing}{owin}(w, r, \dots, polygonal=NULL)
+
+ \method{closing}{ppp}(w, r, \dots, polygonal=TRUE)
+
+ \method{closing}{psp}(w, r, \dots, polygonal=TRUE)
+}
+\arguments{
+  \item{w}{
+    A window (object of class \code{"owin"})
+    or a line segment pattern (object of class \code{"psp"})
+    or a point pattern (object of class \code{"ppp"}).
+  }
+  \item{r}{positive number: the radius of the closing.}
+  \item{\dots}{extra arguments passed to \code{\link{as.mask}}
+    controlling the pixel resolution, if a pixel approximation is used.}
+  \item{polygonal}{
+    Logical flag indicating whether to compute a polygonal
+    approximation to the closing (\code{polygonal=TRUE}) or
+    a pixel grid approximation (\code{polygonal=FALSE}).
+  }
+}
+\value{
+  If \code{r > 0}, an object of class \code{"owin"} representing the
+  closed region. If \code{r=0}, the result is identical to \code{w}.
+}
+\details{
+  The morphological closing (Serra, 1982)
+  of a set \eqn{W} by a distance \eqn{r > 0}
+  is the set of all points that cannot be
+  separated from \eqn{W} by any circle of radius \eqn{r}.
+  That is, a point \eqn{x} belongs to the closing \eqn{W*}
+  if it is impossible to draw any circle of radius \eqn{r} that
+  has \eqn{x} on the inside and \eqn{W} on the outside.
+  The closing \eqn{W*} contains the original set \eqn{W}.
+
+  For a small radius \eqn{r}, the closing operation
+  has the effect of smoothing out irregularities in the boundary of
+  \eqn{W}. For larger radii, the closing operation smooths out
+  concave features in the boundary. For very large radii,
+  the closed set \eqn{W*} becomes more and more convex.
+
+  The algorithm applies \code{\link{dilation}} followed by
+  \code{\link{erosion}}. 
+}
+\seealso{
+  \code{\link{opening}} for the opposite operation.
+
+  \code{\link{dilation}}, \code{\link{erosion}} for the basic
+  operations.  
+  
+  \code{\link{owin}},
+  \code{\link{as.owin}} for information about windows.
+}
+\examples{
+  v <- closing(letterR, 0.25)
+  plot(v, main="closing")
+  plot(letterR, add=TRUE)
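+
+  # a hedged check of the definition: closing is dilation
+  # followed by erosion
+  v2 <- erosion(dilation(letterR, 0.25), 0.25)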
+}
+\references{
+  Serra, J. (1982)
+  \emph{Image analysis and mathematical morphology}.
+  Academic Press.
+}
+\author{\adrian and \rolf.}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/clusterfield.Rd b/man/clusterfield.Rd
new file mode 100644
index 0000000..355edaa
--- /dev/null
+++ b/man/clusterfield.Rd
@@ -0,0 +1,108 @@
+\name{clusterfield}
+\alias{clusterfield}
+\alias{clusterfield.character}
+\alias{clusterfield.function}
+\alias{clusterfield.kppm}
+\title{Field of clusters}
+\description{
+  Calculate the superposition of cluster kernels at the location of a
+  point pattern.
+}
+\usage{
+  clusterfield(model, locations = NULL, \dots)
+
+  \method{clusterfield}{character}(model, locations = NULL, \dots)
+
+  \method{clusterfield}{function}(model, locations = NULL, \dots, mu = NULL)
+
+  \method{clusterfield}{kppm}(model, locations = NULL, \dots)
+}
+\arguments{
+  \item{model}{
+    Cluster model. Either a fitted cluster model (object of class
+    \code{"kppm"}), a character string specifying the type of cluster
+    model, or a function defining the cluster kernel. See Details.
+  }
+  \item{locations}{
+    A point pattern giving the locations of the kernels. Defaults to the
+    centroid of the observation window for the \code{"kppm"} method and
+    to the center of a unit square otherwise.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link{density.ppp}} or the
+    cluster kernel. See Details.
+  }
+  \item{mu}{
+     Mean number of offspring per cluster. 
+     A single number or a pixel image.
+  }
+}
+\details{
+  The actual calculations are performed by \code{\link{density.ppp}} and
+  \code{\dots} arguments are passed thereto for control over the pixel
+  resolution etc. (These arguments are then passed on to \code{\link{pixellate.ppp}}
+  and \code{\link{as.mask}}.)
+
+  For the function method the given kernel function should accept
+  vectors of x and y coordinates as its first two arguments. Any
+  additional arguments may be passed through the \code{\dots}.
+
+  The function method also accepts the optional parameter \code{mu}
+  (defaulting to 1) specifying the mean number of points per cluster (as
+  a numeric) or the inhomogeneous reference cluster intensity (as an
+  \code{"im"} object or a \code{function(x,y)}). The interpretation of
+  \code{mu} is as explained in the simulation functions referenced in
+  the See Also section below.
+
+  For the character method \code{model} must be one of:
+  \code{model="Thomas"} for the Thomas process,
+  \code{model="MatClust"} for the Matern cluster process,
+  \code{model="Cauchy"} for the Neyman-Scott cluster process with
+  Cauchy kernel, or \code{model="VarGamma"} for the Neyman-Scott
+  cluster process with Variance Gamma kernel. For all these models the
+  parameter \code{scale} is required and passed through \code{\dots} as
+  well as the parameter \code{nu} when \code{model="VarGamma"}. This
+  method calls \code{clusterfield.function} so the parameter \code{mu}
+  may also be passed through \code{\dots} and will be interpreted as
+  explained above.
+
+  The kppm method extracts the relevant information from the fitted
+  model (including \code{mu}) and calls \code{clusterfield.function}.
+  
+}
+\value{
+  A pixel image (object of class \code{"im"}).
+}
+\seealso{
+  \code{\link{density.ppp}} and \code{\link{kppm}}
+
+  Simulation algorithms for cluster models:
+  \code{\link{rCauchy}}
+  \code{\link{rMatClust}}
+  \code{\link{rThomas}}
+  \code{\link{rVarGamma}}
+}
+\examples{
+  # method for fitted model
+  fit <- kppm(redwood~1, "Thomas")
+  clusterfield(fit, eps = 0.01)
+
+  # method for functions
+  kernel <- function(x,y,scal) { 
+      r <- sqrt(x^2 + y^2)
+      ifelse(r > 0,
+             dgamma(r, shape=5, scale=scal)/(2 * pi * r),
+             0)               
+  }
+  X <- runifpoint(10)
+  clusterfield(kernel, X, scal=0.05)
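+
+  # a hedged extra: character method, with parameters scale and mu
+  # passed through ... as documented above
+  clusterfield("Thomas", X, scale=0.05, mu=5)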
+}
+\author{\adrian, \rolf and \ege.}
+\keyword{spatial}
diff --git a/man/clusterfit.Rd b/man/clusterfit.Rd
new file mode 100644
index 0000000..61634ff
--- /dev/null
+++ b/man/clusterfit.Rd
@@ -0,0 +1,143 @@
+\name{clusterfit}
+\alias{clusterfit}
+\title{Fit Cluster or Cox Point Process Model via Minimum Contrast}
+\description{
+  Fit a homogeneous or inhomogeneous cluster process or
+  Cox point process model to a point pattern by the Method of Minimum Contrast.
+}
+\usage{
+clusterfit(X, clusters, lambda = NULL, startpar = NULL,
+           q = 1/4, p = 2, rmin = NULL, rmax = NULL, \dots,
+           statistic = NULL, statargs = NULL, algorithm="Nelder-Mead")
+}
+\arguments{
+  \item{X}{
+    Data to which the cluster or Cox model will be fitted.
+    Either a point pattern or a summary statistic.
+    See Details.
+  }
+  \item{clusters}{
+    Character string determining the cluster or Cox model.
+    Partially matched.
+    Options are \code{"Thomas"}, \code{"MatClust"},
+    \code{"Cauchy"}, \code{"VarGamma"} and \code{"LGCP"}.
+  }
+  \item{lambda}{
+    Optional. An estimate of the intensity of the point process.
+    Either a single numeric specifying a constant intensity,
+    a pixel image (object of class \code{"im"}) giving the
+    intensity values at all locations, a fitted point process model
+    (object of class \code{"ppm"} or \code{"kppm"})
+    or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+  }
+  \item{startpar}{
+    Vector of initial values of the parameters of the
+    point process model. If \code{X} is a point pattern, sensible defaults
+    are used. Otherwise rather arbitrary values are used.
+  }
+  \item{q,p}{
+    Optional. Exponents for the contrast criterion.
+  }
+  \item{rmin, rmax}{
+    Optional. The interval of \eqn{r} values for the contrast criterion.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link{mincontrast}}.
+  }
+  \item{statistic}{
+    Optional. Name of the summary statistic to be used
+    for minimum contrast estimation: either \code{"K"} or \code{"pcf"}.
+  }
+  \item{statargs}{
+    Optional list of arguments to be used when calculating
+    the \code{statistic}. See Details.
+  }
+  \item{algorithm}{
+    Character string determining the mathematical optimisation algorithm
+    to be used by \code{\link[stats]{optim}}. See
+    the argument \code{method} of \code{\link[stats]{optim}}.
+  }
+}
+\details{
+  This function fits the clustering parameters of a cluster or Cox point
+  process model by the Method of Minimum Contrast, that is, by
+  matching the theoretical \eqn{K}-function of the model to the
+  empirical \eqn{K}-function of the data, as explained in
+  \code{\link{mincontrast}}.
+  
+  If \code{statistic="pcf"} (or \code{X} appears to be an
+  estimated pair correlation function) then instead of using the
+  \eqn{K}-function, the algorithm will use the pair correlation
+  function.
+
+  If \code{X} is a point pattern of class \code{"ppp"}, an estimate of
+  the summary statistic specified by \code{statistic} (defaults to
+  \code{"K"}) is first computed before minimum contrast estimation is
+  carried out as described above. In this case the argument
+  \code{statargs} can be used for controlling the summary statistic
+  estimation. The precise algorithm for computing the summary statistic
+  depends on whether the intensity specification (\code{lambda}) is:
+
+  \describe{
+    \item{homogeneous:}{
+      If \code{lambda} is \code{NULL} or a single numeric, the pattern is
+      considered homogeneous and either \code{\link{Kest}} or
+      \code{\link{pcf}} is invoked. In this case \code{lambda} is
+      \bold{not} used for anything when estimating the summary statistic.
+    }
+    \item{inhomogeneous:}{
+
+      If \code{lambda} is a pixel image (object of class \code{"im"}),
+      a fitted point process model (object of class \code{"ppm"} or
+      \code{"kppm"}) or a \code{function(x,y)} the pattern is considered
+      inhomogeneous. In this case either \code{\link{Kinhom}} or
+      \code{\link{pcfinhom}} is invoked with \code{lambda} as an
+      argument.
+      
+    }
+  }
+
+  After the clustering parameters of the model have been estimated by
+  minimum contrast, \code{lambda} (if non-null) is used to compute the
+  additional model parameter \eqn{\mu}{mu}.
+}
+\value{
+  An object of class \code{"minconfit"}. There are methods for printing
+  and plotting this object. See \code{\link{mincontrast}}.
+}
+\references{
+  Diggle, P.J. and Gratton, R.J. (1984)
+  Monte Carlo methods of inference for implicit statistical models.
+  \emph{Journal of the Royal Statistical Society, series B}
+  \bold{46}, 193--212.
+
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003).
+  Statistical Inference and Simulation for Spatial Point Processes.
+  Chapman and Hall/CRC, Boca Raton.
+
+  Waagepetersen, R. (2007).
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63} (2007) 252--258.
+}  
+\author{\adrian, \rolf and \ege.}
+\seealso{
+  \code{\link{kppm}}
+}
+\examples{
+  fit <- clusterfit(redwood, "Thomas")
+  fit
+  if(interactive()){
+    plot(fit)
+  }
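+
+  # a hedged variant: fit by matching the pair correlation function
+  fit2 <- clusterfit(redwood, "Thomas", statistic="pcf")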
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/clusterkernel.Rd b/man/clusterkernel.Rd
new file mode 100644
index 0000000..5aa26a6
--- /dev/null
+++ b/man/clusterkernel.Rd
@@ -0,0 +1,56 @@
+\name{clusterkernel}
+\alias{clusterkernel}
+\alias{clusterkernel.character}
+\alias{clusterkernel.kppm}
+\title{
+  Extract Cluster Offspring Kernel
+}
+\description{
+  Given a cluster point process model, this command
+  returns the probability density of the cluster offspring.
+}
+\usage{
+clusterkernel(model, \dots)
+
+\method{clusterkernel}{kppm}(model, \dots)
+
+\method{clusterkernel}{character}(model, \dots)
+}
+\arguments{
+  \item{model}{
+    Cluster model. Either a fitted cluster or Cox model
+    (object of class \code{"kppm"}), or a character string
+    specifying the type of cluster model.
+  }
+  \item{\dots}{
+    Parameter values for the model,
+    when \code{model} is a character string.
+  }
+}
+\details{
+  Given a specification of a cluster point process model, this command
+  returns a \code{function(x,y)} giving the two-dimensional
+  probability density of the cluster offspring points assuming a cluster parent
+  located at the origin.
+}
+\value{
+  A function in the \R{} language with arguments \code{x,y,\dots}.
+}
+\author{
+  \adrian, \rolf and \ege.
+}
+\seealso{
+  \code{\link{clusterfield}}, \code{\link{kppm}}
+}
+\examples{
+  fit <- kppm(redwood ~ x, "MatClust")
+  f <- clusterkernel(fit)
+  f(0.1, 0.2)
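+
+  # a hedged extra: character method, assuming a scale parameter
+  g <- clusterkernel("Thomas", scale=0.1)
+  g(0.05, 0)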
+}
+\keyword{spatial}
diff --git a/man/clusterradius.Rd b/man/clusterradius.Rd
new file mode 100644
index 0000000..dafa742
--- /dev/null
+++ b/man/clusterradius.Rd
@@ -0,0 +1,99 @@
+\name{clusterradius}
+\alias{clusterradius}
+\alias{clusterradius.character}
+\alias{clusterradius.kppm}
+\title{
+  Compute or Extract Effective Range of Cluster Kernel
+}
+\description{
+  Given a cluster point process model, this command
+  returns a value beyond which the probability density of the
+  cluster offspring is negligible.
+}
+\usage{
+clusterradius(model, \dots)
+
+\method{clusterradius}{kppm}(model, \dots, thresh = NULL, precision = FALSE)
+
+\method{clusterradius}{character}(model, \dots, thresh = NULL, precision = FALSE)
+}
+\arguments{
+  \item{model}{
+    Cluster model. Either a fitted cluster or Cox model
+    (object of class \code{"kppm"}), or a character string
+    specifying the type of cluster model.
+  }
+  \item{\dots}{
+    Parameter values for the model,
+    when \code{model} is a character string.
+  }
+  \item{thresh}{
+    Numerical threshold relative to the cluster kernel value at the
+    origin (parent location) determining when the cluster kernel
+    will be considered negligible. A sensible default is provided.
+  }
+  \item{precision}{
+    Logical. If \code{precision=TRUE} the precision of the calculated
+    range is returned as an attribute to the range. See details.
+  }
+}
+\details{
+  Given a cluster model, this function by default returns the effective
+  range of the model with the given parameters, as used in spatstat. For
+  the Matern cluster model (see e.g. \code{\link{rMatClust}}) this is
+  simply the finite radius of the offspring density given by the
+  parameter \code{scale}, irrespective of other options given to this
+  function. The remaining models in spatstat have infinite theoretical
+  range, and an effective finite value is given as follows: for the
+  Thomas model (see e.g. \code{\link{rThomas}}) the default is
+  \code{4*scale}, where \code{scale} is the standard deviation
+  parameter of the model. If \code{thresh} is given, the value is
+  instead found as described for the other models below.
+
+  For the Cauchy model (see e.g. \code{\link{rCauchy}}) and the Variance
+  Gamma (Bessel) model (see e.g. \code{\link{rVarGamma}}) the value of
+  \code{thresh} defaults to 0.001, and then this is used to compute the
+  range numerically as follows. If \eqn{k(x,y)=k_0(r)}{k(x,y)=k0(r)}
+  with \eqn{r=\sqrt{x^2+y^2}}{r=sqrt(x^2+y^2)}
+  denotes the isotropic cluster kernel, then \eqn{f(r) = 2 \pi r
+  k_0(r)}{f(r) = 2 pi r k0(r)} is the
+  density function of the offspring distance from the parent. The range
+  is determined as the value of \eqn{r} where \eqn{f(r)} falls below
+  \code{thresh} times \eqn{k_0(r)}{k0(r)}.
+  
+  If \code{precision=TRUE} the precision related to the chosen range is
+  returned as an attribute. Here the precision is defined as the polar
+  integral of the kernel from distance 0 to the calculated
+  range. Ideally this should be close to the value 1 which would be
+  obtained for the true theoretical infinite range.
+
+}
+\value{
+  A positive numeric.
+
+  Additionally, the precision related to this range value is returned as
+  an attribute \code{"prec"}, if \code{precision=TRUE}.
+}
+\author{
+  \adrian, \rolf and \ege.
+}
+\seealso{
+  \code{\link{clusterkernel}}, \code{\link{kppm}},
+  \code{\link{rMatClust}}, \code{\link{rThomas}}, \code{\link{rCauchy}},
+  \code{\link{rVarGamma}}, \code{\link{rNeymanScott}}.
+}
+\examples{
+  fit <- kppm(redwood ~ x, "MatClust")
+  clusterradius(fit)
+
+  clusterradius("Thomas", scale = .1)
+  clusterradius("Thomas", scale = .1, thresh = 0.001)
+  clusterradius("VarGamma", scale = .1, nu = 2, precision = TRUE)
+}
+\keyword{spatial}
diff --git a/man/clusterset.Rd b/man/clusterset.Rd
new file mode 100644
index 0000000..0b538a7
--- /dev/null
+++ b/man/clusterset.Rd
@@ -0,0 +1,145 @@
+\name{clusterset}
+\alias{clusterset}
+\title{
+  Allard-Fraley Estimator of Cluster Feature
+}
+\description{
+  Detect high-density features in a spatial point pattern
+  using the (unrestricted) Allard-Fraley estimator.
+}
+\usage{
+  clusterset(X, what=c("marks", "domain"),
+            \dots, verbose=TRUE,
+            fast=FALSE,
+            exact=!fast)
+}
+\arguments{
+  \item{X}{
+    A two-dimensional spatial point pattern (object of class
+    \code{"ppp"}).
+  }
+  \item{what}{
+    Character string or character vector
+    specifying the type of result. See Details.
+  }
+  \item{verbose}{
+    Logical value indicating whether to print progress reports.
+  }
+  \item{fast}{
+    Logical. If \code{FALSE} (the default), the Dirichlet tile areas
+    will be computed exactly using polygonal geometry, so that the
+    optimal choice of tiles will be computed exactly.
+    If \code{TRUE}, the Dirichlet tile areas
+    will be approximated using pixel counting, so the optimal
+    choice will be approximate.
+  }
+  \item{exact}{
+    Logical. If \code{TRUE}, the Allard-Fraley estimator
+    of the domain will be computed exactly using polygonal geometry.
+    If \code{FALSE}, the Allard-Fraley estimator of the domain
+    will be approximated by a binary pixel mask.
+    The default is \code{exact=!fast}, which is \code{TRUE}
+    unless \code{fast=TRUE} is given.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link{as.mask}} to control the
+    pixel resolution if \code{exact=FALSE}.
+  }
+}
+\details{
+  Allard and Fraley (1997) developed a technique for recognising
+  features of high density in a spatial point pattern in the presence of
+  random clutter.
+
+  This algorithm computes the \emph{unrestricted} Allard-Fraley estimator.
+  The Dirichlet (Voronoi) tessellation of the point pattern \code{X} is
+  computed. The smallest \code{m} Dirichlet cells are selected,
+  where the number \code{m} is determined by a maximum likelihood
+  criterion.
+  \itemize{
+    \item 
+    If \code{fast=FALSE} (the default), the areas of the tiles
+    of the Dirichlet tessellation will be computed exactly
+    using polygonal geometry. This ensures that the optimal selection of
+    tiles is computed exactly.
+    \item
+    If \code{fast=TRUE}, the Dirichlet tile areas
+    will be approximated by counting pixels.
+    This is faster, and is usually correct (depending on the pixel
+    resolution, which is controlled by the arguments \code{\dots}).
+  }
+
+  The type of result depends on the character vector \code{what}.
+  \itemize{
+    \item
+    If \code{what="marks"} the result is the point pattern \code{X}
+    with a vector of marks labelling each point with a value \code{yes} or
+    \code{no} depending on whether the corresponding Dirichlet cell is
+    selected by the Allard-Fraley estimator. In other words each point of
+    \code{X} is labelled as either a cluster point or a non-cluster point.
+    \item 
+    If \code{what="domain"}, the result is the Allard-Fraley estimator
+    of the cluster feature set, which is the union of all the
+    selected Dirichlet cells, represented as a window (object of class
+    \code{"owin"}).
+    \item
+    If \code{what=c("marks", "domain")} the result is a list
+    containing both of the results described above.
+  }
+
+  Computation of the Allard-Fraley set estimator depends on
+  the argument \code{exact}.
+  \itemize{
+    \item
+    If \code{exact=TRUE} (the default), the Allard-Fraley set estimator
+    will be computed exactly using polygonal geometry.
+    The result is a polygonal window.
+    \item
+    If \code{exact=FALSE}, the Allard-Fraley set estimator
+    will be approximated by a binary pixel mask.
+    This is faster than the exact computation.
+    The result is a binary mask.
+  }
+}
+\value{
+  If \code{what="marks"}, a multitype point pattern (object of class
+  \code{"ppp"}).
+
+  If  \code{what="domain"}, a window (object of class
+  \code{"owin"}).
+
+  If  \code{what=c("marks", "domain")} (the default),
+  a list consisting of a multitype point pattern and a window.
+}
+\references{
+  Allard, D. and Fraley, C. (1997) 
+  Nonparametric maximum likelihood estimation of features in
+  spatial point processes using Voronoi tessellation.
+  \emph{Journal of the American Statistical Association}
+  \bold{92}, 1485--1493.
+}
+\author{\adrian and \rolf}
+\seealso{
+  \code{\link{nnclean}}, 
+  \code{\link{sharpen}}
+}
+\examples{
+  opa <- par(mfrow=c(1,2))
+  W <- grow.rectangle(as.rectangle(letterR), 1)
+  X <- superimpose(runifpoint(300, letterR),
+                   runifpoint(50, W), W=W)
+  plot(W, main="clusterset(X, 'm')")
+  plot(clusterset(X, "marks", fast=TRUE), add=TRUE, chars=c(1, 3), cols=1:2)
+  plot(letterR, add=TRUE)
+  plot(W, main="clusterset(X, 'd')")
+  plot(clusterset(X, "domain", exact=FALSE), add=TRUE)
+  plot(letterR, add=TRUE)
+  par(opa)
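+  # a further sketch: tabulate the cluster membership labels
+  yesno <- marks(clusterset(X, "marks", fast=TRUE))
+  table(yesno)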
+}
+\keyword{spatial}
+\keyword{classif}
diff --git a/man/coef.mppm.Rd b/man/coef.mppm.Rd
new file mode 100644
index 0000000..3af139e
--- /dev/null
+++ b/man/coef.mppm.Rd
@@ -0,0 +1,106 @@
+\name{coef.mppm}
+\alias{coef.mppm}
+\title{
+  Coefficients of Point Process Model Fitted to Multiple Point Patterns
+}
+\description{
+  Given a point process model fitted to a list of point patterns,
+  extract the coefficients of the fitted model.
+  A method for \code{coef}.
+}
+\usage{
+  \method{coef}{mppm}(object, \dots)
+}
+\arguments{
+  \item{object}{
+    The fitted point process model (an object of class \code{"mppm"})
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\value{
+  Either a vector containing the fitted coefficients,
+  or a data frame containing the fitted coefficients for each point pattern.
+}
+\details{
+  This function is a method for the generic function \code{\link{coef}}.
+  
+  The argument \code{object} must be a fitted point process model
+  (object of class \code{"mppm"}) produced by the 
+  fitting algorithm \code{\link{mppm}}. This represents a
+  point process model that has been fitted
+  to a list of several point pattern datasets. See \code{\link{mppm}}
+  for information.
+
+  This function extracts the vector of coefficients of the fitted model.
+  This is the estimate of the parameter vector
+  \eqn{\theta}{theta} such that the conditional intensity of the model
+  is of the form
+  \deqn{
+    \lambda(u,x) = \exp(\theta S(u,x))
+  }{
+    lambda(u,x) = exp(theta . S(u,x))
+  }
+  where \eqn{S(u,x)} is a (vector-valued) statistic.
+
+  For example, if the model \code{object} is the uniform Poisson process,
+  then \code{coef(object)} will yield a single value
+  (named \code{"(Intercept)"}) which is the logarithm of the
+  fitted intensity of the Poisson process.
+
+  If the fitted model includes random effects (i.e. if the argument
+  \code{random} was specified in the call to \code{\link{mppm}}),
+  then the fitted coefficients are different for each point pattern
+  in the original data, so \code{coef(object)} is a data frame
+  with one row for each point pattern, and one column for each
+  parameter. Use \code{\link{fixef.mppm}} to extract the vector of fixed effect
+  coefficients, and \code{\link{ranef.mppm}} to extract the random
+  effect coefficients at each level.
+
+  Use \code{\link{print.mppm}} to print a more useful
+  description of the fitted model.
+}
+\seealso{
+  \code{\link{fixef.mppm}} and \code{\link{ranef.mppm}}
+  for the fixed and random effect coefficients in a model that includes
+  random effects.
+  
+ \code{\link{print.mppm}},
+ \code{\link{mppm}}
+}
+\examples{
+    H <- hyperframe(X=waterstriders)
+
+    fit.Poisson <- mppm(X ~ 1, H)
+    coef(fit.Poisson)
+
+    # The single entry "(Intercept)" 
+    # is the log of the fitted intensity of the Poisson process
+
+    fit.Strauss <- mppm(X~1, H, Strauss(7))
+    coef(fit.Strauss)
+
+    # The two entries "(Intercept)" and "Interaction"
+    # are respectively log(beta) and log(gamma)
+    # in the usual notation for Strauss(beta, gamma, r)
+
+    # Tweak data to exaggerate differences
+    H$X[[1]] <- rthin(H$X[[1]], 0.3)
+    # Model with random effects
+    fitran <- mppm(X ~ 1, H, random=~1|id)
+    coef(fitran)
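+    # a sketch: for the random-effects model, the fixed and random
+    # parts can be extracted separately
+    fixef(fitran)
+    ranef(fitran)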
+}
+\references{
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  London: Chapman and Hall/CRC Press. 
+}
+\author{
+  Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff.
+  Implemented in \pkg{spatstat} by
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{models}
diff --git a/man/coef.ppm.Rd b/man/coef.ppm.Rd
new file mode 100644
index 0000000..709f6a5
--- /dev/null
+++ b/man/coef.ppm.Rd
@@ -0,0 +1,79 @@
+\name{coef.ppm}
+\alias{coef.ppm}
+\title{
+  Coefficients of Fitted Point Process Model
+}
+\description{
+  Given a point process model fitted to a point pattern,
+  extract the coefficients of the fitted model.
+  A method for \code{coef}.
+}
+\usage{
+  \method{coef}{ppm}(object, \dots)
+}
+\arguments{
+  \item{object}{
+    The fitted point process model (an object of class \code{"ppm"})
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\value{
+  A vector containing the fitted coefficients.
+}
+\details{
+  This function is a method for the generic function \code{\link{coef}}.
+  
+  The argument \code{object} must be a fitted point process model
+  (object of class \code{"ppm"}). Such objects are produced by the maximum
+  pseudolikelihood fitting algorithm \code{\link{ppm}}.
+
+  This function extracts the vector of coefficients of the fitted model.
+  This is the estimate of the parameter vector
+  \eqn{\theta}{theta} such that the conditional intensity of the model
+  is of the form
+  \deqn{
+    \lambda(u,x) = \exp(\theta S(u,x))
+  }{
+    lambda(u,x) = exp(theta . S(u,x))
+  }
+  where \eqn{S(u,x)} is a (vector-valued) statistic.
+
+  For example, if the model \code{object} is the uniform Poisson process,
+  then \code{coef(object)} will yield a single value
+  (named \code{"(Intercept)"}) which is the logarithm of the
+  fitted intensity of the Poisson process.
+
+  Use \code{\link{print.ppm}} to print a more useful
+  description of the fitted model.
+}
+\seealso{
+ \code{\link{print.ppm}},
+ \code{\link{ppm.object}},
+ \code{\link{ppm}}
+}
+\examples{
+    data(cells)
+
+    poi <- ppm(cells, ~1, Poisson())
+    coef(poi)
+    # This is the log of the fitted intensity of the Poisson process
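+    # exponentiating the coefficient recovers the fitted intensity itself:
+    exp(coef(poi))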
+
+    stra <- ppm(cells, ~1, Strauss(r=0.07))
+    coef(stra)
+
+    # The two entries "(Intercept)" and "Interaction"
+    # are respectively log(beta) and log(gamma)
+    # in the usual notation for Strauss(beta, gamma, r)
+
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{models}
+\keyword{methods}
diff --git a/man/coef.slrm.Rd b/man/coef.slrm.Rd
new file mode 100644
index 0000000..4dec857
--- /dev/null
+++ b/man/coef.slrm.Rd
@@ -0,0 +1,51 @@
+\name{coef.slrm}
+\Rdversion{1.1}
+\alias{coef.slrm}
+\title{
+  Coefficients of Fitted Spatial Logistic Regression Model 
+}
+\description{
+  Extracts the coefficients (parameters) from a fitted
+  Spatial Logistic Regression model.
+}
+\usage{
+  \method{coef}{slrm}(object, ...)
+}
+
+\arguments{
+  \item{object}{
+    a fitted spatial logistic regression model.
+    An object of class \code{"slrm"}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This is a method for \code{\link{coef}} for fitted spatial logistic
+  regression models (objects of class \code{"slrm"}, usually obtained
+  from the function \code{\link{slrm}}).
+
+  It extracts the fitted canonical parameters, i.e.\ the coefficients in the
+  linear predictor of the spatial logistic regression.
+}
+\value{
+  Numeric vector of coefficients.
+}
+\seealso{
+  \code{\link{slrm}}
+}
+\examples{
+  X <- rpoispp(42)
+  fit <- slrm(X ~ x+y)
+  coef(fit)
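+  # a sketch: the coefficients determine the fitted probabilities,
+  # which can be inspected via predict.slrm
+  plot(predict(fit))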
+}
+\author{\adrian
+  \email{adrian@maths.uwa.edu.au}
+  and \rolf}
+\keyword{spatial}
+\keyword{models}
+\keyword{methods}
diff --git a/man/collapse.fv.Rd b/man/collapse.fv.Rd
new file mode 100644
index 0000000..ca03b53
--- /dev/null
+++ b/man/collapse.fv.Rd
@@ -0,0 +1,93 @@
+\name{collapse.fv}
+\alias{collapse.fv}
+\alias{collapse.anylist}
+\title{
+  Collapse Several Function Tables into One
+}
+\description{
+  Combines several function tables (objects of class \code{"fv"})
+  into a single function table, merging columns that are identical
+  and relabelling columns that are different.
+}
+\usage{
+\method{collapse}{fv}(object, \dots, same = NULL, different = NULL)
+
+\method{collapse}{anylist}(object, \dots, same = NULL, different = NULL)
+}
+\arguments{
+  \item{object}{
+    An object of class \code{"fv"}, or a list of such objects.
+  }
+  \item{\dots}{
+    Additional objects of class \code{"fv"}.
+  }
+  \item{same}{
+    Character string or character vector specifying a column or columns,
+    present in each \code{"fv"} object, that are identical
+    in each object. This column or columns will be included only once.
+  }
+  \item{different}{
+    Character string or character vector specifying a column or columns,
+    present in each \code{"fv"} object, that contain different values in
+    each object. Each of these columns of data will be included, with
+    labels that distinguish them from each other.
+  }
+}
+\details{
+  This is a method for the generic function
+  \code{\link[nlme]{collapse}}.
+  
+  It combines the data in several function tables 
+  (objects of class \code{"fv"}, see \code{\link{fv.object}})
+  to make a single function table.
+  It is essentially a smart wrapper for
+  \code{\link{cbind.fv}}.
+
+  A typical application is to calculate the same summary statistic
+  (such as the \eqn{K} function) for different point patterns,
+  and then to use \code{collapse.fv} to combine the results into a
+  single object that can easily be plotted. See the Examples.
+  
+  The arguments \code{object} and \code{\dots} should be function tables
+  (objects of class \code{"fv"}, see \code{\link{fv.object}})
+  that are compatible in the sense that they
+  have the same values of the function argument. 
+
+  The argument \code{same} identifies any columns that are present
+  in each function table, and which are known to contain exactly
+  the same values in each table. This column or columns will be
+  included only once in the result.
+
+  The argument \code{different} identifies any columns that are present
+  in each function table, and which contain different numerical values
+  in each table. Each of these columns will be included, with labels
+  to distinguish them. 
+
+  Columns that are not named in \code{same} or \code{different} will not
+  be included.
+}
+\value{
+  Object of class \code{"fv"}.
+}
+\seealso{
+  \code{\link{fv.object}},
+  \code{\link{cbind.fv}}
+}
+\examples{
+  # generate simulated data
+  X <- replicate(3, rpoispp(100), simplify=FALSE)
+  names(X) <- paste("Simulation", 1:3)
+  # compute K function estimates
+  Klist <- anylapply(X, Kest)
+  # collapse
+  K <- collapse(Klist, same="theo", different="iso")
+  K
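+  # the collapsed object can be plotted like any other "fv" object
+  plot(K)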
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/colourmap.Rd b/man/colourmap.Rd
new file mode 100644
index 0000000..1e97fc0
--- /dev/null
+++ b/man/colourmap.Rd
@@ -0,0 +1,118 @@
+\name{colourmap}
+\alias{colourmap}
+\title{Colour Lookup Tables}
+\description{
+  Create a colour map (colour lookup table).
+}
+\usage{
+colourmap(col, ..., range=NULL, breaks=NULL, inputs=NULL)
+}
+\arguments{
+  \item{col}{Vector of values specifying colours}
+  \item{\dots}{Ignored.}
+  \item{range}{
+    Interval to be mapped.
+    A numeric vector of length 2, specifying the endpoints of the
+    range of values to be mapped.
+    Incompatible with \code{breaks} or \code{inputs}.
+  }
+  \item{inputs}{
+    Values to which the colours are associated.
+    A factor or vector of the same length as \code{col}.
+    Incompatible with \code{breaks} or \code{range}.
+  }
+  \item{breaks}{
+    Breakpoints for the colour map.
+    A numeric vector of length equal to \code{length(col)+1}.
+    Incompatible with \code{range} or \code{inputs}.
+  }
+}
+\details{
+  A colour map is a mechanism for associating colours with data.
+  It can be regarded as a function, mapping data to colours.
+
+  The command \code{colourmap} creates an object representing
+  a colour map, which can then be used to control the plot commands
+  in the \pkg{spatstat} package. It can also be used to compute the
+  colour assigned to any data value. 
+
+  The argument \code{col} specifies the colours to which
+  data values will be mapped. It should be a vector
+  whose entries can be interpreted as colours by the standard
+  \R graphics system. The entries can be string names of colours
+  like \code{"red"}, or integers that refer to
+  colours in the standard palette, or strings containing
+    six-digit hexadecimal codes like \code{"#F0A0FF"}. 
+
+  Exactly one of the arguments \code{range}, \code{inputs} or \code{breaks}
+  must be specified by name.
+
+  If \code{inputs} is given, then it should be a vector or factor,
+  of the same length as \code{col}. The entries of \code{inputs} can be
+  any atomic type (e.g. numeric, logical, character, complex) or factor
+  values. The resulting colour map associates the value \code{inputs[i]}
+  with the colour \code{col[i]}.
+
+  If \code{range} is given, then it determines the interval of the real
+  number line that will be mapped. It should be a numeric vector of
+  length 2. 
+
+  If \code{breaks} is given, then it determines the precise intervals
+  of the real number line
+  which are mapped to each colour. It should be a numeric vector,
+  of length at least 2, with entries that are in increasing order.
+  Infinite values are allowed. Any number in the range
+  between \code{breaks[i]} and \code{breaks[i+1]} will be mapped to the
+  colour \code{col[i]}. 
+
+  The result is an object of class \code{"colourmap"}. 
+  There are \code{print} and \code{plot} methods for this class.
+  Some plot commands in the \pkg{spatstat} package accept an object
+  of this class as a specification of the colour map.
+
+  The result is also a function \code{f} which can be used to compute
+  the colour assigned to any data value. 
+  That is, \code{f(x)} returns the character value of the colour assigned
+  to \code{x}. This also works for vectors of data values.
+}
+\value{
+  A function, which is also an object of class \code{"colourmap"}.
+}
+\seealso{
+  The plot method \code{\link{plot.colourmap}}.
+  
+  See the \R help file on
+  \code{\link[grDevices:colors]{colours}} for information about the colours
+  that \R recognises, and how to manipulate them.
+  
+  To make a smooth transition between colours, see
+  \code{\link{interp.colourmap}}.
+  To alter individual colour values, see
+  \code{\link{tweak.colourmap}}.
+    
+  See \code{\link[spatstat:colourtools]{colourtools}}
+  for more tools to manipulate colour values.
+
+  See \code{\link{lut}} for lookup tables.
+}
+\examples{
+  # colour map for real numbers, using breakpoints
+  cr <- colourmap(c("red", "blue", "green"), breaks=c(0,5,10,15))
+  cr
+  cr(3.2)
+  cr(c(3,5,7))
+  # a large colour map
+  co <- colourmap(rainbow(100), range=c(-1,1))
+  co(0.2)
+  # colour map for discrete set of values
+  ct <- colourmap(c("red", "green"), inputs=c(FALSE, TRUE))
+  ct(TRUE)
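+  # a colour map is also a function: it can be applied to a vector
+  ct(c(TRUE, FALSE, TRUE))
+  # colour maps can be plotted directly
+  plot(co)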
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{color}
diff --git a/man/colourtools.Rd b/man/colourtools.Rd
new file mode 100644
index 0000000..0d9a4ae
--- /dev/null
+++ b/man/colourtools.Rd
@@ -0,0 +1,188 @@
+\name{colourtools}
+\alias{colourtools} %DoNotExport
+\alias{paletteindex}
+\alias{rgb2hex}
+\alias{rgb2hsva}
+\alias{col2hex}
+\alias{samecolour}
+\alias{complementarycolour}
+\alias{interp.colours}
+\alias{is.colour}
+\alias{is.grey}
+\alias{to.grey}
+\alias{to.opaque}
+\alias{to.transparent}
+\title{
+  Convert and Compare Colours in Different Formats
+}
+\description{
+  These functions convert between different formats for specifying
+  a colour in \R, determine whether colours are equivalent,
+  and convert colour to greyscale.
+}
+\usage{
+col2hex(x)
+rgb2hex(v, maxColorValue=255)
+rgb2hsva(red, green=NULL, blue=NULL, alpha=NULL, maxColorValue=255)
+paletteindex(x)
+samecolour(x,y)
+complementarycolour(x)
+interp.colours(x, length.out=512)
+is.colour(x)
+to.grey(x, weights=c(0.299, 0.587, 0.114), transparent=FALSE)
+is.grey(x)
+to.opaque(x)
+to.transparent(x, fraction)
+}
+
+\arguments{
+  \item{x,y}{
+    Any valid specification for a colour or sequence of colours
+    accepted by \code{\link[grDevices]{col2rgb}}.
+  }
+  \item{v}{
+    A numeric vector of length 3, giving the RGB values of
+    a single colour, or a 3-column matrix giving the RGB values of
+    several colours. Alternatively a vector of length 4 or a matrix with
+    4 columns, giving the RGB and alpha (transparency) values.
+  }
+  \item{red,green,blue,alpha}{
+    Arguments acceptable to \code{\link[grDevices]{rgb}}
+    determining the red, green, blue channels and optionally the
+    alpha (transparency) channel.
+    Note that \code{red} can also be a matrix with 3 \bold{rows}
+    giving the RGB values, or a matrix with 4 rows
+    giving RGB and alpha values.
+  }
+  \item{maxColorValue}{
+    Number giving the maximum possible value for the entries in
+    \code{v} or \code{red,green,blue,alpha}.
+  }
+  \item{weights}{
+    Numeric vector of length 3 giving 
+    relative weights for the red, green, and blue
+    channels respectively.
+  }
+  \item{transparent}{
+    Logical value indicating whether transparent colours should
+    be converted to transparent grey values (\code{transparent=TRUE})
+    or converted to opaque grey values (\code{transparent=FALSE}, the
+    default).
+  }
+  \item{fraction}{
+    Transparency fraction. Numerical value or vector of values
+    between 0 and 1, giving the opaqueness of a colour.
+    A fully opaque colour has \code{fraction=1}.
+  }
+  \item{length.out}{
+    Integer. Length of desired sequence.
+  }
+}
+\details{
+  \code{is.colour(x)} can be applied to any kind of data \code{x}
+  and returns \code{TRUE} if \code{x} can be interpreted as a colour or
+  colours. The remaining functions expect data that can be interpreted
+  as colours.
+  
+  \code{col2hex} converts colours specified in any format
+  into their hexadecimal character codes.
+
+  \code{rgb2hex} converts RGB colour values into their hexadecimal
+  character codes. It is a very minor extension to \code{\link[grDevices]{rgb}}.
+  Arguments to \code{rgb2hex} should be similar to
+  arguments to \code{\link[grDevices]{rgb}}.
+
+  \code{rgb2hsva} converts RGB colour values into HSV colour values
+  including the alpha (transparency) channel.
+  It is an extension of \code{\link[grDevices]{rgb2hsv}}.
+  Arguments to \code{rgb2hsva} should be similar to arguments to
+  \code{\link[grDevices]{rgb2hsv}}.
+  
+  \code{paletteindex} checks whether the colour or colours specified
+  by \code{x} are available in the default palette returned by
+  \code{\link[grDevices]{palette}()}. If so, it returns the index or indices of
+  the colours in the palette. If not, it returns \code{NA}.
+
+  \code{samecolour} decides whether two colours \code{x} and \code{y}
+  are equivalent.
+
+  \code{is.grey} determines whether each entry of \code{x} is a
+  greyscale colour, and returns a logical vector.
+  
+  \code{to.grey} converts the colour data in \code{x} to greyscale
+  colours. Alternatively \code{x} can be an object of class \code{"colourmap"}
+  and \code{to.grey(x)} is the modified colour map.
+
+  \code{to.opaque} converts the colours in \code{x} to opaque
+  (non-transparent) colours, and \code{to.transparent} converts them
+  to transparent colours with a specified transparency value.
+  Note that \code{to.transparent(x,1)} is equivalent to \code{to.opaque(x)}.
+
+  For \code{to.grey}, \code{to.opaque} and
+  \code{to.transparent}, if all the data in \code{x} specifies colours from the
+  standard palette, and if the result would be equivalent to \code{x},
+  then the result is identical to \code{x}.
+  
+  \code{complementarycolour} replaces each colour by its
+  complementary colour in RGB space (the colour obtained by replacing
+  RGB values \code{(r, g, b)} by \code{(255-r, 255-g, 255-b)}).
+  The transparency value is not changed.
+  Alternatively \code{x} can be an object of class \code{"colourmap"}
+  and \code{complementarycolour(x)} is the modified colour map.
+
+  \code{interp.colours} interpolates between each successive pair of
+  colours in a sequence of colours, to generate a more finely-spaced
+  sequence. It uses linear interpolation in HSV space (with hue
+  represented as a two-dimensional unit vector).
+}
+\section{Warning}{
+  \code{paletteindex("green")} returns \code{NA} because
+  the green colour in the default palette is called \code{"green3"}.
+}
+\value{
+  For \code{col2hex} and \code{rgb2hex} 
+  a character vector containing hexadecimal colour codes.
+
+  For \code{to.grey}, \code{to.opaque} and \code{to.transparent},
+  either a character vector containing hexadecimal colour codes,
+  or a value identical to the input \code{x}.
+  
+  For \code{rgb2hsva}, a matrix with 3 or 4 rows containing
+  HSV colour values.
+  
+  For \code{paletteindex}, an integer vector, possibly containing
+  \code{NA} values.
+
+  For \code{samecolour} and \code{is.grey},
+  a logical value or logical vector.
+}
+\author{\adrian and \rolf}
+\seealso{
+  \code{\link[grDevices]{col2rgb}},
+  \code{\link[grDevices]{rgb2hsv}},
+  \code{\link[grDevices]{palette}}.
+
+  See also the class of colour map objects in the \pkg{spatstat} package:
+  \code{\link{colourmap}}, 
+  \code{\link{interp.colourmap}}, 
+  \code{\link{tweak.colourmap}}.
+}
+\examples{
+  samecolour("grey", "gray")
+  paletteindex("grey")
+  col2hex("orange")
+  to.grey("orange")
+  complementarycolour("orange")
+  is.grey("lightgrey")
+  is.grey(8)
+  to.transparent("orange", 0.5)
+  to.opaque("red")
+  interp.colours(c("orange", "red", "violet"), 5)
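+  # a small sketch: rgb2hex accepts a vector of three RGB values
+  rgb2hex(c(255, 0, 0))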
+}
+\keyword{color}
diff --git a/man/commonGrid.Rd b/man/commonGrid.Rd
new file mode 100644
index 0000000..6253f6c
--- /dev/null
+++ b/man/commonGrid.Rd
@@ -0,0 +1,61 @@
+\name{commonGrid}
+\alias{commonGrid}
+\title{Determine A Common Spatial Domain And Pixel Resolution}
+\description{
+  Determine a common spatial domain and pixel resolution for
+  several spatial objects such as images, masks, windows
+  and point patterns.
+}
+\usage{
+commonGrid(\dots)
+}
+\arguments{
+  \item{\dots}{
+    Any number of pixel images (objects of class \code{"im"}),
+    binary masks (objects of class \code{"owin"} of type \code{"mask"})
+    or data which can be converted to binary masks by \code{\link{as.mask}}.
+  } 
+}
+\details{
+  This function determines a common spatial resolution and spatial domain
+  for several spatial objects. 
+
+  The arguments \code{\dots} may be pixel images,
+  binary masks, or other spatial objects acceptable to \code{\link{as.mask}}.
+  
+  The common pixel grid is determined by inspecting all the pixel images
+  and binary masks in the argument list, finding the pixel grid with the
+  highest spatial resolution, and extending this pixel grid to cover the
+  bounding box of all the spatial objects.
+
+  The return value is a binary mask \code{M}, representing the bounding box
+  at the chosen pixel resolution.
+  Use \code{\link{as.im}(X, W=M)} to convert a pixel image \code{X} to this new
+  pixel resolution.
+  Use \code{\link{as.mask}(W, xy=M)} to convert a window \code{W}
+  to a binary mask at this new pixel resolution.
+  See the Examples.
+}
+\value{
+  A binary mask (object of class \code{"owin"} and type \code{"mask"}).
+}
+\author{\adrian and \rolf}
+\examples{
+   A <- setcov(square(1))
+   G <- density(runifpoint(42), dimyx=16)
+   H <- commonGrid(A, letterR, G)
+   newR <- as.mask(letterR, xy=H)
+   newG <- as.im(G, W=H)
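+   # a quick check: the converted objects now share a common pixel grid
+   compatible(newG, as.im(newR))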
+}
+\seealso{
+  \code{\link{harmonise.im}},
+  \code{\link{compatible.im}},
+  \code{\link{as.im}}
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/compareFit.Rd b/man/compareFit.Rd
new file mode 100644
index 0000000..646d949
--- /dev/null
+++ b/man/compareFit.Rd
@@ -0,0 +1,130 @@
+\name{compareFit}
+\alias{compareFit}
+\title{
+  Residual Diagnostics for Multiple Fitted Models
+}
+\description{
+  Compares several fitted point process models using the
+  same residual diagnostic. 
+}
+\usage{
+compareFit(object, Fun, r = NULL, breaks = NULL, ...,
+         trend = ~1, interaction = Poisson(), rbord = NULL,
+         modelnames = NULL, same = NULL, different = NULL)
+}
+\arguments{
+  \item{object}{
+    Object or objects to be analysed.
+    Either a fitted point process model (object of class \code{"ppm"}),
+    a point pattern (object of class \code{"ppp"}),
+    or a list of these objects.
+  }
+  \item{Fun}{
+    Diagnostic function to be computed for each model.
+    One of the functions \code{Kcom}, \code{Kres}, \code{Gcom},
+    \code{Gres}, \code{psst}, \code{psstA} or \code{psstG}
+    or a string containing one of these names.
+  }
+  \item{r}{
+    Optional. 
+    Vector of values of the argument \eqn{r} at which the
+    diagnostic should be computed.
+    This argument is usually not specified. There is a sensible default.
+  }
+  \item{breaks}{
+    Optional alternative to \code{r} for advanced use. 
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{Fun}.
+  }
+  \item{trend,interaction,rbord}{
+    Optional. Arguments passed to \code{\link{ppm}}
+    to fit a point process model to the data,
+    if \code{object} is a point pattern or list of point patterns.
+    See \code{\link{ppm}} for details.
+    Each of these arguments can be a list, specifying different
+    \code{trend}, \code{interaction} and/or \code{rbord}
+    values to be used to generate different fitted models.
+  }
+  \item{modelnames}{
+    Character vector. Short descriptive names for the different models.
+  }
+  \item{same,different}{
+    Character strings or character vectors passed to
+    \code{\link{collapse.fv}} to
+    determine the format of the output.
+  }
+}
+\details{
+  This is a convenient way to collect diagnostic information
+  for several different point process models fitted to the same
+  point pattern dataset, or for point process models of the same form fitted to
+  several different datasets, etc.
+
+  The first argument, \code{object}, is usually a list of
+  fitted point process models
+  (objects of class \code{"ppm"}), obtained from the
+  model-fitting function \code{\link{ppm}}.
+
+  For convenience, \code{object} can also be a list of point patterns
+  (objects of class \code{"ppp"}).
+  In that case, point process models will be fitted to
+  each of the point pattern datasets,
+  by calling \code{\link{ppm}} using the arguments
+  \code{trend} (for the first order trend),
+  \code{interaction} (for the interpoint interaction)
+  and \code{rbord} (for the erosion distance in the border correction
+  for the pseudolikelihood). See \code{\link{ppm}} for details
+  of these arguments.
+
+  Alternatively \code{object} can be a single point pattern
+  (object of class \code{"ppp"}) and one or more of the arguments
+  \code{trend}, \code{interaction} or \code{rbord}
+  can be a list. In this case, point process models will be fitted to
+  the same point pattern dataset, using each of the model specifications
+  listed.
+
+  The diagnostic function \code{Fun} will be applied to each of the
+  point process models. The results will be collected into a single
+  function value table. The \code{modelnames} are used to label the
+  results from each fitted model.
+}
+\value{
+  Function value table (object of class \code{"fv"}).
+}
+\author{
+  \ege, \adrian and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{Kcom}},
+  \code{\link{Kres}},
+  \code{\link{Gcom}},
+  \code{\link{Gres}},
+  \code{\link{psst}},
+  \code{\link{psstA}},
+  \code{\link{psstG}},
+  \code{\link{collapse.fv}}
+}
+\examples{
+   nd <- 40
+   \testonly{
+        nd <- 10
+   }
+   ilist <- list(Poisson(), Geyer(7, 2), Strauss(7))
+   iname <- c("Poisson", "Geyer", "Strauss")
+   \testonly{
+      ilist <- ilist[c(1,3)]
+      iname <- iname[c(1,3)]
+   }
+   K <- compareFit(swedishpines, Kcom, interaction=ilist, rbord=9,
+            correction="translate",
+            same="trans", different="tcom", modelnames=iname, nd=nd)
+   K
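+   # the collapsed diagnostics can be plotted together
+   plot(K)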
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/compatible.Rd b/man/compatible.Rd
new file mode 100644
index 0000000..2442360
--- /dev/null
+++ b/man/compatible.Rd
@@ -0,0 +1,41 @@
+\name{compatible}
+\alias{compatible}
+\title{Test Whether Objects Are Compatible}
+\description{
+  Tests whether two or more objects of the same class 
+  are compatible.
+}
+\usage{
+  compatible(A, B, \dots)
+}
+\arguments{
+  \item{A,B,\dots}{Two or more objects of the same class}
+}
+\details{
+  This generic function is used to check whether the
+  objects \code{A} and \code{B} (and any additional
+  objects \code{\dots}) are compatible.
+
+  What is meant by \sQuote{compatible} depends on the class of object.
+
+  There are methods for the classes \code{"fv"}, \code{"fasp"},
+  \code{"im"} and \code{"units"}.
+}
+\value{
+  Logical value: \code{TRUE} if the objects are compatible, and \code{FALSE}
+  if they are not.
+}
+\seealso{
+  \code{\link{compatible.fv}},
+  \code{\link{compatible.fasp}},
+  \code{\link{compatible.im}},
+  \code{\link{compatible.units}} 
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/compatible.fasp.Rd b/man/compatible.fasp.Rd
new file mode 100644
index 0000000..854ff61
--- /dev/null
+++ b/man/compatible.fasp.Rd
@@ -0,0 +1,41 @@
+\name{compatible.fasp}  
+\alias{compatible.fasp}
+\title{Test Whether Function Arrays Are Compatible}
+\description{
+  Tests whether two or more function arrays (class \code{"fasp"})
+  are compatible.
+}
+\usage{
+  \method{compatible}{fasp}(A, B, \dots)
+}
+\arguments{
+  \item{A,B,\dots}{Two or more function arrays (objects of class \code{"fasp"}).}
+}
+\details{
+  An object of class \code{"fasp"} can be regarded as an array
+  of functions. Such objects are returned by the
+  command \code{\link{alltypes}}.
+
+  This command tests whether such objects are compatible
+  (so that, for example, they could be added or subtracted).
+  It is a method for the generic command \code{\link{compatible}}.
+  
+  The function arrays are compatible if the arrays have the same dimensions,
+  and the corresponding elements in each cell of the array
+  are compatible as defined by \code{\link{compatible.fv}}.
+}
+\value{
+  Logical value: \code{TRUE} if the objects are compatible, and \code{FALSE}
+  if they are not.
+}
+\seealso{
+  \code{\link{eval.fasp}}
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/compatible.fv.Rd b/man/compatible.fv.Rd
new file mode 100644
index 0000000..32201f8
--- /dev/null
+++ b/man/compatible.fv.Rd
@@ -0,0 +1,42 @@
+\name{compatible.fv}  
+\alias{compatible.fv}
+\title{Test Whether Function Objects Are Compatible}
+\description{
+  Tests whether two or more function objects (class \code{"fv"})
+  are compatible.
+}
+\usage{
+  \method{compatible}{fv}(A, B, \dots)
+}
+\arguments{
+  \item{A,B,\dots}{Two or more function value objects (class \code{"fv"}).}
+}
+\details{
+  An object of class \code{"fv"} is essentially a data frame
+  containing several different statistical estimates of the same
+  function. Such objects are returned by \code{\link{Kest}} and its
+  relatives.
+
+  This command tests whether such objects are compatible
+  (so that, for example, they could be added or subtracted).
+  It is a method for the generic command \code{\link{compatible}}.
+  
+  The functions are compatible if they have been evaluated at the
+  same sequence of values of the argument \code{r}, and if the
+  statistical estimates have the same names.
+}
+\value{
+  Logical value: \code{TRUE} if the objects are compatible, and \code{FALSE}
+  if they are not.
+}
+\seealso{
+  \code{\link{eval.fv}} 
+}
+\author{\adrian and \rolf}
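+\examples{
+  # a minimal sketch: two K estimates computed on the same r grid
+  K1 <- Kest(runifpoint(30))
+  K2 <- Kest(runifpoint(30))
+  compatible(K1, K2)
+}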
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/compatible.im.Rd b/man/compatible.im.Rd
new file mode 100644
index 0000000..0ddba0a
--- /dev/null
+++ b/man/compatible.im.Rd
@@ -0,0 +1,41 @@
+\name{compatible.im}  
+\alias{compatible.im}
+\title{Test Whether Pixel Images Are Compatible}
+\description{
+  Tests whether two or more pixel image objects have compatible dimensions.
+}
+\usage{
+ \method{compatible}{im}(A, B, \dots, tol=1e-6)
+}
+\arguments{
+  \item{A,B,\dots}{Two or more pixel images (objects of class \code{"im"}).}
+  \item{tol}{Tolerance factor}
+}
+\details{
+  This function tests whether the pixel images \code{A} and \code{B}
+  (and any additional images \code{\dots})
+  have compatible pixel dimensions. They are compatible if they have
+  the same number of rows and columns, the same physical pixel
+  dimensions, and occupy the same rectangle in the plane.
+
+  The argument \code{tol} specifies the maximum tolerated error
+  in the pixel coordinates, expressed as a
+  fraction of the dimensions of a single pixel.
+}
+\value{
+  Logical value: \code{TRUE} if the images are compatible, and \code{FALSE}
+  if they are not.
+}
+\seealso{
+  \code{\link{eval.im}},
+  \code{\link{harmonise.im}},
+  \code{\link{commonGrid}}
+}
+\author{\adrian and \rolf}
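+\examples{
+  # a minimal sketch: two images built on the same pixel grid
+  A <- as.im(function(x,y) { x }, owin())
+  B <- as.im(function(x,y) { y }, owin())
+  compatible(A, B)
+}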
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/compileK.Rd b/man/compileK.Rd
new file mode 100644
index 0000000..5e68329
--- /dev/null
+++ b/man/compileK.Rd
@@ -0,0 +1,128 @@
+\name{compileK}
+\alias{compileK}
+\alias{compilepcf}
+\title{
+  Generic Calculation of K Function and Pair Correlation Function
+}
+\description{
+  Low-level functions which 
+  calculate the estimated \eqn{K} function
+  and estimated pair correlation function
+  (or any similar functions)
+  from a matrix of pairwise distances and optional weights.
+}
+\usage{
+compileK(D, r, weights = NULL, denom = 1,
+         check = TRUE, ratio = FALSE, fname = "K")
+
+compilepcf(D, r, weights = NULL, denom = 1,
+         check = TRUE, endcorrect = TRUE, ratio=FALSE,
+	 \dots, fname = "g")
+}
+\arguments{
+  \item{D}{
+    A square matrix giving the distances between all pairs of points.
+  }
+  \item{r}{
+    A finely spaced, equally spaced sequence of distance values.
+  }
+  \item{weights}{
+    Optional numerical weights for the pairwise distances.
+    A numeric matrix with the same dimensions as \code{D}.
+    If absent, the weights are taken to equal 1.
+  }
+  \item{denom}{
+    Denominator for the estimator.
+    A single number, or a numeric vector with the same length
+    as \code{r}. See Details.
+  }
+  \item{check}{
+    Logical value specifying whether to check that \code{D} is a
+    valid matrix of pairwise distances.
+  }
+  \item{ratio}{
+    Logical value indicating whether to store ratio information.
+    See Details.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link[stats]{density.default}}
+    controlling the kernel smoothing.
+  }
+  \item{endcorrect}{
+    Logical value indicating whether to apply an end correction to
+    the pair correlation estimate at \code{r=0}.
+  }
+  \item{fname}{
+    Character string giving the name of the function being estimated.
+  }
+}
+\details{
+  These low-level functions construct estimates of
+  the \eqn{K} function or pair correlation function,
+  or any similar functions, given only the matrix of pairwise
+  distances and optional weights associated with these distances.
+
+  These functions are useful for code development and for teaching,
+  because they perform a common task, and do the housekeeping required to
+  make an object of class \code{"fv"} that represents the estimated
+  function. However, they are not very efficient. 
+  
+  \code{compileK} calculates the weighted estimate
+  of the \eqn{K} function,
+  \deqn{
+    \hat K(r) = (1/v(r)) \sum_i \sum_j 1\{ d_{ij} \le r\} w_{ij}
+  }{
+    K(r) = (1/v(r)) sum[i] sum[j] 1(d[i,j] <= r) w[i,j]
+  }
+  and \code{compilepcf} calculates the weighted estimate of the
+  pair correlation function,
+  \deqn{
+    \hat g(r) = (1/v(r)) \sum_i \sum_j \kappa( d_{ij} - r ) w_{ij}
+  }{
+    g(r) = (1/v(r)) sum[i] sum[j] kappa(d[i,j] - r) w[i,j]
+  }
+  where \eqn{d_{ij}}{d[i,j]} is the distance between spatial points
+  \eqn{i} and \eqn{j}, with corresponding weight \eqn{w_{ij}}{w[i,j]},
+  and \eqn{v(r)} is a specified denominator. Here \eqn{\kappa}{kappa}
+  is a fixed-bandwidth smoothing kernel. 
+
+  For a point pattern in two dimensions, the usual denominator \eqn{v(r)}
+  is constant for the \eqn{K} function, and proportional to \eqn{r}
+  for the pair correlation function. See the Examples.
+
+  The result is an object of class \code{"fv"} representing the
+  estimated function. This object has only one column of function
+  values. Additional columns (such as a column giving the theoretical
+  value) must be added by the user, with the aid of
+  \code{\link{bind.fv}}.
+
+  If \code{ratio=TRUE}, the result also belongs to class \code{"rat"}
+  and has attributes containing the numerator and denominator
+  of the function estimate. This allows function estimates from
+  several datasets to be pooled using \code{\link{pool}}.
+}
+\value{
+  An object of class \code{"fv"} representing the estimated function.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{Kest}},
+  \code{\link{pcf}} for definitions of the \eqn{K} function
+  and pair correlation function.
+  
+  \code{\link{bind.fv}} to add more columns.
+}
+\examples{
+  X <- japanesepines
+  D <- pairdist(X)
+  Wt <- edge.Ripley(X, D)
+  lambda <- intensity(X)
+  a <- (npoints(X)-1) * lambda
+  r <- seq(0, 0.25, by=0.01)
+  K <- compileK(D=D, r=r, weights=Wt, denom=a)
+  g <- compilepcf(D=D, r=r, weights=Wt, denom= a * 2 * pi * r)
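+  # a sketch: add the theoretical Poisson value as an extra column
+  # via bind.fv, as suggested in the Details
+  K <- bind.fv(K, data.frame(theo=pi*r^2), "theo(r)",
+               "theoretical Poisson K(r)")
+  plot(K)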
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/complement.owin.Rd b/man/complement.owin.Rd
new file mode 100644
index 0000000..4e6ba68
--- /dev/null
+++ b/man/complement.owin.Rd
@@ -0,0 +1,71 @@
+\name{complement.owin}
+\alias{complement.owin}
+\title{Take Complement of a Window}
+\description{
+  Take the set complement of a window, within its enclosing rectangle
+  or in a larger rectangle.
+}
+\usage{
+ complement.owin(w, frame=as.rectangle(w))
+}
+\arguments{
+  \item{w}{
+    an object of class \code{"owin"} describing
+    a window of observation for a point pattern.
+  }
+  \item{frame}{
+    Optional. The enclosing rectangle,
+    with respect to which the set complement is taken.
+  }
+}
+\value{
+  Another object of class \code{"owin"} 
+  representing the complement of the window, i.e. the inside
+  of the window becomes the outside.
+}
+\details{
+  This yields a window object (of class \code{"owin"},
+  see \code{\link{owin.object}}) representing the set complement
+  of \code{w} with respect to the rectangle \code{frame}.
+
+  By default, \code{frame} is the enclosing box of \code{w}
+  (originally specified by the arguments \code{xrange} and \code{yrange}
+  given to \code{\link{owin}} when \code{w} was created).
+  If \code{frame} is specified, it must be a rectangle (an object of
+  class \code{"owin"} whose type is \code{"rectangle"}) and it must be
+  larger than the enclosing box of \code{w}. This rectangle becomes the
+  enclosing box for the resulting window.
+  
+  If \code{w} is a rectangle, then \code{frame} must be specified.
+  Otherwise an error will occur (since the complement of \code{w} in
+  itself is empty).
+
+  For rectangular and polygonal windows, the complement is computed by
+  reversing the sign of each boundary polygon, while for binary masks it is
+  computed by negating the pixel values.
+}
+\seealso{
+  \code{\link{owin}},
+  \code{\link{owin.object}}
+}
+\examples{
+   # rectangular
+   a <- owin(c(0,1),c(0,1))
+   b <- owin(c(-1,2),c(-1,2))
+   bmina <- complement.owin(a, frame=b)
+   # polygonal
+   data(demopat)
+   w <- Window(demopat)
+   outside <- complement.owin(w)
+   # mask
+   w <- as.mask(Window(demopat))
+   outside <- complement.owin(w)
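+   # visualise the mask complement
+   plot(outside)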
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/concatxy.Rd b/man/concatxy.Rd
new file mode 100644
index 0000000..1e3d91d
--- /dev/null
+++ b/man/concatxy.Rd
@@ -0,0 +1,49 @@
+\name{concatxy}
+\alias{concatxy}
+\title{Concatenate x,y Coordinate Vectors}
+\description{
+  Concatenate any number of pairs of \code{x} and \code{y}
+  coordinate vectors.
+}
+\usage{
+  concatxy(\dots)
+}
+\arguments{
+  \item{\dots}{
+    Any number of arguments, each of which is a structure
+    containing elements \code{x} and \code{y}.
+  }
+}
+\value{
+  A list with two components \code{x} and \code{y}, which are the
+  concatenations of all the corresponding \code{x} and \code{y}
+  vectors in the argument list.
+}
+\details{
+  This function can be used to superimpose two or more point patterns
+  of unmarked points (but see also \code{\link{superimpose}} which is
+  recommended). 
+  
+  It assumes that each of the arguments in
+  \code{\dots} is a structure containing (at least) the elements
+  \code{x} and \code{y}. It concatenates all the \code{x} elements
+  into a vector \code{x}, and similarly for \code{y}, and returns these
+  concatenated vectors.
+}
+\seealso{
+  \code{\link{superimpose}},
+  \code{\link{quadscheme}}
+}
+\examples{
+  dat <- runifrect(30)
+  xy <- list(x=runif(10),y=runif(10))
+  new <- concatxy(dat, xy)
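+  # a sketch: the result can be converted back to a point pattern
+  Y <- as.ppp(new, W=square(1))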
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/connected.Rd b/man/connected.Rd
new file mode 100644
index 0000000..e28798d
--- /dev/null
+++ b/man/connected.Rd
@@ -0,0 +1,123 @@
+\name{connected}  
+\Rdversion{1.1}
+\alias{connected}
+\alias{connected.im}
+\alias{connected.owin}
+\title{
+  Connected components
+}
+\description{
+  Finds the topologically-connected components of a spatial object,
+  such as the connected clumps of pixels in a binary image.
+}
+\usage{
+connected(X, \dots)
+
+\method{connected}{owin}(X, \dots, method="C")
+
+\method{connected}{im}(X, \dots, background = NA, method="C")
+}
+\arguments{
+  \item{X}{
+    A spatial object such as a pixel image (object of class \code{"im"})
+    or a window (object of class \code{"owin"}).
+  }
+  \item{background}{
+    Optional. Treat pixels with this value 
+    as being part of the background.
+  }
+  \item{method}{
+    String indicating the algorithm to be used. Either \code{"C"}
+    or \code{"interpreted"}. See Details.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}} to determine the
+    pixel resolution.
+  }
+}
+\details{
+  The function \code{connected} is generic, with methods
+  for pixel images (class \code{"im"}) and windows (class \code{"owin"})
+  described here. There is also a method for point patterns
+  described in \code{\link{connected.ppp}}.
+  
+  The functions described here compute the connected component transform
+  (Rosenfeld and Pfalz, 1966)
+  of a binary image or binary mask. The argument \code{X} is first
+  converted into a pixel image with logical values. Then the algorithm
+  identifies the connected components (topologically-connected clumps
+  of pixels) in the foreground.
+
+  Two pixels belong to the same connected component if they have the value
+  \code{TRUE} and if they are neighbours (in the 8-connected
+  sense). This rule is applied repeatedly until it terminates.
+  Then each connected component
+  contains all the pixels that can be reached by stepping from neighbour
+  to neighbour.
+
+  If \code{method="C"}, the computation is performed by a compiled C language
+  implementation of the classical algorithm of Rosenfeld and Pfalz
+  (1966). If \code{method="interpreted"}, the computation is performed
+  by an \R implementation of the algorithm of Park et al (2000). 
+
+  The result is a factor-valued image, with levels that correspond to
+  the connected components. The Examples show how to extract each
+  connected component as a separate window object.
+}
+\value{
+  A pixel image (object of class \code{"im"}) with factor values.
+  The levels of the factor correspond to the connected components.
+}
+\references{
+  Park, J.-M., Looney, C.G. and Chen, H.-C. (2000)
+  Fast connected component labeling algorithm using a divide and conquer
+  technique. Pages 373--376 in
+  S.Y. Shin (ed) \emph{Computers and Their Applications:} Proceedings of
+  the ISCA 15th International Conference on Computers and Their
+  Applications, March 29-31, 2000, New Orleans, Louisiana USA. ISCA
+  2000, ISBN 1-880843-32-3. 
+
+  Rosenfeld, A. and Pfalz, J.L. (1966)
+  Sequential operations in digital processing.
+  \emph{Journal of the Association for Computing Machinery} \bold{13},
+  471--494.
+}
+\seealso{
+  \code{\link{connected.ppp}},
+  \code{\link{im.object}}, 
+  \code{\link{tess}}
+}
+\section{Warnings}{
+  It may be hard to distinguish different components 
+  in the default plot because the colours of nearby components may be
+  very similar. See the Examples for a randomised colour map.
+  
+  The algorithm for \code{method="interpreted"}
+  can be very slow for large images (or images where
+  the connected components include a large number of pixels).
+}
+\examples{
+  d <- distmap(cells, dimyx=256)
+  X <- levelset(d, 0.07)
+  plot(X)
+  Z <- connected(X)
+  plot(Z)
+  # or equivalently
+  Z <- connected(d <= 0.07)
+
+  # number of components
+  nc <- length(levels(Z))
+  # plot with randomised colour map
+  plot(Z, col=hsv(h=sample(seq(0,1,length=nc), nc)))
+
+  # how to extract the components as a list of windows
+  W <- tiles(tess(image=Z))
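+  # areas of the connected components
+  sapply(W, area)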
+}
+\author{
+  Original \R code by Julian Burgos, University of Washington.
+  Adapted for \pkg{spatstat} by
+  \adrian
+  and \rolf.
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/connected.linnet.Rd b/man/connected.linnet.Rd
new file mode 100644
index 0000000..803d472
--- /dev/null
+++ b/man/connected.linnet.Rd
@@ -0,0 +1,61 @@
+\name{connected.linnet}
+\alias{connected.linnet}
+\title{
+  Connected Components of a Linear Network
+}
+\description{
+  Find the topologically-connected components of a linear network.
+}
+\usage{
+\method{connected}{linnet}(X, \dots, what = c("labels", "components"))
+}
+\arguments{
+  \item{X}{
+    A linear network (object of class \code{"linnet"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{what}{
+    Character string specifying the kind of result.
+  }
+}
+\details{
+  The function \code{connected} is generic. This is the method for
+  linear networks (objects of class \code{"linnet"}).
+
+  Two vertices of the network are connected if they are joined by a path
+  in the network. This function divides the network into subsets, such
+  that all points in a subset are connected to each other.
+
+  If \code{what="labels"} the return value is a factor with one entry
+  for each vertex of \code{X}, identifying which connected component the
+  vertex belongs to. 
+
+  If \code{what="components"} the return value is a list of linear
+  networks, which are the connected components of \code{X}.
+}
+\value{
+  If \code{what="labels"}, a factor.
+  If \code{what="components"}, a list of linear networks.
+}
+\author{
+  \adrian
+  and Suman Rakshit.
+}
+\seealso{
+  \code{\link{thinNetwork}}
+}
+\examples{
+   # remove some edges from a network to make it disconnected
+   plot(simplenet, col="grey", main="", lty=2)
+   A <- thinNetwork(simplenet, retainedges=-c(3,5))
+   plot(A, add=TRUE, lwd=2)
+   # find the connected components
+   connected(A)
+   cA <- connected(A, what="components")
+   plot(cA[[1]], add=TRUE, col="green", lwd=2)
+   plot(cA[[2]], add=TRUE, col="blue", lwd=2)
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/connected.lpp.Rd b/man/connected.lpp.Rd
new file mode 100644
index 0000000..6d1c2d1
--- /dev/null
+++ b/man/connected.lpp.Rd
@@ -0,0 +1,78 @@
+\name{connected.lpp}
+\alias{connected.lpp}
+\title{
+  Connected Components of a Point Pattern on a Linear Network
+}
+\description{
+  Finds the topologically-connected components of a point pattern on a
+  linear network, when all pairs of points closer than a threshold distance
+  are joined.
+}
+\usage{
+\method{connected}{lpp}(X, R=Inf, \dots, dismantle=TRUE)
+}
+\arguments{
+  \item{X}{
+    A linear network (object of class \code{"lpp"}).
+  }
+   \item{R}{
+    Threshold distance. Pairs of points will be joined together
+    if they are closer than \code{R} units apart, measured
+    by the shortest path in the network.
+    The default \code{R=Inf} implies that points
+    will be joined together if they are mutually connected by any
+    path in the network.
+  }
+  \item{dismantle}{
+    Logical. If \code{TRUE} (the default), the network itself will be
+    divided into its path-connected components using
+    \code{\link{connected.linnet}}.
+  }
+ \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  The function \code{connected} is generic. This is the method for
+  point patterns on a linear network (objects of class \code{"lpp"}).
+  It divides the point pattern \code{X} into one or more groups of points.
+
+  If \code{R=Inf} (the default), then \code{X} is divided into groups
+  such that any pair of points in the same group
+  can be joined by a path in the network.
+
+  If \code{R} is a finite number, then two points of \code{X} are
+  declared to be \emph{R-close} if they lie closer than
+  \code{R} units apart, measured by the length of the shortest path in the
+  network. Two points are \emph{R-connected} if they 
+  can be reached by a series of steps between R-close pairs of
+  points of \code{X}. Then \code{X} is divided into groups such that
+  any pair of points in the same group is R-connected.
+
+  If \code{dismantle=TRUE} (the default) the algorithm first checks
+  whether the network is connected (i.e. whether any pair of vertices
+  can be joined by a path in the network), and if not, the network is
+  decomposed into its connected components.
+}
+\value{
+  A point pattern (of class \code{"lpp"}) with marks indicating the
+  grouping, or a list of such point patterns.
+}
+\author{
+  \adrian.
+}
+\seealso{
+  \code{\link{thinNetwork}}
+}
+\examples{
+   # remove some edges from a network to make it disconnected
+   plot(simplenet, col="grey", main="", lty=2)
+   A <- thinNetwork(simplenet, retainedges=-c(3,5))
+   plot(A, add=TRUE, lwd=2)
+   X <- runiflpp(10, A)
+   # find the connected components
+   cX <- connected(X)
+   plot(cX[[1]], add=TRUE, col="blue", lwd=2)
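+   # number of connected groups found
+   length(cX)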
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/connected.ppp.Rd b/man/connected.ppp.Rd
new file mode 100644
index 0000000..eb751c9
--- /dev/null
+++ b/man/connected.ppp.Rd
@@ -0,0 +1,68 @@
+\name{connected.ppp}  
+\Rdversion{1.1}
+\alias{connected.ppp}
+\title{
+  Connected Components of a Point Pattern
+}
+\description{
+  Finds the topologically-connected components of a point pattern,
+  when all pairs of points closer than a threshold distance are joined.
+}
+\usage{
+\method{connected}{ppp}(X, R, \dots)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{R}{
+    Threshold distance. Pairs of points closer than \code{R} units apart
+    will be joined together.
+  }
+  \item{\dots}{
+    Other arguments, not used by this method.
+  }
+}
+\details{
+  This function can be used to identify clumps of points in a point pattern.
+
+  The function \code{connected} is generic. This is the method for
+  point patterns (objects of class \code{"ppp"}).
+
+  The point pattern \code{X} is first converted into an abstract graph
+  by joining every pair of points that lie closer than \code{R} units
+  apart. Then the connected components of this graph are identified.
+
+  Two points in \code{X} belong to the same connected component if they
+  can be reached by a series of steps between points of \code{X},
+  each step being shorter than \code{R} units in length.
+
+  The result is a vector of labels for the points of \code{X}
+  where all the points in a connected component have the same label.
+}
+\value{
+  A point pattern, equivalent to \code{X} except that the points
+  have factor-valued marks, with levels corresponding to the
+  connected components.
+}
+\seealso{
+  \code{\link{connected.im}},
+  \code{\link{im.object}}, 
+  \code{\link{tess}}
+}
+\examples{
+   Y <- connected(redwoodfull, 0.1)
+   if(interactive()) {
+    plot(Y, cols=1:length(levels(marks(Y))),
+         main="connected(redwoodfull, 0.1)")
+   }
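+   # number of points in each connected component
+   table(marks(Y))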
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/contour.im.Rd b/man/contour.im.Rd
new file mode 100644
index 0000000..f4085ea
--- /dev/null
+++ b/man/contour.im.Rd
@@ -0,0 +1,119 @@
+\name{contour.im}
+\alias{contour.im}
+\title{Contour plot of pixel image}
+\description{
+  Generates a contour plot of a pixel image.
+}
+\usage{
+   \method{contour}{im}(x, \dots, main,
+            axes=FALSE, add=FALSE, col=par("fg"), 
+            clipwin=NULL, show.all=!add, do.plot=TRUE)
+}
+\arguments{
+  \item{x}{
+    Pixel image to be plotted.
+    An object of class \code{"im"}.
+  }
+  \item{main}{
+    Character string to be displayed as the main title.
+  }
+  \item{axes}{
+    Logical. If \code{TRUE}, coordinate axes are plotted
+    (with tick marks) around a region slightly larger than the image window.
+    If \code{FALSE} (the default), no axes are plotted,
+    and a box is drawn tightly around the image window.
+    Ignored if \code{add=TRUE}.
+  }
+  \item{add}{
+    Logical. If \code{FALSE}, a new plot is created. If \code{TRUE},
+    the contours are drawn over the existing plot.
+  }
+  \item{col}{
+    Colour in which to draw the contour lines.
+    Either a single value that can be interpreted as a colour value,
+    or a \code{colourmap} object.
+  }
+  \item{clipwin}{
+    Optional. A window (object of class \code{"owin"}).
+    Only this subset of the data will be displayed.
+  }
+  \item{\dots}{
+    Other arguments passed to \code{\link{contour.default}}
+    controlling the contour plot; see Details.
+  }
+  \item{show.all}{
+    Logical value indicating whether to display all plot elements
+    including the main title, bounding box, and (if
+    \code{axes=TRUE}) coordinate axis markings.
+    Default is \code{TRUE} for new plots and \code{FALSE} for added plots.
+  }
+  \item{do.plot}{
+    Logical value indicating whether to actually perform the plot.
+  }
+}
+\details{
+  This is a method for the generic \code{contour} function,
+  for objects of the class \code{"im"}.
+  
+  An object of class \code{"im"} represents a pixel image;
+  see \code{\link{im.object}}.
+
+  This function displays the values of the pixel image \code{x}
+  as a contour plot on the current plot device,
+  using equal scales on the \eqn{x} and \eqn{y} axes.
+
+  The appearance of the plot can be modified
+  using any of the arguments listed in the help for
+  \code{\link{contour.default}}.
+  Useful ones include:
+  \describe{
+    \item{nlevels}{
+      Number of contour levels to plot.
+    }
+    \item{drawlabels}{
+      Whether to label the contour lines with text.
+    }
+    \item{col,lty,lwd}{
+      Colour, type, and width of contour lines.
+    }
+  }
+  See \code{\link{contour.default}} for a full list of these arguments.
+
+  The defaults for any of the arguments listed above
+  can be reset using \code{\link{spatstat.options}("par.contour")}.
+
+  If \code{col} is a colour map (object of class \code{"colourmap"},
+  see \code{\link{colourmap}}) then the contours will be plotted in
+  different colours as determined by the colour map.
+  The contour at level \code{z} will be plotted
+  in the colour \code{col(z)} associated with this level in the colour map.
+}
+\value{
+  None.
+}
+\examples{
+   # an image
+   Z <- setcov(owin())
+   contour(Z, axes=TRUE)
+   contour(Z)
+
+   co <- colourmap(rainbow(100), range=c(0,1))
+   contour(Z, col=co, lwd=2)
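+
+   # a sketch of further display options; nlevels and drawlabels are
+   # arguments of contour.default, as listed in Details
+   contour(Z, nlevels=5, drawlabels=FALSE)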
+}
+\seealso{
+  \code{\link{im.object}},
+  \code{\link{plot.im}},
+  \code{\link{persp.im}}
+}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{hplot}
+
diff --git a/man/contour.imlist.Rd b/man/contour.imlist.Rd
new file mode 100644
index 0000000..2ccb67e
--- /dev/null
+++ b/man/contour.imlist.Rd
@@ -0,0 +1,57 @@
+\name{contour.imlist}
+\alias{contour.imlist}
+\alias{contour.listof}
+\title{Array of Contour Plots}
+\description{
+  Generates an array of contour plots.
+}
+\usage{
+  \method{contour}{imlist}(x, \dots)
+
+  \method{contour}{listof}(x, \dots)
+}
+\arguments{
+  \item{x}{
+    An object of the class \code{"imlist"}
+    representing a list of pixel images.
+    Alternatively \code{x} may belong to the outdated class \code{"listof"}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot.solist}} to control the
+    spatial arrangement of panels, and arguments passed
+    to \code{\link{contour.im}} to control the display of each panel.
+  }
+}
+\value{
+  Null.
+}
+\details{
+  This is a method for the generic command
+  \code{contour} for the class \code{"imlist"}.
+  An object of class \code{"imlist"} represents a list of pixel images.
+
+  (The outdated class \code{"listof"} is also handled.)
+
+  Each entry in the list \code{x} will be displayed as a contour plot,
+  in an array of panels laid out on the same graphics display,
+  using \code{\link{plot.solist}}. Individual panels are plotted
+  by \code{\link{contour.im}}.
+}
+\seealso{
+  \code{\link{plot.solist}},
+  \code{\link{contour.im}}
+}
+\examples{
+# Multitype point pattern
+ contour(D <- density(split(amacrine)))
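+ # a sketch of panel arrangement; nrows is assumed to be accepted by
+ # plot.solist, which lays out the array of panels
+ contour(D, nrows=1)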
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/convexhull.Rd b/man/convexhull.Rd
new file mode 100644
index 0000000..8ee47b2
--- /dev/null
+++ b/man/convexhull.Rd
@@ -0,0 +1,47 @@
+\name{convexhull}
+\alias{convexhull}
+\title{Convex Hull}
+\description{
+  Computes the convex hull of a spatial object.
+}
+\usage{
+convexhull(x)
+}
+\arguments{
+  \item{x}{
+    a window (object of class \code{"owin"}),
+    a point pattern (object of class \code{"ppp"}),
+    a line segment pattern (object of class \code{"psp"}),
+    or an object that can be converted to a window
+    by \code{\link{as.owin}}.
+  }
+}
+\value{
+  A window (an object of class \code{"owin"}).
+}
+\details{
+  This function computes the convex hull of the spatial object \code{x}.
+}
+\seealso{
+  \code{\link{owin}},
+  \code{\link{convexhull.xy}},
+  \code{\link{is.convex}}
+}
+\examples{
+   data(demopat)
+   W <- Window(demopat)
+   plot(convexhull(W), col="lightblue", border=NA)
+   plot(W, add=TRUE, lwd=2)
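+
+   # the argument may also be a point pattern; a minimal sketch
+   plot(convexhull(demopat), main="convexhull of points")
+   plot(demopat, add=TRUE)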
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{utilities}
+
+
+
+
diff --git a/man/convexhull.xy.Rd b/man/convexhull.xy.Rd
new file mode 100644
index 0000000..0aa31e4
--- /dev/null
+++ b/man/convexhull.xy.Rd
@@ -0,0 +1,58 @@
+\name{convexhull.xy}
+\alias{convexhull.xy}
+\title{Convex Hull of Points}
+\description{
+  Computes the convex hull of a set of points in two dimensions.
+}
+\usage{
+convexhull.xy(x, y=NULL)
+}
+\arguments{
+  \item{x}{
+    vector of \code{x} coordinates of observed points,
+    or a 2-column matrix giving \code{x,y} coordinates,
+    or a list with components \code{x,y} giving coordinates
+    (such as a point pattern object of class \code{"ppp"}).
+  }
+  \item{y}{(optional) vector of \code{y} coordinates of observed points,
+    if \code{x} is a vector.}
+}
+\value{
+  A window (an object of class \code{"owin"}).
+}
+\details{
+  Given an observed pattern of points with coordinates 
+  given by \code{x} and \code{y}, this function computes 
+  the convex hull of the points, and returns it as a window.
+}
+\seealso{
+  \code{\link{owin}},
+  \code{\link{as.owin}},
+  \code{\link{convexhull}},
+  \code{\link{bounding.box.xy}},
+  \code{\link{ripras}}
+}
+\examples{
+  x <- runif(30)
+  y <- runif(30)
+  w <- convexhull.xy(x,y)
+  plot(owin(), main="convexhull.xy(x,y)", lty=2)
+  plot(w, add=TRUE)
+  points(x,y)
+
+  X <- rpoispp(30)
+  plot(X, main="convexhull.xy(X)")
+  plot(convexhull.xy(X), add=TRUE)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{utilities}
+
+
+
+
diff --git a/man/convexify.Rd b/man/convexify.Rd
new file mode 100644
index 0000000..d7e1518
--- /dev/null
+++ b/man/convexify.Rd
@@ -0,0 +1,68 @@
+\name{convexify}
+\alias{convexify}
+\title{
+  Weil's Convexifying Operation
+}
+\description{
+  Converts the window \code{W} into a convex set by rearranging
+  the edges, preserving spatial orientation of each edge.
+}
+\usage{
+ convexify(W, eps)
+}
+\arguments{
+  \item{W}{
+    A window (object of class \code{"owin"}).
+  }
+  \item{eps}{
+    Optional. Minimum edge length of polygonal approximation,
+    if \code{W} is not a polygon.
+  }
+}
+\details{
+  Weil (1995) defined a convexification operation 
+  for windows \eqn{W} that belong to the convex ring (that is,
+  for any \eqn{W} which is a finite union of convex sets).
+  Note that this is \bold{not} the same as the convex hull.
+
+  The convexified set \eqn{f(W)} has the same total boundary length as
+  \eqn{W} and the same distribution of orientations of the boundary.
+  If \eqn{W} is a polygonal set, then the convexification \eqn{f(W)}
+  is obtained by rearranging all the edges of \eqn{W} in order of
+  their spatial orientation.
+
+  The argument \code{W} must be a window. If it is not already a polygonal
+  window, it is first converted to one, using
+  \code{\link{simplify.owin}}.
+  The edges are sorted in increasing order of angular orientation
+  and reassembled into a convex polygon.
+}
+\value{
+  A window (object of class \code{"owin"}).
+}
+\references{
+  Weil, W. (1995)
+  The estimation of mean particle shape and mean
+  particle number in overlapping particle systems in
+  the plane. \emph{Advances in Applied Probability} \bold{27}, 102--119.
+}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{convexhull}} for the convex hull of a window.
+}
+\examples{
+  opa <- par(mfrow=c(1,2))
+  plot(letterR)
+  plot(convexify(letterR))
+  par(opa)
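+  # a sketch of the property stated in Details: the convexified window
+  # should have the same total boundary length as the original
+  perimeter(letterR)
+  perimeter(convexify(letterR))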
+}
+\keyword{spatial}
+\keyword{utilities}
diff --git a/man/convolve.im.Rd b/man/convolve.im.Rd
new file mode 100644
index 0000000..615a23c
--- /dev/null
+++ b/man/convolve.im.Rd
@@ -0,0 +1,79 @@
+\name{convolve.im}
+\alias{convolve.im}
+\title{Convolution of Pixel Images}
+\description{
+  Computes the convolution of two pixel images.
+}
+\usage{
+ convolve.im(X, Y=X, \dots, reflectX=FALSE, reflectY=FALSE)
+}
+\arguments{
+  \item{X}{
+    A pixel image (object of class \code{"im"}).
+  }
+  \item{Y}{
+    Optional. Another pixel image.
+  }
+  \item{\dots}{Ignored.}
+  \item{reflectX,reflectY}{
+    Logical values specifying whether the images \code{X} and \code{Y}
+    (respectively) should be reflected in the origin before computing
+    the convolution.
+  }
+}
+\value{
+  A pixel image (an object of class \code{"im"}) representing the
+  convolution of \code{X} and \code{Y}.
+}
+\details{
+  The \emph{convolution} of two pixel images \eqn{X} and \eqn{Y} in the plane
+  is the function \eqn{C(v)} defined for each vector \eqn{v} as
+  \deqn{
+    C(v) = \int X(u)Y(v-u)\, {\rm d}u
+  }{
+    C(v) = integral of X(u) * Y(v-u) du
+  }
+  where the integral is
+  over all spatial locations \eqn{u}, and where \eqn{X(u)} and
+  \eqn{Y(u)} denote the pixel values of \eqn{X} and \eqn{Y} respectively
+  at location \eqn{u}.
+  
+  This command computes a discretised approximation to
+  the convolution, using the Fast Fourier Transform.
+  The return value is
+  another pixel image (object of class \code{"im"}) whose greyscale values
+  are values of the convolution.
+
+  If \code{reflectX = TRUE} then the pixel image \code{X} is reflected
+  in the origin (see \code{\link{reflect}})
+  before the convolution is computed, so that
+  \code{convolve.im(X,Y,reflectX=TRUE)} is mathematically
+  equivalent to \code{convolve.im(reflect(X), Y)}. (These two commands
+  are not exactly equivalent, because the reflection is performed
+  in the Fourier domain in the first command, and reflection is
+  performed in the spatial domain in the second command).
+
+  Similarly if \code{reflectY = TRUE} then the pixel image \code{Y} is reflected
+  in the origin before the convolution is computed, so that
+  \code{convolve.im(X,Y,reflectY=TRUE)} is mathematically
+  equivalent to \code{convolve.im(X, reflect(Y))}.
+}
+\seealso{
+  \code{\link{imcov}},
+  \code{\link{reflect}}
+}
+\examples{
+  X <- as.im(letterR)
+  Y <- as.im(square(1))
+  plot(convolve.im(X, Y))
+  plot(convolve.im(X, Y, reflectX=TRUE))
+  plot(convolve.im(X))
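+  # reflecting Y instead: mathematically equivalent to
+  # convolve.im(X, reflect(Y)), as noted in Details
+  plot(convolve.im(X, Y, reflectY=TRUE))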
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/coords.Rd b/man/coords.Rd
new file mode 100644
index 0000000..2c27766
--- /dev/null
+++ b/man/coords.Rd
@@ -0,0 +1,84 @@
+\name{coords}
+\Rdversion{1.1}
+\alias{coords}
+\alias{coords.ppp}
+\alias{coords.ppx}
+\alias{coords<-}
+\alias{coords<-.ppp}
+\alias{coords<-.ppx}
+\title{
+  Extract or Change Coordinates of a Spatial or Spatiotemporal Point Pattern
+}
+\description{
+  Given any kind of spatial or space-time point pattern,
+  this function extracts the
+  (space and/or time and/or local) coordinates of the points
+  and returns them as a data frame.
+}
+\usage{
+  coords(x, ...)
+  \method{coords}{ppp}(x, ...)
+  \method{coords}{ppx}(x, ..., spatial = TRUE, temporal = TRUE, local=TRUE)
+  coords(x, ...) <- value
+  \method{coords}{ppp}(x, ...) <- value
+  \method{coords}{ppx}(x, ..., spatial = TRUE, temporal = TRUE, local=TRUE) <- value
+}
+\arguments{
+  \item{x}{
+    A point pattern: either a two-dimensional point pattern
+    (object of class \code{"ppp"}), a three-dimensional point pattern
+    (object of class \code{"pp3"}), or a 
+    general multidimensional space-time point pattern 
+    (object of class \code{"ppx"}).
+  }
+  \item{\dots}{
+    Further arguments passed to methods.
+  }
+  \item{spatial,temporal,local}{
+    Logical values indicating whether to extract spatial, temporal
+    and local coordinates, respectively. The default is to return all
+    such coordinates. (Only relevant to \code{ppx} objects).
+  }
+  \item{value}{
+    New values of the coordinates. A numeric vector with one entry for each
+    point in \code{x}, or a numeric matrix or data frame with one row
+    for each point in \code{x}.
+  }
+}
+\details{
+  The function \code{coords} extracts the coordinates from
+  a point pattern. The function \code{coords<-} replaces the coordinates
+  of the point pattern with new values.
+  
+  Both functions \code{coords} and \code{coords<-} are generic, with methods for
+  the classes \code{"ppp"}) and \code{"ppx"}.
+  An object of class \code{"pp3"} also inherits from \code{"ppx"} and
+  is handled by the method for \code{"ppx"}.
+}
+\value{
+  \code{coords} returns a \code{data.frame} with one row for each point,
+  containing the coordinates.
+  \code{coords<-} returns the altered point pattern.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{ppx}},
+  \code{\link{pp3}},
+  \code{\link{ppp}},
+  \code{\link{as.hyperframe.ppx}},
+  \code{\link{as.data.frame.ppx}}.
+}
+\examples{
+   df <- data.frame(x=runif(4),y=runif(4),t=runif(4))
+   X <- ppx(data=df, coord.type=c("s","s","t"))
+   coords(X)
+   coords(X, temporal=FALSE)
+   coords(X) <- matrix(runif(12), ncol=3)
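+   # a pp3 object is handled by the ppx method; a minimal sketch
+   X3 <- pp3(runif(4), runif(4), runif(4), box3(c(0,1)))
+   coords(X3)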
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/copper.Rd b/man/copper.Rd
new file mode 100644
index 0000000..1281c22
--- /dev/null
+++ b/man/copper.Rd
@@ -0,0 +1,119 @@
+\name{copper}
+\alias{copper}
+\docType{data}
+\title{
+  Berman-Huntington points and lines data
+}
+\description{
+  These data come from an intensive geological survey of
+  a 70 x 158 km region in central Queensland, Australia.
+  They consist of 67 points representing copper ore deposits,
+  and 146 line segments representing geological `lineaments'.
+  Lineaments are linear features, visible on a satellite image,
+  that are believed to consist largely of geological faults (Berman, 1986,
+  p. 55).
+  It would be of great interest to predict the occurrence of copper deposits
+  from the lineament pattern, since the latter can easily be observed on
+  satellite images. 
+
+  These data were introduced and analysed by Berman (1986).
+  They have also been studied by Berman and Diggle (1989),
+  Berman and Turner (1992),
+  Baddeley and Turner (2000, 2005), Foxall and Baddeley (2002)
+  and Baddeley et al (2005).
+  
+  Many analyses have been performed on the southern half of the data only.
+  This subset is also provided.
+} 
+\format{
+  \code{copper} is a list with the following entries:
+  \describe{
+    \item{Points}{a point pattern (object of class \code{"ppp"})
+      representing the full point pattern of copper deposits.
+      See \code{\link{ppp.object}} for details of the format.
+    }
+    \item{Lines}{a line segment pattern (object of class \code{"psp"})
+      representing the lineaments in the full dataset.
+      See \code{\link{psp.object}} for details of the format.
+    }
+    \item{SouthWindow}{the window delineating the southern half of
+      the study region. An object of class \code{"owin"}.
+    }
+    \item{SouthPoints}{the point pattern of copper deposits in the
+      southern half of the study region. An object of class
+      \code{"ppp"}.
+    }
+    \item{SouthLines}{the line segment pattern of the lineaments in the
+      southern half of the study region. An object of class \code{"psp"}.
+    }
+  }
+}
+\usage{data(copper)}
+\examples{
+
+  data(copper)
+
+  # Plot full dataset
+
+  plot(copper$Points)
+  plot(copper$Lines, add=TRUE)
+
+  # Plot southern half of data
+  plot(copper$SouthPoints)
+  plot(copper$SouthLines, add=TRUE)
+
+  \dontrun{
+    Z <- distmap(copper$SouthLines)
+    plot(Z)
+    X <- copper$SouthPoints
+    ppm(X, ~D, covariates=list(D=Z))
+  }
+}
+\source{
+  Dr Jonathan Huntington, CSIRO Earth Science and Resource Engineering,
+  Sydney, Australia.
+  Coordinates kindly provided by Dr. Mark Berman
+  and Dr. Andy Green, CSIRO, Sydney, Australia.
+}
+\references{
+Baddeley, A. and Turner, R. (2000)
+Practical maximum pseudolikelihood for spatial point patterns.
+\emph{Australian and New Zealand Journal of Statistics}
+\bold{42}, 283--322.
+ 
+Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005)
+Residual analysis for spatial point processes.
+\emph{Journal of the Royal Statistical Society, Series B}
+\bold{67}, 617--666.
+
+Baddeley, A. and Turner, R. (2005)
+Modelling spatial point patterns in R.
+In: A. Baddeley, P. Gregori, J. Mateu, R. Stoica, and D. Stoyan,
+editors, \emph{Case Studies in Spatial Point Pattern Modelling},
+Lecture Notes in Statistics number 185. Pages 23--74.
+Springer-Verlag, New York, 2006. 
+ISBN: 0-387-28311-0.  
+
+Berman, M. (1986).
+Testing for spatial association between a point process and another
+  stochastic process.
+\emph{Applied Statistics} \bold{35}, 54--62.
+
+Berman, M. and Diggle, P.J. (1989)
+Estimating Weighted Integrals of the Second-order Intensity of a
+Spatial Point Process.
+\emph{Journal of the Royal Statistical Society, series B}
+\bold{51}, 81--92.
+
+Berman, M. and Turner, T.R. (1992)
+Approximating point process likelihoods with GLIM.
+\emph{Applied Statistics} \bold{41}, 31--38.
+ 
+Foxall, R. and Baddeley, A. (2002)
+Nonparametric measures of association between a
+spatial point process and a random set, with
+geological applications. \emph{Applied Statistics} \bold{51}, 165--182.
+
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/copyExampleFiles.Rd b/man/copyExampleFiles.Rd
new file mode 100644
index 0000000..53dbc58
--- /dev/null
+++ b/man/copyExampleFiles.Rd
@@ -0,0 +1,54 @@
+\name{copyExampleFiles}
+\alias{copyExampleFiles}
+\title{
+  Copy Data Files for Example 
+}
+\description{
+  This command copies several data files to
+  a folder (directory) chosen by the user,
+  so that they can be used for a practice example.
+}
+\usage{
+copyExampleFiles(which, folder = getwd())
+}
+\arguments{
+  \item{which}{
+    Character string name (partially matched)
+    of one of the datasets installed in \code{spatstat} for which
+    the original data files are provided. 
+    If \code{which} is missing, a list of available options is printed.
+  }
+  \item{folder}{
+    Character string path name of a folder (directory) in which the
+    files will be placed. Defaults to the current working directory.
+  }
+}
+\details{
+  The original text files containing data for the selected
+  dataset are copied to the chosen folder.
+
+  This is part of an exercise described in Chapter 3 of
+  Baddeley, Rubak and Turner (2015).
+}
+\references{
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Analysing Spatial Point Patterns with R}.
+  Chapman and Hall/CRC, to appear.
+}
+\author{
+\adrian
+
+
+\rolf
+
+and \ege
+
+}
+\examples{
+   copyExampleFiles()
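+   \dontrun{
+     # copy the files for a chosen dataset; "vesicles" is assumed
+     # to be among the available options
+     copyExampleFiles("vesicles")
+   }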
+}
+\seealso{
+  \code{\link{vesicles}},
+  \code{\link{finpines}}
+}
+\keyword{spatial}
diff --git a/man/corners.Rd b/man/corners.Rd
new file mode 100644
index 0000000..3f338fa
--- /dev/null
+++ b/man/corners.Rd
@@ -0,0 +1,44 @@
+\name{corners}
+\alias{corners}
+\title{Corners of a rectangle}
+\description{
+  Returns the four corners of a rectangle 
+}
+\usage{
+ corners(window)
+}
+\arguments{
+  \item{window}{A window. 
+    An object of class \code{\link{owin}},
+    or data in any format acceptable to \code{\link{as.owin}()}.
+  }
+}
+\value{
+  A list with two components \code{x} and \code{y}, which are numeric
+  vectors of length 4 giving the coordinates of the four corner points
+  of the (bounding rectangle of the) window.
+}
+\details{
+  This trivial function is occasionally convenient.
+  If \code{window} is of type \code{"rectangle"} this returns the
+  four corners of the window itself; 
+  otherwise, it returns the corners of the bounding rectangle
+  of the window.
+}
+\seealso{
+  \code{\link{quad.object}},
+  \code{\link{quadscheme}}
+}
+\examples{
+  w <- unit.square()
+  corners(w)
+       # returns list(x=c(0,1,0,1),y=c(0,0,1,1))
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{utilities}
diff --git a/man/covering.Rd b/man/covering.Rd
new file mode 100644
index 0000000..41cdd0d
--- /dev/null
+++ b/man/covering.Rd
@@ -0,0 +1,51 @@
+\name{covering}
+\alias{covering}
+\title{Cover Region with Discs}
+\description{
+  Given a spatial region, this function finds an efficient
+  covering of the region using discs of a chosen radius.
+}
+\usage{
+ covering(W, r, \dots, giveup=1000)
+}
+\arguments{
+  \item{W}{
+    A window (object of class \code{"owin"}).
+  }
+  \item{r}{positive number: the radius of the covering discs.}
+  \item{\dots}{
+    extra arguments passed to \code{\link{as.mask}}
+    controlling the pixel resolution for the calculations.
+  }
+  \item{giveup}{
+    Maximum number of attempts to place additional discs.
+  }
+}
+\value{
+  A point pattern (object of class \code{"ppp"})
+  giving the centres of the discs.
+}
+\details{
+  This function finds an efficient covering of the
+  window \code{W} using discs of the given radius \code{r}.
+  The result is a point pattern giving the centres of the discs.
+
+  The algorithm tries to use as few discs as possible,
+  but is not guaranteed to find the minimal number of discs.
+  It begins by placing a hexagonal grid of points inside \code{W},
+  then adds further points until every location inside \code{W} lies
+  no more than \code{r} units away from one of the points.
+}
+\examples{
+  rr <- 0.5
+  X <- covering(letterR, rr)
+  plot(grow.rectangle(Frame(X), rr), type="n", main="")
+  plot(X, pch=16, add=TRUE, col="red")
+  plot(letterR, add=TRUE, lwd=3)
+  plot(X \%mark\% (2*rr), add=TRUE, markscale=1)
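+  # a sketch of a coverage check: random probe locations in letterR
+  # should all lie within distance rr of some disc centre
+  P <- runifpoint(500, letterR)
+  max(nncross(P, X)$dist) <= rr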
+}
+\author{
+  \adrian
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/crossdist.Rd b/man/crossdist.Rd
new file mode 100644
index 0000000..9d77da3
--- /dev/null
+++ b/man/crossdist.Rd
@@ -0,0 +1,54 @@
+\name{crossdist}
+\alias{crossdist}
+\title{Pairwise distances}
+\description{
+  Computes the distances between pairs of `things'
+  taken from two different datasets.
+}
+\usage{
+  crossdist(X, Y, \dots)
+}
+\arguments{
+  \item{X,Y}{
+    Two objects of the same class.
+  }
+  \item{\dots}{
+    Additional arguments depending on the method.
+  }
+}
+\value{
+  A matrix whose \code{[i,j]} entry is the distance
+  from the \code{i}-th thing in the first dataset
+  to the \code{j}-th thing in the second dataset.
+}
+\details{
+  Given two datasets \code{X} and \code{Y}
+  (representing either two point patterns or
+  two line segment patterns)
+  \code{crossdist} computes the Euclidean distance from each thing
+  in the first dataset to each thing in the second dataset,
+  and returns a matrix containing these distances.
+
+  The function \code{crossdist} is generic, with
+  methods for point patterns (objects of class \code{"ppp"}),
+  line segment patterns (objects of class \code{"psp"}),
+  and a default method. See the documentation for
+  \code{\link{crossdist.ppp}},
+  \code{\link{crossdist.psp}} or
+  \code{\link{crossdist.default}} for further details.
+}
+\seealso{
+  \code{\link{crossdist.ppp}},
+  \code{\link{crossdist.psp}},
+  \code{\link{crossdist.default}},
+  \code{\link{pairdist}},
+  \code{\link{nndist}}
+}
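+\examples{
+   # a minimal sketch: dispatches to the method for class "ppp"
+   d <- crossdist(runifpoint(10), runifpoint(5))
+   dim(d)    # 10 x 5
+}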
+\author{
+  \adrian
+  
+  
+}
+\keyword{spatial}
+
+\keyword{math}
diff --git a/man/crossdist.default.Rd b/man/crossdist.default.Rd
new file mode 100644
index 0000000..4fd7f3f
--- /dev/null
+++ b/man/crossdist.default.Rd
@@ -0,0 +1,89 @@
+\name{crossdist.default}
+\alias{crossdist.default}
+\title{Pairwise distances between two different sets of points}
+\description{
+  Computes the distances between each pair of points
+  taken from two different sets of points.
+}
+\usage{
+  \method{crossdist}{default}(X, Y, x2, y2, \dots,
+             period=NULL, method="C", squared=FALSE)
+}
+\arguments{
+  \item{X,Y}{
+    Numeric vectors of equal length specifying the coordinates of
+    the first set of points.
+  }
+  \item{x2,y2}{
+    Numeric vectors of equal length specifying the coordinates of
+    the second set of points.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{period}{
+    Optional. Dimensions for periodic edge correction.
+  }
+  \item{method}{
+    String specifying which method of calculation to use.
+    Values are \code{"C"} and \code{"interpreted"}.
+  }
+  \item{squared}{
+    Logical. If \code{squared=TRUE}, the squared distances are
+    returned instead (this computation is faster).
+  }
+}
+\value{
+  A matrix whose \code{[i,j]} entry is the distance
+  from the \code{i}-th point in the first set of points
+  to the \code{j}-th point in the second set of points.
+}
+\details{
+  Given two sets of points,
+  this function computes the Euclidean distance from each point
+  in the first set to each point in the second set,
+  and returns a matrix containing these distances.
+
+  This is a method for the generic function \code{\link{crossdist}}.
+
+  This function expects \code{X} and \code{Y} to be numeric vectors
+  of equal length specifying the coordinates of the first set of points.
+  The arguments \code{x2},\code{y2} specify the coordinates of the
+  second set of points.
+  
+  Alternatively if \code{period} is given,
+  then the distances will be computed in the `periodic'
+  sense (also known as `torus' distance).
+  The points will be treated as if they are in a rectangle
+  of width \code{period[1]} and height \code{period[2]}.
+  Opposite edges of the rectangle are regarded as equivalent.
+  
+  The argument \code{method} is not normally used. It is
+  retained only for checking the validity of the software.
+  If \code{method = "interpreted"} then the distances are
+  computed using interpreted R code only. If \code{method="C"}
+  (the default) then C code is used. 
+  The C code is faster by a factor of 4.
+}
+\seealso{
+  \code{\link{crossdist}},
+  \code{\link{crossdist.ppp}},
+  \code{\link{crossdist.psp}},
+  \code{\link{pairdist}},
+  \code{\link{nndist}},
+  \code{\link{Gest}}
+}
+\examples{
+   d <- crossdist(runif(7), runif(7), runif(12), runif(12))
+   d <- crossdist(runif(7), runif(7), runif(12), runif(12), period=c(1,1))
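+   # squared distances, which are faster to compute; a minimal sketch
+   d2 <- crossdist(runif(7), runif(7), runif(12), runif(12), squared=TRUE)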
+}
+\author{Pavel Grabarnik
+  \email{pavel.grabar at issp.serpukhov.su}
+  and
+  \adrian
+  
+  
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/crossdist.lpp.Rd b/man/crossdist.lpp.Rd
new file mode 100644
index 0000000..effc5df
--- /dev/null
+++ b/man/crossdist.lpp.Rd
@@ -0,0 +1,72 @@
+\name{crossdist.lpp} 
+\alias{crossdist.lpp}
+\title{Pairwise distances between two point patterns on a linear network}
+\description{
+  Computes the distances between pairs of points
+  taken from two different point patterns on
+  the same linear network.
+}
+\usage{
+  \method{crossdist}{lpp}(X, Y, \dots, method="C")
+}
+\arguments{
+  \item{X,Y}{
+    Point patterns on a linear network (objects of class \code{"lpp"}).
+    They must lie on the \emph{same} network.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{method}{String specifying which method of calculation to use.
+    Values are \code{"C"} and \code{"interpreted"}.
+  }
+}
+\value{
+  A matrix whose \code{[i,j]} entry is the distance
+  from the \code{i}-th point in \code{X}
+  to the \code{j}-th point in \code{Y}.
+  Matrix entries are nonnegative numbers or infinity (\code{Inf}).
+}
+\details{
+  Given two point patterns on a linear network,
+  this function computes the Euclidean distance from each point
+  in the first pattern to each point in the second pattern,
+  measuring distance by the shortest path in the network.
+
+  This is a method for the generic function \code{\link{crossdist}}
+  for point patterns on a linear network
+  (objects of class \code{"lpp"}).
+
+  This function expects two point pattern objects \code{X} and \code{Y}
+  on the \emph{same} linear network,
+  and returns the matrix whose \code{[i,j]} entry is the
+  shortest-path distance from \code{X[i]} to \code{Y[j]}.
+
+  The argument \code{method} is not normally used. It is
+  retained only for checking the validity of the software.
+  If \code{method = "interpreted"} then the distances are
+  computed using interpreted R code only. If \code{method="C"}
+  (the default) then C code is used. 
+  The C code is much faster.
+
+  If two points cannot be joined by a path,
+  the distance between them is infinite (\code{Inf}).
+}
+\seealso{
+  \code{\link{crossdist}},
+  \code{\link{crossdist.ppp}},
+  \code{\link{pairdist}},
+  \code{\link{nndist}}
+}
+\examples{
+   v <- split(chicago)
+   X <- v$cartheft
+   Y <- v$burglary
+   d <- crossdist(X, Y)
+}
+\author{
+  \adrian.
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/crossdist.pp3.Rd b/man/crossdist.pp3.Rd
new file mode 100644
index 0000000..4a1ba84
--- /dev/null
+++ b/man/crossdist.pp3.Rd
@@ -0,0 +1,73 @@
+\name{crossdist.pp3} 
+\alias{crossdist.pp3}
+\title{Pairwise distances between two different three-dimensional point patterns}
+\description{
+  Computes the distances between pairs of points
+  taken from two different three-dimensional point patterns.
+}
+\usage{
+  \method{crossdist}{pp3}(X, Y, \dots, periodic=FALSE, squared=FALSE)
+}
+\arguments{
+  \item{X,Y}{
+    Point patterns in three dimensions (objects of class \code{"pp3"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{periodic}{
+    Logical. Specifies whether to apply a periodic edge correction.
+  }
+  \item{squared}{
+    Logical. If \code{squared=TRUE}, the squared distances are
+    returned instead (this computation is faster).
+  }
+}
+\value{
+  A matrix whose \code{[i,j]} entry is the distance
+  from the \code{i}-th point in \code{X}
+  to the \code{j}-th point in \code{Y}.
+}
+\details{
+  Given two point patterns in three-dimensional space, 
+  this function computes the Euclidean distance from each point
+  in the first pattern to each point in the second pattern,
+  and returns a matrix containing these distances.
+
+  This is a method for the generic function \code{\link{crossdist}}
+  for three-dimensional point patterns (objects of class \code{"pp3"}).
+
+  This function expects two
+  point patterns \code{X} and \code{Y}, and returns the matrix
+  whose \code{[i,j]} entry is the distance from \code{X[i]} to
+  \code{Y[j]}.
+
+  Alternatively if \code{periodic=TRUE}, then, provided the windows
+  containing \code{X} and \code{Y} are identical and rectangular,
+  the distances will be computed in the `periodic'
+  sense (also known as `torus' distance): opposite edges of the
+  rectangle are regarded as equivalent.
+  This is meaningless if the window is not a rectangle.
+}
+\seealso{
+  \code{\link{crossdist}},
+  \code{\link{pairdist}},
+  \code{\link{nndist}},
+  \code{\link{G3est}}
+}
+\examples{
+   X <- runifpoint3(20)
+   Y <- runifpoint3(30)
+   d <- crossdist(X, Y)
+   d <- crossdist(X, Y, periodic=TRUE)
+}
+\author{
+  \adrian
+  
+  
+  based on code for two dimensions by 
+  Pavel Grabarnik.
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/crossdist.ppp.Rd b/man/crossdist.ppp.Rd
new file mode 100644
index 0000000..2daf8f9
--- /dev/null
+++ b/man/crossdist.ppp.Rd
@@ -0,0 +1,85 @@
+\name{crossdist.ppp} 
+\alias{crossdist.ppp}
+\title{Pairwise distances between two different point patterns}
+\description{
+  Computes the distances between pairs of points
+  taken from two different point patterns.
+}
+\usage{
+  \method{crossdist}{ppp}(X, Y, \dots, periodic=FALSE, method="C", squared=FALSE)
+}
+\arguments{
+  \item{X,Y}{
+    Point patterns (objects of class \code{"ppp"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{periodic}{
+    Logical. Specifies whether to apply a periodic edge correction.
+  }
+  \item{method}{
+    String specifying which method of calculation to use.
+    Values are \code{"C"} and \code{"interpreted"}.
+  }
+  \item{squared}{
+    Logical. If \code{squared=TRUE}, the squared distances are
+    returned instead (this computation is faster).
+  }
+}
+\value{
+  A matrix whose \code{[i,j]} entry is the distance
+  from the \code{i}-th point in \code{X}
+  to the \code{j}-th point in \code{Y}.
+}
+\details{
+  Given two point patterns, 
+  this function computes the Euclidean distance from each point
+  in the first pattern to each point in the second pattern,
+  and returns a matrix containing these distances.
+
+  This is a method for the generic function \code{\link{crossdist}}
+  for point patterns (objects of class \code{"ppp"}).
+
+  This function expects two
+  point patterns \code{X} and \code{Y}, and returns the matrix
+  whose \code{[i,j]} entry is the distance from \code{X[i]} to
+  \code{Y[j]}.
+
+  Alternatively if \code{periodic=TRUE}, then, provided the windows
+  containing \code{X} and \code{Y} are identical and rectangular,
+  the distances will be computed in the `periodic'
+  sense (also known as `torus' distance): opposite edges of the
+  rectangle are regarded as equivalent.
+  This is meaningless if the window is not a rectangle.
+  
+  The argument \code{method} is not normally used. It is
+  retained only for checking the validity of the software.
+  If \code{method = "interpreted"} then the distances are
+  computed using interpreted R code only. If \code{method="C"}
+  (the default) then C code is used. 
+  The C code is faster by a factor of 4.
+}
+\seealso{
+  \code{\link{crossdist}},
+  \code{\link{crossdist.default}},
+  \code{\link{crossdist.psp}},
+  \code{\link{pairdist}},
+  \code{\link{nndist}},
+  \code{\link{Gest}}
+}
+\examples{
+   data(cells)
+   d <- crossdist(cells, runifpoint(6))
+   d <- crossdist(cells, runifpoint(6), periodic=TRUE)
+}
+\author{Pavel Grabarnik
+  \email{pavel.grabar at issp.serpukhov.su}
+  and
+  \adrian
+  
+  
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/crossdist.ppx.Rd b/man/crossdist.ppx.Rd
new file mode 100644
index 0000000..9a97ee2
--- /dev/null
+++ b/man/crossdist.ppx.Rd
@@ -0,0 +1,62 @@
+\name{crossdist.ppx}
+\alias{crossdist.ppx}
+\title{Pairwise Distances Between Two Different Multi-Dimensional Point Patterns}
+\description{
+  Computes the distances between pairs of points
+  taken from two different multi-dimensional point patterns.
+}
+\usage{
+  \method{crossdist}{ppx}(X, Y, \dots)
+}
+\arguments{
+  \item{X,Y}{
+    Multi-dimensional point patterns (objects of class \code{"ppx"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{coords.ppx}} to determine
+    which coordinates should be used.
+  }
+}
+\value{
+  A matrix whose \code{[i,j]} entry is the distance
+  from the \code{i}-th point in \code{X}
+  to the \code{j}-th point in \code{Y}.
+}
+\details{
+  Given two point patterns in multi-dimensional space, 
+  this function computes the Euclidean distance from each point
+  in the first pattern to each point in the second pattern,
+  and returns a matrix containing these distances.
+
+  This is a method for the generic function \code{\link{crossdist}}
+  for three-dimensional point patterns (objects of class \code{"ppx"}).
+
+  This function expects two multidimensional
+  point patterns \code{X} and \code{Y}, and returns the matrix
+  whose \code{[i,j]} entry is the distance from \code{X[i]} to
+  \code{Y[j]}.
+  
+  By default, both spatial and temporal coordinates are extracted.
+  To obtain the spatial distance between points in a space-time point
+  pattern, set \code{temporal=FALSE}.
+}
+\seealso{
+  \code{\link{crossdist}},
+  \code{\link{pairdist}},
+  \code{\link{nndist}}
+}
+\examples{
+   df <- data.frame(x=runif(3),y=runif(3),z=runif(3),w=runif(3))
+   X <- ppx(data=df)
+   df <- data.frame(x=runif(5),y=runif(5),z=runif(5),w=runif(5))
+   Y <- ppx(data=df)
+   d <- crossdist(X, Y)
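+
+   # purely spatial distances in a space-time pattern, as described
+   # in Details; a sketch using coord.type to flag the time coordinate
+   dfA <- data.frame(x=runif(3), y=runif(3), t=runif(3))
+   dfB <- data.frame(x=runif(5), y=runif(5), t=runif(5))
+   A <- ppx(data=dfA, coord.type=c("s","s","t"))
+   B <- ppx(data=dfB, coord.type=c("s","s","t"))
+   ds <- crossdist(A, B, temporal=FALSE)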
+}
+\author{
+  \adrian
+  
+  
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/crossdist.psp.Rd b/man/crossdist.psp.Rd
new file mode 100644
index 0000000..1cce9f6
--- /dev/null
+++ b/man/crossdist.psp.Rd
@@ -0,0 +1,81 @@
+\name{crossdist.psp} 
+\alias{crossdist.psp}
+\title{Pairwise distances between two different line segment patterns}
+\description{
+  Computes the distances between all pairs of line segments
+  taken from two different line segment patterns.
+}
+\usage{
+  \method{crossdist}{psp}(X, Y, \dots, method="C", type="Hausdorff")
+}
+\arguments{
+  \item{X,Y}{
+    Line segment patterns (objects of class \code{"psp"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{method}{String specifying which method of calculation to use.
+    Values are \code{"C"} and \code{"interpreted"}.
+    Usually not specified.
+  }
+  \item{type}{
+    Type of distance to be computed. Options are
+    \code{"Hausdorff"} and \code{"separation"}. Partial matching is used.
+  }
+}
+\value{
+  A matrix whose \code{[i,j]} entry is the distance
+  from the \code{i}-th line segment in \code{X}
+  to the \code{j}-th line segment in \code{Y}.
+}
+\details{
+  This is a method for the generic function \code{\link{crossdist}}.
+
+  Given two line segment patterns, 
+  this function computes the distance from each line segment
+  in the first pattern to each line segment in the second pattern,
+  and returns a matrix containing these distances.
+
+  The distances between line segments are measured in one of two ways:
+  \itemize{
+    \item if \code{type="Hausdorff"}, distances are computed
+    in the Hausdorff metric. The Hausdorff
+    distance between two line segments is the \emph{maximum} distance
+    from any point on one of the segments to the nearest point on
+    the other segment.
+    \item if \code{type="separation"}, distances are computed
+    as the \emph{minimum} distance from a point on one line segment to
+    a point on the other line segment. For example, line segments which
+    cross over each other have separation zero.
+  }
+  
+  The argument \code{method} is not normally used. It is
+  retained only for checking the validity of the software.
+  If \code{method = "interpreted"} then the distances are
+  computed using interpreted \R code only. If \code{method="C"}
+  (the default) then compiled \code{C} code is used. 
+  The \code{C} code is several times faster.
+}
+\seealso{
+  \code{\link{pairdist}},
+  \code{\link{nndist}},
+  \code{\link{Gest}}
+}
+\examples{
+   L1 <- psp(runif(5), runif(5), runif(5), runif(5), owin())
+   L2 <- psp(runif(10), runif(10), runif(10), runif(10), owin())
+   D <- crossdist(L1, L2)
+   #result is a 5 x 10 matrix
+   S <- crossdist(L1, L2, type="sep")
+}
+\author{
+  \adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+
+\keyword{math}
diff --git a/man/crossing.linnet.Rd b/man/crossing.linnet.Rd
new file mode 100644
index 0000000..f12602b
--- /dev/null
+++ b/man/crossing.linnet.Rd
@@ -0,0 +1,44 @@
+\name{crossing.linnet}
+\alias{crossing.linnet}
+\title{
+  Crossing Points between Linear Network and Other Lines
+}
+\description{
+  Find all the crossing-points between
+  a linear network and another pattern of lines or line segments.
+}
+\usage{
+crossing.linnet(X, Y)
+}
+\arguments{
+  \item{X}{
+    Linear network (object of class \code{"linnet"}).
+  }
+  \item{Y}{
+    A linear network, or a spatial pattern of line segments (class \code{"psp"})
+    or infinite lines (class \code{"infline"}).
+  }
+}
+\details{
+  All crossing-points between \code{X} and \code{Y}
+  are determined. The result is a point pattern on the network \code{X}.
+}
+\value{
+  Point pattern on a linear network (object of class \code{"lpp"}).
+}
+\author{
+  \adrian.
+}
+\seealso{
+  \code{\link{crossing.psp}}
+%  \code{\link{chop.linnet}}
+}
+\examples{
+   plot(simplenet, main="")
+   L <- infline(p=runif(3), theta=runif(3, max=pi/2))
+   plot(L, col="red")
+   Y <- crossing.linnet(simplenet, L)
+   plot(Y, add=TRUE, cols="blue")
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/crossing.psp.Rd b/man/crossing.psp.Rd
new file mode 100644
index 0000000..9cf3da8
--- /dev/null
+++ b/man/crossing.psp.Rd
@@ -0,0 +1,73 @@
+\name{crossing.psp}
+\alias{crossing.psp}
+\title{Crossing Points of Two Line Segment Patterns}
+\description{
+  Finds any crossing points between 
+  two line segment patterns.
+}
+\usage{
+  crossing.psp(A,B,fatal=TRUE,details=FALSE)
+}
+\arguments{
+  \item{A,B}{
+    Line segment patterns (objects of class \code{"psp"}).
+  }
+  \item{details}{
+    Logical value indicating whether to return additional information.
+    See below.
+  }
+  \item{fatal}{
+    Logical value indicating what to do
+    if the windows of \code{A} and \code{B} do not overlap.
+    See Details.
+  }
+}
+\value{
+  Point pattern (object of class \code{"ppp"}).
+}
+\details{
+  This function finds any crossing points between
+  the line segment patterns \code{A} and \code{B}.
+
+  A crossing point occurs whenever one of the line segments in \code{A}
+  intersects one of the line segments in \code{B}, at a nonzero
+  angle of intersection.
+
+  The result is a point pattern consisting of all the intersection points.
+
+  If \code{details=TRUE}, additional information is computed,
+  specifying where each intersection point came from.
+  The resulting point pattern has a data frame of marks, with columns
+  named \code{iA, jB, tA, tB}. The marks \code{iA}
+  and \code{jB} are the indices of the line segments in \code{A} and
+  \code{B}, respectively, which produced each intersection point.
+  The marks \code{tA} and \code{tB} are numbers between 0 and 1
+  specifying the position of the intersection point along the
+  original segments.
+
+  If the windows \code{Window(A)} and \code{Window(B)} do not
+  overlap, then an error will be reported if \code{fatal=TRUE},
+  while if \code{fatal=FALSE} an error will not occur
+  and the result will be \code{NULL}.
+}
+\seealso{
+  \code{\link{selfcrossing.psp}},
+  \code{\link{psp.object}},
+  \code{\link{ppp.object}}.
+}
+\examples{
+  a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  b <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  plot(a, col="green", main="crossing.psp")
+  plot(b, add=TRUE, col="blue")
+  P <- crossing.psp(a,b)
+  plot(P, add=TRUE, col="red")
+  as.data.frame(crossing.psp(a,b,details=TRUE))
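+  # behaviour for non-overlapping windows, as described in Details;
+  # a sketch: with fatal=FALSE the result is NULL rather than an error
+  b2 <- shift(b, c(10, 10))
+  is.null(crossing.psp(a, b2, fatal=FALSE))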
+}
+\author{
+  \adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{manip}
+
diff --git a/man/cut.im.Rd b/man/cut.im.Rd
new file mode 100644
index 0000000..72a6d32
--- /dev/null
+++ b/man/cut.im.Rd
@@ -0,0 +1,67 @@
+\name{cut.im}
+\alias{cut.im}
+\title{Convert Pixel Image from Numeric to Factor}
+\description{
+  Transform the values of a pixel image
+  from numeric values into a factor.
+}
+\usage{
+  \method{cut}{im}(x, \dots)
+}
+\arguments{
+  \item{x}{
+    A pixel image.
+    An object of class \code{"im"}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{cut.default}}.
+    They determine the breakpoints for the mapping from numerical values to
+    factor values. See \code{\link{cut.default}}.
+  }
+} 
+\value{
+  A pixel image (object of class \code{"im"}) with 
+  pixel values that are a factor.
+  See \code{\link{im.object}}.
+}
+\details{
+  This simple function applies the generic \code{\link{cut}} operation
+  to the pixel values of the image \code{x}. The range of pixel values
+  is divided into several intervals, and each
+  interval is associated with a level of a factor. 
+  The result is another pixel image,
+  with the same window and pixel grid as
+  \code{x}, but with the numeric value of each pixel discretised
+  by replacing it by the factor level. 
+
+  This function is a convenient
+  way to inspect an image and to obtain summary statistics.
+  See the examples.
+
+  To select a subset of an image, use the subset operator
+  \code{\link{[.im}} instead.
+}
+\seealso{
+  \code{\link{cut}},
+  \code{\link{im.object}}
+}
+\examples{
+  # artificial image data
+  Z <- setcov(square(1))
+
+  Y <- cut(Z, 3)
+  Y <- cut(Z, breaks=seq(0,1,length=5))
+
+  # cut at the quartiles
+  # (divides the image into 4 equal areas)
+  Y <- cut(Z, quantile(Z))
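+
+  # a sketch of the summary statistics mentioned in Details:
+  # summary() tabulates the pixels in each factor level
+  summary(Y)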
+}
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/cut.lpp.Rd b/man/cut.lpp.Rd
new file mode 100644
index 0000000..7072e4e
--- /dev/null
+++ b/man/cut.lpp.Rd
@@ -0,0 +1,111 @@
+\name{cut.lpp}
+\alias{cut.lpp}
+\title{Classify Points in a Point Pattern on a Network}
+\description{
+  For a point pattern on a linear network, 
+  classify the points into distinct types
+  according to the numerical marks in the pattern, or according to
+  another variable.
+}
+\usage{
+  \method{cut}{lpp}(x, z=marks(x), ...)
+}
+\arguments{
+  \item{x}{
+    A point pattern on a linear network
+    (object of class \code{"lpp"}).
+  }
+  \item{z}{
+    Data determining the classification. A numeric vector,
+    a factor, a pixel image on a linear network (class \code{"linim"}),
+    a function on a linear network (class \code{"linfun"}),
+    a tessellation on a linear network (class \code{"lintess"}), a string
+    giving the name of a column of marks, or one of the coordinate
+    names \code{"x"}, \code{"y"}, \code{"seg"} or \code{"tp"}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{cut.default}}.
+    They determine the breakpoints for the mapping from numerical values
+    in \code{z} to factor values in the output.
+    See \code{\link{cut.default}}.
+  }
+} 
+\value{
+  A multitype point pattern on the same linear network,
+  that is, a point pattern object
+  (of class \code{"lpp"}) with a \code{marks} vector that is a factor.
+}
+\details{
+  This function has the effect of classifying each point in the point
+  pattern \code{x} into one of several possible types. The
+  classification is based on the dataset \code{z}, which may be either
+  \itemize{
+    \item
+    a factor (of length equal to the number of points in \code{x})
+    determining the classification of each point in \code{x}.
+    Levels of the factor determine the classification.
+    \item
+    a numeric vector (of length equal to the number of points in
+    \code{x}). The range of values of \code{z} will be divided into
+    bands (the number of bands is determined by \code{\dots})
+    and \code{z} will be converted to a factor using
+    \code{\link{cut.default}}.
+    \item
+    a pixel image on a network (object of class \code{"linim"}).
+    The value of \code{z} at each point of \code{x} will be
+    used as the classifying variable.
+    \item
+    a function on a network (object of class \code{"linfun"}, see
+    \code{\link{linfun}}). 
+    The value of \code{z} at each point of \code{x} will be
+    used as the classifying variable.
+    \item
+    a tessellation on a network (object of class \code{"lintess"}, see
+    \code{\link{lintess}}). Each point of \code{x} will be classified
+    according to the tile of the tessellation into which it falls.
+    \item
+    a character string, giving the name of one of the columns
+    of \code{marks(x)}, if this is a data frame.
+    \item
+    a character string identifying one of the coordinates:
+    the spatial coordinates
+    \code{"x"}, \code{"y"} or the segment identifier \code{"seg"}
+    or the fractional coordinate along the segment, \code{"tp"}.
+  }
+  The default is to take \code{z} to be the vector of marks in
+  \code{x} (or the first column in the data frame of marks of \code{x},
+  if it is a data frame). If the marks are numeric, then the range of values
+  of the numerical marks is divided into several intervals, and each
+  interval is associated with a level of a factor. 
+  The result is a
+  marked point pattern, on the same linear network,
+  with the same point locations as
+  \code{x}, but with the numeric mark of each point discretised
+  by replacing it by the factor level.
+  This is a convenient way to transform a marked point pattern
+  which has numeric marks into a multitype point pattern,
+  for example to plot it or analyse it. See the examples.
+
+  To select some points from \code{x}, use the subset operators
+  \code{\link{[.lpp}} or \code{\link{subset.lpp}} instead.
+}
+\seealso{
+  \code{\link{cut}},
+  \code{\link{lpp}},
+  \code{\link{lintess}},
+  \code{\link{linfun}},
+  \code{\link{linim}}
+}
+\examples{
+  X <- runiflpp(20, simplenet)
+  f <- linfun(function(x,y,seg,tp) { x }, simplenet)
+  plot(cut(X, f, breaks=4))
+  plot(cut(X, "x", breaks=4))
+  plot(cut(X, "seg"))
+}
+
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/cut.ppp.Rd b/man/cut.ppp.Rd
new file mode 100644
index 0000000..9e2f841
--- /dev/null
+++ b/man/cut.ppp.Rd
@@ -0,0 +1,138 @@
+\name{cut.ppp}
+\alias{cut.ppp}
+\title{Classify Points in a Point Pattern}
+\description{
+  Classifies the points in a point pattern into distinct types
+  according to the numerical marks in the pattern, or according to
+  another variable.
+}
+\usage{
+  \method{cut}{ppp}(x, z=marks(x), ...)
+}
+\arguments{
+  \item{x}{
+    A two-dimensional point pattern.
+    An object of class \code{"ppp"}.
+  }
+  \item{z}{
+    Data determining the classification. A numeric vector,
+    a factor, a pixel image, a window, a tessellation, or a string
+    giving the name of a column of marks or the name of a spatial coordinate.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{cut.default}}.
+    They determine the breakpoints for the mapping from numerical values
+    in \code{z} to factor values in the output.
+    See \code{\link{cut.default}}.
+  }
+} 
+\value{
+  A multitype point pattern, that is, a point pattern object
+  (of class \code{"ppp"}) with a \code{marks} vector that is a factor.
+}
+\details{
+  This function has the effect of classifying each point in the point
+  pattern \code{x} into one of several possible types. The
+  classification is based on the dataset \code{z}, which may be either
+  \itemize{
+    \item
+    a factor (of length equal to the number of points in \code{x})
+    determining the classification of each point in \code{x}.
+    Levels of the factor determine the classification.
+    \item
+    a numeric vector (of length equal to the number of points in
+    \code{x}). The range of values of \code{z} will be divided into
+    bands (the number of bands is determined by \code{\dots})
+    and \code{z} will be converted to a factor using
+    \code{\link{cut.default}}.
+    \item
+    a pixel image (object of class \code{"im"}).
+    The value of \code{z} at each point of \code{x} will be
+    used as the classifying variable.
+    \item
+    a tessellation (object of class \code{"tess"}, see
+    \code{\link{tess}}). Each point of \code{x} will be classified
+    according to the tile of the tessellation into which it falls.
+    \item
+    a window (object of class \code{"owin"}).
+    Each point of \code{x} will be classified
+    according to whether it falls inside or outside this window.
+    \item
+    a character string, giving the name of one of the columns
+    of \code{marks(x)}, if this is a data frame.
+    \item
+    a character string \code{"x"} or \code{"y"} identifying one of the
+    spatial coordinates.
+  }
+  The default is to take \code{z} to be the vector of marks in
+  \code{x} (or the first column in the data frame of marks of \code{x},
+  if it is a data frame). If the marks are numeric, then the range of values
+  of the numerical marks is divided into several intervals, and each
+  interval is associated with a level of a factor. 
+  The result is a
+  marked point pattern, with the same window and point locations as
+  \code{x}, but with the numeric mark of each point discretised
+  by replacing it by the factor level.
+  This is a convenient way to transform a marked point pattern
+  which has numeric marks into a multitype point pattern,
+  for example to plot it or analyse it. See the examples.
+
+  To select some points from a point pattern, use the subset operators
+  \code{\link{[.ppp}} or \code{\link{subset.ppp}} instead.
+}
+\seealso{
+  \code{\link{cut}},
+  \code{\link{ppp.object}},
+  \code{\link{tess}}
+}
+\examples{
+ # (1) cutting based on numeric marks of point pattern
+ 
+ trees <- longleaf
+ # Longleaf Pines data
+ # the marks are positive real numbers indicating tree diameters.
+
+ \testonly{
+	# smaller dataset
+	trees <- trees[seq(1, npoints(trees), by=80)]
+ }
+ \dontrun{
+ plot(trees)
+ }
+
+ # cut the range of tree diameters into three intervals
+ long3 <- cut(trees, breaks=3)
+ \dontrun{
+ plot(long3)
+ }
+
+ # adult trees defined to have diameter at least 30 cm
+ long2 <- cut(trees, breaks=c(0,30,100), labels=c("Sapling", "Adult"))
+ plot(long2)
+ plot(long2, cols=c("green","blue"))
+
+ # (2) cutting based on another numeric vector
+ # Divide Swedish Pines data into 3 classes
+ # according to nearest neighbour distance
+
+ swedishpines
+ plot(cut(swedishpines, nndist(swedishpines), breaks=3))
+
+ # (3) cutting based on tessellation
+ # Divide Swedish Pines study region into a 4 x 4 grid of rectangles
+ # and classify points accordingly
+
+ tes <- tess(xgrid=seq(0,96,length=5),ygrid=seq(0,100,length=5))
+ plot(cut(swedishpines, tes))
+ plot(tes, lty=2, add=TRUE)
+
+ # (4) multivariate marks
+ finpines
+ cut(finpines, "height", breaks=4)
+}
+
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/data.ppm.Rd b/man/data.ppm.Rd
new file mode 100644
index 0000000..e58c167
--- /dev/null
+++ b/man/data.ppm.Rd
@@ -0,0 +1,49 @@
+\name{data.ppm}
+\alias{data.ppm}
+\title{Extract Original Data from a Fitted Point Process Model}
+\description{
+  Given a fitted point process model,
+  this function extracts the original point pattern dataset
+  to which the model was fitted.
+}
+\usage{
+  data.ppm(object)
+}
+\arguments{
+  \item{object}{
+    fitted point process model (an object of class \code{"ppm"}).
+  }
+}
+\value{
+  A point pattern (object of class \code{"ppp"}).
+}
+\details{
+  An object of class \code{"ppm"} represents a point process model
+  that has been fitted to data. It is typically produced by
+  the model-fitting algorithm \code{\link{ppm}}.
+  The object contains complete information about the original data
+  point pattern to which the model was fitted.
+  This function extracts the original data pattern.
+
+  See \code{\link{ppm.object}} for a list of all operations that can be
+  performed on objects of class \code{"ppm"}.
+}
+\seealso{
+  \code{\link{ppm.object}},
+  \code{\link{ppp.object}}
+}
+\examples{
+ data(cells)
+ fit <- ppm(cells, ~1, Strauss(r=0.1))
+ X <- data.ppm(fit)
+ # 'X' is identical to 'cells'
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
+\keyword{models}
diff --git a/man/dclf.progress.Rd b/man/dclf.progress.Rd
new file mode 100644
index 0000000..1a78543
--- /dev/null
+++ b/man/dclf.progress.Rd
@@ -0,0 +1,160 @@
+\name{dclf.progress}
+\alias{dclf.progress}
+\alias{mad.progress}
+\alias{mctest.progress}
+\title{
+  Progress Plot of Test of Spatial Pattern
+}
+\description{
+  Generates a progress plot (envelope representation) of the
+  Diggle-Cressie-Loosmore-Ford test or the
+  Maximum Absolute Deviation test for a spatial point pattern.
+}
+\usage{
+dclf.progress(X, \dots)
+mad.progress(X, \dots)
+mctest.progress(X, fun = Lest, \dots,
+                exponent = 1, nrank = 1,
+                interpolate = FALSE, alpha, rmin=0)
+}
+\arguments{
+  \item{X}{
+    Either a point pattern (object of class \code{"ppp"}, \code{"lpp"}
+    or other class), a fitted point process model (object of class \code{"ppm"},
+    \code{"kppm"} or other class) or an envelope object (class
+    \code{"envelope"}). 
+  }
+  \item{\dots}{
+    Arguments passed to \code{mctest.progress} or to \code{\link{envelope}}.
+    Useful arguments include \code{fun} to determine the summary
+    function, \code{nsim} to specify the number of Monte Carlo
+    simulations, \code{alternative} to specify one-sided or two-sided
+    envelopes, and \code{verbose=FALSE} to turn off the messages.
+  }
+  \item{fun}{
+    Function that computes the desired summary statistic
+    for a point pattern. 
+  }
+  \item{exponent}{
+    Positive number. The exponent of the \eqn{L^p} distance.
+    See Details.
+  }
+  \item{nrank}{
+    Integer. The rank of the critical value of the Monte Carlo test,
+    amongst the \code{nsim} simulated values.
+    A rank of 1 means that the minimum and maximum
+    simulated values will become the critical values for the test.
+  }
+  \item{interpolate}{
+    Logical value indicating how to compute the critical value.
+    If \code{interpolate=FALSE} (the default), a standard Monte Carlo test
+    is performed, and the critical value is the largest
+    simulated value of the test statistic (if \code{nrank=1})
+    or the \code{nrank}-th largest (if \code{nrank} is another number).
+    If \code{interpolate=TRUE}, kernel density estimation
+    is applied to the simulated values, and the critical value is
+    the upper \code{alpha} quantile of this estimated distribution.
+  }
+  \item{alpha}{
+    Optional. The significance level of the test.
+    Equivalent to \code{nrank/(nsim+1)} where \code{nsim} is the
+    number of simulations.
+  }
+  \item{rmin}{
+    Optional. Left endpoint for the interval of \eqn{r} values
+    on which the test statistic is calculated.
+  }
+}
+\details{
+  The Diggle-Cressie-Loosmore-Ford test and the 
+  Maximum Absolute Deviation test for a spatial point pattern
+  are described in \code{\link{dclf.test}}.
+  These tests depend on the choice of an interval of
+  distance values (the argument \code{rinterval}).
+  A \emph{progress plot} or \emph{envelope representation}
+  of the test (Baddeley et al, 2014) is a plot of the
+  test statistic (and the corresponding critical value) against the length of
+  the interval \code{rinterval}.
+  
+  The command \code{dclf.progress} performs 
+  \code{\link{dclf.test}} on \code{X} using all possible intervals
+  of the form \eqn{[0,R]}, and returns the resulting values of the test
+  statistic, and the corresponding critical values of the test,
+  as a function of \eqn{R}. 
+
+  Similarly \code{mad.progress} performs
+  \code{\link{mad.test}} using all possible intervals
+  and returns the test statistic and critical value.
+
+  More generally, \code{mctest.progress} performs a test based on the
+  \eqn{L^p} discrepancy between the curves. The deviation between two
+  curves is measured by the \eqn{p}th root of the integral of
+  the \eqn{p}th power of the absolute value of the difference
+  between the two curves. The exponent \eqn{p} is
+  given by the argument \code{exponent}. The case \code{exponent=2}
+  is the Cressie-Loosmore-Ford test, while \code{exponent=Inf} is the
+  MAD test.
+
+  If the argument \code{rmin} is given, it specifies the left endpoint
+  of the interval defining the test statistic: the tests are
+  performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]}
+  where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}.
+  
+  The result of each command is an object of class \code{"fv"}
+  that can be plotted to obtain the progress plot. The display shows
+  the test statistic (solid black line) and the Monte Carlo
+  acceptance region (grey shading).
+
+  The significance level for the Monte Carlo test is
+  \code{nrank/(nsim+1)}. Note that \code{nsim} defaults to 99,
+  so if the values of \code{nrank} and \code{nsim} are not given,
+  the default is a test with significance level 0.01.
+
+  If \code{X} is an envelope object, then some of the data stored
+  in \code{X} may be re-used:
+  \itemize{
+    \item
+    If \code{X} is an envelope object containing simulated functions,
+    and \code{fun=NULL}, then
+    the code will re-use the simulated functions stored in \code{X}.
+    \item
+    If \code{X} is an envelope object containing
+    simulated point patterns, 
+    then \code{fun} will be applied to the stored point patterns
+    to obtain the simulated functions.
+    If \code{fun} is not specified, it defaults to \code{\link{Lest}}.
+    \item
+    Otherwise, new simulations will be performed,
+    and \code{fun} defaults to  \code{\link{Lest}}.
+  }
+}
+\value{
+  An object of class \code{"fv"} that can be plotted to
+  obtain the progress plot. 
+}
+\references{
+  Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T.,
+  Milne, R. and Nair, G. (2014)
+  On tests of spatial pattern based on simulation envelopes.
+  \emph{Ecological Monographs} \bold{84}(3) 477--489.
+}
+\author{
+  \adrian,
+  Andrew Hardegen, Tom Lawrence, Gopal Nair and
+  Robin Milne.
+}
+\seealso{
+  \code{\link{dclf.test}} and
+  \code{\link{mad.test}} for the tests.
+  
+  See \code{\link{plot.fv}} for information on plotting
+  objects of class \code{"fv"}.
+}
+\examples{
+  plot(dclf.progress(cells, nsim=19))
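+  # A sketch of the more general interface documented above:
+  # an L^2 progress plot with interpolated critical values,
+  # computed from r = 0.02 upwards.
+  \dontrun{
+    plot(mctest.progress(cells, fun=Lest, exponent=2, nsim=19,
+                         interpolate=TRUE, alpha=0.05, rmin=0.02))
+  }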
+}
+\keyword{spatial}
+\keyword{htest}
+
diff --git a/man/dclf.sigtrace.Rd b/man/dclf.sigtrace.Rd
new file mode 100644
index 0000000..cbcb658
--- /dev/null
+++ b/man/dclf.sigtrace.Rd
@@ -0,0 +1,168 @@
+\name{dclf.sigtrace}
+\alias{dclf.sigtrace}
+\alias{mad.sigtrace}
+\alias{mctest.sigtrace}
+\title{
+  Significance Trace of Cressie-Loosmore-Ford or Maximum Absolute
+  Deviation Test
+}
+\description{
+  Generates a Significance Trace of the
+  Diggle (1986) / Cressie (1991) / Loosmore and Ford (2006) test or the
+  Maximum Absolute Deviation test for a spatial point pattern.
+}
+\usage{
+dclf.sigtrace(X, \dots)
+mad.sigtrace(X, \dots)
+mctest.sigtrace(X, fun=Lest, \dots,
+                exponent=1, interpolate=FALSE, alpha=0.05,
+                confint=TRUE, rmin=0)
+}
+\arguments{
+  \item{X}{
+    Either a point pattern (object of class \code{"ppp"}, \code{"lpp"}
+    or other class), a fitted point process model (object of class \code{"ppm"},
+    \code{"kppm"} or other class) or an envelope object (class
+    \code{"envelope"}). 
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{envelope}}
+    or \code{\link{mctest.progress}}.
+    Useful arguments include \code{fun} to determine the summary
+    function, \code{nsim} to specify the number of Monte Carlo
+    simulations, \code{alternative} to specify a one-sided test,
+    and \code{verbose=FALSE} to turn off the messages.
+  }
+  \item{fun}{
+    Function that computes the desired summary statistic
+    for a point pattern.
+  }
+  \item{exponent}{
+    Positive number. The exponent of the \eqn{L^p} distance.
+    See Details.
+  }
+  \item{interpolate}{
+    Logical value specifying whether to calculate the \eqn{p}-value
+    by interpolation.
+    If \code{interpolate=FALSE} (the default), a standard Monte Carlo test
+    is performed, yielding a \eqn{p}-value of the form \eqn{(k+1)/(n+1)}
+    where \eqn{n} is the number of simulations and \eqn{k} is the number
+    of simulated values which are more extreme than the observed value.
+    If \code{interpolate=TRUE}, the \eqn{p}-value is calculated by
+    applying kernel density estimation to the simulated values, and
+    computing the tail probability for this estimated distribution.
+  }
+  \item{alpha}{
+    Significance level to be plotted (this has no effect on the calculation
+    but is simply plotted as a reference value).
+  }
+  \item{confint}{
+    Logical value indicating whether to compute a confidence interval
+    for the \sQuote{true} \eqn{p}-value.
+  }
+  \item{rmin}{
+    Optional. Left endpoint for the interval of \eqn{r} values
+    on which the test statistic is calculated.
+  }
+}
+\details{
+  The Diggle (1986) / Cressie (1991) / Loosmore and Ford (2006) test and the
+  Maximum Absolute Deviation test for a spatial point pattern
+  are described in \code{\link{dclf.test}}.
+  These tests depend on the choice of an interval of
+  distance values (the argument \code{rinterval}).
+  A \emph{significance trace} (Bowman and Azzalini, 1997;
+  Baddeley et al, 2014, 2015)
+  of the test is a plot of the \eqn{p}-value
+  obtained from the test against the length of
+  the interval \code{rinterval}.
+  
+  The command \code{dclf.sigtrace} performs 
+  \code{\link{dclf.test}} on \code{X} using all possible intervals
+  of the form \eqn{[0,R]}, and returns the resulting \eqn{p}-values
+  as a function of \eqn{R}.
+
+  Similarly \code{mad.sigtrace} performs
+  \code{\link{mad.test}} using all possible intervals
+  and returns the \eqn{p}-values.
+
+  More generally, \code{mctest.sigtrace} performs a test based on the
+  \eqn{L^p} discrepancy between the curves. The deviation between two
+  curves is measured by the \eqn{p}th root of the integral of
+  the \eqn{p}th power of the absolute value of the difference
+  between the two curves. The exponent \eqn{p} is
+  given by the argument \code{exponent}. The case \code{exponent=2}
+  is the Cressie-Loosmore-Ford test, while \code{exponent=Inf} is the
+  MAD test.
+
+  If the argument \code{rmin} is given, it specifies the left endpoint
+  of the interval defining the test statistic: the tests are
+  performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]}
+  where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}.
+  
+  The result of each command
+  is an object of class \code{"fv"} that can be plotted to
+  obtain the significance trace. The plot shows the Monte Carlo
+  \eqn{p}-value (solid black line), 
+  the critical value \code{0.05} (dashed red line),
+  and a pointwise 95\% confidence band (grey shading)
+  for the \sQuote{true} (Neyman-Pearson) \eqn{p}-value.
+  The confidence band is based on the Agresti-Coull (1998)
+  confidence interval for a binomial proportion (when
+  \code{interpolate=FALSE}) or the delta method
+  and normal approximation (when \code{interpolate=TRUE}).
+
+  If \code{X} is an envelope object and \code{fun=NULL} then
+  the code will re-use the simulated functions stored in \code{X}.
+}
+\value{
+  An object of class \code{"fv"} that can be plotted to
+  obtain the significance trace. 
+}
+\references{
+  Agresti, A. and Coull, B.A. (1998)
+  Approximate is better than \dQuote{Exact} for interval
+   estimation of binomial proportions.
+  \emph{American Statistician} \bold{52}, 119--126.
+
+  Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T.,
+  Milne, R. and Nair, G. (2014)
+  On tests of spatial pattern based on simulation envelopes.
+  \emph{Ecological Monographs} \bold{84}(3) 477--489.
+
+  Baddeley, A., Hardegen, A., Lawrence, T., 
+  Milne, R.K., Nair, G.M. and Rakshit, S. (2015)
+  Pushing the envelope: extensions of graphical
+  Monte Carlo tests. Submitted for publication.
+
+  Bowman, A.W. and Azzalini, A. (1997) 
+  \emph{Applied smoothing techniques for data analysis: 
+    the kernel approach with S-Plus illustrations}.
+  Oxford University Press, Oxford.
+}
+\author{
+  \adrian, Andrew Hardegen, Tom Lawrence,
+  Robin Milne, Gopalan Nair and Suman Rakshit.
+  Implemented by \adrian, \rolf and \ege.
+}
+\seealso{
+  \code{\link{dclf.test}} for the tests;
+  \code{\link{dclf.progress}} for progress plots.
+  
+  See \code{\link{plot.fv}} for information on plotting
+  objects of class \code{"fv"}.
+
+  See also \code{\link{dg.sigtrace}}.
+}
+\examples{
+  plot(dclf.sigtrace(cells, Lest, nsim=19))
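+  # A sketch of the more general interface documented above:
+  # significance trace of the L^2 statistic, without the
+  # confidence band.
+  \dontrun{
+    plot(mctest.sigtrace(cells, fun=Lest, exponent=2, nsim=19,
+                         confint=FALSE))
+  }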
+}
+\keyword{spatial}
+\keyword{htest}
diff --git a/man/dclf.test.Rd b/man/dclf.test.Rd
new file mode 100644
index 0000000..06e2746
--- /dev/null
+++ b/man/dclf.test.Rd
@@ -0,0 +1,270 @@
+\name{dclf.test}
+\alias{dclf.test}
+\alias{mad.test}
+\title{
+  Diggle-Cressie-Loosmore-Ford and Maximum Absolute Deviation Tests
+}
+\description{
+  Perform the Diggle (1986) / Cressie (1991) / Loosmore and Ford (2006)
+  test or the Maximum Absolute Deviation test for a spatial point pattern.
+}
+\usage{
+dclf.test(X, \dots, alternative=c("two.sided", "less", "greater"),
+                  rinterval = NULL, leaveout=1,
+                  scale=NULL, clamp=FALSE, interpolate=FALSE)
+
+mad.test(X, \dots,  alternative=c("two.sided", "less", "greater"),
+                  rinterval = NULL, leaveout=1,
+                  scale=NULL, clamp=FALSE, interpolate=FALSE)
+}
+\arguments{
+  \item{X}{
+    Data for the test.
+    Either a point pattern (object of class \code{"ppp"}, \code{"lpp"}
+    or other class), a fitted point process model (object of class \code{"ppm"},
+    \code{"kppm"} or other class), a simulation envelope (object of class
+    \code{"envelope"}) or a previous result of \code{dclf.test} or
+    \code{mad.test}. 
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{envelope}}.
+    Useful arguments include \code{fun} to determine the summary
+    function, \code{nsim} to specify the number of Monte Carlo
+    simulations, \code{verbose=FALSE} to turn off the messages,
+    \code{savefuns} or \code{savepatterns} to save the simulation
+    results, and \code{use.theory} described under Details. 
+  }
+  \item{alternative}{
+    The alternative hypothesis. A character string.
+    The default is a two-sided alternative. See Details.
+  }
+  \item{rinterval}{
+    Interval of values of the summary function argument \code{r}
+    over which the maximum absolute deviation, or the integral,
+    will be computed for the test. A numeric vector of length 2.
+  }
+  \item{leaveout}{
+    Optional integer 0, 1 or 2 indicating how to calculate the
+    deviation between the observed summary function and the
+    nominal reference value, when the reference value must be estimated
+    by simulation. See Details.
+  }
+  \item{scale}{
+    Optional. A function in the \R language which determines the
+    relative scale of deviations, as a function of
+    distance \eqn{r}. Summary function values for distance \code{r}
+    will be \emph{divided} by \code{scale(r)} before the
+    test statistic is computed.
+  }
+  \item{clamp}{
+    Logical value indicating how to compute deviations
+    in a one-sided test. Deviations of the observed
+    summary function from the theoretical summary function are initially
+    evaluated as signed real numbers, with large positive values indicating
+    consistency with the alternative hypothesis.
+    If \code{clamp=FALSE} (the default), these values are not changed.
+    If \code{clamp=TRUE}, any negative values are replaced by zero.
+  }
+  \item{interpolate}{
+    Logical value specifying whether to calculate the \eqn{p}-value
+    by interpolation.
+    If \code{interpolate=FALSE} (the default), a standard Monte Carlo test
+    is performed, yielding a \eqn{p}-value of the form \eqn{(k+1)/(n+1)}
+    where \eqn{n} is the number of simulations and \eqn{k} is the number
+    of simulated values which are more extreme than the observed value.
+    If \code{interpolate=TRUE}, the \eqn{p}-value is calculated by
+    applying kernel density estimation to the simulated values, and
+    computing the tail probability for this estimated distribution.
+  }
+}
+\details{
+  These functions perform hypothesis tests for goodness-of-fit
+  of a point pattern dataset to a point process model, based on
+  Monte Carlo simulation from the model.
+
+  \code{dclf.test} performs the test advocated by Loosmore and Ford (2006)
+  which is also described in Diggle (1986), Cressie (1991, page 667, equation
+  (8.5.42)) and Diggle (2003, page 14). See Baddeley et al (2014) for
+  detailed discussion.
+
+  \code{mad.test} performs the \sQuote{global} or
+  \sQuote{Maximum Absolute Deviation} test described by Ripley (1977, 1981).
+  See Baddeley et al (2014).
+  
+  The type of test depends on the type of argument \code{X}.
+  \itemize{
+    \item 
+    If \code{X} is some kind of point pattern, then a test of Complete
+    Spatial Randomness (CSR) will be performed. That is,
+    the null hypothesis is that the point pattern is completely random.
+    \item
+    If \code{X} is a fitted point process model, then a test of
+    goodness-of-fit for the fitted model will be performed. The model object
+    contains the data point pattern to which it was originally fitted.
+    The null hypothesis is that the data point pattern is a realisation
+    of the model.
+    \item
+    If \code{X} is an envelope object generated by \code{\link{envelope}},
+    then it should have been generated with \code{savefuns=TRUE} or
+    \code{savepatterns=TRUE} so that it contains simulation results.
+    These simulations will be treated as realisations from the null
+    hypothesis.
+    \item
+    Alternatively \code{X} could be a previously-performed
+    test of the same kind (i.e. the result of calling
+    \code{dclf.test} or \code{mad.test}).
+    The simulations used to perform the original test
+    will be re-used to perform the new test (provided these simulations
+    were saved in the original test, by setting \code{savefuns=TRUE} or
+    \code{savepatterns=TRUE}).
+  }
+
+  The argument \code{alternative} specifies the alternative hypothesis,
+  that is, the direction of deviation that will be considered
+  statistically significant. If \code{alternative="two.sided"} (the
+  default), both positive and negative deviations (between
+  the observed summary function and the theoretical function)
+  are significant. If \code{alternative="less"}, then only negative
+  deviations (where the observed summary function is lower than the
+  theoretical function) are considered. If \code{alternative="greater"},
+  then only positive deviations (where the observed summary function is
+  higher than the theoretical function) are considered.
+  
+  In all cases, the algorithm will first call \code{\link{envelope}} to
+  generate or extract the simulated summary functions.
+  The number of simulations that will be generated or extracted,
+  is determined by the argument \code{nsim}, and defaults to 99.
+  The summary function that will be computed is determined by the
+  argument \code{fun} (or the first unnamed argument in the list
+  \code{\dots}) and defaults to \code{\link{Kest}} (except when
+  \code{X} is an envelope object generated with \code{savefuns=TRUE},
+  in which case the saved functions are used).
+
+  The choice of summary function \code{fun} affects the power of the
+  test. It is normally recommended to apply a variance-stabilising
+  transformation (Ripley, 1981). If you are using the \eqn{K} function,
+  the normal practice is to replace this by the \eqn{L} function
+  (Besag, 1977) computed by \code{\link{Lest}}. If you are using
+  the \eqn{F} or \eqn{G} functions, the recommended practice is to apply
+  Fisher's variance-stabilising transformation
+  \eqn{\sin^{-1}\sqrt x}{asin(sqrt(x))} using the argument
+  \code{transform}. See the Examples.
+
+  The argument \code{rinterval} specifies the interval of
+  distance values \eqn{r} which will contribute to the
+  test statistic (either maximising over this range of values
+  for \code{mad.test}, or integrating over this range of values
+  for \code{dclf.test}). This affects the power of the test.
+  General advice and experiments in Baddeley et al (2014) suggest
+  that the maximum \eqn{r} value should be slightly larger than
+  the maximum possible range of interaction between points. The
+  \code{dclf.test} is quite sensitive to this choice, while the
+  \code{mad.test} is relatively insensitive.
+
+  It is also possible to specify a pointwise test (i.e. taking
+  a single, fixed value of distance \eqn{r}) by specifying
+  \code{rinterval = c(r,r)}.
+
+  The argument \code{use.theory} passed to \code{\link{envelope}}
+  determines whether to compare the summary function for the data
+  to its theoretical value for CSR (\code{use.theory=TRUE})
+  or to the sample mean of simulations from CSR
+  (\code{use.theory=FALSE}).
+
+  The argument \code{leaveout} specifies how to calculate the
+  discrepancy between the summary function for the data and the
+  nominal reference value, when the reference value must be estimated
+  by simulation. The values \code{leaveout=0} and
+  \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014,
+  Appendix) to computing the difference \code{observed - reference}
+  where the \code{reference} is the mean of simulated values.
+  The value \code{leaveout=2} gives the leave-two-out discrepancy
+  proposed by Dao and Genton (2014).
+}
+\section{Handling Ties}{
+  If the observed value of the test statistic is equal to one or more of the
+  simulated values (called a \emph{tied value}), then the tied values
+  will be assigned a random ordering, and a message will be printed.
+}
+\value{
+  An object of class \code{"htest"}.
+  Printing this object gives a report on the result of the test.
+  The \eqn{p}-value is contained in the component \code{p.value}.
+}
+\references{
+  Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne,
+  R.K. and Nair, G. (2014) On tests of spatial pattern based on
+  simulation envelopes. 
+  \emph{Ecological Monographs} \bold{84}(3) 477--489.
+  
+  Baddeley, A., Hardegen, A., Lawrence, T., Milne, R.K. and Nair,
+  G. (2015) \emph{Pushing the envelope}. In preparation.
+  
+  Besag, J. (1977) 
+  Discussion of Dr Ripley's paper.
+  \emph{Journal of the Royal Statistical Society, Series B},
+  \bold{39}, 193--195.
+  
+  Cressie, N.A.C. (1991)
+  \emph{Statistics for spatial data}.
+  John Wiley and Sons.
+
+  Dao, N.A. and Genton, M. (2014)
+  A Monte Carlo adjusted goodness-of-fit test for
+  parametric models describing spatial point patterns.
+  \emph{Journal of Computational and Graphical Statistics}
+  \bold{23}, 497--517.
+
+  Diggle, P. J. (1986).
+  Displaced amacrine cells in the retina of a
+  rabbit: analysis of a bivariate spatial point pattern.
+  \emph{J. Neuroscience Methods} \bold{18}, 115--125.
+ 
+  Diggle, P.J. (2003)
+  \emph{Statistical analysis of spatial point patterns},
+  Second edition. Arnold.
+
+  Loosmore, N.B. and Ford, E.D. (2006)
+  Statistical inference using the \emph{G} or \emph{K} point
+  pattern spatial statistics. \emph{Ecology} \bold{87}, 1925--1931.
+
+  Ripley, B.D. (1977)
+  Modelling spatial patterns (with discussion).
+  \emph{Journal of the Royal Statistical Society, Series B},
+  \bold{39}, 172--212.
+
+  Ripley, B.D. (1981) 
+  \emph{Spatial statistics}.
+  John Wiley and Sons.
+}
+\author{
+  \adrian,
+  Andrew Hardegen and Suman Rakshit.
+}
+\seealso{
+  \code{\link{envelope}},
+  \code{\link{dclf.progress}}
+}
+\examples{
+  dclf.test(cells, Lest, nsim=39)
+  m <- mad.test(cells, Lest, verbose=FALSE, rinterval=c(0, 0.1), nsim=19)
+  m
+  # extract the p-value
+  m$p.value
+  # variance stabilised G function
+  dclf.test(cells, Gest, transform=expression(asin(sqrt(.))),
+                   verbose=FALSE, nsim=19)
+
+  ## one-sided test
+  ml <- mad.test(cells, Lest, verbose=FALSE, nsim=19, alternative="less")
+
+  ## scaled
+  mad.test(cells, Kest, verbose=FALSE, nsim=19,
+           rinterval=c(0.05, 0.2),
+           scale=function(r) { r })
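+
+  ## pointwise test at the single distance r = 0.05,
+  ## as described in Details (a sketch)
+  \dontrun{
+    mad.test(cells, Lest, verbose=FALSE, nsim=19, rinterval=c(0.05, 0.05))
+  }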
+}
+\keyword{spatial}
+\keyword{htest}
+
diff --git a/man/default.dummy.Rd b/man/default.dummy.Rd
new file mode 100644
index 0000000..2209b56
--- /dev/null
+++ b/man/default.dummy.Rd
@@ -0,0 +1,114 @@
+\name{default.dummy}
+\alias{default.dummy}
+\title{Generate a Default Pattern of Dummy Points}
+\description{
+  Generates a default pattern of dummy points
+  for use in a quadrature scheme.
+}
+\usage{
+ default.dummy(X, nd, random=FALSE, ntile=NULL, npix=NULL,
+              quasi=FALSE, \dots, eps=NULL, verbose=FALSE)
+}
+\arguments{
+  \item{X}{
+    The observed data point pattern.
+    An object of class \code{"ppp"}
+    or in a format recognised by \code{\link{as.ppp}()}
+  }
+  \item{nd}{
+    Optional. Integer, or integer vector of length 2, specifying an
+    \code{nd * nd} or \code{nd[1] * nd[2]}
+    rectangular array of dummy points. 
+  }
+  \item{random}{
+    Logical value. If \code{TRUE}, the dummy points are
+    generated randomly.
+  }
+  \item{quasi}{
+    Logical value. If \code{TRUE}, the dummy points are
+    generated by a quasirandom sequence.
+  }
+  \item{ntile}{
+    Optional. Integer or pair of integers specifying
+    the number of rows and columns of tiles used in the counting rule.
+  }
+  \item{npix}{
+    Optional. Integer or pair of integers specifying the
+    number of rows and columns of pixels used in computing
+    approximate areas.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{eps}{
+    Optional. Grid spacing.
+    A positive number, or a vector of two positive numbers, giving the
+    horizontal and vertical spacing, respectively, of the grid of
+    dummy points. Incompatible with \code{nd}.
+  }
+  \item{verbose}{
+    If \code{TRUE}, information about the construction of the
+    quadrature scheme is printed.
+  }
+} 
+\value{
+  A point pattern (an object of class \code{"ppp"},
+  see \code{\link{ppp.object}}) containing the dummy points.
+}
+\details{
+  This function provides a sensible default for the dummy points
+  in a quadrature scheme.
+
+  A quadrature scheme consists of 
+  the original data point pattern, an additional pattern of dummy points,
+  and a vector of quadrature weights for all these points.
+  See \code{\link{quad.object}} for further information about
+  quadrature schemes.
+
+  If \code{random} and \code{quasi} are both false (the default),
+  then the function creates dummy points
+  in a regular \code{nd[1]} by \code{nd[2]} rectangular grid.
+  If \code{random} is true and \code{quasi} is false,
+  then the frame of the window is divided into
+  an \code{nd[1]} by \code{nd[2]} array of tiles, and one dummy point
+  is generated at random inside each tile.
+  If \code{quasi} is true, a quasirandom pattern of
+  \code{nd[1] * nd[2]} points is generated.
+  In all cases, the four corner points of the frame of the window
+  are added. Then if the window is not rectangular, any dummy points
+  lying outside it are deleted. 
+  
+  If \code{nd} is missing, 
+  a default value (depending on the
+  data pattern \code{X}) is computed by \code{default.ngrid}.
+
+  Alternative functions for creating dummy patterns
+  include \code{\link{corners}},
+  \code{\link{gridcentres}},
+  \code{\link{stratrand}} and
+  \code{\link{spokes}}.
+}
+\seealso{
+  \code{\link{quad.object}},
+  \code{\link{quadscheme}},
+  \code{\link{corners}},
+  \code{\link{gridcentres}},
+  \code{\link{stratrand}},
+  \code{\link{spokes}}
+}
+\examples{
+  data(simdat)
+  P <- simdat
+  D <- default.dummy(P, 100)
+  \dontrun{plot(D)}
+  Q <- quadscheme(P, D, "grid")
+  \dontrun{plot(union.quad(Q))}
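+  # sketches of the other placement rules described in Details:
+  # random dummy points, and a quasirandom pattern
+  Dr <- default.dummy(P, nd=10, random=TRUE)
+  Dq <- default.dummy(P, nd=c(10, 20), quasi=TRUE)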
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/default.expand.Rd b/man/default.expand.Rd
new file mode 100644
index 0000000..a69cac5
--- /dev/null
+++ b/man/default.expand.Rd
@@ -0,0 +1,108 @@
+\name{default.expand}
+\alias{default.expand}
+\title{Default Expansion Rule for Simulation of Model}
+\description{
+  Defines the default expansion window or expansion rule
+  for simulation of a fitted point process model.
+}
+\usage{
+  default.expand(object, m=2, epsilon=1e-6, w=Window(object))
+}
+\arguments{
+  \item{object}{
+    A point process model (object of class \code{"ppm"}
+    or \code{"rmhmodel"}).
+  }
+  \item{m}{
+    A single numeric value.
+    The window will be expanded by a distance 
+    \code{m * reach(object)} along each side.
+  }
+  \item{epsilon}{
+    Threshold argument passed to \code{\link{reach}} to determine
+    \code{reach(object)}.
+  }
+  \item{w}{
+    Optional. The un-expanded window in which the model is defined.
+    The resulting simulated point patterns will lie in this window. 
+  }
+} 
+\value{
+  A window expansion rule (object of class \code{"rmhexpand"}).
+}
+\details{
+  This function computes a default value for the
+  expansion rule (the argument \code{expand} in \code{\link{rmhcontrol}})
+  given a fitted point process model \code{object}.
+  This default is used by \code{\link{envelope}}, 
+  \code{\link{qqplot.ppm}}, \code{\link{simulate.ppm}} and other functions.
+
+  Suppose we wish to generate simulated realisations
+  of a fitted point process model inside a window \code{w}.
+  It is advisable to first simulate
+  the pattern on a larger window, and then clip it to the original
+  window \code{w}. This avoids edge effects in the simulation.
+  It is called \emph{expansion} of the simulation window.
+  
+  Accordingly, for the Metropolis-Hastings simulation algorithm
+  \code{\link{rmh}}, the algorithm control parameters specified by
+  \code{\link{rmhcontrol}} include an argument \code{expand} that
+  determines the expansion of the simulation window.
+
+  The function \code{default.expand} determines the default expansion
+  rule for a fitted point process model \code{object}.
+  
+  If the model is Poisson, then no expansion is necessary.
+  No expansion is performed by default,
+  and \code{default.expand} returns a rule representing no expansion.
+  The simulation window is the original window \code{w = Window(object)}.
+  
+  If the model depends on external covariates (i.e. covariates other than
+  the Cartesian covariates \code{x} and \code{y} and the \code{marks})
+  then no expansion is feasible, in general, because the spatial domain
+  of the covariates is not guaranteed to be large enough.
+  \code{default.expand} returns a rule representing no expansion.
+  The simulation window is the original window \code{w = Window(object)}.
+
+  If the model depends on the Cartesian covariates \code{x} and \code{y},
+  it would be feasible to expand the simulation window, and this was the
+  default for \pkg{spatstat} version 1.24-1 and earlier.
+  However this sometimes produces artefacts (such as an empty point pattern)
+  or memory overflow, because the fitted trend, extrapolated outside the
+  original window of the data, may become very large.
+  In \pkg{spatstat} version 1.24-2 and later, the
+  default rule is \emph{not} to expand if the model depends
+  on \code{x} or \code{y}.
+  Again \code{default.expand} returns a rule representing no expansion.
+  
+  Otherwise, expansion will occur.
+  The original window \code{w = Window(object)} is expanded by
+  a distance \code{m * rr}, where
+  \code{rr} is the interaction range of the model, computed by
+  \code{\link{reach}}. If \code{w} is a rectangle then
+  each edge of \code{w} is displaced outward by distance \code{m * rr}.
+  If \code{w} is not a rectangle then \code{w} is dilated by
+  distance \code{m * rr} using \code{\link{dilation}}.
+}
+\seealso{
+  \code{\link{rmhexpand}},
+  \code{\link{rmhcontrol}},
+  \code{\link{rmh}},
+  \code{\link{envelope}},
+  \code{\link{qqplot.ppm}}
+}
+\examples{
+  data(cells)
+  fit <- ppm(cells, ~1, Strauss(0.07))
+  default.expand(fit)
+  mod <- rmhmodel(cif="strauss", par=list(beta=100, gamma=0.5, r=0.07))
+  default.expand(mod)
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/default.rmhcontrol.Rd b/man/default.rmhcontrol.Rd
new file mode 100644
index 0000000..1ed1212
--- /dev/null
+++ b/man/default.rmhcontrol.Rd
@@ -0,0 +1,59 @@
+\name{default.rmhcontrol}
+\alias{default.rmhcontrol}
+\title{Set Default Control Parameters for Metropolis-Hastings Algorithm.}
+\description{
+  Given a fitted point process model, this command
+  sets appropriate default values of the
+  parameters controlling the iterative behaviour
+  of the Metropolis-Hastings algorithm.
+}
+\usage{
+   default.rmhcontrol(model, w=NULL)
+}
+\arguments{
+  \item{model}{
+    A fitted point process model (object of class \code{"ppm"})
+  }
+  \item{w}{
+    Optional. Window for the resulting simulated patterns.
+  }
+}
+\value{
+  An object of class \code{"rmhcontrol"}. See \code{\link{rmhcontrol}}.
+}
+\details{
+  This function sets the values of 
+  the parameters controlling the iterative behaviour
+  of the Metropolis-Hastings
+  simulation algorithm. It uses default values
+  that would be appropriate for the fitted point
+  process model \code{model}.
+
+  The expansion parameter \code{expand} is set to
+  \code{\link{default.expand}(model, w)}.
+
+  All other parameters revert to their defaults given in
+  \code{\link{rmhcontrol.default}}.
+  
+  See \code{\link{rmhcontrol}} for the full list of control parameters.
+  To override default parameters, use \code{\link{update.rmhcontrol}}.
+}
+\seealso{
+  \code{\link{rmhcontrol}},
+  \code{\link{update.rmhcontrol}},
+  \code{\link{ppm}},
+  \code{\link{default.expand}}
+}
+\examples{
+  fit <- ppm(cells, ~1, Strauss(0.1))
+  default.rmhcontrol(fit)
+  default.rmhcontrol(fit, w=square(2))
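+  # overriding one of the defaults afterwards, as described in Details
+  # (a sketch; nrep is one of the standard rmhcontrol parameters)
+  \dontrun{
+    cc <- default.rmhcontrol(fit)
+    update(cc, nrep=1e5)
+  }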
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/delaunay.Rd b/man/delaunay.Rd
new file mode 100644
index 0000000..6d765bc
--- /dev/null
+++ b/man/delaunay.Rd
@@ -0,0 +1,51 @@
+\name{delaunay}
+\alias{delaunay}
+\title{Delaunay Triangulation of Point Pattern}
+\description{
+  Computes the Delaunay triangulation of a spatial point pattern.
+}
+\usage{
+delaunay(X)
+}
+\arguments{
+  \item{X}{Spatial point pattern (object of class \code{"ppp"}).}
+}
+\details{
+  The Delaunay triangulation of a spatial point pattern \code{X}
+  is defined as follows. First the Dirichlet/Voronoi tessellation of \code{X}
+  is computed; see \code{\link{dirichlet}}. Then two points of \code{X}
+  are defined to be Delaunay neighbours if their Dirichlet/Voronoi tiles
+  share a common boundary. Every pair of Delaunay neighbours is
+  joined by a straight line. The result is a tessellation, consisting of
+  disjoint triangles. The union of these triangles is the convex hull of
+  \code{X}.
+}
+\value{
+  A tessellation (object of class \code{"tess"}). The window of the
+  tessellation is the convex hull of \code{X}, not the original window
+  of \code{X}.
+}
+\seealso{
+  \code{\link{tess}},
+  \code{\link{dirichlet}},
+  \code{\link{convexhull.xy}},
+  \code{\link{ppp}},
+  \code{\link{delaunayDistance}},
+  \code{\link{delaunayNetwork}}
+}
+\examples{
+  X <- runifpoint(42)
+  plot(delaunay(X))
+  plot(X, add=TRUE)
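+  # the window of the tessellation is the convex hull of X,
+  # as noted in Value (a sketch)
+  W <- as.owin(delaunay(X))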
+}
+\author{
+  \adrian, \rolf and \ege
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/delaunayDistance.Rd b/man/delaunayDistance.Rd
new file mode 100644
index 0000000..213191d
--- /dev/null
+++ b/man/delaunayDistance.Rd
@@ -0,0 +1,54 @@
+\name{delaunayDistance}
+\alias{delaunayDistance}
+\title{Distance on Delaunay Triangulation}
+\description{
+  Computes the graph distance in the Delaunay triangulation
+  of a point pattern.
+}
+\usage{
+delaunayDistance(X)
+}
+\arguments{
+  \item{X}{Spatial point pattern (object of class \code{"ppp"}).}
+}
+\details{
+  The Delaunay triangulation of a spatial point pattern \code{X}
+  is defined as follows. First the Dirichlet/Voronoi tessellation of \code{X}
+  is computed; see \code{\link{dirichlet}}. Then two points of \code{X}
+  are defined to be Delaunay neighbours if their Dirichlet/Voronoi tiles
+  share a common boundary. Every pair of Delaunay neighbours is
+  joined by a straight line.
+
+  The \emph{graph distance} 
+  in the Delaunay triangulation between two points \code{X[i]} and \code{X[j]}
+  is the minimum number of edges of the Delaunay triangulation
+  that must be traversed to go from \code{X[i]} to \code{X[j]}.
+
+  This command returns a matrix \code{D} such that
+  \code{D[i,j]} is the graph distance
+  between \code{X[i]} and \code{X[j]}.
+}
+\value{
+  A symmetric square matrix with integer entries.
+}
+\seealso{
+  \code{\link{delaunay}},
+  \code{\link{delaunayNetwork}}
+}
+\examples{
+  X <- runifpoint(20)
+  M <- delaunayDistance(X)
+  plot(delaunay(X), lty=3)
+  text(X, labels=M[1, ], cex=2)
+}
+\author{
+  \adrian, \rolf and \ege
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/delaunayNetwork.Rd b/man/delaunayNetwork.Rd
new file mode 100644
index 0000000..2e35a97
--- /dev/null
+++ b/man/delaunayNetwork.Rd
@@ -0,0 +1,56 @@
+\name{delaunayNetwork}
+\alias{delaunayNetwork}
+\alias{dirichletNetwork}
+\title{
+  Linear Network of Delaunay Triangulation or Dirichlet Tessellation
+}
+\description{
+  Computes the edges of the Delaunay triangulation
+  or Dirichlet tessellation of a point pattern,
+  and returns the result as a linear network object.
+}
+\usage{
+delaunayNetwork(X)
+
+dirichletNetwork(X, \dots)
+}
+\arguments{
+  \item{X}{A point pattern (object of class \code{"ppp"}).}
+  \item{\dots}{Arguments passed to \code{\link{as.linnet.psp}}.}
+}
+\details{
+  For \code{delaunayNetwork}, points of \code{X} which are neighbours
+  in the Delaunay triangulation
+  (see \code{\link{delaunay}}) will be joined by a straight line.
+  The result will be returned as a linear network (object of class
+  \code{"linnet"}).
+
+  For \code{dirichletNetwork}, the Dirichlet tessellation is computed
+  (see \code{\link{dirichlet}}) and the edges of the
+  tiles of the tessellation are extracted. This is converted to a linear
+  network using \code{\link{as.linnet.psp}}.
+}
+\value{
+  Linear network (object of class \code{"linnet"})
+  or \code{NULL}.
+}
+\author{
+  \adrian, \rolf and \ege
+}
+\seealso{
+  \code{\link{delaunay}},
+  \code{\link{dirichlet}},
+  \code{\link{delaunayDistance}}
+}
+\examples{
+  LE <- delaunayNetwork(cells)
+  LI <- dirichletNetwork(cells)
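+  # a quick visual check of the two networks (a sketch)
+  \dontrun{
+    plot(LE, main="Delaunay network")
+    plot(LI, main="Dirichlet network")
+  }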
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/deletebranch.Rd b/man/deletebranch.Rd
new file mode 100644
index 0000000..e4401e8
--- /dev/null
+++ b/man/deletebranch.Rd
@@ -0,0 +1,94 @@
+\name{deletebranch}
+\alias{deletebranch}
+\alias{deletebranch.linnet}
+\alias{deletebranch.lpp}
+\alias{extractbranch}
+\alias{extractbranch.linnet}
+\alias{extractbranch.lpp}
+\title{
+  Delete or Extract a Branch of a Tree
+}
+\description{
+  Deletes or extracts a given branch of a tree.
+}
+\usage{
+deletebranch(X, \dots)
+
+\method{deletebranch}{linnet}(X, code, labels, \dots)
+
+\method{deletebranch}{lpp}(X, code, labels, \dots)
+
+extractbranch(X, \dots)
+
+\method{extractbranch}{linnet}(X, code, labels, \dots, which=NULL)
+
+\method{extractbranch}{lpp}(X, code, labels, \dots, which=NULL)
+}
+\arguments{
+  \item{X}{
+    Linear network (object of class \code{"linnet"})
+    or point pattern on a linear network (object of class \code{"lpp"}).
+  }
+  \item{code}{
+    Character string. Label of the branch to be deleted or extracted.
+  }
+  \item{labels}{
+    Vector of character strings. Branch labels for the
+    vertices of the network, usually obtained from
+    \code{\link{treebranchlabels}}.
+  }
+  \item{\dots}{Arguments passed to methods.}
+  \item{which}{
+    Logical vector indicating which vertices of the network
+    should be extracted. Overrides \code{code} and \code{labels}.
+  }
+}
+\details{
+  The linear network \code{L <- X} or \code{L <- as.linnet(X)}
+  must be a tree, that is, it must have no loops.
+
+  The argument \code{labels} should be a character vector
+  giving tree branch labels for each vertex of the network.
+  It is usually obtained by calling \code{\link{treebranchlabels}}.
+
+  The branch designated by the string \code{code} will be deleted
+  or extracted.
+
+  The return value is the result of deleting or extracting
+  this branch from \code{X}
+  along with any data associated with this branch (such as points or marks).
+}
+\value{
+  Another object of the same type as \code{X}
+  obtained by deleting or extracting the specified branch.
+}
+\author{
+\spatstatAuthors
+}
+\seealso{
+\code{\link{treebranchlabels}},
+\code{\link{branchlabelfun}},
+\code{\link{linnet}}
+}
+\examples{
+  # make a simple tree
+  m <- simplenet$m
+  m[8,10] <- m[10,8] <- FALSE
+  L <- linnet(vertices(simplenet), m)
+  plot(L, main="")
+  # compute branch labels 
+  tb <- treebranchlabels(L, 1)
+  tbc <- paste0("[", tb, "]")
+  text(vertices(L), labels=tbc, cex=2)
+
+  # delete branch B
+  LminusB <- deletebranch(L, "b", tb)
+  plot(LminusB, add=TRUE, col="green")
+
+  # extract branch B
+  LB <- extractbranch(L, "b", tb)
+  plot(LB, add=TRUE, col="red")
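+
+  # equivalent extraction via the 'which' argument documented above
+  # (a sketch: assumes the vertices of branch b are exactly those
+  # whose label begins with "b")
+  \dontrun{
+    LB2 <- extractbranch(L, which=(substr(tb, 1, 1) == "b"))
+  }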
+}
+\keyword{spatial}
+\keyword{manip}
+
diff --git a/man/deltametric.Rd b/man/deltametric.Rd
new file mode 100644
index 0000000..365a7cf
--- /dev/null
+++ b/man/deltametric.Rd
@@ -0,0 +1,103 @@
+\name{deltametric}
+\Rdversion{1.1}
+\alias{deltametric}
+\title{
+  Delta Metric
+}
+\description{
+  Computes the discrepancy between two sets \eqn{A} and \eqn{B}
+  according to Baddeley's delta-metric.
+}
+\usage{
+deltametric(A, B, p = 2, c = Inf, ...)
+}
+\arguments{
+  \item{A,B}{
+    The two sets which will be compared.
+    Windows (objects of class \code{"owin"}),
+    point patterns (objects of class \code{"ppp"})
+    or line segment patterns (objects of class \code{"psp"}).
+  }
+  \item{p}{
+    Index of the \eqn{L^p} metric.
+    Either a positive numeric value, or \code{Inf}.
+  }
+  \item{c}{
+    Distance threshold. 
+    Either a positive numeric value, or \code{Inf}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}} to determine the
+    pixel resolution of the distance maps computed by \code{\link{distmap}}.
+  }
+}
+\details{
+  Baddeley (1992a, 1992b) defined a distance
+  between two sets \eqn{A} and \eqn{B} contained in a space \eqn{W} by 
+  \deqn{
+    \Delta(A,B) = \left[
+    \frac 1 {|W|}
+    \int_W
+    \left| \min(c, d(x,A)) - \min(c, d(x,B)) \right|^p \, {\rm d}x
+    \right]^{1/p}
+  }{
+    Delta(A,B) = [ (1/|W|) * integral of |min(c, d(x,A))-min(c, d(x,B))|^p dx ]^(1/p)
+  }
+  where \eqn{c \ge 0}{c >= 0} is a distance threshold parameter,
+  \eqn{0 < p \le \infty}{0 < p <= Inf} is the exponent parameter,
+  and \eqn{d(x,A)} denotes the
+  shortest distance from a point \eqn{x} to the set \eqn{A}.
+  Also \eqn{|W|} denotes the area or volume of the containing space \eqn{W}.
+
+  This is defined so that it is a \emph{metric}, i.e.
+  \itemize{
+    \item \eqn{\Delta(A,B)=0}{Delta(A,B)=0} if and only if \eqn{A=B}
+    \item \eqn{\Delta(A,B)=\Delta(B,A)}{Delta(A,B)=Delta(B,A)}
+    \item \eqn{\Delta(A,C) \le \Delta(A,B) + \Delta(B,C)}{Delta(A,C) <=
+      Delta(A,B) + Delta(B,C)}
+  }
+  It is topologically equivalent to the Hausdorff metric
+  (Baddeley, 1992a) but has better stability properties
+  in practical applications (Baddeley, 1992b).
+
+  If \eqn{p=\infty}{p=Inf} and \eqn{c=\infty}{c=Inf} the Delta metric
+  is equal to the Hausdorff metric.
+
+  The algorithm uses \code{\link{distmap}} to compute the distance maps
+  \eqn{d(x,A)} and \eqn{d(x,B)}, then approximates the integral
+  numerically.
+  The accuracy of the computation depends on the pixel resolution
+  which is controlled through the extra arguments \code{\dots} passed
+  to \code{\link{as.mask}}.
+}
+\value{
+  A numeric value.
+}
+\references{
+  Baddeley, A.J. (1992a)
+  Errors in binary images and an \eqn{L^p} version of the Hausdorff metric.
+  \emph{Nieuw Archief voor Wiskunde} \bold{10}, 157--183.
+
+  Baddeley, A.J. (1992b)
+  An error metric for binary images.
+  In W. Foerstner and S. Ruwiedel (eds)
+  \emph{Robust Computer Vision}. Karlsruhe: Wichmann.
+  Pages 59--78.
+}
+\author{
+  \adrian and \rolf
+}
+\seealso{
+  \code{\link{distmap}}
+}
+\examples{
+  X <- runifpoint(20)
+  Y <- runifpoint(10)
+  deltametric(X, Y, p=1, c=0.1)
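+  # with p = c = Inf, the delta metric equals the Hausdorff metric,
+  # as noted in Details
+  deltametric(X, Y, p=Inf, c=Inf)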
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/demohyper.Rd b/man/demohyper.Rd
new file mode 100644
index 0000000..3004ebb
--- /dev/null
+++ b/man/demohyper.Rd
@@ -0,0 +1,42 @@
+\name{demohyper}
+\alias{demohyper}
+\docType{data}
+\title{
+  Demonstration Example of Hyperframe of Spatial Data
+}
+\description{
+  This is an artificially constructed example of a
+  hyperframe of spatial data. The data could have been obtained
+  from an experiment in which there are two groups of
+  experimental units, the response from each unit
+  is a point pattern \code{Points}, and for each unit there is explanatory
+  data in the form of a pixel image \code{Image}.
+}
+\usage{data(demohyper)}
+\format{
+  A \code{\link{hyperframe}} with 3 rows and 3 columns:
+  \describe{
+    \item{Points}{
+      List of spatial point patterns
+      (objects of class \code{"ppp"})
+      serving as the responses in an experiment.
+    }
+    \item{Image}{
+      List of images (objects of class \code{"im"})
+      serving as explanatory variables.
+    }
+    \item{Group}{
+      Factor with two levels \code{a} and \code{b}
+      serving as an explanatory variable.
+    }
+  }
+}
+\source{
+  Artificially generated by \adrian.
+}
+\examples{
+ plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }),
+      parargs=list(mar=rep(1,4)))
+ mppm(Points ~ Group/Image, data=demohyper)
+}
+\keyword{datasets}
diff --git a/man/demopat.Rd b/man/demopat.Rd
new file mode 100644
index 0000000..6782b45
--- /dev/null
+++ b/man/demopat.Rd
@@ -0,0 +1,26 @@
+\name{demopat}
+\alias{demopat}
+\docType{data}
+\title{
+   Artificial Data Point Pattern
+}
+\description{
+  This is an artificial dataset, for use in testing and demonstrating the
+  capabilities of the \code{spatstat} package.
+  It is a multitype point pattern in an irregular polygonal window.
+  There are two types of points. The window contains a polygonal hole.
+} 
+\format{
+  An object of class \code{"ppp"}
+  representing the point pattern.
+
+  See \code{\link{ppp.object}} for details of the format of a
+  point pattern object.
+}
+\usage{data(demopat)}
+\source{\adrian}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/dendrite.Rd b/man/dendrite.Rd
new file mode 100644
index 0000000..9eddb73
--- /dev/null
+++ b/man/dendrite.Rd
@@ -0,0 +1,46 @@
+\name{dendrite}
+\alias{dendrite}
+\docType{data}
+\title{
+  Dendritic Spines Data
+}
+\description{
+  Dendrites are branching filaments which extend from the
+  main body of a neuron (nerve cell) to propagate electrochemical
+  signals. Spines are small protrusions on the dendrites.
+
+  This dataset gives the locations of 566 spines
+  observed on one branch of the dendritic tree of a rat neuron.
+  The spines are classified according to their shape into three types:
+  mushroom, stubby or thin.
+
+  The data have been analysed in Jammalamadaka et al (2013) and
+  Baddeley et al (2014). Please cite these papers and
+  acknowledge the Kosik Lab, UC Santa Barbara, in any use of the data.
+}
+\usage{data("dendrite")}
+\format{
+  Object of class \code{"lpp"}. 
+  See \code{\link{lpp}}.
+}
+\source{
+  Kosik Lab, UC Santa Barbara (Dr Kenneth Kosik, Dr Sourav Banerjee).
+  Formatted for \code{spatstat} by Dr Aruna Jammalamadaka.
+}
+\references{
+Baddeley, A., Jammalamadaka, A. and Nair, G. (2014)
+Multitype point process analysis of spines on the
+dendrite network of a neuron.
+\emph{Applied Statistics (Journal of the Royal Statistical
+ Society, Series C)}, In press.
+\code{doi: 10.1111/rssc.12054}
+
+Jammalamadaka, A., Banerjee, S., Manjunath, B.S. and Kosik, K. (2013)
+Statistical Analysis of Dendritic Spine Distributions in
+Rat Hippocampal Cultures.
+\emph{BMC Bioinformatics} \bold{14}, 287.
+}
+\examples{
+plot(dendrite, leg.side="bottom", main="", cex=0.75, cols=2:4)
+}
+\keyword{datasets}
diff --git a/man/density.lpp.Rd b/man/density.lpp.Rd
new file mode 100644
index 0000000..988b385
--- /dev/null
+++ b/man/density.lpp.Rd
@@ -0,0 +1,131 @@
+\name{density.lpp}
+\alias{density.lpp}
+\alias{density.splitppx}
+\title{
+  Kernel Estimate of Intensity on a Linear Network
+}
+\description{
+  Estimates the intensity of a point process on a linear network
+  by applying kernel smoothing to the point pattern data.
+}
+\usage{
+\method{density}{lpp}(x, sigma, \dots,
+        weights=NULL,
+        kernel="gaussian",
+        continuous=TRUE,
+        epsilon = 1e-06, verbose = TRUE,
+        debug = FALSE, savehistory = TRUE,
+        old=FALSE)
+
+\method{density}{splitppx}(x, sigma, \dots)
+}
+\arguments{
+  \item{x}{
+    Point pattern on a linear network (object of class \code{"lpp"})
+    to be smoothed.
+  }
+  \item{sigma}{
+    Smoothing bandwidth (standard deviation of the kernel)
+    in the same units as the spatial coordinates of \code{x}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}} determining the
+    resolution of the result.
+  }
+  \item{weights}{
+    Optional. Numeric vector of weights associated with the
+    points of \code{x}. Weights may be positive, negative or zero.
+  }
+  \item{kernel}{
+    Character string specifying the smoothing kernel.
+    See \code{\link{dkernel}} for possible options.
+  }
+  \item{continuous}{
+    Logical value indicating whether to compute the
+    \dQuote{equal-split continuous} smoother (\code{continuous=TRUE}, the
+    default) or the \dQuote{equal-split discontinuous} smoother
+    (\code{continuous=FALSE}).
+  }
+  \item{epsilon}{
+    Tolerance value. A tail of the kernel with total mass
+    less than \code{epsilon} may be deleted.
+  }
+  \item{verbose}{
+    Logical value indicating whether to print progress reports.
+  }
+  \item{debug}{
+    Logical value indicating whether to print debugging information.
+  }
+  \item{savehistory}{
+    Logical value indicating whether to save the entire history of the
+    algorithm, for the purposes of evaluating performance.
+  }
+  \item{old}{
+    Logical value indicating whether to use the old, very slow algorithm
+    for the equal-split continuous estimator.
+  }
+}
+\details{
+  Kernel smoothing is applied to the points of \code{x}
+  using one of the rules described in Okabe and Sugihara (2012)
+  and McSwiggan et al (2016).
+  The result is a pixel image on the linear network (class
+  \code{"linim"}) which can be plotted.
+
+  If \code{continuous=TRUE} (the default), smoothing is performed
+  using the \dQuote{equal-split continuous} rule described in
+  Section 9.2.3 of Okabe and Sugihara (2012).
+  The resulting function is continuous on the linear network.
+
+  If \code{continuous=FALSE}, smoothing is performed
+  using the \dQuote{equal-split discontinuous} rule described in
+  Section 9.2.2 of Okabe and Sugihara (2012). The
+  resulting function is not continuous.
+
+  In the default case
+  (where \code{continuous=TRUE} and \code{kernel="gaussian"}
+  and \code{old=FALSE}),
+  computation is performed rapidly by solving the classical heat equation
+  on the network, as described in McSwiggan et al (2016).
+  Computational time is short, but increases quadratically with \code{sigma}.
+  The arguments \code{epsilon,debug,verbose,savehistory} are ignored.
+  
+  In all other cases, computation is performed by path-tracing
+  as described in Okabe and Sugihara (2012);
+  computation can be extremely slow, and time
+  increases exponentially with \code{sigma}.
+
+  There is also a method for split point patterns on a linear network
+  (class \code{"splitppx"}) which will return a list of pixel images.
+}
+\value{
+  A pixel image on the linear network (object of class \code{"linim"}).
+}
+\references{
+McSwiggan, G., Baddeley, A. and Nair, G. (2016)
+Kernel density estimation on a linear network.
+\emph{Scandinavian Journal of Statistics},
+In press.
+
+Okabe, A. and Sugihara, K. (2012)
+  \emph{Spatial analysis along networks}.
+  Wiley.
+}
+\author{
+  \adrian and Greg McSwiggan.
+}
+\seealso{
+  \code{\link{lpp}},
+  \code{\link{linim}}
+}
+\examples{
+  X <- runiflpp(3, simplenet)
+  D <- density(X, 0.2, verbose=FALSE)
+  plot(D, style="w", main="", adjust=2)
+  Dw <- density(X, 0.2, weights=c(1,2,-1), verbose=FALSE)
+  De <- density(X, 0.2, kernel="epanechnikov", verbose=FALSE)
+  Ded <- density(X, 0.2, kernel="epanechnikov", continuous=FALSE, verbose=FALSE)
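+  # the fast heat-equation algorithm described in Details applies only
+  # to the default case (gaussian kernel, continuous=TRUE); a sketch
+  # with a larger bandwidth
+  \dontrun{
+    plot(density(X, 0.5, verbose=FALSE))
+  }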
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/density.ppp.Rd b/man/density.ppp.Rd
new file mode 100644
index 0000000..dbdff18
--- /dev/null
+++ b/man/density.ppp.Rd
@@ -0,0 +1,383 @@
+\name{density.ppp}
+\alias{density.ppp}
+\title{Kernel Smoothed Intensity of Point Pattern}
+\description{
+  Compute a kernel smoothed intensity function from a point pattern.
+}
+\usage{
+  \method{density}{ppp}(x, sigma=NULL, \dots,
+        weights=NULL, edge=TRUE, varcov=NULL,
+        at="pixels", leaveoneout=TRUE,
+        adjust=1, diggle=FALSE, se=FALSE,
+        kernel="gaussian",
+        scalekernel=is.character(kernel), 
+        positive=FALSE, verbose=TRUE) 
+}
+\arguments{
+  \item{x}{
+    Point pattern (object of class \code{"ppp"}).
+  }
+  \item{sigma}{
+    Standard deviation of isotropic smoothing kernel.
+    Either a numerical value, or a function that computes an
+    appropriate value of \code{sigma}.
+  }
+  \item{weights}{
+    Optional weights to be attached to the points.
+    A numeric vector, numeric matrix, an \code{expression},
+    or a pixel image.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link{pixellate.ppp}}
+    and \code{\link{as.mask}} to determine
+    the pixel resolution, or passed to \code{sigma} if it is a function.
+  }
+  \item{edge}{
+    Logical value indicating whether to apply edge correction.
+  }
+  \item{varcov}{
+    Variance-covariance matrix of anisotropic smoothing kernel.
+    Incompatible with \code{sigma}.
+  }
+  \item{at}{
+    String specifying whether to compute the intensity values
+    at a grid of pixel locations (\code{at="pixels"}) or
+    only at the points of \code{x} (\code{at="points"}).
+  }
+  \item{leaveoneout}{
+    Logical value indicating whether to compute a leave-one-out
+    estimator. Applicable only when \code{at="points"}.
+  }
+  \item{adjust}{
+    Optional. Adjustment factor for the smoothing parameter.
+  }
+  \item{diggle}{
+    Logical. If \code{TRUE}, use the Jones-Diggle improved edge correction,
+    which is more accurate but slower to compute than the default
+    correction.
+  }
+  \item{kernel}{
+    The smoothing kernel.
+    A character string specifying the smoothing kernel
+    (current options are \code{"gaussian"}, \code{"epanechnikov"},
+    \code{"quartic"} or \code{"disc"}),
+    or a pixel image (object of class \code{"im"})
+    containing values of the kernel, or a \code{function(x,y)} which
+    yields values of the kernel.
+  }
+  \item{scalekernel}{
+    Logical value.
+    If \code{scalekernel=TRUE}, then the kernel will be rescaled
+    to the bandwidth determined by \code{sigma} and \code{varcov}:
+    this is the default behaviour when \code{kernel} is a character string.
+    If \code{scalekernel=FALSE}, then \code{sigma} and \code{varcov}
+    will be ignored: this is the default behaviour when \code{kernel} is a
+    function or a pixel image.
+  }
+  \item{se}{
+    Logical value indicating whether to compute standard errors as well.
+  }
+  \item{positive}{
+    Logical value indicating whether to force all density values to
+    be positive numbers. Default is \code{FALSE}.
+  }
+  \item{verbose}{
+    Logical value indicating whether to issue warnings
+    about numerical problems and conditions.
+  }
+}
+\value{
+  By default, the result is
+  a pixel image (object of class \code{"im"}). 
+  Pixel values are estimated intensity values,
+  expressed in \dQuote{points per unit area}.
+
+  If \code{at="points"}, the result is a numeric vector
+  of length equal to the number of points in \code{x}.
+  Values are estimated intensity values at the points of \code{x}.
+
+  In either case, the return value has attributes
+  \code{"sigma"} and \code{"varcov"} which report the smoothing
+  bandwidth that was used.
+
+  If \code{weights} is a matrix with more than one column, then the
+  result is a list of images (if \code{at="pixels"}) or a matrix of
+  numerical values (if \code{at="points"}).
+  
+  If \code{se=TRUE}, the result is a list with two elements named
+  \code{estimate} and \code{SE}, each of the format described above.
+}
+\details{
+  This is a method for the generic function \code{density}.
+
+  It computes a fixed-bandwidth kernel estimate 
+  (Diggle, 1985) of the intensity function of the point process
+  that generated the point pattern \code{x}.
+  
+  By default it computes the convolution of the
+  isotropic Gaussian kernel of standard deviation \code{sigma}
+  with point masses at each of the data points in \code{x}.
+  Anisotropic Gaussian kernels are also supported.
+  Each point has unit weight, unless the argument \code{weights} is
+  given.
+
+  If \code{edge=TRUE}, the intensity estimate is corrected for
+  edge effect bias in one of two ways:
+  \itemize{
+    \item If \code{diggle=FALSE} (the default) the intensity estimate is
+    corrected by dividing it by the convolution of the
+    Gaussian kernel with the window of observation.
+    This is the approach originally described in Diggle (1985).
+    Thus the intensity value at a point \eqn{u} is
+    \deqn{
+      \hat\lambda(u) = e(u) \sum_i k(x_i - u) w_i
+    }{
+      lambda(u) = e(u) sum[i] k(x[i] - u) w[i]
+    }
+    where \eqn{k} is the Gaussian smoothing kernel,
+    \eqn{e(u)} is an edge correction factor, 
+    and \eqn{w_i}{w[i]} are the weights.
+    \item
+    If \code{diggle=TRUE} then the code uses the improved edge correction
+    described by Jones (1993) and Diggle (2010, equation 18.9).
+    This has been shown to have better performance (Jones, 1993)
+    but is slightly slower to compute. 
+    The intensity value at a point \eqn{u} is 
+    \deqn{
+      \hat\lambda(u) = \sum_i k(x_i - u) w_i e(x_i)
+    }{
+      lambda(u) = sum[i] k(x[i] - u) w[i] e(x[i])
+    }
+    where again \eqn{k} is the Gaussian smoothing kernel,
+    \eqn{e(x_i)}{e(x[i])} is an edge correction factor, 
+    and \eqn{w_i}{w[i]} are the weights.
+  }
+  In both cases, the edge correction term \eqn{e(u)} is the reciprocal of the
+  kernel mass inside the window:
+  \deqn{
+    \frac{1}{e(u)} = \int_W k(v-u) \, {\rm d}v
+  }{
+    1/e(u) = integral[v in W] k(v-u) dv
+  }
+  where \eqn{W} is the observation window.
+
+  The smoothing kernel is determined by the arguments
+  \code{sigma}, \code{varcov} and \code{adjust}.
+  \itemize{
+    \item if \code{sigma} is a single numerical value,
+    this is taken as the standard deviation of the isotropic Gaussian
+    kernel.
+    \item alternatively \code{sigma} may be a function that computes
+    an appropriate bandwidth for the isotropic Gaussian kernel
+    from the data point pattern by calling \code{sigma(x)}.
+    To perform automatic bandwidth selection using cross-validation,
+    it is recommended to use the functions \code{\link{bw.diggle}}
+    or \code{\link{bw.ppl}}.
+    \item
+    The smoothing kernel may be chosen to be any Gaussian
+    kernel, by giving the variance-covariance matrix \code{varcov}.
+    The arguments \code{sigma} and \code{varcov} are incompatible.
+    \item
+    Alternatively \code{sigma} may be a vector of length 2 giving the
+    standard deviations of two independent Gaussian coordinates,
+    thus equivalent to \code{varcov = diag(rep(sigma^2, 2))}.
+    \item if neither \code{sigma} nor \code{varcov} is specified,
+    an isotropic Gaussian kernel will be used, 
+    with a default value of \code{sigma}
+    calculated by a simple rule of thumb
+    that depends only on the size of the window.
+    \item
+    The argument \code{adjust} makes it easy for the user to change the
+    bandwidth specified by any of the rules above.
+    The value of \code{sigma} will be multiplied by
+    the factor \code{adjust}. The matrix \code{varcov} will be
+    multiplied by \code{adjust^2}. To double the smoothing bandwidth, set
+    \code{adjust=2}.
+  }
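+
+  For instance, assuming arbitrary bandwidth values, the following two
+  calls specify the same anisotropic kernel:
+  \preformatted{
+    density(cells, sigma=c(0.05, 0.07))
+    density(cells, varcov=diag(c(0.05, 0.07)^2))
+  }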
+
+  If \code{at="pixels"} (the default), intensity values are
+  computed at every location \eqn{u} in a fine grid,
+  and are returned as a pixel image. The point pattern is first discretised 
+  using \code{\link{pixellate.ppp}}, then the intensity is
+  computed using the Fast Fourier Transform.
+  Accuracy depends on the pixel resolution and the discretisation rule.
+  The pixel resolution is controlled by the arguments
+  \code{\dots} passed to \code{\link{as.mask}} (specify the number of
+  pixels by \code{dimyx} or the pixel size by \code{eps}). 
+  The discretisation rule is controlled by the arguments
+  \code{\dots} passed to \code{\link{pixellate.ppp}}
+  (the default rule is that each point is allocated to the nearest
+  pixel centre; this can be modified using the arguments
+  \code{fractional} and \code{preserve}).
+
+  If \code{at="points"}, the intensity values are computed 
+  to high accuracy at the points of \code{x} only. Computation is
+  performed by directly evaluating and summing the Gaussian kernel
+  contributions without discretising the data. The result is a numeric
+  vector giving the density values.
+  The intensity value at a point \eqn{x_i}{x[i]} is (if \code{diggle=FALSE})
+  \deqn{
+    \hat\lambda(x_i) = e(x_i) \sum_j k(x_j - x_i) w_j
+  }{
+    \lambda(x[i]) = e(x[i]) \sum[j] k(x[j] - x[i]) w[j]
+  }
+  or (if \code{diggle=TRUE})
+  \deqn{
+    \hat\lambda(x_i) = \sum_j k(x_j - x_i) w_j e(x_j)
+  }{
+    \lambda(x[i]) = \sum[j] k(x[j] - x[i]) w[j] e(x[j])
+  }
+  If \code{leaveoneout=TRUE} (the default), then the sum in the equation
+  is taken over all \eqn{j} not equal to \eqn{i},
+  so that the intensity value at a
+  data point is the sum of kernel contributions from
+  all \emph{other} data points.
+  If \code{leaveoneout=FALSE} then the sum is taken over all \eqn{j},
+  so that the intensity value at a data point includes a contribution
+  from the same point.
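+
+  For example, a sketch of both settings at the data points:
+  \preformatted{
+    lam1 <- density(cells, 0.05, at="points")                  # leave-one-out
+    lam2 <- density(cells, 0.05, at="points", leaveout=FALSE)
+  }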
+
+  If \code{weights} is a matrix with more than one column, then the
+  calculation is effectively repeated for each column of weights. The
+  result is a list of images (if \code{at="pixels"}) or a matrix of
+  numerical values (if \code{at="points"}).
+  
+  The argument \code{weights} can also be an \code{expression}.
+  It will be evaluated in the data frame \code{as.data.frame(x)}
+  to obtain a vector or matrix of weights. The expression may involve
+  the symbols \code{x} and \code{y} representing the Cartesian
+  coordinates, the symbol \code{marks} representing the mark values
+  if there is only one column of marks, and the names of the columns of
+  marks if there are several columns.  
+
+  The argument \code{weights} can also be a pixel image
+  (object of class \code{"im"}). Numerical weights for the data points
+  will be extracted from this image (by looking up the pixel values
+  at the locations of the data points in \code{x}).
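+
+  For example, a sketch using an arbitrary weight image:
+  \preformatted{
+    W <- as.im(function(x, y) { 1 + x }, Window(cells))
+    density(cells, 0.05, weights=W)
+  }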
+  
+  To select the bandwidth \code{sigma} automatically by
+  cross-validation, use \code{\link{bw.diggle}} or \code{\link{bw.ppl}}.
+  
+  To perform spatial interpolation of values that were observed
+  at the points of a point pattern, use \code{\link{Smooth.ppp}}.
+
+  For adaptive nonparametric estimation, see
+  \code{\link{adaptive.density}}.
+  For data sharpening, see \code{\link{sharpen.ppp}}.
+
+  To compute a relative risk surface or probability map for
+  two (or more) types of points, use \code{\link{relrisk}}.
+}
+\seealso{
+  \code{\link{bw.diggle}},
+  \code{\link{bw.ppl}},
+  \code{\link{Smooth.ppp}},
+  \code{\link{sharpen.ppp}},
+  \code{\link{adaptive.density}},
+  \code{\link{relrisk}},
+  \code{\link{ppp.object}},
+  \code{\link{im.object}}
+}
+\note{
+  This function is often misunderstood.
+
+  The result of \code{density.ppp} is not a spatial smoothing 
+  of the marks or weights attached to the point pattern.
+  To perform spatial interpolation of values that were observed
+  at the points of a point pattern, use \code{\link{Smooth.ppp}}.
+
+  The result of \code{density.ppp} is not a probability density.
+  It is an estimate of the \emph{intensity function} of the
+  point process that generated the point pattern data.
+  Intensity is the expected number of random points
+  per unit area.
+  The units of intensity are \dQuote{points per unit area}.
+  Intensity is usually a function of spatial location,
+  and it is this function which is estimated by \code{density.ppp}.
+  The integral of the intensity function over a spatial region gives the
+  expected number of points falling in this region.
+
+  Inspecting an estimate of the intensity function is usually the
+  first step in exploring a spatial point pattern dataset.
+  For more explanation, see Baddeley, Rubak and Turner (2015)
+  or Diggle (2003, 2010).
+
+  If you have two (or more) types of points, and you want a
+  probability map or relative risk surface (the spatially-varying
+  probability of a given type), use \code{\link{relrisk}}.
+}
+\section{Negative Values}{
+  Negative and zero values of the density estimate are possible
+  when \code{at="pixels"} because of numerical errors in finite-precision
+  arithmetic.
+
+  By default, \code{density.ppp} does not try to repair such errors.
+  This would take more computation time and is not always needed.
+  (Also it would not be appropriate if \code{weights} include negative values.)
+
+  To ensure that the resulting density values are always positive,
+  set \code{positive=TRUE}.
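+
+  For example:
+  \preformatted{
+    Z <- density(cells, 0.05, positive=TRUE)
+  }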
+}
+\examples{
+  if(interactive()) {
+    opa <- par(mfrow=c(1,2))
+    plot(density(cells, 0.05))
+    plot(density(cells, 0.05, diggle=TRUE))
+    par(opa)
+    v <- diag(c(0.05, 0.07)^2)
+    plot(density(cells, varcov=v))
+  }
+  \donttest{
+    Z <- density(cells, 0.05)
+    Z <- density(cells, 0.05, diggle=TRUE)
+    Z <- density(cells, 0.05, se=TRUE)
+    Z <- density(cells, varcov=diag(c(0.05^2, 0.07^2)))
+    Z <- density(cells, 0.05, weights=data.frame(a=1:42,b=42:1))
+    Z <- density(cells, 0.05, weights=expression(x))
+  }
+  # automatic bandwidth selection
+  plot(density(cells, sigma=bw.diggle(cells)))
+  # equivalent:
+  plot(density(cells, bw.diggle))
+  # evaluate intensity at points
+  density(cells, 0.05, at="points")
+
+  plot(density(cells, sigma=0.4, kernel="epanechnikov"))
+
+  # relative risk calculation by hand (see relrisk.ppp)
+  lung <- split(chorley)$lung
+  larynx <- split(chorley)$larynx
+  D <- density(lung, sigma=2)
+  plot(density(larynx, sigma=2, weights=1/D))
+}
+\references{
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  Chapman and Hall/CRC Press.
+  
+  Diggle, P.J. (1985)
+  A kernel method for smoothing point process data.
+  \emph{Applied Statistics} (Journal of the Royal Statistical Society,
+  Series C) \bold{34}, 138--147.
+
+  Diggle, P.J. (2003)
+  \emph{Statistical analysis of spatial point patterns},
+  Second edition. Arnold.
+
+  Diggle, P.J. (2010)
+  Nonparametric methods.
+  Chapter 18, pp. 299--316 in
+  A.E. Gelfand, P.J. Diggle, M. Fuentes and P. Guttorp (eds.)
+  \emph{Handbook of Spatial Statistics},
+  CRC Press, Boca Raton, FL.
+
+  Jones, M.C. (1993)
+  Simple boundary corrections for kernel density estimation.
+  \emph{Statistics and Computing} \bold{3}, 135--146.
+}
+
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/density.psp.Rd b/man/density.psp.Rd
new file mode 100644
index 0000000..7a7b8fa
--- /dev/null
+++ b/man/density.psp.Rd
@@ -0,0 +1,77 @@
+\name{density.psp}
+\alias{density.psp}
+\title{Kernel Smoothing of Line Segment Pattern}
+\description{
+  Compute a kernel smoothed intensity function from a line segment pattern.
+}
+\usage{
+  \method{density}{psp}(x, sigma, \dots, edge=TRUE,
+                   method=c("FFT", "C", "interpreted"))
+}
+\arguments{
+  \item{x}{
+    Line segment pattern (object of class \code{"psp"}) to be smoothed.
+  }
+  \item{sigma}{
+    Standard deviation of isotropic Gaussian smoothing kernel.
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{\link{as.mask}} which determine
+    the resolution of the resulting image.
+  }
+  \item{edge}{
+    Logical flag indicating whether to apply edge correction.
+  }
+  \item{method}{
+    Character string (partially matched) specifying the method of
+    computation. Option \code{"FFT"} is the fastest, while
+    \code{"C"} is the most accurate.
+  }
+}
+\value{
+  A pixel image (object of class \code{"im"}).
+}
+\details{
+  This is a method for the generic function \code{\link{density}}.
+
+  A kernel estimate of the intensity of the line segment pattern
+  is computed. The result is 
+  the convolution of the isotropic Gaussian kernel, of
+  standard deviation \code{sigma}, with the line segments.
+  The computation proceeds as follows:
+  \itemize{
+    \item if \code{method="FFT"}, the line segments are discretised
+    using \code{\link{pixellate.psp}}, then the Fast Fourier Transform
+    is used to calculate the convolution. This method is the fastest,
+    but is slightly less accurate.
+    \item if \code{method="C"} the exact value of the convolution at the
+    centre of each pixel is computed analytically using \code{C} code.
+    \item if \code{method="interpreted"},
+    the exact value of the convolution at the
+    centre of each pixel is computed analytically using \code{R} code.
+    This method is the slowest.
+  }
+  If \code{edge=TRUE} this result is adjusted for edge effects
+  by dividing it by the convolution of the same Gaussian kernel
+  with the observation window.
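+
+  For example, the three methods can be compared on the same data
+  (a sketch; the results should agree up to discretisation error):
+  \preformatted{
+    L  <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+    D1 <- density(L, sigma=0.05, method="FFT")
+    D2 <- density(L, sigma=0.05, method="C")
+    D3 <- density(L, sigma=0.05, method="interpreted")
+  }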
+}
+\seealso{
+  \code{\link{psp.object}},
+  \code{\link{im.object}},
+  \code{\link{density}}
+}
+\examples{
+  L <- psp(runif(20),runif(20),runif(20),runif(20), window=owin())
+  D <- density(L, sigma=0.03)
+  plot(D, main="density(L)")
+  plot(L, add=TRUE)
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/density.splitppp.Rd b/man/density.splitppp.Rd
new file mode 100644
index 0000000..7d8ab89
--- /dev/null
+++ b/man/density.splitppp.Rd
@@ -0,0 +1,80 @@
+\name{density.splitppp}
+\alias{density.splitppp}
+\alias{density.ppplist}
+\title{Kernel Smoothed Intensity of Split Point Pattern}
+\description{
+  Compute a kernel smoothed intensity function for each
+  of the components of a split point pattern,
+  or each of the point patterns in a list.
+}
+\usage{
+  \method{density}{splitppp}(x, \dots, se=FALSE)
+
+  \method{density}{ppplist}(x, \dots, se=FALSE)
+}
+\arguments{
+  \item{x}{
+    Split point pattern (object of class \code{"splitppp"}
+    created by \code{\link{split.ppp}}) to be smoothed.
+    Alternatively a list of point patterns,
+    of class \code{"ppplist"}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{density.ppp}} to control
+    the smoothing, pixel resolution, edge correction etc.
+  }
+  \item{se}{
+    Logical value indicating whether to compute standard errors as well.
+  }
+}
+\value{
+  A list of pixel images (objects of class \code{"im"})
+  which can be plotted or printed;
+  or a list of numeric vectors giving the values at specified points.
+
+  If \code{se=TRUE}, the result is a list with two elements named
+  \code{estimate} and \code{SE}, each of the format described above.
+}
+\details{
+  This is a method for the generic function \code{density}.
+
+  The argument \code{x} should be a list of point patterns,
+  and should belong to one of the classes 
+  \code{"ppplist"} or \code{"splitppp"}.
+  
+  Typically \code{x} is obtained by applying
+  the function \code{\link{split.ppp}} to a point pattern \code{y}
+  by calling \code{split(y)}. This splits the points of \code{y} into several
+  sub-patterns.
+  
+  A kernel estimate of the intensity function of each of the
+  point patterns is computed using \code{\link{density.ppp}}.
+
+  The return value is usually a list, each of whose entries is a
+  pixel image (object of class \code{"im"}). The return value
+  also belongs to the class \code{"solist"} and can be plotted
+  or printed.
+
+  If the argument \code{at="points"} is given, the result is a list
+  of numeric vectors giving the intensity values at the data points.
+
+  If \code{se=TRUE}, the result is a list with two elements named
+  \code{estimate} and \code{SE}, each of the format described above.
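+
+  For example, a sketch of the \code{se=TRUE} case:
+  \preformatted{
+    Z <- density(split(amacrine), 0.05, se=TRUE)
+    plot(Z$estimate)
+    plot(Z$SE)
+  }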
+}
+\seealso{
+  \code{\link{ppp.object}},
+  \code{\link{im.object}}
+}
+\examples{
+  Z <- density(split(amacrine), 0.05)
+  plot(Z)
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/deriv.fv.Rd b/man/deriv.fv.Rd
new file mode 100644
index 0000000..0704a52
--- /dev/null
+++ b/man/deriv.fv.Rd
@@ -0,0 +1,115 @@
+\name{deriv.fv}
+\alias{deriv.fv}
+\title{
+  Calculate Derivative of Function Values
+}
+\description{
+  Applies numerical differentiation to the values
+  in selected columns of a function value table.
+}
+\usage{
+\method{deriv}{fv}(expr, which = "*", ...,
+          method=c("spline", "numeric"),
+          kinks=NULL,
+          periodic=FALSE,
+          Dperiodic=periodic)
+}
+\arguments{
+  \item{expr}{
+    Function values to be differentiated.
+    A function value table (object of class \code{"fv"},
+    see \code{\link{fv.object}}).
+  }
+  \item{which}{
+    Character vector identifying which columns of the table
+    should be differentiated. Either a vector containing names
+    of columns, or one of the wildcard strings \code{"*"} or \code{"."}
+    explained below.
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{\link[stats]{smooth.spline}}
+    to control the differentiation algorithm, if \code{method="spline"}.
+  }
+  \item{method}{
+    Differentiation method. A character string, partially matched
+    to either \code{"spline"} or \code{"numeric"}.
+  }
+  \item{kinks}{
+    Optional vector of \eqn{x} values where the derivative is
+    allowed to be discontinuous. 
+  }
+  \item{periodic}{
+    Logical value indicating whether the function \code{expr}
+    is periodic. 
+  }
+  \item{Dperiodic}{
+    Logical value indicating whether the resulting derivative
+    should be a periodic function.
+  }
+}
+\details{
+  This command performs numerical differentiation on the function values in
+  a function value table (object of class \code{"fv"}).
+  The differentiation is performed either by 
+  \code{\link[stats]{smooth.spline}} or by
+  a naive numerical difference algorithm.
+
+  The command \code{\link{deriv}} is generic. This is the
+  method for objects of class \code{"fv"}.
+
+  Differentiation is applied to every column
+  (or to each of the selected columns) of function values in turn,
+  using the function argument as the \eqn{x} coordinate
+  and the selected column as the \eqn{y} coordinate.
+  The original function values are then replaced by the corresponding
+  derivatives.
+
+  The optional argument \code{which} specifies which of the
+  columns of function values in \code{expr} will be differentiated.
+  The default (indicated by the wildcard \code{which="*"})
+  is to differentiate all function values, i.e.\ all columns except the
+  function argument. Alternatively \code{which="."} designates
+  the subset of function values that are displayed in the default plot.
+  Alternatively \code{which} can be a character vector containing the
+  names of columns of \code{expr}.
+
+  If the argument \code{kinks} is given, it should be a numeric vector
+  giving the discontinuity points of the function: the value or values
+  of the function argument at which the function is
+  not differentiable. Differentiation will be performed separately on
+  intervals between the discontinuity points.
+
+  If \code{periodic=TRUE} then the function \code{expr} is taken to be
+  periodic, with period equal to the range of the function
+  argument in \code{expr}. The resulting derivative is periodic.
+
+  If \code{periodic=FALSE} but \code{Dperiodic=TRUE}, then the
+  \emph{derivative} is assumed to be periodic. This would be
+  appropriate if \code{expr} is the cumulative distribution function
+  of an angular variable, for example. 
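+
+  For example, a sketch of differentiation with a permitted
+  discontinuity (the kink location is arbitrary):
+  \preformatted{
+    G  <- Gest(cells)
+    DG <- deriv(G, which=".", spar=0.5, kinks=0.05)
+  }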
+}
+\value{
+  Another function value table (object of class \code{"fv"})
+  of the same format.
+}
+\author{\adrian and \rolf}
+\seealso{
+  \code{\link{with.fv}},
+  \code{\link{fv.object}},
+  \code{\link[stats]{smooth.spline}}
+}
+\examples{
+   G <- Gest(cells)
+   plot(deriv(G, which=".", spar=0.5))
+   A <- pairorient(redwood, 0.05, 0.15)
+   DA <- deriv(A, spar=0.6, Dperiodic=TRUE)
+}
+\keyword{spatial}
+\keyword{math}
+\keyword{nonparametric}
+
diff --git a/man/detpointprocfamilyfun.Rd b/man/detpointprocfamilyfun.Rd
new file mode 100644
index 0000000..744438a
--- /dev/null
+++ b/man/detpointprocfamilyfun.Rd
@@ -0,0 +1,183 @@
+\name{detpointprocfamilyfun}
+\alias{detpointprocfamilyfun}
+\title{Construct a New Determinantal Point Process Model Family Function}
+\description{
+  Function to ease the implementation of a new determinantal
+  point process model family.
+}
+\usage{detpointprocfamilyfun(kernel = NULL,
+    specden = NULL, basis = "fourierbasis", 
+    convkernel = NULL, Kfun = NULL, valid = NULL, intensity = NULL, 
+    dim = 2, name = "User-defined", isotropic = TRUE, range = NULL, 
+    parbounds = NULL, specdenrange = NULL, startpar = NULL, \dots)
+}
+\arguments{
+  \item{kernel}{
+    function specifying the kernel.
+    May be set to \code{NULL}. See Details.
+  }
+  \item{specden}{
+    function specifying the spectral density.
+    May be set to \code{NULL}. See Details.
+  }
+  \item{basis}{
+    character string giving the name of the basis.
+    Defaults to the Fourier basis. See Details.
+  }
+  \item{convkernel}{
+    function specifying the k-fold auto-convolution of the kernel.
+    May be set to \code{NULL}. See Details.
+  }
+  \item{Kfun}{
+    function specifying the K-function.
+    May be set to \code{NULL}. See Details.
+  }
+  \item{valid}{
+    function determining whether a given set of parameter values
+    yields a valid model. May be set to \code{NULL}. See Examples.
+  }
+  \item{intensity}{
+    character string specifying which parameter is the intensity in the
+    model family. Should be \code{NULL} if the model family has no intensity
+    parameter.
+  }
+  \item{dim}{
+    character string specifying which parameter is the dimension of the
+    state space in this model family (if any).
+    Alternatively a positive integer specifying the dimension.
+  }
+  \item{name}{
+    character string giving the name of the model family used for
+    printing.
+  }
+  \item{isotropic}{
+    logical value indicating whether or not the model is isotropic.
+  }
+  \item{range}{
+    function determining the interaction range of the model. May be
+    set to \code{NULL}. See Examples.
+  }
+  \item{parbounds}{
+    function determining the bounds for each model parameter when all
+    other parameters are fixed. May be set to \code{NULL}. See Examples.
+  }
+  \item{specdenrange}{
+    function specifying the range of the spectral density if it
+    is finite (only the case for very few models). May be set to
+    \code{NULL}.
+  }
+  \item{startpar}{
+    function determining starting values for parameters in any estimation
+    algorithm. May be set to \code{NULL}. See Examples.
+  }
+  \item{\dots}{
+    Additional arguments for inclusion in the returned model object. These
+    are not checked in any way.
+  }
+}
+\details{
+  A determinantal point process family is specified either
+  in terms of a kernel (a positive semi-definite function, i.e. a
+  covariance function) or a spectral density, or preferably both. 
+  One of these can be \code{NULL} if it is unknown, but not both.
+  When both are supplied they must have the same arguments.
+  The first argument gives
+  the values at which the function should be evaluated. In general
+  the function should accept an \eqn{n} by \eqn{d} matrix or
+  \code{data.frame} specifying \eqn{n \ge 0}{n >= 0}
+  points in dimension \eqn{d}. If the model is isotropic it only needs to
+  accept a vector of non-negative numeric values of length \eqn{n}. (In fact
+  there is currently almost no support for non-isotropic models, so
+  it is recommended not to specify such a model.) The name
+  of this argument may be chosen freely, but \code{x} is
+  recommended. The remaining arguments are the parameters of the
+  model. If one of these is an intensity parameter the name should
+  be mentioned in the argument \code{intensity}. If one of these
+  specifies the dimension of the model it should be mentioned in the
+  argument \code{dim}.
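+
+  For example, a minimal isotropic family with a single intensity
+  parameter could be declared as follows (a hypothetical sketch, not a
+  statistically meaningful model):
+  \preformatted{
+    toy <- detpointprocfamilyfun(
+        kernel = function(x, lambda){ lambda * exp(-x^2) },
+        intensity = "lambda",
+        name = "Toy")
+  }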
+
+  The kernel and spectral density are defined with respect to a
+  specific set of basis functions, which is typically the Fourier
+  basis. However this can be changed to any user-supplied basis in
+  the argument \code{basis}. If such an alternative is supplied it
+  must be the name of a function expecting the same arguments as
+  \code{\link{fourierbasis}} and returning the results in the same
+  form as \code{\link{fourierbasis}}.
+
+  If supplied, the arguments of \code{convkernel} must obey the
+  following: the first argument should be like the first argument of
+  \code{kernel} and/or \code{specden} (see above). The second argument (preferably
+  called \code{k}) should be the positive integer specifying how many times
+  the auto-convolution is done (i.e. the \eqn{k} in \eqn{k}-fold
+  auto-convolution). The remaining arguments must agree with the
+  arguments of \code{kernel} and/or \code{specden} (see above).
+
+  If supplied, the arguments of \code{Kfun} should be like the
+  arguments of \code{kernel} and \code{specden} (see above).
+}
+\author{\adrian, \rolf and \ege}
+\examples{
+  ## Example of how to define the Gauss family
+exGauss <- detpointprocfamilyfun(
+    name="Gaussian",
+    kernel=function(x, lambda, alpha, d){
+        lambda*exp(-(x/alpha)^2)
+    },
+    specden=function(x, lambda, alpha, d){
+        lambda * (sqrt(pi)*alpha)^d * exp(-(x*alpha*pi)^2)
+    },
+    convkernel=function(x, k, lambda, alpha, d){
+        logres <- k*log(lambda*pi*alpha^2) - log(pi*k*alpha^2) - x^2/(k*alpha^2)
+        return(exp(logres))
+    },
+    Kfun = function(x, lambda, alpha, d){
+        pi*x^2 - pi*alpha^2/2*(1-exp(-2*x^2/alpha^2))
+    },
+    valid=function(lambda, alpha, d){
+        lambda>0 && alpha>0 && d>=1 && lambda <= (sqrt(pi)*alpha)^(-d)
+    },
+    isotropic=TRUE,
+    intensity="lambda",
+    dim="d",
+    range=function(alpha, bound = .99){
+        if(missing(alpha))
+            stop("The parameter alpha is missing.")
+        if(!(is.numeric(bound)&&bound>0&&bound<1))
+            stop("Argument bound must be a numeric between 0 and 1.")
+        return(alpha*sqrt(-log(sqrt(1-bound))))
+    },
+    parbounds=function(name, lambda, alpha, d){
+        switch(name,
+               lambda = c(0, (sqrt(pi)*alpha)^(-d)),
+               alpha = c(0, lambda^(-1/d)/sqrt(pi)),
+               stop("Parameter name misspecified")
+               )
+    },
+    startpar=function(model, X){
+        rslt <- NULL
+        if("lambda" \%in\% model$freepar){
+            lambda <- intensity(X)
+            rslt <- c(rslt, "lambda" = lambda)
+            model <- update(model, lambda=lambda)
+        }
+        if("alpha" \%in\% model$freepar){
+            alpha <- .8*dppparbounds(model, "alpha")[2]
+            rslt <- c(rslt, "alpha" = alpha)
+        }
+        return(rslt)
+    }
+    )
+  exGauss
+  m <- exGauss(lambda=100, alpha=.05, d=2)
+  m
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/dfbetas.ppm.Rd b/man/dfbetas.ppm.Rd
new file mode 100644
index 0000000..dedd890
--- /dev/null
+++ b/man/dfbetas.ppm.Rd
@@ -0,0 +1,95 @@
+\name{dfbetas.ppm}
+\alias{dfbetas.ppm}
+\title{
+  Parameter Influence Measure
+}
+\description{
+  Computes the deletion influence measure for each parameter
+  in a fitted point process model.
+}
+\usage{
+\method{dfbetas}{ppm}(model, ..., drop = FALSE, iScore=NULL,
+iHessian=NULL, iArgs=NULL)
+}
+\arguments{
+  \item{model}{
+    Fitted point process model (object of class \code{"ppm"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{drop}{
+    Logical. Whether to include (\code{drop=FALSE}) or
+    exclude (\code{drop=TRUE}) contributions from quadrature
+    points that were not used to fit the model.
+  }
+  \item{iScore,iHessian}{
+    Components of the score vector and Hessian matrix for
+    the irregular parameters, if required. See Details.
+  }
+  \item{iArgs}{
+    List of extra arguments for the functions \code{iScore},
+    \code{iHessian} if required.
+  }
+}
+\details{
+  Given a fitted spatial point process \code{model},
+  this function computes the influence measure for each parameter,
+  as described in Baddeley, Chang and Song (2013).
+  
+  This is a method for the generic function \code{\link[stats]{dfbetas}}.
+
+  The influence measure for each parameter \eqn{\theta}{theta} is a
+  signed measure in two-dimensional space. It consists of a discrete
+  mass on each data point (i.e. each point in the point pattern to which
+  the \code{model} was originally fitted) and a continuous density at
+  all locations. The mass at a data point represents the change in the 
+  fitted value of the parameter \eqn{\theta}{theta} that would occur
+  if this data point were to be deleted.
+  The density at other non-data locations represents the
+  effect (on the fitted value of \eqn{\theta}{theta})
+  of deleting these locations (and their associated covariate values)
+  from the input to the fitting procedure.
+  
+  If the point process model trend has irregular parameters that were
+  fitted (using \code{\link{ippm}})
+  then the influence calculation requires the first and second
+  derivatives of the log trend with respect to the irregular parameters. 
+  The argument \code{iScore} should be a list,
+  with one entry for each irregular parameter,  of \R functions that compute the
+  partial derivatives of the log trend (i.e. log intensity or
+  log conditional intensity) with respect to each irregular
+  parameter. The argument \code{iHessian} should be a list,
+  with \eqn{p^2} entries where \eqn{p} is the number of irregular
+  parameters, of \R functions that compute the second order
+  partial derivatives of the
+  log trend with respect to each pair of irregular parameters.  
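+
+  For instance, if the log trend involved a single irregular parameter
+  \code{gamma} entering as \code{gamma * x}, the derivative lists might
+  look like the following sketch (the entry names and function
+  signatures here are assumptions for illustration, not a documented
+  interface):
+  \preformatted{
+    iScore   <- list(gamma = function(x, y, gamma) { x })     # d/dgamma of log trend
+    iHessian <- list(gamma = function(x, y, gamma) { 0 * x }) # second derivative
+  }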
+}
+\value{
+  An object of class \code{"msr"} representing a signed or vector-valued
+  measure. 
+}
+\references{
+  Baddeley, A., Chang, Y.M. and Song, Y. (2013)
+  Leverage and influence diagnostics for spatial point process models.
+  \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{leverage.ppm}},
+  \code{\link{influence.ppm}},
+  \code{\link{ppmInfluence}}
+}
+\examples{
+   \testonly{op <- spatstat.options(npixel=32)}
+   X <- rpoispp(function(x,y) { exp(3+3*x) })
+   fit <- ppm(X, ~x+y)
+   \testonly{fit <- ppm(X, ~x+y, nd=16)}
+   plot(dfbetas(fit))
+   plot(Smooth(dfbetas(fit)))
+   \testonly{spatstat.options(op)}
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/dg.envelope.Rd b/man/dg.envelope.Rd
new file mode 100644
index 0000000..5e4d134
--- /dev/null
+++ b/man/dg.envelope.Rd
@@ -0,0 +1,130 @@
+\name{dg.envelope}
+\alias{dg.envelope}
+\title{
+  Global Envelopes for Dao-Genton Test
+}
+\description{
+  Computes the global envelopes
+  corresponding to the Dao-Genton test of goodness-of-fit.
+}
+\usage{
+dg.envelope(X, \dots,
+            nsim = 19, nsimsub=nsim-1, nrank = 1,
+            alternative=c("two.sided", "less", "greater"),
+            leaveout=1, interpolate = FALSE,
+            savefuns=FALSE, savepatterns=FALSE,
+            verbose = TRUE)
+}
+\arguments{
+  \item{X}{
+    Either a point pattern dataset (object of class \code{"ppp"},
+    \code{"lpp"} or \code{"pp3"}) or a fitted point process model
+    (object of class \code{"ppm"}, \code{"kppm"} or \code{"slrm"}).
+  }
+  \item{\dots}{
+    Arguments passed to 
+    \code{\link{mad.test}} or \code{\link{envelope}} to
+    control the conduct of the test.
+    Useful arguments include \code{fun} to determine the summary
+    function, \code{rinterval} to determine the range of
+    \eqn{r} values used in the test, and
+    \code{verbose=FALSE} to turn off the messages.
+  }
+  \item{nsim}{
+    Number of simulated patterns to be generated in the primary
+    experiment. 
+  }
+  \item{nsimsub}{
+    Number of simulations in each basic test. There will be \code{nsim}
+    repetitions of the basic test, each involving \code{nsimsub} simulated
+    realisations, so there will be a total
+    of \code{nsim * (nsimsub + 1)} simulations.
+  }
+  \item{nrank}{
+    Integer. Rank of the envelope value amongst the \code{nsim} simulated
+    values. A rank of 1 means that the minimum and maximum
+    simulated values will be used.
+  }
+  \item{alternative}{
+    Character string determining whether the envelope corresponds
+    to a two-sided test (\code{alternative="two.sided"}, the default)
+    or a one-sided test with a lower critical boundary
+    (\code{alternative="less"}) or a one-sided test
+    with an upper critical boundary (\code{alternative="greater"}).
+  }
+  \item{leaveout}{
+    Optional integer 0, 1 or 2 indicating how to calculate the
+    deviation between the observed summary function and the
+    nominal reference value, when the reference value must be estimated
+    by simulation. See Details.
+  }
+  \item{interpolate}{
+    Logical value indicating whether to interpolate the distribution of
+    the test statistic by kernel smoothing, as described in
+    Dao and Genton (2014, Section 5).
+  }
+  \item{savefuns}{
+    Logical flag indicating whether to save the simulated
+    function values (from the first stage).
+  }
+  \item{savepatterns}{
+    Logical flag indicating whether to save the simulated
+    point patterns (from the first stage). 
+  }
+  \item{verbose}{
+    Logical value determining whether to print progress reports.
+  }
+}
+\details{
+  Computes global simulation envelopes corresponding to the
+  Dao-Genton (2014) adjusted Monte Carlo goodness-of-fit test.
+  The envelopes are described in Baddeley et al (2015).
+  
+  If \code{X} is a point pattern, the null hypothesis is CSR.
+
+  If \code{X} is a fitted model, the null hypothesis is that model.
+}
+\value{
+  An object of class \code{"fv"}.
+}
+\references{
+  Dao, N.A. and Genton, M. (2014)
+  A Monte Carlo adjusted goodness-of-fit test for
+  parametric models describing spatial point patterns.
+  \emph{Journal of Computational and Graphical Statistics}
+  \bold{23}, 497--517.
+
+  Baddeley, A., Hardegen, A., Lawrence, T.,
+  Milne, R.K., Nair, G.M. and Rakshit, S. (2015)
+  Pushing the envelope: extensions of graphical
+  Monte Carlo tests. Submitted for publication.
+}
+\author{
+  \adrian, Andrew Hardegen, Tom Lawrence,
+  Robin Milne, Gopalan Nair and Suman Rakshit.
+  Implemented by \adrian, \rolf and \ege.
+}
+\seealso{
+  \code{\link{dg.test}},
+  \code{\link{mad.test}},
+  \code{\link{envelope}}
+}
+\examples{
+  ns <- if(interactive()) 19 else 4
+  E <- dg.envelope(swedishpines, Lest, nsim=ns)
+  E
+  plot(E)
+  Eo <- dg.envelope(swedishpines, Lest, alternative="less", nsim=ns)
+  Ei <- dg.envelope(swedishpines, Lest, interpolate=TRUE, nsim=ns)
+}
+\keyword{spatial}
+\keyword{htest}
+\keyword{hplot}
+\keyword{iteration}
diff --git a/man/dg.progress.Rd b/man/dg.progress.Rd
new file mode 100644
index 0000000..87353a4
--- /dev/null
+++ b/man/dg.progress.Rd
@@ -0,0 +1,183 @@
+\name{dg.progress}
+\alias{dg.progress}
+\title{
+   Progress Plot of Dao-Genton Test of Spatial Pattern
+}
+\description{
+  Generates a progress plot (envelope representation) of the
+  Dao-Genton test for a spatial point pattern.
+}
+\usage{
+dg.progress(X, fun = Lest, \dots,
+            exponent = 2, nsim = 19, nsimsub = nsim - 1,
+            nrank = 1, alpha, leaveout=1, interpolate = FALSE, rmin=0,
+            savefuns = FALSE, savepatterns = FALSE, verbose=TRUE)
+}
+\arguments{
+  \item{X}{
+    Either a point pattern (object of class \code{"ppp"}, \code{"lpp"}
+    or other class), a fitted point process model (object of class \code{"ppm"},
+    \code{"kppm"} or other class) or an envelope object (class
+    \code{"envelope"}). 
+  }
+  \item{fun}{
+    Function that computes the desired summary statistic
+    for a point pattern. 
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{envelope}}.
+    Useful arguments include \code{alternative} to
+    specify one-sided or two-sided envelopes.
+  }  
+  \item{exponent}{
+    Positive number. The exponent of the \eqn{L^p} distance.
+    See Details.
+  }
+  \item{nsim}{
+    Number of repetitions of the basic test.
+  }
+  \item{nsimsub}{
+    Number of simulations in each basic test. There will be \code{nsim}
+    repetitions of the basic test, each involving \code{nsimsub} simulated
+    realisations, so there will be a total
+    of \code{nsim * (nsimsub + 1)} simulations.
+  }
+  \item{nrank}{
+    Integer. The rank of the critical value of the Monte Carlo test,
+    amongst the \code{nsim} simulated values.
+    A rank of 1 means that the minimum and maximum
+    simulated values will become the critical values for the test.
+  }
+  \item{alpha}{
+    Optional. The significance level of the test.
+    Equivalent to \code{nrank/(nsim+1)} where \code{nsim} is the
+    number of simulations.
+  }
+  \item{leaveout}{
+    Optional integer 0, 1 or 2 indicating how to calculate the
+    deviation between the observed summary function and the
+    nominal reference value, when the reference value must be estimated
+    by simulation. See Details.
+  }
+  \item{interpolate}{
+    Logical value indicating how to compute the critical value.
+    If \code{interpolate=FALSE} (the default), a standard Monte Carlo test
+    is performed, and the critical value is the largest
+    simulated value of the test statistic (if \code{nrank=1})
+    or the \code{nrank}-th largest (if \code{nrank} is another number).
+    If \code{interpolate=TRUE}, kernel density estimation
+    is applied to the simulated values, and the critical value is
+    the upper \code{alpha} quantile of this estimated distribution.
+  }
+  \item{rmin}{
+    Optional. Left endpoint for the interval of \eqn{r} values
+    on which the test statistic is calculated.
+  }
+  \item{savefuns}{
+    Logical value indicating whether to save the simulated
+    function values (from the first stage).
+  }
+  \item{savepatterns}{
+    Logical value indicating whether to save the simulated
+    point patterns (from the first stage). 
+  }
+  \item{verbose}{
+    Logical value indicating whether to print progress reports.
+  }
+}
+\details{
+  The Dao and Genton (2014) test for a spatial point pattern
+  is described in \code{\link{dg.test}}.
+  This test depends on the choice of an interval of
+  distance values (the argument \code{rinterval}).
+  A \emph{progress plot} or \emph{envelope representation}
+  of the test (Baddeley et al, 2014) is a plot of the
+  test statistic (and the corresponding critical value) against the length of
+  the interval \code{rinterval}.
+  
+  The command \code{dg.progress} effectively performs 
+  \code{\link{dg.test}} on \code{X} using all possible intervals
+  of the form \eqn{[0,R]}, and returns the resulting values of the test
+  statistic, and the corresponding critical values of the test,
+  as a function of \eqn{R}. 
+
+  The result is an object of class \code{"fv"}
+  that can be plotted to obtain the progress plot. The display shows
+  the test statistic (solid black line) and the test
+  acceptance region (grey shading).
+  If \code{X} is an envelope object, then some of the data stored
+  in \code{X} may be re-used:
+  \itemize{
+    \item
+    If \code{X} is an envelope object containing simulated functions,
+    and \code{fun=NULL}, then
+    the code will re-use the simulated functions stored in \code{X}.
+    \item
+    If \code{X} is an envelope object containing
+    simulated point patterns, 
+    then \code{fun} will be applied to the stored point patterns
+    to obtain the simulated functions.
+    If \code{fun} is not specified, it defaults to \code{\link{Lest}}.
+    \item
+    Otherwise, new simulations will be performed,
+    and \code{fun} defaults to  \code{\link{Lest}}.
+  }
+  If the argument \code{rmin} is given, it specifies the left endpoint
+  of the interval defining the test statistic: the tests are
+  performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]}
+  where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}.
+
+  The argument \code{leaveout} specifies how to calculate the
+  discrepancy between the summary function for the data and the
+  nominal reference value, when the reference value must be estimated
+  by simulation. The values \code{leaveout=0} and
+  \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014,
+  Appendix) to computing the difference \code{observed - reference}
+  where the \code{reference} is the mean of simulated values.
+  The value \code{leaveout=2} gives the leave-two-out discrepancy
+  proposed by Dao and Genton (2014).
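+
+  For example, a sketch using a small number of simulations
+  (the values of \code{nsim} and \code{rmin} are arbitrary):
+  \preformatted{
+    P <- dg.progress(cells, Lest, nsim=5, rmin=0.01)
+    plot(P)
+  }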
+}
+\value{
+  An object of class \code{"fv"} that can be plotted to
+  obtain the progress plot. 
+}
+\references{
+  Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T.,
+  Milne, R. and Nair, G. (2014)
+  On tests of spatial pattern based on simulation envelopes.
+  \emph{Ecological Monographs} \bold{84} (3) 477--489.
+
+  Baddeley, A., Hardegen, A., Lawrence, T.,
+  Milne, R.K., Nair, G.M. and Rakshit, S. (2015)
+  Pushing the envelope: extensions of graphical
+  Monte Carlo tests. Submitted for publication.
+
+  Dao, N.A. and Genton, M. (2014)
+  A Monte Carlo adjusted goodness-of-fit test for
+  parametric models describing spatial point patterns.
+  \emph{Journal of Computational and Graphical Statistics}
+  \bold{23}, 497--517.
+}
+\author{
+  \adrian, Andrew Hardegen, Tom Lawrence,
+  Robin Milne, Gopalan Nair and Suman Rakshit.
+  Implemented by \adrian, \rolf and \ege.
+}
+\seealso{
+  \code{\link{dg.test}},
+  \code{\link{dclf.progress}}
+}
+\examples{
+   ns <- if(interactive()) 19 else 5
+   plot(dg.progress(cells, nsim=ns))
+}
+\keyword{spatial}
+\keyword{htest}
+
diff --git a/man/dg.sigtrace.Rd b/man/dg.sigtrace.Rd
new file mode 100644
index 0000000..10109c8
--- /dev/null
+++ b/man/dg.sigtrace.Rd
@@ -0,0 +1,180 @@
+\name{dg.sigtrace}
+\alias{dg.sigtrace}
+\title{
+  Significance Trace of Dao-Genton Test
+}
+\description{
+  Generates a Significance Trace of the
+  Dao and Genton (2014) test for a spatial point pattern.
+}
+\usage{
+  dg.sigtrace(X, fun = Lest, \dots,
+              exponent = 2, nsim = 19, nsimsub = nsim - 1,
+              alternative = c("two.sided", "less", "greater"),
+              rmin=0, leaveout=1,
+              interpolate = FALSE, confint = TRUE, alpha = 0.05,
+              savefuns=FALSE, savepatterns=FALSE, verbose=FALSE)
+}
+\arguments{
+  \item{X}{
+    Either a point pattern (object of class \code{"ppp"}, \code{"lpp"}
+    or other class), a fitted point process model (object of class \code{"ppm"},
+    \code{"kppm"} or other class) or an envelope object (class
+    \code{"envelope"}). 
+  }
+  \item{fun}{
+    Function that computes the desired summary statistic
+    for a point pattern.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{envelope}}.
+  }
+  \item{exponent}{
+    Positive number. Exponent used in the test statistic. Use \code{exponent=2}
+    for the Diggle-Cressie-Loosmore-Ford test, and \code{exponent=Inf}
+    for the Maximum Absolute Deviation test.
+    See Details.
+  }
+  \item{nsim}{
+    Number of repetitions of the basic test.
+  }
+  \item{nsimsub}{
+    Number of simulations in each basic test. There will be \code{nsim}
+    repetitions of the basic test, each involving \code{nsimsub} simulated
+    realisations, so there will be a total
+    of \code{nsim * (nsimsub + 1)} simulations.
+  }
+  \item{alternative}{
+    Character string specifying the alternative hypothesis.
+    The default (\code{alternative="two.sided"}) is that the
+    true value of the summary function is not equal to the theoretical
+    value postulated under the null hypothesis.
+    If \code{alternative="less"} the alternative hypothesis is that the
+    true value of the summary function is lower than the theoretical value.
+  }
+  \item{rmin}{
+    Optional. Left endpoint for the interval of \eqn{r} values
+    on which the test statistic is calculated.
+  }
+  \item{leaveout}{
+    Optional integer 0, 1 or 2 indicating how to calculate the
+    deviation between the observed summary function and the
+    nominal reference value, when the reference value must be estimated
+    by simulation. See Details.
+  }
+  \item{interpolate}{
+    Logical value indicating whether to interpolate the distribution of
+    the test statistic by kernel smoothing, as described in
+    Dao and Genton (2014, Section 5).
+  }
+  \item{confint}{
+    Logical value indicating whether to compute a confidence interval
+    for the \sQuote{true} \eqn{p}-value.
+  }
+  \item{alpha}{
+    Significance level to be plotted (this has no effect on the calculation
+    but is simply plotted as a reference value).
+  }
+  \item{savefuns}{
+    Logical flag indicating whether to save the simulated
+    function values (from the first stage).
+  }
+  \item{savepatterns}{
+    Logical flag indicating whether to save the simulated
+    point patterns (from the first stage). 
+  }
+  \item{verbose}{
+    Logical flag indicating whether to print progress reports.
+  }
+}
+\details{
+  The Dao and Genton (2014) test for a spatial point pattern
+  is described in \code{\link{dg.test}}.
+  This test depends on the choice of an interval of
+  distance values (the argument \code{rinterval}).
+  A \emph{significance trace} (Bowman and Azzalini, 1997;
+  Baddeley et al, 2014, 2015)
+  of the test is a plot of the \eqn{p}-value
+  obtained from the test against the length of
+  the interval \code{rinterval}.
+  
+  The command \code{dg.sigtrace} effectively performs 
+  \code{\link{dg.test}} on \code{X} using all possible intervals
+  of the form \eqn{[0,R]}, and returns the resulting \eqn{p}-values
+  as a function of \eqn{R}.
+
+  The result is an object of class \code{"fv"} that can be plotted to
+  obtain the significance trace. The plot shows the
+  Dao-Genton adjusted
+  \eqn{p}-value (solid black line), 
+  the critical value \code{0.05} (dashed red line),
+  and a pointwise 95\% confidence band (grey shading)
+  for the \sQuote{true} (Neyman-Pearson) \eqn{p}-value.
+  The confidence band is based on the Agresti-Coull (1998)
+  confidence interval for a binomial proportion.
+
+  If \code{X} is an envelope object and \code{fun=NULL} then
+  the code will re-use the simulated functions stored in \code{X}.
+
+  If the argument \code{rmin} is given, it specifies the left endpoint
+  of the interval defining the test statistic: the tests are
+  performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]}
+  where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}.
+
+  The argument \code{leaveout} specifies how to calculate the
+  discrepancy between the summary function for the data and the
+  nominal reference value, when the reference value must be estimated
+  by simulation. The values \code{leaveout=0} and
+  \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014,
+  Appendix) to computing the difference \code{observed - reference}
+  where the \code{reference} is the mean of simulated values.
+  The value \code{leaveout=2} gives the leave-two-out discrepancy
+  proposed by Dao and Genton (2014).
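+
+  For example, a sketch with kernel interpolation enabled
+  (\code{nsim} kept small for speed):
+  \preformatted{
+    S <- dg.sigtrace(cells, Lest, nsim=5, interpolate=TRUE)
+    plot(S)
+  }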
+}
+\value{
+  An object of class \code{"fv"} that can be plotted to
+  obtain the significance trace. 
+}
+\references{
+  Agresti, A. and Coull, B.A. (1998)
+  Approximate is better than \dQuote{Exact} for interval
+   estimation of binomial proportions.
+  \emph{American Statistician} \bold{52}, 119--126.
+
+  Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T.,
+  Milne, R. and Nair, G. (2014)
+  On tests of spatial pattern based on simulation envelopes.
+  \emph{Ecological Monographs} \bold{84} (3) 477--489.
+
+  Baddeley, A., Hardegen, A., Lawrence, T.,
+  Milne, R.K., Nair, G.M. and Rakshit, S. (2015)
+  Pushing the envelope: extensions of graphical
+  Monte Carlo tests. Submitted for publication.
+
+  Bowman, A.W. and Azzalini, A. (1997) 
+  \emph{Applied smoothing techniques for data analysis: 
+    the kernel approach with S-Plus illustrations}.
+  Oxford University Press, Oxford.
+
+  Dao, N.A. and Genton, M. (2014)
+  A Monte Carlo adjusted goodness-of-fit test for
+  parametric models describing spatial point patterns.
+  \emph{Journal of Computational and Graphical Statistics}
+  \bold{23}, 497--517.
+}
+\author{
+  \adrian, Andrew Hardegen, Tom Lawrence,
+  Robin Milne, Gopalan Nair and Suman Rakshit.
+  Implemented by
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{dg.test}} for the Dao-Genton test,
+  \code{\link{dclf.sigtrace}} for significance traces of other tests.
+}
+\examples{
+  ns <- if(interactive()) 19 else 5
+  plot(dg.sigtrace(cells, nsim=ns))
+}
+\keyword{spatial}
+\keyword{htest}
diff --git a/man/dg.test.Rd b/man/dg.test.Rd
new file mode 100644
index 0000000..9d74b14
--- /dev/null
+++ b/man/dg.test.Rd
@@ -0,0 +1,148 @@
+\name{dg.test}
+\alias{dg.test}
+\title{
+  Dao-Genton Adjusted Goodness-Of-Fit Test
+}
+\description{
+  Performs the Dao and Genton (2014) adjusted
+  goodness-of-fit test of spatial pattern.
+}
+\usage{
+dg.test(X, \dots,
+        exponent = 2, nsim=19, nsimsub=nsim-1,
+        alternative=c("two.sided", "less", "greater"),
+        reuse = TRUE, leaveout=1, interpolate = FALSE,
+        savefuns=FALSE, savepatterns=FALSE,
+        verbose = TRUE)
+}
+\arguments{
+  \item{X}{
+    Either a point pattern dataset (object of class \code{"ppp"},
+    \code{"lpp"} or \code{"pp3"}) or a fitted point process model
+    (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"}
+    or \code{"slrm"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{dclf.test}} or
+    \code{\link{mad.test}} or \code{\link{envelope}} to
+    control the conduct of the test.
+    Useful arguments include \code{fun} to determine the summary
+    function, \code{rinterval} to determine the range of
+    \eqn{r} values used in the test, 
+    and \code{use.theory} described under Details. 
+  }
+  \item{exponent}{
+    Exponent used in the test statistic. Use \code{exponent=2}
+    for the Diggle-Cressie-Loosmore-Ford test, and \code{exponent=Inf}
+    for the Maximum Absolute Deviation test.
+  }
+  \item{nsim}{
+    Number of repetitions of the basic test.
+  }
+  \item{nsimsub}{
+    Number of simulations in each basic test. There will be \code{nsim}
+    repetitions of the basic test, each involving \code{nsimsub} simulated
+    realisations, so there will be a total
+    of \code{nsim * (nsimsub + 1)} simulations.
+  }
+  \item{alternative}{
+    Character string specifying the alternative hypothesis.
+    The default (\code{alternative="two.sided"}) is that the
+    true value of the summary function is not equal to the theoretical
+    value postulated under the null hypothesis.
+    If \code{alternative="less"} the alternative hypothesis is that the
+    true value of the summary function is lower than the theoretical value.
+  }
+  \item{reuse}{
+    Logical value indicating whether to re-use the first stage
+    simulations at the second stage, as described by Dao and Genton (2014).
+  }
+  \item{leaveout}{
+    Optional integer 0, 1 or 2 indicating how to calculate the
+    deviation between the observed summary function and the
+    nominal reference value, when the reference value must be estimated
+    by simulation. See Details.
+  }
+  \item{interpolate}{
+    Logical value indicating whether to interpolate the distribution of
+    the test statistic by kernel smoothing, as described in
+    Dao and Genton (2014, Section 5).
+  }
+  \item{savefuns}{
+    Logical flag indicating whether to save the simulated
+    function values (from the first stage).
+  }
+  \item{savepatterns}{
+    Logical flag indicating whether to save the simulated
+    point patterns (from the first stage). 
+  }
+  \item{verbose}{
+    Logical value indicating whether to print progress reports.
+  }
+}
+\details{
+  Performs the Dao-Genton (2014) adjusted Monte Carlo goodness-of-fit
+  test, in the equivalent form described by Baddeley et al (2014).
+  
+  If \code{X} is a point pattern, the null hypothesis is CSR.
+
+  If \code{X} is a fitted model, the null hypothesis is that model.
+
+  The argument \code{use.theory} passed to \code{\link{envelope}}
+  determines whether to compare the summary function for the data
+  to its theoretical value for CSR (\code{use.theory=TRUE})
+  or to the sample mean of simulations from CSR
+  (\code{use.theory=FALSE}).
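+
+  For example, a sketch comparing the data summary function to its
+  theoretical value under CSR (\code{nsim} kept small for speed):
+  \preformatted{
+    dg.test(cells, fun=Kest, use.theory=TRUE, nsim=5)
+  }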
+
+  The argument \code{leaveout} specifies how to calculate the
+  discrepancy between the summary function for the data and the
+  nominal reference value, when the reference value must be estimated
+  by simulation. The values \code{leaveout=0} and
+  \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014,
+  Appendix) to computing the difference \code{observed - reference}
+  where the \code{reference} is the mean of simulated values.
+  The value \code{leaveout=2} gives the leave-two-out discrepancy
+  proposed by Dao and Genton (2014).
+
+  The Dao-Genton test is biased when the significance level is very small
+  (small \eqn{p}-values are not reliable) and
+  we recommend \code{\link{bits.test}} in this case.
+}
+\value{
+  A hypothesis test (object of class \code{"htest"})
+  which can be printed to show the outcome of the test.
+}
+\references{
+  Dao, N.A. and Genton, M. (2014)
+  A Monte Carlo adjusted goodness-of-fit test for
+  parametric models describing spatial point patterns.
+  \emph{Journal of Computational and Graphical Statistics}
+  \bold{23}, 497--517.
+
+  Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne,
+  R.K. and Nair, G. (2014) On tests of spatial pattern based on
+  simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489.
+  
+  Baddeley, A., Hardegen, A., Lawrence, T.,
+  Milne, R.K., Nair, G.M. and Rakshit, S. (2017)
+  On two-stage Monte Carlo tests of composite hypotheses.
+  \emph{Computational Statistics and Data Analysis}, in press.
+}
+\author{
+  \adrian, Andrew Hardegen, Tom Lawrence,
+  Robin Milne, Gopalan Nair and Suman Rakshit.
+  Implemented by \spatstatAuthors.
+}
+\seealso{
+  \code{\link{bits.test}},
+  \code{\link{dclf.test}},
+  \code{\link{mad.test}}
+}
+\examples{
+ ns <- if(interactive()) 19 else 4
+ dg.test(cells, nsim=ns)
+ dg.test(cells, alternative="less", nsim=ns)
+ dg.test(cells, nsim=ns, interpolate=TRUE)
+}
+\keyword{spatial}
+\keyword{htest}
diff --git a/man/diagnose.ppm.Rd b/man/diagnose.ppm.Rd
new file mode 100644
index 0000000..bfc9dc9
--- /dev/null
+++ b/man/diagnose.ppm.Rd
@@ -0,0 +1,416 @@
+\name{diagnose.ppm}
+\alias{diagnose.ppm}
+\alias{plot.diagppm}
+\title{
+  Diagnostic Plots for Fitted Point Process Model
+}
+\description{
+  Given a point process model fitted to a point pattern,
+  produce diagnostic plots based on residuals.
+}
+\usage{
+  diagnose.ppm(object, \dots, type="raw", which="all", sigma=NULL, 
+               rbord=reach(object), cumulative=TRUE,
+               plot.it=TRUE, rv = NULL,
+               compute.sd=is.poisson(object), compute.cts=TRUE,
+               envelope=FALSE, nsim=39, nrank=1,
+               typename, check=TRUE, repair=TRUE,
+               oldstyle=FALSE, splineargs=list(spar=0.5))
+
+  \method{plot}{diagppm}(x, \dots, which, 
+               plot.neg=c("image", "discrete", "contour", "imagecontour"),
+               plot.smooth=c("imagecontour", "image", "contour", "persp"),
+               plot.sd, spacing=0.1, outer=3,
+               srange=NULL, monochrome=FALSE, main=NULL)
+}
+\arguments{
+  \item{object}{
+    The fitted point process model (an object of class \code{"ppm"})
+    for which diagnostics should be produced. This object
+    is usually obtained from \code{\link{ppm}}.
+  }
+  \item{type}{
+    String indicating the type of residuals or weights to be used.
+    Current options are \code{"eem"}
+    for the Stoyan-Grabarnik exponential energy weights,
+    \code{"raw"} for the raw residuals,
+    \code{"inverse"} for the inverse-lambda residuals,
+    and \code{"pearson"} for the Pearson residuals.
+    A partial match is adequate.
+  }
+  \item{which}{
+    Character string or vector indicating the choice(s) of
+    plots to be generated. Options are
+    \code{"all"}, \code{"marks"}, \code{"smooth"},
+    \code{"x"}, \code{"y"} and \code{"sum"}.
+    Multiple choices may be given but must be matched exactly.
+    See Details.
+  }
+  \item{sigma}{
+    Bandwidth for kernel smoother in \code{"smooth"} option.
+  }
+  \item{rbord}{
+    Width of border to avoid edge effects.
+    The diagnostic calculations
+    will be confined to those points of the data pattern which are
+    at least \code{rbord} units away from the edge of the window.
+    (An infinite value of \code{rbord} will be ignored.)
+  }
+  \item{cumulative}{
+    Logical flag indicating whether the lurking variable plots
+    for the \eqn{x} and \eqn{y} coordinates will be the plots of
+    cumulative sums of marks (\code{cumulative=TRUE}) or the
+    plots of marginal integrals of the smoothed residual field
+    (\code{cumulative=FALSE}).
+  }
+  \item{plot.it}{
+    Logical value indicating whether 
+    plots should be shown. If \code{plot.it=FALSE}, 
+    the computed diagnostic quantities are returned without plotting them.
+  }
+  \item{plot.neg}{
+    String indicating how the density part
+    of the residual measure should be plotted.
+  }
+  \item{plot.smooth}{
+    String indicating how the smoothed residual field should be plotted.
+  }
+  \item{compute.sd,plot.sd}{
+    Logical values indicating whether 
+    error bounds should be computed and added to the \code{"x"} and \code{"y"}
+    plots. The default is \code{TRUE} for Poisson models and
+    \code{FALSE} for non-Poisson models. See Details.
+  }
+  \item{envelope,nsim,nrank}{
+    Arguments passed to \code{\link{lurking}}
+    in order to plot simulation envelopes for the lurking variable plots.
+  }
+  \item{rv}{
+    Usually absent. Advanced use only.
+    If this argument is present, the values of the residuals will not be
+    calculated from the fitted model \code{object} but will instead
+    be taken directly from \code{rv}.
+  }
+  \item{spacing}{
+    The spacing between plot panels (when a four-panel plot
+    is generated) expressed as a fraction of the width of the
+    window of the point pattern.
+  }
+  \item{outer}{
+    The distance from the outermost line of text to the nearest plot
+    panel, expressed as a multiple of the spacing between plot panels.
+  }
+  \item{srange}{
+    Vector of length 2 that will be taken as giving the range of values
+    of the smoothed residual field, when generating an image plot of this
+    field. This is useful if you want to generate diagnostic plots
+    for two different fitted models using the same colour map. 
+   }
+  \item{monochrome}{
+    Flag indicating whether images should be displayed in
+    greyscale (suitable for publication) or in colour (suitable
+    for the screen). The default is to display in colour.
+  }
+  \item{check}{
+    Logical value indicating whether to check the internal format
+    of \code{object}. If there is any possibility that this object
+    has been restored from a dump file, or has otherwise lost track of
+    the environment where it was originally computed, set
+    \code{check=TRUE}. 
+  }
+  \item{repair}{
+    Logical value indicating whether to repair the internal format
+    of \code{object}, if it is found to be damaged. 
+  }
+  \item{oldstyle}{
+    Logical flag indicating whether error bounds should be plotted
+    using the approximation given in the original paper
+    (\code{oldstyle=TRUE}),
+    or using the correct asymptotic formula (\code{oldstyle=FALSE}).
+  }
+  \item{splineargs}{
+    Argument passed to \code{\link{lurking}} 
+    to control the smoothing in the lurking variable plot.
+  }
+  \item{x}{The value returned from a previous call to 
+  \code{diagnose.ppm}. An object of class \code{"diagppm"}.
+  }
+  \item{typename}{String to be used as the name of the residuals.}
+  \item{main}{Main title for the plot.}
+  \item{\dots}{
+    Extra arguments, controlling either the resolution of the smoothed image
+    (passed from \code{diagnose.ppm} to \code{\link{density.ppp}}) 
+    or the appearance of the plots
+    (passed from \code{diagnose.ppm} to \code{plot.diagppm} and from 
+    \code{plot.diagppm} to \code{\link{plot.default}}).
+  }
+  \item{compute.cts}{Advanced use only.}
+}
+\value{
+  An object of class \code{"diagppm"} which contains
+  the coordinates needed to reproduce the selected plots.
+  This object can be plotted using \code{plot.diagppm}
+  and printed using \code{print.diagppm}.
+}
+\details{
+  The function \code{diagnose.ppm} generates several diagnostic plots for a
+  fitted point process model.
+  The plots display the residuals from the fitted model
+  (Baddeley et al, 2005)
+  or alternatively the `exponential energy marks' (Stoyan and Grabarnik, 1991).
+  These plots can be used to
+  assess goodness-of-fit, to identify outliers in the data,
+  and to reveal departures from the fitted model.
+  See also the companion function \code{\link{qqplot.ppm}}.
+
+  The argument \code{object} must be a fitted point process model
+  (object of class \code{"ppm"}), typically produced by the maximum
+  pseudolikelihood fitting algorithm \code{\link{ppm}}.
+
+  The argument \code{type} selects the type of residual or weight
+  that will be computed. Current options are:
+
+  \describe{
+    \item{\code{"eem"}:}{
+    exponential energy marks (Stoyan and Grabarnik, 1991) 
+    computed by \code{\link{eem}}.
+    These are positive weights attached to the data points
+    (i.e. the points of the point pattern dataset
+    to which the model was fitted).
+    If the fitted model is correct, then the sum of these weights
+    for all data points in a spatial region \eqn{B}
+    has expected value equal to the
+    area of \eqn{B}. See \code{\link{eem}} for further explanation.
+  }
+  \item{\code{"raw"}, \code{"inverse"} or \code{"pearson"}:}{
+    point process residuals (Baddeley et al, 2005)
+    computed by the function \code{\link{residuals.ppm}}.
+    These are residuals attached both to the data points and to some
+    other points in the window of observation (namely, to the dummy
+    points of the quadrature scheme used to fit the model).
+    If the fitted model is correct, then the sum of the
+    residuals in a spatial region \eqn{B} has mean zero.
+    The options are
+    \itemize{
+      \item
+      \code{"raw"}: the raw residuals;
+      \item
+      \code{"inverse"}: the `inverse-lambda' residuals,
+      a counterpart of the exponential energy weights;
+      \item
+      \code{"pearson"}: the Pearson residuals.
+    }
+    See \code{\link{residuals.ppm}} for further explanation.
+  }
+  }
+
+  The argument \code{which} selects the type of plot that is
+  produced. Options are:
+  \describe{
+    \item{\code{"marks"}:}{
+      plot the residual measure.
+      For the exponential energy weights (\code{type="eem"})
+      this displays circles centred at the points of the data pattern,
+      with radii proportional to the exponential energy weights.
+      For the residuals (\code{type="raw"}, \code{type="inverse"}
+      or \code{type="pearson"}) this again displays circles centred at
+      the points of the data pattern with radii proportional to the
+      (positive) residuals, while the plotting of the negative residuals
+      depends on the argument \code{plot.neg}. If
+      \code{plot.neg="image"} then the negative part of the residual
+      measure, which is a density, is plotted as a colour image.
+      If \code{plot.neg="discrete"} then the discretised negative
+      residuals (obtained by approximately integrating the negative
+      density using the quadrature scheme of the fitted model)
+      are plotted as squares centred at the dummy points
+      with side lengths proportional to the (negative) residuals.
+      [To control the size of the circles and squares, use the argument
+      \code{maxsize}.]
+    }
+    \item{\code{"smooth"}:}{
+      plot a kernel-smoothed version of the residual measure.
+      Each data or dummy point is taken to have a `mass' equal to its
+      residual or exponential energy weight.
+      (Note that residuals can be negative).
+      This point mass is then replaced by
+      a bivariate isotropic Gaussian density
+      with standard deviation \code{sigma}.
+      The value of the smoothed residual field at
+      any point in the window is the sum of these weighted densities.
+      If the fitted model is correct, this smoothed field
+      should be flat, and its height should be close to 0
+      (for the residuals) or 1 (for the exponential energy weights).
+      The field is plotted either as an image, contour plot or
+      perspective view of a surface, according to the
+      argument \code{plot.smooth}.
+      The range of values of the smoothed field is printed
+      if the option \code{which="sum"} is also selected.
+    }
+    \item{\code{"x"}:}{
+      produce a `lurking variable' plot for the \eqn{x} coordinate.
+      This is a plot of \eqn{h(x)} against \eqn{x} (solid lines)
+      and of \eqn{E(h(x))} against \eqn{x} (dashed lines),
+      where \eqn{h(x)} is defined below, and \eqn{E(h(x))} denotes the
+      expectation of \eqn{h(x)} assuming the fitted model is true.
+      \itemize{
+      \item
+        if \code{cumulative=TRUE} then \eqn{h(x)} is the cumulative sum of
+	the weights or residuals for all points
+	which have \eqn{X} coordinate less than or equal to \eqn{x}.
+	For the residuals \eqn{E(h(x)) = 0},
+	and for the exponential energy weights
+	\eqn{E(h(x)) = } area of the subset of the window to the left of
+	the line \eqn{X=x}.
+      \item
+	if \code{cumulative=FALSE} then 
+	\eqn{h(x)} is the marginal integral of 
+	the smoothed residual field (see the case \code{which="smooth"}
+	described above) on the \eqn{x} axis. 
+	This is approximately the derivative
+	of the plot for \code{cumulative=TRUE}.
+	The value of \eqn{h(x)} is computed by summing the values of
+	the smoothed residual field over all pixels with
+	the given \eqn{x} coordinate. 
+	For the residuals \eqn{E(h(x)) = 0},
+	and for the exponential energy weights
+	\eqn{E(h(x)) = } length of the intersection between the
+	observation window and the line \eqn{X=x}.
+      }
+      If \code{plot.sd = TRUE}, then superimposed on the lurking variable
+      plot are the pointwise
+      two-standard-deviation error limits for \eqn{h(x)} calculated for the
+      inhomogeneous Poisson process. The default is \code{plot.sd = TRUE}
+      for Poisson models and \code{plot.sd = FALSE} for non-Poisson
+      models.
+    }
+    \item{\code{"y"}:}{
+      produce a similar lurking variable plot for the \eqn{y} coordinate.
+    }
+    \item{\code{"sum"}:}{
+      print the sum of the weights or residuals for all points
+      in the window (clipped by a margin \code{rbord} if required)
+      and the area of the same window. If the fitted model is correct
+      the sum of the exponential energy weights should equal the area of
+      the window, while the sum of the residuals should equal zero.
+      Also print the range of values of the smoothed field
+      displayed in the \code{"smooth"} case.
+    }
+    \item{\code{"all"}:}{
+      All four of the diagnostic plots listed above are plotted together
+      in a two-by-two display. Top left panel is \code{"marks"} plot.
+      Bottom right panel is \code{"smooth"} plot. Bottom left panel is
+      \code{"x"} plot. Top right panel is \code{"y"} plot, rotated 90 degrees.
+    }
+  }
+
+  The argument \code{rbord} ensures there are no edge
+  effects in the computation of the residuals. The diagnostic calculations
+  will be confined to those points of the data pattern which are
+  at least \code{rbord} units away from the edge of the window.
+  The value of \code{rbord} should be greater than or equal to
+  the range of interaction permitted in the model.
+
+  By default, the two-standard-deviation limits are calculated
+  from the exact formula for the asymptotic variance
+  of the residuals under the asymptotic normal approximation,
+  equation (37) of Baddeley et al (2008).
+  However, for compatibility with the original paper
+  of Baddeley et al (2005), if \code{oldstyle=TRUE},
+  the two-standard-deviation limits are calculated
+  using the innovation variance, an over-estimate of the true
+  variance of the residuals. (However, see the section about
+  Replicated Data).
+
+  The argument \code{rv} would normally be used only by experts.
+  It enables the user to substitute arbitrary values for the
+  residuals or marks, overriding the usual calculations.
+  If \code{rv} is present, then instead of calculating the residuals from
+  the fitted model, the algorithm takes the residuals from the object
+  \code{rv}, and plots them in the manner appropriate to the type of residual
+  or mark selected by \code{type}. If \code{type="eem"} then
+  \code{rv} should be similar to the return value of \code{\link{eem}},
+  namely, a numeric vector of length equal to
+  the number of points in the original data point pattern.
+  Otherwise, \code{rv} should be similar to the return value of
+  \code{\link{residuals.ppm}}, that is, it should be an object of
+  class \code{"msr"} (see \code{\link{msr}}) representing a signed
+  measure.
+
+  The return value of \code{diagnose.ppm}
+  is an object of class \code{"diagppm"}.
+  The \code{plot} method for this class is documented here.
+  There is also a \code{print} method. See the Examples.
+
+  In \code{plot.diagppm},
+  if a four-panel diagnostic plot is produced (the default), then
+  the extra arguments \code{xlab}, \code{ylab}, \code{rlab} determine the
+  text labels for the \eqn{x} and \eqn{y} coordinates
+  and the residuals, respectively.
+  The undocumented arguments \code{col.neg} and \code{col.smooth}
+  control the colour maps used in the top left and bottom right
+  panels respectively.
+  
+  See also the companion functions \code{\link{qqplot.ppm}}, which produces a
+  Q-Q plot of the residuals, and \code{\link{lurking}}, which produces
+  lurking variable plots for any spatial covariate.
+}
+\section{Replicated Data}{
+  Note that if \code{object} is a model that was obtained by
+  first fitting a model to replicated point pattern data using
+  \code{\link{mppm}} and then using \code{\link{subfits}} to extract
+  a model for one of the individual point patterns, then the
+  variance calculations are only implemented for the
+  innovation variance (\code{oldstyle=TRUE}) and this is the default
+  in such cases.
+}
+\references{
+  Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005)
+  Residual analysis for spatial point processes.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{67}, 617--666.
+
+  Baddeley, A., \Moller, J. and Pakes, A.G. (2008) 
+  Properties of residuals for spatial point processes.
+  \emph{Annals of the Institute of Statistical Mathematics}
+  \bold{60}, 627--649.
+  
+  Stoyan, D. and Grabarnik, P. (1991)
+  Second-order characteristics for stochastic structures connected with
+  Gibbs point processes.
+  \emph{Mathematische Nachrichten} \bold{151}, 95--100.
+}
+\seealso{
+ \code{\link{residuals.ppm}},
+ \code{\link{eem}},
+ \code{\link{ppm.object}},
+ \code{\link{qqplot.ppm}},
+ \code{\link{lurking}},
+ \code{\link{ppm}}
+}
+\examples{
+    fit <- ppm(cells ~x, Strauss(r=0.15))
+    diagnose.ppm(fit)
+    \dontrun{
+    diagnose.ppm(fit, type="pearson")
+    }
+
+    diagnose.ppm(fit, which="marks")
+
+    diagnose.ppm(fit, type="raw", plot.neg="discrete")
+
+    diagnose.ppm(fit, type="pearson", which="smooth")
+
+    # save the diagnostics and plot them later
+    u <- diagnose.ppm(fit, rbord=0.15, plot.it=FALSE)
+    \dontrun{
+    plot(u)
+    plot(u, which="marks")
+    }
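+    # a sketch combining documented arguments: lurking variable plot
+    # for the x coordinate, using the marginal integral of the
+    # smoothed residual field (cumulative=FALSE) instead of
+    # cumulative sums
+    diagnose.ppm(fit, which="x", cumulative=FALSE)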
+}
+\author{
+  \adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{models}
+\keyword{hplot}
diff --git a/man/diameter.Rd b/man/diameter.Rd
new file mode 100644
index 0000000..32bbf00
--- /dev/null
+++ b/man/diameter.Rd
@@ -0,0 +1,44 @@
+\name{diameter}
+\alias{diameter}
+\title{Diameter of an Object}
+\description{
+  Computes the diameter of an object such as a two-dimensional window
+  or three-dimensional box. 
+}
+\usage{
+ diameter(x)
+}
+\arguments{
+  \item{x}{
+    A window or other object whose diameter will be computed.
+  }
+}
+\value{
+  The numerical value of the diameter of the object.
+}
+\details{
+  This function computes the diameter of an object
+  such as a two-dimensional window or a three-dimensional box. 
+  The diameter is the maximum distance 
+  between any two points in the object.
+
+  The function \code{diameter} is generic, with methods for
+  the class \code{"owin"} (two-dimensional windows),
+  \code{"box3"} (three-dimensional boxes),
+  \code{"boxx"} (multi-dimensional boxes)
+  and \code{"linnet"} (linear networks).
+}
+\seealso{
+  \code{\link{diameter.owin}},
+  \code{\link{diameter.box3}},
+  \code{\link{diameter.boxx}},
+  \code{\link{diameter.linnet}}
+}
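+\examples{
+  # a minimal sketch of the generic dispatching on two classes
+  diameter(owin(c(0,1), c(0,1)))          # unit square: sqrt(2)
+  diameter(box3(c(0,2), c(0,3), c(0,1)))  # 3D box: sqrt(4+9+1)
+}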
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/diameter.box3.Rd b/man/diameter.box3.Rd
new file mode 100644
index 0000000..50addf6
--- /dev/null
+++ b/man/diameter.box3.Rd
@@ -0,0 +1,82 @@
+\name{diameter.box3} 
+\Rdversion{1.1}
+\alias{diameter.box3}
+\alias{volume.box3}
+\alias{shortside.box3}
+\alias{sidelengths.box3}
+\alias{eroded.volumes.box3}
+\alias{shortside}
+\alias{sidelengths}
+\alias{eroded.volumes}
+\title{
+  Geometrical Calculations for Three-Dimensional Box
+}
+\description{
+  Calculates the volume, diameter, shortest side, side lengths, 
+  or eroded volume of a three-dimensional box.
+}
+\usage{
+\method{diameter}{box3}(x)
+
+\method{volume}{box3}(x)
+
+shortside(x)
+sidelengths(x)
+eroded.volumes(x, r)
+
+\method{shortside}{box3}(x)
+
+\method{sidelengths}{box3}(x)
+
+\method{eroded.volumes}{box3}(x, r)
+}
+\arguments{
+  \item{x}{
+    Three-dimensional box (object of class \code{"box3"}).
+  }
+  \item{r}{
+    Numeric value or vector of numeric values for which eroded volumes
+    should be calculated.
+  }
+}
+\details{
+  \code{diameter.box3} computes the diameter of the box.
+  \code{volume.box3} computes the volume of the box.
+  \code{shortside.box3} finds the shortest of the three side lengths
+  of the box.
+  \code{sidelengths.box3} returns all three side lengths
+  of the box.
+
+  \code{eroded.volumes} computes, for each entry \code{r[i]},
+  the volume of the smaller box obtained by removing a slab of
+  thickness \code{r[i]} from each face of the box. This smaller box is
+  the subset consisting of points that lie at least \code{r[i]} units
+  away from the boundary of the box.
+}
+\value{
+  For \code{diameter.box3}, \code{shortside.box3} and
+  \code{volume.box3}, a single numeric
+  value. For \code{sidelengths.box3}, a vector of three numbers.
+  For \code{eroded.volumes}, a numeric vector of the same length
+  as \code{r}.  
+}
+\author{\adrian and \rolf}
+\seealso{
+  \code{\link{as.box3}}
+}
+\examples{
+    X <- box3(c(0,10),c(0,10),c(0,5))
+    diameter(X) 
+    volume(X)
+    sidelengths(X)
+    shortside(X)
+    hd <- shortside(X)/2
+    eroded.volumes(X, seq(0,hd, length=10))
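+    # an added sanity check: eroding by half the shortest side
+    # shrinks the box to zero volume
+    eroded.volumes(X, hd)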
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/diameter.boxx.Rd b/man/diameter.boxx.Rd
new file mode 100644
index 0000000..59f6950
--- /dev/null
+++ b/man/diameter.boxx.Rd
@@ -0,0 +1,74 @@
+\name{diameter.boxx} 
+\Rdversion{1.1}
+\alias{diameter.boxx}
+\alias{volume.boxx}
+\alias{shortside.boxx}
+\alias{sidelengths.boxx}
+\alias{eroded.volumes.boxx}
+\title{
+  Geometrical Calculations for Multi-Dimensional Box
+}
+\description{
+  Calculates the volume, diameter, shortest side,
+  side lengths, or eroded volume of a multi-dimensional box.
+}
+\usage{
+\method{diameter}{boxx}(x)
+
+\method{volume}{boxx}(x)
+
+\method{shortside}{boxx}(x)
+
+\method{sidelengths}{boxx}(x)
+
+\method{eroded.volumes}{boxx}(x, r)
+}
+\arguments{
+  \item{x}{
+    Multi-dimensional box (object of class \code{"boxx"}).
+  }
+  \item{r}{
+    Numeric value or vector of numeric values for which eroded volumes
+    should be calculated.
+  }
+}
+\details{
+  \code{diameter.boxx}, 
+  \code{volume.boxx} and \code{shortside.boxx}
+  compute the diameter, volume and shortest side length of the box.
+  \code{sidelengths.boxx} returns the lengths of each side of the box.
+  
+  \code{eroded.volumes.boxx} computes, for each entry \code{r[i]},
+  the volume of the smaller box obtained by removing a slab of
+  thickness \code{r[i]} from each face of the box. This smaller box is
+  the subset consisting of points that lie at least \code{r[i]} units
+  away from the boundary of the box.
+}
+\value{
+  For \code{diameter.boxx}, \code{shortside.boxx} and
+  \code{volume.boxx}, a single numeric value.
+  For \code{sidelengths.boxx}, a numeric vector of length equal to
+  the number of spatial dimensions.
+  For \code{eroded.volumes.boxx}, a numeric vector of the same length
+  as \code{r}.  
+}
+\author{\adrian and \rolf}
+\seealso{
+  \code{\link{boxx}}
+}
+\examples{
+    X <- boxx(c(0,10),c(0,10),c(0,5),c(0,2))
+    diameter(X) 
+    volume(X)
+    shortside(X)
+    sidelengths(X)
+    hd <- shortside(X)/2
+    eroded.volumes(X, seq(0,hd, length=10))
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/diameter.linnet.Rd b/man/diameter.linnet.Rd
new file mode 100644
index 0000000..0b1892c
--- /dev/null
+++ b/man/diameter.linnet.Rd
@@ -0,0 +1,52 @@
+\name{diameter.linnet}
+\alias{boundingradius.linnet}
+\alias{diameter.linnet}
+\title{
+  Diameter and Bounding Radius of a Linear Network
+}
+\description{
+  Compute the diameter or bounding radius of a linear network
+  measured using the shortest path distance.
+}
+\usage{
+\method{diameter}{linnet}(x)
+
+\method{boundingradius}{linnet}(x, \dots)
+}
+\arguments{
+  \item{x}{
+    Linear network (object of class \code{"linnet"}).
+  }
+  \item{\dots}{Ignored.}
+}
+\details{
+  The diameter of a linear network (in the shortest path distance)
+  is the maximum value of the shortest-path distance between any
+  two points \eqn{u} and \eqn{v} on the network.
+
+  The bounding radius of a linear network (in the shortest path distance)
+  is the minimum value, over all points \eqn{u} on the network,
+  of the maximum shortest-path distance from \eqn{u} to another point
+  \eqn{v} on the network.
+  
+  The functions \code{\link{boundingradius}} and
+  \code{\link{diameter}} are generic;
+  the functions \code{boundingradius.linnet} and \code{diameter.linnet}
+  are the methods for objects of class \code{linnet}.
+}
+\value{
+  A single numeric value.
+}
+\author{
+  \adrian
+}
+\seealso{
+ \code{\link{boundingradius}}, \code{\link{diameter}}, 
+ \code{\link{linnet}}
+}
+\examples{
+   diameter(simplenet)
+   boundingradius(simplenet)
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/diameter.owin.Rd b/man/diameter.owin.Rd
new file mode 100644
index 0000000..cad0b6d
--- /dev/null
+++ b/man/diameter.owin.Rd
@@ -0,0 +1,52 @@
+\name{diameter.owin}  
+\alias{diameter.owin}
+\title{Diameter of a Window}
+\description{
+  Computes the diameter of a window.
+}
+\usage{
+ \method{diameter}{owin}(x)
+}
+\arguments{
+  \item{x}{
+    A window whose diameter will be computed.
+  }
+}
+\value{
+  The numerical value of the diameter of the window. 
+}
+\details{
+  This function computes the 
+  diameter of a window of arbitrary shape,
+  i.e. the maximum distance between any two points
+  in the window.
+
+  The argument \code{x} should be a window (an object of class
+  \code{"owin"}, see \code{\link{owin.object}} for details)
+  or can be given in any format acceptable to \code{\link{as.owin}()}.
+  
+  The function \code{diameter} is generic. This function is the
+  method for the class \code{"owin"}.
+}
+\seealso{
+  \code{\link{area.owin}},
+  \code{\link{perimeter}},
+  \code{\link{edges}},
+  \code{\link{owin}},
+  \code{\link{as.owin}}
+}
+\examples{
+  w <- owin(c(0,1),c(0,1))
+  diameter(w) 
+  # returns sqrt(2)
+  data(letterR)
+  diameter(letterR)
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/dilated.areas.Rd b/man/dilated.areas.Rd
new file mode 100644
index 0000000..c39221d
--- /dev/null
+++ b/man/dilated.areas.Rd
@@ -0,0 +1,79 @@
+\name{dilated.areas}
+\Rdversion{1.1}
+\alias{dilated.areas}
+\title{
+  Areas of Morphological Dilations
+}
+\description{
+  Computes the areas of successive morphological dilations.
+}
+\usage{
+  dilated.areas(X, r, W=as.owin(X), ..., constrained=TRUE, exact = FALSE)
+}
+\arguments{
+  \item{X}{
+    Object to be dilated.
+    A point pattern (object of class \code{"ppp"}),
+    a line segment pattern (object of class \code{"psp"}),
+    or a window (object of class \code{"owin"}).
+  }
+  \item{r}{
+    Numeric vector of radii for the dilations. 
+  }
+  \item{W}{
+    Window (object of class \code{"owin"}) inside which the areas
+    will be computed, if \code{constrained=TRUE}.
+  }
+  \item{\dots}{Ignored.}
+  \item{constrained}{
+    Logical flag indicating whether areas should be restricted
+    to the window \code{W}.
+  }
+  \item{exact}{
+    Logical flag indicating whether areas should be computed
+    using analytic geometry (which is slower but more accurate).
+    Currently available only when \code{X} is a point pattern.
+  }
+}
+\details{
+  This function computes the areas of the dilations of \code{X}
+  by each of the radii \code{r[i]}. Areas may also be computed
+  inside a specified window \code{W}.
+
+  The morphological dilation of a set \eqn{X} by a distance \eqn{r > 0}
+  is the subset 
+  consisting of all points \eqn{x}{x} such that the
+  distance from \eqn{x} to \eqn{X} is less than 
+  or equal to \eqn{r}.
+
+  When \code{X} is a point pattern, the dilation by a distance
+  \eqn{r} is the union of
+  discs of radius \eqn{r} centred at the points of \code{X}.
+
+  The argument \code{r} should be a vector of nonnegative numbers.
+
+  If \code{exact=TRUE} and if \code{X} is a point pattern,
+  then the areas are computed using analytic geometry, which is
+  slower but much more accurate. Otherwise the computation is performed
+  using \code{\link{distmap}}.
+
+  To compute the dilated object itself, use \code{\link{dilation}}.
+}
+\seealso{
+  \code{\link{owin}},
+  \code{\link{as.owin}},
+  \code{\link{dilation}},
+  \code{\link{eroded.areas}}
+}
+\examples{
+  X <- runifpoint(10)
+  a <- dilated.areas(X, c(0.1,0.2), W=square(1), exact=TRUE)
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/dilation.Rd b/man/dilation.Rd
new file mode 100644
index 0000000..7ce6807
--- /dev/null
+++ b/man/dilation.Rd
@@ -0,0 +1,83 @@
+\name{dilation} 
+\alias{dilation}
+\alias{dilation.owin}
+\alias{dilation.ppp}
+\alias{dilation.psp}
+\title{Morphological Dilation}
+\description{
+  Perform morphological dilation of a window, a line segment pattern
+  or a point pattern.
+}
+\usage{
+ dilation(w, r, \dots)
+ \method{dilation}{owin}(w, r, \dots, polygonal=NULL, tight=TRUE)
+ \method{dilation}{ppp}(w, r, \dots, polygonal=TRUE, tight=TRUE)
+ \method{dilation}{psp}(w, r, \dots, polygonal=TRUE, tight=TRUE)
+}
+\arguments{
+  \item{w}{
+    A window (object of class \code{"owin"}),
+    a line segment pattern (object of class \code{"psp"})
+    or a point pattern (object of class \code{"ppp"}).
+  }
+  \item{r}{positive number: the radius of dilation.}
+  \item{\dots}{extra arguments passed to \code{\link{as.mask}}
+    controlling the pixel resolution, if the pixel approximation is
+    used; or passed to \code{\link{disc}} if the polygonal approximation
+    is used.
+  }
+  \item{polygonal}{
+    Logical flag indicating whether to compute a polygonal
+    approximation to the dilation (\code{polygonal=TRUE}) or
+    a pixel grid approximation (\code{polygonal=FALSE}).
+  }
+  \item{tight}{
+    Logical flag indicating whether the bounding frame of the window
+    should be taken as the smallest rectangle enclosing the dilated region
+    (\code{tight=TRUE}), or should be the
+    dilation of the bounding frame of \code{w} (\code{tight=FALSE}).
+  }
+}
+\value{
+  If \code{r > 0}, an object of class \code{"owin"} representing the
+  dilated region. If \code{r=0}, the result is identical to \code{w}.
+}
+\details{
+  The morphological dilation of a set \eqn{W} by a distance \eqn{r > 0}
+  is the set consisting of all points lying at most \eqn{r} units
+  away from \eqn{W}. Effectively, dilation adds a margin of width
+  \eqn{r} onto the set \eqn{W}.
+
+  If \code{polygonal=TRUE} then a polygonal approximation
+  to the dilation is computed.
+  If \code{polygonal=FALSE} then a pixel approximation
+  to the dilation is computed from the distance map of \code{w}.
+  The arguments \code{"\dots"} are passed to \code{\link{as.mask}}
+  to control the pixel resolution.
+
+  When \code{w} is a window, the default (when \code{polygonal=NULL})
+  is to compute a polygonal approximation if
+  \code{w} is a rectangle or polygonal window, and to compute a
+  pixel approximation if \code{w} is a window of type \code{"mask"}.
+}
+\seealso{
+  \code{\link{erosion}} for the opposite operation.
+  
+  \code{\link{dilationAny}} for morphological dilation using any shape.
+  
+  \code{\link{owin}},
+  \code{\link{as.owin}}
+}
+\examples{
+  plot(dilation(letterR, 0.2))
+  plot(letterR, add=TRUE, lwd=2, border="red")
+
+  X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  plot(dilation(X, 0.1))
+  plot(X, add=TRUE, col="red")
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/dim.detpointprocfamily.Rd b/man/dim.detpointprocfamily.Rd
new file mode 100644
index 0000000..7bee530
--- /dev/null
+++ b/man/dim.detpointprocfamily.Rd
@@ -0,0 +1,22 @@
+\name{dim.detpointprocfamily}
+\alias{dim.detpointprocfamily}
+\title{Dimension of Determinantal Point Process Model}
+\description{Extracts the dimension of a determinantal point process model.}
+\usage{
+  \method{dim}{detpointprocfamily}(x)
+}
+\arguments{
+  \item{x}{object of class \code{"detpointprocfamily"}.}
+}
+\value{A numeric value (or \code{NULL} if the dimension of the model is unspecified).}
+\author{\adrian, \rolf and \ege}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/dimhat.Rd b/man/dimhat.Rd
new file mode 100644
index 0000000..9cb1bd0
--- /dev/null
+++ b/man/dimhat.Rd
@@ -0,0 +1,53 @@
+\name{dimhat}
+\alias{dimhat}
+\title{
+  Estimate Dimension of Central Subspace 
+}
+\description{
+  Given the kernel matrix that characterises a central subspace,
+  this function estimates the dimension of the subspace.
+}
+\usage{
+  dimhat(M)
+}
+\arguments{
+  \item{M}{
+    Kernel of subspace. A symmetric, non-negative definite, numeric
+    matrix, typically obtained from \code{\link{sdr}}.
+  }
+}
+\details{
+  This function computes the maximum descent estimate of
+  the dimension of the central subspace with a given kernel matrix \code{M}.
+
+  The matrix \code{M} should be the kernel matrix of a central subspace,
+  which can be obtained from \code{\link{sdr}}. It must be a symmetric,
+  non-negative-definite, numeric matrix.
+
+  The algorithm finds the eigenvalues
+  \eqn{\lambda_1 \ge \ldots \ge \lambda_n}{lambda[1] >= ... >= lambda[n]}
+  of \eqn{M},
+  and then determines the index \eqn{k} for which
+  \eqn{\lambda_k/\lambda_{k-1}}{lambda[k]/lambda[k-1]} is greatest.
+}
+\value{
+  A single integer giving the estimated dimension.
+}
+\seealso{
+  \code{\link{sdr}}, \code{\link{subspaceDistance}}
+}
+\references{
+  Guan, Y. and Wang, H. (2010)
+  Sufficient dimension reduction for spatial point
+  processes directed by Gaussian random fields.
+  \emph{Journal of the Royal Statistical Society, Series B},
+  \bold{72}, 367--387.
+}
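+\examples{
+  # a minimal synthetic sketch: M need only be symmetric and
+  # non-negative definite (here random, not obtained from sdr)
+  A <- matrix(rnorm(50), 10, 5)
+  M <- crossprod(A)   # 5 x 5, symmetric, non-negative definite
+  dimhat(M)
+}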
+\author{
+  Matlab original by Yongtao Guan,
+  translated to \R by Suman Rakshit.
+}
+\keyword{array}
+\keyword{algebra}
+\keyword{multivariate}
+
diff --git a/man/dirichlet.Rd b/man/dirichlet.Rd
new file mode 100644
index 0000000..13575e9
--- /dev/null
+++ b/man/dirichlet.Rd
@@ -0,0 +1,58 @@
+\name{dirichlet}
+\alias{dirichlet}
+\title{Dirichlet Tessellation of Point Pattern}
+\description{
+  Computes the Dirichlet tessellation of a spatial point pattern.
+  Also known as the Voronoi or Thiessen tessellation.
+}
+\usage{
+dirichlet(X)
+}
+\arguments{
+  \item{X}{Spatial point pattern (object of class \code{"ppp"}).}
+}
+\details{
+  In a spatial point pattern \code{X}, the Dirichlet tile associated
+  with a particular point \code{X[i]} is the region of space that is
+  closer to \code{X[i]} than to any other point in \code{X}. The
+  Dirichlet tiles divide the two-dimensional plane into disjoint
+  regions, forming a tessellation.
+
+  The Dirichlet tessellation is also known as the Voronoi or
+  Thiessen tessellation.
+
+  This function computes the Dirichlet tessellation (within the original
+  window of \code{X}) using the function \code{\link[deldir]{deldir}}
+  in the package \pkg{deldir}.
+
+  To ensure that there is a one-to-one correspondence between the
+  points of \code{X} and the tiles of \code{dirichlet(X)},
+  duplicated points in \code{X} should first be removed by
+  \code{X <- unique(X, rule="deldir")}. 
+  
+  The tiles of the tessellation will be computed as polygons
+  if the original window is a rectangle or a polygon.
+  Otherwise the tiles will be computed as binary masks. 
+}
+\value{
+  A tessellation (object of class \code{"tess"}).
+}
+\seealso{
+  \code{\link{tess}},
+  \code{\link{delaunay}},
+  \code{\link{ppp}},
+  \code{\link{dirichletVertices}}
+}
+\examples{
+  X <- runifpoint(42)
+  plot(dirichlet(X))
+  plot(X, add=TRUE)
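+  # as recommended in the Details, remove duplicated points first
+  # so that tiles correspond one-to-one with points
+  # (a sketch; this X has no duplicates, so unique() is a no-op)
+  Y <- unique(X, rule="deldir")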
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/dirichletAreas.Rd b/man/dirichletAreas.Rd
new file mode 100644
index 0000000..b6463bd
--- /dev/null
+++ b/man/dirichletAreas.Rd
@@ -0,0 +1,50 @@
+\name{dirichletAreas}
+\alias{dirichletAreas}
+\title{
+  Compute Areas of Tiles in Dirichlet Tessellation
+}
+\description{
+  Calculates the area of each tile in the 
+  Dirichlet-Voronoi tessellation of a point pattern.
+}
+\usage{
+dirichletAreas(X)
+}
+\arguments{
+  \item{X}{
+    Point pattern (object of class \code{"ppp"}).
+  }
+}
+\details{
+  This is an efficient algorithm to calculate the areas
+  of the tiles in the Dirichlet-Voronoi tessellation.
+
+  If the window of \code{X} is a binary pixel mask, the
+  tile areas are computed by counting pixels. Otherwise the
+  areas are computed exactly using analytic geometry.
+
+  If any points of \code{X} are duplicated, the duplicates will
+  have tile area zero.
+}
+\value{
+  Numeric vector with one entry for each point of \code{X}.
+}
+\author{\adrian, \rolf and \ege}
+\seealso{
+\code{\link{dirichlet}},
+\code{\link{dirichletVertices}}
+}
+\examples{
+ aa <- dirichletAreas(cells)
+}
+\keyword{spatial}
+\keyword{math}
+\keyword{manip}
diff --git a/man/dirichletVertices.Rd b/man/dirichletVertices.Rd
new file mode 100644
index 0000000..f10951e
--- /dev/null
+++ b/man/dirichletVertices.Rd
@@ -0,0 +1,71 @@
+\name{dirichletVertices}
+\alias{dirichletVertices}
+\alias{dirichletEdges}
+\title{
+   Vertices and Edges of Dirichlet Tessellation
+}
+\description{
+  Computes the Dirichlet-Voronoi tessellation of a point pattern
+  and extracts the vertices or edges of the tiles.
+}
+\usage{
+dirichletVertices(X)
+
+dirichletEdges(X)
+}
+\arguments{
+  \item{X}{
+    Point pattern (object of class \code{"ppp"}).
+  }
+}
+\details{
+  These functions compute the Dirichlet-Voronoi tessellation of \code{X}
+  (see \code{\link{dirichlet}})
+  and extract the vertices or edges of the tiles of the tessellation.
+
+  The Dirichlet vertices are the spatial locations which are locally
+  farthest away from \code{X}, that is, where the distance function
+  of \code{X} reaches a local maximum.
+
+  The Dirichlet edges are the dividing lines equally distant between
+  a pair of points of \code{X}.
+
+  The Dirichlet tessellation of \code{X} is computed
+  using \code{\link{dirichlet}}. The vertices or edges of all
+  tiles of the tessellation are extracted.
+
+  For \code{dirichletVertices}, any vertex which
+  lies on the boundary of the window of \code{X} is deleted.
+  The remaining vertices are returned, as a point pattern,
+  without duplicated entries.
+}
+\value{
+  \code{dirichletVertices}
+  returns a point pattern (object of class \code{"ppp"}) in the same window
+  as \code{X}.
+
+  \code{dirichletEdges} returns a line segment pattern (object of
+  class \code{"psp"}).
+}
+\seealso{
+ \code{\link{dirichlet}},
+ \code{\link{dirichletAreas}}
+}
+\examples{
+  plot(dirichlet(cells))
+
+  plot(dirichletVertices(cells), add=TRUE)
+
+  ed <- dirichletEdges(cells)
+}
+\author{\adrian, \rolf and \ege}
+\keyword{spatial}
+\keyword{math}
+\keyword{manip}
diff --git a/man/dirichletWeights.Rd b/man/dirichletWeights.Rd
new file mode 100644
index 0000000..5570298
--- /dev/null
+++ b/man/dirichletWeights.Rd
@@ -0,0 +1,61 @@
+\name{dirichletWeights}
+\alias{dirichletWeights}
+\title{Compute Quadrature Weights Based on Dirichlet Tessellation}
+\description{
+  Computes quadrature weights for a given set of points,
+  using the areas of tiles in the Dirichlet tessellation.
+}
+\usage{
+ dirichletWeights(X, window=NULL, exact=TRUE, \dots)
+}
+\arguments{
+  \item{X}{Data defining a point pattern.}
+  \item{window}{Default window for the point pattern.}
+  \item{exact}{Logical value. If \code{TRUE}, compute exact areas
+    using the package \pkg{deldir}. If \code{FALSE}, compute
+    approximate areas using a pixel raster.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\value{
+  Vector of nonnegative weights for each point in \code{X}.
+}
+\details{
+  This function computes a set of quadrature weights
+  for a given pattern of points
+  (typically comprising both ``data'' and ``dummy'' points).
+  See \code{\link{quad.object}} for an explanation of quadrature
+  weights and quadrature schemes.
+
+  The weights are computed using the Dirichlet tessellation.
+  First \code{X} and (optionally) \code{window} are converted into a
+  point pattern object. Then the Dirichlet tessellation of the points
+  of \code{X} is computed.
+  The weight attached to a point of \code{X} is the area of
+  its Dirichlet tile (inside the window \code{Window(X)}).
+
+  If \code{exact=TRUE} the Dirichlet tessellation is computed exactly
+  by the Lee-Schachter algorithm using the package \pkg{deldir}.
+  Otherwise a pixel raster approximation is constructed and the areas
+  are approximations to the true weights. In all cases the sum of the
+  weights is equal to the area of the window.
+}
+\seealso{
+  \code{\link{quad.object}},
+  \code{\link{gridweights}}
+}
+\examples{
+  Q <- quadscheme(runifpoispp(10))
+  X <- as.ppp(Q) # data and dummy points together
+  w <- dirichletWeights(X, exact=FALSE)
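+  # as noted in the Details, the weights sum to the window area
+  # (approximately here, since exact=FALSE)
+  sum(w)
+  area(Window(X))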
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{utilities}
diff --git a/man/disc.Rd b/man/disc.Rd
new file mode 100644
index 0000000..b18dba4
--- /dev/null
+++ b/man/disc.Rd
@@ -0,0 +1,85 @@
+\name{disc}
+\alias{disc}
+\title{Circular Window}
+\description{
+Creates a circular window
+}
+\usage{
+ disc(radius=1, centre=c(0,0), \dots, mask=FALSE, npoly=128, delta=NULL)
+}
+\arguments{
+  \item{radius}{Radius of the circle.}
+  \item{centre}{The centre of the circle.}
+  \item{mask}{Logical flag controlling the type of approximation
+    to a perfect circle. See Details.
+  }
+  \item{npoly}{Number of edges of the polygonal approximation,
+    if \code{mask=FALSE}. Incompatible with \code{delta}.
+  }
+  \item{delta}{
+    Tolerance of polygonal approximation: the length of arc
+    that will be replaced by one edge of the polygon.
+    Incompatible with \code{npoly}.
+  }
+  \item{\dots}{Arguments passed to \code{as.mask} determining the
+    pixel resolution, if \code{mask=TRUE}.
+  }
+}
+\value{
+  An object of class \code{"owin"} (see \code{\link{owin.object}})
+  specifying a window. 
+}
+\details{
+  This command creates a window object
+  representing a disc, with the given radius and centre.
+
+  By default, the circle is
+  approximated by a polygon with \code{npoly} edges.
+
+  If \code{mask=TRUE}, then the disc is approximated by a binary pixel
+  mask. The resolution of the mask is controlled by
+  the arguments \code{\dots} which are passed to \code{\link{as.mask}}.
+
+  The argument \code{radius} must be a single positive number.
+  The argument \code{centre} specifies the disc centre: it can be either 
+  a numeric vector of length 2 giving the coordinates,
+  or a \code{list(x,y)} giving the coordinates of exactly one point, or a
+  point pattern (object of class \code{"ppp"}) containing exactly one point.
+}
+\seealso{
+  \code{\link{ellipse}},
+  \code{\link{discs}},
+  \code{\link{owin.object}},
+  \code{\link{owin}},
+  \code{\link{as.mask}}
+}
+\note{This function can also be used to generate regular polygons,
+  by setting \code{npoly} to a small integer value. For example
+  \code{npoly=5} generates a pentagon and \code{npoly=13} a triskaidecagon.
+}
+\examples{
+ # unit disc
+ W <- disc()
+ # disc of radius 3 centred at x=10, y=5
+ W <- disc(3, c(10,5))
+ #
+ plot(disc())
+ plot(disc(mask=TRUE))
+ # nice smooth circle
+ plot(disc(npoly=256))
+ # how to control the resolution of the mask
+ plot(disc(mask=TRUE, dimyx=256))
+ # check accuracy of approximation
+ area(disc())/pi
+ area(disc(mask=TRUE))/pi
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{datagen}
+ 
+ 
diff --git a/man/discpartarea.Rd b/man/discpartarea.Rd
new file mode 100644
index 0000000..739c58d
--- /dev/null
+++ b/man/discpartarea.Rd
@@ -0,0 +1,74 @@
+\name{discpartarea}
+\Rdversion{1.1}
+\alias{discpartarea}
+\title{
+  Area of Part of Disc
+}
+\description{
+  Compute area of intersection between a disc and a window  
+}
+\usage{
+discpartarea(X, r, W=as.owin(X))
+}
+\arguments{
+  \item{X}{
+    Point pattern (object of class \code{"ppp"})
+    specifying the centres of the discs.
+    Alternatively, \code{X} may be in any format
+    acceptable to \code{\link{as.ppp}}.
+  }
+  \item{r}{
+    Matrix, vector or numeric value specifying the
+    radii of the discs.
+  }
+  \item{W}{
+    Window (object of class \code{"owin"}) with which the
+    discs should be intersected.
+  }
+}
+\details{
+  This algorithm computes the exact area of the intersection between
+  a window \code{W} and a disc (or each of several discs).
+  The centres of the discs are specified by the point pattern
+  \code{X}, and their radii are specified by \code{r}.
+
+  If \code{r} is a single numeric value, then the algorithm computes the
+  area of intersection between \code{W} and the disc of radius \code{r} centred
+  at each point of \code{X}, and returns a one-column matrix
+  containing one entry for each point of \code{X}.
+
+  If \code{r} is a vector of length \code{m}, then the algorithm
+  returns an \code{n * m} matrix in which the entry on row \code{i},
+  column \code{j} is the area of the
+  intersection between \code{W} and the disc centred at \code{X[i]}
+  with radius \code{r[j]}.
+  
+  If \code{r} is a matrix, it should have one row for each point in
+  \code{X}. The algorithm
+  returns a matrix in which the entry on row \code{i},
+  column \code{j} is the area of the
+  intersection between \code{W} and the disc centred at \code{X[i]}
+  with radius \code{r[i,j]}.
+
+  Areas are computed by analytic geometry.
+}
+\value{
+  Numeric matrix, with one row for each point of \code{X}.
+}
+\seealso{
+  \code{\link{owin}},
+  \code{\link{disc}}
+}
+\examples{
+  data(letterR)
+  X <- runifpoint(3, letterR)
+  discpartarea(X, 0.2)
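+  # a vector of radii gives a matrix with one column per radius
+  discpartarea(X, c(0.1, 0.2, 0.3))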
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/discretise.Rd b/man/discretise.Rd
new file mode 100644
index 0000000..d06bd24
--- /dev/null
+++ b/man/discretise.Rd
@@ -0,0 +1,89 @@
+\name{discretise}
+\alias{discretise}
+\title{
+  Safely Convert Point Pattern Window to Binary Mask
+}
+\description{
+  Given a point pattern, discretise its window by converting it to a
+  binary pixel mask, adjusting the mask so that it still contains all
+  the points.
+}
+\usage{
+  discretise(X, eps = NULL, dimyx = NULL, xy = NULL)
+}
+\arguments{
+  \item{X}{A point pattern (object of class \code{"ppp"}) to be converted.}
+  \item{eps}{(optional) width and height of each pixel}
+  \item{dimyx}{(optional) pixel array dimensions}
+  \item{xy}{(optional) pixel coordinates}
+}
+\details{
+  This function modifies the point pattern \code{X} by converting its
+  observation window \code{Window(X)} to a binary pixel image (a window
+  of type \code{"mask"}). It ensures that no points of \code{X} are
+  deleted by the discretisation.
+
+  The window is first discretised using \code{\link{as.mask}}. 
+  It can happen that points of \code{X} that were inside the original
+  window may fall outside the new mask.
+  The \code{discretise} function corrects this by augmenting the mask
+  (so that the mask includes any pixel that contains a point of the pattern).
+
+  The arguments \code{eps}, \code{dimyx} and \code{xy}
+  control the fineness of the pixel array. They are passed to
+  \code{\link{as.mask}}.
+  
+  If \code{eps}, \code{dimyx} and \code{xy} are all absent or
+  \code{NULL}, and if the window of \code{X} is of type \code{"mask"}
+  to start with, then \code{discretise(X)} returns \code{X} unchanged.
+
+  See \code{\link{as.mask}} for further details
+  about the arguments \code{eps}, \code{dimyx},
+  and \code{xy}, and the process of converting
+  a window to one of type \code{mask}.
+}
+\section{Error checking}{
+  Before doing anything, \code{discretise} checks that
+  all the points of the pattern are actually
+  inside the original window.  This is guaranteed to
+  be the case if the pattern was constructed using \code{\link{ppp}}
+  or \code{\link{as.ppp}}. However anomalies are possible if the
+  point pattern was created or manipulated inappropriately.
+  These will cause an error.
+}
+\value{
+  A point pattern (object of class \code{"ppp"}),
+  identical to \code{X}, except that
+  its observation window has been converted to one
+  of type \code{mask}. 
+}
+\author{\adrian and \rolf}
+\seealso{
+  \code{\link{as.mask}}
+}
+\examples{
+  data(demopat)
+  X <- demopat
+  plot(X, main="original pattern")
+  Y <- discretise(X, dimyx=50)
+  plot(Y, main="discretise(X)")
+  stopifnot(npoints(X) == npoints(Y))
+
+  # what happens if we just convert the window to a mask?
+  W <- Window(X)
+  M <- as.mask(W, dimyx=50)
+  plot(M, main="window of X converted to mask")
+  plot(X, add=TRUE, pch=16)
+  plot(X[M], add=TRUE, pch=1, cex=1.5)
+  XM <- X[M]
+  cat(paste(npoints(X) - npoints(XM), "points of X lie outside M\n"))
+}
+\keyword{spatial}
+\keyword{manip}
+
+
diff --git a/man/discs.Rd b/man/discs.Rd
new file mode 100644
index 0000000..811a077
--- /dev/null
+++ b/man/discs.Rd
@@ -0,0 +1,101 @@
+\name{discs}
+\alias{discs}
+\title{
+ Union of Discs
+}
+\description{
+  Make a spatial region composed of discs
+  with given centres and radii.  
+}
+\usage{
+  discs(centres, radii = marks(centres)/2, \dots,
+        separate = FALSE, mask = FALSE, trim = TRUE,
+        delta = NULL, npoly=NULL)
+}
+\arguments{
+  \item{centres}{
+    Point pattern giving the locations of centres for the discs.
+  }
+  \item{radii}{
+    Vector of radii for each disc, or a single number giving a common
+    radius.
+    (Notice that the default assumes that the marks of
+    \code{centres} are \emph{diameters}.)
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link{as.mask}} to determine
+    the pixel resolution, if \code{mask=TRUE}.
+  }
+  \item{separate}{
+    Logical. If \code{TRUE}, the result is a list
+    containing each disc as a separate entry.
+    If \code{FALSE} (the default), the result is a window
+    obtained by forming the union of the discs.
+  }
+  \item{mask}{
+    Logical. If \code{TRUE}, the result is a binary mask window.
+    If \code{FALSE}, the result is a polygonal window.
+    Applies only when \code{separate=FALSE}.
+  }
+  \item{trim}{
+    Logical value indicating whether to restrict the result
+    to the original window of the \code{centres}.
+    Applies only when \code{separate=FALSE}.
+  }
+  \item{delta}{
+    Argument passed to \code{\link{disc}} to determine the 
+    tolerance for the polygonal approximation of each disc.
+    Applies only when \code{mask=FALSE}.
+    Incompatible with \code{npoly}.
+  }
+  \item{npoly}{
+    Argument passed to \code{\link{disc}} to determine the 
+    number of edges in the polygonal approximation of each disc.
+    Applies only when \code{mask=FALSE}.
+    Incompatible with \code{delta}.
+  }
+}
+\details{
+  This command is typically applied to a marked point pattern
+  dataset \code{X} in which the marks represent the sizes of objects.
+  The result is a spatial region representing the space occupied by
+  the objects.
+  
+  If the marks of \code{X} represent the diameters of circular objects,
+  then the result of \code{discs(X)}
+  is a spatial region constructed by taking discs, of the specified
+  diameters, centred at the points of \code{X}, and forming the union
+  of these discs. If the marks of \code{X} represent the areas of
+  objects, one could take \code{discs(X, sqrt(marks(X)/pi))}
+  to produce discs of equivalent area.
+
+  A fast algorithm is used to compute the result as a binary mask, when
+  \code{mask=TRUE}. This option is recommended unless polygons are
+  really necessary.
+
+  If \code{mask=FALSE}, the discs will be constructed as polygons
+  by the function \code{\link{disc}}. To avoid computational problems,
+  by default, the discs will all be constructed using
+  the same physical tolerance value \code{delta}
+  passed to \code{\link{disc}}. The default is such that the smallest
+  disc will be approximated by a 16-sided polygon.
+  (The argument \code{npoly} should not normally be used, to avoid
+  computational problems arising with small radii.)
+}
+\value{
+  If \code{separate=FALSE}, a window (object of class \code{"owin"}).
+
+  If \code{separate=TRUE}, a list of windows.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{disc}},
+  \code{\link{union.owin}}
+}
+\examples{
+  plot(discs(anemones, mask=TRUE, eps=0.5))
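+  # an illustrative sketch from the Details: if the marks had
+  # recorded areas rather than diameters, one could use
+  plot(discs(anemones, sqrt(marks(anemones)/pi), mask=TRUE, eps=0.5))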
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/distcdf.Rd b/man/distcdf.Rd
new file mode 100644
index 0000000..4ff3771
--- /dev/null
+++ b/man/distcdf.Rd
@@ -0,0 +1,109 @@
+\name{distcdf}
+\alias{distcdf}
+\title{Distribution Function of Interpoint Distance }
+\description{
+  Computes the cumulative distribution function of the distance
+  between two independent random points in a given window
+  or windows.
+}
+\usage{
+  distcdf(W, V=W, \dots, dW=1, dV=dW, nr=1024, regularise=TRUE)
+}
+\arguments{
+  \item{W}{
+    A window (object of class \code{"owin"}) containing the
+    first random point.
+  }
+  \item{V}{
+    Optional. Another window containing the second random point.
+    Defaults to \code{W}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}} to determine the
+    pixel resolution for the calculation.
+  }
+  \item{dW, dV}{
+    Optional. Probability densities (not necessarily normalised)
+    for the first and second random points respectively.
+    Data in any format acceptable
+    to \code{\link{as.im}}, for example, a \code{function(x,y)}
+    or a pixel image or a numeric value. The default
+    corresponds to a uniform distribution over the window.
+  }
+  \item{nr}{
+    Integer. The number of values of interpoint distance \eqn{r}
+    for which the CDF will be computed.
+    Should be a large value!
+  }
+  \item{regularise}{
+    Logical value indicating whether to smooth the results
+    for very small distances, to avoid discretisation artefacts.
+  }
+}
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+}
+\details{
+  This command computes the Cumulative Distribution Function
+  \eqn{
+    CDF(r) = Prob(T \le r)
+  }{
+    CDF(r) = Prob(T <= r)
+  }
+  of the Euclidean distance \eqn{T = \|X_1 - X_2\|}{T = |X1-X2|}
+  between two independent random points \eqn{X_1}{X1} and \eqn{X_2}{X2}.
+
+  In the simplest case, the command \code{distcdf(W)}, the random points are 
+  assumed to be uniformly distributed in the same
+  window \code{W}.
+
+  Alternatively the two random points may be 
+  uniformly distributed in two different windows \code{W} and \code{V}.
+
+  In the most general case the first point \eqn{X_1}{X1} is random
+  in the window \code{W} with a probability density proportional to
+  \code{dW}, and the second point \eqn{X_2}{X2} is random in
+  a different window \code{V} with probability density proportional
+  to \code{dV}. The values of \code{dW} and \code{dV} must be
+  finite and nonnegative.
+
+  The calculation is performed by numerical integration of the set covariance
+  function \code{\link{setcov}} for uniformly distributed points, and
+  by computing the covariance function \code{\link{imcov}} in the
+  general case. The accuracy of the result depends on
+  the pixel resolution used to represent the windows: this is controlled
+  by the arguments \code{\dots} which are passed to \code{\link{as.mask}}.
+  For example use \code{eps=0.1} to specify pixels of size 0.1 units.
+
+  The arguments \code{W} or \code{V} may also be point patterns
+  (objects of class \code{"ppp"}).
+  The result is the cumulative distribution function
+  of the distance from a randomly selected point in the point pattern,
+  to a randomly selected point in the other point pattern or window.
+
+  If \code{regularise=TRUE} (the default), values of the cumulative
+  distribution function for very short distances are smoothed to avoid
+  discretisation artefacts. Smoothing is applied to all distances
+  shorter than the width of 7 pixels. 
+}
+\seealso{
+  \code{\link{setcov}},
+  \code{\link{as.mask}}.
+}
+\examples{
+ # The unit disc
+ B <- disc()
+ plot(distcdf(B))
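+ # a sketch with a non-uniform density for the first point:
+ # dW may be a function(x,y), here proportional to exp(-x^2-y^2)
+ plot(distcdf(B, dW=function(x,y) { exp(-x^2 - y^2) }))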
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
+
+ 
+ 
diff --git a/man/distfun.Rd b/man/distfun.Rd
new file mode 100644
index 0000000..e1072af
--- /dev/null
+++ b/man/distfun.Rd
@@ -0,0 +1,101 @@
+\name{distfun}  
+\Rdversion{1.1}
+\alias{distfun}
+\alias{distfun.ppp}
+\alias{distfun.psp}
+\alias{distfun.owin}
+\title{
+  Distance Map as a Function 
+}
+\description{
+  Compute the distance function of an object, and return it as a function.
+}
+\usage{
+  distfun(X, \dots)
+
+  \method{distfun}{ppp}(X, \dots, k=1)
+
+  \method{distfun}{psp}(X, \dots)
+
+  \method{distfun}{owin}(X, \dots, invert=FALSE)
+}
+\arguments{
+  \item{X}{Any suitable dataset representing a two-dimensional
+    object, such as a point pattern (object of class \code{"ppp"}),
+    a window (object of class \code{"owin"}) or a
+    line segment pattern (object of class \code{"psp"}).
+  }
+  \item{\dots}{
+    Extra arguments are ignored. 
+  }
+  \item{k}{
+    An integer. The distance to the \code{k}th nearest point
+    will be computed.
+  }
+  \item{invert}{
+    If \code{TRUE}, compute the distance transform of the
+    complement of \code{X}.
+  }
+}
+\details{
+  The \dQuote{distance function} of a set of points \eqn{A} is the
+  mathematical function \eqn{f} such that, for any 
+  two-dimensional spatial location \eqn{(x,y)},
+  the function value \code{f(x,y)}
+  is the shortest distance from \eqn{(x,y)} to \eqn{A}.
+
+  The command \code{f <- distfun(X)} returns a \emph{function}
+  in the \R language, with arguments \code{x,y}, that represents the
+  distance function of \code{X}. Evaluating the function \code{f}
+  in the form \code{v <- f(x,y)}, where \code{x} and \code{y}
+  are any numeric vectors of equal length containing coordinates of
+  spatial locations, yields the values of the distance function at these
+  locations. Alternatively \code{x} can be a
+  point pattern (object of class \code{"ppp"} or \code{"lpp"}) of
+  locations at which the distance function should be computed (and then
+  \code{y} should be missing).
+
+  This should be contrasted with the related command \code{\link{distmap}}
+  which computes the distance function of \code{X}
+  on a grid of locations, and returns the distance
+  values in the form of a pixel image.
+
+  The result of \code{f <- distfun(X)} also belongs to the class
+  \code{"funxy"} and to the special class \code{"distfun"}.
+  It can be printed and plotted immediately as shown in the Examples.
+  
+  A \code{distfun} object can be converted to a pixel image
+  using \code{\link{as.im}}.
+}
+\value{
+  A \code{function} with arguments \code{x,y}.
+  The function also belongs to the class \code{"distfun"} which has
+  a method for \code{print}.
+  It also belongs to the class \code{"funxy"} which has methods
+  for \code{plot}, \code{contour} and \code{persp}.
+}
+\seealso{
+  \code{\link{distmap}},
+  \code{\link{plot.funxy}}
+}
+\examples{
+   data(letterR)
+   f <- distfun(letterR)
+   f
+   plot(f)
+   f(0.2, 0.3)
+
+   plot(distfun(letterR, invert=TRUE), eps=0.1)
+
+   d <- distfun(cells)
+   d2 <- distfun(cells, k=2)
+   d(0.5, 0.5)
+   d2(0.5, 0.5)
+
+   z <- d(japanesepines)
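+   # a distfun can be converted to a pixel image, as noted above
+   Z <- as.im(f)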
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/distfun.lpp.Rd b/man/distfun.lpp.Rd
new file mode 100644
index 0000000..401e07d
--- /dev/null
+++ b/man/distfun.lpp.Rd
@@ -0,0 +1,87 @@
+\name{distfun.lpp}
+\Rdversion{1.1}
+\alias{distfun.lpp}
+\title{
+  Distance Map on Linear Network
+}
+\description{
+  Compute the distance function of a point pattern on a linear network.
+}
+\usage{
+  \method{distfun}{lpp}(X, ..., k=1)
+}
+\arguments{
+  \item{X}{
+    A point pattern on a linear network
+    (object of class \code{"lpp"}).
+  }
+  \item{k}{
+    An integer. The distance to the \code{k}th nearest point
+    will be computed.
+  }
+  \item{\dots}{
+    Extra arguments are ignored. 
+  }
+}
+\details{
+  On a linear network \eqn{L}, the \dQuote{geodesic distance function}
+  of a set of points \eqn{A} in \eqn{L} is the
+  mathematical function \eqn{f} such that, for any 
+  location \eqn{s} on \eqn{L},
+  the function value \code{f(s)}
+  is the shortest-path distance from \eqn{s} to \eqn{A}.
+
+  The command \code{distfun.lpp} is a method for the generic command
+  \code{\link{distfun}}
+  for the class \code{"lpp"} of point patterns on a linear network.
+
+  If \code{X} is a point pattern on a linear network,
+  \code{f <- distfun(X)} returns a \emph{function}
+  in the \R language that represents the
+  distance function of \code{X}. Evaluating the function \code{f}
+  in the form \code{v <- f(x,y)}, where \code{x} and \code{y}
+  are any numeric vectors of equal length containing coordinates of
+  spatial locations, yields the values of the distance function at these
+  locations. More efficiently \code{f} can be called in the form
+  \code{v <- f(x, y, seg, tp)} where \code{seg} and \code{tp} are the local
+  coordinates on the network. It can also be called as
+  \code{v <- f(x)} where \code{x} is a point pattern on the same linear
+  network.
+
+  The function \code{f} obtained from \code{f <- distfun(X)}
+  also belongs to the class \code{"linfun"}. 
+  It can be printed and plotted immediately as shown in the Examples.
+  It can be 
+  converted to a pixel image using \code{\link{as.linim}}. 
+}
+\value{
+  A \code{function} with arguments \code{x,y} and optional
+  arguments \code{seg,tp}.
+  It also belongs to the class \code{"linfun"} which has methods
+  for \code{plot}, \code{print} etc.
+}
+\seealso{
+  \code{\link{linfun}},
+  \code{\link{methods.linfun}}.
+
+  To identify \emph{which} point is the nearest neighbour, see
+  \code{\link{nnfun.lpp}}.
+}
+\examples{
+   X <- runiflpp(3, simplenet)
+   f <- distfun(X)
+   f
+   plot(f)
+
+   # using a distfun as a covariate in a point process model:
+   Y <- runiflpp(4, simplenet)
+   fit <- lppm(Y ~D, covariates=list(D=f))
+
+   f(Y)
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/distmap.Rd b/man/distmap.Rd
new file mode 100644
index 0000000..5413b8f
--- /dev/null
+++ b/man/distmap.Rd
@@ -0,0 +1,63 @@
+\name{distmap}
+\alias{distmap}
+\title{
+  Distance Map 
+}
+\description{
+  Compute the distance map of an object, and return it as a pixel image.
+  Generic.
+}
+\usage{
+  distmap(X, \dots)
+}
+\arguments{
+  \item{X}{Any suitable dataset representing a two-dimensional
+    object, such as a point pattern (object of class \code{"ppp"}),
+    a window (object of class \code{"owin"}) or a
+    line segment pattern (object of class \code{"psp"}).
+  }
+  \item{\dots}{Arguments passed to \code{\link{as.mask}}
+    to control pixel resolution.
+  }
+}
+\value{
+  A pixel image (object of class \code{"im"}) whose grey scale values
+  are the values of the distance map.
+}
+\details{
+  The \dQuote{distance map} of a set of points \eqn{A} is the function
+  \eqn{f} whose value \code{f(x)} is defined for any two-dimensional
+  location \eqn{x} as the shortest distance from \eqn{x} to \eqn{A}.
+
+  This function computes the distance map of the set \code{X}
+  and returns the distance map as a pixel image.
+  
+  This is generic. Methods are provided for
+  point patterns (\code{\link{distmap.ppp}}),
+  line segment patterns  (\code{\link{distmap.psp}})
+  and windows (\code{\link{distmap.owin}}).
+}
+\seealso{
+  \code{\link{distmap.ppp}},
+  \code{\link{distmap.psp}},
+  \code{\link{distmap.owin}},
+  \code{\link{distfun}}
+}
+\examples{
+  data(cells)
+  U <- distmap(cells)
+  data(letterR)
+  V <- distmap(letterR)
+  \dontrun{
+  plot(U)
+  plot(V)
+  }
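+  # a hedged sketch: arguments in \dots are passed to as.mask,
+  # so pixel resolution can be controlled, e.g. via dimyx
+  U128 <- distmap(cells, dimyx=128)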
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/distmap.owin.Rd b/man/distmap.owin.Rd
new file mode 100644
index 0000000..fe42d2f
--- /dev/null
+++ b/man/distmap.owin.Rd
@@ -0,0 +1,98 @@
+\name{distmap.owin}  
+\alias{distmap.owin}
+\title{Distance Map of Window}
+\description{
+  Computes the distance from each pixel to the nearest point
+  in the given window.
+}
+\usage{
+  \method{distmap}{owin}(X, \dots, discretise=FALSE, invert=FALSE)
+}
+\arguments{
+  \item{X}{
+    A window (object of class \code{"owin"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}}
+    to control pixel resolution.
+  }
+  \item{discretise}{
+    Logical flag controlling the choice of algorithm when \code{X} is
+    a polygonal window. See Details.
+  }
+  \item{invert}{
+    If \code{TRUE}, compute the distance transform of the
+    complement of the window.
+  }
+}
+\value{
+  A pixel image (object of class \code{"im"}) whose greyscale values
+  are the values of the distance map.
+  The return value has an attribute \code{"bdry"}
+  which is a pixel image.
+}
+\details{
+  The ``distance map'' of a window \eqn{W} is the function
+  \eqn{f} whose value \code{f(u)} is defined for any two-dimensional
+  location \eqn{u} as the shortest distance from \eqn{u} to \eqn{W}.
+
+  This function computes the distance map of the window \code{X}
+  and returns the distance map as a pixel image. The greyscale value
+  at a pixel \eqn{u} equals the distance from \eqn{u} to the nearest
+  pixel in \code{X}.
+
+  Additionally, the return value 
+  has an attribute \code{"bdry"} which is 
+  also a pixel image. The grey values in \code{"bdry"} give the
+  distance from each pixel to the bounding rectangle of the image.
+
+  If \code{X} is a binary pixel mask,
+  the distance values computed are not the
+  usual Euclidean distances. Instead the distance between two pixels
+  is measured by the length of the
+  shortest path connecting the two pixels. A path is a series of steps
+  between neighbouring pixels (each pixel has 8 neighbours). 
+  This is the standard `distance transform' algorithm of image
+  processing (Rosenfeld and Pfaltz, 1968; Borgefors, 1986).
+
+  If \code{X} is a polygonal window, then exact Euclidean distances
+  will be computed if \code{discretise=FALSE}. If \code{discretise=TRUE}
+  then the window will first be converted to a binary pixel mask
+  and the discrete path distances will be computed.
+
+  The arguments \code{\dots} are passed to \code{\link{as.mask}}
+  to control the pixel resolution.
+  
+  This function is a method for the generic \code{\link{distmap}}.
+}
+\seealso{
+  \code{\link{distmap}},
+  \code{\link{distmap.ppp}},
+  \code{\link{distmap.psp}}
+}
+\examples{
+  data(letterR)
+  U <- distmap(letterR)
+  \dontrun{
+  plot(U)
+  plot(attr(U, "bdry"))
+  }
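+  # a hedged sketch of the arguments described above:
+  # discretise=TRUE forces the pixel path-distance algorithm,
+  # invert=TRUE measures distances within the complement of the window
+  Ud <- distmap(letterR, discretise=TRUE)
+  Ui <- distmap(letterR, invert=TRUE)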
+}
+\references{
+  Borgefors, G.
+  Distance transformations in digital images.
+  \emph{Computer Vision, Graphics and Image Processing} \bold{34}
+  (1986) 344--371.
+  
+  Rosenfeld, A. and Pfaltz, J.L.
+  Distance functions on digital pictures.
+  \emph{Pattern Recognition} \bold{1} (1968) 33--61.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/distmap.ppp.Rd b/man/distmap.ppp.Rd
new file mode 100644
index 0000000..539531d
--- /dev/null
+++ b/man/distmap.ppp.Rd
@@ -0,0 +1,76 @@
+\name{distmap.ppp}  
+\alias{distmap.ppp}
+\title{
+  Distance Map of Point Pattern
+}
+\description{
+  Computes the distance from each pixel to the nearest
+  point in the given point pattern.
+}
+\usage{
+  \method{distmap}{ppp}(X, \dots)
+}
+\arguments{
+  \item{X}{A point pattern (object of class \code{"ppp"}).
+  }
+  \item{\dots}{Arguments passed to \code{\link{as.mask}}
+    to control pixel resolution.
+  }
+}
+\value{
+  A pixel image (object of class \code{"im"}) whose greyscale values
+  are the values of the distance map.
+  The return value has attributes \code{"index"} and \code{"bdry"}
+  which are also pixel images. 
+}
+\details{
+  The ``distance map'' of a point pattern \eqn{X} is the function
+  \eqn{f} whose value \code{f(u)} is defined for any two-dimensional
+  location \eqn{u} as the shortest distance from \eqn{u} to \eqn{X}.
+
+  This function computes the distance map of the point pattern \code{X}
+  and returns the distance map as a pixel image. The greyscale value
+  at a pixel \eqn{u} equals the distance from \eqn{u} to the nearest
+  point of the pattern \code{X}.
+
+  Additionally, the return value 
+  has two attributes, \code{"index"} and \code{"bdry"}, which are
+  also pixel images. The grey values in \code{"bdry"} give the
+  distance from each pixel to the bounding rectangle of the image.
+  The grey values in \code{"index"} are integers identifying which
+  point of \code{X} is closest. 
+  
+  This is a method for the generic function \code{\link{distmap}}.
+  
+  Note that this function gives the distance from the
+  \emph{centre of each pixel} to the nearest data point.
+  To compute the exact distance from a given spatial location
+  to the nearest data point in \code{X}, use \code{\link{distfun}} or 
+  \code{\link{nncross}}.
+}
+\seealso{
+  Generic function \code{\link{distmap}} and other methods
+  \code{\link{distmap.psp}},
+  \code{\link{distmap.owin}}.
+  
+  Generic function \code{\link{distfun}}.
+
+  Nearest neighbour distance \code{\link{nncross}}
+}
+\examples{
+  data(cells)
+  U <- distmap(cells)
+  \dontrun{
+  plot(U)
+  plot(attr(U, "bdry"))
+  plot(attr(U, "index"))
+  }
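+  # a hedged sketch: for exact distances from arbitrary locations,
+  # use distfun as suggested in Details
+  f <- distfun(cells)
+  f(0.5, 0.5)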
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/distmap.psp.Rd b/man/distmap.psp.Rd
new file mode 100644
index 0000000..e088818
--- /dev/null
+++ b/man/distmap.psp.Rd
@@ -0,0 +1,75 @@
+\name{distmap.psp}  
+\alias{distmap.psp}
+\title{
+  Distance Map of Line Segment Pattern
+}
+\description{
+  Computes the distance from each pixel to the nearest
+  line segment in the given line segment pattern.
+}
+\usage{
+  \method{distmap}{psp}(X, \dots)
+}
+\arguments{
+  \item{X}{A line segment pattern (object of class \code{"psp"}).
+  }
+  \item{\dots}{Arguments passed to \code{\link{as.mask}}
+    to control pixel resolution.
+  }
+}
+\value{
+  A pixel image (object of class \code{"im"}) whose greyscale values
+  are the values of the distance map.
+  The return value has attributes \code{"index"} and \code{"bdry"}
+  which are also pixel images. 
+}
+\details{
+  The ``distance map'' of a line segment pattern \eqn{X} is the function
+  \eqn{f} whose value \code{f(u)} is defined for any two-dimensional
+  location \eqn{u} as the shortest distance from \eqn{u} to \eqn{X}.
+
+  This function computes the distance map of the line segment pattern \code{X}
+  and returns the distance map as a pixel image. The greyscale value
+  at a pixel \eqn{u} equals the distance from \eqn{u}
+  to the nearest line segment of the pattern \code{X}.
+  Distances are computed using analytic geometry.
+
+  Additionally, the return value 
+  has two attributes, \code{"index"} and \code{"bdry"}, which are
+  also pixel images. The grey values in \code{"bdry"} give the
+  distance from each pixel to the bounding rectangle of the image.
+  The grey values in \code{"index"} are integers identifying which
+  line segment of \code{X} is closest. 
+  
+  This is a method for the generic function \code{\link{distmap}}.
+
+  Note that this function gives the exact distance from the
+  centre of each pixel to the nearest line segment.
+  To compute the exact distance from the points in a point pattern
+  to the nearest line segment, use \code{\link{distfun}} or one of the
+  low-level functions \code{\link{nncross}}
+  or \code{\link{project2segment}}.
+}
+\seealso{
+  \code{\link{distmap}},
+  \code{\link{distmap.owin}},
+  \code{\link{distmap.ppp}},
+  \code{\link{distfun}},
+  \code{\link{nncross}},
+  \code{\link{nearestsegment}},
+  \code{\link{project2segment}}.
+}
+\examples{
+    a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin())
+    Z <- distmap(a)
+    plot(Z)
+    plot(a, add=TRUE)
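+    # a hedged sketch: the "index" attribute identifies the closest segment
+    plot(attr(Z, "index"))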
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/divide.linnet.Rd b/man/divide.linnet.Rd
new file mode 100644
index 0000000..52e6d13
--- /dev/null
+++ b/man/divide.linnet.Rd
@@ -0,0 +1,46 @@
+\name{divide.linnet}
+\alias{divide.linnet}
+\title{
+  Divide Linear Network at Cut Points
+}
+\description{
+  Make a tessellation of a linear network by dividing it into
+  pieces demarcated by the points of a point pattern.
+}
+\usage{
+ divide.linnet(X)
+}
+\arguments{
+  \item{X}{
+    Point pattern on a linear network (object of class \code{"lpp"}).
+  }
+}
+\details{
+  The points \code{X} are interpreted as dividing the linear network
+  \code{L=as.linnet(X)} into separate pieces.
+
+  Two locations on \code{L}
+  belong to the same piece if and only if they can be joined by a path
+  in \code{L} that does not cross any of the points of \code{X}.
+
+  The result is a
+  tessellation of the network (object of class \code{"lintess"})
+  representing the division of \code{L} into pieces. 
+}
+\value{
+  A tessellation on a linear network (object of class \code{"lintess"}).
+}
+\author{
+  \spatstatAuthors
+  and Greg McSwiggan.
+}
+\seealso{
+  \code{\link{linnet}}, \code{\link{lintess}}.
+}
+\examples{
+  X <- runiflpp(5, simplenet)
+  plot(divide.linnet(X))
+  plot(X, add=TRUE, pch=16)
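+  # a hedged sketch: the result is a tessellation on the network,
+  # with one tile for each piece
+  te <- divide.linnet(X)
+  te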
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/dkernel.Rd b/man/dkernel.Rd
new file mode 100644
index 0000000..d5f1c72
--- /dev/null
+++ b/man/dkernel.Rd
@@ -0,0 +1,94 @@
+\name{dkernel}
+\alias{dkernel}
+\alias{pkernel}
+\alias{qkernel}
+\alias{rkernel}
+\title{Kernel distributions and random generation}
+\description{Density, distribution function, quantile function and random
+  generation for several distributions used in kernel estimation
+  for numerical data.
+}
+\usage{
+dkernel(x, kernel = "gaussian", mean = 0, sd = 1)
+pkernel(q, kernel = "gaussian", mean = 0, sd = 1, lower.tail = TRUE)
+qkernel(p, kernel = "gaussian", mean = 0, sd = 1, lower.tail = TRUE)
+rkernel(n, kernel = "gaussian", mean = 0, sd = 1)
+}
+\arguments{
+  \item{x, q}{Vector of quantiles.}
+  \item{p}{Vector of probabilities.}
+  \item{kernel}{
+    String name of the kernel.
+    Options are
+    \code{"gaussian"}, \code{"rectangular"},
+    \code{"triangular"},
+    \code{"epanechnikov"},
+    \code{"biweight"},
+    \code{"cosine"} and \code{"optcosine"}.
+    (Partial matching is used).
+  }
+  \item{n}{Number of observations.}
+  \item{mean}{Mean of distribution.}
+  \item{sd}{Standard deviation of distribution.}
+  \item{lower.tail}{logical; if \code{TRUE} (the default),
+    then probabilities are \eqn{P(X \le x)}{P(X <= x)},
+    otherwise, \eqn{P(X > x)}.
+  }
+}
+\details{
+  These functions give the
+  probability density, cumulative distribution function,
+  quantile function and random generation for several
+  distributions used in kernel estimation for one-dimensional
+  (numerical) data.
+
+  The available kernels are those used in \code{\link[stats]{density.default}},
+  namely \code{"gaussian"}, \code{"rectangular"},
+    \code{"triangular"},
+    \code{"epanechnikov"},
+    \code{"biweight"},
+    \code{"cosine"} and \code{"optcosine"}.
+    For more information about these kernels,
+    see \code{\link[stats]{density.default}}. 
+  
+  \code{dkernel} gives the probability density,
+  \code{pkernel} gives the cumulative distribution function,
+  \code{qkernel} gives the quantile function,
+  and \code{rkernel} generates random deviates.
+}
+\value{
+  A numeric vector.
+  For \code{dkernel}, a vector of the same length as \code{x}
+  containing the corresponding values of the probability density.
+  For \code{pkernel}, a vector of the same length as \code{x}
+  containing the corresponding values of the cumulative distribution function.
+  For \code{qkernel}, a vector of the same length as \code{p}
+  containing the corresponding quantiles.
+  For \code{rkernel}, a vector of length \code{n}
+  containing randomly generated values.
+}
+\examples{
+  x <- seq(-3,3,length=100)
+  plot(x, dkernel(x, "epa"), type="l",
+           main=c("Epanechnikov kernel", "probability density"))
+  plot(x, pkernel(x, "opt"), type="l",
+           main=c("OptCosine kernel", "cumulative distribution function"))
+  p <- seq(0,1, length=256)
+  plot(p, qkernel(p, "biw"), type="l",
+           main=c("Biweight kernel", "cumulative distribution function"))
+  y <- rkernel(100, "tri")
+  hist(y, main="Random variates from triangular density")
+  rug(y)
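+  # a hedged sketch: qkernel inverts pkernel, up to numerical error
+  qkernel(pkernel(0.5, "epa"), "epa")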
+}
+\seealso{
+  \code{\link[stats]{density.default}},
+  \code{\link{kernel.factor}}
+}
+\author{\adrian
+  \email{adrian at maths.uwa.edu.au}
+  
+  and Martin Hazelton
+}
+\keyword{methods}
+\keyword{nonparametric}
+\keyword{smooth}
diff --git a/man/dmixpois.Rd b/man/dmixpois.Rd
new file mode 100644
index 0000000..678696d
--- /dev/null
+++ b/man/dmixpois.Rd
@@ -0,0 +1,96 @@
+\name{dmixpois}
+\alias{dmixpois}
+\alias{pmixpois}
+\alias{qmixpois}
+\alias{rmixpois}
+\title{
+  Mixed Poisson Distribution
+}
+\description{
+  Density, distribution function, quantile function and random
+  generation for a mixture of Poisson distributions.
+}
+\usage{
+dmixpois(x, mu, sd, invlink = exp, GHorder = 5)
+pmixpois(q, mu, sd, invlink = exp, lower.tail = TRUE, GHorder = 5)
+qmixpois(p, mu, sd, invlink = exp, lower.tail = TRUE, GHorder = 5)
+rmixpois(n, mu, sd, invlink = exp)
+}
+\arguments{
+  \item{x}{vector of (non-negative integer) quantiles.}
+  \item{q}{vector of quantiles.}
+  \item{p}{vector of probabilities.}
+  \item{n}{number of random values to return.}
+  \item{mu}{
+    Mean of the linear predictor. A single numeric value.
+  }
+  \item{sd}{
+    Standard deviation of the linear predictor. A single numeric value.
+  }
+  \item{invlink}{
+    Inverse link function. A function in the \R language,
+    used to transform the linear predictor into the
+    parameter \code{lambda} of the Poisson distribution.
+  }
+  \item{lower.tail}{
+    Logical. If \code{TRUE} (the default), probabilities are
+    \eqn{P[X \le x]}, otherwise, \eqn{P[X > x]}.
+  }
+  \item{GHorder}{
+    Number of quadrature points in the Gauss-Hermite quadrature approximation.
+    A small positive integer.
+  }
+}
+\details{
+  These functions are analogous to
+  \code{\link{dpois}},
+  \code{\link{ppois}},
+  \code{\link{qpois}} and
+  \code{\link{rpois}}
+  except that they apply to a mixture of Poisson distributions.
+
+  In effect, the Poisson mean parameter \code{lambda} is randomised
+  by setting \code{lambda = invlink(Z)} where \code{Z}
+  has a Gaussian \eqn{N(\mu,\sigma^2)}{N(mu, sigma^2)} distribution.
+  The default is \code{invlink=exp} which means that
+  \code{lambda} is lognormal. Set \code{invlink=I} to assume
+  that \code{lambda} is approximately Normal.
+
+  For \code{dmixpois}, \code{pmixpois} and \code{qmixpois},
+  the probability distribution is approximated using Gauss-Hermite
+  quadrature. For \code{rmixpois}, the deviates are simulated
+  exactly.
+}
+\value{
+  Numeric vector:
+  \code{dmixpois} gives probability masses,
+  \code{pmixpois} gives cumulative probabilities,
+  \code{qmixpois} gives (non-negative integer) quantiles, and
+  \code{rmixpois} generates (non-negative integer) random deviates.
+}
+\seealso{
+  \code{\link{dpois}},
+  \code{\link{gauss.hermite}}.
+}
+\examples{
+  dmixpois(7, 10, 1, invlink = I)
+  dpois(7, 10)
+
+  pmixpois(7, log(10), 0.2)
+  ppois(7, 10)
+
+  qmixpois(0.95, log(10), 0.2)
+  qpois(0.95, 10)
+
+  x <- rmixpois(100, log(10), log(1.2))
+  mean(x)
+  var(x)
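+
+  # a hedged sketch: with invlink=I the Poisson mean is approximately
+  # Normal(mu, sd^2), so the mean of the mixture is close to mu
+  mean(rmixpois(1000, 10, 1, invlink = I))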
+}
+\author{\adrian,
+  \rolf
+  and \ege
+}
+\keyword{distribution}
diff --git a/man/domain.Rd b/man/domain.Rd
new file mode 100644
index 0000000..8ba6064
--- /dev/null
+++ b/man/domain.Rd
@@ -0,0 +1,145 @@
+\name{domain}
+\alias{domain}
+\alias{domain.ppp}
+\alias{domain.psp}
+\alias{domain.im}
+\alias{domain.ppx}
+\alias{domain.pp3}
+\alias{domain.lpp}
+\alias{domain.ppm}
+\alias{domain.kppm}
+\alias{domain.dppm}
+\alias{domain.lppm}
+\alias{domain.msr}
+\alias{domain.quad}
+\alias{domain.quadratcount}
+\alias{domain.quadrattest}
+\alias{domain.tess}
+\alias{domain.linfun}
+\alias{domain.lintess}
+\alias{domain.layered}
+\alias{domain.distfun}
+\alias{domain.nnfun}
+\alias{domain.funxy}
+\alias{domain.rmhmodel}
+\alias{domain.leverage.ppm}
+\alias{domain.influence.ppm}
+\title{
+  Extract the Domain of any Spatial Object
+}
+\description{
+  Given a spatial object such as a point pattern, in any number of dimensions,
+  this function extracts the spatial domain in which the object is defined.
+}
+\usage{
+   domain(X, \dots)
+
+   \method{domain}{ppp}(X, \dots)
+
+   \method{domain}{psp}(X, \dots)
+
+   \method{domain}{im}(X, \dots)
+
+   \method{domain}{ppx}(X, \dots)
+
+   \method{domain}{pp3}(X, \dots)
+
+   \method{domain}{lpp}(X, \dots)
+
+ \method{domain}{ppm}(X, \dots, from=c("points", "covariates"))
+
+ \method{domain}{kppm}(X, \dots, from=c("points", "covariates"))
+
+ \method{domain}{dppm}(X, \dots, from=c("points", "covariates"))
+
+ \method{domain}{lppm}(X, \dots)
+
+ \method{domain}{msr}(X, \dots)
+
+ \method{domain}{quad}(X, \dots)
+
+ \method{domain}{quadratcount}(X, \dots)
+
+ \method{domain}{quadrattest}(X, \dots)
+
+ \method{domain}{tess}(X, \dots)
+
+ \method{domain}{linfun}(X, \dots)
+
+ \method{domain}{lintess}(X, \dots)
+
+ \method{domain}{layered}(X, \dots)
+
+ \method{domain}{distfun}(X, \dots)
+
+ \method{domain}{nnfun}(X, \dots)
+
+ \method{domain}{funxy}(X, \dots)
+
+ \method{domain}{rmhmodel}(X, \dots)
+
+ \method{domain}{leverage.ppm}(X, \dots)
+
+ \method{domain}{influence.ppm}(X, \dots)
+}
+\arguments{
+  \item{X}{
+    A spatial object such as a point pattern (in any number
+    of dimensions), line segment pattern or pixel image.
+  }
+  \item{\dots}{
+    Extra arguments. They are ignored by all the methods listed here.
+  }
+  \item{from}{Character string. See Details.}
+}
+\details{
+  The function \code{domain} is generic.
+
+  For a spatial object \code{X} in any number of dimensions, 
+  \code{domain(X)} extracts the spatial domain in which \code{X} is
+  defined.
+
+  For a two-dimensional object \code{X}, typically \code{domain(X)}
+  is the same as \code{Window(X)}.
+
+  The exception is that, if \code{X} is a point pattern on a linear network
+  (class \code{"lpp"}) or a point process model on a linear network
+  (class \code{"lppm"}), then \code{domain(X)} is the linear network
+  on which the points lie, while \code{Window(X)} is the two-dimensional
+  window containing the linear network.
+
+  The argument \code{from} applies when \code{X} is a fitted 
+  point process model
+  (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}).
+  If \code{from="data"} (the default),
+  \code{domain} extracts the window of the original point
+  pattern data to which the model was fitted.
+  If \code{from="covariates"} then \code{domain} returns the
+  window in which the spatial covariates of the model were provided.
+}
+\value{
+  A spatial object representing the domain of \code{X}.
+  Typically a window (object of class \code{"owin"}),
+  a three-dimensional box (\code{"box3"}), a multidimensional
+  box (\code{"boxx"}) or a linear network (\code{"linnet"}).
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{Window}},
+  \code{\link{Frame}}
+}
+\examples{
+  domain(cells)
+  domain(bei.extra$elev)
+  domain(chicago)
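+  # a hedged sketch: for a fitted model, the domain is the window
+  # of the original point pattern data
+  fit <- ppm(cells ~ 1)
+  domain(fit)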
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/dppBessel.Rd b/man/dppBessel.Rd
new file mode 100644
index 0000000..30517b0
--- /dev/null
+++ b/man/dppBessel.Rd
@@ -0,0 +1,42 @@
+\name{dppBessel}
+\alias{dppBessel}
+\title{Bessel Type Determinantal Point Process Model}
+\description{
+  Function generating an instance of the Bessel-type
+  determinantal point process model.
+}
+\usage{dppBessel(\dots)}
+\arguments{
+  \item{\dots}{arguments of the form \code{tag=value}
+    specifying the model parameters. See Details.
+  }
+}
+\details{
+  The possible parameters are:
+  \itemize{
+    \item the intensity \code{lambda} as a positive numeric
+    \item the scale parameter \code{alpha} as a positive numeric
+    \item the shape parameter \code{sigma} as a non-negative numeric
+    \item the dimension \code{d} as a positive integer
+  }
+}
+\value{An object of class \code{"detpointprocfamily"}.}
+\author{
+  Frederic Lavancier and Christophe Biscio.
+  Modified by \ege,
+  \adrian
+  and \rolf
+}
+\examples{
+m <- dppBessel(lambda=100, alpha=.05, sigma=0, d=2)
+}
+\seealso{
+  \code{\link{dppCauchy}},
+  \code{\link{dppGauss}},
+  \code{\link{dppMatern}},
+  \code{\link{dppPowerExp}}
+}
+
diff --git a/man/dppCauchy.Rd b/man/dppCauchy.Rd
new file mode 100644
index 0000000..20d2eea
--- /dev/null
+++ b/man/dppCauchy.Rd
@@ -0,0 +1,50 @@
+\name{dppCauchy}
+\alias{dppCauchy}
+\title{Generalized Cauchy Determinantal Point Process Model}
+\description{
+  Function generating an instance of the (generalized) Cauchy
+  determinantal point process model.
+}
+\usage{dppCauchy(\dots)}
+\arguments{
+  \item{\dots}{arguments of the form \code{tag=value} specifying the
+    parameters. See Details.} 
+}
+\details{
+  The (generalized) Cauchy DPP is defined by Lavancier, \ifelse{latex}{\out{M\o ller}}{Moller} and Rubak (2015).
+  The possible parameters are:
+  \itemize{
+    \item the intensity \code{lambda} as a positive numeric
+    \item the scale parameter \code{alpha} as a positive numeric
+    \item the shape parameter \code{nu} as a positive numeric
+    (artificially required to be less than 20 in the code for numerical
+    stability)
+    \item the dimension \code{d} as a positive integer
+  }
+}
+\value{An object of class \code{"detpointprocfamily"}.}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\references{
+  Lavancier, F., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Rubak, E. (2015)
+  Determinantal point processes: models and statistical inference.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{77}, 853--877.
+}
+\examples{
+m <- dppCauchy(lambda=100, alpha=.05, nu=1, d=2)
+}
+\seealso{
+  \code{\link{dppBessel}},
+  \code{\link{dppGauss}},
+  \code{\link{dppMatern}},
+  \code{\link{dppPowerExp}}
+}
+
diff --git a/man/dppGauss.Rd b/man/dppGauss.Rd
new file mode 100644
index 0000000..0fc2cfb
--- /dev/null
+++ b/man/dppGauss.Rd
@@ -0,0 +1,47 @@
+\name{dppGauss}
+\alias{dppGauss}
+\title{Gaussian Determinantal Point Process Model}
+\description{
+  Function generating an instance
+  of the Gaussian determinantal point process model.
+}
+\usage{dppGauss(\dots)}
+\arguments{
+  \item{\dots}{arguments of the form \code{tag=value} specifying the
+    parameters. See Details.} 
+}
+\details{
+  The Gaussian DPP is defined by Lavancier, \ifelse{latex}{\out{M\o ller}}{Moller} and Rubak (2015).
+  The possible parameters are:
+  \itemize{
+    \item the intensity \code{lambda} as a positive numeric
+    \item the scale parameter \code{alpha} as a positive numeric
+    \item the dimension \code{d} as a positive integer
+  }
+}
+\value{An object of class \code{"detpointprocfamily"}.}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\references{
+  Lavancier, F., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Rubak, E. (2015)
+  Determinantal point processes: models and statistical inference.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{77}, 853--877.
+}
+\examples{
+m <- dppGauss(lambda=100, alpha=.05, d=2)
+}
+\seealso{
+  \code{\link{dppBessel}},
+  \code{\link{dppCauchy}},
+  \code{\link{dppMatern}},
+  \code{\link{dppPowerExp}}
+}
+
diff --git a/man/dppMatern.Rd b/man/dppMatern.Rd
new file mode 100644
index 0000000..e0402a8
--- /dev/null
+++ b/man/dppMatern.Rd
@@ -0,0 +1,51 @@
+\name{dppMatern}
+\alias{dppMatern}
+\title{Whittle-Matern Determinantal Point Process Model}
+\description{
+  Function generating an instance of the Whittle-Matern determinantal
+  point process model
+} 
+\usage{dppMatern(\dots)}
+\arguments{
+  \item{\dots}{arguments of the form \code{tag=value} specifying the
+    parameters. See Details.
+  } 
+}
+\details{
+  The Whittle-\ifelse{latex}{\out{Mat\' ern}}{Matern} DPP is defined by Lavancier, \ifelse{latex}{\out{M\o ller}}{Moller} and Rubak (2015).
+  The possible parameters are:
+  \itemize{
+    \item the intensity \code{lambda} as a positive numeric
+    \item the scale parameter \code{alpha} as a positive numeric
+    \item the shape parameter \code{nu} as a positive numeric
+    (artificially required to be less than 20 in the code for numerical
+    stability)
+    \item the dimension \code{d} as a positive integer
+  }
+}
+\value{An object of class \code{"detpointprocfamily"}.}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\references{
+  Lavancier, F., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Rubak, E. (2015)
+  Determinantal point processes: models and statistical inference.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{77}, 853--877.
+}
+\examples{
+m <- dppMatern(lambda=100, alpha=.02, nu=1, d=2)
+}
+\seealso{
+  \code{\link{dppBessel}},
+  \code{\link{dppCauchy}},
+  \code{\link{dppGauss}},
+  \code{\link{dppPowerExp}}
+}
+
diff --git a/man/dppPowerExp.Rd b/man/dppPowerExp.Rd
new file mode 100644
index 0000000..93a8fa9
--- /dev/null
+++ b/man/dppPowerExp.Rd
@@ -0,0 +1,48 @@
+\name{dppPowerExp}
+\alias{dppPowerExp}
+\title{Power Exponential Spectral Determinantal Point Process Model}
+\description{Function generating an instance of the Power Exponential
+  Spectral determinantal point process model.}
+\usage{dppPowerExp(\dots)}
+\arguments{
+  \item{\dots}{arguments of the form \code{tag=value} specifying the
+    parameters. See Details.} 
+}
+\details{
+  The Power Exponential Spectral DPP is defined by Lavancier, \ifelse{latex}{\out{M\o ller}}{Moller} and Rubak (2015).
+  The possible parameters are:
+  \itemize{
+    \item the intensity \code{lambda} as a positive numeric
+    \item the scale parameter \code{alpha} as a positive numeric
+    \item the shape parameter \code{nu} as a positive numeric
+    (artificially required to be less than 20 in the code for numerical
+    stability)
+    \item the dimension \code{d} as a positive integer
+  }
+}
+\value{An object of class \code{"detpointprocfamily"}.}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\references{
+  Lavancier, F., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Rubak, E. (2015)
+  Determinantal point processes: models and statistical inference.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{77}, 853--877.
+}
+\examples{
+m <- dppPowerExp(lambda=100, alpha=.01, nu=1, d=2)
+}
+\seealso{
+  \code{\link{dppBessel}},
+  \code{\link{dppCauchy}},
+  \code{\link{dppGauss}},
+  \code{\link{dppMatern}}
+}
+
diff --git a/man/dppapproxkernel.Rd b/man/dppapproxkernel.Rd
new file mode 100644
index 0000000..f1c074f
--- /dev/null
+++ b/man/dppapproxkernel.Rd
@@ -0,0 +1,30 @@
+\name{dppapproxkernel}
+\alias{dppapproxkernel}
+\title{Approximate Determinantal Point Process Kernel}
+\description{
+  Returns an approximation to the kernel of a determinantal
+  point process, as a function of one argument \eqn{x}.
+}
+\usage{dppapproxkernel(model, trunc = 0.99, W = NULL)}
+\arguments{
+  \item{model}{Object of class \code{"detpointprocfamily"}.}
+  \item{trunc}{Numeric specifying how the model truncation is
+    performed. See Details section of \code{\link{simulate.detpointprocfamily}}.
+  }
+  \item{W}{Optional window -- undocumented at the moment.}
+}
+\value{A function}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
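+\examples{
+  # a hedged sketch, mirroring the dppapproxpcf example:
+  # the returned function can be plotted against distance
+  f <- dppapproxkernel(dppGauss(lambda=100, alpha=.05, d=2))
+  plot(f, xlim=c(0, 0.1))
+}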
diff --git a/man/dppapproxpcf.Rd b/man/dppapproxpcf.Rd
new file mode 100644
index 0000000..5ed3eff
--- /dev/null
+++ b/man/dppapproxpcf.Rd
@@ -0,0 +1,31 @@
+\name{dppapproxpcf}
+\alias{dppapproxpcf}
+\title{Approximate Pair Correlation Function of Determinantal Point Process Model}
+\description{
+  Returns an approximation to the
+  theoretical pair correlation function of a
+  determinantal point process model, as a
+  function of one argument \eqn{x}.
+}
+\usage{dppapproxpcf(model, trunc = 0.99, W = NULL)}
+\arguments{
+  \item{model}{Object of class \code{"detpointprocfamily"}.}
+  \item{trunc}{Numeric specifying how the model truncation is
+    performed. See Details section of \code{\link{simulate.detpointprocfamily}}.}
+  \item{W}{Optional window -- undocumented at the moment.}
+}
+\details{This function is usually NOT needed for anything. It
+only exists for investigative purposes.}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\examples{
+f <- dppapproxpcf(dppMatern(lambda = 100, alpha=.028, nu=1, d=2))
+plot(f, xlim = c(0,0.1))
+}
diff --git a/man/dppeigen.Rd b/man/dppeigen.Rd
new file mode 100644
index 0000000..d675327
--- /dev/null
+++ b/man/dppeigen.Rd
@@ -0,0 +1,29 @@
+\name{dppeigen}
+\alias{dppeigen}
+\title{Internal function calculating eig and index}
+\description{This function is mainly for internal package use and is usually
+not called by the user.}
+\usage{dppeigen(model, trunc, Wscale, stationary = FALSE)}
+\arguments{
+  \item{model}{object of class \code{"detpointprocfamily"} }
+  \item{trunc}{numeric giving the truncation}
+  \item{Wscale}{numeric giving the scale of the window relative to a unit box}
+  \item{stationary}{logical indicating whether the stationarity of the model should be used (only works in dimension 2).}
+}
+\value{A list}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/dppkernel.Rd b/man/dppkernel.Rd
new file mode 100644
index 0000000..d5db861
--- /dev/null
+++ b/man/dppkernel.Rd
@@ -0,0 +1,28 @@
+\name{dppkernel}
+\alias{dppkernel}
+\title{Extract Kernel from Determinantal Point Process Model Object}
+\description{
+  Returns the kernel of a determinantal point process model as a
+  function of one argument \code{x}.
+}
+\usage{dppkernel(model, \dots)}
+\arguments{
+  \item{model}{Model of class \code{"detpointprocfamily"}.}
+  \item{\dots}{Arguments passed to \code{\link{dppapproxkernel}} if the exact kernel is unknown}
+}
+\value{A function}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\examples{
+kernelMatern <- dppkernel(dppMatern(lambda = 100, alpha=.01, nu=1, d=2))
+plot(kernelMatern, xlim = c(0,0.1))
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/dppm.Rd b/man/dppm.Rd
new file mode 100644
index 0000000..3ca0f39
--- /dev/null
+++ b/man/dppm.Rd
@@ -0,0 +1,321 @@
+\name{dppm}
+\alias{dppm}
+\concept{point process model}
+\concept{determinantal point process}
+\title{Fit Determinantal Point Process Model}
+\description{
+  Fit a determinantal point process model to a point pattern.
+}
+\usage{
+  dppm(formula, family, data=NULL,
+       ...,
+       startpar = NULL,
+       method = c("mincon", "clik2", "palm"),
+       weightfun=NULL,
+       control=list(),
+       algorithm="Nelder-Mead",
+       statistic="K",
+       statargs=list(),
+       rmax = NULL,
+       covfunargs=NULL,
+       use.gam=FALSE,
+       nd=NULL, eps=NULL)
+}
+\arguments{
+  \item{formula}{
+    A \code{formula} in the \R language
+    specifying the data (on the left side) and the
+    form of the model to be fitted (on the right side).
+    For a stationary model it suffices to provide a point pattern
+    without a formula. See Details.
+  }
+  \item{family}{
+    Information specifying the family of point processes
+    to be used in the model.
+    Typically one of the family functions
+    \code{\link{dppGauss}}, \code{\link{dppMatern}},
+    \code{\link{dppCauchy}}, \code{\link{dppBessel}}
+    or \code{\link{dppPowerExp}}.
+    Alternatively a character string giving the name
+    of a family function, or the result of calling one of the
+    family functions. See Details.
+  }
+  \item{data}{
+    The values of spatial covariates (other than the Cartesian
+    coordinates) required by the model.
+    A named list of pixel images, functions, windows,
+    tessellations or numeric constants.
+  }
+  \item{\dots}{
+    Additional arguments. See Details.
+  }
+  \item{startpar}{
+    Named vector of starting parameter values for the optimization.
+  }
+  \item{method}{
+    The fitting method. Either
+    \code{"mincon"} for minimum contrast,
+    \code{"clik2"} for second order composite likelihood,
+    or \code{"palm"} for Palm likelihood.
+    Partially matched.
+  }
+  \item{weightfun}{
+    Optional weighting function \eqn{w}
+    in the composite likelihood or Palm likelihood.
+    A \code{function} in the \R language.
+    See Details.
+  }
+  \item{control}{
+    List of control parameters passed to the optimization function
+    \code{\link[stats]{optim}}.
+  }
+  \item{algorithm}{
+    Character string determining the mathematical optimisation algorithm
+    to be used by \code{\link[stats]{optim}}. See
+    the argument \code{method} of \code{\link[stats]{optim}}.
+  }
+  \item{statistic}{
+    Name of the summary statistic to be used
+    for minimum contrast estimation: either \code{"K"} or \code{"pcf"}.
+  }
+  \item{statargs}{
+    Optional list of arguments to be used when calculating
+    the \code{statistic}. See Details.
+  }
+  \item{rmax}{
+    Maximum value of interpoint distance
+    to use in the composite likelihood.
+  }
+  \item{covfunargs,use.gam,nd,eps}{
+    Arguments passed to \code{\link{ppm}} when fitting the intensity.
+  }
+}
+\details{
+  This function fits a determinantal point process model to a
+  point pattern dataset as described in Lavancier et al. (2015).
+
+  The model to be fitted is specified by the arguments
+  \code{formula} and \code{family}.
+  
+  The argument \code{formula} should normally be a \code{formula} in the
+  \R language. The left hand side of the formula
+  specifies the point pattern dataset to which the model should be fitted.
+  This should be a single argument which may be a point pattern
+  (object of class \code{"ppp"}) or a quadrature scheme
+  (object of class \code{"quad"}). The right hand side of the formula is called
+  the \code{trend} and specifies the form of the
+  \emph{logarithm of the intensity} of the process.
+  Alternatively the argument \code{formula} may be a point pattern or quadrature
+  scheme, and the trend formula is taken to be \code{~1}.
+
+  The argument \code{family} specifies the family of point processes
+  to be used in the model.
+  It is typically one of the family functions
+  \code{\link{dppGauss}}, \code{\link{dppMatern}},
+  \code{\link{dppCauchy}}, \code{\link{dppBessel}}
+  or \code{\link{dppPowerExp}}. 
+  Alternatively it may be a character string giving the name
+  of a family function, or the result of calling one of the
+  family functions. A family function belongs to class
+  \code{"detpointprocfamilyfun"}. The result of calling a family
+  function is a point process family, which belongs to class
+  \code{"detpointprocfamily"}.   
+  
+  The algorithm first estimates the intensity function
+  of the point process using \code{\link{ppm}}.
+  If the trend formula is \code{~1}
+  (the default if a point pattern or quadrature
+  scheme is given rather than a \code{"formula"})
+  then the model is \emph{homogeneous}. The algorithm begins by
+  estimating the intensity as the number of points divided by
+  the area of the window.
+  Otherwise, the model is \emph{inhomogeneous}.
+  The algorithm begins by fitting a Poisson process with log intensity
+  of the form specified by the formula \code{trend}.
+  (See \code{\link{ppm}} for further explanation).
+
+  The interaction parameters of the model are then fitted
+  either by minimum contrast estimation, or by maximum
+  composite likelihood.
+
+  \describe{
+   \item{Minimum contrast:}{
+      If \code{method = "mincon"} (the default) interaction parameters of
+      the model will be fitted
+      by minimum contrast estimation, that is, by matching the theoretical
+      \eqn{K}-function of the model to the empirical \eqn{K}-function
+      of the data, as explained in \code{\link{mincontrast}}.
+
+      For a homogeneous model (\code{trend = ~1})
+      the empirical \eqn{K}-function of the data is computed
+      using \code{\link{Kest}},
+      and the interaction parameters of the model are estimated by
+      the method of minimum contrast.
+
+      For an inhomogeneous model,
+      the inhomogeneous \eqn{K} function is estimated
+      by \code{\link{Kinhom}} using the fitted intensity.
+      Then the interaction parameters of the model
+      are estimated by the method of minimum contrast using the
+      inhomogeneous \eqn{K} function. This two-step estimation
+      procedure is heavily inspired by Waagepetersen (2007).
+
+      If \code{statistic="pcf"} then instead of using the
+      \eqn{K}-function, the algorithm will use
+      the pair correlation function \code{\link{pcf}} for homogeneous
+      models and the inhomogeneous pair correlation function
+      \code{\link{pcfinhom}} for inhomogeneous models.
+      In this case, the smoothing parameters of the pair correlation
+      can be controlled using the argument \code{statargs},
+      as shown in the Examples.
+
+      Additional arguments \code{\dots} will be passed to
+      \code{\link{mincontrast}} to control the minimum contrast fitting
+      algorithm.
+    }
+    \item{Composite likelihood:}{
+      If \code{method = "clik2"} the interaction parameters of the
+      model will be fitted by maximising the second-order composite likelihood
+      (Guan, 2006). The log composite likelihood is
+      \deqn{
+	\sum_{i,j} w(d_{ij}) \log\rho(d_{ij}; \theta)
+	- \left( \sum_{i,j} w(d_{ij}) \right)
+	\log \int_D \int_D w(\|u-v\|) \rho(\|u-v\|; \theta)\, du\, dv
+      }{
+	\sum[i,j] w(d[i,j]) log(\rho(d[i,j]; \theta))
+	- (\sum[i,j] w(d[i,j]))
+	log(integral[D,D] w(||u-v||) \rho(||u-v||; \theta) du dv)
+      }
+      where the sums are taken over all pairs of data points
+      \eqn{x_i, x_j}{x[i], x[j]} separated by a distance
+      \eqn{d_{ij} = \| x_i - x_j\|}{d[i,j] = ||x[i] - x[j]||}
+      less than \code{rmax},
+      and the double integral is taken over all pairs of locations
+      \eqn{u,v} in the spatial window of the data.
+      Here \eqn{\rho(d;\theta)}{\rho(d;\theta)} is the
+      pair correlation function of the model with
+      cluster parameters \eqn{\theta}{\theta}.
+
+      The function \eqn{w} in the composite likelihood
+      is a weighting function and may be chosen arbitrarily.
+      It is specified by the argument \code{weightfun}.
+      If this is missing or \code{NULL} then the default is
+      a threshold weight function,
+      \eqn{w(d) = 1(d \le R)}{w(d) = 1(d \le R)}, where \eqn{R} is \code{rmax/2}.
+    }
+    \item{Palm likelihood:}{
+      If \code{method = "palm"} the interaction parameters of the
+      model will be fitted by maximising the Palm loglikelihood
+      (Tanaka et al, 2008)
+      \deqn{
+	\sum_{i,j} w(x_i, x_j) \log \lambda_P(x_j \mid x_i; \theta)
+	- \int_D w(x_i, u) \lambda_P(u \mid x_i; \theta) {\rm d} u
+      }{
+	\sum[i,j] w(x[i], x[j]) log(\lambda[P](x[j] | x[i]; \theta)
+	- integral[D] w(x[i], u) \lambda[P](u | x[i]; \theta) du
+      }
+      with the same notation as above. Here
+      \eqn{\lambda_P(u \mid v; \theta)}{\lambda[P](u|v;\theta)} is the Palm intensity of
+      the model at location \eqn{u} given there is a point at \eqn{v}.
+    }
+  }
+  In all three methods, the optimisation is performed by the generic
+  optimisation algorithm \code{\link[stats]{optim}}.
+  The behaviour of this algorithm can be modified using the
+  argument \code{control}.
+  Useful control arguments include
+  \code{trace}, \code{maxit} and \code{abstol}
+  (documented in the help for \code{\link[stats]{optim}}).
+
+  Finally, it is also possible to fix any parameters desired before the
+  optimisation by specifying them as \code{name=value}
+  in the call to the family function. See Examples.
+}
+\value{
+  An object of class \code{"dppm"} representing the fitted model.
+  There are methods for printing, plotting, predicting and simulating
+  objects of this class.
+}
+\seealso{
+  methods for \code{dppm} objects:
+  \code{\link{plot.dppm}},
+  \code{\link{fitted.dppm}},
+  \code{\link{predict.dppm}},
+  \code{\link{simulate.dppm}},
+  \code{\link{methods.dppm}},
+  \code{\link{as.ppm.dppm}},
+  \code{\link{Kmodel.dppm}},
+  \code{\link{pcfmodel.dppm}}.
+
+  Minimum contrast fitting algorithm:
+  \code{\link{mincontrast}}.
+
+  Determinantal point process models:
+  \code{\link{dppGauss}},
+  \code{\link{dppMatern}},
+  \code{\link{dppCauchy}},
+  \code{\link{dppBessel}},
+  \code{\link{dppPowerExp}}.
+
+  Summary statistics:
+  \code{\link{Kest}},
+  \code{\link{Kinhom}},
+  \code{\link{pcf}},
+  \code{\link{pcfinhom}}.
+
+  See also \code{\link{ppm}}
+}
+\references{
+  Lavancier, F., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Rubak, E. (2015)
+  Determinantal point processes: models and statistical inference.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{77}, 853--877.
+
+  Guan, Y. (2006)
+  A composite likelihood approach in fitting spatial point process
+  models.
+  \emph{Journal of the American Statistical Association}
+  \bold{101}, 1502--1512.
+
+  Tanaka, U., Ogata, Y. and Stoyan, D. (2008)
+  Parameter estimation and model selection for
+  Neyman-Scott point processes.
+  \emph{Biometrical Journal} \bold{50}, 43--57.
+
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\examples{
+  jpines <- residualspaper$Fig1
+  \testonly{
+     # smaller dataset for testing
+    jpines <- jpines[c(TRUE,FALSE)]
+  }
+
+  dppm(jpines ~ 1, dppGauss)
+
+  dppm(jpines ~ 1, dppGauss, method="c")
+  dppm(jpines ~ 1, dppGauss, method="p")
+
+  # Fixing the intensity to lambda=2 rather than the Poisson MLE 2.04:
+  dppm(jpines ~ 1, dppGauss(lambda=2))
+
+  if(interactive()) {
+   # The following is quite slow (using K-function)
+   dppm(jpines ~ x, dppMatern)
+  }
+
+   # much faster using pair correlation function
+  dppm(jpines ~ x, dppMatern, statistic="pcf", statargs=list(stoyan=0.2))
+
+  # Fixing the Matern shape parameter to nu=2 rather than estimating it:
+  dppm(jpines ~ x, dppMatern(nu=2))
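+
+  # a hedged sketch: a fitted dppm object can be simulated
+  \dontrun{
+    fit <- dppm(jpines ~ 1, dppGauss)
+    plot(simulate(fit))
+  }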
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/dppparbounds.Rd b/man/dppparbounds.Rd
new file mode 100644
index 0000000..9fdcb56
--- /dev/null
+++ b/man/dppparbounds.Rd
@@ -0,0 +1,33 @@
+\name{dppparbounds}
+\alias{dppparbounds}
+\title{Parameter Bound for a Determinantal Point Process Model}
+\description{
+  Returns the lower and upper bound for a specific parameter of a
+  determinantal point process model when all other parameters are
+  fixed.
+}
+\usage{dppparbounds(model, name, \dots)}
+\arguments{
+  \item{model}{Model of class \code{"detpointprocfamily"}.}
+  \item{name}{name of the parameter for which the bound should be computed.}
+  \item{\dots}{
+    Additional arguments passed to the \code{parbounds} function of the given
+    model
+  }
+}
+\value{A \code{data.frame} containing lower and upper bounds.}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\examples{
+model <- dppMatern(lambda=100, alpha=.01, nu=1, d=2)
+dppparbounds(model, "lambda")
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/dppspecden.Rd b/man/dppspecden.Rd
new file mode 100644
index 0000000..8585697
--- /dev/null
+++ b/man/dppspecden.Rd
@@ -0,0 +1,30 @@
+\name{dppspecden}
+\alias{dppspecden}
+\title{Extract Spectral Density from Determinantal Point Process Model Object}
+\description{
+  Returns the spectral density of a determinantal point process
+  model as a function of one argument \code{x}.
+}
+\usage{dppspecden(model)}
+\arguments{
+  \item{model}{Model of class \code{"detpointprocfamily"}.}
+}
+\value{A function}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\examples{
+model <- dppMatern(lambda = 100, alpha=.01, nu=1, d=2)
+dppspecden(model)
+}
+\seealso{
+  \code{\link{dppspecdenrange}}
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/dppspecdenrange.Rd b/man/dppspecdenrange.Rd
new file mode 100644
index 0000000..1efbecb
--- /dev/null
+++ b/man/dppspecdenrange.Rd
@@ -0,0 +1,30 @@
+\name{dppspecdenrange}
+\alias{dppspecdenrange}
+\title{Range of Spectral Density of a Determinantal Point Process Model}
+\description{
+  Computes the range of the spectral density of a
+  determinantal point process model.
+} 
+\usage{dppspecdenrange(model)}
+\arguments{
+  \item{model}{Model of class \code{"detpointprocfamily"}.}
+}
+\value{Numeric value (possibly \code{Inf}).}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\examples{
+m <- dppBessel(lambda=100, alpha=0.05, sigma=1, d=2)
+dppspecdenrange(m)
+}
+\seealso{
+  \code{\link{dppspecden}}
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/dummify.Rd b/man/dummify.Rd
new file mode 100644
index 0000000..0b2659e
--- /dev/null
+++ b/man/dummify.Rd
@@ -0,0 +1,63 @@
+\name{dummify}
+\alias{dummify}
+\title{
+  Convert Data to Numeric Values by Constructing Dummy Variables
+}
+\description{
+  Converts data of any kind to numeric values.
+  A factor is expanded to a set of dummy variables. 
+}
+\usage{
+dummify(x)
+}
+\arguments{
+  \item{x}{
+    Vector, factor, matrix or data frame to be converted.
+  }
+}
+\details{
+  This function converts data (such as a factor) to numeric values
+  in order that the user may calculate, for example, 
+  the mean, variance, covariance and correlation of the data.
+
+  If \code{x} is a numeric vector or integer vector, it is returned
+  unchanged.
+
+  If \code{x} is a logical vector, it is converted to a 0-1 matrix with
+  2 columns. The first column contains a 1 if the logical value is
+  \code{FALSE}, and the second column contains a 1 if the logical
+  value is \code{TRUE}.
+
+  If \code{x} is a complex vector, it is converted to a matrix with 2
+  columns, containing the real and imaginary parts.
+
+  If \code{x} is a factor, the result is a matrix of 0-1 dummy
+  variables. The matrix has one column for each possible level of the
+  factor. The \code{(i,j)} entry is 
+  equal to 1 when the \code{i}th factor value equals the
+  \code{j}th level, and is equal to 0 otherwise.
+
+  If \code{x} is a matrix or data frame, the appropriate conversion is
+  applied to each column of \code{x}.
+
+  Note that, unlike \code{\link[stats]{model.matrix}}, this command converts a
+  factor into a full set of dummy variables (one column for each level of
+  the factor).
+}
+\value{
+  A numeric matrix.
+}
+\author{
+  \adrian 
+  
+}
+\examples{
+   chara <- sample(letters[1:3], 8, replace=TRUE)
+   logi <- (runif(8) < 0.3)
+   comp <- round(4*runif(8) + 3*runif(8) * 1i, 1)
+   nume <- 8:1 + 0.1
+   df <- data.frame(nume, chara, logi, comp)
+   df
+   dummify(df)
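+   # a hedged sketch: the numeric matrix supports the summaries
+   # mentioned in Details, e.g. variances and covariances
+   var(dummify(df))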
+}
+\keyword{math}
diff --git a/man/dummy.ppm.Rd b/man/dummy.ppm.Rd
new file mode 100644
index 0000000..c4b2ba4
--- /dev/null
+++ b/man/dummy.ppm.Rd
@@ -0,0 +1,74 @@
+\name{dummy.ppm}
+\alias{dummy.ppm}
+\title{Extract Dummy Points Used to Fit a Point Process Model}
+\description{
+  Given a fitted point process model,
+  this function extracts the `dummy points' of the
+  quadrature scheme used to fit the model.
+}
+\usage{
+  dummy.ppm(object, drop=FALSE)
+}
+\arguments{
+  \item{object}{
+    fitted point process model (an object of class \code{"ppm"}).
+  }
+  \item{drop}{
+    Logical value determining whether to delete dummy points
+    that were not used to fit the model.
+  }
+}
+\value{
+  A point pattern (object of class \code{"ppp"}).
+}
+\details{
+  An object of class \code{"ppm"} represents a point process model
+  that has been fitted to data. It is typically produced by
+  the model-fitting algorithm \code{\link{ppm}}.
+  
+  The maximum pseudolikelihood algorithm in \code{\link{ppm}}
+  approximates the pseudolikelihood
+  integral by a sum over a finite set of quadrature points,
+  which is constructed by augmenting
+  the original data point pattern by a set of ``dummy'' points.
+  The fitted model object returned by \code{\link{ppm}}
+  contains complete information about this quadrature scheme.
+  See \code{\link{ppm}} or \code{\link{ppm.object}} for further
+  information.
+  
+  This function \code{dummy.ppm}
+  extracts the dummy points of the quadrature scheme.
+  A typical use of this function would be to count the number of dummy
+  points, to gauge the accuracy of the approximation to the
+  exact pseudolikelihood. 
+
+  It may happen that some dummy points are not actually used in
+  fitting the model (typically because the value of a covariate is \code{NA}
+  at these points). The argument \code{drop} specifies whether these
+  unused dummy points shall be deleted (\code{drop=TRUE}) or
+  retained (\code{drop=FALSE}) in the return value.
+
+  See \code{\link{ppm.object}} for a list of all operations that can be
+  performed on objects of class \code{"ppm"}.
+}
+\seealso{
+  \code{\link{ppm.object}},
+  \code{\link{ppp.object}},
+  \code{\link{ppm}}
+}
+\examples{
+ data(cells)
+ fit <- ppm(cells, ~1, Strauss(r=0.1))
+ X <- dummy.ppm(fit)
+ npoints(X)
+ # this is the number of dummy points in the quadrature scheme
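+ # a hedged sketch: drop=TRUE removes dummy points not used in the fit
+ npoints(dummy.ppm(fit, drop=TRUE))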
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{utilities}
+\keyword{models}
diff --git a/man/duplicated.ppp.Rd b/man/duplicated.ppp.Rd
new file mode 100644
index 0000000..c4aa7e5
--- /dev/null
+++ b/man/duplicated.ppp.Rd
@@ -0,0 +1,95 @@
+\name{duplicated.ppp}
+\alias{duplicated.ppp}
+\alias{duplicated.ppx}
+\alias{anyDuplicated.ppp}
+\alias{anyDuplicated.ppx}
+\title{Determine Duplicated Points in a Spatial Point Pattern}
+\description{
+  Determines which points in a spatial point pattern
+  are duplicates of previous points, and returns a logical vector.
+}
+\usage{
+ \method{duplicated}{ppp}(x, \dots, rule=c("spatstat", "deldir", "unmark"))
+
+ \method{duplicated}{ppx}(x, \dots)
+
+ \method{anyDuplicated}{ppp}(x, \dots)
+
+ \method{anyDuplicated}{ppx}(x, \dots)
+}
+\arguments{
+  \item{x}{
+    A spatial point pattern
+    (object of class \code{"ppp"} or \code{"ppx"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{rule}{
+    Character string.
+    The rule for determining duplicated points.
+  }
+}
+\value{
+  \code{duplicated(x)} returns
+  a logical vector of length equal to the number of points in \code{x}.
+
+  \code{anyDuplicated(x)} is a number equal to 0 if there are no
+  duplicated points, and otherwise is equal to the index of the first
+  duplicated point.
+}
+\details{
+  These are methods for the generic functions \code{\link{duplicated}}
+  and \code{\link{anyDuplicated}} for 
+  point pattern datasets (of class \code{"ppp"}, see
+  \code{\link{ppp.object}}, or class \code{"ppx"}).
+
+  \code{anyDuplicated(x)} is a faster version of
+  \code{any(duplicated(x))}.
+
+  Two points in a point pattern are deemed to be identical
+  if their \eqn{x,y} coordinates are the same,
+  and their marks are also the same (if they carry marks).
+  The Examples section illustrates how it is possible for
+  a point pattern to contain a pair of identical points.
+
+  This function determines which points in \code{x} duplicate
+  other points that appeared earlier in the sequence. It 
+  returns a logical vector with entries that are \code{TRUE}
+  for duplicated points and \code{FALSE} for unique (non-duplicated)
+  points.
+
+  If \code{rule="spatstat"} (the default), two points are deemed
+  identical if their coordinates are equal according to \code{==},
+  \emph{and} their marks are equal according to \code{==}.
+  This is the most stringent possible test.
+  If \code{rule="unmark"}, duplicated points are
+  determined by testing equality of their coordinates only,
+  using \code{==}.
+  If \code{rule="deldir"}, duplicated points are
+  determined by testing equality of their coordinates only,
+  using the function \code{\link[deldir]{duplicatedxy}}
+  in the package \pkg{deldir}, which currently uses
+  \code{\link{duplicated.data.frame}}. Setting \code{rule="deldir"}
+  will ensure consistency with functions in the \pkg{deldir} package.
+}
+\seealso{
+  \code{\link{ppp.object}},
+  \code{\link{unique.ppp}},
+  \code{\link{multiplicity.ppp}}
+}
+\examples{
+   X <- ppp(c(1,1,0.5), c(2,2,1), window=square(3))
+   duplicated(X)
+   duplicated(X, rule="deldir")
+}
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
+ 
diff --git a/man/edge.Ripley.Rd b/man/edge.Ripley.Rd
new file mode 100644
index 0000000..f2d9db2
--- /dev/null
+++ b/man/edge.Ripley.Rd
@@ -0,0 +1,106 @@
+\name{edge.Ripley}
+\alias{edge.Ripley}
+\alias{rmax.Ripley}
+\title{
+  Ripley's Isotropic Edge Correction 
+}
+\description{
+  Computes Ripley's isotropic edge correction weights
+  for a point pattern.
+}
+\usage{
+edge.Ripley(X, r, W = Window(X), method = "C", maxweight = 100)
+
+rmax.Ripley(W)
+}
+\arguments{
+  \item{X}{
+    Point pattern (object of class \code{"ppp"}).
+  }
+  \item{W}{
+    Window for which the edge correction is required.
+  }
+  \item{r}{
+    Vector or matrix of interpoint distances for which the edge correction
+    should be computed.
+  }
+  \item{method}{
+    Choice of algorithm. Either \code{"interpreted"} or \code{"C"}.
+    This is needed only for debugging purposes.
+  }
+  \item{maxweight}{
+    Maximum permitted value of the edge correction weight.
+  }
+}
+\details{
+  The function \code{edge.Ripley}
+  computes Ripley's (1977) isotropic edge correction
+  weight, which is used in estimating the \eqn{K} function and in many
+  other contexts.
+
+  The function \code{rmax.Ripley} computes the maximum value of
+  distance \eqn{r} for which the isotropic edge correction
+  estimate of \eqn{K(r)} is valid.
+    
+  For a single point \eqn{u} in a window \eqn{W},
+  and a distance \eqn{r > 0}, the isotropic edge correction weight
+  is
+  \deqn{
+    e(u, r) = \frac{2\pi r}{\mbox{length}(c(u,r) \cap W)}
+  }{
+    e(u, r) = 2 * \pi * r/length(intersection(c(u,r), W))
+  }
+  where \eqn{c(u,r)} is the circle of radius \eqn{r} centred at the
+  point \eqn{u}. The denominator is the length of the overlap between
+  this circle and the window \eqn{W}.
+
+  The function \code{edge.Ripley} computes this edge correction weight
+  for each point in the point pattern \code{X} and for each
+  corresponding distance value in the vector or matrix \code{r}.
+  
+  If \code{r} is a vector, with one entry for each point in
+  \code{X}, then the result is a vector containing the
+  edge correction weights \code{e(X[i], r[i])} for each \code{i}.
+
+  If \code{r} is a matrix, with one row for each point in \code{X},
+  then the result is a matrix whose \code{i,j} entry gives the
+  edge correction weight \code{e(X[i], r[i,j])}.
+  For example \code{edge.Ripley(X, pairdist(X))} computes all the
+  edge corrections required for the \eqn{K}-function.
+
+  If any value of the edge correction weight exceeds \code{maxweight},
+  it is set to \code{maxweight}.
+
+  The function \code{rmax.Ripley} computes the smallest distance \eqn{r}
+  such that it is possible to draw a circle of radius \eqn{r}, centred
+  at a point of \code{W}, such that the circle does not intersect the
+  interior of \code{W}. 
+}
+\value{
+  A numeric vector or matrix.
+}
+\references{
+  Ripley, B.D. (1977)
+  Modelling spatial patterns (with discussion).
+  \emph{Journal of the Royal Statistical Society, Series B},
+  \bold{39}, 172 -- 212.
+}
+\seealso{
+  \code{\link{edge.Trans}},
+  \code{\link{rmax.Trans}},
+  \code{\link{Kest}}
+}
+\examples{
+  v <- edge.Ripley(cells, pairdist(cells))
+
+  rmax.Ripley(Window(cells))
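+
+  # vector form (a sketch): one weight per point, evaluated at its
+  # nearest-neighbour distance
+  w <- edge.Ripley(cells, nndist(cells))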
+}
+\author{\adrian
+  and \rolf.
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/edge.Trans.Rd b/man/edge.Trans.Rd
new file mode 100644
index 0000000..23f0ce3
--- /dev/null
+++ b/man/edge.Trans.Rd
@@ -0,0 +1,146 @@
+\name{edge.Trans}
+\alias{edge.Trans}
+\alias{rmax.Trans}
+\title{
+  Translation Edge Correction
+}
+\description{
+  Computes Ohser and Stoyan's translation edge correction weights
+  for a point pattern.
+}
+\usage{
+edge.Trans(X, Y = X, W = Window(X),
+      exact = FALSE, paired = FALSE,
+      ..., 
+      trim = spatstat.options("maxedgewt"),
+      dx=NULL, dy=NULL,
+      give.rmax=FALSE, gW=NULL)
+
+rmax.Trans(W, g=setcov(W))
+}
+\arguments{
+  \item{X,Y}{
+    Point patterns (objects of class \code{"ppp"}).
+  }
+  \item{W}{
+    Window for which the edge correction is required.
+  }
+  \item{exact}{
+    Logical. If \code{TRUE}, a slow algorithm will be used
+    to compute the exact value. If \code{FALSE}, a fast algorithm
+    will be used to compute the approximate value.
+  }
+  \item{paired}{
+    Logical value indicating whether \code{X} and \code{Y}
+    are paired. If \code{TRUE}, compute
+    the edge correction for corresponding points
+    \code{X[i], Y[i]} for all \code{i}.
+    If \code{FALSE}, compute the edge correction for
+    each possible pair of points \code{X[i], Y[j]}
+    for all \code{i} and \code{j}.
+  }
+  \item{\dots}{Ignored.}
+  \item{trim}{
+    Maximum permitted value of the edge correction weight.
+  }
+  \item{dx,dy}{
+    Alternative data giving the \eqn{x} and \eqn{y} coordinates
+    of the vector differences between the points.
+    Incompatible with \code{X} and \code{Y}. See Details.
+  }
+  \item{give.rmax}{
+    Logical. If \code{TRUE}, also compute the value of
+    \code{rmax.Trans(W)} and return it as an attribute
+    of the result.
+  }
+  \item{g, gW}{
+    Optional. Set covariance of \code{W}, if it has already been
+    computed. Not required if \code{W} is a rectangle.
+  }
+}
+\details{
+  The function \code{edge.Trans}
+  computes Ohser and Stoyan's translation edge correction
+  weight, which is used in estimating the \eqn{K} function and in many
+  other contexts.
+
+  The function \code{rmax.Trans} computes the maximum value of
+  distance \eqn{r} for which the translation edge correction
+  estimate of \eqn{K(r)} is valid.
+  
+  For a pair of points \eqn{x} and \eqn{y} in a window \eqn{W},
+  the translation edge correction weight
+  is
+  \deqn{
+    e(x, y) = \frac{\mbox{area}(W)}{\mbox{area}(W \cap (W + y - x))}
+  }{
+    e(x, y) = area(W) / area(intersect(W, W + y - x))
+  }
+  where \eqn{W + y - x} is the result of shifting the window \eqn{W}
+  by the vector \eqn{y - x}. The denominator is the area of the overlap between
+  this shifted window and the original window.
+
+  The function \code{edge.Trans} computes this edge correction weight.
+  If \code{paired=TRUE}, then \code{X} and \code{Y} should contain the
+  same number of points. The result is a vector containing the
+  edge correction weights \code{e(X[i], Y[i])} for each \code{i}.
+
+  If \code{paired=FALSE}, 
+  then the result is a matrix whose \code{i,j} entry gives the
+  edge correction weight \code{e(X[i], Y[j])}.
+
+  Computation is exact if the window is a rectangle.
+  Otherwise,
+  \itemize{
+    \item if \code{exact=TRUE}, the edge
+    correction weights are computed exactly using 
+    \code{\link{overlap.owin}}, which can be quite slow.
+    \item if \code{exact=FALSE} (the default),
+    the weights are computed rapidly by evaluating the
+    set covariance function \code{\link{setcov}}
+    using the Fast Fourier Transform.
+  }
+  If any value of the edge correction weight exceeds \code{trim},
+  it is set to \code{trim}.
+
+  The arguments \code{dx} and \code{dy} can be provided as
+  an alternative to \code{X} and \code{Y}.
+  If \code{paired=TRUE} then \code{dx,dy} should be vectors of equal length
+  such that the vector difference of the \eqn{i}th pair is
+  \code{c(dx[i], dy[i])}. If \code{paired=FALSE} then 
+  \code{dx,dy} should be matrices of the same dimensions,
+  such that the vector difference between \code{X[i]} and \code{Y[j]} is
+  \code{c(dx[i,j], dy[i,j])}. The argument \code{W} is needed.
+
+  The value of \code{rmax.Trans} is the shortest distance from the
+  origin \eqn{(0,0)} to the boundary of the support of
+  the set covariance function of \code{W}. It is computed by pixel
+  approximation using \code{\link{setcov}}, unless \code{W} is a
+  rectangle, when \code{rmax.Trans(W)} is the length of the
+  shortest side of the rectangle.
+}
+\value{
+  Numeric vector or matrix.
+}
+\references{
+  Ohser, J. (1983)
+  On estimators for the reduced second moment measure of
+  point processes. \emph{Mathematische Operationsforschung und
+  Statistik, series Statistics}, \bold{14}, 63 -- 71.
+}
+\seealso{
+  \code{\link{rmax.Trans}},
+  \code{\link{edge.Ripley}},
+  \code{\link{setcov}},
+  \code{\link{Kest}}
+}
+\examples{
+  v <- edge.Trans(cells)
+  rmax.Trans(Window(cells))
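+
+  # paired weights (a sketch): compare each point with the next point
+  # in the same pattern
+  n <- npoints(cells)
+  wp <- edge.Trans(cells[-n], cells[-1], W=Window(cells), paired=TRUE)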
+}
+\author{\adrian
+  and \rolf.
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/edges.Rd b/man/edges.Rd
new file mode 100644
index 0000000..7f42315
--- /dev/null
+++ b/man/edges.Rd
@@ -0,0 +1,49 @@
+\name{edges}
+\alias{edges}
+\title{
+  Extract Boundary Edges of a Window
+}
+\description{
+  Extracts the boundary edges of a window
+  and returns them as a line segment pattern.
+}
+\usage{
+  edges(x, \dots, window = NULL, check = FALSE) 
+}
+\arguments{
+  \item{x}{
+    A window (object of class \code{"owin"}), or
+    data acceptable to \code{\link{as.owin}}, specifying the window
+    whose boundary is to be extracted.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{window}{
+    Window to contain the resulting line segments.
+    Defaults to \code{as.rectangle(x)}.
+  }
+  \item{check}{
+    Logical. Whether to check the validity of the resulting
+    segment pattern.
+  }
+}
+\details{
+  The boundary edges of the window \code{x} will be extracted as a line
+  segment pattern.
+}
+\value{
+  A line segment pattern (object of class \code{"psp"}).
+}
+\seealso{
+  \code{\link{perimeter}} for calculating the total length of the boundary.
+}
+\examples{
+  edges(square(1))
+  edges(letterR)
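+
+  # total length of the boundary edges matches perimeter() (a sketch)
+  b <- edges(letterR)
+  sum(lengths.psp(b))
+  perimeter(letterR)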
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/edges2triangles.Rd b/man/edges2triangles.Rd
new file mode 100644
index 0000000..3d28cd6
--- /dev/null
+++ b/man/edges2triangles.Rd
@@ -0,0 +1,63 @@
+\name{edges2triangles}
+\alias{edges2triangles}
+\title{
+  List Triangles in a Graph
+}
+\description{
+  Given a list of edges between vertices,
+  compile a list of all triangles formed by these edges.
+}
+\usage{
+edges2triangles(iedge, jedge, nvert=max(iedge, jedge), \dots,
+                check=TRUE, friendly=rep(TRUE, nvert))
+}
+\arguments{
+  \item{iedge,jedge}{
+    Integer vectors, of equal length, specifying the edges.
+  }
+  \item{nvert}{
+    Number of vertices in the network.
+  }
+  \item{\dots}{Ignored}
+  \item{check}{Logical. Whether to check validity of input data.}
+  \item{friendly}{
+    Optional. For advanced use. See Details.
+  }
+}
+\details{
+  This low level function finds all the triangles (cliques of size 3)
+  in a finite graph with \code{nvert} vertices and with edges
+  specified by \code{iedge, jedge}. 
+
+  The interpretation of \code{iedge, jedge} is that each pair of
+  corresponding entries specifies an edge in the graph.
+  The \eqn{k}th edge joins vertex \code{iedge[k]} to vertex \code{jedge[k]}.
+  Entries of \code{iedge} and \code{jedge} must be integers
+  from 1 to \code{nvert}.
+
+  To improve efficiency in some applications, the optional argument
+  \code{friendly} can be used. It should be a logical vector of
+  length \code{nvert} specifying a labelling of the vertices,
+  such that two vertices \code{j,k} which are \emph{not} friendly
+  (\code{friendly[j] = friendly[k] = FALSE})
+  are \emph{never} connected by an edge. 
+}
+\value{
+  A 3-column matrix of integers, in which each row represents a triangle.
+}
+\seealso{
+  \code{\link{edges2vees}}
+}
+\author{\adrian
+  and \rolf.
+}
+\examples{
+ i <- c(1, 2, 5, 5, 1, 4, 2)
+ j <- c(2, 3, 3, 1, 3, 2, 5)
+ edges2triangles(i, j)
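+ # 'friendly' (a sketch): vertices 4 and 5 share no edge, so both may
+ # safely be labelled unfriendly to speed up the search
+ edges2triangles(i, j, nvert=5, friendly=c(TRUE,TRUE,TRUE,FALSE,FALSE))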
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/edges2vees.Rd b/man/edges2vees.Rd
new file mode 100644
index 0000000..37babe9
--- /dev/null
+++ b/man/edges2vees.Rd
@@ -0,0 +1,58 @@
+\name{edges2vees}
+\alias{edges2vees}
+\title{
+  List Dihedral Triples in a Graph
+}
+\description{
+  Given a list of edges between vertices,
+  compile a list of all \sQuote{vees} or dihedral triples
+  formed by these edges.
+}
+\usage{
+edges2vees(iedge, jedge, nvert=max(iedge, jedge), \dots,
+                check=TRUE)
+}
+\arguments{
+  \item{iedge,jedge}{
+    Integer vectors, of equal length, specifying the edges.
+  }
+  \item{nvert}{
+    Number of vertices in the network.
+  }
+  \item{\dots}{Ignored}
+  \item{check}{Logical. Whether to check validity of input data.}
+}
+\details{
+  Given a finite graph with \code{nvert} vertices and with edges
+  specified by \code{iedge, jedge}, this low-level function
+  finds all \sQuote{vees} or \sQuote{dihedral triples}
+  in the graph, that is, all triples
+  of vertices \code{(i,j,k)} where \code{i} and \code{j} are joined by
+  an edge and \code{i} and \code{k} are joined by an edge. 
+
+  The interpretation of \code{iedge, jedge} is that each pair of
+  corresponding entries specifies an edge in the graph.
+  The \eqn{k}th edge joins vertex \code{iedge[k]} to vertex \code{jedge[k]}.
+  Entries of \code{iedge} and \code{jedge} must be integers
+  from 1 to \code{nvert}.
+}
+\value{
+  A 3-column matrix of integers, in which each row represents a triple
+  of vertices, with the first vertex joined to the other two vertices.
+}
+\seealso{
+  \code{\link{edges2triangles}}
+}
+\author{\adrian
+  and \rolf.
+}
+\examples{
+ i <- c(1, 2, 5, 5, 1, 4, 2)
+ j <- c(2, 3, 3, 1, 3, 2, 5)
+ edges2vees(i, j)
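+ # each vertex of degree d contributes choose(d, 2) vees (a sketch)
+ nrow(edges2vees(i, j))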
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/edit.hyperframe.Rd b/man/edit.hyperframe.Rd
new file mode 100644
index 0000000..bac8e65
--- /dev/null
+++ b/man/edit.hyperframe.Rd
@@ -0,0 +1,56 @@
+\name{edit.hyperframe}
+\alias{edit.hyperframe}
+\title{
+  Invoke Text Editor on Hyperframe
+}
+\description{
+  Invokes a text editor allowing the user to inspect and change
+  entries in a hyperframe.
+}
+\usage{
+\method{edit}{hyperframe}(name, \dots)
+}
+\arguments{
+  \item{name}{
+    A hyperframe (object of class \code{"hyperframe"}).
+  }
+  \item{\dots}{
+    Other arguments passed to \code{\link[utils]{edit.data.frame}}.
+  }
+}
+\details{
+  The function \code{\link[utils]{edit}} is generic. This function
+  is the method for objects of class \code{"hyperframe"}.
+
+  The hyperframe \code{name} is converted to a data frame or array,
+  and the text editor is invoked. The user can change entries in the
+  columns of data, and create new columns of data.
+
+  Only the columns of atomic data
+  (numbers, characters, factor values, etc.) can be edited.
+  
+  Note that the original object \code{name} is not changed;
+  the function returns the edited dataset.
+}
+\value{
+  Another hyperframe.
+}
+\author{
+  \adrian, \rolf and \ege.
+}
+\seealso{
+  \code{\link[utils]{edit.data.frame}},
+  \code{\link{edit.ppp}}
+}
+\examples{
+  if(interactive()) Z <- edit(flu)
+}
+\keyword{spatial}
+\keyword{manip}
+
diff --git a/man/edit.ppp.Rd b/man/edit.ppp.Rd
new file mode 100644
index 0000000..00ed481
--- /dev/null
+++ b/man/edit.ppp.Rd
@@ -0,0 +1,68 @@
+\name{edit.ppp}
+\alias{edit.ppp}
+\alias{edit.psp}
+\alias{edit.im}
+\title{
+  Invoke Text Editor on Spatial Data
+}
+\description{
+  Invokes a text editor allowing the user to inspect and change
+  entries in a spatial dataset. 
+}
+\usage{
+\method{edit}{ppp}(name, \dots)
+
+\method{edit}{psp}(name, \dots)
+
+\method{edit}{im}(name, \dots)
+}
+\arguments{
+  \item{name}{
+    A spatial dataset
+    (object of class \code{"ppp"}, \code{"psp"} or \code{"im"}).
+  }
+  \item{\dots}{
+    Other arguments passed to \code{\link[utils]{edit.data.frame}}.
+  }
+}
+\details{
+  The function \code{\link[utils]{edit}} is generic. These functions
+  are methods for spatial
+  objects of class \code{"ppp"}, \code{"psp"} and \code{"im"}.
+
+  The spatial dataset \code{name} is converted to a data frame or array,
+  and the text editor is invoked. The user can change the values of spatial
+  coordinates or marks of the points in a point pattern,
+  or the coordinates or marks of the segments in a segment pattern,
+  or the pixel values in an image. 
+  The names of the columns of marks can also be edited.
+
+  If \code{name} is a pixel image, it is converted to a matrix 
+  and displayed in the same spatial orientation as if
+  the image had been plotted.
+
+  Note that the original object \code{name} is not changed;
+  the function returns the edited dataset.
+}
+\value{
+  Object of the same kind as \code{name} containing the edited data.
+}
+\author{
+  \adrian, \rolf and \ege.
+}
+\seealso{
+  \code{\link[utils]{edit.data.frame}},
+  \code{\link{edit.hyperframe}}
+}
+\examples{
+  if(interactive()) Z <- edit(cells)
+}
+\keyword{spatial}
+\keyword{manip}
+
diff --git a/man/eem.Rd b/man/eem.Rd
new file mode 100644
index 0000000..3706e19
--- /dev/null
+++ b/man/eem.Rd
@@ -0,0 +1,84 @@
+\name{eem}
+\alias{eem}
+\title{
+  Exponential Energy Marks
+}
+\description{
+  Given a point process model fitted to a point pattern,
+  compute the Stoyan-Grabarnik diagnostic ``exponential energy marks''
+  for the data points. 
+}
+\usage{
+  eem(fit, check=TRUE)
+}
+\arguments{
+  \item{fit}{
+    The fitted point process model. An object of class \code{"ppm"}.
+  }
+  \item{check}{
+    Logical value indicating whether to check the internal format
+    of \code{fit}. If there is any possibility that this object
+    has been restored from a dump file, or has otherwise lost track of
+    the environment where it was originally computed, set
+    \code{check=TRUE}. 
+  }
+}
+\value{
+  A vector containing the values of the exponential energy mark
+  for each point in the pattern.
+}
+\details{
+  Stoyan and Grabarnik (1991) proposed a diagnostic
+  tool for point process models fitted to spatial point pattern data.
+  Each point \eqn{x_i}{x[i]} of the data pattern \eqn{X}
+  is given a `mark' or `weight'
+  \deqn{m_i = \frac 1 {\hat\lambda(x_i,X)}}{m[i] = 1/\lambda(x[i],X)}
+  where \eqn{\hat\lambda(x_i,X)}{\lambda(x[i],X)}
+  is the conditional intensity of the fitted model.
+  If the fitted model is correct, then the sum of these marks
+  for all points in a region \eqn{B} has expected value equal to the
+  area of \eqn{B}.
+  
+  The argument \code{fit} must be a fitted point process model
+  (object of class \code{"ppm"}). Such objects are produced by the maximum
+  pseudolikelihood fitting algorithm \code{\link{ppm}}.
+  This fitted model object contains complete
+  information about the original data pattern and the model that was
+  fitted to it.
+
+  The value returned by \code{eem} is the vector
+  of weights \eqn{m_i}{m[i]} associated with the points \eqn{x_i}{x[i]}
+  of the original data pattern. The original data pattern
+  (in corresponding order) can be
+  extracted from \code{fit} using \code{\link{data.ppm}}.
+  
+  The function \code{\link{diagnose.ppm}}
+  produces a set of sensible diagnostic plots based on these weights.
+}
+\references{
+  Stoyan, D. and Grabarnik, P. (1991)
+  Second-order characteristics for stochastic structures connected with
+  Gibbs point processes.
+  \emph{Mathematische Nachrichten}, 151:95--100.
+}
+\seealso{
+ \code{\link{diagnose.ppm}},
+ \code{\link{ppm.object}},
+ \code{\link{data.ppm}},
+ \code{\link{residuals.ppm}},
+ \code{\link{ppm}}
+}
+\examples{
+    data(cells)
+    fit <- ppm(cells, ~x, Strauss(r=0.15))
+    ee <- eem(fit)
+    sum(ee)/area(Window(cells)) # should be about 1 if model is correct
+    Y <- setmarks(cells, ee)
+    plot(Y, main="Cells data\n Exponential energy marks")
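+    # the same weights drive the diagnostic plots of diagnose.ppm
+    # (a sketch; requires a display device)
+    if(interactive()) diagnose.ppm(fit, type="eem")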
+}
+\author{
+  \adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/effectfun.Rd b/man/effectfun.Rd
new file mode 100644
index 0000000..fd6cfa3
--- /dev/null
+++ b/man/effectfun.Rd
@@ -0,0 +1,106 @@
+\name{effectfun}
+\alias{effectfun}
+\title{Compute Fitted Effect of a Spatial Covariate in a Point Process Model}
+\description{
+  Compute the trend or intensity of a fitted point process model
+  as a function of one of its covariates.
+}
+\usage{
+  effectfun(model, covname, \dots, se.fit=FALSE)
+}
+\arguments{
+  \item{model}{
+    A fitted point process model (object of class
+    \code{"ppm"}, \code{"kppm"}, \code{"lppm"}, \code{"dppm"}, \code{"rppm"}
+    or \code{"profilepl"}).
+  }
+  \item{covname}{
+    The name of the covariate. A character string.
+    (Needed only if the model has more than one covariate.)
+  }
+  \item{\dots}{
+    The fixed values of other covariates (in the form
+    \code{name=value}) if required.
+  }
+  \item{se.fit}{
+    Logical. If \code{TRUE}, asymptotic standard errors of the estimates
+    will be computed, together with a 95\% confidence interval.
+  }
+}
+\details{
+  The object \code{model} should be an object of class
+  \code{"ppm"}, \code{"kppm"}, \code{"lppm"}, \code{"dppm"}, \code{"rppm"}
+    or \code{"profilepl"}
+  representing a point process model fitted to point pattern data.
+
+  The model's trend formula should involve a spatial covariate
+  named \code{covname}. This could be \code{"x"} or \code{"y"}
+  representing one of the Cartesian coordinates.
+  More commonly the covariate
+  is another, external variable that was supplied when fitting the model.
+    
+  The command \code{effectfun} computes the fitted trend 
+  of the point process \code{model} as a function of the covariate
+  named \code{covname}. 
+  The return value can be plotted immediately, giving a
+  plot of the fitted trend against the value of the covariate.
+
+  If the model also involves covariates other than \code{covname},
+  then these covariates will be held fixed. Values for
+  these other covariates must be provided as arguments
+  to \code{effectfun} in the form \code{name=value}. 
+
+  If \code{se.fit=TRUE}, the algorithm also calculates
+  the asymptotic standard error of the fitted trend,
+  and a (pointwise) asymptotic 95\% confidence interval for the
+  true trend.
+  
+  This command is just a wrapper for the prediction method
+  \code{\link{predict.ppm}}. For more complicated computations
+  about the fitted intensity, use \code{\link{predict.ppm}}.
+}
+\section{Trend and intensity}{
+  For a Poisson point process model, the trend is the same as the
+  intensity of the point process. For a more general Gibbs model, the trend
+  is the first order potential in the model (the first order term in the
+  Gibbs representation). In Poisson or Gibbs models fitted by
+  \code{\link{ppm}}, the trend is the only part of the model that
+  depends on the covariates.
+}
+\section{Determinantal point process models with fixed intensity}{
+  The function \code{\link{dppm}} which fits 
+  a determinantal point process model allows the user to specify the
+  intensity \code{lambda}. In such cases the effect function is
+  undefined, and \code{effectfun} stops with an error message.
+}
+\value{
+  A data frame containing a column of values of the covariate and a column
+  of values of the fitted trend.
+  If \code{se.fit=TRUE}, there are 3 additional columns containing the
+  standard error and the upper and lower limits of a confidence interval.
+
+  If the covariate named \code{covname} is numeric (rather than a factor
+  or logical variable), the return value is
+  also of class \code{"fv"} so that it can be plotted immediately.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{predict.ppm}},
+  \code{\link{fv.object}}
+}
+\examples{
+  X <- copper$SouthPoints
+  D <- distfun(copper$SouthLines)
+  fit <- ppm(X ~ polynom(D, 5))
+  effectfun(fit)
+  plot(effectfun(fit, se.fit=TRUE))
+
+  fitx <- ppm(X ~ x + polynom(D, 5))
+  plot(effectfun(fitx, "D", x=20))
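+
+  # inspect the fitted trend and confidence limits as a data frame (a sketch)
+  E <- effectfun(fitx, "D", x=20, se.fit=TRUE)
+  head(as.data.frame(E))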
+}
+\author{
+  \adrian
+  and \rolf.
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/ellipse.Rd b/man/ellipse.Rd
new file mode 100644
index 0000000..9f19a5f
--- /dev/null
+++ b/man/ellipse.Rd
@@ -0,0 +1,76 @@
+\name{ellipse}
+\alias{ellipse}
+\title{
+  Elliptical Window
+}
+\description{
+  Create an elliptical window.
+}
+\usage{
+   ellipse(a, b, centre=c(0,0), phi=0, \dots, mask=FALSE, npoly = 128)
+}
+\arguments{
+  \item{a,b}{
+    The half-lengths of the axes of the ellipse.
+  }
+  \item{centre}{
+    The centre of the ellipse.
+  }
+  \item{phi}{
+    The (anti-clockwise) angle through which the ellipse should be
+    rotated (about its centre) starting from an orientation in which
+    the axis of half-length \code{a} is horizontal.
+  }
+  \item{mask}{
+    Logical value controlling the type of approximation
+    to a perfect ellipse. See Details.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}} to determine the
+    pixel resolution, if \code{mask} is \code{TRUE}.
+  }
+  \item{npoly}{
+    The number of edges in the polygonal approximation to the ellipse.
+  }
+}
+\details{
+  This command creates a window object
+  representing an ellipse with the given centre and axes.
+
+  By default, the ellipse is
+  approximated by a polygon with \code{npoly} edges.
+
+  If \code{mask=TRUE}, then the ellipse is approximated by a binary pixel
+  mask. The resolution of the mask is controlled by
+  the arguments \code{\dots} which are passed to \code{\link{as.mask}}.
+
+  The arguments \code{a} and \code{b} must be single positive numbers.
+  The argument \code{centre} specifies the ellipse centre: it can be either 
+  a numeric vector of length 2 giving the coordinates,
+  or a \code{list(x,y)} giving the coordinates of exactly one point, or a
+  point pattern (object of class \code{"ppp"}) containing exactly one point.
+}
+\value{
+  An object of class \code{"owin"} (either of type \dQuote{polygonal}
+  or of type \dQuote{mask}) specifying an elliptical window.
+}
+\author{\adrian
+  and \rolf.
+}
+\seealso{
+  \code{\link{disc}},
+  \code{\link{owin.object}},
+  \code{\link{owin}},
+  \code{\link{as.mask}}
+}
+\examples{
+  W <- ellipse(a=5,b=2,centre=c(5,1),phi=pi/6)
+  plot(W,lwd=2,border="red")
+  WM <- ellipse(a=5,b=2,centre=c(5,1),phi=pi/6,mask=TRUE,dimyx=512)
+  plot(WM,add=TRUE,box=FALSE)
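+  # the area of the polygonal approximation is close to the exact
+  # value pi*a*b (a sketch)
+  area(W) / (pi * 5 * 2)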
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/emend.Rd b/man/emend.Rd
new file mode 100644
index 0000000..f130beb
--- /dev/null
+++ b/man/emend.Rd
@@ -0,0 +1,47 @@
+\name{emend}
+\alias{emend}
+\title{
+  Force Model to be Valid
+}
+\description{
+  Check whether a model is valid, and if not,
+  find the nearest model which is valid.
+}
+\usage{
+emend(object, \dots)
+}
+\arguments{
+  \item{object}{
+    A statistical model, belonging to some class.
+  }
+  \item{\dots}{Arguments passed to methods.}
+}
+\details{
+  The function \code{emend} is generic,
+  and has methods for several classes of statistical models
+  in the \pkg{spatstat} package (mostly point process models).
+  Its purpose is to check whether a given model is valid
+  (for example, that none of the model parameters are \code{NA})
+  and, if not, to find the nearest model which is valid.
+
+  See the methods for more information.
+}
+\value{
+  Another model of the same kind.
+}
+\author{
+  \adrian, \rolf and \ege.
+}
+\seealso{
+  \code{\link{emend.ppm}},
+  \code{\link{emend.lppm}},
+  \code{\link{valid}}.
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/emend.ppm.Rd b/man/emend.ppm.Rd
new file mode 100644
index 0000000..c78976d
--- /dev/null
+++ b/man/emend.ppm.Rd
@@ -0,0 +1,116 @@
+\name{emend.ppm}
+\alias{emend.ppm}
+\alias{project.ppm}
+\title{
+  Force Point Process Model to be Valid
+}
+\description{
+  Ensures that a fitted point process model 
+  satisfies the integrability conditions for existence of the point process.
+}
+\usage{
+project.ppm(object, \dots, fatal=FALSE, trace=FALSE)
+
+\method{emend}{ppm}(object, \dots, fatal=FALSE, trace=FALSE)
+}
+\arguments{
+  \item{object}{
+    Fitted point process model (object of class \code{"ppm"}).
+  }
+  \item{\dots}{Ignored.}
+  \item{fatal}{
+    Logical value indicating whether to generate an error
+    if the model cannot be projected to a valid model.
+  }
+  \item{trace}{
+    Logical value indicating whether to print a trace
+    of the decision process. 
+  }
+}
+\details{
+  The functions \code{emend.ppm} and \code{project.ppm} are identical:
+  \code{emend.ppm} is a method for the generic \code{\link{emend}},
+  while \code{project.ppm} is an older name for the same function.
+
+  The purpose of the function is to ensure that a fitted model
+  is valid.
+  
+  The model-fitting function \code{\link{ppm}}
+  fits Gibbs point process models to point pattern data.
+  By default, the fitted model returned by \code{\link{ppm}} may not
+  actually exist as a point process. 
+
+  First, some of the fitted coefficients of the model
+  may be \code{NA} or infinite values. 
+  This usually occurs when the data are insufficient to estimate
+  all the parameters. The model is said to be
+  \emph{unidentifiable} or \emph{confounded}.
+
+  Second, unlike a regression model, which is well-defined for any finite values
+  of the fitted regression coefficients, a Gibbs point process model
+  is only well-defined if the fitted interaction parameters 
+  satisfy some constraints. 
+  A famous example is the Strauss process (see \code{\link{Strauss}})
+  which exists only when the interaction parameter \eqn{\gamma}{gamma}
+  is less than or equal to 1. For values \eqn{\gamma > 1}{gamma > 1},
+  the probability density is not integrable and the process does not
+  exist (and cannot be simulated).
+
+  By default, \code{\link{ppm}} does not enforce the constraint that
+  a fitted Strauss process (for example) must satisfy
+  \eqn{\gamma \le 1}{gamma <= 1}.
+  This is because a fitted parameter value of \eqn{\gamma > 1}{gamma > 1} 
+  could be useful information for data analysis, as it indicates that
+  the Strauss model is not appropriate, and suggests a clustered model should be
+  fitted.
+
+  The function \code{emend.ppm} or \code{project.ppm}
+  modifies the model \code{object}
+  so that the model is valid. It 
+  identifies the terms in the model \code{object}
+  that are associated with illegal parameter values (i.e. parameter
+  values which are either \code{NA}, infinite, or outside their permitted
+  range). It considers all possible sub-models of \code{object}
+  obtained by deleting one or more
+  of these terms. It identifies which of these submodels are valid,
+  and chooses the valid submodel with the largest pseudolikelihood. The result
+  of \code{emend.ppm} or \code{project.ppm} is the
+  true maximum pseudolikelihood fit to the data.
+
+  For large datasets or complex models, the algorithm used in
+  \code{emend.ppm} or 
+  \code{project.ppm} may be time-consuming, because it takes time to
+  compute all the sub-models. A faster, approximate
+  algorithm can be applied by setting
+  \code{spatstat.options(project.fast=TRUE)}. This produces a
+  valid submodel, which may not be the maximum pseudolikelihood submodel.
+
+  Use the function \code{\link{valid.ppm}} to check whether a fitted model
+  object specifies a well-defined point process.
+
+  Use the expression \code{all(is.finite(coef(object)))} to determine
+  whether all parameters are identifiable.
+}
+\value{
+  Another point process model (object of class \code{"ppm"}).
+}
+\author{\adrian
+  and \rolf.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{valid.ppm}},
+  \code{\link{emend}},
+  \code{\link{spatstat.options}}
+}
+\examples{
+   fit <- ppm(redwood, ~1, Strauss(0.1))
+   coef(fit)
+   fit2 <- emend(fit)
+   coef(fit2)
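+   # check validity directly (a sketch); the Strauss fit to the
+   # clustered redwood data typically has gamma > 1 and is invalid
+   valid(fit)
+   valid(fit2)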
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/endpoints.psp.Rd b/man/endpoints.psp.Rd
new file mode 100644
index 0000000..5945dd9
--- /dev/null
+++ b/man/endpoints.psp.Rd
@@ -0,0 +1,91 @@
+\name{endpoints.psp}
+\alias{endpoints.psp}
+\title{Endpoints of Line Segment Pattern}
+\description{
+  Extracts the endpoints of each line segment
+  in a line segment pattern.
+}
+\usage{
+  endpoints.psp(x, which="both")
+}
+\arguments{
+  \item{x}{
+    A line segment pattern (object of class \code{"psp"}).
+  }
+  \item{which}{
+    String specifying which endpoint or endpoints should be
+    returned. See Details.
+  }
+}
+\value{
+  Point pattern (object of class \code{"ppp"}).
+}
+\details{
+  This function extracts one endpoint, or both endpoints,
+  from each of the line segments in \code{x},
+  and returns these points as a point pattern object.
+  
+  The argument \code{which} determines which endpoint or endpoints
+  of each line segment should be returned:
+  \describe{
+    \item{\code{which="both"}}{
+      (the default): both endpoints
+      of each line segment are returned. The result is a point pattern
+      with twice as many points as there are line segments in \code{x}.
+    }
+    \item{\code{which="first"}}{
+      select the first endpoint
+      of each line segment (returns the points with coordinates
+      \code{x$ends$x0, x$ends$y0}).
+    }
+    \item{\code{which="second"}}{
+      select the second endpoint
+      of each line segment (returns the points with coordinates
+      \code{x$ends$x1, x$ends$y1}).
+    }
+    \item{\code{which="left"}}{
+      select the left-most endpoint
+      (the endpoint with the smaller \eqn{x} coordinate)
+      of each line segment.
+    }
+    \item{\code{which="right"}}{
+      select the right-most endpoint
+      (the endpoint with the greater \eqn{x} coordinate)
+      of each line segment.
+    }
+    \item{\code{which="lower"}}{
+      select the lower endpoint
+      (the endpoint with the smaller \eqn{y} coordinate)
+      of each line segment.
+    }
+    \item{\code{which="upper"}}{
+      select the upper endpoint
+      (the endpoint with the greater \eqn{y} coordinate)
+      of each line segment.
+    }
+  }
+  The result is a point pattern. It also has an attribute
+  \code{"id"} which is an integer vector identifying
+  the segment which contributed each point.
+}
+\seealso{
+  \code{\link{psp.object}},
+  \code{\link{ppp.object}},
+  \code{\link{midpoints.psp}}
+}
+\examples{
+  a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  plot(a)
+  b <- endpoints.psp(a, "left")
+  plot(b, add=TRUE)
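+  # the "id" attribute maps each endpoint to its parent segment (a sketch)
+  head(attr(b, "id"))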
+}
+\author{
+  \adrian and \rolf.
+}
+\keyword{spatial}
+\keyword{manip}
+
diff --git a/man/envelope.Rd b/man/envelope.Rd
new file mode 100644
index 0000000..69a96b5
--- /dev/null
+++ b/man/envelope.Rd
@@ -0,0 +1,812 @@
+\name{envelope}
+\alias{envelope}
+\alias{envelope.ppp}
+\alias{envelope.ppm}
+\alias{envelope.kppm}
+\title{Simulation Envelopes of Summary Function}
+\description{
+  Computes simulation envelopes of a summary function.
+}
+\usage{
+  envelope(Y, fun, \dots)
+
+  \method{envelope}{ppp}(Y, fun=Kest, nsim=99, nrank=1, \dots,
+  funargs=list(), funYargs=funargs,
+  simulate=NULL,  fix.n=FALSE, fix.marks=FALSE,
+  verbose=TRUE, clipdata=TRUE,
+  transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, 
+  alternative=c("two.sided", "less", "greater"),
+  scale=NULL, clamp=FALSE, 
+  savefuns=FALSE, savepatterns=FALSE,
+  nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim,
+  do.pwrong=FALSE, envir.simul=NULL)
+
+  \method{envelope}{ppm}(Y, fun=Kest, nsim=99, nrank=1, \dots, 
+  funargs=list(), funYargs=funargs,
+  simulate=NULL, fix.n=FALSE, fix.marks=FALSE,
+  verbose=TRUE, clipdata=TRUE,
+  start=NULL, control=update(default.rmhcontrol(Y), nrep=nrep), nrep=1e5,
+  transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, 
+  alternative=c("two.sided", "less", "greater"), 
+  scale=NULL, clamp=FALSE, 
+  savefuns=FALSE, savepatterns=FALSE,
+  nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim,
+  do.pwrong=FALSE, envir.simul=NULL)
+
+  \method{envelope}{kppm}(Y, fun=Kest, nsim=99, nrank=1, \dots, 
+  funargs=list(), funYargs=funargs,
+  simulate=NULL,
+  verbose=TRUE, clipdata=TRUE,
+  transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, 
+  alternative=c("two.sided", "less", "greater"), 
+  scale=NULL, clamp=FALSE, 
+  savefuns=FALSE, savepatterns=FALSE,
+  nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim,
+  do.pwrong=FALSE, envir.simul=NULL)
+}
+\arguments{
+  \item{Y}{
+    Object containing point pattern data.
+    A point pattern (object of class
+    \code{"ppp"}) or a fitted point process model
+    (object of class \code{"ppm"} or \code{"kppm"}).
+  }
+  \item{fun}{
+    Function that computes the desired summary statistic
+    for a point pattern. 
+  }
+  \item{nsim}{
+    Number of simulated point patterns to be generated
+    when computing the envelopes.
+  }
+  \item{nrank}{
+    Integer. Rank of the envelope value amongst the \code{nsim} simulated
+    values. A rank of 1 means that the minimum and maximum
+    simulated values will be used.
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{fun}.
+  }
+  \item{funargs}{
+    A list, containing extra arguments to be passed to \code{fun}.
+  }
+  \item{funYargs}{
+    Optional. A list, containing extra arguments to be passed to
+    \code{fun} when applied to the original data \code{Y} only.
+  }
+  \item{simulate}{
+    Optional. Specifies how to generate the simulated point patterns.
+    If \code{simulate} is an expression in the R language, then this
+    expression will be evaluated \code{nsim} times,
+    to obtain \code{nsim} point patterns which are taken as the
+    simulated patterns from which the envelopes are computed.
+    If \code{simulate} is a list of point patterns, then the entries
+    in this list will be treated as the simulated patterns from which
+    the envelopes are computed.
+    Alternatively \code{simulate} may be an object produced by the
+    \code{envelope} command: see Details.
+  }
+  \item{fix.n}{
+    Logical. If \code{TRUE}, simulated patterns will have the
+    same number of points as the original data pattern.
+    This option is currently not available for \code{envelope.kppm}.
+  }
+  \item{fix.marks}{
+    Logical. If \code{TRUE}, simulated patterns will have the
+    same number of points \emph{and} the same marks as the
+    original data pattern. In a multitype point pattern this means that
+    the simulated patterns will have the same number of points
+    \emph{of each type} as the original data.
+    This option is currently not available for \code{envelope.kppm}.
+  }
+  \item{verbose}{
+    Logical flag indicating whether to print progress reports
+    during the simulations.
+  }
+  \item{clipdata}{
+    Logical flag indicating whether the data point pattern should be
+    clipped to the same window as the simulated patterns,
+    before the summary function for the data is computed.
+    This should usually be \code{TRUE} to ensure that the
+    data and simulations are properly comparable.
+  }
+  \item{start,control}{
+    Optional. These specify the arguments \code{start} and \code{control}
+    of \code{rmh}, giving complete control over the simulation
+    algorithm. Applicable only when \code{Y} is a fitted model
+    of class \code{"ppm"}.
+  }
+  \item{nrep}{
+    Number of iterations in the Metropolis-Hastings simulation
+    algorithm. Applicable only when \code{Y} is a fitted model
+    of class \code{"ppm"}.
+  }
+  \item{transform}{
+    Optional. A transformation to be applied to the
+    function values, before the envelopes are computed.
+    An expression object (see Details).
+  }
+  \item{global}{
+    Logical flag indicating whether envelopes should be pointwise
+    (\code{global=FALSE}) or simultaneous (\code{global=TRUE}).
+  }
+  \item{ginterval}{
+    Optional.
+    A vector of length 2 specifying
+    the interval of \eqn{r} values for the simultaneous critical
+    envelopes. Only relevant if \code{global=TRUE}.
+  }
+  \item{use.theory}{
+    Logical value indicating whether to use the theoretical value,
+    computed by \code{fun}, as the reference value for simultaneous
+    envelopes. Applicable only when \code{global=TRUE}.
+    Default is \code{use.theory=TRUE} if \code{Y} is a point pattern,
+    or a point process model equivalent to Complete Spatial Randomness,
+    and \code{use.theory=FALSE} otherwise.
+  }
+  \item{alternative}{
+    Character string determining whether the envelope corresponds
+    to a two-sided test (\code{alternative="two.sided"}, the default)
+    or a one-sided test with a lower critical boundary
+    (\code{alternative="less"}) or a one-sided test
+    with an upper critical boundary (\code{alternative="greater"}).
+  }
+  \item{scale}{
+    Optional. Scaling function for global envelopes.
+    A function in the \R language which determines the
+    relative scale of deviations, as a function of
+    distance \eqn{r}, when computing the global envelopes.
+    Applicable only when \code{global=TRUE}.
+    Summary function values for distance \code{r}
+    will be \emph{divided} by \code{scale(r)} before the
+    maximum deviation is computed. The resulting global envelopes
+    will have width proportional to \code{scale(r)}. 
+  }
+  \item{clamp}{
+    Logical value indicating how to compute envelopes when
+    \code{alternative="less"} or \code{alternative="greater"}.
+    Deviations of the observed
+    summary function from the theoretical summary function are initially
+    evaluated as signed real numbers, with large positive values indicating
+    consistency with the alternative hypothesis.
+    If \code{clamp=FALSE} (the default), these values are not changed.
+    If \code{clamp=TRUE}, any negative values are replaced by zero.
+  }
+  \item{savefuns}{
+    Logical flag indicating whether to save all the simulated
+    function values.
+  }
+  \item{savepatterns}{
+    Logical flag indicating whether to save all the simulated
+    point patterns.
+  }
+  \item{nsim2}{
+    Number of extra simulated point patterns to be generated
+    if it is necessary to use simulation to estimate the theoretical
+    mean of the summary function. Only relevant when \code{global=TRUE}
+    and the simulations are not based on CSR.
+  }
+  \item{VARIANCE}{
+    Logical. If \code{TRUE}, critical envelopes will be calculated
+    as sample mean plus or minus \code{nSD} times sample standard
+    deviation.
+  }
+  \item{nSD}{
+    Number of estimated standard deviations used to determine
+    the critical envelopes, if \code{VARIANCE=TRUE}.
+  }
+  \item{Yname}{
+    Character string that should be used as the name of the 
+    data point pattern \code{Y} when printing or plotting the results.
+  }
+  \item{maxnerr}{
+    Maximum number of rejected patterns.
+    If \code{fun} yields an error when applied to a simulated point
+    pattern (for example, because the pattern is empty and \code{fun}
+    requires at least one point), the pattern will be rejected
+    and a new random point pattern will be generated. If this happens
+    more than \code{maxnerr} times, the algorithm will give up.
+  }
+  \item{do.pwrong}{
+    Logical. If \code{TRUE}, the algorithm will also estimate
+    the true significance level of the \dQuote{wrong} test (the test that
+    declares the summary function for the data to be significant
+    if it lies outside the \emph{pointwise} critical boundary at any
+    point). This estimate is printed when the result is printed.
+  }
+  \item{envir.simul}{
+    Environment in which to evaluate the expression \code{simulate},
+    if not the current environment.
+  }
+}
+\value{
+  An object of class \code{"envelope"}
+  and \code{"fv"}, see \code{\link{fv.object}},
+  which can be printed and plotted directly.
+
+  Essentially a data frame containing columns
+  \item{r}{the vector of values of the argument \eqn{r} 
+    at which the summary function \code{fun} has been estimated
+  }
+  \item{obs}{
+    values of the summary function for the data point pattern
+  }
+  \item{lo}{
+    lower envelope of simulations
+  }
+  \item{hi}{
+    upper envelope of simulations
+  }
+  and \emph{either}
+  \item{theo}{
+    theoretical value of the summary function under CSR
+    (Complete Spatial Randomness, a uniform Poisson point process)
+    if the simulations were generated according to CSR
+  }
+  \item{mmean}{
+    estimated theoretical value of the summary function,
+    computed by averaging simulated values, 
+    if the simulations were not generated according to CSR.
+  }
+  Additionally, if \code{savepatterns=TRUE}, the return value has an attribute
+  \code{"simpatterns"} which is a list containing the \code{nsim}
+  simulated patterns. If \code{savefuns=TRUE}, the return value
+  has an attribute \code{"simfuns"} which is an object of class
+  \code{"fv"} containing the summary functions
+  computed for each of the \code{nsim} simulated patterns.
+}
+\details{
+  The \code{envelope} command performs simulations and
+  computes envelopes of a summary statistic based on the simulations.
+  The result is an object that can be plotted to display the envelopes.
+  The envelopes can be used to assess the goodness-of-fit of
+  a point process model to point pattern data.
+
+  For the most basic use, if you have a point pattern \code{X} and
+  you want to test Complete Spatial Randomness (CSR), type
+  \code{plot(envelope(X, Kest, nsim=39))} to see the \eqn{K} function
+  for \code{X} plotted together with the envelopes of the
+  \eqn{K} function for 39 simulations of CSR. 
+  
+  The \code{envelope} function is generic, with methods for
+  the classes \code{"ppp"}, \code{"ppm"} and \code{"kppm"}
+  described here. There are also methods for the classes \code{"pp3"},
+  \code{"lpp"} and \code{"lppm"} which are described separately
+  under \code{\link{envelope.pp3}} and \code{\link{envelope.lpp}}.
+  Envelopes can also be computed from other envelopes, using
+  \code{\link{envelope.envelope}}.
+  
+  To create simulation envelopes, the command \code{envelope(Y, ...)} 
+  first generates \code{nsim} random point patterns
+  in one of the following ways. 
+  \itemize{
+    \item 
+    If \code{Y} is a point pattern (an object of class \code{"ppp"})
+    and \code{simulate=NULL},
+    then we generate \code{nsim} simulations of
+    Complete Spatial Randomness (i.e. \code{nsim} simulated point patterns
+    each being a realisation of the uniform Poisson point process)
+    with the same intensity as the pattern \code{Y}.
+    (If \code{Y} is a multitype point pattern, then the simulated patterns
+    are also given independent random marks; the probability
+    distribution of the random marks is determined by the
+    relative frequencies of marks in \code{Y}.)
+    \item
+    If \code{Y} is a fitted point process model (an object of class
+    \code{"ppm"} or \code{"kppm"}) and \code{simulate=NULL},
+    then this routine generates \code{nsim} simulated
+    realisations of that model.
+    \item
+    If \code{simulate} is supplied, then it determines how the
+    simulated point patterns are generated. It may be either
+    \itemize{
+      \item
+      an expression in the R language, typically containing a call
+      to a random generator. This expression will be evaluated
+      \code{nsim} times to yield \code{nsim} point patterns. For example
+      if \code{simulate=expression(runifpoint(100))} then each simulated
+      pattern consists of exactly 100 independent uniform random points.
+      \item
+      a list of point patterns.
+      The entries in this list will be taken as the simulated patterns.
+      \item
+      an object of class \code{"envelope"}. This should have been
+      produced by calling \code{envelope} with the
+      argument \code{savepatterns=TRUE}.
+      The simulated point patterns that were saved in this object
+      will be extracted and used as the simulated patterns for the
+      new envelope computation. This makes it possible to plot envelopes
+      for two different summary functions based on exactly the same set of
+      simulated point patterns.
+      }
+  }
+  
+  The summary statistic \code{fun} is applied to each of these simulated
+  patterns. Typically \code{fun} is one of the functions
+  \code{Kest}, \code{Gest}, \code{Fest}, \code{Jest}, \code{pcf},
+  \code{Kcross}, \code{Kdot}, \code{Gcross}, \code{Gdot},
+  \code{Jcross}, \code{Jdot}, \code{Kmulti}, \code{Gmulti},
+  \code{Jmulti} or \code{Kinhom}. It may also be a character string
+  containing the name of one of these functions.
+
+  The statistic \code{fun} can also be a user-supplied function;
+  if so, then it must have arguments \code{X} and \code{r}
+  like those in the functions listed above, and it must return an object
+  of class \code{"fv"}.
+
+  Upper and lower critical envelopes are computed in one of the following ways:
+  \describe{
+    \item{pointwise:}{by default, envelopes are calculated pointwise
+      (i.e. for each value of the distance argument \eqn{r}), by sorting the
+      \code{nsim} simulated values, and taking the \code{m}-th lowest
+      and \code{m}-th highest values, where \code{m = nrank}.
+      For example if \code{nrank=1}, the upper and lower envelopes
+      are the pointwise maximum and minimum of the simulated values.
+
+      The pointwise envelopes are \bold{not} \dQuote{confidence bands}
+      for the true value of the function! Rather,
+      they specify the critical points for a Monte Carlo test
+      (Ripley, 1981). The test is constructed by choosing a
+      \emph{fixed} value of \eqn{r}, and rejecting the null hypothesis if the
+      observed function value
+      lies outside the envelope \emph{at this value of} \eqn{r}.
+      This test has exact significance level
+      \code{alpha = 2 * nrank/(1 + nsim)}.
+    }
+    \item{simultaneous:}{if \code{global=TRUE}, then the envelopes are
+      determined as follows. First we calculate the theoretical mean value of
+      the summary statistic (if we are testing CSR, the theoretical
+      value is supplied by \code{fun}; otherwise we perform a separate
+      set of \code{nsim2} simulations, compute the
+      average of all these simulated values, and take this average
+      as an estimate of the theoretical mean value). Then, for each simulation,
+      we compare the simulated curve to the theoretical curve, and compute the
+      maximum absolute difference between them (over the interval
+      of \eqn{r} values specified by \code{ginterval}). This gives a
+      deviation value \eqn{d_i}{d[i]} for each of the \code{nsim}
+      simulations. Finally we take the \code{m}-th largest of the
+      deviation values, where \code{m=nrank}, and call this
+      \code{dcrit}. Then the simultaneous envelopes are of the form
+      \code{lo = expected - dcrit} and \code{hi = expected + dcrit} where
+      \code{expected} is either the theoretical mean value \code{theo}
+      (if we are testing CSR) or the estimated theoretical value
+      \code{mmean} (if we are testing another model). The simultaneous critical
+      envelopes have constant width \code{2 * dcrit}.
+
+      The simultaneous critical envelopes allow us to perform a different
+      Monte Carlo test (Ripley, 1981). The test rejects the null
+      hypothesis if the graph of the observed function
+      lies outside the envelope \bold{at any value of} \eqn{r}.
+      This test has exact significance level
+      \code{alpha = nrank/(1 + nsim)}.
+
+      This test can also be performed using \code{\link{mad.test}}.
+    }
+    \item{based on sample moments:}{if \code{VARIANCE=TRUE},
+      the algorithm calculates the
+      (pointwise) sample mean and sample variance of
+      the simulated functions. Then the envelopes are computed
+      as mean plus or minus \code{nSD} standard deviations.
+      These envelopes do not have an exact significance interpretation.
+      They are a naive approximation to
+      the critical points of the Neyman-Pearson test
+      assuming the summary statistic is approximately Normally
+      distributed.
+    }
+  }
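+
+  For example, with \code{nsim=99} and \code{nrank=1}, the pointwise
+  test has exact significance level
+  \eqn{2 \times 1/(1+99) = 0.02}{2 * 1/(1+99) = 0.02},
+  while the corresponding simultaneous (global) test has exact level
+  \eqn{1/(1+99) = 0.01}.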
+  
+  The return value is an object of class \code{"fv"} containing
+  the summary function for the data point pattern,
+  the upper and lower simulation envelopes, and 
+  the theoretical expected value (exact or estimated) of the summary function 
+  for the model being tested. It can be plotted
+  using \code{\link{plot.envelope}}.
+
+  If \code{VARIANCE=TRUE} then the return value also includes the
+  sample mean, sample variance and other quantities.
+
+  Arguments can be passed to the function \code{fun} through
+  \code{...}. This means that you simply specify these arguments in the call to
+  \code{envelope}, and they will be passed to \code{fun}.
+  In particular, the argument \code{correction}
+  determines the edge correction to be used to calculate the summary
+  statistic. See the section on Edge Corrections, and the Examples.
+
+  Arguments can also be passed to the function \code{fun}
+  through the list \code{funargs}. This mechanism is typically used if
+  an argument of \code{fun} has the same name as an argument of
+  \code{envelope}. The list \code{funargs} should contain
+  entries of the form \code{name=value}, where each \code{name} is the name
+  of an argument of \code{fun}.
+
+  There is also an option, rarely used, in which different function
+  arguments are used when computing the summary function
+  for the data \code{Y} and for the simulated patterns.
+  If \code{funYargs} is given, it will be used
+  when the summary function for the data \code{Y} is computed,
+  while \code{funargs} will be used when computing the summary function
+  for the simulated patterns.
+  This option is only needed in rare cases: usually the basic principle
+  requires that the data and simulated patterns must be treated
+  equally, so that \code{funargs} and \code{funYargs} should be identical.
+
+  If \code{Y} is a fitted cluster point process model (object of
+  class \code{"kppm"}), and \code{simulate=NULL},
+  then the model is simulated directly
+  using \code{\link{simulate.kppm}}.
+  
+  If \code{Y} is a fitted Gibbs point process model (object of
+  class \code{"ppm"}), and \code{simulate=NULL},
+  then the model is simulated
+  by running the Metropolis-Hastings algorithm \code{\link{rmh}}.
+  Complete control over this algorithm is provided by the 
+  arguments \code{start} and \code{control} which are passed
+  to \code{\link{rmh}}.
+
+  For simultaneous critical envelopes (\code{global=TRUE})
+  the following options are also useful:
+  \describe{
+    \item{\code{ginterval}}{determines the interval of \eqn{r} values
+      over which the deviation between curves is calculated.
+      It should be a numeric vector of length 2.
+      There is a sensible default (namely, the recommended plotting
+      interval for \code{fun(X)}, or the range of \code{r} values if
+      \code{r} is explicitly specified).
+    }
+    \item{\code{transform}}{specifies a transformation of the
+      summary function \code{fun} that will be carried out before the
+      deviations are computed.
+      Such transforms are useful if \code{global=TRUE} or
+      \code{VARIANCE=TRUE}.
+      The \code{transform} must be an expression object
+      using the symbol \code{.} to represent the function value
+      (and possibly other symbols recognised by \code{\link{with.fv}}).
+      For example, 
+      the conventional way to normalise the \eqn{K} function
+      (Ripley, 1981) is to transform it to the \eqn{L} function
+      \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/\pi)}
+      and this is implemented by setting
+      \code{transform=expression(sqrt(./pi))}.
+    }
+  }
+
+  It is also possible to extract the summary functions for each of the
+  individual simulated point patterns, by setting \code{savefuns=TRUE}.
+  Then the return value also 
+  has an attribute \code{"simfuns"} containing all the 
+  summary functions for the individual simulated patterns.
+  It is an \code{"fv"} object containing
+  functions named \code{sim1, sim2, ...} representing the \code{nsim}
+  summary functions.
+
+  It is also possible to save the simulated point patterns themselves,
+  by setting \code{savepatterns=TRUE}. Then the return value also has
+  an attribute \code{"simpatterns"} which is a list of length
+  \code{nsim} containing all the simulated point patterns.
+
+  See \code{\link{plot.envelope}} and \code{\link{plot.fv}}
+  for information about how to plot the envelopes.
+
+  Different envelopes can be recomputed from the same data
+  using \code{\link{envelope.envelope}}.
+  Envelopes can be combined using \code{\link{pool.envelope}}.
+}
+\section{Errors and warnings}{
+  An error may be generated if one of the simulations produces a
+  point pattern that is empty, or is otherwise unacceptable to the
+  function \code{fun}.
+  
+  The upper envelope may be \code{NA} (plotted as plus or minus
+  infinity) if some of the function values
+  computed for the simulated point patterns are \code{NA}.
+  Whether this occurs will depend on the function \code{fun},
+  but it usually happens when the simulated point pattern does not contain
+  enough points to compute a meaningful value.
+}
+\section{Confidence intervals}{
+  Simulation envelopes do \bold{not} compute confidence intervals;
+  they generate significance bands. 
+  If you really need a confidence interval for the true summary function
+  of the point process, use \code{\link{lohboot}}.
+  See also \code{\link{varblock}}.
+}
+\section{Edge corrections}{
+  It is common to apply a correction for edge effects when
+  calculating a summary function such as the \eqn{K} function.
+  Typically the user has a choice between several possible edge
+  corrections.
+  In a call to \code{envelope}, the user can specify the edge correction
+  to be applied in \code{fun}, using the argument \code{correction}.
+  See the Examples below.
+
+  \describe{
+    \item{Summary functions in \pkg{spatstat}}{
+      Summary functions that are available in \pkg{spatstat}, such as
+      \code{\link{Kest}}, \code{\link{Gest}} and \code{\link{pcf}},
+      have a standard argument called \code{correction} which specifies
+      the name of one or more edge corrections.
+
+      The list of available edge
+      corrections is different for each summary function,
+      and may also depend on the kind of window in which the point pattern is
+      recorded.
+      In the
+      case of \code{Kest} (the default and most frequently used value of
+      \code{fun}) the best edge correction is Ripley's isotropic
+      correction if the window is rectangular or polygonal, 
+      and the translation correction if the window is a binary mask.
+      See the help files for the individual
+      functions for more information.
+      
+      All the summary functions in \pkg{spatstat}
+      recognise the option \code{correction="best"}
+      which gives the \dQuote{best} (most accurate) available edge correction
+      for that function. 
+
+      In a call to \code{envelope}, if \code{fun} is one of the
+      summary functions provided in \pkg{spatstat}, then the default
+      is \code{correction="best"}. This means that
+      \emph{by default, the envelope will be computed
+	using the \dQuote{best} available edge correction}.
+
+      The user can override this default by specifying the argument
+      \code{correction}. For example the computation can be accelerated
+      by choosing another edge correction which is less accurate
+      than the \dQuote{best} one, but faster to compute.
+    }
+    \item{User-written summary functions}{
+      If \code{fun} is a function written by the user,
+      then \code{envelope} has to guess what to do.
+
+      If \code{fun} has an argument
+      called \code{correction}, or has \code{\dots} arguments,
+      then \code{envelope} assumes that the function 
+      can handle a correction argument. To compute the envelope,
+      \code{fun} will be called with a \code{correction} argument.
+      The default is \code{correction="best"}, unless
+      overridden in the call to \code{envelope}
+      (see the sketch at the end of this section).
+
+      Otherwise, if \code{fun} does not have an argument
+      called \code{correction} and does not have \code{\dots} arguments,
+      then \code{envelope} assumes that the function 
+      \emph{cannot} handle a correction argument. To compute the
+      envelope, \code{fun} is called without a correction argument.
+    }
+  }
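+
+  As an illustrative sketch (the function name \code{myK} is
+  hypothetical), a user-written summary function that accepts a
+  \code{correction} argument, and therefore receives
+  \code{correction="best"} by default, could be written as follows:
+  \preformatted{
+    # hypothetical wrapper: passes the correction through to Kest
+    myK <- function(X, ..., correction="best") {
+      Kest(X, ..., correction=correction)
+    }
+    E <- envelope(X, myK, nsim=19)
+  }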
+}
+\references{
+  Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne,
+  R.K. and Nair, G. (2014) On tests of spatial pattern based on
+  simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489.
+  
+  Cressie, N.A.C. (1991) \emph{Statistics for spatial data}.
+  John Wiley and Sons.
+
+  Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}.
+  Arnold.
+
+  Ripley, B.D. (1981)
+  \emph{Spatial statistics}.
+  John Wiley and Sons.
+
+  Ripley, B.D. (1988) \emph{Statistical inference for spatial processes}.
+  Cambridge University Press.
+
+  Stoyan, D. and Stoyan, H. (1994)
+  \emph{Fractals, random shapes and point fields:
+  methods of geometrical statistics}.
+  John Wiley and Sons.
+}
+\seealso{
+  \code{\link{dclf.test}},
+  \code{\link{mad.test}}
+  for envelope-based tests.
+  
+  \code{\link{fv.object}},
+  \code{\link{plot.envelope}},
+  \code{\link{plot.fv}},
+  \code{\link{envelope.envelope}},
+  \code{\link{pool.envelope}}
+  for handling envelopes.
+  There are also methods for \code{print} and \code{summary}.
+  
+  \code{\link{Kest}},
+  \code{\link{Gest}},
+  \code{\link{Fest}},
+  \code{\link{Jest}},
+  \code{\link{pcf}},
+  \code{\link{ppp}},
+  \code{\link{ppm}},
+  \code{\link{default.expand}}
+}
+\examples{
+ X <- simdat
+
+ # Envelope of K function under CSR
+ \dontrun{
+ plot(envelope(X))
+ }
+ \testonly{
+  plot(envelope(X, nsim=3))
+ }
+
+ # Translation edge correction (this is also FASTER):
+ \dontrun{
+ plot(envelope(X, correction="translate"))
+ }
+ \testonly{
+  E <- envelope(X, nsim=3, correction="translate")
+ }
+
+# Global envelopes
+ \dontrun{
+ plot(envelope(X, Lest, global=TRUE))
+ plot(envelope(X, Kest,  global=TRUE, scale=function(r) { r }))
+ }
+ \testonly{
+  E <- envelope(X, Lest, nsim=3, global=TRUE)
+  E <- envelope(X, Kest, nsim=3, global=TRUE, scale=function(r) { r })
+  E
+  summary(E)
+ }
+
+ # Envelope of K function for simulations from Gibbs model 
+ \dontrun{
+ fit <- ppm(cells ~1, Strauss(0.05))
+ plot(envelope(fit))
+ plot(envelope(fit), global=TRUE)
+ }
+ \testonly{
+  fit <- ppm(cells ~1, Strauss(0.05), nd=20)
+  E <- envelope(fit, nsim=3, correction="border", nrep=100)
+  E <- envelope(fit, nsim=3, correction="border", global=TRUE, nrep=100)
+ }
+
+ # Envelope of K function for simulations from cluster model 
+ fit <- kppm(redwood ~1, "Thomas")
+ \dontrun{
+ plot(envelope(fit, Gest))
+ plot(envelope(fit, Gest, global=TRUE))
+ }
+ \testonly{
+  E <- envelope(fit, Gest, correction="rs", nsim=3, global=TRUE, nrep=100)
+ }
+
+ # Envelope of G function under CSR
+ \dontrun{
+ plot(envelope(X, Gest))
+ }
+ \testonly{
+  E <- envelope(X, Gest, correction="rs", nsim=3)
+ }
+
+ # Envelope of L function under CSR
+ #  L(r) = sqrt(K(r)/pi)
+ \dontrun{
+  E <- envelope(X, Kest)
+  plot(E, sqrt(./pi) ~ r)
+ }
+ \testonly{
+  E <- envelope(X, Kest, correction="border", nsim=3)
+  plot(E, sqrt(./pi) ~ r)
+ }
+
+ # Simultaneous critical envelope for L function
+ # (alternatively, use Lest)
+ \dontrun{
+  plot(envelope(X, Kest, transform=expression(sqrt(./pi)), global=TRUE))
+ }
+ \testonly{
+  E <- envelope(X, Kest, nsim=3, correction="border",
+               transform=expression(sqrt(./pi)), global=TRUE)
+ }
+
+ ## One-sided envelope
+ \dontrun{
+  plot(envelope(X, Lest, alternative="less"))
+ }
+ \testonly{
+  E <- envelope(X, Lest, nsim=3, alternative="less")
+ }
+ 
+ # How to pass arguments needed to compute the summary functions:
+ # We want envelopes for Jcross(X, "A", "B") 
+ # where "A" and "B" are types of points in the dataset 'demopat'
+
+ data(demopat)
+ \dontrun{
+ plot(envelope(demopat, Jcross, i="A", j="B"))
+ }
+ \testonly{
+ plot(envelope(demopat, Jcross, correction="rs", i="A", j="B", nsim=3))
+ }
+ 
+ # Use of `simulate'
+ \dontrun{
+ plot(envelope(cells, Gest, simulate=expression(runifpoint(42))))
+ plot(envelope(cells, Gest, simulate=expression(rMaternI(100,0.02))))
+ }
+ \testonly{
+  plot(envelope(cells, Gest, correction="rs",
+                simulate=expression(runifpoint(42)), nsim=3))
+  plot(envelope(cells, Gest, correction="rs",
+                simulate=expression(rMaternI(100, 0.02)),
+                nsim=3, global=TRUE))
+ }
+
+ # Envelope under random toroidal shifts
+ data(amacrine)
+ \dontrun{
+ plot(envelope(amacrine, Kcross, i="on", j="off",
+               simulate=expression(rshift(amacrine, radius=0.25)))) 
+ }
+
+ # Envelope under random shifts with erosion
+ \dontrun{
+ plot(envelope(amacrine, Kcross, i="on", j="off",
+              simulate=expression(rshift(amacrine, radius=0.1, edge="erode"))))
+ }
+  
+ # Envelope of INHOMOGENEOUS K-function with fitted trend
+
+ # The following is valid.
+ # Setting lambda=fit means that the fitted model is re-fitted to
+ # each simulated pattern to obtain the intensity estimates for Kinhom.
+ # (lambda=NULL would also be valid)
+
+ fit <- kppm(redwood ~1, clusters="MatClust")
+ \dontrun{
+    plot(envelope(fit, Kinhom, lambda=fit, nsim=19))
+ }
+ \testonly{
+    envelope(fit, Kinhom, lambda=fit, nsim=3)
+ }
+
+ # Note that the principle of symmetry, essential to the validity of
+ # simulation envelopes, requires that both the observed and
+ # simulated patterns be subjected to the same method of intensity
+ # estimation. In the following example it would be incorrect to set the
+ # argument 'lambda=red.dens' in the envelope command, because this
+ # would mean that the inhomogeneous K functions of the simulated
+ # patterns would be computed using the intensity function estimated
+ # from the original redwood data, violating the symmetry.  There is
+ # still a concern about the fact that the simulations are generated
+ # from a model that was fitted to the data; this is only a problem in
+ # small datasets.
+
+\dontrun{
+ red.dens <- density(redwood, sigma=bw.diggle)
+ plot(envelope(redwood, Kinhom, sigma=bw.diggle,
+         simulate=expression(rpoispp(red.dens))))
+ }
+
+ # Precomputed list of point patterns
+\dontrun{
+ nX <- npoints(X)
+ PatList <- list()
+ for(i in 1:19) PatList[[i]] <- runifpoint(nX)
+ E <- envelope(X, Kest, nsim=19, simulate=PatList)
+}
+\testonly{
+ PatList <- list()
+ for(i in 1:3) PatList[[i]] <- runifpoint(10)
+ E <- envelope(X, Kest, nsim=3, simulate=PatList)
+}
+
+# re-using the same point patterns
+\dontrun{
+ EK <- envelope(X, Kest, savepatterns=TRUE)
+ EG <- envelope(X, Gest, simulate=EK)
+}
+\testonly{
+ EK <- envelope(X, Kest, nsim=3, savepatterns=TRUE)
+ EG <- envelope(X, Gest, nsim=3, simulate=EK)
+}
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{htest}
+\keyword{hplot}
+\keyword{iteration}
diff --git a/man/envelope.envelope.Rd b/man/envelope.envelope.Rd
new file mode 100644
index 0000000..ee3039b
--- /dev/null
+++ b/man/envelope.envelope.Rd
@@ -0,0 +1,100 @@
+\name{envelope.envelope}
+\alias{envelope.envelope}
+\title{
+  Recompute Envelopes
+}
+\description{
+  Given a simulation envelope (object of class \code{"envelope"}),
+  compute another envelope from the same simulation data
+  using different parameters.
+}
+\usage{
+\method{envelope}{envelope}(Y, fun = NULL, ...,
+                            transform=NULL, global=FALSE, VARIANCE=FALSE)
+}
+\arguments{
+  \item{Y}{
+    A simulation envelope (object of class \code{"envelope"}).
+}
+  \item{fun}{
+    Optional. Summary function to be applied to the simulated point patterns.
+  }
+  \item{\dots,transform,global,VARIANCE}{
+    Parameters controlling the type of envelope that is re-computed. 
+    See \code{\link{envelope}}.
+  }
+}
+\details{
+  This function can be used to re-compute a simulation envelope
+  from previously simulated data, using different parameter settings
+  for the envelope: for example, a different
+  significance level, or a global envelope instead of a pointwise
+  envelope.
+
+  The function \code{\link{envelope}} is generic. This is the method for
+  the class \code{"envelope"}. 
+  
+  The argument \code{Y} should be a simulation envelope (object of
+  class \code{"envelope"}) produced by any of the methods for
+  \code{\link{envelope}}. Additionally, \code{Y} must contain either
+  \itemize{
+    \item the simulated point patterns that were used to create
+    the original envelope (so \code{Y} should have been created by calling
+    \code{\link{envelope}} with \code{savepatterns=TRUE});
+    \item the summary functions of the simulated point patterns
+    that were used to create
+    the original envelope (so \code{Y} should have been created by calling
+    \code{\link{envelope}} with \code{savefuns=TRUE}).
+  }
+
+  If the argument \code{fun} is given, it should be a summary function
+  that can be applied to the simulated point patterns that were
+  used to create \code{Y}. The envelope of
+  the summary function \code{fun} for these point patterns
+  will be computed using the parameters specified in \code{\dots}.
+
+  If \code{fun} is not given, then:
+  \itemize{
+    \item
+    If \code{Y} contains the summary functions that were used to
+    compute the original envelope, then the new envelope will be
+    computed from these original summary functions.
+    \item
+    Otherwise, if \code{Y} contains the simulated point patterns,
+    then the \eqn{K} function \code{\link{Kest}} will be applied to
+    each of these simulated point patterns, and the new envelope will
+    be based on the \eqn{K} functions.
+  }
+  The new envelope 
+  will be computed using the parameters specified in \code{\dots}.
+
+  See \code{\link{envelope}} for a full list of envelope parameters.
+  Frequently-used parameters include \code{nrank} and \code{nsim} (to change the
+  number of simulations used and the significance level of the
+  envelope), \code{global} (to change from pointwise to global
+  envelopes) and \code{VARIANCE} (to compute the envelopes from the sample
+  moments instead of the ranks).
+}
+\value{
+  An envelope (object of class \code{"envelope"}).
+}
+\seealso{
+   \code{\link{envelope}}
+}
+\examples{
+  E <- envelope(cells, Kest, nsim=19, savefuns=TRUE, savepatterns=TRUE)
+  E2 <- envelope(E, nrank=2)
+  Eg <- envelope(E, global=TRUE)
+  EG <- envelope(E, Gest)
+  EL <- envelope(E, transform=expression(sqrt(./pi)))
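+  # a sketch: variance-based envelopes from the same saved functions
+  EV <- envelope(E, VARIANCE=TRUE)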
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{htest}
+\keyword{hplot}
+\keyword{iteration}
diff --git a/man/envelope.lpp.Rd b/man/envelope.lpp.Rd
new file mode 100644
index 0000000..9af7084
--- /dev/null
+++ b/man/envelope.lpp.Rd
@@ -0,0 +1,264 @@
+\name{envelope.lpp}
+\alias{envelope.lpp}
+\alias{envelope.lppm}
+\title{
+  Envelope for Point Patterns on Linear Network
+}
+\description{
+  Enables envelopes to be computed for point patterns on a linear network.
+}
+\usage{
+  \method{envelope}{lpp}(Y, fun=linearK, nsim=99, nrank=1, \dots, 
+  funargs=list(), funYargs=funargs,
+  simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, 
+  transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL,
+  alternative=c("two.sided", "less", "greater"),
+  scale=NULL, clamp=FALSE,
+  savefuns=FALSE, savepatterns=FALSE,
+  nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL,
+  do.pwrong=FALSE, envir.simul=NULL)
+
+
+  \method{envelope}{lppm}(Y, fun=linearK, nsim=99, nrank=1, \dots, 
+  funargs=list(), funYargs=funargs,
+  simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, 
+  transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL,
+  alternative=c("two.sided", "less", "greater"), 
+  scale=NULL, clamp=FALSE,
+  savefuns=FALSE, savepatterns=FALSE,
+  nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL,
+  do.pwrong=FALSE, envir.simul=NULL)
+}
+\arguments{
+  \item{Y}{
+    A point pattern on a linear network
+    (object of class \code{"lpp"})
+    or a fitted point process model on a linear network
+    (object of class \code{"lppm"}).
+  }
+  \item{fun}{
+    Function that is to be computed for each simulated pattern.
+  }
+  \item{nsim}{
+    Number of simulations to perform.
+  }
+  \item{nrank}{
+    Integer. Rank of the envelope value amongst the \code{nsim} simulated
+    values. A rank of 1 means that the minimum and maximum
+    simulated values will be used.
+  }
+   \item{\dots}{
+    Extra arguments passed to \code{fun}.
+  }
+  \item{funargs}{
+    A list, containing extra arguments to be passed to \code{fun}.
+  }
+  \item{funYargs}{
+    Optional. A list, containing extra arguments to be passed to
+    \code{fun} when applied to the original data \code{Y} only.
+  }
+  \item{simulate}{
+    Optional. Specifies how to generate the simulated point patterns.
+    If \code{simulate} is an expression in the R language, then this
+    expression will be evaluated \code{nsim} times,
+    to obtain \code{nsim} point patterns which are taken as the
+    simulated patterns from which the envelopes are computed.
+    If \code{simulate} is a list of point patterns, then the entries
+    in this list will be treated as the simulated patterns from which
+    the envelopes are computed.
+    Alternatively \code{simulate} may be an object produced by the
+    \code{envelope} command: see Details.
+  }
+  \item{fix.n}{
+    Logical. If \code{TRUE}, simulated patterns will have the
+    same number of points as the original data pattern.
+  }
+  \item{fix.marks}{
+    Logical. If \code{TRUE}, simulated patterns will have the
+    same number of points \emph{and} the same marks as the
+    original data pattern. In a multitype point pattern this means that
+    the simulated patterns will have the same number of points
+    \emph{of each type} as the original data.
+  }
+  \item{verbose}{
+    Logical flag indicating whether to print progress reports
+    during the simulations.
+  }
+  \item{transform}{
+    Optional. A transformation to be applied to the
+    function values, before the envelopes are computed.
+    An expression object (see Details).
+  }
+  \item{global}{
+    Logical flag indicating whether envelopes should be pointwise
+    (\code{global=FALSE}) or simultaneous (\code{global=TRUE}).
+  }
+  \item{ginterval}{
+    Optional.
+    A vector of length 2 specifying
+    the interval of \eqn{r} values for the simultaneous critical
+    envelopes. Only relevant if \code{global=TRUE}.
+  }
+  \item{use.theory}{
+    Logical value indicating whether to use the theoretical value,
+    computed by \code{fun}, as the reference value for simultaneous
+    envelopes. Applicable only when \code{global=TRUE}.
+  }
+  \item{alternative}{
+    Character string determining whether the envelope corresponds
+    to a two-sided test (\code{side="two.sided"}, the default)
+    or a one-sided test with a lower critical boundary
+    (\code{side="less"}) or a one-sided test
+    with an upper critical boundary (\code{side="greater"}).
+  }
+  \item{scale}{
+    Optional. Scaling function for global envelopes.
+    A function in the \R language which determines the
+    relative scale of deviations, as a function of
+    distance \eqn{r}, when computing the global envelopes.
+    Applicable only when \code{global=TRUE}.
+    Summary function values for distance \code{r}
+    will be \emph{divided} by \code{scale(r)} before the
+    maximum deviation is computed. The resulting global envelopes
+    will have width proportional to \code{scale(r)}. 
+  }
+  \item{clamp}{
+    Logical value indicating how to compute envelopes when
+    \code{alternative="less"} or \code{alternative="greater"}.
+    Deviations of the observed
+    summary function from the theoretical summary function are initially
+    evaluated as signed real numbers, with large positive values indicating
+    consistency with the alternative hypothesis.
+    If \code{clamp=FALSE} (the default), these values are not changed.
+    If \code{clamp=TRUE}, any negative values are replaced by zero.
+  }
+  \item{savefuns}{
+    Logical flag indicating whether to save all the simulated
+    function values.
+  }
+  \item{savepatterns}{
+    Logical flag indicating whether to save all the simulated
+    point patterns.
+  }
+  \item{nsim2}{
+    Number of extra simulated point patterns to be generated
+    if it is necessary to use simulation to estimate the theoretical
+    mean of the summary function. Only relevant when \code{global=TRUE}
+    and the simulations are not based on CSR.
+  }
+  \item{VARIANCE}{
+    Logical. If \code{TRUE}, critical envelopes will be calculated
+    as sample mean plus or minus \code{nSD} times sample standard
+    deviation.
+  }
+  \item{nSD}{
+    Number of estimated standard deviations used to determine
+    the critical envelopes, if \code{VARIANCE=TRUE}.
+  }
+  \item{Yname}{
+    Character string that should be used as the name of the 
+    data point pattern \code{Y} when printing or plotting the results.
+  }
+  \item{do.pwrong}{
+    Logical. If \code{TRUE}, the algorithm will also estimate
+    the true significance level of the \dQuote{wrong} test (the test that
+    declares the summary function for the data to be significant
+    if it lies outside the \emph{pointwise} critical boundary at any
+    point). This estimate is printed when the result is printed.
+  }
+  \item{envir.simul}{
+    Environment in which to evaluate the expression \code{simulate},
+    if not the current environment.
+  }
+}
+\details{
+  This is a method for the generic
+  function \code{\link{envelope}} 
+  applicable to point patterns on a linear network.
+  
+  The argument \code{Y} can be either a point pattern on a linear
+  network, or a fitted point process model on a linear network. 
+  The function \code{fun} will be evaluated for the data
+  and also for \code{nsim} simulated point
+  patterns on the same linear network.
+  The upper and lower
+  envelopes of these evaluated functions will be computed
+  as described in \code{\link{envelope}}.
+  
+  The type of simulation is determined as follows.
+  \itemize{
+    \item
+    if \code{Y} is a point pattern (object of class \code{"lpp"})
+    and \code{simulate} is missing or \code{NULL},
+    then random point patterns will be generated according to
+    a Poisson point process on the linear network on which \code{Y}
+    is defined, with intensity estimated from \code{Y}.
+    \item
+    if \code{Y} is a fitted point process model (object of class
+    \code{"lppm"}) and \code{simulate} is missing or \code{NULL},
+    then random point patterns will be generated by simulating
+    from the fitted model.
+    \item 
+    If \code{simulate} is present, it should be an expression that
+    can be evaluated to yield random point patterns on the same
+    linear network as \code{Y}.
+  }
+  
+  The function \code{fun} should accept as its first argument
+  a point pattern on a linear network (object of class \code{"lpp"})
+  and should have another argument called \code{r} or a \code{\dots}
+  argument. 
+}
+\value{
+  Function value table (object of class \code{"fv"})
+  with additional information,
+  as described in \code{\link{envelope}}.
+}
+\author{
+  Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian
+}
+\seealso{
+   \code{\link{envelope}},
+   \code{\link{linearK}}
+}
+\references{
+  Ang, Q.W. (2010)
+  \emph{Statistical methodology for events on a network}.
+  Master's thesis, School of Mathematics and Statistics, University of
+  Western Australia.
+  
+  Ang, Q.W., Baddeley, A. and Nair, G. (2012)
+  Geometrically corrected second-order analysis of 
+  events on a linear network, with applications to
+  ecology and criminology.
+\emph{Scandinavian Journal of Statistics} \bold{39}, 591--617.
+  
+  Okabe, A. and Yamada, I. (2001) The K-function method on a network and
+  its computational implementation. \emph{Geographical Analysis}
+  \bold{33}, 271--290.
+}
+\examples{
+   if(interactive()) {
+     ns <- 39
+     np <- 40
+   } else {
+     ns <- np <- 3
+   }
+   X <- runiflpp(np, simplenet)
+
+   # uniform Poisson: random numbers of points
+   envelope(X, nsim=ns)
+
+   # uniform Poisson: conditional on observed number of points
+   envelope(X, fix.n=TRUE, nsim=ns)
+
+   # nonuniform Poisson
+   fit <- lppm(X ~x)
+   envelope(fit, nsim=ns)
+
+   # multitype
+   marks(X) <- sample(letters[1:2], np, replace=TRUE)
+   envelope(X, nsim=ns)
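+
+   # user-specified simulation (a sketch: uniform random points
+   # on the same network)
+   envelope(X, simulate=expression(runiflpp(np, simplenet)), nsim=ns)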
+}
+\keyword{spatial}
diff --git a/man/envelope.pp3.Rd b/man/envelope.pp3.Rd
new file mode 100644
index 0000000..e0df82d
--- /dev/null
+++ b/man/envelope.pp3.Rd
@@ -0,0 +1,239 @@
+\name{envelope.pp3}
+\alias{envelope.pp3}
+\title{Simulation Envelopes of Summary Function for 3D Point Pattern}
+\description{
+  Computes simulation envelopes of a summary function
+  for a three-dimensional point pattern.
+}
+\usage{
+  \method{envelope}{pp3}(Y, fun=K3est, nsim=99, nrank=1, \dots, 
+  funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, 
+  transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL,
+  alternative=c("two.sided", "less", "greater"),
+  scale=NULL, clamp=FALSE, 
+  savefuns=FALSE, savepatterns=FALSE,
+  nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim,
+  do.pwrong=FALSE, envir.simul=NULL)
+}
+\arguments{
+  \item{Y}{
+    A three-dimensional point pattern (object of class
+    \code{"pp3"}).
+  }
+  \item{fun}{
+    Function that computes the desired summary statistic
+    for a 3D point pattern. 
+  }
+  \item{nsim}{
+    Number of simulated point patterns to be generated
+    when computing the envelopes.
+  }
+  \item{nrank}{
+    Integer. Rank of the envelope value amongst the \code{nsim} simulated
+    values. A rank of 1 means that the minimum and maximum
+    simulated values will be used.
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{fun}.
+  }
+  \item{funargs}{
+    A list, containing extra arguments to be passed to \code{fun}.
+  }
+  \item{funYargs}{
+    Optional. A list, containing extra arguments to be passed to
+    \code{fun} when applied to the original data \code{Y} only.
+  }
+  \item{simulate}{
+    Optional. Specifies how to generate the simulated point patterns.
+    If \code{simulate} is an expression in the R language, then this
+    expression will be evaluated \code{nsim} times,
+    to obtain \code{nsim} point patterns which are taken as the
+    simulated patterns from which the envelopes are computed.
+    If \code{simulate} is a list of point patterns, then the entries
+    in this list will be treated as the simulated patterns from which
+    the envelopes are computed.
+    Alternatively \code{simulate} may be an object produced by the
+    \code{envelope} command: see Details.
+  }
+  \item{verbose}{
+    Logical flag indicating whether to print progress reports
+    during the simulations.
+  }
+  \item{transform}{
+    Optional. A transformation to be applied to the
+    function values, before the envelopes are computed.
+    An expression object (see Details).
+  }
+  \item{global}{
+    Logical flag indicating whether envelopes should be pointwise
+    (\code{global=FALSE}) or simultaneous (\code{global=TRUE}).
+  }
+  \item{ginterval}{
+    Optional.
+    A vector of length 2 specifying
+    the interval of \eqn{r} values for the simultaneous critical
+    envelopes. Only relevant if \code{global=TRUE}.
+  }
+  \item{use.theory}{
+    Logical value indicating whether to use the theoretical value,
+    computed by \code{fun}, as the reference value for simultaneous
+    envelopes. Applicable only when \code{global=TRUE}.
+  }
+  \item{alternative}{
+    Character string determining whether the envelope corresponds
+    to a two-sided test (\code{side="two.sided"}, the default)
+    or a one-sided test with a lower critical boundary
+    (\code{side="less"}) or a one-sided test
+    with an upper critical boundary (\code{side="greater"}).
+  }
+  \item{scale}{
+    Optional. Scaling function for global envelopes.
+    A function in the \R language which determines the
+    relative scale of deviations, as a function of
+    distance \eqn{r}, when computing the global envelopes.
+    Applicable only when \code{global=TRUE}.
+    Summary function values for distance \code{r}
+    will be \emph{divided} by \code{scale(r)} before the
+    maximum deviation is computed. The resulting global envelopes
+    will have width proportional to \code{scale(r)}. 
+  }
+  \item{clamp}{
+    Logical value indicating how to compute envelopes when
+    \code{alternative="less"} or \code{alternative="greater"}.
+    Deviations of the observed
+    summary function from the theoretical summary function are initially
+    evaluated as signed real numbers, with large positive values indicating
+    consistency with the alternative hypothesis.
+    If \code{clamp=FALSE} (the default), these values are not changed.
+    If \code{clamp=TRUE}, any negative values are replaced by zero.
+  }
+  \item{savefuns}{
+    Logical flag indicating whether to save all the simulated
+    function values.
+  }
+  \item{savepatterns}{
+    Logical flag indicating whether to save all the simulated
+    point patterns.
+  }
+  \item{nsim2}{
+    Number of extra simulated point patterns to be generated
+    if it is necessary to use simulation to estimate the theoretical
+    mean of the summary function. Only relevant when \code{global=TRUE}
+    and the simulations are not based on CSR.
+  }
+  \item{VARIANCE}{
+    Logical. If \code{TRUE}, critical envelopes will be calculated
+    as sample mean plus or minus \code{nSD} times sample standard
+    deviation.
+  }
+  \item{nSD}{
+    Number of estimated standard deviations used to determine
+    the critical envelopes, if \code{VARIANCE=TRUE}.
+  }
+  \item{Yname}{
+    Character string that should be used as the name of the 
+    data point pattern \code{Y} when printing or plotting the results.
+  }
+  \item{maxnerr}{
+    Maximum number of rejected patterns.
+    If \code{fun} yields an error when applied to a simulated point
+    pattern (for example, because the pattern is empty and \code{fun}
+    requires at least one point), the pattern will be rejected
+    and a new random point pattern will be generated. If this happens
+    more than \code{maxnerr} times, the algorithm will give up.
+  }
+  \item{do.pwrong}{
+    Logical. If \code{TRUE}, the algorithm will also estimate
+    the true significance level of the \dQuote{wrong} test (the test that
+    declares the summary function for the data to be significant
+    if it lies outside the \emph{pointwise} critical boundary at any
+    point). This estimate is printed when the result is printed.
+  }
+  \item{envir.simul}{
+    Environment in which to evaluate the expression \code{simulate},
+    if not the current environment.
+  }
+}
+\value{
+  A function value table (object of class \code{"fv"})
+  which can be plotted directly.
+  See \code{\link{envelope}} for further details.
+}
+\details{
+  The \code{envelope} command performs simulations and
+  computes envelopes of a summary statistic based on the simulations.
+  The result is an object that can be plotted to display the envelopes.
+  The envelopes can be used to assess the goodness-of-fit of
+  a point process model to point pattern data.
+  
+  The \code{envelope} function is generic, with methods for
+  the classes \code{"ppp"}, \code{"ppm"} and \code{"kppm"}
+  described in the help file for \code{\link{envelope}}.
+  This function \code{envelope.pp3} is the method for 
+  three-dimensional point patterns (objects of class \code{"pp3"}).
+  
+  For the most basic use, if you have a 3D point pattern \code{X} and
+  you want to test Complete Spatial Randomness (CSR), type
+  \code{plot(envelope(X, K3est, nsim=39))} to see the three-dimensional
+  \eqn{K} function for \code{X} plotted together with the envelopes of
+  the three-dimensional \eqn{K} function for 39 simulations of CSR. 
+  
+  To create simulation envelopes, the command \code{envelope(Y, ...)} 
+  first generates \code{nsim} random point patterns
+  in one of the following ways. 
+  \itemize{
+    \item 
+    If \code{simulate=NULL},
+    then we generate \code{nsim} simulations of
+    Complete Spatial Randomness (i.e. \code{nsim} simulated point patterns
+    each being a realisation of the uniform Poisson point process)
+    with the same intensity as the pattern \code{Y}.
+    \item
+    If \code{simulate} is supplied, then it determines how the
+    simulated point patterns are generated.
+    See \code{\link{envelope}} for details.
+  }
+  
+  The summary statistic \code{fun} is applied to each of these simulated
+  patterns. Typically \code{fun} is one of the functions
+  \code{K3est}, \code{G3est}, \code{F3est} or \code{pcf3est}.
+  It may also be a character string
+  containing the name of one of these functions.
+  
+  For further information, see the documentation for
+  \code{\link{envelope}}.
+}
+\references{
+  Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993)
+  Analysis of a three-dimensional point pattern with replication.
+  \emph{Applied Statistics} \bold{42}, 641--668.
+} 
+\seealso{
+  \code{\link{pp3}},
+  \code{\link{rpoispp3}},
+  \code{\link{K3est}},
+  \code{\link{G3est}},
+  \code{\link{F3est}},
+  \code{\link{pcf3est}}.
+}
+\examples{
+   X <- rpoispp3(20, box3())
+ \dontrun{
+ plot(envelope(X, nsim=39))
+ }
+ \testonly{
+  plot(envelope(X, nsim=4))
+ }
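+ # a sketch: envelope of the 3D nearest-neighbour function G3est
+ \dontrun{
+ plot(envelope(X, G3est, nsim=39))
+ }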
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{htest}
+\keyword{hplot}
+\keyword{iteration}
diff --git a/man/envelopeArray.Rd b/man/envelopeArray.Rd
new file mode 100644
index 0000000..6cab361
--- /dev/null
+++ b/man/envelopeArray.Rd
@@ -0,0 +1,82 @@
+\name{envelopeArray}
+\alias{envelopeArray}
+\title{
+  Array of Simulation Envelopes of Summary Function
+}
+\description{
+  Compute an array of simulation envelopes using
+  a summary function that returns an array of curves.
+}
+\usage{
+envelopeArray(X, fun, \dots, dataname = NULL, verb = FALSE, reuse = TRUE)
+}
+\arguments{
+  \item{X}{
+    Object containing point pattern data.
+    A point pattern (object of class
+    \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"})
+    or a fitted point process model
+    (object of class \code{"ppm"}, \code{"kppm"} or \code{"lppm"}).
+  }
+  \item{fun}{
+    Function that computes the desired summary statistic
+    for a point pattern. The result of \code{fun} should be a
+    function array (object of class \code{"fasp"}).
+  }
+  \item{\dots}{
+  Arguments passed to \code{\link{envelope}} to control the simulations,
+  or passed to \code{fun} when evaluating the function.
+  }
+  \item{dataname}{
+    Optional character string name for the data.
+  }
+  \item{verb}{
+    Logical value indicating whether to print progress reports.
+  }
+  \item{reuse}{
+    Logical value indicating whether the envelopes in each panel
+    should be based on the same set of simulated patterns
+    (\code{reuse=TRUE}, the default)
+    or on different, independent sets of simulated
+    patterns (\code{reuse=FALSE}).
+  }
+}
+\details{
+  This command is the counterpart of \code{\link{envelope}}
+  when the function \code{fun} that is evaluated on each simulated point pattern
+  will return an object of class \code{"fasp"} representing an array of
+  summary functions.
+
+  Simulated point patterns are generated according to the
+  rules described for \code{\link{envelope}}. In brief, 
+  if \code{X} is a point pattern, the algorithm generates
+  simulated point patterns of the same kind, according to complete
+  spatial randomness. If \code{X} is a fitted model, the algorithm
+  generates simulated point patterns according to this model.
+
+  For each simulated point pattern \code{Y}, the function \code{fun}
+  is invoked. The result \code{Z <- fun(Y, ...)} should be an object of
+  class \code{"fasp"} representing an array of summary functions.
+  The dimensions of the array \code{Z} should be the same
+  for each simulated pattern \code{Y}.
+
+  This algorithm finds the simulation envelope of the summary functions
+  in each cell of the array. 
+}
+\value{
+   An object of class \code{"fasp"} representing
+   an array of envelopes.
+}
+\author{
+   \spatstatAuthors.
+}
+\seealso{
+  \code{\link{envelope}}, \code{\link{alltypes}}.
+}
+\examples{
+  A <- envelopeArray(finpines, markcrosscorr, nsim=9)
+  plot(A)
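+
+  ## a sketch: an array of cross-type K functions, using a
+  ## hypothetical wrapper 'Karray' around alltypes()
+  \dontrun{
+    Karray <- function(X, ...) alltypes(X, "K", ...)
+    AK <- envelopeArray(amacrine, Karray, nsim=19)
+    plot(AK)
+  }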
+}
+\keyword{spatial}
+\keyword{nonparametric}
+\keyword{iteration}
diff --git a/man/eroded.areas.Rd b/man/eroded.areas.Rd
new file mode 100644
index 0000000..322f5f7
--- /dev/null
+++ b/man/eroded.areas.Rd
@@ -0,0 +1,58 @@
+\name{eroded.areas}
+\alias{eroded.areas}
+\title{Areas of Morphological Erosions}
+\description{
+  Computes the areas of successive morphological erosions of a window.
+}
+\usage{
+ eroded.areas(w, r, subset=NULL)
+}
+\arguments{
+  \item{w}{A window.}
+  \item{r}{Numeric vector of radii at which erosions will be performed.}
+  \item{subset}{
+    Optional window inside which the areas should be computed.
+  }
+}
+\value{
+  Numeric vector, of the same length as \code{r},
+  giving the areas of the successive erosions.
+}
+\details{
+  This function computes the areas of the erosions of the window
+  \code{w} by each of the radii \code{r[i]}. 
+
+  The morphological erosion of a set \eqn{W} by a distance \eqn{r > 0}
+  is the subset 
+  consisting of all points \eqn{x \in W}{x in W} such that the
+  distance from \eqn{x} to the boundary of \eqn{W} is greater than
+  or equal to \eqn{r}. In other words it is the result of trimming
+  a margin of width \eqn{r} off the set \eqn{W}.
+
+  The argument \code{r} should be a vector of positive numbers.
+  The argument \code{w} should be a window (an object of class
+  \code{"owin"}, see \code{\link{owin.object}} for details)
+  or can be given in any format acceptable to \code{\link{as.owin}()}.
+
+  Unless \code{w} is a rectangle, the computation is performed
+  using a pixel raster approximation.
+
+  To compute the eroded window itself, use \code{\link{erosion}}.
+}
+\seealso{
+  \code{\link{owin}},
+  \code{\link{as.owin}},
+  \code{\link{erosion}}
+}
+\examples{
+  w <- owin(c(0,1),c(0,1))
+  a <- eroded.areas(w, seq(0.01,0.49,by=0.01))
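+  # a sketch: plot the eroded area against the erosion distance
+  plot(seq(0.01,0.49,by=0.01), a, type="l",
+       xlab="erosion distance r", ylab="area")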
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/erosion.Rd b/man/erosion.Rd
new file mode 100644
index 0000000..90bd0ab
--- /dev/null
+++ b/man/erosion.Rd
@@ -0,0 +1,91 @@
+\name{erosion}  
+\alias{erosion}
+\alias{erosion.owin}
+\alias{erosion.ppp}
+\alias{erosion.psp}
+\title{Morphological Erosion by a Disc}
+\description{
+  Perform morphological erosion of a window, a line segment pattern
+  or a point pattern by a disc.
+}
+\usage{
+ erosion(w, r, \dots)
+ \method{erosion}{owin}(w, r, shrink.frame=TRUE, \dots,
+   strict=FALSE, polygonal=NULL)
+ \method{erosion}{ppp}(w, r,\dots)
+ \method{erosion}{psp}(w, r,\dots)
+}
+\arguments{
+  \item{w}{
+    A window (object of class \code{"owin"})
+    or a line segment pattern (object of class \code{"psp"})
+    or a point pattern (object of class \code{"ppp"}).
+  }
+  \item{r}{positive number: the radius of erosion.}
+  \item{shrink.frame}{logical: if \code{TRUE}, erode the bounding
+    rectangle as well.}
+  \item{\dots}{extra arguments to \code{\link{as.mask}}
+    controlling the pixel resolution, if pixel approximation is used.}
+  \item{strict}{Logical flag determining the fate of boundary pixels,
+    if pixel approximation is used. See details.}
+  \item{polygonal}{
+    Logical flag indicating whether to compute a polygonal
+    approximation to the erosion (\code{polygonal=TRUE}) or
+    a pixel grid approximation (\code{polygonal=FALSE}).
+  }
+}
+\value{
+  If \code{r > 0}, an object of class \code{"owin"} representing the
+  eroded region (or \code{NULL} if this region is empty).
+  If \code{r=0}, the result is identical to \code{w}.
+}
+\details{
+  The morphological erosion of a set \eqn{W} by a distance \eqn{r > 0}
+  is the subset 
+  consisting of all points \eqn{x \in W}{x in W} such that the
+  distance from \eqn{x} to the boundary of \eqn{W} is greater than
+  or equal to \eqn{r}. In other words it is the result of trimming
+  a margin of width \eqn{r} off the set \eqn{W}.
+
+  If \code{polygonal=TRUE} then a polygonal approximation
+  to the erosion is computed.
+  If \code{polygonal=FALSE} then a pixel approximation
+  to the erosion is computed from the distance map of \code{w}.
+  The arguments \code{"\dots"} are passed to \code{\link{as.mask}}
+  to control the pixel resolution.
+  The erosion consists of all pixels whose distance
+  from the boundary of \code{w} is strictly greater than \code{r} (if
+  \code{strict=TRUE}) or is greater than or equal to \code{r} (if
+  \code{strict=FALSE}).
+  
+  When \code{w} is a window, the default (when \code{polygonal=NULL})
+  is to compute a polygonal approximation if
+  \code{w} is a rectangle or polygonal window, and to compute a
+  pixel approximation if \code{w} is a window of type \code{"mask"}.
+
+  If \code{shrink.frame} is false, the resulting window is given the
+  same outer, bounding rectangle as the original window \code{w}.
+  If \code{shrink.frame} is true, the original bounding rectangle
+  is also eroded by the same distance \code{r}.
+
+  To simply compute the area of the eroded window,
+  use \code{\link{eroded.areas}}.
+}
+\seealso{
+  \code{\link{dilation}} for the opposite operation.
+
+  \code{\link{erosionAny}} for morphological erosion using any shape.
+  
+  \code{\link{owin}},
+  \code{\link{as.owin}},
+  \code{\link{eroded.areas}}
+}
+\examples{
+  plot(letterR, main="erosion(letterR, 0.2)")
+  plot(erosion(letterR, 0.2), add=TRUE, col="red")
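+  # a sketch: pixel approximation of the same erosion
+  plot(erosion(letterR, 0.2, polygonal=FALSE, dimyx=256))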
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/erosionAny.Rd b/man/erosionAny.Rd
new file mode 100644
index 0000000..ef222cb
--- /dev/null
+++ b/man/erosionAny.Rd
@@ -0,0 +1,71 @@
+\name{erosionAny}
+\alias{erosionAny}
+\alias{\%(-)\%}  %DoNotExport 
+%NAMESPACE export("%(-)%")
+\title{Morphological Erosion of Windows}
+\description{
+  Compute the morphological erosion of one spatial window by another.
+}
+\usage{
+erosionAny(A, B)
+
+A \%(-)\% B
+}
+\arguments{
+  \item{A,B}{
+    Windows (objects of class \code{"owin"}).
+  }
+}
+\value{
+  Another window (object of class \code{"owin"}).
+}
+\details{
+  The operator \code{A \%(-)\% B} and function \code{erosionAny(A,B)}
+  are synonymous: they both compute the
+  morphological erosion of the window \code{A} by the window \code{B}.
+
+  The morphological erosion
+  \eqn{A \ominus B}{A \%(-)\% B}
+  of region \eqn{A} by region \eqn{B}
+  is the spatial region consisting of all vectors \eqn{z}
+  such that, when \eqn{B} is shifted by the vector \eqn{z}, the result
+  is a subset of \eqn{A}.
+
+  Equivalently
+  \deqn{
+    A \ominus B = (A^c \oplus (-B))^c
+  }{
+    (A^c \%+\% (-B))^c
+  }
+  where \eqn{\oplus}{\%+\%} is the Minkowski sum,
+  \eqn{A^c} denotes the set complement, and \eqn{(-B)} is
+  the reflection of \eqn{B} through the origin, consisting of all
+  vectors \eqn{-b} where \eqn{b} is a point in \eqn{B}.
+
+  If \code{B} is a disc of radius \code{r}, then
+  \code{erosionAny(A, B)} is equivalent to \code{erosion(A, r)}.
+  See \code{\link{erosion}}.
+  
+  The algorithm currently computes the result as a polygonal
+  window using the \pkg{polyclip} library. 
+  It will be quite slow if applied to binary mask windows.
+}
+\seealso{
+  \code{\link{erosion}},
+  \code{\link{MinkowskiSum}}
+}
+\examples{
+  B <- square(c(-0.1, 0.1))
+  RminusB <- letterR \%(-)\% B
+  FR <- grow.rectangle(Frame(letterR), 0.3)
+  plot(FR, main="", type="n")
+  plot(letterR, add=TRUE, lwd=2, hatch=TRUE, box=FALSE)
+  plot(RminusB, add=TRUE, col="blue", box=FALSE)
+  plot(shift(B, vec=c(3.49, 2.98)),
+       add=TRUE, border="red", lwd=2)
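+  ## a sketch: erosion by a disc agrees (up to polygonal
+  ## approximation) with erosion(A, r)
+  D <- disc(radius=0.1)
+  E1 <- letterR \%(-)\% D
+  E2 <- erosion(letterR, 0.1)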
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/eval.fasp.Rd b/man/eval.fasp.Rd
new file mode 100644
index 0000000..e30d5b0
--- /dev/null
+++ b/man/eval.fasp.Rd
@@ -0,0 +1,96 @@
+\name{eval.fasp}
+\alias{eval.fasp}
+\title{Evaluate Expression Involving Function Arrays}
+\description{
+  Evaluates any expression involving one or more function arrays
+  (\code{fasp} objects) and returns another function array.
+}
+\usage{
+  eval.fasp(expr, envir, dotonly=TRUE)
+}
+\arguments{
+  \item{expr}{
+    An expression involving the names of objects of class \code{"fasp"}.
+  }
+  \item{envir}{
+    Optional. The environment in which to evaluate the expression,
+    or a named list containing \code{"fasp"} objects to be used in
+    the expression.
+  }
+  \item{dotonly}{Logical. Passed to \code{\link{eval.fv}}.}
+}
+\details{
+  This is a wrapper to make it easier to perform
+  pointwise calculations with the arrays of summary functions
+  used in spatial statistics.
+
+  A function array (object of class \code{"fasp"}) can be regarded as a matrix
+  whose entries are functions. Objects of this kind
+  are returned by the command \code{\link{alltypes}}.
+
+  Suppose \code{X} is an object of class \code{"fasp"}.
+  Then \code{eval.fasp(X+3)} effectively adds 3 to the value of
+  every function in the array \code{X}, and returns
+  the resulting object. 
+
+  Suppose \code{X} and \code{Y} are two objects of class \code{"fasp"}
+  which are compatible (in particular, the arrays
+  must have the same dimensions). Then 
+  \code{eval.fasp(X + Y)} will add the corresponding functions in
+  each cell of the arrays \code{X} and \code{Y},
+  and return the resulting array of functions.
+
+  Suppose \code{X} is an object of class \code{"fasp"}
+  and \code{f} is an object of class \code{"fv"}.
+  Then \code{eval.fasp(X + f)} will add the function \code{f}
+  to the functions in each cell of the array \code{X},
+  and return the resulting array of functions.
+
+  In general, \code{expr} can be any expression involving
+  (a) the \emph{names} of objects of class \code{"fasp"} or \code{"fv"},
+  (b) scalar constants, and (c) functions which are vectorised.
+  See the Examples.
+
+  First \code{eval.fasp} determines which of the \emph{variable names}
+  in the expression \code{expr} refer to objects of class \code{"fasp"}.
+  The expression is then evaluated for each cell of the array
+  using \code{\link{eval.fv}}.
+
+  The expression \code{expr} must be vectorised.
+  There must be at least one object of class \code{"fasp"} in the expression.
+  All such objects must be compatible.
+}
+\value{
+  Another object of class \code{"fasp"}.
+}
+\seealso{
+  \code{\link{fasp.object}},
+  \code{\link{Kest}}
+}
+\examples{
+  # manipulating the K function
+  K <- alltypes(amacrine, "K")
+
+  # expressions involving a fasp object
+  eval.fasp(K + 3)
+  L <- eval.fasp(sqrt(K/pi))
+
+  # expression involving two fasp objects
+  D <- eval.fasp(K - L)
+
+  # subtracting the unmarked K function from the cross-type K functions
+  K0 <- Kest(unmark(amacrine))
+  DK <- eval.fasp(K - K0)
+
+  ## Use of 'envir'
+  S <- eval.fasp(1-G, list(G=alltypes(amacrine, "G")))
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{manip}
+\keyword{programming}
diff --git a/man/eval.fv.Rd b/man/eval.fv.Rd
new file mode 100644
index 0000000..6eb1f51
--- /dev/null
+++ b/man/eval.fv.Rd
@@ -0,0 +1,143 @@
+\name{eval.fv}
+\alias{eval.fv}
+\title{Evaluate Expression Involving Functions}
+\description{
+  Evaluates any expression involving one or more function value (fv) objects,
+  and returns another object of the same kind.
+}
+\usage{
+  eval.fv(expr, envir, dotonly=TRUE, equiv=NULL, relabel=TRUE)
+}
+\arguments{
+  \item{expr}{An expression.}
+  \item{envir}{
+    Optional. The environment in which to evaluate the
+    expression, or a named list containing \code{"fv"} objects to be
+    used in the expression.
+  }
+  \item{dotonly}{Logical. See Details.}
+  \item{equiv}{Mapping between column names of different objects
+    that are deemed to be equivalent. See Details.}
+  \item{relabel}{
+    Logical value indicating whether to
+    compute appropriate labels for the resulting function.
+    This should normally be \code{TRUE} (the default).
+    See Details.
+  }
+}
+\details{
+  This is a wrapper to make it easier to perform
+  pointwise calculations with the summary functions
+  used in spatial statistics.
+
+  An object of class \code{"fv"} is essentially a data frame
+  containing several different statistical estimates of the same
+  function. Such objects are returned by \code{\link{Kest}} and its
+  relatives.
+
+  For example, suppose \code{X} is an object of class \code{"fv"}
+  containing several different estimates of Ripley's \eqn{K} function \eqn{K(r)},
+  evaluated at a sequence of values of \eqn{r}.
+  Then \code{eval.fv(X+3)} effectively adds 3 to 
+  each function estimate in \code{X}, and returns
+  the resulting object. 
+
+  Suppose \code{X} and \code{Y} are two objects of class \code{"fv"}
+  which are compatible (in particular they have the same vector
+  of \eqn{r} values). Then 
+  \code{eval.fv(X + Y)} will add the corresponding function values in
+  \code{X} and \code{Y}, and return the resulting function.
+
+  In general, \code{expr} can be any expression involving
+  (a) the \emph{names} of objects of class \code{"fv"}, (b) scalar
+  constants, and (c) functions which are vectorised.
+  See the Examples.
+
+  First \code{eval.fv} determines which of the \emph{variable names}
+  in the expression \code{expr} refer to objects of class \code{"fv"}.
+  Each such name is replaced by a vector containing the function values.
+  The expression is then evaluated. The result should be a vector;
+  it is taken as the new vector of function values.
+
+  The expression \code{expr} must be vectorised.
+  There must be at least one object of class \code{"fv"} in the expression.
+  If the objects are not compatible, they will be made compatible
+  by \code{\link{harmonise.fv}}.
+
+  If \code{dotonly=TRUE} (the default), the expression will be
+  evaluated only for those columns of an \code{"fv"} object
+  that contain values of the function itself (rather than
+  values of the derivative of the function, the hazard rate, etc).
+  If \code{dotonly=FALSE}, the expression will be evaluated for all columns.
+
+  For example the result of \code{\link{Fest}} includes several columns
+  containing estimates of the empty space function \eqn{F(r)},
+  but also includes an estimate of the
+  \emph{hazard} \eqn{h(r)} of \eqn{F(r)}. Transformations that are valid
+  for \eqn{F} may not be valid for \eqn{h}. Accordingly, \eqn{h} would
+  normally be omitted from the calculation.
+  
+  The columns of an object \code{x} that represent the function itself
+  are identified by its \dQuote{dot} names, \code{fvnames(x, ".")}.
+  They are the columns normally plotted by \code{\link{plot.fv}}
+  and identified by the symbol \code{"."} in plot formulas
+  in \code{\link{plot.fv}}.
+
+  The argument \code{equiv} can be used to specify that 
+  two different column names in different function objects
+  are mathematically equivalent or cognate.
+  It should be a list of \code{name=value} pairs, or a named vector of
+  character strings, indicating the pairing of equivalent names.
+  (Without this argument, these columns would be discarded.)
+  See the Examples.
+
+  The argument \code{relabel} should normally be \code{TRUE} (the default).
+  It determines whether to compute appropriate mathematical labels and
+  descriptions for the resulting function object
+  (used when the object is printed or plotted).
+  If \code{relabel=FALSE} then this does not occur,
+  and the mathematical labels and descriptions
+  in the result are taken from the function object
+  that appears first in the expression. This reduces computation time
+  slightly (for advanced use only).
+}
+\value{
+  Another object of class \code{"fv"}.
+}
+\seealso{
+  \code{\link{fv.object}},
+  \code{\link{Kest}}
+}
+\examples{
+  # manipulating the K function
+  X <- rpoispp(42)
+  Ks <- Kest(X)
+
+  eval.fv(Ks + 3)
+  Ls <- eval.fv(sqrt(Ks/pi))
+
+  # manipulating two K functions
+  Y <- rpoispp(20)
+  Kr <- Kest(Y)
+  
+  Kdif <- eval.fv(Ks - Kr)
+  Z <- eval.fv(sqrt(Ks/pi) - sqrt(Kr/pi))
+
+  ## Use of 'envir'
+  U <- eval.fv(sqrt(K), list(K=Kest(cells)))
+
+  ## Use of 'equiv'
+  Fc <- Fest(cells)
+  Gc <- Gest(cells)
+  # Hanisch and Chiu-Stoyan estimators are cognate
+  Dc <- eval.fv(Fc - Gc, equiv=list(cs="han"))
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{manip}
+\keyword{programming}
diff --git a/man/eval.im.Rd b/man/eval.im.Rd
new file mode 100644
index 0000000..44eee84
--- /dev/null
+++ b/man/eval.im.Rd
@@ -0,0 +1,90 @@
+\name{eval.im}
+\alias{eval.im}
+\title{Evaluate Expression Involving Pixel Images}
+\description{
+  Evaluates any expression involving one or more pixel images,
+  and returns a pixel image.
+}
+\usage{
+  eval.im(expr, envir, harmonize=TRUE)
+}
+\arguments{
+  \item{expr}{An expression.}
+  \item{envir}{Optional. The environment in which to evaluate the
+    expression, or a named list containing pixel images to be used
+    in the expression.}
+  \item{harmonize}{
+    Logical. Whether to resolve inconsistencies between
+    the pixel grids.
+  }
+}
+\details{
+  This function is a wrapper to make it easier to perform
+  pixel-by-pixel calculations in an image. 
+
+  Pixel images in \pkg{spatstat}
+  are represented by objects of class \code{"im"}
+  (see \code{\link{im.object}}). These are essentially matrices of
+  pixel values, with extra attributes recording the pixel dimensions,
+  etc.
+
+  Suppose \code{X} is a pixel image. Then \code{eval.im(X+3)}
+  will add 3 to the value of every pixel in \code{X}, and return
+  the resulting pixel image.
+
+  Suppose \code{X} and \code{Y} are two pixel images with compatible
+  dimensions: they have the same number of pixels, the same physical
+  size of pixels, and the same bounding box. Then
+  \code{eval.im(X + Y)} will add the corresponding pixel values in
+  \code{X} and \code{Y}, and return the resulting pixel image.
+
+  In general, \code{expr} can be any expression in the R language involving
+  (a) the \emph{names} of pixel images, (b) scalar
+  constants, and (c) functions which are vectorised.
+  See the Examples.
+
+  First \code{eval.im} determines which of the \emph{variable names}
+  in the expression \code{expr} refer to pixel images. Each such name
+  is replaced by a matrix containing the pixel values. The expression is
+  then evaluated. The result should be a matrix; it is taken as
+  the matrix of pixel values.
+  
+  The expression \code{expr} must be vectorised.
+  There must be at least one pixel image in the expression.
+
+  All images must have compatible dimensions.
+  If \code{harmonize=TRUE}, images that have incompatible dimensions
+  will be resampled so that they are compatible.
+  If \code{harmonize=FALSE}, images that are incompatible will cause an error.
+}
+\value{
+  An image object of class \code{"im"}.
+}
+\seealso{
+  \code{\link{as.im}},
+  \code{\link{compatible.im}},
+  \code{\link{harmonise.im}},
+  \code{\link{im.object}}
+}
+\examples{
+  # test images
+  X <- as.im(function(x,y) { x^2 - y^2 }, unit.square())
+  Y <- as.im(function(x,y) { 3 * x + y }, unit.square())
+
+  eval.im(X + 3)
+  eval.im(X - Y)
+  eval.im(abs(X - Y))
+  Z <- eval.im(sin(X * pi) + Y)
+
+  ## Use of 'envir'
+  W <- eval.im(sin(U), list(U=density(cells)))
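+
+  ## a sketch: images on different grids are resampled when harmonize=TRUE
+  V <- as.im(function(x,y) { x }, unit.square(), dimyx=32)
+  Zh <- eval.im(X + V)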
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{manip}
+\keyword{programming}
diff --git a/man/eval.linim.Rd b/man/eval.linim.Rd
new file mode 100644
index 0000000..823be2c
--- /dev/null
+++ b/man/eval.linim.Rd
@@ -0,0 +1,86 @@
+\name{eval.linim}
+\alias{eval.linim}
+\title{Evaluate Expression Involving Pixel Images on Linear Network}
+\description{
+  Evaluates any expression involving one or more pixel images
+  on a linear network, and returns a pixel image on the same linear network.
+}
+\usage{
+  eval.linim(expr, envir, harmonize=TRUE)
+}
+\arguments{
+  \item{expr}{An expression in the \R language,
+    involving the names of objects of class \code{"linim"}.}
+  \item{envir}{Optional. The environment in which to evaluate the
+    expression.}
+  \item{harmonize}{
+    Logical. Whether to resolve inconsistencies between
+    the pixel grids.
+  }
+}
+\details{
+  This function is a wrapper to make it easier to perform
+  pixel-by-pixel calculations. It is one of several functions
+  whose names begin with \code{eval} which work on objects of
+  different types. This particular function is designed to work with
+  objects of class \code{"linim"} which represent
+  pixel images on a linear network. 
+
+  Suppose \code{X} is a pixel image on a linear network (object of
+  class \code{"linim"}. Then \code{eval.linim(X+3)}
+  will add 3 to the value of every pixel in \code{X}, and return
+  the resulting pixel image on the same linear network.
+
+  Suppose \code{X} and \code{Y} are two pixel images on the same
+  linear network, with compatible pixel dimensions. Then
+  \code{eval.linim(X + Y)} will add the corresponding pixel values in
+  \code{X} and \code{Y}, and return the resulting pixel image
+  on the same linear network.
+
+  In general, \code{expr} can be any expression in the R language involving
+  (a) the \emph{names} of pixel images, (b) scalar
+  constants, and (c) functions which are vectorised.
+  See the Examples.
+
+  First \code{eval.linim} determines which of the \emph{variable names}
+  in the expression \code{expr} refer to pixel images. Each such name
+  is replaced by a matrix containing the pixel values. The expression is
+  then evaluated. The result should be a matrix; it is taken as
+  the matrix of pixel values.
+  
+  The expression \code{expr} must be vectorised.
+  There must be at least one linear pixel image in the expression.
+
+  All images must have compatible dimensions.
+  If \code{harmonize=TRUE}, images that have incompatible dimensions
+  will be resampled so that they are compatible.
+  If \code{harmonize=FALSE}, images that are incompatible will cause an error.
+}
+\value{
+  An image object of class \code{"linim"}.
+}
+\seealso{
+  \code{\link{eval.im}},
+  \code{\link{linim}}
+}
+\examples{
+  M <- as.mask.psp(as.psp(simplenet))
+  Z <- as.im(function(x,y) {x-y}, W=M)
+  X <- linim(simplenet, Z)
+  X
+
+  Y <- linfun(function(x,y,seg,tp){y^2+x}, simplenet)
+  Y <- as.linim(Y)
+  
+  eval.linim(X + 3)
+  eval.linim(X - Y)
+  eval.linim(abs(X - Y))
+  Z <- eval.linim(sin(X * pi) + Y)
+}
+\author{
+  \adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{manip}
+\keyword{programming}
diff --git a/man/ewcdf.Rd b/man/ewcdf.Rd
new file mode 100644
index 0000000..3e875b7
--- /dev/null
+++ b/man/ewcdf.Rd
@@ -0,0 +1,59 @@
+\name{ewcdf}
+\alias{ewcdf}
+\title{Weighted Empirical Cumulative Distribution Function}
+\description{
+  Compute a weighted version of the
+  empirical cumulative distribution function.
+}
+\usage{
+ewcdf(x, weights = rep(1/length(x), length(x)))
+}
+\arguments{
+  \item{x}{Numeric vector of observations.}
+  \item{weights}{Numeric vector of non-negative weights
+    for \code{x}.}
+}
+\details{
+  This is a modification of the standard function \code{\link{ecdf}}
+  allowing the observations \code{x} to have weights.
+
+  The weighted e.c.d.f. (empirical cumulative distribution function)
+  \code{Fn} is defined so that, for any real number \code{y}, the value of
+  \code{Fn(y)} is equal to the total weight of all entries of
+  \code{x} that are less than or equal to \code{y}. That is
+  \code{Fn(y) = sum(weights[x <= y])}.
+
+  Thus \code{Fn} is a step function which jumps at the
+  values of \code{x}. The height of the jump at a point \code{y}
+  is the total weight of all entries in \code{x} that are tied
+  at that value (that is, equal to \code{y}). Missing values are
+  ignored.
+
+  If \code{weights} is omitted, the default is equivalent to
+  \code{ecdf(x)} except for the class membership.
+
+  The result of \code{ewcdf} is a function, of class \code{"ewcdf"},
+  inheriting from the classes \code{"ecdf"} and \code{"stepfun"}.
+  The class \code{ewcdf} has methods for \code{print} and \code{quantile}.
+  The inherited class \code{ecdf}
+  has methods for \code{plot} and \code{summary}.
+}
+\value{
+  A function, of class \code{"ewcdf"}, inheriting from 
+  \code{"ecdf"} and \code{"stepfun"}. 
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{ecdf}}.
+  
+  \code{\link{quantile.ewcdf}}
+}
+\examples{
+   x <- rnorm(100)
+   w <- runif(100)
+   plot(ewcdf(x,w))
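+   # a quick check of the identity stated in Details,
+   # Fn(y) = sum(weights[x <= y]); y = 0 is an arbitrary test point
+   Fn <- ewcdf(x, w)
+   Fn(0)
+   sum(w[x <= 0])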
+}
+\keyword{nonparametric}
+\keyword{univar}
diff --git a/man/exactMPLEstrauss.Rd b/man/exactMPLEstrauss.Rd
new file mode 100644
index 0000000..49ba520
--- /dev/null
+++ b/man/exactMPLEstrauss.Rd
@@ -0,0 +1,123 @@
+\name{exactMPLEstrauss}
+\alias{exactMPLEstrauss}
+\title{
+  Exact Maximum Pseudolikelihood Estimate for Stationary Strauss Process
+}
+\description{
+  Computes, to very high accuracy, the Maximum Pseudolikelihood Estimates
+  of the parameters of a stationary Strauss point process.
+}
+\usage{
+  exactMPLEstrauss(X, R, ngrid = 2048, plotit = FALSE, project=TRUE)
+}
+\arguments{
+  \item{X}{
+    Data to which the Strauss process will be fitted.
+    A point pattern dataset (object of class \code{"ppp"}).
+  }
+  \item{R}{
+    Interaction radius of the Strauss process.
+    A non-negative number.
+  }
+  \item{ngrid}{
+    Grid size for calculation of integrals. An integer, giving the
+    number of grid points in the \eqn{x} and \eqn{y} directions.
+  }
+  \item{plotit}{
+    Logical. If \code{TRUE}, the log pseudolikelihood is plotted
+    on the current device. 
+  }
+  \item{project}{
+    Logical. If \code{TRUE} (the default), the parameter
+    \eqn{\gamma}{gamma} is constrained to lie in the interval
+    \eqn{[0,1]}. If \code{FALSE}, this constraint is not applied.
+  }
+}
+\details{
+  This function is intended mainly for technical investigation
+  of algorithm performance. Its practical use is quite limited.
+
+  It fits the stationary Strauss point process model
+  to the point pattern dataset \code{X} by maximum pseudolikelihood
+  (with the border edge correction) using an algorithm with very high accuracy.
+  This algorithm is more accurate than the
+  \emph{default} behaviour of the model-fitting function
+  \code{\link{ppm}} because the discretisation is much finer.
+
+  Ripley (1988) and Baddeley and Turner (2000) derived the
+  log pseudolikelihood for the stationary Strauss
+  process, and eliminated the parameter \eqn{\beta}{beta},
+  obtaining an exact formula for the partial log pseudolikelihood
+  as a function of the interaction parameter \eqn{\gamma}{gamma} only.
+  The algorithm evaluates this expression to a high degree of accuracy,
+  using numerical integration on an \code{ngrid * ngrid} lattice,
+  uses \code{\link[stats]{optim}} to maximise the log pseudolikelihood
+  with respect to \eqn{\gamma}{gamma}, and finally recovers
+  \eqn{\beta}{beta}.
+
+  The result is a vector of length 2, containing the fitted coefficients
+  \eqn{\log\beta}{log(beta)} and \eqn{\log\gamma}{log(gamma)}.
+  These values correspond to the entries that would be obtained with
+  \code{coef(ppm(X, ~1, Strauss(R)))}.
+  The fitted coefficients are typically accurate to
+  within \eqn{10^{-6}}{10^(-6)} as shown in Baddeley and Turner (2013).
+  
+  Note however that (by default) \code{exactMPLEstrauss} 
+  constrains the parameter \eqn{\gamma}{gamma} to lie in the
+  interval \eqn{[0,1]} in which the point process is well defined
+  (Kelly and Ripley, 1976)
+  whereas \code{\link{ppm}} does not constrain
+  the value of \eqn{\gamma}{gamma} (by default). This behaviour is controlled by
+  the argument \code{project} to \code{\link{ppm}} and
+  \code{exactMPLEstrauss}. The default for \code{\link{ppm}}
+  is \code{project=FALSE}, while the default for \code{exactMPLEstrauss}
+  is \code{project=TRUE}.
+}
+\value{
+  Vector of length 2.
+}
+\references{
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42}, 283--322.
+
+  Baddeley, A. and Turner, R. (2013)
+  Bias correction for parameter estimates of spatial point process models.
+  \emph{Journal of Statistical Computation and Simulation}
+  \bold{2012}. DOI: 10.1080/00949655.2012.755976.
+
+  Kelly, F.P. and Ripley, B.D. (1976)
+  On Strauss's model for clustering.
+  \emph{Biometrika} \bold{63}, 357--360.
+
+  Ripley, B.D. (1988)
+  \emph{Statistical inference for spatial processes}.
+  Cambridge University Press.
+}
+\author{
+  \adrian and \rolf
+}
+\seealso{
+  \code{\link{ppm}}
+}
+\examples{
+\testonly{
+   exactMPLEstrauss(cells, 0.1, ngrid=128)
+   exactMPLEstrauss(cells, 0.1, ngrid=128, project=FALSE)
+}
+if(interactive()) {
+   exactMPLEstrauss(cells, 0.1)
+   coef(ppm(cells, ~1, Strauss(0.1)))
+   coef(ppm(cells, ~1, Strauss(0.1), nd=128))
+   exactMPLEstrauss(redwood, 0.04)
+   exactMPLEstrauss(redwood, 0.04, project=FALSE)
+   coef(ppm(redwood, ~1, Strauss(0.04)))
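+   # a coarser grid trades accuracy for speed
+   # (ngrid = 512 is an arbitrary illustrative value)
+   exactMPLEstrauss(redwood, 0.04, ngrid=512)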
+}
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/expand.owin.Rd b/man/expand.owin.Rd
new file mode 100644
index 0000000..f2b9d57
--- /dev/null
+++ b/man/expand.owin.Rd
@@ -0,0 +1,55 @@
+\name{expand.owin}
+\alias{expand.owin}
+\title{Apply Expansion Rule}
+\description{
+  Applies an expansion rule to a window.
+}
+\usage{
+ expand.owin(W, \dots)
+}
+\arguments{
+  \item{W}{A window.}
+  \item{\dots}{
+    Arguments passed to \code{\link{rmhexpand}} to
+    determine an expansion rule.
+  }
+}
+\value{
+  A window (object of class \code{"owin"}).
+}
+\details{
+  The argument \code{W} should be a window (an object of class
+  \code{"owin"}).
+
+  This command applies the expansion rule specified by the
+  arguments \code{\dots} to the window \code{W}, yielding another
+  window.
+
+  The arguments \code{\dots} are passed to \code{\link{rmhexpand}}
+  to determine the expansion rule.
+
+  For other transformations of the scale, location and orientation
+  of a window, see \code{\link{shift}}, \code{\link{affine}}
+  and \code{\link{rotate}}.
+}
+\seealso{
+  \code{\link{rmhexpand}} about expansion rules.
+  
+  \code{\link{shift}},
+  \code{\link{rotate}},
+  \code{\link{affine}} for other types of manipulation.
+}
+\examples{
+   expand.owin(square(1), 9)
+   expand.owin(square(1), distance=0.5)
+   expand.owin(letterR, length=2)
+   expand.owin(letterR, distance=0.1)
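+   # expansion may also be specified as an area expansion factor
+   # (the 'area' rule of rmhexpand; illustrative value)
+   expand.owin(square(1), area=4)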
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/fardist.Rd b/man/fardist.Rd
new file mode 100644
index 0000000..118bbbc
--- /dev/null
+++ b/man/fardist.Rd
@@ -0,0 +1,65 @@
+\name{fardist}
+\alias{fardist}
+\alias{fardist.ppp}
+\alias{fardist.owin}
+\title{
+  Farthest Distance to Boundary of Window
+}
+\description{
+  Computes the farthest distance from each pixel, or each data point,
+  to the boundary of the window.
+}
+\usage{
+  fardist(X, \dots)
+
+  \method{fardist}{owin}(X, \dots, squared=FALSE)
+
+  \method{fardist}{ppp}(X, \dots, squared=FALSE)
+}
+\arguments{
+  \item{X}{
+    A spatial object such as a window or point pattern.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}} to determine
+    the pixel resolution, if required.
+  }
+  \item{squared}{
+    Logical. If \code{TRUE}, the squared distances will be
+    returned.
+  }
+}
+\details{
+  The function \code{fardist} is generic, with methods for
+  the classes \code{owin} and \code{ppp}.
+
+  For a window \code{W}, the command \code{fardist(W)}
+  returns a pixel image in which the value at each pixel
+  is the \emph{largest} distance from that pixel to the boundary of
+  \code{W}.
+
+  For a point pattern \code{X}, with window \code{W}, the
+  command \code{fardist(X)} returns a numeric vector
+  with one entry for each point of \code{X}, giving the
+  largest distance from that data point to the boundary of \code{W}.
+}
+\value{
+  For \code{fardist.owin}, a pixel image (object of class \code{"im"}).
+
+  For \code{fardist.ppp}, a numeric vector.
+}
+\examples{
+  fardist(cells)
+
+  plot(FR <- fardist(letterR))
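+  # squared distances (e.g. to avoid taking square roots)
+  FR2 <- fardist(letterR, squared=TRUE)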
+}
+\author{\adrian, \rolf and \ege}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/fasp.object.Rd b/man/fasp.object.Rd
new file mode 100644
index 0000000..39775c1
--- /dev/null
+++ b/man/fasp.object.Rd
@@ -0,0 +1,103 @@
+\name{fasp.object}
+\alias{fasp.object} %DoNotExport
+\title{Function Arrays for Spatial Patterns}
+\description{
+  A class \code{"fasp"} to represent a \dQuote{matrix}
+  of functions, amenable to plotting as a matrix of plot panels.
+}
+\details{
+  An object of this class is a convenient way of storing
+  (and later plotting, editing, etc)
+  a set of functions \eqn{f_{i,j}(r)}{f[i,j](r)} of a real argument \eqn{r},
+  defined for each possible pair \eqn{(i,j)} of indices
+  \eqn{1 \le i,j \le n}{1 <= i,j <= n}. We may think of this
+  as a matrix or array of functions \eqn{f_{i,j}}{f[i,j]}.
+
+  Function arrays are particularly useful in the 
+  analysis of a multitype point pattern (a point pattern in which 
+  the points are identified as belonging to separate types).
+  We may want to compute a summary function for the points
+  of type \eqn{i} only, for each of the possible types \eqn{i}.
+  This produces a \eqn{1 \times m}{1 * m} array of functions.
+  Alternatively we may compute a summary function
+  for each possible pair of types \eqn{(i,j)}.
+  This produces an \eqn{m \times m}{m *  m} array of functions.
+
+  For multitype point patterns the command \code{\link{alltypes}}
+  will compute arrays of summary functions for each possible
+  type or for each possible pair of types.
+  The function \code{\link{alltypes}} 
+  returns an object of class \code{"fasp"}.
+
+  An object of class \code{"fasp"} is a list containing at least the
+  following components:
+
+  \describe{
+    \item{fns}{
+      A list of data frames, each representing one of the functions.
+    }
+    \item{which}{
+      A matrix representing the spatial arrangement of the
+      functions. If \code{which[i,j] = k}
+      then the function represented by \code{fns[[k]]} should be plotted
+      in the panel at position \eqn{(i,j)}. If \code{which[i,j] = NA}
+      then nothing is plotted in that position.
+    }
+    \item{titles}{
+      A list of character strings, providing suitable plotting titles
+      for the functions.
+    }
+    \item{default.formulae}{
+      A list of default formulae for plotting each of the functions.
+    }
+    \item{title}{
+      A character string, giving a default title for the array
+      when it is plotted.
+    }
+  }
+}
+\section{Functions available}{
+  There are methods for \code{plot}, \code{print} and \code{"["}
+  for this class.
+
+  The plot method displays the entire array of functions.
+  The method \code{\link{[.fasp}} selects a sub-array using the natural
+  indices \code{i,j}.
+
+  The command \code{\link{eval.fasp}} can be used to apply
+  a transformation to each function in the array,
+  and to combine two arrays.
+}
+\seealso{
+  \code{\link{alltypes}},
+  \code{\link{plot.fasp}},
+  \code{\link{[.fasp}},
+  \code{\link{eval.fasp}}
+}
+\examples{
+  # multitype point pattern
+  data(amacrine)
+  GG <- alltypes(amacrine, "G")
+  plot(GG)
+
+  # select the row corresponding to cells of type "on"
+  Gon <- GG["on", ]
+  plot(Gon)
+
+  # extract the G function for i = "on", j = "off"
+  Gonoff <- GG["on", "off", drop=TRUE]
+
+  # Fisher variance stabilising transformation
+  GGfish <- eval.fasp(asin(sqrt(GG)))
+  plot(GGfish)
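+
+  # a "fasp" object is a list; inspect the layout matrix
+  # ('which' component described in Details)
+  GG$which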
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{attribute}
diff --git a/man/finpines.Rd b/man/finpines.Rd
new file mode 100644
index 0000000..18a178f
--- /dev/null
+++ b/man/finpines.Rd
@@ -0,0 +1,67 @@
+\name{finpines}
+\alias{finpines}
+\docType{data}
+\title{
+  Pine saplings in Finland.
+}
+\description{
+  The data record the locations of 126 pine saplings
+  in a Finnish forest, their heights and their diameters.
+
+  The dataset \code{finpines} is a marked point pattern
+  containing the locations of the saplings marked by their heights
+  and their diameters.
+
+  Sapling locations are given in metres (to six significant digits);
+  heights are in metres (rounded to the nearest 0.1 metre,
+  except in one case to the nearest 0.05 metres);
+  diameters are in centimetres (rounded to the nearest centimetre).
+  
+  The data were recorded by Professor Seppo Kellomaki, Faculty of
+  Forestry, University of Joensuu, Finland,
+  and subsequently massaged by Professor Antti Penttinen,
+  Department of Statistics, University of Jyv\"askyl\"a, Finland.
+  
+  Originally the point locations were observed in polar coordinates with
+  rather poor angular precision. Hence the coordinates are imprecise at
+  large radii because of rounding errors: indeed the alignments can be
+  observed by eye.
+  
+  The data were manipulated by Prof Penttinen by making small angular
+  perturbations at random. After this transformation, the original data
+  (in a circular plot) were clipped to a square window, for convenience.
+
+  Professor Penttinen emphasises that the data were intended only
+  for initial experimentation. They have some strange features.
+  For example, if the height is less than 1.3 metres then the diameter
+  can be uncertain. Also there are some very close pairs of points.
+  Some pairs of trees (namely (58,59), (78,79), (96,97) and (102,103))
+  violate the requirement that the interpoint distance should be
+  greater than half the sum of their diameters.
+
+  These data have subsequently been analysed by Van Lieshout (2004).
+} 
+\format{
+  Object of class \code{"ppp"} 
+  representing the point pattern of sapling locations marked by
+  their heights and diameters.
+  See \code{\link{ppp.object}} for details of the format.
+}
+\usage{data(finpines)}
+\examples{
+    data(finpines)
+    plot(unmark(finpines), main="Finnish pines: locations")
+    plot(finpines, which.marks="height", main="heights")
+    plot(finpines, which.marks="diameter", main="diameters")
+    plot(finpines, which.marks="diameter", 
+              main="diameters to scale", markscale=1/200)
+}
+\source{Prof Antti Penttinen}
+\references{
+  Van Lieshout, M.N.M. (2004)
+  A J-function for marked point patterns.
+  Research Report PNA-R0404, June 2004.
+  Centrum voor Wiskunde en Informatica (CWI), Amsterdam, 2004.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/fitin.Rd b/man/fitin.Rd
new file mode 100644
index 0000000..ae3a193
--- /dev/null
+++ b/man/fitin.Rd
@@ -0,0 +1,83 @@
+\name{fitin.ppm}
+\alias{fitin}
+\alias{fitin.ppm}
+\title{Extract the Interaction from a Fitted Point Process Model}
+\description{
+  Given a point process model that has been fitted to point pattern
+  data, this function extracts the interpoint interaction part of the
+  model as a separate object.
+}
+\usage{
+fitin(object)
+\method{fitin}{ppm}(object)
+}
+\arguments{
+  \item{object}{A fitted point process model (object of class
+    \code{"ppm"}).
+  }
+}
+\details{
+  An object of class \code{"ppm"} describes a fitted point process
+  model. It contains information about the original data to which the
+  model was fitted, the spatial trend that was fitted, the
+  interpoint interaction that was fitted, and other data.
+  See \code{\link{ppm.object}} for details of this class.
+
+  The function \code{fitin} extracts from this model the information about the
+  fitted interpoint interaction only.
+  The information is organised as an object of class \code{"fii"}
+  (fitted interpoint interaction).
+  This object can be printed or plotted.
+
+  Users may find this a convenient way to plot the
+  fitted interpoint interaction term, as shown in the Examples.
+
+  For a pairwise interaction, the plot of the fitted interaction
+  shows the pair interaction function (the contribution to the
+  probability density from a pair of points as a function of the
+  distance between them). For a higher-order interaction, the plot shows
+  the strongest interaction (the value most different from 1)
+  that could ever arise at the given distance. 
+
+  The fitted interaction coefficients can also be extracted
+  from this object using \code{\link{coef}}.
+}
+\value{
+  An object of class \code{"fii"} representing the fitted
+  interpoint interaction. This object can be printed and plotted.
+}
+\author{\adrian and \rolf}
+\seealso{
+  Methods for handling fitted interactions: 
+  \code{\link{methods.fii}}, \code{\link{reach.fii}},
+  \code{\link{as.interact.fii}}.
+
+  Background:
+  \code{\link{ppm}},
+  \code{\link{ppm.object}}.
+}
+\examples{
+   # unmarked 
+   model <- ppm(swedishpines ~1, PairPiece(seq(3,19,by=4)))
+   f <- fitin(model)
+   f
+   plot(f)
+
+# extract fitted interaction coefficients
+   coef(f)
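+
+   # interaction range of the fitted interaction
+   # (reach has a method for class "fii"; see reach.fii)
+   reach(f)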
+
+   # multitype
+   # fit the stationary multitype Strauss process to `amacrine'
+   r <- 0.02 * matrix(c(1,2,2,1), nrow=2,ncol=2)
+   model <- ppm(amacrine ~1, MultiStrauss(r))
+   f <- fitin(model)
+   f
+   plot(f)
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/fitted.lppm.Rd b/man/fitted.lppm.Rd
new file mode 100644
index 0000000..6293f68
--- /dev/null
+++ b/man/fitted.lppm.Rd
@@ -0,0 +1,90 @@
+\name{fitted.lppm}
+\alias{fitted.lppm}
+\title{
+  Fitted Intensity for Point Process on Linear Network
+}
+\description{
+  Given a point process model fitted to a point pattern on a linear network,
+  compute the fitted intensity of the model
+  at the points of the pattern,
+  or at the points of the quadrature scheme used to fit the model.
+}
+\usage{
+\method{fitted}{lppm}(object, \dots,
+                      dataonly = FALSE, new.coef = NULL,
+		      leaveoneout = FALSE)
+}
+\arguments{
+  \item{object}{
+    Fitted point process model on a linear network
+    (object of class \code{"lppm"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{dataonly}{
+    Logical value indicating whether to compute fitted intensities
+    at the points of the original point pattern dataset
+    (\code{dataonly=TRUE})
+    or at all the quadrature points of the quadrature scheme
+    used to fit the model (\code{dataonly=FALSE}, the default).
+  }
+  \item{new.coef}{
+    Numeric vector of parameter values to replace the 
+    fitted model parameters \code{coef(object)}.
+  }
+  \item{leaveoneout}{
+    Logical. If \code{TRUE} the fitted value at each data
+    point will be computed using a leave-one-out method. See Details.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link[stats]{fitted}}
+  for the class \code{"lppm"} of fitted point process models on a linear
+  network.
+  
+  The locations \eqn{u} at which the fitted conditional intensity/trend
+  is evaluated, are the points of the
+  quadrature scheme used to fit the model in \code{\link{ppm}}.
+  They include the data points (the points of the original point pattern
+  dataset \code{x}) and other ``dummy'' points 
+  in the window of observation.
+
+  If \code{leaveoneout=TRUE}, fitted values will be computed
+  for the data points only, using a \sQuote{leave-one-out} rule: 
+  the fitted value at \code{X[i]} is effectively computed by
+  deleting this point from the data and re-fitting the model to the
+  reduced pattern \code{X[-i]}, then predicting the value at
+  \code{X[i]}. (Instead of literally performing this calculation,
+  we apply a Taylor approximation using the influence function
+  computed in \code{\link{dfbetas.ppm}}.)
+}
+\value{
+  A vector containing the values of the fitted spatial trend.
+
+  Entries in this vector correspond to the quadrature points (data or
+  dummy points) used to fit the model. 
+  The quadrature points can be extracted from \code{object}
+  by \code{union.quad(quad.ppm(object))}.
+}
+\author{
+  \adrian, \rolf and \ege
+}
+\seealso{
+  \code{\link{lppm}},
+  \code{\link{predict.lppm}}
+}
+\examples{
+   fit <- lppm(spiders~x+y)
+   a <- fitted(fit)
+   b <- fitted(fit, dataonly=TRUE)
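+   # leave-one-out fitted values at the data points
+   loo <- fitted(fit, leaveoneout=TRUE)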
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{models}
diff --git a/man/fitted.mppm.Rd b/man/fitted.mppm.Rd
new file mode 100644
index 0000000..2e27fbb
--- /dev/null
+++ b/man/fitted.mppm.Rd
@@ -0,0 +1,83 @@
+\name{fitted.mppm}
+\alias{fitted.mppm}
+\title{Fitted Conditional Intensity for Multiple Point Process Model}
+\description{
+  Given a point process model fitted to multiple point patterns, compute the
+  fitted conditional intensity of the model at the points of each data
+  pattern, or at the points of the quadrature schemes used to fit the
+  model.
+}
+\usage{
+ \method{fitted}{mppm}(object, ..., type = "lambda", dataonly = FALSE)
+}
+\arguments{
+  \item{object}{
+    The fitted model. An object of class \code{"mppm"}
+    obtained from \code{\link{mppm}}.
+  }
+  \item{\dots}{Ignored.}
+  \item{type}{
+    Type of fitted values: either \code{"trend"} for the spatial trend,
+    or \code{"lambda"} or \code{"cif"} for the conditional intensity.
+  }
+  \item{dataonly}{
+    If \code{TRUE}, fitted values are computed only for the points
+    of the data point patterns. If \code{FALSE}, fitted values are
+    computed for the points of the quadrature schemes used to fit the
+    model.
+  }
+}
+\details{
+  This function evaluates the conditional intensity
+  \eqn{\hat\lambda(u,x)}{lambdahat(u,x)}
+  or spatial trend \eqn{\hat b(u)}{bhat(u)}
+  of the fitted point process model for
+  certain locations \eqn{u}, for each of the original point patterns \eqn{x}
+  to which the model was fitted.
+
+  The locations \eqn{u} at which the fitted conditional intensity/trend is
+  evaluated, are the points of the quadrature schemes used to fit the
+  model in \code{\link{mppm}}. They include the data points (the points of the
+  original point pattern datasets) and other ``dummy'' points  in
+  the window of observation.
+
+  Use \code{\link{predict.mppm}} to compute the fitted conditional intensity at
+  other locations or with other values of the explanatory variables.
+}
+\value{
+  A list of vectors (one for each row of the original hyperframe,
+  i.e. one vector for each of the original point patterns)
+  containing the values of the fitted conditional intensity
+  or (if \code{type="trend"}) the fitted spatial trend.
+
+  Entries in these vectors correspond to the quadrature points (data
+  or dummy points) used to fit the model. The quadrature points can
+  be extracted from \code{object} by \code{\link{quad.mppm}(object)}.
+}
+\references{
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  London: Chapman and Hall/CRC Press. 
+}
+\author{
+  \adrian, Ida-Maria Sintorn and Leanne Bischoff.
+  Implemented by \adrian, \rolf and \ege.
+}
+\examples{
+   model <- mppm(Bugs ~ x, data=hyperframe(Bugs=waterstriders),
+                           interaction=Strauss(7))
+   cifs <- fitted(model)
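+   # fitted spatial trend, and fitted values at the data points only
+   trends <- fitted(model, type="trend")
+   cifdata <- fitted(model, dataonly=TRUE)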
+}
+\seealso{
+  \code{\link{mppm}},
+  \code{\link{predict.mppm}}
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/fitted.ppm.Rd b/man/fitted.ppm.Rd
new file mode 100644
index 0000000..b098415
--- /dev/null
+++ b/man/fitted.ppm.Rd
@@ -0,0 +1,137 @@
+\name{fitted.ppm}
+\alias{fitted.ppm}
+\title{
+  Fitted Conditional Intensity for Point Process Model
+}
+\description{
+  Given a point process model fitted to a point pattern,
+  compute the fitted conditional intensity or fitted trend of the model
+  at the points of the pattern,
+  or at the points of the quadrature scheme used to fit the model.
+}
+\usage{
+  \method{fitted}{ppm}(object, \dots, type="lambda", dataonly=FALSE,
+  new.coef=NULL, leaveoneout=FALSE, drop=FALSE, check=TRUE, repair=TRUE,
+  dropcoef=FALSE)
+}
+\arguments{
+  \item{object}{
+    The fitted point process model (an object of class \code{"ppm"})
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{type}{
+    String (partially matched) indicating whether the fitted value is the
+    conditional intensity (\code{"lambda"} or \code{"cif"}) or
+    the first order trend (\code{"trend"})
+    or the logarithm of conditional intensity (\code{"link"}).
+  }
+  \item{dataonly}{
+    Logical. If \code{TRUE}, then values will only be computed
+    at the points of the data point pattern. If \code{FALSE}, then
+    values will be computed at all the points of the quadrature scheme
+    used to fit the model, including the points of the data point pattern.
+  }
+  \item{new.coef}{
+    Numeric vector of parameter values to replace the 
+    fitted model parameters \code{coef(object)}.
+  }
+  \item{leaveoneout}{
+    Logical. If \code{TRUE} the fitted value at each data
+    point will be computed using a leave-one-out method. See Details.
+  }
+  \item{drop}{
+    Logical value determining whether to delete quadrature points
+    that were not used to fit the model.
+  }
+  \item{check}{
+    Logical value indicating whether to check the internal format
+    of \code{object}. If there is any possibility that this object
+    has been restored from a dump file, or has otherwise lost track of
+    the environment where it was originally computed, set
+    \code{check=TRUE}. 
+  }
+  \item{repair}{
+    Logical value indicating whether to repair the internal format
+    of \code{object}, if it is found to be damaged. 
+  }
+  \item{dropcoef}{
+    Internal use only.
+  }
+}
+\value{
+  A vector containing the values of the fitted conditional intensity,
+  fitted spatial trend, or logarithm of the fitted conditional intensity.
+  
+  Entries in this vector correspond to the quadrature points (data or
+  dummy points) used to fit the model. The quadrature points can be
+  extracted from \code{object} by \code{union.quad(quad.ppm(object))}.
+}
+\details{
+  The argument \code{object} must be a fitted point process model
+  (object of class \code{"ppm"}). Such objects are produced by the 
+  model-fitting algorithm \code{\link{ppm}}.
+
+  This function evaluates the conditional intensity
+  \eqn{\hat\lambda(u, x)}{lambdahat(u,x)}
+  or spatial trend \eqn{\hat b(u)}{bhat(u)} of the fitted point process
+  model for certain locations \eqn{u},
+  where \code{x} is the original point pattern dataset to which
+  the model was fitted.
+
+  The locations \eqn{u} at which the fitted conditional intensity/trend
+  is evaluated, are the points of the
+  quadrature scheme used to fit the model in \code{\link{ppm}}.
+  They include the data points (the points of the original point pattern
+  dataset \code{x}) and other ``dummy'' points 
+  in the window of observation.
+
+  If \code{leaveoneout=TRUE}, fitted values will be computed
+  for the data points only, using a \sQuote{leave-one-out} rule: 
+  the fitted value at \code{X[i]} is effectively computed by
+  deleting this point from the data and re-fitting the model to the
+  reduced pattern \code{X[-i]}, then predicting the value at
+  \code{X[i]}. (Instead of literally performing this calculation,
+  we apply a Taylor approximation using the influence function
+  computed in \code{\link{dfbetas.ppm}}.)
+  
+  The argument \code{drop} is explained in \code{\link{quad.ppm}}.
+  
+  Use \code{\link{predict.ppm}} to compute the fitted conditional
+  intensity at other locations or with other values of the
+  explanatory variables.
+}
+\references{
+  Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005).
+  Residual analysis for spatial point processes (with discussion).
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{67}, 617--666.
+}
+\seealso{
+ \code{\link{ppm.object}},
+ \code{\link{ppm}},
+ \code{\link{predict.ppm}}
+}
+\examples{
+    str <- ppm(cells ~x, Strauss(r=0.1))
+    lambda <- fitted(str)
+
+    # extract quadrature points in corresponding order
+    quadpoints <- union.quad(quad.ppm(str))
+
+    # plot conditional intensity values
+    # as circles centred on the quadrature points 
+    quadmarked <- setmarks(quadpoints, lambda)
+    plot(quadmarked)
+
+    if(!interactive()) str <- ppm(cells ~ x)
+
+    lambdaX <- fitted(str, leaveoneout=TRUE)
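+
+    # fitted first-order trend, and log conditional intensity
+    trendvals <- fitted(str, type="trend")
+    loglam <- fitted(str, type="link")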
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{models}
diff --git a/man/fitted.slrm.Rd b/man/fitted.slrm.Rd
new file mode 100644
index 0000000..de5fc21
--- /dev/null
+++ b/man/fitted.slrm.Rd
@@ -0,0 +1,54 @@
+\name{fitted.slrm}
+\Rdversion{1.1}
+\alias{fitted.slrm}
+\title{
+  Fitted Probabilities for Spatial Logistic Regression
+}
+\description{
+  Given a fitted Spatial Logistic Regression model,
+  this function computes the fitted probabilities for each pixel.
+}
+\usage{
+  \method{fitted}{slrm}(object, ...)
+}
+\arguments{
+  \item{object}{
+    a fitted spatial logistic regression model.
+    An object of class \code{"slrm"}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This is a method for the generic function
+  \code{\link[stats:fitted.values]{fitted}}
+  for spatial logistic regression models
+  (objects of class \code{"slrm"}, usually obtained
+  from the function \code{\link{slrm}}).
+
+  The algorithm computes
+  the fitted probabilities of the presence of a random point in each pixel.
+}
+\value{
+  A pixel image (object of class \code{"im"}) containing the
+  fitted probability for each pixel.
+}
+\seealso{
+  \code{\link{slrm}},
+  \code{\link[stats:fitted.values]{fitted}}
+}
+\examples{
+  X <- rpoispp(42)
+  fit <- slrm(X ~ x+y)
+  plot(fitted(fit))
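+  # fitted probabilities at the data points
+  # (extracting pixel values with the "[" operator for images)
+  fitted(fit)[X]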
+}
+\author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf}
+\keyword{spatial}
+\keyword{models}
+\keyword{methods}
diff --git a/man/fixef.mppm.Rd b/man/fixef.mppm.Rd
new file mode 100644
index 0000000..e769677
--- /dev/null
+++ b/man/fixef.mppm.Rd
@@ -0,0 +1,62 @@
+\name{fixef.mppm}
+\alias{fixef.mppm}
+\title{
+  Extract Fixed Effects from Point Process Model
+}
+\description{
+  Given a point process model fitted to a list of point patterns,
+  extract the fixed effects of the model.
+  A method for \code{fixef}.
+}
+\usage{
+ \method{fixef}{mppm}(object, \dots)
+}
+\arguments{
+  \item{object}{
+    A fitted point process model (an object of class \code{"mppm"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link[nlme]{fixef}}.
+
+  The argument \code{object} must be a fitted point process model
+  (object of class \code{"mppm"}) produced by the 
+  fitting algorithm \code{\link{mppm}}. This represents a
+  point process model that has been fitted
+  to a list of several point pattern datasets. See \code{\link{mppm}}
+  for information.
+
+  This function extracts the coefficients of the fixed effects
+  of the model.
+}
+\value{
+  A numeric vector of coefficients.
+}
+\references{
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  London: Chapman and Hall/CRC Press. 
+}
+\author{
+  Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff.
+  Implemented in \pkg{spatstat} by
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{coef.mppm}}
+}
+\examples{
+ H <- hyperframe(Y = waterstriders)
+ # Tweak data to exaggerate differences
+ H$Y[[1]] <- rthin(H$Y[[1]], 0.3)
+ m1 <- mppm(Y ~ id,  data=H, Strauss(7))
+ fixef(m1)
+ m2 <- mppm(Y ~ 1,  random=~1|id, data=H, Strauss(7))
+ fixef(m2)
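+ # compare with the full coefficient vector (see coef.mppm)
+ coef(m1)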
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{models}
diff --git a/man/flipxy.Rd b/man/flipxy.Rd
new file mode 100644
index 0000000..305182a
--- /dev/null
+++ b/man/flipxy.Rd
@@ -0,0 +1,52 @@
+\name{flipxy} 
+\alias{flipxy}
+\alias{flipxy.owin}
+\alias{flipxy.ppp}
+\alias{flipxy.psp}
+\alias{flipxy.im}
+\title{Exchange X and Y Coordinates}
+\description{
+  Exchanges the \eqn{x} and \eqn{y} coordinates in a spatial dataset.
+}
+\usage{
+ flipxy(X)
+ \method{flipxy}{owin}(X)
+ \method{flipxy}{ppp}(X)
+ \method{flipxy}{psp}(X)
+ \method{flipxy}{im}(X)
+}
+\arguments{
+  \item{X}{Spatial dataset. An object of class
+    \code{"owin"}, \code{"ppp"}, \code{"psp"} or \code{"im"}.
+  }
+}
+\value{
+  Another object of the same type, representing the
+  result of swapping the \eqn{x} and \eqn{y} coordinates.
+}
+\details{
+  This function swaps the \eqn{x} and \eqn{y} coordinates of a spatial
+  dataset. This could also be performed using the command \code{\link{affine}},
+  but \code{flipxy} is faster.
+  
+  The function \code{\link{flipxy}} is generic, with methods
+  for the classes of objects listed above.
+}
+\seealso{
+  \code{\link{affine}},
+  \code{\link{reflect}},
+  \code{\link{rotate}},
+  \code{\link{shift}}
+}
+\examples{
+  data(cells)
+  X <- flipxy(cells)
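+  # equivalently (but more slowly) via an affine map;
+  # the swap matrix rbind(c(0,1), c(1,0)) is an illustrative comparison
+  Y <- affine(cells, mat=matrix(c(0,1,1,0), 2, 2))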
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/flu.Rd b/man/flu.Rd
new file mode 100644
index 0000000..aebbe3b
--- /dev/null
+++ b/man/flu.Rd
@@ -0,0 +1,108 @@
+\name{flu}
+\alias{flu}
+\docType{data}
+\title{
+  Influenza Virus Proteins
+}
+\description{
+  Replicated spatial point patterns giving the locations of two
+  different virus proteins on the membranes of cells infected with
+  influenza virus.
+}
+\usage{data(flu)}
+\format{
+  A \code{\link{hyperframe}} with 41 rows and four columns:
+  \describe{
+    \item{pattern}{
+      List of spatial point patterns
+      (objects of class \code{"ppp"})
+      with points of two types, identifying the locations of 
+      two different proteins on a membrane sheet.
+    }
+    \item{virustype}{
+      Factor identifying whether the infecting virus was
+      the wild type (\code{wt}) or mutant (\code{mut1}).
+    }
+    \item{stain}{
+      Factor identifying whether the membrane sheet was stained
+      for the proteins \emph{M2} and \emph{M1}
+      (\code{stain="M2-M1"})
+      or stained for the proteins \emph{M2} and \emph{HA}
+      (\code{stain="M2-HA"}).
+    }
+    \item{frameid}{
+      Integer. Serial number of the microscope frame
+      in the original experiment. Frame identifier is not unique
+      across different values of \code{virustype} and \code{stain}.
+    }
+  }
+  The row names of the hyperframe can be used as succinct labels
+  in plots.
+}
+\details{
+  The data consist of 41 spatial point patterns, each
+  giving the locations of two different virus proteins
+  on the membranes of cells infected with influenza virus.
+  
+  Chen et al (2008) conducted the experiment and used spatial analysis
+  to establish evidence for an interaction between the influenza virus
+  proteins M1 and M2 that is important for the study of viral replication.
+
+  Canine kidney cells were infected with human influenza, Udorn strain,
+  either the wild type or a mutant which encodes a defective M2 protein.
+  At twelve hours post-infection, membrane sheets were prepared
+  and stained for viral proteins, using two antibodies conjugated to
+  gold particles of two sizes (6 nanometre and 12 nanometre diameter)
+  enabling localisation of two different proteins on each sheet.
+  The 6 nm particles were stained for M2 (ion channel protein),
+  while the 12 nm particles were stained either for M1 (matrix protein)
+  or for HA (hemagglutinin). Membrane sheets were visualised in
+  electron microscopy.
+  
+  Experimental technique and spatial analysis of the membranes
+  stained for M2 and M1 is reported in Chen et al (2008).
+  Analysis of the membranes stained for
+  M2 and HA is reported in Rossman et al (2010).
+  The M2-HA data show a stronger association
+  between the two proteins, which has also been observed biochemically
+  and functionally (Rossman et al, 2010).
+
+  The dataset \code{flu} is a \code{\link{hyperframe}}
+  with one row for each membrane sheet. The column named \code{pattern}
+  contains the spatial point patterns of gold particle locations,
+  with two types of points (either \code{M1} and \code{M2} or
+  \code{HA} and \code{M2}). The column named \code{virustype}
+  is a factor identifying the virus: either wild type \code{wt}
+  or mutant \code{mut1}. The column named \code{stain} is a factor
+  identifying whether the membrane was stained for
+  M1 and M2 (\code{stain="M2-M1"}) or stained for HA and M2
+  (\code{stain="M2-HA"}).
+  The row names of the hyperframe are a succinct summary of
+  the experimental conditions and can be used as labels
+  in plots. See the Examples.
+}
+\source{
+  Data generously provided by Dr G.P. Leser and Dr R.A. Lamb.
+  Please cite Chen et al (2008) in any use of these data.
+}
+\references{
+  Chen, B.J., Leser, G.P., Jackson, D. and Lamb, R.A. (2008)
+  The influenza virus M2 protein cytoplasmic tail interacts with the M1 protein
+  and influences virus assembly at the site of virus budding.
+  \emph{Journal of Virology} \bold{82}, 10059--10070.
+
+  Rossman, J.S., Jing, X.H., Leser, G.P. and Lamb, R.A. (2010)
+  Influenza virus M2 protein mediates ESCRT-independent membrane scission.
+  \emph{Cell} \bold{142}, 902--913.
+}
+\examples{
+data(flu)
+flu
+# extract the 10th point pattern, as a list of length 1 ...
+Y <- flu$pattern[10]
+# ... or as the point pattern itself
+Y <- flu[10, 1, drop=TRUE]
+wildM1 <- with(flu, virustype == "wt" & stain == "M2-M1")
+plot(flu[wildM1, 1, drop=TRUE], 
+     main=c("flu data", "wild type virus, M2-M1 stain"),
+     pch=c(3,16), cex=0.4, cols=2:3)
+}
+\keyword{datasets}
diff --git a/man/foo.Rd b/man/foo.Rd
new file mode 100644
index 0000000..b795261
--- /dev/null
+++ b/man/foo.Rd
@@ -0,0 +1,59 @@
+\name{foo}
+\alias{foo}
+\alias{plot.foo}
+\title{
+  Foo is Not a Real Name
+}
+\description{
+  The name \code{foo} is not a real name: it is a place holder,
+  used to represent the name of any desired thing.
+
+  The functions defined here simply print an explanation of the
+  placeholder name \code{foo}.
+}
+\usage{
+foo()
+
+\method{plot}{foo}(x, \dots)
+}
+\arguments{
+  \item{x}{Ignored.}
+  \item{\dots}{Ignored.}
+}
+\details{
+  The name \code{foo} is used by computer scientists as a
+  \emph{place holder}, to represent the name of any desired object or
+  function. It is not the name of an actual object or function;
+  it serves only as an example, to explain a concept.
+
+  However, many users misinterpret this convention, and actually
+  type the command \code{foo} or \code{foo()}. Then they email the
+  package author to inform them that \code{foo} is not defined.
+
+  To avoid this correspondence, we have now defined an object
+  called \code{foo}.
+  
+  The function \code{foo()} prints a message explaining that \code{foo}
+  is not really the name of a variable. 
+
+  The function can be executed simply by typing \code{foo}
+  without parentheses.
+}
+\value{
+  Null.
+}
+\author{\adrian, \rolf and \ege}
+\seealso{
+  \code{\link{beginner}}
+}
+\examples{
+  foo
+}
+\keyword{documentation}
diff --git a/man/formula.fv.Rd b/man/formula.fv.Rd
new file mode 100644
index 0000000..12727f5
--- /dev/null
+++ b/man/formula.fv.Rd
@@ -0,0 +1,77 @@
+\name{formula.fv}
+\alias{formula.fv}
+\alias{formula<-}
+\alias{formula<-.fv}
+\title{
+  Extract or Change the Plot Formula for a Function Value Table
+}
+\description{
+  Extract or change the default plotting formula
+  for an object of class \code{"fv"} (function value table).
+}
+\usage{
+\method{formula}{fv}(x, \dots)
+
+formula(x, \dots) <- value
+
+\method{formula}{fv}(x, \dots) <- value
+}
+\arguments{
+  \item{x}{
+    An object of class \code{"fv"},
+    containing the values of several estimates of a function.
+  }
+  \item{\dots}{
+    Arguments passed to other methods.
+  }
+  \item{value}{
+    New value of the formula. Either a \code{formula} or a character
+    string.
+  }
+}
+\details{
+  A function value table
+  (object of class \code{"fv"}, see \code{\link{fv.object}})
+  is a convenient way of storing and plotting
+  several different estimates of the same function.
+
+  The default behaviour of \code{plot(x)} for a function value table
+  \code{x} is determined by a formula
+  associated with \code{x} called its \emph{plot formula}.
+  See \code{\link{plot.fv}} for explanation about these formulae.
+
+  The function \code{formula.fv} is a method for the generic command
+  \code{\link{formula}}. It extracts the plot formula associated with
+  the object.
+
+  The function \code{formula<-} is generic. It changes the formula
+  associated with an object. 
+
+  The function \code{formula<-.fv} is the method for \code{formula<-}
+  for the class \code{"fv"}. It changes the plot formula associated with
+  the object.
+}
+\value{
+  The result of \code{formula.fv} is a character string containing the
+  plot formula. The result of \code{formula<-.fv} is a new object of
+  class \code{"fv"}.
+}
+\author{
+  \adrian and \rolf
+}
+\seealso{
+  \code{\link{fv}},  
+  \code{\link{plot.fv}},
+  \code{\link[stats]{formula}}.
+}
+\examples{
+   K <- Kest(cells)
+   formula(K)
+   formula(K) <- (iso ~ r)
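+   # the new plot formula now controls the default display
+   plot(K)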
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/formula.ppm.Rd b/man/formula.ppm.Rd
new file mode 100644
index 0000000..e40f79e
--- /dev/null
+++ b/man/formula.ppm.Rd
@@ -0,0 +1,72 @@
+\name{formula.ppm}
+\alias{formula.ppm}
+\alias{terms.ppm}
+\title{
+  Model Formulae for Gibbs Point Process Models
+}
+\description{
+  Extract the trend formula, or the terms in the trend formula,
+  in a fitted Gibbs point process model.
+}
+\usage{
+\method{formula}{ppm}(x, \dots)
+\method{terms}{ppm}(x, \dots)
+}
+\arguments{
+  \item{x}{
+    An object of class \code{"ppm"},
+    representing a fitted point process model.
+  }
+  \item{\dots}{
+    Arguments passed to other methods.
+  }
+}
+\details{
+  These functions are methods for the generic commands
+  \code{\link{formula}} and 
+  \code{\link{terms}} 
+  for the class \code{"ppm"}.
+
+  An object of class \code{"ppm"} represents a fitted
+  Poisson or Gibbs point process model.
+  It is obtained from the model-fitting function \code{\link{ppm}}.
+
+  The method \code{formula.ppm} extracts the trend formula from the
+  fitted model \code{x} (the formula originally specified as the
+  argument \code{trend} to \code{\link{ppm}}).
+  The method \code{terms.ppm} extracts the individual
+  terms in the trend formula.
+}
+\value{
+  See the help files for the corresponding generic functions.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{ppm}},  
+  \code{\link{as.owin}},
+  \code{\link{coef.ppm}},
+  \code{\link{extractAIC.ppm}},
+  \code{\link{fitted.ppm}},
+  \code{\link{logLik.ppm}},
+  \code{\link{model.frame.ppm}},
+  \code{\link{model.matrix.ppm}},
+  \code{\link{plot.ppm}},
+  \code{\link{predict.ppm}},
+  \code{\link{residuals.ppm}},
+  \code{\link{simulate.ppm}},
+  \code{\link{summary.ppm}},
+  \code{\link{update.ppm}},
+  \code{\link{vcov.ppm}}.
+}
+\examples{
+  data(cells)
+  fit <- ppm(cells, ~x)
+  formula(fit)
+  terms(fit)  
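+  # the trend formula can be changed with update.ppm
+  fit2 <- update(fit, ~ x + y)
+  formula(fit2)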
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/fourierbasis.Rd b/man/fourierbasis.Rd
new file mode 100644
index 0000000..b0d169e
--- /dev/null
+++ b/man/fourierbasis.Rd
@@ -0,0 +1,63 @@
+\name{fourierbasis}
+\alias{fourierbasis}
+\title{Fourier Basis Functions}
+\description{Evaluates the Fourier basis functions
+  on a \eqn{d}-dimensional box
+  with \eqn{d}-dimensional frequencies \eqn{k_i} at the
+  \eqn{d}-dimensional coordinates \eqn{x_j}.
+}
+\usage{
+  fourierbasis(x, k, win = boxx(rep(list(0:1), ncol(k))))
+}
+\arguments{
+  \item{x}{
+    Coordinates. 
+    A \code{data.frame} or matrix with
+    \eqn{m} rows and \eqn{d} columns giving
+    the \eqn{d}-dimensional coordinates.
+  }
+  \item{k}{Frequencies.
+    A \code{data.frame} or matrix with \eqn{n} rows and \eqn{d} columns
+    giving the frequencies of the Fourier-functions.
+  }
+  \item{win}{
+    window (of class \code{"owin"}, \code{"box3"} or \code{"boxx"})
+    giving the \eqn{d}-dimensional box domain of the Fourier functions.
+  }
+}
+\details{
+  The result is an \eqn{n} by \eqn{m} matrix where the \eqn{(i,j)}'th
+  entry is the \eqn{d}-dimensional Fourier basis function with
+  frequency \eqn{k_i} evaluated at the point \eqn{x_j}, i.e.,
+  \deqn{
+    \frac{1}{|W|}
+    \exp(2\pi i <k_i,x_j>/|W|)
+  }{
+    1/|W| * exp(2*pi*i*k_i*x_j/|W|)
+  }
+  where \eqn{<\cdot,\cdot>}{*} is the \eqn{d}-dimensional inner product
+  and \eqn{|W|} is the volume of the 
+  domain (window/box). Note that the algorithm does not check whether
+  the coordinates given in \code{x} are contained in the given box.
+  Actually the box is only 
+  used to determine the volume of the domain for normalization.
+}
+\value{An \code{n} by \code{m} matrix of complex values.}
+\author{
+  \adrian, \rolf and \ege
+}
+\examples{
+## 27 rows of three dimensional Fourier frequencies:
+k <- expand.grid(-1:1,-1:1, -1:1)
+## Two random points in the three dimensional unit box:
+x <- rbind(runif(3),runif(3))
+## 27 by 2 resulting matrix:
+v <- fourierbasis(x, k)
+head(v)
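+## the default domain is the unit box; specifying it explicitly
+## should give the same result
+v2 <- fourierbasis(x, k, win=boxx(rep(list(0:1), 3)))
+all.equal(v, v2)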
+}
diff --git a/man/fryplot.Rd b/man/fryplot.Rd
new file mode 100644
index 0000000..905c22c
--- /dev/null
+++ b/man/fryplot.Rd
@@ -0,0 +1,149 @@
+\name{fryplot}
+\alias{fryplot}
+\alias{frypoints}
+\title{Fry Plot of Point Pattern}
+\description{
+  Displays the Fry plot (Patterson plot) of a spatial point pattern. 
+}
+\usage{
+fryplot(X, ..., width=NULL, from=NULL, to=NULL, axes=FALSE)
+frypoints(X, from=NULL, to=NULL, dmax=Inf)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}) or something
+    acceptable to \code{\link{as.ppp}}.
+  }
+  \item{\dots}{Optional arguments to control the appearance of the
+    plot.}
+  \item{width}{Optional parameter indicating the width of a box
+    for a zoomed-in view of the Fry plot near the origin.}
+  \item{from,to}{
+    Optional. Subset indices specifying which points of \code{X}
+    will be considered when forming the vectors (drawn from each point of
+    \code{from} to each point of \code{to}).
+  }
+  \item{axes}{
+    Logical value indicating whether to draw axes,
+    crossing at the origin.
+  }
+  \item{dmax}{
+    Maximum distance between points. Pairs at greater distances
+    do not contribute to the result. The default means there is
+    no maximum distance.
+  }
+}
+\details{
+  The function \code{fryplot} generates a Fry plot (or Patterson plot);
+  \code{frypoints} returns the points of the Fry plot as a point pattern
+  dataset.
+
+  Fry (1979) and Hanna and Fry (1979) introduced a manual graphical method for
+  investigating features of a spatial point pattern of mineral deposits.
+  A transparent sheet, marked
+  with an origin or centre point, is placed over the point pattern.
+  The transparent sheet is shifted so that the origin lies over one of
+  the data points, and the positions of all the \emph{other} data points
+  are copied onto the transparent sheet. This procedure is repeated for
+  each data point in turn. The resulting plot (the Fry plot)
+  is a pattern of \eqn{n(n-1)} points, where \eqn{n} is the original number
+  of data points. This procedure was previously proposed by
+  Patterson (1934, 1935) for studying inter-atomic distances in
+  crystals, and is also known as a Patterson plot.
+
+  The function \code{fryplot} generates the Fry/Patterson plot.
+  Standard graphical parameters
+  such as \code{main}, \code{pch},
+  \code{lwd}, \code{col}, \code{bg}, \code{cex} can be used to control
+  the appearance of the plot.
+  To zoom in (to view only a subset of the Fry plot at higher
+  magnification), use the argument \code{width} to specify the width
+  of a rectangular field of view centred at the origin, or the standard
+  graphical arguments \code{xlim} and \code{ylim} to specify another
+  rectangular field of view. (The actual field of view may be slightly
+  larger, depending on the graphics device.)
+
+  The function \code{frypoints} returns the points of the Fry
+  plot as a point pattern object. There may be a large number of points
+  in this pattern, so this function should be used only if further
+  analysis of the Fry plot is required.
+
+  Fry plots are particularly useful for recognising anisotropy in
+  regular point patterns. A void around the origin in the Fry plot
+  suggests regularity (inhibition between points) and the shape of the
+  void gives a clue to anisotropy in the pattern. Fry plots are also
+  useful for detecting periodicity or rounding of the spatial
+  coordinates. 
+
+  In mathematical terms, the Fry plot of a point pattern \code{X}
+  is simply a plot of the vectors \code{X[i] - X[j]} connecting all
+  pairs of distinct points in \code{X}. 
+
+  The Fry plot is related to the \eqn{K} function (see
+  \code{\link{Kest}}) and the reduced second moment measure
+  (see \code{\link{Kmeasure}}). For example, the number 
+  of points in the Fry plot lying within a circle of given radius
+  is an unnormalised and uncorrected version of the \eqn{K} function.
+  The Fry plot has a similar appearance to the plot of the
+  reduced second moment measure \code{\link{Kmeasure}} when the
+  smoothing parameter \code{sigma} is very small.
+  
+  The Fry plot does not adjust for the effect
+  of the size and shape of the sampling window. 
+  The density of points in the Fry plot tapers off near the edges of the
+  plot. This is an edge effect, a consequence of the bounded sampling
+  window. In geological applications this is usually not
+  important, because interest is focused on the behaviour near the
+  origin where edge effects can be ignored.
+  To correct for the edge effect, use \code{\link{Kmeasure}} or
+  \code{\link{Kest}} or its relatives.
+}
+\value{
+  \code{fryplot} returns \code{NULL}.
+  \code{frypoints} returns a point pattern (object of class \code{"ppp"}).
+}
+\references{
+  Fry, N. (1979) 
+  Random point distributions and strain measurement in rocks.
+  \emph{Tectonophysics} \bold{60}, 89--105.
+
+  Hanna, S.S. and Fry, N. (1979)
+  A comparison of methods of strain determination in rocks from
+  southwest Dyfed (Pembrokeshire) and adjacent areas.
+  \emph{Journal of Structural Geology} \bold{1}, 155--162.
+
+  Patterson, A.L. (1934) A Fourier series method for the determination
+  of the component of inter-atomic distances in crystals.
+  \emph{Physics Reviews} \bold{46}, 372--376.
+
+  Patterson, A.L. (1935) A direct method for the determination of the
+  components of inter-atomic distances in crystals.
+  \emph{Zeitschrift fuer Krystallographie} \bold{90}, 517--554.
+}
+\seealso{
+  \code{\link{Kmeasure}},
+  \code{\link{Kest}}
+}
+\examples{
+## unmarked data
+fryplot(cells)
+Y <- frypoints(cells)
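+## keep only vectors shorter than 0.2 (illustrative dmax value)
+Yshort <- frypoints(cells, dmax=0.2)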
+
+## numerical marks
+fryplot(longleaf, width=4, axes=TRUE)
+
+## multitype points
+fryplot(amacrine, width=0.2,
+                  from=(marks(amacrine) == "on"),
+                  chars=c(3,16), cols=2:3,
+                  main="Fry plot centred at an On-cell")
+points(0,0)
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/funxy.Rd b/man/funxy.Rd
new file mode 100644
index 0000000..4ecb085
--- /dev/null
+++ b/man/funxy.Rd
@@ -0,0 +1,67 @@
+\name{funxy}
+\Rdversion{1.1}
+\alias{funxy}
+\title{
+  Spatial Function Class
+}
+\description{
+  A simple class of functions of spatial location
+}
+\usage{
+  funxy(f, W)
+}
+\arguments{
+  \item{f}{
+    A \code{function} in the \R language
+    with arguments \code{x,y} (at least)
+  }
+  \item{W}{
+    Window (object of class \code{"owin"}) inside which the
+    function is well-defined.
+  }
+}
+\details{
+  This creates an object of class \code{"funxy"}.
+  This is a simple mechanism for handling a function
+  of spatial location \eqn{f(x,y)} to make it easier to display
+  and manipulate.
+
+  \code{f} should be a \code{function} in the \R language.
+  The first two arguments of \code{f} must be named \code{x} and \code{y}
+  respectively.
+
+  \code{W} should be a window (object of class \code{"owin"}) inside which the
+  function \code{f} is well-defined.
+
+  The function \code{f} should be vectorised: that is,
+  if \code{x} and \code{y} are numeric vectors of the same length
+  \code{n}, then \code{v <- f(x,y)} should be a vector of length
+  \code{n}.
+
+  The resulting function \code{g <- funxy(f, W)} has the same formal
+  arguments as \code{f}. It accepts numeric vectors \code{x,y} as
+  described above, but if \code{y} is missing, then \code{x} may be
+  a point pattern (object of class \code{"ppp"} or \code{"lpp"}) from
+  which the coordinates should be extracted.
+}
+\value{
+  A \code{function} with the same arguments as \code{f},
+  which also belongs to the class \code{"funxy"}.
+  This class has methods for
+  \code{print}, \code{plot}, \code{contour} and \code{persp}.
+}
+\seealso{
+  \code{\link{plot.funxy}}
+}
+\examples{
+   f <- function(x,y) { x^2 + y^2 - 1} 
+   g <- funxy(f, square(2))
+   g(0.2, 0.3)
+   g
+   g(cells[1:4])
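+   # the class has plot, contour and persp methods
+   plot(g)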
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/fv.Rd b/man/fv.Rd
new file mode 100644
index 0000000..86818c4
--- /dev/null
+++ b/man/fv.Rd
@@ -0,0 +1,202 @@
+\name{fv}
+\alias{fv}
+\title{
+  Create a Function Value Table
+}
+\description{
+  Advanced Use Only.
+  This low-level function creates an object of class \code{"fv"}
+  from raw numerical data. 
+}
+\usage{
+fv(x, argu = "r", ylab = NULL, valu, fmla = NULL, alim = NULL,
+   labl = names(x), desc = NULL, unitname = NULL, fname = NULL, yexp = ylab)
+}
+\arguments{
+  \item{x}{
+    A data frame with at least 2 columns containing the
+    values of the function argument and the corresponding values
+    of (one or more versions of) the function.
+  }
+  \item{argu}{
+    String. The name of the column of \code{x} that contains
+    the values of the function argument.
+  }
+  \item{ylab}{
+    Either \code{NULL}, or an \R language expression
+    representing the mathematical name of the
+    function. See Details.
+  }
+  \item{valu}{
+    String. The name of the column of \code{x} that should be taken
+    as containing the function values, in cases where a single column
+    is required.
+  }
+  \item{fmla}{
+    Either \code{NULL}, or a \code{formula} specifying the default
+    plotting behaviour. See Details.
+  }
+  \item{alim}{
+    Optional.
+    The default range of values of the function argument for which the
+    function will be plotted.
+    Numeric vector of length 2.
+  }
+  \item{labl}{
+    Optional. Plot labels for the columns of \code{x}.
+    A vector of strings, with one entry for each column of \code{x}.
+  }
+  \item{desc}{
+    Optional. Descriptions of the columns of \code{x}.
+    A vector of strings, with one entry for each column of \code{x}.
+  }
+  \item{unitname}{
+    Optional. Name of the unit (usually a unit of length)
+    in which the function argument is
+    expressed. Either a single character string,
+    or a vector of two character strings giving the
+    singular and plural forms, respectively.
+  }
+  \item{fname}{
+    Optional.
+    The name of the function itself. A character string.
+  }
+  \item{yexp}{
+    Optional. Alternative form of \code{ylab}
+    more suitable for annotating an axis of the plot.
+    See Details.
+  }
+}
+\details{
+  This documentation is provided
+  for experienced programmers who want to modify the internal
+  behaviour of \pkg{spatstat}. Other users please see \code{\link{fv.object}}.
+
+  The low-level function \code{fv} is used to create an object of
+  class \code{"fv"} from raw numerical data.
+
+  The data frame \code{x} contains the numerical data.
+  It should have one column
+  (typically but not necessarily named \code{"r"})
+  giving the values of the function argument for which
+  the function has been evaluated; and at least one other column,
+  containing the corresponding values of the function.
+
+  Typically there is more than one column of function values.
+  These columns typically give the values of different versions or estimates
+  of the same function,
+  for example, different estimates of the \eqn{K} function
+  obtained using different edge corrections.
+  However they may also contain the values of related functions
+  such as the derivative or hazard rate.
+
+  \code{argu} specifies the name of the column of
+  \code{x} that contains the values of the function argument
+  (typically \code{argu="r"} but this is not compulsory).
+
+  \code{valu} specifies the name of another column
+  that contains the \sQuote{recommended} estimate of the function.
+  It will be used to provide function values in those situations where
+  a single column of data is required. For example,
+  \code{\link{envelope}} computes its simulation envelopes
+  using the recommended value of the summary function.
+
+  \code{fmla} specifies the default plotting behaviour.
+  It should be a formula, or a string that can be converted to a
+  formula. Variables in the formula are names of columns of \code{x}.
+  See \code{\link{plot.fv}} for the interpretation of this
+  formula.
+
+  \code{alim} specifies the recommended range of the
+  function argument. This is used in situations where statistical
+  theory or statistical practice indicates that the computed
+  estimates of the function are not trustworthy outside a certain
+  range of values of the function argument. By default,
+  \code{\link{plot.fv}} will restrict the plot to this range.
+
+  \code{fname} is a string giving the name of the function itself.
+  For example, the \eqn{K} function would have \code{fname="K"}.
+  
+  \code{ylab} is a mathematical expression
+  for the function value, used when labelling an axis
+  of the plot, or when printing a description of the
+  function. It should be an \R language object. 
+  For example the \eqn{K} function's mathematical name \eqn{K(r)} is rendered
+  by \code{ylab=quote(K(r))}. 
+
+  If \code{yexp} is present, then \code{ylab} will be
+  used only for printing, and \code{yexp} will be used for
+  annotating axes in a plot. (Otherwise \code{yexp} defaults to \code{ylab}.)
+  For example the cross-type \eqn{K} function
+  \eqn{K_{1,2}(r)}{K[1,2](r)} is rendered by something like
+  \code{ylab=quote(Kcross[1,2](r))}
+  and 
+  \code{yexp=quote(Kcross[list(1,2)](r))}
+  to get the most satisfactory behaviour.
+
+  (A useful tip: use \code{\link{substitute}} instead of
+  \code{\link{quote}} to insert values of variables into an expression,
+  e.g. \code{substitute(Kcross[i,j](r), list(i=42,j=97))}
+  yields the same as \code{quote(Kcross[42, 97](r))}.)
+
+  \code{labl} is a character vector specifying plot labels
+  for each column of \code{x}. These labels will appear on the
+  plot axes (in non-default plots), legends and printed output.
+  Entries in \code{labl}
+  may contain the string \code{"\%s"} which will be replaced
+  by \code{fname}. For example the border-corrected estimate
+  of the \eqn{K} function has label \code{"\%s[bord](r)"} which
+  becomes \code{"K[bord](r)"}.
+
+  \code{desc} is a character vector containing intelligible
+  explanations of each column of \code{x}. Entries in
+  \code{desc} may contain the string \code{"\%s"} which will be replaced
+  by \code{ylab}. For example the border correction estimate of the
+  \eqn{K} function has description \code{"border correction estimate of \%s"}.
+
+}
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}}.
+}
+\seealso{
+  See \code{\link{plot.fv}} for plotting an \code{"fv"} object.
+
+  See \code{\link{as.function.fv}} to convert an \code{"fv"} object
+  to an \R function.
+  
+  Use \code{\link{cbind.fv}} to combine several \code{"fv"} objects.
+  Use \code{\link{bind.fv}} to glue additional columns onto an existing
+  \code{"fv"} object.
+
+  Use \code{\link{range.fv}} to compute the range of \eqn{y} values
+  for a function, and \code{\link{with.fv}} for more complicated
+  calculations.
+  
+  The functions \code{fvnames}, \code{fvnames<-} allow the user to
+  use standard abbreviations to refer to columns of an \code{"fv"} object.
+  
+  \emph{Undocumented} functions for modifying an \code{"fv"} object
+  include \code{tweak.fv.entry} and \code{rebadge.fv}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\examples{
+  df <- data.frame(r=seq(0,5,by=0.1))
+  df <- transform(df, a=pi*r^2, b=3*r^2)
+  X <- fv(df, "r", quote(A(r)),
+              "a", cbind(a, b) ~ r,
+              alim=c(0,4),
+              labl=c("r", "\%s[true](r)", "\%s[approx](r)"),
+              desc=c("radius of circle",
+                     "true area \%s",
+                     "rough area \%s"),
+              fname="A")
+  X
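+  # A brief sketch of how the pieces fit together:
+  # the 'valu' column is the recommended value, abbreviated ".y"
+  fvnames(X, ".y")
+  # the default plot follows the formula cbind(a, b) ~ r
+  plot(X)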
+}
+\keyword{spatial}
+\keyword{classes}
+
diff --git a/man/fv.object.Rd b/man/fv.object.Rd
new file mode 100644
index 0000000..e490df5
--- /dev/null
+++ b/man/fv.object.Rd
@@ -0,0 +1,59 @@
+\name{fv.object}
+\alias{fv.object} %DoNotExport
+\title{Function Value Table}
+\description{
+  A class \code{"fv"} to support the convenient plotting
+  of several estimates of the same function.
+}
+\details{
+  An object of this class is a convenient way of storing and plotting
+  several different estimates of the same function.
+
+  It is a data frame with extra attributes indicating
+  the recommended way of plotting the function, and other information.
+
+  There are methods for \code{print} and \code{plot} for
+  this class.
+  
+  Objects of class \code{"fv"} are returned by
+  \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}},
+  and \code{\link{Kest}} along with many other functions.
+}
+\seealso{
+  Objects of class \code{"fv"} are returned by
+  \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}},
+  and \code{\link{Kest}} along with many other functions.
+  
+  See \code{\link{plot.fv}} for plotting an \code{"fv"} object.
+
+  See \code{\link{as.function.fv}} to convert an \code{"fv"} object
+  to an \R function.
+  
+  Use \code{\link{cbind.fv}} to combine several \code{"fv"} objects.
+  Use \code{\link{bind.fv}} to glue additional columns onto an existing
+  \code{"fv"} object.
+
+  \emph{Undocumented} functions for modifying an \code{"fv"} object
+  include \code{fvnames}, \code{fvnames<-},
+  \code{tweak.fv.entry} and \code{rebadge.fv}.
+}
+\examples{
+    data(cells)
+    K <- Kest(cells)
+
+    class(K)
+
+    K  # prints a sensible summary
+
+    plot(K)
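+    # an "fv" object is essentially a data frame with extra attributes,
+    # so it can be inspected with ordinary data frame tools
+    head(as.data.frame(K))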
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{attribute}
+ 
+ 
diff --git a/man/fvnames.Rd b/man/fvnames.Rd
new file mode 100644
index 0000000..6e83d73
--- /dev/null
+++ b/man/fvnames.Rd
@@ -0,0 +1,86 @@
+\name{fvnames}
+\alias{fvnames}
+\alias{fvnames<-}
+\title{
+  Abbreviations for Groups of Columns in Function Value Table
+}
+\description{
+  Groups of columns in a function value table (object
+  of class \code{"fv"}) can be identified by standard abbreviations.
+}
+\usage{
+fvnames(X, a = ".")
+
+fvnames(X, a = ".") <- value
+
+}
+\arguments{
+  \item{X}{
+    Function value table (object of class \code{"fv"}).
+    See \code{\link{fv.object}}.
+  }
+  \item{a}{
+    One of the standard abbreviations listed below.
+  }
+  \item{value}{
+    Character vector containing names of columns of \code{X}.
+  }
+}
+\details{
+  An object of class \code{"fv"} represents a table of
+  values of a function, usually a summary function for spatial data
+  such as the \eqn{K}-function, for which several different statistical
+  estimators may be available. The different estimates are stored
+  as columns of the table.
+
+  Auxiliary information carried in the object \code{X} specifies some
+  columns or groups of columns of this table that should be
+  used for particular purposes.
+  For convenience these groups can be referred to by standard
+  abbreviations which are recognised by various functions
+  in the \pkg{spatstat} package, such as \code{\link{plot.fv}}.
+
+  These abbreviations are:
+  \tabular{ll}{
+    \code{".x"} \tab the function argument \cr
+    \code{".y"} \tab the recommended value of the function \cr
+    \code{"."}  \tab all function values to be plotted by default \cr
+                \tab (in order of plotting) \cr
+    \code{".s"} \tab the upper and lower limits of shading \cr
+                \tab (for envelopes and confidence intervals)\cr
+    \code{".a"} \tab all function values
+  }
+  The command \code{fvnames(X, a)} expands the abbreviation \code{a} and returns
+  a character vector containing the names of the columns.
+
+  The assignment \code{fvnames(X, a) <- value} changes the
+  definition of the abbreviation \code{a} to the character vector
+  \code{value}. It does not change the labels of any columns.
+
+  Note that \code{fvnames(X, ".")} lists the columns of values that will
+  be plotted by default, in the order that they would be plotted, not in
+  order of the column position. The order in which curves are plotted
+  affects the colours and line styles associated with the curves.
+}
+\value{
+  For \code{fvnames}, a character vector.
+
+  For \code{fvnames<-}, the updated object.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{fv.object}},
+  \code{\link{plot.fv}}
+}
+\examples{
+   K <- Kest(cells)
+   fvnames(K, ".y")
+   fvnames(K, ".y") <- "trans"
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/ganglia.Rd b/man/ganglia.Rd
new file mode 100644
index 0000000..4b4ef6b
--- /dev/null
+++ b/man/ganglia.Rd
@@ -0,0 +1,71 @@
+\name{ganglia}
+\alias{ganglia}
+\docType{data}
+\title{Beta Ganglion Cells in Cat Retina, Old Version} 
+\description{
+  Point pattern of retinal ganglion cells identified as `on' or `off'.
+  A marked point pattern.
+} 
+\format{
+  An object of class \code{"ppp"}
+  representing the point pattern of cell locations.
+  Entries include
+  \tabular{ll}{
+    \code{x} \tab Cartesian \eqn{x}-coordinate of cell \cr
+    \code{y} \tab Cartesian \eqn{y}-coordinate of cell \cr
+    \code{marks} \tab factor with levels \code{off} and \code{on} \cr
+                 \tab indicating ``off'' and ``on'' cells
+  }
+  See \code{\link{ppp.object}} for details of the format.
+}
+\usage{data(ganglia)}
+\source{W\"assle et al (1981), data supplied by Marie-Colette van
+  Lieshout and attributed to Peter Diggle}
+\section{Notes}{
+  \bold{Important: these data are INCORRECT.  See below.}
+  
+  The data represent a pattern of beta-type ganglion cells in the
+  retina of a cat recorded in Figure 6(a) of W\"assle et al. (1981).
+
+  The pattern was first analysed by W\"assle et al (1981) using
+  nearest neighbour distances. The data used in their analysis
+  are not available.
+
+  The present dataset \code{\link{ganglia}} was
+  scanned from Figure 6(a) of W\"assle et al (1981)
+  in the early 1990s, but we have no further information.
+  This dataset is the one analysed by Van Lieshout and Baddeley (1999)
+  using multitype J functions, and by Stoyan (1995) using second
+  order methods (pair correlation and mark correlation).
+  
+  It has now been discovered that these data are \bold{incorrect}.
+  They are not faithful to the scale in Figure 6 of W\"assle et al (1981),
+  and they contain some scanning errors.
+  Hence they should not be used to address the original scientific question.
+  They have been retained only for comparison with other analyses
+  in the statistical literature.
+
+  A new, corrected dataset, scanned from the original microscope image,
+  has been provided under the name \code{\link{betacells}}.  Use that
+  dataset for any further study.
+}
+\section{Warnings}{
+  These data are incorrect.
+  Use the new corrected dataset \code{\link{betacells}}.  
+}
+\references{
+  Stoyan, D. (1995) Personal communication.
+  
+  Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of
+  dependence between types in multivariate point patterns.
+  \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532.
+
+  W\"assle, H., Boycott, B. B. & Illing, R.-B. (1981).  Morphology
+  and mosaic of on- and off-beta cells in the cat retina and some
+  functional considerations.  \emph{Proc. Roy. Soc. London Ser. B}
+  \bold{212}, 177--195.
+}
+\keyword{datasets}
+\keyword{spatial}
+
+
diff --git a/man/gauss.hermite.Rd b/man/gauss.hermite.Rd
new file mode 100644
index 0000000..77c209a
--- /dev/null
+++ b/man/gauss.hermite.Rd
@@ -0,0 +1,61 @@
+\name{gauss.hermite}
+\alias{gauss.hermite}
+\title{
+  Gauss-Hermite Quadrature Approximation to Expectation for Normal Distribution
+}
+\description{
+  Calculates an approximation to the expected value of any function of a
+  normally-distributed random variable, using Gauss-Hermite quadrature.
+}
+\usage{
+gauss.hermite(f, mu = 0, sd = 1, ..., order = 5)
+}
+\arguments{
+  \item{f}{
+    The function whose moment should be approximated.
+  }
+  \item{mu}{
+    Mean of the normal distribution.
+  }
+  \item{sd}{
+    Standard deviation of the normal distribution.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{f}.
+  }
+  \item{order}{
+    Number of quadrature points in the Gauss-Hermite quadrature
+    approximation. A small positive integer.
+  }
+}
+\details{
+  This algorithm calculates the approximate expected value of
+  \code{f(Z)} when \code{Z} is a normally-distributed random
+  variable with mean \code{mu} and standard deviation \code{sd}.
+  The expected value is an integral with respect to the
+  Gaussian density; this integral is approximated
+  using Gauss-Hermite quadrature.
+
+  The argument \code{f} should be a function in the \R language
+  whose first argument is the variable \code{Z}. Additional arguments
+  may be passed through \code{\dots}. The value returned by \code{f}
+  may be a single numeric value, a vector, or a matrix. The values
+  returned by \code{f} for different values of \code{Z} must have
+  compatible dimensions.
+
+  The result is a weighted average of several values of \code{f}.
+}
+\value{
+  Numeric value, vector or matrix.
+}
+\author{\adrian
+  
+  ,
+  \rolf
+  
+  and \ege.
+}
+\examples{
+  gauss.hermite(function(x) x^2, 3, 1)
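+  # the exact answer here is E(Z^2) = mu^2 + sd^2 = 10;
+  # increasing 'order' sharpens the quadrature approximation
+  gauss.hermite(function(x) x^2, 3, 1, order=10)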
+}
+\keyword{math}
diff --git a/man/gordon.Rd b/man/gordon.Rd
new file mode 100644
index 0000000..0e834ef
--- /dev/null
+++ b/man/gordon.Rd
@@ -0,0 +1,35 @@
+\name{gordon}
+\alias{gordon}
+\docType{data}
+\title{
+  People in Gordon Square
+}
+\description{
+  This dataset records the locations of
+  people sitting on a grass patch in Gordon Square, London,
+  at 3pm on a sunny afternoon.
+
+  The dataset \code{gordon} is a point pattern
+  (object of class \code{"ppp"}) containing the spatial coordinates
+  of each person.
+
+  The grass patch is an irregular polygon with two holes.
+  
+  Coordinates are given in metres.
+}
+\usage{data(gordon)}
+\examples{
+data(gordon)
+plot(gordon)
+}
+\source{
+  Andrew Bevan, University College London.
+}
+\references{
+  Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013)
+  Hybrids of Gibbs point process models and their implementation.
+  \emph{Journal of Statistical Software} \bold{55}:11, 1--43.
+  \url{http://www.jstatsoft.org/v55/i11/}
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/gorillas.Rd b/man/gorillas.Rd
new file mode 100644
index 0000000..64d870c
--- /dev/null
+++ b/man/gorillas.Rd
@@ -0,0 +1,151 @@
+\name{gorillas}
+\alias{gorillas}
+\alias{gorillas.extra}
+\docType{data}
+\title{
+  Gorilla Nesting Sites
+}
+\description{
+  Locations of nesting sites of gorillas, and associated covariates,
+  in a National Park in Cameroon.
+}
+\usage{data(gorillas)}
+\format{
+  \code{gorillas} is a marked point pattern (object
+  of class \code{"ppp"}) representing nest site locations.
+
+  \code{gorillas.extra} is a named list of 7 pixel images (objects of
+  class \code{"im"}) containing spatial covariates.
+  It also belongs to the class \code{"listof"}.
+
+  All spatial coordinates are in metres.
+  The coordinate reference system is \code{WGS_84_UTM_Zone_32N}.
+}
+\details{
+  These data come from a study of gorillas in the Kagwene Gorilla Sanctuary,
+  Cameroon, by the Wildlife Conservation Society
+  Takamanda-Mone Landscape Project (WCS-TMLP). A detailed description
+  and analysis of the data is reported in Funwi-Gabga and Mateu (2012).
+
+  The dataset \code{gorillas} is a marked point pattern
+  (object of class \code{"ppp"})
+  giving the spatial locations of 647 nesting sites of gorilla groups
+  observed in the sanctuary over time.
+  Locations are given as UTM (Zone 32N) coordinates in metres.
+  The observation window is the boundary of the sanctuary, represented
+  as a polygon. Marks attached to the points are:
+  
+  \describe{
+    \item{group}{Identifier of the gorilla group
+      that constructed the nest site:
+      a categorical variable with values \code{major} or \code{minor}.
+    }
+    \item{season}{Season in which data were collected:
+      categorical, either \code{rainy} or \code{dry}.
+    }
+    \item{date}{
+      Day of observation. A value of class \code{"Date"}.
+    }
+  }
+  Note that the data contain duplicated points (two points at the
+  same location). To determine which points are duplicates,
+  use \code{\link{duplicated.ppp}}.
+  To remove the duplication, use \code{\link{unique.ppp}}.
+
+  The accompanying dataset \code{gorillas.extra} contains
+  spatial covariate information. It is a named list containing
+  seven pixel images (objects of class \code{"im"}) giving the values of
+  seven covariates over the study region. It also belongs
+  to the class \code{"listof"} so that it can be plotted.
+  The component images are:
+  
+  \describe{
+    \item{aspect}{
+      Compass direction of the terrain slope.
+      Categorical, with levels
+      \code{N}, 
+      \code{NE}, 
+      \code{E}, 
+      \code{SE}, 
+      \code{S}, 
+      \code{SW}, 
+      \code{W} and 
+      \code{NW}.
+    }
+    \item{elevation}{
+      Digital elevation of terrain, in metres.
+    }
+    \item{heat}{
+      Heat Load Index at each point on the surface (Beer's aspect),
+      discretised. Categorical with values \code{Warmest}
+      (Beer's aspect between 0 and 0.999),
+      \code{Moderate} (Beer's aspect between 1 and 1.999),
+      \code{Coolest} (Beer's aspect equals 2).
+    }
+    \item{slopeangle}{
+      Terrain slope, in degrees.
+    }
+    \item{slopetype}{
+      Type of slope.
+      Categorical, with values
+      \code{Valley}, \code{Toe} (toe slope), \code{Flat},
+      \code{Midslope}, \code{Upper} and \code{Ridge}.
+    }
+    \item{vegetation}{
+      Vegetation or cover type.
+      Categorical, with values
+      \code{Disturbed} (highly disturbed forest), \code{Colonising}
+      (colonising forest), \code{Grassland} (savannah),
+      \code{Primary} (primary forest), \code{Secondary} (secondary forest), and
+      \code{Transition} (transitional vegetation).
+    }
+    \item{waterdist}{
+      Euclidean distance from nearest water body, in metres.
+    }
+  }
+
+  For further information see Funwi-Gabga and Mateu (2012).
+}
+\section{Raw Data}{
+  For demonstration and training purposes,
+  the raw data file for the \code{vegetation} covariate is
+  also provided in the \pkg{spatstat} package installation,
+  as the file \code{vegetation.asc} in the folder \code{rawdata/gorillas}.
+  Use \code{\link[base]{system.file}} to obtain the file path:
+  \code{system.file("rawdata/gorillas/vegetation.asc", package="spatstat")}.
+  This is a text file in the simple ASCII file format of the geospatial
+  library \code{GDAL}. The file can be read by the function
+  \code{readGDAL} in the \pkg{rgdal} package, or alternatively
+  read directly using \code{\link[base]{scan}}.
+}
+\source{
+  Field data collector: Wildlife Conservation Society Takamanda-Mone
+  Landscape Project (WCS-TMLP).
+  \emph{Please acknowledge WCS-TMLP in any use of these data.}
+
+  Data kindly provided by 
+  Funwi-Gabga Neba, Data Coordinator of A.P.E.S.
+  Database Project, Department of Primatology,
+  Max Planck Institute for Evolutionary Anthropology, Leipzig, Germany.
+
+  The collaboration of Prof Jorge Mateu, Universitat Jaume I, Castellon, Spain
+  is gratefully acknowledged.
+}
+\references{
+  Funwi-Gabga, N. (2008)
+  \emph{A pastoralist survey and fire impact assessment
+  in the Kagwene Gorilla Sanctuary, Cameroon}. M.Sc. thesis,
+  Geology and Environmental Science, University of Buea, Cameroon.
+
+  Funwi-Gabga, N. and Mateu, J. (2012)
+  Understanding the nesting spatial behaviour of gorillas
+  in the Kagwene Sanctuary, Cameroon.
+  \emph{Stochastic Environmental Research and Risk Assessment}
+  \bold{26} (6), 793--811.
+}
+\examples{
+  summary(gorillas)
+  plot(gorillas)
+  plot(gorillas.extra)
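+  # the pattern contains duplicated points (see Details)
+  sum(duplicated(gorillas))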
+}
+\keyword{datasets}
diff --git a/man/gridcentres.Rd b/man/gridcentres.Rd
new file mode 100644
index 0000000..23f1658
--- /dev/null
+++ b/man/gridcentres.Rd
@@ -0,0 +1,76 @@
+\name{gridcentres}
+\alias{gridcentres}
+\alias{gridcenters}
+\title{Rectangular grid of points}
+\description{
+  Generates a rectangular grid of points in a window.
+}
+\usage{
+ gridcentres(window, nx, ny)
+}
+\arguments{
+  \item{window}{A window. 
+    An object of class \code{\link{owin}},
+    or data in any format acceptable to \code{\link{as.owin}()}.
+  }
+  \item{nx}{Number of points
+    in each row of the rectangular grid.
+  }
+  \item{ny}{Number of points
+    in each column of the rectangular grid.
+  }
+}
+\value{
+  A list with two components \code{x} and \code{y}, which are numeric
+  vectors giving the coordinates of the points of the
+  rectangular grid.
+}
+\details{
+  This function creates a rectangular grid of points in the window.
+  
+  The bounding rectangle of the \code{window} is divided into
+  a regular \eqn{nx \times ny}{nx * ny} grid of rectangular tiles.
+  The function returns the \eqn{x,y} coordinates of the
+  centres of these tiles.
+
+  Note that some of these grid points may lie outside the window,
+  if \code{window} is not of type \code{"rectangle"}. The function
+  \code{\link{inside.owin}} can be used to select those grid points
+  which do lie inside the window. See the examples.
+
+  This function is useful in creating dummy points for quadrature
+  schemes (see \code{\link{quadscheme}}) and for other miscellaneous
+  purposes.
+}
+\seealso{
+  \code{\link{quad.object}},
+  \code{\link{quadscheme}},
+  \code{\link{inside.owin}},
+  \code{\link{stratrand}}
+}
+\examples{
+  w <- unit.square()
+  xy <- gridcentres(w, 10,15)
+  \dontrun{
+  plot(w)
+  points(xy)
+  }
+
+  bdry <- list(x=c(0.1,0.3,0.7,0.4,0.2),
+               y=c(0.1,0.1,0.5,0.7,0.3))
+  w <- owin(c(0,1), c(0,1), poly=bdry)
+  xy <- gridcentres(w, 30, 30)
+  ok <- inside.owin(xy$x, xy$y, w)
+  \dontrun{
+  plot(w)
+  points(xy$x[ok], xy$y[ok])
+  }
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/gridweights.Rd b/man/gridweights.Rd
new file mode 100644
index 0000000..0120f43
--- /dev/null
+++ b/man/gridweights.Rd
@@ -0,0 +1,69 @@
+\name{gridweights}
+\alias{gridweights}
+\title{Compute Quadrature Weights Based on Grid Counts}
+\description{
+  Computes quadrature weights for a given set of points,
+  using the ``counting weights'' for a grid of rectangular tiles.
+}
+\usage{
+ gridweights(X, ntile, \dots, window=NULL, verbose=FALSE, npix=NULL, areas=NULL)
+}
+\arguments{
+  \item{X}{Data defining a point pattern.}
+  \item{ntile}{Number of tiles
+    in each row and column of the rectangular grid.
+    An integer vector of length 1 or 2.
+  }
+  \item{\dots}{Ignored.}
+  \item{window}{Default window for the point pattern.}
+  \item{verbose}{Logical flag. If \code{TRUE}, information will be printed
+    about the computation of the grid weights.
+  }
+  \item{npix}{Dimensions of pixel grid to use when
+    computing a digital approximation to the tile areas.
+  }
+  \item{areas}{Vector of areas of the tiles, if they are already known.}
+}
+\value{
+  Vector of nonnegative weights for each point in \code{X}.
+}
+\details{
+  This function computes a set of quadrature weights
+  for a given pattern of points
+  (typically comprising both ``data'' and ``dummy'' points).
+  See \code{\link{quad.object}} for an explanation of quadrature
+  weights and quadrature schemes.
+
+  The weights are computed by the ``counting weights'' rule
+  based on a regular grid of rectangular tiles.
+  First \code{X} and (optionally) \code{window} are converted into a
+  point pattern object. Then the bounding rectangle of the window of
+  the point pattern is
+  divided into a regular \code{ntile[1] * ntile[2]} grid of rectangular tiles.
+  The weight attached to a point of \code{X} is the area of the tile
+  in which it lies, divided by the number of points of \code{X} lying in
+  that tile.
+
+  For non-rectangular windows the tile areas are currently calculated
+  by approximating the window as a binary mask. The accuracy of this
+  approximation is controlled by \code{npix}, which becomes
+  the argument \code{dimyx} of \code{\link{as.mask}}. 
+}
+\seealso{
+  \code{\link{quad.object}},
+  \code{\link{dirichletWeights}}
+}
+\examples{
+  Q <- quadscheme(runifpoispp(10))
+  X <- as.ppp(Q) # data and dummy points together
+  w <- gridweights(X, 10)
+  w <- gridweights(X, c(10, 10))
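+  # counting weights: each weight is a tile area divided by the number
+  # of points in that tile, so (assuming every tile contains at least
+  # one point) the weights should sum to the window area
+  sum(w)
+  area(Window(X))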
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/grow.boxx.Rd b/man/grow.boxx.Rd
new file mode 100644
index 0000000..d119b12
--- /dev/null
+++ b/man/grow.boxx.Rd
@@ -0,0 +1,49 @@
+\name{grow.boxx}
+\alias{grow.boxx}
+\alias{grow.box3}
+\title{Add margins to box in any dimension}
+\description{
+  Adds a margin to a box (object of class \code{"boxx"} or \code{"box3"}).
+}
+\usage{
+ grow.boxx(W, left, right = left)
+ grow.box3(W, left, right = left)
+}
+\arguments{
+  \item{W}{
+    A box (object of class \code{"boxx"} or \code{"box3"}).
+  }
+  \item{left}{Width of margin to be added to the left endpoint
+    of the box side in every dimension.
+    A single nonnegative number, or a vector of the same length
+    as the dimension of the box, specifying a different left margin
+    in each dimension.
+  }
+  \item{right}{Width of margin to be added to the right endpoint
+    of the box side in every dimension.
+    A single nonnegative number, or a vector of the same length
+    as the dimension of the box, specifying a different right margin
+    in each dimension.
+  }
+}
+\value{
+  Another object of the same class \code{"boxx"} or \code{"box3"}
+  representing the window after margins are added.
+}
+\seealso{
+  \code{\link{grow.rectangle}},
+  \code{\link{boxx}},
+  \code{\link{box3}}
+}
+\examples{
+  w <- boxx(c(0,10), c(0,10), c(0,10), c(0,10))
+  # add a margin of size 1 on both sides in all four dimensions
+  b12 <- grow.boxx(w, 1)
+
+  # add margin of size 2 at left, and margin of size 3 at right,
+  # in each dimension.
+  v <- grow.boxx(w, 2, 3)
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/grow.rectangle.Rd b/man/grow.rectangle.Rd
new file mode 100644
index 0000000..ba5e2f4
--- /dev/null
+++ b/man/grow.rectangle.Rd
@@ -0,0 +1,61 @@
+\name{grow.rectangle}
+\alias{grow.rectangle}
+\title{Add margins to rectangle}
+\description{
+  Adds a margin to a rectangle.
+}
+\usage{
+ grow.rectangle(W, xmargin=0, ymargin=xmargin, fraction=NULL)
+}
+\arguments{
+  \item{W}{
+    A window (object of class \code{"owin"}).
+    Must be of type \code{"rectangle"}.
+  }
+  \item{xmargin}{Width of horizontal margin to be added.
+    A single nonnegative number, or a vector of length 2
+    indicating margins of unequal width at left and right.
+  }
+  \item{ymargin}{Height of vertical margin to be added.
+    A single nonnegative number, or a vector of length 2
+    indicating margins of unequal width at bottom and top.
+  }
+  \item{fraction}{
+    Fraction of width and height to be added.
+    A number greater than zero, or a numeric vector of length 2 indicating
+    different fractions of width and of height, respectively.
+    Incompatible with specifying \code{xmargin} and \code{ymargin}.
+  }
+}
+\value{
+  Another object of class \code{"owin"} representing the
+  window after margins are added.
+}
+\details{
+  This is a simple convenience function to add a
+  margin of specified width and height on each side of a
+  rectangular window. Unequal margins can also be added.
+}
+\seealso{
+  \code{\link{trim.rectangle}},
+  \code{\link{dilation}},
+  \code{\link{erosion}},
+  \code{\link{owin.object}}
+}
+\examples{
+  w <- square(10)
+  # add a margin of width 1 on all four sides
+  square12 <- grow.rectangle(w, 1)
+
+  # add margin of width 3 on the right side
+  # and margin of height 4 on top.
+  v <- grow.rectangle(w, c(0,3), c(0,4))
+
+  # grow by 5 percent on all sides
+  grow.rectangle(w, fraction=0.05)
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/hamster.Rd b/man/hamster.Rd
new file mode 100644
index 0000000..7fc406a
--- /dev/null
+++ b/man/hamster.Rd
@@ -0,0 +1,48 @@
+\name{hamster}
+\alias{hamster}
+\docType{data}
+\title{Aherne's hamster tumour data} 
+\description{
+  Point pattern of cell nuclei in hamster kidney, each nucleus classified as
+  either `dividing' or `pyknotic'.
+  A multitype point pattern.
+} 
+\format{
+  An object of class \code{"ppp"}
+  representing the point pattern of cell locations.
+  Entries include
+  \tabular{ll}{
+    \code{x} \tab Cartesian \eqn{x}-coordinate of cell \cr
+    \code{y} \tab Cartesian \eqn{y}-coordinate of cell \cr
+    \code{marks} \tab factor with levels \code{"dividing"}
+    and \code{"pyknotic"}.
+  }
+  See \code{\link{ppp.object}} for details of the format.
+}
+\usage{data(hamster)}
+\source{Dr W. A. Aherne, Department of Pathology, University of
+  Newcastle-upon-Tyne, UK. Data supplied by Prof. Peter Diggle}
+\section{Notes}{
+  These data were presented and analysed by Diggle (1983, section 7.3).
+
+  The data give the positions of the centres of the nuclei of certain
+  cells in a histological
+  section of tissue from a laboratory-induced metastasising lymphoma
+  in the kidney of a hamster.
+
+  The nuclei are classified as either
+  "pyknotic" (corresponding to dying cells) or "dividing" (corresponding
+  to cells arrested in metaphase, i.e. in the act of dividing). The
+  background void is occupied by unrecorded, interphase cells in
+  relatively large numbers. 
+
+  The sampling window is a square, originally about 0.25 mm square
+  in real units, which has been rescaled to the unit square.
+}
+\references{
+  Diggle, P.J. (1983)
+  \emph{Statistical analysis of spatial point patterns}.
+  Academic Press.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/harmonic.Rd b/man/harmonic.Rd
new file mode 100644
index 0000000..154ccbe
--- /dev/null
+++ b/man/harmonic.Rd
@@ -0,0 +1,74 @@
+\name{harmonic}
+\alias{harmonic}
+\title{Basis for Harmonic Functions}
+\description{
+  Evaluates a basis for the harmonic polynomials in \eqn{x} and \eqn{y}
+  of degree less than or equal to \eqn{n}.
+}
+\usage{
+   harmonic(x, y, n)
+}
+\arguments{
+  \item{x}{
+    Vector of \eqn{x} coordinates
+  }
+  \item{y}{
+    Vector of \eqn{y} coordinates
+  }
+  \item{n}{
+    Maximum degree of polynomial
+  }
+}
+\value{
+  A data frame with \code{2 * n} columns giving the values of the
+  basis functions at the coordinates. Each column is labelled by an
+  algebraic expression for the corresponding basis function.
+}
+\details{
+  This function computes a basis for the harmonic polynomials
+  in two variables \eqn{x} and \eqn{y} up to a given degree \eqn{n}
+  and evaluates them at given \eqn{x,y} locations.
+  It can be used in model formulas (for example in
+  the model-fitting functions
+  \code{\link{lm}}, \code{\link{glm}}, \code{\link{gam}} and \code{\link{ppm}}) to specify a
+  linear predictor which is a harmonic function.
+
+  A function \eqn{f(x,y)} is harmonic if
+  \deqn{\frac{\partial^2}{\partial x^2} f
+    + \frac{\partial^2}{\partial y^2}f = 0.}{
+    (d/dx)^2 f + (d/dy)^2 f = 0.}
+  The harmonic polynomials of degree less than or equal to
+  \eqn{n} have a basis consisting of \eqn{2 n} functions.
+
+  This function was implemented on a suggestion of P. McCullagh
+  for fitting nonstationary spatial trend to point process models.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{polynom}}
+}
+\examples{
+   # inhomogeneous point pattern
+   X <- unmark(longleaf)
+   \testonly{
+     # smaller dataset
+     X <- X[seq(1,npoints(X), by=50)]
+   }
+
+   # fit Poisson point process with log-cubic intensity
+   fit.3 <- ppm(X ~ polynom(x,y,3), Poisson())
+
+   # fit Poisson process with log-cubic-harmonic intensity
+   fit.h <- ppm(X ~ harmonic(x,y,3), Poisson())
+
+   # Likelihood ratio test
+   lrts <- 2 * (logLik(fit.3) - logLik(fit.h))
+   df <- with(coords(X),
+              ncol(polynom(x,y,3)) - ncol(harmonic(x,y,3)))
+   pval <- 1 - pchisq(lrts, df=df)
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/harmonise.Rd b/man/harmonise.Rd
new file mode 100644
index 0000000..6108782
--- /dev/null
+++ b/man/harmonise.Rd
@@ -0,0 +1,53 @@
+\name{harmonise}
+\alias{harmonise}
+\alias{harmonize}
+\title{Make Objects Compatible}
+\description{
+  Converts several objects of the same class to a common format
+  so that they can be combined or compared.
+}
+\usage{
+harmonise(\dots)
+harmonize(\dots)
+}
+\arguments{
+  \item{\dots}{
+    Any number of objects of the same class.
+  } 
+}
+\details{
+  This generic command takes any number of objects of the same
+  class, and \emph{attempts} to make them compatible in the sense
+  of \code{\link{compatible}} so that they can be combined or compared.
+
+  There are methods for the classes \code{"fv"}
+  (\code{\link{harmonise.fv}})
+  and \code{"im"} (\code{\link{harmonise.im}}).
+  
+  All arguments \code{\dots} must be objects of the same class.
+  The result will be a list, of length equal to the number of
+  arguments \code{\dots}, containing new versions of each of these
+  objects, converted to a common format.
+  If the arguments were named (\code{name=value}) then the return value
+  also carries these names.
+}
+\value{
+  A list, of length equal to the number of arguments \code{\dots},
+  whose entries are objects of the same class.
+  If the arguments were named (\code{name=value}) then the return value
+  also carries these names.
+}
+\author{\adrian
+  
+  ,
+  \rolf
+  
+  and \ege.
+}
+\seealso{
+  \code{\link{compatible}},
+  \code{\link{harmonise.fv}},
+  \code{\link{harmonise.im}}
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/harmonise.fv.Rd b/man/harmonise.fv.Rd
new file mode 100644
index 0000000..8453a62
--- /dev/null
+++ b/man/harmonise.fv.Rd
@@ -0,0 +1,92 @@
+\name{harmonise.fv}
+\alias{harmonise.fv}
+\alias{harmonize.fv}
+\title{Make Function Tables Compatible}
+\description{
+  Convert several objects of class \code{"fv"} 
+  to the same values of the function argument.
+}
+\usage{
+\method{harmonise}{fv}(\dots, strict=FALSE)
+
+\method{harmonize}{fv}(\dots, strict=FALSE)
+}
+\arguments{
+  \item{\dots}{
+    Any number of function tables (objects of class \code{"fv"}).
+  }
+  \item{strict}{
+    Logical. If \code{TRUE}, a column of data will be deleted
+    if columns of the same name do not appear in every object.
+  }
+}
+\details{
+  A function value table (object of class \code{"fv"}) is
+  essentially a data frame giving the values of a function \eqn{f(x)}
+  (or several alternative estimates of this value)
+  at equally-spaced values of the function argument \eqn{x}.
+
+  The command \code{\link{harmonise}} is generic. This is the
+  method for objects of class \code{"fv"}.
+  
+  This command makes any number of \code{"fv"} objects compatible,
+  in the loose sense that they have the same sequence of values of
+  \eqn{x}. They can then be combined by \code{\link{cbind.fv}},
+  but not necessarily by \code{\link{eval.fv}}.
+
+  All arguments \code{\dots} must be function value tables
+  (objects of class \code{"fv"}). 
+  The result will be a list, of length equal to the number of
+  arguments \code{\dots}, containing new versions of each of these functions,
+  converted to a common sequence of \eqn{x} values.
+  If the arguments were named (\code{name=value}) then the return value
+  also carries these names.
+
+  The range of \eqn{x} values in the resulting functions
+  will be the intersection of the ranges of \eqn{x} values
+  in the original functions. 
+  The spacing of \eqn{x} values in the resulting functions
+  will be the finest (narrowest) of the spacings of the
+  \eqn{x} values in the original functions.
+  Function values are interpolated using \code{\link[stats]{approxfun}}.
+
+  If \code{strict=TRUE}, each column of data will be retained only if
+  a column of the same name appears in all of the arguments \code{\dots}.
+  This ensures that the resulting objects are strictly compatible
+  in the sense of \code{\link{compatible.fv}},
+  and can be combined using \code{\link{eval.fv}}
+  or \code{\link{collapse.fv}}.
+  
+  If \code{strict=FALSE} (the default), this does not occur,
+  and then the resulting objects are \bold{not} guaranteed to be compatible
+  in the sense of \code{\link{compatible.fv}}.
+}
+\value{
+  A list, of length equal to the number of arguments \code{\dots},
+  whose entries are objects of class \code{"fv"}.
+  If the arguments were named (\code{name=value}) then the return value
+  also carries these names.
+}
+\author{\adrian
+  
+  ,
+  \rolf
+  
+  and \ege.
+}
+\examples{
+   H <- harmonise(K=Kest(cells), G=Gest(cells))
+   H
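+   # the harmonised functions share a common sequence of r values
+   identical(H$K$r, H$G$r)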
+   \dontrun{
+      ## generates a warning about duplicated columns
+      try(cbind(H$K, H$G))
+   }
+}
+\seealso{
+  \code{\link{fv.object}},
+  \code{\link{cbind.fv}},
+  \code{\link{eval.fv}},
+  \code{\link{compatible.fv}}
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/harmonise.im.Rd b/man/harmonise.im.Rd
new file mode 100644
index 0000000..3eda940
--- /dev/null
+++ b/man/harmonise.im.Rd
@@ -0,0 +1,67 @@
+\name{harmonise.im}
+\alias{harmonise.im}
+\alias{harmonize.im}
+\title{Make Pixel Images Compatible}
+\description{
+  Convert several pixel images to a common pixel raster.
+}
+\usage{
+\method{harmonise}{im}(\dots)
+
+\method{harmonize}{im}(\dots)
+}
+\arguments{
+  \item{\dots}{
+    Any number of pixel images (objects of class \code{"im"})
+    or data which can be converted to pixel images by \code{\link{as.im}}.
+  } 
+}
+\details{
+  This function makes any number of pixel images compatible,
+  by converting them all to a common pixel grid.
+
+  The command \code{\link{harmonise}} is generic. This is the
+  method for objects of class \code{"im"}.
+  
+  At least one of the arguments \code{\dots} must be a pixel image.
+  Some arguments may be windows (objects of class \code{"owin"}),
+  functions (\code{function(x,y)}) or numerical constants. These will be
+  converted to images using \code{\link{as.im}}.
+  
+  The common pixel grid is determined by inspecting all the pixel
+  images in the argument list, computing the bounding box of all the
+  images, then finding the image with the highest spatial resolution, 
+  and extending its pixel grid to cover the bounding box. 
+
+  The return value is a list with entries corresponding to the input
+  arguments.
+  If the arguments were named (\code{name=value}) then the return value
+  also carries these names.
+
+  If you just want to determine the appropriate pixel resolution,
+  without converting the images, use \code{\link{commonGrid}}.
+}
+\value{
+  A list,
+  of length equal to the number of arguments \code{\dots},
+  whose entries are pixel images.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\examples{
+   A <- setcov(square(1))
+   B <- function(x,y) { x }
+   G <- density(runifpoint(42))
+   harmonise(X=A, Y=B, Z=G)
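+   # the results share a common pixel grid: compare array dimensions
+   sapply(harmonise(X=A, Y=B, Z=G), dim)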
+}
+\seealso{
+  \code{\link{commonGrid}},
+  \code{\link{compatible.im}},
+  \code{\link{as.im}}
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/harmonise.msr.Rd b/man/harmonise.msr.Rd
new file mode 100644
index 0000000..df63633
--- /dev/null
+++ b/man/harmonise.msr.Rd
@@ -0,0 +1,44 @@
+\name{harmonise.msr}
+\alias{harmonise.msr}
+\title{Make Measures Compatible}
+\description{
+  Convert several measures to a common quadrature scheme
+}
+\usage{
+\method{harmonise}{msr}(\dots)
+}
+\arguments{
+  \item{\dots}{
+    Any number of measures (objects of class \code{"msr"}).
+  } 
+}
+\details{
+  This function makes any number of measures compatible,
+  by converting them all to a common quadrature scheme.
+
+  The command \code{\link{harmonise}} is generic. This is the
+  method for objects of class \code{"msr"}.
+}
+\value{
+  A list, of length equal to the number of arguments \code{\dots},
+  whose entries are measures.
+}
+\author{
+  \spatstatAuthors.
+}
+\examples{
+  fit1 <- ppm(cells ~ x)
+  fit2 <- ppm(rpoispp(ex=cells) ~ x)
+  m1 <- residuals(fit1)
+  m2 <- residuals(fit2)
+  harmonise(m1, m2)
+  s1 <- residuals(fit1, type="score")
+  s2 <- residuals(fit2, type="score")
+  harmonise(s1, s2)
+}
+\seealso{
+  \code{\link{harmonise}},
+  \code{\link{msr}}
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/harmonise.owin.Rd b/man/harmonise.owin.Rd
new file mode 100644
index 0000000..ce534c7
--- /dev/null
+++ b/man/harmonise.owin.Rd
@@ -0,0 +1,68 @@
+\name{harmonise.owin}
+\alias{harmonise.owin}
+\alias{harmonize.owin}
+\title{Make Windows Compatible}
+\description{
+  Convert several windows to a common pixel raster.
+}
+\usage{
+\method{harmonise}{owin}(\dots)
+
+\method{harmonize}{owin}(\dots)
+}
+\arguments{
+  \item{\dots}{
+    Any number of windows (objects of class \code{"owin"})
+    or data which can be converted to windows by \code{\link{as.owin}}.
+  } 
+}
+\details{
+  This function makes any number of windows compatible,
+  by converting them all to a common pixel grid.
+
+  This only has an effect if one of the windows is a binary mask.
+  If all the windows are rectangular or polygonal, they are
+  returned unchanged.
+
+  The command \code{\link{harmonise}} is generic. This is the
+  method for objects of class \code{"owin"}.
+  
+  Each argument must be a window (object of class \code{"owin"}),
+  or data that can be converted to a window by \code{\link{as.owin}}.
+  
+  The common pixel grid is determined by inspecting all the windows
+  in the argument list, computing the bounding box of all the
+  windows, then finding the binary mask with the finest spatial resolution, 
+  and extending its pixel grid to cover the bounding box. 
+
+  The return value is a list with entries corresponding to the input
+  arguments.
+  If the arguments were named (\code{name=value}) then the return value
+  also carries these names.
+
+  If you just want to determine the appropriate pixel resolution,
+  without converting the windows, use \code{\link{commonGrid}}.
+}
+\value{
+  A list of windows, of length equal to the number of arguments
+  \code{\dots}. The list belongs to the class \code{"solist"}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\examples{
+   harmonise(X=letterR,
+             Y=grow.rectangle(Frame(letterR), 0.2),
+             Z=as.mask(letterR, eps=0.1),
+             V=as.mask(letterR, eps=0.07))
+}
+\seealso{
+  \code{\link{commonGrid}},
+  \code{\link{harmonise.im}},
+  \code{\link{as.owin}}
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/has.close.Rd b/man/has.close.Rd
new file mode 100644
index 0000000..e00ced6
--- /dev/null
+++ b/man/has.close.Rd
@@ -0,0 +1,71 @@
+\name{has.close}
+\alias{has.close}
+\alias{has.close.default}
+\alias{has.close.ppp}
+\alias{has.close.pp3}
+\title{
+  Check Whether Points Have Close Neighbours
+}
+\description{
+  For each point in a point pattern, determine whether the
+  point has a close neighbour in the same pattern.
+}
+\usage{
+  has.close(X, r, Y=NULL, \dots)
+
+  \method{has.close}{default}(X, r, Y=NULL, \dots, periodic=FALSE)
+
+  \method{has.close}{ppp}(X, r, Y=NULL, \dots, periodic=FALSE, sorted=FALSE)
+
+  \method{has.close}{pp3}(X, r, Y=NULL, \dots, periodic=FALSE, sorted=FALSE)
+}
+\arguments{
+  \item{X,Y}{
+    Point patterns of class \code{"ppp"} or \code{"pp3"} or \code{"lpp"}.
+  }
+  \item{r}{
+    Threshold distance: a number greater than zero.
+  }
+  \item{periodic}{
+    Logical value indicating whether to measure distances in the
+    periodic sense, so that opposite sides of the (rectangular) window
+    are treated as identical.
+  }
+  \item{sorted}{
+    Logical value, indicating whether the points of \code{X}
+    (and \code{Y}, if given) are already sorted into increasing order of the
+    \eqn{x} coordinates.
+  }
+  \item{\dots}{Other arguments are ignored.}
+}
+\details{
+  This is simply a faster version of \code{(nndist(X) <= r)}
+  or \code{(nncross(X,Y,what="dist") <= r)}.
+
+  \code{has.close(X,r)} determines, for each point in the pattern \code{X},
+  whether or not this point has a neighbour in the same pattern \code{X}
+  which lies at a distance less than or equal to \code{r}.
+
+  \code{has.close(X,r,Y)} determines, for each point in the pattern \code{X},
+  whether or not this point has a neighbour in the \emph{other} pattern
+  \code{Y} which lies at a distance less than or equal to \code{r}.
+
+  The function \code{has.close} is generic, with methods for
+  \code{"ppp"} and \code{"pp3"} and a default method.
+}
+\value{
+  A logical vector, with one entry for each point of \code{X}.
+}
+\author{
+  \adrian.
+}
+\seealso{
+  \code{\link{nndist}}
+}
+\examples{
+  has.close(redwood, 0.05)
+  with(split(amacrine), has.close(on, 0.05, off))
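+  # equivalent to, but faster than, thresholding nndist
+  all(has.close(redwood, 0.05) == (nndist(redwood) <= 0.05))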
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/headtail.Rd b/man/headtail.Rd
new file mode 100644
index 0000000..55fde3b
--- /dev/null
+++ b/man/headtail.Rd
@@ -0,0 +1,80 @@
+\name{headtail}
+\alias{head.ppp}
+\alias{head.ppx}
+\alias{head.psp}
+\alias{head.tess}
+\alias{tail.ppp}
+\alias{tail.ppx}
+\alias{tail.psp}
+\alias{tail.tess}
+\title{
+  First or Last Part of a Spatial Pattern
+}
+\description{
+  Returns the first few elements (\code{head}) or the last few
+  elements (\code{tail}) of a spatial pattern.
+}
+\usage{
+  \method{head}{ppp}(x, n = 6L, \dots)
+
+  \method{head}{ppx}(x, n = 6L, \dots)
+
+  \method{head}{psp}(x, n = 6L, \dots)
+
+  \method{head}{tess}(x, n = 6L, \dots)
+
+  \method{tail}{ppp}(x, n = 6L, \dots)
+
+  \method{tail}{ppx}(x, n = 6L, \dots)
+
+  \method{tail}{psp}(x, n = 6L, \dots)
+
+  \method{tail}{tess}(x, n = 6L, \dots)
+}
+\arguments{
+  \item{x}{
+    A spatial pattern of geometrical figures,
+    such as a spatial pattern of points
+    (an object of class \code{"ppp"}, \code{"pp3"}, \code{"ppx"} or
+    \code{"lpp"}) or a spatial pattern of line segments
+    (an object of class \code{"psp"}) or a tessellation
+    (object of class \code{"tess"}).
+  }
+  \item{n}{
+    Integer. The number of elements of the pattern that should be extracted.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  These are methods for the generic functions \code{\link[utils]{head}}
+  and \code{\link[utils]{tail}}. They extract the first or last
+  \code{n} elements from \code{x} and return them as an object of the
+  same kind as \code{x}.
+
+  To inspect the spatial coordinates themselves, use
+  \code{\link[utils]{View}(x)}
+  or \code{head(as.data.frame(x))}.
+}
+\value{
+  An object of the same class as \code{x}.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link[utils]{View}}, \code{\link[utils]{edit}}.
+  
+  Conversion to data frame:
+  \code{\link{as.data.frame.ppp}},
+  \code{\link{as.data.frame.ppx}},
+  \code{\link{as.data.frame.psp}}
+}
+\examples{
+  head(cells)
+  tail(as.psp(spiders), 10)
+  head(dirichlet(cells), 4)
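+  # to inspect the coordinates themselves (see Details)
+  head(as.data.frame(cells))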
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/heather.Rd b/man/heather.Rd
new file mode 100644
index 0000000..5e8d463
--- /dev/null
+++ b/man/heather.Rd
@@ -0,0 +1,109 @@
+\name{heather}
+\alias{heather}
+\docType{data}
+\title{Diggle's Heather Data}
+\description{
+  The spatial mosaic of vegetation of the heather plant
+  (\emph{Calluna vulgaris}) recorded in a 10 by 20 metre
+  sampling plot in Sweden.
+} 
+\format{
+  A list with three entries, representing the same data at
+  different spatial resolutions:
+  \tabular{ll}{
+    \code{coarse} \tab original heather data, 100 by 200 pixels \cr
+    \code{medium} \tab current heather data, 256 by 512 pixels \cr
+    \code{fine} \tab   finest resolution data, 778 by 1570 pixels
+  }
+  Each of these entries is an object of class \code{"owin"}
+  containing a binary pixel mask. 
+}
+\usage{data(heather)}
+\source{Peter Diggle}
+\section{Notes on data}{
+  These data record the spatial mosaic of vegetation
+  of the heather plant (\emph{Calluna vulgaris}) in a 10 by 20 metre
+  sampling plot near \ifelse{latex}{\out{J{\"a}dra{\aa}s}}{Jadraas}, Sweden. 
+  They were recorded and first analysed by Diggle (1981).
+
+  The dataset \code{heather} contains three different versions of the data
+  that have been analysed by different writers over the decades.
+
+  \describe{
+    \item{coarse:}{
+      Data as originally digitised by Diggle in 1983
+      at 100 by 200 pixels resolution (i.e. 10 pixels = 1 metre).
+
+      These data were entered by hand in the form of a
+      run-length encoding (original file no longer available)
+      and translated by a program into a 100 by 200 pixel binary image.
+
+      There are known to be some errors in the image
+      which arise from errors in counting the run-length
+      so that occasionally there will be an unexpected 'spike'
+      on one single column.
+    }
+    \item{fine:}{
+      A fine scale digitisation of the original map,
+      prepared by CWI (Centre for Computer Science, Amsterdam,
+      Netherlands) in 1994.
+
+      The original hand-drawn map
+      was scanned by \adrian, and processed
+      by Chris Jonker, Henk Heijmans and \adrian
+      to yield a clean binary image of 778 by 1570 pixels resolution.
+    }
+    \item{medium:}{
+      The version of the heather data currently supplied on
+      Professor Diggle's website. This is a 256 by 512 pixel image.
+      The method used to create this image is not stated.
+    }
+  }
+}
+\section{History of analysis of data}{
+  The data were recorded, presented and analysed by Diggle (1981).
+  He proposed a Boolean model consisting of discs of random size
+  with centres generated by a Poisson point process.
+  
+  Renshaw and Ford (1983) reported that spectral analysis of the data
+  suggested the presence of strong row and column effects. However, this
+  may have been attributable to errors in the run-length encoding
+  of the original data.
+
+  Hall (1985) and Hall (1988, pp. 301--318) took a bootstrap approach.
+
+  Ripley (1988, pp. 121--122, 131--135) used opening and closing functions
+  to argue that a Boolean model of discs is inappropriate.
+
+  Cressie (1991, pp. 763--770) tried a more general Boolean model.
+}
+\references{
+  Cressie, N.A.C. (1991)
+  \emph{Statistics for Spatial Data}.
+  John Wiley and Sons, New York.
+
+  Diggle, P.J. (1981)
+  Binary mosaics and the spatial pattern of heather.
+  \emph{Biometrics} \bold{37}, 531--539.
+
+  Hall, P. (1985)
+  Resampling a coverage pattern.
+  \emph{Stochastic Processes and their Applications}
+  \bold{20}, 231--246.
+
+  Hall, P. (1988)
+  \emph{An introduction to the theory of coverage processes}.
+  John Wiley and Sons, New York.
+
+  Renshaw, E. and Ford, E.D. (1983)
+  The interpretation of process from pattern using
+  two-dimensional spectral analysis: Methods and
+  problems of interpretation. \emph{Applied Statistics} \bold{32}, 51--63.
+  
+  Ripley, B.D. (1988)
+  \emph{Statistical Inference for Spatial Processes}.
+  Cambridge University Press.
+} 
+\keyword{datasets}
+\keyword{spatial}
+
diff --git a/man/hextess.Rd b/man/hextess.Rd
new file mode 100644
index 0000000..f1586f7
--- /dev/null
+++ b/man/hextess.Rd
@@ -0,0 +1,89 @@
+\name{hextess}
+\alias{hexgrid}
+\alias{hextess}
+\title{
+  Hexagonal Grid or Tessellation
+}
+\description{
+  Construct a hexagonal grid of points,
+  or a hexagonal tessellation.
+}
+\usage{
+hexgrid(W, s, offset = c(0, 0), origin=NULL, trim = TRUE)
+
+hextess(W, s, offset = c(0, 0), origin=NULL, trim = TRUE)
+}
+
+\arguments{
+  \item{W}{
+    Window in which to construct the hexagonal grid or tessellation.
+    An object of class \code{"owin"}.
+  }
+  \item{s}{
+    Side length of hexagons. A positive number.
+  }
+  \item{offset}{
+    Numeric vector of length 2 specifying a shift of the
+    hexagonal grid. See Details.
+  }
+  \item{origin}{
+    Numeric vector of length 2 specifying the initial origin
+    of the hexagonal grid, before the offset is applied.
+    See Details.
+  }
+  \item{trim}{
+    Logical value indicating whether to restrict the result to
+    the window \code{W}. See Details.
+  }
+}
+\details{
+  \code{hexgrid} constructs a hexagonal grid of points
+  on the window \code{W}. If \code{trim=TRUE} (the default),
+  the grid is intersected with \code{W} so that all points lie
+  inside \code{W}. If \code{trim=FALSE}, then we retain all grid points
+  which are the centres of hexagons that intersect \code{W}.
+
+  \code{hextess} constructs a tessellation of hexagons
+  on the window \code{W}. If \code{trim=TRUE} (the default),
+  the tessellation is restricted to the interior of \code{W},
+  so that there will be some fragmentary hexagons near the
+  boundary of \code{W}. If \code{trim=FALSE}, the tessellation
+  consists of all hexagons which intersect \code{W}.
+
+  The points of \code{hexgrid(...)} are the
+  centres of the tiles of \code{hextess(...)} 
+  in the same order.
+  
+  In the initial position of the grid or tessellation,
+  one of the grid points (tile centres) is placed at the 
+  \code{origin}, which defaults to the midpoint of the
+  bounding rectangle of \code{W}. The grid can be shifted
+  relative to this origin by specifying the \code{offset}.
+}
+\value{
+  The value of \code{hexgrid} is a point pattern (object of class
+  \code{"ppp"}).
+
+  The value of \code{hextess} is a tessellation (object of class \code{"tess"}).
+}
+\seealso{
+  \code{\link{tess}}
+
+  \code{\link{hexagon}}
+}
+\examples{
+  if(interactive()) {
+    W <- Window(chorley)
+    s <- 0.7
+  } else {
+    W <- letterR
+    s <- 0.3
+  }
+  plot(hextess(W, s))
+  plot(hexgrid(W, s), add=TRUE)
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/hierpair.family.Rd b/man/hierpair.family.Rd
new file mode 100644
index 0000000..f1e5415
--- /dev/null
+++ b/man/hierpair.family.Rd
@@ -0,0 +1,38 @@
+\name{hierpair.family}
+\alias{hierpair.family}
+\title{Hierarchical Pairwise Interaction Process Family}
+\description{
+  An object describing the family of all hierarchical pairwise interaction Gibbs
+  point processes.
+}
+\details{
+  \bold{Advanced Use Only!}
+  
+  This structure would not normally be touched by
+  the user. It describes 
+  the hierarchical pairwise interaction family of point process models.
+
+  Anyway, \code{hierpair.family} is an object of class \code{"isf"}
+  containing a function \code{hierpair.family$eval} for
+  evaluating the sufficient statistics of any hierarchical pairwise interaction
+  point process model taking an exponential family form. 
+} 
+\seealso{
+  Other families: 
+  \code{\link{pairwise.family}},
+  \code{\link{pairsat.family}},
+  \code{\link{ord.family}},
+  \code{\link{inforder.family}}.
+
+  Hierarchical Strauss interaction:
+  \code{\link{HierStrauss}}.
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege.
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/hist.funxy.Rd b/man/hist.funxy.Rd
new file mode 100644
index 0000000..bfe54d1
--- /dev/null
+++ b/man/hist.funxy.Rd
@@ -0,0 +1,67 @@
+\name{hist.funxy}
+\alias{hist.funxy}
+\title{Histogram of Values of a Spatial Function}
+\description{
+  Computes and displays a histogram of the values of a spatial function
+  of class \code{"funxy"}.
+}
+\usage{
+  \method{hist}{funxy}(x, \dots, xname)
+}
+\arguments{
+  \item{x}{A pixel image (object of class \code{"funxy"}).}
+  \item{\dots}{
+    Arguments passed to \code{\link{as.im}}
+    or \code{\link{hist.im}}.
+  }
+  \item{xname}{
+    Optional. Character string to be used as the
+    name of the dataset \code{x}.
+  }
+}
+\details{
+  This function computes and (by default) displays a histogram
+  of the values of the function \code{x}.
+
+  An object of class \code{"funxy"} 
+  describes a function of spatial location. It is a \code{function(x,y,..)}
+  in the \R language, with additional attributes.
+
+  The function \code{hist.funxy} is a method for the generic
+  function \code{\link{hist}} for the class \code{"funxy"}.
+
+  The function is first converted to a pixel image using \code{\link{as.im}},
+  then \code{\link{hist.im}} is called to produce the histogram.
+
+  Any arguments in \code{...} are passed to \code{\link{as.im}}
+  to determine the pixel resolution, 
+  or to \code{\link{hist.im}} to determine the histogram breaks
+  and to control or suppress plotting.
+  Useful arguments include \code{W} for the spatial domain,
+  \code{eps,dimyx} for pixel resolution, \code{main} for the main title.
+}
+\value{
+  An object of class \code{"histogram"} as returned
+  by \code{\link[graphics:hist]{hist.default}}. This object can be
+  plotted.
+}
+\seealso{
+  \code{\link{spatialcdf}} for the cumulative distribution function
+  of an image or function.
+
+  \code{\link{hist}},
+  \code{\link{hist.default}}.
+
+  For other statistical graphics such as Q-Q plots,
+  use \code{as.im(X)[]} to extract the pixel values of image \code{X},
+  and apply the usual statistical graphics commands.
+}
+\examples{
+  f <- funxy(function(x,y) {x^2}, unit.square())
+  hist(f)
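+  # extra arguments go to as.im (pixel resolution) or hist.im (breaks)
+  hist(f, dimyx=32, breaks=seq(0, 1, by=0.1))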
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/hist.im.Rd b/man/hist.im.Rd
new file mode 100644
index 0000000..3c41700
--- /dev/null
+++ b/man/hist.im.Rd
@@ -0,0 +1,78 @@
+\name{hist.im}
+\alias{hist.im}
+\title{Histogram of Pixel Values in an Image}
+\description{
+  Computes and displays a histogram of the pixel values in a pixel image.
+  The \code{hist} method for class \code{"im"}.
+}
+\usage{
+  \method{hist}{im}(x, \dots, probability=FALSE, xname)
+}
+\arguments{
+  \item{x}{A pixel image (object of class \code{"im"}).}
+  \item{\dots}{Arguments passed to \code{\link{hist.default}}
+    or \code{\link{barplot}}.}
+  \item{probability}{Logical. If \code{TRUE}, the histogram will be
+    normalised to give probabilities or probability densities.
+  }
+  \item{xname}{Optional. Character string to be used as the
+  name of the dataset \code{x}.
+  }
+}
+\details{
+  This function computes and (by default) displays a histogram
+  of the pixel values in the image \code{x}.
+
+  An object of class \code{"im"}
+  describes a pixel image. See \code{\link{im.object}}
+  for details of this class.
+
+  The function \code{hist.im} is a method for the generic
+  function \code{\link{hist}} for the class \code{"im"}. 
+  
+  Any arguments in \code{...} are passed to \code{\link{hist.default}}
+  (for numeric valued images) or \code{\link{barplot}} (for factor or
+  logical images).
+  For example, such arguments control the axes, and may be used to
+  suppress plotting.
+}
+\value{
+  For numeric-valued images, an object of class \code{"histogram"} as returned
+  by \code{\link[graphics:hist]{hist.default}}. This object can be
+  plotted.
+  
+  For factor-valued or logical images, an object of class
+  \code{"barplotdata"}, which can be plotted.
+  This is a list with components
+  called \code{counts} (contingency table of counts of the numbers of
+  pixels taking each possible value), \code{probs} (corresponding relative
+  frequencies) and \code{mids} (graphical \eqn{x}-coordinates of the
+  midpoints of the bars in the barplot). 
+}
+\seealso{
+  \code{\link{spatialcdf}} for the cumulative distribution function
+  of an image.
+
+  \code{\link{hist}},
+  \code{\link{hist.default}},
+  \code{\link{barplot}}.
+
+  For other statistical graphics such as Q-Q plots,
+  use \code{X[]} to extract the pixel values of image \code{X},
+  and apply the usual statistical graphics commands.
+  
+  For information about pixel images see
+  \code{\link{im.object}},
+  \code{\link{summary.im}}.
+}
+\examples{
+  X <- as.im(function(x,y) {x^2}, unit.square())
+  hist(X)
+  hist(cut(X,3))
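+  # illustrative: normalised to probability densities
+  hist(X, probability=TRUE)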
+}
+\author{
+  \adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/hopskel.Rd b/man/hopskel.Rd
new file mode 100644
index 0000000..96614ab
--- /dev/null
+++ b/man/hopskel.Rd
@@ -0,0 +1,107 @@
+\name{hopskel}
+\alias{hopskel}
+\alias{hopskel.test}
+\title{Hopkins-Skellam Test}
+\description{
+  Perform the Hopkins-Skellam test of Complete Spatial Randomness,
+  or simply calculate the test statistic.
+}
+\usage{
+hopskel(X)
+
+hopskel.test(X, \dots,
+             alternative=c("two.sided", "less", "greater",
+                           "clustered", "regular"),
+             method=c("asymptotic", "MonteCarlo"),
+             nsim=999)
+}
+\arguments{
+  \item{X}{
+    Point pattern (object of class \code{"ppp"}).
+  }
+  \item{alternative}{
+    String indicating the type of alternative for the
+    hypothesis test. Partially matched.
+  }
+  \item{method}{
+    Method of performing the test. Partially matched.
+  }
+  \item{nsim}{
+    Number of Monte Carlo simulations to perform, if a Monte Carlo
+    p-value is required.
+  }
+  \item{\dots}{Ignored.}
+}
+\details{
+  Hopkins and Skellam (1954) proposed a test of Complete Spatial
+  Randomness based on comparing nearest-neighbour distances with
+  point-event distances.
+
+  If the point pattern \code{X} contains \code{n}
+  points, we first compute the nearest-neighbour distances
+  \eqn{P_1, \ldots, P_n}{P[1], ..., P[n]} 
+  so that \eqn{P_i}{P[i]} is the distance from the \eqn{i}th data
+  point to the nearest other data point. Then we 
+  generate another completely random pattern \code{U} with
+  the same number \code{n} of points, and compute for each point of \code{U}
+  the distance to the nearest point of \code{X}, giving
+  distances \eqn{I_1, \ldots, I_n}{I[1], ..., I[n]}.
+  The test statistic is 
+  \deqn{
+    A = \frac{\sum_i P_i^2}{\sum_i I_i^2}
+  }{
+    A = (sum[i] P[i]^2) / (sum[i] I[i]^2)
+  }
+  The null distribution of \eqn{A} is roughly
+  an \eqn{F} distribution with shape parameters \eqn{(2n,2n)}.
+  (This is equivalent to using the test statistic \eqn{H=A/(1+A)}
+  and referring \eqn{H} to the Beta distribution with parameters
+  \eqn{(n,n)}).
+
+  The function \code{hopskel} calculates the Hopkins-Skellam test statistic
+  \eqn{A}, and returns its numeric value. This can be used as a simple
+  summary of spatial pattern: the value \eqn{A=1} is consistent
+  with Complete Spatial Randomness, while values \eqn{A < 1} are
+  consistent with spatial clustering, and values \eqn{A > 1} are consistent
+  with spatial regularity.
+
+  The function \code{hopskel.test} performs the test.
+  If \code{method="asymptotic"} (the default), the test statistic \eqn{H}
+  is referred to the \eqn{F} distribution. If \code{method="MonteCarlo"},
+  a Monte Carlo test is performed using \code{nsim} simulated point
+  patterns.
+}
+\value{
+  The value of \code{hopskel} is a single number.
+
+  The value of \code{hopskel.test} is an object of class \code{"htest"}
+  representing the outcome of the test. It can be printed. 
+}
+\references{
+  Hopkins, B. and Skellam, J.G. (1954) 
+  A new method of determining the type of distribution
+  of plant individuals. \emph{Annals of Botany} \bold{18}, 
+  213--227.
+}
+\seealso{
+  \code{\link{clarkevans}},
+  \code{\link{clarkevans.test}},
+  \code{\link{nndist}},
+  \code{\link{nncross}}
+}
+\examples{
+  hopskel(redwood)
+  hopskel.test(redwood, alternative="clustered")
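+  # Monte Carlo version of the test; nsim kept small here
+  # for speed (illustrative)
+  hopskel.test(redwood, method="MonteCarlo", nsim=99)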
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
+\keyword{htest}
diff --git a/man/humberside.Rd b/man/humberside.Rd
new file mode 100644
index 0000000..6f17979
--- /dev/null
+++ b/man/humberside.Rd
@@ -0,0 +1,86 @@
+\name{humberside}
+\alias{humberside}
+\alias{humberside.convex}
+\docType{data}
+\title{Humberside Data on Childhood Leukaemia and Lymphoma}
+\description{
+  Spatial locations of cases of childhood leukaemia
+  and lymphoma, and randomly-selected controls,
+  in North Humberside.
+  A marked point pattern.
+} 
+\format{
+  The dataset \code{humberside} is
+  an object of class \code{"ppp"}
+  representing a marked point pattern.
+  Entries include
+  \tabular{ll}{
+    \code{x} \tab Cartesian \eqn{x}-coordinate of home address \cr
+    \code{y} \tab Cartesian \eqn{y}-coordinate of home address \cr
+    \code{marks} \tab factor with levels \code{case} and \code{control} \cr
+    \tab indicating whether this is a disease case\cr
+    \tab or a control.
+  }
+  See \code{\link{ppp.object}} for details of the format.
+
+  The dataset \code{humberside.convex} is an object of the
+  same format, representing the same point pattern data,
+  but contained in a larger, 5-sided convex polygon.
+}
+\usage{data(humberside)}
+\examples{
+   humberside
+   summary(humberside)
+   plot(humberside)
+   plot(Window(humberside.convex), add=TRUE, lty=2)
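+   # at the 100 metre resolution some points are duplicated (see Notes);
+   # this counts them (illustrative)
+   sum(duplicated(humberside))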
+}
+\source{
+  Dr Ray Cartwright and Dr Freda Alexander.
+  Published and analysed in Cuzick and Edwards (1990), see Table 1.
+  Pentagonal boundary from Diggle and Chetwynd (1991), Figure 1.
+  Point coordinates and pentagonal boundary supplied by Andrew Lawson.
+  Detailed region boundary was digitised by \adrian, 2005, from
+  a reprint of Cuzick and Edwards (1990).
+}
+\section{Notes}{
+  Cuzick and Edwards (1990) first presented and analysed these data.
+
+  The data record 62 cases of childhood leukaemia and lymphoma
+  diagnosed in the North Humberside region of England between 1974 and
+  1986, together with 141 controls selected at random from the birth
+  register for the same period.
+
+  The data are represented as a marked point pattern,
+  with the points giving the spatial location of each individual's home address
+  (actually, the centroid for the postal code)
+  and the marks identifying cases and controls.
+
+  Coordinates are expressed in units of 100 metres, and the resolution is
+  100 metres. At this resolution, there are some duplicated points.
+  To determine which points are duplicates,
+  use \code{\link{duplicated.ppp}}.
+  To remove the duplication, use \code{\link{unique.ppp}}.
+  
+  Two versions of the dataset are supplied, both containing the
+  same point coordinates, but using different windows.
+  The dataset \code{humberside} has a polygonal window with 102 edges
+  which closely approximates the Humberside region,
+  while \code{humberside.convex} has a convex 5-sided polygonal window 
+  originally used by Diggle and Chetwynd (1991) and shown in
+  Figure 1 of that paper. (This pentagon has been modified slightly
+  from the original data, by shifting two vertices horizontally by 1 unit,
+  so that the pentagon contains all the data points.)
+}
+\references{
+  Cuzick, J. and Edwards, R. (1990)
+  Spatial clustering for inhomogeneous populations.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{52}, 73--104.
+
+  Diggle, P.J. and Chetwynd, A.G. (1991)
+  Second-order analysis of spatial clustering for
+  inhomogeneous populations. \emph{Biometrics} \bold{47}, 1155--1163.
+} 
+\keyword{datasets}
+\keyword{spatial}
+
diff --git a/man/hybrid.family.Rd b/man/hybrid.family.Rd
new file mode 100644
index 0000000..a444c23
--- /dev/null
+++ b/man/hybrid.family.Rd
@@ -0,0 +1,40 @@
+\name{hybrid.family}
+\alias{hybrid.family}
+\title{
+  Hybrid Interaction Family
+}
+\description{
+  An object describing the family of all hybrid interactions.
+}
+\details{
+  \bold{Advanced Use Only!}
+  
+  This structure would not normally be touched by
+  the user. It describes 
+  the family of all hybrid point process models.
+ 
+  If you need to create a specific hybrid interaction model for use in 
+  modelling, use the function \code{\link{Hybrid}}.
+ 
+  Technically, \code{hybrid.family} is an object of class \code{"isf"}
+  containing a function \code{hybrid.family$eval} for
+  evaluating the sufficient statistics of any hybrid interaction
+  point process model.
+}
+\seealso{
+  Use \code{\link{Hybrid}} to make hybrid interactions.
+  
+  Other families: 
+  \code{\link{pairwise.family}},
+  \code{\link{pairsat.family}},
+  \code{\link{ord.family}},
+  \code{\link{inforder.family}}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/hyperframe.Rd b/man/hyperframe.Rd
new file mode 100644
index 0000000..f7bb25c
--- /dev/null
+++ b/man/hyperframe.Rd
@@ -0,0 +1,108 @@
+\name{hyperframe}
+\alias{hyperframe}
+\title{Hyper Data Frame}
+\description{
+  Create a hyperframe: a two-dimensional array in which each column
+  consists of values of the same atomic type (like the columns
+  of a data frame) or objects of the same class. 
+}
+\usage{
+   hyperframe(...,
+             row.names=NULL, check.rows=FALSE, check.names=TRUE,
+             stringsAsFactors=default.stringsAsFactors())
+}
+\arguments{
+  \item{\dots}{
+    Arguments of the form \code{value} or \code{tag=value}.
+    Each \code{value} is either an atomic vector, or a list of objects of the
+    same class, or a single atomic value, or a single object.
+    Each \code{value} will become a column of the array.
+    The \code{tag} determines the name of the column. See Details.
+  }
+  \item{row.names,check.rows,check.names,stringsAsFactors}{
+    Arguments passed to \code{\link{data.frame}} controlling the
+    names of the rows, whether to check that rows are consistent,
+    whether to check validity of the column names, and whether to
+    convert character columns to factors.
+  }
+}
+\details{
+  A hyperframe is like a data frame, except that its entries
+  can be objects of any kind.
+
+  A hyperframe is a two-dimensional array in which each column consists of
+  values of one atomic type (as in a data frame) or consists of
+  objects of one class.
+
+  The arguments \code{\dots} are any number of arguments of
+  the form \code{value} or \code{tag=value}. Each \code{value} will
+  become a column of the array. The \code{tag} determines the name
+  of the column.
+
+  Each \code{value} can be either
+  \itemize{
+    \item an atomic vector or factor
+    (i.e. numeric vector, integer vector, character vector, logical
+    vector, complex vector or factor)
+    \item a list of objects which are all of the same class
+    \item one atomic value, which will be replicated to make an atomic
+    vector or factor
+    \item one object, which will be replicated to make a list of objects.
+  }
+
+  All columns (vectors, factors and lists) must be of the same length,
+  if their length is greater than 1. 
+}
+\section{Methods for Hyperframes}{
+  There are methods for
+  \code{print}, \code{plot}, \code{summary}, \code{with}, \code{split},
+  \code{[}, \code{[<-}, \code{$}, \code{$<-},
+  \code{names}, \code{as.data.frame}, \code{as.list},
+  \code{cbind} and \code{rbind} for the class of hyperframes. There is also
+  \code{is.hyperframe} and \code{\link{as.hyperframe}}.
+}
+\value{
+  An object of class \code{"hyperframe"}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{as.hyperframe}},
+  \code{\link{as.hyperframe.ppx}},
+  \code{\link{plot.hyperframe}},
+  \code{\link{[.hyperframe}},
+  \code{\link{with.hyperframe}},
+  \code{\link{split.hyperframe}},
+  \code{\link{as.data.frame.hyperframe}},
+  \code{\link{cbind.hyperframe}},
+  \code{\link{rbind.hyperframe}}
+}
+\examples{
+ # equivalent to a data frame
+  hyperframe(X=1:10, Y=3)
+
+ # list of functions
+  hyperframe(f=list(sin, cos, tan))
+
+ # table of functions and matching expressions
+  hyperframe(f=list(sin, cos, tan),
+             e=list(expression(sin(x)), expression(cos(x)), expression(tan(x))))
+
+  hyperframe(X=1:10, Y=letters[1:10], Z=factor(letters[1:10]),
+    stringsAsFactors=FALSE)
+
+  lambda <- runif(4, min=50, max=100)
+  X <- lapply(as.list(lambda), function(x) { rpoispp(x) })
+  h <- hyperframe(lambda=lambda, X=X)
+  h
+
+  h$lambda2 <- lambda^2
+  h[, "lambda3"] <- lambda^3
+  h[, "Y"] <- X
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/hyytiala.Rd b/man/hyytiala.Rd
new file mode 100644
index 0000000..6ba1c11
--- /dev/null
+++ b/man/hyytiala.Rd
@@ -0,0 +1,41 @@
+\name{hyytiala}
+\alias{hyytiala}
+\docType{data}
+\title{
+  Scots pines and other trees at Hyytiala
+}
+\description{
+  This dataset is a spatial point pattern of trees recorded at
+  \ifelse{latex}{\out{Hyyti\"{a}l\"{a}}}{Hyytiala}, Finland. 
+  The majority of the trees are Scots pines.
+  See Kokkila et al. (2002).
+
+  The dataset \code{hyytiala} is a point pattern
+  (object of class \code{"ppp"}) containing the spatial coordinates
+  of each tree, marked by species (a factor with levels \code{aspen},
+  \code{birch}, \code{pine} and \code{rowan}).
+  The survey region is a 20 by 20 metre square.
+  Coordinates are given in metres.
+}
+\usage{data(hyytiala)}
+\examples{
+data(hyytiala)
+plot(hyytiala, cols=2:5)
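+# tabulate the number of trees of each species (illustrative)
+table(marks(hyytiala))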
+}
+\source{
+  Nicolas Picard
+}
+\references{
+  Kokkila, T., \ifelse{latex}{\out{M{\"a}kel{\"a}}}{Makela}, A. and
+  Nikinmaa, E. (2002)
+  A method for generating stand structures using Gibbs marked point
+  process.
+  \emph{Silva Fennica} \bold{36}, 265--277.
+
+  Picard, N., Bar-Hen, A., Mortier, F. and Chadoeuf, J. (2009)
+  The multi-scale marked area-interaction point process: a model for
+  the spatial pattern of trees.
+  \emph{Scandinavian Journal of Statistics} \bold{36}, 23--41.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/identify.ppp.Rd b/man/identify.ppp.Rd
new file mode 100644
index 0000000..ebe71ab
--- /dev/null
+++ b/man/identify.ppp.Rd
@@ -0,0 +1,62 @@
+\name{identify.ppp}
+\alias{identify.ppp}
+\alias{identify.lpp}
+\title{Identify Points in a Point Pattern}
+\description{
+  If a point pattern is plotted in the graphics window,
+  this function will find the point of the pattern which is nearest to
+  the mouse position, and print its mark value (or its serial number
+  if there is no mark).
+}
+\usage{
+  \method{identify}{ppp}(x, \dots)
+
+  \method{identify}{lpp}(x, \dots)
+}
+\arguments{
+  \item{x}{
+    A point pattern (object of class \code{"ppp"} or \code{"lpp"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link[graphics]{identify.default}}.
+  }
+}
+\value{
+  If \code{x} is unmarked, the result is 
+  a vector containing the serial numbers of the points in the pattern
+  \code{x} that were identified.
+  If \code{x} is marked, the result is a 
+  2-column matrix, the first column containing the serial numbers
+  and the second containing the marks for these points.
+}
+\details{
+  This is a method for the generic function \code{\link[graphics]{identify}}
+  for point pattern objects.
+
+  The point pattern \code{x} should first be plotted
+  using \code{\link{plot.ppp}} or \code{\link{plot.lpp}}
+  as appropriate. Then \code{identify(x)}
+  reads the position of the graphics pointer each time the
+  left mouse button is pressed.  It then finds 
+  the point of the pattern \code{x} closest to the mouse position.
+  If this closest point is sufficiently close to the mouse pointer,
+  its index (and its mark if any) 
+  will be returned as part of the value of the call.
+
+  Each time a point of the pattern is identified,
+  text will be displayed next to the point,
+  showing its serial number (if \code{x} is unmarked)
+  or its mark value (if \code{x} is marked).
+}
+\seealso{
+  \code{\link[graphics]{identify}},
+  \code{\link{clickppp}}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{iplot}
diff --git a/man/identify.psp.Rd b/man/identify.psp.Rd
new file mode 100644
index 0000000..3c69187
--- /dev/null
+++ b/man/identify.psp.Rd
@@ -0,0 +1,61 @@
+\name{identify.psp}
+\alias{identify.psp}
+\title{Identify Segments in a Line Segment Pattern}
+\description{
+  If a line segment pattern is plotted in the graphics window,
+  this function will find the segment which is nearest to
+  the mouse position, and print its serial number.
+}
+\usage{
+  \method{identify}{psp}(x, \dots, labels=seq_len(nsegments(x)), n=nsegments(x), plot=TRUE)
+}
+\arguments{
+  \item{x}{
+    A line segment pattern (object of class \code{"psp"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{labels}{
+    Labels associated with the segments, to be plotted when the
+    segments are identified. A character vector or numeric vector
+    of length equal to the number of segments in \code{x}.
+  }
+  \item{n}{
+    Maximum number of segments to be identified.
+  }
+  \item{plot}{
+    Logical. Whether to plot the labels when a segment is identified.
+  }
+}
+\value{
+  Vector containing the serial numbers of the segments in the pattern
+  \code{x} that were identified.
+}
+\details{
+  This is a method for the generic function \code{\link[graphics]{identify}}
+  for line segment pattern objects.
+
+  The line segment pattern \code{x} should first be plotted
+  using \code{\link{plot.psp}}. Then \code{identify(x)}
+  reads the position of the graphics pointer each time the
+  left mouse button is pressed.  It then finds 
+  the segment in the pattern \code{x} that is closest to the mouse position.
+  This segment's index will be returned as part of the value of the call.
+
+  Each time a segment is identified,
+  text will be displayed next to the segment,
+  showing its serial number (or the relevant entry of \code{labels}).
+}
+\seealso{
+  \code{\link[graphics]{identify}},
+  \code{\link{identify.ppp}}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{iplot}
diff --git a/man/idw.Rd b/man/idw.Rd
new file mode 100644
index 0000000..d21a854
--- /dev/null
+++ b/man/idw.Rd
@@ -0,0 +1,115 @@
+\name{idw}
+\alias{idw}
+\title{Inverse-Distance Weighted Smoothing of Observations at Irregular Points}
+\description{
+  Performs spatial smoothing of numeric values observed
+  at a set of irregular locations using inverse-distance weighting.
+}
+\usage{
+idw(X, power=2, at="pixels", ...)
+}
+\arguments{
+  \item{X}{A marked point pattern (object of class \code{"ppp"}).}
+  \item{power}{Numeric. Power of distance used in the weighting.}
+  \item{at}{
+    String specifying whether to compute the intensity values
+    at a grid of pixel locations (\code{at="pixels"}) or
+    only at the points of \code{X} (\code{at="points"}).
+  }
+  \item{\dots}{Arguments passed to \code{\link{as.mask}}
+    to control the pixel resolution of the result.}
+}
+\details{
+  This function performs spatial smoothing of numeric values
+  observed at a set of irregular locations.
+  
+  Smoothing is performed by inverse distance weighting. If the
+  observed values are \eqn{v_1,\ldots,v_n}{v[1],...,v[n]}
+  at locations \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} respectively,
+  then the smoothed value at a location \eqn{u} is
+  \deqn{
+    g(u) = \frac{\sum_i w_i v_i}{\sum_i w_i}
+  }{
+    g(u) = (sum of w[i] * v[i])/(sum of w[i])
+  }
+  where the weights are the inverse \eqn{p}-th powers of distance,
+  \deqn{
+    w_i = \frac 1 {d(u,x_i)^p}
+  }{
+    w[i] = 1/d(u,x[i])^p
+  }
+  where \eqn{d(u,x_i) = ||u - x_i||}{d(u,x[i])}
+  is the Euclidean distance from \eqn{u} to \eqn{x_i}{x[i]}.
+  
+  The argument \code{X} must be a marked point pattern (object
+  of class \code{"ppp"}, see \code{\link{ppp.object}}).
+  The points of the pattern are taken to be the
+  observation locations \eqn{x_i}{x[i]}, and the marks of the pattern
+  are taken to be the numeric values \eqn{v_i}{v[i]} observed at these
+  locations.
+
+  The marks are allowed to be a data frame.
+  Then the smoothing procedure is applied to each
+  column of marks. 
+  
+  If \code{at="pixels"} (the default), the smoothed mark value
+  is calculated at a grid of pixels, and the result is a pixel image.
+  The arguments \code{\dots} control the pixel resolution.
+  See \code{\link{as.mask}}.
+
+  If \code{at="points"}, the smoothed mark values are calculated
+  at the data points only, using a leave-one-out rule (the mark value
+  at a data point is excluded when calculating the smoothed value
+  for that point). 
+
+  An alternative to  inverse-distance weighting is kernel smoothing,
+  which is performed by \code{\link{Smooth.ppp}}.
+}
+\value{
+  \emph{If \code{X} has a single column of marks:}
+  \itemize{
+    \item 
+    If \code{at="pixels"} (the default), the result is
+    a pixel image (object of class \code{"im"}). 
+    Pixel values are values of the interpolated function.
+    \item
+    If \code{at="points"}, the result is a numeric vector
+    of length equal to the number of points in \code{X}.
+    Entries are values of the interpolated function at the points of \code{X}.
+  }
+  \emph{If \code{X} has a data frame of marks:}
+  \itemize{
+    \item 
+    If \code{at="pixels"} (the default), the result is a named list of 
+    pixel images (object of class \code{"im"}). There is one
+    image for each column of marks. This list also belongs to
+    the class \code{"solist"}, for which there is a plot method.
+    \item
+    If \code{at="points"}, the result is a data frame
+    with one row for each point of \code{X},
+    and one column for each column of marks. 
+    Entries are values of the interpolated function at the points of \code{X}.
+  }
+}
+\seealso{
+  \code{\link{density.ppp}},
+  \code{\link{ppp.object}},
+  \code{\link{im.object}}.
+
+  See \code{\link{Smooth.ppp}} for kernel smoothing
+  and \code{\link{nnmark}} for nearest-neighbour interpolation.
+  
+  To perform other kinds of interpolation, see also the \pkg{akima} package.
+}
+\examples{
+   # data frame of marks: trees marked by diameter and height
+   data(finpines)
+   plot(idw(finpines))
+   idw(finpines, at="points")[1:5,]
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/im.Rd b/man/im.Rd
new file mode 100644
index 0000000..6fd1b29
--- /dev/null
+++ b/man/im.Rd
@@ -0,0 +1,146 @@
+\name{im}
+\alias{im}
+\title{Create a Pixel Image Object}
+\description{
+  Creates an object of
+  class \code{"im"} representing a two-dimensional pixel image.
+}
+\usage{
+  im(mat, xcol=seq_len(ncol(mat)), yrow=seq_len(nrow(mat)),
+   xrange=NULL, yrange=NULL,
+   unitname=NULL)
+}
+\arguments{
+  \item{mat}{
+    matrix or vector containing the pixel values of the image.
+  }
+  \item{xcol}{
+    vector of \eqn{x} coordinates for the pixel grid
+  }
+  \item{yrow}{
+    vector of \eqn{y} coordinates for the pixel grid
+  }
+  \item{xrange,yrange}{
+    Optional. Vectors of length 2 giving the \eqn{x} and \eqn{y}
+    limits of the enclosing rectangle.
+    (Ignored if \code{xcol}, \code{yrow} are present.)
+  }
+  \item{unitname}{
+    Optional. Name of unit of length. Either a single character string,
+    or a vector of two character strings giving the
+    singular and plural forms, respectively.
+  }
+}
+\details{
+  This function creates an object of class \code{"im"} representing
+  a \sQuote{pixel image} or two-dimensional array of values.
+
+  The pixel grid is rectangular and occupies a rectangular window
+  in the spatial coordinate system. 
+  The pixel values are \emph{scalars}: they can be real numbers, integers, 
+  complex numbers, single characters or strings, 
+  logical values, or categorical values. A pixel's
+  value can also be \code{NA}, meaning that no value is defined
+  at that location, and effectively that pixel is \sQuote{outside} the window. 
+  Although the pixel values must be scalar,
+  photographic colour images (i.e., with red, green, and blue brightness
+  channels) can be represented as character-valued images in \pkg{spatstat},
+  using \R's standard encoding of colours as character strings.
+
+  The matrix \code{mat} contains the \sQuote{greyscale} values
+  for a rectangular grid of pixels.
+  Note carefully that the entry \code{mat[i,j]}
+  gives the pixel value at the location \code{(xcol[j],yrow[i])}.
+  That is, the \bold{row} index of the matrix \code{mat} corresponds
+  to increasing \bold{y} coordinate, while the column index of \code{mat}
+  corresponds to increasing \bold{x} coordinate.
+  Thus \code{yrow} has one entry for each row of \code{mat}
+  and \code{xcol} has one entry for each column of \code{mat}.
+  Under the usual convention in \R, a correct
+  display of the image would be obtained by transposing the matrix, e.g.
+  \code{image.default(xcol, yrow, t(mat))}, if you wanted to do it by hand.
+
+  The entries of \code{mat} may be numeric (real or integer), complex, 
+  logical, character, or factor values.
+  If \code{mat} is not a matrix, it will be converted into
+  a matrix with \code{nrow(mat) = length(yrow)} and
+  \code{ncol(mat) = length(xcol)}.
+  
+  To make a factor-valued image, note that 
+  \R has a quirky way of handling matrices with
+  factor-valued entries. The command \code{\link{matrix}} cannot be used
+  directly, because it destroys factor information.
+  To make a factor-valued image, do one of the following:
+  \itemize{
+    \item
+    Create a \code{factor} containing the pixel values,
+    say \code{mat <- factor(.....)}, 
+    and then assign matrix dimensions to it by \code{dim(mat) <- c(nr, nc)}
+    where \code{nr, nc} are the numbers of rows and columns. The
+    resulting object \code{mat} is both a factor and a vector.
+    \item
+    Supply \code{mat} as a one-dimensional factor
+    and specify the arguments \code{xcol} and \code{yrow}
+    to determine the dimensions of the image.
+    \item
+    Use the functions
+    \code{\link{cut.im}} or \code{\link{eval.im}} to make factor-valued
+    images from other images.
+  }
+    
+  For a description of the methods available for pixel image objects,
+  see \code{\link{im.object}}.
+
+  To convert other kinds of data to a pixel image (for example,
+  functions or windows), use \code{\link{as.im}}.
+}
+\seealso{
+  \code{\link{im.object}} for details of the class.
+  
+  \code{\link{as.im}} for converting other kinds of data to an image.
+  
+  \code{\link{as.matrix.im}},
+  \code{\link{[.im}},
+  \code{\link{eval.im}} for manipulating images.
+}
+\section{Warnings}{
+  The internal representation of images is likely to change in future
+  releases of \pkg{spatstat}. The safe way to extract pixel values
+  from an image object is to use \code{\link{as.matrix.im}}
+  or \code{\link{[.im}}.
+}
+\examples{
+   vec <- rnorm(1200)
+   mat <- matrix(vec, nrow=30, ncol=40)
+   whitenoise <- im(mat)
+   whitenoise <- im(mat, xrange=c(0,1), yrange=c(0,1))
+   whitenoise <- im(mat, xcol=seq(0,1,length=40), yrow=seq(0,1,length=30))
+   whitenoise <- im(vec, xcol=seq(0,1,length=40), yrow=seq(0,1,length=30))
+   plot(whitenoise)
+
+   # Factor-valued images:
+   f <- factor(letters[1:12])
+   dim(f) <- c(3,4)
+   Z <- im(f)
+
+   # Factor image from other image:
+   cutwhite <- cut(whitenoise, 3)
+   plot(cutwhite)
+
+   # Factor image from raw data
+   cutmat <- cut(mat, 3)
+   dim(cutmat) <- c(30,40)
+   cutwhite <- im(cutmat)
+   plot(cutwhite)
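+
+   # Factor image from a one-dimensional factor,
+   # using xcol and yrow to set the dimensions (illustrative)
+   g <- factor(rep(letters[1:3], 4))
+   Zg <- im(g, xcol=1:4, yrow=1:3)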
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
+\keyword{datagen}
+ 
+ 
diff --git a/man/im.apply.Rd b/man/im.apply.Rd
new file mode 100644
index 0000000..5928337
--- /dev/null
+++ b/man/im.apply.Rd
@@ -0,0 +1,55 @@
+\name{im.apply}
+\alias{im.apply}
+\title{
+  Apply Function Pixelwise to List of Images 
+}
+\description{
+  Returns a pixel image obtained by applying a function
+  to the values of corresponding pixels in several pixel images.
+}
+\usage{
+im.apply(X, FUN, ...)
+}
+\arguments{
+  \item{X}{
+    A list of pixel images (objects of class \code{"im"}).
+  }
+  \item{FUN}{
+    A function that can be applied to vectors,
+    or a character string giving the name of such a function.
+  }
+  \item{\dots}{
+    Additional arguments to \code{FUN}.
+  }
+}
+\details{
+  The argument \code{X} should be a list of pixel images
+  (objects of class \code{"im"}). If the images do not have
+  identical pixel grids, they will be converted to a common
+  grid using \code{\link{harmonise.im}}.
+
+  At each pixel location, the values of the images in \code{X}
+  at that pixel will be extracted as a vector. The function
+  \code{FUN} will be applied to this vector. The result (which should be
+  a single value) becomes the pixel value of the resulting image.
+}
+\value{
+  A pixel image (object of class \code{"im"}).
+}
+\seealso{
+  \code{\link{eval.im}} for algebraic operations with images.
+}
+\examples{
+  DA <- density(split(amacrine))
+  DA
+  im.apply(DA, max)
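+  # extra arguments are passed to FUN (illustrative)
+  im.apply(DA, quantile, probs=0.9)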
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
+\keyword{programming}
diff --git a/man/im.object.Rd b/man/im.object.Rd
new file mode 100644
index 0000000..f77b964
--- /dev/null
+++ b/man/im.object.Rd
@@ -0,0 +1,97 @@
+\name{im.object}
+\alias{im.object} %DoNotExport
+\title{Class of Images}
+\description{
+  A class \code{"im"} to represent a two-dimensional pixel image.
+}
+\details{
+  An object of this class represents
+  a two-dimensional pixel image. It specifies
+  \itemize{
+    \item the dimensions of the rectangular array of pixels
+    \item \eqn{x} and \eqn{y} coordinates for the pixels
+    \item a numeric value (``grey value'') at each pixel
+  }
+  If \code{X} is an object of type \code{im},
+  it contains the following elements:
+  \tabular{ll}{
+    \code{v} \tab matrix of values \cr
+    \code{dim} \tab dimensions of matrix \code{v} \cr
+    \code{xrange} \tab range of \eqn{x} coordinates of image window \cr
+    \code{yrange} \tab range of \eqn{y} coordinates of image window \cr
+    \code{xstep} \tab width of one pixel \cr
+    \code{ystep} \tab height of one pixel \cr
+    \code{xcol} \tab vector of \eqn{x} coordinates of centres of pixels \cr
+    \code{yrow} \tab vector of \eqn{y} coordinates of centres of pixels 
+  }
+  Users are strongly advised not to manipulate these entries
+  directly.
+
+  Objects of class \code{"im"}
+  may be created by the functions
+  \code{\link{im}} and \code{\link{as.im}}.
+  Image objects are also returned by various functions including
+  \code{\link{distmap}}, \code{\link{Kmeasure}}, \code{\link{setcov}}, 
+  \code{\link{eval.im}} and \code{\link{cut.im}}.
+
+  Image objects may be displayed using the methods
+  \code{\link{plot.im}}, \code{image.im}, \code{\link{persp.im}}
+  and \code{contour.im}. There are also methods 
+  \code{\link{print.im}} for printing information about an image,
+  \code{\link{summary.im}} for summarising an image,
+  \code{\link{mean.im}} for calculating the average pixel value, 
+  \code{\link{hist.im}} for plotting a histogram of pixel values,
+  \code{\link{quantile.im}} for calculating quantiles of pixel values,
+  and \code{\link{cut.im}} for dividing the range of pixel values into
+  categories.
+
+  Pixel values in an image may be extracted
+  using the subset operator \code{\link{[.im}}.
+  To extract all pixel values from an image object,
+  use \code{\link{as.matrix.im}}.
+  The levels of a factor-valued image can be extracted and
+  changed with \code{levels} and \code{levels<-}.
+
+  Calculations involving one or more images (for example,
+  squaring all the pixel values in an image, converting numbers to 
+  factor levels, or 
+  subtracting one image from another) can often be done
+  easily using \code{\link{eval.im}}.
+  To find all pixels satisfying
+  a certain constraint, use \code{\link{solutionset}}.
+  
+  Note carefully that the entry \code{v[i,j]}
+  gives the pixel value at the location \code{(xcol[j],yrow[i])}.
+  That is, the \bold{row} index of the matrix \code{v} corresponds
+  to increasing \bold{y} coordinate, while the column index of \code{v}
+  corresponds to increasing \bold{x} coordinate.
+  Thus \code{yrow} has one entry for each row of \code{v}
+  and \code{xcol} has one entry for each column of \code{v}.
+  Under the usual convention in \R, a correct
+  display of the image would be obtained by transposing the matrix, e.g.
+  \code{image.default(xcol, yrow, t(v))}, if you wanted to do it by hand.
+}
+\seealso{
+  \code{\link{im}},
+  \code{\link{as.im}},
+  \code{\link{plot.im}},
+  \code{\link{persp.im}},
+  \code{\link{eval.im}},
+  \code{\link{[.im}}
+}
+\section{Warnings}{
+  The internal representation of images is likely to change in future
+  releases of \pkg{spatstat}. Do not address the entries in an
+  image directly. To extract all pixel values from an image object,
+  use \code{\link{as.matrix.im}}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{attribute}
+ 
+ 
diff --git a/man/imcov.Rd b/man/imcov.Rd
new file mode 100644
index 0000000..92669ed
--- /dev/null
+++ b/man/imcov.Rd
@@ -0,0 +1,73 @@
+\name{imcov}
+\alias{imcov}
+\title{Spatial Covariance of a Pixel Image}
+\description{
+  Computes the unnormalised spatial covariance function of a pixel image.
+}
+\usage{
+ imcov(X, Y=X)
+}
+\arguments{
+  \item{X}{
+    A pixel image (object of class \code{"im"}).
+  }
+  \item{Y}{
+    Optional. Another pixel image.
+  }
+}
+\value{
+  A pixel image (an object of class \code{"im"}) representing the
+  spatial covariance function of \code{X},
+  or the cross-covariance of \code{X} and \code{Y}.
+}
+\details{
+  The (uncentred, unnormalised)
+  \emph{spatial covariance function} of a pixel image \eqn{X} in the plane
+  is the function \eqn{C(v)} defined for each vector \eqn{v} as
+  \deqn{
+    C(v) = \int X(u)X(u-v)\, {\rm d}u
+  }{
+    C(v) = integral of X(u) * X(u-v) du
+  }
+  where the integral is
+  over all spatial locations \eqn{u}, and where \eqn{X(u)} denotes the
+  pixel value at location \eqn{u}.
+  
+  This command computes a discretised approximation to
+  the spatial covariance function, using the Fast Fourier Transform.
+  The return value is
+  another pixel image (object of class \code{"im"}) whose greyscale values
+  are values of the spatial covariance function.
+
+  If the argument \code{Y} is present, then \code{imcov(X,Y)}
+  computes the \emph{cross-covariance} function \eqn{C(v)}
+  defined as 
+  \deqn{
+    C(v) = \int X(u)Y(u-v)\, {\rm d}u.
+  }{
+    C(v) = integral of X(u) * Y(u-v) du.
+  }
+
+  Note that \code{imcov(X,Y)} is equivalent to
+  \code{convolve.im(X,Y,reflectY=TRUE)}.
+}
+\seealso{
+  \code{\link{setcov}},
+  \code{\link{convolve.im}},
+  \code{\link{owin}},
+  \code{\link{as.owin}},
+  \code{\link{erosion}}
+}
+\examples{
+  X <- as.im(square(1))
+  v <- imcov(X)
+  plot(v)
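+  # cross-covariance of two images (illustrative)
+  Y <- as.im(function(x,y) { x }, square(1))
+  plot(imcov(X, Y))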
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/improve.kppm.Rd b/man/improve.kppm.Rd
new file mode 100644
index 0000000..5412d07
--- /dev/null
+++ b/man/improve.kppm.Rd
@@ -0,0 +1,136 @@
+\name{improve.kppm}
+\alias{improve.kppm}
+\title{Improve Intensity Estimate of Fitted Cluster Point Process Model}
+\description{
+  Update the fitted intensity of a fitted cluster point process model.
+}
+\usage{
+improve.kppm(object, type=c("quasi", "wclik1", "clik1"), rmax = NULL,
+             eps.rmax = 0.01, dimyx = 50, maxIter = 100, tolerance = 1e-06,
+             fast = TRUE, vcov = FALSE, fast.vcov = FALSE, verbose = FALSE,
+                          save.internals = FALSE)
+}
+\arguments{
+  \item{object}{
+    Fitted cluster point process model (object of class \code{"kppm"}).
+  }
+  \item{type}{
+    A character string indicating the method of estimation.
+    Current options are \code{"clik1"}, \code{"wclik1"} and \code{"quasi"}
+    for, respectively, first order composite (Poisson) likelihood,
+    weighted first order composite likelihood and quasi-likelihood.
+  }
+  \item{rmax}{
+    Optional. The dependence range. Not usually specified by the user.
+  }
+  \item{eps.rmax}{
+    Numeric. A small positive number which is used to determine \code{rmax}
+    from the tail behaviour of the pair correlation function. Namely
+    \code{rmax} is the smallest value of \eqn{r}
+    at which \eqn{(g(r)-1)/(g(0)-1)}
+    falls below \code{eps.rmax}. 
+    Ignored if \code{rmax} is provided.
+  }
+  \item{dimyx}{
+    Pixel array dimensions. See Details.
+  }
+  \item{maxIter}{
+    Integer. Maximum number of iterations of iterative weighted least squares
+    (Fisher scoring).
+  }
+  \item{tolerance}{
+    Numeric. Tolerance value specifying when to stop iterative weighted
+    least squares (Fisher scoring).
+  }
+  \item{fast}{
+    Logical value indicating whether tapering should be used to make the 
+    computations faster (requires the package \pkg{Matrix}).
+  }
+  \item{vcov}{
+    Logical value indicating whether to calculate the asymptotic
+    variance/covariance matrix.
+  }
+  \item{fast.vcov}{
+    Logical value indicating whether tapering should be used for the
+    variance/covariance matrix to make the computations faster
+    (requires the package \pkg{Matrix}). Caution:
+    This is expected to underestimate the true asymptotic variances/covariances.
+  }
+  \item{verbose}{
+    A logical indicating whether the details of computations should be printed.
+  }
+  \item{save.internals}{
+    A logical indicating whether internal quantities should be saved in the
+    returned object (mostly for development purposes).
+  }
+}
+\value{
+  A fitted cluster point process model of class \code{"kppm"}.
+}
+\details{
+  This function reestimates the intensity parameters in a fitted \code{"kppm"}
+  object. If \code{type="clik1"} estimates are based on the first order
+  composite (Poisson) likelihood, which ignores dependence between the
+  points. Note that \code{type="clik1"} is mainly included for testing
+  purposes and is not recommended for the typical user;
+  instead the more efficient \code{\link{kppm}}
+  with \code{improve.type="none"} should be used.
+  
+  When \code{type="quasi"} or \code{type="wclik1"} the dependence
+  structure between the points is incorporated in the estimation
+  procedure by using the estimated pair correlation function in the
+  estimating equation.
+
+  In all cases the estimating equation is based on dividing the
+  observation window into small subregions and counting the number of points
+  in each subregion. To do this the observation window is first
+  converted into a digital mask by \code{\link{as.mask}} where the
+  resolution is controlled by the argument \code{dimyx}. The
+  computational time grows with the cube of the number of subregions, so fine
+  grids may take a very long time to compute (or even exhaust the available memory).
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{kppm}},
+  \code{\link{improve.kppm}}
+}
+\references{
+  Waagepetersen, R. (2007) An estimating function approach to inference
+  for inhomogeneous Neyman-Scott processes, \emph{Biometrics},
+  \bold{63}, 252-258.
+  
+  Guan, Y. and Shen, Y. (2010) A weighted estimating equation approach
+  to inference for inhomogeneous spatial point processes, \emph{Biometrika},
+  \bold{97}, 867-880.
+  
+  Guan, Y., Jalilian, A. and Waagepetersen, R. (2015)
+  Quasi-likelihood for spatial point processes. 
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{77}, 677--697.
+}
+\examples{
+  # fit a Thomas process, using the minimum contrast method,
+  # to model interaction between points of the pattern
+  fit0 <- kppm(bei ~ elev + grad, data = bei.extra)
+
+  # fit the log-linear intensity model with quasi-likelihood method
+  fit1 <- improve.kppm(fit0, type="quasi")
+
+  # compare
+  coef(fit0)
+  coef(fit1)
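+  # weighted first order composite likelihood, for comparison (illustrative)
+  fit2 <- improve.kppm(fit0, type="wclik1")
+  coef(fit2)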
+}
+\author{Abdollah Jalilian
+  \email{jalilian at razi.ac.ir}
+%  \url{http://www.razi.ac.ir/ajalilian/}
+  and Rasmus Waagepetersen
+  \email{rw at math.aau.dk}
+  adapted for \pkg{spatstat} by \adrian
+  
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{fit model}
+
diff --git a/man/incircle.Rd b/man/incircle.Rd
new file mode 100644
index 0000000..3befd4b
--- /dev/null
+++ b/man/incircle.Rd
@@ -0,0 +1,59 @@
+\name{incircle}
+\alias{incircle}
+\alias{inradius}
+\title{Find Largest Circle Inside Window}
+\description{
+  Find the largest circle contained in a given window.
+}
+\usage{
+incircle(W)
+
+inradius(W)
+}
+\arguments{
+  \item{W}{A window (object of class \code{"owin"}).}
+}
+\details{
+  Given a window \code{W} of any type and shape, 
+  the function \code{incircle} determines the largest circle
+  that is contained inside \code{W}, while \code{inradius} computes its
+  radius only.
+
+  For non-rectangular windows, the incircle is computed approximately
+  by finding the maximum of the distance map (see
+  \code{\link{distmap}}) of the complement of the window.
+}
+\value{
+  The result of \code{incircle} is 
+  a list with entries \code{x,y,r} giving the location \code{(x,y)}
+  and radius \code{r} of the incircle.
+
+  The result of \code{inradius} is the numerical value of radius.
+}
+\seealso{
+  \code{\link{centroid.owin}}
+}
+\examples{
+  W <- square(1)
+  Wc <- incircle(W)
+  plot(W)
+  plot(disc(Wc$r, c(Wc$x, Wc$y)), add=TRUE)
+
+  plot(letterR)
+  Rc <- incircle(letterR)
+  plot(disc(Rc$r, c(Rc$x, Rc$y)), add=TRUE)
+
+  W <- as.mask(letterR)
+  plot(W)
+  Rc <- incircle(W)
+  plot(disc(Rc$r, c(Rc$x, Rc$y)), add=TRUE)
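+
+  # inradius returns just the radius of the incircle
+  inradius(letterR)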
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/increment.fv.Rd b/man/increment.fv.Rd
new file mode 100644
index 0000000..34d6539
--- /dev/null
+++ b/man/increment.fv.Rd
@@ -0,0 +1,48 @@
+\name{increment.fv}
+\alias{increment.fv}
+\title{
+  Increments of a Function
+}
+\description{
+  Compute the change in the value of a function \code{f}
+  when the function argument increases by \code{delta}.
+}
+\usage{
+increment.fv(f, delta)
+}
+\arguments{
+  \item{f}{
+    Object of class \code{"fv"} representing a function.
+  }
+  \item{delta}{
+    Numeric. The increase in the value of the function argument.
+  }
+}
+\details{
+  This command computes the new function
+  \deqn{g(x) = f(x+h) - f(x-h)}
+  where \code{h = delta/2}. The value of \eqn{g(x)} is
+  the change in the value of \eqn{f} over an interval of length
+  \code{delta} centred at \eqn{x}.
+}
+\value{
+  Another object of class \code{"fv"} compatible with \code{f}.
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{fv.object}}, 
+  \code{\link{deriv.fv}}
+}
+\examples{
+  plot(increment.fv(Kest(cells), 0.05))
+}
+\keyword{spatial}
+\keyword{math}
+\keyword{nonparametric}
diff --git a/man/infline.Rd b/man/infline.Rd
new file mode 100644
index 0000000..3215b97
--- /dev/null
+++ b/man/infline.Rd
@@ -0,0 +1,100 @@
+\name{infline}
+\alias{infline}
+\alias{plot.infline}
+\alias{print.infline}
+\title{Infinite Straight Lines}
+\description{
+  Define the coordinates of one or more straight lines in the plane
+}
+\usage{
+infline(a = NULL, b = NULL, h = NULL, v = NULL, p = NULL, theta = NULL)
+
+\method{print}{infline}(x, \dots)
+
+\method{plot}{infline}(x, \dots)
+}
+\arguments{
+  \item{a,b}{Numeric vectors of equal length giving the
+    intercepts \eqn{a} and slopes \eqn{b} of the lines.
+    Incompatible with \code{h,v,p,theta}
+  }
+  \item{h}{Numeric vector giving the positions of horizontal lines when
+    they cross the \eqn{y} axis.
+    Incompatible with \code{a,b,v,p,theta}
+  }
+  \item{v}{Numeric vector giving the positions of vertical lines when
+    they cross the \eqn{x} axis.
+    Incompatible with \code{a,b,h,p,theta}
+  }
+  \item{p,theta}{Numeric vectors of equal length
+    giving the polar coordinates of the line.
+    Incompatible with \code{a,b,h,v}
+  }
+  \item{x}{An object of class \code{"infline"}}
+  \item{\dots}{
+    Extra arguments passed to \code{\link[base]{print}}
+    for printing or \code{\link[graphics]{abline}} for plotting
+  }
+}
+\details{
+  The class \code{infline} is a convenient way to handle
+  infinite straight lines in the plane.
+
+  The position of a line can be specified in several ways:
+  \itemize{
+    \item
+    its intercept \eqn{a} and slope \eqn{b} 
+    in the equation \eqn{y = a + b x}{y = a + b * x}
+    can be used unless the line is vertical.
+    \item
+    for vertical lines we can use the
+    position \eqn{v} where the line crosses the \eqn{x} axis
+    \item
+    for horizontal lines we can use the
+    position \eqn{h} where the line crosses the \eqn{y} axis
+    \item
+    the polar coordinates \eqn{p} and \eqn{\theta}{theta}
+    can be used for any line. The line equation is
+    \deqn{
+      x \cos\theta + y \sin\theta = p
+    }{
+      x * cos(theta) + y * sin(theta) = p
+    }
+  }
+  The command \code{infline} will accept line coordinates in any
+  of these formats. The arguments \code{a,b,h,v} have the same interpretation
+  as they do in the line-plotting function
+  \code{\link[graphics]{abline}}.
+
+  The command \code{infline} converts between different coordinate
+  systems (e.g. from \code{a,b} to \code{p,theta}) and returns an
+  object of class \code{"infline"}
+  that contains a representation of the lines in
+  each appropriate coordinate system. This object can be printed
+  and plotted.
+}
+\value{
+  The value of \code{infline} is an object of class \code{"infline"}
+  which is basically a data frame with columns \code{a,b,h,v,p,theta}.
+  Each row of the data frame represents one line. 
+  Entries may be \code{NA} if a coordinate is not applicable to
+  a particular line. 
+}
+\seealso{
+  \code{\link{rotate.infline}},
+  \code{\link{clip.infline}},
+  \code{\link{chop.tess}},
+  \code{\link{whichhalfplane}}
+}
+\examples{
+  infline(a=10:13,b=1)
+  infline(p=1:3, theta=pi/4)
+  plot(c(-1,1),c(-1,1),type="n",xlab="",ylab="", asp=1)
+  plot(infline(p=0.4, theta=seq(0,pi,length=20)))
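+  # illustrative: the object stores every coordinate system, so a line
+  # given by intercept and slope also acquires polar coordinates
+  L <- infline(a=1, b=2)
+  L$p
+  L$theta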
+}
+\author{
+  \adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/influence.ppm.Rd b/man/influence.ppm.Rd
new file mode 100644
index 0000000..cc132a5
--- /dev/null
+++ b/man/influence.ppm.Rd
@@ -0,0 +1,95 @@
+\name{influence.ppm}
+\alias{influence.ppm}
+\title{
+  Influence Measure for Spatial Point Process Model
+}
+\description{
+  Computes the influence measure for a fitted spatial point process model.
+}
+\usage{
+\method{influence}{ppm}(model, ..., drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL)
+}
+\arguments{
+  \item{model}{
+    Fitted point process model (object of class \code{"ppm"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{drop}{
+    Logical. Whether to include (\code{drop=FALSE}) or
+    exclude (\code{drop=TRUE}) contributions from quadrature
+    points that were not used to fit the model.
+  }
+  \item{iScore,iHessian}{
+    Components of the score vector and Hessian matrix for
+    the irregular parameters, if required. See Details.
+  }
+  \item{iArgs}{
+    List of extra arguments for the functions \code{iScore},
+    \code{iHessian} if required.
+  }
+}
+\details{
+  Given a fitted spatial point process model \code{model},
+  this function computes the influence measure
+  described in Baddeley, Chang and Song (2013).
+   
+  The function \code{\link[stats]{influence}} is generic,
+  and \code{influence.ppm} is the method for objects of class
+  \code{"ppm"} representing point process models.
+
+  The influence of a point process model is a value attached to each data point
+  (i.e. each point of the point pattern to which the \code{model}
+  was fitted).
+  The influence value \eqn{s(x_i)}{s(x[i])} at a data point
+  \eqn{x_i}{x[i]} represents the change in the maximised log (pseudo)likelihood
+  that occurs when the point \eqn{x_i}{x[i]} is deleted.
+  A relatively large value of \eqn{s(x_i)}{s(x[i])} indicates a 
+  data point with a large influence on the fitted model.
+  
+  If the point process model trend has irregular parameters that were
+  fitted (using \code{\link{ippm}})
+  then the influence calculation requires the first and second
+  derivatives of the log trend with respect to the irregular parameters. 
+  The argument \code{iScore} should be a list,
+  with one entry for each irregular parameter, of \R functions that compute the
+  partial derivatives of the log trend (i.e. log intensity or
+  log conditional intensity) with respect to each irregular
+  parameter. The argument \code{iHessian} should be a list,
+  with \eqn{p^2} entries where \eqn{p} is the number of irregular
+  parameters, of \R functions that compute the second order
+  partial derivatives of the
+  log trend with respect to each pair of irregular parameters.
+  
+  The result of \code{influence.ppm} is 
+  an object of class \code{"influence.ppm"}. It can be plotted
+  (by \code{\link{plot.influence.ppm}}), or converted to a marked
+  point pattern by \code{as.ppp} (see \code{\link{as.ppp.influence.ppm}}).
+}
+\value{
+  An object of class \code{"influence.ppm"} that can be plotted
+  (by \code{\link{plot.influence.ppm}}). There are also methods
+  for \code{print}, \code{[}, \code{as.ppp} and \code{as.owin}.
+}
+\references{
+  Baddeley, A. and Chang, Y.M. and Song, Y. (2013)
+  Leverage and influence diagnostics for spatial point process models.
+  \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{leverage.ppm}},
+  \code{\link{dfbetas.ppm}},
+  \code{\link{ppmInfluence}},
+  \code{\link{plot.influence.ppm}}
+}
+\examples{
+   X <- rpoispp(function(x,y) { exp(3+3*x) })
+   fit <- ppm(X ~ x + y)
+   plot(influence(fit))
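+   # convert to a marked point pattern, as described in Details (illustrative)
+   as.ppp(influence(fit))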
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/inforder.family.Rd b/man/inforder.family.Rd
new file mode 100644
index 0000000..b8bcbb5
--- /dev/null
+++ b/man/inforder.family.Rd
@@ -0,0 +1,45 @@
+\name{inforder.family}
+\alias{inforder.family}
+\title{Infinite Order Interaction Family}
+\description{
+  An object describing the family of all Gibbs point processes
+  with infinite interaction order.
+}
+\details{
+  \bold{Advanced Use Only!}
+  
+  This structure would not normally be touched by
+  the user. It describes the interaction structure
+  of Gibbs point processes which have infinite order of interaction,
+  such as the area-interaction process \code{\link{AreaInter}}.
+ 
+  Technically, \code{inforder.family} is an object of class \code{"isf"}
+  containing a function \code{inforder.family$eval} for
+  evaluating the sufficient statistics of a Gibbs
+  point process model taking an exponential family form. 
+} 
+\seealso{
+  \code{\link{AreaInter}} to create the area interaction process
+  structure.
+  
+  Other families:
+  \code{\link{pairwise.family}},
+  \code{\link{pairsat.family}},
+  \code{\link{ord.family}}.
+
+  
+}
+\references{
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42}, 283--322.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/insertVertices.Rd b/man/insertVertices.Rd
new file mode 100644
index 0000000..773dafd
--- /dev/null
+++ b/man/insertVertices.Rd
@@ -0,0 +1,82 @@
+\name{insertVertices}
+\alias{insertVertices}
+\title{
+  Insert New Vertices in a Linear Network
+}
+\description{
+  Adds new vertices to a linear network
+  at specified locations along the network.
+}
+\usage{
+insertVertices(L, \dots)
+}
+\arguments{
+  \item{L}{
+    Linear network (object of class \code{"linnet"})
+    or point pattern on a linear network (object of class \code{"lpp"}).
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link{as.lpp}}
+    specifying the positions of the new vertices along the network.
+  }
+}
+\details{
+  This function adds new vertices at locations along an existing
+  linear network.
+  
+  The argument \code{L} can be either a linear network (class
+  \code{"linnet"}) or some other object that includes a linear network.
+ 
+  The new vertex locations can be specified either as a
+  point pattern (class \code{"lpp"} or \code{"ppp"})
+  or using coordinate vectors \code{x,y} or \code{seg,tp}
+  or \code{x,y,seg,tp} as explained in the help for \code{\link{as.lpp}}.
+
+  This function breaks the existing line segments
+  of \code{L} into pieces at the locations specified by
+  the coordinates \code{seg,tp} and creates new vertices at these
+  locations.
+
+  The result is the modified object, with an attribute \code{"id"} such that
+  the \code{i}th added vertex has become the
+  \code{id[i]}th vertex of the new network.
+}
+\value{
+  An object of the same class as \code{L} representing the result of
+  adding the new vertices.
+  The result also has an attribute \code{"id"} as described in Details.
+}
+\author{
+  Adrian Baddeley
+}
+\seealso{
+  \code{\link{as.lpp}}
+}
+\examples{
+   opa <- par(mfrow=c(1,3), mar=rep(0,4))
+   simplenet
+
+   plot(simplenet, main="")
+   plot(vertices(simplenet), add=TRUE)
+
+   # add two new vertices at specified local coordinates
+   L <- insertVertices(simplenet, seg=c(3,7), tp=c(0.2, 0.5))
+   L
+   plot(L, main="")
+   plot(vertices(L), add=TRUE)
+   id <- attr(L, "id")
+   id
+   plot(vertices(L)[id], add=TRUE, pch=16)
+
+   # add new vertices at three randomly-generated points
+   X <- runiflpp(3, simplenet)
+   LL <- insertVertices(simplenet, X)
+   plot(LL, main="")
+   plot(vertices(LL), add=TRUE)
+   ii <- attr(LL, "id")
+   plot(vertices(LL)[ii], add=TRUE, pch=16)
+   par(opa)
+}
+\keyword{spatial}
+\keyword{manip}
+
diff --git a/man/inside.boxx.Rd b/man/inside.boxx.Rd
new file mode 100644
index 0000000..f9872dc
--- /dev/null
+++ b/man/inside.boxx.Rd
@@ -0,0 +1,71 @@
+\name{inside.boxx}
+\alias{inside.boxx}
+\title{Test Whether Points Are Inside A Multidimensional Box}
+\description{
+ Test whether points lie inside or outside
+ a given multidimensional box.
+}
+\usage{
+ inside.boxx(\dots, w)
+}
+\arguments{
+  \item{\dots}{
+    Coordinates of points to be tested.
+    One vector for each dimension (all of same length).
+    (Alternatively, a single point pattern object
+    of class \code{"\link{ppx}"}
+    or its coordinates as a \code{"\link{hyperframe}"})
+  }
+  \item{w}{A multidimensional box.
+    This should be an object of class \code{\link{boxx}},
+    or can be given in any format acceptable to \code{\link{as.boxx}()}.
+  }
+}
+\value{
+  Logical vector whose \code{i}th entry is 
+  \code{TRUE} if the corresponding point is inside \code{w}. 
+}
+\details{
+  This function tests whether each of the given points
+  lies inside or outside
+  the box \code{w} and returns \code{TRUE} if it is inside.
+  
+  The boundary of the box is treated as being inside.
+
+  Normally, each argument provided (except \code{w}) must be a numeric
+  vector containing the coordinates of the points in one dimension;
+  all of these vectors must have equal length (length zero is allowed).
+  Alternatively a single point pattern (object of class \code{"ppx"})
+  can be given; then the coordinates of the point pattern are extracted.
+}
+\seealso{
+  \code{\link{boxx}},
+  \code{\link{as.boxx}}
+}
+\examples{
+  # Three-dimensional box with sides [0,2]
+  w <- boxx(c(0,2), c(0,2), c(0,2))
+
+  # Random points in box with side [-1,3]
+  x <- runif(30, min=-1, max=3)
+  y <- runif(30, min=-1, max=3)
+  z <- runif(30, min=-1, max=3)
+
+  # Points falling in smaller box
+  ok <- inside.boxx(x, y, z, w=w)
+
+  # Same using a point pattern as argument:
+  X <- ppx(data = cbind(x, y, z), domain = boxx(c(-1,3), c(-1,3), c(-1,3)))
+  ok2 <- inside.boxx(X, w=w)
+}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/inside.owin.Rd b/man/inside.owin.Rd
new file mode 100644
index 0000000..e223411
--- /dev/null
+++ b/man/inside.owin.Rd
@@ -0,0 +1,89 @@
+\name{inside.owin}
+\alias{inside.owin}
+\title{Test Whether Points Are Inside A Window}
+\description{
+ Test whether points lie inside or outside
+ a given window.
+}
+\usage{
+ inside.owin(x, y, w)
+}
+\arguments{
+  \item{x}{
+    Vector of \eqn{x} coordinates of points to be tested.
+    (Alternatively, a point pattern object providing both
+    \eqn{x} and \eqn{y} coordinates.)
+  }
+  \item{y}{
+    Vector of \eqn{y} coordinates of points to be tested.
+  }
+  \item{w}{A window.
+    This should be an object of class \code{\link{owin}},
+    or can be given in any format acceptable to \code{\link{as.owin}()}.
+  }
+}
+\value{
+  Logical vector whose \code{i}th entry is 
+  \code{TRUE} if the corresponding point \code{(x[i],y[i])}
+  is inside \code{w}. 
+}
+\details{
+  This function tests whether each of the points 
+  \code{(x[i],y[i])} lies inside or outside
+  the window \code{w} and returns \code{TRUE} if it is inside.
+  
+  The boundary of the window is treated as being inside.
+
+  If \code{w} is of type \code{"rectangle"} or 
+  \code{"polygonal"}, the algorithm uses analytic geometry
+  (the discrete Stokes theorem).
+  Computation time is linear in the number of points
+  and (for polygonal windows) in the number of vertices of the
+  boundary polygon. Boundary cases are correct to single
+  precision accuracy.
+  
+  If \code{w} is of type \code{"mask"} then the 
+  pixel closest to \code{(x[i],y[i])} is tested. The
+  results may be incorrect for points lying within 
+  one pixel diameter of the window boundary.
+
+  Normally \code{x} and \code{y} must be numeric vectors of
+  equal length (length zero is allowed) containing the coordinates
+  of points. Alternatively \code{x}
+  can be a point pattern (object of class \code{"ppp"}) while \code{y}
+  is missing; then the coordinates of the point pattern are extracted.
+}
+\seealso{
+  \code{\link{owin.object}},
+  \code{\link{as.owin}}
+}
+\examples{
+  # hexagonal window
+  k <- 6
+  theta <- 2 * pi * (0:(k-1))/k
+  co <- cos(theta)
+  si <- sin(theta)
+  mas <- owin(c(-1,1), c(-1,1), poly=list(x=co, y=si))
+  \dontrun{
+  plot(mas)
+  }
+
+  # random points in rectangle
+  x <- runif(30,min=-1, max=1)
+  y <- runif(30,min=-1, max=1)
+
+  ok <- inside.owin(x, y, mas)
+
+  \dontrun{
+  points(x[ok], y[ok])
+  points(x[!ok], y[!ok], pch="x")
+  }  
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/integral.im.Rd b/man/integral.im.Rd
new file mode 100644
index 0000000..4f2049e
--- /dev/null
+++ b/man/integral.im.Rd
@@ -0,0 +1,83 @@
+\name{integral.im}
+\alias{integral}
+\alias{integral.im}
+\title{
+  Integral of a Pixel Image
+}
+\description{
+  Computes the integral of a pixel image.
+}
+\usage{
+integral(f, domain=NULL, \dots)
+
+\method{integral}{im}(f, domain=NULL, \dots)
+}
+\arguments{
+  \item{f}{
+    A pixel image (object of class \code{"im"}) with pixel values
+    that can be treated as numeric or complex values.
+  }
+  \item{domain}{
+    Optional. Window specifying the domain of integration.
+    Alternatively a tessellation.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  The function \code{integral} is generic, with methods
+  for \code{"im"}, \code{"msr"}, \code{"linim"} and \code{"linfun"}.
+  
+  The method \code{integral.im} treats the pixel image \code{f} as a function of
+  the spatial coordinates, and computes its integral.
+  The integral is calculated
+  by summing the pixel values and multiplying by the area of one pixel.
+
+  The pixel values of \code{f} may be numeric, integer, logical or
+  complex. They cannot be factor or character values.
+
+  The logical values \code{TRUE} and \code{FALSE} are converted to
+  \code{1} and \code{0} respectively, so that the integral of a logical
+  image is the total area of the \code{TRUE} pixels, in the same units
+  as \code{unitname(f)}.
+
+  If \code{domain} is a window (class \code{"owin"}) then the integration
+  will be restricted to this window. If \code{domain} is a tessellation
+  (class \code{"tess"}) then the integral of \code{f} in each
+  tile of \code{domain} will be computed.
+}
+\value{
+  A single numeric or complex value (or a vector of such values
+  if \code{domain} is a tessellation).
+}
+\seealso{
+  \code{\link{eval.im}},
+  \code{\link{[.im}}
+}
+\examples{
+   # approximate integral of f(x,y) dx dy
+   f <- function(x,y){3*x^2 + 2*y}
+   Z <- as.im(f, square(1))
+   integral.im(Z)
+   # correct answer is 2
+
+   D <- density(cells)
+   integral.im(D)
+   # should be approximately equal to number of points = 42
+
+   # integrate over the subset [0.1,0.9] x [0.2,0.8]
+   W <- owin(c(0.1,0.9), c(0.2,0.8))
+   integral.im(D, W)
+}
+\author{
+\adrian
+
+
+\rolf
+
+and \ege
+
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/integral.linim.Rd b/man/integral.linim.Rd
new file mode 100644
index 0000000..8d36530
--- /dev/null
+++ b/man/integral.linim.Rd
@@ -0,0 +1,61 @@
+\name{integral.linim}
+\alias{integral.linim}
+\alias{integral.linfun}
+\title{
+  Integral on a Linear Network
+}
+\description{
+  Computes the integral (total value) of a function or pixel image
+  over a linear network.
+}
+\usage{
+\method{integral}{linim}(f, domain=NULL, ...)
+
+\method{integral}{linfun}(f, domain=NULL, ..., delta)
+}
+\arguments{
+  \item{f}{
+    A pixel image on a linear network (class \code{"linim"})
+    or a function on a linear network (class \code{"linfun"}).
+  }
+  \item{domain}{
+    Optional window specifying the domain of integration.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{delta}{
+    Optional. 
+    The step length (in coordinate units)
+    for computing the approximate integral.
+    A single positive number.
+  }
+}
+\details{
+  The integral (total value of the function over the network) is calculated.
+  If \code{domain} is given, the integration is restricted to this window.
+  For a function on a network (class \code{"linfun"}), the integral is
+  computed approximately by sampling the function at a step length
+  controlled by the argument \code{delta}.
+}
+\value{
+  A numeric value.
+}
+\seealso{
+  \code{\link{linim}},
+  \code{\link{integral.im}}
+}
+\examples{
+  # make some data
+  xcoord <- linfun(function(x,y,seg,tp) { x }, simplenet)
+  integral(xcoord)
+  X <- as.linim(xcoord)
+  integral(X)
+}
+\author{
+\adrian
+
+
+\rolf
+
+and \ege
+
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/integral.msr.Rd b/man/integral.msr.Rd
new file mode 100644
index 0000000..14a64ae
--- /dev/null
+++ b/man/integral.msr.Rd
@@ -0,0 +1,67 @@
+\name{integral.msr}
+\alias{integral.msr}
+\title{
+  Integral of a Measure
+}
+\description{
+  Computes the integral (total value) of a measure over its domain.
+}
+\usage{
+\method{integral}{msr}(f, domain=NULL, \dots)
+}
+\arguments{
+  \item{f}{
+    A signed measure or vector-valued measure (object of class
+    \code{"msr"}).
+  }
+  \item{domain}{
+    Optional window specifying the domain of integration.
+    Alternatively a tessellation.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  The integral (total value of the measure over its domain) is calculated.
+
+  If \code{domain} is a window (class \code{"owin"}) then the integration
+  will be restricted to this window. If \code{domain} is a tessellation
+  (class \code{"tess"}) then the integral of \code{f} in each
+  tile of \code{domain} will be computed.
+
+  For a multitype measure, use \code{\link{split.msr}}
+  to separate the contributions for each type of point,
+  as shown in the Examples.
+}
+\value{
+  A numeric value (for a signed measure)
+  or a vector of values (for a vector-valued measure).
+}
+\seealso{
+  \code{\link{msr}},
+  \code{\link{integral}}
+}
+\examples{
+   fit <- ppm(cells ~ x)
+   rr <- residuals(fit)
+   integral(rr)
+
+   # vector-valued measure
+   rs <- residuals(fit, type="score")
+   integral(rs)
+
+   # multitype
+   fitA <- ppm(amacrine ~ x)
+   rrA <- residuals(fitA)
+   sapply(split(rrA), integral)
+
+   # multitype and vector-valued
+   rsA <- residuals(fitA, type="score")
+   sapply(split(rsA), integral)
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/intensity.Rd b/man/intensity.Rd
new file mode 100644
index 0000000..66582e2
--- /dev/null
+++ b/man/intensity.Rd
@@ -0,0 +1,51 @@
+\name{intensity}
+\alias{intensity}
+\title{
+  Intensity of a Dataset or a Model
+}
+\description{
+  Generic function for computing the intensity of a spatial dataset
+  or spatial point process model.
+}
+\usage{
+intensity(X, ...)
+}
+\arguments{
+  \item{X}{
+    A spatial dataset or a spatial point process model.
+  }
+  \item{\dots}{
+    Further arguments depending on the class of \code{X}.
+  }
+}
+\details{
+  This is a generic function for computing the intensity of a spatial dataset
+  or spatial point process model. There are methods for point patterns
+  (objects of class \code{"ppp"}) and fitted point process models
+  (objects of class \code{"ppm"}).
+  
+  The empirical intensity of a dataset is the average density
+  (the average amount of \sQuote{stuff} per unit area or volume).
+  The empirical intensity of a point pattern is computed by the
+  method \code{\link{intensity.ppp}}.
+
+  The theoretical intensity of a stochastic model is the expected density
+  (expected  amount of \sQuote{stuff} per unit area or volume).
+  The theoretical intensity of a fitted point process model is computed by the
+  method \code{\link{intensity.ppm}}.
+}
+\value{
+  Usually a numeric value or vector.
+}
+\seealso{
+  \code{\link{intensity.ppp}},
+  \code{\link{intensity.ppm}}.
+}
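+\examples{
+  # a minimal illustration of the two methods mentioned above
+  intensity(cells)            # empirical intensity of a point pattern
+  intensity(ppm(cells ~ 1))   # theoretical intensity of a fitted model
+}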
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/intensity.dppm.Rd b/man/intensity.dppm.Rd
new file mode 100644
index 0000000..3d5fee4
--- /dev/null
+++ b/man/intensity.dppm.Rd
@@ -0,0 +1,35 @@
+\name{intensity.dppm}
+\alias{intensity.dppm}
+\alias{intensity.detpointprocfamily}
+\title{Intensity of Determinantal Point Process Model}
+\description{Extracts the intensity of a determinantal point process model.}
+\usage{
+  \method{intensity}{detpointprocfamily}(X, \dots)
+
+  \method{intensity}{dppm}(X, \dots)
+}
+\arguments{
+  \item{X}{
+    A determinantal point process model (object of class
+    \code{"detpointprocfamily"} or \code{"dppm"}).
+  }
+  \item{\dots}{Ignored.}
+}
+\value{
+  A numeric value (if the model is stationary), a pixel image
+  (if the model is non-stationary) or \code{NA} if the intensity is
+  unknown for the model.
+}
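+\examples{
+  # a small sketch, assuming a Gaussian determinantal family
+  # fitted by \code{dppm} (may take a few seconds to run)
+  fit <- dppm(swedishpines ~ 1, dppGauss())
+  intensity(fit)
+}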
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/intensity.lpp.Rd b/man/intensity.lpp.Rd
new file mode 100644
index 0000000..7294551
--- /dev/null
+++ b/man/intensity.lpp.Rd
@@ -0,0 +1,48 @@
+\name{intensity.lpp}  
+\alias{intensity.lpp}
+\title{
+  Empirical Intensity of Point Pattern on Linear Network
+}
+\description{
+  Computes the average number of points per unit length
+  in a point pattern on a linear network.
+}
+\usage{
+\method{intensity}{lpp}(X, ...)
+}
+\arguments{
+  \item{X}{
+    A point pattern on a linear network (object of class \code{"lpp"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{intensity}}.
+  It computes the empirical intensity of a point pattern
+  on a linear network (object of class \code{"lpp"}),
+  i.e. the average density of points per unit length.
+
+  If the point pattern is multitype, the intensities of the
+  different types are computed separately.
+}
+\value{
+  A numeric value (giving the intensity) or numeric vector
+  (giving the intensity for each possible type).
+}
+\seealso{
+  \code{\link{intensity}}, 
+  \code{\link{intensity.ppp}}
+}
+\examples{
+  intensity(chicago)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/intensity.ppm.Rd b/man/intensity.ppm.Rd
new file mode 100644
index 0000000..51e3146
--- /dev/null
+++ b/man/intensity.ppm.Rd
@@ -0,0 +1,95 @@
+\name{intensity.ppm}  
+\alias{intensity.ppm}
+\title{
+  Intensity of Fitted Point Process Model
+}
+\description{
+  Computes the intensity of a fitted point process model.
+}
+\usage{
+\method{intensity}{ppm}(X, \dots)
+}
+\arguments{
+  \item{X}{
+    A fitted point process model (object of class \code{"ppm"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{predict.ppm}} in some cases.
+    See Details.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{intensity}}
+  for fitted point process models (class \code{"ppm"}).
+
+  The intensity of a point process model is the expected
+  number of random points per unit area. 
+
+  If \code{X} is a Poisson point process model, the intensity of the
+  process is computed exactly.
+  The result is a numerical value if \code{X}
+  is a stationary Poisson point process, and a pixel image if \code{X}
+  is non-stationary. (In the latter case, the resolution of the pixel
+  image is controlled by the arguments \code{\dots} which are passed
+  to \code{\link{predict.ppm}}.)
+
+  If \code{X} is another Gibbs point process model, the intensity is
+  computed approximately using the Poisson-saddlepoint approximation
+  (Baddeley and Nair, 2012a, 2012b, 2016; Anderssen et al., 2014).
+  The approximation is currently available for pairwise-interaction
+  models (Baddeley and Nair, 2012a, 2012b)
+  and for the area-interaction model and Geyer saturation model
+  (Baddeley and Nair, 2016).
+
+  For a non-stationary Gibbs model, the 
+  pseudostationary solution (Baddeley and Nair, 2012b;
+  Anderssen et al., 2014) is used. The result is a pixel image,
+  whose resolution is controlled by the arguments \code{\dots} which are passed
+  to \code{\link{predict.ppm}}.
+}
+\value{
+  A numeric value (if the model is stationary)
+  or a pixel image.
+}
+\references{
+  Anderssen, R.S., Baddeley, A., DeHoog, F.R. and Nair, G.M. (2014)
+  Solution of an integral equation arising in spatial point process theory.
+  \emph{Journal of Integral Equations and Applications} 
+  \bold{26} (4) 437--453.
+
+  Baddeley, A. and Nair, G. (2012a) 
+  Fast approximation of the intensity of Gibbs point processes.
+  \emph{Electronic Journal of Statistics} \bold{6} 1155--1169.
+
+  Baddeley, A. and Nair, G. (2012b)
+  Approximating the moments of a spatial point process.
+  \emph{Stat} \bold{1}, 1, 18--30.
+  doi: 10.1002/sta4.5
+
+  Baddeley, A. and Nair, G. (2016)
+  Poisson-saddlepoint approximation for spatial point processes
+  with infinite order interaction.
+  Submitted for publication.
+}
+\seealso{
+  \code{\link{intensity}},
+  \code{\link{intensity.ppp}}
+}
+\examples{
+  fitP <- ppm(swedishpines ~ 1)
+  intensity(fitP)
+  fitS <- ppm(swedishpines ~ 1, Strauss(9))
+  intensity(fitS)
+  fitSx <- ppm(swedishpines ~ x, Strauss(9))
+  lamSx <- intensity(fitSx)
+  fitG <- ppm(swedishpines ~ 1, Geyer(9, 1))
+  lamG <- intensity(fitG)
+  fitA <- ppm(swedishpines ~ 1, AreaInter(7))
+  lamA <- intensity(fitA)
+}
+\author{
+  \adrian
+  and Gopalan Nair.
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/intensity.ppp.Rd b/man/intensity.ppp.Rd
new file mode 100644
index 0000000..25a4564
--- /dev/null
+++ b/man/intensity.ppp.Rd
@@ -0,0 +1,92 @@
+\name{intensity.ppp}  
+\alias{intensity.ppp}
+\alias{intensity.splitppp}
+\title{
+  Empirical Intensity of Point Pattern
+}
+\description{
+  Computes the average number of points per unit area
+  in a point pattern dataset.
+}
+\usage{
+\method{intensity}{ppp}(X, ..., weights=NULL)
+
+\method{intensity}{splitppp}(X, ..., weights=NULL)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{weights}{
+    Optional.
+    Numeric vector of weights attached to the points of \code{X}.
+    Alternatively, an \code{expression} which can be evaluated to
+    give a vector of weights.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{intensity}}.
+  It computes the empirical intensity of a point pattern
+  (object of class \code{"ppp"}),
+  i.e. the average density of points per unit area.
+
+  If the point pattern is multitype, the intensities of the
+  different types are computed separately.
+
+  Note that the intensity will be computed as the number of points
+  per square unit, based on the unit of length for \code{X},
+  given by \code{unitname(X)}. If the unit of length is a strange multiple
+  of a standard unit, like \code{5.7 metres}, then it can be converted
+  to the standard unit using \code{\link{rescale}}. See the Examples.
+
+  If \code{weights} are given, then the intensity is
+  computed as the total \emph{weight} per square unit.
+  The argument \code{weights} should be a numeric vector
+  of weights for each point of \code{X} (weights may be negative or
+  zero).
+
+  Alternatively \code{weights} can be an \code{expression}
+  which will be evaluated for the dataset to yield a vector of weights.
+  The expression may involve the Cartesian coordinates \eqn{x,y} of the
+  points, and the marks of the points, if any. Variable names
+  permitted in the expression include \code{x} and \code{y},
+  the name \code{marks} if \code{X} has a single column of marks,
+  the names of any columns of marks if \code{X} has a data frame of
+  marks, and the names of constants or functions that exist
+  in the global environment. See the Examples.
+}
+\value{
+  A numeric value (giving the intensity) or numeric vector
+  (giving the intensity for each possible type).
+}
+\seealso{
+  \code{\link{intensity}}, 
+  \code{\link{intensity.ppm}}
+}
+\examples{
+  japanesepines
+  intensity(japanesepines)
+  unitname(japanesepines)
+  intensity(rescale(japanesepines))
+
+  intensity(amacrine)
+  intensity(split(amacrine))
+
+  # numeric vector of weights
+  volumes <- with(marks(finpines), (pi/4) * height * diameter^2)
+  intensity(finpines, weights=volumes)
+
+  # expression for weights
+  intensity(finpines, weights=expression((pi/4) * height * diameter^2))
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/intensity.ppx.Rd b/man/intensity.ppx.Rd
new file mode 100644
index 0000000..a05ca7d
--- /dev/null
+++ b/man/intensity.ppx.Rd
@@ -0,0 +1,46 @@
+\name{intensity.ppx}
+\alias{intensity.ppx}
+\title{Intensity of a Multidimensional Space-Time Point Pattern}
+\description{
+  Calculates the intensity of points in a multi-dimensional point
+  pattern of class \code{"ppx"} or \code{"pp3"}.
+}
+\usage{
+  \method{intensity}{ppx}(X, \dots)
+}
+\arguments{
+  \item{X}{Point pattern of class \code{"ppx"} or \code{"pp3"}.}
+  \item{\dots}{Ignored.}
+}
+\value{
+  A single number or a numeric vector.
+}
+\details{
+  This is a method for the generic function \code{\link{intensity}}.
+  It computes the empirical intensity of a multi-dimensional point pattern
+  (object of class \code{"ppx"} including \code{"pp3"}),
+  i.e. the average density of points per unit volume.
+
+  If the point pattern is multitype, the intensities of the
+  different types are computed separately.
+}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\examples{
+  X <- osteo$pts[[1]]
+  intensity(X)
+  marks(X) <- factor(sample(letters[1:3], npoints(X), replace=TRUE))
+  intensity(X)
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/intensity.quadratcount.Rd b/man/intensity.quadratcount.Rd
new file mode 100644
index 0000000..3055212
--- /dev/null
+++ b/man/intensity.quadratcount.Rd
@@ -0,0 +1,74 @@
+\name{intensity.quadratcount} 
+\alias{intensity.quadratcount}
+\title{
+  Intensity Estimates Using Quadrat Counts
+}
+\description{
+  Uses quadrat count data to estimate the intensity of a point pattern
+  in each tile of a tessellation, assuming the intensity is constant in
+  each tile.
+}
+\usage{
+\method{intensity}{quadratcount}(X, ..., image=FALSE)
+}
+\arguments{
+  \item{X}{
+    An object of class \code{"quadratcount"}.
+  }
+  \item{image}{
+    Logical value specifying whether to return
+    a table of estimated intensities (the default)
+    or a pixel image of the estimated intensity (\code{image=TRUE}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}} to determine
+    the resolution of the pixel image, if \code{image=TRUE}.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{intensity}}.
+  It computes an estimate of the intensity of a point pattern
+  from its quadrat counts.
+
+  The argument \code{X} should be an object of class
+  \code{"quadratcount"}. It would have been obtained by applying the function
+  \code{\link{quadratcount}} to a point pattern
+  (object of class \code{"ppp"}). It contains
+  the counts of the numbers of points of the point pattern falling in each
+  tile of a tessellation.
+
+  Using this information, \code{intensity.quadratcount}
+  divides the quadrat counts by the tile areas,
+  yielding the average density of points per unit area
+  in each tile of the tessellation.
+
+  If \code{image=FALSE} (the default), these intensity values
+  are returned in a contingency table. Cells of the contingency
+  table correspond to tiles of the tessellation.
+
+  If \code{image=TRUE}, the estimated intensity function is
+  returned as a pixel image. For each pixel, the pixel value is the
+  estimated intensity in the tile which contains that pixel.
+}
+\value{
+  If \code{image=FALSE} (the default), a contingency table.
+  If \code{image=TRUE}, a pixel image (object of class \code{"im"}).
+}
+\seealso{
+  \code{\link{intensity}}, 
+  \code{\link{quadratcount}}
+}
+\examples{
+  qa <- quadratcount(swedishpines, 4,3)
+  qa
+  intensity(qa)
+  plot(intensity(qa, image=TRUE))
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/interp.colourmap.Rd b/man/interp.colourmap.Rd
new file mode 100644
index 0000000..690da71
--- /dev/null
+++ b/man/interp.colourmap.Rd
@@ -0,0 +1,56 @@
+\name{interp.colourmap}
+\alias{interp.colourmap}
+\title{
+  Interpolate Smoothly Between Specified Colours
+}
+\description{
+  Given a colourmap object which maps numbers to colours,
+  this function interpolates smoothly between the colours,
+  yielding a new colour map.
+}
+\usage{
+interp.colourmap(m, n = 512)
+}
+
+\arguments{
+  \item{m}{
+    A colour map (object of class \code{"colourmap"}).
+  }
+  \item{n}{
+    Number of colour steps to be created in the new colour map.
+  }
+}
+\details{
+  Given a colourmap object \code{m}, which maps numerical values to
+  colours, this function interpolates the mapping, 
+  yielding a new colour map.
+
+  This makes it easy to build a colour map that
+  has smooth gradation between different colours or shades.
+  First specify a small vector of numbers \code{x} which should be mapped to
+  specific colours \code{y}. Use \code{m <- colourmap(y, inputs=x)}
+  to create a colourmap that represents this simple
+  mapping. Then apply \code{interp.colourmap(m)}
+  to obtain a smooth transition between these points.
+}
+\value{
+  Another colour map (object of class \code{"colourmap"}).
+}
+\seealso{
+  \code{\link{colourmap}}, 
+  \code{\link{tweak.colourmap}}, 
+  \code{\link[spatstat:colourtools]{colourtools}}.
+}
+\examples{
+  co <- colourmap(inputs=c(0, 0.5, 1), c("black", "red", "white"))
+  plot(interp.colourmap(co))
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{color}
+
diff --git a/man/interp.im.Rd b/man/interp.im.Rd
new file mode 100644
index 0000000..285ae70
--- /dev/null
+++ b/man/interp.im.Rd
@@ -0,0 +1,62 @@
+\name{interp.im}
+\alias{interp.im}
+\title{Interpolate a Pixel Image}
+\description{
+  Interpolates the values of a pixel image at any
+  desired location in the frame.
+}
+\usage{
+interp.im(Z, x, y=NULL)
+}
+\arguments{
+  \item{Z}{
+    Pixel image (object of class \code{"im"})
+    with numeric or integer values.
+  }
+  \item{x,y}{
+    Vectors of Cartesian coordinates.
+    Alternatively \code{x} can be a point pattern and \code{y} can be missing.
+  }
+}
+\details{
+  A value at each location \code{(x[i],y[i])} will be
+  interpolated using the pixel values of \code{Z} at the four
+  surrounding pixel centres, by simple bilinear interpolation.
+
+  At the boundary (where \code{(x[i],y[i])} is not surrounded by
+  four pixel centres) the value at the nearest pixel
+  is taken.
+
+  The arguments \code{x,y} can be anything acceptable to
+  \code{\link[grDevices]{xy.coords}}.
+}
+\value{
+  Vector of interpolated values, with \code{NA} for points that lie
+  outside the domain of the image.
+}
+\examples{
+   opa <- par(mfrow=c(1,2))
+   # coarse image
+   V <- as.im(function(x,y) { x^2 + y }, owin(), dimyx=10)
+   plot(V, main="coarse image", col=terrain.colors(256))
+
+   # lookup value at location (0.5,0.5)
+   V[list(x=0.5,y=0.5)]
+   # interpolated value at location (0.5,0.5)
+   interp.im(V, 0.5, 0.5)
+   # true value is 0.75
+
+   # how to obtain an interpolated image at a desired resolution
+   U <- as.im(interp.im, W=owin(), Z=V, dimyx=256)
+   plot(U, main="interpolated image", col=terrain.colors(256))
+   par(opa)
+}
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/intersect.owin.Rd b/man/intersect.owin.Rd
new file mode 100644
index 0000000..12ebd87
--- /dev/null
+++ b/man/intersect.owin.Rd
@@ -0,0 +1,130 @@
+\name{intersect.owin}
+\alias{intersect.owin}
+\alias{union.owin}
+\alias{setminus.owin}
+\title{Intersection, Union or Set Subtraction of Windows}
+\description{
+  Yields the intersection, union or set subtraction of windows.
+}
+\usage{
+   intersect.owin(\dots, fatal=TRUE, p)
+   union.owin(\dots, p)
+   setminus.owin(A, B, \dots, p)
+}
+\arguments{
+  \item{A,B}{Windows (objects of class \code{"owin"}).}
+  \item{\dots}{
+    Windows,
+    or arguments passed to \code{\link{as.mask}}
+    to control the discretisation.
+  }
+  \item{fatal}{Logical.
+    Determines what happens if the intersection is empty.
+  }
+  \item{p}{
+    Optional list of parameters passed to
+    \code{\link[polyclip]{polyclip}} to control the
+    accuracy of polygon geometry.
+  }
+}
+\value{
+  A window (object of class \code{"owin"})
+  or possibly \code{NULL}.
+}
+\details{
+  The function \code{intersect.owin} computes the intersection between
+  the windows given in \code{\dots}, while 
+  \code{union.owin} computes their union.
+  The function \code{setminus.owin} computes the intersection of
+  \code{A} with the complement of \code{B}.
+
+  For \code{intersect.owin} and \code{union.owin}, 
+  the arguments \code{\dots} must be either
+  \itemize{
+    \item window objects of class \code{"owin"},
+    \item data that can be coerced to this class by
+    \code{\link{as.owin}},
+    \item lists of windows, of class \code{"solist"},
+    \item named arguments of \code{\link{as.mask}} to control
+    the discretisation if required.
+  }
+  For \code{setminus.owin}, the arguments \code{\dots}
+  must be named arguments of \code{\link{as.mask}}.
+
+  If the intersection is empty, then if \code{fatal=FALSE}
+  the result is \code{NULL}, while if \code{fatal=TRUE} an error occurs.
+}
+
+\author{
+\adrian
+
+
+\rolf
+
+and \ege
+
+}
+\seealso{
+  \code{\link{is.subset.owin}},
+  \code{\link{overlap.owin}},
+  \code{\link{boundingbox}},
+  \code{\link{owin.object}}
+}
+\examples{
+# rectangles
+   u <- unit.square()
+   v <- owin(c(0.5,3.5), c(0.4,2.5))
+# polygon
+   data(letterR)
+# mask
+   m <- as.mask(letterR)
+
+# two rectangles
+   intersect.owin(u, v) 
+   union.owin(u,v)
+   setminus.owin(u,v)
+
+# polygon and rectangle
+   intersect.owin(letterR, v)
+   union.owin(letterR,v)
+   setminus.owin(letterR,v)
+
+# mask and rectangle
+   intersect.owin(m, v)
+   union.owin(m,v)
+   setminus.owin(m,v)
+
+# mask and polygon
+   p <- rotate(v, 0.2)
+   intersect.owin(m, p)
+   union.owin(m,p)
+   setminus.owin(m,p)
+
+# two polygons
+   A <- letterR
+   B <- rotate(letterR, 0.2)
+   plot(boundingbox(A,B), main="intersection")
+   w <- intersect.owin(A, B)
+   plot(w, add=TRUE, col="lightblue")
+   plot(A, add=TRUE)
+   plot(B, add=TRUE)
+
+   plot(boundingbox(A,B), main="union")
+   w <- union.owin(A,B)
+   plot(w, add=TRUE, col="lightblue")   
+   plot(A, add=TRUE)
+   plot(B, add=TRUE)
+
+   plot(boundingbox(A,B), main="set minus")
+   w <- setminus.owin(A,B)
+   plot(w, add=TRUE, col="lightblue")   
+   plot(A, add=TRUE)
+   plot(B, add=TRUE)
+
+# intersection and union of three windows
+   C <- shift(B, c(0.2, 0.3))
+   plot(union.owin(A,B,C))
+   plot(intersect.owin(A,B,C))
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/intersect.tess.Rd b/man/intersect.tess.Rd
new file mode 100644
index 0000000..56ef361
--- /dev/null
+++ b/man/intersect.tess.Rd
@@ -0,0 +1,82 @@
+\name{intersect.tess}
+\alias{intersect.tess}
+\title{Intersection of Two Tessellations}
+\description{
+  Yields the intersection of two tessellations,
+  or the intersection of a tessellation with a window.
+}
+\usage{
+   intersect.tess(X, Y, \dots, keepmarks=FALSE)
+}
+\arguments{
+  \item{X,Y}{Two tessellations (objects of class \code{"tess"}),
+    or windows (objects of class \code{"owin"}),
+    or other data that can be converted to 
+    tessellations by \code{\link{as.tess}}.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link{as.mask}}
+    to control the discretisation, if required.
+  }
+  \item{keepmarks}{
+    Logical value. If \code{TRUE}, the marks attached to the
+    tiles of \code{X} and \code{Y} will be retained as marks of
+    the intersection tiles. 
+  }
+}
+\value{
+  A tessellation (object of class \code{"tess"}).
+}
+\details{
+  A tessellation is a collection of disjoint spatial regions
+  (called \emph{tiles}) that fit together to form a larger spatial
+  region. See \code{\link{tess}}.
+
+  If \code{X} and \code{Y} are not tessellations, they are first
+  converted into tessellations by \code{\link{as.tess}}.
+
+  The function \code{intersect.tess} then computes the intersection between
+  the two tessellations. This is another tessellation, each of whose
+  tiles is the intersection of a tile from \code{X} and a tile from \code{Y}.
+
+  One possible use of this function is to slice a window \code{W} into
+  subwindows determined by a tessellation. See the Examples.
+}
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{tess}},
+  \code{\link{as.tess}},
+  \code{\link{intersect.owin}}
+}
+\examples{
+  opa <- par(mfrow=c(1,3))
+# polygon
+  data(letterR)
+  plot(letterR)
+# tessellation of rectangles
+  X <- tess(xgrid=seq(2, 4, length=10), ygrid=seq(0, 3.5, length=8))
+  plot(X)
+  plot(intersect.tess(X, letterR))
+
+  A <- runifpoint(10)
+  B <- runifpoint(10)
+  plot(DA <- dirichlet(A))
+  plot(DB <- dirichlet(B))
+  plot(intersect.tess(DA, DB))
+  par(opa)
+
+  marks(DA) <- 1:10
+  marks(DB) <- 1:10
+  plot(Z <- intersect.tess(DA,DB, keepmarks=TRUE))
+  mZ <- marks(Z)
+  tZ <- tiles(Z)
+  for(i in which(mZ[,1] == 3)) plot(tZ[[i]], add=TRUE, col="pink")
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/invoke.symbolmap.Rd b/man/invoke.symbolmap.Rd
new file mode 100644
index 0000000..7459c5d
--- /dev/null
+++ b/man/invoke.symbolmap.Rd
@@ -0,0 +1,72 @@
+\name{invoke.symbolmap}
+\alias{invoke.symbolmap}
+\title{
+  Plot Data Using Graphics Symbol Map
+}
+\description{
+  Apply a graphics symbol map to a vector of data values and
+  plot the resulting symbols.
+}
+\usage{
+invoke.symbolmap(map, values, x=NULL, y = NULL, \dots, add = FALSE,
+               do.plot = TRUE, started = add && do.plot)
+}
+\arguments{
+  \item{map}{
+    Graphics symbol map (object of class \code{"symbolmap"}).
+  }
+  \item{values}{
+    Vector of data that can be mapped by the symbol map.
+  }
+  \item{x,y}{
+    Coordinate vectors for the spatial locations of the
+    symbols to be plotted.
+  }
+  \item{\dots}{
+    Additional graphics parameters.
+  }
+  \item{add}{
+    Logical value indicating whether to add the symbols to
+    an existing plot (\code{add=TRUE}) or to initialise a new
+    plot (\code{add=FALSE}, the default).
+  }
+  \item{do.plot}{
+    Logical value indicating whether to actually perform the plotting.
+  }
+  \item{started}{
+    Logical value indicating whether the plot has already been initialised.
+  }
+}
+\details{
+  A symbol map is an association between data values and graphical symbols. 
+
+  This command applies the symbol map \code{map} to the data 
+  \code{values} and plots the resulting symbols at the locations
+  given by \code{\link{xy.coords}(x,y)}.
+}
+\value{
+  (Invisibly) the maximum diameter of the symbols, in user coordinate units.
+}
+\author{\adrian,
+  \rolf
+  and \ege.
+}
+\seealso{
+  \code{\link{plot.symbolmap}} to plot the graphics map itself.
+  
+  \code{\link{symbolmap}} to create a graphics map.
+}
+\examples{
+  g <- symbolmap(range=c(-1,1),
+                   shape=function(x) ifelse(x > 0, "circles", "squares"),
+                   size=function(x) sqrt(ifelse(x > 0, x/pi, -x))/15,
+                   bg=function(x) ifelse(x > 0, "green", "red"))
+  plot(square(1), main="")
+  a <- invoke.symbolmap(g, runif(10, -1, 1), runifpoint(10), add=TRUE)
+  a 
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/iplot.Rd b/man/iplot.Rd
new file mode 100644
index 0000000..88c84b7
--- /dev/null
+++ b/man/iplot.Rd
@@ -0,0 +1,93 @@
+\name{iplot} 
+\alias{iplot}
+\alias{iplot.ppp}
+\alias{iplot.layered}
+\alias{iplot.linnet}
+\alias{iplot.lpp}
+\alias{iplot.default}
+\title{Point and Click Interface for Displaying Spatial Data}
+\description{
+  Plot spatial data
+  with interactive (point-and-click) control over the plot.
+}
+\usage{
+ iplot(x, ...)
+
+ \method{iplot}{ppp}(x, ..., xname)
+
+ \method{iplot}{linnet}(x, ..., xname)
+
+ \method{iplot}{lpp}(x, ..., xname)
+
+ \method{iplot}{layered}(x, ..., xname, visible)
+
+ \method{iplot}{default}(x, ..., xname)
+}
+\arguments{
+  \item{x}{
+    The spatial object to be plotted.
+    An object of class \code{"ppp"}, \code{"psp"}, \code{"im"},
+    \code{"owin"}, \code{"linnet"}, \code{"lpp"} or \code{"layered"}.
+  }
+  \item{\dots}{Ignored.}
+  \item{xname}{
+    Optional. Character string to use as the title of the dataset.
+  }
+  \item{visible}{
+    Optional. Logical vector indicating which layers of
+    \code{x} should initially be turned on (visible).
+  }
+}
+\value{
+  \code{NULL}.
+}
+\details{
+  The function \code{iplot} generates a plot of the spatial dataset \code{x}
+  and allows interactive control over the appearance of the plot
+  using a point-and-click interface.
+
+  The function \code{iplot} is generic, with methods
+  for point patterns (\code{\link{iplot.ppp}}),
+  layered objects (\code{\link{iplot.layered}})
+  and a default method. The default method will handle objects of
+  class \code{"psp"}, \code{"im"} and \code{"owin"}
+  at least.
+
+  A new popup window is launched. The spatial dataset
+  \code{x} is displayed in the middle of the window using the
+  appropriate \code{plot} method.
+
+  The left side of the window contains
+  buttons and sliders allowing the user to change the plot parameters.
+
+  The right side of the window contains navigation controls for
+  zooming (changing magnification), panning (shifting the
+  field of view relative to the data), redrawing and exiting.
+
+  If the user clicks in the area where the point pattern is displayed,
+  the field of view will be re-centred at the point that was clicked.
+}
+\seealso{
+  \code{\link{istat}}
+}
+\section{Package Dependence}{
+  This function requires the package \pkg{rpanel} to be loaded.
+}
+\examples{
+   if(interactive() && require(rpanel)) {
+      iplot(cells)
+      iplot(amacrine)
+      iplot(lansing)
+      L <- layered(D=distmap(cells), P=cells,
+            plotargs=list(list(ribbon=FALSE), list(pch=16)))
+      iplot(L)
+   }
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/ippm.Rd b/man/ippm.Rd
new file mode 100644
index 0000000..0e3a89e
--- /dev/null
+++ b/man/ippm.Rd
@@ -0,0 +1,171 @@
+\name{ippm}
+\alias{ippm}
+\title{
+  Fit Point Process Model Involving Irregular Trend Parameters
+}
+\description{
+  Experimental extension to \code{ppm}
+  which finds optimal values of the irregular trend parameters in a
+  point process model.
+}
+\usage{
+ippm(Q, \dots,
+          iScore=NULL,
+          start=list(),
+          covfunargs=start,
+          nlm.args=list(stepmax=1/2),
+          silent=FALSE,
+          warn.unused=TRUE)
+}
+\arguments{
+  \item{Q,\dots}{
+    Arguments passed to \code{\link{ppm}}
+    to fit the point process model.
+  }
+  \item{iScore}{
+    Optional. A named list of \R functions that compute the partial derivatives
+    of the logarithm of the trend, with respect to each irregular parameter.
+    See Details.
+  }
+  \item{start}{
+    Named list containing initial values of the
+    irregular parameters over which to optimise.
+  }
+  \item{covfunargs}{
+    Argument passed to \code{\link{ppm}}.
+    A named list containing values for \emph{all} irregular parameters
+    required by the covariates in the model.
+    Must include all the parameters named in \code{start}.
+  }
+  \item{nlm.args}{
+    Optional list of arguments passed to \code{\link[stats]{nlm}}
+    to control the optimization algorithm.
+  }
+  \item{silent}{
+    Logical. Whether to print warnings if the optimization algorithm
+    fails to converge.
+  }
+  \item{warn.unused}{
+    Logical. Whether to print a warning if some of the parameters
+    in \code{start} are not used in the model.
+  }
+}
+\details{
+  This function is an experimental extension to the
+  point process model fitting command \code{\link{ppm}}.
+  The extension allows the trend of the model to include irregular parameters,
+  whose optimal values will be found by a Newton-type iterative
+  method, using \code{\link[stats]{nlm}}.
+
+  For the sake of explanation,
+  consider a Poisson point process with intensity function
+  \eqn{\lambda(u)}{lambda(u)} at location \eqn{u}. Assume that
+  \deqn{
+    \lambda(u) = \exp(\alpha + \beta Z(u)) \, f(u, \gamma)
+  }{
+    lambda(u) = exp(alpha + beta * Z(u)) * f(u, gamma)
+  }
+  where \eqn{\alpha,\beta,\gamma}{alpha, beta, gamma} are
+  parameters to be estimated, \eqn{Z(u)} is a spatial covariate
+  function, and \eqn{f} is some known function.
+  Then the parameters
+  \eqn{\alpha,\beta}{alpha, beta} are called \emph{regular} because they
+  appear in a loglinear form; the parameter
+  \eqn{\gamma}{gamma} is called \emph{irregular}.
+  
+  To fit this model using \code{ippm}, we specify the
+  intensity using the \code{trend} formula
+  in the same way as usual for \code{\link{ppm}}.
+  The trend formula is a representation of the log intensity.
+  In the above example the log intensity is
+  \deqn{
+    \log\lambda(u) = \alpha + \beta Z(u) + \log f(u, \gamma)
+  }{
+    log(lambda(u)) = alpha + beta * Z(u) + log(f(u, gamma))
+  }
+  So the model above would be encoded with the trend formula
+  \code{~Z + offset(log(f))}. Note that the irregular part of the model
+  is an \emph{offset} term, which means that it is included in the log trend
+  as it is, without being multiplied by another regular parameter.
+
+  The optimisation runs faster if we specify the derivative
+  of \eqn{\log f(u,\gamma)}{log(f(u,gamma))} with
+  respect to \eqn{\gamma}{gamma}. We call this the
+  \emph{irregular score}. To specify this, the user must write an \R function
+  that computes the irregular score for any value of
+  \eqn{\gamma}{gamma} at any location \code{(x,y)}.
+  
+  Thus, to code such a problem,
+  \enumerate{
+    \item The argument \code{trend} should define the
+    log intensity, with the irregular part as an offset;
+    \item The argument \code{start} should be a list
+    containing initial values of each of the irregular parameters;
+    \item The argument \code{iScore}, if provided,
+    must be a list (with one entry
+    for each entry of \code{start}) of functions
+    with arguments \code{x,y,\dots}, that evaluate the partial derivatives
+    of \eqn{\log f(u,\gamma)}{log(f(u,gamma))} with
+    respect to each irregular parameter.
+  }
+  
+  The coded example below illustrates the model with two irregular
+  parameters \eqn{\gamma,\delta}{gamma,delta} and irregular term
+  \deqn{
+    f((x,y), (\gamma, \delta)) = 1 + \exp(\gamma - \delta x^3)
+  }{
+    f((x,y), (gamma, delta)) = 1 + exp(gamma - delta * x^3)
+  }
+
+  Arguments \code{\dots} passed to \code{\link{ppm}} may
+  also include \code{interaction}. In this case the model is not
+  a Poisson point process but a more general Gibbs point process;
+  the trend formula \code{trend} 
+  determines the first-order trend
+  of the model (the first order component of the conditional intensity),
+  not the intensity.
+}
+\value{
+  A fitted point process model (object of class \code{"ppm"}).
+}
+\author{\spatstatAuthors.}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{profilepl}}
+}
+\examples{
+  nd <- 32
+  \testonly{nd <- 10}
+  
+  gamma0 <- 3
+  delta0 <- 5
+  POW <- 3
+  # Terms in intensity
+  Z <- function(x,y) { -2*y }
+  f <- function(x,y,gamma,delta) { 1 + exp(gamma - delta * x^POW) }
+  # True intensity
+  lamb <- function(x,y,gamma,delta) { 200 * exp(Z(x,y)) * f(x,y,gamma,delta) }
+  # Simulate realisation
+  lmax <- max(lamb(0,0,gamma0,delta0), lamb(1,1,gamma0,delta0))
+  set.seed(42)
+  X <- rpoispp(lamb, lmax=lmax, win=owin(), gamma=gamma0, delta=delta0)
+  # Partial derivatives of log f
+  DlogfDgamma <- function(x,y, gamma, delta) {
+    topbit <- exp(gamma - delta * x^POW)
+    topbit/(1 + topbit)
+  }
+  DlogfDdelta <- function(x,y, gamma, delta) {
+    topbit <- exp(gamma - delta * x^POW)
+    - (x^POW) * topbit/(1 + topbit)
+  }
+  # irregular score
+  Dlogf <- list(gamma=DlogfDgamma, delta=DlogfDdelta)
+  # fit model
+  ippm(X ~ Z + offset(log(f)),
+       covariates=list(Z=Z, f=f),
+       iScore=Dlogf,
+       start=list(gamma=1, delta=1),
+       nd=nd)
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/is.connected.Rd b/man/is.connected.Rd
new file mode 100644
index 0000000..56c87bf
--- /dev/null
+++ b/man/is.connected.Rd
@@ -0,0 +1,62 @@
+\name{is.connected}  
+\Rdversion{1.1}
+\alias{is.connected}
+\alias{is.connected.default}
+\alias{is.connected.linnet}
+\title{
+  Determine Whether an Object is Connected
+}
+\description{
+  Determine whether an object is 
+  topologically connected.
+}
+\usage{
+is.connected(X, \dots)
+
+\method{is.connected}{default}(X, \dots)
+
+\method{is.connected}{linnet}(X, \dots)
+}
+\arguments{
+  \item{X}{
+    A spatial object such as a pixel image (object of class \code{"im"}),
+    a window (object of class \code{"owin"}) or a linear network
+    (object of class \code{"linnet"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{connected}} to determine the
+    connected components.
+  }
+}
+\details{
+  The command \code{is.connected(X)} returns \code{TRUE} if the object
+  \code{X} consists of a single, topologically-connected piece,
+  and returns \code{FALSE} if \code{X} consists of several pieces
+  which are not joined together.
+  
+  The function \code{is.connected} is generic.
+  The default method \code{is.connected.default} 
+  works for many classes of objects, including windows (class \code{"owin"})
+  and images (class \code{"im"}).
+  There is a method for linear networks, \code{is.connected.linnet},
+  described here, and a method for point patterns
+  described in \code{\link{is.connected.ppp}}.
+}
+\value{
+  A logical value.
+}
+\seealso{
+  \code{\link{connected}},
+  \code{\link{is.connected.ppp}}.
+}
+\examples{
+  d <- distmap(cells, dimyx=256)
+  X <- levelset(d, 0.07)
+  plot(X)
+  is.connected(X)
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/is.connected.ppp.Rd b/man/is.connected.ppp.Rd
new file mode 100644
index 0000000..5eb06c8
--- /dev/null
+++ b/man/is.connected.ppp.Rd
@@ -0,0 +1,54 @@
+\name{is.connected.ppp}  
+\Rdversion{1.1}
+\alias{is.connected.ppp}
+\title{
+  Determine Whether a Point Pattern is Connected
+}
+\description{
+  Determine whether a point pattern is topologically connected
+  when all pairs of points closer than a threshold distance are joined.
+}
+\usage{
+\method{is.connected}{ppp}(X, R, \dots)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{R}{
+    Threshold distance. Pairs of points closer than \code{R} units apart
+    will be joined together.
+  }
+  \item{\dots}{
+     Ignored.
+  }
+}
+\details{
+  The function \code{is.connected} is generic. This is the method for
+  point patterns (objects of class \code{"ppp"}).
+
+  The point pattern \code{X} is first converted into an abstract graph
+  by joining every pair of points that lie closer than \code{R} units
+  apart. Then the algorithm determines whether this graph is connected.
+
+  That is, the result of \code{is.connected(X)} is \code{TRUE}
+  if any point in \code{X} can be reached from any other point,
+  by a series of steps between points of \code{X},
+  each step being shorter than \code{R} units in length.
+}
+\value{
+  A logical value.
+}
+\seealso{
+  \code{\link{is.connected}}, 
+  \code{\link{connected.ppp}}.
+}
+\examples{
+  is.connected(redwoodfull, 0.1)
+  is.connected(redwoodfull, 0.2)
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/is.convex.Rd b/man/is.convex.Rd
new file mode 100644
index 0000000..d539e7d
--- /dev/null
+++ b/man/is.convex.Rd
@@ -0,0 +1,43 @@
+\name{is.convex}
+\alias{is.convex}
+\title{Test Whether a Window is Convex}
+\description{
+  Determines whether a window is convex.
+}
+\usage{
+  is.convex(x)
+}
+\arguments{
+  \item{x}{
+    Window (object of class \code{"owin"}).
+  }
+}
+\value{
+  Logical value, equal to \code{TRUE} if \code{x} is convex.
+}
+\details{
+  If \code{x} is a rectangle, the result is TRUE.
+
+  If \code{x} is polygonal, the result is TRUE if \code{x} consists of a
+  single polygon and this polygon is equal to the minimal convex hull
+  of its vertices computed by \code{\link[grDevices]{chull}}. 
+
+  If \code{x} is a mask, the algorithm first extracts all boundary
+  pixels of \code{x} using \code{\link{vertices}}. Then it computes
+  the (polygonal) convex hull \eqn{K} of the boundary pixels.
+  The result is TRUE if every boundary pixel lies within 
+  one pixel diameter of an edge of \eqn{K}.
+}
+\seealso{
+  \code{\link{owin}},
+  \code{\link{convexhull.xy}},
+  \code{\link{vertices}}
+}
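+\examples{
+  # simple illustrations of the three cases
+  is.convex(square(1))   # TRUE: rectangles are convex
+  data(letterR)
+  is.convex(letterR)     # FALSE
+}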
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/is.dppm.Rd b/man/is.dppm.Rd
new file mode 100644
index 0000000..0448ef6
--- /dev/null
+++ b/man/is.dppm.Rd
@@ -0,0 +1,18 @@
+\name{is.dppm}
+\alias{is.dppm}
+\title{Recognise Fitted Determinantal Point Process Models}
+\description{Checks whether an object inherits the class \code{"dppm"}.}
+\usage{is.dppm(x)}
+\arguments{
+  \item{x}{Any object.}
+}
+
+\value{A single logical value.}
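+\examples{
+  # a sketch, assuming a Gaussian determinantal family fitted by dppm
+  fit <- dppm(swedishpines ~ 1, dppGauss())
+  is.dppm(fit)    # TRUE
+  is.dppm(cells)  # FALSE
+}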
+
+\author{\ege <rubak@math.aau.dk>,
+\adrian <Adrian.Baddeley@uwa.edu.au>
+and \rolf <r.turner@auckland.ac.nz>}
+
+\keyword{spatial}
+\keyword{manip}
+\keyword{models}
diff --git a/man/is.empty.Rd b/man/is.empty.Rd
new file mode 100644
index 0000000..d578861
--- /dev/null
+++ b/man/is.empty.Rd
@@ -0,0 +1,51 @@
+\name{is.empty}  
+\alias{is.empty}
+\alias{is.empty.owin}
+\alias{is.empty.ppp}
+\alias{is.empty.psp}
+\alias{is.empty.default}
+\title{Test Whether An Object Is Empty}
+\description{
+  Checks whether the argument is an empty window,
+  an empty point pattern, etc.
+}
+\usage{
+is.empty(x)
+\method{is.empty}{owin}(x)
+\method{is.empty}{ppp}(x)
+\method{is.empty}{psp}(x)
+\method{is.empty}{default}(x)
+}
+\arguments{
+  \item{x}{
+    A window (object of class \code{"owin"}),
+    a point pattern (object of class \code{"ppp"}), or
+    a line segment pattern (object of class \code{"psp"}).
+  }
+}
+\details{
+  This function tests whether the object \code{x}
+  represents an empty spatial object, such as an empty window,
+  a point pattern with zero points, or a line segment pattern
+  with zero line segments. 
+
+  An empty window can be obtained as the output of
+  \code{\link{intersect.owin}}, \code{\link{erosion}},
+  \code{\link{opening}}, \code{\link{complement.owin}}
+  and some other operations.
+
+  An empty point pattern or line segment pattern can be
+  obtained as the result of simulation.
+}
+\value{
+  Logical value.
+}
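+\examples{
+  # a minimal illustration
+  is.empty(cells)           # FALSE: a non-empty point pattern
+  is.empty(runifpoint(0))   # TRUE: a pattern with zero points
+}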
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/is.hybrid.Rd b/man/is.hybrid.Rd
new file mode 100644
index 0000000..8f1ca22
--- /dev/null
+++ b/man/is.hybrid.Rd
@@ -0,0 +1,73 @@
+\name{is.hybrid}  
+\alias{is.hybrid}
+\alias{is.hybrid.ppm}
+\alias{is.hybrid.interact}
+\title{
+  Test Whether Object is a Hybrid
+}
+\description{
+  Tests whether a point process model or point process interaction
+  is a hybrid of several interactions.
+}
+\usage{
+is.hybrid(x)
+
+\method{is.hybrid}{ppm}(x)
+
+\method{is.hybrid}{interact}(x)
+}
+\arguments{
+  \item{x}{
+    A point process model (object of class \code{"ppm"})
+    or a point process interaction structure
+    (object of class \code{"interact"}).
+  }
+}
+\details{
+  A \emph{hybrid} (Baddeley, Turner, Mateu and Bevan, 2013)
+  is a point process model created by combining two or more
+  point process models, or an interpoint interaction created by combining
+  two or more interpoint interactions.
+  
+  The function \code{is.hybrid} is generic, with methods for
+  point process models (objects of class \code{"ppm"})
+  and point process interactions
+  (objects of class \code{"interact"}).
+  These functions return \code{TRUE} if the object \code{x} is a hybrid,
+  and \code{FALSE} if it is not a hybrid.
+
+  Hybrids of two or more interpoint interactions
+  are created by the function \code{\link{Hybrid}}.
+  Such a hybrid interaction can then be fitted to point pattern data
+  using \code{\link{ppm}}. 
+}
+\value{
+  \code{TRUE} if the object is a hybrid, and \code{FALSE} otherwise.
+}
+\references{
+  Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013)
+  Hybrids of Gibbs point process models and their implementation.
+  \emph{Journal of Statistical Software} \bold{55}:11, 1--43.
+  \url{http://www.jstatsoft.org/v55/i11/}
+}
+\seealso{
+  \code{\link{Hybrid}}
+}
+\examples{
+  S <- Strauss(0.1)
+  is.hybrid(S)
+  H <- Hybrid(Strauss(0.1), Geyer(0.2, 3))
+  is.hybrid(H)
+
+  data(redwood)
+  fit <- ppm(redwood, ~1, H)
+  is.hybrid(fit)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/is.im.Rd b/man/is.im.Rd
new file mode 100644
index 0000000..3a07d50
--- /dev/null
+++ b/man/is.im.Rd
@@ -0,0 +1,33 @@
+\name{is.im}
+\alias{is.im}
+\title{Test Whether An Object Is A Pixel Image}
+\description{
+  Tests whether its argument is a pixel image
+  (object of class \code{"im"}).
+}
+\usage{
+is.im(x)
+}
+\arguments{
+  \item{x}{Any object.}
+}
+\details{
+  This function tests whether the argument \code{x} is a
+  pixel image object of class \code{"im"}. For details of this
+  class, see \code{\link{im.object}}.
+  
+  The object is determined to be an image if it inherits from
+  class \code{"im"}.
+}
+\value{
+  \code{TRUE} if \code{x} is a pixel image, otherwise \code{FALSE}.
+}
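+\examples{
+  # quick illustration
+  D <- density(cells)
+  is.im(D)       # TRUE: a pixel image
+  is.im(cells)   # FALSE: a point pattern
+}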
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/is.lpp.Rd b/man/is.lpp.Rd
new file mode 100644
index 0000000..3030707
--- /dev/null
+++ b/man/is.lpp.Rd
@@ -0,0 +1,30 @@
+\name{is.lpp}
+\alias{is.lpp}
+\title{Test Whether An Object Is A Point Pattern on a Linear Network}
+\description{
+  Checks whether its argument is a point pattern
+  on a linear network
+  (object of class \code{"lpp"}).
+}
+\usage{
+is.lpp(x)
+}
+\arguments{
+  \item{x}{Any object.}
+}
+\details{
+  This function tests whether the object \code{x}
+  is a point pattern object of class \code{"lpp"}. 
+}
+\value{
+  \code{TRUE} if \code{x} is a point pattern of class \code{"lpp"},
+  otherwise \code{FALSE}.
+}
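+\examples{
+  # quick illustration
+  is.lpp(chicago)   # TRUE: a point pattern on a network
+  is.lpp(cells)     # FALSE: an ordinary point pattern
+}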
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/is.marked.Rd b/man/is.marked.Rd
new file mode 100644
index 0000000..d1a0dca
--- /dev/null
+++ b/man/is.marked.Rd
@@ -0,0 +1,51 @@
+\name{is.marked}
+\alias{is.marked}
+\title{Test Whether Marks Are Present}
+\description{
+  Generic function to test whether a given object (usually a point
+  pattern or something related to a point pattern) has ``marks''
+  attached to the points.
+}
+\usage{
+  is.marked(X, \dots) 
+}
+\arguments{
+  \item{X}{
+    Object to be inspected
+  }
+  \item{\dots}{
+    Other arguments.
+  }
+}
+\value{
+  Logical value, equal to \code{TRUE} if \code{X} is marked.
+}
+\details{
+  ``Marks'' are observations attached to each point of a point pattern.
+  For example the \code{\link{longleaf}} dataset contains the locations
+  of trees, each tree being marked by its diameter;
+  the \code{\link{amacrine}} dataset gives the locations of cells
+  of two types (on/off) and the type of cell may be regarded as a mark attached
+  to the location of the cell.
+
+  Other objects related to point patterns, such as point process models,
+  may involve marked points.
+
+  This function tests whether the object \code{X}
+  contains or involves marked points.
+  It is generic; methods are provided
+  for point patterns (objects of class \code{"ppp"})
+  and point process models (objects of class \code{"ppm"}).
+}
+\seealso{
+  \code{\link{is.marked.ppp}},
+  \code{\link{is.marked.ppm}}
+}
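+\examples{
+  # a marked and an unmarked point pattern
+  is.marked(longleaf)   # TRUE: trees marked by diameter
+  is.marked(cells)      # FALSE
+}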
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/is.marked.ppm.Rd b/man/is.marked.ppm.Rd
new file mode 100644
index 0000000..f178f65
--- /dev/null
+++ b/man/is.marked.ppm.Rd
@@ -0,0 +1,89 @@
+\name{is.marked.ppm}
+\alias{is.marked.ppm}
+\alias{is.marked.lppm}
+\title{Test Whether A Point Process Model is Marked}
+\description{
+  Tests whether a fitted point process model involves ``marks''
+  attached to the points.
+}
+\usage{
+  \method{is.marked}{ppm}(X, \dots) 
+
+  \method{is.marked}{lppm}(X, \dots) 
+}
+\arguments{
+  \item{X}{
+    Fitted point process model (object of class \code{"ppm"})
+    usually obtained from \code{\link{ppm}}.
+    Alternatively, a model of class \code{"lppm"}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\value{
+  Logical value, equal to \code{TRUE} if
+  \code{X} is a model that was fitted to a marked point pattern dataset.
+}
+\details{
+  ``Marks'' are observations attached to each point of a point pattern.
+  For example the \code{\link{longleaf}} dataset contains the locations
+  of trees, each tree being marked by its diameter;
+  the \code{\link{amacrine}} dataset gives the locations of cells
+  of two types (on/off) and the type of cell may be regarded as a mark attached
+  to the location of the cell.
+
+  The argument \code{X} is a fitted point process model
+  (an object of class \code{"ppm"}) typically obtained
+  by fitting a model to point pattern data using \code{\link{ppm}}.
+
+  This function returns \code{TRUE} if the \emph{original data}
+  (to which the model \code{X} was fitted) were a marked point pattern.
+
+  Note that this is not the same as testing whether the
+  model involves terms that depend on the marks
+  (i.e. whether the fitted model actually uses the marks, or ignores them).
+  Currently we have not implemented a test for this.
+
+  If this function returns \code{TRUE}, the implications are
+  (for example) that
+  any simulation of this model will require simulation of random marks
+  as well as random point locations.
+}
+\seealso{
+  \code{\link{is.marked}},
+  \code{\link{is.marked.ppp}}
+}
+\examples{
+   X <- lansing
+   # Multitype point pattern --- trees marked by species
+
+   \testonly{
+      # Smaller dataset
+      X <- amacrine
+   }
+
+  fit1 <- ppm(X, ~ marks, Poisson())
+  is.marked(fit1)
+  # TRUE
+
+  fit2 <- ppm(X, ~ 1, Poisson())
+  is.marked(fit2)
+  # TRUE
+
+  # Unmarked point pattern
+  fit3 <- ppm(cells, ~ 1, Poisson())
+  is.marked(fit3)
+  # FALSE
+
+}
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
+\keyword{models}
diff --git a/man/is.marked.ppp.Rd b/man/is.marked.ppp.Rd
new file mode 100644
index 0000000..707ccb1
--- /dev/null
+++ b/man/is.marked.ppp.Rd
@@ -0,0 +1,62 @@
+\name{is.marked.ppp}
+\alias{is.marked.ppp}
+\title{Test Whether A Point Pattern is Marked}
+\description{
+  Tests whether a point pattern has ``marks''
+  attached to the points.
+}
+\usage{
+  \method{is.marked}{ppp}(X, na.action="warn", \dots) 
+}
+\arguments{
+  \item{X}{
+    Point pattern (object of class \code{"ppp"})
+  }
+  \item{na.action}{
+    String indicating what to do if \code{NA} values are
+    encountered amongst the marks.
+    Options are \code{"warn"}, \code{"fatal"} and \code{"ignore"}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\value{
+  Logical value, equal to \code{TRUE} if \code{X} is a marked point pattern.
+}
+\details{
+  ``Marks'' are observations attached to each point of a point pattern.
+  For example the \code{\link{longleaf}} dataset contains the locations
+  of trees, each tree being marked by its diameter;
+  the \code{\link{amacrine}} dataset gives the locations of cells
+  of two types (on/off) and the type of cell may be regarded as a mark attached
+  to the location of the cell.
+
+  This function tests whether the point pattern \code{X}
+  contains or involves marked points.
+  It is a method for the generic function \code{\link{is.marked}}.
+
+  The argument \code{na.action} determines what action will be taken
+  if the point pattern has a vector of marks but some or all of the
+  marks are \code{NA}. Options are   \code{"fatal"} to cause a fatal
+  error; \code{"warn"} to issue a warning and then return \code{TRUE};
+  and \code{"ignore"} to take no action except returning \code{TRUE}.
+}
+\seealso{
+  \code{\link{is.marked}},
+  \code{\link{is.marked.ppm}}
+}
+\examples{
+   data(cells)
+   is.marked(cells)  #FALSE
+   data(longleaf)
+   is.marked(longleaf) #TRUE
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/is.multitype.Rd b/man/is.multitype.Rd
new file mode 100644
index 0000000..c719149
--- /dev/null
+++ b/man/is.multitype.Rd
@@ -0,0 +1,57 @@
+\name{is.multitype}
+\alias{is.multitype}
+\title{Test whether Object is Multitype}
+\description{
+  Generic function to test whether a given object (usually a point
+  pattern or something related to a point pattern) has ``marks''
+  attached to the points which classify the points into several types.
+}
+\usage{
+  is.multitype(X, \dots) 
+}
+\arguments{
+  \item{X}{
+    Object to be inspected
+  }
+  \item{\dots}{
+    Other arguments.
+  }
+}
+\value{
+  Logical value, equal to \code{TRUE} if \code{X} is multitype.
+}
+\details{
+  ``Marks'' are observations attached to each point of a point pattern.
+  For example the \code{\link{longleaf}} dataset contains the locations
+  of trees, each tree being marked by its diameter;
+  the \code{\link{amacrine}} dataset gives the locations of cells
+  of two types (on/off) and the type of cell may be regarded as a mark attached
+  to the location of the cell.
+  Other objects related to point patterns, such as point process models,
+  may involve marked points.
+
+  This function tests whether the object \code{X}
+  contains or involves marked points, \bold{and} whether the
+  marks are a factor.
+
+  For example, the \code{\link{amacrine}}
+  dataset is multitype (there are two types of cells, on and off),
+  but the \code{\link{longleaf}} dataset is \emph{not} multitype
+  (the marks are real numbers).
+  
+  This function is generic; methods are provided
+  for point patterns (objects of class \code{"ppp"})
+  and point process models (objects of class \code{"ppm"}).
+}
+\seealso{
+  \code{\link{is.multitype.ppp}},
+  \code{\link{is.multitype.ppm}}
+}
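+\examples{
+  # Illustrative sketch, mirroring the examples for the methods:
+  is.multitype(longleaf)  # FALSE: marks are real numbers
+  is.multitype(amacrine)  # TRUE: marks are a factor with two levels
+}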
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/is.multitype.ppm.Rd b/man/is.multitype.ppm.Rd
new file mode 100644
index 0000000..12b46c5
--- /dev/null
+++ b/man/is.multitype.ppm.Rd
@@ -0,0 +1,89 @@
+\name{is.multitype.ppm}
+\alias{is.multitype.ppm}
+\alias{is.multitype.lppm}
+\title{Test Whether A Point Process Model is Multitype}
+\description{
+  Tests whether a fitted point process model involves ``marks''
+  attached to the points that classify the points into several types.
+}
+\usage{
+  \method{is.multitype}{ppm}(X, \dots) 
+
+  \method{is.multitype}{lppm}(X, \dots) 
+}
+\arguments{
+  \item{X}{
+    Fitted point process model (object of class \code{"ppm"})
+    usually obtained from \code{\link{ppm}}.
+    Alternatively a model of class \code{"lppm"}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\value{
+  Logical value, equal to \code{TRUE} if
+  \code{X} is a model that was fitted to a multitype point pattern dataset.
+}
+\details{
+  ``Marks'' are observations attached to each point of a point pattern.
+  For example the \code{\link{longleaf}} dataset contains the locations
+  of trees, each tree being marked by its diameter;
+  the \code{\link{amacrine}} dataset gives the locations of cells
+  of two types (on/off) and the type of cell may be regarded as a mark attached
+  to the location of the cell.
+
+  The argument \code{X} is a fitted point process model
+  (an object of class \code{"ppm"}) typically obtained
+  by fitting a model to point pattern data using \code{\link{ppm}}.
+
+  This function returns \code{TRUE} if the \emph{original data}
+  (to which the model \code{X} was fitted) were a multitype point pattern.
+
+  Note that this is not the same as testing whether the
+  model involves terms that depend on the marks (that is, whether the
+  fitted model uses the marks in the data, or ignores them).
+  Currently we have not implemented a test for this.
+
+  If this function returns \code{TRUE}, the implications are
+  (for example) that
+  any simulation of this model will require simulation of random marks
+  as well as random point locations.
+}
+\seealso{
+  \code{\link{is.multitype}},
+  \code{\link{is.multitype.ppp}}
+}
+\examples{
+   X <- lansing
+   # Multitype point pattern --- trees marked by species
+
+   \testonly{
+      # Smaller dataset
+      X <- amacrine
+   }
+
+  fit1 <- ppm(X, ~ marks, Poisson())
+  is.multitype(fit1)
+  # TRUE
+
+  fit2 <- ppm(X, ~ 1, Poisson())
+  is.multitype(fit2)
+  # TRUE
+
+  # Unmarked point pattern
+  fit3 <- ppm(cells, ~ 1, Poisson())
+  is.multitype(fit3)
+  # FALSE
+
+}
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
+\keyword{models}
diff --git a/man/is.multitype.ppp.Rd b/man/is.multitype.ppp.Rd
new file mode 100644
index 0000000..328c0f8
--- /dev/null
+++ b/man/is.multitype.ppp.Rd
@@ -0,0 +1,70 @@
+\name{is.multitype.ppp}
+\alias{is.multitype.ppp}
+\alias{is.multitype.lpp}
+\title{Test Whether A Point Pattern is Multitype}
+\description{
+  Tests whether a point pattern has ``marks''
+  attached to the points which classify the points into several types.
+}
+\usage{
+  \method{is.multitype}{ppp}(X, na.action="warn", \dots) 
+
+  \method{is.multitype}{lpp}(X, na.action="warn", \dots) 
+}
+\arguments{
+  \item{X}{
+    Point pattern (object of class \code{"ppp"} or \code{"lpp"})
+  }
+  \item{na.action}{
+    String indicating what to do if \code{NA} values are
+    encountered amongst the marks.
+    Options are \code{"warn"}, \code{"fatal"} and \code{"ignore"}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\value{
+  Logical value, equal to \code{TRUE} if \code{X} is a multitype point pattern.
+}
+\details{
+  ``Marks'' are observations attached to each point of a point pattern.
+  For example the \code{\link{longleaf}} dataset contains the locations
+  of trees, each tree being marked by its diameter;
+  the \code{\link{amacrine}} dataset gives the locations of cells
+  of two types (on/off) and the type of cell may be regarded as a mark attached
+  to the location of the cell.
+
+  This function tests whether the point pattern \code{X}
+  contains or involves marked points, \bold{and} whether the
+  marks are a factor.
+  It is a method for the generic function \code{\link{is.multitype}}.
+  
+  For example, the \code{\link{amacrine}}
+  dataset is multitype (there are two types of cells, on and off),
+  but the \code{\link{longleaf}} dataset is \emph{not} multitype
+  (the marks are real numbers).
+
+  The argument \code{na.action} determines what action will be taken
+  if the point pattern has a vector of marks but some or all of the
+  marks are \code{NA}. Options are   \code{"fatal"} to cause a fatal
+  error; \code{"warn"} to issue a warning and then return \code{TRUE};
+  and \code{"ignore"} to take no action except returning \code{TRUE}.
+}
+\seealso{
+  \code{\link{is.multitype}},
+  \code{\link{is.multitype.ppm}}
+}
+\examples{
+   is.multitype(cells)  #FALSE - no marks
+   is.multitype(longleaf) #FALSE - real valued marks
+   is.multitype(amacrine) #TRUE
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/is.owin.Rd b/man/is.owin.Rd
new file mode 100644
index 0000000..7010c24
--- /dev/null
+++ b/man/is.owin.Rd
@@ -0,0 +1,36 @@
+\name{is.owin}
+\alias{is.owin}
+\title{Test Whether An Object Is A Window}
+\description{
+  Checks whether its argument is a window
+  (object of class \code{"owin"}).
+}
+\usage{
+is.owin(x)
+}
+\arguments{
+  \item{x}{Any object.}
+}
+\details{
+  This function tests whether the object \code{x}
+  is a window object of class
+  \code{"owin"}. See \code{\link{owin.object}} for details
+  of this class.
+
+  The result is determined to be \code{TRUE} if \code{x}
+  inherits from \code{"owin"}, i.e. if \code{x}
+  has \code{"owin"} amongst its classes.
+}
+\value{
+  \code{TRUE} if \code{x} is a window,
+  otherwise \code{FALSE}.
+}
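+\examples{
+  # A minimal sketch; square() creates a window, cells is a point pattern.
+  is.owin(square(1))  # TRUE
+  is.owin(cells)      # FALSE
+}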
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/is.ppm.Rd b/man/is.ppm.Rd
new file mode 100644
index 0000000..94e3623
--- /dev/null
+++ b/man/is.ppm.Rd
@@ -0,0 +1,40 @@
+\name{is.ppm}
+\alias{is.ppm}
+\alias{is.lppm}
+\alias{is.kppm}
+\alias{is.slrm}
+\title{Test Whether An Object Is A Fitted Point Process Model}
+\description{
+  Checks whether its argument is a fitted point process model
+  (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"}
+  or \code{"slrm"}).
+}
+\usage{
+is.ppm(x)
+is.kppm(x)
+is.lppm(x)
+is.slrm(x)
+}
+\arguments{
+  \item{x}{Any object.}
+}
+\details{
+  These functions test whether the object \code{x}
+  is a fitted point process model object of the specified class.
+
+  The result of \code{is.ppm(x)} is \code{TRUE} if \code{x}
+  has \code{"ppm"} amongst its classes, and 
+  otherwise \code{FALSE}. Similarly for the other functions.
+}
+\value{
+  A single logical value.
+}
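+\examples{
+  # A brief sketch: fit a trivial Poisson model and inspect its class.
+  fit <- ppm(cells ~ 1)
+  is.ppm(fit)   # TRUE
+  is.kppm(fit)  # FALSE
+}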
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
+\keyword{models}
diff --git a/man/is.ppp.Rd b/man/is.ppp.Rd
new file mode 100644
index 0000000..8f97927
--- /dev/null
+++ b/man/is.ppp.Rd
@@ -0,0 +1,36 @@
+\name{is.ppp}
+\alias{is.ppp}
+\title{Test Whether An Object Is A Point Pattern}
+\description{
+  Checks whether its argument is a point pattern
+  (object of class \code{"ppp"}).
+}
+\usage{
+is.ppp(x)
+}
+\arguments{
+  \item{x}{Any object.}
+}
+\details{
+  This function tests whether the object \code{x}
+  is a point pattern object of class
+  \code{"ppp"}. See \code{\link{ppm.object}} for details
+  of this class.
+
+  The result is determined to be \code{TRUE} if \code{x}
+  inherits from \code{"ppp"}, i.e. if \code{x}
+  has \code{"ppp"} amongst its classes.
+}
+\value{
+  \code{TRUE} if \code{x} is a point pattern,
+  otherwise \code{FALSE}.
+}
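+\examples{
+  # A minimal sketch; letterR is a window, not a point pattern.
+  is.ppp(cells)    # TRUE
+  is.ppp(letterR)  # FALSE
+}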
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/is.rectangle.Rd b/man/is.rectangle.Rd
new file mode 100644
index 0000000..23f13c3
--- /dev/null
+++ b/man/is.rectangle.Rd
@@ -0,0 +1,42 @@
+\name{is.rectangle}
+\alias{is.rectangle}
+\alias{is.polygonal}
+\alias{is.mask}
+\title{Determine Type of Window}
+\description{
+  Determine whether a window is a
+  rectangle, a polygonal region, or a binary mask.
+}
+\usage{
+  is.rectangle(w)
+  is.polygonal(w)
+  is.mask(w)
+}
+\arguments{
+  \item{w}{
+    Window to be inspected. An object of class \code{"owin"}.
+  }
+}
+\value{
+  Logical value, equal to \code{TRUE} if \code{w} is a window
+  of the specified type.
+}
+\details{
+  These simple functions determine whether a window \code{w}
+  (object of class \code{"owin"}) is
+  a rectangle (\code{is.rectangle(w) = TRUE}), 
+  a domain with polygonal boundary (\code{is.polygonal(w) = TRUE}),
+  or 
+  a binary pixel mask (\code{is.mask(w) = TRUE}). 
+}
+\seealso{
+  \code{\link{owin}}
+}
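+\examples{
+  # Illustrative sketch using standard spatstat windows:
+  is.rectangle(square(1))    # TRUE
+  is.polygonal(letterR)      # TRUE
+  is.mask(as.mask(letterR))  # TRUE
+}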
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/is.stationary.Rd b/man/is.stationary.Rd
new file mode 100644
index 0000000..25cc334
--- /dev/null
+++ b/man/is.stationary.Rd
@@ -0,0 +1,123 @@
+\name{is.stationary}
+\alias{is.stationary}
+\alias{is.stationary.ppm}
+\alias{is.stationary.kppm}
+\alias{is.stationary.lppm}
+\alias{is.stationary.slrm}
+\alias{is.stationary.rmhmodel}
+\alias{is.stationary.dppm}
+\alias{is.stationary.detpointprocfamily}
+\alias{is.poisson}
+\alias{is.poisson.ppm}
+\alias{is.poisson.kppm}
+\alias{is.poisson.lppm}
+\alias{is.poisson.slrm}
+\alias{is.poisson.rmhmodel}
+\alias{is.poisson.interact}
+\title{
+  Recognise Stationary and Poisson Point Process Models
+}
+\description{
+  Given a point process model that has been fitted to data,
+  determine whether the model is a stationary point process,
+  and whether it is a Poisson point process.
+}
+\usage{
+is.stationary(x)
+\method{is.stationary}{ppm}(x)
+\method{is.stationary}{kppm}(x)
+\method{is.stationary}{lppm}(x)
+\method{is.stationary}{slrm}(x)
+\method{is.stationary}{rmhmodel}(x)
+\method{is.stationary}{dppm}(x)
+\method{is.stationary}{detpointprocfamily}(x)
+
+is.poisson(x)
+\method{is.poisson}{ppm}(x)
+\method{is.poisson}{kppm}(x)
+\method{is.poisson}{lppm}(x)
+\method{is.poisson}{slrm}(x)
+\method{is.poisson}{rmhmodel}(x)
+\method{is.poisson}{interact}(x)
+}
+\arguments{
+  \item{x}{
+    A fitted spatial point process model
+    (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"}, \code{"dppm"}
+    or \code{"slrm"}) or similar object.
+  }
+}
+\details{
+  The argument \code{x} represents a fitted spatial point process model
+  or a similar object.
+
+  \code{is.stationary(x)} returns \code{TRUE} if \code{x} represents
+  a stationary point process, and \code{FALSE} if not.
+
+  \code{is.poisson(x)} returns \code{TRUE} if \code{x} represents
+  a Poisson point process, and \code{FALSE} if not.
+
+  The functions \code{is.stationary} and \code{is.poisson} are generic,
+  with methods for the classes \code{"ppm"} (Gibbs point process models),
+  \code{"kppm"} (cluster or Cox point process models),
+  \code{"lppm"} (point process models on a linear network),
+  \code{"slrm"} (spatial logistic regression models) and
+  \code{"rmhmodel"} (model specifications for the
+  Metropolis-Hastings algorithm).
+  Additionally \code{is.stationary} has a method for
+  classes \code{"detpointprocfamily"} and \code{"dppm"}
+  (both determinantal point processes) and
+  \code{is.poisson} has a method for 
+  class \code{"interact"} (interaction structures for Gibbs models). 
+
+  \code{is.poisson.kppm} will return \code{FALSE}, unless
+  the model \code{x} is degenerate:
+  either \code{x} has zero intensity so that its realisations are empty
+  with probability 1, or it is a log-Gaussian Cox process
+  where the log intensity has zero variance.
+
+  \code{is.poisson.slrm} will always return \code{TRUE},
+  by convention.
+}
+\value{
+  A logical value.
+}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{is.marked}} to determine whether a model is a marked
+  point process. 
+  
+  \code{\link{summary.ppm}} for detailed information.
+
+  Model-fitting functions 
+  \code{\link{ppm}},
+  \code{\link{dppm}},
+  \code{\link{kppm}},
+  \code{\link{lppm}},
+  \code{\link{slrm}}.
+}
+\examples{
+  data(cells)
+  data(redwood)
+
+  fit <- ppm(cells ~ x)
+  is.stationary(fit)
+  is.poisson(fit)
+
+  fut <- kppm(redwood ~ 1, "MatClust")
+  is.stationary(fut)
+  is.poisson(fut)
+
+  fot <- slrm(cells ~ x)
+  is.stationary(fot)
+  is.poisson(fot)
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/is.subset.owin.Rd b/man/is.subset.owin.Rd
new file mode 100644
index 0000000..b294377
--- /dev/null
+++ b/man/is.subset.owin.Rd
@@ -0,0 +1,50 @@
+\name{is.subset.owin}
+\alias{is.subset.owin}
+\title{Determine Whether One Window is Contained In Another}
+\description{
+  Tests whether window \code{A} is a subset of window \code{B}.
+}
+\usage{
+is.subset.owin(A, B)
+}
+\arguments{
+  \item{A}{A window object (see Details).}
+  \item{B}{A window object (see Details).}
+}
+\value{
+  Logical scalar; \code{TRUE} if \code{A} is a sub-window of \code{B},
+  otherwise \code{FALSE}.
+}
+\details{
+  This function tests whether the window \code{A} is a subset
+  of the window \code{B}.
+
+  The arguments \code{A} and \code{B} must be window objects
+  (either objects of class \code{"owin"}, or data that can be
+  coerced to this class by \code{\link{as.owin}}).
+
+  Various algorithms are used, depending on the geometrical type
+  of the two windows.
+
+  Note that if \code{B} is not rectangular, the algorithm proceeds by
+  discretising \code{A}, converting it to a pixel mask using 
+  \code{\link{as.mask}}. In this case the resulting
+  answer is only ``approximately correct''. The accuracy of the
+  approximation can be controlled: see \code{\link{as.mask}}.
+}
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+
+\examples{
+   w1 <- as.owin(c(0,1,0,1))
+   w2 <- as.owin(c(-1,2,-1,2))
+   is.subset.owin(w1,w2)  # Returns TRUE.
+   is.subset.owin(w2,w1)  # Returns FALSE.
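+   # A further sketch with a polygonal window (a hypothetical disc);
+   # see Details for the accuracy of non-rectangular comparisons.
+   w3 <- disc(0.4, c(0.5, 0.5))
+   is.subset.owin(w3, w1)  # Returns TRUE.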
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/istat.Rd b/man/istat.Rd
new file mode 100644
index 0000000..5dd05d4
--- /dev/null
+++ b/man/istat.Rd
@@ -0,0 +1,63 @@
+\name{istat}
+\alias{istat}
+\title{Point and Click Interface for Exploratory Analysis of Point Pattern}
+\description{
+  Compute various summary functions for a point pattern
+  using a point-and-click interface.
+}
+\usage{
+ istat(x, xname)
+}
+\arguments{
+  \item{x}{
+    The spatial point pattern to be analysed.
+    An object of class \code{"ppp"}.
+  }
+  \item{xname}{
+    Optional. Character string to use as the title of the dataset.
+  }
+}
+\value{
+  \code{NULL}.
+}
+\details{
+  This command launches an interactive (point-and-click) interface 
+  which offers a choice of spatial summary functions
+  that can be applied to the point pattern \code{x}.
+
+  The selected summary function is computed for the point pattern
+  \code{x} and plotted in a popup window.
+
+  The selection of functions includes
+  \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{pcf}},
+  \code{\link{Fest}}, \code{\link{Gest}} and \code{\link{Jest}}.
+  For the function \code{\link{pcf}} it is possible to control
+  the bandwidth parameter \code{bw}.
+  
+  There is also an option to show simulation envelopes of
+  the summary function.
+}
+\section{Note}{
+   Before adjusting the bandwidth parameter \code{bw},
+   it is advisable to select \emph{No simulation envelopes}
+   to save a lot of computation time.
+}
+\section{Package Dependence}{
+  This function requires the package \pkg{rpanel} to be loaded.
+}
+\seealso{
+  \code{\link{iplot}}
+}
+\examples{
+   if(interactive() && require(rpanel)) {
+      istat(swedishpines)
+   }
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/japanesepines.Rd b/man/japanesepines.Rd
new file mode 100644
index 0000000..8efefea
--- /dev/null
+++ b/man/japanesepines.Rd
@@ -0,0 +1,39 @@
+\name{japanesepines}
+\alias{japanesepines}
+\docType{data}
+\title{
+  Japanese Pines Point Pattern
+}
+\description{
+  The data give the locations of Japanese black pine saplings
+  in a square sampling region in a natural forest.
+  The observations were originally collected by Numata (1961).
+
+  These data are used as a standard example in the textbook of 
+  Diggle (2003); see pages 1, 14, 19, 22, 24, 56--57 and 61.
+} 
+\format{
+  An object of class \code{"ppp"}
+  representing the point pattern of tree locations
+  in a 5.7 x 5.7 metre square, rescaled to the unit square
+  and rounded to two decimal places.
+
+  See \code{\link{ppp.object}} for details of the format of a
+  point pattern object.
+}
+\usage{data(japanesepines)}
+\source{Diggle (2003), obtained from Numata (1961)}
+
+\references{
+  Diggle, P.J. (2003)
+  \emph{Statistical Analysis of Spatial Point Patterns}.
+  Arnold Publishers.
+
+  Numata, M. (1961)
+  Forest vegetation in the vicinity of Choshi. Coastal flora and
+  vegetation at Choshi, Chiba Prefecture. IV.
+  \emph{Bulletin of Choshi Marine Laboratory, Chiba University}
+  \bold{3}, 28--48 (in Japanese).
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/kaplan.meier.Rd b/man/kaplan.meier.Rd
new file mode 100644
index 0000000..2e701cc
--- /dev/null
+++ b/man/kaplan.meier.Rd
@@ -0,0 +1,87 @@
+\name{kaplan.meier}
+\alias{kaplan.meier}
+\title{Kaplan-Meier Estimator using Histogram Data}
+\description{
+  Compute the Kaplan-Meier estimator of a survival time distribution
+  function, from histogram data
+}
+\usage{
+  kaplan.meier(obs, nco, breaks, upperobs=0)
+}
+\arguments{
+  \item{obs}{vector of \eqn{n} integers giving the histogram of
+    all observations (censored or uncensored survival times)
+  }
+  \item{nco}{vector of \eqn{n} integers giving the histogram of
+    uncensored observations (those survival times that are less than or
+    equal to the censoring time)
+  }
+  \item{breaks}{Vector of \eqn{n+1} breakpoints which were used to form
+    both histograms.
+  }
+  \item{upperobs}{
+    Number of observations beyond the rightmost breakpoint, if any.
+  }
+}
+\value{
+  A list with two elements:
+  \item{km}{Kaplan-Meier estimate of the survival time c.d.f. \eqn{F(t)}
+  }
+  \item{lambda}{corresponding Nelson-Aalen estimate of the
+    hazard rate \eqn{\lambda(t)}{lambda(t)}
+  }
+  These are numeric vectors of length \eqn{n}.
+}
+\details{
+  This function is needed mainly for internal use in \pkg{spatstat},
+  but may be useful in other applications where you want to form the
+  Kaplan-Meier estimator from a huge dataset.
+
+  Suppose \eqn{T_i}{T[i]} are the survival times of individuals
+  \eqn{i=1,\ldots,M} with unknown distribution function \eqn{F(t)}
+  which we wish to estimate. Suppose these times are right-censored
+  by random censoring times \eqn{C_i}{C[i]}.
+  Thus the observations consist of right-censored survival times
+  \eqn{\tilde T_i = \min(T_i,C_i)}{T*[i] = min(T[i],C[i])}
+  and non-censoring indicators
+  \eqn{D_i = 1\{T_i \le C_i\}}{D[i] = 1(T[i] <= C[i])}
+  for each \eqn{i}.
+
+  If the number of observations \eqn{M} is large, it is efficient to
+  use histograms.
+  Form the histogram \code{obs} of all observed times \eqn{\tilde T_i}{T*[i]}.
+  That is, \code{obs[k]} counts the number of values 
+  \eqn{\tilde T_i}{T*[i]} in the interval
+  \code{(breaks[k],breaks[k+1]]} for \eqn{k > 1}
+  and \code{[breaks[1],breaks[2]]} for \eqn{k = 1}.
+  Also form the histogram \code{nco} of all uncensored times,
+  i.e. those \eqn{\tilde T_i}{T*[i]} such that \eqn{D_i=1}{D[i]=1}.
+  These two histograms are the arguments passed to \code{kaplan.meier}.
+  
+  The vectors \code{km} and \code{lambda} returned by \code{kaplan.meier}
+  are (histogram approximations to) the Kaplan-Meier estimator
+  of \eqn{F(t)} and its hazard rate \eqn{\lambda(t)}{lambda(t)}.
+  Specifically, \code{km[k]} is an estimate of
+  \code{F(breaks[k+1])}, and \code{lambda[k]} is an estimate of
+  the average of \eqn{\lambda(t)}{lambda(t)} over the interval
+  \code{(breaks[k],breaks[k+1])}.
+
+  The histogram breaks must include \eqn{0}.
+  If the histogram breaks do not span the range of the observations,
+  it is important to count how many survival times
+  \eqn{\tilde T_i}{T*[i]} exceed the rightmost breakpoint,
+  and give this as the value \code{upperobs}.
+}
+\seealso{
+  \code{\link{reduced.sample}},
+  \code{\link{km.rs}}
+}
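+\examples{
+  # A hedged sketch of direct use, with simulated (hypothetical) data:
+  # exponential survival times, right-censored by exponential times.
+  ti <- rexp(1000)                 # true survival times
+  ci <- rexp(1000, rate=0.5)       # censoring times
+  tobs <- pmin(ti, ci)             # observed right-censored times
+  d <- (ti <= ci)                  # non-censoring indicators
+  breaks <- seq(0, 3, length=101)  # breakpoints (must include 0)
+  upperobs <- sum(tobs > 3)        # observations beyond the last break
+  obs <- hist(tobs[tobs <= 3], breaks=breaks, plot=FALSE)$counts
+  nco <- hist(tobs[d & tobs <= 3], breaks=breaks, plot=FALSE)$counts
+  v <- kaplan.meier(obs, nco, breaks, upperobs=upperobs)
+  plot(breaks[-1], v$km, type="l", xlab="t", ylab="F(t)")
+}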
+\author{\adrian
+  
+  
+  and \rolf
+  
+  }
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/kernel.factor.Rd b/man/kernel.factor.Rd
new file mode 100644
index 0000000..fba34f4
--- /dev/null
+++ b/man/kernel.factor.Rd
@@ -0,0 +1,57 @@
+\name{kernel.factor}
+\alias{kernel.factor}
+\title{Scale factor for density kernel} 
+\description{
+  Returns a scale factor for the kernels used in density estimation
+  for numerical data.
+}
+\usage{
+  kernel.factor(kernel = "gaussian")
+}
+\arguments{
+  \item{kernel}{
+    String name of the kernel.
+    Options are
+    \code{"gaussian"}, \code{"rectangular"},
+    \code{"triangular"},
+    \code{"epanechnikov"},
+    \code{"biweight"},
+    \code{"cosine"} and \code{"optcosine"}.
+    (Partial matching is used).
+  }
+}
+\details{
+  Kernel estimation of a probability density in one dimension
+  is performed by \code{\link[stats]{density.default}}
+  using a kernel function selected from the list above.
+
+  This function computes a scale constant for the kernel.
+  For the Gaussian kernel, this constant is equal to 1.
+  Otherwise, the constant \eqn{c} is such that the kernel
+  with standard deviation \eqn{1} is supported on the interval
+  \eqn{[-c,c]}. 
+  
+  For more information about these kernels,
+  see \code{\link[stats]{density.default}}. 
+}
+\value{
+  A single number.
+}
+\seealso{
+  \code{\link[stats]{density.default}},
+  \code{\link{dkernel}},
+  \code{\link{kernel.moment}},
+  \code{\link{kernel.squint}}
+}
+\examples{
+   kernel.factor("rect")
+   # bandwidth for Epanechnikov kernel with half-width h=1
+   h <- 1
+   bw <- h/kernel.factor("epa")
+}
+\author{\adrian
+  and Martin Hazelton
+}
+\keyword{methods}
+\keyword{nonparametric}
+\keyword{smooth}
diff --git a/man/kernel.moment.Rd b/man/kernel.moment.Rd
new file mode 100644
index 0000000..f8e3010
--- /dev/null
+++ b/man/kernel.moment.Rd
@@ -0,0 +1,66 @@
+\name{kernel.moment}
+\alias{kernel.moment}
+\title{Moment of Smoothing Kernel}
+\description{
+  Computes the complete or incomplete \eqn{m}th moment of a
+  smoothing kernel.
+}
+\usage{
+  kernel.moment(m, r, kernel = "gaussian")
+}
+\arguments{
+  \item{m}{
+    Exponent (order of moment).
+    An integer.
+  }
+  \item{r}{
+    Upper limit of integration for the incomplete moment.
+    A numeric value or numeric vector.
+    Set \code{r=Inf} to obtain the complete moment.
+  }
+  \item{kernel}{
+    String name of the kernel.
+    Options are
+    \code{"gaussian"}, \code{"rectangular"},
+    \code{"triangular"},
+    \code{"epanechnikov"},
+    \code{"biweight"},
+    \code{"cosine"} and \code{"optcosine"}.
+    (Partial matching is used).
+  }
+}
+\details{
+  Kernel estimation of a probability density in one dimension
+  is performed by \code{\link[stats]{density.default}}
+  using a kernel function selected from the list above.
+  For more information about these kernels,
+  see \code{\link[stats]{density.default}}. 
+
+  The function \code{kernel.moment} computes the partial integral 
+  \deqn{
+    \int_{-\infty}^r t^m k(t) dt
+  }{
+    integral[-Inf][r] t^m k(t) dt
+  }
+  where \eqn{k(t)} is the selected kernel, \eqn{r} is the upper limit of
+  integration, and \eqn{m} is the exponent or order.
+}
+\value{
+  A single number, or a numeric vector of the same length as \code{r}.
+}
+\seealso{
+  \code{\link[stats]{density.default}},
+  \code{\link{dkernel}},
+  \code{\link{kernel.factor}}
+}
+\examples{
+   kernel.moment(1, 0.1, "epa")
+   curve(kernel.moment(2, x, "epa"), from=-1, to=1)
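+   # complete moments (r=Inf): total mass 1 and, by symmetry, mean 0
+   kernel.moment(0, Inf, "epa")  # should equal 1
+   kernel.moment(1, Inf, "epa")  # should equal 0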
+}
+\author{
+  \adrian
+  and Martin Hazelton.
+}
+\keyword{methods}
+\keyword{nonparametric}
+\keyword{smooth}
diff --git a/man/kernel.squint.Rd b/man/kernel.squint.Rd
new file mode 100644
index 0000000..8acfc73
--- /dev/null
+++ b/man/kernel.squint.Rd
@@ -0,0 +1,63 @@
+\name{kernel.squint}
+\alias{kernel.squint}
+\title{Integral of Squared Kernel} 
+\description{
+  Computes the integral of the squared kernel,
+  for the kernels used in density estimation
+  for numerical data.
+}
+\usage{
+  kernel.squint(kernel = "gaussian", bw=1)
+}
+\arguments{
+  \item{kernel}{
+    String name of the kernel.
+    Options are
+    \code{"gaussian"}, \code{"rectangular"},
+    \code{"triangular"},
+    \code{"epanechnikov"},
+    \code{"biweight"},
+    \code{"cosine"} and \code{"optcosine"}.
+    (Partial matching is used).
+  }
+  \item{bw}{
+    Bandwidth (standard deviation) of the kernel.
+  }
+}
+\details{
+  Kernel estimation of a probability density in one dimension
+  is performed by \code{\link[stats]{density.default}}
+  using a kernel function selected from the list above.
+
+  This function computes the integral of the squared kernel,
+  \deqn{
+    R = \int_{-\infty}^{\infty} k(x)^2 \, {\rm d}x
+  }{
+    R = integral of k(x)^2 dx from x = -infinity to x = +infinity
+  }
+  where \eqn{k(x)} is the kernel with bandwidth \code{bw}.
+}
+\value{
+  A single number.
+}
+\seealso{
+  \code{\link[stats]{density.default}},
+  \code{\link{dkernel}},
+  \code{\link{kernel.moment}},
+  \code{\link{kernel.factor}}
+}
+\examples{
+   kernel.squint("gaussian", 3)
+
+   # integral of squared Epanechnikov kernel with half-width h=1
+   h <- 1
+   bw <- h/kernel.factor("epa")
+   kernel.squint("epa", bw)
+}
+\author{
+  \spatstatAuthors
+  and Martin Hazelton
+}
+\keyword{methods}
+\keyword{nonparametric}
+\keyword{smooth}
diff --git a/man/km.rs.Rd b/man/km.rs.Rd
new file mode 100644
index 0000000..fbb85bb
--- /dev/null
+++ b/man/km.rs.Rd
@@ -0,0 +1,90 @@
+\name{km.rs}
+\alias{km.rs}
+\title{Kaplan-Meier and Reduced Sample Estimator using Histograms}
+\description{
+  Compute the Kaplan-Meier and Reduced Sample estimators of a
+  survival time distribution function, using histogram techniques
+}
+\usage{
+  km.rs(o, cc, d, breaks)
+}
+\arguments{
+  \item{o}{vector of observed survival times
+  }
+  \item{cc}{vector of censoring times
+  }
+  \item{d}{vector of non-censoring indicators
+  }
+  \item{breaks}{Vector of breakpoints to be used to form histograms.
+  }
+}
+\value{
+  A list with five elements
+  \item{rs}{Reduced-sample estimate of the survival time c.d.f. \eqn{F(t)}
+  }
+  \item{km}{Kaplan-Meier estimate of the survival time c.d.f. \eqn{F(t)}
+  }
+  \item{hazard}{corresponding Nelson-Aalen estimate of the
+    hazard rate \eqn{\lambda(t)}{lambda(t)}
+  }
+  \item{r}{values of \eqn{t} for which \eqn{F(t)} is estimated
+  }
+  \item{breaks}{the breakpoints vector
+  }
+}
+\details{
+  This function is needed mainly for internal use in \pkg{spatstat},
+  but may be useful in other applications where you want to form the
+  Kaplan-Meier estimator from a huge dataset.
+
+  Suppose \eqn{T_i}{T[i]} are the survival times of individuals
+  \eqn{i=1,\ldots,M} with unknown distribution function \eqn{F(t)}
+  which we wish to estimate. Suppose these times are right-censored
+  by random censoring times \eqn{C_i}{C[i]}.
+  Thus the observations consist of right-censored survival times
+  \eqn{\tilde T_i = \min(T_i,C_i)}{T*[i] = min(T[i],C[i])}
+  and non-censoring indicators
+  \eqn{D_i = 1\{T_i \le C_i\}}{D[i] = 1(T[i] <= C[i])}
+  for each \eqn{i}.
+
+  The arguments to this function are 
+  vectors \code{o}, \code{cc}, \code{d}
+  of observed values of \eqn{\tilde T_i}{T*[i]}, \eqn{C_i}{C[i]}
+  and \eqn{D_i}{D[i]} respectively.
+  The function computes histograms and forms the reduced-sample
+  and Kaplan-Meier estimates of  \eqn{F(t)} by
+  invoking the functions \code{\link{kaplan.meier}}
+  and \code{\link{reduced.sample}}.
+  This is efficient if the length of \code{o}, \code{cc} and \code{d}
+  (i.e. the number of observations) is large.
+
+  The vectors \code{km} and \code{hazard} returned by \code{km.rs}
+  are (histogram approximations to) the Kaplan-Meier estimator
+  of \eqn{F(t)} and its hazard rate \eqn{\lambda(t)}{lambda(t)}.
+  Specifically, \code{km[k]} is an estimate of
+  \code{F(breaks[k+1])}, and \code{lambda[k]} is an estimate of
+  the average of \eqn{\lambda(t)}{lambda(t)} over the interval
+  \code{(breaks[k],breaks[k+1])}. This approximation is exact only if the
+  survival times are discrete and the 
+  histogram breaks are fine enough to ensure that each interval
+  \code{(breaks[k],breaks[k+1])} contains only one possible value of
+  the survival time. 
+
+  The vector \code{rs} is the reduced-sample estimator,
+  \code{rs[k]} being the reduced sample estimate of \code{F(breaks[k+1])}.
+  This value is exact, i.e. the use of histograms does not introduce any
+  approximation error in the reduced-sample estimator.
+}
+\seealso{
+  \code{\link{reduced.sample}},
+  \code{\link{kaplan.meier}}
+}
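+\examples{
+  # A hedged sketch with simulated (hypothetical) censored data:
+  ti <- rexp(500)             # true survival times
+  ci <- rexp(500, rate=0.5)   # censoring times
+  o <- pmin(ti, ci)           # observed right-censored times
+  d <- (ti <= ci)             # non-censoring indicators
+  breaks <- seq(0, max(o), length=51)
+  v <- km.rs(o, ci, d, breaks)
+  plot(v$r, v$km, type="l", xlab="t", ylab="Kaplan-Meier estimate of F(t)")
+}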
+\author{\adrian
+  
+  
+  and \rolf
+  
+  }
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/kppm.Rd b/man/kppm.Rd
new file mode 100644
index 0000000..d043d45
--- /dev/null
+++ b/man/kppm.Rd
@@ -0,0 +1,424 @@
+\name{kppm}
+\alias{kppm}
+\alias{kppm.formula}
+\alias{kppm.ppp}
+\alias{kppm.quad}
+\concept{point process model}
+\concept{Cox point process}
+\concept{cluster process}
+\concept{Neyman-Scott cluster process}
+\title{Fit Cluster or Cox Point Process Model}
+\description{
+  Fit a homogeneous or inhomogeneous cluster process or
+  Cox point process model to a point pattern.
+}
+\usage{
+  kppm(X, \dots)
+
+  \method{kppm}{formula}(X,
+                clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"),
+                \dots,
+                data=NULL)
+
+  \method{kppm}{ppp}(X,
+       trend = ~1,
+       clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"),
+       data = NULL,
+       ...,
+       covariates=data,
+       subset,
+       method = c("mincon", "clik2", "palm"),
+       improve.type = c("none", "clik1", "wclik1", "quasi"),
+       improve.args = list(),
+       weightfun=NULL,
+       control=list(),
+       algorithm="Nelder-Mead",
+       statistic="K",
+       statargs=list(),
+       rmax = NULL,
+       covfunargs=NULL,
+       use.gam=FALSE,
+       nd=NULL, eps=NULL)
+
+\method{kppm}{quad}(X,
+       trend = ~1,
+       clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"),
+       data = NULL,
+       ...,
+       covariates=data,
+       subset,
+       method = c("mincon", "clik2", "palm"),
+       improve.type = c("none", "clik1", "wclik1", "quasi"),
+       improve.args = list(),
+       weightfun=NULL,
+       control=list(),
+       algorithm="Nelder-Mead",
+       statistic="K",
+       statargs=list(),
+       rmax = NULL,
+       covfunargs=NULL,
+       use.gam=FALSE,
+       nd=NULL, eps=NULL)
+}
+\arguments{
+  \item{X}{
+    A point pattern dataset (object of class \code{"ppp"} or
+    \code{"quad"}) to which the model should be fitted, or a
+    \code{formula} in the \R language defining the model. See Details.
+  }
+  \item{trend}{
+    An \R formula, with no left hand side,
+    specifying the form of the log intensity.
+  }
+  \item{clusters}{
+    Character string determining the cluster model.
+    Partially matched.
+    Options are \code{"Thomas"}, \code{"MatClust"},
+    \code{"Cauchy"}, \code{"VarGamma"} and \code{"LGCP"}.
+  }
+  \item{data,covariates}{
+    The values of spatial covariates (other than the Cartesian
+    coordinates) required by the model.
+    A named list of pixel images, functions, windows,
+    tessellations or numeric constants.
+  }
+  \item{\dots}{
+    Additional arguments. See Details.
+  }
+  \item{subset}{
+    Optional.
+    A subset of the spatial domain,
+    to which the model-fitting should be restricted.
+    A window (object of class \code{"owin"})
+    or a logical-valued pixel image (object of class \code{"im"}),
+    or an expression (possibly involving the names of entries in \code{data})
+    which can be evaluated to yield a window or pixel image.
+  }
+  \item{method}{
+    The fitting method. Either 
+    \code{"mincon"} for minimum contrast,
+    \code{"clik2"} for second order composite likelihood,
+    or \code{"palm"} for Palm likelihood.
+    Partially matched.
+  }
+  \item{improve.type}{
+    Method for updating the initial estimate of the trend.
+    Initially the trend is estimated as if the process
+    is an inhomogeneous Poisson process.
+    The default, \code{improve.type = "none"}, is to use this initial estimate.
+    Otherwise, the trend estimate is
+    updated by \code{\link{improve.kppm}}, using information
+    about the pair correlation function.
+    Options are \code{"clik1"}
+    (first order composite likelihood, essentially equivalent to \code{"none"}),
+    \code{"wclik1"} (weighted first order composite likelihood) and
+    \code{"quasi"} (quasi likelihood).
+  }
+  \item{improve.args}{
+    Additional arguments passed to \code{\link{improve.kppm}} when
+    \code{improve.type != "none"}. See Details.
+  }
+  \item{weightfun}{
+    Optional weighting function \eqn{w}
+    in the composite likelihood or Palm likelihood.
+    A \code{function} in the \R language.
+    See Details.
+  }
+  \item{control}{
+    List of control parameters passed to the optimization function
+    \code{\link[stats]{optim}}.
+  }
+  \item{algorithm}{
+    Character string determining the mathematical optimisation algorithm
+    to be used by \code{\link[stats]{optim}}. See
+    the argument \code{method} of \code{\link[stats]{optim}}.
+  }
+  \item{statistic}{
+    Name of the summary statistic to be used
+    for minimum contrast estimation: either \code{"K"} or \code{"pcf"}.
+  }
+  \item{statargs}{
+    Optional list of arguments to be used when calculating
+    the \code{statistic}. See Details.
+  }
+  \item{rmax}{
+    Maximum value of interpoint distance
+    to use in the composite likelihood.
+  }
+  \item{covfunargs,use.gam,nd,eps}{
+    Arguments passed to \code{\link{ppm}} when fitting the intensity.
+  }
+}
+\details{
+  This function fits a clustered point process model to the
+  point pattern dataset \code{X}. 
+
+  The model may be either a \emph{Neyman-Scott cluster process}
+  or another \emph{Cox process}.
+  The type of model is determined by the argument \code{clusters}.
+  Currently the options 
+  are \code{clusters="Thomas"} for the Thomas process,
+  \code{clusters="MatClust"} for the Matern cluster process,
+  \code{clusters="Cauchy"} for the Neyman-Scott cluster process
+  with Cauchy kernel,
+  \code{clusters="VarGamma"} for the Neyman-Scott cluster process
+  with Variance Gamma kernel (requires an additional argument \code{nu}
+  to be passed through the dots; see \code{\link{rVarGamma}} for details),
+  and \code{clusters="LGCP"} for the log-Gaussian Cox process (may
+  require additional arguments passed through \code{\dots}; see
+  \code{\link{rLGCP}} for details on argument names).
+  The first four models are Neyman-Scott cluster processes.
+  
+  The algorithm first estimates the intensity function
+  of the point process using \code{\link{ppm}}.
+  The argument \code{X} may be a point pattern
+  (object of class \code{"ppp"}) or a quadrature scheme
+  (object of class \code{"quad"}). The intensity is specified by
+  the \code{trend} argument.
+  If the trend formula is \code{~1} (the default)
+  then the model is \emph{homogeneous}. The algorithm begins by
+  estimating the intensity as the number of points divided by
+  the area of the window.
+  Otherwise, the model is \emph{inhomogeneous}. 
+  The algorithm begins by fitting a Poisson process with log intensity
+  of the form specified by the formula \code{trend}.
+  (See \code{\link{ppm}} for further explanation).
+
+  The argument \code{X} may also be a \code{formula} in the
+  \R language. The right hand side of the formula gives the
+  \code{trend} as described above. The left hand side of the formula
+  gives the point pattern dataset to which the model should be fitted.
+
+  If \code{improve.type="none"} this is the final estimate of the
+  intensity. Otherwise, the intensity estimate is updated, as explained in
+  \code{\link{improve.kppm}}. Additional arguments to
+  \code{\link{improve.kppm}} are passed as a named list in
+  \code{improve.args}.
+  
+  The clustering parameters of the model are then fitted
+  either by minimum contrast estimation, or by maximum
+  composite likelihood.
+
+  \describe{
+   \item{Minimum contrast:}{
+      If \code{method = "mincon"} (the default) clustering parameters of
+      the model will be fitted
+      by minimum contrast estimation, that is, by matching the theoretical
+      \eqn{K}-function of the model to the empirical \eqn{K}-function
+      of the data, as explained in \code{\link{mincontrast}}.
+
+      For a homogeneous model (\code{ trend = ~1 })
+      the empirical \eqn{K}-function of the data is computed
+      using \code{\link{Kest}},
+      and the parameters of the cluster model are estimated by
+      the method of minimum contrast.
+
+      For an inhomogeneous model, 
+      the inhomogeneous \eqn{K} function is estimated
+      by \code{\link{Kinhom}} using the fitted intensity.
+      Then the parameters of the cluster model
+      are estimated by the method of minimum contrast using the
+      inhomogeneous \eqn{K} function. This two-step estimation
+      procedure is due to Waagepetersen (2007).
+  
+      If \code{statistic="pcf"} then instead of using the
+      \eqn{K}-function, the algorithm will use
+      the pair correlation function \code{\link{pcf}} for homogeneous
+      models and the inhomogeneous pair correlation function
+      \code{\link{pcfinhom}} for inhomogeneous models.
+      In this case, the smoothing parameters of the pair correlation
+      can be controlled using the argument \code{statargs},
+      as shown in the Examples.
+
+      Additional arguments \code{\dots} will be passed to
+      \code{\link{mincontrast}} to control the minimum contrast fitting
+      algorithm.
+    }
+    \item{Composite likelihood:}{
+      If \code{method = "clik2"} the clustering parameters of the
+      model will be fitted by maximising the second-order composite likelihood
+      (Guan, 2006). The log composite likelihood is
+      \deqn{
+	\sum_{i,j} w(d_{ij}) \log\rho(d_{ij}; \theta)
+	- \left( \sum_{i,j} w(d_{ij}) \right)
+	\log \int_D \int_D w(\|u-v\|) \rho(\|u-v\|; \theta)\, du\, dv
+      }{
+	sum[i,j] w(d[i,j]) log(rho(d[i,j]; theta))
+	- (sum[i,j] w(d[i,j]))
+	log(integral[D,D] w(||u-v||) rho(||u-v||; theta) du dv)
+      }
+      where the sums are taken over all pairs of data points
+      \eqn{x_i, x_j}{x[i], x[j]} separated by a distance
+      \eqn{d_{ij} = \| x_i - x_j\|}{d[i,j] = ||x[i] - x[j]||}
+      less than \code{rmax},
+      and the double integral is taken over all pairs of locations
+      \eqn{u,v} in the spatial window of the data.
+      Here \eqn{\rho(d;\theta)}{rho(d;theta)} is the
+      pair correlation function of the model with
+      cluster parameters \eqn{\theta}{theta}.
+      
+      The function \eqn{w} in the composite likelihood
+      is a weighting function and may be chosen arbitrarily.
+      It is specified by the argument \code{weightfun}.
+      If this is missing or \code{NULL} then the default is
+      a threshold weight function,
+      \eqn{w(d) = 1(d \le R)}{w(d) = 1(d <= R)}, where \eqn{R} is \code{rmax/2}.
+    }
+    \item{Palm likelihood:}{
+      If \code{method = "palm"} the clustering parameters of the
+      model will be fitted by maximising the Palm loglikelihood
+      (Tanaka et al, 2008)
+      \deqn{
+        \sum_{i,j} w(x_i, x_j) \log \lambda_P(x_j \mid x_i; \theta)
+        - \sum_i \int_D w(x_i, u) \lambda_P(u \mid x_i; \theta) \, {\rm d} u
+      }{
+        sum[i,j] w(x[i], x[j]) log(lambdaP(x[j] | x[i]; theta))
+        - sum[i] integral[D] w(x[i], u) lambdaP(u | x[i]; theta) du
+      }
+      with the same notation as above. Here
+      \eqn{\lambda_P(u \mid v; \theta)}{lambdaP(u|v;theta)} is the Palm
+      intensity of the model at location \eqn{u}, given that there is a
+      point at \eqn{v}.
+    }
+  }
+  In all three methods, the optimisation is performed by the generic
+  optimisation algorithm \code{\link[stats]{optim}}.
+  The behaviour of this algorithm can be modified using the
+  argument \code{control}.
+  Useful control arguments include
+  \code{trace}, \code{maxit} and \code{abstol}
+  (documented in the help for \code{\link[stats]{optim}}).
+
+  Fitting the LGCP model requires the \pkg{RandomFields} package,
+  except in the default case where the exponential covariance
+  is assumed.
+}
+\section{Log-Gaussian Cox Models}{
+  To fit a log-Gaussian Cox model with non-exponential covariance,
+  specify \code{clusters="LGCP"} and use additional arguments
+  to specify the covariance structure. These additional arguments can
+  be given individually in the call to \code{kppm}, or they can be
+  collected together in a list called \code{covmodel}.
+
+  For example a Matern model with parameter \eqn{\nu=0.5} could be specified
+  either by \code{kppm(X, clusters="LGCP", model="matern", nu=0.5)} or by
+  \code{kppm(X, clusters="LGCP", covmodel=list(model="matern", nu=0.5))}.
+
+  The argument \code{model} specifies the type of covariance
+  model: the default is \code{model="exp"} for an exponential covariance.
+  Alternatives include \code{"matern"}, \code{"cauchy"} and \code{"spheric"}.
+  Model names correspond to functions beginning with \code{RM} in the
+  \pkg{RandomFields} package: for example \code{model="matern"}
+  corresponds to the function \code{RMmatern} in the 
+  \pkg{RandomFields} package.
+  
+  Additional arguments are passed to the
+  relevant function in the \pkg{RandomFields} package:
+  for example if \code{model="matern"} then the additional argument
+  \code{nu} is required, and is passed to the function
+  \code{RMmatern} in the \pkg{RandomFields} package.
+
+  Note that it is not possible to use \emph{anisotropic} covariance models
+  because the \code{kppm} technique assumes the pair correlation function
+  is isotropic.
+}
+\value{
+  An object of class \code{"kppm"} representing the fitted model.
+  There are methods for printing, plotting, predicting, simulating
+  and updating objects of this class.
+}
+\section{Error and warning messages}{
+  See \code{\link{ppm.ppp}} for a list of common error messages
+  and warnings originating from the first stage of model-fitting.
+}
+\seealso{
+  Methods for \code{kppm} objects:
+  \code{\link{plot.kppm}},
+  \code{\link{fitted.kppm}},
+  \code{\link{predict.kppm}},
+  \code{\link{simulate.kppm}},
+  \code{\link{update.kppm}},
+  \code{\link{vcov.kppm}},
+  \code{\link[spatstat:methods.kppm]{methods.kppm}},
+  \code{\link{as.ppm.kppm}},
+  \code{\link{Kmodel.kppm}},
+  \code{\link{pcfmodel.kppm}}.
+
+  Minimum contrast fitting algorithm:
+  \code{\link{mincontrast}}.
+
+  Alternative fitting algorithms:
+  \code{\link{thomas.estK}},
+  \code{\link{matclust.estK}},
+  \code{\link{lgcp.estK}},
+  \code{\link{cauchy.estK}},
+  \code{\link{vargamma.estK}},
+  \code{\link{thomas.estpcf}},
+  \code{\link{matclust.estpcf}},
+  \code{\link{lgcp.estpcf}},
+  \code{\link{cauchy.estpcf}},
+  \code{\link{vargamma.estpcf}}.
+
+  Summary statistics:
+  \code{\link{Kest}},
+  \code{\link{Kinhom}},
+  \code{\link{pcf}},
+  \code{\link{pcfinhom}}.
+
+  See also \code{\link{ppm}}
+}
+\references{
+  Guan, Y. (2006) 
+  A composite likelihood approach in fitting spatial point process
+  models.
+  \emph{Journal of the American Statistical Association}
+  \bold{101}, 1502--1512.
+
+  Jalilian, A., Guan, Y. and Waagepetersen, R. (2012)
+  Decomposition of variance for spatial Cox processes.
+  \emph{Scandinavian Journal of Statistics} \bold{40}, 119--137.
+
+  Tanaka, U. and Ogata, Y. and Stoyan, D. (2008)
+  Parameter estimation and model selection for
+  Neyman-Scott point processes. 
+  \emph{Biometrical Journal} \bold{50}, 43--57.
+
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\examples{
+  # method for point patterns
+  kppm(redwood, ~1, "Thomas")
+  # method for formulas
+  kppm(redwood ~ 1, "Thomas")
+
+  kppm(redwood ~ 1, "Thomas", method="c")
+  kppm(redwood ~ 1, "Thomas", method="p")
+
+  kppm(redwood ~ x, "MatClust") 
+  kppm(redwood ~ x, "MatClust", statistic="pcf", statargs=list(stoyan=0.2)) 
+  kppm(redwood ~ x, cluster="Cauchy", statistic="K")
+  kppm(redwood, cluster="VarGamma", nu = 0.5, statistic="pcf")
+
+  # LGCP models
+  kppm(redwood ~ 1, "LGCP", statistic="pcf")
+  if(require("RandomFields")) {
+    kppm(redwood ~ x, "LGCP", statistic="pcf",
+                              model="matern", nu=0.3,
+                              control=list(maxit=10))
+  }
+
+  # fit with composite likelihood method
+  kppm(redwood ~ x, "VarGamma", method="clik2", nu.ker=-3/8)
+
+  # fit intensity with quasi-likelihood method
+  kppm(redwood ~ x, "Thomas", improve.type = "quasi")
+}
+\author{
+  \spatstatAuthors,
+  with contributions from Abdollah Jalilian and Rasmus Waagepetersen.
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/lansing.Rd b/man/lansing.Rd
new file mode 100644
index 0000000..ea3d23c
--- /dev/null
+++ b/man/lansing.Rd
@@ -0,0 +1,106 @@
+\name{lansing}
+\alias{lansing}
+\docType{data}
+\title{
+  Lansing Woods Point Pattern
+}
+\description{
+  Locations and botanical classification of trees in Lansing Woods.
+
+  The data come from an investigation of a 924 ft x 924 ft (19.6 acre)
+  plot in Lansing Woods, Clinton County, Michigan USA
+  by D.J. Gerrard. The data give the locations of 2251 trees and 
+  their botanical classification (into hickories, maples, red oaks,
+  white oaks, black oaks and miscellaneous trees).
+  The original plot size (924 x 924 feet)
+  has been rescaled to the unit square.
+  
+  Note that the data contain duplicated points (two points at the
+  same location). To determine which points are duplicates,
+  use \code{\link{duplicated.ppp}}.
+  To remove the duplication, use \code{\link{unique.ppp}}.
+} 
+\format{
+  An object of class \code{"ppp"}
+  representing the point pattern of tree locations.
+  Entries include
+  \tabular{ll}{
+    \code{x} \tab Cartesian \eqn{x}-coordinate of tree \cr
+    \code{y} \tab Cartesian \eqn{y}-coordinate of tree \cr
+    \code{marks} \tab factor with levels indicating species of
+      each tree
+  }
+  The levels of \code{marks} are
+  \code{blackoak},
+  \code{hickory},
+  \code{maple},
+  \code{misc},
+  \code{redoak} and
+  \code{whiteoak}.
+  See \code{\link{ppp.object}} for details of the format of a
+  point pattern object.
+}
+\usage{data(lansing)}
+\examples{
+     data(lansing)
+     plot(lansing)
+     summary(lansing)
+     plot(split(lansing))
+     plot(split(lansing)$maple)
+}
+\references{
+Besag, J. (1978)
+Some methods of statistical analysis for spatial data.
+\emph{Bull. Internat. Statist. Inst.} \bold{44}, 77--92.
+
+Cox, T.F. (1976)
+The robust estimation of the density of a forest
+stand using a new conditioned distance method.
+\emph{Biometrika} \bold{63}, 493--500.
+
+Cox, T.F. (1979)
+A method for mapping the dense and sparse regions of
+a forest stand.
+\emph{Applied Statistics} \bold{28}, 14--19.
+
+Cox, T.F. and Lewis, T. (1976)
+A conditioned distance ratio method for analysing spatial patterns.
+\emph{Biometrika} \bold{63}, 483--492.
+
+Diggle, P.J. (1979a)
+The detection of random heterogeneity in plant populations.
+\emph{Biometrics} \bold{33}, 390--394.
+
+Diggle, P.J. (1979b)
+Statistical methods for spatial point patterns in ecology.
+\emph{Spatial and temporal analysis in ecology}.
+R.M. Cormack and J.K. Ord (eds.)
+Fairland: International Co-operative Publishing House.
+pages 95--150.
+
+Diggle, P.J. (1981)
+Some graphical methods in the analysis of spatial point patterns.
+In \emph{Interpreting Multivariate Data}.
+V. Barnett (eds.) John Wiley and Sons. Pages 55--73.
+
+Diggle, P.J. (1983)
+\emph{Statistical analysis of spatial point patterns}.
+Academic Press.
+
+Gerrard, D.J. (1969)
+Competition quotient: a new measure of the competition
+affecting individual forest trees.
+Research Bulletin 20, Agricultural Experiment Station,
+Michigan State University.
+
+Lotwick, H.W. (1981)
+\emph{Spatial stochastic point processes}.
+PhD thesis, University of Bath, UK.
+
+Ord, J.K. (1978) 
+How many trees in a forest?
+\emph{Mathematical Scientist} \bold{3}, 23--33.
+
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/laslett.Rd b/man/laslett.Rd
new file mode 100644
index 0000000..0748647
--- /dev/null
+++ b/man/laslett.Rd
@@ -0,0 +1,152 @@
+\name{laslett}
+\alias{laslett}
+\title{
+  Laslett's Transform
+}
+\description{
+  Apply Laslett's Transform to a spatial region,
+  returning the original and transformed regions,
+  and the original and transformed positions of the lower tangent points.
+  This is a diagnostic for the Boolean model.
+}
+\usage{
+laslett(X, \dots, verbose = FALSE, plotit = TRUE, discretise = FALSE,
+        type=c("lower", "upper", "left", "right"))
+}
+\arguments{
+  \item{X}{
+    Spatial region to be transformed.
+    A window (object of class \code{"owin"}) or a logical-valued pixel
+    image (object of class \code{"im"}).
+  }
+  \item{\dots}{
+    Graphics arguments to control the plot (passed to
+    \code{\link{plot.laslett}} when \code{plotit=TRUE})
+    or arguments determining the pixel resolution
+    (passed to \code{\link{as.mask}}).
+  }
+  \item{verbose}{
+    Logical value indicating whether to print progress reports.
+  }
+  \item{plotit}{
+    Logical value indicating whether to plot the result.
+  }
+  \item{discretise}{
+    Logical value indicating whether polygonal windows should first be
+    converted to pixel masks before the Laslett transform is
+    computed. This should be set to \code{TRUE} for very complicated
+    polygons. 
+  }
+  \item{type}{
+    Type of tangent points to be detected.
+    This also determines the direction of contraction in the
+    set transformation.
+    Default is \code{type="lower"}.
+  }
+}
+\details{
+  This function finds the lower tangent points of the spatial region \code{X},
+  then applies Laslett's Transform to the space,
+  and records the transformed positions of the lower tangent points.
+
+  Laslett's transform is a diagnostic for the Boolean Model.
+  A test of the Boolean model can be performed by applying a test of CSR
+  to the transformed tangent points. See the Examples.
+
+  The rationale is that, if the region \code{X} was generated by a
+  Boolean model with convex grains, then the lower tangent points of
+  \code{X}, when subjected to Laslett's transform,
+  become a Poisson point process (Cressie, 1993, section 9.3.5;
+  Molchanov, 1997; Barbour and Schmidt, 2001).
+
+  Intuitively, Laslett's transform is a way to account for the fact that
+  tangent points of \code{X} cannot occur \emph{inside} \code{X}.
+  It treats the interior of \code{X} as empty space, and collapses
+  this empty space so that only the \emph{exterior} of \code{X} remains.
+  In this collapsed space, the tangent points are completely random.
+
+  Formally, Laslett's transform is a random (i.e. data-dependent)
+  spatial transformation which maps each spatial
+  location \eqn{(x,y)} to a new location \eqn{(x',y)} at the same height
+  \eqn{y}. The transformation is defined so that \eqn{x'}
+  is the total \emph{uncovered} length of the line segment from \eqn{(0,y)} to
+  \eqn{(x,y)}, that is, the total length of the parts of this segment that
+  fall outside the region \code{X}.
+
+  In more colourful terms, suppose we use an abacus to display a
+  pixellated version of \code{X}. Each wire of the abacus represents one
+  horizontal line in the pixel image. Each pixel lying \emph{outside}
+  the region \code{X} is represented by a bead of the abacus; pixels
+  \emph{inside} \code{X} are represented by the absence of a bead. Next
+  we find any beads which are lower tangent points of \code{X}, and
+  paint them green. Then Laslett's Transform is applied by pushing all
+  beads to the left, as far as possible. The final locations of all the
+  beads provide a new spatial region, inside which is the point pattern
+  of tangent points (marked by the green-painted beads). 
+
+  If \code{plotit=TRUE} (the default), a before-and-after plot is
+  generated, showing the region \code{X} and the tangent points
+  before and after the transformation. This plot can also be generated
+  by calling \code{plot(a)} where \code{a} is the object returned by
+  the function \code{laslett}.
+
+  If the argument \code{type} is given, then this determines the
+  type of tangents that will be detected, and also the direction of
+  contraction in Laslett's transform. The computation is performed
+  by first rotating \code{X}, applying Laslett's transform for lower
+  tangent points, then rotating back.
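+  For example, \code{laslett(X, type="left")} would detect left
+  tangent points rather than lower tangent points.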
+
+  There are separate algorithms for polygonal windows and
+  pixellated windows (binary masks). The polygonal algorithm may be slow
+  for very complicated polygons. If this happens, setting
+  \code{discretise=TRUE} will convert the polygonal window to a binary
+  mask and invoke the pixel raster algorithm.
+}
+\value{
+  A list, which also belongs to the class \code{"laslett"}
+  so that it can immediately be printed and plotted.
+  
+  The list elements are:
+  \describe{
+    \item{oldX:}{the original dataset \code{X};}
+    \item{TanOld:}{a point pattern, whose window is \code{Frame(X)},
+      containing the lower tangent points of \code{X};}
+    \item{TanNew:}{a point pattern, whose window is the Laslett transform
+      of \code{Frame(X)}, and which contains the Laslett-transformed
+      positions of the tangent points;}
+    \item{Rect:}{a rectangular window, which is the largest rectangle
+      lying inside the transformed set;}
+    \item{df:}{a data frame giving the locations of the tangent points
+      before and after transformation;}
+    \item{type:}{character string specifying the type of tangents.}
+  }
+}
+\references{
+  Barbour, A.D. and Schmidt, V. (2001)
+  On Laslett's Transform for the Boolean Model.
+  \emph{Advances in Applied Probability} 
+  \bold{33}(1), 1--5.
+
+  Cressie, N.A.C. (1993)
+  \emph{Statistics for spatial data}, second edition.
+  John Wiley and Sons.
+
+  Molchanov, I. (1997)
+  \emph{Statistics of the Boolean Model for Practitioners and Mathematicians}.
+  Wiley.
+}
+\author{
+  Kassel Hingee and \adrian.
+}
+\seealso{
+  \code{\link{plot.laslett}}
+}
+\examples{
+a <- laslett(heather$coarse)
+with(a, clarkevans.test(TanNew[Rect], correction="D", nsim=39))
+X <- discs(runifpoint(15) \%mark\% 0.2, npoly=16)
+b <- laslett(X)
+}
+\keyword{spatial}
+\keyword{manip}
+
diff --git a/man/latest.news.Rd b/man/latest.news.Rd
new file mode 100644
index 0000000..af64eae
--- /dev/null
+++ b/man/latest.news.Rd
@@ -0,0 +1,58 @@
+\name{latest.news}
+\alias{latest.news}
+\title{
+  Print News About Latest Version of Package
+}
+\description{
+  Prints the news documentation for the current version of \code{spatstat}
+  or another specified package.
+}
+\usage{
+latest.news(package = "spatstat", doBrowse=FALSE)
+}
+\arguments{
+  \item{package}{
+    Name of package for which the latest news should be printed.
+  }
+  \item{doBrowse}{
+    Logical value indicating whether to display the results in a browser
+    window instead of printing them.
+  }
+}
+\details{
+  By default, this function prints the news documentation about changes in the
+  current installed version of the \pkg{spatstat} package.
+  The function can be called simply by typing its
+  name without parentheses (see the Examples).
+
+  If \code{package} is given, then the function reads the
+  news for the specified package from its \code{NEWS} file (if it has one) 
+  and prints only the entries that refer to the current version
+  of the package.
+
+  To see the news for all previous versions as well as the current
+  version, use the \R utility \code{\link[utils]{news}}. See the Examples.
+}
+\value{
+  Null.
+}
+\author{
+  \adrian
+  and \rolf
+}
+\seealso{
+  \code{\link[utils]{news}},
+  \code{\link{bugfixes}}
+}
+\examples{
+  if(interactive()) {
+
+    # current news
+    latest.news
+
+    # all news
+    news(package="spatstat")
+
+  }
+}
+\keyword{documentation}
diff --git a/man/layered.Rd b/man/layered.Rd
new file mode 100644
index 0000000..ebf47a5
--- /dev/null
+++ b/man/layered.Rd
@@ -0,0 +1,92 @@
+\name{layered}
+\alias{layered}
+\title{
+  Create List of Plotting Layers
+}
+\description{
+  Given several objects which are capable of being plotted,
+  create a list containing these objects as if they were
+  successive layers of a plot. The list can then be plotted
+  in different ways.
+}
+\usage{
+layered(..., plotargs = NULL, LayerList=NULL)
+}
+\arguments{
+  \item{\dots}{
+    Objects which can be plotted by \code{plot}.
+  }
+  \item{plotargs}{
+    Default values of the plotting arguments for each of the objects.
+    A list of lists of arguments of the form \code{name=value}.
+  }
+  \item{LayerList}{
+    A list of objects.
+    Incompatible with \code{\dots}.
+  }
+}
+\details{
+  Layering is a simple mechanism for controlling
+  a high-level plot that is composed of
+  several successive plots, for example, a background and a foreground
+  plot. The layering mechanism makes it easier to issue the plot command,
+  to switch on or off the plotting of each individual layer,
+  to control the plotting arguments that are passed to each layer,
+  and to zoom in.
+  
+  Each individual layer in the plot should be saved as an object
+  that can be plotted using \code{plot}. It will typically belong to
+  some class, which has a method for the generic function \code{plot}.
+
+  The command \code{layered} simply saves the objects \code{\dots}
+  as a list of class \code{"layered"}. This list can then be plotted by
+  the method \code{\link{plot.layered}}. Thus, you only need to
+  type a single \code{plot} command to produce the multi-layered plot.
+  Individual layers of the plot can be switched on or off, or
+  manipulated, using arguments to \code{\link{plot.layered}}.
+
+  The argument \code{plotargs} contains default values of the
+  plotting arguments for each layer. It should be a list, with one
+  entry for each object in \code{\dots}. Each entry of  \code{plotargs}
+  should be a list of arguments in the form \code{name=value}, which are
+  recognised by the \code{plot} method for the relevant layer.
+
+  The \code{plotargs} can also include an argument named \code{.plot}
+  specifying (the name of) a function to perform the plotting
+  instead of the generic \code{plot}.
+
+  The length of \code{plotargs} should either be equal to the
+  number of layers, or equal to 1. In the latter case it will be
+  replicated to the appropriate length.
+}
+\value{
+  A list, belonging to the class \code{"layered"}.
+  There are methods for \code{plot}, \code{"["},
+  \code{"shift"}, \code{"affine"}, \code{"rotate"} and \code{"rescale"}.
+}
+\author{
+  \adrian
+  and \rolf
+}
+\seealso{
+  \code{\link{plot.layered}},
+  \code{\link{methods.layered}},
+  \code{\link{as.layered}},
+  \code{\link{[.layered}},
+  \code{\link{layerplotargs}}.
+}
+\examples{
+   D <- distmap(cells)
+   L <- layered(D, cells)
+   L
+   L <- layered(D, cells,
+    plotargs=list(list(ribbon=FALSE), list(pch=16)))
+   plot(L)
+
+   layerplotargs(L)[[1]] <- list(.plot="contour")
+   plot(L)
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/layerplotargs.Rd b/man/layerplotargs.Rd
new file mode 100644
index 0000000..3983d73
--- /dev/null
+++ b/man/layerplotargs.Rd
@@ -0,0 +1,69 @@
+\name{layerplotargs}
+\alias{layerplotargs}
+\alias{layerplotargs<-}
+\title{
+  Extract or Replace the Plot Arguments of a Layered Object
+}
+\description{
+  Extracts or replaces the plot arguments of a layered object.
+}
+\usage{
+layerplotargs(L)
+
+layerplotargs(L) <- value
+}
+\arguments{
+  \item{L}{
+    An object of class \code{"layered"}
+    created by the function \code{\link{layered}}.
+  }
+  \item{value}{
+    Replacement value. 
+    A list, with the same length as \code{L},
+    whose elements are lists of plot arguments.
+  }
+}
+\details{
+  These commands extract or replace the \code{plotargs}
+  in a layered object. See \code{\link{layered}}.
+
+  The replacement \code{value} should normally have the same
+  length as the current value. However, it can also be a list with
+  \emph{one} element which is a list of parameters. This will be
+  replicated to the required length.
+
+  For the assignment function \code{layerplotargs<-},
+  the argument \code{L} can be any spatial object; it will be converted
+  to a \code{layered} object with a single layer.
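+
+  For example, this sketch assigns plot arguments to a bare point
+  pattern, which is silently converted to a one-layer \code{"layered"}
+  object:
+  \preformatted{
+    Y <- cells                             ## a point pattern
+    layerplotargs(Y) <- list(list(pch=3))  ## Y is now "layered"
+    plot(Y)
+  }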
+}
+\value{
+  \code{layerplotargs} returns a list of lists of plot arguments.
+
+  \code{"layerplotargs<-"} returns the updated object 
+  of class \code{"layered"}.
+}
+\author{
+  \adrian
+  and \rolf
+}
+\seealso{
+  \code{\link{layered}},
+  \code{\link{methods.layered}},
+  \code{\link{[.layered}}.
+}
+\examples{
+   W <- square(2)
+   L <- layered(W=W, X=cells)
+   ## The following are equivalent
+   layerplotargs(L) <- list(list(), list(pch=16))
+   layerplotargs(L)[[2]] <- list(pch=16)
+   layerplotargs(L)$X <- list(pch=16)
+
+   ## The following are equivalent
+   layerplotargs(L) <- list(list(cex=2), list(cex=2))
+   layerplotargs(L) <- list(list(cex=2))
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/layout.boxes.Rd b/man/layout.boxes.Rd
new file mode 100644
index 0000000..e5fcd7d
--- /dev/null
+++ b/man/layout.boxes.Rd
@@ -0,0 +1,60 @@
+\name{layout.boxes}
+\alias{layout.boxes}
+\title{
+  Generate a Row or Column Arrangement of Rectangles
+}
+\description{
+  A simple utility to generate a row or column of boxes (rectangles)
+  for use in point-and-click panels.
+}
+\usage{
+layout.boxes(B, n, horizontal = FALSE, aspect = 0.5, usefrac = 0.9)
+}
+\arguments{
+  \item{B}{
+    Bounding rectangle for the boxes.
+    An object of class \code{"owin"}.
+  }
+  \item{n}{
+    Integer. The number of boxes.
+  }
+  \item{horizontal}{
+    Logical. If \code{TRUE}, arrange the boxes in a horizontal row.
+    If \code{FALSE} (the default), arrange them in a vertical column.
+  }
+  \item{aspect}{
+    Aspect ratio (height/width) of each box.
+  }
+  \item{usefrac}{
+    Number between 0 and 1. The 
+    fraction of height or width of \code{B} that should be
+    occupied by boxes.
+  }
+}
+\details{
+  This simple utility generates a list of boxes (rectangles)
+  inside the bounding box \code{B} arranged in a regular
+  row or column. It is useful for generating the
+  positions of the panel buttons in the function \code{\link{simplepanel}}.
+}
+\value{
+  A list of rectangles.
+}
+\examples{
+  B <- owin(c(0,10),c(0,1))
+  boxes <- layout.boxes(B, 5, horizontal=TRUE)
+  plot(B, main="", col="blue")
+  niets <- lapply(boxes, plot, add=TRUE, col="grey")
+}
+\author{
+  \adrian
+  and \rolf
+}
+\seealso{
+  \code{\link{simplepanel}}
+}
+\keyword{utilities}
+
+
diff --git a/man/lengths.psp.Rd b/man/lengths.psp.Rd
new file mode 100644
index 0000000..fd4d18c
--- /dev/null
+++ b/man/lengths.psp.Rd
@@ -0,0 +1,51 @@
+\name{lengths.psp}
+\alias{lengths.psp}
+\title{Lengths of Line Segments}
+\description{
+  Computes the length of each line segment
+  in a line segment pattern.
+}
+\usage{
+  lengths.psp(x, squared=FALSE)
+}
+\arguments{
+  \item{x}{
+    A line segment pattern (object of class \code{"psp"}).
+  }
+  \item{squared}{
+    Logical value indicating whether to return
+    the squared lengths (\code{squared=TRUE})
+    or the lengths themselves (\code{squared=FALSE}, the default).
+  }
+}
+\value{
+  Numeric vector.
+}
+\details{
+  The length of each line segment is computed
+  and the lengths are returned as a numeric vector.
+
+  Using squared lengths may be more efficient for some purposes,
+  for example, to find the length of the shortest segment,
+  \code{sqrt(min(lengths.psp(x, squared=TRUE)))}
+  is faster than \code{min(lengths.psp(x))}.
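+
+  For example (a small illustrative check):
+  \preformatted{
+    a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+    ## both expressions give the length of the shortest segment
+    all.equal(sqrt(min(lengths.psp(a, squared=TRUE))),
+              min(lengths.psp(a)))
+  }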
+}
+\seealso{
+  \code{\link{summary.psp}},
+  \code{\link{midpoints.psp}},
+  \code{\link{angles.psp}}
+}
+\examples{
+  a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  b <- lengths.psp(a)   
+}
+\author{
+  \adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/letterR.Rd b/man/letterR.Rd
new file mode 100644
index 0000000..d69cfba
--- /dev/null
+++ b/man/letterR.Rd
@@ -0,0 +1,20 @@
+\name{letterR}
+\alias{letterR}
+\docType{data}
+\title{Window in Shape of Letter R}
+\description{
+  A window in the shape of the capital letter R,
+  for use in demonstrations.
+} 
+\format{
+  An object of class \code{"owin"}
+  representing the capital letter R,
+  in the same font as the R package logo.
+  See \code{\link{owin.object}} for details of the format.
+}
+\usage{
+ data(letterR)
+}
+\source{\adrian}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/levelset.Rd b/man/levelset.Rd
new file mode 100644
index 0000000..bfa64b1
--- /dev/null
+++ b/man/levelset.Rd
@@ -0,0 +1,70 @@
+\name{levelset}
+\alias{levelset}
+\title{Level Set of a Pixel Image}
+\description{
+  Given a pixel image, find all pixels which have values less than a
+  specified threshold value (or greater than a threshold, etc),
+  and assemble these pixels into a window.
+}
+\usage{
+  levelset(X, thresh, compare="<=")
+}
+\arguments{
+  \item{X}{A pixel image (object of class \code{"im"}).}
+  \item{thresh}{Threshold value.
+    A single number or value compatible with the pixel values in
+    \code{X}.}
+  \item{compare}{Character string specifying one of the comparison
+    operators \code{"<", ">", "==", "<=", ">=", "!="}. 
+  }
+}
+\details{
+  If \code{X} is a pixel image with numeric values,
+  then \code{levelset(X, thresh)} finds the region of space
+  where the pixel values are less than or equal to
+  the threshold value \code{thresh}.
+  This region is returned as a spatial window.
+
+  The argument \code{compare} specifies how the pixel values should be
+  compared with the threshold value.
+  Instead of requiring pixel values to be less than or equal to
+  \code{thresh}, you can specify that they must be less than (\code{<}),
+  greater than (\code{>}), equal to (\code{==}), greater than or equal
+  to (\code{>=}), or not equal to (\code{!=}) the threshold value
+  \code{thresh}.
+
+  If \code{X} has non-numeric pixel values (for example, logical
+  or factor values) it is advisable to use only the comparisons
+  \code{==} and \code{!=}, unless you really know what you are doing.
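+
+  For example, a sketch of the recommended usage for a logical-valued
+  image:
+  \preformatted{
+    X <- as.im(function(x,y) { x^2 - y^2 }, unit.square())
+    Y <- (X > 0)                  ## logical-valued pixel image
+    W <- levelset(Y, TRUE, "==")  ## region where the value is TRUE
+  }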
+
+  For more complicated logical comparisons, see \code{\link{solutionset}}.
+}
+\value{
+  A spatial window (object of class \code{"owin"},
+  see \code{\link{owin.object}}) containing the pixels satisfying the
+  constraint.
+}
+\seealso{
+  \code{\link{im.object}},
+  \code{\link{as.owin}},
+  \code{\link{solutionset}}.
+}
+\examples{
+  # test image
+  X <- as.im(function(x,y) { x^2 - y^2 }, unit.square())
+
+  W <- levelset(X, 0.2)
+  W <- levelset(X, -0.3, ">")
+
+  # compute area of level set
+  area(levelset(X, 0.1))
+}
+\author{
+  \adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{programming}
+\keyword{manip}
diff --git a/man/leverage.ppm.Rd b/man/leverage.ppm.Rd
new file mode 100644
index 0000000..e6f7d16
--- /dev/null
+++ b/man/leverage.ppm.Rd
@@ -0,0 +1,102 @@
+\name{leverage.ppm}
+\alias{leverage}
+\alias{leverage.ppm}
+\title{
+  Leverage Measure for Spatial Point Process Model
+}
+\description{
+  Computes the leverage measure for a fitted spatial point process model.
+}
+\usage{
+leverage(model, ...)
+
+\method{leverage}{ppm}(model, ..., drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL)
+}
+\arguments{
+  \item{model}{
+    Fitted point process model (object of class \code{"ppm"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{drop}{
+    Logical. Whether to include (\code{drop=FALSE}) or
+    exclude (\code{drop=TRUE}) contributions from quadrature
+    points that were not used to fit the model.
+  }
+  \item{iScore,iHessian}{
+    Components of the score vector and Hessian matrix for
+    the irregular parameters, if required. See Details.
+  }
+  \item{iArgs}{
+    List of extra arguments for the functions \code{iScore},
+    \code{iHessian} if required.
+  }
+}
+\details{
+  The function \code{leverage} is generic, and
+  \code{leverage.ppm} is the method for objects of class \code{"ppm"}.
+  
+  Given a fitted spatial point process model \code{model},
+  the function \code{leverage.ppm} computes the leverage of the model,
+  described in Baddeley, Chang and Song (2013).
+  
+  The leverage of a spatial point process model
+  is a function of spatial location, and is typically
+  displayed as a colour pixel image. 
+  The leverage value \eqn{h(u)} at a spatial location \eqn{u} represents the
+  change in the fitted trend of the model that would occur
+  if a data point were added at the location \eqn{u}.
+  A relatively large value of \eqn{h()} indicates a 
+  part of the space where the data have a \emph{potentially}
+  strong effect on the fitted model (specifically, a strong effect
+  on the intensity or trend of the fitted model) due to the values
+  of the covariates. 
+  
+  If the point process model trend has irregular parameters that were
+  fitted (using \code{\link{ippm}})
+  then the leverage calculation requires the first and second
+  derivatives of the log trend with respect to the irregular parameters. 
+  The argument \code{iScore} should be a list,
+  with one entry for each irregular parameter, of \R functions that compute the
+  partial derivatives of the log trend (i.e. log intensity or
+  log conditional intensity) with respect to each irregular
+  parameter. The argument \code{iHessian} should be a list,
+  with \eqn{p^2} entries where \eqn{p} is the number of irregular
+  parameters, of \R functions that compute the second order
+  partial derivatives of the log trend with respect to each
+  pair of irregular parameters.  
+
+  The result of \code{leverage.ppm} is an object of
+  class \code{"leverage.ppm"}. It can be plotted
+  (by \code{\link{plot.leverage.ppm}}) or converted to a pixel
+  image by \code{as.im} (see \code{\link{as.im.leverage.ppm}}).
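+
+  For example (a minimal sketch, where \code{fit} is a fitted
+  \code{"ppm"} object as in the Examples below):
+  \preformatted{
+    lev <- leverage(fit)   ## compute the leverage measure
+    plot(lev)              ## plotted by plot.leverage.ppm
+    levim <- as.im(lev)    ## convert to a pixel image
+  }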
+}
+\value{
+  An object of class \code{"leverage.ppm"} that can be plotted
+  (by \code{\link{plot.leverage.ppm}}). There are also methods
+  for \code{persp}, \code{print}, \code{[}, \code{as.im}, \code{as.function}
+  and \code{as.owin}.
+}
+\references{
+  Baddeley, A., Chang, Y.M. and Song, Y. (2013)
+  Leverage and influence diagnostics for spatial point process models.
+  \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{influence.ppm}},
+  \code{\link{dfbetas.ppm}},
+  \code{\link{ppmInfluence}},
+  \code{\link{plot.leverage.ppm}},
+  \code{\link{as.function.leverage.ppm}}
+}
+\examples{
+   X <- rpoispp(function(x,y) { exp(3+3*x) })
+   fit <- ppm(X ~x+y)
+   plot(leverage(fit))
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/lgcp.estK.Rd b/man/lgcp.estK.Rd
new file mode 100644
index 0000000..cc245eb
--- /dev/null
+++ b/man/lgcp.estK.Rd
@@ -0,0 +1,245 @@
+\name{lgcp.estK}
+\alias{lgcp.estK}
+\title{Fit a Log-Gaussian Cox Point Process by Minimum Contrast}
+\description{
+  Fits a log-Gaussian Cox point process model
+  to a point pattern dataset by the Method of Minimum Contrast.
+}
+\usage{
+lgcp.estK(X, startpar=c(var=1,scale=1),
+             covmodel=list(model="exponential"),
+             lambda=NULL,
+             q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...)
+}
+\arguments{
+  \item{X}{
+    Data to which the model will be fitted.
+    Either a point pattern or a summary statistic.
+    See Details.
+  }
+  \item{startpar}{
+    Vector of starting values for the parameters of the
+    log-Gaussian Cox process model.
+  }
+  \item{covmodel}{
+    Specification of the covariance model
+    for the log-Gaussian field. See Details.
+  }
+  \item{lambda}{
+    Optional. An estimate of the intensity of the point process.
+  }
+  \item{q,p}{
+    Optional. Exponents for the contrast criterion.
+  }
+  \item{rmin, rmax}{
+    Optional. The interval of \eqn{r} values for the contrast criterion.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link[stats]{optim}}
+    to control the optimisation algorithm. See Details.
+  }
+}
+\details{
+  This algorithm fits a log-Gaussian Cox point process (LGCP) model
+  to a point pattern dataset by the Method of Minimum Contrast,
+  using the K function of the point pattern.
+
+  The shape of the covariance of the LGCP must be specified:
+  the default is the exponential covariance function,
+  but other covariance models can be selected.
+
+  The argument \code{X} can be either
+  \describe{
+    \item{a point pattern:}{An object of class \code{"ppp"}
+      representing a point pattern dataset. 
+      The \eqn{K} function of the point pattern will be computed
+      using \code{\link{Kest}}, and the method of minimum contrast
+      will be applied to this.
+    }
+    \item{a summary statistic:}{An object of class \code{"fv"} containing
+      the values of a summary statistic, computed for a point pattern
+      dataset. The summary statistic should be the \eqn{K} function,
+      and this object should have been obtained by a call to
+      \code{\link{Kest}} or one of its relatives.
+    }
+  }
+  The algorithm fits a log-Gaussian Cox point process (LGCP)
+  model to \code{X},  by finding the parameters of the LGCP model
+  which give the closest match between the
+  theoretical \eqn{K} function of the LGCP model
+  and the observed \eqn{K} function.
+  For a more detailed explanation of the Method of Minimum Contrast,
+  see \code{\link{mincontrast}}.
+
+  The model fitted is a stationary, isotropic log-Gaussian Cox process
+  (\ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen, 2003, pp. 72--76).
+  To define this process we start with
+  a stationary Gaussian random field \eqn{Z} in the two-dimensional plane,
+  with constant mean \eqn{\mu}{mu} and covariance function \eqn{C(r)}.
+  Given \eqn{Z}, we generate a Poisson point process \eqn{Y} with intensity
+  function \eqn{\lambda(u) = \exp(Z(u))}{lambda(u) = exp(Z(u))} at
+  location \eqn{u}. Then \eqn{Y} is a log-Gaussian Cox process.
+
+  The \eqn{K}-function of the LGCP is
+  \deqn{
+    K(r) = \int_0^r 2\pi s \exp(C(s)) \, {\rm d}s.
+  }{
+    K(r) = integral from 0 to r of (2 * pi * s * exp(C(s))) ds.
+  }
+  The intensity of the LGCP is 
+  \deqn{
+    \lambda = \exp(\mu + \frac{C(0)}{2}).
+  }{
+    lambda= exp(mu + C(0)/2).
+  }
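+
+  For illustration, this theoretical \eqn{K} function could be
+  evaluated numerically (a sketch, using the default exponential
+  covariance defined below):
+  \preformatted{
+    Ktheo <- function(r, var=1, scale=0.2) {
+      C <- function(s) var * exp(-s/scale)   ## C(r) = sigma^2 c(r/alpha)
+      sapply(r, function(t)
+        integrate(function(s) 2 * pi * s * exp(C(s)), 0, t)$value)
+    }
+  }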
+  
+  The covariance function \eqn{C(r)} is parametrised in the form
+  \deqn{
+    C(r) = \sigma^2 c(r/\alpha)
+  }{
+    C(r) = sigma^2 * c(r/alpha)
+  }
+  where \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} are parameters
+  controlling the strength and the scale of autocorrelation,
+  respectively, and \eqn{c(r)} is a known covariance function
+  determining the shape of the covariance. 
+  The strength and scale parameters
+  \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha}
+  will be estimated by the algorithm as the values
+  \code{var} and \code{scale} respectively.
+  The template covariance function \eqn{c(r)} must be specified
+  as explained below.
+  
+  In this algorithm, the Method of Minimum Contrast is first used to find
+  optimal values of the parameters \eqn{\sigma^2}{sigma^2}
+  and \eqn{\alpha}{alpha}. Then the remaining parameter
+  \eqn{\mu}{mu} is inferred from the estimated intensity
+  \eqn{\lambda}{lambda}.
+
+  The template covariance function \eqn{c(r)} is specified
+  using the argument \code{covmodel}. This should be of the form
+  \code{list(model="modelname", \dots)} where
+  \code{modelname} is a string identifying the template model
+  as explained below, and  \code{\dots} are optional arguments of the
+  form \code{tag=value} giving the values of parameters controlling the
+  \emph{shape} of the template model.
+  The default is the exponential covariance
+  \eqn{c(r) = e^{-r}}{c(r) = e^(-r)}
+  so that the scaled covariance is 
+  \deqn{
+    C(r) = \sigma^2 e^{-r/\alpha}.
+  }{
+    C(r) = sigma^2 * exp(-r/alpha).
+  }
+  To determine the template model, the string \code{"modelname"} will be
+  prefixed by \code{"RM"} and the code will search for
+  a function of this name in the \pkg{RandomFields} package.
+  For a list of available models see 
+  \code{\link[RandomFields]{RMmodel}} in the
+  \pkg{RandomFields} package. For example the
+  Matern covariance with exponent \eqn{\nu=0.3}{nu = 0.3} is specified
+  by \code{covmodel=list(model="matern", nu=0.3)} corresponding
+  to the function \code{RMmatern} in the \pkg{RandomFields} package.
+  
+  If the argument \code{lambda} is provided, then this is used
+  as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a
+  point pattern, then  \eqn{\lambda}{lambda}
+  will be estimated from \code{X}. 
+  If \code{X} is a summary statistic and \code{lambda} is missing,
+  then the intensity \eqn{\lambda}{lambda} cannot be estimated, and
+  the parameter \eqn{\mu}{mu} will be returned as \code{NA}.
+
+  The remaining arguments \code{rmin,rmax,q,p} control the
+  method of minimum contrast; see \code{\link{mincontrast}}.
+
+  The optimisation algorithm can be controlled through the
+  additional arguments \code{"..."} which are passed to the
+  optimisation function \code{\link[stats]{optim}}. For example,
+  to constrain the parameter values to a certain range,
+  use the argument \code{method="L-BFGS-B"} to select an optimisation
+  algorithm that respects box constraints, and use the arguments
+  \code{lower} and \code{upper} to specify (vectors of) minimum and
+  maximum values for each parameter.
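+
+  For example, a constrained fit might look like the following sketch
+  (the bounds are arbitrary illustrative values):
+  \preformatted{
+    u <- lgcp.estK(redwood, startpar=c(var=1, scale=0.1),
+                   method="L-BFGS-B",
+                   lower=c(0.1, 0.01), upper=c(10, 1))
+  }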
+}
+\value{
+  An object of class \code{"minconfit"}. There are methods for printing
+  and plotting this object. It contains the following main components:
+  \item{par }{Vector of fitted parameter values.}
+  \item{fit }{Function value table (object of class \code{"fv"})
+    containing the observed values of the summary statistic
+    (\code{observed}) and the theoretical values of the summary
+    statistic computed from the fitted model parameters.
+  }
+}
+\note{
+  This function is considerably slower than \code{\link{lgcp.estpcf}}
+  because of the computation time required for the integral
+  in the \eqn{K}-function.
+
+  Computation can be accelerated, at the cost of less accurate results,
+  by setting \code{spatstat.options(fastK.lgcp=TRUE)}. 
+}
+\references{
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J, Syversveen, A. and Waagepetersen, R. (1998)
+  Log Gaussian Cox Processes.
+  \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482.
+  
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003).
+  Statistical Inference and Simulation for Spatial Point Processes.
+  Chapman and Hall/CRC, Boca Raton.
+
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\author{
+  Rasmus Waagepetersen
+  \email{rw at math.auc.dk}.
+  Adapted for \pkg{spatstat} by \adrian.
+  Further modifications by Rasmus Waagepetersen
+  and Shen Guochun, and by \ege.
+}
+\seealso{
+  \code{\link{lgcp.estpcf}} for alternative method of fitting LGCP.
+  
+  \code{\link{matclust.estK}},
+  \code{\link{thomas.estK}} for other models.
+  
+  \code{\link{mincontrast}} for the generic minimum contrast
+  fitting algorithm, including important parameters that affect
+  the accuracy of the fit.
+  
+  \code{\link[RandomFields]{RMmodel}} in the
+  \pkg{RandomFields} package, for covariance function models.
+  
+  \code{\link{Kest}} for the \eqn{K} function.
+}
+\examples{
+    if(interactive()) {
+      u <- lgcp.estK(redwood)
+    } else {
+      # slightly faster - better starting point
+      u <- lgcp.estK(redwood, c(var=1, scale=0.1))
+    }
+    u
+    plot(u)
+
+    \testonly{
+      if(require(RandomFields)) {
+        K <- Kest(redwood, r=seq(0, 0.1, length=9))
+        op <- spatstat.options(fastK.lgcp=TRUE)
+        lgcp.estK(K, covmodel=list(model="matern", nu=0.3),
+                  control=list(maxit=2))
+        spatstat.options(op)
+      }
+    }
+    if(FALSE) {
+      ## takes several minutes!
+      lgcp.estK(redwood, covmodel=list(model="matern", nu=0.3))
+    }
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/lgcp.estpcf.Rd b/man/lgcp.estpcf.Rd
new file mode 100644
index 0000000..09aac5f
--- /dev/null
+++ b/man/lgcp.estpcf.Rd
@@ -0,0 +1,227 @@
+\name{lgcp.estpcf}
+\alias{lgcp.estpcf}
+\title{Fit a Log-Gaussian Cox Point Process by Minimum Contrast}
+\description{
+  Fits a log-Gaussian Cox point process model
+  to a point pattern dataset by the Method of Minimum Contrast
+  using the pair correlation function.
+}
+\usage{
+lgcp.estpcf(X,
+            startpar=c(var=1,scale=1),
+            covmodel=list(model="exponential"),
+            lambda=NULL,
+            q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list())
+}
+\arguments{
+  \item{X}{
+    Data to which the model will be fitted.
+    Either a point pattern or a summary statistic.
+    See Details.
+  }
+  \item{startpar}{
+    Vector of starting values for the parameters of the
+    log-Gaussian Cox process model.
+  }
+  \item{covmodel}{
+    Specification of the covariance model
+    for the log-Gaussian field. See Details.
+  }
+  \item{lambda}{
+    Optional. An estimate of the intensity of the point process.
+  }
+  \item{q,p}{
+    Optional. Exponents for the contrast criterion.
+  }
+  \item{rmin, rmax}{
+    Optional. The interval of \eqn{r} values for the contrast criterion.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link[stats]{optim}}
+    to control the optimisation algorithm. See Details.
+  }
+  \item{pcfargs}{
+    Optional list containing arguments passed to \code{\link{pcf.ppp}}
+    to control the smoothing in the estimation of the
+    pair correlation function.
+  }
+}
+\details{
+  This algorithm fits a log-Gaussian Cox point process (LGCP) model
+  to a point pattern dataset by the Method of Minimum Contrast,
+  using the estimated pair correlation function of the point pattern.
+
+  The shape of the covariance of the LGCP must be specified:
+  the default is the exponential covariance function,
+  but other covariance models can be selected.
+
+  The argument \code{X} can be either
+  \describe{
+    \item{a point pattern:}{An object of class \code{"ppp"}
+      representing a point pattern dataset. 
+      The pair correlation function of the point pattern will be computed
+      using \code{\link{pcf}}, and the method of minimum contrast
+      will be applied to this.
+    }
+    \item{a summary statistic:}{An object of class \code{"fv"} containing
+      the values of a summary statistic, computed for a point pattern
+      dataset. The summary statistic should be the pair correlation function,
+      and this object should have been obtained by a call to
+      \code{\link{pcf}} or one of its relatives.
+    }
+  }
+  The algorithm fits a log-Gaussian Cox point process (LGCP)
+  model to \code{X},  by finding the parameters of the LGCP model
+  which give the closest match between the
+  theoretical pair correlation function of the LGCP model
+  and the observed pair correlation function.
+  For a more detailed explanation of the Method of Minimum Contrast,
+  see \code{\link{mincontrast}}.
+
+  The model fitted is a stationary, isotropic log-Gaussian Cox process
+  (\ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen, 2003, pp. 72--76).
+  To define this process we start with
+  a stationary Gaussian random field \eqn{Z} in the two-dimensional plane,
+  with constant mean \eqn{\mu}{mu} and covariance function \eqn{C(r)}.
+  Given \eqn{Z}, we generate a Poisson point process \eqn{Y} with intensity
+  function \eqn{\lambda(u) = \exp(Z(u))}{lambda(u) = exp(Z(u))} at
+  location \eqn{u}. Then \eqn{Y} is a log-Gaussian Cox process.
+
+  The theoretical pair correlation function of the LGCP is
+  \deqn{
+    g(r) = \exp(C(r))
+  }{
+    g(r) = exp(C(r))
+  }
+  The intensity of the LGCP is 
+  \deqn{
+    \lambda = \exp(\mu + \frac{C(0)}{2}).
+  }{
+    lambda= exp(mu + C(0)/2).
+  }
+  
+  The covariance function \eqn{C(r)} takes the form
+  \deqn{
+    C(r) = \sigma^2 c(r/\alpha)
+  }{
+    C(r) = sigma^2 * c(r/alpha)
+  }
+  where \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} are parameters
+  controlling the strength and the scale of autocorrelation,
+  respectively, and \eqn{c(r)} is a known covariance function
+  determining the shape of the covariance. 
+  The strength and scale parameters
+  \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha}
+  will be estimated by the algorithm.
+  The template covariance function \eqn{c(r)} must be specified
+  as explained below.
+  
+  In this algorithm, the Method of Minimum Contrast is first used to find
+  optimal values of the parameters \eqn{\sigma^2}{sigma^2}
+  and \eqn{\alpha}{alpha}. Then the remaining parameter
+  \eqn{\mu}{mu} is inferred from the estimated intensity
+  \eqn{\lambda}{lambda}.
+
+  The template covariance function \eqn{c(r)} is specified
+  using the argument \code{covmodel}. This should be of the form
+  \code{list(model="modelname", \dots)} where
+  \code{modelname} is a string identifying the template model
+  as explained below, and  \code{\dots} are optional arguments of the
+  form \code{tag=value} giving the values of parameters controlling the
+  \emph{shape} of the template model.
+  The default is the exponential covariance
+  \eqn{c(r) = e^{-r}}{c(r) = e^(-r)}
+  so that the scaled covariance is 
+  \deqn{
+    C(r) = \sigma^2 e^{-r/\alpha}.
+  }{
+    C(r) = sigma^2 * exp(-r/alpha).
+  }
+  To determine the template model, the string \code{"modelname"} will be
+  prefixed by \code{"RM"} and the code will search for
+  a function of this name in the \pkg{RandomFields} package.
+  For a list of available models see 
+  \code{\link[RandomFields]{RMmodel}} in the
+  \pkg{RandomFields} package. For example the
+  Matern covariance with exponent \eqn{\nu=0.3}{nu = 0.3} is specified
+  by \code{covmodel=list(model="matern", nu=0.3)} corresponding
+  to the function \code{RMmatern} in the \pkg{RandomFields} package.
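+
+  For illustration, with the default exponential covariance the
+  theoretical pair correlation function could be evaluated as
+  (a sketch):
+  \preformatted{
+    gtheo <- function(r, var=1, scale=0.2) exp(var * exp(-r/scale))
+    curve(gtheo(x), from=0, to=1)
+  }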
+  
+  If the argument \code{lambda} is provided, then this is used
+  as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a
+  point pattern, then  \eqn{\lambda}{lambda}
+  will be estimated from \code{X}. 
+  If \code{X} is a summary statistic and \code{lambda} is missing,
+  then the intensity \eqn{\lambda}{lambda} cannot be estimated, and
+  the parameter \eqn{\mu}{mu} will be returned as \code{NA}.
+
+  The remaining arguments \code{rmin,rmax,q,p} control the
+  method of minimum contrast; see \code{\link{mincontrast}}.
+
+  The optimisation algorithm can be controlled through the
+  additional arguments \code{"..."} which are passed to the
+  optimisation function \code{\link[stats]{optim}}. For example,
+  to constrain the parameter values to a certain range,
+  use the argument \code{method="L-BFGS-B"} to select an optimisation
+  algorithm that respects box constraints, and use the arguments
+  \code{lower} and \code{upper} to specify (vectors of) minimum and
+  maximum values for each parameter.
+}
+\value{
+  An object of class \code{"minconfit"}. There are methods for printing
+  and plotting this object. It contains the following main components:
+  \item{par }{Vector of fitted parameter values.}
+  \item{fit }{Function value table (object of class \code{"fv"})
+    containing the observed values of the summary statistic
+    (\code{observed}) and the theoretical values of the summary
+    statistic computed from the fitted model parameters.
+  }
+}
+\references{
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J.,
+  Syversveen, A. and Waagepetersen, R. (1998)
+  Log Gaussian Cox Processes.
+  \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482.
+  
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003).
+  Statistical Inference and Simulation for Spatial Point Processes.
+  Chapman and Hall/CRC, Boca Raton.
+
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\author{
+  \adrian
+  with modifications by Shen Guochun and 
+  Rasmus Waagepetersen
+  \email{rw at math.auc.dk}
+  and \ege.
+}
+\seealso{
+  \code{\link{lgcp.estK}} for alternative method of fitting LGCP.
+  
+  \code{\link{matclust.estpcf}},
+  \code{\link{thomas.estpcf}} for other models.
+  
+  \code{\link{mincontrast}} for the generic minimum contrast
+  fitting algorithm, including important parameters that affect
+  the accuracy of the fit.
+  
+  \code{\link[RandomFields]{RMmodel}} in the
+  \pkg{RandomFields} package, for covariance function models.
+  
+  \code{\link{pcf}} for the pair correlation function.
+}
+\examples{
+    data(redwood)
+    u <- lgcp.estpcf(redwood, c(var=1, scale=0.1))
+    u
+    plot(u)
+    if(require(RandomFields)) {
+      lgcp.estpcf(redwood, covmodel=list(model="matern", nu=0.3))
+    }
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/linearK.Rd b/man/linearK.Rd
new file mode 100644
index 0000000..0c76db9
--- /dev/null
+++ b/man/linearK.Rd
@@ -0,0 +1,79 @@
+\name{linearK}
+\alias{linearK}
+\title{
+  Linear K Function
+}
+\description{
+  Computes an estimate of the linear \eqn{K} function
+  for a point pattern on a linear network.
+}
+\usage{
+linearK(X, r=NULL, ..., correction="Ang", ratio=FALSE)
+}
+\arguments{
+  \item{X}{
+    Point pattern on linear network (object of class \code{"lpp"}).
+  }
+  \item{r}{
+    Optional. Numeric vector of values of the function argument \eqn{r}.
+    There is a sensible default.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{correction}{
+    Geometry correction.
+    Either \code{"none"} or \code{"Ang"}. See Details.
+  }
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    the estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+}
+\details{
+  This command computes the
+  linear \eqn{K} function from point pattern data on a linear network.
+
+  If \code{correction="none"}, the calculations do not include
+  any correction for the geometry of the linear network. The result is the
+  network \eqn{K} function as defined by Okabe and Yamada (2001).
+
+  If \code{correction="Ang"}, the pair counts are weighted using
+  Ang's correction (Ang, 2010; Ang et al., 2012).
+}
+\value{
+  Function value table (object of class \code{"fv"}).
+}
+\author{
+  Ang Qi Wei \email{aqw07398 at hotmail.com} and
+  \adrian.
+}
+\references{
+  Ang, Q.W. (2010) Statistical methodology for spatial point patterns
+  on a linear network. MSc thesis, University of Western Australia.
+
+  Ang, Q.W., Baddeley, A. and Nair, G. (2012)
+  Geometrically corrected second-order analysis of 
+  events on a linear network, with applications to
+  ecology and criminology.
+  \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617.
+
+  Okabe, A. and Yamada, I. (2001) The K-function method on a network and
+  its computational implementation. \emph{Geographical Analysis}
+  \bold{33}, 271--290.
+}
+
+\seealso{
+  \code{\link{compileK}},
+  \code{\link{lpp}}
+}
+\examples{
+  data(simplenet)
+  X <- rpoislpp(5, simplenet)
+  linearK(X)
+  linearK(X, correction="none")
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/linearKcross.Rd b/man/linearKcross.Rd
new file mode 100644
index 0000000..8f9d5b5
--- /dev/null
+++ b/man/linearKcross.Rd
@@ -0,0 +1,89 @@
+\name{linearKcross}
+\alias{linearKcross}
+\title{
+  Multitype K Function (Cross-type) for Linear Point Pattern
+}
+\description{
+  For a multitype point pattern on a linear network,
+  estimate the multitype \eqn{K} function
+  which counts the expected number of points of type \eqn{j}
+  within a given distance of a point of type \eqn{i}.
+}
+\usage{
+linearKcross(X, i, j, r=NULL, \dots, correction="Ang")
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the cross type \eqn{K} function
+    \eqn{K_{ij}(r)}{Kij(r)} will be computed.
+    An object of class \code{"lpp"} which 
+    must be a multitype point pattern (a marked point pattern
+    whose marks are a factor).
+  }
+  \item{i}{Number or character string identifying the type (mark value)
+    of the points in \code{X} from which distances are measured.
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{j}{Number or character string identifying the type (mark value)
+    of the points in \code{X} to which distances are measured.
+    Defaults to the second level of \code{marks(X)}.
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the \eqn{K}-function
+    \eqn{K_{ij}(r)}{Kij(r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{correction}{
+    Geometry correction.
+    Either \code{"none"} or \code{"Ang"}. See Details.
+  }
+  \item{\dots}{Ignored.}
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+}
+\details{
+  This is a counterpart of the function \code{\link{Kcross}} 
+  for a point pattern on a linear network (object of class \code{"lpp"}).
+
+  The arguments \code{i} and \code{j} will be interpreted as
+  levels of the factor \code{marks(X)}. 
+  If \code{i} and \code{j} are missing, they default to the first
+  and second level of the marks factor, respectively.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. 
+  The values of \eqn{r} must be increasing nonnegative numbers
+  and the maximum \eqn{r} value must not exceed the radius of the
+  largest disc contained in the window.
+}
+\references{
+  Baddeley, A., Jammalamadaka, A. and Nair, G. (to appear)
+  Multitype point process analysis of spines on the
+  dendrite network of a neuron.
+  \emph{Applied Statistics} (Journal of the Royal Statistical
+   Society, Series C), In press.
+}
+\section{Warnings}{
+  The arguments \code{i} and \code{j} are interpreted as
+  levels of the factor \code{marks(X)}. Beware of the usual
+  trap with factors: numerical values are not
+  interpreted in the same way as character values. 
+}
+\seealso{
+ \code{\link{linearKdot}},
+ \code{\link{linearK}}.
+}
+\examples{
+   data(chicago)
+   K <- linearKcross(chicago, "assault", "robbery")
+}
+\author{\adrian}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/linearKcross.inhom.Rd b/man/linearKcross.inhom.Rd
new file mode 100644
index 0000000..e32e65e
--- /dev/null
+++ b/man/linearKcross.inhom.Rd
@@ -0,0 +1,126 @@
+\name{linearKcross.inhom}
+\alias{linearKcross.inhom}
+\title{
+  Inhomogeneous multitype K Function (Cross-type) for Linear Point Pattern
+}
+\description{
+  For a multitype point pattern on a linear network,
+  estimate the inhomogeneous multitype \eqn{K} function
+  which counts the expected number of points of type \eqn{j}
+  within a given distance of a point of type \eqn{i}.
+}
+\usage{
+linearKcross.inhom(X, i, j, lambdaI, lambdaJ,
+                   r=NULL, \dots, correction="Ang", normalise=TRUE)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the cross type \eqn{K} function
+    \eqn{K_{ij}(r)}{Kij(r)} will be computed.
+    An object of class \code{"lpp"} which 
+    must be a multitype point pattern (a marked point pattern
+    whose marks are a factor).
+  }
+  \item{i}{Number or character string identifying the type (mark value)
+    of the points in \code{X} from which distances are measured.
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{j}{Number or character string identifying the type (mark value)
+    of the points in \code{X} to which distances are measured.
+    Defaults to the second level of \code{marks(X)}.
+  }
+  \item{lambdaI}{
+    Intensity values for the points of type \code{i}. Either a numeric vector,
+    a \code{function}, a pixel image
+    (object of class \code{"im"} or \code{"linim"}) or
+    a fitted point process model (object of class \code{"ppm"}
+    or \code{"lppm"}).
+  }
+  \item{lambdaJ}{
+    Intensity values for the points of type \code{j}. Either a numeric vector,
+    a \code{function}, a pixel image
+    (object of class \code{"im"} or \code{"linim"}) or
+    a fitted point process model (object of class \code{"ppm"}
+    or \code{"lppm"}).
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the \eqn{K}-function
+    \eqn{K_{ij}(r)}{Kij(r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{correction}{
+    Geometry correction.
+    Either \code{"none"} or \code{"Ang"}. See Details.
+  }
+  \item{\dots}{
+    Arguments passed to \code{lambdaI} and \code{lambdaJ} if
+    they are functions.
+  }
+  \item{normalise}{
+    Logical. If \code{TRUE} (the default), the denominator of the estimator is 
+    data-dependent (equal to the sum of the reciprocal intensities at
+    the points of type \code{i}), which reduces the sampling variability.
+    If \code{FALSE}, the denominator is the length of the network.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+}
+\details{
+  This is a counterpart of the function \code{\link{Kcross.inhom}} 
+  for a point pattern on a linear network (object of class \code{"lpp"}).
+
+  The arguments \code{i} and \code{j} will be interpreted as
+  levels of the factor \code{marks(X)}. 
+  If \code{i} and \code{j} are missing, they default to the first
+  and second level of the marks factor, respectively.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. 
+  The values of \eqn{r} must be increasing nonnegative numbers
+  and the maximum \eqn{r} value must not exceed the radius of the
+  largest disc contained in the window.
+
+  If \code{lambdaI} or \code{lambdaJ} is a fitted point process model,
+  the default behaviour is to update the model by re-fitting it to
+  the data, before computing the fitted intensity.
+  This can be disabled by setting \code{update=FALSE}.
+}
+\references{
+  Baddeley, A., Jammalamadaka, A. and Nair, G. (to appear)
+  Multitype point process analysis of spines on the
+  dendrite network of a neuron.
+  \emph{Applied Statistics} (Journal of the Royal Statistical
+   Society, Series C), In press.
+}
+\section{Warnings}{
+  The arguments \code{i} and \code{j} are interpreted as
+  levels of the factor \code{marks(X)}. Beware of the usual
+  trap with factors: numerical values are not
+  interpreted in the same way as character values. 
+}
+\seealso{
+ \code{\link{linearKdot}},
+ \code{\link{linearK}}.
+}
+\examples{
+   lam <- table(marks(chicago))/(summary(chicago)$totlength)
+   lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) }
+   lamJ <- function(x,y,const=lam[["robbery"]]){ rep(const, length(x)) }
+
+   K <- linearKcross.inhom(chicago, "assault", "robbery", lamI, lamJ)
+
+   \dontrun{
+     fit <- lppm(chicago, ~marks + x)
+     linearKcross.inhom(chicago, "assault", "robbery", fit, fit)
+   }
+}
+\author{\adrian}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/linearKdot.Rd b/man/linearKdot.Rd
new file mode 100644
index 0000000..8f10856
--- /dev/null
+++ b/man/linearKdot.Rd
@@ -0,0 +1,86 @@
+\name{linearKdot}
+\alias{linearKdot}
+\title{
+  Multitype K Function (Dot-type) for Linear Point Pattern
+}
+\description{
+  For a multitype point pattern on a linear network,
+  estimate the multitype \eqn{K} function
+  which counts the expected number of points (of any type)
+  within a given distance of a point of type \eqn{i}.
+}
+\usage{
+linearKdot(X, i, r=NULL, \dots, correction="Ang")
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the dot type \eqn{K} function
+    \eqn{K_{i\bullet}(r)}{K[i.](r)} will be computed.
+    An object of class \code{"lpp"} which 
+    must be a multitype point pattern (a marked point pattern
+    whose marks are a factor).
+  }
+  \item{i}{Number or character string identifying the type (mark value)
+    of the points in \code{X} from which distances are measured.
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the \eqn{K}-function
+    \eqn{K_{i\bullet}(r)}{K[i.](r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{correction}{
+    Geometry correction.
+    Either \code{"none"} or \code{"Ang"}. See Details.
+  }
+  \item{\dots}{Ignored.}
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+}
+\details{
+  This is a counterpart of the function \code{\link{Kdot}} 
+  for a point pattern on a linear network (object of class \code{"lpp"}).
+
+  The argument \code{i} will be interpreted as
+  a level of the factor \code{marks(X)}.
+  If \code{i} is missing, it defaults to the first
+  level of the marks factor.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{K[i.](r)} should be evaluated.
+  The values of \eqn{r} must be increasing nonnegative numbers
+  and the maximum \eqn{r} value must not exceed the radius of the
+  largest disc contained in the window.
+}
+\references{
+  Baddeley, A., Jammalamadaka, A. and Nair, G. (to appear)
+  Multitype point process analysis of spines on the
+  dendrite network of a neuron.
+  \emph{Applied Statistics} (Journal of the Royal Statistical
+   Society, Series C), In press.
+}
+\section{Warnings}{
+  The argument \code{i} is interpreted as a
+  level of the factor \code{marks(X)}. Beware of the usual
+  trap with factors: numerical values are not
+  interpreted in the same way as character values. 
+}
+\seealso{
+ \code{\link{Kdot}},
+ \code{\link{linearKcross}},
+ \code{\link{linearK}}.
+}
+\examples{
+   data(chicago)
+   K <- linearKdot(chicago, "assault")
+}
+\author{\adrian}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/linearKdot.inhom.Rd b/man/linearKdot.inhom.Rd
new file mode 100644
index 0000000..b4326fd
--- /dev/null
+++ b/man/linearKdot.inhom.Rd
@@ -0,0 +1,122 @@
+\name{linearKdot.inhom}
+\alias{linearKdot.inhom}
+\title{
+  Inhomogeneous multitype K Function (Dot-type) for Linear Point Pattern
+}
+\description{
+  For a multitype point pattern on a linear network,
+  estimate the inhomogeneous multitype \eqn{K} function
+  which counts the expected number of points (of any type)
+  within a given distance of a point of type \eqn{i}.
+}
+\usage{
+linearKdot.inhom(X, i, lambdaI, lambdadot, r=NULL, \dots,
+                 correction="Ang", normalise=TRUE)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the dot type \eqn{K} function
+    \eqn{K_{i\bullet}(r)}{K[i.](r)} will be computed.
+    An object of class \code{"lpp"} which 
+    must be a multitype point pattern (a marked point pattern
+    whose marks are a factor).
+  }
+  \item{i}{Number or character string identifying the type (mark value)
+    of the points in \code{X} from which distances are measured.
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{lambdaI}{
+    Intensity values for the points of type \code{i}. Either a numeric vector,
+    a \code{function}, a pixel image
+    (object of class \code{"im"} or \code{"linim"}) or
+    a fitted point process model (object of class \code{"ppm"}
+    or \code{"lppm"}).
+  }
+  \item{lambdadot}{
+    Intensity values for all points of \code{X}. Either a numeric vector,
+    a \code{function}, a pixel image
+    (object of class \code{"im"} or \code{"linim"}) or
+    a fitted point process model (object of class \code{"ppm"}
+    or \code{"lppm"}).
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the \eqn{K}-function
+    \eqn{K_{i\bullet}(r)}{K[i.](r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{correction}{
+    Geometry correction.
+    Either \code{"none"} or \code{"Ang"}. See Details.
+  }
+  \item{\dots}{
+    Arguments passed to \code{lambdaI} and \code{lambdadot} if
+    they are functions.
+  }
+  \item{normalise}{
+    Logical. If \code{TRUE} (the default), the denominator of the estimator is 
+    data-dependent (equal to the sum of the reciprocal intensities at
+    the points of type \code{i}), which reduces the sampling variability.
+    If \code{FALSE}, the denominator is the length of the network.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+}
+\details{
+  This is a counterpart of the function \code{\link{Kdot.inhom}} 
+  for a point pattern on a linear network (object of class \code{"lpp"}).
+
+  The argument \code{i} will be interpreted as
+  a level of the factor \code{marks(X)}.
+  If \code{i} is missing, it defaults to the first
+  level of the marks factor.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{K[i.](r)} should be evaluated.
+  The values of \eqn{r} must be increasing nonnegative numbers
+  and the maximum \eqn{r} value must not exceed the radius of the
+  largest disc contained in the window.
+
+  If \code{lambdaI} or \code{lambdadot} is a fitted point process model,
+  the default behaviour is to update the model by re-fitting it to
+  the data, before computing the fitted intensity.
+  This can be disabled by setting \code{update=FALSE}.
+}
+\references{
+  Baddeley, A., Jammalamadaka, A. and Nair, G. (to appear)
+  Multitype point process analysis of spines on the
+  dendrite network of a neuron.
+  \emph{Applied Statistics} (Journal of the Royal Statistical
+   Society, Series C), In press.
+}
+\section{Warnings}{
+  The argument \code{i} is interpreted as a
+  level of the factor \code{marks(X)}. Beware of the usual
+  trap with factors: numerical values are not
+  interpreted in the same way as character values. 
+}
+\seealso{
+ \code{\link{linearKdot}},
+ \code{\link{linearK}}.
+}
+\examples{
+   lam <- table(marks(chicago))/(summary(chicago)$totlength)
+   lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) }
+   lam. <- function(x,y,const=sum(lam)){ rep(const, length(x)) }
+
+   K <- linearKdot.inhom(chicago, "assault", lamI, lam.)
+
+   \dontrun{
+     fit <- lppm(chicago, ~marks + x)
+     linearKdot.inhom(chicago, "assault", fit, fit)
+   }
+}
+\author{\adrian}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/linearKinhom.Rd b/man/linearKinhom.Rd
new file mode 100644
index 0000000..7079145
--- /dev/null
+++ b/man/linearKinhom.Rd
@@ -0,0 +1,165 @@
+\name{linearKinhom}
+\alias{linearKinhom}
+\title{
+  Inhomogeneous Linear K Function
+}
+\description{
+  Computes an estimate of the inhomogeneous linear \eqn{K} function
+  for a point pattern on a linear network.
+}
+\usage{
+linearKinhom(X, lambda=NULL, r=NULL, ..., correction="Ang",
+             normalise=TRUE, normpower=1,
+             update=TRUE, leaveoneout=TRUE, ratio=FALSE)
+}
+\arguments{
+  \item{X}{
+    Point pattern on linear network (object of class \code{"lpp"}).
+  }
+  \item{lambda}{
+    Intensity values for the point pattern. Either a numeric vector,
+    a \code{function}, a pixel image
+    (object of class \code{"im"} or \code{"linim"}) or
+    a fitted point process model (object of class \code{"ppm"}
+    or \code{"lppm"}).
+  }
+  \item{r}{
+    Optional. Numeric vector of values of the function argument \eqn{r}.
+    There is a sensible default.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{correction}{
+    Geometry correction.
+    Either \code{"none"} or \code{"Ang"}. See Details.
+  }
+  \item{normalise}{
+    Logical. If \code{TRUE} (the default), the denominator of the estimator is 
+    data-dependent (equal to the sum of the reciprocal intensities at the data
+    points, raised to \code{normpower}), which reduces the sampling variability.
+    If \code{FALSE}, the denominator is the length of the network.
+  }
+  \item{normpower}{
+    Integer (usually either 1 or 2).
+    Normalisation power. See Details.
+  }
+  \item{update}{
+    Logical value indicating what to do when \code{lambda} is a fitted model
+    (class \code{"lppm"} or \code{"ppm"}).
+    If \code{update=TRUE} (the default),
+    the model will first be refitted to the data \code{X}
+    (using \code{\link{update.lppm}} or \code{\link{update.ppm}})
+    before the fitted intensity is computed.
+    If \code{update=FALSE}, the fitted intensity of the
+    model will be computed without re-fitting it to \code{X}.
+  }
+  \item{leaveoneout}{
+    Logical value (passed to \code{\link{fitted.lppm}} or
+    \code{\link{fitted.ppm}}) specifying whether to use a
+    leave-one-out rule when calculating the intensity,
+    when \code{lambda} is a fitted model.
+    Supported only when \code{update=TRUE}.
+  }
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    the estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+}
+\details{
+  This command computes the inhomogeneous version of the 
+  linear \eqn{K} function from point pattern data on a linear network.
+
+  If \code{lambda = NULL} the result is equivalent to the
+  homogeneous \eqn{K} function \code{\link{linearK}}.
+  If \code{lambda} is given, then it is expected to provide estimated values
+  of the intensity of the point process at each point of \code{X}. 
+  The argument \code{lambda} may be a numeric vector (of length equal to
+  the number of points in \code{X}), or a \code{function(x,y)} that will be
+  evaluated at the points of \code{X} to yield numeric values, 
+  or a pixel image (object of class \code{"im"}) or a fitted point 
+  process model (object of class \code{"ppm"} or \code{"lppm"}).
+
+  If \code{lambda} is a fitted point process model,
+  the default behaviour is to update the model by re-fitting it to
+  the data, before computing the fitted intensity.
+  This can be disabled by setting \code{update=FALSE}.
+  
+  If \code{correction="none"}, the calculations do not include
+  any correction for the geometry of the linear network.
+  If \code{correction="Ang"}, the pair counts are weighted using
+  Ang's correction (Ang, 2010).
+
+  Each estimate is initially computed as 
+  \deqn{
+    \widehat K_{\rm inhom}(r) = \frac{1}{\mbox{length}(L)}
+    \sum_i \sum_j \frac{1\{d_{ij} \le r\}
+      e(x_i,x_j)}{\lambda(x_i)\lambda(x_j)}
+  }{
+    K^inhom(r)= (1/length(L)) sum[i] sum[j] 1(d[i,j] <= r) * 
+    e(x[i],x[j])/(lambda(x[i]) * lambda(x[j]))
+  }
+  where \code{L} is the linear network,
+  \eqn{d_{ij}}{d[i,j]} is the distance between points
+  \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}, and
+  \eqn{e(x_i,x_j)}{e(x[i],x[j])} is a weight.
+  If \code{correction="none"} then this weight is equal to 1,
+  while if \code{correction="Ang"} the weight is
+  \eqn{e(x_i,x_j) = 1/m(x_i, d_{ij})}{e(x[i],x[j]) = 1/m(x[i],d[i,j])}
+  where \eqn{m(u,t)} is the number of locations on the network that lie
+  exactly \eqn{t} units distant from location \eqn{u} by the shortest
+  path.
+
+  If \code{normalise=TRUE} (the default), then the estimates
+  described above
+  are multiplied by \eqn{c^{\mbox{normpower}}}{c^normpower} where 
+  \eqn{
+    c = \mbox{length}(L)/\sum (1/\lambda(x_i)).
+  }{
+    c = length(L)/sum[i] (1/lambda(x[i])).
+  }
+  This rescaling reduces the variability and bias of the estimate
+  in small samples and in cases of very strong inhomogeneity.
+  The default value of \code{normpower} is 1 (for consistency with
+  previous versions of \pkg{spatstat})
+  but the most sensible value is 2, which would correspond to rescaling
+  the \code{lambda} values so that
+  \eqn{
+    \sum (1/\lambda(x_i)) = \mbox{length}(L).
+  }{
+    sum[i] (1/lambda(x[i])) = length(L).
+  }
+}
+\value{
+  Function value table (object of class \code{"fv"}).
+}
+\author{
+  Ang Qi Wei \email{aqw07398@hotmail.com} and
+  \adrian
+}
+\references{
+  Ang, Q.W. (2010) Statistical methodology for spatial point patterns
+  on a linear network. MSc thesis, University of Western Australia.
+  
+  Ang, Q.W., Baddeley, A. and Nair, G. (2012)
+  Geometrically corrected second-order analysis of 
+  events on a linear network, with applications to
+  ecology and criminology.
+  \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617.
+}
+\seealso{
+  \code{\link{lpp}}
+}
+\examples{
+  data(simplenet)
+  X <- rpoislpp(5, simplenet)
+  fit <- lppm(X ~x)
+  K <- linearKinhom(X, lambda=fit)
+  plot(K)
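+  # a hedged sketch: the normpower=2 rescaling discussed in Details
+  K2 <- linearKinhom(X, lambda=fit, normpower=2)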
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
+
diff --git a/man/lineardirichlet.Rd b/man/lineardirichlet.Rd
new file mode 100644
index 0000000..6e94c86
--- /dev/null
+++ b/man/lineardirichlet.Rd
@@ -0,0 +1,50 @@
+\name{lineardirichlet}
+\alias{lineardirichlet}
+\title{
+  Dirichlet Tessellation on a Linear Network
+}
+\description{
+  Given a point pattern on a linear network, compute the Dirichlet
+  (or Voronoi or Thiessen) tessellation induced by the points.
+}
+\usage{
+lineardirichlet(X)
+}
+\arguments{
+  \item{X}{
+    Point pattern on a linear network (object of class \code{"lpp"}).
+  }
+}
+\details{
+  The Dirichlet tessellation induced by a point pattern \code{X}
+  on a linear network \code{L}
+  is a partition of \code{L} into subsets. The subset \code{L[i]}
+  associated with the data point \code{X[i]} is the part of \code{L}
+  lying closer to \code{X[i]} than to any other data point \code{X[j]},
+  where distance is measured by the shortest path.
+}
+\section{Missing tiles}{
+  If the linear network is not connected, and if one of the connected
+  components contains no data points, then the Dirichlet tessellation
+  is mathematically undefined inside this component.
+  The resulting tessellation object includes
+  a tile with label \code{NA}, which contains this component of the network.
+  A plot of the tessellation will not show this tile. 
+}
+\value{
+  A tessellation on a linear network
+  (object of class \code{"lintess"}).
+}
+\author{
+  \adrian.
+}
+\seealso{
+  \code{\link{lintess}}
+}
+\examples{
+  X <- runiflpp(5, simplenet)
+  plot(lineardirichlet(X), lwd=3)
+  points(X)
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/lineardisc.Rd b/man/lineardisc.Rd
new file mode 100644
index 0000000..4dfd1c4
--- /dev/null
+++ b/man/lineardisc.Rd
@@ -0,0 +1,108 @@
+\name{lineardisc}
+\alias{lineardisc}
+\alias{countends}
+\title{
+  Compute Disc of Given Radius in Linear Network
+}
+\description{
+  Computes the \sQuote{disc} of given radius and centre
+  in a linear network.
+}
+\usage{
+  lineardisc(L, x = locator(1), r, plotit = TRUE,
+             cols=c("blue", "red","green"))
+
+  countends(L, x = locator(1), r, toler=NULL)
+}
+\arguments{
+  \item{L}{
+    Linear network (object of class \code{"linnet"}).
+  }
+  \item{x}{
+    Location of centre of disc.
+    Either a point pattern (object of class \code{"ppp"})
+    containing exactly 1 point, or a numeric vector of length 2.
+  }
+  \item{r}{
+    Radius of disc.
+  }
+  \item{plotit}{
+    Logical. Whether to plot the disc.
+  }
+  \item{cols}{
+    Colours for plotting the disc. A numeric or character vector of
+    length 3 specifying the colours of the disc centre, disc lines and
+    disc endpoints respectively.
+  }
+  \item{toler}{
+    Optional. Distance threshold for \code{countends}. See Details.
+    There is a sensible default.
+  }
+}
+\details{
+  The \sQuote{disc} \eqn{B(x,r)} of centre \eqn{x} and radius \eqn{r}
+  in a linear network \eqn{L} is the set of all points
+  \eqn{u} in \eqn{L} such that the shortest path distance from \eqn{x}
+  to \eqn{u} is less than or equal to \eqn{r}. This is a union of line
+  segments contained in \eqn{L}.
+
+  The \emph{relative boundary} of the disc \eqn{B(x,r)}
+  is the set of points \eqn{u} in \eqn{L} such that the shortest path
+  distance from \eqn{x} to \eqn{u} is \emph{equal} to \eqn{r}.
+  
+  The function \code{lineardisc} computes the
+  disc of radius \eqn{r} and its relative boundary,
+  optionally plots them, and returns them.
+  The faster function \code{countends} simply counts the number of
+  points in the relative boundary.
+
+  The optional threshold \code{toler} is used to suppress numerical
+  errors in \code{countends}.
+  If the distance from \eqn{x} to a network vertex \eqn{v}
+  is between \code{r-toler} and \code{r+toler}, the vertex
+  will be treated as lying on the relative boundary.    
+}
+\value{
+  The value of \code{lineardisc} is a list with two entries:
+  \item{lines }{Line segment pattern (object of class \code{"psp"})
+    representing the interior of the disc.}
+  \item{endpoints}{Point pattern (object of class \code{"ppp"})
+    representing the relative boundary of the disc.
+  }
+  The value of \code{countends} is an integer giving the number of
+  points in the relative boundary.
+}
+\author{
+  Ang Qi Wei \email{aqw07398@hotmail.com} and
+  \adrian
+}
+\seealso{
+  \code{\link{linnet}}
+}
+\references{
+Ang, Q.W. (2010)
+\emph{Statistical methodology for events on a network}.
+Master's thesis, School of Mathematics and Statistics, University of
+  Western Australia.
+  
+Ang, Q.W., Baddeley, A. and Nair, G. (2012)
+Geometrically corrected second-order analysis of 
+events on a linear network, with applications to
+ecology and criminology.
+  \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617.
+}
+\examples{
+    # letter 'A' 
+    v <- ppp(x=(-2):2, y=3*c(0,1,2,1,0), c(-3,3), c(-1,7))
+    edg <- cbind(1:4, 2:5)
+    edg <- rbind(edg, c(2,4))
+    letterA <- linnet(v, edges=edg)
+
+   lineardisc(letterA, c(0,3), 1.6)
+   # count the endpoints
+   countends(letterA, c(0,3), 1.6)
+   # cross-check (slower)
+   en <- lineardisc(letterA, c(0,3), 1.6, plotit=FALSE)$endpoints
+   npoints(en)
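+   # hedged illustration of the numerical tolerance described in Details
+   countends(letterA, c(0,3), 1.6, toler=0.001)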
+}
+\keyword{spatial}
diff --git a/man/linearmarkconnect.Rd b/man/linearmarkconnect.Rd
new file mode 100644
index 0000000..0be0e3a
--- /dev/null
+++ b/man/linearmarkconnect.Rd
@@ -0,0 +1,92 @@
+\name{linearmarkconnect}
+\alias{linearmarkconnect}
+\title{
+  Mark Connection Function for Multitype Point Pattern on Linear Network
+}
+\description{
+  For a multitype point pattern on a linear network,
+  estimate the mark connection function
+  from points of type \eqn{i} to points of type \eqn{j}.
+}
+\usage{
+linearmarkconnect(X, i, j, r=NULL, \dots)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the mark connection function
+    \eqn{p_{ij}(r)}{p[ij](r)} will be computed.
+    An object of class \code{"lpp"} which 
+    must be a multitype point pattern (a marked point pattern
+    whose marks are a factor).
+  }
+  \item{i}{Number or character string identifying the type (mark value)
+    of the points in \code{X} from which distances are measured.
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{j}{Number or character string identifying the type (mark value)
+    of the points in \code{X} to which distances are measured.
+    Defaults to the second level of \code{marks(X)}.
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the function
+    \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{linearpcfcross}}
+    and \code{\link{linearpcf}}.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+}
+\details{
+  This is a counterpart of the function \code{\link{markconnect}} 
+  for a point pattern on a linear network (object of class \code{"lpp"}).
+
+  The arguments \code{i} and \code{j} will be interpreted as
+  levels of the factor \code{marks(X)}. 
+  If \code{i} or \code{j} is missing, it defaults to the first
+  or second level of the marks factor, respectively.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{p_{ij}(r)}{p[ij](r)}
+  should be evaluated. 
+  The values of \eqn{r} must be increasing nonnegative numbers
+  and the maximum \eqn{r} value must not exceed the radius of the
+  largest disc contained in the window.
+}
+\references{
+  Baddeley, A., Jammalamadaka, A. and Nair, G. (to appear)
+  Multitype point process analysis of spines on the
+  dendrite network of a neuron.
+  \emph{Applied Statistics} (Journal of the Royal Statistical
+  Society, Series C).
+}
+\section{Warnings}{
+  The arguments \code{i} and \code{j} are interpreted as
+  levels of the factor \code{marks(X)}. Beware of the usual
+  trap with factors: numerical values are not
+  interpreted in the same way as character values. 
+}
+\seealso{
+ \code{\link{linearpcfcross}},
+ \code{\link{linearpcf}},
+ \code{\link{linearmarkequal}},
+ \code{\link{markconnect}}.
+}
+\examples{
+   pab <- linearmarkconnect(chicago, "assault", "burglary")
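+   # same-type connection for comparison (a hedged illustration)
+   paa <- linearmarkconnect(chicago, "assault", "assault")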
+  \dontrun{
+   plot(alltypes(chicago, linearmarkconnect))
+  }
+}
+\author{\adrian}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/linearmarkequal.Rd b/man/linearmarkequal.Rd
new file mode 100644
index 0000000..4676e7e
--- /dev/null
+++ b/man/linearmarkequal.Rd
@@ -0,0 +1,77 @@
+\name{linearmarkequal}
+\alias{linearmarkequal}
+\title{
+  Mark Equality Function for Multitype Point Pattern on Linear Network
+}
+\description{
+  For a multitype point pattern on a linear network,
+  estimate the mark equality function, i.e. the conditional
+  probability that two points a given distance apart have the same mark.
+}
+\usage{
+linearmarkequal(X, r=NULL, \dots)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the mark equality function
+    \eqn{p(r)} will be computed.
+    An object of class \code{"lpp"} which 
+    must be a multitype point pattern (a marked point pattern
+    whose marks are a factor).
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the function
+    \eqn{p(r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{linearpcfcross}}
+    and \code{\link{linearpcf}}.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+}
+\details{
+  This is the mark equality function
+  for a point pattern on a linear network (object of class \code{"lpp"}).
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{p(r)}
+  should be evaluated. 
+  The values of \eqn{r} must be increasing nonnegative numbers
+  and the maximum \eqn{r} value must not exceed the radius of the
+  largest disc contained in the window.
+}
+\references{
+  Baddeley, A., Jammalamadaka, A. and Nair, G. (to appear)
+  Multitype point process analysis of spines on the
+  dendrite network of a neuron.
+  \emph{Applied Statistics} (Journal of the Royal Statistical
+  Society, Series C).
+}
+\seealso{
+ \code{\link{linearpcfcross}},
+ \code{\link{linearpcf}},
+ \code{\link{linearmarkconnect}},
+ \code{\link{markconnect}}.
+}
+\examples{
+   if(interactive()) {
+     X <- chicago
+   } else {
+     X <- runiflpp(20, simplenet) \%mark\%
+          sample(c("A","B"), 20, replace=TRUE)
+   }
+   p <- linearmarkequal(X)
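+   # the result is an "fv" object and can be plotted directly
+   plot(p)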
+}
+\author{\adrian}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/linearpcf.Rd b/man/linearpcf.Rd
new file mode 100644
index 0000000..d18acd7
--- /dev/null
+++ b/man/linearpcf.Rd
@@ -0,0 +1,102 @@
+\name{linearpcf}
+\alias{linearpcf}
+\title{
+  Linear Pair Correlation Function
+}
+\description{
+  Computes an estimate of the linear pair correlation function
+  for a point pattern on a linear network.
+}
+\usage{
+linearpcf(X, r=NULL, ..., correction="Ang", ratio=FALSE)
+}
+\arguments{
+  \item{X}{
+    Point pattern on linear network (object of class \code{"lpp"}).
+  }
+  \item{r}{
+    Optional. Numeric vector of values of the function argument \eqn{r}.
+    There is a sensible default.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{density.default}}
+    to control the smoothing.
+  }
+  \item{correction}{
+    Geometry correction.
+    Either \code{"none"} or \code{"Ang"}. See Details.
+  }
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    each estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+}
+\details{
+  This command computes the linear pair correlation function
+  from point pattern data on a linear network.
+
+  The pair correlation function is estimated from the
+  shortest-path distances between each pair of data points,
+  using the fixed-bandwidth kernel smoother
+  \code{\link{density.default}}, 
+  with a bias correction at each end of the interval of \eqn{r} values.
+  To switch off the bias correction, set \code{endcorrect=FALSE}.
+
+  The bandwidth for smoothing the pairwise distances
+  is determined by arguments \code{\dots}
+  passed to \code{\link{density.default}}, mainly the arguments
+  \code{bw} and \code{adjust}. The default is
+  to choose the bandwidth by Silverman's rule of thumb 
+  \code{bw="nrd0"} explained in \code{\link{density.default}}.
+
+  If \code{correction="none"}, the calculations do not include
+  any correction for the geometry of the linear network. The result is
+  an estimate of the first derivative of the 
+  network \eqn{K} function defined by Okabe and Yamada (2001).
+
+  If \code{correction="Ang"}, the pair counts are weighted using
+  Ang's correction (Ang, 2010). The result is an estimate of the
+  pair correlation function in the linear network.
+}
+\value{
+  Function value table (object of class \code{"fv"}).
+
+  If \code{ratio=TRUE} then the return value also has two
+  attributes called \code{"numerator"} and \code{"denominator"}
+  which are \code{"fv"} objects
+  containing the numerators and denominators of each
+  estimate of \eqn{g(r)}. 
+}
+\author{
+  Ang Qi Wei \email{aqw07398@hotmail.com} and
+  \adrian.
+}
+\references{
+  Ang, Q.W. (2010) Statistical methodology for spatial point patterns
+  on a linear network. MSc thesis, University of Western Australia.
+  
+  Ang, Q.W., Baddeley, A. and Nair, G. (2012)
+  Geometrically corrected second-order analysis of 
+  events on a linear network, with applications to
+  ecology and criminology.
+  \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617.
+  
+  Okabe, A. and Yamada, I. (2001) The K-function method on a network and
+  its computational implementation. \emph{Geographical Analysis}
+  \bold{33}, 271--290.
+}
+\seealso{
+  \code{\link{linearK}},
+  \code{\link{linearpcfinhom}},
+  \code{\link{lpp}}
+}
+\examples{
+  data(simplenet)
+  X <- rpoislpp(5, simplenet)
+  linearpcf(X)
+  linearpcf(X, correction="none")
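+  # hedged sketch: bandwidth control via arguments passed to density.default
+  linearpcf(X, adjust=2)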
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/linearpcfcross.Rd b/man/linearpcfcross.Rd
new file mode 100644
index 0000000..50b1f0b
--- /dev/null
+++ b/man/linearpcfcross.Rd
@@ -0,0 +1,93 @@
+\name{linearpcfcross}
+\alias{linearpcfcross}
+\title{
+  Multitype Pair Correlation Function (Cross-type) for Linear Point Pattern
+}
+\description{
+  For a multitype point pattern on a linear network,
+  estimate the multitype pair correlation function
+  from points of type \eqn{i} to points of type \eqn{j}.
+}
+\usage{
+linearpcfcross(X, i, j, r=NULL, \dots, correction="Ang")
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the \eqn{i}-to-\eqn{j} pair correlation function
+    \eqn{g_{ij}(r)}{g[ij](r)} will be computed.
+    An object of class \code{"lpp"} which 
+    must be a multitype point pattern (a marked point pattern
+    whose marks are a factor).
+  }
+  \item{i}{Number or character string identifying the type (mark value)
+    of the points in \code{X} from which distances are measured.
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{j}{Number or character string identifying the type (mark value)
+    of the points in \code{X} to which distances are measured.
+    Defaults to the second level of \code{marks(X)}.
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the function
+    \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{correction}{
+    Geometry correction.
+    Either \code{"none"} or \code{"Ang"}. See Details.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link[stats]{density.default}}
+    to control the kernel smoothing.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+}
+\details{
+  This is a counterpart of the function \code{\link{pcfcross}} 
+  for a point pattern on a linear network (object of class \code{"lpp"}).
+
+  The arguments \code{i} and \code{j} will be interpreted as
+  levels of the factor \code{marks(X)}. 
+  If \code{i} or \code{j} is missing, it defaults to the first
+  or second level of the marks factor, respectively.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{g_{ij}(r)}{g[ij](r)}
+  should be evaluated. 
+  The values of \eqn{r} must be increasing nonnegative numbers
+  and the maximum \eqn{r} value must not exceed the radius of the
+  largest disc contained in the window.
+}
+\references{
+  Baddeley, A., Jammalamadaka, A. and Nair, G. (to appear)
+  Multitype point process analysis of spines on the
+  dendrite network of a neuron.
+  \emph{Applied Statistics} (Journal of the Royal Statistical
+  Society, Series C).
+}
+\section{Warnings}{
+  The arguments \code{i} and \code{j} are interpreted as
+  levels of the factor \code{marks(X)}. Beware of the usual
+  trap with factors: numerical values are not
+  interpreted in the same way as character values. 
+}
+\seealso{
+ \code{\link{linearpcfdot}},
+ \code{\link{linearpcf}},
+ \code{\link{pcfcross}}.
+}
+\examples{
+   data(chicago)
+   g <- linearpcfcross(chicago, "assault")
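+   # hedged illustration: specify both types explicitly
+   g2 <- linearpcfcross(chicago, "assault", "robbery")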
+}
+\author{\adrian}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/linearpcfcross.inhom.Rd b/man/linearpcfcross.inhom.Rd
new file mode 100644
index 0000000..4adef63
--- /dev/null
+++ b/man/linearpcfcross.inhom.Rd
@@ -0,0 +1,128 @@
+\name{linearpcfcross.inhom}
+\alias{linearpcfcross.inhom}
+\title{
+  Inhomogeneous Multitype Pair Correlation Function
+  (Cross-type) for Linear Point Pattern
+}
+\description{
+  For a multitype point pattern on a linear network,
+  estimate the inhomogeneous multitype pair correlation function
+  from points of type \eqn{i} to points of type \eqn{j}.
+}
+\usage{
+linearpcfcross.inhom(X, i, j, lambdaI, lambdaJ, r=NULL, \dots,
+                     correction="Ang", normalise=TRUE)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the \eqn{i}-to-\eqn{j} pair correlation function
+    \eqn{g_{ij}(r)}{g[ij](r)} will be computed.
+    An object of class \code{"lpp"} which 
+    must be a multitype point pattern (a marked point pattern
+    whose marks are a factor).
+  }
+  \item{i}{Number or character string identifying the type (mark value)
+    of the points in \code{X} from which distances are measured.
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{j}{Number or character string identifying the type (mark value)
+    of the points in \code{X} to which distances are measured.
+    Defaults to the second level of \code{marks(X)}.
+  }
+  \item{lambdaI}{
+    Intensity values for the points of type \code{i}. Either a numeric vector,
+    a \code{function}, a pixel image
+    (object of class \code{"im"} or \code{"linim"}) or
+    a fitted point process model (object of class \code{"ppm"}
+    or \code{"lppm"}).
+  }
+  \item{lambdaJ}{
+    Intensity values for the points of type \code{j}. Either a numeric vector,
+    a \code{function}, a pixel image
+    (object of class \code{"im"} or \code{"linim"}) or
+    a fitted point process model (object of class \code{"ppm"}
+    or \code{"lppm"}).
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the function
+    \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{correction}{
+    Geometry correction.
+    Either \code{"none"} or \code{"Ang"}. See Details.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link[stats]{density.default}}
+    to control the kernel smoothing.
+  }
+  \item{normalise}{
+    Logical. If \code{TRUE} (the default), the denominator of the estimator is 
+    data-dependent (equal to the sum of the reciprocal intensities at
+    the points of type \code{i}), which reduces the sampling variability.
+    If \code{FALSE}, the denominator is the length of the network.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+}
+\details{
+  This is a counterpart of the function \code{\link{pcfcross.inhom}} 
+  for a point pattern on a linear network (object of class \code{"lpp"}).
+
+  The arguments \code{i} and \code{j} will be interpreted as
+  levels of the factor \code{marks(X)}. 
+  If \code{i} or \code{j} is missing, it defaults to the first
+  or second level of the marks factor, respectively.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{g_{ij}(r)}{g[ij](r)}
+  should be evaluated. 
+  The values of \eqn{r} must be increasing nonnegative numbers
+  and the maximum \eqn{r} value must not exceed the radius of the
+  largest disc contained in the window.
+
+  If \code{lambdaI} or \code{lambdaJ} is a fitted point process model,
+  the default behaviour is to update the model by re-fitting it to
+  the data, before computing the fitted intensity.
+  This can be disabled by setting \code{update=FALSE}.
+}
+\references{
+  Baddeley, A., Jammalamadaka, A. and Nair, G. (to appear)
+  Multitype point process analysis of spines on the
+  dendrite network of a neuron.
+  \emph{Applied Statistics} (Journal of the Royal Statistical
+  Society, Series C).
+}
+\section{Warnings}{
+  The arguments \code{i} and \code{j} are interpreted as
+  levels of the factor \code{marks(X)}. Beware of the usual
+  trap with factors: numerical values are not
+  interpreted in the same way as character values. 
+}
+\seealso{
+ \code{\link{linearpcfdot}},
+ \code{\link{linearpcf}},
+ \code{\link{pcfcross.inhom}}.
+}
+\examples{
+   lam <- table(marks(chicago))/(summary(chicago)$totlength)
+   lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) }
+   lamJ <- function(x,y,const=lam[["robbery"]]){ rep(const, length(x)) }
+
+   g <- linearpcfcross.inhom(chicago, "assault", "robbery", lamI, lamJ)
+
+   \dontrun{
+     fit <- lppm(chicago, ~marks + x)
+     linearpcfcross.inhom(chicago, "assault", "robbery", fit, fit)
+   }
+}
+\author{\adrian}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/linearpcfdot.Rd b/man/linearpcfdot.Rd
new file mode 100644
index 0000000..aaace3d
--- /dev/null
+++ b/man/linearpcfdot.Rd
@@ -0,0 +1,89 @@
+\name{linearpcfdot}
+\alias{linearpcfdot}
+\title{
+  Multitype Pair Correlation Function (Dot-type) for Linear Point Pattern
+}
+\description{
+  For a multitype point pattern on a linear network,
+  estimate the multitype pair correlation function
+  from points of type \eqn{i} to points of any type.
+}
+\usage{
+linearpcfdot(X, i, r=NULL, \dots, correction="Ang")
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the \eqn{i}-to-any pair correlation function
+    \eqn{g_{i\bullet}(r)}{g[i.](r)} will be computed.
+    An object of class \code{"lpp"} which 
+    must be a multitype point pattern (a marked point pattern
+    whose marks are a factor).
+  }
+  \item{i}{Number or character string identifying the type (mark value)
+    of the points in \code{X} from which distances are measured.
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the function
+    \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{correction}{
+    Geometry correction.
+    Either \code{"none"} or \code{"Ang"}. See Details.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link[stats]{density.default}}
+    to control the kernel smoothing.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+}
+\details{
+  This is a counterpart of the function \code{\link{pcfdot}} 
+  for a point pattern on a linear network (object of class \code{"lpp"}).
+
+  The argument \code{i} will be interpreted as
+  a level of the factor \code{marks(X)}. 
+  If \code{i} is missing, it defaults to the first
+  level of the marks factor.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{g_{i\bullet}(r)}{g[i.](r)}
+  should be evaluated. 
+  The values of \eqn{r} must be increasing nonnegative numbers
+  and the maximum \eqn{r} value must not exceed the radius of the
+  largest disc contained in the window.
+}
+\references{
+  Baddeley, A., Jammalamadaka, A. and Nair, G. (to appear)
+  Multitype point process analysis of spines on the
+  dendrite network of a neuron.
+  \emph{Applied Statistics} (Journal of the Royal Statistical
+  Society, Series C).
+}
+\section{Warnings}{
+  The argument \code{i} is interpreted as a
+  level of the factor \code{marks(X)}. Beware of the usual
+  trap with factors: numerical values are not
+  interpreted in the same way as character values. 
+}
+\seealso{
+ \code{\link{linearpcfcross}},
+ \code{\link{linearpcf}},
+ \code{\link{pcfdot}}.
+}
+\examples{
+   data(chicago)
+   g <- linearpcfdot(chicago, "assault")
+}
+\author{\adrian}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/linearpcfdot.inhom.Rd b/man/linearpcfdot.inhom.Rd
new file mode 100644
index 0000000..be8dd31
--- /dev/null
+++ b/man/linearpcfdot.inhom.Rd
@@ -0,0 +1,124 @@
+\name{linearpcfdot.inhom}
+\alias{linearpcfdot.inhom}
+\title{
+  Inhomogeneous Multitype
+  Pair Correlation Function (Dot-type) for Linear Point Pattern
+}
+\description{
+  For a multitype point pattern on a linear network,
+  estimate the inhomogeneous multitype pair correlation function
+  from points of type \eqn{i} to points of any type.
+}
+\usage{
+linearpcfdot.inhom(X, i, lambdaI, lambdadot, r=NULL, \dots,
+                   correction="Ang", normalise=TRUE)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the \eqn{i}-to-any pair correlation function
+    \eqn{g_{i\bullet}(r)}{g[i.](r)} will be computed.
+    An object of class \code{"lpp"} which 
+    must be a multitype point pattern (a marked point pattern
+    whose marks are a factor).
+  }
+  \item{i}{Number or character string identifying the type (mark value)
+    of the points in \code{X} from which distances are measured.
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{lambdaI}{
+    Intensity values for the points of type \code{i}. Either a numeric vector,
+    a \code{function}, a pixel image
+    (object of class \code{"im"} or \code{"linim"}) or
+    a fitted point process model (object of class \code{"ppm"}
+    or \code{"lppm"}).
+  }
+  \item{lambdadot}{
+    Intensity values for all points of \code{X}. Either a numeric vector,
+    a \code{function}, a pixel image
+    (object of class \code{"im"} or \code{"linim"}) or
+    a fitted point process model (object of class \code{"ppm"}
+    or \code{"lppm"}).
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the function
+    \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated.
+    There is a sensible default.
+    First-time users are strongly advised not to specify this argument.
+    See below for important conditions on \eqn{r}.
+  }
+  \item{correction}{
+    Geometry correction.
+    Either \code{"none"} or \code{"Ang"}. See Details.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link[stats]{density.default}}
+    to control the kernel smoothing.
+  }
+  \item{normalise}{
+    Logical. If \code{TRUE} (the default), the denominator of the estimator is 
+    data-dependent (equal to the sum of the reciprocal intensities at
+    the points of type \code{i}), which reduces the sampling variability.
+    If \code{FALSE}, the denominator is the length of the network.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+}
+\details{
+  This is a counterpart of the function \code{\link{pcfdot.inhom}} 
+  for a point pattern on a linear network (object of class \code{"lpp"}).
+
+  The argument \code{i} will be interpreted as
+  a level of the factor \code{marks(X)}. 
+  If \code{i} is missing, it defaults to the first
+  level of the marks factor.
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{g_{i\bullet}(r)}{g[i.](r)}
+  should be evaluated. 
+  The values of \eqn{r} must be increasing nonnegative numbers
+  and the maximum \eqn{r} value must not exceed the radius of the
+  largest disc contained in the window.
+
+  If \code{lambdaI} or \code{lambdadot} is a fitted point process model,
+  the default behaviour is to update the model by re-fitting it to
+  the data, before computing the fitted intensity.
+  This can be disabled by setting \code{update=FALSE}.
+}
+\references{
+  Baddeley, A., Jammalamadaka, A. and Nair, G. (to appear)
+  Multitype point process analysis of spines on the
+  dendrite network of a neuron.
+  \emph{Applied Statistics} (Journal of the Royal Statistical
+  Society, Series C).
+}
+\section{Warnings}{
+  The argument \code{i} is interpreted as a
+  level of the factor \code{marks(X)}. Beware of the usual
+  trap with factors: numerical values are not
+  interpreted in the same way as character values. 
+}
+\seealso{
+ \code{\link{linearpcfcross.inhom}},
+ \code{\link{linearpcfcross}},
+ \code{\link{pcfcross.inhom}}.
+}
+\examples{
+   lam <- table(marks(chicago))/(summary(chicago)$totlength)
+   lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) }
+   lam. <- function(x,y,const=sum(lam)){ rep(const, length(x)) }
+
+   g <- linearpcfdot.inhom(chicago, "assault", lamI, lam.)
+
+   \dontrun{
+     fit <- lppm(chicago, ~marks + x)
+     linearpcfdot.inhom(chicago, "assault", fit, fit)
+   }
+}
+\author{\adrian}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/linearpcfinhom.Rd b/man/linearpcfinhom.Rd
new file mode 100644
index 0000000..1384fdf
--- /dev/null
+++ b/man/linearpcfinhom.Rd
@@ -0,0 +1,146 @@
+\name{linearpcfinhom}
+\alias{linearpcfinhom}
+\title{
+  Inhomogeneous Linear Pair Correlation Function
+}
+\description{
+  Computes an estimate of the inhomogeneous linear pair correlation function
+  for a point pattern on a linear network.
+}
+\usage{
+linearpcfinhom(X, lambda=NULL, r=NULL, ..., correction="Ang",
+               normalise=TRUE, normpower=1,
+               update = TRUE, leaveoneout = TRUE,
+               ratio = FALSE)
+}
+\arguments{
+  \item{X}{
+    Point pattern on linear network (object of class \code{"lpp"}).
+  }
+  \item{lambda}{
+    Intensity values for the point pattern. Either a numeric vector,
+    a \code{function}, a pixel image (object of class \code{"im"}) or
+    a fitted point process model (object of class \code{"ppm"}
+    or \code{"lppm"}).
+  }
+  \item{r}{
+    Optional. Numeric vector of values of the function argument \eqn{r}.
+    There is a sensible default.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{density.default}}
+    to control the smoothing.
+  }
+  \item{correction}{
+    Geometry correction.
+    Either \code{"none"} or \code{"Ang"}. See Details.
+  }
+  \item{normalise}{
+    Logical. If \code{TRUE} (the default), the denominator of the estimator is 
+    data-dependent (equal to the sum of the reciprocal intensities at the data
+    points, raised to \code{normpower}), which reduces the sampling variability.
+    If \code{FALSE}, the denominator is the length of the network.
+  }
+  \item{normpower}{
+    Integer (usually either 1 or 2).
+    Normalisation power. See explanation in \code{\link{linearKinhom}}.
+  }
+  \item{update}{
+    Logical value indicating what to do when \code{lambda} is a fitted model
+    (class \code{"lppm"} or \code{"ppm"}).
+    If \code{update=TRUE} (the default),
+    the model will first be refitted to the data \code{X}
+    (using \code{\link{update.lppm}} or \code{\link{update.ppm}})
+    before the fitted intensity is computed.
+    If \code{update=FALSE}, the fitted intensity of the
+    model will be computed without re-fitting it to \code{X}.
+  }
+  \item{leaveoneout}{
+    Logical value (passed to \code{\link{fitted.lppm}} or
+    \code{\link{fitted.ppm}}) specifying whether to use a
+    leave-one-out rule when calculating the intensity,
+    when \code{lambda} is a fitted model.
+    Supported only when \code{update=TRUE}.
+  }
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    each estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+}
+\details{
+  This command computes the inhomogeneous version of the 
+  linear pair correlation function from point pattern data on a linear network.
+
+  If \code{lambda = NULL} the result is equivalent to the
+  homogeneous pair correlation function \code{\link{linearpcf}}.
+  If \code{lambda} is given, then it is expected to provide estimated values
+  of the intensity of the point process at each point of \code{X}. 
+  The argument \code{lambda} may be a numeric vector (of length equal to
+  the number of points in \code{X}), or a \code{function(x,y)} that will be
+  evaluated at the points of \code{X} to yield numeric values, 
+  or a pixel image (object of class \code{"im"}) or a fitted point 
+  process model (object of class \code{"ppm"} or \code{"lppm"}).
+
+  If \code{lambda} is a fitted point process model,
+  the default behaviour is to update the model by re-fitting it to
+  the data, before computing the fitted intensity.
+  This can be disabled by setting \code{update=FALSE}.
+  
+  If \code{correction="none"}, the calculations do not include
+  any correction for the geometry of the linear network.
+  If \code{correction="Ang"}, the pair counts are weighted using
+  Ang's correction (Ang, 2010). 
+
+  The bandwidth for smoothing the pairwise distances
+  is determined by arguments \code{\dots}
+  passed to \code{\link{density.default}}, mainly the arguments
+  \code{bw} and \code{adjust}. The default is
+  to choose the bandwidth by Silverman's rule of thumb 
+  \code{bw="nrd0"} explained in \code{\link{density.default}}.
+}
+\value{
+  Function value table (object of class \code{"fv"}).
+
+  If \code{ratio=TRUE} then the return value also has two
+  attributes called \code{"numerator"} and \code{"denominator"}
+  which are \code{"fv"} objects
+  containing the numerators and denominators of each
+  estimate of \eqn{g(r)}. 
+}
+\author{
+  Ang Qi Wei \email{aqw07398@hotmail.com} and
+  \adrian.
+}
+\references{
+  Ang, Q.W. (2010) Statistical methodology for spatial point patterns
+  on a linear network. MSc thesis, University of Western Australia.
+
+  Ang, Q.W., Baddeley, A. and Nair, G. (2012)
+  Geometrically corrected second-order analysis of 
+  events on a linear network, with applications to
+  ecology and criminology.
+  \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617.
+
+  Okabe, A. and Yamada, I. (2001) The K-function method on a network and
+  its computational implementation. \emph{Geographical Analysis}
+  \bold{33}, 271--290.
+}
+
+\seealso{
+  \code{\link{linearpcf}},
+  \code{\link{linearKinhom}},
+  \code{\link{lpp}}
+}
+\examples{
+  data(simplenet)
+  X <- rpoislpp(5, simplenet)
+  fit <- lppm(X ~x)
+  K <- linearpcfinhom(X, lambda=fit)
+  plot(K)
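+  # hedged sketch: keep the fitted intensity as-is and save ratio components
+  g2 <- linearpcfinhom(X, lambda=fit, update=FALSE, ratio=TRUE)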
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
+
diff --git a/man/linequad.Rd b/man/linequad.Rd
new file mode 100644
index 0000000..25af80c
--- /dev/null
+++ b/man/linequad.Rd
@@ -0,0 +1,65 @@
+\name{linequad}
+\alias{linequad}
+\title{
+  Quadrature Scheme on a Linear Network
+}
+\description{
+  Generates a quadrature scheme (an object of class \code{"quad"})
+  on a linear network.
+}
+\usage{
+linequad(X, Y, \dots, eps = NULL, nd = 1000, random = FALSE)
+}
+\arguments{
+  \item{X}{
+    Data points. An object of class \code{"lpp"} or \code{"ppp"}.
+  }
+  \item{Y}{
+    Line segments on which the points of \code{X} lie.
+    An object of class \code{"psp"}.
+    Required only when \code{X} is a \code{"ppp"} object.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{eps}{
+    Optional. Spacing between successive dummy points along each
+    segment.
+  }
+  \item{nd}{
+    Optional. Total number of dummy points to be generated.
+  }
+  \item{random}{
+    Logical value indicating whether the sequence of dummy points
+    should start at a randomly-chosen position along each segment.
+  }
+}
+\details{
+  This command generates a quadrature scheme (object of class
+  \code{"quad"}) from a pattern of points on a linear network.
+
+  Normally the user does not need to call \code{linequad} explicitly.
+  It is invoked by \pkg{spatstat} functions when needed.
+  A quadrature scheme is required by \code{\link{lppm}}
+  in order to fit point process models to point pattern data on a linear
+  network. A quadrature scheme is also used by \code{\link{rhohat.lpp}}
+  and other functions.
+
+  In order to create the quadrature scheme, dummy points are placed
+  along each line segment of the network. The dummy points are 
+  evenly-spaced with spacing \code{eps}. The default is
+  \code{eps = totlen/nd} where \code{totlen} is the total length of
+  all line segments in the network. 
+}
+\value{
+  A quadrature scheme (object of class \code{"quad"}).
+}
+\author{
+  \adrian,
+  Greg McSwiggan and Suman Rakshit.
+}
+\seealso{
+  \code{\link{lppm}}
+}
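+\examples{
+  # a hedged sketch of a direct call; linequad is normally invoked internally
+  Q <- linequad(spiders, nd=100)
+  Q
+}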
+\keyword{datagen}
+\keyword{spatial}
diff --git a/man/linfun.Rd b/man/linfun.Rd
new file mode 100644
index 0000000..2762db8
--- /dev/null
+++ b/man/linfun.Rd
@@ -0,0 +1,74 @@
+\name{linfun}
+\Rdversion{1.1}
+\alias{linfun}
+\title{
+  Function on a Linear Network
+}
+\description{
+  Create a function on a linear network.
+}
+\usage{
+  linfun(f, L)
+}
+\arguments{
+  \item{f}{
+    A \code{function} in the \R language.
+  }
+  \item{L}{
+    A linear network (object of class \code{"linnet"})
+    on which \code{f} is defined.
+  }
+}
+\details{
+  This creates an object of class \code{"linfun"}.
+  This is a simple mechanism for handling a function
+  defined on a linear network, to make it easier to display
+  and manipulate.
+
+  \code{f} should be a \code{function} in the \R language,
+  with formal arguments \code{f(x,y,seg,tp)} or 
+  \code{f(x,y,seg,tp, \dots)} where \code{x,y} are
+  Cartesian coordinates of locations on the linear network,
+  \code{seg, tp} are the local coordinates, and
+  \code{\dots} are optional additional arguments.
+  
+  The function \code{f} should be vectorised: that is,
+  if \code{x,y,seg,tp} are numeric vectors of the same length
+  \code{n}, then \code{v <- f(x,y,seg,tp)}
+  should be a vector of length \code{n}.
+
+  \code{L} should be a linear network (object of class \code{"linnet"})
+  inside which the function \code{f} is well-defined.
+
+  The result is a function \code{g} in the \R language which belongs to
+  the special class \code{"linfun"}. This function
+  can be called as \code{g(X)} where \code{X} is an \code{"lpp"} object,
+  or called as \code{g(x,y)} or \code{g(x,y,seg,tp)} where
+  \code{x,y,seg,tp} are coordinates. There are several methods
+  for this class including \code{print}, \code{plot}
+  and \code{\link{as.linim}}.
+}
+\value{
+  A function in the \R language.
+  It also belongs to the class \code{"linfun"} which has methods
+  for \code{plot}, \code{print} etc.
+}
+\seealso{
+  \code{\link{methods.linfun}} for methods applicable to
+  \code{"linfun"} objects.
+
+  \code{\link{distfun.lpp}},
+  \code{\link{nnfun.lpp}}.
+}
+\examples{
+  f <- linfun(function(x,y,seg,tp) { x+y }, simplenet)
+  plot(f)
+  X <- runiflpp(3, simplenet)
+  plot(X, add=TRUE, cex=2)
+  f(X)
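+  # convert to a pixel image on the network (the as.linim method noted above)
+  plot(as.linim(f))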
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/linim.Rd b/man/linim.Rd
new file mode 100644
index 0000000..7ea4001
--- /dev/null
+++ b/man/linim.Rd
@@ -0,0 +1,88 @@
+\name{linim}
+\alias{linim}
+\title{
+  Create Pixel Image on Linear Network
+}
+\description{
+  Creates an object of class \code{"linim"} that represents
+  a pixel image on a linear network.
+}
+\usage{
+  linim(L, Z, \dots, df=NULL)
+}
+\arguments{
+  \item{L}{
+    Linear network (object of class \code{"linnet"}).
+  }
+  \item{Z}{
+    Pixel image (object of class \code{"im"}).
+  }
+  \item{\dots}{Ignored.}
+  \item{df}{
+    Advanced use only. Data frame giving full details of the mapping between
+    the pixels of \code{Z} and the lines of \code{L}.
+    See Details.
+  }
+}
+\details{
+  This command creates an object of class \code{"linim"} that represents
+  a pixel image defined on a linear network.
+  Typically such objects are
+  used to represent the result of smoothing or model-fitting on the
+  network. Most users will not need to call \code{linim} directly.
+
+  The argument \code{L} is a linear network (object of class \code{"linnet"}).
+  It gives the exact spatial locations
+  of the line segments of the network, and their connectivity.
+
+  The argument \code{Z} is a pixel image object of class \code{"im"}
+  that gives a pixellated approximation of the function values.
+  
+  For increased efficiency, advanced users may specify the 
+  optional argument \code{df}. This is a data frame giving the
+  precomputed mapping between the pixels of \code{Z}
+  and the line segments of \code{L}.
+  It should have columns named \code{xc, yc} containing the coordinates of
+  the pixel centres, \code{x,y} containing the projections of these
+  pixel centres onto the linear network, \code{mapXY} identifying the
+  line segment on which each projected point lies, and \code{tp} giving
+  the parametric position of \code{(x,y)} along the segment.
+}
+\value{
+  Object of class \code{"linim"} that also inherits the class
+  \code{"im"}.
+  There is a special method for plotting this class.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{plot.linim}},
+  \code{\link{linnet}},
+  \code{\link{eval.linim}},
+  \code{\link{Math.linim}},
+  \code{\link{im}}.
+}
+\examples{
+  M <- as.mask.psp(as.psp(simplenet))
+  Z <- as.im(function(x,y) {x-y}, W=M)
+  X <- linim(simplenet, Z)
+  X
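+  # "linim" objects have their own plot method
+  plot(X)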
+}
+\references{
+  Ang, Q.W. (2010)
+  \emph{Statistical methodology for events on a network}.
+  Master's thesis, School of Mathematics and Statistics, University of
+  Western Australia.
+  
+  Ang, Q.W., Baddeley, A. and Nair, G. (2012)
+  Geometrically corrected second-order analysis of 
+  events on a linear network, with applications to
+  ecology and criminology.
+  \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617.
+
+  McSwiggan, G., Nair, M.G. and Baddeley, A. (2012)
+  Fitting Poisson point process models to events 
+  on a linear network. Manuscript in preparation.
+}
+\keyword{spatial}
diff --git a/man/linnet.Rd b/man/linnet.Rd
new file mode 100644
index 0000000..2560f56
--- /dev/null
+++ b/man/linnet.Rd
@@ -0,0 +1,99 @@
+\name{linnet}
+\alias{linnet}
+\title{
+  Create a Linear Network
+}
+\description{
+  Creates an object of class \code{"linnet"} representing
+  a network of line segments.
+}
+\usage{
+linnet(vertices, m, edges, sparse=FALSE, warn=TRUE)
+}
+\arguments{
+  \item{vertices}{
+    Point pattern (object of class \code{"ppp"})
+    specifying the vertices of the network.
+  }
+  \item{m}{
+    Adjacency matrix. A matrix or sparse matrix
+    of logical values equal to \code{TRUE}
+    when the corresponding vertices are joined by a line.
+    (Specify either \code{m} or \code{edges}.)
+  }
+  \item{edges}{
+    Edge list. A two-column matrix of integers,
+    specifying all pairs of vertices
+    that should be joined by an edge. 
+    (Specify either \code{m} or \code{edges}.)
+  }
+  \item{sparse}{
+    Optional. Logical value indicating whether to use a
+    sparse matrix representation of the network. See Details.
+  }
+  \item{warn}{
+    Logical value indicating whether to issue a warning if the resulting
+    network is not connected.
+  }
+}
+\details{
+  An object of class \code{"linnet"} represents a network of
+  straight line segments in two dimensions. The function \code{linnet} creates
+  such an object from the minimal information: the spatial location
+  of each vertex (endpoint, crossing point or meeting point of lines)
+  and information about which vertices are joined by an edge.
+
+  If \code{sparse=FALSE} (the default), the algorithm will compute
+  and store various properties of the network, including
+  the adjacency matrix \code{m} and a matrix giving the
+  shortest-path distances between each pair of vertices in the network.
+  This is more efficient for small datasets. However it can require
+  large amounts of memory and can take a long time to execute.
+
+  If \code{sparse=TRUE}, then the shortest-path distances will not be computed,
+  and the network adjacency matrix \code{m} will be stored as a
+  sparse matrix. This saves a lot of time and memory when creating the
+  linear network.
+
+  If the argument \code{edges} is given, then it will also determine
+  the \emph{ordering} of the line segments when they are stored or extracted.
+  For example, \code{edges[i,]} corresponds to \code{as.psp(L)[i]}.
+}
+\value{
+  Object of class \code{"linnet"} representing the linear network.
+}
+\author{
+  Ang Qi Wei \email{aqw07398@hotmail.com} and
+  \adrian 
+}
+\seealso{
+  \code{\link{simplenet}} for an example of a linear network.
+
+  \code{\link[spatstat:methods.linnet]{methods.linnet}} for
+  methods applicable to \code{linnet} objects.
+
+  Special tools: \code{\link{thinNetwork}}, \code{\link{insertVertices}},
+  \code{\link{connected.linnet}}, \code{\link{lixellate}}.
+  
+  \code{\link{delaunayNetwork}} for the Delaunay triangulation
+  as a network.
+  
+  \code{\link{ppp}},
+  \code{\link{psp}}.
+}
+\examples{
+  # letter 'A' specified by adjacency matrix
+  v <- ppp(x=(-2):2, y=3*c(0,1,2,1,0), c(-3,3), c(-1,7))
+  m <- matrix(FALSE, 5,5)
+  for(i in 1:4) m[i,i+1] <- TRUE
+  m[2,4] <- TRUE
+  m <- m | t(m)
+  letterA <- linnet(v, m)
+  plot(letterA)
+
+  # letter 'A' specified by edge list
+  edg <- cbind(1:4, 2:5)
+  edg <- rbind(edg, c(2,4))
+  letterA <- linnet(v, edges=edg)
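+
+  # hedged sketch: sparse representation, useful for large networks (see Details)
+  letterAsparse <- linnet(v, edges=edg, sparse=TRUE)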
+}
+\keyword{spatial}
diff --git a/man/lintess.Rd b/man/lintess.Rd
new file mode 100644
index 0000000..9a47dc3
--- /dev/null
+++ b/man/lintess.Rd
@@ -0,0 +1,80 @@
+\name{lintess}
+\alias{lintess}
+\title{
+  Tessellation on a Linear Network
+}
+\description{
+  Create a tessellation on a linear network.
+}
+\usage{
+lintess(L, df)
+}
+\arguments{
+  \item{L}{
+    Linear network (object of class \code{"linnet"}).
+  }
+  \item{df}{
+    Data frame of coordinates of endpoints of the 
+    tiles of the tessellation. 
+  }
+}
+\details{
+  A tessellation on a linear network \code{L} is a partition of the
+  network into non-overlapping pieces (tiles). Each tile consists of one
+  or more line segments which are subsets of the line segments making up
+  the network. A tile can consist of several disjoint pieces.
+  
+  The data frame \code{df} should have columns named
+  \code{seg}, \code{t0}, \code{t1} and \code{tile}.
+
+  Each row of the data frame specifies one sub-segment of the network
+  and allocates it to a particular tile.
+  
+  The \code{seg} column specifies which line segment of the network
+  contains the sub-segment. Values of \code{seg} are integer indices
+  for the segments in \code{as.psp(L)}.
+
+  The \code{t0} and \code{t1} columns specify the start and end points
+  of the sub-segment. They should be numeric values between 0 and 1
+  inclusive, where the values 0 and 1 represent the two network vertices
+  that are joined by this network segment.
+
+  The \code{tile} column specifies which tile of the tessellation
+  includes this sub-segment. It will be coerced to a factor and its
+  levels will be the names of the tiles.
+
+  If \code{df} is missing or \code{NULL}, the result is a tessellation
+  with only one tile, consisting of the entire network \code{L}.
+}
+\value{
+  An object of class \code{"lintess"}.
+  There are methods for printing and plotting this object.
+}
+\author{
+  \adrian and Greg McSwiggan.
+}
+\seealso{
+  \code{\link{linnet}} for linear networks.
+
+  \code{\link{plot.lintess}} for plotting.
+
+  \code{\link{divide.linnet}} to make a tessellation demarcated by
+  given points.
+  
+  \code{\link{as.linfun.lintess}}, \code{\link{as.linnet.lintess}} and
+  \code{\link{as.linim}} to convert to other classes.
+
+  The undocumented methods \code{Window.lintess} and
+  \code{as.owin.lintess} extract the spatial window.
+}
+\examples{
+   # tessellation consisting of one tile for each existing segment
+   ns <- nsegments(simplenet)
+   df <- data.frame(seg=1:ns, t0=0, t1=1, tile=letters[1:ns])
+   u <- lintess(simplenet, df)
+   u
+   plot(u)
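+
+   # hedged sketch: tiles built from sub-segments via t0 and t1;
+   # each segment is split at its midpoint between tiles "lo" and "hi"
+   df2 <- data.frame(seg=rep(1:ns, each=2),
+                     t0=rep(c(0, 0.5), ns),
+                     t1=rep(c(0.5, 1), ns),
+                     tile=rep(c("lo", "hi"), ns))
+   v <- lintess(simplenet, df2)
+   plot(v)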
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/lixellate.Rd b/man/lixellate.Rd
new file mode 100644
index 0000000..37c1436
--- /dev/null
+++ b/man/lixellate.Rd
@@ -0,0 +1,83 @@
+\name{lixellate}
+\alias{lixellate}
+\title{
+  Subdivide Segments of a Network
+}
+\description{
+  Each line segment of a linear network
+  will be divided into several shorter segments
+  (line elements or lixels).
+}
+\usage{
+lixellate(X, \dots, nsplit, eps, sparse = TRUE)
+}
+\arguments{
+  \item{X}{
+    A linear network (object of class \code{"linnet"})
+    or a point pattern on a linear network (object of class \code{"lpp"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{nsplit}{
+    Number of pieces into which \emph{each} line segment of \code{X}
+    should be divided. Either a single integer, or an integer vector
+    with one entry for each line segment in \code{X}.
+    Incompatible with \code{eps}.
+  }
+  \item{eps}{
+    Maximum length of the resulting pieces of line segment.
+    A single numeric value.
+    Incompatible with \code{nsplit}.
+  }
+  \item{sparse}{
+    Optional. Logical value specifying whether the resulting
+    linear network should be represented using a sparse matrix.
+    If \code{sparse=NULL}, then the representation will be the
+    same as in \code{X}.
+  }
+}
+\details{
+  Each line segment in \code{X} will be subdivided into equal pieces.
+  The result is an object of the same kind as \code{X}, representing the
+  same data as \code{X} except that the segments have been subdivided.
+
+  Splitting is controlled by the arguments \code{nsplit} and \code{eps},
+  exactly one of which should be given.
+
+  If \code{nsplit} is given, it specifies the 
+  number of pieces into which \emph{each} line segment of \code{X}
+  should be divided. It should be either a single integer, or an integer vector
+  of length equal to the number of line segments in \code{X}.
+
+  If \code{eps} is given, it specifies the maximum length of
+  any resulting piece of line segment. 
+
+  It is strongly advisable to use \code{sparse=TRUE} (the default)
+  to limit the computation time.
+
+  If \code{X} is a point pattern (class \code{"lpp"}) then the
+  spatial coordinates and marks of each data point are unchanged, but the
+  local coordinates will change, because they are
+  adjusted to map them to the new subdivided network.
+}
+\value{
+  Object of the same kind as \code{X}.
+}
+\author{
+  Greg McSwiggan, 
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{linnet}}, \code{\link{lpp}}.
+}
+\examples{
+   A <- lixellate(simplenet, nsplit=4)
+   plot(A, main="lixellate(simplenet, nsplit=4)")
+   points(vertices(A), pch=16)
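+
+   # hedged alternative: control the maximum piece length with eps
+   B <- lixellate(simplenet, eps=0.2)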
+
+   spiders
+   lixellate(spiders, nsplit=3)
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/localK.Rd b/man/localK.Rd
new file mode 100644
index 0000000..65a1998
--- /dev/null
+++ b/man/localK.Rd
@@ -0,0 +1,132 @@
+\name{localK}
+\alias{localK}
+\alias{localL}
+\title{Neighbourhood density function}
+\description{
+  Computes the neighbourhood density function, a local version of
+  the \eqn{K}-function or \eqn{L}-function,
+  defined by Getis and Franklin (1987).
+}
+\usage{
+  localK(X, ..., correction = "Ripley", verbose = TRUE, rvalue=NULL)
+  localL(X, ..., correction = "Ripley", verbose = TRUE, rvalue=NULL)
+}
+\arguments{
+  \item{X}{A point pattern (object of class \code{"ppp"}).}
+  \item{\dots}{Ignored.}
+  \item{correction}{String specifying the edge correction to be applied.
+    Options are \code{"none"}, \code{"translate"}, \code{"translation"},
+    \code{"Ripley"},
+    \code{"isotropic"} or \code{"best"}.
+    Only one correction may be specified.
+  }
+  \item{verbose}{Logical flag indicating whether to print progress
+    reports during the calculation.
+  }
+  \item{rvalue}{Optional. A \emph{single} value of the distance argument
+    \eqn{r} at which the function L or K should be computed.
+  }
+}
+\details{
+  The command \code{localL} computes the \emph{neighbourhood density function},
+  a local version of the \eqn{L}-function (Besag's transformation of Ripley's
+  \eqn{K}-function) that was proposed by Getis and Franklin (1987).
+  The command \code{localK} computes the corresponding
+  local analogue of the K-function.
+
+  Given a spatial point pattern \code{X}, the neighbourhood density function
+  \eqn{L_i(r)}{L[i](r)} associated with the \eqn{i}th point
+  in \code{X} is computed by
+  \deqn{
+    L_i(r) = \sqrt{\frac a {(n-1) \pi} \sum_j e_{ij}}
+  }{
+    L[i](r) = sqrt( (a/((n-1)* pi)) * sum[j] e[i,j])
+  }
+  where the sum is over all points \eqn{j \neq i}{j != i} that lie
+  within a distance \eqn{r} of the \eqn{i}th point, 
+  \eqn{a} is the area of the observation window, \eqn{n} is the number
+  of points in \code{X}, and \eqn{e_{ij}}{e[i,j]} is an edge correction
+  term (as described in \code{\link{Kest}}).
+  The value of \eqn{L_i(r)}{L[i](r)} can also be interpreted as one
+  of the summands that contributes to the global estimate of the L
+  function.
+
+  By default, the function \eqn{L_i(r)}{L[i](r)} or
+  \eqn{K_i(r)}{K[i](r)} is computed for a range of \eqn{r} values
+  for each point \eqn{i}. The results are stored as a function value
+  table (object of class \code{"fv"}) with a column of the table
+  containing the function estimates for each point of the pattern
+  \code{X}.
+
+  Alternatively, if the argument \code{rvalue} is given, and it is a
+  single number, then the function will only be computed for this value
+  of \eqn{r}, and the results will be returned as a numeric vector,
+  with one entry of the vector for each point of the pattern \code{X}.
+
+  Inhomogeneous counterparts of \code{localK} and \code{localL}
+  are computed by \code{localKinhom} and \code{localLinhom}.
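+
+  As a sketch of this interpretation, the average of the local
+  estimates at a fixed distance should agree with the global estimate
+  under the same edge correction (the distance 0.1 is arbitrary):
+  \preformatted{
+    Kloc <- localK(cells, rvalue=0.1)  # one value per data point
+    mean(Kloc)                         # roughly Kest at r = 0.1
+  }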
+}
+\value{
+  If \code{rvalue} is given, the result is a numeric vector
+  of length equal to the number of points in the point pattern.
+
+  If \code{rvalue} is absent, the result is 
+  an object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+  Essentially a data frame containing columns
+  \item{r}{the vector of values of the argument \eqn{r}
+    at which the function \eqn{K} or \eqn{L} has been estimated
+  }
+  \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}
+    or \eqn{L(r)=r} for a stationary Poisson process
+  }
+  together with columns containing the values of the
+  neighbourhood density function for each point in the pattern.
+  Column \code{i} corresponds to the \code{i}th point.
+  The last two columns contain the \code{r} and \code{theo} values.
+}
+\references{
+  Getis, A. and Franklin, J. (1987)
+  Second-order neighbourhood analysis of mapped point patterns.
+  \emph{Ecology} \bold{68}, 473--477.
+}
+\seealso{
+  \code{\link{Kest}},
+  \code{\link{Lest}},
+  \code{\link{localKinhom}},
+  \code{\link{localLinhom}}.
+}
+\examples{
+  data(ponderosa)
+  X <- ponderosa
+
+  # compute all the local L functions
+  L <- localL(X)
+
+  # plot all the local L functions against r
+  plot(L, main="local L functions for ponderosa", legend=FALSE)
+
+  # plot only the local L function for point number 7
+  plot(L, iso007 ~ r)
+  
+  # compute the values of L(r) for r = 12 metres
+  L12 <- localL(X, rvalue=12)
+
+  # Spatially interpolate the values of L12
+  # Compare Figure 5(b) of Getis and Franklin (1987)
+  X12 <- X \%mark\% L12
+  Z <- Smooth(X12, sigma=5, dimyx=128)
+
+  plot(Z, col=topo.colors(128), main="smoothed neighbourhood density")
+  contour(Z, add=TRUE)
+  points(X, pch=16, cex=0.5)
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/localKinhom.Rd b/man/localKinhom.Rd
new file mode 100644
index 0000000..4af2a99
--- /dev/null
+++ b/man/localKinhom.Rd
@@ -0,0 +1,139 @@
+\name{localKinhom}
+\alias{localKinhom}
+\alias{localLinhom}
+\title{Inhomogeneous Neighbourhood Density Function}
+\description{
+  Computes spatially-weighted versions of
+  the local \eqn{K}-function or \eqn{L}-function.
+}
+\usage{
+  localKinhom(X, lambda, ...,
+              correction = "Ripley", verbose = TRUE, rvalue=NULL,
+              sigma = NULL, varcov = NULL)
+  localLinhom(X, lambda, ..., 
+              correction = "Ripley", verbose = TRUE, rvalue=NULL,
+              sigma = NULL, varcov = NULL)
+}
+\arguments{
+  \item{X}{A point pattern (object of class \code{"ppp"}).}
+  \item{lambda}{
+    Optional.
+    Values of the estimated intensity function.
+    Either a vector giving the intensity values
+    at the points of the pattern \code{X},
+    a pixel image (object of class \code{"im"}) giving the
+    intensity values at all locations, a fitted point process model
+    (object of class \code{"ppm"}) or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+  }
+  \item{\dots}{
+    Extra arguments. Ignored if \code{lambda} is present.
+    Passed to \code{\link{density.ppp}} if \code{lambda} is omitted.
+  }
+  \item{correction}{
+    String specifying the edge correction to be applied.
+    Options are \code{"none"}, \code{"translate"}, \code{"Ripley"},
+    \code{"translation"}, \code{"isotropic"} or \code{"best"}.
+    Only one correction may be specified.
+  }
+  \item{verbose}{Logical flag indicating whether to print progress
+    reports during the calculation.
+  }
+  \item{rvalue}{Optional. A \emph{single} value of the distance argument
+    \eqn{r} at which the function L or K should be computed.
+  }
+  \item{sigma, varcov}{
+    Optional arguments passed to \code{\link{density.ppp}} to control
+    the kernel smoothing procedure for estimating \code{lambda},
+    if \code{lambda} is missing.
+  }
+}
+\details{
+  The functions \code{localKinhom} and \code{localLinhom}
+  are inhomogeneous or weighted versions of the
+  neighbourhood density function implemented in
+  \code{\link{localK}} and \code{\link{localL}}.
+
+  Given a spatial point pattern \code{X}, the
+  inhomogeneous neighbourhood density function
+  \eqn{L_i(r)}{L[i](r)} associated with the \eqn{i}th point
+  in \code{X} is computed by
+  \deqn{
+    L_i(r) = \sqrt{\frac 1 \pi \sum_j \frac{e_{ij}}{\lambda_j}}
+  }{
+    L[i](r) = sqrt( (1/pi) * sum[j] e[i,j]/lambda[j])
+  }
+  where the sum is over all points \eqn{j \neq i}{j != i} that lie
+  within a distance \eqn{r} of the \eqn{i}th point, 
+  \eqn{\lambda_j}{\lambda[j]} is the estimated intensity of the
+  point pattern at the point \eqn{j},
+  and \eqn{e_{ij}}{e[i,j]} is an edge correction
+  term (as described in \code{\link{Kest}}).
+  The value of \eqn{L_i(r)}{L[i](r)} can also be interpreted as one
+  of the summands that contributes to the global estimate of the
+  inhomogeneous L function (see \code{\link{Linhom}}).
+
+  By default, the function \eqn{L_i(r)}{L[i](r)} or
+  \eqn{K_i(r)}{K[i](r)} is computed for a range of \eqn{r} values
+  for each point \eqn{i}. The results are stored as a function value
+  table (object of class \code{"fv"}) with a column of the table
+  containing the function estimates for each point of the pattern
+  \code{X}.
+
+  Alternatively, if the argument \code{rvalue} is given, and it is a
+  single number, then the function will only be computed for this value
+  of \eqn{r}, and the results will be returned as a numeric vector,
+  with one entry of the vector for each point of the pattern \code{X}.
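+
+  A sketch of supplying \code{lambda} explicitly, here as a kernel
+  estimate of the intensity at the data points (the bandwidth 40 is
+  arbitrary):
+  \preformatted{
+    lam <- density(ponderosa, sigma=40, at="points")
+    Li <- localLinhom(ponderosa, lambda=lam)
+  }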
+}
+\value{
+  If \code{rvalue} is given, the result is a numeric vector
+  of length equal to the number of points in the point pattern.
+
+  If \code{rvalue} is absent, the result is 
+  an object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+  Essentially a data frame containing columns
+  \item{r}{the vector of values of the argument \eqn{r}
+    at which the function \eqn{K} or \eqn{L} has been estimated
+  }
+  \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}
+    or \eqn{L(r)=r} for a stationary Poisson process
+  }
+  together with columns containing the values of the
+  neighbourhood density function for each point in the pattern.
+  Column \code{i} corresponds to the \code{i}th point.
+  The last two columns contain the \code{r} and \code{theo} values.
+}
+\seealso{
+  \code{\link{Kinhom}},
+  \code{\link{Linhom}},
+  \code{\link{localK}},
+  \code{\link{localL}}.
+}
+\examples{
+  data(ponderosa)
+  X <- ponderosa
+
+  # compute all the local L functions
+  L <- localLinhom(X)
+
+  # plot all the local L functions against r
+  plot(L, main="local L functions for ponderosa", legend=FALSE)
+
+  # plot only the local L function for point number 7
+  plot(L, iso007 ~ r)
+  
+  # compute the values of L(r) for r = 12 metres
+  L12 <- localLinhom(X, rvalue=12)
+}
+\author{
+  Mike Kuhn, \adrian and \rolf
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/localpcf.Rd b/man/localpcf.Rd
new file mode 100644
index 0000000..b8c5e21
--- /dev/null
+++ b/man/localpcf.Rd
@@ -0,0 +1,151 @@
+\name{localpcf}
+\alias{localpcf}
+\alias{localpcfinhom}
+\title{Local pair correlation function}
+\description{
+  Computes individual contributions to the
+  pair correlation function from each data point.
+}
+\usage{
+  localpcf(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15)
+  localpcfinhom(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15,
+         lambda=NULL, sigma=NULL, varcov=NULL)
+}
+\arguments{
+  \item{X}{A point pattern (object of class \code{"ppp"}).}
+  \item{delta}{
+    Smoothing bandwidth for pair correlation.
+    The halfwidth of the Epanechnikov kernel.
+  }
+  \item{rmax}{
+    Optional. Maximum value of distance \eqn{r} for which
+    pair correlation values \eqn{g(r)} should be computed.
+  }
+  \item{nr}{
+    Optional. Number of values of distance \eqn{r} for which
+    pair correlation \eqn{g(r)} should be computed.
+  }
+  \item{stoyan}{
+    Optional. The value of the constant \eqn{c} in Stoyan's rule
+    of thumb for selecting the smoothing bandwidth \code{delta}.
+  }
+  \item{lambda}{
+    Optional.
+    Values of the estimated intensity function, for the
+    inhomogeneous pair correlation.
+    Either a vector giving the intensity values
+    at the points of the pattern \code{X},
+    a pixel image (object of class \code{"im"}) giving the
+    intensity values at all locations, a fitted point process model
+    (object of class \code{"ppm"}) or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+  }
+  \item{sigma,varcov,\dots}{
+    These arguments are ignored by \code{localpcf}
+    but are passed by \code{localpcfinhom} (when \code{lambda=NULL})
+    to the function \code{\link{density.ppp}}
+    to control the kernel smoothing estimation of \code{lambda}.
+  }
+}
+\details{
+  \code{localpcf} computes the contribution, from each individual
+  data point in a point pattern \code{X}, to the
+  empirical pair correlation function of \code{X}.
+  These contributions are sometimes known as LISA (local indicator
+  of spatial association) functions based on pair correlation.
+  
+  \code{localpcfinhom} computes the corresponding contribution
+  to the \emph{inhomogeneous} empirical pair correlation function of \code{X}.
+  
+  Given a spatial point pattern \code{X}, the local pcf
+  \eqn{g_i(r)}{g[i](r)} associated with the \eqn{i}th point
+  in \code{X} is computed by
+  \deqn{
+    g_i(r) = \frac a {2 \pi n} \sum_j k(d_{i,j} - r) 
+  }{
+    g[i](r) = (a/(2 * pi * n)) * sum[j] k(d[i,j] - r)
+  }
+  where the sum is over all points \eqn{j \neq i}{j != i},
+  \eqn{a} is the area of the observation window, \eqn{n} is the number
+  of points in \code{X}, and \eqn{d_{ij}}{d[i,j]} is the distance
+  between points \code{i} and \code{j}. Here \code{k} is the
+  Epanechnikov kernel,
+  \deqn{
+    k(t) = \frac 3 { 4\delta} \max(0, 1 - \frac{t^2}{\delta^2}).
+  }{
+    k(t) = (3/(4*delta)) * max(0, 1 - t^2/delta^2).
+  }
+  Edge correction is performed using the border method
+  (for the sake of computational efficiency):
+  the estimate \eqn{g_i(r)}{g[i](r)} is set to \code{NA} if
+  \eqn{r > b_i}{r > b[i]}, where \eqn{b_i}{b[i]}
+  is the distance from point \eqn{i} to the boundary of the
+  observation window.
+
+  The smoothing bandwidth \eqn{\delta}{delta} may be specified.
+  If not, it is chosen by Stoyan's rule of thumb
+  \eqn{\delta = c/\hat\lambda}{delta = c/lambda}
+  where \eqn{\hat\lambda = n/a}{lambda = n/a} is the estimated intensity
+  and \eqn{c} is a constant, usually taken to be 0.15.
+  The value of \eqn{c} is controlled by the argument \code{stoyan}.
+
+  For \code{localpcfinhom}, the optional argument \code{lambda}
+  specifies the values of the estimated intensity function.
+  If \code{lambda} is given, it should be either a
+  numeric vector giving the intensity values
+  at the points of the pattern \code{X},
+  a pixel image (object of class \code{"im"}) giving the
+  intensity values at all locations, a fitted point process model
+  (object of class \code{"ppm"}) or a \code{function(x,y)} which
+  can be evaluated to give the intensity value at any location.
+  If \code{lambda} is not given, then it will be estimated
+  using a leave-one-out kernel density smoother as described
+  in \code{\link{pcfinhom}}.
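+
+  A sketch of the rule-of-thumb arithmetic (the names are local to
+  this illustration):
+  \preformatted{
+    X <- ponderosa
+    lambdahat <- npoints(X)/area(Window(X))  # intensity estimate n/a
+    0.15/lambdahat                           # default delta
+  }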
+}
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+  Essentially a data frame containing columns
+  \item{r}{the vector of values of the argument \eqn{r}
+    at which the local pair correlation functions have been estimated
+  }
+  \item{theo}{the theoretical value \eqn{g(r) = 1}
+    for a stationary Poisson process
+  }
+  together with columns containing the values of the
+  local pair correlation function for each point in the pattern.
+  Column \code{i} corresponds to the \code{i}th point.
+  The last two columns contain the \code{r} and \code{theo} values.
+}
+\seealso{
+  \code{\link{localK}},
+  \code{\link{localKinhom}},
+  \code{\link{pcf}},
+  \code{\link{pcfinhom}}
+}
+\examples{
+  data(ponderosa)
+  X <- ponderosa
+
+  g <- localpcf(X, stoyan=0.5)
+  colo <- c(rep("grey", npoints(X)), "blue")
+  a <- plot(g, main=c("local pair correlation functions", "Ponderosa pines"),
+          legend=FALSE, col=colo, lty=1)
+
+  # plot only the local pair correlation function for point number 7
+  plot(g, est007 ~ r)
+
+  gi <- localpcfinhom(X, stoyan=0.5)
+  a <- plot(gi, main=c("inhomogeneous local pair correlation functions",
+                       "Ponderosa pines"),
+                legend=FALSE, col=colo, lty=1)
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/logLik.dppm.Rd b/man/logLik.dppm.Rd
new file mode 100644
index 0000000..e060ebe
--- /dev/null
+++ b/man/logLik.dppm.Rd
@@ -0,0 +1,103 @@
+\name{logLik.dppm}
+\alias{logLik.dppm}
+\alias{AIC.dppm}
+\alias{extractAIC.dppm}
+\alias{nobs.dppm}
+\title{Log Likelihood and AIC for Fitted Determinantal Point Process Model}
+\description{
+  Extracts the log Palm likelihood, deviance, and AIC
+  of a fitted determinantal point process model.
+}
+\usage{
+\method{logLik}{dppm}(object, ...)
+\method{AIC}{dppm}(object, \dots, k=2)
+\method{extractAIC}{dppm}(fit, scale=0, k=2, \dots)
+\method{nobs}{dppm}(object, ...)
+}
+\arguments{
+  \item{object,fit}{Fitted point process model.
+    An object of class \code{"dppm"}.
+  }
+  \item{\dots}{Ignored.}
+  \item{scale}{Ignored.}
+  \item{k}{Numeric value specifying the weight of the
+    equivalent degrees of freedom in the AIC. See Details.
+  }
+}
+\details{
+  These functions are methods for the generic commands
+  \code{\link{logLik}},
+  \code{\link{extractAIC}}  and
+  \code{\link{nobs}}
+  for the class \code{"dppm"}.
+
+  An object of class \code{"dppm"} represents a fitted
+  determinantal point process model.
+  It is obtained from the model-fitting function \code{\link{dppm}}.
+
+  These methods apply only when the model was fitted
+  by maximising the Palm likelihood (Tanaka et al, 2008)
+  by calling \code{\link{dppm}} with the argument \code{method="palm"}.
+
+  The method \code{logLik.dppm} computes the
+  maximised value of the log Palm likelihood for the fitted model \code{object}.
+
+  The methods \code{AIC.dppm} and \code{extractAIC.dppm} compute the
+  Akaike Information Criterion AIC for the fitted model
+  based on the Palm likelihood (Tanaka et al, 2008)
+  \deqn{
+    AIC = -2 \log(PL) + k \times \mbox{edf}
+  }{
+    AIC = -2 * log(PL) + k * edf
+  }
+  where \eqn{PL} is the maximised Palm likelihood of the fitted model,
+  and \eqn{\mbox{edf}}{edf} is the effective degrees of freedom
+  of the model.
+
+  The method \code{nobs.dppm} returns the number of points
+  in the original data point pattern to which the model was fitted.
+
+  The \R function \code{\link{step}} uses these methods, but it does
+  not yet work for determinantal models, because \code{update.dppm}
+  has not been implemented.
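+
+  As a sketch, the AIC value can be reassembled from the log Palm
+  likelihood and its degrees-of-freedom attribute:
+  \preformatted{
+    fit <- dppm(swedishpines ~ x, dppGauss(), method="palm")
+    ll <- logLik(fit)
+    -2 * as.numeric(ll) + 2 * attr(ll, "df")  # should equal AIC(fit)
+  }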
+}
+\value{
+  \code{logLik} returns a numerical value, belonging to the class
+  \code{"logLik"}, with an attribute \code{"df"} giving the degrees of
+  freedom.
+
+  \code{AIC} returns a numerical value.
+
+  \code{extractAIC} returns a numeric vector of length 2
+  containing the degrees of freedom and the AIC value.
+
+  \code{nobs} returns an integer value.
+}
+\references{
+  Tanaka, U. and Ogata, Y. and Stoyan, D. (2008)
+  Parameter estimation and model selection for
+  Neyman-Scott point processes.
+  \emph{Biometrical Journal} \bold{50}, 43--57.
+}
+\seealso{
+  \code{\link{dppm}},
+  \code{\link{logLik.ppm}}
+}
+\author{\adrian, \rolf and \ege}
+\examples{
+  fit <- dppm(swedishpines ~ x, dppGauss(), method="palm")
+  nobs(fit)
+  logLik(fit)
+  extractAIC(fit)
+  AIC(fit)
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/logLik.kppm.Rd b/man/logLik.kppm.Rd
new file mode 100644
index 0000000..f45dd9c
--- /dev/null
+++ b/man/logLik.kppm.Rd
@@ -0,0 +1,102 @@
+\name{logLik.kppm}
+\alias{logLik.kppm}
+\alias{AIC.kppm}
+\alias{extractAIC.kppm}
+\alias{nobs.kppm}
+\title{Log Likelihood and AIC for Fitted Cox or Cluster Point Process Model}
+\description{
+  Extracts the log Palm likelihood, deviance, and AIC
+  of a fitted Cox or cluster point process model.
+}
+\usage{
+\method{logLik}{kppm}(object, ...)
+\method{AIC}{kppm}(object, \dots, k=2)
+\method{extractAIC}{kppm}(fit, scale=0, k=2, \dots)
+\method{nobs}{kppm}(object, ...)
+}
+\arguments{
+  \item{object,fit}{Fitted point process model.
+    An object of class \code{"kppm"}.
+  }
+  \item{\dots}{Ignored.}
+  \item{scale}{Ignored.}
+  \item{k}{Numeric value specifying the weight of the
+    equivalent degrees of freedom in the AIC. See Details.
+  }
+}
+\details{
+  These functions are methods for the generic commands
+  \code{\link{logLik}},
+  \code{\link{extractAIC}}  and
+  \code{\link{nobs}}
+  for the class \code{"kppm"}.
+
+  An object of class \code{"kppm"} represents a fitted
+  Cox or cluster point process model.
+  It is obtained from the model-fitting function \code{\link{kppm}}.
+
+  These methods apply only when the model was fitted
+  by maximising the Palm likelihood (Tanaka et al, 2008)
+  by calling \code{\link{kppm}} with the argument \code{method="palm"}.
+  
+  The method \code{logLik.kppm} computes the 
+  maximised value of the log Palm likelihood for the fitted model \code{object}.
+
+  The methods \code{AIC.kppm} and \code{extractAIC.kppm} compute the
+  Akaike Information Criterion AIC for the fitted model
+  based on the Palm likelihood (Tanaka et al, 2008)
+  \deqn{
+    AIC = -2 \log(PL) + k \times \mbox{edf}
+  }{
+    AIC = -2 * log(PL) + k * edf
+  }
+  where \eqn{PL} is the maximised Palm likelihood of the fitted model,
+  and \eqn{\mbox{edf}}{edf} is the effective degrees of freedom
+  of the model.
+
+  The method \code{nobs.kppm} returns the number of points
+  in the original data point pattern to which the model was fitted.
+  
+  The \R function \code{\link{step}} uses these methods.
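+
+  A sketch of comparing two cluster models fitted by Palm likelihood
+  (the covariate choice is arbitrary; the model with the lower AIC is
+  preferred):
+  \preformatted{
+    fit0 <- kppm(redwood ~ 1, "Thomas", method="palm")
+    fit1 <- kppm(redwood ~ x, "Thomas", method="palm")
+    AIC(fit0) - AIC(fit1)  # positive values favour fit1
+  }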
+}
+\value{
+  \code{logLik} returns a numerical value, belonging to the class
+  \code{"logLik"}, with an attribute \code{"df"} giving the degrees of
+  freedom.
+  
+  \code{AIC} returns a numerical value.
+
+  \code{extractAIC} returns a numeric vector of length 2
+  containing the degrees of freedom and the AIC value.
+
+  \code{nobs} returns an integer value.
+}
+\references{
+  Tanaka, U. and Ogata, Y. and Stoyan, D. (2008) 
+  Parameter estimation and model selection for
+  Neyman-Scott point processes.
+  \emph{Biometrical Journal} \bold{50}, 43--57.
+}
+\seealso{
+  \code{\link{kppm}}, 
+  \code{\link{logLik.ppm}}
+}
+\author{\adrian, \rolf and \ege}
+\examples{
+  fit <- kppm(redwood ~ x, "Thomas", method="palm")
+  nobs(fit)
+  logLik(fit)
+  extractAIC(fit)
+  AIC(fit)
+  step(fit)
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/logLik.mppm.Rd b/man/logLik.mppm.Rd
new file mode 100644
index 0000000..34caf05
--- /dev/null
+++ b/man/logLik.mppm.Rd
@@ -0,0 +1,127 @@
+\name{logLik.mppm}
+\alias{logLik.mppm}
+\alias{AIC.mppm}
+\alias{extractAIC.mppm}
+\alias{nobs.mppm}
+\alias{getCall.mppm}
+\alias{terms.mppm}
+\title{Log Likelihood and AIC for Multiple Point Process Model}
+\description{
+  For a point process model that has been fitted to multiple point
+  patterns, these functions extract the log likelihood and AIC,
+  or analogous quantities based on the pseudolikelihood.
+}
+\usage{
+ \method{logLik}{mppm}(object, \dots, warn=TRUE)
+
+ \method{AIC}{mppm}(object, \dots, k=2, takeuchi=TRUE)
+
+ \method{extractAIC}{mppm}(fit, scale = 0, k = 2, \dots, takeuchi = TRUE) 
+
+ \method{nobs}{mppm}(object, \dots)
+
+ \method{getCall}{mppm}(x, \dots)
+
+ \method{terms}{mppm}(x, \dots)
+}
+\arguments{
+  \item{object,fit,x}{
+    Fitted point process model (fitted to multiple point
+    patterns). An object of class \code{"mppm"}.
+  }
+  \item{\dots}{Ignored.}
+  \item{warn}{
+    If \code{TRUE}, a warning is given when the
+    pseudolikelihood is returned instead of the likelihood.
+  }
+  \item{scale}{Ignored.}
+  \item{k}{Numeric value specifying the weight of the
+    equivalent degrees of freedom in the AIC. See Details.
+  }
+  \item{takeuchi}{
+    Logical value specifying whether to use the Takeuchi penalty
+    (\code{takeuchi=TRUE}) or the
+    number of fitted parameters (\code{takeuchi=FALSE})
+    in calculating AIC.
+  }
+}
+\details{
+  These functions are methods for the generic commands
+  \code{\link[stats]{logLik}},
+  \code{\link[stats]{AIC}},
+  \code{\link[stats]{extractAIC}},
+  \code{\link[stats]{terms}} and
+  \code{\link[stats:update]{getCall}} 
+  for the class \code{"mppm"}.
+
+  An object of class \code{"mppm"} represents a fitted
+  Poisson or Gibbs point process model fitted to several point patterns.
+  It is obtained from the model-fitting function \code{\link{mppm}}.
+
+  The method \code{logLik.mppm} extracts the 
+  maximised value of the log likelihood for the fitted model
+  (as approximated by quadrature using the Berman-Turner approximation).
+  If \code{object} is not a Poisson process, the maximised log
+  \emph{pseudolikelihood} is returned, with a warning.
+
+  The Akaike Information Criterion AIC for a fitted model is defined as
+  \deqn{
+    AIC = -2 \log(L) + k \times \mbox{penalty}
+  }{
+    AIC = -2 * log(L) + k * penalty
+  }
+  where \eqn{L} is the maximised likelihood of the fitted model,
+  and \eqn{\mbox{penalty}}{penalty} is a penalty for model complexity,
+  usually equal to the effective degrees of freedom of the model.
+  The method \code{extractAIC.mppm} returns the \emph{analogous} quantity
+  \eqn{AIC*} in which \eqn{L} is replaced by \eqn{L*},
+  the quadrature approximation
+  to the likelihood (if \code{fit} is a Poisson model)
+  or the pseudolikelihood (if \code{fit} is a Gibbs model).
+
+  The \eqn{\mbox{penalty}}{penalty} term is calculated
+  as follows. If \code{takeuchi=FALSE} then \eqn{\mbox{penalty}}{penalty} is
+  the number of fitted parameters. If \code{takeuchi=TRUE} then
+  \eqn{\mbox{penalty} = \mbox{trace}(J H^{-1})}{penalty = trace(J H^(-1))}
+  where \eqn{J} and \eqn{H} are the estimated variance and Hessian,
+  respectively, of the composite score.
+  These two choices are equivalent for a Poisson process.
+  
+  The method \code{nobs.mppm} returns the total number of points
+  in the original data point patterns to which the model was fitted.
+
+  The method \code{getCall.mppm} extracts the original call to
+  \code{\link{mppm}} which caused the model to be fitted.
+  
+  The method \code{terms.mppm} extracts the covariate terms in the
+  model formula as a \code{terms} object. Note that these terms do not
+  include the interaction component of the model.
+  
+  The \R function \code{\link[stats]{step}} uses these methods.
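+
+  A sketch contrasting the two penalty choices; for a Poisson model
+  such as this one they should agree up to numerical error:
+  \preformatted{
+    fit <- mppm(Bugs ~ x, hyperframe(Bugs=waterstriders))
+    AIC(fit, takeuchi=TRUE)
+    AIC(fit, takeuchi=FALSE)
+  }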
+}
+\value{
+  See the help files for the corresponding generic functions.
+}
+\seealso{
+  \code{\link{mppm}}
+}
+\references{
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  London: Chapman and Hall/CRC Press. 
+}
+\author{
+  Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff.
+  Implemented by 
+  \spatstatAuthors.
+}
+\examples{
+  fit <- mppm(Bugs ~ x, hyperframe(Bugs=waterstriders))
+  logLik(fit)
+  AIC(fit)
+  nobs(fit)
+  getCall(fit)
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/logLik.ppm.Rd b/man/logLik.ppm.Rd
new file mode 100644
index 0000000..147695d
--- /dev/null
+++ b/man/logLik.ppm.Rd
@@ -0,0 +1,155 @@
+\name{logLik.ppm}
+\alias{logLik.ppm}
+\alias{deviance.ppm}
+\alias{AIC.ppm}
+\alias{extractAIC.ppm}
+\alias{nobs.ppm}
+\title{Log Likelihood and AIC for Point Process Model}
+\description{
+  Extracts the log likelihood, deviance, and AIC
+  of a fitted Poisson point process
+  model, or analogous quantities based on the pseudolikelihood
+  or logistic likelihood for a fitted Gibbs point process model.
+}
+\usage{
+\method{logLik}{ppm}(object, \dots, new.coef=NULL, warn=TRUE, absolute=FALSE)
+
+\method{deviance}{ppm}(object, \dots)
+
+\method{AIC}{ppm}(object, \dots, k=2, takeuchi=TRUE)
+
+\method{extractAIC}{ppm}(fit, scale=0, k=2, \dots, takeuchi=TRUE)
+
+\method{nobs}{ppm}(object, \dots)
+}
+\arguments{
+  \item{object,fit}{Fitted point process model.
+    An object of class \code{"ppm"}.
+  }
+  \item{\dots}{Ignored.}
+  \item{warn}{
+    If \code{TRUE}, a warning is given when the
+    pseudolikelihood or logistic likelihood
+    is returned instead of the likelihood.
+  }
+  \item{absolute}{
+    Logical value indicating whether to include 
+    constant terms in the loglikelihood. 
+  }
+  \item{scale}{Ignored.}
+  \item{k}{Numeric value specifying the weight of the
+    equivalent degrees of freedom in the AIC. See Details.
+  }
+  \item{new.coef}{
+    New values for the canonical parameters of the model.
+    A numeric vector of the same length as \code{coef(object)}.
+  }
+  \item{takeuchi}{
+    Logical value specifying whether to use the Takeuchi penalty
+    (\code{takeuchi=TRUE}) or the
+    number of fitted parameters (\code{takeuchi=FALSE})
+    in calculating AIC.
+  }
+}
+\details{
+  These functions are methods for the generic commands
+  \code{\link[stats]{logLik}},
+  \code{\link[stats]{deviance}},
+  \code{\link[stats]{extractAIC}}  and
+  \code{\link[stats]{nobs}}
+  for the class \code{"ppm"}.
+
+  An object of class \code{"ppm"} represents a fitted
+  Poisson or Gibbs point process model.
+  It is obtained from the model-fitting function \code{\link{ppm}}.
+  
+  The method \code{logLik.ppm} computes the
+  maximised value of the log likelihood for the fitted model \code{object}
+  (as approximated by quadrature using the Berman-Turner approximation).
+  If \code{object} is not a Poisson process, the maximised log
+  \emph{pseudolikelihood} is returned, with a warning (if \code{warn=TRUE}).
+
+  The Akaike Information Criterion AIC for a fitted model is defined as
+  \deqn{
+    AIC = -2 \log(L) + k \times \mbox{penalty}
+  }{
+    AIC = -2 * log(L) + k * penalty
+  }
+  where \eqn{L} is the maximised likelihood of the fitted model,
+  and \eqn{\mbox{penalty}}{penalty} is a penalty for model complexity,
+  usually equal to the effective degrees of freedom of the model.
+  The method \code{extractAIC.ppm} returns the \emph{analogous} quantity
+  \eqn{AIC*} in which \eqn{L} is replaced by \eqn{L*},
+  the quadrature approximation
+  to the likelihood (if \code{fit} is a Poisson model)
+  or the pseudolikelihood or logistic likelihood
+  (if \code{fit} is a Gibbs model).
+
+  The \eqn{\mbox{penalty}}{penalty} term is calculated
+  as follows. If \code{takeuchi=FALSE} then \eqn{\mbox{penalty}}{penalty} is
+  the number of fitted parameters. If \code{takeuchi=TRUE} then
+  \eqn{\mbox{penalty} = \mbox{trace}(J H^{-1})}{penalty = trace(J H^(-1))}
+  where \eqn{J} and \eqn{H} are the estimated variance and Hessian,
+  respectively, of the composite score.
+  These two choices are equivalent for a Poisson process.
+  
+  The method \code{nobs.ppm} returns the number of points
+  in the original data point pattern to which the model was fitted.
+  
+  The \R function \code{\link[stats]{step}} uses these methods.
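+
+  A sketch of evaluating the log likelihood away from the maximum
+  using \code{new.coef} (the perturbation 0.01 is arbitrary):
+  \preformatted{
+    fit <- ppm(cells ~ x)
+    logLik(fit)
+    logLik(fit, new.coef=coef(fit) + 0.01)  # should not exceed the maximum
+  }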
+}
+\value{
+  \code{logLik} returns a numerical value, belonging to the class
+  \code{"logLik"}, with an attribute \code{"df"} giving the degrees of
+  freedom.
+  
+  \code{AIC} returns a numerical value.
+
+  \code{extractAIC} returns a numeric vector of length 2
+  containing the degrees of freedom and the AIC value.
+
+  \code{nobs} returns an integer value.
+}
+\references{
+  Varin, C. and Vidoni, P. (2005)
+  A note on composite likelihood inference and model selection.
+  \emph{Biometrika} \bold{92}, 519--528.
+}
+\seealso{
+  \code{\link{ppm}},  
+  \code{\link{as.owin}},
+  \code{\link{coef.ppm}},
+  \code{\link{fitted.ppm}},
+  \code{\link{formula.ppm}},
+  \code{\link{model.frame.ppm}},
+  \code{\link{model.matrix.ppm}},
+  \code{\link{plot.ppm}},
+  \code{\link{predict.ppm}},
+  \code{\link{residuals.ppm}},
+  \code{\link{simulate.ppm}},
+  \code{\link{summary.ppm}},
+  \code{\link{terms.ppm}},
+  \code{\link{update.ppm}},
+  \code{\link{vcov.ppm}}.
+}
+\author{\adrian, \rolf and \ege}
+\examples{
+  data(cells)
+  fit <- ppm(cells, ~x)
+  nobs(fit)
+  logLik(fit)
+  deviance(fit)
+  extractAIC(fit)
+  AIC(fit)
+  step(fit)
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/logLik.slrm.Rd b/man/logLik.slrm.Rd
new file mode 100644
index 0000000..8cb2dcc
--- /dev/null
+++ b/man/logLik.slrm.Rd
@@ -0,0 +1,65 @@
+\name{logLik.slrm}
+\Rdversion{1.1}
+\alias{logLik.slrm}
+\title{
+  Loglikelihood of Spatial Logistic Regression
+}
+\description{
+  Computes the (maximised) loglikelihood of a fitted
+  Spatial Logistic Regression model.
+}
+\usage{
+  \method{logLik}{slrm}(object, ..., adjust = TRUE)
+}
+\arguments{
+  \item{object}{
+    a fitted spatial logistic regression model.
+    An object of class \code{"slrm"}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{adjust}{
+    Logical value indicating whether to adjust the loglikelihood
+    of the model to make it comparable with a point process
+    likelihood. See Details.
+  }
+}
+\details{
+  This is a method for \code{\link[stats]{logLik}} for fitted spatial logistic
+  regression models (objects of class \code{"slrm"}, usually obtained
+  from the function \code{\link{slrm}}). It computes the loglikelihood
+  of a fitted spatial logistic regression model.
+
+  If \code{adjust=FALSE}, the loglikelihood is computed
+  using the standard formula for the loglikelihood of a
+  logistic regression model for a finite set of (pixel) observations.
+
+  If \code{adjust=TRUE} then the loglikelihood is adjusted so that it
+  is approximately comparable with the likelihood of a point process
+  in continuous space, by subtracting the value
+  \eqn{n \log(a)}{n * log(a)}
+  where \eqn{n} is the number of points in the original point pattern
+  dataset, and \eqn{a} is the area of one pixel.
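+
+  As a sketch, the size of the adjustment can be inspected by
+  comparing the two versions directly:
+  \preformatted{
+    X <- rpoispp(42)
+    fit <- slrm(X ~ x + y)
+    logLik(fit) - logLik(fit, adjust=FALSE)  # equals -n * log(a)
+  }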
+}
+\value{
+  A numerical value.
+}
+\seealso{
+  \code{\link{slrm}}
+}
+\examples{
+  X <- rpoispp(42)
+  fit <- slrm(X ~ x+y)
+  logLik(fit)
+  logLik(fit, adjust=FALSE)
+}
+\author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf}
+\keyword{spatial}
+\keyword{models}
+\keyword{methods}
diff --git a/man/lohboot.Rd b/man/lohboot.Rd
new file mode 100644
index 0000000..48dceee
--- /dev/null
+++ b/man/lohboot.Rd
@@ -0,0 +1,139 @@
+\name{lohboot}
+\alias{lohboot}
+\title{Bootstrap Confidence Bands for Summary Function}
+\description{
+  Computes a bootstrap confidence band for a summary function
+  of a point process.
+}
+\usage{
+  lohboot(X,
+          fun=c("pcf", "Kest", "Lest", "pcfinhom", "Kinhom", "Linhom"),
+          \dots, nsim=200, confidence=0.95, global=FALSE, type=7)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{fun}{
+    Name of the summary function for which confidence intervals are
+    desired: one of the strings \code{"pcf"}, \code{"Kest"}, \code{"Lest"},
+    \code{"pcfinhom"}, \code{"Kinhom"} or \code{"Linhom"}.
+    Alternatively, the function itself; it must be
+    one of the functions listed here.
+  }
+  \item{\dots}{
+    Arguments passed to the corresponding local version of the summary
+    function (see Details).
+  }
+  \item{nsim}{
+    Number of bootstrap simulations.
+  }
+  \item{confidence}{
+    Confidence level, as a fraction between 0 and 1.
+  }
+  \item{global}{
+    Logical. If \code{FALSE} (the default), pointwise confidence intervals
+    are constructed. If \code{TRUE}, a global (simultaneous) confidence band is
+    constructed.
+  }
+  \item{type}{
+    Integer. Argument passed to \code{\link[stats]{quantile}}
+    controlling the way the quantiles are calculated.
+  }
+} 
+\value{
+  A function value table
+  (object of class \code{"fv"})
+  containing columns giving the estimate of the summary function,
+  the upper and lower limits of the bootstrap confidence interval,
+  and the theoretical value of the summary function for a Poisson process.
+}
+\details{
+  This algorithm computes 
+  confidence bands for the true value of the summary function
+  \code{fun} using the bootstrap method of Loh (2008).
+
+  If \code{fun="pcf"}, for example, the algorithm computes a pointwise
+  \code{(100 * confidence)}\% confidence interval for the true value of
+  the pair correlation function for the point process,
+  normally estimated by \code{\link{pcf}}.
+  It starts by computing the array of
+  \emph{local} pair correlation functions,
+  \code{\link{localpcf}}, of the data pattern \code{X}.
+  This array consists of the contributions to the estimate of the
+  pair correlation function from each
+  data point. Then these contributions are resampled \code{nsim} times
+  with replacement; from each resampled dataset the total contribution
+  is computed, yielding \code{nsim} random pair correlation functions.
+  The pointwise \code{alpha/2} and \code{1 - alpha/2} quantiles of
+  these functions are computed, where \code{alpha = 1 - confidence}.
+  The average of the local functions is also computed as an estimate
+  of the pair correlation function.
+
+  To control the estimation algorithm, use the 
+  arguments \code{\dots}, which are passed to the local version
+  of the summary function, as shown below:
+  
+  \tabular{ll}{
+    \bold{fun} \tab \bold{local version} \cr
+    \code{\link{pcf}} \tab \code{\link{localpcf}} \cr
+    \code{\link{Kest}} \tab \code{\link{localK}} \cr
+    \code{\link{Lest}} \tab \code{\link{localK}} \cr
+    \code{\link{pcfinhom}} \tab \code{\link{localpcfinhom}} \cr
+    \code{\link{Kinhom}} \tab \code{\link{localKinhom}} \cr
+    \code{\link{Linhom}} \tab \code{\link{localKinhom}}
+  }
+  For \code{fun="Lest"}, the calculations are first performed
+  as if \code{fun="Kest"}, and then the square-root transformation is
+  applied to obtain the \eqn{L}-function. 
+
+  Note that the confidence bands computed by 
+  \code{lohboot(fun="pcf")} may not contain the estimate of the
+  pair correlation function computed by \code{\link{pcf}},
+  because of differences between the algorithm parameters
+  (such as the choice of edge correction)
+  in \code{\link{localpcf}} and \code{\link{pcf}}.
+  If you are using \code{lohboot}, the
+  appropriate point estimate of the pair correlation itself is
+  the pointwise mean of the local estimates, which is provided
+  in the result of \code{lohboot} and is shown in the default plot.
+
+  If the confidence bands seem unbelievably narrow,
+  this may occur because the point pattern has a hard core
+  (the true pair correlation function is zero for certain values of
+  distance) or because of an optical illusion when the
+  function is steeply sloping (remember the width of the confidence
+  bands should be measured \emph{vertically}).
+  
+  An alternative to \code{lohboot} is \code{\link{varblock}}.
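+
+  A sketch of a global (simultaneous) confidence band for the
+  \eqn{K} function (the number of simulations is kept modest purely
+  for speed):
+  \preformatted{
+    b <- lohboot(simdat, fun="Kest", global=TRUE, nsim=99)
+    plot(b)
+  }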
+}
+\references{
+  Loh, J.M. (2008)
+  A valid and fast spatial bootstrap for correlation functions.
+  \emph{The Astrophysical Journal}, \bold{681}, 726--734.
+}
+\seealso{
+  Summary functions
+  \code{\link{Kest}},
+  \code{\link{pcf}},
+  \code{\link{Kinhom}},
+  \code{\link{pcfinhom}},
+  \code{\link{localK}},
+  \code{\link{localpcf}},
+  \code{\link{localKinhom}},
+  \code{\link{localpcfinhom}}.
+
+  See \code{\link{varblock}} for an alternative bootstrap technique.
+}
+\examples{
+  p <- lohboot(simdat, stoyan=0.5)
+  plot(p)
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/longleaf.Rd b/man/longleaf.Rd
new file mode 100644
index 0000000..1fc27dd
--- /dev/null
+++ b/man/longleaf.Rd
@@ -0,0 +1,54 @@
+\name{longleaf}
+\alias{longleaf}
+\docType{data}
+\title{
+  Longleaf Pines Point Pattern
+}
+\description{
+  Locations and sizes of Longleaf pine trees.
+  A marked point pattern.
+
+  The data record the locations and diameters of 
+  584 Longleaf pine (\emph{Pinus palustris}) trees 
+  in a 200 x 200 metre region in southern Georgia (USA).
+  They were collected and analysed by Platt, Evans and Rathbun (1988).
+ 
+  This is a marked point pattern; the mark associated with a tree is its
+  diameter at breast height (\code{dbh}), a convenient measure of its size. 
+  Several analyses have considered only the ``adult'' trees which
+  are conventionally defined as those trees with \code{dbh}
+  greater than or equal to 30 cm.
+ 
+  The pattern is regarded as spatially inhomogeneous.
+} 
+\format{
+  An object of class \code{"ppp"}
+  representing the point pattern of tree locations.
+  Entries include
+  \tabular{ll}{
+    \code{x} \tab Cartesian \eqn{x}-coordinate of tree \cr
+    \code{y} \tab Cartesian \eqn{y}-coordinate of tree \cr
+    \code{marks} \tab diameter at breast height, in centimetres.
+  }
+  See \code{\link{ppp.object}} for details of the format of a
+  point pattern object.
+}
+\usage{data(longleaf)}
+\examples{
+    data(longleaf)
+    plot(longleaf)
+    plot(cut(longleaf, breaks=c(0,30,Inf), labels=c("Sapling","Adult")))
+}
+\source{Platt, Evans and Rathbun (1988)}
+\references{
+Platt, W. J., Evans, G. W. and Rathbun, S. L. (1988)
+The population dynamics of a long-lived conifer (\emph{Pinus palustris}).
+\emph{The American Naturalist} \bold{131}, 491--525.
+
+Rathbun, S. L. and Cressie, N. (1994)
+A space-time survival point process for a longleaf
+pine forest in southern Georgia.
+\emph{Journal of the American Statistical Association} \bold{89}, 1164--1173.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/lpp.Rd b/man/lpp.Rd
new file mode 100644
index 0000000..f5f1718
--- /dev/null
+++ b/man/lpp.Rd
@@ -0,0 +1,125 @@
+\name{lpp}
+\alias{lpp}
+\title{
+  Create Point Pattern on Linear Network
+}
+\description{
+  Creates an object of class \code{"lpp"} that represents
+  a point pattern on a linear network.
+}
+\usage{
+lpp(X, L, \dots)
+}
+\arguments{
+  \item{X}{
+    Locations of the points. A matrix or data frame of coordinates,
+    or a point pattern object (of class
+    \code{"ppp"}) or other data acceptable to \code{\link{as.ppp}}.
+  }
+  \item{L}{
+    Linear network (object of class \code{"linnet"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This command creates an object of class \code{"lpp"} that represents
+  a point pattern on a linear network.
+
+  Normally \code{X} is a point pattern. The points of \code{X} should lie
+  on the lines of \code{L}.
+
+  Alternatively \code{X} may be a matrix or data frame containing at
+  least two columns.
+  \itemize{
+    \item Usually
+    the first two columns of \code{X} will be interpreted
+    as spatial coordinates, and any remaining columns as marks.
+    \item 
+    An exception occurs if \code{X} is a data frame with columns named
+    \code{x}, \code{y}, \code{seg} and \code{tp}. Then
+    \code{x} and \code{y} will be interpreted as spatial
+    coordinates, and \code{seg} and \code{tp} as local
+    coordinates, with \code{seg} indicating which line segment of
+    \code{L} the point lies on, and \code{tp} indicating how far along
+    the segment the point lies (as a fraction of the segment length,
+    between 0 and 1). Any remaining columns
+    will be interpreted as marks.
+    \item 
+    Another exception occurs if \code{X} is a data frame with columns named
+    \code{seg} and \code{tp}. Then
+    \code{seg} and \code{tp} will be interpreted as local
+    coordinates, as above, and the spatial coordinates
+    \code{x,y} will be computed from them.
+    Any remaining columns will be interpreted as marks.
+  }
+  If \code{X} is missing or \code{NULL}, the result is an empty
+  point pattern (i.e. containing no points).
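+
+  For example, a pattern can be specified purely by local coordinates
+  (a sketch using the installed network \code{simplenet}; the segment
+  numbers and positions are arbitrary):
+  \preformatted{
+    df <- data.frame(seg=c(1,3), tp=c(0.25, 0.75))
+    Y <- lpp(df, simplenet)  # x, y are computed from seg, tp
+  }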
+}
+\section{Note on changed format}{
+  The internal format of \code{"lpp"} objects was changed in
+  \pkg{spatstat} version \code{1.28-0}.
+  Objects in the old format are still handled correctly,
+  but computations are faster in the new format.
+  To convert an object \code{X} from the old format to the new format,
+  use \code{X <- lpp(as.ppp(X), as.linnet(X))}.
+}
+\value{
+  An object of class \code{"lpp"}. 
+  Also inherits the class \code{"ppx"}.
+}
+\author{
+  Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian
+}
+\seealso{
+  Installed datasets which are \code{"lpp"} objects:
+  \code{\link{chicago}}, \code{\link{dendrite}}, \code{\link{spiders}}.
+  
+  See \code{\link{as.lpp}} for converting data to an \code{lpp} object.
+
+  See \code{\link{methods.lpp}} and
+  \code{\link{methods.ppx}} for other methods applicable
+  to \code{lpp} objects.
+
+  Calculations on an \code{lpp} object:
+  \code{\link{intensity.lpp}},
+  \code{\link{distfun.lpp}},
+  \code{\link{nndist.lpp}},
+  \code{\link{nnwhich.lpp}},
+  \code{\link{nncross.lpp}},
+  \code{\link{nnfun.lpp}}.
+
+  Summary functions: 
+  \code{\link{linearK}},
+  \code{\link{linearKinhom}},
+  \code{\link{linearpcf}},
+  \code{\link{linearKdot}},
+  \code{\link{linearKcross}},
+  \code{\link{linearmarkconnect}}, etc.
+  
+  Random point patterns on a linear network can be generated by
+  \code{\link{rpoislpp}} or \code{\link{runiflpp}}.
+
+  See \code{\link{linnet}} for linear networks.
+}
+\examples{
+  # letter 'A' 
+  v <- ppp(x=(-2):2, y=3*c(0,1,2,1,0), c(-3,3), c(-1,7))
+  edg <- cbind(1:4, 2:5)
+  edg <- rbind(edg, c(2,4))
+  letterA <- linnet(v, edges=edg)
+
+  # points on letter A
+  xx <- list(x=c(-1.5,0,0.5,1.5), y=c(1.5,3,4.5,1.5))
+  X <- lpp(xx, letterA)
+
+  plot(X)
+  X
+  summary(X)
+
+  # empty pattern
+  lpp(L=letterA)
+}
+\keyword{spatial}
diff --git a/man/lppm.Rd b/man/lppm.Rd
new file mode 100644
index 0000000..2377dad
--- /dev/null
+++ b/man/lppm.Rd
@@ -0,0 +1,120 @@
+\name{lppm}
+\alias{lppm}
+\alias{lppm.formula}
+\alias{lppm.lpp}
+\title{
+  Fit Point Process Model to Point Pattern on Linear Network
+}
+\description{
+  Fit a point process model to a point pattern dataset on a linear network
+}
+\usage{
+lppm(X, ...)
+
+\method{lppm}{formula}(X, interaction=NULL, ..., data=NULL)
+
+\method{lppm}{lpp}(X, ..., eps=NULL, nd=1000, random=FALSE)
+}
+\arguments{
+  \item{X}{
+    Either an object of class \code{"lpp"} specifying a point pattern
+    on a linear network, or a \code{formula} specifying the
+    point process model.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{ppm}}.
+  }
+  \item{interaction}{
+    An object of class \code{"interact"}
+    describing the point process interaction
+    structure, or \code{NULL} indicating that a Poisson process (stationary
+    or nonstationary) should be fitted.
+  }
+  \item{data}{
+    Optional. The values of spatial covariates (other than the Cartesian
+    coordinates) required by the model.
+    A list whose entries are images,
+    functions, windows, tessellations or single numbers. 
+  }
+  \item{eps}{
+    Optional. Spacing between dummy points along each segment of the
+    network. 
+  }
+  \item{nd}{
+    Optional. Total number of dummy points placed on 
+    the network. Ignored if \code{eps} is given.
+  }
+  \item{random}{
+    Logical value indicating whether the grid of dummy points should be
+    placed at a randomised starting position.
+  }
+}
+\details{
+  This function fits a point process model to data that specify
+  a point pattern on a linear network. It is a counterpart of
+  the model-fitting function \code{\link{ppm}} designed
+  to work with objects of class \code{"lpp"} instead of \code{"ppp"}.
+
+  The function \code{lppm} is generic, with methods for
+  the classes \code{formula} and \code{lpp}.
+
+  In \code{lppm.lpp}
+  the first argument \code{X} should be an object of class \code{"lpp"}
+  (created by the command \code{\link{lpp}}) specifying a point pattern
+  on a linear network.
+
+  In \code{lppm.formula},
+  the first argument is a \code{formula} in the \R language
+  describing the spatial trend model to be fitted. It has the general form
+  \code{pattern ~ trend} where the left hand side \code{pattern} is usually
+  the name of a point pattern on a linear network
+  (object of class \code{"lpp"})
+  to which the model should be fitted, or an expression which evaluates
+  to such a point pattern;
+  and the right hand side \code{trend} is an expression specifying the
+  spatial trend of the model.
+
+  Other arguments \code{...} are passed from \code{lppm.formula}
+  to \code{lppm.lpp} and from \code{lppm.lpp} to \code{\link{ppm}}.
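+
+  A sketch of controlling the quadrature scheme through the
+  dummy-point arguments (the values are arbitrary):
+  \preformatted{
+    X <- runiflpp(15, simplenet)
+    lppm(X ~ x, nd=2000)   # denser grid of dummy points
+    lppm(X ~ x, eps=0.05)  # dummy spacing 0.05 along each segment
+  }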
+}
+\value{
+  An object of class \code{"lppm"} representing the fitted model.
+  There are methods for \code{print}, \code{predict},
+  \code{coef} and similar functions.
+}
+\author{
+  \adrian
+  and Greg McSwiggan.
+}
+\seealso{
+  \code{\link{methods.lppm}},
+  \code{\link{predict.lppm}},
+  \code{\link{ppm}},
+  \code{\link{lpp}}.
+}
+\examples{
+  X <- runiflpp(15, simplenet)
+  lppm(X ~1)
+  lppm(X ~x)
+  marks(X) <- factor(rep(letters[1:3], 5))
+  lppm(X ~ marks)
+  lppm(X ~ marks * x)
+}
+\references{
+  Ang, Q.W. (2010)
+  \emph{Statistical methodology for events on a network}.
+  Master's thesis, School of Mathematics and Statistics, University of
+  Western Australia.
+  
+  Ang, Q.W., Baddeley, A. and Nair, G. (2012)
+  Geometrically corrected second-order analysis of 
+  events on a linear network, with applications to
+  ecology and criminology.
+  \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617.
+
+  McSwiggan, G., Nair, M.G. and Baddeley, A. (2012)
+  Fitting Poisson point process models to events 
+  on a linear network. Manuscript in preparation.
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/lurking.Rd b/man/lurking.Rd
new file mode 100644
index 0000000..bad6dcd
--- /dev/null
+++ b/man/lurking.Rd
@@ -0,0 +1,281 @@
+\name{lurking}
+\alias{lurking}               
+\title{Lurking variable plot}
+\description{
+  Plot spatial point process residuals against a covariate
+}
+\usage{
+lurking(object, covariate, type="eem",
+        cumulative=TRUE,
+        clipwindow=default.clipwindow(object),
+        rv,
+        plot.sd,
+        envelope=FALSE, nsim=39, nrank=1,
+        plot.it=TRUE,
+        typename,
+        covname,
+        oldstyle=FALSE, check=TRUE,
+        \dots,
+        splineargs=list(spar=0.5),
+        verbose=TRUE)
+}
+\arguments{
+  \item{object}{
+    The fitted point process model (an object of class \code{"ppm"})
+    for which diagnostics should be produced. This object
+    is usually obtained from \code{\link{ppm}}. Alternatively,
+    \code{object} may be a point pattern (object of class
+    \code{"ppp"}).
+  }
+  \item{covariate}{
+    The covariate against which residuals should be plotted.
+    Either a numeric vector, a pixel image, or an \code{expression}.
+    See \emph{Details} below.
+  }
+  \item{type}{
+    String indicating the type of residuals or weights to be computed.
+    Choices include \code{"eem"},
+    \code{"raw"}, \code{"inverse"} and \code{"pearson"}.
+    See \code{\link{diagnose.ppm}} for all possible choices.
+  }
+  \item{cumulative}{
+    Logical flag indicating whether to plot a
+    cumulative sum of marks (\code{cumulative=TRUE}) or the derivative
+    of this sum, a marginal density of the smoothed residual field
+    (\code{cumulative=FALSE}).
+   }
+  \item{clipwindow}{
+    If not \code{NULL} this argument indicates that residuals shall
+    only be computed inside a subregion of the window containing the
+    original point pattern data. Then \code{clipwindow} should be
+    a window object of class \code{"owin"}.
+  }
+  \item{rv}{
+    Usually absent. 
+    If this argument is present, the point process residuals will not be
+    calculated from the fitted model \code{object},
+    but will instead be taken directly from \code{rv}. 
+  }
+  \item{plot.sd}{
+    Logical value indicating whether 
+    error bounds should be added to plot.
+    The default is \code{TRUE} for Poisson models and
+    \code{FALSE} for non-Poisson models. See Details.
+  }
+  \item{envelope}{
+    Logical value indicating whether to compute simulation envelopes
+    for the plot. Alternatively \code{envelope} may be a list of
+    point patterns to use for computing the simulation envelopes,
+    or an object of class \code{"envelope"} containing simulated point
+    patterns.
+  }
+  \item{nsim}{
+    Number of simulated point patterns to be generated
+    to produce the simulation envelope, if \code{envelope=TRUE}.
+  }
+  \item{nrank}{
+    Integer. Rank of the envelope value amongst the \code{nsim} simulated
+    values. A rank of 1 means that the minimum and maximum
+    simulated values will be used.
+  }
+  \item{plot.it}{
+    Logical value indicating whether 
+    plots should be shown. If \code{plot.it=FALSE}, only
+    the computed coordinates for the plots are returned.
+    See \emph{Value}.
+  }
+  \item{typename}{
+    Usually absent. 
+    If this argument is present, it should be a string, and will be used
+    (in the axis labels of plots) to describe the type of residuals.
+  }
+  \item{covname}{
+    A string name for the covariate, to be used in axis labels of plots.
+  }
+  \item{oldstyle}{
+    Logical flag indicating whether error bounds should be plotted
+    using the approximation given in the original paper
+    (\code{oldstyle=TRUE}),
+    or using the correct asymptotic formula (\code{oldstyle=FALSE}).
+  }
+  \item{check}{
+    Logical flag indicating whether the integrity of the data structure
+    in \code{object} should be checked.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot.default}}
+    and \code{\link{lines}} to control the plot behaviour.
+  }
+  \item{splineargs}{
+    A list of arguments passed to \code{smooth.spline}
+    for the estimation of the derivatives in the case \code{cumulative=FALSE}.
+  }
+  \item{verbose}{
+    Logical value indicating
+    whether to print progress reports during Monte Carlo simulation.
+  }
+}
+\value{
+  A list containing two dataframes
+  \code{empirical} and \code{theoretical}. 
+  The first dataframe \code{empirical} contains columns
+  \code{covariate} and \code{value} giving the coordinates of the
+  lurking variable plot. The second dataframe \code{theoretical}
+  contains columns \code{covariate}, \code{mean} and \code{sd}
+  giving the coordinates of the plot of the theoretical mean
+  and standard deviation.
+
+  The return value belongs to the class \code{"lurk"} for which there is
+  a plot method. 
+}
+\details{
+  This function generates a `lurking variable' plot for a
+  fitted point process model. 
+  Residuals from the model represented by \code{object}
+  are plotted against the covariate specified by \code{covariate}.
+  This plot can be used to reveal departures from the fitted model,
+  in particular, to reveal that the point pattern depends on the covariate.
+
+  First the residuals from the fitted model (Baddeley et al, 2004)
+  are computed at each quadrature point,
+  or alternatively the `exponential energy marks' (Stoyan and Grabarnik,
+  1991) are computed at each data point.
+  The argument \code{type} selects the type of
+  residual or weight. See \code{\link{diagnose.ppm}} for options
+  and explanation.
+
+  A lurking variable plot for point processes (Baddeley et al, 2004)
+  displays either the cumulative sum of residuals/weights
+  (if \code{cumulative = TRUE}) or a kernel-weighted average of the
+  residuals/weights (if \code{cumulative = FALSE}) plotted against
+  the covariate. The empirical plot (solid lines) is shown
+  together with its expected value assuming the model is true
+  (dashed lines) and optionally also the pointwise
+  two-standard-deviation limits (grey shading).
+  
+  To be more precise, let \eqn{Z(u)} denote the value of the covariate
+  at a spatial location \eqn{u}. 
+  \itemize{
+    \item
+    If \code{cumulative=TRUE} then we plot \eqn{H(z)} against \eqn{z},
+    where \eqn{H(z)} is the sum of the residuals 
+    over all quadrature points where the covariate takes
+    a value less than or equal to \eqn{z}, or the sum of the
+    exponential energy weights over all data points where the covariate
+    takes a value less than or equal to \eqn{z}.
+    \item
+    If \code{cumulative=FALSE} then we plot \eqn{h(z)} against \eqn{z},
+    where \eqn{h(z)} is the derivative of \eqn{H(z)},
+    computed approximately by spline smoothing.
+  }
+  For the point process residuals \eqn{E(H(z)) = 0},
+  while for the exponential energy weights
+  \eqn{E(H(z)) = } area of the subset of the window 
+  satisfying \eqn{Z(u) \le z}{Z(u) <= z}.
+
+  If the empirical and theoretical curves deviate substantially
+  from one another, the interpretation is that the fitted model does
+  not correctly account for dependence on the covariate.
+  The correct form (of the spatial trend part of the model)
+  may be suggested by the shape of the plot.
+  
+  If \code{plot.sd = TRUE}, then superimposed on the lurking variable
+  plot are the pointwise
+  two-standard-deviation error limits for \eqn{H(z)} calculated for the
+  inhomogeneous Poisson process. The default is \code{plot.sd = TRUE}
+  for Poisson models and \code{plot.sd = FALSE} for non-Poisson
+  models.
+
+  By default, the two-standard-deviation limits are calculated
+  from the exact formula for the asymptotic variance
+  of the residuals under the asymptotic normal approximation,
+  equation (37) of Baddeley et al (2006).
+  However, for compatibility with the original paper
+  of Baddeley et al (2005), if \code{oldstyle=TRUE},
+  the two-standard-deviation limits are calculated
+  using the innovation variance, an over-estimate of the true
+  variance of the residuals.
+
+  The argument \code{object} must be a fitted point process model
+  (object of class \code{"ppm"}) typically produced by the maximum
+  pseudolikelihood fitting algorithm \code{\link{ppm}}.
+
+  The argument \code{covariate} is either a numeric vector, a pixel
+  image, or an R language expression.
+  If it is a numeric vector, it is assumed to contain
+  the values of the covariate for each of the quadrature points
+  in the fitted model. The quadrature points can be extracted by
+  \code{\link{quad.ppm}(object)}.
+
+  If \code{covariate} is a pixel image, it is assumed to contain the
+  values of the covariate at each location in the window. The values of
+  this image at the quadrature points will be extracted.
+
+  Alternatively, if \code{covariate}
+  is an \code{expression}, it will be evaluated in the same environment
+  as the model formula used in fitting the model \code{object}. It must
+  yield a vector of the same length as the number of quadrature points.
+  The expression may contain the terms \code{x} and \code{y} representing the
+  cartesian coordinates, and may also contain other variables that were
+  available when the model was fitted. Certain variable names are
+  reserved words; see \code{\link{ppm}}.
+
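+  For example, the following calls supply the Cartesian \eqn{x}
+  coordinate as the covariate in two equivalent ways
+  (a sketch, assuming \code{fit} is the fitted model constructed
+  in the Examples below; \code{Zx} is just an illustrative name):
+  \preformatted{
+    lurking(fit, expression(x))                    # language expression
+    Zx <- as.im(function(x,y) x, W=as.owin(fit))   # pixel image
+    lurking(fit, Zx)
+  }
+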
+  Note that lurking variable plots for the \eqn{x} and \eqn{y} coordinates
+  are also generated by \code{\link{diagnose.ppm}}, amongst other
+  types of diagnostic plots. This function is more general in that it
+  enables the user to plot the residuals against any chosen covariate
+  that may have been present.
+
+  For advanced use, even the values of the residuals/weights
+  can be altered. If the argument \code{rv} is present,
+  the residuals will not be calculated from the fitted model
+  \code{object} but will instead be taken directly from the object \code{rv}.
+  If \code{type = "eem"} then \code{rv} should be similar to the
+  return value of \code{\link{eem}}, namely, a numeric vector
+  with length equal to the number of data points in the original point
+  pattern. Otherwise, \code{rv} should be
+  similar to the return value of \code{\link{residuals.ppm}},
+  that is, \code{rv} should be an object of class
+  \code{"msr"} (see \code{\link{msr}}) representing a signed measure.
+}
+\seealso{
+ \code{\link{residuals.ppm}},
+ \code{\link{diagnose.ppm}},
+ \code{\link{qqplot.ppm}},
+ \code{\link{eem}},
+ \code{\link{ppm}}
+}
+\references{
+  Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005)
+  Residual analysis for spatial point processes.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{67}, 617--666.
+
+  Baddeley, A.,
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Pakes, A.G. (2006)
+  Properties of residuals for spatial point processes.
+  \emph{Annals of the Institute of Statistical Mathematics}
+  \bold{60}, 627--649.
+  
+  Stoyan, D. and Grabarnik, P. (1991)
+  Second-order characteristics for stochastic structures connected with
+  Gibbs point processes.
+  \emph{Mathematische Nachrichten} \bold{151}, 95--100.
+}
+\examples{
+  data(nztrees)
+  lurking(nztrees, expression(x))
+  fit <- ppm(nztrees, ~x, Poisson())
+  lurking(fit, expression(x))
+  lurking(fit, expression(x), cumulative=FALSE)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
+\keyword{hplot}
diff --git a/man/lut.Rd b/man/lut.Rd
new file mode 100644
index 0000000..64e816a
--- /dev/null
+++ b/man/lut.Rd
@@ -0,0 +1,97 @@
+\name{lut}
+\alias{lut}
+\title{Lookup Tables}
+\description{
+  Create a lookup table.
+}
+\usage{
+lut(outputs, ..., range=NULL, breaks=NULL, inputs=NULL)
+}
+\arguments{
+  \item{outputs}{Vector of output values}
+  \item{\dots}{Ignored.}
+  \item{range}{
+    Interval of numbers to be mapped.
+    A numeric vector of length 2, specifying the ends of the range of values
+    to be mapped.
+    Incompatible with \code{breaks} or \code{inputs}.
+  }
+  \item{inputs}{
+    Input values to which the output values are associated.
+    A factor or vector of the same length as \code{outputs}.
+    Incompatible with \code{breaks} or \code{range}.
+  }
+  \item{breaks}{
+    Breakpoints for the lookup table.
+    A numeric vector of length equal to \code{length(outputs)+1}.
+    Incompatible with \code{range} or \code{inputs}.
+  }
+}
+\details{
+  A lookup table is a function, mapping input values to output values.
+
+  The command \code{lut} creates an object representing
+  a lookup table, which can then be used to control various behaviour
+  in the \pkg{spatstat} package. It can also be used to compute the
+  output value assigned to any input value. 
+
+  The argument \code{outputs} specifies the output values to which
+  input data values will be mapped. It should be a vector of
+  any atomic type (e.g. numeric, logical, character, complex) or factor
+  values. 
+
+  Exactly one of the arguments \code{range}, \code{inputs} or \code{breaks}
+  must be specified by name.
+
+  If \code{inputs} is given, then it should be a vector or factor,
+  of the same length as \code{outputs}. The entries of \code{inputs} can be
+  any atomic type (e.g. numeric, logical, character, complex) or factor
+  values. The resulting lookup table associates the value \code{inputs[i]}
+  with the value \code{outputs[i]}.
+
+  If \code{range} is given, then it determines the interval of the real
+  number line that will be mapped. It should be a numeric vector of
+  length 2. 
+
+  If \code{breaks} is given, then it determines intervals
+  of the real number line
+  which are mapped to each output value. It should be a numeric vector,
+  of length at least 2, with entries that are in increasing order.
+  Infinite values are allowed. Any number in the range
+  between \code{breaks[i]} and \code{breaks[i+1]} will be mapped to the
+  value \code{outputs[i]}. 
+
+  The result is an object of class \code{"lut"}. 
+  There is a \code{print} method for this class.
+  Some plot commands in the \pkg{spatstat} package accept an object
+  of this class as a specification of a lookup table.
+
+  The result is also a function \code{f} which can be used to compute
+  the output value assigned to any input data value. 
+  That is, \code{f(x)} returns the output value assigned
+  to \code{x}. This also works for vectors of input data values.
+}
+\value{
+  A function, which is also an object of class \code{"lut"}.
+}
+\seealso{
+  \code{\link{colourmap}}.
+}
+\examples{
+  # lookup table for real numbers, using breakpoints
+  cr <- lut(factor(c("low", "medium", "high")), breaks=c(0,5,10,15))
+  cr
+  cr(3.2)
+  cr(c(3,5,7))
+  # lookup table for discrete set of values
+  ct <- lut(c(0,1), inputs=c(FALSE, TRUE))
+  ct(TRUE)
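+  # lookup table on an interval of the real line, using 'range'
+  # (a sketch: presumably the range is divided into equal intervals,
+  # one per output value, by analogy with colourmap)
+  cr2 <- lut(letters[1:5], range=c(0,1))
+  cr2(0.5)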
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/macros/defns.Rd b/man/macros/defns.Rd
new file mode 100644
index 0000000..466babb
--- /dev/null
+++ b/man/macros/defns.Rd
@@ -0,0 +1,19 @@
+%% macro definitions for spatstat man pages
+\newcommand{\adrian}{Adrian Baddeley \email{Adrian.Baddeley@curtin.edu.au}}
+\newcommand{\rolf}{Rolf Turner \email{r.turner@auckland.ac.nz}}
+\newcommand{\ege}{Ege Rubak \email{rubak@math.aau.dk}}
+\newcommand{\spatstatAuthors}{\adrian, \rolf and \ege}
+% Names with accents
+\newcommand{\Bogsted}{\ifelse{latex}{\out{B\o gsted}}{Bogsted}}
+\newcommand{\Cramer}{\ifelse{latex}{\out{Cram\'er}}{Cramer}}
+\newcommand{\Hogmander}{\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}}
+\newcommand{\Jyvaskyla}{\ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}}
+\newcommand{\Matern}{\ifelse{latex}{\out{Mat\'ern}}{Matern}}
+\newcommand{\Moller}{\ifelse{latex}{\out{M\o ller}}{Moller}}
+\newcommand{\Oehlschlaegel}{\ifelse{latex}{\out{Oehlschl\"{a}gel}}{Oehlschlaegel}}
+\newcommand{\Prokesova}{\ifelse{latex}{\out{Proke\u{s}ov{\'{a}}}}{Prokesova}}
+\newcommand{\Sarkka}{\ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}}
+%% List of all Gibbs interactions 
+\newcommand{\GibbsInteractionsList}{\code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{Concom}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{Hybrid}}, \code{\link{LennardJones}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{OrdThresh}}, \code{\link{Ord}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Penttinen}}, \code{\link{Poisson}}, \code [...]
+%% List of interactions recognised by RMH code
+\newcommand{\rmhInteractionsList}{\code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{Hybrid}}, \code{\link{LennardJones}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Penttinen}}, \code{\link{Poisson}}, \code{\link{Softcore}}, \code{\link{Strauss}}, \code{\link{StraussHard}} and \code{\link{Triplets}}}
diff --git a/man/markconnect.Rd b/man/markconnect.Rd
new file mode 100644
index 0000000..848de66
--- /dev/null
+++ b/man/markconnect.Rd
@@ -0,0 +1,186 @@
+\name{markconnect}
+\alias{markconnect}
+\title{
+  Mark Connection Function
+}
+\description{
+  Estimate the marked connection function
+  of a multitype point pattern.
+}
+\usage{
+markconnect(X, i, j, r=NULL,
+         correction=c("isotropic", "Ripley", "translate"),
+         method="density", \dots, normalise=FALSE)
+}
+\arguments{
+  \item{X}{The observed point pattern.
+    An object of class \code{"ppp"} or something acceptable to
+    \code{\link{as.ppp}}. 
+  }
+  \item{i}{Number or character string identifying the type (mark value)
+    of the points in \code{X} from which distances are measured.
+  }
+  \item{j}{Number or character string identifying the type (mark value)
+    of the points in \code{X} to which distances are measured.
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the mark connection function \eqn{p_{ij}(r)}{p[i,j](r)}
+    should be evaluated. There is a sensible default.
+  }
+  \item{correction}{
+    A character vector containing any selection of the
+    options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}.
+    It specifies the edge correction(s) to be applied.
+  }
+  \item{method}{
+    A character vector indicating the user's choice of
+    density estimation technique to be used. Options are
+    \code{"density"}, 
+    \code{"loess"},
+    \code{"sm"} and \code{"smrep"}.
+  }
+  \item{\dots}{
+    Arguments passed to the density estimation routine
+    (\code{\link{density}}, \code{\link{loess}} or \code{sm.density})
+    selected by \code{method}.
+  }
+  \item{normalise}{
+    If \code{TRUE}, normalise the mark connection function by
+    dividing it by \eqn{p_i p_j}{p[i]*p[j]}, the estimated probability
+    that randomly-selected points will have marks \eqn{i} and \eqn{j}.
+  }
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+  
+  Essentially a data frame containing numeric columns 
+  \item{r}{the values of the argument \eqn{r} 
+    at which the mark connection function \eqn{p_{ij}(r)}{p[i,j](r)}
+    has been  estimated
+  }
+  \item{theo}{the theoretical value of \eqn{p_{ij}(r)}{p[i,j](r)}
+    when the marks attached to different points are independent
+  }
+  together with a column or columns named 
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the function \eqn{p_{ij}(r)}{p[i,j](r)}
+  obtained by the edge corrections named.
+}
+\details{
+  The mark connection function \eqn{p_{ij}(r)}{p[i,j](r)}
+  of a multitype point process \eqn{X}
+  is a measure of the dependence between the types of two 
+  points of the process a distance \eqn{r} apart.
+
+  Informally \eqn{p_{ij}(r)}{p[i,j](r)} is defined
+  as the conditional probability,
+  given that there is a point of the process at a location \eqn{u}
+  and another point of the process at a location \eqn{v}
+  separated by a distance \eqn{||u-v|| = r}, that the first point
+  is of type \eqn{i} and the second point is of type \eqn{j}.
+  See Stoyan and Stoyan (1994).
+
+  If the marks attached to the points of \code{X} are independent
+  and identically distributed, then
+  \eqn{p_{ij}(r) \equiv p_i p_j}{p[i,j](r) =  p[i]p[j]} where
+  \eqn{p_i}{p[i]} denotes the probability that a point is of type
+  \eqn{i}. Values larger than this,
+  \eqn{p_{ij}(r) > p_i p_j}{p[i,j](r) >  p[i]p[j]},
+  indicate positive association between the two types,
+  while smaller values indicate negative association.
+
+  The argument \code{X} must be a point pattern (object of class
+  \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
+  It must be a multitype point pattern (a marked point pattern
+  with factor-valued marks).
+
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{p_{ij}(r)}{p[i,j](r)} is estimated.
+  There is a sensible default.
+
+  This algorithm assumes that \code{X} can be treated
+  as a realisation of a stationary (spatially homogeneous) 
+  random spatial point process in the plane, observed through
+  a bounded window.
+  The window (which is specified in \code{X} as \code{Window(X)})
+  may have arbitrary shape.
+
+  Biases due to edge effects are
+  treated in the same manner as in \code{\link{Kest}}.
+  The edge corrections implemented here are
+  \describe{
+    \item{isotropic/Ripley}{Ripley's isotropic correction
+      (see Ripley, 1988; Ohser, 1983).
+      This is implemented only for rectangular and polygonal windows
+      (not for binary masks).
+    }
+    \item{translate}{Translation correction (Ohser, 1983).
+      Implemented for all window geometries, but slow for
+      complex windows. 
+    }
+  }
+  Note that the estimator assumes the process is stationary (spatially
+  homogeneous). 
+
+  The mark connection function is estimated using density estimation
+  techniques. The user can choose between
+  \describe{
+    \item{\code{"density"}}{
+      which uses the standard kernel
+      density estimation routine \code{\link{density}}, and
+      works only for evenly-spaced \code{r} values;
+    }
+    \item{\code{"loess"}}{
+      which uses the function \code{\link{loess}} in the
+      package \pkg{stats};
+    }
+    \item{\code{"sm"}}{
+      which uses the function \code{sm.density} in the
+      package \pkg{sm} and is extremely slow;
+    }
+    \item{\code{"smrep"}}{
+      which uses the function \code{sm.density} in the
+      package \pkg{sm} and is relatively fast, but may require manual
+      control of the smoothing parameter \code{hmult}.
+    }
+  }
+}
+\references{
+  Ohser, J. (1983)
+  On estimators for the reduced second moment measure of point processes.
+  \emph{Mathematische Operationsforschung und Statistik, series Statistics},
+  \bold{14}, 63--71.
+
+  Ripley, B.D. (1988)
+  \emph{Statistical inference for spatial processes}.
+  Cambridge University Press.
+
+  Stoyan, D. and Stoyan, H. (1994)
+  \emph{Fractals, random shapes and point fields:
+  methods of geometrical statistics}.
+  John Wiley and Sons.
+}
+\seealso{
+  Multitype pair correlation \code{\link{pcfcross}}
+  and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}}.
+
+  Use \code{\link{alltypes}} to compute the mark connection functions
+  between all pairs of types.
+
+  Mark correlation \code{\link{markcorr}} and 
+  mark variogram \code{\link{markvario}}
+  for numeric-valued marks.
+
+}
+\examples{
+    # Hughes' amacrine data
+    # Cells marked as 'on'/'off'
+    data(amacrine)
+    M <- markconnect(amacrine, "on", "off")
+    plot(M)
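+
+    # normalised mark connection function: equals 1 when the
+    # marks are independent (see the 'normalise' argument above)
+    Mn <- markconnect(amacrine, "on", "off", normalise=TRUE)
+    plot(Mn)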
+
+    # Compute for all pairs of types at once
+    plot(alltypes(amacrine, markconnect))
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
+
diff --git a/man/markcorr.Rd b/man/markcorr.Rd
new file mode 100644
index 0000000..dc7353a
--- /dev/null
+++ b/man/markcorr.Rd
@@ -0,0 +1,319 @@
+\name{markcorr}
+\alias{markcorr}
+\title{
+  Mark Correlation Function
+}
+\description{
+  Estimate the marked correlation function
+  of a marked point pattern.
+}
+\usage{
+markcorr(X, f = function(m1, m2) { m1 * m2}, r=NULL,
+         correction=c("isotropic", "Ripley", "translate"),
+         method="density", \dots, weights=NULL,
+         f1=NULL, normalise=TRUE, fargs=NULL)
+}
+\arguments{
+  \item{X}{The observed point pattern.
+    An object of class \code{"ppp"} or something acceptable to
+    \code{\link{as.ppp}}. 
+  }
+  \item{f}{Optional. Test function \eqn{f} used in the definition of the
+    mark correlation function. An \R function with at least two
+    arguments. There is a sensible default.
+  }
+  \item{r}{Optional. Numeric vector. The values of the argument \eqn{r}
+    at which the mark correlation function 
+    \eqn{k_f(r)}{k[f](r)} should be evaluated.
+    There is a sensible default.
+  }
+  \item{correction}{
+    A character vector containing any selection of the
+    options \code{"isotropic"}, \code{"Ripley"}, \code{"translate"},
+    \code{"translation"}, \code{"none"} or \code{"best"}.
+    It specifies the edge correction(s) to be applied.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{method}{
+    A character vector indicating the user's choice of
+    density estimation technique to be used. Options are
+    \code{"density"}, 
+    \code{"loess"},
+    \code{"sm"} and \code{"smrep"}.
+  }
+  
+  \item{\dots}{
+    Arguments passed to the density estimation routine
+    (\code{\link{density}}, \code{\link{loess}} or \code{sm.density})
+    selected by \code{method}.
+  }
+  \item{weights}{
+    Optional numeric vector of weights for each data point in \code{X}.
+  }
+  \item{f1}{
+    An alternative to \code{f}. If this argument is given,
+    then \eqn{f} is assumed to take the form
+    \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)}.
+  }
+  \item{normalise}{
+    If \code{normalise=FALSE},
+    compute only the numerator of the expression for the
+    mark correlation.
+  }
+  \item{fargs}{
+    Optional. A list of extra arguments to be passed to the function
+    \code{f} or \code{f1}.
+  }
+}
+\value{
+  A function value table (object of class \code{"fv"})
+  or a list of function value tables, one for each column of marks.
+  
+  An object of class \code{"fv"} (see \code{\link{fv.object}})
+  is essentially a data frame containing numeric columns 
+  \item{r}{the values of the argument \eqn{r} 
+    at which the mark correlation function \eqn{k_f(r)}{k[f](r)}
+    has been  estimated
+  }
+  \item{theo}{the theoretical value of \eqn{k_f(r)}{k[f](r)}
+    when the marks attached to different points are independent,
+    namely 1
+  }
+  together with a column or columns named 
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the mark correlation function \eqn{k_f(r)}{k[f](r)}
+  obtained by the edge corrections named.
+}
+\details{
+  By default, this command calculates an estimate of
+  Stoyan's mark correlation \eqn{k_{mm}(r)}{k[mm](r)}
+  for the point pattern.
+
+  Alternatively if the argument \code{f} or \code{f1} is given, then it
+  calculates Stoyan's generalised mark correlation \eqn{k_f(r)}{k[f](r)}
+  with test function \eqn{f}.
+
+  Theoretical definitions are as follows
+  (see Stoyan and Stoyan (1994, p. 262)):
+  \itemize{
+    \item
+    For a point process \eqn{X} with numeric marks,
+    Stoyan's mark correlation function \eqn{k_{mm}(r)}{k[mm](r)},
+    is
+    \deqn{
+      k_{mm}(r) = \frac{E_{0u}[M(0) M(u)]}{E[M M']}
+    }{
+      k[mm](r) = E[0u](M(0) * M(u))/E(M * M')
+    }
+    where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation
+    given that there are points of the process at the locations
+    \eqn{0} and \eqn{u} separated by a distance \eqn{r},
+    and where \eqn{M(0),M(u)} denote the marks attached to these
+    two points. On the denominator, \eqn{M,M'} are random marks
+    drawn independently from the marginal distribution of marks,
+    and \eqn{E} is the usual expectation.
+    \item
+    For a multitype point process \eqn{X}, the mark correlation is 
+    \deqn{
+      k_{mm}(r) = \frac{P_{0u}[M(0) = M(u)]}{P[M = M']}
+    }{
+      k[mm](r) = P[0u](M(0) = M(u))/P(M = M')
+    }
+    where \eqn{P} and \eqn{P_{0u}}{P[0u]} denote the
+    probability and conditional probability.
+    \item 
+    The \emph{generalised} mark correlation function \eqn{k_f(r)}{k[f](r)}
+    of a marked point process \eqn{X}, with test function \eqn{f},
+    is
+    \deqn{
+      k_f(r) = \frac{E_{0u}[f(M(0),M(u))]}{E[f(M,M')]}
+    }{
+      k[f](r) = E[0u](f(M(0),M(u)))/E(f(M,M'))
+    }
+  }
+
+  The test function \eqn{f} is any function
+  \eqn{f(m_1,m_2)}{f(m1,m2)}
+  with two arguments which are possible marks of the pattern,
+  and which returns a nonnegative real value.
+  Common choices of \eqn{f} are:
+  for continuous nonnegative real-valued marks,
+  \deqn{f(m_1,m_2) = m_1 m_2}{f(m1,m2)= m1 * m2}
+  for discrete marks (multitype point patterns),
+  \deqn{f(m_1,m_2) = 1(m_1 = m_2)}{f(m1,m2)= (m1 == m2)}
+  and for marks taking values in \eqn{[0,2\pi)}{[0,2 * pi)},
+  \deqn{f(m_1,m_2) = \sin(m_1 - m_2)}{f(m1,m2) = sin(m1-m2)}.
+  
+  Note that \eqn{k_f(r)}{k[f](r)} is not a ``correlation''
+  in the usual statistical sense. It can take any 
+  nonnegative real value. The value 1 suggests ``lack of correlation'':
+  if the marks attached to the points of \code{X} are independent
+  and identically distributed, then
+  \eqn{k_f(r) \equiv 1}{k[f](r) =  1}.
+  The interpretation of values larger or smaller than 1 depends
+  on the choice of function \eqn{f}.
+
+  The argument \code{X} must be a point pattern (object of class
+  \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
+  It must be a marked point pattern.
+
+  The argument \code{f} determines the function to be applied to
+  pairs of marks. It has a sensible default, which depends on the
+  kind of marks in \code{X}. If the marks
+  are numeric values, then \code{f <- function(m1, m2) { m1 * m2}}
+  computes the product of two marks.
+  If the marks are a factor (i.e. if \code{X} is a multitype point
+  pattern) then \code{f <- function(m1, m2) { m1 == m2}} yields
+  the value 1 when the two marks are equal, and 0 when they are unequal.
+  These are the conventional definitions for numerical
+  marks and multitype points respectively.
+
+  The argument \code{f} may be specified by the user.
+  It must be an \R function, accepting two arguments \code{m1}
+  and \code{m2} which are vectors of equal length containing mark
+  values (of the same type as the marks of \code{X}).
+  (It may also take additional arguments, passed through \code{fargs}).
+  It must return a vector of numeric
+  values of the same length as \code{m1} and \code{m2}.
+  The values must be non-negative, and \code{NA} values are not permitted.
+
+  Alternatively the user may specify the argument \code{f1}
+  instead of \code{f}. This indicates that the test function \eqn{f}
+  should take the form \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)}
+  where \eqn{f_1(u)}{f1(u)} is given by the argument \code{f1}.
+  The argument \code{f1} should be an \R function with at least one
+  argument.
+  (It may also take additional arguments, passed through \code{fargs}).
+  
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{k_f(r)}{k[f](r)} is estimated.
+
+  This algorithm assumes that \code{X} can be treated
+  as a realisation of a stationary (spatially homogeneous) 
+  random spatial point process in the plane, observed through
+  a bounded window.
+  The window (which is specified in \code{X} as \code{Window(X)})
+  may have arbitrary shape.
+
+  Biases due to edge effects are
+  treated in the same manner as in \code{\link{Kest}}.
+  The edge corrections implemented here are
+  \describe{
+    \item{isotropic/Ripley}{Ripley's isotropic correction
+      (see Ripley, 1988; Ohser, 1983).
+      This is implemented only for rectangular and polygonal windows
+      (not for binary masks).
+    }
+    \item{translate}{Translation correction (Ohser, 1983).
+      Implemented for all window geometries, but slow for
+      complex windows. 
+    }
+  }
+  Note that the estimator assumes the process is stationary (spatially
+  homogeneous). 
+
+  The numerator and denominator of the mark correlation function
+  (in the expression above) are estimated using density estimation
+  techniques. The user can choose between
+  \describe{
+    \item{\code{"density"}}{
+      which uses the standard kernel
+      density estimation routine \code{\link{density}}, and
+      works only for evenly-spaced \code{r} values;
+    }
+    \item{\code{"loess"}}{
+      which uses the function \code{\link{loess}} in the
+      package \pkg{stats};
+    }
+    \item{\code{"sm"}}{
+      which uses the function \code{sm.density} in the
+      package \pkg{sm} and is extremely slow;
+    }
+    \item{\code{"smrep"}}{
+      which uses the function \code{sm.density} in the
+      package \pkg{sm} and is relatively fast, but may require manual
+      control of the smoothing parameter \code{hmult}.
+    }
+  }
+  If \code{normalise=FALSE} then the algorithm will compute
+  only the numerator
+  \deqn{
+    c_f(r) = E_{0u} f(M(0),M(u))
+  }{
+    c[f](r) = E[0u] f(M(0),M(u))
+  }
+  of the expression for the mark correlation function.
+}
+\references{
+  Ohser, J. (1983)
+  On estimators for the reduced second moment measure of point processes.
+  \emph{Mathematische Operationsforschung und Statistik, series Statistics},
+  \bold{14}, 63--71.
+
+  Ripley, B.D. (1988)
+  \emph{Statistical inference for spatial processes}.
+  Cambridge University Press.
+
+  Stoyan, D. and Stoyan, H. (1994)
+  \emph{Fractals, random shapes and point fields:
+  methods of geometrical statistics}.
+  John Wiley and Sons.
+}
+\seealso{
+  Mark variogram \code{\link{markvario}} for numeric marks.
+  
+  Mark connection function \code{\link{markconnect}} and 
+  multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}}
+  for factor-valued marks.
+
+  Mark cross-correlation function \code{\link{markcrosscorr}}
+  for point patterns with several columns of marks.
+  
+  \code{\link{Kmark}} to estimate a cumulative function
+  related to the mark correlation function.
+}
+\examples{
+    # CONTINUOUS-VALUED MARKS:
+    # (1) Spruces
+    # marks represent tree diameter
+    # mark correlation function
+    ms <- markcorr(spruces)
+    plot(ms)
+
+    # (2) simulated data with independent marks
+    X <- rpoispp(100)
+    X <- X \%mark\% runif(npoints(X))
+    \dontrun{
+    Xc <- markcorr(X)
+    plot(Xc)
+    }
+    
+    # MULTITYPE DATA:
+    # Hughes' amacrine data
+    # Cells marked as 'on'/'off'
+    # (3) Kernel density estimate with Epanechnikov kernel
+    # (as proposed by Stoyan & Stoyan)
+    M <- markcorr(amacrine, function(m1,m2) {m1==m2},
+                  correction="translate", method="density",
+                  kernel="epanechnikov")
+    plot(M)
+    # Note: kernel="epanechnikov" comes from help(density)
+
+    # (4) Same again with explicit control over bandwidth
+    \dontrun{
+    M <- markcorr(amacrine, 
+                  correction="translate", method="density",
+                  kernel="epanechnikov", bw=0.02)
+    # see help(density) for correct interpretation of 'bw'
+    }
+
+   \testonly{
+    niets <- markcorr(amacrine, function(m1,m2){m1 == m2}, method="loess")
+    if(require(sm))
+      niets <- markcorr(X, correction="isotropic", method="smrep", hmult=2)
+    }
+
+   # weighted mark correlation
+   Y <- subset(betacells, select=type)
+   a <- marks(betacells)$area
+   v <- markcorr(Y, weights=a)
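+
+   # test function of the form f(u,v) = f1(u) * f1(v), supplied
+   # via the shortcut argument 'f1' (a sketch):
+   mf <- markcorr(spruces, f1=sqrt)
+   plot(mf)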
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
+
diff --git a/man/markcrosscorr.Rd b/man/markcrosscorr.Rd
new file mode 100644
index 0000000..e694482
--- /dev/null
+++ b/man/markcrosscorr.Rd
@@ -0,0 +1,117 @@
+\name{markcrosscorr}
+\alias{markcrosscorr}
+\title{
+  Mark Cross-Correlation Function
+}
+\description{
+  Given a spatial point pattern with several columns of marks,
+  this function computes the mark correlation function between
+  each pair of columns of marks.
+}
+\usage{
+  markcrosscorr(X, r = NULL,
+                correction = c("isotropic", "Ripley", "translate"),
+                method = "density", \dots, normalise = TRUE, Xname = NULL)
+}
+\arguments{
+  \item{X}{The observed point pattern.
+    An object of class \code{"ppp"} or something acceptable to
+    \code{\link{as.ppp}}. 
+  }
+  \item{r}{Optional. Numeric vector. The values of the argument \eqn{r}
+    at which the mark correlation function 
+    \eqn{k_f(r)}{k[f](r)} should be evaluated.
+    There is a sensible default.
+  }
+  \item{correction}{
+    A character vector containing any selection of the
+    options \code{"isotropic"}, \code{"Ripley"}, \code{"translate"},
+    \code{"translation"}, \code{"none"} or \code{"best"}.
+    It specifies the edge correction(s) to be applied.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{method}{
+    A character vector indicating the user's choice of
+    density estimation technique to be used. Options are
+    \code{"density"}, 
+    \code{"loess"},
+    \code{"sm"} and \code{"smrep"}.
+  }
+  \item{\dots}{
+    Arguments passed to the density estimation routine
+    (\code{\link{density}}, \code{\link{loess}} or \code{sm.density})
+    selected by \code{method}.
+  }
+  \item{normalise}{
+    If \code{normalise=FALSE},
+    compute only the numerator of the expression for the
+    mark correlation.
+  }
+  \item{Xname}{
+    Optional character string name for the dataset \code{X}.
+  }
+}
+\details{
+  First, all columns of marks are converted to numerical values.
+  A factor with \eqn{m} possible levels is converted to
+  \eqn{m} columns of dummy (indicator) values.
+
+  Next, each pair of columns is considered, and the mark
+  cross-correlation is defined as
+  \deqn{
+    k_{mm}(r) = \frac{E_{0u}[M_i(0) M_j(u)]}{E[M_i M_j]}
+  }{
+    k[mm](r) = E[0u](M(i,0) * M(j,u))/E(Mi * Mj)
+  }
+  where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation
+  given that there are points of the process at the locations
+  \eqn{0} and \eqn{u} separated by a distance \eqn{r}.
+  On the numerator,
+  \eqn{M_i(0)}{M(i,0)} and \eqn{M_j(u)}{M(j,u)}
+  are the marks attached to locations \eqn{0} and \eqn{u} respectively
+  in the \eqn{i}th and \eqn{j}th columns of marks respectively.
+  On the denominator, \eqn{M_i}{Mi} and \eqn{M_j}{Mj} are
+  independent random values drawn from the
+  \eqn{i}th and \eqn{j}th columns of marks, respectively,
+  and \eqn{E} is the usual expectation.
+  
+  Note that \eqn{k_{mm}(r)}{k[mm](r)} is not a ``correlation''
+  in the usual statistical sense. It can take any 
+  nonnegative real value. The value 1 suggests ``lack of correlation'':
+  if the marks attached to the points of \code{X} are independent
+  and identically distributed, then
+  \eqn{k_{mm}(r) \equiv 1}{k[mm](r) =  1}.
+
+  The argument \code{X} must be a point pattern (object of class
+  \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
+  It must be a marked point pattern.
+
+  The cross-correlations are estimated in the same manner as
+  for \code{\link{markcorr}}.
+}
+\value{
+  A function array (object of class \code{"fasp"}) containing
+  the mark cross-correlation functions for each possible pair
+  of columns of marks.
+}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{markcorr}}
+}
+\examples{
+  # The dataset 'betacells' has two columns of marks:
+  #       'type' (factor)
+  #       'area' (numeric)
+  if(interactive()) plot(betacells)
+  plot(markcrosscorr(betacells))
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/marks.Rd b/man/marks.Rd
new file mode 100644
index 0000000..0ba62cd
--- /dev/null
+++ b/man/marks.Rd
@@ -0,0 +1,115 @@
+\name{marks}
+\alias{marks}
+\alias{marks.ppp}           
+\alias{marks.ppx}           
+\alias{marks<-}           
+\alias{marks<-.ppp}           
+\alias{marks<-.ppx}           
+\alias{setmarks}
+\alias{\%mark\%}  %DoNotExport 
+%NAMESPACE export("%mark%")
+\title{Marks of a Point Pattern}
+\description{
+  Extract or change the marks attached to
+  a point pattern dataset.
+}
+\usage{
+marks(x, \dots)
+
+\method{marks}{ppp}(x, \dots, dfok=TRUE, drop=TRUE)           
+
+\method{marks}{ppx}(x, \dots, drop=TRUE)
+
+marks(x, \dots) <- value
+
+\method{marks}{ppp}(x, \dots, dfok=TRUE, drop=TRUE) <- value
+
+\method{marks}{ppx}(x, \dots) <- value
+
+setmarks(x, value)
+
+x \%mark\% value
+}
+\arguments{
+  \item{x}{
+    Point pattern dataset (object of class \code{"ppp"} or \code{"ppx"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{dfok}{
+    Logical. If \code{FALSE}, data frames of marks are not permitted
+    and will generate an error.
+  }
+  \item{drop}{
+    Logical. If \code{TRUE}, a data frame consisting of a single column
+    of marks will be converted to a vector or factor.
+  }
+  \item{value}{
+    Replacement value. A vector, data frame or hyperframe of mark values,
+    or \code{NULL}. 
+  }
+}
+\value{
+  For \code{marks(x)}, the result is a vector, factor, data frame or hyperframe,
+  containing the mark values attached to the points of \code{x}.
+
+  For \code{marks(x) <- value}, the result is the updated point pattern
+  \code{x} (with the side-effect that the dataset \code{x} is updated in
+  the current environment).
+
+  For \code{setmarks(x,value)} and \code{x \%mark\% value}, the return value
+  is the point pattern obtained by replacing the
+  marks of \code{x} by \code{value}.
+}
+\details{
+  These functions extract or change the marks
+  attached to the points of the point pattern \code{x}.
+  
+  The expression \code{marks(x)} extracts the marks of \code{x}.
+  The assignment \code{marks(x) <- value} assigns new marks to the
+  dataset \code{x}, and updates the dataset \code{x} in the current
+  environment. The expression \code{setmarks(x,value)}
+  or equivalently \code{x \%mark\% value} returns a point pattern
+  obtained by replacing the marks of \code{x} by \code{value}, but does
+  not change the dataset \code{x} itself.
+  
+  For point patterns in two-dimensional space (objects of class
+  \code{"ppp"}) the marks can be a vector, a factor, or a data frame.
+  
+  For general point patterns (objects of class \code{"ppx"}) the
+  marks can be a vector, a factor, a data frame or a
+  hyperframe.
+  
+  For the assignment \code{marks(x) <- value}, the \code{value}
+  should be a vector or factor of length equal to the number of
+  points in \code{x}, or a data frame or hyperframe with as many rows
+  as there are points in \code{x}. If \code{value} is a single value,
+  or a data frame or hyperframe with one row, then it will be replicated
+  so that the same marks will be attached to each point.
+  
+  To remove marks, use \code{marks(x) <- NULL} or
+  \code{\link{unmark}(x)}.
+  
+  Use \code{\link{ppp}} or \code{\link{ppx}}
+  to create point patterns in more general
+  situations.
+}
+\seealso{
+  \code{\link{ppp.object}},
+  \code{\link{ppx}},
+  \code{\link{unmark}},
+  \code{\link{hyperframe}}
+}
+\examples{
+   X <- amacrine
+   # extract marks
+   m <- marks(X)
+   # recode the mark values "off", "on" as 0, 1
+   marks(X) <- as.integer(m == "on")
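+   # attach new marks without modifying X, using setmarks or \%mark\%
+   Y <- setmarks(X, runif(npoints(X)))
+   Z <- X \%mark\% factor(sample(c("A","B"), npoints(X), replace=TRUE))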
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/marks.psp.Rd b/man/marks.psp.Rd
new file mode 100644
index 0000000..9f34138
--- /dev/null
+++ b/man/marks.psp.Rd
@@ -0,0 +1,79 @@
+\name{marks.psp}
+\alias{marks.psp}
+\alias{marks<-.psp}
+\title{Marks of a Line Segment Pattern}
+\description{
+  Extract or change the marks attached to
+  a line segment pattern.
+}
+\usage{
+\method{marks}{psp}(x, \dots, dfok=TRUE)           
+\method{marks}{psp}(x, \dots) <- value
+}
+\arguments{
+  \item{x}{
+    Line segment pattern dataset (object of class \code{"psp"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{dfok}{
+    Logical. If \code{FALSE}, data frames of marks are not permitted
+    and will generate an error.
+  }
+  \item{value}{
+    Vector or data frame of mark values,
+    or \code{NULL}.
+  }
+}
+\value{
+  For \code{marks(x)}, the result is a vector, factor or data frame,
+  containing the mark values attached to the line segments of \code{x}.
+  If there are no marks, the result is \code{NULL}.
+
+  For \code{marks(x) <- value}, the result is the updated line segment pattern
+  \code{x} (with the side-effect that the dataset \code{x} is updated in
+  the current environment).
+}
+\details{
+  These functions extract or change the marks
+  attached to each of the line segments in the pattern \code{x}.
+  They are methods for the generic functions
+  \code{\link{marks}} and \code{\link{marks<-}}
+  for the class \code{"psp"} of line segment patterns.
+    
+  The expression \code{marks(x)} extracts the marks of \code{x}.
+  The assignment \code{marks(x) <- value} assigns new marks to the
+  dataset \code{x}, and updates the dataset \code{x} in the current
+  environment. 
+  
+  The marks can be a vector, a factor, or a data frame.
+  
+  For the assignment \code{marks(x) <- value}, the \code{value}
+  should be a vector or factor of length equal to the number of
+  segments in \code{x}, or a data frame with as many rows
+  as there are segments in \code{x}. If \code{value} is a single value,
+  or a data frame with one row, then it will be replicated
+  so that the same marks will be attached to each segment.
+  
+  To remove marks, use \code{marks(x) <- NULL} or \code{unmark(x)}.
+}
+\seealso{
+  \code{\link{psp.object}},
+  \code{\link{marks}},
+  \code{\link{marks<-}}
+}
+\examples{
+  m <- data.frame(A=1:10, B=letters[1:10])
+  X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin(), marks=m)
+
+  marks(X) 
+  marks(X)[,2]
+  marks(X) <- 42
+  marks(X) <- NULL
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/marks.tess.Rd b/man/marks.tess.Rd
new file mode 100644
index 0000000..ebb9968
--- /dev/null
+++ b/man/marks.tess.Rd
@@ -0,0 +1,81 @@
+\name{marks.tess}
+\alias{marks.tess}
+\alias{marks<-.tess}
+\alias{unmark.tess}
+\title{Marks of a Tessellation}
+\description{
+  Extract or change the marks attached to
+  the tiles of a tessellation.
+}
+\usage{
+\method{marks}{tess}(x, \dots)           
+
+\method{marks}{tess}(x, \dots) <- value
+
+\method{unmark}{tess}(X)
+}
+\arguments{
+  \item{x,X}{
+    Tessellation (object of class \code{"tess"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{value}{
+    Vector or data frame of mark values,
+    or \code{NULL}.
+  }
+}
+\value{
+  For \code{marks(x)}, the result is a vector, factor or data frame,
+  containing the mark values attached to the tiles of \code{x}.
+  If there are no marks, the result is \code{NULL}.
+
+  For \code{unmark(x)}, the result is the tessellation without marks.
+  
+  For \code{marks(x) <- value}, the result is the updated tessellation
+  \code{x} (with the side-effect that the dataset \code{x} is updated in
+  the current environment).
+}
+\details{
+  These functions extract or change the marks
+  attached to each of the tiles in the tessellation \code{x}.
+  They are methods for the generic functions
+  \code{\link{marks}} and \code{\link{marks<-}}
+  for the class \code{"tess"} of tessellations.
+    
+  The expression \code{marks(x)} extracts the marks of \code{x}.
+  The assignment \code{marks(x) <- value} assigns new marks to the
+  dataset \code{x}, and updates the dataset \code{x} in the current
+  environment. 
+  
+  The marks can be a vector, a factor, or a data frame.
+  
+  For the assignment \code{marks(x) <- value}, the \code{value}
+  should be a vector or factor of length equal to the number of
+  tiles in \code{x}, or a data frame with as many rows
+  as there are tiles in \code{x}. If \code{value} is a single value,
+  or a data frame with one row, then it will be replicated
+  so that the same marks will be attached to each tile.
+  
+  To remove marks, use \code{marks(x) <- NULL} or \code{unmark(x)}.
+}
+\seealso{
+  \code{\link{marks}},
+  \code{\link{marks<-}}
+}
+\examples{
+  D <- dirichlet(cells)
+  marks(D) <- tile.areas(D)
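+  # extract the marks, then remove them again
+  head(marks(D))
+  D0 <- unmark(D)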
+}
+\author{
+\adrian
+
+
+\rolf
+
+and \ege
+
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/markstat.Rd b/man/markstat.Rd
new file mode 100644
index 0000000..0597638
--- /dev/null
+++ b/man/markstat.Rd
@@ -0,0 +1,107 @@
+\name{markstat}
+\alias{markstat}
+\title{Summarise Marks in Every Neighbourhood in a Point Pattern}
+\description{
+  Visit each point in a point pattern, find the neighbouring points,
+  and summarise their marks
+}
+\usage{
+   markstat(X, fun, N=NULL, R=NULL, \dots)
+}
+\arguments{
+  \item{X}{
+    A marked point pattern. 
+    An object of class \code{"ppp"}.
+  }
+  \item{fun}{
+    Function to be applied to the vector of marks.
+  }
+  \item{N}{
+    Integer. If this argument is present,
+    the neighbourhood of a point of \code{X} is defined to consist of the
+    \code{N} points of \code{X} which are closest to it.
+  }
+  \item{R}{
+    Nonnegative numeric value. If this argument is present,
+    the neighbourhood of a point of \code{X} is defined to consist of
+    all points of \code{X} which lie within a distance \code{R}
+    of it.
+  }
+  \item{\dots}{
+    extra arguments passed to the function \code{fun}.
+    They must be given in the form \code{name=value}.
+  }
+}
+\value{
+  Similar to the result of \code{\link{apply}}.
+  If each call to \code{fun} returns a single numeric value,
+  the result is a vector of length \code{npoints(X)}, the number of points
+  in \code{X}.
+  If each call to \code{fun} returns a vector of the same length
+  \code{m}, then the result is a matrix of dimensions \code{c(m,n)},
+  where \code{n} is the number of points in \code{X};
+  note the transposition of the indices, as usual for the family of
+  \code{apply} functions.
+  If the calls to \code{fun} return vectors of different lengths,
+  the result is a list of length \code{npoints(X)}. 
+}
+\details{
+  This algorithm visits each point in the point pattern \code{X},
+  determines which points of \code{X} are ``neighbours'' of the current
+  point, extracts the marks of these neighbouring points,
+  applies the function \code{fun} to the marks,
+  and collects the value or values returned by \code{fun}.
+
+  The definition of ``neighbours'' depends on the arguments
+  \code{N} and \code{R}, exactly one of which must be given.
+
+  If \code{N} is given, then the neighbours of the current
+  point are the \code{N} points of \code{X} which are closest to
+  the current point (including the current point itself).
+  If \code{R} is given, then the neighbourhood of the current point
+  consists of all points of \code{X} which lie closer than a distance \code{R}
+  from the current point.
+
+  Each point of \code{X} is visited; the neighbourhood
+  of the current point is determined; the marks of these points
+  are extracted as a vector \code{v}; then the function 
+  \code{fun} is called as:
+  
+  \code{fun(v, \dots)}
+
+  where \code{\dots} are the arguments passed from the call to
+  \code{markstat}.
+
+  The results of each call to \code{fun} are collected and returned
+  according to the usual rules for \code{\link{apply}} and its
+  relatives. See the section on \bold{Value}.
+
+  This function is just a convenient wrapper for a common use of the
+  function \code{\link{applynbd}}. For more complex tasks,
+  use \code{\link{applynbd}}. 
+  To simply tabulate the marks in every \code{R}-neighbourhood, use
+  \code{\link{marktable}}.
+}
+\seealso{
+  \code{\link{applynbd}},
+  \code{\link{marktable}},
+  \code{\link{ppp.object}},
+  \code{\link{apply}}
+}
+\examples{
+  trees <- longleaf
+  \testonly{
+     trees <- trees[seq(1, npoints(trees), by=6)]
+  }
+
+  # average diameter of 5 closest neighbours of each tree
+  md <- markstat(trees, mean, N=5)
+
+  # range of diameters of trees within 10 metre radius
+  rd <- markstat(trees, range, R=10)
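+
+  # extra arguments are passed to 'fun' by name:
+  # e.g. 90th percentile of diameters within a 10 metre radius
+  q90 <- markstat(trees, quantile, R=10, probs=0.9)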
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{programming}
+ 
diff --git a/man/marktable.Rd b/man/marktable.Rd
new file mode 100644
index 0000000..0a2a987
--- /dev/null
+++ b/man/marktable.Rd
@@ -0,0 +1,86 @@
+\name{marktable}
+\alias{marktable}
+\title{Tabulate Marks in Neighbourhood of Every Point in a Point Pattern}
+\description{
+  Visit each point in a point pattern, find the neighbouring points,
+  and compile a frequency table of the marks of these neighbour points.
+}
+\usage{
+   marktable(X, R, N, exclude=TRUE, collapse=FALSE)
+}
+\arguments{
+  \item{X}{
+    A marked point pattern. 
+    An object of class \code{"ppp"}.
+  }
+  \item{R}{
+    Neighbourhood radius. Incompatible with \code{N}.
+  }
+  \item{N}{
+    Number of neighbours of each point. Incompatible with \code{R}.
+  }
+  \item{exclude}{
+    Logical. If \code{exclude=TRUE}, the neighbours of a point
+    do not include the point itself. If \code{exclude=FALSE},
+    a point belongs to its own neighbourhood.
+  }
+  \item{collapse}{
+    Logical. If \code{collapse=FALSE} (the default) the results for
+    each point are returned as separate rows of a table.
+    If \code{collapse=TRUE}, the results are aggregated according to the
+    type of point.
+  }
+}
+\value{
+  A contingency table (object of class \code{"table"}).
+  If \code{collapse=FALSE}, the table has one row for
+  each point in \code{X}, and one column for each possible mark value.
+  If \code{collapse=TRUE}, the table has one row and one column
+  for each possible mark value.
+}
+\details{
+  This algorithm visits each point in the point pattern \code{X},
+  inspects all the neighbouring points within a radius \code{R} of the current
+  point (or the \code{N} nearest neighbours of the current point),
+  and compiles a frequency table of the marks attached to the
+  neighbours. 
+
+  The dataset \code{X} must be a multitype point pattern, that is,
+  \code{marks(X)} must be a \code{factor}.
+  
+  If \code{collapse=FALSE} (the default), 
+  the result is a two-dimensional contingency table with one row for
+  each point in the pattern, and one column for each possible mark
+  value. The \code{[i,j]} entry in the table gives the number of
+  neighbours of point \code{i} that have mark \code{j}.
+
+  If \code{collapse=TRUE}, this contingency table is aggregated
+  according to the type of point, so that the result is a contingency
+  table with one row and one column for each possible mark value.
+  The  \code{[i,j]} entry in the table gives the number of
+  neighbours of a point with mark \code{i} that have mark \code{j}.
+
+  To perform more complicated calculations on the neighbours of every
+  point, use \code{\link{markstat}} or \code{\link{applynbd}}. 
+}
+\seealso{
+  \code{\link{markstat}},
+  \code{\link{applynbd}},
+  \code{\link{Kcross}},
+  \code{\link{ppp.object}},
+  \code{\link{table}}
+}
+\examples{
+  head(marktable(amacrine, 0.1))
+  head(marktable(amacrine, 0.1, exclude=FALSE))
+  marktable(amacrine, N=1, collapse=TRUE)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{programming}
+ 
diff --git a/man/markvario.Rd b/man/markvario.Rd
new file mode 100644
index 0000000..534127a
--- /dev/null
+++ b/man/markvario.Rd
@@ -0,0 +1,119 @@
+\name{markvario}
+\alias{markvario}
+\title{Mark Variogram}
+\description{
+  Estimate the mark variogram of a marked point pattern.
+}
+\usage{
+markvario(X, correction = c("isotropic", "Ripley", "translate"),
+r = NULL, method = "density", ..., normalise=FALSE)
+}
+\arguments{
+  \item{X}{The observed point pattern.
+    An object of class \code{"ppp"} or something acceptable to
+    \code{\link{as.ppp}}. It must have marks which are numeric.
+  }
+  \item{correction}{
+    A character vector containing any selection of the
+    options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}.
+    It specifies the edge correction(s) to be applied.
+  }
+  \item{r}{numeric vector. The values of the argument \eqn{r}
+    at which the mark variogram
+    \eqn{\gamma(r)}{gamma(r)}
+    should be evaluated.
+    There is a sensible default.
+  }
+  \item{method}{
+    A character vector indicating the user's choice of
+    density estimation technique to be used. Options are
+    \code{"density"}, 
+    \code{"loess"},
+    \code{"sm"} and \code{"smrep"}.
+  }
+  \item{\dots}{
+    Arguments passed to the density estimation routine
+    (\code{\link{density}}, \code{\link{loess}} or \code{sm.density})
+    selected by \code{method}.
+  }
+  \item{normalise}{If \code{TRUE}, normalise the variogram by
+    dividing it by the estimated mark variance.
+  }
+}
+\details{
+  The mark variogram \eqn{\gamma(r)}{gamma(r)}
+  of a marked point process \eqn{X}
+  is a measure of the dependence between the marks of two 
+  points of the process a distance \eqn{r} apart.
+  It is informally defined as
+  \deqn{
+    \gamma(r) = E[\frac 1 2 (M_1 - M_2)^2]
+  }{
+    gamma(r) = E[(1/2) * (M1 - M2)^2 ]
+  }
+  where \eqn{E[ ]} denotes expectation and \eqn{M_1,M_2}{M1,M2}
+  are the marks attached to two points of the process
+  a distance \eqn{r} apart.
+
+  The mark variogram of a marked point process is analogous,
+  but \bold{not equivalent}, to the variogram of a random field
+  in geostatistics. See Waelder and Stoyan (1996).
+}
+\value{
+  An object of class \code{"fv"} (see \code{\link{fv.object}}).
+  
+  Essentially a data frame containing numeric columns 
+  \item{r}{the values of the argument \eqn{r} 
+    at which the mark variogram \eqn{\gamma(r)}{gamma(r)}
+    has been  estimated
+  }
+  \item{theo}{the theoretical value of \eqn{\gamma(r)}{gamma(r)}
+    when the marks attached to different points are independent;
+    equal to the sample variance of the marks
+  }
+  together with a column or columns named 
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the function \eqn{\gamma(r)}{gamma(r)}
+  obtained by the edge corrections named.
+}
+\references{
+  Cressie, N.A.C. (1991)
+  \emph{Statistics for spatial data}.
+  John Wiley and Sons.
+
+  Mase, S. (1996) 
+  The threshold method for estimating annual rainfall.
+  \emph{Annals of the Institute of Statistical Mathematics}
+  \bold{48}, 201--213.
+
+  Waelder, O. and Stoyan, D. (1996)
+  On variograms in point process statistics.
+  \emph{Biometrical Journal} \bold{38}, 895--905.
+}
+\seealso{
+  Mark correlation function \code{\link{markcorr}} for numeric marks.
+
+  Mark connection function \code{\link{markconnect}} and 
+  multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}}
+  for factor-valued marks.
+}
+\examples{
+    # Longleaf Pine data
+    # marks represent tree diameter
+    data(longleaf)
+    # Subset of this large pattern
+    swcorner <- owin(c(0,100),c(0,100))
+    sub <- longleaf[ , swcorner]
+    # mark variogram
+    mv <- markvario(sub)
+    plot(mv)
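+
+    # normalised mark variogram, divided by the estimated mark variance
+    mvn <- markvario(sub, normalise=TRUE)
+    plot(mvn)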
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/matchingdist.Rd b/man/matchingdist.Rd
new file mode 100644
index 0000000..9418c5d
--- /dev/null
+++ b/man/matchingdist.Rd
@@ -0,0 +1,105 @@
+\name{matchingdist}
+\alias{matchingdist}
+\title{Distance for a Point Pattern Matching}
+\description{
+  Computes the distance associated with a matching between two point patterns.
+}
+\usage{
+  matchingdist(matching, type = NULL, cutoff = NULL, q = NULL)
+}
+\arguments{
+  \item{matching}{A point pattern matching (an object of class \code{"pppmatching"}).}
+  \item{type}{
+    A character string giving the type of distance to be computed.
+    One of \code{"spa"}, \code{"ace"} or \code{"mat"}. See details below. 
+  }
+  \item{cutoff}{
+    The value \eqn{> 0} at which interpoint distances are cut off.
+  }
+  \item{q}{
+    The order of the average that is applied to the interpoint distances.
+    May be \code{Inf}, in which case the maximum of the interpoint distances is taken.
+  }
+}
+\details{
+  Computes the distance specified by \code{type}, \code{cutoff}, and \code{q}
+  for a point matching. If any of these arguments are not provided, the function
+  uses the corresponding elements of \code{matching} (if available).
+
+  For the type \code{"spa"} (subpattern assignment) it is assumed that the points
+  of the point pattern with the smaller cardinality \eqn{m} are matched to an
+  \eqn{m}-point subpattern of the point pattern with the larger
+  cardinality \eqn{n} in a 1-1 way. The distance
+  is then given as the \code{q}-th order average of the \eqn{m} distances between
+  matched points (minimum of Euclidean distance and \code{cutoff})
+  and \eqn{n-m} "penalty distances" of value \code{cutoff}.
+
+  For the type \code{"ace"} (assignment only if cardinalities equal) the matching
+  is assumed to be 1-1 if the cardinalities of the point patterns are
+  the same, in which case the \code{q}-th order average of the matching distances
+  (minimum of Euclidean distance and \code{cutoff}) is taken. If the cardinalities
+  are different, the matching may be arbitrary and the distance returned is always
+  equal to \code{cutoff}.
+
+  For the type \code{"mat"} (mass transfer) it is assumed that each point of
+  the point pattern with the smaller cardinality \eqn{m} has mass \eqn{1},
+  each point of the point pattern with the larger cardinality \eqn{n}
+  has mass \eqn{m/n},
+  and fractions of these masses are matched in such a way that each point
+  contributes exactly its mass. The distance is then given as the \code{q}-th
+  order weighted average of all distances (minimum of Euclidean distance
+  and \code{cutoff}) of (partially) matched points with weights equal to the
+  fractional masses divided by \eqn{m}.
+
+  If the cardinalities of the two point patterns are equal,
+  \code{matchingdist(m, type, cutoff, q)} yields the same result
+  no matter if \code{type} is \code{"spa"}, \code{"ace"} or
+  \code{"mat"}.
+}
+\value{
+  Numeric value of the distance associated with the matching.
+}
+\author{
+  Dominic Schuhmacher
+  \email{dominic.schuhmacher@stat.unibe.ch}
+  \url{http://www.dominic.schuhmacher.name}
+}
+\seealso{
+  \code{\link{pppdist}}
+  \code{\link{pppmatching.object}}
+}
+\examples{
+  # an optimal matching
+  X <- runifpoint(20)
+  Y <- runifpoint(20)
+  m.opt <- pppdist(X, Y)
+  summary(m.opt)
+  matchingdist(m.opt)
+       # is the same as the distance given by summary(m.opt)
+  
+  # sequential nearest neighbour matching
+  # (go through all points of point pattern X in sequence
+  # and match each point with the closest point of Y that is
+  # still unmatched)
+  am <- matrix(0, 20, 20)
+  h <- matrix(c(1:20, rep(0,20)), 20, 2)
+  h[1,2] <- nncross(X[1],Y)[1,2]
+  for (i in 2:20) {
+    nn <- nncross(X[i],Y[-h[1:(i-1),2]])[1,2]
+    h[i,2] <- ((1:20)[-h[1:(i-1),2]])[nn]
+  }
+  am[h] <- 1
+  m.nn <- pppmatching(X, Y, am)
+  matchingdist(m.nn, type="spa", cutoff=1, q=1)
+       # is >= the distance obtained for m.opt
+       # in most cases strictly >
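+
+  # since the cardinalities are equal here, all three types
+  # agree (see Details):
+  matchingdist(m.nn, type="ace", cutoff=1, q=1)
+  matchingdist(m.nn, type="mat", cutoff=1, q=1)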
+
+  \dontrun{
+    par(mfrow=c(1,2))
+    plot(m.opt)
+    plot(m.nn)
+    text(X$x, X$y, 1:20, pos=1, offset=0.3, cex=0.8)
+  }
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/matclust.estK.Rd b/man/matclust.estK.Rd
new file mode 100644
index 0000000..22f776e
--- /dev/null
+++ b/man/matclust.estK.Rd
@@ -0,0 +1,173 @@
+\name{matclust.estK}
+\alias{matclust.estK}
+\title{Fit the Matern Cluster Point Process by Minimum Contrast}
+\description{
+  Fits the Matern Cluster point process to a point pattern dataset
+  by the Method of Minimum Contrast.
+}
+\usage{
+matclust.estK(X, startpar=c(kappa=1,scale=1), lambda=NULL,
+            q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...)
+}
+\arguments{
+  \item{X}{
+    Data to which the Matern Cluster model will be fitted.
+    Either a point pattern or a summary statistic.
+    See Details.
+  }
+  \item{startpar}{
+    Vector of starting values for the parameters of the
+    Matern Cluster process.
+  }
+  \item{lambda}{
+    Optional. An estimate of the intensity of the point process.
+  }
+  \item{q,p}{
+    Optional. Exponents for the contrast criterion.
+  }
+  \item{rmin, rmax}{
+    Optional. The interval of \eqn{r} values for the contrast criterion.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link[stats]{optim}}
+    to control the optimisation algorithm. See Details.
+  }
+}
+\details{
+  This algorithm fits the Matern Cluster point process model
+  to a point pattern dataset
+  by the Method of Minimum Contrast, using the \eqn{K} function.
+
+  The argument \code{X} can be either
+  \describe{
+    \item{a point pattern:}{An object of class \code{"ppp"}
+      representing a point pattern dataset. 
+      The \eqn{K} function of the point pattern will be computed
+      using \code{\link{Kest}}, and the method of minimum contrast
+      will be applied to this.
+    }
+    \item{a summary statistic:}{An object of class \code{"fv"} containing
+      the values of a summary statistic, computed for a point pattern
+      dataset. The summary statistic should be the \eqn{K} function,
+      and this object should have been obtained by a call to
+      \code{\link{Kest}} or one of its relatives.
+    }
+  }
+
+  The algorithm fits the Matern Cluster point process to \code{X},
+  by finding the parameters of the Matern Cluster model
+  which give the closest match between the
+  theoretical \eqn{K} function of the Matern Cluster process
+  and the observed \eqn{K} function.
+  For a more detailed explanation of the Method of Minimum Contrast,
+  see \code{\link{mincontrast}}.
+  
+  The Matern Cluster point process is described in \ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen
+  (2003, p. 62). It is a cluster process formed by taking a 
+  pattern of parent points, generated according to a Poisson process
+  with intensity \eqn{\kappa}{kappa}, and around each parent point,
+  generating a random number of offspring points, such that the
+  number of offspring of each parent is a Poisson random variable with mean
+  \eqn{\mu}{mu}, and the locations of the offspring points of one parent
+  are independent and uniformly distributed inside a circle of radius
+  \eqn{R}{R} centred on the parent point, where \eqn{R}{R} is equal to
+  the parameter \code{scale}. The named vector of starting values can use
+  either \code{R} or \code{scale} as the name of the second component,
+  but the latter is recommended for consistency with other cluster models.
+
+  The theoretical \eqn{K}-function of the Matern Cluster process is
+  \deqn{
+    K(r) = \pi r^2 + \frac 1 \kappa h(\frac{r}{2R})
+  }{
+    K(r) = pi r^2 + h(r/(2*R))/kappa
+  }
+  where the radius R is the parameter \code{scale} and
+  \deqn{
+    h(z) = 2 + \frac 1 \pi [ ( 8 z^2 - 4 ) \mbox{arccos}(z)
+    - 2 \mbox{arcsin}(z)
+    + 4 z \sqrt{(1 - z^2)^3}
+    - 6 z \sqrt{1 - z^2}
+    ]
+  }{
+    h(z) = 2 + (1/pi) * ((8 * z^2 - 4) * arccos(z) - 2 * arcsin(z) 
+    + 4 * z * sqrt((1 - z^2)^3) - 6 * z * sqrt(1 - z^2))
+  }
+  for \eqn{z <= 1}, and \eqn{h(z) = 1} for \eqn{z > 1}.
+  The theoretical intensity
+  of the Matern Cluster process
+  is \eqn{\lambda = \kappa \mu}{lambda = kappa * mu}.
+
+  In this algorithm, the Method of Minimum Contrast is first used to find
+  optimal values of the parameters \eqn{\kappa}{kappa}
+  and \eqn{R}{R}. Then the remaining parameter
+  \eqn{\mu}{mu} is inferred from the estimated intensity
+  \eqn{\lambda}{lambda}.
+
+  If the argument \code{lambda} is provided, then this is used
+  as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a
+  point pattern, then  \eqn{\lambda}{lambda}
+  will be estimated from \code{X}. 
+  If \code{X} is a summary statistic and \code{lambda} is missing,
+  then the intensity \eqn{\lambda}{lambda} cannot be estimated, and
+  the parameter \eqn{\mu}{mu} will be returned as \code{NA}.
+
+  The remaining arguments \code{rmin,rmax,q,p} control the
+  method of minimum contrast; see \code{\link{mincontrast}}.
+
+  The Matern Cluster process can be simulated, using
+  \code{\link{rMatClust}}.
+
+  Homogeneous or inhomogeneous Matern Cluster models can also be
+  fitted using the function \code{\link{kppm}}.
+
+  The optimisation algorithm can be controlled through the
+  additional arguments \code{"..."} which are passed to the
+  optimisation function \code{\link[stats]{optim}}. For example,
+  to constrain the parameter values to a certain range,
+  use the argument \code{method="L-BFGS-B"} to select an optimisation
+  algorithm that respects box constraints, and use the arguments
+  \code{lower} and \code{upper} to specify (vectors of) minimum and
+  maximum values for each parameter.
+}
+\value{
+  An object of class \code{"minconfit"}. There are methods for printing
+  and plotting this object. It contains the following main components:
+  \item{par }{Vector of fitted parameter values.}
+  \item{fit }{Function value table (object of class \code{"fv"})
+    containing the observed values of the summary statistic
+    (\code{observed}) and the theoretical values of the summary
+    statistic computed from the fitted model parameters.
+  }
+}
+\references{
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003).
+  Statistical Inference and Simulation for Spatial Point Processes.
+  Chapman and Hall/CRC, Boca Raton.
+
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\author{Rasmus Waagepetersen
+  \email{rw at math.auc.dk}
+  Adapted for \pkg{spatstat} by \adrian
+  
+  
+}
+\seealso{
+  \code{\link{kppm}},
+  \code{\link{lgcp.estK}},
+  \code{\link{thomas.estK}},
+  \code{\link{mincontrast}},
+  \code{\link{Kest}},
+  \code{\link{rMatClust}} to simulate the fitted model.
+}
+\examples{
+    data(redwood)
+    u <- matclust.estK(redwood, c(kappa=10, scale=0.1))
+    u
+    plot(u)
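+    # a sketch of passing optimisation controls through '...' as
+    # described above; these bounds are illustrative assumptions
+    u2 <- matclust.estK(redwood, c(kappa=10, scale=0.1),
+                        method="L-BFGS-B",
+                        lower=c(0.1, 0.001), upper=c(100, 1))
+    u2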
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/matclust.estpcf.Rd b/man/matclust.estpcf.Rd
new file mode 100644
index 0000000..f70ddb4
--- /dev/null
+++ b/man/matclust.estpcf.Rd
@@ -0,0 +1,175 @@
+\name{matclust.estpcf}
+\alias{matclust.estpcf}
+\title{Fit the Matern Cluster Point Process by Minimum Contrast Using Pair Correlation}
+\description{
+  Fits the Matern Cluster point process to a point pattern dataset
+  by the Method of Minimum Contrast using the pair correlation function.
+}
+\usage{
+matclust.estpcf(X, startpar=c(kappa=1,scale=1), lambda=NULL,
+            q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...,
+            pcfargs=list())
+}
+\arguments{
+  \item{X}{
+    Data to which the Matern Cluster model will be fitted.
+    Either a point pattern or a summary statistic.
+    See Details.
+  }
+  \item{startpar}{
+    Vector of starting values for the parameters of the
+    Matern Cluster process.
+  }
+  \item{lambda}{
+    Optional. An estimate of the intensity of the point process.
+  }
+  \item{q,p}{
+    Optional. Exponents for the contrast criterion.
+  }
+  \item{rmin, rmax}{
+    Optional. The interval of \eqn{r} values for the contrast criterion.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link[stats]{optim}}
+    to control the optimisation algorithm. See Details.
+  }
+  \item{pcfargs}{
+    Optional list containing arguments passed to \code{\link{pcf.ppp}}
+    to control the smoothing in the estimation of the
+    pair correlation function.
+  }
+}
+\details{
+  This algorithm fits the Matern Cluster point process model
+  to a point pattern dataset
+  by the Method of Minimum Contrast, using the pair correlation function.
+
+  The argument \code{X} can be either
+  \describe{
+    \item{a point pattern:}{An object of class \code{"ppp"}
+      representing a point pattern dataset. 
+      The pair correlation function of the point pattern will be computed
+      using \code{\link{pcf}}, and the method of minimum contrast
+      will be applied to this.
+    }
+    \item{a summary statistic:}{An object of class \code{"fv"} containing
+      the values of a summary statistic, computed for a point pattern
+      dataset. The summary statistic should be the pair correlation function,
+      and this object should have been obtained by a call to
+      \code{\link{pcf}} or one of its relatives.
+    }
+  }
+
+  The algorithm fits the Matern Cluster point process to \code{X},
+  by finding the parameters of the Matern Cluster model
+  which give the closest match between the
+  theoretical pair correlation function of the Matern Cluster process
+  and the observed pair correlation function.
+  For a more detailed explanation of the Method of Minimum Contrast,
+  see \code{\link{mincontrast}}. 
+  
+  The Matern Cluster point process is described in \ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen
+  (2003, p. 62). It is a cluster process formed by taking a 
+  pattern of parent points, generated according to a Poisson process
+  with intensity \eqn{\kappa}{kappa}, and around each parent point,
+  generating a random number of offspring points, such that the
+  number of offspring of each parent is a Poisson random variable with mean
+  \eqn{\mu}{mu}, and the locations of the offspring points of one parent
+  are independent and uniformly distributed inside a circle of radius
+  \eqn{R} centred on the parent point, where \eqn{R}{R} is equal to
+  the parameter \code{scale}. The named vector of starting values can use
+  either \code{R} or \code{scale} as the name of the second component,
+  but the latter is recommended for consistency with other cluster models.
+
+  The theoretical pair correlation function of the Matern Cluster process is
+  \deqn{
+    g(r) = 1 + \frac 1 {4\pi R \kappa r} h(\frac{r}{2R})
+  }{
+    g(r) = 1 + h(r/(2*R))/(4 * pi * R * kappa * r)
+  }
+  where the radius R is the parameter \code{scale} and
+  \deqn{
+    h(z) = \frac {16} \pi [ z \mbox{arccos}(z) - z^2 \sqrt{1 - z^2} ]
+  }{
+    h(z) = (16/pi) * (z * arccos(z) - z^2 * sqrt(1 - z^2))
+  }
+  for \eqn{z <= 1}, and \eqn{h(z) = 0} for \eqn{z > 1}.
+  The theoretical intensity
+  of the Matern Cluster process
+  is \eqn{\lambda = \kappa \mu}{lambda = kappa * mu}.
+
+  In this algorithm, the Method of Minimum Contrast is first used to find
+  optimal values of the parameters \eqn{\kappa}{kappa}
+  and \eqn{R}{R}. Then the remaining parameter
+  \eqn{\mu}{mu} is inferred from the estimated intensity
+  \eqn{\lambda}{lambda}.
+
+  If the argument \code{lambda} is provided, then this is used
+  as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a
+  point pattern, then  \eqn{\lambda}{lambda}
+  will be estimated from \code{X}. 
+  If \code{X} is a summary statistic and \code{lambda} is missing,
+  then the intensity \eqn{\lambda}{lambda} cannot be estimated, and
+  the parameter \eqn{\mu}{mu} will be returned as \code{NA}.
+
+  The remaining arguments \code{rmin,rmax,q,p} control the
+  method of minimum contrast; see \code{\link{mincontrast}}.
+
+  The Matern Cluster process can be simulated, using
+  \code{\link{rMatClust}}.
+
+  Homogeneous or inhomogeneous Matern Cluster models can also be
+  fitted using the function \code{\link{kppm}}.
+
+  The optimisation algorithm can be controlled through the
+  additional arguments \code{"..."} which are passed to the
+  optimisation function \code{\link[stats]{optim}}. For example,
+  to constrain the parameter values to a certain range,
+  use the argument \code{method="L-BFGS-B"} to select an optimisation
+  algorithm that respects box constraints, and use the arguments
+  \code{lower} and \code{upper} to specify (vectors of) minimum and
+  maximum values for each parameter.
+}
+\value{
+  An object of class \code{"minconfit"}. There are methods for printing
+  and plotting this object. It contains the following main components:
+  \item{par }{Vector of fitted parameter values.}
+  \item{fit }{Function value table (object of class \code{"fv"})
+    containing the observed values of the summary statistic
+    (\code{observed}) and the theoretical values of the summary
+    statistic computed from the fitted model parameters.
+  }
+}
+\references{
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003).
+  Statistical Inference and Simulation for Spatial Point Processes.
+  Chapman and Hall/CRC, Boca Raton.
+
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\author{
+  \adrian
+  
+  
+}
+\seealso{
+  \code{\link{kppm}},
+  \code{\link{matclust.estK}},
+  \code{\link{thomas.estpcf}},
+  \code{\link{thomas.estK}},
+  \code{\link{lgcp.estK}},
+  \code{\link{mincontrast}},
+  \code{\link{pcf}},
+  \code{\link{rMatClust}} to simulate the fitted model.
+}
+\examples{
+    data(redwood)
+    u <- matclust.estpcf(redwood, c(kappa=10, scale=0.1))
+    u
+    plot(u, legendpos="topright")
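+    # a sketch of controlling the pair correlation estimate via
+    # 'pcfargs'; the bandwidth coefficient is an illustrative assumption
+    u2 <- matclust.estpcf(redwood, c(kappa=10, scale=0.1),
+                          pcfargs=list(stoyan=0.1))
+    u2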
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/matrixpower.Rd b/man/matrixpower.Rd
new file mode 100644
index 0000000..665c93a
--- /dev/null
+++ b/man/matrixpower.Rd
@@ -0,0 +1,65 @@
+\name{matrixpower}
+\alias{matrixpower}
+\alias{matrixsqrt}
+\alias{matrixinvsqrt}
+\title{
+  Power of a Matrix
+}
+\description{
+  Evaluate a specified power of a matrix.
+}
+\usage{
+  matrixpower(x, power, complexOK = TRUE)
+  matrixsqrt(x, complexOK = TRUE)
+  matrixinvsqrt(x, complexOK = TRUE)
+}
+\arguments{
+  \item{x}{
+    A square matrix containing numeric or complex values.
+  }
+  \item{power}{
+    A numeric value giving the power (exponent) to which \code{x} should
+    be raised.
+  }
+  \item{complexOK}{
+    Logical value indicating whether the result is allowed to be complex.
+  }
+}
+\details{
+  These functions raise the matrix \code{x} to the desired power:
+  \code{matrixsqrt} takes the square root, \code{matrixinvsqrt} takes
+  the inverse square root, and \code{matrixpower} takes the specified
+  power of \code{x}.
+  
+  Up to numerical error, \code{matrixpower(x, 2)} should be equivalent
+  to \code{x \%*\% x}, and \code{matrixpower(x, -1)} should be
+  equivalent to \code{solve(x)}, the inverse of \code{x}. 
+
+  The square root \code{y <- matrixsqrt(x)} should satisfy
+  \code{y \%*\% y = x}. The inverse square root
+  \code{z <- matrixinvsqrt(x)} should satisfy \code{z \%*\% z = solve(x)}.
+
+  Computations are performed using the eigen decomposition
+  (\code{\link{eigen}}).
+}
+\value{
+  A matrix of the same size as \code{x} containing
+  numeric or complex values.
+}
+\author{
+  \adrian.
+}
+\seealso{
+  \code{\link[base]{eigen}}, \code{\link[base]{svd}}
+}
+\examples{
+  x <- matrix(c(10,2,2,1), 2, 2)
+  y <- matrixsqrt(x)
+  y
+  y \%*\% y 
+  z <- matrixinvsqrt(x)
+  z \%*\% y
+  matrixpower(x, 0.1)
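+  # sanity checks based on the identities stated above;
+  # both differences should be numerically zero
+  matrixpower(x, 2) - x \%*\% x
+  matrixpower(x, -1) - solve(x)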
+}
+\keyword{algebra}
+\keyword{array}
diff --git a/man/maxnndist.Rd b/man/maxnndist.Rd
new file mode 100644
index 0000000..f2766e9
--- /dev/null
+++ b/man/maxnndist.Rd
@@ -0,0 +1,63 @@
+\name{maxnndist}
+\alias{maxnndist}
+\alias{minnndist}
+\title{
+  Compute Minimum or Maximum Nearest-Neighbour Distance
+}
+\description{
+  A faster way to compute the minimum or maximum
+  nearest-neighbour distance in a point pattern.
+}
+\usage{
+minnndist(X, positive=FALSE)
+maxnndist(X, positive=FALSE)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{positive}{
+    Logical. If \code{FALSE} (the default), compute the usual
+    nearest-neighbour distance.
+    If \code{TRUE}, ignore coincident points, so that the
+    nearest neighbour distance for each point is greater than zero.
+  }
+}
+\details{
+  These functions find the minimum and maximum values
+  of nearest-neighbour distances in the point pattern \code{X}.
+  \code{minnndist(X)} and \code{maxnndist(X)} are 
+  equivalent to, but faster than, \code{min(nndist(X))}
+  and \code{max(nndist(X))} respectively.
+
+  The value is \code{NA} if \code{npoints(X) < 2}.
+}
+\value{
+  A single numeric value (possibly \code{NA}).
+}
+\seealso{
+  \code{\link{nndist}}
+}
+\examples{
+  min(nndist(swedishpines))
+  minnndist(swedishpines)
+
+  max(nndist(swedishpines))
+  maxnndist(swedishpines)
+
+  minnndist(lansing, positive=TRUE)
+
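+  # with a duplicated point the minimum distance is zero
+  # unless positive=TRUE (a sketch of the 'positive' argument)
+  Y <- superimpose(swedishpines, swedishpines[1])
+  minnndist(Y)
+  minnndist(Y, positive=TRUE)
+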
+  if(interactive()) {
+     X <- rpoispp(1e6)
+     system.time(min(nndist(X)))
+     system.time(minnndist(X))
+  }
+}
+\author{
+  \adrian,
+  \rolf and \ege.
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/mean.im.Rd b/man/mean.im.Rd
new file mode 100644
index 0000000..b0821ee
--- /dev/null
+++ b/man/mean.im.Rd
@@ -0,0 +1,87 @@
+\name{mean.im} %DontDeclareMethods
+\alias{mean.im}
+\alias{median.im}
+\title{Mean and Median of Pixel Values in an Image}
+\description{
+  Calculates the mean or median
+  of the pixel values in a pixel image.
+}
+%NAMESPACE S3method("mean", "im")
+%NAMESPACE S3method("median", "im")
+\usage{
+## S3 method for class 'im'
+## mean(x, trim=0, na.rm=TRUE, ...) 
+
+## S3 method for class 'im'
+## median(x, na.rm=TRUE)        [R < 3.4.0]
+## median(x, na.rm=TRUE, ...)   [R >= 3.4.0]
+}
+\arguments{
+  \item{x}{A pixel image (object of class \code{"im"}).}
+  \item{na.rm}{
+    Logical value indicating whether \code{NA} values should be
+    stripped before the computation proceeds.
+  }
+  \item{trim}{
+    The fraction (0 to 0.5) of pixel values to be trimmed from
+    each end of their range, before the mean is computed.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  These functions calculate the mean and median
+  of the pixel values in the image \code{x}.
+
+  An object of class \code{"im"}
+  describes a pixel image. See \code{\link{im.object}}
+  for details of this class.
+
+  The function \code{mean.im} is a method for the generic
+  function \code{\link[base]{mean}} for the class \code{"im"}.
+  Similarly \code{median.im} is a method for the generic
+  \code{\link[stats]{median}}.
+  
+  If the image \code{x} is logical-valued, the mean value of \code{x} is
+  the fraction of pixels that have the value \code{TRUE}. The median is
+  not defined. 
+
+  If the image \code{x} is factor-valued, then the mean of \code{x}
+  is the mean of the integer codes of the pixel values. The median is
+  not defined.
+
+  Other mathematical operations on images are supported by
+  \code{\link{Math.im}}, \code{\link{Summary.im}}
+  and \code{\link{Complex.im}}.
+
+  Other information about an image can be obtained using
+  \code{\link{summary.im}} or \code{\link{quantile.im}}.
+}
+\value{
+  A single number.
+}
+\seealso{
+  \code{\link{Math.im}} for other operations.
+
+  Generics and default methods:
+  \code{\link[base]{mean}},
+  \code{\link[stats]{median}}.
+  
+  \code{\link{quantile.im}},
+  \code{\link{anyNA.im}},
+  \code{\link{im.object}},
+  \code{\link{summary.im}}.
+}
+\examples{
+  X <- as.im(function(x,y) {x^2}, unit.square())
+  mean(X)
+  median(X)
+  mean(X, trim=0.05)
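+  # for a logical-valued image, the mean is the fraction of TRUE pixels
+  mean(X > 0.5)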
+}
+\author{
+  \spatstatAuthors and Kassel Hingee.
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{univar}
diff --git a/man/mean.linim.Rd b/man/mean.linim.Rd
new file mode 100644
index 0000000..5a62881
--- /dev/null
+++ b/man/mean.linim.Rd
@@ -0,0 +1,65 @@
+\name{mean.linim}
+\alias{mean.linim}
+\alias{median.linim}
+\alias{quantile.linim}
+\title{Mean, Median, Quantiles of Pixel Values on a Linear Network}
+\description{
+  Calculates the mean, median, or quantiles
+  of the pixel values in a pixel image on a linear network.
+}
+\usage{
+  \method{mean}{linim}(x, \dots)
+
+  \method{median}{linim}(x, \dots)
+
+  \method{quantile}{linim}(x, probs=seq(0,1,0.25), \dots)
+}
+\arguments{
+  \item{x}{
+    A pixel image on a linear network (object of class
+    \code{"linim"}).
+  }
+  \item{probs}{
+    Vector of probabilities for which quantiles should be
+    calculated.
+  } 
+  \item{\dots}{Arguments passed to other methods.}
+}
+\details{
+  These functions calculate the mean, median and quantiles
+  of the pixel values in the image
+  \code{x} on a linear network.
+
+  An object of class \code{"linim"}
+  describes a pixel image on a linear network. See \code{\link{linim}}.
+
+  The functions described here are methods for the 
+  generic \code{\link{mean}}, \code{\link[stats]{median}}
+  and \code{\link[stats]{quantile}} for the class \code{"linim"}.
+}
+\value{
+  For \code{mean} and \code{median}, a single number.
+  For \code{quantile}, a numeric vector of the same length as \code{probs}.
+}
+\seealso{
+  \code{\link{mean}},
+  \code{\link[stats]{median}},
+  \code{\link[stats]{quantile}},
+
+  \code{\link{mean.im}}.
+}
+\examples{
+  M <- as.mask.psp(as.psp(simplenet))
+  Z <- as.im(function(x,y) {x-y}, W=M)
+  X <- linim(simplenet, Z)
+  X
+  mean(X)
+  median(X)
+  quantile(X)
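+  # the 0.5 quantile should agree with the median
+  quantile(X, probs=0.5)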
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{univar}
diff --git a/man/measureVariation.Rd b/man/measureVariation.Rd
new file mode 100644
index 0000000..e9875bb
--- /dev/null
+++ b/man/measureVariation.Rd
@@ -0,0 +1,77 @@
+\name{measureVariation}
+\alias{measureVariation}
+\alias{measurePositive}
+\alias{measureNegative}
+\alias{totalVariation}
+\title{
+  Positive and Negative Parts, and Variation, of a Measure
+}
+\description{
+  Given a measure \code{A} (object of class \code{"msr"})
+  these functions find the positive part, negative part and variation
+  of \code{A}.
+}
+\usage{
+measurePositive(x)
+measureNegative(x)
+measureVariation(x)
+totalVariation(x)
+}
+\arguments{
+  \item{x}{
+    A measure (object of class \code{"msr"}).
+  }
+}
+\details{
+  The functions \code{measurePositive} and \code{measureNegative}
+  return the positive and negative parts of the measure,
+  and \code{measureVariation} returns the variation (sum of positive and
+  negative parts). The function \code{totalVariation} returns the total
+  variation norm.
+
+  If \eqn{\mu} is a signed measure,
+  it can be represented as
+  \deqn{\mu = \mu_{+} - \mu_{-}}{mu = mu[+] - mu[-]}
+  where \eqn{\mu_{+}}{mu[+]} and \eqn{\mu_{-}}{mu[-]}
+  are \emph{nonnegative} measures called the positive and negative
+  parts of \eqn{\mu}.
+  In a nutshell, the positive part of \eqn{\mu}
+  consists of all positive contributions or increments,
+  and the negative part consists of all negative contributions
+  multiplied by \code{-1}. 
+
+  The variation \eqn{|\mu|} is defined by 
+  \deqn{|\mu| = \mu_{+} + \mu_{-}}{|mu| = mu[+] + mu[-]}
+  and is also a nonnegative measure.
+
+  The total variation norm is the integral of the variation. 
+}
+\value{
+  The result of \code{measurePositive}, \code{measureNegative}
+  and \code{measureVariation} is another measure (object of class \code{"msr"})
+  on the same spatial domain.
+  The result of \code{totalVariation} is a non-negative number.
+}
+\references{
+  Halmos, P.R. (1950) \emph{Measure Theory}. Van Nostrand.
+}
+\author{
+  \adrian.
+}
+\seealso{
+  \code{\link{msr}}, \code{\link{with.msr}}, \code{\link{split.msr}}
+}
+\examples{
+   X <- rpoispp(function(x,y) { exp(3+3*x) })
+   fit <- ppm(X, ~x+y)
+   rp <- residuals(fit, type="pearson")
+
+   measurePositive(rp)
+   measureNegative(rp)
+   measureVariation(rp)
+
+   # total variation norm
+   totalVariation(rp)
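+
+   # the total variation norm should equal the integral of the
+   # variation measure (a sketch using the integral method for measures)
+   integral(measureVariation(rp))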
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/mergeLevels.Rd b/man/mergeLevels.Rd
new file mode 100644
index 0000000..0e74a63
--- /dev/null
+++ b/man/mergeLevels.Rd
@@ -0,0 +1,75 @@
+\name{mergeLevels}
+\alias{mergeLevels}
+\title{
+  Merge Levels of a Factor
+}
+\description{
+  Specified levels of the factor will be merged into a single level.
+}
+\usage{
+mergeLevels(.f, \dots)
+}
+\arguments{
+  \item{.f}{
+    A factor (or a factor-valued pixel image
+    or a point pattern with factor-valued marks).
+  }
+  \item{\dots}{
+    List of \code{name=value} pairs, where \code{name} is the
+    new merged level, and \code{value} is the vector of old
+    levels that will be merged.
+  }
+}
+\details{
+  This utility function takes a factor \code{.f}
+  and merges specified levels of the factor.
+
+  The grouping is specified by the arguments \code{\dots}
+  which must each be given in the form \code{new=old}, where
+  \code{new} is the name for the new merged level,
+  and \code{old} is a character vector containing the old levels
+  that are to be merged.
+
+  The result is a new factor (or factor-valued object),
+  in which the levels listed in \code{old}
+  have been replaced by a single level \code{new}.
+
+  An argument of the form \code{name=character(0)} or \code{name=NULL}
+  is interpreted to mean that all other levels of the old factor
+  should be mapped to \code{name}.
+}
+\value{
+  Another factor of the same length as \code{.f}
+  (or object of the same kind as \code{.f}).
+}
+\section{Tips for manipulating factor levels}{
+  To remove unused levels from a factor \code{f},
+  just type \code{f <- factor(f)}.
+
+  To change the ordering of levels in a factor,
+  use \code{\link[base]{factor}(f, levels=l)} or
+  \code{\link[stats]{relevel}(f, ref)}.
+}
+\seealso{
+  \code{\link[base]{factor}},
+  \code{\link[stats]{relevel}}
+}
+\author{
+\adrian,
+\rolf
+and \ege
+}
+\examples{
+   likert <- c("Strongly Agree", "Agree", "Neutral",
+               "Disagree", "Strongly Disagree")
+   answers <- factor(sample(likert, 15, replace=TRUE), levels=likert)
+   answers
+   mergeLevels(answers, Positive=c("Strongly Agree", "Agree"),
+                        Negative=c("Strongly Disagree", "Disagree"))
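+
+   # an argument of the form name=NULL maps all remaining levels:
+   mergeLevels(answers, Positive=c("Strongly Agree", "Agree"),
+                        Other=NULL)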
+}
+\keyword{manip}
+\keyword{spatial}
diff --git a/man/methods.box3.Rd b/man/methods.box3.Rd
new file mode 100644
index 0000000..0d18667
--- /dev/null
+++ b/man/methods.box3.Rd
@@ -0,0 +1,61 @@
+\name{methods.box3}
+\Rdversion{1.1}
+\alias{methods.box3}  %DoNotExport
+\alias{print.box3}
+\alias{unitname.box3}
+\alias{unitname<-.box3}
+\title{
+  Methods for Three-Dimensional Box
+}
+\description{
+  Methods for class \code{"box3"}.
+}
+\usage{
+  \method{print}{box3}(x, ...)
+  \method{unitname}{box3}(x) 
+  \method{unitname}{box3}(x) <- value
+}
+\arguments{
+  \item{x}{
+    Object of class \code{"box3"} representing a three-dimensional box.
+  }
+  \item{\dots}{
+    Other arguments passed to \code{print.default}.
+  }
+  \item{value}{
+    Name of the unit of length. See \code{\link{unitname}}.
+  }
+}
+\details{
+  These are methods for the generic functions
+  \code{\link{print}} and \code{\link{unitname}}
+  for the class \code{"box3"} of three-dimensional boxes.
+
+  The \code{print} method prints a description of the box,
+  while the \code{unitname} method extracts the name of the unit of
+  length in which the box coordinates are expressed.
+}
+\value{
+  For \code{print.box3} the value is \code{NULL}.
+  For \code{unitname.box3} an object of class \code{"units"}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{box3}},
+  \code{\link{print}},
+  \code{\link{unitname}}
+}
+\examples{
+   X <- box3(c(0,10),c(0,10),c(0,5), unitname=c("metre", "metres"))
+   X
+   unitname(X)
+   # Northern European usage
+   unitname(X) <- "meter"
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/methods.boxx.Rd b/man/methods.boxx.Rd
new file mode 100644
index 0000000..0743e8e
--- /dev/null
+++ b/man/methods.boxx.Rd
@@ -0,0 +1,61 @@
+\name{methods.boxx}
+\Rdversion{1.1}
+\alias{methods.boxx}  %DoNotExport
+\alias{print.boxx}
+\alias{unitname.boxx}
+\alias{unitname<-.boxx}
+\title{
+  Methods for Multi-Dimensional Box
+}
+\description{
+  Methods for class \code{"boxx"}.
+}
+\usage{
+  \method{print}{boxx}(x, ...)
+  \method{unitname}{boxx}(x)
+  \method{unitname}{boxx}(x) <- value
+}
+\arguments{
+  \item{x}{
+    Object of class \code{"boxx"} representing a multi-dimensional box.
+  }
+  \item{\dots}{
+    Other arguments passed to \code{print.default}.
+  }
+  \item{value}{
+    Name of the unit of length. See \code{\link{unitname}}.
+  }
+}
+\details{
+  These are methods for the generic functions
+  \code{\link{print}} and \code{\link{unitname}}
+  for the class \code{"boxx"} of multi-dimensional boxes.
+
+  The \code{print} method prints a description of the box,
+  while the \code{unitname} method extracts the name of the unit of
+  length in which the box coordinates are expressed.
+}
+\value{
+  For \code{print.boxx} the value is \code{NULL}.
+  For \code{unitname.boxx} an object of class \code{"units"}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{boxx}},
+  \code{\link{print}},
+  \code{\link{unitname}}
+}
+\examples{
+   X <- boxx(c(0,10),c(0,10),c(0,5),c(0,1), unitname=c("metre", "metres"))
+   X
+   unitname(X)
+   # Northern European usage
+   unitname(X) <- "meter"
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/methods.dppm.Rd b/man/methods.dppm.Rd
new file mode 100644
index 0000000..37eaa9b
--- /dev/null
+++ b/man/methods.dppm.Rd
@@ -0,0 +1,67 @@
+\name{methods.dppm}
+\alias{methods.dppm} %DoNotExport
+\alias{coef.dppm}
+\alias{formula.dppm}
+\alias{print.dppm}
+\alias{terms.dppm}
+\alias{labels.dppm}
+\title{
+  Methods for Determinantal Point Process Models
+}
+\description{
+  These are methods for the class \code{"dppm"}.
+}
+\usage{
+\method{coef}{dppm}(object, \dots)
+\method{formula}{dppm}(x, \dots)
+\method{print}{dppm}(x, ...)
+\method{terms}{dppm}(x, \dots)
+\method{labels}{dppm}(object, \dots)
+}
+\arguments{
+  \item{x,object}{
+    An object of class \code{"dppm"},
+    representing a fitted determinantal point process model.
+  }
+  \item{\dots}{
+    Arguments passed to other methods.
+  }
+}
+\details{
+  These functions are methods for the generic commands
+  \code{\link{coef}},
+  \code{\link{formula}},
+  \code{\link{print}},
+  \code{\link{terms}} and
+  \code{\link{labels}}
+  for the class \code{"dppm"}.
+
+  An object of class \code{"dppm"} represents a fitted
+  determinantal point process model. It is obtained from \code{\link{dppm}}.
+
+  The method \code{coef.dppm} returns the vector of
+  \emph{regression coefficients} of the fitted model.
+  It does not return the interaction parameters.
+}
+\value{
+  See the help files for the corresponding generic functions.
+}
+\author{
+  \adrian
+  
+  
+}
+\seealso{
+  \code{\link{dppm}},  \code{\link{plot.dppm}},
+  \code{\link{predict.dppm}}, \code{\link{simulate.dppm}},
+  \code{\link{as.ppm.dppm}}.
+}
+\examples{
+  fit <- dppm(swedishpines ~ x + y, dppGauss())
+  coef(fit)
+  formula(fit)
+  tf <- terms(fit)
+  labels(fit)
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/methods.fii.Rd b/man/methods.fii.Rd
new file mode 100644
index 0000000..fff5a5e
--- /dev/null
+++ b/man/methods.fii.Rd
@@ -0,0 +1,100 @@
+\name{methods.fii}
+\alias{methods.fii} %DoNotExport
+\Rdversion{1.1}
+\alias{print.fii}
+\alias{plot.fii}
+\alias{coef.fii}
+\alias{summary.fii}
+\alias{print.summary.fii}
+\alias{coef.summary.fii}
+\title{
+  Methods for Fitted Interactions
+}
+\description{
+  These are methods specifically for the class \code{"fii"} of
+  fitted interpoint interactions.
+}
+\usage{
+\method{print}{fii}(x, \dots)
+
+\method{coef}{fii}(object, \dots)
+
+\method{plot}{fii}(x, \dots)
+
+\method{summary}{fii}(object,\dots)
+
+\method{print}{summary.fii}(x, ...)
+
+\method{coef}{summary.fii}(object, ...)
+}
+\arguments{
+  \item{x,object}{
+    An object of class \code{"fii"} representing a fitted
+    interpoint interaction.
+  }
+  \item{\dots}{
+    Arguments passed to other methods.
+  }
+}
+\details{
+  These are methods for the class \code{"fii"}.
+  An object of class \code{"fii"} represents a fitted interpoint
+  interaction. It is usually obtained by
+  using the command \code{\link{fitin}} to extract the fitted
+  interaction part of a fitted point process model. 
+  See \code{\link{fitin}} for further explanation of this class.
+  
+  The commands listed here are methods for the generic functions
+  \code{\link{print}},
+  \code{\link{summary}},
+  \code{\link{plot}} 
+  and 
+  \code{\link{coef}}
+  for objects of the class \code{"fii"}.
+
+  Following the usual convention, \code{summary.fii} returns an object of class
+  \code{summary.fii}, for which there is a print method.
+  The effect is that, when the user types \code{summary(x)},
+  the summary is printed, but when the user types \code{y <- summary(x)},
+  the summary information is saved.
+
+  The method \code{coef.fii} extracts the canonical coefficients of
+  the fitted interaction, and returns them as a numeric vector.
+  The method \code{coef.summary.fii} transforms these values into
+  quantities that are more easily interpretable, in a format that
+  depends on the particular model.
+  
+  There are also methods for the generic commands \code{\link{reach}} and
+  \code{\link{as.interact}}, described elsewhere.
+}
+\value{
+  The \code{print} and \code{plot} methods return \code{NULL}.
+
+  The \code{summary} method returns an object of class
+  \code{summary.fii}.
+
+  \code{coef.fii} returns a numeric vector.
+  \code{coef.summary.fii} returns data whose structure
+  depends on the model.
+}
+\author{
+  \adrian
+  
+  
+}
+\seealso{
+  \code{\link{fitin}},
+  \code{\link{reach.fii}},
+  \code{\link{as.interact.fii}}
+}
+\examples{
+  mod <- ppm(cells, ~1, Strauss(0.1))
+  f <- fitin(mod)
+  f
+  summary(f)
+  plot(f)
+  coef(f)
+  coef(summary(f))
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/methods.funxy.Rd b/man/methods.funxy.Rd
new file mode 100644
index 0000000..eef9ff8
--- /dev/null
+++ b/man/methods.funxy.Rd
@@ -0,0 +1,74 @@
+\name{methods.funxy}
+\alias{methods.funxy} %DoNotExport
+\alias{contour.funxy}
+\alias{persp.funxy}
+\alias{plot.funxy}
+\Rdversion{1.1}
+\title{
+  Methods for Spatial Functions
+}
+\description{
+  Methods for objects of the class \code{"funxy"}.
+}
+\usage{
+\method{contour}{funxy}(x, \dots)
+\method{persp}{funxy}(x, \dots)
+\method{plot}{funxy}(x, \dots)
+}
+\arguments{
+  \item{x}{
+    Object of class \code{"funxy"} representing a
+    function of \eqn{x,y} coordinates.
+  }
+  \item{\dots}{
+    Named arguments controlling the plot. See Details.
+  }
+}
+\details{
+  These are methods for the generic functions
+  \code{\link{plot}},
+  \code{\link{contour}}
+  and \code{\link{persp}}
+  for the class \code{"funxy"} of spatial functions.
+
+  Objects of class \code{"funxy"} are created, for example,
+  by the commands \code{\link{distfun}} and \code{\link{funxy}}.
+  
+  The \code{plot}, \code{contour} and \code{persp} methods first convert
+  \code{x} to a pixel image object using \code{\link{as.im}},
+  then display it using \code{\link{plot.im}}, \code{\link{contour.im}} or
+  \code{\link{persp.im}}.
+
+  Additional arguments \code{\dots} are either
+  passed to \code{\link{as.im.function}} to
+  control the spatial resolution of the pixel image, or passed to 
+  \code{\link{contour.im}},
+  \code{\link{persp.im}} or 
+  \code{\link{plot.im}} to control the appearance of the plot. 
+}
+\value{
+  \code{NULL}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{funxy}},
+  \code{\link{distfun}},
+  \code{\link{as.im}},
+  \code{\link{plot.im}},
+  \code{\link{persp.im}},
+  \code{\link{contour.im}},
+  \code{\link{spatstat.options}}
+}
+\examples{
+   data(letterR)
+   f <- distfun(letterR)
+   contour(f)
+   contour(f, W=owin(c(1,5),c(-1,4)), eps=0.1)
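+   # resolution arguments are passed to as.im, appearance arguments
+   # to plot.im; dimyx=64 is an illustrative choice
+   plot(f, dimyx=64)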
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/methods.kppm.Rd b/man/methods.kppm.Rd
new file mode 100644
index 0000000..cadf64a
--- /dev/null
+++ b/man/methods.kppm.Rd
@@ -0,0 +1,69 @@
+\name{methods.kppm}
+\alias{methods.kppm} %DoNotExport
+\alias{coef.kppm}
+\alias{formula.kppm}
+\alias{print.kppm}
+\alias{terms.kppm}
+\alias{labels.kppm}
+\title{
+  Methods for Cluster Point Process Models
+}
+\description{
+  These are methods for the class \code{"kppm"}. 
+}
+\usage{
+\method{coef}{kppm}(object, \dots)
+\method{formula}{kppm}(x, \dots)
+\method{print}{kppm}(x, ...)
+\method{terms}{kppm}(x, \dots)
+\method{labels}{kppm}(object, \dots)
+}
+\arguments{
+  \item{x,object}{
+    An object of class \code{"kppm"},
+    representing a fitted cluster point process model.
+  }
+  \item{\dots}{
+    Arguments passed to other methods.
+  }
+}
+\details{
+  These functions are methods for the generic commands
+  \code{\link{coef}},
+  \code{\link{formula}},
+  \code{\link{print}},
+  \code{\link{terms}} and
+  \code{\link{labels}}
+  for the class \code{"kppm"}.
+
+  An object of class \code{"kppm"} represents a fitted
+  cluster point process model. It is obtained from \code{\link{kppm}}.
+  
+  The method \code{coef.kppm} returns the vector of
+  \emph{regression coefficients} of the fitted model.
+  It does not return the clustering parameters.
+}
+\value{
+  See the help files for the corresponding generic functions.
+}
+\author{
+  \adrian
+  
+  
+}
+\seealso{
+  \code{\link{kppm}},  \code{\link{plot.kppm}},
+  \code{\link{predict.kppm}}, \code{\link{simulate.kppm}},
+  \code{\link{update.kppm}}, \code{\link{vcov.kppm}},
+  \code{\link{as.ppm.kppm}}.
+}
+\examples{
+  data(redwood)
+  fit <- kppm(redwood ~ x, "MatClust")
+  coef(fit)
+  formula(fit)
+  tf <- terms(fit)
+  labels(fit)
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/methods.layered.Rd b/man/methods.layered.Rd
new file mode 100644
index 0000000..ed1ce75
--- /dev/null
+++ b/man/methods.layered.Rd
@@ -0,0 +1,93 @@
+\name{methods.layered} 
+\Rdversion{1.1}
+\alias{methods.layered} %DoNotExport
+\alias{shift.layered}
+\alias{reflect.layered}
+\alias{flipxy.layered}
+\alias{rotate.layered}
+\alias{affine.layered}
+\alias{rescale.layered}
+\alias{scalardilate.layered}
+\title{
+  Methods for Layered Objects
+}
+\description{
+  Methods for geometrical transformations of
+  layered objects (class \code{"layered"}).
+}
+\usage{
+  \method{shift}{layered}(X, vec=c(0,0), ...)
+
+  \method{rotate}{layered}(X, ..., centre=NULL)
+
+  \method{affine}{layered}(X, ...)
+
+  \method{reflect}{layered}(X)
+
+  \method{flipxy}{layered}(X)
+
+  \method{rescale}{layered}(X, s, unitname)
+
+  \method{scalardilate}{layered}(X, ...)
+}
+\arguments{
+  \item{X}{
+    Object of class \code{"layered"}.
+  }
+  \item{\dots}{
+    Arguments passed to the relevant methods
+    when applying the operation to each layer of \code{X}.
+  }
+  \item{s}{
+    Rescaling factor passed to the relevant method for
+    \code{\link{rescale}}. May be missing.
+  }
+  \item{vec}{
+    Shift vector (numeric vector of length 2).
+  }
+  \item{centre}{
+    Centre of rotation.
+    Either a vector of length 2, or a character string
+    (partially matched to \code{"centroid"}, \code{"midpoint"}
+    or \code{"bottomleft"}).
+    The default is the coordinate origin \code{c(0,0)}.
+  }
+  \item{unitname}{
+    Optional. New name for the unit of length.
+    A value acceptable to the function \code{\link{unitname<-}}.
+  }
+}
+\details{
+  These are methods for the generic functions
+  \code{\link{shift}},
+  \code{\link{rotate}},
+  \code{\link{reflect}},
+  \code{\link{affine}},
+  \code{\link{rescale}},
+  \code{\link{scalardilate}} and
+  \code{\link{flipxy}}
+  for the class of layered objects.
+
+  A layered object represents data that should be plotted in
+  successive layers, for example, a background and a foreground.
+  See \code{\link{layered}}.
+}
+\value{
+  Another object of class \code{"layered"}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{layered}}
+}
+\examples{
+  L <- layered(letterR, runifpoint(20, letterR))
+  plot(L)
+  plot(rotate(L, pi/4))
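+  # the same transformation is applied to every layer
+  plot(shift(L, c(1,0)))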
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/methods.linfun.Rd b/man/methods.linfun.Rd
new file mode 100644
index 0000000..f9f42f4
--- /dev/null
+++ b/man/methods.linfun.Rd
@@ -0,0 +1,90 @@
+\name{methods.linfun}
+\Rdversion{1.1}
+\alias{methods.linfun} %DoNotExport
+\alias{print.linfun}
+\alias{summary.linfun}
+\alias{plot.linfun}
+\alias{as.data.frame.linfun}
+\alias{as.owin.linfun}
+\alias{as.function.linfun}
+\title{
+   Methods for Functions on Linear Network
+}
+\description{
+  Methods for the class \code{"linfun"} of functions on a linear network.
+}
+\usage{
+  \method{print}{linfun}(x, \dots)
+
+  \method{summary}{linfun}(object, \dots)
+
+  \method{plot}{linfun}(x, \dots, L=NULL, main) 
+
+  \method{as.data.frame}{linfun}(x, \dots)
+
+  \method{as.owin}{linfun}(W, \dots)
+
+  \method{as.function}{linfun}(x, \dots)
+}
+\arguments{
+  \item{x,object,W}{
+    A function on a linear network
+    (object of class \code{"linfun"}).
+  }
+  \item{L}{A linear network}
+  \item{\dots}{
+    Extra arguments passed to \code{\link{as.linim}}, 
+    \code{\link{plot.linim}}, \code{\link{plot.im}}
+    or \code{\link{print.default}}, or arguments passed to
+    \code{x} if it is a function.
+  }
+  \item{main}{Main title for plot.}
+}
+\details{
+  These are methods for the generic functions
+  \code{\link{plot}}, \code{\link{print}}, \code{\link{summary}},
+  \code{\link{as.data.frame}} and \code{\link{as.function}},
+  and for the \pkg{spatstat} generic function
+  \code{\link{as.owin}}.
+
+  An object of class \code{"linfun"} represents a
+  mathematical function that could be evaluated at any location
+  on a linear network. It is essentially an \R \code{function} with some
+  extra attributes.
+
+  The method \code{as.owin.linfun} extracts the two-dimensional spatial
+  window containing the linear network.
+
+  The method \code{plot.linfun} first converts the function to a
+  pixel image using \code{\link{as.linim.linfun}}, then plots the image using
+  \code{\link{plot.linim}}.
+
+  Note that a \code{linfun} function may have additional arguments,
+  other than those which specify the location on the network
+  (see \code{\link{linfun}}). These additional arguments may be passed
+  to \code{plot.linfun}. 
+}
+\value{
+  For \code{print.linfun} and \code{summary.linfun} the result is \code{NULL}.
+
+  For \code{plot.linfun} the result is the same as
+  for \code{\link{plot.linim}}.
+  
+  For the conversion methods, the result is an object of the
+  required type: \code{as.owin.linfun} returns an object of
+  class \code{"owin"}, and so on.
+}
+\examples{
+   X <- runiflpp(3, simplenet)
+   f <- nnfun(X)
+   f
+   plot(f)
+   as.function(f)
+   as.owin(f)
+   head(as.data.frame(f))
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/methods.linim.Rd b/man/methods.linim.Rd
new file mode 100644
index 0000000..8744d1e
--- /dev/null
+++ b/man/methods.linim.Rd
@@ -0,0 +1,101 @@
+\name{methods.linim}
+\Rdversion{1.1}
+\alias{methods.linim} %DoNotExport
+\alias{as.im.linim}
+\alias{as.data.frame.linim}
+\alias{print.linim}
+\alias{summary.linim}
+\alias{affine.linim}
+\alias{scalardilate.linim}
+\alias{shift.linim}
+\title{
+   Methods for Images on a Linear Network
+}
+\description{
+  Methods for the class \code{"linim"} of functions on a linear network.
+}
+\usage{
+  \method{print}{linim}(x, \dots)
+
+  \method{summary}{linim}(object, \dots)
+
+  \method{as.im}{linim}(X, \dots)
+
+  \method{as.data.frame}{linim}(x, \dots)
+
+  \method{shift}{linim}(X, \dots)
+
+  \method{scalardilate}{linim}(X, f, \dots, origin=NULL)
+
+  \method{affine}{linim}(X, mat=diag(c(1,1)), vec=c(0,0), \dots)
+}
+\arguments{
+  \item{X,x,object}{
+    A pixel image on a linear network
+    (object of class \code{"linim"}).
+  }
+  \item{\dots}{
+    Extra arguments passed to other methods.
+  }
+  \item{f}{Numeric. Scalar dilation factor.}
+  \item{mat}{Numeric matrix representing the linear transformation.}
+  \item{vec}{Numeric vector of length 2 specifying the shift vector.}
+  \item{origin}{Character string determining a location
+    that will be shifted to the origin. Options are
+    \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}.
+    Partially matched. 
+  }
+}
+\details{
+  These are methods for the generic functions
+  \code{\link{print}}, \code{\link{summary}}
+  and \code{\link{as.data.frame}},
+  and the \pkg{spatstat} generic functions
+  \code{\link{as.im}},
+  \code{\link{shift}}, 
+  \code{\link{scalardilate}} and 
+  \code{\link{affine}}.
+
+  An object of class \code{"linim"} represents a
+  pixel image defined on a linear network. 
+
+  The method \code{as.im.linim} extracts the pixel values
+  and returns a pixel image of class \code{"im"}.
+
+  The method \code{as.data.frame.linim} returns a data frame
+  giving spatial locations (in cartesian and network coordinates)
+  and corresponding function values.
+  
+  The methods \code{shift.linim},
+  \code{scalardilate.linim} and \code{affine.linim}
+  apply geometric transformations to the pixels and the underlying
+  linear network, without changing the pixel values.
+}
+\value{
+  For \code{print.linim} the result is \code{NULL}.
+
+  The function \code{summary.linim} returns an object of class
+  \code{"summary.linim"}. In normal usage this summary is
+  automatically printed by \code{\link{print.summary.linim}}.
+
+  For \code{as.im.linim} the result is an object of class \code{"im"}.
+  
+  For the geometric transformations \code{shift.linim},
+  \code{scalardilate.linim} and \code{affine.linim}, the result is
+  another object of class \code{"linim"}.
+}
+\examples{
+   M <- as.mask.psp(as.psp(simplenet))
+   Z <- as.im(function(x,y) {x-y}, W=M)
+   X <- linim(simplenet, Z)
+
+   X
+   shift(X, c(1,1))
+   scalardilate(X, 2)
+   head(as.data.frame(X))
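+
+   # a sketch of an affine map: rotation by 90 degrees anticlockwise
+   affine(X, mat=matrix(c(0,1,-1,0), 2, 2))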
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/methods.linnet.Rd b/man/methods.linnet.Rd
new file mode 100644
index 0000000..ea8d0ff
--- /dev/null
+++ b/man/methods.linnet.Rd
@@ -0,0 +1,161 @@
+\name{methods.linnet}
+\alias{methods.linnet} %DoNotExport
+\Rdversion{1.1}
+\alias{as.linnet}
+\alias{as.linnet.linnet}
+\alias{as.owin.linnet}
+\alias{as.psp.linnet}
+\alias{nsegments.linnet}
+\alias{nvertices.linnet}
+\alias{pixellate.linnet}
+\alias{print.linnet}
+\alias{summary.linnet}
+\alias{unitname.linnet}
+\alias{unitname<-.linnet}
+\alias{vertexdegree}
+\alias{vertices.linnet}
+\alias{volume.linnet}
+\alias{Window.linnet}
+\title{
+  Methods for Linear Networks
+}
+\description{
+  These are methods for the class \code{"linnet"} of linear networks.
+}
+\usage{
+as.linnet(X, \dots)
+
+\method{as.linnet}{linnet}(X, \dots, sparse)
+
+\method{as.owin}{linnet}(W, \dots)
+
+\method{as.psp}{linnet}(x, \dots, fatal=TRUE)
+
+\method{nsegments}{linnet}(x)
+
+\method{nvertices}{linnet}(x, \dots)
+
+\method{pixellate}{linnet}(x, \dots)
+
+\method{print}{linnet}(x, \dots)
+
+\method{summary}{linnet}(object, \dots)
+
+\method{unitname}{linnet}(x)
+
+\method{unitname}{linnet}(x) <- value
+
+vertexdegree(x)
+
+\method{vertices}{linnet}(w)
+
+\method{volume}{linnet}(x)
+
+\method{Window}{linnet}(X, \dots)
+}
+\arguments{
+  \item{x,X,object,w,W}{
+    An object of class \code{"linnet"} representing 
+    a linear network.
+  }
+  \item{\dots}{
+    Arguments passed to other methods.
+  }
+  \item{value}{
+    A valid name for the unit of length for \code{x}.
+    See \code{\link{unitname}}.
+  }
+  \item{fatal}{
+    Logical value indicating whether data in the wrong format
+    should lead to an error (\code{fatal=TRUE}) or a warning
+    (\code{fatal=FALSE}).
+  }
+  \item{sparse}{
+    Logical value indicating whether to use a sparse matrix
+    representation, as explained in \code{\link{linnet}}.
+    Default is to keep the same representation as in \code{X}.
+  }
+}
+\details{
+  The function \code{as.linnet} is generic.
+  It converts data from some other format
+  into an object of class \code{"linnet"}.
+  The method \code{as.linnet.lpp} extracts the linear network
+  information from an \code{lpp} object.
+
+  The other functions are methods for the generic commands
+  \code{\link{as.owin}},
+  \code{\link{as.psp}},
+  \code{\link{nsegments}},
+  \code{\link{nvertices}},
+  \code{\link{pixellate}},
+  \code{\link{print}},
+  \code{\link{summary}},
+  \code{\link{unitname}},
+  \code{\link{unitname<-}},
+  \code{\link{vertices}},
+  \code{\link{volume}}
+  and \code{\link{Window}}
+  for the class \code{"linnet"}.
+
+  The methods \code{as.owin.linnet} and \code{Window.linnet}
+  extract the window containing
+  the linear network, and return it as an object of class \code{"owin"}.
+  
+  The method \code{as.psp.linnet} extracts the
+  lines of the linear network as a line segment pattern (object of class
+  \code{"psp"}) while \code{nsegments.linnet} simply counts the number
+  of line segments.
+
+  The method \code{vertices.linnet} extracts the vertices (nodes)
+  of the linear network and \code{nvertices.linnet} simply counts the
+  vertices. The function \code{vertexdegree} calculates 
+  the topological degree of each vertex (the number of lines
+  emanating from that vertex) and returns these values as an integer
+  vector.
+
+  The method \code{pixellate.linnet} applies \code{\link{as.psp.linnet}}
+  to convert the network to a collection of line segments,
+  then invokes \code{\link{pixellate.psp}}.
+}
+\value{
+  For \code{as.linnet} the value is an object of class \code{"linnet"}.
+  For other functions, see the help file for the corresponding
+  generic function.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{linnet}}.
+
+  Generic functions:
+  \code{\link{as.owin}},
+  \code{\link{as.psp}},
+  \code{\link{nsegments}},
+  \code{\link{nvertices}},
+  \code{\link{pixellate}},
+  \code{\link{print}},
+  \code{\link{summary}},
+  \code{\link{unitname}},
+  \code{\link{unitname<-}},
+  \code{\link{vertices}},
+  \code{\link{volume}}
+  and \code{\link{Window}}.
+
+  Special tools: \code{\link{thinNetwork}}, \code{\link{insertVertices}},
+  \code{\link{connected.linnet}}.
+  
+  \code{\link{lixellate}} for dividing segments into shorter segments.
+}
+\examples{
+  simplenet
+  summary(simplenet)
+  nsegments(simplenet)
+  nvertices(simplenet)
+  volume(simplenet)
+  unitname(simplenet) <- c("cubit", "cubits")
+  Window(simplenet)
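+  # topological degree of each vertex, as described above
+  vertexdegree(simplenet)
+  # conversion to segments, and rasterised segment length per pixel
+  as.psp(simplenet)
+  plot(pixellate(simplenet))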
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/methods.lpp.Rd b/man/methods.lpp.Rd
new file mode 100644
index 0000000..aa5460c
--- /dev/null
+++ b/man/methods.lpp.Rd
@@ -0,0 +1,106 @@
+\name{methods.lpp}
+\alias{methods.lpp} %DoNotExport
+\Rdversion{1.1}
+\alias{as.ppp.lpp}
+\alias{as.psp.lpp}
+\alias{marks<-.lpp}
+\alias{nsegments.lpp}
+\alias{print.lpp}
+\alias{print.summary.lpp}
+\alias{summary.lpp}
+\alias{unitname.lpp}
+\alias{unitname<-.lpp}
+\alias{unmark.lpp}
+\title{
+  Methods for Point Patterns on a Linear Network
+}
+\description{
+  These are methods specifically for the class \code{"lpp"} of point patterns on
+  linear networks.
+}
+\usage{
+\method{as.ppp}{lpp}(X, ..., fatal=TRUE)
+
+\method{as.psp}{lpp}(x, ..., fatal=TRUE)
+
+\method{marks}{lpp}(x, ...) <- value
+
+\method{nsegments}{lpp}(x)
+
+\method{print}{lpp}(x, ...)
+
+\method{print}{summary.lpp}(x, ...)
+
+\method{summary}{lpp}(object, ...)
+
+\method{unitname}{lpp}(x)
+
+\method{unitname}{lpp}(x) <- value
+
+\method{unmark}{lpp}(X)
+}
+\arguments{
+  \item{x,X,object}{
+    An object of class \code{"lpp"} representing a point pattern
+    on a linear network.
+  }
+  \item{\dots}{
+    Arguments passed to other methods.
+  }
+  \item{value}{
+    Replacement value for the \code{marks} or \code{unitname}
+    of \code{x}. See Details.
+  }
+  \item{fatal}{
+    Logical value indicating whether data in the wrong format
+    should lead to an error (\code{fatal=TRUE}) or a warning
+    (\code{fatal=FALSE}).
+  }
+}
+\details{
+  These are methods for the generic functions
+  \code{\link{as.ppp}},
+  \code{\link{as.psp}},
+  \code{\link{marks<-}},
+  \code{\link{nsegments}},
+  \code{\link{print}},
+  \code{\link{summary}},
+  \code{\link{unitname}}, 
+  \code{\link{unitname<-}} and
+  \code{\link{unmark}}
+  for objects of the class \code{"lpp"}.
+
+  For \code{"marks<-.lpp"} the replacement \code{value}
+  should be either \code{NULL}, or a vector of length equal
+  to the number of points in \code{x},
+  or a data frame with one row for each point in \code{x}.
+  
+  For \code{"unitname<-.lpp"} the replacement \code{value}
+  should be a valid name for the unit of length, as
+  described in \code{\link{unitname}}.
+}
+\section{Other methods}{
+  An object of class \code{"lpp"} also inherits the class
+  \code{"ppx"} for which many other methods are available.
+  See \code{\link[spatstat:methods.ppx]{methods.ppx}}.
+}
+\value{
+  See the documentation on the corresponding generic function.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{lpp}},
+  \code{\link{intensity.lpp}},
+  \code{\link[spatstat:methods.ppx]{methods.ppx}}
+}
+\examples{
+  X <- runiflpp(10, simplenet)
+  X
+  as.ppp(X)
+  summary(X)
+  unitname(X) <- c("furlong", "furlongs")
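+  # assigning marks: one value per point (a sketch)
+  marks(X) <- factor(sample(c("a","b"), npoints(X), replace=TRUE))
+  X
+  unmark(X)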
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/methods.lppm.Rd b/man/methods.lppm.Rd
new file mode 100644
index 0000000..c329cf7
--- /dev/null
+++ b/man/methods.lppm.Rd
@@ -0,0 +1,113 @@
+\name{methods.lppm}
+\alias{methods.lppm} %DoNotExport
+\alias{coef.lppm}
+\alias{emend.lppm}
+\alias{extractAIC.lppm}
+\alias{formula.lppm}
+\alias{logLik.lppm}
+\alias{deviance.lppm}
+\alias{nobs.lppm}
+\alias{print.lppm}
+\alias{summary.lppm}
+\alias{terms.lppm}
+\alias{update.lppm}
+\alias{valid.lppm}
+\alias{vcov.lppm}
+\alias{as.linnet.lppm}
+\title{
+  Methods for Fitted Point Process Models on a Linear Network
+}
+\description{
+  These are methods for the class \code{"lppm"} of fitted point process
+  models on a linear network.
+}
+\usage{
+  \method{coef}{lppm}(object, ...)
+
+  \method{emend}{lppm}(object, \dots)
+
+  \method{extractAIC}{lppm}(fit, ...)
+
+  \method{formula}{lppm}(x, ...)
+
+  \method{logLik}{lppm}(object, ...)
+
+  \method{deviance}{lppm}(object, ...)
+
+  \method{nobs}{lppm}(object, ...)
+
+  \method{print}{lppm}(x, ...)
+
+  \method{summary}{lppm}(object, ...)
+
+  \method{terms}{lppm}(x, ...)
+
+  \method{update}{lppm}(object, ...)
+
+  \method{valid}{lppm}(object, ...)
+
+  \method{vcov}{lppm}(object, ...)
+
+  \method{as.linnet}{lppm}(X, ...)
+}
+\arguments{
+  \item{object,fit,x,X}{
+    An object of class \code{"lppm"} representing a fitted point process
+    model on a linear network.
+  }
+  \item{\dots}{
+    Arguments passed to other methods, usually the
+    method for the class \code{"ppm"}.
+  }
+}
+\details{
+  These are methods for the generic commands
+  \code{\link[stats]{coef}},
+  \code{\link{emend}}, 
+  \code{\link[stats]{extractAIC}},
+  \code{\link[stats]{formula}},
+  \code{\link[stats]{logLik}},
+  \code{\link[stats]{deviance}},
+  \code{\link[stats]{nobs}},
+  \code{\link[base]{print}}, 
+  \code{\link[base]{summary}}, 
+  \code{\link[stats]{terms}},
+  \code{\link[stats]{update}},
+  \code{\link{valid}}
+  and
+  \code{\link[stats]{vcov}}
+  for the class \code{"lppm"}. 
+}
+\value{
+  See the default methods.
+}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{lppm}},
+  \code{\link{plot.lppm}}.
+}
+\examples{
+  X <- runiflpp(15, simplenet)
+  fit <- lppm(X ~ x)
+  print(fit)
+  coef(fit)
+  formula(fit)
+  terms(fit)
+  logLik(fit)
+  deviance(fit)
+  nobs(fit)
+  extractAIC(fit)
+  update(fit, ~1)
+  valid(fit)
+  vcov(fit)
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/methods.objsurf.Rd b/man/methods.objsurf.Rd
new file mode 100644
index 0000000..8a47961
--- /dev/null
+++ b/man/methods.objsurf.Rd
@@ -0,0 +1,62 @@
+\name{methods.objsurf}
+\Rdversion{1.1}
+\alias{methods.objsurf} %DoNotExport
+\alias{print.objsurf}
+\alias{plot.objsurf}
+\alias{persp.objsurf}
+\alias{image.objsurf}
+\alias{contour.objsurf}
+\title{
+  Methods for Objective Function Surfaces
+}
+\description{
+  Methods for printing and plotting an objective function surface.
+}
+\usage{
+\method{print}{objsurf}(x, ...)
+\method{plot}{objsurf}(x, ...)
+\method{image}{objsurf}(x, ...)
+\method{contour}{objsurf}(x, ...)
+\method{persp}{objsurf}(x, ...)
+}
+\arguments{
+  \item{x}{
+    Object of class \code{"objsurf"} representing an objective function surface.
+  }
+  \item{\dots}{
+    Additional arguments passed to plot methods.
+  }
+}
+\details{
+  These are methods for the generic functions
+  \code{\link{print}}, \code{\link{plot}},
+  \code{\link{image}}, \code{\link{contour}} and  \code{\link{persp}}
+  for the class \code{"objsurf"}.
+}
+\value{
+  For \code{print.objsurf}, \code{plot.objsurf} and \code{image.objsurf}
+  the value is \code{NULL}.
+
+  For \code{contour.objsurf} and \code{persp.objsurf}
+  the value is described in the help for
+  \code{\link{contour.default}} and \code{\link{persp.default}}
+  respectively.
+}
+\author{\adrian
+  
+  
+  and \ege.
+}
+\seealso{
+  \code{\link{objsurf}}
+}
+\examples{
+  fit <- kppm(redwood ~ 1, "Thomas")
+  os <- objsurf(fit)
+  os
+  plot(os)
+  contour(os, add=TRUE)
+  persp(os)
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/methods.pp3.Rd b/man/methods.pp3.Rd
new file mode 100644
index 0000000..a672c07
--- /dev/null
+++ b/man/methods.pp3.Rd
@@ -0,0 +1,71 @@
+\name{methods.pp3}
+\Rdversion{1.1}
+\alias{methods.pp3} %DoNotExport
+\alias{print.pp3}
+\alias{summary.pp3}
+\alias{print.summary.pp3}
+\alias{unitname.pp3}
+\alias{unitname<-.pp3}
+\title{
+  Methods for three-dimensional point patterns
+}
+\description{
+  Methods for class \code{"pp3"}.
+}
+\usage{
+  \method{print}{pp3}(x, ...)
+  \method{print}{summary.pp3}(x, ...)
+  \method{summary}{pp3}(object, ...)
+  \method{unitname}{pp3}(x)
+  \method{unitname}{pp3}(x) <- value
+}
+\arguments{
+  \item{x,object}{
+    Object of class \code{"pp3"}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{value}{
+    Name of the unit of length. See \code{\link{unitname}}.
+  }
+}
+\details{
+  These are methods for the generic functions
+  \code{\link{print}}, \code{\link{summary}},
+  \code{\link{unitname}} and  \code{\link{unitname<-}}
+  for the class \code{"pp3"} of three-dimensional point patterns.
+
+  The \code{print} and \code{summary} methods print a description
+  of the point pattern.
+
+  The \code{unitname} method extracts the name of the unit of
+  length in which the point coordinates are expressed.
+  The \code{unitname<-} method assigns the name of the unit of length.
+}
+\value{
+  For \code{print.pp3} the value is \code{NULL}.
+  For \code{unitname.pp3}, the value is an object of class \code{"units"}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{pp3}},
+  \code{\link{print}},
+  \code{\link{unitname}},
+  \code{\link{unitname<-}}
+}
+\examples{
+   X <- pp3(runif(42),runif(42),runif(42), box3(c(0,1), unitname="mm"))
+   X
+   unitname(X)
+   unitname(X) <- c("foot", "feet")
+   summary(X)
+}
+\keyword{spatial}
+\keyword{methods}
+
diff --git a/man/methods.ppx.Rd b/man/methods.ppx.Rd
new file mode 100644
index 0000000..6fbba54
--- /dev/null
+++ b/man/methods.ppx.Rd
@@ -0,0 +1,59 @@
+\name{methods.ppx}
+\Rdversion{1.1}
+\alias{methods.ppx} %DoNotExport
+\alias{print.ppx}
+\alias{plot.ppx}
+\alias{unitname.ppx}
+\alias{unitname<-.ppx}
+\title{
+  Methods for Multidimensional Space-Time Point Patterns
+}
+\description{
+  Methods for printing and plotting a general multidimensional
+  space-time point pattern.
+}
+\usage{
+\method{print}{ppx}(x, ...)
+\method{plot}{ppx}(x, ...)
+\method{unitname}{ppx}(x)
+\method{unitname}{ppx}(x) <- value
+}
+\arguments{
+  \item{x}{
+    Multidimensional point pattern (object of class \code{"ppx"}).
+  }
+  \item{\dots}{
+    Additional arguments passed to plot methods.
+  }
+  \item{value}{
+    Name of the unit of length. See \code{\link{unitname}}.
+  }
+}
+\details{
+  These are methods for the generic functions
+  \code{\link{print}}, \code{\link{plot}},
+  \code{\link{unitname}} and  \code{\link{unitname<-}}
+  for the class \code{"ppx"} of multidimensional point patterns.
+  
+  The \code{print} method prints a description
+  of the point pattern and its spatial domain.
+
+  The \code{unitname} method extracts the name of the unit of
+  length in which the point coordinates are expressed.
+  The \code{unitname<-} method assigns the name of the unit of length.
+}
+\value{
+  For \code{print.ppx} the value is \code{NULL}.
+  For \code{unitname.ppx}, the value is an object of class \code{"units"}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{ppx}},
+  \code{\link{unitname}}  
+}
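+% Added example (not in the original page): a minimal hedged sketch,
+% assuming the ppx() constructor with coord.type abbreviations
+% ("s" = spatial, "t" = temporal) documented in \code{\link{ppx}}.
+\examples{
+  df <- data.frame(x=runif(4), y=runif(4), t=runif(4))
+  X <- ppx(data=df, coord.type=c("s","s","t"))
+  X
+  unitname(X) <- c("metre", "metres")   # assign the name of the unit of length
+  unitname(X)
+}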
+\keyword{spatial}
diff --git a/man/methods.rho2hat.Rd b/man/methods.rho2hat.Rd
new file mode 100644
index 0000000..dc92e6d
--- /dev/null
+++ b/man/methods.rho2hat.Rd
@@ -0,0 +1,78 @@
+\name{methods.rho2hat}
+\alias{methods.rho2hat} %DoNotExport
+\alias{predict.rho2hat}
+\alias{print.rho2hat}
+\alias{plot.rho2hat}
+\title{
+  Methods for Intensity Functions of Two Spatial Covariates
+}
+\description{
+  These are methods for the class \code{"rho2hat"}. 
+}
+\usage{
+\method{plot}{rho2hat}(x, \dots, do.points=FALSE)
+
+\method{print}{rho2hat}(x, \dots)
+
+\method{predict}{rho2hat}(object, \dots, relative=FALSE)
+}
+\arguments{
+  \item{x,object}{
+    An object of class \code{"rho2hat"}.
+  }
+  \item{\dots}{
+    Arguments passed to other methods.
+  }
+  \item{do.points}{
+    Logical value indicating whether to plot the observed values of
+    the covariates at the data points.
+  }
+  \item{relative}{
+    Logical value indicating whether to compute the
+    estimated point process intensity (\code{relative=FALSE})
+    or the relative risk (\code{relative=TRUE}) in the case
+    of a relative risk estimate.
+  }
+}
+\details{
+  These functions are methods for the generic commands
+  \code{\link{print}},
+  \code{\link{predict}} and
+  \code{\link{plot}} 
+  for the class \code{"rho2hat"}.
+
+  An object of class \code{"rho2hat"} is an estimate
+  of the intensity of a point process, as a function of two
+  given spatial covariates. See \code{\link{rho2hat}}.
+  
+  The method \code{plot.rho2hat} displays the estimated function
+  \eqn{\rho}{rho} as an image in which the two axes represent
+  possible values of the two covariates. Optionally it also shows
+  the observed values of the covariates at the data points
+  (argument \code{do.points}).
+
+  The method \code{predict.rho2hat} computes a pixel image of the
+  intensity \eqn{\rho(Z_1(u), Z_2(u))}{rho(Z1(u), Z2(u))}
+  at each spatial location \eqn{u}, where \eqn{Z_1(u)}{Z1(u)}
+  and \eqn{Z_2(u)}{Z2(u)} are the two spatial covariates.
+}
+\value{
+  For \code{predict.rho2hat} the value is a pixel image
+  (object of class \code{"im"}). 
+  For other functions, the value is \code{NULL}.
+}
+\author{
+  \adrian
+  
+  
+}
+\seealso{
+  \code{\link{rho2hat}}
+}
+\examples{
+  r2 <- with(bei.extra, rho2hat(bei, elev, grad))
+  r2
+  plot(r2)
+  plot(predict(r2))
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/methods.rhohat.Rd b/man/methods.rhohat.Rd
new file mode 100644
index 0000000..f0d55f6
--- /dev/null
+++ b/man/methods.rhohat.Rd
@@ -0,0 +1,113 @@
+\name{methods.rhohat}
+\alias{methods.rhohat} %DoNotExport
+\alias{print.rhohat}
+\alias{plot.rhohat}
+\alias{predict.rhohat}
+\alias{simulate.rhohat}
+\title{
+  Methods for Intensity Functions of Spatial Covariate
+}
+\description{
+  These are methods for the class \code{"rhohat"}. 
+}
+\usage{
+\method{print}{rhohat}(x, ...)
+
+\method{plot}{rhohat}(x, ..., do.rug=TRUE)
+
+\method{predict}{rhohat}(object, ..., relative=FALSE,
+             what=c("rho", "lo", "hi", "se"))
+
+\method{simulate}{rhohat}(object, nsim=1, ..., drop=TRUE)
+}
+\arguments{
+  \item{x,object}{
+    An object of class \code{"rhohat"} representing 
+    a smoothed estimate of the intensity function of a point process.
+  }
+  \item{\dots}{
+    Arguments passed to other methods.
+  }
+  \item{do.rug}{
+    Logical value indicating whether to plot the observed values of
+    the covariate as a rug plot along the horizontal axis.
+  }
+  \item{relative}{
+    Logical value indicating whether to compute the
+    estimated point process intensity (\code{relative=FALSE})
+    or the relative risk (\code{relative=TRUE}) in the case
+    of a relative risk estimate.
+  }
+  \item{nsim}{
+    Number of simulations to be generated.
+  }
+  \item{drop}{
+    Logical value indicating what to do when \code{nsim=1}.
+    If \code{drop=TRUE} (the default), a point pattern is returned.
+    If \code{drop=FALSE}, a list of length 1 containing a point pattern
+    is returned.
+  }
+  \item{what}{
+    Optional character string (partially matched) specifying which
+    value should be calculated: either the function estimate (\code{what="rho"},
+    the default), the lower or upper end of the confidence interval
+    (\code{what="lo"} or \code{what="hi"}) or the standard error
+    (\code{what="se"}).
+  }	 
+}
+\details{
+  These functions are methods for the generic commands
+  \code{\link{print}},
+  \code{\link[graphics]{plot}},
+  \code{\link[stats]{predict}} and
+  \code{\link[stats]{simulate}}
+  for the class \code{"rhohat"}.
+
+  An object of class \code{"rhohat"} is an estimate
+  of the intensity of a point process, as a function of a
+  given spatial covariate. See \code{\link{rhohat}}.
+  
+  The method \code{plot.rhohat} displays the estimated function
+  \eqn{\rho}{rho} using \code{\link{plot.fv}}, and optionally
+  adds a \code{\link{rug}} plot of the observed values of the covariate.
+  
+  The method \code{predict.rhohat} computes a pixel image of the
+  intensity \eqn{\rho(Z(u))}{rho(Z(u))} at each spatial location
+  \eqn{u}, where \eqn{Z} is the spatial covariate.
+
+  The method \code{simulate.rhohat} invokes \code{predict.rhohat}
+  to determine the predicted intensity, and then simulates a
+  Poisson point process with this intensity.
+}
+\value{
+  For \code{predict.rhohat} the value is a pixel image
+  (object of class \code{"im"} or \code{"linim"}).
+  For \code{simulate.rhohat} the value is a point pattern
+  (object of class \code{"ppp"} or \code{"lpp"}).
+  For other functions, the value is \code{NULL}.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{rhohat}}
+}
+\examples{
+  X <-  rpoispp(function(x,y){exp(3+3*x)})
+  rho <- rhohat(X, function(x,y){x})
+  rho
+  plot(rho)
+  Y <- predict(rho)
+  plot(Y)
+  plot(simulate(rho), add=TRUE)
+  # 
+  fit <- ppm(X, ~x)
+  rho <- rhohat(fit, "y")
+  opa <- par(mfrow=c(1,2))
+  plot(predict(rho))
+  plot(predict(rho, relative=TRUE))
+  par(opa)
+  plot(predict(rho, what="se"))
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/methods.slrm.Rd b/man/methods.slrm.Rd
new file mode 100644
index 0000000..ea8469c
--- /dev/null
+++ b/man/methods.slrm.Rd
@@ -0,0 +1,73 @@
+\name{methods.slrm}
+\alias{methods.slrm} %DoNotExport
+\alias{formula.slrm}
+\alias{update.slrm}
+\alias{print.slrm}
+\alias{terms.slrm}
+\alias{labels.slrm}
+\title{
+  Methods for Spatial Logistic Regression Models
+}
+\description{
+  These are methods for the class \code{"slrm"}. 
+}
+\usage{
+\method{formula}{slrm}(x, \dots)
+\method{print}{slrm}(x, ...)
+\method{terms}{slrm}(x, \dots)
+\method{labels}{slrm}(object, \dots)
+\method{update}{slrm}(object, ..., evaluate = TRUE, env = parent.frame())
+}
+\arguments{
+  \item{x,object}{
+    An object of class \code{"slrm"},
+    representing a fitted spatial logistic regression model.
+  }
+  \item{\dots}{
+    Arguments passed to other methods.
+  }
+  \item{evaluate}{
+    Logical value. If \code{TRUE}, evaluate the updated call to
+    \code{slrm}, so that the model is refitted; 
+    if \code{FALSE}, simply return the updated call.
+  }
+  \item{env}{
+    Optional environment in which the model should be updated.
+  }
+}
+\details{
+  These functions are methods for the generic commands
+  \code{\link{formula}},
+  \code{\link{update}},
+  \code{\link{print}},
+  \code{\link{terms}} and
+  \code{\link{labels}}
+  for the class \code{"slrm"}.
+
+  An object of class \code{"slrm"} represents a fitted
+  spatial logistic regression model. It is obtained from \code{\link{slrm}}.
+}
+\value{
+  See the help files for the corresponding generic functions.
+}
+\author{
+  \adrian
+  
+  
+}
+\seealso{
+  \code{\link{slrm}},  \code{\link{plot.slrm}},
+  \code{\link{predict.slrm}}, \code{\link{simulate.slrm}},
+  \code{\link{vcov.slrm}},
+  \code{\link{coef.slrm}}.
+}
+\examples{
+  data(redwood)
+  fit <- slrm(redwood ~ x)
+  coef(fit)
+  formula(fit)
+  tf <- terms(fit)  
+  labels(fit)  
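+  # Added sketch: refit with an updated formula; assumes update.slrm
+  # accepts a new formula in the same way as update for other models
+  fit2 <- update(fit, . ~ x + y)
+  fit2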
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/methods.ssf.Rd b/man/methods.ssf.Rd
new file mode 100644
index 0000000..d2c5d38
--- /dev/null
+++ b/man/methods.ssf.Rd
@@ -0,0 +1,110 @@
+\name{methods.ssf}
+\alias{methods.ssf} %DoNotExport
+\alias{marks.ssf}
+\alias{marks<-.ssf}
+\alias{unmark.ssf}
+\alias{as.im.ssf}
+\alias{as.function.ssf}
+\alias{as.ppp.ssf}
+\alias{print.ssf}
+\alias{range.ssf}
+\alias{min.ssf}
+\alias{max.ssf}
+\alias{integral.ssf}
+\title{Methods for Spatially Sampled Functions}
+\description{
+  Methods for various generic commands, for the class
+  \code{"ssf"} of spatially sampled functions.
+}
+\usage{
+  \method{marks}{ssf}(x, \dots)
+
+  \method{marks}{ssf}(x, \dots) <- value
+
+  \method{unmark}{ssf}(X)
+
+  \method{as.im}{ssf}(X, \dots)
+
+  \method{as.function}{ssf}(x, \dots)
+
+  \method{as.ppp}{ssf}(X, \dots)
+
+  \method{print}{ssf}(x, \dots, brief=FALSE)
+
+  \method{range}{ssf}(x, \dots)
+
+  \method{min}{ssf}(x, \dots)
+
+  \method{max}{ssf}(x, \dots)
+
+  \method{integral}{ssf}(f, domain=NULL, ..., weights=attr(f, "weights"))
+}
+\arguments{
+  \item{x,X,f}{
+    A spatially sampled function (object of class \code{"ssf"}). 
+  }
+  \item{\dots}{Arguments passed to the default method.}
+  \item{brief}{Logical value controlling the amount of detail printed.}
+  \item{value}{Matrix of replacement values for the function.}
+  \item{domain}{Optional.
+    Domain of integration. An object of class \code{"owin"}.
+  }
+  \item{weights}{
+    Optional. Numeric vector of weights associated with the
+    sample points.
+  }
+}
+\value{
+  \code{marks} returns a matrix.
+
+  \code{marks(x) <- value} returns an object of class \code{"ssf"}.
+  
+  \code{as.owin} returns a window (object of class \code{"owin"}).
+
+  \code{as.ppp} and \code{unmark}
+  return a point pattern (object of class \code{"ppp"}).
+
+  \code{as.function} returns a \code{function(x,y)} of class \code{"funxy"}.
+  
+  \code{print} returns \code{NULL}.
+
+  \code{range} returns a numeric vector of length 2.
+  \code{min} and \code{max} return a single numeric value.
+
+  \code{integral} returns a numeric value (if \code{f} had numeric
+  values) or a numeric vector (if \code{f} had vector values).
+}
+\details{
+  An object of class \code{"ssf"} represents a
+  function (real- or vector-valued) that has been
+  sampled at a finite set of points.
+
+  The commands documented here are methods for this class,
+  for the generic commands
+  \code{\link[spatstat]{marks}},
+  \code{\link[spatstat]{marks<-}},
+  \code{\link[spatstat]{unmark}},
+  \code{\link[spatstat]{as.im}},
+  \code{\link{as.function}},
+  \code{\link[spatstat]{as.ppp}},
+  \code{\link{print}},
+  \code{\link{range}},
+  \code{\link{min}},
+  \code{\link{max}}
+  and \code{\link[spatstat]{integral}}.
+}
+\seealso{
+  \code{\link{ssf}}
+}
+\examples{
+  X <- cells[1:4]
+  f <- ssf(X, nndist(X, k=1:3))
+  f
+  marks(f)
+  as.ppp(f)
+  as.im(f)
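+  # Added sketch: numerical summaries documented above
+  range(f)
+  integral(f)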
+}
+\author{\adrian}
+\keyword{spatial}
+\keyword{methods}
+
diff --git a/man/methods.units.Rd b/man/methods.units.Rd
new file mode 100644
index 0000000..ebd7eea
--- /dev/null
+++ b/man/methods.units.Rd
@@ -0,0 +1,75 @@
+\name{methods.units}
+\Rdversion{1.1}
+\alias{methods.units} %DoNotExport
+\alias{print.units}
+\alias{summary.units}
+\alias{rescale.units}
+\alias{compatible.units}
+\title{
+  Methods for Units
+}
+\description{
+  Methods for class \code{"units"}.
+}
+\usage{
+  \method{print}{units}(x, ...)
+  \method{summary}{units}(object, ...)
+  \method{rescale}{units}(X, s, unitname)
+  \method{compatible}{units}(A,B, ..., coerce=TRUE)
+}
+\arguments{
+  \item{x,X,A,B,object}{
+    Objects of class \code{"units"} representing
+    units of length.
+  }
+  \item{s}{Conversion factor: the new units are \code{s} times the old units.}
+  \item{\dots}{
+    Other arguments.
+    For \code{print.units} these arguments are passed to \code{print.default}.
+    For \code{summary.units} they are ignored.
+    For \code{compatible.units} these arguments
+    are other objects of class \code{"units"}.
+  }
+  \item{coerce}{
+    Logical. If \code{TRUE}, a null unit of length is compatible with
+    any non-null unit.
+  }
+  \item{unitname}{Optional new name for the unit. If present, this overrides the
+    rescaling operation and simply substitutes the new name for the old one.}
+}
+\details{
+  These are methods for the generic functions
+  \code{\link{print}}, \code{\link{summary}}, \code{\link{rescale}}
+  and \code{\link{compatible}}
+  for the class \code{"units"}.
+
+  An object of class \code{"units"} represents a unit of length.
+
+  The \code{print} method prints a description of the unit of length,
+  and the \code{summary} method gives a more detailed description.
+
+  The \code{rescale} method changes the unit of length by rescaling it.
+  
+  The \code{compatible} method tests whether two or more units of length
+  are compatible.
+}
+\value{
+  For \code{print.units} the value is \code{NULL}.
+  For \code{summary.units} the value is an object of class
+  \code{"summary.units"} (with its own print method).
+  For \code{rescale.units} the value is another object of class \code{"units"}.
+  For \code{compatible.units} the result is logical.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{box3}},
+  \code{\link{print}},
+  \code{\link{unitname}}
+}
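+% Added example (not in the original page): a hedged sketch using only
+% the operations documented above and the box3() constructor from \seealso.
+\examples{
+  B <- box3(c(0,1), unitname=c("metre", "metres"))
+  u <- unitname(B)
+  u
+  summary(u)
+  rescale(u, 100)             # the new unit is 100 times the old unit
+  compatible(u, unitname(B))  # TRUE
+}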
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/methods.zclustermodel.Rd b/man/methods.zclustermodel.Rd
new file mode 100644
index 0000000..fb3ec2a
--- /dev/null
+++ b/man/methods.zclustermodel.Rd
@@ -0,0 +1,66 @@
+\name{methods.zclustermodel}
+\alias{methods.zclustermodel} % DoNotExport
+\alias{pcfmodel.zclustermodel}
+\alias{predict.zclustermodel}
+\alias{print.zclustermodel}
+\title{
+  Methods for Cluster Models
+}
+\description{
+  Methods for the experimental class of cluster models.
+}
+\usage{
+ \method{pcfmodel}{zclustermodel}(model, \dots)
+
+ \method{predict}{zclustermodel}(object, \dots,
+                  locations, type = "intensity", ngrid = NULL)
+
+ \method{print}{zclustermodel}(x, \dots)
+}
+\arguments{
+  \item{model,object,x}{
+    Object of class \code{"zclustermodel"}.
+  }
+  \item{\dots}{
+    Arguments passed to other methods.
+  }
+  \item{locations}{
+    Locations where prediction should be performed.
+    A window or a point pattern.
+  }
+  \item{type}{
+    Currently must equal \code{"intensity"}.
+  }
+  \item{ngrid}{
+    Pixel grid dimensions for prediction, if \code{locations} is
+    a rectangle or polygon.
+  }
+}
+\details{
+  Experimental.
+}
+\value{
+  Same as for other methods.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{zclustermodel}}
+}
+\examples{
+  m <- zclustermodel("Thomas", kappa=10, mu=5, scale=0.1)
+  m2 <- zclustermodel("VarGamma", kappa=10, mu=10, scale=0.1, nu=0.7)
+  m
+  m2
+  g <- pcfmodel(m)
+  g(0.2)
+  g2 <- pcfmodel(m2)
+  g2(1)
+  Z <- predict(m, locations=square(2))
+  Z2 <- predict(m2, locations=square(1))
+  varcount(m, square(1))
+  varcount(m2, square(1))
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/midpoints.psp.Rd b/man/midpoints.psp.Rd
new file mode 100644
index 0000000..164b084
--- /dev/null
+++ b/man/midpoints.psp.Rd
@@ -0,0 +1,40 @@
+\name{midpoints.psp}
+\alias{midpoints.psp}
+\title{Midpoints of Line Segment Pattern}
+\description{
+  Computes the midpoints of each line segment
+  in a line segment pattern.
+}
+\usage{
+  midpoints.psp(x)
+}
+\arguments{
+  \item{x}{
+    A line segment pattern (object of class \code{"psp"}).
+  }
+}
+\value{
+  Point pattern (object of class \code{"ppp"}).
+}
+\details{
+  The midpoint of each line segment is computed.
+}
+\seealso{
+  \code{\link{summary.psp}},
+  \code{\link{lengths.psp}},
+  \code{\link{angles.psp}}
+}
+\examples{
+  a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  b <- midpoints.psp(a)   
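+  # Added sketch: overlay the midpoints on the segment pattern
+  plot(a)
+  plot(b, add=TRUE, pch=16)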
+}
+\author{
+  \adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/mincontrast.Rd b/man/mincontrast.Rd
new file mode 100644
index 0000000..8f924bd
--- /dev/null
+++ b/man/mincontrast.Rd
@@ -0,0 +1,159 @@
+\name{mincontrast}
+\alias{mincontrast}
+\title{Method of Minimum Contrast}
+\description{
+  A general low-level algorithm for fitting theoretical point process models
+  to point pattern data by the Method of Minimum Contrast.
+}
+\usage{
+mincontrast(observed, theoretical, startpar, \dots,
+          ctrl=list(q = 1/4, p = 2, rmin=NULL, rmax=NULL),
+          fvlab=list(label=NULL, desc="minimum contrast fit"),
+          explain=list(dataname=NULL, modelname=NULL, fname=NULL),
+          adjustment=NULL)
+}
+\arguments{
+  \item{observed}{
+    Summary statistic, computed for the data.
+    An object of class \code{"fv"}.
+  }
+  \item{theoretical}{
+    An R language function that calculates the theoretical expected value
+    of the summary statistic, given the model parameters.
+    See Details.
+  }
+  \item{startpar}{
+    Vector of initial values of the parameters of the
+    point process model (passed to \code{theoretical}). 
+  }
+  \item{\dots}{
+    Additional arguments passed to the function \code{theoretical}
+    and to the optimisation algorithm \code{\link[stats]{optim}}.
+  }
+  \item{ctrl}{
+    Optional. List of arguments controlling the optimisation. See Details.
+  }
+  \item{fvlab}{
+    Optional. List containing some labels for the return value. See Details.
+  }
+  \item{explain}{
+    Optional. List containing strings that give a human-readable description
+    of the model, the data and the summary statistic.
+  }
+  \item{adjustment}{
+    Internal use only.
+  }
+}
+\details{
+  This function is a general algorithm for fitting point process models
+  by the Method of Minimum Contrast. If you want to fit the
+  Thomas process, see \code{\link{thomas.estK}}.
+  If you want to fit a log-Gaussian Cox process, see
+  \code{\link{lgcp.estK}}.  If you want to fit the Matern cluster
+  process, see \code{\link{matclust.estK}}.
+
+  The Method of Minimum Contrast (Diggle and Gratton, 1984)
+  is a general technique for fitting
+  a point process model to point pattern data. First a summary function
+  (typically the \eqn{K} function) is computed from the data point
+  pattern. Second, the theoretical expected
+  value of this summary statistic under the point process model
+  is derived (if possible, as an algebraic expression involving the
+  parameters of the model) or estimated from simulations of the model. 
+  Then the model is fitted by finding the optimal parameter values
+  for the model to give the closest match between the theoretical
+  and empirical curves. 
+
+  The argument \code{observed} should be an object of class \code{"fv"}
+  (see \code{\link{fv.object}}) containing the values of a summary
+  statistic computed from the data point pattern. Usually this is the
+  function \eqn{K(r)} computed by \code{\link{Kest}} or one of its relatives.
+  
+  The argument \code{theoretical} should be a user-supplied function
+  that computes the theoretical expected value of the summary statistic.
+  It must have an argument named \code{par} that will be the vector
+  of parameter values for the model (the length and format of this
+  vector are determined by the starting values in \code{startpar}).
+  The function \code{theoretical} should also expect a second argument
+  (the first argument other than \code{par})
+  containing values of the distance \eqn{r} for which the theoretical
+  value of the summary statistic \eqn{K(r)} should be computed.
+  The value returned by \code{theoretical} should be a vector of the
+  same length as the given vector of \eqn{r} values.
+
+  The argument \code{ctrl} determines the contrast criterion
+  (the objective function that will be minimised). 
+  The algorithm minimises the criterion
+  \deqn{
+    D(\theta)=
+    \int_{r_{\mbox{\scriptsize min}}}^{r_{\mbox{\scriptsize max}}}
+    |\hat F(r)^q - F_\theta(r)^q|^p \, {\rm d}r
+  }{
+    D(theta) = integral from rmin to rmax of
+    abs(Fhat(r)^q - F(theta,r)^q)^p
+  }
+  where \eqn{\theta}{theta} is the vector of parameters of the model,
+  \eqn{\hat F(r)}{Fhat(r)} is the observed value of the summary statistic
+  computed from the data, \eqn{F_\theta(r)}{F(theta,r)} is the
+  theoretical expected value of the summary statistic,
+  and \eqn{p,q} are two exponents. The default is \code{q = 1/4},
+  \code{p = 2}, so that the contrast criterion is the integrated squared
+  difference between the fourth roots of the two functions
+  (Waagepetersen, 2007).
+
+  The other arguments just make things print nicely.
+  The argument \code{fvlab} contains labels for the component
+  \code{fit} of the return value.
+  The argument \code{explain} contains human-readable strings
+  describing the data, the model and the summary statistic.
+
+  The \code{"..."} argument of \code{mincontrast} can be used to
+  pass extra arguments to the function \code{theoretical}
+  and/or to the optimisation function \code{\link[stats]{optim}}.
+  In this case, the function \code{theoretical}
+  should also have a \code{"..."} argument and should ignore it
+  (so that it ignores arguments intended for \code{\link[stats]{optim}}).
+}
+\value{
+  An object of class \code{"minconfit"}. There are methods for printing
+  and plotting this object. It contains the following components:
+  \item{par }{Vector of fitted parameter values.}
+  \item{fit }{Function value table (object of class \code{"fv"})
+    containing the observed values of the summary statistic
+    (\code{observed}) and the theoretical values of the summary
+    statistic computed from the fitted model parameters.
+  }
+  \item{opt }{The return value from the optimizer \code{\link[stats]{optim}}.}
+  \item{ctrl }{The control parameters of the algorithm.}
+  \item{info }{List of explanatory strings.}
+}
+\references{
+  Diggle, P.J. and Gratton, R.J. (1984)
+  Monte Carlo methods of inference for implicit statistical models.
+  \emph{Journal of the Royal Statistical Society, series B}
+  \bold{46}, 193--212.
+
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003).
+  Statistical Inference and Simulation for Spatial Point Processes.
+  Chapman and Hall/CRC, Boca Raton.
+
+  Waagepetersen, R. (2007).
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}  
+\author{Rasmus Waagepetersen
+  \email{rw@math.auc.dk},
+  adapted for \pkg{spatstat} by
+  \adrian
+  
+  
+}
+\seealso{
+  \code{\link{kppm}},
+  \code{\link{lgcp.estK}},
+  \code{\link{matclust.estK}},
+  \code{\link{thomas.estK}}
+}
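+% Added example (not in the original page): a hedged sketch of a direct
+% call to mincontrast(), fitting a Thomas cluster process to the redwood
+% data. The helper 'theoThomasK' is ours, using the standard Thomas
+% K function; in practice \code{\link{thomas.estK}} automates this.
+\examples{
+  K <- Kest(redwood)
+  theoThomasK <- function(par, rvals, ...) {
+    # par = c(kappa, sigma2); absolute values guard against
+    # negative trial values proposed by the optimiser
+    kappa <- abs(par[1])
+    sigma2 <- abs(par[2])
+    pi * rvals^2 + (1 - exp(-rvals^2/(4 * sigma2)))/kappa
+  }
+  fit <- mincontrast(K, theoThomasK, startpar=c(kappa=10, sigma2=0.01))
+  fit$par
+  plot(fit)
+}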
+\keyword{spatial}
+\keyword{models}
diff --git a/man/miplot.Rd b/man/miplot.Rd
new file mode 100644
index 0000000..ac1abda
--- /dev/null
+++ b/man/miplot.Rd
@@ -0,0 +1,74 @@
+\name{miplot}
+\alias{miplot}
+\title{Morisita Index Plot}
+\description{
+  Displays the Morisita Index Plot of a spatial point pattern.
+}
+\usage{
+miplot(X, ...)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}) or something
+    acceptable to \code{\link{as.ppp}}.
+  }
+  \item{\dots}{Optional arguments to control the appearance of the plot.}
+}
+\details{
+  Morisita (1959) defined an index of spatial aggregation for a spatial
+  point pattern based on quadrat counts. The spatial domain of the point
+  pattern is first divided into \eqn{Q} subsets (quadrats) of equal size and
+  shape. The numbers of points falling in each quadrat are counted.
+  Then the Morisita Index is computed as
+  \deqn{
+    \mbox{MI} = Q \frac{\sum_{i=1}^Q n_i (n_i - 1)}{N(N-1)}
+  }{
+    MI = Q * sum(n[i] (n[i]-1))/(N(N-1))
+  }
+  where \eqn{n_i}{n[i]} is the number of points falling in the \eqn{i}-th
+  quadrat, and \eqn{N} is the total number of points.
+  If the pattern is completely random, \code{MI} should be approximately
+  equal to 1. Values of \code{MI} greater than 1 suggest clustering.
+
+  The \emph{Morisita Index plot} is a plot of the Morisita Index
+  \code{MI} against the linear dimension of the quadrats. 
+  The point pattern dataset is divided into \eqn{2 \times 2}{2 * 2}
+  quadrats, then \eqn{3 \times 3}{3 * 3} quadrats, etc., and the
+  Morisita Index is computed each time. This plot is an attempt to
+  discern different scales of dependence in the point pattern data.
+}
+\value{
+  None.
+}
+\references{
+  Morisita, M. (1959) Measuring of the dispersion of individuals and
+  analysis of the distributional patterns.
+  \emph{Memoirs of the Faculty of Science, Kyushu University, Series E:
+  Biology} \bold{2}, 215--235.
+}
+\seealso{
+  \code{\link{quadratcount}}
+}
+\examples{
+ data(longleaf)
+ miplot(longleaf)
+ opa <- par(mfrow=c(2,3))
+ data(cells)
+ data(japanesepines)
+ data(redwood)
+ plot(cells)
+ plot(japanesepines)
+ plot(redwood)
+ miplot(cells)
+ miplot(japanesepines)
+ miplot(redwood)
+ par(opa)
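+ # Added sketch: the Morisita Index for a 3 x 3 division, computed by hand
+ n <- as.vector(quadratcount(cells, 3, 3))
+ N <- sum(n)
+ 9 * sum(n * (n - 1)) / (N * (N - 1))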
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/model.depends.Rd b/man/model.depends.Rd
new file mode 100644
index 0000000..b32adae
--- /dev/null
+++ b/man/model.depends.Rd
@@ -0,0 +1,109 @@
+\name{model.depends}
+\alias{model.depends}
+\alias{model.is.additive}
+\alias{model.covariates}
+\alias{has.offset.term}
+\alias{has.offset}
+\title{
+  Identify Covariates Involved in each Model Term
+}
+\description{
+  Given a fitted model (of any kind), identify which of the covariates
+  is involved in each term of the model.
+}
+\usage{
+model.depends(object)
+model.is.additive(object)
+model.covariates(object, fitted=TRUE, offset=TRUE)
+has.offset.term(object)
+has.offset(object)
+}
+\arguments{
+  \item{object}{
+    A fitted model of any kind.
+  }
+  \item{fitted,offset}{
+    Logical values determining which type of covariates to include.
+  }
+}
+\details{
+  The \code{object} can be a fitted model of any kind,
+  including models of the classes \code{\link{lm}}, \code{\link{glm}}
+  and \code{\link{ppm}}.
+
+  To be precise,
+  \code{object} must belong to a class for which there are methods
+  for \code{\link{formula}}, \code{\link{terms}}
+  and \code{\link{model.matrix}}.
+  
+  The command \code{model.depends} determines the relationship between
+  the original covariates (the data supplied when \code{object} was
+  fitted) and the canonical covariates (the columns of the design matrix).
+  It returns a logical matrix, with one row for each canonical
+  covariate, and one column for each of the original covariates,
+  with the \code{i,j} entry equal to \code{TRUE} if the
+  \code{i}th canonical covariate depends on the \code{j}th
+  original covariate.
+
+  If the model formula of \code{object} includes offset terms
+  (see \code{\link{offset}}), then the return value of \code{model.depends}
+  also has an attribute \code{"offset"}. This is a logical value or
+  matrix with one row for each offset term and one column for each of
+  the original covariates, with the \code{i,j} entry equal to \code{TRUE} if the
+  \code{i}th offset term depends on the \code{j}th
+  original covariate.
+
+  The command \code{model.covariates} returns a character vector
+  containing the names of all (original) covariates that were actually
+  used to fit the model. By default, this includes all covariates that
+  appear in the model formula, including offset terms as well as 
+  canonical covariate terms. To omit the offset terms, set
+  \code{offset=FALSE}. To omit the canonical covariate terms,
+  set \code{fitted=FALSE}.
+
+  The command \code{model.is.additive} determines whether the model
+  is additive, in the sense that there is no canonical covariate that
+  depends on two or more original covariates. It returns a logical value.
+
+  The command \code{has.offset.term} is a faster way to determine whether the
+  model \emph{formula} includes an \code{offset} term.
+
+  The functions \code{model.depends} and \code{has.offset.term}
+  only detect \code{offset} terms which are present
+  in the model formula. They do not detect numerical offsets in the
+  model object, that were inserted using the \code{offset} argument
+  in \code{lm}, \code{glm} etc. To detect the presence of offsets
+  of both kinds, use \code{has.offset}.
+}
+\value{
+  A logical value or matrix, except for \code{model.covariates},
+  which returns a character vector of covariate names.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{model.matrix}}
+}
+\examples{
+   x <- 1:10
+   y <- 3*x + 2
+   z <- rep(c(-1,1), 5)
+   fit <- lm(y ~ poly(x,2) + sin(z))
+   model.depends(fit)
+   model.covariates(fit)
+   model.is.additive(fit)
+
+   fitoff1 <- lm(y ~ x + offset(z))
+   fitoff2 <- lm(y ~ x, offset=z)
+   has.offset.term(fitoff1)
+   has.offset(fitoff1)
+   has.offset.term(fitoff2)
+   has.offset(fitoff2)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/model.frame.ppm.Rd b/man/model.frame.ppm.Rd
new file mode 100644
index 0000000..48ebccd
--- /dev/null
+++ b/man/model.frame.ppm.Rd
@@ -0,0 +1,83 @@
+\name{model.frame.ppm}
+\alias{model.frame.ppm}
+\alias{model.frame.kppm}
+\alias{model.frame.dppm}
+\alias{model.frame.lppm}
+\title{
+Extract the Variables in a Point Process Model
+}
+\description{
+  Given a fitted point process model, this function
+  returns a data frame containing all the variables needed to
+  fit the model using the Berman-Turner device.
+}
+\usage{
+ \method{model.frame}{ppm}(formula, ...)
+
+ \method{model.frame}{kppm}(formula, ...)
+
+ \method{model.frame}{dppm}(formula, ...)
+
+ \method{model.frame}{lppm}(formula, ...)
+}
+\arguments{
+  \item{formula}{
+    A fitted point process model.
+    An object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"} or \code{"lppm"}.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link{model.frame.glm}}.
+  }
+}
+\details{
+  The function \code{\link{model.frame}} is generic.
+  These functions are methods for \code{\link{model.frame}}
+  for fitted point process models (objects of class \code{"ppm"}
+  or \code{"kppm"} or \code{"dppm"} or \code{"lppm"}).
+
+  The first argument should be a fitted point process model;
+  it has to be named \code{formula} for consistency with the generic
+  function.
+
+  The result is a data frame containing all the variables used in
+  fitting the model. The data frame has one row for each quadrature point
+  used in fitting the model. The quadrature scheme can be extracted using
+  \code{\link{quad.ppm}}.  
+}
+\value{
+  A \code{data.frame} containing all the variables used in the
+  fitted model, plus additional variables specified in \code{\dots}.
+  It has an additional attribute \code{"terms"} containing information
+  about the model formula. For details see \code{\link{model.frame.glm}}.
+}
+\references{
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42}, 283--322.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{kppm}},
+  \code{\link{dppm}},
+  \code{\link{lppm}},
+  \code{\link{model.frame}},
+  \code{\link{model.matrix.ppm}}
+}
+\examples{
+  fit <- ppm(cells ~ x)
+  mf <- model.frame(fit)
+  kfit <- kppm(redwood ~ x, "Thomas")
+  kmf <- model.frame(kfit)
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/model.images.Rd b/man/model.images.Rd
new file mode 100644
index 0000000..694c3bd
--- /dev/null
+++ b/man/model.images.Rd
@@ -0,0 +1,144 @@
+\name{model.images}  
+\alias{model.images}
+\alias{model.images.ppm}
+\alias{model.images.dppm}
+\alias{model.images.kppm}
+\alias{model.images.lppm}
+\alias{model.images.slrm}
+\title{Compute Images of Constructed Covariates}
+\description{
+  For a point process model fitted to spatial point pattern data,
+  this function computes pixel images of the covariates
+  in the design matrix.
+}
+\usage{
+  model.images(object, ...)
+
+  \method{model.images}{ppm}(object, W = as.owin(object), ...)
+
+  \method{model.images}{kppm}(object, W = as.owin(object), ...)
+
+  \method{model.images}{dppm}(object, W = as.owin(object), ...)
+
+  \method{model.images}{lppm}(object, L = as.linnet(object), ...)
+
+  \method{model.images}{slrm}(object, ...)
+}
+\arguments{
+  \item{object}{
+    The fitted point process model. An object of class \code{"ppm"}
+    or \code{"kppm"} or \code{"lppm"} or \code{"slrm"} or \code{"dppm"}.
+  }
+  \item{W}{
+    A window (object of class \code{"owin"}) in which the
+    images should be computed. Defaults to the window
+    in which the model was fitted.
+  }
+  \item{L}{
+    A linear network (object of class \code{"linnet"}) in which the
+    images should be computed. Defaults to the network
+    in which the model was fitted.
+  }
+  \item{\dots}{
+    Other arguments (such as \code{na.action}) passed to
+    \code{\link{model.matrix.lm}}.
+  }
+}
+\details{
+  This command is similar to \code{\link{model.matrix.ppm}} except
+  that it computes pixel images of the covariates,
+  instead of computing the covariate values at certain points only.
+
+  The \code{object} must be a fitted spatial point process model
+  object of class \code{"ppm"} (produced by the model-fitting
+  function \code{\link{ppm}}) or class \code{"kppm"} (produced by the
+  fitting function \code{\link{kppm}})
+  or class \code{"dppm"} (produced by the
+  fitting function \code{\link{dppm}}) or class \code{"lppm"} (produced
+  by \code{\link{lppm}}) or class \code{"slrm"} (produced by
+  \code{\link{slrm}}). 
+
+  The spatial covariates required by the model-fitting procedure
+  are computed at every pixel location in the window \code{W}.
+  For \code{lppm} objects, the covariates are computed at every
+  location on the network \code{L}. For \code{slrm} objects, the
+  covariates are computed on the pixels that were used to fit the
+  model.
+
+  Note that the spatial covariates computed here
+  are not the original covariates that were supplied when fitting the
+  model. Rather, they are the covariates that actually appear in the
+  loglinear representation of the (conditional) intensity
+  and in the columns of the design matrix. For example, they might include
+  dummy or indicator variables for different levels of a factor,
+  depending on the contrasts that are in force.
+
+  The pixel resolution is determined by \code{W} 
+  if \code{W} is a mask (that is \code{W$type = "mask"}).
+  Otherwise, the pixel resolution is determined by
+  \code{\link{spatstat.options}}.
+
+  The format of the result depends on whether the original point pattern
+  data were marked or unmarked.
+  \itemize{
+    \item
+    If the original dataset was unmarked,
+    the result is a named list of pixel images (objects of class
+    \code{"im"}) containing the values of the spatial covariates.
+    The names of the list elements are the names of the covariates
+    determined by \code{\link{model.matrix.lm}}.
+    The result is also of class \code{"solist"} so that it can
+    be plotted immediately.
+    \item 
+    If the original dataset was a multitype point pattern,
+    the result is a \code{\link{hyperframe}}
+    with one column for each possible type of points.
+    Each column is a named list of pixel images (objects of class
+    \code{"im"}) containing the values of the spatial covariates.
+    The row names of the hyperframe are the names of the covariates
+    determined by \code{\link{model.matrix.lm}}.
+  }
+}
+\value{
+  A list (of class \code{"solist"}) or
+  array (of class \code{"hyperframe"}) containing
+  pixel images (objects of class \code{"im"}).
+  For \code{model.images.lppm}, the images are also of class \code{"linim"}.
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{model.matrix.ppm}},
+  \code{\link[stats]{model.matrix}},
+  \code{\link{ppm}},
+  \code{\link{ppm.object}},
+  \code{\link{lppm}},
+  \code{\link{dppm}},
+  \code{\link{kppm}},
+  \code{\link{slrm}},
+  \code{\link{im}},
+  \code{\link{im.object}},
+  \code{\link{plot.solist}},
+  \code{\link{spatstat.options}}
+}
+
+\examples{
+   fit <- ppm(cells ~ x)
+   model.images(fit)
+   B <- owin(c(0.2, 0.4), c(0.3, 0.8))
+   model.images(fit, B)
+   fit2 <- ppm(cells ~ cut(x,3))
+   model.images(fit2)
+   fit3 <- slrm(japanesepines ~ x)
+   model.images(fit3)
+   fit4 <- ppm(amacrine ~ marks + x)
+   model.images(fit4)
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/model.matrix.ppm.Rd b/man/model.matrix.ppm.Rd
new file mode 100644
index 0000000..59255dc
--- /dev/null
+++ b/man/model.matrix.ppm.Rd
@@ -0,0 +1,133 @@
+\name{model.matrix.ppm}
+\alias{model.matrix.ppm}
+\alias{model.matrix.kppm}
+\alias{model.matrix.dppm}
+\alias{model.matrix.lppm}
+\alias{model.matrix.ippm}
+\title{Extract Design Matrix from Point Process Model}
+\description{
+  Given a point process model that has been
+  fitted to spatial point pattern data,
+  this function extracts the design matrix of the model.
+}
+\usage{
+   \method{model.matrix}{ppm}(object,
+                              data=model.frame(object, na.action=NULL),
+                              \dots, 
+                              Q=NULL, keepNA=TRUE)
+
+   \method{model.matrix}{kppm}(object,
+                              data=model.frame(object, na.action=NULL),
+                              \dots, 
+                              Q=NULL, keepNA=TRUE)
+
+   \method{model.matrix}{dppm}(object,
+                              data=model.frame(object, na.action=NULL),
+                              \dots, 
+                              Q=NULL, keepNA=TRUE)
+
+   \method{model.matrix}{lppm}(object,
+                              data=model.frame(object, na.action=NULL),
+                              \dots, 
+                              keepNA=TRUE)
+
+   \method{model.matrix}{ippm}(object,
+                              data=model.frame(object, na.action=NULL),
+                              \dots, 
+                              Q=NULL, keepNA=TRUE,
+			      irregular=FALSE)
+}
+\arguments{
+  \item{object}{
+    The fitted point process model. An object of class \code{"ppm"}
+    or \code{"kppm"} or \code{"dppm"} or \code{"ippm"} or \code{"lppm"}.
+  }
+  \item{data}{
+    A model frame, containing the data required for the Berman-Turner device.
+  }
+  \item{Q}{
+    A point pattern (class \code{"ppp"}) or quadrature scheme
+    (class \code{"quad"}) specifying new locations where the
+    covariates should be computed.
+  }
+  \item{keepNA}{
+    Logical. Determines whether rows containing NA values will be
+    deleted or retained.
+  }
+  \item{\dots}{
+    Other arguments (such as \code{na.action}) passed to
+    \code{\link{model.matrix.lm}}.
+  }
+  \item{irregular}{
+    Logical value indicating whether to include the irregular score
+    components.
+  }
+}
+\details{
+  These commands are methods for the generic function
+  \code{\link{model.matrix}}.
+  They extract the design matrix of a spatial point process model
+  (class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}
+  or \code{"lppm"}).
+
+  More precisely, these commands extract
+  the design matrix of the generalised linear model associated with
+  a spatial point process model. 
+  
+  The \code{object} must be a fitted point process model
+  (object of class \code{"ppm"} or \code{"kppm"}
+  or \code{"dppm"} or \code{"lppm"})
+  fitted to spatial point pattern data.
+  Such objects are produced by the model-fitting
+  functions \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{dppm}}
+  and \code{\link{lppm}}.
+
+  The methods \code{model.matrix.ppm},
+  \code{model.matrix.kppm}, \code{model.matrix.dppm}
+  and \code{model.matrix.lppm} 
+  extract the model matrix for the GLM.
+
+  The result is a matrix, with one row for every quadrature point
+  in the fitting procedure, and one column for every constructed
+  covariate in the design matrix.
+
+  If there are \code{NA} values in the covariates,
+  the argument \code{keepNA} determines whether to retain or delete
+  the corresponding rows of the model matrix. The default
+  \code{keepNA=TRUE} is to retain them. Note that this differs from
+  the default behaviour of many other methods for \code{model.matrix},
+  which typically delete rows containing \code{NA}.
+  
+  The quadrature points themselves can be extracted using
+  \code{\link{quad.ppm}}.
+}
+\value{
+  A matrix. Columns of the matrix are canonical covariates in the model.
+  Rows of the matrix correspond to quadrature points
+  in the fitting procedure (provided \code{keepNA=TRUE}).
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{model.matrix}},
+  \code{\link{model.images}},
+  \code{\link{ppm}},
+  \code{\link{kppm}},
+  \code{\link{dppm}},
+  \code{\link{lppm}},
+  \code{\link{ippm}},
+  \code{\link{ppm.object}},
+  \code{\link{quad.ppm}},
+  \code{\link{residuals.ppm}}
+}
+
+\examples{
+   fit <- ppm(cells ~ x)
+   head(model.matrix(fit))
+   model.matrix(fit, Q=runifpoint(5))
+   kfit <- kppm(redwood ~ x, "Thomas")
+   m <- model.matrix(kfit)
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/model.matrix.slrm.Rd b/man/model.matrix.slrm.Rd
new file mode 100644
index 0000000..638ca9b
--- /dev/null
+++ b/man/model.matrix.slrm.Rd
@@ -0,0 +1,64 @@
+\name{model.matrix.slrm}
+\alias{model.matrix.slrm}
+\title{Extract Design Matrix from Spatial Logistic Regression Model}
+\description{
+  This function extracts the design matrix of a
+  spatial logistic regression model.
+}
+\usage{
+   \method{model.matrix}{slrm}(object, ..., keepNA=TRUE)
+}
+\arguments{
+  \item{object}{
+    A fitted spatial logistic regression model. An object of class 
+    \code{"slrm"}.
+  }
+  \item{\dots}{
+    Other arguments (such as \code{na.action}) passed to
+    \code{\link{model.matrix.lm}}.
+  }
+  \item{keepNA}{
+    Logical. Determines whether rows containing NA values will be
+    deleted or retained.
+  }
+}
+\details{
+  This command is a method for the generic function
+  \code{\link{model.matrix}}. It extracts the design matrix of a
+  spatial logistic regression.
+  
+  The \code{object} must be a fitted
+  spatial logistic regression 
+  (object of class \code{"slrm"}).
+  Such objects are produced by the model-fitting
+  function \code{\link{slrm}}.
+
+  Usually the result is a matrix with one column for every
+  constructed covariate in the model, and one row
+  for every pixel in the grid used to fit the model.
+
+  If \code{object} was fitted using split pixels (by calling
+  \code{\link{slrm}} using the argument \code{splitby}) then the
+  matrix has one row for every pixel or half-pixel.
+}
+\value{
+  A matrix. Columns of the matrix are canonical covariates in the model.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{model.matrix}},
+  \code{\link{model.images}},
+  \code{\link{slrm}}.
+}
+\examples{
+   fit <- slrm(japanesepines ~ x)
+   head(model.matrix(fit))
+   # matrix with two columns: '(Intercept)' and 'x'
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/moribund.Rd b/man/moribund.Rd
new file mode 100644
index 0000000..d92dbb0
--- /dev/null
+++ b/man/moribund.Rd
@@ -0,0 +1,64 @@
+\name{moribund} 
+\alias{kstest}
+\alias{kstest.ppp}
+\alias{kstest.ppm}
+\alias{kstest.lpp}
+\alias{kstest.lppm}
+\alias{kstest.slrm}
+\alias{plot.kstest}
+\alias{bermantest}
+\alias{bermantest.ppm}
+\alias{bermantest.ppp}
+\alias{bermantest.lppm}
+\alias{bermantest.lpp}
+\title{Outdated Functions} 
+\description{
+  These outdated functions are retained only for
+  compatibility; they will soon be marked as Deprecated.
+}
+\usage{
+kstest(\dots)
+kstest.ppp(\dots)
+kstest.ppm(\dots)
+kstest.lpp(\dots)
+kstest.lppm(\dots)
+kstest.slrm(\dots)
+\method{plot}{kstest}(x, \dots)
+
+bermantest(\dots)
+bermantest.ppp(\dots)
+bermantest.ppm(\dots)
+bermantest.lpp(\dots)
+bermantest.lppm(\dots)
+}
+\arguments{
+  \item{x}{
+    An object of class \code{"kstest"} or \code{"cdftest"}.
+  }
+  \item{\dots}{
+    Arguments passed to other functions.
+  }
+}
+\details{
+  These functions will be Deprecated in future releases of \pkg{spatstat}.
+
+  The \code{kstest} functions have been superseded by
+  \code{\link{cdf.test}}.
+
+  The \code{bermantest} functions have been superseded by
+  \code{\link{berman.test}}.
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege.
+}
+\seealso{
+  \code{\link{cdf.test}},
+  \code{\link{berman.test}},
+  \code{\link{plot.cdftest}}
+}
+\keyword{spatial}
+
diff --git a/man/mppm.Rd b/man/mppm.Rd
new file mode 100644
index 0000000..8871641
--- /dev/null
+++ b/man/mppm.Rd
@@ -0,0 +1,271 @@
+\name{mppm}
+\alias{mppm}
+\title{Fit Point Process Model to Several Point Patterns}
+\description{
+  Fits a Gibbs point process model to several point patterns
+  simultaneously. 
+}
+\usage{
+   mppm(formula, data, interaction=Poisson(), ...,
+        iformula=NULL, 
+        random=NULL,
+        use.gam = FALSE, 
+        reltol.pql=1e-3,
+        gcontrol=list())
+}
+\arguments{
+  \item{formula}{
+    A formula describing the systematic part of the model.
+    Variables in the formula are names of columns in \code{data}.
+  }
+  \item{data}{
+    A hyperframe (object of class \code{"hyperframe"},
+    see \code{\link{hyperframe}}) containing the
+    point pattern responses and the explanatory variables. 
+  }
+  \item{interaction}{
+    Interpoint interaction(s) appearing in the model.
+    Either an object of class \code{"interact"}
+    describing the point process interaction
+    structure, or a hyperframe (with the same number of
+    rows as \code{data}) whose entries are objects of class
+    \code{"interact"}.     
+  }
+  \item{\dots}{Arguments passed to \code{\link{ppm}} controlling
+    the fitting procedure.
+  }
+  \item{iformula}{
+    Optional. A formula (with no left hand side)
+    describing the interaction to be applied to each case.
+    Each variable name in the formula should either be the name of a column
+    in the hyperframe \code{interaction}, or the name of a column
+    in the hyperframe \code{data} that is a vector or factor.
+  }
+  \item{random}{
+    Optional. A formula (with no left hand side)
+    describing a random effect. Variable names in the formula
+    may be any of the column names of \code{data} and \code{interaction}.
+    The formula must be recognisable to \code{\link{lme}}. 
+  }
+  \item{use.gam}{Logical flag indicating whether to fit the model
+    using \code{\link[mgcv]{gam}} or \code{\link[stats]{glm}}.
+  }
+  \item{reltol.pql}{
+    Relative tolerance for successive steps in
+    the penalised quasi-likelihood algorithm,
+    used when the model includes random effects.
+    The algorithm terminates when the root mean square of the
+    relative change in coefficients is less than \code{reltol.pql}.
+  }
+  \item{gcontrol}{
+    List of arguments to control the fitting algorithm.
+    Arguments are passed to \code{\link[stats]{glm.control}}
+    or \code{\link[mgcv]{gam.control}}
+    or \code{\link[nlme]{lmeControl}}
+    depending on the kind of model being fitted.
+    If the model has random effects, the arguments are passed to
+    \code{\link[nlme]{lmeControl}}. Otherwise,
+    if \code{use.gam=TRUE} the arguments are passed to
+    \code{\link[mgcv]{gam.control}}, and if
+    \code{use.gam=FALSE} (the default) they are passed to
+    \code{\link[stats]{glm.control}}.
+  }
+}
+\details{
+  This function fits a common point process model to a dataset
+  containing several different point patterns.
+  
+  It extends the capabilities of the function \code{\link{ppm}}
+  to deal with data such as
+  \itemize{
+    \item replicated observations of spatial point patterns
+    \item two groups of spatial point patterns
+    \item a designed experiment in which the response from each unit
+    is a point pattern.
+  }
+  
+  The syntax of this function is similar to that of
+  standard \R model-fitting functions like \code{\link{lm}} and
+  \code{\link{glm}}. The first argument \code{formula}  is an \R formula
+  describing the systematic part of the model. The second argument
+  \code{data} contains the responses and the explanatory variables.
+  Other arguments determine the stochastic structure of the model.
+
+  Schematically,
+  the data are regarded as the results of a designed experiment
+  involving \eqn{n} experimental units.  Each unit has a
+  \sQuote{response}, and optionally some \sQuote{explanatory variables}
+  (covariates) describing the experimental conditions for that unit.
+  In this context,
+  \emph{the response from each unit is a point pattern}.
+  The value of a particular covariate for each unit can be
+  either a single value (numerical, logical or factor),
+  or a spatial covariate.
+  A \sQuote{spatial} covariate is a quantity that depends on spatial location,
+  for example, the soil acidity or altitude at each location.
+  For the purposes of \code{mppm}, a spatial covariate must be stored
+  as a pixel image (object of class \code{"im"}) which gives the values
+  of the covariate at a fine grid of locations.
+
+  The argument \code{data} is a hyperframe (a generalisation of
+  a data frame, see \code{\link{hyperframe}}). This is like a data frame
+  except that the entries can be objects of any class.
+  The hyperframe has one row for each experimental unit,
+  and one column for each variable (response or explanatory variable).
+
+  The \code{formula} should be an \R formula.
+  The left hand side of \code{formula} determines the \sQuote{response}
+  variable. This should be a single name, which
+  should correspond to a column in \code{data}.
+
+  The right hand side of \code{formula} determines the 
+  spatial trend of the model. It specifies the linear predictor,
+  and effectively represents the \bold{logarithm}
+  of the spatial trend.
+  Variables in the formula must be the names of columns of
+  \code{data}, or one of the reserved names
+  \describe{
+    \item{x,y}{Cartesian coordinates of location}
+    \item{marks}{Mark attached to point}
+    \item{id}{Factor representing the
+      serial number (\eqn{1} to \eqn{n}) of the point pattern,
+      i.e. the row number in the data hyperframe.
+    }
+  }
+
+  The column of responses in \code{data}
+  must consist of point patterns (objects of class \code{"ppp"}).
+  The individual point pattern responses 
+  can be defined in different spatial windows.
+  If some of the point patterns are marked, then they must all be
+  marked, and must have the same type of marks.
+
+  The scope of models that can be fitted to each pattern is the same as the
+  scope of \code{\link{ppm}}, that is, Gibbs point processes with
+  interaction terms that belong to a specified list, including
+  for example the Poisson process, Strauss process, Geyer's saturation
+  model, and piecewise constant pairwise interaction models.
+  Additionally, it is possible to include random effects
+  as explained in the section on Random Effects below.
+  
+  The stochastic part of the model is determined by
+  the arguments \code{interaction} and (optionally) \code{iformula}.
+  \itemize{
+    \item 
+    In the simplest case, \code{interaction} is 
+    an object of class \code{"interact"},
+    determining the interpoint interaction structure of the point
+    process model, for all experimental units.
+    \item
+    Alternatively, \code{interaction} may be a hyperframe,
+    whose entries are objects of class \code{"interact"}.
+    It should have the same number of rows as \code{data}.
+    \itemize{
+      \item
+      If \code{interaction} consists of only one column,
+      then the entry in row \code{i} is taken to be the
+      interpoint interaction for the \code{i}th experimental unit
+      (corresponding to the \code{i}th row of \code{data}).
+      \item
+      If \code{interaction} has more than one column,
+      then the argument \code{iformula} is also required.
+      Each row of \code{interaction} determines
+      several interpoint interaction structures that might be applied
+      to the corresponding row of \code{data}.
+      The choice of interaction is determined by \code{iformula};
+      this should be an \R formula,
+      without a left hand side.
+      For example if \code{interaction} has two columns called
+      \code{A} and \code{B} then \code{iformula = ~B} indicates that the
+      interpoint interactions are taken from the second column.
+    }
+  }
+  Variables in \code{iformula}
+  typically refer to column names of \code{interaction}.
+  They can also be names of columns in
+  \code{data}, but only for columns of numeric, logical or factor
+  values. For example \code{iformula = ~B * group} (where \code{group}
+  is a column of \code{data} that contains a factor) causes the
+  model with interpoint interaction \code{B} to be fitted
+  with different interaction parameters for each level of \code{group}.
+}
+\section{Random Effects}{
+  It is also possible to include random effects in the
+  trend term. The argument \code{random} is a formula,
+  with no left-hand side, that specifies the structure of the
+  random effects. The formula should be recognisable to
+  \code{\link{lme}} (see the description of the argument \code{random}
+  for \code{\link{lme}}).
+
+  The names in the formula \code{random} may be any of the covariates
+  supplied by \code{data}.
+  Additionally the formula may involve the name
+  \code{id}, which is a factor representing the
+  serial number (\eqn{1} to \eqn{n}) of the point pattern,
+  i.e. the row number in the data hyperframe.
+}
+\value{
+  An object of class \code{"mppm"} representing the
+  fitted model.
+
+  There are methods for
+  \code{print}, \code{summary}, \code{coef},
+  \code{AIC}, \code{anova}, \code{fitted}, \code{fixef}, \code{logLik},
+  \code{plot}, \code{predict}, \code{ranef}, \code{residuals},
+  \code{terms} and \code{vcov} for this class.
+
+  The default methods for \code{\link[stats]{update}}
+  and \code{\link[stats]{formula}} also work on this class.
+}
+\references{
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42}, 283--322.
+ 
+  Baddeley, A., Bischof, L., Sintorn, I.-M., Haggarty, S.,
+  Bell, M. and Turner, R. 
+  Analysis of a designed experiment where the response is a spatial
+  point pattern. In preparation.
+
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  London: Chapman and Hall/CRC Press.
+
+  Bell, M. and Grunwald, G. (2004)
+  Mixed models for the analysis of replicated spatial point patterns.
+  \emph{Biostatistics} \bold{5}, 633--648.
+}
+\author{
+  Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff.
+  Implemented in \pkg{spatstat} by 
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{print.mppm}},
+  \code{\link{summary.mppm}},
+  \code{\link{coef.mppm}},
+}
+\examples{
+# Waterstriders data
+ H <- hyperframe(Y = waterstriders)
+ mppm(Y ~ 1,  data=H)
+ mppm(Y ~ 1,  data=H, Strauss(7))
+ mppm(Y ~ id, data=H)
+ mppm(Y ~ x,  data=H)
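+
+ # A sketch of the hyperframe-interaction mechanism described in Details:
+ # a two-column interaction hyperframe, one row for each of the three
+ # waterstriders patterns, with iformula selecting column 'S'.
+ # (The column names 'P' and 'S' are illustrative only.)
+ IH <- hyperframe(P=anylist(Poisson(), Poisson(), Poisson()),
+                  S=anylist(Strauss(7), Strauss(7), Strauss(7)))
+ mppm(Y ~ 1, data=H, interaction=IH, iformula=~S)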
+
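+ # A sketch of a random-effects fit, as described in the
+ # 'Random Effects' section: a random intercept for each pattern,
+ # using the built-in factor 'id' (fitting relies on package nlme).
+ mppm(Y ~ 1, data=H, random=~1|id)
+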
+# Synthetic data from known model
+n <- 10
+H <- hyperframe(V=1:n,
+                U=runif(n, min=-1, max=1),
+                M=factor(letters[1 + (1:n) \%\% 3]))
+H$Z <- setcov(square(1))
+H$U <- with(H, as.im(U, as.rectangle(Z)))
+H$Y <- with(H, rpoispp(eval.im(exp(2+3*Z))))
+
+fit <- mppm(Y ~ Z + U + V, data=H)
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/msr.Rd b/man/msr.Rd
new file mode 100644
index 0000000..5f32bff
--- /dev/null
+++ b/man/msr.Rd
@@ -0,0 +1,146 @@
+\name{msr}
+\alias{msr}
+\title{
+  Signed or Vector-Valued Measure
+}
+\description{
+  Defines an object representing a signed measure or vector-valued
+  measure on a spatial domain.
+}
+\usage{
+  msr(qscheme, discrete, density, check=TRUE)
+}
+\arguments{
+  \item{qscheme}{
+    A quadrature scheme (object of class \code{"quad"} usually
+    extracted from a fitted point process model).
+  }
+  \item{discrete}{
+    Vector or matrix containing the values (masses) of the discrete component
+    of the measure, for each of the data points in \code{qscheme}.
+  }
+  \item{density}{
+    Vector or matrix containing values of the density of the
+    diffuse component of the measure, for each of the
+    quadrature points in \code{qscheme}.
+  }
+  \item{check}{
+    Logical. Whether to check validity of the arguments.
+  }
+}
+\details{
+  This function creates an object that represents a
+  signed or vector-valued \emph{measure} on the two-dimensional plane.
+  It is not normally called directly by the user.
+
+  A signed measure is a classical mathematical object
+  (Diestel and Uhl, 1977)
+  which can be visualised as a collection of electric charges, positive and/or
+  negative, spread over the plane. Electric charges may be
+  concentrated at specific points (atoms), or spread diffusely over a
+  region. 
+
+  An object of class \code{"msr"} represents a signed (i.e. real-valued)
+  or vector-valued measure in the \pkg{spatstat} package.
+
+  Spatial residuals for point process models
+  (Baddeley et al, 2005, 2008) take the form of a real-valued
+  or vector-valued measure. The function
+  \code{\link{residuals.ppm}} returns an object of
+  class \code{"msr"} representing the residual measure.
+
+  \code{msr} is the low-level creator function that
+  makes an object of class \code{"msr"} from raw data.
+  
+  The first argument \code{qscheme} is a quadrature scheme (object of
+  class \code{"quad"}). It is typically created by \code{\link{quadscheme}} or
+  extracted from a fitted point process model using
+  \code{\link{quad.ppm}}. A quadrature scheme contains both data points
+  and dummy points. The data points of \code{qscheme} are used as the locations
+  of the atoms of the measure. All quadrature points
+  (i.e. both data points and dummy points)
+  of \code{qscheme} are used as sampling points for the density
+  of the continuous component of the measure.
+
+  The argument \code{discrete} gives the values of the
+  atomic component of the measure for each \emph{data point} in \code{qscheme}.
+  It should be either a numeric vector with one entry for each
+  data point, or a numeric matrix with one row
+  for each data point. 
+
+  The argument \code{density} gives the values of the \emph{density}
+  of the diffuse component of the measure, at each
+  \emph{quadrature point} in \code{qscheme}.
+  It should be either a numeric vector with one entry for each
+  quadrature point, or a numeric matrix with one row
+  for each quadrature point. 
+
+  If both \code{discrete} and \code{density} are vectors
+  (or one-column matrices) then the result is a signed (real-valued) measure.
+  Otherwise, the result is a vector-valued measure, with the dimension
+  of the vector space being determined by the number of columns
+  in the matrices \code{discrete} and/or \code{density}.
+  (If one of these is a \eqn{k}-column matrix and the other
+  is a 1-column matrix, then the latter is replicated to \eqn{k} columns).
+  
+  The class \code{"msr"} has methods for \code{print},
+  \code{plot} and \code{[}. 
+  There is also a function \code{\link{Smooth.msr}} for smoothing a measure.
+}
+\value{
+  An object of class \code{"msr"} that can be plotted
+  by \code{\link{plot.msr}}.
+}
+\references{
+  Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J.
+  and Hazelton, M. (2005)
+  Residual analysis for spatial point processes.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{67}, 617--666.
+
+  Baddeley, A., \ifelse{latex}{\out{M\o ller}}{Moller}, J.
+  and Pakes, A.G. (2008) 
+  Properties of residuals for spatial point processes.
+  \emph{Annals of the Institute of Statistical Mathematics}
+  \bold{60}, 627--649.
+  
+  Diestel, J. and Uhl, J.J. Jr (1977)
+  \emph{Vector measures}.
+  Providence, RI, USA: American Mathematical Society.
+
+  Halmos, P.R. (1950) \emph{Measure Theory}. Van Nostrand.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{plot.msr}},
+  \code{\link{Smooth.msr}},
+  \code{\link{[.msr}},
+  \code{\link{with.msr}},
+  \code{\link{split.msr}},
+  \code{\link{Ops.msr}},
+  \code{\link{measureVariation}}.
+}
+\examples{
+   X <- rpoispp(function(x,y) { exp(3+3*x) })
+   fit <- ppm(X, ~x+y)
+   
+   rp <- residuals(fit, type="pearson")
+   rp
+
+   rs <- residuals(fit, type="score")
+   rs
+   colnames(rs)
+
+   # An equivalent way to construct the Pearson residual measure by hand
+   Q <- quad.ppm(fit)
+   lambda <- fitted(fit)
+   slam <- sqrt(lambda)
+   Z <- is.data(Q)
+   m <- msr(Q, discrete=1/slam[Z], density = -slam)
+   m
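+
+   # a sketch of the matrix case described in Details: two-column
+   # matrices yield a 2-dimensional vector-valued measure
+   # (these particular column values are chosen only for illustration)
+   mvec <- msr(Q, discrete=cbind(1/slam[Z], 0), density=cbind(-slam, lambda))
+   mvec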
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/mucosa.Rd b/man/mucosa.Rd
new file mode 100644
index 0000000..b41914f
--- /dev/null
+++ b/man/mucosa.Rd
@@ -0,0 +1,54 @@
+\name{mucosa}
+\alias{mucosa}
+\alias{mucosa.subwin}
+\docType{data}
+\title{
+  Cells in Gastric Mucosa
+}
+\description{
+  A bivariate inhomogeneous point pattern, giving the locations of
+  the centres of two types of cells in a cross-section of the
+  gastric mucosa of a rat.
+}
+\usage{data(mucosa)}
+\format{
+  An object of class \code{"ppp"}, see \code{\link{ppp.object}}.
+  This is a multitype point pattern with two types of points,
+  \code{ECL} and \code{other}.
+}
+\details{
+  This point pattern dataset gives the locations of 
+  cell centres in a cross-section of the gastric mucosa (mucous membrane of the
+  stomach) of a rat. The rectangular observation window has been scaled
+  to unit width. The lower edge of the window is closest to the outside
+  of the stomach.
+
+  The cells are classified into two types: \emph{ECL cells}
+  (enterochromaffin-like cells) and \emph{other} cells. There are 86 ECL cells
+  and 807 other cells in the dataset. 
+  ECL cells are a type of neuroendocrine cell which
+  synthesizes and secretes histamine.
+  One hypothesis of interest is whether the 
+  spatially-varying intensities of ECL cells and other cells are
+  proportional.
+
+  The data were originally collected by Dr Thomas Berntsen.
+  The data were discussed and analysed in \ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen (2004,
+  pp. 2, 169).
+
+  The associated object \code{mucosa.subwin} is the smaller window
+  to which the data were restricted for analysis by \ifelse{latex}{\out{M\o ller}}{Moller} and Waagepetersen.
+}
+\source{
+  Dr Thomas Berntsen and Prof Rasmus Waagepetersen.
+}
+\references{
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2004).
+  \emph{Statistical Inference and Simulation for Spatial Point Processes}.
+  Chapman and Hall/CRC.
+}
+\examples{
+  plot(mucosa, chars=c(1,3), cols=c("red", "green"))
+  plot(mucosa.subwin, add=TRUE, lty=3)
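+
+  # a sketch of the proportionality hypothesis mentioned in Details:
+  # if the two intensities are proportional, the estimated probability
+  # of each type (kernel estimate via relrisk) should be roughly constant
+  if(interactive()) plot(relrisk(mucosa, casecontrol=FALSE))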
+}
+\keyword{datasets}
diff --git a/man/multiplicity.ppp.Rd b/man/multiplicity.ppp.Rd
new file mode 100644
index 0000000..a50ffaf
--- /dev/null
+++ b/man/multiplicity.ppp.Rd
@@ -0,0 +1,77 @@
+\name{multiplicity.ppp}
+\alias{multiplicity}
+\alias{multiplicity.default}
+\alias{multiplicity.data.frame}
+\alias{multiplicity.ppp}
+\alias{multiplicity.ppx}
+\title{Count Multiplicity of Duplicate Points}
+\description{
+  Counts the number of duplicates for each point
+  in a spatial point pattern.
+}
+\usage{
+ multiplicity(x)
+
+ \method{multiplicity}{ppp}(x)
+
+ \method{multiplicity}{ppx}(x)
+
+ \method{multiplicity}{data.frame}(x)
+
+ \method{multiplicity}{default}(x)
+}
+\arguments{
+  \item{x}{
+    A spatial point pattern
+    (object of class \code{"ppp"} or \code{"ppx"})
+    or a vector, matrix or data frame.
+  }
+}
+\value{
+  A vector of integers (multiplicities) of length equal to the
+  number of points in \code{x}.
+}
+\details{
+  Two points in a point pattern are deemed to be identical
+  if their \eqn{x,y} coordinates are the same,
+  and their marks are also the same (if they carry marks).
+  The Examples section illustrates how it is possible for
+  a point pattern to contain a pair of identical points.
+
+  For each point in \code{x}, the function \code{multiplicity} counts how many
+  points are identical to it, and returns the vector of counts.
+
+  The argument \code{x} can also be a vector, a matrix or a data frame.
+  When \code{x} is a vector, \code{m <- multiplicity(x)} is a vector
+  of the same length as \code{x}, and \code{m[i]} is the
+  number of elements of \code{x} that are identical to \code{x[i]}.
+  When \code{x} is a matrix or data frame,
+  \code{m <- multiplicity(x)} is a vector
+  of length equal to the number of rows of \code{x}, and \code{m[i]}
+  is the number of rows of \code{x} that are identical to
+  the \code{i}th row.
+}
+\seealso{
+  \code{\link{ppp.object}},
+  \code{\link{duplicated.ppp}},
+  \code{\link{unique.ppp}}
+}
+\examples{
+   X <- ppp(c(1,1,0.5,1), c(2,2,1,2), window=square(3), check=FALSE)
+   m <- multiplicity(X)
+
+   # unique points in X, marked by their multiplicity
+   first <- !duplicated(X)
+   Y <- X[first] \%mark\% m[first]
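+
+   # multiplicity also accepts vectors and data frames, as described
+   # in Details (expected values shown in the comments)
+   multiplicity(c(3, 1, 3, 3, 2))                    # 3 1 3 3 1
+   multiplicity(data.frame(u=c(1,1,2), v=c(0,0,3)))  # 2 2 1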
+}
+
+\author{
+  \adrian,
+  \rolf
+  and Sebastian Meyer.
+}
+\keyword{spatial}
+\keyword{utilities}
+ 
diff --git a/man/murchison.Rd b/man/murchison.Rd
new file mode 100644
index 0000000..80fce29
--- /dev/null
+++ b/man/murchison.Rd
@@ -0,0 +1,108 @@
+\name{murchison}
+\alias{murchison}
+\docType{data}
+\title{
+  Murchison gold deposits
+}
+\description{
+  Data recording the spatial locations of gold deposits and associated
+  geological features in the Murchison area of Western Australia.
+  Extracted from a large scale (1:500,000) study of the
+  Murchison area by the Geological Survey of Western Australia
+  (Watkins and Hickman, 1990). The features recorded are
+  \itemize{
+    \item the locations of gold deposits;
+    \item the locations of geological faults;
+    \item the region that contains greenstone bedrock.
+  }
+  The study region is contained in a \eqn{330\times 400}{330 * 400} kilometre
+  rectangle. At this scale, gold deposits are points, i.e. their spatial
+  extent is negligible. 
+  Gold deposits in this region occur only in greenstone bedrock.
+  Geological faults can be observed reliably only within the same
+  region. However, some faults have been extrapolated
+  (by geological ``interpretation'') outside the greenstone boundary 
+  from information observed in the greenstone region.
+
+  These data were analysed by Foxall and Baddeley (2002) and Brown et al
+  (2002); see also Groves et al (2000), Knox-Robinson and Groves (1997).
+  The main aim is to predict the intensity of the
+  point pattern of gold deposits from the more easily observable fault
+  pattern.
+}
+\format{
+  \code{murchison} is a list with the following entries:
+  \describe{
+    \item{gold}{a point pattern (object of class \code{"ppp"})
+      representing the point pattern of gold deposits.
+      See \code{\link{ppp.object}} for details of the format.
+    }
+    \item{faults}{a line segment pattern (object of class \code{"psp"})
+      representing the geological faults.
+      See \code{\link{psp.object}} for details of the format.
+    }
+    \item{greenstone}{the greenstone bedrock region.
+      An object of class \code{"owin"}. Consists of multiple
+      irregular polygons with holes.
+    }
+  }
+  All coordinates are given in \bold{metres}.
+}
+\usage{
+ data(murchison)
+}
+\examples{
+  if(interactive()) {
+  data(murchison)
+  plot(murchison$greenstone, main="Murchison data", col="lightgreen")
+  plot(murchison$gold, add=TRUE, pch="+",col="blue")
+  plot(murchison$faults, add=TRUE, col="red")
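+
+  # a sketch of the stated aim of predicting gold-deposit intensity
+  # from the fault pattern: distance to the nearest fault (distfun)
+  # used as a spatial covariate in ppm
+  dfault <- distfun(murchison$faults)
+  fit <- ppm(murchison$gold ~ dfault)
+  print(fit)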
+  }
+}
+\source{
+  Data were kindly provided by Dr Carl Knox-Robinson of the
+  Department of Geology and Geophysics, University of Western Australia.
+  Permission to use the data is granted by Dr Tim Griffin,
+  Geological Survey of Western Australia and by Dr Knox-Robinson.
+  \emph{Please make appropriate acknowledgement} to
+  Watkins and Hickman (1990) and the Geological Survey of Western Australia.
+}
+\references{
+  Brown, W.M., Gedeon, T.D., Baddeley, A.J. and Groves, D.I. (2002)
+  Bivariate J-function and other graphical
+  statistical methods help select the best predictor
+  variables as inputs for a neural network method of
+  mineral prospectivity mapping.
+  In U. Bayer, H. Burger and W. Skala (eds.)
+  \emph{IAMG 2002: 8th Annual Conference of the
+    International Association for Mathematical Geology},
+  Volume 1, 2002. International Association of Mathematical Geology.
+  Pages 257--268.
+
+  Foxall, R. and Baddeley, A. (2002)
+  Nonparametric measures of association between a
+  spatial point process and a random set, with
+  geological applications. \emph{Applied Statistics} \bold{51}, 165--182.
+
+  Groves, D.I., Goldfarb, R.J., Knox-Robinson, C.M., Ojala, J., Gardoll,
+  S, Yun, G.Y. and Holyland, P. (2000)
+  Late-kinematic timing of orogenic gold deposits and significance for
+  computer-based exploration techniques with emphasis on the Yilgarn Block,
+  Western Australia.
+  \emph{Ore Geology Reviews}, \bold{17}, 1--38.
+
+  Knox-Robinson, C.M. and Groves, D.I. (1997)
+  Gold prospectivity mapping using a geographic information system
+  (GIS), with examples from the Yilgarn Block of Western Australia.
+  \emph{Chronique de la Recherche Miniere} \bold{529}, 127--138.
+
+  Watkins, K.P. and Hickman, A.H. (1990)
+  \emph{Geological evolution and mineralization of the Murchison Province,
+    Western Australia}.
+  Bulletin 137, Geological Survey of Western Australia. 267 pages.
+  Published by Department of Mines, Western Australia, 1990.
+  Available online from Department of Industry and Resources,
+  State Government of Western Australia, \code{www.doir.wa.gov.au}
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/nbfires.Rd b/man/nbfires.Rd
new file mode 100644
index 0000000..4e64f33
--- /dev/null
+++ b/man/nbfires.Rd
@@ -0,0 +1,225 @@
+\name{nbfires}
+\alias{nbfires}
+\alias{nbfires.extra}
+\alias{nbw.rect}
+\docType{data}
+\title{Point Patterns of New Brunswick Forest Fires}
+\description{
+Point patterns created from yearly records, provided by the New
+Brunswick Department of Natural Resources, of all fires falling
+under their jurisdiction for the years 1987 to 2003 inclusive
+(with the year 1988 omitted until further notice).
+}
+\usage{data(nbfires)}
+\format{
+Executing \code{data(nbfires)} gives access to three objects: \code{nbfires},
+\code{nbfires.extra} and \code{nbw.rect}.
+
+The object \code{nbfires} is a marked point pattern (an object of
+class \code{"ppp"}) consisting of all of the fires in the years
+1987 to 2003 inclusive, with the omission of 1988.  The marks
+consist of a data frame of auxiliary information about the fires;
+see \emph{Details.} Patterns for individual years can be extracted
+using the function \code{\link{split.ppp}()}.  (See \bold{Examples}.)
+
+The object \code{nbw.rect} is a rectangular window which covers
+central New Brunswick.  It is provided for use in illustrative and
+\sQuote{practice} calculations inasmuch as the use of a rectangular
+window simplifies some computations considerably.
+
+For conformity with other datasets, \code{nbfires.extra} is also
+provided. It is a list containing just the window \code{nbw.rect}.
+}
+\details{
+The coordinates of the fire locations were provided in terms of
+latitude and longitude, to the nearest minute of arc.  These were
+converted to New Brunswick stereographic projection coordinates
+(Thomson, Mephan and Steeves, 1977) which was the coordinate
+system in which the map of New Brunswick --- which constitutes the
+observation window for the pattern --- was obtained.  The conversion
+was done using a \code{C} program kindly provided by Jonathan
+Beaudoin of the Department of Geodesy and Geomatics, University of
+New Brunswick.
+
+Finally the data and window were rescaled since the use of the
+New Brunswick stereographic projection coordinate system resulted
+in having to deal with coordinates which are expressed as very
+large integers with a bewildering number of digits.  Amongst other
+things, these huge numbers tended to create very untidy axis labels
+on graphs.  The width of the bounding box of the window was made
+equal to 1000 units. In addition the lower left hand
+corner of this bounding box was shifted to the origin. The height
+of the bounding box was changed proportionately, resulting in a
+value of approximately 959.
+
+In the final dataset \code{nbfires}, one coordinate unit is equivalent to
+0.403716 kilometres. To convert the data to kilometres,
+use \code{rescale(nbfires)}.
+
+The window for the fire patterns comprises 6 polygonal components,
+consisting of mainland New Brunswick and the 5 largest islands.
+Some lakes which should form holes in the mainland component are
+currently missing; this problem may be remedied in future releases.
+The window was formed by \sQuote{simplifying} the map that was originally
+obtained.  The simplification consisted in reducing (using
+an interactive visual technique) the number of polygon edges in
+each component.  For instance the number of edges in the mainland
+component was reduced from over 138,000 to 500.
+
+For some purposes it is probably better to use a discretized
+(mask type) window.  See \bold{Examples}.
+
+Because of the coarseness of the coordinates of the original
+data (1 minute of longitude is approximately 1 kilometer at the
+latitude of New Brunswick), data entry errors, and the simplification
+of the observation window, many of the original fire locations
+appeared to be outside of the window.  This problem was addressed
+by shifting the location of the \sQuote{outsider} points slightly,
+or deleting them, as seemed appropriate.
+
+Note that the data contain duplicated points (two points at the
+same location). To determine which points are duplicates,
+use \code{\link{duplicated.ppp}}.
+To remove the duplication, use \code{\link{unique.ppp}}.
+
+The columns of the data frame comprising the marks of
+\code{nbfires} are:
+\describe{
+  \item{year}{
+    This a \emph{factor} with levels 1987, 1989,
+    \ldots, 2002, 2003.  Note that 1988 is not present in
+    the levels.
+  }
+  \item{fire.type}{
+    A factor with levels \code{forest},
+    \code{grass}, \code{dump}, and \code{other}.
+  }
+  \item{dis.date}{
+    The discovery date of the fire, which is the
+    nearest possible surrogate for the starting time
+    of the fire.  This is an object of class \code{POSIXct}
+    and gives the starting discovery time of the fire to
+    the nearest minute.
+  }
+  \item{dis.julian}{
+    The discovery date and time of the fire, expressed
+    in \sQuote{Julian days}, i.e. as a decimal fraction representing the number
+    of days since the beginning of the year (midnight 31 December).
+  }
+  \item{out.date}{
+    The date on which the fire was judged to be
+    \sQuote{out}. This is an object of class \code{POSIXct} and gives the
+    \sQuote{out} time of the fire to the nearest minute.
+  }
+  \item{out.julian}{
+    The date and time at which the fire was judged
+    to be \sQuote{out}, expressed in Julian days.
+  }
+  \item{cause}{
+    General cause of the fire.  This is a factor with
+    levels \code{unknown}, \code{rrds} (railroads), \code{misc}
+    (miscellaneous), \code{ltning} (lightning), \code{for.ind}
+    (forest industry), \code{incend} (incendiary), \code{rec}
+    (recreation), \code{resid} (resident), and \code{oth.ind}
+    (other industry).  Causes \code{unknown}, \code{ltning}, and
+    \code{incend} are supposedly designated as \sQuote{final} by the New Brunswick
+    Department of Natural Resources, meaning (it seems) \dQuote{that's
+    all there is to it}.  Other causes are apparently intended
+    to be refined by being combined with \dQuote{source of ignition}.
+    However cross-tabulating \code{cause} with \code{ign.src} ---
+    see below --- reveals that very often these three \sQuote{causes}
+    are associated with an \dQuote{ignition source} as well.
+  }
+  \item{ign.src}{
+    Source of ignition, a factor with levels
+    \code{cigs} (cigarette/match/pipe/ashes), \code{burn.no.perm}
+    (burning without a permit), \code{burn.w.perm} (burning with a
+    permit), \code{presc.burn} (prescribed burn), \code{wood.spark}
+    (wood spark), \code{mach.spark} (machine spark), \code{campfire},
+    \code{chainsaw}, \code{machinery}, \code{veh.acc} (vehicle
+    accident), \code{rail.acc} (railroad accident), \code{wheelbox}
+    (wheelbox on railcars), \code{hot.flakes} (hot flakes off
+    railcar wheels), \code{dump.fire} (fire escaping from a dump),
+    \code{ashes} (ashes, briquettes, burning garbage, etc.)
+  }
+  \item{fnl.size}{
+    The final size of the fire (area burned)
+    in hectares, to the nearest 10th hectare.
+  }
+}
+Note that due to data entry errors some of the \dQuote{out dates} and
+\dQuote{out times} in the original data sets were actually \emph{earlier}
+than the corresponding \dQuote{discovery dates} and \dQuote{discovery times}.
+In such cases all corresponding entries of the marks data frame
+(i.e. \code{dis.date}, \code{dis.julian}, \code{out.date}, and
+\code{out.julian}) were set equal to \code{NA}.  Also, some of the
+dates and times were missing (equal to \code{NA}) in the original
+data sets.
+
+The \sQuote{ignition source} data were given as integer codes
+in the original data sets.  The code book that I obtained
+gave interpretations for codes 1, 2, \ldots, 15.  However
+the data actually also contained codes of 0, 16, 17, 18, and in
+one instance 44.  These may simply be data entry errors.
+These uninterpretable values were assigned the level
+\code{unknown}.  Many of the years had most, or sometimes
+all, of the ignition source codes equal to 0 (hence turning
+out as \code{unknown}), and many of the years had many
+missing values as well.  These were also assigned the
+level \code{unknown}.  Of the 7108 fires in \code{nbfires},
+4354 had an \code{unknown} ignition source.  This variable
+is hence unlikely to be very useful.
+
+There are also anomalies between \code{cause} and \code{ign.src},
+e.g. \code{cause} being \code{unknown} but \code{ign.src}
+being \code{cigs}, \code{burn.no.perm}, \code{mach.spark},
+\code{hot.flakes}, \code{dump.fire} or \code{ashes}.  Particularly
+worrisome is the fact that the cause \code{ltning} (!!!) is
+associated with sources of ignition \code{cigs}, \code{burn.w.perm},
+\code{presc.burn}, and \code{wood.spark}.
+}
+\source{
+  The data were kindly provided by the New Brunswick Department
+  of Natural Resources.  Special thanks are due to Jefferey Betts for
+  a great deal of assistance.
+}
+\references{
+Turner, Rolf.
+Point patterns of forest fire locations.
+\emph{Environmental and Ecological Statistics}
+\bold{16} (2009) 197--223, doi:10.1007/s10651-007-0085-1.
+
+Thomson, D. B., Mephan, M. P., and Steeves, R. R. (1977)
+The stereographic double projection.
+Technical Report 46, University of New Brunswick,
+Fredericton, N. B., Canada
+URL: \code{gge.unb.ca/Pubs/Pubs.html}.
+}
+
+\examples{
+\dontrun{
+# Get the year 2000 data.
+X <- split(nbfires,"year")
+Y.00 <- X[["2000"]]
+# Plot all of the year 2000 data, marked by fire type.
+plot(Y.00,which.marks="fire.type")
+# Cut back to forest and grass fires.
+Y.00 <- Y.00[marks(Y.00)$fire.type \%in\% c("forest","grass")]
+# Plot the year 2000 forest and grass fires marked by fire duration time.
+stt  <- marks(Y.00)$dis.julian
+fin  <- marks(Y.00)$out.julian
+marks(Y.00) <- cbind(marks(Y.00),dur=fin-stt)
+plot(Y.00,which.marks="dur")
+# Look at just the rectangular subwindow (superimposed on the entire window).
+nbw.mask <- as.mask(Window(nbfires), dimyx=500)
+plot(nbw.mask, col=c("green", "white"))
+plot(Window(nbfires), border="red", add=TRUE)
+plot(Y.00[nbw.rect],use.marks=FALSE,add=TRUE)
+plot(nbw.rect,add=TRUE,border="blue")
+# Look at the K function for the year 2000 forest and grass fires.
+K.00 <- Kest(Y.00)
+plot(K.00)
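+# Rescale to kilometres, and count the duplicated points,
+# as described in Details.
+nbfires.km <- rescale(nbfires)
+sum(duplicated(nbfires))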
+}
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/nearest.raster.point.Rd b/man/nearest.raster.point.Rd
new file mode 100644
index 0000000..1c212c0
--- /dev/null
+++ b/man/nearest.raster.point.Rd
@@ -0,0 +1,67 @@
+\name{nearest.raster.point}
+\alias{nearest.raster.point}
+\title{Find Pixel Nearest to a Given Point}
+\description{
+  Given cartesian coordinates, find the nearest pixel.
+}
+\usage{
+ nearest.raster.point(x,y,w, indices=TRUE)
+}
+\arguments{
+  \item{x}{Numeric vector of \eqn{x} coordinates of any points}
+  \item{y}{Numeric vector of \eqn{y} coordinates of any points}
+  \item{w}{An image (object of class \code{"im"})
+    or a binary mask window (an object of class \code{"owin"}
+    of type \code{"mask"}).
+  }
+  \item{indices}{Logical flag indicating whether to return the
+    row and column indices, or the actual \eqn{x,y} coordinates.
+    }
+}
+\value{
+  If \code{indices=TRUE}, a 
+  list containing two vectors \code{rr} and \code{cc}
+  giving row and column positions (in the image matrix).
+  If \code{indices=FALSE}, a list containing
+  vectors \code{x} and \code{y} giving actual coordinates
+  of the pixels.
+}
+\details{
+  The argument \code{w} should be either a pixel image
+  (object of class \code{"im"}) or a window (an object of class
+  \code{"owin"}, see \code{\link{owin.object}} for details)
+  of type \code{"mask"}. 
+
+  The arguments \code{x} and \code{y} should be numeric vectors
+  of equal length. They are interpreted as the coordinates of
+  points in space. For each point \code{(x[i], y[i])}, the function
+  finds the nearest pixel in the grid of pixels for \code{w}.
+
+  If \code{indices=TRUE},
+  this function returns a list containing two vectors \code{rr} and
+  \code{cc} giving row and column positions (in the image matrix).
+  For the location \code{(x[i],y[i])} the nearest
+  pixel is at row \code{rr[i]} and column \code{cc[i]} of
+  the image.
+
+  If \code{indices=FALSE}, the function returns a list containing
+  two vectors \code{x} and \code{y} giving the actual coordinates
+  of the pixels.
+}
+\seealso{
+  \code{\link{owin.object}},
+  \code{\link{as.mask}}
+}
+\examples{
+  w <- owin(c(0,1), c(0,1), mask=matrix(TRUE, 100,100))  # 100 x 100 grid
+  nearest.raster.point(0.5, 0.3, w)
+  nearest.raster.point(0.5, 0.3, w, indices=FALSE)
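+
+  # a small sketch: the returned indices address the pixel matrix
+  # directly (w$m is the logical matrix of the binary mask)
+  ij <- nearest.raster.point(0.5, 0.3, w)
+  w$m[ij$rr, ij$cc]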
+}
+\author{
+  \adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/nearestsegment.Rd b/man/nearestsegment.Rd
new file mode 100644
index 0000000..c43b33b
--- /dev/null
+++ b/man/nearestsegment.Rd
@@ -0,0 +1,54 @@
+\name{nearestsegment}
+\alias{nearestsegment}
+\title{Find Line Segment Nearest to Each Point}
+\description{
+  Given a point pattern and a line segment pattern,
+  this function finds the nearest line segment for each point.
+}
+\usage{
+nearestsegment(X, Y)
+}
+\arguments{
+  \item{X}{A point pattern (object of class \code{"ppp"}).}
+  \item{Y}{A line segment pattern (object of class \code{"psp"}).}
+}
+\details{
+  The distance between a point \code{x} and a straight line segment \code{y} is
+  defined to be the shortest Euclidean distance between \code{x} and any
+  location on \code{y}. This algorithm first calculates the distance
+  from each point of \code{X} to
+  each segment of \code{Y}. Then it determines, for each point \code{x} in
+  \code{X}, which segment of \code{Y} is closest. The index of this
+  segment is returned.
+}
+\value{
+  Integer vector \code{v} (of length equal to the number of points in
+  \code{X}) identifying the nearest segment to each point.
+  If \code{v[i] = j}, then
+  \code{Y[j]} is the line segment lying closest to \code{X[i]}.
+}
+\author{
+  \adrian
+  and \rolf
+}
+\seealso{
+  \code{\link{project2segment}} to project each point of \code{X} to
+  a point lying on one of the line segments.
+
+  Use \code{\link{distmap.psp}} to 
+  identify the nearest line segment for each pixel in a grid.
+}
+\examples{
+  X <- runifpoint(3)
+  Y <- as.psp(matrix(runif(20), 5, 4), window=owin())
+  v <- nearestsegment(X,Y)
+  plot(Y)
+  plot(X, add=TRUE)
+  plot(X[1], add=TRUE, col="red")
+  plot(Y[v[1]], add=TRUE, lwd=2, col="red") 
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/nestsplit.Rd b/man/nestsplit.Rd
new file mode 100644
index 0000000..f1a8c70
--- /dev/null
+++ b/man/nestsplit.Rd
@@ -0,0 +1,85 @@
+\name{nestsplit}
+\alias{nestsplit}
+\title{
+  Nested Split
+}
+\description{
+  Applies two splitting operations to a point pattern,
+  producing a list of lists of patterns.
+}
+\usage{
+  nestsplit(X, \dots)
+}
+\arguments{
+  \item{X}{
+    Point pattern to be split.
+    Object of class \code{"ppp"}.
+  }
+  \item{\dots}{
+    Data determining the splitting factors or splitting regions.
+    See Details.
+  }
+}
+\details{
+  This function splits the point pattern \code{X} into several
+  sub-patterns using \code{\link{split.ppp}}, then splits each of the
+  sub-patterns into sub-sub-patterns using \code{\link{split.ppp}}
+  again.
+  The result is a hyperframe containing the sub-sub-patterns
+  and two factors indicating the grouping.
+
+  The arguments \code{\dots} determine the two splitting factors
+  or splitting regions. Each argument may be:
+  \itemize{
+    \item a factor (of length equal to the number of points in \code{X})
+    \item the name of a column of marks of \code{X} (provided this
+    column contains factor values)
+    \item a tessellation (class \code{"tess"})
+    \item a pixel image (class \code{"im"}) with factor values
+    \item a window (class \code{"owin"})
+    \item an argument of \code{\link{quadrats}} or \code{\link{tess}},
+    identified by name (in the form \code{name=value})
+  }
+  The arguments will be processed to yield a list of two
+  splitting factors/tessellations. The splits will be applied
+  to \code{X} consecutively to produce the sub-sub-patterns.
+}
+\value{
+  A hyperframe with three columns. The first column contains the
+  sub-sub-patterns. The second and third columns are factors
+  which identify the grouping according to the two splitting factors.
+}
+\author{
+  Original idea by Ute Hahn.
+  Code by
+  \adrian,
+  \rolf
+  and \ege.
+}
+\seealso{
+  \code{\link{split.ppp}},
+  \code{\link{quantess}}
+}
+\examples{
+  # factor and tessellation
+  Nft <- nestsplit(amacrine, marks(amacrine), quadrats(amacrine, 3, 1))
+  Ntf <- nestsplit(amacrine, quadrats(amacrine, 3, 1), marks(amacrine))
+  Ntf
+
+  # two factors
+  big <- with(marks(betacells), area > 300)
+  Nff <- nestsplit(betacells, "type", factor(big))
+
+  # two tessellations
+  Tx <- quantess(redwood, "x", 4)
+  Td <- dirichlet(runifpoint(5, Window(redwood)))
+  Ntt <- nestsplit(redwood, Td, Tx)
+  Ntt2 <- nestsplit(redwood, Td, ny=3)
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/nnclean.Rd b/man/nnclean.Rd
new file mode 100644
index 0000000..2692a4d
--- /dev/null
+++ b/man/nnclean.Rd
@@ -0,0 +1,132 @@
+\name{nnclean}
+\alias{nnclean}
+\alias{nnclean.ppp}
+\alias{nnclean.pp3}
+\title{
+  Nearest Neighbour Clutter Removal
+}
+\description{
+  Detect features in a 2D or 3D spatial point pattern
+  using nearest neighbour clutter removal.
+}
+\usage{
+  nnclean(X, k, ...)
+
+  \method{nnclean}{ppp}(X, k, ..., 
+                     edge.correct = FALSE, wrap = 0.1,
+                     convergence = 0.001, plothist = FALSE,
+                     verbose = TRUE, maxit = 50)
+
+  \method{nnclean}{pp3}(X, k, ..., 
+                     convergence = 0.001, plothist = FALSE,
+                     verbose = TRUE, maxit = 50)
+}
+\arguments{
+  \item{X}{
+    A two-dimensional spatial point pattern (object of class
+    \code{"ppp"}) or a three-dimensional point pattern
+    (object of class \code{"pp3"}).
+  }
+  \item{k}{
+    Degree of neighbour: \code{k=1} means nearest neighbour,
+    \code{k=2} means second nearest, etc.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{hist.default}} to control
+    the appearance of the histogram, if \code{plothist=TRUE}.
+  }
+  \item{edge.correct}{
+    Logical flag specifying whether periodic edge correction
+    should be performed (only implemented in 2 dimensions).
+  }
+  \item{wrap}{
+    Numeric value specifying the relative size of the margin
+    in which data will be replicated for the 
+    periodic edge correction (if \code{edge.correct=TRUE}).
+    A fraction of window width and window height.
+  }
+  \item{convergence}{
+    Relative tolerance threshold for testing convergence of EM algorithm.
+  }
+  \item{maxit}{
+    Maximum number of iterations for EM algorithm.
+  }
+  \item{plothist}{
+    Logical flag specifying whether to plot a diagnostic histogram
+    of the nearest neighbour distances and the fitted distribution.
+  }
+  \item{verbose}{
+    Logical flag specifying whether to print progress reports.
+  }
+}
+\details{
+  Byers and Raftery (1998) developed a technique for recognising
+  features in a spatial point pattern in the presence of
+  random clutter.
+
+  For each point in the pattern, the distance to the
+  \eqn{k}th nearest neighbour is computed. Then the E-M algorithm is
+  used to fit a mixture distribution to the
+  \eqn{k}th nearest neighbour distances.
+  The mixture components represent the feature and the clutter. The
+  mixture model can be used to classify each point as belonging to
+  one or the other component.
+
+  The function \code{nnclean} is generic, with methods for
+  two-dimensional point patterns (class \code{"ppp"})
+  and three-dimensional point patterns (class \code{"pp3"})
+  currently implemented.
+
+  The result is a point pattern (2D or 3D) with two additional
+  columns of marks:
+  \describe{
+    \item{class}{
+      A factor, with levels \code{"noise"} and \code{"feature"},
+      indicating the maximum likelihood classification of each point.
+    }
+    \item{prob}{
+      Numeric vector giving the estimated probabilities
+      that each point belongs to a feature.
+    }
+  }
+  The object also has extra information stored in attributes:
+  \code{"theta"} contains the fitted parameters
+  of the mixture model, \code{"info"} contains
+  information about the fitting procedure, and \code{"hist"} contains
+  the histogram structure returned from \code{\link{hist.default}}
+  if \code{plothist = TRUE}.
+}
+\value{
+  An object of the same kind as \code{X},
+  obtained by attaching marks to the points of \code{X}.
+
+  The object also has attributes, as described under Details.
+}
+\references{
+  Byers, S. and Raftery, A.E. (1998)
+  Nearest-neighbour clutter removal for estimating features
+  in spatial point processes.
+  \emph{Journal of the American Statistical Association}
+  \bold{93}, 577--584.
+}
+\author{
+  Original by Simon Byers and Adrian Raftery.
+  Adapted for \pkg{spatstat} by \adrian.
+}
+\seealso{
+  \code{\link{nndist}}, 
+  \code{\link{split.ppp}}, 
+  \code{\link{cut.ppp}}
+}
+\examples{
+  data(shapley)
+  X <- nnclean(shapley, k=17, plothist=TRUE)
+  plot(X, which.marks=1, chars=c(".", "+"), cols=1:2)
+  plot(X, which.marks=2, cols=function(x)hsv(0.2+0.8*(1-x),1,1))
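+  # the fitted mixture parameters are stored as an attribute (see Details)
+  attr(X, "theta")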
+  Y <- split(X, un=TRUE)
+  plot(Y, chars="+", cex=0.5)
+  marks(X) <- marks(X)$prob
+  plot(cut(X, breaks=3), chars=c(".", "+", "+"), cols=1:3)
+}
+\keyword{spatial}
+\keyword{classif}
diff --git a/man/nncorr.Rd b/man/nncorr.Rd
new file mode 100644
index 0000000..620553f
--- /dev/null
+++ b/man/nncorr.Rd
@@ -0,0 +1,212 @@
+\name{nncorr}
+\alias{nncorr}
+\alias{nnmean}
+\alias{nnvario}
+\title{Nearest-Neighbour Correlation Indices of Marked Point Pattern}
+\description{
+  Computes nearest-neighbour correlation indices of a marked point
+  pattern, including the nearest-neighbour mark product index
+  (default case of \code{nncorr}),
+  the nearest-neighbour mark index (\code{nnmean}),
+  and the nearest-neighbour variogram index (\code{nnvario}).
+}
+\usage{
+     nncorr(X,
+            f = function(m1, m2) { m1 * m2 },
+            k = 1,       
+            \dots,
+            use = "all.obs", method = c("pearson", "kendall", "spearman"),
+            denominator=NULL)
+     nnmean(X, k=1)
+     nnvario(X, k=1)
+}
+\arguments{
+  \item{X}{
+    The observed point pattern.
+    An object of class \code{"ppp"}.
+  }
+  \item{f}{
+    Function \eqn{f} used in the definition of the
+    nearest neighbour correlation. There is a sensible default
+    that depends on the type of marks of \code{X}.
+  }
+  \item{k}{
+    Integer. The \code{k}-th nearest neighbour of each point will be used.
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{f}.
+  }
+  \item{use,method}{
+    Arguments passed to the standard correlation function \code{\link{cor}}.
+  }
+  \item{denominator}{
+    Internal use only.
+  }
+}
+\details{
+  The nearest neighbour correlation index \eqn{\bar n_f}{nbar}
+  of a marked point process \eqn{X}
+  is a number measuring the dependence between the mark of a typical point
+  and the mark of its nearest neighbour. 
+
+  The command \code{nncorr} computes the nearest neighbour correlation index
+  based on any test function \code{f} provided by the user.
+  The default behaviour of \code{nncorr} is to compute the
+  nearest neighbour mark product index.
+  The commands \code{nnmean} and \code{nnvario} are
+  convenient abbreviations for other special choices of \code{f}.
+
+  In the default case, \code{nncorr(X)} computes three different
+  versions of the nearest-neighbour correlation index:
+  the unnormalised, normalised, and classical correlations.
+  \describe{
+    \item{unnormalised:}{
+      The \bold{unnormalised} nearest neighbour correlation (Stoyan and Stoyan,
+      1994, section 14.7) is defined as
+      \deqn{\bar n_f = E[f(M, M^\ast)]}{nbar[f] = E[f(M, M*)]}
+      where \eqn{E[]} denotes mean value,
+      \eqn{M} is the mark attached to a
+      typical point of the point process, and \eqn{M^\ast}{M*} is the mark
+      attached to its nearest neighbour (i.e. the nearest other point of the
+      point process).
+  
+      Here \eqn{f} is any function
+      \eqn{f(m_1,m_2)}{f(m1,m2)}
+      with two arguments which are possible marks of the pattern,
+      and which returns a nonnegative real value.
+      Common choices of \eqn{f} are:
+      for continuous real-valued marks,
+      \deqn{f(m_1,m_2) = m_1 m_2}{f(m1,m2)= m1 * m2}
+      for discrete marks (multitype point patterns),
+      \deqn{f(m_1,m_2) = 1(m_1 = m_2)}{f(m1,m2)= (m1 == m2)}
+      and for marks taking values in \eqn{[0,2\pi)}{[0,2 * pi)},
+      \deqn{f(m_1,m_2) = \sin(m_1 - m_2)}{f(m1,m2) = sin(m1-m2).}
+      For example, in the second case, the unnormalised nearest neighbour
+      correlation \eqn{\bar n_f}{nbar[f]} equals the proportion of
+      points in the pattern which have the same mark as their nearest
+      neighbour.
+
+      Note that \eqn{\bar n_f}{nbar[f]} is not a ``correlation''
+      in the usual statistical sense. It can take values greater than 1.
+    }
+    \item{normalised:}{
+      We can define a \bold{normalised} nearest neighbour correlation
+      by 
+      \deqn{\bar m_f = \frac{E[f(M,M^\ast)]}{E[f(M,M')]}}{mbar[f] = E[f(M,M*)]/E[f(M,M')]}
+      where again \eqn{M} is the
+      mark attached to a typical point, \eqn{M^\ast}{M*} is the mark
+      attached to its nearest neighbour, and \eqn{M'} is an independent
+      copy of \eqn{M} with the same distribution.
+      This normalisation is also not a ``correlation''
+      in the usual statistical sense, but is normalised so that 
+      the value 1 suggests ``lack of correlation'':
+      if the marks attached to the points of \code{X} are independent
+      and identically distributed, then
+      \eqn{\bar m_f = 1}{mbar[f] =  1}.
+      The interpretation of values larger or smaller than 1 depends
+      on the choice of function \eqn{f}.
+    }
+    \item{classical:}{
+      Finally if the marks of \code{X} are real numbers,
+      we can also compute the
+      \bold{classical} correlation, that is, the correlation coefficient
+      of the two random variables \eqn{M} and \eqn{M^\ast}{M*}.
+      The classical correlation has a value between \eqn{-1} and \eqn{1}.
+      Values close to \eqn{-1} or \eqn{1} indicate strong dependence between
+      the marks.
+    }
+  }
+
+  In the default case where \code{f} is not given,
+  \code{nncorr(X)} computes
+  \itemize{
+    \item
+    If the marks of \code{X} are real numbers, 
+    the unnormalised and normalised
+    versions of the nearest-neighbour product index
+    \eqn{E[M \, M^\ast]}{E[M * M*]},
+    and the classical correlation
+    between \eqn{M} and \eqn{M^\ast}{M*}.
+    \item
+    If the marks of \code{X} are factor valued,
+    the unnormalised and normalised
+    versions of the nearest-neighbour equality index
+    \eqn{P[M = M^\ast]}{P[M = M*]}.
+  }
+
+  The wrapper functions \code{nnmean} and \code{nnvario}
+  compute the correlation indices for two special choices of the
+  function \eqn{f(m_1,m_2)}{f(m1,m2)}.
+  \itemize{
+    \item
+    \code{nnmean} computes the correlation indices for 
+    \eqn{f(m_1,m_2) = m_1}{f(m1,m2) = m1}. The unnormalised index
+    is simply the mean value of the mark of the neighbour of a typical point,
+    \eqn{E[M^\ast]}{E[M*]}, while the normalised index is
+    \eqn{E[M^\ast]/E[M]}{E[M*]/E[M]}, the ratio of the mean mark of the
+    neighbour of a typical point to the mean mark of a typical point.
+    \item 
+    \code{nnvario} computes the correlation indices for 
+    \eqn{f(m_1,m_2) = (1/2) (m_1-m_2)^2}{f(m1,m2) = (1/2) * (m1-m2)^2}.
+  }
+
+  The argument \code{X} must be a point pattern (object of class
+  \code{"ppp"}) and must be a marked point pattern.
+  (The marks may be a data frame, containing several columns of mark variables;
+  each column is treated separately.)
+
+  If the argument \code{f} is given, it
+  must be a function, accepting two arguments \code{m1}
+  and \code{m2} which are vectors of equal length containing mark
+  values (of the same type as the marks of \code{X}).
+  It must return a vector of numeric
+  values of the same length as \code{m1} and \code{m2}.
+  The values must be non-negative.
+
+  The arguments \code{use} and \code{method} control
+  the calculation of the classical correlation using \code{\link{cor}},
+  as explained in the help file for \code{\link{cor}}.
+
+  Other arguments may be passed to \code{f} through the \code{...}
+  argument.
+  
+  This algorithm assumes that \code{X} can be treated
+  as a realisation of a stationary (spatially homogeneous) 
+  random spatial point process in the plane, observed through
+  a bounded window.
+  The window (which is specified in \code{X} as \code{Window(X)})
+  may have arbitrary shape.
+  Biases due to edge effects are
+  treated using the \sQuote{border method} edge correction.
+}
+\value{
+  Labelled vector of length 2 or 3
+  containing the unnormalised and normalised
+  nearest neighbour correlations, and the classical correlation
+  if appropriate.
+  Alternatively a matrix with 2 or 3 rows, containing this information
+  for each mark variable.
+}
+\examples{
+  data(finpines)
+  nncorr(finpines)
+  # heights of neighbouring trees are slightly negatively correlated
+
+  data(amacrine)
+  nncorr(amacrine)
+  # neighbouring cells are usually of different type
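+
+  # a user-supplied test function f, as described in Details
+  # (this particular f makes the index equal to twice the nnvario index)
+  nncorr(finpines, f=function(m1, m2) { (m1 - m2)^2 })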
+}
+\references{
+  Stoyan, D. and Stoyan, H. (1994)
+  \emph{Fractals, Random Shapes and Point Fields:
+  Methods of Geometrical Statistics}.
+  John Wiley and Sons.
+}
+\author{
+  \adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/nncross.Rd b/man/nncross.Rd
new file mode 100644
index 0000000..bda3298
--- /dev/null
+++ b/man/nncross.Rd
@@ -0,0 +1,194 @@
+\name{nncross}
+\alias{nncross}
+\alias{nncross.ppp}
+\alias{nncross.default}
+\title{Nearest Neighbours Between Two Patterns}
+\description{
+  Given two point patterns \code{X} and \code{Y},
+  finds the nearest neighbour in \code{Y} of each point of \code{X}.
+  Alternatively \code{Y} may be a line segment pattern.
+}
+\usage{
+  nncross(X, Y, \dots)
+
+  \method{nncross}{ppp}(X, Y,
+          iX=NULL, iY=NULL,
+          what = c("dist", "which"),
+          \dots,
+          k = 1,
+          sortby=c("range", "var", "x", "y"),
+          is.sorted.X = FALSE,
+          is.sorted.Y = FALSE)
+
+  \method{nncross}{default}(X, Y, \dots)
+}
+\arguments{
+  \item{X}{Point pattern (object of class \code{"ppp"}).}
+  \item{Y}{Either a point pattern (object of class \code{"ppp"})
+    or a line segment pattern (object of class \code{"psp"}).}
+  \item{iX, iY}{Optional identifiers, applicable only in the case where
+    \code{Y} is a point pattern, used to determine whether a point in
+    \code{X} is identical to a point in \code{Y}. See Details.
+  }
+  \item{what}{
+    Character string specifying what information should be returned.
+    Either the nearest neighbour distance (\code{"dist"}),
+    the identifier of the nearest neighbour (\code{"which"}),
+    or both.
+  }
+  \item{k}{
+    Integer, or integer vector. The algorithm will compute the distance to the
+    \code{k}th nearest neighbour.
+  }
+  \item{sortby}{
+    Determines which coordinate to use to sort the point patterns.
+    See Details.
+  }
+  \item{is.sorted.X, is.sorted.Y}{
+    Logical values attesting whether the point patterns \code{X} and
+    \code{Y} have been sorted. See Details.
+  }
+  \item{\dots}{Ignored.}
+}
+\details{
+  Given two point patterns \code{X} and \code{Y} this
+  function finds, for each point of \code{X}, 
+  the nearest point of \code{Y}. The distance between these points
+  is also computed.
+  If the argument \code{k} is specified, then the \code{k}-th nearest
+  neighbours will be found.
+
+  Alternatively if \code{X} is a point pattern and \code{Y} is a line
+  segment pattern, the function finds the nearest line segment to each point
+  of \code{X}, and computes the distance.
+
+  The return value is a data frame, with rows corresponding to
+  the points of \code{X}.  The first column gives the nearest neighbour
+  distances (i.e. the \code{i}th entry is the distance 
+  from the \code{i}th point of \code{X} to the nearest element of
+  \code{Y}). The second column gives the indices of the nearest
+  neighbours (i.e.\ the \code{i}th entry is the index of
+  the nearest element in \code{Y}.)
+  If \code{what="dist"} then only the vector of distances is returned.
+  If \code{what="which"} then only the vector of indices is returned.
+
+  The argument \code{k} may be an integer or an integer vector.
+  If it is a single integer, then the \code{k}-th nearest neighbours
+  are computed. If it is a vector, then the \code{k[i]}-th nearest
+  neighbours are computed for each entry \code{k[i]}. For example, setting
+  \code{k=1:3} will compute the nearest, second-nearest and
+  third-nearest neighbours. The result is a data frame.
+
+  Note that this function is not symmetric in \code{X} and \code{Y}.
+  To find the nearest neighbour in \code{X} of each point in \code{Y},
+  where \code{Y} is a point pattern, use \code{nncross(Y,X)}.
+
+  The arguments \code{iX} and \code{iY} are used when
+  the two point patterns \code{X} and \code{Y} have some points in
+  common.  In this situation \code{nncross(X, Y)} would return some zero
+  distances. To avoid this, attach a unique integer identifier to
+  each point, such that two points are identical if their
+  identifying numbers are equal. Let \code{iX} be the vector of
+  identifier values for the points in \code{X}, and \code{iY}
+  the vector of identifiers for points in \code{Y}. Then the code
+  will only compare two points if they have different values of the
+  identifier. See the Examples.
+}
+\section{Sorting data and pre-sorted data}{
+  Read this section if you care about the speed of computation.
+  
+  For efficiency, the algorithm sorts the point patterns \code{X} and \code{Y}
+  into increasing order of the \eqn{x} coordinate
+  or increasing order of the \eqn{y} coordinate.
+  Sorting is only an intermediate step;
+  it does not affect the output, which is always given in the same
+  order as the original data.
+  
+  By default (if \code{sortby="range"}),
+  the sorting will occur on the coordinate that has the larger range of
+  values (according to the frame of the enclosing window of \code{Y}).
+  If \code{sortby="var"}, sorting will occur on the coordinate that
+  has the greater variance (in the pattern \code{Y}).
+  Setting \code{sortby="x"} or \code{sortby = "y"} will specify that
+  sorting should occur on the \eqn{x} or \eqn{y} coordinate, respectively.
+
+  If the point pattern \code{X} is already
+  sorted, then the corresponding argument \code{is.sorted.X}
+  should be set to \code{TRUE}, and \code{sortby} should be set
+  equal to \code{"x"} or \code{"y"} to indicate which coordinate
+  is sorted.
+
+  Similarly if \code{Y} is already sorted, then \code{is.sorted.Y}
+  should be set to \code{TRUE}, and \code{sortby} should be set
+  equal to \code{"x"} or \code{"y"} to indicate which coordinate
+  is sorted.
+
+  If both \code{X} and \code{Y} are sorted \emph{on the same coordinate
+  axis} then both \code{is.sorted.X} and \code{is.sorted.Y}
+  should be set to \code{TRUE}, and \code{sortby} should be set
+  equal to \code{"x"} or \code{"y"} to indicate which coordinate
+  is sorted.  
+}
+\value{
+  A data frame, or a vector if the data frame would contain only one column.
+  
+  By default (if \code{what=c("dist", "which")} and \code{k=1})
+  a data frame with two columns:
+  \item{dist}{Nearest neighbour distance}
+  \item{which}{Nearest neighbour index in \code{Y}}
+
+  If \code{what="dist"} and \code{k=1}, a vector of nearest neighbour distances.
+
+  If \code{what="which"} and \code{k=1}, a vector of nearest neighbour
+  indices.
+
+  If \code{k} is specified, the result is a data frame with
+  columns containing the \code{k}-th nearest neighbour distances
+  and/or nearest neighbour indices.
+}
+\seealso{
+  \code{\link{nndist}} for nearest neighbour
+  distances in a single point pattern.
+}
+\examples{
+  # two different point patterns
+  X <- runifpoint(15)
+  Y <- runifpoint(20)
+  N <- nncross(X,Y)$which
+  # note that length(N) = 15
+  plot(superimpose(X=X,Y=Y), main="nncross", cols=c("red","blue"))
+  arrows(X$x, X$y, Y[N]$x, Y[N]$y, length=0.15)
+
+  # third-nearest neighbour
+  NXY <- nncross(X, Y, k=3)
+  NXY[1:3,]
+  # second and third nearest neighbours
+  NXY <- nncross(X, Y, k=2:3)
+  NXY[1:3,]
+
+  # two patterns with some points in common
+  Z <- runifpoint(50)
+  X <- Z[1:30]
+  Y <- Z[20:50]
+  iX <- 1:30
+  iY <- 20:50
+  N <- nncross(X,Y, iX, iY)$which
+  N <- nncross(X,Y, iX, iY, what="which") #faster
+  plot(superimpose(X=X, Y=Y), main="nncross", cols=c("red","blue"))
+  arrows(X$x, X$y, Y[N]$x, Y[N]$y, length=0.15)
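+
+  # pre-sorted data, as described under
+  # 'Sorting data and pre-sorted data' (a speed-oriented sketch)
+  Xs <- X[order(coords(X)$x)]
+  Ys <- Y[order(coords(Y)$x)]
+  N2 <- nncross(Xs, Ys, sortby="x", is.sorted.X=TRUE, is.sorted.Y=TRUE)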
+
+  # point pattern and line segment pattern
+  X <- runifpoint(15)
+  Y <- rpoisline(10)
+  N <- nncross(X,Y)
+}
+\author{
+  \adrian,
+  \rolf,
+  and Jens Oehlschlaegel
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/nncross.lpp.Rd b/man/nncross.lpp.Rd
new file mode 100644
index 0000000..414e975
--- /dev/null
+++ b/man/nncross.lpp.Rd
@@ -0,0 +1,127 @@
+\name{nncross.lpp}
+\alias{nncross.lpp}
+\title{Nearest Neighbours on a Linear Network}
+\description{
+  Given two point patterns \code{X} and \code{Y} on a linear network,
+  finds the nearest neighbour in \code{Y} of each point of \code{X}
+  using the shortest path in the network.
+}
+\usage{
+  \method{nncross}{lpp}(X, Y,
+          iX=NULL, iY=NULL,
+          what = c("dist", "which"),
+          \dots,
+          k = 1,
+          method="C")
+}
+\arguments{
+  \item{X,Y}{
+    Point patterns on a linear network (objects of class \code{"lpp"}).
+    They must lie on the \emph{same} linear network.
+  }
+  \item{iX, iY}{
+    Optional identifiers, used to determine whether a point in
+    \code{X} is identical to a point in \code{Y}. See Details.
+  }
+  \item{what}{
+    Character string specifying what information should be returned.
+    Either the nearest neighbour distance (\code{"dist"}),
+    the identifier of the nearest neighbour (\code{"which"}),
+    or both.
+  }
+  \item{\dots}{Ignored.}
+  \item{k}{
+    Integer, or integer vector. The algorithm will compute the distance to the
+    \code{k}th nearest neighbour, for each value of \code{k}.
+  }
+  \item{method}{
+    Internal use only.
+  }
+}
+\details{
+  Given two point patterns \code{X} and \code{Y} on the same linear
+  network, this function finds, for each point of \code{X}, 
+  the nearest point of \code{Y}, measuring distance by the shortest path
+  in the network. The distance between these points
+  is also computed.
+
+  The return value is a data frame, with rows corresponding to
+  the points of \code{X}.  The first column gives the nearest neighbour
+  distances (i.e. the \code{i}th entry is the distance 
+  from the \code{i}th point of \code{X} to the nearest element of
+  \code{Y}). The second column gives the indices of the nearest
+  neighbours (i.e.\ the \code{i}th entry is the index of
+  the nearest element in \code{Y}.)
+  If \code{what="dist"} then only the vector of distances is returned.
+  If \code{what="which"} then only the vector of indices is returned.
+
+  Note that this function is not symmetric in \code{X} and \code{Y}.
+  To find the nearest neighbour in \code{X} of each point in \code{Y},
+  use \code{nncross(Y,X)}.
+
+  The arguments \code{iX} and \code{iY} are used when
+  the two point patterns \code{X} and \code{Y} have some points in
+  common.  In this situation \code{nncross(X, Y)} would return some zero
+  distances. To avoid this, attach a unique integer identifier to
+  each point, such that two points are identical if their
+  identifying numbers are equal. Let \code{iX} be the vector of
+  identifier values for the points in \code{X}, and \code{iY}
+  the vector of identifiers for points in \code{Y}. Then the code
+  will only compare two points if they have different values of the
+  identifier. See the Examples.
+
+  The \code{k}th nearest neighbour may be undefined, for example
+  if there are fewer than \code{k+1} points in the dataset, or if
+  the linear network is not connected.
+  In this case, the \code{k}th nearest neighbour distance is infinite.
+}
+\value{
+  By default (if \code{what=c("dist", "which")} and \code{k=1})
+  a data frame with two columns:
+  \item{dist}{Nearest neighbour distance}
+  \item{which}{Nearest neighbour index in \code{Y}}
+
+  If \code{what="dist"}, a vector of nearest neighbour distances.
+
+  If \code{what="which"}, a vector of nearest neighbour indices.
+
+  If \code{k} is a vector of integers, the result is a matrix
+  with one row for each point in \code{X},
+  giving the distances and/or indices of the \code{k}th nearest
+  neighbours in \code{Y}.
+}
+\seealso{
+  \code{\link{nndist.lpp}} for nearest neighbour
+  distances in a single point pattern.
+
+  \code{\link{nnwhich.lpp}} to identify which points are nearest
+  neighbours in a single point pattern.
+}
+\examples{
+  # two different point patterns
+  X <- runiflpp(3, simplenet)
+  Y <- runiflpp(5, simplenet)
+  nn <- nncross(X,Y)
+  nn
+  plot(simplenet, main="nncross")
+  plot(X, add=TRUE, cols="red")
+  plot(Y, add=TRUE, cols="blue", pch=16)
+  XX <- as.ppp(X)
+  YY <- as.ppp(Y)
+  i <- nn$which
+  arrows(XX$x, XX$y, YY[i]$x, YY[i]$y, length=0.15)
+
+  # nearest and second-nearest neighbours
+  nncross(X, Y, k=1:2)
+
+  # two patterns with some points in common
+  X <- Y[1:2]
+  iX <- 1:2
+  iY <- 1:5
+  nncross(X,Y, iX, iY)
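+  # with the identifiers omitted, the shared points give zero distances
+  nncross(X, Y)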
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/nncross.pp3.Rd b/man/nncross.pp3.Rd
new file mode 100644
index 0000000..85dbf4a
--- /dev/null
+++ b/man/nncross.pp3.Rd
@@ -0,0 +1,174 @@
+\name{nncross.pp3}
+\alias{nncross.pp3}
+\title{Nearest Neighbours Between Two Patterns in 3D}
+\description{
+  Given two point patterns \code{X} and \code{Y} in three dimensions,
+  finds the nearest neighbour in \code{Y} of each point of \code{X}.
+}
+\usage{
+  \method{nncross}{pp3}(X, Y,
+          iX=NULL, iY=NULL,
+          what = c("dist", "which"),
+          \dots,
+          k = 1,
+          sortby=c("range", "var", "x", "y", "z"),
+          is.sorted.X = FALSE,
+          is.sorted.Y = FALSE)
+}
+\arguments{
+  \item{X,Y}{Point patterns in three dimensions
+    (objects of class \code{"pp3"}).}
+  \item{iX, iY}{Optional identifiers, 
+    used to determine whether a point in
+    \code{X} is identical to a point in \code{Y}. See Details.
+  }
+  \item{what}{
+    Character string specifying what information should be returned.
+    Either the nearest neighbour distance (\code{"dist"}),
+    the identifier of the nearest neighbour (\code{"which"}),
+    or both.
+  }
+  \item{k}{
+    Integer, or integer vector. The algorithm will compute the distance to the
+    \code{k}th nearest neighbour.
+  }
+  \item{sortby}{
+    Determines which coordinate to use to sort the point patterns.
+    See Details.
+  }
+  \item{is.sorted.X, is.sorted.Y}{
+    Logical values attesting whether the point patterns \code{X} and
+    \code{Y} have been sorted. See Details.
+  }
+  \item{\dots}{Ignored.}
+}
+\details{
+  Given two point patterns \code{X} and \code{Y} in three dimensions,
+  this function finds, for each point of \code{X}, 
+  the nearest point of \code{Y}. The distance between these points
+  is also computed.
+  If the argument \code{k} is specified, then the \code{k}-th nearest
+  neighbours will be found.
+
+  The return value is a data frame, with rows corresponding to
+  the points of \code{X}.  The first column gives the nearest neighbour
+  distances (i.e. the \code{i}th entry is the distance 
+  from the \code{i}th point of \code{X} to the nearest element of
+  \code{Y}). The second column gives the indices of the nearest
+  neighbours (i.e.\ the \code{i}th entry is the index of
+  the nearest element in \code{Y}).
+  If \code{what="dist"} then only the vector of distances is returned.
+  If \code{what="which"} then only the vector of indices is returned.
+
+  The argument \code{k} may be an integer or an integer vector.
+  If it is a single integer, then the \code{k}-th nearest neighbours
+  are computed. If it is a vector, then the \code{k[i]}-th nearest
+  neighbours are computed for each entry \code{k[i]}. For example, setting
+  \code{k=1:3} will compute the nearest, second-nearest and
+  third-nearest neighbours. The result is a data frame.
+
+  Note that this function is not symmetric in \code{X} and \code{Y}.
+  To find the nearest neighbour in \code{X} of each point in \code{Y},
+  use \code{nncross(Y,X)}.
+
+  The arguments \code{iX} and \code{iY} are used when
+  the two point patterns \code{X} and \code{Y} have some points in
+  common.  In this situation \code{nncross(X, Y)} would return some zero
+  distances. To avoid this, attach a unique integer identifier to
+  each point, such that two points are identical if their
+  identifying numbers are equal. Let \code{iX} be the vector of
+  identifier values for the points in \code{X}, and \code{iY}
+  the vector of identifiers for points in \code{Y}. Then the code
+  will only compare two points if they have different values of the
+  identifier. See the Examples.
+}
+\section{Sorting data and pre-sorted data}{
+  Read this section if you care about the speed of computation.
+  
+  For efficiency, the algorithm sorts both
+  the point patterns \code{X} and \code{Y}
+  into increasing order of the \eqn{x} coordinate,
+  or both into increasing order of the \eqn{y} coordinate,
+  or both into increasing order of the \eqn{z} coordinate.
+  Sorting is only an intermediate step;
+  it does not affect the output, which is always given in the same
+  order as the original data.
+  
+  By default (if \code{sortby="range"}),
+  the sorting will occur on the coordinate that has the largest range of
+  values (according to the frame of the enclosing window of \code{Y}).
+  If \code{sortby = "var"}), sorting will occur on the coordinate that
+  has the greater variance (in the pattern \code{Y}).
+  Setting \code{sortby="x"} or \code{sortby = "y"}
+  or \code{sortby = "z"} will specify that
+  sorting should occur on the \eqn{x}, \eqn{y} or \eqn{z} coordinate,
+  respectively.
+
+  If the point pattern \code{X} is already
+  sorted, then the corresponding argument \code{is.sorted.X}
+  should be set to \code{TRUE}, and \code{sortby} should be set
+  equal to \code{"x"}, \code{"y"} or \code{"z"} to indicate which coordinate
+  is sorted.
+
+  Similarly if \code{Y} is already sorted, then \code{is.sorted.Y}
+  should be set to \code{TRUE}, and \code{sortby} should be set
+  equal to \code{"x"}, \code{"y"} or \code{"z"} to indicate which coordinate
+  is sorted.
+
+  If both \code{X} and \code{Y} are sorted \emph{on the same coordinate
+  axis} then both \code{is.sorted.X} and \code{is.sorted.Y}
+  should be set to \code{TRUE}, and \code{sortby} should be set
+  equal to \code{"x"}, \code{"y"} or \code{"z"} to indicate which coordinate
+  is sorted.  
+}
+\value{
+  A data frame, or a vector if the data frame would contain only one column.
+  
+  By default (if \code{what=c("dist", "which")} and \code{k=1})
+  the result is a data frame with two columns:
+  \item{dist}{Nearest neighbour distance}
+  \item{which}{Nearest neighbour index in \code{Y}}
+
+  If \code{what="dist"} and \code{k=1}, a vector of nearest neighbour distances.
+
+  If \code{what="which"} and \code{k=1}, a vector of nearest neighbour
+  indices.
+
+  If \code{k} is a vector of integers, the result is a data frame with
+  columns containing the \code{k}-th nearest neighbour distances
+  and/or nearest neighbour indices.
+}
+\seealso{
+  \code{\link{nndist}} for nearest neighbour
+  distances in a single point pattern.
+}
+\examples{
+  # two different point patterns
+  X <- pp3(runif(10), runif(10), runif(10), box3(c(0,1)))
+  Y <- pp3(runif(20), runif(20), runif(20), box3(c(0,1)))
+  N <- nncross(X,Y)$which
+  N <- nncross(X,Y, what="which") #faster
+  # note that length(N) = 10
+
+  # k-nearest neighbours
+  N3 <- nncross(X, Y, k=1:3)
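+
+  # exploiting pre-sorted data (a sketch; can speed up large computations)
+  Xs <- X[order(coords(X)$x)]
+  Ys <- Y[order(coords(Y)$x)]
+  Ns <- nncross(Xs, Ys, what="which", sortby="x",
+                is.sorted.X=TRUE, is.sorted.Y=TRUE)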
+
+  # two patterns with some points in common
+  Z <- pp3(runif(20), runif(20), runif(20), box3(c(0,1)))
+  X <- Z[1:15]
+  Y <- Z[10:20]
+  iX <- 1:15
+  iY <- 10:20
+  N <- nncross(X,Y, iX, iY, what="which")
+
+}
+\author{
+  \adrian,
+  \rolf
+  and Jens Oehlschlaegel
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/nndensity.Rd b/man/nndensity.Rd
new file mode 100644
index 0000000..eeeda19
--- /dev/null
+++ b/man/nndensity.Rd
@@ -0,0 +1,90 @@
+\name{nndensity.ppp}
+\alias{nndensity}
+\alias{nndensity.ppp}
+\title{
+  Estimate Intensity of Point Pattern Using Nearest Neighbour Distances
+}
+\description{
+  Estimates the intensity of a point pattern
+  using the distance from each spatial location
+  to the \code{k}th nearest data point.
+}
+\usage{
+nndensity(x, ...)
+
+\method{nndensity}{ppp}(x, k, ..., verbose = TRUE)
+}
+\arguments{
+  \item{x}{
+    A point pattern (object of class \code{"ppp"})
+    or some other spatial object.
+  }
+  \item{k}{
+    Integer. The distance to the \code{k}th nearest data point
+    will be computed. There is a sensible default.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{nnmap}} and \code{\link{as.mask}}
+    controlling the pixel resolution.
+  }
+  \item{verbose}{
+    Logical. If \code{TRUE}, print the value of \code{k}
+    when it is automatically selected. If \code{FALSE}, remain silent.
+  }
+}
+\details{
+  This function computes a quick estimate of the intensity of the point
+  process that generated the point pattern \code{x}.
+
+  For each spatial location \eqn{s}, let \eqn{d(s)} be the distance from \eqn{s}
+  to the \eqn{k}-th nearest point in the dataset \code{x}.
+  If the data came from a homogeneous
+  Poisson process with intensity \eqn{\lambda}{lambda},
+  then \eqn{\pi d(s)^2}{pi * d(s)^2} would follow a
+  Gamma distribution with shape \eqn{k} and mean
+  \eqn{k/\lambda}{k/lambda}, and the maximum likelihood estimate of
+  \eqn{\lambda}{lambda} would be \eqn{k/(\pi d(s)^2)}{k/(pi * d(s)^2)}.
+  This is the estimate computed by \code{nndensity},
+  apart from an edge effect correction.
+
+  This estimator of intensity is relatively fast to compute, 
+  and is spatially adaptive
+  (so that it can handle wide variation in the intensity
+  function). However, it implicitly assumes the points are independent,
+  so it does not perform well if the pattern is strongly clustered
+  or strongly inhibited.
+
+  The value of \code{k} should be greater than 1 in order to avoid
+  infinite peaks in the intensity estimate around each data point.
+  The default value of \code{k} is the square root of the number of
+  points in \code{x}, which seems to work well in many cases.
+  
+  The window of \code{x} is digitised using \code{\link{as.mask}}
+  and the values \eqn{d(s)} are computed using \code{\link{nnmap}}.
+  To control the pixel resolution, see \code{\link{as.mask}}.
+}
+\value{
+  A pixel image (object of class \code{"im"}) giving the
+  estimated intensity of the point process at each spatial location.
+  Pixel values are intensities (number of points per unit area).
+}
+\references{
+  NEED REFERENCES. TRY CRESSIE 
+}
+\seealso{
+ \code{\link{density.ppp}},
+ \code{\link{intensity}}
+ for alternative estimates of point process intensity.
+}
+\examples{
+   plot(nndensity(swedishpines))
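+   # the uncorrected estimate can be reproduced by hand (a sketch;
+   # assuming k=5 here, and ignoring the edge effect correction)
+   D <- nnmap(swedishpines, k=5, what="dist")
+   plot(eval.im(5/(pi * D^2)))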
+}
+\author{
+  \adrian and \rolf
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/nndist.Rd b/man/nndist.Rd
new file mode 100644
index 0000000..45b88ae
--- /dev/null
+++ b/man/nndist.Rd
@@ -0,0 +1,186 @@
+\name{nndist}
+\alias{nndist}
+\alias{nndist.ppp}
+\alias{nndist.default}
+\title{Nearest neighbour distances}
+\description{
+  Computes the distance from each point to its nearest neighbour
+  in a point pattern. Alternatively computes the distance to the
+  second nearest neighbour, or third nearest, etc.
+}
+\usage{
+  nndist(X, \dots)
+  \method{nndist}{ppp}(X, \dots, k=1, by=NULL, method="C")
+  \method{nndist}{default}(X, Y=NULL, \dots, k=1, by=NULL, method="C")
+}
+\arguments{
+  \item{X,Y}{
+    Arguments specifying the locations of
+    a set of points.
+    For \code{nndist.ppp}, the argument \code{X} should be a point
+    pattern (object of class \code{"ppp"}).
+    For \code{nndist.default}, typically \code{X} and \code{Y} would be
+    numeric vectors of equal length. Alternatively \code{Y} may be
+    omitted and \code{X} may be
+    a list with two components \code{x} and \code{y},
+    or a matrix with two columns.
+  }
+  \item{\dots}{
+    Ignored by \code{nndist.ppp}
+    and \code{nndist.default}.
+  }
+  \item{k}{
+    Integer, or integer vector. The algorithm will compute the distance to the
+    \code{k}th nearest neighbour. 
+  }
+  \item{by}{
+    Optional. A factor, which separates \code{X} into groups.
+    The algorithm will compute the distance to
+    the nearest point in each group. 
+  }
+  \item{method}{String specifying which method of calculation to use.
+    Values are \code{"C"} and \code{"interpreted"}.
+  }
+}
+\value{
+  Numeric vector or matrix containing the 
+  nearest neighbour distances for each point.
+
+  If \code{k = 1} (the default), the return value is a
+  numeric vector \code{v} such that \code{v[i]} is the
+  nearest neighbour distance for the \code{i}th data point.
+  
+  If \code{k} is a single integer, then the return value is a
+  numeric vector \code{v} such that \code{v[i]} is the
+  \code{k}th nearest neighbour distance for the
+  \code{i}th data point.
+
+  If \code{k} is a vector, then the return value is a
+  matrix \code{m} such that \code{m[i,j]} is the
+  \code{k[j]}th nearest neighbour distance for the
+  \code{i}th data point.
+
+  If the argument \code{by} is given, then the result is a data frame
+  containing the distances described above, from each point of \code{X},
+  to the nearest point in each subset of \code{X}
+  defined by the factor \code{by}.
+}
+\details{
+  This function computes the Euclidean distance from each point
+  in a point pattern to its nearest neighbour (the nearest other
+  point of the pattern). If \code{k} is specified, it computes the
+  distance to the \code{k}th nearest neighbour.
+
+  The function \code{nndist} is generic, with
+  a method for point patterns (objects of class \code{"ppp"}),
+  and a default method for coordinate vectors.
+  There is also a method for line segment patterns, \code{\link{nndist.psp}}.
+
+  The method for point patterns expects a single
+  point pattern argument \code{X} and returns the vector of its
+  nearest neighbour distances.
+
+  The default method expects that \code{X} and \code{Y} will determine
+  the coordinates of a set of points. Typically \code{X} and
+  \code{Y} would be numeric vectors of equal length. Alternatively
+  \code{Y} may be omitted and \code{X} may be a list with two components
+  named \code{x} and \code{y}, or a matrix or data frame with two columns.
+  
+  The argument \code{k} may be a single integer, or an integer vector.
+  If it is a vector, then the \eqn{k}th nearest neighbour distances are
+  computed for each value of \eqn{k} specified in the vector.
+
+  If the argument \code{by} is given, it should be a \code{factor},
+  of length equal to the number of points in \code{X}.
+  This factor effectively partitions \code{X} into subsets,
+  each subset associated with one of the levels of the factor \code{by}.
+  The algorithm will then compute, for each point of \code{X},
+  the distance to the nearest neighbour \emph{in each subset}.
+
+  The argument \code{method} is not normally used. It is
+  retained only for checking the validity of the software.
+  If \code{method = "interpreted"} then the distances are
+  computed using interpreted R code only. If \code{method="C"}
+  (the default) then C code is used. 
+  The C code is faster by two to three orders of magnitude
+  and uses much less memory.
+
+  If there is only one point (if \code{X} has length 1),
+  then a nearest neighbour distance of \code{Inf} is returned.
+  If there are no points (if \code{X} has length zero),
+  a numeric vector of length zero is returned.
+
+  To identify \emph{which} point is the nearest neighbour of a given point,
+  use \code{\link{nnwhich}}.
+
+  To use the nearest neighbour distances for statistical inference,
+  it is often advisable to use the edge-corrected empirical distribution,
+  computed by \code{\link{Gest}}.
+
+  To find the nearest neighbour distances from one point pattern
+  to another point pattern, use \code{\link{nncross}}.
+}
+\section{Nearest neighbours of each type}{
+  If \code{X} is a multitype point pattern 
+  and \code{by=marks(X)}, then the algorithm will compute,
+  for each point of \code{X}, the distance to the nearest neighbour
+  of each type. See the Examples.
+
+  To find the minimum distance from \emph{any} point of type \code{i}
+  to the nearest point of type \code{j}, for all combinations of \code{i} and
+  \code{j}, use the \R function \code{\link[stats]{aggregate}} as
+  suggested in the Examples.
+}
+\section{Warnings}{
+  An infinite or \code{NA} value is returned if the
+  distance is not defined (e.g. if there is only one point
+  in the point pattern).
+}
+\seealso{
+  \code{\link{nndist.psp}},
+  \code{\link{pairdist}},
+  \code{\link{Gest}},
+  \code{\link{nnwhich}},
+  \code{\link{nncross}}.
+}
+\examples{
+   data(cells)
+   # nearest neighbours
+   d <- nndist(cells)
+
+   # second nearest neighbours
+   d2 <- nndist(cells, k=2)
+
+   # first, second and third nearest
+   d1to3 <- nndist(cells, k=1:3)
+
+   x <- runif(100)
+   y <- runif(100)
+   d <- nndist(x, y)
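+   # a matrix with two columns is also accepted
+   d <- nndist(cbind(x, y))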
+
+   # Stienen diagram
+   plot(cells \%mark\% (nndist(cells)/2), markscale=1)
+
+   # distance to nearest neighbour of each type
+   nnda <- nndist(ants, by=marks(ants)) 
+   head(nnda)
+   # For nest number 1, the nearest Cataglyphis nest is 87.32125 units away
+
+   # Use of 'aggregate':
+   # minimum distance between each pair of types
+   aggregate(nnda, by=list(from=marks(ants)), min)
+   # Always a symmetric matrix
+
+   # mean nearest neighbour distances
+   aggregate(nnda, by=list(from=marks(ants)), mean)
+   # The mean distance from a Messor nest to
+   # the nearest Cataglyphis nest is 59.02549 units
+}
+\author{Pavel Grabarnik
+  \email{pavel.grabar@issp.serpukhov.su}
+  and
+  \adrian
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/nndist.lpp.Rd b/man/nndist.lpp.Rd
new file mode 100644
index 0000000..8821331
--- /dev/null
+++ b/man/nndist.lpp.Rd
@@ -0,0 +1,63 @@
+\name{nndist.lpp}
+\alias{nndist.lpp}
+\title{
+  Nearest neighbour distances on a linear network
+}
+\description{
+  Given a pattern of points on a linear network, compute the
+  nearest-neighbour distances, measured
+  by the shortest path in the network.
+}
+\usage{
+\method{nndist}{lpp}(X, ..., k=1, method="C")
+}
+\arguments{
+  \item{X}{
+    Point pattern on linear network (object of class \code{"lpp"}).
+  }
+  \item{method}{
+    Optional string determining the method of calculation.
+    Either \code{"interpreted"} or \code{"C"}.
+  }
+  \item{k}{
+    Integer, or integer vector. The algorithm will compute the distance to the
+    \code{k}th nearest neighbour. 
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  Given a pattern of points on a linear network, this function computes the
+  nearest neighbour distance for each point (i.e. the distance
+  from each point to the nearest other point), measuring
+  distance by the shortest path in the network.
+  
+  If \code{method="C"} the distances are computed using
+  code in the C language. If \code{method="interpreted"} then the
+  computation is performed using interpreted \R code. The \R code is
+  much slower, but is provided for checking purposes.
+
+  The \code{k}th nearest neighbour distance is infinite
+  if the \code{k}th nearest neighbour does not exist. This can occur
+  if there are fewer than \code{k+1} points in the dataset, or if
+  the linear network is not connected.
+}
+\value{
+  A numeric vector, of length equal to the number of points in \code{X},
+  or a matrix, with one row for each point in \code{X} and one column
+  for each entry of \code{k}. Entries are nonnegative numbers or
+  infinity (\code{Inf}).
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{lpp}}
+}
+\examples{
+   X <- runiflpp(12, simplenet)
+   nndist(X)
+   nndist(X, k=2)
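+   # with 12 points the 12th nearest neighbour does not exist,
+   # so these distances are all infinite
+   nndist(X, k=12)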
+}
+\keyword{spatial}
diff --git a/man/nndist.pp3.Rd b/man/nndist.pp3.Rd
new file mode 100644
index 0000000..8b6cd36
--- /dev/null
+++ b/man/nndist.pp3.Rd
@@ -0,0 +1,105 @@
+\name{nndist.pp3}
+\alias{nndist.pp3}
+\title{Nearest neighbour distances in three dimensions}
+\description{
+  Computes the distance from each point to its nearest neighbour
+  in a three-dimensional point pattern.
+  Alternatively computes the distance to the
+  second nearest neighbour, or third nearest, etc.
+}
+\usage{
+  \method{nndist}{pp3}(X, \dots, k=1)
+}
+\arguments{
+  \item{X}{
+    Three-dimensional point pattern
+    (object of class \code{"pp3"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{k}{
+    Integer, or integer vector. The algorithm will compute the distance to the
+    \code{k}th nearest neighbour. 
+  }
+}
+\value{
+  Numeric vector or matrix containing the 
+  nearest neighbour distances for each point.
+
+  If \code{k = 1} (the default), the return value is a
+  numeric vector \code{v} such that \code{v[i]} is the
+  nearest neighbour distance for the \code{i}th data point.
+  
+  If \code{k} is a single integer, then the return value is a
+  numeric vector \code{v} such that \code{v[i]} is the
+  \code{k}th nearest neighbour distance for the
+  \code{i}th data point.
+
+  If \code{k} is a vector, then the return value is a
+  matrix \code{m} such that \code{m[i,j]} is the
+  \code{k[j]}th nearest neighbour distance for the
+  \code{i}th data point.
+}
+\details{
+  This function computes the Euclidean distance from each point
+  in a three-dimensional
+  point pattern to its nearest neighbour (the nearest other
+  point of the pattern). If \code{k} is specified, it computes the
+  distance to the \code{k}th nearest neighbour.
+
+  The function \code{nndist} is generic; this function
+  \code{nndist.pp3} is the method for the class \code{"pp3"}.
+
+  The argument \code{k} may be a single integer, or an integer vector.
+  If it is a vector, then the \eqn{k}th nearest neighbour distances are
+  computed for each value of \eqn{k} specified in the vector.
+
+  If there is only one point (if \code{X} has length 1),
+  then a nearest neighbour distance of \code{Inf} is returned.
+  If there are no points (if \code{X} has length zero),
+  a numeric vector of length zero is returned.
+
+  To identify \emph{which} point is the nearest neighbour of a given point,
+  use \code{\link{nnwhich}}.
+
+  To use the nearest neighbour distances for statistical inference,
+  it is often advisable to use the edge-corrected empirical distribution,
+  computed by \code{\link{G3est}}.
+
+  To find the nearest neighbour distances from one point pattern
+  to another point pattern, use \code{\link{nncross}}.
+}
+\section{Warnings}{
+  An infinite or \code{NA} value is returned if the
+  distance is not defined (e.g. if there is only one point
+  in the point pattern).
+}
+\seealso{
+  \code{\link{nndist}},
+  \code{\link{pairdist}},
+  \code{\link{G3est}},
+  \code{\link{nnwhich}}
+}
+\examples{
+   X <- runifpoint3(40)
+
+   # nearest neighbours
+   d <- nndist(X)
+
+   # second nearest neighbours
+   d2 <- nndist(X, k=2)
+
+   # first, second and third nearest
+   d1to3 <- nndist(X, k=1:3)
+}
+\author{
+  \adrian,
+  based on code for two dimensions by
+  Pavel Grabarnik
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/nndist.ppx.Rd b/man/nndist.ppx.Rd
new file mode 100644
index 0000000..ec0b9e9
--- /dev/null
+++ b/man/nndist.ppx.Rd
@@ -0,0 +1,104 @@
+\name{nndist.ppx}
+\alias{nndist.ppx}
+\title{Nearest Neighbour Distances in Any Dimensions}
+\description{
+  Computes the distance from each point to its nearest neighbour
+  in a multi-dimensional point pattern.
+  Alternatively computes the distance to the
+  second nearest neighbour, or third nearest, etc.
+}
+\usage{
+  \method{nndist}{ppx}(X, \dots, k=1)
+}
+\arguments{
+  \item{X}{
+    Multi-dimensional point pattern
+    (object of class \code{"ppx"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{coords.ppx}} to determine
+    which coordinates should be used.
+  }
+  \item{k}{
+    Integer, or integer vector. The algorithm will compute the distance to the
+    \code{k}th nearest neighbour. 
+  }
+}
+\value{
+  Numeric vector or matrix containing the 
+  nearest neighbour distances for each point.
+
+  If \code{k = 1} (the default), the return value is a
+  numeric vector \code{v} such that \code{v[i]} is the
+  nearest neighbour distance for the \code{i}th data point.
+  
+  If \code{k} is a single integer, then the return value is a
+  numeric vector \code{v} such that \code{v[i]} is the
+  \code{k}th nearest neighbour distance for the
+  \code{i}th data point.
+
+  If \code{k} is a vector, then the return value is a
+  matrix \code{m} such that \code{m[i,j]} is the
+  \code{k[j]}th nearest neighbour distance for the
+  \code{i}th data point.
+}
+\details{
+  This function computes the Euclidean distance from each point
+  in a multi-dimensional
+  point pattern to its nearest neighbour (the nearest other
+  point of the pattern). If \code{k} is specified, it computes the
+  distance to the \code{k}th nearest neighbour.
+
+  The function \code{nndist} is generic; this function
+  \code{nndist.ppx} is the method for the class \code{"ppx"}.
+
+  The argument \code{k} may be a single integer, or an integer vector.
+  If it is a vector, then the \eqn{k}th nearest neighbour distances are
+  computed for each value of \eqn{k} specified in the vector.
+
+  If there is only one point (if \code{X} has length 1),
+  then a nearest neighbour distance of \code{Inf} is returned.
+  If there are no points (if \code{X} has length zero),
+  a numeric vector of length zero is returned.
+
+  To identify \emph{which} point is the nearest neighbour of a given point,
+  use \code{\link{nnwhich}}.
+
+  To find the nearest neighbour distances from one point pattern
+  to another point pattern, use \code{\link{nncross}}.
+  
+  By default, both spatial and temporal coordinates are extracted.
+  To obtain the spatial distance between points in a space-time point
+  pattern, set \code{temporal=FALSE}.
+}
+\section{Warnings}{
+  An infinite or \code{NA} value is returned if the
+  distance is not defined (e.g. if there is only one point
+  in the point pattern).
+}
+\seealso{
+  \code{\link{nndist}},
+  \code{\link{pairdist}},
+  \code{\link{nnwhich}}
+}
+\examples{
+   df <- data.frame(x=runif(5),y=runif(5),z=runif(5),w=runif(5))
+   X <- ppx(data=df)
+
+   # nearest neighbours
+   d <- nndist(X)
+
+   # second nearest neighbours
+   d2 <- nndist(X, k=2)
+
+   # first, second and third nearest
+   d1to3 <- nndist(X, k=1:3)
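+
+   # space-time pattern: measure distance in the spatial coordinates only
+   # (a sketch; 't' is declared as the temporal coordinate)
+   st <- ppx(data=data.frame(x=runif(5), y=runif(5), t=runif(5)),
+             coord.type=c("s", "s", "t"))
+   nndist(st, temporal=FALSE)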
+}
+\author{
+  \adrian
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/nndist.psp.Rd b/man/nndist.psp.Rd
new file mode 100644
index 0000000..c10e07a
--- /dev/null
+++ b/man/nndist.psp.Rd
@@ -0,0 +1,92 @@
+\name{nndist.psp}
+\alias{nndist.psp}
+\title{Nearest neighbour distances between line segments}
+\description{
+  Computes the distance from each line segment 
+  to its nearest neighbour in a line segment pattern.
+  Alternatively finds the distance to the second nearest,
+  third nearest etc.
+}
+\usage{
+  \method{nndist}{psp}(X, \dots, k=1, method="C")
+}
+\arguments{
+  \item{X}{
+    A line segment pattern (object of class \code{"psp"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{k}{
+    Integer, or integer vector. The algorithm will compute the distance to the
+    \code{k}th nearest neighbour. 
+  }
+  \item{method}{
+    String specifying which method of calculation to use.
+    Values are \code{"C"} and \code{"interpreted"}.
+    Usually not specified.
+  }
+}
+\value{
+  Numeric vector or matrix containing the 
+  nearest neighbour distances for each line segment.
+
+  If \code{k = 1} (the default), the return value is a
+  numeric vector \code{v} such that \code{v[i]} is the
+  nearest neighbour distance for the \code{i}th segment.
+  
+  If \code{k} is a single integer, then the return value is a
+  numeric vector \code{v} such that \code{v[i]} is the
+  \code{k}th nearest neighbour distance for the
+  \code{i}th segment.
+
+  If \code{k} is a vector, then the return value is a
+  matrix \code{m} such that \code{m[i,j]} is the
+  \code{k[j]}th nearest neighbour distance for the
+  \code{i}th segment.
+}
+\details{
+  This is a method for the generic function \code{\link{nndist}}
+  for the class \code{"psp"}.
+
+  If \code{k=1}, this function computes the distance from each line segment 
+  to the nearest other line segment in \code{X}.
+  In general it computes the distance from each line segment to the
+  \code{k}th nearest other line segment.
+  The argument \code{k} can also be a vector, and this computation will
+  be performed for each value of \code{k}.
+
+  Distances are calculated using the Hausdorff metric. The Hausdorff
+  distance between two line segments is the maximum distance
+  from any point on one of the segments to the nearest point on
+  the other segment.   
+
+  If there are fewer than \code{max(k)+1} line segments in the pattern,
+  some of the nearest neighbour distances will be infinite (\code{Inf}).
+  
+  The argument \code{method} is not normally used. It is
+  retained only for checking the validity of the software.
+  If \code{method = "interpreted"} then the distances are
+  computed using interpreted \R code only. If \code{method="C"}
+  (the default) then compiled \code{C} code is used.
+  The \code{C} code is somewhat faster.
+}
+\seealso{
+  \code{\link{nndist}},
+  \code{\link{nndist.ppp}}
+}
+\examples{
+   L <- psp(runif(10), runif(10), runif(10), runif(10), owin())
+   D <- nndist(L)
+   D <- nndist(L, k=1:3)
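+   # consistency check (a sketch): the nearest-neighbour distances are the
+   # row-wise minima of the pairwise Hausdorff distance matrix
+   M <- pairdist(L)
+   diag(M) <- Inf
+   all.equal(nndist(L), apply(M, 1, min))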
+}
+\author{
+  \adrian and \rolf
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/nnfun.Rd b/man/nnfun.Rd
new file mode 100644
index 0000000..54c8a86
--- /dev/null
+++ b/man/nnfun.Rd
@@ -0,0 +1,94 @@
+\name{nnfun}
+\Rdversion{1.1}
+\alias{nnfun}
+\alias{nnfun.ppp}
+\alias{nnfun.psp}
+\title{
+  Nearest Neighbour Index Map as a Function 
+}
+\description{
+  Compute the nearest neighbour index map of an object,
+  and return it as a function.
+}
+\usage{
+  nnfun(X, ...)
+
+  \method{nnfun}{ppp}(X, ..., k=1)
+
+  \method{nnfun}{psp}(X, ...)
+}
+\arguments{
+  \item{X}{
+    Any suitable dataset representing a two-dimensional
+    collection of objects,
+    such as a point pattern (object of class \code{"ppp"})
+    or a line segment pattern (object of class \code{"psp"}).
+  }
+  \item{k}{
+    A single integer. The \code{k}th nearest neighbour will be found.
+  }
+  \item{\dots}{
+    Extra arguments are ignored. 
+  }
+}
+\details{
+  For a collection \eqn{X} of two-dimensional objects
+  (such as a point pattern or a line segment pattern),
+  the \dQuote{nearest neighbour index function}
+  of \eqn{X} is the mathematical function \eqn{f} such that, for any 
+  two-dimensional spatial location \eqn{(x,y)},
+  the function value \code{f(x,y)}
+  is the index \eqn{i} identifying the closest member of \eqn{X}.
+  That is, if \eqn{i = f(x,y)} then \eqn{X[i]} is the closest member of
+  the collection \eqn{X} to the location \eqn{(x,y)}.
+  
+  The command \code{f <- nnfun(X)} returns a \emph{function}
+  in the \R language, with arguments \code{x,y}, that represents the
+  nearest neighbour index function of \code{X}. Evaluating the function \code{f}
+  in the form \code{v <- f(x,y)}, where \code{x} and \code{y}
+  are any numeric vectors of equal length containing coordinates of
+  spatial locations, yields the indices of the nearest neighbours
+  to these locations.
+
+  If the argument \code{k} is specified then the \code{k}-th nearest
+  neighbour will be found.
+
+  The result of \code{f <- nnfun(X)} also belongs to the class
+  \code{"funxy"} and to the special class \code{"nnfun"}.
+  It can be printed and plotted immediately as shown in the Examples.
+  
+  A \code{nnfun} object can be converted to a pixel image
+  using \code{\link{as.im}}.
+}
+\value{
+  A \code{function} with arguments \code{x,y}.
+  The function also belongs to the class \code{"nnfun"} which has
+  a method for \code{print}.
+  It also belongs to the class \code{"funxy"} which has methods
+  for \code{plot}, \code{contour} and \code{persp}.
+}
+\seealso{
+  \code{\link{distfun}},
+  \code{\link{plot.funxy}}
+}
+\examples{
+   f <- nnfun(cells)
+   f
+   plot(f)
+   f(0.2, 0.3)
+
+   g <- nnfun(cells, k=2)
+   g(0.2, 0.3)
+
+   L <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+   h <- nnfun(L)
+   h(0.2, 0.3)
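+
+   # convert to a pixel image
+   plot(as.im(f))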
+}
+\author{
+  \adrian and \rolf
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/nnfun.lpp.Rd b/man/nnfun.lpp.Rd
new file mode 100644
index 0000000..a29a7fd
--- /dev/null
+++ b/man/nnfun.lpp.Rd
@@ -0,0 +1,90 @@
+\name{nnfun.lpp}
+\Rdversion{1.1}
+\alias{nnfun.lpp}
+\title{
+   Nearest Neighbour Map on Linear Network
+}
+\description{
+  Compute the nearest neighbour function of a point pattern on a linear network.
+}
+\usage{
+  \method{nnfun}{lpp}(X, ..., k=1)
+}
+\arguments{
+  \item{X}{
+    A point pattern on a linear network
+    (object of class \code{"lpp"}).
+  }
+  \item{k}{
+    Integer. The algorithm finds the \code{k}th nearest neighbour in
+    \code{X} from any spatial location.    
+  }
+  \item{\dots}{
+    Other arguments are ignored. 
+  }
+}
+\details{
+  The (geodesic) \emph{nearest neighbour function} of a
+  point pattern \code{X} on a linear network \code{L}
+  tells us which point of \code{X} is closest to
+  any given location.
+  
+  If \code{X} is a point pattern on a linear network \code{L},
+  the \emph{nearest neighbour function} of \code{X}
+  is the mathematical function \eqn{f} defined for any 
+  location \eqn{s} on the network by \code{f(s) = i}, where
+  \code{X[i]} is the closest point of \code{X} to the location \code{s}
+  measured by the shortest path. In other words the value of \code{f(s)}
+  is the identifier or serial number of the closest point of \code{X}.
+  
+  The command \code{nnfun.lpp} is a method for the generic command
+  \code{\link{nnfun}}
+  for the class \code{"lpp"} of point patterns on a linear network.
+
+  If \code{X} is a point pattern on a linear network,
+  \code{f <- nnfun(X)} returns a \emph{function}
+  in the \R language, with arguments \code{x,y, \dots}, that represents the
+  nearest neighbour function of \code{X}. Evaluating the function \code{f}
+  in the form \code{v <- f(x,y)}, where \code{x} and \code{y}
+  are any numeric vectors of equal length containing coordinates of
+  spatial locations, yields a vector of identifiers or serial numbers of
+  the data points closest to these spatial locations.
+  More efficiently, \code{f} can take the arguments
+  \code{x, y, seg, tp}, where \code{seg} and \code{tp} are the local
+  coordinates on the network.
+
+  The result of \code{f <- nnfun(X)} also belongs to the class
+  \code{"linfun"}.
+  It can be printed and plotted immediately as shown in the Examples.
+  It can be converted to a pixel image
+  using \code{\link{as.linim}}.
+
+}
+\value{
+  A \code{function} in the \R language, with arguments \code{x,y} and optional
+  arguments \code{seg,tp}.
+  It also belongs to the class \code{"linfun"} which has methods
+  for \code{plot}, \code{print} etc.
+}
+\seealso{
+  \code{\link{linfun}},
+  \code{\link{methods.linfun}}.
+
+  To compute the \emph{distance} to the nearest neighbour, see
+  \code{\link{distfun.lpp}}.
+}
+\examples{
+   X <- runiflpp(3, simplenet)
+   f <- nnfun(X)
+   f
+   plot(f)
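+   # convert to a pixel image on the network
+   plot(as.linim(f))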
+}
+\author{
+  \adrian and \rolf
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/nnmap.Rd b/man/nnmap.Rd
new file mode 100644
index 0000000..37825ac
--- /dev/null
+++ b/man/nnmap.Rd
@@ -0,0 +1,138 @@
+\name{nnmap}
+\alias{nnmap}
+\title{
+  K-th Nearest Point Map
+}
+\description{
+  Given a point pattern, this function constructs pixel images
+  giving the distance from each pixel to its \eqn{k}-th nearest
+  neighbour in the point pattern, and the index of the \eqn{k}-th nearest
+  neighbour.
+}
+\usage{
+  nnmap(X, k = 1, what = c("dist", "which"),
+  \dots, W = as.owin(X),
+  is.sorted.X = FALSE, sortby = c("range", "var", "x", "y"))
+}
+\arguments{
+  \item{X}{
+    Point pattern (object of class \code{"ppp"}).
+  }
+  \item{k}{
+    Integer, or integer vector. The algorithm will find the 
+    \code{k}th nearest neighbour. 
+  }
+  \item{what}{
+    Character string specifying what information should be returned.
+    Either the nearest neighbour distance (\code{"dist"}),
+    the index of the nearest neighbour (\code{"which"}),
+    or both.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}} to determine the pixel
+    resolution of the result.
+  }
+  \item{W}{
+    Window (object of class \code{"owin"}) specifying the spatial domain
+    in which the distances will be computed.
+    Defaults to the window of \code{X}.
+  }
+  \item{is.sorted.X}{
+    Logical value attesting whether the point pattern \code{X}
+    has been sorted. See Details.
+  }
+  \item{sortby}{
+    Determines which coordinate to use to sort the point pattern.
+    See Details.
+  }
+}
+\details{
+  Given a point pattern \code{X}, this function constructs two pixel images:
+  \itemize{
+    \item a distance map giving, for each pixel,
+    the distance to the nearest point of \code{X};
+    \item a nearest neighbour map giving, for each pixel,
+    the identifier of the nearest point of \code{X}.
+  }
+  If the argument \code{k} is specified, then the \code{k}-th nearest
+  neighbours will be found.
+
+  If \code{what="dist"} then only the distance map is returned.
+  If \code{what="which"} then only the nearest neighbour map
+  is returned.
+
+  The argument \code{k} may be an integer or an integer vector.
+  If it is a single integer, then the \code{k}-th nearest neighbours
+  are computed. If it is a vector, then the \code{k[i]}-th nearest
+  neighbours are computed for each entry \code{k[i]}. For example, setting
+  \code{k=1:3} will compute the nearest, second-nearest and
+  third-nearest neighbours. 
+}
+\section{Sorting data and pre-sorted data}{
+  Read this section if you care about the speed of computation.
+  
+  For efficiency, the algorithm sorts the point pattern \code{X}
+  into increasing order of the \eqn{x} coordinate
+  or increasing order of the \eqn{y} coordinate.
+  Sorting is only an intermediate step;
+  it does not affect the output, which is always given in the same
+  order as the original data.
+  
+  By default (if \code{sortby="range"}),
+  the sorting will occur on the coordinate that has the larger range of
+  values (according to the frame of the enclosing window of \code{X}).
+  If \code{sortby = "var"}), sorting will occur on the coordinate that
+  has the greater variance (in the pattern \code{X}).
+  Setting \code{sortby="x"} or \code{sortby = "y"} will specify that
+  sorting should occur on the \eqn{x} or \eqn{y} coordinate, respectively.
+
+  If the point pattern \code{X} is already
+  sorted, then the argument \code{is.sorted.X}
+  should be set to \code{TRUE}, and \code{sortby} should be set
+  equal to \code{"x"} or \code{"y"} to indicate which coordinate
+  is sorted.
+}
+\section{Warning About Ties}{
+  Ties are possible: there may be two data points
+  which lie exactly the same distance away from a particular pixel.
+  This affects the results from \code{nnmap(what="which")}.
+  The handling of ties is not well-defined: it is not consistent
+  between different computers and different installations of \R.
+  If there are ties, then different calls to \code{nnmap(what="which")}
+  may give inconsistent results. For example, you may get a different answer
+  from \code{nnmap(what="which",k=1)}
+  and \code{nnmap(what="which", k=1:2)[[1]]}.
+}
+\value{
+  A pixel image, or a list of pixel images.
+
+  By default (if \code{what=c("dist", "which")}), the result is
+  a list with two components \code{dist} and \code{which}
+  containing the distance map and the nearest neighbour map.
+
+  If \code{what="dist"} then the result is a real-valued pixel image
+  containing the distance map.
+  
+  If \code{what="which"} then the result is an integer-valued pixel image
+  containing the nearest neighbour map.
+
+  If \code{k} is a vector of several integers, then the result is
+  similar except that each pixel image is replaced by a list of
+  pixel images, one for each entry of \code{k}.
+}
+\seealso{
+  \code{\link{distmap}}
+}
+\examples{
+  plot(nnmap(cells, 2, what="which"))
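+  # maps of distance to the first and second nearest points
+  # (a sketch; with vector k the result is a list of images)
+  NM <- nnmap(cells, k=1:2, what="dist")
+  plot(NM[[2]])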
+}
+\author{
+  \adrian,
+  \rolf
+  and Jens Oehlschlaegel
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/nnmark.Rd b/man/nnmark.Rd
new file mode 100644
index 0000000..603c85d
--- /dev/null
+++ b/man/nnmark.Rd
@@ -0,0 +1,115 @@
+\name{nnmark}
+\alias{nnmark}
+\title{
+  Mark of Nearest Neighbour 
+}
+\description{
+  Given a marked point pattern dataset \code{X}
+  this function computes, for each desired location \code{y},
+  the mark attached to the nearest neighbour of \code{y} in \code{X}.
+  The desired locations \code{y} can be either a pixel grid
+  or the point pattern \code{X} itself.
+}
+\usage{
+nnmark(X, \dots, k = 1, at=c("pixels", "points"))
+}
+\arguments{
+  \item{X}{
+    A marked point pattern (object of class \code{"ppp"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}} to determine the
+    pixel resolution.
+  }
+  \item{k}{
+    Single integer. The \code{k}th nearest data point will be used.
+  }
+  \item{at}{
+    String specifying whether to compute the values
+    at a grid of pixel locations (\code{at="pixels"}) or
+    only at the points of \code{X} (\code{at="points"}).
+  }
+}
+\details{
+  Given a marked point pattern dataset \code{X}
+  this function computes, for each desired location \code{y},
+  the mark attached to the point of \code{X} that is nearest
+  to \code{y}. The desired locations \code{y} can be either a pixel grid
+  or the point pattern \code{X} itself.
+
+  The argument \code{X} must be a marked point pattern (object
+  of class \code{"ppp"}, see \code{\link{ppp.object}}).
+  The marks are allowed to be a vector or a data frame.
+
+  \itemize{
+    \item
+    If \code{at="points"}, then for each point in \code{X},
+    the algorithm finds the nearest \emph{other} point in \code{X},
+    and extracts the mark attached to it.
+    The result is a vector or data frame containing the marks
+    of the neighbours of each point.
+    \item
+    If \code{at="pixels"} (the default), then for each pixel
+    in a rectangular grid, the algorithm finds the nearest point in \code{X},
+    and extracts the mark attached to it.
+    The result is an image or a list of images containing the marks
+    of the neighbours of each pixel.
+    The pixel resolution is controlled by the arguments \code{\dots}
+    passed to \code{\link{as.mask}}.
+  }
+
+  If the argument \code{k} is given, then the \code{k}-th nearest
+  neighbour will be used.
+}
+\value{
+  \emph{If \code{X} has a single column of marks:}
+  \itemize{
+    \item 
+    If \code{at="pixels"} (the default), the result is
+    a pixel image (object of class \code{"im"}). 
+    The value at each pixel is the mark attached
+    to the nearest point of \code{X}.
+    \item
+    If \code{at="points"}, the result is a vector or factor
+    of length equal to the number of points in \code{X}.
+    Entries are the mark values of the
+    nearest neighbours of each point of \code{X}.
+  }
+  \emph{If \code{X} has a data frame of marks:}
+  \itemize{
+    \item 
+    If \code{at="pixels"} (the default), the result is a named list of 
+    pixel images (object of class \code{"im"}). There is one
+    image for each column of marks. This list also belongs to
+    the class \code{"solist"}, for which there is a plot method.
+    \item
+    If \code{at="points"}, the result is a data frame
+    with one row for each point of \code{X}.
+    Entries are the mark values of the
+    nearest neighbours of each point of \code{X}.
+  }
+}
+\author{
+  \adrian, \rolf and \ege
+}
+\seealso{
+  \code{\link{Smooth.ppp}},
+  \code{\link{marktable}},
+  \code{\link{nnwhich}}
+}
+\examples{
+  plot(nnmark(ants))
+  v <- nnmark(ants, at="points")
+  v[1:10]
+  plot(nnmark(finpines))
+  vf <- nnmark(finpines, at="points")
+  vf[1:5,]
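+  # use the mark of the second-nearest data point instead
+  plot(nnmark(ants, k=2))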
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/nnorient.Rd b/man/nnorient.Rd
new file mode 100644
index 0000000..43efae9
--- /dev/null
+++ b/man/nnorient.Rd
@@ -0,0 +1,109 @@
+\name{nnorient}
+\alias{nnorient}
+\title{
+  Nearest Neighbour Orientation Distribution
+}
+\description{
+  Computes the distribution of the orientation of the vectors
+  from each point to its nearest neighbour.
+}
+\usage{
+  nnorient(X, \dots, cumulative = FALSE, correction, k = 1,
+                     unit = c("degree", "radian"),
+                     domain = NULL, ratio = FALSE)
+}
+\arguments{
+  \item{X}{
+    Point pattern (object of class \code{"ppp"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{circdensity}} to control
+    the kernel smoothing, if \code{cumulative=FALSE}.
+  }
+  \item{cumulative}{
+    Logical value specifying whether to estimate the probability density
+    (\code{cumulative=FALSE}, the default) or the cumulative
+    distribution function (\code{cumulative=TRUE}).
+  }
+  \item{correction}{
+    Character vector specifying edge correction or corrections.
+    Options are \code{"none"}, \code{"bord.modif"}, 
+    \code{"good"} and \code{"best"}.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{k}{
+    Integer. The \eqn{k}th nearest neighbour will be used.
+  }
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    each edge-corrected estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+  \item{unit}{
+    Unit in which the angles should be expressed.
+    Either \code{"degree"} or \code{"radian"}.
+  }
+  \item{domain}{
+    Optional window. The first point \eqn{x_i}{x[i]} of each pair of points
+    will be constrained to lie in \code{domain}.
+  }
+}
+\details{
+  This algorithm considers each point in the pattern \code{X}
+  and finds its nearest neighbour (or \eqn{k}th nearest neighbour).
+  The \emph{direction} of the arrow joining the data point to its neighbour
+  is measured, as an angle in degrees or radians,
+  anticlockwise from the \eqn{x} axis.
+
+  If \code{cumulative=FALSE} (the default),
+  a kernel estimate of the probability density of the angles
+  is calculated using \code{\link{circdensity}}.
+  This is the function \eqn{\vartheta(\phi)}{theta(phi)} defined 
+  in Illian et al (2008), equation (4.5.3), page 253.
+
+  If \code{cumulative=TRUE}, then the cumulative distribution
+  function of these angles is calculated.
+
+  In either case the result can be plotted as a rose diagram by 
+  \code{\link{rose}}, or as a function plot by \code{\link{plot.fv}}.
+  
+  The algorithm gives each observed direction a weight,
+  determined by an edge correction, to adjust for the fact that some
+  interpoint distances are more likely to be observed than others.
+  The choice of edge correction or corrections is determined by the argument
+  \code{correction}.
+
+  It is also possible to calculate an estimate of the probability
+  density from the cumulative distribution function,
+  by numerical differentiation. 
+  Use \code{\link{deriv.fv}} with the argument \code{Dperiodic=TRUE}.
+}
+\value{
+  A function value table (object of class \code{"fv"})
+  containing the estimates of the probability density or the
+  cumulative distribution function of angles,
+  in degrees (if \code{unit="degree"})
+  or radians (if \code{unit="radian"}).
+}
+\references{
+  Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008)
+  \emph{Statistical Analysis and Modelling of Spatial Point Patterns.}
+  Wiley.
+}
+\seealso{
+  \code{\link{pairorient}}
+}
+\examples{
+  rose(nnorient(redwood, adjust=0.6), col="grey")
+  plot(CDF <- nnorient(redwood, cumulative=TRUE))
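+  # recover the density from the CDF by numerical differentiation
+  plot(deriv(CDF, Dperiodic=TRUE))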
+}
+\author{
+  \adrian, \rolf and \ege
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/nnwhich.Rd b/man/nnwhich.Rd
new file mode 100644
index 0000000..f34dfec
--- /dev/null
+++ b/man/nnwhich.Rd
@@ -0,0 +1,163 @@
+\name{nnwhich}
+\alias{nnwhich}
+\alias{nnwhich.ppp}
+\alias{nnwhich.default}
+\title{Nearest neighbour}
+\description{
+  Finds the nearest neighbour of each point in a point pattern.
+}
+\usage{
+  nnwhich(X, \dots)
+  \method{nnwhich}{ppp}(X, \dots, k=1, by=NULL, method="C")
+  \method{nnwhich}{default}(X, Y=NULL, \dots, k=1, by=NULL, method="C")
+}
+\arguments{
+  \item{X,Y}{
+    Arguments specifying the locations of
+    a set of points.
+    For \code{nnwhich.ppp}, the argument \code{X} should be a point
+    pattern (object of class \code{"ppp"}).
+    For \code{nnwhich.default}, typically \code{X} and \code{Y} would be
+    numeric vectors of equal length. Alternatively \code{Y} may be
+    omitted and \code{X} may be
+    a list with two components \code{x} and \code{y},
+    or a matrix with two columns.
+  }
+  \item{\dots}{
+    Ignored by \code{nnwhich.ppp}
+    and \code{nnwhich.default}.
+  }
+  \item{k}{
+    Integer, or integer vector. The algorithm will compute the distance to the
+    \code{k}th nearest neighbour. 
+  }
+  \item{by}{
+    Optional. A factor, which separates \code{X} into groups.
+    The algorithm will find the nearest neighbour in each group. 
+  }
+  \item{method}{String specifying which method of calculation to use.
+    Values are \code{"C"} and \code{"interpreted"}.
+  }
+}
+\value{
+  Numeric vector or matrix giving, for each point,
+  the index of its nearest neighbour (or \code{k}th nearest neighbour).
+
+  If \code{k = 1} (the default), the return value is a
+  numeric vector \code{v} giving the indices of the nearest neighbours
+  (the nearest neighbour of the \code{i}th point is
+  the \code{j}th point where \code{j = v[i]}).
+  
+  If \code{k} is a single integer, then the return value is a
+  numeric vector giving the indices of the
+  \code{k}th nearest neighbours.
+
+  If \code{k} is a vector, then the return value is a
+  matrix \code{m} such that \code{m[i,j]} is the
+  index of the \code{k[j]}th nearest neighbour for the
+  \code{i}th data point.
+
+  If the argument \code{by} is given, then the result is a data frame
+  containing the indices described above, from each point of \code{X},
+  to the nearest point in each subset of \code{X}
+  defined by the factor \code{by}.
+}
+\details{
+  For each point in the given point pattern, this function finds
+  its nearest neighbour (the nearest other point of the pattern).
+  By default it returns a vector giving, for each point,
+  the index of the point's
+  nearest neighbour. If \code{k} is specified, the algorithm finds
+  each point's \code{k}th nearest neighbour.
+
+  The function \code{nnwhich} is generic, with
+  a method for point patterns (objects of class \code{"ppp"})
+  and a default method, both described here, as well as a method for
+  three-dimensional point patterns (objects of class \code{"pp3"}),
+  described in \code{\link{nnwhich.pp3}}.
+
+  The method \code{nnwhich.ppp} expects a single
+  point pattern argument \code{X}.
+  The default method expects that \code{X} and \code{Y} will determine
+  the coordinates of a set of points. Typically \code{X} and
+  \code{Y} would be numeric vectors of equal length. Alternatively
+  \code{Y} may be omitted and \code{X} may be a list with two components
+  named \code{x} and \code{y}, or a matrix or data frame with two columns.
+  
+  The argument \code{k} may be a single integer, or an integer vector.
+  If it is a vector, then the \eqn{k}th nearest neighbour distances are
+  computed for each value of \eqn{k} specified in the vector.
+
+  If the argument \code{by} is given, it should be a \code{factor},
+  of length equal to the number of points in \code{X}.
+  This factor effectively partitions \code{X} into subsets,
+  each subset associated with one of the levels of the factor \code{by}.
+  The algorithm will then find, for each point of \code{X},
+  the nearest neighbour \emph{in each subset}.
+
+  If there are no points (if \code{X} has length zero),
+  a numeric vector of length zero is returned.
+  If there is only one point (if \code{X} has length 1),
+  then the nearest neighbour is undefined, and a value of \code{NA}
+  is returned. In general if the number of points is less than or equal
+  to \code{k}, then a vector of \code{NA}'s is returned.
+
+  The argument \code{method} is not normally used. It is
+  retained only for checking the validity of the software.
+  If \code{method = "interpreted"} then the distances are
+  computed using interpreted R code only. If \code{method="C"}
+  (the default) then C code is used. 
+  The C code is faster by two to three orders of magnitude
+  and uses much less memory.
+  
+  To evaluate the \emph{distance} between a point and its nearest
+  neighbour, use \code{\link{nndist}}.
+
+  To find the nearest neighbours from one point pattern
+  to another point pattern, use \code{\link{nncross}}.
+}
+\section{Nearest neighbours of each type}{
+  If \code{X} is a multitype point pattern 
+  and \code{by=marks(X)}, then the algorithm will find,
+  for each point of \code{X}, the nearest neighbour
+  of each type. See the Examples. 
+}
+\section{Warnings}{
+  A value of \code{NA} is returned if there is only one point
+  in the point pattern. 
+}
+\seealso{
+  \code{\link{nndist}},
+  \code{\link{nncross}}
+}
+\examples{
+   data(cells)
+   plot(cells)
+   m <- nnwhich(cells)
+   m2 <- nnwhich(cells, k=2)
+
+   # plot nearest neighbour links
+   b <- cells[m]
+   arrows(cells$x, cells$y, b$x, b$y, angle=15, length=0.15, col="red")
+
+   # find points which are the neighbour of their neighbour
+   self <- (m[m] == seq_along(m))
+   # plot them
+   A <- cells[self]
+   B <- cells[m[self]]
+   plot(cells)
+   segments(A$x, A$y, B$x, B$y)
+
+   # nearest neighbours of each type
+   head(nnwhich(ants, by=marks(ants)))
+}
+\author{Pavel Grabarnik
+  \email{pavel.grabar@issp.serpukhov.su}
+  and \adrian
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/nnwhich.lpp.Rd b/man/nnwhich.lpp.Rd
new file mode 100644
index 0000000..ee71275
--- /dev/null
+++ b/man/nnwhich.lpp.Rd
@@ -0,0 +1,66 @@
+\name{nnwhich.lpp}
+\alias{nnwhich.lpp}
+\title{
+  Identify Nearest Neighbours on a Linear Network
+}
+\description{
+  Given a pattern of points on a linear network, identify the
+  nearest neighbour for each point, measured
+  by the shortest path in the network.
+}
+\usage{
+\method{nnwhich}{lpp}(X, ..., k=1, method="C")
+}
+\arguments{
+  \item{X}{
+    Point pattern on linear network (object of class \code{"lpp"}).
+  }
+  \item{method}{
+    Optional string determining the method of calculation.
+    Either \code{"interpreted"} or \code{"C"}.
+  }
+  \item{k}{
+    Integer, or integer vector. The algorithm will find the
+    \code{k}th nearest neighbour. 
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  Given a pattern of points on a linear network,
+  this function finds the nearest neighbour of each point
+  (i.e. for each point it identifies the nearest other point)
+  measuring distance by the shortest path in the network.
+  
+  If \code{method="C"} the task is performed using
+  code in the C language. If \code{method="interpreted"} then the
+  computation is performed using interpreted \R code. The \R code is
+  much slower, but is provided for checking purposes.
+
+  The result is \code{NA} if the \code{k}th nearest neighbour
+  does not exist. This can occur
+  if there are fewer than \code{k+1} points in the dataset, or if
+  the linear network is not connected.
+}
+\value{
+  An integer vector, of length equal to the number of points in
+  \code{X}, identifying the nearest neighbour of each point.
+  If \code{nnwhich(X)[2] = 4} then the nearest neighbour of
+  point 2 is point 4.
+
+  Alternatively a matrix with one row for each point in \code{X}
+  and one column for each entry of \code{k}.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{lpp}}
+}
+\examples{
+   X <- runiflpp(10, simplenet)
+   nnwhich(X)
+   nnwhich(X, k=2)
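+   # the 10th nearest neighbour does not exist for a pattern of 10 points,
+   # so NA is returned
+   nnwhich(X, k=10)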
+}
+\keyword{spatial}
diff --git a/man/nnwhich.pp3.Rd b/man/nnwhich.pp3.Rd
new file mode 100644
index 0000000..fbec86b
--- /dev/null
+++ b/man/nnwhich.pp3.Rd
@@ -0,0 +1,90 @@
+\name{nnwhich.pp3}
+\alias{nnwhich.pp3}
+\title{Nearest neighbours in three dimensions}
+\description{
+  Finds the nearest neighbour of each point in a three-dimensional
+  point pattern.
+}
+\usage{
+  \method{nnwhich}{pp3}(X, \dots, k=1)
+}
+\arguments{
+  \item{X}{
+    Three-dimensional point pattern 
+    (object of class \code{"pp3"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{k}{
+    Integer, or integer vector. The algorithm will compute the distance to the
+    \code{k}th nearest neighbour. 
+  }
+}
+\value{
+  Numeric vector or matrix giving, for each point,
+  the index of its nearest neighbour (or \code{k}th nearest neighbour).
+
+  If \code{k = 1} (the default), the return value is a
+  numeric vector \code{v} giving the indices of the nearest neighbours
+  (the nearest neighbour of the \code{i}th point is
+  the \code{j}th point where \code{j = v[i]}).
+  
+  If \code{k} is a single integer, then the return value is a
+  numeric vector giving the indices of the
+  \code{k}th nearest neighbours.
+
+  If \code{k} is a vector, then the return value is a
+  matrix \code{m} such that \code{m[i,j]} is the
+  index of the \code{k[j]}th nearest neighbour for the
+  \code{i}th data point.
+}
+\details{
+  For each point in the given three-dimensional
+  point pattern, this function finds
+  its nearest neighbour (the nearest other point of the pattern).
+  By default it returns a vector giving, for each point,
+  the index of the point's
+  nearest neighbour. If \code{k} is specified, the algorithm finds
+  each point's \code{k}th nearest neighbour.
+
+  The function \code{nnwhich} is generic. This is the method
+  for the class \code{"pp3"}.
+
+  If there are no points in the pattern,
+  a numeric vector of length zero is returned.
+  If there is only one point,
+  then the nearest neighbour is undefined, and a value of \code{NA}
+  is returned. In general if the number of points is less than or equal
+  to \code{k}, then a vector of \code{NA}'s is returned.
+
+  To evaluate the \emph{distance} between a point and its nearest
+  neighbour, use \code{\link{nndist}}.
+
+  To find the nearest neighbours from one point pattern
+  to another point pattern, use \code{\link{nncross}}.
+}
+\section{Warnings}{
+  A value of \code{NA} is returned if there is only one point
+  in the point pattern. 
+}
+\seealso{
+  \code{\link{nnwhich}},
+  \code{\link{nndist}},
+  \code{\link{nncross}}
+}
+\examples{
+   X <- runifpoint3(30)
+   m <- nnwhich(X)
+   m2 <- nnwhich(X, k=2)
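+   # k may be an integer vector; the result is then a matrix
+   # with one column for each entry of k (see Value)
+   mm <- nnwhich(X, k=1:2)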
+}
+\author{
+  \adrian
+  
+  
+  based on two-dimensional code by
+  Pavel Grabarnik
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/nnwhich.ppx.Rd b/man/nnwhich.ppx.Rd
new file mode 100644
index 0000000..1985aab
--- /dev/null
+++ b/man/nnwhich.ppx.Rd
@@ -0,0 +1,94 @@
+\name{nnwhich.ppx}
+\alias{nnwhich.ppx}
+\title{Nearest Neighbours in Any Dimensions}
+\description{
+  Finds the nearest neighbour of each point in a multi-dimensional
+  point pattern.
+}
+\usage{
+  \method{nnwhich}{ppx}(X, \dots, k=1)
+}
+\arguments{
+  \item{X}{
+    Multi-dimensional point pattern 
+    (object of class \code{"ppx"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{coords.ppx}} to determine
+    which coordinates should be used.
+  }
+  \item{k}{
+    Integer, or integer vector. The algorithm will find the
+    \code{k}th nearest neighbour. 
+  }
+}
+\value{
+  Numeric vector or matrix giving, for each point,
+  the index of its nearest neighbour (or \code{k}th nearest neighbour).
+
+  If \code{k = 1} (the default), the return value is a
+  numeric vector \code{v} giving the indices of the nearest neighbours
+  (the nearest neighbour of the \code{i}th point is
+  the \code{j}th point where \code{j = v[i]}).
+  
+  If \code{k} is a single integer, then the return value is a
+  numeric vector giving the indices of the
+  \code{k}th nearest neighbours.
+
+  If \code{k} is a vector, then the return value is a
+  matrix \code{m} such that \code{m[i,j]} is the
+  index of the \code{k[j]}th nearest neighbour for the
+  \code{i}th data point.
+}
+\details{
+  For each point in the given multi-dimensional
+  point pattern, this function finds
+  its nearest neighbour (the nearest other point of the pattern).
+  By default it returns a vector giving, for each point,
+  the index of the point's
+  nearest neighbour. If \code{k} is specified, the algorithm finds
+  each point's \code{k}th nearest neighbour.
+
+  The function \code{nnwhich} is generic. This is the method
+  for the class \code{"ppx"}.
+
+  If there are no points in the pattern,
+  a numeric vector of length zero is returned.
+  If there is only one point,
+  then the nearest neighbour is undefined, and a value of \code{NA}
+  is returned. In general if the number of points is less than or equal
+  to \code{k}, then a vector of \code{NA}'s is returned.
+
+  To evaluate the \emph{distance} between a point and its nearest
+  neighbour, use \code{\link{nndist}}.
+
+  To find the nearest neighbours from one point pattern
+  to another point pattern, use \code{\link{nncross}}.
+  
+  By default, both spatial and temporal coordinates are extracted.
+  To obtain the spatial distance between points in a space-time point
+  pattern, set \code{temporal=FALSE}.
+}
+\section{Warnings}{
+  A value of \code{NA} is returned if there is only one point
+  in the point pattern. 
+}
+\seealso{
+  \code{\link{nnwhich}},
+  \code{\link{nndist}},
+  \code{\link{nncross}}
+}
+\examples{
+   df <- data.frame(x=runif(5),y=runif(5),z=runif(5),w=runif(5))
+   X <- ppx(data=df)
+   m <- nnwhich(X)
+   m2 <- nnwhich(X, k=2)
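+   # sketch of the space-time case mentioned in Details:
+   # mark the last column as time, then use spatial coordinates only
+   ST <- ppx(data=df, coord.type=c("s","s","s","t"))
+   mst <- nnwhich(ST, temporal=FALSE)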
+}
+\author{
+  \adrian
+  
+  
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/nobjects.Rd b/man/nobjects.Rd
new file mode 100644
index 0000000..9dbef14
--- /dev/null
+++ b/man/nobjects.Rd
@@ -0,0 +1,57 @@
+\name{nobjects}
+\alias{nobjects}
+\alias{nobjects.ppp}
+\alias{nobjects.ppx}
+\alias{nobjects.psp}
+\alias{nobjects.tess}
+\title{
+  Count Number of Geometrical Objects in a Spatial Dataset
+}
+\description{
+  A generic function to count the number of geometrical objects in a
+  spatial dataset.
+}
+\usage{
+  nobjects(x)
+
+  \method{nobjects}{ppp}(x)
+
+  \method{nobjects}{ppx}(x)
+
+  \method{nobjects}{psp}(x)
+
+  \method{nobjects}{tess}(x)
+}
+\arguments{
+  \item{x}{A dataset.}
+}
+\details{
+  The generic function \code{nobjects} counts the number of
+  geometrical objects in the spatial dataset \code{x}.
+
+  The methods for point patterns (classes \code{"ppp"} and \code{"ppx"},
+  embracing \code{"pp3"} and \code{"lpp"})
+  count the number of points in the pattern.
+
+  The method for line segment patterns (class \code{"psp"})
+  counts the number of line segments in the pattern.
+
+  The method for tessellations (class \code{"tess"})
+  counts the number of tiles of the tessellation.
+}
+\value{
+  A single integer.
+}
+\author{
+  \spatstatAuthors
+}
+\seealso{
+  \code{\link{npoints}}
+}
+\examples{
+   nobjects(redwood)
+   nobjects(edges(letterR))
+   nobjects(dirichlet(cells))
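+   # the "ppx" method also covers classes "pp3" and "lpp", e.g.
+   nobjects(runifpoint3(10))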
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/npfun.Rd b/man/npfun.Rd
new file mode 100644
index 0000000..fc07d1e
--- /dev/null
+++ b/man/npfun.Rd
@@ -0,0 +1,46 @@
+\name{npfun}
+\alias{npfun}
+\title{
+  Dummy Function Returns Number of Points
+}
+\description{
+  Returns a summary function which is constant with value equal to
+  the number of points in the point pattern.
+}
+\usage{
+npfun(X, ..., r)
+}
+\arguments{
+  \item{X}{
+    Point pattern.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{r}{
+    Vector of values of the distance argument \eqn{r}.
+  }
+}
+\details{
+  This function is normally not called by the user. Instead it is passed
+  as an argument to the function \code{\link{psst}}.
+}
+\value{
+  Object of class \code{"fv"} representing a constant function.
+}
+\author{
+  \adrian
+  
+  
+  \ege and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}.
+}
+\seealso{
+  \code{\link{psst}}
+}
+\examples{
+  fit0 <- ppm(cells, ~1, nd=10)
+  v <- psst(fit0, npfun)
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/npoints.Rd b/man/npoints.Rd
new file mode 100644
index 0000000..32f8219
--- /dev/null
+++ b/man/npoints.Rd
@@ -0,0 +1,47 @@
+\name{npoints}
+\alias{npoints}
+\alias{npoints.ppp}
+\alias{npoints.pp3}
+\alias{npoints.ppx}
+\title{Number of Points in a Point Pattern}
+\description{
+  Returns the number of points in a point pattern of any kind.
+}
+\usage{
+  npoints(x)
+  \method{npoints}{ppp}(x)
+  \method{npoints}{pp3}(x)
+  \method{npoints}{ppx}(x)
+}
+\arguments{
+  \item{x}{
+    A point pattern (object of class \code{"ppp"},
+    \code{"pp3"}, \code{"ppx"} or some other suitable class).
+  }
+}
+\value{
+  Integer.
+}
+\details{
+  This function returns the number of points in
+  a point pattern. The function \code{npoints} is generic
+  with methods for the classes \code{"ppp"}, \code{"pp3"},
+  \code{"ppx"} and possibly other classes.
+}
+\seealso{
+  \code{\link{ppp.object}},
+  \code{\link{print.pp3}},
+  \code{\link{print.ppx}}.
+}
+\examples{
+   data(cells)
+   npoints(cells)
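+   # also counts points in a three-dimensional pattern
+   npoints(runifpoint3(10))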
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/nsegments.Rd b/man/nsegments.Rd
new file mode 100644
index 0000000..d1b988f
--- /dev/null
+++ b/man/nsegments.Rd
@@ -0,0 +1,39 @@
+\name{nsegments}
+\alias{nsegments}
+\alias{nsegments.psp}
+\title{
+   Number of Line Segments in a Line Segment Pattern
+}
+\description{
+   Returns the number of line segments in a line segment pattern.
+}
+\usage{
+   nsegments(x)
+
+   \method{nsegments}{psp}(x)
+}
+\arguments{
+  \item{x}{
+    A line segment pattern, i.e. an object of class \code{"psp"},
+    or an object containing a linear network.
+  }
+}
+\details{
+   This function is generic, with methods for 
+   classes \code{"psp"}, \code{"linnet"} and \code{"lpp"}.
+}
+\value{
+   Integer.
+}
+\author{
+  \spatstatAuthors
+}
+\seealso{
+  \code{\link{npoints}}, \code{\link{psp.object}}
+}
+\examples{
+nsegments(copper$Lines)
+nsegments(copper$SouthLines)
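+# also counts the segments of a linear network:
+nsegments(simplenet)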
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/nvertices.Rd b/man/nvertices.Rd
new file mode 100644
index 0000000..2fc3c6c
--- /dev/null
+++ b/man/nvertices.Rd
@@ -0,0 +1,47 @@
+\name{nvertices}
+\alias{nvertices}
+\alias{nvertices.owin}
+\alias{nvertices.default}
+\title{
+  Count Number of Vertices
+}
+\description{
+  Count the number of vertices in an object for which vertices are well-defined.
+}
+\usage{
+  nvertices(x, \dots)
+
+  \method{nvertices}{owin}(x, \dots)
+
+  \method{nvertices}{default}(x, \dots)
+}
+\arguments{
+  \item{x}{
+    A window (object of class \code{"owin"}),
+    or some other object which has vertices.
+  }
+  \item{\dots}{
+    Currently ignored.
+  }
+}
+\details{
+  This function counts the number of vertices of \code{x}
+  as they would be returned by \code{\link{vertices}(x)}.
+  It is more efficient than executing \code{npoints(vertices(x))}.
+}
+\value{
+  A single integer.
+}
+\author{
+  \spatstatAuthors
+  and Suman Rakshit.
+}
+\seealso{
+  \code{\link{vertices}}
+}
+\examples{
+  nvertices(square(2))
+  nvertices(letterR)
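+  # sketch: agrees with counting the vertices directly
+  length(vertices(letterR)$x) == nvertices(letterR)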
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/nztrees.Rd b/man/nztrees.Rd
new file mode 100644
index 0000000..b5f5141
--- /dev/null
+++ b/man/nztrees.Rd
@@ -0,0 +1,48 @@
+\name{nztrees}
+\alias{nztrees}
+\docType{data}
+\title{
+   New Zealand Trees Point Pattern
+}
+\description{
+  The data give the locations of trees in a forest plot.
+
+  They were collected by Mark and Esler (1970) and
+  were extracted and analysed by Ripley (1981, pp. 169-175). 
+  They represent the 
+  positions of 86 trees in a forest plot approximately 140 by 85 feet.
+  
+  Ripley discarded from his analysis the eight trees at the right-hand edge of
+  the plot (which appear to be part of a planted border)
+  and trimmed the window by a 5-foot margin accordingly.
+} 
+\format{
+  An object of class \code{"ppp"}
+  representing the point pattern of tree locations.
+  The Cartesian coordinates are in feet.
+
+  See \code{\link{ppp.object}} for details of the format of a
+  point pattern object.
+}
+\usage{data(nztrees)}
+\source{Mark and Esler (1970), Ripley (1981).}
+\section{Note}{
+  To trim a 5-foot margin off the window, type
+  \code{nzsub <- nztrees[owin(c(0,148), c(0,95))]}
+}
+\references{
+  Ripley, B.D. (1981) 
+  \emph{Spatial statistics}.
+  John Wiley and Sons.
+
+  Mark, A.F. and Esler, A.E. (1970)
+  An assessment of the point-centred quarter method of plotless
+  sampling in some New Zealand forests.
+  \emph{Proceedings of the New Zealand Ecological Society}
+  \bold{17}, 106--110.
+}
+\keyword{datasets}
+\keyword{spatial}
+
+
+ 
diff --git a/man/objsurf.Rd b/man/objsurf.Rd
new file mode 100644
index 0000000..fb5760f
--- /dev/null
+++ b/man/objsurf.Rd
@@ -0,0 +1,97 @@
+\name{objsurf}
+\alias{objsurf}
+\alias{objsurf.dppm}
+\alias{objsurf.kppm}
+\alias{objsurf.minconfit}
+\title{
+  Objective Function Surface
+}
+\description{
+  For a model that was fitted by optimisation,
+  compute the values of the objective function in a neighbourhood
+  of the optimal value.
+}
+\usage{
+objsurf(x, \dots)
+
+\method{objsurf}{dppm}(x, ..., ngrid = 32, ratio = 1.5, verbose = TRUE)
+
+\method{objsurf}{kppm}(x, ..., ngrid = 32, ratio = 1.5, verbose = TRUE)
+
+\method{objsurf}{minconfit}(x, ..., ngrid = 32, ratio = 1.5, verbose = TRUE)
+}
+\arguments{
+  \item{x}{
+    Some kind of model that was fitted
+    by finding the optimal value of an objective function. 
+    An object of class \code{"dppm"}, \code{"kppm"} or \code{"minconfit"}.
+  }
+  \item{\dots}{
+    Extra arguments are usually ignored.
+  }
+  \item{ngrid}{
+    Number of grid points to evaluate along each axis.
+    Either a single integer, or a pair of integers.
+    For example \code{ngrid=32} would mean a \code{32 * 32} grid.
+  }
+  \item{ratio}{
+    Number greater than 1 determining the range of parameter values
+    to be considered. If the optimal parameter value is \code{opt}
+    then the objective function will be evaluated for
+    values between \code{opt/ratio} and \code{opt * ratio}.
+  }
+  \item{verbose}{
+    Logical value indicating whether to print progress reports.
+  }
+}
+\details{
+  The object \code{x} should be some kind of model that was fitted
+  by maximising or minimising the value of an objective function.
+  The objective function will be evaluated on a grid of
+  values of the model parameters. 
+
+  Currently the following types of objects are accepted:
+  \itemize{
+    \item
+    an object of class \code{"dppm"} representing a
+    determinantal point process.
+    See \code{\link{dppm}}.
+    \item
+    an object of class \code{"kppm"} representing a
+    cluster point process or Cox point process. 
+    See \code{\link{kppm}}.
+    \item
+    an object of class \code{"minconfit"} representing a
+    minimum-contrast fit between a summary function and its
+    theoretical counterpart. 
+    See \code{\link{mincontrast}}.
+  }
+  The result is an object of class \code{"objsurf"} which can be
+  printed and plotted: see \code{\link{methods.objsurf}}.
+}
+\value{
+  An object of class \code{"objsurf"} which can be
+  printed and plotted.
+  Essentially a list containing entries \code{x}, \code{y}, \code{z}
+  giving the parameter values and objective function values.
+}
+\author{
+  \adrian and \ege.
+}
+\seealso{
+  \code{\link{methods.objsurf}},
+  \code{\link{kppm}}, 
+  \code{\link{mincontrast}}
+}
+\examples{
+   fit <- kppm(redwood ~ 1, "Thomas")
+   os <- objsurf(fit)
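+   # sketch: a coarser grid over a wider range of parameter values
+   # (same fitted model; arguments as described above)
+   os8 <- objsurf(fit, ngrid=8, ratio=2, verbose=FALSE)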
+
+   if(interactive()) {
+     plot(os)
+     contour(os, add=TRUE)
+     persp(os)
+   }
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/opening.Rd b/man/opening.Rd
new file mode 100644
index 0000000..9ea6939
--- /dev/null
+++ b/man/opening.Rd
@@ -0,0 +1,87 @@
+\name{opening} 
+\alias{opening}
+\alias{opening.owin}
+\alias{opening.psp}
+\alias{opening.ppp}
+\title{Morphological Opening}
+\description{
+  Perform morphological opening of a window, a line segment pattern
+  or a point pattern.
+}
+\usage{
+ opening(w, r, \dots)
+
+ \method{opening}{owin}(w, r, \dots, polygonal=NULL)
+
+ \method{opening}{ppp}(w, r, \dots)
+
+ \method{opening}{psp}(w, r, \dots)
+}
+\arguments{
+  \item{w}{
+    A window (object of class \code{"owin"}),
+    a line segment pattern (object of class \code{"psp"}),
+    or a point pattern (object of class \code{"ppp"}).
+  }
+  \item{r}{positive number: the radius of the opening.}
+  \item{\dots}{
+    Extra arguments passed to \code{\link{as.mask}},
+    controlling the pixel resolution, if a pixel approximation is used.
+  }
+  \item{polygonal}{
+    Logical flag indicating whether to compute a polygonal
+    approximation to the opening (\code{polygonal=TRUE}) or
+    a pixel grid approximation (\code{polygonal=FALSE}).
+  }
+}
+\value{
+  If \code{r > 0}, an object of class \code{"owin"} representing the
+  opened region. If \code{r=0}, the result is identical to \code{w}.
+}
+\details{
+  The morphological opening (Serra, 1982)
+  of a set \eqn{W} by a distance \eqn{r > 0}
+  is the subset of points in \eqn{W} that can be 
+  separated from the boundary of \eqn{W} by a circle of radius \eqn{r}.
+  That is, a point \eqn{x} belongs to the opening 
+  if it is possible to draw a circle of radius \eqn{r} (not necessarily
+  centred on \eqn{x}) that has \eqn{x} on the inside
+  and the boundary of \eqn{W} on the outside.
+  The opened set is a subset of \code{W}.
+
+  For a small radius \eqn{r}, the opening operation
+  has the effect of smoothing out irregularities in the boundary of
+  \eqn{W}. For larger radii, the opening operation removes promontories
+  in the boundary. For very large radii, the opened set is empty.
+
+  The algorithm applies \code{\link{erosion}} followed by
+  \code{\link{dilation}}.
+}
+\seealso{
+  \code{\link{closing}} for the opposite operation.
+
+  \code{\link{dilation}}, \code{\link{erosion}} for the basic
+  operations.  
+  
+  \code{\link{owin}},
+  \code{\link{as.owin}} for information about windows.
+}
+\examples{
+  v <- opening(letterR, 0.3)
+  plot(letterR, type="n", main="opening")
+  plot(v, add=TRUE, col="grey")
+  plot(letterR, add=TRUE)
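+  # sketch: the same set via the basic operations (see Details)
+  v2 <- dilation(erosion(letterR, 0.3), 0.3)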
+}
+\references{
+  Serra, J. (1982)
+  \emph{Image analysis and mathematical morphology}.
+  Academic Press.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/ord.family.Rd b/man/ord.family.Rd
new file mode 100644
index 0000000..bed4b2d
--- /dev/null
+++ b/man/ord.family.Rd
@@ -0,0 +1,63 @@
+\name{ord.family}
+\alias{ord.family}
+\title{Ord Interaction Process Family}
+\description{
+  An object describing the family of all Ord interaction point processes
+}
+\details{
+  \bold{Advanced Use Only!}
+  
+  This structure would not normally be touched by
+  the user. It describes 
+  the family of point process models introduced by Ord (1977).
+ 
+  If you need to create a specific Ord-type model for use in 
+  analysis, use the function \code{\link{OrdThresh}} or
+  \code{\link{Ord}}.
+ 
+  Technically, \code{ord.family} is an object of class \code{"isf"}
+  containing a function \code{ord.family$eval} for
+  evaluating the sufficient statistics of any Ord type
+  point process model taking an exponential family form. 
+} 
+\seealso{
+  \code{\link{pairwise.family}},
+  \code{\link{pairsat.family}},
+  \code{\link{Poisson}},
+  \code{\link{Pairwise}},
+  \code{\link{PairPiece}},
+  \code{\link{Strauss}},
+  \code{\link{StraussHard}},
+  \code{\link{Softcore}},
+  \code{\link{Geyer}},
+  \code{\link{SatPiece}},
+  \code{\link{Saturated}},
+  \code{\link{Ord}},
+  \code{\link{OrdThresh}}
+}
+\references{
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42}, 283--322.
+
+  Ord, J.K. (1977) 
+  Contribution to the discussion of Ripley (1977).
+
+  Ord, J.K. (1978) 
+  How many trees in a forest?
+  \emph{Mathematical Scientist} \bold{3}, 23--33.
+
+  Ripley, B.D. (1977)
+  Modelling spatial patterns (with discussion).
+  \emph{Journal of the Royal Statistical Society, Series B},
+  \bold{39}, 172 -- 212.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/osteo.Rd b/man/osteo.Rd
new file mode 100644
index 0000000..3cca89b
--- /dev/null
+++ b/man/osteo.Rd
@@ -0,0 +1,145 @@
+\name{osteo}
+\alias{osteo}
+\docType{data}
+\title{
+  Osteocyte Lacunae Data: Replicated Three-Dimensional Point Patterns
+}
+\description{
+  These data give the three-dimensional locations of 
+  osteocyte lacunae observed in rectangular volumes of
+  solid bone using a confocal microscope.
+
+  There were four samples of bone, and ten regions were mapped
+  in each bone, yielding 40 spatial point patterns.
+  The data can be regarded as replicated observations of a
+  three-dimensional point process, nested within bone samples.
+}
+\usage{data(osteo)}
+\format{
+  A \code{\link{hyperframe}} with the following columns:
+
+  \tabular{ll}{
+    \code{id} \tab character string identifier of bone sample \cr
+    \code{shortid} \tab last numeral in \code{id} \cr
+    \code{brick} \tab serial number (1 to 10) of sampling volume within
+    this bone sample \cr
+    \code{pts} \tab three dimensional point pattern (class \code{pp3}) \cr
+    \code{depth} \tab the depth of the brick in microns 
+  }
+}
+\details{
+  These data are three-dimensional point patterns
+  representing the positions of \emph{osteocyte lacunae}, holes
+  in bone which were occupied by osteocytes (bone-building cells) during life. 
+  
+  Observations were made on four different skulls of Macaque monkeys
+  using a three-dimensional microscope.
+  From each skull, observations were collected in 10 separate sampling volumes.
+  In all, there are 40 three-dimensional point patterns in the dataset.
+  
+  The data were collected in 1984
+  by A. Baddeley, A. Boyde, C.V. Howard and S. Reid
+  (see references) using the tandem-scanning reflected light microscope
+  (TSRLM) at University College London. This was one of the first
+  optical confocal microscopes available.
+
+  Each point pattern dataset gives the \eqn{(x,y,z)} coordinates  
+  (in microns) of all points visible in a
+  three-dimensional rectangular box (``brick'') of dimensions
+  \eqn{81 \times 100 \times d}{81 * 100 * d} microns,
+  where \eqn{d} varies.
+  The \eqn{z} coordinate is depth into the bone
+  (depth of the focal plane of the confocal microscope); the \eqn{(x,y)}
+  plane is parallel to the exterior surface of the bone;
+  the relative orientation of the \eqn{x} and \eqn{y} axes is not important.
+  
+  The bone samples were three intact skulls and one skull
+  cap, all originally identified as belonging to the macaque monkey
+  \emph{Macaca fascicularis}, from the collection of the
+  Department of Anatomy, University of London. Later analysis
+  (Baddeley et al, 1993) suggested that the skull cap, given here as
+  the first animal, was a different subspecies, and this was confirmed by
+  anatomical inspection.
+}
+\section{Sampling Procedure}{
+  The following extract from Baddeley et al (1987)
+  describes the sampling procedure.
+
+  The parietal bones of three fully articulated adult Macaque monkey
+    \emph{(Macaca fascicularis)} skulls from the collection of
+    University College London were used. The right parietal bone was
+    examined, in each case, approximately 1 cm lateral to the sagittal
+    suture and 2 cm posterior to the coronal suture. The skulls were
+    mounted on plasticine on a moving stage placed beneath the TSRLM.
+    Immersion oil was applied and a \eqn{\times 60}{X 60}, NA 1.0 oil immersion
+    objective lens (Lomo) was focussed at 10 microns below the cranial
+    surface. The TV image was produced by a Panasonic WB 1850/B camera
+    on a Sony PVM 90CE TV monitor.
+
+    A graduated rectangular counting frame
+    \eqn{90 \times 110}{90 * 110} mm (representing
+    \eqn{82 \times 100}{82 * 100} microns in real units)
+    was marked on a Perspex overlay
+    and fixed to the screen. The area of tissue seen within the frame defined
+    a subfield: a guard area of 10 mm width was visible on all sides of the 
+    frame. Ten subfields were examined, arranged approximately in
+    a rectangular grid pattern, with at least one field width separating
+    each pair of fields. The initial field position was determined randomly
+    by applying a randomly-generated coordinate shift to the moving stage.
+    Subsequent fields were attained 
+    using the coarse controls of the microscope stage, in accordance with 
+    the rectangular grid pattern.
+
+    For each subfield, the focal plane was racked down from its initial
+    10 micron depth until all visible osteocyte lacunae had been examined.
+    This depth \eqn{d} was recorded. The 3-dimensional sampling volume was
+    therefore a rectangular box of dimensions
+    \eqn{82 \times 100 \times d}{82 * 100 * d} microns,
+    called a ``brick''.
+    For each visible lacuna, the fine focus racking control was adjusted until
+    maximum brightness was obtained. The depth of the focal plane was then
+    recorded as the \eqn{z} coordinate of the ``centre point'' of the
+    lacuna. Without moving the focal plane, the \eqn{x} and \eqn{y}
+    coordinates of
+    the centre of the lacunar image were read off the graduated counting frame.
+    This required a subjective judgement of the position of the centre of the
+    2-dimensional image. Profiles were approximately elliptical and the centre 
+    was considered to be well-defined. Accuracy of 
+    the recording procedure was tested by independent repetition (by the
+    same operator and by different operators) and found to be reproducible
+    to plus or minus 2 mm on the screen.
+  
+    A lacuna was counted only if its \eqn{(x, y)} coordinates lay inside
+    the \eqn{90 \times 110}{90 * 110} mm counting frame.
+}
+\source{
+  Data were collected by \adrian.
+}
+\references{
+  Baddeley, A.J., Howard, C.V., Boyde, A. and Reid, S.A. (1987)
+  Three dimensional analysis of the spatial distribution of
+  particles using the tandem-scanning reflected light microscope.
+  \emph{Acta Stereologica} \bold{6} (supplement II), 87--100.
+
+  Baddeley, A.J., Moyeed, R.A., Howard, C.V. and Boyde, A. (1993)
+  Analysis of a three-dimensional point pattern
+  with replication.
+  \emph{Applied Statistics} \bold{42}, 641--668.
+
+  Howard, C.V., Reid, S., Baddeley, A.J. and Boyde, A. (1985)
+  Unbiased estimation of particle density 
+  in the tandem-scanning reflected light microscope.
+  \emph{Journal of Microscopy} \bold{138}, 203--212.
+}
+\examples{
+  data(osteo)
+  osteo
+  if(interactive()) {
+    plot(osteo$pts[[1]], main="animal 1, brick 1")
+    ape1 <- osteo[osteo$shortid==4, ]
+    plot(ape1, tick.marks=FALSE)
+    with(osteo, intensity(pts))
+    plot(with(ape1, K3est(pts)))
+  }
+}
+\keyword{datasets}
diff --git a/man/overlap.owin.Rd b/man/overlap.owin.Rd
new file mode 100644
index 0000000..4358adb
--- /dev/null
+++ b/man/overlap.owin.Rd
@@ -0,0 +1,47 @@
+\name{overlap.owin}
+\alias{overlap.owin}
+\title{
+  Compute Area of Overlap
+}
+\description{
+  Computes the area of the overlap (intersection) of
+  two windows.
+}
+\usage{
+overlap.owin(A, B)
+}
+\arguments{
+  \item{A,B}{
+    Windows (objects of class \code{"owin"}).
+  }
+}
+\details{
+  This function computes the area of the overlap between the two
+  windows \code{A} and \code{B}.
+
+  If one of the windows is a binary mask, then both windows
+  are converted to masks on the same grid, and the area is
+  computed by counting pixels. Otherwise, the area is computed
+  analytically (using the discrete Stokes theorem).
+}
+\value{
+  A single numeric value.
+}
+\seealso{
+  \code{\link{intersect.owin}}, 
+  \code{\link{area.owin}},
+  \code{\link{setcov}}.
+}
+\examples{
+  A <- square(1)
+  B <- shift(A, c(0.3, 0.2))
+  overlap.owin(A, B)
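+  # sketch: agrees with the area of the explicit intersection
+  area.owin(intersect.owin(A, B))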
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
\ No newline at end of file
diff --git a/man/owin.Rd b/man/owin.Rd
new file mode 100644
index 0000000..ca20efd
--- /dev/null
+++ b/man/owin.Rd
@@ -0,0 +1,204 @@
+\name{owin}
+\alias{owin}
+\title{Create a Window}
+\description{
+  Creates an object of class \code{"owin"} representing 
+  an observation window in the two-dimensional plane
+}
+\usage{
+  owin(xrange=c(0,1), yrange=c(0,1), ..., poly=NULL, mask=NULL,
+unitname=NULL, xy=NULL)
+}
+\arguments{
+  \item{xrange}{\eqn{x} coordinate limits of enclosing box}
+  \item{yrange}{\eqn{y} coordinate limits of enclosing box}
+  \item{\dots}{Ignored.}
+  \item{poly}{
+    Optional. Polygonal boundary of window.
+    Incompatible with \code{mask}.
+  }
+  \item{mask}{
+    Optional. Logical matrix giving binary image of window.
+    Incompatible with \code{poly}.
+  }
+  \item{unitname}{
+    Optional. Name of unit of length. Either a single character string,
+    or a vector of two character strings giving the
+    singular and plural forms, respectively.
+  }
+  \item{xy}{
+    Optional. List with components \code{x} and \code{y} specifying the
+    pixel coordinates for \code{mask}.
+  }
+}
+\value{
+An object of class \code{"owin"} 
+describing a window in the two-dimensional plane.
+}
+\details{
+  In the \pkg{spatstat} library, a point pattern dataset must include
+  information about the window of observation. This is represented by
+  an object of class \code{"owin"}.
+  See \code{\link{owin.object}} for an overview.
+
+  To create a window in its own right, 
+  users would normally invoke \code{owin},
+  although sometimes \code{\link{as.owin}} may be convenient.
+
+  A window may be rectangular, polygonal, or a mask (a binary image).
+  \itemize{
+    \item
+    \bold{rectangular windows:}
+    If only \code{xrange} and \code{yrange} are given, then
+    the window will be rectangular, with its \eqn{x} and \eqn{y}
+    coordinate dimensions given by these two arguments
+    (which must be vectors of length 2).
+    If no arguments are given at all, the default is the unit square
+    with dimensions \code{xrange=c(0,1)} and \code{yrange=c(0,1)}.
+    \item
+    \bold{polygonal windows:}
+    If \code{poly} is given, then the window will be polygonal.
+    \itemize{
+      \item
+      \emph{single polygon:}
+      If \code{poly} is a matrix or data frame with two columns, or
+      a structure with two component vectors \code{x} and \code{y}
+      of equal length,
+      then these values are interpreted as the Cartesian coordinates
+      of the vertices of a polygon circumscribing the window.
+      The vertices must be listed \emph{anticlockwise}.
+      No vertex should be repeated (i.e. do not repeat the first
+      vertex).
+      \item
+      \emph{multiple polygons or holes:}
+      If \code{poly} is a list, each entry \code{poly[[i]]}
+      of which is a matrix or data frame with two columns
+      or a structure with
+      two component vectors \code{x} and \code{y} of equal length,
+      then the successive list members \code{poly[[i]]}
+      are interpreted as separate polygons which together
+      make up the boundary of the window.
+      The vertices of each polygon must be listed \emph{anticlockwise}
+      if the polygon is part of the external boundary,
+      but \emph{clockwise}
+      if the polygon is the boundary of a hole in the window.
+      Again, do not repeat any vertex.
+    }
+    \item
+    \bold{binary masks:}
+    If \code{mask} is given, then the window will be a binary image.
+    \itemize{
+      \item
+      \emph{Specified by logical matrix:}
+      Normally the argument \code{mask} should be a logical matrix
+      such that \code{mask[i,j]} is \code{TRUE} if the point
+      \code{(x[j],y[i])} belongs to the window, and \code{FALSE} if it
+      does not. Note carefully that rows of \code{mask} correspond to the
+      \eqn{y} coordinate, and columns to the \eqn{x} coordinate.
+      Here \code{x} and \code{y} are vectors of \eqn{x} and \eqn{y}
+      coordinates equally spaced over \code{xrange} and \code{yrange}
+      respectively. The pixel coordinate vectors \code{x} and \code{y}
+      may be specified explicitly using the argument \code{xy}, which
+      should be a list containing components \code{x} and \code{y}.
+      Alternatively there is a sensible default.
+      \item
+      \emph{Specified by list of pixel coordinates:}
+      Alternatively the argument \code{mask} can be a data frame
+      with 2 or 3 columns. If it has 2 columns, it is expected to
+      contain the spatial coordinates of all the
+      pixels which are inside the window.
+      If it has 3 columns,
+      it should contain the spatial coordinates \eqn{(x,y)}
+      of every pixel in the grid, and the logical value associated
+      with each pixel. The pixels may be listed in any order.
+    }
+  }
+  To create a window which is mathematically
+  defined by inequalities in the Cartesian coordinates, 
+  use \code{\link{raster.x}()} and \code{\link{raster.y}()}
+  as in the examples below.
+
+  Functions \code{\link{square}} and \code{\link{disc}}
+  will create square and circular windows, respectively.
+}
+\section{Validity of polygon data}{
+  Polygon data may contain geometrical inconsistencies such as
+  self-intersections and overlaps. These inconsistencies must be
+  removed to prevent problems in other \pkg{spatstat} functions.
+  By default, polygon data will be repaired automatically
+  using polygon-clipping code.
+  The repair process may change the number of vertices in a polygon
+  and the number of polygon components. 
+  To disable the repair process, set \code{spatstat.options(fixpolygons=FALSE)}.
+}
+\seealso{
+  \code{\link{owin.object}},
+  \code{\link{as.owin}},
+  \code{\link{complement.owin}},
+  \code{\link{ppp.object}},
+  \code{\link{ppp}}
+
+  \code{\link{square}},
+  \code{\link{hexagon}},
+  \code{\link{regularpolygon}}, \code{\link{disc}},
+  \code{\link{ellipse}}. 
+}
+\examples{
+  w <- owin()
+  w <- owin(c(0,1), c(0,1))
+  # the unit square
+
+  w <- owin(c(10,20), c(10,30), unitname=c("foot","feet"))
+  # a rectangle of dimensions 10 x 20 feet
+  # with lower left corner at (10,10)
+
+  # polygon (diamond shape)
+  w <- owin(poly=list(x=c(0.5,1,0.5,0),y=c(0,1,2,1)))
+  w <- owin(c(0,1), c(0,2), poly=list(x=c(0.5,1,0.5,0),y=c(0,1,2,1)))
+
+  # polygon with hole
+  ho <- owin(poly=list(list(x=c(0,1,1,0), y=c(0,0,1,1)),
+                       list(x=c(0.6,0.4,0.4,0.6), y=c(0.2,0.2,0.4,0.4))))
+  
+  w <- owin(c(-1,1), c(-1,1), mask=matrix(TRUE, 100,100))
+          # 100 x 100 image, all TRUE
+  X <- raster.x(w)
+  Y <- raster.y(w)
+  wm <- owin(w$xrange, w$yrange, mask=(X^2 + Y^2 <= 1))
+          # discrete approximation to the unit disc
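+  # mask specified by a data frame of pixel coordinates (see Details):
+  pix <- expand.grid(x=seq(0.05, 0.95, by=0.1), y=seq(0.05, 0.95, by=0.1))
+  wtri <- owin(mask=pix[pix$x + pix$y <= 1, ])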
+
+  \dontrun{
+  if(FALSE) {
+    plot(c(0,1),c(0,1),type="n")
+    bdry <- locator()
+    # click the vertices of a polygon (anticlockwise)
+  }
+  }
+  \testonly{
+  bdry <- list(x=c(0.1,0.3,0.7,0.4,0.2),
+               y=c(0.1,0.1,0.5,0.7,0.3))
+  }
+  w <- owin(poly=bdry)
+  \dontrun{plot(w)}
+ 
+ \dontrun{
+ im <- matrix(as.logical(scan("myfile")), nrow=128, ncol=128)
+ # read in an arbitrary 128 x 128 digital image from text file
+ rim <- im[, 128:1]
+ # Assuming it was given in row-major order in the file
+ # i.e. scanning left-to-right in rows from top-to-bottom,
+ # the use of matrix() has effectively transposed rows & columns,
+ # so to convert it to our format just reverse the column order.
+ w <- owin(mask=rim)
+ plot(w)
+ # display it to check!
+ }
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/owin.object.Rd b/man/owin.object.Rd
new file mode 100644
index 0000000..a5d50f4
--- /dev/null
+++ b/man/owin.object.Rd
@@ -0,0 +1,131 @@
+\name{owin.object}
+\alias{owin.object} %DoNotExport
+\title{Class owin}
+\description{
+  A class \code{owin} to define the ``observation window'' of a point pattern.
+}
+\details{
+  In the \pkg{spatstat} library, a point pattern dataset must include
+  information about the window or region in which the pattern was
+  observed. A window is described by an object of class \code{"owin"}.
+  Windows of arbitrary shape are supported.
+  
+  An object of class \code{"owin"} has one of three types:
+  \tabular{ll}{
+    \code{"rectangle"}: \tab
+      a rectangle in the two-dimensional plane with edges parallel to the axes
+    \cr
+    \code{"polygonal"}: \tab
+    a region whose boundary is a polygon or several polygons.
+    The region may have holes and may consist of several disconnected pieces.
+    \cr
+    \code{"mask"}: \tab
+      a binary image (a logical matrix)
+      set to \code{TRUE} for pixels inside the window and
+      \code{FALSE} outside the window.
+  }
+  Objects of class \code{"owin"} may be created by the function
+  \code{\link{owin}}
+  and converted from other types of data by the function
+  \code{\link{as.owin}}.
+
+  They may be manipulated by the functions 
+  \code{\link{as.rectangle}},
+  \code{\link{as.mask}}, 
+  \code{\link{complement.owin}},
+  \code{\link{rotate}},
+  \code{\link{shift}},
+  \code{\link{affine}},
+  \code{\link{erosion}},
+  \code{\link{dilation}},
+  \code{\link{opening}}
+  and
+  \code{\link{closing}}.
+
+  Geometrical calculations available for windows include
+  \code{\link{area.owin}},
+  \code{\link{perimeter}},
+  \code{\link{diameter.owin}},
+  \code{\link{boundingbox}},
+  \code{\link{eroded.areas}},
+  \code{\link{bdist.points}},
+  \code{\link{bdist.pixels}},
+  and
+  \code{even.breaks.owin}.
+  The mapping between continuous coordinates and pixel raster indices
+  is facilitated by the functions
+  \code{\link{raster.x}},
+  \code{\link{raster.y}} and
+  \code{\link{nearest.raster.point}}.
+
+  There is a \code{plot} method for window objects,
+  \code{\link{plot.owin}}. This may be useful if you wish to
+  plot a point pattern's window without the points for graphical
+  purposes.
+
+  There are also methods for
+  \code{summary} and \code{print}. 
+}
+\seealso{
+  \code{\link{owin}},
+  \code{\link{as.owin}},
+  \code{\link{as.rectangle}},
+  \code{\link{as.mask}},
+  \code{\link{summary.owin}},
+  \code{\link{print.owin}},
+  \code{\link{complement.owin}},
+  \code{\link{erosion}},
+  \code{\link{dilation}},
+  \code{\link{opening}},
+  \code{\link{closing}},
+  \code{\link{affine.owin}},
+  \code{\link{shift.owin}},
+  \code{\link{rotate.owin}},
+  \code{\link{raster.x}},
+  \code{\link{raster.y}},
+  \code{\link{nearest.raster.point}},
+  \code{\link{plot.owin}},
+  \code{\link{area.owin}},
+  \code{\link{boundingbox}},
+  \code{\link{diameter}},
+  \code{\link{eroded.areas}},
+  \code{\link{bdist.points}},
+  \code{\link{bdist.pixels}}
+}
+\section{Warnings}{
+  In a window of type \code{"mask"}, the 
+  row index corresponds to increasing \eqn{y} coordinate,
+  and the column index corresponds to increasing \eqn{x} coordinate.
+}
+\examples{
+ w <- owin()
+ w <- owin(c(0,1), c(0,1))
+ # the unit square
+  
+ w <- owin(c(0,1), c(0,2))
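+ # each owin object records which of the three types it has:
+ w$type     # "rectangle"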
+ \dontrun{
+ if(FALSE) {
+   plot(w)
+   # plots edges of a box 1 unit x 2 units
+   v <- locator() 
+   # click on points in the plot window
+   # to be the vertices of a polygon 
+   # traversed in anticlockwise order 
+   u <- owin(c(0,1), c(0,2), poly=v)
+   plot(u)
+   # plots polygonal boundary using polygon()
+   plot(as.mask(u, eps=0.02))
+   # plots discrete pixel approximation to polygon
+ }
+}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{attribute}
+ 
+ 
diff --git a/man/padimage.Rd b/man/padimage.Rd
new file mode 100644
index 0000000..0e01a8c
--- /dev/null
+++ b/man/padimage.Rd
@@ -0,0 +1,69 @@
+\name{padimage}
+\alias{padimage}
+\title{
+  Pad the Border of a Pixel Image
+}
+\description{
+  Fills the border of a pixel image with a given value or values,
+  or extends a pixel image to fill a larger window.
+}
+\usage{
+padimage(X, value=NA, n=1, W=NULL)
+}
+\arguments{
+  \item{X}{
+    Pixel image (object of class \code{"im"}).
+  }
+  \item{value}{
+    Single value to be placed around the border of \code{X}.    
+  }
+  \item{n}{
+    Width of border, in pixels.
+    See Details.
+  }
+  \item{W}{
+    Window for the resulting image.
+    Incompatible with \code{n}.
+  }
+}
+\details{
+  The image \code{X} will be expanded by a margin of \code{n}
+  pixels, or extended to fill the window \code{W},
+  with new pixel values set to \code{value}.
+
+  The argument \code{value} should be a single value (a vector of length
+  1), normally a value of the same type as the pixel values of \code{X}.
+  It may be \code{NA}. Alternatively if \code{X} is a
+  factor-valued image, \code{value} can be one of the levels of
+  \code{X}.
+
+  If \code{n} is given, it may be a single number, specifying the
+  width of the border in pixels. Alternatively it may be a vector
+  of length 2 or 4. It will be replicated to length 4, and these numbers
+  will be interpreted as the border widths for the
+  (left, right, top, bottom) margins respectively.
+
+  Alternatively if \code{W} is given, the image will be extended to
+  the window \code{W}. 
+}
+\value{
+  Another object of class \code{"im"}, of the same type as \code{X}.
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{im}}
+}
+\examples{
+ Z <- setcov(owin())
+ plot(padimage(Z, 1, 10))
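+ # margins may differ on each side: (left, right, top, bottom)
+ plot(padimage(Z, 0, c(2, 4, 8, 16)))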
+}
+\keyword{spatial}
+\keyword{manip}
+
diff --git a/man/pairdist.Rd b/man/pairdist.Rd
new file mode 100644
index 0000000..2d12bb2
--- /dev/null
+++ b/man/pairdist.Rd
@@ -0,0 +1,56 @@
+\name{pairdist}
+\alias{pairdist}
+\title{Pairwise distances}
+\description{
+  Computes the matrix of distances between all pairs of `things'
+  in a dataset
+}
+\usage{
+  pairdist(X, \dots)
+}
+\arguments{
+  \item{X}{
+    Object specifying the locations of a set of `things'
+    (such as a set of points or a set of line segments).
+  }
+  \item{\dots}{
+    Further arguments depending on the method.
+  }
+}
+\value{
+  A square matrix whose \code{[i,j]} entry is the distance
+  between the `things' numbered \code{i} and \code{j}.
+}
+\details{
+  Given a dataset \code{X}
+  (representing either a point pattern or
+  a line segment pattern),
+  \code{pairdist} computes the distance between each pair of
+  `things' in the dataset, and returns 
+  a matrix containing these distances.
+
+  The function \code{pairdist} is generic, with
+  methods for point patterns (objects of class \code{"ppp"}),
+  line segment patterns (objects of class \code{"psp"})
+  and a default method. See the documentation
+  for \code{\link{pairdist.ppp}}, \code{\link{pairdist.psp}}
+  or \code{\link{pairdist.default}} for details.
+}
+\seealso{
+  \code{\link{pairdist.ppp}},
+  \code{\link{pairdist.psp}},
+  \code{\link{pairdist.default}},
+  \code{\link{crossdist}},
+  \code{\link{nndist}},
+  \code{\link{Kest}}
+}
+\author{Pavel Grabarnik
+  \email{pavel.grabar at issp.serpukhov.su}
+  and 
+  \adrian
+  
+  
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/pairdist.default.Rd b/man/pairdist.default.Rd
new file mode 100644
index 0000000..7010143
--- /dev/null
+++ b/man/pairdist.default.Rd
@@ -0,0 +1,92 @@
+\name{pairdist.default}
+\alias{pairdist.default}
+\title{Pairwise distances}
+\description{
+  Computes the matrix of distances between all pairs of points
+  in a set of points
+}
+\usage{
+  \method{pairdist}{default}(X, Y=NULL, \dots, period=NULL, method="C", squared=FALSE)
+}
+\arguments{
+  \item{X,Y}{
+    Arguments specifying the coordinates of a set of points.
+    Typically \code{X} and \code{Y} would be
+    numeric vectors of equal length.
+    Alternatively \code{Y} may be omitted and \code{X} may be
+    a list with two components \code{x} and \code{y},
+    or a matrix with two columns.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{period}{
+    Optional. Dimensions for periodic edge correction.
+  }
+  \item{method}{
+    String specifying which method of calculation to use.
+    Values are \code{"C"} and \code{"interpreted"}.
+    Usually not specified.
+  }
+  \item{squared}{
+    Logical. If \code{squared=TRUE}, the squared distances are
+    returned instead (this computation is faster).
+  }
+}
+\value{
+  A square matrix whose \code{[i,j]} entry is the distance
+  between the points numbered \code{i} and \code{j}.
+}
+\details{
+  Given the coordinates of a set of points,
+  this function computes the Euclidean distances between all pairs of
+  points, and returns the matrix of distances.
+  It is a method for the generic function \code{pairdist}.
+
+  The arguments \code{X} and \code{Y} must determine
+  the coordinates of a set of points. Typically \code{X} and
+  \code{Y} would be numeric vectors of equal length. Alternatively
+  \code{Y} may be omitted and \code{X} may be a list with two components
+  named \code{x} and \code{y}, or a matrix or data frame with two columns.
+  
+  Alternatively if \code{period} is given,
+  then the distances will be computed in the `periodic'
+  sense (also known as `torus' distance).
+  The points will be treated as if they are in a rectangle
+  of width \code{period[1]} and height \code{period[2]}.
+  Opposite edges of the rectangle are regarded as equivalent.
+  
+  If \code{squared=TRUE} then the \emph{squared} Euclidean distances
+  \eqn{d^2} are returned, instead of the Euclidean distances \eqn{d}.
+  The squared distances are faster to calculate, and are sufficient for
+  many purposes (such as finding the nearest neighbour of a point).
+  
+  The argument \code{method} is not normally used. It is
+  retained only for checking the validity of the software.
+  If \code{method = "interpreted"} then the distances are
+  computed using interpreted R code only. If \code{method="C"}
+  (the default) then C code is used. The C code is somewhat faster.
+}
+\seealso{
+  \code{\link{crossdist}},
+  \code{\link{nndist}},
+  \code{\link{Kest}}
+}
+\examples{
+   x <- runif(100)
+   y <- runif(100)
+   d <- pairdist(x, y)
+   d <- pairdist(cbind(x,y))
+   d <- pairdist(x, y, period=c(1,1))
+   d <- pairdist(x, y, squared=TRUE)
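+   # the result is a symmetric matrix with zero diagonal
+   all(d == t(d)) && all(diag(d) == 0)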
+}
+\author{Pavel Grabarnik
+  \email{pavel.grabar at issp.serpukhov.su}
+  and 
+  \adrian
+  
+  
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/pairdist.lpp.Rd b/man/pairdist.lpp.Rd
new file mode 100644
index 0000000..689af46
--- /dev/null
+++ b/man/pairdist.lpp.Rd
@@ -0,0 +1,55 @@
+\name{pairdist.lpp}
+\alias{pairdist.lpp}
+\title{
+  Pairwise shortest-path distances between points on
+  a linear network
+}
+\description{
+  Given a pattern of points on a linear network, compute the
+  matrix of distances between all pairs of points, measuring
+  distance by the shortest path in the network.
+}
+\usage{
+\method{pairdist}{lpp}(X, ..., method="C")
+}
+\arguments{
+  \item{X}{
+    Point pattern on linear network (object of class \code{"lpp"}).
+  }
+  \item{method}{
+    Optional string determining the method of calculation.
+    Either \code{"interpreted"} or \code{"C"}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  Given a pattern of points on a linear network, this function computes the
+  matrix of distances between all pairs of points, measuring
+  distance by the shortest path in the network. 
+  
+  If \code{method="C"} the distances are computed using
+  code in the C language. If \code{method="interpreted"} then the
+  computation is performed using interpreted \R code. The \R code is
+  much slower, but is provided for checking purposes.
+
+  If two points cannot be joined by a path,
+  the distance between them is infinite (\code{Inf}).
+}
+\value{
+  A symmetric matrix, whose values are nonnegative numbers or infinity
+  (\code{Inf}).
+}
+\author{
+  Ang Qi Wei \email{aqw07398 at hotmail.com} and
+  \adrian.
+}
+\seealso{
+  \code{\link{lpp}}
+}
+\examples{
+   X <- runiflpp(12, simplenet)
+   pairdist(X)
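+   # the interpreted code should agree with the C code (checking only)
+   all.equal(pairdist(X), pairdist(X, method="interpreted"))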
+}
+\keyword{spatial}
diff --git a/man/pairdist.pp3.Rd b/man/pairdist.pp3.Rd
new file mode 100644
index 0000000..85b6598
--- /dev/null
+++ b/man/pairdist.pp3.Rd
@@ -0,0 +1,70 @@
+\name{pairdist.pp3}
+\alias{pairdist.pp3}
+\title{Pairwise Distances in Three Dimensions}
+\description{
+  Computes the matrix of distances between all pairs of points
+  in a three-dimensional point pattern.
+}
+\usage{
+  \method{pairdist}{pp3}(X, \dots, periodic=FALSE, squared=FALSE)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"pp3"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{periodic}{
+    Logical. Specifies whether to apply a periodic edge correction.
+  }
+  \item{squared}{
+    Logical. If \code{squared=TRUE}, the squared distances are
+    returned instead (this computation is faster).
+  }
+}
+\value{
+  A square matrix whose \code{[i,j]} entry is the distance
+  between the points numbered \code{i} and \code{j}.
+}
+\details{
+  This is a method for the generic function \code{pairdist}.
+
+  Given a three-dimensional point pattern \code{X}
+  (an object of class \code{"pp3"}),
+  this function computes the Euclidean distances between all pairs of
+  points in \code{X}, and returns the matrix of distances.
+
+  Alternatively if \code{periodic=TRUE} and the window containing \code{X} is a
+  box, then the distances will be computed in the `periodic'
+  sense (also known as `torus' distance): opposite faces of the
+  box are regarded as equivalent.
+  This is meaningless if the window is not a box.
+
+  If \code{squared=TRUE} then the \emph{squared} Euclidean distances
+  \eqn{d^2} are returned, instead of the Euclidean distances \eqn{d}.
+  The squared distances are faster to calculate, and are sufficient for
+  many purposes (such as finding the nearest neighbour of a point).
+}
+\seealso{
+  \code{\link{pairdist}},
+  \code{\link{crossdist}},
+  \code{\link{nndist}},
+  \code{\link{K3est}}
+}
+\examples{
+   X <- runifpoint3(20)
+   d <- pairdist(X)
+   d <- pairdist(X, periodic=TRUE)
+   d <- pairdist(X, squared=TRUE)
+}
+\author{
+  \adrian
+  
+  
+  based on two-dimensional code by 
+  Pavel Grabarnik.
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/pairdist.ppp.Rd b/man/pairdist.ppp.Rd
new file mode 100644
index 0000000..f1d8053
--- /dev/null
+++ b/man/pairdist.ppp.Rd
@@ -0,0 +1,82 @@
+\name{pairdist.ppp}
+\alias{pairdist.ppp}
+\title{Pairwise distances}
+\description{
+  Computes the matrix of distances between all pairs of points
+  in a point pattern.
+}
+\usage{
+  \method{pairdist}{ppp}(X, \dots, periodic=FALSE, method="C", squared=FALSE)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{periodic}{
+    Logical. Specifies whether to apply a periodic edge correction.
+  }
+  \item{method}{
+    String specifying which method of calculation to use.
+    Values are \code{"C"} and \code{"interpreted"}.
+    Usually not specified.
+  }
+  \item{squared}{
+    Logical. If \code{squared=TRUE}, the squared distances are
+    returned instead (this computation is faster).
+  }
+}
+\value{
+  A square matrix whose \code{[i,j]} entry is the distance
+  between the points numbered \code{i} and \code{j}.
+}
+\details{
+  This is a method for the generic function \code{pairdist}.
+
+  Given a point pattern \code{X} (an object of class \code{"ppp"}),
+  this function computes the Euclidean distances between all pairs of
+  points in \code{X}, and returns the matrix of distances.
+
+  Alternatively if \code{periodic=TRUE} and the window containing \code{X} is a
+  rectangle, then the distances will be computed in the `periodic'
+  sense (also known as `torus' distance): opposite edges of the
+  rectangle are regarded as equivalent.
+  This is meaningless if the window is not a rectangle.
+
+  If \code{squared=TRUE} then the \emph{squared} Euclidean distances
+  \eqn{d^2} are returned, instead of the Euclidean distances \eqn{d}.
+  The squared distances are faster to calculate, and are sufficient for
+  many purposes (such as finding the nearest neighbour of a point).
+  
+  The argument \code{method} is not normally used. It is
+  retained only for checking the validity of the software.
+  If \code{method = "interpreted"} then the distances are
+  computed using interpreted R code only. If \code{method="C"}
+  (the default) then C code is used. The C code is somewhat faster.
+}
+\seealso{
+  \code{\link{pairdist}},
+  \code{\link{pairdist.default}},
+  \code{\link{pairdist.psp}},
+  \code{\link{crossdist}},
+  \code{\link{nndist}},
+  \code{\link{Kest}}
+}
+\examples{
+   data(cells)
+   d <- pairdist(cells)
+   d <- pairdist(cells, periodic=TRUE)
+   d <- pairdist(cells, squared=TRUE)
+}
+\author{Pavel Grabarnik
+  \email{pavel.grabar at issp.serpukhov.su}
+  and 
+  \adrian
+  
+  
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/pairdist.ppx.Rd b/man/pairdist.ppx.Rd
new file mode 100644
index 0000000..ce842c8
--- /dev/null
+++ b/man/pairdist.ppx.Rd
@@ -0,0 +1,53 @@
+\name{pairdist.ppx}
+\alias{pairdist.ppx}
+\title{Pairwise Distances in Any Dimensions}
+\description{
+  Computes the matrix of distances between all pairs of points
+  in a multi-dimensional point pattern.
+}
+\usage{
+  \method{pairdist}{ppx}(X, \dots)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppx"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{coords.ppx}} to determine
+    which coordinates should be used.
+  }
+}
+\value{
+  A square matrix whose \code{[i,j]} entry is the distance
+  between the points numbered \code{i} and \code{j}.
+}
+\details{
+  This is a method for the generic function \code{pairdist}.
+
+  Given a multi-dimensional point pattern \code{X}
+  (an object of class \code{"ppx"}),
+  this function computes the Euclidean distances between all pairs of
+  points in \code{X}, and returns the matrix of distances.
+
+  By default, both spatial and temporal coordinates are extracted.
+  To obtain the spatial distance between points in a space-time point
+  pattern, set \code{temporal=FALSE}.
+}
+\seealso{
+  \code{\link{pairdist}},
+  \code{\link{crossdist}},
+  \code{\link{nndist}}
+}
+\examples{
+   df <- data.frame(x=runif(4),y=runif(4),z=runif(4),w=runif(4))
+   X <- ppx(data=df)
+   pairdist(X)
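+   # sketch of the space-time case mentioned in Details:
+   ST <- ppx(data=df, coord.type=c("s","s","s","t"))
+   pairdist(ST, temporal=FALSE)   # spatial distances only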
+}
+\author{
+  \adrian
+  
+  
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/pairdist.psp.Rd b/man/pairdist.psp.Rd
new file mode 100644
index 0000000..079f9f7
--- /dev/null
+++ b/man/pairdist.psp.Rd
@@ -0,0 +1,78 @@
+\name{pairdist.psp}
+\alias{pairdist.psp}
+\title{Pairwise distances between line segments}
+\description{
+  Computes the matrix of distances between all pairs of line segments
+  in a line segment pattern.
+}
+\usage{
+  \method{pairdist}{psp}(X, \dots, method="C", type="Hausdorff")
+}
+\arguments{
+  \item{X}{
+    A line segment pattern (object of class \code{"psp"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{method}{
+    String specifying which method of calculation to use.
+    Values are \code{"C"} and \code{"interpreted"}.
+    Usually not specified.
+  }
+  \item{type}{
+    Type of distance to be computed. Options are
+    \code{"Hausdorff"} and \code{"separation"}. Partial matching is used.
+  }
+}
+\value{
+  A square matrix whose \code{[i,j]} entry is the distance
+  between the line segments numbered \code{i} and \code{j}.
+}
+\details{
+  This function computes the distance between each pair of
+  line segments in \code{X}, and returns the matrix of distances.
+  
+  This is a method for the generic function \code{\link{pairdist}}
+  for the class \code{"psp"}.
+
+  The distances between line segments are measured in one of two ways:
+  \itemize{
+    \item if \code{type="Hausdorff"}, distances are computed
+    in the Hausdorff metric. The Hausdorff
+    distance between two line segments is the \emph{maximum} distance
+    from any point on one of the segments to the nearest point on
+    the other segment.
+    \item if \code{type="separation"}, distances are computed
+    as the \emph{minimum} distance from a point on one line segment to
+    a point on the other line segment. For example, line segments which
+    cross over each other have separation zero.
+  }
+  
+  The argument \code{method} is not normally used. It is
+  retained only for checking the validity of the software.
+  If \code{method = "interpreted"} then the distances are
+  computed using interpreted \R code only. If \code{method="C"}
+  (the default) then compiled \code{C} code is used,
+  which is somewhat faster.
+}
+\seealso{
+  \code{\link{crossdist}},
+  \code{\link{nndist}},
+  \code{\link{pairdist.ppp}}
+}
+\examples{
+   L <- psp(runif(10), runif(10), runif(10), runif(10), owin())
+   D <- pairdist(L)
+   S <- pairdist(L, type="sep")
+}
+\author{
+  \adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/pairorient.Rd b/man/pairorient.Rd
new file mode 100644
index 0000000..7e739a8
--- /dev/null
+++ b/man/pairorient.Rd
@@ -0,0 +1,112 @@
+\name{pairorient}
+\alias{pairorient}
+\title{
+  Point Pair Orientation Distribution
+}
+\description{
+  Computes the distribution of the orientation of vectors joining
+  pairs of points at a particular range of distances.
+}
+\usage{
+pairorient(X, r1, r2, ..., cumulative=FALSE,
+           correction, ratio = FALSE,
+           unit=c("degree", "radian"), domain=NULL)
+}
+\arguments{
+  \item{X}{
+    Point pattern (object of class \code{"ppp"}).
+  }
+  \item{r1,r2}{
+    Minimum and maximum values of distance
+    to be considered.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{circdensity}} to control
+    the kernel smoothing, if \code{cumulative=FALSE}.
+  }
+  \item{cumulative}{
+    Logical value specifying whether to estimate the probability density
+    (\code{cumulative=FALSE}, the default) or the cumulative
+    distribution function (\code{cumulative=TRUE}).
+  }
+  \item{correction}{
+    Character vector specifying edge correction or corrections.
+    Options are \code{"none"}, \code{"isotropic"}, \code{"translate"},
+    \code{"good"} and \code{"best"}.
+    Alternatively \code{correction="all"} selects all options.
+  }
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    each edge-corrected estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+  \item{unit}{
+    Unit in which the angles should be expressed.
+    Either \code{"degree"} or \code{"radian"}.
+  }
+  \item{domain}{
+    Optional window. The first point \eqn{x_i}{x[i]} of each pair of points
+    will be constrained to lie in \code{domain}.
+  }
+}
+\details{
+  This algorithm considers all pairs of points in the pattern
+  \code{X} that lie more than \code{r1} and less than \code{r2}
+  units apart. The \emph{direction} of the arrow joining the points
+  is measured, as an angle in degrees or radians,
+  anticlockwise from the \eqn{x} axis.
+
+  If \code{cumulative=FALSE} (the default),
+  a kernel estimate of the probability density of the orientations
+  is calculated using \code{\link{circdensity}}.
+
+  If \code{cumulative=TRUE}, then the cumulative distribution
+  function of these directions is calculated.
+  This is the function \eqn{O_{r1,r2}(\phi)}{O[r1,r2](phi)} defined 
+  in Stoyan and Stoyan (1994), equation (14.53), page 271.
+
+  In either case the result can be plotted as a rose diagram by 
+  \code{\link{rose}}, or as a function plot by \code{\link{plot.fv}}.
+  
+  The algorithm gives each observed direction a weight,
+  determined by an edge correction, to adjust for the fact that some
+  interpoint distances are more likely to be observed than others.
+  The choice of edge correction or corrections is determined by the argument
+  \code{correction}.
+
+  It is also possible to calculate an estimate of the probability
+  density from the cumulative distribution function,
+  by numerical differentiation. 
+  Use \code{\link{deriv.fv}} with the argument \code{Dperiodic=TRUE}.
+}
+\value{
+  A function value table (object of class \code{"fv"})
+  containing the estimates of the probability density or the
+  cumulative distribution function of angles,
+  in degrees (if \code{unit="degree"})
+  or radians (if \code{unit="radian"}).
+}
+\references{
+  Stoyan, D. and Stoyan, H. (1994)
+  \emph{Fractals, random shapes and point fields:
+  methods of geometrical statistics.}
+  John Wiley and Sons.
+}
+\seealso{
+  \code{\link{Kest}}, \code{\link{Ksector}}, \code{\link{nnorient}}
+}
+\examples{
+  rose(pairorient(redwood, 0.05, 0.15, sigma=8), col="grey")
+  plot(CDF <- pairorient(redwood, 0.05, 0.15, cumulative=TRUE))
+  plot(f <- deriv(CDF, spar=0.6, Dperiodic=TRUE))
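+  # a further sketch (parameter values illustrative): angles on the
+  # radian scale, with the first point of each pair restricted to a
+  # subwindow via 'domain'
+  rose(pairorient(redwood, 0.05, 0.15, unit="radian", domain=square(0.5)))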
+}
+\author{\adrian
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/pairs.im.Rd b/man/pairs.im.Rd
new file mode 100644
index 0000000..a7283c4
--- /dev/null
+++ b/man/pairs.im.Rd
@@ -0,0 +1,93 @@
+\name{pairs.im}
+\alias{pairs.im}
+\title{
+  Scatterplot Matrix for Pixel Images
+}
+\description{
+  Produces a scatterplot matrix of the pixel values
+  in two or more pixel images.
+}
+\usage{
+\method{pairs}{im}(..., plot=TRUE)
+}
+\arguments{
+  \item{\dots}{
+    Any number of arguments, each of which is either
+    a pixel image (object of class \code{"im"})
+    or a named argument to be passed to \code{\link{pairs.default}}.
+  }
+  \item{plot}{
+    Logical. If \code{TRUE}, the scatterplot matrix is plotted.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{pairs}}
+  for the class of pixel images.
+  
+  It produces a square array of plot panels, in which each panel shows
+  a scatterplot of the pixel values of one image against the
+  corresponding pixel values of another image.
+
+  At least two of the arguments \code{\dots}
+  should be pixel images (objects of class \code{"im"}).
+  Their spatial domains must overlap, but need not have the same
+  pixel dimensions.
+
+  First the pixel image domains are intersected, and converted to a
+  common pixel resolution. Then the corresponding pixel values of each image are
+  extracted. Then \code{\link{pairs.default}} is called to 
+  plot the scatterplot matrix.
+
+  Any arguments in \code{\dots} which are not pixel images will be
+  passed to \code{\link{pairs.default}} to control the plot.
+}
+\section{Image or Contour Plots}{
+  Since the scatterplots may show very dense concentrations of points,
+  it may be useful to set \code{panel=panel.image}
+  or \code{panel=panel.contour} to draw a colour image or contour plot
+  of the kernel-smoothed density of the scatterplot in each panel.
+  The argument \code{panel} is passed
+  to \code{\link{pairs.default}}. See the help for 
+  \code{\link{panel.image}} and \code{\link{panel.contour}}.
+}
+\section{Low Level Control of Graphics}{
+  To control the appearance of the individual scatterplot panels,
+  see \code{\link{pairs.default}}, \code{\link{points}}
+  or \code{\link{par}}.
+  To control the plotting symbol for the points in the scatterplot,
+  use the arguments \code{pch}, \code{col}, \code{bg} as described
+  under \code{\link{points}} (because 
+  the default panel plotter is the function \code{\link{points}}).
+  To suppress the tick marks on the plot axes,
+  type \code{par(xaxt="n", yaxt="n")} before calling \code{pairs}.
+}
+\value{
+  Invisibly, a \code{data.frame} containing the
+  corresponding pixel values for each image.
+  The return value also belongs to the class \code{"plotpairsim"},
+  which has a \code{plot} method, so that it can be re-plotted.
+}
+\seealso{
+  \code{\link{pairs}},
+  \code{\link{pairs.default}},
+  \code{\link{panel.contour}},
+  \code{\link{panel.image}},
+  \code{\link{plot.im}},
+  \code{\link{im}},
+  \code{\link{par}}
+}
+\examples{
+  X <- density(rpoispp(30))
+  Y <- density(rpoispp(40))
+  Z <- density(rpoispp(30))
+  pairs(X,Y,Z)
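+  # sketch: for dense scatterplots, panels can instead be drawn as
+  # smoothed colour images (see the Image or Contour Plots section)
+  pairs(X, Y, Z, panel=panel.image)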
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{hplot}
+
diff --git a/man/pairs.linim.Rd b/man/pairs.linim.Rd
new file mode 100644
index 0000000..883568c
--- /dev/null
+++ b/man/pairs.linim.Rd
@@ -0,0 +1,69 @@
+\name{pairs.linim}
+\alias{pairs.linim}
+\title{
+  Scatterplot Matrix for Pixel Images on a Linear Network
+}
+\description{
+  Produces a scatterplot matrix of the pixel values
+  in two or more pixel images on a linear network.
+}
+\usage{
+\method{pairs}{linim}(..., plot=TRUE, eps=NULL)
+}
+\arguments{
+  \item{\dots}{
+    Any number of arguments, each of which is either
+    a pixel image on a linear network (object of class \code{"linim"}),
+    a pixel image (object of class \code{"im"}),
+    or a named argument to be passed to \code{\link{pairs.default}}.
+  }
+  \item{plot}{
+    Logical. If \code{TRUE}, the scatterplot matrix is plotted.
+  }
+  \item{eps}{
+    Optional. Spacing between sample points on the network.
+    A positive number.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{pairs}}
+  for the class of pixel images on a linear network.
+  
+  It produces a square array of plot panels, in which each panel shows
+  a scatterplot of the pixel values of one image against the
+  corresponding pixel values of another image.
+
+  At least two of the arguments \code{\dots} should be a pixel image
+  on a linear network (object of class \code{"linim"}).
+  They should be defined on the \bold{same} linear network,
+  but may have different pixel resolutions.
+
+  First the pixel values of each image are extracted at a
+  set of sample points equally-spaced across the network.
+  Then \code{\link{pairs.default}} is called to 
+  plot the scatterplot matrix.
+
+  Any arguments in \code{\dots} which are not pixel images will be
+  passed to \code{\link{pairs.default}} to control the plot.
+}
+\value{
+  Invisibly, a \code{data.frame} containing the
+  corresponding pixel values for each image.
+  The return value also belongs to the class \code{"plotpairsim"},
+  which has a \code{plot} method, so that it can be re-plotted.
+}
+\seealso{
+  \code{\link{pairs.default}},
+  \code{\link{pairs.im}}
+}
+\examples{
+  fit <- lppm(chicago ~ marks * (x+y))
+  lam <- predict(fit)
+  do.call(pairs, lam)
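+  # sketch: 'eps' controls the sample spacing along the network;
+  # the value 50 is illustrative (chicago coordinates are in feet)
+  do.call(pairs, append(lam, list(eps=50)))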
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{hplot}
+
diff --git a/man/pairsat.family.Rd b/man/pairsat.family.Rd
new file mode 100644
index 0000000..0a4c5e7
--- /dev/null
+++ b/man/pairsat.family.Rd
@@ -0,0 +1,67 @@
+\name{pairsat.family}
+\alias{pairsat.family}
+\title{Saturated Pairwise Interaction Point Process Family}
+\description{
+  An object describing the Saturated Pairwise Interaction
+  family of point process models
+}
+\details{
+  \bold{Advanced Use Only!}
+  
+  This structure would not normally be touched by
+  the user. It describes the ``saturated pairwise interaction''
+  family of point process models.
+ 
+  If you need to create a specific interaction model for use in 
+  spatial pattern analysis, use the function \code{\link{Saturated}()}
+  or the two existing implementations of models in this family,
+  \code{\link{Geyer}()} and \code{\link{SatPiece}()}.
+ 
+  Geyer (1999) introduced the ``saturation process'', a modification of the
+  Strauss process in which the total contribution
+  to the potential  from each point (from its pairwise interaction with all 
+  other points) is trimmed to a maximum value \eqn{c}. 
+  This model is implemented in the function \code{\link{Geyer}()}.
+ 
+  The present class \code{pairsat.family} is the 
+  extension of this saturation idea to all pairwise interactions.
+  Note that the resulting models are no longer pairwise interaction
+  processes: they have interactions of infinite order.
+ 
+  \code{pairsat.family} is an object of class \code{"isf"} 
+  containing a function \code{pairwise$eval} for
+  evaluating the sufficient statistics of any saturated pairwise interaction
+  point process model in which the original pair potentials 
+  take an exponential family form. 
+} 
+\references{
+  Geyer, C.J. (1999)
+  Likelihood Inference for Spatial Point Processes.
+  Chapter 3 in 
+  O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds)
+  \emph{Stochastic Geometry: Likelihood and Computation},
+  Chapman and Hall / CRC, 
+  Monographs on Statistics and Applied Probability, number 80.
+  Pages 79--140.
+}
+\seealso{
+  \code{\link{Geyer}} to create the Geyer saturation process.
+
+  \code{\link{SatPiece}} to create a saturated process with
+  piecewise constant pair potential.
+
+  \code{\link{Saturated}} to create a more general saturation model.
+
+  Other families:
+  \code{\link{inforder.family}},
+  \code{\link{ord.family}},
+  \code{\link{pairwise.family}}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/pairwise.family.Rd b/man/pairwise.family.Rd
new file mode 100644
index 0000000..575b794
--- /dev/null
+++ b/man/pairwise.family.Rd
@@ -0,0 +1,58 @@
+\name{pairwise.family}
+\alias{pairwise.family}
+\title{Pairwise Interaction Process Family}
+\description{
+  An object describing the family of all pairwise interaction Gibbs
+  point processes.
+}
+\details{
+  \bold{Advanced Use Only!}
+  
+  This structure would not normally be touched by
+  the user. It describes 
+  the pairwise interaction family of point process models.
+ 
+  If you need to create a specific pairwise interaction model for use in 
+  modelling, use the function \code{\link{Pairwise}} or one of the existing
+  functions listed below.
+ 
+  In any case, \code{pairwise.family} is an object of class \code{"isf"}
+  containing a function \code{pairwise.family$eval} for
+  evaluating the sufficient statistics of any pairwise interaction
+  point process model taking an exponential family form. 
+} 
+\seealso{
+  Other families: 
+  \code{\link{pairsat.family}},
+  \code{\link{ord.family}},
+  \code{\link{inforder.family}}.
+
+  Pairwise interactions:
+  \code{\link{Poisson}},
+  \code{\link{Pairwise}},
+  \code{\link{PairPiece}},
+  \code{\link{Fiksel}},
+  \code{\link{Hardcore}},
+  \code{\link{LennardJones}},
+  \code{\link{MultiHard}},
+  \code{\link{MultiStrauss}},
+  \code{\link{MultiStraussHard}},
+  \code{\link{Strauss}},
+  \code{\link{StraussHard}},
+  \code{\link{Softcore}}.
+
+  Other interactions:
+  \code{\link{AreaInter}},
+  \code{\link{Geyer}},
+  \code{\link{Saturated}},
+  \code{\link{Ord}},
+  \code{\link{OrdThresh}}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/panel.contour.Rd b/man/panel.contour.Rd
new file mode 100644
index 0000000..e8272c1
--- /dev/null
+++ b/man/panel.contour.Rd
@@ -0,0 +1,81 @@
+\name{panel.contour}
+\alias{panel.contour}
+\alias{panel.image}
+\alias{panel.histogram}
+\title{
+  Panel Plots using Colour Image or Contour Lines
+}
+\description{
+  These functions can be passed to \code{\link[graphics]{pairs}} or
+  \code{\link[graphics]{coplot}}
+  to determine what kind of plotting is done in each panel
+  of a multi-panel graphical display. 
+}
+\usage{
+panel.contour(x, y, ..., sigma = NULL)
+
+panel.image(x, y, ..., sigma = NULL)
+
+panel.histogram(x, ...)
+}
+\arguments{
+  \item{x,y}{
+    Coordinates of points in a scatterplot.
+  }
+  \item{\dots}{
+    Extra graphics arguments, passed to \code{\link{contour.im}},
+    \code{\link{plot.im}} or \code{\link[graphics]{rect}}, respectively,
+    to control the appearance of the panel.
+  }
+  \item{sigma}{
+    Bandwidth of kernel smoother, on a scale where
+    \eqn{x} and \eqn{y} range between 0 and 1.
+  }
+}
+\details{
+  These functions can serve as one of the arguments \code{panel},
+  \code{lower.panel}, \code{upper.panel}, \code{diag.panel}
+  passed to graphics commands like
+  \code{\link[graphics]{pairs}} or \code{\link[graphics]{coplot}},
+  to determine what kind of plotting is done in each panel
+  of a multi-panel graphical display. In particular they work
+  with \code{\link{pairs.im}}.
+
+  The functions \code{panel.contour} and \code{panel.image}
+  are suitable for the off-diagonal plots which involve
+  two datasets \code{x} and \code{y}.
+  They first rescale \code{x} and \code{y} to the unit square,
+  then apply kernel smoothing with bandwidth \code{sigma}
+  using \code{\link{density.ppp}}.
+  Then \code{panel.contour} draws a contour plot
+  while \code{panel.image} draws a colour image.
+
+  The function \code{panel.histogram} is suitable for the
+  diagonal plots which involve a single dataset \code{x}.
+  It displays a histogram of the data.
+}
+\value{
+  Null.
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{pairs.im}},
+  \code{\link{pairs.default}},
+  \code{\link{panel.smooth}}
+}
+\examples{
+  with(bei.extra,
+         pairs(grad, elev,
+               panel      = panel.contour,
+               diag.panel = panel.histogram))
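+  # sketch: the same display with colour images in the off-diagonal panels
+  with(bei.extra,
+         pairs(grad, elev,
+               panel      = panel.image,
+               diag.panel = panel.histogram))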
+}
+\keyword{spatial}
+\keyword{hplot}
+
diff --git a/man/paracou.Rd b/man/paracou.Rd
new file mode 100644
index 0000000..6238f49
--- /dev/null
+++ b/man/paracou.Rd
@@ -0,0 +1,54 @@
+\name{paracou}
+\alias{paracou}
+\docType{data}
+\title{
+  Kimboto trees at Paracou, French Guiana
+}
+\description{
+  This dataset is a point pattern of adult and juvenile Kimboto trees
+  (\emph{Pradosia cochlearia} or \emph{P. ptychandra})
+  recorded at Paracou in French Guiana.
+  See Flores (2005).
+
+  The dataset \code{paracou} is a point pattern
+  (object of class \code{"ppp"}) containing the spatial coordinates
+  of each tree, marked by age (a factor with levels \code{adult} and
+  \code{juvenile}). The survey region is a rectangle
+  approximately 400 by 525 metres. Coordinates are given in metres.
+
+  Note that the data contain duplicated points (two points at the
+  same location). To determine which points are duplicates,
+  use \code{\link{duplicated.ppp}}.
+  To remove the duplication, use \code{\link{unique.ppp}}.
+}
+\usage{data(paracou)}
+\examples{
+plot(paracou, cols=2:3, chars=c(16,3))
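+# sketch, using the functions named above: count the duplicated
+# points, then plot the de-duplicated pattern
+sum(duplicated(paracou))
+plot(unique(paracou), cols=2:3, chars=c(16,3))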
+}
+\source{
+  Data kindly contributed by Olivier Flores.
+  All data belong to CIRAD \url{http://www.cirad.fr}
+  and UMR EcoFoG \url{http://www.ecofog.gf} and
+  are included in \pkg{spatstat} with permission.
+  Original data sources:
+  juvenile and some adult trees collected by Flores (2005);
+  adult tree data sourced from CIRAD Paracou experimental plots dataset
+  (2003 campaign).
+}
+\references{
+  Flores, O. (2005)
+  \emph{\ifelse{latex}{\out{D{\'e}terminisme de la
+  r{\'e}g{\'e}n{\'e}ration chez quinze esp{\`e}ces d'arbres tropicaux
+  en for{\^e}t guyanaise: les effets de l'environnement et de la
+  limitation par la dispersion.}}{ Determinisme de la regeneration
+  chez quinze especes d'arbres tropicaux en foret guyanaise: les
+  effets de l'environnement et de la limitation par la dispersion.}}
+  PhD Thesis, University of Montpellier 2, Montpellier, France.
+
+  Picard, N., Bar-Hen, A., Mortier, F. and Chadoeuf, J. (2009)
+  The multi-scale marked area-interaction point process: a model for
+  the spatial pattern of trees.
+  \emph{Scandinavian Journal of Statistics} \bold{36}, 23--41.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/parameters.Rd b/man/parameters.Rd
new file mode 100644
index 0000000..c4268bc
--- /dev/null
+++ b/man/parameters.Rd
@@ -0,0 +1,65 @@
+\name{parameters}
+\alias{parameters}
+\alias{parameters.dppm}
+\alias{parameters.kppm}
+\alias{parameters.ppm}
+\alias{parameters.profilepl}
+\alias{parameters.interact}
+\alias{parameters.fii}
+\title{
+  Extract Model Parameters in Understandable Form
+}
+\description{
+  Given a fitted model of some kind, this function
+  extracts all the parameters needed to specify the model,
+  and returns them as a list.
+}
+\usage{
+parameters(model, \dots)
+
+\method{parameters}{dppm}(model, \dots)
+
+\method{parameters}{kppm}(model, \dots)
+
+\method{parameters}{ppm}(model, \dots)
+
+\method{parameters}{profilepl}(model, \dots)
+
+\method{parameters}{fii}(model, \dots)
+
+\method{parameters}{interact}(model, \dots)
+}
+\arguments{
+  \item{model}{
+    A fitted model of some kind.
+  }
+  \item{\dots}{
+    Arguments passed to methods.
+  }
+}
+\details{
+  The argument \code{model} should be a fitted model of some kind.
+  This function extracts all the parameters that would be needed to
+  specify the model, and returns them as a list.
+
+  The function \code{parameters} is generic, with methods
+  for class \code{"ppm"}, \code{"kppm"}, \code{"dppm"} and \code{"profilepl"}
+  and other classes.
+}
+\value{
+  A named list, whose format depends on the fitted model.
+}
+\author{
+\spatstatAuthors
+}
+\seealso{
+  \code{\link{coef}}
+}
+\examples{
+  fit1 <- ppm(cells ~ x, Strauss(0.1))
+  parameters(fit1)
+  fit2 <- kppm(redwood ~ x, "Thomas")
+  parameters(fit2)
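+  # sketch: the method for class "interact" accepts a bare
+  # interaction object such as Strauss(0.1)
+  parameters(Strauss(0.1))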
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/parres.Rd b/man/parres.Rd
new file mode 100644
index 0000000..9fd2174
--- /dev/null
+++ b/man/parres.Rd
@@ -0,0 +1,204 @@
+\name{parres}
+\alias{parres}
+\title{
+  Partial Residuals for Point Process Model
+}
+\description{
+  Computes the smoothed partial residuals, a diagnostic
+  for transformation of a covariate in a Poisson point process model.
+}
+\usage{
+parres(model, covariate, ...,
+       smooth.effect=FALSE, subregion=NULL,
+       bw = "nrd0", adjust=1, from = NULL, to = NULL, n = 512,
+       bw.input = c("points", "quad"), bw.restrict=FALSE, covname)
+}
+\arguments{
+  \item{model}{
+    Fitted point process model (object of class \code{"ppm"}).
+  }
+  \item{covariate}{
+    The covariate of interest.
+    Either a character string matching the name of one of
+    the canonical covariates in the model, or one of the names
+    \code{"x"} or \code{"y"} referring to the Cartesian coordinates,
+    or one of the names of the covariates given when \code{model} was fitted,
+    or a pixel image (object of class \code{"im"})
+    or \code{function(x,y)} supplying the
+    values of a covariate at any location.
+  }
+  \item{smooth.effect}{
+    Logical. Determines the choice of algorithm. See Details.
+  }
+  \item{subregion}{
+    Optional.  A window (object of class \code{"owin"})
+    specifying a subset of the spatial domain of the data.
+    The calculation will be confined to the data in this subregion.
+  }
+  \item{bw}{
+    Smoothing bandwidth or bandwidth rule
+    (passed to \code{\link[stats]{density.default}}).
+  }
+  \item{adjust}{
+    Smoothing bandwidth adjustment factor
+    (passed to \code{\link[stats]{density.default}}).
+  }
+  \item{n, from, to}{
+    Arguments passed to \code{\link[stats]{density.default}} to
+    control the number and range of values at which the function
+    will be estimated.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link[stats]{density.default}}.
+  }
+  \item{bw.input}{
+    Character string specifying the input data used for automatic
+    bandwidth selection. 
+  }
+  \item{bw.restrict}{
+    Logical value, specifying whether bandwidth selection is performed using
+    data from the entire spatial domain or from the \code{subregion}.
+  }
+  \item{covname}{
+    Optional. Character string to use as the name of the covariate.
+  }
+}
+\details{
+  This command computes the smoothed partial residual diagnostic
+  (Baddeley, Chang, Song and Turner, 2013)
+  for the transformation of a covariate
+  in a Poisson point process model.
+  
+  The argument \code{model} must be a fitted Poisson point process model.
+  
+  The diagnostic works in two different ways:
+  \describe{
+    \item{Canonical covariate:}{
+      The argument \code{covariate} may be a character string
+      which is the name of one of the \emph{canonical covariates} in the
+      model. 
+      The canonical covariates are the
+      functions \eqn{Z_j}{Z[j]} that appear
+      in the expression for the Poisson point process intensity
+      \deqn{
+	\lambda(u) = \exp(\beta_1 Z_1(u) + \ldots + \beta_p Z_p(u))
+      }{
+	lambda(u) = exp(beta[1] * Z[1](u) + ... + beta[p] * Z[p](u))
+      }
+      at spatial location \eqn{u}.
+      Type \code{names(coef(model))} to see the names of the
+      canonical covariates in \code{model}.
+      If the selected covariate is \eqn{Z_j}{Z[j]}, then 
+      the diagnostic plot concerns the model term
+      \eqn{\beta_j Z_j(u)}{beta[j] * Z[j](u)}. The plot shows a smooth
+      estimate of a function \eqn{h(z)} that should replace this linear
+      term, that is, \eqn{\beta_j Z_j(u)}{beta[j] * Z[j](u)} should be
+      replaced by \eqn{h(Z_j(u))}{h(Z[j](u))}. The linear function is
+      also plotted as a dotted line.
+    }
+    \item{New covariate:}{
+      If the argument \code{covariate} is a pixel image
+      (object of class \code{"im"}) or a \code{function(x,y)},
+      it is assumed to provide the values of a covariate that is
+      not present in the model.
+      Alternatively \code{covariate} can be the name of a
+      covariate that was supplied when the model was fitted
+      (i.e. in the call to \code{\link{ppm}})
+      but which does not feature in the model formula.
+      In either case we speak of a new covariate \eqn{Z(u)}.
+      If the fitted model intensity is \eqn{\lambda(u)}{lambda(u)}
+      then we consider modifying this to
+      \eqn{\lambda(u) \exp(h(Z(u)))}{lambda(u) * exp(h(Z(u)))}
+      where \eqn{h(z)} is some function. The diagnostic plot shows
+      an estimate of \eqn{h(z)}.
+      \bold{Warning: in this case the diagnostic is not theoretically
+      justified. This option is provided for research purposes.}
+    }
+  }
+  Alternatively \code{covariate} can be one of the character strings
+  \code{"x"} or \code{"y"} signifying the Cartesian coordinates.
+  The behaviour here depends on whether the coordinate was one of the
+  canonical covariates in the model.
+
+  If there is more than one canonical covariate in the model
+  that depends on the specified \code{covariate}, then
+  the covariate effect is computed using all these canonical covariates.
+  For example in a log-quadratic model which includes the terms \code{x} and
+  \code{I(x^2)}, the quadratic effect involving both these terms
+  will be computed.
+
+  There are two choices for the algorithm.
+  If \code{smooth.effect=TRUE}, the fitted covariate effect (according
+  to \code{model}) is added to the point process residuals, then
+  smoothing is applied to these values. If \code{smooth.effect=FALSE},
+  the point process residuals are smoothed first, and then the fitted
+  covariate effect is added to the result.
+
+  The smoothing bandwidth is controlled by the arguments
+  \code{bw}, \code{adjust}, \code{bw.input} and \code{bw.restrict}.
+  If \code{bw} is a numeric value, then
+  the bandwidth is taken to be \code{adjust * bw}.
+  If \code{bw} is a string representing a bandwidth selection rule
+  (recognised by \code{\link[stats]{density.default}})
+  then the bandwidth is selected by this rule.
+
+  The data used for automatic bandwidth selection are
+  specified by \code{bw.input} and \code{bw.restrict}.
+  If \code{bw.input="points"}  (the default) then bandwidth selection is
+  based on the covariate values at the points of the original point
+  pattern dataset to which the model was fitted.
+  If \code{bw.input="quad"} then bandwidth selection is
+  based on the covariate values at every quadrature point used to
+  fit the model.
+  If \code{bw.restrict=TRUE} then the bandwidth selection is performed
+  using only data from inside the \code{subregion}.
+}
+\section{Slow computation}{
+  In a large dataset, computation can be very slow if the default
+  settings are used, because the smoothing bandwidth is selected
+  automatically. To avoid this, specify a numerical value
+  for the bandwidth \code{bw}. One strategy is to use a coarser
+  subset of the data to select \code{bw} automatically.
+  The selected bandwidth can be read off the print output for
+  \code{parres}.  
+}
+\value{
+  A function value table (object of class \code{"fv"})
+  containing the values of the smoothed partial residual,
+  the estimated variance, and the fitted effect of the covariate.
+  Also belongs to the class \code{"parres"}
+  which has methods for \code{print} and \code{plot}.
+}
+\references{
+  Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2013)
+  Residual diagnostics for covariate effects in
+  spatial point process models.
+  \emph{Journal of Computational and Graphical Statistics},
+  \bold{22}, 886--905.
+}
+\author{
+  \adrian,
+  \rolf,
+  Ya-Mei Chang and Yong Song.
+}
+\seealso{
+  \code{\link{addvar}},
+  \code{\link{rhohat}},
+  \code{\link{rho2hat}}
+}
+\examples{
+  X <-  rpoispp(function(x,y){exp(3+x+2*x^2)})
+  model <- ppm(X, ~x+y)
+  tra <- parres(model, "x")
+  plot(tra)
+  plot(parres(model, "x", subregion=square(0.5)))
+  model2 <- ppm(X, ~x+I(x^2)+y)
+  plot(parres(model2, "x"))
+  Z <- setcov(owin())
+  plot(parres(model2, Z))
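+  # sketch: supplying a numeric bandwidth avoids the slow automatic
+  # selection discussed above (the value 0.05 is illustrative)
+  plot(parres(model, "x", bw=0.05))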
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/pcf.Rd b/man/pcf.Rd
new file mode 100644
index 0000000..0333f97
--- /dev/null
+++ b/man/pcf.Rd
@@ -0,0 +1,121 @@
+\name{pcf}
+\alias{pcf}
+\title{Pair Correlation Function}
+\description{
+  Estimate the pair correlation function.
+}
+\usage{
+ pcf(X, \dots)
+}
+\arguments{
+  \item{X}{
+    Either the observed data point pattern,
+    or an estimate of its \eqn{K} function,
+    or an array of multitype \eqn{K} functions
+    (see Details).
+  }
+  \item{\dots}{
+    Other arguments passed to the appropriate method.
+  }
+} 
+\value{
+  Either a function value table
+  (object of class \code{"fv"}, see \code{\link{fv.object}})
+  representing a pair correlation function,
+  or a function array (object of class \code{"fasp"},
+  see \code{\link{fasp.object}})
+  representing an array of pair correlation functions.
+}
+\details{
+  The pair correlation function of a stationary point process is
+  \deqn{
+    g(r) = \frac{K'(r)}{2\pi r}
+  }{
+    g(r) = K'(r)/ ( 2 * pi * r) 
+  }
+  where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the
+  reduced second moment function (aka ``Ripley's \eqn{K} function'')
+  of the point process. See \code{\link{Kest}} for information
+  about \eqn{K(r)}. For a stationary Poisson process, the
+  pair correlation function is identically equal to 1. Values
+  \eqn{g(r) < 1} suggest inhibition between points;
+  values greater than 1 suggest clustering.
+
+  We also apply the same definition to
+  other variants of the classical \eqn{K} function,
+  such as the multitype \eqn{K} functions
+  (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the
+  inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}).
+  For all these variants, the benchmark value of
+  \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to
+  \eqn{g(r) = 1}.
+
+  This routine computes an estimate of \eqn{g(r)}
+  either directly from a point pattern,
+  or indirectly from an estimate of \eqn{K(r)} or one of its variants.
+
+  This function is generic, with methods for
+  the classes \code{"ppp"}, \code{"fv"} and \code{"fasp"}.
+
+  If \code{X} is a point pattern (object of class \code{"ppp"})
+  then the pair correlation function is estimated using
+  a traditional kernel smoothing method (Stoyan and Stoyan, 1994).
+  See \code{\link{pcf.ppp}} for details.
+
+  If \code{X} is a function value table (object of class \code{"fv"}),
+  then it is assumed to contain estimates of the \eqn{K} function
+  or one of its variants (typically obtained from \code{\link{Kest}} or
+  \code{\link{Kinhom}}).
+  This routine computes an estimate of \eqn{g(r)} 
+  using smoothing splines to approximate the derivative.
+  See \code{\link{pcf.fv}} for details.
+
+  If \code{X} is a function value array (object of class \code{"fasp"}),
+  then it is assumed to contain estimates of several \eqn{K} functions
+  (typically obtained from \code{\link{Kmulti}} or
+  \code{\link{alltypes}}). This routine computes
+  an estimate of \eqn{g(r)} for each cell in the array,
+  using smoothing splines to approximate the derivatives.
+  See \code{\link{pcf.fasp}} for details.
+}
+\references{
+  Stoyan, D. and Stoyan, H. (1994)
+  \emph{Fractals, random shapes and point fields:
+  methods of geometrical statistics.}
+  John Wiley and Sons.
+}
+\seealso{
+  \code{\link{pcf.ppp}},
+  \code{\link{pcf.fv}},
+  \code{\link{pcf.fasp}},
+  \code{\link{Kest}},
+  \code{\link{Kinhom}},
+  \code{\link{Kcross}},
+  \code{\link{Kdot}},
+  \code{\link{Kmulti}},
+  \code{\link{alltypes}}
+}
+\examples{
+  # ppp object
+  X <- simdat
+  \testonly{
+    X <- X[seq(1,npoints(X), by=4)]
+  }
+  p <- pcf(X)
+  plot(p)
+
+  # fv object
+  K <- Kest(X)
+  p2 <- pcf(K, spar=0.8, method="b")
+  plot(p2)
+
+  # multitype pattern; fasp object
+  amaK <- alltypes(amacrine, "K")
+  amap <- pcf(amaK, spar=1, method="b")
+  plot(amap)
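+  # sketch: for a homogeneous Poisson pattern the estimate should
+  # hover around the benchmark value g(r) = 1
+  plot(pcf(rpoispp(100)))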
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/pcf.fasp.Rd b/man/pcf.fasp.Rd
new file mode 100644
index 0000000..bfff2cc
--- /dev/null
+++ b/man/pcf.fasp.Rd
@@ -0,0 +1,141 @@
+\name{pcf.fasp}
+\alias{pcf.fasp}
+\title{Pair Correlation Function obtained from array of K functions}
+\description{
+  Estimates the (bivariate) pair correlation functions of
+  a point pattern, given an array of (bivariate) K functions.
+}
+\usage{
+ \method{pcf}{fasp}(X, \dots, method="c")
+}
+\arguments{
+  \item{X}{
+    An array of multitype \eqn{K} functions
+    (object of class \code{"fasp"}).
+  }
+  \item{\dots}{
+    Arguments controlling the smoothing spline
+    function \code{smooth.spline}.
+  }
+  \item{method}{
+    Letter \code{"a"}, \code{"b"}, \code{"c"} or \code{"d"} indicating the
+    method for deriving the pair correlation function from the
+    \code{K} function.
+  }
+} 
+\value{
+  A function array (object of class \code{"fasp"},
+  see \code{\link{fasp.object}})
+  representing an array of pair correlation functions.
+  This can be thought of as a matrix \code{Y} each of whose entries
+  \code{Y[i,j]} is a function value table (class \code{"fv"})
+  representing the pair correlation function between
+  points of type \code{i} and points of type \code{j}.
+}
+\details{
+  The pair correlation function of a stationary point process is
+  \deqn{
+    g(r) = \frac{K'(r)}{2\pi r}
+  }{
+    g(r) = K'(r)/ ( 2 * pi * r) 
+  }
+  where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the
+  reduced second moment function (aka ``Ripley's \eqn{K} function'')
+  of the point process. See \code{\link{Kest}} for information
+  about \eqn{K(r)}. For a stationary Poisson process, the
+  pair correlation function is identically equal to 1. Values
+  \eqn{g(r) < 1} suggest inhibition between points;
+  values greater than 1 suggest clustering.
+
+  We also apply the same definition to
+  other variants of the classical \eqn{K} function,
+  such as the multitype \eqn{K} functions
+  (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the
+  inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}).
+  For all these variants, the benchmark value of
+  \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to
+  \eqn{g(r) = 1}.
+
+  This routine computes an estimate of \eqn{g(r)}
+  from an array of estimates of \eqn{K(r)} or its variants,
+  using smoothing splines to approximate the derivatives.
+  It is a method for the generic function \code{\link{pcf}}.
+
+  The argument \code{X} should be
+  a function array (object of class \code{"fasp"},
+  see \code{\link{fasp.object}})
+  containing several estimates of \eqn{K} functions.
+  This should have been obtained from \code{\link{alltypes}}
+  with the argument \code{fun="K"}.
+  
+  The smoothing spline operations are performed by
+  \code{\link{smooth.spline}} and \code{\link{predict.smooth.spline}}
+  in the \pkg{stats} package.
+  Four numerical methods are available:
+  \itemize{
+    \item
+    \bold{"a"} apply smoothing to \eqn{K(r)},
+    estimate its derivative, and plug in to the formula above;
+    \item 
+    \bold{"b"} apply smoothing to
+    \eqn{Y(r) = \frac{K(r)}{2 \pi r}}{Y(r) = K(r)/(2 * pi * r)}
+    constraining \eqn{Y(0) = 0},
+    estimate the derivative of \eqn{Y}, and solve;
+    \item
+    \bold{"c"} apply smoothing to 
+    \eqn{Z(r) = \frac{K(r)}{\pi r^2}}{Z(r) = K(r)/(pi * r^2)}
+    constraining \eqn{Z(0)=1},
+    estimate its derivative, and solve.
+    \item
+    \bold{"d"} apply smoothing to 
+    \eqn{V(r) = \sqrt{K(r)}}{V(r) = sqrt(K(r))},
+    estimate its derivative, and solve.
+  }
+  Method \code{"c"} seems to be the best at 
+  suppressing variability for small values of \eqn{r}.
+  However it effectively constrains \eqn{g(0) = 1}.
+  If the point pattern seems to have inhibition at small distances,
+  you may wish to experiment with method \code{"b"} which effectively
+  constrains \eqn{g(0)=0}. Method \code{"a"} seems
+  comparatively unreliable.
+
+  Useful arguments to control the splines
+  include the smoothing tradeoff parameter \code{spar}
+  and the degrees of freedom \code{df}. See \code{\link{smooth.spline}}
+  for details.
+}
+\references{
+  Stoyan, D., Kendall, W.S. and Mecke, J. (1995)
+  \emph{Stochastic geometry and its applications}.
+  2nd edition. Springer Verlag.
+
+  Stoyan, D. and Stoyan, H. (1994)
+  \emph{Fractals, random shapes and point fields:
+  methods of geometrical statistics.}
+  John Wiley and Sons.
+}
+\seealso{
+  \code{\link{Kest}},
+  \code{\link{Kinhom}},
+  \code{\link{Kcross}},
+  \code{\link{Kdot}},
+  \code{\link{Kmulti}},
+  \code{\link{alltypes}},
+  \code{\link{smooth.spline}},
+  \code{\link{predict.smooth.spline}}
+}
+\examples{
+  # multitype point pattern
+  KK <- alltypes(amacrine, "K")
+  p <- pcf.fasp(KK, spar=0.5, method="b")
+  plot(p)
+  # strong inhibition between points of the same type
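+  # sketch: smoothness can also be set via degrees of freedom
+  # (the value df=8 is illustrative)
+  p2 <- pcf.fasp(KK, df=8, method="b")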
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/pcf.fv.Rd b/man/pcf.fv.Rd
new file mode 100644
index 0000000..ac9d91b
--- /dev/null
+++ b/man/pcf.fv.Rd
@@ -0,0 +1,149 @@
+\name{pcf.fv}
+\alias{pcf.fv}
+\title{Pair Correlation Function obtained from K Function}
+\description{
+  Estimates the pair correlation function of
+  a point pattern, given an estimate of the K function.
+}
+\usage{
+  \method{pcf}{fv}(X, \dots, method="c")
+}
+\arguments{
+  \item{X}{
+    An estimate of the \eqn{K} function
+    or one of its variants.
+    An object of class \code{"fv"}.
+  }
+  \item{\dots}{
+    Arguments controlling the smoothing spline
+    function \code{smooth.spline}.
+  }
+  \item{method}{
+    Letter \code{"a"}, \code{"b"}, \code{"c"} or \code{"d"} indicating the
+    method for deriving the pair correlation function from the
+    \code{K} function.
+  }
+} 
+\value{
+  A function value table
+  (object of class \code{"fv"}, see \code{\link{fv.object}})
+  representing a pair correlation function.
+
+  Essentially a data frame containing (at least) the variables
+  \item{r}{the vector of values of the argument \eqn{r} 
+    at which the pair correlation function \eqn{g(r)} has been  estimated
+  }
+  \item{pcf}{vector of values of \eqn{g(r)}
+  }
+}
+\details{
+  The pair correlation function of a stationary point process is
+  \deqn{
+    g(r) = \frac{K'(r)}{2\pi r}
+  }{
+    g(r) = K'(r)/ ( 2 * pi * r) 
+  }
+  where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the
+  reduced second moment function (aka ``Ripley's \eqn{K} function'')
+  of the point process. See \code{\link{Kest}} for information
+  about \eqn{K(r)}. For a stationary Poisson process, the
+  pair correlation function is identically equal to 1. Values
+  \eqn{g(r) < 1} suggest inhibition between points;
+  values greater than 1 suggest clustering.
+
+  We also apply the same definition to
+  other variants of the classical \eqn{K} function,
+  such as the multitype \eqn{K} functions
+  (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the
+  inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}).
+  For all these variants, the benchmark value of
+  \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to
+  \eqn{g(r) = 1}.
+
+  This routine computes an estimate of \eqn{g(r)}
+  from an estimate of \eqn{K(r)} or its variants,
+  using smoothing splines to approximate the derivative.
+  It is a method for the generic function \code{\link{pcf}}
+  for the class \code{"fv"}.
+  
+  The argument \code{X} should be an estimated \eqn{K} function,
+  given as a function value table (object of class \code{"fv"},
+  see \code{\link{fv.object}}).
+  This object should be the value returned by
+  \code{\link{Kest}}, \code{\link{Kcross}}, \code{\link{Kmulti}}
+  or \code{\link{Kinhom}}.
+  
+  The smoothing spline operations are performed by
+  \code{\link{smooth.spline}} and \code{\link{predict.smooth.spline}}
+  in the \pkg{stats} package.
+  Four numerical methods are available:
+  \itemize{
+    \item
+    \bold{"a"} apply smoothing to \eqn{K(r)},
+    estimate its derivative, and plug in to the formula above;
+    \item 
+    \bold{"b"} apply smoothing to
+    \eqn{Y(r) = \frac{K(r)}{2 \pi r}}{Y(r) = K(r)/(2 * pi * r)}
+    constraining \eqn{Y(0) = 0},
+    estimate the derivative of \eqn{Y}, and solve;
+    \item
+    \bold{"c"} apply smoothing to 
+    \eqn{Z(r) = \frac{K(r)}{\pi r^2}}{Z(r) = K(r)/(pi * r^2)}
+    constraining \eqn{Z(0)=1},
+    estimate its derivative, and solve.
+    \item
+    \bold{"d"} apply smoothing to 
+    \eqn{V(r) = \sqrt{K(r)}}{V(r) = sqrt(K(r))},
+    estimate its derivative, and solve.
+  }
+  Method \code{"c"} seems to be the best at 
+  suppressing variability for small values of \eqn{r}.
+  However it effectively constrains \eqn{g(0) = 1}.
+  If the point pattern seems to have inhibition at small distances,
+  you may wish to experiment with method \code{"b"} which effectively
+  constrains \eqn{g(0)=0}. Method \code{"a"} seems
+  comparatively unreliable.
+
+  Useful arguments to control the splines
+  include the smoothing tradeoff parameter \code{spar}
+  and the degrees of freedom \code{df}. See \code{\link{smooth.spline}}
+  for details.
+}
+\references{
+  Stoyan, D., Kendall, W.S. and Mecke, J. (1995)
+  \emph{Stochastic geometry and its applications}.
+  2nd edition. Springer Verlag.
+
+  Stoyan, D. and Stoyan, H. (1994)
+  \emph{Fractals, random shapes and point fields:
+  methods of geometrical statistics.}
+  John Wiley and Sons.
+}
+\seealso{
+  \code{\link{pcf}},
+  \code{\link{pcf.ppp}},
+  \code{\link{Kest}},
+  \code{\link{Kinhom}},
+  \code{\link{Kcross}},
+  \code{\link{Kdot}},
+  \code{\link{Kmulti}},
+  \code{\link{alltypes}},
+  \code{\link{smooth.spline}},
+  \code{\link{predict.smooth.spline}}
+}
+\examples{
+  # univariate point pattern
+  X <- simdat
+  \testonly{
+    X <- X[seq(1,npoints(X), by=4)]
+  }
+  K <- Kest(X)
+  p <- pcf.fv(K, spar=0.5, method="b")
+  plot(p, main="pair correlation function for simdat")
+  # indicates inhibition at distances r < 0.3
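+  # sketch: method "c" constrains g(0)=1, for comparison with
+  # method "b" used above
+  p2 <- pcf.fv(K, spar=0.5, method="c")
+  plot(p2)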
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/pcf.ppp.Rd b/man/pcf.ppp.Rd
new file mode 100644
index 0000000..bd798a2
--- /dev/null
+++ b/man/pcf.ppp.Rd
@@ -0,0 +1,273 @@
+\name{pcf.ppp}
+\alias{pcf.ppp}
+\title{Pair Correlation Function of Point Pattern}
+\description{
+  Estimates the pair correlation function of
+  a point pattern using kernel methods.
+}
+\usage{
+  \method{pcf}{ppp}(X, \dots, r = NULL, kernel="epanechnikov", bw=NULL,
+                    stoyan=0.15,
+                    correction=c("translate", "Ripley"),
+                    divisor = c("r", "d"),
+                    var.approx = FALSE,
+                    domain=NULL,
+                    ratio=FALSE, close=NULL)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{r}{
+    Vector of values for the argument \eqn{r} at which \eqn{g(r)} 
+    should be evaluated. There is a sensible default.
+  }
+  \item{kernel}{
+    Choice of smoothing kernel,
+    passed to \code{\link{density.default}}.
+  }
+  \item{bw}{
+    Bandwidth for smoothing kernel,
+    passed to \code{\link{density.default}}.
+    Either a single numeric value,
+    or a character string specifying a bandwidth selection rule
+    recognised by \code{\link{density.default}}.
+    If \code{bw} is missing or \code{NULL},
+    the default value is computed using
+    Stoyan's rule of thumb: see Details.
+  }
+  \item{\dots}{
+    Other arguments passed to the kernel density estimation 
+    function \code{\link{density.default}}.
+  }
+  \item{stoyan}{
+    Coefficient for Stoyan's bandwidth selection rule; see Details.
+  }
+  \item{correction}{
+    Choice of edge correction.
+  }
+  \item{divisor}{
+    Choice of divisor in the estimation formula:
+    either \code{"r"} (the default) or \code{"d"}. See Details.
+  }
+  \item{var.approx}{
+    Logical value indicating whether to compute an analytic
+    approximation to the variance of the estimated pair correlation.
+  }
+  \item{domain}{
+    Optional. Calculations will be restricted to this subset
+    of the window. See Details.
+  }
+  \item{ratio}{
+    Logical. 
+    If \code{TRUE}, the numerator and denominator of
+    each edge-corrected estimate will also be saved,
+    for use in analysing replicated point patterns.
+  }
+  \item{close}{
+    Advanced use only. Precomputed data. See section on Advanced Use.
+  }
+} 
+\value{
+  A function value table
+  (object of class \code{"fv"}).
+  Essentially a data frame containing the variables
+  \item{r}{the vector of values of the argument \eqn{r} 
+    at which the pair correlation function \eqn{g(r)} has been  estimated
+  }
+  \item{theo}{vector of values equal to 1,
+    the theoretical value of \eqn{g(r)} for the Poisson process
+  }
+  \item{trans}{vector of values of \eqn{g(r)}
+    estimated by translation correction
+  }
+  \item{iso}{vector of values of \eqn{g(r)}
+    estimated by Ripley isotropic correction
+  }
+  \item{v}{vector of approximate values of the variance of
+    the estimate of \eqn{g(r)}
+  }
+  as required.
+
+  If \code{ratio=TRUE} then the return value also has two
+  attributes called \code{"numerator"} and \code{"denominator"}
+  which are \code{"fv"} objects
+  containing the numerators and denominators of each
+  estimate of \eqn{g(r)}.
+
+  The return value also has an attribute \code{"bw"} giving the
+  smoothing bandwidth that was used.
+}
+\details{
+  The pair correlation function \eqn{g(r)} 
+  is a summary of the dependence between points in a spatial point
+  process. The best intuitive interpretation is the following: the probability
+  \eqn{p(r)} of finding two points at locations \eqn{x} and \eqn{y}
+  separated by a distance \eqn{r} is equal to
+  \deqn{
+    p(r) = \lambda^2 g(r) \,{\rm d}x \, {\rm d}y
+  }{
+    p(r) = lambda^2 * g(r) dx dy
+  }
+  where \eqn{\lambda}{lambda} is the intensity of the point process.
+  For a completely random (uniform Poisson) process,
+  \eqn{p(r) = \lambda^2 \,{\rm d}x \, {\rm d}y}{p(r) = lambda^2 dx dy}
+  so \eqn{g(r) = 1}.
+  Formally, the pair correlation function of a stationary point process
+  is defined by 
+  \deqn{
+    g(r) = \frac{K'(r)}{2\pi r}
+  }{
+    g(r) = K'(r)/ ( 2 * pi * r) 
+  }
+  where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the
+  reduced second moment function (aka ``Ripley's \eqn{K} function'')
+  of the point process. See \code{\link{Kest}} for information
+  about \eqn{K(r)}.
+
+  For a stationary Poisson process, the
+  pair correlation function is identically equal to 1. Values
+  \eqn{g(r) < 1} suggest inhibition between points;
+  values greater than 1 suggest clustering.
+
+  This routine computes an estimate of \eqn{g(r)}
+  by kernel smoothing. 
+
+  \itemize{
+    \item
+    If \code{divisor="r"} (the default), then the standard
+    kernel estimator (Stoyan and Stoyan, 1994, pages 284--285)
+    is used. By default, the recommendations of Stoyan and Stoyan (1994)
+    are followed exactly. 
+    \item
+    If \code{divisor="d"} then a modified estimator is used:
+    the contribution from
+    an interpoint distance \eqn{d_{ij}}{d[ij]} to the
+    estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]}
+    instead of dividing by \eqn{r}. This usually improves the
+    bias of the estimator when \eqn{r} is close to zero.
+  }
+
+  There is also a choice of spatial edge corrections
+  (which are needed to avoid bias due to edge effects
+  associated with the boundary of the spatial window):
+
+  \itemize{
+    \item
+    If \code{correction="translate"} or \code{correction="translation"}
+    then the translation correction
+    is used. For \code{divisor="r"} the translation-corrected estimate
+    is given in equation (15.15), page 284 of Stoyan and Stoyan (1994).
+    \item
+    If \code{correction="Ripley"} then Ripley's isotropic edge correction
+    is used. For \code{divisor="r"} the isotropic-corrected estimate
+    is given in equation (15.18), page 285 of Stoyan and Stoyan (1994). 
+    \item
+    If \code{correction=c("translate", "Ripley")} then both estimates
+    will be computed.
+  }
+  Alternatively \code{correction="all"} selects all options.
+  
+  The choice of smoothing kernel is controlled by the 
+  argument \code{kernel} which is passed to \code{\link{density.default}}.
+  The default is the Epanechnikov kernel, recommended by
+  Stoyan and Stoyan (1994, page 285).
+
+  The bandwidth of the smoothing kernel can be controlled by the
+  argument \code{bw}. Its precise interpretation
+  is explained in the documentation for \code{\link{density.default}}.
+  For the Epanechnikov kernel, the argument \code{bw} is
+  equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}.
+
+  Stoyan and Stoyan (1994, page 285) recommend using the Epanechnikov
+  kernel with support \eqn{[-h,h]} chosen by the rule of thumb
+  \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)},
+  where \eqn{\lambda}{lambda} is the (estimated) intensity of the
+  point process, and \eqn{c} is a constant in the range from 0.1 to 0.2.
+  See equation (15.16).
+  If \code{bw} is missing or \code{NULL},
+  then this rule of thumb will be applied.
+  The argument \code{stoyan} determines the value of \eqn{c}.
+  The smoothing bandwidth that was used in the calculation is returned
+  as an attribute of the final result.
+  
+  The argument \code{r} is the vector of values for the
+  distance \eqn{r} at which \eqn{g(r)} should be evaluated.
+  There is a sensible default.
+  If it is specified, \code{r} must be a vector of increasing numbers
+  starting from \code{r[1] = 0}, 
+  and \code{max(r)} must not exceed half the diameter of 
+  the window.
+
+  If the argument \code{domain} is given, estimation will be restricted
+  to this region. That is, the estimate of 
+  \eqn{g(r)} will be based on pairs of points in which the first point lies
+  inside \code{domain} and the second point is unrestricted.
+  The argument \code{domain}
+  should be a window (object of class \code{"owin"}) or something acceptable to
+  \code{\link{as.owin}}. It must be a subset of the
+  window of the point pattern \code{X}.
+
+  To compute a confidence band for the true value of the
+  pair correlation function, use \code{\link{lohboot}}.
+
+  If \code{var.approx = TRUE}, the variance of the
+  estimate of the pair correlation will also be calculated using
+  an analytic approximation (Illian et al, 2008, page 234)
+  which is valid for stationary point processes which are not
+  too clustered. This calculation is not yet implemented when
+  the argument \code{domain} is given.
+}
+\section{Advanced Use}{
+  To perform the same computation using several different bandwidths \code{bw},
+  it is efficient to use the argument \code{close}.
+  This should be the result of \code{\link{closepairs}(X, rmax)}
+  for a suitably large value of \code{rmax}, namely
+  \code{rmax >= max(r) + 3 * bw}.
+}
+\references{
+  Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008)
+  \emph{Statistical Analysis and Modelling of Spatial Point Patterns.}
+  Wiley.
+
+  Stoyan, D. and Stoyan, H. (1994)
+  \emph{Fractals, random shapes and point fields:
+  methods of geometrical statistics.}
+  John Wiley and Sons.
+}
+\seealso{
+  \code{\link{Kest}},
+  \code{\link{pcf}},
+  \code{\link{density.default}},
+  \code{\link{bw.stoyan}},
+  \code{\link{bw.pcf}},
+  \code{\link{lohboot}}.
+}
+\examples{
+  X <- simdat
+  \testonly{
+    X <- X[seq(1,npoints(X), by=4)]
+  }
+  p <- pcf(X)
+  plot(p, main="pair correlation function for X")
+  # indicates inhibition at distances r < 0.3
+
+  pd <- pcf(X, divisor="d")
+
+  # compare estimates
+  plot(p, cbind(iso, theo) ~ r, col=c("blue", "red"),
+         ylim.covers=0, main="", lwd=c(2,1), lty=c(1,3), legend=FALSE)
+  plot(pd, iso ~ r, col="green", lwd=2, add=TRUE)
+  legend("center", col=c("blue", "green"), lty=1, lwd=2,
+         legend=c("divisor=r","divisor=d"))
+
+  # calculate approximate variance and show POINTWISE confidence bands
+  pv <- pcf(X, var.approx=TRUE)
+  plot(pv, cbind(iso, iso+2*sqrt(v), iso-2*sqrt(v)) ~ r)
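+  # sketch of the Advanced Use note: reuse precomputed close pairs
+  # across several bandwidths (bandwidths and distance grid illustrative;
+  # rmax is chosen so that rmax >= max(r) + 3*bw)
+  rr <- seq(0, diameter(Window(X))/4, length=513)
+  cl <- closepairs(X, rmax=max(rr) + 3*0.1)
+  pa <- pcf(X, r=rr, bw=0.05, close=cl)
+  pb <- pcf(X, r=rr, bw=0.10, close=cl)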
+}
+\author{
+  \spatstatAuthors
+  and Martin Hazelton.
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/pcf3est.Rd b/man/pcf3est.Rd
new file mode 100644
index 0000000..6646366
--- /dev/null
+++ b/man/pcf3est.Rd
@@ -0,0 +1,137 @@
+\name{pcf3est}
+\Rdversion{1.1}
+\alias{pcf3est}
+\title{
+  Pair Correlation Function of a Three-Dimensional Point Pattern
+}
+\description{
+  Estimates the pair correlation function
+  from a three-dimensional point pattern.
+}
+\usage{
+pcf3est(X, ..., rmax = NULL, nrval = 128,
+        correction = c("translation", "isotropic"),
+        delta = NULL, adjust = 1, biascorrect = TRUE)
+}
+\arguments{
+  \item{X}{
+    Three-dimensional point pattern (object of class \code{"pp3"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{rmax}{
+    Optional. Maximum value of argument \eqn{r} for which
+    \eqn{g_3(r)}{g3(r)} will be estimated. 
+  }
+  \item{nrval}{
+    Optional. Number of values of \eqn{r} for which
+    \eqn{g_3(r)}{g3(r)} will be estimated. 
+  }
+  \item{correction}{
+    Optional. Character vector specifying the edge correction(s)
+    to be applied. See Details.
+  }
+  \item{delta}{
+    Optional. Half-width of the Epanechnikov smoothing kernel.
+  }
+  \item{adjust}{
+    Optional. Adjustment factor for the default value of \code{delta}.
+  }
+  \item{biascorrect}{
+    Logical value. Whether to correct for underestimation due to
+    truncation of the kernel near \eqn{r=0}.
+  }
+}
+\details{
+  For a stationary point process \eqn{\Phi}{Phi} in three-dimensional
+  space, the pair correlation function is
+  \deqn{
+    g_3(r) = \frac{K_3'(r)}{4\pi r^2}
+  }{
+    g3(r) = K3'(r)/(4 * pi * r^2)
+  }
+  where \eqn{K_3'}{K3'} is the derivative of the
+  three-dimensional \eqn{K}-function (see \code{\link{K3est}}).
+  
+  The three-dimensional point pattern \code{X} is assumed to be a
+  partial realisation of a stationary point process \eqn{\Phi}{Phi}.
+  The distance between each pair of distinct points is computed.
+  Kernel smoothing is applied to these distance values (weighted by
+  an edge correction factor) and the result is 
+  renormalised to give the estimate of \eqn{g_3(r)}{g3(r)}.
+
+  The available edge corrections are:
+  \describe{
+    \item{\code{"translation"}:}{
+      the Ohser translation correction estimator
+      (Ohser, 1983; Baddeley et al, 1993)
+    }
+    \item{\code{"isotropic"}:}{
+      the three-dimensional counterpart of
+      Ripley's isotropic edge correction (Ripley, 1977; Baddeley et al, 1993).
+    }
+  }
+
+  Kernel smoothing is performed using the Epanechnikov kernel
+  with half-width \code{delta}. If \code{delta} is missing, the
+  default is to use the rule-of-thumb
+  \eqn{\delta = 0.26/\lambda^{1/3}}{delta = 0.26/lambda^(1/3)} where
+  \eqn{\lambda = n/v}{lambda = n/v} is the estimated intensity, computed
+  from the number \eqn{n} of data points and the volume \eqn{v} of the
+  enclosing box. This default value of \code{delta} is multiplied by
+  the factor \code{adjust}. 
+
+  The smoothing estimate of the pair correlation \eqn{g_3(r)}{g3(r)}
+  is typically an underestimate when \eqn{r} is small, due to
+  truncation of the kernel at \eqn{r=0}. 
+  If \code{biascorrect=TRUE}, the smoothed estimate is
+  approximately adjusted for this bias. This is advisable whenever
+  the dataset contains a sufficiently large number of points.
+}
+\value{
+  A function value table (object of class \code{"fv"}) that can be
+  plotted, printed or coerced to a data frame containing the function
+  values.
+
+  Additionally the value of \code{delta} is returned as an attribute
+  of this object.
+}
+\references{
+  Baddeley, A.J., Moyeed, R.A., Howard, C.V. and Boyde, A. (1993)
+  Analysis of a three-dimensional point pattern with replication.
+  \emph{Applied Statistics} \bold{42}, 641--668.
+
+  Ohser, J. (1983)
+  On estimators for the reduced second moment measure of
+  point processes. \emph{Mathematische Operationsforschung und
+  Statistik, series Statistics}, \bold{14}, 63--71.
+
+  Ripley, B.D. (1977)
+  Modelling spatial patterns (with discussion).
+  \emph{Journal of the Royal Statistical Society, Series B},
+  \bold{39}, 172--212.
+}
+\author{
+  \adrian
+  
+  
+  and Rana Moyeed.
+}
+\seealso{
+  \code{\link{K3est}},
+  \code{\link{pcf}}
+}
+\examples{
+  X <- rpoispp3(250)
+  Z <- pcf3est(X)
+  Zbias <- pcf3est(X, biascorrect=FALSE)
+  if(interactive()) {
+    opa <- par(mfrow=c(1,2))
+    plot(Z,     ylim.covers=c(0, 1.2))
+    plot(Zbias, ylim.covers=c(0, 1.2))
+    par(opa)
+  }
+  attr(Z, "delta")
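+  # sketch: halve the rule-of-thumb kernel half-width via 'adjust'
+  Zfine <- pcf3est(X, adjust=0.5)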
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/pcfcross.Rd b/man/pcfcross.Rd
new file mode 100644
index 0000000..93cab82
--- /dev/null
+++ b/man/pcfcross.Rd
@@ -0,0 +1,185 @@
+\name{pcfcross}
+\alias{pcfcross}
+\title{Multitype pair correlation function (cross-type)}
+\description{
+  Calculates an estimate of the cross-type pair correlation function
+  for a multitype point pattern.
+}
+\usage{
+  pcfcross(X, i, j, ...,
+           r = NULL, 
+           kernel = "epanechnikov", bw = NULL, stoyan = 0.15,
+           correction = c("isotropic", "Ripley", "translate"),
+           divisor = c("r", "d"))
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the cross-type pair correlation function
+    \eqn{g_{ij}(r)}{g[i,j](r)} will be computed.
+    It must be a multitype point pattern (a marked point pattern
+    whose marks are a factor). 
+  }
+  \item{i}{The type (mark value)
+    of the points in \code{X} from which distances are measured.
+    A character string (or something that will be converted to a
+    character string).
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{j}{The type (mark value)
+    of the points in \code{X} to which distances are measured.
+    A character string (or something that will be
+    converted to a character string).
+    Defaults to the second level of \code{marks(X)}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{r}{
+    Vector of values for the argument \eqn{r} at which \eqn{g(r)} 
+    should be evaluated. There is a sensible default.
+  }
+  \item{kernel}{
+    Choice of smoothing kernel,
+    passed to \code{\link{density.default}}.
+  }
+  \item{bw}{
+    Bandwidth for smoothing kernel, 
+    passed to \code{\link{density.default}}.
+  }
+  \item{stoyan}{
+    Coefficient for default bandwidth rule; see Details.
+  }
+  \item{correction}{
+    Choice of edge correction.
+  }
+  \item{divisor}{
+    Choice of divisor in the estimation formula:
+    either \code{"r"} (the default) or \code{"d"}. See Details.
+  }
+}
+\details{
+  The cross-type pair correlation function
+  is a generalisation of the pair correlation function \code{\link{pcf}}
+  to multitype point patterns.
+
+  For two locations \eqn{x} and \eqn{y} separated by a distance \eqn{r},
+  the probability \eqn{p(r)} of finding a point of type \eqn{i} at location
+  \eqn{x} and a point of type \eqn{j} at location \eqn{y} is 
+  \deqn{
+    p(r) = \lambda_i \lambda_j g_{i,j}(r) \,{\rm d}x \, {\rm d}y
+  }{
+    p(r) = lambda[i] * lambda[j] * g[i,j](r) dx dy
+  }
+  where \eqn{\lambda_i}{lambda[i]} is the intensity of the points
+  of type \eqn{i}. 
+  For a completely random Poisson marked point process,
+  \eqn{p(r) = \lambda_i \lambda_j}{p(r) = lambda[i] * lambda[j]}
+  so \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1}.
+  Indeed for any marked point pattern in which the points of type \code{i}
+  are independent of the points of type \code{j},
+  the theoretical value of the cross-type pair correlation is
+  \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1}.
+  
+  For a stationary multitype point process, the cross-type pair correlation
+  function between marks \eqn{i} and \eqn{j} is formally defined as
+  \deqn{
+    g_{i,j}(r) = \frac{K_{i,j}^\prime(r)}{2\pi r}
+  }{
+    g[i,j](r) = K[i,j]'(r)/ ( 2 * pi * r )
+  }
+  where \eqn{K_{i,j}^\prime}{K[i,j]'(r)} is the derivative of
+  the cross-type \eqn{K} function \eqn{K_{i,j}(r)}{K[i,j](r)}
+  of the point process. See \code{\link{Kcross}} for information
+  about \eqn{K_{i,j}(r)}{K[i,j](r)}.
+
+  The command \code{pcfcross} computes a kernel estimate of
+  the cross-type pair correlation function between marks \eqn{i} and
+  \eqn{j}. 
+
+  \itemize{
+    \item
+    If \code{divisor="r"} (the default), then the multitype
+    counterpart of the standard
+    kernel estimator (Stoyan and Stoyan, 1994, pages 284--285)
+    is used. By default, the recommendations of Stoyan and Stoyan (1994)
+    are followed exactly. 
+    \item
+    If \code{divisor="d"} then a modified estimator is used:
+    the contribution from
+    an interpoint distance \eqn{d_{ij}}{d[ij]} to the
+    estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]}
+    instead of dividing by \eqn{r}. This usually improves the
+    bias of the estimator when \eqn{r} is close to zero.
+  }
+
+  There is also a choice of spatial edge corrections
+  (which are needed to avoid bias due to edge effects
+  associated with the boundary of the spatial window):
+  \code{correction="translate"} is the Ohser-Stoyan translation
+  correction, and \code{correction="isotropic"} or \code{"Ripley"}
+  is Ripley's isotropic correction.  
+
+  The choice of smoothing kernel is controlled by the 
+  argument \code{kernel} which is passed to \code{\link{density}}.
+  The default is the Epanechnikov kernel.
+
+  The bandwidth of the smoothing kernel can be controlled by the
+  argument \code{bw}. Its precise interpretation
+  is explained in the documentation for \code{\link{density.default}}.
+  For the Epanechnikov kernel with support \eqn{[-h,h]},
+  the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}.
+
+  If \code{bw} is not specified, the default bandwidth
+  is determined by Stoyan's rule of thumb (Stoyan and Stoyan, 1994, page
+  285) applied to the points of type \code{j}. That is,
+  \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)},
+  where \eqn{\lambda}{lambda} is the (estimated) intensity of the
+  point process of type \code{j},
+  and \eqn{c} is a constant in the range from 0.1 to 0.2.
+  The argument \code{stoyan} determines the value of \eqn{c}.
+
+  The companion function \code{\link{pcfdot}} computes the
+  corresponding analogue of \code{\link{Kdot}}.
+}
+
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+
+  Essentially a data frame containing columns
+  \item{r}{the vector of values of the argument \eqn{r} 
+    at which the function \eqn{g_{i,j}}{g[i,j]} has been  estimated
+  }
+  \item{theo}{the theoretical value \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1}
+    for independent marks.
+  }
+  together with columns named 
+  \code{"border"}, \code{"bord.modif"},
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the function \eqn{g_{i,j}}{g[i,j]}
+  obtained by the edge corrections named.
+}
+\seealso{
+  Mark connection function \code{\link{markconnect}}.
+
+  Multitype pair correlation \code{\link{pcfdot}}, \code{\link{pcfmulti}}.
+  
+  Pair correlation \code{\link{pcf}},\code{\link{pcf.ppp}}.
+  
+  \code{\link{Kcross}}
+}
+\examples{
+ data(amacrine)
+ p <- pcfcross(amacrine, "off", "on")
+ p <- pcfcross(amacrine, "off", "on", stoyan=0.1)
+ plot(p)
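+ ## a sketch of Stoyan's rule of thumb for the default bandwidth:
+ ## h = c/sqrt(lambda[j]), and bw = h/sqrt(5) for the Epanechnikov kernel
+ lamJ <- intensity(split(amacrine)$on)
+ 0.15/sqrt(lamJ)/sqrt(5)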
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/pcfcross.inhom.Rd b/man/pcfcross.inhom.Rd
new file mode 100644
index 0000000..8c85c95
--- /dev/null
+++ b/man/pcfcross.inhom.Rd
@@ -0,0 +1,151 @@
+\name{pcfcross.inhom}
+\alias{pcfcross.inhom}
+\title{
+  Inhomogeneous Multitype Pair Correlation Function (Cross-Type)
+}
+\description{
+  Estimates the inhomogeneous cross-type pair correlation function
+  for a multitype point pattern.
+}
+\usage{
+pcfcross.inhom(X, i, j, lambdaI = NULL, lambdaJ = NULL, ...,
+               r = NULL, breaks = NULL,
+               kernel="epanechnikov", bw=NULL, stoyan=0.15,
+               correction = c("isotropic", "Ripley", "translate"),
+               sigma = NULL, varcov = NULL)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the inhomogeneous
+    cross-type pair correlation function
+    \eqn{g_{ij}(r)}{g[i,j](r)}
+    will be computed.
+    It must be a multitype point pattern (a marked point pattern
+    whose marks are a factor). 
+  }
+  \item{i}{The type (mark value)
+    of the points in \code{X} from which distances are measured.
+    A character string (or something that will be converted to a
+    character string).
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{j}{The type (mark value)
+    of the points in \code{X} to which distances are measured.
+    A character string (or something that will be
+    converted to a character string).
+    Defaults to the second level of \code{marks(X)}.
+  }
+  \item{lambdaI}{
+    Optional.
+    Values of the estimated intensity function of the points of type \code{i}.
+    Either a vector giving the intensity values
+    at the points of type \code{i},
+    a pixel image (object of class \code{"im"}) giving the
+    intensity values at all locations, or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+  }
+  \item{lambdaJ}{
+    Optional.
+    Values of the estimated intensity function of the points of type \code{j}.
+    A numeric vector, pixel image or \code{function(x,y)}.
+  }
+  \item{r}{
+    Vector of values for the argument \eqn{r} at which
+    \eqn{g_{ij}(r)}{g[i,j](r)}
+    should be evaluated. There is a sensible default.
+  }
+  \item{breaks}{
+	This argument is for internal use only.
+  }
+  \item{kernel}{
+    Choice of smoothing kernel, passed to \code{\link{density.default}}.
+  }
+  \item{bw}{
+    Bandwidth for smoothing kernel, passed to \code{\link{density.default}}.
+  }
+  \item{\dots}{
+    Other arguments passed to the kernel density estimation 
+    function \code{\link{density.default}}.
+  }
+  \item{stoyan}{
+    Bandwidth coefficient; see Details.
+  }
+  \item{correction}{
+    Choice of edge correction.
+  }
+  \item{sigma,varcov}{
+    Optional arguments passed to  \code{\link{density.ppp}}
+    to control the smoothing bandwidth, when \code{lambdaI} or
+    \code{lambdaJ} is estimated by kernel smoothing.
+  }
+}
+\details{
+  The inhomogeneous cross-type pair correlation function
+  \eqn{g_{ij}(r)}{g[i,j](r)}
+  is a summary of the dependence between two types of points in a
+  multitype spatial point process that does not have a uniform
+  density of points.
+
+  The best intuitive interpretation is the following: the probability
+  \eqn{p(r)} of finding two points, of types \eqn{i} and \eqn{j}
+  respectively, at locations \eqn{x} and \eqn{y}
+  separated by a distance \eqn{r} is equal to
+  \deqn{
+    p(r) = \lambda_i(x) \lambda_j(y) g(r) \,{\rm d}x \, {\rm d}y
+  }{
+    p(r) = lambda[i](x) * lambda[j](y) * g(r) dx dy
+  }
+  where \eqn{\lambda_i}{lambda[i]} is the intensity function
+  of the process of points of type \eqn{i}.
+  For a multitype Poisson point process,
+  this probability is 
+  \eqn{p(r) = \lambda_i(x) \lambda_j(y)}{p(r) = lambda[i](x) * lambda[j](y)}
+  so   \eqn{g_{ij}(r) = 1}{g[i,j](r) = 1}.
+
+  The command \code{pcfcross.inhom} estimates the inhomogeneous
+  pair correlation using a modified version of
+  the algorithm in \code{\link{pcf.ppp}}.
+
+  If the arguments \code{lambdaI} and \code{lambdaJ} are missing or
+  null, they are estimated from \code{X} by kernel smoothing using a
+  leave-one-out estimator. 
+}
+\value{
+  A function value table (object of class \code{"fv"}).
+  Essentially a data frame containing the variables
+  \item{r}{
+    the vector of values of the argument \eqn{r} 
+    at which the inhomogeneous cross-type pair correlation function
+    \eqn{g_{ij}(r)}{g[i,j](r)} has been  estimated
+  }
+  \item{theo}{vector of values equal to 1,
+    the theoretical value of \eqn{g_{ij}(r)}{g[i,j](r)}
+    for the Poisson process
+  }
+  \item{trans}{vector of values of \eqn{g_{ij}(r)}{g[i,j](r)}
+    estimated by translation correction
+  }
+  \item{iso}{vector of values of \eqn{g_{ij}(r)}{g[i,j](r)}
+    estimated by Ripley isotropic correction
+  }
+  as required.
+}
+\seealso{
+  \code{\link{pcf.ppp}}, 
+  \code{\link{pcfinhom}}, 
+  \code{\link{pcfcross}},
+  \code{\link{pcfdot.inhom}}
+}
+\examples{
+  data(amacrine)
+  plot(pcfcross.inhom(amacrine, "on", "off", stoyan=0.1),
+       legendpos="bottom")
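+  ## a sketch of supplying the intensities explicitly,
+  ## as leave-one-out kernel estimates at the data points
+  lamI <- density(split(amacrine)$on, at="points")
+  lamJ <- density(split(amacrine)$off, at="points")
+  plot(pcfcross.inhom(amacrine, "on", "off", lambdaI=lamI, lambdaJ=lamJ))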
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/pcfdot.Rd b/man/pcfdot.Rd
new file mode 100644
index 0000000..4de6991
--- /dev/null
+++ b/man/pcfdot.Rd
@@ -0,0 +1,178 @@
+\name{pcfdot}
+\alias{pcfdot}
+\title{Multitype pair correlation function (i-to-any)}
+\description{
+  Calculates an estimate of the multitype pair correlation function
+  (from points of type \code{i} to points of any type)
+  for a multitype point pattern.
+}
+\usage{
+  pcfdot(X, i, ..., r = NULL,
+         kernel = "epanechnikov", bw = NULL, stoyan = 0.15,
+         correction = c("isotropic", "Ripley", "translate"),
+         divisor = c("r", "d"))
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the dot-type pair correlation function
+    \eqn{g_{i\bullet}(r)}{gdot[i](r)} will be computed.
+    It must be a multitype point pattern (a marked point pattern
+    whose marks are a factor). 
+  }
+  \item{i}{The type (mark value)
+    of the points in \code{X} from which distances are measured.
+    A character string (or something that will be converted to a
+    character string).
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{r}{
+    Vector of values for the argument \eqn{r} at which \eqn{g(r)} 
+    should be evaluated. There is a sensible default.
+  }
+  \item{kernel}{
+    Choice of smoothing kernel,
+    passed to \code{\link{density.default}}.
+  }
+  \item{bw}{
+    Bandwidth for smoothing kernel, 
+    passed to \code{\link{density.default}}.
+  }
+  \item{stoyan}{
+    Coefficient for default bandwidth rule; see Details.
+  }
+  \item{correction}{
+    Choice of edge correction.
+  }
+  \item{divisor}{
+    Choice of divisor in the estimation formula:
+    either \code{"r"} (the default) or \code{"d"}. See Details.
+  }
+}
+\details{
+  This is a generalisation of the pair correlation function \code{\link{pcf}}
+  to multitype point patterns.
+
+  For two locations \eqn{x} and \eqn{y} separated by a nonzero
+  distance \eqn{r},
+  the probability \eqn{p(r)} of finding a point of type \eqn{i} at location
+  \eqn{x} and a point of any type at location \eqn{y} is 
+  \deqn{
+    p(r) = \lambda_i \lambda g_{i\bullet}(r) \,{\rm d}x \, {\rm d}y
+  }{
+    p(r) = lambda[i] * lambda * gdot[i](r) dx dy
+  }
+  where \eqn{\lambda}{lambda} is the intensity of all points,
+  and \eqn{\lambda_i}{lambda[i]} is the intensity of the points
+  of type \eqn{i}. 
+  For a completely random Poisson marked point process,
+  \eqn{p(r) = \lambda_i \lambda}{p(r) = lambda[i] * lambda}
+  so \eqn{g_{i\bullet}(r) = 1}{gdot[i](r) = 1}.
+  
+  For a stationary multitype point process, the
+  type-\code{i}-to-any-type pair correlation
+  function is formally defined as
+  \deqn{
+    g_{i\bullet}(r) = \frac{K_{i\bullet}^\prime(r)}{2\pi r}
+  }{
+    gdot[i](r) = Kdot[i]'(r)/ ( 2 * pi * r )
+  }
+  where \eqn{K_{i\bullet}^\prime}{Kdot[i]'(r)} is the derivative of
+  the type-\code{i}-to-any-type \eqn{K} function
+  \eqn{K_{i\bullet}(r)}{Kdot[i](r)}
+  of the point process. See \code{\link{Kdot}} for information
+  about \eqn{K_{i\bullet}(r)}{Kdot[i](r)}.
+
+  The command \code{pcfdot} computes a kernel estimate of
+  the multitype pair correlation function from points of type \eqn{i}
+  to points of any type.
+
+  \itemize{
+    \item
+    If \code{divisor="r"} (the default), then the multitype
+    counterpart of the standard
+    kernel estimator (Stoyan and Stoyan, 1994, pages 284--285)
+    is used. By default, the recommendations of Stoyan and Stoyan (1994)
+    are followed exactly. 
+    \item
+    If \code{divisor="d"} then a modified estimator is used:
+    the contribution from
+    an interpoint distance \eqn{d_{ij}}{d[ij]} to the
+    estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]}
+    instead of dividing by \eqn{r}. This usually improves the
+    bias of the estimator when \eqn{r} is close to zero.
+  }
+
+  There is also a choice of spatial edge corrections
+  (which are needed to avoid bias due to edge effects
+  associated with the boundary of the spatial window):
+  \code{correction="translate"} is the Ohser-Stoyan translation
+  correction, and \code{correction="isotropic"} or \code{"Ripley"}
+  is Ripley's isotropic correction.  
+
+  The choice of smoothing kernel is controlled by the 
+  argument \code{kernel} which is passed to \code{\link{density}}.
+  The default is the Epanechnikov kernel.
+
+  The bandwidth of the smoothing kernel can be controlled by the
+  argument \code{bw}. Its precise interpretation
+  is explained in the documentation for \code{\link{density.default}}.
+  For the Epanechnikov kernel with support \eqn{[-h,h]},
+  the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}.
+
+  If \code{bw} is not specified, the default bandwidth
+  is determined by Stoyan's rule of thumb (Stoyan and Stoyan, 1994, page
+  285). That is,
+  \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)},
+  where \eqn{\lambda}{lambda} is the (estimated) intensity of the
+  unmarked point process, 
+  and \eqn{c} is a constant in the range from 0.1 to 0.2.
+  The argument \code{stoyan} determines the value of \eqn{c}.
+
+  The companion function \code{\link{pcfcross}} computes the
+  corresponding analogue of \code{\link{Kcross}}.
+}
+
+\value{
+  An object of class \code{"fv"}, see \code{\link{fv.object}},
+  which can be plotted directly using \code{\link{plot.fv}}.
+
+  Essentially a data frame containing columns
+  \item{r}{the vector of values of the argument \eqn{r} 
+    at which the function \eqn{g_{i\bullet}}{gdot[i]} has been  estimated
+  }
+  \item{theo}{the theoretical value \eqn{g_{i\bullet}(r) = 1}{gdot[i](r) = 1}
+    for independent marks.
+  }
+  together with columns named 
+  \code{"border"}, \code{"bord.modif"},
+  \code{"iso"} and/or \code{"trans"},
+  according to the selected edge corrections. These columns contain
+  estimates of the function \eqn{g_{i\bullet}}{gdot[i]}
+  obtained by the edge corrections named.
+}
+\seealso{
+  Mark connection function \code{\link{markconnect}}.
+
+  Multitype pair correlation \code{\link{pcfcross}}, \code{\link{pcfmulti}}.
+  
+  Pair correlation \code{\link{pcf}},\code{\link{pcf.ppp}}.
+  
+  \code{\link{Kdot}}
+}
+\examples{
+ data(amacrine)
+ p <- pcfdot(amacrine, "on")
+ p <- pcfdot(amacrine, "on", stoyan=0.1)
+ plot(p)
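+ ## divisor="d" often reduces bias near r = 0 (see Details)
+ pd <- pcfdot(amacrine, "on", divisor="d")
+ plot(pd)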
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/pcfdot.inhom.Rd b/man/pcfdot.inhom.Rd
new file mode 100644
index 0000000..dc416a1
--- /dev/null
+++ b/man/pcfdot.inhom.Rd
@@ -0,0 +1,148 @@
+\name{pcfdot.inhom}
+\alias{pcfdot.inhom}
+\title{
+  Inhomogeneous Multitype Pair Correlation Function (Type-i-To-Any-Type)
+}
+\description{
+  Estimates the inhomogeneous multitype pair correlation function
+  (from type \eqn{i} to any type)
+  for a multitype point pattern.
+}
+\usage{
+pcfdot.inhom(X, i, lambdaI = NULL, lambdadot = NULL, ...,
+               r = NULL, breaks = NULL,
+               kernel="epanechnikov", bw=NULL, stoyan=0.15,
+               correction = c("isotropic", "Ripley", "translate"),
+               sigma = NULL, varcov = NULL)
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the inhomogeneous
+    multitype pair correlation function
+    \eqn{g_{i\bullet}(r)}{g[i.](r)}
+    will be computed.
+    It must be a multitype point pattern (a marked point pattern
+    whose marks are a factor). 
+  }
+  \item{i}{The type (mark value)
+    of the points in \code{X} from which distances are measured.
+    A character string (or something that will be converted to a
+    character string).
+    Defaults to the first level of \code{marks(X)}.
+  }
+  \item{lambdaI}{
+    Optional.
+    Values of the estimated intensity function of the points of type \code{i}.
+    Either a vector giving the intensity values
+    at the points of type \code{i},
+    a pixel image (object of class \code{"im"}) giving the
+    intensity values at all locations, or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+  }
+  \item{lambdadot}{
+    Optional.
+    Values of the estimated intensity function of the point pattern \code{X}.
+    A numeric vector, pixel image or \code{function(x,y)}.
+  }
+  \item{r}{
+    Vector of values for the argument \eqn{r} at which
+    \eqn{g_{i\bullet}(r)}{g[i.](r)}
+    should be evaluated. There is a sensible default.
+  }
+  \item{breaks}{
+	This argument is for internal use only.
+  }
+  \item{kernel}{
+    Choice of smoothing kernel, passed to \code{\link{density.default}}.
+  }
+  \item{bw}{
+    Bandwidth for smoothing kernel, passed to \code{\link{density.default}}.
+  }
+  \item{\dots}{
+    Other arguments passed to the kernel density estimation 
+    function \code{\link{density.default}}.
+  }
+  \item{stoyan}{
+    Bandwidth coefficient; see Details.
+  }
+  \item{correction}{
+    Choice of edge correction.
+  }
+  \item{sigma,varcov}{
+    Optional arguments passed to  \code{\link{density.ppp}}
+    to control the smoothing bandwidth, when \code{lambdaI} or
+    \code{lambdadot} is estimated by kernel smoothing.
+  }
+}
+\details{
+  The inhomogeneous multitype (type \eqn{i} to any type)
+  pair correlation function
+  \eqn{g_{i\bullet}(r)}{g[i.](r)}
+  is a summary of the dependence between different types of points in a
+  multitype spatial point process that does not have a uniform
+  density of points.
+
+  The best intuitive interpretation is the following: the probability
+  \eqn{p(r)} of finding a point of type \eqn{i} at location \eqn{x}
+  and another point of any type at location \eqn{y},
+  where \eqn{x} and \eqn{y} are separated by a distance \eqn{r},
+  is equal to
+  \deqn{
+    p(r) = \lambda_i(x) \lambda(y) g(r) \,{\rm d}x \, {\rm d}y
+  }{
+    p(r) = lambda[i](x) * lambda(y) * g(r) dx dy
+  }
+  where \eqn{\lambda_i}{lambda[i]} is the intensity function
+  of the process of points of type \eqn{i}, and 
+  where \eqn{\lambda}{lambda} is the intensity function of the points
+  of all types.
+  For a multitype Poisson point process, this probability is 
+  \eqn{p(r) = \lambda_i(x) \lambda(y)}{p(r) = lambda[i](x) * lambda(y)}
+  so   \eqn{g_{i\bullet}(r) = 1}{g[i.](r) = 1}.
+
+  The command \code{pcfdot.inhom} estimates the inhomogeneous
+  multitype pair correlation using a modified version of
+  the algorithm in \code{\link{pcf.ppp}}.
+
+  If the arguments \code{lambdaI} and \code{lambdadot} are missing or
+  null, they are estimated from \code{X} by kernel smoothing using a
+  leave-one-out estimator. 
+}
+\value{
+  A function value table (object of class \code{"fv"}).
+  Essentially a data frame containing the variables
+  \item{r}{
+    the vector of values of the argument \eqn{r} 
+    at which the inhomogeneous multitype pair correlation function
+    \eqn{g_{i\bullet}(r)}{g[i.](r)} has been  estimated
+  }
+  \item{theo}{vector of values equal to 1,
+    the theoretical value of \eqn{g_{i\bullet}(r)}{g[i.](r)}
+    for the Poisson process
+  }
+  \item{trans}{vector of values of \eqn{g_{i\bullet}(r)}{g[i.](r)}
+    estimated by translation correction
+  }
+  \item{iso}{vector of values of \eqn{g_{i\bullet}(r)}{g[i.](r)}
+    estimated by Ripley isotropic correction
+  }
+  as required.
+}
+\seealso{
+  \code{\link{pcf.ppp}}, 
+  \code{\link{pcfinhom}}, 
+  \code{\link{pcfdot}},
+  \code{\link{pcfcross.inhom}}
+}
+\examples{
+  data(amacrine)
+  plot(pcfdot.inhom(amacrine, "on", stoyan=0.1), legendpos="bottom")
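+  ## a sketch of supplying the intensities explicitly
+  lamI <- density(split(amacrine)$on, at="points")
+  lamdot <- density(unmark(amacrine), at="points")
+  plot(pcfdot.inhom(amacrine, "on", lambdaI=lamI, lambdadot=lamdot))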
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/pcfinhom.Rd b/man/pcfinhom.Rd
new file mode 100644
index 0000000..6a92883
--- /dev/null
+++ b/man/pcfinhom.Rd
@@ -0,0 +1,208 @@
+\name{pcfinhom}
+\alias{pcfinhom}
+\title{
+  Inhomogeneous Pair Correlation Function
+}
+\description{
+  Estimates the inhomogeneous pair correlation function of
+  a point pattern using kernel methods.
+}
+\usage{
+pcfinhom(X, lambda = NULL, ..., r = NULL,
+         kernel = "epanechnikov", bw = NULL, stoyan = 0.15,
+         correction = c("translate", "Ripley"),
+         divisor = c("r", "d"),
+         renormalise = TRUE, normpower=1,
+         update = TRUE, leaveoneout = TRUE,
+         reciplambda = NULL,
+         sigma = NULL, varcov = NULL, close=NULL)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{lambda}{
+    Optional.
+    Values of the estimated intensity function.
+    Either a vector giving the intensity values
+    at the points of the pattern \code{X},
+    a pixel image (object of class \code{"im"}) giving the
+    intensity values at all locations, a fitted point process model
+    (object of class \code{"ppm"}) or a \code{function(x,y)} which
+    can be evaluated to give the intensity value at any location.
+  }
+  \item{r}{
+    Vector of values for the argument \eqn{r} at which \eqn{g(r)} 
+    should be evaluated. There is a sensible default.
+  }
+  \item{kernel}{
+    Choice of smoothing kernel, passed to \code{\link{density.default}}.
+  }
+  \item{bw}{
+    Bandwidth for smoothing kernel,
+    passed to \code{\link{density.default}}.
+    Either a single numeric value,
+    or a character string specifying a bandwidth selection rule
+    recognised by \code{\link{density.default}}.
+    If \code{bw} is missing or \code{NULL},
+    the default value is computed using
+    Stoyan's rule of thumb: see \code{\link{bw.stoyan}}.
+  }
+  \item{\dots}{
+    Other arguments passed to the kernel density estimation 
+    function \code{\link{density.default}}.
+  }
+  \item{stoyan}{
+    Coefficient for Stoyan's bandwidth selection rule;
+    see \code{\link{bw.stoyan}}.
+  }
+  \item{correction}{
+    Choice of edge correction.
+  }
+  \item{divisor}{
+    Choice of divisor in the estimation formula:
+    either \code{"r"} (the default) or \code{"d"}.
+    See \code{\link{pcf.ppp}}.
+  }
+  \item{renormalise}{
+    Logical. Whether to renormalise the estimate. See Details.
+  }
+  \item{normpower}{
+    Integer (usually either 1 or 2).
+    Normalisation power. See Details.
+  }
+  \item{update}{
+    Logical. If \code{lambda} is a fitted model
+    (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"})
+    and \code{update=TRUE} (the default),
+    the model will first be refitted to the data \code{X}
+    (using \code{\link{update.ppm}} or \code{\link{update.kppm}})
+    before the fitted intensity is computed.
+    If \code{update=FALSE}, the fitted intensity of the
+    model will be computed without re-fitting it to \code{X}.
+  }
+  \item{leaveoneout}{
+    Logical value (passed to \code{\link{density.ppp}} or
+    \code{\link{fitted.ppm}}) specifying whether to use a
+    leave-one-out rule when calculating the intensity.
+  }
+  \item{reciplambda}{
+    Alternative to \code{lambda}.
+    Values of the estimated \emph{reciprocal} \eqn{1/\lambda}{1/lambda}
+    of the intensity function.
+    Either a vector giving the reciprocal intensity values
+    at the points of the pattern \code{X},
+    a pixel image (object of class \code{"im"}) giving the
+    reciprocal intensity values at all locations,
+    or a \code{function(x,y)} which can be evaluated to give the
+    reciprocal intensity value at any location.
+  }
+  \item{sigma,varcov}{
+    Optional arguments passed to  \code{\link{density.ppp}}
+    to control the smoothing bandwidth, when \code{lambda} is
+    estimated by kernel smoothing.
+  }
+  \item{close}{
+    Advanced use only. Precomputed data. See section on Advanced Use.
+  }
+}
+\details{
+  The inhomogeneous pair correlation function \eqn{g_{\rm inhom}(r)}{ginhom(r)}
+  is a summary of the dependence between points in a spatial point
+  process that does not have a uniform density of points.
+
+  The best intuitive interpretation is the following: the probability
+  \eqn{p(r)} of finding two points at locations \eqn{x} and \eqn{y}
+  separated by a distance \eqn{r} is equal to
+  \deqn{
+    p(r) = \lambda(x) \lambda(y) g(r) \,{\rm d}x \, {\rm d}y
+  }{
+    p(r) = lambda(x) * lambda(y) * g(r) dx dy
+  }
+  where \eqn{\lambda}{lambda} is the intensity function
+  of the point process.
+  For a Poisson point process with intensity function
+  \eqn{\lambda}{lambda}, this probability is 
+  \eqn{p(r) = \lambda(x) \lambda(y)}{p(r) = lambda(x) * lambda(y)}
+  so \eqn{g_{\rm inhom}(r) = 1}{ginhom(r) = 1}.
+
+  The inhomogeneous pair correlation function 
+  is related to the inhomogeneous \eqn{K} function through
+  \deqn{
+    g_{\rm inhom}(r) = \frac{K'_{\rm inhom}(r)}{2\pi r}
+  }{
+    ginhom(r) = Kinhom'(r)/ ( 2 * pi * r) 
+  }
+  where \eqn{K'_{\rm inhom}(r)}{Kinhom'(r)}
+  is the derivative of \eqn{K_{\rm inhom}(r)}{Kinhom(r)}, the
+  inhomogeneous \eqn{K} function. See \code{\link{Kinhom}} for information
+  about \eqn{K_{\rm inhom}(r)}{Kinhom(r)}.
+
+  The command \code{pcfinhom} estimates the inhomogeneous
+  pair correlation using a modified version of
+  the algorithm in \code{\link{pcf.ppp}}.   
+  
+  If \code{renormalise=TRUE} (the default), then the estimates 
+  are multiplied by \eqn{c^{\mbox{normpower}}}{c^normpower} where 
+  \eqn{
+    c = \mbox{area}(W)/\sum (1/\lambda(x_i)).
+  }{
+    c = area(W)/sum[i] (1/lambda(x[i])).
+  }
+  This rescaling reduces the variability and bias of the estimate
+  in small samples and in cases of very strong inhomogeneity.
+  The default value of \code{normpower} is 1
+  but the most sensible value is 2, which would correspond to rescaling
+  the \code{lambda} values so that
+  \eqn{
+    \sum (1/\lambda(x_i)) = \mbox{area}(W).
+  }{
+    sum[i] (1/lambda(x[i])) = area(W).
+  }
+}
+\value{
+  A function value table (object of class \code{"fv"}).
+  Essentially a data frame containing the variables
+  \item{r}{
+    the vector of values of the argument \eqn{r} 
+    at which the inhomogeneous pair correlation function
+    \eqn{g_{\rm inhom}(r)}{ginhom(r)} has been  estimated
+  }
+  \item{theo}{vector of values equal to 1,
+    the theoretical value of \eqn{g_{\rm inhom}(r)}{ginhom(r)}
+    for the Poisson process
+  }
+  \item{trans}{vector of values of \eqn{g_{\rm inhom}(r)}{ginhom(r)}
+    estimated by translation correction
+  }
+  \item{iso}{vector of values of \eqn{g_{\rm inhom}(r)}{ginhom(r)}
+    estimated by Ripley isotropic correction
+  }
+  as required.
+}
+\section{Advanced Use}{
+  To perform the same computation using several different bandwidths \code{bw},
+  it is efficient to use the argument \code{close}.
+  This should be the result of \code{\link{closepairs}(X, rmax)}
+  for a suitably large value of \code{rmax}, namely
+  \code{rmax >= max(r) + 3 * bw}.
+}
+\seealso{
+  \code{\link{pcf}}, 
+  \code{\link{pcf.ppp}}, 
+  \code{\link{bw.stoyan}},
+  \code{\link{bw.pcf}},
+  \code{\link{Kinhom}}
+}
+\examples{
+  data(residualspaper)
+  X <- residualspaper$Fig4b
+  plot(pcfinhom(X, stoyan=0.2, sigma=0.1))
+  fit <- ppm(X, ~polynom(x,y,2))
+  plot(pcfinhom(X, lambda=fit, normpower=2))
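+  ## Advanced use (a sketch): precompute the close pairs once,
+  ## then reuse them for several bandwidths; rmax is assumed
+  ## to exceed max(r) + 3 * bw for this pattern
+  cl <- closepairs(X, rmax=0.5)
+  g1 <- pcfinhom(X, sigma=0.1, bw=0.02, close=cl)
+  g2 <- pcfinhom(X, sigma=0.1, bw=0.05, close=cl)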
+}
+\author{
+  \spatstatAuthors.
+} 
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/pcfmulti.Rd b/man/pcfmulti.Rd
new file mode 100644
index 0000000..7f3431c
--- /dev/null
+++ b/man/pcfmulti.Rd
@@ -0,0 +1,148 @@
+\name{pcfmulti}
+\alias{pcfmulti}
+\title{
+  Marked pair correlation function
+}
+\description{
+For a marked point pattern, 
+estimate the multitype pair correlation function
+using kernel methods.
+}
+\usage{
+   pcfmulti(X, I, J, ..., r = NULL,
+            kernel = "epanechnikov", bw = NULL, stoyan = 0.15,
+            correction = c("translate", "Ripley"),
+            divisor = c("r", "d"),
+            Iname = "points satisfying condition I",
+            Jname = "points satisfying condition J")
+}
+\arguments{
+  \item{X}{The observed point pattern, 
+    from which an estimate of the multitype pair correlation function
+    \eqn{g_{IJ}(r)}{g[I,J](r)} will be computed.
+    It may be a marked point pattern with marks of any kind,
+    since the subsets \code{I} and \code{J} are specified directly.
+  }
+  \item{I}{Subset index specifying the points of \code{X}
+    from which distances are measured. 
+  }
+  \item{J}{Subset index specifying the points in \code{X} to which
+    distances are measured. 
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{r}{
+    Vector of values for the argument \eqn{r} at which \eqn{g(r)} 
+    should be evaluated. There is a sensible default.
+  }
+  \item{kernel}{
+    Choice of smoothing kernel,
+    passed to \code{\link{density.default}}.
+  }
+  \item{bw}{
+    Bandwidth for smoothing kernel, 
+    passed to \code{\link{density.default}}.
+  }
+  \item{stoyan}{
+    Coefficient for default bandwidth rule.
+  }
+  \item{correction}{
+    Choice of edge correction.
+  }
+  \item{divisor}{
+    Choice of divisor in the estimation formula:
+    either \code{"r"} (the default) or \code{"d"}. 
+  }
+  \item{Iname,Jname}{
+    Optional. Character strings describing the members of
+    the subsets \code{I} and \code{J}.
+  }
+}
+\details{
+  This is a generalisation of \code{\link{pcfcross}}
+  to arbitrary collections of points.
+
+  The algorithm measures the distance from each data point
+  in subset \code{I} to each data point in subset \code{J},
+  excluding identical pairs of points. The distances are
+  kernel-smoothed and renormalised to form a pair correlation
+  function.
+
+  \itemize{
+    \item
+    If \code{divisor="r"} (the default), then the multitype
+    counterpart of the standard
+    kernel estimator (Stoyan and Stoyan, 1994, pages 284--285)
+    is used. By default, the recommendations of Stoyan and Stoyan (1994)
+    are followed exactly. 
+    \item
+    If \code{divisor="d"} then a modified estimator is used:
+    the contribution from
+    an interpoint distance \eqn{d_{ij}}{d[ij]} to the
+    estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]}
+    instead of dividing by \eqn{r}. This usually improves the
+    bias of the estimator when \eqn{r} is close to zero.
+  }
+
+  There is also a choice of spatial edge corrections
+  (which are needed to avoid bias due to edge effects
+  associated with the boundary of the spatial window):
+  \code{correction="translate"} is the Ohser-Stoyan translation
+  correction, and \code{correction="isotropic"} or \code{"Ripley"}
+  is Ripley's isotropic correction.  
+
+  The arguments \code{I} and \code{J} specify two subsets of the
+  point pattern \code{X}. They may be any type of subset indices, for example,
+  logical vectors of length equal to \code{npoints(X)},
+  or integer vectors with entries in the range 1 to
+  \code{npoints(X)}, or negative integer vectors.
+
+  Alternatively, \code{I} and \code{J} may be \bold{functions}
+  that will be applied to the point pattern \code{X} to obtain
+  index vectors. If \code{I} is a function, then evaluating
+  \code{I(X)} should yield a valid subset index. This option
+  is useful when generating simulation envelopes using
+  \code{\link{envelope}}.
+
+  The choice of smoothing kernel is controlled by the 
+  argument \code{kernel} which is passed to \code{\link{density}}.
+  The default is the Epanechnikov kernel.
+
+  The bandwidth of the smoothing kernel can be controlled by the
+  argument \code{bw}. Its precise interpretation
+  is explained in the documentation for \code{\link{density.default}}.
+  For the Epanechnikov kernel with support \eqn{[-h,h]},
+  the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}.
+
+  If \code{bw} is not specified, the default bandwidth
+  is determined by Stoyan's rule of thumb (Stoyan and Stoyan, 1994, page
+  285) applied to the points in the subset \code{J}. That is,
+  \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)},
+  where \eqn{\lambda}{lambda} is the (estimated) intensity of the
+  sub-process of points in \code{J},
+  and \eqn{c} is a constant in the range from 0.1 to 0.2.
+  The argument \code{stoyan} determines the value of \eqn{c}.
+}
+\value{
+  An object of class \code{"fv"}.
+}
+\seealso{
+  \code{\link{pcfcross}}, 
+  \code{\link{pcfdot}}, 
+  \code{\link{pcf.ppp}}.
+}
+\examples{
+  adult <- (marks(longleaf) >= 30)
+  juvenile <- !adult
+  p <- pcfmulti(longleaf, adult, juvenile)
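+  ## I and J may also be functions applied to X (a sketch),
+  ## which is convenient when computing simulation envelopes
+  adultfun <- function(X) marks(X) >= 30
+  p2 <- pcfmulti(longleaf, adultfun, function(X) !adultfun(X))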
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/perimeter.Rd b/man/perimeter.Rd
new file mode 100644
index 0000000..b8c62ef
--- /dev/null
+++ b/man/perimeter.Rd
@@ -0,0 +1,50 @@
+\name{perimeter}
+\Rdversion{1.1}
+\alias{perimeter}
+\title{
+  Perimeter Length of Window
+}
+\description{
+  Computes the perimeter length of a window
+}
+\usage{
+perimeter(w)
+}
+\arguments{
+  \item{w}{
+    A window (object of class \code{"owin"})
+    or data that can be converted to a window by \code{\link{as.owin}}.
+  }
+}
+\details{
+  This function computes the perimeter (length of the boundary) of the
+  window \code{w}. If \code{w} is a rectangle or a polygonal window,
+  the perimeter is the sum of the lengths of the edges of \code{w}.
+  If \code{w} is a mask, it is first converted to a polygonal window
+  using \code{\link{as.polygonal}}, then staircase edges are removed
+  using \code{\link{simplify.owin}}, and the perimeter of the resulting
+  polygon is computed.
+}
+\value{
+  A numeric value giving the perimeter length of the window.
+}
+\seealso{
+  \code{\link{area.owin}}
+  \code{\link{diameter.owin}},
+  \code{\link{owin.object}},
+  \code{\link{as.owin}}
+}
+\examples{
+  perimeter(square(3))
+  data(letterR)
+  perimeter(letterR)
+  if(interactive()) print(perimeter(as.mask(letterR)))
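+  ## the mask case follows the pipeline described in Details
+  ## (a sketch; the tolerance dmin is chosen arbitrarily here)
+  P <- simplify.owin(as.polygonal(as.mask(letterR)), dmin=0.01)
+  perimeter(P)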
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/periodify.Rd b/man/periodify.Rd
new file mode 100644
index 0000000..2102eeb
--- /dev/null
+++ b/man/periodify.Rd
@@ -0,0 +1,114 @@
+\name{periodify} 
+\alias{periodify}
+\alias{periodify.owin}
+\alias{periodify.ppp}
+\alias{periodify.psp}
+\title{
+  Make Periodic Copies of a Spatial Pattern  
+}
+\description{
+  Given a spatial pattern (point pattern, line segment pattern,
+  window, etc) make shifted copies of the pattern
+  and optionally combine them to make a periodic pattern.
+}
+\usage{
+periodify(X, ...)
+\method{periodify}{ppp}(X, nx = 1, ny = 1, ...,
+           combine=TRUE, warn=TRUE, check=TRUE,
+           ix=(-nx):nx, iy=(-ny):ny,
+           ixy=expand.grid(ix=ix,iy=iy))
+\method{periodify}{psp}(X, nx = 1, ny = 1, ..., 
+           combine=TRUE, warn=TRUE, check=TRUE,
+           ix=(-nx):nx, iy=(-ny):ny, 
+           ixy=expand.grid(ix=ix,iy=iy))
+\method{periodify}{owin}(X, nx = 1, ny = 1, ...,
+           combine=TRUE, warn=TRUE, 
+           ix=(-nx):nx, iy=(-ny):ny,
+           ixy=expand.grid(ix=ix,iy=iy))
+}
+\arguments{
+  \item{X}{
+    An object representing a spatial pattern
+    (point pattern, line segment pattern or window).
+  }
+  \item{nx,ny}{
+    Integers.
+    Numbers of additional copies of \code{X} in each direction.
+    The result will be a grid of \code{2 * nx + 1} by \code{2 * ny + 1}
+    copies of the original object.
+    (Overruled by \code{ix, iy, ixy}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{combine}{
+    Logical flag determining whether the copies should be superimposed
+    to make an object like \code{X} (if \code{combine=TRUE}) or
+    simply returned as a list of objects (\code{combine=FALSE}).
+  }
+  \item{warn}{
+    Logical flag determining whether to issue warnings.
+  }
+  \item{check}{
+    Logical flag determining whether to check the validity of the
+    combined pattern.
+  }
+  \item{ix, iy}{
+    Integer vectors determining the grid positions of the copies
+    of \code{X}. (Overruled by \code{ixy}).
+  }
+  \item{ixy}{
+    Matrix or data frame with two columns, giving the 
+    grid positions of the copies of \code{X}. 
+  }
+}
+\details{
+  Given a spatial pattern (point pattern, line segment pattern, etc)
+  this function makes a number of shifted copies of the pattern
+  and optionally combines them. The function \code{periodify} is
+  generic, with methods for various kinds of spatial objects.
+
+  The default is to make a 3 by 3 array of copies of \code{X} and
+  combine them into a single pattern of the same kind as \code{X}.
+  This can be used (for example) to compute toroidal or periodic
+  edge corrections for various operations on \code{X}.
+
+  If the arguments \code{nx}, \code{ny} are given
+  and other arguments are missing,
+  the original object will be copied \code{nx} times to the right
+  and \code{nx} times to the left, then \code{ny} times upward and
+  \code{ny} times downward, making \code{(2 * nx + 1) * (2 * ny + 1)} 
+  copies altogether, arranged in a grid, centred on the original object.
+  
+  If the arguments \code{ix}, \code{iy} or \code{ixy} are specified,
+  then these determine the grid positions of the copies of \code{X}
+  that will be made. For example \code{(ix,iy) = (1, 2)} means a
+  copy of \code{X} shifted by the vector \code{(ix * w, iy * h)} where
+  \code{w,h} are the width and height of the bounding rectangle of \code{X}.
+
+  If \code{combine=TRUE} (the default) the copies of \code{X} are
+  superimposed to create an object of the same kind as \code{X}.
+  If \code{combine=FALSE} the copies of \code{X} are returned as a list.
+}
+\value{
+  If \code{combine=TRUE}, an object of the same class as \code{X}.
+  If \code{combine=FALSE}, a list of objects of the same class as \code{X}.
+}
+\seealso{
+  \code{\link{shift}}
+}
+\examples{
+  data(cells)
+  plot(periodify(cells))
+  a <- lapply(periodify(Window(cells), combine=FALSE),
+        plot, add=TRUE, lty=2)
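+  ## custom grid positions via ix, iy (a sketch):
+  ## copies only to the right of, and above, the original
+  plot(periodify(cells, ix=0:1, iy=0:1))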
+
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/persp.im.Rd b/man/persp.im.Rd
new file mode 100644
index 0000000..c9506af
--- /dev/null
+++ b/man/persp.im.Rd
@@ -0,0 +1,153 @@
+\name{persp.im}
+\alias{persp.im}
+\title{Perspective Plot of Pixel Image}
+\description{
+  Displays a perspective plot of a pixel image.
+}
+\usage{
+  \method{persp}{im}(x, \dots,
+                     colmap=NULL, colin=x, apron=FALSE, visible=FALSE)
+}
+\arguments{
+  \item{x}{
+    The pixel image to be plotted as a surface.
+    An object of class \code{"im"} (see \code{\link{im.object}}).
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{\link{persp.default}} to control the
+    display.
+  }
+  \item{colmap}{
+    Optional data controlling the colour map. See Details.
+  }
+  \item{colin}{
+    Optional. Colour input. Another pixel image (of the same dimensions
+    as \code{x}) containing the values that will be mapped to colours. 
+  }
+  \item{apron}{
+    Logical. If \code{TRUE}, a grey apron is placed
+    around the sides of the perspective plot.
+  }
+  \item{visible}{
+    Logical value indicating whether to compute which pixels of \code{x}
+    are visible in the perspective view. See Details.
+  }
+}
+\value{
+  (invisibly) the 3D transformation matrix
+  returned by \code{\link{persp.default}},
+  together with an attribute \code{"expand"} which gives the
+  relative scale of the \eqn{z} coordinate.
+
+  If argument \code{visible=TRUE} was given, the return value
+  also has an attribute \code{"visible"} which is a pixel image,
+  compatible with \code{x}, with logical values which are \code{TRUE}
+  when the corresponding pixel is visible in the perspective view,
+  and \code{FALSE} when it is obscured.
+}
+\details{
+  This is the \code{persp} method for the class \code{"im"}.
+
+  The pixel image \code{x} must have real or integer values.
+  These values are treated as heights of a surface, and the
+  surface is displayed as a perspective plot on the current plot device,
+  using equal scales on the \code{x} and \code{y} axes.
+
+  The optional argument \code{colmap} gives an easy way to display
+  different altitudes in different colours (if this is what you
+  want).
+  \itemize{
+    \item If \code{colmap} is a colour map (object of class \code{"colourmap"},
+    created by the function \code{\link{colourmap}})
+    then this colour map will be used to associate
+    altitudes with colours.
+    \item If \code{colmap} is a character vector, then the range of
+    altitudes in the perspective plot will be divided into
+    \code{length(colmap)} intervals, and those parts of the surface
+    which lie in a particular altitude range will be assigned
+    the corresponding colour from \code{colmap}.
+    \item If \code{colmap} is a function in the \R language
+    of the form \code{function(n, ...)}, this function will be called
+    with an appropriate value of \code{n} to generate a character
+    vector of \code{n} colours. 
+      Examples of such functions are
+      \code{\link[grDevices]{heat.colors}},
+      \code{\link[grDevices]{terrain.colors}},
+      \code{\link[grDevices]{topo.colors}} and
+      \code{\link[grDevices]{cm.colors}}. 
+    \item If \code{colmap} is a function in the \R language of the
+    form \code{function(range, ...)} then it will be called
+    with \code{range} equal to the range of altitudes,
+    to determine the colour values or colour map.
+    Examples of such functions are \code{\link{beachcolours}}
+    and \code{\link{beachcolourmap}}.
+    \item If \code{colmap} is a list with entries \code{breaks} and \code{col},
+    then \code{colmap$breaks} determines the breakpoints of the altitude
+    intervals, and \code{colmap$col} provides the corresponding colours.
+  }
+  Alternatively, if the argument \code{colin} (\emph{colour input}) is present,
+  then the colour map \code{colmap} will be applied to
+  the pixel values of \code{colin} instead of the pixel values of
+  \code{x}. The result is a perspective view of a surface with
+  heights determined by \code{x} and colours determined by \code{colin}
+  (mapped by \code{colmap}).  
+  
+  If \code{apron=TRUE}, a vertical surface is drawn around the boundary
+  of the perspective plot, so that the terrain appears to have been
+  cut out of a solid material. If colour data were supplied, then
+  the apron is coloured light grey.
+
+  Graphical parameters controlling the perspective plot
+  are passed through the \code{...} arguments
+  directly to the function \code{\link{persp.default}}.
+  See the examples in \code{\link{persp.default}} or in
+  \code{demo(persp)}.
+
+  The vertical scale is controlled by the argument \code{expand}:
+  setting \code{expand=1} will interpret the pixel values as being in
+  the same units as the spatial coordinates \eqn{x} and \eqn{y} and
+  represent them at the same scale.
+
+  If \code{visible=TRUE}, the algorithm also computes whether each pixel
+  in \code{x} is visible in the perspective view. In order to be
+  visible, a pixel must not be obscured by another pixel which lies
+  in front of it (as seen from the viewing direction), and the
+  three-dimensional vector normal to the surface must be pointing toward the
+  viewer. The return value of \code{persp.im} then has an attribute
+  \code{"visible"} which is a pixel image, compatible with \code{x},
+  with pixel value equal to \code{TRUE} if the corresponding pixel in
+  \code{x} is visible, and \code{FALSE} if it is not visible.
+}
+\seealso{
+  \code{\link{perspPoints}},
+  \code{\link{perspLines}}
+  for drawing additional points or lines on the surface.
+
+  \code{\link{im.object}},
+  \code{\link{plot.im}},
+  \code{\link{contour.im}}
+}
+\examples{
+   # an image
+   Z <- setcov(owin())
+   persp(Z, colmap=terrain.colors(128))
+   co <- colourmap(range=c(0,1), col=rainbow(128))
+   persp(Z, colmap=co, axes=FALSE, shade=0.3)
+
+   ## Terrain elevation
+   persp(bei.extra$elev, colmap=terrain.colors(128),
+         apron=TRUE, theta=-30, phi=20,
+         zlab="Elevation", main="", ticktype="detailed",
+         expand=6)
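+   ## colmap as a list of breakpoints and colours (a sketch;
+   ## the breaks must cover the range of pixel values)
+   persp(Z, colmap=list(breaks=c(0, 0.25, 0.5, 0.75, 1),
+                        col=c("blue", "green", "yellow", "orange")))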
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/perspPoints.Rd b/man/perspPoints.Rd
new file mode 100644
index 0000000..9b77d38
--- /dev/null
+++ b/man/perspPoints.Rd
@@ -0,0 +1,86 @@
+\name{perspPoints}
+\alias{perspPoints}
+\alias{perspSegments}
+\alias{perspLines}
+\alias{perspContour}
+\title{
+  Draw Points or Lines on a Surface Viewed in Perspective
+}
+\description{
+  After a surface has been plotted in a perspective view
+  using \code{\link{persp.im}}, these functions can be used to
+  draw points or lines on the surface.
+}
+\usage{
+perspPoints(x, y=NULL, \dots, Z, M)
+
+perspLines(x, y = NULL, \dots, Z, M)
+
+perspSegments(x0, y0 = NULL, x1 = NULL, y1 = NULL, \dots, Z, M)
+
+perspContour(Z, M, \dots,
+             nlevels=10, levels=pretty(range(Z), nlevels))
+}
+\arguments{
+  \item{x,y}{
+    Spatial coordinates,
+    acceptable to \code{\link[grDevices]{xy.coords}},
+    for the points or lines on the horizontal plane.
+  }
+  \item{Z}{
+    Pixel image (object of class \code{"im"})
+    specifying the surface heights.
+  }
+  \item{M}{
+    Projection matrix returned from \code{\link{persp.im}}
+    when \code{Z} was plotted.
+  }
+  \item{\dots}{
+    Graphical arguments passed to
+    \code{\link[graphics]{points}},
+    \code{\link[graphics]{lines}} or
+    \code{\link[graphics]{segments}}
+    to control the drawing.
+  }
+  \item{x0,y0,x1,y1}{
+    Spatial coordinates of the line segments, on the
+    horizontal plane. Alternatively \code{x0} can be a
+    line segment pattern (object of class \code{"psp"})
+    and \code{y0,x1,y1} can be \code{NULL}.
+  }
+  \item{nlevels}{Number of contour levels}
+  \item{levels}{Vector of heights of contours.}
+}
+\details{
+  After a surface has been plotted in a perspective view,
+  these functions can be used to draw points or lines on the surface.
+
+  The user should already have called \code{\link{persp.im}}
+  in the form \code{M <- persp(Z, visible=TRUE, ...)} 
+  to display the perspective view of the surface \code{Z}.
+
+  Only points and lines which are visible from the viewer's standpoint
+  will be drawn.  
+}
+\value{
+  Same as the return value from
+  \code{\link[graphics]{points}}
+  or \code{\link[graphics]{segments}}. 
+}
+\seealso{
+  \code{\link{persp.im}}
+}
+\examples{
+  M <- persp(bei.extra$elev, colmap=terrain.colors(128),
+             apron=TRUE, theta=-30, phi=20,
+             zlab="Elevation", main="", 
+             expand=6, visible=TRUE, shade=0.3)
+
+  perspContour(bei.extra$elev, M=M, col="pink", nlevels=12)
+  perspPoints(bei, Z=bei.extra$elev, M=M, pch=16, cex=0.3, col="chartreuse")
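+  ## line segments can be drawn the same way (a sketch;
+  ## these segment coordinates are arbitrary)
+  S <- psp(c(200, 400), c(100, 200), c(600, 300), c(400, 100),
+           window=Window(bei))
+  perspSegments(S, Z=bei.extra$elev, M=M, col="white")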
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/pixelcentres.Rd b/man/pixelcentres.Rd
new file mode 100644
index 0000000..6aa4bf7
--- /dev/null
+++ b/man/pixelcentres.Rd
@@ -0,0 +1,55 @@
+\name{pixelcentres}
+\alias{pixelcentres}
+\title{
+  Extract Pixel Centres as Point Pattern
+}
+\description{
+  Given a pixel image or binary mask window,
+  extract the centres of all pixels and return them as a point pattern.
+}
+\usage{
+pixelcentres(X, W = NULL, ...)
+}
+\arguments{
+  \item{X}{
+    Pixel image (object of class \code{"im"})
+    or window (object of class \code{"owin"}).
+  }
+  \item{W}{
+    Optional window to contain the resulting point pattern.
+  }
+  \item{\dots}{
+    Optional arguments defining the pixel resolution.
+  }
+}
+\details{
+  If the argument \code{X} is a pixel image, the result is a point
+  pattern, consisting of the centre of every pixel whose pixel value
+  is not \code{NA}. 
+
+  If \code{X} is a window which is a binary mask, the result is a point
+  pattern consisting of the centre of every pixel inside the window
+  (i.e. every pixel for which the mask value is \code{TRUE}).
+
+  Otherwise, \code{X} is first converted to a window, then
+  converted to a mask using \code{\link{as.mask}}, then handled as
+  above.
+}
+\value{
+  A point pattern (object of class \code{"ppp"}).
+}
+\seealso{
+  \code{\link{raster.xy}}
+}
+\examples{
+  pixelcentres(letterR, dimyx=5)
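+  ## centres of the non-NA pixels of an image (a sketch)
+  Z <- as.im(function(x,y) x + y, W=letterR, dimyx=8)
+  pixelcentres(Z)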
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/pixellate.Rd b/man/pixellate.Rd
new file mode 100644
index 0000000..317307a
--- /dev/null
+++ b/man/pixellate.Rd
@@ -0,0 +1,66 @@
+\name{pixellate}
+\Rdversion{1.1}
+\alias{pixellate}
+\title{
+  Convert Spatial Object to Pixel Image
+}
+\description{
+  Convert a spatial object to 
+  a pixel image by measuring the amount of stuff
+  in each pixel.
+}
+\usage{
+pixellate(x, ...)
+}
+\arguments{
+  \item{x}{
+    Spatial object to be converted.
+    A point pattern (object of class \code{"ppp"}),
+    a window (object of class \code{"owin"}),
+    a line segment pattern (object of class \code{"psp"}),
+    or some other suitable data.
+  }
+  \item{\dots}{
+    Arguments passed to methods.
+  }
+}
+\details{
+  The function \code{pixellate} converts a geometrical object \code{x} into
+  a pixel image, by measuring the \emph{amount} of \code{x} that is
+  inside each pixel.
+  
+  If \code{x} is a point pattern, \code{pixellate(x)} counts the
+  number of points of \code{x} falling in each pixel.
+  If \code{x} is a window, \code{pixellate(x)} measures the area
+  of intersection of each pixel with the window.
+
+  The function \code{pixellate} is generic, with methods for
+  point patterns (\code{\link{pixellate.ppp}}),
+  windows (\code{\link{pixellate.owin}}),
+  and 
+  line segment patterns (\code{\link{pixellate.psp}}).
+  See the separate documentation for these methods.
+
+  The related function \code{\link{as.im}}
+  also converts \code{x} into a pixel image,
+  but typically measures only the presence or absence of
+  \code{x} inside each pixel.
+}
+\value{
+  A pixel image (object of class \code{"im"}).
+}
+\seealso{
+  \code{\link{pixellate.ppp}},
+  \code{\link{pixellate.owin}},
+  \code{\link{pixellate.psp}},
+  \code{\link{pixellate.linnet}},
+  \code{\link{as.im}}
+}
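+\examples{
+  ## a minimal sketch: for a point pattern, pixel values are point counts
+  data(cells)
+  pixellate(cells, dimyx=4)
+}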
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/pixellate.owin.Rd b/man/pixellate.owin.Rd
new file mode 100644
index 0000000..05f0a9e
--- /dev/null
+++ b/man/pixellate.owin.Rd
@@ -0,0 +1,84 @@
+\name{pixellate.owin}  
+\Rdversion{1.1}
+\alias{pixellate.owin}
+\title{
+  Convert Window to Pixel Image
+}
+\description{
+  Convert a window to 
+  a pixel image by measuring the area of intersection
+  between the window and each pixel in a raster.
+}
+\usage{
+\method{pixellate}{owin}(x, W = NULL, ...)
+}
+\arguments{
+  \item{x}{
+    Window (object of class \code{"owin"}) to be converted.
+  }
+  \item{W}{
+    Optional. Window determining the pixel raster on which the conversion
+    should occur. 
+  }
+  \item{\dots}{
+    Optional. Extra arguments passed to \code{\link{as.mask}}
+    to determine the pixel raster.
+  }
+}
+\details{
+  This is a method for the generic function \code{pixellate}.
+
+  It converts a window \code{x} into
+  a pixel image, by measuring the \emph{amount} of \code{x} that is
+  inside each pixel.
+
+  (The related function \code{\link{as.im}}
+  also converts \code{x} into a pixel image,
+  but records only the presence or absence of
+  \code{x} in each pixel.)
+
+  The pixel raster for the conversion is determined by the
+  argument \code{W} and the extra arguments \code{\dots}.
+  \itemize{
+    \item 
+    If \code{W} is given, and it is a binary mask (a window
+    of type \code{"mask"}) then it determines the pixel raster.
+    \item
+    If \code{W} is given, but it is not a binary mask (it is a
+    window of another type) then it will be converted to a binary mask
+    using \code{as.mask(W, \dots)}.
+    \item
+    If \code{W} is not given, it defaults to
+    \code{as.mask(as.rectangle(x), \dots)}
+  }
+  In the second and third cases it would be common to use the
+  argument \code{dimyx} to control the number of pixels. See the Examples.
+
+  The algorithm then computes the area
+  of intersection of each pixel with the window.
+
+  The result is a pixel image with pixel entries equal to these
+  intersection areas.
+}
+\value{
+  A pixel image (object of class \code{"im"}).
+}
+\seealso{
+  \code{\link{pixellate.ppp}},
+  \code{\link{pixellate}},
+  \code{\link{as.im}}
+}
+\examples{
+   data(letterR)
+   plot(pixellate(letterR, dimyx=15))
+   W <- grow.rectangle(as.rectangle(letterR), 0.2)
+   plot(pixellate(letterR, W, dimyx=15))
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/pixellate.ppp.Rd b/man/pixellate.ppp.Rd
new file mode 100644
index 0000000..6831ae5
--- /dev/null
+++ b/man/pixellate.ppp.Rd
@@ -0,0 +1,114 @@
+\name{pixellate.ppp} 
+\alias{pixellate.ppp}
+\alias{as.im.ppp}
+\title{Convert Point Pattern to Pixel Image}
+\description{
+  Converts a point pattern to a pixel image. The value in each pixel
+  is the number of points falling in that pixel, and is typically
+  either 0 or 1.
+}
+\usage{
+\method{pixellate}{ppp}(x, W=NULL, \dots, weights = NULL,
+                        padzero=FALSE, fractional=FALSE, preserve=FALSE)
+
+\method{as.im}{ppp}(X, \dots)
+}
+\arguments{
+  \item{x,X}{Point pattern (object of class \code{"ppp"}).}
+  \item{\dots}{Arguments passed to \code{\link{as.mask}} to determine
+    the pixel resolution}
+  \item{W}{Optional window mask (object of class \code{"owin"}) determining
+    the pixel raster.
+  }
+  \item{weights}{Optional vector of weights associated with the points.}
+  \item{padzero}{
+    Logical value indicating whether to set pixel values
+    to zero outside the window. 
+  }
+  \item{fractional,preserve}{
+    Logical values determining the type of discretisation.
+    See Details.
+  }
+}
+\details{
+  The functions \code{pixellate.ppp} and \code{as.im.ppp}
+  convert a spatial point pattern \code{x} into a pixel
+  image, by counting the number of points (or the total weight of
+  points) falling in each pixel. 
+
+  Calling \code{as.im.ppp} is equivalent to
+  calling \code{pixellate.ppp} with its default arguments.
+  Note that \code{pixellate.ppp} is more general than \code{as.im.ppp}
+  (it has additional arguments for greater flexibility).
+
+  The functions \code{as.im.ppp} and \code{pixellate.ppp}
+  are methods for the generic functions \code{\link{as.im}}
+  and \code{\link{pixellate}} respectively,
+  for the class of point patterns.
+
+  The pixel raster (in which points are counted) is determined
+  by the argument \code{W} if it is present (for \code{pixellate.ppp} only).
+  In this case \code{W} should be a binary mask (a window object of
+  class \code{"owin"} with type \code{"mask"}).
+  Otherwise the pixel raster is determined by
+  extracting the window containing \code{x} and converting it to a
+  binary pixel mask using \code{\link{as.mask}}. The arguments
+  \code{\dots} are passed to \code{\link{as.mask}} to
+  control the pixel resolution.
+
+  If \code{weights} is \code{NULL}, then for each pixel
+  in the mask, the algorithm counts how many points in \code{x} fall
+  in the pixel. This count is usually either 0 (for a pixel with no data
+  points in it) or 1 (for a pixel containing one data point) but may be
+  greater than 1. The result is an image with these counts as its pixel values.
+
+  If \code{weights} is given, it should be a numeric vector of the same
+  length as the number of points in \code{x}. For each pixel, the
+  algorithm finds the total weight associated with points in \code{x} that fall
+  in the given pixel. The result is an image with these total weights
+  as its pixel values.
+
+  By default (if \code{padzero=FALSE}) the resulting pixel image has the same
+  spatial domain as the window of the point pattern \code{x}. If
+  \code{padzero=TRUE} then the resulting pixel image has a rectangular
+  domain; pixels outside the original window are assigned the value zero.
+
+  The discretisation procedure is controlled by the arguments
+  \code{fractional} and \code{preserve}.
+  \itemize{
+    \item 
+    The argument \code{fractional} specifies how data points are mapped to
+    pixels. If \code{fractional=FALSE} (the default),
+    each data point is allocated to the nearest pixel centre.
+    If \code{fractional=TRUE}, each data point is allocated
+    with fractional weight to four pixel centres
+    (the corners of a rectangle containing the data
+    point).
+    \item
+    The argument \code{preserve} specifies what to do with pixels
+    lying near the boundary of the window, if the window is not a rectangle.
+    If \code{preserve=FALSE} (the default), any contributions
+    that are attributed to pixel centres lying outside the window
+    are reset to zero. If \code{preserve=TRUE}, any such contributions
+    are shifted to the nearest pixel lying inside the window, so that
+    the total mass is preserved.
+  }
+}
+\value{
+  A pixel image (object of class \code{"im"}).
+}
+\seealso{
+  \code{\link{pixellate}},
+  \code{\link{im}}, \code{\link{as.im}},
+  \code{\link{density.ppp}}, \code{\link{Smooth.ppp}}.
+}
+\examples{
+  data(humberside)
+  plot(pixellate(humberside))
+  plot(pixellate(humberside, fractional=TRUE))
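+  # a rough check: with preserve=TRUE, no contributions near the boundary
+  # are discarded, so the pixel values should sum to the number of points
+  sum(pixellate(humberside, preserve=TRUE))
+  npoints(humberside)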
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/pixellate.psp.Rd b/man/pixellate.psp.Rd
new file mode 100644
index 0000000..81a293e
--- /dev/null
+++ b/man/pixellate.psp.Rd
@@ -0,0 +1,88 @@
+\name{pixellate.psp}  
+\alias{pixellate.psp}
+\title{
+  Convert Line Segment Pattern to Pixel Image
+}
+\description{
+  Converts a line segment pattern to a pixel image
+  by measuring the length or number of lines intersecting each pixel.
+}
+\usage{
+\method{pixellate}{psp}(x, W=NULL, ..., weights = NULL,
+                        what=c("length", "number"))
+}
+\arguments{
+  \item{x}{
+    Line segment pattern (object of class \code{"psp"}).
+  }
+  \item{W}{
+    Optional window (object of class \code{"owin"}) determining
+    the pixel resolution.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link{as.mask}} to determine
+    the pixel resolution.
+  }
+  \item{weights}{
+    Optional vector of weights associated with each line segment.
+  }
+  \item{what}{
+    String (partially matched) indicating whether to compute the
+    total length of intersection (\code{what="length"}, the default)
+    or the total number of segments intersecting each pixel
+    (\code{what="number"}).
+  }
+}
+\details{
+  This function converts a line segment pattern to a pixel image by computing,
+  for each pixel, the total length of
+  intersection between the pixel and the line segments.
+  Alternatively it can count the number of line segments intersecting
+  each pixel. 
+  
+  This is a method for the generic function
+  \code{\link{pixellate}} for the class of line segment patterns.
+
+  The pixel raster is determined by \code{W}
+  and the optional arguments \code{\dots}.
+  If \code{W} is missing or \code{NULL}, it defaults to the window
+  containing \code{x}.
+  Then \code{W} is converted to a
+  binary pixel mask using \code{\link{as.mask}}. The arguments
+  \code{\dots} are passed to \code{\link{as.mask}} to
+  control the pixel resolution.
+
+  If \code{weights} are given, then the length of the intersection
+  between line segment \code{i} and pixel \code{j} is multiplied by
+  \code{weights[i]} before the lengths are summed for each pixel.
+}
+\value{
+  A pixel image (object of class \code{"im"}) with numeric values.
+}
+\seealso{
+  \code{\link{pixellate}},
+  \code{\link{as.mask}},
+  \code{\link{as.mask.psp}}.
+  
+  Use \code{\link{as.mask.psp}} if you only want to know
+  which pixels are intersected by lines.
+}
+\examples{
+  X <- psp(runif(10),runif(10), runif(10), runif(10), window=owin())
+  plot(pixellate(X))
+  plot(X, add=TRUE)
+  sum(lengths.psp(X))
+  sum(pixellate(X))
+  plot(pixellate(X, what="n"))
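+  # a rough check: with weights, each segment's contribution is scaled,
+  # so the total should be approximately sum(weights * segment lengths)
+  w <- runif(10)
+  sum(pixellate(X, weights=w))
+  sum(w * lengths.psp(X))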
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{manip}
+
diff --git a/man/pixelquad.Rd b/man/pixelquad.Rd
new file mode 100644
index 0000000..adc6ace
--- /dev/null
+++ b/man/pixelquad.Rd
@@ -0,0 +1,86 @@
+\name{pixelquad}
+\alias{pixelquad}
+\title{Quadrature Scheme Based on Pixel Grid}
+\description{
+  Makes a quadrature scheme with a dummy point at every pixel
+  of a pixel image.
+}
+\usage{
+pixelquad(X, W = as.owin(X))
+}
+\arguments{
+  \item{X}{Point pattern (object of class \code{"ppp"}) containing the
+    data points for the quadrature scheme.
+  }
+  \item{W}{
+    Specifies the pixel grid.
+    A pixel image (object of class \code{"im"}),
+    a window (object of class \code{"owin"}), or anything that can
+    be converted to a window by \code{\link{as.owin}}.
+  }
+}
+\value{
+  An object of class \code{"quad"} describing the quadrature scheme
+  (data points, dummy points, and quadrature weights)
+  suitable as the argument \code{Q} of the function \code{\link{ppm}()} for
+  fitting a point process model.
+
+  The quadrature scheme can be inspected using the
+  \code{print} and \code{plot} methods for objects
+  of class \code{"quad"}. 
+}
+\details{
+  This is a method for producing a quadrature scheme
+  for use by \code{\link{ppm}}. It is an alternative to
+  \code{\link{quadscheme}}.
+  
+  The function \code{\link{ppm}} fits a point process model to an
+  observed point pattern using 
+  the Berman-Turner quadrature approximation (Berman and Turner, 1992;
+  Baddeley and Turner, 2000) to the pseudolikelihood of the model. 
+  It requires a quadrature scheme consisting of 
+  the original data point pattern, an additional pattern of dummy points,
+  and a vector of quadrature weights for all these points.
+  Such quadrature schemes are represented by objects of class
+  \code{"quad"}. See \code{\link{quad.object}} for a description of this class.
+
+  Given a grid of pixels, this function creates a quadrature scheme
+  in which there is one dummy point at the centre of each pixel. The
+  counting weights are used (the weight attached to each quadrature
+  point is the area of the pixel divided by the number of quadrature
+  points falling in the same pixel).
+
+  The argument \code{X} specifies the locations of the data points
+  for the quadrature scheme. Typically this would be a point pattern
+  dataset.
+
+  The argument \code{W} specifies the grid of pixels for the dummy
+  points of the quadrature scheme. It should be a pixel image
+  (object of class \code{"im"}), a window (object of class
+  \code{"owin"}), or anything that can
+  be converted to a window by \code{\link{as.owin}}. If \code{W} is a
+  pixel image or a binary mask (a window of type \code{"mask"})
+  then the pixel grid of \code{W} will be used. If \code{W} is a
+  rectangular or polygonal window, then it will first be converted to a
+  binary mask using \code{\link{as.mask}} at the default pixel
+  resolution.
+}
+\examples{
+  W <- owin(c(0,1),c(0,1))
+  X <- runifpoint(42, W)
+  W <- as.mask(W,dimyx=128)
+  pixelquad(X,W)
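+  # sketch of typical usage: inspect the scheme, then fit a model with it;
+  # ppm() fits the uniform Poisson model by default
+  Q <- pixelquad(X, W)
+  summary(Q)
+  fit <- ppm(Q)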
+}
+\seealso{
+  \code{\link{quadscheme}},
+  \code{\link{quad.object}},
+  \code{\link{ppm}}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/plot.anylist.Rd b/man/plot.anylist.Rd
new file mode 100644
index 0000000..eecee01
--- /dev/null
+++ b/man/plot.anylist.Rd
@@ -0,0 +1,233 @@
+\name{plot.anylist}
+\alias{plot.anylist}
+\title{Plot a List of Things}
+\description{
+  Plots a list of objects, either as an array of panels
+  or as a series of separate plots.
+}
+\usage{
+  \method{plot}{anylist}(x, \dots, main, arrange=TRUE,
+   nrows=NULL, ncols=NULL, main.panel=NULL,
+   mar.panel=c(2,1,1,2), hsep=0, vsep=0,
+   panel.begin=NULL, panel.end=NULL, panel.args=NULL,
+   panel.begin.args=NULL, panel.end.args=NULL, 
+   plotcommand="plot",
+   adorn.left=NULL, adorn.right=NULL, adorn.top=NULL, adorn.bottom=NULL,
+   adorn.size=0.2, equal.scales=FALSE, halign=FALSE, valign=FALSE)
+}
+\arguments{
+  \item{x}{
+    An object of the class \code{"anylist"}.
+    Essentially a list of objects.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot}} when generating each
+    plot panel.
+  }
+  \item{main}{
+    Overall heading for the plot.
+  }
+  \item{arrange}{
+    Logical flag indicating whether to plot the objects
+    side-by-side on a single page (\code{arrange=TRUE})
+    or plot them individually in a succession of frames
+    (\code{arrange=FALSE}).
+  }
+  \item{nrows,ncols}{
+    Optional. The number of rows/columns in the plot layout
+    (assuming \code{arrange=TRUE}).
+    You can specify either or both of these numbers.
+  }
+  \item{main.panel}{
+    Optional. A character string, or a vector of character strings,
+    giving the headings for each of the objects.
+  }
+  \item{mar.panel}{
+    Size of the margins outside each plot panel.
+    A numeric vector of length 4 giving the bottom, left, top,
+    and right margins in that order. (Alternatively the vector
+    may have length 1 or 2 and will be replicated to length 4).
+    See the section on \emph{Spacing between plots}.
+  }
+  \item{hsep,vsep}{
+    Additional horizontal and vertical separation between plot panels,
+    expressed in the same units as \code{mar.panel}. 
+  }
+  \item{panel.begin,panel.end}{
+    Optional. Functions that will be executed before and after each panel is
+    plotted. See Details.
+  }
+  \item{panel.args}{
+    Optional. Function that determines different plot arguments
+    for different panels. See Details.
+  }
+  \item{panel.begin.args}{
+    Optional. List of additional arguments for \code{panel.begin}
+    when it is a function.
+  }
+  \item{panel.end.args}{
+    Optional. List of additional arguments for \code{panel.end}
+    when it is a function.
+  }
+  \item{plotcommand}{
+    Optional.
+    Character string containing the name of the command that should be
+    executed to plot each panel. 
+  }
+  \item{adorn.left,adorn.right,adorn.top,adorn.bottom}{
+    Optional. Functions (with no arguments) that will be executed to
+    generate additional plots at the margins (left, right, top and/or
+    bottom, respectively) of the array of plots.
+  }
+  \item{adorn.size}{
+    Relative width (as a fraction of the other panels' widths)
+    of the margin plots.
+  }
+  \item{equal.scales}{
+    Logical value indicating whether the components
+    should be plotted at (approximately) the same physical scale. 
+  }
+  \item{halign,valign}{
+    Logical values indicating whether panels in a column
+    should be aligned to the same \eqn{x} coordinate system
+    (\code{halign=TRUE}) and whether panels in a row should
+    be aligned to the same \eqn{y} coordinate system (\code{valign=TRUE}).
+    These are applicable only if \code{equal.scales=TRUE}.
+  }
+}
+\value{
+  Null.
+}
+\details{
+  This is the \code{plot} method for the class \code{"anylist"}.
+
+  An object of class \code{"anylist"} represents
+  a list of objects intended to be treated in the same way.
+
+  In the \pkg{spatstat} package, various functions produce
+  an object of class \code{"anylist"}, essentially a list of
+  objects of the same kind. 
+  These objects can be plotted in a nice arrangement
+  using \code{plot.anylist}. See the Examples.
+
+  The argument \code{panel.args} determines extra graphics parameters
+  for each panel. It should be a function that will be called
+  as \code{panel.args(i)} where \code{i} is the panel number.
+  Its return value should be a list of graphics parameters that can
+  be passed to the relevant \code{plot} method. These parameters
+  override any parameters specified in the \code{\dots} arguments.
+
+  The arguments \code{panel.begin} and \code{panel.end}
+  determine graphics that will be plotted before and after
+  each panel is plotted. They may be objects
+  of some class that can be plotted
+  with the generic \code{plot} command. Alternatively they
+  may be functions that will be
+  called as \code{panel.begin(i, y, main=main.panel[i])}
+  and \code{panel.end(i, y, add=TRUE)} where \code{i} is the panel
+  number and \code{y = x[[i]]}.
+
+  If all entries of \code{x} are pixel images,
+  the function \code{\link{image.listof}} is called to control
+  the plotting. The arguments \code{equal.ribbon} and \code{col}
+  can be used to determine the colour map or maps applied.
+
+  If \code{equal.scales=FALSE} (the default), then the 
+  plot panels will have equal height on the plot device
+  (unless there is only one column of panels, in which case
+  they will have equal width on the plot device). This means that the
+  objects are plotted at different physical scales, by default.
+
+  If \code{equal.scales=TRUE}, then the dimensions of the
+  plot panels on the plot device will be proportional
+  to the spatial dimensions of the
+  corresponding components of \code{x}. This means that the
+  objects will be plotted at \emph{approximately} equal physical scales.
+  If these objects have very different spatial sizes,
+  the plot command could fail (when it tries
+  to plot the smaller objects at a tiny scale), with an error
+  message that the figure margins are too large.
+
+  The objects will be plotted at \emph{exactly} equal
+  physical scales, and \emph{exactly} aligned on the device,
+  under the following conditions:
+  \itemize{
+    \item
+    every component of \code{x} is a spatial object
+    whose position can be shifted by \code{\link{shift}};
+    \item
+    \code{panel.begin} and \code{panel.end} are either
+    \code{NULL} or they are spatial objects 
+    whose position can be shifted by \code{\link{shift}};
+    \item
+    \code{adorn.left}, 
+    \code{adorn.right}, 
+    \code{adorn.top} and 
+    \code{adorn.bottom} are all \code{NULL}.
+  }
+
+  Another special case is when every component of \code{x} is an
+  object of class \code{"fv"} representing a function.
+  If \code{equal.scales=TRUE} then all these functions will 
+  be plotted with the same axis scales
+  (i.e. with the same \code{xlim} and the same \code{ylim}).
+}
+\section{Spacing between plots}{
+  The spacing between individual plots is controlled by the parameters
+  \code{mar.panel}, \code{hsep} and \code{vsep}.
+
+  If \code{equal.scales=FALSE}, the plot panels are
+  logically separate plots. The margins for each panel are
+  determined by the argument \code{mar.panel} which becomes 
+  the graphics parameter \code{mar}
+  described in the help file for \code{\link{par}}.
+  One unit of \code{mar} corresponds to one line of text in the margin.
+  If \code{hsep} or \code{vsep} are present, \code{mar.panel}
+  is augmented by \code{c(vsep, hsep, vsep, hsep)/2}.
+  
+  If \code{equal.scales=TRUE}, all the plot panels are drawn
+  in the same coordinate system which represents a physical scale.
+  The unit of measurement for \code{mar.panel[c(1,3)]}
+  is one-sixth of the greatest height of any object plotted in the same row
+  of panels, and the unit for \code{mar.panel[c(2,4)]} is one-sixth of the
+  greatest width of any object plotted in the same column of panels.
+  If \code{hsep} or \code{vsep} are present,
+  they are interpreted in the same units as \code{mar.panel[2]}
+  and \code{mar.panel[1]} respectively.
+}
+\seealso{
+  \code{\link{contour.listof}},
+  \code{\link{image.listof}},
+  \code{\link{density.splitppp}}
+}
+\section{Error messages}{
+  If the error message \sQuote{Figure margins too large}
+  occurs, this generally means that one of the
+  objects has a much smaller physical scale than the others.
+  Ensure that \code{equal.scales=FALSE}
+  and increase the values of \code{mar.panel}.
+}
+\examples{
+ trichotomy <- list(regular=cells,
+                    random=japanesepines,
+                    clustered=redwood)
+ K <- lapply(trichotomy, Kest)
+ K <- as.anylist(K)
+ plot(K, main="")
+
+# list of 3D point patterns
+ ape1 <- osteo[osteo$shortid==4, "pts", drop=TRUE]
+ class(ape1)
+ plot(ape1, main.panel="", mar.panel=0.1, hsep=0.7, vsep=1,
+      cex=1.5, pch=21, bg='white')
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.bermantest.Rd b/man/plot.bermantest.Rd
new file mode 100644
index 0000000..bf34028
--- /dev/null
+++ b/man/plot.bermantest.Rd
@@ -0,0 +1,93 @@
+\name{plot.bermantest}
+\alias{plot.bermantest}
+\title{Plot Result of Berman Test}
+\description{
+  Plot the result of Berman's test of goodness-of-fit
+}
+\usage{
+\method{plot}{bermantest}(x, ...,
+                   lwd=par("lwd"), col=par("col"), lty=par("lty"),
+                   lwd0=lwd, col0=2, lty0=2)
+}
+\arguments{
+  \item{x}{
+    Object to be plotted. An object of class \code{"bermantest"}
+    produced by \code{\link{berman.test}}.
+  }
+  \item{\dots}{
+    extra arguments that will be passed to the plotting function
+    \code{\link{plot.ecdf}}.
+  }
+  \item{col,lwd,lty}{
+    The width, colour and type of lines used to plot the
+    empirical distribution curve.
+  }
+  \item{col0,lwd0,lty0}{
+    The width, colour and type of lines used to plot the
+    predicted (null) distribution curve.
+  }
+}
+\value{
+  \code{NULL}.
+}
+\details{
+  This is the \code{plot} method for the class \code{"bermantest"}.
+  An object of this class represents the outcome of Berman's test
+  of goodness-of-fit of a spatial Poisson point process model,
+  computed by \code{\link{berman.test}}.
+
+  For the \emph{Z1} test (i.e. if \code{x} was computed using
+  \code{berman.test( ,which="Z1")}), 
+  the plot displays the two cumulative distribution functions
+  that are compared by the test: namely the empirical cumulative distribution
+  function of the covariate at the data points, \eqn{\hat F}{Fhat},
+  and the predicted
+  cumulative distribution function of the covariate under the model,
+  \eqn{F_0}{F0}, both plotted against the value of the covariate.
+  Two vertical lines show the mean values of these two distributions.
+  If the model is correct, the two curves should be close; the test is
+  based on comparing the two vertical lines.
+
+  For the \emph{Z2} test (i.e. if \code{x} was computed using
+  \code{berman.test( ,which="Z2")}), the plot displays the empirical
+  cumulative distribution function of the values
+  \eqn{U_i = F_0(Y_i)}{U[i] = F0(Y[i])} where \eqn{Y_i}{Y[i]} is the
+  value of the covariate at the \eqn{i}-th data point. The diagonal line
+  with equation \eqn{y=x} is also shown. Two vertical lines show the
+  mean of the values \eqn{U_i}{U[i]} and the value \eqn{1/2}. If the
+  model is correct, the two curves should be close. The test is based on
+  comparing the two vertical lines. 
+}
+\seealso{
+  \code{\link{berman.test}}
+}
+\examples{
+   # synthetic data: nonuniform Poisson process
+   X <- rpoispp(function(x,y) { 100 * exp(-x) }, win=square(1))
+
+   # fit uniform Poisson process
+   fit0 <- ppm(X, ~1)
+
+   # test covariate = x coordinate
+   xcoord <- function(x,y) { x }
+
+   # test wrong model
+   k <- berman.test(fit0, xcoord, "Z1")
+   
+   # plot result of test
+   plot(k, col="red", col0="green")
+
+   # Z2 test
+   k2 <- berman.test(fit0, xcoord, "Z2")
+   plot(k2, col="red", col0="green")
+}
+
+\author{\adrian,
+  \rolf
+  and \ege
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.cdftest.Rd b/man/plot.cdftest.Rd
new file mode 100644
index 0000000..be2d0f2
--- /dev/null
+++ b/man/plot.cdftest.Rd
@@ -0,0 +1,113 @@
+\name{plot.cdftest}
+\alias{plot.cdftest}
+\title{Plot a Spatial Distribution Test}
+\description{
+  Plot the result of a spatial distribution test
+  computed by \code{cdf.test}.
+}
+\usage{
+\method{plot}{cdftest}(x, ...,
+                   style=c("cdf", "PP", "QQ"),
+                   lwd=par("lwd"), col=par("col"), lty=par("lty"),
+                   lwd0=lwd, col0=2, lty0=2,
+                   do.legend)
+}
+\arguments{
+  \item{x}{
+    Object to be plotted. An object of class \code{"cdftest"}
+    produced by a method for \code{\link{cdf.test}}.
+  }
+  \item{\dots}{
+    extra arguments that will be passed to the plotting function
+    \code{\link{plot.default}}.
+  }
+  \item{style}{
+    Style of plot. See Details.
+  }
+  \item{col,lwd,lty}{
+    The width, colour and type of lines used to plot the
+    empirical curve (the empirical distribution, or PP plot or QQ plot).
+  }
+  \item{col0,lwd0,lty0}{
+    The width, colour and type of lines used to plot the
+    reference curve (the predicted distribution, or the diagonal).
+  }
+  \item{do.legend}{
+    Logical value indicating whether to add an
+    explanatory legend. Applies only when \code{style="cdf"}.
+  }
+}
+\value{
+  \code{NULL}.
+}
+\details{
+  This is the \code{plot} method for the class \code{"cdftest"}.
+  An object of this class represents the outcome of
+  a spatial distribution test, computed by \code{\link{cdf.test}},
+  and based on either the Kolmogorov-Smirnov,
+  \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises
+  or Anderson-Darling test.
+
+  If \code{style="cdf"} (the default), 
+  the plot displays the two cumulative distribution functions
+  that are compared by the test: namely the empirical cumulative distribution
+  function of the covariate at the data points, and the predicted
+  cumulative distribution function of the covariate under the model,
+  both plotted against the value of the covariate. The
+  Kolmogorov-Smirnov test statistic (for example)
+  is the maximum vertical separation
+  between the two curves.
+
+  If \code{style="PP"} then the P-P plot is drawn. The
+  \eqn{x} coordinates of the plot are cumulative
+  probabilities for the covariate under the model.
+  The \eqn{y} coordinates are cumulative probabilities
+  for the covariate at the data points. The diagonal line
+  \eqn{y=x} is also drawn for reference. The Kolmogorov-Smirnov
+  test statistic is the maximum vertical separation
+  between the P-P plot and the diagonal reference line.
+
+  If \code{style="QQ"} then the Q-Q plot is drawn. The
+  \eqn{x} coordinates of the plot are quantiles
+  of the covariate under the model.
+  The \eqn{y} coordinates are quantiles of the 
+  covariate at the data points. The diagonal line
+  \eqn{y=x} is also drawn for reference. The Kolmogorov-Smirnov
+  test statistic cannot be read off the Q-Q plot.
+}
+\seealso{
+  \code{\link{cdf.test}}
+}
+\examples{
+   op <- options(useFancyQuotes=FALSE)
+
+   # synthetic data: nonuniform Poisson process
+   X <- rpoispp(function(x,y) { 100 * exp(x) }, win=square(1))
+
+   # fit uniform Poisson process
+   fit0 <- ppm(X, ~1)
+
+   # test covariate = x coordinate
+   xcoord <- function(x,y) { x }
+
+   # test wrong model
+   k <- cdf.test(fit0, xcoord)
+
+   # plot result of test
+   plot(k, lwd0=3)
+
+   plot(k, style="PP")
+
+   plot(k, style="QQ")
+
+   options(op)
+}
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.colourmap.Rd b/man/plot.colourmap.Rd
new file mode 100644
index 0000000..9d2d339
--- /dev/null
+++ b/man/plot.colourmap.Rd
@@ -0,0 +1,97 @@
+\name{plot.colourmap}
+\alias{plot.colourmap}
+\title{Plot a Colour Map}
+\description{
+  Displays a colour map as a colour ribbon
+}
+\usage{
+\method{plot}{colourmap}(x, ...,
+       main, xlim = NULL, ylim = NULL, vertical = FALSE, axis = TRUE,
+       labelmap=NULL, gap=0.25, add=FALSE)
+}
+\arguments{
+  \item{x}{Colour map to be plotted. An object of class \code{"colourmap"}.}
+  \item{\dots}{
+    Graphical arguments passed to \code{\link{image.default}}
+    or \code{\link{axis}}.
+  }
+  \item{main}{Main title for plot. A character string.}
+  \item{xlim}{
+    Optional range of \code{x} values for the location of the
+    colour ribbon.
+  }
+  \item{ylim}{
+    Optional range of \code{y} values for the location of the
+    colour ribbon.
+  }
+  \item{vertical}{Logical flag determining whether the colour ribbon
+    is plotted as a horizontal strip (\code{FALSE}) or a vertical strip
+    (\code{TRUE}).}
+  \item{axis}{Logical flag determining whether an axis should be plotted
+    showing the numerical values that are mapped to the colours.
+  }
+  \item{labelmap}{
+    Function. If this is present, then the labels on the plot,
+    which indicate the input values corresponding to particular colours,
+    will be transformed by \code{labelmap} before being displayed
+    on the plot. Typically used to simplify or shorten the
+    labels on the plot.
+  }
+  \item{gap}{
+    Distance between separate blocks of colour, as a fraction of
+    the width of one block, if the colourmap is discrete.
+  }
+  \item{add}{
+    Logical value indicating whether to add the colourmap to the
+    existing plot (\code{add=TRUE}), or to start a new plot
+    (\code{add=FALSE}, the default).
+  }
+}
+\details{
+  This is the plot method for the class \code{"colourmap"}.
+  An object of this class
+  (created by the function \code{\link{colourmap}})
+  represents a colour map or
+  colour lookup table associating colours with each data value.
+
+  The command \code{plot.colourmap} displays the colour map as a colour
+  ribbon or as a colour legend (a sequence of blocks of colour).
+  This plot can be useful on its own to inspect the colour map.
+
+  If the domain of the colourmap is an interval of real numbers,
+  the colourmap is displayed as a continuous ribbon of colour.
+  If the domain of the colourmap is a finite set of inputs,
+  the colours are displayed as separate blocks of colour.
+  The separation between blocks is equal to \code{gap} times
+  the width of one block. 
+
+  To annotate an existing plot with an explanatory colour ribbon
+  or colour legend,
+  specify \code{add=TRUE} and use the arguments \code{xlim}
+  and/or \code{ylim} to control the physical position of the ribbon
+  on the plot.
+
+  Labels explaining the colour map are
+  drawn by \code{\link[graphics]{axis}} and can be modified by
+  specifying arguments that will be passed to this function.
+}
+\value{
+  None.
+}
+\seealso{\code{\link{colourmap}}}
+\examples{
+  co <- colourmap(rainbow(100), breaks=seq(-1,1,length=101))
+  plot(co)
+  plot(co, col.ticks="pink")
+  ca <- colourmap(rainbow(8), inputs=letters[1:8])
+  plot(ca, vertical=TRUE)
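+  # sketch: labelmap transforms the displayed tick labels,
+  # here rounding them to two significant figures
+  plot(co, labelmap=function(x) signif(x, 2))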
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{color}
+\keyword{hplot}
diff --git a/man/plot.dppm.Rd b/man/plot.dppm.Rd
new file mode 100644
index 0000000..e567b74
--- /dev/null
+++ b/man/plot.dppm.Rd
@@ -0,0 +1,70 @@
+\name{plot.dppm}
+\alias{plot.dppm}
+\title{Plot a fitted determinantal point process}
+\description{
+  Plots a fitted determinantal point process model,
+  displaying the fitted intensity and the fitted summary function.
+}
+\usage{
+  \method{plot}{dppm}(x, ..., what=c("intensity", "statistic"))
+}
+\arguments{
+  \item{x}{
+    Fitted determinantal point process model.
+    An object of class \code{"dppm"}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot.ppm}}
+    and \code{\link{plot.fv}} to control the plot.
+  }
+  \item{what}{
+    Character vector determining what will be plotted.
+  }
+}
+\details{
+  This is a method for the generic function
+  \code{\link{plot}} for the class \code{"dppm"} of fitted
+  determinantal point process models.
+
+  The argument \code{x} should be a determinantal point process model
+  (object of class \code{"dppm"}) obtained using
+  the function \code{\link{dppm}}.
+
+  The choice of plots (and the order in which they are
+  displayed) is controlled by the argument \code{what}.
+  The options (partially matched) are \code{"intensity"} and
+  \code{"statistic"}.
+
+  This command is capable of producing two different plots:
+  \describe{
+
+    \item{what="intensity"}{specifies the fitted intensity of the model,
+    which is plotted using \code{\link{plot.ppm}}. By default this plot
+    is not produced for stationary models.}
+
+    \item{what="statistic"}{specifies the empirical and fitted summary
+    statistics, which are plotted using \code{\link{plot.fv}}. This is
+    only meaningful if the model has been fitted using the Method of
+    Minimum Contrast, and it is turned off otherwise.}
+
+  }
+}
+\value{
+  Null.
+}
+\examples{
+  fit <- dppm(swedishpines ~ x + y, dppGauss())
+  plot(fit)
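+  # the 'what' argument selects a single panel, e.g. the fitted intensity
+  plot(fit, what="intensity")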
+}
+\seealso{
+  \code{\link{dppm}},
+  \code{\link{plot.ppm}}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/plot.envelope.Rd b/man/plot.envelope.Rd
new file mode 100644
index 0000000..fc898e3
--- /dev/null
+++ b/man/plot.envelope.Rd
@@ -0,0 +1,59 @@
+\name{plot.envelope}
+\alias{plot.envelope}
+\title{Plot a Simulation Envelope}
+\description{
+  Plot method for the class \code{"envelope"}.
+}
+\usage{
+ \method{plot}{envelope}(x, \dots, main)
+}
+\arguments{
+  \item{x}{
+    An object of class \code{"envelope"}, containing the variables to be plotted
+    or variables from which the plotting coordinates can be computed.
+  }
+  \item{main}{Main title for plot.}
+  \item{\dots}{
+    Extra arguments passed to \code{\link{plot.fv}}.
+  }
+}
+\value{
+  Either \code{NULL}, or a data frame giving the meaning of the
+  different line types and colours.
+}
+\details{
+  This is the \code{plot} method for the class \code{"envelope"}
+  of simulation envelopes. Objects of this class are
+  created by the command \code{\link{envelope}}.
+
+  This plot method is currently identical to \code{\link{plot.fv}}.
+
+  Its default behaviour is to shade the region
+  between the upper and lower envelopes in a light grey colour.
+  To suppress the shading and plot the upper and lower envelopes
+  as curves, set \code{shade=NULL}.
+  To change the colour of the shading, use the argument \code{shadecol}
+  which is passed to \code{\link{plot.fv}}. 
+  
+  See \code{\link{plot.fv}} for further information on how to
+  control the plot.
+}
+\examples{
+   data(cells)
+   E <- envelope(cells, Kest, nsim=19)
+   plot(E)
+   plot(E, sqrt(./pi) ~ r)
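+   # suppress the grey shading, or change its colour via shadecol
+   plot(E, shade=NULL)
+   plot(E, shadecol="lightblue")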
+}
+\seealso{
+  \code{\link{envelope}},
+  \code{\link{plot.fv}}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{hplot}
+ 
diff --git a/man/plot.fasp.Rd b/man/plot.fasp.Rd
new file mode 100644
index 0000000..70e0367
--- /dev/null
+++ b/man/plot.fasp.Rd
@@ -0,0 +1,159 @@
+\name{plot.fasp}
+\alias{plot.fasp}
+\title{Plot a Function Array}
+\description{
+   Plots an array of summary functions, usually associated with a
+   point pattern, stored in an object of class \code{"fasp"}.
+   A method for \code{plot}.
+}
+
+\usage{
+   \method{plot}{fasp}(x,formule=NULL, \dots,
+                        subset=NULL, title=NULL, banner=TRUE,
+                        transpose=FALSE,
+                        samex=FALSE, samey=FALSE,
+                        mar.panel=NULL,
+                        outerlabels=TRUE, cex.outerlabels=1.25,
+                        legend=FALSE)
+}
+
+\arguments{
+  \item{x}{An object of class \code{"fasp"} representing a
+    function array.
+  }
+  \item{formule}{
+    A formula or list of formulae indicating what
+    variables are to be plotted against what variable. Each formula is
+    either an R language formula object, or a string that can be parsed
+    as a formula. If \code{formule} is a list, its \eqn{k^{th}}{k-th} component
+    should be applicable to the \eqn{(i,j)^{th}}{(i,j)-th}
+    plot where \code{x$which[i,j]=k}.  If the formula is left
+    as \code{NULL}, then \code{plot.fasp} attempts to use the component
+    \code{default.formula} of \code{x}.  If that component is NULL
+    as well, it gives up.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot.fv}} to control 
+    the individual plot panels. 
+  }
+  \item{subset}{
+    A logical vector, or a vector of indices, or an
+    expression or a character string, or a \bold{list} of such,
+    indicating a subset of the data to be included in each plot.
+    If \code{subset} is a list, its \eqn{k^{th}}{k-th} component
+    should be applicable to the \eqn{(i,j)^{th}}{(i,j)-th} plot
+    where \code{x$which[i,j]=k}.
+  }
+  \item{title}{
+    Overall title for the plot.
+  }
+  \item{banner}{
+    Logical. If \code{TRUE}, the overall title is plotted.
+    If \code{FALSE}, the overall title is not plotted
+    and no space is allocated for it.
+  }
+  \item{transpose}{
+    Logical. If \code{TRUE}, rows and columns will be exchanged.
+  }
+  \item{samex,samey}{
+    Logical values indicating whether all individual plot panels should have the
+    same x axis limits and the same y axis limits, respectively.
+    This makes it easier to compare the plots.
+  }
+  \item{mar.panel}{
+    Vector of length 4 giving the value of the
+    graphics parameter \code{mar} controlling the size of plot margins
+    for each individual plot panel. See \code{\link{par}}.
+  }
+  \item{outerlabels}{Logical.
+    If \code{TRUE}, the row and column names of the array of functions
+    are plotted in the margins of the array of plot panels.
+    If \code{FALSE}, each individual plot panel is labelled by its
+    row and column name.
+  }
+  \item{cex.outerlabels}{
+    Character expansion factor for row and column labels of array.
+  }
+  \item{legend}{
+    Logical flag determining whether to plot a legend in each panel.
+  }
+}
+
+\details{
+  An object of class \code{"fasp"} represents
+  an array of summary functions, usually associated with a point
+  pattern. See \code{\link{fasp.object}} for details.
+  Such an object is created, for example, by \code{\link{alltypes}}.
+
+  The function \code{plot.fasp} is
+  a method for \code{plot}.  It calls \code{\link{plot.fv}} to plot the
+  individual panels.
+
+  For information about the interpretation of the
+  arguments \code{formule} and \code{subset},
+  see \code{\link{plot.fv}}.
+
+  Arguments that are often passed through \code{...} include
+  \code{col} to control the colours of the different lines in a panel,
+  and \code{lty} and \code{lwd} to control the line type and line width
+  of the different lines in a panel. The argument \code{shade}
+  can also be used to display confidence intervals or significance bands
+  as filled grey shading. See \code{\link{plot.fv}}.
+  
+  The argument \code{title}, if present, will determine the
+  overall title of the plot. If it is absent, it defaults to \code{x$title}.
+  Titles for the individual plot panels will be taken from
+  \code{x$titles}.
+}
+
+\value{None.}
+
+\section{Warnings}{
+  (Each component of) the \code{subset} argument may be a
+  logical vector (of the same length as the vectors of data which
+  are extracted from \code{x}), or a vector of indices, or an
+  \bold{expression} such as \code{expression(r<=0.2)}, or a text string,
+  such as \code{"r<=0.2"}.
+
+  Attempting a syntax such as \code{subset = r<=0.2} (without
+  wrapping \code{r<=0.2} either in quote marks or in \code{expression()})
+  will cause this function to fall over.
+
+  Variables referred to in any formula must exist in the data frames
+  stored in \code{x}.  What the names of these variables are will
+  of course depend upon the nature of \code{x}.
+}
+
+\seealso{
+  \code{\link{alltypes}},
+  \code{\link{plot.fv}},
+  \code{\link{fasp.object}}
+}
+
+\examples{
+   \dontrun{
+   # Bramble Canes data.
+   data(bramblecanes)
+
+   X.G <- alltypes(bramblecanes,"G",dataname="Bramblecanes",verb=TRUE)
+   plot(X.G)
+   plot(X.G,subset="r<=0.2")
+   plot(X.G,formule=asin(sqrt(cbind(km,theo))) ~ asin(sqrt(theo)))
+   plot(X.G,fo=cbind(km,theo) - theo~r,subset="r<=0.2")
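+   # samex/samey give all panels common axis limits, easing comparison
+   plot(X.G, samey=TRUE, subset="r<=0.2")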
+
+   # Simulated data.
+   pp <- runifpoint(350, owin(c(0,1),c(0,1)))
+   pp <- pp \%mark\% factor(c(rep(1,50),rep(2,100),rep(3,200)))
+   X.K <- alltypes(pp,"K",verb=TRUE,dataname="Fake Data")
+   plot(X.K,fo=cbind(border,theo)~theo,subset="theo<=0.75")
+   }
+}
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+  }
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.fv.Rd b/man/plot.fv.Rd
new file mode 100644
index 0000000..589f36c
--- /dev/null
+++ b/man/plot.fv.Rd
@@ -0,0 +1,248 @@
+\name{plot.fv}
+\alias{plot.fv}
+\title{Plot Function Values}
+\description{
+  Plot method for the class \code{"fv"}.
+}
+\usage{
+ \method{plot}{fv}(x, fmla, \dots, subset=NULL, lty=NULL, col=NULL, lwd=NULL,
+           xlim=NULL, ylim=NULL, xlab=NULL, ylab=NULL, ylim.covers=NULL,
+           legend=!add, legendpos="topleft", legendavoid=missing(legendpos),
+           legendmath=TRUE, legendargs=list(),
+           shade=fvnames(x, ".s"), shadecol="grey",
+           add=FALSE, log="",
+           mathfont=c("italic", "plain", "bold", "bolditalic"), 
+           limitsonly=FALSE)
+}
+\arguments{
+  \item{x}{
+    An object of class \code{"fv"}, containing the variables to be plotted
+    or variables from which the plotting coordinates can be computed.
+  }
+  \item{fmla}{
+    an R language formula 
+    determining which variables or expressions are plotted.
+    Either a formula object, or a string that can be parsed as a
+    formula.
+    See Details.
+  }
+  \item{subset}{
+    (optional) subset of rows of the data frame that will be plotted.
+  }
+  \item{lty}{
+    (optional) numeric vector of values of the graphical parameter
+    \code{lty} controlling the line style of each plot.
+  }
+  \item{col}{
+    (optional) numeric vector of values of the graphical parameter
+    \code{col} controlling the colour of each plot.
+  }
+  \item{lwd}{
+    (optional) numeric vector of values of the graphical parameter
+    \code{lwd} controlling the line width of each plot.
+  }
+  \item{xlim}{
+    (optional) range of x axis
+  }
+  \item{ylim}{
+    (optional) range of y axis
+  }
+  \item{xlab}{
+    (optional) label for x axis
+  }
+  \item{ylab}{
+    (optional) label for y axis
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{plot.default}.
+  }
+  \item{ylim.covers}{
+    Optional vector of \eqn{y} values that must be included in the
+    \eqn{y} axis. For example \code{ylim.covers=0} will ensure that the
+    \eqn{y} axis includes the origin.
+  }
+  \item{legend}{
+    Logical flag or \code{NULL}. If \code{legend=TRUE}, the algorithm
+    plots a legend in the top left corner of the plot,
+    explaining the meaning of the different line types and colours.
+  }
+  \item{legendpos}{
+    The position of the legend. Either a character string keyword
+    (see \code{\link[graphics]{legend}} for keyword options)
+    or a pair of coordinates in the format \code{list(x,y)}.
+    Alternatively if \code{legendpos="float"}, a location will be
+    selected inside the plot region, avoiding overlap with the graphics.
+  }
+  \item{legendavoid}{
+    Whether to avoid collisions between the legend and the graphics.
+    Logical value.
+    If \code{TRUE}, the code will check for collisions between the
+    legend box and the graphics, and will override \code{legendpos}
+    if a collision occurs.
+    If \code{FALSE}, the value of \code{legendpos} is always respected.
+  }
+  \item{legendmath}{
+    Logical. If \code{TRUE}, the legend will display the
+    mathematical notation for each curve. If \code{FALSE}, the legend text
+    is the identifier (column name) for each curve.
+  }
+  \item{legendargs}{
+    Named list containing additional arguments to be passed to
+    \code{\link{legend}} controlling the appearance of the legend.
+  }
+  \item{shade}{
+    A character vector giving the names of two columns of \code{x},
+    or another type of index that identifies two columns.
+    When the corresponding curves are plotted,
+    the region between the curves will be shaded in light grey.
+    The object \code{x} may or may not contain two columns which are designated
+    as boundaries for shading; they are identified by \code{fvnames(x, ".s")}.
+    The default is to shade between these two curves
+    if they exist. To suppress this behaviour, set \code{shade=NULL}.
+  }
+  \item{shadecol}{
+    The colour to be used in the \code{shade} plot.
+    A character string or an integer specifying a colour.
+  }
+  \item{add}{
+    Logical. Whether the plot should be added to an existing plot
+  }
+  \item{log}{
+    A character string which contains \code{"x"} if the x axis is to
+    be logarithmic, \code{"y"} if the y axis is to be logarithmic and
+    \code{"xy"} or \code{"yx"} if both axes are to be logarithmic.
+  }
+  \item{mathfont}{
+    Character string. The font to be used for mathematical
+    expressions in the axis labels and the legend.
+  }
+  \item{limitsonly}{
+    Logical. If \code{FALSE}, plotting is performed normally.
+    If \code{TRUE}, no plotting is performed at all; 
+    just the \eqn{x} and \eqn{y} limits of the plot are computed
+    and returned. 
+  }
+}
+\value{
+  Invisible: either \code{NULL}, or a data frame giving the meaning of the
+  different line types and colours.
+}
+\details{
+  This is the \code{plot} method for the class \code{"fv"}.
+
+  The use of the argument \code{fmla} is like \code{plot.formula}, but offers
+  some extra functionality.
+
+  The left and right hand sides of \code{fmla} are evaluated,
+  and the results are plotted against each other
+  (the left side on the \eqn{y} axis 
+  against the right side on the \eqn{x} axis).
+
+  The left and right hand sides of \code{fmla} may be
+  the names of columns of the data frame \code{x},
+  or expressions involving these names. If a variable in \code{fmla}
+  is not the name of a column of \code{x}, the algorithm will search for
+  an object of this name in the environment where \code{plot.fv} was
+  called, and then in the enclosing environment, and so on. 
+
+  Multiple curves may be specified by a single formula
+  of the form 
+  \code{cbind(y1,y2,\dots,yn) ~ x}, where \code{x,y1,y2,\dots,yn} are
+  expressions involving the variables in the data frame.
+  Each of the variables \code{y1,y2,\dots,yn} in turn will be plotted
+  against \code{x}. 
+  See the examples.
+
+  Convenient abbreviations which can be used in the formula
+  are 
+  \itemize{
+    \item the symbol \code{.} which represents all the
+    columns in the data frame that will be plotted by default;
+    \item the symbol \code{.x} which represents the function argument;
+    \item the symbol \code{.y} which represents the recommended value
+    of the function.
+  }
+  For further information, see \code{\link{fvnames}}.
+
+  The value returned by this plot function indicates the
+  meaning of the line types and colours in the plot. It can be used
+  to make a suitable legend for the plot if you want to do this
+  by hand. See the examples.
+
+  The argument \code{shade} can be used to display critical bands
+  or confidence intervals. If it is not \code{NULL}, then it should be
+  a subset index for the columns of \code{x}, that identifies exactly
+  2 columns. When the corresponding curves are plotted, the region
+  between the curves will be shaded in light grey. See the Examples.
+
+  The default values of \code{lty}, \code{col} and \code{lwd} can
+  be changed using \code{\link{spatstat.options}("plot.fv")}.
+
+  Use \code{type = "n"} to create the plot region and draw the axes
+  without plotting any data.
+
+  Use \code{limitsonly=TRUE} to suppress all plotting
+  and just compute the \eqn{x} and \eqn{y} limits. This can be used
+  to calculate common \eqn{x} and \eqn{y} scales for several plots.
+
+  To change the kind of parenthesis enclosing the
+  explanatory text about the unit of length, use
+  \code{\link{spatstat.options}('units.paren')}
+}
+\examples{
+   K <- Kest(cells)
+   # K is an object of class "fv"
+
+   plot(K, iso ~ r)                # plots iso against r
+
+   plot(K, sqrt(iso/pi) ~ r)   # plots sqrt(iso/pi)  against r
+
+   plot(K, cbind(iso,theo) ~ r)   # plots iso against r  AND theo against r
+
+   plot(K, .  ~ r)            # plots all available estimates of K against r
+
+   plot(K, sqrt(./pi) ~ r)   # plots all estimates of L-function
+                             # L(r) = sqrt(K(r)/pi)
+
+   plot(K, cbind(iso,theo) ~ r, col=c(2,3))
+                                   # plots iso against r  in colour 2
+                                   # and theo against r in colour 3
+
+   plot(K, iso ~ r, subset=quote(r < 0.2))
+                                   # plots iso against r for r < 0.2
+
+   # Can't remember the names of the columns? No problem..
+   plot(K, sqrt(./pi) ~ .x)
+
+   # making a legend by hand
+   v <- plot(K, . ~ r, legend=FALSE)
+   legend("topleft", legend=v$meaning, lty=v$lty, col=v$col)
+
+   # significance bands
+   KE <- envelope(cells, Kest, nsim=19)
+   plot(KE, shade=c("hi", "lo"))
+
+   # how to display two functions on a common scale
+   Kr <- Kest(redwood)
+   a <- plot(K, limitsonly=TRUE)
+   b <- plot(Kr, limitsonly=TRUE)
+   xlim <- range(a$xlim, b$xlim)
+   ylim <- range(a$ylim, b$ylim)
+   opa <- par(mfrow=c(1,2))
+   plot(K, xlim=xlim, ylim=ylim)
+   plot(Kr, xlim=xlim, ylim=ylim)
+   par(opa)
+}
+\seealso{
+  \code{\link{fv.object}},
+  \code{\link{Kest}}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{hplot}
+ 
diff --git a/man/plot.hyperframe.Rd b/man/plot.hyperframe.Rd
new file mode 100644
index 0000000..9552e9a
--- /dev/null
+++ b/man/plot.hyperframe.Rd
@@ -0,0 +1,106 @@
+\name{plot.hyperframe}
+\alias{plot.hyperframe}
+\title{Plot Entries in a Hyperframe}
+\description{
+  Plots the entries in a hyperframe, in a series of
+  panels, one panel for each row of the hyperframe.
+}
+\usage{
+   \method{plot}{hyperframe}(x, e, \dots, main, arrange=TRUE,
+                            nrows=NULL, ncols=NULL,
+                            parargs=list(mar=mar * marsize),
+                            marsize=1, mar=c(1,1,3,1))
+}
+\arguments{
+  \item{x}{
+    Data to be plotted. A hyperframe (object of class \code{"hyperframe"}, see
+    \code{\link{hyperframe}}).
+  }
+  \item{e}{
+    How to plot each row. Optional. An \R language call or expression
+    (typically enclosed in \code{\link{quote}()}) that will be
+    evaluated in each row of the hyperframe to generate the plots.
+  }
+  \item{\dots}{
+    Extra arguments controlling the plot (when \code{e} is missing).
+  }
+  \item{main}{Overall title for the array of plots.}
+  \item{arrange}{
+    Logical flag indicating whether to plot the objects
+    side-by-side on a single page (\code{arrange=TRUE})
+    or plot them individually in a succession of frames
+    (\code{arrange=FALSE}).
+  }
+  \item{nrows,ncols}{
+    Optional. The number of rows/columns in the plot layout
+    (assuming \code{arrange=TRUE}).
+    You can specify either or both of these numbers.
+  }
+  \item{parargs}{
+    Optional list of arguments passed to \code{\link{par}} before
+    plotting each panel. Can be used to control margin sizes, etc.
+  }
+  \item{marsize}{
+    Optional scale parameter controlling the sizes of margins around
+    the panels. Incompatible with \code{parargs}.
+  }
+  \item{mar}{
+    Optional numeric vector of length 1, 2 or 4
+    controlling the relative sizes of margins between
+    the panels. Incompatible with \code{parargs}.
+  }
+}
+\details{
+  This is the \code{plot} method for the class \code{"hyperframe"}.
+
+  The argument \code{x} must be a hyperframe (like a data frame,
+  except that the entries can be objects of any class; see
+  \code{\link{hyperframe}}).
+
+  This function generates a series of plots, one plot for each
+  row of the hyperframe. If \code{arrange=TRUE} (the default), then
+  these plots are arranged in a neat array of panels within a single
+  plot frame. If \code{arrange=FALSE}, the plots are simply executed
+  one after another.
+
+  Exactly what is plotted, and how it is plotted, depends on the
+  argument \code{e}. The default (if \code{e} is missing) is to plot
+  only the first column of \code{x}. Each entry in the first column
+  is plotted using the generic \code{\link{plot}} command, together with
+  any extra arguments given in \code{\dots}.
+
+  If \code{e} is present, it should be an \R language expression
+  involving the column names of \code{x}.
+  (It is typically created using \code{\link{quote}} or
+  \code{\link{expression}}.)
+  The expression will be evaluated once for each row of \code{x}.
+  It will be evaluated in an environment where each column name of
+  \code{x} is interpreted as meaning the object in that column
+  in the current row.
+  See the Examples.
+}
+\value{
+  \code{NULL}.
+}
+\seealso{
+  \code{\link{hyperframe}},
+  \code{\link{with.hyperframe}}
+}
+\examples{
+   H <- hyperframe(id=1:10)
+   H$X <- with(H, rpoispp(100))
+   H$D <- with(H, distmap(X))
+   # points only
+   plot(H[,"X"])
+   plot(H, quote(plot(X, main=id)))
+   # points superimposed on images
+   plot(H, quote({plot(D, main=id); plot(X, add=TRUE)}))
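+   # layout can be controlled, e.g. arranging the ten panels in two rows
+   plot(H, quote(plot(X, main=id)), nrows=2)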
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.im.Rd b/man/plot.im.Rd
new file mode 100644
index 0000000..8b9c82c
--- /dev/null
+++ b/man/plot.im.Rd
@@ -0,0 +1,391 @@
+\name{plot.im}
+\alias{plot.im}
+\alias{image.im}
+\title{Plot a Pixel Image}
+\description{
+  Plot a pixel image.
+}
+\usage{
+  \method{plot}{im}(x, \dots,
+                   main,
+                   add=FALSE, clipwin=NULL,
+                   col=NULL, valuesAreColours=NULL, log=FALSE,
+                   ribbon=show.all, show.all=!add,
+                   ribside=c("right", "left", "bottom", "top"),
+                   ribsep=0.15, ribwid=0.05, ribn=1024,
+                   ribscale=1, ribargs=list(), colargs=list(),
+                   useRaster=NULL, workaround=FALSE,
+                   do.plot=TRUE) 
+
+  \method{image}{im}(x, \dots,
+                   main,
+                   add=FALSE, clipwin=NULL,
+                   col=NULL, valuesAreColours=NULL, log=FALSE,
+                   ribbon=show.all, show.all=!add,
+                   ribside=c("right", "left", "bottom", "top"),
+                   ribsep=0.15, ribwid=0.05, ribn=1024,
+                   ribscale=1, ribargs=list(), colargs=list(),
+                   useRaster=NULL, workaround=FALSE, 
+                   do.plot=TRUE) 
+}
+\arguments{
+  \item{x}{
+    The pixel image to be plotted.
+    An object of class \code{"im"} (see \code{\link{im.object}}).
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{\link[graphics]{image.default}}
+    to control the plot. See Details.
+  }
+  \item{main}{Main title for the plot.}
+  \item{add}{
+    Logical value indicating whether to superimpose the image on the
+    existing plot (\code{add=TRUE})
+    or to initialise a new plot (\code{add=FALSE}, the default).
+  }
+  \item{clipwin}{
+    Optional. A window (object of class \code{"owin"}).
+    Only this subset of the image will be displayed.
+  }
+  \item{col}{
+    Colours for displaying the pixel values.
+    Either a character vector of colour values,
+    an object of class \code{\link{colourmap}},
+    or a \code{function} as described under Details.
+  }
+  \item{valuesAreColours}{
+    Logical value. If \code{TRUE}, the pixel values of \code{x}
+    are to be interpreted as colour values.
+  }
+  \item{log}{
+    Logical value. If \code{TRUE}, the colour map will be
+    evenly-spaced on a logarithmic scale.
+  }
+  \item{ribbon}{
+    Logical flag indicating whether to display a ribbon
+    showing the colour map. Default is \code{TRUE}
+    for new plots and \code{FALSE} for added plots.
+  }
+  \item{show.all}{
+    Logical value indicating whether to display all plot elements
+    including the main title and colour ribbon. Default is \code{TRUE}
+    for new plots and \code{FALSE} for added plots.
+  }
+  \item{ribside}{
+    Character string indicating where to display the ribbon
+    relative to the main image.
+  }
+  \item{ribsep}{
+    Factor controlling the space between the ribbon and the image.
+  }
+  \item{ribwid}{
+    Factor controlling the width of the ribbon.
+  }
+  \item{ribn}{
+    Number of different values to display in the ribbon.
+  }
+  \item{ribscale}{
+    Rescaling factor for tick marks. The values on the numerical scale
+    printed beside the ribbon will be multiplied by this rescaling factor.
+  }
+  \item{ribargs}{
+    List of additional arguments passed to
+    \code{\link[graphics]{image.default}} and
+    \code{\link[graphics]{axis}} 
+    to control the display of the ribbon and its scale axis. These may override
+    the \code{\dots} arguments. 
+  }
+  \item{colargs}{
+    List of additional arguments passed to
+    \code{col} if it is a function.
+  }
+  \item{useRaster}{
+    Logical value, passed to \code{\link[graphics]{image.default}}.
+    Images are plotted using a bitmap raster if \code{useRaster=TRUE}
+    or by drawing polygons if \code{useRaster=FALSE}.
+    Bitmap raster display tends to produce better results,
+    but is not supported on all graphics devices.
+    The default is to use bitmap raster display if it is supported.
+  }
+  \item{workaround}{
+    Logical value, specifying whether to use a workaround to avoid a bug
+    which occurs with some device drivers in \R, in which the image
+    has the wrong spatial orientation. See the section on
+    \bold{Image is Displayed in Wrong Spatial Orientation} below.
+  }
+  \item{do.plot}{
+    Logical value indicating whether to actually plot the image
+    and colour ribbon.
+    Setting \code{do.plot=FALSE} will simply return the
+    colour map and the bounding box that were chosen for the plot.
+  }
+}
+\value{
+  The colour map used. An object of class \code{"colourmap"}.
+
+  Also has an attribute \code{"bbox"} giving a bounding box
+  for the colour image (including the ribbon if present).
+}
+\details{
+  This is the \code{plot} method for the class \code{"im"}.
+  [It is also the \code{image} method for \code{"im"}.]
+
+  The pixel image \code{x} is displayed on the current plot device,
+  using equal scales on the \code{x} and \code{y} axes.
+
+  If \code{ribbon=TRUE}, a legend will be plotted.
+  The legend consists of a colour ribbon and an axis with tick-marks,
+  showing the correspondence between the pixel values and the colour map.
+
+  By default, the ribbon is placed at the right of the main image.
+  This can be changed using the argument \code{ribside}.
+  
+  Arguments \code{ribsep, ribwid, ribn} control the appearance of the
+  ribbon.
+  The width of the ribbon is \code{ribwid} times the size of the pixel
+  image, where `size' means the larger of the width and the height.
+  The distance separating the ribbon and the image is \code{ribsep} times
+  the size of the pixel image. The ribbon contains \code{ribn}
+  different numerical values, evenly spaced between the minimum and
+  maximum pixel values in the image \code{x}, rendered according to
+  the chosen colour map.
+
+  Arguments \code{ribscale, ribargs} control the annotation of the
+  colour ribbon. To plot the colour ribbon without the axis and
+  tick-marks, use \code{ribargs=list(axes=FALSE)}.
+
+  Normally the pixel values are displayed using the colours given in the
+  argument \code{col}. This may be either
+  \itemize{
+    \item an explicit colour map (an object of class
+    \code{"colourmap"}, created by the command \code{\link{colourmap}}).
+    This is the best way to ensure
+    that when we plot different images, the colour maps are consistent.
+    \item a character vector or integer vector
+    that specifies a set of colours.
+    The colour mapping will be stretched to match the range of
+    pixel values in the image \code{x}. The mapping of pixel values
+    to colours is determined as follows.
+    \describe{
+      \item{logical-valued images:}{the values \code{FALSE} and
+	\code{TRUE} are mapped to the colours \code{col[1]} and
+	\code{col[2]} respectively.
+	The vector \code{col} should have length 2.
+      }
+      \item{factor-valued images:}{the factor levels \code{levels(x)}
+	are mapped to the entries of \code{col} in order. The vector
+	\code{col} should have the same length as \code{levels(x)}.
+      }
+      \item{numeric-valued images:}{
+	By default, the range of pixel values in \code{x}
+	is divided into \code{n = length(col)}
+	equal subintervals, which are mapped to the colours in \code{col}.
+	(If \code{col} was not specified, it defaults to a vector of 255
+	colours.)
+      
+	Alternatively if the argument \code{zlim} is given, it should be
+	a vector of length 2 specifying an interval of real numbers.
+	This interval will be used instead of the range of pixel
+	values. The interval from \code{zlim[1]} to \code{zlim[2]} will be
+	mapped to the colours in \code{col}. This facility enables the user to
+	plot several images using a consistent colour map.
+
+	Alternatively if the argument \code{breaks} is given,
+	then this specifies
+	the endpoints of the subintervals that are mapped to each colour.
+	This is incompatible with \code{zlim}.
+      
+	The arguments \code{col} and \code{zlim} or \code{breaks}
+	are then passed to the function \code{\link{image.default}}.
+	For examples of the use of these arguments,
+	see \code{\link{image.default}}.
+      }
+    }
+    \item{
+      a \code{function} in the \R language
+      with an argument named \code{range}
+      or \code{inputs}.
+    
+      If \code{col} is a function with an argument named \code{range},
+      and if the pixel values of \code{x} are numeric values, 
+      then the colour values will be determined by evaluating
+      \code{col(range=range(x))}. The result of this evaluation
+      should be a character vector containing colour values, or
+      a \code{"colourmap"} object. Examples of such functions
+      are \code{\link{beachcolours}} and \code{\link{beachcolourmap}}.
+  
+      If \code{col} is a function with an argument named \code{inputs},
+      and if the pixel values of \code{x} are discrete values (integer,
+      logical, factor or character), 
+      then the colour values will be determined by evaluating
+      \code{col(inputs=p)} where \code{p} is the set of possible pixel
+      values. The result should be a character vector 
+      containing colour values, or a \code{"colourmap"} object.
+    }
+    \item{
+      a \code{function} in the \R language with first argument
+      named \code{n}.
+      The colour values will be determined by evaluating
+      \code{col(n)} where \code{n} is the
+      number of distinct pixel values, up to a maximum of 128.
+      The result of this evaluation
+      should be a character vector containing colour values.
+      Examples of such functions are
+      \code{\link[grDevices]{heat.colors}},
+      \code{\link[grDevices]{terrain.colors}},
+      \code{\link[grDevices]{topo.colors}} and
+      \code{\link[grDevices]{cm.colors}}. 
+   }
+  }
+
+  If \code{spatstat.options("monochrome")} has been set to \code{TRUE}
+  then \bold{all colours will be converted to grey scale values}.
+  
+  Other graphical parameters controlling the display of both the pixel image
+  and the ribbon can be passed through the \code{...} arguments
+  to the function \code{\link[graphics]{image.default}}.
+  A parameter is handled only if it is one of the following:
+  \itemize{
+    \item a formal argument of \code{\link[graphics]{image.default}}
+    that is operative when \code{add=TRUE}.
+    \item one of the
+    parameters \code{"main", "asp", "sub", "axes", "ann",
+      "cex", "font", "cex.axis", "cex.lab", "cex.main", "cex.sub",
+      "col.axis", "col.lab", "col.main", "col.sub",
+      "font.axis", "font.lab", "font.main", "font.sub"}
+    described in \code{\link[graphics]{par}}.
+    \item the argument \code{box}, a logical value specifying whether
+    a box should be drawn.
+  }
+  Images are plotted using a bitmap raster if \code{useRaster=TRUE}
+  or by drawing polygons if \code{useRaster=FALSE}.
+  Bitmap raster display (performed by \code{\link[graphics]{rasterImage}})
+  tends to produce better results, but is not supported on all graphics devices.
+  The default is to use bitmap raster display if it is
+  supported according to \code{\link[grDevices]{dev.capabilities}}.
+
+  Alternatively, the pixel values could be directly 
+  interpretable as colour values in \R. That is, the pixel values
+  could be character strings that represent colours, or
+  values of a factor whose levels are character strings representing
+  colours.
+  \itemize{
+    \item If \code{valuesAreColours=TRUE}, then the pixel values will
+    be interpreted as colour values and displayed using these colours.
+    \item If \code{valuesAreColours=FALSE}, then the pixel values will
+    \emph{not} be interpreted as colour values, even if they could be.
+    \item If \code{valuesAreColours=NULL}, the algorithm will guess
+    what it should do. If the argument \code{col} is given,
+    the pixel values will \emph{not} be interpreted as colour values. Otherwise,
+    if all the pixel values are strings that represent colours, then
+    they will be interpreted and displayed as colours.
+  }
+  If pixel values are interpreted as colours, 
+  the arguments \code{col} and \code{ribbon} will be ignored,
+  and a ribbon will not be plotted.
+
+}
+\section{Complex-valued images}{
+  If the pixel values in \code{x} are complex numbers,
+  they will be converted into four images containing the real and
+  imaginary parts and the modulus and argument, 
+  and plotted side-by-side using \code{\link{plot.imlist}}.
+}
+\section{Monochrome colours}{
+  If \code{spatstat.options("monochrome")} has been set to \code{TRUE},
+  then \bold{the image will be plotted in greyscale}.
+  The colours are converted to grey scale values using
+  \code{\link{to.grey}}.
+  The choice of colour map still has an effect, since it determines
+  the final grey scale values.
+
+  Monochrome display can also be achieved by
+  setting the graphics device parameter \code{colormodel="grey"}
+  when starting a new graphics device, or in a call to
+  \code{\link{ps.options}} or \code{\link{pdf.options}}.
+}
+\section{Image Rendering Errors and Problems}{
+  The help for \code{\link[graphics]{image.default}}
+  and \code{\link[graphics]{rasterImage}} explains that
+  errors may occur, or images may be rendered incorrectly, on some
+  devices, depending on the availability of colours and other
+  device-specific constraints.
+
+  If the image is not displayed at all,
+  try setting \code{useRaster=FALSE} in the call to \code{plot.im}.
+  If the ribbon colours are not displayed, set
+  \code{ribargs=list(useRaster=FALSE)}.
+
+  Errors may occur on some graphics devices if the image is very
+  large. If this happens, try setting \code{useRaster=FALSE} in the
+  call to \code{plot.im}.
+
+  The error message
+  \code{useRaster=TRUE can only be used with a regular grid}
+  means that the \eqn{x} and \eqn{y} coordinates of the pixels in the
+  image are not perfectly equally spaced, due to numerical rounding.
+  This occurs with some images created by earlier versions of \pkg{spatstat}.
+  To repair the coordinates in an image \code{X}, type
+  \code{X <- as.im(X)}.
+}
+\section{Image is Displayed in Wrong Spatial Orientation}{
+  If the image is displayed in the wrong spatial orientation,
+  and you created the image data directly, 
+  please check that you understand the \pkg{spatstat} convention for
+  the spatial orientation of pixel images. The row index of the matrix
+  of pixel values corresponds to the increasing \eqn{y} coordinate; the
+  column index of the matrix corresponds to the increasing \eqn{x} coordinate
+  (Baddeley, Rubak and Turner, 2015, section 3.6.3, pages 66--67).
+
+  Images can be displayed in the wrong spatial orientation
+  on some devices, due to a bug in the device driver. This occurs only
+  when the plot coordinates are \emph{reversed}, that is, when 
+  the plot was initialised with coordinate limits \code{xlim, ylim}
+  such that \code{xlim[1] > xlim[2]} or \code{ylim[1] > ylim[2]} or
+  both. This bug is reported to occur only when \code{useRaster=TRUE}.
+  To fix this, try setting \code{workaround=TRUE}, or if that is
+  unsuccessful, \code{useRaster=FALSE}.
+}
+\seealso{
+  \code{\link{im.object}},
+  \code{\link{colourmap}},
+  \code{\link{contour.im}},
+  \code{\link{persp.im}},
+  \code{\link{hist.im}},
+  \code{\link[graphics]{image.default}},
+  \code{\link{spatstat.options}}
+}
+\examples{
+   # an image
+   Z <- setcov(owin())
+   plot(Z)
+   plot(Z, ribside="bottom")
+   # stretchable colour map
+   plot(Z, col=terrain.colors(128), axes=FALSE)
+   # fixed colour map
+   tc <- colourmap(rainbow(128), breaks=seq(-1,2,length=129))
+   plot(Z, col=tc)
+   # colour map function, with argument 'range'
+   plot(Z, col=beachcolours, colargs=list(sealevel=0.5))
+   # tweaking the plot
+   plot(Z, main="La vie en bleu", col.main="blue", cex.main=1.5,
+        box=FALSE,
+        ribargs=list(col.axis="blue", col.ticks="blue", cex.axis=0.75))
+   # log scale
+   V <- eval.im(exp(exp(Z+2))/1e4)
+   plot(V, log=TRUE, main="Log scale")
+   # it's complex
+   Y <- exp(Z + V * 1i)
+   plot(Y)
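+   # illustrative: fixing the value range with 'zlim' (see Details) gives
+   # consistent colours across related images
+   plot(Z, zlim=c(0, 1))
+   plot(eval.im(Z^2), zlim=c(0, 1))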
+}
+\references{
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  Chapman and Hall/CRC Press.
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.imlist.Rd b/man/plot.imlist.Rd
new file mode 100644
index 0000000..0fd3543
--- /dev/null
+++ b/man/plot.imlist.Rd
@@ -0,0 +1,87 @@
+\name{plot.imlist}
+\alias{plot.imlist}
+\alias{image.imlist}
+\alias{image.listof}
+\title{Plot a List of Images}
+\description{
+  Plots an array of pixel images.
+}
+\usage{
+  \method{plot}{imlist}(x, \dots, plotcommand="image",
+                          equal.ribbon=FALSE, ribmar=NULL)
+
+  \method{image}{imlist}(x, \dots, equal.ribbon=FALSE, ribmar=NULL)
+
+  \method{image}{listof}(x, \dots, equal.ribbon=FALSE, ribmar=NULL)
+}
+\arguments{
+  \item{x}{
+    An object of the class \code{"imlist"}
+    representing a list of pixel images.
+    Alternatively \code{x} may belong to the outdated class \code{"listof"}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot.solist}} to control the
+    spatial arrangement of panels, and arguments passed to \code{\link{plot.im}}
+    to control the display of each panel.
+  }
+  \item{equal.ribbon}{
+    Logical. If \code{TRUE}, the colour maps of all the images will be
+    the same. If \code{FALSE}, the colour map of each
+    image is adjusted to the range of values of that image.
+  }
+  \item{ribmar}{
+    Numeric vector of length 4 specifying the margins around the
+    colour ribbon, if \code{equal.ribbon=TRUE}. Entries in the vector
+    give the margin at the bottom, left, top, and right respectively,
+    as a multiple of the height of a line of text.
+  }
+  \item{plotcommand}{
+    Character string giving the name of a function
+    to be used to display each image.
+    Recognised by \code{plot.imlist} only.
+  }
+}
+\value{
+  Null.
+}
+\details{
+  These are methods for the generic plot commands
+  \code{plot} and \code{image} for the class \code{"imlist"}.
+  They are currently identical.
+  
+  An object of class \code{"imlist"} represents a list of pixel images.
+  (The outdated class \code{"listof"} is also handled.)
+
+  Each entry in the list \code{x} will be displayed as a pixel image,
+  in an array of panels laid out on the same graphics display,
+  using \code{\link{plot.solist}}. Individual panels are plotted by
+  \code{\link{plot.im}}.
+
+  If \code{equal.ribbon=FALSE} (the default), 
+  the images are rendered using different colour maps,
+  which are displayed as colour ribbons beside each image.
+  If \code{equal.ribbon=TRUE}, the images are rendered using the
+  same colour map, and a single colour ribbon will be displayed at the right
+  side of the array. 
+  The colour maps and the placement of the colour ribbons are
+  controlled by arguments \code{\dots} passed to \code{\link{plot.im}}.
+}
+\seealso{
+  \code{\link{plot.solist}},
+  \code{\link{plot.im}}
+}
+\examples{
+ D <- density(split(amacrine))
+ image(D, equal.ribbon=TRUE, main="", col.ticks="red", col.axis="red")
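+ # illustrative: widen the top margin around the shared ribbon
+ # (ribmar gives the bottom, left, top, right margins)
+ image(D, equal.ribbon=TRUE, ribmar=c(0.5, 0.5, 2, 0.5), main="")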
+}
+\author{\adrian, \rolf and \ege.}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.influence.ppm.Rd b/man/plot.influence.ppm.Rd
new file mode 100644
index 0000000..0a9a85a
--- /dev/null
+++ b/man/plot.influence.ppm.Rd
@@ -0,0 +1,79 @@
+\name{plot.influence.ppm}
+\alias{plot.influence.ppm}
+\title{
+  Plot Influence Measure
+}
+\description{
+  Plots an influence measure that has been
+  computed by \code{\link{influence.ppm}}.
+}
+\usage{
+ \method{plot}{influence.ppm}(x, ..., multiplot=TRUE)
+}
+\arguments{
+  \item{x}{
+    Influence measure (object of class \code{"influence.ppm"}) computed by
+    \code{\link{influence.ppm}}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot.ppp}}
+    to control the plotting.
+  }
+  \item{multiplot}{
+    Logical value indicating whether it is permissible to
+    plot more than one panel. This happens if the original point process
+    model is multitype.
+  }
+}
+\details{
+  This is the plot method for objects of class \code{"influence.ppm"}.
+  These objects are computed by the command \code{\link{influence.ppm}}.
+
+  For a point process model fitted by maximum likelihood or
+  maximum pseudolikelihood (the default), influence values are
+  associated with the data points.
+  The display shows circles centred at the data points
+  with radii proportional to the influence values.
+  If the original data were a multitype point pattern, then
+  if \code{multiplot=TRUE} (the default), 
+  there is one such display for each possible type of point,
+  while if \code{multiplot=FALSE} there is a single plot
+  combining all data points regardless of type.
+  
+  For a model fitted by logistic composite likelihood
+  (\code{method="logi"} in \code{\link{ppm}}) influence values
+  are associated with the data points and also with the
+    dummy points used to fit the model. The display consists of two
+  panels, for the data points and dummy points respectively,
+  showing circles with radii proportional to the influence values.
+  If the original data were a multitype point pattern, then
+  if \code{multiplot=TRUE} (the default), 
+  there is one pair of panels for each possible type of point,
+  while if \code{multiplot=FALSE} there is a single plot
+  combining all data and dummy points regardless of type.
+
+  Use the argument \code{clipwin} to restrict the plot to a subset
+  of the full data.
+}
+\value{
+  None.
+}
+\references{
+  Baddeley, A., Chang, Y.M. and Song, Y. (2013)
+  Leverage and influence diagnostics for spatial point process models.
+  \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{influence.ppm}}
+}
+\examples{
+   X <- rpoispp(function(x,y) { exp(3+3*x) })
+   fit <- ppm(X, ~x+y)
+   plot(influence(fit))
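+   # illustrative: restrict the display to a subwindow using 'clipwin'
+   plot(influence(fit), clipwin=square(0.5))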
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/plot.kppm.Rd b/man/plot.kppm.Rd
new file mode 100644
index 0000000..419e76b
--- /dev/null
+++ b/man/plot.kppm.Rd
@@ -0,0 +1,96 @@
+\name{plot.kppm}
+\alias{plot.kppm}
+\title{Plot a fitted cluster point process}
+\description{
+  Plots a fitted cluster point process model,
+  displaying the fitted intensity and the fitted \eqn{K}-function.
+}
+\usage{
+  \method{plot}{kppm}(x, \dots,
+                      what=c("intensity", "statistic", "cluster"),
+                      pause=interactive(),
+                      xname)
+}
+\arguments{
+  \item{x}{
+    Fitted cluster point process model.
+    An object of class \code{"kppm"}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot.ppm}}
+    and \code{\link{plot.fv}} to control the plot.
+  }
+  \item{what}{
+    Character vector determining what will be plotted.
+  }
+  \item{pause}{
+    Logical value specifying whether to pause between plots.
+  }
+  \item{xname}{
+    Optional. Character string. The name of the object \code{x}
+    for use in the title of the plot.
+  }
+}
+\details{
+  This is a method for the generic function
+  \code{\link[graphics]{plot}} for the class \code{"kppm"} of fitted
+  cluster point process models.
+  
+  The argument \code{x} should be a cluster point process model
+  (object of class \code{"kppm"}) obtained using
+  the function \code{\link{kppm}}.
+
+  The choice of plots (and the order in which they are
+  displayed) is controlled by the argument \code{what}.
+  The options (partially matched) are \code{"intensity"},
+  \code{"statistic"} and \code{"cluster"}.
+
+  This command is capable of producing three different plots:
+  \describe{
+
+    \item{what="intensity"}{specifies the fitted intensity of the model,
+    which is plotted using \code{\link{plot.ppm}}. By default this plot
+    is not produced for stationary models.}
+
+    \item{what="statistic"}{specifies the empirical and fitted summary
+    statistics, which are plotted using \code{\link{plot.fv}}. This is
+    only meaningful if the model has been fitted using the Method of
+    Minimum Contrast, and it is turned off otherwise.}
+
+    \item{what="cluster"}{specifies a fitted cluster, which is computed
+    by \code{\link{clusterfield}} and plotted by
+    \code{\link{plot.im}}. It is only meaningful for Poisson cluster
+    (incl. Neyman-Scott) processes, and it is turned off for
+    log-Gaussian Cox processes (LGCP). If the model is stationary (and
+    non-LGCP) this option is turned on by default and shows a fitted
+    cluster positioned at the centroid of the observation window. For
+    non-stationary (and non-LGCP) models this option is only invoked if
+    explicitly requested, and in that case an additional argument
+    \code{locations} (see \code{\link{clusterfield}}) must be given to
+    specify where to position the parent point(s).}
+}
+Alternatively \code{what="all"} selects all available options.
+}
+\value{
+  Null.
+}
+\examples{
+  data(redwood)
+  fit <- kppm(redwood~1, "Thomas")
+  plot(fit)
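+  # show only the empirical and fitted K-functions
+  # (meaningful here because the model was fitted by minimum contrast)
+  plot(fit, what="statistic")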
+}
+\seealso{
+  \code{\link{kppm}},
+  \code{\link{plot.ppm}}
+}
+\author{
+  \adrian, \rolf and \ege.
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/plot.laslett.Rd b/man/plot.laslett.Rd
new file mode 100644
index 0000000..dc0724e
--- /dev/null
+++ b/man/plot.laslett.Rd
@@ -0,0 +1,64 @@
+\name{plot.laslett}
+\alias{plot.laslett}
+\title{
+  Plot Laslett Transform
+}
+\description{
+  Plot the result of Laslett's Transform.
+}
+\usage{
+\method{plot}{laslett}(x, \dots,
+          Xpars = list(box = TRUE, col = "grey"),
+          pointpars = list(pch = 3, cols = "blue"),
+          rectpars = list(lty = 3, border = "green"))
+}
+\arguments{
+  \item{x}{
+    Object of class \code{"laslett"} produced by \code{\link{laslett}}
+    representing the result of Laslett's transform.
+  }
+  \item{\dots}{
+    Additional plot arguments passed to \code{\link{plot.solist}}.
+  }
+  \item{Xpars}{
+    A list of plot arguments passed to \code{\link{plot.owin}}
+    or \code{\link{plot.im}} to display the original region \code{X}
+    before transformation.
+  }
+  \item{pointpars}{
+    A list of plot arguments passed to \code{\link{plot.ppp}}
+    to display the tangent points.
+  }
+  \item{rectpars}{
+    A list of plot arguments passed to \code{\link{plot.owin}}
+    to display the maximal rectangle.
+  }
+}
+\details{
+  This is the \code{plot} method for the class \code{"laslett"}.
+  
+  The function \code{\link{laslett}} applies Laslett's Transform
+  to a spatial region \code{X} and returns an object of class
+  \code{"laslett"} representing the result of the transformation.
+  The result is plotted by this method.
+
+  The plot function \code{\link{plot.solist}} is used to align
+  the before-and-after pictures. See \code{\link{plot.solist}} for
+  further options to control the plot.
+}
+\value{
+  None.
+}
+\author{
+  Kassel Hingee and \adrian.
+}
+\seealso{
+  \code{\link{laslett}}
+}
+\examples{
+  b <- laslett(heather$coarse, plotit=FALSE)
+  plot(b, main="Heather Data")
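+  # illustrative: restyle the tangent points and the maximal rectangle
+  plot(b, main="Heather Data",
+       pointpars=list(pch=16, cols="red"),
+       rectpars=list(lty=1, border="black"))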
+}
+\keyword{spatial}
+\keyword{hplot}
+
diff --git a/man/plot.layered.Rd b/man/plot.layered.Rd
new file mode 100644
index 0000000..07ec336
--- /dev/null
+++ b/man/plot.layered.Rd
@@ -0,0 +1,111 @@
+\name{plot.layered}
+\alias{plot.layered}
+\title{
+Layered Plot
+}
+\description{
+  Generates a layered plot.
+  The plot method for objects of class \code{"layered"}.
+}
+\usage{
+\method{plot}{layered}(x, ..., which = NULL, plotargs = NULL,
+                         add=FALSE, show.all=!add, main=NULL,
+                         do.plot=TRUE)
+}
+\arguments{
+  \item{x}{
+    An object of class \code{"layered"}
+    created by the function \code{\link{layered}}.
+  }
+  \item{\dots}{
+    Arguments to be passed to the \code{plot} method
+    for \emph{every} layer.
+  }
+  \item{which}{
+    Subset index specifying which layers should be plotted. 
+  }
+  \item{plotargs}{
+    Arguments to be passed to the  \code{plot} methods
+    for individual layers. A list of lists of arguments of the
+    form \code{name=value}.
+  }
+  \item{add}{Logical value indicating whether to add the graphics
+    to an existing plot.
+  }
+  \item{show.all}{
+     Logical value indicating whether the \emph{first} layer should be
+     displayed in full (including the main title, bounding window,
+     coordinate axes, colour ribbon, and so on). 
+  }
+  \item{main}{Main title for the plot}
+  \item{do.plot}{Logical value indicating whether to actually do the plotting.}
+}
+\details{
+  Layering is a simple mechanism for controlling
+  a high-level plot that is composed of
+  several successive plots, for example, a background and a foreground
+  plot. The layering mechanism makes it easier to plot,
+  to switch on or off the plotting of each individual layer,
+  to control the plotting arguments that are passed to each layer,
+  and to zoom in on a subregion.
+
+  The layers of data to be plotted should first be converted
+  into a single object of class \code{"layered"} using the
+  function \code{\link{layered}}. Then the layers can be plotted using
+  the method \code{plot.layered}.
+
+  To zoom in on a subregion,
+  apply the subset operator \code{\link{[.layered}} to \code{x}
+  before plotting.
+
+  Graphics parameters for each layer are determined by (in order of precedence)
+  \code{\dots}, \code{plotargs}, and \code{\link{layerplotargs}(x)}.
+
+  The graphics parameters may also include the special argument
+  \code{.plot} specifying (the name of) a function which will be used to
+  perform the plotting instead of the generic \code{plot}.
+  
+  The argument \code{show.all} is recognised by many plot methods
+  in \pkg{spatstat}. It determines whether a plot is drawn
+  with all its additional components such as the main title, bounding window,
+  coordinate axes, colour ribbons and legends. The default is
+  \code{TRUE} for new plots and \code{FALSE} for added plots.
+  
+  In \code{plot.layered}, the argument \code{show.all} applies only to the
+  \bold{first} layer. The subsequent layers are plotted
+  with \code{show.all=FALSE}. 
+
+  To override this, that is, if you really want to draw all
+  the components of \bold{all} layers of \code{x},
+  insert the argument \code{show.all=TRUE} in each
+  entry of \code{plotargs} or \code{\link{layerplotargs}(x)}.
+}
+\value{
+  (Invisibly) a list containing the return values from
+  the plot commands for each layer. This list has
+  an attribute \code{"bbox"} giving a bounding box for the entire plot.
+}
+\author{\adrian and \rolf.}
+\seealso{
+  \code{\link{layered}},
+  \code{\link{layerplotargs}},
+  \code{\link{[.layered}},
+  \code{\link{plot}}.
+}
+\examples{
+   data(cells)
+   D <- distmap(cells)
+   L <- layered(D, cells)
+   plot(L)
+   plot(L, which = 2)
+   plot(L, plotargs=list(list(ribbon=FALSE), list(pch=3, cols="white")))
+   # plot a subregion
+   plot(L[, square(0.5)])
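+   # illustrative: the special argument '.plot' chooses the plotting
+   # function for each layer (here, contours for the image layer)
+   plot(L, plotargs=list(list(.plot="contour"), list(pch=3)))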
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.leverage.ppm.Rd b/man/plot.leverage.ppm.Rd
new file mode 100644
index 0000000..fd28589
--- /dev/null
+++ b/man/plot.leverage.ppm.Rd
@@ -0,0 +1,80 @@
+\name{plot.leverage.ppm}
+\alias{plot.leverage.ppm}
+\alias{persp.leverage.ppm}
+\title{
+  Plot Leverage Function
+}
+\description{
+  Generate a pixel image plot, or a perspective plot,
+  of a leverage function that has been computed by \code{\link{leverage.ppm}}.
+}
+\usage{
+ \method{plot}{leverage.ppm}(x, \dots, showcut=TRUE, col.cut=par("fg"),
+                             multiplot=TRUE)
+
+ \method{persp}{leverage.ppm}(x, \dots, main)
+}
+\arguments{
+  \item{x}{
+    Leverage function (object of class \code{"leverage.ppm"}) computed by
+    \code{\link{leverage.ppm}}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot.im}}
+    or \code{\link{contour.im}} controlling the plot.
+  }
+  \item{showcut}{
+    Logical. If \code{TRUE}, a contour line is plotted at the
+    level equal to the theoretical mean of the leverage.
+  }
+  \item{col.cut}{
+    Optional colour for the contour line.
+  }
+  \item{multiplot}{
+    Logical value indicating whether it is permissible to display
+    several plot panels.
+  }
+  \item{main}{
+    Optional main title.
+  }
+}
+\details{
+  These functions are the \code{plot} and \code{persp} methods
+  for objects of class \code{"leverage.ppm"}.
+  Such objects are computed by the command \code{\link{leverage.ppm}}.
+
+  The \code{plot} method displays the leverage function 
+  as a colour pixel image using \code{\link{plot.im}},
+  and draws a single contour line at the mean leverage value
+  using \code{\link{contour.im}}.
+  Use the argument \code{clipwin} to restrict the plot to a subset
+  of the full data.
+
+  The \code{persp} method displays the leverage function as a surface
+  in perspective view, using \code{\link{persp.im}}.
+}
+\value{
+  Same as for \code{\link{plot.im}} 
+  and \code{\link{persp.im}} respectively.
+}
+\references{
+  Baddeley, A., Chang, Y.M. and Song, Y. (2013)
+  Leverage and influence diagnostics for spatial point process models.
+  \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{leverage.ppm}}.
+}
+\examples{
+   X <- rpoispp(function(x,y) { exp(3+3*x) })
+   fit <- ppm(X ~x+y)
+   lef <- leverage(fit)
+   plot(lef)
+   persp(lef)
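+   # suppress the contour line at the mean leverage value
+   plot(lef, showcut=FALSE)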
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/plot.linim.Rd b/man/plot.linim.Rd
new file mode 100644
index 0000000..3424a52
--- /dev/null
+++ b/man/plot.linim.Rd
@@ -0,0 +1,118 @@
+\name{plot.linim}
+\alias{plot.linim}
+\title{
+  Plot Pixel Image on Linear Network
+}
+\description{
+  Given a pixel image on a linear network,
+  the pixel values are displayed
+  either as colours or as line widths.
+}
+\usage{
+\method{plot}{linim}(x, ..., style = c("colour", "width"),
+             scale, adjust = 1,
+             legend=TRUE,
+             leg.side=c("right", "left", "bottom", "top"),
+             leg.sep=0.1,
+             leg.wid=0.1,
+             leg.args=list(),
+             leg.scale=1,
+             do.plot=TRUE)
+}
+\arguments{
+  \item{x}{
+    The pixel image to be plotted. An object of class \code{"linim"}.
+  }
+  \item{\dots}{
+    Extra graphical parameters, passed to \code{\link{plot.im}}
+    if \code{style="colour"}, or to \code{\link{polygon}}
+    if \code{style="width"}.
+  }
+  \item{style}{
+    Character string specifying the type of plot. See Details.
+  }
+  \item{scale}{
+    Physical scale factor for representing the pixel values as
+    line widths. 
+  }
+  \item{adjust}{
+    Adjustment factor for the default scale.
+  }
+  \item{legend}{
+    Logical value indicating whether to plot a legend
+    (colour ribbon or scale bar).
+  }
+  \item{leg.side}{
+    Character string indicating where to display the legend
+    relative to the main image.
+  }
+  \item{leg.sep}{
+    Factor controlling the space between the legend and the image.
+  }
+  \item{leg.wid}{
+    Factor controlling the width of the legend.
+  }
+  \item{leg.scale}{
+    Rescaling factor for annotations on the legend.
+    The values on the numerical scale
+    printed beside the legend will be multiplied by this rescaling factor.
+  }
+  \item{leg.args}{
+    List of additional arguments passed to
+    \code{\link[graphics]{image.default}},
+    \code{\link[graphics]{axis}}
+    or \code{\link[graphics]{text.default}} 
+    to control the display of the legend.
+    These may override the \code{\dots} arguments. 
+  }
+  \item{do.plot}{
+    Logical value indicating whether to actually perform the plot.
+  }
+}
+\details{
+  This is the \code{plot} method for objects 
+  of class \code{"linim"}. Such an object represents
+  a pixel image defined on a linear network.
+
+  If \code{style="colour"} (the default) then
+  the pixel values of \code{x} are plotted as colours,
+  using \code{\link{plot.im}}. 
+
+  If \code{style="width"} then
+  the pixel values of \code{x} are used to determine the widths of
+  thick lines centred on the line segments of the linear network.
+}
+\value{
+  If \code{style="colour"}, the result is
+  an object of class \code{"colourmap"} specifying the colour map used.
+  If \code{style="width"}, the result is 
+  a numeric value \code{v} giving the physical scale:
+  one unit of pixel value is represented as \code{v} physical units on the plot.
+
+  The result also has an attribute \code{"bbox"} giving a bounding box
+  for the plot. The bounding box includes the ribbon or scale bar, if present,
+  but not the main title.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{linim}},
+  \code{\link{plot.im}},
+  \code{\link{polygon}}
+}
+\references{
+  Ang, Q.W., Baddeley, A. and Nair, G. (2012)
+  Geometrically corrected second-order analysis of 
+  events on a linear network, with applications to
+  ecology and criminology.
+  \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617.
+}
+\examples{
+  X <- linfun(function(x,y,seg,tp){y^2+x}, simplenet)
+  X <- as.linim(X)
+  
+  plot(X)
+  plot(X, style="width", main="Width represents value")
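+  # illustrative: place the scale bar below the plot and rescale its labels
+  plot(X, style="width", leg.side="bottom", leg.scale=2)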
+}
+\keyword{spatial}
diff --git a/man/plot.linnet.Rd b/man/plot.linnet.Rd
new file mode 100644
index 0000000..c6a0d55
--- /dev/null
+++ b/man/plot.linnet.Rd
@@ -0,0 +1,57 @@
+\name{plot.linnet}
+\alias{plot.linnet}
+\title{
+  Plot a linear network
+}
+\description{
+  Plots a linear network
+}
+\usage{
+ \method{plot}{linnet}(x, ..., main=NULL, add=FALSE, 
+                               vertices=FALSE, window=FALSE,
+                               do.plot=TRUE)
+}
+\arguments{
+  \item{x}{
+    Linear network (object of class \code{"linnet"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot.psp}}
+    controlling the plot.
+  }
+  \item{main}{
+    Main title for plot. Use \code{main=""} to suppress it.
+  }
+  \item{add}{
+    Logical. If \code{TRUE}, superimpose the graphics
+    over the current plot. If \code{FALSE}, generate a new plot.
+  }
+  \item{vertices}{
+    Logical. Whether to plot the vertices as well.
+  }
+  \item{window}{
+    Logical. Whether to plot the window containing the linear network.
+  }
+  \item{do.plot}{
+    Logical. Whether to actually perform the plot.
+  }
+}
+\details{
+  This is the plot method for class \code{"linnet"}.
+}
+\value{
+  An (invisible) object of class \code{"owin"}
+  giving the bounding box of the network.
+}
+\author{
+  Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian.
+}
+\seealso{
+   \code{\link{linnet}}
+}
+\examples{
+   plot(simplenet)
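+   # also draw the vertices and the containing window
+   plot(simplenet, vertices=TRUE, window=TRUE)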
+}
+\keyword{spatial}
diff --git a/man/plot.lintess.Rd b/man/plot.lintess.Rd
new file mode 100644
index 0000000..41b576f
--- /dev/null
+++ b/man/plot.lintess.Rd
@@ -0,0 +1,74 @@
+\name{plot.lintess}
+\alias{plot.lintess}
+\title{
+  Plot a Tessellation on a Linear Network
+}
+\description{
+  Plot a tessellation or division of a linear network into tiles.
+}
+\usage{
+\method{plot}{lintess}(x, \dots,
+        main, add = FALSE, style = c("segments", "image"), col = NULL)
+}
+\arguments{
+  \item{x}{
+    Tessellation on a linear network
+    (object of class \code{"lintess"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link[graphics]{segments}} (if
+    \code{style="segments"})
+    or to \code{\link{plot.im}} (if \code{style="image"})
+    to control the plot.
+  }
+  \item{main}{
+    Optional main title for the plot.
+  }
+  \item{add}{
+    Logical value indicating whether the plot is to be added to an
+    existing plot.
+  }
+  \item{style}{
+    Character string (partially matched) indicating whether to
+    plot the tiles of the tessellation using \code{\link[graphics]{segments}}
+    or to convert the tessellation to a pixel image and use
+    \code{\link{plot.im}}.
+  }
+  \item{col}{
+    Vector of colours, or colour map, determining the colours used
+    to plot the different tiles of the tessellation.
+  }
+}
+\details{
+  A tessellation on a linear network \code{L} is a partition of the
+  network into non-overlapping pieces (tiles). Each tile consists of one
+  or more line segments which are subsets of the line segments making up
+  the network. A tile can consist of several disjoint pieces.
+
+  This function plots the tessellation on the current device.
+  It is a method for the generic \code{plot}.
+  
+  If \code{style="segments"}, each tile is plotted using
+  \code{\link[graphics]{segments}}. Colours distinguish the different
+  tiles. 
+
+  If \code{style="image"}, the tessellation is converted to a pixel
+  image, and plotted using \code{\link{plot.im}}.
+}
+\value{
+  (Invisible) colour map.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{lintess}}
+}
+\examples{
+   X <- runiflpp(7, simplenet)
+   Z <- divide.linnet(X)
+   plot(Z, main="tessellation on network")
+   points(as.ppp(X))
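+   # render the same tessellation as a pixel image instead
+   plot(Z, style="image", main="tessellation as image")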
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.listof.Rd b/man/plot.listof.Rd
new file mode 100644
index 0000000..84dcc8a
--- /dev/null
+++ b/man/plot.listof.Rd
@@ -0,0 +1,234 @@
+\name{plot.listof}
+\alias{plot.listof}
+\title{Plot a List of Things}
+\description{
+  Plots a list of things
+}
+\usage{
+  \method{plot}{listof}(x, \dots, main, arrange=TRUE,
+   nrows=NULL, ncols=NULL, main.panel=NULL,
+   mar.panel=c(2,1,1,2), hsep=0, vsep=0,
+   panel.begin=NULL, panel.end=NULL, panel.args=NULL,
+   panel.begin.args=NULL, panel.end.args=NULL,
+   plotcommand="plot",
+   adorn.left=NULL, adorn.right=NULL, adorn.top=NULL, adorn.bottom=NULL,
+   adorn.size=0.2, equal.scales=FALSE, halign=FALSE, valign=FALSE)
+}
+\arguments{
+  \item{x}{
+    An object of the class \code{"listof"}.
+    Essentially a list of objects.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot}} when generating each
+    plot panel.
+  }
+  \item{main}{
+    Overall heading for the plot.
+  }
+  \item{arrange}{
+    Logical flag indicating whether to plot the objects
+    side-by-side on a single page (\code{arrange=TRUE})
+    or plot them individually in a succession of frames
+    (\code{arrange=FALSE}).
+  }
+  \item{nrows,ncols}{
+    Optional. The number of rows/columns in the plot layout
+    (assuming \code{arrange=TRUE}).
+    You can specify either or both of these numbers.
+  }
+  \item{main.panel}{
+    Optional. A character string, or a vector of character strings,
+    giving the headings for each of the objects.
+  }
+  \item{mar.panel}{
+    Size of the margins outside each plot panel.
+    A numeric vector of length 4 giving the bottom, left, top,
+    and right margins in that order. (Alternatively the vector
+    may have length 1 or 2 and will be replicated to length 4).
+    See the section on \emph{Spacing between plots}.
+  }
+  \item{hsep,vsep}{
+    Additional horizontal and vertical separation between plot panels,
+    expressed in the same units as \code{mar.panel}. 
+  }
+  \item{panel.begin,panel.end}{
+    Optional. Functions that will be executed before and after each panel is
+    plotted. See Details.
+  }
+  \item{panel.args}{
+    Optional. Function that determines different plot arguments
+    for different panels. See Details.
+  }
+  \item{panel.begin.args}{
+    Optional. List of additional arguments for \code{panel.begin}
+    when it is a function.
+  }
+  \item{panel.end.args}{
+    Optional. List of additional arguments for \code{panel.end}
+    when it is a function.
+  }
+  \item{plotcommand}{
+    Optional.
+    Character string containing the name of the command that should be
+    executed to plot each panel. 
+  }
+  \item{adorn.left,adorn.right,adorn.top,adorn.bottom}{
+    Optional. Functions (with no arguments) that will be executed to
+    generate additional plots at the margins (left, right, top and/or
+    bottom, respectively) of the array of plots.
+  }
+  \item{adorn.size}{
+    Relative width (as a fraction of the other panels' widths)
+    of the margin plots.
+  }
+  \item{equal.scales}{
+    Logical value indicating whether the components
+    should be plotted at (approximately) the same physical scale. 
+  }
+  \item{halign,valign}{
+    Logical values indicating whether panels in a column
+    should be aligned to the same \eqn{x} coordinate system
+    (\code{halign=TRUE}) and whether panels in a row should
+    be aligned to the same \eqn{y} coordinate system (\code{valign=TRUE}).
+    These are applicable only if \code{equal.scales=TRUE}.
+  }
+}
+\value{
+  Null.
+}
+\details{
+  This is the \code{plot} method for the class \code{"listof"}.
+
+  An object of class \code{"listof"} (defined in the base R package) represents
+  a list of objects, all belonging to a common class.
+  The base R package defines a method for printing these objects,
+  \code{\link[base]{print.listof}},
+  but does not define a method for \code{plot}.
+  So here we have provided a method for \code{plot}.
+
+  In the \pkg{spatstat} package, various functions produce
+  an object of class \code{"listof"}, essentially a list of
+  spatial objects of the same kind. 
+  These objects can be plotted in a nice arrangement
+  using \code{plot.listof}. See the Examples.
+
+  The argument \code{panel.args} determines extra graphics parameters
+  for each panel. It should be a function that will be called
+  as \code{panel.args(i)} where \code{i} is the panel number.
+  Its return value should be a list of graphics parameters that can
+  be passed to the relevant \code{plot} method. These parameters
+  override any parameters specified in the \code{\dots} arguments.
+
+  The arguments \code{panel.begin} and \code{panel.end}
+  determine graphics that will be plotted before and after
+  each panel is plotted. They may be objects
+  of some class that can be plotted
+  with the generic \code{plot} command. Alternatively they
+  may be functions that will be
+  called as \code{panel.begin(i, y, main=main.panel[i])}
+  and \code{panel.end(i, y, add=TRUE)} where \code{i} is the panel
+  number and \code{y = x[[i]]}.
+
+  If all entries of \code{x} are pixel images,
+  the function \code{\link{image.listof}} is called to control
+  the plotting. The arguments \code{equal.ribbon} and \code{col}
+  can be used to determine the colour map or maps applied.
+
+  If \code{equal.scales=FALSE} (the default), then the 
+  plot panels will have equal height on the plot device
+  (unless there is only one column of panels, in which case
+  they will have equal width on the plot device). This means that the
+  objects are plotted at different physical scales, by default.
+
+  If \code{equal.scales=TRUE}, then the dimensions of the
+  plot panels on the plot device will be proportional
+  to the spatial dimensions of the
+  corresponding components of \code{x}. This means that the
+  objects will be plotted at \emph{approximately} equal physical scales.
+  If these objects have very different spatial sizes,
+  the plot command could fail (when it tries
+  to plot the smaller objects at a tiny scale), with an error
+  message that the figure margins are too large.
+
+  The objects will be plotted at \emph{exactly} equal
+  physical scales, and \emph{exactly} aligned on the device,
+  under the following conditions:
+  \itemize{
+    \item
+    every component of \code{x} is a spatial object
+    whose position can be shifted by \code{\link{shift}};
+    \item
+    \code{panel.begin} and \code{panel.end} are either
+    \code{NULL} or they are spatial objects 
+    whose position can be shifted by \code{\link{shift}};
+    \item
+    \code{adorn.left}, 
+    \code{adorn.right}, 
+    \code{adorn.top} and 
+    \code{adorn.bottom} are all \code{NULL}.
+  }
+
+  Another special case is when every component of \code{x} is an
+  object of class \code{"fv"} representing a function.
+  If \code{equal.scales=TRUE} then all these functions will 
+  be plotted with the same axis scales
+  (i.e. with the same \code{xlim} and the same \code{ylim}).
+}
+\section{Spacing between plots}{
+  The spacing between individual plots is controlled by the parameters
+  \code{mar.panel}, \code{hsep} and \code{vsep}.
+
+  If \code{equal.scales=FALSE}, the plot panels are
+  logically separate plots. The margins for each panel are
+  determined by the argument \code{mar.panel} which becomes 
+  the graphics parameter \code{mar}
+  described in the help file for \code{\link{par}}.
+  One unit of \code{mar} corresponds to one line of text in the margin.
+  If \code{hsep} or \code{vsep} are present, \code{mar.panel}
+  is augmented by \code{c(vsep, hsep, vsep, hsep)/2}.
+  
+  If \code{equal.scales=TRUE}, all the plot panels are drawn
+  in the same coordinate system which represents a physical scale.
+  The unit of measurement for \code{mar.panel[c(1,3)]}
+  is one-sixth of the greatest height of any object plotted in the same row
+  of panels, and the unit for \code{mar.panel[c(2,4)]} is one-sixth of the
+  greatest width of any object plotted in the same column of panels.
+  If \code{hsep} or \code{vsep} are present,
+  they are interpreted in the same units as \code{mar.panel[2]}
+  and \code{mar.panel[1]} respectively.
+}
+\seealso{
+  \code{\link[base]{print.listof}},
+  \code{\link{contour.listof}},
+  \code{\link{image.listof}},
+  \code{\link{density.splitppp}}
+}
+\section{Error messages}{
+  If the error message \sQuote{Figure margins too large}
+  occurs, this generally means that one of the
+  objects had a much smaller physical scale than the others.
+  Ensure that \code{equal.scales=FALSE}
+  and increase the values of \code{mar.panel}.
+}
+\examples{
+# Intensity estimate of multitype point pattern
+ plot(D <- density(split(amacrine)))
+ plot(D, main="", equal.ribbon=TRUE,
+      panel.end=function(i,y,...){contour(y, ...)})
+
+# list of 3D point patterns
+ ape1 <- osteo[osteo$shortid==4, "pts", drop=TRUE]
+ class(ape1)
+ plot(ape1, main.panel="", mar.panel=0.1, hsep=0.7, vsep=1,
+      cex=1.5, pch=21, bg='white')
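+# illustrative: per-panel arguments via 'panel.args'
+# (here, a colour ribbon on the first panel only)
+ plot(D, main="", panel.args=function(i) list(ribbon = (i == 1)))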
+}
+
+\author{\adrian and \rolf.}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.lpp.Rd b/man/plot.lpp.Rd
new file mode 100644
index 0000000..581e57e
--- /dev/null
+++ b/man/plot.lpp.Rd
@@ -0,0 +1,105 @@
+\name{plot.lpp}
+\alias{plot.lpp}
+\title{
+  Plot Point Pattern on Linear Network
+}
+\description{
+  Plots a point pattern on a linear network.
+  Plot method for the class \code{"lpp"} of point patterns on a linear network.
+}
+\usage{
+\method{plot}{lpp}(x, \dots, main, add = FALSE,
+                   use.marks=TRUE, which.marks=NULL,
+                   show.all = !add, show.window=FALSE, show.network=TRUE, 
+                   do.plot = TRUE, multiplot=TRUE) 
+}
+\arguments{
+  \item{x}{
+    Point pattern on a linear network (object of class \code{"lpp"}).
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link{plot.linnet}}
+    or \code{\link{plot.ppp}}.
+  }
+  \item{main}{
+    Main title for plot.
+  }
+  \item{add}{
+    Logical value indicating whether the plot is to be added to the
+    existing plot (\code{add=TRUE}) or whether a new plot should be
+    initialised (\code{add=FALSE}, the default).
+  }
+  \item{use.marks}{
+    logical flag; if \code{TRUE}, plot points using a different
+    plotting symbol for each mark;
+    if \code{FALSE}, only the locations of the points will be plotted,
+    using \code{\link{points}()}.
+  }
+  \item{which.marks}{
+    Index determining which column of marks to use,
+    if the marks of \code{x} are a data frame.
+    A character or integer vector identifying one or more
+    columns of marks. 
+    If \code{add=FALSE} then
+    the default is to plot all columns of marks, in a series of separate
+    plots.
+    If \code{add=TRUE} then only one column of marks can be plotted,
+    and the default is \code{which.marks=1}
+    indicating the first column of marks.
+  }
+  \item{show.all}{
+    Logical value indicating whether to plot everything
+    including the main title and the window containing the network.
+  }
+  \item{show.window}{
+    Logical value indicating whether to plot the
+    window containing the network. Overrides \code{show.all}.
+  }
+  \item{show.network}{
+    Logical value indicating whether to plot the network.
+  }
+  \item{do.plot}{
+    Logical value determining whether to actually perform the plotting.
+  }
+  \item{multiplot}{
+    Logical value giving permission to display multiple plots.
+  }
+}
+\details{
+  The linear network is plotted by \code{\link{plot.linnet}},
+  then the points are plotted by \code{\link{plot.ppp}}.
+
+  Commonly-used arguments include:
+  \itemize{
+    \item \code{col} and \code{lwd} for the colour and width of lines
+    in the linear network
+    \item \code{cols} for the colour or colours of the points
+    \item \code{chars} for the plot characters representing different
+    types of points
+    \item \code{legend} and \code{leg.side} to control the graphics
+    legend
+  }
+
+  Note that the linear network will be plotted
+  even when \code{add=TRUE}, unless \code{show.network=FALSE}.
+}
+\value{
+  (Invisible) object of class \code{"symbolmap"}
+  giving the correspondence between 
+  mark values and plotting characters.
+}
+\seealso{
+  \code{\link{lpp}}.
+
+  See \code{\link{plot.ppp}} for options for representing the points.
+
+  See also \code{\link{points.lpp}}.
+}
+\examples{
+  plot(chicago, cols=1:6)
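+  # illustrative: outline the containing window, without the symbol legend
+  plot(chicago, cols=1:6, show.window=TRUE, legend=FALSE)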
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.lppm.Rd b/man/plot.lppm.Rd
new file mode 100644
index 0000000..e50e79a
--- /dev/null
+++ b/man/plot.lppm.Rd
@@ -0,0 +1,56 @@
+\name{plot.lppm}
+\alias{plot.lppm}
+\title{
+  Plot a Fitted Point Process Model on a Linear Network
+}
+\description{
+  Plots the fitted intensity of a point process model
+  on a linear network. 
+}
+\usage{
+  \method{plot}{lppm}(x, ..., type="trend")
+}
+\arguments{
+  \item{x}{
+    An object of class \code{"lppm"} representing a fitted point process
+    model on a linear network.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot.linim}} to control the
+    plot.
+  }
+  \item{type}{
+    Character string (either \code{"trend"} or \code{"cif"})
+    determining whether to plot the fitted first order trend
+    or the conditional intensity.
+  }
+}
+\details{
+  This function is the plot method for the class 
+  \code{"lppm"}. It computes the fitted intensity of the
+  point process model, and displays it using \code{\link{plot.linim}}.
+
+  The default is to display intensity values as colours. Alternatively
+  if the argument \code{style="width"} is given, intensity values are
+  displayed as the widths of thick lines drawn over the network.
+}
+\value{
+  Null.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{lppm}},
+  \code{\link{plot.linim}},
+  \code{\link{methods.lppm}},
+  \code{\link{predict.lppm}}.
+}
+\examples{
+  X <- runiflpp(10, simplenet)  
+  fit <- lppm(X ~x)
+  plot(fit)
+  plot(fit, style="width")
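+  # conditional intensity (coincides with the trend for this Poisson model)
+  plot(fit, type="cif")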
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/plot.mppm.Rd b/man/plot.mppm.Rd
new file mode 100644
index 0000000..18af105
--- /dev/null
+++ b/man/plot.mppm.Rd
@@ -0,0 +1,92 @@
+\name{plot.mppm}
+\alias{plot.mppm}
+\title{Plot a Fitted Multiple Point Process Model}
+\description{
+  Given a point process model fitted to multiple point patterns
+  by \code{\link{mppm}},
+  compute the spatial trend or conditional intensity surface of the model,
+  in a form suitable for plotting, and (optionally) plot this
+  surface.
+}
+\usage{
+  \method{plot}{mppm}(x, \dots,
+                trend=TRUE, cif=FALSE, se=FALSE,
+                how=c("image", "contour", "persp"))
+}
+\arguments{
+  \item{x}{
+    A point process model fitted to multiple point patterns,
+    typically obtained from
+    the model-fitting algorithm \code{\link{mppm}}.
+    An object of class \code{"mppm"}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot.ppm}} or
+    \code{\link{plot.anylist}} controlling the plot.
+  }
+  \item{trend}{
+    Logical value indicating whether to plot the fitted trend.
+  }
+  \item{cif}{
+    Logical value indicating whether to plot the fitted conditional intensity.
+  }
+  \item{se}{
+    Logical value indicating whether to plot the standard error of the
+    fitted trend.
+  }
+  \item{how}{
+    Single character string indicating the style of plot to be performed. 
+  }
+}
+\value{
+  \code{NULL}.
+}
+\details{
+  This is the \code{plot} method for the class \code{"mppm"}
+  of point process models fitted to multiple point patterns
+  (see \code{\link{mppm}}).
+
+  It invokes \code{\link{subfits}} to compute the fitted model for
+  each individual point pattern dataset, then calls
+  \code{\link{plot.ppm}} to plot these individual models. These
+  individual plots are displayed using \code{\link{plot.anylist}},
+  which generates either a series of separate plot frames or an
+  array of plot panels on a single page. 
+}
+\seealso{
+  \code{\link{plot.ppm}},
+  \code{\link{mppm}},
+  \code{\link{plot.listof}}
+}
+\references{
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  London: Chapman and Hall/CRC Press. 
+}
+\examples{
+  # Synthetic data from known model
+  n <- 9
+  H <- hyperframe(V=1:n,
+                  U=runif(n, min=-1, max=1))
+  H$Z <- setcov(square(1))
+  H$U <- with(H, as.im(U, as.rectangle(Z)))
+  H$Y <- with(H, rpoispp(eval.im(exp(2+3*Z))))
+
+  fit <- mppm(Y ~Z + U + V, data=H)
+
+  plot(fit)
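+  # contour display instead of colour images
+  plot(fit, how="contour")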
+}
+\author{
+  \adrian, Ida-Maria Sintorn and Leanne Bischoff.
+  Implemented by \adrian, \rolf and \ege.
+}
+\keyword{spatial}
+\keyword{hplot}
+\keyword{models}
diff --git a/man/plot.msr.Rd b/man/plot.msr.Rd
new file mode 100644
index 0000000..4c95cce
--- /dev/null
+++ b/man/plot.msr.Rd
@@ -0,0 +1,104 @@
+\name{plot.msr}
+\alias{plot.msr}
+\title{Plot a Signed or Vector-Valued Measure}
+\description{
+  Plot a signed measure or vector-valued measure.
+}
+\usage{
+  \method{plot}{msr}(x, \dots, add=FALSE,
+                     how=c("image", "contour", "imagecontour"),
+                     main=NULL,
+                     do.plot=TRUE,
+                     multiplot=TRUE,
+                     massthresh=0)
+}
+\arguments{
+  \item{x}{
+    The signed or vector measure to be plotted.
+    An object of class \code{"msr"} (see \code{\link{msr}}).
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{\link{Smooth.ppp}}
+    to control the interpolation of the
+    continuous density component of \code{x},
+    or passed to \code{\link{plot.im}} or \code{\link{plot.ppp}}
+    to control the appearance of the plot.
+  }
+  \item{add}{
+    Logical flag; if \code{TRUE}, the graphics are added to the existing
+    plot. If \code{FALSE} (the default) a new plot is initialised.
+  }
+  \item{how}{
+    String indicating how to display the continuous density component.
+  }
+  \item{main}{
+    String. Main title for the plot.
+  }
+  \item{do.plot}{
+    Logical value determining whether to actually perform the plotting.
+  }
+  \item{multiplot}{
+    Logical value indicating whether it is permissible to display
+    a plot with multiple panels (representing different components of
+    a vector-valued measure, or different types of points in a multitype
+    measure).
+  }
+  \item{massthresh}{
+    Threshold for plotting atoms.
+    A single numeric value or \code{NULL}.
+    If \code{massthresh=0} (the default) then only atoms with
+    nonzero mass will be plotted.
+    If \code{massthresh > 0} then only atoms whose absolute mass
+    exceeds \code{massthresh} will be plotted.
+    If \code{massthresh=NULL}, then all atoms of the measure will be plotted.
+  }
+}
+\value{
+  (Invisible) colour map (object of class \code{"colourmap"}) for the
+  colour image.
+}
+\details{
+  This is the \code{plot} method for the class \code{"msr"}.
+  
+  The continuous density component of \code{x} is interpolated
+  from the existing data by \code{\link{Smooth.ppp}},
+  and then displayed as a colour image by \code{\link{plot.im}}.
+
+  The discrete atomic component of \code{x} is then superimposed on this
+  image by plotting the atoms as circles (for positive mass)
+  or squares (for negative mass) by \code{\link{plot.ppp}}.
+  By default, atoms with zero mass are not plotted at all. 
+
+  To smooth both the discrete and continuous components,
+  use   \code{\link{Smooth.msr}}.
+
+  Use the argument \code{clipwin} to restrict the plot to a subset
+  of the full data.
+
+  To remove atoms with tiny masses, use the argument \code{massthresh}.
+}
+\seealso{
+  \code{\link{msr}},
+  \code{\link{Smooth.ppp}},
+  \code{\link{Smooth.msr}},
+  \code{\link{plot.im}},
+  \code{\link{plot.ppp}}
+}
+\examples{
+   X <- rpoispp(function(x,y) { exp(3+3*x) })
+   fit <- ppm(X, ~x+y)
+   rp <- residuals(fit, type="pearson")
+   rs <- residuals(fit, type="score")
+
+   plot(rp)
+   plot(rs)
+   plot(rs, how="contour")
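+   # illustrative: all components of the vector-valued measure in one panel
+   plot(rs, multiplot=FALSE)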
+}
+\author{\adrian and \rolf.}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.onearrow.Rd b/man/plot.onearrow.Rd
new file mode 100644
index 0000000..b447b1a
--- /dev/null
+++ b/man/plot.onearrow.Rd
@@ -0,0 +1,94 @@
+\name{plot.onearrow}
+\alias{plot.onearrow}
+\title{Plot an Arrow}
+\description{Plots an object of class \code{"onearrow"}.}
+\usage{
+ \method{plot}{onearrow}(x, \dots,
+  add = FALSE, main = "",
+  retract = 0.05, headfraction = 0.25, headangle = 12, headnick = 0.1,
+  col.head = NA, lwd.head = lwd, lwd = 1, col = 1,
+  zap = FALSE, zapfraction = 0.07,
+  pch = 1, cex = 1, do.plot = TRUE, do.points = FALSE, show.all = !add)
+}
+\arguments{
+  \item{x}{
+    Object of class \code{"onearrow"} to be plotted.
+    This object is created by the command \code{\link{onearrow}}.
+  }
+  \item{\dots}{
+    Additional graphics arguments passed to
+    \code{\link[graphics]{segments}} to control the appearance of the line.
+  }
+  \item{add}{Logical value indicating whether to add graphics to the
+    existing plot (\code{add=TRUE}) or to start a new plot
+    (\code{add=FALSE}).
+  }
+  \item{main}{Main title for the plot.}
+  \item{retract}{
+    Fraction of length of arrow to remove at each end.
+  }
+  \item{headfraction}{
+    Length of arrow head as a fraction of overall length of arrow.
+  }
+  \item{headangle}{
+    Angle (in degrees) between the outer edge of the arrow head
+    and the shaft of the arrow.
+  }
+  \item{headnick}{
+    Size of the nick in the trailing edge of the arrow head
+    as a fraction of length of arrow head. 
+  }
+  \item{col.head,lwd.head}{
+    Colour and line style of the filled arrow head.
+  }
+  \item{col,lwd}{
+    Colour and line style of the arrow shaft.
+  }
+  \item{zap}{
+    Logical value indicating whether the arrow should
+    include a Z-shaped (lightning-bolt) feature in the
+    middle of the shaft.
+  }
+  \item{zapfraction}{
+   Size of Z-shaped deviation as a fraction of total arrow length.
+  }
+  \item{pch,cex}{
+    Plot character and character size for the two end points of the arrow,
+    if \code{do.points=TRUE}.
+  }
+  \item{do.plot}{
+    Logical. Whether to actually perform the plot.
+  }
+  \item{do.points}{
+    Logical. Whether to display the two end points of the arrow as well.
+  }
+  \item{show.all}{
+    Internal use only.
+  }
+}
+\details{
+  The argument \code{x} should be an object of class \code{"onearrow"}
+  created by the command \code{\link{onearrow}}. 
+}
+\value{
+  A window (class \code{"owin"}) enclosing the plotted graphics.
+}
+\examples{
+  oa <- onearrow(cells[c(1, 42)])
+  plot(oa)
+  plot(oa, zap=TRUE, do.points=TRUE, col.head="pink", col="red")
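+  # a sketch of the head-shape arguments (the values are illustrative)
+  plot(oa, retract=0, headfraction=0.4, headangle=20, headnick=0.2)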
+}
+\author{\adrian, \rolf and \ege}
+\seealso{
+  \code{\link{onearrow}},
+  \code{\link{yardstick}}
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.owin.Rd b/man/plot.owin.Rd
new file mode 100644
index 0000000..b5d5930
--- /dev/null
+++ b/man/plot.owin.Rd
@@ -0,0 +1,211 @@
+\name{plot.owin}
+\alias{plot.owin}
+\title{Plot a Spatial Window}
+\description{
+  Plot a two-dimensional window of observation for a spatial point pattern
+}
+\usage{
+ \method{plot}{owin}(x, main, add=FALSE, \dots, box, edge=0.04,
+                      type=c("w","n"), show.all=!add, 
+                      hatch=FALSE,
+                      hatchargs=list(), 
+                      invert=FALSE, do.plot=TRUE,
+                      claim.title.space=FALSE)
+}
+\arguments{
+  \item{x}{
+    The window to be plotted.
+    An object of class \code{\link{owin}},
+    or data which can be converted into 
+    this format by \code{\link{as.owin}()}.
+  }
+  \item{main}{
+    text to be displayed as a title above the plot.
+  }
+  \item{add}{
+    logical flag: if \code{TRUE}, draw the window in 
+    the current plot; if \code{FALSE}, generate a new plot.
+  }
+  \item{\dots}{
+    extra arguments controlling the appearance of the plot.
+    These arguments are passed to \code{\link[graphics]{polygon}}
+    if \code{x} is a
+    polygonal or rectangular window, or passed to
+    \code{\link[graphics]{image.default}}
+    if \code{x} is a binary mask. See Details.
+  }
+  \item{box}{
+    logical flag; if \code{TRUE}, plot the enclosing rectangular box.
+  }
+  \item{edge}{
+    nonnegative number; the plotting region will have coordinate limits
+    that are \code{1 + edge}
+    times as large as the limits of the rectangular box
+    that encloses the pattern.
+  }
+  \item{type}{
+    Type of plot: either \code{"w"} or \code{"n"}.
+    If \code{type="w"} (the default), the window is plotted.
+    If \code{type="n"} and \code{add=FALSE}, a new plot is initialised
+    and the coordinate system is established, but nothing is drawn.
+  }
+  \item{show.all}{
+    Logical value indicating whether to plot everything
+    including the main title.
+  }
+  \item{hatch}{
+    logical flag; if \code{TRUE}, the interior of the window will
+    be shaded by texture, such as a grid of parallel lines.
+  }
+  \item{hatchargs}{
+    List of arguments passed to \code{\link{add.texture}}
+    to control the texture shading when \code{hatch=TRUE}. 
+  }
+  \item{invert}{
+    logical flag; when the window is a binary pixel mask,
+    the mask colours will be inverted if \code{invert=TRUE}.
+  }
+  \item{do.plot}{
+    Logical value indicating whether to actually perform the plot.
+  }
+  \item{claim.title.space}{
+    Logical value indicating whether extra space for the main title
+    should be allocated when declaring the plot dimensions.
+    Should be set to \code{FALSE} under normal conditions.
+  }
+}
+\value{
+  none.
+}
+\details{
+  This is the \code{plot} method for the class \code{\link{owin}}.
+  The action is to plot the boundary of the window on the current plot device,
+  using equal scales on the \code{x} and \code{y} axes.
+
+  If the window \code{x} is of type \code{"rectangle"} or \code{"polygonal"},
+  the boundary of the window is plotted as a polygon or series of polygons.
+  If \code{x} is of type \code{"mask"} 
+  the discrete raster approximation of the window is displayed
+  as a binary image (white inside the window, black outside).
+
+  Graphical parameters controlling the display (e.g. setting the
+  colours) may be passed directly via the \code{...} arguments,
+  or indirectly reset using 
+  \code{\link{spatstat.options}}.
+
+  When \code{x} is of type \code{"rectangle"} or \code{"polygonal"}, it
+  is plotted by the \R function \code{\link[graphics]{polygon}}. To control the
+  appearance (colour, fill density, line density etc) of the polygon
+  plot, determine the required argument of \code{\link[graphics]{polygon}} and
+  pass it through \code{...}. For example, to paint the interior of the
+  polygon in red, use the argument \code{col="red"}. To draw the polygon
+  edges in green, use \code{border="green"}. To suppress the drawing of
+  polygon edges, use \code{border=NA}.
+
+  When \code{x} is of type \code{"mask"}, it is plotted by
+  \code{\link[graphics]{image.default}}. The appearance of the image plot
+  can be controlled by passing arguments to
+  \code{\link[graphics]{image.default}}
+  through \code{...}. The default appearance can also be changed
+  by setting the parameter \code{par.binary} of
+  \code{\link{spatstat.options}}. 
+
+  To zoom in (to view only a subset of the window at higher
+  magnification), use the graphical arguments
+  \code{xlim} and \code{ylim} to specify the desired rectangular field of
+  view. (The actual field of view may be larger, depending on the
+  graphics device).
+}
+\section{Notes on Filled Polygons with Holes}{
+  The function \code{\link[graphics]{polygon}} can only handle
+  polygons without holes. To plot polygons with holes in a solid colour,
+  we have implemented two workarounds.
+  
+  \describe{
+    \item{polypath function:}{
+      The first workaround uses
+      the relatively new function \code{\link[graphics]{polypath}} which
+      \emph{does} have the capability to handle polygons with holes.
+      However, not all graphics devices support
+      \code{\link[graphics]{polypath}}.
+      The older devices \code{\link{xfig}} and \code{\link{pictex}}
+      do not support \code{\link[graphics]{polypath}}.
+      On a Windows system, the default graphics device
+#ifdef windows      
+      \code{\link{windows}} 
+#endif
+#ifndef windows      
+      \code{windows}
+#endif
+      supports \code{\link[graphics]{polypath}}.
+#ifdef unix
+      On a Linux system, the default graphics device
+      \code{X11(type="Xlib")} does \emph{not} support
+      \code{\link[graphics]{polypath}}
+      but \code{X11(type="cairo")} does support it.
+      See \code{\link{X11}} and the section on Cairo below.
+#endif
+    }
+    \item{polygon decomposition:}{
+      The other workaround involves decomposing the polygonal window
+      into pieces which do not have holes. This code is experimental
+      but works in all our test cases. If this code fails, a warning
+      will be issued, and the filled colours will not be plotted.
+    }
+  }
+}
+#ifdef unix
+\section{Cairo graphics on a Linux system}{
+  Linux systems support
+  the graphics device \code{X11(type="cairo")} (see \code{\link{X11}})
+  provided the external library \pkg{cairo} is installed
+  on the computer. See \code{www.cairographics.org}
+  for instructions on obtaining and installing \pkg{cairo}.  After having
+  installed \pkg{cairo} one needs to re-install \R from source so
+  that it has \pkg{cairo} capabilities.  To check whether your
+  current installation of R has \pkg{cairo} capabilities, type
+  (in \R) \code{capabilities()["cairo"]}.
+  The default type for \code{\link{X11}} is controlled by
+  \code{\link[grDevices]{X11.options}}.
+  You may find it convenient to
+  make \pkg{cairo} the default, e.g. via your \code{.Rprofile}.
+  The magic incantation to put into \code{.Rprofile} is
+  \preformatted{
+    setHook(packageEvent("graphics", "onLoad"),
+    function(...) grDevices::X11.options(type="cairo"))
+  }
+}
+#endif
+\seealso{
+  \code{\link{owin.object}},
+  \code{\link{plot.ppp}},
+  \code{\link[graphics]{polygon}},
+  \code{\link[graphics]{image.default}},
+  \code{\link{spatstat.options}}
+}
+\examples{
+  # rectangular window
+   plot(Window(nztrees))
+   abline(v=148, lty=2)
+
+  # polygonal window
+  w <- Window(demopat)
+  plot(w)
+  plot(w, col="red", border="green", lwd=2)
+  plot(w, hatch=TRUE, lwd=2)
+
+  # binary mask
+  we <- as.mask(w)
+  plot(we)
+  op <- spatstat.options(par.binary=list(col=grey(c(0.5,1))))
+  plot(we)
+  spatstat.options(op)
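+
+  # a sketch: fill without a boundary, then zoom in using xlim/ylim
+  # (the coordinate limits below are illustrative)
+  plot(w, col="grey", border=NA)
+  plot(Window(nztrees), xlim=c(100, 153), ylim=c(0, 50))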
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.plotppm.Rd b/man/plot.plotppm.Rd
new file mode 100644
index 0000000..d1e95b7
--- /dev/null
+++ b/man/plot.plotppm.Rd
@@ -0,0 +1,104 @@
+\name{plot.plotppm}
+\alias{plot.plotppm}
+\title{Plot a plotppm Object Created by plot.ppm}
+\description{
+  The function \code{\link{plot.ppm}} produces objects which specify plots
+  of fitted point process models.  The function \code{plot.plotppm}
+  carries out the actual plotting of these objects.
+}
+\usage{
+  \method{plot}{plotppm}(x, data = NULL, trend = TRUE, cif = TRUE,
+             se = TRUE, pause = interactive(),
+             how = c("persp", "image", "contour"),
+             \dots, pppargs)
+}
+\arguments{
+  \item{x}{
+    An object of class \code{"plotppm"} produced by
+    \code{\link{plot.ppm}()}.
+  }
+  \item{data}{
+    The point pattern (an object of class \code{"ppp"})
+    to which the point process model was fitted (by \code{\link{ppm}}).
+  }
+  \item{trend}{
+    Logical scalar; should the trend component of
+    the fitted model be plotted?
+  }
+  \item{cif}{
+    Logical scalar; should the complete conditional
+    intensity of the fitted model be plotted?
+  }
+  \item{se}{
+    Logical scalar; should the estimated standard error
+    of the fitted intensity be plotted?
+  }
+  \item{pause}{
+    Logical scalar indicating whether to pause with a prompt 
+    after each plot. Set \code{pause=FALSE} if plotting to a file.
+  }
+  \item{how}{
+    Character string or character vector indicating the style or styles of
+    plots to be performed.
+  }
+  \item{\dots}{
+    Extra arguments to the plotting functions
+    \code{\link{persp}}, \code{\link{image}} and \code{\link{contour}}.
+  }
+  \item{pppargs}{
+    List of extra arguments passed to \code{\link{plot.ppp}}
+    when displaying the original point pattern data.
+  }
+}
+\details{
+  If argument \code{data} is supplied then the point pattern will
+  be superimposed on the image and contour plots.
+
+  Sometimes a fitted model does not have a trend component, or the
+  trend component may constitute all of the conditional intensity (if
+  the model is Poisson).  In such cases the object \code{x} will not
+  contain a trend component, or will contain only a trend component.
+  This will also be the case if one of the arguments \code{trend}
+  and \code{cif} was set equal to \code{FALSE} in the call to
+  \code{plot.ppm()} which produced \code{x}.  If this is so then
+  only the item which is present will be plotted.  Explicitly setting
+  \code{trend=TRUE}, or \code{cif=TRUE}, respectively, will then give
+  an error.
+}
+\value{
+   None.
+}
+
+\section{Warning}{
+  An argument intended for one of the functions \code{persp},
+  \code{image} or \code{contour}, if supplied via the \code{\dots}
+  argument, is passed to all of the functions listed in \code{how},
+  and will not be recognised by the others.  This leads to a number
+  of annoying but harmless warning messages.  Arguments to \code{persp}
+  may instead be supplied via \code{\link{spatstat.options}()},
+  which avoids these warnings.
+}
+
+\author{\adrian and \rolf}
+\seealso{ \code{\link{plot.ppm}()} }
+\examples{
+ \dontrun{
+   m <- ppm(cells ~ 1, Strauss(0.05))
+   mpic <- plot(m)
+   # Perspective plot only, with altered parameters:
+   plot(mpic, how="persp", theta=-30, phi=40, d=4)
+   # All plots, with altered parameters for perspective plot:
+   op <- spatstat.options(par.persp=list(theta=-30, phi=40, d=4))
+   plot(mpic)
+   # Revert
+   spatstat.options(op)
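+   # superimpose the original data on the image and contour plots
+   # (a sketch; the 'data' argument is documented above)
+   plot(mpic, how="image", data=cells, pause=FALSE)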
+ }
+}
+\keyword{spatial}
+\keyword{hplot}
+\keyword{models}
diff --git a/man/plot.pp3.Rd b/man/plot.pp3.Rd
new file mode 100644
index 0000000..a5206c4
--- /dev/null
+++ b/man/plot.pp3.Rd
@@ -0,0 +1,81 @@
+\name{plot.pp3}
+\Rdversion{1.1}
+\alias{plot.pp3}
+\title{
+  Plot a Three-Dimensional Point Pattern
+}
+\description{
+  Plots a three-dimensional point pattern.
+}
+\usage{
+ \method{plot}{pp3}(x, ..., eye=NULL, org=NULL, theta=25, phi=15,
+                    type=c("p", "n", "h"),
+                    box.back=list(col="pink"),
+                    box.front=list(col="blue", lwd=2))
+}
+\arguments{
+  \item{x}{
+    Three-dimensional point pattern (object of class \code{"pp3"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link[graphics]{points}}
+    controlling the appearance of the points.
+  }
+  \item{eye}{
+    Optional. Eye position. A numeric vector of length 3 giving the
+    location from which the scene is viewed.
+  }
+  \item{org}{
+    Optional. Origin (centre) of the view. A numeric vector of length 3
+    giving the location that will appear at the centre of the view.
+  }
+  \item{theta,phi}{
+    Optional angular coordinates (in degrees) specifying the direction
+    from which the scene is viewed: \code{theta} is the azimuth
+    and \code{phi} is the colatitude. Ignored if \code{eye} is given.
+  }
+  \item{type}{
+    Type of plot: \code{type="p"} for points,
+    \code{type="h"} for points on vertical lines,
+    \code{type="n"} for box only.
+  }
+  \item{box.front,box.back}{
+    How to plot the three-dimensional box that contains the points.
+    A list of graphical arguments passed to \code{\link[graphics]{segments}},
+    or a logical value indicating whether or not to plot
+    the relevant part of the box. See Details.
+  }
+}
+\details{
+  This is the plot method for objects of class \code{"pp3"}.
+  It generates a two-dimensional plot of the point pattern \code{x}
+  and its containing box as if they had been viewed from the
+  location specified by \code{eye} (or from the direction
+  specified by \code{theta} and \code{phi}). 
+
+  The edges of the box at the \sQuote{back} of the scene (as viewed from the
+  eye position) are plotted first. Then the points are added. Finally the
+  remaining \sQuote{front} edges are plotted. The arguments
+  \code{box.back} and \code{box.front} specify graphical parameters for
+  drawing the back and front edges, respectively. Alternatively
+  \code{box.back=FALSE} specifies that the back edges shall not be drawn.
+  
+  Note that default values of arguments to \code{plot.pp3}
+  can be set by \code{\link{spatstat.options}("par.pp3")}.
+}
+\value{Null.}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{pp3}},
+  \code{\link{spatstat.options}}.
+}
+\examples{
+  X <- osteo$pts[[1]]
+  plot(X, main="Osteocyte lacunae, animal 1, brick 1",
+       cex=1.5, pch=16)
+  plot(X, main="", box.back=list(lty=3))
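+  # a sketch: a different viewing direction (angles are illustrative)
+  plot(X, main="", theta=60, phi=30, type="h")
+  # suppress the back edges of the box entirely
+  plot(X, main="", box.back=FALSE)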
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.ppm.Rd b/man/plot.ppm.Rd
new file mode 100644
index 0000000..750d245
--- /dev/null
+++ b/man/plot.ppm.Rd
@@ -0,0 +1,180 @@
+\name{plot.ppm}
+\alias{plot.ppm}
+\title{Plot a Fitted Point Process Model}
+\description{
+  Given a fitted point process model obtained by \code{\link{ppm}},
+  create spatial trend and conditional intensity surfaces of the model,
+  in a form suitable for plotting, and (optionally) plot these
+  surfaces.
+}
+\usage{
+  \method{plot}{ppm}(x, ngrid = c(40,40), superimpose = TRUE, 
+                  trend = TRUE, cif = TRUE, se = TRUE, pause = interactive(),
+                  how=c("persp","image", "contour"), plot.it = TRUE,
+                  locations = NULL, covariates=NULL, \dots)
+}
+\arguments{
+  \item{x}{
+    A fitted point process model, typically obtained from
+    the model-fitting algorithm \code{\link{ppm}}.
+    An object of class \code{"ppm"}.
+  }
+  \item{ngrid}{
+    The dimensions for a grid on which to evaluate,
+    for plotting, the spatial trend and conditional intensity.
+    A vector of 1 or 2 integers. If it is of length 1,
+    \code{ngrid} is replaced by \code{c(ngrid,ngrid)}.
+  }
+  \item{superimpose}{
+    logical flag; if \code{TRUE} (and if \code{plot.it=TRUE}) the
+    original data point pattern will be superimposed on the plots.
+  }
+  \item{trend}{
+    logical flag; if \code{TRUE}, the spatial trend surface will be produced.
+  }
+  \item{cif}{
+    logical flag; if \code{TRUE}, the conditional intensity surface will be
+    produced.
+  }
+  \item{se}{
+    logical flag; if \code{TRUE}, the estimated standard error of the
+    spatial trend surface will be produced.
+  }
+  \item{pause}{
+    logical flag indicating whether to pause with a prompt 
+    after each plot. Set \code{pause=FALSE} if plotting to a file.
+    (This flag is ignored if \code{plot.it=FALSE}).
+  }
+  \item{how}{
+    character string or character vector indicating the style or styles of
+    plots to be performed. Ignored if \code{plot.it=FALSE}.
+  }
+  \item{plot.it}{
+    logical scalar; should a plot be produced immediately?
+  }
+  \item{locations}{
+    If present, this determines the locations of the pixels
+    at which predictions are computed. It must be a binary pixel image
+    (an object of class \code{"owin"} with type \code{"mask"}).
+    (Incompatible with \code{ngrid}).
+  }
+  \item{covariates}{
+    Values of external covariates required by the fitted model.
+    Passed to \code{\link{predict.ppm}}.
+  }
+  \item{\dots}{
+    extra arguments to the plotting functions \code{\link{persp}},
+    \code{\link{image}} and \code{\link{contour}}.
+  }
+}
+\value{
+  An object of class \code{plotppm}.  Such objects may be plotted by
+  \code{\link{plot.plotppm}()}.
+
+  This is a list with components named \code{trend} and \code{cif},
+  either of which may
+  be missing. They will be missing if the corresponding component
+  does not make sense for the model, or if the corresponding
+  argument was set equal to \code{FALSE}.
+
+  Both \code{trend} and \code{cif} are lists of images.
+  If the model is an unmarked point process, then they are lists of
+  length 1, so that \code{trend[[1]]} is an image of the spatial trend
+  and \code{cif[[1]]} is an image of the conditional intensity.
+
+  If the model is a marked point process, then \code{trend[[i]]}
+  is an image of the spatial trend for the mark \code{m[i]},
+  and \code{cif[[i]]} is an image of the conditional intensity
+  for the mark \code{m[i]}, where \code{m} is the vector of levels
+  of the marks.
+}
+\details{
+  This is the \code{plot} method for the class \code{"ppm"}
+  (see \code{\link{ppm.object}} for details of this class).
+  
+  It invokes \code{\link{predict.ppm}} to compute the spatial 
+  trend and conditional intensity of the fitted point process model.
+  See \code{\link{predict.ppm}} for more explanation about spatial trend
+  and conditional intensity.
+ 
+  The default action is to create a rectangular grid
+  of points in (the bounding box of) the observation window of
+  the data point pattern, and evaluate the spatial trend and
+  conditional intensity of the fitted spatial point process model
+  \code{x} at these locations.  If the argument \code{locations}
+  is supplied, then the spatial trend
+  and conditional intensity are calculated at the grid of points
+  specified by this argument.
+
+  The argument \code{locations}, if present, should be a
+  binary image mask (an object of class \code{"owin"}
+  and type \code{"mask"}). This determines a rectangular grid
+  of locations, or a subset of such a grid, at which predictions
+  will be computed. Binary image masks
+  are conveniently created using \code{\link{as.mask}}.
+
+  The argument \code{covariates} gives the values of any spatial covariates
+  at the prediction locations.
+  If the trend formula in the fitted model 
+  involves spatial covariates (other than
+  the Cartesian coordinates \code{x}, \code{y})
+  then \code{covariates} is required.
+
+  The argument \code{covariates} has the same format and interpretation
+  as in \code{\link{predict.ppm}}. It may be
+  either a data frame (the number of whose rows must match
+  the number of pixels in \code{locations} multiplied by the number of
+  possible marks in the point pattern),  or a list of images.
+  If argument \code{locations}
+  is not supplied, and \code{covariates} \bold{is} supplied, then
+  it \bold{must} be a list of images.
+
+  If the fitted model was a marked (multitype) point process, then 
+  predictions are made for each possible mark value in turn.
+ 
+  If the fitted model had no spatial trend, then the default is 
+  to omit calculating this (flat) surface, unless \code{trend=TRUE}
+  is set explicitly.
+ 
+  If the fitted model was Poisson, so that there were no spatial interactions,
+  then the conditional intensity and spatial trend are identical, and the
+  default is to omit the conditional intensity, unless \code{cif=TRUE} is set
+  explicitly.
+
+  If \code{plot.it=TRUE} then \code{\link{plot.plotppm}()} is called
+  upon to plot the class \code{plotppm} object which is produced.
+  (That object is also returned, silently.)
+  
+  Plots are produced successively using \code{\link{persp}},
+  \code{\link{image}} and \code{\link{contour}} (or only a
+  selection of these three, if \code{how} is given).  Extra
+  graphical parameters controlling the display may be passed
+  directly via the arguments \code{...} or indirectly reset using
+  \code{\link{spatstat.options}}.
+}
+\seealso{
+  \code{\link{plot.plotppm}},
+  \code{\link{ppm}},
+  \code{\link{ppm.object}},
+  \code{\link{predict.ppm}},
+  \code{\link{print.ppm}},
+  \code{\link{persp}},
+  \code{\link{image}},
+  \code{\link{contour}},
+  \code{\link{plot}},
+  \code{\link{spatstat.options}}
+}
+\section{Warnings}{
+  See warnings in \code{\link{predict.ppm}}.
+}
+\examples{
+ m <- ppm(cells ~1, Strauss(0.05))
+ pm <- plot(m) # The object ``pm'' will be plotted as well as saved
+               # for future plotting.
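+ # a sketch: a finer grid, image style only, no pausing
+ # (the ngrid, how and pause arguments are documented above)
+ pm2 <- plot(m, ngrid=64, how="image", pause=FALSE)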
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{hplot}
+\keyword{models}
diff --git a/man/plot.ppp.Rd b/man/plot.ppp.Rd
new file mode 100644
index 0000000..dc3a3ea
--- /dev/null
+++ b/man/plot.ppp.Rd
@@ -0,0 +1,386 @@
+\name{plot.ppp}
+\alias{plot.ppp}
+\title{Plot a Spatial Point Pattern}
+\description{
+  Plot a two-dimensional spatial point pattern
+}
+\usage{
+ \method{plot}{ppp}(x, main, \dots, clipwin=NULL,
+                    chars=NULL, cols=NULL,
+                    use.marks=TRUE, which.marks=NULL,
+                    add=FALSE, type=c("p","n"),
+                    legend=TRUE,
+                    leg.side=c("left", "bottom", "top", "right"),
+                    leg.args=list(),
+                    symap=NULL, maxsize=NULL, meansize=NULL, markscale=NULL,
+                    zap=0.01,
+                    show.window=show.all, show.all=!add, do.plot=TRUE,
+                    multiplot=TRUE)
+}
+\arguments{
+  \item{x}{
+    The spatial point pattern to be plotted.
+    An object of class \code{"ppp"},
+    or data which can be converted into 
+    this format by \code{\link{as.ppp}()}.
+  }
+  \item{main}{
+    text to be displayed as a title above the plot.
+  }
+  \item{\dots}{
+    extra arguments that will be passed to the plotting functions
+    \code{\link{plot.default}}, \code{\link{points}} and/or
+    \code{\link{symbols}}.
+  }
+  \item{clipwin}{
+    Optional. A window (object of class \code{"owin"}).
+    Only this subset of the image will be displayed.
+  }
+  \item{chars}{
+    plotting character(s) used to plot points.
+  }
+  \item{cols}{
+    the colour(s) used to plot points.
+  }
+  \item{use.marks}{
+    logical flag; if \code{TRUE}, plot points using a different
+    plotting symbol for each mark;
+    if \code{FALSE}, only the locations of the points will be plotted,
+    using \code{\link{points}()}.
+  }
+  \item{which.marks}{
+    Index determining which column of marks to use,
+    if the marks of \code{x} are a data frame.
+    A character or integer vector identifying one or more
+    columns of marks. 
+    If \code{add=FALSE} then
+    the default is to plot all columns of marks, in a series of separate
+    plots.
+    If \code{add=TRUE} then only one column of marks can be plotted,
+    and the default is \code{which.marks=1}
+    indicating the first column of marks.
+  }
+  \item{add}{
+    logical flag; if \code{TRUE},
+    just the points are plotted, over the existing plot.
+    A new plot is not created, and
+    the window is not plotted.
+  }
+  \item{type}{
+    Type of plot: either \code{"p"} or \code{"n"}.
+    If \code{type="p"} (the default), both the points and the observation window
+    are plotted. If \code{type="n"}, only the window is plotted.
+  }
+  \item{legend}{
+    Logical value indicating whether to add a legend showing the mapping
+    between mark values and graphical symbols (for a marked point pattern).
+  }
+  \item{leg.side}{
+    Position of legend relative to main plot.
+  }
+  \item{leg.args}{
+    List of additional arguments passed to \code{\link{plot.symbolmap}}
+    or \code{\link{symbolmap}} to control the legend.
+    In addition to arguments documented under
+    \code{\link{plot.symbolmap}}, and graphical arguments recognised
+    by \code{\link{symbolmap}}, the list may also include the argument
+    \code{sep} giving the separation between the main plot and the
+    legend, or \code{sep.frac} giving the separation as a fraction
+    of the relevant dimension (width or height) of the main plot.
+  }
+  \item{symap}{
+    Optional. The graphical symbol map to be applied to the marks.
+    An object of class \code{"symbolmap"}; see
+    \code{\link{symbolmap}}.
+  }
+  \item{maxsize}{
+    \emph{Maximum} physical size of the circles/squares plotted
+    when \code{x} is a marked point pattern with 
+    numerical marks.
+    Incompatible with \code{meansize} and \code{markscale}.
+    Ignored if \code{symap} is given.
+  }
+  \item{meansize}{
+    \emph{Average} physical size of the circles/squares plotted
+    when \code{x} is a marked point pattern with 
+    numerical marks.
+    Incompatible with \code{maxsize} and \code{markscale}.
+    Ignored if \code{symap} is given.
+  }
+  \item{markscale}{
+    physical scale factor determining the sizes of the
+    circles/squares plotted when \code{x} is a marked point pattern with 
+    numerical marks. Mark value will be multiplied by \code{markscale}
+    to determine physical size.
+    Incompatible with \code{maxsize} and \code{meansize}.
+    Ignored if \code{symap} is given.
+  }
+  \item{zap}{
+    Fraction between 0 and 1.
+    When \code{x} is a marked point pattern with numerical marks,
+    \code{zap} is the smallest mark value
+    (expressed as a fraction of the maximum possible mark) that will
+    be plotted. 
+    Any points which have marks smaller in absolute value
+    than \code{zap * max(abs(marks(x)))} will not be plotted.
+  }
+  \item{show.window}{
+    Logical value indicating whether to plot the observation
+    window of \code{x}.
+  }
+  \item{show.all}{
+    Logical value indicating whether to plot everything
+    including the main title and the observation window of \code{x}.
+  }
+  \item{do.plot}{
+    Logical value determining whether to actually perform the plotting.
+  }
+  \item{multiplot}{
+    Logical value giving permission to display multiple plots.
+  }
+}
+\value{
+  (Invisible) object of class \code{"symbolmap"}
+  giving the correspondence between 
+  mark values and plotting characters.
+}
+\details{
+  This is the \code{plot} method for 
+  point pattern datasets (of class \code{"ppp"}, see \code{\link{ppp.object}}).
+
+  First the observation window \code{Window(x)} is plotted
+  (if \code{show.window=TRUE}).
+  Then the points themselves are plotted,
+  in a fashion that depends on their marks,
+  as follows.
+  \describe{
+    \item{unmarked point pattern:}{
+      If the point pattern does not have marks, or if \code{use.marks = FALSE},
+      then the locations of all points will be plotted 
+      using a single plot character.
+    }
+    \item{multitype point pattern:}{
+      If \code{x$marks} is a factor, then 
+      each level of the factor is 
+      represented by a different plot character.
+    }
+    \item{continuous marks:}{
+      If \code{x$marks} is a numeric vector,
+      the marks are rescaled to the unit interval and
+      each point is represented by a circle 
+      with \emph{diameter} proportional to the rescaled mark
+      (if the value is positive) or a square with \emph{side length}
+      proportional to the absolute value of the rescaled mark
+      (if the value is negative).
+    }
+    \item{other kinds of marks:}{
+      If \code{x$marks} is neither numeric nor a factor,
+      then each possible mark will be represented by a
+      different plotting character. The default is to
+      represent the \eqn{i}th smallest mark value by
+      \code{points(..., pch=i)}.
+    }
+  }
+
+  If there are several columns of marks, and if \code{which.marks} is
+  missing or \code{NULL}, then
+  \itemize{
+    \item 
+    if \code{add=FALSE} and \code{multiplot=TRUE}
+    the default is to plot all columns of marks, in a series of separate
+    plots, placed side-by-side. The plotting is coordinated by
+    \code{\link{plot.listof}}, which calls \code{plot.ppp} to make each of
+    the individual plots.
+    \item
+    Otherwise, only one column of marks can be plotted,
+    and the default is \code{which.marks=1}
+    indicating the first column of marks.
+  }
+
+  Plotting of the window \code{Window(x)} is performed by
+  \code{\link{plot.owin}}. This plot may be modified 
+  through the \code{...} arguments. In particular the
+  extra argument \code{border} determines
+  the colour of the window, if the window is not a binary mask.
+
+  Plotting of the points themselves is performed
+  by the function \code{\link{points}}, except for the case of
+  continuous marks, where it is performed by \code{\link{symbols}}.
+  Their plotting behaviour may be modified through the \code{...}
+  arguments.
+
+  The argument \code{chars} determines the plotting character
+  or characters used to display the points (in all cases except
+  for the case of continuous marks). For an unmarked point pattern,
+  this should be a single integer or character determining a
+  plotting character (see \code{par("pch")}).
+  For a multitype point pattern, \code{chars} should be a vector
+  of integers or characters, of the same length
+  as \code{levels(x$marks)}, and then the \eqn{i}th level or type 
+  will be plotted using character \code{chars[i]}.
+
+  If \code{chars} is absent, but there is an extra argument
+  \code{pch}, then this will determine the plotting character for
+  all points.
+  
+  The argument \code{cols} determines the colour or colours used to
+  display the points. For an unmarked point pattern,
+  \code{cols} should be a character string
+  determining a colour. For a multitype point pattern, \code{cols}
+  should be a character vector, of the same length
+  as \code{levels(marks(x))}: that is, there is one colour for each
+  possible mark value. The \eqn{i}th level or type will
+  be plotted using colour \code{cols[i]}. For a point pattern with
+  continuous marks, \code{cols} can be either a character string
+  or a character vector specifying colour values: the range of mark
+  values will be mapped to the specified colours.
+
+  If \code{cols} is absent, the colours used to plot the
+  points may be determined by the extra argument \code{fg}
+  (for multitype point patterns) or the extra argument \code{col}
+  (for all other cases). Note that specifying \code{col} will also
+  apply this colour to the window itself.
+
+  The default colour for the points is a semi-transparent grey,
+  if this is supported by the plot device. This behaviour can be
+  suppressed (so that the default colour is non-transparent)
+  by setting \code{spatstat.options(transparent=FALSE)}.
+
+  The arguments \code{maxsize}, \code{meansize} and \code{markscale}
+  are mutually incompatible. They control the physical size of the circles and
+  squares which represent the marks in a point pattern with continuous
+  marks. The size of a circle is defined as its \emph{diameter};
+  the size of a square is its side length.
+  If \code{markscale} is given, then a mark value of \code{m}
+  is plotted as a circle of diameter \code{m * markscale}
+  (if \code{m} is positive) or a square of side \code{abs(m) * markscale}
+  (if \code{m} is negative). If \code{maxsize} is given, then the
+  largest mark in absolute value, \code{mmax=max(abs(marks(x)))},
+  will be scaled to have physical size \code{maxsize}.
+  If \code{meansize} is given, then the
+  average absolute mark value, \code{mmean=mean(abs(marks(x)))},
+  will be scaled to have physical size \code{meansize}.
+
+  The user can set the default values of these plotting parameters
+  using \code{\link{spatstat.options}("par.points")}.
+  
+  To zoom in (to view only a subset of the point pattern at higher
+  magnification), use the graphical arguments
+  \code{xlim} and \code{ylim} to specify the rectangular field of view.
+
+  The value returned by this plot function is an object of
+  class \code{"symbolmap"} representing the mapping from mark values
+  to graphical symbols. See \code{\link{symbolmap}}.
+  It can be used to make a suitable legend,
+  or to ensure that two plots use the same graphics map.
+}
+\section{Removing White Space Around The Plot}{
+  A frequently-asked question is: How do I remove the white space around
+  the plot? Currently \code{plot.ppp} uses the base graphics system of
+  \R, so the space around the plot is controlled by parameters 
+  to \code{\link{par}}. To reduce the white space, change the
+  parameter \code{mar}. Typically, \code{par(mar=rep(0.5, 4))} is
+  adequate, if there are no annotations or titles outside the window.
+}
+\seealso{
+  \code{\link{iplot}},
+  \code{\link{ppp.object}},
+  \code{\link{plot}},
+  \code{\link{par}},
+  \code{\link{points}},
+  \code{\link{plot.owin}},
+  \code{\link{symbols}}
+}
+\examples{
+   plot(cells)
+
+   plot(cells, pch=16)
+
+   # make the plotting symbols larger (for publication at reduced scale)
+   plot(cells, cex=2)
+
+   # set it in spatstat.options
+   oldopt <- spatstat.options(par.points=list(cex=2))
+   plot(cells)
+   spatstat.options(oldopt)
+
+   # multitype 
+   plot(lansing)
+
+   # marked by a real number
+   plot(longleaf)
+
+   # just plot the points
+   plot(longleaf, use.marks=FALSE)
+   plot(unmark(longleaf)) # equivalent
+
+   # point pattern with multiple marks
+   plot(finpines)
+   plot(finpines, which.marks="height")
+
+   # controlling COLOURS of points
+   plot(cells, cols="blue")
+   plot(lansing, cols=c("black", "yellow", "green", 
+                        "blue","red","pink"))
+   plot(longleaf, fg="blue")
+
+   # make window purple
+   plot(lansing, border="purple")
+   # make everything purple
+   plot(lansing, border="purple", cols="purple", col.main="purple",
+                 leg.args=list(col.axis="purple"))
+ 
+   # controlling PLOT CHARACTERS for multitype pattern
+   plot(lansing, chars = 11:16)
+   plot(lansing, chars = c("o","h","m",".","o","o"))
+
+   ## multitype pattern mapped to symbols
+   plot(amacrine, shape=c("circles", "squares"), size=0.04)
+   plot(amacrine, shape="arrows", direction=c(0,90), size=0.07)
+
+   ## plot trees as trees!
+   plot(lansing, shape="arrows", direction=90, cols=1:6)
+
+   # controlling MARK SCALE for pattern with numeric marks
+   plot(longleaf, markscale=0.1)
+   plot(longleaf, maxsize=5)
+   plot(longleaf, meansize=2)
+
+   # draw circles of diameter equal to nearest neighbour distance
+   plot(cells \%mark\% nndist(cells), markscale=1, legend=FALSE)
+
+   # inspecting the symbol map
+   v <- plot(amacrine)
+   v
+
+   ## variable colours ('cols' not 'col')
+   plot(longleaf, cols=function(x) ifelse(x < 30, "red", "black"))
+
+   ## re-using the same mark scale
+   a <- plot(longleaf)
+   juveniles <- longleaf[marks(longleaf) < 30]
+   plot(juveniles, symap=a)
+
+   ## numerical marks mapped to symbols of fixed size with variable colour
+   ra <- range(marks(longleaf))
+   colmap <- colourmap(terrain.colors(20), range=ra)
+   ## filled plot characters are the codes 21-25
+   ## fill colour is indicated by 'bg'
+   sy <- symbolmap(pch=21, bg=colmap, range=ra)
+   plot(longleaf, symap=sy)
+
+   ## or more compactly...
+   plot(longleaf, bg=terrain.colors(20), pch=21, cex=1)
+
+   ## clipping
+   plot(humberside)
+   B <- owin(c(4810, 5190), c(4180, 4430))
+   plot(B, add=TRUE, border="red")
+   plot(humberside, clipwin=B, main="Humberside (clipped)")
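+
+   ## zooming: restrict the field of view with xlim/ylim
+   ## (cells lies in the unit square; the limits below are illustrative)
+   plot(cells, xlim=c(0, 0.5), ylim=c(0, 0.5))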
+}
+
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.psp.Rd b/man/plot.psp.Rd
new file mode 100644
index 0000000..7d99073
--- /dev/null
+++ b/man/plot.psp.Rd
@@ -0,0 +1,122 @@
+\name{plot.psp}
+\alias{plot.psp}
+\title{Plot a Spatial Line Segment Pattern}
+\description{
+  Plot a two-dimensional line segment pattern
+}
+\usage{
+  \method{plot}{psp}(x, \dots, main, add=FALSE,
+                     show.all=!add, show.window=show.all,
+                     which.marks=1, ribbon=show.all,
+                     ribsep=0.15, ribwid=0.05, ribn=1024,
+                     do.plot=TRUE)
+}
+\arguments{
+  \item{x}{
+    The line segment pattern to be plotted.
+    An object of class \code{"psp"},
+    or data which can be converted into 
+    this format by \code{\link{as.psp}()}.
+  }
+  \item{\dots}{
+    extra arguments that will be passed to the plotting functions
+    \code{\link{segments}} (to plot the segments)
+    and 
+    \code{\link{plot.owin}} (to plot the observation window).
+  }
+  \item{main}{
+    Character string giving a title for the plot.
+  }
+  \item{add}{
+    Logical. If \code{TRUE}, the current plot is not erased;
+    the segments are plotted on top of the
+    current plot, and the window is not plotted (by default).
+  }
+  \item{show.all}{
+    Logical value specifying whether to plot everything
+    including the window, main title, and colour ribbon.
+  }
+  \item{show.window}{
+    Logical value specifying whether to plot the window.
+  }
+  \item{which.marks}{
+    Index determining which column of marks to use,
+    if the marks of \code{x} are a data frame.
+    A character string or an integer.
+    Defaults to \code{1} indicating the first column of marks.
+  }
+  \item{ribbon}{
+    Logical flag indicating whether to display a ribbon
+    showing the colour map (in which mark values are associated with colours).
+  }
+  \item{ribsep}{
+    Factor controlling the space between the ribbon and the main plot.
+  }
+  \item{ribwid}{
+    Factor controlling the width of the ribbon.
+  }
+  \item{ribn}{
+    Number of different values to display in the ribbon.
+  }
+  \item{do.plot}{
+    Logical value indicating whether to actually perform the plot.
+  }
+}
+\value{
+  (Invisibly) a colour map object specifying the association between
+  marks and colours, if any. The return value also has an attribute
+  \code{"bbox"} giving a bounding box for the plot.
+}
+\details{
+  This is the \code{plot} method for 
+  line segment pattern datasets (of class \code{"psp"},
+  see \code{\link{psp.object}}).
+  It plots both the observation window \code{Window(x)}
+  and the line segments themselves.
+  
+  Plotting of the window \code{Window(x)} is performed by
+  \code{\link{plot.owin}}. This plot may be modified 
+  through the \code{...} arguments. 
+
+  Plotting of the segments themselves is performed
+  by the standard R function \code{\link{segments}}.
+  Its plotting behaviour may also be modified through the \code{...}
+  arguments.
+
+  For a \emph{marked} line segment pattern
+  (i.e. if \code{marks(x)} is not \code{NULL})
+  the line segments are plotted in colours
+  determined by the mark values. 
+  If \code{marks(x)} is a data frame, the default is to use the first
+  column of \code{marks(x)} to determine the colours. To specify another
+  column, use the argument \code{which.marks}.
+  The colour map (associating mark values with colours) will be
+  displayed as a vertical colour ribbon to the right of the plot, if
+  \code{ribbon=TRUE}. 
+}
+\seealso{
+  \code{\link{psp.object}},
+  \code{\link{plot}},
+  \code{\link{par}},
+  \code{\link{plot.owin}},
+  \code{\link{symbols}}
+}
+\examples{
+  X <- psp(runif(20), runif(20), runif(20), runif(20), window=owin())
+  plot(X)
+  plot(X, lwd=3)
+  lettuce <- sample(letters[1:4], 20, replace=TRUE)
+  marks(X) <- data.frame(A=1:20, B=factor(lettuce))
+  plot(X)
+  plot(X, which.marks="B")
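+  # a sketch: choose the mark column and suppress the colour ribbon
+  # (the 'ribbon' argument is documented above)
+  plot(X, which.marks="A", ribbon=FALSE)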
+}
+
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.quad.Rd b/man/plot.quad.Rd
new file mode 100644
index 0000000..1d8908a
--- /dev/null
+++ b/man/plot.quad.Rd
@@ -0,0 +1,83 @@
+\name{plot.quad}
+\alias{plot.quad}
+\title{Plot a Spatial Quadrature Scheme}
+\description{
+  Plot a two-dimensional spatial quadrature scheme.
+}
+\usage{
+  \method{plot}{quad}(x, ..., main, add=FALSE, dum=list(), tiles=FALSE)
+}
+\arguments{
+  \item{x}{
+    The spatial quadrature scheme to be plotted.
+    An object of class \code{"quad"}.
+  }
+  \item{\dots}{
+    extra arguments controlling the plotting
+    of the data points of the quadrature scheme.
+  }
+  \item{main}{
+    text to be displayed as a title above the plot.
+  }
+  \item{add}{
+    Logical value indicating whether the graphics should be added to the
+    current plot if there is one (\code{add=TRUE})
+    or whether a new plot should be initialised (\code{add=FALSE}, the default).
+  }
+  \item{dum}{
+    list of extra arguments controlling the plotting of the dummy points of the
+    quadrature scheme. See below.
+  }
+  \item{tiles}{
+    Logical value indicating whether to display the tiles used to
+    compute the quadrature weights.
+  }
+}
+\value{
+  \code{NULL}.
+}
+\details{
+  This is the \code{plot} method for 
+  quadrature schemes (objects of class \code{"quad"},
+  see \code{\link{quad.object}}).
+
+  First the data points of the quadrature scheme
+  are plotted (in their observation window) using 
+  \code{\link{plot.ppp}} with any arguments specified in \code{...}
+
+  Then the dummy points of the quadrature scheme are plotted
+  using \code{\link{plot.ppp}} with any arguments specified in
+  \code{dum}.
+
+  By default the dummy points are superimposed onto the plot of data
+  points. This can be overridden by including the argument
+  \code{add=FALSE} in the list \code{dum} as shown in the examples.
+  In this case the data and dummy point patterns are plotted separately.
+
+  See \code{\link[graphics]{par}} and \code{\link{plot.ppp}}
+  for other possible arguments controlling the plots.
+}
+\seealso{
+  \code{\link{quad.object}},
+  \code{\link{plot.ppp}},
+  \code{\link[graphics]{par}}
+}
+\examples{
+   data(nztrees)
+   Q <- quadscheme(nztrees)
+
+   plot(Q, main="NZ trees: quadrature scheme")
+
+   oldpar <- par(mfrow=c(2,1))
+   plot(Q, main="NZ trees", dum=list(add=FALSE))
+   par(oldpar)
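+
+   # a sketch: display the tiles used to compute the quadrature weights
+   plot(Q, tiles=TRUE, main="quadrature tiles")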
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.quadratcount.Rd b/man/plot.quadratcount.Rd
new file mode 100644
index 0000000..71d2452
--- /dev/null
+++ b/man/plot.quadratcount.Rd
@@ -0,0 +1,84 @@
+\name{plot.quadratcount}
+\alias{plot.quadratcount}
+\title{
+  Plot Quadrat Counts
+}
+\description{
+  Given a table of quadrat counts for a spatial point pattern,
+  plot the quadrats which were used, and display the quadrat count
+  as text in the centre of each quadrat.
+}
+\usage{
+\method{plot}{quadratcount}(x, \dots, add = FALSE,
+              entries = as.vector(t(as.table(x))),
+              dx = 0, dy = 0, show.tiles = TRUE,
+              textargs = list())
+}
+\arguments{
+  \item{x}{
+    Object of class \code{"quadratcount"}
+    produced by the function \code{\link{quadratcount}}.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link{plot.tess}}
+    to plot the quadrats.
+  }
+  \item{add}{
+    Logical. Whether to add the graphics to an existing plot.
+  }
+  \item{entries}{
+    Vector of numbers to be plotted in each quadrat.
+    The default is to plot the quadrat counts.
+  }
+  \item{dx,dy}{
+    Horizontal and vertical displacement of text
+    relative to centroid of quadrat.
+  }
+  \item{show.tiles}{
+    Logical value indicating whether to plot the quadrats.
+  }
+  \item{textargs}{
+    List containing extra arguments
+    passed to \code{\link[graphics]{text.default}}
+    to control the annotation.
+  }
+}
+\details{
+  This is the plot method for the objects 
+  of class \code{"quadratcount"} that are
+  produced by the function \code{\link{quadratcount}}.
+  Given a spatial point pattern, \code{\link{quadratcount}}
+  divides the observation window into disjoint tiles or quadrats,
+  counts the number of points in each quadrat, and stores the
+  result as a contingency table which also belongs to the
+  class \code{"quadratcount"}.
+
+  First the quadrats are plotted
+  (provided \code{show.tiles=TRUE}, the default).
+  This display can be controlled by passing additional arguments \code{\dots}
+  to \code{\link{plot.tess}}.
+  
+  Then the quadrat counts are printed using
+  \code{\link[graphics]{text.default}}. This display can be controlled
+  using the arguments \code{dx,dy} and \code{textargs}.
+}
+\value{
+  Null.
+}
+\seealso{
+  \code{\link{quadratcount}},
+  \code{\link{plot.tess}},
+  \code{\link[graphics]{text.default}},
+  \code{\link{plot.quadrattest}}.
+}
+\examples{
+   plot(quadratcount(swedishpines, 5))
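+   # a sketch: control the annotation via textargs (values illustrative)
+   plot(quadratcount(swedishpines, 5), textargs=list(col="blue", font=2))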
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.quadrattest.Rd b/man/plot.quadrattest.Rd
new file mode 100644
index 0000000..e9efd73
--- /dev/null
+++ b/man/plot.quadrattest.Rd
@@ -0,0 +1,62 @@
+\name{plot.quadrattest}
+\alias{plot.quadrattest}
+\title{
+  Display the Result of a Quadrat Counting Test
+}
+\description{
+  Given the result of a quadrat counting test,
+  graphically display the quadrats that were used, the 
+  observed and expected counts, and the residual in each quadrat.
+}
+\usage{
+ \method{plot}{quadrattest}(x, ..., textargs=list())
+}
+\arguments{
+  \item{x}{
+    Object of class \code{"quadrattest"} containing the result
+    of \code{\link{quadrat.test}}.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link{plot.tess}} to
+    control the display of the quadrats.
+  }
+  \item{textargs}{
+    List of additional arguments passed to
+    \code{\link[graphics]{text.default}}
+    to control the appearance of the text.
+  }
+}
+\details{
+  This is the plot method for objects
+  of class \code{"quadrattest"}. Such an object is produced by
+  \code{\link{quadrat.test}} and represents the result of
+  a \eqn{\chi^2}{chi^2} test for a spatial point pattern.
+
+  The quadrats are first plotted using \code{\link{plot.tess}}.
+  Then in each quadrat, the observed and expected counts
+  and the Pearson residual are displayed as text using
+  \code{\link[graphics]{text.default}}.
+  Observed count is displayed at top left; expected count at top right;
+  and Pearson residual at bottom. 
+}
+\value{
+  Null.
+}
+\examples{
+   plot(quadrat.test(swedishpines, 3))
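+   # a sketch: shrink the text annotations (the cex value is illustrative)
+   plot(quadrat.test(swedishpines, 3), textargs=list(cex=0.8))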
+}
+\seealso{
+  \code{\link{quadrat.test}},
+  \code{\link{plot.tess}},
+  \code{\link[graphics]{text.default}},
+  \code{\link{plot.quadratcount}}
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{htest}
+\keyword{hplot}
diff --git a/man/plot.rppm.Rd b/man/plot.rppm.Rd
new file mode 100644
index 0000000..85fa5bb
--- /dev/null
+++ b/man/plot.rppm.Rd
@@ -0,0 +1,74 @@
+\name{plot.rppm}
+\alias{plot.rppm}
+\title{
+  Plot a Recursively Partitioned Point Process Model
+}
+\description{
+  Given a model which has been fitted to point pattern data
+  by recursive partitioning, plot the partition tree
+  or the fitted intensity.
+}
+\usage{
+\method{plot}{rppm}(x, \dots, what = c("tree", "spatial"), treeplot=NULL)
+}
+\arguments{
+  \item{x}{
+    Fitted point process model of class \code{"rppm"}
+    produced by the function \code{\link{rppm}}.
+  }
+  \item{what}{
+    Character string (partially matched) specifying whether to plot the
+    partition tree or the fitted intensity.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link[rpart]{plot.rpart}}
+    and \code{\link[rpart]{text.rpart}} (if \code{what="tree"})
+    or passed to \code{\link{plot.im}} (if \code{what="spatial"})
+    controlling the appearance of the plot.
+  }
+  \item{treeplot}{
+    Optional. A function to be used to plot and label the partition tree,
+    replacing the two functions  \code{\link[rpart]{plot.rpart}}
+    and \code{\link[rpart]{text.rpart}}.
+  }
+}
+\details{
+  If \code{what="tree"} (the default), the partition tree will be plotted
+  using \code{\link[rpart]{plot.rpart}}, and labelled using
+  \code{\link[rpart]{text.rpart}}.
+
+  If the argument \code{treeplot} is
+  given, then plotting and labelling will be performed by
+  \code{treeplot} instead. A good choice is the function
+  \code{prp} in package \pkg{rpart.plot}.
+
+  If \code{what="spatial"}, the predicted intensity
+  will be computed using \code{\link{predict.rppm}}, and
+  this intensity will be plotted as an image using \code{\link{plot.im}}.
+}
+\value{
+  If \code{what="tree"}, a list containing \code{x} and \code{y}
+  coordinates of the plotted nodes of the tree.
+  If \code{what="spatial"}, the return value of \code{\link{plot.im}}.
+}
+\author{
+  \spatstatAuthors
+}
+\seealso{
+  \code{\link{rppm}}
+}
+\examples{
+    # Murchison gold data
+    mur <- solapply(murchison, rescale, s=1000, unitname="km")
+    mur$dfault <- distfun(mur$faults)
+    # 
+    fit <- rppm(gold ~ dfault + greenstone, data=mur)
+    #
+    opa <- par(mfrow=c(1,2))
+    plot(fit)
+    plot(fit, what="spatial")
+    par(opa)
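+    # a sketch: when what="spatial", extra arguments go to plot.im
+    plot(fit, what="spatial", ribbon=FALSE, main="fitted intensity")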
+}
+\keyword{spatial}
+\keyword{hplot}
+\keyword{models}
diff --git a/man/plot.scan.test.Rd b/man/plot.scan.test.Rd
new file mode 100644
index 0000000..f262ac2
--- /dev/null
+++ b/man/plot.scan.test.Rd
@@ -0,0 +1,85 @@
+\name{plot.scan.test}
+\alias{plot.scan.test}
+\alias{as.im.scan.test}
+\title{
+  Plot Result of Scan Test
+}
+\description{
+  Computes or plots an image showing the
+  likelihood ratio test statistic for the scan test,
+  or the optimal circle radius. 
+}
+\usage{
+ \method{plot}{scan.test}(x, \dots, what=c("statistic", "radius"),
+               do.window = TRUE)
+
+ \method{as.im}{scan.test}(X, \dots, what=c("statistic", "radius"))
+}
+\arguments{
+  \item{x,X}{
+    Result of a scan test. An object of class \code{"scan.test"}
+    produced by \code{\link{scan.test}}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot.im}} to control the appearance
+    of the plot.
+  }
+  \item{what}{
+    Character string indicating whether to produce an image of the
+    (profile) likelihood ratio test statistic (\code{what="statistic"},
+    the default) or an image of the optimal value of circle radius
+    (\code{what="radius"}).
+  }
+  \item{do.window}{
+    Logical value indicating whether to plot the original window
+    of the data as well.
+  }
+}
+\details{
+  These functions extract, and plot, the spatially-varying value
+  of the likelihood ratio test statistic which forms the basis of
+  the scan test.
+
+  If the test result \code{X} was based on circles of
+  the same radius \code{r}, then \code{as.im(X)} is a pixel image
+  of the likelihood ratio test statistic as a function of the
+  position of the centre of the circle.
+
+  If the test result \code{X} was based on circles of
+  several different radii \code{r}, then \code{as.im(X)} is a pixel image
+  of the profile (maximum value over all radii \code{r})
+  likelihood ratio test statistic as a function of the
+  position of the centre of the circle, and
+  \code{as.im(X, what="radius")} is a pixel image giving
+  for each location \eqn{u} the value of \code{r} which maximised
+  the likelihood ratio test statistic at that location.
+
+  The \code{plot} method plots the corresponding image.
+}
+\value{
+  The value of \code{as.im.scan.test} is a pixel image (object of
+  class \code{"im"}). The value of \code{plot.scan.test} is \code{NULL}.
+}
+\author{\adrian and \rolf}
+\seealso{
+  \code{\link{scan.test}},
+  \code{\link{scanLRTS}}
+}
+\examples{
+   if(interactive()) {
+     a <- scan.test(redwood, seq(0.04, 0.1, by=0.01),
+                    method="poisson", nsim=19)
+   } else {
+     a <- scan.test(redwood, c(0.05, 0.1), method="poisson", nsim=2)
+   }
+   plot(a)
+   as.im(a)
+   plot(a, what="radius")
+}
+\keyword{htest}
+\keyword{spatial}
diff --git a/man/plot.slrm.Rd b/man/plot.slrm.Rd
new file mode 100644
index 0000000..ba5a3d4
--- /dev/null
+++ b/man/plot.slrm.Rd
@@ -0,0 +1,60 @@
+\name{plot.slrm}
+\Rdversion{1.1}
+\alias{plot.slrm}
+\title{
+  Plot a Fitted Spatial Logistic Regression
+}
+\description{
+  Plots a fitted Spatial Logistic Regression model.
+}
+\usage{
+  \method{plot}{slrm}(x, ..., type = "intensity")
+}
+
+\arguments{
+  \item{x}{
+    a fitted spatial logistic regression model.
+    An object of class \code{"slrm"}.
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{\link{plot.im}} to
+    control the appearance of the plot.
+  }
+  \item{type}{
+    Character string (partially) matching one of 
+    \code{"probabilities"}, \code{"intensity"} or \code{"link"}.
+  }
+}
+\details{
+  This is a method for \code{\link{plot}} for fitted spatial logistic
+  regression models (objects of class \code{"slrm"}, usually obtained
+  from the function \code{\link{slrm}}).
+
+  This function plots the result of \code{\link{predict.slrm}}.
+}
+\value{
+  None.
+}
+\seealso{
+  \code{\link{slrm}}, 
+  \code{\link{predict.slrm}}, 
+  \code{\link{plot.im}}
+}
+\examples{
+   data(copper)
+   X <- copper$SouthPoints
+   Y <- copper$SouthLines
+   Z <- distmap(Y)
+   fit <- slrm(X ~ Z)
+   plot(fit)
+   plot(fit, type="link")
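+   # a sketch: fitted probabilities (type is partially matched)
+   plot(fit, type="probabilities")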
+}
+\author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf}
+\keyword{spatial}
+\keyword{hplot}
+\keyword{models}
diff --git a/man/plot.solist.Rd b/man/plot.solist.Rd
new file mode 100644
index 0000000..830d319
--- /dev/null
+++ b/man/plot.solist.Rd
@@ -0,0 +1,225 @@
+\name{plot.solist}
+\alias{plot.solist}
+\title{Plot a List of Spatial Objects}
+\description{
+  Plots a list of two-dimensional spatial objects.
+}
+\usage{
+  \method{plot}{solist}(x, \dots, main, arrange=TRUE,
+   nrows=NULL, ncols=NULL, main.panel=NULL,
+   mar.panel=c(2,1,1,2), hsep=0, vsep=0,
+   panel.begin=NULL, panel.end=NULL, panel.args=NULL,
+   panel.begin.args=NULL, panel.end.args=NULL,
+   plotcommand="plot",
+   adorn.left=NULL, adorn.right=NULL, adorn.top=NULL, adorn.bottom=NULL,
+   adorn.size=0.2, equal.scales=FALSE, halign=FALSE, valign=FALSE)
+}
+\arguments{
+  \item{x}{
+    An object of the class \code{"solist"},
+    essentially a list of two-dimensional spatial datasets.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot}} when generating each
+    plot panel.
+  }
+  \item{main}{
+    Overall heading for the plot.
+  }
+  \item{arrange}{
+    Logical flag indicating whether to plot the objects
+    side-by-side on a single page (\code{arrange=TRUE})
+    or plot them individually in a succession of frames
+    (\code{arrange=FALSE}).
+  }
+  \item{nrows,ncols}{
+    Optional. The number of rows/columns in the plot layout
+    (assuming \code{arrange=TRUE}).
+    You can specify either or both of these numbers.
+  }
+  \item{main.panel}{
+    Optional. A character string, or a vector of character strings,
+    giving the headings for each of the objects.
+  }
+  \item{mar.panel}{
+    Size of the margins outside each plot panel.
+    A numeric vector of length 4 giving the bottom, left, top,
+    and right margins in that order. (Alternatively the vector
+    may have length 1 or 2 and will be replicated to length 4).
+    See the section on \emph{Spacing between plots}.
+  }
+  \item{hsep,vsep}{
+    Additional horizontal and vertical separation between plot panels,
+    expressed in the same units as \code{mar.panel}. 
+  }
+  \item{panel.begin,panel.end}{
+    Optional. Functions that will be executed before and after each panel is
+    plotted. See Details.
+  }
+  \item{panel.args}{
+    Optional. Function that determines different plot arguments
+    for different panels. See Details.
+  }
+  \item{panel.begin.args}{
+    Optional. List of additional arguments for \code{panel.begin}
+    when it is a function.
+  }
+  \item{panel.end.args}{
+    Optional. List of additional arguments for \code{panel.end}
+    when it is a function.
+  }
+  \item{plotcommand}{
+    Optional.
+    Character string containing the name of the command that should be
+    executed to plot each panel. 
+  }
+  \item{adorn.left,adorn.right,adorn.top,adorn.bottom}{
+    Optional. Functions (with no arguments) that will be executed to
+    generate additional plots at the margins (left, right, top and/or
+    bottom, respectively) of the array of plots.
+  }
+  \item{adorn.size}{
+    Relative width (as a fraction of the other panels' widths)
+    of the margin plots.
+  }
+  \item{equal.scales}{
+    Logical value indicating whether the components
+    should be plotted at (approximately) the same physical scale. 
+  }
+  \item{halign,valign}{
+    Logical values indicating whether panels in a column
+    should be aligned to the same \eqn{x} coordinate system
+    (\code{halign=TRUE}) and whether panels in a row should
+    be aligned to the same \eqn{y} coordinate system (\code{valign=TRUE}).
+    These are applicable only if \code{equal.scales=TRUE}.
+  }
+}
+\value{
+  Null.
+}
+\details{
+  This is the \code{plot} method for the class \code{"solist"}.
+  An object of class \code{"solist"} represents a
+  list of two-dimensional spatial datasets.
+
+  In the \pkg{spatstat} package, various functions produce
+  an object of class \code{"solist"}.
+  These objects can be plotted in a nice arrangement
+  using \code{plot.solist}. See the Examples.
+
+  The argument \code{panel.args} determines extra graphics parameters
+  for each panel. It should be a function that will be called
+  as \code{panel.args(i)} where \code{i} is the panel number.
+  Its return value should be a list of graphics parameters that can
+  be passed to the relevant \code{plot} method. These parameters
+  override any parameters specified in the \code{\dots} arguments.
+
+  The arguments \code{panel.begin} and \code{panel.end}
+  determine graphics that will be plotted before and after
+  each panel is plotted. They may be objects
+  of some class that can be plotted
+  with the generic \code{plot} command. Alternatively they
+  may be functions that will be
+  called as \code{panel.begin(i, y, main=main.panel[i])}
+  and \code{panel.end(i, y, add=TRUE)} where \code{i} is the panel
+  number and \code{y = x[[i]]}.
+
+  If all entries of \code{x} are pixel images,
+  the function \code{\link{image.listof}} is called to control
+  the plotting. The arguments \code{equal.ribbon} and \code{col}
+  can be used to determine the colour map or maps applied.
+
+  If \code{equal.scales=FALSE} (the default), then the 
+  plot panels will have equal height on the plot device
+  (unless there is only one column of panels, in which case
+  they will have equal width on the plot device). This means that the
+  objects are plotted at different physical scales, by default.
+
+  If \code{equal.scales=TRUE}, then the dimensions of the
+  plot panels on the plot device will be proportional
+  to the spatial dimensions of the
+  corresponding components of \code{x}. This means that the
+  objects will be plotted at \emph{approximately} equal physical scales.
+  If these objects have very different spatial sizes,
+  the plot command could fail (when it tries
+  to plot the smaller objects at a tiny scale), with an error
+  message that the figure margins are too large.
+
+  The objects will be plotted at \emph{exactly} equal
+  physical scales, and \emph{exactly} aligned on the device,
+  under the following conditions:
+  \itemize{
+    \item
+    every component of \code{x} is a spatial object
+    whose position can be shifted by \code{\link{shift}};
+    \item
+    \code{panel.begin} and \code{panel.end} are either
+    \code{NULL} or they are spatial objects 
+    whose position can be shifted by \code{\link{shift}};
+    \item
+    \code{adorn.left}, 
+    \code{adorn.right}, 
+    \code{adorn.top} and 
+    \code{adorn.bottom} are all \code{NULL}.
+  }
+
+  Another special case is when every component of \code{x} is an
+  object of class \code{"fv"} representing a function.
+  If \code{equal.scales=TRUE} then all these functions will 
+  be plotted with the same axis scales
+  (i.e. with the same \code{xlim} and the same \code{ylim}).
+}
+\section{Spacing between plots}{
+  The spacing between individual plots is controlled by the parameters
+  \code{mar.panel}, \code{hsep} and \code{vsep}.
+
+  If \code{equal.scales=FALSE}, the plot panels are
+  logically separate plots. The margins for each panel are
+  determined by the argument \code{mar.panel} which becomes 
+  the graphics parameter \code{mar}
+  described in the help file for \code{\link{par}}.
+  One unit of \code{mar} corresponds to one line of text in the margin.
+  If \code{hsep} or \code{vsep} are present, \code{mar.panel}
+  is augmented by \code{c(vsep, hsep, vsep, hsep)/2}.
+  
+  If \code{equal.scales=TRUE}, all the plot panels are drawn
+  in the same coordinate system which represents a physical scale.
+  The unit of measurement for the bottom and top margins
+  (\code{mar.panel[1]} and \code{mar.panel[3]})
+  is one-sixth of the greatest height of any object plotted in the same row
+  of panels, and the unit for the left and right margins
+  (\code{mar.panel[2]} and \code{mar.panel[4]}) is one-sixth of the
+  greatest width of any object plotted in the same column of panels.
+  If \code{hsep} or \code{vsep} are present,
+  they are interpreted in the same units as \code{mar.panel[2]}
+  and \code{mar.panel[1]} respectively.
+}
+\seealso{
+  \code{\link{plot.anylist}},
+  \code{\link{contour.listof}},
+  \code{\link{image.listof}},
+  \code{\link{density.splitppp}}
+}
+\section{Error messages}{
+  If the error message \sQuote{Figure margins too large}
+  occurs, this generally means that one of the
+  objects had a much smaller physical scale than the others.
+  Ensure that \code{equal.scales=FALSE}
+  and increase the values of \code{mar.panel}.
+}
+\examples{
+# Intensity estimate of multitype point pattern
+ plot(D <- density(split(amacrine)))
+ plot(D, main="", equal.ribbon=TRUE,
+      panel.end=function(i,y,...){contour(y, ...)})
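+ # a hedged sketch (editor's addition): panel.args is assumed to be
+ # called as panel.args(i) and to return a list of plot arguments
+ plot(D, main="", panel.args=function(i) list(ribbon = (i == 1)))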
+}
+\author{\adrian,
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.splitppp.Rd b/man/plot.splitppp.Rd
new file mode 100644
index 0000000..eaf3b20
--- /dev/null
+++ b/man/plot.splitppp.Rd
@@ -0,0 +1,64 @@
+\name{plot.splitppp}
+\alias{plot.splitppp}
+\title{Plot a List of Point Patterns}
+\description{
+  Plots a list of point patterns.
+}
+\usage{
+  \method{plot}{splitppp}(x, \dots, main)
+}
+\arguments{
+  \item{x}{
+    A named list of point patterns,
+    typically obtained from \code{\link{split.ppp}}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot.listof}} which control the
+    layout of the plot panels, their appearance, and the
+    plot behaviour in individual plot panels.
+  }
+  \item{main}{
+    Optional main title for the plot.
+  }
+}
+\value{
+  Null.
+}
+\details{
+  This is the \code{plot} method for the class \code{"splitppp"}.
+  It is typically used to plot the result of the function
+  \code{\link{split.ppp}}.
+
+  The argument \code{x} should be a named list of point patterns
+  (objects of class \code{"ppp"}, see \code{\link{ppp.object}}).
+  Each of these point patterns will be plotted in turn
+  using \code{\link{plot.ppp}}.
+
+  Plotting is performed by \code{\link{plot.listof}}.
+}
+\seealso{
+  \code{\link{plot.listof}} for arguments controlling the plot.
+  
+  \code{\link{split.ppp}},
+  \code{\link{plot.ppp}},
+  \code{\link{ppp.object}}.
+}
+\section{Error messages}{
+  If the error message \sQuote{Figure margins too large}
+  occurs, ensure that \code{equal.scales=FALSE}
+  and increase the values of \code{mar.panel}.
+}
+\examples{
+# Multitype point pattern
+ plot(split(amacrine))
+ plot(split(amacrine), main="", 
+     panel.begin=function(i, y, ...) { plot(density(y), ribbon=FALSE, ...) })
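+ # a hedged sketch (editor's addition): layout arguments such as nrows
+ # are passed through to plot.listof
+ plot(split(amacrine), nrows=1)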
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.ssf.Rd b/man/plot.ssf.Rd
new file mode 100644
index 0000000..37dbfb3
--- /dev/null
+++ b/man/plot.ssf.Rd
@@ -0,0 +1,102 @@
+\name{plot.ssf}
+\alias{plot.ssf}
+\alias{image.ssf}
+\alias{contour.ssf}
+\title{
+  Plot a Spatially Sampled Function
+}
+\description{
+  Plot a spatially sampled function object.
+}
+\usage{
+\method{plot}{ssf}(x, \dots,
+                   how = c("smoothed", "nearest", "points"),
+                   style = c("image", "contour", "imagecontour"),
+                   sigma = NULL, contourargs=list())
+
+\method{image}{ssf}(x, \dots)
+
+\method{contour}{ssf}(x, ..., main, sigma = NULL)
+}
+\arguments{
+  \item{x}{
+    Spatially sampled function (object of class \code{"ssf"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link[graphics]{image.default}}
+    or 
+    \code{\link[spatstat]{plot.ppp}} to control the plot.
+  }
+  \item{how}{
+    Character string determining whether to display the
+    function values at the data points (\code{how="points"}),
+    a smoothed interpolation of the function
+    (\code{how="smoothed"}), or the function value at the
+    nearest data point (\code{how="nearest"}).
+  }
+  \item{style}{
+    Character string indicating whether to plot the smoothed function as
+    a colour image, a contour map, or both.
+  }
+  \item{contourargs}{
+    Arguments passed to \code{\link[graphics]{contour.default}}
+    to control the contours, if \code{style="contour"} or
+    \code{style="imagecontour"}.
+  }
+  \item{sigma}{
+    Smoothing bandwidth for smooth interpolation.
+  }
+  \item{main}{
+    Optional main title for the plot.
+  }
+}
+\details{
+  These are methods for the generic
+  \code{\link[graphics]{plot}},
+  \code{\link[graphics]{image}} and
+  \code{\link[graphics]{contour}}
+  for the class \code{"ssf"}.
+  
+  An object of class \code{"ssf"} represents a
+  function (real- or vector-valued) that has been
+  sampled at a finite set of points.
+
+  For \code{plot.ssf} there are three types of display.
+  If \code{how="points"} the exact function values
+  will be displayed as circles centred at the locations where they
+  were computed. If \code{how="smoothed"} (the default) these
+    values will be kernel-smoothed using \code{\link{Smooth.ppp}}
+  and displayed as a pixel image.
+  If \code{how="nearest"} the values will be interpolated
+  by nearest neighbour interpolation using \code{\link{nnmark}}
+  and displayed as a pixel image.
+
+  For \code{image.ssf} and \code{contour.ssf} the values are
+  kernel-smoothed before being displayed.
+}
+\value{
+  \code{NULL}.
+}
+\references{
+  Baddeley, A. (2016)
+  Local composite likelihood for spatial point processes.
+  \emph{Spatial Statistics}, in press.
+
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  Chapman and Hall/CRC Press.
+}
+\author{
+  \adrian.
+}
+\seealso{
+\code{\link{ssf}}
+}
+\examples{
+  a <- ssf(cells, nndist(cells, k=1:3))
+  plot(a, how="points")
+  plot(a, how="smoothed")
+  plot(a, how="nearest")
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.symbolmap.Rd b/man/plot.symbolmap.Rd
new file mode 100644
index 0000000..79c5606
--- /dev/null
+++ b/man/plot.symbolmap.Rd
@@ -0,0 +1,85 @@
+\name{plot.symbolmap}
+\alias{plot.symbolmap}
+\title{
+  Plot a Graphics Symbol Map
+}
+\description{
+  Plot a representation of a graphics symbol map,
+  similar to a plot legend.
+}
+\usage{
+\method{plot}{symbolmap}(x, \dots, main, xlim = NULL, ylim = NULL,
+                           vertical = FALSE,
+                           side = c("bottom", "left", "top", "right"),
+                           annotate = TRUE, labelmap = NULL, add = FALSE)
+} 
+\arguments{
+  \item{x}{
+    Graphics symbol map (object of class \code{"symbolmap"}).
+  }
+  \item{\dots}{
+    Additional graphics arguments passed to
+    \code{\link{points}}, \code{\link{symbols}} or \code{\link{axis}}.
+  }
+  \item{main}{
+    Main title for the plot. A character string.
+  }
+  \item{xlim,ylim}{
+    Coordinate limits for the plot.
+    Numeric vectors of length 2.
+  }
+  \item{vertical}{
+    Logical. Whether to plot the symbol map in a vertical orientation.
+  }
+  \item{side}{
+    Character string specifying the position of the text
+    that annotates the symbols.
+  }
+  \item{annotate}{
+    Logical. Whether to annotate the symbols with labels.
+  }
+  \item{labelmap}{
+    Transformation of the labels.
+    A function or a scale factor which will be applied to
+    the data values corresponding to the plotted symbols.
+  }
+  \item{add}{
+    Logical value indicating whether to add the plot to the
+    current plot (\code{add=TRUE}) or to initialise a new plot.
+  }
+}
+\details{
+  A graphics symbol map is
+  an association between data values and graphical symbols. 
+
+  This command plots the graphics symbol map itself,
+  in the style of a plot legend.
+}
+\value{
+  None.
+}
+\author{\adrian,
+  
+  \rolf
+  
+  and \ege.
+}
+\seealso{
+  \code{\link{symbolmap}} to create a symbol map.
+  
+  \code{\link{invoke.symbolmap}} to apply the symbol map to some data
+  and plot the resulting symbols.
+}
+\examples{
+  g <- symbolmap(inputs=letters[1:10], pch=11:20)
+  plot(g)
+
+  g2 <- symbolmap(range=c(-1,1),
+                    shape=function(x) ifelse(x > 0, "circles", "squares"),
+                    size=function(x) sqrt(ifelse(x > 0, x/pi, -x)),
+                    bg = function(x) ifelse(abs(x) < 1, "red", "black"))
+  plot(g2, vertical=TRUE, side="left", col.axis="blue", cex.axis=2)
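+  # a hedged sketch (editor's addition): labelmap given as a scale factor
+  plot(g2, vertical=TRUE, side="left", labelmap=100)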
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.tess.Rd b/man/plot.tess.Rd
new file mode 100644
index 0000000..be2f935
--- /dev/null
+++ b/man/plot.tess.Rd
@@ -0,0 +1,75 @@
+\name{plot.tess}
+\alias{plot.tess}
+\title{Plot a tessellation}
+\description{
+  Plots a tessellation.
+}
+\usage{
+  \method{plot}{tess}(x, ..., main, add=FALSE,
+                      show.all=!add, col=NULL, do.plot=TRUE,
+                      do.labels=FALSE,
+                      labels=tilenames(x), labelargs=list())
+}
+\arguments{
+  \item{x}{Tessellation (object of class \code{"tess"}) to be plotted.}
+  \item{\dots}{Arguments controlling the appearance of the plot.}
+  \item{main}{Heading for the plot. A character string.}
+  \item{add}{Logical. Determines whether the tessellation plot is added
+    to the existing plot.
+  }
+  \item{show.all}{
+    Logical value indicating whether to plot everything
+    including the main title and the observation window of \code{x}.
+  }
+  \item{col}{
+    Colour of the tile boundaries. A character string.
+    Ignored for pixel tessellations.
+  }
+  \item{do.plot}{
+    Logical value indicating whether to actually perform the plot.
+  }
+  \item{do.labels}{
+    Logical value indicating whether to show a text label for each tile
+    of the tessellation.
+  }
+  \item{labels}{Character vector of labels for the tiles.}
+  \item{labelargs}{
+    List of arguments passed to
+    \code{\link{text.default}} to control display of the text labels.
+  }
+}
+\details{
+  This is a method for the generic \code{\link{plot}} function
+  for the class \code{"tess"} of tessellations (see \code{\link{tess}}).
+
+  The arguments \code{\dots} control the appearance of the plot.
+  They are passed to \code{\link{segments}},
+  \code{\link{plot.owin}} or \code{\link{plot.im}}, depending on the
+  type of tessellation. 
+}
+\value{
+  (Invisible) window of class \code{"owin"} specifying a bounding box
+  for the plot (including a colour ribbon if plotted).
+}
+\seealso{
+  \code{\link{tess}}
+}
+\examples{
+  A <- tess(xgrid=0:4,ygrid=0:4)
+  plot(A, col="blue", lwd=2, lty=2)
+  B <- A[c(1, 2, 5, 7, 9)]
+  plot(B, hatch=TRUE)
+  v <- as.im(function(x,y){factor(round(5 * (x^2 + y^2)))}, W=owin())
+  levels(v) <- letters[seq(length(levels(v)))]
+  E <- tess(image=v)
+  plot(E)
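+  # a hedged sketch (editor's addition): text labels for each tile
+  plot(B, do.labels=TRUE, labelargs=list(col="red"))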
+}
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.textstring.Rd b/man/plot.textstring.Rd
new file mode 100644
index 0000000..f263be2
--- /dev/null
+++ b/man/plot.textstring.Rd
@@ -0,0 +1,49 @@
+\name{plot.textstring}
+\alias{plot.textstring}
+\title{Plot a Text String}
+\description{Plots an object of class \code{"textstring"}.}
+\usage{
+\method{plot}{textstring}(x, \dots, do.plot = TRUE)
+}
+\arguments{
+  \item{x}{
+    Object of class \code{"textstring"} to be plotted.
+    This object is created by the command \code{\link{textstring}}.
+  }
+  \item{\dots}{
+    Additional graphics arguments passed to
+    \code{\link[graphics]{text}} to control the plotting of text.
+  }
+  \item{do.plot}{
+    Logical value indicating whether to actually plot the text.
+  }
+}
+\details{
+  The argument \code{x} should be an object of class \code{"textstring"} 
+  created by the command \code{\link{textstring}}. 
+
+  This function displays the text using 
+  \code{\link[graphics]{text}}.
+}
+\value{
+  A window (class \code{"owin"}) enclosing the plotted graphics.
+}
+\examples{
+  W <- Window(humberside)
+  te <- textstring(centroid.owin(W), txt="Humberside", cex=2.5)
+  plot(layered(W, te), main="")
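+  # a hedged sketch (editor's addition): compute the enclosing window
+  # without plotting
+  bb <- plot(te, do.plot=FALSE)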
+}
+\author{\adrian,
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{onearrow}},
+  \code{\link{yardstick}}
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.texturemap.Rd b/man/plot.texturemap.Rd
new file mode 100644
index 0000000..b959309
--- /dev/null
+++ b/man/plot.texturemap.Rd
@@ -0,0 +1,97 @@
+\name{plot.texturemap}
+\alias{plot.texturemap}
+\title{
+  Plot a Texture Map
+}
+\description{
+  Plot a representation of a texture map,
+  similar to a plot legend.
+}
+\usage{
+ \method{plot}{texturemap}(x, \dots, main, xlim = NULL, ylim = NULL,
+                           vertical = FALSE, axis = TRUE,
+                           labelmap = NULL, gap = 0.25,
+                           spacing = NULL, add = FALSE)
+}
+\arguments{
+  \item{x}{
+    Texture map object (class \code{"texturemap"}).
+  }
+  \item{\dots}{
+    Additional graphics arguments passed to
+    \code{\link{add.texture}} or \code{\link{axis.default}}.
+  }
+  \item{main}{
+    Main title for plot.
+  }
+  \item{xlim,ylim}{
+    Optional vectors of length 2 giving the \eqn{x} and \eqn{y} limits
+    of the plot.
+  }
+  \item{vertical}{
+    Logical value indicating whether to arrange the texture boxes
+    in a vertical column (\code{vertical=TRUE}) or a horizontal row
+    (\code{vertical=FALSE}, the default).
+  }
+  \item{axis}{
+    Logical value indicating whether to plot an axis line
+    joining the texture boxes.
+  }
+  \item{labelmap}{
+    Optional. A \code{function} which will be applied to the
+    data values (the inputs of the texture map) before they are
+    displayed on the plot.
+  }
+  \item{gap}{
+    Separation between texture boxes, as a fraction of the
+    width or height of a box.
+  }
+  \item{spacing}{
+    Argument passed to \code{\link{add.texture}} controlling the density
+    of lines in a texture. Expressed in spatial coordinate units.
+  }
+  \item{add}{
+    Logical value indicating whether to add the graphics to an existing
+    plot (\code{add=TRUE}) or to initialise a new plot
+    (\code{add=FALSE}, the default).
+  }
+}
+\details{
+  A texture map is an association between data values and graphical
+  textures. An object of class \code{"texturemap"} represents a texture
+  map. Such objects are returned from the plotting function
+  \code{\link{textureplot}}, and can be created directly by the function
+  \code{\link{texturemap}}.
+
+  This function \code{plot.texturemap} is a method for the generic
+  \code{\link{plot}} for the class \code{"texturemap"}. It displays
+  a sample of each of the textures in the texture map, in a separate
+  box, annotated by the data value which is mapped to that texture.
+
+  The arrangement and position of the boxes is controlled by
+  the arguments \code{vertical}, \code{xlim}, \code{ylim} and
+  \code{gap}.
+}
+\value{
+  Null.
+}
+\author{\adrian,
+  
+  
+  \rolf
+  
+  and \ege
+}
+\seealso{
+  \code{\link{texturemap}},
+  \code{\link{textureplot}},
+  \code{\link{add.texture}}.
+}
+\examples{
+   tm <- texturemap(c("First", "Second", "Third"), 2:4, col=2:4)
+   plot(tm, vertical=FALSE)
+   ## abbreviate the labels
+   plot(tm, labelmap=function(x) substr(x, 1, 2))
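+   ## a hedged sketch (editor's addition): vertical arrangement, wider gaps
+   plot(tm, vertical=TRUE, gap=0.5)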
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/plot.yardstick.Rd b/man/plot.yardstick.Rd
new file mode 100644
index 0000000..0a30bb9
--- /dev/null
+++ b/man/plot.yardstick.Rd
@@ -0,0 +1,98 @@
+\name{plot.yardstick}
+\alias{plot.yardstick}
+\title{
+  Plot a Yardstick or Scale Bar
+}
+\description{
+  Plots an object of class \code{"yardstick"}.
+}
+\usage{
+ \method{plot}{yardstick}(x, \dots,
+        angle = 20, frac = 1/8, 
+        split = FALSE, shrink = 1/4,
+        pos = NULL,
+        txt.args=list(),
+        txt.shift=c(0,0),
+        do.plot = TRUE)
+}
+\arguments{
+  \item{x}{
+    Object of class \code{"yardstick"} to be plotted.
+    This object is created by the command \code{\link{yardstick}}.
+  }
+  \item{\dots}{
+    Additional graphics arguments passed to
+    \code{\link[graphics]{segments}} to control the appearance of the line.
+  }
+  \item{angle}{
+    Angle between the arrows and the line segment, in degrees. 
+  }
+  \item{frac}{
+    Length of arrow as a fraction of total length of the line segment.
+  }
+  \item{split}{
+    Logical. If \code{TRUE}, then the line will be broken in the
+    middle, and the text will be placed in this gap. If \code{FALSE},
+    the line will be unbroken, and the text will be placed beside
+    the line.
+  }
+  \item{shrink}{
+    Fraction of total length to be removed from the middle of the
+    line segment, if \code{split=TRUE}.
+  }
+  \item{pos}{
+    Integer (passed to \code{\link[graphics]{text}}) determining the
+    position of the annotation text relative to the line segment,
+    if \code{split=FALSE}. Values of 1, 2, 3 and 4
+    indicate positions below, to the left of,
+    above and to the right of the line, respectively.
+  }
+  \item{txt.args}{
+    Optional list of additional arguments passed to
+    \code{\link[graphics]{text}} controlling the appearance of the text.
+    Examples include \code{adj}, \code{srt}, \code{col}, \code{cex},
+    \code{font}.
+  }
+  \item{txt.shift}{
+    Optional numeric vector of length 2 specifying displacement
+    of the text position relative to the centre of the yardstick.
+  }
+  \item{do.plot}{
+    Logical. Whether to actually perform the plot (\code{do.plot=TRUE}).
+  }
+}
+\details{
+  A yardstick or scale bar is a line segment, drawn on any spatial
+  graphics display, indicating the scale of the plot. 
+
+  The argument \code{x} should be an object of class \code{"yardstick"}
+  created by the command \code{\link{yardstick}}. 
+}
+\value{
+  A window (class \code{"owin"}) enclosing the plotted graphics.
+}
+\examples{
+  plot(owin(), main="Yardsticks")
+  ys <- yardstick(as.psp(list(xmid=0.5, ymid=0.1, length=0.4, angle=0),
+                         window=owin(c(0.2, 0.8), c(0, 0.2))),
+                  txt="1 km")
+  plot(ys)
+  ys <- shift(ys, c(0, 0.3))
+  plot(ys, angle=90, frac=0.08)
+  ys <- shift(ys, c(0, 0.3))
+  plot(ys, split=TRUE)
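+  # a hedged sketch (editor's addition): unbroken line, text placed above
+  ys <- shift(ys, c(0, 0.2))
+  plot(ys, pos=3)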
+}
+\author{\adrian,
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{yardstick}}
+}
+\keyword{spatial}
+\keyword{hplot}
+
diff --git a/man/points.lpp.Rd b/man/points.lpp.Rd
new file mode 100644
index 0000000..4a40279
--- /dev/null
+++ b/man/points.lpp.Rd
@@ -0,0 +1,63 @@
+\name{points.lpp}
+\alias{points.lpp}
+\title{
+  Draw Points on Existing Plot
+}
+\description{
+  For a point pattern on a linear network, this function draws the
+  coordinates of the points only, on the existing plot display.
+}
+\usage{
+\method{points}{lpp}(x, \dots)
+}
+\arguments{
+  \item{x}{
+    A point pattern on a linear network (object of class \code{"lpp"}).
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link[graphics]{points.default}}.
+  }
+}
+\details{
+  This is a method for the generic function
+  \code{\link[graphics]{points}}
+  for the class \code{"lpp"} of point patterns on a linear network.
+
+  If \code{x} is a point pattern on a linear network, then
+  \code{points(x)} plots the spatial coordinates of the points only,
+  on the existing plot display, 
+  without plotting the underlying network.
+  It is an error to call this function if a plot has not yet been
+  initialised.
+  
+  The spatial coordinates are extracted and passed to
+  \code{\link[graphics]{points.default}} along with any extra arguments.
+  Arguments controlling the colours and the plot symbols are interpreted
+  by \code{\link[graphics]{points.default}}. For example, if the
+  argument \code{col} is a vector, then the \code{i}th point is drawn
+  in the colour \code{col[i]}.
+}
+\section{Difference from plot method}{
+  The more usual way to plot the points is using \code{\link{plot.lpp}}.
+  For example \code{plot(x)} would plot both the points and the
+  underlying network, while \code{plot(x, add=TRUE)} would plot only the
+  points. The interpretation of arguments controlling the colours and
+  plot symbols is different here: they determine a symbol map, as explained
+  in the help for \code{\link{plot.ppp}}.
+}
+\value{
+  Null.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{plot.lpp}}, \code{\link[graphics]{points.default}}
+}
+\examples{
+ plot(Frame(spiders), main="Spiders on a Brick Wall")
+ points(spiders)
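+ # a hedged sketch (editor's addition): one colour per point,
+ # interpreted by points.default
+ points(spiders, pch=16, col=rainbow(npoints(spiders)))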
+}
+\keyword{spatial}
+\keyword{hplot}
+
diff --git a/man/pointsOnLines.Rd b/man/pointsOnLines.Rd
new file mode 100644
index 0000000..eb1cafe
--- /dev/null
+++ b/man/pointsOnLines.Rd
@@ -0,0 +1,58 @@
+\name{pointsOnLines}
+\alias{pointsOnLines}
+\title{Place Points Evenly Along Specified Lines}
+\description{
+  Given a line segment pattern, place a series of points
+  at equal distances along each line segment.
+}
+\usage{
+pointsOnLines(X, eps = NULL, np = 1000, shortok=TRUE)
+}
+\arguments{
+  \item{X}{A line segment pattern (object of class \code{"psp"}).}
+  \item{eps}{Spacing between successive points.}
+  \item{np}{Approximate total number of points (incompatible with
+    \code{eps}).}
+  \item{shortok}{
+    Logical. If \code{FALSE}, very short segments
+    (of length shorter than \code{eps}) will not generate any points.
+    If \code{TRUE}, a very short segment will be represented by its
+    midpoint.
+  }
+}
+\details{
+  For each line segment in the pattern \code{X}, a succession of points
+  is placed along the line segment. These points are equally spaced at
+  a distance \code{eps}, except for the first and last points
+  in the sequence. 
+
+  The spacing \code{eps} is measured in coordinate units of \code{X}.
+  
+  If \code{eps} is not given, then it is determined by
+  \code{eps = len/np} where \code{len} is the total length of the
+  segments in \code{X}. The actual number of points will then be slightly
+  larger than \code{np}.
+}
+\value{
+  A point pattern (object of class \code{"ppp"}) in the same window
+  as \code{X}.
+}
+\seealso{
+  \code{\link{psp}},
+  \code{\link{ppp}},
+  \code{\link{runifpointOnLines}}
+}
+\examples{
+  X <- psp(runif(20), runif(20), runif(20), runif(20),  window=owin())
+  Y <- pointsOnLines(X, eps=0.05)
+  plot(X, main="")
+  plot(Y, add=TRUE, pch="+")
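+  # a hedged sketch (editor's addition): specify the approximate
+  # total number of points instead of the spacing
+  Z <- pointsOnLines(X, np=100)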
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/polynom.Rd b/man/polynom.Rd
new file mode 100644
index 0000000..4ca5a5f
--- /dev/null
+++ b/man/polynom.Rd
@@ -0,0 +1,61 @@
+\name{polynom}
+\alias{polynom}
+\title{
+  Polynomial in One or Two Variables
+}
+\description{
+  This function is used to represent a polynomial term in a model
+  formula.
+  It computes the homogeneous terms in the polynomial of degree \code{n}
+  in one variable \code{x} or two variables \code{x,y}.
+}
+\usage{
+  polynom(x, \dots)
+}
+\arguments{
+  \item{x}{
+    A numerical vector.
+  }
+  \item{\dots}{
+    Either a single integer \code{n} specifying the degree of the polynomial,
+    or two arguments \code{y,n} giving another vector of data \code{y}
+    and the degree of the polynomial.
+  }
+}
+\details{
+  This function is typically used inside a model formula
+  in order to specify the most general possible polynomial
+  of order \code{n} involving one numerical variable \code{x}
+  or two numerical variables \code{x,y}.
+
+  It is equivalent to \code{\link[stats]{poly}} with argument \code{raw=TRUE}.
+
+  If only one numerical vector argument \code{x} is given, 
+  the function computes the vectors \code{x^k} for
+  \code{k = 1, 2, \dots, n}. These vectors are combined into a matrix
+  with \code{n} columns.
+  
+  If two numerical vector arguments \code{x,y} are given,
+  the function computes the vectors \code{x^k * y^m} for
+  \code{k >= 0} and \code{m >= 0} satisfying
+  \code{0 < k + m <= n}. These vectors are combined into a matrix
+  with one column for each homogeneous term.
+}
+\value{
+   A numeric matrix, with rows corresponding to the entries of \code{x},
+   and columns corresponding to the terms in the polynomial.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link[stats]{poly}},
+  \code{\link{harmonic}}
+}
+\examples{
+   x <- 1:4
+   y <- 10 * (0:3)
+   polynom(x, 3)
+   polynom(x, y, 3)
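+   # a hedged sketch (editor's addition): typical use inside a model formula
+   fit <- ppm(cells ~ polynom(x, y, 2))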
+}
+\keyword{arith}
diff --git a/man/ponderosa.Rd b/man/ponderosa.Rd
new file mode 100644
index 0000000..903be18
--- /dev/null
+++ b/man/ponderosa.Rd
@@ -0,0 +1,61 @@
+\name{ponderosa}
+\alias{ponderosa}
+\alias{ponderosa.extra}
+\docType{data}
+\title{
+  Ponderosa Pine Tree Point Pattern
+}
+\description{
+  The data record the locations of 108 Ponderosa Pine
+  (\emph{Pinus ponderosa}) trees in a 120 metre square region in the Klamath
+  National Forest in northern California, published as Figure 2 of
+  Getis and Franklin (1987).
+
+  Franklin et al. (1985) determined the locations of approximately 5000
+  trees from United States Forest Service aerial photographs and
+  digitised them for analysis. Getis and Franklin (1987) selected a 120
+  metre square subregion that appeared to exhibit clustering. This subregion
+  is the \code{ponderosa} dataset.
+
+  In principle these data are equivalent to
+  Figure 2 of Getis and Franklin (1987) but they are not exactly
+  identical; some of the spatial locations appear to be slightly
+  perturbed.
+
+  The data points identified as A, B, C on Figure 2 of Getis and Franklin
+  (1987) correspond to points numbered 42, 7 and 77 in the dataset
+  respectively.
+} 
+\format{
+  Typing \code{data(ponderosa)} gives access to two objects,
+  \code{ponderosa} and \code{ponderosa.extra}.
+  
+  The dataset \code{ponderosa} is a spatial point pattern 
+  (object of class \code{"ppp"})
+  representing the point pattern of tree positions.
+  See \code{\link{ppp.object}} for details of the format.
+
+  The dataset \code{ponderosa.extra} is a list containing supplementary
+  data. The entry \code{id} contains the index numbers of the
+  three special points A, B, C in the point pattern. The entry
+  \code{plotit} is a function that can be called to produce a nice plot
+  of the point pattern.
+}
+\usage{data(ponderosa)}
+\source{Prof. Janet Franklin, University of California, Santa Barbara}
+\examples{
+   data(ponderosa)
+   ponderosa.extra$plotit()
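+   # a hedged sketch (editor's addition): highlight the special points A, B, C
+   plot(ponderosa[ponderosa.extra$id], add=TRUE, pch=16, cols="red")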
+}
+\references{
+  Franklin, J., Michaelsen, J. and Strahler, A.H. (1985)
+  Spatial analysis of density dependent pattern in coniferous forest
+  stands.
+  \emph{Vegetatio} \bold{64}, 29--36.
+  
+  Getis, A. and Franklin, J. (1987)
+  Second-order neighbourhood analysis of mapped point patterns.
+  \emph{Ecology} \bold{68}, 473--477.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/pool.Rd b/man/pool.Rd
new file mode 100644
index 0000000..1c3b27d
--- /dev/null
+++ b/man/pool.Rd
@@ -0,0 +1,43 @@
+\name{pool}
+\alias{pool}
+\title{
+  Pool Data 
+}
+\description{
+  Pool the data from several objects of the same class.
+}
+\usage{
+pool(...)
+}
+\arguments{
+  \item{\dots}{
+    Objects of the same type.
+  }
+}
+\details{
+  The function \code{pool} is generic. There are methods for several
+  classes, listed below.
+  
+  \code{pool} is used to combine the data from several objects of the same type,
+  and to compute statistics based on the combined dataset.
+  It may be used to pool the estimates obtained from replicated datasets.
+  It may also be used in high-performance computing applications,
+  when the objects \code{\dots} have been computed on different processors
+  or in different batch runs, and we wish to combine them.
+}
+\value{
+  An object of the same class as the arguments \code{\dots}.
+}
+\seealso{
+  \code{\link{pool.envelope}},
+  \code{\link{pool.fasp}},
+  \code{\link{pool.rat}},
+  \code{\link{pool.fv}}
+}
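+\examples{
+   # a hedged sketch (editor's addition): pool dispatches on the
+   # class of its arguments, here ratio objects produced by Kest
+   K1 <- Kest(runifpoint(42), ratio=TRUE, correction="iso")
+   K2 <- Kest(runifpoint(42), ratio=TRUE, correction="iso")
+   pool(K1, K2)
+}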
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
diff --git a/man/pool.anylist.Rd b/man/pool.anylist.Rd
new file mode 100644
index 0000000..4706230
--- /dev/null
+++ b/man/pool.anylist.Rd
@@ -0,0 +1,56 @@
+\name{pool.anylist}
+\alias{pool.anylist}
+\title{
+  Pool Data from a List of Objects
+}
+\description{
+  Pool the data from the objects in a list.
+}
+\usage{
+\method{pool}{anylist}(x, ...)
+}
+\arguments{
+  \item{x}{
+    A list, belonging to the class \code{"anylist"},
+    containing objects that can be pooled.
+  }
+  \item{\dots}{
+    Optional additional objects 
+    which can be pooled with the elements of \code{x}.
+  }
+}
+\details{
+  The function \code{\link{pool}} is generic. Its purpose is to combine
+  data from several objects of the same type (typically computed
+  from different datasets) into a common, pooled estimate. 
+
+  The function \code{pool.anylist} is the method
+  for the class \code{"anylist"}. It is used when the objects to be
+  pooled are given in a list \code{x}.
+
+  Each of the elements of the list \code{x}, and each of the
+  subsequent arguments \code{\dots} if provided, must be an object of the same
+  class. 
+}
+\value{
+  An object of the same class as each of the entries in \code{x}.
+}
+\seealso{
+  \code{\link{anylist}},
+  \code{\link{pool}}.
+}
+\examples{
+   Keach <- anylapply(waterstriders, Kest, ratio=TRUE, correction="iso")
+   K <- pool(Keach)
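+   # a hedged sketch (editor's addition): plot the pooled estimate
+   plot(K, pooliso ~ r, shade=c("hiiso", "loiso"))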
+}
+\author{
+\adrian,
+
+
+\rolf
+
+and \ege
+
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/pool.envelope.Rd b/man/pool.envelope.Rd
new file mode 100644
index 0000000..5b77c5c
--- /dev/null
+++ b/man/pool.envelope.Rd
@@ -0,0 +1,95 @@
+\name{pool.envelope}
+\alias{pool.envelope}
+\title{
+  Pool Data from Several Envelopes
+}
+\description{
+  Pool the simulation data from several simulation envelopes
+  (objects of class \code{"envelope"})
+  and compute a new envelope.
+}
+\usage{
+\method{pool}{envelope}(..., savefuns=FALSE, savepatterns=FALSE)
+}
+\arguments{
+  \item{\dots}{
+    Objects of class \code{"envelope"}.
+  }
+  \item{savefuns}{
+    Logical flag indicating whether to save all the simulated
+    function values.
+  }
+  \item{savepatterns}{
+    Logical flag indicating whether to save all the simulated
+    point patterns.
+  }
+}
+\details{
+  The function \code{\link{pool}} is generic. This is the method for the
+  class \code{"envelope"} of simulation envelopes. It is used to
+  combine the simulation data from several simulation envelopes
+  and to compute an envelope based on the combined data.
+
+  Each of the arguments \code{\dots} must be an object of class
+  \code{"envelope"}. These envelopes must be compatible,
+  in that they are envelopes for the same function,
+  and were computed using the same options.
+
+  \itemize{
+    \item In normal use, each envelope object will have been 
+    created by running the command \code{\link{envelope}}
+    with the argument \code{savefuns=TRUE}.
+    This ensures that each object contains the simulated data
+    (summary function values for the simulated point patterns)
+    that were used to construct the envelope.
+
+    The simulated data are extracted from each object and combined.
+    A new envelope is computed from the combined set of simulations.
+    \item
+    Alternatively, if each envelope object was created
+    by running \code{\link{envelope}} with \code{VARIANCE=TRUE},
+    then the saved functions are not required.
+    
+    The sample means and sample variances from each envelope
+    will be pooled. A new envelope is computed from the pooled
+    mean and variance.
+  }
+
+  Warnings or errors will be issued if the envelope objects \code{\dots}
+  appear to be incompatible. Apart from these basic checks,
+  the code is not smart enough to decide whether it is sensible
+  to pool the data.
+
+  To modify the envelope parameters or the type of envelope that is
+  computed, first pool the envelope data using \code{pool.envelope},
+  then use \code{\link{envelope.envelope}} to modify the envelope
+  parameters.
+}
+\value{
+  An object of class \code{"envelope"}.
+}
+\seealso{
+  \code{\link{envelope}},
+  \code{\link{envelope.envelope}},
+  \code{\link{pool}},
+  \code{\link{pool.fasp}}
+}
+\examples{
+   E1 <- envelope(cells, Kest, nsim=10, savefuns=TRUE)
+   E2 <- envelope(cells, Kest, nsim=20, savefuns=TRUE)
+   pool(E1, E2)
+
+   V1 <- envelope(E1, VARIANCE=TRUE)
+   V2 <- envelope(E2, VARIANCE=TRUE)
+   pool(V1, V2)
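+   # a hedged sketch (editor's addition): pool, keeping the simulated
+   # functions, then recompute a different type of envelope
+   E <- pool(E1, E2, savefuns=TRUE)
+   envelope(E, global=TRUE)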
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{htest}
+\keyword{hplot}
+\keyword{iteration}
diff --git a/man/pool.fasp.Rd b/man/pool.fasp.Rd
new file mode 100644
index 0000000..82768f4
--- /dev/null
+++ b/man/pool.fasp.Rd
@@ -0,0 +1,65 @@
+\name{pool.fasp}
+\alias{pool.fasp}
+\title{
+  Pool Data from Several Function Arrays
+}
+\description{
+  Pool the simulation data from several function arrays
+  (objects of class \code{"fasp"})
+  and compute a new function array.
+}
+\usage{
+\method{pool}{fasp}(...)
+}
+\arguments{
+  \item{\dots}{
+    Objects of class \code{"fasp"}.
+  }
+}
+\details{
+  The function \code{\link{pool}} is generic. This is the method for the
+  class \code{"fasp"} of function arrays. It is used to
+  combine the simulation data from several arrays of simulation envelopes
+  and to compute a new array of envelopes based on the combined data.
+
+  Each of the arguments \code{\dots} must be a function array
+  (object of class \code{"fasp"}) containing simulation envelopes.
+  This is typically created by running the command
+  \code{\link{alltypes}} with the arguments
+  \code{envelope=TRUE} and \code{savefuns=TRUE}.
+  This ensures that each object is an array of simulation envelopes,
+  and that each envelope contains the simulated data
+  (summary function values) that were used to construct the envelope.
+
+  The simulated data are extracted from each object and combined.
+  A new array of envelopes is computed from the combined set of simulations.
+
+  Warnings or errors will be issued if the objects \code{\dots}
+  appear to be incompatible. However, the code is not smart enough to
+  decide whether it is sensible to pool the data.
+}
+\value{
+  An object of class \code{"fasp"}.
+}
+\seealso{
+  \code{\link{fasp}},
+  \code{\link{alltypes}},
+  \code{\link{pool.envelope}},
+  \code{\link{pool}}
+}
+\examples{
+   data(amacrine)
+   A1 <- alltypes(amacrine,"K",nsim=9,envelope=TRUE,savefuns=TRUE)
+   A2 <- alltypes(amacrine,"K",nsim=10,envelope=TRUE,savefuns=TRUE)
+   pool(A1, A2)
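+   # a hedged sketch (editor's addition): plot the pooled array
+   plot(pool(A1, A2))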
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{htest}
+\keyword{hplot}
+\keyword{iteration}
diff --git a/man/pool.fv.Rd b/man/pool.fv.Rd
new file mode 100644
index 0000000..bc622c4
--- /dev/null
+++ b/man/pool.fv.Rd
@@ -0,0 +1,61 @@
+\name{pool.fv}
+\alias{pool.fv}
+\title{Pool Several Functions}
+\description{
+  Combine several summary functions into a single function.
+}
+\usage{
+\method{pool}{fv}(..., weights=NULL, relabel=TRUE, variance=TRUE)
+}
+\arguments{
+  \item{\dots}{
+    Objects of class \code{"fv"}.
+  }
+  \item{weights}{
+    Optional numeric vector of weights for the functions.
+  }
+  \item{relabel}{
+    Logical value indicating whether the columns of the resulting function
+    should be labelled to show that they were obtained by pooling.
+  }
+  \item{variance}{
+    Logical value indicating whether to compute the sample variance
+    and related terms.
+  }
+}
+\details{
+  The function \code{\link{pool}} is generic. This is the method for the
+  class \code{"fv"} of summary functions. It is used to
+  combine several estimates of the same function into a single function.
+
+  Each of the arguments \code{\dots} must be an object of class
+  \code{"fv"}. They must be compatible,
+  in that they are estimates of the same function,
+  and were computed using the same options.
+
+  The sample mean and sample variance of the corresponding
+  estimates will be computed.
+}
+\value{
+  An object of class \code{"fv"}.
+}
+\seealso{
+  \code{\link{pool}},
+  \code{\link{pool.anylist}},
+  \code{\link{pool.rat}}
+}
+\examples{
+   K <- lapply(waterstriders, Kest, correction="iso")
+   Kall <- pool(K[[1]], K[[2]], K[[3]])
+   Kall <- pool(as.anylist(K))
+   plot(Kall, cbind(pooliso, pooltheo) ~ r,
+              shade=c("loiso", "hiiso"),
+              main="Pooled K function of waterstriders")
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{htest}
+\keyword{hplot}
+\keyword{iteration}
diff --git a/man/pool.quadrattest.Rd b/man/pool.quadrattest.Rd
new file mode 100644
index 0000000..1acd902
--- /dev/null
+++ b/man/pool.quadrattest.Rd
@@ -0,0 +1,91 @@
+\name{pool.quadrattest}
+\alias{pool.quadrattest}
+\title{
+  Pool Several Quadrat Tests
+}
+\description{
+  Pool several quadrat tests into a single quadrat test.
+}
+\usage{
+\method{pool}{quadrattest}(..., df=NULL, df.est=NULL, nsim=1999,
+                                Xname=NULL, CR=NULL)
+}
+\arguments{
+  \item{\dots}{
+    Any number of objects, each of which is a quadrat test
+    (object of class \code{"quadrattest"}).
+  }
+  \item{df}{
+    Optional. Number of degrees of freedom of the test statistic.
+    Relevant only for \eqn{\chi^2}{chi^2} tests.
+    Incompatible with \code{df.est}.
+  }
+  \item{df.est}{
+    Optional. The number of fitted parameters, or the
+    number of degrees of freedom lost by estimation of
+    parameters. 
+    Relevant only for \eqn{\chi^2}{chi^2} tests.
+    Incompatible with \code{df}.
+  }
+  \item{nsim}{
+    Number of simulations, for Monte Carlo test.
+  }
+  \item{Xname}{
+    Optional. Name of the original data.
+  }
+  \item{CR}{
+    Optional. Numeric value of the Cressie-Read exponent \code{CR}
+    overriding the value used in the tests.
+  }
+}
+\details{
+  The function \code{\link{pool}} is generic. This is the method for the
+  class \code{"quadrattest"}.
+
+  An object of class \code{"quadrattest"} represents a
+  \eqn{\chi^2}{chi^2} test or Monte Carlo test
+  of goodness-of-fit for a point process model, based on quadrat counts.
+  Such objects are created by the command \code{\link{quadrat.test}}.
+
+  Each of the arguments \code{\dots} must be an object of class
+  \code{"quadrattest"}. They must all be the same type of test
+  (chi-squared test or Monte Carlo test, conditional or unconditional)
+  and must all have the same type of alternative hypothesis.
+
+  The test statistic of the pooled test is the Pearson \eqn{X^2}
+  statistic taken over all cells (quadrats) of all tests.
+  The \eqn{p} value of the pooled test is then computed using
+  either a Monte Carlo test or a \eqn{\chi^2}{chi^2} test.
+
+  For a pooled \eqn{\chi^2}{chi^2} test, the number of degrees of freedom of
+  the combined test is computed by adding the degrees of freedom
+  of all the tests (equivalent to assuming the tests are independent)
+  unless it is determined by the arguments \code{df} or \code{df.est}.
+  The \eqn{p} value of the pooled test is then computed from the
+  pooled test statistic and these degrees of freedom.
+
+  For a pooled Monte Carlo test, new simulations are performed
+  to determine the pooled Monte Carlo \eqn{p} value. 
+}
+\value{
+  Another object of class \code{"quadrattest"}.
+}
+\seealso{
+  \code{\link{pool}},
+  \code{\link{quadrat.test}}
+}
+\examples{
+  Y <- split(humberside)
+  test1 <- quadrat.test(Y[[1]])
+  test2 <- quadrat.test(Y[[2]])
+  pool(test1, test2, Xname="Humberside")
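+  # a hedged sketch (editor's addition, purely illustrative):
+  # adjust the degrees of freedom for one estimated parameter
+  pool(test1, test2, df.est=1, Xname="Humberside")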
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{htest}
+
diff --git a/man/pool.rat.Rd b/man/pool.rat.Rd
new file mode 100644
index 0000000..8d80bcf
--- /dev/null
+++ b/man/pool.rat.Rd
@@ -0,0 +1,108 @@
+\name{pool.rat}
+\alias{pool.rat}
+\title{
+  Pool Data from Several Ratio Objects
+}
+\description{
+  Pool the data from several ratio objects
+  (objects of class \code{"rat"})
+  and compute a pooled estimate.
+}
+\usage{
+\method{pool}{rat}(..., weights=NULL, relabel=TRUE, variance=TRUE)
+}
+\arguments{
+  \item{\dots}{
+    Objects of class \code{"rat"}.
+  }
+  \item{weights}{
+    Numeric vector of weights.
+  }
+  \item{relabel}{
+    Logical value indicating whether the result 
+    should be relabelled to show that it was obtained by pooling.
+  }
+  \item{variance}{
+    Logical value indicating whether to compute the sample variance
+    and related terms.
+  }
+}
+\details{
+  The function \code{\link{pool}} is generic. This is the method for the
+  class \code{"rat"} of ratio objects. It is used to
+  combine several estimates of the same quantity
+  when each estimate is a ratio. 
+
+  Each of the arguments \code{\dots} must be an object of class
+  \code{"rat"} representing a ratio object (basically a
+  numerator and a denominator; see \code{\link{rat}}).
+  We assume that these ratios are all estimates of the same quantity.
+
+  If the objects are called \eqn{R_1, \ldots, R_n}{R[1], \dots, R[n]}
+  and if \eqn{R_i}{R[i]} has numerator \eqn{Y_i}{Y[i]} and
+  denominator \eqn{X_i}{X[i]}, so that notionally
+  \eqn{R_i = Y_i/X_i}{R[i] = Y[i]/X[i]}, then the pooled estimate is the
+  ratio-of-sums estimator
+  \deqn{
+    R = \frac{\sum_i Y_i}{\sum_i X_i}.
+  }{
+    R = (Y[1]+\dots+Y[n])/(X[1]+\dots+X[n]).
+  }
+  The standard error of \eqn{R} is computed using the delta method
+  as described in Baddeley \emph{et al.} (1993)
+  or Cochran (1977, pp 154, 161).
+
+  If the argument \code{weights} is given, it should be a numeric vector
+  of length equal to the number of objects to be pooled. 
+  The pooled estimator is the ratio-of-sums estimator
+  \deqn{
+    R = \frac{\sum_i w_i Y_i}{\sum_i w_i X_i}
+  }{
+    R = (w[1] * Y[1]+\dots+ w[n] * Y[n])/(w[1] * X[1]+\dots+w[n] * X[n])
+  }
+  where \eqn{w_i}{w[i]} is the \code{i}th weight.
+
+  This calculation is implemented only for certain classes of objects
+  where the arithmetic can be performed: currently, only for objects
+  which also belong to the class \code{"fv"} (function value tables).
+  For example, if \code{\link{Kest}} is called with argument
+  \code{ratio=TRUE}, the result is a suitable object (belonging to the classes
+  \code{"rat"} and \code{"fv"}).
+
+  Warnings or errors will be issued if the ratio objects \code{\dots}
+  appear to be incompatible. However, the code is not smart enough to
+  decide whether it is sensible to pool the data.
+
+}
+\value{
+  An object of the same class as the input.
+}
+\seealso{
+  \code{\link{rat}},
+  \code{\link{pool}},
+  \code{\link{pool.fv}},
+  \code{\link{Kest}}
+}
+\examples{
+   K1 <- Kest(runifpoint(42), ratio=TRUE, correction="iso")   
+   K2 <- Kest(runifpoint(42), ratio=TRUE, correction="iso")   
+   K3 <- Kest(runifpoint(42), ratio=TRUE, correction="iso")
+   K <- pool(K1, K2, K3)
+   plot(K, pooliso ~ r, shade=c("hiiso", "loiso"))
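+   # a hedged sketch (editor's addition): weighted ratio-of-sums pooling
+   Kw <- pool(K1, K2, K3, weights=c(1, 2, 1))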
+}
+\references{
+  Baddeley, A.J., Moyeed, R.A., Howard, C.V. and Boyde, A. (1993)
+  Analysis of a three-dimensional point pattern with replication.
+  \emph{Applied Statistics} \bold{42}, 641--668.
+
+  Cochran, W.G. (1977) 
+  \emph{Sampling techniques}, 3rd edition.
+  New York: John Wiley and Sons.
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/pp3.Rd b/man/pp3.Rd
new file mode 100644
index 0000000..54424df
--- /dev/null
+++ b/man/pp3.Rd
@@ -0,0 +1,48 @@
+\name{pp3}
+\Rdversion{1.1}
+\alias{pp3}
+\title{
+  Three Dimensional Point Pattern
+}
+\description{
+  Create a three-dimensional point pattern
+}
+\usage{
+pp3(x, y, z, ...)
+}
+\arguments{
+  \item{x,y,z}{
+    Numeric vectors of equal length, containing Cartesian coordinates
+    of points in three-dimensional space.
+}
+  \item{\dots}{
+    Arguments passed to \code{\link{as.box3}} to determine
+    the three-dimensional box in which the points have been observed.
+  }
+}
+\details{
+  An object of class \code{"pp3"} represents a pattern of points in
+  three-dimensional space. The points are assumed to have been observed
+  by exhaustively inspecting a three-dimensional rectangular box. The
+  boundaries of the box are included as part of the dataset.
+}
+\value{
+  Object of class \code{"pp3"} representing a three dimensional
+  point pattern. Also belongs to class \code{"ppx"}. 
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{box3}}, 
+  \code{\link{print.pp3}}, 
+  \code{\link{ppx}} 
+}
+\examples{
+   X <- pp3(runif(10), runif(10), runif(10), box3(c(0,1)))
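+   # a hedged sketch (editor's addition), assuming as.box3 accepts a
+   # length-6 vector c(xmin,xmax,ymin,ymax,zmin,zmax)
+   Y <- pp3(runif(5), runif(5), runif(5), c(0,1,0,1,0,1))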
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/ppm.Rd b/man/ppm.Rd
new file mode 100644
index 0000000..b4c142d
--- /dev/null
+++ b/man/ppm.Rd
@@ -0,0 +1,431 @@
+\name{ppm}
+\alias{ppm}
+\alias{ppm.formula}
+\concept{point process model}
+\concept{Poisson point process}
+\concept{Gibbs point process}
+\title{
+  Fit Point Process Model to Data
+}
+\description{
+  Fits a point process model to an observed point pattern.
+}
+\usage{
+   ppm(Q, \dots)
+
+   \method{ppm}{formula}(Q, interaction=NULL, \dots, data=NULL, subset)
+}
+\arguments{
+  \item{Q}{
+    A \code{formula} in the \R language describing the model
+    to be fitted. 
+  }
+  \item{interaction}{
+    An object of class \code{"interact"}
+    describing the point process interaction
+    structure, or a function that makes such an object,
+    or \code{NULL} indicating that a Poisson process (stationary
+    or nonstationary) should be fitted.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{ppm.ppp}}
+    or \code{\link{ppm.quad}} to control the model-fitting process.
+  }
+  \item{data}{
+    Optional. The values of spatial covariates (other than the Cartesian
+    coordinates) required by the model.
+    Either a data frame, or a list whose entries are images,
+    functions, windows, tessellations or single numbers. See Details.
+  }
+  \item{subset}{
+    Optional.
+    An expression (which may involve the names of the
+    Cartesian coordinates \code{x} and \code{y}
+    and the names of entries in \code{data})
+    defining a subset of the spatial domain,
+    to which the model-fitting should be restricted.
+    The result of evaluating the expression should be either a logical
+    vector, or a window (object of class \code{"owin"})
+    or a logical-valued pixel image (object of class \code{"im"}).
+  }
+}
+\value{
+  An object of class \code{"ppm"} describing a fitted point process
+  model.
+ 
+  See \code{\link{ppm.object}} for details of the format of this object
+  and methods available for manipulating it.
+}
+\details{
+  This function fits a point process model
+  to an observed point pattern.
+  The model may include
+  spatial trend, interpoint interaction, and dependence on covariates.
+
+  The model fitted by \code{ppm}
+  is either a Poisson point process (in which different points
+  do not interact with each other) or a Gibbs point process (in which
+  different points typically inhibit each other).
+  For clustered point process models, use \code{\link{kppm}}.
+
+  The function \code{ppm} is generic, with methods for
+  the classes \code{formula}, \code{ppp} and \code{quad}.
+  This page describes the method for a \code{formula}.
+
+  The first argument is a \code{formula} in the \R language
+  describing the spatial trend model to be fitted. It has the general form
+  \code{pattern ~ trend} where the left hand side \code{pattern} is usually
+  the name of a spatial point pattern (object of class \code{"ppp"})
+  to which the model should be fitted, or an expression which evaluates
+  to a point pattern;
+  and the right hand side \code{trend} is an expression specifying the
+  spatial trend of the model.
+
+  Systematic effects (spatial trend and/or dependence on 
+  spatial covariates) are specified by the 
+  \code{trend} expression on the right hand side of the formula.
+  The trend may involve
+  the Cartesian coordinates \code{x}, \code{y},
+  the marks \code{marks},
+  the names of entries in the argument \code{data} (if supplied),
+  or the names of objects that exist in the \R session.
+  The trend formula specifies the \bold{logarithm} of the
+  intensity of a Poisson process, or in general, the logarithm of
+  the first order potential of the Gibbs process.
+  The formula should not use any names beginning with \code{.mpl}
+  as these are reserved for internal use.
+  If the formula is \code{pattern~1}, then
+  the model to be fitted is stationary (or at least, its first order 
+  potential is constant).
+  
+  The symbol \code{.} in the trend expression stands for
+  all the covariates supplied in the argument \code{data}.
+  For example the formula \code{pattern ~ .} indicates an additive
+  model with a main effect for each covariate in \code{data}.
+  
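+  A hedged sketch (editor's addition): a log-quadratic trend in the
+  Cartesian coordinates, and an additive model in the covariates
+  supplied in \code{data} (here the \code{bei} dataset and its
+  covariate list \code{bei.extra}):
+  \preformatted{
+    fit1 <- ppm(cells ~ polynom(x, y, 2))
+    fit2 <- ppm(bei ~ ., data = bei.extra)
+  }
+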
+  Stochastic interactions between random points of the point process
+  are defined by the argument \code{interaction}. This is an object of
+  class \code{"interact"} which is initialised in a very similar way to the
+  usage of family objects in \code{\link{glm}} and \code{gam}.
+  The interaction models currently available are:
+  \GibbsInteractionsList.
+  See the examples below.
+  Note that it is possible to combine several interactions
+  using \code{\link{Hybrid}}.
+ 
+  If \code{interaction} is missing or \code{NULL},
+  then the model to be fitted
+  has no interpoint interactions, that is, it is a Poisson process
+  (stationary or nonstationary according to \code{trend}). In this case
+  the methods of maximum pseudolikelihood and maximum logistic likelihood
+  coincide with maximum likelihood. 
+  
+  The fitted point process model returned by this function can be printed 
+  (by the print method \code{\link{print.ppm}})
+  to inspect the fitted parameter values.
+  If a nonparametric spatial trend was fitted, this can be extracted using
+  the predict method \code{\link{predict.ppm}}.
+
+  To fit a model involving spatial covariates
+  other than the Cartesian coordinates \eqn{x} and \eqn{y},
+  the values of the covariates should either be supplied in the
+  argument \code{data}, or should be stored in objects that exist
+  in the \R session.
+  Note that it is not sufficient to have observed
+  the covariate only at the points of the data point pattern; 
+  the covariate must also have been observed at other 
+  locations in the window.
+
+  If it is given, the argument \code{data} is typically
+  a list, with names corresponding to variables in the \code{trend} formula.
+  Each entry in the list is either
+  \describe{
+    \item{a pixel image,}{
+      giving the values of a spatial covariate at 
+      a fine grid of locations. It should be an object of
+      class \code{"im"}, see \code{\link{im.object}}.
+    }
+    \item{a function,}{
+      which can be evaluated
+      at any location \code{(x,y)} to obtain the value of the spatial
+      covariate. It should be a \code{function(x, y)}
+      or \code{function(x, y, ...)} in the \R language.
+      The first two arguments of the function should be the
+      Cartesian coordinates \eqn{x} and \eqn{y}. The function may have
+      additional arguments; if the function does not have default
+      values for these additional arguments, then the user must
+      supply values for them, in \code{covfunargs}.
+      See the Examples.
+    }
+    \item{a window,}{
+      interpreted as a logical variable
+      which is \code{TRUE} inside the window and \code{FALSE} outside
+      it. This should be an object of class \code{"owin"}.
+    }
+    \item{a tessellation,}{
+      interpreted as a factor covariate.
+      For each spatial location, the factor value indicates
+      which tile of the tessellation it belongs to.
+      This should be an object of class \code{"tess"}.
+    }
+    \item{a single number,}{indicating a covariate that is
+      constant in this dataset.
+    }
+  }
+  The software will look up
+  the values of each covariate at the required locations
+  (quadrature points).
+
+  Note that, for covariate functions, only the \emph{name} of the
+  function appears in the trend formula. A covariate function is
+  treated as if it were a single variable. The function arguments do not
+  appear in the trend formula. See the Examples.
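+
+  As an illustrative sketch (the function \code{slope} and its
+  argument \code{gradient} are hypothetical names):
+  \preformatted{
+    X <- rpoispp(50)
+    slope <- function(x, y, gradient) { gradient * x }
+    # only the name 'slope' appears in the formula;
+    # the value of 'gradient' is supplied via covfunargs
+    ppm(X ~ slope, covfunargs = list(gradient = 2))
+  }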
+
+  If \code{data} is a list,
+  the list entries should have names corresponding to
+  (some of) the names of covariates in the model formula \code{trend}.
+  The variable names \code{x}, \code{y} and \code{marks}
+  are reserved for the Cartesian 
+  coordinates and the mark values,
+  and these should not be used for variables in \code{data}.
+
+  Alternatively, \code{data} may be a data frame
+  giving the values of the covariates at specified locations.
+  Then \code{pattern} should be a quadrature scheme (object of class
+  \code{"quad"}) giving the corresponding locations.
+  See \code{\link{ppm.quad}} for details.
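+
+  A minimal sketch of the data frame form, with covariate values
+  computed at the quadrature points purely for illustration:
+  \preformatted{
+    X <- rpoispp(60)
+    Q <- quadscheme(X)
+    Zvalues <- x.quad(Q) + y.quad(Q)  # one value per quadrature point
+    ppm(Q ~ Z, data = data.frame(Z = Zvalues))
+  }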
+}
+\section{Interaction parameters}{
+  Apart from the Poisson model, every point process model fitted by
+  \code{ppm} has parameters that determine the strength and
+  range of \sQuote{interaction} or dependence between points.
+  These parameters are of two types:
+  \describe{
+    \item{regular parameters:}{
+      A parameter \eqn{\phi}{phi} is called \emph{regular}
+      if the log likelihood is a linear function of \eqn{\theta}{theta} where 
+      \eqn{\theta = \theta(\phi)}{theta = theta(phi)} is some transformation of 
+      \eqn{\phi}{phi}. [Then \eqn{\theta}{theta} is called the canonical
+      parameter.]
+    }
+    \item{irregular parameters:}{
+      Other parameters are called \emph{irregular}. 
+    }
+  }
+  Typically, regular parameters determine the \sQuote{strength}
+  of the interaction, while irregular parameters determine the
+  \sQuote{range} of the interaction. For example, the Strauss process
+  has a regular parameter \eqn{\gamma}{gamma} controlling the strength
+  of interpoint inhibition, and an irregular parameter \eqn{r}
+  determining the range of interaction.
+
+  The \code{ppm} command is only designed to estimate regular
+  parameters of the interaction.
+  It requires the values of any irregular parameters of the interaction
+  to be fixed. For example, to fit a Strauss process model to the \code{cells}
+  dataset, you could type \code{ppm(cells ~ 1, Strauss(r=0.07))}.
+  Note that the value of the irregular parameter \code{r} must be given.
+  The result of this command will be a fitted model in which the
+  regular parameter \eqn{\gamma}{gamma} has been estimated.
+
+  To determine the irregular parameters, there are several
+  practical techniques, but no general statistical theory available.
+  Useful techniques include maximum profile pseudolikelihood, which
+  is implemented in the command \code{\link{profilepl}},
+  and Newton-Raphson maximisation, implemented in the
+  experimental command \code{\link{ippm}}. 
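+
+  For instance, a rough sketch of profile pseudolikelihood for the
+  Strauss interaction range (the grid of trial values is arbitrary):
+  \preformatted{
+    df <- data.frame(r = seq(0.05, 0.15, by = 0.01))
+    pfit <- profilepl(df, Strauss, cells ~ 1)
+    as.ppm(pfit)  # fitted model at the optimal r
+  }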
+
+  Some irregular parameters can be estimated directly from data:
+  the hard-core radius in the model \code{\link{Hardcore}}
+  and the matrix of hard-core radii in \code{\link{MultiHard}} can be
+  estimated easily from data. In these cases, \code{ppm} allows the user
+  to specify the interaction without giving
+  the value of the irregular parameter. The user can give the
+  hard core interaction as \code{interaction=Hardcore()}
+  or even \code{interaction=Hardcore}, and 
+  the hard core radius will then be estimated from the data.
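+
+  A minimal sketch:
+  \preformatted{
+    ppm(cells ~ 1, Hardcore)  # hard core radius estimated from data
+  }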
+}
+\section{Technical Warnings and Error Messages}{
+  See \code{\link{ppm.ppp}} for some technical warnings about the
+  weaknesses of the algorithm, and explanation of some common error messages.
+}
+\references{
+  Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014)
+  Logistic regression for spatial Gibbs point processes.
+  \emph{Biometrika} \bold{101} (2) 377--392.
+
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42} 283--322.
+ 
+  Berman, M. and Turner, T.R. (1992)
+  Approximating point process likelihoods with GLIM.
+  \emph{Applied Statistics} \bold{41},  31--38.
+ 
+  Besag, J. (1975)
+  Statistical analysis of non-lattice data.
+  \emph{The Statistician} \bold{24}, 179--195.
+ 
+  Diggle, P.J., Fiksel, T., Grabarnik, P., Ogata, Y., Stoyan, D. and
+  Tanemura, M. (1994)
+  On parameter estimation for pairwise interaction processes.
+  \emph{International Statistical Review} \bold{62}, 99--117.
+
+  Huang, F. and Ogata, Y. (1999)
+  Improvements of the maximum pseudo-likelihood estimators
+  in various spatial statistical models.
+  \emph{Journal of Computational and Graphical Statistics}
+  \bold{8}, 510--530.
+  
+  Jensen, J.L. and Moeller, M. (1991)
+  Pseudolikelihood for exponential family models of spatial point processes.
+  \emph{Annals of Applied Probability} \bold{1}, 445--461.
+ 
+  Jensen, J.L. and Kuensch, H.R. (1994)
+  On asymptotic normality of pseudo likelihood
+  estimates for pairwise interaction processes.
+  \emph{Annals of the Institute of Statistical Mathematics}
+  \bold{46}, 475--486.
+}
+\seealso{
+  \code{\link{ppm.ppp}} and \code{\link{ppm.quad}} for
+  more details on the fitting technique and edge correction.
+
+  \code{\link{ppm.object}} for details of how to
+  print, plot and manipulate a fitted model.
+
+  \code{\link{ppp}} and \code{\link{quadscheme}}
+  for constructing data.
+  
+  Interactions: \GibbsInteractionsList.
+
+  See \code{\link{profilepl}} for advice on
+  fitting nuisance parameters in the interaction,
+  and \code{\link{ippm}} for irregular parameters in the trend.
+
+  See \code{\link{valid.ppm}} and \code{\link{project.ppm}} for
+  ensuring the fitted model is a valid point process.
+
+  See \code{\link{kppm}} for fitting Cox point process models
+  and cluster point process models, and \code{\link{dppm}} for fitting
+  determinantal point process models.
+}
+\examples{
+ # fit the stationary Poisson process
+ # to point pattern 'nztrees'
+
+ ppm(nztrees ~ 1)
+
+ \dontrun{
+ Q <- quadscheme(nztrees) 
+ ppm(Q ~ 1) 
+ # equivalent.
+ }
+
+fit1 <- ppm(nztrees ~ x)
+ # fit the nonstationary Poisson process 
+ # with intensity function lambda(x,y) = exp(a + bx)
+ # where x,y are the Cartesian coordinates
+ # and a,b are parameters to be estimated
+
+fit1
+coef(fit1)
+coef(summary(fit1))
+
+\dontrun{
+ ppm(nztrees ~ polynom(x,2))
+}
+\testonly{
+ ppm(nztrees ~ polynom(x,2), nd=16)
+}
+
+ # fit the nonstationary Poisson process 
+ # with intensity function lambda(x,y) = exp(a + bx + cx^2)
+
+ \dontrun{
+ library(splines)
+ ppm(nztrees ~ bs(x,df=3))
+ }
+ #       WARNING: do not use predict.ppm() on this result
+ # Fits the nonstationary Poisson process 
+ # with intensity function lambda(x,y) = exp(B(x))
+ # where B is a B-spline with df = 3
+
+\dontrun{
+ ppm(nztrees ~ 1, Strauss(r=10), rbord=10)
+}
+\testonly{
+ ppm(nztrees ~ 1, Strauss(r=10), rbord=10, nd=16)
+}
+ # Fit the stationary Strauss process with interaction range r=10
+ # using the border method with margin rbord=10
+
+\dontrun{
+ ppm(nztrees ~ x, Strauss(13), correction="periodic")
+}
+\testonly{
+ ppm(nztrees ~ x, Strauss(13), correction="periodic", nd=16)
+}
+ # Fit the nonstationary Strauss process with interaction range r=13
+ # and exp(first order potential) =  activity = beta(x,y) = exp(a+bx)
+ # using the periodic correction.
+
+
+# Compare Maximum Pseudolikelihood, Huang-Ogata and Variational Bayes fits:
+\dontrun{ppm(swedishpines ~ 1, Strauss(9))}
+
+\dontrun{ppm(swedishpines ~ 1, Strauss(9), method="ho")}
+\testonly{ppm(swedishpines ~ 1, Strauss(9), method="ho", nd=16, nsim=8)}
+
+ppm(swedishpines ~ 1, Strauss(9), method="VBlogi")
+
+ # COVARIATES
+ #
+ X <- rpoispp(42)
+ weirdfunction <- function(x,y){ 10 * x^2 + 5 * sin(10 * y) }
+ #
+ # (a) covariate values as function
+ ppm(X ~ y + weirdfunction)
+ #
+ # (b) covariate values in pixel image
+ Zimage <- as.im(weirdfunction, unit.square())
+ ppm(X ~ y + Z, covariates=list(Z=Zimage))
+ #
+ # (c) covariate values in data frame
+ Q <- quadscheme(X)
+ xQ <- x.quad(Q)
+ yQ <- y.quad(Q)
+ Zvalues <- weirdfunction(xQ,yQ)
+ ppm(Q ~  y + Z, data=data.frame(Z=Zvalues))
+ # Note Q not X
+
+ # COVARIATE FUNCTION WITH EXTRA ARGUMENTS
+ #
+f <- function(x,y,a){ y - a }
+ppm(X ~ x + f, covfunargs=list(a=1/2))
+
+ # COVARIATE: inside/outside window
+ b <- owin(c(0.1, 0.6), c(0.1, 0.9))
+ ppm(X ~ b)
+
+ ## MULTITYPE POINT PROCESSES ### 
+ # fit stationary marked Poisson process
+ # with different intensity for each species
+\dontrun{ppm(lansing ~  marks, Poisson())}
+\testonly{
+  ama <- amacrine[square(0.7)]
+  a <- ppm(ama ~  marks, Poisson(), nd=16)
+}
+
+ # fit nonstationary marked Poisson process
+ # with different log-cubic trend for each species
+\dontrun{ppm(lansing ~  marks * polynom(x,y,3), Poisson())}
+\testonly{b <- ppm(ama ~  marks * polynom(x,y,2), Poisson(), nd=16)}
+
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/ppm.object.Rd b/man/ppm.object.Rd
new file mode 100644
index 0000000..0fd173b
--- /dev/null
+++ b/man/ppm.object.Rd
@@ -0,0 +1,164 @@
+\name{ppm.object}
+\alias{ppm.object}  %DoNotExport
+\alias{methods.ppm} %DoNotExport
+\title{Class of Fitted Point Process Models}
+\description{
+  A class \code{ppm} to represent a fitted stochastic model
+  for a point process. The output of \code{\link{ppm}}.
+}
+\details{
+  An object of class \code{ppm} represents a stochastic point process
+  model that has been fitted to a point pattern dataset.
+  Typically it is the output of the model fitter,
+  \code{\link{ppm}}.
+
+  The class \code{ppm} has methods for the following
+  standard generic functions:
+
+  \tabular{lll}{
+    generic \tab method \tab description \cr
+    \code{print} \tab \code{\link{print.ppm}}
+    \tab print details \cr
+    \code{plot} \tab \code{\link{plot.ppm}}
+    \tab plot fitted model \cr
+    \code{predict} \tab \code{\link{predict.ppm}}
+    \tab fitted intensity and conditional intensity \cr 
+    \code{fitted} \tab \code{\link{fitted.ppm}}
+    \tab fitted intensity \cr
+    \code{coef} \tab \code{\link{coef.ppm}}
+    \tab fitted coefficients of model \cr
+    \code{anova} \tab \code{\link{anova.ppm}}
+    \tab Analysis of Deviance \cr
+    \code{formula} \tab \code{\link{formula.ppm}}
+    \tab Extract model formula \cr
+    \code{terms} \tab \code{\link{terms.ppm}}
+    \tab Terms in the model formula \cr
+    \code{labels} \tab \code{labels.ppm}
+    \tab Names of estimable terms in the model formula \cr
+    \code{residuals} \tab \code{\link{residuals.ppm}}
+    \tab Point process residuals \cr
+    \code{simulate} \tab \code{\link{simulate.ppm}}
+    \tab Simulate the fitted model \cr
+    \code{update} \tab \code{\link{update.ppm}}
+    \tab Change or refit the model \cr
+    \code{vcov} \tab \code{\link{vcov.ppm}}
+    \tab Variance/covariance matrix of parameter estimates \cr
+    \code{model.frame} \tab \code{\link{model.frame.ppm}}
+    \tab Model frame \cr
+    \code{model.matrix} \tab \code{\link{model.matrix.ppm}}
+    \tab Design matrix \cr
+    \code{logLik} \tab \code{\link{logLik.ppm}}
+    \tab log \emph{pseudo} likelihood \cr
+    \code{extractAIC} \tab \code{\link{extractAIC.ppm}}
+    \tab pseudolikelihood counterpart of AIC \cr
+    \code{nobs} \tab \code{\link{nobs.ppm}}
+    \tab number of observations 
+  }
+
+  Objects of class \code{ppm} can also be handled by the
+  following standard functions, without requiring a special method:
+
+  \tabular{ll}{
+    name \tab description \cr
+    \code{\link{confint}} \tab Confidence intervals for parameters \cr
+    \code{\link{step}} \tab Stepwise model selection \cr
+    \code{\link{drop1}} \tab One-step model improvement \cr
+    \code{\link{add1}} \tab  One-step model improvement
+  }
+
+  The class \code{ppm} also has methods for the following
+  generic functions defined in the \pkg{spatstat} package:
+
+  \tabular{lll}{
+    generic \tab method \tab description \cr
+    \code{\link{as.interact}} \tab \code{\link{as.interact.ppm}}
+    \tab Interpoint interaction structure \cr
+    \code{\link{as.owin}} \tab \code{\link{as.owin.ppm}}
+    \tab Observation window of data \cr
+    \code{\link{berman.test}} \tab \code{\link{berman.test.ppm}}
+    \tab Berman's test \cr
+    \code{\link{envelope}} \tab \code{\link{envelope.ppm}}
+    \tab Simulation envelopes \cr
+    \code{\link{fitin}} \tab \code{\link{fitin.ppm}}
+    \tab Fitted interaction \cr
+    \code{\link{is.marked}} \tab \code{\link{is.marked.ppm}}
+    \tab Determine whether the model is marked \cr
+    \code{\link{is.multitype}} \tab \code{\link{is.multitype.ppm}}
+    \tab Determine whether the model is multitype \cr
+    \code{\link{is.poisson}} \tab \code{\link{is.poisson.ppm}}
+    \tab Determine whether the model is Poisson \cr
+    \code{\link{is.stationary}} \tab \code{\link{is.stationary.ppm}}
+    \tab Determine whether the model is stationary \cr
+    \code{\link{cdf.test}} \tab \code{\link{cdf.test.ppm}}
+    \tab Spatial distribution test \cr
+    \code{\link{quadrat.test}} \tab \code{\link{quadrat.test.ppm}}
+    \tab Quadrat counting test \cr
+    \code{\link{reach}} \tab \code{\link{reach.ppm}}
+    \tab Interaction range of model \cr
+    \code{\link{rmhmodel}} \tab \code{\link{rmhmodel.ppm}}
+    \tab Model in a form that can be simulated \cr
+    \code{\link{rmh}} \tab \code{\link{rmh.ppm}}
+    \tab Perform simulation \cr
+    \code{\link{unitname}} \tab \code{\link{unitname.ppm}}
+    \tab Name of unit of length
+    }
+    
+  Information about the data (to which the model was fitted)
+  can be extracted using \code{\link{data.ppm}}, \code{\link{dummy.ppm}}
+  and \code{\link{quad.ppm}}.
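+
+  A brief sketch of typical usage (any small point pattern dataset
+  could stand in for \code{cells}):
+  \preformatted{
+    fit <- ppm(cells ~ x)
+    coef(fit)             # fitted coefficients
+    confint(fit)          # confidence intervals for parameters
+    lam <- predict(fit)   # fitted intensity as a pixel image
+    Xsim <- simulate(fit, nsim = 1)
+  }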
+}
+\section{Internal format}{
+  If you really need to get at the internals,
+  a \code{ppm} object contains at least the following entries:
+  \tabular{ll}{
+    \code{coef} \tab  the fitted regular parameters (as returned by
+                     \code{glm}) \cr
+    \code{trend} \tab  the trend formula or \code{NULL} \cr
+    \code{interaction} \tab the point process interaction family 
+                        (an object of class \code{"interact"})
+			or \code{NULL} \cr
+    \code{Q} \tab      the quadrature scheme used \cr
+    \code{maxlogpl} \tab the maximised value of log pseudolikelihood \cr
+    \code{correction} \tab  name of edge correction method used \cr
+  }
+  See \code{\link{ppm}} for explanation of these concepts.
+  The irregular parameters (e.g. the interaction radius of the
+  Strauss process) are encoded in the \code{interaction} entry.
+  However see the Warnings.
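+
+  For example (but see the Warnings below about relying on the
+  internal format):
+  \preformatted{
+    fit <- ppm(cells ~ 1, Strauss(0.1))
+    fit$coef       # fitted regular parameters
+    fit$maxlogpl   # maximised log pseudolikelihood
+  }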
+}
+
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{coef.ppm}},
+  \code{\link{fitted.ppm}},
+  \code{\link{print.ppm}},
+  \code{\link{predict.ppm}},
+  \code{\link{plot.ppm}}.
+}
+\section{Warnings}{
+  The internal representation of \code{ppm} objects
+  may change slightly between releases of the \pkg{spatstat} package.
+}
+\examples{
+  data(cells)
+  fit <- ppm(cells, ~ x, Strauss(0.1), correction="periodic")
+  fit
+  coef(fit)
+  \dontrun{
+  pred <- predict(fit)
+  }
+  pred <- predict(fit, ngrid=20, type="trend")
+  \dontrun{
+  plot(fit)
+  }  
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{attribute}
+ 
+ 
diff --git a/man/ppm.ppp.Rd b/man/ppm.ppp.Rd
new file mode 100644
index 0000000..cc103f2
--- /dev/null
+++ b/man/ppm.ppp.Rd
@@ -0,0 +1,869 @@
+\name{ppm.ppp}
+\alias{ppm.ppp}
+\alias{ppm.quad}
+\concept{point process model}
+\concept{Poisson point process}
+\concept{Gibbs point process}
+\title{
+  Fit Point Process Model to Point Pattern Data
+}
+\description{
+  Fits a point process model to an observed point pattern.
+}
+\usage{
+   \method{ppm}{ppp}(Q, trend=~1, interaction=Poisson(),
+       \dots,
+       covariates=data,
+       data=NULL,
+       covfunargs = list(),
+       subset,
+       correction="border",
+       rbord=reach(interaction),
+       use.gam=FALSE,
+       method="mpl",
+       forcefit=FALSE,
+       emend=project,
+       project=FALSE,
+       prior.mean = NULL,
+       prior.var = NULL,
+       nd = NULL,
+       eps = NULL,
+       gcontrol=list(),
+       nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh),
+       verb=TRUE,
+       callstring=NULL)
+
+   \method{ppm}{quad}(Q, trend=~1, interaction=Poisson(),
+       \dots,
+       covariates=data,
+       data=NULL,
+       covfunargs = list(),
+       subset,
+       correction="border",
+       rbord=reach(interaction),
+       use.gam=FALSE,
+       method="mpl",
+       forcefit=FALSE,
+       emend=project,
+       project=FALSE,
+       prior.mean = NULL,
+       prior.var = NULL,
+       nd = NULL,
+       eps = NULL,
+       gcontrol=list(),
+       nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh),
+       verb=TRUE,
+       callstring=NULL)
+}
+\arguments{
+  \item{Q}{
+    A data point pattern (of class \code{"ppp"})
+    to which the model will be fitted,
+    or a quadrature scheme (of class \code{"quad"})
+    containing this pattern.
+  }
+  \item{trend}{
+  An \R formula object specifying the spatial trend to be fitted. 
+  The default formula, \code{~1}, indicates the model is stationary
+  and no trend is to be fitted. 
+  }
+  \item{interaction}{
+    An object of class \code{"interact"}
+    describing the point process interaction
+    structure, or a function that makes such an object,
+    or \code{NULL} indicating that a Poisson process (stationary
+    or nonstationary) should be fitted.
+  }
+  \item{\dots}{Ignored.}
+  \item{data,covariates}{
+    The values of any spatial covariates (other than the Cartesian
+    coordinates) required by the model.
+    Either a data frame, or a list whose entries are images,
+    functions, windows, tessellations or single numbers. See Details.
+  }
+  \item{subset}{
+    Optional.
+    An expression (which may involve the names of the
+    Cartesian coordinates \code{x} and \code{y}
+    and the names of entries in \code{data})
+    defining a subset of the spatial domain,
+    to which the model-fitting should be restricted.
+    The result of evaluating the expression should be either a logical
+    vector, or a window (object of class \code{"owin"})
+    or a logical-valued pixel image (object of class \code{"im"}).
+  }
+  \item{covfunargs}{
+    A named list containing the values of any additional arguments
+    required by covariate functions.
+  }
+  \item{correction}{
+    The name of the edge correction to be used. The default 
+    is \code{"border"} indicating the border correction.
+    Other possibilities may include \code{"Ripley"}, \code{"isotropic"},
+    \code{"periodic"}, \code{"translate"} and \code{"none"}, depending on the 
+    \code{interaction}.
+  }
+  \item{rbord}{
+    If \code{correction = "border"}
+    this argument specifies the distance by which
+    the window should be eroded for the border correction.
+  }
+  \item{use.gam}{
+    Logical flag; if \code{TRUE} then computations are performed
+    using \code{gam} instead of \code{\link{glm}}.
+  }
+  \item{method}{
+    The method used to fit the model. Options are 
+    \code{"mpl"} for the method of Maximum PseudoLikelihood,
+    \code{"logi"} for the Logistic Likelihood method,
+    \code{"VBlogi"} for the Variational Bayes Logistic Likelihood method,
+    and \code{"ho"} for the Huang-Ogata approximate maximum likelihood
+    method.
+  }
+  \item{forcefit}{
+    Logical flag for internal use.
+    If \code{forcefit=FALSE}, some trivial models will be
+    fitted by a shortcut. If \code{forcefit=TRUE},
+    the generic fitting method will always be used. 
+  }
+  \item{emend,project}{
+    (These are equivalent: \code{project} is an older name for
+    \code{emend}.)
+    Logical value. Setting \code{emend=TRUE} will ensure that the
+    fitted model is always a valid point process by
+    applying \code{\link{emend.ppm}}.
+  }
+  \item{prior.mean}{
+    Optional vector of prior means for canonical parameters (for
+       \code{method="VBlogi"}). See Details.
+  }
+  \item{prior.var}{
+    Optional prior variance-covariance matrix for canonical parameters
+    (for \code{method="VBlogi"}). See Details.
+  }
+  \item{nd}{
+    Optional. Integer or pair of integers.
+    The dimension of the grid of dummy points (\code{nd * nd}
+    or \code{nd[1] * nd[2]})
+    used to evaluate the integral in the pseudolikelihood.
+    Incompatible with \code{eps}.
+  }
+  \item{eps}{
+    Optional. 
+    A positive number, or a vector of two positive numbers, giving the
+    horizontal and vertical spacing, respectively, of the grid of
+    dummy points. Incompatible with \code{nd}.
+  }
+  \item{gcontrol}{
+    Optional. List of parameters passed to \code{\link{glm.control}}
+    (or passed to \code{\link{gam.control}} if \code{use.gam=TRUE})
+    controlling the model-fitting algorithm. 
+  }
+  \item{nsim}{
+    Number of simulated realisations
+    to generate (for \code{method="ho"})
+  }
+  \item{nrmh}{
+    Number of Metropolis-Hastings iterations
+    for each simulated realisation (for \code{method="ho"})
+  }
+  \item{start,control}{
+    Arguments passed to \code{\link{rmh}} controlling the behaviour
+    of the Metropolis-Hastings algorithm (for \code{method="ho"})
+  }
+  \item{verb}{
+    Logical flag indicating whether to print progress reports
+    (for \code{method="ho"})
+  }
+  \item{callstring}{
+    Internal use only.
+  }
+}
+\value{
+  An object of class \code{"ppm"} describing a fitted point process
+  model.
+ 
+  See \code{\link{ppm.object}} for details of the format of this object
+  and methods available for manipulating it.
+}
+\details{
+  \bold{NOTE:} This help page describes the \bold{old syntax} of the
+  function \code{ppm}, described in many older documents.
+  This old syntax is still supported. However, if you are learning about
+  \code{ppm} for the first time, we recommend you use the
+  \bold{new syntax} described in the help file for \code{\link{ppm}}.
+  
+  This function fits a point process model
+  to an observed point pattern.
+  The model may include
+  spatial trend, interpoint interaction, and dependence on covariates.
+
+  \describe{
+    \item{basic use:}{
+      In basic use, \code{Q} is a point pattern dataset
+      (an object of class \code{"ppp"}) to which we wish to fit a model.
+
+      The syntax of \code{ppm()} is closely analogous to the \R functions
+      \code{\link{glm}} and \code{gam}.
+      The analogy is:
+      \tabular{ll}{
+	\bold{glm} \tab \bold{ppm} \cr
+	\code{formula} \tab \code{trend} \cr
+	\code{family} \tab \code{interaction}
+      }
+      The point process model to be fitted is specified by the 
+      arguments \code{trend} and \code{interaction}
+      which are respectively analogous to
+      the \code{formula} and \code{family} arguments of \code{glm()}. 
+ 
+      Systematic effects (spatial trend and/or dependence on 
+      spatial covariates) are specified by the argument
+      \code{trend}. This is an \R formula object, which may be expressed
+      in terms of the Cartesian coordinates \code{x}, \code{y},
+      the marks \code{marks},
+      and/or the variables in \code{covariates} (if supplied).
+      It specifies the \bold{logarithm} of the first order potential
+      of the process.
+      The formula should not use any names beginning with \code{.mpl}
+      as these are reserved for internal use.
+      If \code{trend} is absent or equal to the default, \code{~1}, then
+      the model to be fitted is stationary (or at least, its first order 
+      potential is constant). 
+ 
+      The symbol \code{.} in the trend expression stands for
+      all the covariates supplied in the argument \code{covariates}.
+      For example the formula \code{~ .} indicates an additive
+      model with a main effect for each covariate in \code{covariates}.
+  
+      Stochastic interactions between random points of the point process
+      are defined by the argument \code{interaction}. This is an object of
+      class \code{"interact"} which is initialised in a very similar way to the
+      usage of family objects in \code{\link{glm}} and \code{gam}.
+      The models currently available are:
+      \GibbsInteractionsList.
+      See the examples below.
+      It is also possible to combine several interactions
+      using \code{\link{Hybrid}}.
+ 
+      If \code{interaction} is missing or \code{NULL},
+      then the model to be fitted
+      has no interpoint interactions, that is, it is a Poisson process
+      (stationary or nonstationary according to \code{trend}). In this case
+      the methods of maximum pseudolikelihood and maximum logistic likelihood
+      coincide with maximum likelihood. 
+
+      The fitted point process model returned by this function can be printed 
+      (by the print method \code{\link{print.ppm}})
+      to inspect the fitted parameter values.
+      If a nonparametric spatial trend was fitted, this can be extracted using
+      the predict method \code{\link{predict.ppm}}.
+    }
+    \item{Models with covariates:}{
+      To fit a model involving spatial covariates
+      other than the Cartesian coordinates \eqn{x} and \eqn{y},
+      the values of the covariates should be supplied in the
+      argument \code{covariates}. 
+      Note that it is not sufficient to have observed
+      the covariate only at the points of the data point pattern; 
+      the covariate must also have been observed at other 
+      locations in the window.
+
+      Typically the argument \code{covariates} is a list,
+      with names corresponding to variables in the \code{trend} formula.
+      Each entry in the list is either
+      \describe{
+	\item{a pixel image,}{
+	  giving the values of a spatial covariate at 
+	  a fine grid of locations. It should be an object of
+	  class \code{"im"}, see \code{\link{im.object}}.
+	}
+	\item{a function,}{
+	  which can be evaluated
+	  at any location \code{(x,y)} to obtain the value of the spatial
+	  covariate. It should be a \code{function(x, y)}
+	  or \code{function(x, y, ...)} in the \R language.
+	  The first two arguments of the function should be the
+	  Cartesian coordinates \eqn{x} and \eqn{y}. The function may have
+	  additional arguments; if the function does not have default
+	  values for these additional arguments, then the user must
+	  supply values for them, in \code{covfunargs}.
+	  See the Examples.
+	}
+	\item{a window,}{
+	  interpreted as a logical variable
+	  which is \code{TRUE} inside the window and \code{FALSE} outside
+	  it. This should be an object of class \code{"owin"}.
+	}
+	\item{a tessellation,}{
+	  interpreted as a factor covariate.
+	  For each spatial location, the factor value indicates
+	  which tile of the tessellation it belongs to.
+	  This should be an object of class \code{"tess"}.
+	}
+	\item{a single number,}{indicating a covariate that is
+	  constant in this dataset.
+	}
+      }
+      The software will look up
+      the values of each covariate at the required locations
+      (quadrature points).
+
+      Note that, for covariate functions, only the \emph{name} of the
+      function appears in the trend formula. A covariate function is
+      treated as if it were a single variable. The function arguments do not
+      appear in the trend formula. See the Examples.
+
+      If \code{covariates} is a list,
+      the list entries should have names corresponding to
+      the names of covariates in the model formula \code{trend}.
+      The variable names \code{x}, \code{y} and \code{marks}
+      are reserved for the Cartesian 
+      coordinates and the mark values,
+      and these should not be used for variables in \code{covariates}.
+
+      If \code{covariates} is a data frame, \code{Q} must be a
+      quadrature scheme (see under Quadrature Schemes below).
+      Then \code{covariates} must have
+      as many rows as there are points in \code{Q}.
+      The \eqn{i}th row of \code{covariates} should contain the values of
+      spatial variables which have been observed
+      at the \eqn{i}th point of \code{Q}. 
+    }
+    \item{Quadrature schemes:}{
+      In advanced use, \code{Q} may be a `quadrature scheme'.
+      This was originally just a technicality but it has turned out
+      to have practical uses, as we explain below.
+
+      Quadrature schemes are required for our implementation of
+      the method of maximum pseudolikelihood.
+      The definition of the pseudolikelihood involves an integral over
+      the spatial window containing the data. In practice this integral
+      must be approximated by a finite sum over a set of quadrature points.
+      We use the technique of Baddeley and Turner (2000), a generalisation
+      of the Berman-Turner (1992) device. In this technique the quadrature
+      points for the numerical approximation include all the data points
+      (points of the observed point pattern) as well as
+      additional `dummy' points. 
+
+      Quadrature schemes are also required for 
+      the method of maximum logistic likelihood, which
+      combines the data points with additional `dummy' points.
+      
+      A quadrature scheme is an object of class \code{"quad"}
+      (see \code{\link{quad.object}})
+      which specifies both the data point pattern and the dummy points
+      for the quadrature scheme, as well as the quadrature weights
+      associated with these points.
+      If \code{Q} is simply a point pattern
+      (of class \code{"ppp"}, see \code{\link{ppp.object}})
+      then it is interpreted as specifying the
+      data points only; a set of dummy points specified
+      by \code{\link{default.dummy}()} is added,
+      and the default weighting rule is
+      invoked to compute the quadrature weights.
+ 
+      Finer quadrature schemes (i.e. those with more dummy
+      points) generally yield a better approximation, at the
+      expense of higher computational load. 
+
+      An easy way to fit models using a finer quadrature scheme
+      is to let \code{Q} be the original point pattern data,
+      and use the argument \code{nd}
+      to determine the number of dummy points in the quadrature scheme.
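+
+      For example, a sketch requesting a finer grid of dummy points:
+      \preformatted{
+        ppm(cells, ~1, Strauss(0.1), nd = 64)
+      }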
+      
+      Complete control over the quadrature scheme is possible.
+      See \code{\link{quadscheme}} for an overview.
+      Use \code{quadscheme(X, D, method="dirichlet")} to compute
+      quadrature weights based on the Dirichlet tessellation,
+      or \code{quadscheme(X, D, method="grid")} to compute
+      quadrature weights by counting points in grid squares,
+      where \code{X} and \code{D} are the patterns of data points
+      and dummy points respectively.
+      Alternatively use \code{\link{pixelquad}} to make a quadrature
+      scheme with a dummy point at every pixel in a pixel image.
+
+      A practical advantage of quadrature schemes arises when we want to fit
+      a model involving covariates (e.g. soil pH). Suppose we have only been
+      able to observe the covariates at a small number of locations.
+      Suppose \code{cov.dat} is a data frame containing the values of
+      the covariates at the data points (i.e.\ \code{cov.dat[i,]}
+      contains the observations for the \code{i}th data point)
+      and \code{cov.dum} is another data frame (with the same columns as
+      \code{cov.dat}) containing the covariate values at another
+      set of points whose locations are given by the point pattern \code{Y}.
+      Then setting \code{Q = quadscheme(X,Y)} combines the data points
+      and dummy points into a quadrature scheme, and 
+      \code{covariates = rbind(cov.dat, cov.dum)} combines the covariate
+      data frames. We can then fit the model by calling
+      \code{ppm(Q, ..., covariates)}.
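+
+      A self-contained sketch of this workflow, with synthetic
+      \code{pH} values invented purely for illustration:
+      \preformatted{
+        X <- rpoispp(30)  # data points
+        Y <- rpoispp(30)  # extra locations where the covariate was observed
+        cov.dat <- data.frame(pH = runif(npoints(X), 5, 7))
+        cov.dum <- data.frame(pH = runif(npoints(Y), 5, 7))
+        Q <- quadscheme(X, Y)
+        fit <- ppm(Q, ~ pH, covariates = rbind(cov.dat, cov.dum))
+      }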
+    }
+    \item{Model-fitting technique:}{
+      There are several choices for the technique used
+      to fit the model.
+      \describe{
+	\item{method="mpl"}{
+	  (the default):
+	  the model will be fitted by maximising the 
+	  pseudolikelihood (Besag, 1975) using the
+	  Berman-Turner computational approximation
+	  (Berman and Turner, 1992; Baddeley and Turner, 2000).
+	  Maximum pseudolikelihood is equivalent to maximum likelihood
+	  if the model is a Poisson process. 
+	  Maximum pseudolikelihood is biased if the
+	  interpoint interaction is very strong, unless there
+	  is a large number of dummy points.
+	  The default settings for \code{method='mpl'}
+	  specify a moderately large number of dummy points,
+	  striking a compromise between speed and accuracy.
+	}
+	\item{method="logi":}{
+	  the model will be fitted by maximising the 
+	  logistic likelihood (Baddeley et al, 2014).
+	  This technique is roughly equivalent in speed to
+	  maximum pseudolikelihood, but is 
+	  believed to be less biased. Because it is less biased,
+	  the default settings for \code{method='logi'}
+	  specify a relatively small number of dummy points,
+	  so that this method is the fastest, in practice.
+	}
+	\item{method="VBlogi":}{	  
+	  the model will be fitted in a Bayesian setup by maximising the
+	  posterior probability density for the canonical model
+	  parameters. This uses the variational Bayes approximation to
+	  the posterior derived from the logistic likelihood as described
+	  in Rajala (2014). The prior is assumed to be multivariate
+	  Gaussian with mean vector \code{prior.mean} and variance-covariance
+	  matrix \code{prior.var}.	  
+	}
+	\item{method="ho":}{
+	  the model will be fitted
+	  by applying the approximate maximum likelihood
+	  method of Huang and Ogata (1999). See below.
+	  The Huang-Ogata method is slower than the other options,
+	  but has better statistical properties.
+	}
+      }
+      Note that \code{method='logi'}, \code{method='VBlogi'} and
+      \code{method='ho'} involve randomisation, so that the results are
+      subject to random variation.
+    }
+    \item{Huang-Ogata method:}{
+      If \code{method="ho"} then the model will be fitted using
+      the Huang-Ogata (1999) approximate maximum likelihood method.
+      First the model is fitted by maximum pseudolikelihood as
+      described above, yielding an initial estimate of the parameter
+      vector \eqn{\theta_0}{theta0}.
+      From this initial model, \code{nsim} simulated
+      realisations are generated. The score and Fisher information of
+      the model at \eqn{\theta=\theta_0}{theta=theta0}
+      are estimated from the simulated realisations. Then one step
+      of the Fisher scoring algorithm is taken, yielding an updated
+      estimate \eqn{\theta_1}{theta1}. The corresponding model is
+      returned.
+
+      Simulated realisations are generated using \code{\link{rmh}}.
+      The iterative behaviour of the Metropolis-Hastings algorithm
+      is controlled by the arguments \code{start} and \code{control}
+      which are passed to \code{\link{rmh}}.
+
+      As a shortcut, the argument
+      \code{nrmh} determines the number of Metropolis-Hastings
+      iterations run to produce one simulated realisation (if
+      \code{control} is absent). Also
+      if \code{start} is absent or equal to \code{NULL}, it defaults to
+      \code{list(n.start=N)} where \code{N} is the number of points
+      in the data point pattern.
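+
+      A hedged sketch (small values of \code{nsim} and \code{nrmh}
+      chosen only to keep the illustration quick):
+      \preformatted{
+        fit <- ppm(cells, ~1, Strauss(0.1),
+                   method = "ho", nsim = 20, nrmh = 1e4)
+      }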
+    }
+    \item{Edge correction:}{
+      Edge correction should be applied to the sufficient statistics
+      of the model, to reduce bias.
+      The argument \code{correction} is the name of an edge correction
+      method.
+      The default \code{correction="border"} specifies the border correction,
+      in which the quadrature window (the domain of integration of the 
+      pseudolikelihood) is obtained by trimming off a margin of width
+      \code{rbord} from the observation window of the data pattern.
+      Not all edge corrections are implemented (or implementable)
+      for arbitrary windows.
+      Other options depend on the argument \code{interaction}, but these
+      generally include \code{correction="periodic"} (the periodic or toroidal edge
+      correction in which opposite edges of a rectangular window are
+      identified) and \code{correction="translate"} (the translation correction,
+      see Baddeley 1998 and Baddeley and Turner 2000).
+      For pairwise interaction models
+      there is also Ripley's isotropic correction,
+      identified by \code{correction="isotropic"} or \code{"Ripley"}.
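+
+      For instance, a minimal sketch comparing two corrections:
+      \preformatted{
+        ppm(cells, ~1, Strauss(0.1), correction = "border", rbord = 0.1)
+        ppm(cells, ~1, Strauss(0.1), correction = "periodic")
+      }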
+    }
+  }
+}
+\section{Interaction parameters}{
+  Apart from the Poisson model, every point process model fitted by
+  \code{ppm} has parameters that determine the strength and
+  range of \sQuote{interaction} or dependence between points.
+  These parameters are of two types:
+  \describe{
+    \item{regular parameters:}{
+      A parameter \eqn{\phi}{phi} is called \emph{regular}
+      if the log likelihood is a linear function of \eqn{\theta}{theta} where 
+      \eqn{\theta = \theta(\phi)}{theta = theta(phi)} is some transformation of 
+      \eqn{\phi}{phi}. [Then \eqn{\theta}{theta} is called the canonical
+      parameter.]
+    }
+    \item{irregular parameters:}{
+      Other parameters are called \emph{irregular}. 
+    }
+  }
+  Typically, regular parameters determine the \sQuote{strength}
+  of the interaction, while irregular parameters determine the
+  \sQuote{range} of the interaction. For example, the Strauss process
+  has a regular parameter \eqn{\gamma}{gamma} controlling the strength
+  of interpoint inhibition, and an irregular parameter \eqn{r}
+  determining the range of interaction.
+
+  The \code{ppm} command is only designed to estimate regular
+  parameters of the interaction.
+  It requires the values of any irregular parameters of the interaction
+  to be fixed. For example, to fit a Strauss process model to the \code{cells}
+  dataset, you could type \code{ppm(cells, ~1, Strauss(r=0.07))}.
+  Note that the value of the irregular parameter \code{r} must be given.
+  The result of this command will be a fitted model in which the
+  regular parameter \eqn{\gamma}{gamma} has been estimated.
+
+  To determine the irregular parameters, there are several
+  practical techniques, but no general statistical theory available.
+  Useful techniques include maximum profile pseudolikelihood, which
+  is implemented in the command \code{\link{profilepl}},
+  and Newton-Raphson maximisation, implemented in the
+  experimental command \code{\link{ippm}}. 
+
+  Some irregular parameters can be estimated directly from data:
+  the hard-core radius in the model \code{\link{Hardcore}}
+  and the matrix of hard-core radii in \code{\link{MultiHard}} can be
+  estimated easily from data. In these cases, \code{ppm} allows the user
+  to specify the interaction without giving
+  the value of the irregular parameter. The user can give the
+  hard core interaction as \code{interaction=Hardcore()}
+  or even \code{interaction=Hardcore}, and 
+  the hard core radius will then be estimated from the data.
+}
+\references{
+  Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014)
+  Logistic regression for spatial Gibbs point processes.
+  \emph{Biometrika} \bold{101} (2) 377--392.
+
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42} 283--322.
+ 
+  Berman, M. and Turner, T.R. (1992)
+  Approximating point process likelihoods with GLIM.
+  \emph{Applied Statistics} \bold{41}, 31--38.
+ 
+  Besag, J. (1975)
+  Statistical analysis of non-lattice data.
+  \emph{The Statistician} \bold{24}, 179--195.
+ 
+  Diggle, P.J., Fiksel, T., Grabarnik, P., Ogata, Y., Stoyan, D. and
+  Tanemura, M. (1994)
+  On parameter estimation for pairwise interaction processes.
+  \emph{International Statistical Review} \bold{62}, 99--117.
+
+  Huang, F. and Ogata, Y. (1999)
+  Improvements of the maximum pseudo-likelihood estimators
+  in various spatial statistical models.
+  \emph{Journal of Computational and Graphical Statistics}
+  \bold{8}, 510--530.
+  
+  Jensen, J.L. and Moeller, M. (1991)
+  Pseudolikelihood for exponential family models of spatial point processes.
+  \emph{Annals of Applied Probability} \bold{1}, 445--461.
+ 
+  Jensen, J.L. and Kuensch, H.R. (1994)
+  On asymptotic normality of pseudo likelihood
+  estimates for pairwise interaction processes.
+  \emph{Annals of the Institute of Statistical Mathematics}
+  \bold{46}, 475--486.
+
+  Rajala, T. (2014)
+  A note on Bayesian logistic regression for spatial exponential family
+  Gibbs point processes.
+  Preprint on ArXiv.org. \url{http://arxiv.org/abs/1411.0539}
+
+}
+
+\seealso{
+  \code{\link{ppm.object}} for details of how to
+  print, plot and manipulate a fitted model.
+
+  \code{\link{ppp}} and \code{\link{quadscheme}}
+  for constructing data.
+  
+  Interactions:
+  \GibbsInteractionsList.
+
+  See \code{\link{profilepl}} for advice on
+  fitting nuisance parameters in the interaction,
+  and \code{\link{ippm}} for irregular parameters in the trend.
+
+  See \code{\link{valid.ppm}} and \code{\link{emend.ppm}} for
+  ensuring the fitted model is a valid point process.
+}
+\section{Error and Warning Messages}{
+  Some common error messages and warning messages 
+  are listed below, with explanations.
+  \describe{
+    \item{\dQuote{System is computationally singular}}{
+      The Fisher information matrix of the fitted model has a
+      determinant close to zero, so that the matrix cannot be inverted,
+      and the software cannot calculate standard errors or confidence intervals.
+      This error is usually reported when the model is printed,
+      because the \code{print} method calculates standard errors for the
+      fitted parameters. Singularity usually occurs because the spatial
+      coordinates in the original data were very large numbers
+      (e.g. expressed in metres) so that the fitted coefficients were
+      very small numbers. The simple remedy is to
+      \bold{rescale the data}, for example, to convert from metres to
+      kilometres by \code{X <- \link{rescale}(X, 1000)}, then re-fit the
+      model. Singularity can also occur if the covariate values are
+      very large numbers, or if the covariates are approximately
+      collinear.       
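+
+      A sketch of the rescaling remedy (the synthetic pattern and the
+      conversion factor are for illustration only):
+      \preformatted{
+        # coordinates in metres, spanning thousands of units
+        X <- rpoispp(0.000042, win = owin(c(0, 1000), c(0, 1000)))
+        Xkm <- rescale(X, 1000)  # convert metres to kilometres
+        fit <- ppm(Xkm, ~x)
+      }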
+    }
+    \item{\dQuote{Covariate values were NA or undefined at X\%
+	(M out of N) of the quadrature points}}{
+      The covariate data (typically a pixel image) did not provide
+      values of the covariate at some of the spatial locations in the 
+      observation window of the point pattern. This means that the
+      spatial domain of the pixel image does not completely cover the
+      observation window of the point pattern. If the percentage
+      is small, this warning can be ignored; typically it happens
+      because of rounding effects which cause the pixel image
+      to be one pixel narrower than the observation window.
+      However if more than a few percent of covariate values are
+      undefined, it would be prudent to check that the pixel images
+      are correct, and are correctly registered in their spatial relation to
+      the observation window.
+    }
+    \item{\dQuote{Model is unidentifiable}}{
+      It is not possible to estimate all the model parameters
+      from this dataset. The error message gives a further explanation,
+      such as \dQuote{data pattern is empty}.
+      Choose a simpler model, or check the data.
+    }
+    \item{\dQuote{N data points are illegal (zero conditional intensity)}}{
+      In a Gibbs model (i.e. with interaction between
+      points), the conditional intensity may be zero at some spatial
+      locations, indicating that the model forbids the presence of a 
+      point at these locations. However if the conditional intensity is
+      zero \emph{at a data point}, this means that the model is
+      inconsistent with the data. Modify the interaction parameters so
+      that the data point is not illegal (e.g. reduce the value of the
+      hard core radius) or choose a different interaction.
+    }
+  }
+}
+\section{Warnings}{
+  The implementation of the Huang-Ogata method is experimental;
+  several bugs were fixed in \pkg{spatstat} 1.19-0.
+  
+  See the comments above about the possible inefficiency
+  and bias of the maximum pseudolikelihood estimator.
+ 
+  The accuracy of the Berman-Turner approximation to
+  the pseudolikelihood depends on the number of dummy points used
+  in the quadrature scheme. The number of dummy points should 
+  at least equal the number of data points.
+ 
+  The parameter values of the fitted model
+  do not necessarily determine a valid point process.
+  Some of the point process models are only defined when the parameter
+  values lie in a certain subset. For example the Strauss process only 
+  exists when the interaction parameter \eqn{\gamma}{gamma}
+  is less than or equal to \eqn{1},
+  corresponding to a value of \code{ppm()$theta[2]}
+  less than or equal to \code{0}.
+
+  By default (if \code{emend=FALSE}) the algorithm
+  maximises the pseudolikelihood
+  without constraining the parameters, and does not apply any checks for
+  sanity after fitting the model.
+  This is because the fitted parameter value
+  could be useful information for data analysis.
+  To constrain the parameters to ensure that the model is a valid
+  point process, set \code{emend=TRUE}. See also the functions
+  \code{\link{valid.ppm}} and \code{\link{emend.ppm}}.
+  
+  The \code{trend} formula should not use any variable names
+  beginning with the prefixes \code{.mpl} or \code{Interaction}
+  as these names are reserved
+  for internal use. The data frame \code{covariates} should have as many rows
+  as there are points in \code{Q}. It should not contain
+  variables called \code{x}, \code{y} or \code{marks}
+  as these names are reserved for the Cartesian coordinates
+  and the marks.
+ 
+  If the model formula involves one of the functions
+  \code{poly()}, \code{bs()}
+  or \code{ns()}
+  (e.g. applied to spatial coordinates \code{x} and \code{y}),
+  the fitted coefficients can be misleading.
+  The resulting fit is not to the raw spatial variates
+  (\code{x}, \code{x^2}, \code{x*y}, etc.) 
+  but to a transformation of these variates.  The transformation is implemented
+  by \code{poly()} in order to achieve better numerical stability.
+  However the
+  resulting coefficients are appropriate for use with the transformed
+  variates, not with the raw variates.  
+  This affects the interpretation of the constant
+  term in the fitted model, \code{logbeta}. 
+  Conventionally, \eqn{\beta}{beta} is the background intensity, i.e. the
+  value taken by the conditional intensity function when all predictors
+  (including spatial or \dQuote{trend} predictors) are set equal to \eqn{0}.
+  However the coefficient actually produced is the value that the
+  log conditional intensity takes when all the predictors, 
+  including the \emph{transformed}
+  spatial predictors, are set equal to \code{0}, which is not the same thing.
+
+  Worse still, the result of \code{\link{predict.ppm}} can be
+  completely wrong if the trend formula contains one of the
+  functions \code{poly()}, \code{bs()}
+  or \code{ns()}. This is a weakness of the underlying
+  function \code{\link{predict.glm}}. 
+
+  If you wish to fit a polynomial trend, 
+  we offer an alternative to \code{\link{poly}()},
+  namely \code{polynom()}, which avoids the
+  difficulty induced by transformations.  It is completely analogous
+  to \code{poly} except that it does not orthonormalise.
+  The resulting coefficient estimates then have
+  their natural interpretation and can be predicted correctly. 
+  Numerical stability may be compromised.
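+
+  A short sketch of the distinction (the fitted trends agree;
+  the coefficients differ because \code{poly} transforms the variates):
+  \preformatted{
+    coef(ppm(nztrees, ~ polynom(x, 2)))  # natural interpretation
+    coef(ppm(nztrees, ~ poly(x, 2)))     # transformed variates
+  }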
+
+  Values of the maximised pseudolikelihood are not comparable
+  if they have been obtained with different values of \code{rbord}.
+}
+\examples{
+ # fit the stationary Poisson process
+ # to point pattern 'nztrees'
+
+ ppm(nztrees)
+ ppm(nztrees ~ 1)
+
+ \dontrun{
+ Q <- quadscheme(nztrees) 
+ ppm(Q) 
+ # equivalent.
+ }
+
+ \dontrun{
+  ppm(nztrees, nd=128)
+ }
+ \testonly{
+   ppm(nztrees, nd=16)
+ }
+
+fit1 <- ppm(nztrees, ~ x)
+ # fit the nonstationary Poisson process 
+ # with intensity function lambda(x,y) = exp(a + bx)
+ # where x,y are the Cartesian coordinates
+ # and a,b are parameters to be estimated
+
+fit1
+coef(fit1)
+coef(summary(fit1))
+
+\dontrun{
+ ppm(nztrees, ~ polynom(x,2))
+}
+\testonly{
+ ppm(nztrees, ~ polynom(x,2), nd=16)
+}
+
+ # fit the nonstationary Poisson process 
+ # with intensity function lambda(x,y) = exp(a + bx + cx^2)
+
+ \dontrun{
+ library(splines)
+ ppm(nztrees, ~ bs(x,df=3))
+ }
+ #       WARNING: do not use predict.ppm() on this result
+ # Fits the nonstationary Poisson process 
+ # with intensity function lambda(x,y) = exp(B(x))
+ # where B is a B-spline with df = 3
+
+\dontrun{
+ ppm(nztrees, ~1, Strauss(r=10), rbord=10)
+}
+\testonly{
+ ppm(nztrees, ~1, Strauss(r=10), rbord=10, nd=16)
+}
+ # Fit the stationary Strauss process with interaction range r=10
+ # using the border method with margin rbord=10
+
+\dontrun{
+ ppm(nztrees, ~ x, Strauss(13), correction="periodic")
+}
+\testonly{
+ ppm(nztrees, ~ x, Strauss(13), correction="periodic", nd=16)
+}
+ # Fit the nonstationary Strauss process with interaction range r=13
+ # and exp(first order potential) =  activity = beta(x,y) = exp(a+bx)
+ # using the periodic correction.
+
+
+# Compare Maximum Pseudolikelihood, Huang-Ogata and VB fits:
+\dontrun{ppm(swedishpines, ~1, Strauss(9))}
+
+\dontrun{ppm(swedishpines, ~1, Strauss(9), method="ho")}
+\testonly{ppm(swedishpines, ~1, Strauss(9), method="ho", nd=16, nsim=8)}
+
+ppm(swedishpines, ~1, Strauss(9), method="VBlogi")
+
+ # COVARIATES
+ #
+ X <- rpoispp(42)
+ weirdfunction <- function(x,y){ 10 * x^2 + 5 * sin(10 * y) }
+ #
+ # (a) covariate values as function
+ ppm(X, ~ y + Z, covariates=list(Z=weirdfunction))
+ #
+ # (b) covariate values in pixel image
+ Zimage <- as.im(weirdfunction, unit.square())
+ ppm(X, ~ y + Z, covariates=list(Z=Zimage))
+ #
+ # (c) covariate values in data frame
+ Q <- quadscheme(X)
+ xQ <- x.quad(Q)
+ yQ <- y.quad(Q)
+ Zvalues <- weirdfunction(xQ,yQ)
+ ppm(Q, ~ y + Z, covariates=data.frame(Z=Zvalues))
+ # Note Q not X
+
+ # COVARIATE FUNCTION WITH EXTRA ARGUMENTS
+ #
+f <- function(x,y,a){ y - a }
+ppm(X, ~x + f, covariates=list(f=f), covfunargs=list(a=1/2))
+
+ # COVARIATE: inside/outside window
+ b <- owin(c(0.1, 0.6), c(0.1, 0.9))
+ ppm(X, ~w, covariates=list(w=b))
+
+ ## MULTITYPE POINT PROCESSES ### 
+ # fit stationary marked Poisson process
+ # with different intensity for each species
+\dontrun{ppm(lansing, ~ marks, Poisson())}
+\testonly{
+   ama <- amacrine[square(0.7)]
+   a <- ppm(ama, ~ marks, Poisson(), nd=16)
+}
+
+ # fit nonstationary marked Poisson process
+ # with different log-cubic trend for each species
+\dontrun{ppm(lansing, ~ marks * polynom(x,y,3), Poisson())}
+\testonly{b <- ppm(ama, ~ marks * polynom(x,y,2), Poisson(), nd=16)}
+
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/ppmInfluence.Rd b/man/ppmInfluence.Rd
new file mode 100644
index 0000000..b272a1b
--- /dev/null
+++ b/man/ppmInfluence.Rd
@@ -0,0 +1,92 @@
+\name{ppmInfluence}
+\alias{ppmInfluence}
+\title{
+  Leverage and Influence Measures for Spatial Point Process Model
+}
+\description{
+  Calculates all the leverage and
+  influence measures described in \code{\link{influence.ppm}},
+  \code{\link{leverage.ppm}} and \code{\link{dfbetas.ppm}}.
+}
+\usage{
+   ppmInfluence(fit,
+                what = c("leverage", "influence", "dfbetas"),
+                \dots,
+                iScore = NULL, iHessian = NULL, iArgs = NULL,
+                drop = FALSE,
+                fitname = NULL)
+}
+\arguments{
+  \item{fit}{
+    A fitted point process model of class \code{"ppm"}.
+  }
+  \item{what}{
+    Character vector specifying which quantities are to be calculated.
+    Default is to calculate all quantities.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{iScore,iHessian}{
+    Components of the score vector and Hessian matrix for
+    the irregular parameters, if required. See Details.
+  }
+  \item{iArgs}{
+    List of extra arguments for the functions \code{iScore},
+    \code{iHessian} if required.
+  }
+  \item{drop}{
+    Logical. Whether to include (\code{drop=FALSE}) or
+    exclude (\code{drop=TRUE}) contributions from quadrature
+    points that were not used to fit the model.
+  }
+  \item{fitname}{
+    Optional character string name for the fitted model \code{fit}.
+  }
+}
+\details{
+  This function calculates all the
+  leverage and influence measures
+  described in \code{\link{influence.ppm}}, \code{\link{leverage.ppm}}
+  and \code{\link{dfbetas.ppm}}.
+
+  When analysing large datasets, the user can
+  call \code{ppmInfluence} to perform the calculations efficiently,
+  then extract the leverage and influence values as desired.
+
+  If the point process model trend has irregular parameters that were
+  fitted (using \code{\link{ippm}})
+  then the influence calculation requires the first and second
+  derivatives of the log trend with respect to the irregular parameters. 
+  The argument \code{iScore} should be a list,
+  with one entry for each irregular parameter, of \R functions that compute the
+  partial derivatives of the log trend (i.e. log intensity or
+  log conditional intensity) with respect to each irregular
+  parameter. The argument \code{iHessian} should be a list,
+  with \eqn{p^2} entries where \eqn{p} is the number of irregular
+  parameters, of \R functions that compute the second order
+  partial derivatives of the
+  log trend with respect to each pair of irregular parameters.
+}
+\value{
+  A list containing the leverage and influence measures specified by
+  \code{what}.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{leverage.ppm}},
+  \code{\link{influence.ppm}},
+  \code{\link{dfbetas.ppm}}
+}
+\examples{
+   X <- rpoispp(function(x,y) { exp(3+3*x) })
+   fit <- ppm(X ~ x+y)
+   fI <- ppmInfluence(fit)
+   fI$influence
+   fI$leverage
+   fI$dfbetas
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/ppp.Rd b/man/ppp.Rd
new file mode 100644
index 0000000..de96600
--- /dev/null
+++ b/man/ppp.Rd
@@ -0,0 +1,245 @@
+\name{ppp}
+\alias{ppp}
+\title{Create a Point Pattern}
+\description{
+  Creates an object of class \code{"ppp"} representing 
+  a point pattern dataset in the two-dimensional plane.
+}
+\usage{
+  ppp(x,y, \dots, window, marks,
+      check=TRUE, checkdup=check, drop=TRUE)
+}
+\arguments{
+  \item{x}{Vector of \eqn{x} coordinates of data points}
+  \item{y}{Vector of \eqn{y} coordinates of data points}
+  \item{window}{window of observation,
+    an object of class \code{"owin"}}
+  \item{\dots}{arguments passed to \code{\link{owin}} to create the
+    window, if \code{window} is missing}
+  \item{marks}{(optional) mark values for the points.
+    A vector or data frame.}
+  \item{check}{
+    Logical value indicating whether to check
+    that all the \eqn{(x,y)} points lie inside the specified window.
+    Do not set this to \code{FALSE} unless you are absolutely sure that this
+    check is unnecessary. See Warnings below.
+  }
+  \item{checkdup}{
+    Logical value indicating whether to check for duplicated
+    coordinates. See Warnings below.
+  }
+  \item{drop}{
+    Logical flag indicating whether to simplify data frames of marks.
+    See Details.
+  }
+}
+\value{
+  An object of class \code{"ppp"} 
+  describing a point pattern in the two-dimensional plane
+  (see \code{\link{ppp.object}}).
+}
+\details{
+  In the \pkg{spatstat} library, a point pattern dataset is
+  described by an object of class \code{"ppp"}. This function
+  creates such objects.
+
+  The vectors \code{x} and \code{y} must be numeric vectors of
+  equal length. They are interpreted as the cartesian coordinates
+  of the points in the pattern. Note that \code{x} and \code{y} are
+  permitted to have length zero, corresponding to an empty point
+  pattern; this is the default if these arguments are missing.
+
+  A point pattern dataset is assumed to have been observed within a specific
+  region of the plane called the observation window.
+  An object of class \code{"ppp"} representing a point pattern
+  contains information specifying the observation window.
+  This window must always be specified when creating a point pattern dataset;
+  there is intentionally no default action of ``guessing'' the window
+  dimensions from the data points alone. 
+
+  You can specify the observation window in several
+  (mutually exclusive) ways:
+  \itemize{
+    \item
+    \code{xrange, yrange} specify a rectangle
+    with these dimensions;
+    \item
+    \code{poly} specifies a polygonal boundary.
+    If the boundary is a single polygon then \code{poly}
+    must be a list with components \code{x,y}
+    giving the coordinates of the vertices.
+    If the boundary consists of several disjoint polygons
+    then \code{poly} must be a list of such lists
+    so that \code{poly[[i]]$x} gives the \eqn{x} coordinates
+    of the vertices of the \eqn{i}th boundary polygon.
+    \item
+    \code{mask} specifies a binary pixel image with entries
+    that are \code{TRUE} if the corresponding pixel is inside
+    the window.
+    \item
+    \code{window} is an object of class \code{"owin"}
+    specifying the window. A window object can be created
+    by \code{\link{owin}} from raw coordinate data. Special shapes
+    of windows can be created by the functions
+    \code{\link{square}}, \code{\link{hexagon}},
+    \code{\link{regularpolygon}}, \code{\link{disc}}
+    and \code{\link{ellipse}}. See the Examples.
+  }
+  The arguments \code{xrange, yrange} or \code{poly}
+  or \code{mask} are passed to the window creator function
+  \code{\link{owin}} for interpretation. See
+  \code{\link{owin}} for further details.
+
+  The argument \code{window}, if given, must be an object of class
+  \code{"owin"}. It is a full description of the window geometry,
+  and could have been obtained from \code{\link{owin}} or
+  \code{\link{as.owin}}, or by just extracting the observation window
+  of another point pattern, or by manipulating such windows.
+  See \code{\link{owin}} or the Examples below.
+
+  The points with coordinates \code{x} and \code{y}
+  \bold{must} lie inside the specified window, in order to
+  define a valid object of this class. 
+  Any points which do not lie inside the window will be
+  removed from the point pattern, and a warning will be issued.
+  See the section on Rejected Points.
+
+  The name of the unit of length for the \code{x} and \code{y} coordinates
+  can be specified in the dataset, using the argument \code{unitname}, which is
+  passed to \code{\link{owin}}. See the examples below, or the help file
+  for \code{\link{owin}}.
+  
+  The optional argument \code{marks} is given if the point pattern
+  is marked, i.e. if each data point carries additional information.
+  For example, points which are classified into two or more different
+  types, or colours, may be regarded as having a mark which identifies
+  which colour they are. Data recording the locations and heights of
+  trees in a forest can be regarded as a marked point pattern where the
+  mark is the tree height.
+  
+  The argument \code{marks} can be either
+  \itemize{
+    \item a vector, of
+    the same length as \code{x} and \code{y}, which is interpreted so
+    that \code{marks[i]} is the mark attached to the point
+    \code{(x[i],y[i])}. If the mark is a real number then \code{marks}
+    should be a numeric vector, while if the mark takes only a finite
+    number of possible values (e.g. colours or types) then
+    \code{marks} should be a \code{factor}.
+    \item
+    a data frame, with the number of rows equal to the number of points
+    in the point pattern. The \code{i}th row of the data frame is interpreted
+    as containing the mark values for the \code{i}th point in the point
+    pattern. The columns of the data frame correspond to different
+    mark variables (e.g. tree species and tree diameter).
+  }
+  If \code{drop=TRUE} (the default), then 
+  a data frame with only one column will be
+  converted to a vector, and a data frame with no columns will be
+  converted to \code{NULL}.
+  
+  See \code{\link{ppp.object}} for a description of the
+  class \code{"ppp"}.
+
+  Users would normally invoke \code{ppp} to create a point pattern,
+  but the functions \code{\link{as.ppp}} and 
+  \code{scanpp} may sometimes be convenient.
+}
+\section{Invalid coordinate values}{
+  The coordinate vectors \code{x} and \code{y} must contain only
+  finite numerical values. If the coordinates include
+  any of the values \code{NA}, \code{NaN}, \code{Inf} or \code{-Inf},
+  these will be removed. 
+}
+\section{Rejected points}{
+  The points with coordinates \code{x} and \code{y}
+  \bold{must} lie inside the specified window, in order to
+  define a valid object of class \code{"ppp"}.
+  Any points which do not lie inside the window will be
+  removed from the point pattern, and a warning will be issued.
+
+  The rejected points are still accessible: they are stored
+  as an attribute of the point pattern called \code{"rejects"}
+  (which is an object of class \code{"ppp"} containing the rejected points
+  in a large window). However, rejected points in a point pattern
+  will be ignored by all other functions except
+  \code{\link{plot.ppp}}.
+
+  To remove the rejected points altogether,
+  use \code{\link{as.ppp}}. To include the rejected points,
+  you will need to find a larger window that contains them,
+  and use this larger window in a call to \code{ppp}.
+}
+\section{Warnings}{
+  The code will check for problems with the data,
+  and issue a warning if any problems are found.
+  The checks and warnings can be switched off, for efficiency's sake,
+  but this should only be done if you are confident that the data
+  do not have these problems. 
+
+  Setting \code{check=FALSE} will disable all the checking procedures:
+  the check for points outside the window, and the check for
+  duplicated points. This is extremely dangerous, because points lying
+  outside the window will break many of the procedures in
+  \pkg{spatstat}, causing crashes and strange errors.
+  Set \code{check=FALSE} only if you are absolutely
+  sure that there are no points outside the window.
+
+  If duplicated points are found, a warning is issued, but no action is
+  taken. Duplicated points are not illegal, but may cause unexpected problems
+  later. Setting \code{checkdup=FALSE} will disable the check for duplicated
+  points. Do this only if you already know the answer.
+
+  Methodology and software for spatial point patterns often assume
+  that all points are distinct so that there are no duplicated points.
+  If duplicated points are present, the consequence could be
+  an incorrect result or a software crash. To the best of our knowledge,
+  all \pkg{spatstat} code handles duplicated points correctly.
+  However, if duplicated points are present, we advise using
+  \code{\link{unique.ppp}} to remove them, or
+  \code{\link{multiplicity.ppp}} to identify them,
+  and then re-analyse the data.
+}
+\seealso{
+  \code{\link{ppp.object}},
+  \code{\link{as.ppp}},
+  \code{\link{owin.object}},
+  \code{\link{owin}},
+  \code{\link{as.owin}}
+}
+\examples{
+  # some arbitrary coordinates in [0,1]
+  x <- runif(20)
+  y <- runif(20)
+
+  # the following are equivalent
+  X <- ppp(x, y, c(0,1), c(0,1))
+  X <- ppp(x, y)
+  X <- ppp(x, y, window=owin(c(0,1),c(0,1)))
+
+  # specify that the coordinates are given in metres
+  X <- ppp(x, y, c(0,1), c(0,1), unitname=c("metre","metres"))
+
+  \dontrun{plot(X)}
+
+  # marks
+  m <- sample(1:2, 20, replace=TRUE)
+  m <- factor(m, levels=1:2)
+  X <- ppp(x, y, c(0,1), c(0,1), marks=m)
+  \dontrun{plot(X)}
+
+  # polygonal window
+  X <- ppp(x, y, poly=list(x=c(0,10,0), y=c(0,0,10)))
+  \dontrun{plot(X)}
+
+  # circular window of radius 2
+  X <- ppp(x, y, window=disc(2))
+
+  # copy the window from another pattern
+  data(cells)
+  X <- ppp(x, y, window=Window(cells))
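+
+  # a hedged sketch (beyond the original examples):
+  # marks supplied as a data frame, one row per point
+  df <- data.frame(type=factor(sample(c("a","b"), 20, replace=TRUE)),
+                   size=runif(20))
+  X <- ppp(x, y, c(0,1), c(0,1), marks=df)
+
+  # points falling outside the window are rejected with a warning
+  # and stored in the attribute "rejects"
+  Y <- ppp(c(0.5, 2), c(0.5, 2), c(0,1), c(0,1))
+  attr(Y, "rejects")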
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/ppp.object.Rd b/man/ppp.object.Rd
new file mode 100644
index 0000000..0b0ab62
--- /dev/null
+++ b/man/ppp.object.Rd
@@ -0,0 +1,134 @@
+\name{ppp.object}
+\alias{ppp.object} %DoNotExport
+\title{Class of Point Patterns}
+\description{
+  A class \code{"ppp"} to represent a two-dimensional point
+  pattern. Includes information about the window in which the
+  pattern was observed. Optionally includes marks.
+}
+\details{
+  This class represents
+  a two-dimensional point pattern dataset. It specifies
+  \itemize{
+    \item the locations of the points
+    \item the window in which the pattern was observed
+    \item optionally, ``marks'' attached to each point
+      (extra information such as a type label).
+    }
+  If \code{X} is an object of type \code{ppp},
+  it contains the following elements:
+  \tabular{ll}{
+    \code{x} \tab vector of \eqn{x} coordinates of data points \cr
+    \code{y} \tab vector of \eqn{y} coordinates of data points \cr
+    \code{n} \tab number of points \cr
+    \code{window} \tab window of observation \cr
+                  \tab (an object of class \code{\link{owin}}) \cr
+    \code{marks} \tab optional vector or data frame of marks
+  }
+  Users are strongly advised not to manipulate these entries
+  directly.
+  
+  Objects of class \code{"ppp"}
+  may be created by the function
+  \code{\link{ppp}}
+  and converted from other types of data by the function
+  \code{\link{as.ppp}}.
+  Note that you must always specify the window of observation;
+  there is intentionally no default action of ``guessing'' the window
+  dimensions from the data points alone.
+
+  Standard point pattern datasets provided with the package
+  include
+  \code{\link{amacrine}},
+  \code{\link{betacells}},
+  \code{\link{bramblecanes}},
+  \code{\link{cells}},
+  \code{\link{demopat}},
+  \code{\link{ganglia}},
+  \code{\link{lansing}},
+  \code{\link{longleaf}},
+  \code{\link{nztrees}},
+  \code{\link{redwood}},
+  \code{\link{simdat}} and
+  \code{\link{swedishpines}}.
+  
+  Point patterns may be scanned from your own data files by
+  \code{\link{scanpp}} or by using \code{\link{read.table}} and
+  \code{\link{as.ppp}}.
+  
+  They may be manipulated by the functions 
+  \code{\link{[.ppp}} and
+  \code{\link{superimpose}}.
+
+  Point pattern objects can be plotted just by typing \code{plot(X)}
+  which invokes the \code{plot} method for point pattern objects,
+  \code{\link{plot.ppp}}. See \code{\link{plot.ppp}} for further information.
+
+  There are also methods for \code{summary} and \code{print}
+  for point patterns. Use \code{summary(X)} to see a useful description
+  of the data.
+  
+  Patterns may be generated at random by
+   \code{\link{runifpoint}},
+   \code{\link{rpoispp}},
+   \code{\link{rMaternI}},
+   \code{\link{rMaternII}},
+   \code{\link{rSSI}},
+   \code{\link{rNeymanScott}},
+   \code{\link{rMatClust}},
+   and
+   \code{\link{rThomas}}.
+
+   Most functions which are intended to operate on a window
+   (of class \code{\link{owin}})
+   will, if presented with a \code{\link{ppp}} object instead,
+   automatically extract the window information from the point pattern.
+}
+\seealso{
+  \code{\link{owin}},
+  \code{\link{ppp}},
+  \code{\link{as.ppp}},
+  \code{\link{[.ppp}}
+}
+\section{Warnings}{
+  The internal representation of marks is likely to change in the
+  next release of this package.
+}
+\examples{
+  x <- runif(100)
+  y <- runif(100)
+  X <- ppp(x, y, c(0,1),c(0,1))
+  X
+  \dontrun{plot(X)}
+  mar <- sample(1:3, 100, replace=TRUE)
+  mm <- ppp(x, y, c(0,1), c(0,1), marks=mar)
+  \dontrun{plot(mm)}
+  # points with mark equal to 2
+  ss <- mm[ mm$marks == 2 , ]
+  \dontrun{plot(ss)}
+  # left half of pattern 'mm'
+  lu <- owin(c(0,0.5),c(0,1))
+  mmleft <- mm[ , lu]
+  \dontrun{plot(mmleft)}
+  \dontrun{
+  # input data from file
+  qq <- scanpp("my.table", unit.square())
+
+  # interactively build a point pattern
+  plot(unit.square())
+  X <- as.ppp(locator(10), unit.square())
+  plot(X)
+  }
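+
+  # a hedged sketch (not in the original example):
+  # use accessor functions rather than manipulating entries directly
+  npoints(X)
+  Window(X)
+  marks(mm)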
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{attribute}
diff --git a/man/pppdist.Rd b/man/pppdist.Rd
new file mode 100644
index 0000000..0ac78f9
--- /dev/null
+++ b/man/pppdist.Rd
@@ -0,0 +1,222 @@
+\name{pppdist}
+\alias{pppdist}
+\title{Distance Between Two Point Patterns}
+\description{
+  Given two point patterns, find the distance between them based on
+  optimal point matching.
+}
+\usage{
+  pppdist(X, Y, type = "spa", cutoff = 1, q = 1, matching = TRUE,
+    ccode = TRUE, auction = TRUE, precision = NULL, approximation = 10,
+    show.rprimal = FALSE, timelag = 0)
+}
+\arguments{
+  \item{X,Y}{Two point patterns (objects of class \code{"ppp"}).}
+  \item{type}{
+    A character string giving the type of distance to be computed.
+    One of \code{"spa"} (default), \code{"ace"} or \code{"mat"}, indicating
+    whether the algorithm should find the optimal matching based on
+    \dQuote{subpattern assignment},
+    \dQuote{assignment only if cardinalities are equal}
+    or \dQuote{mass transfer}. See Details. 
+  }
+  \item{cutoff}{
+    The value \eqn{> 0} at which interpoint distances are cut off.
+  }
+  \item{q}{
+    The order of the average that is applied to the interpoint distances.
+    May be \code{Inf}, in which case the maximum of the interpoint
+    distances is taken.
+  }
+  \item{matching}{
+    Logical. Whether to return the optimal matching or only the
+    associated distance.
+  }
+  \item{ccode}{
+    Logical. If \code{FALSE}, \R code is used which allows for higher
+    precision, but is much slower.
+  }
+  \item{auction}{
+    Logical. By default a version of Bertsekas' auction algorithm
+    is used to compute an optimal point matching if \code{type} is
+    either \code{"spa"} or \code{"ace"}.
+    If \code{auction} is \code{FALSE} (or \code{type} is \code{"mat"})
+    a specialized primal-dual algorithm is used instead.
+    This was the standard in earlier versions
+    of \pkg{spatstat}, but is several orders of magnitude slower.
+  }
+  \item{precision}{
+    Index controlling accuracy of algorithm. The \code{q}-th powers of
+    interpoint distances will be rounded to the nearest multiple of
+    \code{10^(-precision)}. There is a sensible default which depends
+    on \code{ccode}.
+  }
+  \item{approximation}{
+    If \code{q = Inf}, compute distance based on the optimal matching for the
+    corresponding distance of order \code{approximation}. Can be
+    \code{Inf}, but this makes computations extremely slow.
+  }
+  \item{show.rprimal}{
+    Logical. Whether to plot the progress of the primal-dual
+    algorithm. If \code{TRUE}, slow primal-dual \R code is used,
+    regardless of the arguments \code{ccode} and \code{auction}.
+  }
+  \item{timelag}{
+    Time lag, in seconds, between successive displays of the
+    iterative solution of the restricted primal problem.
+  }
+}
+\details{
+  Computes the distance between point patterns \code{X} and \code{Y} based
+  on finding the matching between them which minimizes the average of
+  the distances between matched points
+  (if \code{q=1}), the maximum distance between matched points
+  (if \code{q=Inf}), and in general the \code{q}-th order average
+  (i.e. the \code{1/q}th power of the sum of
+  the \code{q}th powers) of the distances between matched points.
+  Distances between matched points are Euclidean distances cut off at
+  the value of \code{cutoff}.
+
+  The parameter \code{type} controls the behaviour of the algorithm if
+  the cardinalities of the point patterns are different. For the type
+  \code{"spa"} (subpattern assignment) the subpattern of the point pattern
+  with the larger cardinality \eqn{n} that is closest to the point pattern
+  with the smaller cardinality \eqn{m} is determined; then the \code{q}-th order
+  average is taken over \eqn{n} values: the \eqn{m} distances of matched points
+  and \eqn{n-m} "penalty distances" of value \code{cutoff} for
+  the unmatched points. For the type \code{"ace"} (assignment only if 
+  cardinalities equal) the matching is empty and the distance returned is equal
+  to \code{cutoff} if the cardinalities differ. For the
+  type \code{"mat"} (mass transfer) each point pattern is assumed
+  to have total mass \eqn{m} (= the smaller cardinality) distributed evenly
+  among its points; the algorithm finds then the "mass transfer plan" that
+  minimizes the \code{q}-th order weighted average of the distances, where 
+  the weights are given by the transferred mass divided by \eqn{m}. The
+  result is a fractional matching (each match of two points has a weight
+  in \eqn{(0,1]}) with the minimized quantity as the associated distance.
+
+  The central problem to be solved is the assignment problem (for types
+  \code{"spa"} and \code{"ace"}) or the more general transport problem
+  (for type \code{"mat"}). Both are well-known problems in discrete
+  optimization, see e.g. Luenberger (2003). 
+
+  For the assignment problem \code{pppdist} uses by default the
+  forward/backward version of Bertsekas' auction algorithm with
+  automated epsilon scaling; see Bertsekas (1992). The implemented
+  version gives good overall performance and can handle point patterns
+  with several thousand points. 
+  
+  For the transport problem a specialized primal-dual algorithm is
+  employed; see Luenberger (2003), Section 5.9. The C implementation
+  used by default can handle patterns with a few hundred points, but
+  should not be used with thousands of points. By setting
+  \code{show.rprimal = TRUE}, some insight into the workings of the
+  algorithm can be gained.
+
+  For a broader selection of optimal transport algorithms that are not
+  restricted to spatial point patterns and allow for additional fine
+  tuning, we recommend the \R package \pkg{transport}. 
+   
+  For moderate and large values of \code{q} there can be numerical
+  issues based on the fact that the \code{q}-th powers of distances are
+  taken and some positive values enter the optimization algorithm as
+  zeroes because they are too small in comparison with the larger
+  values. In this case the number of zeroes introduced is given in a
+  warning message, and it is then possible that the matching obtained is
+  not optimal and the associated distance is only a strict upper bound
+  of the true distance. As a general guideline (which can be very wrong
+  in special situations), a small number of zeroes (up to about 50\% of
+  the smaller point pattern cardinality \eqn{m}) usually still yields
+  the correct matching; even a considerably larger number of zeroes
+  usually still gives a highly accurate upper bound for the
+  distance. These numerical problems can be reduced by enforcing (much
+  slower) \R code via the argument \code{ccode = FALSE}. 
+
+  For \code{q = Inf} there is no fast algorithm available, which is why
+  approximation is normally used: for finding the optimal matching,
+  \code{q} is set to the value of \code{approximation}. The
+  resulting distance is still given as the maximum rather than the
+  \code{q}-th order average in the corresponding distance computation.
+  If \code{approximation = Inf}, approximation is suppressed and a very
+  inefficient exhaustive search for the best matching is performed.
+
+  The value of \code{precision} should normally not be supplied by the
+  user. If \code{ccode = TRUE}, this value is preset to the highest
+  exponent of 10 that the C code can still handle (usually \eqn{9}). If
+  \code{ccode = FALSE}, the value is preset according to \code{q}
+  (usually \eqn{15} if \code{q} is small), which can sometimes be
+  changed to obtain less severe warning messages. 
+}
+\value{
+  Normally an object of class \code{pppmatching} that contains detailed
+  information about the parameters used and the resulting distance.
+  See \code{\link{pppmatching.object}} for details.
+  If \code{matching = FALSE}, only the numerical value of the distance
+  is returned.
+}
+\references{
+  Bertsekas, D.P. (1992).
+  Auction algorithms for network flow problems: a tutorial introduction.
+  Computational Optimization and Applications 1, 7-66.
+
+  Luenberger, D.G. (2003). \emph{Linear and nonlinear programming.}
+  Second edition. Kluwer.
+
+  Schuhmacher, D. (2014).
+  \emph{transport: optimal transport in various forms.}
+  R package version 0.6-2 (or later)
+
+  Schuhmacher, D. and Xia, A. (2008).
+  A new metric between distributions of point processes.
+  \emph{Advances in Applied Probability} \bold{40}, 651--672
+
+  Schuhmacher, D., Vo, B.-T. and Vo, B.-N. (2008).
+  A consistent metric for performance evaluation of multi-object
+  filters.
+  \emph{IEEE Transactions on Signal Processing} \bold{56}, 3447--3457.
+}
+\author{
+  Dominic Schuhmacher
+  \email{dominic.schuhmacher at mathematik.uni-goettingen.de} \cr
+  \url{http://www.dominic.schuhmacher.name}
+}
+\seealso{
+  \code{\link{pppmatching.object}}, \code{\link{matchingdist}}
+}
+\examples{
+# equal cardinalities
+set.seed(140627)
+X <- runifpoint(500)
+Y <- runifpoint(500)
+m <- pppdist(X, Y)
+m
+\dontrun{
+plot(m)}
+  
+# differing cardinalities
+X <- runifpoint(14)
+Y <- runifpoint(10)
+m1 <- pppdist(X, Y, type="spa")
+m2 <- pppdist(X, Y, type="ace")
+m3 <- pppdist(X, Y, type="mat", auction=FALSE)
+summary(m1)
+summary(m2)
+summary(m3)
+\dontrun{
+m1$matrix
+m2$matrix
+m3$matrix}
+
+# q = Inf
+X <- runifpoint(10)
+Y <- runifpoint(10)
+mx1 <- pppdist(X, Y, q=Inf, matching=FALSE)
+mx2 <- pppdist(X, Y, q=Inf, matching=FALSE, ccode=FALSE, approximation=50)
+mx3 <- pppdist(X, Y, q=Inf, matching=FALSE, approximation=Inf)
+all.equal(mx1, mx2)
+# sometimes TRUE
+all.equal(mx2,mx3)
+# very often TRUE
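+
+# a hedged sketch (beyond the original examples):
+# the metric depends on the cutoff and the order q
+\dontrun{
+pppdist(X, Y, q=2, cutoff=0.1, matching=FALSE)
+}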
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/pppmatching.Rd b/man/pppmatching.Rd
new file mode 100644
index 0000000..adb3229
--- /dev/null
+++ b/man/pppmatching.Rd
@@ -0,0 +1,83 @@
+\name{pppmatching}
+\alias{pppmatching}
+\title{Create a Point Matching}
+\description{
+  Creates an object of class \code{"pppmatching"} representing
+  a matching of two planar point patterns (objects of class \code{"ppp"}).
+}
+\usage{
+  pppmatching(X, Y, am, type = NULL, cutoff = NULL, q = NULL,
+    mdist = NULL)
+}
+\arguments{
+  \item{X,Y}{Two point patterns (objects of class \code{"ppp"}).}
+  \item{am}{
+    An \code{npoints(X)} by \code{npoints(Y)} matrix with entries \eqn{\geq 0}{>= 0} 
+    that specifies which points are matched and with what weight;
+    alternatively, an object that can be coerced to this form
+    by \code{as.matrix}.
+  }
+  \item{type}{
+    A character string giving the type of the matching.
+    One of \code{"spa"}, \code{"ace"} or \code{"mat"}, or \code{NULL}
+    for a generic or unknown matching.
+  }
+  \item{cutoff, q}{
+    Numerical values specifying the cutoff value \eqn{> 0} for interpoint distances and
+    the order \eqn{q \in [1,\infty]}{q in [1,Inf]} of the average that is applied to them.
+    \code{NULL} if not applicable or unknown.
+  }
+  \item{mdist}{
+    Numerical value for the distance to be associated with the matching.
+  }
+}
+\details{
+  The argument \code{am} is interpreted as a "generalized adjacency matrix":
+  if the \code{[i,j]}-th entry is positive, then the \code{i}-th point
+  of \code{X} and the \code{j}-th point of \code{Y} are matched and the
+  value of the entry gives the corresponding weight of the match. For
+  an unweighted matching all the weights should be set to \eqn{1}.
+
+  The remaining arguments are optional and allow the user to save
+  additional information about the matching. See the help files for
+  \code{\link{pppdist}} and \code{\link{matchingdist}} for details on
+  the meaning of these parameters.
+}
+\author{
+  Dominic Schuhmacher
+  \email{dominic.schuhmacher at stat.unibe.ch}
+  \url{http://www.dominic.schuhmacher.name}
+}
+\seealso{
+  \code{\link{pppmatching.object}}
+  \code{\link{matchingdist}}
+}
+\examples{
+  # a random unweighted complete matching
+  X <- runifpoint(10)
+  Y <- runifpoint(10)
+  am <- r2dtable(1, rep(1,10), rep(1,10))[[1]]
+        # generates a random permutation matrix
+  m <- pppmatching(X, Y, am)
+  summary(m)
+  m$matrix
+  \dontrun{
+    plot(m)
+  }
+
+  # a random weighted complete matching
+  X <- runifpoint(7)
+  Y <- runifpoint(7)
+  am <- r2dtable(1, rep(10,7), rep(10,7))[[1]]/10
+        # generates a random doubly stochastic matrix
+  m2 <- pppmatching(X, Y, am)
+  summary(m2)
+  m2$matrix
+  \dontrun{
+    # Note: plotting does currently not distinguish
+    # between different weights
+    plot(m2)
+  }
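+
+  # a hedged sketch (not in the original example):
+  # an incomplete matching, in which only one pair of points is matched
+  X3 <- runifpoint(3)
+  Y3 <- runifpoint(3)
+  am3 <- matrix(0, 3, 3)
+  am3[1, 2] <- 1
+  m3 <- pppmatching(X3, Y3, am3)
+  summary(m3)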
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/pppmatching.object.Rd b/man/pppmatching.object.Rd
new file mode 100644
index 0000000..3b946b1
--- /dev/null
+++ b/man/pppmatching.object.Rd
@@ -0,0 +1,87 @@
+\name{pppmatching.object}
+\alias{pppmatching.object} %DoNotExport
+\title{Class of Point Matchings}
+\description{
+  A class \code{"pppmatching"} to represent a matching of two planar
+  point patterns.
+  Optionally includes information about the construction of the matching
+  and its associated distance between the point patterns.
+}
+\details{
+  This class represents a (possibly weighted and incomplete) matching
+  between two planar point patterns (objects of class \code{"ppp"}).
+
+  A matching can be thought of as a bipartite weighted graph where
+  the vertices are given by the two point patterns and edges of positive
+  weights are drawn each time a point of the first point pattern is
+  "matched" with a point of the second point pattern.
+
+  If \code{m} is an object of type \code{pppmatching}, it contains the
+  following elements
+  \tabular{ll}{
+    \code{pp1, pp2} \tab the two point patterns to be matched (vertices) \cr
+    \code{matrix} \tab a matrix specifying which points are matched \cr
+                  \tab and with what weights (edges) \cr
+    \code{type} \tab (optional) a character string for the type of \cr
+                 \tab the matching (one of \code{"spa"}, \code{"ace"} or \code{"mat"}) \cr
+    \code{cutoff} \tab (optional) cutoff value for interpoint distances \cr
+    \code{q} \tab (optional) the order for taking averages of \cr
+             \tab interpoint distances \cr
+    \code{distance} \tab (optional) the distance associated with the matching
+  }
+
+  The element \code{matrix} is a "generalized adjacency matrix".
+  The numbers of rows
+  and columns match the cardinalities of the first and second point
+  patterns, respectively. The \code{[i,j]}-th entry is positive if
+  the \code{i}-th point of \code{X} and the \code{j}-th point of
+  \code{Y} are matched (zero otherwise) and its value then gives
+  the corresponding weight of the match. For an unweighted matching
+  all the weights are set to \eqn{1}.
+
+  The optional elements are for saving details about matchings in the context of
+  optimal point matching techniques. \code{type} can be one of \code{"spa"} (for
+  "subpattern assignment"), \code{"ace"} (for "assignment only if
+  cardinalities differ") or \code{"mat"} (for "mass transfer"). \code{cutoff}
+  is a positive numerical value that specifies the maximal interpoint distance and
+  \code{q} is a value in \eqn{[1,\infty]}{[1,Inf]} that gives the order of the average
+  applied to the interpoint distances. See the help files for \code{\link{pppdist}}
+  and \code{\link{matchingdist}} for detailed information about these elements.
+  
+  Objects of class \code{"pppmatching"} may be created by the function 
+  \code{\link{pppmatching}}, and are most commonly obtained as output of the
+  function \code{\link{pppdist}}. There are methods \code{plot}, \code{print} and
+  \code{summary} for this class.
+}
+\author{
+  Dominic Schuhmacher
+  \email{dominic.schuhmacher at stat.unibe.ch}
+  \url{http://www.dominic.schuhmacher.name}
+}
+\seealso{
+  \code{\link{matchingdist}}
+  \code{\link{pppmatching}}
+}
+\examples{
+  # a random complete unweighted matching
+  X <- runifpoint(10)
+  Y <- runifpoint(10)
+  am <- r2dtable(1, rep(1,10), rep(1,10))[[1]]
+        # generates a random permutation matrix
+  m <- pppmatching(X, Y, am)
+  summary(m)
+  m$matrix
+  \dontrun{
+    plot(m)
+  }
+
+  # an optimal complete unweighted matching
+  m2 <- pppdist(X,Y)
+  summary(m2)
+  m2$matrix
+  \dontrun{
+    plot(m2)
+  }
+}
+\keyword{spatial}
+\keyword{attribute}
diff --git a/man/ppx.Rd b/man/ppx.Rd
new file mode 100644
index 0000000..2791804
--- /dev/null
+++ b/man/ppx.Rd
@@ -0,0 +1,100 @@
+\name{ppx}
+\Rdversion{1.1}
+\alias{ppx}
+\title{
+  Multidimensional Space-Time Point Pattern
+}
+\description{
+  Creates a multidimensional space-time point pattern
+  with any kind of coordinates and marks.
+}
+\usage{
+ppx(data, domain=NULL, coord.type=NULL, simplify=FALSE)
+}
+\arguments{
+  \item{data}{
+    The coordinates and marks of the points.
+    A \code{data.frame} or \code{hyperframe}.
+  }
+  \item{domain}{
+    Optional. The space-time domain containing the points.
+    An object in some appropriate format, or \code{NULL}.
+  }
+  \item{coord.type}{
+    Character vector specifying how each column of
+    \code{data} should be interpreted: as a spatial coordinate,
+    a temporal coordinate, a local coordinate or a mark.
+    Entries are partially matched to the values
+    \code{"spatial"},
+    \code{"temporal"},
+    \code{"local"} and
+    \code{"mark"}.
+  }
+  \item{simplify}{
+    Logical value indicating whether to simplify the result
+    in special cases.
+    If \code{simplify=TRUE}, a two-dimensional point pattern
+    will be returned as an object of class \code{"ppp"},
+    and a three-dimensional point pattern will be returned as
+    an object of class \code{"pp3"}.
+    If \code{simplify=FALSE} (the default) then the result is always
+    an object of class \code{"ppx"}.
+  }
+}
+\details{
+  An object of class \code{"ppx"}
+  represents a marked point pattern
+  in multidimensional space and/or time. There may be any
+  number of spatial coordinates, any number of temporal coordinates,
+  any number of local coordinates,
+  and any number of mark variables. The individual marks may be
+  atomic (numeric values, factor values, etc) or objects of any kind.
+
+  The argument \code{data} should contain the coordinates and marks of
+  the points. It should be a \code{data.frame} or more generally a
+  \code{hyperframe} (see \code{\link{hyperframe}}) with
+  one row of data for each point.
+
+  Each column of \code{data} is either
+  a spatial coordinate, a temporal coordinate,
+  a local coordinate, or a mark variable.
+  The argument \code{coord.type} determines how each column is interpreted.
+  It should be a character vector, of length equal to the number of
+  columns of \code{data}. It should contain strings that partially match
+  the values \code{"spatial"}, \code{"temporal"}, \code{"local"} and
+  \code{"mark"}. (The first letters will be sufficient.)
+  
+  By default (if \code{coord.type} is missing or \code{NULL}),
+  columns of numerical data are assumed to represent
+  spatial coordinates, while other columns are assumed to be marks.
+}
+\value{
+  Usually an object of class \code{"ppx"}.
+  If \code{simplify=TRUE} the result may be an object of
+  class \code{"ppp"} or \code{"pp3"}.
+}
+\author{\adrian
+  and \rolf
+}
+\seealso{
+  \code{\link{pp3}},
+  \code{\link{print.ppx}} 
+}
+\examples{
+   df <- data.frame(x=runif(4),y=runif(4),t=runif(4),
+                    age=rep(c("old", "new"), 2),
+                    size=runif(4))
+   X <- ppx(data=df, coord.type=c("s","s","t","m","m"))
+   X
+
+   val <- 20 * runif(4)
+   E <- lapply(val, function(s) { rpoispp(s) })
+   hf <- hyperframe(t=val, e=as.listof(E))
+   Z <- ppx(data=hf, domain=c(0,1))
+   Z
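+
+   # a hedged sketch (beyond the original examples):
+   # by default, numeric columns are interpreted as spatial
+   # coordinates and non-numeric columns as marks
+   df2 <- data.frame(x=runif(4), y=runif(4), lab=letters[1:4])
+   ppx(data=df2)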
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/predict.dppm.Rd b/man/predict.dppm.Rd
new file mode 100644
index 0000000..49f5dcd
--- /dev/null
+++ b/man/predict.dppm.Rd
@@ -0,0 +1,62 @@
+\name{predict.dppm}
+\alias{predict.dppm}
+\alias{fitted.dppm}
+\title{Prediction from a Fitted Determinantal Point Process Model}
+\description{
+  Given a fitted determinantal point process model,
+  these functions compute the fitted intensity.
+}
+\usage{
+  \method{fitted}{dppm}(object, ...)
+
+  \method{predict}{dppm}(object, ...)
+}
+\arguments{
+  \item{object}{
+    Fitted determinantal point process model.
+    An object of class \code{"dppm"}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{fitted.ppm}} or
+    \code{\link{predict.ppm}} respectively.
+  }
+}
+\details{
+  These functions are methods for the generic functions
+  \code{\link[stats]{fitted}} and \code{\link[stats]{predict}}.
+  The argument \code{object} should be a determinantal point process model
+  (object of class \code{"dppm"}) obtained using
+  the function \code{\link{dppm}}.
+
+  The \emph{intensity} of the fitted model
+  is computed, using \code{\link{fitted.ppm}} or
+  \code{\link{predict.ppm}} respectively.
+}
+\value{
+  The value of \code{fitted.dppm} is a numeric vector
+  giving the fitted values at the quadrature points.
+
+  The value of \code{predict.dppm} is usually a pixel image
+  (object of class \code{"im"}), but see \code{\link{predict.ppm}}
+  for details.
+}
+\seealso{
+  \code{\link{dppm}},
+  \code{\link{plot.dppm}},
+  \code{\link{fitted.ppm}},
+  \code{\link{predict.ppm}}
+}
+\examples{
+  fit <- dppm(swedishpines ~ x + y, dppGauss())
+  predict(fit)
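+  # a hedged sketch: fitted intensity values at the quadrature points
+  lam <- fitted(fit)
+  head(lam)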
+}
+\author{\adrian, \rolf and \ege}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/predict.kppm.Rd b/man/predict.kppm.Rd
new file mode 100644
index 0000000..e696112
--- /dev/null
+++ b/man/predict.kppm.Rd
@@ -0,0 +1,62 @@
+\name{predict.kppm}
+\alias{predict.kppm}
+\alias{fitted.kppm}
+\title{Prediction from a Fitted Cluster Point Process Model}
+\description{
+  Given a fitted cluster point process model,
+  these functions compute the fitted intensity.
+}
+\usage{
+  \method{fitted}{kppm}(object, ...)
+
+  \method{predict}{kppm}(object, ...)
+}
+\arguments{
+  \item{object}{
+    Fitted cluster point process model.
+    An object of class \code{"kppm"}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{fitted.ppm}} or
+    \code{\link{predict.ppm}} respectively.
+  }
+}
+\details{
+  These functions are methods for the generic functions
+  \code{\link[stats]{fitted}} and \code{\link[stats]{predict}}.
+  The argument \code{object} should be a cluster point process model
+  (object of class \code{"kppm"}) obtained using
+  the function \code{\link{kppm}}.
+  
+  The \emph{intensity} of the fitted model
+  is computed, using \code{\link{fitted.ppm}} or
+  \code{\link{predict.ppm}} respectively.
+}
+\value{
+  The value of \code{fitted.kppm} is a numeric vector
+  giving the fitted values at the quadrature points. 
+
+  The value of \code{predict.kppm} is usually a pixel image
+  (object of class \code{"im"}), but see \code{\link{predict.ppm}}
+  for details.
+}
+\seealso{
+  \code{\link{kppm}},
+  \code{\link{plot.kppm}},
+  \code{\link{vcov.kppm}},
+  \code{\link{fitted.ppm}},
+  \code{\link{predict.ppm}}
+}
+\examples{
+  data(redwood)
+  fit <- kppm(redwood ~ x, "Thomas")
+  predict(fit)
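+  # a hedged sketch: fitted intensity values at the quadrature points
+  lam <- fitted(fit)
+  head(lam)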
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/predict.lppm.Rd b/man/predict.lppm.Rd
new file mode 100644
index 0000000..6b5d841
--- /dev/null
+++ b/man/predict.lppm.Rd
@@ -0,0 +1,109 @@
+\name{predict.lppm}
+\alias{predict.lppm}
+\title{
+  Predict Point Process Model on Linear Network
+}
+\description{
+  Given a fitted point process model on a linear network,
+  compute the fitted intensity or conditional intensity of the model.
+}
+\usage{
+\method{predict}{lppm}(object, ...,
+                       type = "trend", locations = NULL, new.coef=NULL)
+}
+\arguments{
+  \item{object}{
+    The fitted model. An object of class \code{"lppm"},
+    see \code{\link{lppm}}.
+  }
+  \item{type}{
+    Type of values to be computed. Either \code{"trend"},
+    \code{"cif"} or \code{"se"}.
+  }
+  \item{locations}{
+    Optional. Locations at which predictions should be computed.
+    Either a data frame with two columns of coordinates,
+    or a binary image mask.
+  }
+  \item{new.coef}{
+    Optional. Numeric vector of model coefficients,
+    to be used instead of the fitted coefficients
+    \code{coef(object)} when calculating
+    the prediction.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link{as.mask}}
+    to determine the
+    pixel resolution (if \code{locations} is missing).
+  }
+}
+\details{
+  This function computes the fitted point process intensity,
+  fitted conditional intensity, or standard error of the fitted
+  intensity, for a point process model on a linear network.
+  It is a method for the generic \code{\link[stats]{predict}}
+  for the class \code{"lppm"}.
+
+  The argument \code{object} should be an object of class \code{"lppm"}
+  (produced by \code{\link{lppm}}) representing a point process model
+  on a linear network.
+
+  Predicted values are computed at the locations given by the
+  argument \code{locations}. If this argument is missing,
+  then predicted values are computed at a fine grid of points
+  on the linear network.
+
+  \itemize{
+    \item 
+    If \code{locations} is missing or \code{NULL} (the default),
+    the return value is a pixel image (object of class \code{"linim"}
+    which inherits class \code{"im"})
+    corresponding to a discretisation
+    of the linear network, with numeric pixel values giving the
+    predicted values at each location on the linear network.
+    \item
+    If \code{locations} is a data frame, the result is a 
+    numeric vector of predicted values at the locations specified by
+    the data frame.
+    \item
+    If \code{locations} is a binary mask, the result is a pixel image
+    with predicted values computed at the pixels of the mask.
+  }
+}
+\value{
+  A pixel image (object of class \code{"linim"} which inherits
+  class \code{"im"}) or
+  a numeric vector, depending on the argument \code{locations}.
+  See Details.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{lpp}},
+  \code{\link{linim}}
+}
+\examples{
+  X <- runiflpp(12, simplenet)
+  fit <- lppm(X ~ x)
+  v <- predict(fit, type="trend")
+  plot(v)
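+  # a hedged sketch (beyond the original example):
+  # standard error of the fitted intensity
+  \dontrun{
+  vse <- predict(fit, type="se")
+  plot(vse)
+  }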
+}
+\references{
+  Ang, Q.W. (2010)
+  \emph{Statistical methodology for events on a network}.
+  Master's thesis, School of Mathematics and Statistics, University of
+  Western Australia.
+  
+  Ang, Q.W., Baddeley, A. and Nair, G. (2012)
+  Geometrically corrected second-order analysis of 
+  events on a linear network, with applications to
+  ecology and criminology.
+  \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617.
+
+  McSwiggan, G., Nair, M.G. and Baddeley, A. (2012)
+  Fitting Poisson point process models to events 
+  on a linear network. Manuscript in preparation.
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/predict.mppm.Rd b/man/predict.mppm.Rd
new file mode 100644
index 0000000..7180eef
--- /dev/null
+++ b/man/predict.mppm.Rd
@@ -0,0 +1,131 @@
+\name{predict.mppm}
+\alias{predict.mppm}
+\title{Prediction for Fitted Multiple Point Process Model}
+\description{
+    Given a fitted multiple point process model obtained by \code{\link{mppm}},
+    evaluate the spatial trend and/or the conditional intensity of the
+    model. By default, predictions are evaluated over a grid of
+    locations, yielding pixel images of the trend and conditional intensity.
+    Alternatively predictions may be evaluated at specified
+    locations with specified values of the covariates.
+}
+\usage{
+\method{predict}{mppm}(object, ..., newdata = NULL, type = c("trend", "cif"),
+             ngrid = 40, locations=NULL, verbose=FALSE)
+}
+\arguments{
+  \item{object}{The fitted model. An object of class \code{"mppm"}
+    obtained from \code{\link{mppm}}.
+  }
+  \item{\dots}{Ignored.}
+  \item{newdata}{
+    New values of the covariates, for which the predictions should be computed.
+    If \code{newdata=NULL}, predictions are computed for the original
+    values of the covariates, to which the model was fitted.
+    Otherwise \code{newdata} should be a hyperframe
+    (see \code{\link{hyperframe}}) containing columns of covariates
+    as required by the model. If \code{type} includes \code{"cif"},
+    then \code{newdata} must also include a column of spatial point
+    pattern responses, in order to compute the conditional intensity.
+  }
+  \item{type}{
+    Type of predicted values required. A character string or vector of
+    character strings. Options are \code{"trend"} for the spatial trend
+    (first-order term) and \code{"cif"} or \code{"lambda"} for the
+    conditional intensity.
+    Alternatively \code{type="all"} selects all options.
+  }
+  \item{ngrid}{
+    Dimensions of the grid of spatial locations at which prediction will be
+    performed (if \code{locations=NULL}). An integer or a pair of integers.
+  }
+  \item{locations}{
+    Optional. The locations at which
+    predictions should be performed. A list of point patterns, with one entry
+    for each row of \code{newdata}.
+  }
+  \item{verbose}{
+    Logical flag indicating whether to print progress reports.
+  }
+}
+\details{
+  This function computes the spatial trend and the conditional
+  intensity of a fitted multiple spatial point process model. See Baddeley
+  and Turner (2000) and Baddeley et al (2007) for explanation and examples.
+  
+  Note that by ``spatial trend'' we mean the (exponentiated) first
+  order potential and not the intensity of the process. [For example
+  if we fit the stationary Strauss process with parameters
+  \eqn{\beta}{beta} and \eqn{\gamma}{gamma},
+  then the spatial trend is constant and equal to \eqn{\beta}{beta}.]
+  The conditional intensity \eqn{\lambda(u,X)}{lambda(u,X)} of the fitted
+  model is evaluated at each required spatial location u, with respect
+  to the response point pattern X.
+
+  If \code{locations=NULL}, then predictions are performed
+  at an \code{ngrid} by \code{ngrid} grid of locations in the window
+  for each response point pattern. The result will be a hyperframe
+  containing a column of images of the trend (if selected)
+  and a column of images of the conditional intensity (if selected).
+  The result can be plotted.
+
+  If \code{locations} is given, then it should be a list of point
+  patterns (objects of class \code{"ppp"}). Predictions are performed at these
+  points. The result is a hyperframe containing a column of
+  marked point patterns, in which the predicted values are attached
+  as marks to the prediction locations.
+}
+\value{
+  A hyperframe with columns named \code{trend} and \code{cif}.
+
+  If  \code{locations=NULL}, the entries of the hyperframe are
+  pixel images.
+
+  If \code{locations} is not null, the entries are
+  marked point patterns constructed by attaching the predicted values
+  to the \code{locations} point patterns.
+}
+\references{
+  Baddeley, A. and Turner, R.
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42} (2000) 283--322.
+ 
+  Baddeley, A., Bischof, L., Sintorn, I.-M., Haggarty, S.,
+  Bell, M. and Turner, R. 
+  Analysis of a designed experiment where the response is a spatial
+  point pattern. In preparation.
+
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  London: Chapman and Hall/CRC Press. 
+}
+\author{
+  \adrian, Ida-Maria Sintorn and Leanne Bischoff.
+  Implemented by \adrian, \rolf and \ege.
+}
+\seealso{
+  \code{\link{mppm}},
+  \code{\link{fitted.mppm}},
+  \code{\link{hyperframe}}
+}
+\examples{
+  h <- hyperframe(Bugs=waterstriders)
+  fit <- mppm(Bugs ~ x, data=h, interaction=Strauss(7))
+  # prediction on a grid
+  p <- predict(fit)
+  plot(p$trend)
+  # prediction at specified locations
+  loc <- with(h, runifpoint(20, Window(Bugs)))
+  p2 <- predict(fit, locations=loc)
+  plot(p2$trend)
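+  # a hedged sketch (beyond the original example):
+  # trend and conditional intensity together
+  \dontrun{
+  pall <- predict(fit, type="all")
+  plot(pall$cif)
+  }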
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/predict.ppm.Rd b/man/predict.ppm.Rd
new file mode 100644
index 0000000..7f324d5
--- /dev/null
+++ b/man/predict.ppm.Rd
@@ -0,0 +1,372 @@
+\name{predict.ppm}
+\alias{predict.ppm}
+\title{Prediction from a Fitted Point Process Model}
+\description{
+Given a fitted point process model obtained by \code{\link{ppm}},	
+evaluate the spatial trend or the conditional intensity of the model
+at new locations.
+}
+\usage{
+   \method{predict}{ppm}(object, window=NULL, ngrid=NULL, locations=NULL,
+   covariates=NULL,
+   type=c("trend", "cif", "intensity", "count"),
+   se=FALSE,
+   interval=c("none", "confidence", "prediction"),
+   level = 0.95, 
+   X=data.ppm(object), correction, 
+   \dots, new.coef=NULL, check=TRUE, repair=TRUE)
+}
+\arguments{
+  \item{object}{
+    A fitted point process model, typically obtained from
+    the model-fitting algorithm \code{\link{ppm}}. An object of
+    class \code{"ppm"} (see \code{\link{ppm.object}}).
+  }
+  \item{window}{
+    Optional. A window (object of class \code{"owin"})
+    \emph{delimiting} the locations where predictions
+    should be computed. Defaults to the window of the
+    original data used to fit the model \code{object}.
+  }
+  \item{ngrid}{
+    Optional. Dimensions of a rectangular grid of locations
+    inside \code{window} where the predictions should be computed.
+    An integer, or an integer vector of length 2,
+    specifying the number of grid points in the \eqn{y} and \eqn{x}
+    directions. (Incompatible with \code{locations})
+  }
+  \item{locations}{
+    Optional. Data giving the exact
+    \eqn{x,y} coordinates (and marks, if required)
+    of locations at which predictions should be computed.
+    Either a point pattern, or a data frame with columns named \code{x} and
+    \code{y}, or a binary image mask, or a pixel image.
+    (Incompatible with \code{ngrid})
+  }
+  \item{covariates}{
+    Values of external covariates required by the model.
+    Either a data frame or a list of images.
+    See Details.
+  }
+  \item{type}{
+    Character string.
+    Indicates which property of the fitted model should be predicted.
+    Options are \code{"trend"} for the spatial trend, 
+    \code{"cif"} or \code{"lambda"} for the conditional intensity,
+    \code{"intensity"} for the intensity, and
+    \code{"count"} for the total number of points in \code{window}.
+  }
+  \item{se}{
+    Logical value indicating whether to calculate
+    standard errors as well.
+  }
+  \item{interval}{
+    String (partially matched) indicating whether to produce
+    estimates (\code{interval="none"}, the default)
+    or a confidence interval (\code{interval="confidence"})
+    or a prediction interval (\code{interval="prediction"}).
+  }
+  \item{level}{
+    Coverage probability for the confidence or prediction interval.
+  }
+  \item{X}{
+    Optional. A point pattern (object of class \code{"ppp"})
+    to be taken as the data point pattern when calculating the
+    conditional intensity. The default is to use the original data
+    to which the model was fitted.
+  }
+  \item{correction}{
+    Name of the edge correction to be used
+    in calculating the conditional intensity.
+    Options include \code{"border"} and \code{"none"}.
+    Other options may include \code{"periodic"},
+    \code{"isotropic"} and \code{"translate"} depending on the model.
+    The default correction is the one that was used to fit \code{object}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{new.coef}{
+    Numeric vector of parameter values to replace the 
+    fitted model parameters \code{coef(object)}.
+  }
+  \item{check}{
+    Logical value indicating whether to check the internal format
+    of \code{object}. If there is any possibility that this object
+    has been restored from a dump file, or has otherwise lost track of
+    the environment where it was originally computed, set
+    \code{check=TRUE}. 
+  }
+  \item{repair}{
+    Logical value indicating whether to repair the internal format
+    of \code{object}, if it is found to be damaged. 
+  }
+}
+\value{
+  \emph{If \code{type="count"}:}
+  a numeric vector or matrix.
+  
+  \emph{If \code{locations} is given and is a data frame:}
+  a vector of predicted values for the spatial locations
+  (and marks, if required) given in \code{locations}.
+
+  \emph{If \code{ngrid} is given, or if \code{locations} is given
+    and is a binary image mask or a pixel image:}
+  If \code{object} is an unmarked point process,
+  the result is a pixel image object (of class \code{"im"}, see
+  \code{\link{im.object}}) containing the predictions. 
+  If \code{object} is a multitype point process,
+  the result is a list of pixel images, containing the predictions
+  for each type at the same grid of locations.
+
+  The ``predicted values'' are either values of the spatial trend
+  (if \code{type="trend"}), values of the conditional intensity
+  (if \code{type="cif"} or \code{type="lambda"}), 
+  values of the intensity (if \code{type="intensity"})
+  or numbers of points (if \code{type="count"}).
+
+  If \code{se=TRUE}, then the result is a list with two entries,
+  the first being the predicted values in the format described above,
+  and the second being the standard errors in the same format.
+}
+\details{
+  This function computes properties of a fitted spatial point process
+  model (object of class \code{"ppm"}). For a Poisson point process
+  it can compute the fitted intensity function, or the expected number of
+  points in a region. For a Gibbs point process it can compute the
+  spatial trend (first order potential), conditional intensity,
+  and approximate intensity of the process. 
+  Point estimates, standard errors,
+  confidence intervals and prediction intervals are available.
+ 
+  Given a point pattern dataset, we may fit
+  a point process model to the data using the 
+  model-fitting algorithm \code{\link{ppm}}. This
+  returns an object of class \code{"ppm"} representing 
+  the fitted point process model (see \code{\link{ppm.object}}).
+  The parameter estimates in this fitted model can be read off 
+  simply by printing the \code{ppm} object.
+  The spatial trend, conditional intensity and intensity of the 
+  fitted model are evaluated using this function \code{predict.ppm}.
+
+  The default action is to create a rectangular grid of points
+  in the observation window of the data point pattern, and evaluate
+  the spatial trend at these locations.
+
+  The argument \code{type} specifies the values that are desired:
+  \describe{
+    \item{If \code{type="trend"}:}{
+      the ``spatial trend'' of the fitted model is evaluated at each
+      required spatial location \eqn{u}. See below.
+    }
+    \item{If \code{type="cif"}:}{
+      the conditional intensity \eqn{\lambda(u, X)}{lambda(u,X)} of the
+      fitted model is evaluated at each required spatial location \eqn{u},
+      with respect to the data point pattern \eqn{X}.
+    }
+    \item{If \code{type="intensity"}:}{
+      the intensity \eqn{\lambda(u)}{lambda(u)} of the
+      fitted model is evaluated at each required spatial location \eqn{u}.
+    }
+    \item{If \code{type="count"}:}{
+      the expected total number of points (or the expected number
+      of points falling in \code{window}) is evaluated. 
+      If \code{window} is a tessellation,
+      the expected number of points in each tile of the tessellation
+      is evaluated.
+    }
+  }
+  The spatial trend, conditional intensity, and intensity
+  are all equivalent if the fitted model is a Poisson point process.
+  However, if the model is not a Poisson process, then they are
+  all different. The ``spatial trend'' is the (exponentiated)
+  first order potential, and not the intensity of the process.
+  [For example if we fit the
+  stationary Strauss process with parameters
+  \eqn{\beta}{beta} and \eqn{\gamma}{gamma},
+  then the spatial trend is constant and equal to \eqn{\beta}{beta},
+  while the intensity is a smaller value.]
+
+  The default is to compute an estimate of the desired quantity.
+  If \code{interval="confidence"} or \code{interval="prediction"},
+  the estimate is replaced by a confidence interval or prediction interval.
+
+  If \code{se=TRUE}, then a standard error is also calculated,
+  and is returned together with the (point or interval) estimate.
+
+  The spatial locations where predictions are required,
+  are determined by the (incompatible)
+  arguments \code{ngrid} and \code{locations}.
+  \itemize{
+    \item 
+    If the argument \code{ngrid} is present, then
+    predictions are performed at a rectangular 
+    grid of locations in the window \code{window}. 
+    The result of prediction will be a pixel image or images.
+    \item 
+    If \code{locations} is present, then predictions
+    will be performed at the spatial locations given by
+    this dataset. These may be an arbitrary list of spatial locations,
+    or they may be a rectangular grid. 
+    The result of prediction will be either a numeric vector
+    or a pixel image or images.
+    \item 
+    If neither \code{ngrid} nor \code{locations} is given, then
+    \code{ngrid} is assumed. The value of \code{ngrid} defaults to
+    \code{\link{spatstat.options}("npixel")}, which is initialised to 128
+    when \pkg{spatstat} is loaded.
+  }
+  The argument \code{locations} may be a point pattern,
+  a data frame or a list specifying arbitrary locations;
+  or it may be a binary image mask (an object of class \code{"owin"}
+  with type \code{"mask"}) or a pixel image (object of class
+  \code{"im"}) specifying (a subset of) a rectangular
+  grid of locations. 
+  \itemize{
+    \item 
+    If \code{locations} is a point pattern (object of class \code{"ppp"}),
+    then prediction will be performed at the points of the point pattern.
+    The result of prediction will be a vector of predicted values,
+    one value for each point.
+    If the model is a marked point process, then
+    \code{locations} should be a marked point pattern, with marks of the
+    same kind as the model; prediction will be performed at these
+    marked points.
+    The result of prediction will be a vector of predicted values,
+    one value for each (marked) point.
+    \item 
+    If \code{locations} is a data frame or list, then it must contain
+    vectors \code{locations$x} and \code{locations$y} specifying the
+    \eqn{x,y} coordinates of the prediction locations. Additionally, if
+    the model is a marked point process, then \code{locations} must also contain
+    a factor \code{locations$marks} specifying the marks of the
+    prediction locations. These vectors must have equal length.
+    The result of prediction will be a vector of predicted values,
+    of the same length.
+    \item 
+    If \code{locations} is a binary image mask, then prediction will be
+    performed at each pixel in this binary image where the pixel value
+    is \code{TRUE} (in other words, at each pixel that is inside the
+    window). If the fitted model is an unmarked point process, then the
+    result of prediction will be an image. If the fitted model is a
+    marked point process, then prediction will
+    be performed for each possible value of the mark at each such
+    location, and the result of prediction will be a 
+    list of images, one for each mark value.
+    \item 
+    If \code{locations} is a pixel image (object of class \code{"im"}),
+    then prediction will be performed at each pixel in this image where
+    the pixel value is defined (i.e.\ where the pixel value is not
+    \code{NA}).
+  }
+  The argument \code{covariates} gives the values of any spatial covariates
+  at the prediction locations.
+  If the trend formula in the fitted model 
+  involves spatial covariates (other than
+  the Cartesian coordinates \code{x}, \code{y})
+  then \code{covariates} is required.
+  The format and use of \code{covariates} are analogous to those of the
+  argument of the same name in \code{\link{ppm}}.
+  It is either a data frame or a list of images.
+  \itemize{
+    \item
+    If \code{covariates} is a list of images, then
+    the names of the entries should correspond to
+    the names of covariates in the model formula \code{trend}.
+    Each entry in the list must be an image object (of class \code{"im"},
+    see \code{\link{im.object}}).
+    The software will look up
+    the pixel values of each image at the prediction locations.
+    \item 
+    If \code{covariates} is a data frame, then the
+    \code{i}th row of \code{covariates}
+    is assumed to contain covariate data for the \code{i}th location.
+    When \code{locations} is a data frame,
+    this just means that each row of \code{covariates} contains the
+    covariate data for the location specified in the corresponding row of
+    \code{locations}. When \code{locations} is a binary image
+    mask, the row \code{covariates[i,]} must correspond to the location
+    \code{x[i],y[i]} where \code{x = as.vector(raster.x(locations))}
+    and \code{y = as.vector(raster.y(locations))}.
+  }
+
+  Note that if you only want to use prediction in order to
+  generate a plot of the predicted values,
+  it may be easier to use \code{\link{plot.ppm}} which calls
+  this function and plots the results.
+}
+\references{
+  Baddeley, A. and Turner, R.
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42} (2000) 283--322.
+ 
+  Berman, M. and Turner, T.R. 
+  Approximating point process likelihoods with GLIM.
+  \emph{Applied Statistics} \bold{41} (1992) 31--38.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{ppm.object}},
+  \code{\link{plot.ppm}},
+  \code{\link{print.ppm}},
+  \code{\link{fitted.ppm}},
+  \code{\link{spatstat.options}}
+}
+\section{Warnings}{
+  The current implementation invokes \code{\link{predict.glm}}
+  so that \bold{prediction is wrong} if the trend formula in
+  \code{object} involves terms in \code{ns()},
+  \code{bs()} or \code{poly()}.
+  This is a weakness of \code{\link{predict.glm}} itself!
+  
+  Error messages may be very opaque,
+  as they tend to come from deep in the workings of 
+  \code{\link{predict.glm}}.
+  If you are passing the \code{covariates} argument
+  and the function crashes,
+  it is advisable to start by checking that all the conditions 
+  listed above are satisfied.
+}
+\examples{
+  \testonly{op <- spatstat.options(npixel=32)}
+  m <- ppm(cells ~ polynom(x,y,2), Strauss(0.05))
+  trend <- predict(m, type="trend")
+  \dontrun{
+  image(trend)
+  points(cells)
+  }
+  cif <- predict(m, type="cif")
+  \dontrun{
+  persp(cif)
+  }
+  data(japanesepines)
+  mj <- ppm(japanesepines ~ harmonic(x,y,2))
+  se <- predict(mj, se=TRUE)
+
+  # prediction interval for total number of points
+  predict(mj, type="count", interval="p")
+
+  # prediction at arbitrary locations
+  predict(mj, locations=data.frame(x=0.3, y=0.4))
+
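+  # covariate effects: a sketch, where Z is a hypothetical
+  # spatial covariate image (not part of the original example)
+  Z <- distmap(runifpoint(3, Window(japanesepines)))
+  mz <- ppm(japanesepines ~ Z)
+  predict(mz, locations=data.frame(x=0.3, y=0.4),
+          covariates=data.frame(Z=0.2))
+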
+  X <- runifpoint(5, Window(japanesepines))
+  predict(mj, locations=X, se=TRUE)
+
+  # multitype
+  rr <- matrix(0.06, 2, 2)
+  ma <- ppm(amacrine ~ marks,  MultiStrauss(rr))
+  Z <- predict(ma)
+  Z <- predict(ma, type="cif")
+  predict(ma, locations=data.frame(x=0.8, y=0.5,marks="on"), type="cif")
+
+  \testonly{spatstat.options(op)}
+}
+\author{
+  \adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{models}
+ 
+ 
diff --git a/man/predict.rppm.Rd b/man/predict.rppm.Rd
new file mode 100644
index 0000000..b288f6a
--- /dev/null
+++ b/man/predict.rppm.Rd
@@ -0,0 +1,80 @@
+\name{predict.rppm}
+\alias{fitted.rppm}
+\alias{predict.rppm}
+\title{
+  Make Predictions From a Recursively Partitioned Point Process Model
+}
+\description{
+  Given a model which has been fitted to point pattern data
+  by recursive partitioning, compute the predicted intensity
+  of the model.
+}
+\usage{
+\method{predict}{rppm}(object, \dots)
+
+\method{fitted}{rppm}(object, \dots)
+}
+\arguments{
+  \item{object}{
+    Fitted point process model of class \code{"rppm"}
+    produced by the function \code{\link{rppm}}.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link{predict.ppm}}
+    to specify the locations where prediction is required.
+    (Ignored by \code{fitted.rppm})
+  }
+}
+\details{
+  These functions are methods for the generic functions
+  \code{\link[stats]{fitted}} and \code{\link[stats]{predict}}.
+  They compute the fitted intensity of a point process model.
+  The argument \code{object} should be a fitted point process model
+  of class \code{"rppm"} produced by the function \code{\link{rppm}}.
+
+  The \code{fitted} method computes the fitted intensity at the original data
+  points, yielding a numeric vector with one entry for each data point.
+
+  The \code{predict} method computes the fitted intensity at
+  any desired locations. By default, predictions are
+  calculated at a regular grid of spatial locations, and the result
+  is a pixel image giving the predicted intensity values at these
+  locations.
+
+  Alternatively, predictions can be performed at other
+  locations, or a finer grid of locations, or only at certain specified
+  locations, using additional arguments \code{\dots}
+  which will be interpreted by \code{\link{predict.ppm}}.
+  Common arguments are \code{ngrid} to increase the grid resolution,
+  \code{window} to specify the prediction region, and \code{locations}
+  to specify the exact locations of predictions.
+  See \code{\link{predict.ppm}} for details of these arguments.
+
+  Predictions are computed by evaluating the explanatory covariates at
+  each desired location, and applying the recursive partitioning rule
+  to each set of covariate values.
+}
+\value{
+  The result of \code{fitted.rppm} is a numeric vector.
+
+  The result of \code{predict.rppm} is a pixel image, a list of pixel images,
+  or a numeric vector.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{rppm}},
+  \code{\link{plot.rppm}}  
+}
+\examples{
+    fit <- rppm(unmark(gorillas) ~ vegetation, data=gorillas.extra)
+    plot(predict(fit))
+    lambdaX <- fitted(fit)
+    lambdaX[1:5]
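+    # finer prediction grid: a sketch ('ngrid' is interpreted by predict.ppm)
+    if(interactive()) plot(predict(fit, ngrid=256))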
+    # Mondriaan pictures
+    plot(predict(rppm(redwoodfull ~ x + y)))
+    points(redwoodfull)
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/predict.slrm.Rd b/man/predict.slrm.Rd
new file mode 100644
index 0000000..d33fc2c
--- /dev/null
+++ b/man/predict.slrm.Rd
@@ -0,0 +1,94 @@
+\name{predict.slrm}
+\Rdversion{1.1}
+\alias{predict.slrm}
+\title{
+  Predicted or Fitted Values from Spatial Logistic Regression
+}
+\description{
+  Given a fitted Spatial Logistic Regression model,
+  this function computes the fitted probabilities for each pixel,
+  or the fitted point process intensity, or the values of the
+  linear predictor in each pixel.
+}
+\usage{
+ \method{predict}{slrm}(object, ..., type = "intensity",
+                        newdata=NULL, window=NULL)
+}
+\arguments{
+  \item{object}{
+    a fitted spatial logistic regression model.
+    An object of class \code{"slrm"}.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link{pixellate}}
+    determining the pixel resolution for the discretisation
+    of the point pattern.
+  }
+  \item{type}{
+    Character string (partially) matching one of 
+    \code{"probabilities"}, \code{"intensity"} or \code{"link"}.
+  }
+  \item{newdata}{
+    Optional.  List containing new covariate values for the prediction.
+    See Details.
+  }
+  \item{window}{
+    Optional.  New window in which to predict.
+    An object of class \code{"owin"}.
+  }
+}
+\details{
+  This is a method for \code{\link[stats]{predict}} for spatial logistic
+  regression models (objects of class \code{"slrm"}, usually obtained
+  from the function \code{\link{slrm}}).
+
+  The argument \code{type} determines which quantity is computed.
+  If \code{type="intensity"}), the value of the point process intensity
+  is computed at each pixel. If \code{type="probabilities"}) the 
+  probability of the presence of a random point in each pixel is
+  computed. If \code{type="link"}, the value of the linear predictor is
+  computed at each pixel. 
+
+  If \code{newdata = NULL} (the default), the algorithm computes
+  fitted values of the model (based on the data that was originally used
+  to fit the model \code{object}). 
+
+  If \code{newdata} is given, the algorithm computes predicted values
+  of the model, using the new values of the covariates provided by
+  \code{newdata}. The argument \code{newdata} should be a list;
+  names of entries in the list should correspond
+  to variables appearing in the model formula of the \code{object}.
+  Each list entry may be a pixel image or a single numeric
+  value. 
+}
+\value{
+  A pixel image (object of class \code{"im"}) containing the predicted
+  values for each pixel.
+}
+\seealso{
+  \code{\link{slrm}}
+}
+\examples{
+  X <- rpoispp(42)
+  fit <- slrm(X ~ x+y)
+  plot(predict(fit))
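+  # fitted presence probabilities in each pixel: a sketch using 'type'
+  plot(predict(fit, type="probabilities"))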
+
+  data(copper)
+  X <- copper$SouthPoints
+  Y <- copper$SouthLines
+  Z <- distmap(Y)
+  fitc <- slrm(X ~ Z)
+  pc <- predict(fitc)
+
+  Znew <- distmap(copper$Lines)[copper$SouthWindow]
+  pcnew <- predict(fitc, newdata=list(Z=Znew))
+}
+\author{\adrian
+  \email{adrian@maths.uwa.edu.au}
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
+\keyword{methods}
diff --git a/man/print.im.Rd b/man/print.im.Rd
new file mode 100644
index 0000000..0c55076
--- /dev/null
+++ b/man/print.im.Rd
@@ -0,0 +1,36 @@
+\name{print.im}
+\alias{print.im}
+\title{Print Brief Details of an Image}
+\description{
+  Prints a very brief description of a pixel image object.
+}
+\usage{
+  \method{print}{im}(x, \dots)
+}
+\arguments{
+  \item{x}{Pixel image (object of class \code{"im"}).}
+  \item{\dots}{Ignored.}
+}
+\details{
+  A very brief description of the pixel image \code{x} is printed.
+
+  This is a method for the generic function \code{\link{print}}.
+}
+\seealso{
+  \code{\link{print}},
+  \code{\link{im.object}},
+  \code{\link{summary.im}}
+}
+\examples{
+  data(letterR)
+  U <- as.im(letterR)
+  U
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{print}
diff --git a/man/print.owin.Rd b/man/print.owin.Rd
new file mode 100644
index 0000000..1194176
--- /dev/null
+++ b/man/print.owin.Rd
@@ -0,0 +1,42 @@
+\name{print.owin}
+\alias{print.owin}
+\title{Print Brief Details of a Spatial Window}
+\description{
+  Prints a very brief description of a window object.
+}
+\usage{
+  \method{print}{owin}(x, \dots, prefix="window: ")
+}
+\arguments{
+  \item{x}{Window (object of class \code{"owin"}).}
+  \item{\dots}{Ignored.}
+  \item{prefix}{Character string to be printed at the start of the output.}
+}
+\details{
+  A very brief description of the window \code{x} is printed.
+
+  This is a method for the generic function \code{\link{print}}.
+}
+\seealso{
+  \code{\link{print}},
+  \code{\link{print.ppp}},
+  \code{\link{summary.owin}}
+}
+\examples{
+  owin()  # the unit square
+
+  data(demopat)
+  W <- Window(demopat)
+  W                    # just says it is polygonal
+  as.mask(W)           # just says it is a binary image 
+
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{print}
+
diff --git a/man/print.ppm.Rd b/man/print.ppm.Rd
new file mode 100644
index 0000000..ca69aca
--- /dev/null
+++ b/man/print.ppm.Rd
@@ -0,0 +1,64 @@
+\name{print.ppm}
+\alias{print.ppm}
+\title{Print a Fitted Point Process Model}
+\description{
+  Default \code{print} method for a fitted point process model.
+}
+\usage{
+ \method{print}{ppm}(x,\dots,
+     what=c("all", "model", "trend", "interaction", "se", "errors"))
+}
+\arguments{
+  \item{x}{
+    A fitted point process model, typically obtained from
+    the model-fitting algorithm \code{\link{ppm}}.
+    An object of class \code{"ppm"}.
+  }
+  \item{what}{
+    Character vector (partially-matched) indicating what information
+    should be printed.
+  }
+  \item{\dots}{Ignored.}
+}
+\value{
+  none.
+}
+\details{
+  This is the \code{print} method for the class \code{"ppm"}.
+  It prints information about the fitted model in a sensible format.
+
+  The argument \code{what} makes it possible to print only some
+  of the information.
+
+  If \code{what} is missing, then by default,
+  standard errors for the estimated coefficients of the model
+  will be printed only if the model is a Poisson point process.
+  To print the standard errors for a non-Poisson model,
+  call \code{print.ppm} with the argument \code{what} given explicitly,
+  or reset the default rule by typing
+  \code{spatstat.options(print.ppm.SE="always")}.
+}
+\seealso{
+  \code{\link{ppm.object}} for details of the class \code{"ppm"}.
+
+  \code{\link{ppm}} for generating these objects.
+  
+  \code{\link{plot.ppm}},
+  \code{\link{predict.ppm}}
+}
+\examples{
+ \dontrun{
+ m <- ppm(cells, ~1, Strauss(0.05))
+ m
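+ # print only the trend information: a sketch of the 'what' argument
+ print(m, what="trend")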
+ }
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{print}
+\keyword{models}
+
diff --git a/man/print.ppp.Rd b/man/print.ppp.Rd
new file mode 100644
index 0000000..04a7ae7
--- /dev/null
+++ b/man/print.ppp.Rd
@@ -0,0 +1,45 @@
+\name{print.ppp}
+\alias{print.ppp}
+\title{Print Brief Details of a Point Pattern Dataset}
+\description{
+  Prints a very brief description of a point pattern dataset.
+}
+\usage{
+  \method{print}{ppp}(x, \dots)
+}
+\arguments{
+  \item{x}{Point pattern (object of class \code{"ppp"}).}
+  \item{\dots}{Ignored.}
+}
+\details{
+  A very brief description of the point pattern \code{x} is printed.
+
+  This is a method for the generic function \code{\link{print}}.
+}
+\seealso{
+  \code{\link{print}},
+  \code{\link{print.owin}},
+  \code{\link{summary.ppp}}
+}
+\examples{
+  data(cells)      # plain vanilla point pattern
+  cells
+
+  data(lansing)    # multitype point pattern
+  lansing          
+  
+  data(longleaf)    # numeric marks
+  longleaf          
+
+  data(demopat)     # weird polygonal window
+  demopat
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{print}
+
diff --git a/man/print.psp.Rd b/man/print.psp.Rd
new file mode 100644
index 0000000..57e2d3c
--- /dev/null
+++ b/man/print.psp.Rd
@@ -0,0 +1,36 @@
+\name{print.psp}
+\alias{print.psp}
+\title{Print Brief Details of a Line Segment Pattern Dataset}
+\description{
+  Prints a very brief description of a line segment pattern dataset.
+}
+\usage{
+  \method{print}{psp}(x, \dots)
+}
+\arguments{
+  \item{x}{Line segment pattern (object of class \code{"psp"}).}
+  \item{\dots}{Ignored.}
+}
+\details{
+  A very brief description of the line segment pattern \code{x} is printed.
+
+  This is a method for the generic function \code{\link{print}}.
+}
+\seealso{
+  \code{\link{print}},
+  \code{\link{print.owin}},
+  \code{\link{summary.psp}}
+}
+\examples{
+  a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  a
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{print}
+
diff --git a/man/print.quad.Rd b/man/print.quad.Rd
new file mode 100644
index 0000000..e79effe
--- /dev/null
+++ b/man/print.quad.Rd
@@ -0,0 +1,46 @@
+\name{print.quad}
+\alias{print.quad}
+\title{Print a Quadrature Scheme}
+\description{
+  \code{print} method for a quadrature scheme.
+}
+\usage{
+ \method{print}{quad}(x,\dots)
+}
+\arguments{
+  \item{x}{
+    A quadrature scheme object, typically obtained from
+    \code{\link{quadscheme}}.
+    An object of class \code{"quad"}.
+  }
+  \item{\dots}{Ignored.}
+}
+\value{
+  none.
+}
+\details{
+  This is the \code{print} method for the class \code{"quad"}.
+  It prints simple information about the quadrature scheme.
+
+  See \code{\link{quad.object}} for details of the class \code{"quad"}.
+}
+\seealso{
+  \code{\link{quadscheme}},
+  \code{\link{quad.object}},
+  \code{\link{plot.quad}},
+  \code{\link{summary.quad}}
+}
+\examples{
+ data(cells)
+ Q <- quadscheme(cells)
+ Q
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{print}
+
diff --git a/man/profilepl.Rd b/man/profilepl.Rd
new file mode 100644
index 0000000..b14192d
--- /dev/null
+++ b/man/profilepl.Rd
@@ -0,0 +1,181 @@
+\name{profilepl}
+\alias{profilepl}
+\title{Fit Models by Profile Maximum Pseudolikelihood or AIC}
+\description{
+  Fits point process models by maximising the profile likelihood,
+  profile pseudolikelihood, profile composite likelihood or AIC.
+}
+\usage{
+profilepl(s, f, \dots, aic=FALSE, rbord=NULL, verbose = TRUE)
+}
+\arguments{
+  \item{s}{
+    Data frame containing values of the irregular parameters
+    over which the criterion will be computed.
+  }
+  \item{f}{
+    Function (such as \code{\link{Strauss}})
+    that generates an interpoint interaction object, given
+    values of the irregular parameters.
+  }
+  \item{\dots}{
+    Data passed to \code{\link{ppm}} to fit the model.
+  }
+  \item{aic}{
+    Logical value indicating whether to find the parameter values
+    which minimise the AIC (\code{aic=TRUE}) or maximise the
+    profile likelihood (\code{aic=FALSE}, the default).
+  }
+  \item{rbord}{
+    Radius for border correction (same for all models).
+    If omitted, this will be computed from the interactions.
+  }
+  \item{verbose}{
+    Logical flag indicating whether to print progress reports.
+  }
+}
+\details{
+  The model-fitting function \code{\link{ppm}} fits point process
+  models to point pattern data. However, 
+  only the \sQuote{regular} parameters of the model can be fitted by
+  \code{\link{ppm}}. The model may also depend on \sQuote{irregular}
+  parameters that must be fixed in any call to \code{\link{ppm}}.
+
+  This function \code{profilepl} is a wrapper which finds the values of the
+  irregular parameters that give the best fit.
+  If \code{aic=FALSE} (the default),
+  the best fit is the model which maximises the
+  likelihood (if the models are Poisson processes) or maximises
+  the pseudolikelihood or logistic likelihood. 
+  If \code{aic=TRUE} then the best fit is the model which
+  minimises the Akaike Information Criterion \code{\link{AIC.ppm}}.
+  
+  The argument \code{s} must be a data frame whose columns contain
+  values of the irregular parameters over which the maximisation is
+  to be performed.
+
+  An irregular parameter may affect either the interpoint interaction
+  or the spatial trend. 
+  
+  \describe{
+    \item{interaction parameters:}{
+      in a call to \code{\link{ppm}}, the argument \code{interaction}
+      determines the interaction between points. It is usually
+      a call to a function such as \code{\link{Strauss}}. The
+      arguments of this call are irregular parameters.
+      For example, the interaction radius parameter \eqn{r} of the Strauss
+      process, determined by the argument \code{r}
+      to the function \code{\link{Strauss}}, is an irregular parameter.
+    }
+    \item{trend parameters:}{
+      in a call to \code{\link{ppm}}, the spatial trend may depend on
+      covariates, which are supplied by the argument \code{covariates}.
+      These covariates may be functions written by the user,
+      of the form \code{function(x,y,...)}, and the extra arguments
+      \code{\dots} are irregular parameters.
+    }
+  }
+      
+  The argument \code{f} determines the interaction
+  for each model to be fitted. It would typically be one of the functions
+  \code{\link{Poisson}},
+  \code{\link{AreaInter}},
+  \code{\link{BadGey}},
+  \code{\link{DiggleGatesStibbard}},
+  \code{\link{DiggleGratton}},
+  \code{\link{Fiksel}},
+  \code{\link{Geyer}},
+  \code{\link{Hardcore}},
+  \code{\link{LennardJones}},
+  \code{\link{OrdThresh}}, 
+  \code{\link{Softcore}},
+  \code{\link{Strauss}} or
+  \code{\link{StraussHard}}.
+  Alternatively it could be a function written by the user.
+
+  Columns of \code{s} which match the names of arguments of \code{f}
+  will be interpreted as interaction parameters. Other columns will be
+  interpreted as trend parameters.
+
+  The data frame \code{s} must provide values for each argument of
+  \code{f}, except for the optional arguments, which are those arguments of
+  \code{f} that have the default value \code{NA}.  
+
+  To find the best fit,
+  each row of \code{s} will be taken in turn. Interaction parameters in this
+  row will be passed to \code{f}, resulting in an interaction object. 
+  Then \code{\link{ppm}} will be applied to the data \code{...}
+  using this interaction. Any trend parameters will be passed to
+  \code{\link{ppm}} through the argument \code{covfunargs}.
+  This results in a fitted point process model.
+  The value of the log pseudolikelihood or AIC from this model is stored.
+  After all rows of \code{s} have been processed in this way, the
+  row giving the maximum log pseudolikelihood (or the minimum AIC,
+  if \code{aic=TRUE}) will be found.
+
+  The object returned by \code{profilepl} contains the profile
+  pseudolikelihood (or profile AIC) function,
+  the best fitting model, and other data.
+  It can be plotted (yielding a
+  plot of the log pseudolikelihood or AIC values against the irregular
+  parameters) or printed (yielding information about the best fitting
+  values of the irregular parameters). 
+ 
+  In general, \code{f} may be any function that will return
+  an interaction object (object of class \code{"interact"})
+  that can be used in a call to \code{\link{ppm}}. Each argument of
+  \code{f} must be a single value. 
+}
+\value{
+  An object of class \code{"profilepl"}. There are methods
+  for \code{\link[graphics]{plot}},
+  \code{\link[base]{print}},
+  \code{\link[base]{summary}},
+  \code{\link[stats]{simulate}},
+  \code{\link{as.ppm}}
+  and 
+  \code{\link{parameters}} for objects of this class.
+
+  The components of the object include
+  \item{fit}{Best-fitting model}
+  \item{param}{The data frame \code{s}}
+  \item{iopt}{Row index of the best-fitting parameters in \code{s}}
+
+  To extract the best fitting model you can also use \code{\link{as.ppm}}.
+}
+\examples{
+    # one irregular parameter
+    rr <- data.frame(r=seq(0.05,0.15, by=0.01))
+    \testonly{
+      rr <- data.frame(r=c(0.05,0.1,0.15))
+    }
+    ps <- profilepl(rr, Strauss, cells)
+    ps
+    if(interactive()) plot(ps)
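+    # extract the best-fitting model: a sketch using as.ppm
+    as.ppm(ps)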
+
+    # two irregular parameters
+    rs <- expand.grid(r=seq(0.05,0.15, by=0.01),sat=1:3)
+    \testonly{
+      rs <- expand.grid(r=c(0.07,0.12),sat=1:2)
+    }
+    pg <- profilepl(rs, Geyer, cells)
+    pg
+    if(interactive()) {
+      plot(pg)
+      as.ppm(pg)
+    }
+
+    # multitype pattern with a common interaction radius
+    \dontrun{
+     RR <- data.frame(R=seq(0.03,0.05,by=0.01))
+     MS <- function(R) { MultiStrauss(radii=diag(c(R,R))) }
+     pm <- profilepl(RR, MS, amacrine ~marks)
+    }
+    ## more information 
+    summary(pg)
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/progressreport.Rd b/man/progressreport.Rd
new file mode 100644
index 0000000..9c3bc8e
--- /dev/null
+++ b/man/progressreport.Rd
@@ -0,0 +1,118 @@
+\name{progressreport}
+\alias{progressreport}
+\title{Print Progress Reports}
+\description{
+  Prints Progress Reports during a loop or iterative calculation.
+}
+\usage{
+progressreport(i, n,
+               every = min(100,max(1, ceiling(n/100))),
+               tick = 1,
+               nperline = NULL,
+               charsperline = getOption("width"),
+               style = spatstat.options("progress"),
+               showtime = NULL,
+               state=NULL)
+}
+\arguments{
+  \item{i}{
+    Integer. The current iteration number (from 1 to \code{n}).
+  }
+  \item{n}{
+    Integer. The (maximum) number of iterations to be computed.
+  }
+  \item{every}{
+    Optional integer. Iteration number will be printed 
+    when \code{i} is a multiple of \code{every}.
+  }
+  \item{tick}{
+    Optional integer. A tick mark or dot will be printed
+    when \code{i} is a multiple of \code{tick}.
+  }
+  \item{nperline}{
+    Optional integer. Number of iterations per line of output.
+  }
+  \item{charsperline}{
+    Optional integer. The number of characters in a line of output.
+  }
+  \item{style}{
+    Character string determining the style of display.
+    Options are \code{"tty"} (the default), \code{"tk"} and \code{"txtbar"}.
+    See Details.
+  }
+  \item{showtime}{
+    Optional. Logical value indicating whether to print the estimated
+    time remaining. Applies only when \code{style="tty"}.
+  }
+  \item{state}{
+    Optional. A list containing the internal data.
+  }
+}
+\details{
+  This is a convenient function for reporting progress
+  during an iterative sequence of calculations
+  or a suite of simulations.
+
+  \itemize{
+    \item 
+    If \code{style="tk"} then \code{tcltk::tkProgressBar} is
+    used to pop-up a new graphics window showing a progress bar.
+    This requires the package \pkg{tcltk}.
+    As \code{i} increases from 1 to \code{n}, the bar will lengthen.
+    The arguments \code{every, tick, nperline, showtime} are ignored.
+    \item 
+    If \code{style="txtbar"} then \code{\link[utils]{txtProgressBar}} is
+    used to represent progress as a bar made of text characters in the
+    \R interpreter window.
+    As \code{i} increases from 1 to \code{n}, the bar will lengthen.
+    The arguments \code{every, tick, nperline, showtime} are ignored.
+    \item 
+    If \code{style="tty"} (the default),
+    then progress reports are printed to the
+    console. This only seems to work well under Linux.
+    As \code{i} increases from 1 to \code{n}, 
+    the output will be a sequence of dots (one dot for every \code{tick}
+    iterations), iteration numbers (printed when iteration number is
+    a multiple of \code{every} or is less than 4),
+    and optionally the estimated time
+    remaining. For example \code{[etd 1:20:05]} means an estimated time
+    of 1 hour, 20 minutes and 5 seconds until finished.
+
+    The estimated time remaining will be printed only if
+    \code{style="tty"}, and the argument \code{state} is given,
+    and either \code{showtime=TRUE}, or \code{showtime=NULL} and the
+    iterations are slow (defined as: the estimated time remaining
+    is longer than 3 minutes, or the average time per iteration is
+    longer than 20 seconds).
+  }
+  
+  It is optional, but strongly advisable, to use the argument \code{state}
+  to store and update the internal data for the progress reports
+  (such as the cumulative time taken for computation)
+  as shown in the last example below.
+  This avoids conflicts with other programs that might be
+  calling \code{progressreport} at the same time.
+}
+\value{
+  If \code{state} was \code{NULL}, the result is \code{NULL}.
+  Otherwise the result is the updated value of \code{state}.
+}
+\author{
+  \spatstatAuthors.
+}
+\examples{
+  for(i in 1:40) {
+     #
+     # code that does something...
+     # 
+     progressreport(i, 40)
+  }
+
+  # saving internal state: *recommended*
+  sta <- list()
+  for(i in 1:20) {
+     # some code ...
+     sta <- progressreport(i, 20, state=sta)
+  }
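+
+  # show the estimated time remaining: a sketch; assumes style="tty",
+  # and requires 'state' to be supplied
+  sta <- list()
+  for(i in 1:10) {
+     Sys.sleep(0.01)   # stand-in for real computation
+     sta <- progressreport(i, 10, state=sta, showtime=TRUE)
+  }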
+}
+\keyword{print}
diff --git a/man/project2segment.Rd b/man/project2segment.Rd
new file mode 100644
index 0000000..bc3b835
--- /dev/null
+++ b/man/project2segment.Rd
@@ -0,0 +1,75 @@
+\name{project2segment}
+\alias{project2segment}
+\title{Move Point To Nearest Line}
+\description{
+  Given a point pattern and a line segment pattern,
+  this function moves each point to the closest location
+  on a line segment.
+}
+\usage{
+project2segment(X, Y)
+}
+\arguments{
+  \item{X}{A point pattern (object of class \code{"ppp"}).}
+  \item{Y}{A line segment pattern (object of class \code{"psp"}).}
+}
+\details{
+  For each point \code{x} in the point pattern \code{X}, this function
+  finds the closest line segment \code{y} in the line segment pattern
+  \code{Y}. It then `projects' the point \code{x} onto the line segment
+  \code{y} by finding the position \code{z} along \code{y}
+  which is closest to \code{x}. This position \code{z} is returned,
+  along with supplementary information.
+}
+\value{
+  A list with the following components. Each component has length equal to
+  the number of points in \code{X}, and its entries correspond to the
+  points of \code{X}.
+  
+  \item{Xproj }{
+    Point pattern (object of class \code{"ppp"})
+    containing the projected points.
+  }
+  \item{mapXY }{
+    Integer vector identifying the nearest segment to each point.
+  }
+  \item{d}{
+    Numeric vector of distances from each point of \code{X} to
+    the corresponding projected point.
+  }
+  \item{tp}{
+    Numeric vector giving the scaled parametric coordinate 
+    \eqn{0 \le t_p \le 1}{0 <= tp <= 1} of the position of the projected point
+    along the segment.
+  }
+
+  For example, suppose \code{mapXY[2] = 5} and \code{tp[2] = 0.33}.
+  Then \code{Y[5]} is the line segment lying closest to \code{X[2]}.
+  The projection of the point \code{X[2]} onto the segment \code{Y[5]}
+  is the point \code{Xproj[2]}, which lies one-third of the way
+  between the first and second endpoints of the line segment \code{Y[5]}.
+
+  }
+\author{
+  \adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{nearestsegment}} for a faster way to determine
+  which segment is closest to each point.
+}
+\examples{
+  X <- rstrat(square(1), 5)
+  Y <- as.psp(matrix(runif(20), 5, 4), window=owin())
+  plot(Y, lwd=3, col="green")
+  plot(X, add=TRUE, col="red", pch=16)
+  v <- project2segment(X,Y)
+  Xproj <- v$Xproj
+  plot(Xproj, add=TRUE, pch=16)
+  arrows(X$x, X$y, Xproj$x, Xproj$y, angle=10, length=0.15, col="red")
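+  # verify the parametric coordinate 'tp' for the first point: a sketch
+  # that reads the internal 'ends' data frame of Y (normally not touched)
+  i <- 1
+  E <- Y$ends[v$mapXY[i], ]
+  c(E$x0 + v$tp[i] * (E$x1 - E$x0),
+    E$y0 + v$tp[i] * (E$y1 - E$y0))   # agrees with coordinates of Xproj[i]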
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/project2set.Rd b/man/project2set.Rd
new file mode 100644
index 0000000..1749a70
--- /dev/null
+++ b/man/project2set.Rd
@@ -0,0 +1,64 @@
+\name{project2set}
+\alias{project2set}
+\title{
+  Find Nearest Point in a Region
+}
+\description{
+  For each data point in a point pattern \code{X},
+  find the nearest location in a given spatial region \code{W}.
+}
+\usage{
+  project2set(X, W, \dots)
+}
+\arguments{
+  \item{X}{
+    Point pattern (object of class \code{"ppp"}).
+  }
+  \item{W}{
+    Window (object of class \code{"owin"})
+    or something acceptable to \code{\link{as.owin}}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}} controlling the
+    pixel resolution.
+  }
+}
+\details{
+  The window \code{W} is first discretised as a binary mask
+  using \code{\link{as.mask}}.
+  
+  For each data point \code{X[i]} in the point pattern \code{X},
+  the algorithm finds the nearest pixel in \code{W}.
+
+  The result is a point pattern \code{Y} containing these nearest points,
+  that is, \code{Y[i]} is the nearest point in \code{W} to the
+  point \code{X[i]}.
+}
+\value{
+  A point pattern (object of class \code{"ppp"}) with the same
+  number of points as \code{X} in the window \code{W}.
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{project2segment}},
+  \code{\link{nncross}}
+}
+\examples{
+  He <- heather$fine[owin(c(2.8, 7.4), c(4.0, 7.8))]
+  plot(He, main="project2set")
+  X <- runifpoint(4, erosion(complement.owin(He), 0.2))
+  points(X, col="red")
+  Y <- project2set(X, He)
+  points(Y, col="green")
+  arrows(X$x, X$y, Y$x, Y$y, angle=15, length=0.2)
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/prune.rppm.Rd b/man/prune.rppm.Rd
new file mode 100644
index 0000000..0e46ed6
--- /dev/null
+++ b/man/prune.rppm.Rd
@@ -0,0 +1,54 @@
+\name{prune.rppm}
+\alias{prune.rppm}
+\title{
+  Prune a Recursively Partitioned Point Process Model
+}
+\description{
+  Given a model which has been fitted to point pattern data
+  by recursive partitioning, apply pruning to reduce the
+  complexity of the partition tree.
+}
+\usage{
+\method{prune}{rppm}(tree, \dots) 
+}
+\arguments{
+  \item{tree}{
+    Fitted point process model of class \code{"rppm"}
+    produced by the function \code{\link{rppm}}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link[rpart]{prune.rpart}}
+    to control the pruning procedure.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link[rpart]{prune}}
+  for the class \code{"rppm"}. An object of this class is a
+  point process model fitted to point pattern data
+  by recursive partitioning using the function \code{\link{rppm}}.
+  
+  The recursive partition tree will be pruned using
+  \code{\link[rpart]{prune.rpart}}. The result is another
+  object of class \code{"rppm"}.
+}
+\value{
+  Object of class \code{"rppm"}.
+}
+\author{
+  \spatstatAuthors
+}
+\seealso{
+  \code{\link{rppm}},
+  \code{\link{plot.rppm}},
+  \code{\link{predict.rppm}}.
+}
+\examples{
+  # Murchison gold data
+  mur <- solapply(murchison, rescale, s=1000, unitname="km")
+  mur$dfault <- distfun(mur$faults)
+  fit <- rppm(gold ~ dfault + greenstone, data=mur)
+  fit
+  prune(fit, cp=0.1)
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/pseudoR2.Rd b/man/pseudoR2.Rd
new file mode 100644
index 0000000..84d6fa2
--- /dev/null
+++ b/man/pseudoR2.Rd
@@ -0,0 +1,71 @@
+\name{pseudoR2}
+\alias{pseudoR2}
+\alias{pseudoR2.ppm}
+\alias{pseudoR2.lppm}
+\title{
+  Calculate Pseudo-R-Squared for Point Process Model
+}
+\description{
+  Given a fitted point process model, calculate 
+  the pseudo-R-squared value, which measures the 
+  fraction of variation in the data that is explained
+  by the model.  
+}
+\usage{
+  pseudoR2(object, \dots)
+
+  \method{pseudoR2}{ppm}(object, \dots)
+
+  \method{pseudoR2}{lppm}(object, \dots)
+}
+\arguments{
+  \item{object}{
+    Fitted point process model. An object of class \code{"ppm"}
+    or \code{"lppm"}.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link{deviance.ppm}}
+    or \code{\link{deviance.lppm}}.
+  }
+}
+\details{
+  The function \code{pseudoR2} is generic, with methods
+  for fitted point process models of class \code{"ppm"} and
+  \code{"lppm"}.
+
+  This function computes McFadden's pseudo-R-squared
+  \deqn{
+    R^2 = 1 - \frac{D}{D_0}
+  }{
+    R^2 = 1 - D/D0
+  }
+  where \eqn{D} is the deviance of the fitted model \code{object},
+  and \eqn{D_0}{D0} is the deviance of the null model
+  (obtained by refitting \code{object}
+  using the trend formula \code{~1}).
+  Deviance is defined as twice the negative log-likelihood
+  or log-pseudolikelihood.
+}
+\value{
+  A single numeric value.
+}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{deviance.ppm}}, 
+  \code{\link{deviance.lppm}}.
+}
+\examples{
+  fit <- ppm(swedishpines ~ x+y)
+  pseudoR2(fit)
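+
+  # the same value computed by hand: a sketch that refits the null model
+  fit0 <- update(fit, . ~ 1)
+  1 - deviance(fit)/deviance(fit0)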
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/psib.Rd b/man/psib.Rd
new file mode 100644
index 0000000..418ff67
--- /dev/null
+++ b/man/psib.Rd
@@ -0,0 +1,61 @@
+\name{psib}
+\alias{psib}
+\alias{psib.kppm}
+\title{
+  Sibling Probability of Cluster Point Process
+}
+\description{
+  Computes the sibling probability of a cluster point process model.
+}
+\usage{
+  psib(object)
+
+  \method{psib}{kppm}(object)
+}
+\arguments{
+  \item{object}{
+    Fitted cluster point process model
+    (object of class \code{"kppm"}).
+  }
+}
+\details{
+  In a Poisson cluster process, two points are called \emph{siblings}
+  if they belong to the same cluster, that is, if they had the same
+  parent point. If two points of the process are
+  separated by a distance \eqn{r}, the probability that
+  they are siblings is \eqn{p(r) = 1 - 1/g(r)} where \eqn{g} is the
+  pair correlation function of the process.
+  
+  The value \eqn{p(0) = 1 - 1/g(0)} is the probability that,
+  if two points of the process are situated very close to each other,
+  they came from the same cluster. This probability
+  is an index of the strength of clustering, with high values
+  suggesting strong clustering.
+
+  This concept was proposed in Baddeley, Rubak and Turner (2015, page 479)
+  and Baddeley (2016).
+}
+\value{
+  A single number.
+}
+\references{
+  Baddeley, A. (2016)
+  Local composite likelihood for spatial point processes.
+  \emph{Spatial Statistics}, in press.
+
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  Chapman and Hall/CRC Press.
+}
+\author{
+  \adrian.
+}
+\seealso{
+  \code{\link[spatstat]{kppm}}
+}
+\examples{
+  fit <- kppm(redwood ~1, "Thomas")
+  psib(fit)
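+  # equivalently, from the fitted pair correlation function: a sketch
+  g <- pcfmodel(fit)
+  1 - 1/g(0)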
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/psp.Rd b/man/psp.Rd
new file mode 100644
index 0000000..1878df7
--- /dev/null
+++ b/man/psp.Rd
@@ -0,0 +1,91 @@
+\name{psp}
+\alias{psp}
+\title{Create a Line Segment Pattern}
+\description{
+  Creates an object of class \code{"psp"} representing 
+  a line segment pattern in the two-dimensional plane.
+}
+\usage{
+  psp(x0,y0, x1, y1, window, marks=NULL,
+        check=spatstat.options("checksegments"))
+}
+\arguments{
+  \item{x0}{Vector of \eqn{x} coordinates of first endpoint of each segment}
+  \item{y0}{Vector of \eqn{y} coordinates of first endpoint of each segment}
+  \item{x1}{Vector of \eqn{x} coordinates of second endpoint of each segment}
+  \item{y1}{Vector of \eqn{y} coordinates of second endpoint of each segment}
+  \item{window}{window of observation,
+    an object of class \code{"owin"}}
+  \item{marks}{(optional) vector or data frame of mark values}
+  \item{check}{Logical value indicating whether to check that the line segments
+    lie inside the window.}
+}
+\value{
+  An object of class \code{"psp"} 
+  describing a line segment pattern in the two-dimensional plane
+  (see \code{\link{psp.object}}).
+}
+\details{
+  In the \pkg{spatstat} library, a spatial pattern of line segments is
+  described by an object of class \code{"psp"}. This function
+  creates such objects.
+
+  The vectors \code{x0}, \code{y0}, \code{x1} and \code{y1} must be
+  numeric vectors of equal length. They are interpreted as the cartesian
+  coordinates of the endpoints of the line segments.
+
+  A line segment pattern is assumed to have been observed within a specific
+  region of the plane called the observation window.
+  An object of class \code{"psp"} representing a line segment pattern
+  contains information specifying the observation window.
+  This window must always be specified when creating a line segment
+  pattern dataset;
+  there is intentionally no default action of ``guessing'' the window
+  dimensions from the segment endpoints alone. 
+
+  The argument \code{window} must be an object of class
+  \code{"owin"}. It is a full description of the window geometry,
+  and could have been obtained from \code{\link{owin}} or
+  \code{\link{as.owin}}, or by just extracting the observation window
+  of another dataset, or by manipulating such windows.
+  See \code{\link{owin}} or the Examples below.
+
+  The optional argument \code{marks} is given if the line segment pattern
+  is marked, i.e. if each line segment carries additional information.
+  For example, line segments which are classified into two or more different
+  types, or colours, may be regarded as having a mark which identifies
+  which colour they are. 
+
+  The object \code{marks} must be a vector of the same length
+  as \code{x0}, or a data frame with number of rows equal to the
+  length of \code{x0}.  The interpretation is that \code{marks[i]}
+  or \code{marks[i,]} is the mark attached to the \eqn{i}th line
+  segment.  If the marks are real numbers then \code{marks} should
+  be a numeric vector, while if the marks take only a finite number
+  of possible values (e.g. colours or types) then \code{marks}
+  should be a \code{factor}.
+
+  See \code{\link{psp.object}} for a description of the class
+  \code{"psp"}.
+
+  Users would normally invoke \code{psp} to create a line segment pattern,
+  and the function \code{\link{as.psp}} to convert data in another
+  format into a line segment pattern.
+}
+\seealso{
+  \code{\link{psp.object}},
+  \code{\link{as.psp}},
+  \code{\link{owin.object}},
+  \code{\link{owin}},
+  \code{\link{as.owin}},
+  \code{\link{marks.psp}}
+}
+\examples{
+  X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  m <- data.frame(A=1:10, B=letters[1:10])
+  X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin(), marks=m)
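+  # factor-valued marks: a sketch classifying segments into two types
+  f <- factor(sample(c("A","B"), 10, replace=TRUE))
+  X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin(), marks=f)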
+}
+\author{\adrian
+  and \rolf.
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/psp.object.Rd b/man/psp.object.Rd
new file mode 100644
index 0000000..630d5d1
--- /dev/null
+++ b/man/psp.object.Rd
@@ -0,0 +1,90 @@
+\name{psp.object}
+\alias{psp.object} %DoNotExport
+\title{Class of Line Segment Patterns}
+\description{
+  A class \code{"psp"} to represent a spatial pattern of
+  line segments in the plane.
+  Includes information about the window in which the
+  pattern was observed. Optionally includes marks.
+}
+\details{
+  An object of this class represents 
+  a two-dimensional pattern of line segments. It specifies
+  \itemize{
+    \item the locations of the line segments (both endpoints)
+    \item the window in which the pattern was observed
+    \item optionally, a ``mark'' attached to each line segment
+      (extra information such as a type label).
+    }
+  If \code{X} is an object of type \code{psp},
+  it contains the following elements:
+  \tabular{ll}{
+    \code{ends} \tab data frame with entries \code{x0, y0, x1, y1} \cr
+                \tab giving coordinates of segment endpoints \cr 
+    \code{window} \tab window of observation \cr
+                  \tab (an object of class \code{\link{owin}}) \cr
+    \code{n} \tab number of line segments \cr
+    \code{marks} \tab optional vector or data frame of marks \cr
+    \code{markformat} \tab character string specifying the format of the \cr
+                      \tab marks; \dQuote{none}, \dQuote{vector}, or
+                      \dQuote{dataframe}
+  }
+  Users are strongly advised not to manipulate these entries
+  directly.
+  
+  Objects of class \code{"psp"}
+  may be created by the function
+  \code{\link{psp}}
+  and converted from other types of data by the function
+  \code{\link{as.psp}}.
+  Note that you must always specify the window of observation;
+  there is intentionally no default action of ``guessing'' the window
+  dimensions from the line segments alone.
+
+  Subsets of a line segment pattern may be obtained by the functions
+  \code{\link{[.psp}} and \code{\link{clip.psp}}.
+
+  Line segment pattern objects can be plotted just by typing \code{plot(X)}
+  which invokes the \code{plot} method for line segment pattern objects,
+  \code{\link{plot.psp}}. See \code{\link{plot.psp}} for further information.
+
+  There are also methods for \code{summary} and \code{print}
+  for line segment patterns. Use \code{summary(X)} to see a useful description
+  of the data.
+
+  Utilities for line segment patterns include
+  \code{\link{midpoints.psp}} (to compute the midpoints of each segment),
+  \code{\link{lengths.psp}} (to compute the length of each segment),
+  \code{\link{angles.psp}} (to compute the angle of orientation of
+  each segment), and 
+  \code{\link{distmap.psp}} (to compute the distance map of a
+  line segment pattern).
+}
+\seealso{
+  \code{\link{psp}},
+  \code{\link{as.psp}},
+  \code{\link{[.psp}}
+}
+\examples{
+# creating 
+    a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin())
+# converting from other formats
+    a <- as.psp(matrix(runif(80), ncol=4), window=owin())
+    a <- as.psp(data.frame(x0=runif(20), y0=runif(20),
+                            x1=runif(20), y1=runif(20)), window=owin())
+# clipping
+    w <- owin(c(0.1,0.7), c(0.2, 0.8))
+    b <- clip.psp(a, w)
+    b <- a[w]
+# the last two lines are equivalent.
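+# utilities: a sketch
+    lengths.psp(a)
+    angles.psp(a)
+    Xmid <- midpoints.psp(a)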
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{attribute}
+ 
+ 
diff --git a/man/psst.Rd b/man/psst.Rd
new file mode 100644
index 0000000..560d06a
--- /dev/null
+++ b/man/psst.Rd
@@ -0,0 +1,156 @@
+\name{psst}
+\alias{psst}
+\title{
+  Pseudoscore Diagnostic For Fitted Model against General Alternative
+}
+\description{
+  Given a point process model fitted to a point pattern dataset,
+  and any choice of functional summary statistic,
+  this function computes the pseudoscore test statistic
+  of goodness-of-fit for the model.
+}
+\usage{
+psst(object, fun, r = NULL, breaks = NULL, ...,
+     model=NULL,
+     trend = ~1, interaction = Poisson(), rbord = reach(interaction),
+     truecoef=NULL, hi.res=NULL, funargs = list(correction="best"),
+     verbose=TRUE)
+}
+\arguments{
+  \item{object}{
+    Object to be analysed.
+    Either a fitted point process model (object of class \code{"ppm"})
+    or a point pattern (object of class \code{"ppp"})
+    or quadrature scheme (object of class \code{"quad"}).
+  }
+  \item{fun}{
+    Summary function to be applied to each point pattern.
+  }
+  \item{r}{
+    Optional. 
+    Vector of values of the argument \eqn{r} at which the
+    function \eqn{S(r)} should be computed.
+    This argument is usually not specified. There is a sensible default.
+  }
+  \item{breaks}{
+    Optional alternative to \code{r} for advanced use.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{model}{
+    Optional. A fitted point process model (object of
+    class \code{"ppm"}) to be re-fitted to the data
+    using \code{\link{update.ppm}}, if \code{object} is a point pattern.
+    Overrides the arguments \code{trend,interaction,rbord}.
+  }
+  \item{trend,interaction,rbord}{
+    Optional. Arguments passed to \code{\link{ppm}}
+    to fit a point process model to the data,
+    if \code{object} is a point pattern.
+    See \code{\link{ppm}} for details.
+  }
+  \item{truecoef}{
+    Optional. Numeric vector. If present, this will be treated as 
+    if it were the true coefficient vector of the point process model,
+    in calculating the diagnostic. Incompatible with \code{hi.res}.
+  }
+  \item{hi.res}{
+    Optional. List of parameters passed to \code{\link{quadscheme}}.
+    If this argument is present, the model will be
+    re-fitted at high resolution as specified by these parameters.
+    The coefficients
+    of the resulting fitted model will be taken as the true coefficients.
+    Then the diagnostic will be computed for the default
+    quadrature scheme, but using the high resolution coefficients.
+  }
+  \item{funargs}{
+    List of additional arguments to be passed to \code{fun}.
+  }
+  \item{verbose}{
+    Logical value determining whether to print progress reports
+    during the computation.
+  }
+}
+\details{
+  Let \eqn{x} be a point pattern dataset consisting of points
+  \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} in a window \eqn{W}.
+  Consider a point process model fitted to \eqn{x}, with
+  conditional intensity
+  \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}.
+  For the purpose of testing goodness-of-fit, we regard the fitted model
+  as the null hypothesis. Given a functional summary statistic \eqn{S},
+  consider a family of alternative models obtained by exponential
+  tilting of the null model by \eqn{S}. 
+  The pseudoscore for the null model is
+  \deqn{
+    V(r) = \sum_i \Delta S(x_i, x, r) - \int_W \Delta S(u,x, r) \lambda(u,x)
+    {\rm d} u
+  }{
+    V(r) = sum( Delta S(x[i], x, r)) - integral( Delta S(u,x, r) lambda(u,x) du)
+  }
+  where the \eqn{\Delta}{Delta} operator is
+  \deqn{
+    \Delta S(u,x, r) = S(x\cup\{u\}, r) - S(x\setminus u, r)
+  }{
+    Delta S(u,x, r) = S(x union u, r) - S(x setminus u, r)
+  }
+  the difference between the values of \eqn{S} for the
+  point pattern with and without the point \eqn{u}.
+
+  According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have
+  mean zero if the model is correct (ignoring the fact that the
+  parameters of the model have been estimated). Hence \eqn{V(r)} can be
+  used as a diagnostic for goodness-of-fit.
+
+  This algorithm computes \eqn{V(r)} by direct evaluation of the sum and
+  integral. It is computationally intensive, but it is available for
+  any summary statistic \eqn{S(r)}.
+
+  The diagnostic \eqn{V(r)} is also called 
+  the \bold{pseudoresidual} of \eqn{S}. On the right
+  hand side of the equation for \eqn{V(r)} given above,
+  the sum over points of \eqn{x} is called the
+  \bold{pseudosum} and the integral is called the \bold{pseudocompensator}.
+}
+\value{
+  A function value table (object of class \code{"fv"}),
+  essentially a data frame of function values.
+
+  Columns in this data frame include \code{dat} for the pseudosum,
+  \code{com} for the compensator and \code{res} for the
+  pseudoresidual.
+  
+  There is a plot method for this class. See \code{\link{fv.object}}.
+}
+\references{
+  Baddeley, A., Rubak, E. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2011)
+  Score, pseudo-score and residual
+  diagnostics for spatial point process models.
+  \emph{Statistical Science} \bold{26}, 613--646.
+}
+\author{
+  \adrian
+  
+  
+  \ege and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}.
+}
+\seealso{
+  Special cases:
+  \code{\link{psstA}},
+  \code{\link{psstG}}.
+
+  Alternative functions:
+  \code{\link{Kres}},
+  \code{\link{Gres}}.
+}
+\examples{
+    data(cells)
+    fit0 <- ppm(cells, ~1) # uniform Poisson
+    \testonly{fit0 <- ppm(cells, ~1, nd=8)}
+    G0 <- psst(fit0, Gest)
+    G0
+    if(interactive()) plot(G0)
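+    # plot only the pseudoresidual column: a sketch of fv plotting
+    if(interactive()) plot(G0, res ~ r)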
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/psstA.Rd b/man/psstA.Rd
new file mode 100644
index 0000000..d59ad4f
--- /dev/null
+++ b/man/psstA.Rd
@@ -0,0 +1,193 @@
+\name{psstA}
+\Rdversion{1.1}
+\alias{psstA}
+\title{
+  Pseudoscore Diagnostic For Fitted Model against Area-Interaction Alternative
+}
+\description{
+  Given a point process model fitted to a point pattern dataset,
+  this function computes the pseudoscore diagnostic 
+  of goodness-of-fit for the model, against moderately
+  clustered or moderately inhibited alternatives of area-interaction type.
+}
+\usage{
+psstA(object, r = NULL, breaks = NULL, \dots,
+      model = NULL, 
+      trend = ~1, interaction = Poisson(),
+      rbord = reach(interaction), ppmcorrection = "border",
+      correction = "all",
+      truecoef = NULL, hi.res = NULL,
+      nr=spatstat.options("psstA.nr"),
+      ngrid=spatstat.options("psstA.ngrid"))
+}
+\arguments{
+  \item{object}{
+    Object to be analysed.
+    Either a fitted point process model (object of class \code{"ppm"})
+    or a point pattern (object of class \code{"ppp"})
+    or quadrature scheme (object of class \code{"quad"}).
+  }
+  \item{r}{
+    Optional. 
+    Vector of values of the argument \eqn{r} at which the
+    diagnostic should be computed.
+    This argument is usually not specified. There is a sensible default.
+  }
+  \item{breaks}{
+    This argument is for internal use only.
+  }
+  \item{\dots}{
+    Extra arguments passed to \code{\link{quadscheme}} to determine
+    the quadrature scheme, if \code{object} is a point pattern.
+  }
+  \item{model}{
+    Optional. A fitted point process model (object of
+    class \code{"ppm"}) to be re-fitted to the data
+    using \code{\link{update.ppm}}, if \code{object} is a point pattern.
+    Overrides the arguments \code{trend,interaction,rbord,ppmcorrection}.
+  }
+  \item{trend,interaction,rbord}{
+    Optional. Arguments passed to \code{\link{ppm}}
+    to fit a point process model to the data,
+    if \code{object} is a point pattern.
+    See \code{\link{ppm}} for details.
+  }
+  \item{ppmcorrection}{
+    Optional. Character string specifying the edge correction
+    for the pseudolikelihood to be used
+    in fitting the point process model. Passed to \code{\link{ppm}}.
+  }
+  \item{correction}{
+    Optional. Character string specifying which diagnostic quantities
+    will be computed. Options are \code{"all"} and \code{"best"}.
+    The default is to compute all diagnostic quantities.
+  }
+  \item{truecoef}{
+    Optional. Numeric vector. If present, this will be treated as 
+    if it were the true coefficient vector of the point process model,
+    in calculating the diagnostic. Incompatible with \code{hi.res}.
+  }
+  \item{hi.res}{
+    Optional. List of parameters passed to \code{\link{quadscheme}}.
+    If this argument is present, the model will be
+    re-fitted at high resolution as specified by these parameters.
+    The coefficients
+    of the resulting fitted model will be taken as the true coefficients.
+    Then the diagnostic will be computed for the default
+    quadrature scheme, but using the high resolution coefficients.
+  }
+  \item{nr}{
+    Optional. Number of \code{r} values to be used
+    if \code{r} is not specified.
+  }
+  \item{ngrid}{
+    Integer. Number of points in the square grid used to compute
+    the approximate area.
+  }
+}
+\details{
+  This function computes the pseudoscore test statistic
+  which can be used as a diagnostic for goodness-of-fit of a fitted
+  point process model.
+  
+  Let \eqn{x} be a point pattern dataset consisting of points
+  \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} in a window \eqn{W}.
+  Consider a point process model fitted to \eqn{x}, with
+  conditional intensity
+  \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}.
+  For the purpose of testing goodness-of-fit, we regard the fitted model
+  as the null hypothesis.
+  The alternative hypothesis is a family of
+  hybrid models obtained by combining 
+  the fitted model with the area-interaction process
+  (see \code{\link{AreaInter}}). The family of alternatives includes
+  models that are slightly more regular than the fitted model,
+  and others that are slightly more clustered than the fitted model.
+
+  The pseudoscore, evaluated at the null model, is
+  \deqn{
+    V(r) = \sum_i A(x_i, x, r) - \int_W A(u,x, r) \lambda(u,x)
+    {\rm d} u
+  }{
+    V(r) = sum( A(x[i], x, r)) - integral( A(u,x,r) lambda(u,x) du)
+  }
+  where
+  \deqn{
+    A(u,x,r) = B(x\cup\{u\},r) - B(x\setminus u, r)
+  }{
+    A(u,x,r) = B(x union u, r) - B(x setminus u, r)
+  }
+  where \eqn{B(x,r)} is the area of the union of the discs of radius
+  \eqn{r} centred at the points of \eqn{x} (i.e. \eqn{B(x,r)} is the area
+  of the dilation of \eqn{x} by a distance \eqn{r}).
+  Thus \eqn{A(u,x,r)} is the \emph{unclaimed area} associated with
+  \eqn{u}, that is, the area of that part of the disc
+  of radius \eqn{r} centred at the point \eqn{u} that is
+  not covered by any of the discs of radius \eqn{r} centred at
+  points of \eqn{x}.
+
+  According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have
+  mean zero if the model is correct (ignoring the fact that the
+  parameters of the model have been estimated). Hence \eqn{V(r)} can be
+  used as a diagnostic for goodness-of-fit.
+
+  The diagnostic \eqn{V(r)} is also called 
+  the \bold{pseudoresidual}. On the right
+  hand side of the equation for \eqn{V(r)} given above,
+  the sum over points of \eqn{x} is called the
+  \bold{pseudosum} and the integral is called the \bold{pseudocompensator}.
+}
+\value{
+  A function value table (object of class \code{"fv"}),
+  essentially a data frame of function values.
+
+  Columns in this data frame include \code{dat} for the pseudosum,
+  \code{com} for the compensator and \code{res} for the
+  pseudoresidual.
+  
+  There is a plot method for this class. See \code{\link{fv.object}}.
+}
+\section{Warning}{
+  This computation can take a \bold{very long time}.
+
+  To shorten the computation time, choose smaller values of the
+  arguments \code{nr} and \code{ngrid}, or reduce the values of their
+  defaults \code{spatstat.options("psstA.nr")}
+  and \code{spatstat.options("psstA.ngrid")}.
+
+  Computation time is roughly proportional to
+  \code{nr * npoints * ngrid^2} where \code{npoints} is the number
+  of points in the point pattern.
+}
+\references{
+  Baddeley, A., Rubak, E. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2011)
+  Score, pseudo-score and residual
+  diagnostics for spatial point process models.
+  \emph{Statistical Science} \bold{26}, 613--646.
+}
+\author{
+  \adrian
+  
+  
+  \ege and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}.
+}
+\seealso{
+  Alternative functions:
+  \code{\link{psstG}},
+  \code{\link{psst}},
+  \code{\link{Gres}},
+  \code{\link{Kres}}.
+
+  Point process models: \code{\link{ppm}}.
+  
+  Options: \code{\link{spatstat.options}}
+}
+\examples{
+   pso <- spatstat.options(psstA.ngrid=16,psstA.nr=10)
+   X <- rStrauss(200,0.1,0.05)
+   plot(psstA(X))
+   plot(psstA(X, interaction=Strauss(0.05)))
+   spatstat.options(pso)
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/psstG.Rd b/man/psstG.Rd
new file mode 100644
index 0000000..53c9031
--- /dev/null
+++ b/man/psstG.Rd
@@ -0,0 +1,150 @@
+\name{psstG}
+\Rdversion{1.1}
+\alias{psstG}
+\title{
+  Pseudoscore Diagnostic For Fitted Model against Saturation Alternative
+}
+\description{
+  Given a point process model fitted to a point pattern dataset,
+  this function computes the pseudoscore  diagnostic 
+  of goodness-of-fit for the model, against moderately
+  clustered or moderately inhibited alternatives of saturation type.
+}
+\usage{
+psstG(object, r = NULL, breaks = NULL, \dots,
+      model=NULL,
+      trend = ~1, interaction = Poisson(), rbord = reach(interaction),
+      truecoef = NULL, hi.res = NULL)
+}
+\arguments{
+  \item{object}{
+    Object to be analysed.
+    Either a fitted point process model (object of class \code{"ppm"})
+    or a point pattern (object of class \code{"ppp"})
+    or quadrature scheme (object of class \code{"quad"}).
+  }
+  \item{r}{
+    Optional. 
+    Vector of values of the argument \eqn{r} at which the
+    diagnostic should be computed.
+    This argument is usually not specified. There is a sensible default.
+  }
+  \item{breaks}{
+    Optional alternative to \code{r} for advanced use. 
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{model}{
+    Optional. A fitted point process model (object of
+    class \code{"ppm"}) to be re-fitted to the data
+    using \code{\link{update.ppm}}, if \code{object} is a point pattern.
+    Overrides the arguments \code{trend,interaction,rbord}.
+  }
+  \item{trend,interaction,rbord}{
+    Optional. Arguments passed to \code{\link{ppm}}
+    to fit a point process model to the data,
+    if \code{object} is a point pattern.
+    See \code{\link{ppm}} for details.
+  }
+  \item{truecoef}{
+    Optional. Numeric vector. If present, this will be treated as 
+    if it were the true coefficient vector of the point process model,
+    in calculating the diagnostic. Incompatible with \code{hi.res}.
+  }
+  \item{hi.res}{
+    Optional. List of parameters passed to \code{\link{quadscheme}}.
+    If this argument is present, the model will be
+    re-fitted at high resolution as specified by these parameters.
+    The coefficients
+    of the resulting fitted model will be taken as the true coefficients.
+    Then the diagnostic will be computed for the default
+    quadrature scheme, but using the high resolution coefficients.
+  }
+}
+\details{
+  This function
+  computes the pseudoscore test statistic
+  which can be used as a diagnostic for goodness-of-fit of a fitted
+  point process model.
+  
+  Consider a point process model fitted to \eqn{x}, with
+  conditional intensity
+  \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}.
+  For the purpose of testing goodness-of-fit, we regard the fitted model
+  as the null hypothesis.
+  The alternative hypothesis is a family of
+  hybrid models obtained by combining 
+  the fitted model with the Geyer saturation process
+  (see \code{\link{Geyer}}) with saturation parameter 1.
+  The family of alternatives includes
+  models that are more regular than the fitted model,
+  and others that are more clustered than the fitted model.
+
+  For any point pattern \eqn{x}, and any \eqn{r > 0}, let
+  \eqn{S(x,r)} be the number of points in \eqn{x} whose nearest
+  neighbour (the nearest other point in \eqn{x})
+  is closer than \eqn{r} units. Then the pseudoscore for the null model is
+  \deqn{
+    V(r) = \sum_i \Delta S(x_i, x, r ) - \int_W \Delta S(u,x,r) \lambda(u,x)
+    {\rm d} u
+  }{
+    V(r) = sum( Delta S(x[i], x, r)) - integral( Delta S(u,x, r) lambda(u,x) du)
+  }
+  where the \eqn{\Delta}{Delta} operator is
+  \deqn{
+    \Delta S(u,x,r) = S(x\cup\{u\}, r) - S(x\setminus\{u\}, r)
+  }{
+    Delta S(u,x, r) = S(x union u, r) - S(x setminus u, r)
+  }
+  the difference between the values of \eqn{S} for the
+  point pattern with and without the point \eqn{u}.
+
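+  As an informal sketch (not the implementation used internally),
+  the saturation statistic can be computed directly from
+  nearest-neighbour distances:
+  \preformatted{
+    # number of points of x whose nearest neighbour is closer than r
+    S <- function(x, r) sum(nndist(x) < r)
+  }
+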
+  According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have
+  mean zero if the model is correct (ignoring the fact that the
+  parameters of the model have been estimated). Hence \eqn{V(r)} can be
+  used as a diagnostic for goodness-of-fit.
+
+  The diagnostic \eqn{V(r)} is also called 
+  the \bold{pseudoresidual} of \eqn{S}. On the right
+  hand side of the equation for \eqn{V(r)} given above,
+  the sum over points of \eqn{x} is called the
+  \bold{pseudosum} and the integral is called the \bold{pseudocompensator}.
+}
+\value{
+  A function value table (object of class \code{"fv"}),
+  essentially a data frame of function values.
+
+  Columns in this data frame include \code{dat} for the pseudosum,
+  \code{com} for the compensator and \code{res} for the
+  pseudoresidual.
+  
+  There is a plot method for this class. See
+  \code{\link{fv.object}}.
+}
+\references{
+  Baddeley, A., Rubak, E. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2011)
+  Score, pseudo-score and residual
+  diagnostics for spatial point process models.
+  \emph{Statistical Science} \bold{26}, 613--646.
+}
+\author{
+  \adrian
+  
+  
+  \ege and Jesper \ifelse{latex}{\out{M\o ller}}{Moller}.
+}
+\seealso{
+  Alternative functions:
+  \code{\link{psstA}},
+  \code{\link{psst}},
+  \code{\link{Kres}},
+  \code{\link{Gres}}.
+}
+\examples{
+   X <- rStrauss(200,0.1,0.05)
+   plot(psstG(X))
+   plot(psstG(X, interaction=Strauss(0.05)))
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/pyramidal.Rd b/man/pyramidal.Rd
new file mode 100644
index 0000000..e5368a7
--- /dev/null
+++ b/man/pyramidal.Rd
@@ -0,0 +1,45 @@
+\name{pyramidal}
+\alias{pyramidal}
+\docType{data}
+\title{
+  Pyramidal Neurons in Cingulate Cortex
+}
+\description{
+  Point patterns giving the locations of pyramidal neurons in micrographs
+  from area 24, layer 2 of the cingulate cortex in the human brain.
+  There is one point pattern from each of 31 human subjects.
+  The subjects are divided into three groups:
+  controls (12 subjects), schizoaffective (9 subjects)
+  and schizophrenic (10 subjects).
+
+  Each point pattern is recorded in a unit square region; the unit of
+  measurement is unknown.
+  
+  These data were introduced and analysed by
+  Diggle, Lange and Benes (1991). 
+} 
+\format{
+  \code{pyramidal} is a hyperframe with 31 rows, one row for each
+  subject. It has a column named
+  \code{Neurons} containing the point patterns of neuron locations,
+  and a column named \code{group} which is a factor with levels
+  \code{"control", "schizoaffective", "schizophrenic"}
+  identifying the grouping of subjects.
+}
+\usage{data(pyramidal)}
+\source{
+  Peter Diggle's website.
+}
+\references{
+   Diggle, P.J., Lange, N. and Benes, F.M. (1991). Analysis of variance
+   for replicated spatial point patterns in clinical neuroanatomy.
+   \emph{Journal of the American Statistical Association}
+   \bold{86}, 618--625.
+}
+\examples{
+pyr <- pyramidal
+pyr$grp <- abbreviate(pyramidal$group, minlength=7)
+plot(pyr, quote(plot(Neurons, pch=16, main=grp)), main="Pyramidal Neurons")
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/qqplot.ppm.Rd b/man/qqplot.ppm.Rd
new file mode 100644
index 0000000..effc52b
--- /dev/null
+++ b/man/qqplot.ppm.Rd
@@ -0,0 +1,382 @@
+\name{qqplot.ppm}
+\alias{qqplot.ppm}
+\title{
+  Q-Q Plot of Residuals from Fitted Point Process Model
+}
+\description{
+  Given a point process model fitted to a point pattern,
+  produce a Q-Q plot based on residuals from the model.
+}
+\usage{
+  qqplot.ppm(fit, nsim=100, expr=NULL, \dots, type="raw",
+             style="mean", fast=TRUE, verbose=TRUE, plot.it=TRUE,
+             dimyx=NULL, nrep=if(fast) 5e4 else 1e5,
+             control=update(default.rmhcontrol(fit), nrep=nrep),
+             saveall=FALSE,
+             monochrome=FALSE,
+             limcol=if(monochrome) "black" else "red",
+             maxerr=max(100, ceiling(nsim/10)),
+             check=TRUE, repair=TRUE, envir.expr)
+}
+\arguments{
+  \item{fit}{
+    The fitted point process model, which is to be assessed
+    using the Q-Q plot. An object of class \code{"ppm"}.
+    Smoothed residuals obtained from this fitted model will provide the
+    ``data'' quantiles for the Q-Q plot.
+  }
+  \item{nsim}{
+    The number of simulations from the ``reference'' point process model.
+  }
+  \item{expr}{
+    Determines the simulation mechanism
+    which provides the ``theoretical'' quantiles for the
+    Q-Q plot. See Details.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{diagnose.ppm}} influencing the
+    computation of residuals.
+  }
+  \item{type}{
+    String indicating the type of residuals or weights to be used.
+    Current options are \code{"eem"}
+    for the Stoyan-Grabarnik exponential energy weights,
+    \code{"raw"} for the raw residuals,
+    \code{"inverse"} for the inverse-lambda residuals,
+    and \code{"pearson"} for the Pearson residuals.
+    A partial match is adequate.
+  }
+  \item{style}{
+    Character string controlling the type of Q-Q plot.
+    Options are \code{"classical"} and \code{"mean"}.
+    See Details.
+  }
+  \item{fast}{
+    Logical flag controlling the speed and accuracy of computation.
+    Use \code{fast=TRUE} for interactive use and \code{fast=FALSE}
+    for publication standard plots. See Details.
+  }
+  \item{verbose}{
+    Logical flag controlling whether the algorithm prints progress
+    reports during long computations.
+  }
+  \item{plot.it}{
+    Logical flag controlling whether the function produces a plot
+    or simply returns a value (silently).
+  }
+  \item{dimyx}{
+    Dimensions of the pixel grid on which the smoothed residual field
+    will be calculated. A vector of two integers.
+  }
+  \item{nrep}{
+    If \code{control} is absent, then \code{nrep} gives the
+    number of iterations of the Metropolis-Hastings algorithm
+    that should be used to generate one simulation of the fitted point process.
+  }
+  \item{control}{
+    List of parameters controlling the Metropolis-Hastings algorithm
+    \code{\link{rmh}} which generates each simulated realisation from
+    the model (unless the model is Poisson).
+    This list becomes the argument \code{control}
+    of \code{\link{rmh.default}}. It overrides \code{nrep}.
+  }
+  \item{saveall}{
+    Logical flag indicating whether to save all the intermediate
+    calculations. 
+  }
+  \item{monochrome}{
+    Logical flag indicating whether the plot should be
+    in black and white (\code{monochrome=TRUE}), or in colour
+    (\code{monochrome=FALSE}).
+  }
+  \item{limcol}{
+    String. The colour to be used when plotting the 95\% limit
+    curves.
+  }
+  \item{maxerr}{
+    Maximum number of failures tolerated while generating
+    simulated realisations. See Details.
+  }
+  \item{check}{
+    Logical value indicating whether to check the internal format
+    of \code{fit}. If there is any possibility that this object
+    has been restored from a dump file, or has otherwise lost track of
+    the environment where it was originally computed, set
+    \code{check=TRUE}. 
+  }
+  \item{repair}{
+    Logical value indicating whether to repair the internal format
+    of \code{fit}, if it is found to be damaged. 
+  }
+  \item{envir.expr}{
+    Optional. An environment in which the expression
+    \code{expr} should be evaluated.
+  }
+}
+\value{
+  An object of class \code{"qqppm"} containing the information
+  needed to reproduce the Q-Q plot.
+  Entries \code{x} and \code{y} are numeric vectors containing 
+  quantiles of the simulations and of the data, respectively.
+}
+\details{
+  This function generates a Q-Q plot of the residuals from a
+  fitted point process model. It is an addendum to the suite of
+  diagnostic plots produced by the function \code{\link{diagnose.ppm}},
+  kept separate because it is computationally intensive. The
+  quantiles of the theoretical distribution are estimated by simulation.
+
+  In classical statistics, a Q-Q plot of residuals is a useful
+  diagnostic for checking the distributional assumptions. Analogously,
+  in spatial statistics, a Q-Q plot of the (smoothed) residuals from a
+  fitted point process model is a useful way
+  to check the interpoint interaction part of the model
+  (Baddeley et al, 2005). The systematic part of the model
+  (spatial trend, covariate effects, etc) is assessed using
+  other plots made by \code{\link{diagnose.ppm}}.
+
+  The argument \code{fit} represents the fitted point process
+  model. It must be an object of class \code{"ppm"} (typically produced
+  by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}).
+  Residuals will be computed for this fitted model using
+  \code{\link{residuals.ppm}},
+  and the residuals will be kernel-smoothed to produce a ``residual
+  field''. The values of this residual field will provide the
+  ``data'' quantiles for the Q-Q plot.
+
+  The argument \code{expr} is not usually specified.
+  It provides a way to modify the ``theoretical'' or ``reference''
+  quantiles for the Q-Q plot.
+
+  In normal usage we set \code{expr=NULL}. The default
+  is to generate \code{nsim} simulated realisations
+  of the fitted model \code{fit}, re-fit this model to
+  each of the simulated patterns,
+  evaluate the residuals from
+  these fitted models, and use the kernel-smoothed residual field
+  from these fitted models as a sample from the reference distribution
+  for the Q-Q plot.
+
+  In advanced use, \code{expr} may be an \code{expression}.
+  It will be re-evaluated \code{nsim} times, and should include
+  random computations so that the results are not identical
+  each time. The result of evaluating \code{expr}
+  should be either a point pattern (object of class
+  \code{"ppp"}) or a fitted point process model (object of class
+  \code{"ppm"}). If the value is a point pattern, then the
+  original fitted model \code{fit} will be fitted to this new point
+  pattern using \code{\link{update.ppm}}, to yield another fitted
+  model. Smoothed residuals obtained from these
+  \code{nsim} fitted models will yield the ``theoretical'' quantiles for the
+  Q-Q plot.
+
+  Alternatively \code{expr} can be a list of point patterns,
+  or an \code{envelope} object that contains a list of point patterns
+  (typically generated by calling \code{\link{envelope}} with
+  \code{savepatterns=TRUE}). These point patterns will be used
+  as the simulated patterns. 
+
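+  For example (a sketch, assuming \code{fit} is the fitted model),
+  patterns saved by \code{\link{envelope}} can be re-used:
+  \preformatted{
+    E <- envelope(fit, Kest, nsim=39, savepatterns=TRUE)
+    qqplot.ppm(fit, nsim=39, expr=E)
+  }
+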
+  Simulation is performed (if \code{expr=NULL})
+  using the Metropolis-Hastings algorithm \code{\link{rmh}}.
+  Each simulated realisation is the result of
+  running the Metropolis-Hastings algorithm
+  from an independent random starting state each time.
+  The iterative and termination behaviour of the Metropolis-Hastings
+  algorithm are governed by the argument \code{control}.
+  See \code{\link{rmhcontrol}} for information about this argument.
+  As a shortcut, the argument \code{nrep} determines
+  the number of Metropolis-Hastings iterations used to generate
+  each simulated realisation, if \code{control} is absent.
+
+  By default, simulations are generated in an expanded
+  window. Use the argument \code{control} to change this,
+  as explained in the section on \emph{Warning messages}.
+  
+  The argument \code{type} selects the type of residual or weight
+  that will be computed. For options, see \code{\link{diagnose.ppm}}.
+
+  The argument \code{style} determines the type of Q-Q plot. It is
+  highly recommended to use the default, \code{style="mean"}.
+  \describe{
+    \item{\code{style="classical"}}{
+      The quantiles of the residual field for the data (on the \eqn{y}
+      axis) are plotted against the
+      quantiles of the \bold{pooled} simulations (on the \eqn{x} axis).
+      This plot is biased, and therefore difficult to interpret,
+      because of strong autocorrelations in the residual field
+      and the large differences in sample size.
+    }
+    \item{\code{style="mean"}}{
+      The order statistics of the residual field for the data are plotted
+      against the sample means, over the \code{nsim} simulations,
+      of the corresponding order statistics of the residual field
+      for the simulated datasets.
+      Dotted lines show the 2.5 and 97.5 percentiles, over the
+      \code{nsim} simulations, of each order statistic.
+    }
+  }
+
+  The argument \code{fast} is a simple way to control
+  the accuracy and speed of computation.
+  If \code{fast=FALSE}, the residual field is computed on
+  a fine grid of pixels (by default 100 by 100 pixels, see below)
+  and the Q-Q plot is based on the complete set of order statistics
+  (usually 10,000 quantiles).
+  If \code{fast=TRUE}, the residual field is computed on a coarse
+  grid (at most 40 by 40 pixels) and the Q-Q plot is based on the
+  \emph{percentiles} only. This is about 7 times faster.
+  It is recommended to use \code{fast=TRUE} for interactive data
+  analysis and \code{fast=FALSE} for definitive plots for
+  publication.
+
+  The argument \code{dimyx} gives full control over the resolution of the
+  pixel grid used to calculate the smoothed residuals.
+  Its interpretation is the same as the argument \code{dimyx}
+  to the function \code{\link{as.mask}}.
+  Note that \code{dimyx[1]} is the number of
+  pixels in the \eqn{y} direction, and \code{dimyx[2]} is the number
+  in the \eqn{x} direction. 
+  If \code{dimyx} is not present, then the default pixel grid dimensions
+  are controlled by \code{spatstat.options("npixel")}.
+
+  Since the computation is so time-consuming, \code{qqplot.ppm} returns
+  a list containing all the data necessary to re-display the Q-Q plot.
+  It is advisable to assign the result of \code{qqplot.ppm} to something
+  (or use \code{.Last.value} if you forgot to).
+  The return value is an object of class \code{"qqppm"}. There are methods for
+  \code{\link{plot.qqppm}} and \code{\link{print.qqppm}}. See the
+  Examples.
+
+  The argument \code{saveall} is usually set to \code{FALSE}.
+  If \code{saveall=TRUE}, then the intermediate results of calculation for each
+  simulated realisation are saved and returned. The return value
+  includes a 3-dimensional array \code{sim} containing the
+  smoothed residual field images for each of the \code{nsim} 
+  realisations. When \code{saveall=TRUE}, the return value is an object of very
+  large size, and should not be saved on disk.
+
+  Errors may occur during the simulation process, because
+  random data are generated. For example:
+  \itemize{
+    \item one of the simulated patterns may be empty.
+    \item one of the simulated patterns may 
+    cause an error in the code that fits the point process model.
+    \item the user-supplied argument \code{expr} may have a bug.
+  }
+  Empty point patterns do not cause a problem for the code,
+  but they are reported.
+  Other problems that would lead to a crash are trapped; 
+  the offending simulated data are discarded, and the simulation is
+  retried. The argument \code{maxerr} determines the maximum number of
+  times that such errors will be tolerated (mainly as a
+  safeguard against an infinite loop).
+}
+
+\section{Side Effects}{
+  Produces a Q-Q plot if \code{plot.it} is \code{TRUE}.
+}
+\section{Warning messages}{
+  A warning message will be issued if any of the simulations
+  trapped an error (a potential crash).
+  
+  A warning message will be issued if all, or many, of the
+  simulated point patterns are empty.
+  This usually indicates a problem with the simulation procedure.
+  
+  The default behaviour of \code{qqplot.ppm} is to simulate patterns 
+  on an expanded window (specified through the argument
+  \code{control}) in order to avoid edge effects.
+  The model's trend is extrapolated over this expanded
+  window. If the trend is strongly inhomogeneous, the 
+  extrapolated trend may have very large (or even infinite)
+  values. This can cause the simulation algorithm to 
+  produce empty patterns.
+
+  The only way to suppress this problem entirely is to
+  prohibit the expansion of the window, by setting
+  the \code{control} argument to something like
+  \code{control=list(nrep=1e6, expand=1)}. Here \code{expand=1}
+  means there will be no expansion. See \code{\link{rmhcontrol}}
+  for more information about the argument \code{control}.
+}
+
+\references{
+  Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005)
+  Residual analysis for spatial point processes.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{67}, 617--666.
+
+  Stoyan, D. and Grabarnik, P. (1991)
+  Second-order characteristics for stochastic structures connected with
+  Gibbs point processes.
+  \emph{Mathematische Nachrichten} \bold{151}, 95--100.
+}
+\seealso{
+ \code{\link{diagnose.ppm}},
+ \code{\link{lurking}},
+ \code{\link{residuals.ppm}},
+ \code{\link{eem}},
+ \code{\link{ppm.object}},
+ \code{\link{ppm}},
+ \code{\link{rmh}},
+ \code{\link{rmhcontrol}}
+}
+\examples{
+    data(cells)
+
+    fit <- ppm(cells, ~1, Poisson())
+    diagnose.ppm(fit)  # no suggestion of departure from stationarity
+    \dontrun{qqplot.ppm(fit, 80)  # strong evidence of non-Poisson interaction}
+    \testonly{qqplot.ppm(fit, 4)}
+
+    \dontrun{
+     diagnose.ppm(fit, type="pearson")  
+     qqplot.ppm(fit, type="pearson")
+    }
+    \testonly{qqplot.ppm(fit, 4, type="pearson")}
+
+    ###########################################
+    ## oops, I need the plot coordinates
+    mypreciousdata <- .Last.value
+    \dontrun{mypreciousdata <- qqplot.ppm(fit, type="pearson")}
+    \testonly{mypreciousdata <- qqplot.ppm(fit, 4, type="pearson")}
+    plot(mypreciousdata)
+
+    ######################################################
+    # Q-Q plots based on fixed n
+    # The above QQ plots used simulations from the (fitted) Poisson process.
+    # But I want to simulate conditional on n, instead of Poisson
+    # Do this by setting rmhcontrol(p=1)
+    fixit <- list(p=1)
+    \dontrun{qqplot.ppm(fit, 100, control=fixit)}
+    \testonly{qqplot.ppm(fit, 4, control=fixit)}
+
+    ######################################################
+    # Inhomogeneous Poisson data
+    X <- rpoispp(function(x,y){1000 * exp(-3*x)}, 1000)
+    plot(X)
+    # Inhomogeneous Poisson model
+    fit <- ppm(X, ~x, Poisson())
+    \dontrun{qqplot.ppm(fit, 100)}
+    \testonly{qqplot.ppm(fit, 4)}
+    # conclusion: fitted inhomogeneous Poisson model looks OK
+
+    ######################################################
+    # Advanced use of 'expr' argument
+    # 
+    # set the initial conditions in Metropolis-Hastings algorithm
+    # 
+    expr <- expression(rmh(fit, start=list(n.start=42), verbose=FALSE))
+    \dontrun{qqplot.ppm(fit, 100, expr)}
+    \testonly{qqplot.ppm(fit, 4, expr)}
+
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
+\keyword{hplot}
diff --git a/man/quad.object.Rd b/man/quad.object.Rd
new file mode 100644
index 0000000..ca41eb8
--- /dev/null
+++ b/man/quad.object.Rd
@@ -0,0 +1,87 @@
+\name{quad.object}
+\alias{quad.object} %DoNotExport
+\title{Class of Quadrature Schemes}
+\description{
+  A class \code{"quad"} to represent a quadrature scheme.
+}
+\details{
+  A (finite) quadrature scheme is a list of quadrature points
+  \eqn{u_j}{u[j]} and associated weights \eqn{w_j}{w[j]}
+  which is used to approximate an integral by a finite sum:
+  \deqn{
+    \int f(x) dx \approx \sum_j f(u_j) w_j
+  }{
+    integral(f(x) dx) ~= sum( f(u[j]) w[j] )
+  }
+  Given a point pattern dataset, a \emph{Berman-Turner}
+  quadrature scheme is one which includes all these data points,
+  as well as a nonzero number of other (``dummy'') points.
+
+  These quadrature schemes are used to approximate the
+  pseudolikelihood of a point process, in the method of
+  Baddeley and Turner (2000) (see Berman and Turner (1992)).
+  Accuracy and computation time both increase with the number of
+  points in the quadrature scheme.
+
+  An object of class \code{"quad"} represents a Berman-Turner
+  quadrature scheme. It can be passed as an argument to
+  the model-fitting function \code{\link{ppm}}, which
+  requires a quadrature scheme.
+
+  An object of this class contains at least the following elements:
+  \tabular{ll}{
+    \code{data}: \tab an object of class \code{"ppp"} \cr
+                 \tab giving the locations (and marks) of the data points.\cr
+    \code{dummy}: \tab an object of class \code{"ppp"} \cr
+                 \tab giving the locations (and marks) of the dummy points.\cr
+    \code{w}: \tab vector of nonnegative weights for the quadrature points\cr
+  }
+  Users are strongly advised not to manipulate these entries
+  directly.
+  
+  The domain of quadrature is specified by \code{Window(dummy)}
+  while the observation window (if this needs to be specified
+  separately) is taken to be \code{Window(data)}. 
+
+  The weights vector \code{w} may also have an attribute
+  \code{attr(w, "zeroes")} equivalent to the logical vector
+  \code{(w == 0)}. If this is absent then all points are known to
+  have positive weights.
+  
+  To create an object of class \code{"quad"},
+  users would typically call the
+  high level function \code{\link{quadscheme}}.
+  (They are actually
+  created by the low level function \code{quad}.)
+  
+  Entries are extracted from a \code{"quad"} object by the functions
+  \code{x.quad},
+  \code{y.quad},
+  \code{w.quad} and
+  \code{marks.quad},
+  which extract the \eqn{x} coordinates, \eqn{y} coordinates,
+  weights, and marks, respectively. The function
+  \code{n.quad} returns the total number of quadrature points
+  (dummy plus data).
+
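+  As a brief sketch of how these accessors fit together
+  (with a toy integrand chosen purely for illustration),
+  the quadrature rule displayed above can be applied directly:
+  \preformatted{
+    Q <- quadscheme(cells)
+    f <- function(x, y) exp(3 + 2 * x)   # toy integrand
+    sum(w.quad(Q) * f(x.quad(Q), y.quad(Q)))   # approximate integral of f
+  }
+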
+  An object of class \code{"quad"} can be converted into an ordinary
+  point pattern by the function \code{\link{union.quad}} which simply
+  takes the union of the data and dummy points.
+
+  Quadrature schemes can be plotted using \code{\link{plot.quad}}
+  (a method for the generic \code{\link{plot}}).
+}
+\seealso{
+  \code{\link{quadscheme}},
+  \code{\link{ppm}}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{attribute}
+ 
+ 
diff --git a/man/quad.ppm.Rd b/man/quad.ppm.Rd
new file mode 100644
index 0000000..ce78d3b
--- /dev/null
+++ b/man/quad.ppm.Rd
@@ -0,0 +1,95 @@
+\name{quad.ppm}
+\alias{quad.ppm}
+\title{Extract Quadrature Scheme Used to Fit a Point Process Model}
+\description{
+  Given a fitted point process model,
+  this function extracts the 
+  quadrature scheme used to fit the model.
+}
+\usage{
+  quad.ppm(object, drop=FALSE, clip=FALSE)
+}
+\arguments{
+  \item{object}{
+    fitted point process model (an object of class \code{"ppm"}
+    or \code{"kppm"} or \code{"lppm"}).
+  }
+  \item{drop}{
+    Logical value determining whether to delete quadrature points
+    that were not used to fit the model.
+  }
+  \item{clip}{
+    Logical value determining whether to erode the window,
+    if \code{object} was fitted using the border correction.
+    See Details.
+  }
+}
+\value{
+  A quadrature scheme (object of class \code{"quad"}).
+}
+\details{
+  An object of class \code{"ppm"} represents a point process model
+  that has been fitted to data. It is typically produced by
+  the model-fitting algorithm \code{\link{ppm}}.
+  
+  The maximum pseudolikelihood algorithm in \code{\link{ppm}}
+  approximates the pseudolikelihood
+  integral by a sum over a finite set of quadrature points,
+  which is constructed by augmenting
+  the original data point pattern by a set of ``dummy'' points.
+  The fitted model object returned by \code{\link{ppm}}
+  contains complete information about this quadrature scheme.
+  See \code{\link{ppm}} or \code{\link{ppm.object}} for further
+  information.
+  
+  This function \code{quad.ppm} extracts the quadrature scheme.
+  A typical use of this function would be to inspect the quadrature scheme
+  (points and weights) to gauge the accuracy of the approximation to the
+  exact pseudolikelihood.
+
+  Some quadrature points may not have been used in
+  fitting the model. This happens if the border correction is used,
+  and in other cases (e.g. when the value of a covariate is \code{NA}
+  at these points). The argument \code{drop} specifies whether these
+  unused quadrature points shall be deleted (\code{drop=TRUE}) or
+  retained (\code{drop=FALSE}) in the return value.
+
+  The quadrature scheme has a \emph{window}, which by default is set to
+  equal the window of the original data. However this window may be
+  larger than the actual domain of integration of the pseudolikelihood
+  or composite likelihood that was used to fit the model.
+  If \code{clip=TRUE} then the window of the quadrature scheme is
+  set to the actual domain of integration. This option only has an effect
+  when the model was fitted using the border correction; then
+  the window is obtained by eroding the original data window
+  by the border correction distance. 
+  
+  See \code{\link{ppm.object}} for a list of all operations that can be
+  performed on objects of class \code{"ppm"}.
+  See \code{\link{quad.object}} for a list of all operations that can be
+  performed on objects of class \code{"quad"}.
+
+  This function can also be applied to objects of class \code{"kppm"}
+  and \code{"lppm"}.
+}
+\seealso{
+  \code{\link{ppm.object}},
+  \code{\link{quad.object}},
+  \code{\link{ppm}}
+}
+\examples{
+ fit <- ppm(cells ~1, Strauss(r=0.1))
+ Q <- quad.ppm(fit)
+ \dontrun{plot(Q)}
+ npoints(Q$data)
+ npoints(Q$dummy)
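+ # clip=TRUE: window eroded by the border correction distance (see Details)
+ Qc <- quad.ppm(fit, clip=TRUE)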
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
+\keyword{models}
diff --git a/man/quadrat.test.Rd b/man/quadrat.test.Rd
new file mode 100644
index 0000000..f60e873
--- /dev/null
+++ b/man/quadrat.test.Rd
@@ -0,0 +1,281 @@
+\name{quadrat.test}
+\alias{quadrat.test}
+\alias{quadrat.test.ppp}
+\alias{quadrat.test.ppm}
+\alias{quadrat.test.quadratcount}
+\title{Dispersion Test for Spatial Point Pattern Based on
+       Quadrat Counts}
+\description{
+  Performs a test of Complete Spatial Randomness
+  for a given point pattern, based on quadrat counts.
+  Alternatively performs a goodness-of-fit test of a fitted
+  inhomogeneous Poisson model.
+  By default performs chi-squared tests; can also perform
+  Monte Carlo based tests.
+}
+\usage{
+quadrat.test(X, ...)
+
+\method{quadrat.test}{ppp}(X, nx=5, ny=nx,
+                           alternative=c("two.sided", "regular", "clustered"),
+                           method=c("Chisq", "MonteCarlo"),
+                           conditional=TRUE, CR=1,
+                           lambda=NULL,
+                           ...,
+                           xbreaks=NULL, ybreaks=NULL, tess=NULL,
+                           nsim=1999)
+
+\method{quadrat.test}{ppm}(X, nx=5, ny=nx,
+                           alternative=c("two.sided", "regular", "clustered"),
+                           method=c("Chisq", "MonteCarlo"),
+                           conditional=TRUE, CR=1,
+                           ...,
+                           xbreaks=NULL, ybreaks=NULL, tess=NULL,
+                           nsim=1999)
+
+\method{quadrat.test}{quadratcount}(X,
+                          alternative=c("two.sided", "regular", "clustered"),
+                          method=c("Chisq", "MonteCarlo"),
+                          conditional=TRUE, CR=1,
+                          lambda=NULL, 
+                          ...,
+                          nsim=1999)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"})
+    to be subjected to the goodness-of-fit test.
+    Alternatively a fitted point process model (object of class
+    \code{"ppm"}) to be tested.
+    Alternatively \code{X} can be the result of applying
+    \code{\link{quadratcount}} to a point pattern.
+  }
+  \item{nx,ny}{
+    Numbers of quadrats in the \eqn{x} and \eqn{y} directions.
+    Incompatible with \code{xbreaks} and \code{ybreaks}.
+  }
+  \item{alternative}{
+    Character string (partially matched) specifying the alternative
+    hypothesis.
+  }
+  \item{method}{
+    Character string (partially matched) specifying the test to use:
+    either \code{method="Chisq"} for the chi-squared test (the default),
+    or \code{method="MonteCarlo"} for a Monte Carlo test.
+  }
+  \item{conditional}{
+    Logical.  Should the Monte Carlo test be conducted
+    conditionally upon the observed number of points of the pattern?
+    Ignored if \code{method="Chisq"}.
+  }
+  \item{CR}{
+    Optional. Numerical value of the index \eqn{\lambda}{lambda}
+    for the Cressie-Read test statistic. 
+  }
+  \item{lambda}{
+    Optional. Pixel image (object of class \code{"im"})
+    or function (class \code{"funxy"}) giving the predicted
+    intensity of the point process.
+  }
+  \item{\dots}{Ignored.}
+  \item{xbreaks}{
+    Optional. Numeric vector giving the \eqn{x} coordinates of the
+    boundaries of the quadrats. Incompatible with \code{nx}.
+  }
+  \item{ybreaks}{
+    Optional. Numeric vector giving the \eqn{y} coordinates of the
+    boundaries of the quadrats. Incompatible with \code{ny}.
+  }
+  \item{tess}{
+    Tessellation (object of class \code{"tess"} or something acceptable
+    to \code{\link{as.tess}}) determining the
+    quadrats. Incompatible with \code{nx, ny, xbreaks, ybreaks}.
+  }
+  \item{nsim}{
+    The number of simulated samples to generate when
+    \code{method="MonteCarlo"}. 
+  }
+}
+\details{
+  These functions perform \eqn{\chi^2}{chi^2} tests or Monte Carlo tests
+  of goodness-of-fit for a point process model, based on quadrat counts.
+
+  The function \code{quadrat.test} is generic, with methods for
+  point patterns (class \code{"ppp"}), split point patterns
+  (class \code{"splitppp"}), point process models
+  (class \code{"ppm"}) and quadrat count tables (class \code{"quadratcount"}).
+
+  \itemize{
+    \item
+    if \code{X} is a point pattern, we test the null hypothesis
+    that the data pattern is a realisation of Complete Spatial
+    Randomness (the uniform Poisson point process). Marks in the point
+    pattern are ignored. (If \code{lambda} is given then the null
+    hypothesis is the Poisson process with intensity \code{lambda}.)
+    \item 
+    if \code{X} is a split point pattern, then for each of the
+    component point patterns (taken separately) we test 
+    the null hypotheses of Complete Spatial Randomness.
+    See \code{\link{quadrat.test.splitppp}} for documentation.
+    \item
+    If \code{X} is a fitted point process model, then it should be
+    a Poisson point process model. The 
+    data to which this model was fitted are extracted from the model
+    object, and are treated as the data point pattern for the test.
+    We test the null hypothesis 
+    that the data pattern is a realisation of the (inhomogeneous) Poisson point
+    process specified by \code{X}.
+  }
+
+  In all cases, the window of observation is divided
+  into tiles, and the number of data points in each tile is
+  counted, as described in \code{\link{quadratcount}}.
+  The quadrats are rectangular by default, or may be regions of arbitrary shape
+  specified by the argument \code{tess}.
+  The expected number of points in each quadrat is also calculated,
+  as determined by CSR (in the first case) or by the fitted model
+  (in the second case).
+  Then the Pearson \eqn{X^2} statistic 
+  \deqn{
+    X^2 = \sum \frac{(\mbox{observed} - \mbox{expected})^2}{\mbox{expected}}
+  }{
+    X^2 = sum((observed - expected)^2/expected)
+  }
+  is computed.
+  
+  If \code{method="Chisq"} then a \eqn{\chi^2}{chi^2} test of
+  goodness-of-fit is performed by comparing the test statistic
+  to the \eqn{\chi^2}{chi^2} distribution
+  with \eqn{m-k} degrees of freedom, where \code{m} is the number of
+  quadrats and \eqn{k} is the number of fitted parameters
+  (equal to 1 for \code{quadrat.test.ppp}). The default is to
+  compute the \emph{two-sided} \eqn{p}-value, so that the test will
+  be declared significant if \eqn{X^2} is either very large or very
+  small. One-sided \eqn{p}-values can be obtained by specifying the
+  \code{alternative}. An important requirement of the
+  \eqn{\chi^2}{chi^2} test is that the expected counts in each quadrat
+  be greater than 5.
+
+  If \code{method="MonteCarlo"} then a Monte Carlo test is performed,
+  obviating the need for all expected counts to be at least 5.  In the
+  Monte Carlo test, \code{nsim} random point patterns are generated
+  from the null hypothesis (either CSR or the fitted point process
+  model). The Pearson \eqn{X^2} statistic is computed as above.
+  The \eqn{p}-value is determined by comparing the \eqn{X^2}
+  statistic for the observed point pattern, with the values obtained
+  from the simulations. Again the default is to
+  compute the \emph{two-sided} \eqn{p}-value.
+
+  If \code{conditional} is \code{TRUE} then the simulated samples are
+  generated from the multinomial distribution with the number of \dQuote{trials}
+  equal to the number of observed points and the vector of probabilities
+  equal to the expected counts divided by the sum of the expected counts.
+  Otherwise the simulated samples are independent Poisson counts, with
+  means equal to the expected counts.
+
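+  As a sketch (the helper name is hypothetical), one conditional simulation
+  draws the quadrat counts from this multinomial distribution:
+  \preformatted{
+    # n = observed total number of points; expect = expected counts
+    sim.counts <- function(expect, n)
+      rmultinom(1, size=n, prob=expect/sum(expect))
+  }
+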
+  If the argument \code{CR} is given, then instead of the
+  Pearson \eqn{X^2} statistic, the Cressie-Read (1984) power divergence
+  test statistic
+  \deqn{
+    2nI = \frac{2}{\lambda(\lambda+1)}
+           \sum_i X_i \left[ \left( \frac{X_i}{E_i} \right)^\lambda - 1 \right]
+  }{
+    2nI = (2/(lambda * (lambda+1))) * sum(X[i] * ((X[i]/E[i])^lambda - 1))
+  }
+  is computed, where \eqn{X_i}{X[i]} is the \eqn{i}th observed count
+  and \eqn{E_i}{E[i]} is the corresponding expected count,
+  and the exponent \eqn{\lambda}{lambda} is equal to \code{CR}.
+  The value \code{CR=1} gives the Pearson \eqn{X^2} statistic;
+  \code{CR=0} gives the likelihood ratio test statistic \eqn{G^2};
+  \code{CR=-1/2} gives the Freeman-Tukey statistic \eqn{T^2};
+  \code{CR=-1} gives the modified likelihood ratio test statistic \eqn{GM^2};
+  and \code{CR=-2} gives Neyman's modified statistic \eqn{NM^2}.
+  In all cases the asymptotic distribution of this test statistic is
+  the same \eqn{\chi^2}{chi^2} distribution as above.
+
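+  As a sketch (the helper name is hypothetical; the values
+  \eqn{\lambda=0}{lambda=0} and \eqn{\lambda=-1}{lambda=-1}
+  are defined as limits and need special treatment),
+  the statistic can be computed from observed and expected counts:
+  \preformatted{
+    cr.stat <- function(obs, expect, lambda=1)
+      2/(lambda*(lambda+1)) * sum(obs * ((obs/expect)^lambda - 1))
+  }
+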
+  The return value is an object of class \code{"htest"}.
+  Printing the object gives comprehensible output
+  about the outcome of the test.
+
+  The return value also belongs to
+  the special class \code{"quadrattest"}. Plotting the object
+  will display the quadrats, annotated by their observed and expected
+  counts and the Pearson residuals. See the examples.
+}
+\seealso{
+  \code{\link{quadrat.test.splitppp}},
+  \code{\link{quadratcount}},
+  \code{\link{quadrats}},
+  \code{\link{quadratresample}},
+  \code{\link{chisq.test}},
+  \code{\link{cdf.test}}.
+
+  To test a Poisson point process model against a specific alternative,
+  use \code{\link{anova.ppm}}.
+}
+\value{
+  An object of class \code{"htest"}. See \code{\link{chisq.test}}
+  for explanation. 
+
+  The return value is also an object of the special class
+  \code{"quadrattest"}, and there is a plot method for this class.
+  See the examples.
+}
+\references{
+  Cressie, N. and Read, T.R.C. (1984)
+  Multinomial goodness-of-fit tests.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{46}, 440--464. 
+}
+\examples{
+  data(simdat)
+  quadrat.test(simdat)
+  quadrat.test(simdat, 4, 3)
+
+  quadrat.test(simdat, alternative="regular")
+  quadrat.test(simdat, alternative="clustered")
+
+  # Using Monte Carlo p-values
+  quadrat.test(swedishpines) # Get warning, small expected values.
+  \dontrun{
+    quadrat.test(swedishpines, method="M", nsim=4999)
+    quadrat.test(swedishpines, method="M", nsim=4999, conditional=FALSE)
+  }
+  \testonly{
+    quadrat.test(swedishpines, method="M", nsim=19)
+    quadrat.test(swedishpines, method="M", nsim=19, conditional=FALSE)
+  }
+
+  # quadrat counts
+  qS <- quadratcount(simdat, 4, 3)
+  quadrat.test(qS)
+
+  # fitted model: inhomogeneous Poisson
+  fitx <- ppm(simdat, ~x, Poisson())
+  quadrat.test(fitx)
+
+  te <- quadrat.test(simdat, 4)
+  residuals(te)  # Pearson residuals
+
+  plot(te)
+
+  plot(simdat, pch="+", cols="green", lwd=2)
+  plot(te, add=TRUE, col="red", cex=1.4, lty=2, lwd=3)
+
+  sublab <- eval(substitute(expression(p[chi^2]==z),
+                       list(z=signif(te$p.value,3))))
+  title(sub=sublab, cex.sub=3)
+
+  # quadrats of irregular shape
+  B <- dirichlet(runifpoint(6, Window(simdat)))
+  qB <- quadrat.test(simdat, tess=B)
+  plot(simdat, main="quadrat.test(simdat, tess=B)", pch="+")
+  plot(qB, add=TRUE, col="red", lwd=2, cex=1.2)
+
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{htest}
diff --git a/man/quadrat.test.mppm.Rd b/man/quadrat.test.mppm.Rd
new file mode 100644
index 0000000..0510af9
--- /dev/null
+++ b/man/quadrat.test.mppm.Rd
@@ -0,0 +1,122 @@
+\name{quadrat.test.mppm}
+\alias{quadrat.test.mppm}
+\title{Chi-Squared Test for Multiple Point Process Model Based on
+  Quadrat Counts}
+\description{
+  Performs a chi-squared goodness-of-fit test of a 
+  Poisson point process model fitted to multiple point patterns.
+}
+\usage{
+  \method{quadrat.test}{mppm}(X, ...)
+}
+\arguments{
+  \item{X}{
+    An object of class \code{"mppm"} representing a
+    point process model fitted to multiple point patterns.
+    It should be a Poisson model.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{quadrat.test.ppm}}
+    which determine the size of the quadrats.
+  }
+}
+\details{
+  This function performs a \eqn{\chi^2}{chi^2} test of goodness-of-fit
+  for a Poisson point process model, based on quadrat counts.
+  It can also be used to perform a test of Complete Spatial Randomness
+  for a list of point patterns.
+
+  The function \code{quadrat.test} is generic, with methods for
+  point patterns (class \code{"ppp"}), point process models
+  (class \code{"ppm"}) and
+  multiple point process models (class
+  \code{"mppm"}).
+
+  For this function, the argument \code{X} should be a
+  multiple point process model (object of class \code{"mppm"})
+  obtained by fitting a point process model to a list of
+  point patterns using the function \code{\link{mppm}}.
+
+  To perform the test, the data point patterns are extracted from \code{X}.
+  For each point pattern
+  \itemize{
+    \item the window of observation is divided
+    into rectangular tiles, and the number of data points in each tile is
+    counted, as described in \code{\link{quadratcount}}.
+    \item 
+    The expected number of points in each quadrat is calculated,
+    as determined by the fitted model.
+  }
+  Then we perform a single \eqn{\chi^2}{chi^2} test of goodness-of-fit
+  based on these observed and expected counts. 
+}
+\section{Testing Complete Spatial Randomness}{
+  If the intention is to test Complete Spatial Randomness (CSR)
+  there are two options:
+  \itemize{
+    \item CSR with the same intensity of points in each point pattern;
+    \item CSR with a different, unrelated intensity of points in each
+    point pattern.
+  }
+  In the first case, 
+  suppose \code{P} is a list of point patterns we want to test.
+  Then fit the multiple model \code{fit1 <- mppm(X~1, hyperframe(X=P))}
+  which signifies a Poisson point process model with a constant intensity.
+  Then apply \code{quadrat.test(fit1)}.
+
+  In the second case, fit the model
+  \code{fit2 <- mppm(X~id, hyperframe(X=P))}
+  which signifies a Poisson point process with a different constant
+  intensity for each point pattern. Then apply \code{quadrat.test(fit2)}.
+}
+
+\value{
+  An object of class \code{"htest"}.
+  Printing the object gives comprehensible output
+  about the outcome of the test.
+  The \eqn{p}-value of the test is stored in the
+  component \code{p.value}.
+
+  The return value also belongs to
+  the special class \code{"quadrattest"}. Plotting the object
+  will display, for each window, the position of the quadrats,
+  annotated by their observed and expected
+  counts and the Pearson residuals. See the examples.
+
+  The return value also has an attribute \code{"components"}
+  which is a list containing the results of 
+  \eqn{\chi^2}{chi^2} tests of goodness-of-fit
+  for each individual point pattern.
+}
+\seealso{
+  \code{\link{mppm}},
+  \code{\link{quadrat.test}}
+}
+\examples{
+  H <- hyperframe(X=waterstriders)
+  # Poisson with constant intensity for all patterns
+  fit1 <- mppm(X~1, H)
+  quadrat.test(fit1, nx=2)
+
+  # uniform Poisson with different intensity for each pattern
+  fit2 <- mppm(X ~ id, H)
+  quadrat.test(fit2, nx=2)
+}
+\references{
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  London: Chapman and Hall/CRC Press. 
+}
+\author{
+  \adrian, Ida-Maria Sintorn and Leanne Bischoff.
+  Implemented by 
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{htest}
+
diff --git a/man/quadrat.test.splitppp.Rd b/man/quadrat.test.splitppp.Rd
new file mode 100644
index 0000000..c15ff10
--- /dev/null
+++ b/man/quadrat.test.splitppp.Rd
@@ -0,0 +1,66 @@
+\name{quadrat.test.splitppp}
+\alias{quadrat.test.splitppp}
+\title{Dispersion Test of CSR for Split Point Pattern Based on
+       Quadrat Counts}
+\description{
+  Performs a test of Complete Spatial Randomness
+  for each of the component patterns in a split point pattern,
+  based on quadrat counts.
+  By default performs chi-squared tests; can also perform
+  Monte Carlo based tests.
+}
+\usage{
+\method{quadrat.test}{splitppp}(X, ..., df=NULL, df.est=NULL, Xname=NULL)
+}
+\arguments{
+  \item{X}{
+    A split point pattern (object of class \code{"splitppp"}),
+    each component of which will be subjected to the goodness-of-fit test.
+  }
+  \item{\dots}{Arguments passed to \code{\link{quadrat.test.ppp}}.}
+  \item{df,df.est,Xname}{Arguments passed to \code{\link{pool.quadrattest}}.}
+}
+\details{
+  The function \code{quadrat.test} is generic, with methods for
+  point patterns (class \code{"ppp"}), split point patterns
+  (class \code{"splitppp"}) and point process models
+  (class \code{"ppm"}).
+
+  If \code{X} is a split point pattern, then for each of the
+  component point patterns (taken separately) we test 
+  the null hypotheses of Complete Spatial Randomness,
+  then combine the result into a single test.
+
+  The method \code{quadrat.test.ppp} is applied to each
+  component point pattern. Then the results are pooled using
+  \code{\link{pool.quadrattest}} to obtain a single test.
+}
+\seealso{
+  \code{\link{quadrat.test}},
+  \code{\link{quadratcount}},
+  \code{\link{quadrats}},
+  \code{\link{quadratresample}},
+  \code{\link{chisq.test}},
+  \code{\link{cdf.test}}.
+
+  To test a Poisson point process model against a specific Poisson alternative,
+  use \code{\link{anova.ppm}}.
+}
+\value{
+  An object of class \code{"quadrattest"} which can be printed and
+  plotted.
+}
+\examples{
+ data(humberside)
+ qH <- quadrat.test(split(humberside), 2, 3)
+ plot(qH)
+ qH
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{htest}
diff --git a/man/quadratcount.Rd b/man/quadratcount.Rd
new file mode 100644
index 0000000..0dcbba3
--- /dev/null
+++ b/man/quadratcount.Rd
@@ -0,0 +1,179 @@
+\name{quadratcount}
+\alias{quadratcount}
+\alias{quadratcount.ppp}
+\alias{quadratcount.splitppp}
+\title{Quadrat counting for a point pattern}
+\description{
+  Divides window into quadrats and 
+  counts the numbers of points in each quadrat.
+}
+\usage{
+  quadratcount(X, \dots)
+
+  \method{quadratcount}{ppp}(X, nx=5, ny=nx, \dots,
+               xbreaks=NULL, ybreaks=NULL, tess=NULL)
+
+  \method{quadratcount}{splitppp}(X, \dots)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"})
+    or a split point pattern (object of class \code{"splitppp"}).
+  }
+  \item{nx,ny}{
+    Numbers of rectangular quadrats in the \eqn{x} and \eqn{y} directions.
+    Incompatible with \code{xbreaks} and \code{ybreaks}.
+  }
+  \item{\dots}{Additional arguments passed to \code{quadratcount.ppp}.}
+  \item{xbreaks}{
+    Numeric vector giving the \eqn{x} coordinates of the
+    boundaries of the rectangular quadrats. Incompatible with \code{nx}.
+  }
+  \item{ybreaks}{
+    Numeric vector giving the \eqn{y} coordinates of the
+    boundaries of the rectangular quadrats. Incompatible with \code{ny}.
+  }
+  \item{tess}{
+    Tessellation (object of class \code{"tess"} or something acceptable
+    to \code{\link{as.tess}}) determining the quadrats. Incompatible
+    with \code{nx,ny,xbreaks,ybreaks}.
+  }
+}
+\value{
+  The value of \code{quadratcount.ppp} is a 
+  contingency table containing the number of points in each
+  quadrat. The table is also an object of the
+  special class \code{"quadratcount"}
+  and there is a plot method for this class.
+
+  The value of \code{quadratcount.splitppp} is a list of such
+  contingency tables, each containing the quadrat counts for one of the
+  component point patterns in \code{X}.
+  This list also has the class \code{"solist"} which has
+  print and plot methods.
+}
+\details{
+  Quadrat counting is an elementary technique for analysing spatial
+  point patterns. See Diggle (2003).
+
+  \bold{If \code{X} is a point pattern}, then 
+  by default, the window containing the point pattern \code{X} is divided into
+  an \code{nx * ny} grid of rectangular tiles or `quadrats'.
+  (If the window is not a rectangle, then these tiles are intersected
+  with the window.)
+  The number of points of \code{X} falling in each quadrat is
+  counted. These numbers are returned as a contingency table.
+
+  If \code{xbreaks} is given, it should be a numeric vector
+  giving the \eqn{x} coordinates of the quadrat boundaries.
+  If it is not given, it defaults to a
+  sequence of \code{nx+1} values equally spaced
+  over the range of \eqn{x} coordinates in the window \code{Window(X)}.
+
+  Similarly if \code{ybreaks} is given, it should be a numeric
+  vector giving the \eqn{y} coordinates of the quadrat boundaries.
+  It defaults to a vector of \code{ny+1} values
+  equally spaced over the range of \eqn{y} coordinates in the window.
+  The lengths of \code{xbreaks} and \code{ybreaks} may be different.
+
+  Alternatively, quadrats of any shape may be used.
+  The argument \code{tess} can be a tessellation (object of class
+  \code{"tess"}) whose tiles will serve as the quadrats.
+  
+  The algorithm counts the number of points of \code{X}
+  falling in each quadrat, and returns these counts as a
+  contingency table. 
+
+  The return value is a \code{table} which can be printed neatly.
+  The return value is also a member of the special class
+  \code{"quadratcount"}. Plotting the object will display the
+  quadrats, annotated by their counts. See the examples.
+
+  To perform a chi-squared test based on the quadrat counts,
+  use \code{\link{quadrat.test}}.
+  
+  To calculate an estimate of intensity based on the quadrat counts,
+  use \code{\link{intensity.quadratcount}}.
+
+  To extract the quadrats used in a \code{quadratcount} object,
+  use \code{\link{as.tess}}.
+
+  \bold{If \code{X} is a split point pattern} (object of class
+  \code{"splitppp"} then quadrat counting will be performed on
+  each of the components point patterns, and the resulting
+  contingency tables will be returned in a list. This list can be
+  printed or plotted.
+
+  Marks attached to the points are ignored by \code{quadratcount.ppp}.
+  To obtain a separate contingency table for each type of point
+  in a multitype point pattern,
+  first separate the different points using \code{\link{split.ppp}},
+  then apply \code{quadratcount.splitppp}. See the Examples.
+}
+\note{
+  To perform a chi-squared test based on the quadrat counts,
+  use \code{\link{quadrat.test}}.
+}
+\section{Warning}{
+  If \code{Q} is the result of \code{quadratcount}
+  using rectangular tiles, then \code{as.numeric(Q)}
+  extracts the counts \bold{in the wrong order}.
+  To obtain the quadrat counts in the same order as the
+  tiles of the corresponding tessellation would be listed,
+  use \code{as.vector(t(Q))}, which works in all cases.
+}
+\seealso{
+  \code{\link{plot.quadratcount}},
+  \code{\link{intensity.quadratcount}},
+  \code{\link{quadrats}},
+  \code{\link{quadrat.test}},
+  \code{\link{tess}},
+  \code{\link{hextess}},
+  \code{\link{quadratresample}},
+  \code{\link{miplot}}
+}
+\references{
+  Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}.
+  Academic Press.
+
+  Stoyan, D. and Stoyan, H. (1994)
+  \emph{Fractals, random shapes and point fields:
+  methods of geometrical statistics.}
+  John Wiley and Sons.
+} 
+\examples{
+ X <- runifpoint(50)
+ quadratcount(X)
+ quadratcount(X, 4, 5)
+ quadratcount(X, xbreaks=c(0, 0.3, 1), ybreaks=c(0, 0.4, 0.8, 1))
+ qX <-  quadratcount(X, 4, 5)
+
+ # plotting:
+ plot(X, pch="+")
+ plot(qX, add=TRUE, col="red", cex=1.5, lty=2)
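+ # counts in the same order as the tiles (see the Warning section)
+ as.vector(t(qX))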
+
+ # irregular window
+ data(humberside)
+ plot(humberside)
+ qH <- quadratcount(humberside, 2, 3)
+ plot(qH, add=TRUE, col="blue", cex=1.5, lwd=2)
+
+ # multitype - split
+ plot(quadratcount(split(humberside), 2, 3))
+ 
+ # quadrats determined by tessellation:
+ B <- dirichlet(runifpoint(6))
+ qX <- quadratcount(X, tess=B)
+ plot(X, pch="+")
+ plot(qX, add=TRUE, col="red", cex=1.5, lty=2)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
+ 
+ 
diff --git a/man/quadratresample.Rd b/man/quadratresample.Rd
new file mode 100644
index 0000000..6cb889d
--- /dev/null
+++ b/man/quadratresample.Rd
@@ -0,0 +1,75 @@
+\name{quadratresample}
+\alias{quadratresample}
+\title{Resample a Point Pattern by Resampling Quadrats}
+\description{
+  Given a point pattern dataset, create a resampled point pattern
+  by dividing the window into rectangular quadrats and randomly
+  resampling the list of quadrats. 
+}
+\usage{
+quadratresample(X, nx, ny=nx, ...,
+                replace = FALSE, nsamples = 1,
+                verbose = (nsamples > 1))
+}
+\arguments{
+  \item{X}{
+    A point pattern dataset (object of class \code{"ppp"}).
+  }
+  \item{nx,ny}{
+    Numbers of quadrats in the \eqn{x} and \eqn{y} directions.
+  }
+  \item{\dots}{Ignored.}
+  \item{replace}{
+    Logical value. Specifies whether quadrats should be sampled
+    with or without replacement.
+  }
+  \item{nsamples}{Number of randomised point patterns to be generated.}
+  \item{verbose}{Logical value indicating whether to print progress reports.}
+}
+\details{
+  This command implements a very simple bootstrap resampling procedure
+  for spatial point patterns \code{X}.
+
+  The dataset \code{X} must be a point pattern (object of class
+  \code{"ppp"}) and its observation window must be a rectangle.
+
+  The window is first divided into \code{N = nx * ny} rectangular tiles
+  (quadrats) of equal size and shape. 
+  To generate one resampled point pattern, a random sample of
+  \code{N} quadrats is selected from the list of \code{N} quadrats,
+  with replacement (if \code{replace=TRUE}) or without replacement
+  (if \code{replace=FALSE}). The \eqn{i}th quadrat in the original
+  dataset is then replaced by the \eqn{i}th sampled quadrat, after the
+  latter is shifted so that it
+  occupies the correct spatial position. The quadrats are then
+  reconstituted into a point pattern inside the same window as \code{X}.
+
+  If \code{replace=FALSE}, this procedure effectively involves a random
+  permutation of the quadrats. The resulting resampled point pattern has
+  the same number of points as \code{X}.
+  If \code{replace=TRUE}, the number of points in the resampled point
+  pattern is random.
+}
+\value{
+  A point pattern (if \code{nsamples = 1}) or a 
+  list of point patterns (if \code{nsamples > 1}).
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{quadrats}},
+  \code{\link{quadratcount}}.
+
+  See \code{\link{varblock}} to estimate the variance of
+  a summary statistic by block resampling.
+}
+\examples{
+  data(bei)
+  quadratresample(bei, 6, 3)
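+  # variant: sample quadrats with replacement, two replicates
+  quadratresample(bei, 6, 3, replace=TRUE, nsamples=2)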
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/quadrats.Rd b/man/quadrats.Rd
new file mode 100644
index 0000000..b4808db
--- /dev/null
+++ b/man/quadrats.Rd
@@ -0,0 +1,90 @@
+\name{quadrats}
+\alias{quadrats}
+\title{Divide Region into Quadrats}
+\description{
+  Divides window into rectangular quadrats and 
+  returns the quadrats as a tessellation.
+}
+\usage{
+quadrats(X, nx = 5, ny = nx, xbreaks = NULL, ybreaks = NULL, keepempty=FALSE)
+}
+\arguments{
+  \item{X}{
+    A window (object of class \code{"owin"})
+    or anything that can be coerced to a window using
+    \code{\link{as.owin}}, such as a point pattern.
+  }
+  \item{nx,ny}{
+    Numbers of quadrats in the \eqn{x} and \eqn{y} directions.
+    Incompatible with \code{xbreaks} and \code{ybreaks}.
+  }
+  \item{xbreaks}{
+    Numeric vector giving the \eqn{x} coordinates of the
+    boundaries of the quadrats. Incompatible with \code{nx}.
+  }
+  \item{ybreaks}{
+    Numeric vector giving the \eqn{y} coordinates of the
+    boundaries of the quadrats. Incompatible with \code{ny}.
+  }
+  \item{keepempty}{
+    Logical value indicating whether to delete or retain
+    empty quadrats. See Details.
+  }
+}
+\details{
+  If the window \code{X} is a rectangle, it is divided into
+  an \code{nx * ny} grid of rectangular tiles or `quadrats'.
+
+  If \code{X} is not a rectangle, then the bounding rectangle of
+  \code{X} is first divided into an \code{nx * ny} grid of rectangular
+  tiles, and these tiles are then intersected with the window \code{X}.
+
+  The resulting tiles are returned as a tessellation (object of class
+  \code{"tess"}) which can be plotted and used in other analyses.
+
+  If \code{xbreaks} is given, it should be a numeric vector
+  giving the \eqn{x} coordinates of the quadrat boundaries.
+  If it is not given, it defaults to a
+  sequence of \code{nx+1} values equally spaced
+  over the range of \eqn{x} coordinates in the window \code{Window(X)}.
+
+  Similarly if \code{ybreaks} is given, it should be a numeric
+  vector giving the \eqn{y} coordinates of the quadrat boundaries.
+  It defaults to a vector of \code{ny+1} values
+  equally spaced over the range of \eqn{y} coordinates in the window.
+  The lengths of \code{xbreaks} and \code{ybreaks} may be different.
+
+  By default (if \code{keepempty=FALSE}), any rectangular tile which
+  does not intersect the window \code{X} is
+  ignored, and only the non-empty intersections are treated as quadrats,
+  so the tessellation may consist of fewer than \code{nx * ny} tiles.
+  If \code{keepempty=TRUE}, empty intersections are retained,
+  and the tessellation always contains exactly \code{nx * ny} tiles,
+  some of which may be empty.
+}
+\value{
+  A tessellation (object of class \code{"tess"}) as described under
+  \code{\link{tess}}.
+}
+\examples{
+ W <- square(10)
+ Z <- quadrats(W, 4, 5)
+ plot(Z)
+
+ data(letterR)
+ plot(quadrats(letterR, 5, 7))
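+
+ # explicit quadrat boundaries given by the documented
+ # xbreaks and ybreaks arguments (instead of nx and ny)
+ plot(quadrats(W, xbreaks=c(0,2,5,10), ybreaks=c(0,5,10)))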
+}
+\seealso{
+  \code{\link{tess}},
+  \code{\link{quadratcount}},
+  \code{\link{quadrat.test}},
+  \code{\link{quadratresample}}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{utilities}
+\keyword{datagen}
diff --git a/man/quadscheme.Rd b/man/quadscheme.Rd
new file mode 100644
index 0000000..f3e1b63
--- /dev/null
+++ b/man/quadscheme.Rd
@@ -0,0 +1,165 @@
+\name{quadscheme}
+\alias{quadscheme}
+\title{Generate a Quadrature Scheme from a Point Pattern}
+\description{
+  Generates a quadrature scheme (an object of class \code{"quad"})
+  from point patterns of data and dummy points.
+}
+\usage{
+ quadscheme(data, dummy, method="grid", \dots)
+}
+\arguments{
+  \item{data}{
+    The observed data point pattern.
+    An object of class \code{"ppp"}
+    or in a format recognised by \code{\link{as.ppp}()}
+  }
+  \item{dummy}{
+    The pattern of dummy points for the quadrature. 
+    An object of class \code{"ppp"}
+    or in a format recognised by \code{\link{as.ppp}()}.
+    Defaults to \code{default.dummy(data, ...)}.
+  }
+  \item{method}{
+    The name of the method for calculating quadrature weights: either
+    \code{"grid"} or \code{"dirichlet"}.
+  }
+  \item{\dots}{
+    Parameters of the weighting method (see below)
+    and parameters for constructing the dummy points if necessary.
+  }
+} 
+\value{
+  An object of class \code{"quad"} describing the quadrature scheme
+  (data points, dummy points, and quadrature weights)
+  suitable as the argument \code{Q} of the function \code{\link{ppm}()} for
+  fitting a point process model.
+
+  The quadrature scheme can be inspected using the
+  \code{print} and \code{plot} methods for objects
+  of class \code{"quad"}. 
+}
+\details{
+  This is the primary method for producing a quadrature scheme
+  for use by \code{\link{ppm}}.
+  
+  The function \code{\link{ppm}} fits a point process model to an
+  observed point pattern using 
+  the Berman-Turner quadrature approximation (Berman and Turner, 1992;
+  Baddeley and Turner, 2000) to the pseudolikelihood of the model. 
+  It requires a quadrature scheme consisting of 
+  the original data point pattern, an additional pattern of dummy points,
+  and a vector of quadrature weights for all these points.
+  Such quadrature schemes are represented by objects of class
+  \code{"quad"}. See \code{\link{quad.object}} for a description of this class.
+
+  Quadrature schemes are created by the function
+  \code{quadscheme}.
+  The arguments \code{data} and \code{dummy} specify the data and dummy
+  points, respectively. There is a sensible default for the dummy 
+  points (provided by \code{\link{default.dummy}}).
+  Alternatively the dummy points
+  may be specified arbitrarily and given in any format recognised by
+  \code{\link{as.ppp}}.
+  There are also functions for creating dummy patterns
+  including \code{\link{corners}},
+  \code{\link{gridcentres}},
+  \code{\link{stratrand}} and
+  \code{\link{spokes}}.
+ 
+  The quadrature region is the region over which we are
+  integrating, and approximating integrals by finite sums.
+  If \code{dummy} is a point pattern object (class \code{"ppp"})
+  then the quadrature region is taken to be \code{Window(dummy)}.
+  If \code{dummy} is just a list of \eqn{x, y} coordinates
+  then the quadrature region defaults to the observation window
+  of the data pattern, \code{Window(data)}.
+
+  If \code{dummy} is missing, then a pattern of dummy points
+  will be generated using \code{\link{default.dummy}}, taking account
+  of the optional arguments \code{...}.
+  By default, the dummy points are arranged in a
+  rectangular grid; recognised arguments
+  include \code{nd} (the number of grid points
+  in the horizontal and vertical directions)
+  and \code{eps} (the spacing between dummy points).
+  If \code{random=TRUE}, a systematic random pattern
+  of dummy points is generated instead.
+  See \code{\link{default.dummy}} for details. 
+  
+  If \code{method = "grid"} then the optional arguments (for \code{\dots}) are 
+  \code{(nd, ntile, eps)}.
+  The quadrature region (defined above) is divided into
+  an \code{ntile[1]} by \code{ntile[2]} grid of rectangular tiles.
+  The weight for each
+  quadrature point is the area of a tile divided by the number of
+  quadrature points in that tile. 
+ 
+  If \code{method="dirichlet"} then the optional arguments are
+  \code{(exact=TRUE, nd, eps)}.
+  The quadrature points (both data and dummy) are used to construct the
+  Dirichlet tessellation. The quadrature weight of each point is the
+  area of its Dirichlet tile inside the quadrature region.
+  If \code{exact == TRUE} then this area is computed exactly
+  using the package \code{deldir}; otherwise it is computed
+  approximately by discretisation.
+}
+\references{
+  Baddeley, A. and Turner, R.
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42} (2000) 283--322.
+ 
+  Berman, M. and Turner, T.R. 
+  Approximating point process likelihoods with GLIM.
+  \emph{Applied Statistics} \bold{41} (1992) 31--38.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{as.ppp}},
+  \code{\link{quad.object}},
+  \code{\link{gridweights}},
+  \code{\link{dirichletWeights}},
+  \code{\link{corners}},
+  \code{\link{gridcentres}},
+  \code{\link{stratrand}},
+  \code{\link{spokes}}
+}
+\examples{
+  data(simdat)
+
+  # grid weights
+  Q <- quadscheme(simdat)
+  Q <- quadscheme(simdat, method="grid")
+  Q <- quadscheme(simdat, eps=0.5)         # dummy point spacing 0.5 units
+
+  Q <- quadscheme(simdat, nd=50)           # 1 dummy point per tile
+  Q <- quadscheme(simdat, ntile=25, nd=50) # 4 dummy points per tile
+
+  # Dirichlet weights
+  Q <- quadscheme(simdat, method="dirichlet", exact=FALSE)
+
+  # random dummy pattern
+  \dontrun{
+  D <- runifpoint(250, Window(simdat))
+  Q <- quadscheme(simdat, D, method="dirichlet", exact=FALSE)
+  }
+
+  # polygonal window
+  data(demopat)
+  X <- unmark(demopat)
+  Q <- quadscheme(X)
+
+  # mask window
+  Window(X) <- as.mask(Window(X))
+  Q <- quadscheme(X)
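+
+  # as noted above, a quadrature scheme can be inspected
+  # with the print and plot methods for class "quad"
+  Q
+  plot(Q)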
+  
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/quadscheme.logi.Rd b/man/quadscheme.logi.Rd
new file mode 100644
index 0000000..351a453
--- /dev/null
+++ b/man/quadscheme.logi.Rd
@@ -0,0 +1,142 @@
+\name{quadscheme.logi}
+\alias{quadscheme.logi}
+\title{Generate a Logistic Regression Quadrature Scheme from a Point Pattern}
+\description{
+  Generates a logistic regression quadrature scheme (an object of class
+  \code{"logiquad"} inheriting from \code{"quad"})
+  from point patterns of data and dummy points.
+}
+\usage{
+ quadscheme.logi(data, dummy, dummytype = "stratrand",
+                 nd = NULL, mark.repeat = FALSE, \dots)
+}
+\arguments{
+  \item{data}{
+    The observed data point pattern.
+    An object of class \code{"ppp"}
+    or in a format recognised by \code{\link{as.ppp}()}.
+  }
+  \item{dummy}{
+    The pattern of dummy points for the quadrature. 
+    An object of class \code{"ppp"}
+    or in a format recognised by \code{\link{as.ppp}()}.
+    If missing a sensible default is generated.
+  }
+  \item{dummytype}{
+    The name of the type of dummy points to use when \code{dummy}
+    is missing. Currently available
+    options are: \code{"stratrand"} (default), \code{"binomial"},
+    \code{"poisson"}, \code{"grid"} and \code{"transgrid"}.
+  }
+  \item{nd}{
+    Integer, or integer vector of length 2 controlling the intensity of
+    dummy points when \code{dummy} is missing.
+  }
+  \item{mark.repeat}{
+    Logical value. If \code{TRUE}, the dummy points are repeated
+    for each level of the marks of a marked data pattern
+    when \code{dummy} is missing. (See details.)
+  }
+  \item{\dots}{
+    Ignored.
+  }
+} 
+\value{
+  An object of class \code{"logiquad"} inheriting from \code{"quad"}
+  describing the quadrature scheme 
+  (data points, dummy points, and quadrature weights)
+  suitable as the argument \code{Q} of the function \code{\link{ppm}()} for
+  fitting a point process model.
+
+  The quadrature scheme can be inspected using the
+  \code{print} and \code{plot} methods for objects
+  of class \code{"quad"}. 
+}
+\details{
+  This is the primary method for producing a quadrature scheme
+  for use by \code{\link{ppm}} when the logistic regression
+  approximation (Baddeley et al. 2013) to the pseudolikelihood of the
+  model is applied (i.e. when \code{method="logi"} in \code{\link{ppm}}).
+  
+  The function \code{\link{ppm}} fits a point process model to an
+  observed point pattern. When used with the option \code{method="logi"}
+  it requires a quadrature scheme consisting of 
+  the original data point pattern and an additional pattern of dummy points.
+  Such quadrature schemes are represented by objects of class
+  \code{"logiquad"}.
+
+  Quadrature schemes are created by the function
+  \code{quadscheme.logi}.
+  The arguments \code{data} and \code{dummy} specify the data and dummy
+  points, respectively. There is a sensible default for the dummy 
+  points.
+  Alternatively the dummy points
+  may be specified arbitrarily and given in any format recognised by
+  \code{\link{as.ppp}}.
+ 
+  The quadrature region is the region over which we are
+  integrating, and approximating integrals by finite sums.
+  If \code{dummy} is a point pattern object (class \code{"ppp"})
+  then the quadrature region is taken to be \code{Window(dummy)}.
+  If \code{dummy} is just a list of \eqn{x, y} coordinates
+  then the quadrature region defaults to the observation window
+  of the data pattern, \code{Window(data)}.
+
+  If \code{dummy} is missing, then a pattern of dummy points will be
+  generated, taking account of the optional arguments \code{dummytype},
+  \code{nd}, and \code{mark.repeat}.
+
+  The currently accepted values for \code{dummytype} are:
+  \itemize{
+  \item \code{"grid"} where the frame of the window
+  is divided into a \code{nd * nd} or \code{nd[1] * nd[2]} regular grid
+  of tiles and the centers constitutes the dummy points.
+  \item \code{"transgrid"} where a regular grid as above is translated
+  by a random vector.
+  \item \code{"stratrand"} where each point of a regular grid as above
+  is randomly translated within its tile.
+  \item \code{"binomial"} where \code{nd * nd} or \code{nd[1] * nd[2]}
+  points are generated uniformly in the frame of the
+  window.
+  \code{"poisson"} where a homogeneous Poisson point process with
+  intensity \code{nd * nd} or \code{nd[1] * nd[2]} is
+  generated within the frame of observation window.
+}
+Then if the window is not rectangular, any dummy points
+lying outside it are deleted. 
+
+If \code{data} is a multitype point pattern the dummy points should also
+be marked (with the same levels of the marks as \code{data}). If
+\code{dummy} is missing and the dummy pattern is generated by
+\code{quadscheme.logi} the default behaviour is to attach a uniformly
+distributed mark (from the levels of the marks) to each dummy
+point. Alternatively, if \code{mark.repeat=TRUE}, each dummy point is
+repeated once for each level of the marks, with a distinct
+mark value attached to each repetition.
+
+Finally, each point (data and dummy) is assigned the weight 1. The
+weights are never used; they are present only for compatibility with the class
+\code{"quad"} from which the \code{"logiquad"} object inherits.
+}
+\references{
+  Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014)
+  Logistic regression for spatial Gibbs point processes.
+  \emph{Biometrika} \bold{101} (2) 377--392.
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{as.ppp}}
+}
+\examples{
+  data(simdat)
+
+  Q <- quadscheme.logi(simdat)
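+
+  # other dummy point types (dummytype and nd are documented arguments)
+  Q2 <- quadscheme.logi(simdat, dummytype="binomial", nd=20)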
+}
+\author{\adrian,
+  \rolf
+  and \ege.
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/quantess.Rd b/man/quantess.Rd
new file mode 100644
index 0000000..c09327d
--- /dev/null
+++ b/man/quantess.Rd
@@ -0,0 +1,112 @@
+\name{quantess}
+\alias{quantess}
+\alias{quantess.owin}
+\alias{quantess.ppp}
+\alias{quantess.im}
+\title{Quantile Tessellation}
+\description{
+  Divide space into tiles which contain equal amounts of stuff.
+}
+\usage{
+quantess(M, Z, n, \dots)
+
+\method{quantess}{owin}(M, Z, n, \dots, type=2)
+
+\method{quantess}{ppp}(M, Z, n, \dots, type=2)
+
+\method{quantess}{im}(M, Z, n, \dots, type=2)
+}
+\arguments{
+  \item{M}{
+    A spatial object (such as a window, point pattern
+    or pixel image) determining the weight or amount of stuff
+    at each location.
+  }
+  \item{Z}{
+    A spatial covariate (a pixel image or a \code{function(x,y)})
+    or one of the strings \code{"x"} or \code{"y"} indicating the
+    \eqn{x} or \eqn{y} coordinate.
+    The range of values of \code{Z} will be broken into \code{n}
+    bands containing equal amounts of stuff.
+  }
+  \item{n}{
+    Number of bands. A positive integer.
+  }
+  \item{type}{
+    Integer specifying the rule for calculating quantiles.
+    Passed to \code{\link[stats]{quantile.default}}.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link{quadrats}} or
+    \code{\link{tess}} defining another tessellation
+    which should be intersected with the quantile tessellation.
+  }
+}
+\details{
+  A \emph{quantile tessellation} is a division of space into
+  pieces which contain equal amounts of stuff.
+
+  The function \code{quantess} 
+  computes a quantile tessellation and 
+  returns the tessellation itself.  
+  The function \code{quantess} is generic, with methods for
+  windows (class \code{"owin"}), point patterns (\code{"ppp"})
+  and pixel images (\code{"im"}). 
+
+  The first argument \code{M} (for mass) specifies the spatial
+  distribution of stuff that is to be divided. If \code{M} is a window,
+  the \emph{area} of the window is to be divided into \code{n} equal pieces.
+  If \code{M} is a point pattern, the \emph{number of points} in the
+  pattern is to be divided into \code{n} equal parts, as far as
+  possible. If \code{M} is a pixel image, the pixel values are
+  interpreted as weights, and the \emph{total weight} is to be divided
+  into \code{n} equal parts.  
+
+  The second argument
+  \code{Z} is a spatial covariate. The range of values of \code{Z}
+  will be divided into \code{n} bands, each containing
+  the same total weight. That is, we determine the quantiles of \code{Z}
+  with weights given by \code{M}.
+
+  For convenience, additional arguments \code{\dots} can be given,
+  to further subdivide the tiles of the tessellation.
+
+  The result of \code{quantess} is a tessellation of \code{as.owin(M)}
+  determined by the quantiles of \code{Z}.
+}
+\value{
+  A tessellation (object of class \code{"tess"}).
+}
+\author{
+  Original idea by Ute Hahn.
+  Implemented in \code{spatstat} by
+  \adrian, \rolf and \ege.
+}
+\seealso{
+  \code{\link{tess}}, \code{\link{quadrats}}, \code{\link{quantile}},
+  \code{\link{tilenames}}
+}
+\examples{
+  plot(quantess(letterR, "x", 5))
+
+  plot(quantess(bronzefilter, "x", 6))
+  points(unmark(bronzefilter))
+
+  opa <- par(mar=c(0,0,2,5))
+  A <- quantess(Window(bei), bei.extra$elev, 4)
+  plot(A, ribargs=list(las=1))
+  
+  B <- quantess(bei, bei.extra$elev, 4)
+  tilenames(B) <- paste(spatstat.utils::ordinal(1:4), "quartile")
+  plot(B, ribargs=list(las=1))
+  points(bei, pch=".", cex=2, col="white")
+  par(opa)
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/quantile.density.Rd b/man/quantile.density.Rd
new file mode 100644
index 0000000..25bd4b8
--- /dev/null
+++ b/man/quantile.density.Rd
@@ -0,0 +1,83 @@
+\name{quantile.density}
+\alias{quantile.density}
+\title{
+  Quantiles of a Density Estimate
+}
+\description{
+  Given a kernel estimate of a probability density, compute quantiles.
+}
+\usage{
+\method{quantile}{density}(x, probs = seq(0, 1, 0.25), names = TRUE,
+        \dots, warn = TRUE)
+}
+\arguments{
+  \item{x}{
+    Object of class \code{"density"} computed by a method for
+    \code{\link[stats]{density}}
+  }
+  \item{probs}{
+    Numeric vector of probabilities for which the quantiles are required.
+  }
+  \item{names}{
+    Logical value indicating whether to attach names (based on
+    \code{probs}) to the result.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{warn}{
+    Logical value indicating whether to issue a warning if the
+    density estimate \code{x} had to be renormalised because it
+    was computed in a restricted interval.
+  }
+}
+\details{
+  This function calculates quantiles of the probability distribution
+  whose probability density has been estimated and stored in the object
+  \code{x}. The object \code{x} must belong to the class \code{"density"},
+  and would typically have been obtained from a call to the function
+  \code{\link[stats]{density}}.
+
+  The probability density is first normalised so that the total
+  probability is equal to 1. A warning is issued if the density
+  estimate was restricted to an interval (i.e. if \code{x}
+  was created by a call to \code{\link[stats]{density}} which
+  included either of the arguments \code{from} or \code{to}).
+
+  Next, the density estimate is numerically integrated to obtain an estimate
+  of the cumulative distribution function \eqn{F(x)}. Then
+  for each desired probability \eqn{p}, the algorithm finds the
+  corresponding quantile \eqn{q}.
+
+  The quantile \eqn{q} corresponding to probability \eqn{p}
+  satisfies \eqn{F(q) = p} up to
+  the resolution of the grid of values contained in \code{x}.
+  The quantile is computed from the right, that is,
+  \eqn{q} is the smallest available value of \eqn{x} such that
+  \eqn{F(x) \ge p}{F(x) >= p}.
+}
+\value{
+  A numeric vector containing the quantiles.
+}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link[stats]{quantile}}, 
+  \code{\link{quantile.ewcdf}}, 
+  \code{\link{quantile.im}},
+  \code{\link{CDF}}.
+}
+\examples{
+   dd <- density(runif(10))
+   quantile(dd)
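+   # median and 90th percentile of the estimated distribution
+   quantile(dd, probs=c(0.5, 0.9))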
+}
+\keyword{methods}
+\keyword{univar}
+\keyword{nonparametric}
diff --git a/man/quantile.ewcdf.Rd b/man/quantile.ewcdf.Rd
new file mode 100644
index 0000000..2ba7b46
--- /dev/null
+++ b/man/quantile.ewcdf.Rd
@@ -0,0 +1,73 @@
+\name{quantile.ewcdf}
+\alias{quantile.ewcdf}
+\title{
+  Quantiles of Weighted Empirical Cumulative Distribution Function
+}
+\description{
+  Compute quantiles of a weighted empirical cumulative distribution function.  
+}
+\usage{
+  \method{quantile}{ewcdf}(x, probs = seq(0, 1, 0.25),
+                 names = TRUE, \dots,
+                 normalise = TRUE, type=1)
+}
+\arguments{
+  \item{x}{
+    A weighted empirical cumulative distribution function
+    (object of class \code{"ewcdf"}, produced by \code{\link{ewcdf}})
+    for which the quantiles are desired.
+  }
+  \item{probs}{
+    probabilities for which the quantiles are desired.
+    A numeric vector of values between 0 and 1.
+  }
+  \item{names}{
+    Logical. If \code{TRUE}, the resulting vector of quantiles is
+    annotated with names corresponding to \code{probs}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{normalise}{
+    Logical value indicating whether \code{x} should first be normalised
+    so that it ranges between 0 and 1. 
+  }
+  \item{type}{
+    Integer specifying the type of quantile to be calculated,
+    as explained in \code{\link[stats]{quantile.default}}.
+    Only types 1 and 2 are currently implemented.
+  }
+}
+\details{
+  This is a method for the generic \code{\link[stats]{quantile}}
+  function for the class \code{ewcdf} of empirical weighted cumulative
+  distribution functions.
+
+  The quantile for a probability \code{p} is computed
+  as the right-continuous inverse of the cumulative
+  distribution function \code{x} (assuming \code{type=1}, the default).
+
+  If \code{normalise=TRUE} (the default),
+  the weighted cumulative function \code{x} is first normalised to
+  have total mass \code{1} so that it can be interpreted as a
+  cumulative probability distribution function. 
+}
+\value{
+  Numeric vector of quantiles, of the same length as \code{probs}.
+}
+\seealso{
+  \code{\link{ewcdf}}, 
+  \code{\link[stats]{quantile}}
+}
+\examples{
+  z <- rnorm(50)
+  w <- runif(50)
+  Fun <- ewcdf(z, w)
+  quantile(Fun, c(0.95,0.99))
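+  # type 2 quantiles (only types 1 and 2 are implemented, as documented)
+  quantile(Fun, c(0.95,0.99), type=2)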
+}
+\author{
+  \spatstatAuthors
+  and Kevin Ummel.
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/quantile.im.Rd b/man/quantile.im.Rd
new file mode 100644
index 0000000..b36e624
--- /dev/null
+++ b/man/quantile.im.Rd
@@ -0,0 +1,57 @@
+\name{quantile.im}
+\alias{quantile.im}
+\title{Sample Quantiles of Pixel Image}
+\description{
+Compute the sample quantiles of the pixel values of a
+given pixel image.
+}
+\usage{
+\method{quantile}{im}(x, \dots)
+}
+\arguments{
+\item{x}{
+A pixel image.
+An object of class \code{"im"}.
+}
+\item{\dots}{
+Optional arguments passed to \code{\link{quantile.default}}.
+They determine the probabilities for which quantiles should be
+computed. See \code{\link{quantile.default}}.
+}
+} 
+\value{
+A vector of quantiles. 
+}
+\details{
+This simple function applies the generic \code{\link{quantile}} operation
+to the pixel values of the image \code{x}. 
+
+This function is a convenient
+way to inspect an image and to obtain summary statistics.
+See the examples.
+}
+\seealso{
+\code{\link{quantile}},
+\code{\link{cut.im}},
+\code{\link{im.object}}
+}
+\examples{
+# artificial image data
+Z <- setcov(square(1))
+
+# find the quartiles
+quantile(Z)
+
+# find the deciles
+quantile(Z, probs=(0:10)/10)
+}
+
+\author{\adrian
+
+
+and \rolf
+
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{univar}
diff --git a/man/quasirandom.Rd b/man/quasirandom.Rd
new file mode 100644
index 0000000..bbe7dce
--- /dev/null
+++ b/man/quasirandom.Rd
@@ -0,0 +1,107 @@
+\name{quasirandom}
+\alias{quasirandom} %DoNotExport
+\alias{vdCorput}
+\alias{Halton}
+\alias{Hammersley}
+\title{
+  Quasirandom Patterns
+}
+\description{
+  Generates quasirandom sequences of numbers and
+  quasirandom spatial patterns of points in any dimension.
+}
+\usage{
+vdCorput(n, base)
+
+Halton(n, bases = c(2, 3), raw = FALSE, simplify = TRUE)
+
+Hammersley(n, bases = 2, raw = FALSE, simplify = TRUE)
+}
+\arguments{
+  \item{n}{
+    Number of points to generate.
+  }
+  \item{base}{
+    A prime number giving the base of the sequence.
+  }
+  \item{bases}{
+    Vector of prime numbers giving the bases of the sequences
+    for each coordinate axis.
+  }
+  \item{raw}{
+    Logical value indicating whether to return the coordinates
+    as a matrix (\code{raw=TRUE}) or as a spatial point
+    pattern (\code{raw=FALSE}, the default).
+  }
+  \item{simplify}{
+    Argument passed to \code{\link{ppx}} indicating whether
+    point patterns of dimension 2 or 3 should be
+    returned as objects of class \code{"ppp"} or \code{"pp3"}
+    respectively (\code{simplify=TRUE}, the default)
+    or as objects of class \code{"ppx"} (\code{simplify=FALSE}).
+  }
+}
+\details{
+  The function \code{vdCorput} generates the quasirandom sequence
+  of Van der Corput (1935) of length \code{n} with the given
+  \code{base}. These are numbers between 0 and 1 which are
+  spread very evenly over the interval (a low-discrepancy sequence).
+
+  The function \code{Halton} generates the Halton quasirandom sequence
+  of points in \code{d}-dimensional space, where
+  \code{d = length(bases)}. The values of the \eqn{i}-th coordinate
+  of the points are generated using the van der Corput sequence with
+  base equal to \code{bases[i]}.
+
+  The function \code{Hammersley} generates the Hammersley set
+  of points in \code{d+1}-dimensional space, where 
+  \code{d = length(bases)}. The first \code{d} coordinates
+  of the points are generated using the van der Corput sequence with
+  base equal to \code{bases[i]}. The \code{d+1}-th coordinate
+  is the sequence \code{1/n, 2/n, ..., 1}.
+
+  If \code{raw=FALSE} (the default) then the Halton and Hammersley
+  sets are interpreted as spatial point patterns of the
+  appropriate dimension. They are returned as objects of
+  class \code{"ppx"} (multidimensional point patterns)
+  unless \code{simplify=TRUE} and \code{d=2} or \code{d=3}
+  when they are returned as objects of class \code{"ppp"}
+  or \code{"pp3"}.
+  If \code{raw=TRUE}, the coordinates are returned as a matrix
+  with \code{n} rows and \code{D} columns where \code{D} is the spatial
+  dimension.
+}
+\value{
+  For \code{vdCorput}, a numeric vector.
+
+  For \code{Halton} and \code{Hammersley},
+  an object of class \code{"ppp"}, \code{"pp3"} or
+  \code{"ppx"}; or if \code{raw=TRUE}, a numeric matrix.
+}
+\references{
+  Van der Corput, J. G. (1935) Verteilungsfunktionen.
+  \emph{Proc. Ned. Akad. v. Wetensch.} \bold{38}: 813--821.
+
+  Kuipers, L. and Niederreiter, H. (2005)
+  \emph{Uniform distribution of sequences},
+  Dover Publications.
+}
+\seealso{
+  \code{\link{rQuasi}}
+}
+\examples{
+   vdCorput(10, 2)
+
+   plot(Halton(256, c(2,3)))
+
+   plot(Hammersley(256, 3))
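+
+   # raw coordinate matrix instead of a point pattern (documented raw=TRUE)
+   head(Halton(10, c(2,3), raw=TRUE))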
+}
+\author{\adrian,
+  \rolf
+  and \ege.
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rCauchy.Rd b/man/rCauchy.Rd
new file mode 100644
index 0000000..386f905
--- /dev/null
+++ b/man/rCauchy.Rd
@@ -0,0 +1,166 @@
+\name{rCauchy}
+\alias{rCauchy}
+\title{Simulate Neyman-Scott Point Process with Cauchy cluster kernel}
+\description{
+  Generate a random point pattern, a simulated realisation of the
+  Neyman-Scott process with Cauchy cluster kernel.
+}
+\usage{
+ rCauchy(kappa, scale, mu, win = owin(), thresh = 0.001,
+         nsim=1, drop=TRUE, 
+         saveLambda=FALSE, expand = NULL, \dots,
+         poisthresh=1e-6, saveparents=TRUE)
+}
+\arguments{
+  \item{kappa}{
+    Intensity of the Poisson process of cluster centres.
+    A single positive number, a function, or a pixel image.
+  }
+  \item{scale}{
+    Scale parameter for cluster kernel. Determines the size of clusters.
+    A positive number, in the same units as the spatial coordinates.
+  }
+  \item{mu}{
+    Mean number of points per cluster (a single positive number)
+    or reference intensity for the cluster points (a function or
+    a pixel image).
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    An object of class \code{"owin"}
+    or something acceptable to \code{\link{as.owin}}.
+  }
+  \item{thresh}{
+    Threshold relative to the cluster kernel value at the origin (parent
+    location) determining when the cluster kernel will be treated as
+    zero for simulation purposes. Will be overridden by argument
+    \code{expand} if that is given.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+  \item{saveLambda}{
+    Logical. If \code{TRUE} then the random intensity corresponding to
+    the simulated parent points will also be calculated and saved,
+    and returns as an attribute of the point pattern.
+  }
+  \item{expand}{
+    Numeric. Size of window expansion for generation of parent
+    points. By default determined by calling
+    \code{\link{clusterradius}} with the numeric threshold value given
+    in \code{thresh}.
+  }
+  \item{\dots}{
+    Passed to \code{\link{clusterfield}} to control the image resolution
+    when \code{saveLambda=TRUE} and to \code{\link{clusterradius}} when
+    \code{expand} is missing or \code{NULL}.
+  }
+  \item{poisthresh}{
+    Numerical threshold below which the model will be treated
+    as a Poisson process. See Details.
+  }
+  \item{saveparents}{
+    Logical value indicating whether to save the locations of the
+    parent points as an attribute.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"}) if \code{nsim=1},
+  or a list of point patterns if \code{nsim > 1}.
+
+  Additionally, some intermediate results of the simulation are returned
+  as attributes of this point pattern (see
+  \code{\link{rNeymanScott}}). Furthermore, the simulated intensity
+  function is returned as an attribute \code{"Lambda"}, if
+  \code{saveLambda=TRUE}.
+}
+\details{
+  This algorithm generates a realisation of the Neyman-Scott process
+  with Cauchy cluster kernel, inside the window \code{win}.
+
+  The process is constructed by first
+  generating a Poisson point process of ``parent'' points 
+  with intensity \code{kappa}. Then each parent point is
+  replaced by a random cluster of points, the number of points in each
+  cluster being random with a Poisson (\code{mu}) distribution,
+  and the points being placed independently around each parent point
+  according to a Cauchy kernel.
+
+  In this implementation, parent points are not restricted to lie in the
+  window; the parent process is effectively the uniform Poisson process
+  on the infinite plane.
+
+  This model can be fitted to data by the method of minimum contrast,
+  maximum composite likelihood or Palm likelihood using
+  \code{\link{kppm}}.
+  
+  The algorithm can also generate spatially inhomogeneous versions of
+  the cluster process:
+  \itemize{
+    \item The parent points can be spatially inhomogeneous.
+    If the argument \code{kappa} is a \code{function(x,y)}
+    or a pixel image (object of class \code{"im"}), then it is taken
+    as specifying the intensity function of an inhomogeneous Poisson
+    process that generates the parent points.
+    \item The offspring points can be inhomogeneous. If the
+    argument \code{mu} is a \code{function(x,y)}
+    or a pixel image (object of class \code{"im"}), then it is
+    interpreted as the reference density for offspring points,
+    in the sense of Waagepetersen (2007).
+  }
+  When the parents are homogeneous (\code{kappa} is a single number)
+  and the offspring are inhomogeneous (\code{mu} is a
+  function or pixel image), the model can be fitted to data
+  using \code{\link{kppm}}. 
+
+  If the pair correlation function of the model is very close
+  to that of a Poisson process, deviating by less than
+  \code{poisthresh}, then the model is approximately a Poisson process,
+  and will be simulated as a Poisson process with intensity
+  \code{kappa * mu}, using \code{\link{rpoispp}}.
+  This avoids computations that would otherwise require huge amounts
+  of memory.
+}
+\seealso{
+  \code{\link{rpoispp}},
+  \code{\link{rMatClust}},
+  \code{\link{rThomas}},
+  \code{\link{rVarGamma}},
+  \code{\link{rNeymanScott}},
+  \code{\link{rGaussPoisson}},
+  \code{\link{kppm}},
+  \code{\link{clusterfit}}.
+}
+\examples{
+ # homogeneous
+ X <- rCauchy(30, 0.01, 5)
+ # inhomogeneous
+ ff <- function(x,y){ exp(2 - 3 * abs(x)) }
+ Z <- as.im(ff, W=owin())
+ Y <- rCauchy(50, 0.01, Z)
+ YY <- rCauchy(ff, 0.01, 5)
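+ # keep the random intensity (documented saveLambda argument)
+ Xl <- rCauchy(30, 0.01, 5, saveLambda=TRUE)
+ plot(attr(Xl, "Lambda"))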
+}
+\references{
+  Ghorbani, M. (2013) Cauchy cluster process.
+  \emph{Metrika} \bold{76}, 697-706.
+
+  Jalilian, A., Guan, Y. and Waagepetersen, R. (2013)
+  Decomposition of variance for spatial Cox processes.
+  \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137.
+
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\author{Abdollah Jalilian and Rasmus Waagepetersen.
+  Adapted for \pkg{spatstat} by \adrian
+  
+  
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rDGS.Rd b/man/rDGS.Rd
new file mode 100644
index 0000000..74b6a6a
--- /dev/null
+++ b/man/rDGS.Rd
@@ -0,0 +1,112 @@
+\name{rDGS}
+\alias{rDGS}
+\title{Perfect Simulation of the Diggle-Gates-Stibbard Process}
+\description{
+  Generate a random pattern of points, a simulated realisation
+  of the Diggle-Gates-Stibbard process, using a perfect simulation algorithm.
+}
+\usage{
+  rDGS(beta, rho, W = owin(), expand=TRUE, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{beta}{
+    intensity parameter (a positive number).
+  }
+  \item{rho}{
+    interaction range (a non-negative number).
+  }
+  \item{W}{
+    window (object of class \code{"owin"}) in which to
+    generate the random pattern. 
+  }
+  \item{expand}{
+    Logical. If \code{FALSE}, simulation is performed
+    in the window \code{W}, which must be rectangular.
+    If \code{TRUE} (the default), simulation is performed
+    on a larger window, and the result is clipped to the original
+    window \code{W}.
+    Alternatively \code{expand} can be an object of class 
+    \code{"rmhexpand"} (see \code{\link{rmhexpand}})
+    determining the expansion method.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}  
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\details{
+  This function generates a realisation of the 
+  Diggle-Gates-Stibbard point process in the window \code{W}
+  using a \sQuote{perfect simulation} algorithm.
+
+  Diggle, Gates and Stibbard (1987) proposed a pairwise interaction
+  point process in which each pair of points separated by
+  a distance \eqn{d} contributes a factor \eqn{e(d)} to the
+  probability density, where
+  \deqn{
+    e(d) = \sin^2\left(\frac{\pi d}{2\rho}\right)
+  }{
+    e(d) = sin^2((pi * d)/(2 * rho))
+  }
+  for \eqn{d < \rho}{d < rho}, and \eqn{e(d)} is equal to 1
+  for \eqn{d \ge \rho}{d >= rho}.
+
+  The simulation algorithm used to generate the point pattern
+  is \sQuote{dominated coupling from the past}
+  as implemented by Berthelsen and \ifelse{latex}{\out{M\o ller}}{Moller} (2002, 2003).
+  This is a \sQuote{perfect simulation} or \sQuote{exact simulation}
+  algorithm, so called because the output of the algorithm is guaranteed
+  to have the correct probability distribution exactly (unlike the
+  Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output
+  is only approximately correct).
+
+  There is a tiny chance that the algorithm will 
+  run out of space before it has terminated. If this occurs, an error
+  message will be generated.
+}
+\value{
+  If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}).
+  If \code{nsim > 1}, a list of point patterns.
+}
+\references{
+  Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2002)
+  A primer on perfect simulation for spatial point processes.
+  \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367.
+
+  Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2003)
+  Likelihood and non-parametric Bayesian MCMC inference 
+  for spatial point processes based on perfect simulation and
+  path sampling. 
+  \emph{Scandinavian Journal of Statistics} 30, 549-564.
+
+  Diggle, P.J., Gates, D.J., and Stibbard, A. (1987)
+  A nonparametric estimator for pairwise-interaction point processes.
+  \emph{Biometrika} \bold{74}, 763--770.
+
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003).
+  \emph{Statistical Inference and Simulation for Spatial Point Processes.}
+  Chapman and Hall/CRC.
+}
+\author{
+  \adrian,
+  based on original code for the Strauss process by 
+  Kasper Klitgaard Berthelsen.
+}
+\examples{
+   X <- rDGS(50, 0.05)
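+   # several independent realisations (documented nsim argument)
+   Xs <- rDGS(50, 0.05, nsim=2)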
+}
+\seealso{
+  \code{\link{rmh}},
+  \code{\link{DiggleGatesStibbard}}.
+
+  \code{\link{rStrauss}},
+  \code{\link{rHardcore}},
+  \code{\link{rStraussHard}},
+  \code{\link{rDiggleGratton}},
+  \code{\link{rPenttinen}}.
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rDiggleGratton.Rd b/man/rDiggleGratton.Rd
new file mode 100644
index 0000000..0c10f91
--- /dev/null
+++ b/man/rDiggleGratton.Rd
@@ -0,0 +1,132 @@
+\name{rDiggleGratton}
+\alias{rDiggleGratton}
+\title{Perfect Simulation of the Diggle-Gratton Process}
+\description{
+  Generate a random pattern of points, a simulated realisation
+  of the Diggle-Gratton process, using a perfect simulation algorithm.
+}
+\usage{
+  rDiggleGratton(beta, delta, rho, kappa=1, W = owin(),
+                 expand=TRUE, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{beta}{
+    intensity parameter (a positive number).
+  }
+  \item{delta}{
+    hard core distance (a non-negative number).
+  }
+  \item{rho}{
+    interaction range (a number greater than \code{delta}).
+  }
+  \item{kappa}{
+    interaction exponent (a non-negative number).
+  }
+  \item{W}{
+    window (object of class \code{"owin"}) in which to
+    generate the random pattern. Currently this must be a rectangular
+    window.
+  }
+  \item{expand}{
+    Logical. If \code{FALSE}, simulation is performed
+    in the window \code{W}, which must be rectangular.
+    If \code{TRUE} (the default), simulation is performed
+    on a larger window, and the result is clipped to the original
+    window \code{W}.
+    Alternatively \code{expand} can be an object of class 
+    \code{"rmhexpand"} (see \code{\link{rmhexpand}})
+    determining the expansion method.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\details{
+  This function generates a realisation of the 
+  Diggle-Gratton point process in the window \code{W}
+  using a \sQuote{perfect simulation} algorithm.
+
+  Diggle and Gratton (1984, pages 208-210)
+  introduced the pairwise interaction point
+  process with pair potential \eqn{h(t)} of the form
+  \deqn{
+    h(t) = \left( \frac{t-\delta}{\rho-\delta} \right)^\kappa
+    \quad\quad \mbox{  if  } \delta \le t \le \rho
+  }{
+    h(t) = ((t - delta)/(rho - delta))^kappa, {    } delta <= t <= rho
+  }
+  with \eqn{h(t) = 0} for \eqn{t < \delta}{t < delta}
+  and  \eqn{h(t) = 1} for \eqn{t > \rho}{t > rho}.
+  Here \eqn{\delta}{delta}, \eqn{\rho}{rho} and \eqn{\kappa}{kappa}
+  are parameters.
+
+  Note that we use the symbol \eqn{\kappa}{kappa}
+  where Diggle and Gratton (1984) 
+  use \eqn{\beta}{beta}, since in \pkg{spatstat} we reserve the symbol
+  \eqn{\beta}{beta} for an intensity parameter.
+
+  The parameters must all be nonnegative,
+  and must satisfy \eqn{\delta \le \rho}{delta <= rho}.
+
+  The simulation algorithm used to generate the point pattern
+  is \sQuote{dominated coupling from the past}
+  as implemented by Berthelsen and \ifelse{latex}{\out{M\o ller}}{Moller} (2002, 2003).
+  This is a \sQuote{perfect simulation} or \sQuote{exact simulation}
+  algorithm, so called because the output of the algorithm is guaranteed
+  to have the correct probability distribution exactly (unlike the
+  Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output
+  is only approximately correct).
+
+  There is a tiny chance that the algorithm will 
+  run out of space before it has terminated. If this occurs, an error
+  message will be generated.
+}
+\value{
+  If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}).
+  If \code{nsim > 1}, a list of point patterns.
+}
+\references{
+  Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2002)
+  A primer on perfect simulation for spatial point processes.
+  \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367.
+
+  Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2003)
+  Likelihood and non-parametric Bayesian MCMC inference 
+  for spatial point processes based on perfect simulation and
+  path sampling. 
+  \emph{Scandinavian Journal of Statistics} 30, 549-564.
+
+  Diggle, P.J. and Gratton, R.J. (1984)
+  Monte Carlo methods of inference for implicit statistical models.
+  \emph{Journal of the Royal Statistical Society, series B}
+  \bold{46}, 193 -- 212.
+
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003).
+  \emph{Statistical Inference and Simulation for Spatial Point Processes.}
+  Chapman and Hall/CRC.
+}
+\author{
+  \adrian
+  
+  
+  based on original code for the Strauss process by 
+  Kasper Klitgaard Berthelsen.
+}
+\examples{
+   X <- rDiggleGratton(50, 0.02, 0.07)
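+   # non-default interaction exponent (documented kappa argument)
+   Y <- rDiggleGratton(50, 0.02, 0.07, kappa=2)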
+}
+\seealso{
+  \code{\link{rmh}},
+  \code{\link{DiggleGratton}}.
+
+  \code{\link{rStrauss}},
+  \code{\link{rHardcore}},
+  \code{\link{rStraussHard}},
+  \code{\link{rDGS}},
+  \code{\link{rPenttinen}}.
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rGaussPoisson.Rd b/man/rGaussPoisson.Rd
new file mode 100644
index 0000000..90fe904
--- /dev/null
+++ b/man/rGaussPoisson.Rd
@@ -0,0 +1,76 @@
+\name{rGaussPoisson}
+\alias{rGaussPoisson}
+\title{Simulate Gauss-Poisson Process}
+\description{
+  Generate a random point pattern, a simulated realisation of the
+  Gauss-Poisson Process.
+}
+\usage{
+ rGaussPoisson(kappa, r, p2, win = owin(c(0,1),c(0,1)),
+               \dots, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{kappa}{
+    Intensity of the Poisson process of cluster centres.
+    A single positive number, a function, or a pixel image.
+  }
+  \item{r}{
+    Diameter of each cluster that consists of exactly 2 points.
+  }
+  \item{p2}{
+    Probability that a cluster contains exactly 2 points.
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    An object of class \code{"owin"}
+    or something acceptable to \code{\link{as.owin}}.
+  }
+  \item{\dots}{Ignored.}
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"})
+  if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}.
+
+  Additionally, some intermediate results of the simulation are
+  returned as attributes of the point pattern.
+  See \code{\link{rNeymanScott}}.
+}
+\details{
+  This algorithm generates a realisation of the Gauss-Poisson
+  point process inside the window \code{win}.
+  The process is constructed by first
+  generating a Poisson point process of parent points 
+  with intensity \code{kappa}. Then each parent point is either retained
+  (with probability \code{1 - p2})
+  or replaced by a pair of points at a fixed distance \code{r} apart
+  (with probability \code{p2}). In the case of clusters of 2 points,
+  the line joining the two points has uniform random orientation.
+
+  In this implementation, parent points are not restricted to lie in the
+  window; the parent process is effectively the uniform
+  Poisson process on the infinite plane.
+}
+\seealso{
+  \code{\link{rpoispp}},
+  \code{\link{rThomas}},
+  \code{\link{rMatClust}},
+  \code{\link{rNeymanScott}}
+}
+\examples{
+ pp <- rGaussPoisson(30, 0.07, 0.5)
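+ # each cluster is a single point, or a pair of points 0.07 units apart
+ plot(pp)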
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rHardcore.Rd b/man/rHardcore.Rd
new file mode 100644
index 0000000..377dc44
--- /dev/null
+++ b/man/rHardcore.Rd
@@ -0,0 +1,105 @@
+\name{rHardcore}
+\alias{rHardcore}
+\title{Perfect Simulation of the Hardcore Process}
+\description{
+  Generate a random pattern of points, a simulated realisation
+  of the Hardcore process, using a perfect simulation algorithm.
+}
+\usage{
+  rHardcore(beta, R = 0, W = owin(), expand=TRUE, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{beta}{
+    intensity parameter (a positive number).
+  }
+  \item{R}{
+    hard core distance (a non-negative number).
+  }
+  \item{W}{
+    window (object of class \code{"owin"}) in which to
+    generate the random pattern. Currently this must be a rectangular
+    window.
+  }
+  \item{expand}{
+    Logical. If \code{FALSE}, simulation is performed
+    in the window \code{W}, which must be rectangular.
+    If \code{TRUE} (the default), simulation is performed
+    on a larger window, and the result is clipped to the original
+    window \code{W}.
+    Alternatively \code{expand} can be an object of class 
+    \code{"rmhexpand"} (see \code{\link{rmhexpand}})
+    determining the expansion method.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\details{
+  This function generates a realisation of the 
+  Hardcore point process in the window \code{W}
+  using a \sQuote{perfect simulation} algorithm.
+
+  The Hardcore process is a model for strong spatial inhibition.
+  Two points of the process are forbidden to lie closer than
+  \code{R} units apart.
+  The Hardcore process is the special case of the Strauss process
+  (see \code{\link{rStrauss}})
+  with interaction parameter \eqn{\gamma}{gamma} equal to zero.
+  
+  The simulation algorithm used to generate the point pattern
+  is \sQuote{dominated coupling from the past}
+  as implemented by Berthelsen and \ifelse{latex}{\out{M\o ller}}{Moller} (2002, 2003).
+  This is a \sQuote{perfect simulation} or \sQuote{exact simulation}
+  algorithm, so called because the output of the algorithm is guaranteed
+  to have the correct probability distribution exactly (unlike the
+  Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output
+  is only approximately correct).
+
+  There is a tiny chance that the algorithm will 
+  run out of space before it has terminated. If this occurs, an error
+  message will be generated.
+}
+\value{
+  If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}).
+  If \code{nsim > 1}, a list of point patterns.
+}
+\references{
+  Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2002)
+  A primer on perfect simulation for spatial point processes.
+  \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367.
+
+  Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2003)
+  Likelihood and non-parametric Bayesian MCMC inference 
+  for spatial point processes based on perfect simulation and
+  path sampling. 
+  \emph{Scandinavian Journal of Statistics} 30, 549-564.
+
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003).
+  \emph{Statistical Inference and Simulation for Spatial Point Processes.}
+  Chapman and Hall/CRC.
+}
+\author{
+  \adrian
+  
+  
+  based on original code for the Strauss process by 
+  Kasper Klitgaard Berthelsen.
+}
+\examples{
+   X <- rHardcore(0.05, 1.5, square(141.4))
+   Z <- rHardcore(100, 0.05)
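+   # several independent realisations (documented nsim argument)
+   Zs <- rHardcore(100, 0.05, nsim=2)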
+}
+\seealso{
+  \code{\link{rmh}},
+  \code{\link{Hardcore}},
+  \code{\link{rStrauss}},
+  \code{\link{rStraussHard}},
+  \code{\link{rDiggleGratton}},
+  \code{\link{rDGS}},
+  \code{\link{rPenttinen}}.
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rLGCP.Rd b/man/rLGCP.Rd
new file mode 100644
index 0000000..08b2d4b
--- /dev/null
+++ b/man/rLGCP.Rd
@@ -0,0 +1,140 @@
+\name{rLGCP}
+\alias{rLGCP}
+\title{Simulate Log-Gaussian Cox Process}
+\description{
+  Generate a random point pattern, a realisation of the
+  log-Gaussian Cox process.
+}
+\usage{
+ rLGCP(model="exp", mu = 0, param = NULL,
+       \dots,
+       win=NULL, saveLambda=TRUE, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{model}{
+    character string: the short name of a covariance model for
+    the Gaussian random field. After adding the prefix \code{"RM"},
+    the code will search for a function of this name
+    in the \pkg{RandomFields} package.
+  }
+  \item{mu}{
+    mean function of the Gaussian random field. Either a
+    single number, a \code{function(x,y, ...)} or a pixel
+    image (object of class \code{"im"}).
+  }
+  \item{param}{
+    List of parameters for the covariance.
+    Standard arguments are \code{var} and \code{scale}.
+  }
+  \item{\dots}{
+    Additional parameters for the covariance,
+    or arguments passed to \code{\link{as.mask}} to determine
+    the pixel resolution.
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    An object of class \code{"owin"}.
+  }
+  \item{saveLambda}{
+    Logical. If \code{TRUE} (the default) then the
+    simulated random intensity will also be saved,
+    and returned as an attribute of the point pattern.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\value{
+  A point pattern (object of class \code{"ppp"})
+  or a list of point patterns.
+
+  Additionally, the simulated intensity function for each point pattern is
+  returned as an attribute \code{"Lambda"} of the point pattern,
+  if \code{saveLambda=TRUE}.
+}
+\details{
+  This function generates a realisation of a log-Gaussian Cox
+  process (LGCP). This is a Cox point process in which
+  the logarithm of the random intensity is a Gaussian random
+  field with mean function \eqn{\mu} and covariance function
+  \eqn{c(r)}. Conditional on the random intensity, the point process
+  is a Poisson process with this intensity.
+
+  The string \code{model} specifies the covariance 
+  function of the Gaussian random field, and the parameters
+  of the covariance are determined by \code{param} and \code{\dots}.
+
+  To determine the covariance model, the string \code{model}
+  is prefixed by \code{"RM"}, and a function of this name is
+  sought in the \pkg{RandomFields} package. 
+  For a list of available models see 
+  \code{\link[RandomFields]{RMmodel}} in the
+  \pkg{RandomFields} package. For example the
+  \ifelse{latex}{\out{Mat\'ern}}{Matern}
+  covariance is specified by \code{model="matern"}, corresponding
+  to the function \code{RMmatern} in the \pkg{RandomFields} package.
+
+  Standard variance parameters (for all functions beginning with
+  \code{"RM"} in the \pkg{RandomFields} package) are \code{var}
+  for the variance at distance zero, and \code{scale} for the scale
+  parameter. Other parameters are specified in the help files
+  for the individual functions beginning with \code{"RM"}. For example
+  the help file for \code{RMmatern} states that \code{nu} is a parameter
+  for this model.
+
+  This algorithm uses the function \code{\link[RandomFields]{RFsimulate}} in the
+  \pkg{RandomFields} package to generate values of
+  a Gaussian random field, with the specified mean function \code{mu}
+  and the covariance specified by the arguments \code{model} and
+  \code{param}, on the points of a regular grid. The exponential
+  of this random field is taken as the intensity of a Poisson point
+  process, and a realisation of the Poisson process is then generated by the 
+  function \code{\link{rpoispp}} in the \pkg{spatstat} package.
+  
+  If the simulation window \code{win} is missing or \code{NULL},
+  then it defaults to 
+  \code{Window(mu)} if \code{mu} is a pixel image,
+  and it defaults to the unit square otherwise.
+  
+  The LGCP model can be fitted to data using \code{\link{kppm}}.
+}
+\seealso{
+\code{\link{rpoispp}},
+\code{\link{rMatClust}},
+\code{\link{rGaussPoisson}},
+\code{\link{rNeymanScott}},
+\code{\link{lgcp.estK}},
+\code{\link{kppm}}
+}
+\references{
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J., Syversveen, A. and Waagepetersen, R. (1998)
+  Log Gaussian Cox Processes.
+  \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482.
+}
+\examples{
+  if(require(RandomFields)) {
+  # homogeneous LGCP with exponential covariance function
+  X <- rLGCP("exp", 3, var=0.2, scale=.1)
+
+  # inhomogeneous LGCP with Gaussian covariance function
+  m <- as.im(function(x, y){5 - 1.5 * (x - 0.5)^2 + 2 * (y - 0.5)^2}, W=owin())
+  X <- rLGCP("gauss", m, var=0.15, scale =0.5)
+  plot(attr(X, "Lambda"))
+  points(X)
+
+  # inhomogeneous LGCP with Matern covariance function
+  X <- rLGCP("matern", function(x, y){ 1 - 0.4 * x},
+             var=2, scale=0.7, nu=0.5,
+             win = owin(c(0, 10), c(0, 10)))
+  plot(X)
+  }
+}
+\author{Abdollah Jalilian and Rasmus Waagepetersen.
+  Modified by \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rMatClust.Rd b/man/rMatClust.Rd
new file mode 100644
index 0000000..de45f39
--- /dev/null
+++ b/man/rMatClust.Rd
@@ -0,0 +1,191 @@
+\name{rMatClust}
+\alias{rMatClust}
+\title{Simulate Matern Cluster Process}
+\description{
+  Generate a random point pattern, a simulated realisation of the
+  \ifelse{latex}{\out{Mat\'ern}}{Matern} Cluster Process.
+}
+\usage{
+ rMatClust(kappa, scale, mu, win = owin(c(0,1),c(0,1)),
+           nsim=1, drop=TRUE, 
+           saveLambda=FALSE, expand = scale, ...,
+           poisthresh=1e-6, saveparents=TRUE)
+}
+\arguments{
+  \item{kappa}{
+    Intensity of the Poisson process of cluster centres.
+    A single positive number, a function, or a pixel image.
+  }
+  \item{scale}{
+    Radius parameter of the clusters.
+  }
+  \item{mu}{
+    Mean number of points per cluster (a single positive number)
+    or reference intensity for the cluster points (a function or
+    a pixel image).
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    An object of class \code{"owin"}
+    or something acceptable to \code{\link{as.owin}}.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+  \item{saveLambda}{
+    Logical. If \code{TRUE} then the random intensity corresponding to
+    the simulated parent points will also be calculated and saved,
+    and returned as an attribute of the point pattern.
+  }
+  \item{expand}{Numeric. Size of window expansion for generation of
+    parent points. Defaults to \code{scale} which is the cluster
+    radius.
+  }
+  \item{\dots}{Passed to \code{\link{clusterfield}} to control the image
+    resolution when \code{saveLambda=TRUE}.
+  }
+  \item{poisthresh}{
+    Numerical threshold below which the model will be treated
+    as a Poisson process. See Details.
+  }
+  \item{saveparents}{
+    Logical value indicating whether to save the locations of the
+    parent points as an attribute.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"}) if \code{nsim=1},
+  or a list of point patterns if \code{nsim > 1}.
+
+  Additionally, some intermediate results of the simulation are returned
+  as attributes of this point pattern (see
+  \code{\link{rNeymanScott}}). Furthermore, the simulated intensity
+  function is returned as an attribute \code{"Lambda"}, if
+  \code{saveLambda=TRUE}.
+}
+\details{
+  This algorithm generates a realisation of
+  \ifelse{latex}{\out{Mat\'ern}}{Matern}'s cluster process,
+  a special case of the Neyman-Scott process, inside the window \code{win}.
+
+  In the simplest case, where \code{kappa} and \code{mu}
+  are single numbers, the algorithm 
+  generates a uniform Poisson point process of \dQuote{parent} points 
+  with intensity \code{kappa}. Then each parent point is
+  replaced by a random cluster of \dQuote{offspring} points,
+  the number of points per cluster being Poisson (\code{mu})
+  distributed, and their
+  positions being independently and uniformly distributed inside
+  a disc of radius \code{scale} centred on the parent point.
+  The resulting point pattern
+  is a realisation of the classical
+  \dQuote{stationary Matern cluster process}
+  generated inside the window \code{win}.
+  This point process has intensity \code{kappa * mu}.
+
+  The algorithm can also generate spatially inhomogeneous versions of
+  the \ifelse{latex}{\out{Mat\'ern}}{Matern} cluster process:
+  \itemize{
+    \item The parent points can be spatially inhomogeneous.
+    If the argument \code{kappa} is a \code{function(x,y)}
+    or a pixel image (object of class \code{"im"}), then it is taken
+    as specifying the intensity function of an inhomogeneous Poisson
+    process that generates the parent points.
+    \item The offspring points can be inhomogeneous. If the
+    argument \code{mu} is a \code{function(x,y)}
+    or a pixel image (object of class \code{"im"}), then it is
+    interpreted as the reference density for offspring points,
+    in the sense of Waagepetersen (2007).
+    For a given parent point, the offspring constitute a Poisson process
+    with intensity function equal to 
+    \code{mu/(pi * scale^2)}
+    inside the disc of radius \code{scale} centred on the parent
+    point, and zero intensity outside this disc.
+    Equivalently we first generate,
+    for each parent point, a Poisson (\eqn{M}) random number of
+    offspring (where \eqn{M} is the maximum value of \code{mu})
+    placed independently and uniformly in the disc of radius \code{scale}
+    centred on the parent location, and then randomly thin the
+    offspring points, with retention probability \code{mu/M}.
+    \item
+    Both the parent points and the offspring points can be
+    inhomogeneous, as described above.
+  }
+
+  Note that if \code{kappa} is a pixel image, its domain must be larger
+  than the window \code{win}. This is because an offspring point inside
+  \code{win} could have its parent point lying outside \code{win}.
+  In order to allow this, the simulation algorithm
+  first expands the original window \code{win}
+  by a distance \code{expand} and generates the Poisson process of
+  parent points on this larger window. If \code{kappa} is a pixel image,
+  its domain must contain this larger window.
+
+  The intensity of the \ifelse{latex}{\out{Mat\'ern}}{Matern} cluster
+  process is \code{kappa * mu}
+  if either \code{kappa} or \code{mu} is a single number. In the general
+  case the intensity is an integral involving \code{kappa}, \code{mu}
+  and \code{scale}.
+
+  The \ifelse{latex}{\out{Mat\'ern}}{Matern} cluster process model
+  with homogeneous parents (i.e. where \code{kappa} is a single number)
+  can be fitted to data using \code{\link{kppm}}.
+  Currently it is not possible to fit the
+  \ifelse{latex}{\out{Mat\'ern}}{Matern} cluster process model
+  with inhomogeneous parents.
+
+  If the pair correlation function of the model is very close
+  to that of a Poisson process, deviating by less than
+  \code{poisthresh}, then the model is approximately a Poisson process,
+  and will be simulated as a Poisson process with intensity
+  \code{kappa * mu}, using \code{\link{rpoispp}}.
+  This avoids computations that would otherwise require huge amounts
+  of memory.
+}
+\seealso{
+  \code{\link{rpoispp}},
+  \code{\link{rThomas}},
+  \code{\link{rCauchy}},
+  \code{\link{rVarGamma}},
+  \code{\link{rNeymanScott}},
+  \code{\link{rGaussPoisson}},
+  \code{\link{kppm}},
+  \code{\link{clusterfit}}.
+}
+\examples{
+ # homogeneous
+ X <- rMatClust(10, 0.05, 4)
+ # inhomogeneous
+ ff <- function(x,y){ 4 * exp(2 * abs(x) - 1) }
+ Z <- as.im(ff, owin())
+ Y <- rMatClust(10, 0.05, Z)
+ YY <- rMatClust(ff, 0.05, 3)
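+ # a sketch of retrieving the saved intensity and parent points
+ # ('saveLambda' and 'saveparents', as described above)
+ XL <- rMatClust(10, 0.05, 4, saveLambda=TRUE)
+ plot(attr(XL, "Lambda"))
+ points(attr(XL, "parents"), pch=3)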
+}
+\references{
+  \ifelse{latex}{\out{Mat\'ern}}{Matern}, B. (1960)
+  \emph{Spatial Variation}.
+  Meddelanden \ifelse{latex}{\out{fr\r{a}n}}{fraan}
+  Statens Skogsforskningsinstitut,
+  volume 59, number 5.  Statens Skogsforskningsinstitut, Sweden.
+
+  \ifelse{latex}{\out{Mat\'ern}}{Matern}, B. (1986)
+  \emph{Spatial Variation}.
+  Lecture Notes in Statistics 36, Springer-Verlag, New York.
+
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rMaternI.Rd b/man/rMaternI.Rd
new file mode 100644
index 0000000..36f1fac
--- /dev/null
+++ b/man/rMaternI.Rd
@@ -0,0 +1,84 @@
+\name{rMaternI}
+\alias{rMaternI}
+\title{Simulate Matern Model I}
+\description{
+  Generate a random point pattern, a simulated realisation of the
+  \ifelse{latex}{\out{Mat\'ern}}{Matern} Model I inhibition process model.
+}
+\usage{
+ rMaternI(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE, \dots,
+          nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{kappa}{
+    Intensity of the Poisson process of proposal points.
+    A single positive number.
+  }
+  \item{r}{
+    Inhibition distance.
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    An object of class \code{"owin"}
+    or something acceptable to \code{\link{as.owin}}.
+    Alternatively a higher-dimensional box of class
+    \code{"box3"} or \code{"boxx"}.
+  }
+  \item{stationary}{
+    Logical. Whether to start with a stationary process of proposal points
+    (\code{stationary=TRUE}) or to generate the
+    proposal points only inside the window (\code{stationary=FALSE}).
+  }
+  \item{\dots}{Ignored.}
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\value{
+  A point pattern
+  if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}.
+  Each point pattern is normally an object of class \code{"ppp"},
+  but may be of class \code{"pp3"} or \code{"ppx"} depending on the window.
+}
+\details{
+  This algorithm generates one or more realisations
+  of \ifelse{latex}{\out{Mat\'ern}}{Matern}'s Model I
+  inhibition process inside the window \code{win}.
+
+  The process is constructed by first
+  generating a uniform Poisson point process of ``proposal'' points 
+  with intensity \code{kappa}. If \code{stationary = TRUE} (the
+  default), the proposal points are generated in a window larger than
+  \code{win}, so that the process of proposal points is
+  effectively stationary.
+  If \code{stationary=FALSE} then the proposal points are
+  only generated inside the window \code{win}.
+
+  A proposal point is then deleted if it lies within \code{r} units' distance
+  of another proposal point. Otherwise it is retained.
+  
+  The retained points constitute \ifelse{latex}{\out{Mat\'ern}}{Matern}'s Model I.
+}
+\seealso{
+\code{\link{rpoispp}},
+\code{\link{rMatClust}}
+}
+\examples{
+ X <- rMaternI(20, 0.05)
+ Y <- rMaternI(20, 0.05, stationary=FALSE)
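+ # proposals in a three-dimensional box, as permitted by 'win'
+ Z <- rMaternI(20, 0.1, win=box3(c(0,1)))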
+}
+\author{
+  \adrian,
+  Ute Hahn,
+  \rolf
+  and \ege
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rMaternII.Rd b/man/rMaternII.Rd
new file mode 100644
index 0000000..5a1717f
--- /dev/null
+++ b/man/rMaternII.Rd
@@ -0,0 +1,92 @@
+\name{rMaternII}
+\alias{rMaternII}
+\title{Simulate Matern Model II}
+\description{
+  Generate a random point pattern, a simulated realisation of the
+  \ifelse{latex}{\out{Mat\'ern}}{Matern} Model II inhibition process.
+}
+\usage{
+ rMaternII(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE, ...,
+           nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{kappa}{
+    Intensity of the Poisson process of proposal points.
+    A single positive number.
+  }
+  \item{r}{
+    Inhibition distance.
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    An object of class \code{"owin"}
+    or something acceptable to \code{\link{as.owin}}.
+    Alternatively a higher-dimensional box of class
+    \code{"box3"} or \code{"boxx"}.
+  }
+  \item{stationary}{
+    Logical. Whether to start with a stationary process of proposal points
+    (\code{stationary=TRUE}) or to generate the
+    proposal points only inside the window (\code{stationary=FALSE}).
+  }
+  \item{\dots}{Ignored.}
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\value{
+  A point pattern
+  if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}.
+  Each point pattern is normally an object of class \code{"ppp"},
+  but may be of class \code{"pp3"} or \code{"ppx"} depending on the window.
+}
+\details{
+  This algorithm generates one or more realisations
+  of \ifelse{latex}{\out{Mat\'ern}}{Matern}'s Model II
+  inhibition process inside the window \code{win}.
+
+  The process is constructed by first
+  generating a uniform Poisson point process of ``proposal'' points 
+  with intensity \code{kappa}. If \code{stationary = TRUE} (the
+  default), the proposal points are generated in a window larger than
+  \code{win}, so that the process of proposal points is
+  effectively stationary.
+  If \code{stationary=FALSE} then the proposal points are
+  only generated inside the window \code{win}.
+  
+  Then each proposal point is marked by an ``arrival time'', a number
+  uniformly distributed in \eqn{[0,1]} independently of other variables.
+  
+  A proposal point is deleted if it lies within \code{r} units' distance
+  of another proposal point \emph{that has an earlier arrival time}.
+  Otherwise it is retained.
+  The retained points constitute \ifelse{latex}{\out{Mat\'ern}}{Matern}'s Model II.
+
+  The difference between \ifelse{latex}{\out{Mat\'ern}}{Matern}'s Model I and II is the italicised
+  statement above. Model II has a higher intensity
+  for the same parameter values.
+}
+\seealso{
+\code{\link{rpoispp}},
+\code{\link{rMatClust}},
+\code{\link{rMaternI}}
+}
+\examples{
+ X <- rMaternII(20, 0.05)
+ Y <- rMaternII(20, 0.05, stationary=FALSE)
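+ # Model II typically retains more points than Model I for the same
+ # parameters; a quick empirical check (random, so not a guarantee):
+ nII <- mean(sapply(rMaternII(100, 0.05, nsim=10), npoints))
+ nI <- mean(sapply(rMaternI(100, 0.05, nsim=10), npoints))
+ nII > nI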
+}
+\author{
+  \adrian,
+  Ute Hahn,
+  \rolf
+  and \ege
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rMosaicField.Rd b/man/rMosaicField.Rd
new file mode 100644
index 0000000..91695ad
--- /dev/null
+++ b/man/rMosaicField.Rd
@@ -0,0 +1,60 @@
+\name{rMosaicField}
+\alias{rMosaicField}
+\title{Mosaic Random Field}
+\description{
+  Generate a realisation of a random field
+  which is piecewise constant on the tiles of a given tessellation.
+}
+\usage{
+rMosaicField(X, 
+    rgen = function(n) { sample(0:1, n, replace = TRUE)},
+    ...,
+    rgenargs=NULL)
+}
+\arguments{
+  \item{X}{
+    A tessellation (object of class \code{"tess"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}}
+    determining the pixel resolution.
+  }
+  \item{rgen}{
+    Function that generates random values for the
+    tiles of the tessellation.
+  }
+  \item{rgenargs}{
+    List containing extra arguments that should be passed
+    to \code{rgen} (typically specifying parameters of the
+    distribution of the values).
+  }
+}
+\details{
+  This function generates a realisation of a random field
+  which is piecewise constant on the tiles of the given
+  tessellation \code{X}. The values in each tile
+  are independent and identically distributed.
+}
+\value{
+  A pixel image (object of class \code{"im"}).
+}
+\author{\adrian and \rolf}
+\seealso{
+  \code{\link{rpoislinetess}},
+  \code{\link{rMosaicSet}}
+}
+\examples{
+   X <- rpoislinetess(3)
+   plot(rMosaicField(X, runif))
+   plot(rMosaicField(X, runif, dimyx=256))
+   plot(rMosaicField(X, rnorm, rgenargs=list(mean=10, sd=2)))
+
+   plot(rMosaicField(dirichlet(runifpoint(30)), rnorm))
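+   # integer-valued mosaic: tile values drawn from a Poisson
+   # distribution, with the parameter supplied via 'rgenargs'
+   plot(rMosaicField(X, rpois, rgenargs=list(lambda=3)))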
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rMosaicSet.Rd b/man/rMosaicSet.Rd
new file mode 100644
index 0000000..8f2bf56
--- /dev/null
+++ b/man/rMosaicSet.Rd
@@ -0,0 +1,58 @@
+\name{rMosaicSet}
+\alias{rMosaicSet}
+\title{Mosaic Random Set}
+\description{
+  Generate a random set by taking a random selection of 
+  tiles of a given tessellation.  
+}
+\usage{
+rMosaicSet(X, p=0.5)
+}
+\arguments{
+  \item{X}{
+    A tessellation (object of class \code{"tess"}).
+  }
+  \item{p}{
+    Probability of including a given tile.
+    A number strictly between 0 and 1.
+  }
+}
+\details{
+  Given a tessellation \code{X}, this function 
+  randomly selects some of the tiles of \code{X},
+  including each tile with probability \eqn{p} independently of
+  the other tiles. The selected tiles are then combined to form a
+  set in the plane. 
+
+  One application of this is Switzer's (1965) example of a random set
+  which has a Markov property. It is constructed by generating \code{X}
+  according to a Poisson line tessellation (see \code{\link{rpoislinetess}}).
+}
+\value{
+  A window (object of class \code{"owin"}).
+}
+\references{
+  Switzer, P. (1965)
+  A random set process in the plane with a Markovian property.
+  \emph{Annals of Mathematical Statistics} \bold{36}, 1859--1863.
+}
+\author{\adrian and \rolf}
+\seealso{
+  \code{\link{rpoislinetess}},
+  \code{\link{rMosaicField}}
+}
+\examples{
+   # Switzer's random set
+   X <- rpoislinetess(3)
+   plot(rMosaicSet(X, 0.5), col="green", border=NA)
+
+   # another example
+   plot(rMosaicSet(dirichlet(runifpoint(30)), 0.4))
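+   # each tile is included with probability p, so the area fraction
+   # of the set should be roughly p (a rough sanity check)
+   S <- rMosaicSet(dirichlet(runifpoint(100)), 0.3)
+   area(S) / area(Frame(S))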
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rNeymanScott.Rd b/man/rNeymanScott.Rd
new file mode 100644
index 0000000..b88eaef
--- /dev/null
+++ b/man/rNeymanScott.Rd
@@ -0,0 +1,235 @@
+\name{rNeymanScott}
+\alias{rNeymanScott}
+\title{Simulate Neyman-Scott Process}
+\description{
+  Generate a random point pattern, a realisation of the
+  Neyman-Scott cluster process.
+}
+\usage{
+ rNeymanScott(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)),
+              \dots, lmax=NULL, nsim=1, drop=TRUE,
+              nonempty=TRUE, saveparents=TRUE)
+}
+\arguments{
+  \item{kappa}{
+    Intensity of the Poisson process of cluster centres.
+    A single positive number, a function, or a pixel image.
+  }
+  \item{expand}{
+    Size of the expansion of the simulation window for generating parent
+    points. A single non-negative number.
+  }
+  \item{rcluster}{
+    A function which generates random clusters,
+    or other data specifying the random cluster mechanism.
+    See Details.
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    An object of class \code{"owin"}
+    or something acceptable to \code{\link{as.owin}}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{rcluster}.
+  }
+  \item{lmax}{
+    Optional. Upper bound on the values of \code{kappa}
+    when \code{kappa} is a function or pixel image.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+  \item{nonempty}{
+    Logical. If \code{TRUE} (the default), a more efficient algorithm is
+    used, in which parents are generated conditionally on having at
+    least one offspring point. If \code{FALSE}, parents are generated
+    even if they have no offspring. Both choices are valid; the default
+    is recommended unless you need to simulate all the parent points
+    for some other purpose.
+  }
+  \item{saveparents}{
+    Logical value indicating whether to save the locations of the
+    parent points as an attribute.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"}) if \code{nsim=1},
+  or a list of point patterns if \code{nsim > 1}.
+  
+  Additionally,  some intermediate results of the simulation are
+  returned as attributes of this point pattern: see Details.
+}
+\details{
+  This algorithm generates a realisation of the
+  general Neyman-Scott process, with the cluster mechanism
+  given by the function \code{rcluster}. 
+
+  First, the algorithm generates a Poisson point process of
+  \dQuote{parent} points with intensity \code{kappa} in an expanded
+  window as explained below. Here \code{kappa}
+  may be a single positive number,
+  a function \code{kappa(x,y)},
+  or a pixel image object of class \code{"im"} (see
+  \code{\link{im.object}}).  See \code{\link{rpoispp}} for details.
+
+  Second, each parent point is replaced by a random cluster
+  of points. These clusters are combined together to yield a
+  single point pattern, and the restriction of this pattern to the
+  window \code{win} is then returned as the result of
+  \code{rNeymanScott}.
+
+  The expanded window consists of \code{\link{as.rectangle}(win)}
+  extended by the amount \code{expand} in each direction. The size of
+  the expansion is saved in the attribute \code{"expand"} and may be
+  extracted by \code{attr(X, "expand")} where \code{X} is the generated
+  point pattern.  
+
+  The argument \code{rcluster} specifies the cluster mechanism.
+  It may be either:
+  \itemize{
+    \item
+    A \code{function} which will be called to generate each random
+    cluster (the offspring points of each parent point).
+    The function should expect to be called
+    in the form \code{rcluster(x0,y0,\dots)} for a parent point at a location
+    \code{(x0,y0)}. The return value of \code{rcluster}
+    should specify the coordinates of the points in the cluster;
+    it may be a list containing elements
+    \code{x,y}, or a point pattern (object of
+    class \code{"ppp"}). If it is a marked point pattern then the result of
+    \code{rNeymanScott} will be a marked point pattern.
+    \item
+    A \code{list(mu, f)} where \code{mu} specifies the mean
+    number of offspring points in each cluster, and \code{f}
+    generates the random displacements (vectors pointing from the parent
+    to the offspring). In this case, the number of offspring
+    in a cluster is assumed to have a Poisson distribution, implying
+    that the Neyman-Scott process is also a Cox process.
+    The first element \code{mu} should be either a single nonnegative
+    number (interpreted as the mean of the Poisson distribution of
+    cluster size)
+    or a pixel image or a \code{function(x,y)} giving a spatially
+    varying mean cluster size (interpreted in the sense of
+    Waagepetersen, 2007).
+    The second element \code{f} should be a function that will be
+    called once in the form \code{f(n)} to generate \code{n} independent
+    and identically distributed displacement vectors (i.e. as if there
+    were a cluster of size \code{n} with a parent at the origin
+    \code{(0,0)}). 
+    The function should return
+    a point pattern (object of class \code{"ppp"})
+    or something acceptable to \code{\link[grDevices]{xy.coords}}
+    that specifies the coordinates of \code{n} points. 
+  }
+
+  If required, the intermediate stages of the simulation (the
+  parents and the individual clusters) can also be extracted from
+  the return value of \code{rNeymanScott} through the attributes
+  \code{"parents"} and \code{"parentid"}.  The attribute
+  \code{"parents"} is the point pattern of parent points.
+  The attribute \code{"parentid"} is an integer vector specifying
+  the parent for each of the points in the simulated pattern.
+
+  Neyman-Scott models where \code{kappa} is a single number
+  and \code{rcluster = list(mu,f)} can be fitted to data
+  using the function \code{\link{kppm}}.
+}
+\section{Inhomogeneous Neyman-Scott Processes}{
+  There are several different ways of specifying a spatially inhomogeneous
+  Neyman-Scott process:
+  \itemize{
+    \item
+    The point process of parent points can be inhomogeneous. 
+    If the argument \code{kappa} is a \code{function(x,y)} or a pixel
+    image (object of class \code{"im"}), then it is taken as specifying
+    the intensity function of an inhomogeneous Poisson process according
+    to which the parent points are generated.
+    \item
+    The number of points in a typical cluster can
+    be spatially varying.
+    If the argument \code{rcluster} is a list of two elements
+    \code{mu, f} and the first entry \code{mu} is a 
+    \code{function(x,y)} or a pixel image (object of class \code{"im"}),
+    then \code{mu} is interpreted as the reference intensity
+    for offspring points, in the sense of Waagepetersen (2007).
+    For a given parent point, the offspring constitute a Poisson process
+    with intensity function equal to \code{mu(x, y) * g(x-x0, y-y0)}
+    where \code{g} is the probability density of the offspring
+    displacements generated by the function \code{f}.
+
+    Equivalently, clusters are first generated with a constant
+    expected number of points per cluster: the constant is \code{mumax}, the
+    maximum of \code{mu}. Then the offspring are randomly \emph{thinned}
+    (see \code{\link{rthin}}) with spatially-varying retention
+    probabilities given by \code{mu/mumax}.  
+    \item
+    The entire mechanism for generating a cluster can
+    be dependent on the location of the parent point.
+    If the argument \code{rcluster} is a function,
+    then the cluster associated with a parent point at location
+    \code{(x0,y0)} will be generated by calling
+    \code{rcluster(x0, y0, \dots)}. The behaviour of this function
+    could depend on the location \code{(x0,y0)} in any fashion.
+  }
+
+  Note that if \code{kappa} is an
+  image, the spatial domain covered by this image must be large
+  enough to include the \emph{expanded} window in which the parent
+  points are to be generated. This requirement means that \code{win} must
+  be small enough so that the expansion of \code{as.rectangle(win)}
+  is contained in the spatial domain of \code{kappa}.  As a result,
+  one may wind up having to simulate the process in a window smaller
+  than what is really desired.
+
+  In the first two cases, the intensity of the Neyman-Scott process
+  is equal to \code{kappa * mu} if at least one of \code{kappa} or
+  \code{mu} is a single number, and is otherwise equal to an
+  integral involving \code{kappa}, \code{mu} and \code{f}.
+}
+\seealso{
+  \code{\link{rpoispp}},
+  \code{\link{rThomas}},
+  \code{\link{rGaussPoisson}},
+  \code{\link{rMatClust}},
+  \code{\link{rCauchy}},
+  \code{\link{rVarGamma}}
+}
+\examples{
+  # each cluster consists of 5 points in a disc of radius 0.2
+  nclust <- function(x0, y0, radius, n) {
+              return(runifdisc(n, radius, centre=c(x0, y0)))
+            }
+  plot(rNeymanScott(10, 0.2, nclust, radius=0.2, n=5))
+
+  # multitype Neyman-Scott process (each cluster is a multitype process)
+  nclust2 <- function(x0, y0, radius, n, types=c("a", "b")) {
+     X <- runifdisc(n, radius, centre=c(x0, y0))
+     M <- sample(types, n, replace=TRUE)
+     marks(X) <- M
+     return(X)
+  }
+  plot(rNeymanScott(15, 0.1, nclust2, radius=0.1, n=5))
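+  # a sketch of the list(mu, f) form of 'rcluster': Poisson(5) cluster
+  # sizes and isotropic Gaussian displacements with sd 0.05, giving a
+  # Thomas-type process; expand=0.2 truncates the (unbounded) Gaussian
+  # kernel at 4 standard deviations
+  fgauss <- function(n) { list(x=rnorm(n, 0, 0.05), y=rnorm(n, 0, 0.05)) }
+  plot(rNeymanScott(10, 0.2, list(5, fgauss)))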
+}
+\references{
+  Neyman, J. and Scott, E.L. (1958)
+  A statistical approach to problems of cosmology.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{20}, 1--43.
+
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rPenttinen.Rd b/man/rPenttinen.Rd
new file mode 100644
index 0000000..b8ae819
--- /dev/null
+++ b/man/rPenttinen.Rd
@@ -0,0 +1,124 @@
+\name{rPenttinen}
+\alias{rPenttinen}
+\title{Perfect Simulation of the Penttinen Process}
+\description{
+  Generate a random pattern of points, a simulated realisation
+  of the Penttinen process, using a perfect simulation algorithm.
+}
+\usage{
+  rPenttinen(beta, gamma=1, R, W = owin(), expand=TRUE, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{beta}{
+    intensity parameter (a positive number).
+  }
+  \item{gamma}{
+    interaction strength parameter (a number between 0 and 1).
+  }
+  \item{R}{
+    disc radius (a non-negative number).
+  }
+  \item{W}{
+    window (object of class \code{"owin"}) in which to
+    generate the random pattern. 
+  }
+  \item{expand}{
+    Logical. If \code{FALSE}, simulation is performed
+    in the window \code{W}, which must be rectangular.
+    If \code{TRUE} (the default), simulation is performed
+    on a larger window, and the result is clipped to the original
+    window \code{W}.
+    Alternatively \code{expand} can be an object of class 
+    \code{"rmhexpand"} (see \code{\link{rmhexpand}})
+    determining the expansion method.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}  
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\details{
+  This function generates a realisation of the 
+  Penttinen point process in the window \code{W}
+  using a \sQuote{perfect simulation} algorithm.
+
+  Penttinen (1984, Example 2.1, page 18), citing Cormack (1979),
+  described the pairwise interaction point process with interaction factor
+  \deqn{
+    h(d) = e^{\theta A(d)} = \gamma^{A(d)}
+  }{
+    h(d) = exp(theta * A(d)) = gamma^(A(d))
+  }
+  between each pair of points separated by a distance \eqn{d}.
+  Here \eqn{A(d)} is the area of intersection between two discs
+  of radius \eqn{R} separated by a distance \eqn{d}, normalised so that
+  \eqn{A(0) = 1}.
+
+  The simulation algorithm used to generate the point pattern
+  is \sQuote{dominated coupling from the past}
+  as implemented by
+  Berthelsen and \ifelse{latex}{\out{M\o ller}}{Moller} (2002, 2003).
+  This is a \sQuote{perfect simulation} or \sQuote{exact simulation}
+  algorithm, so called because the output of the algorithm is guaranteed
+  to have the correct probability distribution exactly (unlike the
+  Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output
+  is only approximately correct).
+
+  There is a tiny chance that the algorithm will 
+  run out of space before it has terminated. If this occurs, an error
+  message will be generated.
+}
+\value{
+  If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}).
+  If \code{nsim > 1}, a list of point patterns.
+}
+\references{
+  Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2002)
+  A primer on perfect simulation for spatial point processes.
+  \emph{Bulletin of the Brazilian Mathematical Society} \bold{33}, 351--367.
+
+  Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2003)
+  Likelihood and non-parametric Bayesian MCMC inference 
+  for spatial point processes based on perfect simulation and
+  path sampling. 
+  \emph{Scandinavian Journal of Statistics} \bold{30}, 549--564.
+
+  Cormack, R.M. (1979)
+  Spatial aspects of competition between individuals.
+  Pages 151--212 in \emph{Spatial and Temporal Analysis in Ecology},
+  eds. R.M. Cormack and J.K. Ord, International Co-operative
+  Publishing House, Fairland, MD, USA. 
+
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003).
+  \emph{Statistical Inference and Simulation for Spatial Point Processes.}
+  Chapman and Hall/CRC.
+
+  Penttinen, A. (1984) 
+  \emph{Modelling Interaction in Spatial Point Patterns:
+  Parameter Estimation by the Maximum Likelihood Method.}
+  \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}
+  Studies in Computer Science, Economics and Statistics \bold{7},
+  University of \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}, Finland.
+}
+\author{
+  \adrian,
+  based on original code for the Strauss process by 
+  Kasper Klitgaard Berthelsen.
+}
+\examples{
+   X <- rPenttinen(50, 0.5, 0.02)
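+   # gamma close to 1 means weak interaction: the result is close to
+   # a Poisson pattern
+   Y <- rPenttinen(50, 0.9, 0.02)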
+}
+\seealso{
+  \code{\link{rmh}},
+  \code{\link{Penttinen}}.
+
+  \code{\link{rStrauss}},
+  \code{\link{rHardcore}},
+  \code{\link{rStraussHard}},
+  \code{\link{rDiggleGratton}},
+  \code{\link{rDGS}}.
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rPoissonCluster.Rd b/man/rPoissonCluster.Rd
new file mode 100644
index 0000000..ee77ffc
--- /dev/null
+++ b/man/rPoissonCluster.Rd
@@ -0,0 +1,133 @@
+\name{rPoissonCluster}
+\alias{rPoissonCluster}
+\title{Simulate Poisson Cluster Process}
+\description{
+  Generate a random point pattern, a realisation of the
+  general Poisson cluster process.
+}
+\usage{
+ rPoissonCluster(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)),
+                 \dots, lmax=NULL, nsim=1, drop=TRUE, saveparents=TRUE)
+}
+\arguments{
+  \item{kappa}{
+    Intensity of the Poisson process of cluster centres.
+    A single positive number, a function, or a pixel image.
+  }
+  \item{expand}{
+    Size of the expansion of the simulation window for generating parent
+    points. A single non-negative number.
+  }
+  \item{rcluster}{
+    A function which generates random clusters.
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    An object of class \code{"owin"}
+    or something acceptable to \code{\link{as.owin}}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{rcluster}.
+  }
+  \item{lmax}{
+    Optional. Upper bound on the values of \code{kappa}
+    when \code{kappa} is a function or pixel image.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+  \item{saveparents}{
+    Logical value indicating whether to save the locations of the
+    parent points as an attribute.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"})
+  if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}.
+  
+  Additionally,  some intermediate results of the simulation are
+  returned as attributes of the point pattern: see Details.
+}
+\details{
+  This algorithm generates a realisation of the
+  general Poisson cluster process, with the cluster mechanism
+  given by the function \code{rcluster}. 
+
+  First, the algorithm
+  generates a Poisson point process of ``parent'' points 
+  with intensity \code{kappa} in an expanded
+  window as explained below. Here \code{kappa} may be a single
+  positive number, a function \code{kappa(x, y)}, or a pixel image
+  object of class \code{"im"} (see \code{\link{im.object}}).
+  See \code{\link{rpoispp}} for details.
+  
+  Second, each parent point is replaced by a random cluster of points,
+  created by calling the function \code{rcluster}.  These clusters are
+  combined together to yield a single point pattern, and the restriction
+  of this pattern to the window \code{win} is then returned as the
+  result of \code{rPoissonCluster}.
+
+  The expanded window consists of \code{\link{as.rectangle}(win)}
+  extended by the amount \code{expand} in each direction. The size of
+  the expansion is saved in the attribute \code{"expand"} and may be
+  extracted by \code{attr(X, "expand")} where \code{X} is the generated
+  point pattern.
+
+  The function \code{rcluster} should expect to be called as
+  \code{rcluster(xp[i],yp[i],\dots)} for each parent point at a location
+  \code{(xp[i],yp[i])}. The return value of \code{rcluster}
+  should be a list with elements
+  \code{x,y} which are vectors of equal length giving the absolute
+  \eqn{x} and \eqn{y} coordinates of the points in the cluster.
+
+  If the return value of \code{rcluster} is a point pattern (object of
+  class \code{"ppp"}) then it may have marks. The result of
+  \code{rPoissonCluster} will then be a marked point pattern.
+
+  If required, the intermediate stages of the simulation (the parents
+  and the individual clusters) can also be extracted from
+  the return value of \code{rPoissonCluster}
+  through the attributes \code{"parents"} and \code{"parentid"}.
+  The attribute \code{"parents"} is the point pattern of parent points.
+  The attribute \code{"parentid"} is an integer vector specifying
+  the parent for each of the points in the simulated pattern.
+  (If these data are not required, it is more efficient to
+  set \code{saveparents=FALSE}.)
+}
+\seealso{
+  \code{\link{rpoispp}},
+  \code{\link{rMatClust}},
+  \code{\link{rThomas}},
+  \code{\link{rCauchy}},
+  \code{\link{rVarGamma}},
+  \code{\link{rNeymanScott}},
+  \code{\link{rGaussPoisson}}.
+}
+\examples{
+  # each cluster consists of 5 points in a disc of radius 0.2
+  nclust <- function(x0, y0, radius, n) {
+              return(runifdisc(n, radius, centre=c(x0, y0)))
+            }
+  plot(rPoissonCluster(10, 0.2, nclust, radius=0.2, n=5))
+
+  # multitype Neyman-Scott process (each cluster is a multitype process)
+  nclust2 <- function(x0, y0, radius, n, types=c("a", "b")) {
+     X <- runifdisc(n, radius, centre=c(x0, y0))
+     M <- sample(types, n, replace=TRUE)
+     marks(X) <- M
+     return(X)
+  }
+  plot(rPoissonCluster(15, 0.1, nclust2, radius=0.1, n=5))
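+  # retrieving the intermediate results described in Details
+  X <- rPoissonCluster(10, 0.2, nclust, radius=0.2, n=5)
+  parents <- attr(X, "parents")
+  parentid <- attr(X, "parentid")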
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rQuasi.Rd b/man/rQuasi.Rd
new file mode 100644
index 0000000..daa9002
--- /dev/null
+++ b/man/rQuasi.Rd
@@ -0,0 +1,57 @@
+\name{rQuasi}
+\alias{rQuasi}
+\title{
+  Generate Quasirandom Point Pattern in Given Window
+}
+\description{
+  Generates a quasirandom pattern of points in any two-dimensional window.
+}
+\usage{
+rQuasi(n, W, type = c("Halton", "Hammersley"), ...)
+}
+\arguments{
+  \item{n}{
+    Maximum number of points to be generated.
+  }
+  \item{W}{
+    Window (object of class \code{"owin"}) in which to generate
+    the points.
+  }
+  \item{type}{
+    String identifying the quasirandom generator.
+  }
+  \item{\dots}{
+    Arguments passed to the quasirandom generator.
+  }
+}
+\details{
+  This function generates a quasirandom point pattern,
+  using the quasirandom sequence generator
+  \code{\link{Halton}} or \code{\link{Hammersley}}
+  as specified.
+
+  If \code{W} is a rectangle, 
+  exactly \code{n} points will be generated.
+
+  If \code{W} is not a rectangle, \code{n} points will be generated in the
+  containing rectangle \code{as.rectangle(W)},
+  and only the points lying inside \code{W} will be retained.
+}
+\value{
+  Point pattern (object of class \code{"ppp"}) inside the window \code{W}.
+}
+\seealso{
+  \code{\link{Halton}}
+}
+\examples{
+   plot(rQuasi(256, letterR))
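+   # the Hammersley generator, selected via 'type'
+   plot(rQuasi(256, letterR, type="Hammersley"))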
+}
+\author{\adrian, \rolf and \ege.}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rSSI.Rd b/man/rSSI.Rd
new file mode 100644
index 0000000..2b64488
--- /dev/null
+++ b/man/rSSI.Rd
@@ -0,0 +1,136 @@
+\name{rSSI}
+\alias{rSSI}
+\title{Simulate Simple Sequential Inhibition}
+\description{
+  Generate a random point pattern, a realisation of the
+  Simple Sequential Inhibition (SSI) process.
+}
+\usage{
+ rSSI(r, n=Inf, win = square(1), giveup = 1000, x.init=NULL, ...,
+      f=NULL, fmax=NULL, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{r}{
+    Inhibition distance.
+  }
+  \item{n}{
+    Maximum number of points allowed. 
+    If \code{n} is finite, stop when the \emph{total} number of points
+    in the point pattern reaches \code{n}.
+    If \code{n} is infinite (the default), stop only when
+    it is apparently impossible to add any more points.
+    See \bold{Details}.
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    An object of class \code{"owin"}
+    or something acceptable to \code{\link{as.owin}}.
+    The default window is the unit square, unless 
+    \code{x.init} is specified, when the default window 
+    is the window of \code{x.init}.
+  }
+  \item{giveup}{
+    Number of rejected proposals after which the algorithm should terminate.
+  }
+  \item{x.init}{
+    Optional. Initial configuration of points. A point pattern
+    (object of class \code{"ppp"}).  The pattern returned by
+    \code{rSSI} consists of this pattern together with the points
+    added via simple sequential inhibition.  See \bold{Details}.
+  }
+  \item{\dots}{Ignored.}
+  \item{f,fmax}{
+    Optional arguments passed to \code{\link{rpoint}}
+    to specify a non-uniform probability density for the random points.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"})
+  if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}.
+}
+\details{
+  This algorithm generates one or more realisations of the Simple Sequential
+  Inhibition point process inside the window \code{win}.
+
+  Starting with an empty window (or with the point pattern
+  \code{x.init} if specified), the algorithm adds points
+  one-by-one. Each new point is generated uniformly in the window
+  and independently of preceding points. If the new point lies
+  closer than \code{r} units from an existing point, then it is
+  rejected and another random point is generated.
+  The algorithm terminates when either
+  \describe{
+    \item{(a)}{
+      the desired number \code{n} of points is reached, or
+    }
+    \item{(b)}{
+      the current point configuration
+      has not changed for \code{giveup} iterations,
+      suggesting that it is no longer possible to add new points.
+    }
+  }
+  If \code{n} is infinite (the default) then the algorithm terminates
+  only when (b) occurs. The result is sometimes called a
+  \emph{Random Sequential Packing}.
+
+  Note that argument \code{n} specifies the maximum permitted
+  \bold{total} number of points in the pattern returned by
+  \code{rSSI()}. If \code{x.init} is not \code{NULL} then
+  the number of points that are \emph{added}
+  is at most \code{n - npoints(x.init)} if \code{n} is finite.
+
+  Thus if \code{x.init} is not \code{NULL} then argument \code{n}
+  must be at least as large as \code{npoints(x.init)}, otherwise
+  an error is given.  If \code{n==npoints(x.init)} then a warning
+  is given and the call to \code{rSSI()} has no real effect;
+  \code{x.init} is returned.
+
+  There is no requirement that the points of \code{x.init} be at
+  a distance at least \code{r} from each other.  All of the \emph{added}
+  points will be at a distance at least \code{r} from each other
+  and from any point of \code{x.init}.
+
+  The points will be generated inside the window \code{win}
+  and the result will be a point pattern in the same window.
+  
+  The default window is the unit square, \code{win = square(1)},
+  unless \code{x.init} is specified, when the default
+  is \code{win=Window(x.init)}, the window of \code{x.init}.
+
+  If both \code{win} and \code{x.init} are specified, and if the
+  two windows are different, then a warning will be issued.
+  Any points of \code{x.init} lying outside \code{win} will be removed,
+  with a warning.
+}
+\seealso{
+\code{\link{rpoispp}},
+\code{\link{rMaternI}},
+\code{\link{rMaternII}}.
+}
+\examples{
+ Vinf <- rSSI(0.07)
+
+ V100 <- rSSI(0.07, 100)
+
+ X <- runifpoint(100)
+ Y <- rSSI(0.03, 142, x.init=X) # Y consists of X together with
+                                # 42 added points.
+ plot(Y, main="rSSI")
+ plot(X, add=TRUE, chars=20, cols="red")
+
+ ## inhomogeneous
+ Z <- rSSI(0.07, 50, f=function(x,y){x})
+ plot(Z)
+}
+\author{
+\spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rStrauss.Rd b/man/rStrauss.Rd
new file mode 100644
index 0000000..40a9898
--- /dev/null
+++ b/man/rStrauss.Rd
@@ -0,0 +1,143 @@
+\name{rStrauss}
+\alias{rStrauss}
+\title{Perfect Simulation of the Strauss Process}
+\description{
+  Generate a random pattern of points, a simulated realisation
+  of the Strauss process, using a perfect simulation algorithm.
+}
+\usage{
+  rStrauss(beta, gamma = 1, R = 0, W = owin(), expand=TRUE, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{beta}{
+    intensity parameter (a positive number).
+  }
+  \item{gamma}{
+    interaction parameter (a number between 0 and 1, inclusive).
+  }
+  \item{R}{
+    interaction radius (a non-negative number).
+  }
+  \item{W}{
+    window (object of class \code{"owin"}) in which to
+    generate the random pattern. 
+  }
+  \item{expand}{
+    Logical. If \code{FALSE}, simulation is performed
+    in the window \code{W}, which must be rectangular.
+    If \code{TRUE} (the default), simulation is performed
+    on a larger window, and the result is clipped to the original
+    window \code{W}.
+    Alternatively \code{expand} can be an object of class 
+    \code{"rmhexpand"} (see \code{\link{rmhexpand}})
+    determining the expansion method.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\details{
+  This function generates a realisation of the 
+  Strauss point process in the window \code{W}
+  using a \sQuote{perfect simulation} algorithm.
+
+  The Strauss process (Strauss, 1975; Kelly and Ripley, 1976)
+  is a model for spatial inhibition, ranging from
+  a strong `hard core' inhibition to a completely random pattern
+  according to the value of \code{gamma}.
+
+  The Strauss process with interaction radius \eqn{R} and 
+  parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}
+  is the pairwise interaction point process
+  with probability density 
+  \deqn{
+    f(x_1,\ldots,x_n) =
+    \alpha \beta^{n(x)} \gamma^{s(x)}
+  }{
+    f(x_1,\ldots,x_n) =
+    alpha . beta^n(x) gamma^s(x)
+  }
+  where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the 
+  points of the pattern, \eqn{n(x)} is the number of points in the
+  pattern, \eqn{s(x)} is the number of distinct unordered pairs of
+  points that are closer than \eqn{R} units apart,
+  and \eqn{\alpha}{alpha} is the normalising constant.
+  Intuitively, each point of the pattern
+  contributes a factor \eqn{\beta}{beta} to the 
+  probability density, and each pair of points
+  closer than \eqn{R} units apart contributes a factor
+  \eqn{\gamma}{gamma} to the density.
+
+  The interaction parameter \eqn{\gamma}{gamma} must be less than
+  or equal to \eqn{1} in order that the process be well-defined
+  (Kelly and Ripley, 1976).
+  This model describes an ``ordered'' or ``inhibitive'' pattern.
+  If \eqn{\gamma=1}{gamma=1} it reduces to a Poisson process
+  (complete spatial randomness) with intensity \eqn{\beta}{beta}.
+  If \eqn{\gamma=0}{gamma=0} it is called a ``hard core process''
+  with hard core radius \eqn{R/2}, since no pair of points is permitted
+  to lie closer than \eqn{R} units apart.
+
+  The simulation algorithm used to generate the point pattern
+  is \sQuote{dominated coupling from the past}
+  as implemented by Berthelsen and \ifelse{latex}{\out{M\o ller}}{Moller} (2002, 2003).
+  This is a \sQuote{perfect simulation} or \sQuote{exact simulation}
+  algorithm, so called because the output of the algorithm is guaranteed
+  to have the correct probability distribution exactly (unlike the
+  Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output
+  is only approximately correct).
+
+  There is a tiny chance that the algorithm will 
+  run out of space before it has terminated. If this occurs, an error
+  message will be generated.
+}
+\value{
+  If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}).
+  If \code{nsim > 1}, a list of point patterns.
+}
+\references{
+  Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2002)
+  A primer on perfect simulation for spatial point processes.
+  \emph{Bulletin of the Brazilian Mathematical Society} \bold{33}, 351--367.
+
+  Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2003)
+  Likelihood and non-parametric Bayesian MCMC inference 
+  for spatial point processes based on perfect simulation and
+  path sampling. 
+  \emph{Scandinavian Journal of Statistics} \bold{30}, 549--564.
+
+  Kelly, F.P. and Ripley, B.D. (1976)
+  On Strauss's model for clustering.
+  \emph{Biometrika} \bold{63}, 357--360.
+
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003).
+  \emph{Statistical Inference and Simulation for Spatial Point Processes.}
+  Chapman and Hall/CRC.
+
+  Strauss, D.J. (1975)
+  A model for clustering.
+  \emph{Biometrika} \bold{62}, 467--475.
+}
+\author{
+  Kasper Klitgaard Berthelsen,
+  adapted for \pkg{spatstat} by \adrian
+}
+\examples{
+   X <- rStrauss(0.05, 0.2, 1.5, square(141.4))
+   Z <- rStrauss(100, 0.7, 0.05)
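+   # gamma=0 gives a hard core process with hard core radius R/2
+   H <- rStrauss(100, 0, 0.05)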
+}
+\seealso{
+  \code{\link{rmh}},
+  \code{\link{Strauss}},
+  \code{\link{rHardcore}},
+  \code{\link{rStraussHard}},
+  \code{\link{rDiggleGratton}},
+  \code{\link{rDGS}},
+  \code{\link{rPenttinen}}.
+}
+
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rStraussHard.Rd b/man/rStraussHard.Rd
new file mode 100644
index 0000000..2bc5433
--- /dev/null
+++ b/man/rStraussHard.Rd
@@ -0,0 +1,112 @@
+\name{rStraussHard}
+\alias{rStraussHard}
+\title{Perfect Simulation of the Strauss-Hardcore Process}
+\description{
+  Generate a random pattern of points, a simulated realisation
+  of the Strauss-Hardcore process, using a perfect simulation algorithm.
+}
+\usage{
+  rStraussHard(beta, gamma = 1, R = 0, H = 0, W = owin(),
+               expand=TRUE, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{beta}{
+    intensity parameter (a positive number).
+  }
+  \item{gamma}{
+    interaction parameter (a number between 0 and 1, inclusive).
+  }
+  \item{R}{
+    interaction radius (a non-negative number).
+  }
+  \item{H}{
+    hard core distance (a non-negative number smaller than \code{R}).
+  }
+  \item{W}{
+    window (object of class \code{"owin"}) in which to
+    generate the random pattern. Currently this must be a rectangular
+    window.
+  }
+  \item{expand}{
+    Logical. If \code{FALSE}, simulation is performed
+    in the window \code{W}, which must be rectangular.
+    If \code{TRUE} (the default), simulation is performed
+    on a larger window, and the result is clipped to the original
+    window \code{W}.
+    Alternatively \code{expand} can be an object of class 
+    \code{"rmhexpand"} (see \code{\link{rmhexpand}})
+    determining the expansion method.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\details{
+  This function generates a realisation of the 
+  Strauss-Hardcore point process in the window \code{W}
+  using a \sQuote{perfect simulation} algorithm.
+
+  The Strauss-Hardcore process is described in \code{\link{StraussHard}}.
+
+  The simulation algorithm used to generate the point pattern
+  is \sQuote{dominated coupling from the past}
+  as implemented by Berthelsen and \ifelse{latex}{\out{M\o ller}}{Moller} (2002, 2003).
+  This is a \sQuote{perfect simulation} or \sQuote{exact simulation}
+  algorithm, so called because the output of the algorithm is guaranteed
+  to have the correct probability distribution exactly (unlike the
+  Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output
+  is only approximately correct).
+
+  A limitation of the perfect simulation algorithm
+  is that the interaction parameter
+  \eqn{\gamma}{gamma} must be less than or equal to \eqn{1}.
+  To simulate a Strauss-hardcore process with
+  \eqn{\gamma > 1}{gamma > 1}, use \code{\link{rmh}}.
+
+  There is a tiny chance that the algorithm will 
+  run out of space before it has terminated. If this occurs, an error
+  message will be generated.
+}
+\value{
+  If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}).
+  If \code{nsim > 1}, a list of point patterns.
+}
+\references{
+  Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2002)
+  A primer on perfect simulation for spatial point processes.
+  \emph{Bulletin of the Brazilian Mathematical Society} \bold{33}, 351--367.
+
+  Berthelsen, K.K. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2003)
+  Likelihood and non-parametric Bayesian MCMC inference 
+  for spatial point processes based on perfect simulation and
+  path sampling. 
+  \emph{Scandinavian Journal of Statistics} \bold{30}, 549--564.
+
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Waagepetersen, R. (2003).
+  \emph{Statistical Inference and Simulation for Spatial Point Processes.}
+  Chapman and Hall/CRC.
+}
+\author{
+  Kasper Klitgaard Berthelsen and \adrian
+}
+\examples{
+   Z <- rStraussHard(100, 0.7, 0.05, 0.02)
+}
+\seealso{
+  \code{\link{rmh}},
+  \code{\link{StraussHard}}.
+
+  \code{\link{rHardcore}},
+  \code{\link{rStrauss}},
+  \code{\link{rDiggleGratton}},
+  \code{\link{rDGS}},
+  \code{\link{rPenttinen}}.
+}
+
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rThomas.Rd b/man/rThomas.Rd
new file mode 100644
index 0000000..1a5c37b
--- /dev/null
+++ b/man/rThomas.Rd
@@ -0,0 +1,183 @@
+\name{rThomas}
+\alias{rThomas}
+\title{Simulate Thomas Process}
+\description{
+  Generate a random point pattern, a realisation of the
+  Thomas cluster process.
+}
+\usage{
+  rThomas(kappa, scale, mu, win = owin(c(0,1),c(0,1)),
+          nsim=1, drop=TRUE, 
+          saveLambda=FALSE, expand = 4*scale, ...,
+          poisthresh=1e-6, saveparents=TRUE)
+}
+\arguments{
+  \item{kappa}{
+    Intensity of the Poisson process of cluster centres.
+    A single positive number, a function, or a pixel image.
+  }
+  \item{scale}{
+    Standard deviation of random displacement (along each coordinate axis)
+    of a point from its cluster centre.
+  }
+  \item{mu}{
+    Mean number of points per cluster (a single positive number)
+    or reference intensity for the cluster points (a function or
+    a pixel image).
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    An object of class \code{"owin"}
+    or something acceptable to \code{\link{as.owin}}.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+  \item{saveLambda}{
+    Logical. If \code{TRUE} then the random intensity corresponding to
+    the simulated parent points will also be calculated and saved,
+    and returned as an attribute of the point pattern.
+  }
+  \item{expand}{
+    Numeric. Size of window expansion for generation of parent
+    points. Has a sensible default.
+  }
+  \item{\dots}{
+    Passed to \code{\link{clusterfield}} to control the image
+    resolution when \code{saveLambda=TRUE} and to
+    \code{\link{clusterradius}} when \code{expand} is missing.
+  }
+  \item{poisthresh}{
+    Numerical threshold below which the model will be treated
+    as a Poisson process. See Details.
+  }
+  \item{saveparents}{
+    Logical value indicating whether to save the locations of the
+    parent points as an attribute.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"}) if \code{nsim=1},
+  or a list of point patterns if \code{nsim > 1}.
+
+  Additionally, some intermediate results of the simulation are returned
+  as attributes of this point pattern (see
+  \code{\link{rNeymanScott}}). Furthermore, the simulated intensity
+  function is returned as an attribute \code{"Lambda"}, if
+  \code{saveLambda=TRUE}.
+}
+\details{
+  This algorithm generates a realisation of the (`modified')
+  Thomas process, a special case of the Neyman-Scott process,
+  inside the window \code{win}.
+
+  In the simplest case, where \code{kappa} and \code{mu}
+  are single numbers, the algorithm 
+  generates a uniform Poisson point process of \dQuote{parent} points 
+  with intensity \code{kappa}. Then each parent point is
+  replaced by a random cluster of \dQuote{offspring} points,
+  the number of points per cluster being Poisson (\code{mu})
+  distributed, and their
+  positions being isotropic Gaussian displacements from the
+  cluster parent location. The resulting point pattern
+  is a realisation of the classical
+  \dQuote{stationary Thomas process} generated inside the window \code{win}.
+  This point process has intensity \code{kappa * mu}.
+
+  The algorithm can also generate spatially inhomogeneous versions of
+  the Thomas process:
+  \itemize{
+    \item The parent points can be spatially inhomogeneous.
+    If the argument \code{kappa} is a \code{function(x,y)}
+    or a pixel image (object of class \code{"im"}), then it is taken
+    as specifying the intensity function of an inhomogeneous Poisson
+    process that generates the parent points.
+    \item The offspring points can be inhomogeneous. If the
+    argument \code{mu} is a \code{function(x,y)}
+    or a pixel image (object of class \code{"im"}), then it is
+    interpreted as the reference density for offspring points,
+    in the sense of Waagepetersen (2007).
+    For a given parent point, the offspring constitute a Poisson process
+    with intensity function equal to \code{mu * f},
+    where \code{f} is the Gaussian probability density
+    centred at the parent point. Equivalently we first generate,
+    for each parent point, a Poisson (\eqn{M}) random number of
+    offspring (where \eqn{M} is the maximum value of \code{mu})
+    with independent Gaussian displacements from the parent
+    location, and then randomly thin the offspring points, with
+    retention probability \code{mu/M}.
+    \item Both the parent points and the offspring points can be
+    spatially inhomogeneous, as described above.
+  }
+
+  Note that if \code{kappa} is a pixel image, its domain must be larger
+  than the window \code{win}. This is because an offspring point inside
+  \code{win} could have its parent point lying outside \code{win}.
+  In order to allow this, the simulation algorithm
+  first expands the original window \code{win}
+  by a distance \code{expand} and generates the Poisson process of
+  parent points on this larger window. If \code{kappa} is a pixel image,
+  its domain must contain this larger window.
+
+  The intensity of the Thomas process is \code{kappa * mu}
+  if either \code{kappa} or \code{mu} is a single number. In the general
+  case the intensity is an integral involving \code{kappa}, \code{mu}
+  and \code{f}.
+
+  The Thomas process with homogeneous parents
+  (i.e. where \code{kappa} is a single number)
+  can be fitted to data using \code{\link{kppm}}.
+  Currently it is not possible to fit the Thomas model
+  with inhomogeneous parents.
+
+  If the pair correlation function of the model is very close
+  to that of a Poisson process, deviating by less than
+  \code{poisthresh}, then the model is approximately a Poisson process,
+  and will be simulated as a Poisson process with intensity
+  \code{kappa * mu}, using \code{\link{rpoispp}}.
+  This avoids computations that would otherwise require huge amounts
+  of memory.
+}
+\seealso{
+\code{\link{rpoispp}},
+\code{\link{rMatClust}},
+\code{\link{rCauchy}},
+\code{\link{rVarGamma}},
+\code{\link{rNeymanScott}},
+\code{\link{rGaussPoisson}},
+\code{\link{kppm}},
+\code{\link{clusterfit}}.
+}
+
+\references{
+  Diggle, P. J., Besag, J. and Gleaves, J. T. (1976)
+  Statistical analysis of spatial point patterns by
+  means of distance methods. \emph{Biometrics} \bold{32}, 659--667.
+
+  Thomas, M. (1949) A generalisation of Poisson's binomial limit for use
+  in ecology. \emph{Biometrika} \bold{36}, 18--25.
+
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\examples{
+  # homogeneous
+  X <- rThomas(10, 0.2, 5)
+  # inhomogeneous
+  Z <- as.im(function(x,y){ 5 * exp(2 * x - 1) }, owin())
+  Y <- rThomas(10, 0.2, Z)
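+  # retrieving the simulated intensity saved by 'saveLambda'
+  XL <- rThomas(10, 0.2, 5, saveLambda=TRUE)
+  plot(attr(XL, "Lambda"))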
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rVarGamma.Rd b/man/rVarGamma.Rd
new file mode 100644
index 0000000..6857fe2
--- /dev/null
+++ b/man/rVarGamma.Rd
@@ -0,0 +1,178 @@
+\name{rVarGamma}
+\alias{rVarGamma}
+\title{Simulate Neyman-Scott Point Process with Variance Gamma cluster kernel}
+\description{
+  Generate a random point pattern, a simulated realisation of the
+  Neyman-Scott process with Variance Gamma (Bessel) cluster kernel.
+}
+\usage{
+ rVarGamma(kappa, nu, scale, mu, win = owin(), 
+           thresh = 0.001, nsim=1, drop=TRUE,
+           saveLambda=FALSE, expand = NULL, ...,
+           poisthresh=1e-6, saveparents=TRUE)
+}
+\arguments{
+  \item{kappa}{
+    Intensity of the Poisson process of cluster centres.
+    A single positive number, a function, or a pixel image.
+  }
+  \item{nu}{
+    Shape parameter for the cluster kernel. A number greater than -1.
+  }
+  \item{scale}{
+    Scale parameter for cluster kernel. Determines the size of clusters.
+    A positive number in the same units as the spatial coordinates.
+  }
+  \item{mu}{
+    Mean number of points per cluster (a single positive number)
+    or reference intensity for the cluster points (a function or
+    a pixel image).
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    An object of class \code{"owin"}
+    or something acceptable to \code{\link{as.owin}}.
+  }
+  \item{thresh}{
+    Threshold relative to the cluster kernel value at the origin (parent
+    location) determining when the cluster kernel will be treated as
+    zero for simulation purposes. Will be overridden by argument
+    \code{expand} if that is given.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+  \item{saveLambda}{
+    Logical. If \code{TRUE} then the random intensity corresponding to
+    the simulated parent points will also be calculated and saved,
+    and returned as an attribute of the point pattern.
+  }
+  \item{expand}{
+    Numeric. Size of window expansion for generation of parent
+    points. By default determined by calling
+    \code{\link{clusterradius}} with the numeric threshold value given
+    in \code{thresh}.
+  }
+  \item{\dots}{
+    Passed to \code{\link{clusterfield}} to control the image resolution
+    when \code{saveLambda=TRUE} and to \code{\link{clusterradius}} when
+    \code{expand} is missing or \code{NULL}.
+  }
+  \item{poisthresh}{
+    Numerical threshold below which the model will be treated
+    as a Poisson process. See Details.
+  }
+  \item{saveparents}{
+    Logical value indicating whether to save the locations of the
+    parent points as an attribute.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"}) if \code{nsim=1},
+  or a list of point patterns if \code{nsim > 1}.
+
+  Additionally, some intermediate results of the simulation are returned
+  as attributes of this point pattern (see
+  \code{\link{rNeymanScott}}). Furthermore, the simulated intensity
+  function is returned as an attribute \code{"Lambda"}, if
+  \code{saveLambda=TRUE}.
+}
+\details{
+  This algorithm generates a realisation of the Neyman-Scott process
+  with Variance Gamma (Bessel) cluster kernel, inside the window \code{win}.
+
+  The process is constructed by first
+  generating a Poisson point process of ``parent'' points 
+  with intensity \code{kappa}. Then each parent point is
+  replaced by a random cluster of points, the number of points in each
+  cluster being random with a Poisson (\code{mu}) distribution,
+    and the points being placed independently and isotropically
+    according to a Variance Gamma kernel.
+
+  The shape of the kernel is determined by the dimensionless
+  index \code{nu}. This is the parameter
+  \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in
+  equation (12) on page 126 of Jalilian et al (2013).
+
+  The scale of the kernel is determined by the argument \code{scale},
+  which is the parameter
+  \eqn{\eta}{eta} appearing in equations (12) and (13) of
+  Jalilian et al (2013). 
+  It is expressed in units of length (the same as the unit of length for 
+  the window \code{win}).
+  
+  In this implementation, parent points are not restricted to lie in the
+  window; the parent process is effectively the uniform
+  Poisson process on the infinite plane.
+
+  This model can be fitted to data by the method of minimum contrast,
+  maximum composite likelihood or Palm likelihood using
+  \code{\link{kppm}}.
+  
+  The algorithm can also generate spatially inhomogeneous versions of
+  the cluster process:
+  \itemize{
+    \item The parent points can be spatially inhomogeneous.
+    If the argument \code{kappa} is a \code{function(x,y)}
+    or a pixel image (object of class \code{"im"}), then it is taken
+    as specifying the intensity function of an inhomogeneous Poisson
+    process that generates the parent points.
+    \item The offspring points can be inhomogeneous. If the
+    argument \code{mu} is a \code{function(x,y)}
+    or a pixel image (object of class \code{"im"}), then it is
+    interpreted as the reference density for offspring points,
+    in the sense of Waagepetersen (2007).
+  }
+  When the parents are homogeneous (\code{kappa} is a single number)
+  and the offspring are inhomogeneous (\code{mu} is a
+  function or pixel image), the model can be fitted to data
+  using \code{\link{kppm}}, or using \code{\link{vargamma.estK}}
+  or \code{\link{vargamma.estpcf}}
+  applied to the inhomogeneous \eqn{K} function. 
+
+  If the pair correlation function of the model is very close
+  to that of a Poisson process, deviating by less than
+  \code{poisthresh}, then the model is approximately a Poisson process,
+  and will be simulated as a Poisson process with intensity
+  \code{kappa * mu}, using \code{\link{rpoispp}}.
+  This avoids computations that would otherwise require huge amounts
+  of memory.
+}
+\seealso{
+  \code{\link{rpoispp}},
+  \code{\link{rNeymanScott}},
+  \code{\link{kppm}}.
+
+  \code{\link{vargamma.estK}},
+  \code{\link{vargamma.estpcf}}.
+}
+\examples{
+ # homogeneous
+ X <- rVarGamma(30, 2, 0.02, 5)
+ # inhomogeneous
+ ff <- function(x,y){ exp(2 - 3 * abs(x)) }
+ Z <- as.im(ff, W=owin())
+ Y <- rVarGamma(30, 2, 0.02, Z)
+ YY <- rVarGamma(ff, 2, 0.02, 3)
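+ # a sketch: keep the random driving intensity, which the documentation
+ # says is returned as the attribute "Lambda" when saveLambda=TRUE
+ XL <- rVarGamma(30, 2, 0.02, 5, saveLambda=TRUE)
+ lam <- attr(XL, "Lambda")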
+}
+\references{
+  Jalilian, A., Guan, Y. and Waagepetersen, R. (2013)
+  Decomposition of variance for spatial Cox processes.
+  \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137.
+  
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\author{Abdollah Jalilian and Rasmus Waagepetersen.
+  Adapted for \pkg{spatstat} by \adrian
+  
+  
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rags.Rd b/man/rags.Rd
new file mode 100644
index 0000000..559589c
--- /dev/null
+++ b/man/rags.Rd
@@ -0,0 +1,61 @@
+\name{rags}
+\alias{rags}
+\title{
+  Alternating Gibbs Sampler for Multitype Point Processes
+}
+\description{
+  Simulate a realisation of a point process model using the
+  alternating Gibbs sampler.
+}
+\usage{
+rags(model, \dots, ncycles = 100)
+}
+\arguments{
+  \item{model}{
+    Data specifying some kind of point process model.
+  }
+  \item{\dots}{
+    Additional arguments passed to other code.
+  }
+  \item{ncycles}{
+    Number of cycles of the alternating Gibbs sampler that should be
+    performed. 
+  }
+}
+\details{
+  The Alternating Gibbs Sampler for a multitype point process
+  is an iterative simulation procedure. Each step of the sampler
+  updates the pattern of points of a particular type \code{i},
+  by drawing a realisation from the conditional distribution of
+  points of type \code{i} given the points of all other types.
+  Successive steps of the sampler update the points of type 1, then
+  type 2, type 3, and so on. 
+
+  This is an experimental implementation which currently works only
+  for multitype hard core processes (see \code{\link{MultiHard}})
+  in which there is no interaction between points of the same type. 
+
+  The argument \code{model} should be an object describing a point
+  process model. At the moment, the only permitted format for
+  \code{model} is of the form \code{list(beta, hradii)} where
+  \code{beta} gives the first order trend and \code{hradii} is the
+  matrix of interaction radii. See \code{\link{ragsMultiHard}} for
+  full details.
+}
+\value{
+  A point pattern (object of class \code{"ppp"}).
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{ragsMultiHard}},
+  \code{\link{ragsAreaInter}}
+}
+\examples{
+  mo <- list(beta=c(30, 20),
+             hradii = 0.05 * matrix(c(0,1,1,0), 2, 2))
+  rags(mo, ncycles=10)
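+  # a sketch with three types, using the same list(beta, hradii) format;
+  # the zero diagonal means no interaction between points of the same type
+  mo3 <- list(beta=c(25, 20, 15),
+              hradii=0.04 * (matrix(1, 3, 3) - diag(3)))
+  rags(mo3, ncycles=10)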
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/ragsAreaInter.Rd b/man/ragsAreaInter.Rd
new file mode 100644
index 0000000..e8acf4c
--- /dev/null
+++ b/man/ragsAreaInter.Rd
@@ -0,0 +1,98 @@
+\name{ragsAreaInter}
+\alias{ragsAreaInter}
+\title{
+  Alternating Gibbs Sampler for Area-Interaction Process
+}
+\description{
+ Generate a realisation of the area-interaction process
+ using the alternating Gibbs sampler.
+ Applies only when the interaction parameter \eqn{\eta}{eta} is greater than 1.
+}
+\usage{
+     ragsAreaInter(beta, eta, r, \dots,
+                   win = NULL, bmax = NULL, periodic = FALSE, ncycles = 100)
+}
+\arguments{
+  \item{beta}{
+    First order trend. A number, a pixel image (object of class
+    \code{"im"}), or a \code{function(x,y)}.
+  }
+  \item{eta}{
+    Interaction parameter (canonical form) as described in
+    the help for \code{\link{AreaInter}}.
+    A number greater than 1.
+  }
+  \item{r}{
+    Disc radius in the model. A positive number.
+  }
+  \item{\dots}{
+    Additional arguments for \code{beta} if it is a function.
+  }
+  \item{win}{
+    Simulation window. An object of class \code{"owin"}.
+    (Ignored if \code{beta} is a pixel image.)
+  }
+  \item{bmax}{
+    Optional. The maximum possible value of \code{beta},
+    or a number larger than this. 
+  }
+  \item{periodic}{
+    Logical value indicating whether to treat opposite sides of the
+    simulation window as being the same, so that points close to one
+    side may interact with points close to the opposite side.
+    Feasible only when the window is a rectangle.
+  }
+  \item{ncycles}{
+    Number of cycles of the alternating Gibbs sampler to be performed.
+  }
+}
+\details{
+  This function generates a simulated realisation of the
+  area-interaction process (see \code{\link{AreaInter}})
+  using the alternating Gibbs sampler (see \code{\link{rags}}).
+
+  It exploits a mathematical relationship between the
+  (unmarked) area-interaction process and the two-type
+  hard core process (Baddeley and Van Lieshout, 1995;
+  Widom and Rowlinson, 1970). This relationship only holds
+  when the interaction parameter \code{eta} is greater than 1
+  so that the area-interaction process is clustered.
+
+  The parameters \code{beta,eta} are the canonical parameters described
+  in the help for \code{\link{AreaInter}}.
+  The first order trend \code{beta} may be a constant, a function,
+  or a pixel image.
+
+  The simulation window is determined by \code{beta} if it is a pixel
+  image, and otherwise by the argument \code{win} (the default is the
+  unit square).
+}
+\value{
+  A point pattern (object of class \code{"ppp"}).
+}
+\references{
+  Baddeley, A.J. and Van Lieshout, M.N.M. (1995).
+  Area-interaction point processes.
+  \emph{Annals of the Institute of Statistical Mathematics}
+  \bold{47} (1995) 601--619.
+
+  Widom, B. and Rowlinson, J.S. (1970).
+  New model for the study of liquid-vapor phase transitions.
+  \emph{The Journal of Chemical Physics}
+  \bold{52} (1970) 1670--1684.
+}
+\author{
+  \adrian.
+}
+\seealso{
+  \code{\link{rags}},
+  \code{\link{ragsMultiHard}}
+
+  \code{\link{AreaInter}}
+}
+\examples{
+   plot(ragsAreaInter(100, 2, 0.07, ncycles=15))
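+   # a sketch with a spatially varying trend, taking beta as a function(x,y);
+   # bmax supplies an upper bound on beta for the sampler (here 150, the
+   # maximum of the hypothetical trend on the unit square)
+   be <- function(x,y) { 50 + 100 * x }
+   plot(ragsAreaInter(be, 2, 0.07, win=square(1), bmax=150, ncycles=10))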
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/ragsMultiHard.Rd b/man/ragsMultiHard.Rd
new file mode 100644
index 0000000..8b001d7
--- /dev/null
+++ b/man/ragsMultiHard.Rd
@@ -0,0 +1,85 @@
+\name{ragsMultiHard}
+\alias{ragsMultiHard}
+\title{
+  Alternating Gibbs Sampler for Multitype Hard Core Process
+}
+\description{
+  Generate a realisation of the multitype hard core point process
+  using the alternating Gibbs sampler.
+}
+\usage{
+ragsMultiHard(beta, hradii, \dots, types=NULL, bmax = NULL,
+              periodic=FALSE, ncycles = 100)
+}
+\arguments{
+  \item{beta}{
+    First order trend. A numeric vector, a pixel image,
+    a function, a list of functions, or a list of pixel images.
+  }
+  \item{hradii}{
+    Matrix of hard core radii between each pair of types.
+    Diagonal entries should be \code{0} or \code{NA}.
+  }
+  \item{types}{
+    Vector of all possible types for the multitype point pattern.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{rmpoispp}}
+    when generating random points. 
+  }
+  \item{bmax}{
+    Optional upper bound on \code{beta}.
+  }
+  \item{periodic}{
+    Logical value indicating whether to measure distances in the
+    periodic sense, so that opposite sides of the (rectangular) window
+    are treated as identical.
+  }
+  \item{ncycles}{
+    Number of cycles of the sampler to be performed.
+  }
+}
+\details{
+  The Alternating Gibbs Sampler for a multitype point process
+  is an iterative simulation procedure. Each step of the sampler
+  updates the pattern of points of a particular type \code{i},
+  by drawing a realisation from the conditional distribution of
+  points of type \code{i} given the points of all other types.
+  Successive steps of the sampler update the points of type 1, then
+  type 2, type 3, and so on. 
+
+  This is an experimental implementation which currently works only
+  for multitype hard core processes (see \code{\link{MultiHard}})
+  in which there is no interaction between points of the same type,
+  and for the area-interaction process (see \code{\link{ragsAreaInter}}).
+
+  The argument \code{beta} gives the first order trend for
+  each possible type of point. It may be a single number, a numeric
+  vector, a \code{function(x,y)}, a pixel image, a list of functions,
+  a \code{function(x,y,m)}, or a list of pixel images. 
+
+  The argument \code{hradii} is the matrix of hard core radii
+  between each pair of possible types of points. Two points of types
+  \code{i} and \code{j} respectively are forbidden to lie closer than
+  a distance \code{hradii[i,j]} apart. The diagonal of this matrix must
+  contain \code{NA} or \code{0} values, indicating that there is no hard
+  core constraint applying between points of the same type.
+}
+\value{
+  A point pattern (object of class \code{"ppp"}).
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{rags}},
+  \code{\link{ragsAreaInter}}
+}
+\examples{
+  b <- c(30,20)
+  h <- 0.05 * matrix(c(0,1,1,0), 2, 2)
+  ragsMultiHard(b, h, ncycles=10)
+  ragsMultiHard(b, h, ncycles=5, periodic=TRUE)
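+  # a sketch with three types: hard cores apply only between points of
+  # different types, so the diagonal of the radius matrix is zero
+  b3 <- c(30, 25, 20)
+  h3 <- 0.03 * (matrix(1, 3, 3) - diag(3))
+  ragsMultiHard(b3, h3, ncycles=5)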
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/ranef.mppm.Rd b/man/ranef.mppm.Rd
new file mode 100644
index 0000000..a194487
--- /dev/null
+++ b/man/ranef.mppm.Rd
@@ -0,0 +1,65 @@
+\name{ranef.mppm}
+\alias{ranef.mppm}
+\title{
+  Extract Random Effects from Point Process Model
+}
+\description{
+  Given a point process model fitted to a list of point patterns,
+  extract the random effects of the model.
+  A method for \code{ranef}.
+}
+\usage{
+ \method{ranef}{mppm}(object, \dots)
+}
+\arguments{
+  \item{object}{
+    A fitted point process model (an object of class \code{"mppm"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link[nlme]{ranef}}.
+
+  The argument \code{object} must be a fitted point process model
+  (object of class \code{"mppm"}) produced by the 
+  fitting algorithm \code{\link{mppm}}. This represents a
+  point process model that has been fitted
+  to a list of several point pattern datasets. See \code{\link{mppm}}
+  for information.
+
+  This function extracts the coefficients of the random effects
+  of the model.
+}
+\value{
+  A data frame, or list of data frames, as described in the help for
+  \code{\link[nlme]{ranef.lme}}.
+}
+\references{
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  London: Chapman and Hall/CRC Press. 
+}
+\author{
+  Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff.
+  Implemented in \pkg{spatstat} by
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{fixef.mppm}},
+  \code{\link{coef.mppm}}
+}
+\examples{
+ H <- hyperframe(Y = waterstriders)
+ # Tweak data to exaggerate differences
+ H$Y[[1]] <- rthin(H$Y[[1]], 0.3)
+
+ m1 <- mppm(Y ~ id,  data=H, Strauss(7))
+ ranef(m1)
+ m2 <- mppm(Y ~ 1,  random=~1|id, data=H, Strauss(7))
+ ranef(m2)
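+ # for comparison, the fixed effects of the same model (a sketch;
+ # see fixef.mppm)
+ fixef(m2)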
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{models}
diff --git a/man/range.fv.Rd b/man/range.fv.Rd
new file mode 100644
index 0000000..2947929
--- /dev/null
+++ b/man/range.fv.Rd
@@ -0,0 +1,59 @@
+\name{range.fv}
+\alias{range.fv}
+\alias{max.fv}
+\alias{min.fv}
+\title{
+  Range of Function Values
+}
+\description{
+  Compute the range, maximum, or minimum of the
+  function values in a summary function.
+}
+\usage{
+  \method{range}{fv}(\dots, na.rm = TRUE, finite = na.rm)
+
+  \method{max}{fv}(\dots, na.rm = TRUE, finite = na.rm)
+
+  \method{min}{fv}(\dots, na.rm = TRUE, finite = na.rm)
+}
+\arguments{
+  \item{\dots}{
+    One or more function value tables (objects of class \code{"fv"}
+    representing summary functions) or other data.
+  }
+  \item{na.rm}{
+    Logical. Whether to ignore \code{NA} values.
+  }
+  \item{finite}{
+    Logical. Whether to ignore values that are
+    infinite, \code{NaN} or \code{NA}.
+  }
+}
+\details{
+  These are methods for the generic \code{\link[base]{range}},
+  \code{\link[base]{max}} and \code{\link[base]{min}}.
+  They compute the range, maximum, and minimum of the \emph{function} values
+  that would be plotted on the \eqn{y} axis by default.
+
+  For more complicated calculations, use \code{\link{with.fv}}.
+}
+\value{
+  For \code{range}, a numeric vector of length 2.
+  For \code{max} and \code{min}, a single number.
+}
+\seealso{
+  \code{\link{with.fv}}
+}
+\examples{
+   G <- Gest(cells)
+   range(G)
+   max(G)
+   min(G)
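+   # a more elaborate calculation of the same kind via with.fv (a sketch
+   # using the "km" and "theo" columns of the Gest output)
+   with(G, max(abs(km - theo)))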
+}
+\author{
+  \adrian, \rolf and \ege.
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/raster.x.Rd b/man/raster.x.Rd
new file mode 100644
index 0000000..2411bf2
--- /dev/null
+++ b/man/raster.x.Rd
@@ -0,0 +1,87 @@
+\name{raster.x}
+\alias{raster.x}
+\alias{raster.y}
+\alias{raster.xy}
+\title{Cartesian Coordinates for a Pixel Raster}
+\description{
+  Return the \eqn{x} and \eqn{y} coordinates
+  of each pixel in a pixel image or binary mask.
+}
+\usage{
+ raster.x(w, drop=FALSE)
+ raster.y(w, drop=FALSE)
+ raster.xy(w, drop=FALSE)
+}
+\arguments{
+  \item{w}{
+    A pixel image (object of class \code{"im"}) or a 
+    mask window (object of class \code{"owin"}
+    of type \code{"mask"}).
+  }
+  \item{drop}{
+    Logical.
+    If \code{TRUE}, then coordinates of pixels that lie outside the
+    window are removed.
+    If \code{FALSE} (the default) then the coordinates of every
+    pixel in the containing rectangle are retained.
+  }
+}
+\value{
+  \code{raster.xy} returns a list with components \code{x} and \code{y}
+  which are numeric vectors of equal length containing the pixel coordinates.
+
+  If \code{drop=FALSE}, 
+  \code{raster.x} and \code{raster.y} return 
+  a matrix of the same dimensions as the pixel grid in \code{w},
+  and giving the value of the \eqn{x} (or \eqn{y}) coordinate
+  of each pixel in the raster.
+
+  If \code{drop=TRUE}, 
+  \code{raster.x} and \code{raster.y} return numeric vectors.
+}
+\details{
+  The argument \code{w} should be either
+  a pixel image (object of class \code{"im"})
+  or a mask window (an object of class
+  \code{"owin"} of type \code{"mask"}).
+
+  If \code{drop=FALSE} (the default), the 
+  functions \code{raster.x} and \code{raster.y} return
+  a matrix of the same dimensions as the
+  pixel image or mask itself, with entries giving the \eqn{x} coordinate
+  (for \code{raster.x}) or \eqn{y} coordinate (for \code{raster.y})
+  of each pixel in the pixel grid.
+
+  If \code{drop=TRUE}, pixels that lie outside the
+  window \code{w} (or outside the domain of the image \code{w})
+  are removed, and \code{raster.x} and \code{raster.y} 
+  return numeric vectors containing the coordinates of the
+  pixels that are inside the window \code{w}.
+
+  The function \code{raster.xy} returns a list
+  with components \code{x} and \code{y}
+  which are numeric vectors of equal length containing the pixel coordinates.
+}
+\seealso{
+  \code{\link{owin}},
+  \code{\link{as.mask}},
+  \code{\link{pixelcentres}}
+}
+\examples{
+  u <- owin(c(-1,1),c(-1,1)) # square of side 2
+  w <- as.mask(u, eps=0.01) # 200 x 200 grid
+  X <- raster.x(w)
+  Y <- raster.y(w)
+  disc <- owin(c(-1,1), c(-1,1), mask=(X^2 + Y^2 <= 1))
+  \dontrun{plot(disc)}
+  # approximation to the unit disc
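+  # with drop=TRUE, only coordinates of pixels inside the window
+  # are returned (a sketch: checks the pixels really lie in the disc)
+  xin <- raster.x(disc, drop=TRUE)
+  yin <- raster.y(disc, drop=TRUE)
+  all(xin^2 + yin^2 <= 1)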
+}
+\author{\adrian, \rolf and \ege}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/rat.Rd b/man/rat.Rd
new file mode 100644
index 0000000..1c12680
--- /dev/null
+++ b/man/rat.Rd
@@ -0,0 +1,64 @@
+\name{rat}
+\alias{rat}
+\title{
+  Ratio object
+}
+\description{
+  Stores the numerator, denominator, and value of a ratio
+  as a single object.
+}
+\usage{
+rat(ratio, numerator, denominator, check = TRUE)
+}
+\arguments{
+  \item{ratio,numerator,denominator}{
+    Three objects belonging to the same class.
+  }
+  \item{check}{
+    Logical. Whether to check that
+    the objects are \code{\link{compatible}}.
+  }
+}
+\details{
+  The class \code{"rat"} is a simple mechanism for keeping track of
+  the numerator and denominator when calculating a ratio. Its main
+  purpose is simply to signal that the object is a ratio.
+
+  The function \code{rat} creates an object of class \code{"rat"}
+  given the numerator, the denominator and the ratio.
+  No calculation is performed; 
+  the three objects are simply stored together.
+
+  The arguments \code{ratio}, \code{numerator}, \code{denominator}
+  can be objects of any kind. They should belong to the same class.
+  It is assumed that the relationship
+  \deqn{
+    \mbox{ratio} = \frac{\mbox{numerator}}{\mbox{denominator}}
+  }{
+    ratio = numerator/denominator
+  }
+  holds in some version of arithmetic. However, no calculation is
+  performed.
+
+  By default the algorithm checks
+  whether the three arguments \code{ratio}, \code{numerator},
+  \code{denominator} are compatible objects, according to
+  \code{\link{compatible}}.
+  
+  The result is equivalent to \code{ratio} except for the
+  addition of extra information.
+}
+\value{
+  An object equivalent to the object \code{ratio}
+  except that it also belongs to the class \code{"rat"}
+  and has additional attributes \code{numerator} and \code{denominator}.
+}
+\author{\adrian
+  and \rolf.
+}
+\seealso{
+  \code{\link{compatible}}, 
+  \code{\link{pool}}
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/rcell.Rd b/man/rcell.Rd
new file mode 100644
index 0000000..4541b5a
--- /dev/null
+++ b/man/rcell.Rd
@@ -0,0 +1,108 @@
+\name{rcell}
+\alias{rcell}
+\title{Simulate Baddeley-Silverman Cell Process}
+\description{
+  Generates a random point pattern, a simulated realisation of the
+  Baddeley-Silverman cell process model. 
+}
+\usage{
+ rcell(win=square(1), nx=NULL, ny=nx, \dots, dx=NULL, dy=dx,
+       N=10, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{win}{
+    A window. 
+    An object of class \code{\link{owin}},
+    or data in any format acceptable to \code{\link{as.owin}()}.
+  }
+  \item{nx}{
+    Number of columns of cells in the window.
+    Incompatible with \code{dx}.
+  }
+  \item{ny}{
+    Number of rows of cells in the window.
+    Incompatible with \code{dy}.
+  }
+  \item{\dots}{Ignored.}
+  \item{dx}{
+    Width of the cells. Incompatible with \code{nx}.
+  }
+  \item{dy}{
+    Height of the cells.
+    Incompatible with \code{ny}.
+  }
+  \item{N}{
+    Integer. Distributional parameter:
+    the maximum number of random points in each cell.
+    Passed to \code{\link{rcellnumber}}.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"})
+  if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}.
+}
+\details{ 
+  This function generates a simulated realisation of the \dQuote{cell process}
+  (Baddeley and Silverman, 1984), a random point process
+  with the same second-order properties as the uniform Poisson process.
+  In particular, the \eqn{K} function of this process is identical to
+  the \eqn{K} function of the uniform Poisson process (aka Complete
+  Spatial Randomness). The same holds for the pair correlation function
+  and all other second-order properties.
+  The cell process is a counterexample to the claim that the
+  \eqn{K} function completely characterises a point pattern.
+  
+  A cell process is generated by dividing space into equal rectangular
+  tiles. In each tile, a random number of random points is placed.
+  By default, there are either \eqn{0}, \eqn{1} or \eqn{10} points,
+  with probabilities \eqn{1/10}, \eqn{8/9} and \eqn{1/90}
+  respectively. 
+  The points within a tile are independent and uniformly distributed in
+  that tile, and the numbers of points in different tiles are
+  independent random integers. 
+
+  The tile width is determined
+  either by the number of columns \code{nx} or by the
+  horizontal spacing \code{dx}.
+  The tile height is determined
+  either by the number of rows \code{ny} or by the
+  vertical spacing \code{dy}. 
+  The cell process is then generated in these tiles.
+  The random numbers of points are generated by \code{\link{rcellnumber}}.
+
+  Some of the resulting random points may lie outside the window \code{win}:
+  if they do, they are deleted.
+  The result is a point pattern inside the window \code{win}.
+}
+\seealso{
+  \code{\link{rcellnumber}},
+  \code{\link{rstrat}},
+  \code{\link{rsyst}},
+  \code{\link{runifpoint}},
+  \code{\link{Kest}}
+}
+\examples{
+  X <- rcell(nx=15)
+  plot(X)
+  plot(Kest(X))
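+  # a sketch specifying the cells by their width instead of their number
+  Y <- rcell(dx=0.1)
+  plot(Y)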
+}
+\references{
+  Baddeley, A.J. and Silverman, B.W. (1984)
+  A cautionary example on the use of second-order methods for analyzing
+  point patterns. \emph{Biometrics} \bold{40}, 1089-1094.
+}
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rcellnumber.Rd b/man/rcellnumber.Rd
new file mode 100644
index 0000000..8bcf0df
--- /dev/null
+++ b/man/rcellnumber.Rd
@@ -0,0 +1,59 @@
+\name{rcellnumber}
+\alias{rcellnumber}
+\title{
+  Generate Random Numbers of Points for Cell Process
+}
+\description{
+  Generates random integers for the Baddeley-Silverman counterexample.
+}
+\usage{
+ rcellnumber(n, N = 10, mu=1)
+}
+\arguments{
+  \item{n}{
+    Number of random integers to be generated.
+  }
+  \item{N}{
+    Distributional parameter: the largest possible value
+    (when \code{mu <= 1}).
+    An integer greater than 1.
+  }
+  \item{mu}{
+    Mean of the distribution (equals the variance).
+    Any positive real number.
+  }
+}
+\details{
+  If \code{mu = 1} (the default),
+  this function generates random integers which have mean and variance
+  equal to 1, but which do not have a Poisson distribution. 
+  The random integers take the values \eqn{0}, \eqn{1} and \eqn{N}
+  with probabilities \eqn{1/N}, \eqn{(N-2)/(N-1)} and \eqn{1/(N(N-1))}
+  respectively.
+  See Baddeley and Silverman (1984).
+
+  If \code{mu} is another positive number, the random integers will
+  have mean and variance equal to \code{mu}. They are obtained by
+  generating the
+  one-dimensional counterpart of the cell process and counting the
+  number of points in the interval from \code{0} to \code{mu}. The
+  maximum possible value of each random integer is \code{N * ceiling(mu)}.
+}
+\value{
+  An integer vector of length \code{n}.
+}
+\references{
+  Baddeley, A.J. and Silverman, B.W. (1984)
+  A cautionary example on the use of second-order methods for analyzing
+  point patterns. \emph{Biometrics} \bold{40}, 1089-1094.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{rcell}}
+}
+\examples{
+   rcellnumber(30, 3)
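+   # a quick sketch: with mu > 1 the mean and variance both equal mu
+   ns <- rcellnumber(10000, N=10, mu=2)
+   c(mean(ns), var(ns))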
+}
+\keyword{datagen}
diff --git a/man/rdpp.Rd b/man/rdpp.Rd
new file mode 100644
index 0000000..fd032bd
--- /dev/null
+++ b/man/rdpp.Rd
@@ -0,0 +1,60 @@
+\name{rdpp}
+\alias{rdpp}
+\title{Simulation of a Determinantal Point Process}
+\description{
+  Generates simulated realisations from a determinantal point process.
+}
+\usage{
+  rdpp(eig, index, basis = "fourierbasis",
+       window = boxx(rep(list(0:1), ncol(index))),
+       reject_max = 10000, progress = 0, debug = FALSE, \dots)
+}
+\arguments{
+  \item{eig}{
+    vector of values between 0 and 1 specifying the non-zero
+    eigenvalues for the process.
+  }
+  \item{index}{
+    \code{data.frame} or \code{matrix} (or something acceptable to
+    \code{\link{as.matrix}}) specifying indices of the basis
+    functions.
+  }
+  \item{basis}{character string giving the name of the basis.}
+  \item{window}{
+    window (of class \code{"owin"}, \code{"box3"} or \code{"boxx"})
+    giving the domain of the point process.
+  }
+  \item{reject_max}{
+    integer giving the maximal number of trials for rejection sampling.
+  }
+  \item{progress}{
+    integer giving the interval for making a progress report. The value
+    zero turns reporting off.
+  }
+  \item{debug}{
+    logical value indicating whether debug information
+    should be printed.
+  }
+  \item{\dots}{Ignored.}
+}
+\author{
+  \adrian, \rolf and \ege
+}
+\examples{
+index <- expand.grid(-2:2,-2:2)
+eig <- exp(-rowSums(index^2))
+X <- rdpp(eig, index)
+X
+## To simulate a determinantal projection point process with the given indices, set eig=1:
+XX <- rdpp(1, index)
+XX
+}
+\keyword{datagen}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/reach.Rd b/man/reach.Rd
new file mode 100644
index 0000000..c7b93bb
--- /dev/null
+++ b/man/reach.Rd
@@ -0,0 +1,154 @@
+\name{reach}
+\alias{reach}
+\alias{reach.ppm}
+\alias{reach.interact}
+\alias{reach.fii}
+\alias{reach.rmhmodel}
+\title{Interaction Distance of a Point Process}
+\description{
+  Computes the interaction distance of a point process.
+}
+\usage{
+  reach(x, \dots)
+
+  \method{reach}{ppm}(x, \dots, epsilon=0)
+
+  \method{reach}{interact}(x, \dots)
+
+  \method{reach}{rmhmodel}(x, \dots)
+
+  \method{reach}{fii}(x, \dots, epsilon)
+}
+\arguments{
+  \item{x}{Either a fitted point process model (object of class
+    \code{"ppm"}), an interpoint interaction (object of class
+    \code{"interact"}), a fitted interpoint interaction (object of
+    class \code{"fii"}) or a point process model for simulation
+    (object of class \code{"rmhmodel"}).
+  }
+  \item{epsilon}{
+    Numerical threshold below which interaction is treated as zero.
+    See details.
+  }
+  \item{\dots}{
+    Other arguments are ignored.
+  }
+}
+\value{
+  The interaction distance, or \code{NA} if this cannot be
+  computed from the information given.
+}
+\details{
+  The `interaction distance' or `interaction range' of a point process
+  model is the smallest distance \eqn{D} such that any two points in the
+  process which are separated by a distance greater than \eqn{D} do not
+  interact with each other.
+
+  For example, the interaction range of a Strauss process
+  (see \code{\link{Strauss}})
+  with parameters \eqn{\beta,\gamma,r}{beta,gamma,r} is equal to
+  \eqn{r}, unless \eqn{\gamma=1}{gamma=1} in which case the model is
+  Poisson and the interaction
+  range is \eqn{0}.
+  The interaction range of a Poisson process is zero.
+  The interaction range of the Ord threshold process
+  (see \code{\link{OrdThresh}}) is infinite, since two points \emph{may}
+  interact at any distance apart.
+
+  The function \code{reach(x)} is generic, with methods
+  for the case where \code{x} is 
+  \itemize{
+    \item
+    a fitted point process model
+    (object of class \code{"ppm"}, usually obtained from the model-fitting
+    function \code{\link{ppm}});
+    \item
+    an interpoint interaction structure (object of class
+    \code{"interact"}), created by one of the functions
+    \code{\link{Poisson}},
+    \code{\link{Strauss}},
+    \code{\link{StraussHard}},
+    \code{\link{MultiStrauss}},
+    \code{\link{MultiStraussHard}},
+    \code{\link{Softcore}},
+    \code{\link{DiggleGratton}},
+    \code{\link{Pairwise}},
+    \code{\link{PairPiece}},
+    \code{\link{Geyer}},
+    \code{\link{LennardJones}},
+    \code{\link{Saturated}},
+    \code{\link{OrdThresh}}
+    or
+    \code{\link{Ord}};
+    \item
+    a fitted interpoint interaction (object of class
+    \code{"fii"}) extracted from a fitted point process model
+    by the command \code{\link{fitin}};
+    \item
+    a point process model for simulation (object of class
+    \code{"rmhmodel"}), usually obtained from \code{\link{rmhmodel}}.
+  }
+  When \code{x} is an \code{"interact"} object,
+  \code{reach(x)} returns the maximum possible interaction range
+  for any point process model with interaction structure given by \code{x}.
+  For example, \code{reach(Strauss(0.2))} returns \code{0.2}.
+  
+  When \code{x} is a \code{"ppm"} object,
+  \code{reach(x)} returns the interaction range
+  for the point process model represented by \code{x}.
+  For example, a fitted Strauss process model
+  with parameters \code{beta,gamma,r} will return
+  either \code{0} or \code{r}, depending on whether the fitted
+  interaction parameter \code{gamma} is equal or not equal to 1.
+
+  For some point process models, such as the soft core process
+  (see \code{\link{Softcore}}), the interaction distance is
+  infinite, because the interaction terms are positive for all
+  pairs of points. A practical solution is to compute 
+  the distance at which the interaction contribution
+  from a pair of points falls below a threshold \code{epsilon},
+  on the scale of the log conditional intensity. This is done
+  by setting the argument \code{epsilon} to a positive value.
+}
+\seealso{
+  \code{\link{ppm}},
+    \code{\link{Poisson}},
+    \code{\link{Strauss}},
+    \code{\link{StraussHard}},
+    \code{\link{MultiStrauss}},
+    \code{\link{MultiStraussHard}},
+    \code{\link{Softcore}},
+    \code{\link{DiggleGratton}},
+    \code{\link{Pairwise}},
+    \code{\link{PairPiece}},
+    \code{\link{Geyer}},
+    \code{\link{LennardJones}},
+    \code{\link{Saturated}},
+    \code{\link{OrdThresh}},
+    \code{\link{Ord}},
+    \code{\link{rmhmodel}}
+}
+\examples{
+    reach(Poisson())
+    # returns 0
+
+    reach(Strauss(r=7))
+    # returns 7
+    fit <- ppm(swedishpines ~ 1, Strauss(r=7))
+    reach(fit)
+    # returns 7
+
+    reach(OrdThresh(42))
+    # returns Inf
+    
+    reach(MultiStrauss(matrix(c(1,3,3,1),2,2)))
+    # returns 3
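+    # a sketch for an infinite-range interaction: threshold it with epsilon
+    # (the soft core parameters here are hypothetical)
+    fitS <- ppm(swedishpines ~ 1, Softcore(kappa=0.5))
+    reach(fitS, epsilon=0.01)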
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/reach.dppm.Rd b/man/reach.dppm.Rd
new file mode 100644
index 0000000..6bded1e
--- /dev/null
+++ b/man/reach.dppm.Rd
@@ -0,0 +1,42 @@
+\name{reach.dppm}
+\alias{reach.dppm}
+\alias{reach.detpointprocfamily}
+\title{Range of Interaction for a Determinantal Point Process Model}
+\description{
+  Returns the range of interaction for a determinantal point process model.
+}
+\usage{
+  \method{reach}{dppm}(x, \dots)
+
+  \method{reach}{detpointprocfamily}(x, \dots)
+}
+\arguments{
+  \item{x}{Model of class \code{"detpointprocfamily"} or \code{"dppm"}.}
+  \item{\dots}{Additional arguments passed to the range function
+    of the given model.
+  }
+}
+\details{
+  The range of interaction for a determinantal point
+  process model may be defined as the smallest number \eqn{R} such that
+  \eqn{g(r)=1} for all \eqn{r\ge R}{r>=R}, where \eqn{g} is the pair
+  correlation function. For many models the range is infinite, but one
+  may instead use a value where the pair correlation function is
+  sufficiently close to 1. For example in the Matern model this defaults
+  to finding \eqn{R} such that \eqn{g(R)=0.99}.
+}
+\value{Numeric}
+\author{
+  \adrian, \rolf and \ege
+}
+\examples{
+reach(dppMatern(lambda=100, alpha=.01, nu=1, d=2))
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/reduced.sample.Rd b/man/reduced.sample.Rd
new file mode 100644
index 0000000..64fbf9c
--- /dev/null
+++ b/man/reduced.sample.Rd
@@ -0,0 +1,94 @@
+\name{reduced.sample}
+\alias{reduced.sample}
+\title{Reduced Sample Estimator using Histogram Data}
+\description{
+  Compute the Reduced Sample estimator of a survival time distribution
+  function, from histogram data
+}
+\usage{
+  reduced.sample(nco, cen, ncc, show=FALSE, uppercen=0)
+}
+\arguments{
+  \item{nco}{vector of counts giving the histogram of
+    uncensored observations (those survival times that are less than or
+    equal to the censoring time)
+  }
+  \item{cen}{vector of counts giving the histogram of
+    censoring times
+  }
+  \item{ncc}{vector of counts giving the histogram of
+    censoring times for the uncensored observations only 
+  }
+  \item{uppercen}{
+    number of censoring times greater than the rightmost
+    histogram breakpoint (if there are any)
+  }
+  \item{show}{Logical value controlling the amount of detail
+    returned by the function value (see below)
+  }
+}
+\value{
+  If \code{show = FALSE}, a numeric vector giving the values of
+  the reduced sample estimator.
+  If \code{show=TRUE}, a list with three components which are
+  vectors of equal length,
+  \item{rs}{Reduced sample estimate of the survival time c.d.f. \eqn{F(t)}
+  }
+  \item{numerator}{numerator of the reduced sample estimator
+  }
+  \item{denominator}{denominator of the reduced sample estimator
+  }
+}
+\details{
+  This function is needed mainly for internal use in \pkg{spatstat},
+  but may be useful in other applications where you want to form the
+  reduced sample estimator from a huge dataset.
+
+  Suppose \eqn{T_i}{T[i]} are the survival times of individuals
+  \eqn{i=1,\ldots,M} with unknown distribution function \eqn{F(t)}
+  which we wish to estimate. Suppose these times are right-censored
+  by random censoring times \eqn{C_i}{C[i]}.
+  Thus the observations consist of right-censored survival times
+  \eqn{\tilde T_i = \min(T_i,C_i)}{T*[i] = min(T[i],C[i])}
+  and non-censoring indicators
+  \eqn{D_i = 1\{T_i \le C_i\}}{D[i] = 1(T[i] <= C[i])}
+  for each \eqn{i}.
+
+  If the number of observations \eqn{M} is large, it is efficient to
+  use histograms.
+  Form the histogram \code{cen} of all censoring times \eqn{C_i}{C[i]}.
+  That is, \code{cen[k]} counts the number of values 
+  \eqn{C_i}{C[i]} in the interval
+  \code{(breaks[k],breaks[k+1]]} for \eqn{k > 1}
+  and \code{[breaks[1],breaks[2]]} for \eqn{k = 1}.
+  Also form the histogram \code{nco} of all uncensored times,
+  i.e. those \eqn{\tilde T_i}{T*[i]} such that \eqn{D_i=1}{D[i]=1},
+  and the histogram of all censoring times for which the survival time
+  is uncensored,
+  i.e. those \eqn{C_i}{C[i]} such that \eqn{D_i=1}{D[i]=1}.
+  These three histograms are the arguments passed to \code{reduced.sample}.
+
+  The return value \code{rs} is the reduced-sample estimator
+  of the distribution function \eqn{F(t)}. Specifically,
+  \code{rs[k]} is the reduced sample estimate of \code{F(breaks[k+1])}.
+  The value is exact, i.e. the use of histograms does not introduce any
+  approximation error.
+
+  Note that, for the results to be valid, either the histogram breaks
+  must span the censoring times, or the number of censoring times
+  that do not fall in a histogram cell must have been counted in
+  \code{uppercen}.
+}
+\seealso{
+  \code{\link{kaplan.meier}},
+  \code{\link{km.rs}}
+}
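+\examples{
+  # a tiny sketch using hypothetical histogram counts over common breaks
+  nco <- c(3, 5, 2)   # uncensored survival times per interval
+  cen <- c(2, 5, 6)   # all censoring times per interval
+  ncc <- c(1, 4, 5)   # censoring times of the uncensored observations
+  reduced.sample(nco, cen, ncc, show=TRUE)
+}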
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{nonparametric}
+
diff --git a/man/redwood.Rd b/man/redwood.Rd
new file mode 100644
index 0000000..3f6f0d4
--- /dev/null
+++ b/man/redwood.Rd
@@ -0,0 +1,70 @@
+\name{redwood}
+\alias{redwood}
+\alias{redwood3}
+\docType{data}
+\title{
+  California Redwoods Point Pattern (Ripley's Subset)
+}
+\description{
+  Locations of 62 seedlings and saplings 
+  of California redwood trees.
+ 
+  The data represent the locations of 62 seedlings and saplings 
+  of California redwood trees in a square sampling region.  
+  They originate from Strauss (1975);
+  the present data are a subset extracted by Ripley (1977) 
+  in a subregion that has been rescaled to a unit square.
+
+  Two versions of this dataset are provided: \code{redwood}
+  and \code{redwood3}.
+
+  The dataset \code{redwood} was obtained from the \pkg{spatial} package. 
+  In this version the coordinates are given to 2 decimal places
+  (multiples of 0.01 units) except for one point which has an \eqn{x} coordinate
+  of 0.999, presumably to ensure that it is properly inside the window.
+
+  The dataset \code{redwood3} was obtained from Peter Diggle's webpage.
+  In this version the coordinates are given to 3 decimal places
+  (multiples of 0.001 units). The ordering of the points is not the same
+  in the two datasets.
+  
+  There are many further analyses of this dataset. It is often used as a 
+  canonical example of a clustered point pattern
+  (see e.g. Diggle, 1983).
+
+  The original, full redwood dataset is supplied in the \code{spatstat}
+  library as \code{redwoodfull}.
+} 
+\format{
+  An object of class \code{"ppp"}
+  representing the point pattern of tree locations.
+  The window has been rescaled to the unit square.
+
+  See \code{\link{ppp.object}} for details of the format of a
+  point pattern object.
+}
+\usage{data(redwood)}
+\source{
+  Original data of Strauss (1975), subset extracted by Ripley (1977).
+  Data obtained from Ripley's package \pkg{spatial}
+  and from Peter Diggle's website.
+}
+\seealso{
+  \code{\link{redwoodfull}}
+}
+\references{
+  Diggle, P.J. (1983)
+  \emph{Statistical analysis of spatial point patterns}.
+  Academic Press.
+
+  Ripley, B.D. (1977)
+  Modelling spatial patterns (with discussion).
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{39}, 172--212.
+
+  Strauss, D.J. (1975)
+  A model for clustering.
+  \emph{Biometrika} \bold{62}, 467--475.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/redwoodfull.Rd b/man/redwoodfull.Rd
new file mode 100644
index 0000000..0be8340
--- /dev/null
+++ b/man/redwoodfull.Rd
@@ -0,0 +1,108 @@
+\name{redwoodfull}
+\alias{redwoodfull}
+\alias{redwoodfull.extra}
+\docType{data}
+\title{
+  California Redwoods Point Pattern (Entire Dataset)
+}
+\description{
+  These data represent the locations of 195 seedlings and saplings 
+  of California redwood trees in a square sampling region.  
+  They were described and analysed by Strauss (1975).
+  This is the ``\bold{full}'' dataset; most writers have
+  analysed a subset extracted by Ripley (1977)
+  which is available as \code{\link{redwood}}.
+ 
+  Strauss (1975) divided the sampling region into two subregions I and II
+  demarcated by a diagonal line.  The spatial pattern
+  appears to be slightly regular in region I and strongly clustered in 
+  region II. 
+
+  Strauss (1975) writes: \dQuote{It was felt that the seedlings
+    would be scattered fairly randomly, except that a number of
+    tight clusters would form around some of the redwood tree stumps
+    present in the plot. A discontinuity in the soil, very roughly
+    demarked by the diagonal line in the figure, was expected to cause
+    a difference in clustering behaviour between regions I and II. Moreover,
+    almost all the redwood stumps were situated in region II.}
+
+  The dataset \code{redwoodfull} contains the full point pattern
+  of 195 trees. 
+  The window has been rescaled to the unit square.
+  Its physical size is approximately 130 feet across.
+
+  The auxiliary information about the subregions is contained in 
+  \code{redwoodfull.extra}, which is a list with entries
+  \tabular{ll}{
+    \code{rdiag}\tab The coordinates of the diagonal boundary\cr
+               \tab between regions I and II \cr
+    \code{regionI} \tab Region I as a window object \cr
+    \code{regionII} \tab Region II as a window object \cr
+    \code{regionR} \tab Ripley's subrectangle (approximate) \cr
+    \code{plotit}    \tab Function to plot the full data and auxiliary markings
+  }
+
+  Ripley (1977) extracted a subset of these data, containing 62 points,
+  lying within a square subregion which overlaps regions I and II.
+  He rescaled that subset to the unit square. 
+  This subset has been re-analysed many times,
+  and is the dataset usually known as
+  ``the redwood data'' in the spatial statistics literature.
+  The exact dataset used by Ripley is supplied in the \pkg{spatstat}
+  library as \code{\link{redwood}}.
+
+  The approximate position of the square chosen by Ripley
+  within the \code{redwoodfull} pattern
+  is indicated by the window \code{redwoodfull.extra$regionR}.
+  There are some minor inconsistencies with
+  \code{redwood} since it originates from a different digitisation.
+} 
+\format{
+  The dataset \code{redwoodfull} is an object of class \code{"ppp"}
+  representing the point pattern of tree locations.
+  See \code{\link{ppp.object}} for details of the format of a
+  point pattern object.
+  The window has been rescaled to the unit square.
+  Its physical size is approximately 128 feet across.
+
+  The dataset \code{redwoodfull.extra} is a list with entries
+  \tabular{ll}{
+    \code{rdiag}\tab coordinates of endpoints of a line,\cr
+               \tab in format \code{list(x=numeric(2),y=numeric(2))} \cr
+    \code{regionI} \tab a window object \cr
+    \code{regionII} \tab a window object \cr
+    \code{regionR} \tab a window object \cr
+    \code{plotit}    \tab Function with no arguments
+  }
+}
+\usage{data(redwoodfull)}
+\examples{
+       data(redwoodfull)
+       plot(redwoodfull)
+       redwoodfull.extra$plotit()
+       # extract the pattern in region II 
+       redwoodII <- redwoodfull[, redwoodfull.extra$regionII]
+}
+\source{Strauss (1975). The plot of the data published by Strauss (1975)
+  was scanned and digitised by Sandra Pereira, University of
+  Western Australia, 2002.
+}
+\seealso{
+  \code{\link{redwood}}
+}
+\references{
+  Diggle, P.J. (1983)
+  \emph{Statistical analysis of spatial point patterns}.
+  Academic Press.
+
+  Ripley, B.D. (1977)
+  Modelling spatial patterns (with discussion).
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{39}, 172--212.
+
+  Strauss, D.J. (1975)
+  A model for clustering.
+  \emph{Biometrika} \bold{62}, 467--475.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/reflect.Rd b/man/reflect.Rd
new file mode 100644
index 0000000..0954eb9
--- /dev/null
+++ b/man/reflect.Rd
@@ -0,0 +1,52 @@
+\name{reflect} 
+\alias{reflect}
+\alias{reflect.im}
+\alias{reflect.default}
+\title{Reflect In Origin}
+\description{
+  Reflects a geometrical object through the origin.
+}
+\usage{
+  reflect(X)
+
+  \method{reflect}{im}(X)
+
+  \method{reflect}{default}(X)
+}
+\arguments{
+  \item{X}{Any suitable dataset representing a two-dimensional
+    object, such as a point pattern (object of class \code{"ppp"}),
+    or a window (object of class \code{"owin"}).}
+}
+\value{
+  Another object of the same type, representing the
+  result of reflection.
+}
+\details{
+  The object \code{X} is reflected through the origin.
+  That is, each point in \code{X} with coordinates
+  \eqn{(x,y)} is mapped to the position \eqn{(-x, -y)}.
+
+  This is equivalent to applying the affine transformation with matrix
+  \code{diag(c(-1,-1))}. It is also equivalent to rotation about the origin
+  by 180 degrees.
+
+  The command \code{reflect} is generic, with a method for
+  pixel images and a default method.
+}
+\seealso{
+  \code{\link{affine}},
+  \code{\link{flipxy}}
+}
+\examples{
+  plot(reflect(as.im(letterR)))
+  plot(reflect(letterR), add=TRUE)
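+  # a sketch checking the stated equivalence: reflection agrees with
+  # the affine map whose matrix is diag(c(-1,-1))
+  A <- affine(letterR, mat=diag(c(-1,-1)))
+  B <- reflect(letterR)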
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/regularpolygon.Rd b/man/regularpolygon.Rd
new file mode 100644
index 0000000..961f87d
--- /dev/null
+++ b/man/regularpolygon.Rd
@@ -0,0 +1,68 @@
+\name{regularpolygon}
+\alias{regularpolygon}
+\alias{hexagon}
+\title{
+  Create A Regular Polygon
+}
+\description{
+  Create a window object representing a regular (equal-sided) polygon.
+}
+\usage{
+regularpolygon(n, edge = 1, centre = c(0, 0), \dots,
+               align = c("bottom", "top", "left", "right", "no"))
+
+hexagon(edge = 1, centre = c(0,0), \dots,
+        align = c("bottom", "top", "left", "right", "no"))
+}
+\arguments{
+  \item{n}{
+    Number of edges in the polygon.
+  }
+  \item{edge}{
+    Length of each edge in the polygon. A single positive number.
+  }
+  \item{centre}{
+    Coordinates of the centre of the polygon.
+    A numeric vector of length 2,
+    or a \code{list(x,y)} giving the coordinates of exactly one point, or a
+    point pattern (object of class \code{"ppp"}) containing exactly one point.
+  }
+  \item{align}{
+    Character string specifying whether to align one of the edges with
+    a vertical or horizontal boundary.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  The function \code{regularpolygon} creates a regular (equal-sided)
+  polygon with \code{n} sides, centred at \code{centre},
+  with sides of equal length \code{edge}.
+  The function \code{hexagon} is the special case \code{n=6}.
+
+  The orientation of the polygon is determined by the argument \code{align}.
+  If \code{align="no"}, one vertex of the polygon is placed on the
+  \eqn{x}-axis.
+  Otherwise, an edge of the polygon is aligned with one side of
+  the frame, specified by the value of \code{align}.
+}
+\value{
+  A window (object of class \code{"owin"}).
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{disc}}, \code{\link{ellipse}},
+  \code{\link{owin}}.
+
+  \code{\link{hextess}} for hexagonal tessellations.
+}
+\examples{
+  plot(hexagon())
+  plot(regularpolygon(7))
+  plot(regularpolygon(7, align="left"))
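+  # the centre and edge length can also be specified (a sketch)
+  plot(hexagon(edge=2, centre=c(1,1)))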
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/relevel.im.Rd b/man/relevel.im.Rd
new file mode 100644
index 0000000..e66b08c
--- /dev/null
+++ b/man/relevel.im.Rd
@@ -0,0 +1,64 @@
+\name{relevel.im}
+\alias{relevel.im}
+\alias{relevel.ppp}
+\alias{relevel.ppx}
+\title{
+  Reorder Levels of a Factor-Valued Image or Pattern
+}
+\description{
+  For a pixel image with factor values, or a point pattern with
+  factor-valued marks, the levels of the factor are re-ordered so that
+  the level \code{ref} is first and the others are moved down.
+}
+\usage{
+  \method{relevel}{im}(x, ref, \dots)
+
+  \method{relevel}{ppp}(x, ref, \dots)
+
+  \method{relevel}{ppx}(x, ref, \dots)
+}
+\arguments{
+  \item{x}{
+    A pixel image (object of class \code{"im"}) with factor values,
+    or a point pattern (object of class \code{"ppp"}, \code{"ppx"},
+    \code{"lpp"} or \code{"pp3"}) with
+    factor-valued marks.
+  }
+  \item{ref}{
+    The reference level.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  These functions are methods for the generic
+  \code{\link[stats]{relevel}}.
+
+  If \code{x} is a pixel image (object of class \code{"im"}) with
+  factor values, or a point pattern (object of class \code{"ppp"},
+  \code{"ppx"}, \code{"lpp"} or \code{"pp3"})
+  with factor-valued marks, the levels of the factor are changed so
+  that the level specified by \code{ref} comes first. 
+}
+\value{
+  Object of the same kind as \code{x}.
+}
+\author{
+  \adrian, \rolf and \ege
+}
+\seealso{
+  \code{\link{mergeLevels}}
+}
+\examples{
+  amacrine
+  relevel(amacrine, "on")
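+  # a sketch for a factor-valued pixel image: cut a distance map into
+  # bands (the labels here are hypothetical) and promote one level
+  Z <- cut(distmap(cells), breaks=3, labels=c("lo", "med", "hi"))
+  relevel(Z, "hi")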
+}
+\keyword{manip}
+\keyword{spatial}
diff --git a/man/reload.or.compute.Rd b/man/reload.or.compute.Rd
new file mode 100644
index 0000000..a8f4eaf
--- /dev/null
+++ b/man/reload.or.compute.Rd
@@ -0,0 +1,72 @@
+\name{reload.or.compute}
+\alias{reload.or.compute}
+\title{
+  Compute Unless Previously Saved
+}
+\description{
+  If the designated file does not yet exist,
+  evaluate the expression and save the results in the file.
+  If the file already exists, re-load the results from the file.
+}
+\usage{
+reload.or.compute(filename, expr, objects = NULL, destination = parent.frame())
+}
+\arguments{
+  \item{filename}{
+    Name of data file. A character string.
+  }
+  \item{expr}{
+    \R language expression to be evaluated.
+  }
+  \item{objects}{
+    Optional character vector of names of objects to be saved
+    in \code{filename} after evaluating \code{expr},
+    or names of objects that should be present in \code{filename}
+    when loaded.
+  }
+  \item{destination}{
+    Environment in which the resulting objects should be assigned.
+  }
+}
+\details{
+  This facility is useful for saving, and later re-loading, the results of
+  time-consuming computations. It would typically be
+  used in an \R script file or an \code{\link[utils]{Sweave}} document.
+
+  If the file called \code{filename} does not yet exist,
+  then \code{expr} will be evaluated
+  and the results will be saved in \code{filename}.
+  The optional argument \code{objects} specifies which results should be saved
+  to the file: the default is to save all objects that were created
+  by evaluating the expression. 
+
+  If the file called \code{filename} already exists, then it
+  will be loaded. The optional argument \code{objects} specifies the names
+  of objects that should be present in the file; a warning is issued
+  if any of them are missing.
+
+  The resulting objects can be assigned into any desired \code{destination}.
+  The default behaviour is equivalent to evaluating \code{expr}
+  in the current environment.
+}
+\value{
+  Character vector (invisible) giving the names of the objects computed
+  or loaded.
+}
+\examples{
+   \dontrun{
+    if(FALSE) {
+     reload.or.compute("mydata.rda", {
+        x <- very.long.computation()
+        y <- 42
+      })
+   }
+   }
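+   # a runnable sketch using a temporary file
+   tmp <- tempfile(fileext=".rda")
+   reload.or.compute(tmp, { z <- 17 })
+   reload.or.compute(tmp, { z <- 42 })  # file now exists, so z reloads as 17
+   z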
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{utilities}
diff --git a/man/relrisk.Rd b/man/relrisk.Rd
new file mode 100644
index 0000000..6406e92
--- /dev/null
+++ b/man/relrisk.Rd
@@ -0,0 +1,53 @@
+\name{relrisk}
+\alias{relrisk}
+\title{
+  Estimate of Spatially-Varying Relative Risk
+}
+\description{
+  Generic command to estimate the 
+  spatially-varying probability of each type of point, or the ratios of
+  such probabilities.
+}
+\usage{
+  relrisk(X, \dots) 
+}
+\arguments{
+  \item{X}{
+    Either a point pattern (class \code{"ppp"})
+    or a fitted point process model (class \code{"ppm"})
+    from which the probabilities will be estimated.
+  }
+  \item{\dots}{
+    Additional arguments appropriate to the method.
+  }
+}
+\details{
+  In a point pattern containing several different types of points,
+  we may be interested in the spatially-varying probability of each
+  possible type, or the relative risks which are the
+  ratios of such probabilities.
+  
+  The command \code{\link{relrisk}} is generic and can be used to
+  estimate relative risk in different ways.
+  
+  The function \code{\link{relrisk.ppp}} is the method for point pattern
+  datasets. It computes \emph{nonparametric} estimates of relative risk
+  by kernel smoothing.
+
+  The function \code{\link{relrisk.ppm}} is the method for fitted point
+  process models (class \code{"ppm"}). It computes \emph{parametric}
+  estimates of relative risk, using the fitted model.
+}
+\seealso{
+  \code{\link{relrisk.ppp}},
+  \code{\link{relrisk.ppm}}.
+}
+\author{\adrian, \rolf and \ege}
+\keyword{spatial}
diff --git a/man/relrisk.ppm.Rd b/man/relrisk.ppm.Rd
new file mode 100644
index 0000000..3a102d2
--- /dev/null
+++ b/man/relrisk.ppm.Rd
@@ -0,0 +1,206 @@
+\name{relrisk.ppm}
+\alias{relrisk.ppm}
+\title{
+  Parametric Estimate of Spatially-Varying Relative Risk
+}
+\description{
+  Given a point process model fitted to a multitype point pattern,
+  this function computes the fitted spatially-varying probability
+  of each type of point, or the ratios of such probabilities,
+  according to the fitted model. Optionally the standard errors of the estimates
+  are also computed.
+}
+\usage{
+\method{relrisk}{ppm}(X, \dots,
+                     at = c("pixels", "points"),
+                     relative = FALSE, se = FALSE,
+                     casecontrol = TRUE, control = 1, case,
+                     ngrid = NULL, window = NULL)
+}
+\arguments{
+  \item{X}{
+    A fitted point process model (object of class \code{"ppm"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{at}{
+    String specifying whether to compute the probability values
+    at a grid of pixel locations (\code{at="pixels"}) or
+    only at the points of \code{X} (\code{at="points"}).
+  }
+  \item{relative}{
+    Logical.
+    If \code{FALSE} (the default) the algorithm
+    computes the probabilities of each type of point.
+    If \code{TRUE}, it computes the    
+    \emph{relative risk}, the ratio of probabilities
+    of each type relative to the probability of a control.
+  }
+  \item{se}{
+    Logical value indicating whether to compute standard errors
+    as well.
+  }
+  \item{casecontrol}{
+    Logical. Whether to treat a bivariate point pattern
+    as consisting of cases and controls, and return only the
+    probability or relative risk of a case.
+    Ignored if there are more than 2 types of points.
+    See Details.
+  }
+  \item{control}{
+    Integer, or character string, identifying which mark value
+    corresponds to a control. 
+  }
+  \item{case}{
+    Integer, or character string, identifying which mark value
+    corresponds to a case (rather than a control)
+    in a bivariate point pattern.
+    This is an alternative to the argument \code{control}
+    in a bivariate point pattern. 
+    Ignored if there are more than 2 types of points.
+  }
+  \item{ngrid}{
+    Optional. Dimensions of a rectangular grid of locations
+    inside \code{window} where the predictions should be computed.
+    An integer, or an integer vector of length 2,
+    specifying the number of grid points in the \eqn{y} and \eqn{x}
+    directions.
+    (Applies only when \code{at="pixels"}.)
+  }
+  \item{window}{
+    Optional. A window (object of class \code{"owin"})
+    \emph{delimiting} the locations where predictions
+    should be computed. Defaults to the window of the
+    original data used to fit the model \code{X}.
+    (Applies only when \code{at="pixels"}.)
+  }
+}
+\details{
+  The command \code{\link{relrisk}} is generic and can be used to
+  estimate relative risk in different ways.
+  
+  This function \code{relrisk.ppm} is the method for fitted point
+  process models (class \code{"ppm"}). It computes \emph{parametric}
+  estimates of relative risk, using the fitted model.
+
+  If \code{X}  is a bivariate point pattern
+  (a multitype point pattern consisting of two types of points)
+  then by default,
+  the points of the first type (the first level of \code{marks(X)})
+  are treated as controls or non-events, and points of the second type
+  are treated as cases or events. Then by default this command computes
+  the spatially-varying \emph{probability} of a case,
+  i.e. the probability \eqn{p(u)}
+  that a point at spatial location \eqn{u}
+  will be a case. If \code{relative=TRUE}, it computes the
+  spatially-varying \emph{relative risk} of a case relative to a
+  control, \eqn{r(u) = p(u)/(1- p(u))}.
+
+  If \code{X} is a multitype point pattern with \eqn{m > 2} types,
+  or if \code{X} is a bivariate point pattern
+  and \code{casecontrol=FALSE},
+  then by default this command computes, for each type \eqn{j},
+  a parametric estimate of
+  the spatially-varying \emph{probability} of an event of type \eqn{j}.
+  This is the probability \eqn{p_j(u)}{p[j](u)}
+  that a point at spatial location \eqn{u}
+  will belong to type \eqn{j}.
+  If \code{relative=TRUE}, the command computes the
+  \emph{relative risk} of an event of type \eqn{j}
+  relative to a control,
+  \eqn{r_j(u) = p_j(u)/p_k(u)}{r[j](u) = p[j](u)/p[k](u)},
+  where events of type \eqn{k} are treated as controls.
+  The argument \code{control} determines which type \eqn{k}
+  is treated as a control.
+
+  If \code{at = "pixels"} the calculation is performed for
+  every spatial location \eqn{u} on a fine pixel grid, and the result
+  is a pixel image representing the function \eqn{p(u)}
+  or a list of pixel images representing the functions 
+  \eqn{p_j(u)}{p[j](u)} or \eqn{r_j(u)}{r[j](u)}
+  for \eqn{j = 1,\ldots,m}{j = 1,...,m}.
+  An infinite value of relative risk (arising because the
+  probability of a control is zero) will be returned as \code{NA}.
+
+  If \code{at = "points"} the calculation is performed
+  only at the data points \eqn{x_i}{x[i]}. By default
+  the result is a vector of values
+  \eqn{p(x_i)}{p(x[i])} giving the estimated probability of a case
+  at each data point, or a matrix of values 
+  \eqn{p_j(x_i)}{p[j](x[i])} giving the estimated probability of
+  each possible type \eqn{j} at each data point.
+  If \code{relative=TRUE} then the relative risks
+  \eqn{r(x_i)}{r(x[i])} or \eqn{r_j(x_i)}{r[j](x[i])} are
+  returned.
+  An infinite value of relative risk (arising because the
+  probability of a control is zero) will be returned as \code{Inf}.
+
+  Probabilities and risks are computed from the fitted intensity of the model,
+  using \code{\link{predict.ppm}}.
+  If \code{se=TRUE} then standard errors will also be computed,
+  based on asymptotic theory, using \code{\link{vcov.ppm}}.
+}
+\value{
+  If \code{se=FALSE} (the default), the format is described below.
+  If \code{se=TRUE}, the result is a list of two entries,
+  \code{estimate} and \code{SE}, each having the format described below.
+  
+  If \code{X} consists of only two types of points,
+  and if \code{casecontrol=TRUE},
+  the result is a pixel image (if \code{at="pixels"})
+  or a vector (if \code{at="points"}).
+  The pixel values or vector values
+  are the probabilities of a case if \code{relative=FALSE},
+  or the relative risk of a case (probability of a case divided by the
+  probability of a control) if \code{relative=TRUE}.
+
+  If \code{X} consists of more than two types of points,
+  or if \code{casecontrol=FALSE}, the result is:
+  \itemize{
+    \item (if \code{at="pixels"})
+    a list of pixel images, with one image for each possible type of point.
+    The result also belongs to the class \code{"solist"} so that it can
+    be printed and plotted.
+    \item
+    (if \code{at="points"})
+    a matrix of probabilities, with rows corresponding to
+    data points \eqn{x_i}{x[i]}, and columns corresponding
+    to types \eqn{j}.
+  }
+  The pixel values or matrix entries
+  are the probabilities of each type of point if \code{relative=FALSE},
+  or the relative risk of each type (probability of each type divided by the
+  probability of a control) if \code{relative=TRUE}.
+
+  If \code{relative=FALSE}, the resulting values always lie between 0
+  and 1. If \code{relative=TRUE}, the results are either non-negative
+  numbers, or the values \code{Inf} or \code{NA}. 
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  There is another method \code{\link{relrisk.ppp}} for point pattern datasets
+  which computes \emph{nonparametric} estimates of relative risk
+  by kernel smoothing.
+
+  See also
+  \code{\link{relrisk}}, 
+  \code{\link{relrisk.ppp}},
+  \code{\link{ppm}}
+}
+\examples{
+  fit <- ppm(chorley ~ marks * (x+y))
+  rr <- relrisk(fit, relative=TRUE, control="lung", se=TRUE)
+  plot(rr$estimate)
+  plot(rr$SE)
+  rrX <- relrisk(fit, at="points", relative=TRUE, control="lung")
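+  # a sketch: per-type probability images, no case/control collapse
+  pall <- relrisk(fit, casecontrol=FALSE)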
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/relrisk.ppp.Rd b/man/relrisk.ppp.Rd
new file mode 100644
index 0000000..4615382
--- /dev/null
+++ b/man/relrisk.ppp.Rd
@@ -0,0 +1,238 @@
+\name{relrisk.ppp}
+\alias{relrisk.ppp}
+\title{
+  Nonparametric Estimate of Spatially-Varying Relative Risk
+}
+\description{
+  Given a multitype point pattern, this function estimates the
+  spatially-varying probability of each type of point, or the ratios of
+  such probabilities, using kernel smoothing.
+  The default smoothing bandwidth is selected by
+  cross-validation.
+}
+\usage{
+\method{relrisk}{ppp}(X, sigma = NULL, ..., varcov = NULL, at = "pixels",
+           relative=FALSE,
+           se=FALSE,
+           casecontrol=TRUE, control=1, case)
+}
+\arguments{
+  \item{X}{
+    A multitype point pattern (object of class \code{"ppp"}
+    which has factor valued marks).
+  }
+  \item{sigma}{
+    Optional. The numeric value of the smoothing bandwidth
+    (the standard deviation of the isotropic
+    Gaussian smoothing kernel).
+    Alternatively \code{sigma} may be a function which can be used
+    to select a different bandwidth for each type of point. See Details.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{bw.relrisk}} to select the
+    bandwidth, or passed to \code{\link{density.ppp}} to control the
+    pixel resolution.
+  }
+  \item{varcov}{
+    Optional. Variance-covariance matrix of the anisotropic Gaussian
+    smoothing kernel. Incompatible with \code{sigma}.
+  }
+  \item{at}{
+    String specifying whether to compute the probability values
+    at a grid of pixel locations (\code{at="pixels"}) or
+    only at the points of \code{X} (\code{at="points"}).
+  }
+  \item{relative}{
+    Logical.
+    If \code{FALSE} (the default) the algorithm
+    computes the probabilities of each type of point.
+    If \code{TRUE}, it computes the    
+    \emph{relative risk}, the ratio of probabilities
+    of each type relative to the probability of a control.
+  }
+  \item{se}{
+    Logical value indicating whether to compute standard errors
+    as well.
+  }
+  \item{casecontrol}{
+    Logical. Whether to treat a bivariate point pattern
+    as consisting of cases and controls, and return only the
+    probability or relative risk of a case.
+    Ignored if there are more than 2 types of points.
+    See Details.
+  }
+  \item{control}{
+    Integer, or character string, identifying which mark value
+    corresponds to a control. 
+  }
+  \item{case}{
+    Integer, or character string, identifying which mark value
+    corresponds to a case (rather than a control)
+    in a bivariate point pattern.
+    This is an alternative to the argument \code{control}
+    in a bivariate point pattern. 
+    Ignored if there are more than 2 types of points.
+  }
+}
+\details{
+  The command \code{\link{relrisk}} is generic and can be used to
+  estimate relative risk in different ways.
+  
+  This function \code{relrisk.ppp} is the method for point pattern
+  datasets. It computes \emph{nonparametric} estimates of relative risk
+  by kernel smoothing.
+
+  If \code{X}  is a bivariate point pattern
+  (a multitype point pattern consisting of two types of points)
+  then by default,
+  the points of the first type (the first level of \code{marks(X)})
+  are treated as controls or non-events, and points of the second type
+  are treated as cases or events. Then by default this command computes
+  the spatially-varying \emph{probability} of a case,
+  i.e. the probability \eqn{p(u)}
+  that a point at spatial location \eqn{u}
+  will be a case. If \code{relative=TRUE}, it computes the
+  spatially-varying \emph{relative risk} of a case relative to a
+  control, \eqn{r(u) = p(u)/(1- p(u))}.
+
+  If \code{X} is a multitype point pattern with \eqn{m > 2} types,
+  or if \code{X} is a bivariate point pattern
+  and \code{casecontrol=FALSE},
+  then by default this command computes, for each type \eqn{j},
+  a nonparametric estimate of
+  the spatially-varying \emph{probability} of an event of type \eqn{j}.
+  This is the probability \eqn{p_j(u)}{p[j](u)}
+  that a point at spatial location \eqn{u}
+  will belong to type \eqn{j}.
+  If \code{relative=TRUE}, the command computes the
+  \emph{relative risk} of an event of type \eqn{j}
+  relative to a control,
+  \eqn{r_j(u) = p_j(u)/p_k(u)}{r[j](u) = p[j](u)/p[k](u)},
+  where events of type \eqn{k} are treated as controls.
+  The argument \code{control} determines which type \eqn{k}
+  is treated as a control.
+
+  If \code{at = "pixels"} the calculation is performed for
+  every spatial location \eqn{u} on a fine pixel grid, and the result
+  is a pixel image representing the function \eqn{p(u)}
+  or a list of pixel images representing the functions 
+  \eqn{p_j(u)}{p[j](u)} or \eqn{r_j(u)}{r[j](u)}
+  for \eqn{j = 1,\ldots,m}{j = 1,...,m}.
+  An infinite value of relative risk (arising because the
+  probability of a control is zero) will be returned as \code{NA}.
+
+  If \code{at = "points"} the calculation is performed
+  only at the data points \eqn{x_i}{x[i]}. By default
+  the result is a vector of values
+  \eqn{p(x_i)}{p(x[i])} giving the estimated probability of a case
+  at each data point, or a matrix of values 
+  \eqn{p_j(x_i)}{p[j](x[i])} giving the estimated probability of
+  each possible type \eqn{j} at each data point.
+  If \code{relative=TRUE} then the relative risks
+  \eqn{r(x_i)}{r(x[i])} or \eqn{r_j(x_i)}{r[j](x[i])} are
+  returned.
+  An infinite value of relative risk (arising because the
+  probability of a control is zero) will be returned as \code{Inf}.
+
+  Estimation is performed by a simple Nadaraya-Watson type kernel
+  smoother (Diggle, 2003).
+  The smoothing bandwidth can be specified in any of the following ways:
+  \itemize{
+    \item \code{sigma} is a single numeric value, giving the standard
+    deviation of the isotropic Gaussian kernel.
+    \item \code{sigma} is a numeric vector of length 2, giving the
+    standard deviations in the \eqn{x} and \eqn{y} directions of
+    a Gaussian kernel.
+    \item \code{varcov} is a 2 by 2 matrix giving the
+    variance-covariance matrix of the Gaussian kernel.
+    \item \code{sigma} is a \code{function} which selects
+    the bandwidth. 
+    Bandwidth selection will be applied
+    \bold{separately to each type of point}.
+    An example of such a function is \code{\link{bw.diggle}}.
+    \item 
+    \code{sigma} and \code{varcov}
+    are both missing or null. Then a \bold{common}
+    smoothing bandwidth \code{sigma}
+    will be selected by cross-validation using \code{\link{bw.relrisk}}.
+  }
+
+  If \code{se=TRUE} then standard errors will also be computed,
+  based on asymptotic theory, \emph{assuming a Poisson process}.
+}
+\value{
+  If \code{se=FALSE} (the default), the format is described below.
+  If \code{se=TRUE}, the result is a list of two entries,
+  \code{estimate} and \code{SE}, each having the format described below.
+  
+  If \code{X} consists of only two types of points,
+  and if \code{casecontrol=TRUE},
+  the result is a pixel image (if \code{at="pixels"})
+  or a vector (if \code{at="points"}).
+  The pixel values or vector values
+  are the probabilities of a case if \code{relative=FALSE},
+  or the relative risk of a case (probability of a case divided by the
+  probability of a control) if \code{relative=TRUE}.
+
+  If \code{X} consists of more than two types of points,
+  or if \code{casecontrol=FALSE}, the result is:
+  \itemize{
+    \item (if \code{at="pixels"})
+    a list of pixel images, with one image for each possible type of point.
+    The result also belongs to the class \code{"solist"} so that it can
+    be printed and plotted.
+    \item
+    (if \code{at="points"})
+    a matrix of probabilities, with rows corresponding to
+    data points \eqn{x_i}{x[i]}, and columns corresponding
+    to types \eqn{j}.
+  }
+  The pixel values or matrix entries
+  are the probabilities of each type of point if \code{relative=FALSE},
+  or the relative risk of each type (probability of each type divided by the
+  probability of a control) if \code{relative=TRUE}.
+
+  If \code{relative=FALSE}, the resulting values always lie between 0
+  and 1. If \code{relative=TRUE}, the results are either non-negative
+  numbers, or the values \code{Inf} or \code{NA}. 
+}
+\seealso{
+  There is another method \code{\link{relrisk.ppm}} for point process
+  models which computes \emph{parametric}
+  estimates of relative risk, using the fitted model.
+
+  See also 
+ \code{\link{bw.relrisk}},
+ \code{\link{density.ppp}},
+ \code{\link{Smooth.ppp}},
+ \code{\link{eval.im}}
+}
+\examples{
+   p.oak <- relrisk(urkiola, 20)
+   if(interactive()) {
+      plot(p.oak, main="proportion of oak")
+      plot(eval.im(p.oak > 0.3), main="More than 30 percent oak")
+      plot(split(lansing), main="Lansing Woods")
+      p.lan <- relrisk(lansing, 0.05, se=TRUE)
+      plot(p.lan$estimate, main="Lansing Woods species probability")
+      plot(p.lan$SE, main="Lansing Woods standard error")
+      wh <- im.apply(p.lan$estimate, which.max)
+      types <- levels(marks(lansing))
+      wh <- eval.im(types[wh])
+      plot(wh, main="Most common species")
+   }
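+   # bandwidth sketches matching the options listed in Details:
+   \dontrun{
+      # separate bandwidth for each type, selected by bw.diggle
+      p1 <- relrisk(urkiola, sigma=bw.diggle)
+      # anisotropic Gaussian kernel via a variance-covariance matrix
+      p2 <- relrisk(urkiola, varcov=diag(c(20,40)^2))
+   }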
+}
+\references{
+  Diggle, P.J. (2003)
+  \emph{Statistical analysis of spatial point patterns},
+  Second edition. Arnold.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{smooth}
diff --git a/man/requireversion.Rd b/man/requireversion.Rd
new file mode 100644
index 0000000..23f5106
--- /dev/null
+++ b/man/requireversion.Rd
@@ -0,0 +1,44 @@
+\name{requireversion}
+\alias{requireversion}
+\title{
+  Require a Specific Version of a Package
+}
+\description{
+  Checks that the version number of a specified package is greater than or
+  equal to the specified version number.
+  For use in stand-alone \R scripts.
+}
+\usage{
+requireversion(pkg, ver)
+}
+\arguments{
+  \item{pkg}{
+    Package name.
+  }
+  \item{ver}{
+    Character string containing version number.
+  }
+}
+\details{
+  This function checks whether the installed version of the
+  package \code{pkg} is greater than or equal to \code{ver}.
+
+  It is useful in stand-alone \R scripts, which often require
+  a particular version of a package in order to work correctly.
+
+  \bold{This function should not be used inside a package}:
+  for that purpose, the dependence on packages and versions
+  should be specified in the package description file.
+}
+\value{
+  Null.
+}
+\author{
+  \adrian
+}
+\examples{
+   \dontrun{
+      requireversion(spatstat, "1.42-0")
+   }
+}
+\keyword{environment}
diff --git a/man/rescale.Rd b/man/rescale.Rd
new file mode 100644
index 0000000..c2b9352
--- /dev/null
+++ b/man/rescale.Rd
@@ -0,0 +1,80 @@
+\name{rescale}
+\alias{rescale}
+\title{Convert dataset to another unit of length}
+\description{
+  Converts between different units of length
+  in a spatial dataset, such as a point pattern or a window. 
+}
+\usage{
+  rescale(X, s, unitname)
+}
+\arguments{
+  \item{X}{Any suitable dataset representing a two-dimensional
+    object, such as a point pattern (object of class \code{"ppp"}),
+    or a window (object of class \code{"owin"}).}
+  \item{s}{Conversion factor: the new units are \code{s} times the old
+    units.}
+  \item{unitname}{
+    Optional. New name for the unit of length.
+    See \code{\link{unitname}}.
+  }
+}
+\value{
+  Another object of the same type, representing the same
+  data, but expressed in the new units.
+}
+\details{
+  This is generic. Methods are provided for many spatial objects.
+
+  The spatial coordinates in the dataset \code{X} will be re-expressed
+  in terms of a new unit of length that is \code{s} times the current
+  unit of length given in \code{X}. The name of the unit of length
+  will also be adjusted. The result is an object of the same type,
+  representing the same data, but expressed in the new units.
+
+  For example if \code{X} is a dataset giving coordinates in metres,
+  then \code{rescale(X,1000)} will take the new unit of length
+  to be 1000 metres.
+  To do this, it will divide the old coordinate values by 1000
+  to obtain coordinates expressed in kilometres, and change the
+  name of the unit of length from \code{"metres"} to \code{"1000 metres"}.
+
+  If \code{unitname} is given, it will be taken as the new name of the unit
+  of length. It should be a valid name for the
+  unit of length, as described in the help for \code{\link{unitname}}.
+  For example if \code{X} is a dataset giving coordinates in metres,
+  \code{rescale(X, 1000, "km")} will divide the coordinate
+  values by 1000 to obtain coordinates in
+  kilometres, and the unit name will be changed to \code{"km"}.
+}
+\section{Note}{
+  The result of this operation is equivalent to the original dataset.
+  If you want to actually change the coordinates by
+  a linear transformation, producing a dataset that is not equivalent
+  to the original one, use \code{\link{affine}}.
+}
+\seealso{
+Available methods:
+\code{\link{rescale.im}},
+\code{\link{rescale.layered}},
+\code{\link{rescale.linnet}},
+\code{\link{rescale.lpp}},
+\code{\link{rescale.owin}},
+\code{\link{rescale.ppp}},
+\code{\link{rescale.psp}}
+and \code{\link{rescale.units}}.
+
+Other generics: 
+\code{\link{unitname}},
+  \code{\link{affine}},
+  \code{\link{rotate}},
+  \code{\link{shift}}.
+}
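+\examples{
+  # a sketch of the conversion described in Details
+  X <- runifpoint(20, owin(c(0,1000), c(0,1000), unitname="metre"))
+  rescale(X, 1000, "km")   # coordinates divided by 1000; unit renamed
+}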
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/rescale.im.Rd b/man/rescale.im.Rd
new file mode 100644
index 0000000..b722ffd
--- /dev/null
+++ b/man/rescale.im.Rd
@@ -0,0 +1,69 @@
+\name{rescale.im}
+\alias{rescale.im}
+\title{Convert Pixel Image to Another Unit of Length}
+\description{
+  Converts a pixel image to 
+  another unit of length.
+}
+\usage{
+ \method{rescale}{im}(X, s, unitname)
+}
+\arguments{
+  \item{X}{Pixel image (object of class \code{"im"}).}
+  \item{s}{Conversion factor: the new units are \code{s} times the old units.}
+  \item{unitname}{
+    Optional. New name for the unit of length.
+    See \code{\link{unitname}}.
+  }
+}
+\value{
+  Another pixel image (of class \code{"im"}),
+  containing the same pixel values, but with pixel coordinates
+  expressed in the new units.
+}
+\details{
+  This is a method for the generic function \code{\link{rescale}}.
+
+  The spatial coordinates of the pixels in \code{X}
+  will be re-expressed
+  in terms of a new unit of length that is \code{s} times the current
+  unit of length given in \code{X}.
+  (Thus, the coordinate values are \emph{divided} by \code{s},
+  while the unit value is multiplied by \code{s}).
+
+  If \code{s} is missing, then the coordinates will be re-expressed
+  in \sQuote{native} units; for example if the current unit is
+  equal to 0.1 metres, then the coordinates will be re-expressed in metres.
+
+  The result is a pixel image representing the \emph{same} data
+  but re-expressed in a different unit.
+
+  Pixel values are unchanged. This may not be what you intended!
+}
+\seealso{
+  \code{\link{im}},
+  \code{\link{rescale}},
+  \code{\link{unitname}},
+  \code{\link{eval.im}}
+}
+\examples{
+# Bramble Canes data: 1 unit = 9 metres
+  data(bramblecanes)
+# distance transform
+  Z <- distmap(bramblecanes)
+# convert to metres
+# first alter the pixel values
+  Zm <- eval.im(9 * Z)
+# now rescale the pixel coordinates
+  Z <- rescale(Zm, 1/9)
+# or equivalently
+  Z <- rescale(Zm)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/rescale.owin.Rd b/man/rescale.owin.Rd
new file mode 100644
index 0000000..4d1f306
--- /dev/null
+++ b/man/rescale.owin.Rd
@@ -0,0 +1,70 @@
+\name{rescale.owin}
+\alias{rescale.owin}
+\title{Convert Window to Another Unit of Length}
+\description{
+  Converts a window to another unit of length.
+}
+\usage{
+  \method{rescale}{owin}(X, s, unitname)
+}
+\arguments{
+  \item{X}{Window (object of class \code{"owin"}).}
+  \item{s}{Conversion factor: the new units are \code{s} times the old units.}
+  \item{unitname}{
+    Optional. New name for the unit of length.
+    See \code{\link{unitname}}.
+  }
+}
+\value{
+  Another window object (of class \code{"owin"}) representing the same
+  window, but expressed in the new units.
+}
+\details{
+  This is a method for the generic function \code{\link{rescale}}.
+
+  The spatial coordinates in the window \code{X}
+  will be re-expressed
+  in terms of a new unit of length that is \code{s} times the current
+  unit of length given in \code{X}.
+  (Thus, the coordinate values are \emph{divided} by \code{s},
+  while the unit value is multiplied by \code{s}).
+
+  The result is a window representing the \emph{same} region of space,
+  but re-expressed in a different unit.
+
+  If \code{s} is missing, then the coordinates will be re-expressed
+  in \sQuote{native} units; for example if the current unit is
+  equal to 0.1 metres, then the coordinates will be re-expressed in metres.
+}
+\section{Note}{
+  The result of this operation is equivalent to the original window.
+  If you want to actually change the coordinates by
+  a linear transformation, producing a window that is larger or smaller
+  than the original one, use \code{\link{affine}}.
+}
+\seealso{
+  \code{\link{unitname}},
+  \code{\link{rescale}},
+  \code{\link{rescale.owin}},
+  \code{\link{affine}},
+  \code{\link{rotate}},
+  \code{\link{shift}}
+}
+\examples{
+  data(swedishpines)
+  W <- Window(swedishpines)
+  W
+# coordinates are in decimetres (0.1 metre)
+# convert to metres:
+  rescale(W, 10)
+# or equivalently
+  rescale(W)
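+# convert to metres and rename the unit at the same time (a sketch)
+  rescale(W, 10, "metre")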
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/rescale.ppp.Rd b/man/rescale.ppp.Rd
new file mode 100644
index 0000000..b61222f
--- /dev/null
+++ b/man/rescale.ppp.Rd
@@ -0,0 +1,71 @@
+\name{rescale.ppp}
+\alias{rescale.ppp}
+\title{Convert Point Pattern to Another Unit of Length}
+\description{
+  Converts a point pattern dataset to 
+  another unit of length.
+}
+\usage{
+ \method{rescale}{ppp}(X, s, unitname)
+}
+\arguments{
+  \item{X}{Point pattern (object of class \code{"ppp"}).}
+  \item{s}{Conversion factor: the new units are \code{s} times the old units.}
+  \item{unitname}{
+    Optional. New name for the unit of length.
+    See \code{\link{unitname}}.
+  }
+}
+\value{
+  Another point pattern (of class \code{"ppp"}),
+  representing the same data, but expressed in the new units.
+}
+\details{
+  This is a method for the generic function \code{\link{rescale}}.
+
+  The spatial coordinates in the point pattern \code{X}
+  (and its window) will be re-expressed
+  in terms of a new unit of length that is \code{s} times the current
+  unit of length given in \code{X}.
+  (Thus, the coordinate values are \emph{divided} by \code{s},
+  while the unit value is multiplied by \code{s}).
+
+  The result is a point pattern representing the \emph{same} data
+  but re-expressed in a different unit.
+
+  Mark values are unchanged.
+
+  If \code{s} is missing, then the coordinates will be re-expressed
+  in \sQuote{native} units; for example if the current unit is
+  equal to 0.1 metres, then the coordinates will be re-expressed in metres.
+}
+\section{Note}{
+  The result of this operation is equivalent to the original point pattern.
+  If you want to actually change the coordinates by
+  a linear transformation, producing a point pattern that is not
+  equivalent to the original one, use \code{\link{affine}}.
+}
+\seealso{
+  \code{\link{unitname}},
+  \code{\link{rescale}},
+  \code{\link{rescale.owin}},
+  \code{\link{affine}},
+  \code{\link{rotate}},
+  \code{\link{shift}}
+}
+\examples{
+# Bramble Canes data: 1 unit = 9 metres
+  data(bramblecanes)
+# convert to metres
+  bram <- rescale(bramblecanes, 1/9)
+# or equivalently
+  bram <- rescale(bramblecanes)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/rescale.psp.Rd b/man/rescale.psp.Rd
new file mode 100644
index 0000000..7b23366
--- /dev/null
+++ b/man/rescale.psp.Rd
@@ -0,0 +1,72 @@
+\name{rescale.psp}
+\alias{rescale.psp}
+\title{Convert Line Segment Pattern to Another Unit of Length}
+\description{
+  Converts a line segment pattern dataset to 
+  another unit of length.
+}
+\usage{
+ \method{rescale}{psp}(X, s, unitname)
+}
+\arguments{
+  \item{X}{Line segment pattern (object of class \code{"psp"}).}
+  \item{s}{Conversion factor: the new units are \code{s} times the old units.}
+  \item{unitname}{
+    Optional. New name for the unit of length.
+    See \code{\link{unitname}}.
+  }
+}
+\value{
+  Another line segment pattern (of class \code{"psp"}),
+  representing the same data, but expressed in the new units.
+}
+\details{
+  This is a method for the generic function \code{\link{rescale}}.
+
+  The spatial coordinates in the line segment pattern \code{X}
+  (and its window) will be re-expressed
+  in terms of a new unit of length that is \code{s} times the current
+  unit of length given in \code{X}.
+  (Thus, the coordinate values are \emph{divided} by \code{s},
+  while the unit value is multiplied by \code{s}).
+
+  The result is a line segment pattern representing the \emph{same} data
+  but re-expressed in a different unit.
+
+  Mark values are unchanged.
+
+  If \code{s} is missing, then the coordinates will be re-expressed
+  in \sQuote{native} units; for example if the current unit is
+  equal to 0.1 metres, then the coordinates will be re-expressed in metres.
+}
+\section{Note}{
+  The result of this operation is equivalent to the original segment pattern.
+  If you want to actually change the coordinates by
+  a linear transformation, producing a segment pattern that is not
+  equivalent to the original one, use \code{\link{affine}}.
+}
+\seealso{
+  \code{\link{unitname}},
+  \code{\link{rescale}},
+  \code{\link{affine}},
+  \code{\link{rotate}},
+  \code{\link{shift}}
+}
+\examples{
+   data(copper)
+   X <- copper$Lines
+   X
+   # data are in km
+   # convert to metres
+   rescale(X, 1/1000)
+
+   # convert data and rename unit
+   rescale(X, 1/1000, c("metre", "metres"))
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/rescue.rectangle.Rd b/man/rescue.rectangle.Rd
new file mode 100644
index 0000000..4cc7e6d
--- /dev/null
+++ b/man/rescue.rectangle.Rd
@@ -0,0 +1,53 @@
+\name{rescue.rectangle}
+\alias{rescue.rectangle}
+\title{Convert Window Back To Rectangle}
+\description{
+  Determines whether the given window is really a rectangle
+  aligned with the coordinate axes, and if so, converts it to
+  a rectangle object.
+}
+\usage{
+ rescue.rectangle(W)
+}
+\arguments{
+  \item{W}{A window (object of class \code{"owin"}).}
+}
+\value{
+  Another object of class \code{"owin"} representing the
+  same window.
+}
+\details{
+  This function decides whether the window \code{W} is actually a rectangle
+  aligned with the coordinate axes. This will be true if
+  \code{W} is 
+  \itemize{
+    \item a rectangle (window object of type \code{"rectangle"});
+    \item a polygon (window object of type \code{"polygonal"}
+    with a single polygonal boundary) that is a rectangle aligned with
+    the coordinate axes;
+    \item a binary mask (window object of type \code{"mask"}) in which
+    all the pixel entries are \code{TRUE}.
+  }
+  If so, the function returns
+  this rectangle, a window object of type \code{"rectangle"}.
+  If not, the function returns \code{W}.
+}
+\seealso{
+  \code{\link{as.owin}},
+  \code{\link{owin.object}}
+}
+\examples{
+  w <- owin(poly=list(x=c(0,1,1,0),y=c(0,0,1,1)))
+  rw <- rescue.rectangle(w)
+
+  w <- as.mask(unit.square())
+  rw <- rescue.rectangle(w)
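+  # quick check: the result is now a window of type "rectangle"
+  rw$type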
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/residuals.dppm.Rd b/man/residuals.dppm.Rd
new file mode 100644
index 0000000..a19ede8
--- /dev/null
+++ b/man/residuals.dppm.Rd
@@ -0,0 +1,53 @@
+\name{residuals.dppm}
+\alias{residuals.dppm}
+\title{
+  Residuals for Fitted Determinantal Point Process Model
+}
+\description{
+  Given a determinantal point process model fitted to a point pattern,
+  compute residuals.
+}
+\usage{
+  \method{residuals}{dppm}(object, \dots)
+}
+\arguments{
+  \item{object}{
+    The fitted determinantal point process model (an object of class \code{"dppm"})
+    for which residuals should be calculated.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{residuals.ppm}}.
+  }
+}
+\value{
+  An object of class \code{"msr"}
+  representing a signed measure or vector-valued measure
+  (see \code{\link{msr}}). This object can be plotted.
+}
+\details{
+  This function extracts the intensity component of the model using
+  \code{\link{as.ppm}} and then applies \code{\link{residuals.ppm}}
+  to compute the residuals.
+
+  Use \code{\link{plot.msr}} to plot the residuals directly.
+}
+\seealso{
+ \code{\link{msr}},
+ \code{\link{dppm}}
+}
+\examples{
+   fit <- dppm(swedishpines ~ x, dppGauss())
+   rr <- residuals(fit)
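+   # plot the residual measure directly (see plot.msr)
+   \dontrun{plot(rr)}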
+}
+\author{
+\adrian
+
+
+\rolf
+
+and \ege
+
+}
+\keyword{spatial}
+\keyword{models}
+\keyword{methods}
diff --git a/man/residuals.kppm.Rd b/man/residuals.kppm.Rd
new file mode 100644
index 0000000..7e6f29b
--- /dev/null
+++ b/man/residuals.kppm.Rd
@@ -0,0 +1,53 @@
+\name{residuals.kppm}
+\alias{residuals.kppm}
+\title{
+  Residuals for Fitted Cox or Cluster Point Process Model
+}
+\description{
+  Given a Cox or cluster point process model fitted to a point pattern,
+  compute residuals.
+}
+\usage{
+  \method{residuals}{kppm}(object, \dots)
+}
+\arguments{
+  \item{object}{
+    The fitted point process model (an object of class \code{"kppm"})
+    for which residuals should be calculated.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{residuals.ppm}}.
+  }
+}
+\value{
+  An object of class \code{"msr"} 
+  representing a signed measure or vector-valued measure
+  (see \code{\link{msr}}). This object can be plotted.
+}
+\details{
+  This function extracts the intensity component of the model using
+  \code{\link{as.ppm}} and then applies \code{\link{residuals.ppm}}
+  to compute the residuals.
+  
+  Use \code{\link{plot.msr}} to plot the residuals directly.
+}
+\seealso{
+ \code{\link{msr}},
+ \code{\link{kppm}}
+}
+\examples{
+   fit <- kppm(redwood ~ x, "Thomas")
+   rr <- residuals(fit)
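+   # total residual (see integral.msr); near zero for a well-fitted model
+   integral.msr(rr)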
+}
+\author{
+\adrian
+
+
+\rolf
+
+and \ege
+
+}
+\keyword{spatial}
+\keyword{models}
+\keyword{methods}
diff --git a/man/residuals.mppm.Rd b/man/residuals.mppm.Rd
new file mode 100644
index 0000000..51f38ed
--- /dev/null
+++ b/man/residuals.mppm.Rd
@@ -0,0 +1,81 @@
+\name{residuals.mppm}
+\alias{residuals.mppm}
+\title{Residuals for Point Process Model Fitted to Multiple Point Patterns}
+\description{
+  Given a point process model fitted to multiple point patterns,
+  compute residuals for each pattern.
+}
+\usage{
+\method{residuals}{mppm}(object, type = "raw", ..., 
+                             fittedvalues = fitted.mppm(object))
+}
+\arguments{
+  \item{object}{Fitted point process model (object of class \code{"mppm"}).}
+  \item{\dots}{Ignored.}
+  \item{type}{Type of residuals: either \code{"raw"}, \code{"pearson"}
+    or \code{"inverse"}. Partially matched.}
+  \item{fittedvalues}{Advanced use only.
+    Fitted values of the model to be used in the calculation.
+  }
+}
+\details{
+  Baddeley et al (2005) defined residuals for the fit of
+  a point process model to spatial point pattern data.
+  For an explanation of these residuals, see the help file for
+  \code{\link{residuals.ppm}}.
+
+  This function computes the residuals
+  for a point process model fitted to \emph{multiple} point patterns.
+  The \code{object} should be an object of class \code{"mppm"} obtained
+  from \code{\link{mppm}}.
+
+  The return value is a list.
+  The number of entries in the list equals the
+  number of point patterns in the original data. Each entry in the list
+  has the same format as the output of
+  \code{\link{residuals.ppm}}.
+  That is, each entry in the list is a signed measure (object of
+  class \code{"msr"}) giving the residual measure for the corresponding
+  point pattern. 
+}
+\value{
+  A list of signed measures (objects of class \code{"msr"})
+  giving the residual measure for each of the original point patterns.
+  See Details.
+}
+\examples{
+    fit <- mppm(Bugs ~ x, hyperframe(Bugs=waterstriders))
+    r <- residuals(fit)
+    # compute total residual for each point pattern
+    rtot <- sapply(r, integral.msr)
+    # standardise the total residuals
+    areas <- sapply(windows.mppm(fit), area.owin)
+    rtot/sqrt(areas)
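+    # Pearson residuals for each pattern (type as in residuals.ppm)
+    rP <- residuals(fit, type="pearson")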
+}
+\references{
+  Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005)
+  Residual analysis for spatial point processes.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{67}, 617--666.
+
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  London: Chapman and Hall/CRC Press. 
+}
+\author{
+  \adrian, Ida-Maria Sintorn and Leanne Bischoff.
+  Implemented by 
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{mppm}},
+  \code{\link{residuals.ppm}}
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/residuals.ppm.Rd b/man/residuals.ppm.Rd
new file mode 100644
index 0000000..5539ded
--- /dev/null
+++ b/man/residuals.ppm.Rd
@@ -0,0 +1,241 @@
+\name{residuals.ppm}
+\alias{residuals.ppm}
+\title{
+  Residuals for Fitted Point Process Model
+}
+\description{
+  Given a point process model fitted to a point pattern,
+  compute residuals.
+}
+\usage{
+  \method{residuals}{ppm}(object, type="raw", \dots,
+                check=TRUE, drop=FALSE,
+                fittedvalues=NULL, 
+                new.coef=NULL, dropcoef=FALSE,
+                quad=NULL)
+}
+\arguments{
+  \item{object}{
+    The fitted point process model (an object of class \code{"ppm"})
+    for which residuals should be calculated.
+  }
+  \item{type}{
+    String indicating the type of residuals to be calculated.
+    Current options are
+    \code{"raw"}, \code{"inverse"}, \code{"pearson"} and \code{"score"}.
+    A partial match is adequate.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{check}{
+    Logical value indicating whether to check the internal format
+    of \code{object}. If there is any possibility that this object
+    has been restored from a dump file, or has otherwise lost track of
+    the environment where it was originally computed, set
+    \code{check=TRUE}. 
+  }
+  \item{drop}{
+    Logical value determining whether to delete quadrature points
+    that were not used to fit the model. See \code{\link{quad.ppm}} for
+    explanation. 
+  }
+  \item{fittedvalues}{
+    Vector of fitted values for the conditional intensity at 
+    the quadrature points,
+    from which the residuals will be computed. 
+    For expert use only.
+  }
+  \item{new.coef}{
+    Optional. Numeric vector of coefficients for the model,
+    replacing \code{coef(object)}.
+    See the section on Modified Residuals below.
+  }
+  \item{dropcoef}{
+    Internal use only.
+  }
+  \item{quad}{
+    Optional. Data specifying how to re-fit the model.
+    A list of arguments passed to \code{\link{quadscheme}}.
+    See the section on Modified Residuals below.
+  }
+}
+\value{
+  An object of class \code{"msr"} 
+  representing a signed measure or vector-valued measure
+  (see \code{\link{msr}}). This object can be plotted.
+}
+\details{
+  This function computes several kinds of residuals for the fit of
+  a point process model to a spatial point pattern dataset
+  (Baddeley et al, 2005).
+  Use \code{\link{plot.msr}} to plot the residuals directly,
+  or \code{\link{diagnose.ppm}}
+  to produce diagnostic plots based on these residuals.
+
+  The argument \code{object} must be a fitted point process model
+  (object of class \code{"ppm"}). Such objects are produced by the maximum
+  pseudolikelihood fitting algorithm \code{\link{ppm}}.
+  This fitted model object contains complete
+  information about the original data pattern. 
+
+  Residuals are attached both to the data points and to some
+  other points in the window of observation (namely, to the dummy
+  points of the quadrature scheme used to fit the model).
+  If the fitted model is correct, then the sum of the 
+  residuals over all (data and dummy) points in a spatial region \eqn{B}
+  has mean zero. For further explanation, see Baddeley et al (2005).
+  
+  The type of residual
+  is chosen by the argument \code{type}. Current options are
+
+  \describe{
+    \item{\code{"raw"}:}{
+      the raw residuals
+      \deqn{
+	r_j = z_j - w_j \lambda_j
+      }{
+	r[j] = z[j] - w[j] lambda[j]
+      }
+      at the quadrature points \eqn{u_j}{u[j]},
+      where \eqn{z_j}{z[j]} is the indicator equal to 1 if \eqn{u_j}{u[j]}
+      is a data point and 0 if \eqn{u_j}{u[j]} is a dummy point;
+      \eqn{w_j}{w[j]} is the quadrature weight attached to
+      \eqn{u_j}{u[j]}; and
+      \deqn{\lambda_j = \hat\lambda(u_j,x)}{lambda[j] = lambda(u[j],x)}
+      is the conditional intensity of the fitted model at \eqn{u_j}{u[j]}.
+      These are the spatial analogue of the martingale residuals
+      of a one-dimensional counting process.
+    }
+    \item{\code{"inverse"}:}{
+      the `inverse-lambda' residuals (Baddeley et al, 2005)
+      \deqn{
+	r^{(I)}_j = \frac{r_j}{\lambda_j}
+        = \frac{z_j}{\lambda_j} - w_j
+      }{
+	rI[j] = r[j]/lambda[j] = z[j]/lambda[j] - w[j]
+      }
+      obtained by dividing the raw residuals by 
+      the fitted conditional intensity. These are 
+      a counterpart of the exponential energy marks (see \code{\link{eem}}).
+    }
+    \item{\code{"pearson"}:}{
+      the Pearson residuals (Baddeley et al, 2005)
+      \deqn{
+	r^{(P)}_j = \frac{r_j}{\sqrt{\lambda_j}}
+        = \frac{z_j}{\sqrt{\lambda_j}}
+        - w_j \sqrt{\lambda_j}
+      }{
+	rP[j] = r[j]/sqrt(lambda[j])
+	= z[j]/sqrt(lambda[j]) - w[j] sqrt(lambda[j])
+      }
+      obtained by dividing the raw residuals by the
+      square root of the fitted conditional intensity.
+      The Pearson residuals are standardised, in the sense
+      that if the model (true and fitted) is Poisson,
+      then the sum of the Pearson residuals in a spatial region \eqn{B}
+      has variance equal to the area of \eqn{B}.
+    }
+    \item{\code{"score"}:}{
+      the score residuals (Baddeley et al, 2005)
+      \deqn{
+	r_j = (z_j - w_j \lambda_j) x_j
+      }{
+	r[j] = (z[j] - w[j] lambda[j]) * x[j,]
+      }
+      obtained by multiplying the raw residuals \eqn{r_j}{r[j]}
+      by the covariates \eqn{x_j}{x[j,]} for quadrature point \eqn{j}.
+      The score residuals always sum to zero.
+    }
+  }
+  The result of \code{residuals.ppm} is a measure
+  (object of class \code{"msr"}).
+  Use \code{\link{plot.msr}} to plot the residuals directly,
+  or \code{\link{diagnose.ppm}} to produce diagnostic plots
+  based on these residuals.
+  Use \code{\link{integral.msr}} to compute the total residual.
+
+  By default, 
+  the window of the measure is the same as the original window
+  of the data. If \code{drop=TRUE} then the window is the
+  domain of integration of the pseudolikelihood or composite likelihood.
+  This only matters when the model \code{object} was fitted using
+  the border correction: in that case, if \code{drop=TRUE} the
+  window of the residuals is the erosion of the original data window
+  by the border correction distance \code{rbord}.
+}
+\section{Modified Residuals}{
+  Sometimes we want to modify the calculation of residuals by using
+  different values for the model parameters. This capability is
+  provided by the arguments \code{new.coef} and \code{quad}.
+
+  If \code{new.coef} is given, then the residuals will be computed
+  by taking the model parameters to be \code{new.coef}.
+  This should be a numeric vector
+  of the same length as the vector of fitted model parameters
+  \code{coef(object)}. 
+
+  If \code{new.coef} is missing and \code{quad} is given,
+  then the model parameters will
+  be determined by re-fitting the model using a new
+  quadrature scheme specified by \code{quad}. 
+  Residuals will be computed for the
+  original model \code{object} using these new parameter values.
+
+  The argument \code{quad} should normally be
+  a list of arguments in \code{name=value} format that will be
+  passed to \code{\link{quadscheme}} (together with
+  the original data points) to determine the new quadrature scheme.
+  It may also be a quadrature scheme (object of class
+  \code{"quad"}) to which the model should be fitted, or a
+  point pattern (object of class \code{"ppp"}) specifying the
+  \emph{dummy points} in a new quadrature scheme.
+}
+\references{
+  Baddeley, A., Turner, R.,
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005)
+  Residual analysis for spatial point processes.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{67}, 617--666.
+
+  Baddeley, A.,
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Pakes, A.G. (2008) 
+  Properties of residuals for spatial point processes.
+  \emph{Annals of the Institute of Statistical Mathematics}
+  \bold{60}, 627--649.
+}
+\seealso{
+ \code{\link{msr}},
+ \code{\link{diagnose.ppm}},
+ \code{\link{ppm.object}},
+ \code{\link{ppm}}
+}
+\examples{
+   fit <- ppm(cells, ~x, Strauss(r=0.15))
+
+   # Pearson residuals
+   rp <- residuals(fit, type="pe")
+   rp
+
+   # simulated data
+   X <- rStrauss(100,0.7,0.05)
+   # fit Strauss model 
+   fit <- ppm(X, ~1, Strauss(0.05))
+   res.fit <- residuals(fit)
+
+   # check that total residual is 0 
+   integral.msr(residuals(fit, drop=TRUE))
+
+   # true model parameters
+   truecoef <- c(log(100), log(0.7))
+   res.true <- residuals(fit, new.coef=truecoef)  
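+
+   # a sketch: recompute residuals with parameters re-fitted on a finer
+   # quadrature scheme (the list is passed to quadscheme; see Modified
+   # Residuals above)
+   res.fine <- residuals(fit, quad=list(nd=64))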
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
+\keyword{methods}
diff --git a/man/residualspaper.Rd b/man/residualspaper.Rd
new file mode 100644
index 0000000..77dbbef
--- /dev/null
+++ b/man/residualspaper.Rd
@@ -0,0 +1,93 @@
+\name{residualspaper}
+\alias{residualspaper}
+\docType{data}
+\title{
+  Data and Code From JRSS Discussion Paper on Residuals
+}
+\description{
+  This dataset contains the point patterns
+  used as examples in the paper of Baddeley et al (2005).
+  [Figure 2 is already available in \pkg{spatstat}
+  as the \code{\link{copper}} dataset.] 
+
+  R code is also provided to reproduce all
+  the Figures displayed in Baddeley et al (2005).
+  The component \code{plotfig}
+  is a function, which can be called
+  with a numeric or character argument specifying the Figure or Figures
+  that should be plotted. See the Examples.
+} 
+\format{
+  \code{residualspaper} is a list with the following components:
+  \describe{
+    \item{Fig1}{
+      The locations of Japanese pine seedlings and saplings
+      from Figure 1 of the paper.
+      A point pattern (object of class \code{"ppp"}).
+    }
+    \item{Fig3}{
+      The Chorley-Ribble data from Figure 3 of the paper.
+      A list with three components, \code{lung}, \code{larynx}
+      and \code{incin}. Each is a matrix with 2 columns
+      giving the coordinates of the lung cancer cases,
+      larynx cancer cases, and the incinerator, respectively.
+      Coordinates are Eastings and Northings in km.
+    }
+    \item{Fig4a}{
+      The synthetic dataset in Figure 4 (a) of the paper.
+    }
+    \item{Fig4b}{
+      The synthetic dataset in Figure 4 (b) of the paper.
+    }
+    \item{Fig4c}{
+      The synthetic dataset in Figure 4 (c) of the paper.
+    }
+    \item{Fig11}{
+      The covariate displayed in Figure 11. A pixel image (object of
+      class \code{"im"}) whose pixel values are distances to the
+      nearest line segment in the \code{copper} data.
+    }
+    \item{plotfig}{A function which will compute and plot
+      any of the Figures from the paper. The argument of
+      \code{plotfig} is either a numeric vector or a character vector,
+      specifying the Figure or Figures to be plotted. See the Examples.
+    }
+  }
+}
+\usage{data(residualspaper)}
+\examples{
+
+\dontrun{
+  data(residualspaper)
+  
+  X <- residualspaper$Fig4a
+  summary(X)
+  plot(X)
+
+  # reproduce all Figures
+  residualspaper$plotfig()
+
+  # reproduce Figures 1 to 10
+  residualspaper$plotfig(1:10)
+
+  # reproduce Figure 7 (a)
+  residualspaper$plotfig("7a")
+}
+}
+\source{
+  Figure 1: Prof M. Numata. Data kindly supplied by Professor Y. Ogata
+  with kind permission of Prof M. Tanemura.
+  
+  Figure 3:  Professor P.J. Diggle (rescaled by \adrian)
+
+  Figure 4 (a,b,c): \adrian
+}
+\references{
+  Baddeley, A., Turner, R., \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005)
+  Residual analysis for spatial point processes.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{67}, 617--666.
+}
+\keyword{datasets}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/rex.Rd b/man/rex.Rd
new file mode 100644
index 0000000..858a4be
--- /dev/null
+++ b/man/rex.Rd
@@ -0,0 +1,99 @@
+\name{rex}
+\alias{rex}
+\title{
+  Richardson Extrapolation
+}
+\description{
+  Performs Richardson Extrapolation on a sequence of
+  approximate values.
+}
+\usage{
+rex(x, r = 2, k = 1, recursive = FALSE)
+}
+\arguments{
+  \item{x}{
+    A numeric vector or matrix,
+    whose columns are successive estimates or approximations
+    to a vector of parameters.
+  }
+  \item{r}{
+    A number greater than 1.
+    The ratio of successive step sizes.
+    See Details.
+  }
+  \item{k}{
+    Integer. The order of convergence assumed. See Details.
+  }
+  \item{recursive}{
+    Logical value indicating whether to perform one step of
+    Richardson extrapolation (\code{recursive=FALSE}, the default)
+    or repeat the extrapolation procedure until a best estimate
+    is obtained (\code{recursive=TRUE}).
+  }
+}
+\details{
+  Richardson extrapolation is a general technique for improving numerical
+  approximations, often used in numerical integration (Brezinski and
+  Zaglia, 1991). It can also be used to improve parameter estimates
+  in statistical models (Baddeley and Turner, 2014). 
+  
+  The successive columns of \code{x} are assumed to
+  have been obtained using approximations with step sizes
+  \eqn{a, a/r, a/r^2, \ldots}{a, a/r, a/r^2, ...}
+  where \eqn{a} is the initial step size (which does not need to be
+  specified).
+
+  Estimates based on a step size \eqn{s} are assumed to have an error
+  of order \eqn{s^k}.
+
+  Thus, the default values \code{r=2} and \code{k=1} imply that the errors in
+  the second column of \code{x} should be roughly \eqn{(1/r)^k = 1/2} as large
+  as the errors in the first column, and so on.
+}
+\value{
+  A matrix whose columns contain a sequence of improved estimates.
+}
+\references{
+  Baddeley, A. and Turner, R. (2014)
+  Bias correction for parameter estimates of spatial point process models.
+  \emph{Journal of Statistical Computation and Simulation}
+  \bold{84}, 1621--1643.
+  DOI: 10.1080/00949655.2012.755976
+
+  Brezinski, C. and Zaglia, M.R. (1991)
+  \emph{Extrapolation Methods. Theory and Practice}.
+  North-Holland.
+}
+\author{
+  \adrian
+  and
+  \rolf.
+}
+\seealso{
+  \code{\link{bc}}
+}
+\examples{
+   # integrals of sin(x) and cos(x) from 0 to pi
+   # correct answers: 2, 0
+   est <- function(nsteps) {
+     xx <- seq(0, pi, length=nsteps)
+     ans <- pi * c(mean(sin(xx)), mean(cos(xx)))
+     names(ans) <- c("sin", "cos")
+     ans
+   }
+   X <- cbind(est(10), est(20), est(40))
+   X
+   rex(X)
+   rex(X, recursive=TRUE)
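+   # with the defaults r=2, k=1, one extrapolation step amounts to
+   # (r^k * x2 - x1)/(r^k - 1) = 2*x2 - x1, columnwise (a sketch of the idea)
+   2 * X[,2] - X[,1]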
+
+   # fitted Gibbs point process model
+   fit0 <- ppm(cells ~ 1, Strauss(0.07), nd=16)
+   fit1 <- update(fit0, nd=32)
+   fit2 <- update(fit0, nd=64)
+   co <- cbind(coef(fit0), coef(fit1), coef(fit2))
+   co 
+   rex(co, k=2, recursive=TRUE)
+}
+\keyword{math}
+\keyword{optimize}
+
diff --git a/man/rgbim.Rd b/man/rgbim.Rd
new file mode 100644
index 0000000..d00cbe8
--- /dev/null
+++ b/man/rgbim.Rd
@@ -0,0 +1,89 @@
+\name{rgbim}
+\alias{rgbim}
+\alias{hsvim}
+\title{Create Colour-Valued Pixel Image}
+\description{
+  Creates an object of
+  class \code{"im"} representing a two-dimensional pixel image
+  whose pixel values are colours.
+}
+\usage{
+  rgbim(R, G, B, A, maxColorValue=255, autoscale=FALSE)
+  hsvim(H, S, V, A, autoscale=FALSE)
+}
+\arguments{
+  \item{R,G,B}{
+    Pixel images (objects of class \code{"im"}) or constants
+    giving the red, green, and blue components of a colour, respectively.
+  }
+  \item{A}{
+    Optional. Pixel image or constant value
+    giving the alpha (transparency) component of a colour.
+  }
+  \item{maxColorValue}{
+    Maximum colour channel value for \code{R,G,B,A}.
+  }
+  \item{H,S,V}{
+    Pixel images (objects of class \code{"im"}) or constants
+    giving the hue, saturation, and value components of
+    a colour, respectively.
+  }
+  \item{autoscale}{
+    Logical. If \code{TRUE}, input values are automatically rescaled
+    to fit the permitted range. RGB values are scaled to lie between
+    0 and \code{maxColorValue}. HSV values are scaled to lie between 0
+    and 1.
+  }
+}
+\details{
+  These functions take three pixel images, with real or integer pixel values,
+  and create a single pixel image whose pixel values are colours
+  recognisable to \R.
+
+  Some of the arguments may be constant numeric values, but
+  at least one of the arguments must be a pixel image.
+  The image arguments should be compatible (in array dimension
+  and in spatial position).
+
+  \code{rgbim} calls \code{\link{rgb}} to compute the colours,
+  while \code{hsvim} calls \code{\link{hsv}}. See the help for the relevant
+  function for more information about the meaning of the colour
+  channels.
+}
+\seealso{
+  \code{\link{im.object}},
+  \code{\link{rgb}},
+  \code{\link{hsv}}.
+
+  See \code{\link[spatstat:colourtools]{colourtools}}
+  for additional colour tools.
+}
+\examples{
+  \testonly{
+    op <- spatstat.options(npixel=32)
+  }
+  # create three images with values in [0,1]
+  X <- setcov(owin())
+  X <- eval.im(pmin(1,X))
+  M <- Window(X)
+  Y <- as.im(function(x,y){(x+1)/2}, W=M)
+  Z <- as.im(function(x,y){(y+1)/2}, W=M)
+  RGB <- rgbim(X, Y, Z, maxColorValue=1)
+  HSV <- hsvim(X, Y, Z)
+  plot(RGB, valuesAreColours=TRUE)
+  plot(HSV, valuesAreColours=TRUE)
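+  # autoscale sketch: out-of-range values are rescaled into [0,1]
+  HSV2 <- hsvim(3*X, Y, Z, autoscale=TRUE)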
+  \testonly{
+    spatstat.options(op)
+  }
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
+\keyword{datagen}
+ 
+ 
diff --git a/man/rho2hat.Rd b/man/rho2hat.Rd
new file mode 100644
index 0000000..50d0ec6
--- /dev/null
+++ b/man/rho2hat.Rd
@@ -0,0 +1,109 @@
+\name{rho2hat}
+\alias{rho2hat}
+\title{
+  Smoothed Relative Density of Pairs of Covariate Values
+}
+\description{
+  Given a point pattern and two spatial covariates \eqn{Z_1}{Z1} and
+  \eqn{Z_2}{Z2}, construct a smooth estimate of the relative risk of
+  the pair \eqn{(Z_1,Z_2)}{(Z1, Z2)}.
+}
+\usage{
+rho2hat(object, cov1, cov2, ..., method=c("ratio", "reweight"))
+}
+\arguments{
+  \item{object}{
+    A point pattern (object of class \code{"ppp"}),
+    a quadrature scheme (object of class \code{"quad"})
+    or a fitted point process model (object of class \code{"ppm"}).
+  }
+  \item{cov1,cov2}{
+    The two covariates.
+    Each argument is either a \code{function(x,y)} or a pixel image (object of
+    class \code{"im"}) providing the values of the covariate at any
+    location, or one of the strings \code{"x"} or \code{"y"}
+    signifying the Cartesian coordinates.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link{density.ppp}} to smooth
+    the scatterplots.
+  }
+  \item{method}{
+    Character string determining the smoothing method. See Details.
+  }
+}
+\details{
+  This is a bivariate version of \code{\link{rhohat}}.
+  
+  If \code{object} is a point pattern, this command
+  produces a smoothed version of the scatterplot of
+  the values of the covariates \code{cov1} and \code{cov2}
+  observed at the points of the point pattern. 
+
+  The covariates \code{cov1,cov2} must have continuous values.
+  
+  If \code{object} is a fitted point process model, suppose \code{X} is
+  the original data point pattern to which the model was fitted. Then
+  this command assumes \code{X} is a realisation of a Poisson point
+  process with intensity function of the form
+  \deqn{
+    \lambda(u) = \rho(Z_1(u), Z_2(u)) \kappa(u)
+  }{
+    lambda(u) = rho(Z1(u), Z2(u)) * kappa(u)
+  }
+  where \eqn{\kappa(u)}{kappa(u)} is the intensity of the fitted model
+  \code{object}, and \eqn{\rho(z_1,z_2)}{rho(z1, z2)} is a function
+  to be estimated. The algorithm computes a smooth estimate of the
+  function \eqn{\rho}{rho}.
+
+  The \code{method} determines how the density estimates will be
+  combined to obtain an estimate of \eqn{\rho(z_1, z_2)}{rho(z1, z2)}:
+  \itemize{
+    \item
+    If \code{method="ratio"}, then \eqn{\rho(z_1, z_2)}{rho(z1,z2)} is
+    estimated by the ratio of two density estimates.
+    The numerator is a (rescaled) density estimate obtained by
+    smoothing the points \eqn{(Z_1(y_i), Z_2(y_i))}{(Z1(y[i]), Z2(y[i]))} 
+    obtained by evaluating the two covariates \eqn{Z_1, Z_2}{Z1, Z2}
+    at the data points \eqn{y_i}{y[i]}. The denominator
+    is a density estimate of the reference distribution of
+    \eqn{(Z_1,Z_2)}{(Z1, Z2)}.
+    \item
+    If \code{method="reweight"}, then \eqn{\rho(z_1, z_2)}{rho(z1,z2)} is
+    estimated by applying density estimation to the 
+    points \eqn{(Z_1(y_i), Z_2(y_i))}{(Z1(y[i]), Z2(y[i]))}
+    obtained by evaluating the two covariates \eqn{Z_1, Z_2}{Z1, Z2}
+    at the data points \eqn{y_i}{y[i]},
+    with weights inversely proportional to the reference density of
+    \eqn{(Z_1,Z_2)}{(Z1, Z2)}.
+    }
+}
+\value{
+  A pixel image (object of class \code{"im"}). Also
+  belongs to the special class \code{"rho2hat"} which has a plot method.
+}
+\references{
+  Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2012)
+  Nonparametric estimation of the dependence of a point
+  process on spatial covariates.
+  \emph{Statistics and Its Interface} \bold{5} (2), 221--236.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{rhohat}},
+  \code{\link{methods.rho2hat}}
+}
+\examples{
+  data(bei)
+  attach(bei.extra)
+  plot(rho2hat(bei, elev, grad))
+  fit <- ppm(bei, ~elev, covariates=bei.extra)
+  \dontrun{
+  plot(rho2hat(fit, elev, grad))
+  }
+  plot(rho2hat(fit, elev, grad, method="reweight"))
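+
+  # A hedged comparison sketch (not from the original page): the
+  # "ratio" and "reweight" estimates described in Details should
+  # give broadly similar surfaces.
+  r1 <- rho2hat(bei, elev, grad)
+  r2 <- rho2hat(bei, elev, grad, method="reweight")
+  plot(r1); plot(r2)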
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/rhohat.Rd b/man/rhohat.Rd
new file mode 100644
index 0000000..95b85e0
--- /dev/null
+++ b/man/rhohat.Rd
@@ -0,0 +1,312 @@
+\name{rhohat}  
+\alias{rhohat}
+\alias{rhohat.ppp}
+\alias{rhohat.quad}
+\alias{rhohat.ppm}
+\alias{rhohat.lpp}
+\alias{rhohat.lppm}
+\concept{Resource Selection Function}
+\concept{Prospectivity}
+\title{
+  Smoothing Estimate of Intensity as Function of a Covariate
+}
+\description{
+  Computes a smoothing estimate of the intensity of a point process,
+  as a function of a (continuous) spatial covariate.
+}
+\usage{
+rhohat(object, covariate, ...)
+
+\method{rhohat}{ppp}(object, covariate, ...,
+       baseline=NULL, weights=NULL,
+       method=c("ratio", "reweight", "transform"),
+       horvitz=FALSE,
+       smoother=c("kernel", "local"),
+       dimyx=NULL, eps=NULL,
+       n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL,
+       bwref=bw,
+       covname, confidence=0.95)
+
+\method{rhohat}{quad}(object, covariate, ...,
+       baseline=NULL, weights=NULL,
+       method=c("ratio", "reweight", "transform"),
+       horvitz=FALSE,
+       smoother=c("kernel", "local"),
+       dimyx=NULL, eps=NULL,
+       n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL,
+       bwref=bw,
+       covname, confidence=0.95)
+
+\method{rhohat}{ppm}(object, covariate, ...,
+       weights=NULL,
+       method=c("ratio", "reweight", "transform"),
+       horvitz=FALSE,
+       smoother=c("kernel", "local"),
+       dimyx=NULL, eps=NULL,
+       n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL,
+       bwref=bw,
+       covname, confidence=0.95)
+
+\method{rhohat}{lpp}(object, covariate, ...,
+       weights=NULL,
+       method=c("ratio", "reweight", "transform"),
+       horvitz=FALSE,
+       smoother=c("kernel", "local"),
+       nd=1000, eps=NULL, random=TRUE, 
+       n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL,
+       bwref=bw,
+       covname, confidence=0.95)
+
+\method{rhohat}{lppm}(object, covariate, ...,
+       weights=NULL,
+       method=c("ratio", "reweight", "transform"),
+       horvitz=FALSE,
+       smoother=c("kernel", "local"),
+       nd=1000, eps=NULL, random=TRUE, 
+       n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL,
+       bwref=bw,
+       covname, confidence=0.95)
+}
+\arguments{
+  \item{object}{
+    A point pattern (object of class \code{"ppp"} or \code{"lpp"}),
+    a quadrature scheme (object of class \code{"quad"})
+    or a fitted point process model (object of class \code{"ppm"}
+    or \code{"lppm"}).
+  }
+  \item{covariate}{
+    Either a \code{function(x,y)} or a pixel image (object of
+    class \code{"im"}) providing the values of the covariate at any
+    location.
+    Alternatively one of the strings \code{"x"} or \code{"y"}
+    signifying the Cartesian coordinates.
+  }
+  \item{weights}{
+    Optional weights attached to the data points.
+    Either a numeric vector of weights for each data point,
+    or a pixel image (object of class \code{"im"}) or
+    a \code{function(x,y)} providing the weights.
+  }
+  \item{baseline}{
+    Optional baseline for intensity function.
+    A \code{function(x,y)} or a pixel image (object of
+    class \code{"im"}) providing the values of the baseline at any
+    location. 
+  }
+  \item{method}{
+    Character string determining the smoothing method. See Details.
+  }
+  \item{horvitz}{
+    Logical value indicating whether to use Horvitz-Thompson weights.
+    See Details.
+  }
+  \item{smoother}{
+    Character string determining the smoothing algorithm. See Details.
+  }
+  \item{dimyx,eps,nd,random}{
+    Arguments controlling the pixel
+    resolution at which the covariate will be evaluated.
+    See Details.
+  }
+  \item{bw}{
+    Smoothing bandwidth or bandwidth rule
+    (passed to \code{\link{density.default}}).
+  }
+  \item{adjust}{
+    Smoothing bandwidth adjustment factor
+    (passed to \code{\link{density.default}}).
+  }
+  \item{n, from, to}{
+    Arguments passed to \code{\link{density.default}} to
+    control the number and range of values at which the function
+    will be estimated.
+  }
+  \item{bwref}{
+    Optional. An alternative value of \code{bw} to use when smoothing
+    the reference density (the density of the covariate values
+    observed at all locations in the window).
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link{density.default}}
+    or \code{\link[locfit]{locfit}}.
+  }
+  \item{covname}{
+    Optional. Character string to use as the name of the covariate.
+  }
+  \item{confidence}{
+    Confidence level for confidence intervals.
+    A number between 0 and 1.
+  }
+}
+\details{
+  This command estimates the relationship between
+  point process intensity and a given spatial covariate.
+  Such a relationship is sometimes called a
+  \emph{resource selection function} (if the points are organisms
+  and the covariate is a descriptor of habitat) or
+  a \emph{prospectivity index} (if the points are mineral deposits
+  and the covariate is a geological variable). 
+  This command uses a nonparametric smoothing method which does not assume a
+  particular form for the relationship.  
+  
+  If \code{object} is a point pattern, and \code{baseline} is missing or
+  null, this command assumes that \code{object} is a realisation of a
+  Poisson point process with intensity function
+  \eqn{\lambda(u)}{lambda(u)} of the form
+  \deqn{\lambda(u) = \rho(Z(u))}{lambda(u) = rho(Z(u))}
+  where \eqn{Z} is the spatial
+  covariate function given by \code{covariate}, and
+  \eqn{\rho(z)}{rho(z)} is a function to be estimated.  This command
+  computes estimators of \eqn{\rho(z)}{rho(z)} proposed by Baddeley and
+  Turner (2005) and Baddeley et al (2012).
+
+  The covariate \eqn{Z} must have continuous values.
+
+  If \code{object} is a point pattern, and \code{baseline} is given,
+  then the intensity function is assumed to be
+  \deqn{\lambda(u) = \rho(Z(u)) B(u)}{lambda(u) = rho(Z(u)) * B(u)}
+  where \eqn{B(u)} is the baseline intensity at location \eqn{u}.
+  A smoothing estimator of the relative intensity  \eqn{\rho(z)}{rho(z)}
+  is computed.
+
+  If \code{object} is a fitted point process model, suppose \code{X} is
+  the original data point pattern to which the model was fitted. Then
+  this command assumes \code{X} is a realisation of a Poisson point
+  process with intensity function of the form
+  \deqn{
+    \lambda(u) = \rho(Z(u)) \kappa(u)
+  }{
+    lambda(u) = rho(Z(u)) * kappa(u)
+  }
+  where \eqn{\kappa(u)}{kappa(u)} is the intensity of the fitted model
+  \code{object}. A smoothing estimator of \eqn{\rho(z)}{rho(z)} is computed.
+
+  The estimation procedure is determined by the character strings
+  \code{method} and \code{smoother} and the argument \code{horvitz}.
+  The estimation procedure involves computing several density estimates
+  and combining them. 
+  The algorithm used to compute density estimates is 
+  determined by \code{smoother}:
+  \itemize{
+    \item If \code{smoother="kernel"},
+    the smoothing procedure is based on
+    fixed-bandwidth kernel density estimation,
+    performed by \code{\link{density.default}}.
+    \item If \code{smoother="local"}, the smoothing procedure
+    is based on local likelihood density estimation, performed by
+    \code{\link[locfit]{locfit}}.
+  }
+  The \code{method} determines how the density estimates will be
+  combined to obtain an estimate of \eqn{\rho(z)}{rho(z)}:
+  \itemize{
+    \item
+    If \code{method="ratio"}, then \eqn{\rho(z)}{rho(z)} is
+    estimated by the ratio of two density estimates.
+    The numerator is a (rescaled) density estimate obtained by
+    smoothing the values \eqn{Z(y_i)}{Z(y[i])} of the covariate
+    \eqn{Z} observed at the data points \eqn{y_i}{y[i]}. The denominator
+    is a density estimate of the reference distribution of \eqn{Z}.
+    \item
+    If \code{method="reweight"}, then \eqn{\rho(z)}{rho(z)} is
+    estimated by applying density estimation to the 
+    values \eqn{Z(y_i)}{Z(y[i])} of the covariate
+    \eqn{Z} observed at the data points \eqn{y_i}{y[i]},
+    with weights inversely proportional to the reference density of
+    \eqn{Z}.
+    \item 
+    If \code{method="transform"},
+    the smoothing method is variable-bandwidth kernel
+    smoothing, implemented by applying the Probability Integral Transform
+    to the covariate values, yielding values in the range 0 to 1,
+    then applying edge-corrected density estimation on the interval
+    \eqn{[0,1]}, and back-transforming.
+  }
+  If \code{horvitz=TRUE}, then the calculations described above
+  are modified by using Horvitz-Thompson weighting.
+  The contribution to the numerator from 
+  each data point is weighted by the reciprocal of the
+  baseline value or fitted intensity value at that data point;
+  and a corresponding adjustment is made to the denominator.
+  
+  The covariate will be evaluated on a fine grid of locations,
+  with spatial resolution controlled by the arguments
+  \code{dimyx,eps,nd,random}. 
+  In two dimensions (i.e.
+  if \code{object} is of class \code{"ppp"}, \code{"ppm"} or
+  \code{"quad"}) the arguments \code{dimyx, eps} are
+  passed to \code{\link{as.mask}} to control the pixel
+  resolution. On a linear network (i.e. if \code{object} is of class
+  \code{"lpp"}) the argument \code{nd} specifies the
+  total number of test locations on the linear
+  network, \code{eps} specifies the linear separation between test
+  locations, and \code{random} specifies whether the test locations
+  have a randomised starting position.
+
+  If the argument \code{weights} is present, then the contribution
+  from each data point \code{X[i]} to the estimate of \eqn{\rho}{rho} is
+  multiplied by \code{weights[i]}.
+}
+\value{
+  A function value table (object of class \code{"fv"})
+  containing the estimated values of \eqn{\rho}{rho} for a sequence
+  of values of \eqn{Z}.
+  Also belongs to the class \code{"rhohat"}
+  which has special methods for \code{print}, \code{plot}
+  and \code{predict}.
+}
+\section{Categorical and discrete covariates}{
+  This technique assumes that the covariate has continuous values.
+  It is not applicable to covariates with categorical (factor) values
+  or discrete values such as small integers.
+  For a categorical covariate, use
+  \code{\link{intensity.quadratcount}} applied to the result of
+  \code{\link{quadratcount}(X, tess=covariate)}.
+}
+\references{
+  Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2012)
+  Nonparametric estimation of the dependence of a point
+  process on spatial covariates.
+  \emph{Statistics and Its Interface} \bold{5} (2), 221--236.
+  
+  Baddeley, A. and Turner, R. (2005)
+  Modelling spatial point patterns in R.
+  In: A. Baddeley, P. Gregori, J. Mateu, R. Stoica, and D. Stoyan,
+  editors, \emph{Case Studies in Spatial Point Pattern Modelling},
+  Lecture Notes in Statistics number 185. Pages 23--74.
+  Springer-Verlag, New York, 2006. 
+  ISBN: 0-387-28311-0.  
+}
+\author{
+  \adrian, 
+  Ya-Mei Chang, Yong Song, 
+  and \rolf.
+}
+\seealso{
+  \code{\link{rho2hat}},
+  \code{\link{methods.rhohat}},
+  \code{\link{parres}}.
+
+  See \code{\link{ppm}} for a parametric method for the same problem.
+}
+\examples{
+  X <-  rpoispp(function(x,y){exp(3+3*x)})
+  rho <- rhohat(X, "x")
+  rho <- rhohat(X, function(x,y){x})
+  plot(rho)
+  curve(exp(3+3*x), lty=3, col=2, add=TRUE)
+
+  rhoB <- rhohat(X, "x", method="reweight")
+  rhoC <- rhohat(X, "x", method="transform")
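+
+  # Hedged sketch of the baseline form lambda(u) = rho(Z(u)) * B(u)
+  # described in Details, using a toy constant baseline:
+  rhoD <- rhohat(X, "x", baseline=function(x,y){ 2 + 0 * x })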
+
+  \testonly{rh <- rhohat(X, "x", dimyx=32)}
+
+  fit <- ppm(X, ~x)
+  rr <- rhohat(fit, "y")
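+  # Hedged sketch: the "rhohat" class has a predict method (see
+  # methods.rhohat) which should return a pixel image of rho(Z(u)):
+  lamhat <- predict(rho)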
+
+# linear network
+  Y <- runiflpp(30, simplenet)
+  rhoY <- rhohat(Y, "y")
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/ripras.Rd b/man/ripras.Rd
new file mode 100644
index 0000000..2370e23
--- /dev/null
+++ b/man/ripras.Rd
@@ -0,0 +1,107 @@
+\name{ripras}
+\alias{ripras}
+\title{Estimate window from points alone}
+\description{
+  Given an observed pattern of points,
+  computes the Ripley-Rasson estimate of 
+  the spatial domain from which they came.
+}
+\usage{
+ ripras(x, y=NULL, shape="convex", f)
+}
+\arguments{
+  \item{x}{
+    vector of \code{x} coordinates of observed points,
+    or a 2-column matrix giving \code{x,y} coordinates,
+    or a list with components \code{x,y} giving coordinates
+    (such as a point pattern object of class \code{"ppp"}.)
+  }
+  \item{y}{(optional) vector of \code{y} coordinates of observed points,
+    if \code{x} is a vector.}
+  \item{shape}{String indicating the type of window to be estimated:
+    either \code{"convex"} or \code{"rectangle"}.
+  }
+  \item{f}{
+    (optional) scaling factor. See Details.
+  }
+}
+\value{
+  A window (an object of class \code{"owin"}).
+}
+\details{
+  Given an observed pattern of points with coordinates 
+  given by \code{x} and \code{y}, this function computes 
+  an estimate due to Ripley and Rasson (1977) of the
+  spatial domain from which the points came. 
+
+  The points are
+  assumed to have been generated independently and uniformly
+  distributed inside an unknown domain \eqn{D}.
+
+  If \code{shape="convex"} (the default), the domain \eqn{D} is assumed
+  to be a convex set. The maximum
+  likelihood estimate of \eqn{D} is the convex hull of the 
+  points (computed by \code{\link{convexhull.xy}}).
+  As in the analogous problem of estimating the endpoint of a
+  uniform distribution, the MLE is biased: the convex hull always
+  lies inside the true domain, so it underestimates it.
+  Ripley and Rasson's estimator is a rescaled copy of the convex hull,
+  centred at the centroid of the convex hull.
+  The scaling factor is 
+  \eqn{1/\sqrt{1 - m/n}}{1/sqrt(1 - m/n)}
+  where \eqn{n} is the number of data points and 
+  \eqn{m} the number of vertices of the convex hull.
+  The scaling factor may be overridden using the argument \code{f}.
+
+  If \code{shape="rectangle"}, the domain \eqn{D} is assumed
+  to be a rectangle with sides parallel to the coordinate axes. The maximum
+  likelihood estimate of \eqn{D} is the bounding box of the points
+  (computed by \code{\link{bounding.box.xy}}). The Ripley-Rasson
+  estimator is a rescaled copy of the bounding box,
+  with scaling factor \eqn{(n+1)/(n-1)}
+  where \eqn{n} is the number of data points,
+  centred at the centroid of the bounding box.
+  The scaling factor may be overridden using the argument \code{f}.
+}
+\seealso{
+  \code{\link{owin}},
+  \code{\link{as.owin}},
+  \code{\link{bounding.box.xy}},
+  \code{\link{convexhull.xy}}
+}
+\examples{
+  x <- runif(30)
+  y <- runif(30)
+  w <- ripras(x,y)
+  plot(owin(), main="ripras(x,y)")
+  plot(w, add=TRUE)
+  points(x,y)
+
+  X <- rpoispp(15)
+  plot(X, main="ripras(X)")
+  plot(ripras(X), add=TRUE)
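+
+  # Hedged sketch: overriding the scaling factor with f=1 should
+  # give the unscaled convex hull itself.
+  w2 <- ripras(X, f=1)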
+
+  # two points insufficient
+  ripras(c(0,1),c(0,0))
+  # triangle
+  ripras(c(0,1,0.5), c(0,0,1))
+  # three collinear points
+  ripras(c(0,0,0), c(0,1,2))
+}
+\references{
+  Ripley, B.D. and Rasson, J.-P. (1977)
+  Finding the edge of a Poisson forest.
+  \emph{Journal of Applied Probability},
+  \bold{14}, 483 -- 491.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{utilities}
+
+
+
+
diff --git a/man/rjitter.Rd b/man/rjitter.Rd
new file mode 100644
index 0000000..6ae49a4
--- /dev/null
+++ b/man/rjitter.Rd
@@ -0,0 +1,71 @@
+\name{rjitter}
+\alias{rjitter}
+\title{Random Perturbation of a Point Pattern}
+\description{
+  Applies independent random displacements to each point in a point pattern.
+}
+\usage{
+rjitter(X, radius, retry=TRUE, giveup = 10000, \dots, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{radius}{
+    Scale of perturbations. A positive numerical value.
+    The displacement vectors will be uniformly
+    distributed in a circle of this radius.
+    There is a sensible default.
+  }
+  \item{retry}{
+    What to do when a perturbed point lies outside the window
+    of the original point pattern. If \code{retry=FALSE},
+    the point will be lost; if \code{retry=TRUE},
+    the algorithm will try again.
+  }
+  \item{giveup}{
+    Maximum number of unsuccessful attempts.
+  }
+  \item{\dots}{Ignored.}
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"})
+  if \code{nsim=1}, or a list of point patterns if \code{nsim > 1},
+  in the same window as \code{X}.
+}
+\details{
+  Each of the points in the point pattern \code{X} is subjected to
+  an independent random displacement. The displacement vectors are
+  uniformly distributed in a circle of radius \code{radius}.
+
+  If a displaced point lies outside the window, then if
+  \code{retry=FALSE} the point will be lost. 
+
+  However if \code{retry=TRUE}, the algorithm will try again: each time a
+  perturbed point lies outside the window, the algorithm will reject it and
+  generate another proposed perturbation of the original point,
+  until one lies inside the window, or until \code{giveup} unsuccessful
+  attempts have been made. In the latter case, any unresolved points
+  will be included without any perturbation. The return value will
+  always be a point pattern with the same number of points as \code{X}.
+}
+\examples{
+   X <- rsyst(owin(), 10, 10)
+   Y <- rjitter(X, 0.02)
+   plot(Y)
+   Z <- rjitter(X)
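+
+   # Hedged check (assuming rjitter keeps the points in their
+   # original order): displacements cannot exceed the radius 0.02.
+   d <- sqrt((X$x - Y$x)^2 + (X$y - Y$y)^2)
+   max(d)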
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rknn.Rd b/man/rknn.Rd
new file mode 100644
index 0000000..6a953e5
--- /dev/null
+++ b/man/rknn.Rd
@@ -0,0 +1,70 @@
+\name{rknn}
+\alias{dknn}
+\alias{pknn}
+\alias{qknn}
+\alias{rknn}
+\title{
+  Theoretical Distribution of Nearest Neighbour Distance
+}
+\description{
+  Density, distribution function, quantile function and random
+  generation for the random distance to the \eqn{k}th nearest neighbour
+  in a Poisson point process in \eqn{d} dimensions.
+}
+\usage{
+dknn(x, k = 1, d = 2, lambda = 1)
+pknn(q, k = 1, d = 2, lambda = 1)
+qknn(p, k = 1, d = 2, lambda = 1)
+rknn(n, k = 1, d = 2, lambda = 1)
+}
+\arguments{
+  \item{x,q}{vector of quantiles.}
+  \item{p}{vector of probabilities.}
+  \item{n}{number of observations to be generated.}
+  \item{k}{order of neighbour.}
+  \item{d}{dimension of space.}
+  \item{lambda}{intensity of Poisson point process.}
+}
+\details{
+  In a Poisson point process in \eqn{d}-dimensional space, let
+  the random variable \eqn{R} be
+  the distance from a fixed point to the \eqn{k}-th nearest random point,
+  or the distance from a random point to the
+  \eqn{k}-th nearest other random point.
+
+  Then \eqn{R^d} has a Gamma distribution with shape parameter \eqn{k}
+  and rate \eqn{\lambda \alpha}{lambda * alpha} where
+  \eqn{\alpha}{alpha} is a constant (equal to the volume of the
+  unit ball in \eqn{d}-dimensional space).
+  See e.g. Cressie (1991, page 61).
+
+  These functions support calculation and simulation for the
+  distribution of \eqn{R}. 
+}
+\value{
+  A numeric vector:
+  \code{dknn} returns the probability density,
+  \code{pknn} returns cumulative probabilities (distribution function),
+  \code{qknn} returns quantiles,
+  and \code{rknn} generates random deviates.
+}
+\references{
+  Cressie, N.A.C. (1991)
+  \emph{Statistics for spatial data}.
+  John Wiley and Sons, 1991.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\examples{
+  x <- seq(0, 5, length=20)
+  densities <- dknn(x, k=3, d=2)
+  cdfvalues <- pknn(x, k=3, d=2)
+  randomvalues <- rknn(100, k=3, d=2)
+  deciles <- qknn((1:9)/10, k=3, d=2)
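+
+  # Hedged check of the Gamma relationship stated in Details: for
+  # d=2 the unit ball has volume alpha = pi, so R^2 should follow a
+  # Gamma(shape=k, rate=lambda*pi) distribution.
+  r <- rknn(1000, k=3, d=2, lambda=1)
+  ks.test(r^2, "pgamma", shape=3, rate=pi)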
+}
+\keyword{spatial}
+\keyword{distribution}
diff --git a/man/rlabel.Rd b/man/rlabel.Rd
new file mode 100644
index 0000000..a8271a9
--- /dev/null
+++ b/man/rlabel.Rd
@@ -0,0 +1,77 @@
+\name{rlabel}
+\alias{rlabel}
+\title{Random Re-Labelling of Point Pattern}
+\description{
+  Randomly allocates marks to a point pattern,
+  or permutes the existing marks, or resamples
+  from the existing marks.
+}
+\usage{
+   rlabel(X, labels=marks(X), permute=TRUE)
+}
+\arguments{
+  \item{X}{
+    Point pattern (object of class \code{"ppp"},
+    \code{"lpp"}, \code{"pp3"} or \code{"ppx"}).
+  }
+  \item{labels}{
+    Vector of values from which the new marks will be drawn
+    at random. Defaults to the vector of existing marks.
+  }
+  \item{permute}{
+    Logical value indicating whether to generate new marks
+    by randomly permuting \code{labels} or
+    by drawing a random sample with replacement.
+  }
+}
+\value{
+  A marked point pattern (of the same class as \code{X}).
+}
+\details{
+  This very simple function allocates random marks to
+  an existing point pattern \code{X}. It is useful
+  for hypothesis testing purposes.
+
+  In the simplest case, the command \code{rlabel(X)} yields
+  a point pattern obtained from \code{X} by randomly permuting
+  the marks of the points.
+
+  If \code{permute=TRUE}, then \code{labels} should be a vector of
+  length equal to the number of points in \code{X}.
+  The result of \code{rlabel} will be a point pattern
+  with locations given by \code{X} and marks given by
+  a random permutation of \code{labels} (i.e. a random sample without
+  replacement).
+
+  If \code{permute=FALSE}, then \code{labels} may be a vector of
+  any length. 
+  The result of \code{rlabel} will be a point pattern
+  with locations given by \code{X} and marks given by
+  a random sample from \code{labels} (with replacement).
+}
+\seealso{
+  \code{\link{marks<-}} to assign arbitrary marks.
+}
+\examples{
+   data(amacrine)
+
+   # Randomly permute the marks "on" and "off"
+   # Result always has 142 "off" and 152 "on"
+   Y <- rlabel(amacrine)
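+
+   # Hedged check: a permutation preserves the mark frequency table,
+   # so this should match table(marks(amacrine)) exactly.
+   table(marks(Y))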
+
+   # randomly allocate marks "on" and "off"
+   # with probabilities p(off) = 0.48, p(on) = 0.52
+   Y <- rlabel(amacrine, permute=FALSE)
+
+   # randomly allocate marks "A" and "B" with equal probability
+   data(cells)
+   Y <- rlabel(cells, labels=factor(c("A", "B")), permute=FALSE)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rlinegrid.Rd b/man/rlinegrid.Rd
new file mode 100644
index 0000000..36bd341
--- /dev/null
+++ b/man/rlinegrid.Rd
@@ -0,0 +1,41 @@
+\name{rlinegrid}
+\alias{rlinegrid}
+\title{Generate grid of parallel lines with random displacement}
+\description{
+  Generates a grid of parallel lines, equally spaced, inside the
+  specified window.
+}
+\usage{
+  rlinegrid(angle = 45, spacing = 0.1, win = owin())
+}
+\arguments{
+  \item{angle}{Common orientation of the lines, in degrees anticlockwise
+    from the x axis.
+  }
+  \item{spacing}{Spacing between successive lines.}
+  \item{win}{Window in which to generate the lines. An object of
+    class \code{"owin"} or something acceptable to
+    \code{\link{as.owin}}.
+  }
+}
+\details{
+  The grid is randomly displaced from the origin.
+}
+\value{
+  A line segment pattern (object of class \code{"psp"}).
+}
+\seealso{
+  \code{\link{psp}},
+  \code{\link{rpoisline}}
+}
+\examples{
+  plot(rlinegrid(30, 0.05))
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rlpp.Rd b/man/rlpp.Rd
new file mode 100644
index 0000000..3cc7129
--- /dev/null
+++ b/man/rlpp.Rd
@@ -0,0 +1,77 @@
+\name{rlpp}
+\alias{rlpp}
+\title{
+  Random Points on a Linear Network
+}
+\description{
+  Generates \eqn{n} independent random points
+  on a linear network with a specified probability density.
+}
+\usage{
+  rlpp(n, f, \dots, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{n}{
+    Number of random points to generate.
+    A nonnegative integer giving the number of points,
+    or an integer vector giving the numbers of points of each type.
+  }
+  \item{f}{
+    Probability density (not necessarily normalised).
+    A pixel image on a linear network (object of class \code{"linim"})
+    or a function on a linear network (object of class \code{"linfun"}).
+    Alternatively, \code{f} can be a list of functions or pixel images,
+    giving the densities of points of each type.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{f} if it is a function
+    or a list of functions.
+  }
+  \item{nsim}{Number of simulated realisations to generate.}
+  \item{drop}{
+    Logical value indicating what to do when \code{nsim=1}.
+    If \code{drop=TRUE} (the default), the result is a point pattern.
+    If \code{drop=FALSE}, the result is a list with one entry which is a
+    point pattern.
+  }
+}
+\details{
+  The linear network \code{L}, on which the points will be generated,
+  is determined by the argument \code{f}.
+  
+  If \code{f} is a function, it is converted to a pixel image
+  on the linear network, using any additional function arguments
+  \code{\dots}.
+
+  If \code{n} is a single integer and \code{f} is a function or pixel image,
+  then independent random points are generated on \code{L} with
+  probability density proportional to \code{f}.
+
+  If \code{n} is an integer vector and \code{f} is a list of functions
+  or pixel images, where \code{n} and \code{f} have the same length,
+  then independent random points of several types are generated on
+  \code{L}, with \code{n[i]} points of type \code{i} having probability
+  density proportional to \code{f[[i]]}.
+}
+\value{
+  If \code{nsim = 1} and \code{drop=TRUE},
+  a point pattern on the linear network,
+  i.e.\ an object of class \code{"lpp"}.
+  Otherwise, a list of such point patterns.
+}
+\author{
+  \adrian 
+}
+\seealso{
+ \code{\link{runiflpp}}
+}
+\examples{
+  g <- function(x, y, seg, tp) { exp(x + 3*y) }
+  f <- linfun(g, simplenet)
+
+  rlpp(20, f)
+
+  plot(rlpp(20, f, nsim=3))
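+
+  # Hedged sketch of the multitype case described in Details: an
+  # integer vector n with a matching list of densities should yield
+  # a marked point pattern on the network.
+  g2 <- linfun(function(x, y, seg, tp) { exp(-3*y) }, simplenet)
+  Xm <- rlpp(c(15, 5), list(f, g2))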
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rmh.Rd b/man/rmh.Rd
new file mode 100644
index 0000000..13456d0
--- /dev/null
+++ b/man/rmh.Rd
@@ -0,0 +1,90 @@
+\name{rmh}
+\alias{rmh}
+\title{Simulate point patterns using the Metropolis-Hastings algorithm.}
+\description{
+  Generic function for running the Metropolis-Hastings algorithm
+  to produce simulated realisations of a point process model.
+}
+
+\usage{rmh(model, \dots)}
+
+\arguments{
+  \item{model}{The point process model to be simulated.
+  }
+  \item{\dots}{Further arguments controlling the simulation.
+  }
+}
+
+\details{
+  The Metropolis-Hastings algorithm can be used to
+  generate simulated realisations from a wide range of
+  spatial point processes. For caveats, see below.
+  
+  The function \code{rmh} is generic; it has methods
+  \code{\link{rmh.ppm}} (for objects of class \code{"ppm"})
+  and  \code{\link{rmh.default}} (the default).
+  The actual implementation of the Metropolis-Hastings algorithm is
+  contained in \code{\link{rmh.default}}.
+  For details of its use, see 
+  \code{\link{rmh.ppm}} or \code{\link{rmh.default}}.
+
+  [If the model is a Poisson process, then Metropolis-Hastings
+  is not used; the Poisson model is generated directly
+  using \code{\link{rpoispp}} or \code{\link{rmpoispp}}.]
+
+  In brief, the Metropolis-Hastings algorithm is a Markov Chain,
+  whose states are spatial point patterns, and whose limiting
+  distribution is the desired point process. After
+  running the algorithm for a very large number of iterations,
+  we may regard the state of the algorithm as a realisation
+  from the desired point process.
+
+  However, there are difficulties in deciding whether the
+  algorithm has run for ``long enough''. The convergence of the
+  algorithm may indeed be extremely slow. No guarantees of
+  convergence are given!
+
+  While it is fashionable to decry the Metropolis-Hastings algorithm
+  for its poor convergence and other properties, it has the advantage
+  of being easy to implement for a wide range of models.
+}
+
+\section{Warning}{
+As of version 1.22-1 of \code{spatstat} a subtle change was
+made to \code{rmh.default()}.  We had noticed that the results
+produced were sometimes not ``scalable'' in that two models,
+differing in effect only by the units in which distances are
+measured and starting from the same seed, gave different results.
+This was traced to an idiosyncrasy of floating-point arithmetic.
+The code of \code{rmh.default()} has been changed so that the
+results produced by \code{rmh} are now scalable.  The downside of
+this is that code which users previously ran may now give results
+which are different from what they formerly were.
+
+In order to recover former behaviour (so that previous results
+can be reproduced) set \code{spatstat.options(scalable=FALSE)}.
+See the last example in the help for \code{\link{rmh.default}}.
+}
+
+\value{
+  A point pattern, in the form of an object of class \code{"ppp"}.
+  See \code{\link{rmh.default}} for details.
+}
+
+\seealso{
+  \code{\link{rmh.default}}
+}
+
+\examples{
+    # See examples in rmh.default and rmh.ppm
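+
+    # A minimal hedged sketch (full details in rmh.default):
+    mod <- rmhmodel(cif="strauss", par=list(beta=2, gamma=0.2, r=0.7),
+                    w=square(10))
+    X <- rmh(mod, start=list(n.start=50), control=list(nrep=1e4))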
+}
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rmh.default.Rd b/man/rmh.default.Rd
new file mode 100644
index 0000000..837b396
--- /dev/null
+++ b/man/rmh.default.Rd
@@ -0,0 +1,695 @@
+\name{rmh.default}
+\alias{rmh.default}
+\title{Simulate Point Process Models using the Metropolis-Hastings Algorithm.}
+\description{
+  Generates a random point pattern, simulated from
+  a chosen point process model, using the Metropolis-Hastings
+  algorithm. 
+}
+
+\usage{
+   \method{rmh}{default}(model, start=NULL,
+   control=default.rmhcontrol(model),
+   \dots,
+   nsim=1, drop=TRUE, saveinfo=TRUE,
+   verbose=TRUE, snoop=FALSE)
+}
+
+\arguments{
+  \item{model}{Data specifying the point process model
+    that is to be simulated.
+  }
+  \item{start}{Data determining the initial state of
+    the algorithm.
+  }
+  \item{control}{Data controlling the iterative behaviour
+    and termination of the algorithm.
+  }
+  \item{\dots}{
+    Further arguments passed to \code{\link{rmhcontrol}}
+    or to trend functions in \code{model}.
+  }
+  \item{nsim}{
+    Number of simulated point patterns that should be generated.
+  }
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a single point pattern.
+  }
+  \item{saveinfo}{
+    Logical value indicating whether to save auxiliary information.
+  }
+  \item{verbose}{
+    Logical value indicating whether to print progress reports.
+  }
+  \item{snoop}{
+    Logical. If \code{TRUE}, activate the visual debugger. 
+  }
+}
+
+\value{
+  A point pattern (an object of class \code{"ppp"}, see
+  \code{\link{ppp.object}}) or a list of point patterns.
+
+  The returned value has an attribute \code{info} containing
+  modified versions of the arguments
+  \code{model}, \code{start}, and \code{control} which together specify
+  the exact simulation procedure. The \code{info} attribute can be
+  printed (and is printed automatically by \code{\link{summary.ppp}}).
+  For computational efficiency, the \code{info} attribute can be omitted
+  by setting \code{saveinfo=FALSE}.
+
+  The value of \code{\link[base:Random]{.Random.seed}} at the start
+  of the simulations is also saved and returned as an attribute
+  \code{seed}.
+
+  If the argument \code{track=TRUE} was given (see \code{\link{rmhcontrol}}),
+  the transition history of the algorithm
+  is saved, and returned as an attribute \code{history}. The transition
+  history is a data frame containing a factor \code{proposaltype}
+  identifying the proposal type (Birth, Death or Shift) and
+  a logical vector \code{accepted} indicating whether the proposal was
+  accepted.
+  The data frame also has columns \code{numerator}, \code{denominator}
+  which give the numerator and denominator of the Hastings ratio for
+  the proposal.
+
+  If the argument \code{nsave} was given (see \code{\link{rmhcontrol}}),
+  the return value has an attribute \code{saved} which is a list of
+  point patterns, containing the intermediate states of the algorithm.
+}
+
+\details{
+  This function generates simulated realisations from any of a range of
+  spatial point processes, using the Metropolis-Hastings algorithm.
+  It is the default method for the generic function \code{\link{rmh}}.
+
+  This function executes a Metropolis-Hastings algorithm
+  with birth, death and shift proposals as described in
+  Geyer and \ifelse{latex}{\out{M\o ller}}{Moller} (1994).
+
+  The argument \code{model} specifies the point process model to be
+  simulated. It is either a list, or an object of class
+  \code{"rmhmodel"}, with the following components:
+
+  \describe{
+    \item{cif}{A character string specifying the choice of
+      interpoint interaction for the point process.
+    }
+    \item{par}{
+      Parameter values for the conditional
+      intensity function.
+    }
+    \item{w}{
+      (Optional) window in which the pattern is
+      to be generated. An object of class \code{"owin"},
+      or data acceptable to \code{\link{as.owin}}.
+    }
+    \item{trend}{
+      Data specifying the spatial trend in the model, if it has a trend.
+      This may be a function, a pixel image (of class \code{"im"}),
+      (or a list of functions or images if the model
+      is multitype).
+      
+      If the trend is a function or functions,
+      any auxiliary arguments \code{...} to \code{rmh.default}
+      will be passed to these functions, which
+      should be of the form \code{function(x, y, ...)}.
+    }
+    \item{types}{
+      List of possible types, for a multitype point process.
+    }
+  }
+  For full details of these parameters, see \code{\link{rmhmodel.default}}.
+  
+  The argument \code{start} determines the initial state of the
+  Metropolis-Hastings algorithm. It is either \code{NULL},
+  or an object of class \code{"rmhstart"},
+  or a list with the following components:
+
+  \describe{
+    \item{n.start}{
+      Number of points in the initial point pattern.
+      A single integer, or a vector of integers giving the
+      numbers of points of each type in a multitype point pattern.
+      Incompatible with \code{x.start}.
+    }
+    \item{x.start}{
+      Initial point pattern configuration.
+      Incompatible with \code{n.start}.
+
+      \code{x.start} may be a point pattern (an
+      object of class \code{"ppp"}), or data which can be coerced
+      to this class by \code{\link{as.ppp}},  or an object with
+      components \code{x} and \code{y}, or a two-column matrix.
+      In the last two cases, the window for the pattern is determined
+      by \code{model$w}.
+      In the first two cases, if \code{model$w} is also present,
+      then the final simulated pattern will be clipped to
+      the window \code{model$w}.
+    }
+  }
+  For full details of these parameters, see \code{\link{rmhstart}}.
+
+  The third argument \code{control} controls the simulation
+  procedure (including \emph{conditional simulation}),
+  iterative behaviour, and termination of the
+  Metropolis-Hastings algorithm. It is either \code{NULL}, or
+  a list, or an object of class \code{"rmhcontrol"}, with components:
+  \describe{
+    \item{p}{The probability of proposing a ``shift''
+      (as opposed to a birth or death) in the Metropolis-Hastings
+      algorithm.
+    }
+    \item{q}{The conditional probability of proposing a death
+      (rather than a birth)
+      given that birth/death has been chosen over shift.  
+    }
+    \item{nrep}{The number of repetitions or iterations
+      to be made by the Metropolis-Hastings algorithm.  It should
+      be large.
+    }
+    \item{expand}{
+      Either a numerical expansion factor, or
+      a window (object of class \code{"owin"}). Indicates that
+      the process is to be simulated on a larger domain than the
+      original data window \code{w}, then clipped to \code{w}
+      when the algorithm has finished.
+
+      The default is to expand the simulation window
+      if the model is stationary and non-Poisson
+      (i.e. it has no trend and the interaction is not Poisson)
+      and not to expand in all other cases. 
+      
+      If the model has a trend, then in order for expansion to
+      be feasible, the trend must be given either as a function,
+      or an image whose bounding box is large enough to contain
+      the expanded window.
+    }
+    \item{periodic}{A logical scalar; if \code{periodic} is \code{TRUE}
+      we simulate a process on the torus formed by identifying
+      opposite edges of a rectangular window.  
+    }
+    \item{ptypes}{A vector of probabilities (summing to 1) to be used
+      in assigning a random type to a new point.
+    }
+    \item{fixall}{A logical scalar specifying whether to condition on
+      the number of points of each type.
+    }
+    \item{nverb}{An integer specifying how often ``progress reports''
+      (which consist simply of the number of repetitions completed)
+      should be printed out.  If \code{nverb} is left at 0 (the default),
+      the simulation proceeds silently.
+    }
+    \item{x.cond}{If this argument is present, then
+      \emph{conditional simulation} will be performed, and \code{x.cond}
+      specifies the conditioning points and the type of conditioning.
+    }
+    \item{nsave,nburn}{
+      If these values are specified, then
+      intermediate states of the simulation algorithm will be saved
+      every \code{nsave} iterations, after an initial burn-in period of
+      \code{nburn} iterations.
+    }
+    \item{track}{
+      Logical flag indicating whether to save the transition
+      history of the simulations.
+    }
+  }
+  For full details of these parameters, see \code{\link{rmhcontrol}}.
+  The control parameters can also be given in the \code{\dots} arguments.
+}
+\section{Conditional Simulation}{
+  There are several kinds of conditional simulation.
+  \itemize{
+    \item
+    Simulation \emph{conditional upon the number of points},
+    that is, holding the number of points fixed.
+    To do this, set \code{control$p} (the probability of a shift) equal to 1.
+    The number of points is then determined by the starting state, which
+    may be specified either by setting \code{start$n.start} to be a
+    scalar, or by setting the initial pattern \code{start$x.start}.
+    \item 
+    In the case of multitype processes, it is possible to simulate the
+    model \emph{conditionally upon the number of points of each type},
+    i.e. holding the number of points of each type
+    to be fixed. To do this, set \code{control$p} equal to 1
+    and \code{control$fixall} to be \code{TRUE}.
+    The number of points is then determined by the starting state, which
+    may be specified either by setting \code{start$n.start} to be an
+    integer vector, or by setting the initial pattern \code{start$x.start}.
+    \item
+    Simulation 
+    \emph{conditional on the configuration observed in a sub-window},
+    that is, requiring that, inside a specified sub-window \eqn{V},
+    the simulated pattern should agree with a specified point pattern
+    \eqn{y}. To do this, set \code{control$x.cond} to equal the
+    specified point pattern \eqn{y}, making sure that it is an object of class
+    \code{"ppp"} and that the window \code{Window(control$x.cond)}
+    is the conditioning window \eqn{V}.
+    \item
+    Simulation \emph{conditional on the presence of specified points},
+    that is, requiring that the simulated pattern should include a
+    specified set of points. This is simulation from the Palm
+    distribution of the point process given a pattern \eqn{y}.
+    To do this, set \code{control$x.cond} to be a
+    \code{data.frame} containing the coordinates (and marks,
+    if appropriate) of the specified points.
+  }
+  For further information, see \code{\link{rmhcontrol}}.
+  
+  Note that, when we simulate conditionally on the number of points, or
+  conditionally on the number of points of each type,
+  no expansion of the window is possible.
+}
+\section{Visual Debugger}{
+  If \code{snoop = TRUE}, an interactive debugger is activated.
+  On the current plot device, the debugger displays the current
+  state of the Metropolis-Hastings algorithm together with
+  the proposed transition to the next state.
+  Clicking on this graphical display (using the left mouse button)
+  will re-centre the display at the clicked location.
+  Surrounding this graphical display is an array of boxes representing
+  different actions.
+  Clicking on one of the action boxes (using the left mouse button)
+  will cause the action to be performed.
+  Debugger actions include:
+  \itemize{
+    \item Zooming in or out
+    \item Panning (shifting the field of view) left, right, up or down
+    \item Jumping to the next iteration
+    \item Skipping 10, 100, 1000, 10000 or 100000 iterations
+    \item Jumping to the next Birth proposal (etc)
+    \item Changing the fate of the proposal (i.e. changing whether
+    the proposal is accepted or rejected)
+    \item Dumping the current state and proposal to a file
+    \item Printing detailed information at the terminal
+    \item Exiting the debugger (so that the simulation
+    algorithm continues without further interruption).
+  }
+  Right-clicking the mouse will also cause the debugger to exit.
+}
+
+\references{
+   Baddeley, A. and Turner, R. (2000) Practical maximum
+   pseudolikelihood for spatial point patterns.
+   \emph{Australian and New Zealand Journal of Statistics}
+   \bold{42}, 283 -- 322.
+
+   Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point
+   Patterns} (2nd ed.) Arnold, London.
+
+   Diggle, P.J. and Gratton, R.J. (1984)
+   Monte Carlo methods of inference for implicit statistical models.
+   \emph{Journal of the Royal Statistical Society, series B}
+   \bold{46}, 193 -- 212.
+
+   Diggle, P.J., Gates, D.J., and Stibbard, A. (1987)
+   A nonparametric estimator for pairwise-interaction point processes.
+   Biometrika \bold{74}, 763 -- 770.
+
+   Geyer, C.J. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (1994)
+   Simulation procedures and likelihood inference for spatial
+   point processes.
+   \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373.
+
+   Geyer, C.J. (1999)
+   Likelihood Inference for Spatial Point
+   Processes. Chapter 3 in  O.E. Barndorff-Nielsen, W.S. Kendall and
+   M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and
+   Computation}, Chapman and Hall / CRC,  Monographs on Statistics and
+   Applied Probability, number 80. Pages 79--140.
+}
+
+\section{Warnings}{
+
+There is never a guarantee that the Metropolis-Hastings algorithm
+has converged to its limiting distribution.
+
+If \code{start$x.start} is specified then \code{expand} is set equal to 1
+and simulation takes place in \code{Window(x.start)}.  Any specified
+value for \code{expand} is simply ignored.
+
+The presence of both a component \code{w} of \code{model} and a
+non-null value for \code{Window(x.start)} makes sense ONLY if \code{w}
+is contained in \code{Window(x.start)}.  
+
+For multitype processes, make sure that the list of trends contains
+a component for every type, even if that component is \code{NULL}
+(indicating that the corresponding type has no trend).
+}
+
+\seealso{
+  \code{\link{rmh}},
+  \code{\link{rmh.ppm}},
+  \code{\link{rStrauss}},
+  \code{\link{ppp}},
+  \code{\link{ppm}},
+  \code{\link{AreaInter}},
+  \code{\link{BadGey}},
+  \code{\link{DiggleGatesStibbard}},
+  \code{\link{DiggleGratton}},
+  \code{\link{Fiksel}},
+  \code{\link{Geyer}},
+  \code{\link{Hardcore}},
+  \code{\link{LennardJones}},
+  \code{\link{MultiHard}},
+  \code{\link{MultiStrauss}},
+  \code{\link{MultiStraussHard}},
+  \code{\link{PairPiece}},
+  \code{\link{Poisson}},
+  \code{\link{Softcore}},
+  \code{\link{Strauss}},
+  \code{\link{StraussHard}},
+  \code{\link{Triplets}}
+}
+\section{Other models}{
+  In theory, any finite point process model can be simulated using
+  the Metropolis-Hastings algorithm, provided the conditional
+  intensity is uniformly bounded.
+
+  In practice, the list of point process models that can be simulated using
+  \code{rmh.default} is limited to those that have been implemented
+  in the package's internal C code. More options will be added in the future.
+
+  Note that the \code{lookup} conditional intensity function
+  permits the simulation (in theory, to any desired degree
+  of approximation) of any pairwise interaction process for
+  which the interaction depends only on the distance between
+  the pair of points.
+}
+\section{Reproducible simulations}{
+  If the user wants the simulation to be exactly reproducible
+  (e.g. for a figure in a journal article, where it is useful to
+  have the figure consistent from draft to draft) then the state of
+  the random number generator should be set before calling
+  \code{rmh.default}. This can be done either by calling
+  \code{\link[base:Random]{set.seed}} or by assigning a value to
+  \code{\link[base:Random]{.Random.seed}}. In the examples below, we use
+  \code{\link[base:Random]{set.seed}}.  
+
+  If a simulation has been performed and the user now wants to 
+  repeat it exactly, the random seed should be extracted from
+  the simulated point pattern \code{X} by \code{seed <- attr(X, "seed")},
+  then assigned to the system random number state by
+  \code{.Random.seed <- seed} before calling \code{rmh.default}.
+}
+\examples{
+   if(interactive()) {
+     nr   <- 1e5
+     nv  <- 5000
+     ns <- 200
+   } else {
+     nr  <- 10
+     nv <- 5
+     ns <- 20
+     oldopt <- spatstat.options()
+     spatstat.options(expand=1.1)
+   }
+   set.seed(961018)
+   
+   # Strauss process.
+   mod01 <- list(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7),
+                 w=c(0,10,0,10))
+   X1.strauss <- rmh(model=mod01,start=list(n.start=ns),
+                     control=list(nrep=nr,nverb=nv))
+
+   if(interactive()) plot(X1.strauss)
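+
+   # Hedged sketch of the recipe in 'Reproducible simulations':
+   # restoring the saved seed should reproduce X1.strauss exactly.
+   seed <- attr(X1.strauss, "seed")
+   .Random.seed <- seed
+   X1.again <- rmh(model=mod01,start=list(n.start=ns),
+                   control=list(nrep=nr,nverb=nv))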
+   
+   # Strauss process, conditioning on n = 42:
+   X2.strauss <- rmh(model=mod01,start=list(n.start=42),
+                     control=list(p=1,nrep=nr,nverb=nv))
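+
+   # Hedged sketch of Palm conditioning (see 'Conditional Simulation'):
+   # an x.cond data frame forces these points to appear in the output.
+   X.palm <- rmh(model=mod01, start=list(n.start=ns),
+                 control=list(nrep=nr, x.cond=data.frame(x=5, y=5)))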
+
+   # Tracking algorithm progress:
+   X <- rmh(model=mod01,start=list(n.start=ns),
+            control=list(nrep=nr, nsave=nr/5, nburn=nr/2, track=TRUE))
+   History <- attr(X, "history")
+   Saved <- attr(X, "saved")
+   head(History)
+   plot(Saved)
+
+   # Hard core process:
+   mod02 <- list(cif="hardcore",par=list(beta=2,hc=0.7),w=c(0,10,0,10))
+   X3.hardcore <- rmh(model=mod02,start=list(n.start=ns),
+                     control=list(nrep=nr,nverb=nv))
+   
+   if(interactive()) plot(X3.hardcore)
+
+   # Strauss process equal to pure hardcore:
+   mod02s <- list(cif="strauss",par=list(beta=2,gamma=0,r=0.7),w=c(0,10,0,10))
+   X3.strauss <- rmh(model=mod02s,start=list(n.start=ns),
+                     control=list(nrep=nr,nverb=nv))
+   
+   # Strauss process in a polygonal window.
+   x     <- c(0.55,0.68,0.75,0.58,0.39,0.37,0.19,0.26,0.42)
+   y     <- c(0.20,0.27,0.68,0.99,0.80,0.61,0.45,0.28,0.33)
+   mod03 <- list(cif="strauss",par=list(beta=2000,gamma=0.6,r=0.07),
+                w=owin(poly=list(x=x,y=y)))
+   X4.strauss <- rmh(model=mod03,start=list(n.start=ns),
+                     control=list(nrep=nr,nverb=nv))
+   if(interactive()) plot(X4.strauss)
+   
+   # Strauss process in a polygonal window, conditioning on n = 80.
+   X5.strauss <- rmh(model=mod03,start=list(n.start=ns),
+                     control=list(p=1,nrep=nr,nverb=nv))
+   
+   # Strauss process, starting off from X4.strauss, but with the
+   # polygonal window replaced by a rectangular one.  At the end,
+   # the generated pattern is clipped to the original polygonal window.
+   xxx <- X4.strauss
+   Window(xxx) <- as.owin(c(0,1,0,1))
+   X6.strauss <- rmh(model=mod03,start=list(x.start=xxx),
+                     control=list(nrep=nr,nverb=nv))
+   
+   # Strauss with hardcore:
+   mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3),
+                w=c(0,10,0,10))
+   X1.straush <- rmh(model=mod04,start=list(n.start=ns),
+                     control=list(nrep=nr,nverb=nv))
+   
+   # Another Strauss with hardcore (with a perhaps surprising result):
+   mod05 <- list(cif="straush",par=list(beta=80,gamma=0.36,r=45,hc=2.5),
+                w=c(0,250,0,250))
+   X2.straush <- rmh(model=mod05,start=list(n.start=ns),
+                     control=list(nrep=nr,nverb=nv))
+   
+   # Pure hardcore (identical to X3.strauss).
+   mod06 <- list(cif="straush",par=list(beta=2,gamma=1,r=1,hc=0.7),
+                w=c(0,10,0,10))
+   X3.straush <- rmh(model=mod06,start=list(n.start=ns),
+                     control=list(nrep=nr,nverb=nv))
+   
+   # Soft core:
+   w    <- c(0,10,0,10)
+   mod07 <- list(cif="sftcr",par=list(beta=0.8,sigma=0.1,kappa=0.5),
+                w=c(0,10,0,10))
+   X.sftcr <- rmh(model=mod07,start=list(n.start=ns),
+                  control=list(nrep=nr,nverb=nv))
+   if(interactive()) plot(X.sftcr)
+
+   # Area-interaction process:
+   mod42 <- rmhmodel(cif="areaint",par=list(beta=2,eta=1.6,r=0.7),
+                 w=c(0,10,0,10))
+   X.area <- rmh(model=mod42,start=list(n.start=ns),
+                  control=list(nrep=nr,nverb=nv))
+   if(interactive()) plot(X.area)
+
+   # Triplets process
+   modtrip <- list(cif="triplets",par=list(beta=2,gamma=0.2,r=0.7),
+                   w=c(0,10,0,10))
+   X.triplets <- rmh(model=modtrip,
+                     start=list(n.start=ns),
+                     control=list(nrep=nr,nverb=nv))
+   if(interactive()) plot(X.triplets)
+   
+   # Multitype Strauss:
+   beta <- c(0.027,0.008)
+   gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2)
+   r    <- matrix(c(45,45,45,45),2,2)
+   mod08 <- list(cif="straussm",par=list(beta=beta,gamma=gmma,radii=r),
+                w=c(0,250,0,250))
+   X1.straussm <- rmh(model=mod08,start=list(n.start=ns),
+                      control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv))
+   if(interactive()) plot(X1.straussm)
+   
+   # Multitype Strauss conditioning upon the total number
+   # of points being 80:
+   X2.straussm <- rmh(model=mod08,start=list(n.start=ns),
+                      control=list(p=1,ptypes=c(0.75,0.25),nrep=nr,
+                                   nverb=nv))
+   
+   # Conditioning upon the number of points of type 1 being 60
+   # and the number of points of type 2 being 20:
+   X3.straussm <- rmh(model=mod08,start=list(n.start=c(60,20)),
+                      control=list(fixall=TRUE,p=1,ptypes=c(0.75,0.25),
+                                   nrep=nr,nverb=nv))
+   
+   # Multitype Strauss hardcore:
+   rhc  <- matrix(c(9.1,5.0,5.0,2.5),2,2)
+   mod09 <- list(cif="straushm",par=list(beta=beta,gamma=gmma,
+                iradii=r,hradii=rhc),w=c(0,250,0,250))
+   X.straushm <- rmh(model=mod09,start=list(n.start=ns),
+                     control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv))
+   
+   # Multitype Strauss hardcore with trends for each type:
+   beta  <- c(0.27,0.08)
+   tr3   <- function(x,y){x <- x/250; y <- y/250;
+   			   exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6)
+                         }
+                         # log quadratic trend
+   tr4   <- function(x,y){x <- x/250; y <- y/250;
+                         exp(-0.6*x+0.5*y)}
+                        # log linear trend
+   mod10 <- list(cif="straushm",par=list(beta=beta,gamma=gmma,
+                 iradii=r,hradii=rhc),w=c(0,250,0,250),
+                 trend=list(tr3,tr4))
+   X1.straushm.trend <- rmh(model=mod10,start=list(n.start=ns),
+                            control=list(ptypes=c(0.75,0.25),
+                            nrep=nr,nverb=nv))
+   if(interactive()) plot(X1.straushm.trend)
+   
+   # Multitype Strauss hardcore with trends for each type, given as images:
+   bigwin <- square(250)
+   i1 <- as.im(tr3, bigwin)
+   i2 <- as.im(tr4, bigwin)   
+   mod11 <- list(cif="straushm",par=list(beta=beta,gamma=gmma,
+                 iradii=r,hradii=rhc),w=bigwin,
+                 trend=list(i1,i2))
+   X2.straushm.trend <- rmh(model=mod11,start=list(n.start=ns),
+                            control=list(ptypes=c(0.75,0.25),expand=1,
+                            nrep=nr,nverb=nv))
+   
+   # Diggle, Gates, and Stibbard:
+   mod12 <- list(cif="dgs",par=list(beta=3600,rho=0.08),w=c(0,1,0,1))
+   X.dgs <- rmh(model=mod12,start=list(n.start=ns),
+                control=list(nrep=nr,nverb=nv))
+   if(interactive()) plot(X.dgs)
+   
+   # Diggle-Gratton:
+   mod13 <- list(cif="diggra",
+                 par=list(beta=1800,kappa=3,delta=0.02,rho=0.04),
+                 w=square(1))
+   X.diggra <- rmh(model=mod13,start=list(n.start=ns),
+                   control=list(nrep=nr,nverb=nv))
+   if(interactive()) plot(X.diggra)
+   
+   # Fiksel:
+   modFik <- list(cif="fiksel",
+                 par=list(beta=180,r=0.15,hc=0.07,kappa=2,a= -1.0),
+                 w=square(1))
+   X.fiksel <- rmh(model=modFik,start=list(n.start=ns),
+                   control=list(nrep=nr,nverb=nv))
+   if(interactive()) plot(X.fiksel)
+   
+   # Geyer:
+   mod14 <- list(cif="geyer",par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5),
+                 w=c(0,10,0,10))
+   X1.geyer <- rmh(model=mod14,start=list(n.start=ns),
+                   control=list(nrep=nr,nverb=nv))
+   if(interactive()) plot(X1.geyer)
+   
+   # Geyer; same as a Strauss process with parameters
+   # (beta=2.25,gamma=0.16,r=0.7):
+   
+   mod15 <- list(cif="geyer",par=list(beta=2.25,gamma=0.4,r=0.7,sat=10000),
+                 w=c(0,10,0,10))
+   X2.geyer <- rmh(model=mod15,start=list(n.start=ns),
+                   control=list(nrep=nr,nverb=nv))
+   
+   mod16 <- list(cif="geyer",par=list(beta=8.1,gamma=2.2,r=0.08,sat=3))
+   data(redwood)
+   X3.geyer <- rmh(model=mod16,start=list(x.start=redwood),
+                   control=list(periodic=TRUE,nrep=nr,nverb=nv))
+   
+   # Geyer, starting from the redwood data set, simulating
+   # on a torus, and conditioning on n:
+   X4.geyer <- rmh(model=mod16,start=list(x.start=redwood),
+                   control=list(p=1,periodic=TRUE,nrep=nr,nverb=nv))
+
+   # Lookup (interaction function h_2 from page 76, Diggle (2003)):
+      r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0.
+      h <- 20*(r-0.05)
+      h[r<0.05] <- 0
+      h[r>0.10] <- 1
+      mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1))
+      X.lookup <- rmh(model=mod17,start=list(n.start=ns),
+                      control=list(nrep=nr,nverb=nv))
+      if(interactive()) plot(X.lookup)
+                   
+   # Strauss with trend
+   tr <- function(x,y){x <- x/250; y <- y/250;
+   			   exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6)
+                         }
+   beta <- 0.3
+   gmma <- 0.5
+   r    <- 45
+   modStr <- list(cif="strauss",par=list(beta=beta,gamma=gmma,r=r),
+                 w=square(250), trend=tr)
+   X1.strauss.trend <- rmh(model=modStr,start=list(n.start=ns),
+                           control=list(nrep=nr,nverb=nv))
+   # Baddeley-Geyer
+   r <- seq(0,0.2,length=8)[-1]
+   gmma <- c(0.5,0.6,0.7,0.8,0.7,0.6,0.5)
+   mod18 <- list(cif="badgey",par=list(beta=4000, gamma=gmma,r=r,sat=5),
+                 w=square(1))
+   X1.badgey <- rmh(model=mod18,start=list(n.start=ns),
+                    control=list(nrep=nr,nverb=nv))
+   mod19 <- list(cif="badgey",
+                 par=list(beta=4000, gamma=gmma,r=r,sat=1e4),
+                 w=square(1))
+   set.seed(1329)
+   X2.badgey <- rmh(model=mod19,start=list(n.start=ns),
+                    control=list(nrep=nr,nverb=nv))
+
+   # Check:
+   h <- ((prod(gmma)/cumprod(c(1,gmma)))[-8])^2
+   hs <- stepfun(r,c(h,1))
+   mod20 <- list(cif="lookup",par=list(beta=4000,h=hs),w=square(1))
+   set.seed(1329)
+   X.check <- rmh(model=mod20,start=list(n.start=ns),
+                      control=list(nrep=nr,nverb=nv))
+   # X2.badgey and X.check will be identical.
+
+   mod21 <- list(cif="badgey",par=list(beta=300,gamma=c(1,0.4,1),
+                 r=c(0.035,0.07,0.14),sat=5), w=square(1))
+   X3.badgey <- rmh(model=mod21,start=list(n.start=ns),
+                    control=list(nrep=nr,nverb=nv))
+   # Same result as Geyer model with beta=300, gamma=0.4, r=0.07,
+   # sat = 5 (if seeds and control parameters are the same)
+
+   # Or more simply:
+   mod22 <- list(cif="badgey",
+                 par=list(beta=300,gamma=0.4,r=0.07, sat=5),
+                 w=square(1))
+   X4.badgey <- rmh(model=mod22,start=list(n.start=ns),
+                    control=list(nrep=nr,nverb=nv))
+   # Same again --- i.e. the BadGey model includes the Geyer model.
+
+
+   # Illustrating scalability.
+   \dontrun{
+    M1 <- rmhmodel(cif="strauss",par=list(beta=60,gamma=0.5,r=0.04),w=owin())
+    set.seed(496)
+    X1 <- rmh(model=M1,start=list(n.start=300))
+    M2 <- rmhmodel(cif="strauss",par=list(beta=0.6,gamma=0.5,r=0.4),
+              w=owin(c(0,10),c(0,10)))
+    set.seed(496)
+    X2  <- rmh(model=M2,start=list(n.start=300))
+    chk <- affine(X1,mat=diag(c(10,10)))
+    all.equal(chk,X2,check.attributes=FALSE)
+    # Under the default spatstat options the foregoing all.equal()
+    # will yield TRUE.  Setting spatstat.options(scalable=FALSE) and
+    # re-running the code will reveal differences between X1 and X2.
+   }
+
+   if(!interactive()) spatstat.options(oldopt)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rmh.ppm.Rd b/man/rmh.ppm.Rd
new file mode 100644
index 0000000..21894fe
--- /dev/null
+++ b/man/rmh.ppm.Rd
@@ -0,0 +1,263 @@
+\name{rmh.ppm}
+\alias{rmh.ppm}
+\title{Simulate from a Fitted Point Process Model}
+\description{
+  Given a point process model fitted to data, 
+  generate a random simulation of the model, 
+  using the Metropolis-Hastings algorithm.
+}
+
+\usage{
+  \method{rmh}{ppm}(model, start=NULL,
+                    control=default.rmhcontrol(model, w=w),
+                    \dots,
+                    w = NULL, 
+                    project=TRUE,
+                    nsim=1, drop=TRUE, saveinfo=TRUE,
+                    verbose=TRUE, new.coef=NULL)
+}
+
+\arguments{
+  \item{model}{A fitted point process model (object of class
+    \code{"ppm"}, see \code{\link{ppm.object}}) which it is desired
+    to simulate.  This fitted model is usually the result of a call
+    to \code{\link{ppm}}.  See \bold{Details} below.
+  }
+  \item{start}{Data determining the initial state
+    of the Metropolis-Hastings algorithm.  See
+    \code{\link{rmhstart}} for description of these arguments.
+    Defaults to \code{list(x.start=data.ppm(model))}
+  }
+  \item{control}{Data controlling the iterative behaviour of
+    the Metropolis-Hastings algorithm.  See \code{\link{rmhcontrol}}
+    for description of these arguments.
+  }
+  \item{\dots}{
+    Further arguments passed to \code{\link{rmhcontrol}},
+    or to \code{\link{rmh.default}}, or to covariate functions in the model.
+  }
+  \item{w}{
+    Optional. Window in which the simulations should be generated.
+    Default is the window of the original data.
+  }
+  \item{project}{
+    Logical flag indicating what to do if the fitted model is
+    invalid (in the sense that the values of the fitted coefficients do not
+    specify a valid point process).
+    If \code{project=TRUE} the closest valid model will be simulated;
+    if \code{project=FALSE} an error will occur.
+  }
+  \item{nsim}{
+    Number of simulated point patterns that should be generated.
+  }
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a single point pattern.
+  }
+  \item{saveinfo}{
+    Logical value indicating whether to save auxiliary information.
+  }
+  \item{verbose}{
+    Logical flag indicating whether to print progress reports.
+  }
+  \item{new.coef}{
+    New values for the canonical parameters of the model.
+    A numeric vector of the same length as \code{coef(model)}.
+  }
+}
+
+\value{
+  A point pattern (an object of class \code{"ppp"}; see
+  \code{\link{ppp.object}}) or a list of point patterns.
+}
+
+\details{
+  This function generates simulated realisations from a point
+  process model that has been fitted to point pattern data.  It is
+  a method for the generic function \code{\link{rmh}} for the
+  class \code{"ppm"} of fitted point process models.  To simulate
+  other kinds of point process models, see \code{\link{rmh}}
+  or \code{\link{rmh.default}}.
+
+  The argument \code{model} describes the fitted model.  It must be
+  an object of class \code{"ppm"} (see \code{\link{ppm.object}}),
+  and will typically be the result of a call to the point process
+  model fitting function \code{\link{ppm}}.
+
+  The current implementation enables simulation from any fitted model
+  involving the interactions
+  \code{\link{AreaInter}},
+  \code{\link{DiggleGratton}},
+  \code{\link{DiggleGatesStibbard}},
+  \code{\link{Geyer}},
+  \code{\link{Hardcore}},
+  \code{\link{MultiStrauss}},
+  \code{\link{MultiStraussHard}},
+  \code{\link{PairPiece}},
+  \code{\link{Poisson}},
+  \code{\link{Strauss}},
+  \code{\link{StraussHard}}
+  and \code{\link{Softcore}},
+  including nonstationary models. See the examples.
+  
+  It is also possible to simulate \emph{hybrids} of several such models.
+  See \code{\link{Hybrid}} and the examples.
+  
+  It is possible that the fitted coefficients of a point process model
+  may be ``illegal'', i.e. that there may not exist a
+  mathematically well-defined point process with the given parameter
+  values. For example, a Strauss process with interaction
+  parameter \eqn{\gamma > 1}{gamma > 1} does not exist,
+  but the model-fitting procedure used in \code{\link{ppm}} will sometimes
+  produce values of \eqn{\gamma}{gamma} greater than 1.
+  In such cases, if \code{project=FALSE} then an error will occur,
+  while if \code{project=TRUE} then \code{rmh.ppm} will find
+  the nearest legal model and simulate
+  this model instead. (The nearest legal model is obtained by
+  projecting the vector of coefficients onto the set of
+  valid coefficient vectors. The result is usually the Poisson process
+  with the same fitted intensity.)
+  
+  The arguments \code{start} and \code{control} are lists of
+  parameters determining the initial state and the iterative
+  behaviour, respectively, of the Metropolis-Hastings algorithm.
+
+  The argument \code{start} is passed directly to \code{\link{rmhstart}}.
+  See \code{\link{rmhstart}} for details of the parameters of the
+  initial state, and their default values.
+
+  The argument \code{control} is first passed to
+  \code{\link{rmhcontrol}}. Then if any additional arguments \code{\dots}
+  are given, \code{\link{update.rmhcontrol}} is called to update the
+  parameter values. See \code{\link{rmhcontrol}} for details of
+  the iterative behaviour parameters, and \code{\link{default.rmhcontrol}}
+  for their default values.
+
+  Note that if you specify expansion of the simulation window
+  using the parameter \code{expand} (so that the
+  model will be simulated on a window larger than the original data
+  window) then the model must be capable of extrapolation to this
+  larger window. This is usually not possible for models which
+  depend on external covariates, because the domain of a covariate image
+  is usually the same as the domain of the fitted model.
+  
+  After extracting the relevant information from the fitted model
+  object \code{model}, \code{rmh.ppm} invokes the default
+  \code{rmh} algorithm \code{\link{rmh.default}}, unless the model
+  is Poisson. If the model is Poisson then the Metropolis-Hastings
+  algorithm is not needed, and the model is simulated directly, using
+  one of \code{\link{rpoispp}}, \code{\link{rmpoispp}},
+  \code{\link{rpoint}} or \code{\link{rmpoint}}.
+
+  See \code{\link{rmh.default}} for further information about the
+  implementation, or about the Metropolis-Hastings algorithm.
+}
+
+\section{Warnings}{
+  See Warnings in \code{\link{rmh.default}}.
+}
+
+\seealso{
+  \code{\link{simulate.ppm}},
+  \code{\link{rmh}},
+  \code{\link{rmhmodel}},
+  \code{\link{rmhcontrol}},
+  \code{\link{default.rmhcontrol}},
+  \code{\link{update.rmhcontrol}},
+  \code{\link{rmhstart}},
+  \code{\link{rmh.default}},
+  \code{\link{ppp.object}},
+  \code{\link{ppm}},
+
+  Interactions:
+  \code{\link{AreaInter}},
+  \code{\link{DiggleGratton}},
+  \code{\link{DiggleGatesStibbard}},
+  \code{\link{Geyer}},
+  \code{\link{Hardcore}},
+  \code{\link{Hybrid}},
+  \code{\link{MultiStrauss}},
+  \code{\link{MultiStraussHard}},
+  \code{\link{PairPiece}},
+  \code{\link{Poisson}},
+  \code{\link{Strauss}},
+  \code{\link{StraussHard}},
+  \code{\link{Softcore}}
+}
+
+\examples{
+   live <- interactive()
+   op <- spatstat.options()
+   spatstat.options(rmh.nrep=1e5)
+   Nrep <- 1e5
+
+   X <- swedishpines
+   if(live) plot(X, main="Swedish Pines data")
+
+   # Poisson process
+   fit <- ppm(X, ~1, Poisson())
+   Xsim <- rmh(fit)
+   if(live) plot(Xsim, main="simulation from fitted Poisson model")
+
+   # Strauss process   
+   fit <- ppm(X, ~1, Strauss(r=7))
+   Xsim <- rmh(fit)
+   if(live) plot(Xsim, main="simulation from fitted Strauss model")
+
+   \dontrun{
+     # Strauss process simulated on a larger window
+     # then clipped to original window
+     Xsim <- rmh(fit, control=list(nrep=Nrep, expand=1.1, periodic=TRUE))
+     Xsim <- rmh(fit, nrep=Nrep, expand=2, periodic=TRUE)
+   }
+
+   \dontrun{
+     X <- rSSI(0.05, 100)
+     # piecewise-constant pairwise interaction function
+     fit <- ppm(X, ~1, PairPiece(seq(0.02, 0.1, by=0.01)))
+     Xsim <- rmh(fit)
+   }
+
+    # marked point pattern
+    Y <- amacrine
+
+   \dontrun{
+     # marked Poisson models
+     fit <- ppm(Y)
+     fit <- ppm(Y,~marks)
+     fit <- ppm(Y,~polynom(x,2))
+     fit <- ppm(Y,~marks+polynom(x,2))
+     fit <- ppm(Y,~marks*polynom(x,y,2))
+     Ysim <- rmh(fit)
+   }
+
+   # multitype Strauss models
+   MS <- MultiStrauss(radii=matrix(0.07, ncol=2, nrow=2),
+                      types = levels(Y$marks))
+   \dontrun{
+    fit <- ppm(Y ~marks, MS)
+    Ysim <- rmh(fit)
+   }
+
+   fit <- ppm(Y ~ marks*polynom(x,y,2), MS)
+   Ysim <- rmh(fit)
+   if(live) plot(Ysim, main="simulation from fitted inhomogeneous Multitype Strauss")
+
+   spatstat.options(op)
+
+  \dontrun{
+    # Hybrid model
+    fit <- ppm(redwood, ~1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2)))
+    Y <- rmh(fit)
+  }
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
+\keyword{datagen}
diff --git a/man/rmhcontrol.Rd b/man/rmhcontrol.Rd
new file mode 100644
index 0000000..31e801a
--- /dev/null
+++ b/man/rmhcontrol.Rd
@@ -0,0 +1,333 @@
+\name{rmhcontrol}
+\alias{rmhcontrol}
+\alias{rmhcontrol.default}
+\title{Set Control Parameters for Metropolis-Hastings Algorithm.}
+\description{
+  Sets up a list of parameters controlling the iterative behaviour
+  of the Metropolis-Hastings algorithm. 
+}
+\usage{
+   rmhcontrol(\dots)
+
+   \method{rmhcontrol}{default}(\dots, p=0.9, q=0.5, nrep=5e5,
+                      expand=NULL, periodic=NULL, ptypes=NULL,
+                      x.cond=NULL, fixall=FALSE, nverb=0,
+                      nsave=NULL, nburn=nsave, track=FALSE,
+                      pstage=c("block", "start"))
+}
+\arguments{
+  \item{\dots}{Arguments passed to methods.}
+  \item{p}{Probability of proposing a shift (as against a birth/death).}
+  \item{q}{Conditional probability of proposing a death given that a
+    birth or death will be proposed.}
+  \item{nrep}{Total number of steps (proposals) of Metropolis-Hastings
+    algorithm that should be run.}
+  \item{expand}{
+    Simulation window or expansion rule.
+    Either a window (object of class \code{"owin"})
+    or a numerical expansion factor, specifying that
+    simulations are to be performed in a domain other than the
+    original data window, then clipped to the original data window.
+    This argument is passed to \code{\link{rmhexpand}}.
+    A numerical expansion factor can be in several formats:
+    see \code{\link{rmhexpand}}.
+  }
+  \item{periodic}{
+    Logical value (or \code{NULL}) indicating whether to simulate
+    ``periodically'', i.e. identifying opposite edges of the rectangular
+    simulation window. A \code{NULL} value means ``undecided.''
+  }
+  \item{ptypes}{For multitype point processes, the distribution of
+    the mark attached to a new random point (when a birth is
+    proposed)}
+  \item{x.cond}{Conditioning points for conditional simulation.}
+  \item{fixall}{(Logical) for multitype point processes,
+    whether to fix the number of points of each type.}
+  \item{nverb}{Progress reports will be printed every \code{nverb}
+    iterations}
+  \item{nsave,nburn}{
+    If these values are specified, then
+    intermediate states of the simulation algorithm will be saved
+    every \code{nsave} iterations, after an initial burn-in period of
+    \code{nburn} iterations.
+  }
+  \item{track}{
+    Logical flag indicating whether to save the transition
+    history of the simulations.
+  }
+  \item{pstage}{
+    Character string specifying when to generate
+    proposal points. Either \code{"start"} or \code{"block"}.
+  }
+}
+
+\value{
+  An object of class \code{"rmhcontrol"}, which is essentially
+  a list of parameter values for the algorithm.
+  
+  There is a \code{print} method for this class, which prints
+  a sensible description of the parameters chosen.
+}
+\details{
+  The Metropolis-Hastings algorithm, implemented as \code{\link{rmh}},
+  generates simulated realisations of point process models.
+  The function \code{rmhcontrol}
+  sets up a list of parameters which control the 
+  iterative behaviour
+  and termination of the Metropolis-Hastings algorithm, for use in a
+  subsequent call to \code{\link{rmh}}. It also checks that the
+  parameters are valid.
+
+  (A separate function \code{\link{rmhstart}}
+  determines the initial state of the algorithm,
+  and \code{\link{rmhmodel}} determines the model to be simulated.)
+
+  The parameters are as follows:
+  
+  \describe{
+    \item{p}{The probability of proposing a ``shift''
+      (as opposed to a birth or death) in the Metropolis-Hastings
+      algorithm.
+
+      If \eqn{p = 1} then the algorithm only alters existing points,
+      so the number of points never changes, i.e. we are
+      simulating conditionally upon the number of points.
+      The number of points is determined by the initial state
+      (specified by \code{\link{rmhstart}}).
+
+      If \eqn{p=1} and \code{fixall=TRUE} and the model
+      is a multitype point process model, then the algorithm
+      only shifts the locations of existing points and does not
+      alter their marks (types). 
+      This is equivalent to simulating conditionally
+      upon the number of points of each type.
+      These numbers are again specified by the initial state.
+
+      If \eqn{p = 1} then no expansion of the simulation window
+      is allowed (see \code{expand} below).
+
+      The default value of \code{p} can be changed by setting
+      the parameter \code{rmh.p} in \code{\link{spatstat.options}}.
+    }
+    \item{q}{The conditional probability of proposing a death
+      (rather than a birth) given that a shift is not proposed.
+      This is of course ignored if \code{p} is equal to 1.
+
+      The default value of \code{q} can be changed by setting
+      the parameter \code{rmh.q} in \code{\link{spatstat.options}}.
+    }
+    \item{nrep}{The number of repetitions or iterations 
+      to be made by the Metropolis-Hastings algorithm.  It should
+      be large.
+
+      The default value of \code{nrep} can be changed by setting
+      the parameter \code{rmh.nrep} in \code{\link{spatstat.options}}.
+    }
+    \item{expand}{
+      Either a number or a window (object of class \code{"owin"}).
+      Indicates that the process is to be simulated on a 
+      domain other than the original data window \code{w},
+      then clipped to \code{w} when the algorithm has finished.
+      This would often be done in order to approximate the
+      simulation of a stationary process (Geyer, 1999)
+      or more generally a process existing in the
+      whole plane, rather than just in the window \code{w}.
+
+      If \code{expand} is a window object, it is taken as the
+      larger domain in which simulation is performed.
+
+      If \code{expand} is numeric, it is interpreted
+      as an expansion factor or expansion distance
+      for determining the simulation domain from the data window.
+      It should be a \emph{named} scalar, such as
+      \code{expand=c(area=2)}, \code{expand=c(distance=0.1)},
+      \code{expand=c(length=1.2)}.  See \code{\link{rmhexpand}()} for
+      more details. If the name is omitted, it defaults to \code{area}.
+     
+      Expansion is not permitted if the number of points has been
+      fixed by setting \code{p = 1} or if the
+      starting configuration has been specified via the
+      argument \code{x.start} in \code{\link{rmhstart}}.
+
+      If \code{expand} is \code{NULL}, this is interpreted to mean
+      \dQuote{not yet decided}. An expansion rule will be determined
+      at a later stage, using appropriate defaults.
+      See \code{\link{rmhexpand}}.
+    }
+    \item{periodic}{A logical value (or \code{NULL})
+      determining whether to simulate \dQuote{periodically}.
+      If \code{periodic} is \code{TRUE}, and if the simulation window
+      is a rectangle, then the simulation algorithm effectively
+      identifies opposite edges of the rectangle. Points
+      near the right-hand edge of the rectangle are deemed to be close
+      to points near the left-hand edge. Periodic simulation usually
+      gives a better approximation to a stationary point process.
+      For periodic simulation, the simulation window must be a rectangle.
+      (The simulation window is determined by \code{expand} as described
+      above.)
+
+      The value \code{NULL} means \sQuote{undecided}.
+      The decision is postponed until \code{\link{rmh}} is called.
+      Depending on the point process model to be simulated,
+      \code{rmh} will then set \code{periodic=TRUE} if the simulation window
+      is expanded \emph{and} the expanded simulation window is rectangular;
+      otherwise \code{periodic=FALSE}.
+
+      Note that \code{periodic=TRUE} is only permitted when the
+      simulation window (i.e. the expanded window) is rectangular.       
+    }
+    \item{ptypes}{A vector of probabilities (summing to 1) to be used
+      in assigning a random type to a new point.  Defaults to a vector
+      each of whose entries is \eqn{1/nt} where \eqn{nt} is the number
+      of types for the process.  Convergence of the simulation
+      algorithm should be improved if \code{ptypes} is close to the
+      relative frequencies of the types which will result from the
+      simulation.
+    }
+    \item{x.cond}{
+      If this argument is given,
+      then \emph{conditional simulation} will be performed,
+      and \code{x.cond} specifies the location of the
+      fixed points as well as the type of conditioning.
+      It should be either a point pattern
+      (object of class \code{"ppp"}) or a \code{list(x,y)}
+      or a \code{data.frame}.
+      See the section on Conditional Simulation.
+    }
+    \item{fixall}{A logical scalar specifying whether to condition on
+      the number of points of each type.  Meaningful only if a marked
+      process is being simulated, and if \eqn{p = 1}.  A warning message
+      is given if \code{fixall} is set equal to \code{TRUE} when it is
+      not meaningful.
+    }
+    \item{nverb}{An integer specifying how often ``progress reports''
+      (which consist simply of the number of repetitions completed)
+      should be printed out.  If nverb is left at 0, the default,
+      the simulation proceeds silently.
+    }
+    \item{nsave,nburn}{If these integers are given, then the
+      current state of the simulation algorithm (i.e. the current
+      random point pattern) will be saved every \code{nsave} iterations,
+      starting from iteration \code{nburn}. 
+    }
+    \item{track}{
+      Logical flag indicating whether to save the transition
+      history of the simulations (i.e. information specifying
+      what type of proposal was made, and whether it was accepted
+      or rejected, for each iteration).
+    }
+    \item{pstage}{
+      Character string specifying the stage of the algorithm
+      at which the randomised proposal points should be generated.
+      If \code{pstage="start"} or if \code{nsave=0},
+      the entire sequence of \code{nrep}
+      random proposal points is generated at the start of the
+      algorithm. This is the original
+      behaviour of the code, and should be used in order to maintain
+      consistency with older versions of \pkg{spatstat}.
+      If \code{pstage="block"} and \code{nsave > 0}, then
+      a set of \code{nsave} random proposal points will be generated
+      before each block of \code{nsave} iterations. This is much more
+      efficient.
+      The default is \code{pstage="block"}.
+    }
+  }
+}
+\section{Conditional Simulation}{
+  For a Gibbs point process \eqn{X},
+  the Metropolis-Hastings algorithm easily accommodates several
+  kinds of conditional simulation:
+  \describe{
+    \item{conditioning on the total number of points:}{
+      We fix the total number of points \eqn{N(X)} to be equal to
+      \eqn{n}. We simulate from the conditional distribution of
+      \eqn{X} given \eqn{N(X) = n}. 
+    }
+    \item{conditioning on the number of points of each type:}{
+      In a multitype point process, where \eqn{Y_j}{Y[[j]]} denotes the
+      process of points of type \eqn{j}, we fix the number
+      \eqn{N(Y_j)}{N(Y[[j]])} of points of type \eqn{j} to be equal to
+      \eqn{n_j}{n[j]}, for \eqn{j=1,2,\ldots,m}{j=1,2,...,m}.
+      We simulate from the conditional distribution of \eqn{X}
+      given \eqn{N(Y_j)=n_j}{N(Y[[j]]) = n[j]} for
+      \eqn{j=1,2,\ldots,m}{j=1,2,...,m}. 
+    }
+    \item{conditioning on the realisation in a subwindow:}{
+      We require that the point process \eqn{X} should,
+      within a specified sub-window \eqn{V},
+      coincide with a specified point pattern \eqn{y}.
+      We simulate from the conditional distribution of \eqn{X}
+      given \eqn{X \cap V = y}{(X intersect V) = y}.
+    }
+    \item{Palm conditioning:}{
+      We require that the point process \eqn{X} include
+      a specified list of points \eqn{y}. We simulate from
+      the point process with probability density
+      \eqn{g(x) = c f(x \cup y)}{g(x) = c * f(x union y)}
+      where \eqn{f} is the probability density of the original
+      process \eqn{X}, and \eqn{c} is a normalising constant.
+    }
+  }
+  To achieve each of these types of conditioning we
+  do as follows:
+  \describe{
+    \item{conditioning on the total number of points:}{
+      Set \code{p=1}.
+      The number of points is determined by the initial state
+      of the simulation: see \code{\link{rmhstart}}.
+    }
+    \item{conditioning on the number of points of each type:}{
+      Set \code{p=1} and \code{fixall=TRUE}.
+      The number of points of each type is determined by the initial state
+      of the simulation: see \code{\link{rmhstart}}.
+    }
+    \item{conditioning on the realisation in a subwindow:}{
+      Set \code{x.cond} to be a point pattern (object of
+      class \code{"ppp"}). Its window \code{V=Window(x.cond)} becomes the
+      conditioning subwindow \eqn{V}.
+    }
+    \item{Palm conditioning:}{
+      Set \code{x.cond} to be a \code{list(x,y)} or \code{data.frame}
+      with two columns containing the coordinates of the points, or a 
+      \code{list(x,y,marks)} or \code{data.frame} with three columns
+      containing the coordinates and marks of the points.
+    }
+  }
+  The arguments \code{x.cond}, \code{p} and \code{fixall} can be
+  combined.   
+}
+\references{
+   Geyer, C.J. (1999)
+   Likelihood Inference for Spatial Point
+   Processes. Chapter 3 in  O.E. Barndorff-Nielsen, W.S. Kendall and
+   M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and
+   Computation}, Chapman and Hall / CRC,  Monographs on Statistics and
+   Applied Probability, number 80. Pages 79--140.
+}
+
+\seealso{
+  \code{\link{rmh}},
+  \code{\link{rmhmodel}},
+  \code{\link{rmhstart}},
+  \code{\link{rmhexpand}},
+  \code{\link{spatstat.options}}
+}
+\examples{
+   # parameters given as named arguments
+   c1 <- rmhcontrol(p=0.3,periodic=TRUE,nrep=1e6,nverb=1e5)
+
+   # parameters given as a list
+   liz <- list(p=0.9, nrep=1e4)
+   c2 <- rmhcontrol(liz)
+
+   # parameters given in rmhcontrol object
+   c3 <- rmhcontrol(c1)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rmhexpand.Rd b/man/rmhexpand.Rd
new file mode 100644
index 0000000..8035486
--- /dev/null
+++ b/man/rmhexpand.Rd
@@ -0,0 +1,157 @@
+\name{rmhexpand}
+\alias{rmhexpand}
+\title{
+  Specify Simulation Window or Expansion Rule
+}
+\description{
+  Specify a spatial domain in which point process simulations
+  will be performed. Alternatively, specify a rule which will be
+  used to determine the simulation window.
+}
+\usage{
+rmhexpand(x = NULL, ..., area = NULL, length = NULL, distance = NULL)
+}
+\arguments{
+  \item{x}{
+    Any kind of data determining the simulation window or the
+    expansion rule.
+    A window (object of class \code{"owin"}) specifying the
+    simulation window, a numerical value specifying an expansion
+    factor or expansion distance, a list containing
+    one numerical value, an object of class \code{"rmhexpand"},
+    or \code{NULL}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{area}{
+    Area expansion factor. 
+    Incompatible with other arguments.
+  }
+  \item{length}{
+    Length expansion factor.
+    Incompatible with other arguments.
+    }
+  \item{distance}{
+    Expansion distance (buffer width).
+    Incompatible with other arguments.
+  }
+}
+\details{
+  In the Metropolis-Hastings algorithm \code{\link{rmh}}
+  for simulating spatial point processes, 
+  simulations are usually carried out on a spatial domain that is
+  larger than the original window of the point process model,
+  then subsequently clipped to the original window.
+
+  The command \code{rmhexpand} can be used to specify the simulation window,
+  or to specify a rule which will later be used to determine the
+  simulation window from data.
+
+  The arguments are all incompatible: at most one of them should be
+  given.
+
+  If the first argument \code{x} is given, it may be any of the
+  following:
+  \itemize{
+    \item
+    a window (object of class \code{"owin"}) specifying the
+    simulation window.
+    \item
+    an object of class \code{"rmhexpand"} specifying the
+    expansion rule.
+    \item
+    a single numerical value, without attributes.
+    This will be interpreted as the value of the argument \code{area}.
+    \item
+    either \code{c(area=v)} or \code{list(area=v)},
+    where \code{v} is a single numeric value.
+    This will be interpreted as the value of the argument \code{area}.
+    \item
+    either \code{c(length=v)} or \code{list(length=v)},
+    where \code{v} is a single numeric value.
+    This will be interpreted as the value of the argument \code{length}.
+    \item
+    either \code{c(distance=v)} or \code{list(distance=v)},
+    where \code{v} is a single numeric value.
+    This will be interpreted as the value of the argument \code{distance}.
+    \item 
+    \code{NULL}, meaning that the expansion rule is not yet
+    determined. 
+  }
+
+  If one of the arguments \code{area}, \code{length} or \code{distance}
+  is given, then the simulation window is determined from the original
+  data window as follows.
+  \describe{
+    \item{area}{
+      The bounding box of the original data window will be extracted,
+      and the simulation window will be a scalar dilation of this rectangle.
+      The argument \code{area} should be a numerical value, greater than
+      or equal to 1. It specifies the area expansion factor, i.e. the ratio
+      of the area of the simulation window to the area of the
+      original point process window's bounding box.
+    }
+    \item{length}{
+      The bounding box of the original data window will be extracted,
+      and the simulation window will be a scalar dilation of this rectangle.
+      The argument \code{length} should be a numerical value, greater than
+      or equal to 1. It specifies the length expansion factor, i.e. the ratio
+      of the width (height) of the simulation window to the width
+      (height) of the original point process window's bounding box.
+    }
+    \item{distance}{
+      The argument \code{distance} should be a numerical value, greater than
+      or equal to 0. It specifies the width of a buffer region around the
+      original data window. 
+      If the original data window is a rectangle, then this window
+      is extended by a margin of width equal to \code{distance}
+      around all sides of the original rectangle. The result is a
+      rectangle.
+      If the original data window is not a rectangle, then
+      morphological dilation is applied using
+      \code{\link{dilation.owin}}
+      so that a margin or buffer of width equal to \code{distance}
+      is created around all sides of the original window. The result
+      is a non-rectangular window, typically of a different shape.
+    }
+  }
+}
+\section{Undetermined expansion}{
+  If \code{expand=NULL}, this is interpreted to mean that the
+  expansion rule is \dQuote{not yet decided}. Expansion will be decided
+  later, by the simulation algorithm \code{\link{rmh}}.
+  If the model cannot be expanded (for example if the covariate data
+  in the model are not available on a larger domain) then expansion
+  will not occur. If the model can be expanded, then
+  if the point process model has a finite interaction range \code{r},
+  the default is \code{rmhexpand(distance=2*r)}, and
+  otherwise \code{rmhexpand(area=2)}.
+}
+\value{
+  An object of class \code{"rmhexpand"} specifying the
+  expansion rule. There is a \code{print} method for this class.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{expand.owin}} to apply the rule to a window.
+
+  \code{\link{will.expand}} to test whether expansion will occur.
+
+  \code{\link{rmh}}, \code{\link{rmhcontrol}} for background details.
+}
+\examples{
+  rmhexpand()
+  rmhexpand(2)
+  rmhexpand(1)
+  rmhexpand(length=1.5)
+  rmhexpand(distance=0.1)
+  rmhexpand(letterR)
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rmhmodel.Rd b/man/rmhmodel.Rd
new file mode 100644
index 0000000..6689789
--- /dev/null
+++ b/man/rmhmodel.Rd
@@ -0,0 +1,100 @@
+\name{rmhmodel}
+\alias{rmhmodel}
+\title{Define Point Process Model for Metropolis-Hastings Simulation.}
+\description{
+  Builds a description of a point process model
+  for use in simulating the model by the Metropolis-Hastings
+  algorithm. 
+}
+\usage{
+  rmhmodel(...)
+}
+\arguments{
+  \item{\dots}{Arguments specifying the point process model
+    in some format.
+  }
+}
+\value{
+  An object of class \code{"rmhmodel"}, which is essentially
+  a list of parameter values for the model.
+  
+  There is a \code{print} method for this class, which prints
+  a sensible description of the model chosen.
+}
+\details{
+  Simulated realisations of many point process models
+  can be generated using the Metropolis-Hastings algorithm
+  \code{\link{rmh}}. The algorithm requires the model to be specified
+  in a particular format: an object of class \code{"rmhmodel"}.
+
+  The function \code{\link{rmhmodel}} takes a
+  description of a point process model in some other format, and
+  converts it into an object of class \code{"rmhmodel"}.
+  It also checks that the parameters of the model are valid.
+
+  The function \code{\link{rmhmodel}} is generic, with methods
+  for
+  \describe{
+    \item{fitted point process models:}{
+      an object of class \code{"ppm"}, obtained by a call to the
+      model-fitting function \code{\link{ppm}}.
+      See \code{\link{rmhmodel.ppm}}.
+    }
+    \item{lists:}{
+      a list of parameter values in a certain format.
+      See \code{\link{rmhmodel.list}}.
+    }
+    \item{default:}{
+      parameter values specified as separate arguments to \code{\dots}.
+      See \code{\link{rmhmodel.default}}.
+    }
+  }
+}
+\references{
+   Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point
+   Patterns} (2nd ed.) Arnold, London.
+
+   Diggle, P.J. and Gratton, R.J. (1984)
+   Monte Carlo methods of inference for implicit statistical models.
+   \emph{Journal of the Royal Statistical Society, series B}
+   \bold{46}, 193 -- 212.
+
+   Diggle, P.J., Gates, D.J., and Stibbard, A. (1987)
+   A nonparametric estimator for pairwise-interaction point processes.
+   Biometrika \bold{74}, 763 -- 770.
+   \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373.
+
+   Geyer, C.J. (1999)
+   Likelihood Inference for Spatial Point
+   Processes. Chapter 3 in  O.E. Barndorff-Nielsen, W.S. Kendall and
+   M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and
+   Computation}, Chapman and Hall / CRC,  Monographs on Statistics and
+   Applied Probability, number 80. Pages 79--140.
+}
+
+\seealso{
+  \code{\link{rmhmodel.ppm}},
+  \code{\link{rmhmodel.default}},
+  \code{\link{rmhmodel.list}},
+  \code{\link{rmh}},
+  \code{\link{rmhcontrol}},
+  \code{\link{rmhstart}},
+  \code{\link{ppm}},
+  \code{\link{Strauss}},
+  \code{\link{Softcore}},
+  \code{\link{StraussHard}},
+  \code{\link{Triplets}},
+  \code{\link{MultiStrauss}},
+  \code{\link{MultiStraussHard}},
+  \code{\link{DiggleGratton}},
+  \code{\link{PairPiece}}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rmhmodel.default.Rd b/man/rmhmodel.default.Rd
new file mode 100644
index 0000000..e9351c6
--- /dev/null
+++ b/man/rmhmodel.default.Rd
@@ -0,0 +1,533 @@
+\name{rmhmodel.default}
+\alias{rmhmodel.default}
+\title{Build Point Process Model for Metropolis-Hastings Simulation.}
+\description{
+  Builds a description of a point process model
+  for use in simulating the model by the Metropolis-Hastings
+  algorithm. 
+}
+\usage{
+  \method{rmhmodel}{default}(..., 
+         cif=NULL, par=NULL, w=NULL, trend=NULL, types=NULL)
+}
+\arguments{
+  \item{\dots}{Ignored.}
+  \item{cif}{Character string specifying the choice of model}
+  \item{par}{Parameters of the model}
+  \item{w}{Spatial window in which to simulate}
+  \item{trend}{Specification of the trend in the model}
+  \item{types}{A vector of factor levels defining the possible
+    marks, for a multitype process.
+  }
+}
+\value{
+  An object of class \code{"rmhmodel"}, which is essentially
+  a list of parameter values for the model.
+  
+  There is a \code{print} method for this class, which prints
+  a sensible description of the model chosen.
+}
+\details{
+  The generic function \code{\link{rmhmodel}} takes a
+  description of a point process model in some format, and
+  converts it into an object of class \code{"rmhmodel"}
+  so that simulations of the model can be generated using
+  the Metropolis-Hastings algorithm \code{\link{rmh}}. 
+  
+  This function \code{rmhmodel.default} is the default method.
+  It builds a description of the point process model
+  from the simple arguments listed.
+
+  The argument \code{cif} is a character string specifying the choice of
+  interpoint interaction for the point process. The current options are
+  \describe{
+    \item{\code{'areaint'}}{Area-interaction process.}
+    \item{\code{'badgey'}}{Baddeley-Geyer (hybrid Geyer) process.}
+    \item{\code{'dgs'}}{Diggle, Gates and Stibbard (1987) process}
+    \item{\code{'diggra'}}{Diggle and Gratton (1984) process}
+    \item{\code{'fiksel'}}{Fiksel double exponential process (Fiksel, 1984).}
+    \item{\code{'geyer'}}{Saturation process (Geyer, 1999).}
+    \item{\code{'hardcore'}}{Hard core process}
+    \item{\code{'lennard'}}{Lennard-Jones process}
+    \item{\code{'lookup'}}{General isotropic pairwise interaction process,
+      with the interaction function specified via a ``lookup table''.}
+    \item{\code{'multihard'}}{Multitype hardcore process}
+    \item{\code{'penttinen'}}{The Penttinen process}
+    \item{\code{'strauss'}}{The Strauss process}
+    \item{\code{'straush'}}{The Strauss process with hard core}
+    \item{\code{'sftcr'}}{The Softcore process}
+    \item{\code{'straussm'}}{ The multitype Strauss process}
+    \item{\code{'straushm'}}{Multitype Strauss process with hard core}
+    \item{\code{'triplets'}}{Triplets process (Geyer, 1999).}
+  }
+  It is also possible to specify a \emph{hybrid} of these interactions
+  in the sense of Baddeley et al (2013).
+  In this case, \code{cif} is a character vector containing names from
+  the list above. For example, \code{cif=c('strauss', 'geyer')} would
+  specify a hybrid of the Strauss and Geyer models.
+  
+  The argument \code{par} supplies parameter values appropriate to
+  the conditional intensity function being invoked.
+  For the interactions listed above, these parameters are:
+  \describe{
+    \item{areaint:}{
+      (Area-interaction process.) A \bold{named} list with components
+      \code{beta,eta,r} which are respectively the ``base''
+      intensity, the scaled interaction parameter and the
+      interaction radius.  
+    }
+    \item{badgey:}{
+      (Baddeley-Geyer process.)
+      A \bold{named} list with components
+      \code{beta} (the ``base'' intensity), \code{gamma} (a vector
+      of non-negative interaction parameters), \code{r} (a vector
+      of interaction radii, of the same length as \code{gamma},
+      in \emph{increasing} order), and \code{sat} (the saturation
+      parameter(s); this may be a scalar, or a vector of the same
+      length as \code{gamma} and \code{r}; all values should be at
+      least 1).  Note that because of the presence of ``saturation''
+      the \code{gamma} values are permitted to be larger than 1.
+    }
+    \item{dgs:}{
+      (Diggle, Gates, and Stibbard process.
+      See Diggle, Gates, and Stibbard (1987))
+      A \bold{named} list with components
+      \code{beta} and \code{rho}.  This process has pairwise interaction
+      function equal to
+      \deqn{
+	e(t) = \sin^2\left(\frac{\pi t}{2\rho}\right)
+      }{
+	e(t) = sin^2((pi * t)/(2 * rho))
+      }
+      for \eqn{t < \rho}{t < rho}, and equal to 1
+      for \eqn{t \ge \rho}{t >= rho}.
+    }
+    \item{diggra:}{
+      (Diggle-Gratton process. See Diggle and Gratton (1984)
+      and Diggle, Gates and Stibbard (1987).)
+      A \bold{named} list with components \code{beta},
+      \code{kappa}, \code{delta} and \code{rho}.  This process has
+      pairwise interaction function \eqn{e(t)} equal to 0
+      for \eqn{t < \delta}{t < delta}, equal to
+      \deqn{
+	\left(\frac{t-\delta}{\rho-\delta}\right)^\kappa
+      }{
+	((t-delta)/(rho-delta))^kappa
+      }
+      for \eqn{\delta \le t < \rho}{delta <= t < rho},
+      and equal to 1 for \eqn{t \ge  \rho}{t >= rho}.
+      Note that here we use the symbol
+      \eqn{\kappa}{kappa} where Diggle, Gates, and Stibbard use
+      \eqn{\beta}{beta} since we reserve the symbol \eqn{\beta}{beta}
+      for an intensity parameter.
+    }
+    \item{fiksel:}{
+      (Fiksel double exponential process, see Fiksel (1984))
+      A \bold{named} list with components \code{beta},
+      \code{r}, \code{hc}, \code{kappa} and \code{a}.  This process has
+      pairwise interaction function \eqn{e(t)} equal to 0
+      for \eqn{t < hc}, equal to
+      \deqn{
+	\exp(a \exp(- \kappa t))
+      }{
+	exp(a * exp( - kappa * t))
+      }
+      for \eqn{hc \le t < r}{hc <= t < r},
+      and equal to 1 for \eqn{t \ge  r}{t >= r}.
+    }
+    \item{geyer:}{
+      (Geyer's saturation process. See Geyer (1999).)
+      A \bold{named} list
+      with components \code{beta}, \code{gamma}, \code{r}, and \code{sat}.
+      The components \code{beta}, \code{gamma}, \code{r} are as for
+      the Strauss model, and \code{sat} is the ``saturation''
+      parameter.  The model is Geyer's ``saturation'' point process
+      model, a modification of the Strauss process in which
+      we effectively impose an upper limit (\code{sat}) on the number of
+      neighbours which will be counted as close to a given point.
+      
+      Explicitly, a saturation point process with interaction
+      radius \eqn{r}, saturation threshold \eqn{s}, and
+      parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma},
+      is the point process in which each point \eqn{x_i}{x[i]}
+      in the pattern \eqn{X} contributes a factor
+      \deqn{\beta \gamma^{\min(s, t(x_i,X))}}{beta gamma^min(s,t(x[i],X))}
+      to the probability density of the point pattern,
+      where \eqn{t(x_i,X)}{t(x[i],X)} denotes the number of
+      ``\eqn{r}-close neighbours'' of \eqn{x_i}{x[i]} in the
+      pattern \eqn{X}.
+
+      If the saturation threshold \eqn{s} is infinite,
+      the Geyer process reduces to a Strauss process
+      with interaction parameter \eqn{\gamma^2}{gamma^2}
+      rather than \eqn{\gamma}{gamma}.
+    }
+    \item{hardcore:}{
+      (Hard core process.) A \bold{named} list
+      with components \code{beta} and \code{hc}
+      where \code{beta} is the base intensity and \code{hc} is the
+      hard core distance.
+      This process has pairwise interaction function \eqn{e(t)}
+      equal to 1 if \eqn{t > hc} and 0 if \eqn{t <= hc}.
+    }
+    \item{lennard:}{
+      (Lennard-Jones process.) A \bold{named} list
+      with components \code{sigma} and \code{epsilon},
+      where \code{sigma} is the characteristic diameter
+      and \code{epsilon} is the well depth.
+      See \code{\link{LennardJones}} for explanation.
+    }
+    \item{multihard:}{
+      (Multitype hard core process.) A \bold{named} list
+      with components \code{beta} and \code{hradii},
+      where \code{beta} is a vector of base intensities for each type
+      of point, and \code{hradii} is a matrix of hard core radii
+      between each pair of types. 
+    }
+    \item{penttinen:}{
+      (Penttinen process.) A \bold{named} list with components
+      \code{beta,gamma,r} which are respectively the ``base''
+      intensity, the pairwise interaction parameter, and the disc radius.
+      Note that \code{gamma} must be less than or equal to 1.
+      See \code{\link{Penttinen}} for explanation.
+      (Note that there is also an algorithm for perfect simulation
+      of the Penttinen process, \code{\link{rPenttinen}})
+    }
+    \item{strauss:}{
+      (Strauss process.) A \bold{named} list with components
+      \code{beta,gamma,r} which are respectively the ``base''
+      intensity, the pairwise interaction parameter and the
+      interaction radius.  Note that \code{gamma} must be less than
+      or equal to 1.
+      (Note that there is also an algorithm for perfect simulation
+      of the Strauss process, \code{\link{rStrauss}})
+    }
+    \item{straush:}{
+      (Strauss process with hardcore.) A \bold{named} list with
+      entries \code{beta,gamma,r,hc} where \code{beta}, \code{gamma},
+      and \code{r} are as for the Strauss process, and \code{hc} is
+      the hardcore radius.  Of course \code{hc} must be less than
+      \code{r}.
+    }
+    \item{sftcr:}{
+      (Softcore process.) A \bold{named} list with components
+      \code{beta,sigma,kappa}.  Again \code{beta} is a ``base''
+      intensity. The pairwise interaction between two points
+      \eqn{u \neq v}{u != v} is
+      \deqn{
+	\exp \left \{
+	- \left ( \frac{\sigma}{||u-v||} \right )^{2/\kappa}
+	\right \}
+      }{-(sigma/||u-v||)^(2/kappa)}
+      Note that it is necessary that \eqn{0 < \kappa < 1}{0 < kappa <1}.
+    }
+    \item{straussm:}{
+      (Multitype Strauss process.) A \bold{named} list with components
+      \itemize{
+	\item
+	\code{beta}: 
+	A vector of ``base'' intensities, one for each possible type.
+	\item
+	\code{gamma}:
+	A \bold{symmetric} matrix of interaction parameters,
+	with \eqn{\gamma_{ij}}{gamma_ij} pertaining to the interaction between
+	type \eqn{i} and type \eqn{j}.
+	\item
+	\code{radii}:
+	A \bold{symmetric} matrix of interaction radii, with
+	entries \eqn{r_{ij}}{r_ij} pertaining to the interaction between type
+	\eqn{i} and type \eqn{j}.
+      }
+    }
+    \item{straushm:}{
+      (Multitype Strauss process with hardcore.)
+      A \bold{named} list with components \code{beta} and \code{gamma}
+      as for \code{straussm} and
+      \bold{two} ``radii'' components:
+      \itemize{
+        \item \code{iradii}: the interaction radii
+        \item \code{hradii}: the hardcore radii
+      }
+      which are both symmetric matrices of nonnegative numbers.
+      The entries of \code{hradii} must be less than the
+      corresponding entries
+      of \code{iradii}.
+    }
+    \item{triplets:}{
+      (Triplets process.) A \bold{named} list with components
+      \code{beta,gamma,r} which are respectively the ``base''
+      intensity, the triplet interaction parameter and the
+      interaction radius.  Note that \code{gamma} must be less than
+      or equal to 1.
+    }
+    \item{lookup:}{
+      (Arbitrary pairwise interaction process with isotropic interaction.)
+      A \bold{named} list with components
+      \code{beta}, \code{r}, and \code{h}, or just with components
+      \code{beta} and \code{h}.
+
+      This model is the pairwise interaction process
+      with an isotropic interaction given by any chosen function \eqn{H}.
+      Each pair of points \eqn{x_i, x_j}{x[i], x[j]} in the
+      point pattern contributes
+      a factor \eqn{H(d(x_i, x_j))}{H(d(x[i],x[j]))}
+      to the probability density, where \eqn{d} denotes distance
+      and \eqn{H} is the pair interaction function.
+
+      The component \code{beta} is a
+      (positive) scalar which determines the ``base'' intensity
+      of the process.
+
+      In this implementation, \eqn{H} must be a step function.
+      It is specified by the user in one of two ways.
+      \itemize{
+	\item
+	\bold{as a vector of values:}
+	If \code{r} is present, then \code{r} is assumed to 
+	give the locations of jumps in the function \eqn{H},
+	while the vector \code{h} gives the corresponding
+	values of the function.
+
+	Specifically, the interaction function
+	\eqn{H(t)} takes the value \code{h[1]}
+	for distances \eqn{t} in the interval 
+	\code{[0, r[1])}; takes the value \code{h[i]}
+	for distances \eqn{t} in the interval 
+	\code{[r[i-1], r[i])} where
+	\eqn{i = 2,\ldots, n}{i = 2, ..., n};
+	and takes the value 1 for \eqn{t \ge r[n]}{t >= r[n]}.
+	Here \eqn{n} denotes the length of \code{r}.
+	    
+	The components \code{r} and \code{h}
+	must be numeric vectors of equal length.
+	The \code{r} values must be strictly positive, and 
+	sorted in increasing order.
+
+	The entries of \code{h} must be non-negative. 
+	If any entry of \code{h} is greater than 1,
+	then the entry \code{h[1]} must be 0 (otherwise the specified
+	process is non-existent).
+
+	Greatest efficiency is achieved if the values of
+	\code{r} are equally spaced.
+	    
+	[\bold{Note:} The usage of \code{r} and \code{h}
+	has \emph{changed} from the previous usage in \pkg{spatstat}
+	versions 1.4-7 to 1.5-1, in which ascending order was not required,
+	and in which the first entry of \code{r} had to be 0.]
+
+	\item
+	\bold{as a stepfun object:}
+	If \code{r} is absent, then \code{h} must be
+	an object of class \code{"stepfun"} specifying
+	a step function. Such objects are created by
+	\code{\link{stepfun}}. 
+
+	The stepfun object \code{h} must be right-continuous
+	(which is the default using \code{\link{stepfun}}.)
+
+	The values of the step function must all be nonnegative.
+	The values must all be less than 1
+	unless the function is identically zero on some initial
+	interval \eqn{[0,r)}. The rightmost value (the value of
+	\code{h(t)} for large \code{t}) must be equal to 1.
+
+	Greatest efficiency is achieved if the jumps (the
+	``knots'' of the step function) are equally spaced.
+      }
+    }
+  }
+  For a hybrid model, the argument \code{par} should be a list,
+  of the same length as \code{cif}, such that \code{par[[i]]}
+  is a list of the parameters required for the interaction
+  \code{cif[i]}. See the Examples.
+  
+  The optional argument \code{trend} determines the spatial trend in the model,
+  if it has one. It should be a function or image
+  (or a list of such, if the model is multitype)
+  to provide the value of the trend at an arbitrary point.
+  \describe{
+    \item{trend given as a function:}{A trend
+      function may be a function of any number of arguments,
+      but the first two must be the \eqn{x,y} coordinates of
+      a point.  Auxiliary arguments may be passed
+      to the \code{trend} function at the time of simulation,
+      via the \code{\dots} argument to \code{\link{rmh}}.
+      
+      The function \bold{must} be \bold{vectorized}.
+      That is, it must be capable of accepting vector valued
+      \code{x} and \code{y} arguments.  Put another way,
+      it must be capable of calculating the trend value at a
+      number of points, simultaneously, and should return the
+      \bold{vector} of corresponding trend values.
+    }
+    \item{trend given as an image:}{
+      An image (see \code{\link{im.object}})
+      provides the trend values at a grid of
+      points in the observation window and determines the trend
+      value at other points as the value at the nearest grid point.
+    }
+  }
+  Note that the trend or trends must be \bold{non-negative};
+  no checking is done for this.
+  
+  The optional argument \code{w} specifies the window
+  in which the pattern is to be generated.  If specified, it must be in
+  a form which can be coerced to an object of class \code{owin}
+  by \code{\link{as.owin}}.
+
+  The optional argument \code{types} specifies the possible
+  types in a multitype point process. If the model being simulated
+  is multitype, and \code{types} is not specified, then this vector
+  defaults to \code{1:ntypes} where \code{ntypes} is the number of
+  types.
+}
+\references{
+  Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013)
+  Hybrids of Gibbs point process models and their implementation.
+  \emph{Journal of Statistical Software} \bold{55}:11, 1--43.
+  \url{http://www.jstatsoft.org/v55/i11/}
+
+   Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point
+   Patterns} (2nd ed.) Arnold, London.
+
+   Diggle, P.J. and Gratton, R.J. (1984)
+   Monte Carlo methods of inference for implicit statistical models.
+   \emph{Journal of the Royal Statistical Society, series B}
+   \bold{46}, 193 -- 212.
+
+   Diggle, P.J., Gates, D.J., and Stibbard, A. (1987)
+   A nonparametric estimator for pairwise-interaction point processes.
+   Biometrika \bold{74}, 763 -- 770.
+   \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373.
+
+   Fiksel, T. (1984)
+   Estimation of parameterized pair potentials
+   of marked and non-marked Gibbsian point processes.
+   \emph{Electronische Informationsverabeitung und Kybernetika}
+   \bold{20}, 270--278.
+
+   Geyer, C.J. (1999)
+   Likelihood Inference for Spatial Point
+   Processes. Chapter 3 in  O.E. Barndorff-Nielsen, W.S. Kendall and
+   M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and
+   Computation}, Chapman and Hall / CRC,  Monographs on Statistics and
+   Applied Probability, number 80. Pages 79--140.
+}
+
+\section{Warnings in Respect of ``lookup''}{
+
+For the \code{lookup} cif, 
+the entries of the \code{r} component of \code{par}
+must be \emph{strictly positive} and sorted into ascending order.
+
+Note that if you specify the \code{lookup} pairwise interaction
+function via \code{\link{stepfun}()} the arguments \code{x}
+and \code{y} which are passed to \code{stepfun()} are slightly
+different from \code{r} and \code{h}:  \code{length(y)} is equal
+to \code{1+length(x)}; the final entry of \code{y} must be equal
+to 1 --- i.e. this value is explicitly supplied by the user rather
+than getting tacked on internally.
+
+The step function returned by \code{stepfun()} must be right
+continuous (this is the default behaviour of \code{stepfun()})
+otherwise an error is given.
+}
+
+\seealso{
+  \code{\link{rmh}},
+  \code{\link{rmhcontrol}},
+  \code{\link{rmhstart}},
+  \code{\link{ppm}},
+  \rmhInteractionsList.
+}
+\examples{
+   # Strauss process:
+   mod01 <- rmhmodel(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7),
+                 w=c(0,10,0,10))
+   # The above could also be simulated using 'rStrauss'
+
+   # Strauss with hardcore:
+   mod04 <- rmhmodel(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3),
+                w=owin(c(0,10),c(0,5)))
+
+   # Hard core:
+   mod05 <- rmhmodel(cif="hardcore",par=list(beta=2,hc=0.3),
+              w=square(5))
+
+   # Soft core:
+   w    <- square(10)
+   mod07 <- rmhmodel(cif="sftcr",
+                     par=list(beta=0.8,sigma=0.1,kappa=0.5),
+                     w=w)
+   
+   # Area-interaction process:
+   mod42 <- rmhmodel(cif="areaint",par=list(beta=2,eta=1.6,r=0.7),
+                 w=c(0,10,0,10))
+
+   # Baddeley-Geyer process:
+   mod99 <- rmhmodel(cif="badgey",par=list(beta=0.3,
+                     gamma=c(0.2,1.8,2.4),r=c(0.035,0.07,0.14),sat=5),
+                     w=unit.square())
+
+   # Multitype Strauss:
+   beta <- c(0.027,0.008)
+   gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2)
+   r    <- matrix(c(45,45,45,45),2,2)
+   mod08 <- rmhmodel(cif="straussm",
+                     par=list(beta=beta,gamma=gmma,radii=r),
+                     w=square(250))
+   # specify types
+   mod09 <- rmhmodel(cif="straussm",
+                     par=list(beta=beta,gamma=gmma,radii=r),
+                     w=square(250),
+                     types=c("A", "B"))
+
+   # Multitype Hardcore:
+   rhc  <- matrix(c(9.1,5.0,5.0,2.5),2,2)
+   mod08hard <- rmhmodel(cif="multihard",
+                     par=list(beta=beta,hradii=rhc),
+                     w=square(250),
+                     types=c("A", "B"))
+
+   
+   # Multitype Strauss hardcore with trends for each type:
+   beta  <- c(0.27,0.08)
+   ri    <- matrix(c(45,45,45,45),2,2)
+   rhc  <- matrix(c(9.1,5.0,5.0,2.5),2,2)
+   tr3   <- function(x,y){x <- x/250; y <- y/250;
+   			   exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6)
+                         }
+                         # log quadratic trend
+   tr4   <- function(x,y){x <- x/250; y <- y/250;
+                         exp(-0.6*x+0.5*y)}
+                        # log linear trend
+   mod10 <- rmhmodel(cif="straushm",par=list(beta=beta,gamma=gmma,
+                 iradii=ri,hradii=rhc),w=c(0,250,0,250),
+                 trend=list(tr3,tr4))
+
+   # Triplets process:
+   mod11 <- rmhmodel(cif="triplets",par=list(beta=2,gamma=0.2,r=0.7),
+                 w=c(0,10,0,10))
+
+   # Lookup (interaction function h_2 from page 76, Diggle (2003)):
+      r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0.
+      h <- 20*(r-0.05)
+      h[r<0.05] <- 0
+      h[r>0.10] <- 1
+      mod17 <- rmhmodel(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1))
+
+  # hybrid model
+  modhy <- rmhmodel(cif=c('strauss', 'geyer'),
+                    par=list(list(beta=100,gamma=0.5,r=0.05),
+                             list(beta=1, gamma=0.7,r=0.1, sat=2)),
+                    w=square(1))
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rmhmodel.list.Rd b/man/rmhmodel.list.Rd
new file mode 100644
index 0000000..d95c7be
--- /dev/null
+++ b/man/rmhmodel.list.Rd
@@ -0,0 +1,150 @@
+\name{rmhmodel.list}
+\alias{rmhmodel.list}
+\title{Define Point Process Model for Metropolis-Hastings Simulation.}
+\description{
+  Given a list of parameters,
+  builds a description of a point process model
+  for use in simulating the model by the Metropolis-Hastings
+  algorithm. 
+}
+\usage{
+   \method{rmhmodel}{list}(model, ...)
+}
+\arguments{
+  \item{model}{A list of parameters. See Details.}
+  \item{\dots}{
+    Optional list of additional named parameters.
+  }
+}
+\value{
+  An object of class \code{"rmhmodel"}, which is essentially
+  a validated list of parameter values for the model.
+  
+  There is a \code{print} method for this class, which prints
+  a sensible description of the model chosen.
+}
+\details{
+  The generic function \code{\link{rmhmodel}} takes a
+  description of a point process model in some format, and
+  converts it into an object of class \code{"rmhmodel"}
+  so that simulations of the model can be generated using
+  the Metropolis-Hastings algorithm \code{\link{rmh}}. 
+  
+  This function \code{rmhmodel.list} is the method
+  for lists. The argument \code{model} should be a named list of parameters
+  of the form
+  
+  \code{list(cif, par, w, trend, types)}
+
+  where \code{cif} and \code{par} are required and the others are
+  optional. For details about these components,
+  see \code{\link{rmhmodel.default}}.
+
+  The subsequent arguments \code{\dots} (if any) may also
+  have these names, and they will take precedence over
+  elements of the list \code{model}. 
+}  
+\references{
+   Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point
+   Patterns} (2nd ed.) Arnold, London.
+
+   Diggle, P.J. and Gratton, R.J. (1984)
+   Monte Carlo methods of inference for implicit statistical models.
+   \emph{Journal of the Royal Statistical Society, series B}
+   \bold{46}, 193 -- 212.
+
+   Diggle, P.J., Gates, D.J., and Stibbard, A. (1987)
+   A nonparametric estimator for pairwise-interaction point processes.
+   \emph{Biometrika} \bold{74}, 763 -- 770.
+
+   Geyer, C.J. and Moller, J. (1994)
+   Simulation procedures and likelihood inference for spatial
+   point processes.
+   \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373.
+
+   Geyer, C.J. (1999)
+   Likelihood Inference for Spatial Point
+   Processes. Chapter 3 in  O.E. Barndorff-Nielsen, W.S. Kendall and
+   M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and
+   Computation}, Chapman and Hall / CRC,  Monographs on Statistics and
+   Applied Probability, number 80. Pages 79--140.
+}
+
+\seealso{
+  \code{\link{rmhmodel}},
+  \code{\link{rmhmodel.default}},
+  \code{\link{rmhmodel.ppm}},
+  \code{\link{rmh}},
+  \code{\link{rmhcontrol}},
+  \code{\link{rmhstart}},
+  \code{\link{ppm}},
+  \code{\link{Strauss}},
+  \code{\link{Softcore}},
+  \code{\link{StraussHard}},
+  \code{\link{MultiStrauss}},
+  \code{\link{MultiStraussHard}},
+  \code{\link{DiggleGratton}},
+  \code{\link{PairPiece}}
+}
+\examples{
+   # Strauss process:
+   mod01 <- list(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7),
+                 w=c(0,10,0,10))
+   mod01 <- rmhmodel(mod01)
+
+   # Strauss with hardcore:
+   mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3),
+                w=owin(c(0,10),c(0,5)))
+   mod04 <- rmhmodel(mod04)
+
+   # Soft core:
+   w    <- square(10)
+   mod07 <- list(cif="sftcr",
+                     par=list(beta=0.8,sigma=0.1,kappa=0.5),
+                     w=w)
+   mod07 <- rmhmodel(mod07)
+   
+   # Multitype Strauss:
+   beta <- c(0.027,0.008)
+   gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2)
+   r    <- matrix(c(45,45,45,45),2,2)
+   mod08 <- list(cif="straussm",
+                     par=list(beta=beta,gamma=gmma,radii=r),
+                     w=square(250))
+   mod08 <- rmhmodel(mod08)
+
+   # specify types
+   mod09 <- rmhmodel(list(cif="straussm",
+                     par=list(beta=beta,gamma=gmma,radii=r),
+                     w=square(250),
+                     types=c("A", "B")))
+
+   # Multitype Strauss hardcore with trends for each type:
+   beta  <- c(0.27,0.08)
+   ri    <- matrix(c(45,45,45,45),2,2)
+   rhc  <- matrix(c(9.1,5.0,5.0,2.5),2,2)
+   # log-quadratic trend
+   tr3 <- function(x,y) { x <- x/250; y <- y/250
+                          exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) }
+   # log-linear trend
+   tr4 <- function(x,y) { x <- x/250; y <- y/250
+                          exp(-0.6*x + 0.5*y) }
+   mod10 <- list(cif="straushm",par=list(beta=beta,gamma=gmma,
+                 iradii=ri,hradii=rhc),w=c(0,250,0,250),
+                 trend=list(tr3,tr4))
+   mod10 <- rmhmodel(mod10)
+
+   # Lookup (interaction function h_2 from page 76, Diggle (2003)):
+   r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0.
+   h <- 20*(r-0.05)
+   h[r<0.05] <- 0
+   h[r>0.10] <- 1
+   mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1))
+   mod17 <- rmhmodel(mod17)
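+
+   # Arguments given via '...' override entries in the list 'model'
+   # (sketch): the window passed directly to rmhmodel() is used,
+   # not the one stored in the list.
+   mod18 <- rmhmodel(list(cif="strauss",
+                          par=list(beta=2,gamma=0.2,r=0.7),
+                          w=square(10)),
+                     w=square(5))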
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rmhmodel.ppm.Rd b/man/rmhmodel.ppm.Rd
new file mode 100644
index 0000000..de99fcf
--- /dev/null
+++ b/man/rmhmodel.ppm.Rd
@@ -0,0 +1,135 @@
+\name{rmhmodel.ppm}
+\alias{rmhmodel.ppm}
+\title{Interpret Fitted Model for Metropolis-Hastings Simulation.}
+\description{
+  Converts a fitted point process model
+  into a format that can be used to simulate the model
+  by the Metropolis-Hastings algorithm. 
+}
+\usage{
+  \method{rmhmodel}{ppm}(model, w, ..., verbose=TRUE, project=TRUE,
+                         control=rmhcontrol(),
+                         new.coef=NULL)
+}
+\arguments{
+  \item{model}{
+    Fitted point process model (object of class \code{"ppm"}).
+  }
+  \item{w}{
+    Optional. Window in which the simulations should be generated.
+  }
+  \item{\dots}{Ignored.}
+  \item{verbose}{
+    Logical flag indicating whether to print progress reports
+    while the model is being converted.
+  }
+  \item{project}{Logical flag indicating what to do if the fitted model
+    does not correspond to a valid point process. See Details.}
+  \item{control}{
+    Parameters determining the iterative behaviour of the simulation
+    algorithm. Passed to \code{\link{rmhcontrol}}.
+  }
+  \item{new.coef}{
+    New values for the canonical parameters of the model.
+    A numeric vector of the same length as \code{coef(model)}.
+  }
+}
+\value{
+  An object of class \code{"rmhmodel"}, which is essentially
+  a list of parameter values for the model.
+  
+  There is a \code{print} method for this class, which prints
+  a sensible description of the model chosen.
+}
+\details{
+  The generic function \code{\link{rmhmodel}} takes a
+  description of a point process model in some format, and
+  converts it into an object of class \code{"rmhmodel"}
+  so that simulations of the model can be generated using
+  the Metropolis-Hastings algorithm \code{\link{rmh}}. 
+  
+  This function \code{rmhmodel.ppm} is the method for
+  the class \code{"ppm"} of fitted point process models.
+
+  The argument \code{model} should be a fitted point process model
+  (object of class \code{"ppm"}) typically obtained from the
+  model-fitting function \code{\link{ppm}}.
+  This will be converted into an object of class \code{"rmhmodel"}.
+
+  The optional argument \code{w} specifies the window
+  in which the pattern is to be generated.  If specified, it must be in
+  a form which can be coerced to an object of class \code{owin}
+  by \code{\link{as.owin}}.
+
+  Not all fitted point process models
+  obtained from \code{\link{ppm}} can be simulated.
+  We have not yet implemented simulation code for
+  the \code{\link{LennardJones}} and \code{\link{OrdThresh}}
+  models.
+
+  It is also possible that a fitted point process model
+  obtained from \code{\link{ppm}}  may not correspond to a valid
+  point process. For example a fitted model with the \code{\link{Strauss}}
+  interpoint interaction may have any value of the interaction parameter
+  \eqn{\gamma}{gamma}; however the Strauss 
+  process is not well-defined for 
+  \eqn{\gamma > 1}{gamma > 1} (Kelly and Ripley, 1976).
+
+  The argument \code{project} determines what to do in such cases.
+  If \code{project=FALSE}, a fatal error will occur.
+  If \code{project=TRUE}, the fitted model parameters will be
+  adjusted to the nearest values which do correspond to a valid
+  point process. For example a Strauss process with \eqn{\gamma >
+    1}{gamma > 1} will be projected to a Strauss process with
+  \eqn{\gamma = 1}{gamma = 1}, equivalent to a Poisson process.
+}  
+\references{
+   Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point
+   Patterns} (2nd ed.) Arnold, London.
+
+   Diggle, P.J. and Gratton, R.J. (1984)
+   Monte Carlo methods of inference for implicit statistical models.
+   \emph{Journal of the Royal Statistical Society, series B}
+   \bold{46}, 193 -- 212.
+
+   Geyer, C.J. (1999)
+   Likelihood Inference for Spatial Point
+   Processes. Chapter 3 in  O.E. Barndorff-Nielsen, W.S. Kendall and
+   M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and
+   Computation}, Chapman and Hall / CRC,  Monographs on Statistics and
+   Applied Probability, number 80. Pages 79--140.
+
+   Kelly, F.P. and Ripley, B.D. (1976)
+   On Strauss's model for clustering.
+   \emph{Biometrika} \bold{63}, 357--360.
+}
+\seealso{
+  \code{\link{rmhmodel}},
+  \code{\link{rmhmodel.list}},
+  \code{\link{rmhmodel.default}},
+  \code{\link{rmh}},
+  \code{\link{rmhcontrol}},
+  \code{\link{rmhstart}},
+  \code{\link{ppm}},
+  \rmhInteractionsList.
+}
+\examples{
+  fit1 <- ppm(cells ~1, Strauss(0.07))
+  mod1 <- rmhmodel(fit1)
+
+  fit2 <- ppm(cells ~x, Geyer(0.07, 2))
+  mod2 <- rmhmodel(fit2)
+
+  fit3 <- ppm(cells ~x, Hardcore(0.07))
+  mod3 <- rmhmodel(fit3)
+
+  # Then rmh(mod1), etc
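+
+  # simulate with modified canonical coefficients
+  # (sketch: halving the fitted coefficients is purely illustrative)
+  mod1b <- rmhmodel(fit1, new.coef=coef(fit1)/2)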
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rmhstart.Rd b/man/rmhstart.Rd
new file mode 100644
index 0000000..015984a
--- /dev/null
+++ b/man/rmhstart.Rd
@@ -0,0 +1,116 @@
+\name{rmhstart}
+\alias{rmhstart}
+\alias{rmhstart.default}
+\title{Determine Initial State for Metropolis-Hastings Simulation.}
+\description{
+  Builds a description of the initial state
+  for the Metropolis-Hastings
+  algorithm. 
+}
+\usage{
+   rmhstart(start, \dots)
+   \method{rmhstart}{default}(start=NULL, \dots, n.start=NULL, x.start=NULL)
+}
+\arguments{
+  \item{start}{An existing description of the initial state in some
+    format. Incompatible with the arguments listed below.
+  }
+  \item{\dots}{There should be no other arguments.}
+  \item{n.start}{
+    Number of initial points (to be randomly generated).
+    Incompatible with \code{x.start}.
+  }
+  \item{x.start}{
+    Initial point pattern configuration.
+    Incompatible with \code{n.start}.
+  }
+}
+\value{
+  An object of class \code{"rmhstart"}, which is essentially
+  a list of parameters describing the initial point pattern
+  and (optionally) the initial state of the random number generator.
+  
+  There is a \code{print} method for this class, which prints
+  a sensible description of the initial state.
+}
+\details{
+  Simulated realisations of many point process models
+  can be generated using the Metropolis-Hastings algorithm
+  implemented in \code{\link{rmh}}.
+  
+  This function \code{rmhstart}
+  creates a full description of the initial state of the
+  Metropolis-Hastings algorithm,
+  \emph{including possibly the initial state of the random number generator},
+  for use in a subsequent call to \code{\link{rmh}}. It also
+  checks that the initial state is valid.
+
+  The initial state should be specified \bold{either} by the
+  first argument \code{start} \bold{or} by the other arguments
+  \code{n.start}, \code{x.start} etc.
+  
+  If \code{start} is a list, then it should have components named 
+  \code{n.start} or \code{x.start},
+  with the same interpretation as described below.
+
+  The arguments are:
+    \describe{
+      \item{n.start}{
+	The number of \dQuote{initial} points to be randomly
+	(uniformly) generated in the simulation window \code{w}.
+	Incompatible with \code{x.start}.
+
+	For a multitype point process, \code{n.start} may be a vector
+	(of length equal to the number of types) giving the number
+	of points of each type to be generated.  
+
+	If expansion of the simulation window is selected (see the argument
+	\code{expand} to \code{\link{rmhcontrol}}),
+	then the actual number of starting points in the simulation
+	will be \code{n.start} multiplied by the expansion factor
+	(ratio of the areas of the expanded window and original window).
+
+	For faster convergence of the Metropolis-Hastings algorithm,
+	the value of \code{n.start} should be roughly equal to
+	(an educated guess at) the expected number of points
+	for the point process inside the window.
+      }
+      \item{x.start}{
+	Initial point pattern configuration. Incompatible with
+	\code{n.start}.
+
+	\code{x.start} may be a point pattern (an object
+	of class \code{ppp}), or an object which can be coerced
+	to this class by \code{\link{as.ppp}}, or a dataset containing
+	vectors \code{x} and \code{y}.  
+
+	If \code{x.start} is specified, then expansion of the
+	simulation window (the argument \code{expand}
+	of \code{\link{rmhcontrol}}) is not permitted.
+      }
+    }
+    The parameters \code{n.start} and \code{x.start} are
+    \emph{incompatible}.
+}  
+\seealso{
+  \code{\link{rmh}},
+  \code{\link{rmhcontrol}},
+  \code{\link{rmhmodel}}
+}
+\examples{
+   # 30 random points
+   a <- rmhstart(n.start=30)
+
+   # a particular point pattern
+   data(cells)
+   b <- rmhstart(x.start=cells)
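+
+   # the same initial state specified as a list (see Details)
+   d <- rmhstart(list(n.start=30))
+
+   # multitype pattern: 20 points of the first type, 10 of the second
+   e <- rmhstart(n.start=c(20,10))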
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rmpoint.Rd b/man/rmpoint.Rd
new file mode 100644
index 0000000..9e15d1c
--- /dev/null
+++ b/man/rmpoint.Rd
@@ -0,0 +1,300 @@
+\name{rmpoint}
+\alias{rmpoint}
+\title{Generate N Random Multitype Points}
+\description{
+  Generate a random multitype point pattern
+  with a fixed number of points, or a fixed number of points of each type.
+}
+\usage{
+ rmpoint(n, f=1, fmax=NULL, win=unit.square(),
+         types, ptypes,
+         \dots, giveup=1000, verbose=FALSE,
+         nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{n}{
+    Number of marked points to generate.
+    Either a single number specifying the total number of points,
+    or a vector specifying the number of points of each type.
+  }
+  \item{f}{
+    The probability density of the multitype points,
+    usually un-normalised.
+    Either a constant, a vector,
+    a function \code{f(x,y,m, ...)}, a pixel image,
+    a list of functions \code{f(x,y,...)}
+    or a list of pixel images.
+  }
+  \item{fmax}{
+    An upper bound on the values of \code{f}.
+    If missing, this number will be estimated.
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    Ignored if \code{f} is a pixel image or list of pixel images.
+  }
+  \item{types}{
+    All the possible types for the multitype pattern. 
+  }
+  \item{ptypes}{
+    Optional vector of probabilities for each type.
+  }
+  \item{\dots}{
+    Arguments passed to \code{f} if it is a function.
+  }
+  \item{giveup}{
+    Number of attempts in the rejection method after which
+    the algorithm should stop trying to generate new points.
+  }
+  \item{verbose}{
+    Flag indicating whether to report details of performance
+    of the simulation algorithm.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"}) if \code{nsim=1},
+  or a list of point patterns if \code{nsim > 1}.
+}
+\details{
+  This function generates random multitype point patterns
+  consisting of a fixed number of points.
+  
+  Three different models are available:
+  \describe{
+    \item{I. Random location and type:}{
+      If \code{n} is a single number and the argument \code{ptypes} is missing,
+      then \code{n} independent, identically distributed
+      random multitype points are generated.
+      Their locations
+      \code{(x[i],y[i])} and types \code{m[i]} have
+      joint probability density proportional to \eqn{f(x,y,m)}.
+    }
+    \item{II. Random type, and random location given type:}{
+      If \code{n} is a single number and \code{ptypes} is given,
+      then \code{n} independent, identically distributed
+      random multitype points are generated.
+      Their types \code{m[i]} have probability distribution
+      \code{ptypes}. Given the types, the locations \code{(x[i],y[i])}
+      have conditional probability density proportional to
+      \eqn{f(x,y,m)}.
+    }
+    \item{III. Fixed types, and random location given type:}{
+      If \code{n} is a vector, then we generate \code{n[i]}
+      independent, identically distributed random points of type
+      \code{types[i]}. For points of type \eqn{m} the conditional probability
+      density of location \eqn{(x,y)} is proportional to
+      \eqn{f(x,y,m)}.
+    }
+  }
+  Note that the density \code{f} is normalised in different ways
+  in Model I and Models II and III. In Model I the normalised
+  joint density is \eqn{g(x,y,m)=f(x,y,m)/Z} where
+  \deqn{
+    Z = \sum_m \int\int f(x,y,m) {\rm d}x \, {\rm d}y
+  }{
+    Z = sum_[m] integral f(x,y,m) dx dy
+  }
+  while in Models II and III the normalised conditional density
+  is \eqn{g(x,y\mid m) = f(x,y,m)/Z_m}{g(x,y|m) = f(x,y,m)/Z[m]}
+  where
+  \deqn{
+    Z_m = \int\int f(x,y,m)  {\rm d}x \, {\rm d}y.
+  }{
+    Z[m] = integral f(x,y,m) dx dy.
+  }
+  In Model I, the marginal distribution of types
+  is \eqn{p_m = Z_m/Z}{p[m] = Z[m]/Z}.
+  
+  The unnormalised density \code{f} may be specified
+  in any of the following ways.
+  \describe{
+    \item{single number:}{
+      If \code{f} is a single number, the conditional density of
+      location given type is uniform. That is, the points of each type
+      are uniformly distributed.
+      In Model I, the marginal distribution of types is also uniform
+      (all possible types have equal probability).
+    }
+    \item{vector:}{
+      If \code{f} is a numeric vector, the conditional density of
+      location given type is uniform. That is, the points of each type
+      are uniformly distributed.
+      In Model I, the marginal distribution of types is
+      proportional to the vector \code{f}. In Model II, the marginal
+      distribution of types is \code{ptypes}, that is, the values in
+      \code{f} are ignored.
+      The argument \code{types} defaults to \code{names(f)},
+      or if that is null, \code{1:length(f)}.
+    }
+    \item{function:}{
+      If \code{f} is a function, it will be called in the form
+      \code{f(x,y,m,\dots)} at spatial location \code{(x,y)}
+      for points of type \code{m}.
+      In Model I, the joint probability density of location and type is
+      proportional to \code{f(x,y,m,\dots)}.
+      In Models II and III, the conditional probability density of
+      location \code{(x,y)} given type \code{m} is
+      proportional to \code{f(x,y,m,\dots)}.
+      The function \code{f} must work correctly with vectors \code{x},
+      \code{y} and \code{m}, returning a vector of function values.
+      (Note that \code{m} will be a factor
+      with levels \code{types}.)
+      The value \code{fmax} must be given and must be an upper bound on the 
+      values of \code{f(x,y,m,\dots)} for all locations \code{(x, y)}
+      inside the window \code{win} and all types \code{m}.
+      The argument \code{types} must be given.
+    }
+    \item{list of functions:}{
+      If \code{f} is a list of functions, then the functions will be
+      called in the form \code{f[[i]](x,y,\dots)} at spatial
+      location \code{(x,y)} for points of type \code{types[i]}.
+      In Model I, the joint probability density of location and type is
+      proportional to \code{f[[m]](x,y,\dots)}.
+      In Models II and III, the conditional probability density of
+      location \code{(x,y)} given type \code{m} is
+      proportional to \code{f[[m]](x,y,\dots)}.
+      The function \code{f[[i]]} must work correctly with vectors
+      \code{x} and \code{y}, returning a vector of function values.
+      The value \code{fmax} must be given and must be an upper bound on the 
+      values of \code{f[[i]](x,y,\dots)} for all locations \code{(x, y)}
+      inside the window \code{win}. 
+      The argument \code{types} defaults to \code{names(f)},
+      or if that is null, \code{1:length(f)}.
+    }
+    \item{pixel image:}{
+      If \code{f} is a pixel image object of class \code{"im"}
+      (see \code{\link{im.object}}), the unnormalised density at a location
+      \code{(x,y)} for points of any type is equal to the pixel value
+      of \code{f} for the pixel nearest to \code{(x,y)}.
+      In Model I, the marginal distribution of types is uniform.
+      The argument \code{win} is ignored;
+      the window of the pixel image is used instead.
+      The argument \code{types} must be given.
+    }
+    \item{list of pixel images:}{
+      If \code{f} is a list of pixel images,
+      then the image \code{f[[i]]} determines the density values
+      of points of type \code{types[i]}. 
+      The argument \code{win} is ignored;
+      the window of the pixel image is used instead.
+      The argument \code{types} defaults to \code{names(f)},
+      or if that is null, \code{1:length(f)}.
+    }
+  }
+  The implementation uses the rejection method.
+  For Model I, \code{\link{rmpoispp}} is called repeatedly
+  until \code{n} points have been generated.
+  It gives up after \code{giveup} calls
+  if there are still fewer than \code{n} points.
+  For Model II, the types are first generated according to
+  \code{ptypes}, then
+  the locations of the points of each type
+  are generated using \code{\link{rpoint}}.
+  For Model III,   the locations of the points of each type
+  are generated using \code{\link{rpoint}}.
+}
+\seealso{
+\code{\link{ppp.object}},
+\code{\link{owin.object}}
+}
+\examples{
+
+abc <- c("a","b","c")
+
+##### Model I
+
+rmpoint(25, types=abc)
+rmpoint(25, 1, types=abc)
+# 25 points, equal probability for each type, uniformly distributed locations
+
+rmpoint(25, function(x,y,m) {rep(1, length(x))}, types=abc)
+# same as above
+rmpoint(25, list(function(x,y){rep(1, length(x))},
+                 function(x,y){rep(1, length(x))},
+                 function(x,y){rep(1, length(x))}),
+             types=abc)
+# same as above
+
+rmpoint(25, function(x,y,m) { x }, types=abc)
+# 25 points, equal probability for each type,
+# locations nonuniform with density proportional to x
+
+rmpoint(25, function(x,y,m) { ifelse(m == "a", 1, x) }, types=abc)
+rmpoint(25, list(function(x,y) { rep(1, length(x)) },
+                function(x,y) { x },
+                function(x,y) { x }),
+                types=abc)
+# 25 points, UNEQUAL probabilities for each type,
+# type "a" points uniformly distributed,
+# type "b" and "c" points nonuniformly distributed.
+
+##### Model II
+
+rmpoint(25, 1, types=abc, ptypes=rep(1,3)/3)
+rmpoint(25, 1, types=abc, ptypes=rep(1,3))
+# 25 points, equal probability for each type,
+# uniformly distributed locations
+
+rmpoint(25, function(x,y,m) {rep(1, length(x))}, types=abc, ptypes=rep(1,3))
+# same as above
+rmpoint(25, list(function(x,y){rep(1, length(x))},
+                 function(x,y){rep(1, length(x))},
+                 function(x,y){rep(1, length(x))}),
+             types=abc, ptypes=rep(1,3))
+# same as above
+
+rmpoint(25, function(x,y,m) { x }, types=abc, ptypes=rep(1,3))
+# 25 points, equal probability for each type,
+# locations nonuniform with density proportional to x
+
+rmpoint(25, function(x,y,m) { ifelse(m == "a", 1, x) }, types=abc, ptypes=rep(1,3))
+# 25 points, EQUAL probabilities for each type,
+# type "a" points uniformly distributed,
+# type "b" and "c" points nonuniformly distributed.
+
+###### Model III
+
+rmpoint(c(12, 8, 4), 1, types=abc)
+# 12 points of type "a",
+# 8 points of type "b",
+# 4 points of type "c",
+# each uniformly distributed
+
+rmpoint(c(12, 8, 4), function(x,y,m) { ifelse(m=="a", 1, x)}, types=abc)
+rmpoint(c(12, 8, 4), list(function(x,y) { rep(1, length(x)) },
+                               function(x,y) { x },
+                               function(x,y) { x }),
+             types=abc)
+
+# 12 points of type "a", uniformly distributed
+# 8 points of type "b", nonuniform
+# 4 points of type "c", nonuniform
+
+
+#########
+
+## Randomising an existing point pattern:
+# same numbers of points of each type, uniform random locations (Model III)
+rmpoint(table(marks(demopat)), 1, win=Window(demopat))
+
+# same total number of points, distribution of types estimated from X,
+# uniform random locations (Model II)
+rmpoint(npoints(demopat), 1, types=levels(marks(demopat)), win=Window(demopat),
+       ptypes=table(marks(demopat)))
+
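+##### Pixel image densities
+
+# sketch: an image gives the unnormalised density of locations;
+# for a single image the argument 'types' must be supplied
+Z <- as.im(function(x,y){ x + y }, unit.square())
+rmpoint(30, Z, types=abc)
+
+# list of images, one density per type; types default to names(f)
+rmpoint(c(10, 10, 10), list(a=Z, b=Z, c=Z))
+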
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rmpoispp.Rd b/man/rmpoispp.Rd
new file mode 100644
index 0000000..04f9a28
--- /dev/null
+++ b/man/rmpoispp.Rd
@@ -0,0 +1,209 @@
+\name{rmpoispp}
+\alias{rmpoispp}
+\title{Generate Multitype Poisson Point Pattern}
+\description{
+  Generate a random point pattern, a realisation of the
+  (homogeneous or inhomogeneous) multitype Poisson process.
+}
+\usage{
+ rmpoispp(lambda, lmax=NULL, win, types, \dots,
+          nsim=1, drop=TRUE, warnwin=!missing(win))
+}
+\arguments{
+  \item{lambda}{
+    Intensity of the multitype Poisson process.
+    Either a single positive number, a vector, a \code{function(x,y,m, \dots)},
+    a pixel image, a list of functions \code{function(x,y, \dots)},
+    or a list of pixel images.
+  }
+  \item{lmax}{
+    An upper bound for the value of \code{lambda}.
+    May be omitted
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    An object of class \code{"owin"}
+    or something acceptable to \code{\link{as.owin}}.
+    Ignored if \code{lambda} is a pixel image or list of images.
+  }
+  \item{types}{
+    All the possible types for the multitype pattern.
+  }
+  \item{\dots}{
+    Arguments passed to \code{lambda} if it is a function.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+  \item{warnwin}{
+    Logical value specifying whether to issue a warning
+    when \code{win} is ignored.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"}) if \code{nsim=1},
+  or a list of point patterns if \code{nsim > 1}.
+  Each point pattern is multitype (it carries a vector of marks
+  which is a factor).
+}
+\details{
+  This function generates a realisation of the marked Poisson
+  point process with intensity \code{lambda}.
+  
+  Note that the intensity function
+  \eqn{\lambda(x,y,m)}{lambda(x,y,m)} is the
+  average number of points \bold{of type m} per unit area
+  near the location \eqn{(x,y)}.
+  Thus a marked point process with a constant intensity of 10
+  and three possible types will have an average of 30 points per unit
+  area, with 10 points of each type on average.
+
+  The intensity function may be specified in any of the following ways.
+  \describe{
+    \item{single number:}{
+      If \code{lambda} is a single number,
+      then this algorithm generates a realisation
+      of the uniform marked Poisson process inside the window \code{win} with 
+      intensity \code{lambda} for each type. The total intensity of
+      points of all types is \code{lambda * length(types)}.
+      The argument \code{types} must be given
+      and determines the possible types in the multitype pattern.
+    }
+    \item{vector:}{
+      If \code{lambda} is a numeric vector,
+      then this algorithm generates a realisation
+      of the stationary marked Poisson process inside the window
+      \code{win} with intensity \code{lambda[i]} for points of type
+      \code{types[i]}. The total intensity of points of all types
+      is \code{sum(lambda)}.
+      The argument \code{types} defaults to
+      \code{names(lambda)}, or if that is null, \code{1:length(lambda)}.
+    }
+    \item{function:}{
+      If \code{lambda} is a function, the process has intensity
+      \code{lambda(x,y,m,\dots)} at spatial location \code{(x,y)}
+      for points of type \code{m}.
+      The function \code{lambda} must work correctly with vectors \code{x},
+      \code{y} and \code{m}, returning a vector of function values.
+      (Note that \code{m} will be a factor
+      with levels equal to \code{types}.)
+      The value \code{lmax}, if present, must be an upper bound on the 
+      values of \code{lambda(x,y,m,\dots)} for all locations \code{(x, y)}
+      inside the window \code{win} and all types \code{m}.
+      The argument \code{types} must be given.
+    }
+    \item{list of functions:}{
+      If \code{lambda} is a list of functions,
+      the process has intensity \code{lambda[[i]](x,y,\dots)} at spatial
+      location \code{(x,y)} for points of type \code{types[i]}.
+      The function \code{lambda[[i]]} must work correctly with vectors
+      \code{x} and \code{y}, returning a vector of function values.
+      The value \code{lmax}, if given, must be an upper bound on the 
+      values of \code{lambda(x,y,\dots)} for all locations \code{(x, y)}
+      inside the window \code{win}. 
+      The argument \code{types} defaults to
+      \code{names(lambda)}, or if that is null, \code{1:length(lambda)}.
+    }
+    \item{pixel image:}{
+      If \code{lambda} is a pixel image object of class \code{"im"}
+      (see \code{\link{im.object}}), the intensity at a location
+      \code{(x,y)} for points of any type is equal to the pixel value
+      of \code{lambda} for the pixel nearest to \code{(x,y)}.
+      The argument \code{win} is ignored;
+      the window of the pixel image is used instead.
+      The argument \code{types} must be given.
+    }
+    \item{list of pixel images:}{
+      If \code{lambda} is a list of pixel images,
+      then the image \code{lambda[[i]]} determines the intensity
+      of points of type \code{types[i]}. 
+      The argument \code{win} is ignored;
+      the window of the pixel image is used instead.
+      The argument \code{types} defaults to
+      \code{names(lambda)}, or if that is null, \code{1:length(lambda)}.
+    }
+  }
+  If \code{lmax} is missing, an approximate upper bound will be calculated.
+  
+  To generate an inhomogeneous Poisson process
+  the algorithm uses ``thinning'': it first generates a uniform
+  Poisson process of intensity \code{lmax} for points of each type \code{m},
+  then randomly deletes or retains each point independently,
+  with retention probability
+  \eqn{p(x,y,m) = \lambda(x,y,m)/\mbox{lmax}}{p(x,y,m) = lambda(x,y,m)/lmax}.
+}
+\seealso{
+  \code{\link{rpoispp}} for unmarked Poisson point process;
+  \code{\link{rmpoint}} for a fixed number of random marked points;
+  \code{\link{ppp.object}},
+  \code{\link{owin.object}}.
+}
+\examples{
+ # uniform bivariate Poisson process with total intensity 100 in unit square
+ pp <- rmpoispp(50, types=c("a","b"))
+ 
+ # stationary bivariate Poisson process with intensity A = 30, B = 70
+ pp <- rmpoispp(c(30,70), types=c("A","B"))
+ pp <- rmpoispp(c(30,70))
+
+ # works in any window
+ data(letterR)
+ pp <- rmpoispp(c(30,70), win=letterR, types=c("A","B"))
+
+ # inhomogeneous lambda(x,y,m)
+ # note argument 'm' is a factor 
+ lam <- function(x,y,m) { 50 * (x^2 + y^3) * ifelse(m=="A", 2, 1)}
+ pp <- rmpoispp(lam, win=letterR, types=c("A","B"))
+ # extra arguments
+ lam <- function(x,y,m,scal) { scal * (x^2 + y^3) * ifelse(m=="A", 2, 1)}
+ pp <- rmpoispp(lam, win=letterR, types=c("A","B"), scal=50)
+
+ # list of functions lambda[[i]](x,y)
+ lams <- list(function(x,y){50 * x^2}, function(x,y){20 * abs(y)})
+ pp <- rmpoispp(lams, win=letterR, types=c("A","B"))
+ pp <- rmpoispp(lams, win=letterR)
+ # functions with extra arguments
+ lams <- list(function(x,y,scal){5 * scal * x^2},
+              function(x,y, scal){2 * scal * abs(y)})
+ pp <- rmpoispp(lams, win=letterR, types=c("A","B"), scal=10)
+ pp <- rmpoispp(lams, win=letterR, scal=10)
+
+ # florid example
+ lams <- list(
+              # log-quadratic trend
+              function(x,y){ 100*exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) },
+              # log-linear trend
+              function(x,y){ 100*exp(-0.6*x + 0.5*y) }
+             )
+ X <- rmpoispp(lams, win=unit.square(), types=c("on", "off"))
+
+ # pixel image
+ Z <- as.im(function(x,y){30 * (x^2 + y^3)}, letterR)
+ pp <- rmpoispp(Z, types=c("A","B"))
+
+ # list of pixel images
+ ZZ <- list(
+          as.im(function(x,y){20 * (x^2 + y^3)}, letterR),
+          as.im(function(x,y){40 * (x^3 + y^2)}, letterR))
+ pp <- rmpoispp(ZZ, types=c("A","B"))
+ pp <- rmpoispp(ZZ)
+
+ # randomising an existing point pattern
+ rmpoispp(intensity(amacrine), win=Window(amacrine))
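+
+ # sketch of the thinning algorithm used internally, for one type:
+ # generate uniform Poisson points at rate lmax, then retain each
+ # point independently with probability lambda(x,y)/lmax
+ lmax <- 100
+ lam <- function(x,y) { 100 * x }
+ U <- rpoispp(lmax, win=square(1))
+ retain <- runif(npoints(U)) < lam(U$x, U$y)/lmax
+ X <- U[retain]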
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rnoise.Rd b/man/rnoise.Rd
new file mode 100644
index 0000000..2e6c235
--- /dev/null
+++ b/man/rnoise.Rd
@@ -0,0 +1,69 @@
+\name{rnoise}
+\alias{rnoise}
+\title{
+  Random Pixel Noise
+}
+\description{
+  Generate a pixel image whose pixel values are random numbers
+  following a specified probability distribution.
+}
+\usage{
+rnoise(rgen = runif, w = square(1), \dots)
+}
+\arguments{
+  \item{rgen}{
+    Random generator for the pixel values.
+    A function in the \R language.
+  }
+  \item{w}{
+    Window (region or pixel raster) in which to generate the image.
+    Any data acceptable to \code{\link{as.mask}}.
+  }
+  \item{\dots}{
+    Arguments, matched by name,
+    to be passed to \code{rgen} to specify the parameters of the
+    probability distribution, or passed to \code{\link{as.mask}}
+    to control the pixel resolution.
+  }
+}
+\details{
+  The argument \code{w} could be a window (class \code{"owin"}),
+  a pixel image (class \code{"im"}) or other data. It is 
+  first converted to a binary mask by \code{\link{as.mask}}
+  using any relevant arguments in \code{\dots}.
+  
+  Then each pixel inside the window (i.e. with logical value
+  \code{TRUE} in the mask) is assigned a random numerical value
+  by calling the function \code{rgen}.
+
+  The function \code{rgen} would typically be one of the standard
+  random variable generators like \code{\link{runif}} (uniformly
+  distributed random values) or \code{\link{rnorm}} (Gaussian random
+  values). Its first argument \code{n} is the number of values to be
+  generated. Other arguments to \code{rgen} must be matched by name.
+}
+\value{
+  A pixel image (object of class \code{"im"}).
+}
+\author{
+  \adrian,
+  \rolf
+  and \ege
+}
+\seealso{
+  \code{\link{as.mask}},
+  \code{\link{as.im}},
+  \code{\link[stats]{Distributions}}.
+}
+\examples{
+  plot(rnoise(), main="Uniform noise")
+  plot(rnoise(rnorm, dimyx=32, mean=2, sd=1),
+       main="White noise")
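+  # other generators and windows (sketch): 'rate' is passed to rexp,
+  # 'dimyx' to as.mask
+  plot(rnoise(rexp, dimyx=64, rate=2), main="Exponential noise")
+  plot(rnoise(runif, w=letterR, dimyx=64), main="Uniform noise in letterR")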
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/roc.Rd b/man/roc.Rd
new file mode 100644
index 0000000..1700624
--- /dev/null
+++ b/man/roc.Rd
@@ -0,0 +1,107 @@
+\name{roc}
+\alias{roc}
+\alias{roc.ppp}
+\alias{roc.lpp}
+\alias{roc.ppm}
+\alias{roc.kppm}
+\alias{roc.lppm}
+\title{
+  Receiver Operating Characteristic
+}
+\description{
+  Computes the Receiver Operating Characteristic curve
+  for a point pattern or a fitted point process model.
+}
+\usage{
+ roc(X, \dots)
+
+\method{roc}{ppp}(X, covariate, \dots, high = TRUE)
+
+\method{roc}{ppm}(X, \dots)
+
+\method{roc}{kppm}(X, \dots)
+
+\method{roc}{lpp}(X, covariate, \dots, high = TRUE)
+
+\method{roc}{lppm}(X, \dots)
+}
+\arguments{
+  \item{X}{
+    Point pattern (object of class \code{"ppp"} or \code{"lpp"})
+    or fitted point process model
+    (object of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"}).
+  }
+  \item{covariate}{
+    Spatial covariate. Either a \code{function(x,y)},
+    a pixel image (object of class \code{"im"}), or
+    one of the strings \code{"x"} or \code{"y"} indicating the
+    Cartesian coordinates.    
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}} controlling the
+    pixel resolution for calculations.
+  }
+  \item{high}{
+    Logical value indicating whether the threshold operation
+    should favour high or low values of the covariate.
+  }
+}
+\details{
+  This command computes the Receiver Operating
+  Characteristic (ROC) curve. The area under the ROC curve
+  is computed by \code{\link{auc}}.
+
+  For a point pattern \code{X} and a covariate \code{Z}, the
+  ROC is a plot showing the ability of the 
+  covariate to separate the spatial domain
+  into areas of high and low density of points.
+  For each possible threshold \eqn{z}, the algorithm calculates
+  the fraction \eqn{a(z)} of area in the study region where the
+  covariate takes a value greater than \eqn{z}, and the
+  fraction \eqn{b(z)} of data points for which the covariate value
+  is greater than \eqn{z}. The ROC is a plot of \eqn{b(z)} against
+  \eqn{a(z)} for all thresholds \eqn{z}. 
+  
+  For a fitted point process model, 
+  the ROC shows the ability of the
+  fitted model intensity to separate the spatial domain
+  into areas of high and low density of points.
+  The ROC is \bold{not} a diagnostic for the goodness-of-fit of the model
+  (Lobo et al, 2007).
+}
+\value{
+  Function value table (object of class \code{"fv"})
+  which can be plotted to show the ROC curve.
+}
+\references{
+  Lobo, J.M.,
+  \ifelse{latex}{\out{Jim{\'e}nez}}{Jimenez}-Valverde, A.
+  and Real, R. (2007)
+  AUC: a misleading measure of the performance of predictive
+  distribution models.
+  \emph{Global Ecology and Biogeography} \bold{17}(2) 145--151.
+
+  Nam, B.-H. and D'Agostino, R. (2002)
+  Discrimination index, the area under the {ROC} curve.
+  Pages 267--279 in 
+  Huber-Carol, C., Balakrishnan, N., Nikulin, M.S. 
+  and Mesbah, M., \emph{Goodness-of-fit tests and model validity},
+  \ifelse{latex}{\out{Birkh{\"a}user}}{Birkhauser}, Basel.
+}
+\author{
+  \adrian,
+  \rolf
+  and \ege
+}
+\seealso{
+\code{\link{auc}}
+}
+\examples{
+  plot(roc(swedishpines, "x"))
+  fit <- ppm(swedishpines ~ x+y)
+  plot(roc(fit))
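+  # covariate supplied as a pixel image (sketch: a kernel estimate
+  # of intensity used as the covariate)
+  plot(roc(swedishpines, density(swedishpines)))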
+}
+\keyword{spatial}
diff --git a/man/rose.Rd b/man/rose.Rd
new file mode 100644
index 0000000..a2c48d2
--- /dev/null
+++ b/man/rose.Rd
@@ -0,0 +1,153 @@
+\name{rose}
+\alias{rose}
+\alias{rose.default}
+\alias{rose.histogram}
+\alias{rose.density}
+\alias{rose.fv}
+\title{Rose Diagram}
+\description{
+  Plots a rose diagram (rose of directions),
+  the analogue of a histogram or density plot for angular data.
+}
+\usage{
+rose(x, \dots)
+
+\method{rose}{default}(x, breaks = NULL, \dots,
+                       weights=NULL,
+                       nclass = NULL,
+                       unit = c("degree", "radian"),
+                       start=0, clockwise=FALSE,
+                       main)
+
+\method{rose}{histogram}(x, \dots,
+                       unit = c("degree", "radian"),
+                       start=0, clockwise=FALSE,
+                       main, labels=TRUE, at=NULL, do.plot = TRUE)
+
+\method{rose}{density}(x, \dots, 
+                  unit = c("degree", "radian"), 
+                  start=0, clockwise=FALSE,
+                  main, labels=TRUE, at=NULL, do.plot = TRUE)
+
+\method{rose}{fv}(x, \dots, 
+                  unit = c("degree", "radian"),
+                  start=0, clockwise=FALSE,
+                  main, labels=TRUE, at=NULL, do.plot = TRUE)
+}
+\arguments{
+  \item{x}{
+    Data to be plotted.
+    A numeric vector containing angles,
+    or a \code{histogram} object containing a histogram of
+    angular values, or a \code{density} object containing a
+    smooth density estimate for angular data,
+    or an \code{fv} object giving a function
+    of an angular argument.
+  }
+  \item{breaks, nclass}{
+    Arguments passed to \code{\link[graphics]{hist}} to determine
+    the histogram breakpoints.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link[graphics]{polygon}}
+    controlling the appearance of the plot
+    (or passed from \code{rose.default} to \code{\link[graphics]{hist}}
+    to control the calculation of the histogram).
+  }
+  \item{unit}{
+    The unit in which the angles are expressed.
+  }
+  \item{start}{
+    The starting direction for measurement of angles,
+    that is, the spatial direction which corresponds to a measured angle
+    of zero. Either a character string giving a compass direction
+    (\code{"N"} for north, \code{"S"} for south, \code{"E"} for east,
+    or \code{"W"} for west) or a number giving the angle from the
+    horizontal (East) axis to the starting direction. For example,
+    if \code{unit="degree"} and \code{clockwise=FALSE},
+    then \code{start=90} and \code{start="N"} are equivalent.
+    The default is to measure angles anti-clockwise from the
+    horizontal axis (East direction).
+  }
+  \item{clockwise}{
+    Logical value indicating whether angles increase in the clockwise
+    direction (\code{clockwise=TRUE}) or in the anti-clockwise
+    (counter-clockwise) direction (\code{clockwise=FALSE}, the default).
+  }
+  \item{weights}{
+    Optional vector of numeric weights associated with \code{x}.
+  }
+  \item{main}{
+    Optional main title for the plot.
+  }
+  \item{labels}{
+    Either a logical value indicating whether to plot labels
+    next to the tick marks, or a vector of labels for the tick marks.
+  }
+  \item{at}{
+    Optional vector of angles at which tick marks should be plotted.
+    Set \code{at=numeric(0)} to suppress tick marks.
+  }
+  \item{do.plot}{
+    Logical value indicating whether to actually perform the plot.
+  }
+}
+\details{
+  A rose diagram or rose of directions is the analogue of
+  a histogram or bar chart for data which represent angles
+  in two dimensions. The bars of the bar chart are replaced by
+  circular sectors in the rose diagram.
+
+  The function \code{rose} is generic, with a default method
+  for numeric data, and methods for histograms and function tables.
+  
+  If \code{x} is a numeric vector, it must contain angular values
+  in the range 0 to 360 (if \code{unit="degree"})
+  or in the range 0 to \code{2 * pi} (if \code{unit="radian"}).
+  A histogram of the data will first be computed using
+  \code{\link[graphics]{hist}}. Then the rose diagram of this histogram
+  will be plotted by \code{rose.histogram}.
+
+  If \code{x} is an object of class \code{"histogram"} produced by
+  the function \code{\link[graphics]{hist}}, representing the histogram
+  of angular data, then the rose diagram of the densities
+  (rather than the counts) in this histogram object will be plotted. 
+
+  If \code{x} is an object of class \code{"density"} produced by
+  \code{\link{circdensity}} or \code{\link[stats]{density.default}},
+  representing a kernel smoothed density estimate of angular data,
+  then the rose diagram of the density estimate will be plotted. 
+  
+  If \code{x} is a function value table (object of class \code{"fv"})
+  then the argument of the function will be interpreted as an angle,
+  and the value of the function will be interpreted as the radius.
+
+  By default, angles are interpreted using the mathematical convention
+  where the zero angle is the horizontal \eqn{x} axis, and angles
+  increase anti-clockwise. Other conventions can be specified
+  using the arguments \code{start} and \code{clockwise}.
+  Standard compass directions are obtained by setting \code{unit="degree"},
+  \code{start="N"} and \code{clockwise=TRUE}.  
+}
+\value{A window (class \code{"owin"}) containing the plotted region.}
+\author{
+  \adrian,
+  \rolf
+  and \ege
+}
+\seealso{
+  \code{\link{fv}}, \code{\link[graphics]{hist}},
+  \code{\link{circdensity}},
+  \code{\link[stats]{density.default}}.
+}
+\examples{
+  ang <- runif(1000, max=360)
+  rose(ang, col="grey")
+  rose(ang, col="grey", start="N", clockwise=TRUE)
+}
+\keyword{spatial}
+\keyword{hplot}
+
diff --git a/man/rotate.Rd b/man/rotate.Rd
new file mode 100644
index 0000000..5ae2a96
--- /dev/null
+++ b/man/rotate.Rd
@@ -0,0 +1,37 @@
+\name{rotate}
+\alias{rotate}
+\title{Rotate}
+\description{
+  Applies a rotation to any two-dimensional object,
+  such as a point pattern or a window. 
+}
+\usage{
+  rotate(X, \dots)
+}
+\arguments{
+  \item{X}{Any suitable dataset representing a two-dimensional
+    object, such as a point pattern (object of class \code{"ppp"}),
+    or a window (object of class \code{"owin"}).}
+  \item{\dots}{Data specifying the rotation.}
+}
+\value{
+  Another object of the same type, representing the
+  result of rotating \code{X} through the specified angle.
+}
+\details{
+  This is generic. Methods are provided for
+  point patterns (\code{\link{rotate.ppp}})
+  and windows (\code{\link{rotate.owin}}).
+}
+\seealso{
+  \code{\link{rotate.ppp}},
+  \code{\link{rotate.owin}}
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/rotate.im.Rd b/man/rotate.im.Rd
new file mode 100644
index 0000000..2462ba7
--- /dev/null
+++ b/man/rotate.im.Rd
@@ -0,0 +1,51 @@
+\name{rotate.im}
+\alias{rotate.im}
+\title{Rotate a Pixel Image}
+\description{
+  Rotates a pixel image
+}
+\usage{
+ \method{rotate}{im}(X, angle=pi/2, \dots, centre=NULL)
+}
+\arguments{
+  \item{X}{A pixel image (object of class \code{"im"}).}
+  \item{angle}{Angle of rotation, in radians.}
+  \item{\dots}{Ignored.}
+  \item{centre}{
+    Centre of rotation.
+    Either a vector of length 2, or a character string
+    (partially matched to \code{"centroid"}, \code{"midpoint"}
+    or \code{"bottomleft"}).
+    The default is the coordinate origin \code{c(0,0)}.
+  }
+}
+\value{
+  Another object of class \code{"im"} representing the
+  rotated pixel image.
+}
+\details{
+  The image is rotated by the angle specified.
+  Angles are measured in radians, anticlockwise.
+  The default is to rotate the image 90 degrees anticlockwise. 
+}
+\seealso{
+  \code{\link{affine.im}},
+  \code{\link{shift.im}},
+  \code{\link{rotate}}
+}
+\examples{
+  Z <- distmap(letterR)
+  X <- rotate(Z)
+  \dontrun{
+  plot(X)
+  }
+  Y <- rotate(X, centre="midpoint")
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/rotate.infline.Rd b/man/rotate.infline.Rd
new file mode 100644
index 0000000..a9b48c1
--- /dev/null
+++ b/man/rotate.infline.Rd
@@ -0,0 +1,82 @@
+\name{rotate.infline}
+\alias{rotate.infline}
+\alias{shift.infline}
+\alias{reflect.infline}
+\alias{flipxy.infline}
+\title{
+  Rotate or Shift Infinite Lines
+}
+\description{
+  Given the coordinates of one or more infinite straight lines in the
+  plane, apply a rotation or shift.
+}
+\usage{
+\method{rotate}{infline}(X, angle = pi/2, \dots)
+
+\method{shift}{infline}(X, vec = c(0,0), \dots)
+
+\method{reflect}{infline}(X)
+
+\method{flipxy}{infline}(X)
+}
+\arguments{
+  \item{X}{
+    Object of class \code{"infline"} representing one or more
+    infinite straight lines in the plane.
+}
+  \item{angle}{
+    Angle of rotation, in radians.
+  }
+  \item{vec}{
+    Translation (shift) vector: a numeric vector of length 2,
+    or a \code{list(x,y)}, or a point pattern containing one point.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  These functions are methods for the generic
+  \code{\link{shift}}, \code{\link{rotate}},
+  \code{\link{reflect}} and \code{\link{flipxy}}
+  for the class \code{"infline"}.
+
+  An object of class \code{"infline"} represents one or more
+  infinite lines in the plane.
+}
+\value{
+  Another \code{"infline"} object representing the
+  result of the transformation.
+}
+\author{
+  \adrian.
+}
+\seealso{
+  \code{\link{infline}}
+}
+\examples{
+  L <- infline(v=0.5)
+
+  plot(square(c(-1,1)), main="rotate lines", type="n")
+  points(0, 0, pch=3)
+  plot(L, col="green")
+  plot(rotate(L, pi/12), col="red")
+  plot(rotate(L, pi/6), col="red")
+  plot(rotate(L, pi/4), col="red")
+
+
+  plot(square(c(-1,1)), main="shift lines", type="n")
+  L <- infline(p=c(0.7, 0.8), theta=pi* c(0.2, 0.6))
+  plot(L, col="green")
+  plot(shift(L, c(-0.5, -0.4)), col="red")
+
+  plot(square(c(-1,1)), main="reflect lines", type="n")
+  points(0, 0, pch=3)
+  L <- infline(p=c(0.7, 0.8), theta=pi* c(0.2, 0.6))
+  plot(L, col="green")
+  plot(reflect(L), col="red")
+
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/rotate.owin.Rd b/man/rotate.owin.Rd
new file mode 100644
index 0000000..2f76f33
--- /dev/null
+++ b/man/rotate.owin.Rd
@@ -0,0 +1,60 @@
+\name{rotate.owin}
+\alias{rotate.owin}
+\title{Rotate a Window}
+\description{
+  Rotates a window
+}
+\usage{
+ \method{rotate}{owin}(X, angle=pi/2, \dots, rescue=TRUE, centre=NULL)
+}
+\arguments{
+  \item{X}{A window (object of class \code{"owin"}).}
+  \item{angle}{Angle of rotation.}
+  \item{rescue}{
+    Logical. If \code{TRUE}, the rotated window
+    will be processed by \code{\link{rescue.rectangle}}.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link{as.mask}}
+    controlling the resolution of the rotated window, if \code{X} is a
+    binary pixel mask. Ignored if \code{X} is not a binary mask.
+  }
+  \item{centre}{
+    Centre of rotation.
+    Either a vector of length 2, or a character string
+    (partially matched to \code{"centroid"}, \code{"midpoint"}
+    or \code{"bottomleft"}).
+    The default is the coordinate origin \code{c(0,0)}.
+  }
+}
+\value{
+  Another object of class \code{"owin"} representing the
+  rotated window.
+}
+\details{
+  Rotates the window by the specified angle. Angles are measured in
+  radians, anticlockwise. The default is to rotate the window 90 degrees
+  anticlockwise. The centre of rotation is the origin, by default,
+  unless \code{centre} is specified.
+}
+\seealso{
+  \code{\link{owin.object}}
+}
+\examples{
+  w <- owin(c(0,1),c(0,1))
+  v <- rotate(w, pi/3)
+  e <- rotate(w, pi/2, centre="midpoint")
+  \dontrun{
+  plot(v)
+  }
+  w <- as.mask(letterR)
+  v <- rotate(w, pi/5)
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/rotate.ppp.Rd b/man/rotate.ppp.Rd
new file mode 100644
index 0000000..3e9f5f7
--- /dev/null
+++ b/man/rotate.ppp.Rd
@@ -0,0 +1,54 @@
+\name{rotate.ppp}
+\alias{rotate.ppp}
+\title{Rotate a Point Pattern}
+\description{
+  Rotates a point pattern
+}
+\usage{
+ \method{rotate}{ppp}(X, angle=pi/2, \dots, centre=NULL)
+}
+\arguments{
+  \item{X}{A point pattern (object of class \code{"ppp"}).}
+  \item{angle}{Angle of rotation.}
+  \item{\dots}{
+    Arguments passed to \code{\link{rotate.owin}} affecting
+    the handling of the observation window, if it is a binary pixel mask.
+  }
+  \item{centre}{
+    Centre of rotation.
+    Either a vector of length 2, or a character string
+    (partially matched to \code{"centroid"}, \code{"midpoint"}
+    or \code{"bottomleft"}).
+    The default is the coordinate origin \code{c(0,0)}.
+  }
+}
+\value{
+  Another object of class \code{"ppp"} representing the
+  rotated point pattern.
+}
+\details{
+  The points of the pattern, and the window of observation, 
+  are rotated about the origin by the angle specified.
+  Angles are measured in
+  radians, anticlockwise. The default is to rotate the pattern 90 degrees
+  anticlockwise. If the points carry marks, these are preserved.
+}
+\seealso{
+  \code{\link{ppp.object}},
+  \code{\link{rotate.owin}}
+}
+\examples{
+  data(cells)
+  X <- rotate(cells, pi/3)
+  \dontrun{
+  plot(X)
+  }
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/rotate.psp.Rd b/man/rotate.psp.Rd
new file mode 100644
index 0000000..e9247ed
--- /dev/null
+++ b/man/rotate.psp.Rd
@@ -0,0 +1,57 @@
+\name{rotate.psp}
+\alias{rotate.psp}
+\title{Rotate a Line Segment Pattern}
+\description{
+  Rotates a line segment pattern
+}
+\usage{
+ \method{rotate}{psp}(X, angle=pi/2, \dots, centre=NULL)
+}
+\arguments{
+  \item{X}{A line segment pattern (object of class \code{"psp"}).}
+  \item{angle}{Angle of rotation.}
+  \item{\dots}{
+    Arguments passed to \code{\link{rotate.owin}} affecting
+    the handling of the observation window, if it is a binary pixel
+    mask.
+  }
+  \item{centre}{
+    Centre of rotation.
+    Either a vector of length 2, or a character string
+    (partially matched to \code{"centroid"}, \code{"midpoint"}
+    or \code{"bottomleft"}).
+    The default is the coordinate origin \code{c(0,0)}.
+  }
+}
+\value{
+  Another object of class \code{"psp"} representing the
+  rotated line segment pattern.
+}
+\details{
+  The line segments of the pattern, and the window of observation, 
+  are rotated about the origin by the angle specified.
+  Angles are measured in
+  radians, anticlockwise. The default is to rotate the pattern 90 degrees
+  anticlockwise. If the line segments carry marks, these are preserved.
+}
+\seealso{
+  \code{\link{psp.object}},
+  \code{\link{rotate.owin}},
+  \code{\link{rotate.ppp}}
+}
+\examples{
+  oldpar <- par(mfrow=c(2,1))
+  X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  plot(X, main="original")
+  Y <- rotate(X, pi/4)
+  plot(Y, main="rotated")
+  par(oldpar)
+}
+\author{
+  \adrian
+  and
+  \rolf
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/rotmean.Rd b/man/rotmean.Rd
new file mode 100644
index 0000000..6587b49
--- /dev/null
+++ b/man/rotmean.Rd
@@ -0,0 +1,89 @@
+\name{rotmean}
+\alias{rotmean}
+\title{
+  Rotational Average of a Pixel Image
+}
+\description{
+  Compute the average pixel value over all rotations of the image
+  about the origin, as a function of distance from the origin.
+}
+\usage{
+rotmean(X, ..., origin, padzero=TRUE, Xname, result=c("fv", "im"))
+}
+\arguments{
+  \item{X}{
+    A pixel image.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{origin}{
+    Optional. Origin about which the rotations should be performed.
+    Either a numeric vector or a character string as described
+    in the help for \code{\link{shift.owin}}.
+  }
+  \item{padzero}{
+    Logical. If \code{TRUE} (the default), the value of \code{X}
+    is assumed to be zero outside the window of \code{X}.
+    If \code{FALSE}, the value of \code{X} is taken to be undefined
+    outside the window of \code{X}.
+  }
+  \item{Xname}{
+    Optional name for \code{X} to be used in the
+    function labels.
+  }
+  \item{result}{
+    Character string specifying the kind of result required:
+    either a function object or a pixel image.
+  }
+}
+\details{
+  This command computes, for each possible distance \eqn{r},
+  the average pixel value of the pixels lying at
+  distance \eqn{r} from the origin. Kernel smoothing is used
+  to obtain a smooth function of \eqn{r}.
+
+  If \code{result="fv"} (the default) the result is a function
+  object of class \code{"fv"} giving the mean pixel value of \code{X}
+  as a function of distance from the origin.
+
+  If \code{result="im"} the result is a pixel image, with the same
+  dimensions as \code{X}, giving the mean value of \code{X}
+  over all pixels lying at the same distance from the origin
+  as the current pixel.
+
+  If \code{padzero=TRUE} (the default), the value of \code{X}
+  is assumed to be zero outside the window of \code{X}. The rotational
+  mean at a given distance \eqn{r} is the average value of the image
+  \code{X} over the \emph{entire} circle of radius \eqn{r},
+  including zero values outside the window if the circle
+  lies partly outside the window.
+  
+  If \code{padzero=FALSE}, the value of \code{X} is taken to be
+  undefined outside the window of \code{X}. The rotational mean
+  is the average of the \code{X} values over the \emph{subset} of the circle
+  of radius \eqn{r} that lies inside the window.
+}
+\value{
+  An object of class \code{"fv"} or \code{"im"}.
+}
+\author{
+  \adrian,
+  \rolf
+  and \ege
+}
+\examples{
+  if(interactive()) {
+    Z <- setcov(square(1))
+    plot(rotmean(Z))
+    plot(rotmean(Z, result="im"))
+  } else {
+    Z <- setcov(square(1), dimyx=32)
+    f <- rotmean(Z)
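+    # rotational average about a different origin (sketch)
+    f2 <- rotmean(Z, origin="midpoint")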
+  }
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/round.ppp.Rd b/man/round.ppp.Rd
new file mode 100644
index 0000000..b00e64d
--- /dev/null
+++ b/man/round.ppp.Rd
@@ -0,0 +1,55 @@
+\name{round.ppp}
+\alias{round.ppp}
+\alias{round.pp3}
+\alias{round.ppx}
+\title{
+  Apply Numerical Rounding to Spatial Coordinates
+}
+\description{
+  Apply numerical rounding
+  to the spatial coordinates of a point pattern.
+}
+\usage{
+\method{round}{ppp}(x, digits = 0)
+
+\method{round}{pp3}(x, digits = 0)
+
+\method{round}{ppx}(x, digits = 0)
+}
+\arguments{
+  \item{x}{
+    A spatial point pattern in any dimension
+    (object of class \code{"ppp"}, \code{"pp3"} or \code{"ppx"}).
+  }
+  \item{digits}{
+    integer indicating the number of decimal places. 
+  }
+}
+\details{
+  These functions are methods for the generic function
+  \code{\link[base]{round}}.
+  They apply numerical rounding to the spatial coordinates of the
+  point pattern \code{x}. 
+}
+\value{
+  A point pattern object, of the same class as \code{x}.
+}
+\author{
+  \adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{rounding}} to determine whether numbers have been
+  rounded.
+  
+  \code{\link[base]{round}} in the Base package.
+}
+\examples{
+  round(cells, 1)
+}
+\keyword{spatial}
+\keyword{manip}
+
diff --git a/man/rounding.Rd b/man/rounding.Rd
new file mode 100644
index 0000000..cbb5d4a
--- /dev/null
+++ b/man/rounding.Rd
@@ -0,0 +1,83 @@
+\name{rounding} 
+\alias{rounding}
+\alias{rounding.default}
+\alias{rounding.ppp}
+\alias{rounding.pp3}
+\alias{rounding.ppx}
+\title{
+  Detect Numerical Rounding 
+}
+\description{
+  Given a numeric vector, or an object containing numeric spatial
+  coordinates, determine whether the values have been rounded
+  to a certain number of decimal places.
+}
+\usage{
+rounding(x)
+
+\method{rounding}{default}(x)
+
+\method{rounding}{ppp}(x)
+
+\method{rounding}{pp3}(x)
+
+\method{rounding}{ppx}(x)
+}
+\arguments{
+  \item{x}{
+    A numeric vector, or an object containing numeric spatial coordinates.
+  }
+}
+\details{
+  For a numeric vector \code{x},
+  this function determines whether the values have been rounded
+  to a certain number of decimal places.
+  \itemize{
+    \item 
+    If the entries of \code{x} are not all integers, then
+    \code{rounding(x)} returns the smallest number of digits \code{d}
+    after the decimal point
+    such that \code{\link[base]{round}(x, digits=d)} is identical to
+    \code{x}.
+    For example if \code{rounding(x) = 2} then the entries of
+    \code{x} are rounded to 2 decimal places, and are multiples of 0.01.
+    \item 
+    If all the entries of \code{x} are integers, then
+    \code{rounding(x)} returns \code{-d}, where
+    \code{d} is the smallest number of digits \emph{before} the decimal point
+    such that \code{\link[base]{round}(x, digits=-d)} is identical to
+    \code{x}.
+    For example if \code{rounding(x) = -3} then the entries of
+    \code{x} are multiples of 1000.
+    If \code{rounding(x) = 0} then the entries of \code{x} are integers
+    but not multiples of 10.
+    \item
+    If all entries of \code{x} are equal to 0, the rounding is
+    not determined, and a value of \code{NULL} is returned.
+  }
+  
+  For a point pattern (object of class \code{"ppp"})
+  or similar object \code{x} containing numeric spatial
+  coordinates, this procedure is applied to the spatial coordinates.
+}
+\value{
+  An integer.
+}
+\author{
+  \adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{round.ppp}}
+}
+\examples{
+   rounding(c(0.1, 0.3, 1.2))
+   rounding(c(1940, 1880, 2010))
+   rounding(0)
+   rounding(cells)
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/rpoint.Rd b/man/rpoint.Rd
new file mode 100644
index 0000000..24b9cba
--- /dev/null
+++ b/man/rpoint.Rd
@@ -0,0 +1,133 @@
+\name{rpoint}
+\alias{rpoint}
+\title{Generate N Random Points}
+\description{
+  Generate a random point pattern
+  containing \eqn{n} independent, identically distributed random points
+  with any specified distribution.
+}
+\usage{
+ rpoint(n, f, fmax=NULL, win=unit.square(),
+        \dots, giveup=1000, verbose=FALSE,
+        nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{n}{
+    Number of points to generate.
+  }
+  \item{f}{
+    The probability density of the points,
+    possibly un-normalised.
+    Either a constant,
+    a function \code{f(x,y,...)}, or a pixel image object.
+  }
+  \item{fmax}{
+    An upper bound on the values of \code{f}.
+    If missing, this number will be estimated.
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    Ignored if \code{f} is a pixel image.
+  }
+  \item{\dots}{
+    Arguments passed to the function \code{f}.
+  }
+  \item{giveup}{
+    Number of attempts in the rejection method after which
+    the algorithm should stop trying to generate new points.
+  }
+  \item{verbose}{
+    Flag indicating whether to report details of performance
+    of the simulation algorithm.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"})
+  if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}.
+}
+\details{
+  This function generates \code{n} independent, identically distributed
+  random points with common probability density proportional to
+  \code{f}.
+
+  The argument \code{f} may be
+  \describe{
+    \item{a numerical constant:}{
+      uniformly distributed random points will
+      be generated.
+    }
+    \item{a function:}{random points will be generated
+      in the window \code{win} with probability density proportional
+      to \code{f(x,y,...)} where \code{x} and \code{y} are the cartesian
+      coordinates. The function \code{f} must accept 
+      two \emph{vectors} of coordinates \code{x,y} and return the corresponding
+      vector of function values. Additional arguments \code{...} of any kind
+      may be passed to the function.
+    }
+    \item{a pixel image:}{if \code{f} is a pixel image object
+      of class \code{"im"} (see \code{\link{im.object}}) then
+      random points will be generated
+      in the window of this pixel image, with probability density
+      proportional to the pixel values of \code{f}.
+    }
+  }
+  The algorithm is as follows:
+  \itemize{
+    \item
+    If \code{f} is a constant, we invoke \code{\link{runifpoint}}.
+    \item
+    If \code{f} is a function, then we use the rejection method.
+    Proposal points are generated from the uniform distribution.
+    A proposal point \eqn{(x,y)} is accepted with probability
+    \code{f(x,y,...)/fmax} and otherwise rejected.
+    The algorithm continues until \code{n} points have been
+    accepted. It gives up after \code{giveup * n} proposals
+    if there are still fewer than \code{n} points.
+    \item 
+    If \code{f} is a pixel image, then a random sequence of 
+    pixels is selected (using \code{\link{sample}})
+    with probabilities proportional to the
+    pixel values of \code{f}.  Then for each pixel in the sequence
+    we generate a uniformly distributed random point in that pixel.
+  }
+  The algorithm for pixel images is more efficient than that for
+  functions.
+}
+\seealso{
+\code{\link{ppp.object}},
+\code{\link{owin.object}},
+\code{\link{runifpoint}}
+}
+\examples{
+ # 100 uniform random points in the unit square
+ X <- rpoint(100)
+
+ # 100 random points with probability density proportional to x^2 + y^2
+ X <- rpoint(100, function(x,y) { x^2 + y^2}, 1)
+
+ # `fmax' may be omitted
+ X <- rpoint(100, function(x,y) { x^2 + y^2})
+
+ # irregular window
+ data(letterR)
+ X <- rpoint(100, function(x,y) { x^2 + y^2}, win=letterR)
+
+ # make a pixel image 
+ Z <- setcov(letterR)
+ # 100 points with density proportional to pixel values
+ X <- rpoint(100, Z)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rpoisline.Rd b/man/rpoisline.Rd
new file mode 100644
index 0000000..def00b7
--- /dev/null
+++ b/man/rpoisline.Rd
@@ -0,0 +1,55 @@
+\name{rpoisline}
+\alias{rpoisline}
+\title{Generate Poisson Random Line Process}
+\description{
+  Generate a random pattern of line segments
+  obtained from the Poisson line process.
+}
+\usage{
+ rpoisline(lambda, win=owin())
+}
+\arguments{
+  \item{lambda}{
+    Intensity of the Poisson line process.
+    A positive number.
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    An object of class \code{"owin"}
+    or something acceptable to \code{\link{as.owin}}.
+  }
+}
+\value{
+  A line segment pattern (an object of class \code{"psp"}).
+
+  The result also has an attribute called \code{"lines"} (an object of
+  class \code{"infline"} specifying the original infinite random lines)
+  and an attribute \code{"linemap"} (an integer vector mapping the line
+  segments to their parent lines).
+}
+\details{
+  This algorithm generates a realisation
+  of the uniform Poisson line process, and clips it to the window
+  \code{win}.
+
+  The argument \code{lambda} must be a positive number.
+  It controls the intensity of the process. The expected number of
+  lines intersecting a convex region of the plane is equal to
+  \code{lambda} times the perimeter length of the region.
+  The expected total length of the lines crossing a region of the plane
+  is equal to \code{lambda * pi} times the area of the region.
+}
+\seealso{
+\code{\link{psp}}
+}
+\examples{
+ # uniform Poisson line process with intensity 10,
+ # clipped to the unit square
+ rpoisline(10)
+}
+\author{
+  \adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rpoislinetess.Rd b/man/rpoislinetess.Rd
new file mode 100644
index 0000000..c37f3fb
--- /dev/null
+++ b/man/rpoislinetess.Rd
@@ -0,0 +1,57 @@
+\name{rpoislinetess}
+\alias{rpoislinetess}
+\title{Poisson Line Tessellation}
+\description{
+  Generate a tessellation delineated by the lines of
+  the Poisson line process
+}
+\usage{
+rpoislinetess(lambda, win = owin())
+}
+\arguments{
+  \item{lambda}{
+    Intensity of the Poisson line process.
+    A positive number.
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    An object of class \code{"owin"}
+    or something acceptable to \code{\link{as.owin}}.
+    Currently, the window must be a rectangle.
+  }
+}
+\details{
+  This algorithm generates a realisation
+  of the uniform Poisson line process, and divides the window
+  \code{win} into tiles separated by these lines.
+
+  The argument \code{lambda} must be a positive number.
+  It controls the intensity of the process. The expected number of
+  lines intersecting a convex region of the plane is equal to
+  \code{lambda} times the perimeter length of the region.
+  The expected total length of the lines crossing a region of the plane
+  is equal to \code{lambda * pi} times the area of the region.
+}
+\value{
+  A tessellation (object of class \code{"tess"}).
+
+  Also has an attribute \code{"lines"} containing the realisation of the
+  Poisson line process, as an object of class \code{"infline"}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{rpoisline}}
+  to generate the lines only.
+}
+\examples{
+ X <- rpoislinetess(3)
+ plot(as.im(X), main="rpoislinetess(3)")
+ plot(X, add=TRUE)
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rpoislpp.Rd b/man/rpoislpp.Rd
new file mode 100644
index 0000000..9bbc2e3
--- /dev/null
+++ b/man/rpoislpp.Rd
@@ -0,0 +1,67 @@
+\name{rpoislpp}
+\alias{rpoislpp}
+\title{
+  Poisson Point Process on a Linear Network
+}
+\description{
+  Generates a realisation of the Poisson point process
+  with specified intensity on the given linear network.
+}
+\usage{
+rpoislpp(lambda, L, \dots, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{lambda}{
+    Intensity of the Poisson process. 
+    A single number, a \code{function(x,y)}, a pixel image
+    (object of class \code{"im"}), or a vector of numbers,
+    a list of functions, or a list of images.
+  }
+  \item{L}{
+    A linear network (object of class \code{"linnet"},
+    see \code{\link{linnet}}).
+    Can be omitted in some cases: see Details.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{rpoisppOnLines}}.
+  }
+  \item{nsim}{Number of simulated realisations to generate.}
+  \item{drop}{
+    Logical value indicating what to do when \code{nsim=1}.
+    If \code{drop=TRUE} (the default), the result is a point pattern.
+    If \code{drop=FALSE}, the result is a list with one entry which is a
+    point pattern.
+  }
+}
+\details{
+  This function uses \code{\link{rpoisppOnLines}}
+  to generate the random points.
+
+  Argument \code{L} can be omitted, and defaults to \code{as.linnet(lambda)},
+  when \code{lambda} is a function on a linear network (class
+  \code{"linfun"}) or a pixel image on a linear network (\code{"linim"}).
+}
+\value{
+  If \code{nsim = 1} and \code{drop=TRUE},
+  a point pattern on the linear network,
+  i.e.\ an object of class \code{"lpp"}.
+  Otherwise, a list of such point patterns.
+}
+\author{
+  Ang Qi Wei \email{aqw07398 at hotmail.com} and
+  \adrian 
+}
+\seealso{
+ \code{\link{runiflpp}},
+ \code{\link{rlpp}},
+ \code{\link{lpp}},
+ \code{\link{linnet}}
+}
+\examples{
+   X <- rpoislpp(5, simplenet)
+   plot(X)
+   # multitype
+   X <- rpoislpp(c(a=5, b=5), simplenet)
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rpoispp.Rd b/man/rpoispp.Rd
new file mode 100644
index 0000000..36ad53e
--- /dev/null
+++ b/man/rpoispp.Rd
@@ -0,0 +1,173 @@
+\name{rpoispp}
+\alias{rpoispp}
+\title{Generate Poisson Point Pattern}
+\description{
+  Generate a random point pattern using the
+  (homogeneous or inhomogeneous) Poisson process.
+  Includes CSR (complete spatial randomness).
+}
+\usage{
+ rpoispp(lambda, lmax=NULL, win=owin(), \dots,
+         nsim=1, drop=TRUE, ex=NULL, warnwin=TRUE)
+}
+\arguments{
+  \item{lambda}{
+    Intensity of the Poisson process.
+    Either a single positive number, a \code{function(x,y, \dots)},
+    or a pixel image.
+  }
+  \item{lmax}{
+    Optional. An upper bound for the value of \code{lambda(x,y)},
+    if \code{lambda} is a function.
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    An object of class \code{"owin"}
+    or something acceptable to \code{\link{as.owin}}.
+    Ignored if \code{lambda} is a pixel image.
+  }
+  \item{\dots}{
+    Arguments passed to \code{lambda} if it is a function.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+  \item{ex}{
+    Optional. A point pattern to use as the example.
+    If \code{ex} is given and \code{lambda,lmax,win} are missing,
+    then \code{lambda} and \code{win} will be calculated from
+    the point pattern \code{ex}.
+  }
+  \item{warnwin}{
+    Logical value specifying whether to issue a warning
+    when \code{win} is ignored (which occurs when \code{lambda}
+    is an image and \code{win} is present).
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"})
+  if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}.
+}
+\details{
+  If \code{lambda} is a single number,
+  then this algorithm generates a realisation
+  of the uniform Poisson process (also known as 
+  Complete Spatial Randomness, CSR) inside the window \code{win} with 
+  intensity \code{lambda} (points per unit area).
+ 
+  If \code{lambda} is a function, then this algorithm generates a realisation
+  of the inhomogeneous Poisson process with intensity function
+  \code{lambda(x,y,\dots)} at spatial location \code{(x,y)}
+  inside the window \code{win}.
+  The function \code{lambda} must work correctly with vectors \code{x}
+  and \code{y}.
+  
+  If \code{lmax} is given,
+  it must be an upper bound on the values of \code{lambda(x,y,\dots)}
+  for all locations \code{(x, y)}
+  inside the window \code{win}. That is, we must have
+  \code{lambda(x,y,\dots) <= lmax} for all locations \code{(x,y)}.
+  If this is not true then the results of
+  the algorithm will be incorrect.
+
+  If \code{lmax} is missing or \code{NULL},
+  an approximate upper bound is computed by finding the maximum value
+  of \code{lambda(x,y,\dots)}
+  on a grid of locations \code{(x,y)} inside the window \code{win},
+  and adding a safety margin equal to 5 percent of the range of
+  \code{lambda} values. This can be computationally intensive,
+  so it is advisable to specify \code{lmax} if possible.
+
+  If \code{lambda} is a pixel image object of class \code{"im"}
+  (see \code{\link{im.object}}), this algorithm generates a realisation
+  of the inhomogeneous Poisson process with intensity equal to the
+  pixel values of the image. (The value of the intensity function at an
+  arbitrary location is the pixel value of the nearest pixel.)
+  The argument \code{win} is ignored;
+  the window of the pixel image is used instead. It will be converted
+  to a rectangle if possible, using \code{\link{rescue.rectangle}}.
+  
+  To generate an inhomogeneous Poisson process
+  the algorithm uses ``thinning'': it first generates a uniform
+  Poisson process of intensity \code{lmax},
+  then randomly deletes or retains each point, independently of other points,
+  with retention probability
+  \eqn{p(x,y) = \lambda(x,y)/\mbox{lmax}}{p(x,y) = lambda(x,y)/lmax}.
+
+  For \emph{marked} point patterns, use \code{\link{rmpoispp}}.
+}
+\section{Warning}{
+  Note that \code{lambda} is the \bold{intensity}, that is,
+  the expected number of points \bold{per unit area}.
+  The total number of points in the simulated
+  pattern will be random with expected value \code{mu = lambda * a}
+  where \code{a} is the area of the window \code{win}. 
+}
+\section{Reproducibility}{
+  The simulation algorithm, for the case where
+  \code{lambda} is a pixel image, was changed in \pkg{spatstat}
+  version \code{1.42-3}. Set \code{spatstat.options(fastpois=FALSE)}
+  to use the previous, slower algorithm, if it is desired to reproduce
+  results obtained with earlier versions.
+}
+\seealso{
+  \code{\link{rmpoispp}} for Poisson \emph{marked} point patterns,
+  \code{\link{runifpoint}} for a fixed number of independent
+  uniform random points;
+  \code{\link{rpoint}}, \code{\link{rmpoint}} for a fixed number of
+  independent random points with any distribution;
+  \code{\link{rMaternI}},
+  \code{\link{rMaternII}},
+  \code{\link{rSSI}},
+  \code{\link{rStrauss}},
+  \code{\link{rstrat}}
+  for random point processes with spatial inhibition
+  or regularity; 
+  \code{\link{rThomas}},
+  \code{\link{rGaussPoisson}},
+  \code{\link{rMatClust}},
+  \code{\link{rcell}}
+  for random point processes exhibiting clustering;
+  \code{\link{rmh.default}} for Gibbs processes.
+  See also \code{\link{ppp.object}},
+  \code{\link{owin.object}}.
+}
+\examples{
+ # uniform Poisson process with intensity 100 in the unit square
+ pp <- rpoispp(100)
+ 
+ # uniform Poisson process with intensity 1 in a 10 x 10 square
+ pp <- rpoispp(1, win=owin(c(0,10),c(0,10)))
+ # plots should look similar !
+ 
+ # inhomogeneous Poisson process in unit square
+ # with intensity lambda(x,y) = 100 * exp(-3*x)
+ # Intensity is bounded by 100
+ pp <- rpoispp(function(x,y) {100 * exp(-3*x)}, 100)
+
+ # How to tune the coefficient of x
+ lamb <- function(x,y,a) { 100 * exp( - a * x)}
+ pp <- rpoispp(lamb, 100, a=3)
+
+ # pixel image
+ Z <- as.im(function(x,y){100 * sqrt(x+y)}, unit.square())
+ pp <- rpoispp(Z)
+
+ # randomising an existing point pattern
+ rpoispp(intensity(cells), win=Window(cells))
+ rpoispp(ex=cells)
+}
+\author{
+\adrian
+
+
+\rolf
+
+and \ege
+
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rpoispp3.Rd b/man/rpoispp3.Rd
new file mode 100644
index 0000000..e46aaed
--- /dev/null
+++ b/man/rpoispp3.Rd
@@ -0,0 +1,62 @@
+\name{rpoispp3}
+\alias{rpoispp3}
+\title{
+  Generate Poisson Point Pattern in Three Dimensions
+}
+\description{
+  Generate a random three-dimensional point pattern
+  using the homogeneous Poisson process.
+}
+\usage{
+rpoispp3(lambda, domain = box3(), nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{lambda}{
+    Intensity of the Poisson process.
+    A single positive number.
+  }
+  \item{domain}{
+    Three-dimensional box in which the process should be generated.
+    An object of class \code{"box3"}.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\value{
+  If \code{nsim = 1} and \code{drop=TRUE}, a point pattern in
+  three dimensions (an object of class \code{"pp3"}).
+  If \code{nsim > 1}, a list of such point patterns.
+}
+\details{
+  This function generates a realisation
+  of the homogeneous Poisson process in three dimensions,
+  with intensity \code{lambda} (points per unit volume).
+  
+  The realisation is generated inside the three-dimensional region
+  \code{domain} which currently must be a rectangular box (object of
+  class \code{"box3"}).
+}
+\note{
+  The intensity \code{lambda} is the expected number of points
+  \emph{per unit volume}. 
+}
+\seealso{
+  \code{\link{runifpoint3}}, 
+  \code{\link{pp3}}, 
+  \code{\link{box3}}
+}
+\examples{
+   X <- rpoispp3(50)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rpoisppOnLines.Rd b/man/rpoisppOnLines.Rd
new file mode 100644
index 0000000..342dbc2
--- /dev/null
+++ b/man/rpoisppOnLines.Rd
@@ -0,0 +1,116 @@
+\name{rpoisppOnLines}
+\alias{rpoisppOnLines}
+\title{Generate Poisson Point Pattern on Line Segments}
+\description{
+  Given a line segment pattern, generate a Poisson random point pattern
+  on the line segments.
+}
+\usage{
+rpoisppOnLines(lambda, L, lmax = NULL, ..., nsim=1)
+}
+\arguments{
+  \item{lambda}{Intensity of the Poisson process.
+    A single number, a \code{function(x,y)}, a pixel image
+    (object of class \code{"im"}), or a vector of numbers,
+    a list of functions, or a list of images.
+  }
+  \item{L}{Line segment pattern (object of class \code{"psp"})
+    on which the points should be generated.
+  }
+  \item{lmax}{
+    Optional upper bound (for increased computational efficiency).
+    A known upper bound for the values of \code{lambda},
+    if \code{lambda} is a function or a pixel image.
+    That is, \code{lmax} should be a number which is 
+    known to be greater than or equal to all values of \code{lambda}. 
+  }
+  \item{\dots}{Additional arguments passed to \code{lambda} if it is a
+    function.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+}
+\details{
+  This command generates a Poisson point process on the one-dimensional
+  system of line segments in \code{L}. The result is a point pattern
+  consisting of points lying on the line segments in \code{L}.
+  The number of random points falling on any given line segment follows
+  a Poisson distribution. The patterns of points on different segments
+  are independent.
+
+  The intensity \code{lambda} is the expected number of points
+  per unit \bold{length} of line segment. 
+  It may be constant, or it may depend on spatial location.
+
+  In order to generate an unmarked Poisson process,
+  the argument \code{lambda} may be a single number,
+  or a \code{function(x,y)}, or a pixel image (object of class
+  \code{"im"}). 
+
+  In order to generate a \emph{marked} Poisson process,
+  \code{lambda} may be a numeric vector, a list of functions,
+  or a list of images, each entry giving the intensity for
+  a different mark value.
+
+  If \code{lambda} is not numeric, then the (Lewis-Shedler)
+  rejection method is used. 
+  The rejection method requires knowledge of \code{lmax},
+  the maximum possible value of \code{lambda}. This should be either
+  a single number, or a numeric vector of the same length as
+  \code{lambda}. If \code{lmax} is not
+  given, it will be computed approximately, by sampling many values of
+  \code{lambda}.
+
+  If \code{lmax} is given, then it \bold{must} be larger than
+  any possible value of \code{lambda}, otherwise the results of the
+  algorithm will be incorrect.
+}
+\value{
+  If \code{nsim = 1}, a point pattern
+  (object of class \code{"ppp"}) in the same window
+  as \code{L}. If \code{nsim > 1}, a list of such point patterns.
+}
+\seealso{
+  \code{\link{psp}},
+  \code{\link{ppp}},
+  \code{\link{runifpointOnLines}},
+  \code{\link{rpoispp}}
+}
+\examples{
+  live <- interactive()
+  L <- psp(runif(10), runif(10), runif(10), runif(10),  window=owin())
+  if(live) plot(L, main="")
+
+  # uniform intensity
+  Y <- rpoisppOnLines(4, L)
+  if(live) plot(Y, add=TRUE, pch="+")
+
+  # uniform MARKED process with types 'a' and 'b'
+  Y <- rpoisppOnLines(c(a=4, b=5), L)
+  if(live) {
+    plot(L, main="")
+    plot(Y, add=TRUE, pch="+")
+  }
+
+  # intensity is a function
+  Y <- rpoisppOnLines(function(x,y){ 10 * x^2}, L, 10)
+  if(live) { 
+    plot(L, main="")
+    plot(Y, add=TRUE, pch="+")
+  }
+
+  # intensity is an image
+  Z <- as.im(function(x,y){10 * sqrt(x+y)}, unit.square())
+  Y <- rpoisppOnLines(Z, L, 15)
+  if(live) {
+   plot(L, main="")
+   plot(Y, add=TRUE, pch="+")
+  }
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rpoisppx.Rd b/man/rpoisppx.Rd
new file mode 100644
index 0000000..1e6c1f3
--- /dev/null
+++ b/man/rpoisppx.Rd
@@ -0,0 +1,63 @@
+\name{rpoisppx}
+\alias{rpoisppx}
+\title{
+  Generate Poisson Point Pattern in Any Dimensions
+}
+\description{
+  Generate a random multi-dimensional point pattern
+  using the homogeneous Poisson process.
+}
+\usage{
+rpoisppx(lambda, domain, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{lambda}{
+    Intensity of the Poisson process.
+    A single positive number.
+  }
+  \item{domain}{
+    Multi-dimensional box in which the process should be generated.
+    An object of class \code{"boxx"}.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a single point pattern.
+  }
+}
+\value{
+  If \code{nsim = 1} and \code{drop=TRUE},
+  a point pattern (an object of class \code{"ppx"}).
+  If \code{nsim > 1} or \code{drop=FALSE}, a list of such point patterns.
+}
+\details{
+  This function generates a realisation
+  of the homogeneous Poisson process in multi dimensions,
+  with intensity \code{lambda} (points per unit volume).
+  
+  The realisation is generated inside the multi-dimensional region
+  \code{domain} which currently must be a rectangular box (object of
+  class \code{"boxx"}).
+}
+\note{
+  The intensity \code{lambda} is the expected number of points
+  \emph{per unit volume}. 
+}
+\seealso{
+  \code{\link{runifpointx}}, 
+  \code{\link{ppx}}, 
+  \code{\link{boxx}}
+}
+\examples{
+   w <- boxx(x=c(0,1), y=c(0,1), z=c(0,1), t=c(0,3))
+   X <- rpoisppx(10, w)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rppm.Rd b/man/rppm.Rd
new file mode 100644
index 0000000..6ab28b2
--- /dev/null
+++ b/man/rppm.Rd
@@ -0,0 +1,69 @@
+\name{rppm}
+\alias{rppm}
+\title{
+  Recursively Partitioned Point Process Model
+}
+\description{
+  Fits a recursive partition model to point pattern data.
+}
+\usage{
+  rppm(\dots, rpargs=list())
+}
+\arguments{
+  \item{\dots}{
+    Arguments passed to \code{\link{ppm}}
+    specifying the point pattern data and the explanatory covariates.
+  }
+  \item{rpargs}{
+    Optional list of arguments passed to \code{\link[rpart]{rpart}}
+    controlling the recursive partitioning procedure.
+  }
+}
+\details{
+  This function attempts to find a simple rule for predicting
+  low and high intensity regions of points in a point pattern,
+  using explanatory covariates.
+  
+  The arguments \code{\dots} specify the point pattern data
+  and explanatory covariates in the same way as they would be
+  in the function \code{\link{ppm}}. 
+
+  The recursive partitioning algorithm \code{\link[rpart]{rpart}}
+  is then used to find a partitioning rule.
+}
+\value{
+  An object of class \code{"rppm"}. There are methods
+  for \code{print}, \code{plot}, \code{fitted}, \code{predict} and
+  \code{prune} for this class.
+}
+\references{
+  Breiman, L., Friedman, J. H., Olshen, R. A., and Stone, C. J. (1984)
+  \emph{Classification and Regression Trees}. Wadsworth.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{plot.rppm}}, 
+  \code{\link{predict.rppm}}, 
+  \code{\link{prune.rppm}}.
+}
+\examples{
+    # New Zealand trees data: trees planted along border
+    # Use covariates 'x', 'y'
+    nzfit <- rppm(nztrees ~ x + y)
+    nzfit
+    prune(nzfit, cp=0.035)
+    # Murchison gold data: numeric and logical covariates
+    mur <- solapply(murchison, rescale, s=1000, unitname="km")
+    mur$dfault <- distfun(mur$faults)
+    # 
+    mfit <- rppm(gold ~ dfault + greenstone, data=mur)
+    mfit
+    # Gorillas data: factor covariates
+    #          (symbol '.' indicates 'all variables')
+    gfit <- rppm(unmark(gorillas) ~ . , data=gorillas.extra)
+    gfit
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/rshift.Rd b/man/rshift.Rd
new file mode 100644
index 0000000..f79f3bb
--- /dev/null
+++ b/man/rshift.Rd
@@ -0,0 +1,65 @@
+\name{rshift}
+\alias{rshift}
+\title{Random Shift}
+\description{
+  Randomly shifts the points of a point pattern or
+  line segment pattern. Generic.
+}
+\usage{
+   rshift(X, \dots)
+}
+\arguments{
+  \item{X}{Pattern to be subjected to a random shift.
+    A point pattern (class \code{"ppp"}),
+    a line segment pattern (class \code{"psp"})
+    or an object of class \code{"splitppp"}.
+  }
+  \item{\dots}{
+    Arguments controlling the generation of the
+    random shift vector, or specifying which parts of the pattern
+    will be shifted. 
+  }
+}
+\value{
+  An object of the same type as \code{X}.
+}
+\details{
+  This operation applies a random shift (vector displacement) to
+  the points in a point pattern,
+  or to the segments in a line segment pattern.
+
+  The argument \code{X} may be 
+  \itemize{
+    \item
+    a point pattern
+    (an object of class \code{"ppp"})
+    \item
+    a line segment pattern
+    (an object of class \code{"psp"})
+    \item
+    an object of class \code{"splitppp"}
+    (basically a list of point patterns, obtained from
+    \code{\link{split.ppp}}).
+  }
+  The function \code{rshift} is generic, with
+  methods for the three classes \code{"ppp"},
+  \code{"psp"} and \code{"splitppp"}.
+
+  See the help pages for these methods, \code{\link{rshift.ppp}},
+  \code{\link{rshift.psp}} and
+  \code{\link{rshift.splitppp}}, for further information.
+}
+\seealso{
+  \code{\link{rshift.ppp}},
+  \code{\link{rshift.psp}},
+  \code{\link{rshift.splitppp}}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rshift.ppp.Rd b/man/rshift.ppp.Rd
new file mode 100644
index 0000000..37bb9b9
--- /dev/null
+++ b/man/rshift.ppp.Rd
@@ -0,0 +1,196 @@
+\name{rshift.ppp}  
+\alias{rshift.ppp}
+\title{Randomly Shift a Point Pattern}
+\description{
+  Randomly shifts the points of a point pattern.
+}
+\usage{
+   \method{rshift}{ppp}(X, \dots, which=NULL, group)
+}
+\arguments{
+  \item{X}{Point pattern to be subjected to a random shift.
+    An object of class \code{"ppp"}
+  }
+  \item{\dots}{
+    Arguments that determine the random shift. See Details.
+  }
+  \item{group}{
+    Optional. Factor specifying a grouping of the points of \code{X},
+    or \code{NULL} indicating that all points belong to the same group.
+    Each group will be shifted together, and
+    separately from other groups.
+    By default, points in a marked point pattern are grouped
+    according to their mark values,
+    while points in an unmarked point pattern are treated as a single group.
+  }
+  \item{which}{
+    Optional. Identifies which groups of the pattern will be shifted,
+    while other groups are not shifted. A vector of levels of \code{group}.
+  }
+}
+\value{
+  A point pattern (object of class \code{"ppp"}).
+}
+\details{
+  This operation randomly shifts the locations of the points
+  in a point pattern.
+
+  The function \code{rshift} is generic. This function \code{rshift.ppp}
+  is the method for point patterns.
+
+  The most common use of this function is to shift the
+  points in a multitype point pattern.
+  By default, points of the same type are shifted in parallel
+  (i.e. points of a common type are shifted by a common displacement vector),
+  and independently of other types.
+  This is useful for testing the hypothesis of independence of types
+  (the null hypothesis that the sub-patterns of points of each type
+  are independent point processes).
+
+  In general the points of \code{X} are divided into groups,
+  then the points within a group are shifted by a common
+  random displacement vector. Different groups of points are shifted
+  independently. The grouping is determined as follows:
+  \itemize{
+    \item
+    If the argument \code{group} is present, then this determines the grouping.
+    \item
+    Otherwise, if \code{X} is a multitype point pattern,
+    the marks determine the grouping.
+    \item
+    Otherwise, all points belong to a single group.
+  }
+  The argument \code{group} should be a factor, of length equal to the
+  number of points in \code{X}. Alternatively \code{group} may be \code{NULL},
+  which specifies that all points of \code{X} belong to a single group.
+
+  By default, every group of points will be shifted.
+  The argument \code{which} indicates that only some of the groups
+  should be shifted, while other groups should be left unchanged.
+  \code{which} must be a vector of levels of \code{group}
+  (for example, a vector of types in a multitype pattern)
+  indicating which groups are to be shifted. 
+
+  The displacement vector, i.e. the vector
+  by which the data points are shifted,
+  is generated at random.
+  Parameters that control the randomisation
+  and the handling of edge effects are passed through
+  the \code{\dots} argument. They are
+  \describe{
+    \item{radius,width,height}{
+      Parameters of the random shift vector.
+    }
+  \item{edge}{
+    String indicating how to deal with edges of the pattern.
+    Options are \code{"torus"}, \code{"erode"} and \code{"none"}.
+  }
+  \item{clip}{
+    Optional. Window to which the final point pattern should be clipped.
+  }
+  }
+  If the window is a rectangle, the \emph{default} behaviour is 
+  to generate a displacement vector at random with equal probability
+  for all possible displacements. This means that the \eqn{x} and
+  \eqn{y} coordinates of the displacement vector are independent random
+  variables, uniformly distributed over the range of possible coordinates.
+
+  Alternatively, the displacement vector can be generated by
+  another random mechanism, controlled by the arguments
+  \code{radius}, \code{width} and \code{height}. 
+  \describe{
+    \item{rectangular:}{
+      if \code{width} and \code{height} are given, then 
+      the displacement vector is uniformly distributed
+      in a rectangle of these dimensions, centred at
+      the origin.  The maximum possible displacement in the \eqn{x}
+      direction is \code{width/2}. The maximum possible displacement in
+      the \eqn{y} direction is \code{height/2}. The \eqn{x} and \eqn{y}
+      displacements are independent. (If \code{width} and \code{height}
+      are actually equal to the dimensions of the observation window,
+      then this is equivalent to the default.)
+    }
+    \item{radial:}{
+      if \code{radius} is given, then the displacement vector is
+      generated by choosing a random point inside a disc of
+      the given radius, centred at the origin, with uniform probability
+      density over the disc. Thus the argument \code{radius} determines
+      the maximum possible displacement distance.
+      The argument \code{radius} is incompatible with the
+      arguments \code{width} and \code{height}.
+    }
+  }
+
+  The argument \code{edge} controls what happens when 
+  a shifted point lies outside the window of \code{X}.
+  Options are:
+  \describe{
+    \item{"none":}{
+      Points shifted outside the window of \code{X}
+      simply disappear. 
+    }
+    \item{"torus":}{
+      Toroidal or periodic boundary.
+      Treat opposite edges of the window as identical, so that a point
+      which disappears off the right-hand edge will re-appear at the
+      left-hand edge. This is called a ``toroidal shift'' because it makes the
+      rectangle topologically equivalent to the surface of a torus
+      (doughnut).
+
+      The window must be a rectangle. Toroidal shifts are undefined
+      if the window is non-rectangular.
+    }
+    \item{"erode":}{
+      Clip the point pattern to a smaller window.
+
+      If the random displacements are generated by a radial
+      mechanism (see above), then the window of \code{X}
+      is eroded by a distance equal to the value of the argument
+      \code{radius}, using \code{\link{erosion}}.
+
+      If the random displacements are generated by a rectangular
+      mechanism, then the window of \code{X} is
+      (if it is not rectangular) eroded by a distance
+      \code{max(height,width)} using \code{\link{erosion}};
+      or (if it is rectangular) trimmed by a margin of width \code{width}
+      at the left and right sides and trimmed by a margin of
+      height \code{height} at the top and bottom.
+      
+      The rationale for this is that the clipping window is
+      the largest window for which edge effects can be ignored.
+    }
+  }
+  The optional argument \code{clip} specifies a smaller window
+  to which the pattern should be restricted.
+}
+\seealso{
+  \code{\link{rshift}},
+  \code{\link{rshift.psp}}
+}
+\examples{
+   data(amacrine)
+
+   # random toroidal shift
+   # shift "on" and "off" points separately
+   X <- rshift(amacrine)
+
+   # shift "on" points and leave "off" points fixed
+   X <- rshift(amacrine, which="on")
+
+   # shift all points simultaneously
+   X <- rshift(amacrine, group=NULL)
+
+   # maximum displacement distance 0.1 units
+   X <- rshift(amacrine, radius=0.1)
+
+   # shift with erosion
+   X <- rshift(amacrine, radius=0.1, edge="erode")
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rshift.psp.Rd b/man/rshift.psp.Rd
new file mode 100644
index 0000000..153106c
--- /dev/null
+++ b/man/rshift.psp.Rd
@@ -0,0 +1,117 @@
+\name{rshift.psp}
+\alias{rshift.psp}
+\title{Randomly Shift a Line Segment Pattern}
+\description{
+  Randomly shifts the segments in a line segment pattern.
+}
+\usage{
+   \method{rshift}{psp}(X, \dots, group=NULL, which=NULL)
+}
+\arguments{
+  \item{X}{Line segment pattern to be subjected to a random shift.
+    An object of class \code{"psp"}.
+  }
+  \item{\dots}{
+    Arguments controlling the randomisation and the handling of edge
+    effects. See \code{\link{rshift.ppp}}.
+  }
+  \item{group}{
+    Optional. Factor specifying a grouping of the line segments of \code{X},
+    or \code{NULL} indicating that all line segments belong to the same group.
+    Each group will be shifted together, and
+    separately from other groups.
+  }
+  \item{which}{
+    Optional. Identifies which groups of the pattern will be shifted,
+    while other groups are not shifted. A vector of levels of \code{group}.
+  }
+}
+\value{
+  A line segment pattern (object of class \code{"psp"}).
+}
+\details{
+  This operation randomly shifts the locations of the line segments
+  in a line segment pattern.
+
+  The function \code{rshift} is generic. This function \code{rshift.psp}
+  is the method for line segment patterns.
+
+  The line segments of \code{X} are first divided into groups,
+  then the line segments within a group are shifted by a common
+  random displacement vector. Different groups of line segments are shifted
+  independently.
+  If the argument \code{group} is present, then this determines the grouping.
+  Otherwise, all line segments belong to a single group.
+
+  The argument \code{group} should be a factor, of length equal to the
+  number of line segments in \code{X}.
+  Alternatively \code{group} may be \code{NULL},
+  which specifies that all line segments of \code{X} belong to a single group.
+
+  By default, every group of line segments will be shifted.
+  The argument \code{which} indicates that only some of the groups
+  should be shifted, while other groups should be left unchanged.
+  \code{which} must be a vector of levels of \code{group}
+  indicating which groups are to be shifted. 
+
+  The displacement vector, i.e. the vector
+  by which the data line segments are shifted,
+  is generated at random.
+  The \emph{default} behaviour is 
+  to generate a displacement vector at random with equal probability
+  for all possible displacements. This means that the \eqn{x} and
+  \eqn{y} coordinates of the displacement vector are independent random
+  variables, uniformly distributed over the range of possible coordinates.
+
+  Alternatively, the displacement vector can be generated by
+  another random mechanism, controlled by the arguments
+  \code{radius}, \code{width} and \code{height}. 
+  \describe{
+    \item{rectangular:}{
+      if \code{width} and \code{height} are given, then 
+      the displacement vector is uniformly distributed
+      in a rectangle of these dimensions, centred at
+      the origin.  The maximum possible displacement in the \eqn{x}
+      direction is \code{width/2}. The maximum possible displacement in
+      the \eqn{y} direction is \code{height/2}. The \eqn{x} and \eqn{y}
+      displacements are independent. (If \code{width} and \code{height}
+      are actually equal to the dimensions of the observation window,
+      then this is equivalent to the default.)
+    }
+    \item{radial:}{
+      if \code{radius} is given, then the displacement vector is
+      generated by choosing a random line segment inside a disc of
+      the given radius, centred at the origin, with uniform probability
+      density over the disc. Thus the argument \code{radius} determines
+      the maximum possible displacement distance.
+      The argument \code{radius} is incompatible with the
+      arguments \code{width} and \code{height}.
+    }
+  }
+
+  The argument \code{edge} controls what happens when 
+  a shifted line segment lies partially or completely
+  outside the window of \code{X}. Currently the only option is
+  \code{"erode"} which specifies that the segments will be clipped
+  to a smaller window. 
+
+  The optional argument \code{clip} specifies a smaller window
+  to which the pattern should be restricted.
+}
+\seealso{
+  \code{\link{rshift}},
+  \code{\link{rshift.ppp}}
+}
+\examples{
+  X <- psp(runif(20), runif(20), runif(20), runif(20),  window=owin())
+  Y <- rshift(X, radius=0.1)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rshift.splitppp.Rd b/man/rshift.splitppp.Rd
new file mode 100644
index 0000000..56eadbd
--- /dev/null
+++ b/man/rshift.splitppp.Rd
@@ -0,0 +1,81 @@
+\name{rshift.splitppp}  
+\alias{rshift.splitppp}
+\title{Randomly Shift a List of Point Patterns}
+\description{
+  Randomly shifts each point pattern in a list of point patterns.
+}
+\usage{
+   \method{rshift}{splitppp}(X, \dots, which=seq_along(X))
+}
+\arguments{
+  \item{X}{
+    An object of class \code{"splitppp"}.
+    Basically a list of point patterns.
+  }
+  \item{\dots}{
+    Parameters controlling the generation of the
+    random shift vector and the handling of edge effects.
+    See \code{\link{rshift.ppp}}.
+  }
+  \item{which}{
+    Optional. Identifies which patterns will be shifted,
+    while other patterns are not shifted.
+    Any valid subset index for \code{X}.
+  }
+}
+\value{
+  Another object of class \code{"splitppp"}.
+}
+\details{
+  This operation applies a random shift to each of the
+  point patterns in the list \code{X}.
+  
+  The function \code{\link{rshift}} is generic.
+  This function \code{rshift.splitppp}
+  is the method for objects of class \code{"splitppp"}, which are
+  essentially lists of point patterns, created by the function
+  \code{\link{split.ppp}}.
+
+  By default, every pattern in the list \code{X} will be shifted.
+  The argument \code{which} indicates that only some of the patterns
+  should be shifted, while other groups should be left unchanged.
+  \code{which} can be any valid subset index for \code{X}.
+
+  Each point pattern in the list \code{X}
+  (or each pattern in \code{X[which]})
+  is shifted by a random displacement vector.
+  The shifting is performed by \code{\link{rshift.ppp}}.
+
+  See the help page for \code{\link{rshift.ppp}}
+  for details of the other arguments.
+}
+\seealso{
+  \code{\link{rshift}},
+  \code{\link{rshift.ppp}}
+}
+\examples{
+   data(amacrine)
+   Y <- split(amacrine)
+
+   # random toroidal shift
+   # shift "on" and "off" points separately
+   X <- rshift(Y)
+
+   # shift "on" points and leave "off" points fixed
+   X <- rshift(Y, which="on")
+
+   # maximum displacement distance 0.1 units
+   X <- rshift(Y, radius=0.1)
+
+   # shift with erosion
+   X <- rshift(Y, radius=0.1, edge="erode")
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rstrat.Rd b/man/rstrat.Rd
new file mode 100644
index 0000000..e840870
--- /dev/null
+++ b/man/rstrat.Rd
@@ -0,0 +1,76 @@
+\name{rstrat}
+\alias{rstrat}
+\title{Simulate Stratified Random Point Pattern}
+\description{
+  Generates a ``stratified random'' pattern of points in a window,
+  by dividing the window into rectangular tiles and placing
+  \code{k} random points independently in each tile.
+}
+\usage{
+ rstrat(win=square(1), nx, ny=nx, k = 1, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{win}{
+    A window. 
+    An object of class \code{\link{owin}},
+    or data in any format acceptable to \code{\link{as.owin}()}.
+  }
+  \item{nx}{Number of tiles in each column.
+  }
+  \item{ny}{Number of tiles in each row.
+  }
+  \item{k}{Number of random points to generate in each tile.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"})
+  if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}.
+}
+\details{
+  This function generates a random pattern of points
+  in a ``stratified random'' sampling design. It can be useful
+  for generating random spatial sampling points.
+
+  The bounding rectangle of \code{win} is divided into
+  a regular \eqn{nx \times ny}{nx * ny} grid of rectangular tiles.
+  In each tile, \code{k} random points are generated independently
+  with a uniform distribution in that tile. 
+
+  Some of these grid points may lie outside the window \code{win}:
+  if they do, they are deleted.
+
+  The result is a point pattern inside the window \code{win}.
+
+  This function is useful in creating dummy points for quadrature
+  schemes (see \code{\link{quadscheme}}) as well as in simulating
+  random point patterns.
+}
+\seealso{
+  \code{\link{rsyst}},
+  \code{\link{runifpoint}},
+  \code{\link{quadscheme}}
+}
+\examples{
+  X <- rstrat(nx=10)
+  plot(X)
+
+  # polygonal boundary
+  data(letterR)
+  X <- rstrat(letterR, 5, 10, k=3)
+  plot(X)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rsyst.Rd b/man/rsyst.Rd
new file mode 100644
index 0000000..e08afd8
--- /dev/null
+++ b/man/rsyst.Rd
@@ -0,0 +1,89 @@
+\name{rsyst}
+\alias{rsyst}
+\title{Simulate systematic random point pattern}
+\description{
+  Generates a \dQuote{systematic random} pattern of points in a window,
+  consisting of a grid of equally-spaced points with a random common
+  displacement.
+}
+\usage{
+ rsyst(win=square(1), nx=NULL, ny=nx, \dots, dx=NULL, dy=dx,
+       nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{win}{
+    A window. 
+    An object of class \code{\link{owin}},
+    or data in any format acceptable to \code{\link{as.owin}()}.
+  }
+  \item{nx}{Number of columns of grid points in the window.
+    Incompatible with \code{dx}.
+  }
+  \item{ny}{Number of rows of grid points in the window.
+    Incompatible with \code{dy}.
+  }
+  \item{\dots}{Ignored.}
+  \item{dx}{Spacing of grid points in \eqn{x} direction.
+    Incompatible with \code{nx}.
+  }
+  \item{dy}{Spacing of grid points in \eqn{y} direction.
+    Incompatible with \code{ny}.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"})
+  if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}.
+}
+\details{
+  This function generates a \dQuote{systematic random} pattern
+  of points in the window \code{win}. The pattern consists of a
+  rectangular grid of points with a random common displacement.
+
+  The grid spacing in the \eqn{x} direction is determined
+  either by the number of columns \code{nx} or by the
+  horizontal spacing \code{dx}.
+  The grid spacing in the \eqn{y} direction is determined
+  either by the number of rows \code{ny} or by the
+  vertical spacing \code{dy}. 
+  
+  The grid is then given a random displacement (the common displacement
+  of the grid points is a uniformly distributed random vector in the
+  tile of dimensions \code{dx, dy}).
+
+  Some of the resulting grid points may lie outside the window
+  \code{win}: if they do, they are deleted.  The result is a point
+  pattern inside the window \code{win}.
+
+  This function is useful in creating dummy points for quadrature
+  schemes (see \code{\link{quadscheme}}) as well as in simulating
+  random point patterns.
+}
+\seealso{
+  \code{\link{rstrat}},
+  \code{\link{runifpoint}},
+  \code{\link{quadscheme}}
+}
+\examples{
+  X <- rsyst(nx=10)
+  plot(X)
+
+  # polygonal boundary
+  data(letterR)
+  X <- rsyst(letterR, 5, 10)
+  plot(X)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/rtemper.Rd b/man/rtemper.Rd
new file mode 100644
index 0000000..9f3f073
--- /dev/null
+++ b/man/rtemper.Rd
@@ -0,0 +1,87 @@
+\name{rtemper}
+\alias{rtemper}
+\title{
+  Simulated Annealing or Simulated Tempering for Gibbs Point Processes
+}
+\description{
+  Performs simulated annealing or simulated tempering
+  for a Gibbs point process model using a specified
+  annealing schedule.
+}
+\usage{
+rtemper(model, invtemp, nrep, \dots, start = NULL, verbose = FALSE)
+}
+\arguments{
+  \item{model}{
+    A Gibbs point process model: a fitted Gibbs point process model
+    (object of class \code{"ppm"}), or any data acceptable to
+    \code{\link{rmhmodel}}.
+  }
+  \item{invtemp}{
+    A numeric vector of positive numbers.
+    The sequence of values of inverse temperature that will be used.
+  }
+  \item{nrep}{
+    An integer vector of the same length as \code{invtemp}.
+    The value \code{nrep[i]} specifies the number of steps of the
+    Metropolis-Hastings algorithm that will be performed
+    at inverse temperature \code{invtemp[i]}.
+  }
+  \item{start}{
+    Initial starting state for the simulation.
+    Any data acceptable to \code{\link{rmhstart}}.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link{rmh.default}}.
+  }
+  \item{verbose}{
+    Logical value indicating whether to print progress reports.
+  }
+}
+\details{
+  The Metropolis-Hastings simulation algorithm
+  \code{\link{rmh}} is run for
+  \code{nrep[1]} steps at inverse temperature \code{invtemp[1]},
+  then for 
+  \code{nrep[2]} steps at inverse temperature \code{invtemp[2]},
+  and so on.
+
+  Setting the inverse temperature to a value \eqn{\alpha}{alpha}
+  means that the probability density of the Gibbs model, \eqn{f(x)},
+  is replaced by \eqn{g(x) = C\, f(x)^\alpha}{g(x) = C f(x)^alpha}
+  where \eqn{C} is a normalising constant depending on
+  \eqn{\alpha}{alpha}.
+  Larger values of \eqn{\alpha}{alpha} exaggerate the high and low
+  values of probability density, while smaller values of \eqn{\alpha}{alpha}
+  flatten out the probability density.
+
+  For example if the original \code{model} is a Strauss process,
+  the modified model is close to a hard core process
+  for large values of inverse temperature, and close to a Poisson process
+  for small values of inverse temperature. 
+}
+\value{
+  A point pattern (object of class \code{"ppp"}).
+}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{rmh.default}},
+  \code{\link{rmh}}.
+}
+\examples{
+   stra <- rmhmodel(cif="strauss",
+                    par=list(beta=2,gamma=0.2,r=0.7),
+                    w=square(10))
+   nr <- if(interactive()) 1e5 else 1e4
+   Y <- rtemper(stra, c(1, 2, 4, 8), nr * (1:4), verbose=TRUE)
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/rthin.Rd b/man/rthin.Rd
new file mode 100644
index 0000000..fae0d6e
--- /dev/null
+++ b/man/rthin.Rd
@@ -0,0 +1,103 @@
+\name{rthin}
+\alias{rthin}
+\title{Random Thinning}
+\description{
+  Applies independent random thinning to a point pattern.
+}
+\usage{
+rthin(X, P, \dots, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"} or \code{"lpp"})
+    that will be thinned.
+  }
+  \item{P}{
+    Data giving the retention probabilities, i.e. the probability
+    that each point in \code{X} will be retained.
+    Either a single number, or a vector of numbers,
+    or a \code{function(x,y)} in the \R language,
+    or a function object (class \code{"funxy"} or \code{"linfun"}),
+    or a pixel image (object of class \code{"im"} or \code{"linim"}).
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{P}, if it is a function.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\value{
+  A point pattern (object of class \code{"ppp"} or \code{"lpp"})
+  if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}.
+}
+\details{
+  In a random thinning operation, each point of the pattern \code{X}
+  is randomly either deleted or retained (i.e. not deleted).
+  The result is a point pattern,
+  consisting of those points of \code{X} that were retained.
+
+  Independent random thinning means that the retention/deletion of each
+  point is independent of other points.
+
+  The argument \code{P} determines the probability of \bold{retaining}
+  each point. It may be
+  \describe{
+    \item{a single number,}{so that each point will be retained with
+      the same probability \code{P};
+    }
+    \item{a vector of numbers,}{so that the \code{i}th point of \code{X}
+      will be retained with probability \code{P[i]};
+    }
+    \item{a function \code{P(x,y)},}{so that a point at a location
+      \code{(x,y)} will be retained with probability \code{P(x,y)};
+    }
+    \item{an object of class \code{"funxy"} or \code{"linfun"},}{so that
+    points in the pattern \code{X} will be retained with probabilities
+    \code{P(X)};
+    }
+    \item{a pixel image,}{containing values of the retention probability
+      for all locations in a region encompassing the point pattern.
+    }
+  }
+  If \code{P} is a function \code{P(x,y)},
+  it should be \sQuote{vectorised}, that is,
+  it should accept vector arguments \code{x,y} and should yield a
+  numeric vector of the same length. The function may have extra
+  arguments which are passed through the \code{\dots} argument.
+}
+\section{Reproducibility}{
+  The algorithm for random thinning was changed in \pkg{spatstat}
+  version \code{1.42-3}. Set \code{spatstat.options(fastthin=FALSE)}
+  to use the previous, slower algorithm, if it is desired to reproduce
+  results obtained with earlier versions.
+}
+\examples{
+  plot(redwood, main="thinning")
+  
+  # delete 20\% of points
+  Y <- rthin(redwood, 0.8)
+  points(Y, col="green", cex=1.4)
+
+  # function
+  f <- function(x,y) { ifelse(x < 0.4, 1, 0.5) }
+  Y <- rthin(redwood, f)
+
+  # pixel image
+  Z <- as.im(f, Window(redwood))
+  Y <- rthin(redwood, Z)
+
+  # pattern on a linear network
+  A <- runiflpp(30, simplenet)
+  B <- rthin(A, 0.2)
+  g <- function(x,y,seg,tp) { ifelse(y < 0.4, 1, 0.5) }
+  B <- rthin(A, linfun(g, simplenet))
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/run.simplepanel.Rd b/man/run.simplepanel.Rd
new file mode 100644
index 0000000..cc1712f
--- /dev/null
+++ b/man/run.simplepanel.Rd
@@ -0,0 +1,154 @@
+\name{run.simplepanel}
+\alias{clear.simplepanel}
+\alias{redraw.simplepanel}
+\alias{run.simplepanel}
+\title{
+  Run Point-and-Click Interface
+}
+\description{
+  Execute various operations in a simple
+  point-and-click user interface.
+}
+\usage{
+run.simplepanel(P, popup=TRUE, verbose = FALSE)
+clear.simplepanel(P)
+redraw.simplepanel(P, verbose = FALSE)
+}
+\arguments{
+  \item{P}{
+    An interaction panel (object of class \code{"simplepanel"},
+    created by \code{\link{simplepanel}} or \code{\link{grow.simplepanel}}).
+  }
+  \item{popup}{
+    Logical. If \code{popup=TRUE} (the default),
+    the panel will be displayed in a
+    new popup window. If \code{popup=FALSE},
+    the panel will be displayed on the current graphics
+    window if it already exists, and on a new window otherwise.
+  }
+  \item{verbose}{
+    Logical. If \code{TRUE}, debugging information will be printed.
+  }
+}
+\details{
+  These commands enable the user to run a simple, robust,
+  point-and-click interface to any \R code.
+  The interface is implemented
+  using only the basic graphics package in \R.
+
+  The argument \code{P} is an object of class \code{"simplepanel"},
+  created by \code{\link{simplepanel}} or \code{\link{grow.simplepanel}},  
+  which specifies the graphics to be displayed and the actions to be performed
+  when the user interacts with the panel.
+
+  The command \code{run.simplepanel(P)} activates the panel:
+  the display is initialised and the graphics system waits for the
+  user to click the panel. 
+  While the panel is active, the user can only interact with the panel;
+  the \R command line interface and the \R GUI cannot be used.
+  When the panel terminates (typically because the user
+  clicked a button labelled Exit), control returns to the \R command line
+  interface and the \R GUI. 
+
+  The command \code{clear.simplepanel(P)} clears all the display
+  elements in the panel, resulting in a blank display except for the
+  title of the panel.
+
+  The command \code{redraw.simplepanel(P)} redraws all the buttons
+  of the panel, according to the \code{redraw} functions contained
+  in the panel.
+
+  If \code{popup=TRUE} (the default), \code{run.simplepanel} begins by
+  calling \code{\link[grDevices]{dev.new}} so that a new popup window
+  is created; this window is closed using
+  \code{\link[grDevices]{dev.off}} when \code{run.simplepanel} terminates.
+  If \code{popup=FALSE}, the panel will be displayed on the current graphics
+  window if it already exists, and on a new window otherwise;
+  this window is not closed when \code{run.simplepanel} terminates.
+
+  For more sophisticated control of the graphics focus (for example, to
+  use the panel to control the display on another window),
+  initialise the graphics devices yourself using
+  \code{\link[grDevices]{dev.new}} or similar commands; save these devices
+  in the shared environment \code{env} of the panel \code{P};
+  and write the click/redraw functions of \code{P} in such a way that
+  they access these devices using \code{\link[grDevices]{dev.set}}.
+  Then use \code{run.simplepanel} with \code{popup=FALSE}.
+}
+\value{
+  The return value of \code{run.simplepanel(P)} is the value returned
+  by the \code{exit} function of \code{P}. See \code{\link{simplepanel}}.
+
+  The functions \code{clear.simplepanel} and \code{redraw.simplepanel}
+  return \code{NULL}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{simplepanel}}
+}
+\examples{
+  if(interactive()) {
+    # make boxes (alternatively use layout.boxes())
+    Bminus <- square(1)
+    Bvalue <- shift(Bminus, c(1.2, 0))
+    Bplus <- shift(Bvalue, c(1.2, 0))
+    Bdone <- shift(Bplus, c(1.2, 0))
+    myboxes <- list(Bminus, Bvalue, Bplus, Bdone)
+    myB <- do.call(boundingbox,myboxes)
+
+    # make environment containing an integer count
+    myenv <- new.env()
+    assign("answer", 0, envir=myenv)
+
+    # what to do when finished: return the count.
+    myexit <- function(e) { return(get("answer", envir=e)) }
+
+    # button clicks
+    # decrement the count
+    Cminus <- function(e, xy) {
+     ans <- get("answer", envir=e)
+     assign("answer", ans - 1, envir=e)
+     return(TRUE)
+   }
+   # display the count (clicking does nothing)
+   Cvalue <- function(...) { TRUE }
+   # increment the count
+   Cplus <- function(e, xy) {
+    ans <- get("answer", envir=e)
+    assign("answer", ans + 1, envir=e)
+    return(TRUE)
+   }
+   # quit button
+   Cdone <- function(e, xy) { return(FALSE) }
+
+   myclicks <- list("-"=Cminus,
+                    value=Cvalue,
+                    "+"=Cplus,
+                    done=Cdone)
+
+   # redraw the button that displays the current value of the count
+   Rvalue <- function(button, nam, e) {
+     plot(button, add=TRUE)
+     ans <- get("answer", envir=e)
+     text(centroid.owin(button), labels=ans)
+     return(TRUE)
+  }
+
+  # make the panel
+  P <- simplepanel("Counter",
+                   B=myB, boxes=myboxes,
+                   clicks=myclicks,
+                   redraws = list(NULL, Rvalue, NULL, NULL),
+                   exit=myexit, env=myenv)
+  P
+
+  run.simplepanel(P)
+  }
+}
+\keyword{iplot}
+\keyword{utilities}
diff --git a/man/runifdisc.Rd b/man/runifdisc.Rd
new file mode 100644
index 0000000..af61f64
--- /dev/null
+++ b/man/runifdisc.Rd
@@ -0,0 +1,70 @@
+\name{runifdisc}
+\alias{runifdisc}
+\title{Generate N Uniform Random Points in a Disc}
+\description{
+  Generate a random point pattern
+  containing \eqn{n} independent uniform random points
+  in a circular disc.
+}
+\usage{
+ runifdisc(n, radius=1, centre=c(0,0), ..., nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{n}{
+    Number of points.
+  }
+  \item{radius}{Radius of the circle.}
+  \item{centre}{Coordinates of the centre of the circle.}
+  \item{\dots}{
+    Arguments passed to \code{\link{disc}} controlling the
+    accuracy of approximation to the circle.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"})
+  if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}.
+}
+\details{
+  This function generates \code{n} independent random points,
+  uniformly distributed in a circular disc.
+
+  It is faster (for a circular window) than the general
+  code used in \code{\link{runifpoint}}.
+
+  To generate random points in an ellipse, first generate points in a
+  circle using \code{runifdisc},
+  then transform to an ellipse using \code{\link{affine}},
+  as shown in the examples.
+  
+  To generate random points in other windows, use
+  \code{\link{runifpoint}}.
+  To generate non-uniform random points, use \code{\link{rpoint}}.
+}
+\seealso{
+\code{\link{disc}},
+\code{\link{runifpoint}},
+\code{\link{rpoint}}
+}
+\examples{
+ # 100 random points in the unit disc
+ plot(runifdisc(100))
+ # 42 random points in the ellipse with major axis 3 and minor axis 1
+ X <- runifdisc(42)
+ Y <- affine(X, mat=diag(c(3,1)))
+ plot(Y)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/runiflpp.Rd b/man/runiflpp.Rd
new file mode 100644
index 0000000..02ea871
--- /dev/null
+++ b/man/runiflpp.Rd
@@ -0,0 +1,60 @@
+\name{runiflpp}
+\alias{runiflpp}
+\title{
+  Uniform Random Points on a Linear Network
+}
+\description{
+  Generates \eqn{n} random points, independently and
+  uniformly distributed, on a linear network.
+}
+\usage{
+runiflpp(n, L, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{n}{
+    Number of random points to generate.
+    A nonnegative integer, or a vector of integers
+    specifying the number of points of each type.
+  }
+  \item{L}{
+    A linear network (object of class \code{"linnet"},
+    see \code{\link{linnet}}).
+  }
+  \item{nsim}{Number of simulated realisations to generate.}
+  \item{drop}{
+    Logical value indicating what to do when \code{nsim=1}.
+    If \code{drop=TRUE} (the default), the result is a point pattern.
+    If \code{drop=FALSE}, the result is a list with one entry which is a
+    point pattern.
+  }
+}
+\details{
+  This function uses \code{\link{runifpointOnLines}}
+  to generate the random points.
+}
+\value{
+  If \code{nsim = 1} and \code{drop=TRUE},
+  a point pattern on the linear network,
+  i.e.\ an object of class \code{"lpp"}.
+  Otherwise, a list of such point patterns.
+}
+\author{
+  Ang Qi Wei \email{aqw07398 at hotmail.com} and
+  \adrian 
+}
+\seealso{
+ \code{\link{rlpp}} for non-uniform random points;
+ \code{\link{rpoislpp}} for Poisson point process;
+ 
+ \code{\link{lpp}},
+ \code{\link{linnet}}
+}
+\examples{
+   data(simplenet)
+   X <- runiflpp(10, simplenet)
+   plot(X)
+  # marked
+   Z <- runiflpp(c(a=10, b=3), simplenet)
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/runifpoint.Rd b/man/runifpoint.Rd
new file mode 100644
index 0000000..9a35de5
--- /dev/null
+++ b/man/runifpoint.Rd
@@ -0,0 +1,107 @@
+\name{runifpoint}
+\alias{runifpoint}
+\title{Generate N Uniform Random Points}
+\description{
+  Generate a random point pattern
+  containing \eqn{n} independent uniform random points.
+}
+\usage{
+ runifpoint(n, win=owin(c(0,1),c(0,1)), giveup=1000, warn=TRUE, \dots,
+            nsim=1, drop=TRUE, ex=NULL)
+}
+\arguments{
+  \item{n}{
+    Number of points.
+  }
+  \item{win}{
+    Window in which to simulate the pattern.
+    An object of class \code{"owin"}
+    or something acceptable to \code{\link{as.owin}}.
+  }
+  \item{giveup}{
+    Number of attempts in the rejection method after which
+    the algorithm should stop trying to generate new points.
+  }
+  \item{warn}{
+    Logical. Whether to issue a warning if \code{n} is very large.
+    See Details.
+  }
+  \item{\dots}{Ignored.}
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+  \item{ex}{
+    Optional. A point pattern to use as the example.
+    If \code{ex} is given and \code{n} and \code{win} are missing,
+    then \code{n} and \code{win} will be calculated from
+    the point pattern \code{ex}.
+  }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"})
+  if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}.
+}
+\details{
+  This function generates \code{n} independent random points,
+  uniformly distributed in the window \code{win}.
+  (For nonuniform distributions, see \code{\link{rpoint}}.)
+
+  The algorithm depends on the type of window, as follows:
+  \itemize{
+    \item
+    If \code{win} is a rectangle then 
+    \eqn{n} independent random points, uniformly distributed
+    in the rectangle, are generated by assigning uniform random values to their
+    cartesian coordinates.
+    \item
+    If \code{win} is a binary image mask, then a random sequence of 
+    pixels is selected (using \code{\link{sample}})
+    with equal probabilities. Then for each pixel in the sequence
+    we generate a uniformly distributed random point in that pixel.
+    \item
+    If \code{win} is a polygonal window, the algorithm uses the rejection
+    method. It finds a rectangle enclosing the window,
+    generates points in this rectangle, and tests whether they fall in
+    the desired window. It gives up when \code{giveup * n} tests
+    have been performed without yielding \code{n} successes.
+  }
+  The algorithm for binary image masks is faster than the rejection
+  method but involves discretisation.
+
+  If \code{warn=TRUE}, then a warning will be issued if \code{n} is very large.
+  The threshold is \code{\link{spatstat.options}("huge.npoints")}.
+  This warning has no consequences,
+  but it helps to trap a number of common errors.
+}
+\seealso{
+\code{\link{ppp.object}},
+\code{\link{owin.object}},
+\code{\link{rpoispp}},
+\code{\link{rpoint}}
+}
+\examples{
+ # 100 random points in the unit square
+ pp <- runifpoint(100)
+ # irregular window
+ data(letterR)
+ # polygonal
+ pp <- runifpoint(100, letterR)
+ # binary image mask
+ pp <- runifpoint(100, as.mask(letterR))
+ ##
+ # randomising an existing point pattern
+ runifpoint(npoints(cells), win=Window(cells))
+ runifpoint(ex=cells)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/runifpoint3.Rd b/man/runifpoint3.Rd
new file mode 100644
index 0000000..32b8ac2
--- /dev/null
+++ b/man/runifpoint3.Rd
@@ -0,0 +1,52 @@
+\name{runifpoint3}
+\alias{runifpoint3}
+\title{
+  Generate N Uniform Random Points in Three Dimensions
+}
+\description{
+  Generate a random point pattern containing \code{n}
+  independent, uniform random points in three dimensions.
+}
+\usage{
+runifpoint3(n, domain = box3(), nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{n}{
+    Number of points to be generated.
+  }
+  \item{domain}{
+    Three-dimensional box in which the process should be generated.
+    An object of class \code{"box3"}.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\value{
+  If \code{nsim = 1} and \code{drop=TRUE}, a point pattern in
+  three dimensions (an object of class \code{"pp3"}).
+  If \code{nsim > 1}, a list of such point patterns.
+}
+\details{
+  This function generates \code{n} independent random points,
+  uniformly distributed in the three-dimensional box \code{domain}.
+}
+\seealso{
+  \code{\link{rpoispp3}}, 
+  \code{\link{pp3}}, 
+  \code{\link{box3}}
+}
+\examples{
+   X <- runifpoint3(50)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/runifpointOnLines.Rd b/man/runifpointOnLines.Rd
new file mode 100644
index 0000000..8df31fd
--- /dev/null
+++ b/man/runifpointOnLines.Rd
@@ -0,0 +1,63 @@
+\name{runifpointOnLines}
+\alias{runifpointOnLines}
+\title{Generate N Uniform Random Points On Line Segments}
+\description{
+  Given a line segment pattern, generate a random point pattern
+  consisting of \code{n} points uniformly distributed on the
+  line segments.
+}
+\usage{
+runifpointOnLines(n, L, nsim=1)
+}
+\arguments{
+  \item{n}{Number of points to generate.}
+  \item{L}{Line segment pattern (object of class \code{"psp"})
+    on which the points should lie.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+}
+\details{
+  This command generates a point pattern consisting of \code{n}
+  independent random points, each point uniformly distributed on
+  the line segment pattern. This means that, for each random point,
+  \itemize{
+    \item the
+    probability of falling on a particular segment is proportional to the
+    length of the segment; and
+    \item given that the point falls on a particular segment,
+    it has uniform probability density along that segment.
+  }
+  If \code{n} is a single integer, the result is an unmarked point
+  pattern containing \code{n} points.
+  If \code{n} is a vector of integers, the result is a marked point
+  pattern, with \code{m} different types of points, where
+  \code{m = length(n)},
+  in which there are \code{n[j]} points of type \code{j}.
+}
+\value{
+  If \code{nsim = 1}, 
+  a point pattern (object of class \code{"ppp"}) with the same
+  window as \code{L}.
+  If \code{nsim > 1}, a list of point patterns.
+}
+\seealso{
+  \code{\link{psp}},
+  \code{\link{ppp}},
+  \code{\link{pointsOnLines}},
+  \code{\link{runifpoint}}
+}
+\examples{
+  X <- psp(runif(10), runif(10), runif(10), runif(10),  window=owin())
+  Y <- runifpointOnLines(20, X)
+  plot(X, main="")
+  plot(Y, add=TRUE)
+  Z <- runifpointOnLines(c(5,5), X)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/runifpointx.Rd b/man/runifpointx.Rd
new file mode 100644
index 0000000..94e0ea5
--- /dev/null
+++ b/man/runifpointx.Rd
@@ -0,0 +1,53 @@
+\name{runifpointx}
+\alias{runifpointx}
+\title{
+  Generate N Uniform Random Points in Any Dimensions
+}
+\description{
+  Generate a random point pattern containing \code{n}
+  independent, uniform random points in any number of spatial dimensions.
+}
+\usage{
+runifpointx(n, domain, nsim=1, drop=TRUE)
+}
+\arguments{
+  \item{n}{
+    Number of points to be generated.
+  }
+  \item{domain}{
+    Multi-dimensional box in which the process should be generated.
+    An object of class \code{"boxx"}.
+  }
+  \item{nsim}{Number of simulated realisations to be generated.}
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the
+    result will be a point pattern, rather than a list 
+    containing a single point pattern.
+  }
+}
+\value{
+  If \code{nsim = 1} and \code{drop=TRUE},
+  a point pattern (an object of class \code{"ppx"}).
+  If \code{nsim > 1} or \code{drop=FALSE}, a list of such point patterns.
+}
+\details{
+  This function generates a pattern of \code{n} independent random points,
+  uniformly distributed in the multi-dimensional box \code{domain}.
+}
+\seealso{
+  \code{\link{rpoisppx}}, 
+  \code{\link{ppx}}, 
+  \code{\link{boxx}}
+}
+\examples{
+   w <- boxx(x=c(0,1), y=c(0,1), z=c(0,1), t=c(0,3))
+   X <- runifpointx(50, w)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/scalardilate.Rd b/man/scalardilate.Rd
new file mode 100644
index 0000000..2eb7457
--- /dev/null
+++ b/man/scalardilate.Rd
@@ -0,0 +1,85 @@
+\name{scalardilate} 
+\alias{scalardilate}
+\alias{scalardilate.im}
+\alias{scalardilate.owin}
+\alias{scalardilate.ppp}
+\alias{scalardilate.psp}
+\alias{scalardilate.default}
+\title{Apply Scalar Dilation}
+\description{
+  Applies scalar dilation to a plane geometrical object,
+  such as a point pattern or a window,
+  relative to a specified origin.
+}
+\usage{
+  scalardilate(X, f, \dots)
+
+  \method{scalardilate}{im}(X, f, \dots, origin=NULL)
+
+  \method{scalardilate}{owin}(X, f, \dots, origin=NULL)
+
+  \method{scalardilate}{ppp}(X, f, \dots, origin=NULL)
+
+  \method{scalardilate}{psp}(X, f, \dots, origin=NULL)
+
+  \method{scalardilate}{default}(X, f, \dots)
+}
+\arguments{
+  \item{X}{Any suitable dataset representing a two-dimensional
+    object, such as a point pattern (object of class \code{"ppp"}),
+    a window (object of class \code{"owin"}), a pixel image
+    (class \code{"im"}) and so on.
+  }
+  \item{f}{
+    Scalar dilation factor. A finite number greater than zero.
+  }
+  \item{\dots}{Ignored by the methods.}
+  \item{origin}{
+    Origin for the scalar dilation. Either a vector of 2 numbers,
+    or one of the character strings
+    \code{"centroid"}, \code{"midpoint"} or  \code{"bottomleft"}
+    (partially matched).
+  }
+}
+\value{
+  Another object of the same type, representing the
+  result of applying the scalar dilation.
+}
+\details{
+  This command performs scalar dilation of the object \code{X}
+  by the factor \code{f} relative to the origin specified by
+  \code{origin}. 
+
+  The function \code{scalardilate} is generic, with methods for
+  windows (class \code{"owin"}), point patterns (class \code{"ppp"}),
+  pixel images (class \code{"im"}), line segment patterns (class \code{"psp"})
+  and a default method.
+
+  If the argument \code{origin} is not given,
+  then every spatial coordinate is multiplied by the factor \code{f}.
+
+  If \code{origin} is given, then scalar dilation is performed
+  relative to the specified origin. Effectively, \code{X} is shifted
+  so that \code{origin} is moved to \code{c(0,0)}, then scalar dilation
+  is performed, then the result is shifted so that \code{c(0,0)} is
+  moved to \code{origin}.
+
+  This command is a special case of an affine transformation:
+  see \code{\link{affine}}.
+}
+\seealso{
+  \code{\link{affine}},
+  \code{\link{shift}}
+}
+\examples{
+plot(letterR)
+plot(scalardilate(letterR, 0.7, origin="bot"), col="red", add=TRUE)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/scaletointerval.Rd b/man/scaletointerval.Rd
new file mode 100644
index 0000000..26ccb62
--- /dev/null
+++ b/man/scaletointerval.Rd
@@ -0,0 +1,56 @@
+\name{scaletointerval}  
+\alias{scaletointerval}
+\alias{scaletointerval.default}
+\alias{scaletointerval.im}
+\title{Rescale Data to Lie Between Specified Limits}
+\description{
+  Rescales a dataset so that the values range exactly between the
+  specified limits.
+}
+\usage{
+  scaletointerval(x, from=0, to=1, xrange=range(x))
+  \method{scaletointerval}{default}(x, from=0, to=1, xrange=range(x))
+  \method{scaletointerval}{im}(x, from=0, to=1, xrange=range(x))
+}
+\arguments{
+  \item{x}{Data to be rescaled.}
+  \item{from,to}{Lower and upper endpoints of the interval
+    to which the values of \code{x} should be rescaled.
+  }
+  \item{xrange}{
+    Optional range of values of \code{x} that should be mapped
+    to the new interval.
+  }
+}
+\details{
+  These functions rescale a dataset \code{x}
+  so that its values range exactly between the limits
+  \code{from} and \code{to}.
+
+  The method for pixel images (objects of class \code{"im"})
+  applies this scaling to the pixel values of \code{x}.
+  
+  Rescaling cannot be performed if the values in \code{x} are not
+  interpretable as numeric, or if the values in \code{x} are all equal.
+}
+\value{
+  An object of the same type as \code{x}.
+}
+\seealso{
+  \code{\link{scale}}
+}
+\examples{
+  X <- as.im(function(x,y) {x+y+3}, unit.square())
+  summary(X)
+  Y <- scaletointerval(X)
+  summary(Y)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{univar}
diff --git a/man/scan.test.Rd b/man/scan.test.Rd
new file mode 100644
index 0000000..768d706
--- /dev/null
+++ b/man/scan.test.Rd
@@ -0,0 +1,158 @@
+\name{scan.test}
+\alias{scan.test}
+\title{
+  Spatial Scan Test
+}
+\description{
+  Performs the Spatial Scan Test for clustering
+  in a spatial point pattern, or for clustering of one type of point
+  in a bivariate spatial point pattern.
+}
+\usage{
+scan.test(X, r, ...,
+          method = c("poisson", "binomial"),
+          nsim = 19,
+          baseline = NULL,
+          case = 2, 
+          alternative = c("greater", "less", "two.sided"),
+          verbose = TRUE)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{r}{
+    Radius of circle to use. A single number or a numeric vector.
+  }
+  \item{\dots}{
+    Optional. Arguments passed to \code{\link{as.mask}} to determine the
+    spatial resolution of the computations.
+  }
+  \item{method}{
+    Either \code{"poisson"} or \code{"binomial"}
+    specifying the type of likelihood.
+  }
+  \item{nsim}{
+    Number of simulations for computing Monte Carlo p-value.
+  }
+  \item{baseline}{
+    Baseline for the Poisson intensity, if \code{method="poisson"}.
+    A pixel image or a function.
+  }
+  \item{case}{
+    Which type of point should be interpreted as a case,
+    if \code{method="binomial"}.
+    Integer or character string.
+  }
+  \item{alternative}{
+    Alternative hypothesis: \code{"greater"} if the alternative
+    postulates that the mean number of points inside the circle
+    will be greater than expected under the null.
+  }
+  \item{verbose}{
+    Logical. Whether to print progress reports.
+  }
+}
+\details{
+  The spatial scan test (Kulldorf, 1997) is applied
+  to the point pattern \code{X}.
+
+  In a nutshell,
+  \itemize{
+    \item
+    If \code{method="poisson"} then 
+    a significant result would mean that there is a circle of radius
+    \code{r}, located somewhere in the spatial domain of the data,
+    which contains a significantly higher than
+    expected number of points of \code{X}. That is, the
+    pattern \code{X} exhibits spatial clustering.
+    \item 
+    If \code{method="binomial"} then \code{X} must be a bivariate (two-type)
+    point pattern. By default, the first type of point is interpreted as
+    a control (non-event) and the second type of point as a case (event).
+    A significant result would mean that there is a
+    circle of radius \code{r} which contains a significantly higher than
+    expected number of cases. That is, the cases are clustered together,
+    conditional on the locations of all points.
+  }
+
+  Following is a more detailed explanation.
+  \itemize{
+    \item 
+    If \code{method="poisson"} then the scan test based on Poisson
+    likelihood is performed (Kulldorf, 1997).
+    The dataset \code{X} is treated as an unmarked point pattern.
+    By default (if \code{baseline} is not specified) 
+    the null hypothesis is complete spatial randomness CSR
+    (i.e. a uniform Poisson process).
+    The alternative hypothesis is a Poisson process with
+    one intensity \eqn{\beta_1}{beta1} inside some circle of radius
+    \code{r} and another intensity \eqn{\beta_0}{beta0} outside the
+    circle.
+    If \code{baseline} is given, then it should be a pixel image
+    or a \code{function(x,y)}. The null hypothesis is
+    an inhomogeneous Poisson process with intensity proportional
+    to \code{baseline}. The alternative hypothesis is an inhomogeneous
+    Poisson process with intensity
+    \code{beta1 * baseline} inside some circle of radius \code{r},
+    and \code{beta0 * baseline} outside the circle.
+    \item
+    If \code{method="binomial"} then the scan test based on
+    binomial likelihood is performed (Kulldorf, 1997).
+    The dataset \code{X} must be a bivariate point pattern,
+    i.e. a multitype point pattern with two types.
+    The null hypothesis is that all permutations of the type labels are
+    equally likely.
+    The alternative hypothesis is that some circle of radius
+    \code{r} has a higher proportion of points of the second type,
+    than expected under the null hypothesis.
+  }
+
+  The result of \code{scan.test} is a hypothesis test
+  (object of class \code{"htest"}) which can be plotted to
+  report the results. The component \code{p.value} contains the
+  \eqn{p}-value.
+
+  The result of \code{scan.test} can also be plotted (using the plot
+  method for the class \code{"scan.test"}). The plot is
+  a pixel image of the Likelihood Ratio Test Statistic
+  (2 times the log likelihood ratio) as a function
+  of the location of the centre of the circle.
+  This pixel image can be extracted from the object
+  using \code{\link{as.im.scan.test}}.
+  The Likelihood Ratio Test Statistic is computed by
+  \code{\link{scanLRTS}}.
+}
+\value{
+  An object of class \code{"htest"} (hypothesis test)
+  which also belongs to the class \code{"scan.test"}.
+  Printing this object gives the result of the test.
+  Plotting this object displays the Likelihood Ratio Test Statistic
+  as a function of the location of the centre of the circle.
+}
+\references{
+  Kulldorff, M. (1997)
+  A spatial scan statistic.
+  \emph{Communications in Statistics --- Theory and Methods}
+  \bold{26}, 1481--1496.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{plot.scan.test}},
+  \code{\link{as.im.scan.test}},
+  \code{\link{relrisk}},
+  \code{\link{scanLRTS}}
+}
+\examples{
+   nsim <- if(interactive()) 19 else 2
+   rr <- if(interactive()) seq(0.5, 1, by=0.1) else c(0.5, 1)
+   scan.test(redwood, 0.1 * rr, method="poisson", nsim=nsim)
+   scan.test(chorley, rr, method="binomial", case="larynx", nsim=nsim)
+}
+\keyword{htest}
+\keyword{spatial}
diff --git a/man/scanLRTS.Rd b/man/scanLRTS.Rd
new file mode 100644
index 0000000..48281d5
--- /dev/null
+++ b/man/scanLRTS.Rd
@@ -0,0 +1,150 @@
+\name{scanLRTS}
+\alias{scanLRTS}
+\title{
+  Likelihood Ratio Test Statistic for Scan Test
+}
+\description{
+  Calculate the Likelihood Ratio Test Statistic for the Scan Test,
+  at each spatial location.
+}
+\usage{
+scanLRTS(X, r, \dots,
+   method = c("poisson", "binomial"),
+   baseline = NULL, case = 2,
+   alternative = c("greater", "less", "two.sided"),
+   saveopt = FALSE,
+   Xmask = NULL)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{r}{
+    Radius of circle to use. A single number or a numeric vector.
+  }
+  \item{\dots}{
+    Optional. Arguments passed to \code{\link{as.mask}} to determine the
+    spatial resolution of the computations.
+  }
+  \item{method}{
+    Either \code{"poisson"} or \code{"binomial"}
+    specifying the type of likelihood.
+  }
+  \item{baseline}{
+    Baseline for the Poisson intensity, if \code{method="poisson"}.
+    A pixel image or a function.
+  }
+  \item{case}{
+    Which type of point should be interpreted as a case,
+    if \code{method="binomial"}.
+    Integer or character string.
+  }
+  \item{alternative}{
+    Alternative hypothesis: \code{"greater"} if the alternative
+    postulates that the mean number of points inside the circle
+    will be greater than expected under the null.
+  }
+  \item{saveopt}{
+    Logical value indicating to save the optimal value of \code{r}
+    at each location.
+  }
+  \item{Xmask}{
+    Internal use only.
+  }
+}
+\details{
+  This command computes, for all spatial locations \code{u},
+  the Likelihood Ratio Test Statistic \eqn{\Lambda(u)}{Lambda(u)}
+  for a test of homogeneity at the location \eqn{u}, as described
+  below. The result is a pixel image giving the values of
+  \eqn{\Lambda(u)}{Lambda(u)} at each pixel. 
+
+  The \bold{maximum} value of \eqn{\Lambda(u)}{Lambda(u)} over all locations
+  \eqn{u} is the \emph{scan statistic}, which is the basis of
+  the   \emph{scan test} performed by \code{\link{scan.test}}.
+
+  \itemize{
+    \item 
+    If \code{method="poisson"} then the test statistic is based on Poisson
+    likelihood.
+    The dataset \code{X} is treated as an unmarked point pattern.
+    By default (if \code{baseline} is not specified) 
+    the null hypothesis is complete spatial randomness CSR
+    (i.e. a uniform Poisson process).
+    At the spatial location \eqn{u},
+    the alternative hypothesis is a Poisson process with
+    one intensity \eqn{\beta_1}{beta1} inside the circle of radius
+    \code{r} centred at \eqn{u},
+    and another intensity \eqn{\beta_0}{beta0} outside the
+    circle.
+    If \code{baseline} is given, then it should be a pixel image
+    or a \code{function(x,y)}. The null hypothesis is
+    an inhomogeneous Poisson process with intensity proportional
+    to \code{baseline}. The alternative hypothesis is an inhomogeneous
+    Poisson process with intensity
+    \code{beta1 * baseline} inside the circle,
+    and \code{beta0 * baseline} outside the circle.
+    \item
+    If \code{method="binomial"} then the test statistic is based on
+    binomial likelihood.
+    The dataset \code{X} must be a bivariate point pattern,
+    i.e. a multitype point pattern with two types.
+    The null hypothesis is that all permutations of the type labels are
+    equally likely.
+    The alternative hypothesis is that the circle of radius
+    \code{r} centred at \eqn{u}
+    has a higher proportion of points of the second type,
+    than expected under the null hypothesis.
+  }
+
+  If \code{r} is a vector of more than one value for the radius,
+  then the calculations described above are performed for
+  every value of \code{r}. Then the maximum over \code{r} is taken
+  for each spatial location \eqn{u}.
+  The resulting pixel value of \code{scanLRTS} at a location
+  \eqn{u} is the profile maximum of the Likelihood Ratio Test Statistic,
+  that is, the maximum of the
+  Likelihood Ratio Test Statistic for circles of all radii,
+  centred at the same location \eqn{u}.
+
+  If you have already performed a scan test using
+  \code{\link{scan.test}}, the  Likelihood Ratio Test Statistic
+  can be extracted from the test result using the 
+  function \code{\link{as.im.scan.test}}.
+}
+\section{Warning: window size}{
+  Note that the result of \code{scanLRTS} is a pixel image
+  on a larger window than the original window of \code{X}.
+  The expanded window contains the centre of any circle
+  of radius \code{r}
+  that has nonempty intersection with the original window.
+}
+\value{
+  A pixel image (object of class \code{"im"}) whose pixel values
+  are the values of the (profile) Likelihood Ratio Test Statistic at each
+  spatial location. 
+}
+\references{
+  Kulldorff, M. (1997)
+  A spatial scan statistic.
+  \emph{Communications in Statistics --- Theory and Methods}
+  \bold{26}, 1481--1496.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+ \code{\link{scan.test}},
+ \code{\link{as.im.scan.test}}
+}
+\examples{
+   plot(scanLRTS(redwood, 0.1, method="poisson"))
+   sc <- scanLRTS(chorley, 1, method="binomial", case="larynx") 
+   plot(sc)
+   scanstatchorley <- max(sc)
+}
+\keyword{htest}
+\keyword{spatial}
diff --git a/man/scanpp.Rd b/man/scanpp.Rd
new file mode 100644
index 0000000..620d3c7
--- /dev/null
+++ b/man/scanpp.Rd
@@ -0,0 +1,96 @@
+\name{scanpp}
+\alias{scanpp}
+\title{Read Point Pattern From Data File} 
+\description{
+Reads a point pattern dataset from a text file.
+}
+\usage{
+   scanpp(filename, window, header=TRUE, dir="", factor.marks=NULL, ...) 
+}
+\arguments{
+  \item{filename}{
+    String name of the file containing
+    the coordinates of the points in the point pattern,
+    and their marks if any.
+    }
+    \item{window}{
+      Window for the point pattern. An object of class \code{"owin"}.
+    }
+    \item{header}{
+      Logical flag indicating whether the first line of the
+      file contains headings for the columns.
+      Passed to \code{\link[utils]{read.table}}.
+    }
+    \item{dir}{
+      String containing the path name of the directory
+      in which \code{filename} is to be found.
+      Default is the current directory.
+    }
+    \item{factor.marks}{
+      Logical vector (or NULL) indicating whether marks are to be
+      interpreted as factors. Defaults to \code{NULL} which means that
+      strings will be interpreted as factors while numeric variables
+      will not. See details.
+    }
+    \item{\dots}{
+      Ignored.
+    }
+}
+\value{
+  A point pattern (an object of class \code{"ppp"},
+  see \code{\link{ppp.object}}).
+}
+\details{
+  This simple function reads a point pattern dataset from a file
+  containing the cartesian coordinates of its points,
+  and optionally the mark values for these points.
+
+  The file identified by \code{filename} in directory \code{dir}
+  should be a text file that can be read using \code{\link[utils]{read.table}}.
+  Thus, each line of the file (except possibly the first line)
+  contains data for one point in the
+  point pattern. Data are arranged in columns. There should be either
+  two columns (for an unmarked point pattern) or more columns (for a
+  marked point pattern).
+
+  If \code{header=FALSE} then the first two columns of data
+  will be interpreted as the \eqn{x} and \eqn{y} coordinates
+  of points. Remaining columns, if present, will be interpreted as
+  containing the marks for these points.
+
+  If \code{header=TRUE} then the first line of the file should contain
+  string names for each of the columns of data. If there are columns
+  named \code{x} and \code{y} then these will be taken as the
+  cartesian coordinates, and any remaining columns will be taken as
+  the marks. If there are no columns named \code{x} and \code{y}
+  then the first and second columns will be taken as the cartesian
+  coordinates.
+
+  If a logical vector is provided for \code{factor.marks} the length
+  should equal the number of mark columns (a shorter \code{factor.marks}
+  is recycled to this length). This vector is then used to determine
+  which mark columns should be interpreted as factors. Note: Strings will
+  not be interpreted as factors if the corresponding entry in
+  \code{factor.marks} is \code{FALSE}.
+
+  Note that there is intentionally no default for \code{window}.
+  The window of observation should be specified.
+  If you really need to estimate the window, use the
+  Ripley-Rasson estimator \code{\link{ripras}}.
+}
+\seealso{
+  \code{\link{ppp.object}},
+  \code{\link{ppp}},
+  \code{\link{as.ppp}},
+  \code{\link{ripras}}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{IO}
+ 
+ 
diff --git a/man/sdr.Rd b/man/sdr.Rd
new file mode 100644
index 0000000..897e55d
--- /dev/null
+++ b/man/sdr.Rd
@@ -0,0 +1,111 @@
+\name{sdr}
+\alias{sdr}
+\title{
+  Sufficient Dimension Reduction
+}
+\description{
+  Given a point pattern and a set of predictors,
+  find a minimal set of new predictors, each constructed as
+  a linear combination of the original predictors.
+}
+\usage{
+sdr(X, covariates, method = c("DR", "NNIR", "SAVE", "SIR", "TSE"),
+    Dim1 = 1, Dim2 = 1, predict=FALSE)
+}
+\arguments{
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{covariates}{
+    A list of pixel images (objects of class \code{"im"})
+    to serve as predictor variables.
+  }
+  \item{method}{
+    Character string indicating which method to use. See Details.
+  }
+  \item{Dim1}{
+    Dimension of the first order Central Intensity Subspace
+    (applicable when \code{method} is \code{"DR"}, \code{"NNIR"},
+    \code{"SAVE"} or \code{"TSE"}). 
+  }
+  \item{Dim2}{
+    Dimension of the second order Central Intensity Subspace
+    (applicable when \code{method="TSE"}).
+  }
+  \item{predict}{
+    Logical value indicating whether to compute the new predictors
+    as well.
+  }
+}
+\details{
+  Given a point pattern \eqn{X} and predictor variables
+  \eqn{Z_1, \dots, Z_p}{Z[1], ..., Z[p]},
+  Sufficient Dimension Reduction methods
+  (Guan and Wang, 2010) attempt to find a minimal set
+  of new predictor variables, each constructed by taking a linear combination
+  of the original predictors, which explain the dependence of
+  \eqn{X} on   \eqn{Z_1, \dots, Z_p}{Z[1], ..., Z[p]}.
+  The methods do not assume any particular form of dependence
+  of the point pattern on the predictors.
+  The predictors are assumed to
+  be Gaussian random fields.
+
+  Available methods are:
+  \tabular{ll}{
+    \code{method="DR"} \tab directional regression \cr
+    \code{method="NNIR"} \tab nearest neighbour inverse regression \cr
+    \code{method="SAVE"} & sliced average variance estimation \cr
+    \code{method="SIR"} & sliced inverse regression \cr
+    \code{method="TSE"} & two-step estimation \cr
+  }
+
+  The result includes a matrix \code{B} whose columns are estimates
+  of the basis vectors of the space of new predictors. That is,
+  the \code{j}th column of \code{B} expresses the \code{j}th new
+  predictor as a linear combination of the original predictors.
+
+  If \code{predict=TRUE}, the new predictors are also evaluated.
+  They can also be evaluated using \code{\link{sdrPredict}}.
+}
+\value{
+  A list with components \code{B, M}
+  or \code{B, M1, M2} where
+  \code{B} is a matrix whose columns are estimates of the basis vectors
+  for the space, and \code{M} or \code{M1,M2} are matrices containing
+  estimates of the kernel.
+
+  If \code{predict=TRUE}, the result also includes a component
+  \code{Y} which is a list of pixel images giving the values of the
+  new predictors.
+}
+\examples{
+   A <- sdr(bei, bei.extra, predict=TRUE)
+   A
+   Y1 <- A$Y[[1]]
+   plot(Y1)
+   points(bei, pch=".", cex=2)
+   # investigate likely form of dependence
+   plot(rhohat(bei, Y1))
+}
+\seealso{
+  \code{\link{sdrPredict}} to compute the new predictors from the
+  coefficient matrix.
+  
+  \code{\link{dimhat}} to estimate the subspace dimension.
+
+  \code{\link{subspaceDistance}}
+}
+\references{
+  Guan, Y. and Wang, H. (2010)
+  Sufficient dimension reduction for spatial point
+  processes directed by Gaussian random fields.
+  \emph{Journal of the Royal Statistical Society, Series B},
+  \bold{72}, 367--387.
+}
+\author{
+  Matlab original by Yongtao Guan,
+  translated to \R by Suman Rakshit.
+}
+\keyword{spatial}
+\keyword{multivariate}
+
diff --git a/man/sdrPredict.Rd b/man/sdrPredict.Rd
new file mode 100644
index 0000000..31596c4
--- /dev/null
+++ b/man/sdrPredict.Rd
@@ -0,0 +1,48 @@
+\name{sdrPredict}
+\alias{sdrPredict}
+\title{
+  Compute Predictors from Sufficient Dimension Reduction
+}
+\description{
+  Given the result of a Sufficient Dimension Reduction method,
+  compute the new predictors.
+}
+\usage{
+  sdrPredict(covariates, B)
+}
+\arguments{
+  \item{covariates}{
+    A list of pixel images (objects of class \code{"im"}).
+  }
+  \item{B}{
+    Either a matrix of coefficients for the covariates, or the result of
+    a call to \code{\link{sdr}}.
+  }
+}
+\details{
+  This function assumes that \code{\link{sdr}} has already been used to
+  find a minimal set of predictors based on the \code{covariates}.
+  The argument \code{B} should be either the result of \code{\link{sdr}}
+  or the coefficient matrix returned as one of the
+  results of \code{\link{sdr}}. The columns of this matrix define linear
+  combinations of the \code{covariates}. This function evaluates those
+  linear combinations, and returns a list of pixel images containing the
+  new predictors.
+}
+\value{
+  A list of pixel images  (objects of class \code{"im"})
+  with one entry for each column of \code{B}.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{sdr}}
+}
+\examples{
+   A <- sdr(bei, bei.extra)
+   Y <- sdrPredict(bei.extra, A)
+   Y
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/segregation.test.Rd b/man/segregation.test.Rd
new file mode 100644
index 0000000..ca1a31c
--- /dev/null
+++ b/man/segregation.test.Rd
@@ -0,0 +1,99 @@
+\name{segregation.test}
+\alias{segregation.test}
+\alias{segregation.test.ppp}
+\title{
+  Test of Spatial Segregation of Types
+}
+\description{
+  Performs a Monte Carlo test of spatial segregation of the types
+  in a multitype point pattern.
+}
+\usage{
+segregation.test(X, \dots)
+
+\method{segregation.test}{ppp}(X, \dots, nsim = 19,
+       permute = TRUE, verbose = TRUE, Xname)
+}
+\arguments{
+  \item{X}{
+    Multitype point pattern (object of class \code{"ppp"}
+    with factor-valued marks).
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{\link{relrisk.ppp}}
+    to control the smoothing parameter or bandwidth selection.
+  }
+  \item{nsim}{
+    Number of simulations for the Monte Carlo test.
+  }
+  \item{permute}{
+    Argument passed to \code{\link{rlabel}}. If \code{TRUE} (the
+    default), randomisation is performed by randomly permuting the
+    labels of \code{X}. If \code{FALSE}, randomisation is performing
+    by resampling the labels with replacement.
+  }
+  \item{verbose}{
+    Logical value indicating whether to print progress reports.
+  }
+  \item{Xname}{
+    Optional character string giving the name of the dataset \code{X}.
+  }
+}
+\details{
+  The Monte Carlo test of spatial segregation of types,
+  proposed by Kelsall and Diggle (1995)
+  and Diggle et al (2005), is applied to the point pattern \code{X}.
+  The test statistic is
+  \deqn{
+    T = \sum_i \sum_m \left( \widehat p(m \mid x_i) - \overline p_m
+    \right)^2
+  }{
+    T = sum[i] sum[m] (phat(m | x[i]) - pbar[m])^2
+  }
+  where \eqn{\widehat p(m \mid x_i)}{phat(m | x[i])} is the
+  leave-one-out kernel smoothing estimate of the probability that the
+  \eqn{i}-th data point has type \eqn{m}, and
+  \eqn{\overline p_m}{pbar[m]} is the average fraction of data points
+  which are of type \eqn{m}.
+  The statistic \eqn{T} is evaluated for the data and
+  for \code{nsim} randomised versions of \code{X}, generated by
+  randomly permuting or resampling the marks.
+  
+  Note that, by default, automatic bandwidth selection will be
+  performed separately for each randomised pattern. This computation
+  can be very time-consuming but is necessary for the test to be
+  valid in most conditions. A short-cut is to specify the value of
+  the smoothing bandwidth \code{sigma} as shown in the examples.
+}
+\value{
+  An object of class \code{"htest"} representing the result of the test.
+}
+\references{
+  Kelsall, J.E. and Diggle, P.J. (1995)
+  Kernel estimation of relative risk.
+  \emph{Bernoulli} \bold{1}, 3--16.
+
+  Diggle, P.J., Zheng, P. and Durr, P. (2005)
+  Non-parametric estimation of spatial segregation in a
+  multivariate point process: bovine tuberculosis in
+  Cornwall, UK. 
+  \emph{Applied Statistics} \bold{54}, 645--658.
+}
+\seealso{
+  \code{\link{relrisk}}
+}
+\examples{
+  segregation.test(hyytiala, 5)
+
+  if(interactive()) segregation.test(hyytiala, hmin=0.05) 
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{htest}
diff --git a/man/selfcrossing.psp.Rd b/man/selfcrossing.psp.Rd
new file mode 100644
index 0000000..9c1c6c6
--- /dev/null
+++ b/man/selfcrossing.psp.Rd
@@ -0,0 +1,47 @@
+\name{selfcrossing.psp}
+\alias{selfcrossing.psp}
+\title{Crossing Points in a Line Segment Pattern}
+\description{
+  Finds any crossing points between the line segments in a 
+  line segment pattern.
+}
+\usage{
+  selfcrossing.psp(A)
+}
+\arguments{
+  \item{A}{
+    Line segment pattern (object of class \code{"psp"}).
+  }
+}
+\value{
+  Point pattern (object of class \code{"ppp"}).
+}
+\details{
+  This function finds any crossing points between
+  different line segments in the line segment pattern \code{A}.
+
+  A crossing point occurs whenever one of the line segments in \code{A}
+  intersects another line segment in \code{A}, at a nonzero
+  angle of intersection.
+}
+\seealso{
+  \code{\link{crossing.psp}},
+  \code{\link{psp.object}},
+  \code{\link{ppp.object}}.
+}
+\examples{
+  a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  plot(a, col="green", main="selfcrossing.psp")
+  P <- selfcrossing.psp(a)
+  plot(P, add=TRUE, col="red")
+}
+\author{
+  \adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
+
diff --git a/man/selfcut.psp.Rd b/man/selfcut.psp.Rd
new file mode 100644
index 0000000..e376cc8
--- /dev/null
+++ b/man/selfcut.psp.Rd
@@ -0,0 +1,48 @@
+\name{selfcut.psp}
+\alias{selfcut.psp}
+\title{Cut Line Segments Where They Intersect}
+\description{
+  Finds any crossing points between the line segments in a 
+  line segment pattern, and cuts the segments into pieces
+  at these crossing-points.
+}
+\usage{
+selfcut.psp(A, \dots, eps)
+}
+\arguments{
+  \item{A}{
+    Line segment pattern (object of class \code{"psp"}).
+  }
+  \item{eps}{
+    Optional. Smallest permissible length of the resulting
+    line segments. There is a sensible default.
+  }
+  \item{\dots}{Ignored.}
+}
+\details{
+  This function finds any crossing points between
+  different line segments in the line segment pattern \code{A},
+  and cuts the line segments into pieces at these intersection points.
+
+  A crossing point occurs whenever one of the line segments in \code{A}
+  intersects another line segment in \code{A}, at a nonzero
+  angle of intersection.
+}
+\value{
+  Another line segment pattern (object of class \code{"psp"})
+  in the same window as \code{A} with the same kind of marks as \code{A}.
+}
+\author{
+\spatstatAuthors.
+}
+\seealso{
+  \code{\link{selfcrossing.psp}}
+}
+\examples{
+  X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  Y <- selfcut.psp(X)
+  n <- nsegments(Y)
+  plot(Y \%mark\% factor(sample(seq_len(n), n, replace=TRUE)))
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/sessionLibs.Rd b/man/sessionLibs.Rd
new file mode 100644
index 0000000..b016ad5
--- /dev/null
+++ b/man/sessionLibs.Rd
@@ -0,0 +1,38 @@
+\name{sessionLibs}
+\alias{sessionLibs}
+\title{
+  Print Names and Version Numbers of Libraries Loaded
+}
+\description{
+  Prints the names and version numbers
+  of libraries currently loaded by the user.
+}
+\usage{
+  sessionLibs()
+}
+\details{
+  This function prints a list of the libraries loaded by the user
+  in the current session, giving just their name and version number.
+  It obtains this information from \code{\link[utils]{sessionInfo}}.
+
+  This function is not needed in an interactive \R session
+  because the package startup messages will usually provide this
+  information.
+  
+  Its main use is in an \code{\link{Sweave}} script, where it is needed
+  because the package startup messages are not printed. 
+}
+\value{
+  Null.
+}
+\author{
+  \adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link[utils]{sessionInfo}}
+}
+\keyword{data}
diff --git a/man/setcov.Rd b/man/setcov.Rd
new file mode 100644
index 0000000..daa6642
--- /dev/null
+++ b/man/setcov.Rd
@@ -0,0 +1,71 @@
+\name{setcov}
+\alias{setcov}
+\title{Set Covariance of a Window}
+\description{
+  Computes the set covariance function of a window.
+}
+\usage{
+ setcov(W, V=W, \dots)
+}
+\arguments{
+  \item{W}{
+    A window (object of class \code{"owin"}.
+  }
+  \item{V}{
+    Optional. Another window.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link{as.mask}}
+    to control the pixel resolution.
+  }
+}
+\value{
+  A pixel image (an object of class \code{"im"}) representing the
+  set covariance function of \code{W},
+  or the cross-covariance of \code{W} and \code{V}.
+}
+\details{
+  The set covariance function of a region \eqn{W} in the plane
+  is the function \eqn{C(v)} defined for each vector \eqn{v}
+  as the area of the intersection between \eqn{W} and \eqn{W+v},
+  where \eqn{W+v} is the set obtained by shifting (translating)
+  \eqn{W} by \eqn{v}.
+
+  We may interpret \eqn{C(v)} as the area of the set of
+  all points \eqn{x} in \eqn{W} such that \eqn{x+v} also lies in
+  \eqn{W}.
+  
+  This command computes a discretised approximation to
+  the set covariance function of any
+  plane region \eqn{W} represented as a window object (of class
+  \code{"owin"}, see \code{\link{owin.object}}). The return value is
+  a pixel image (object of class \code{"im"}) whose greyscale values
+  are values of the set covariance function.
+
+  The set covariance is computed using the Fast Fourier Transform,
+  unless \code{W} is a rectangle, when an exact formula is used.
+
+  If the argument \code{V} is present, then \code{setcov(W,V)}
+  computes the set \emph{cross-covariance} function \eqn{C(x)}
+  defined for each vector \eqn{x}
+  as the area of the intersection between \eqn{W} and \eqn{V+x}.
+}
+\seealso{
+  \code{\link{imcov}},
+  \code{\link{owin}},
+  \code{\link{as.owin}},
+  \code{\link{erosion}}
+}
+\examples{
+  w <- owin(c(0,1),c(0,1))
+  v <- setcov(w)
+  plot(v)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/shapley.Rd b/man/shapley.Rd
new file mode 100644
index 0000000..ee40007
--- /dev/null
+++ b/man/shapley.Rd
@@ -0,0 +1,98 @@
+\name{shapley}
+\alias{shapley}
+\alias{shapley.extra}
+\docType{data}
+\title{Galaxies in the Shapley Supercluster}
+\description{
+  A point pattern recording the sky positions of 4215 galaxies
+  in the Shapley Supercluster.
+} 
+\format{
+  \code{shapley} is an object of class \code{"ppp"}
+  representing the point pattern of galaxy locations
+  (see \code{\link{ppp.object}}).
+
+  \code{shapley.extra} is a list containing additional data
+  described under Notes.
+}
+\usage{data(shapley)}
+\examples{
+  data(shapley)
+  shapley.extra$plotit(main="Shapley Supercluster")
+}
+\source{M.J. Drinkwater, Department of Physics, University of
+  Queensland}
+\section{Notes}{
+  This dataset comes from a survey by Drinkwater et al (2004) 
+  of the Shapley Supercluster, one of the most massive concentrations
+  of galaxies in the local universe. The data give the sky positions
+  of 4215 galaxies observed using the FLAIR-II spectrograph
+  on the UK Schmidt Telescope (UKST). They were kindly provided by
+  Dr Michael Drinkwater through the Centre for Astrostatistics
+  at Penn State University.
+
+  Sky positions are given using the coordinates
+  Right Ascension (degrees from 0 to 360) and
+  Declination (degrees from -90 to 90).
+  
+  The point pattern has three mark variables:
+  \describe{
+    \item{Mag}{
+      Galaxy magnitude (a negative logarithmic measure of
+      visible brightness).
+    }
+    \item{V}{
+      Recession velocity (km/sec) inferred from redshift,
+      with corrections applied.
+    }
+    \item{SigV}{
+      Estimated standard error for \code{V}.
+    }
+  }
+  The region covered by the survey was approximately the
+  UKST's standard quadrilateral survey fields 382 to 384 and 443 to 446.
+  However, a few of the galaxy positions lie outside these fields.
+
+  The point pattern dataset \code{shapley} consists of all 4215 galaxy
+  locations. The observation window for this pattern is a
+  dilated copy of the convex hull of the galaxy positions,
+  constructed so that all galaxies lie within the window.
+
+  Note that the data contain duplicated points (two points at the
+  same location). To determine which points are duplicates,
+  use \code{\link{duplicated.ppp}}.
+  To remove the duplication, use \code{\link{unique.ppp}}.
+
+  The auxiliary dataset \code{shapley.extra} contains 
+  the following components:
+  \describe{
+    \item{\code{UKSTfields}}{
+      a list of seven windows
+      (objects of class \code{"owin"}) giving the UKST standard survey
+      fields.
+    }
+    \item{\code{UKSTdomain}}{
+      the union of these seven fields,
+      an object of class \code{"owin"}.
+    }
+    \item{\code{plotit}}{
+      a function (called without arguments)
+      that will plot the data and the survey fields
+      in the conventional astronomical presentation,
+      in which Right Ascension is converted
+      to hours and minutes (1 hour equals 15 degrees) and 
+      Right Ascension decreases as we move to the right of the plot.
+    }
+  }
+}
+\references{
+  Drinkwater, M.J., Parker, Q.A., Proust, D., Slezak, E.
+  and Quintana, H. (2004)
+  The large scale distribution of galaxies in the Shapley
+  Supercluster.
+  \emph{Publications of the Astronomical Society of Australia}
+  \bold{21}, 89-96. \code{DOI 10.1071/AS03057}
+} 
+\keyword{datasets}
+\keyword{spatial}
+
diff --git a/man/sharpen.Rd b/man/sharpen.Rd
new file mode 100644
index 0000000..74999e5
--- /dev/null
+++ b/man/sharpen.Rd
@@ -0,0 +1,101 @@
+\name{sharpen} 
+\alias{sharpen}
+\alias{sharpen.ppp}
+\title{Data Sharpening of Point Pattern}
+\description{
+  Performs Choi-Hall data sharpening of a spatial point pattern.
+}
+\usage{
+sharpen(X, \dots)
+\method{sharpen}{ppp}(X, sigma=NULL, \dots,
+                      varcov=NULL, edgecorrect=FALSE)
+}
+\arguments{
+  \item{X}{A marked point pattern (object of class \code{"ppp"}).}
+  \item{sigma}{
+    Standard deviation of isotropic Gaussian smoothing kernel.
+  }
+  \item{varcov}{
+    Variance-covariance matrix of anisotropic Gaussian kernel.
+    Incompatible with \code{sigma}.
+  }
+  \item{edgecorrect}{
+    Logical value indicating whether to apply
+    edge effect bias correction.
+  }
+  \item{\dots}{Arguments passed to \code{\link{density.ppp}}
+    to control the pixel resolution of the result.}
+}
+\details{
+  Choi and Hall (2001) proposed a procedure for
+  \emph{data sharpening} of spatial point patterns.
+  This procedure is appropriate for earthquake epicentres
+  and other point patterns which are believed to exhibit
+  strong concentrations of points along a curve. Data sharpening
+  causes such points to concentrate more tightly along the curve.
+  
+  If the original data points are 
+  \eqn{X_1, \ldots, X_n}{X[1],..., X[n]}
+  then the sharpened points are
+  \deqn{
+    \hat X_i = \frac{\sum_j X_j k(X_j-X_i)}{\sum_j k(X_j - X_i)}
+  }{
+    X^[i] = (sum[j] X[j] * k(X[j] - X[i]))/(sum[j] k(X[j] - X[i]))
+  }
+  where \eqn{k} is a smoothing kernel in two dimensions.
+  Thus, the new point \eqn{\hat X_i}{X^[i]} is a
+  vector average of the nearby points \eqn{X[j]}.
+
+  The function \code{sharpen} is generic. It currently has only one
+  method, for two-dimensional point patterns (objects of class
+  \code{"ppp"}).
+
+  If \code{sigma} is given, the smoothing kernel is the
+  isotropic two-dimensional Gaussian density with standard deviation
+  \code{sigma} in each axis. If \code{varcov} is given, the smoothing
+  kernel is the Gaussian density with variance-covariance matrix
+  \code{varcov}.
+  
+  The data sharpening procedure tends to cause the point pattern
+  to contract away from the boundary of the window. That is,
+  points \code{X_i}{X[i]} that lie `quite close to the edge of the window
+  of the point pattern tend to be displaced inward. 
+  If \code{edgecorrect=TRUE} then the algorithm is modified to
+  correct this vector bias. 
+}
+\value{
+  A point pattern (object of class \code{"ppp"}) in the same window
+  as the original pattern \code{X}, and with the same marks as \code{X}.
+}
+\seealso{
+  \code{\link{density.ppp}},
+  \code{\link{Smooth.ppp}}.
+}
+\examples{
+   data(shapley)
+   X <- unmark(shapley)
+   \dontshow{
+   if(!(interactive())) X <- rthin(X, 0.05)
+   }
+   Y <- sharpen(X, sigma=0.5)
+   Z <- sharpen(X, sigma=0.5, edgecorrect=TRUE)
+   opa <- par(mar=rep(0.2, 4))
+   plot(solist(X, Y, Z), main= " ",
+        main.panel=c("data", "sharpen", "sharpen, correct"),
+        pch=".", equal.scales=TRUE, mar.panel=0.2)
+   par(opa)
+}
+\references{
+  Choi, E. and Hall, P. (2001)
+  Nonparametric analysis of earthquake point-process data.
+  In M. de Gunst, C. Klaassen and A. van der Vaart (eds.)
+  \emph{State of the art in probability and statistics:
+    Festschrift for Willem R. van Zwet},
+  Institute of Mathematical Statistics, Beachwood, Ohio.
+  Pages 324--344.
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/shift.Rd b/man/shift.Rd
new file mode 100644
index 0000000..0135859
--- /dev/null
+++ b/man/shift.Rd
@@ -0,0 +1,43 @@
+\name{shift}
+\alias{shift}
+\title{Apply Vector Translation}
+\description{
+  Applies a vector shift of the plane 
+  to a geometrical object,
+  such as a point pattern or a window. 
+}
+\usage{
+  shift(X, \dots)
+}
+\arguments{
+  \item{X}{Any suitable dataset representing a two-dimensional
+    object, such as a point pattern (object of class \code{"ppp"}),
+    or a window (object of class \code{"owin"}).}
+  \item{\dots}{Arguments determining the shift vector.}
+}
+\value{
+  Another object of the same type, representing the
+  result of applying the shift.
+}
+\details{
+  This is generic. Methods are provided for
+  point patterns (\code{\link{shift.ppp}})
+  and windows (\code{\link{shift.owin}}).
+
+  The object is translated by the vector \code{vec}.
+}
+\seealso{
+  \code{\link{shift.ppp}},
+  \code{\link{shift.owin}},
+  \code{\link{rotate}},
+  \code{\link{affine}},
+  \code{\link{periodify}}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/shift.im.Rd b/man/shift.im.Rd
new file mode 100644
index 0000000..394d01e
--- /dev/null
+++ b/man/shift.im.Rd
@@ -0,0 +1,61 @@
+\name{shift.im}
+\alias{shift.im}
+\title{Apply Vector Translation To Pixel Image}
+\description{
+  Applies a vector shift to a pixel image
+}
+\usage{
+ \method{shift}{im}(X, vec=c(0,0), \dots, origin=NULL)
+}
+\arguments{
+  \item{X}{Pixel image (object of class \code{"im"}).}
+  \item{vec}{Vector of length 2 representing a translation.}
+  \item{\dots}{Ignored}
+  \item{origin}{
+    Character string determining a location
+    that will be shifted to the origin. Options are
+    \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}.
+    Partially matched.
+  }
+}
+\value{
+  Another pixel image (of class \code{"im"}) representing the
+  result of applying the vector shift.
+}
+\details{
+  The spatial location of each pixel in the image
+  is translated by the vector \code{vec}.
+  This is a method for the generic function \code{\link{shift}}.
+
+  If \code{origin} is given, then it should be one of the character
+  strings \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}.
+  The argument \code{vec} will be ignored; instead the shift will be performed
+  so that the specified geometric location is shifted to the origin.
+  If \code{origin="centroid"} then the centroid of the image window will be
+  shifted to the origin. If \code{origin="midpoint"} then the centre of
+  the bounding rectangle of the image will be shifted to the origin.
+  If \code{origin="bottomleft"} then the bottom left corner of the
+  bounding rectangle of the image will be shifted to the origin.
+}
+\seealso{
+  \code{\link{shift}}
+}
+\examples{
+ # make up an image
+ X <- setcov(unit.square())
+ plot(X)
+
+ Y <- shift(X, c(10,10))
+ plot(Y)
+ # no discernible difference except coordinates are different
+
+ shift(X, origin="mid")
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/shift.owin.Rd b/man/shift.owin.Rd
new file mode 100644
index 0000000..cedb7d7
--- /dev/null
+++ b/man/shift.owin.Rd
@@ -0,0 +1,62 @@
+\name{shift.owin}
+\alias{shift.owin}
+\title{Apply Vector Translation To Window}
+\description{
+  Applies a vector shift to a window
+}
+\usage{
+ \method{shift}{owin}(X, vec=c(0,0), \dots, origin=NULL)
+}
+\arguments{
+  \item{X}{Window (object of class \code{"owin"}).}
+  \item{vec}{Vector of length 2 representing a translation.}
+  \item{\dots}{Ignored}
+  \item{origin}{Character string determining a location
+    that will be shifted to the origin. Options are
+    \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}.
+    Partially matched. 
+  }
+}
+\value{
+  Another window (of class \code{"owin"}) representing the
+  result of applying the vector shift.
+}
+\details{
+  The window is translated by the vector \code{vec}.
+  This is a method for the generic function \code{\link{shift}}.
+
+  If \code{origin} is given, then it should be one of the character
+  strings \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}.
+  The argument \code{vec} will be ignored; instead the shift will be performed
+  so that the specified geometric location is shifted to the origin.
+  If \code{origin="centroid"} then the centroid of the window will be
+  shifted to the origin. If \code{origin="midpoint"} then the centre of
+  the bounding rectangle of the window will be shifted to the origin.
+  If \code{origin="bottomleft"} then the bottom left corner of the
+  bounding rectangle of the window will be shifted to the origin.
+}
+\seealso{
+  \code{\link{shift}},
+  \code{\link{shift.ppp}},
+  \code{\link{periodify}},
+  \code{\link{rotate}},
+  \code{\link{affine}},
+  \code{\link{centroid.owin}}
+}
+\examples{
+  W <- owin(c(0,1),c(0,1))
+  X <- shift(W, c(2,3))
+  \dontrun{
+  plot(W)
+  # no discernible difference except coordinates are different
+  }
+  shift(W, origin="mid")
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/shift.ppp.Rd b/man/shift.ppp.Rd
new file mode 100644
index 0000000..311f296
--- /dev/null
+++ b/man/shift.ppp.Rd
@@ -0,0 +1,66 @@
+\name{shift.ppp}
+\alias{shift.ppp}
+\title{Apply Vector Translation To Point Pattern}
+\description{
+  Applies a vector shift to a point pattern. 
+}
+\usage{
+ \method{shift}{ppp}(X, vec=c(0,0), \dots, origin=NULL)
+}
+\arguments{
+  \item{X}{Point pattern (object of class \code{"ppp"}).}
+  \item{vec}{Vector of length 2 representing a translation.}
+  \item{\dots}{Ignored}
+  \item{origin}{Character string determining a location
+    that will be shifted to the origin. Options are
+    \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}.
+    Partially matched.
+  }
+}
+\value{
+  Another point pattern (of class \code{"ppp"}) representing the
+  result of applying the vector shift.
+}
+\details{
+  The point pattern, and its window, are
+  translated by the vector \code{vec}.
+  
+  This is a method for the generic function \code{\link{shift}}.
+
+  If \code{origin} is given, then it should be one of the character
+  strings \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}.
+  The argument \code{vec} will be ignored; instead the shift will be performed
+  so that the specified geometric location is shifted to the origin.
+  If \code{origin="centroid"} then the centroid of the window will be
+  shifted to the origin. If \code{origin="midpoint"} then the centre of
+  the bounding rectangle of the window will be shifted to the origin.
+  If \code{origin="bottomleft"} then the bottom left corner of the
+  bounding rectangle of the window will be shifted to the origin.
+}
+\seealso{
+  \code{\link{shift}},
+  \code{\link{shift.owin}},
+  \code{\link{periodify}},
+  \code{\link{rotate}},
+  \code{\link{affine}}
+}
+\examples{
+  data(cells)
+  X <- shift(cells, c(2,3))
+  \dontrun{
+  plot(X)
+  # no discernible difference except coordinates are different
+  }
+  plot(cells, pch=16)
+  plot(shift(cells, c(0.03,0.03)), add=TRUE)
+
+  shift(cells, origin="mid")
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/shift.psp.Rd b/man/shift.psp.Rd
new file mode 100644
index 0000000..9e7306c
--- /dev/null
+++ b/man/shift.psp.Rd
@@ -0,0 +1,63 @@
+\name{shift.psp}
+\alias{shift.psp}
+\title{Apply Vector Translation To Line Segment Pattern}
+\description{
+  Applies a vector shift to a line segment pattern. 
+}
+\usage{
+ \method{shift}{psp}(X, vec=c(0,0), \dots, origin=NULL)
+}
+\arguments{
+  \item{X}{Line Segment pattern (object of class \code{"psp"}).}
+  \item{vec}{Vector of length 2 representing a translation.}
+  \item{\dots}{Ignored}
+  \item{origin}{Character string determining a location
+    that will be shifted to the origin. Options are
+    \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}.
+    Partially matched.
+  }
+}
+\value{
+  Another line segment pattern (of class \code{"psp"}) representing the
+  result of applying the vector shift.
+}
+\details{
+  The line segment pattern, and its window, are
+  translated by the vector \code{vec}.
+  
+  This is a method for the generic function \code{\link{shift}}.
+
+  If \code{origin} is given, then it should be one of the character
+  strings \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}.
+  The argument \code{vec} will be ignored; instead the shift will be performed
+  so that the specified geometric location is shifted to the origin.
+  If \code{origin="centroid"} then the centroid of the window will be
+  shifted to the origin. If \code{origin="midpoint"} then the centre of
+  the bounding rectangle of the window will be shifted to the origin.
+  If \code{origin="bottomleft"} then the bottom left corner of the
+  bounding rectangle of the window will be shifted to the origin.
+}
+\seealso{
+  \code{\link{shift}},
+  \code{\link{shift.owin}},
+  \code{\link{shift.ppp}},
+  \code{\link{periodify}},
+  \code{\link{rotate}},
+  \code{\link{affine}}
+}
+\examples{
+  X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  plot(X, col="red")
+  Y <- shift(X, c(0.05,0.05))
+  plot(Y, add=TRUE, col="blue")
+
+  shift(Y, origin="mid")
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/sidelengths.owin.Rd b/man/sidelengths.owin.Rd
new file mode 100644
index 0000000..bf4e9d9
--- /dev/null
+++ b/man/sidelengths.owin.Rd
@@ -0,0 +1,61 @@
+\name{sidelengths.owin} 
+\alias{sidelengths.owin}
+\alias{shortside.owin}
+\title{Side Lengths of Enclosing Rectangle of a Window}
+\description{
+  Computes the side lengths of the (enclosing rectangle of) a window.
+}
+\usage{
+ \method{sidelengths}{owin}(x)
+
+ \method{shortside}{owin}(x)
+}
+\arguments{
+  \item{x}{
+    A window whose side lengths will be computed.
+    Object of class \code{"owin"}.
+  }
+}
+\value{
+  For \code{sidelengths.owin}, a numeric vector of length 2
+  giving the side-lengths (\eqn{x} then \eqn{y}) of the enclosing rectangle.
+  For \code{shortside.owin}, a numeric value.
+}
+\details{
+  The functions \code{shortside} and \code{sidelengths} are generic.
+  The functions documented here are the methods for the class \code{"owin"}.
+
+  \code{sidelengths.owin} computes the 
+  side-lengths of the enclosing rectangle of the window \code{x}.
+
+  For safety, both functions give a warning if the window is not a rectangle.
+  To suppress the warning, first convert the window to a rectangle
+  using \code{\link{as.rectangle}}.
+
+  \code{shortside.owin} computes the minimum of the two side-lengths.
+}
+\seealso{
+  \code{\link{shortside}},   \code{\link{sidelengths}}
+  for the generic functions.
+  
+  \code{\link{area.owin}},
+  \code{\link{diameter.owin}},
+  \code{\link{perimeter}}
+  for other geometric calculations on \code{"owin"} objects.
+  
+  \code{\link{owin}},
+  \code{\link{as.owin}}.
+}
+\examples{
+  w <- owin(c(0,2),c(-1,3))
+  sidelengths(w)
+  shortside(as.rectangle(letterR))
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/simba.Rd b/man/simba.Rd
new file mode 100644
index 0000000..272cf0c
--- /dev/null
+++ b/man/simba.Rd
@@ -0,0 +1,37 @@
+\name{simba}
+\alias{simba}
+\docType{data}
+\title{
+  Simulated data from a two-group experiment with replication
+  within each group.
+}
+\description{
+  The \code{simba} dataset contains simulated data from an
+  experiment with a `control' group and a `treatment' group, each 
+  group containing 5 experimental units.
+
+  The responses in the experiment are point patterns.
+  
+  The responses in the control group are 
+  independent realisations of a Poisson point process with intensity 80.
+
+  The responses in the treatment group are independent realisations of
+  a Strauss process with activity parameter \eqn{\beta=100}{beta=100},
+  interaction parameter \eqn{\gamma=0.5}{gamma=0.5} and
+  interaction radius \eqn{R=0.07} in the unit square.
+} 
+\format{
+  \code{simba} is a hyperframe with 10 rows, and columns named:
+
+  \itemize{
+    \item \code{Points} containing the point patterns
+    \item \code{group} factor identifying the experimental group,
+    with levels \code{control} and \code{treatment}).
+  }
+}
+\usage{data(simba)}
+\source{
+  Simulated data, generated by \adrian.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/simdat.Rd b/man/simdat.Rd
new file mode 100644
index 0000000..d1d1011
--- /dev/null
+++ b/man/simdat.Rd
@@ -0,0 +1,30 @@
+\name{simdat}
+\alias{simdat}
+\docType{data}
+\title{
+  Simulated Point Pattern
+}
+\description{
+  This point pattern data set was simulated (using the
+  Metropolis-Hastings algorithm)
+  from a model fitted to the Numata
+  Japanese black pine data set referred to in
+  Baddeley and Turner (2000).
+} 
+\format{
+  An object of class \code{"ppp"}
+  in a square window of size 10 by 10 units.
+
+  See \code{\link{ppp.object}} for details of the format of a
+  point pattern object.
+}
+\usage{data(simdat)}
+\source{\rolf}
+\references{
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42}, 283--322.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/simplenet.Rd b/man/simplenet.Rd
new file mode 100644
index 0000000..ddbc8c4
--- /dev/null
+++ b/man/simplenet.Rd
@@ -0,0 +1,18 @@
+\name{simplenet}
+\alias{simplenet}
+\docType{data}
+\title{
+  Simple Example of Linear Network
+}
+\description{
+  A simple, artificially created, example of a linear network.
+} 
+\format{
+  \code{simplenet} is an object of class \code{"linnet"}.
+}
+\usage{data(simplenet)}
+\source{
+  Created by \adrian.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/simplepanel.Rd b/man/simplepanel.Rd
new file mode 100644
index 0000000..002ca47
--- /dev/null
+++ b/man/simplepanel.Rd
@@ -0,0 +1,233 @@
+\name{simplepanel}
+\alias{simplepanel}
+\alias{grow.simplepanel}
+\title{Simple Point-and-Click Interface Panels}
+\description{
+  These functions enable the user to create a simple, robust, 
+  point-and-click interface to any \R code.
+}
+\usage{
+   simplepanel(title, B, boxes, clicks,
+      redraws=NULL, exit = NULL, env)
+
+   grow.simplepanel(P, side = c("right", "left", "top", "bottom"),
+      len = NULL, new.clicks, new.redraws=NULL, \dots, aspect)
+}
+\arguments{
+  \item{title}{
+    Character string giving the title of the interface panel.
+  }
+  \item{B}{
+    Bounding box of the panel coordinates.
+    A rectangular window (object of class \code{"owin"})
+  }
+  \item{boxes}{
+    A list of rectangular windows (objects of class \code{"owin"})
+    specifying the placement of the buttons
+    and other interactive components of the panel.
+  }
+  \item{clicks}{
+    A list of \R functions, of the same length as \code{boxes},
+    specifying the operations to be performed when each button
+    is clicked. Entries can also be \code{NULL} indicating that no
+    action should occur. See Details.
+  }
+  \item{redraws}{
+    Optional list of \R functions, of the same length as \code{boxes},
+    specifying how to redraw each button. Entries can also be
+    \code{NULL} indicating a simple default. See Details.
+  }
+  \item{exit}{
+    An \R function specifying actions to be taken when the
+    interactive panel terminates.
+  }
+  \item{env}{
+    An \code{environment} that will be passed as an argument
+    to all the functions in \code{clicks}, \code{redraws} and
+    \code{exit}.
+  }
+  \item{P}{
+    An existing interaction panel (object of class \code{"simplepanel"}).
+  }
+  \item{side}{
+    Character string identifying which side of the panel \code{P}
+    should be grown to accommodate the new buttons.
+  }
+  \item{len}{
+    Optional. Thickness of the new panel area that should be grown
+    to accommodate the new buttons. A single number in the same units
+    as the coordinate system of \code{P}.
+  }
+  \item{new.clicks}{
+    List of \R functions defining the operations to be performed
+    when each of the new buttons is clicked.
+  }
+  \item{new.redraws}{
+    Optional. List of \R functions, of the same length as
+    \code{new.clicks}, defining how to redraw each of the new buttons.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{layout.boxes}} to determine the
+    layout of the new buttons.
+  }
+  \item{aspect}{
+    Optional. Aspect ratio (height/width) of the new buttons.
+  }
+}
+\details{
+  These functions enable the user to create a simple, robust,
+  point-and-click interface to any \R code.
+
+  The functions \code{simplepanel} and \code{grow.simplepanel}
+  create an object of class \code{"simplepanel"}. Such an object defines
+  the graphics to be displayed and the actions to be performed
+  when the user interacts with the panel.
+
+  The panel is activated by calling \code{\link{run.simplepanel}}.
+
+  The function \code{simplepanel} creates a panel object
+  from basic data.
+  The function \code{grow.simplepanel} modifies an existing
+  panel object \code{P} by growing an additional row or column
+  of buttons. 
+
+  For \code{simplepanel},
+  \itemize{
+    \item 
+    The spatial layout of the panel is determined by the rectangles
+    \code{B} and \code{boxes}.
+    \item 
+    The argument \code{clicks} must be a list of functions
+    specifying the action to be taken when each button is clicked
+    (or \code{NULL} to indicate that no action should be taken).
+    The list entries should have names (but there are sensible defaults).
+    Each function should be of the form \code{function(env, xy)} where
+    \code{env} is an \code{environment} that may contain shared data,
+    and \code{xy} gives the coordinates of the mouse click, in the format
+    \code{list(x, y)}.
+    The function returns \code{TRUE} if the
+    panel should continue running, and \code{FALSE} if the panel
+    should terminate.
+    \item 
+    The argument \code{redraws}, if given, must be a list of functions
+    specifying the action to be taken when each button is to be redrawn.
+    Each function should be of the form \code{function(button, name, env)} where
+    \code{button} is a rectangle specifying the location of the button
+    in the current coordinate system; \code{name} is a character string
+    giving the name of the button; and \code{env} is the
+    \code{environment} that may contain shared data.
+    The function returns \code{TRUE} if the
+    panel should continue running, and \code{FALSE} if the panel
+    should terminate. 
+    If \code{redraws} is not given (or if one of the entries in
+    \code{redraws} is \code{NULL}), the default action is to draw a pink
+    rectangle showing the button position,
+    draw the name of the button in the middle of this rectangle,
+    and return \code{TRUE}. 
+    \item
+    The argument \code{exit}, if given, must be a function
+    specifying the action to be taken when the panel terminates.
+    (Termination occurs when one of the \code{clicks} functions
+    returns \code{FALSE}).
+    The \code{exit} function should be of the form \code{function(env)} where
+    \code{env} is the \code{environment} that may contain shared data.
+    Its return value will be used as the return value
+    of \code{\link{run.simplepanel}}.
+    \item
+    The argument \code{env} should be an \R environment.
+    The panel buttons will have access to this environment,
+    and will be able to read and write data in it. This mechanism is used
+    to exchange data between the panel and other \R code.
+  }
+  For \code{grow.simplepanel},
+  \itemize{
+    \item the spatial layout of the new boxes
+    is determined by the arguments \code{side}, \code{len},
+    \code{aspect} and by the additional \code{\dots} arguments passed to
+    \code{\link{layout.boxes}}.
+    \item the argument \code{new.clicks} 
+    should have the same format as \code{clicks}.
+    It implicitly specifies the number of new buttons to be added,
+    and the actions to be performed when they are clicked.
+    \item the optional argument \code{new.redraws}, if given,
+    should have the same format as \code{redraws}.
+    It specifies the actions to be performed when the
+    new buttons are clicked.
+  }
+}
+\value{
+  An object of class \code{"simplepanel"}.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{run.simplepanel}},
+  \code{\link{layout.boxes}}
+}
+\examples{
+  # make boxes (alternatively use layout.boxes())
+  Bminus <- square(1)
+  Bvalue <- shift(Bminus, c(1.2, 0))
+  Bplus <- shift(Bvalue, c(1.2, 0))
+  Bdone <- shift(Bplus, c(1.2, 0))
+  myboxes <- list(Bminus, Bvalue, Bplus, Bdone)
+  myB <- do.call(boundingbox,myboxes)
+
+  # make environment containing an integer count
+  myenv <- new.env()
+  assign("answer", 0, envir=myenv)
+
+  # what to do when finished: return the count.
+  myexit <- function(e) { return(get("answer", envir=e)) }
+
+  # button clicks
+  # decrement the count
+  Cminus <- function(e, xy) {
+    ans <- get("answer", envir=e)
+    assign("answer", ans - 1, envir=e)
+    return(TRUE)
+  }
+  # display the count (clicking does nothing)
+  Cvalue <- function(...) { TRUE }
+  # increment the count
+  Cplus <- function(e, xy) {
+    ans <- get("answer", envir=e)
+    assign("answer", ans + 1, envir=e)
+    return(TRUE)
+  }
+  # 'Clear' button
+  Cclear <- function(e, xy) {
+    assign("answer", 0, envir=e)
+    return(TRUE)
+  }
+  # quit button
+  Cdone <- function(e, xy) { return(FALSE) }
+
+  myclicks <- list("-"=Cminus,
+                   value=Cvalue,
+                   "+"=Cplus,
+                   done=Cdone)
+
+  # redraw the button that displays the current value of the count
+  Rvalue <- function(button, nam, e) {
+     plot(button, add=TRUE)
+     ans <- get("answer", envir=e)
+     text(centroid.owin(button), labels=ans)
+     return(TRUE)
+  }
+
+  # make the panel
+  P <- simplepanel("Counter",
+                   B=myB, boxes=myboxes,
+                   clicks=myclicks,
+                   redraws = list(NULL, Rvalue, NULL, NULL),
+                   exit=myexit, env=myenv)
+  P
+  # ( type run.simplepanel(P) to run the panel interactively )
+
+  # add another button to right
+  Pplus <- grow.simplepanel(P, "right", new.clicks=list(clear=Cclear))
+}
+\keyword{iplot}
+\keyword{utilities}
diff --git a/man/simplify.owin.Rd b/man/simplify.owin.Rd
new file mode 100644
index 0000000..3165629
--- /dev/null
+++ b/man/simplify.owin.Rd
@@ -0,0 +1,57 @@
+\name{simplify.owin}
+\Rdversion{1.1}
+\alias{simplify.owin}
+\title{
+  Approximate a Polygon by a Simpler Polygon
+}
+\description{
+  Given a polygonal window, this function finds a simpler polygon
+  that approximates it.
+}
+\usage{
+simplify.owin(W, dmin)
+}
+\arguments{
+  \item{W}{
+    The polygon which is to be simplied.
+    An object of class \code{"owin"}.
+  }
+  \item{dmin}{
+    Numeric value. The smallest permissible length of an edge.
+  }
+}
+\details{
+  This function simplifies a polygon \code{W}
+  by recursively deleting the shortest edge of \code{W}
+  until all remaining edges are longer than the specified
+  minimum length \code{dmin}, or until there are only three edges left.
+
+  The argument \code{W} must be a window (object of class
+  \code{"owin"}). It should be of type \code{"polygonal"}.
+  If \code{W} is a rectangle, it is returned without alteration.
+
+  The simplification algorithm is not yet implemented for
+  binary masks. If \code{W} is a mask, an error is generated.
+}
+\value{
+  Another window (object of class \code{"owin"})
+  of type \code{"polygonal"}.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{owin}}
+}
+\examples{
+  plot(letterR, col="red")
+  plot(simplify.owin(letterR, 0.3), col="blue", add=TRUE)
+
+  W <- Window(chorley)
+  plot(W)
+  WS <- simplify.owin(W, 2)
+  plot(WS, add=TRUE, border="green")
+  points(vertices(WS))
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/simulate.dppm.Rd b/man/simulate.dppm.Rd
new file mode 100644
index 0000000..b67c5ca
--- /dev/null
+++ b/man/simulate.dppm.Rd
@@ -0,0 +1,121 @@
+\name{simulate.dppm}
+\alias{simulate.dppm}
+\alias{simulate.detpointprocfamily}
+\title{Simulation of Determinantal Point Process Model}
+\description{
+  Generates simulated realisations from a determinantal point process model.
+}
+\usage{
+  \method{simulate}{dppm}(object, nsim = 1, seed = NULL, \dots,
+    W = NULL, trunc = 0.99, correction = "periodic", rbord = reach(object))
+
+  \method{simulate}{detpointprocfamily}(object, nsim = 1, seed = NULL, \dots,
+    W = NULL, trunc = 0.99, correction = "periodic", rbord = reach(object))
+}
+\arguments{
+  \item{object}{
+    Determinantal point process model. An object of class
+    \code{"detpointprocfamily"} or \code{"dppm"}.
+  }
+  \item{nsim}{Number of simulated realisations.}
+  \item{seed}{
+    an object specifying whether and how to initialise the random
+    number generator. Either \code{NULL} or an integer that will be
+    used in a call to \code{\link[base:Random]{set.seed}} before
+    simulating the point patterns.
+  }
+  \item{\dots}{Arguments passed on to \code{\link{rdpp}}.}
+  \item{W}{
+    Object specifying the window of simulation (defaults to a unit
+    box if nothing else is sensible -- see Details). Can be any single
+    argument acceptable to \code{\link{as.boxx}} (e.g. an \code{"owin"},
+    \code{"box3"} or \code{"boxx"} object).
+  }
+  \item{trunc}{
+    Numeric value specifying how the model truncation is preformed. See
+    Details.
+  }
+  \item{correction}{
+    Character string specifying the type of correction to use.
+    The options are "periodic" (default) and "border". See Details.
+  }
+  \item{rbord}{
+    Numeric value specifying the extent of the border correction if this
+    correction is used. See Details.
+  }
+}
+\details{
+  These functions are methods for the generic function
+  \code{\link[stats]{simulate}} for the classes \code{"detpointprocfamily"} and
+  \code{"dppm"} of determinantal point process models.
+
+  The return value is a list of \code{nsim} point patterns.
+  It also carries an attribute \code{"seed"} that
+  captures the initial state of the random number generator.
+  This follows the convention used in
+  \code{simulate.lm} (see \code{\link[stats]{simulate}}).
+  It can be used to force a sequence of simulations to be
+  repeated exactly, as shown in the examples for
+  \code{\link[stats]{simulate}}.
+
+  The exact simulation of a determinantal point process model involves
+  an infinite series, which typically has no analytical solution. In the
+  implementation a truncation is performed. The truncation
+  \code{trunc} can be specified either directly as a positive integer
+  or as a fraction between 0 and 1. In the latter case the truncation is chosen
+  such that the expected number of points in a simulation is
+  \code{trunc} times the theoretical expected number of points in the
+  model. The default is 0.99.
+
+  The window of the returned point pattern(s) can be specified via the
+  argument \code{W}. For a fitted model (of class \code{"dppm"}) it
+  defaults to the observation window of the data used to fit the
+  model. For inhomogeneous models it defaults to the window of the
+  intensity image. Otherwise it defaults to a unit box.  For
+  non-rectangular windows simulation is done in the containing rectangle
+  and then restricted to the window.  For inhomogeneous models a
+  stationary model is first simulated using the maximum intensity and
+  then the result is obtained by thinning.
+
+  The default is to use periodic edge correction for simulation such
+  that opposite edges are glued together.  If border correction is used
+  then the simulation is done in an extended window. Edge effects are
+  theoretically completely removed by doubling the size of the window in
+  each spatial dimension, but for practical purposes much less extension
+  may be sufficient. The numeric \code{rbord} determines the extend of
+  the extra space added to the window.
+}
+\value{
+  A list of length \code{nsim} containing simulated point patterns
+  (objects of class \code{"ppp"}). The list has class \code{"solist"}.
+
+  The return value also carries an attribute \code{"seed"} that
+  captures the initial state of the random number generator.
+  See Details.
+}
+\references{
+  Lavancier, F. \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Rubak, E. (2015)
+  Determinantal point process models and statistical inference
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{77}, 853--977.
+}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{rdpp}},
+  \code{\link[stats]{simulate}}
+}
+\examples{
+model <- dppGauss(lambda=100, alpha=.05, d=2)
+simulate(model, 2)
+}
+\keyword{datagen}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/simulate.kppm.Rd b/man/simulate.kppm.Rd
new file mode 100644
index 0000000..56e798b
--- /dev/null
+++ b/man/simulate.kppm.Rd
@@ -0,0 +1,89 @@
+\name{simulate.kppm}
+\alias{simulate.kppm}
+\title{Simulate a Fitted Cluster Point Process Model}
+\description{
+  Generates simulated realisations from a fitted cluster point process model.
+}
+\usage{
+  \method{simulate}{kppm}(object, nsim = 1, seed=NULL, ...,
+         window=NULL, covariates=NULL, verbose=TRUE, retry=10,
+         drop=FALSE)
+}
+\arguments{
+  \item{object}{
+    Fitted cluster point process model. An object of class \code{"kppm"}.
+  }
+  \item{nsim}{
+    Number of simulated realisations.
+  }
+  \item{seed}{
+    an object specifying whether and how to initialise
+    the random number generator. Either \code{NULL} or an integer that will
+    be used in a call to \code{\link[base:Random]{set.seed}}
+    before simulating the point patterns. 
+  }
+  \item{\dots}{Ignored.}
+  \item{window}{
+    Optional. Window (object of class \code{"owin"}) in which the
+    model should be simulated.
+  }
+  \item{covariates}{
+    Optional. A named list containing new values for the covariates in the
+    model. 
+  }
+  \item{verbose}{
+    Logical. Whether to print progress reports (when \code{nsim > 1}).
+  }
+  \item{retry}{
+    Number of times to repeat the simulation if it fails
+    (e.g. because of insufficient memory).
+  }
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE}, the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\details{
+  This function is a method for the generic function
+  \code{\link[stats]{simulate}} for the class \code{"kppm"} of fitted
+  cluster point process models.
+  
+  Simulations are performed by \code{\link{rThomas}},
+  \code{\link{rMatClust}} or \code{\link{rLGCP}}
+  depending on the model.
+
+  The return value is a list of point patterns.
+  It also carries an attribute \code{"seed"} that
+  captures the initial state of the random number generator.
+  This follows the convention used in
+  \code{simulate.lm} (see \code{\link[stats]{simulate}}).
+  It can be used to force a sequence of simulations to be
+  repeated exactly, as shown in the examples for \code{\link[stats]{simulate}}.
+}
+\value{
+  A list of length \code{nsim} containing simulated point patterns
+  (objects of class \code{"ppp"}).
+
+  The return value also carries an attribute \code{"seed"} that
+  captures the initial state of the random number generator.
+  See Details.
+}
+\examples{
+  data(redwood)
+  fit <- kppm(redwood, ~1, "Thomas")
+  simulate(fit, 2)
+}
+\seealso{
+  \code{\link{kppm}},
+  \code{\link{rThomas}},
+  \code{\link{rMatClust}},
+  \code{\link{rLGCP}},
+  \code{\link{simulate.ppm}},
+  \code{\link[stats]{simulate}}
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/simulate.lppm.Rd b/man/simulate.lppm.Rd
new file mode 100644
index 0000000..f964eb7
--- /dev/null
+++ b/man/simulate.lppm.Rd
@@ -0,0 +1,75 @@
+\name{simulate.lppm}
+\alias{simulate.lppm}
+\title{Simulate a Fitted Point Process Model on a Linear Network}
+\description{
+  Generates simulated realisations from a fitted Poisson
+  point process model on a linear network.
+}
+\usage{
+  \method{simulate}{lppm}(object, nsim=1, ...,
+                         new.coef=NULL,
+                         progress=(nsim > 1),
+                         drop=FALSE)
+}
+\arguments{
+  \item{object}{
+    Fitted point process model on a linear network.
+    An object of class \code{"lppm"}.
+  }
+  \item{nsim}{
+    Number of simulated realisations.
+  }
+  \item{progress}{
+    Logical flag indicating whether to print progress reports
+    for the sequence of simulations.
+  }
+  \item{new.coef}{
+    New values for the canonical parameters of the model.
+    A numeric vector of the same length as \code{coef(object)}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{predict.lppm}}
+    to determine the spatial resolution of the image of the fitted intensity
+    used in the simulation.
+  }
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE}, the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\details{
+  This function is a method for the generic function
+  \code{\link[stats]{simulate}} for the class \code{"lppm"} of fitted
+  point process models on a linear network.
+
+  Only Poisson process models are supported so far.
+  
+  Simulations are performed by \code{\link{rpoislpp}}.
+}
+\value{
+  A list of length \code{nsim} containing simulated point patterns
+  (objects of class \code{"lpp"}) on the same linear network as the
+  original data used to fit the model.
+  The result also belongs to the class \code{"solist"}, so that it can be
+  plotted, and the class \code{"timed"}, so that the total computation
+  time is recorded.
+}
+\examples{
+  fit <- lppm(unmark(chicago) ~ y)
+  simulate(fit)[[1]]
+}
+\seealso{
+  \code{\link{lppm}},
+  \code{\link{rpoislpp}},
+  \code{\link[stats]{simulate}}
+}
+\author{\adrian
+  
+  ,
+  \rolf
+  
+  and \ege
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/simulate.mppm.Rd b/man/simulate.mppm.Rd
new file mode 100644
index 0000000..beb8a9f
--- /dev/null
+++ b/man/simulate.mppm.Rd
@@ -0,0 +1,62 @@
+\name{simulate.mppm}
+\alias{simulate.mppm}
+\title{Simulate a Point Process Model Fitted to Several Point Patterns}
+\description{
+  Generates simulated realisations from a 
+  point process model that was fitted to several point patterns.
+}
+\usage{
+  \method{simulate}{mppm}(object, nsim=1, \dots, verbose=TRUE)
+}
+\arguments{
+  \item{object}{
+    Point process model fitted to several point patterns.
+    An object of class \code{"mppm"}.
+  }
+  \item{nsim}{
+    Number of simulated realisations (of each original pattern).
+  }
+  \item{\dots}{
+    Further arguments passed to \code{\link{simulate.ppm}}
+    to control the simulation.
+  }
+  \item{verbose}{
+    Logical value indicating whether to print progress reports.
+  }
+}
+\details{
+  This function is a method for the generic function
+  \code{\link[stats]{simulate}} for the class \code{"mppm"} of fitted
+  point process models for replicated point pattern data.
+
+  The result is a hyperframe with \code{n} rows and \code{nsim} columns,
+  where \code{n} is the number of original point pattern
+  datasets to which the model was fitted. Each column of the hyperframe
+  contains a simulated version of the original data.
+  
+  For each of the original point pattern datasets, the
+  fitted model for this dataset is extracted using
+  \code{\link{subfits}}, then \code{nsim} simulated realisations
+  of this model are generated using \code{\link{simulate.ppm}},
+  and these are stored in the corresponding row of the output.
+}
+\value{
+  A hyperframe.
+}
+\examples{
+  H <- hyperframe(Bugs=waterstriders)
+  fit <- mppm(Bugs ~ id, H)
+  y <- simulate(fit, nsim=2)
+  y
+  plot(y[1,,drop=TRUE], main="Simulations for Waterstriders pattern 1")
+  plot(y[,1,drop=TRUE], main="Simulation 1 for each Waterstriders pattern")
+}
+\seealso{
+  \code{\link{mppm}},
+  \code{\link{simulate.ppm}}.
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/simulate.ppm.Rd b/man/simulate.ppm.Rd
new file mode 100644
index 0000000..6cd2b0f
--- /dev/null
+++ b/man/simulate.ppm.Rd
@@ -0,0 +1,123 @@
+\name{simulate.ppm}
+\alias{simulate.ppm}
+\title{Simulate a Fitted Gibbs Point Process Model}
+\description{
+  Generates simulated realisations from a fitted Gibbs or Poisson
+  point process model.
+}
+\usage{
+  \method{simulate}{ppm}(object, nsim=1, ...,
+                         singlerun = FALSE,
+                         start = NULL,
+                         control = default.rmhcontrol(object, w=w),
+                         w = NULL, 
+                         project=TRUE, new.coef=NULL,
+                         verbose=FALSE, progress=(nsim > 1),
+                         drop=FALSE)
+}
+\arguments{
+  \item{object}{
+    Fitted point process model.
+    An object of class \code{"ppm"}.
+  }
+  \item{nsim}{
+    Number of simulated realisations.
+  }
+  \item{singlerun}{
+    Logical. Whether to generate the simulated realisations
+    from a single long run of the Metropolis-Hastings algorithm
+    (\code{singlerun=TRUE}) or from separate, independent runs of the
+    algorithm (\code{singlerun=FALSE}, the default).
+  }
+  \item{start}{Data determining the initial state
+    of the Metropolis-Hastings algorithm.  See
+    \code{\link{rmhstart}} for description of these arguments.
+    Defaults to \code{list(n.start=npoints(data.ppm(object)))}
+    meaning that the initial state of the algorithm
+    has the same number of points as the original dataset.
+  }
+  \item{control}{Data controlling the running of
+    the Metropolis-Hastings algorithm.  See \code{\link{rmhcontrol}}
+    for description of these arguments.
+  }
+  \item{w}{
+    Optional. The window in which the model is defined.
+    An object of class \code{"owin"}.
+  }
+  \item{\dots}{
+    Further arguments passed to \code{\link{rmhcontrol}},
+    or to \code{\link{rmh.default}}, or to covariate functions in the model.
+  }
+  \item{project}{
+    Logical flag indicating what to do if the fitted model is
+    invalid (in the sense that the values of the fitted coefficients do not
+    specify a valid point process).
+    If \code{project=TRUE} the closest valid model will be simulated;
+    if \code{project=FALSE} an error will occur.
+  }
+  \item{verbose}{
+    Logical flag indicating whether to print progress reports
+    from \code{\link{rmh.ppm}}
+    during the simulation of each point pattern.
+  }
+  \item{progress}{
+    Logical flag indicating whether to print progress reports
+    for the sequence of simulations.
+  }
+  \item{new.coef}{
+    New values for the canonical parameters of the model.
+    A numeric vector of the same length as \code{coef(object)}.
+  }
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE}, the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\details{
+  This function is a method for the generic function
+  \code{\link[stats]{simulate}} for the class \code{"ppm"} of fitted
+  point process models.
+  
+  Simulations are performed by \code{\link{rmh.ppm}}.
+
+  If \code{singlerun=FALSE} (the default), the simulated patterns are
+  the results of independent runs of the Metropolis-Hastings
+  algorithm. If \code{singlerun=TRUE}, a single long run of the
+  algorithm is performed, and the state of the simulation is saved
+  every \code{nsave} iterations to yield the simulated patterns.
+
+  In the case of a single run, the behaviour is controlled
+  by the parameters \code{nsave,nburn,nrep}. These 
+  are described in \code{\link{rmhcontrol}}. They may be passed
+  in the \code{\dots} arguments or included in \code{control}.
+  It is sufficient to specify two
+  of the three parameters \code{nsave,nburn,nrep}.
+}
+\value{
+  A list of length \code{nsim} containing simulated point patterns
+  (objects of class \code{"ppp"}).
+  It also belongs to the class \code{"solist"}, so that it can be
+  plotted, and the class \code{"timed"}, so that the total computation
+  time is recorded.
+}
+\examples{
+  \testonly{op <- spatstat.options(rmh.nrep=10)}
+  fit <- ppm(japanesepines, ~1, Strauss(0.1))
+  simulate(fit, 2)
+  simulate(fit, 2, singlerun=TRUE, nsave=1e4, nburn=1e4)
+  \testonly{spatstat.options(op)}
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{simulate.kppm}},
+  \code{\link[stats]{simulate}}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/simulate.slrm.Rd b/man/simulate.slrm.Rd
new file mode 100644
index 0000000..2c23407
--- /dev/null
+++ b/man/simulate.slrm.Rd
@@ -0,0 +1,88 @@
+\name{simulate.slrm}
+\alias{simulate.slrm}
+\title{Simulate a Fitted Spatial Logistic Regression Model}
+\description{
+  Generates simulated realisations from a fitted
+  spatial logistic regresson model
+}
+\usage{
+  \method{simulate}{slrm}(object, nsim = 1, seed=NULL, ...,
+         window=NULL, covariates=NULL, verbose=TRUE, drop=FALSE)
+}
+\arguments{
+  \item{object}{
+    Fitted spatial logistic regression model. An object of class \code{"slrm"}.
+  }
+  \item{nsim}{
+    Number of simulated realisations.
+  }
+  \item{seed}{
+    an object specifying whether and how to initialise
+    the random number generator. Either \code{NULL} or an integer that will
+    be used in a call to \code{\link[base:Random]{set.seed}}
+    before simulating the point patterns. 
+  }
+  \item{\dots}{Ignored.}
+  \item{window}{
+    Optional. Window (object of class \code{"owin"}) in which the
+    model should be simulated.
+  }
+  \item{covariates}{
+    Optional. A named list containing new values for the covariates in the
+    model. 
+  }
+  \item{verbose}{
+    Logical. Whether to print progress reports (when \code{nsim > 1}).
+  }
+  \item{drop}{
+    Logical. If \code{nsim=1} and \code{drop=TRUE}, the
+    result will be a point pattern, rather than a list 
+    containing a point pattern.
+  }
+}
+\details{
+  This function is a method for the generic function
+  \code{\link[stats]{simulate}} for the class \code{"slrm"} of fitted
+  spatial logistic regression models.
+  
+  Simulations are performed by \code{\link{rpoispp}}
+  after the intensity has been computed by \code{\link{predict.slrm}}.
+
+  The return value is a list of point patterns.
+  It also carries an attribute \code{"seed"} that
+  captures the initial state of the random number generator.
+  This follows the convention used in
+  \code{simulate.lm} (see \code{\link[stats]{simulate}}).
+  It can be used to force a sequence of simulations to be
+  repeated exactly, as shown in the examples for \code{\link[stats]{simulate}}.
+}
+\value{
+  A list of length \code{nsim} containing simulated point patterns
+  (objects of class \code{"ppp"}).
+
+  The return value also carries an attribute \code{"seed"} that
+  captures the initial state of the random number generator.
+  See Details.
+}
+\examples{
+  X <- copper$SouthPoints
+  fit <- slrm(X ~ 1)
+  simulate(fit, 2)
+  fitxy <- slrm(X ~ x+y)
+  simulate(fitxy, 2, window=square(2))
+}
+\seealso{
+  \code{\link{slrm}},
+  \code{\link{rpoispp}},
+  \code{\link{simulate.ppm}},
+  \code{\link{simulate.kppm}},
+  \code{\link[stats]{simulate}}
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/slrm.Rd b/man/slrm.Rd
new file mode 100644
index 0000000..ad09507
--- /dev/null
+++ b/man/slrm.Rd
@@ -0,0 +1,191 @@
+\name{slrm}
+\alias{slrm}
+\title{Spatial Logistic Regression}
+\description{
+  Fits a spatial logistic regression model
+  to a spatial point pattern. 
+}
+\usage{
+slrm(formula, ..., data = NULL, offset = TRUE, link = "logit",
+                   dataAtPoints=NULL, splitby=NULL)
+}
+\arguments{
+  \item{formula}{The model formula. See Details.}
+  \item{\dots}{
+    Optional arguments passed to \code{\link{pixellate}}
+    determining the pixel resolution for the discretisation
+    of the point pattern.
+  }
+  \item{data}{
+    Optional. A list containing data required in the formula.
+    The names of entries in the list should correspond to variable
+    names in the formula. The entries should be point patterns,
+    pixel images or windows.
+  }
+  \item{offset}{
+    Logical flag indicating whether the model formula
+    should be augmented by an offset equal to the logarithm of the
+    pixel area.
+  }
+  \item{link}{The link function for the regression model.
+    A character string, specifying a link function
+    for binary regression.
+  }
+  \item{dataAtPoints}{Optional.
+    Exact values of the covariates at the data points.
+    A data frame, with column names corresponding to
+    variables in the \code{formula}, with one row for each
+    point in the point pattern dataset.
+  }
+  \item{splitby}{
+    Optional. Character string identifying a window. The window will be used
+    to split pixels into sub-pixels. 
+  }
+}
+\details{
+  This function fits a Spatial Logistic Regression model
+  (Tukey, 1972; Agterberg, 1974) to a spatial point pattern dataset. 
+  The logistic function may be replaced by another link function.
+
+  The \code{formula} specifies the form of the model to be fitted,
+  and the data to which it should be fitted. The \code{formula}
+  must be an \R formula with a left and right hand
+  side.
+
+  The left hand side of the \code{formula} is the name of the
+  point pattern dataset, an object of class \code{"ppp"}. 
+
+  The right hand side of the \code{formula} is an expression,
+  in the usual \R formula syntax, representing the functional form of
+  the linear predictor for the model.
+
+  Each variable name that appears in the formula may be 
+  \itemize{
+    \item
+    one of the reserved names \code{x} and \code{y},
+    referring to the Cartesian coordinates;
+    \item
+    the name of an entry in the list \code{data}, if this argument is given;
+    \item
+    the name of an object in the
+    parent environment, that is, in the environment where the call
+    to \code{slrm} was issued.
+  }
+  Each object appearing on the right hand side of the formula may be
+  \itemize{
+    \item a pixel image (object of class \code{"im"})
+    containing the values of a covariate;
+    \item a window (object of class \code{"owin"}), which will be
+    interpreted as a logical covariate which is \code{TRUE} inside the
+    window and \code{FALSE} outside it;
+    \item a \code{function} in the \R language, with arguments
+    \code{x,y}, which can be evaluated at any location to
+    obtain the values of a covariate.
+  }
+  See the Examples below.
+
+  The fitting algorithm discretises the point pattern onto a pixel grid. The
+  value in each pixel is 1 if there are any points of the point pattern
+  in the pixel, and 0 if there are no points in the pixel.
+  The dimensions of the pixel grid will be determined as follows:
+  \itemize{
+    \item
+    The pixel grid will be determined by the extra
+    arguments \code{\dots} if they are specified (for example the argument
+    \code{dimyx} can be used to specify the number of pixels).
+    \item
+    Otherwise, if the right hand side of the \code{formula} includes
+    the names of any pixel images containing covariate values,
+    these images will determine the pixel grid for the discretisation.
+    The covariate image with the finest grid (the smallest pixels) will
+    be used.
+    \item
+    Otherwise, the default pixel grid size is given by
+    \code{spatstat.options("npixel")}.
+  }
+  
+  If \code{link="logit"} (the default), the algorithm fits a Spatial Logistic
+  Regression model. This model states that the probability
+  \eqn{p} that a given pixel contains a data point, is related to the
+  covariates through
+  \deqn{\log\frac{p}{1-p} = \eta}{log(p/(1-p)) = eta}
+  where \eqn{\eta}{eta} is the linear predictor of the model
+  (a linear combination of the covariates,
+  whose form is specified by the \code{formula}).
+
+  If \code{link="cloglog"} then the algorithm fits a model stating that
+  \deqn{\log(-\log(1-p)) = \eta}{log(-log(1-p)) = eta}.
+
+  If \code{offset=TRUE} (the default), the model formula will be
+  augmented by adding an offset term equal to the logarithm of the pixel
+  area. This ensures that the fitted parameters are
+  approximately independent of pixel size.
+  If \code{offset=FALSE}, the offset is not included, and the
+  traditional form of Spatial Logistic Regression is fitted.
+}
+\value{
+  An object of class \code{"slrm"} representing the fitted model.
+
+  There are many methods for this class, including methods for
+  \code{print}, \code{fitted}, \code{predict},
+  \code{anova}, \code{coef}, \code{logLik}, \code{terms},
+  \code{update}, \code{formula} and \code{vcov}.
+  Automated stepwise model selection is possible using
+  \code{\link{step}}. Confidence intervals for the parameters can be
+  computed using \code{\link[stats]{confint}}. 
+}
+\seealso{
+  \code{\link{anova.slrm}},
+  \code{\link{coef.slrm}},
+  \code{\link{fitted.slrm}},
+  \code{\link{logLik.slrm}},
+  \code{\link{plot.slrm}},
+  \code{\link{predict.slrm}},
+  \code{\link{vcov.slrm}}
+}
+\references{
+  Agterberg, F.P. (1974)
+  Automatic contouring of geological maps to detect target areas for
+  mineral exploration.
+  \emph{Journal of the International Association for Mathematical Geology}
+  \bold{6}, 373--395.
+
+  Baddeley, A., Berman, M., Fisher, N.I., Hardegen, A., Milne, R.K.,
+  Schuhmacher, D., Shah, R. and Turner, R. (2010)
+  Spatial logistic regression and change-of-support
+  for spatial Poisson point processes.
+  \emph{Electronic Journal of Statistics}
+  \bold{4}, 1151--1201.
+  {doi: 10.1214/10-EJS581}
+
+  Tukey, J.W. (1972)
+  Discussion of paper by F.P. Agterberg and S.C. Robinson.
+  \emph{Bulletin of the International Statistical Institute}
+  \bold{44} (1) p. 596.
+  Proceedings, 38th Congress, International Statistical Institute.
+}
+\examples{
+     X <- copper$SouthPoints
+     slrm(X ~ 1)
+     slrm(X ~ x+y)
+
+     slrm(X ~ x+y, link="cloglog")
+     # specify a grid of 2-km-square pixels
+     slrm(X ~ 1, eps=2)
+
+     Y <- copper$SouthLines
+     Z <- distmap(Y)
+     slrm(X ~ Z)
+     slrm(X ~ Z, dataAtPoints=list(Z=nncross(X,Y,what="dist")))
+
+     dat <- list(A=X, V=Z)
+     slrm(A ~ V, data=dat)
+}
+\author{\adrian
+  \email{adrian at maths.uwa.edu.au}
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/solapply.Rd b/man/solapply.Rd
new file mode 100644
index 0000000..9992772
--- /dev/null
+++ b/man/solapply.Rd
@@ -0,0 +1,69 @@
+\name{solapply}
+\alias{solapply}
+\alias{anylapply}
+\title{
+  Apply a Function Over a List and Obtain a List of Objects
+}
+\description{
+  Applies the function \code{FUN} to each element of the list \code{X},
+  and returns the result as a list of class \code{"solist"}
+  or \code{"anylist"} as appropriate.
+}
+\usage{
+  anylapply(X, FUN, \dots)
+
+  solapply(X, FUN, \dots, check = TRUE, promote = TRUE, demote = FALSE)
+}
+\arguments{
+  \item{X}{A list.}
+  \item{FUN}{
+    Function to be applied to each element of \code{X}.
+  }
+  \item{\dots}{
+    Additional arguments to \code{FUN}.
+  }
+  \item{check,promote,demote}{
+    Arguments passed to \code{\link{solist}} which determine
+    how to handle different classes of objects.
+  }
+}
+\details{
+  These convenience functions are similar to \code{\link[base]{lapply}}
+  except that they return a list of class \code{"solist"} or
+  \code{"anylist"}.
+
+  In both functions, the result is computed by 
+  \code{lapply(X, FUN, \dots)}.
+
+  In \code{anylapply} the result is converted to a list of class
+  \code{"anylist"} and returned.
+
+  In \code{solapply} the result is converted to
+  a list of class \code{"solist"} \bold{if possible}, using 
+  \code{\link{as.solist}}. If this is not possible,
+  then the behaviour depends on the argument \code{demote}.
+  If \code{demote=TRUE} the result will be returned as a
+  list of class \code{"anylist"}. If \code{demote=FALSE} (the default),
+  an error occurs.
+}
+\value{
+  A list, usually of class \code{"solist"}.
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{solist}}, 
+  \code{\link{anylist}}.
+}
+\examples{
+  solapply(waterstriders, density)
+}
+\keyword{spatial}
+\keyword{list}
+\keyword{manip}
diff --git a/man/solist.Rd b/man/solist.Rd
new file mode 100644
index 0000000..a51d7dc
--- /dev/null
+++ b/man/solist.Rd
@@ -0,0 +1,97 @@
+\name{solist}
+\alias{solist}
+\title{
+  List of Two-Dimensional Spatial Objects
+}
+\description{
+  Make a list of two-dimensional spatial objects.
+}
+\usage{
+solist(\dots, check=TRUE, promote=TRUE, demote=FALSE)
+}
+\arguments{
+  \item{\dots}{
+    Any number of objects, each representing a two-dimensional
+    spatial dataset.
+  }
+  \item{check}{
+    Logical value. If \code{TRUE}, check that each of the
+    objects is a 2D spatial object.
+  }
+  \item{promote}{
+    Logical value. If \code{TRUE}, test whether all objects belong to
+    the \emph{same} class, and if so, promote the list of objects
+    to the appropriate class of list.
+  }
+  \item{demote}{
+    Logical value determining what should happen if any of the
+    objects is not a 2D spatial object: if \code{demote=FALSE} (the
+    default), a fatal error occurs; if \code{demote=TRUE},
+    a list of class \code{"anylist"} is returned.
+  }
+}
+\details{
+  This command creates an object of class \code{"solist"}
+  (spatial object list)
+  which represents a list of two-dimensional spatial datasets.
+  The datasets do not necessarily belong to the same class.
+
+  Typically the intention is that the datasets in the list
+  should be treated in the same way, for example, they should
+  be plotted side-by-side. The \pkg{spatstat} package
+  provides a plotting function, \code{\link{plot.solist}},
+  and many other functions for this class.
+  
+  In the \pkg{spatstat} package, various functions produce
+  an object of class \code{"solist"}. For example, when
+  a point pattern is split into several point patterns by
+  \code{\link{split.ppp}}, or an image is split into several
+  images by \code{\link{split.im}}, the result is of
+  class \code{"solist"}.
+
+  If \code{check=TRUE} then the code will check whether all
+  objects in \code{\dots} belong to the classes
+  of two-dimensional spatial objects defined in the
+  \pkg{spatstat} package. They do not have to belong to the
+  \emph{same} class. Set \code{check=FALSE}
+  for efficiency, but only if you are sure that all the objects are valid.
+
+  If some of the objects in \code{\dots} are
+  not two-dimensional spatial objects,
+  the action taken depends on the argument \code{demote}.
+  If \code{demote=TRUE}, the result will belong to the more general
+  class \code{"anylist"} instead of \code{"solist"}.
+  If \code{demote=FALSE} (the default), an error occurs.
+
+  If \code{promote=TRUE} then the code will check whether all
+  the objects \code{\dots} belong to the same class.
+  If they are all point patterns (class \code{"ppp"}),
+  the result will also belong to the class \code{"ppplist"}.
+  If they are all pixel images (class \code{"im"}), the result
+  will also belong to the class \code{"imlist"}.
+
+  Use \code{\link{as.solist}} to convert a list to a \code{"solist"}.
+}
+\value{
+  A list, usually belonging to the class \code{"solist"}.
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{as.solist}},
+  \code{\link{anylist}},
+  \code{\link{solapply}}
+}
+\examples{
+  solist(cells, density(cells))
+  solist(cells, japanesepines, redwood)
+}
+\keyword{spatial}
+\keyword{list}
+\keyword{manip}
diff --git a/man/solutionset.Rd b/man/solutionset.Rd
new file mode 100644
index 0000000..5032f5b
--- /dev/null
+++ b/man/solutionset.Rd
@@ -0,0 +1,88 @@
+\name{solutionset}
+\alias{solutionset}
+\title{Evaluate Logical Expression Involving Pixel Images and Return
+  Region Where Expression is True}
+\description{
+  Given a logical expression involving one or more pixel images,
+  find all pixels where the expression is true,
+  and assemble these pixels into a window.
+}
+\usage{
+  solutionset(\dots, envir)
+}
+\arguments{
+  \item{\dots}{An expression in the \R language, involving one or more
+    pixel images.}
+  \item{envir}{Optional. The environment in which to evaluate the
+    expression.}
+}
+\details{
+  Given a logical expression involving one or more pixel images,
+  this function will find all pixels where the expression is true,
+  and assemble these pixels into a spatial window.
+
+  Pixel images in \code{spatstat}
+  are represented by objects of class \code{"im"}
+  (see \code{\link{im.object}}). These are essentially matrices of
+  pixel values, with extra attributes recording the pixel dimensions,
+  etc.
+
+  Suppose \code{X} is a pixel image. Then \code{solutionset(abs(X) > 3)}
+  will find all the pixels in \code{X} for which the pixel value
+  is greater than 3 in absolute value, and return a window containing
+  all these pixels.
+
+  If \code{X} and \code{Y} are two pixel images,
+  \code{solutionset(X > Y)} will find all pixels for which the
+  pixel value of \code{X} is greater than the corresponding pixel value
+  of \code{Y}, and return a window containing these pixels.
+  
+  In general, \code{\dots} can be any logical expression involving
+  pixel images.
+
+  The code first tries to evaluate the expression using
+  \code{\link{eval.im}}.
+  This is successful if the expression involves only
+  (a) the \emph{names} of pixel images, (b) scalar
+  constants, and (c) functions which are vectorised.
+  There must be at least one pixel image in the expression.
+  The expression \code{expr} must be vectorised.
+  See the Examples.
+
+  If this is unsuccessful, the code then tries to evaluate the
+  expression using pixel arithmetic. This is successful if all the
+  arithmetic operations in the expression are listed
+  in \code{\link{Math.im}}. 
+}
+\value{
+  A spatial window
+  (object of class \code{"owin"}, see \code{\link{owin.object}}).
+}
+\seealso{
+  \code{\link{im.object}},
+  \code{\link{owin.object}},
+  \code{\link{eval.im}},
+  \code{\link{levelset}}
+}
+\examples{
+  # test images
+  X <- as.im(function(x,y) { x^2 - y^2 }, unit.square())
+  Y <- as.im(function(x,y) { 3 * x + y  - 1}, unit.square())
+
+  W <- solutionset(abs(X) > 0.1)
+  W <- solutionset(X > Y)
+  W <- solutionset(X + Y >= 1)
+
+  area(solutionset(X < Y))
+
+  solutionset(density(cells) > 20)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{programming}
+\keyword{manip}
diff --git a/man/spatdim.Rd b/man/spatdim.Rd
new file mode 100644
index 0000000..f0f67b6
--- /dev/null
+++ b/man/spatdim.Rd
@@ -0,0 +1,50 @@
+\name{spatdim}
+\alias{spatdim}
+\title{Spatial Dimension of a Dataset}
+\description{
+  Extracts the spatial dimension of an object in the
+  \pkg{spatstat} package.
+}
+\usage{spatdim(X)}
+\arguments{
+  \item{X}{Object belonging to any class defined in the
+    \pkg{spatstat} package.}
+}
+\value{
+  An integer, or \code{NA}.
+}
+\details{
+  This function returns the number of spatial coordinate dimensions
+  of the dataset \code{X}. The results for some of the more common
+  types of objects are as follows:
+  \tabular{ll}{
+    \bold{object class} \tab \bold{dimension} \cr
+    \code{"ppp"} \tab 2 \cr
+    \code{"lpp"} \tab 2 \cr
+    \code{"pp3"} \tab 3 \cr
+    \code{"ppx"} \tab number of \emph{spatial} dimensions \cr
+    \code{"owin"} \tab 2 \cr
+    \code{"psp"} \tab 2 \cr
+    \code{"ppm"} \tab 2
+  }
+  Note that time dimensions are not counted.
+
+  If \code{X} is not a recognised spatial object, the result is \code{NA}.
+}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\examples{
+  spatdim(lansing)
+}
+
+
+
+
+
diff --git a/man/spatialcdf.Rd b/man/spatialcdf.Rd
new file mode 100644
index 0000000..9dece2b
--- /dev/null
+++ b/man/spatialcdf.Rd
@@ -0,0 +1,105 @@
+\name{spatialcdf}
+\alias{spatialcdf}
+\title{
+  Spatial Cumulative Distribution Function
+}
+\description{
+  Compute the spatial cumulative distribution function of a
+  spatial covariate, optionally using spatially-varying weights.
+}
+\usage{
+spatialcdf(Z, weights = NULL, normalise = FALSE, ..., W = NULL, Zname = NULL)
+}
+\arguments{
+  \item{Z}{
+    Spatial covariate.
+    A pixel image or a \code{function(x,y,...)}
+  }
+  \item{weights}{
+    Spatial weighting for different locations.
+    A pixel image, a \code{function(x,y,...)}, a window, a constant value,
+    or a fitted point process model (object of class \code{"ppm"} or
+    \code{"kppm"}).
+  }
+  \item{normalise}{
+    Logical. Whether the weights should be normalised so that they
+    sum to 1.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{as.mask}} to determine the pixel
+    resolution, or extra arguments passed to \code{Z} if it is a function.
+  }
+  \item{W}{
+    Optional window (object of class \code{"owin"}) defining the spatial
+    domain.
+  }
+  \item{Zname}{
+    Optional character string for the name of the covariate \code{Z}
+    used in plots.
+  }
+}
+\details{
+  If \code{weights} is missing or \code{NULL}, it defaults to 1.
+  The values of the covariate \code{Z}
+  are computed on a grid of pixels. The weighted cumulative distribution
+  function of \code{Z} values is computed, taking each value with weight
+  equal to the pixel area. The resulting function \eqn{F} is such that
+  \eqn{F(t)} is the area of the region of space where
+  \eqn{Z \le t}{Z <= t}.
+
+  If \code{weights} is a pixel image or a function, then the
+  values of \code{weights} and of the covariate \code{Z}
+  are computed on a grid of pixels. The
+  \code{weights} are multiplied by the pixel area.
+  Then the weighted empirical cumulative distribution function
+  of \code{Z} values
+  is computed using \code{\link{ewcdf}}. The resulting function
+  \eqn{F} is such that \eqn{F(t)} is the total weight (or weighted area)
+  of the region of space where \eqn{Z \le t}{Z <= t}.
+
+  If \code{weights} is a fitted point process model, then it should
+  be a Poisson process. The fitted intensity of the model,
+  and the value of the covariate \code{Z}, are evaluated at the
+  quadrature points used to fit the model. The \code{weights} are
+  multiplied by the weights of the quadrature points.
+  Then the weighted empirical cumulative distribution of \code{Z} values
+  is computed using \code{\link{ewcdf}}. The resulting function
+  \eqn{F} is such that \eqn{F(t)} is the expected number of points
+  in the point process that will fall in the region of space
+  where \eqn{Z \le t}{Z <= t}.
+  
+  If \code{normalise=TRUE}, the function is normalised so that its
+  maximum value equals 1, so that it gives the cumulative
+  \emph{fraction} of weight or cumulative fraction of points.
+
+  The result can be printed, plotted, and used as a function.
+}
+\value{
+  A cumulative distribution function object
+  belonging to the classes \code{"spatialcdf"},
+  \code{"ewcdf"}, \code{"ecdf"} and \code{"stepfun"}.
+}
+\author{
+  \adrian
+  
+  ,
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{ewcdf}},
+  \code{\link{cdf.test}}
+}
+\examples{
+   with(bei.extra, {
+     plot(spatialcdf(grad))
+     fit <- ppm(bei ~ elev)
+     plot(spatialcdf(grad, predict(fit)))
+     plot(A <- spatialcdf(grad, fit))
+     A(0.1)
+  })
+}
+\keyword{spatial}
+\keyword{nonparametric}
diff --git a/man/spatstat-deprecated.Rd b/man/spatstat-deprecated.Rd
new file mode 100644
index 0000000..0efd028
--- /dev/null
+++ b/man/spatstat-deprecated.Rd
@@ -0,0 +1,110 @@
+\name{spatstat-deprecated}
+\alias{as.psp.owin}
+\alias{bounding.box}
+\alias{clf.test}
+\alias{conspire}
+\alias{eval.hyper}
+\alias{ksmooth.ppp}
+\alias{smooth.ppp}
+\alias{smooth.fv}
+\alias{smooth.msr}
+\alias{ks.test.ppm}
+\alias{mpl}
+\alias{rtoro}
+\alias{superimposePSP}
+\alias{which.max.im}
+\alias{delaunay.distance}
+\alias{delaunay.network}
+\alias{dirichlet.edges}
+\alias{dirichlet.network}
+\alias{dirichlet.vertices}
+\alias{dirichlet.weights}
+\alias{circumradius}
+\alias{circumradius.owin}
+\alias{circumradius.ppp}
+\alias{circumradius.linnet}
+\title{Deprecated spatstat functions}
+\description{
+  Deprecated spatstat functions.
+}
+\usage{
+\method{as.psp}{owin}(x, \dots, window=NULL, 
+       check=spatstat.options("checksegments"), fatal=TRUE)
+bounding.box(\dots)
+clf.test(\dots)
+conspire(\dots)
+ksmooth.ppp(x, sigma, \dots, edge=TRUE)
+smooth.ppp(X, ..., weights = rep(1, npoints(X)), at="pixels")
+smooth.fv(x, which = "*", ...,
+          method=c("smooth.spline", "loess"),
+          xinterval=NULL)
+smooth.msr(X, ...)
+ks.test.ppm(\dots)
+mpl(Q, trend, interaction, data, correction, rbord, use.gam)
+rtoro(X, which=NULL, radius=NULL, width=NULL, height=NULL) 
+eval.hyper(e, h, simplify=TRUE, ee=NULL)
+superimposePSP(\dots, W=NULL, check=TRUE)
+which.max.im(x)
+delaunay.distance(\dots)
+delaunay.network(\dots)
+dirichlet.edges(\dots)
+dirichlet.network(\dots)
+dirichlet.vertices(\dots)
+dirichlet.weights(\dots)
+circumradius(x, \dots)
+\method{circumradius}{owin}(x, \dots)
+\method{circumradius}{ppp}(x, \dots)
+\method{circumradius}{linnet}(x, \dots)
+}
+
+\details{
+  These functions are deprecated, and will eventually be deleted from
+  the \pkg{spatstat} package.
+  
+  \code{as.psp.owin} has been replaced by \code{\link{edges}}.
+  
+  \code{bounding.box} has been replaced by \code{\link{boundingbox}}.
+  
+  \code{clf.test} has been renamed \code{\link{dclf.test}}.
+
+  \code{conspire} has been replaced by \code{\link{plot.fv}}.
+
+  \code{ksmooth.ppp} has been replaced by \code{\link{density.ppp}}.
+
+  \code{smooth.ppp} has been replaced by \code{\link{Smooth.ppp}}.
+
+  \code{smooth.fv} has been replaced by \code{\link{Smooth.fv}}.
+
+  \code{smooth.msr} has been replaced by \code{\link{Smooth.msr}}.
+
+  \code{mpl} has been replaced by \code{\link{ppm}}.
+
+  \code{ks.test.ppm} has been replaced by
+  \code{\link{cdf.test}}.
+  
+  \code{rtoro} has been replaced by \code{\link{rshift}}.
+
+  \code{eval.hyper} has been replaced by \code{\link{with.hyperframe}}.
+  
+  \code{superimposePSP} has been replaced by \code{\link{superimpose.psp}}
+  which is a method for the generic function \code{\link{superimpose}}.
+
+  \code{which.max.im(x)} is replaced by
+  \code{\link{im.apply}(x, which.max)}.
+
+  \code{delaunay.distance}   is replaced by \code{delaunayDistance}.
+  
+  \code{delaunay.network} is replaced by \code{delaunayNetwork}.
+  
+  \code{dirichlet.edges} is replaced by \code{dirichletEdges}.
+  
+  \code{dirichlet.network} is replaced by \code{dirichletNetwork}.
+  
+  \code{dirichlet.vertices} is replaced by \code{dirichletVertices}.
+  
+  \code{dirichlet.weights} is replaced by \code{dirichletWeights}.
+
+  \code{circumradius} is replaced by the more appropriately named
+  \code{boundingradius}.
+}
+\keyword{internal}
diff --git a/man/spatstat-internal.Rd b/man/spatstat-internal.Rd
new file mode 100644
index 0000000..930b933
--- /dev/null
+++ b/man/spatstat-internal.Rd
@@ -0,0 +1,1599 @@
+\name{spatstat-internal} 
+\title{Internal spatstat functions}
+\alias{[.pp3}
+\alias{[.localpcfmatrix}
+\alias{[.rat}
+\alias{[.splitppx}
+\alias{[.diagramobj}
+\alias{[<-.splitppx}
+\alias{acedist.show}
+\alias{acedist.noshow}
+\alias{active.interactions}
+\alias{adaptcoef}
+\alias{adjust.ratfv}
+\alias{affinexy}
+\alias{affinexypolygon}
+\alias{ang2rad}
+\alias{anycrossing.psp}
+\alias{ApplyConnected}
+\alias{applytolayers}
+\alias{applyPolyclipArgs}
+\alias{areadelta2}       
+\alias{areaGain.diri}       
+\alias{areaGain.grid}       
+\alias{areaLoss.diri}       
+\alias{areaLoss.grid}
+\alias{assemble.plot.objects}       
+\alias{AsymmDistance.psp}
+\alias{as.breakpts}
+\alias{as.character.units}
+\alias{as.data.frame.bw.optim}
+\alias{as.data.frame.fv}
+\alias{as.double.im}
+\alias{as.linfun.linfun}
+\alias{as.list.hyperframe}
+\alias{as.listof}
+\alias{as.owin.lintess}
+\alias{as.units}
+\alias{augment.msr}
+\alias{BartCalc}
+\alias{bbEngine}
+\alias{bermantestEngine}
+\alias{bdry.mask}
+\alias{bind.ratfv}
+\alias{blankcoefnames}
+\alias{bounding.box3}
+\alias{break.holes}
+\alias{breakpts}
+\alias{breakpts.from.r}
+\alias{bt.frame}
+\alias{bw.optim}
+\alias{calc.DR}
+\alias{calc.NNIR}
+\alias{calc.SAVE}
+\alias{calc.SIR}
+\alias{calc.TSE}
+\alias{cannot.update}
+\alias{cartesian}
+\alias{cellmiddles}
+\alias{censtimeCDFest}
+\alias{change.default.expand}          
+\alias{checkbigmatrix}          
+\alias{checkfields}          
+\alias{checksolve}          
+\alias{check.finespacing}
+\alias{check.hist.lengths}
+\alias{check.mat.mul}
+\alias{check.testfun}
+\alias{circticks}
+\alias{clarkevansCalc}
+\alias{clip.psp}
+\alias{cliprect.psp}
+\alias{clippoly.psp}
+\alias{closethresh}
+\alias{coef.summary.kppm}
+\alias{coef.summary.ppm}
+\alias{coef.vblogit} 
+\alias{coerce.marks.numeric}
+\alias{compatible.rat}
+\alias{compileCDF}
+\alias{conform.ratfv}
+\alias{crosspairquad}
+\alias{cobble.xy}
+\alias{codetime}
+\alias{col.args.to.grey}
+\alias{colouroutputs}
+\alias{colouroutputs<-}
+\alias{commonPolyclipArgs}
+\alias{conform.imagelist}
+\alias{countingweights}
+\alias{CressieReadStatistic}
+\alias{CressieReadSymbol}
+\alias{CressieReadName}
+\alias{CVforPCF}
+\alias{damaged.ppm}
+\alias{data.mppm}
+\alias{datagen.runifpointOnLines}
+\alias{datagen.runifpoisppOnLines}
+\alias{datagen.rpoisppOnLines}
+\alias{default.clipwindow}
+\alias{default.linnet.tolerance}
+\alias{default.n.tiling}
+\alias{default.ntile}
+\alias{deltasuffstat}
+\alias{Deviation}
+\alias{dflt.redraw}
+\alias{densitycrossEngine}
+\alias{densitypointsEngine}
+\alias{diagnose.ppm.engine}
+\alias{diagramobj}
+\alias{digestCovariates}
+\alias{digital.volume}
+\alias{dilate.owin}
+\alias{dim.fasp}               
+\alias{dim.hyperframe}               
+\alias{dim.im}               
+\alias{dim.msr}
+\alias{dim.owin}
+\alias{dimnames.fasp}               
+\alias{dimnames<-.fasp}
+\alias{dimnames.msr}
+\alias{distributecbind}
+\alias{dist2dpath}
+\alias{do.as.im}
+\alias{do.call.plotfun}
+\alias{do.istat}
+\alias{doMultiStraussHard}
+\alias{dppmFixAlgorithm}
+\alias{dppmFixIntensity}
+\alias{emptywindow}
+\alias{envelopeEngine}
+\alias{envelopeProgressData}
+\alias{envelopeTest}
+\alias{envelope.hasenvelope}
+\alias{envelope.matrix}
+\alias{equalpairs}          
+\alias{equalpairs.quad}          
+\alias{equals.quad}          
+\alias{equalsfun.quad}          
+\alias{erodemask}
+\alias{erode.owin}
+\alias{evalCovar}
+\alias{evalCovar.ppm}
+\alias{evalCovar.lppm}
+\alias{evalCovariate}
+\alias{evalInteraction}
+\alias{evalInterEngine}
+\alias{evalPairPotential}
+\alias{evalSparse3Dentrywise}
+\alias{evaluate2Dkernel}
+\alias{even.breaks.owin}
+\alias{exactdt}              
+\alias{exactPdt}
+\alias{existsSpatstatVariable}
+\alias{expandSpecialLists}
+\alias{expandwinPerfect}
+\alias{ExpSmoothLog}
+\alias{extractAIC.slrm}
+\alias{extractAtomicQtests}
+\alias{fakemaintitle}
+\alias{family.vblogit} 
+\alias{f3engine}
+\alias{f3Cengine}
+\alias{fasp}
+\alias{FDMKERNEL}
+\alias{fft2D}
+\alias{fftwAvailable}
+\alias{fill.coefs}
+\alias{findbestlegendpos}
+\alias{findCovariate}
+\alias{findcbind}
+\alias{fii}
+\alias{fillNA}
+\alias{flatfname}
+\alias{flipxypolygon}
+\alias{forbid.logi}
+\alias{format.numberwithunit}
+\alias{FormatFaspFormulae}
+\alias{framebottomleft}
+\alias{fvexprmap}
+\alias{fvlabels}
+\alias{fvlabels<-}
+\alias{fvlabelmap}
+\alias{fvlegend}
+\alias{g3engine}
+\alias{g3Cengine}
+\alias{getdataname}
+\alias{getfields}
+\alias{getglmdata}
+\alias{getglmfit}
+\alias{getglmsubset}
+\alias{getlambda.lpp}
+\alias{getlastshift}
+\alias{getppmdatasubset}
+\alias{getppmOriginalCovariates}
+\alias{getRandomFieldsModelGen}
+\alias{getSpatstatVariable}
+\alias{getSumFun}
+\alias{geyercounts}
+\alias{geyerdelta2}
+\alias{GLMpredict}
+\alias{good.correction.K}
+%\alias{gridadjacencymatrix} %DoNotExport
+\alias{gridindex}            
+\alias{grid1index}
+\alias{grokIndexVector}
+\alias{grow.mask}
+\alias{hackglmmPQL}
+\alias{hasenvelope}
+\alias{HermiteCoefs}
+\alias{handle.r.b.args}
+\alias{handle.rshift.args}
+\alias{head.hyperframe}
+\alias{hierarchicalordering}
+\alias{hiermat}
+\alias{ho.engine}
+\alias{hsvNA}
+\alias{IdenticalRows}
+\alias{idorempty}
+\alias{illegal.iformula}
+\alias{implemented.for.K}
+\alias{impliedpresence}
+\alias{impliedcoefficients}
+\alias{inpoint}
+\alias{instantiate.interact}
+\alias{interactionfamilyname}
+\alias{intermaker}
+\alias{intX.owin}     
+\alias{intX.xypolygon}     
+\alias{intY.owin}     
+\alias{intY.xypolygon}
+\alias{invokeColourmapRule}
+\alias{is.atomicQtest}
+\alias{is.cadlag}
+\alias{is.col.argname}
+\alias{is.data}
+\alias{is.expandable}
+\alias{is.expandable.ppm}
+\alias{is.expandable.rmhmodel}
+\alias{is.fv}
+\alias{is.hyperframe}
+\alias{is.infline}
+\alias{is.interact}
+\alias{is.marked.default}    
+\alias{is.marked.msr}    
+\alias{is.marked.psp}    
+\alias{is.marked.quad}
+\alias{is.mppm}
+\alias{is.multitype.msr}    
+\alias{is.multitype.quad}    
+\alias{is.multitype.default}    
+\alias{is.poisson.mppm}
+\alias{is.pp3}
+\alias{is.ppx}
+\alias{is.psp}
+\alias{is.scov}
+\alias{is.sob}
+\alias{is.tess}
+\alias{k3engine}
+\alias{Kborder.engine}
+\alias{Knone.engine}
+\alias{Krect.engine}
+\alias{Kount}
+\alias{Kwtsum}               
+\alias{Kpcf.kppm}               
+\alias{killinteraction}
+\alias{km.rs.opt}
+\alias{kppmComLik}
+\alias{kppmMinCon}
+\alias{kppmPalmLik}
+\alias{kraever}
+\alias{kraeverRandomFields}
+\alias{labels.ppm}
+\alias{levels.im}
+\alias{levels<-.im}
+\alias{levelsAsFactor}
+\alias{linearKengine}
+\alias{linearKmulti}
+\alias{linearKmulti.inhom}
+\alias{linearKmultiEngine}
+\alias{linearpcfengine}
+\alias{linearpcfmulti}
+\alias{linearpcfmulti.inhom}
+\alias{linearPCFmultiEngine}
+\alias{listof}
+\alias{localKengine}
+\alias{localpcfengine}
+\alias{localpcfmatrix}
+\alias{local2lpp}
+\alias{logicalIndex}
+\alias{logi.dummy}
+\alias{logi.engine}
+\alias{logLik.vblogit}
+\alias{makeLinnetTolerance}
+\alias{maskLaslett}
+\alias{match2DkernelName}
+\alias{parbreak}
+\alias{plan.legend.layout}
+\alias{PDEdensityLPP}
+\alias{PoisSaddle}
+\alias{PoisSaddleArea}
+\alias{PoisSaddleGeyer}
+\alias{PoisSaddlePairwise}
+\alias{polyLaslett}
+\alias{polytileareaEngine}
+\alias{positiveIndex}
+\alias{PPMmodelmatrix}
+\alias{putSpatstatVariable}
+\alias{lookup.im}
+\alias{lookup2DkernelInfo}
+\alias{majorminorversion}
+\alias{make.even.breaks}
+\alias{makefvlabel}
+\alias{makeunits}
+\alias{markappend}
+\alias{markcbind}
+\alias{markformat}
+\alias{markformat.ppp}
+\alias{markformat.ppx}
+\alias{markformat.psp}
+\alias{markformat.default}
+\alias{mark.scale.default}
+\alias{markspace.integral}
+\alias{marks.default}           
+\alias{marks.quad}           
+\alias{\%mapp\%} %DoNotExport
+%NAMESPACE export("%mapp%")
+\alias{markappendop}
+\alias{marksubset}
+\alias{markreplicateop}
+\alias{\%mrep\%} %DoNotExport
+%NAMESPACE export("%mrep%")
+\alias{marksubsetop}
+\alias{\%msub\%} %DoNotExport
+%NAMESPACE export("%msub%")
+\alias{mask2df}
+\alias{match.kernel}
+\alias{maxflow}
+\alias{mctestSigtraceEngine}
+\alias{meanlistfv}
+\alias{meanX.owin}
+\alias{meanY.owin}
+\alias{model.se.image}
+\alias{modelFrameGam}
+\alias{mpl.engine}
+\alias{mpl.get.covariates}
+\alias{mpl.prepare}
+\alias{mpl.usable}
+\alias{MultiPair.checkmatrix}
+\alias{multiply.only.finite.entries}
+\alias{multiplicityNumeric}
+\alias{na.handle.im}
+\alias{names.hyperframe}
+\alias{names<-.fv}
+\alias{names<-.hyperframe}
+\alias{nearest.pixel}
+\alias{nearest.valid.pixel}
+\alias{newformula}
+\alias{newstyle.coeff.handling}
+\alias{nncleanEngine}
+\alias{nndcumfun}
+\alias{no.trend.ppm}
+\alias{n.quad}
+\alias{numberwithunit}
+\alias{numeric.columns}
+\alias{objsurfEngine}
+\alias{onecolumn}
+\alias{optimStatus}
+\alias{outdated.interact}
+\alias{oversize.quad}    
+\alias{owinpolycheck}
+\alias{owinpoly2mask}
+\alias{owin2polypath}
+\alias{pairs.listof}
+\alias{pairs.solist}
+\alias{param.quad}
+\alias{partialModelMatrix}
+\alias{pcf3engine}
+\alias{pcfmulti.inhom}
+\alias{pickoption}
+\alias{plotEachLayer}
+\alias{ploterodewin}
+\alias{ploterodeimage}
+\alias{plot3Dpoints}
+\alias{plotPolygonBdry}
+\alias{plot.addvar}
+\alias{plot.barplotdata}
+\alias{plot.bw.frac}
+\alias{plot.bw.optim}
+\alias{plot.localpcfmatrix}
+\alias{plot.lurk}
+\alias{plot.minconfit}
+\alias{plot.parres}
+\alias{plot.plotpairsim}
+\alias{plot.pppmatching}
+\alias{plot.profilepl}
+\alias{plot.qqppm}
+\alias{plot.spatialcdf}
+\alias{plot.studpermutest}
+\alias{ppllengine}
+\alias{ppm.default}
+\alias{ppmCovariates}
+\alias{ppmDerivatives}
+\alias{ppmInfluenceEngine}
+\alias{pppdist.mat}
+\alias{pppdist.prohorov}
+\alias{ppsubset}
+\alias{predict.vblogit}
+\alias{prefixfv}
+\alias{prepareTitle}
+\alias{printStatus}
+\alias{print.addvar}    
+\alias{print.anylist}    
+\alias{print.autoexec}    
+\alias{print.bt.frame}
+\alias{print.bugtable}
+\alias{print.bw.frac}
+\alias{print.bw.optim}
+\alias{print.colourmap}
+\alias{print.diagppm}
+\alias{print.distfun}
+\alias{print.detpointprocfamily}
+\alias{print.detpointprocfamilyfun}
+\alias{print.envelope}
+\alias{print.ewcdf}
+\alias{print.fasp}       
+\alias{print.fv}       
+\alias{print.fvfun}       
+\alias{print.funxy}       
+\alias{print.hasenvelope}       
+\alias{print.hierarchicalordering}
+\alias{print.hyperframe}
+\alias{print.indicfun}       
+\alias{print.influence.ppm}       
+\alias{print.interact}       
+\alias{print.intermaker}       
+\alias{print.isf}
+\alias{print.laslett}
+\alias{print.layered}
+\alias{print.leverage.ppm}
+\alias{print.lintess}
+\alias{print.localpcfmatrix}
+\alias{print.lut}
+\alias{print.minconfit}
+\alias{print.mppm}
+\alias{print.msr}
+\alias{print.nnfun}
+\alias{print.numberwithunit}
+\alias{print.onearrow}
+\alias{print.parres}
+\alias{print.plotpairsim}
+\alias{print.plotppm}
+\alias{print.pppmatching}
+\alias{print.profilepl}
+\alias{print.quadrattest}
+\alias{print.qqppm}
+\alias{print.rat}
+\alias{print.rmhcontrol}
+\alias{print.rmhexpand}
+\alias{print.rmhmodel}
+\alias{print.rmhstart}
+\alias{print.rmhInfoList}
+\alias{print.rppm}
+\alias{print.splitppp}
+\alias{print.simplepanel}
+\alias{print.Smoothfun}       
+\alias{print.solist}
+\alias{print.splitppx}
+\alias{print.summary.hyperframe}
+\alias{print.summary.listof}
+\alias{print.summary.linim}
+\alias{print.summary.linnet}
+\alias{print.summary.lintess}
+\alias{print.summary.logiquad}
+\alias{print.summary.lut}
+\alias{print.summary.mppm}
+\alias{print.summary.owin}
+\alias{print.summary.ppp}
+\alias{print.summary.psp}
+\alias{print.summary.rmhexpand}
+\alias{print.summary.solist}
+\alias{print.summary.splitppp}
+\alias{print.summary.splitppx}
+\alias{print.summary.units}
+\alias{print.symbolmap}       
+\alias{print.textstring}
+\alias{print.texturemap}
+\alias{print.tess}
+\alias{print.timed}
+\alias{print.vblogit}
+\alias{print.yardstick}
+\alias{project3Dhom}
+\alias{putlastshift}
+\alias{quad}
+\alias{quad.mppm}
+\alias{quadBlockSizes}
+\alias{RandomFieldsSafe}
+\alias{ratfv}
+\alias{recognise.spatstat.type}
+\alias{rectquadrat.breaks}
+\alias{rectquadrat.countEngine}
+\alias{reduceformula}
+\alias{reheat}
+\alias{RelevantDeviation}
+\alias{repair.image.xycoords}
+\alias{replacementIndex}
+\alias{representativeRows}
+\alias{resolveEinfo}
+\alias{resolve.vargamma.shape}
+\alias{rgbNA}
+\alias{rhohatEngine}
+\alias{rhohatCalc}
+\alias{rMaternInhibition}
+\alias{RmhExpandRule}
+\alias{rmhsnoop}
+\alias{rocData}
+\alias{rocModel}
+\alias{roseContinuous}
+\alias{ruletextline}
+\alias{quadrat.testEngine}
+\alias{quadscheme.replicated}
+\alias{quadscheme.spatial}
+\alias{pointgrid}
+\alias{rastersample}
+\alias{rasterx.mask}
+\alias{rastery.mask}
+\alias{rasterxy.mask}
+\alias{rasterx.im}
+\alias{rastery.im}
+\alias{rasterxy.im}
+\alias{rebadge.fv}
+\alias{rebadge.as.crossfun}
+\alias{rebadge.as.dotfun}
+\alias{rebound}
+\alias{rebound.im}
+\alias{rebound.ppp}
+\alias{rebound.psp}
+\alias{rebound.owin}
+\alias{reconcile.fv}
+\alias{rename.fv}
+\alias{repair.old.factor.image}
+\alias{reincarnate.interact}
+\alias{resid4plot}
+\alias{resid1plot}
+\alias{resid1panel}
+\alias{resolve.2D.kernel}
+\alias{restrict.mask}
+\alias{reversePolyclipArgs}
+\alias{rmax.Rigid}
+\alias{rmax.rule}
+\alias{rotxy}
+\alias{rotxypolygon}
+\alias{row.names.hyperframe}
+\alias{row.names<-.hyperframe}
+\alias{runifpoispp}          
+\alias{runifpoisppOnLines}          
+\alias{runifrect}
+\alias{rmhResolveControl}
+\alias{rmhResolveExpansion}
+\alias{rmhResolveTypes}
+\alias{rmhSnoopEnv}
+\alias{rmhcontrol.rmhcontrol}
+\alias{rmhcontrol.list}
+\alias{rmhEngine}
+\alias{rmhmodel.rmhmodel}
+\alias{rmhstart.rmhstart}
+\alias{rmhstart.list}
+\alias{rmpoint.I.allim}
+\alias{rpoint.multi}
+\alias{safedeldir}
+\alias{safelookup}
+\alias{scalardilate.breakpts}
+\alias{scalardilate.diagramobj}
+\alias{scalardilate.msr}
+\alias{scanmeasure}
+\alias{scanmeasure.ppp}
+\alias{scanmeasure.im}
+\alias{scanBinomLRTS}
+\alias{scanPoisLRTS}
+\alias{second.moment.calc}
+\alias{second.moment.engine}
+\alias{sewpcf}
+\alias{sewsmod}
+\alias{shift.diagramobj}              
+\alias{shift.influence.ppm}              
+\alias{shift.leverage.ppm}              
+\alias{shift.msr}              
+\alias{shift.quadratcount}              
+\alias{shift.quadrattest}              
+\alias{shiftxy}              
+\alias{shiftxypolygon}              
+\alias{signalStatus}
+\alias{simulate.profilepl}
+\alias{simulrecipe}              
+\alias{slr.prepare}
+\alias{slrAssemblePixelData}
+\alias{Smooth.solist}
+\alias{smoothcrossEngine}              
+\alias{smoothpointsEngine}              
+\alias{sort.im}
+\alias{sortalongsegment}
+\alias{spatstat.rawdata.location}
+\alias{spatstat.xy.coords}
+\alias{spatstatClusterModelInfo}
+\alias{spatstatDPPModelInfo}
+\alias{spatstatRmhInfo}
+\alias{spatialCDFframe}
+\alias{spatialCDFtest}
+\alias{splitHybridInteraction}
+\alias{sp.foundclass}
+\alias{sp.foundclasses}
+\alias{sphere.volume}
+\alias{store.versionstring.spatstat}
+\alias{str.hyperframe}
+\alias{strausscounts}
+\alias{suffloc}
+\alias{suffstat.generic}
+\alias{suffstat.poisson}
+\alias{summarise.trend}
+\alias{summary.envelope}
+\alias{summary.funxy}
+\alias{summary.hyperframe}
+\alias{summary.lintess}
+\alias{summary.logiquad}
+\alias{summary.lut}
+\alias{summary.mppm}
+\alias{summary.profilepl}
+\alias{summary.pppmatching}
+\alias{summary.ppx}
+\alias{summary.splitppx}
+\alias{summary.rmhexpand}
+\alias{summary.vblogit}
+\alias{sumsymouter}
+\alias{superimposeMarks}
+\alias{symbolmaptype}
+\alias{tail.hyperframe}
+\alias{tensor1x1}
+\alias{test.crossing.psp}
+\alias{test.selfcrossing.psp}
+\alias{thinjump}
+\alias{tilecentroids}        
+\alias{trianglediameters}
+\alias{trim.mask}        
+\alias{tweak.fv.entry}
+\alias{tweak.ratfv.entry}
+\alias{tweak.coefs}
+\alias{twostage.test}
+\alias{\%unit\%} %DoNotExport
+%NAMESPACE export("%unit%")
+\alias{unitname.default}
+\alias{unitname<-.default}
+\alias{unstack.solist}
+\alias{unstack.layered}
+\alias{unstackFilter}
+\alias{update.im}
+\alias{update.ippm}
+\alias{update.rmhstart}
+\alias{validradius}
+\alias{validate2Dkernel}
+\alias{validate.angles}        
+\alias{validate.lpp.coords}
+\alias{validate.mask}        
+\alias{validate.quad}        
+\alias{vanilla.fv}
+\alias{varcountEngine}
+%\alias{vblogit} %DoNotExport
+%\alias{vblogit.fmla} %DoNotExport
+\alias{versionstring.interact}
+\alias{versionstring.ppm}
+\alias{versionstring.spatstat}
+\alias{verifyclass}
+\alias{Window.lintess}
+\alias{Window<-.linnet}
+\alias{Window<-.lpp}
+\alias{warn.once}
+\alias{waxlyrical}
+\alias{windows.mppm}
+\alias{w.quad}               
+\alias{x.quad}
+\alias{y.quad}
+\alias{xy.grid}
+\alias{X2testEngine}
+\alias{xtfrm.im}
+\alias{xypolygon2psp}
+\alias{xypolyselfint}
+%%%% sparse 3D arrays
+\alias{sparse3Darray}
+\alias{as.sparse3Darray}
+\alias{dim.sparse3Darray}
+\alias{dim<-.sparse3Darray}
+\alias{dimnames.sparse3Darray}
+\alias{dimnames<-.sparse3Darray}
+\alias{print.sparse3Darray}
+\alias{aperm.sparse3Darray}
+\alias{as.array.sparse3Darray}
+\alias{[.sparse3Darray}
+\alias{[<-.sparse3Darray}
+\alias{anyNA.sparse3Darray}
+\alias{RelevantZero}
+\alias{RelevantEmpty}
+\alias{isRelevantZero}
+\alias{unionOfSparseIndices}
+\alias{Math.sparse3Darray}
+\alias{Ops.sparse3Darray}
+\alias{Summary.sparse3Darray}
+\alias{inside3Darray}
+\alias{SparseEntries}
+\alias{SparseIndices}
+\alias{EntriesToSparse}
+\alias{mapSparseEntries}
+\alias{applySparseEntries}
+\alias{sumsymouterSparse}
+\alias{tenseur}
+\alias{marginSums}
+\alias{rbindCompatibleDataFrames}
+\alias{bind.sparse3Darray}
+%%%%
+\alias{spatstatDiagnostic}
+%%
+\alias{as.ppplist}
+\alias{as.imlist}
+\alias{pointsAlongNetwork}
+\alias{expandSparse}
+\alias{allElementsIdentical}
+\alias{resampleNetworkDataFrame}
+\alias{sparseVectorCumul}
+%%
+\alias{as.ppm.lppm}
+\alias{as.ppm.rppm}
+\alias{predict.profilepl}
+%%%%%%%
+\description{
+  Internal spatstat functions.
+}
+\usage{
+\method{[}{splitppx}(x, \dots)
+\method{[}{splitppx}(x, \dots) <- value
+\method{[}{diagramobj}(x, \dots)
+\method{[}{rat}(x, \dots)
+acedist.show(X, Y, n, d, timelag)
+acedist.noshow(X, Y, n, d)
+active.interactions(object)
+adaptcoef(new.coef, fitcoef, drop)
+adjust.ratfv(f, columns, numfactor, denfactor)
+affinexy(X, mat, vec, invert)
+affinexypolygon(p, mat, vec, detmat)
+ang2rad(ang, unit, start, clockwise)
+anycrossing.psp(A,B)
+ApplyConnected(X, Engine, r, \dots, rule, auxdata)
+applytolayers(L, FUN, \dots)
+applyPolyclipArgs(x, p)
+areadelta2(X, r, \dots, sparseOK)
+areaGain.diri(u, X, r, \dots, W, verbose)
+areaGain.grid(u, X, r, \dots, W, ngrid)
+areaLoss.diri(X, r, \dots, W, subset)
+areaLoss.grid(X, r, \dots, W, subset,
+                         method = c("count", "distmap"),
+                         ngrid = spatstat.options("ngrid.disc"),
+                         exact = FALSE)
+assemble.plot.objects(xlim, ylim, \dots, lines, polygon)
+AsymmDistance.psp(X, Y, metric, method)
+as.breakpts(\dots)
+\method{as.character}{units}(x, \dots)
+\method{as.data.frame}{bw.optim}(x, \dots)
+\method{as.data.frame}{fv}(x, \dots)
+\method{as.double}{im}(x, \dots)
+\method{as.linfun}{linfun}(X, \dots)
+\method{as.list}{hyperframe}(x, \dots)
+as.listof(x)
+\method{as.owin}{lintess}(W, \dots)
+as.units(s)
+augment.msr(x, \dots, sigma)
+BartCalc(fY, fK)
+bbEngine(\dots)
+bermantestEngine(model, covariate, which, alternative, \dots,
+                 modelname, covname, dataname)
+bdry.mask(W)
+bind.ratfv(x, numerator, denominator, labl, desc, preferred,
+           ratio, quotient)
+blankcoefnames(x)
+bounding.box3(\dots)
+break.holes(x, splitby, depth, maxdepth)
+breakpts(val, maxi, even = FALSE, npos = NULL, step = NULL)
+breakpts.from.r(r)
+bt.frame(Q, trend, interaction, \dots, covariates,
+         correction, rbord, use.gam, allcovar)
+bw.optim(cv, h, iopt, \dots, cvname, hname, criterion, unitname)
+calc.DR(COV, z, Dim)
+calc.NNIR(COV, z, pos, Dim)
+calc.SAVE(COV, z, Dim)
+calc.SIR(COV, z)
+calc.TSE(COV, z, pos, Dim1, Dim2)
+cannot.update(\dots)
+cartesian(pp, markset, fac = TRUE)
+cellmiddles(W, nx, ny, npix, distances)
+censtimeCDFest(o, cc, d, breaks, \dots,
+     KM, RS, HAN, RAW, han.denom, tt, pmax)
+change.default.expand(x, newdefault)
+checkbigmatrix(n, m, fatal, silent)
+checkfields(X,L)
+checksolve(M, action, descrip, target)
+check.finespacing(r, eps, win, rmaxdefault, context, action, rname)
+check.hist.lengths(hist,breaks)
+check.mat.mul(A, B, Acols, Brows, fatal)
+check.testfun(f, f1, X)
+circticks(R, at, unit, start, clockwise, labels)
+clarkevansCalc(X, correction, clipregion, working)
+clip.psp(x, window, check, fragments)
+cliprect.psp(x, window, fragments)
+clippoly.psp(s, window, fragments)
+closethresh(X,R,S,twice,\dots)
+\method{coef}{summary.kppm}(object, \dots)
+\method{coef}{summary.ppm}(object, \dots)
+\method{coef}{vblogit}(object, \dots) 
+coerce.marks.numeric(X, warn)
+\method{compatible}{rat}(A, B, \dots) 
+compileCDF(D, B, r, \dots, han.denom, check)
+conform.ratfv(x)
+crosspairquad(Q,rmax,what)
+cobble.xy(x, y, f, fatal, \dots)
+codetime(x, hms, what)
+col.args.to.grey(x, \dots)
+colouroutputs(x)
+colouroutputs(x) <- value
+commonPolyclipArgs(\dots, p)
+conform.imagelist(X, Zlist)
+countingweights(id, areas, check = TRUE)
+CressieReadStatistic(OBS,EXP,lambda)
+CressieReadSymbol(lambda)
+CressieReadName(lambda)
+CVforPCF(bw, stuff)
+damaged.ppm(object)
+data.mppm(x)
+datagen.runifpointOnLines(n, L)
+datagen.runifpoisppOnLines(lambda, L)
+datagen.rpoisppOnLines(lambda, L, lmax, \dots, check)
+default.clipwindow(object, epsilon)
+default.linnet.tolerance(L)
+default.n.tiling(X, nd, ntile, npix, eps, random, quasi, verbose)
+default.ntile(X)
+deltasuffstat(model, \dots, restrict, dataonly, force, quadsub, sparseOK)
+Deviation(x, ref, leaveout, n, xi)
+dflt.redraw(button, name, env)
+densitycrossEngine(Xdata, Xquery, sigma, \dots,
+                    weights, edge, varcov,
+                    diggle, sorted)
+densitypointsEngine(x, sigma, \dots,
+                    kernel, scalekernel,
+                    weights, edge, varcov,
+                    leaveoneout, diggle, sorted, spill, cutoff)
+diagnose.ppm.engine(object, \dots, type, typename, opt,
+                         sigma, rbord, compute.sd, compute.cts,
+                         envelope, nsim, nrank,
+                         rv, oldstyle, splineargs, verbose)
+diagramobj(X, \dots)
+digestCovariates(\dots, W)
+digital.volume(range, nval, vside)
+dilate.owin(\dots)
+\method{dim}{fasp}(x)
+\method{dim}{hyperframe}(x)
+\method{dim}{im}(x)
+\method{dim}{msr}(x)
+\method{dim}{owin}(x)
+\method{dimnames}{fasp}(x)
+\method{dimnames}{fasp}(x) <- value
+\method{dimnames}{msr}(x)
+distributecbind(x)
+dist2dpath(dist, method="C")
+do.as.im(x, action, \dots, W, eps, dimyx, xy, na.replace)
+do.call.plotfun(fun, arglist, \dots)
+do.istat(panel)
+doMultiStraussHard(iradii, hradii, types)
+dppmFixIntensity(DPP, lambda, po)
+dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar)
+emptywindow(w)
+envelopeEngine(X, fun, simul,
+           nsim, nrank, \dots, funargs, funYargs,
+           verbose, clipdata, 
+           transform, global, ginterval, use.theory,
+           alternative, scale, clamp,
+           savefuns, savepatterns, saveresultof,
+           weights,
+           nsim2, VARIANCE, nSD,
+           Yname, maxnerr, internal, cl,
+           envir.user, expected.arg, do.pwrong,
+           foreignclass, collectrubbish)
+envelopeProgressData(X, fun, \dots, exponent,
+                     alternative, leaveout, scale, clamp,
+                     normalize, deflate, rmin, 
+                     save.envelope, savefuns, savepatterns)
+envelopeTest(X, \dots, exponent, alternative,
+            rinterval, leaveout, scale, clamp, tie.rule,
+            interpolate, save.interpolant,
+            save.envelope, savefuns, savepatterns,
+            Xname, verbose)
+\method{envelope}{hasenvelope}(Y, \dots, Yname)
+\method{envelope}{matrix}(Y, \dots, rvals, observed, theory, funX,
+  nsim, nsim2, jsim, jsim.mean,
+  type, alternative, scale, clamp, csr, use.theory, nrank, ginterval, nSD,
+  savefuns, check, Yname, do.pwrong, weights, precomputed)
+equalpairs(U, X, marked=FALSE)
+equalpairs.quad(Q)
+equals.quad(Q)          
+equalsfun.quad(Q)
+erodemask(w,r,strict)
+erode.owin(\dots)
+evalCovar(model, covariate, \dots)
+\method{evalCovar}{ppm}(model, covariate, \dots, lambdatype,
+          dimyx, eps, interpolate, jitter, modelname, covname, dataname) 
+\method{evalCovar}{lppm}(model, covariate, \dots, lambdatype, 
+          eps, nd, interpolate, jitter, modelname, covname, dataname)
+evalCovariate(covariate, locations)
+evalInteraction(X,P,E,interaction,correction,\dots,precomputed,savecomputed)
+evalInterEngine(X,P,E,interaction,correction,\dots,
+                Reach,precomputed,savecomputed)
+evalPairPotential(X,P,E,pairpot,potpars,R)
+evalSparse3Dentrywise(expr, envir)
+evaluate2Dkernel(kernel, x, y, sigma, varcov, \dots, scalekernel)
+even.breaks.owin(w)
+exactdt(X, \dots)              
+exactPdt(w)
+existsSpatstatVariable(name)
+expandSpecialLists(x, special)
+expandwinPerfect(W, expand, amount)
+ExpSmoothLog(X, \dots, at, weights)
+\method{extractAIC}{slrm}(fit, scale = 0, k = 2, \dots)
+extractAtomicQtests(x)
+fakemaintitle(bb, main, \dots)
+\method{family}{vblogit}(object, \dots)
+f3engine(x, y, z, box, vside, range, nval, correction)
+f3Cengine(x, y, z, box, vside, rmax, nrval)
+fasp(fns, which, formulae, dataname, title, rowNames, colNames, checkfv)
+FDMKERNEL(lppobj, sigma, dtt, weights, iterMax, sparse, dtx)
+fft2D(z, inverse, west)
+fftwAvailable()
+fill.coefs(coefs, required)
+findbestlegendpos(\dots)
+findCovariate(covname, scope, scopename=NULL)
+findcbind(root, depth, maxdepth)
+fii(interaction, coefs, Vnames, IsOffset)
+fillNA(x, value)
+flatfname(x)
+flipxypolygon(p)
+forbid.logi(object)
+\method{format}{numberwithunit}(x, \dots, collapse, modifier)
+FormatFaspFormulae(f, argname)
+framebottomleft(w)
+fvexprmap(x)
+fvlabels(x, expand=FALSE)
+fvlabels(x) <- value
+fvlabelmap(x, dot=TRUE)
+fvlegend(object, elang)
+g3engine(x, y, z, box, rmax, nrval, correction)
+g3Cengine(x, y, z, box, rmax, nrval)
+getdataname(defaultvalue, \dots, dataname)
+getfields(X, L, fatal = TRUE)
+getglmdata(object, drop=FALSE)
+getglmfit(object)
+getglmsubset(object)
+getlambda.lpp(lambda, X, subset, \dots,
+              update, leaveoneout, loo.given, lambdaname)
+getlastshift(X)
+getppmdatasubset(object)
+getppmOriginalCovariates(object)
+getRandomFieldsModelGen(model)
+getSpatstatVariable(name)
+getSumFun(abbreviation, classname, ismarked, fatal)
+geyercounts(U,X,r,sat,Xcounts,EqualPairs)
+geyerdelta2(X,r,sat,\dots,sparseOK)
+GLMpredict(fit, data, coefs, changecoef, type)
+good.correction.K(X)
+%gridadjacencymatrix(dims)
+gridindex(x, y, xrange, yrange, nx, ny)            
+grid1index(x, xrange, nx)
+grokIndexVector(ind, len, nama)
+grow.mask(M, xmargin=0, ymargin=xmargin)
+hackglmmPQL(fixed, random, family, data, correlation, weights,
+            control, niter, verbose, subset, \dots, reltol)
+hasenvelope(X, E)
+HermiteCoefs(order)
+handle.r.b.args(r = NULL, breaks = NULL, window, pixeps = NULL, rmaxdefault)
+handle.rshift.args(W, \dots, radius, width, height, edge, clip,
+edgedefault)
+\method{head}{hyperframe}(x,n,\dots)
+hierarchicalordering(i, s)
+hiermat(x, h)
+ho.engine(model, \dots, nsim, nrmh, start, control, verb)
+hsvNA(h, s, v, alpha)
+IdenticalRows(i,j,a,b)
+idorempty(w, r, caller)
+illegal.iformula(ifmla, itags, dfvarnames)
+implemented.for.K(correction, windowtype, explicit)
+impliedpresence(tags, formula, df, extranames=character(0))
+impliedcoefficients(object, tag)
+inpoint(W)
+instantiate.interact(x, par)
+interactionfamilyname(x)
+intermaker(f, blank)
+intX.owin(w)
+intX.xypolygon(polly)
+intY.owin(w)
+intY.xypolygon(polly)
+invokeColourmapRule(colfun, x, \dots, zlim, colargs)
+is.atomicQtest(x)
+is.cadlag(s)
+is.col.argname(x)
+is.data(Q)
+is.expandable(x)
+\method{is.expandable}{ppm}(x)  
+\method{is.expandable}{rmhmodel}(x) 
+is.fv(x)
+is.hyperframe(x)
+is.infline(x)
+is.interact(x)
+\method{is.marked}{default}(\dots)  
+\method{is.marked}{msr}(X, \dots)
+\method{is.marked}{psp}(X, \dots)
+\method{is.marked}{quad}(X, na.action="warn", \dots)
+is.mppm(x)
+\method{is.multitype}{default}(X, \dots)  
+\method{is.multitype}{msr}(X, \dots)
+\method{is.multitype}{quad}(X, na.action="warn", \dots)
+\method{is.poisson}{mppm}(x)
+is.pp3(x)
+is.ppx(x)
+is.psp(x)
+is.scov(x)
+is.sob(x)
+is.tess(x)
+k3engine(x, y, z, box, rmax, nrval, correction)
+Kborder.engine(X, rmax, nr, correction, weights, ratio)
+Knone.engine(X, rmax, nr, weights, ratio)
+Krect.engine(X, rmax, nr, correction, weights, ratio, fname)
+Kount(dIJ, bI, b, breaks)
+Kwtsum(dIJ, bI, wIJ, b, w, breaks)
+Kpcf.kppm(model, what)
+killinteraction(model)
+km.rs.opt(o, cc, d, breaks, KM, RS)
+kppmComLik(X, Xname, po, clusters, control, weightfun, rmax,
+           algorithm, DPP, \dots)
+kppmMinCon(X, Xname, po, clusters, control, statistic, statargs,
+           algorithm, DPP, \dots)
+kppmPalmLik(X, Xname, po, clusters, control, weightfun, rmax, 
+           algorithm, DPP, \dots)
+kraever(package, fatal)
+kraeverRandomFields()
+\method{labels}{ppm}(object, \dots)
+\method{levels}{im}(x)
+\method{levels}{im}(x) <- value
+levelsAsFactor(x)
+linearKengine(X, \dots, r, reweight, denom, correction,
+              ratio, showworking)
+linearKmulti(X, I, J, r, \dots, correction)
+linearKmulti.inhom(X, I, J, lambdaI, lambdaJ, r, \dots, correction,
+             normalise)
+linearpcfengine(X, \dots, r, reweight, denom, correction, ratio)
+linearpcfmulti(X, I, J, r, \dots, correction)
+linearpcfmulti.inhom(X, I, J, lambdaI, lambdaJ, r, \dots,
+                     correction, normalise)
+linearKmultiEngine(X, I, J, \dots,
+                   r, reweight, denom, correction, showworking)
+linearPCFmultiEngine(X, I, J, \dots,
+                   r, reweight, denom, correction, showworking)
+listof(\dots)
+localKengine(X, \dots, wantL, lambda, correction, verbose, rvalue)
+localpcfengine(X, \dots, delta, rmax, nr, stoyan, lambda)
+localpcfmatrix(X, i, \dots, lambda, delta, rmax, nr, stoyan)
+local2lpp(L, seg, tp, X, df.only)
+logicalIndex(i, nama, len)
+logi.dummy(X, dummytype, nd, mark.repeat, \dots)
+logi.engine(Q, trend, interaction, \dots, 
+            covariates, subsetexpr, correction, rbord, covfunargs, allcovar, 
+            vnamebase, vnameprefix, justQ, savecomputed, precomputed,
+            VB)
+\method{logLik}{vblogit}(object, \dots) 
+makeLinnetTolerance
+maskLaslett(X, \dots, eps, dimyx, xy, oldX, verbose, plotit)
+match2DkernelName(kernel)
+parbreak(terse)
+plan.legend.layout(B, \dots, side, sep, size, sep.frac, size.frac,
+                   started, map)
+PDEdensityLPP(x, sigma, \dots, weights, dx, dt, fun)
+PoisSaddle(beta, fi)
+PoisSaddleArea(beta, fi)
+PoisSaddleGeyer(beta, fi)
+PoisSaddlePairwise(beta, fi)
+polyLaslett(X, \dots, oldX, verbose, plotit)
+polytileareaEngine(P, xrange, yrange, nx, ny)
+positiveIndex(i, nama, len)
+PPMmodelmatrix(object, data, \dots, Q, keepNA, irregular)
+\method{print}{localpcfmatrix}(x, \dots)
+\method{plot}{localpcfmatrix}(x, \dots)
+putSpatstatVariable(name, value)
+\method{[}{localpcfmatrix}(x, i, \dots)
+\method{[}{pp3}(x, i, drop, \dots)
+lookup.im(Z, x, y, naok, strict)
+lookup2DkernelInfo(kernel)
+majorminorversion(v)
+make.even.breaks(bmax, npos, bstep)
+makefvlabel(op, accent, fname, sub, argname)
+makeunits(sing, plur, mul)
+markappend(\dots)
+markcbind(\dots)
+markformat(x)
+\method{markformat}{ppp}(x) 
+\method{markformat}{ppx}(x) 
+\method{markformat}{psp}(x) 
+\method{markformat}{default}(x) 
+mark.scale.default(marx, w, markscale, maxsize, meansize, characters)
+markspace.integral(X)
+\method{marks}{default}(x, \dots)
+\method{marks}{quad}(x, dfok=FALSE, \dots)
+markappendop(x, y)
+x \%mapp\% y
+marksubset(x, index, format)
+marksubsetop(x, i)
+x \%msub\% i
+markreplicateop(x, n)
+x \%mrep\% n
+mask2df(w)
+match.kernel(kernel)
+maxflow(costm)
+mctestSigtraceEngine(R, devdata, devsim, \dots,
+     interpolate, confint, alpha, exponent, unitname)
+meanlistfv(z, \dots)
+meanX.owin(w)            
+meanY.owin(w)
+model.se.image(fit, W, \dots, what)
+modelFrameGam(formula, \dots)
+mpl.engine(Q, trend, interaction, \dots,
+         covariates, subsetexpr, covfunargs, correction,
+	 rbord, use.gam, gcontrol,
+         GLM, GLMfamily, GLMcontrol, famille,
+         forcefit, nd, eps, allcovar, callstring,
+         precomputed, savecomputed, preponly,
+         rename.intercept, justQ, weightfactor)
+mpl.get.covariates(covariates, locations, type, covfunargs, need.deriv)
+mpl.prepare(Q, X, P, trend, interaction, covariates, 
+            want.trend, want.inter, correction, rbord, Pname,
+            callstring, \dots,
+            subsetexpr,
+            covfunargs, allcovar, precomputed, savecomputed,
+            vnamebase, vnameprefix, warn.illegal, warn.unidentifiable,
+            weightfactor, skip.border)
+mpl.usable(x)
+MultiPair.checkmatrix(mat, n, matname, naok, zerook, asymmok)
+multiplicityNumeric(x)
+multiply.only.finite.entries(x, a)
+na.handle.im(X, na.replace)
+\method{names}{fv}(x) <- value
+\method{names}{hyperframe}(x)
+\method{names}{hyperframe}(x) <- value
+nearest.pixel(x, y, Z)
+nearest.valid.pixel(x, y, Z)
+newformula(old, change, eold, enew)
+newstyle.coeff.handling(object)
+nncleanEngine(kthNND, k, d, \dots, tol, maxit,
+              plothist, lineargs, verbose, Xname)
+nndcumfun(X, \dots, r)
+no.trend.ppm(x)
+n.quad(Q)
+numberwithunit(x, u)
+numeric.columns(M, logical, others)
+objsurfEngine(objfun, optpar, objargs,
+              \dots, dotargs, objname,
+              ngrid, ratio, verbose)
+onecolumn(m)
+optimStatus(x, call)
+printStatus(x, errors.only)
+signalStatus(x, errors.only)
+outdated.interact(object)
+oversize.quad(Q, \dots, nU, nX, p)
+owinpolycheck(W, verbose=TRUE)
+owinpoly2mask(w, rasta, check=TRUE)
+owin2polypath(w)
+\method{pairs}{listof}(\dots, plot=TRUE)
+\method{pairs}{solist}(\dots, plot=TRUE)
+param.quad(Q)
+partialModelMatrix(X,D,model,callstring,\dots)
+pcf3engine(x, y, z, box, rmax, nrval, correction, delta)
+pcfmulti.inhom(X, I, J, lambdaI = NULL, lambdaJ = NULL, \dots,
+               r = NULL, breaks = NULL,
+               kernel = "epanechnikov", bw = NULL, stoyan = 0.15,
+               correction = c("translate", "Ripley"),
+               sigma = NULL, varcov = NULL,
+               Iname = "points satisfying condition I",
+               Jname = "points satisfying condition J")
+pickoption(what="option", key, keymap, \dots,
+           exact=FALSE, list.on.err=TRUE, die=TRUE, multi=FALSE,
+           allow.all=TRUE)
+plotEachLayer(x, \dots, main, plotargs, add, show.all, do.plot)
+ploterodewin(W1, W2, col.edge, col.inside, do.plot, \dots)
+ploterodeimage(W, Z, \dots, Wcol, rangeZ, colsZ, do.plot)
+plot3Dpoints(xyz, eye, org,
+             \dots,
+             type, xlim, ylim, zlim,
+             add, box, main, cex, box.back, box.front)
+plotPolygonBdry(x, \dots)
+\method{plot}{addvar}(x, \dots, do.points=FALSE)
+\method{plot}{barplotdata}(x, \dots)
+\method{plot}{bw.frac}(x, \dots)
+\method{plot}{bw.optim}(x, \dots, showopt, optargs)
+\method{plot}{lurk}(x, \dots, shade)
+\method{plot}{minconfit}(x, \dots)
+\method{plot}{parres}(x, \dots)
+\method{plot}{pppmatching}(x, addmatch = NULL, main = NULL, \dots)
+\method{plot}{plotpairsim}(x, \dots)
+\method{plot}{profilepl}(x, \dots, add, main, tag, coeff, xvariable,
+                         col, lty, lwd, col.opt, lty.opt, lwd.opt)
+\method{plot}{qqppm}(x, \dots, limits=TRUE,
+           monochrome=spatstat.options('monochrome'),
+           limcol=if(monochrome) "black" else "red")
+\method{plot}{spatialcdf}(x, \dots, xlab, ylab)
+\method{plot}{studpermutest}(x, fmla, \dots,
+        lty = NULL, col = NULL, lwd = NULL,
+        lty.theo = NULL, col.theo = NULL, lwd.theo = NULL,
+        lwd.mean = if (meanonly) 1 else NULL, lty.mean = lty, col.mean = col, 
+        separately = FALSE, meanonly = FALSE,
+        main = if (meanonly) "group means" else NULL,
+        xlim = NULL, ylim = NULL, ylab = NULL, legend = !add,
+        legendpos = "topleft", lbox = FALSE, add = FALSE)
+ppllengine(X, Y, action="project", check=FALSE)
+\method{ppm}{default}(Q, trend, interaction,
+       \dots, covariates, data, covfunargs, subset, 
+       correction, rbord, use.gam, method, forcefit, emend, project,
+       prior.mean, prior.var,
+       nd, eps, gcontrol, nsim, nrmh, start, control,
+       verb, callstring)
+ppmCovariates(model)
+ppmDerivatives(fit, what, Dcovfun, loc, covfunargs)
+ppmInfluenceEngine(fit, what, \dots, iScore, iHessian, iArgs,
+              drop, method, precomputed, sparseOK,
+              fitname, multitypeOK, entrywise, matrix.action,
+              geomsmooth)
+pppdist.mat(X, Y, cutoff = 1, q = 1, matching = TRUE,
+            precision = 9, approximation = 10)
+pppdist.prohorov(X, Y, n, dfix, type, cutoff, matching,
+            ccode, auction, precision, approximation) 
+ppsubset(X, I, Iname, fatal)
+\method{predict}{vblogit}(object, newdata, type, se.fit, dispersion,
+                          terms, na.action, \dots)
+prefixfv(x, tagprefix, descprefix, lablprefix, whichtags)
+prepareTitle(main)
+\method{print}{addvar}(x, \dots)
+\method{print}{anylist}(x, \dots)
+\method{print}{autoexec}(x, \dots)
+\method{print}{bt.frame}(x, \dots)
+\method{print}{bugtable}(x, \dots)
+\method{print}{bw.frac}(x, \dots)
+\method{print}{bw.optim}(x, \dots)
+\method{print}{colourmap}(x, \dots)
+\method{print}{diagppm}(x, \dots)
+\method{print}{distfun}(x, \dots)
+\method{print}{detpointprocfamily}(x, \dots)
+\method{print}{detpointprocfamilyfun}(x, \dots)
+\method{print}{envelope}(x, \dots)
+\method{print}{ewcdf}(x, digits, \dots)
+\method{print}{fasp}(x, \dots)
+\method{print}{funxy}(x, \dots)
+\method{print}{fv}(x, \dots, tight)
+\method{print}{fvfun}(x, \dots)
+\method{print}{hasenvelope}(x, \dots)
+\method{print}{hierarchicalordering}(x, \dots)
+\method{print}{hyperframe}(x, \dots)
+\method{print}{indicfun}(x, \dots)
+\method{print}{influence.ppm}(x, \dots)
+\method{print}{interact}(x, \dots, family, brief, banner)       
+\method{print}{intermaker}(x, \dots)
+\method{print}{isf}(x, \dots)
+\method{print}{laslett}(x, \dots)
+\method{print}{layered}(x, \dots)
+\method{print}{leverage.ppm}(x, \dots)
+\method{print}{lintess}(x, \dots)
+\method{print}{lut}(x, \dots)
+\method{print}{minconfit}(x, \dots)
+\method{print}{mppm}(x, \dots)
+\method{print}{msr}(x, \dots)
+\method{print}{nnfun}(x, \dots)
+\method{print}{numberwithunit}(x, \dots)
+\method{print}{onearrow}(x, \dots)
+\method{print}{parres}(x, \dots)
+\method{print}{plotppm}(x, \dots)
+\method{print}{plotpairsim}(x, \dots)
+\method{print}{pppmatching}(x, \dots)
+\method{print}{profilepl}(x, \dots)
+\method{print}{quadrattest}(x, \dots)
+\method{print}{qqppm}(x, \dots)
+\method{print}{rat}(x, \dots)
+\method{print}{rmhcontrol}(x, \dots)
+\method{print}{rmhexpand}(x, \dots, prefix=TRUE)
+\method{print}{rmhmodel}(x, \dots)
+\method{print}{rmhstart}(x, \dots)
+\method{print}{rmhInfoList}(x, \dots)
+\method{print}{rppm}(x, \dots)
+\method{print}{simplepanel}(x, \dots)
+\method{print}{Smoothfun}(x, \dots)
+\method{print}{solist}(x, \dots)
+\method{print}{splitppp}(x, \dots)
+\method{print}{splitppx}(x, \dots)
+\method{print}{summary.hyperframe}(x, \dots)
+\method{print}{summary.linim}(x, \dots)
+\method{print}{summary.linnet}(x, \dots)
+\method{print}{summary.lintess}(x, \dots)
+\method{print}{summary.listof}(x, \dots)
+\method{print}{summary.logiquad}(x, \dots, dp=3)
+\method{print}{summary.lut}(x, \dots)
+\method{print}{summary.mppm}(x, \dots, brief)
+\method{print}{summary.owin}(x, \dots)
+\method{print}{summary.ppp}(x, \dots, dp)
+\method{print}{summary.psp}(x, \dots)
+\method{print}{summary.rmhexpand}(x, \dots)
+\method{print}{summary.splitppp}(x, \dots)
+\method{print}{summary.solist}(x, \dots)
+\method{print}{summary.splitppx}(x, \dots)
+\method{print}{summary.units}(x, \dots)
+\method{print}{symbolmap}(x, \dots)
+\method{print}{textstring}(x, \dots)
+\method{print}{texturemap}(x, \dots)
+\method{print}{tess}(x, \dots, brief=FALSE)
+\method{print}{timed}(x, \dots)
+\method{print}{vblogit}(x, \dots)
+\method{print}{yardstick}(x, \dots)
+project3Dhom(xyz, eye, org, vert)
+putlastshift(X, vec)
+quad(data, dummy, w, param)
+quad.mppm(x)
+quadBlockSizes(nX, nD, p, nMAX, announce)
+RandomFieldsSafe()
+ratfv(df, numer, denom, \dots, ratio)
+recognise.spatstat.type(x)
+rectquadrat.breaks(xr, yr, nx = 5, ny = nx, xbreaks = NULL, ybreaks = NULL)
+rectquadrat.countEngine(x, y, xbreaks, ybreaks, weights)
+reduceformula(fmla, deletevar, verbose)
+reheat(model, invtemp)
+RelevantDeviation(x, alternative, clamp, scaling)
+repair.image.xycoords(x)
+replacementIndex(ii, stuff)
+representativeRows(x)
+resolveEinfo(x, what, fallback, warn, atomic)
+resolve.vargamma.shape(\dots, nu.ker, nu.pcf, default = FALSE)
+rgbNA(red, green, blue, alpha, maxColorValue)
+rhohatEngine(model, covariate, reference, volume, \dots,
+               weights, method, horvitz, smoother, resolution, 
+               n, bw, adjust, from, to, 
+               bwref, covname, covunits, confidence, modelcall, callstring)
+rhohatCalc(ZX, Zvalues, lambda, denom, \dots,
+           weights, lambdaX,
+           method, horvitz, smoother,
+           n, bw, adjust, from, to, 
+           bwref, covname, confidence,
+           covunits, modelcall, callstring, savestuff)
+rMaternInhibition(type, kappa, r, win, stationary, \dots, nsim, drop)
+RmhExpandRule(nama)
+rocData(covariate, nullmodel, \dots, high)
+rocModel(lambda, nullmodel, \dots, high)
+rmhsnoop(\dots, Wsim, Wclip, R, xcoords, ycoords, mlevels, mcodes, irep, itype, 
+     proptype, proplocn, propmark, propindx, numerator, denominator)
+roseContinuous(ang, rad, unit, \dots,
+               start, clockwise, main, labels, at, do.plot)
+ruletextline(ch, n, terse)
+quadrat.testEngine(X, nx, ny, alternative, method, conditional, CR, 
+     \dots, nsim, Xcount, xbreaks, ybreaks, tess, fit, Xname, fitname)
+quadscheme.replicated(data, dummy, method, \dots)
+quadscheme.spatial(data, dummy, method, \dots)
+pointgrid(W, ngrid)
+rastersample(X, Y)
+rasterx.mask(w, drop)
+rastery.mask(w, drop)
+rasterxy.mask(w, drop)
+rasterx.im(x)
+rastery.im(x)
+rasterxy.im(x, drop)
+rebadge.fv(x, new.ylab, new.fname, tags, new.desc, new.labl, new.yexp,
+           new.dotnames, new.preferred, new.formula, new.tags)
+rebadge.as.crossfun(x, main, sub, i, j)
+rebadge.as.dotfun(x, main, sub, i)
+rebound(x, rect)
+\method{rebound}{im}(x, rect)  
+\method{rebound}{ppp}(x, rect) 
+\method{rebound}{psp}(x, rect) 
+\method{rebound}{owin}(x, rect)
+reconcile.fv(\dots)
+rename.fv(x, fname, ylab, yexp)
+repair.old.factor.image(x)
+reincarnate.interact(object)
+resid4plot(RES, plot.neg, plot.smooth,
+           spacing, outer, srange, monochrome, main,
+           xlab, ylab, rlab, col.neg, col.smooth, \dots)
+resid1plot(RES, opt, plot.neg, plot.smooth,
+              srange, monochrome, main,
+              add, show.all, do.plot, col.neg, col.smooth, \dots)
+resid1panel(observedX, observedV,
+            theoreticalX, theoreticalV, theoreticalSD,
+            xlab,ylab, \dots, do.plot)
+resolve.2D.kernel(\dots,
+            sigma, varcov, x, mindist, adjust, bwfun, allow.zero)
+restrict.mask(M, W)
+reversePolyclipArgs(x, p)
+rmax.Rigid(X, g)
+rmax.rule(fun, W, lambda)
+rotxy(X, angle = pi/2)
+rotxypolygon(p, angle = pi/2)
+rmhResolveControl(control, model)
+rmhResolveExpansion(win, control, imagelist, itype)
+rmhResolveTypes(model, start, control)
+rmhSnoopEnv(Xinit, Wclip, R)
+\method{rmhcontrol}{rmhcontrol}(\dots) 
+\method{rmhcontrol}{list}(\dots) 
+rmhEngine(InfoList, \dots, verbose, kitchensink, preponly, snoop,
+                           overrideXstart, overrideclip) 
+\method{rmhmodel}{rmhmodel}(model, \dots) 
+\method{rmhstart}{rmhstart}(start, \dots) 
+\method{rmhstart}{list}(start, \dots) 
+rmpoint.I.allim(n, f, types)
+\method{row.names}{hyperframe}(x)
+\method{row.names}{hyperframe}(x) <- value
+rpoint.multi(n, f, fmax, marks, win, giveup, verbose, warn, nsim, drop)
+runifpoispp(lambda, win, \dots, nsim, drop)
+runifpoisppOnLines(lambda, L, nsim)
+runifrect(n, win, nsim, drop)
+safedeldir(X)
+safelookup(Z, x, factor, warn)
+\method{scalardilate}{breakpts}(X, f, \dots)
+\method{scalardilate}{diagramobj}(X, f, \dots)
+\method{scalardilate}{msr}(X, f, \dots)
+scanmeasure(X, \dots)
+\method{scanmeasure}{ppp}(X, r, \dots, method) 
+\method{scanmeasure}{im}(X, r, \dots)
+scanPoisLRTS(nZ, nG, muZ, muG, alternative)
+scanBinomLRTS(nZ, nG, muZ, muG, alternative)
+second.moment.calc(x, sigma, edge, what, \dots,
+                   varcov=NULL, expand=FALSE, debug=FALSE)
+second.moment.engine(x, sigma, edge, what, \dots,
+      kernel, obswin, varcov, npts, debug)
+sewpcf(d, w, denargs, lambda2area, divisor)
+sewsmod(d, ff, wt, Ef, rvals, method="smrep", \dots, nwtsteps=500)
+\method{shift}{diagramobj}(X, \dots)
+\method{shift}{influence.ppm}(X, \dots)
+\method{shift}{leverage.ppm}(X, \dots)
+\method{shift}{msr}(X, \dots)
+\method{shift}{quadratcount}(X, \dots)
+\method{shift}{quadrattest}(X, \dots)
+shiftxy(X, vec = c(0, 0))
+shiftxypolygon(p, vec = c(0, 0))
+\method{simulate}{profilepl}(object, \dots)
+simulrecipe(type, expr, envir, csr, pois, constraints)
+slr.prepare(CallInfo, envir, data, dataAtPoints, splitby, clip)
+slrAssemblePixelData(Y, Yname, W, covimages, dataAtPoints, pixelarea)
+\method{Smooth}{solist}(X, \dots)
+smoothcrossEngine(Xdata, Xquery, values, sigma, \dots,
+                    weights, varcov, sorted)
+smoothpointsEngine(x, values, sigma, \dots,
+                    weights, varcov, leaveoneout, sorted, cutoff)
+\method{sort}{im}(x, \dots)
+sortalongsegment(df)
+spatstat.rawdata.location(\dots)
+spatstat.xy.coords(x, y)
+spatstatClusterModelInfo(name, onlyPCP)
+spatstatDPPModelInfo(model)
+spatstatRmhInfo(cifname)
+spatialCDFframe(model, covariate, \dots)
+spatialCDFtest(model, covariate, test, \dots,
+         dimyx, eps, interpolate, jitter,
+         nsim, verbose, modelname, covname, dataname)
+sphere.volume(range, nval = 10)
+splitHybridInteraction(coeffs, inte)
+sp.foundclass(cname, inlist, formalname, argsgiven)             
+sp.foundclasses(cnames, inlist, formalname, argsgiven)
+store.versionstring.spatstat()
+\method{str}{hyperframe}(object, \dots)
+strausscounts(U,X,r,EqualPairs)
+suffloc(object)
+suffstat.generic(model, X, callstring)
+suffstat.poisson(model, X, callstring)
+summarise.trend(trend, w, a)
+\method{summary}{envelope}(object,\dots)
+\method{summary}{funxy}(object,\dots)
+\method{summary}{hyperframe}(object, \dots, brief=FALSE)
+\method{summary}{lintess}(object, \dots)
+\method{summary}{logiquad}(object, \dots, checkdup=FALSE)
+\method{summary}{lut}(object, \dots)
+\method{summary}{mppm}(object, \dots, brief=FALSE)
+\method{summary}{profilepl}(object, \dots)
+\method{summary}{pppmatching}(object, \dots)
+\method{summary}{ppx}(object, \dots)
+\method{summary}{rmhexpand}(object, \dots)
+\method{summary}{splitppx}(object, \dots)
+\method{summary}{vblogit}(object, \dots)
+sumsymouter(x, w)
+superimposeMarks(arglist, nobj)
+symbolmaptype(x)
+\method{tail}{hyperframe}(x,n,\dots)
+tensor1x1(A,B)
+test.crossing.psp(A,B)
+test.selfcrossing.psp(A)
+thinjump(n, p)
+tilecentroids(W, nx, ny)
+trianglediameters(iedge, jedge, edgelength, \dots,
+                  nvert, dmax, check)
+trim.mask(M, R, tolerant)
+tweak.fv.entry(x, current.tag, new.labl, new.desc, new.tag)
+tweak.ratfv.entry(x, \dots)
+tweak.coefs(model, new.coef)
+twostage.test(X, \dots, exponent, nsim, nsimsub,
+              alternative, reuse, leaveout, interpolate,
+              savefuns, savepatterns, verbose, testblurb)
+x \%unit\% u
+\method{unitname}{default}(x) 
+\method{unitname}{default}(x) <- value 
+\method{unstack}{solist}(x, \dots)
+\method{unstack}{layered}(x, \dots)
+unstackFilter(x)
+\method{update}{im}(object, \dots) 
+\method{update}{ippm}(object, \dots, envir) 
+\method{update}{rmhstart}(object, \dots)
+validradius(r, caller)
+validate2Dkernel(kernel, fatal)
+validate.angles(angles, unit, guess)
+validate.lpp.coords(X, fatal, context)
+validate.mask(w, fatal=TRUE)        
+validate.quad(Q, fatal, repair, announce)
+vanilla.fv(x)
+varcountEngine(g, B, lambdaB, f)
+%vblogit(y, X, offset, eps, m0, S0, S0i, xi0, verb, maxiter, \dots)
+%vblogit.fmla(formula, offset, data, subset, weights, verbose, epsilon, \dots)
+versionstring.interact(object)
+versionstring.ppm(object)
+versionstring.spatstat()
+verifyclass(X, C, N = deparse(substitute(X)), fatal = TRUE)
+\method{Window}{lintess}(X, \dots)
+\method{Window}{linnet}(X, \dots, check=TRUE) <- value
+\method{Window}{lpp}(X, \dots, check=TRUE) <- value
+warn.once(key, \dots)
+waxlyrical(type, terse)
+windows.mppm(x)
+w.quad(Q)               
+x.quad(Q)
+y.quad(Q)
+xy.grid(xr, yr, nx, ny, dx, dy)
+X2testEngine(OBS, EXP, \dots, method, CR, df, nsim,
+     conditional, alternative, testname, dataname)
+\method{xtfrm}{im}(x)
+xypolyselfint(p, eps, proper, yesorno, checkinternal)
+xypolygon2psp(p, w, check)
+%%% sparse 3D arrays
+sparse3Darray(i,j,k,x,dims,dimnames,strict,nonzero)
+as.sparse3Darray(x, \dots)
+\method{dim}{sparse3Darray}(x)
+\method{dim}{sparse3Darray}(x) <- value
+\method{dimnames}{sparse3Darray}(x)
+\method{dimnames}{sparse3Darray}(x) <- value
+\method{print}{sparse3Darray}(x, \dots)
+\method{aperm}{sparse3Darray}(a, perm, resize, \dots)
+\method{as.array}{sparse3Darray}(x, \dots)
+\method{[}{sparse3Darray}(x, i, j, k, drop, \dots)
+\method{[}{sparse3Darray}(x, i, j, k, \dots) <- value
+\method{anyNA}{sparse3Darray}(x, recursive)
+RelevantZero(x)
+RelevantEmpty(x)
+isRelevantZero(x)
+unionOfSparseIndices(A,B)
+\special{Math(x, \dots)}
+\special{Ops(e1, e2)}
+\special{Summary(\dots, na.rm = FALSE)}
+%NAMESPACE S3method("Math", "sparse3Darray")
+%NAMESPACE S3method("Ops", "sparse3Darray")
+%NAMESPACE S3method("Summary", "sparse3Darray")
+inside3Darray(d, i, j, k)
+SparseEntries(x)
+SparseIndices(x)
+EntriesToSparse(df, dims)
+mapSparseEntries(x, margin, values, conform, across)
+applySparseEntries(x, f, \dots)
+sumsymouterSparse(x, w, dbg)
+tenseur(A, B, alongA, alongB)
+marginSums(X, MARGIN)
+rbindCompatibleDataFrames(x)
+bind.sparse3Darray(A, B, along)
+%%
+spatstatDiagnostic(msg)
+%%
+as.ppplist(x, check)
+as.imlist(x, check)
+pointsAlongNetwork(L, delta)
+expandSparse(x, n, across)
+allElementsIdentical(x, entry)
+resampleNetworkDataFrame(df, template)
+sparseVectorCumul(x, i, length)
+%%
+\method{as.ppm}{lppm}(object)
+\method{as.ppm}{rppm}(object)
+\method{predict}{profilepl}(object, \dots)
+%%%%%%%
+}
+\details{
+  These internal \pkg{spatstat} functions are not usually called
+  directly by the user. Their names and capabilities may change
+  without warning from one version of \pkg{spatstat} to the next.
+}
+\keyword{internal}
diff --git a/man/spatstat-package.Rd b/man/spatstat-package.Rd
new file mode 100644
index 0000000..ff090df
--- /dev/null
+++ b/man/spatstat-package.Rd
@@ -0,0 +1,1927 @@
+\name{spatstat-package}
+\alias{spatstat-package} 
+\alias{spatstat} 
+\docType{package}
+\title{The Spatstat Package}
+\description{
+  This is a summary of the features of 
+  \pkg{spatstat}, a package in \R 
+  for the statistical analysis of spatial point patterns.
+}
+\details{
+  \pkg{spatstat} is a package for the statistical analysis
+  of spatial data. Its main focus is the analysis of
+  spatial patterns of points in two-dimensional space.
+  The points may carry auxiliary data (`marks'),
+  and the spatial region in which the points were recorded 
+  may have arbitrary shape. 
+
+  The package is designed to support a complete statistical analysis
+  of spatial data. It supports
+  \itemize{
+    \item creation, manipulation and plotting of point patterns;
+    \item exploratory data analysis;
+    \item spatial random sampling;
+    \item simulation of point process models;
+    \item parametric model-fitting;
+    \item non-parametric smoothing and regression;
+    \item formal inference (hypothesis tests, confidence intervals);
+    \item model diagnostics.
+  }
+  
+  Apart from two-dimensional point patterns and point processes,
+  \pkg{spatstat} also supports point patterns in three dimensions, 
+  point patterns in multidimensional space-time,
+  point patterns on a linear network,
+  patterns of line segments in two dimensions, 
+  and spatial tessellations and random sets in two dimensions.
+
+  The package can fit several types of point process models
+  to a point pattern dataset:
+  \itemize{
+    \item Poisson point process models
+    (by Berman-Turner approximate maximum likelihood
+    or by spatial logistic regression)
+    \item Gibbs/Markov point process models
+    (by Baddeley-Turner approximate maximum pseudolikelihood,
+    Coeurjolly-Rubak logistic likelihood, or
+    Huang-Ogata approximate maximum likelihood)
+    \item Cox/cluster point process models
+    (by Waagepetersen's two-step fitting procedure
+    and minimum contrast, composite likelihood, or Palm likelihood)
+    \item determinantal point process models
+    (by Waagepetersen's two-step fitting procedure
+    and minimum contrast, composite likelihood, or Palm likelihood)
+  }
+  The models may include spatial trend,
+  dependence on covariates, and complicated interpoint interactions.
+  Models are specified by 
+  a \code{formula} in the \R  language, and are fitted using
+  a function analogous to \code{\link{lm}} and \code{\link{glm}}.
+  Fitted models can be printed, plotted, predicted, simulated and so on.
+}
+\section{Getting Started}{
+  For a quick introduction to \pkg{spatstat}, read
+  the package vignette \emph{Getting started with spatstat}
+  installed with \pkg{spatstat}. To read that document, you can either
+  \itemize{
+    \item visit \url{cran.r-project.org/web/packages/spatstat}
+    and click on \code{Getting Started with Spatstat}
+    \item
+    start \R, type \code{library(spatstat)} and \code{vignette('getstart')}
+    \item
+    start \R, type \code{help.start()} to open the help
+    browser, and navigate to \code{Packages > spatstat > Vignettes}.
+  }
+  Once you have installed \pkg{spatstat}, start \R and type
+  \code{library(spatstat)}. Then type \code{beginner}
+  for a beginner's introduction, or 
+  \code{demo(spatstat)} for a demonstration of the package's capabilities.
+
+  For a complete course on \pkg{spatstat},
+  and on statistical analysis of spatial point patterns,
+  read the book by Baddeley, Rubak and Turner (2015).
+  Other recommended books on spatial point process methods
+  are Diggle (2014), Gelfand et al (2010) and Illian et al (2008).
+
+  The \pkg{spatstat} package includes over 50 datasets,
+  which can be useful when learning the package.
+  Type \code{demo(data)} to see plots of all datasets
+  available in the package.
+  Type \code{vignette('datasets')} for detailed background information
+  on these datasets, and plots of each dataset.
+      
+  For information on converting your data into \pkg{spatstat} format,
+  read Chapter 3 of Baddeley, Rubak and Turner (2015).
+  This chapter is available free online, as one of the sample chapters
+  at the book companion website, \url{spatstat.github.io/book}.
+
+  For information about handling data in \bold{shapefiles},
+  see Chapter 3, or the Vignette
+  \emph{Handling shapefiles in the spatstat package},
+  installed with \pkg{spatstat}, accessible as
+  \code{vignette('shapefiles')}.
+}
+\section{Updates}{
+  New versions of \pkg{spatstat} are released every 8 weeks.
+  Users are advised to update their installation of \pkg{spatstat}
+  regularly.
+  
+  Type \code{latest.news} to read the news documentation about
+  changes to the current installed version of \pkg{spatstat}.
+
+  See the Vignette \emph{Summary of recent updates},
+  installed with \pkg{spatstat}, which describes the main changes
+  to \pkg{spatstat} since the book (Baddeley, Rubak and Turner, 2015)
+  was published. It is accessible as \code{vignette('updates')}.
+  
+  Type \code{news(package="spatstat")} to read news documentation about
+  all previous versions of the package.
+}
+\section{FUNCTIONS AND DATASETS}{
+  Following is a summary of the main functions and datasets
+  in the \pkg{spatstat} package.
+  Alternatively an alphabetical list of all functions and
+  datasets is available by typing \code{library(help=spatstat)}.
+
+  For further information on any of these,
+  type \code{help(name)} or \code{?name}
+  where \code{name} is the name of the function
+  or dataset.
+}
+\section{CONTENTS:}{
+  \tabular{ll}{
+    I. \tab Creating and manipulating data \cr
+    II. \tab Exploratory Data Analysis \cr
+    III. \tab Model fitting (Cox and cluster models) \cr
+    IV. \tab Model fitting (Poisson and Gibbs models) \cr
+    V. \tab Model fitting (determinantal point processes)\cr
+    VI. \tab Model fitting (spatial logistic regression)\cr
+    VII. \tab Simulation \cr
+    VIII. \tab Tests and diagnostics\cr
+    IX. \tab Documentation
+  }
+}
+
+\section{I. CREATING AND MANIPULATING DATA}{
+
+  \bold{Types of spatial data:}
+
+  The main types of spatial data supported by \pkg{spatstat} are:
+
+  \tabular{ll}{
+    \code{\link{ppp}} \tab point pattern \cr
+    \code{\link{owin}} \tab window (spatial region) \cr
+    \code{\link{im}} \tab pixel image \cr
+    \code{\link{psp}} \tab line segment pattern \cr
+    \code{\link{tess}} \tab tessellation \cr
+    \code{\link{pp3}} \tab three-dimensional point pattern \cr
+    \code{\link{ppx}} \tab point pattern in any number of dimensions \cr
+    \code{\link{lpp}} \tab point pattern on a linear network
+  }
+
+  \bold{To create a point pattern:}
+  
+  \tabular{ll}{
+    \code{\link{ppp}} \tab
+    create a point pattern from \eqn{(x,y)} and window information
+    \cr
+    \tab
+    \code{ppp(x, y, xlim, ylim)} for rectangular window\cr
+    \tab
+    \code{ppp(x, y, poly)} for polygonal window \cr
+    \tab
+    \code{ppp(x, y, mask)} for binary image window \cr
+    \code{\link{as.ppp}} \tab
+    convert other types of data to a \code{ppp} object \cr
+    \code{\link{clickppp}} \tab
+    interactively add points to a plot \cr
+    \code{\link{marks<-}}, \code{\%mark\%}  \tab
+    attach/reassign marks to a point pattern
+  }
+      
+  \bold{To simulate a random point pattern:}
+  
+  \tabular{ll}{
+	\code{\link{runifpoint}} \tab
+	generate \eqn{n} independent uniform random points \cr
+	\code{\link{rpoint}} \tab
+	generate \eqn{n} independent random points \cr
+	\code{\link{rmpoint}} \tab
+	generate \eqn{n} independent multitype random points \cr
+	\code{\link{rpoispp}} \tab
+	simulate the (in)homogeneous Poisson point process \cr
+	\code{\link{rmpoispp}} \tab
+	simulate the (in)homogeneous multitype Poisson point process \cr
+	\code{\link{runifdisc}} \tab
+	generate \eqn{n} independent uniform random points in disc\cr
+	\code{\link{rstrat}} \tab
+	stratified random sample of points \cr
+	\code{\link{rsyst}} \tab
+	systematic random sample of points \cr
+	\code{\link{rjitter}} \tab
+	apply random displacements to points in a pattern\cr
+	\code{\link{rMaternI}}  \tab
+	simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Model I inhibition process\cr
+	\code{\link{rMaternII}} \tab
+	simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Model II inhibition process\cr
+	\code{\link{rSSI}} \tab
+	simulate Simple Sequential Inhibition process\cr
+	\code{\link{rStrauss}} \tab
+	simulate Strauss process (perfect simulation)\cr
+	\code{\link{rHardcore}} \tab
+	simulate Hard Core process (perfect simulation)\cr
+	\code{\link{rStraussHard}} \tab
+	simulate Strauss-hard core process (perfect simulation)\cr
+	\code{\link{rDiggleGratton}} \tab
+	simulate Diggle-Gratton process (perfect simulation)\cr
+	\code{\link{rDGS}} \tab
+	simulate Diggle-Gates-Stibbard process (perfect simulation)\cr
+	\code{\link{rPenttinen}} \tab
+	simulate Penttinen process (perfect simulation)\cr
+	\code{\link{rNeymanScott}} \tab
+	simulate a general Neyman-Scott process\cr
+	\code{\link{rPoissonCluster}} \tab
+	simulate a general Poisson cluster process\cr
+	\code{\link{rMatClust}} \tab
+	simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Cluster process\cr
+	\code{\link{rThomas}} \tab
+	simulate the Thomas process  \cr
+	\code{\link{rGaussPoisson}}  \tab
+	simulate the Gauss-Poisson cluster process\cr
+	\code{\link{rCauchy}} \tab
+	simulate Neyman-Scott Cauchy cluster process \cr
+	\code{\link{rVarGamma}} \tab
+	simulate Neyman-Scott Variance Gamma cluster process \cr
+	\code{\link{rthin}} \tab  random thinning  \cr
+	\code{\link{rcell}} \tab
+	simulate the Baddeley-Silverman cell process  \cr
+	\code{\link{rmh}} \tab
+	simulate Gibbs point process using Metropolis-Hastings \cr
+	\code{\link{simulate.ppm}} \tab
+	simulate Gibbs point process using Metropolis-Hastings \cr
+	\code{\link{runifpointOnLines}} \tab
+	generate \eqn{n} random points along specified line segments \cr
+	\code{\link{rpoisppOnLines}} \tab
+	generate Poisson random points along specified line segments 
+      }
+
+      \bold{To randomly change an existing point pattern:}
+      
+      \tabular{ll}{
+	\code{\link{rshift}} \tab random shifting of points \cr
+        \code{\link{rjitter}} \tab
+	apply random displacements to points in a pattern\cr
+	\code{\link{rthin}} \tab  random thinning \cr
+	\code{\link{rlabel}} \tab random (re)labelling of a multitype
+	point pattern \cr
+        \code{\link{quadratresample}} \tab block resampling 
+      }
+
+      \bold{Standard point pattern datasets:}
+
+      Datasets in \pkg{spatstat} are lazy-loaded, so you can simply
+      type the name of the dataset to use it; there is no need
+      to type \code{\link{data}(amacrine)} etc.
+
+      Type \code{demo(data)} to see a display of all the datasets
+      installed with the package.
+
+      Type \code{vignette('datasets')} for a document giving an overview
+      of all datasets, including background information, and plots.
+      
+      \tabular{ll}{
+	\code{\link{amacrine}} \tab Austin Hughes' rabbit amacrine cells \cr
+	\code{\link{anemones}} \tab Upton-Fingleton sea anemones data\cr
+	\code{\link{ants}} \tab Harkness-Isham ant nests data\cr
+	\code{\link{bdspots}} \tab Breakdown spots in microelectrodes \cr
+	\code{\link{bei}} \tab Tropical rainforest trees \cr
+	\code{\link{betacells}} \tab Waessle et al. cat retinal ganglia data \cr
+	\code{\link{bramblecanes}} \tab Bramble Canes data \cr
+	\code{\link{bronzefilter}} \tab Bronze Filter Section data \cr
+	\code{\link{cells}} \tab Crick-Ripley biological cells data \cr
+	\code{\link{chicago}} \tab Chicago crimes \cr
+	\code{\link{chorley}} \tab Chorley-Ribble cancer data \cr
+	\code{\link{clmfires}} \tab Castilla-La Mancha forest fires \cr
+	\code{\link{copper}} \tab Berman-Huntington copper deposits data \cr
+	\code{\link{dendrite}} \tab Dendritic spines \cr
+	\code{\link{demohyper}} \tab Synthetic point patterns\cr
+	\code{\link{demopat}} \tab Synthetic point pattern \cr
+	\code{\link{finpines}} \tab Finnish Pines data \cr
+	\code{\link{flu}} \tab Influenza virus proteins \cr
+	\code{\link{gordon}} \tab People in Gordon Square, London \cr
+	\code{\link{gorillas}} \tab Gorilla nest sites \cr
+	\code{\link{hamster}} \tab Aherne's hamster tumour data \cr
+	\code{\link{humberside}} \tab North Humberside childhood leukaemia data \cr
+	\code{\link{hyytiala}} \tab {Mixed forest in
+              \ifelse{latex}{\out{Hyyti{\"a}l{\"a}}}{Hyytiala}, Finland}\cr
+	\code{\link{japanesepines}} \tab Japanese Pines data \cr
+	\code{\link{lansing}} \tab Lansing Woods data \cr
+	\code{\link{longleaf}} \tab Longleaf Pines data \cr
+	\code{\link{mucosa}} \tab Cells in gastric mucosa \cr
+	\code{\link{murchison}} \tab Murchison gold deposits \cr
+	\code{\link{nbfires}} \tab New Brunswick fires data \cr
+	\code{\link{nztrees}} \tab Mark-Esler-Ripley trees data \cr
+	\code{\link{osteo}} \tab Osteocyte lacunae (3D, replicated) \cr
+	\code{\link{paracou}} \tab Kimboto trees in Paracou, French Guiana \cr
+	\code{\link{ponderosa}} \tab Getis-Franklin ponderosa pine trees data \cr
+	\code{\link{pyramidal}} \tab Pyramidal neurons from 31 brains \cr
+	\code{\link{redwood}} \tab Strauss-Ripley redwood saplings data \cr
+	\code{\link{redwoodfull}} \tab Strauss redwood saplings data (full set) \cr
+	\code{\link{residualspaper}} \tab Data from Baddeley et al (2005) \cr
+	\code{\link{shapley}} \tab Galaxies in an astronomical survey \cr
+	\code{\link{simdat}} \tab Simulated point pattern (inhomogeneous, with interaction) \cr
+	\code{\link{spiders}} \tab Spider webs on mortar lines of brick wall \cr
+	\code{\link{sporophores}} \tab Mycorrhizal fungi around a tree \cr
+	\code{\link{spruces}} \tab Spruce trees in Saxonia \cr
+	\code{\link{swedishpines}} \tab Strand-Ripley Swedish pines data \cr
+	\code{\link{urkiola}} \tab Urkiola Woods data \cr
+	\code{\link{waka}} \tab Trees in Waka national park \cr
+	\code{\link{waterstriders}} \tab Insects on water surface 
+      }
+
+      \bold{To manipulate a point pattern:}
+
+      \tabular{ll}{
+	\code{\link{plot.ppp}} \tab
+	plot a point pattern (e.g. \code{plot(X)}) \cr
+	\code{\link{iplot}} \tab
+	plot a point pattern interactively \cr
+	\code{\link{edit.ppp}} \tab interactive text editor \cr
+	\code{\link{[.ppp}} \tab
+	extract or replace a subset of a point pattern \cr
+        \tab \code{pp[subset]} or \code{pp[subwindow]} \cr
+	\code{\link{subset.ppp}} \tab
+	extract subset of point pattern satisfying a condition \cr
+	\code{\link{superimpose}} \tab
+	combine several point patterns  \cr
+	\code{\link{by.ppp}} \tab
+	apply a function to sub-patterns of a point pattern \cr
+	\code{\link{cut.ppp}} \tab
+	classify the points in a point pattern \cr
+	\code{\link{split.ppp}} \tab
+	divide pattern into sub-patterns \cr
+	\code{\link{unmark}} \tab
+	remove marks  \cr
+	\code{\link{npoints}} \tab
+	count the number of points  \cr
+	\code{\link{coords}} \tab
+	extract coordinates, change coordinates  \cr
+	\code{\link{marks}} \tab
+	extract marks, change marks or attach marks  \cr
+	\code{\link{rotate}} \tab
+	rotate pattern  \cr
+	\code{\link{shift} } \tab
+	translate pattern  \cr
+	\code{\link{flipxy} } \tab
+	swap \eqn{x} and \eqn{y} coordinates  \cr
+	\code{\link{reflect} } \tab
+	reflect in the origin  \cr
+	\code{\link{periodify} } \tab
+	make several translated copies  \cr
+	\code{\link{affine}} \tab
+	apply affine transformation\cr
+	\code{\link{scalardilate}} \tab
+	apply scalar dilation\cr
+	\code{\link{density.ppp}} \tab
+	kernel estimation of point pattern intensity\cr
+	\code{\link{Smooth.ppp}} \tab
+	kernel smoothing of marks of point pattern\cr
+	\code{\link{nnmark}} \tab
+	mark value of nearest data point\cr
+	\code{\link{sharpen.ppp}} \tab
+	data sharpening\cr
+	\code{\link{identify.ppp}} \tab
+	interactively identify points \cr
+	\code{\link{unique.ppp}} \tab
+	remove duplicate points \cr
+	\code{\link{duplicated.ppp}} \tab
+	determine which points are duplicates \cr
+	\code{\link{connected.ppp}} \tab find clumps of points \cr
+	\code{\link{dirichlet}} \tab
+	compute Dirichlet-Voronoi tessellation \cr
+	\code{\link{delaunay}} \tab
+	compute Delaunay triangulation \cr 
+	\code{\link{delaunayDistance}} \tab
+	graph distance in Delaunay triangulation \cr 
+	\code{\link{convexhull}} \tab compute convex hull \cr
+	\code{\link{discretise}} \tab discretise coordinates \cr
+	\code{\link{pixellate.ppp}} \tab approximate point pattern by 
+	pixel image \cr
+	\code{\link{as.im.ppp}} \tab approximate point pattern by 
+	pixel image 
+      }
+      See \code{\link{spatstat.options}} to control plotting behaviour.
+      
+    \bold{To create a window:}
+
+    An object of class \code{"owin"} describes a spatial region
+      (a window of observation).
+
+      \tabular{ll}{
+	\code{\link{owin}}	\tab	Create a window object \cr
+	\tab \code{owin(xlim, ylim)} for rectangular window \cr
+	\tab \code{owin(poly)} for polygonal window \cr
+	\tab \code{owin(mask)} for binary image window \cr
+	\code{\link{Window}}	\tab
+	Extract window of another object \cr
+	\code{\link{Frame}}	\tab
+	Extract the containing rectangle ('frame') of another object \cr
+	\code{\link{as.owin}}	\tab
+	Convert other data to a window object \cr
+	\code{\link{square}}    \tab	make a square window \cr
+	\code{\link{disc}}    \tab	make a circular window \cr
+	\code{\link{ellipse}}    \tab	make an elliptical window \cr
+	\code{\link{ripras}}    \tab
+	Ripley-Rasson estimator of window, given only the points \cr
+	\code{\link{convexhull}} \tab compute convex hull of something \cr 
+	\code{\link{letterR}}    \tab
+	polygonal window in the shape of the \R logo \cr
+	\code{\link{clickpoly}}    \tab
+	interactively draw a polygonal window  \cr
+	\code{\link{clickbox}}    \tab
+	interactively draw a rectangle  
+      }
+
+    \bold{To manipulate a window:}
+
+    \tabular{ll}{
+	\code{\link{plot.owin}}	\tab	plot a window. \cr
+	\tab		\code{plot(W)}\cr
+	\code{\link{boundingbox}} \tab
+	Find a tight bounding box for the window \cr
+	\code{\link{erosion}}	\tab
+	erode window by a distance r\cr
+	\code{\link{dilation}}	\tab
+	dilate window by a distance r\cr
+	\code{\link{closing}}	\tab
+	close window by a distance r\cr
+	\code{\link{opening}}	\tab
+	open window by a distance r\cr
+	\code{\link{border}}	\tab
+	difference between window and its erosion/dilation \cr 
+	\code{\link{complement.owin}}	\tab
+	invert (swap inside and outside)\cr
+	\code{\link{simplify.owin}}	\tab
+	approximate a window by a simple polygon  \cr
+	\code{\link{rotate}} \tab rotate window  \cr
+	\code{\link{flipxy}} \tab swap \eqn{x} and \eqn{y} coordinates  \cr
+	\code{\link{shift} } \tab translate window  \cr
+	\code{\link{periodify} } \tab make several translated copies  \cr
+	\code{\link{affine}} \tab apply affine transformation \cr
+	\code{\link{as.data.frame.owin}} \tab
+	convert window to data frame 
+      }
+
+    \bold{Digital approximations:}
+
+    \tabular{ll}{
+	\code{\link{as.mask}}	\tab
+	Make a discrete pixel approximation of a given window \cr
+	\code{\link{as.im.owin}} \tab convert window to pixel image \cr
+	\code{\link{pixellate.owin}} \tab convert window to pixel image \cr
+	\code{\link{commonGrid}} \tab find common pixel grid for windows \cr
+	\code{\link{nearest.raster.point}} \tab
+	map continuous coordinates to raster locations\cr
+	\code{\link{raster.x}} \tab
+	raster x coordinates \cr
+	\code{\link{raster.y}} \tab
+	raster y coordinates \cr
+	\code{\link{raster.xy}} \tab
+	raster x and y coordinates \cr
+	\code{\link{as.polygonal}} \tab
+	convert pixel mask to polygonal window 
+      }
+      See \code{\link{spatstat.options}} to control the approximation
+
+    \bold{Geometrical computations with windows:}
+
+    \tabular{ll}{
+	\code{\link{edges}}	\tab	extract boundary edges \cr
+	\code{\link{intersect.owin}}	\tab	intersection of two windows\cr
+	\code{\link{union.owin}}	\tab	union of two windows\cr
+	\code{\link{setminus.owin}}	\tab	set subtraction of two windows\cr
+	\code{\link{inside.owin}}	\tab	determine whether a point is inside a window\cr
+	\code{\link{area.owin}}	\tab	compute area \cr
+	\code{\link{perimeter}}	\tab	compute perimeter length \cr
+	\code{\link{diameter.owin}}	\tab	compute diameter\cr
+	\code{\link{incircle}}	\tab	find largest circle inside a window \cr
+	\code{\link{inradius}}	\tab	radius of incircle \cr
+	\code{\link{connected.owin}}    \tab find connected components of window \cr
+	\code{\link{eroded.areas}}	\tab	compute areas of eroded windows\cr
+	\code{\link{dilated.areas}}	\tab	compute areas of dilated windows\cr
+	\code{\link{bdist.points}}	\tab	compute distances from data points to window boundary \cr
+	\code{\link{bdist.pixels}}	\tab	compute distances from all pixels to window boundary \cr
+	\code{\link{bdist.tiles}}	\tab
+	boundary distance for each tile in tessellation \cr
+	\code{\link{distmap.owin}}	\tab	distance transform image \cr
+	\code{\link{distfun.owin}}	\tab	distance transform \cr
+	\code{\link{centroid.owin}}	\tab compute centroid (centre of mass) of window\cr
+	\code{\link{is.subset.owin}}    \tab determine whether one
+	window contains another \cr
+	\code{\link{is.convex}} \tab determine whether a window is convex \cr 
+	\code{\link{convexhull}} \tab compute convex hull \cr
+	\code{\link{triangulate.owin}} \tab decompose into triangles \cr
+	\code{\link{as.mask}} \tab pixel approximation of window \cr
+	\code{\link{as.polygonal}} \tab polygonal approximation of window \cr
+	\code{\link{is.rectangle}} \tab test whether window is a rectangle \cr 
+	\code{\link{is.polygonal}} \tab test whether window is polygonal \cr 
+	\code{\link{is.mask}} \tab test whether window is a mask \cr 
+	\code{\link{setcov}} \tab spatial covariance function of window \cr
+	\code{\link{pixelcentres}} \tab extract centres of pixels in mask \cr
+	\code{\link{clickdist}}    \tab
+	measure distance between two points clicked by user
+      }
+
+    \bold{Pixel images:}
+    An object of class \code{"im"} represents a pixel image. 
+    Such objects are returned by some of the functions in
+      \pkg{spatstat} including \code{\link{Kmeasure}},
+      \code{\link{setcov}} and \code{\link{density.ppp}}. 
+      \tabular{ll}{
+	\code{\link{im}} \tab create a pixel image\cr
+	\code{\link{as.im}} \tab convert other data to a pixel image\cr
+	\code{\link{pixellate}} \tab convert other data to a pixel image\cr
+	\code{\link{as.matrix.im}} \tab convert pixel image to matrix\cr
+	\code{\link{as.data.frame.im}} \tab convert pixel image to data frame\cr
+	\code{\link{as.function.im}} \tab convert pixel image to function\cr
+	\code{\link{plot.im}}	\tab	plot a pixel image on screen as a digital image\cr
+	\code{\link{contour.im}}	\tab draw contours of a pixel image \cr
+	\code{\link{persp.im}}	\tab draw perspective plot of a pixel image \cr
+	\code{\link{rgbim}}	\tab create colour-valued pixel image \cr
+	\code{\link{hsvim}}	\tab create colour-valued pixel image \cr
+	\code{\link{[.im}} 	\tab extract a subset of a pixel image\cr
+	\code{\link{[<-.im}} 	\tab replace a subset of a pixel image\cr
+	\code{\link{rotate.im}} \tab rotate pixel image \cr
+	\code{\link{shift.im}} \tab apply vector shift to pixel image \cr
+	\code{\link{affine.im}} \tab apply affine transformation to image \cr
+	\code{X}	\tab	print very basic information about image \code{X}\cr
+	\code{\link{summary}(X)} \tab	summary of image \code{X} \cr
+	\code{\link{hist.im}} \tab	histogram of image \cr
+	\code{\link{mean.im}} \tab	mean pixel value of image  \cr
+	\code{\link{integral.im}} \tab	integral of pixel values  \cr
+	\code{\link{quantile.im}} \tab	quantiles of image  \cr
+	\code{\link{cut.im}} \tab	convert numeric image to factor image \cr
+	\code{\link{is.im}} \tab test whether an object is a pixel image\cr
+	\code{\link{interp.im}} \tab interpolate a pixel image\cr
+	\code{\link{blur}} \tab apply Gaussian blur to image\cr
+	\code{\link{Smooth.im}} \tab apply Gaussian blur to image\cr
+	\code{\link{connected.im}} \tab find connected components \cr
+	\code{\link{compatible.im}} \tab test whether two images have
+	compatible dimensions \cr
+	\code{\link{harmonise.im}} \tab make images compatible \cr
+	\code{\link{commonGrid}} \tab find a common pixel grid for images \cr
+	\code{\link{eval.im}} \tab evaluate any expression involving images\cr
+	\code{\link{scaletointerval}} \tab rescale pixel values \cr
+	\code{\link{zapsmall.im}} \tab set very small pixel values to zero \cr
+	\code{\link{levelset}} \tab level set of an image\cr 
+	\code{\link{solutionset}} \tab region where an expression is true \cr
+	\code{\link{imcov}} \tab spatial covariance function of image \cr
+	\code{\link{convolve.im}} \tab spatial convolution of images \cr
+	\code{\link{transect.im}} \tab line transect of image \cr
+	\code{\link{pixelcentres}} \tab extract centres of pixels \cr
+	\code{\link{transmat}} \tab convert matrix of pixel values \cr
+	                       \tab to a different indexing convention \cr
+	\code{\link{rnoise}} \tab random pixel noise
+     }
+
+    \bold{Line segment patterns}
+
+    An object of class \code{"psp"} represents a pattern of straight line
+    segments.
+    \tabular{ll}{
+      \code{\link{psp}} \tab create a line segment pattern \cr
+      \code{\link{as.psp}} \tab convert other data into a line segment pattern \cr
+      \code{\link{edges}} \tab extract edges of a window \cr
+      \code{\link{is.psp}} \tab determine whether a dataset has class \code{"psp"} \cr
+      \code{\link{plot.psp}} \tab plot a line segment pattern \cr
+      \code{\link{print.psp}} \tab print basic information \cr
+      \code{\link{summary.psp}} \tab print summary information \cr
+      \code{\link{[.psp}} \tab extract a subset of a line segment pattern \cr
+      \code{\link{as.data.frame.psp}} \tab
+      convert line segment pattern to data frame \cr
+      \code{\link{marks.psp}} \tab extract marks of line segments \cr
+      \code{\link{marks<-.psp}} \tab assign new marks to line segments \cr
+      \code{\link{unmark.psp}} \tab delete marks from line segments \cr
+      \code{\link{midpoints.psp}} \tab
+      compute the midpoints of line segments \cr
+      \code{\link{endpoints.psp}} \tab extract the endpoints of line segments \cr
+      \code{\link{lengths.psp}} \tab compute the lengths of line segments \cr
+      \code{\link{angles.psp}} \tab compute the orientation angles of line segments \cr
+      \code{\link{superimpose}} \tab combine several line segment patterns  \cr
+      \code{\link{flipxy}} \tab swap \eqn{x} and \eqn{y} coordinates \cr      
+      \code{\link{rotate.psp}} \tab rotate a line segment pattern \cr      
+      \code{\link{shift.psp}} \tab shift a line segment pattern \cr      
+      \code{\link{periodify}} \tab make several shifted copies \cr      
+      \code{\link{affine.psp}} \tab apply an affine transformation \cr      
+      \code{\link{pixellate.psp}} \tab approximate line segment pattern
+    by pixel image \cr      
+      \code{\link{as.mask.psp}} \tab approximate line segment pattern
+    by binary mask \cr      
+      \code{\link{distmap.psp}} \tab compute the distance map of a line
+      segment pattern \cr
+      \code{\link{distfun.psp}} \tab compute the distance map of a line
+      segment pattern \cr
+      \code{\link{density.psp}} \tab kernel smoothing of line segments\cr
+      \code{\link{selfcrossing.psp}} \tab find crossing points between
+      line segments \cr
+      \code{\link{selfcut.psp}} \tab cut segments where they cross \cr
+      \code{\link{crossing.psp}} \tab find crossing points between
+      two line segment patterns \cr
+      \code{\link{nncross}} \tab find distance to nearest line segment
+      from a given point\cr
+      \code{\link{nearestsegment}} \tab find line segment closest to a
+      given point \cr
+      \code{\link{project2segment}} \tab find location along a line segment
+      closest to a given point \cr
+      \code{\link{pointsOnLines}} \tab generate points evenly spaced
+      along line segment \cr
+      \code{\link{rpoisline}} \tab generate a realisation of the
+      Poisson line process inside a window\cr
+      \code{\link{rlinegrid}} \tab generate a random array of parallel
+      lines through a window
+    }
+
+    \bold{Tessellations}
+
+    An object of class \code{"tess"} represents a tessellation.
+
+    \tabular{ll}{
+      \code{\link{tess}} \tab create a tessellation \cr
+      \code{\link{quadrats}} \tab create a tessellation of rectangles\cr
+      \code{\link{hextess}} \tab create a tessellation of hexagons \cr
+      \code{\link{quantess}} \tab quantile tessellation \cr
+      \code{\link{as.tess}} \tab convert other data to a tessellation \cr
+      \code{\link{plot.tess}} \tab plot a tessellation \cr
+      \code{\link{tiles}} \tab extract all the tiles of a tessellation \cr
+      \code{\link{[.tess}} \tab extract some tiles of a tessellation \cr
+      \code{\link{[<-.tess}} \tab change some tiles of a tessellation \cr
+      \code{\link{intersect.tess}} \tab intersect two tessellations \cr
+                              \tab or restrict a tessellation to a window \cr
+      \code{\link{chop.tess}} \tab subdivide a tessellation by a line \cr
+      \code{\link{dirichlet}} \tab compute Dirichlet-Voronoi tessellation of points\cr
+      \code{\link{delaunay}} \tab compute Delaunay triangulation of points\cr
+      \code{\link{rpoislinetess}} \tab generate tessellation using Poisson line
+      process \cr
+      \code{\link{tile.areas}}	\tab
+      area of each tile in tessellation \cr
+      \code{\link{bdist.tiles}}	\tab
+      boundary distance for each tile in tessellation 
+    }
+
+    \bold{Three-dimensional point patterns}
+
+    An object of class \code{"pp3"} represents a three-dimensional
+    point pattern in a rectangular box. The box is represented by
+    an object of class \code{"box3"}.
+
+    \tabular{ll}{
+      \code{\link{pp3}} \tab create a 3-D point pattern \cr
+      \code{\link{plot.pp3}} \tab plot a 3-D point pattern \cr
+      \code{\link{coords}} \tab extract coordinates \cr
+      \code{\link{as.hyperframe}} \tab extract coordinates \cr
+      \code{\link{subset.pp3}} \tab extract subset of 3-D point pattern \cr
+      \code{\link{unitname.pp3}} \tab name of unit of length \cr
+      \code{\link{npoints}} \tab count the number of points  \cr
+      \code{\link{runifpoint3}} \tab generate uniform random points in 3-D \cr
+      \code{\link{rpoispp3}} \tab generate Poisson random points in 3-D \cr
+      \code{\link{envelope.pp3}} \tab generate simulation envelopes for
+      3-D pattern \cr
+      \code{\link{box3}} \tab create a 3-D rectangular box \cr
+      \code{\link{as.box3}} \tab convert data to 3-D rectangular box \cr
+      \code{\link{unitname.box3}} \tab name of unit of length \cr
+      \code{\link{diameter.box3}} \tab diameter of box \cr
+      \code{\link{volume.box3}} \tab volume of box \cr
+      \code{\link{shortside.box3}} \tab shortest side of box \cr
+      \code{\link{eroded.volumes}} \tab volumes of erosions of box 
+    }
+
+    \bold{Multi-dimensional space-time point patterns}
+
+    An object of class \code{"ppx"} represents a 
+    point pattern in multi-dimensional space and/or time.
+
+    \tabular{ll}{
+      \code{\link{ppx}} \tab create a multidimensional space-time point pattern \cr
+      \code{\link{coords}} \tab extract coordinates \cr
+      \code{\link{as.hyperframe}} \tab extract coordinates \cr
+      \code{\link{subset.ppx}} \tab extract subset \cr
+      \code{\link{unitname.ppx}} \tab name of unit of length \cr
+      \code{\link{npoints}} \tab count the number of points  \cr
+      \code{\link{runifpointx}} \tab generate uniform random points \cr
+      \code{\link{rpoisppx}} \tab generate Poisson random points \cr
+      \code{\link{boxx}} \tab define multidimensional box  \cr
+      \code{\link{diameter.boxx}} \tab diameter of box \cr
+      \code{\link{volume.boxx}} \tab volume of box \cr
+      \code{\link{shortside.boxx}} \tab shortest side of box \cr
+      \code{\link{eroded.volumes.boxx}} \tab volumes of erosions of box 
+    }
+    
+    \bold{Point patterns on a linear network}
+
+    An object of class \code{"linnet"} represents a linear network
+    (for example, a road network).
+
+    \tabular{ll}{
+      \code{\link{linnet}} \tab create a linear network \cr
+      \code{\link{clickjoin}} \tab interactively join vertices in network \cr
+      \code{\link{iplot.linnet}} \tab interactively plot network \cr
+      \code{\link{simplenet}} \tab simple example of network \cr
+      \code{\link{lineardisc}} \tab disc in a linear network \cr
+      \code{\link{delaunayNetwork}} \tab network of Delaunay triangulation \cr
+      \code{\link{dirichletNetwork}} \tab network of Dirichlet edges \cr
+      \code{\link{methods.linnet}} \tab methods for \code{linnet} objects\cr
+      \code{\link{vertices.linnet}} \tab nodes of network \cr 
+      \code{\link{pixellate.linnet}} \tab approximate by pixel image
+    }
+    
+    An object of class \code{"lpp"} represents a 
+    point pattern on a linear network (for example,
+    road accidents on a road network).
+    
+    \tabular{ll}{
+      \code{\link{lpp}} \tab create a point pattern on a linear network \cr
+      \code{\link{methods.lpp}} \tab methods for \code{lpp} objects \cr
+      \code{\link{subset.lpp}} \tab method for \code{subset} \cr
+      \code{\link{rpoislpp}} \tab simulate Poisson points on linear network \cr
+      \code{\link{runiflpp}} \tab simulate random points on a linear network \cr
+      \code{\link{chicago}} \tab Chicago crime data \cr
+      \code{\link{dendrite}} \tab Dendritic spines data \cr
+      \code{\link{spiders}} \tab Spider webs on mortar lines of brick wall 
+    }
+    
+    \bold{Hyperframes}
+
+    A hyperframe is like a data frame, except that the entries
+    may be objects of any kind.
+
+    \tabular{ll}{
+      \code{\link{hyperframe}} \tab create a hyperframe \cr
+      \code{\link{as.hyperframe}} \tab convert data to hyperframe \cr
+      \code{\link{plot.hyperframe}} \tab plot hyperframe \cr
+      \code{\link{with.hyperframe}} \tab evaluate expression using each row
+      of hyperframe \cr
+      \code{\link{cbind.hyperframe}} \tab combine hyperframes by columns\cr
+      \code{\link{rbind.hyperframe}} \tab combine hyperframes by rows\cr
+      \code{\link{as.data.frame.hyperframe}} \tab convert hyperframe to
+      data frame \cr
+      \code{\link{subset.hyperframe}} \tab method for \code{subset} \cr
+      \code{\link{head.hyperframe}} \tab first few rows of hyperframe \cr
+      \code{\link{tail.hyperframe}} \tab last few rows of hyperframe
+    }
+    
+    \bold{Layered objects}
+
+    A layered object represents data that should be plotted in
+    successive layers, for example, a background and a foreground.
+
+     \tabular{ll}{
+      \code{\link{layered}} \tab create layered object \cr
+      \code{\link{plot.layered}} \tab plot layered object\cr
+      \code{\link{[.layered}} \tab extract subset of layered object
+    }
+    
+    \bold{Colour maps}
+
+    A colour map is a mechanism for associating colours with data.
+    It can be regarded as a function, mapping data to colours.
+    Using a \code{colourmap} object in a plot command
+    ensures that the mapping from numbers to colours is
+    the same in different plots. 
+
+     \tabular{ll}{
+      \code{\link{colourmap}} \tab create a colour map  \cr
+      \code{\link{plot.colourmap}} \tab plot the colour map only\cr
+      \code{\link{tweak.colourmap}} \tab alter individual colour values \cr
+      \code{\link{interp.colourmap}} \tab make a smooth transition
+      between colours \cr
+      \code{\link{beachcolourmap}} \tab one special colour map
+    }
+}
+  
+\section{II. EXPLORATORY DATA ANALYSIS}{
+
+  \bold{Inspection of data:}
+  \tabular{ll}{
+    \code{\link{summary}(X)} \tab
+    print useful summary of point pattern \code{X}\cr
+    \code{X} \tab
+    print basic description of point pattern \code{X}  \cr
+    \code{any(duplicated(X))} \tab
+    check for duplicated points in pattern \code{X} \cr
+    \code{\link{istat}(X)} \tab Interactive exploratory analysis \cr
+    \code{\link{View}(X)} \tab spreadsheet-style viewer
+  }
+
+  \bold{Classical exploratory tools:}
+  \tabular{ll}{
+    \code{\link{clarkevans}} \tab Clark and Evans aggregation index \cr
+    \code{\link{fryplot}} \tab Fry plot \cr
+    \code{\link{miplot}} \tab Morisita Index plot
+  }
+
+  \bold{Smoothing:}
+  \tabular{ll}{
+    \code{\link{density.ppp}} \tab kernel smoothed density/intensity\cr
+    \code{\link{relrisk}} \tab kernel estimate of relative risk\cr
+    \code{\link{Smooth.ppp}} \tab spatial interpolation of marks  \cr
+    \code{\link{bw.diggle}} \tab cross-validated bandwidth selection
+                               for \code{\link{density.ppp}}\cr
+    \code{\link{bw.ppl}} \tab likelihood cross-validated bandwidth selection
+                               for \code{\link{density.ppp}}\cr
+    \code{\link{bw.scott}} \tab Scott's rule of thumb 
+                                for density estimation\cr
+    \code{\link{bw.relrisk}} \tab cross-validated bandwidth selection
+                               for \code{\link{relrisk}} \cr
+    \code{\link{bw.smoothppp}} \tab cross-validated bandwidth selection
+                               for \code{\link{Smooth.ppp}} \cr
+    \code{\link{bw.frac}} \tab bandwidth selection using window geometry\cr
+    \code{\link{bw.stoyan}} \tab Stoyan's rule of thumb for bandwidth
+                               for \code{\link{pcf}}
+  }
+
+  \bold{Modern exploratory tools:}
+  \tabular{ll}{
+    \code{\link{clusterset}} \tab Allard-Fraley feature detection  \cr
+    \code{\link{nnclean}} \tab Byers-Raftery feature detection  \cr
+    \code{\link{sharpen.ppp}} \tab Choi-Hall data sharpening \cr
+    \code{\link{rhohat}} \tab Kernel estimate of covariate effect\cr
+    \code{\link{rho2hat}} \tab Kernel estimate of effect of two covariates\cr
+    \code{\link{spatialcdf}} \tab Spatial cumulative distribution function\cr
+    \code{\link{roc}} \tab Receiver operating characteristic curve
+  }
+
+  \bold{Summary statistics for a point pattern:}
+  Type \code{demo(sumfun)} for a demonstration of many
+  of the summary statistics.
+  \tabular{ll}{
+    \code{\link{intensity}} \tab Mean intensity \cr
+    \code{\link{quadratcount}} \tab Quadrat counts \cr
+    \code{\link{intensity.quadratcount}} \tab Mean intensity in quadrats \cr
+    \code{\link{Fest}} \tab empty space function \eqn{F} \cr
+    \code{\link{Gest}} \tab nearest neighbour distribution function \eqn{G} \cr
+    \code{\link{Jest}} \tab \eqn{J}-function \eqn{J = (1-G)/(1-F)} \cr
+    \code{\link{Kest}} \tab Ripley's \eqn{K}-function\cr
+    \code{\link{Lest}} \tab Besag \eqn{L}-function\cr
+    \code{\link{Tstat}} \tab Third order \eqn{T}-function \cr
+    \code{\link{allstats}} \tab all four functions \eqn{F}, \eqn{G}, \eqn{J}, \eqn{K} \cr
+    \code{\link{pcf}} \tab 	pair correlation function \cr
+    \code{\link{Kinhom}} \tab \eqn{K} for inhomogeneous point patterns \cr
+    \code{\link{Linhom}} \tab \eqn{L} for inhomogeneous point patterns \cr
+    \code{\link{pcfinhom}} \tab pair correlation for inhomogeneous patterns\cr
+    \code{\link{Finhom}} \tab \eqn{F} for inhomogeneous point patterns \cr
+    \code{\link{Ginhom}} \tab \eqn{G} for inhomogeneous point patterns \cr
+    \code{\link{Jinhom}} \tab \eqn{J} for inhomogeneous point patterns \cr
+    \code{\link{localL}} \tab Getis-Franklin neighbourhood density function\cr
+    \code{\link{localK}} \tab neighbourhood K-function\cr
+    \code{\link{localpcf}} \tab local pair correlation function\cr
+    \code{\link{localKinhom}} \tab local \eqn{K} for inhomogeneous point patterns \cr
+    \code{\link{localLinhom}} \tab local \eqn{L} for inhomogeneous point patterns \cr
+    \code{\link{localpcfinhom}} \tab local pair correlation for inhomogeneous patterns\cr
+    \code{\link{Ksector}} \tab Directional \eqn{K}-function\cr
+    \code{\link{Kscaled}} \tab locally scaled \eqn{K}-function \cr
+    \code{\link{Kest.fft}} \tab fast \eqn{K}-function using FFT for large datasets \cr
+    \code{\link{Kmeasure}} \tab reduced second moment measure \cr
+    \code{\link{envelope}} \tab simulation envelopes for a summary
+    function \cr
+    \code{\link{varblock}} \tab variances and confidence intervals\cr
+                   \tab for a summary function \cr
+    \code{\link{lohboot}} \tab bootstrap for a summary function 
+  }
+
+  Related facilities:
+  \tabular{ll}{
+    \code{\link{plot.fv}} \tab plot a summary function\cr
+    \code{\link{eval.fv}} \tab evaluate any expression involving
+    summary functions\cr
+    \code{\link{harmonise.fv}} \tab make functions compatible \cr
+    \code{\link{eval.fasp}} \tab evaluate any expression involving
+    an array of functions\cr
+    \code{\link{with.fv}} \tab evaluate an expression for a 
+    summary function\cr
+    \code{\link{Smooth.fv}} \tab apply smoothing to a summary function\cr
+    \code{\link{deriv.fv}} \tab calculate derivative of a summary function\cr
+    \code{\link{pool.fv}} \tab pool several estimates of a summary function\cr
+    \code{\link{nndist}} \tab nearest neighbour distances \cr
+    \code{\link{nnwhich}} \tab find nearest neighbours \cr
+    \code{\link{pairdist}} \tab distances between all pairs of points\cr
+    \code{\link{crossdist}} \tab distances between points in two patterns\cr
+    \code{\link{nncross}} \tab nearest neighbours between two point patterns \cr
+    \code{\link{exactdt}} \tab distance from any location to nearest data point\cr 
+    \code{\link{distmap}} \tab distance map image\cr
+    \code{\link{distfun}} \tab distance map function\cr
+    \code{\link{nnmap}} \tab nearest point image \cr
+    \code{\link{nnfun}} \tab nearest point function \cr
+    \code{\link{density.ppp}} \tab kernel smoothed density\cr
+    \code{\link{Smooth.ppp}} \tab spatial interpolation of marks  \cr
+    \code{\link{relrisk}} \tab kernel estimate of relative risk\cr
+    \code{\link{sharpen.ppp}} \tab data sharpening  \cr
+    \code{\link{rknn}} \tab theoretical distribution of nearest
+    neighbour distance
+ }
+
+  \bold{Summary statistics for a multitype point pattern:}
+  A multitype point pattern is represented by an object \code{X}
+  of class \code{"ppp"} such that \code{marks(X)} is a factor. 
+  \tabular{ll}{
+    \code{\link{relrisk}} \tab kernel estimation of relative risk  \cr
+    \code{\link{scan.test}} \tab spatial scan test of elevated risk  \cr
+    \code{\link{Gcross},\link{Gdot},\link{Gmulti}} \tab
+    multitype nearest neighbour distributions 
+    \eqn{G_{ij}, G_{i\bullet}}{G[i,j], G[i.]} \cr
+    \code{\link{Kcross},\link{Kdot}, \link{Kmulti}} \tab
+    multitype \eqn{K}-functions 
+    \eqn{K_{ij}, K_{i\bullet}}{K[i,j], K[i.]} \cr
+    \code{\link{Lcross},\link{Ldot}} \tab
+    multitype \eqn{L}-functions 
+    \eqn{L_{ij}, L_{i\bullet}}{L[i,j], L[i.]} \cr
+    \code{\link{Jcross},\link{Jdot},\link{Jmulti}} \tab
+    multitype \eqn{J}-functions
+    \eqn{J_{ij}, J_{i\bullet}}{J[i,j],J[i.]} \cr
+    \code{\link{pcfcross}} \tab
+    multitype pair correlation function \eqn{g_{ij}}{g[i,j]} \cr
+    \code{\link{pcfdot}} \tab
+    multitype pair correlation function \eqn{g_{i\bullet}}{g[i.]} \cr
+    \code{\link{pcfmulti}} \tab
+    general pair correlation function \cr
+    \code{\link{markconnect}} \tab
+    marked connection function \eqn{p_{ij}}{p[i,j]} \cr
+    \code{\link{alltypes}} \tab  estimates of the above
+    for all \eqn{i,j} pairs \cr
+    \code{\link{Iest}} \tab  multitype \eqn{I}-function\cr
+    \code{\link{Kcross.inhom},\link{Kdot.inhom}} \tab
+    inhomogeneous counterparts of \code{Kcross}, \code{Kdot} \cr
+    \code{\link{Lcross.inhom},\link{Ldot.inhom}} \tab
+    inhomogeneous counterparts of \code{Lcross}, \code{Ldot} \cr
+    \code{\link{pcfcross.inhom},\link{pcfdot.inhom}} \tab
+    inhomogeneous counterparts of \code{pcfcross}, \code{pcfdot} 
+  }
+
+  \bold{Summary statistics for a marked point pattern:}
+  A marked point pattern is represented by an object \code{X}
+  of class \code{"ppp"} with a component \code{X$marks}.
+  The entries in the vector \code{X$marks} may be numeric, complex,
+  string or any other atomic type. For numeric marks, there are the
+  following functions:
+  \tabular{ll}{
+    \code{\link{markmean}} \tab smoothed local average of marks \cr
+    \code{\link{markvar}} \tab smoothed local variance of marks \cr
+    \code{\link{markcorr}} \tab mark correlation function \cr
+    \code{\link{markcrosscorr}} \tab mark cross-correlation function \cr
+    \code{\link{markvario}} \tab mark variogram \cr
+    \code{\link{Kmark}} \tab mark-weighted \eqn{K} function \cr
+    \code{\link{Emark}} \tab mark independence diagnostic \eqn{E(r)} \cr
+    \code{\link{Vmark}} \tab mark independence diagnostic \eqn{V(r)} \cr
+    \code{\link{nnmean}} \tab nearest neighbour mean index \cr
+    \code{\link{nnvario}} \tab nearest neighbour mark variance index 
+  }
+  For marks of any type, there are the following:
+  \tabular{ll}{
+    \code{\link{Gmulti}} \tab multitype nearest neighbour distribution \cr
+    \code{\link{Kmulti}} \tab multitype \eqn{K}-function \cr
+    \code{\link{Jmulti}} \tab multitype \eqn{J}-function 
+  }
+  Alternatively use \code{\link{cut.ppp}} to convert a marked point pattern
+  to a multitype point pattern.
+
+  \bold{Programming tools:}
+  \tabular{ll}{
+    \code{\link{applynbd}} \tab apply function to every neighbourhood
+    in a point pattern \cr
+    \code{\link{markstat}} \tab apply function to the marks of neighbours
+    in a point pattern \cr
+    \code{\link{marktable}} \tab tabulate the marks of neighbours
+    in a point pattern \cr
+    \code{\link{pppdist}} \tab find the optimal match between two point
+    patterns
+  }
+
+  \bold{Summary statistics for a point pattern on a linear network:}
+
+  These are for point patterns on a linear network (class \code{lpp}).
+  For unmarked patterns:
+  
+  \tabular{ll}{
+    \code{\link{linearK}} \tab
+    \eqn{K} function on linear network \cr
+    \code{\link{linearKinhom}} \tab
+    inhomogeneous \eqn{K} function on linear network \cr
+    \code{\link{linearpcf}} \tab
+    pair correlation function on linear network \cr
+    \code{\link{linearpcfinhom}} \tab
+    inhomogeneous pair correlation on linear network
+  }
+
+  For multitype patterns:
+  \tabular{ll}{
+    \code{\link{linearKcross}} \tab
+    \eqn{K} function between two types of points \cr
+    \code{\link{linearKdot}} \tab
+    \eqn{K} function from one type to any type \cr
+    \code{\link{linearKcross.inhom}} \tab
+    Inhomogeneous version of \code{\link{linearKcross}} \cr
+    \code{\link{linearKdot.inhom}} \tab
+    Inhomogeneous version of \code{\link{linearKdot}} \cr
+    \code{\link{linearmarkconnect}} \tab
+    Mark connection function  on linear network \cr
+    \code{\link{linearmarkequal}} \tab
+    Mark equality function on linear network \cr
+    \code{\link{linearpcfcross}} \tab
+    Pair correlation between two types of points \cr
+    \code{\link{linearpcfdot}} \tab
+    Pair correlation from one type to any type \cr
+    \code{\link{linearpcfcross.inhom}} \tab
+    Inhomogeneous version of \code{\link{linearpcfcross}} \cr
+    \code{\link{linearpcfdot.inhom}} \tab
+    Inhomogeneous version of \code{\link{linearpcfdot}} 
+  }
+
+  Related facilities:
+  
+  \tabular{ll}{
+    \code{\link{pairdist.lpp}} \tab distances between pairs  \cr
+    \code{\link{crossdist.lpp}} \tab distances between pairs \cr
+    \code{\link{nndist.lpp}} \tab nearest neighbour distances  \cr
+    \code{\link{nncross.lpp}} \tab nearest neighbour distances  \cr
+    \code{\link{nnwhich.lpp}} \tab find nearest neighbours  \cr
+    \code{\link{nnfun.lpp}} \tab find nearest data point  \cr
+    \code{\link{density.lpp}} \tab kernel smoothing estimator of intensity  \cr
+    \code{\link{distfun.lpp}} \tab distance transform  \cr
+    \code{\link{envelope.lpp}} \tab simulation envelopes  \cr
+    \code{\link{rpoislpp}} \tab simulate Poisson points on linear network \cr
+    \code{\link{runiflpp}} \tab simulate random points on a linear network 
+  }
+  
+  It is also possible to fit point process models to \code{lpp} objects.
+  See Section IV.
+  
+  \bold{Summary statistics for a three-dimensional point pattern:}
+
+  These are for 3-dimensional point pattern objects (class \code{pp3}).
+
+  \tabular{ll}{
+    \code{\link{F3est}} \tab empty space function \eqn{F} \cr
+    \code{\link{G3est}} \tab nearest neighbour function \eqn{G} \cr
+    \code{\link{K3est}} \tab \eqn{K}-function \cr
+    \code{\link{pcf3est}} \tab pair correlation function
+  }
+
+  Related facilities:
+  \tabular{ll}{
+    \code{\link{envelope.pp3}} \tab simulation envelopes \cr
+    \code{\link{pairdist.pp3}} \tab distances between all pairs of
+    points \cr
+    \code{\link{crossdist.pp3}} \tab distances between points in
+    two patterns \cr
+    \code{\link{nndist.pp3}} \tab nearest neighbour distances \cr
+    \code{\link{nnwhich.pp3}} \tab find nearest neighbours \cr
+    \code{\link{nncross.pp3}} \tab find nearest neighbours in another pattern
+  }
+
+  \bold{Computations for multi-dimensional point pattern:}
+
+  These are for multi-dimensional space-time
+  point pattern objects (class \code{ppx}).
+
+  \tabular{ll}{
+    \code{\link{pairdist.ppx}} \tab distances between all pairs of
+    points \cr
+    \code{\link{crossdist.ppx}} \tab distances between points in
+    two patterns \cr
+    \code{\link{nndist.ppx}} \tab nearest neighbour distances \cr
+    \code{\link{nnwhich.ppx}} \tab find nearest neighbours
+  }
+
+  \bold{Summary statistics for random sets:}
+  
+  These work for point patterns (class \code{ppp}),
+  line segment patterns (class \code{psp})
+  or windows (class \code{owin}).
+  
+  \tabular{ll}{
+    \code{\link{Hest}} \tab spherical contact distribution \eqn{H} \cr
+    \code{\link{Gfox}} \tab Foxall \eqn{G}-function \cr
+    \code{\link{Jfox}} \tab Foxall \eqn{J}-function
+  }
+  
+}
+
+\section{III. MODEL FITTING (COX AND CLUSTER MODELS)}{
+  
+  Cluster process models (with homogeneous or inhomogeneous intensity)
+  and Cox processes can be fitted by the function \code{\link{kppm}}.
+  Its result is an object of class \code{"kppm"}.
+  The fitted model can be printed, plotted, predicted, simulated
+  and updated.
+
+  \tabular{ll}{
+    \code{\link{kppm}} \tab  Fit model\cr
+    \code{\link{plot.kppm}} \tab  Plot the fitted model\cr
+    \code{\link{summary.kppm}} \tab  Summarise the fitted model\cr
+    \code{\link{fitted.kppm}} \tab Compute fitted intensity \cr
+    \code{\link{predict.kppm}} \tab Compute fitted intensity \cr
+    \code{\link{update.kppm}} \tab Update the model \cr
+    \code{\link{improve.kppm}} \tab Refine the estimate of trend \cr
+    \code{\link{simulate.kppm}} \tab Generate simulated realisations \cr
+    \code{\link{vcov.kppm}} \tab Variance-covariance matrix of coefficients \cr
+    \code{\link[spatstat:methods.kppm]{coef.kppm}}
+    \tab Extract trend coefficients \cr
+    \code{\link[spatstat:methods.kppm]{formula.kppm}}
+    \tab Extract trend formula \cr
+    \code{\link{parameters}} \tab Extract all model parameters \cr
+    \code{\link{clusterfield}} \tab Compute offspring density \cr
+    \code{\link{clusterradius}} \tab Radius of support of offspring density \cr
+    \code{\link{Kmodel.kppm}} \tab \eqn{K} function of fitted model \cr
+    \code{\link{pcfmodel.kppm}} \tab Pair correlation of fitted model 
+  }
+  
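+  For example, a Thomas cluster model could be fitted and inspected
+  along these lines (a minimal sketch using the built-in
+  \code{redwood} dataset):
+  \preformatted{
+    fit <- kppm(redwood ~ 1, clusters="Thomas")
+    plot(fit)      # diagnostic plots of the fitted model
+  }
+  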
+  For model selection, you can also use 
+  the generic functions \code{\link{step}}, \code{\link{drop1}} 
+  and \code{\link{AIC}} on fitted point process models.
+
+  The theoretical models can also be simulated,
+  for any choice of parameter values,
+  using \code{\link{rThomas}}, \code{\link{rMatClust}},
+  \code{\link{rCauchy}}, \code{\link{rVarGamma}},
+  and \code{\link{rLGCP}}.
+  
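+  For example (a sketch; the parameter values are arbitrary):
+  \preformatted{
+    X <- rThomas(kappa=10, scale=0.05, mu=5)  # simulate Thomas process
+    plot(X)
+  }
+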
+  Lower-level fitting functions include:
+
+  \tabular{ll}{
+    \code{\link{lgcp.estK}} \tab fit a log-Gaussian Cox process model\cr
+    \code{\link{lgcp.estpcf}} \tab fit a log-Gaussian Cox process model\cr
+    \code{\link{thomas.estK}} \tab fit the Thomas process model \cr
+    \code{\link{thomas.estpcf}} \tab fit the Thomas process model \cr
+    \code{\link{matclust.estK}} \tab fit the \ifelse{latex}{\out{Mat\'ern}}{Matern} Cluster process model \cr
+    \code{\link{matclust.estpcf}} \tab fit the \ifelse{latex}{\out{Mat\'ern}}{Matern} Cluster process model \cr
+    \code{\link{cauchy.estK}} \tab fit a Neyman-Scott Cauchy cluster process \cr
+   \code{\link{cauchy.estpcf}} \tab fit a Neyman-Scott Cauchy cluster process\cr
+   \code{\link{vargamma.estK}} \tab fit a Neyman-Scott Variance Gamma process\cr
+   \code{\link{vargamma.estpcf}} \tab fit a Neyman-Scott Variance Gamma process\cr
+    \code{\link{mincontrast}} \tab low-level algorithm for fitting models
+    \cr \tab by the method of minimum contrast 
+  }
+}
+
+\section{IV. MODEL FITTING (POISSON AND GIBBS MODELS)}{
+  
+  \bold{Types of models}
+  
+  Poisson point processes are the simplest models for point patterns.
+  A Poisson model assumes that the points are stochastically
+  independent. It may allow the points to have a non-uniform spatial
+  density. The special case of a Poisson process with a uniform
+  spatial density is often called Complete Spatial Randomness.
+  
+  Poisson point processes are included in the more general class of Gibbs point
+  process models. In a Gibbs model, there is \emph{interaction}
+  or dependence between points. Many different types of interaction
+  can be specified.
+  
+  For a detailed explanation of how to fit Poisson or
+  Gibbs point process models to point pattern data using \pkg{spatstat},
+  see Baddeley and Turner (2005b) or Baddeley (2008).
+  
+  \bold{To fit a Poisson or Gibbs point process model:}
+
+  Model fitting in \pkg{spatstat} is performed mainly by the function
+  \code{\link{ppm}}. Its result is an object of class \code{"ppm"}.
+  
+  Here are some examples, where \code{X} is a point pattern (class
+  \code{"ppp"}):
+  
+  \tabular{ll}{
+    \emph{command} \tab \emph{model} \cr
+    \code{ppm(X)} \tab Complete Spatial Randomness \cr
+    \code{ppm(X ~ 1)} \tab Complete Spatial Randomness \cr
+    \code{ppm(X ~ x)} \tab Poisson process with \cr
+                             \tab intensity loglinear in \eqn{x} coordinate \cr
+    \code{ppm(X ~ 1, Strauss(0.1))} \tab Stationary Strauss process \cr
+    \code{ppm(X ~ x, Strauss(0.1))} \tab Strauss process with \cr
+                              \tab conditional intensity loglinear in \eqn{x}
+  }
+  It is also possible to fit models that depend on
+  other covariates.
+
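+  For example, assuming the standard \code{bei} forest dataset and its
+  accompanying covariate images \code{bei.extra} (a minimal sketch):
+  \preformatted{
+    fit <- ppm(bei ~ elev, data=bei.extra)  # intensity loglinear in elevation
+    fit
+  }
+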
+  \bold{Manipulating the fitted model:}
+
+  \tabular{ll}{
+    \code{\link{plot.ppm}} \tab 		Plot the fitted model\cr
+    \code{\link{predict.ppm}}
+    \tab   Compute the spatial trend and conditional intensity\cr
+    \tab   of the fitted point process model \cr
+    \code{\link{coef.ppm}} \tab Extract the fitted model coefficients\cr
+    \code{\link{parameters}} \tab Extract all model parameters\cr
+    \code{\link{formula.ppm}} \tab Extract the trend formula\cr
+    \code{\link{intensity.ppm}} \tab Compute fitted intensity \cr
+    \code{\link{Kmodel.ppm}} \tab \eqn{K} function of fitted model \cr
+    \code{\link{pcfmodel.ppm}} \tab pair correlation of fitted model \cr
+    \code{\link{fitted.ppm}} \tab Compute fitted conditional intensity at quadrature points \cr
+    \code{\link{residuals.ppm}} \tab Compute point process residuals at quadrature points \cr
+    \code{\link{update.ppm}} \tab Update the fit \cr
+    \code{\link{vcov.ppm}} \tab Variance-covariance matrix of estimates\cr
+    \code{\link{rmh.ppm}} \tab Simulate from fitted model  \cr
+    \code{\link{simulate.ppm}} \tab Simulate from fitted model  \cr
+    \code{\link{print.ppm}} \tab Print basic information about a fitted model\cr
+    \code{\link{summary.ppm}} \tab Summarise a fitted model\cr
+    \code{\link{effectfun}} \tab Compute the fitted effect of one covariate\cr
+    \code{\link{logLik.ppm}} \tab log-likelihood or log-pseudolikelihood\cr
+    \code{\link{anova.ppm}} \tab Analysis of deviance \cr
+    \code{\link{model.frame.ppm}} \tab Extract data frame used to fit model  \cr
+    \code{\link{model.images}} \tab Extract spatial data used to fit model  \cr
+    \code{\link{model.depends}} \tab Identify variables in the model \cr
+    \code{\link{as.interact}} \tab Interpoint interaction component of model \cr
+    \code{\link{fitin}} \tab Extract fitted interpoint interaction \cr
+    \code{\link{is.hybrid}} \tab Determine whether the model is a hybrid \cr
+    \code{\link{valid.ppm}} \tab Check the model is a valid point process \cr
+    \code{\link{project.ppm}} \tab Ensure the model is a valid point process 
+  }
+  For model selection, you can also use 
+  the generic functions \code{\link{step}}, \code{\link{drop1}} 
+  and \code{\link{AIC}} on fitted point process models.
+  
+  See \code{\link{spatstat.options}} to control plotting of fitted model.
+  
+  \bold{To specify a point process model:}
+  
+  The first order ``trend'' of the model is determined by an \R 
+  language formula. The formula specifies the form of the
+  \emph{logarithm} of the trend.
+  
+  \tabular{ll}{
+    \code{X ~ 1}  \tab No trend (stationary) \cr
+    \code{X ~ x}   \tab Loglinear trend
+      \eqn{\lambda(x,y) = \exp(\alpha + \beta x)}{lambda(x,y) =	exp(alpha + beta * x)} \cr
+    \tab where \eqn{x,y} are Cartesian coordinates \cr
+    \code{X ~ polynom(x,y,3)}  \tab Log-cubic polynomial trend  \cr
+    \code{X ~ harmonic(x,y,2)}  \tab Log-harmonic polynomial trend \cr
+    \code{X ~ Z}   \tab Loglinear function of covariate \code{Z} \cr
+    \tab  \eqn{\lambda(x,y) = \exp(\alpha + \beta Z(x,y))}{lambda(x,y) =	exp(alpha + beta * Z(x,y))} 
+  }
+
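+  For example, a log-quadratic trend could be fitted as follows
+  (a minimal sketch):
+  \preformatted{
+    fit <- ppm(cells ~ polynom(x, y, 2))  # log-quadratic in coordinates
+  }
+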
+  The higher order (``interaction'') components are described by
+  an object of class \code{"interact"}. Such objects are created by:
+  \tabular{ll}{
+    \code{\link{Poisson}()} \tab		the Poisson point process\cr
+    \code{\link{AreaInter}()}	 \tab Area-interaction process\cr
+    \code{\link{BadGey}()} \tab	multiscale Geyer process\cr
+    \code{\link{Concom}()} \tab	connected component interaction\cr
+    \code{\link{DiggleGratton}() } \tab Diggle-Gratton potential \cr
+    \code{\link{DiggleGatesStibbard}() } \tab Diggle-Gates-Stibbard potential \cr
+    \code{\link{Fiksel}()}	 \tab Fiksel pairwise interaction process\cr
+    \code{\link{Geyer}()}	 \tab Geyer's saturation process\cr
+    \code{\link{Hardcore}()}	 \tab Hard core process\cr
+    \code{\link{HierHard}()}	 \tab Hierarchical multitype hard core process\cr
+    \code{\link{HierStrauss}()}	 \tab Hierarchical multitype Strauss process\cr
+    \code{\link{HierStraussHard}()}	 \tab
+    Hierarchical multitype Strauss-hard core process\cr
+    \code{\link{Hybrid}()}	 \tab Hybrid of several interactions\cr
+    \code{\link{LennardJones}() } \tab Lennard-Jones potential \cr
+    \code{\link{MultiHard}()} \tab 		multitype hard core process \cr
+    \code{\link{MultiStrauss}()} \tab 		multitype Strauss process \cr
+    \code{\link{MultiStraussHard}()} \tab 	multitype Strauss/hard core process \cr
+    \code{\link{OrdThresh}()} \tab		Ord process, threshold potential\cr
+    \code{\link{Ord}()} \tab 		        Ord model, user-supplied potential \cr
+    \code{\link{PairPiece}()} \tab		pairwise interaction, piecewise constant \cr
+    \code{\link{Pairwise}()} \tab	pairwise interaction, user-supplied potential\cr
+    \code{\link{Penttinen}()} \tab	Penttinen pairwise interaction\cr
+    \code{\link{SatPiece}()} \tab	Saturated pair model, piecewise  constant potential\cr
+    \code{\link{Saturated}()} \tab	Saturated pair model, user-supplied potential\cr
+    \code{\link{Softcore}()} \tab pairwise interaction, soft core potential\cr
+    \code{\link{Strauss}()} \tab  Strauss process \cr
+    \code{\link{StraussHard}()} \tab Strauss/hard core point process \cr
+    \code{\link{Triplets}()} \tab Geyer triplets process
+  }
+  Note that it is also possible to combine several such interactions
+  using \code{\link{Hybrid}}.
+  
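+  For example (a sketch; the interaction radii are arbitrary):
+  \preformatted{
+    fit <- ppm(cells ~ 1, Hybrid(H=Hardcore(0.03), S=Strauss(0.1)))
+  }
+  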
+  \bold{Finer control over model fitting:}
+  
+  A quadrature scheme is represented by an object of
+  class \code{"quad"}. To create a quadrature scheme, typically
+  use \code{\link{quadscheme}}.
+  
+  \tabular{ll}{
+    \code{\link{quadscheme}} \tab default quadrature scheme \cr
+                             \tab using rectangular cells or Dirichlet cells\cr
+    \code{\link{pixelquad}}  \tab quadrature scheme based on image pixels \cr
+    \code{\link{quad}}       \tab create an object of class \code{"quad"}
+  }
+  
+  To inspect a quadrature scheme:
+  \tabular{ll}{
+    \code{plot(Q)} \tab plot quadrature scheme \code{Q}\cr
+    \code{print(Q)} \tab print basic information about quadrature scheme \code{Q}\cr
+    \code{\link{summary}(Q)} \tab summary of quadrature scheme \code{Q}
+  }
+
+  A quadrature scheme consists of data points, dummy points, and
+  weights. To generate dummy points:
+  \tabular{ll}{
+    \code{\link{default.dummy}} \tab default pattern of dummy points \cr
+    \code{\link{gridcentres}} \tab dummy points in a rectangular grid \cr
+    \code{\link{rstrat}} \tab stratified random dummy pattern \cr
+    \code{\link{spokes}} \tab radial pattern of dummy points  \cr
+    \code{\link{corners}} \tab dummy points at corners of the window
+  }
+  
+  To compute weights:
+  \tabular{ll}{
+    \code{\link{gridweights}} \tab quadrature weights by the grid-counting rule  \cr
+    \code{\link{dirichletWeights}} \tab quadrature weights are
+    Dirichlet tile areas
+  }
+
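+  For example, a model could be fitted with a finer quadrature scheme
+  as follows (a sketch using the classic call form of \code{ppm}):
+  \preformatted{
+    Q <- quadscheme(cells, nd=64)   # finer grid of dummy points
+    fit <- ppm(Q, ~1, Strauss(0.1))
+  }
+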
+  \bold{Simulation and goodness-of-fit for fitted models:}
+  
+  \tabular{ll}{
+    \code{\link{rmh.ppm}} \tab simulate realisations of a fitted model \cr
+    \code{\link{simulate.ppm}} \tab simulate realisations of a fitted model \cr
+    \code{\link{envelope}} \tab compute simulation envelopes for a
+    fitted model 
+  }
+
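+  For example (a sketch; \code{nsim=19} gives a pointwise test at
+  significance level 0.05):
+  \preformatted{
+    fit <- ppm(cells ~ 1, Strauss(0.1))
+    plot(envelope(fit, Kest, nsim=19))  # K-function envelopes
+  }
+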
+  \bold{Point process models on a linear network:}
+
+  An object of class \code{"lpp"} represents a pattern of points on
+  a linear network. Point process models can also be fitted to these
+  objects. Currently only Poisson models can be fitted.
+
+  \tabular{ll}{
+    \code{\link{lppm}} \tab point process model on linear network \cr
+    \code{\link{anova.lppm}} \tab analysis of deviance for \cr
+    \tab point process model on linear network \cr
+    \code{\link{envelope.lppm}} \tab simulation envelopes for \cr
+    \tab point process model on linear network \cr
+    \code{\link{fitted.lppm}} \tab fitted intensity values \cr
+    \code{\link{predict.lppm}} \tab model prediction on linear network \cr
+    \code{\link{linim}} \tab pixel image on linear network \cr
+    \code{\link{plot.linim}} \tab plot a pixel image on linear network \cr
+    \code{\link{eval.linim}} \tab evaluate expression involving images \cr
+    \code{\link{linfun}} \tab function defined on linear network \cr
+    \code{\link{methods.linfun}} \tab conversion facilities
+  }
+}
+
+
+\section{V. MODEL FITTING (DETERMINANTAL POINT PROCESS MODELS)}{
+
+  Code for fitting \emph{determinantal point process models} has 
+  recently been added to \pkg{spatstat}.
+
+  For information, see the help file for \code{\link{dppm}}.
+}
+
+\section{VI. MODEL FITTING (SPATIAL LOGISTIC REGRESSION)}{
+  
+  \bold{Logistic regression}
+  
+  Pixel-based spatial logistic regression is an alternative
+  technique for analysing spatial point patterns
+  that is widely used in Geographical Information Systems.
+  It is approximately equivalent to fitting a Poisson point process
+  model.
+  
+  In pixel-based logistic regression, the spatial domain is
+  divided into small pixels, the presence or absence of a
+  data point in each pixel is recorded, and logistic regression
+  is used to model the presence/absence indicators as a function
+  of any covariates.
+  
+  Facilities for performing spatial logistic regression are
+  provided in \pkg{spatstat} for comparison purposes.
+  
+  \bold{Fitting a spatial logistic regression}
+  
+  Spatial logistic regression is performed by the function
+  \code{\link{slrm}}. Its result is an object of class \code{"slrm"}.
+  There are many methods for this class, including methods for
+  \code{print}, \code{fitted}, \code{predict}, \code{simulate},
+  \code{anova}, \code{coef}, \code{logLik}, \code{terms},
+  \code{update}, \code{formula} and \code{vcov}. 
+  
+  For example, if \code{X} is a point pattern (class
+  \code{"ppp"}):
+  
+  \tabular{ll}{
+    \emph{command} \tab \emph{model} \cr
+    \code{slrm(X ~ 1)} \tab Complete Spatial Randomness \cr
+    \code{slrm(X ~ x)} \tab Poisson process with \cr
+                \tab intensity loglinear in \eqn{x} coordinate \cr
+    \code{slrm(X ~ Z)} \tab Poisson process with \cr
+                \tab intensity loglinear in covariate \code{Z}
+  }
+
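+  For example (a minimal sketch):
+  \preformatted{
+    fit <- slrm(redwood ~ x)   # logistic regression on x coordinate
+    plot(predict(fit))         # fitted surface
+  }
+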
+  \bold{Manipulating a fitted spatial logistic regression}
+  
+  \tabular{ll}{
+    \code{\link{anova.slrm}} \tab Analysis of deviance \cr
+    \code{\link{coef.slrm}}  \tab Extract fitted coefficients \cr
+    \code{\link{vcov.slrm}}  \tab Variance-covariance matrix of fitted coefficients \cr
+    \code{\link{fitted.slrm}} \tab Compute fitted probabilities or
+    intensity \cr
+    \code{\link{logLik.slrm}}   \tab Evaluate loglikelihood of fitted
+    model \cr
+    \code{\link{plot.slrm}}    \tab Plot fitted probabilities or
+    intensity \cr
+    \code{\link{predict.slrm}} \tab Compute predicted probabilities or
+    intensity with new data \cr
+    \code{\link{simulate.slrm}} \tab Simulate model
+  }
+  
+  There are many other undocumented methods for this class,
+  including methods for \code{print}, \code{update}, \code{formula}
+  and \code{terms}. Stepwise model selection is
+  possible using \code{step} or \code{stepAIC}.
+}
+
+
+\section{VII. SIMULATION}{
+
+  There are many ways to generate a random point pattern,
+  line segment pattern, pixel image or tessellation
+  in \pkg{spatstat}. 
+
+  \bold{Random point patterns:}
+
+  \tabular{ll}{
+    \code{\link{runifpoint}} \tab
+    generate \eqn{n} independent uniform random points \cr
+    \code{\link{rpoint}} \tab
+    generate \eqn{n} independent random points \cr
+    \code{\link{rmpoint}} \tab
+    generate \eqn{n} independent multitype random points \cr
+    \code{\link{rpoispp}} \tab
+    simulate the (in)homogeneous Poisson point process \cr
+    \code{\link{rmpoispp}} \tab
+    simulate the (in)homogeneous multitype Poisson point process \cr
+    \code{\link{runifdisc}} \tab
+    generate \eqn{n} independent uniform random points in disc\cr
+    \code{\link{rstrat}} \tab
+    stratified random sample of points \cr
+    \code{\link{rsyst}} \tab
+    systematic random sample (grid) of points \cr
+    \code{\link{rMaternI}}  \tab
+    simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Model I inhibition process\cr
+    \code{\link{rMaternII}} \tab
+    simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Model II inhibition process\cr
+    \code{\link{rSSI}} \tab
+    simulate Simple Sequential Inhibition process\cr
+    \code{\link{rHardcore}} \tab
+    simulate hard core process (perfect simulation)\cr
+    \code{\link{rStrauss}} \tab
+    simulate Strauss process (perfect simulation)\cr
+    \code{\link{rStraussHard}} \tab
+    simulate Strauss-hard core process (perfect simulation)\cr
+    \code{\link{rDiggleGratton}} \tab
+    simulate Diggle-Gratton process (perfect simulation)\cr
+    \code{\link{rDGS}} \tab
+    simulate Diggle-Gates-Stibbard process (perfect simulation)\cr
+    \code{\link{rPenttinen}} \tab
+    simulate Penttinen process (perfect simulation)\cr
+    \code{\link{rNeymanScott}} \tab
+    simulate a general Neyman-Scott process\cr
+    \code{\link{rMatClust}} \tab
+    simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Cluster process\cr
+    \code{\link{rThomas}} \tab
+    simulate the Thomas process  \cr
+    \code{\link{rLGCP}} \tab
+    simulate the log-Gaussian Cox process  \cr
+    \code{\link{rGaussPoisson}}  \tab
+    simulate the Gauss-Poisson cluster process\cr
+    \code{\link{rCauchy}} \tab
+    simulate Neyman-Scott process with Cauchy clusters \cr
+    \code{\link{rVarGamma}} \tab
+    simulate Neyman-Scott process with Variance Gamma clusters \cr
+    \code{\link{rcell}} \tab
+    simulate the Baddeley-Silverman cell process  \cr
+    \code{\link{runifpointOnLines}} \tab
+    generate \eqn{n} random points along specified line segments \cr
+    \code{\link{rpoisppOnLines}} \tab
+    generate Poisson random points along specified line segments 
+  }
+      
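+  For example, an inhomogeneous Poisson pattern could be generated by
+  supplying an intensity function (a sketch; the default window is the
+  unit square):
+  \preformatted{
+    X <- rpoispp(function(x,y) 100 * x, lmax=100)  # intensity rising with x
+    plot(X)
+  }
+      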
+  \bold{Resampling a point pattern:}
+
+  \tabular{ll}{
+    \code{\link{quadratresample}} \tab block resampling \cr
+    \code{\link{rjitter}} \tab
+    apply random displacements to points in a pattern\cr
+    \code{\link{rshift}} \tab random shifting of (subsets of) points\cr
+    \code{\link{rthin}} \tab  random thinning 
+  }
+  
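+  For example (a sketch):
+  \preformatted{
+    Y <- rthin(cells, P=0.5)   # retain each point with probability 0.5
+  }
+  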
+  See also \code{\link{varblock}} for estimating the variance
+  of a summary statistic by block resampling, and
+  \code{\link{lohboot}} for another bootstrap technique.
+  
+  \bold{Fitted point process models:}
+
+   If you have fitted a point process model to a point pattern dataset,
+   the fitted model can be simulated.
+
+   Cluster process models 
+   are fitted by the function \code{\link{kppm}} yielding an
+   object of class \code{"kppm"}. To generate one or more simulated
+   realisations of this fitted model, use 
+   \code{\link{simulate.kppm}}.
+
+   Gibbs point process models 
+   are fitted by the function \code{\link{ppm}} yielding an
+   object of class \code{"ppm"}. To generate a simulated
+   realisation of this fitted model, use \code{\link{rmh}}.
+   To generate one or more simulated realisations of the fitted model,
+   use \code{\link{simulate.ppm}}.
+
+   \bold{Other random patterns:}
+
+   \tabular{ll}{
+     \code{\link{rlinegrid}} \tab
+     generate a random array of parallel lines through a window \cr
+     \code{\link{rpoisline}} \tab
+     simulate the Poisson line process within a window \cr
+     \code{\link{rpoislinetess}} \tab
+     generate random tessellation using Poisson line process \cr
+     \code{\link{rMosaicSet}} \tab
+     generate random set by selecting some tiles of a tessellation \cr
+     \code{\link{rMosaicField}} \tab
+     generate random pixel image by assigning random values
+     in each tile of a tessellation
+   }
+
+   \bold{Simulation-based inference}
+
+   \tabular{ll}{
+    \code{\link{envelope}} \tab critical envelope for Monte Carlo
+    test of goodness-of-fit \cr
+    \code{\link{qqplot.ppm}} \tab diagnostic plot for interpoint
+    interaction \cr
+    \code{\link{scan.test}} \tab spatial scan statistic/test \cr
+    \code{\link{studpermu.test}} \tab studentised permutation test\cr
+    \code{\link{segregation.test}} \tab test of segregation of types 
+  }
+}
+
+
+\section{VIII. TESTS AND DIAGNOSTICS}{
+
+  \bold{Hypothesis tests:}
+
+  \tabular{ll}{
+    \code{\link{quadrat.test}} \tab \eqn{\chi^2}{chi^2} goodness-of-fit
+    test on quadrat counts \cr
+    \code{\link{clarkevans.test}} \tab Clark and Evans test \cr
+    \code{\link{cdf.test}} \tab Spatial distribution goodness-of-fit test\cr
+    \code{\link{berman.test}} \tab Berman's goodness-of-fit tests\cr
+    \code{\link{envelope}} \tab critical envelope for Monte Carlo
+    test of goodness-of-fit \cr
+    \code{\link{scan.test}} \tab spatial scan statistic/test \cr
+    \code{\link{dclf.test}} \tab Diggle-Cressie-Loosmore-Ford test \cr
+    \code{\link{mad.test}} \tab Mean Absolute Deviation test \cr
+    \code{\link{anova.ppm}} \tab Analysis of Deviance for
+    point process models 
+  }
+
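+  For example (a minimal sketch):
+  \preformatted{
+    quadrat.test(cells, nx=3)  # chi-squared test on a 3 x 3 grid
+    clarkevans.test(cells)     # Clark-Evans test of randomness
+  }
+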
+  More recently-developed tests:
+  
+  \tabular{ll}{
+    \code{\link{dg.test}} \tab Dao-Genton test \cr
+    \code{\link{bits.test}} \tab Balanced independent two-stage test \cr
+    \code{\link{dclf.progress}} \tab Progress plot for DCLF test \cr
+    \code{\link{mad.progress}} \tab Progress plot for MAD test
+  }
+
+\bold{Sensitivity diagnostics:}
+
+  Classical measures of model sensitivity such as leverage and influence
+  have been adapted to point process models.
+  
+    \tabular{ll}{
+      \code{\link{leverage.ppm}} \tab Leverage for point process model\cr
+      \code{\link{influence.ppm}} \tab Influence for point process model\cr
+      \code{\link{dfbetas.ppm}} \tab Parameter influence
+    }
+  
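+  For example (a sketch):
+  \preformatted{
+    fit <- ppm(cells ~ x)
+    plot(leverage(fit))   # leverage diagnostic for the fitted model
+  }
+  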
+  \bold{Diagnostics for covariate effect:}
+
+  Classical diagnostics for covariate effects have been adapted to
+  point process models.
+
+  \tabular{ll}{
+    \code{\link{parres}} \tab Partial residual plot\cr
+    \code{\link{addvar}} \tab Added variable plot \cr
+    \code{\link{rhohat}} \tab Kernel estimate of covariate effect\cr
+    \code{\link{rho2hat}} \tab Kernel estimate of covariate effect
+    (bivariate) 
+  }
+  
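+  For example, assuming the \code{bei} dataset and its covariate
+  images \code{bei.extra} (a sketch):
+  \preformatted{
+    plot(rhohat(bei, bei.extra$elev))  # intensity as a function of elevation
+  }
+  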
+  \bold{Residual diagnostics:}
+  
+  Residuals for a fitted point process model, and diagnostic plots
+  based on the residuals, were introduced in Baddeley et al (2005) and
+  Baddeley, Rubak and \ifelse{latex}{\out{M\o ller}}{Moller} (2011).
+  
+  Type \code{demo(diagnose)}
+  for a demonstration of the diagnostics features.
+
+  \tabular{ll}{
+    \code{\link{diagnose.ppm}} \tab diagnostic plots for spatial trend\cr
+    \code{\link{qqplot.ppm}} \tab diagnostic Q-Q plot for interpoint interaction\cr
+    \code{\link{residualspaper}} \tab examples from Baddeley et al (2005) \cr
+    \code{\link{Kcom}} \tab model compensator of \eqn{K} function \cr
+    \code{\link{Gcom}} \tab model compensator of \eqn{G} function \cr
+    \code{\link{Kres}} \tab score residual of \eqn{K} function \cr
+    \code{\link{Gres}} \tab score residual of \eqn{G} function \cr
+    \code{\link{psst}} \tab pseudoscore residual of summary function \cr
+    \code{\link{psstA}} \tab pseudoscore residual of empty space function \cr
+    \code{\link{psstG}} \tab pseudoscore residual of \eqn{G} function \cr
+    \code{\link{compareFit}} \tab compare compensators of several fitted
+    models
+  }
+
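+  For example (a sketch):
+  \preformatted{
+    fit <- ppm(cells ~ x)
+    diagnose.ppm(fit)   # four-panel residual diagnostics
+  }
+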
+
+  \bold{Resampling and randomisation procedures}
+
+  You can build your own tests based on randomisation
+  and resampling using the following capabilities:
+  
+  \tabular{ll}{
+    \code{\link{quadratresample}} \tab block resampling \cr
+    \code{\link{rjitter}} \tab
+    apply random displacements to points in a pattern\cr
+    \code{\link{rshift}} \tab random shifting of (subsets of) points\cr
+    \code{\link{rthin}} \tab  random thinning  
+  }
+}
+
+
+\section{IX. DOCUMENTATION}{
+  The online manual entries are quite detailed and should be consulted
+  first for information about a particular function.
+  
+  The book
+  Baddeley, Rubak and Turner (2015) is a complete course
+  on analysing spatial point patterns, with full details about
+  \pkg{spatstat}.
+
+  Older material (which is now out-of-date but is freely available)
+  includes Baddeley and Turner (2005a), a brief overview of
+  the package in its early development;
+  Baddeley and Turner (2005b), a more detailed explanation of
+  how to fit point process models to data; and 
+  Baddeley (2010), a complete set of notes from a 2-day workshop
+  on the use of \pkg{spatstat}. 
+
+  Type \code{citation("spatstat")} to get a list of these references.
+}
+\references{
+  Baddeley, A. (2010)
+  \emph{Analysing spatial point patterns in R}.
+  Workshop notes, Version 4.1.
+  Online technical publication, CSIRO.
+  \url{https://research.csiro.au/software/wp-content/uploads/sites/6/2015/02/Rspatialcourse_CMIS_PDF-Standard.pdf}
+
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  Chapman and Hall/CRC Press.
+  
+  Baddeley, A. and Turner, R. (2005a)
+  Spatstat: an R package for analyzing spatial point patterns.
+  \emph{Journal of Statistical Software} \bold{12}:6, 1--42.
+  \url{http://www.jstatsoft.org}, ISSN 1548-7660.
+
+  Baddeley, A. and Turner, R. (2005b)
+  Modelling spatial point patterns in R.
+  In: A. Baddeley, P. Gregori, J. Mateu, R. Stoica, and D. Stoyan,
+  editors, \emph{Case Studies in Spatial Point Pattern Modelling},
+  Lecture Notes in Statistics number 185. Pages 23--74.
+  Springer-Verlag, New York, 2006. 
+  ISBN: 0-387-28311-0.
+
+  Baddeley, A., Turner, R.,
+  \ifelse{latex}{\out{M\o ller}}{Moller}, J. and Hazelton, M. (2005)
+  Residual analysis for spatial point processes.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \bold{67}, 617--666.
+
+  Baddeley, A., Rubak, E. and \ifelse{latex}{\out{M\o ller}}{Moller}, J. (2011)
+  Score, pseudo-score and residual
+  diagnostics for spatial point process models.
+  \emph{Statistical Science} \bold{26}, 613--646.
+
+  Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013)
+  Hybrids of Gibbs point process models and their implementation.
+  \emph{Journal of Statistical Software} \bold{55}:11, 1--43.
+  \url{http://www.jstatsoft.org/v55/i11/}
+
+  Diggle, P.J. (2003)
+  \emph{Statistical analysis of spatial point patterns},
+  Second edition. Arnold.
+
+  Diggle, P.J. (2014)
+  \emph{Statistical Analysis of Spatial and Spatio-Temporal Point Patterns},
+  Third edition. {Chapman and Hall/CRC}.
+
+  Gelfand, A.E., Diggle, P.J., Fuentes, M. and Guttorp, P., editors (2010)
+  \emph{Handbook of Spatial Statistics}.
+  CRC Press.
+
+  Huang, F. and Ogata, Y. (1999)
+  Improvements of the maximum pseudo-likelihood
+  estimators in various spatial statistical models.
+  \emph{Journal of Computational and Graphical Statistics}
+  \bold{8}, 510--530.
+
+  Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008)
+  \emph{Statistical Analysis and Modelling of Spatial Point Patterns.}
+  Wiley.
+  
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\section{Licence}{
+  This library and its documentation are usable under the terms of the "GNU 
+  General Public License", a copy of which is distributed with the package.
+}
+\author{
+  \spatstatAuthors.
+}
+\section{Acknowledgements}{
+  Kasper Klitgaard Berthelsen,
+  Ottmar Cronie,
+  Yongtao Guan,
+  Ute Hahn,
+  Abdollah Jalilian,
+  Marie-Colette van Lieshout,
+  Greg McSwiggan,
+  Tuomas Rajala,
+  Suman Rakshit,
+  Dominic Schuhmacher,
+  Rasmus Waagepetersen
+  and
+  Hangsheng Wang
+  made substantial contributions of code.
+
+  Additional contributions and suggestions from
+  Monsuru Adepeju,
+  Corey Anderson,
+  Ang Qi Wei,
+  Marcel Austenfeld,
+  Sandro Azaele,
+  Malissa Baddeley,
+  Guy Bayegnak,
+  Colin Beale,
+  Melanie Bell,
+  Thomas Bendtsen,
+  Ricardo Bernhardt,
+  Andrew Bevan,
+  Brad Biggerstaff,
+  Anders Bilgrau,
+  Leanne Bischof,
+  Christophe Biscio,
+  Roger Bivand,
+  Jose M. Blanco Moreno,
+  Florent Bonneu,
+  Julian Burgos,
+  Simon Byers,
+  Ya-Mei Chang,
+  Jianbao Chen,
+  Igor Chernayavsky,
+  Y.C. Chin,
+  Bjarke Christensen,
+  Jean-Francois Coeurjolly,
+  Kim Colyvas,
+  Robin Corria Ainslie,
+  Richard Cotton,
+  Marcelino de la Cruz,
+  Peter Dalgaard,
+  Mario D'Antuono,
+  Sourav Das,
+  Tilman Davies,
+  Peter Diggle,
+  Patrick Donnelly,
+  Ian Dryden,
+  Stephen Eglen,
+  Ahmed El-Gabbas,
+  Belarmain Fandohan,
+  Olivier Flores,
+  David Ford,
+  Peter Forbes,
+  Shane Frank,
+  Janet Franklin,
+  Funwi-Gabga Neba,
+  Oscar Garcia,
+  Agnes Gault,
+  Jonas Geldmann,
+  Marc Genton,
+  Shaaban Ghalandarayeshi,
+  Julian Gilbey,
+  Jason Goldstick,
+  Pavel Grabarnik,
+  C. Graf,
+  Ute Hahn,
+  Andrew Hardegen,
+  Martin \ifelse{latex}{\out{B{\o}gsted}}{Bogsted} Hansen,
+  Martin Hazelton,
+  Juha Heikkinen,
+  Mandy Hering,
+  Markus Herrmann,
+  Paul Hewson,
+  Kassel Hingee,
+  Kurt Hornik,
+  Philipp Hunziker,
+  Jack Hywood,
+  Ross Ihaka,
+  \ifelse{latex}{\out{\u{C}enk I\c{c}\"{o}s}}{Cenk Icos},
+  Aruna Jammalamadaka,
+  Robert John-Chandran,
+  Devin Johnson,
+  Mahdieh Khanmohammadi,
+  Bob Klaver,
+  Peter Kovesi,
+  Mike Kuhn,
+  Jeff Laake,
+  Frederic Lavancier,
+  Tom Lawrence,
+  Robert Lamb,
+  Jonathan Lee,
+  George Leser,
+  Li Haitao,
+  George Limitsios,
+  Andrew Lister,
+  Ben Madin,
+  Martin Maechler,
+  Kiran Marchikanti,
+  Jeff Marcus,
+  Robert Mark,
+  Peter McCullagh,
+  Monia Mahling,
+  Jorge Mateu Mahiques,
+  Ulf Mehlig,
+  Frederico Mestre,
+  Sebastian Wastl Meyer,
+  Mi Xiangcheng,
+  Lore De Middeleer,
+  Robin Milne,
+  Enrique Miranda,
+  Jesper \ifelse{latex}{\out{M\o ller}}{Moller},
+  Mehdi Moradi,
+  Virginia Morera Pujol,
+  Erika Mudrak,
+  Gopalan Nair,
+  Nader Najari,
+  Nicoletta Nava,
+  Linda Stougaard Nielsen,
+  Felipe Nunes,
+  Jens Randel Nyengaard,
+  Jens \ifelse{latex}{\out{Oehlschl\"{a}gel}}{Oehlschlaegel},
+  Thierry Onkelinx,
+  Sean O'Riordan,
+  Evgeni Parilov,
+  Jeff Picka,
+  Nicolas Picard,
+  Mike Porter,
+  Sergiy Protsiv,
+  Adrian Raftery,
+  Suman Rakshit,
+  Ben Ramage,
+  Pablo Ramon,
+  Xavier Raynaud,
+  Nicholas Read,
+  Matt Reiter,
+  Ian Renner,
+  Tom Richardson,
+  Brian Ripley,
+  Ted Rosenbaum,
+  Barry Rowlingson,
+  Jason Rudokas,
+  John Rudge,
+  Christopher Ryan,
+  Farzaneh Safavimanesh,
+  Aila \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka},
+  Cody Schank,
+  Katja Schladitz,
+  Sebastian Schutte,
+  Bryan Scott,
+  Olivia Semboli,
+  \ifelse{latex}{\out{Fran\c{c}ois S\'{e}m\'{e}curbe}}{Francois Semecurbe},
+  Vadim Shcherbakov,
+  Shen Guochun,
+  Shi Peijian,
+  Harold-Jeffrey Ship,
+  Tammy L Silva,
+  Ida-Maria Sintorn,
+  Yong Song, 
+  Malte Spiess,
+  Mark Stevenson,
+  Kaspar Stucki,
+  Michael Sumner,
+  P. Surovy,
+  Ben Taylor,
+  Thordis Linda Thorarinsdottir,
+  Berwin Turlach,
+  Torben Tvedebrink,
+  Kevin Ummer,
+  Medha Uppala,
+  Andrew van Burgel,
+  Tobias Verbeke,
+  Mikko Vihtakari,
+  Alexendre Villers,
+  Fabrice Vinatier,
+  Sasha Voss,
+  Sven Wagner,
+  Hao Wang,
+  H. Wendrock,
+  Jan Wild,
+  Carl G. Witthoft,
+  Selene Wong,
+  Maxime Woringer,
+  Mike Zamboni
+  and
+  Achim Zeileis.
+}
+\keyword{spatial}
+\keyword{package}
+
diff --git a/man/spatstat.options.Rd b/man/spatstat.options.Rd
new file mode 100644
index 0000000..ea9e0fd
--- /dev/null
+++ b/man/spatstat.options.Rd
@@ -0,0 +1,412 @@
+\name{spatstat.options}
+\alias{spatstat.options}
+\alias{reset.spatstat.options}
+\title{Internal Options in Spatstat Package}
+\description{
+  Allows the user to examine and reset the values
+  of global parameters which control actions in the
+  \pkg{spatstat} package.
+}
+\usage{
+  spatstat.options(...)
+  reset.spatstat.options()
+}
+\arguments{
+  \item{\dots}{
+    Either empty,
+    or a succession of parameter names in quotes,
+    or a succession of \code{name=value} pairs.
+    See below for the parameter names.
+  }
+}
+\value{
+  Either a list of parameters and their values,
+  or a single value. See Details.
+}
+\details{
+  The function \code{spatstat.options}
+  allows the user to examine and reset the values
+  of global parameters which control actions in the
+  \pkg{spatstat} package.
+  It is analogous to the system function \code{\link[base]{options}}.
+
+  The function \code{reset.spatstat.options} resets all the
+  global parameters in \pkg{spatstat} to their original,
+  default values.
+
+  The global parameters of interest to the user are:
+  \describe{
+    \item{checkpolygons}{
+      Logical flag indicating whether the functions
+      \code{\link[spatstat]{owin}} and \code{\link[spatstat]{as.owin}} should
+      apply very strict checks on the validity of polygon data. 
+      These strict checks are no longer necessary, and the default is
+      \code{checkpolygons=FALSE}.
+      See also \code{fixpolygons} below.
+    }
+    \item{checksegments}{
+      Logical flag indicating whether the functions
+      \code{\link[spatstat]{psp}} and \code{\link[spatstat]{as.psp}} should check
+      the validity of line segment data (in particular, checking that
+      the endpoints of the line segments are inside the specified
+      window). It is advisable to leave this flag set to \code{TRUE}. 
+    }
+    \item{eroded.intensity}{
+      Logical flag affecting the behaviour of the
+      score and pseudo-score residual functions
+      \code{\link[spatstat]{Gcom}}, \code{\link[spatstat]{Gres}},
+      \code{\link[spatstat]{Kcom}}, \code{\link[spatstat]{Kres}}, 
+      \code{\link[spatstat]{psstA}}, 
+      \code{\link[spatstat]{psstG}}, 
+      \code{\link[spatstat]{psst}}.
+      The flag indicates whether to compute intensity estimates
+      on an eroded window (\code{eroded.intensity=TRUE})
+      or on the original data window
+      (\code{eroded.intensity=FALSE}, the default). 
+    }
+    \item{expand}{
+      The default expansion factor (area inflation factor)
+      for expansion of the simulation window in
+      \code{\link[spatstat]{rmh}} (see \code{\link[spatstat]{rmhcontrol}}).
+      Initialised to \code{2}.
+    }
+    \item{expand.polynom}{
+      Logical. Whether expressions involving \code{\link[spatstat]{polynom}}
+      in a model formula should be expanded, so that
+      \code{polynom(x,2)} is replaced by \code{x + I(x^2)} and so on.
+      Initialised to \code{TRUE}.
+    }
+    \item{fastpois}{
+      Logical. Whether to use a fast algorithm
+      (introduced in \pkg{spatstat 1.42-3}) for
+      simulating the Poisson point process in \code{\link[spatstat]{rpoispp}}
+      when the argument \code{lambda} is a pixel image.
+      Initialised to \code{TRUE}.
+      Should be set to \code{FALSE} if needed to guarantee repeatability
+      of results computed using earlier versions of \pkg{spatstat}.
+    }
+    \item{fastthin}{
+      Logical. Whether to use a fast C language algorithm
+      (introduced in \pkg{spatstat 1.42-3}) for
+      random thinning in \code{\link[spatstat]{rthin}} when the argument
+      \code{P} is a single number.
+      Initialised to \code{TRUE}.
+      Should be set to \code{FALSE} if needed to guarantee repeatability
+      of results computed using earlier versions of \pkg{spatstat}.
+    }
+    \item{fastK.lgcp}{
+      Logical. Whether to use fast or slow algorithm to compute the
+      (theoretical) \eqn{K}-function of a log-Gaussian Cox process
+      for use in \code{\link[spatstat]{lgcp.estK}} or \code{\link[spatstat]{Kmodel}}.
+      The slow algorithm uses accurate numerical integration; the
+      fast algorithm uses Simpson's Rule for numerical integration,
+      and is about two orders of magnitude faster.
+      Initialised to \code{FALSE}. 
+    }
+    \item{fixpolygons}{
+      Logical flag indicating whether the functions
+      \code{\link[spatstat]{owin}} and \code{\link[spatstat]{as.owin}} should
+      repair errors in polygon data. For example,
+      self-intersecting polygons and overlapping polygons will
+      be repaired. The default is \code{fixpolygons=TRUE}.
+    }
+    \item{fftw}{
+      Logical value indicating whether the two-dimensional
+      Fast Fourier Transform
+      should be computed using the package \pkg{fftwtools},
+      instead of the \code{fft} function in the \pkg{stats} package.
+      This affects the speed of \code{\link[spatstat]{density.ppp}},
+      \code{\link[spatstat]{density.psp}}, \code{\link[spatstat]{blur}},
+      \code{\link[spatstat]{setcov}} and \code{\link[spatstat]{Smooth.ppp}}.
+    }
+    \item{gpclib}{
+      Defunct. This parameter was used to permit or forbid the use of the
+      package \pkg{gpclib}, because of its restricted software licence.
+      This package is no longer needed.
+    }
+    \item{huge.npoints}{
+      The maximum value of \code{n} for which \code{runif(n)} will
+      not generate an error (possible errors include failure to allocate
+      sufficient memory, and integer overflow of \code{n}).
+      An attempt to generate more than this number of random points
+      triggers a warning from \code{\link[spatstat]{runifpoint}} and other
+      functions. Defaults to \code{1e6}.
+    }
+    \item{image.colfun}{
+      Function determining the default colour map for
+      \code{\link[spatstat]{plot.im}}. When called with one integer argument
+      \code{n}, this function should return a character vector of length
+      \code{n} specifying \code{n} different colours.
+    }
+    \item{Kcom.remove.zeroes}{
+      Logical value, determining whether the algorithm in
+      \code{\link[spatstat]{Kcom}} and \code{\link[spatstat]{Kres}}
+      removes or retains the contributions to the function
+      from pairs of points that are identical. If these are
+      retained then the function has a jump at \eqn{r=0}.
+      Initialised to \code{TRUE}.
+    }
+    \item{maxedgewt}{
+      Edge correction weights will be trimmed
+      so as not to exceed this value.
+      This applies to the weights computed by
+      \code{\link[spatstat]{edge.Trans}} or \code{\link[spatstat]{edge.Ripley}}
+      and used in \code{\link[spatstat]{Kest}} and its relatives.
+    }
+    \item{maxmatrix}{
+      The maximum permitted size (rows times columns)
+      of matrices generated by \pkg{spatstat}'s internal code.
+      Used by \code{\link[spatstat]{ppm}} and \code{\link[spatstat]{predict.ppm}}
+      (for example) to decide when to
+      split a large calculation into blocks.
+      Defaults to \code{2^24=16777216}. 
+    }
+    \item{monochrome}{
+      Logical flag indicating whether graphics should be
+      plotted in grey scale (\code{monochrome=TRUE}) or
+      in colour (\code{monochrome=FALSE}, the default).
+    }
+    \item{n.bandwidth}{
+      Integer. Number of trial values of smoothing bandwidth to use for
+      cross-validation in \code{\link[spatstat]{bw.relrisk}} and similar
+      functions.
+    }
+    \item{ndummy.min}{
+      The minimum number of dummy points in a quadrature scheme
+      created by \code{\link[spatstat]{default.dummy}}.
+      Either an integer or a pair of integers
+      giving the minimum number of dummy points in the \code{x} and \code{y}
+      directions respectively. 
+    }
+    \item{ngrid.disc}{
+      Number of points in the square grid used to compute
+      a discrete approximation to the areas of discs
+      in \code{\link[spatstat]{areaLoss}} and \code{\link[spatstat]{areaGain}}
+      when exact calculation is not available.
+      A single integer.
+    }
+    \item{npixel}{
+      Default number of pixels in a binary mask or pixel image.
+      Either an integer, or a pair of integers,
+      giving the number of pixels in the \code{x} and \code{y}
+      directions respectively.
+    }
+    \item{nvoxel}{
+      Default number of voxels in a 3D image,
+      typically for calculating the distance transform in \code{\link[spatstat]{F3est}}.
+      Initialised to 4 megavoxels: \code{nvoxel = 2^22 = 4194304}.
+    }
+    \item{par.binary}{
+      List of arguments to be passed to the function
+      \code{\link[graphics]{image}}
+      when displaying a binary image mask (in \code{\link[spatstat]{plot.owin}}
+      or \code{\link[spatstat]{plot.ppp}}).
+      Typically used to reset the colours of foreground and background.
+    }
+    \item{par.contour}{
+      List of arguments controlling contour plots of pixel images
+      by \code{\link[spatstat]{contour.im}}.
+    }
+    \item{par.fv}{
+      List of arguments controlling the plotting of functions 
+      by \code{\link[spatstat]{plot.fv}} and its relatives.
+    }
+    \item{par.persp}{
+      List of arguments to be passed to the function
+      \code{\link[graphics]{persp}}
+      when displaying a real-valued image, such as the fitted surfaces
+      in \code{\link[spatstat]{plot.ppm}}.
+    }
+    \item{par.points}{
+      List of arguments controlling the plotting of point patterns
+      by \code{\link[spatstat]{plot.ppp}}.
+    }
+    \item{par.pp3}{
+      List of arguments controlling the plotting of three-dimensional
+      point patterns by \code{\link[spatstat]{plot.pp3}}.
+    }
+    \item{print.ppm.SE}{
+      Default rule used by \code{\link[spatstat]{print.ppm}}
+      to decide whether to calculate and print standard errors
+      of the estimated coefficients of the model.
+      One of the strings \code{"always"}, \code{"never"} or
+      \code{"poisson"} (the latter indicating that standard errors
+      will be calculated only for Poisson models). The default is
+      \code{"poisson"} because the calculation for non-Poisson models
+      can take a long time.
+    }
+    \item{progress}{
+      Character string determining the style of progress reports
+      printed by \code{\link[spatstat]{progressreport}}. Either
+      \code{"tty"}, \code{"tk"} or \code{"txtbar"}.
+      For explanation of these options, see \code{\link[spatstat]{progressreport}}.
+    }
+    \item{project.fast}{
+      Logical. If \code{TRUE}, the algorithm of
+      \code{\link[spatstat]{project.ppm}}
+      will be accelerated using a shortcut.
+      Initialised to \code{FALSE}.
+    }
+    \item{psstA.ngrid}{
+      Single integer, 
+      controlling the accuracy of the discrete approximation
+      of areas computed in the function \code{\link[spatstat]{psstA}}.
+      The area of a disc is approximated by counting points on
+      an \eqn{n \times n}{n * n} grid.
+      Initialised to 32.
+    }
+    \item{psstA.nr}{
+      Single integer,
+      determining the number of distances \eqn{r}
+      at which the function \code{\link[spatstat]{psstA}} will be evaluated
+      (in the default case where argument \code{r} is absent).
+      Initialised to 30.
+    }
+    \item{psstG.remove.zeroes}{
+      Logical value, determining whether the algorithm in
+      \code{\link[spatstat]{psstG}} 
+      removes or retains the contributions to the function
+      from pairs of points that are identical. If these are
+      retained then the function has a jump at \eqn{r=0}.
+      Initialised to \code{TRUE}.
+    }
+    \item{rmh.p, rmh.q, rmh.nrep}{
+      New default values for the parameters \code{p}, \code{q}
+      and \code{nrep} in the Metropolis-Hastings simulation
+      algorithm. These override the defaults
+      in \code{\link[spatstat]{rmhcontrol.default}}.
+    }
+    \item{scalable}{
+      Logical flag indicating whether the new code in \code{rmh.default}
+      which makes the results scalable (invariant to change of units)
+      should be used.  In order to recover former behaviour (so that
+      previous results can be reproduced) set this option equal to
+      \code{FALSE}.  See the \dQuote{Warning} section in the help for
+      \code{\link[spatstat]{rmh}()} for more detail.
+    }
+    \item{terse}{
+      Integer between 0 and 4.
+      The level of terseness (brevity) in printed output
+      from many functions in \pkg{spatstat}.
+      Higher values mean shorter output.
+      A rough guide is the following:
+      \tabular{ll}{
+	0 \tab Full output\cr
+        1 \tab Avoid wasteful output \cr
+        2 \tab Remove space between paragraphs\cr
+        3 \tab Suppress extras such as standard errors \cr
+        4 \tab Compress text, suppress internal warnings
+      }
+      The value of \code{terse} is initialised to 0.
+    }
+    \item{transparent}{
+      Logical value indicating whether default colour maps
+      are allowed to include semi-transparent colours, where possible.
+      Default is \code{TRUE}.
+      Currently this only affects \code{\link[spatstat]{plot.ppp}}. 
+    }
+    \item{units.paren}{
+      The kind of parenthesis which encloses the text that
+      explains a \code{unitname}. This text is seen in
+      the text output of functions like \code{\link[spatstat]{print.ppp}} and
+      in the graphics generated by \code{\link[spatstat]{plot.fv}}.
+      The value should be one of the character strings \code{'('}, \code{'['},
+      \code{'\{'} or \code{''}. The default is \code{'('}.
+    }
+  }
+  If no arguments are given, 
+  the current values of all parameters are returned,
+  in a list.
+
+  If one parameter name is given, the current value of this
+  parameter is returned (\bold{not} in a list, just the value).
+  
+  If several parameter names are given,
+  the current values of these parameters are returned, in a list.
+  
+  If \code{name=value} pairs are given, the named parameters
+  are reset to the given values, and the \bold{previous} values of
+  these parameters are returned, in a list.
+}
+\section{Internal parameters}{
+  The following parameters may also be specified to
+  \code{spatstat.options} but are intended for
+  software development or testing purposes.
+  \describe{
+    \item{closepairs.newcode}{
+      Logical. Whether to use new version of the
+      code for \code{\link[spatstat]{closepairs}}.
+      Initialised to \code{TRUE}.
+    }
+    \item{crossing.psp.useCall}{
+      Logical. Whether to use new version of the
+      code for \code{\link[spatstat]{crossing.psp}}.
+      Initialised to \code{TRUE}.
+    }
+    \item{crosspairs.newcode}{
+      Logical. Whether to use new version of the
+      code for \code{\link[spatstat]{crosspairs}}.
+      Initialised to \code{TRUE}.
+    }
+    \item{densityC}{
+      Logical. 
+      Indicates whether to use accelerated C code
+      (\code{densityC=TRUE}) or interpreted R code (\code{densityC=FALSE})
+      to evaluate \code{density.ppp(X, at="points")}.
+      Initialised to \code{TRUE}.
+    }
+    \item{exactdt.checks.data}{
+      Logical.
+      Do not change this value, unless you are
+      \adrian. 
+    }
+    \item{fasteval}{
+      One of the strings \code{'off'}, \code{'on'} or
+      \code{'test'} determining whether to use accelerated C code
+      to evaluate the conditional intensity of a Gibbs model.
+      Initialised to \code{'on'}.
+    }
+    \item{old.morpho.psp}{
+      Logical. Whether to use old R code for
+      morphological operations.
+      Initialised to \code{FALSE}.
+    }
+    \item{selfcrossing.psp.useCall}{
+      Logical. Whether to use new version of the
+      code for \code{\link[spatstat]{selfcrossing.psp}}.
+      Initialised to \code{TRUE}.
+    }
+    \item{use.Krect}{
+      Logical. Whether to use new code for the
+      K-function in a rectangular window.
+      Initialised to \code{TRUE}.
+    }
+  }
+}
+\seealso{
+  \code{\link[base]{options}}
+}
+\examples{
+  # save current values
+  oldopt <- spatstat.options()
+
+  spatstat.options("npixel")
+  spatstat.options(npixel=150)
+  spatstat.options(npixel=c(100,200))
+
+  spatstat.options(par.binary=list(col=grey(c(0.5,1))))
+
+  spatstat.options(par.persp=list(theta=-30,phi=40,d=4))
+  # see help(persp.default) for other options
+
+  # revert
+  spatstat.options(oldopt)
+
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
diff --git a/man/spiders.Rd b/man/spiders.Rd
new file mode 100644
index 0000000..50cfe30
--- /dev/null
+++ b/man/spiders.Rd
@@ -0,0 +1,66 @@
+\name{spiders}
+\alias{spiders}
+\docType{data}
+\title{
+  Spider Webs on Mortar Lines of a Brick Wall
+}
+\description{
+  Data recording the locations of small spider webs on
+  the network of mortar lines of a brick wall.
+}
+\usage{data("spiders")}
+\format{
+  Object of class \code{"lpp"} representing a pattern of points
+  on a linear network.
+}
+\details{
+The data give the positions of 48 webs 
+of the urban wall spider \emph{Oecobius navus} 
+on the mortar lines of a brick wall, 
+recorded by Voss (1999) and manually digitised by Mark Handcock. 
+The mortar spaces provide the only opportunity for constructing webs
+(Voss 1999; Voss et al 2007) so this is a pattern of points on a
+network of lines.
+
+The habitat preferences of this species were studied in detail
+by Voss et al (2007). Questions of interest include evidence for
+non-uniform density of webs and for interaction between nearby individuals.
+
+Observations were made inside a square quadrat of side length 1.125 metres.
+The original hand-drawn map was digitised manually by Mark
+S. Handcock, and reformatted as a \code{spatstat} object by
+Ang Qi Wei.
+
+The dataset \code{spiders} is an object of class \code{"lpp"}
+(point pattern on a linear network). Coordinates are given in millimetres.
+The linear network has 156 vertices and a total length of 20.22 metres. 
+
+\emph{Please cite Voss et al (2007) with any use of these data.} 
+}
+\source{
+  Dr Sasha Voss. Coordinates manually recorded by M.S. Handcock
+  and formatted by Q.W. Ang.
+  
+  \emph{Please cite Voss et al (2007) with any use of these data.} 
+}
+\references{
+  Ang, Q.W. (2010)
+  \emph{Statistical methodology for events on a network}.
+  Master's thesis, School of Mathematics and Statistics, University of
+  Western Australia.
+
+  Voss, S. (1999)
+  Habitat preferences and spatial dynamics of the
+  urban wall spider: \emph{Oecobius annulipes} Lucas.
+  Honours Thesis, Department of Zoology,
+  University of Western Australia.
+
+  Voss, S., Main, B.Y. and Dadour, I.R. (2007)
+  Habitat preferences of the urban wall spider 
+  \emph{Oecobius navus} (Araneae, Oecobiidae).
+  \emph{Australian Journal of Entomology} \bold{46}, 261--268.
+}
+\examples{
+plot(spiders, show.window=FALSE, pch=16)
+}
+\keyword{datasets}
diff --git a/man/split.hyperframe.Rd b/man/split.hyperframe.Rd
new file mode 100644
index 0000000..2e7c421
--- /dev/null
+++ b/man/split.hyperframe.Rd
@@ -0,0 +1,71 @@
+\name{split.hyperframe}
+\alias{split.hyperframe}
+\alias{split<-.hyperframe}
+\title{
+  Divide Hyperframe Into Subsets and Reassemble
+}
+\description{
+  \code{split} divides the data \code{x} into subsets defined
+  by \code{f}. The replacement form replaces values corresponding to
+  such a division. 
+}
+\usage{
+  \method{split}{hyperframe}(x, f, drop = FALSE, ...)
+
+  \method{split}{hyperframe}(x, f, drop = FALSE, ...) <- value
+}
+\arguments{
+  \item{x}{
+    Hyperframe (object of class \code{"hyperframe"}).
+  }
+  \item{f}{
+    a \code{factor} in the sense that \code{as.factor(f)} defines the
+    grouping, or a list of such factors in which case their
+    interaction is used for the grouping.
+  }
+  \item{drop}{
+    logical value, indicating whether levels that do not occur should be
+    dropped from the result.
+  }
+  \item{value}{
+    a list of hyperframes which arose (or could have arisen)
+    from the command \code{split(x,f,drop=drop)}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  These are methods for the generic functions
+  \code{\link{split}} and \code{\link{split<-}}
+  for hyperframes (objects of class \code{"hyperframe"}).
+
+  A hyperframe is like a data frame, except that its entries
+  can be objects of any kind. The behaviour of these methods
+  is analogous to the corresponding methods for data frames.
+}
+\value{
+  The value returned from \code{split.hyperframe} is a list of
+  hyperframes containing
+  the values for the groups.  The components of the list are named
+  by the levels of \code{f} (after converting to a factor, or if already
+  a factor and \code{drop = TRUE}, dropping unused levels).
+
+  The replacement method \code{split<-.hyperframe} returns
+  a new hyperframe \code{x} for which \code{split(x,f)} equals \code{value}.
+}
+\author{\adrian, \rolf and \ege}
+\seealso{
+  \code{\link{hyperframe}}, \code{\link{[.hyperframe}}
+}
+\examples{
+   split(pyramidal, pyramidal$group)
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/split.im.Rd b/man/split.im.Rd
new file mode 100644
index 0000000..b4d26d9
--- /dev/null
+++ b/man/split.im.Rd
@@ -0,0 +1,69 @@
+\name{split.im}
+\alias{split.im}
+\title{Divide Image Into Sub-images}
+\description{
+  Divides a pixel image into several sub-images according to the
+  value of a factor, or according to the tiles of a tessellation.
+}
+\usage{
+\method{split}{im}(x, f, ..., drop = FALSE)
+}
+\arguments{
+  \item{x}{Pixel image (object of class \code{"im"}).}
+  \item{f}{
+    Splitting criterion. Either a tessellation (object of class
+    \code{"tess"}) or a pixel image with factor values.
+  }
+  \item{\dots}{Ignored.}
+  \item{drop}{Logical value determining whether each subset should
+    be returned as a pixel image (\code{drop=FALSE}) or 
+    as a one-dimensional vector of pixel values (\code{drop=TRUE}).
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{split}}
+  for the class of pixel images. The image \code{x} will be divided
+  into subsets determined by the data \code{f}. The result is a list
+  of these subsets.
+
+  The splitting criterion may be either
+  \itemize{
+    \item a tessellation (object of class \code{"tess"}). Each tile of
+    the tessellation delineates a subset of the spatial domain.
+    \item a pixel image (object of class \code{"im"}) with factor
+    values. The levels of the factor determine subsets of the spatial
+    domain.
+  }
+  If \code{drop=FALSE} (the default), the result is a list of pixel
+  images, each one a subset of the pixel image \code{x},
+  obtained by restricting the pixel domain to one of the subsets.
+  If \code{drop=TRUE}, then the pixel values are returned as
+  numeric vectors. 
+}
+\value{
+  If \code{drop=FALSE}, a list of pixel images (objects of class
+  \code{"im"}). It is also of class \code{"solist"} so that it can be
+  plotted immediately.
+
+  If \code{drop=TRUE}, a list of numeric vectors.
+}
+\seealso{
+  \code{\link{by.im}},
+  \code{\link{tess}},
+  \code{\link{im}}
+}
+\examples{
+  W <- square(1)
+  X <- as.im(function(x,y){sqrt(x^2+y^2)}, W)
+  Y <- dirichlet(runifpoint(12, W))
+  plot(split(X,Y))
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{methods}
+\keyword{manip}
diff --git a/man/split.msr.Rd b/man/split.msr.Rd
new file mode 100644
index 0000000..99fbe2c
--- /dev/null
+++ b/man/split.msr.Rd
@@ -0,0 +1,84 @@
+\name{split.msr}
+\alias{split.msr}
+\title{
+  Divide a Measure into Parts
+}
+\description{
+  Decomposes a measure into components, each component being a measure.
+}
+\usage{
+  \method{split}{msr}(x, f, drop = FALSE, \dots)
+}
+\arguments{
+  \item{x}{
+    Measure (object of class \code{"msr"}) to be decomposed.
+  }
+  \item{f}{
+    Factor or tessellation determining the decomposition.
+    Argument passed to \code{\link{split.ppp}}.
+    See Details.
+  }
+  \item{drop}{
+    Logical value indicating whether empty components should be retained
+    in the list (\code{drop=FALSE}, the default) or deleted (\code{drop=TRUE}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  An object of class \code{"msr"} represents a signed (i.e. real-valued)
+  or vector-valued measure in the \pkg{spatstat} package.
+  See \code{\link{msr}} for explanation.
+
+  This function is a method for the generic
+  \code{\link[base]{split}}. It divides the measure \code{x} into
+  components, each of which is a measure.
+
+  A measure \code{x} is represented in \pkg{spatstat} by 
+  a finite set of sample points with values attached to them.
+  The function \code{split.msr} divides this pattern of sample points
+  into several sub-patterns of points using \code{\link{split.ppp}}.
+  For each sub-pattern, the values attached to these points are
+  extracted from \code{x}, and these values and sample points
+  determine a measure, which is a
+  component or piece of the original \code{x}.
+
+  The argument \code{f} can be missing, if the sample points of \code{x}
+  are multitype points. In this case, \code{x} represents a measure
+  associated with marked spatial locations, and the command \code{split(x)}
+  separates \code{x} into a list of component measures, one for each
+  possible mark.
+
+  Otherwise the argument \code{f} is passed to \code{\link{split.ppp}}.
+  It should be either a factor (of length equal to the number of
+  sample points of \code{x}) or a tessellation (object of class
+  \code{"tess"} representing a division of space into tiles)
+  as documented under \code{\link{split.ppp}}.
+}
+\value{
+  A list, each of whose entries is a measure (object of class
+  \code{"msr"}). 
+}
+\author{
+  \spatstatAuthors
+}
+\seealso{
+  \code{\link{msr}},
+  \code{\link{[.msr}},
+  \code{\link{with.msr}}
+}
+\examples{
+  ## split by tessellation
+  a <- residuals(ppm(cells ~ x))
+  aa <- split(a, dirichlet(runifpoint(4)))
+  aa
+  sapply(aa, integral)
+
+  ## split by type of point
+  b <- residuals(ppm(amacrine ~ marks + x))
+  bb <- split(b)
+  bb
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/split.ppp.Rd b/man/split.ppp.Rd
new file mode 100644
index 0000000..a62100d
--- /dev/null
+++ b/man/split.ppp.Rd
@@ -0,0 +1,204 @@
+\name{split.ppp}
+\alias{split.ppp}
+\alias{split<-.ppp}
+\title{Divide Point Pattern into Sub-patterns}
+\description{
+  Divides a point pattern into several sub-patterns,
+  according to their marks, or according to any user-specified grouping.
+}
+\usage{
+  \method{split}{ppp}(x, f = marks(x), drop=FALSE, un=NULL, reduce=FALSE, \dots)
+  \method{split}{ppp}(x, f = marks(x), drop=FALSE, un=missing(f), \dots) <- value
+}
+\arguments{
+  \item{x}{
+    A two-dimensional point pattern.
+    An object of class \code{"ppp"}.
+  }
+  \item{f}{
+    Data determining the grouping. Either a factor,
+    a logical vector,
+    a pixel image with factor values, a tessellation, a window,
+    or the name of one of the columns of marks.
+  }
+  \item{drop}{
+    Logical. Determines whether empty groups will
+    be deleted.
+  }
+  \item{un}{
+    Logical. Determines whether the resulting subpatterns will be unmarked
+    (i.e. whether marks will be removed	from the points in each
+    subpattern). 
+  }
+  \item{reduce}{
+    Logical. Determines whether to delete the column of marks used to split the
+    pattern, when the marks are a data frame.
+  }
+  \item{\dots}{
+    Other arguments are ignored.
+  }
+  \item{value}{
+    List of point patterns.
+  }
+} 
+\value{
+  The value of \code{split.ppp} is a list of point patterns.
+  The components of the list are named by the levels of \code{f}.
+  The list also has the class \code{"splitppp"}.
+
+  The assignment form \code{split<-.ppp} returns the updated
+  point pattern \code{x}. 
+}
+\details{
+  The function \code{split.ppp}
+  divides up the points of the point pattern \code{x}
+  into several sub-patterns according to the values of \code{f}.
+  The result is a list of point patterns.
+  
+  The argument \code{f} may be
+  \itemize{
+    \item
+    a factor, of length equal to the number of points in \code{x}.
+    The levels of \code{f}
+    determine the destination of each point in \code{x}.
+    The \code{i}th point of \code{x} will be placed in the sub-pattern
+    \code{split.ppp(x)$l} where \code{l = f[i]}.
+    \item
+    a pixel image (object of class \code{"im"}) with factor values.
+    The pixel value of \code{f}
+    at each point of \code{x} will be used as the classifying variable.
+    \item
+    a tessellation (object of class \code{"tess"}).
+    Each point of \code{x} will be classified according to
+    the tile of the tessellation into which it falls.
+    \item
+    a window (object of class \code{"owin"}).
+    Each point of \code{x} will be classified according to
+    whether it falls inside or outside this window.
+    \item
+    a character string, matching the name of one of the columns of
+    marks, if \code{marks(x)} is a data frame. This column should
+    be a factor.
+  }
+  If \code{f} is missing, then it will be determined by the
+  marks of the point pattern. The pattern \code{x} can be either
+  \itemize{
+    \item 
+    a multitype point pattern
+    (a marked point pattern whose marks vector is a factor).
+    Then \code{f} is taken to be the marks vector.
+    The effect is that the points of each type
+    are separated into different point patterns.
+    \item 
+    a marked point pattern with a data frame of marks, containing at least one
+    column that is a factor. The first such column will be used to
+    determine the splitting factor \code{f}.
+  }
+
+  Some of the sub-patterns created by the split
+  may be empty. If \code{drop=TRUE}, then empty sub-patterns will
+  be deleted from the list. If \code{drop=FALSE} then they are retained.
+
+  The argument \code{un} determines how to handle marks 
+  in the case where \code{x} is a marked point pattern.
+  If \code{un=TRUE} then the marks of the 
+  points will be discarded when they are split into groups,
+  while if \code{un=FALSE} then the marks will be retained.
+
+  If \code{f} and \code{un} are both missing,
+  then the default is \code{un=TRUE} for multitype point patterns
+  and \code{un=FALSE} for marked point patterns with a data frame of
+  marks.
+
+  If the marks of \code{x} are a data frame, then 
+  \code{split(x, reduce=TRUE)} will discard only the column of marks
+  that was used to split the pattern. This applies only when
+  the argument \code{f} is missing.
+
+  The result of \code{split.ppp} has class \code{"splitppp"}
+  and can be plotted using \code{\link{plot.splitppp}}.
+
+  The assignment function \code{split<-.ppp} 
+  updates the point pattern \code{x} so that
+  it satisfies \code{split(x, f, drop, un) = value}. The argument \code{value}
+  is expected to be a list of point patterns, one for each level of
+  \code{f}. These point patterns are expected to be compatible with the
+  type of data in the original pattern \code{x}.
+
+  Splitting can also be undone by the function
+  \code{\link{superimpose}},
+  but this typically changes the ordering of the data.
+}
+\seealso{
+  \code{\link{cut.ppp}},
+  \code{\link{plot.splitppp}},
+  \code{\link{superimpose}},
+  \code{\link{im}},
+  \code{\link{tess}},
+  \code{\link{ppp.object}}
+}
+\examples{
+
+# (1) Splitting by marks
+
+# Multitype point pattern: separate into types
+ u <- split(amacrine)
+
+# plot them
+ plot(split(amacrine))
+
+# the following are equivalent:
+ amon <- split(amacrine)$on
+ amon <- unmark(amacrine[amacrine$marks == "on"])
+ amon <- subset(amacrine, marks == "on", -marks)
+   
+# the following are equivalent:
+ amon <- split(amacrine, un=FALSE)$on
+ amon <- amacrine[amacrine$marks == "on"]
+   
+# Scramble the locations of the 'on' cells
+ X <- amacrine
+ u <- split(X)
+ u$on <- runifpoint(ex=amon)
+ split(X) <- u
+
+# Point pattern with continuous marks
+ trees <- longleaf
+ \testonly{
+	# smaller dataset
+	trees <- trees[seq(1, npoints(trees), by=80)]
+ }
+ # cut the range of tree diameters into three intervals
+ # using cut.ppp
+ long3 <- cut(trees, breaks=3)
+ # now split them
+ long3split <- split(long3)
+
+# (2) Splitting by a factor
+
+# Unmarked point pattern
+  swedishpines
+# cut & split according to nearest neighbour distance
+  f <- cut(nndist(swedishpines), 3)
+  u <- split(swedishpines, f)
+
+# (3) Splitting over a tessellation
+   tes <- tess(xgrid=seq(0,96,length=5),ygrid=seq(0,100,length=5))
+   v <- split(swedishpines, tes)
+
+
+# (4) how to apply an operation to selected points:
+#  split into components, transform desired component, then un-split
+#  e.g. apply random jitter to 'on' points only
+  X <- amacrine
+  Y <- split(X)
+  Y$on <- rjitter(Y$on, 0.1)
+  split(X) <- Y
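+
+# (5) splitting by a column of data-frame marks, with reduce=TRUE
+#  (sketch: first add a hypothetical factor column 'grade' to the
+#   finpines marks; reduce=TRUE then discards the column used to split)
+  fin <- finpines
+  marks(fin)$grade <- cut(marks(fin)$height, 3)
+  w <- split(fin, reduce=TRUE)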
+}
+
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{manip}
diff --git a/man/split.ppx.Rd b/man/split.ppx.Rd
new file mode 100644
index 0000000..83b8c3c
--- /dev/null
+++ b/man/split.ppx.Rd
@@ -0,0 +1,117 @@
+\name{split.ppx}
+\alias{split.ppx}
+\title{Divide Multidimensional Point Pattern into Sub-patterns}
+\description{
+  Divides a multidimensional point pattern into several sub-patterns,
+  according to their marks, or according to any user-specified grouping.
+}
+\usage{
+  \method{split}{ppx}(x, f = marks(x), drop=FALSE, un=NULL, \dots)
+}
+\arguments{
+  \item{x}{
+    A multi-dimensional point pattern.
+    An object of class \code{"ppx"}.
+  }
+  \item{f}{
+    Data determining the grouping. Either a factor, 
+    or the name of one of the columns of marks.
+  }
+  \item{drop}{
+    Logical. Determines whether empty groups will
+    be deleted.
+  }
+  \item{un}{
+    Logical. Determines whether the resulting subpatterns will be unmarked
+    (i.e. whether marks will be removed	from the points in each
+    subpattern). 
+  }
+  \item{\dots}{
+    Other arguments are ignored.
+  }
+} 
+\value{
+  A list of point patterns.
+  The components of the list are named by the levels of \code{f}.
+  The list also has the class \code{"splitppx"} and \code{"anylist"}.
+}
+\details{
+  The generic command \code{\link[base]{split}} allows a dataset to be separated
+  into subsets according to the value of a grouping variable.
+  
+  The function \code{split.ppx} is a method for the generic
+  \code{\link[base]{split}} for the class \code{"ppx"} of multidimensional
+  point patterns. It divides up the points of the point pattern \code{x}
+  into several sub-patterns according to the values of \code{f}.
+  The result is a list of point patterns.
+  
+  The argument \code{f} may be
+  \itemize{
+    \item
+    a factor, of length equal to the number of points in \code{x}.
+    The levels of \code{f}
+    determine the destination of each point in \code{x}.
+    The \code{i}th point of \code{x} will be placed in the sub-pattern
+    \code{split.ppx(x)$l} where \code{l = f[i]}.
+    \item
+    a character string, matching the name of one of the columns of
+    marks, if \code{marks(x)} is a data frame. This column should
+    be a factor.
+  }
+  If \code{f} is missing, then it will be determined by the
+  marks of the point pattern. The pattern \code{x} can be either
+  \itemize{
+    \item 
+    a multitype point pattern
+    (a marked point pattern whose marks vector is a factor).
+    Then \code{f} is taken to be the marks vector.
+    The effect is that the points of each type
+    are separated into different point patterns.
+    \item 
+    a marked point pattern with a data frame or hyperframe
+    of marks, containing at least one
+    column that is a factor. The first such column will be used to
+    determine the splitting factor \code{f}.
+  }
+
+  Some of the sub-patterns created by the split
+  may be empty. If \code{drop=TRUE}, then empty sub-patterns will
+  be deleted from the list. If \code{drop=FALSE} then they are retained.
+
+  The argument \code{un} determines how to handle marks 
+  in the case where \code{x} is a marked point pattern.
+  If \code{un=TRUE} then the marks of the 
+  points will be discarded when they are split into groups,
+  while if \code{un=FALSE} then the marks will be retained.
+
+  If \code{f} and \code{un} are both missing,
+  then the default is \code{un=TRUE} for multitype point patterns
+  and \code{un=FALSE} for marked point patterns with a data frame of
+  marks.
+  
+  The result of \code{split.ppx} has class \code{"splitppx"}
+  and \code{"anylist"}. There are methods for \code{print},
+  \code{summary} and \code{plot}.
+}
+\seealso{
+  \code{\link{ppx}},
+  \code{\link{plot.anylist}}
+}
+\examples{
+   df <- data.frame(x=runif(4),y=runif(4),t=runif(4),
+                    age=rep(c("old", "new"), 2),
+                    size=runif(4))
+   X <- ppx(data=df, coord.type=c("s","s","t","m","m"))
+   X
+   split(X)
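+   ## equivalently, specify the factor-valued mark column by name
+   split(X, "age")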
+}
+
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{methods}
+\keyword{manip}
diff --git a/man/spokes.Rd b/man/spokes.Rd
new file mode 100644
index 0000000..eb7071c
--- /dev/null
+++ b/man/spokes.Rd
@@ -0,0 +1,96 @@
+\name{spokes}
+\alias{spokes}
+\title{Spokes pattern of dummy points}
+\description{
+  Generates a pattern of dummy points in a window,
+  given a data point pattern.
+  The dummy points lie on the radii of circles emanating from each
+  data point.
+}
+\usage{
+ spokes(x, y, nrad = 3, nper = 3, fctr = 1.5, Mdefault = 1)
+}
+\arguments{
+  \item{x}{
+    Vector of \eqn{x} coordinates of data points, or a list
+    with components \code{x} and \code{y}, or a point pattern
+    (an object of class \code{"ppp"}).
+  }
+  \item{y}{
+    Vector of \eqn{y} coordinates of data points.  Ignored
+    unless \code{x} is a vector.
+  }
+  \item{nrad}{
+    Number of radii emanating from each data point.
+  }
+  \item{nper}{
+    Number of dummy points per radius.
+  }
+  \item{fctr}{
+    Scale factor.
+    Length of largest spoke radius is \code{fctr * M}
+    where \code{M} is the mean nearest neighbour distance
+    for the data points.
+  }
+  \item{Mdefault}{
+    Value of \code{M} to be used if \code{x} has length 1.
+  }
+}
+\value{
+  If argument \code{x} is a point pattern, a point pattern with
+  window equal to that of \code{x}.  Otherwise a list with two
+  components \code{x} and \code{y}.  In either case the components
+  \code{x} and \code{y} of the value are numeric vectors giving
+  the coordinates of the dummy points.
+}
+\details{
+  This function is useful in creating dummy points for quadrature
+  schemes (see \code{\link{quadscheme}}).
+
+  Given the data points, the function creates a collection of
+  \code{nrad * nper * length(x)} dummy points.
+
+  Around each data point \code{(x[i],y[i])} there are
+  \code{nrad * nper} dummy points, lying on \code{nrad} radii
+  emanating from \code{(x[i],y[i])}, with \code{nper} dummy points
+  equally spaced along each radius.
+
+  The (equal) spacing of dummy points along each radius is
+  controlled by the factor \code{fctr}. 
+  The distance from a data point to the furthest of its associated
+  dummy points is \code{fctr * M}
+  where \code{M} is the mean nearest neighbour distance
+  for the data points.
+
+  If there is only one data point the nearest neighbour distance
+  is infinite, so the value \code{Mdefault} will be used in place
+  of \code{M}.
+
+  If \code{x} is a point pattern, then the value returned is
+  also a point pattern, which is clipped to the window
+  of \code{x}.  Hence there may be fewer than
+  \code{nrad * nper * length(x)} dummy points in the pattern
+  returned.
+}
+\seealso{
+  \code{\link{quad.object}},
+  \code{\link{quadscheme}},
+  \code{\link{inside.owin}},
+  \code{\link{gridcentres}},
+  \code{\link{stratrand}}
+}
+\examples{
+  dat <- runifrect(10)
+  dum <- spokes(dat$x, dat$y, 5, 3, 0.7)
+  plot(dum)
+  Q <- quadscheme(dat, dum, method="dirichlet")
+  plot(Q, tiles=TRUE)
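+  ## a point pattern argument gives a point pattern result,
+  ## clipped to the window of the data (illustrative sketch)
+  dum2 <- spokes(dat, nrad=4, nper=2)
+  plot(dum2)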
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/sporophores.Rd b/man/sporophores.Rd
new file mode 100644
index 0000000..7301d0b
--- /dev/null
+++ b/man/sporophores.Rd
@@ -0,0 +1,45 @@
+\name{sporophores}
+\alias{sporophores}
+\docType{data}
+\title{
+  Sporophores Data
+}
+\description{
+  Spatial pattern of sporophores of three species of fungi around a tree.
+}
+\usage{data(sporophores)}
+\format{
+  A multitype spatial point pattern (an object of class \code{"ppp"}
+  with factor-valued marks indicating the species).
+  Spatial coordinates are given in centimetres.
+  Levels of the species variable are
+  \code{"L laccata"}, \code{"L pubescens"} and \code{"Hebloma spp"}.
+}
+\details{
+  Ford, Mason and Pelham (1980) studied the spatial locations
+  of sporophores of three species of mycorrhizal fungi
+  distributed around a young birch tree in agricultural soil.
+  The dataset given here is the spatial pattern
+  in the fifth year after the tree was planted.
+  The species are 
+  \emph{Laccaria laccata}, \emph{Lactarius pubescens}
+  and \emph{Hebeloma} spp. (spelt \code{"Hebloma"} in the data).
+}
+\source{
+  Data generously provided by Dr E.D. Ford.
+  Please cite Ford et al (1980) in any use of these data.
+}
+\references{
+  Ford, E.D., Mason, P.A. and Pelham, J. (1980) 
+  Spatial patterns of sporophore distribution around a
+  young birch tree in three successive years.
+  \emph{Transactions of the British Mycological Society}
+  \bold{75}, 287--296.
+}
+\examples{
+## reproduce Fig 1 in Ford et al (1980)
+plot(sporophores, chars=c(16,1,2), cex=0.6, leg.args=list(cex=1.1))
+points(0,0,pch=16, cex=2)
+text(15,8,"Tree", cex=0.75)
+}
+\keyword{datasets}
diff --git a/man/spruces.Rd b/man/spruces.Rd
new file mode 100644
index 0000000..f7388ed
--- /dev/null
+++ b/man/spruces.Rd
@@ -0,0 +1,62 @@
+\name{spruces}
+\alias{spruces}
+\docType{data}
+\title{
+  Spruces Point Pattern
+}
+\description{
+  The data give the locations of Norwegian spruce trees 
+  in a natural forest stand in Saxonia, Germany.
+  Each tree is marked with its diameter at breast height.
+} 
+\format{
+  An object of class \code{"ppp"}
+  representing the point pattern of tree locations
+  in a 56 x 38 metre sampling region. Each tree is marked
+  with its diameter at breast height. All values are given in metres.
+
+  See \code{\link{ppp.object}} for details of the format of a
+  point pattern object. The marks are numeric.
+
+  These data have been analysed by Fiksel (1984, 1988),
+  Stoyan et al (1987), Penttinen et al (1992) and
+  Goulard et al (1996).
+}
+\usage{data(spruces)}
+\source{Stoyan et al (1987). Original source unknown.}
+\examples{
+     plot(spruces)
+     # To reproduce Goulard et al. Figure 3
+     # (Goulard et al: "influence zone radius equals 5 * stem diameter")
+     # (spatstat help(plot.ppp): "size of symbol = diameter")
+     plot(spruces, maxsize=10*max(spruces$marks))
+     plot(unmark(spruces), add=TRUE)
+}
+\references{
+  Fiksel, T. (1984)
+  Estimation of parameterized pair potentials of marked and
+  nonmarked Gibbsian point processes.
+  \emph{Elektron. Informationsverarb. u. Kybernet.}
+  \bold{20}, 270--278.
+  
+  Fiksel, T. (1988)
+  Estimation of interaction potentials of Gibbsian point processes.
+  \emph{Statistics}
+  \bold{19}, 77--86.
+  
+  Goulard, M., \enc{Särkkä}{Sarkka}, A. and Grabarnik, P. (1996)
+  Parameter estimation for marked Gibbs point
+  processes through the maximum pseudolikelihood method.
+  \emph{Scandinavian Journal of Statistics}
+  \bold{23}, 365--379.
+
+  Penttinen, A., Stoyan, D. and Henttonen, H. (1992)
+  Marked point processes in forest statistics.
+  \emph{Forest Science} \bold{38}, 806--824.
+
+  Stoyan, D., Kendall, W.S. and Mecke, J. (1987)
+  \emph{Stochastic Geometry and its Applications}.
+  Wiley.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/square.Rd b/man/square.Rd
new file mode 100644
index 0000000..76f2dec
--- /dev/null
+++ b/man/square.Rd
@@ -0,0 +1,63 @@
+\name{square}
+\alias{square}
+\alias{unit.square}
+\title{Square Window}
+\description{
+Creates a square window.
+}
+\usage{
+ square(r=1, unitname=NULL)
+ unit.square()
+}
+\arguments{
+  \item{r}{Numeric. The side length of the square,
+    or a vector giving the minimum and maximum coordinate values.
+  }
+  \item{unitname}{
+    Optional. Name of unit of length. Either a single character string,
+    or a vector of two character strings giving the
+    singular and plural forms, respectively.
+  }
+}
+\value{
+  An object of class \code{"owin"} (see \code{\link{owin.object}})
+  specifying a window. 
+}
+\details{
+  If \code{r} is a number, \code{square(r)}
+  is a shortcut for creating a window object
+  representing the square 
+  \eqn{[0,r] \times [0,r]}{[0,r] * [0,r]}.
+  It is equivalent to the command
+  \code{owin(c(0,r),c(0,r))}.
+
+  If \code{r} is a vector of length 2, then
+  \code{square(r)} creates the square with \code{x} and \code{y} coordinates
+  ranging from \code{r[1]} to \code{r[2]}.
+  
+  \code{unit.square} creates the unit square
+  \eqn{[0,1] \times [0,1]}{[0,1] * [0,1]}.
+  It is equivalent to
+  \code{square(1)} or \code{square()} or \code{owin(c(0,1),c(0,1))}.
+
+  These commands are included for convenience,
+  and to improve the readability of some code.
+}
+\seealso{
+  \code{\link{owin.object}},
+  \code{\link{owin}}
+}
+\examples{
+ W <- square(10)
+ W <- square(c(-1,1))
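+ ## a square with a recorded unit of length (singular, plural forms)
+ W <- square(10, unitname=c("metre","metres"))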
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/ssf.Rd b/man/ssf.Rd
new file mode 100644
index 0000000..917c3cf
--- /dev/null
+++ b/man/ssf.Rd
@@ -0,0 +1,59 @@
+\name{ssf}
+\alias{ssf}
+\title{
+  Spatially Sampled Function
+}
+\description{
+  Create an object that represents a spatial function
+  which has been evaluated or sampled at an irregular set of points.
+}
+\usage{
+  ssf(loc, val)
+}
+\arguments{
+  \item{loc}{
+    The spatial locations at which the function has been evaluated.
+    A point pattern (object of class \code{"ppp"}).
+  }
+  \item{val}{
+    The function values at these locations.
+    A numeric vector with one entry for each point of \code{loc},
+    or a data frame with one row for each point of \code{loc}.
+  }
+}
+\details{
+  An object of class \code{"ssf"} represents a real-valued or
+  vector-valued function that has been evaluated or sampled at an
+  irregular set of points. An example would be a spatial covariate
+  that has only been measured at certain locations.
+
+  An object of this class also inherits the class \code{"ppp"},
+  and is essentially the same as a marked point pattern, except
+  for the class membership which enables it to be 
+  handled in a different way.
+
+  There are methods for \code{plot}, \code{print} etc; see
+  \code{\link{plot.ssf}} and \code{\link{methods.ssf}}.
+
+  Use \code{\link[spatstat]{unmark}} to extract only the point
+  locations, and \code{\link{marks.ssf}} to extract only the function values.
+}
+\value{
+  Object of class \code{"ssf"}.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{plot.ssf}}, 
+  \code{\link{methods.ssf}}, 
+  \code{\link{Smooth.ssf}}, 
+  \code{\link{with.ssf}},
+  \code{\link{[.ssf}}.
+}
+\examples{
+  ssf(cells, nndist(cells, k=1:3))
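+  ## the locations and function values can be extracted separately
+  X <- ssf(cells, nndist(cells))
+  unmark(X)
+  marks(X)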
+}
+\keyword{spatial}
+\keyword{datagen}
+
diff --git a/man/stieltjes.Rd b/man/stieltjes.Rd
new file mode 100644
index 0000000..a40861a
--- /dev/null
+++ b/man/stieltjes.Rd
@@ -0,0 +1,71 @@
+\name{stieltjes}
+\alias{stieltjes}
+\title{Compute Integral of Function Against Cumulative Distribution}
+\description{
+  Computes the Stieltjes integral 
+  of a function \eqn{f} with respect to a function \eqn{M}.
+}
+\usage{
+stieltjes(f, M, ...)
+}
+\arguments{
+  \item{f}{
+    The integrand. A function in the \R language.
+  }
+  \item{M}{
+    The cumulative function against which \code{f} will be
+    integrated. An object of class \code{"fv"} or \code{"stepfun"}.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{f}.
+  }
+}
+\details{
+  This command computes the Stieltjes integral
+  \deqn{I = \int f(x) dM(x)}{I = integral f(x) dM(x)}
+  of a real-valued function \eqn{f(x)}
+  with respect to a nondecreasing function \eqn{M(x)}.
+
+  One common use of the Stieltjes integral is
+  to find the mean value of a random variable from its
+  cumulative distribution function \eqn{F(x)}. The mean value is
+  the Stieltjes integral of \eqn{f(x)=x} with respect to \eqn{F(x)}.
+
+  The argument \code{f} should be a \code{function} in the \R language.
+  It should accept a numeric vector argument \code{x} and should return
+  a numeric vector of the same length.
+
+  The argument \code{M} should be either a step function
+  (object of class \code{"stepfun"}) or a function value table
+  (object of class \code{"fv"}, see \code{\link{fv.object}}).
+  Objects of class \code{"stepfun"} are returned by
+  \code{\link[stats]{ecdf}}, \code{\link{ewcdf}},
+  \code{\link{spatialcdf}} and other utilities.
+  Objects of class \code{"fv"} are returned
+  by the commands \code{\link{Kest}}, \code{\link{Gest}}, etc.
+}
+\value{
+  A list containing the value of the Stieltjes integral
+  computed for each version (column) of the function \code{M}.
+}
+\seealso{
+  \code{\link{fv.object}},
+  \code{\link{Gest}}
+}
+\examples{
+  # estimate cdf of nearest neighbour distance in redwood data
+  G <- Gest(redwood)
+  # compute estimate of mean nearest neighbour distance
+  stieltjes(function(x){x}, G)
+  # estimated probability of a distance in the interval [0.1,0.2]
+  stieltjes(function(x,a,b){ (x >= a) & (x <= b)}, G, a=0.1, b=0.2)
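+
+  # second moment, and hence variance, of nearest neighbour distance
+  # (a sketch, extracting the Kaplan-Meier estimate 'km' from the result)
+  m1 <- stieltjes(function(x){x}, G)$km
+  m2 <- stieltjes(function(x){x^2}, G)$km
+  m2 - m1^2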
+
+  # stepfun example
+  H <- spatialcdf(bei.extra$elev, normalise=TRUE)
+  stieltjes(function(x){x}, H)
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/stienen.Rd b/man/stienen.Rd
new file mode 100644
index 0000000..0bd7738
--- /dev/null
+++ b/man/stienen.Rd
@@ -0,0 +1,83 @@
+\name{stienen}
+\alias{stienen}
+\alias{stienenSet}
+\title{
+  Stienen Diagram
+}
+\description{
+  Draw the Stienen diagram of a point pattern,
+  or compute the region covered by the Stienen diagram.
+}
+\usage{
+stienen(X, \dots, bg = "grey", border = list(bg = NULL))
+stienenSet(X, edge=TRUE)
+}
+\arguments{
+  \item{X}{
+    Point pattern (object of class \code{"ppp"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{plot.ppp}}
+    to control the plot.
+  }
+  \item{bg}{
+    Fill colour for circles.
+  }
+  \item{border}{
+    Either a list of arguments passed to \code{\link{plot.ppp}}
+    to control the display of circles at the border of the diagram,
+    or the value \code{FALSE}
+    indicating that the border circles should not be plotted.
+  }
+  \item{edge}{
+    Logical value indicating whether to include the circles at the
+    border of the diagram.
+  }
+}
+\details{
+  The Stienen diagram of a point pattern (Stienen, 1982)
+  is formed by drawing a circle around each point of the pattern,
+  with diameter equal to the nearest-neighbour distance for that point.
+  These circles do not overlap. If two points are nearest neighbours
+  of each other, then the corresponding circles touch.
+
+  \code{stienenSet(X)} computes the union of these circles and
+  returns it as a window (object of class \code{"owin"}).
+
+  \code{stienen(X)} generates a plot of the Stienen diagram of
+  the point pattern \code{X}. By default, circles are shaded in grey
+  if they lie inside the window of \code{X}, and are not shaded
+  otherwise.
+}
+\value{
+  The plotting function \code{stienen} returns \code{NULL}.
+
+  The return value of \code{stienenSet} is a window (object of class
+  \code{"owin"}).
+}
+\references{
+  Stienen, H. (1982)
+  \emph{Die Vergroeberung von Karbiden in reinen Eisen-Kohlenstoff
+    Staehlen}.
+  Dissertation, RWTH Aachen.
+}
+\seealso{
+  \code{\link{nndist}},
+  \code{\link{plot.ppp}}
+}
+\examples{
+  Y <- stienenSet(cells)
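+  ## the Stienen set is a window; find the fraction of area it covers
+  area(Y) / area(Window(cells))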
+  stienen(redwood)
+  stienen(redwood, border=list(bg=NULL, lwd=2, cols="red"))
+}
+\author{\adrian, \rolf and \ege}
+\keyword{spatial}
+\keyword{math}
+\keyword{manip}
diff --git a/man/stratrand.Rd b/man/stratrand.Rd
new file mode 100644
index 0000000..366f7df
--- /dev/null
+++ b/man/stratrand.Rd
@@ -0,0 +1,80 @@
+\name{stratrand}
+\alias{stratrand}
+\title{Stratified random point pattern}
+\description{
+  Generates a \dQuote{stratified random} pattern of points in a window,
+  by dividing the window into rectangular tiles and placing
+  \code{k} random points in each tile.
+}
+\usage{
+ stratrand(window, nx, ny, k = 1)
+}
+\arguments{
+  \item{window}{A window. 
+    An object of class \code{\link{owin}},
+    or data in any format acceptable to \code{\link{as.owin}()}.
+  }
+  \item{nx}{Number of tiles in each row.
+  }
+  \item{ny}{Number of tiles in each column.
+  }
+  \item{k}{Number of random points to generate in each tile.
+  }
+}
+\value{
+  A list with two components \code{x} and \code{y}, which are numeric
+  vectors giving the coordinates of the random points.
+}
+\details{
+  The bounding rectangle of \code{window} is divided into
+  a regular \eqn{nx \times ny}{nx * ny} grid of rectangular tiles.
+  In each tile, \code{k} random points are generated independently
+  with a uniform distribution in that tile. 
+
+  Note that some of these random points may lie outside the window,
+  if \code{window} is not of type \code{"rectangle"}. The function
+  \code{\link{inside.owin}} can be used to select those points
+  which do lie inside the window. See the examples.
+
+  This function is useful in creating dummy points for quadrature
+  schemes (see \code{\link{quadscheme}}) as well as in simulating
+  random point patterns.
+}
+\seealso{
+  \code{\link{quad.object}},
+  \code{\link{quadscheme}},
+  \code{\link{inside.owin}},
+  \code{\link{gridcentres}}
+}
+\examples{
+  w <- unit.square()
+  xy <- stratrand(w, 10, 10)
+  \dontrun{
+  plot(w)
+  points(xy)
+  }
+
+  # polygonal boundary
+  bdry <- list(x=c(0.1,0.3,0.7,0.4,0.2),
+               y=c(0.1,0.1,0.5,0.7,0.3))
+  w <- owin(c(0,1), c(0,1), poly=bdry)
+  xy <- stratrand(w, 10, 10, 3)
+  \dontrun{
+  plot(w)
+  points(xy)
+  }
+  # determine which random points fall inside the polygon
+  ok <- inside.owin(xy$x, xy$y, w)
+  \dontrun{
+  plot(w)
+  points(xy$x[ok], xy$y[ok])
+  }
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/studpermu.test.Rd b/man/studpermu.test.Rd
new file mode 100644
index 0000000..c4f6c47
--- /dev/null
+++ b/man/studpermu.test.Rd
@@ -0,0 +1,127 @@
+\name{studpermu.test}
+\alias{studpermu.test}
+\title{
+  Studentised Permutation Test
+}
+\description{
+  Perform a studentised permutation test for a difference between
+  groups of point patterns.
+}
+\usage{
+   studpermu.test(X, formula, summaryfunction = Kest,
+        \dots, rinterval = NULL, nperm = 999,
+        use.Tbar = FALSE, minpoints = 20, rsteps = 128,
+        r = NULL, arguments.in.data = FALSE)
+}
+\arguments{
+  \item{X}{
+    Data. Either a \code{hyperframe} or a list of lists of point patterns.
+  }
+  \item{formula}{
+    Formula describing the grouping, when \code{X} is a hyperframe.
+    The left side of the formula identifies which column of \code{X}
+    contains the point patterns.
+    The right side identifies the grouping factor. 
+    If the formula is missing, the grouping variable is taken to be the
+    first column of \code{X} that contains a factor, and the point
+    patterns are taken from the first column that contains point patterns.
+  }
+  \item{summaryfunction}{
+    Summary function applicable to point patterns.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{summaryfunction}.
+  }
+  \item{rinterval}{
+    Interval of distance values \eqn{r} over which the
+    summary function should be evaluated and over which the test
+    statistic will be integrated. If \code{NULL}, the default
+    range of the summary statistic is used (taking the intersection
+    of these ranges over all patterns).
+  }
+  \item{nperm}{
+    Number of random permutations for the test.
+  }
+  \item{use.Tbar}{
+    Logical value indicating choice of test statistic.
+    If \code{TRUE}, use the alternative test statistic,
+    which is appropriate for summary functions with
+    roughly constant variance, such as \eqn{K(r)/r} or \eqn{L(r)}.
+  }
+  \item{minpoints}{
+    Minimum permissible number of points in a point pattern
+    for inclusion in the test calculation.
+  }
+  \item{rsteps}{
+    Number of discretisation steps in the \code{rinterval}.
+  }
+  \item{r}{
+    Optional vector of distance values as the argument for
+    \code{summaryfunction}. Should not usually be given.
+    There is a sensible default.
+  }
+  \item{arguments.in.data}{
+    Logical. If \code{TRUE}, individual extra arguments to
+    \code{summaryfunction} will be taken from \code{X}
+    (which must be a hyperframe). This assumes that 
+    the first argument of \code{summaryfunction} is the
+    point pattern dataset.
+  }
+}
+\details{
+  This function performs the studentised permutation test
+  of Hahn (2012) for a difference between groups of point patterns.
+
+  The first argument \code{X} should be either
+  \describe{
+    \item{a list of lists of point patterns:}{
+      Each element of \code{X} will be interpreted as a group of
+      point patterns, assumed to be replicates of the same point process.
+    }
+    \item{a hyperframe:}{
+      One column of the hyperframe should contain point patterns,
+      and another column should contain a factor indicating the
+      grouping. The argument \code{formula} should be a formula in the
+      \R language specifying the grouping: it should be of the form
+      \code{P ~ G} where \code{P} is the name of the column of point
+      patterns, and \code{G} is the name of the factor.
+    }
+  }
+  A group needs to contain at least two point patterns with at least
+  \code{minpoints} points in each pattern.
+
+  The function returns an object of class \code{"htest"}
+  and \code{"studpermutest"} that can be printed and plotted.
+  The printout shows the test result and \eqn{p}-value.
+  The plot shows the summary functions for the
+  groups (and the group means if requested).
+}
+\value{
+  Object of class \code{"studpermutest"}.
+}
+\references{
+  Hahn, U. (2012) 
+  A studentized permutation test for the comparison of
+  spatial point patterns.
+  \emph{Journal of the American Statistical Association}
+  \bold{107} (498), 754--764.
+}
+\author{
+  Ute Hahn.
+
+  Modified for \pkg{spatstat} by \adrian, \rolf and \ege.
+}
+\examples{
+  np <- if(interactive()) 99 else 19
+  testpyramidal <- studpermu.test(pyramidal, Neurons ~ group, nperm=np)
+  testpyramidal
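+  ## the result can also be plotted, showing the group summary functions
+  plot(testpyramidal)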
+}
+\keyword{spatial}
+\keyword{htest}
diff --git a/man/subfits.Rd b/man/subfits.Rd
new file mode 100644
index 0000000..8e756c3
--- /dev/null
+++ b/man/subfits.Rd
@@ -0,0 +1,84 @@
+\name{subfits}
+\alias{subfits}
+\alias{subfits.new}
+\alias{subfits.old}
+\title{Extract List of Individual Point Process Models}
+\description{
+  Takes a Gibbs point process model that has been fitted
+  to several point patterns simultaneously, and produces a list
+  of fitted point process models for the individual point patterns.
+}
+\usage{
+   subfits(object, what="models", verbose=FALSE)
+   subfits.old(object, what="models", verbose=FALSE)
+   subfits.new(object, what="models", verbose=FALSE)
+}
+\arguments{
+  \item{object}{
+    An object of class \code{"mppm"}
+    representing a point process model fitted to several point patterns. 
+  }
+  \item{what}{
+    What should be returned.
+    Either \code{"models"} to return the fitted models,
+    or \code{"interactions"} to return the fitted interactions only.
+  }
+  \item{verbose}{
+    Logical flag indicating whether to print progress reports.
+  }
+}
+\details{
+  \code{object} is assumed to have been generated by
+  \code{\link{mppm}}. It represents a point process model that has been
+  fitted to a list of several point patterns, with covariate data.
+
+  For each of the \emph{individual} point pattern
+  datasets, this function derives the corresponding fitted model
+  for that dataset only (i.e. a point process model for the \eqn{i}th
+  point pattern, that is consistent with \code{object}). 
+
+  If \code{what="models"},
+  the result is a list of point process models (a list of objects of class
+  \code{"ppm"}), one model for each point pattern dataset in the
+  original fit.
+  If \code{what="interactions"},
+  the result is a list of fitted interpoint interactions (a list of
+  objects of class
+  \code{"fii"}).
+
+  Two different algorithms are provided, as
+  \code{subfits.old} and \code{subfits.new}.
+  Currently \code{subfits} is the same as the old algorithm
+  \code{subfits.old} because the newer algorithm is too memory-hungry.
+}
+\value{
+  A list of point process models (a list of objects of class
+  \code{"ppm"}) or a list of fitted interpoint interactions (a list of
+  objects of class \code{"fii"}).
+}
+\examples{
+  H <- hyperframe(Wat=waterstriders)
+  fit <- mppm(Wat~x, data=H)
+  subfits(fit)
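+  ## extract only the fitted interpoint interactions
+  subfits(fit, what="interactions")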
+
+  H$Wat[[3]] <- rthin(H$Wat[[3]], 0.1)
+  fit2 <- mppm(Wat~x, data=H, random=~1|id)
+  subfits(fit2)
+}
+\references{
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  London: Chapman and Hall/CRC Press. 
+}
+\author{
+  Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff.
+  Implemented in \pkg{spatstat} by
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{mppm}},
+  \code{\link{ppm}}
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/subset.hyperframe.Rd b/man/subset.hyperframe.Rd
new file mode 100644
index 0000000..894a647
--- /dev/null
+++ b/man/subset.hyperframe.Rd
@@ -0,0 +1,83 @@
+\name{subset.hyperframe}
+\alias{subset.hyperframe}
+\title{
+  Subset of Hyperframe Satisfying A Condition
+}
+\description{
+  Given a hyperframe, return the subset specified by
+  imposing a condition on each row, and optionally by choosing
+  only some of the columns.
+}
+\usage{
+\method{subset}{hyperframe}(x, subset, select, \dots)
+}
+\arguments{
+  \item{x}{
+    A hyperframe (object of class \code{"hyperframe"}).
+  }
+  \item{subset}{
+    Logical expression indicating which rows are to be kept.
+    The expression may involve the names of columns of \code{x}
+    and will be evaluated by \code{\link{with.hyperframe}}.
+  }
+  \item{select}{
+    Expression indicating which columns of \code{x} should be kept.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{[.hyperframe}}
+    such as \code{drop} and \code{strip}.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{subset}}.
+  It extracts the subset of rows of \code{x}
+  that satisfy the logical expression
+  \code{subset}, and retains only the columns of \code{x} that are
+  specified by the expression \code{select}. The result is always a
+  hyperframe.
+
+  The argument \code{subset} determines the subset of rows that
+  will be extracted. It should be a logical expression.
+  It may involve the names of columns of \code{x}.
+  The default is to keep all rows.
+
+  The argument \code{select} determines which columns of \code{x}
+  will be retained.
+  It should be an expression involving the names of columns
+  (which will be interpreted as integers representing the positions of
+  these columns). For example if there are columns named
+  \code{A} to \code{Z}, then \code{select=D:F} is a valid expression
+  and means that columns \code{D}, \code{E} and \code{F} will be
+  retained. Similarly \code{select=-(A:C)} is valid and means that columns
+  \code{A} to \code{C} will be deleted.  
+  The default is to retain all columns.
+
+  Setting \code{subset=FALSE} will remove all the rows.
+  Setting \code{select=FALSE} will remove all the columns.
+  
+  The result is always a hyperframe.
+}
+\value{
+  A hyperframe.
+}  
+\author{\adrian, \rolf and \ege}
+\seealso{
+  \code{\link[base]{subset}},
+  \code{\link{[.hyperframe}}
+}
+\examples{
+ a <- subset(flu, virustype=="wt")
+
+ aa <- subset(flu, minnndist(pattern) > 10)
+
+ aaa <- subset(flu, virustype=="wt", select = -pattern)
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/subset.ppp.Rd b/man/subset.ppp.Rd
new file mode 100644
index 0000000..4553785
--- /dev/null
+++ b/man/subset.ppp.Rd
@@ -0,0 +1,144 @@
+\name{subset.ppp}
+\alias{subset.ppp}
+\alias{subset.pp3}
+\alias{subset.lpp}
+\alias{subset.ppx}
+\title{
+  Subset of Point Pattern Satisfying A Condition
+}
+\description{
+  Given a point pattern, return the subset of points which satisfy
+  a specified condition.
+}
+\usage{
+\method{subset}{ppp}(x, subset, select, drop=FALSE, \dots)
+
+\method{subset}{pp3}(x, subset, select, drop=FALSE, \dots)
+
+\method{subset}{lpp}(x, subset, select, drop=FALSE, \dots)
+
+\method{subset}{ppx}(x, subset, select, drop=FALSE, \dots)
+}
+\arguments{
+  \item{x}{
+    A point pattern (object of class \code{"ppp"},
+    \code{"lpp"}, \code{"pp3"} or \code{"ppx"}).
+  }
+  \item{subset}{
+    Logical expression indicating which points are to be kept.
+    The expression may involve the names of spatial coordinates
+    (\code{x}, \code{y}, etc), the \code{marks}, and
+    (if there is more than one column of marks)
+    the names of individual columns of marks.
+    Missing values are taken as false. See Details.
+  }
+  \item{select}{
+    Expression indicating which columns of marks should be kept.
+    The \emph{names} of columns of marks can be used in this expression,
+    and will be treated as if they were column indices.
+    See Details.
+  }
+  \item{drop}{
+    Logical value indicating whether to remove unused levels
+    of the marks, if the marks are a factor.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{subset}}.
+  It extracts the subset of points of \code{x}
+  that satisfy the logical expression
+  \code{subset}, and retains only the columns of marks that are
+  specified by the expression \code{select}. The result is always a point
+  pattern, with the same window as \code{x}.
+
+  The argument \code{subset} determines the subset of points that
+  will be extracted. It should be a logical expression.
+  It may involve the variable names
+  \code{x} and \code{y} representing the Cartesian coordinates;
+  the names of other spatial coordinates or local coordinates;
+  the name \code{marks} representing the marks;
+  and (if there is more than one column of marks)
+  the names of individual columns of marks.
+  The default is to keep all points.
+
+  The argument \code{select} determines which columns of marks
+  will be retained (if there are several columns of marks).
+  It should be an expression involving the names of columns of marks
+  (which will be interpreted as integers representing the positions of
+  these columns). For example if there are columns of marks named
+  \code{A} to \code{Z}, then \code{select=D:F} is a valid expression
+  and means that columns \code{D}, \code{E} and \code{F} will be
+  retained. Similarly \code{select=-(A:C)} is valid and means that columns
+  \code{A} to \code{C} will be deleted.  
+  The default is to retain all columns.
+
+  Setting \code{subset=FALSE} will produce an empty point pattern
+  (i.e. containing zero points) in the same window as \code{x}.
+  Setting \code{select=FALSE} or \code{select= -marks} will
+  remove all the marks from \code{x}.
+
+  The argument \code{drop} determines whether to remove
+  unused levels of a factor, if the resulting point pattern is multitype
+  (i.e. the marks are a factor) or if the marks are a data frame
+  in which some of the columns are factors.
+
+  The result is always a point pattern, of the same class as \code{x}.
+  Spatial coordinates (and local
+  coordinates) are always retained. To extract only some
+  columns of marks or coordinates as a data frame,
+  use \code{subset(as.data.frame(x), ...)}.
+}
+\section{Other kinds of subset arguments}{
+  Alternatively the argument \code{subset} can be any kind of subset index
+  acceptable to \code{\link{[.ppp}}, \code{\link{[.pp3}},
+  \code{\link{[.ppx}}. This argument selects which points of \code{x}
+  will be retained.
+
+  \bold{Warning:} if the argument \code{subset} is
+  a window, this is interpreted as specifying the subset of points that fall
+  inside that window, but the resulting point pattern has the same window
+  as the original pattern \code{x}.
+}
+\value{
+  A point pattern of the same class as \code{x}, in the same
+  spatial window as \code{x}. The result is a subset of \code{x},
+  possibly with some columns of marks removed.
+}  
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link[base]{subset}},
+  
+  \code{\link{[.ppp}},
+  \code{\link{[.pp3}},
+  \code{\link{[.lpp}},
+  \code{\link{[.ppx}}
+}
+\examples{
+ plot(subset(cells, x > 0.5))
+
+ subset(amacrine, marks == "on")
+
+ subset(amacrine, marks == "on", drop=TRUE)
+
+ subset(redwood, nndist(redwood) > 0.04)
+
+ subset(finpines, select=height)
+
+ subset(finpines, diameter > 2, height)
+
+ subset(nbfires, year==1999 & ign.src == "campfire",
+                 select=cause:fnl.size)
+
+ v <- subset(chicago, x + y > 1100 & marks == "assault")
+
+ vv <- subset(chicago, x + y > 1100 & marks == "assault", drop=TRUE)
+
+ a <- subset(rpoispp3(40), z > 0.5)
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/subspaceDistance.Rd b/man/subspaceDistance.Rd
new file mode 100644
index 0000000..5e19dfc
--- /dev/null
+++ b/man/subspaceDistance.Rd
@@ -0,0 +1,52 @@
+\name{subspaceDistance}
+\alias{subspaceDistance}
+\title{
+  Distance Between Linear Spaces
+}
+\description{
+  Evaluate the distance between two linear subspaces
+  using the measure proposed by Li, Zha and Chiaromonte (2005). 
+}
+\usage{
+subspaceDistance(B0, B1)
+}
+\arguments{
+  \item{B0}{
+    Matrix whose columns are a basis for the first subspace.
+  }
+  \item{B1}{
+    Matrix whose columns are a basis for the second subspace.
+  }
+}
+\details{
+  This algorithm calculates the maximum absolute value of the
+  eigenvalues of \eqn{P1-P0} where \eqn{P0,P1} are the projection
+  matrices onto the subspaces generated by \code{B0,B1}.
+  This measure of distance was proposed by Li, Zha and Chiaromonte
+  (2005). See also Xia (2007).
+}
+\value{
+  A single numeric value.
+}
+\references{
+  Guan, Y. and Wang, H. (2010)
+  Sufficient dimension reduction for spatial point
+  processes directed by Gaussian random fields.
+  \emph{Journal of the Royal Statistical Society, Series B},
+  \bold{72}, 367--387.
+
+  Li, B., Zha, H. and Chiaromonte, F. (2005) Contour regression: a
+  general approach to dimension reduction. 
+  \emph{Annals of Statistics} \bold{33}, 1580--1616.
+
+  Xia, Y. (2007)
+  A constructive approach to the estimation of dimension reduction
+  directions. 
+  \emph{Annals of Statistics} \bold{35}, 2654--2690.
+}
+\author{
+  Matlab original by Yongtao Guan,
+  translated to \R by Suman Rakshit.
+}
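+\examples{
+  ## an illustrative sketch with arbitrarily chosen bases:
+  ## the (x,y)-plane in R^3, and a plane tilted 45 degrees about
+  ## the x-axis; the resulting distance lies between 0 and 1
+  B0 <- cbind(c(1,0,0), c(0,1,0))
+  B1 <- cbind(c(1,0,0), c(0,1,1)/sqrt(2))
+  subspaceDistance(B0, B1)
+}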
+\keyword{multivariate}
+\keyword{algebra}
diff --git a/man/suffstat.Rd b/man/suffstat.Rd
new file mode 100644
index 0000000..44c7888
--- /dev/null
+++ b/man/suffstat.Rd
@@ -0,0 +1,117 @@
+\name{suffstat}
+\alias{suffstat}
+\title{Sufficient Statistic of Point Process Model}
+\description{
+  The canonical sufficient statistic of a 
+  point process model is evaluated for a given point pattern.
+}
+\usage{
+  suffstat(model, X=data.ppm(model))
+}
+\arguments{
+  \item{model}{A fitted point process model (object of class
+    \code{"ppm"}).
+  }
+  \item{X}{
+    A point pattern (object of class \code{"ppp"}).
+  }
+}
+\value{
+  A numeric vector of sufficient statistics. The entries
+  correspond to the model coefficients \code{coef(model)}.
+}
+\details{
+  The canonical sufficient statistic
+  of \code{model} is evaluated for the point pattern \code{X}.
+  This computation is useful for various Monte Carlo methods.
+  
+  Here \code{model} should be a point process model (object of class
+  \code{"ppm"}, see \code{\link{ppm.object}}), typically obtained
+  from the model-fitting function \code{\link{ppm}}. The argument
+  \code{X} should be a point pattern (object of class \code{"ppp"}).
+
+  Every point process model fitted by \code{\link{ppm}} has
+  a probability density of the form
+  \deqn{f(x) = Z(\theta) \exp(\theta^T S(x))}{f(x) = Z(theta) exp(theta * S(x))}
+  where \eqn{x} denotes a typical realisation (i.e. a point pattern),
+  \eqn{\theta}{theta} is the vector of model coefficients,
+  \eqn{Z(\theta)}{Z(theta)} is a normalising constant,
+  and \eqn{S(x)} is a function of the realisation \eqn{x}, called the
+  \dQuote{canonical sufficient statistic} of the model.
+
+  For example, the stationary Poisson process has canonical sufficient
+  statistic \eqn{S(x)=n(x)}, the number of points in \eqn{x}.
+  The stationary Strauss process with interaction range \eqn{r}
+  (and fitted with no edge correction) has canonical sufficient statistic
+  \eqn{S(x)=(n(x),s(x))} where \eqn{s(x)} is the number of pairs
+  of points in \eqn{x} which are closer than a distance \eqn{r}
+  to each other. 
+
+  \code{suffstat(model, X)} returns the value of \eqn{S(x)}, where \eqn{S} is
+  the canonical sufficient statistic associated with \code{model},
+  evaluated when \eqn{x} is the given point pattern \code{X}.
+  The result is a numeric vector, with entries which correspond to the
+  entries of the coefficient vector \code{coef(model)}.
+
+  The sufficient statistic \eqn{S}
+  does not depend on the fitted coefficients
+  of the model. However it does depend on the irregular parameters
+  which are fixed in the original call to \code{\link{ppm}}, for
+  example, the interaction range \code{r} of the Strauss process.
+
+  The sufficient statistic also depends on the edge correction that
+  was used to fit the model. For example in a Strauss process,
+  \itemize{
+    \item
+    If the model is fitted with \code{correction="none"}, the sufficient
+    statistic is \eqn{S(x) = (n(x), s(x))} where \eqn{n(x)} is the
+    number of points and \eqn{s(x)} is the number of pairs of points
+    which are closer than \eqn{r} units apart.
+    \item
+    If the model is fitted with \code{correction="periodic"}, the sufficient
+    statistic is the same as above, except that distances are measured
+    in the periodic sense. 
+    \item
+    If the model is fitted with
+    \code{correction="translate"}, then \eqn{n(x)} is unchanged
+    but \eqn{s(x)} is replaced by a weighted sum (the sum of the translation
+    correction weights for all pairs of points which are closer than
+    \eqn{r} units apart).
+    \item
+    If the model is fitted with
+    \code{correction="border"} (the default), then points lying less than
+    \eqn{r} units from the boundary of the observation window are
+    treated as fixed. Thus \eqn{n(x)} is
+    replaced by the number \eqn{n_r(x)}{n[r](x)}
+    of points lying at least \eqn{r} units from
+    the boundary of the observation window, and \eqn{s(x)} is replaced by
+    the number \eqn{s_r(x)}{s[r](x)} of pairs of points, which are closer
+    than \eqn{r} units apart, and at least one of which lies
+    more than \eqn{r} units from the boundary of the observation window.
+  }
+
+  Non-finite values of the sufficient statistic (\code{NA} or
+  \code{-Inf}) may be returned if the point pattern \code{X} is
+  not a possible realisation of the model (i.e. if \code{X} has zero
+  probability of occurring under \code{model} for all values of
+  the canonical coefficients \eqn{\theta}{theta}).
+}
+\seealso{
+  \code{\link{ppm}}
+}
+\examples{
+    fitS <- ppm(swedishpines~1, Strauss(7))
+    X <- rpoispp(intensity(swedishpines), win=Window(swedishpines))
+    suffstat(fitS, X)
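+    ## sketch of a cross-check: with correction="none", the statistic
+    ## is (number of points, number of pairs closer than r = 7 units)
+    fitN <- ppm(swedishpines~1, Strauss(7), correction="none")
+    suffstat(fitN, X)
+    c(npoints(X), (sum(pairdist(X) < 7) - npoints(X))/2)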
+}
+\author{\adrian, \rolf and \ege}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/summary.anylist.Rd b/man/summary.anylist.Rd
new file mode 100644
index 0000000..0cd7aac
--- /dev/null
+++ b/man/summary.anylist.Rd
@@ -0,0 +1,44 @@
+\name{summary.anylist}
+\alias{summary.anylist}
+\title{Summary of a List of Things}
+\description{
+  Prints a useful summary of each item in a list of things.
+}
+\usage{
+ \method{summary}{anylist}(object, \dots)
+}
+\arguments{
+  \item{object}{
+    An object of class \code{"anylist"}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{summary}}.
+
+  An object of the class \code{"anylist"} is effectively a list
+  of things which are intended to be treated in a similar way.
+  See \code{\link{anylist}}.
+
+  This function extracts a useful summary of each of the items in the list.
+}
+\seealso{
+  \code{\link{anylist}},
+  \code{\link{summary}},
+  \code{\link{plot.anylist}}
+}
+\examples{
+  x <- anylist(A=runif(10), B=runif(10), C=runif(10))
+  summary(x)
+}
+\author{\adrian and \rolf}
+\keyword{spatial}
+\keyword{methods}
+
diff --git a/man/summary.im.Rd b/man/summary.im.Rd
new file mode 100644
index 0000000..d35ace4
--- /dev/null
+++ b/man/summary.im.Rd
@@ -0,0 +1,71 @@
+\name{summary.im}
+\alias{summary.im}
+\alias{print.summary.im}
+\title{Summarizing a Pixel Image}
+\description{
+  \code{summary} method for class \code{"im"}.
+}
+\usage{
+  \method{summary}{im}(object, \dots)
+  \method{print}{summary.im}(x, \dots)
+}
+\arguments{
+  \item{object}{A pixel image.}
+  \item{\dots}{Ignored.}
+  \item{x}{Object of class \code{"summary.im"} as returned by
+    \code{summary.im}.
+  }
+}
+\details{
+  This is a method for the generic \code{\link{summary}}
+  for the class \code{"im"}. An object of class \code{"im"}
+  describes a pixel image. See \code{\link{im.object}}
+  for details of this class. 
+  
+  \code{summary.im} extracts information about the pixel image,
+  and \code{print.summary.im} prints this information in a
+  comprehensible format.
+
+  In normal usage, \code{print.summary.im} is invoked implicitly
+  when the user calls \code{summary.im} without assigning its value
+  to anything. See the examples.
+
+  The information extracted by \code{summary.im} includes
+  \describe{
+    \item{range}{The range of the image values.}
+    \item{mean}{The mean of the image values.}
+    \item{integral}{The \dQuote{integral} of the image values,
+      calculated as the sum of the image values
+      multiplied by the area of one pixel.}
+    \item{dim}{The dimensions of the pixel array:
+      \code{dim[1]} is the number of rows in the array,
+      corresponding to the \bold{y} coordinate.}
+  }
+}
+\value{
+  \code{summary.im} returns an object of class \code{"summary.im"},
+  while \code{print.summary.im} returns \code{NULL}.
+}
+\seealso{
+  \code{\link{mean.im}}, \code{\link{integral.im}}, \code{\link{anyNA.im}}
+}
+\examples{
+  # make an image
+  X <- as.im(function(x,y) {x^2}, unit.square())
+  # summarize it
+  summary(X)
+  # save the summary
+  s <- summary(X)
+  # print it
+  print(s)
+  s
+  # extract stuff from the summary object
+  s$dim
+  s$range
+  s$integral
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{methods}
diff --git a/man/summary.kppm.Rd b/man/summary.kppm.Rd
new file mode 100644
index 0000000..edf39ed
--- /dev/null
+++ b/man/summary.kppm.Rd
@@ -0,0 +1,79 @@
+\name{summary.kppm}
+\alias{summary.kppm}
+\alias{print.summary.kppm}
+\title{Summarizing a Fitted Cox or Cluster Point Process Model}
+\description{
+  \code{summary} method for class \code{"kppm"}.
+}
+\usage{
+  \method{summary}{kppm}(object, \dots, quick=FALSE)
+
+  \method{print}{summary.kppm}(x, \dots)
+}
+\arguments{
+  \item{object}{
+    A fitted Cox or cluster point process model (object of
+    class \code{"kppm"}).
+  }
+  \item{quick}{Logical value controlling the scope of the summary.}
+  \item{\dots}{Arguments passed to \code{\link{summary.ppm}} or
+    \code{\link{print.summary.ppm}} controlling the treatment of
+    the trend component of the model.}
+  \item{x}{Object of class \code{"summary.kppm"} as returned by
+    \code{summary.kppm}.
+  }
+}
+\details{
+  This is a method for the generic \code{\link{summary}}
+  for the class \code{"kppm"}. An object of class \code{"kppm"}
+  describes a fitted Cox or cluster point process model.
+  See \code{\link{kppm}}.
+  
+  \code{summary.kppm} extracts information about the
+  type of model that has been fitted, the data to which the model was
+  fitted, and the values of the fitted coefficients.
+
+  \code{print.summary.kppm} prints this information in a
+  comprehensible format.
+
+  In normal usage, \code{print.summary.kppm} is invoked implicitly
+  when the user calls \code{summary.kppm} without assigning its value
+  to anything. See the examples.
+
+  You can also type \code{coef(summary(object))} to extract a table
+  of the fitted coefficients of the point process model \code{object}
+  together with standard errors and confidence limits.
+}
+\value{
+  \code{summary.kppm} returns an object of class \code{"summary.kppm"},
+  while \code{print.summary.kppm} returns \code{NULL}.
+
+  The result of \code{summary.kppm} includes at least the
+  following components:
+  \item{Xname}{character string name of the original point pattern data}
+  \item{stationary}{logical value indicating whether the model is
+    stationary}
+  \item{clusters}{the \code{clusters} argument to \code{\link{kppm}}}
+  \item{modelname}{character string describing the model}
+  \item{isPCP}{\code{TRUE} if the model is a Poisson cluster process,
+    \code{FALSE} if it is a log-Gaussian Cox process}
+  \item{lambda}{Estimated intensity: numeric value, or pixel image}
+  \item{mu}{Mean cluster size: numeric value, pixel image, or
+    \code{NULL}}
+  \item{clustpar}{list of fitted parameters for the cluster model}
+  \item{clustargs}{list of fixed parameters for the cluster model, if
+    any}
+  \item{callstring}{character string representing the original call to
+    \code{\link{kppm}}}
+}
+\examples{
+ fit <- kppm(redwood ~ 1, "Thomas")
+ summary(fit)
+ coef(summary(fit))
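+ # extract components of the summary (a sketch; component names
+ # as documented in the Value section above)
+ s <- summary(fit)
+ s$clusters
+ s$isPCP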
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{models}
diff --git a/man/summary.listof.Rd b/man/summary.listof.Rd
new file mode 100644
index 0000000..2dfabd8
--- /dev/null
+++ b/man/summary.listof.Rd
@@ -0,0 +1,43 @@
+\name{summary.listof}
+\alias{summary.listof}
+\title{Summary of a List of Things}
+\description{
+  Prints a useful summary of each item in a list of things.
+}
+\usage{
+ \method{summary}{listof}(object, \dots)
+}
+\arguments{
+  \item{object}{
+    An object of class \code{"listof"}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{summary}}.
+
+  An object of the class \code{"listof"} is effectively a list
+  of things which are all of the same class.
+
+  This function extracts a useful summary of each of the items in the list.
+}
+\seealso{
+  \code{\link{summary}},
+  \code{\link{plot.listof}}
+}
+\examples{
+  x <- list(A=runif(10), B=runif(10), C=runif(10))
+  class(x) <- c("listof", class(x))
+  summary(x)
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{methods}
+
diff --git a/man/summary.owin.Rd b/man/summary.owin.Rd
new file mode 100644
index 0000000..79b014f
--- /dev/null
+++ b/man/summary.owin.Rd
@@ -0,0 +1,41 @@
+\name{summary.owin}
+\alias{summary.owin}
+\title{Summary of a Spatial Window}
+\description{
+  Prints a useful description of a window object.
+}
+\usage{
+  \method{summary}{owin}(object, \dots)
+}
+\arguments{
+  \item{object}{Window (object of class \code{"owin"}).}
+  \item{\dots}{Ignored.}
+}
+\details{
+  A useful description of the window \code{object} is printed.
+
+  This is a method for the generic function \code{\link{summary}}.
+}
+\seealso{
+  \code{\link{summary}},
+  \code{\link{summary.ppp}},
+  \code{\link{print.owin}}
+}
+\examples{
+  summary(owin())  # the unit square
+
+  data(demopat)
+  W <- Window(demopat)  # weird polygonal window
+  summary(W)           # describes it
+
+  summary(as.mask(W))  # demonstrates current pixel resolution
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{methods}
+
diff --git a/man/summary.ppm.Rd b/man/summary.ppm.Rd
new file mode 100644
index 0000000..67d99a9
--- /dev/null
+++ b/man/summary.ppm.Rd
@@ -0,0 +1,97 @@
+\name{summary.ppm}
+\alias{summary.ppm}
+\alias{print.summary.ppm}
+\title{Summarizing a Fitted Point Process Model}
+\description{
+  \code{summary} method for class \code{"ppm"}.
+}
+\usage{
+  \method{summary}{ppm}(object, \dots, quick=FALSE, fine=FALSE)
+  \method{print}{summary.ppm}(x, \dots)
+}
+\arguments{
+  \item{object}{A fitted point process model.}
+  \item{\dots}{Ignored.}
+  \item{quick}{Logical flag controlling the scope of the summary.}
+  \item{fine}{
+    Logical value passed to \code{\link{vcov.ppm}} determining
+    whether to compute the quick, coarse estimate of variance
+    (\code{fine=FALSE}, the default) or the slower, finer estimate
+    (\code{fine=TRUE}).
+  }
+  \item{x}{Object of class \code{"summary.ppm"} as returned by
+    \code{summary.ppm}.
+  }
+}
+\details{
+  This is a method for the generic \code{\link{summary}}
+  for the class \code{"ppm"}. An object of class \code{"ppm"}
+  describes a fitted point process model. See \code{\link{ppm.object}}
+  for details of this class.
+  
+  \code{summary.ppm} extracts information about the
+  type of model that has been fitted, the data to which the model was
+  fitted, and the values of the fitted coefficients.
+  (If \code{quick=TRUE} then only the information about the type
+  of model is extracted.)
+
+  \code{print.summary.ppm} prints this information in a
+  comprehensible format.
+
+  In normal usage, \code{print.summary.ppm} is invoked implicitly
+  when the user calls \code{summary.ppm} without assigning its value
+  to anything. See the examples.
+
+  You can also type \code{coef(summary(object))} to extract a table
+  of the fitted coefficients of the point process model \code{object}
+  together with standard errors and confidence limits. 
+}
+\value{
+  \code{summary.ppm} returns an object of class \code{"summary.ppm"},
+  while \code{print.summary.ppm} returns \code{NULL}.
+}
+\examples{
+  # invent some data
+  X <- rpoispp(42)
+  # fit a model to it
+  fit <- ppm(X ~ x, Strauss(r=0.1))
+  # summarize the fitted model
+  summary(fit)
+  # `quick' option
+  summary(fit, quick=TRUE)
+  # coefficients with standard errors and CI
+  coef(summary(fit))
+  coef(summary(fit, fine=TRUE))
+
+  # save the full summary
+  s <- summary(fit)
+  # print it
+  print(s)
+  s
+  # extract stuff
+  names(s)
+  coef(s)
+  s$args$correction
+  s$name
+  s$trend$value
+
+  \dontrun{
+  # multitype pattern
+  data(demopat)
+  fit <- ppm(demopat, ~marks, Poisson())
+  summary(fit)
+  }
+
+  # model with external covariates
+  fitX <- ppm(X, ~Z, covariates=list(Z=function(x,y){x+y}))
+  summary(fitX)
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{models}
diff --git a/man/summary.ppp.Rd b/man/summary.ppp.Rd
new file mode 100644
index 0000000..602af6a
--- /dev/null
+++ b/man/summary.ppp.Rd
@@ -0,0 +1,62 @@
+\name{summary.ppp}
+\alias{summary.ppp}
+\title{Summary of a Point Pattern Dataset}
+\description{
+  Prints a useful summary of a point pattern dataset.
+}
+\usage{
+ \method{summary}{ppp}(object, \dots, checkdup=TRUE)
+}
+\arguments{
+  \item{object}{
+    Point pattern (object of class \code{"ppp"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{checkdup}{
+    Logical value indicating whether to check
+    for the presence of duplicate points.
+  }
+}
+\details{
+  A useful summary of the point pattern \code{object} is printed.
+
+  This is a method for the generic function \code{\link{summary}}.
+
+  If \code{checkdup=TRUE}, the pattern will be checked for the
+  presence of duplicate points, using \code{\link{duplicated.ppp}}.
+  This can be time-consuming if the pattern contains many points,
+  so the checking can be disabled by setting \code{checkdup=FALSE}.
+
+  If the point pattern was generated by simulation
+  using \code{\link{rmh}}, the parameters of the algorithm
+  are printed.
+}
+\seealso{
+  \code{\link{summary}},
+  \code{\link{summary.owin}},
+  \code{\link{print.ppp}}
+}
+\examples{
+  summary(cells)  # plain vanilla point pattern
+
+  # multitype point pattern
+  woods <- lansing
+  \testonly{woods <- woods[seq(1, npoints(woods), length=40)]}
+  summary(woods) # tabulates frequencies of each mark
+  
+  # numeric marks
+  trees <- longleaf
+  \testonly{trees <- trees[seq(1, npoints(trees), length=40)]}
+  summary(trees) # prints summary.default(marks(trees))
+
+  # weird polygonal window
+  summary(demopat)  # describes it
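+
+  # disable the duplicate-point check (a sketch; faster for large
+  # patterns, as explained in Details)
+  summary(demopat, checkdup=FALSE)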
+}
+\author{
+  \spatstatAuthors
+}
+\keyword{spatial}
+\keyword{methods}
+
diff --git a/man/summary.psp.Rd b/man/summary.psp.Rd
new file mode 100644
index 0000000..25e4b16
--- /dev/null
+++ b/man/summary.psp.Rd
@@ -0,0 +1,36 @@
+\name{summary.psp}
+\alias{summary.psp}
+\title{Summary of a Line Segment Pattern Dataset}
+\description{
+  Prints a useful summary of a line segment pattern dataset.
+}
+\usage{
+  \method{summary}{psp}(object, \dots)
+}
+\arguments{
+  \item{object}{Line segment pattern (object of class \code{"psp"}).}
+  \item{\dots}{Ignored.}
+}
+\details{
+  A useful summary of the line segment pattern \code{object} is printed.
+
+  This is a method for the generic function \code{\link{summary}}.
+}
+\seealso{
+  \code{\link{summary}},
+  \code{\link{summary.owin}},
+  \code{\link{print.psp}}
+}
+\examples{
+  a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  summary(a)  # describes it
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{methods}
+
diff --git a/man/summary.quad.Rd b/man/summary.quad.Rd
new file mode 100644
index 0000000..2bb5b13
--- /dev/null
+++ b/man/summary.quad.Rd
@@ -0,0 +1,62 @@
+\name{summary.quad}
+\alias{summary.quad}
+\alias{print.summary.quad}
+\title{Summarizing a Quadrature Scheme}
+\description{
+  \code{summary} method for class \code{"quad"}.
+}
+\usage{
+  \method{summary}{quad}(object, \dots, checkdup=FALSE)
+  \method{print}{summary.quad}(x, \dots, dp=3)
+}
+\arguments{
+  \item{object}{A quadrature scheme.}
+  \item{\dots}{Ignored.}
+  \item{checkdup}{
+    Logical value indicating whether to test for duplicated points.
+  }
+  \item{dp}{Number of significant digits to print.} 
+  \item{x}{Object of class \code{"summary.quad"} returned by
+    \code{summary.quad}.}
+}
+\details{
+  This is a method for the generic \code{\link{summary}}
+  for the class \code{"quad"}. An object of class \code{"quad"}
+  describes a quadrature scheme, used to fit a point process model.
+  See \code{\link{quad.object}} for details of this class.
+  
+  \code{summary.quad} extracts information about the
+  quadrature scheme,
+  and \code{print.summary.quad} prints this information in a
+  comprehensible format.
+
+  In normal usage, \code{print.summary.quad} is invoked implicitly
+  when the user calls \code{summary.quad} without assigning its value
+  to anything. See the examples.
+}
+\value{
+  \code{summary.quad} returns an object of class \code{"summary.quad"},
+  while \code{print.summary.quad} returns \code{NULL}.
+}
+\examples{
+  # make a quadrature scheme
+  Q <- quadscheme(rpoispp(42))
+  # summarize it
+  summary(Q)
+  # save the summary
+  s <- summary(Q)
+  # print it
+  print(s)
+  s
+  # extract total quadrature weight
+  s$w$all$sum
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{methods}
+
diff --git a/man/summary.solist.Rd b/man/summary.solist.Rd
new file mode 100644
index 0000000..c90df32
--- /dev/null
+++ b/man/summary.solist.Rd
@@ -0,0 +1,47 @@
+\name{summary.solist}
+\alias{summary.solist}
+\title{Summary of a List of Spatial Objects}
+\description{
+  Prints a useful summary of each entry in a list of two-dimensional
+  spatial objects.
+}
+\usage{
+ \method{summary}{solist}(object, \dots)
+}
+\arguments{
+  \item{object}{
+    An object of class \code{"solist"}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{summary}}.
+
+  An object of the class \code{"solist"} is effectively a list
+  of two-dimensional spatial datasets.
+  See \code{\link{solist}}.
+
+  This function extracts a useful summary of each of the datasets.
+}
+\seealso{
+  \code{\link{solist}},
+  \code{\link{summary}},
+  \code{\link{plot.solist}}
+}
+\examples{
+  x <- solist(cells, japanesepines, redwood)
+  summary(x)
+}
+\author{\adrian,
+  \rolf
+  and \ege
+}
+\keyword{spatial}
+\keyword{methods}
+
diff --git a/man/summary.splitppp.Rd b/man/summary.splitppp.Rd
new file mode 100644
index 0000000..8f3fc5c
--- /dev/null
+++ b/man/summary.splitppp.Rd
@@ -0,0 +1,45 @@
+\name{summary.splitppp}
+\alias{summary.splitppp}
+\title{Summary of a Split Point Pattern}
+\description{
+  Prints a useful summary of a split point pattern.
+}
+\usage{
+ \method{summary}{splitppp}(object, \dots)
+}
+\arguments{
+  \item{object}{
+    Split point pattern (object of class \code{"splitppp"}, effectively a
+    list of point patterns, usually created by \code{\link{split.ppp}}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{summary}}.
+
+  An object of the class \code{"splitppp"} is effectively a list
+  of point patterns (objects of class \code{"ppp"}) representing
+  different sub-patterns of an original point pattern.
+
+  This function extracts a useful summary of each of the sub-patterns.
+}
+\seealso{
+  \code{\link{summary}},
+  \code{\link{split}},
+  \code{\link{split.ppp}}
+}
+\examples{
+  data(amacrine)      # multitype point pattern
+  summary(split(amacrine))
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{methods}
+
diff --git a/man/sumouter.Rd b/man/sumouter.Rd
new file mode 100644
index 0000000..adf2d97
--- /dev/null
+++ b/man/sumouter.Rd
@@ -0,0 +1,92 @@
+\name{sumouter}
+\alias{sumouter}
+\alias{quadform}
+\alias{bilinearform}
+\title{Compute Quadratic Forms}
+\description{
+  Calculates certain quadratic forms of matrices.
+}
+\usage{
+  sumouter(x, w=NULL, y=x)
+  quadform(x, v)
+  bilinearform(x, v, y)
+}
+\arguments{
+  \item{x,y}{A matrix, whose rows are the vectors in the quadratic form.}
+  \item{w}{Optional vector of weights}
+  \item{v}{Matrix determining the quadratic form}
+}
+\value{
+  A vector or matrix.
+}
+\details{
+  The matrices \code{x} and \code{y} will be interpreted as
+  collections of row vectors. They must have the same number of rows.
+
+  The command \code{sumouter} computes the sum of the outer
+  products of corresponding row vectors, weighted by the entries of \code{w}:
+  \deqn{
+    M = \sum_i w_i x_i y_i^\top
+  }{
+    M = sum[i] (w[i] * outer(x[i,], y[i,]))
+  }
+  where the sum is over all rows of \code{x} 
+  (after removing any rows containing \code{NA} or other non-finite
+  values).
+  If \code{w} is missing, the weights will be taken as 1.
+  The result is a \eqn{p \times q}{p * q} matrix where
+  \code{p = ncol(x)} and \code{q = ncol(y)}.
+  
+  The command \code{quadform} evaluates the quadratic form, defined by
+  the matrix \code{v}, for each of the row vectors of \code{x}:
+ \deqn{ 
+   y_i = x_i V x_i^\top
+  }{
+    y[i] = x[i,] \%*\% v \%*\% t(x[i,])
+  }
+  The result \code{y} is a numeric vector of length \code{n} where
+  \code{n = nrow(x)}. If \code{x[i,]} contains \code{NA} or
+  other non-finite values, then \code{y[i] = NA}.
+
+  The command \code{bilinearform} evaluates the more general bilinear
+  form defined by the matrix \code{v}. Here \code{x} and \code{y} must
+  be matrices of the same dimensions. For each row vector of
+  \code{x} and corresponding row vector of \code{y}, the bilinear form is
+ \deqn{ 
+   z_i = x_i V y_i^\top
+  }{
+    z[i] = x[i,] \%*\% v \%*\% t(y[i,])
+  }
+  The result \code{z} is a numeric vector of length \code{n} where
+  \code{n = nrow(x)}. If \code{x[i,]} or \code{y[i,]} contains \code{NA} or
+  other non-finite values, then \code{z[i] = NA}.
+}
+\examples{
+  x <- matrix(1:12, 4, 3)
+  dimnames(x) <- list(c("Wilma", "Fred", "Barney", "Betty"), letters[1:3])
+  x
+
+  sumouter(x)
+
+  w <- 4:1
+  sumouter(x, w)
+  v <- matrix(1, 3, 3)
+  quadform(x, v)
+
+  # should be the same as quadform(x, v)
+  bilinearform(x, v, x)
+
+  # See what happens with NA's
+  x[3,2] <- NA
+  sumouter(x, w)
+  quadform(x, v)
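+
+  # cross-check (a sketch): quadform should agree with the direct
+  # row-wise computation x[i,] \%*\% v \%*\% t(x[i,]);
+  # rows containing NA give NA, as documented above
+  apply(x, 1, function(xi) xi \%*\% v \%*\% xi)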
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{array}
diff --git a/man/superimpose.Rd b/man/superimpose.Rd
new file mode 100644
index 0000000..fe0a3b1
--- /dev/null
+++ b/man/superimpose.Rd
@@ -0,0 +1,198 @@
+\name{superimpose}  
+\alias{superimpose}
+\alias{superimpose.ppp}
+\alias{superimpose.splitppp}
+\alias{superimpose.ppplist}
+\alias{superimpose.psp}
+\alias{superimpose.default}
+\title{Superimpose Several Geometric Patterns}
+\description{
+  Superimpose any number of point patterns or line segment patterns.
+}
+\usage{
+  superimpose(\dots)
+
+  \method{superimpose}{ppp}(\dots, W=NULL, check=TRUE)
+
+  \method{superimpose}{psp}(\dots, W=NULL, check=TRUE)
+
+  \method{superimpose}{splitppp}(\dots, W=NULL, check=TRUE)
+
+  \method{superimpose}{ppplist}(\dots, W=NULL, check=TRUE)
+
+  \method{superimpose}{default}(\dots)
+}
+\arguments{
+  \item{\dots}{
+    Any number of arguments, each of which represents either a point
+    pattern or a line segment pattern or a list of point patterns.
+  }
+  \item{W}{
+    Optional. Data determining the window for the resulting pattern.
+    Either a window (object of class \code{"owin"}, or something
+    acceptable to \code{\link{as.owin}}), or a function
+    which returns a window, or one of the strings
+    \code{"convex"}, \code{"rectangle"}, \code{"bbox"} or \code{"none"}.
+  }
+  \item{check}{
+    Logical value (passed to \code{\link{ppp}} or \code{\link{psp}}
+    as appropriate) determining whether to check the geometrical
+    validity of the resulting pattern.
+  }
+}
+\value{
+  For \code{superimpose.ppp}, a point pattern (object of class \code{"ppp"}).
+  For \code{superimpose.default}, either a point pattern
+  (object of class \code{"ppp"}) or a \code{list(x,y)}.
+  For \code{superimpose.psp}, a line segment pattern (object of class
+  \code{"psp"}).
+}
+\details{
+  This function is used to superimpose several geometric patterns
+  of the same kind, producing a single pattern of the same kind.
+  
+  The function \code{superimpose} is generic, with methods
+  for the class \code{ppp} of point patterns, the class \code{psp}
+  of line segment patterns, and a default method. There is also
+  a method for \code{lpp}, described separately in
+  \code{\link{superimpose.lpp}}.
+
+  The dispatch to a method is initially determined
+  by the class of the \emph{first} argument in \code{\dots}.
+  \itemize{
+    \item \code{default}:  If the first argument is 
+    \emph{not} an object of class \code{ppp} or \code{psp}, then the
+    default method \code{superimpose.default} is executed.
+    This checks the class of all arguments, and dispatches to the
+    appropriate method. Arguments of class \code{ppplist} can be handled.
+    \item \code{ppp}:
+    If the first \code{\dots} argument is an object of
+    class \code{ppp} then the method \code{superimpose.ppp}
+    is executed. All arguments in \code{\dots}
+    must be either \code{ppp} objects or lists
+    with components \code{x} and \code{y}. The result will
+    be an object of class \code{ppp}.
+    \item \code{psp}:
+    If the first \code{\dots} argument is an object of
+    class \code{psp} then the \code{psp} method is dispatched and all
+    \code{\dots} arguments must be \code{psp} objects.
+    The result is a \code{psp} object.
+  }
+
+  The patterns are \emph{not} required to have the same window
+  of observation. 
+
+  The window for the superimposed pattern is controlled
+  by the argument \code{W}.
+  \itemize{
+    \item
+    If \code{W} is a window (object of class \code{"owin"}
+    or something acceptable to \code{\link{as.owin}})
+    then this determines the window for the superimposed pattern.
+    \item
+    If \code{W} is \code{NULL}, or the character string \code{"none"},
+    then windows are extracted from the geometric patterns,
+    as follows. 
+    For \code{superimpose.psp}, all arguments \code{\dots}
+    are line segment patterns (objects of class \code{"psp"});
+    their observation windows are extracted; the union of these
+    windows is computed; and this union is taken to be the
+    window for the superimposed pattern.
+    For \code{superimpose.ppp} and \code{superimpose.default},
+    the arguments \code{\dots} are inspected, and 
+    any arguments which are point patterns (objects of class
+    \code{"ppp"}) are selected; their observation windows
+    are extracted, and the union of these windows
+    is taken to be the window for the superimposed point pattern.
+    For \code{superimpose.default} if none of the arguments
+    is of class \code{"ppp"} then no window is computed
+    and the result of \code{superimpose} is a \code{list(x,y)}.
+    \item
+    If \code{W} is one of the strings
+    \code{"convex"}, \code{"rectangle"} or \code{"bbox"}
+    then a window for the superimposed pattern
+    is computed from the coordinates of the
+    points or the line segments as follows.
+    \describe{
+      \item{\code{"bbox"}:}{the bounding box of the
+      points or line segments (see \code{\link{bounding.box.xy}});}
+      \item{\code{"convex"}:}{the Ripley-Rasson estimator
+      of a convex window (see \code{\link{ripras}});}
+      \item{\code{"rectangle"}:}{the Ripley-Rasson estimator
+      of a rectangular window
+      (using \code{\link{ripras}} with argument
+      \code{shape="rectangle"}).}
+    }
+    \item
+    If \code{W} is a function,
+    then this function is used to compute
+    a window for the superimposed pattern
+    from the coordinates of the
+    points or the line segments.
+    The function should accept input of the form \code{list(x,y)}
+    and is expected to return an object of class \code{"owin"}.
+    Examples of such functions are
+    \code{\link{ripras}} and \code{\link{bounding.box.xy}}.
+  }
+  The arguments \code{\dots} may be \emph{marked} patterns.
+  The marks of each component pattern must have the same format.
+  Numeric and character marks may be ``mixed''.  If there is such
+  mixing then the numeric marks are coerced to character in the
+  combining process. If the mark structures are all data frames,
+  then these data frames must have the same number of columns and
+  identical column names.
+
+  If the arguments \code{\dots} are given in the form \code{name=value},
+  then the \code{name}s will be used as an extra column of marks
+  attached to the elements of the corresponding patterns.
+}
+
+\seealso{
+  \code{\link{superimpose.lpp}},
+  \code{\link{concatxy}}, \code{\link{quadscheme}}.
+}
+\examples{
+  # superimposing point patterns
+  p1  <- runifrect(30)
+  p2  <- runifrect(42)
+  s1  <- superimpose(p1,p2) # Unmarked pattern.
+  p3  <- list(x=rnorm(20),y=rnorm(20))
+  s2  <- superimpose(p3,p2,p1) # Default method gets called.
+  s2a <- superimpose(p1,p2,p3) # Same as s2 except for order of points.
+  s3  <- superimpose(clyde=p1,irving=p2) # Marked pattern; marks a factor
+                                         # with levels "clyde" and "irving";
+                                         # warning given.
+  marks(p1) <- factor(sample(LETTERS[1:3],30,TRUE))
+  marks(p2) <- factor(sample(LETTERS[1:3],42,TRUE))
+  s5  <- superimpose(clyde=p1,irving=p2) # Marked pattern with extra column
+  marks(p2) <- data.frame(a=marks(p2),b=runif(42))
+  s6  <- try(superimpose(p1,p2)) # Gives an error.
+  marks(p1) <- data.frame(a=marks(p1),b=1:30)
+  s7  <- superimpose(p1,p2) # O.K.
+
+  # how to make a 2-type point pattern with types "a" and "b"
+  u <- superimpose(a = rpoispp(10), b = rpoispp(20))
+
+  # how to make a 2-type point pattern with types 1 and 2
+  u <- superimpose("1" = rpoispp(10), "2" = rpoispp(20))
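+
+  # choosing the window explicitly (a sketch of the W argument
+  # described in Details): a character rule, or a function such as ripras
+  u2 <- superimpose(runifrect(5), runifrect(10), W="convex")
+  u3 <- superimpose(runifrect(5), runifrect(10), W=ripras)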
+ 
+  # superimposing line segment patterns
+  X <- rpoisline(10)
+  Y <- as.psp(matrix(runif(40), 10, 4), window=owin())
+  Z <- superimpose(X, Y)
+
+  # being unreasonable
+  \dontrun{
+    crud <- try(superimpose(p1,p2,X,Y)) # Gives an error, of course!
+  }
+}
+\author{\adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/superimpose.lpp.Rd b/man/superimpose.lpp.Rd
new file mode 100644
index 0000000..c2ec5f0
--- /dev/null
+++ b/man/superimpose.lpp.Rd
@@ -0,0 +1,79 @@
+\name{superimpose.lpp}  
+\alias{superimpose.lpp}
+\title{Superimpose Several Point Patterns on Linear Network}
+\description{
+  Superimpose any number of point patterns on the same linear network.
+}
+\usage{
+  \method{superimpose}{lpp}(\dots, L=NULL)
+}
+\arguments{
+  \item{\dots}{
+    Any number of arguments, each of which represents a point
+    pattern on the same linear network.
+    Each argument can be either an object of class \code{"lpp"},
+    giving both the spatial coordinates of the points and the
+    linear network, or a \code{list(x,y)} or \code{list(x,y,seg,tp)}
+    giving just the spatial coordinates of the points.
+  }
+  \item{L}{
+    Optional. The linear network.
+    An object of class \code{"linnet"}.
+    This argument is required if none of the other arguments
+    is of class \code{"lpp"}. 
+  }
+}
+\value{
+  An object of class \code{"lpp"} representing the combined point
+  pattern on the linear network.
+}
+\details{
+  This function is used to superimpose several point patterns
+  on the same linear network. It is a method for the generic
+  function \code{\link{superimpose}}.
+
+  Each of the arguments \code{\dots}
+  can be either a point pattern on a linear network
+  (object of class \code{"lpp"} 
+  giving both the spatial coordinates of the points and the
+  linear network), or a \code{list(x,y)} or \code{list(x,y,seg,tp)}
+  giving just the spatial coordinates of the points.
+  These arguments must represent point patterns on the \emph{same}
+  linear network.
+
+  The argument \code{L} is an alternative way to specify the linear
+  network, and is required if none of the arguments \code{\dots} is an
+  object of class \code{"lpp"}.
+
+  The arguments \code{\dots} may be \emph{marked} patterns.
+  The marks of each component pattern must have the same format.
+  Numeric and character marks may be ``mixed''.  If there is such
+  mixing then the numeric marks are coerced to character in the
+  combining process. If the mark structures are all data frames,
+  then these data frames must have the same number of columns and
+  identical column names.
+
+  If the arguments \code{\dots} are given in the form \code{name=value},
+  then the \code{name}s will be used as an extra column of marks
+  attached to the elements of the corresponding patterns.
+}
+\seealso{
+  \code{\link{superimpose}}
+}
+\examples{
+  X <- rpoislpp(5, simplenet)
+  Y <- rpoislpp(10, simplenet)
+  superimpose(X,Y) # not marked
+  superimpose(A=X, B=Y) # multitype with types A and B
+}
+\author{\adrian,
+  \rolf,
+  \ege
+  and Greg McSwiggan.
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/swedishpines.Rd b/man/swedishpines.Rd
new file mode 100644
index 0000000..1d035ed
--- /dev/null
+++ b/man/swedishpines.Rd
@@ -0,0 +1,52 @@
+\name{swedishpines}
+\alias{swedishpines}
+\docType{data}
+\title{
+  Swedish Pines Point Pattern
+}
+\description{
+  The data give the locations of pine saplings
+  in a Swedish forest. 
+} 
+\format{
+  An object of class \code{"ppp"}
+  representing the point pattern of tree locations
+  in a rectangular plot 9.6 by 10 metres.
+
+  Cartesian coordinates are given in decimetres (multiples of 0.1 metre)
+  rounded to the nearest decimetre.
+  Type \code{\link{rescale}(swedishpines)} to get an equivalent dataset
+  where the coordinates are expressed in metres.
+  
+  See \code{\link{ppp.object}} for details of the format of a
+  point pattern object.
+}
+\usage{data(swedishpines)}
+\source{Strand (1972), Ripley (1981)}
+\section{Note}{
+  For previous analyses see Ripley (1981, pp. 172--175),
+  Venables and Ripley (1997, p. 483),
+  Baddeley and Turner (2000).
+}
+\references{
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42}, 283--322.
+ 
+  Ripley, B.D. (1981) 
+  \emph{Spatial statistics}.
+  John Wiley and Sons.
+
+  Strand, L. (1972). 
+  A model for stand growth.
+  \emph{IUFRO Third Conference Advisory Group of Forest Statisticians},
+  INRA, Institut National de la Recherche Agronomique, Paris.
+  Pages 207--216.
+
+  Venables, W.N. and Ripley, B.D. (1997)
+  \emph{Modern applied statistics with S-PLUS}.
+  Second edition. Springer Verlag. 
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/symbolmap.Rd b/man/symbolmap.Rd
new file mode 100644
index 0000000..be59108
--- /dev/null
+++ b/man/symbolmap.Rd
@@ -0,0 +1,152 @@
+\name{symbolmap}
+\alias{symbolmap}
+\title{
+  Graphics Symbol Map
+}
+\description{
+  Create a graphics symbol map that associates data values with
+  graphical symbols.
+}
+\usage{
+symbolmap(\dots, range = NULL, inputs = NULL)
+}
+\arguments{
+  \item{\dots}{
+    Named arguments specifying the graphical parameters.
+    See Details.
+  }
+  \item{range}{
+    Optional. Range of numbers that are mapped.
+    A numeric vector of length 2 giving the minimum and maximum
+    values that will be mapped.
+    Incompatible with \code{inputs}.
+  }
+  \item{inputs}{
+    Optional. A vector containing all the data values
+    that will be mapped to symbols.
+    Incompatible with \code{range}.
+  }
+}
+\details{
+  A graphical symbol map is an association between
+  data values and graphical symbols. 
+  The command \code{symbolmap} creates an object of class
+  \code{"symbolmap"} that represents a graphical symbol map.
+
+  Once a symbol map has been created, it can be applied to any
+  suitable data to generate a plot of those data.
+  This makes it easy to ensure that
+  the \emph{same} symbol map is used in two different plots.
+  The symbol map can be plotted as a legend to the plots,
+  and can also be plotted in its own right.
+  
+  The possible values of data that will be mapped
+  are specified by \code{range} or \code{inputs}.
+  \itemize{
+    \item if \code{range} is given, it should be a numeric vector
+    of length 2 giving the minimum and maximum values of the range
+    of numbers that will be mapped. These limits must be finite.
+    \item if \code{inputs} is given, it should be a vector
+    of any atomic type (e.g. numeric, character, logical, factor).
+    This vector contains all the possible data values
+    that will be mapped.
+    \item If neither \code{range} nor \code{inputs} is given,
+    it is assumed that the possible values are real numbers.
+  }
+  The association of data values with graphical symbols
+  is specified by the other arguments \code{\dots}
+  which are given in \code{name=value} form.
+  These arguments specify the kinds of symbols that will be
+  used, the sizes of the symbols, and graphics parameters for
+  drawing the symbols.
+
+  Each graphics parameter can be either a single
+  value, for example \code{shape="circles"},
+  or a \code{function(x)} which determines the value
+  of the graphics parameter as a function of the data \code{x},
+  for example \code{shape=function(x) ifelse(x > 0, "circles", "squares")}.
+  Colourmaps (see \code{\link{colourmap}}) are also acceptable
+  because they are functions.
+  
+  Currently recognised graphics parameters, and their
+  allowed values, are:
+  \describe{
+    \item{shape}{
+      The shape of the symbol: currently
+      either \code{"circles"}, \code{"squares"}, \code{"arrows"}
+      or \code{NA}.
+      This parameter takes precedence over \code{pch}.
+    }
+    \item{size}{
+      The size of the symbol: a positive number or zero.
+    }
+    \item{pch}{
+      Graphics character code:
+      a positive integer, or a single character.
+      See \code{\link[graphics]{par}}.
+    }
+    \item{cex}{
+      Graphics character expansion factor.
+    }
+    \item{cols}{
+      Colour of plotting characters.
+    }
+    \item{fg,bg}{
+      Colour of foreground (or symbol border) and background
+      (or symbol interior).
+    }
+    \item{col,lwd,lty}{
+      Colour, width and style of lines.
+    }
+    \item{etch}{
+      Logical. If \code{TRUE}, each symbol is surrounded
+      by a border drawn in the opposite colour,
+      which improves its visibility against the background.
+      Default is \code{FALSE}.
+    }
+    \item{direction,headlength,headangle,arrowtype}{
+      Numeric parameters of arrow symbols, applicable when
+      \code{shape="arrows"}. Here \code{direction} is the direction
+      of the arrow in degrees anticlockwise from the \eqn{x} axis;
+      \code{headlength} is the length of the head of the arrow in
+      coordinate units; \code{headangle} is the angle subtended by the point
+      of the arrow; and \code{arrowtype} is an integer code
+      specifying which ends of the shaft have arrowheads
+      attached (0 means no arrowheads, 1 is an arrowhead at the start
+      of the shaft, 2 is an arrowhead at the end of the shaft, and
+      3 is arrowheads at both ends). 
+    }
+  }
+  A vector of colour values is also acceptable for the arguments
+  \code{col,cols,fg,bg} if
+  \code{range} is specified.
+}
+\value{
+  An object of class \code{"symbolmap"}.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{plot.symbolmap}} to plot the symbol map itself.
+  
+  \code{\link{invoke.symbolmap}} to apply the symbol map to some data
+  and plot the resulting symbols.
+  
+  \code{\link{update.symbolmap}} to change the symbol map.
+}
+\examples{
+  g <- symbolmap(inputs=letters[1:10], pch=11:20)
+
+  g1 <- symbolmap(range=c(0,100), size=function(x) x/50)
+
+  g2 <- symbolmap(shape=function(x) ifelse(x > 0, "circles", "squares"),
+                    size=function(x) sqrt(ifelse(x > 0, x/pi, -x)),
+                    bg = function(x) ifelse(abs(x) < 1, "red", "black"))
+
+  colmap <- colourmap(topo.colors(20), range=c(0,10))
+  g3 <- symbolmap(pch=21, bg=colmap, range=c(0,10))
+  plot(g3)
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/tess.Rd b/man/tess.Rd
new file mode 100644
index 0000000..bdbd181
--- /dev/null
+++ b/man/tess.Rd
@@ -0,0 +1,160 @@
+\name{tess}
+\alias{tess}
+\title{Create a Tessellation}
+\description{
+  Creates an object of class \code{"tess"} representing a tessellation
+  of a spatial region.
+}
+\usage{
+  tess(..., xgrid = NULL, ygrid = NULL, tiles = NULL, image = NULL,
+            window=NULL, marks=NULL, keepempty=FALSE, unitname=NULL, check=TRUE)
+}
+\arguments{
+  \item{\dots}{Ignored.}
+  \item{xgrid,ygrid}{Cartesian coordinates of vertical and
+    horizontal lines determining a grid of rectangles.
+    Incompatible with other arguments.
+  }
+  \item{tiles}{List of tiles in the tessellation.
+    A list, each of whose elements is a window
+    (object of class \code{"owin"}). Incompatible with other arguments.
+  }
+  \item{image}{
+    Pixel image which specifies the tessellation.
+    Incompatible with other arguments.
+  }
+  \item{window}{
+    Optional.
+    The spatial region which is tessellated (i.e. the union of all the tiles).
+    An object of class \code{"owin"}.
+  }
+  \item{marks}{
+    Optional vector or data frame of marks associated with the tiles.
+  }
+  \item{keepempty}{
+    Logical flag indicating whether empty tiles should be retained
+    or deleted.
+  }
+  \item{unitname}{
+    Optional. Name of unit of length. Either a single character string,
+    or a vector of two character strings giving the
+    singular and plural forms, respectively.
+    If this argument is missing or \code{NULL},
+    information about the unitname will be
+    extracted from the other arguments.
+    If this argument is given, it overrides any other information
+    about the unitname.
+  }
+  \item{check}{
+    Logical value indicating whether to check the validity of the
+    input data. It is strongly recommended to use the default
+    value \code{check=TRUE}.
+  }
+}
+\details{
+  A tessellation is a collection of disjoint spatial regions
+  (called \emph{tiles}) that fit together to form a larger spatial
+  region. This command creates an object of class \code{"tess"} that
+  represents a tessellation.
+
+  Three types of tessellation are supported:
+  \describe{
+    \item{rectangular:}{
+      tiles are rectangles, with sides parallel to the \code{x} and
+      \code{y} axes. They may or may not have equal size and shape.
+      The arguments \code{xgrid} and \code{ygrid} determine
+      the positions of the vertical and horizontal grid lines,
+      respectively. (See \code{\link{quadrats}} for another way to do this.)
+    }
+    \item{tile list:}{
+      tiles are arbitrary spatial regions.
+      The argument \code{tiles} is a list of these tiles,
+      which are objects of class \code{"owin"}.
+    }
+    \item{pixel image:}{
+      Tiles are subsets of a fine grid of pixels.
+      The argument \code{image} is a pixel image (object of class
+      \code{"im"}) with factor values. Each level of the factor
+      represents a different tile of the tessellation. The pixels that
+      have a particular value of the factor constitute a tile. 
+    }
+  }
+
+  The optional argument \code{window} specifies the spatial region
+  formed by the union of all the tiles. In other words it specifies the
+  spatial region that is divided into tiles by the tessellation.
+  If this argument is missing or \code{NULL}, it will be determined by
+  computing the set union of all the tiles. This is a time-consuming
+  computation. For efficiency it is advisable to specify the window.
+  Note that the validity of the window will not be checked.
+
+  Empty tiles may occur, either because one of the entries in the list
+  \code{tiles} is an empty window, or because one of the levels of the
+  factor-valued pixel image \code{image} does not occur in the pixel data.
+  When \code{keepempty=TRUE}, empty tiles are permitted. 
+  When \code{keepempty=FALSE} (the default), tiles are not allowed to be
+  empty, and any empty tiles will be removed from the tessellation.
+
+  There are methods for \code{print}, \code{plot}, \code{[} and \code{[<-}
+  for tessellations. Use \code{\link{tiles}} to extract the list of
+  tiles in a tessellation, \code{\link{tilenames}} to extract the names
+  of the tiles, and \code{\link{tile.areas}} to compute their
+  areas.
+
+  The tiles may have marks, which can be extracted by
+  \code{\link{marks.tess}} and changed by \code{\link{marks<-.tess}}.
+
+  Tessellations can be used to classify the points of
+  a point pattern, in \code{\link{split.ppp}}, \code{\link{cut.ppp}} and
+  \code{\link{by.ppp}}.
+
+  To construct particular tessellations, see
+  \code{\link{quadrats}}, \code{\link{hextess}}, 
+  \code{\link{dirichlet}}, \code{\link{delaunay}}
+  and \code{\link{rpoislinetess}}.
+}
+\value{
+  An object of class \code{"tess"} representing the tessellation.
+}
+\seealso{
+  \code{\link{marks.tess}},
+  \code{\link{plot.tess}},
+  \code{\link{[.tess}},
+  \code{\link{as.tess}},
+  \code{\link{tiles}},
+  \code{\link{intersect.tess}},
+  \code{\link{split.ppp}},
+  \code{\link{cut.ppp}},
+  \code{\link{by.ppp}},
+  \code{\link{bdist.tiles}},
+  \code{\link{tile.areas}}.
+
+  To construct particular tessellations, see
+  \code{\link{quadrats}}, \code{\link{hextess}},
+  \code{\link{dirichlet}}, \code{\link{delaunay}}
+  and \code{\link{rpoislinetess}}.
+
+  To divide space into pieces containing equal
+  amounts of stuff, use \code{\link{quantess}}.
+}
+\examples{
+  A <- tess(xgrid=0:4,ygrid=0:4)
+  A
+  B <- A[c(1, 2, 5, 7, 9)]
+  B
+  v <- as.im(function(x,y){factor(round(5 * (x^2 + y^2)))}, W=owin())
+  levels(v) <- letters[seq(length(levels(v)))]
+  E <- tess(image=v)
+  E
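+
+  # tessellation from an explicit list of tiles (a sketch):
+  # two half-squares, with the window supplied for efficiency
+  H <- tess(tiles=list(left=owin(c(0, 0.5), c(0, 1)),
+                       right=owin(c(0.5, 1), c(0, 1))),
+            window=owin())
+  H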
+}
+\author{
+  \adrian,
+  \rolf
+  and \ege
+}
+\keyword{spatial}
+\keyword{datagen}
diff --git a/man/texturemap.Rd b/man/texturemap.Rd
new file mode 100644
index 0000000..02e989d
--- /dev/null
+++ b/man/texturemap.Rd
@@ -0,0 +1,60 @@
+\name{texturemap}
+\alias{texturemap}
+\title{
+  Texture Map
+}
+\description{
+  Create a map that associates data values with graphical textures.
+}
+\usage{
+texturemap(inputs, textures, ...)
+}
+\arguments{
+  \item{inputs}{
+    A vector containing all the data values
+    that will be mapped to textures.
+  }
+  \item{textures}{
+    Optional. A vector of integer codes specifying the textures
+    to which the \code{inputs} will be mapped.
+  }
+  \item{\dots}{
+    Other graphics parameters such as \code{col}, \code{lwd}, \code{lty}.
+  }
+}
+\details{
+  A texture map is an association between data values and graphical
+  textures. The command \code{texturemap} creates an object of class
+  \code{"texturemap"} that represents a texture map.
+
+  Once a texture map has been created, it can be applied to any
+  suitable data to generate a texture plot of those data
+  using \code{\link{textureplot}}.
+  This makes it easy to ensure that
+  the \emph{same} texture map is used in two different plots.
+  The texture map can also be plotted in its own right.
+
+  The argument \code{inputs} should be a vector containing all the
+  possible data values (such as the levels of a factor) that are to be
+  mapped.
+
+  The \code{textures} should be integer values between 1 and 8,
+  representing the eight possible textures
+  described in the help for \code{\link{add.texture}}.
+  The default is \code{textures = 1:n} where
+  \code{n} is the length of \code{inputs}.
+}
+\value{
+  An object of class \code{"texturemap"} representing the texture map.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{textureplot}}
+}
+\examples{
+   texturemap(letters[1:4], 2:5, col=1:4, lwd=2)
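+   # the map can also be plotted in its own right (a sketch,
+   # assuming the plot method for texture maps)
+   tm <- texturemap(letters[1:4], 2:5, col=1:4, lwd=2)
+   plot(tm)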
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/textureplot.Rd b/man/textureplot.Rd
new file mode 100644
index 0000000..3ae8aa9
--- /dev/null
+++ b/man/textureplot.Rd
@@ -0,0 +1,112 @@
+\name{textureplot}
+\alias{textureplot}
+\title{
+  Plot Image or Tessellation Using Texture Fill
+}
+\description{
+  For a factor-valued pixel image, this command plots each level
+  of the factor using a different texture.
+  For a tessellation, each tile is plotted using a different texture.
+}
+\usage{
+textureplot(x, \dots,
+            main, add=FALSE, clipwin=NULL, do.plot = TRUE,
+            border=NULL, col = NULL, lwd = NULL, lty = NULL, spacing = NULL,
+            textures=1:8,
+            legend=TRUE,
+            leg.side=c("right", "left", "bottom", "top"),
+            legsep=0.1, legwid=0.2)
+}
+\arguments{
+  \item{x}{
+    A tessellation (object of class \code{"tess"} or something
+    acceptable to \code{\link{as.tess}}) with at most 8 tiles,
+    or a pixel image (object of class \code{"im"} or something
+    acceptable to \code{\link{as.im}}) whose pixel values
+    are a \code{factor} with at most 8 levels.
+  }
+  \item{\dots}{
+    Other arguments passed to \code{\link{add.texture}}.
+  }
+  \item{main}{
+    Character string giving a main title for the plot.
+  }
+  \item{add}{
+    Logical value indicating whether to draw on
+    the current plot (\code{add=TRUE}) or to initialise a new plot
+    (\code{add=FALSE}).
+  }
+  \item{clipwin}{
+    Optional. A window (object of class \code{"owin"}).
+    Only this subset of the image will be displayed.
+  }
+  \item{do.plot}{
+    Logical. Whether to actually do the plot.
+  }
+  \item{border}{
+    Colour for drawing the boundaries between the different regions.
+    The default (\code{border=NULL}) means to use \code{par("fg")}.
+    Use \code{border=NA} to omit borders.
+  }
+  \item{col}{
+    Numeric value or vector giving the colour or colours in which
+    the textures should be plotted.
+  }
+  \item{lwd}{
+    Numeric value or vector giving the line width or widths to be used.
+  }
+  \item{lty}{
+    Numeric value or vector giving the line type or types to be used.
+  }
+  \item{spacing}{
+    Numeric value or vector giving the \code{spacing} parameter
+    for the textures.
+  }
+  \item{textures}{
+    Textures to be used for each level. Either a texture map (object of
+    class \code{"texturemap"}) or a vector of integer codes
+    (to be interpreted by \code{\link{add.texture}}).
+  }
+  \item{legend}{
+    Logical. Whether to display an explanatory legend.
+  }
+  \item{leg.side}{Position of legend relative to main plot.}
+  \item{legsep}{
+    Separation between legend and main plot, as a fraction
+    of the shortest side length of the main plot.
+  }
+  \item{legwid}{
+    Width (if vertical) or height (if horizontal) of the legend
+    as a fraction of the shortest side length of the main plot.
+  }
+}
+\details{
+  If \code{x} is a tessellation, then each tile of the tessellation is
+  plotted and filled with a texture using \link{add.texture}.
+
+  If \code{x} is a factor-valued pixel image, then
+  for each level of the factor, the algorithm finds the region where the image
+  takes this value, and fills the region with a texture using
+  \code{\link{add.texture}}.
+}
+\value{
+  (Invisible) A texture map (object of class \code{"texturemap"})
+  associating a texture with each level of the factor.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{im}}, 
+  \code{\link{plot.im}},
+  \code{\link{add.texture}}.
+}
+\examples{
+  nd <- if(interactive()) 128 else 32
+  Z <- setcov(owin(), dimyx=nd)
+  Zcut <- cut(Z, 3, labels=c("Lo", "Med", "Hi"))
+  textureplot(Zcut)
+  textureplot(dirichlet(runifpoint(6)))
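+  # suppress the legend (a sketch using the legend argument
+  # documented above)
+  textureplot(Zcut, legend=FALSE)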
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/thinNetwork.Rd b/man/thinNetwork.Rd
new file mode 100644
index 0000000..14d1d40
--- /dev/null
+++ b/man/thinNetwork.Rd
@@ -0,0 +1,84 @@
+\name{thinNetwork}
+\alias{thinNetwork}
+\title{
+  Remove Vertices or Segments from a Linear Network
+}
+\description{
+  Delete some vertices and/or segments from a linear network
+  or related object.
+}
+\usage{
+thinNetwork(X, retainvertices, retainedges)
+}
+\arguments{
+  \item{X}{
+    A linear network (object of class \code{"linnet"}),
+    or a point pattern on a linear network (object of class
+    \code{"lpp"}).
+  }
+  \item{retainvertices}{
+    Optional. Subset index specifying which vertices should be retained
+    (not deleted). 
+  }
+  \item{retainedges}{
+    Optional. Subset index specifying which edges (segments) should be retained
+    (not deleted). 
+  }
+}
+\details{
+  This function deletes some of the vertices and edges (segments) in the
+  linear network.
+
+  The arguments \code{retainvertices} and \code{retainedges} can be
+  any kind of subset index: a vector of positive integers specifying which
+  vertices/edges should be retained; a vector of negative integers
+  specifying which vertices/edges should be deleted; or a logical vector
+  specifying whether each vertex/edge should be retained (\code{TRUE})
+  or deleted (\code{FALSE}).
+
+  Vertices are indexed in the same sequence as in
+  \code{vertices(as.linnet(X))}.
+  Segments are indexed in the same sequence as in
+  \code{as.psp(as.linnet(X))}.
+
+  The argument \code{retainedges} has higher precedence than
+  \code{retainvertices} in the sense that:
+  \itemize{
+    \item If \code{retainedges} is given, then
+    any vertex which is an endpoint of a retained edge will also be
+    retained.
+    \item
+    If \code{retainvertices} is given and \code{retainedges} is \bold{missing},
+    then any segment joining two retained vertices will also be retained.
+    \item
+    Thus, when both \code{retainvertices} and \code{retainedges} are
+    given, it is possible that more vertices will be retained than those
+    specified by \code{retainvertices}.
+  }
+
+  After the network has been altered, other consequential changes will
+  occur, including renumbering of the segments and vertices.
+  If \code{X} is a point pattern on a linear network, then data points
+  will be deleted if they lie on a deleted edge.
+}
+\value{
+  An object of the same kind as \code{X}.
+}
+\author{
+  \adrian
+  and Suman Rakshit.
+}
+\seealso{
+  \code{\link{linnet}} to make a network;
+
+  \code{\link{connected.linnet}} to extract connected components.
+}
+\examples{
+   L <- simplenet
+   plot(L, main="thinNetwork(L, retainedges=c(-3, -5))")
+   text(midpoints.psp(as.psp(L)), labels=1:nsegments(L), pos=3)
+   Lsub <- thinNetwork(L, retainedges=c(-3, -5))
+   plot(Lsub, add=TRUE, col="blue", lwd=2)
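+
+   # retaining vertices instead (a sketch): segments joining two
+   # retained vertices are also kept, as described in Details
+   Lsub2 <- thinNetwork(L, retainvertices=c(4, 5))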
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/thomas.estK.Rd b/man/thomas.estK.Rd
new file mode 100644
index 0000000..4c04feb
--- /dev/null
+++ b/man/thomas.estK.Rd
@@ -0,0 +1,166 @@
+\name{thomas.estK}
+\alias{thomas.estK}
+\title{Fit the Thomas Point Process by Minimum Contrast}
+\description{
+  Fits the Thomas point process to a point pattern dataset by the Method of
+  Minimum Contrast using the K function.
+}
+\usage{
+thomas.estK(X, startpar=c(kappa=1,scale=1), lambda=NULL,
+            q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...)
+}
+\arguments{
+  \item{X}{
+    Data to which the Thomas model will be fitted.
+    Either a point pattern or a summary statistic.
+    See Details.
+  }
+  \item{startpar}{
+    Vector of starting values for the parameters of the
+    Thomas process.
+  }
+  \item{lambda}{
+    Optional. An estimate of the intensity of the point process.
+  }
+  \item{q,p}{
+    Optional. Exponents for the contrast criterion.
+  }
+  \item{rmin, rmax}{
+    Optional. The interval of \eqn{r} values for the contrast criterion.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link[stats]{optim}}
+    to control the optimisation algorithm. See Details.
+  }
+}
+\details{
+  This algorithm fits the Thomas point process model to a point pattern dataset
+  by the Method of Minimum Contrast, using the \eqn{K} function.
+
+  The argument \code{X} can be either
+  \describe{
+    \item{a point pattern:}{An object of class \code{"ppp"}
+      representing a point pattern dataset. 
+      The \eqn{K} function of the point pattern will be computed
+      using \code{\link{Kest}}, and the method of minimum contrast
+      will be applied to this.
+    }
+    \item{a summary statistic:}{An object of class \code{"fv"} containing
+      the values of a summary statistic, computed for a point pattern
+      dataset. The summary statistic should be the \eqn{K} function,
+      and this object should have been obtained by a call to
+      \code{\link{Kest}} or one of its relatives.
+    }
+  }
+
+  The algorithm fits the Thomas point process to \code{X},
+  by finding the parameters of the Thomas model
+  which give the closest match between the
+  theoretical \eqn{K} function of the Thomas process
+  and the observed \eqn{K} function.
+  For a more detailed explanation of the Method of Minimum Contrast,
+  see \code{\link{mincontrast}}.
+  
+  The Thomas point process is described in
+  \Moller and Waagepetersen (2003, pp. 61--62). It is a cluster
+  process formed by taking a pattern of parent points, generated
+  according to a Poisson process with intensity \eqn{\kappa}{kappa}, and
+  around each parent point, generating a random number of offspring
+  points, such that the number of offspring of each parent is a Poisson
+  random variable with mean \eqn{\mu}{mu}, and the locations of the
+  offspring points of one parent are independent and isotropically
+  Normally distributed around the parent point with standard deviation
+  \eqn{\sigma}{sigma} which is equal to the parameter \code{scale}. The
+  named vector of starting values can use either \code{sigma2}
+  (\eqn{\sigma^2}{sigma^2}) or \code{scale} as the name of the second
+  component, but the latter is recommended for consistency with other
+  cluster models.
+
+  The theoretical \eqn{K}-function of the Thomas process is
+  \deqn{
+    K(r) = \pi r^2 + \frac 1 \kappa (1 - \exp(-\frac{r^2}{4\sigma^2})).
+  }{
+    K(r) = pi r^2 + (1 - exp(-r^2/(4 sigma^2)))/kappa.
+  }
+  The theoretical intensity
+  of the Thomas process is \eqn{\lambda = \kappa \mu}{lambda=kappa* mu}.
+
+  In this algorithm, the Method of Minimum Contrast is first used to find
+  optimal values of the parameters \eqn{\kappa}{kappa}
+  and \eqn{\sigma^2}{sigma^2}. Then the remaining parameter
+  \eqn{\mu}{mu} is inferred from the estimated intensity
+  \eqn{\lambda}{lambda}.
+
+  If the argument \code{lambda} is provided, then this is used
+  as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a
+  point pattern, then  \eqn{\lambda}{lambda}
+  will be estimated from \code{X}. 
+  If \code{X} is a summary statistic and \code{lambda} is missing,
+  then the intensity \eqn{\lambda}{lambda} cannot be estimated, and
+  the parameter \eqn{\mu}{mu} will be returned as \code{NA}.
+
+  The remaining arguments \code{rmin,rmax,q,p} control the
+  method of minimum contrast; see \code{\link{mincontrast}}.
+
+  The Thomas process can be simulated, using \code{\link{rThomas}}.
+
+  Homogeneous or inhomogeneous Thomas process models can also
+  be fitted using the function \code{\link{kppm}}.
+
+  The optimisation algorithm can be controlled through the
+  additional arguments \code{"..."} which are passed to the
+  optimisation function \code{\link[stats]{optim}}. For example,
+  to constrain the parameter values to a certain range,
+  use the argument \code{method="L-BFGS-B"} to select an optimisation
+  algorithm that respects box constraints, and use the arguments
+  \code{lower} and \code{upper} to specify (vectors of) minimum and
+  maximum values for each parameter.
+}
+\value{
+  An object of class \code{"minconfit"}. There are methods for printing
+  and plotting this object. It contains the following main components:
+  \item{par }{Vector of fitted parameter values.}
+  \item{fit }{Function value table (object of class \code{"fv"})
+    containing the observed values of the summary statistic
+    (\code{observed}) and the theoretical values of the summary
+    statistic computed from the fitted model parameters.
+  }
+}
+\references{
+  Diggle, P. J., Besag, J. and Gleaves, J. T. (1976)
+  Statistical analysis of spatial point patterns by
+  means of distance methods. \emph{Biometrics} \bold{32}, 659--667.
+
+  \Moller, J. and Waagepetersen, R. (2003).
+  Statistical Inference and Simulation for Spatial Point Processes.
+  Chapman and Hall/CRC, Boca Raton.
+
+  Thomas, M. (1949) A generalisation of Poisson's binomial limit for use
+  in ecology. \emph{Biometrika} \bold{36}, 18--25.
+
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\author{
+  Rasmus Waagepetersen
+  \email{rw@math.auc.dk}
+  Adapted for \pkg{spatstat} by \adrian
+}
+\seealso{
+  \code{\link{kppm}},
+  \code{\link{lgcp.estK}},
+  \code{\link{matclust.estK}},
+  \code{\link{mincontrast}},
+  \code{\link{Kest}},
+  \code{\link{rThomas}} to simulate the fitted model.
+}
+\examples{
+    data(redwood)
+    u <- thomas.estK(redwood, c(kappa=10, scale=0.1))
+    u
+    plot(u)
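+
+    # constrained fit (a sketch): box constraints on the parameters,
+    # passed through to optim as described in Details
+    u2 <- thomas.estK(redwood, c(kappa=10, scale=0.1),
+                      method="L-BFGS-B",
+                      lower=c(0.1, 0.01), upper=c(100, 1))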
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/thomas.estpcf.Rd b/man/thomas.estpcf.Rd
new file mode 100644
index 0000000..d9d9f7b
--- /dev/null
+++ b/man/thomas.estpcf.Rd
@@ -0,0 +1,170 @@
+\name{thomas.estpcf}
+\alias{thomas.estpcf}
+\title{Fit the Thomas Point Process by Minimum Contrast}
+\description{
+  Fits the Thomas point process to a point pattern dataset by the Method of
+  Minimum Contrast using the pair correlation function.
+}
+\usage{
+thomas.estpcf(X, startpar=c(kappa=1,scale=1), lambda=NULL,
+            q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list())
+}
+\arguments{
+  \item{X}{
+    Data to which the Thomas model will be fitted.
+    Either a point pattern or a summary statistic.
+    See Details.
+  }
+  \item{startpar}{
+    Vector of starting values for the parameters of the
+    Thomas process.
+  }
+  \item{lambda}{
+    Optional. An estimate of the intensity of the point process.
+  }
+  \item{q,p}{
+    Optional. Exponents for the contrast criterion.
+  }
+  \item{rmin, rmax}{
+    Optional. The interval of \eqn{r} values for the contrast criterion.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link[stats]{optim}}
+    to control the optimisation algorithm. See Details.
+  }
+  \item{pcfargs}{
+    Optional list containing arguments passed to \code{\link{pcf.ppp}}
+    to control the smoothing in the estimation of the
+    pair correlation function.
+  }
+}
+\details{
+  This algorithm fits the Thomas point process model to a point pattern dataset
+  by the Method of Minimum Contrast, using the pair correlation function
+  \code{\link{pcf}}.
+
+  The argument \code{X} can be either
+  \describe{
+    \item{a point pattern:}{An object of class \code{"ppp"}
+      representing a point pattern dataset. 
+      The pair correlation function of the point pattern will be computed
+      using \code{\link{pcf}}, and the method of minimum contrast
+      will be applied to this.
+    }
+    \item{a summary statistic:}{An object of class \code{"fv"} containing
+      the values of a summary statistic, computed for a point pattern
+      dataset. The summary statistic should be the pair correlation function,
+      and this object should have been obtained by a call to
+      \code{\link{pcf}} or one of its relatives.
+    }
+  }
+
+  The algorithm fits the Thomas point process to \code{X},
+  by finding the parameters of the Thomas model
+  which give the closest match between the
+  theoretical pair correlation function of the Thomas process
+  and the observed pair correlation function.
+  For a more detailed explanation of the Method of Minimum Contrast,
+  see \code{\link{mincontrast}}.
+  
+  The Thomas point process is described in
+  \Moller and Waagepetersen (2003, pp. 61--62). It is a cluster
+  process formed by taking a pattern of parent points, generated
+  according to a Poisson process with intensity \eqn{\kappa}{kappa}, and
+  around each parent point, generating a random number of offspring
+  points, such that the number of offspring of each parent is a Poisson
+  random variable with mean \eqn{\mu}{mu}, and the locations of the
+  offspring points of one parent are independent and isotropically
+  Normally distributed around the parent point with standard deviation
+  \eqn{\sigma}{sigma} which is equal to the parameter \code{scale}. The
+  named vector of starting values can use either \code{sigma2}
+  (\eqn{\sigma^2}{sigma^2}) or \code{scale} as the name of the second
+  component, but the latter is recommended for consistency with other
+  cluster models.
+
+  The theoretical pair correlation function of the Thomas process is
+  \deqn{
+    g(r) = 1 + \frac 1 {4\pi \kappa \sigma^2} \exp(-\frac{r^2}{4\sigma^2}).
+  }{
+    g(r) = 1 + exp(-r^2/(4 * sigma^2))/(4 * pi * kappa * sigma^2).
+  }
+  The theoretical intensity
+  of the Thomas process is \eqn{\lambda = \kappa \mu}{lambda = kappa * mu}.
+
+  In this algorithm, the Method of Minimum Contrast is first used to find
+  optimal values of the parameters \eqn{\kappa}{kappa}
+  and \eqn{\sigma^2}{sigma^2}. Then the remaining parameter
+  \eqn{\mu}{mu} is inferred from the estimated intensity
+  \eqn{\lambda}{lambda}.
+
+  If the argument \code{lambda} is provided, then this is used
+  as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a
+  point pattern, then  \eqn{\lambda}{lambda}
+  will be estimated from \code{X}. 
+  If \code{X} is a summary statistic and \code{lambda} is missing,
+  then the intensity \eqn{\lambda}{lambda} cannot be estimated, and
+  the parameter \eqn{\mu}{mu} will be returned as \code{NA}.
+
+  The remaining arguments \code{rmin,rmax,q,p} control the
+  method of minimum contrast; see \code{\link{mincontrast}}.
+
+  The Thomas process can be simulated, using \code{\link{rThomas}}.
+
+  Homogeneous or inhomogeneous Thomas process models can also
+  be fitted using the function \code{\link{kppm}}.
+
+  The optimisation algorithm can be controlled through the
+  additional arguments \code{"..."} which are passed to the
+  optimisation function \code{\link[stats]{optim}}. For example,
+  to constrain the parameter values to a certain range,
+  use the argument \code{method="L-BFGS-B"} to select an optimisation
+  algorithm that respects box constraints, and use the arguments
+  \code{lower} and \code{upper} to specify (vectors of) minimum and
+  maximum values for each parameter.
+}
+\value{
+  An object of class \code{"minconfit"}. There are methods for printing
+  and plotting this object. It contains the following main components:
+  \item{par }{Vector of fitted parameter values.}
+  \item{fit }{Function value table (object of class \code{"fv"})
+    containing the observed values of the summary statistic
+    (\code{observed}) and the theoretical values of the summary
+    statistic computed from the fitted model parameters.
+  }
+}
+\references{
+  Diggle, P. J., Besag, J. and Gleaves, J. T. (1976)
+  Statistical analysis of spatial point patterns by
+  means of distance methods. \emph{Biometrics} \bold{32}, 659--667.
+
+  \Moller, J. and Waagepetersen, R. (2003).
+  Statistical Inference and Simulation for Spatial Point Processes.
+  Chapman and Hall/CRC, Boca Raton.
+
+  Thomas, M. (1949) A generalisation of Poisson's binomial limit for use
+  in ecology. \emph{Biometrika} \bold{36}, 18--25.
+
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{thomas.estK}},
+  \code{\link{mincontrast}},
+  \code{\link{pcf}},
+  \code{\link{rThomas}} to simulate the fitted model.
+}
+\examples{
+    data(redwood)
+    u <- thomas.estpcf(redwood, c(kappa=10, scale=0.1))
+    u
+    plot(u, legendpos="topright")
+    u2 <- thomas.estpcf(redwood, c(kappa=10, scale=0.1),
+          pcfargs=list(stoyan=0.12))
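+    # a sketch of box-constrained fitting, via the arguments passed to optim
+    # (these bounds are illustrative, not canonical values)
+    u3 <- thomas.estpcf(redwood, c(kappa=10, scale=0.1),
+          method="L-BFGS-B", lower=c(0.01, 0.01), upper=c(1000, 10))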
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/tile.areas.Rd b/man/tile.areas.Rd
new file mode 100644
index 0000000..3de48ea
--- /dev/null
+++ b/man/tile.areas.Rd
@@ -0,0 +1,46 @@
+\name{tile.areas}
+\alias{tile.areas}
+\title{Compute Areas of Tiles in a Tessellation}
+\description{
+  Computes the area of each tile in a tessellation.
+}
+\usage{
+tile.areas(x)
+}
+\arguments{
+  \item{x}{A tessellation (object of class \code{"tess"}).}
+}
+\details{
+  A tessellation is a collection of disjoint spatial regions
+  (called \emph{tiles}) that fit together to form a larger spatial
+  region. See \code{\link{tess}}.
+
+  This command computes the area of each of the tiles 
+  that make up the tessellation \code{x}.
+  The result is a numeric vector
+  in the same order as the tiles would be listed by \code{tiles(x)}.
+}
+\value{
+  A numeric vector.
+}
+\seealso{
+  \code{\link{tess}},
+  \code{\link{tiles}},
+  \code{\link{tilenames}},
+  \code{\link{tiles.empty}}
+}
+\examples{
+  A <- tess(xgrid=0:2,ygrid=0:2)
+  tile.areas(A)
+  v <- as.im(function(x,y){factor(round(x^2 + y^2))}, W=owin())
+  E <- tess(image=v)
+  tile.areas(E)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/tileindex.Rd b/man/tileindex.Rd
new file mode 100644
index 0000000..46df9f9
--- /dev/null
+++ b/man/tileindex.Rd
@@ -0,0 +1,54 @@
+\name{tileindex}
+\alias{tileindex}
+\title{
+  Determine Which Tile Contains Each Given Point
+}
+\description{
+  Given a tessellation and a list of spatial points,
+  determine which tile of the tessellation contains each of the
+  given points.
+}
+\usage{
+tileindex(x, y, Z)
+}
+\arguments{
+  \item{x,y}{
+    Spatial coordinates.
+    Numeric vectors of equal length.
+  }
+  \item{Z}{
+    A tessellation (object of class \code{"tess"}).
+  }
+}
+\details{
+  This function determines which tile of the tessellation \code{Z}
+  contains each of the spatial points
+  with coordinates \code{(x[i],y[i])}.
+
+  The result is a factor, of the same length as \code{x} and \code{y},
+  indicating which tile contains each point. The levels of the factor
+  are the names of the tiles of \code{Z}.
+  Values are \code{NA} if the corresponding point lies outside the tessellation.
+}
+\value{
+  A factor, of the same length as \code{x} and \code{y},
+  whose levels are the names of the tiles of \code{Z}.
+}
+\author{
+  \spatstatAuthors
+}
+\seealso{
+  \code{\link{cut.ppp}} and \code{\link{split.ppp}} to
+  divide up the points of a point pattern according to
+  a tessellation.
+
+  \code{\link{as.function.tess}} to create a function whose
+  value is the tile index.
+}
+\examples{
+  X <- runifpoint(7)
+  V <- dirichlet(X)
+  tileindex(0.1, 0.4, V)
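+  # a point lying outside the tessellation yields NA
+  tileindex(1.2, 0.5, V)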
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/tilenames.Rd b/man/tilenames.Rd
new file mode 100644
index 0000000..f7ffcd2
--- /dev/null
+++ b/man/tilenames.Rd
@@ -0,0 +1,42 @@
+\name{tilenames}
+\alias{tilenames}
+\alias{tilenames<-}
+\title{Names of Tiles in a Tessellation}
+\description{
+  Extract or Change the Names of the Tiles in a Tessellation.
+}
+\usage{
+tilenames(x)
+tilenames(x) <- value
+}
+\arguments{
+  \item{x}{A tessellation (object of class \code{"tess"}).}
+  \item{value}{Character vector giving new names for the tiles.}
+}
+\details{
+  These functions extract or change the names of the 
+  tiles that make up the tessellation \code{x}.
+
+  If the tessellation is a regular grid, the tile names
+  cannot be changed.
+}
+\value{
+  \code{tilenames} returns a character vector.
+}
+\seealso{
+  \code{\link{tess}},
+  \code{\link{tiles}}
+}
+\examples{
+  D <- dirichlet(runifpoint(10))
+  tilenames(D)
+  tilenames(D) <- paste("Cell", 1:10)
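+  # confirm that the new names are in place
+  tilenames(D)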
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/tiles.Rd b/man/tiles.Rd
new file mode 100644
index 0000000..02bb5ad
--- /dev/null
+++ b/man/tiles.Rd
@@ -0,0 +1,44 @@
+\name{tiles}
+\alias{tiles}
+\title{Extract List of Tiles in a Tessellation}
+\description{
+  Extracts a list of the tiles that make up a tessellation.
+}
+\usage{
+tiles(x)
+}
+\arguments{
+  \item{x}{A tessellation (object of class \code{"tess"}).}
+}
+\details{
+  A tessellation is a collection of disjoint spatial regions
+  (called \emph{tiles}) that fit together to form a larger spatial
+  region. See \code{\link{tess}}.
+
+  The tiles that make up the tessellation \code{x}
+  are returned in a list.
+}
+\value{
+  A list of windows (objects of class \code{"owin"}).
+}
+\seealso{
+  \code{\link{tess}},
+  \code{\link{tilenames}},
+  \code{\link{tile.areas}},
+  \code{\link{tiles.empty}}
+}
+\examples{
+  A <- tess(xgrid=0:2,ygrid=0:2)
+  tiles(A)
+  v <- as.im(function(x,y){factor(round(x^2 + y^2))}, W=owin())
+  E <- tess(image=v)
+  tiles(E)
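+  # tiles are windows, so window operations apply directly,
+  # e.g. the area of each tile (compare tile.areas)
+  sapply(tiles(E), area)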
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/tiles.empty.Rd b/man/tiles.empty.Rd
new file mode 100644
index 0000000..2645e03
--- /dev/null
+++ b/man/tiles.empty.Rd
@@ -0,0 +1,55 @@
+\name{tiles.empty}
+\alias{tiles.empty}
+\title{Check For Empty Tiles in a Tessellation}
+\description{
+  Checks whether each tile in a tessellation is empty or non-empty.
+}
+\usage{
+tiles.empty(x)
+}
+\arguments{
+  \item{x}{A tessellation (object of class \code{"tess"}).}
+}
+\details{
+  A tessellation is a collection of disjoint spatial regions
+  (called \emph{tiles}) that fit together to form a larger spatial
+  region. See \code{\link{tess}}.
+
+  It is possible for some tiles of a tessellation to be empty.
+  For example, this can happen
+  when the tessellation \code{x} is obtained by restricting
+  another tessellation \code{y} to a smaller spatial domain \code{w}.
+
+  The function \code{tiles.empty} checks whether each tile is empty
+  or non-empty. The result is a logical vector,
+  with entries equal to \code{TRUE} when the corresponding tile is
+  empty. Results are given in the same order
+  as the tiles would be listed by \code{tiles(x)}.
+}
+\value{
+  A logical vector.
+}
+\seealso{
+  \code{\link{tess}},
+  \code{\link{tiles}},
+  \code{\link{tilenames}},
+  \code{\link{tile.areas}}
+}
+\examples{
+  A <- tess(xgrid=0:2,ygrid=0:2)
+  tiles.empty(A)
+  v <- as.im(function(x,y){factor(round(x^2 + y^2))}, W=owin())
+  E <- tess(image=v)
+  tiles.empty(E)
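+  # empty tiles can arise when restricting to a smaller domain
+  # (a sketch, assuming intersect.tess accepts keepempty=TRUE
+  # to retain empty intersections)
+  R <- intersect.tess(A, square(0.9), keepempty=TRUE)
+  tiles.empty(R)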
+}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/timeTaken.Rd b/man/timeTaken.Rd
new file mode 100644
index 0000000..8128194
--- /dev/null
+++ b/man/timeTaken.Rd
@@ -0,0 +1,48 @@
+\name{timeTaken}
+\alias{timeTaken}
+\title{
+  Extract the Total Computation Time
+}
+\description{
+  Given an object or objects that contain timing information
+  (reporting the amount of computer time taken to compute each object),
+  this function extracts the timing data and evaluates the total time taken.
+}
+\usage{
+timeTaken(..., warn=TRUE)
+}
+\arguments{
+  \item{\dots}{
+    One or more objects of class \code{"timed"} containing
+    timing data. 
+  }
+  \item{warn}{
+    Logical value indicating whether a warning should be issued if
+    some of the arguments do not contain timing information.
+  }
+}
+\details{
+  An object of class \code{"timed"} contains information on
+  the amount of computer time that was taken to compute the object.
+  See \code{\link{timed}}.
+
+  This function extracts the timing information from one or more
+  such objects, and calculates the total time.
+}
+\value{
+  An object inheriting the class \code{"timed"}.
+}
+\examples{
+  A <- timed(Kest(cells))
+  B <- timed(Gest(cells))
+  A
+  B
+  timeTaken(A,B)
+}
+\seealso{
+  \code{\link{timed}}
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{utilities}
diff --git a/man/timed.Rd b/man/timed.Rd
new file mode 100644
index 0000000..7c2ac26
--- /dev/null
+++ b/man/timed.Rd
@@ -0,0 +1,89 @@
+\name{timed}
+\alias{timed}
+\title{
+  Record the Computation Time
+}
+\description{
+  Saves the result of a calculation as an object of class \code{"timed"}
+  which includes information about the time taken to compute the result.
+  The computation time is printed when the object is printed.
+}
+\usage{
+timed(x, ..., starttime = NULL, timetaken = NULL)
+}
+\arguments{
+  \item{x}{
+    An expression to be evaluated, or an object that has already
+    been evaluated.
+  }
+  \item{starttime}{
+    The time at which the computation is defined to have started.
+    The default is the current time.
+    Ignored if \code{timetaken} is given.
+  }
+  \item{timetaken}{
+    The length of time taken to perform the computation.
+    The default is the time taken to evaluate \code{x}.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This is a simple mechanism for recording how long it takes to
+  perform complicated calculations (usually for the purposes of
+  reporting in a publication).
+  
+  If \code{x} is an expression to be evaluated, \code{timed(x)}
+  evaluates the expression and measures the
+  time taken to evaluate it.
+  The result is saved as an object of the class
+  \code{"timed"}. Printing this object displays the computation time.
+
+  If \code{x} is an object which has already been computed,
+  then the time taken to compute the object can be specified either directly
+  by the argument \code{timetaken}, or indirectly by the argument
+  \code{starttime}.
+  \itemize{
+    \item
+    \code{timetaken} is the duration of time taken to perform
+    the computation. It should be the difference of two clock times
+    returned by \code{\link{proc.time}}. Typically the user
+    sets \code{begin <- proc.time()} before commencing the calculations,
+    then \code{end <- proc.time()} after completing the calculations,
+    and then sets \code{timetaken <- end - begin}.
+    \item
+    \code{starttime} is the clock time at which the computation started.
+    It should be a value that was returned by \code{\link{proc.time}}
+    at some earlier time when the calculations commenced.
+    When \code{timed} is called, the computation time will be taken
+    as the difference between the current clock time and
+    \code{starttime}. Typically the user
+    sets \code{begin <- proc.time()} before commencing the calculations,
+    and when the calculations are completed, the user calls
+    \code{result <- timed(result, starttime=begin)}. 
+  }
+  
+  If the result of evaluating \code{x} belongs to other S3 classes,
+  then the result of \code{timed(x, \dots)} also inherits these classes,
+  and printing the object will display the appropriate information for these
+  classes as well.
+}
+\value{
+  An object inheriting the class \code{"timed"}.
+}
+\examples{
+  timed(clarkevans(cells))
+
+  timed(Kest(cells))
+
+  answer <- timed(42, timetaken=4.1e17)
+  answer
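+
+  # supplying the start time explicitly
+  begin <- proc.time()
+  D <- density(cells)
+  D <- timed(D, starttime=begin)
+  D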
+}
+\seealso{
+  \code{\link{timeTaken}} to extract the time taken.
+}
+\author{
+  \spatstatAuthors.
+}
+\keyword{utilities}
diff --git a/man/transect.im.Rd b/man/transect.im.Rd
new file mode 100644
index 0000000..94cc8a7
--- /dev/null
+++ b/man/transect.im.Rd
@@ -0,0 +1,76 @@
+\name{transect.im}
+\alias{transect.im}
+\title{
+  Pixel Values Along a Transect
+}
+\description{
+  Extract the pixel values of a pixel image at each point along
+  a linear transect.
+}
+\usage{
+transect.im(X, ..., from="bottomleft", to="topright",
+             click=FALSE, add=FALSE)
+}
+\arguments{
+  \item{X}{
+    A pixel image (object of class \code{"im"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{from,to}{
+    Optional. Start point and end point of the transect.
+    Pairs of \eqn{(x,y)} coordinates
+    in a format acceptable to \code{\link{xy.coords}},
+    or keywords \code{"bottom"}, \code{"left"}, \code{"top"},
+    \code{"right"}, \code{"bottomleft"} etc.
+  }
+  \item{click}{
+    Optional.
+    Logical value.
+    If \code{TRUE}, the linear transect is determined interactively
+    by the user, who clicks two points on the current plot.
+  }
+  \item{add}{
+    Logical. If \code{click=TRUE}, this argument determines
+    whether to perform interactive tasks on the current plot (\code{add=TRUE})
+    or to start by plotting \code{X} (\code{add=FALSE}).
+  }
+}
+\details{
+  The pixel values of the image \code{X} along a line segment 
+  will be extracted. The result is a function table (\code{"fv"} object)
+  which can be plotted directly.
+
+  If \code{click=TRUE}, then the user is prompted to click two points on
+  the plot of \code{X}. These endpoints define the transect.
+
+  Otherwise, the transect is defined by the endpoints
+  \code{from} and \code{to}. The default is a diagonal transect from
+  bottom left to top right of the frame.
+}
+\value{
+  An object of class \code{"fv"} which can be plotted.
+}
+\author{
+  \adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{im}}
+}
+\examples{
+  Z <- density(redwood)
+  plot(transect.im(Z))
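+  # endpoints may also be specified as keywords or coordinates
+  plot(transect.im(Z, from="left", to="right"))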
+  \dontrun{
+    plot(transect.im(Z, click=TRUE))
+  }
+}
+\keyword{spatial}
+\keyword{manip}
+\keyword{iplot}
diff --git a/man/transmat.Rd b/man/transmat.Rd
new file mode 100644
index 0000000..bf6b4ca
--- /dev/null
+++ b/man/transmat.Rd
@@ -0,0 +1,89 @@
+\name{transmat}
+\alias{transmat}
+\title{
+  Convert Pixel Array Between Different Conventions
+}
+\description{
+  This function provides a simple way to convert arrays of pixel data
+  between different display conventions.
+}
+\usage{
+transmat(m, from, to)
+}
+\arguments{
+  \item{m}{
+    A matrix.
+  }
+  \item{from,to}{
+    Specifications of the spatial arrangement of the pixels.
+    See Details.
+  }
+}
+\details{
+  Pixel images are handled by many different software packages.
+  In virtually all of these, the pixel values are stored in a matrix,
+  and are accessed using the row and column indices of the matrix.
+  However, different pieces of software use different conventions for
+  mapping the matrix indices \eqn{[i,j]} to the spatial coordinates
+  \eqn{(x,y)}.
+
+  \itemize{
+    \item
+    In the \emph{Cartesian} convention, the first matrix index \eqn{i}
+    is associated with the first Cartesian coordinate \eqn{x}, 
+    and \eqn{j} is associated with \eqn{y}. This convention is used in
+    \code{\link[graphics]{image.default}}.
+    \item
+    In the \emph{European reading order} convention, a matrix is displayed
+    in the spatial coordinate system as it would be printed in a page of text:
+    \eqn{i} is effectively associated with the negative \eqn{y} coordinate, 
+    and \eqn{j} is associated with \eqn{x}. This convention is used in some
+    image file formats.
+    \item
+    In the \code{spatstat} convention, \eqn{i}
+    is associated with the increasing \eqn{y} coordinate, 
+    and \eqn{j} is associated with \eqn{x}. This is also used in some
+    image file formats.
+  }
+  To convert between these conventions, use the function
+  \code{transmat}. If a matrix \code{m} contains pixel image data
+  that is correctly displayed by software that uses the Cartesian convention,
+  and we wish to convert it to the European reading convention, we can type
+  \code{mm <- transmat(m, from="Cartesian", to="European")}.
+  The transformed matrix \code{mm} will then be correctly displayed by
+  software that uses the European convention. 
+
+  Each of the arguments \code{from} and \code{to} can be one of the names
+  \code{"Cartesian"}, \code{"European"} or \code{"spatstat"} (partially matched)
+  or it can be a list specifying another convention. For example
+  \code{to=list(x="-i", y="-j")!} specifies that rows of the output matrix 
+  are expected to be displayed as vertical columns in the plot, 
+  starting at the right side of the plot, as in the traditional 
+  Chinese, Japanese and Korean writing order.
+}
+\value{
+  Another matrix obtained by rearranging the entries of \code{m}.
+}
+\author{
+\adrian
+
+
+\rolf
+
+and \ege
+
+}
+\examples{
+  opa <- par(mfrow=c(1,2))
+  # image in spatstat format
+  Z <- bei.extra$elev
+  plot(Z, main="plot.im", ribbon=FALSE)
+  m <- as.matrix(Z)
+  # convert matrix to format suitable for display by image.default
+  Y <- transmat(m, from="spatstat", to="Cartesian")
+  image(Y, asp=0.5, main="image.default", axes=FALSE)
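+  # conversion to European reading order, as used by some image file formats
+  mE <- transmat(m, from="spatstat", to="European")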
+  par(opa)
+}
+\keyword{spatial}
+\keyword{hplot}
+\keyword{manip}
diff --git a/man/treebranchlabels.Rd b/man/treebranchlabels.Rd
new file mode 100644
index 0000000..11c6ea1
--- /dev/null
+++ b/man/treebranchlabels.Rd
@@ -0,0 +1,76 @@
+\name{treebranchlabels}
+\alias{treebranchlabels}
+\title{
+  Label Vertices of a Tree by Branch Membership
+}
+\description{
+  Given a linear network which is a tree (acyclic graph),
+  this function assigns a label to each vertex, indicating
+  its position in the tree.
+}
+\usage{
+  treebranchlabels(L, root = 1)
+}
+\arguments{
+  \item{L}{
+    Linear network (object of class \code{"linnet"}).
+    The network must have no loops.
+  }
+  \item{root}{
+    Root of the tree. An integer index identifying
+    which point in \code{vertices(L)} is the root of the tree.
+  }
+}
+\details{
+  The network \code{L} should be a tree, that is, it must have no loops.
+
+  This function computes a character string label for each vertex
+  of the network \code{L}. The vertex identified by \code{root}
+  (that is, \code{vertices(L)[root]}) is taken as the root of the tree
+  and is given the empty label \code{""}.
+  \itemize{
+    \item If there are several line
+    segments which meet at the root vertex, each of these segments is the
+    start of a new branch of the tree; the other endpoints of these
+    segments are assigned the labels 
+    \code{"a"}, \code{"b"}, \code{"c"} and so on.
+    \item If only one segment issues from the root vertex,
+    the other endpoint of this segment is assigned the empty label
+    \code{""}.
+  }
+  A similar rule is then applied to each of the newly-labelled vertices.
+  If the vertex labelled \code{"a"} is joined to two other unlabelled
+  vertices, these will be labelled \code{"aa"} and \code{"ab"}.
+  The rule is applied recursively until all vertices have been labelled.
+
+  If \code{L} is not a tree, the algorithm will terminate, but the
+  results will be nonsense.
+}
+\value{
+  A vector of character strings, with one entry for each point in
+  \code{vertices(L)}.
+}
+\author{
+\spatstatAuthors
+}
+\seealso{
+  \code{\link{deletebranch}}, \code{\link{extractbranch}},
+  \code{\link{treeprune}} for manipulating a network using the
+  branch labels.
+  
+  \code{\link{linnet}} for creating a network.
+}
+\examples{
+  # make a simple tree
+  m <- simplenet$m
+  m[8,10] <- m[10,8] <- FALSE
+  L <- linnet(vertices(simplenet), m)
+  plot(L, main="")
+  # compute branch labels 
+  tb <- treebranchlabels(L, 1)
+  tbc <- paste0("[", tb, "]")
+  text(vertices(L), labels=tbc, cex=2)
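+  # the labels identify branches by plain string matching;
+  # e.g. the indices of vertices on the branch starting at "b"
+  which(substr(tb, 1, 1) == "b")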
+}
+\keyword{spatial}
+\keyword{math}
+
diff --git a/man/treeprune.Rd b/man/treeprune.Rd
new file mode 100644
index 0000000..8a9b38f
--- /dev/null
+++ b/man/treeprune.Rd
@@ -0,0 +1,63 @@
+\name{treeprune}
+\alias{treeprune}
+\title{
+  Prune Tree to Given Level
+}
+\description{
+  Prune a tree by removing all the branches above a given level.
+}
+\usage{
+treeprune(X, root = 1, level = 0)
+}
+\arguments{
+  \item{X}{
+    Object of class \code{"linnet"} or \code{"lpp"}.
+  }
+  \item{root}{
+    Index of the root vertex amongst the vertices of \code{as.linnet(X)}.
+  }
+  \item{level}{
+    Integer specifying the level above which the tree should be pruned.
+  }
+}
+\details{
+  The object \code{X} must be either a linear network, or a derived
+  object such as a point pattern on a linear network. The linear network
+  must be an acyclic graph (i.e. must not contain any loops) so that it
+  can be interpreted as a tree. 
+  
+  This function removes all vertices
+  for which \code{\link{treebranchlabels}} gives a
+  string more than \code{level} characters long.
+}
+\value{
+  Object of the same kind as \code{X}.
+}
+\author{
+\spatstatAuthors
+}
+\seealso{
+  \code{\link{treebranchlabels}} for calculating the branch labels.
+  
+  \code{\link{deletebranch}} for removing entire branches.
+  \code{\link{extractbranch}} for extracting entire branches.
+
+  \code{\link{linnet}} for creating networks.
+}
+\examples{
+  # make a simple tree
+  m <- simplenet$m
+  m[8,10] <- m[10,8] <- FALSE
+  L <- linnet(vertices(simplenet), m)
+  plot(L, main="")
+  # compute branch labels 
+  tb <- treebranchlabels(L, 1)
+  tbc <- paste0("[", tb, "]")
+  text(vertices(L), labels=tbc, cex=2)
+  # prune tree 
+  tp <- treeprune(L, root=1, 1)
+  plot(tp, add=TRUE, col="blue", lwd=3)
+}
+\keyword{spatial}
+\keyword{manip}
+
diff --git a/man/triangulate.owin.Rd b/man/triangulate.owin.Rd
new file mode 100644
index 0000000..0b955ef
--- /dev/null
+++ b/man/triangulate.owin.Rd
@@ -0,0 +1,47 @@
+\name{triangulate.owin}
+\alias{triangulate.owin}
+\title{
+  Decompose Window into Triangles
+}
+\description{
+  Given a spatial window, this function decomposes the window
+  into disjoint triangles. 
+  The result is a tessellation of the window
+  in which each tile is a triangle.
+}
+\usage{
+triangulate.owin(W)
+}
+\arguments{
+  \item{W}{Window (object of class \code{"owin"}).}
+}
+\details{
+  The window \code{W} will be decomposed into disjoint triangles.
+  The result is a tessellation of \code{W} in which each tile is a
+  triangle. All triangle vertices lie on the boundary
+  of the original polygon.
+  
+  The window is first converted to a polygonal window using
+  \code{\link{as.polygonal}}. The vertices of the polygonal window
+  are extracted, and the Delaunay triangulation of these vertices
+  is computed using \code{\link{delaunay}}. Each Delaunay triangle
+  is intersected with the window: if the result is not a triangle, 
+  the triangulation procedure is applied recursively to this smaller polygon. 
+}
+\value{
+  Tessellation (object of class \code{"tess"}).
+}
+\author{
+  \spatstatAuthors
+}
+\seealso{
+  \code{\link{tess}},
+  \code{\link{delaunay}},
+  \code{\link{as.polygonal}}
+}
+\examples{
+  plot(triangulate.owin(letterR))
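+  # the result is a tessellation whose tiles are triangles;
+  # count them (the count depends on the polygon)
+  TR <- triangulate.owin(letterR)
+  length(tiles(TR))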
+}
+\keyword{spatial}
+\keyword{manip}
+
diff --git a/man/trim.rectangle.Rd b/man/trim.rectangle.Rd
new file mode 100644
index 0000000..9c23a9f
--- /dev/null
+++ b/man/trim.rectangle.Rd
@@ -0,0 +1,54 @@
+\name{trim.rectangle}
+\alias{trim.rectangle}
+\title{Cut Margins from a Rectangle}
+\description{
+  Trims a margin from a rectangle.
+}
+\usage{
+ trim.rectangle(W, xmargin=0, ymargin=xmargin)
+}
+\arguments{
+  \item{W}{
+    A window (object of class \code{"owin"}).
+    Must be of type \code{"rectangle"}.
+  }
+  \item{xmargin}{Width of horizontal margin to be trimmed.
+    A single nonnegative number, or a vector of length 2
+    indicating margins of unequal width at left and right.
+  }
+  \item{ymargin}{Height of vertical margin to be trimmed.
+    A single nonnegative number, or a vector of length 2
+    indicating margins of unequal width at bottom and top.
+  }
+}
+\value{
+  Another object of class \code{"owin"} representing the
+  window after margins are trimmed.
+}
+\details{
+  This is a simple convenience function to trim off a
+  margin of specified width and height from each side of a
+  rectangular window. Unequal margins can also be trimmed.
+}
+\seealso{
+  \code{\link{grow.rectangle}},
+  \code{\link{erosion}},
+  \code{\link{owin.object}}
+}
+\examples{
+  w <- square(10)
+  # trim a margin of width 1 from all four sides
+  square9 <- trim.rectangle(w, 1)
+
+  # trim margin of width 3 from the right side
+  # and margin of height 4 from top edge.
+  v <- trim.rectangle(w, c(0,3), c(0,4))
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/triplet.family.Rd b/man/triplet.family.Rd
new file mode 100644
index 0000000..bf6cc3e
--- /dev/null
+++ b/man/triplet.family.Rd
@@ -0,0 +1,46 @@
+\name{triplet.family}
+\alias{triplet.family}
+\title{Triplet Interaction Family}
+\description{
+  An object describing the family of all Gibbs point processes
+  with interaction order equal to 3.
+}
+\details{
+  \bold{Advanced Use Only!}
+  
+  This structure would not normally be touched by
+  the user. It describes the interaction structure
+  of Gibbs point processes which have infinite order of interaction,
+  such as the triplet interaction process \cite{\link{Triplets}}.
+ 
+  Anyway, \code{triplet.family} is an object of class \code{"isf"}
+  containing a function \code{triplet.family$eval} for
+  evaluating the sufficient statistics of a Gibbs
+  point process model taking an exponential family form. 
+} 
+\seealso{
+  \code{\link{Triplets}} to create the triplet interaction process
+  structure.
+  
+  Other families:
+  \code{\link{pairwise.family}},
+  \code{\link{pairsat.family}},
+  \code{\link{inforder.family}},
+  \code{\link{ord.family}}.
+
+  
+}
+\references{
+  Baddeley, A. and Turner, R. (2000)
+  Practical maximum pseudolikelihood for spatial point patterns.
+  \emph{Australian and New Zealand Journal of Statistics}
+  \bold{42}, 283--322.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/tweak.colourmap.Rd b/man/tweak.colourmap.Rd
new file mode 100644
index 0000000..9dc0134
--- /dev/null
+++ b/man/tweak.colourmap.Rd
@@ -0,0 +1,62 @@
+\name{tweak.colourmap}
+\alias{tweak.colourmap}
+\title{
+  Change Colour Values in a Colour Map
+}
+\description{
+  Assign new colour values to some of the entries in a colour map.
+}
+\usage{
+tweak.colourmap(m, col, ..., inputs=NULL, range=NULL)
+}
+
+\arguments{
+  \item{m}{
+    A colour map (object of class \code{"colourmap"}).
+  }
+  \item{inputs}{
+    Input values to the colour map, to be assigned new colours.
+    Incompatible with \code{range}.
+  }
+  \item{range}{
+    Numeric vector of length 2 specifying a range of numerical values
+    which should be assigned a new colour.
+    Incompatible with \code{inputs}.
+  }
+  \item{col}{
+    Replacement colours for the specified \code{inputs} or
+    the specified \code{range} of values.
+  }
+  \item{\dots}{Other arguments are ignored.}
+}
+\details{
+  This function changes the colour map \code{m}
+  by assigning new colours to each of the
+  input values specified by \code{inputs},
+  or by assigning a single new colour
+  to the range of input values specified by \code{range}.
+  
+  The modified colour map is returned.
+}
+\value{
+  Another colour map (object of class \code{"colourmap"}).
+}
+\seealso{
+  \code{\link{colourmap}}, 
+  \code{\link{interp.colourmap}}, 
+  \code{\link[spatstat:colourtools]{colourtools}}.
+}
+\examples{
+  co <- colourmap(rainbow(32), range=c(0,1))
+  plot(tweak.colourmap(co, inputs=c(0.5, 0.6), "white"))
+  plot(tweak.colourmap(co, range=c(0.5,0.6), "white"))
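+  # the modified colour map is itself a function;
+  # query a value inside the tweaked range
+  co2 <- tweak.colourmap(co, range=c(0.5,0.6), "white")
+  co2(0.55)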
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{color}
+
diff --git a/man/union.quad.Rd b/man/union.quad.Rd
new file mode 100644
index 0000000..f6d8f9d
--- /dev/null
+++ b/man/union.quad.Rd
@@ -0,0 +1,46 @@
+\name{union.quad}
+\alias{union.quad}
+\title{Union of Data and Dummy Points}
+\description{
+  Combines the data and dummy points of a quadrature scheme
+  into a single point pattern.
+}
+\usage{
+ union.quad(Q)
+}
+\arguments{
+  \item{Q}{A quadrature scheme (an object of class \code{"quad"}).}
+}
+\value{
+  A point pattern (of class \code{"ppp"}).
+}
+\details{
+  The argument \code{Q} should be a quadrature scheme (an object of class
+  \code{"quad"}, see \code{\link{quad.object}} for details).
+  
+  This function combines the data and dummy points of \code{Q}
+  into a single point pattern. If either the data or the dummy points
+  are marked, the result is a marked point pattern.
+
+  The function \code{\link{as.ppp}} will perform the same task.
+}
+\seealso{
+  \code{\link{quad.object}},
+  \code{\link{as.ppp}}
+}
+\examples{
+  data(simdat)
+  Q <- quadscheme(simdat, default.dummy(simdat))
+  U <- union.quad(Q)
+  \dontrun{plot(U)}
+  # equivalent:
+  U <- as.ppp(Q)
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/unique.ppp.Rd b/man/unique.ppp.Rd
new file mode 100644
index 0000000..d0a7926
--- /dev/null
+++ b/man/unique.ppp.Rd
@@ -0,0 +1,64 @@
+\name{unique.ppp}
+\alias{unique.ppp}
+\alias{unique.ppx}
+\title{Extract Unique Points from a Spatial Point Pattern}
+\description{
+  Removes any points that are identical to other points
+  in a spatial point pattern.
+}
+\usage{
+ \method{unique}{ppp}(x, \dots, warn=FALSE)
+
+ \method{unique}{ppx}(x, \dots, warn=FALSE)
+}
+\arguments{
+  \item{x}{
+    A spatial point pattern
+    (object of class \code{"ppp"} or \code{"ppx"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{duplicated.ppp}}
+    or \code{\link{duplicated.data.frame}}.
+  }
+  \item{warn}{
+    Logical. If \code{TRUE}, issue a warning message if any
+    duplicated points were found.
+  }
+}
+\value{
+  Another point pattern object.
+}
+\details{
+  These are methods for the generic function \code{unique} for 
+  point pattern datasets (of class \code{"ppp"}, see
+  \code{\link{ppp.object}}, or class \code{"ppx"}).
+
+  This function removes duplicate points in \code{x},
+  and returns a point pattern.
+
+  Two points in a point pattern are deemed to be identical
+  if their \eqn{x,y} coordinates are the same,
+  \emph{and} their marks are the same (if they carry marks).
+  This is the default rule: see \code{\link{duplicated.ppp}}
+  for other options. 
+}
+\seealso{
+  \code{\link{ppp.object}},
+  \code{\link{duplicated.ppp}},
+  \code{\link{multiplicity.ppp}}
+}
+\examples{
+   X <- ppp(c(1,1,0.5), c(2,2,1), window=square(3))
+   unique(X)
+   unique(X, rule="deldir")
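+   # warn=TRUE issues a warning when duplicated points are found
+   unique(X, warn=TRUE)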
+}
+
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
+ 
diff --git a/man/unitname.Rd b/man/unitname.Rd
new file mode 100644
index 0000000..50618dc
--- /dev/null
+++ b/man/unitname.Rd
@@ -0,0 +1,125 @@
+\name{unitname}  
+\alias{unitname}
+\alias{unitname.dppm}
+\alias{unitname.im}
+\alias{unitname.kppm}
+\alias{unitname.minconfit}
+\alias{unitname.owin}
+\alias{unitname.ppp}
+\alias{unitname.ppm}
+\alias{unitname.psp}
+\alias{unitname.quad}
+\alias{unitname.slrm}
+\alias{unitname.tess}
+\alias{unitname<-}
+\alias{unitname<-.dppm}
+\alias{unitname<-.im}
+\alias{unitname<-.kppm}
+\alias{unitname<-.minconfit}
+\alias{unitname<-.owin}
+\alias{unitname<-.ppp}
+\alias{unitname<-.ppm}
+\alias{unitname<-.psp}
+\alias{unitname<-.quad}
+\alias{unitname<-.slrm}
+\alias{unitname<-.tess}
+\title{Name for Unit of Length}
+\description{
+  Inspect or change the name of the unit of length
+  in a spatial dataset.
+}
+\usage{
+unitname(x)
+\method{unitname}{dppm}(x)
+\method{unitname}{im}(x)
+\method{unitname}{kppm}(x)
+\method{unitname}{minconfit}(x)
+\method{unitname}{owin}(x)
+\method{unitname}{ppm}(x)
+\method{unitname}{ppp}(x)
+\method{unitname}{psp}(x)
+\method{unitname}{quad}(x)
+\method{unitname}{slrm}(x)
+\method{unitname}{tess}(x)
+unitname(x) <- value
+\method{unitname}{dppm}(x) <- value
+\method{unitname}{im}(x) <- value
+\method{unitname}{kppm}(x) <- value
+\method{unitname}{minconfit}(x) <- value
+\method{unitname}{owin}(x) <- value
+\method{unitname}{ppm}(x) <- value
+\method{unitname}{ppp}(x) <- value
+\method{unitname}{psp}(x) <- value
+\method{unitname}{quad}(x) <- value
+\method{unitname}{slrm}(x) <- value
+\method{unitname}{tess}(x) <- value
+}
+\arguments{
+  \item{x}{A spatial dataset.
+    Either a point pattern (object of class \code{"ppp"}),
+    a line segment pattern (object of class \code{"psp"}),
+    a window (object of class \code{"owin"}),
+    a pixel image (object of class \code{"im"}),
+    a tessellation (object of class \code{"tess"}),
+    a quadrature scheme (object of class \code{"quad"}),
+    or a fitted point process model
+    (object of class \code{"ppm"} or \code{"kppm"} or \code{"slrm"}
+    or \code{"dppm"} or \code{"minconfit"}).
+  }
+  \item{value}{
+    Name of the unit of length. See Details.
+  }
+}
+\details{
+  Spatial datasets in the \pkg{spatstat} package
+  may include the name of the unit of length. This name is used
+  when printing or plotting the dataset, and in some other
+  applications. 
+
+  \code{unitname(x)} extracts this name,
+  and \code{unitname(x) <- value} sets the name to \code{value}.
+
+  A valid name is either
+  \itemize{
+    \item a single character string
+    \item a vector of two character strings giving the
+    singular and plural forms of the unit name
+    \item a list of length 3, containing two character strings
+    giving the singular and plural forms of the basic unit,
+    and a number specifying the multiple of this unit.
+  }
+  
+ Note that re-setting the name of the unit of length \emph{does not}
+ affect the numerical values in \code{x}. It changes only the string
+ containing the name of the unit of length. To rescale the numerical
+ values, use \code{\link{rescale}}.
+}
+\value{
+  The return value of \code{unitname} is an object of class \code{"units"}
+  containing the name of the unit of length in \code{x}. There are
+  methods for \code{print} and \code{summary}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{rescale}},
+  \code{\link{owin}},
+  \code{\link{ppp}}
+}
+
+\examples{
+  X <- runifpoint(20)
+
+  # if the unit of length is 1 metre:
+  unitname(X) <- c("metre", "metres")
+
+  # if the unit of length is 6 inches:
+  unitname(X) <- list("inch", "inches", 6)
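+
+  # the result is an object of class "units",
+  # with print and summary methods
+  unitname(X)
+  summary(unitname(X))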
+}
+\keyword{spatial}
+\keyword{manip}
+
diff --git a/man/unmark.Rd b/man/unmark.Rd
new file mode 100644
index 0000000..0d1085a
--- /dev/null
+++ b/man/unmark.Rd
@@ -0,0 +1,54 @@
+\name{unmark}
+\alias{unmark}
+\alias{unmark.ppp}
+\alias{unmark.splitppp}
+\alias{unmark.psp}
+\alias{unmark.ppx}
+\title{Remove Marks}
+\description{
+  Remove the mark information from a spatial dataset. 
+}
+\usage{
+ unmark(X)
+ \method{unmark}{ppp}(X)
+ \method{unmark}{splitppp}(X)
+ \method{unmark}{psp}(X)
+ \method{unmark}{ppx}(X)
+}
+\arguments{
+  \item{X}{A point pattern (object of class \code{"ppp"}),
+    a split point pattern (object of class \code{"splitppp"}),
+    a line segment pattern (object of class \code{"psp"})
+    or a multidimensional space-time point pattern
+    (object of class \code{"ppx"}).
+  }
+}
+\value{
+  An object of the same class as \code{X}
+  with any mark information deleted.
+}
+\details{
+  A `mark' is a value attached to each point in a spatial point pattern,
+  or attached to each line segment in a line segment pattern, etc.
+  
+  The function \code{unmark} is a simple way to 
+  remove the marks from such a dataset.
+}
+\seealso{
+  \code{\link{ppp.object}},
+  \code{\link{psp.object}}
+}
+\examples{
+  data(lansing)
+  hicks <- lansing[lansing$marks == "hickory", ]
+  \dontrun{
+  plot(hicks)  # still a marked point pattern, but only 1 value of marks
+  plot(unmark(hicks)) # unmarked
+  }
+}
+\author{
+  \adrian
+  and \rolf
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/unnormdensity.Rd b/man/unnormdensity.Rd
new file mode 100644
index 0000000..15d8f7f
--- /dev/null
+++ b/man/unnormdensity.Rd
@@ -0,0 +1,78 @@
+\name{unnormdensity}
+\alias{unnormdensity}
+\title{
+  Weighted Kernel Smoother
+}
+\description{
+  An unnormalised version of kernel density estimation
+  where the weights are not required to sum to 1.
+  The weights may be positive, negative or zero.
+}
+\usage{
+unnormdensity(x, ..., weights = NULL)
+}
+\arguments{
+  \item{x}{
+    Numeric vector of data.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{density.default}}.
+    Arguments must be \emph{named}.
+  }
+  \item{weights}{
+    Optional numeric vector of weights for the data.
+  }
+}
+\details{
+  This is an alternative to the standard \R kernel density estimation function
+  \code{\link{density.default}}.
+
+  The standard \code{\link{density.default}}
+  requires the \code{weights} to be nonnegative numbers that add up to 1,
+  and returns a probability density (a function that integrates to 1).
+  
+  This function \code{unnormdensity} does not impose any requirement
+  on the \code{weights} except that they be finite. Individual weights may be
+  positive, negative or zero. The result is a function that does not
+  necessarily integrate to 1 and may be negative. The result is
+  the convolution of the kernel \eqn{k} with the weighted data,
+  \deqn{
+    f(x) = \sum_i w_i k(x- x_i)
+  }{
+    f(x) = sum of w[i] * k(x - x[i])
+  }
+  where \eqn{x_i}{x[i]} are the data points and \eqn{w_i}{w[i]} are the
+  weights.
+
+  The algorithm first selects the kernel bandwidth by
+  applying \code{\link{density.default}} to the data
+  \code{x} with normalised, positive weight vector
+  \code{w = abs(weights)/sum(abs(weights))} and
+  extracting the selected bandwidth.
+  Then the result is computed by applying
+  \code{\link{density.default}} to \code{x} twice
+  using the normalised positive and negative parts of the weights.
+  
+  Note that the arguments \code{\dots} must be passed by name,
+  i.e. in the form (\code{name=value}). Arguments that do not match
+  an argument of \code{\link{density.default}} will be ignored
+  \emph{silently}.
+}
+\value{
+  Object of class \code{"density"} as described in
+  \code{\link{density.default}}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+ \code{\link{density.default}}
+}
+\examples{
+  d <- unnormdensity(1:3, weights=c(-1,0,1))
+  if(interactive()) plot(d)
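+  # arguments for density.default must be given by name,
+  # e.g. a fixed bandwidth (illustrative value)
+  d2 <- unnormdensity(1:3, weights=c(-1,0,1), bw=0.5)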
+}
+\keyword{smooth}
diff --git a/man/unstack.msr.Rd b/man/unstack.msr.Rd
new file mode 100644
index 0000000..dc85c52
--- /dev/null
+++ b/man/unstack.msr.Rd
@@ -0,0 +1,54 @@
+\name{unstack.msr}
+\alias{unstack.msr}
+\title{
+  Separate a Vector Measure into its Scalar Components
+}
+\description{
+  Converts a vector-valued measure into a list of
+  scalar-valued measures. 
+}
+\usage{
+\method{unstack}{msr}(x, \dots)
+}
+\arguments{
+  \item{x}{
+    A measure (object of class \code{"msr"}).
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This is a method for the generic
+  \code{\link[utils]{unstack}} for the class \code{"msr"} of measures. 
+  
+  If \code{x} is a vector-valued measure, then 
+  \code{y <- unstack(x)} is a list of scalar-valued measures
+  defined by the components of \code{x}. 
+  The \code{j}th entry of the list, \code{y[[j]]}, is equivalent to
+  the \code{j}th component of the vector measure \code{x}.
+  
+  If \code{x} is a scalar-valued measure, then
+  the result is a list consisting of one entry, which is \code{x}.
+}
+\value{
+  A list of measures, of class \code{"solist"}.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link[utils]{unstack}}
+
+  \code{\link{unstack.ppp}}
+
+  \code{\link{split.msr}}.
+}
+\examples{
+   fit <- ppm(cells ~ x)
+   m <- residuals(fit, type="score")
+   m
+   unstack(m)
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/unstack.ppp.Rd b/man/unstack.ppp.Rd
new file mode 100644
index 0000000..44ac9b3
--- /dev/null
+++ b/man/unstack.ppp.Rd
@@ -0,0 +1,65 @@
+\name{unstack.ppp}
+\alias{unstack.ppp}
+\alias{unstack.psp}
+\alias{unstack.lpp}
+\title{
+  Separate Multiple Columns of Marks 
+}
+\description{
+  Given a spatial pattern with several columns of marks,
+  take one column at a time, and return a list of spatial patterns
+  each having only one column of marks.
+}
+\usage{
+\method{unstack}{ppp}(x, \dots)
+
+\method{unstack}{psp}(x, \dots)
+
+\method{unstack}{lpp}(x, \dots)
+}
+\arguments{
+  \item{x}{
+    A spatial point pattern (object of class \code{"ppp"} or
+    \code{"lpp"}) or a spatial pattern of line segments (object of class
+    \code{"psp"}). 
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  The functions defined here are methods for the generic
+  \code{\link[utils]{unstack}}. The functions expect a spatial object
+  \code{x} which has several columns of marks; they separate the columns,
+  and return a list of spatial objects, each having only one column of marks.
+  
+  If \code{x} has several columns of marks (i.e. \code{marks(x)} is a
+  matrix, data frame or hyperframe with several columns),
+  then \code{y <- unstack(x)} is a list of spatial objects, each of the same
+  kind as \code{x}. The \code{j}th entry \code{y[[j]]} is equivalent to
+  \code{x} except that it only includes the \code{j}th column of \code{marks(x)}.
+  
+  If \code{x} has no marks, or has only a single column of marks,
+  the result is a list consisting of one entry, which is \code{x}.
+}
+\value{
+  A list, of class \code{"solist"}, whose entries are objects of the
+  same type as \code{x}.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link[utils]{unstack}}
+
+  \code{\link{unstack.msr}}
+
+  See also methods for the generic \code{\link[base]{split}} such as
+  \code{\link{split.ppp}}.
+}
+\examples{
+   finpines
+   unstack(finpines)
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/update.detpointprocfamily.Rd b/man/update.detpointprocfamily.Rd
new file mode 100644
index 0000000..9d9b421
--- /dev/null
+++ b/man/update.detpointprocfamily.Rd
@@ -0,0 +1,31 @@
+\name{update.detpointprocfamily}
+\alias{update.detpointprocfamily}
+\title{Set Parameter Values in a Determinantal Point Process Model}
+\description{
+  Set parameter values in a determinantal point process model object.
+}
+\usage{
+  \method{update}{detpointprocfamily}(object, \dots)
+}
+\arguments{
+  \item{object}{object of class \code{"detpointprocfamily"}.}
+  \item{\dots}{
+    arguments of the form \code{tag=value} specifying the parameters 
+    values to set.
+  } 
+}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
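+\examples{
+  # a minimal sketch, assuming dppGauss() called with no arguments
+  # yields a Gaussian DPP family with all parameters unspecified
+  m <- dppGauss()
+  update(m, lambda=100)
+}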
+\keyword{spatial}
+\keyword{models}
+
+
+
+
diff --git a/man/update.interact.Rd b/man/update.interact.Rd
new file mode 100644
index 0000000..f9671cb
--- /dev/null
+++ b/man/update.interact.Rd
@@ -0,0 +1,51 @@
+\name{update.interact}
+\alias{update.interact}
+\title{
+  Update an Interpoint Interaction
+}
+\description{
+  This command updates the \code{object} using the
+  arguments given.
+}
+\usage{
+\method{update}{interact}(object, \dots)
+}
+\arguments{
+  \item{object}{
+    Interpoint interaction (object of class \code{"interact"}).
+  }
+  \item{\dots}{
+    Additional or replacement values of parameters of \code{object}.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link[stats]{update}}
+  for the class \code{"interact"} of interpoint interactions.
+  It updates the \code{object} using the parameters given in the
+  extra arguments \code{\dots}.
+
+  The extra arguments must be given in
+  the form \code{name=value} and must be recognisable to
+  the interaction object. They override any parameters
+  of the same name in \code{object}.
+}
+\value{
+  Another object of class \code{"interact"}, equivalent to \code{object}
+  except for changes in parameter values.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{update.ppm}}
+}
+\examples{
+  Str <- Strauss(r=1)
+  Str
+  update(Str, r=2)
+
+  M <- MultiStrauss(radii=matrix(1,2,2))
+  update(M, types=c("on", "off"))
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/update.kppm.Rd b/man/update.kppm.Rd
new file mode 100644
index 0000000..b893b12
--- /dev/null
+++ b/man/update.kppm.Rd
@@ -0,0 +1,78 @@
+\name{update.kppm}
+\alias{update.kppm}
+\title{Update a Fitted Cluster Point Process Model}
+\description{
+  \code{update} method for class \code{"kppm"}.
+}
+\usage{
+ \method{update}{kppm}(object, \dots, evaluate=TRUE)
+}
+\arguments{
+  \item{object}{
+    Fitted cluster point process model.
+    An object of class \code{"kppm"},
+    obtained from \code{\link{kppm}}.
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{kppm}}.
+  }
+  \item{evaluate}{
+    Logical value indicating whether to return the updated fitted model
+    (\code{evaluate=TRUE}, the default) or just the updated call to \code{kppm}
+    (\code{evaluate=FALSE}).
+  }
+}
+\details{
+  \code{object} should be a fitted cluster point process model,
+  obtained from the model-fitting function \code{\link{kppm}}.
+  The model will be updated according to the new arguments provided.
+
+  If the argument \code{trend} is provided, it determines the
+  intensity in the updated model. It should be an \R formula
+  (with or without a left hand side). It may include the symbols
+  \code{+} or \code{-} to specify addition or deletion of terms
+  in the current model formula, as shown in the Examples below.
+  The symbol \code{.} refers to the current contents of the
+  formula.
+
+  The intensity in the updated model is determined by the
+  argument \code{trend} if it is provided, or otherwise by any unnamed
+  argument that is a formula, or otherwise by the formula of the
+  original model, \code{formula(object)}.
+
+  The spatial point pattern data to which the new model is fitted
+  is determined by the left hand side of the updated model formula,
+  if this is present. Otherwise it is determined by the argument
+  \code{X} if it is provided, or otherwise by any unnamed argument
+  that is a point pattern or a quadrature scheme.
+
+  The model is refitted using \code{\link{kppm}}.
+}
+\value{
+  Another fitted cluster point process model (object of
+  class \code{"kppm"}.
+}
+\seealso{
+  \code{\link{kppm}},  \code{\link{plot.kppm}},
+  \code{\link{predict.kppm}}, \code{\link{simulate.kppm}},
+  \code{\link{methods.kppm}},
+  \code{\link{vcov.kppm}}
+}
+\examples{
+ fit <- kppm(redwood ~1, "Thomas")
+ fitx <- update(fit, ~ . + x)
+ fitM <- update(fit, clusters="MatClust")
+ fitC <- update(fit, cells)
+ fitCx <- update(fit, cells ~ x)
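+ # return the updated call without re-fitting
+ update(fit, clusters="MatClust", evaluate=FALSE)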
+}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/update.ppm.Rd b/man/update.ppm.Rd
new file mode 100644
index 0000000..6954155
--- /dev/null
+++ b/man/update.ppm.Rd
@@ -0,0 +1,176 @@
+\name{update.ppm}
+\alias{update.ppm}
+\title{Update a Fitted Point Process Model}
+\description{
+  \code{update} method for class \code{"ppm"}.
+}
+\usage{
+  \method{update}{ppm}(object, \dots, fixdummy=TRUE, use.internal=NULL,
+                                      envir=environment(terms(object)))
+
+}
+\arguments{
+  \item{object}{
+    An existing fitted point process model,
+    typically produced by \code{\link{ppm}}.
+  }
+  \item{\dots}{
+    Arguments to be updated in the new call to \code{\link{ppm}}.
+  }
+  \item{fixdummy}{
+    Logical flag indicating whether the quadrature scheme for the
+    call to \code{\link{ppm}} should use the same set of dummy points
+    as that in the original call.
+  }
+  \item{use.internal}{
+    Optional. Logical flag indicating whether the model should be
+    refitted using the internally saved data (\code{use.internal=TRUE})
+    or by re-evaluating these data in the
+    current frame (\code{use.internal=FALSE}).
+  }
+  \item{envir}{
+    Environment in which to re-evaluate the call to \code{\link{ppm}}.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{update}}
+  for the class \code{"ppm"}. An object of class \code{"ppm"}
+  describes a fitted point process model. See \code{\link{ppm.object}}
+  for details of this class.
+
+  \code{update.ppm} will modify the point process model
+  specified by \code{object} according to the new arguments given,
+  then re-fit it.
+  The actual re-fitting is performed by the model-fitting
+  function \code{\link{ppm}}.
+
+  If you are comparing several model fits to the same data,
+  or fits of the same model to different data, it is
+  strongly advisable to use \code{update.ppm}
+  rather than trying to fit them by hand.
+  This is because \code{update.ppm} re-fits the model
+  in a way which is comparable to the original fit.
+
+  The arguments \code{...} are matched to the formal arguments
+  of \code{\link{ppm}} as follows.
+
+  First, all the \emph{named} arguments in \code{...} are matched
+  with the formal arguments of \code{\link{ppm}}.
+  Use \code{name=NULL} to remove the argument \code{name} from the
+  call.
+
+  Second, any \emph{unnamed} arguments in \code{...} are
+  matched with formal arguments of \code{\link{ppm}} if the matching
+  is obvious from the class of the object. Thus \code{...} may contain
+  \itemize{
+    \item
+    exactly one argument of class \code{"ppp"} or \code{"quad"},
+    which will be interpreted as the named argument \code{Q};
+    \item
+    exactly one argument of class \code{"formula"}, which will be
+    interpreted as the named argument \code{trend} (or as specifying
+    a change to the trend formula);
+    \item
+    exactly one argument of class \code{"interact"}, which will be
+    interpreted as the named argument \code{interaction};
+    \item
+    exactly one argument of class \code{"data.frame"}, which will be
+    interpreted as the named argument \code{covariates}.
+  }
+
+  The \code{trend} argument can be a formula that specifies a
+  \emph{change} to the current trend formula. For example, the
+  formula \code{~ . + Z} specifies that the additional covariate
+  \code{Z} will be added to the right hand side of the trend
+  formula in the existing \code{object}.
+
+  The argument \code{fixdummy=TRUE} ensures comparability of the
+  objects before and after updating.
+  When \code{fixdummy=FALSE}, calling \code{update.ppm}
+  is exactly the same as calling \code{ppm} with the updated
+  arguments. However, the original and updated models
+  are not strictly comparable (for example, their pseudolikelihoods
+  are not strictly comparable) unless they used the same set of dummy
+  points for the quadrature scheme. Setting \code{fixdummy=TRUE}
+  ensures that the re-fitting will be performed using the same set
+  of dummy points. This is highly recommended.
+
+  The value of \code{use.internal} determines where to find data
+  to re-evaluate the model (data for the arguments mentioned in
+  the original call to \code{ppm} that are not overwritten by
+  arguments to \code{update.ppm}).
+  
+  If \code{use.internal=FALSE}, then arguments 
+  to \code{ppm} are \emph{re-evaluated} in the frame where you
+  call \code{update.ppm}. This is like the behaviour of the
+  other methods for \code{\link{update}}. This means that if you have changed
+  any of the objects referred to in the call, these changes will be
+  taken into account. Also if the original call to \code{ppm} included
+  any calls to random number generators, these calls will be recomputed,
+  so that you will get a different outcome of the random numbers.
+
+  If \code{use.internal=TRUE}, then arguments to \code{ppm} are extracted
+  from internal data stored inside the current fitted
+  model \code{object}. This is useful if you don't want to 
+  re-evaluate anything. It is also necessary
+  if \code{object} has been restored from a dump file
+  using \code{\link{load}} or \code{\link{source}}. In such cases,
+  we have lost the environment in which \code{object} was fitted,
+  and data cannot be re-evaluated.
+
+  By default, if \code{use.internal} is missing, \code{update.ppm} will
+  re-evaluate the arguments if this is possible, and use internal data
+  if not.
+}
+\value{
+  Another fitted point process model (object of class \code{"ppm"}).
+}
+\examples{
+  data(nztrees)
+  data(cells)
+
+  # fit the stationary Poisson process
+  fit <- ppm(nztrees, ~ 1)
+
+  # fit a nonstationary Poisson process
+  fitP <- update(fit, trend=~x)
+  fitP <- update(fit, ~x)
+
+  # change the trend formula: add another term to the trend
+  fitPxy <- update(fitP, ~ . + y)
+  # change the trend formula: remove the x variable
+  fitPy <- update(fitPxy, ~ . - x)
+
+  # fit a stationary Strauss process
+  fitS <- update(fit, interaction=Strauss(13))
+  fitS <- update(fit, Strauss(13))
+
+  # refit using a different edge correction
+  fitS <- update(fitS, correction="isotropic")
+
+  # re-fit the model to a subset
+  # of the original point pattern
+  nzw <- owin(c(0,148),c(0,95))
+  nzsub <- nztrees[,nzw]
+  fut <- update(fitS, Q=nzsub)
+  fut <- update(fitS, nzsub)
+
+  # WARNING: the point pattern argument is called 'Q'
+
+  ranfit <- ppm(rpoispp(42), ~1, Poisson())
+  ranfit
+  # different random data!  
+  update(ranfit)
+  # the original data
+  update(ranfit, use.internal=TRUE)  
+
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{models}
diff --git a/man/update.rmhcontrol.Rd b/man/update.rmhcontrol.Rd
new file mode 100644
index 0000000..4217ffa
--- /dev/null
+++ b/man/update.rmhcontrol.Rd
@@ -0,0 +1,43 @@
+\name{update.rmhcontrol}
+\alias{update.rmhcontrol}
+\title{Update Control Parameters of Metropolis-Hastings Algorithm}
+\description{
+  \code{update} method for class \code{"rmhcontrol"}.
+}
+\usage{
+  \method{update}{rmhcontrol}(object, \dots)
+}
+\arguments{
+  \item{object}{
+    Object of class \code{"rmhcontrol"} containing control parameters
+    for a Metropolis-Hastings algorithm.
+  }
+  \item{\dots}{
+    Arguments to be updated in the new call to \code{\link{rmhcontrol}}.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{update}}
+  for the class \code{"rmhcontrol"}. An object of class \code{"rmhcontrol"}
+  describes a set of control parameters for the Metropolis-Hastings
+  simulation algorithm. See \code{\link{rmhcontrol}}.
+
+  \code{update.rmhcontrol} will modify the parameters
+  specified by \code{object} according to the new arguments given.
+}
+\value{
+  Another object of class \code{"rmhcontrol"}.
+}
+\examples{
+  a <- rmhcontrol(expand=1)
+  update(a, expand=2)
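+  # several control parameters can be updated in a single call
+  # (nrep is another standard rmhcontrol parameter)
+  update(a, expand=2, nrep=1e4)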
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{models}
diff --git a/man/update.symbolmap.Rd b/man/update.symbolmap.Rd
new file mode 100644
index 0000000..cafe305
--- /dev/null
+++ b/man/update.symbolmap.Rd
@@ -0,0 +1,53 @@
+\name{update.symbolmap}
+\alias{update.symbolmap}
+\title{
+  Update a Graphics Symbol Map.
+}
+\description{
+  This command updates the \code{object} using the
+  arguments given.
+}
+\usage{
+\method{update}{symbolmap}(object, \dots)
+}
+\arguments{
+  \item{object}{
+    Graphics symbol map (object of class \code{"symbolmap"}).
+  }
+  \item{\dots}{
+    Additional or replacement arguments to \code{\link{symbolmap}}.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link[stats]{update}}
+  for the class \code{"symbolmap"} of graphics symbol maps.
+  It updates the \code{object} using the parameters given in the
+  extra arguments \code{\dots}.
+
+  The extra arguments must be given in
+  the form \code{name=value} and must be recognisable to
+  \code{\link{symbolmap}}. They override any parameters
+  of the same name in \code{object}.
+}
+\value{
+  Another object of class \code{"symbolmap"}.
+}
+\author{\adrian
+  
+  ,
+  \rolf
+  
+  and \ege.
+}
+\seealso{
+  \code{\link{symbolmap}} to create a graphics symbol map.
+}
+\examples{
+  g <- symbolmap(size=function(x) x/50)
+  g
+  update(g, range=c(0,1))
+  update(g, size=42)
+  update(g, shape="squares", range=c(0,1))
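+  # a replacement value for 'size' overrides the rule defined in 'g'
+  update(g, size=function(x) x/25)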
+}
+\keyword{spatial}
+\keyword{hplot}
diff --git a/man/urkiola.Rd b/man/urkiola.Rd
new file mode 100644
index 0000000..9f97d2b
--- /dev/null
+++ b/man/urkiola.Rd
@@ -0,0 +1,34 @@
+\encoding{latin1}
+\name{urkiola}
+\alias{urkiola}
+\docType{data}
+\title{Urkiola Woods Point Pattern}
+\description{
+  Locations of birch (\emph{Betula celtiberica}) and
+  oak (\emph{Quercus robur}) trees  in a secondary wood in
+  Urkiola Natural Park (Basque country, northern Spain). 
+  They are part of a more extensive dataset collected
+  and analysed by Laskurain (2008). The coordinates of the trees
+  are given in meters. 
+}
+\usage{data(urkiola)}
+\format{
+  An object of class \code{"ppp"} representing the point pattern of
+  tree locations. Entries include
+  \describe{
+    \item{x}{Cartesian x-coordinate of tree}
+    \item{y}{Cartesian y-coordinate of tree }
+    \item{marks}{factor indicating species of each tree}
+  }
+  The levels of \code{marks} are \code{birch} and \code{oak}.
+  See \code{\link{ppp.object}}  for details of the format of a ppp object. 
+}
+\source{N.A. Laskurain. Kindly formatted and communicated by M. de la Cruz Rot}
+\references{
+  Laskurain, N. A. (2008)
+  \emph{\enc{Dinámica}{Dinamica} espacio-temporal de un bosque
+    secundario en el Parque Natural de Urkiola (Bizkaia).} 
+  Tesis Doctoral. Universidad del \enc{País}{Pais} Vasco / Euskal Herriko
+  Unibertsitatea.
+}
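+\examples{
+  # plot the two species separately (split by the factor marks)
+  plot(split(urkiola))
+}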
+\keyword{datasets}
diff --git a/man/valid.Rd b/man/valid.Rd
new file mode 100644
index 0000000..2e427de
--- /dev/null
+++ b/man/valid.Rd
@@ -0,0 +1,51 @@
+\name{valid}
+\alias{valid}
+\title{
+  Check Whether Point Process Model is Valid
+}
+\description{
+  Determines whether a point process model object
+  corresponds to a valid point process.  
+}
+\usage{
+  valid(object, \dots)
+}
+\arguments{
+  \item{object}{
+    Object of some class, describing a point process model.
+  }
+  \item{\dots}{
+    Additional arguments passed to methods.
+  }
+}
+\details{
+  The function \code{valid} is generic,
+  with methods for the classes \code{"ppm"} and \code{"detpointprocfamily"}.
+
+  An object representing a point process is called valid if
+  all its parameter values are known (for example, no parameter
+  takes the value \code{NA} or \code{NaN}) and the parameter values
+  correspond to a well-defined point process (for example, the
+  parameter values satisfy all the constraints that are imposed by
+  mathematical theory).
+  
+  See the methods for further details.
+}
+\value{
+  A logical value, or \code{NA}.
+}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{valid.ppm}},
+  \code{\link{valid.detpointprocfamily}}
+}
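+\examples{
+  # a minimal sketch: 'valid' dispatches on the class of the model,
+  # here to the method valid.ppm
+  fit <- ppm(cells, ~1, Strauss(0.1))
+  valid(fit)
+}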
+\keyword{spatial}
+\keyword{models}
diff --git a/man/valid.detpointprocfamily.Rd b/man/valid.detpointprocfamily.Rd
new file mode 100644
index 0000000..587293a
--- /dev/null
+++ b/man/valid.detpointprocfamily.Rd
@@ -0,0 +1,34 @@
+\name{valid.detpointprocfamily}
+\alias{valid.detpointprocfamily}
+\title{Check Validity of a Determinantal Point Process Model}
+\description{
+  Checks the validity of a determinantal point process model.
+}
+\usage{
+   \method{valid}{detpointprocfamily}(object, \dots)
+}
+\arguments{
+  \item{object}{Model of class \code{"detpointprocfamily"}.}
+  \item{\dots}{Ignored.}
+}
+\value{A logical value.}
+\author{
+  \adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\examples{
+model1 <- dppMatern(lambda=100, alpha=.01, nu=1, d=2)
+valid(model1)
+model2 <- dppMatern(lambda=100, alpha=1, nu=1, d=2)
+valid(model2)
+}
+\seealso{
+  \code{\link{valid}}
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/valid.ppm.Rd b/man/valid.ppm.Rd
new file mode 100644
index 0000000..96d16a8
--- /dev/null
+++ b/man/valid.ppm.Rd
@@ -0,0 +1,85 @@
+\name{valid.ppm}
+\alias{valid.ppm}
+\title{
+  Check Whether Point Process Model is Valid
+}
+\description{
+  Determines whether a fitted point process model 
+  satisfies the integrability conditions for existence of the point process.
+}
+\usage{
+  \method{valid}{ppm}(object, warn=TRUE, \dots)
+}
+\arguments{
+  \item{object}{
+    Fitted point process model (object of class \code{"ppm"}).
+  }
+  \item{warn}{
+    Logical value indicating whether to issue a warning if the
+    validity of the model cannot be checked (due to unavailability of
+    the required code).
+  }
+  \item{\dots}{Ignored.}
+}
+\details{
+  This is a method for the generic function \code{\link{valid}}
+  for Poisson and Gibbs point process models (class \code{"ppm"}).
+  
+  The model-fitting function \code{\link{ppm}}
+  fits Gibbs point process models to point pattern data.
+  By default, \code{\link{ppm}} does not check whether the
+  fitted model actually exists as a point process. This checking
+  is done by \code{valid.ppm}.
+
+  Unlike a regression model, which is well-defined for any values
+  of the fitted regression coefficients, a Gibbs point process model
+  is only well-defined if the fitted interaction parameters 
+  satisfy some constraints. 
+  A famous example is the Strauss process (see \code{\link{Strauss}})
+  which exists only when the interaction parameter \eqn{\gamma}{gamma}
+  is less than or equal to 1. For values \eqn{\gamma > 1}{gamma > 1},
+  the probability density is not integrable and the process does not
+  exist (and cannot be simulated).
+
+  By default, \code{\link{ppm}} does not enforce the constraint that
+  a fitted Strauss process (for example) must satisfy
+  \eqn{\gamma \le 1}{gamma <= 1}.
+  This is because a fitted parameter value of \eqn{\gamma > 1}{gamma > 1} 
+  could be useful information for data analysis, as it indicates that
+  the Strauss model is not appropriate, and suggests a clustered model should be
+  fitted.
+
+  The function \code{valid.ppm} checks whether the fitted model
+  \code{object} specifies a well-defined point process. It returns
+  \code{TRUE} if the model is well-defined.
+
+  Another possible reason for invalid models is that the data may not
+  be adequate for estimation of the model parameters. In this case,
+  some of the fitted coefficients could be \code{NA} or infinite
+  values. If this happens
+  then \code{valid.ppm} returns \code{FALSE}.
+
+  Use the function \code{\link{project.ppm}} to force the fitted model
+  to be valid.
+}
+\value{
+  A logical value, or \code{NA}.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{ppm}},
+  \code{\link{project.ppm}}
+}
+\examples{
+   fit1 <- ppm(cells, ~1, Strauss(0.1))
+   valid(fit1)
+   fit2 <- ppm(redwood, ~1, Strauss(0.1))
+   valid(fit2)
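+   # force the invalid model to be valid (a sketch using project.ppm)
+   fit2p <- project.ppm(fit2)
+   valid(fit2p)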
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/varblock.Rd b/man/varblock.Rd
new file mode 100644
index 0000000..7c9fb73
--- /dev/null
+++ b/man/varblock.Rd
@@ -0,0 +1,130 @@
+\name{varblock}
+\alias{varblock}
+\title{
+  Estimate Variance of Summary Statistic by Subdivision
+}
+\description{
+  This command estimates the variance of
+  any summary statistic (such as the \eqn{K}-function)
+  by spatial subdivision of a single point pattern dataset.
+}
+\usage{
+varblock(X, fun = Kest,
+         blocks = quadrats(X, nx = nx, ny = ny),
+         \dots,
+         nx = 3, ny = nx,
+         confidence=0.95)
+}
+\arguments{
+  \item{X}{
+    Point pattern dataset (object of class \code{"ppp"}).
+  }
+  \item{fun}{
+    Function that computes the summary statistic.
+  }
+  \item{blocks}{
+    Optional. A tessellation that specifies the division of
+    the space into blocks.
+  }
+  \item{\dots}{
+    Arguments passed to \code{fun}.
+  }
+  \item{nx,ny}{
+    Optional. Number of rectangular blocks
+    in the \eqn{x} and \eqn{y} directions.
+    Incompatible with \code{blocks}.
+  }
+  \item{confidence}{
+    Confidence level, as a fraction between 0 and 1.
+  }
+}
+\details{
+  This command computes an estimate of the variance of
+  the summary statistic \code{fun(X)} from a single point pattern
+  dataset \code{X} using a subdivision method.
+  It can be used to plot \bold{confidence intervals}
+  for the true value of a summary function such as the \eqn{K}-function.
+  
+  The window containing \code{X} is divided into pieces by
+  an \code{nx * ny} array of rectangles
+  (or is divided into pieces of more general shape,
+  according to the argument \code{blocks} if it is present).
+  The summary statistic \code{fun} is applied to each of the
+  corresponding sub-patterns of \code{X} as described below.
+  Then the pointwise
+  sample mean, sample variance and sample standard deviation
+  of these summary statistics are computed. Then
+  pointwise confidence intervals are computed, for the specified level
+  of confidence, defaulting to 95 percent.
+
+  The variance is estimated by equation (4.21) of Diggle (2003, page 52).
+  This assumes that the point pattern \code{X} is stationary.
+  For further details see Diggle (2003, pp 52--53).
+  
+  The estimate of the summary statistic
+  from each block is computed as follows.
+  For most functions \code{fun},
+  the estimate from block \code{B}
+  is computed by finding the subset of \code{X} consisting of
+  points that fall inside \code{B},
+  and applying \code{fun} to these points, by calling \code{fun(X[B])}.
+
+  However if \code{fun} is the \eqn{K}-function \code{\link{Kest}},
+  or any function which has an argument called \code{domain},
+  the estimate for each block \code{B} is computed
+  by calling \code{fun(X, domain=B)}. In the case of the
+  \eqn{K}-function this means that the estimate from block \code{B}
+  is computed by counting pairs of
+  points in which the \emph{first} point lies in \code{B},
+  while the second point may lie anywhere. 
+}
+\section{Errors}{
+  If the blocks are too small, there may be insufficient data
+  in some blocks, and the function \code{fun} may report an error.
+  If this happens, you need to take larger blocks.
+  
+  An error message about incompatibility may occur.
+  The different function estimates may be incompatible in some cases,
+  for example, because they use different default edge corrections
+  (typically because the tiles of the tessellation are not the same kind
+  of geometric object as the window of \code{X}, or because the default
+  edge correction depends on the number of points). To prevent
+    this, specify the choice of edge correction
+    in the \code{correction} argument to \code{fun}, if it has one.
+
+  An alternative to \code{varblock} is Loh's mark bootstrap
+  \code{\link{lohboot}}.
+}
+\value{
+  A function value table (object of class \code{"fv"})
+  that contains the result of \code{fun(X)} as well as
+  the sample mean, sample variance and sample standard deviation
+  of the block estimates, together with 
+  the upper and lower two-standard-deviation confidence limits.
+}
+\references{
+  Diggle, P.J. (2003)
+  \emph{Statistical analysis of spatial point patterns},
+  Second edition. Arnold.
+}
+\author{
+  \adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+ \code{\link{tess}},
+ \code{\link{quadrats}} for basic manipulation.
+ 
+ \code{\link{lohboot}} for an alternative bootstrap technique.
+}
+\examples{
+   v <- varblock(amacrine, Kest, nx=4, ny=2)
+   v <- varblock(amacrine, Kcross, nx=4, ny=2)
+   if(interactive()) plot(v, iso ~ r, shade=c("hiiso", "loiso"))
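+   # fixing the edge correction avoids incompatible block estimates
+   v2 <- varblock(amacrine, Kest, nx=4, ny=2, correction="isotropic")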
+}
+\keyword{nonparametric}
+\keyword{spatial}
+
diff --git a/man/varcount.Rd b/man/varcount.Rd
new file mode 100644
index 0000000..95ea764
--- /dev/null
+++ b/man/varcount.Rd
@@ -0,0 +1,107 @@
+\name{varcount}
+\alias{varcount}
+\title{
+  Predicted Variance of the Number of Points 
+}
+\description{
+  Given a fitted point process model, calculate the predicted variance
+  of the number of points in a nominated set \code{B}.
+}
+\usage{
+varcount(model, B, \dots, dimyx = NULL)
+}
+\arguments{
+  \item{model}{
+    A fitted point process model
+    (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}).
+  }
+  \item{B}{
+    A window (object of class \code{"owin"}) specifying the region in
+    which the points are counted.
+    Alternatively a pixel image (object of class \code{"im"})
+    or a function of spatial coordinates specifying a numerical weight
+    for each random point.
+  }
+  \item{\dots}{
+    Additional arguments passed to \code{B} when it is a function.
+  }
+  \item{dimyx}{
+    Spatial resolution for the calculations.
+    Argument passed to \code{\link{as.mask}}.
+  }
+}
+\details{
+  This command calculates the variance of the number of points
+  falling in a specified window \code{B} according to the \code{model}.
+  It can also calculate the variance of a sum of weights attached
+  to each random point.
+
+  The \code{model} should be a fitted point process model
+  (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}).
+
+  \itemize{
+    \item{
+      If \code{B} is a window, this command calculates the variance
+      of the number of points falling in \code{B}, according to the
+      fitted \code{model}.
+
+      If the \code{model} depends on spatial covariates other than the
+      Cartesian coordinates, then \code{B} should be a subset of the
+      domain in which these covariates are defined. 
+    }
+    \item{
+      If \code{B} is a pixel image,
+      this command calculates the variance of
+      \eqn{T = \sum_i B(x_i)}{T = sum[i] B(x[i])},
+      the sum of the values of \code{B} over all random points
+      falling in the domain of the image.
+
+      If the \code{model} depends on spatial covariates other than the
+      Cartesian coordinates, then the domain of the pixel image,
+      \code{as.owin(B)}, should be a subset of the domain in which these
+      covariates are defined. 
+    }
+    \item{
+      If \code{B} is a \code{function(x,y)} or \code{function(x,y,...)}
+      this command calculates the variance of
+      \eqn{T = \sum_i B(x_i)}{T = sum[i] B(x[i])},
+      the sum of the values of \code{B} over all random points
+      falling inside the window \code{W=as.owin(model)}, the window
+      in which the original data were observed.
+    }
+  }
+
+  The variance calculation involves the intensity and the
+  pair correlation function of the model.
+  The calculation is exact (up to discretisation error)
+  for models of class \code{"kppm"} and \code{"dppm"},
+  and for Poisson point process models of class \code{"ppm"}.
+  For Gibbs point process models of class \code{"ppm"} the
+  calculation depends on the Poisson-saddlepoint approximations
+  to the intensity and pair correlation function, which are rough
+  approximations. The approximation is not yet implemented
+  for some Gibbs models.
+}
+\value{
+  A single number.
+}
+\author{
+  \spatstatAuthors
+}
+\seealso{
+  \code{\link{predict.ppm}},
+  \code{\link{predict.kppm}},
+  \code{\link{predict.dppm}}
+}
+\examples{
+   fitT <- kppm(redwood ~ 1, "Thomas")
+   B <- owin(c(0, 0.5), c(-0.5, 0))
+   varcount(fitT, B)
+
+   fitS <- ppm(swedishpines ~ 1, Strauss(9))
+   BS <- square(50)
+   varcount(fitS, BS)
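+
+   # B as a function of the coordinates: variance of a weighted
+   # sum over the random points (a sketch)
+   varcount(fitT, function(x,y) { x })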
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/man/vargamma.estK.Rd b/man/vargamma.estK.Rd
new file mode 100644
index 0000000..3b34750
--- /dev/null
+++ b/man/vargamma.estK.Rd
@@ -0,0 +1,171 @@
+\name{vargamma.estK}
+\alias{vargamma.estK}
+\title{Fit the Neyman-Scott Cluster Point Process with Variance Gamma kernel}
+\description{
+  Fits the Neyman-Scott cluster point process, with Variance Gamma
+  kernel, to a point pattern dataset by the Method of Minimum Contrast.
+}
+\usage{
+vargamma.estK(X, startpar=c(kappa=1,scale=1), nu = -1/4, lambda=NULL,
+            q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...)
+}
+\arguments{
+  \item{X}{
+    Data to which the model will be fitted.
+    Either a point pattern or a summary statistic.
+    See Details.
+  }
+  \item{startpar}{
+    Vector of starting values for the parameters of the model.
+  }
+  \item{nu}{
+    Numerical value controlling the shape of the tail of the clusters.
+    A number greater than \code{-1/2}.
+  }
+  \item{lambda}{
+    Optional. An estimate of the intensity of the point process.
+  }
+  \item{q,p}{
+    Optional. Exponents for the contrast criterion.
+  }
+  \item{rmin, rmax}{
+    Optional. The interval of \eqn{r} values for the contrast criterion.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link[stats]{optim}}
+    to control the optimisation algorithm. See Details.
+  }
+}
+\details{
+  This algorithm fits the Neyman-Scott Cluster point process model
+  with Variance Gamma kernel (Jalilian et al, 2013)
+  to a point pattern dataset
+  by the Method of Minimum Contrast, using the \eqn{K} function.
+
+  The argument \code{X} can be either
+  \describe{
+    \item{a point pattern:}{An object of class \code{"ppp"}
+      representing a point pattern dataset. 
+      The \eqn{K} function of the point pattern will be computed
+      using \code{\link{Kest}}, and the method of minimum contrast
+      will be applied to this.
+    }
+    \item{a summary statistic:}{An object of class \code{"fv"} containing
+      the values of a summary statistic, computed for a point pattern
+      dataset. The summary statistic should be the \eqn{K} function,
+      and this object should have been obtained by a call to
+      \code{\link{Kest}} or one of its relatives.
+    }
+  }
+
+  The algorithm fits the Neyman-Scott Cluster point process
+  with Variance Gamma kernel to \code{X},
+  by finding the parameters of the model
+  which give the closest match between the
+  theoretical \eqn{K} function of the model
+  and the observed \eqn{K} function.
+  For a more detailed explanation of the Method of Minimum Contrast,
+  see \code{\link{mincontrast}}.
+  
+  The Neyman-Scott cluster point process with Variance Gamma
+  kernel is described in Jalilian et al (2013).
+  It is a cluster process formed by taking a 
+  pattern of parent points, generated according to a Poisson process
+  with intensity \eqn{\kappa}{kappa}, and around each parent point,
+  generating a random number of offspring points, such that the
+  number of offspring of each parent is a Poisson random variable with mean
+  \eqn{\mu}{mu}, and the locations of the offspring points of one parent
+  have a common distribution described in Jalilian et al (2013).
+
+  The shape of the kernel is determined by the dimensionless
+  index \code{nu}. This is the parameter
+  \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in
+  equation (12) on page 126 of Jalilian et al (2013).
+  In previous versions of spatstat instead of specifying \code{nu}
+  (called \code{nu.ker} at that time) the user could specify
+  \code{nu.pcf} which is the parameter \eqn{\nu=\alpha-1}{nu = alpha-1}
+  appearing in equation (13), page 127 of Jalilian et al (2013).
+  These are related by \code{nu.pcf = 2 * nu.ker + 1}
+  and \code{nu.ker = (nu.pcf - 1)/2}. This syntax is still supported, but
+  is no longer recommended, for consistency across the package. If it is
+  used, exactly one of \code{nu.ker} or \code{nu.pcf} must be specified.
+  
+  If the argument \code{lambda} is provided, then this is used
+  as the value of the point process intensity \eqn{\lambda}{lambda}.
+  Otherwise, if \code{X} is a
+  point pattern, then  \eqn{\lambda}{lambda}
+  will be estimated from \code{X}. 
+  If \code{X} is a summary statistic and \code{lambda} is missing,
+  then the intensity \eqn{\lambda}{lambda} cannot be estimated, and
+  the parameter \eqn{\mu}{mu} will be returned as \code{NA}.
+
+  The remaining arguments \code{rmin,rmax,q,p} control the
+  method of minimum contrast; see \code{\link{mincontrast}}.
+
+  The corresponding model can be simulated using \code{\link{rVarGamma}}.
+
+  The parameter \code{eta} appearing in \code{startpar} is equivalent to the
+  scale parameter \code{omega} used in  \code{\link{rVarGamma}}.
+  
+  Homogeneous or inhomogeneous Neyman-Scott/VarGamma models can also be
+  fitted using the function \code{\link{kppm}} and the fitted models
+  can be simulated using \code{\link{simulate.kppm}}.
+
+  The optimisation algorithm can be controlled through the
+  additional arguments \code{"..."} which are passed to the
+  optimisation function \code{\link[stats]{optim}}. For example,
+  to constrain the parameter values to a certain range,
+  use the argument \code{method="L-BFGS-B"} to select an optimisation
+  algorithm that respects box constraints, and use the arguments
+  \code{lower} and \code{upper} to specify (vectors of) minimum and
+  maximum values for each parameter.
+}
+\value{
+  An object of class \code{"minconfit"}. There are methods for printing
+  and plotting this object. It contains the following main components:
+  \item{par }{Vector of fitted parameter values.}
+  \item{fit }{Function value table (object of class \code{"fv"})
+    containing the observed values of the summary statistic
+    (\code{observed}) and the theoretical values of the summary
+    statistic computed from the fitted model parameters.
+  }
+}
+\references{
+  Jalilian, A., Guan, Y. and Waagepetersen, R. (2013)
+  Decomposition of variance for spatial Cox processes.
+  \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137.
+
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\author{Abdollah Jalilian and Rasmus Waagepetersen.
+  Adapted for \pkg{spatstat} by \adrian
+  
+  
+}
+\seealso{
+  \code{\link{kppm}},
+  \code{\link{vargamma.estpcf}},
+  \code{\link{lgcp.estK}},
+  \code{\link{thomas.estK}},
+  \code{\link{cauchy.estK}},
+  \code{\link{mincontrast}},
+  \code{\link{Kest}},
+  \code{\link{Kmodel}}.
+
+  \code{\link{rVarGamma}} to simulate the model.
+}
+\examples{
+   \testonly{
+     u <- vargamma.estK(redwood, startpar=c(kappa=15, eta=0.075))
+    }
+    if(interactive()) {
+      u <- vargamma.estK(redwood)
+      u
+      plot(u)
+    }
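+    # box constraints on the parameters, passed through ... to optim
+    # (a sketch; the bounds are illustrative values, not recommendations)
+    if(interactive()) {
+      u2 <- vargamma.estK(redwood, startpar=c(kappa=15, eta=0.075),
+                          method="L-BFGS-B",
+                          lower=c(5, 0.01), upper=c(50, 1))
+    }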
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/vargamma.estpcf.Rd b/man/vargamma.estpcf.Rd
new file mode 100644
index 0000000..d360e3c
--- /dev/null
+++ b/man/vargamma.estpcf.Rd
@@ -0,0 +1,173 @@
+\name{vargamma.estpcf}
+\alias{vargamma.estpcf}
+\title{Fit the Neyman-Scott Cluster Point Process with Variance Gamma kernel}
+\description{
+  Fits the Neyman-Scott cluster point process, with Variance Gamma
+  kernel, to a point pattern dataset by the Method of Minimum Contrast,
+  using the pair correlation function.
+}
+\usage{
+vargamma.estpcf(X, startpar=c(kappa=1,scale=1), nu = -1/4, lambda=NULL,
+            q = 1/4, p = 2, rmin = NULL, rmax = NULL,
+            ..., pcfargs = list())
+}
+\arguments{
+  \item{X}{
+    Data to which the model will be fitted.
+    Either a point pattern or a summary statistic.
+    See Details.
+  }
+  \item{startpar}{
+    Vector of starting values for the parameters of the model.
+  }
+  \item{nu}{
+    Numerical value controlling the shape of the tail of the clusters.
+    A number greater than \code{-1/2}.
+  }
+  \item{lambda}{
+    Optional. An estimate of the intensity of the point process.
+  }
+  \item{q,p}{
+    Optional. Exponents for the contrast criterion.
+  }
+  \item{rmin, rmax}{
+    Optional. The interval of \eqn{r} values for the contrast criterion.
+  }
+  \item{\dots}{
+    Optional arguments passed to \code{\link[stats]{optim}}
+    to control the optimisation algorithm. See Details.
+  }
+  \item{pcfargs}{
+    Optional list containing arguments passed to \code{\link{pcf.ppp}}
+    to control the smoothing in the estimation of the
+    pair correlation function.
+  }
+}
+\details{
+  This algorithm fits the Neyman-Scott Cluster point process model
+  with Variance Gamma kernel (Jalilian et al, 2013)
+  to a point pattern dataset
+  by the Method of Minimum Contrast, using the pair correlation function.
+
+  The argument \code{X} can be either
+  \describe{
+    \item{a point pattern:}{An object of class \code{"ppp"}
+      representing a point pattern dataset. 
+      The pair correlation function of the point pattern will be computed
+      using \code{\link{pcf}}, and the method of minimum contrast
+      will be applied to this.
+    }
+    \item{a summary statistic:}{An object of class \code{"fv"} containing
+      the values of a summary statistic, computed for a point pattern
+      dataset. The summary statistic should be the pair correlation function,
+      and this object should have been obtained by a call to
+      \code{\link{pcf}} or one of its relatives.
+    }
+  }
+
+  The algorithm fits the Neyman-Scott Cluster point process
+  with Variance Gamma kernel to \code{X},
+  by finding the parameters of the model
+  which give the closest match between the
+  theoretical pair correlation function of the model
+  and the observed pair correlation function.
+  For a more detailed explanation of the Method of Minimum Contrast,
+  see \code{\link{mincontrast}}.
+  
+  The Neyman-Scott cluster point process with Variance Gamma
+  kernel is described in Jalilian et al (2013).
+  It is a cluster process formed by taking a 
+  pattern of parent points, generated according to a Poisson process
+  with intensity \eqn{\kappa}{kappa}, and around each parent point,
+  generating a random number of offspring points, such that the
+  number of offspring of each parent is a Poisson random variable with mean
+  \eqn{\mu}{mu}, and the locations of the offspring points of one parent
+  have a common distribution described in Jalilian et al (2013).
+
+  The shape of the kernel is determined by the dimensionless
+  index \code{nu}. This is the parameter
+  \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in
+  equation (12) on page 126 of Jalilian et al (2013).
+  In previous versions of spatstat instead of specifying \code{nu}
+  (called \code{nu.ker} at that time) the user could specify
+  \code{nu.pcf} which is the parameter \eqn{\nu=\alpha-1}{nu = alpha-1}
+  appearing in equation (13), page 127 of Jalilian et al (2013).
+  These are related by \code{nu.pcf = 2 * nu.ker + 1}
+  and \code{nu.ker = (nu.pcf - 1)/2}. This syntax is still supported, but
+  is no longer recommended, for consistency across the package. If it is
+  used, exactly one of \code{nu.ker} or \code{nu.pcf} must be specified.
+  
+  If the argument \code{lambda} is provided, then this is used
+  as the value of the point process intensity \eqn{\lambda}{lambda}.
+  Otherwise, if \code{X} is a
+  point pattern, then  \eqn{\lambda}{lambda}
+  will be estimated from \code{X}. 
+  If \code{X} is a summary statistic and \code{lambda} is missing,
+  then the intensity \eqn{\lambda}{lambda} cannot be estimated, and
+  the parameter \eqn{\mu}{mu} will be returned as \code{NA}.
+
+  The remaining arguments \code{rmin,rmax,q,p} control the
+  method of minimum contrast; see \code{\link{mincontrast}}.
+
+  The corresponding model can be simulated using \code{\link{rVarGamma}}.
+  
+  The parameter \code{eta} appearing in \code{startpar} is equivalent to the
+  scale parameter \code{omega} used in  \code{\link{rVarGamma}}.
+  
+  Homogeneous or inhomogeneous Neyman-Scott/VarGamma models can also be
+  fitted using the function \code{\link{kppm}} and the fitted models
+  can be simulated using \code{\link{simulate.kppm}}.
+
+  The optimisation algorithm can be controlled through the
+  additional arguments \code{"..."} which are passed to the
+  optimisation function \code{\link[stats]{optim}}. For example,
+  to constrain the parameter values to a certain range,
+  use the argument \code{method="L-BFGS-B"} to select an optimisation
+  algorithm that respects box constraints, and use the arguments
+  \code{lower} and \code{upper} to specify (vectors of) minimum and
+  maximum values for each parameter.
+}
+\value{
+  An object of class \code{"minconfit"}. There are methods for printing
+  and plotting this object. It contains the following main components:
+  \item{par }{Vector of fitted parameter values.}
+  \item{fit }{Function value table (object of class \code{"fv"})
+    containing the observed values of the summary statistic
+    (\code{observed}) and the theoretical values of the summary
+    statistic computed from the fitted model parameters.
+  }
+}
+\references{
+  Jalilian, A., Guan, Y. and Waagepetersen, R. (2013)
+  Decomposition of variance for spatial Cox processes.
+  \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137.
+
+  Waagepetersen, R. (2007)
+  An estimating function approach to inference for
+  inhomogeneous Neyman-Scott processes.
+  \emph{Biometrics} \bold{63}, 252--258.
+}
+\author{Abdollah Jalilian and Rasmus Waagepetersen.
+  Adapted for \pkg{spatstat} by \adrian
+  
+  
+}
+\seealso{
+  \code{\link{kppm}},
+  \code{\link{vargamma.estK}},
+  \code{\link{lgcp.estpcf}},
+  \code{\link{thomas.estpcf}},
+  \code{\link{cauchy.estpcf}},
+  \code{\link{mincontrast}},
+  \code{\link{pcf}},
+  \code{\link{pcfmodel}}.
+
+  \code{\link{rVarGamma}} to simulate the model.
+}
+\examples{
+    u <- vargamma.estpcf(redwood)
+    u
+    plot(u, legendpos="topright")
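+    # smoother estimate of the pair correlation function:
+    # 'stoyan' is the bandwidth coefficient of pcf.ppp (a sketch)
+    if(interactive()) {
+      u2 <- vargamma.estpcf(redwood, pcfargs=list(stoyan=0.2))
+    }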
+}
+\keyword{spatial}
+\keyword{models}
diff --git a/man/vcov.kppm.Rd b/man/vcov.kppm.Rd
new file mode 100644
index 0000000..0653f28
--- /dev/null
+++ b/man/vcov.kppm.Rd
@@ -0,0 +1,100 @@
+\name{vcov.kppm}
+\alias{vcov.kppm}
+\title{Variance-Covariance Matrix for a Fitted Cluster Point Process Model}
+\description{
+  Returns the variance-covariance matrix of the estimates of the
+  parameters of a fitted cluster point process model.
+}
+\usage{
+   \method{vcov}{kppm}(object, ...,
+          what=c("vcov", "corr", "fisher", "internals"),
+          fast = NULL, rmax = NULL, eps.rmax = 0.01,
+          verbose = TRUE)
+}
+\arguments{
+  \item{object}{
+    A fitted cluster point process model (an object of class
+    \code{"kppm"}.)
+  }
+  \item{\dots}{
+    Ignored.
+  }
+  \item{what}{
+    Character string (partially-matched)
+    that specifies what matrix is returned.
+    Options are \code{"vcov"} for the variance-covariance matrix,
+    \code{"corr"} for the correlation matrix, and
+    \code{"fisher"} for the Fisher information matrix.
+  }
+  \item{fast}{
+    Logical specifying whether tapering (using sparse matrices from
+    \pkg{Matrix}) should be used to speed up calculations. Warning: This
+    is expected to underestimate the true asymptotic
+    variances/covariances.
+  }  
+  \item{rmax}{
+    Optional. The dependence range. Not usually specified by the
+    user. Only used when \code{fast=TRUE}.
+  }
+  \item{eps.rmax}{
+    Numeric. A small positive number which is used to determine \code{rmax}
+    from the tail behaviour of the pair correlation function when the fast
+    option (\code{fast=TRUE}) is used. Specifically,
+    \code{rmax} is the smallest value of \eqn{r}
+    at which \eqn{(g(r)-1)/(g(0)-1)}
+    falls below \code{eps.rmax}.
+    Only used when \code{fast=TRUE}.
+    Ignored if \code{rmax} is provided.
+  }
+  \item{verbose}{
+    Logical value indicating whether to print progress reports
+    during very long calculations.
+  }
+}
+\details{
+  This function computes the asymptotic variance-covariance
+  matrix of the estimates of the canonical (regression) parameters in the
+  cluster point process model \code{object}. It is a method for the 
+  generic function \code{\link{vcov}}.
+  
+  The result is an \code{n * n} matrix where
+  \code{n = length(coef(object))}.
+
+  To calculate a confidence interval for a regression parameter,
+  use \code{\link[stats]{confint}} as shown in the examples.
+}
+\value{
+  A square matrix. 
+}
+\references{
+  Waagepetersen, R. (2008)
+  Estimating functions for inhomogeneous spatial point processes
+  with incomplete covariate data.
+  \emph{Biometrika} \bold{95}, 351--363.
+}
+\author{
+  Abdollah Jalilian and Rasmus Waagepetersen.
+  Ported to \pkg{spatstat} by \adrian
+  
+  and \ege.
+}
+\seealso{
+  \code{\link{kppm}}, 
+  \code{\link{vcov}}, 
+  \code{\link{vcov.ppm}}
+}
+\examples{
+   data(redwood)
+   fit <- kppm(redwood ~ x + y)
+   vcov(fit)
+   vcov(fit, what="corr")
+
+   # confidence interval
+   confint(fit)
+   # cross-check the confidence interval by hand:
+   sd <- sqrt(diag(vcov(fit)))
+   t(coef(fit) + 1.96 * outer(sd, c(lower=-1, upper=1)))
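+
+   # tapered (sparse-matrix) approximation: faster, but expected
+   # to underestimate the true variances
+   vcov(fit, fast=TRUE)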
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{models}
diff --git a/man/vcov.mppm.Rd b/man/vcov.mppm.Rd
new file mode 100644
index 0000000..8d1e077
--- /dev/null
+++ b/man/vcov.mppm.Rd
@@ -0,0 +1,86 @@
+\name{vcov.mppm}
+\alias{vcov.mppm}
+\title{Calculate Variance-Covariance Matrix for Fitted Multiple Point
+  Process Model}
+\description{
+  Given a fitted multiple point process model, calculate the
+  variance-covariance matrix of the parameter estimates.
+}
+\usage{
+   \method{vcov}{mppm}(object, ..., what="vcov", err="fatal")
+}
+\arguments{
+  \item{object}{
+    A multiple point process model (object of class \code{"mppm"}).
+  }
+  \item{\dots}{
+    Arguments recognised by \code{\link{vcov.ppm}}.
+  }
+  \item{what}{
+    Character string indicating which quantity should be calculated.
+    Options include \code{"vcov"} for the variance-covariance matrix,
+    \code{"corr"} for the correlation matrix, and \code{"fisher"}
+    for the Fisher information matrix.
+  }
+  \item{err}{
+    Character string indicating what action to take if an error occurs.
+    Either \code{"fatal"}, \code{"warn"} or \code{"null"}.
+  }
+}
+\details{
+  This is a method for the generic function \code{\link{vcov}}.
+  
+  The argument \code{object} should be a fitted multiple point process
+  model (object of class \code{"mppm"}) generated by \code{\link{mppm}}.
+  
+  The variance-covariance matrix of the parameter estimates
+  is computed using asymptotic theory for maximum likelihood
+  (for Poisson processes) or estimating equations (for other Gibbs models).
+  
+  If \code{what="vcov"} (the default), the variance-covariance matrix
+  is returned. 
+  If \code{what="corr"}, the variance-covariance matrix is normalised
+  to yield a correlation matrix, and this is returned.
+  If \code{what="fisher"}, the Fisher information matrix is returned instead.
+
+  In all three cases, the rows and columns of the matrix correspond
+  to the parameters (coefficients) in the same order as in
+  \code{coef(object)}.
+
+  If errors or numerical problems occur, the
+  argument \code{err} determines what will happen. If
+  \code{err="fatal"} an error will occur. If \code{err="warn"}
+  a warning will be issued and \code{NA} will be returned.
+  If \code{err="null"}, no warning is issued, but \code{NULL} is returned.
+}
+\value{
+  A numeric matrix (or \code{NA} or \code{NULL}).
+}
+\section{Error messages}{
+  An error message that reports
+  \emph{system is computationally singular} indicates that the
+  determinant of the Fisher information matrix of one of the models
+  was either too large or too small for reliable numerical calculation.
+  See \code{\link{vcov.ppm}} for suggestions on how to handle this.
+}
+\seealso{
+  \code{\link{vcov}}, \code{\link{vcov.ppm}},
+  \code{\link{mppm}}
+}
+\examples{
+   fit <- mppm(Wat ~x, data=hyperframe(Wat=waterstriders))
+   vcov(fit)
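+   # correlation matrix of the parameter estimates
+   vcov(fit, what="corr")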
+}
+\references{
+  Baddeley, A., Rubak, E. and Turner, R. (2015)
+  \emph{Spatial Point Patterns: Methodology and Applications with R}.
+  London: Chapman and Hall/CRC Press. 
+}
+\author{
+  Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff.
+  Implemented by 
+  \spatstatAuthors.
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{models}
diff --git a/man/vcov.ppm.Rd b/man/vcov.ppm.Rd
new file mode 100644
index 0000000..44cdd17
--- /dev/null
+++ b/man/vcov.ppm.Rd
@@ -0,0 +1,229 @@
+\name{vcov.ppm}
+\alias{vcov.ppm}
+\title{Variance-Covariance Matrix for a Fitted Point Process Model}
+\description{
+  Returns the variance-covariance matrix of the estimates of the
+  parameters of a fitted point process model.
+}
+\usage{
+  \method{vcov}{ppm}(object, \dots, what = "vcov", verbose = TRUE,
+                    fine=FALSE,
+                    gam.action=c("warn", "fatal", "silent"),
+                    matrix.action=c("warn", "fatal", "silent"),
+                    logi.action=c("warn", "fatal", "silent"),
+                    hessian=FALSE)
+}
+\arguments{
+  \item{object}{A fitted point process model (an object of class \code{"ppm"}).}
+  \item{\dots}{Ignored.}
+  \item{what}{Character string (partially-matched)
+    that specifies what matrix is returned.
+    Options are \code{"vcov"} for the variance-covariance matrix,
+    \code{"corr"} for the correlation matrix, and
+    \code{"fisher"} or \code{"Fisher"}
+    for the Fisher information matrix.
+  }
+  \item{fine}{
+    Logical value indicating whether to use a quick estimate
+    (\code{fine=FALSE}, the default) or a slower, more accurate
+    estimate (\code{fine=TRUE}).
+  }
+  \item{verbose}{Logical. If \code{TRUE}, a message will be printed
+    if various minor problems are encountered.
+  }
+  \item{gam.action}{String indicating what to do if \code{object} was
+    fitted by \code{gam}. 
+  }
+  \item{matrix.action}{String indicating what to do if the matrix
+    is ill-conditioned (so that its inverse cannot be calculated).
+  }
+  \item{logi.action}{String indicating what to do if \code{object} was
+    fitted via the logistic regression approximation using a
+    non-standard dummy point process.
+  }
+  \item{hessian}{
+    Logical. Use the negative Hessian matrix
+    of the log pseudolikelihood instead of the Fisher information.
+  }
+}
+\details{
+  This function computes the asymptotic variance-covariance
+  matrix of the estimates of the canonical parameters in the
+  point process model \code{object}. It is a method for the 
+  generic function \code{\link{vcov}}.
+
+  \code{object} should be an object of class \code{"ppm"}, typically
+  produced by \code{\link{ppm}}.
+
+  The canonical parameters of the fitted model \code{object}
+  are the quantities returned by \code{coef.ppm(object)}.
+  The function \code{vcov} calculates the variance-covariance matrix
+  for these parameters.
+  
+  The argument \code{what} provides three options:
+  \describe{
+    \item{\code{what="vcov"}}{
+      return the variance-covariance matrix of the parameter estimates
+    }
+    \item{\code{what="corr"}}{
+      return the correlation matrix of the parameter estimates
+    }
+    \item{\code{what="fisher"}}{
+      return the observed Fisher information matrix.
+    }
+  }
+  In all three cases, the result is a square matrix.
+  The rows and columns of the matrix correspond to the canonical
+  parameters given by \code{\link{coef.ppm}(object)}. The row and column
+  names of the matrix are also identical to the names in
+  \code{\link{coef.ppm}(object)}.
+
+  For models fitted by the Berman-Turner approximation (Berman and Turner, 1992;
+  Baddeley and Turner, 2000) to the maximum pseudolikelihood (using the
+  default \code{method="mpl"} in the call to \code{\link{ppm}}), the implementation works
+  as follows.
+  \itemize{
+    \item
+    If the fitted model \code{object} is a Poisson process,
+    the calculations are based on standard asymptotic theory for the maximum
+    likelihood estimator (Kutoyants, 1998).
+    The observed Fisher information matrix of the fitted model
+    \code{object} is first computed, by
+    summing over the Berman-Turner quadrature points in the fitted model.
+    The asymptotic variance-covariance matrix is calculated as the
+    inverse of the
+    observed Fisher information. The correlation matrix is then obtained
+    by normalising.
+    \item
+    If the fitted model is not a Poisson process (i.e. it is some other
+    Gibbs point process) then the calculations are based on
+    Coeurjolly and Rubak (2013). A consistent estimator of the
+    variance-covariance matrix is computed by summing terms over all
+    pairs of data points. If required, the Fisher information is
+    calculated as the inverse of the variance-covariance matrix.
+  }
+
+  For models fitted by the Huang-Ogata method (\code{method="ho"} in
+  the call to \code{\link{ppm}}), the implementation uses the 
+  Monte Carlo estimate of the Fisher information matrix that was
+  computed when the original model was fitted. 
+
+  For models fitted by the logistic regression approximation to the
+  maximum pseudolikelihood (\code{method="logi"} in the call to
+  \code{\link{ppm}}), calculations are based on Baddeley et al.
+  (2014). A consistent estimator of the variance-covariance matrix is
+  computed by summing terms over all pairs of data points. If required,
+  the Fisher information is calculated as the inverse of the
+  variance-covariance matrix. In this case the calculations depend on
+  the type of dummy pattern used, and currently only the types
+  \code{"stratrand"}, \code{"binomial"} and \code{"poisson"} as
+  generated by \code{\link{quadscheme.logi}} are implemented. For other
+  types the behaviour depends on the argument \code{logi.action}. If
+  \code{logi.action="fatal"} an error is produced. Otherwise, for types
+  \code{"grid"} and \code{"transgrid"} the formulas for
+  \code{"stratrand"} are used, which in many cases should be
+  conservative. For an arbitrary user-specified dummy pattern (type
+  \code{"given"}) the formulas for \code{"poisson"} are used, which in
+  many cases should be conservative. If \code{logi.action="warn"}, a
+  warning is issued; otherwise the calculation proceeds without a
+  warning.
+  
+  The argument \code{verbose} makes it possible to suppress some
+  diagnostic messages.
+
+  The asymptotic theory is not correct if the model was fitted using
+  \code{gam} (by calling \code{\link{ppm}} with \code{use.gam=TRUE}).
+  The argument \code{gam.action} determines what to do in this case.
+  If \code{gam.action="fatal"}, an error is generated.
+  If \code{gam.action="warn"}, a warning is issued and the calculation
+  proceeds using the incorrect theory for the parametric case, which is
+  probably a reasonable approximation in many applications.
+  If \code{gam.action="silent"}, the calculation proceeds without a
+  warning.
+  
+  If \code{hessian=TRUE} then the negative Hessian (second derivative)
+  matrix of the log pseudolikelihood, and its inverse, will be computed.
+  For non-Poisson models, this is not a valid estimate of variance,
+  but is useful for other calculations.
+
+  Note that standard errors and 95\% confidence intervals for
+  the coefficients can also be obtained using
+  \code{confint(object)} or \code{coef(summary(object))}.
+}
+\section{Error messages}{
+  An error message that reports
+  \emph{system is computationally singular} indicates that the
+  determinant of the Fisher information matrix was either too large 
+  or too small for reliable numerical calculation.
+
+  If this message occurs, try repeating the calculation
+  using \code{fine=TRUE}. 
+
+  Singularity can occur because of numerical overflow or
+  collinearity in the covariates. To check this, rescale the 
+  coordinates of the data points and refit the model. See the Examples.
+
+  In a Gibbs model, a singular matrix may also occur if the
+  fitted model is a hard core process: this is a feature of the
+  variance estimator. 
+}
+\value{
+  A square matrix.
+}
+\examples{
+  X <- rpoispp(42)
+  fit <- ppm(X, ~ x + y)
+  vcov(fit)
+  vcov(fit, what="Fish")
+
+  # example of singular system
+  m <- ppm(demopat ~polynom(x,y,2))
+  \dontrun{
+    try(v <- vcov(m))
+  }
+  # rescale x, y coordinates to range [0,1] x [0,1] approximately
+  demopatScale <- rescale(demopat, 10000)
+  m <- ppm(demopatScale ~ polynom(x,y,2))
+  v <- vcov(m)
+
+  # Gibbs example
+  fitS <- ppm(swedishpines ~1, Strauss(9))
+  coef(fitS)
+  sqrt(diag(vcov(fitS)))
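+
+  # negative Hessian of the log pseudolikelihood: not a valid
+  # variance estimate for this Gibbs model, but useful elsewhere
+  h <- vcov(fitS, hessian=TRUE)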
+}
+\author{
+  Original code for Poisson point process was written by
+  \adrian 
+  
+  and \rolf .
+  New code for stationary Gibbs point processes was generously contributed by
+  \ege and Jean-Francois Coeurjolly.
+  New code for generic Gibbs process written by \adrian.
+  New code for logistic method contributed by \ege.
+}
+\seealso{
+  \code{\link{vcov}} for the generic,
+
+  \code{\link{ppm}} for information about fitted models,
+  
+  \code{\link[stats]{confint}} for confidence intervals.
+}
+\references{
+  Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014)
+  Logistic regression for spatial Gibbs point processes.
+  \emph{Biometrika} \bold{101} (2) 377--392.
+
+  Coeurjolly, J.-F. and Rubak, E. (2013)
+  Fast covariance estimation for innovations
+  computed from a spatial Gibbs point process.
+  \emph{Scandinavian Journal of Statistics} \bold{40} 669--684.
+
+  Kutoyants, Y.A. (1998) 
+  \bold{Statistical Inference for Spatial Poisson Processes},
+  Lecture Notes in Statistics 134. 
+  New York: Springer 1998. 
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{models}
+
diff --git a/man/vcov.slrm.Rd b/man/vcov.slrm.Rd
new file mode 100644
index 0000000..753ca91
--- /dev/null
+++ b/man/vcov.slrm.Rd
@@ -0,0 +1,107 @@
+\name{vcov.slrm}
+\alias{vcov.slrm}
+\title{Variance-Covariance Matrix for a Fitted Spatial Logistic Regression}
+\description{
+  Returns the variance-covariance matrix of the estimates of the
+  parameters of a point process model that was fitted by
+  spatial logistic regression.
+}
+\usage{
+  \method{vcov}{slrm}(object, \dots,
+         what=c("vcov", "corr", "fisher", "Fisher")) 
+}
+\arguments{
+  \item{object}{A fitted point process model of class \code{"slrm"}.}
+  \item{\dots}{Ignored.}
+  \item{what}{Character string (partially-matched)
+    that specifies what matrix is returned.
+    Options are \code{"vcov"} for the variance-covariance matrix,
+    \code{"corr"} for the correlation matrix, and
+    \code{"fisher"} or \code{"Fisher"} for the Fisher information matrix.
+  }
+}
+\details{
+  This function computes the asymptotic variance-covariance
+  matrix of the estimates of the canonical parameters in the
+  point process model \code{object}. It is a method for the 
+  generic function \code{\link{vcov}}.
+
+  \code{object} should be an object of class \code{"slrm"}, typically
+  produced by \code{\link{slrm}}. It represents a Poisson point process
+  model fitted by spatial logistic regression.
+
+  The canonical parameters of the fitted model \code{object}
+  are the quantities returned by \code{coef.slrm(object)}.
+  The function \code{vcov} calculates the variance-covariance matrix
+  for these parameters.
+  
+  The argument \code{what} provides three options:
+  \describe{
+    \item{\code{what="vcov"}}{
+      return the variance-covariance matrix of the parameter estimates
+    }
+    \item{\code{what="corr"}}{
+      return the correlation matrix of the parameter estimates
+    }
+    \item{\code{what="fisher"}}{
+      return the observed Fisher information matrix.
+    }
+  }
+  In all three cases, the result is a square matrix.
+  The rows and columns of the matrix correspond to the canonical
+  parameters given by \code{\link{coef.slrm}(object)}. The row and column
+  names of the matrix are also identical to the names in
+  \code{\link{coef.slrm}(object)}.
+
+  Note that standard errors and 95\% confidence intervals for
+  the coefficients can also be obtained using
+  \code{confint(object)} or \code{coef(summary(object))}.
+
+  Standard errors for the fitted intensity can be obtained
+  using \code{\link{predict.slrm}}.
+}
+\section{Error messages}{
+  An error message that reports
+  \emph{system is computationally singular} indicates that the
+  determinant of the Fisher information matrix was either too large 
+  or too small for reliable numerical calculation.
+  This can occur because of numerical overflow or
+  collinearity in the covariates. 
+}
+\value{
+  A square matrix.
+}
+\examples{
+  X <- rpoispp(42)
+  fit <- slrm(X ~ x + y)
+  vcov(fit)
+  vcov(fit, what="corr")
+  vcov(fit, what="f")
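+
+  # standard errors and confidence intervals, as noted in Details
+  sqrt(diag(vcov(fit)))
+  confint(fit)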
+}
+\author{
+  \adrian 
+  
+  and \rolf .
+}
+\seealso{
+  \code{\link{vcov}} for the generic,
+
+  \code{\link{slrm}} for information about fitted models,
+
+  \code{\link{predict.slrm}} for other kinds of calculation about the model,
+
+  \code{\link[stats]{confint}} for confidence intervals.
+}
+\references{
+  Baddeley, A., Berman, M., Fisher, N.I., Hardegen, A., Milne, R.K.,
+  Schuhmacher, D., Shah, R. and Turner, R. (2010)
+  Spatial logistic regression and change-of-support
+  for spatial Poisson point processes.
+  \emph{Electronic Journal of Statistics}
+  \bold{4}, 1151--1201.
+  doi:10.1214/10-EJS581
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{models}
+
diff --git a/man/vertices.Rd b/man/vertices.Rd
new file mode 100644
index 0000000..8a0507e
--- /dev/null
+++ b/man/vertices.Rd
@@ -0,0 +1,59 @@
+\name{vertices}
+\alias{vertices}
+\alias{vertices.owin}
+\title{Vertices of a Window}
+\description{
+  Finds the vertices of a window, or similar object.
+}
+\usage{
+ vertices(w)
+
+ \method{vertices}{owin}(w)
+}
+\arguments{
+  \item{w}{A window (object of class \code{"owin"}) or similar object.}
+}
+\value{
+  A list with components \code{x} and \code{y} giving the coordinates
+  of the vertices.
+}
+\details{
+  This function computes the vertices (`corners') of a spatial window
+  or other object.
+
+  For \code{vertices.owin},
+  the argument \code{w} should be a window (an object of class
+  \code{"owin"}, see \code{\link{owin.object}} for details).
+
+  If \code{w} is a rectangle, the coordinates of the
+  four corner points are returned.
+
+  If \code{w} is a polygonal window (consisting of one or more
+  polygons), the coordinates of the vertices of all polygons are
+  returned.
+
+  If \code{w} is a binary mask, then a `boundary pixel' is defined to be
+  a pixel inside the window
+  which has at least one neighbour outside the window. The coordinates
+  of the centres of all boundary pixels are returned.
+}
+\seealso{
+  \code{\link{owin.object}}.
+}
+\examples{
+  data(letterR)
+  vert <- vertices(letterR)
+
+  plot(letterR, main="Polygonal vertices")
+  points(vert)
+  plot(letterR, main="Boundary pixels")
+  points(vertices(as.mask(letterR)))
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/vesicles.Rd b/man/vesicles.Rd
new file mode 100644
index 0000000..05d7266
--- /dev/null
+++ b/man/vesicles.Rd
@@ -0,0 +1,91 @@
+\name{vesicles}
+\alias{vesicles}
+\alias{vesicles.extra}
+\docType{data}
+\title{
+  Vesicles Data
+}
+\description{
+  Point pattern of synaptic vesicles observed in rat brain tissue.
+}
+\usage{data(vesicles)}
+\format{
+  The dataset \code{vesicles} is a point pattern
+  (object of class \code{"ppp"}) representing the location
+  of the synaptic vesicles. The window of the point pattern
+  represents the region of presynapse where synaptic vesicles were
+  observed in this study.
+  There is a hole in the window, representing the region occupied by
+  mitochondria, where synaptic vesicles do not occur.
+
+  The dataset \code{vesicles.extra} is a list with entries
+  \tabular{ll}{
+    \code{presynapse}\tab outer polygonal boundary of presynapse \cr
+    \code{mitochondria} \tab polygonal boundary of mitochondria \cr
+    \code{mask} \tab binary mask representation of vesicles window \cr
+    \code{activezone} \tab line segment pattern representing \cr
+                      \tab the active zone.
+  }
+  All coordinates are in nanometres (nm).		    
+}
+\details{
+  As part of a study on the effects of stress on brain function,
+  Khanmohammadi et al (2014) analysed the spatial pattern of
+  synaptic vesicles in 45-nanometre-thick sections of
+  rat brain tissue visualised in transmission electron microscopy.
+  
+  To investigate the influence of stress, Khanmohammadi et al (2014)
+  study the distribution of the synaptic vesicles in
+  the pre-synaptic neuron in relation to the active zone of the
+  presynaptic membrane. They hypothesize that the synaptic vesicle density
+  is a decreasing function of distance to the active zone. 
+
+  The boundaries for the active zone, mitochondria, pre- and post
+  synaptic terminals, and the centre of the synaptic vesicles
+  were annotated by hand on the image.
+}
+\section{Raw Data}{
+  For demonstration and training purposes,
+  the raw data files for this dataset are also
+  provided in the \pkg{spatstat} package installation:
+  \tabular{ll}{
+    \code{vesicles.txt}\tab spatial locations of vesicles \cr
+    \code{presynapse.txt}\tab vertices of \code{presynapse} \cr
+    \code{mitochondria.txt} \tab vertices of \code{mitochondria} \cr
+    \code{vesiclesimage.tif} \tab greyscale microscope image \cr
+    \code{vesiclesmask.tif} \tab binary image of \code{mask} \cr
+    \code{activezone.txt} \tab coordinates of \code{activezone} 
+  }
+  The files are in the folder \code{rawdata/vesicles} in the
+  \pkg{spatstat} installation directory. The precise location of the
+  files can be obtained using \code{\link[base]{system.file}}, as shown
+  in the examples.  
+}
+\source{
+  Nicoletta Nava, Mahdieh Khanmohammadi and Jens Randel Nyengaard.
+  Experiment performed by Nicoletta Nava at the
+  Stereology and Electron Microscopy Laboratory, Aarhus University,
+  Denmark. Images were annotated by Mahdieh Khanmohammadi
+  at the Department of Computer Science, University of Copenhagen.
+  Jens Randel Nyengaard provided supervision and guidance,
+  and curated the data.
+}
+\references{
+  Khanmohammadi, M., Waagepetersen, R., Nava, N.,
+  Nyengaard, J.R. and Sporring, J. (2014)
+  Analysing the distribution of synaptic vesicles using a
+  spatial point process model.
+  \emph{5th ACM Conference on Bioinformatics, Computational Biology
+  and Health Informatics}, Newport Beach, CA, USA, September 2014.
+}
+\examples{
+plot(vesicles)
+with(vesicles.extra,
+     plot(activezone, add=TRUE, col="red"))
+
+## read coordinates of vesicles from raw data, for training purposes
+vf <- system.file("rawdata/vesicles/vesicles.txt", package="spatstat")
+vdf <- read.table(vf, header=TRUE)
+}
+\keyword{spatial}
+\keyword{datasets}
diff --git a/man/volume.Rd b/man/volume.Rd
new file mode 100644
index 0000000..19e6df0
--- /dev/null
+++ b/man/volume.Rd
@@ -0,0 +1,47 @@
+\name{volume}
+\alias{volume}
+\title{Volume of an Object}
+\description{
+  Computes the volume of a spatial object such as a three-dimensional box. 
+}
+\usage{
+ volume(x)
+}
+\arguments{
+  \item{x}{
+    An object whose volume will be computed.
+  }
+}
+\value{
+  The numerical value of the volume of the object.
+}
+\details{
+  This function computes the volume of an object
+  such as a three-dimensional box. 
+
+  The function \code{volume} is generic, with methods for
+  the classes \code{"box3"} (three-dimensional boxes) and
+  \code{"boxx"} (multi-dimensional boxes).
+
+  There is also a method for the class \code{"owin"}
+  (two-dimensional windows), which is identical to
+  \code{\link{area.owin}},
+  and a method for the class \code{"linnet"} of linear networks,
+  which returns the length of the network.
+}
+\seealso{
+  \code{\link{area.owin}},
+  \code{\link{volume.box3}},
+  \code{\link{volume.boxx}},
+  \code{\link{volume.linnet}}
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/waka.Rd b/man/waka.Rd
new file mode 100644
index 0000000..12c530b
--- /dev/null
+++ b/man/waka.Rd
@@ -0,0 +1,44 @@
+\name{waka}
+\alias{waka}
+\docType{data}
+\title{
+  Trees in Waka National Park
+}
+\description{
+  This dataset is a spatial point pattern of trees 
+  recorded at Waka National Park, Gabon.
+  See Balinga et al (2006).
+
+  The dataset \code{waka} is a point pattern
+  (object of class \code{"ppp"}) containing the spatial coordinates
+  of each tree, marked by the tree diameter at breast height
+  \code{dbh}.
+  The survey region is a 100 by 100 metre square.
+  Coordinates are given in metres, while the \code{dbh} is in centimetres.
+}
+\usage{data(waka)}
+\examples{
+data(waka)
+plot(waka, markscale=0.01)
+title(sub="Tree diameters to scale")
+plot(waka, markscale=0.04)
+title(sub="Tree diameters 4x scale")
+}
+\source{
+  Nicolas Picard
+}
+\references{
+  Balinga, M., Sunderland, T., Walters, G., Issemb{\'e}, Y., Asaha, S. 
+  and Fombod, E. (2006)
+  \emph{A vegetation assessment of the Waka national park, Gabon.}
+  Herbier National du Gabon, LBG, MBG, WCS, FRP and
+  Smithsonian Institution, Libreville, Gabon. CARPE Report, 154 pp.
+  \url{http://carpe.umd.edu/}
+
+  Picard, N., Bar-Hen, A., Mortier, F. and Chadoeuf, J. (2009)
+  The multi-scale marked area-interaction point process: a model for
+  the spatial pattern of trees.
+  \emph{Scandinavian Journal of Statistics} \bold{36}, 23--41.
+}
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/waterstriders.Rd b/man/waterstriders.Rd
new file mode 100644
index 0000000..2c56048
--- /dev/null
+++ b/man/waterstriders.Rd
@@ -0,0 +1,59 @@
+\name{waterstriders}
+\alias{waterstriders}
+\docType{data}
+\title{
+  Waterstriders data.
+  Three independent replications of a point pattern
+  formed by insects.
+}
+\description{
+  The territorial behaviour of an insect group
+  called waterstriders was studied in 
+  a series of laboratory experiments by Dr Matti Nummelin
+  (University of Helsinki). The data were analysed in
+  the pioneering PhD thesis of Antti Penttinen (1984).
+
+  The dataset \code{waterstriders} is 
+  a list of three point patterns. Each point pattern gives
+  the locations of larvae of the waterstrider
+  \emph{Limnoporus (Gerris) rufoscutellatus} (larval stage V)
+  in a homogeneous area about 48 cm square. The point 
+  patterns can be assumed to be independent.
+
+  It is known that this species of waterstriders
+  exhibits territorialism at older larval stages and at the
+  adult stage. Therefore, if any deviation from Complete Spatial
+  Randomness exists in these three point patterns, it is expected
+  to be towards inhibition.
+
+  The data were obtained from photographs which were scanned manually.
+  The waterstriders are in a pool which is larger than the picture.
+  A guard area (width about 2.5 cm) has been deleted because 
+  it is a source of inhomogeneity in the interactions.
+
+  Penttinen (1984, chapter 5) fitted a pairwise interaction model with
+  a Strauss/hardcore interaction (see \code{\link{StraussHard}})
+  with hard core radius 1.5 cm and interaction radius 4.5 cm.
+} 
+\format{
+  \code{waterstriders} is a list of three point patterns
+  (objects of class \code{"ppp"}). It also has class \code{"listof"}
+  so that it can be plotted and printed directly. The point pattern
+  coordinates are in centimetres.
+}
+\usage{data(waterstriders)}
+\source{
+  Data were collected by Dr. Matti Nummelin (University of
+  Helsinki, Finland).
+  Data kindly provided by Prof. Antti Penttinen,
+  University of Jyv\"askyl\"a, Finland.
+}
+\references{
+Penttinen, A. (1984) 
+Modelling interaction in spatial point patterns:
+parameter estimation by the maximum likelihood method.
+\emph{Jyv\"askyl\"a Studies in Computer Science, Economics and
+  Statistics} \bold{7}, University of {Jyv\"askyl\"a}, Finland.
+}
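+\examples{
+  data(waterstriders)
+  # the three replicate patterns have class "listof" (see Format)
+  # and can be plotted directly; a minimal illustration:
+  plot(waterstriders)
+}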
+\keyword{datasets}
+\keyword{spatial}
diff --git a/man/weighted.median.Rd b/man/weighted.median.Rd
new file mode 100644
index 0000000..0a4b7bd
--- /dev/null
+++ b/man/weighted.median.Rd
@@ -0,0 +1,61 @@
+\name{weighted.median}
+\alias{weighted.median}
+\alias{weighted.quantile}
+\alias{weighted.var}
+\title{
+  Weighted Median, Quantiles or Variance
+}
+\description{
+  Compute the median, quantiles or variance of a set of numbers which have
+  weights associated with them.
+}
+\usage{
+weighted.median(x, w, na.rm = TRUE)
+
+weighted.quantile(x, w, probs=seq(0,1,0.25), na.rm = TRUE)
+
+weighted.var(x, w, na.rm = TRUE)
+}
+\arguments{
+  \item{x}{
+    Data values.
+    A vector of numeric values, for which the median, quantiles or
+    variance are required.
+  }
+  \item{w}{
+    Weights.
+    A vector of nonnegative numbers, of the same length as \code{x}.
+  }
+  \item{probs}{
+    Probabilities for which the quantiles should be computed.
+    A numeric vector of values between 0 and 1.
+  }
+  \item{na.rm}{
+    Logical. Whether to ignore \code{NA} values.
+  }
+}
+\details{
+  The \code{i}th observation \code{x[i]} is treated as having
+  a weight proportional to \code{w[i]}.
+
+  The weighted median is a value \code{m}
+  such that the total weight of data to the left of \code{m}
+  is equal to half the total weight.
+  If there is no such value, linear interpolation is performed.
+}
+\value{
+  A numeric value or vector.
+}
+\author{
+  \adrian.
+}
+\seealso{
+  \code{\link[stats]{quantile}}, \code{\link[stats]{median}}.
+}
+\examples{
+  x <- 1:20
+  w <- runif(20)
+  weighted.median(x, w)
+  weighted.quantile(x, w)
+  weighted.var(x, w)
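+
+  # a minimal sketch of the definition in Details, assuming no NA values:
+  # sort the data, accumulate the weights, and interpolate to find the
+  # value at which the cumulative weight reaches one half
+  # (may differ slightly from weighted.median's exact convention)
+  o <- order(x)
+  approx(cumsum(w[o])/sum(w), x[o], xout=0.5)$y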
+}
+\keyword{math}
diff --git a/man/where.max.Rd b/man/where.max.Rd
new file mode 100644
index 0000000..6f0f592
--- /dev/null
+++ b/man/where.max.Rd
@@ -0,0 +1,61 @@
+\name{where.max}
+\alias{where.max}
+\alias{where.min}
+\title{
+  Find Location of Maximum in a Pixel Image
+}
+\description{
+  Finds the spatial location(s) where a given pixel image
+  attains its maximum or minimum value.
+}
+\usage{
+  where.max(x, first = TRUE)
+  where.min(x, first = TRUE)
+}
+\arguments{
+  \item{x}{
+    A pixel image (object of class \code{"im"}).
+  }
+  \item{first}{
+    Logical value. If \code{TRUE} (the default), then only one location
+    will be returned. If \code{FALSE}, then all locations where the
+    maximum is achieved will be returned.
+  }
+}
+\details{
+  These functions find the spatial location or locations where the
+  pixel image \code{x} attains its maximum value (\code{where.max})
+  or minimum value (\code{where.min}).
+  The result is a point pattern giving the locations.
+
+  If \code{first=TRUE} (the default), then only one location will
+  be returned, namely the location with the smallest \eqn{y} coordinate
+  value which attains the maximum or minimum.
+  This behaviour is analogous to the functions
+  \code{\link[base]{which.min}} and
+  \code{\link[base:which.min]{which.max}}.
+
+  If \code{first=FALSE}, then the function returns
+  the locations of all pixels where the
+  maximum (or minimum) value is attained. This could be a large
+  number of points.
+}
+\value{
+  A point pattern (object of class \code{"ppp"}).
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{Summary.im}} for computing the minimum and maximum
+  of pixel values;
+  \code{\link{eval.im}} and \code{\link{Math.im}} for mathematical
+  expressions involving images; \code{\link{solutionset}} for finding
+  the set of pixels where a statement is true.
+}
+\examples{
+   D <- distmap(letterR, invert=TRUE)
+   plot(D)
+   plot(where.max(D), add=TRUE, pch=16, cols="green")
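+   ## where.min works analogously; a sketch, using the same image:
+   plot(where.min(D), add=TRUE, pch=16, cols="red")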
+}
+\keyword{spatial}
+\keyword{math}
diff --git a/man/whichhalfplane.Rd b/man/whichhalfplane.Rd
new file mode 100644
index 0000000..c5f729f
--- /dev/null
+++ b/man/whichhalfplane.Rd
@@ -0,0 +1,45 @@
+\name{whichhalfplane}
+\alias{whichhalfplane}
+\title{
+  Test Which Side of Infinite Line a Point Falls On
+}
+\description{
+  Given an infinite line
+  and a spatial point location,
+  determine which side of the line the point falls on.
+}
+\usage{
+whichhalfplane(L, x, y = NULL)
+}
+\arguments{
+  \item{L}{
+    Object of class \code{"infline"} specifying one or more
+    infinite straight lines in two dimensions.
+  }
+  \item{x,y}{
+    Arguments acceptable to \code{\link[grDevices]{xy.coords}}
+    specifying the locations of the points.
+  }
+}
+\details{
+  An infinite line \eqn{L} divides the two-dimensional plane into
+  two half-planes. This function returns a matrix \code{M} of logical values
+  in which \code{M[i,j] = TRUE} if the \code{j}th spatial point
+  lies below or to the left of the \code{i}th line.
+}
+\value{
+  A logical matrix.
+}
+\author{
+  \adrian.
+}
+\seealso{
+  \code{\link{infline}}
+}
+\examples{
+  L <- infline(p=runif(3), theta=runif(3, max=2*pi))
+  X <- runifpoint(4)
+  whichhalfplane(L, X)
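+  # the result has one row per line and one column per point:
+  dim(whichhalfplane(L, X))   # 3 rows, 4 columns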
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/whist.Rd b/man/whist.Rd
new file mode 100644
index 0000000..f0d321a
--- /dev/null
+++ b/man/whist.Rd
@@ -0,0 +1,75 @@
+\name{whist}
+\alias{whist}
+\title{
+  Weighted Histogram
+}
+\description{
+  Computes the weighted histogram of a set of observations
+  with a given set of weights.
+}
+\usage{
+whist(x, breaks, weights = NULL)
+}
+\arguments{
+  \item{x}{
+    Numeric vector of observed values.
+  }
+  \item{breaks}{
+    Vector of breakpoints for the histogram. 
+  }
+  \item{weights}{
+    Numeric vector of weights for the observed values. 
+  }
+}
+\details{
+  This low-level function computes (but does not plot) the weighted
+  histogram of a vector of observations \code{x} using a given
+  vector of \code{weights}.
+
+  The arguments \code{x} and \code{weights} should be numeric vectors of
+  equal length. They may include \code{NA} or infinite values.
+
+  The argument \code{breaks} should be a numeric vector whose entries
+  are strictly increasing. These values define the boundaries between the
+  successive histogram cells.
+  The breaks \emph{do not} have to span the range
+  of the observations.
+
+  There are \code{N-1} histogram cells, where \code{N = length(breaks)}.
+  An observation \code{x[i]} falls in the \code{j}th cell if
+  \code{breaks[j] <= x[i] < breaks[j+1]} (for \code{j < N-1})
+  or
+  \code{breaks[j] <= x[i] <= breaks[j+1]} (for \code{j = N-1}).
+  The weighted histogram value \code{h[j]} for the \code{j}th cell is
+  the sum of \code{weights[i]} for all observations \code{x[i]} that
+  fall in the cell.
+  
+  Note that, in contrast to the function \code{\link{hist}},
+  the function \code{whist} does not require the breakpoints to span the
+  range of the observations \code{x}. Values of \code{x} that fall outside the
+  range of \code{breaks} are handled separately; their total weights
+  are returned in the attributes \code{"low"} and \code{"high"} of the
+  histogram.
+}
+\value{
+  A numeric vector of length \code{N-1} containing the
+  histogram values, where \code{N = length(breaks)}.
+
+  The return value also has attributes \code{"low"} and \code{"high"}
+  giving the total weight of all observations that are less than
+  the lowest breakpoint, or greater than the highest breakpoint,
+  respectively.
+}
+\examples{
+  x <- rnorm(100)
+  b <- seq(-1,1,length=21)
+  w <- runif(100)
+  whist(x,b,w)
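+
+  # the histogram values plus the "low" and "high" attributes
+  # account for the total weight (a small consistency check):
+  h <- whist(x, b, w)
+  sum(h) + attr(h, "low") + attr(h, "high")   # equals sum(w)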
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+  with thanks to Peter Dalgaard.
+}
+\keyword{arith}
diff --git a/man/will.expand.Rd b/man/will.expand.Rd
new file mode 100644
index 0000000..24bd423
--- /dev/null
+++ b/man/will.expand.Rd
@@ -0,0 +1,51 @@
+\name{will.expand}
+\alias{will.expand}
+\title{
+  Test Expansion Rule
+}
+\description{
+  Determines whether an expansion rule will
+  actually expand the window or not.
+}
+\usage{
+will.expand(x)
+}
+\arguments{
+  \item{x}{
+    Expansion rule.
+    An object of class \code{"rmhexpand"}.
+  }
+}
+\details{
+  An object of class \code{"rmhexpand"} describes a rule for
+  expanding a simulation window. See \code{\link{rmhexpand}}
+  for details.
+
+  One possible expansion rule is to do nothing, i.e. not to expand
+  the window.
+  
+  This command inspects the expansion rule \code{x}
+  and determines whether it will or will not actually expand the window.
+  It returns \code{TRUE} if the window will be expanded.
+}
+\value{
+  Logical value.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{rmhexpand}},
+  \code{\link{expand.owin}}
+}
+\examples{
+  x <- rmhexpand(distance=0.2)
+  y <- rmhexpand(area=1)
+  will.expand(x)
+  will.expand(y)
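+  # x specified a positive expansion distance, so will.expand(x) is TRUE;
+  # y requested expansion to 1 times the original area (no change),
+  # so will.expand(y) should be FALSE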
+}
+\keyword{spatial}
+\keyword{manip}
diff --git a/man/with.fv.Rd b/man/with.fv.Rd
new file mode 100644
index 0000000..6532cb0
--- /dev/null
+++ b/man/with.fv.Rd
@@ -0,0 +1,117 @@
+\name{with.fv}
+\alias{with.fv}
+\title{Evaluate an Expression in a Function Table}
+\description{
+  Evaluate an R expression in a 
+  function value table (object of class \code{"fv"}).
+}
+\usage{
+\method{with}{fv}(data, expr, ..., fun = NULL, enclos=NULL)
+}
+\arguments{
+  \item{data}{A function value table (object of class \code{"fv"})
+    in which the expression will be evaluated.
+  }
+  \item{expr}{The expression to be evaluated. An \R language
+    expression, which may involve the names of columns in \code{data},
+    the special abbreviations \code{.}, \code{.x} and \code{.y},
+    and global constants or functions.
+  }
+  \item{\dots}{Ignored.}
+  \item{fun}{Logical value, specifying whether the result
+    should be interpreted as another function (\code{fun=TRUE})
+    or simply returned as a numeric vector or array (\code{fun=FALSE}).
+    See Details.
+  }
+  \item{enclos}{
+    An environment in which to search for variables that are
+    not found in \code{data}. Defaults to \code{\link{parent.frame}()}.
+  }
+}
+\details{
+  This is a method for the generic command \code{\link{with}}
+  for an object of class \code{"fv"} (function value table).
+
+  An object of class \code{"fv"} is a convenient way of storing and
+  plotting several different estimates of the same function. It is
+  effectively a data frame with extra attributes. 
+  See \code{\link{fv.object}} for further explanation.
+
+  This command makes it possible to perform computations that involve
+  different estimates of the same function. For example, it can be used
+  to compute the arithmetic difference between two different edge-corrected
+  estimates of the \eqn{K} function of a point pattern.
+
+  The argument \code{expr} should be an \R language expression. The expression
+  may involve
+  \itemize{
+    \item the name of any column in \code{data}, referring to
+    one of the estimates of the function;
+    \item the symbol \code{.} which stands for all
+    the available estimates of the function;
+    \item the symbol \code{.y} which stands for the recommended
+    estimate of the function (in an \code{"fv"} object, one of the
+    estimates is always identified as the recommended estimate);
+    \item the symbol \code{.x} which stands for the argument of the function;
+    \item global constants or functions.
+  }
+  See the Examples.
+  The expression should be capable of handling
+  vectors and matrices.
+
+  The interpretation of the argument \code{fun} is as follows:
+  \itemize{
+    \item
+    If \code{fun=FALSE}, the result of evaluating the expression
+    \code{expr} will be returned as a numeric vector, matrix or 
+    data frame.
+    \item
+    If \code{fun=TRUE}, then the result of evaluating \code{expr}
+    will be interpreted as containing the values of a new function.
+    The return value will be an object of class \code{"fv"}.
+    (This can only happen if the result has the right dimensions.)
+    \item 
+    The default is \code{fun=TRUE} if the result of evaluating
+    \code{expr} has more than one column, and \code{fun=FALSE} otherwise.
+  }
+  
+  To perform calculations involving \emph{several} objects of
+  class \code{"fv"}, use \code{\link{eval.fv}}.
+}
+\value{
+  A function value table (object of class \code{"fv"})
+  or a numeric vector or data frame.
+}
+\seealso{
+  \code{\link{with}},
+  \code{\link{fv.object}},
+  \code{\link{eval.fv}},
+  \code{\link{Kest}}
+}
+\examples{
+  # compute 4 estimates of the K function
+  X <- rpoispp(42)
+  K <- Kest(X)
+  plot(K)
+
+  # derive 4 estimates of the L function L(r) = sqrt(K(r)/pi)
+  L <- with(K, sqrt(./pi))
+  plot(L)
+
+  # compute 4 estimates of V(r) = L(r)/r
+  V <- with(L, ./.x)
+  plot(V)
+
+  # compute the maximum absolute difference between
+  # the isotropic and translation correction estimates of K(r)
+  D <- with(K, max(abs(iso - trans)))
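+
+  # return the result as a plain matrix rather than an "fv" object,
+  # using the 'fun' argument (an illustrative sketch; see Details)
+  M <- with(K, cbind(iso, trans), fun=FALSE)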
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\keyword{spatial}
+\keyword{manip}
+\keyword{programming}
diff --git a/man/with.hyperframe.Rd b/man/with.hyperframe.Rd
new file mode 100644
index 0000000..7c02535
--- /dev/null
+++ b/man/with.hyperframe.Rd
@@ -0,0 +1,80 @@
+\name{with.hyperframe}
+\alias{with.hyperframe}
+\title{Evaluate an Expression in Each Row of a Hyperframe}
+\description{
+  An expression, involving the names of columns in a hyperframe,
+  is evaluated separately for each row of the hyperframe.
+}
+\usage{
+\method{with}{hyperframe}(data, expr, ...,
+                         simplify = TRUE,
+                         ee = NULL, enclos=NULL)
+}
+\arguments{
+  \item{data}{A hyperframe (object of class \code{"hyperframe"})
+    containing data.
+  }
+  \item{expr}{An \R language expression to be evaluated.}
+  \item{\dots}{Ignored.}
+  \item{simplify}{
+    Logical. If \code{TRUE}, the return value
+    will be simplified to a vector whenever possible.
+  }
+  \item{ee}{
+    Alternative form of \code{expr}, as an object of class
+    \code{"expression"}.
+  }
+  \item{enclos}{
+    An environment in which to search for objects that are
+    not found in the hyperframe. Defaults to \code{\link{parent.frame}()}.
+  }
+}
+\details{
+  This function evaluates the expression \code{expr} in each row
+  of the hyperframe \code{data}. It is a method for the generic
+  function \code{\link{with}}.
+  
+  The argument \code{expr} should be an \R language expression
+  in which each variable name is either the name of a column in the
+  hyperframe \code{data}, or the name of an object in the parent frame
+  (the environment in which \code{with} was called).
+  The argument \code{ee} can be used as an alternative
+  to \code{expr} and should be an expression object (of
+  class \code{"expression"}).
+  
+  For each row of \code{data}, the expression will be evaluated
+  so that variables which are column names of \code{data} are
+  interpreted as the entries for those columns in the current row.
+
+  For example, if a hyperframe \code{h} has columns
+  called \code{A} and \code{B}, then \code{with(h, A != B)} inspects
+  each row of \code{h} in turn,
+  tests whether the entries in columns \code{A} and \code{B} differ,
+  and returns the \eqn{n} logical values.
+}
+\value{
+  Normally a list of length
+  \eqn{n} (where \eqn{n} is the number of rows) containing the results
+  of evaluating the expression for each row. 
+  If \code{simplify=TRUE} and each result is a single atomic value,
+  then the result is a vector or factor 
+  containing the same values.
+}
+\author{\adrian
+  
+  
+  and \rolf
+  
+}
+\seealso{
+  \code{\link{hyperframe}},
+  \code{\link{plot.hyperframe}}
+}
+\examples{
+  # generate Poisson point patterns with intensities 10 to 100
+   H <- hyperframe(L=seq(10,100, by=10))
+   X <- with(H, rpoispp(L))
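+
+   # add the patterns as a column, then evaluate row by row;
+   # npoints(X) gives one number per row, simplified to a numeric vector
+   # (an illustrative sketch)
+   H$X <- X
+   with(H, npoints(X))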
+}
+\keyword{spatial}
+\keyword{manip}
+\keyword{programming}
diff --git a/man/with.msr.Rd b/man/with.msr.Rd
new file mode 100644
index 0000000..cb96362
--- /dev/null
+++ b/man/with.msr.Rd
@@ -0,0 +1,81 @@
+\name{with.msr}
+\alias{with.msr}
+\title{Evaluate Expression Involving Components of a Measure}
+\description{
+  An expression involving the names of components of a measure
+  is evaluated.
+}
+\usage{
+\method{with}{msr}(data, expr, \dots)
+}
+\arguments{
+  \item{data}{
+    A measure (object of class \code{"msr"}).
+  }
+  \item{expr}{
+    An expression to be evaluated.
+  }
+  \item{\dots}{
+    Ignored.
+  }
+}
+\details{
+  This is a method for the generic function
+  \code{\link[base]{with}} for the class \code{"msr"}.
+  The argument \code{data} should be an object of class \code{"msr"}
+  representing a measure (a function which assigns a value to each
+  subset of two-dimensional space).
+
+  This function can be used to extract the components of the measure,
+  or to perform more complicated manipulations of the components.
+
+  The argument \code{expr} should be an un-evaluated expression
+  in the \R language. The expression may involve any of the variable
+  names listed below with their corresponding meanings. 
+  \tabular{ll}{
+    \code{qlocations} \tab (point pattern) all quadrature locations  \cr
+    \code{qweights} \tab (numeric) all quadrature weights  \cr
+    \code{density} \tab (numeric) density value at each quadrature point  \cr
+    \code{discrete} \tab (numeric) discrete mass at each quadrature point  \cr
+    \code{continuous} \tab (numeric) increment of continuous component  \cr
+    \code{increment} \tab (numeric) increment of measure  \cr
+    \code{is.atom} \tab (logical) whether quadrature point is an atom  \cr
+    \code{atoms} \tab (point pattern) locations of atoms  \cr
+    \code{atommass} \tab (numeric) masses of atoms
+  }
+  The measure is the sum of discrete and continuous components.
+  The discrete component assigns non-zero mass to several points called atoms.
+  The continuous component has a density which should be integrated
+  over a region to determine the value for that region.
+
+  An object of class \code{"msr"} approximates the continuous component
+  by a sum over quadrature points. The quadrature points are chosen
+  so that they include the atoms of the measure. In the list above,
+  we have \code{increment = continuous + discrete},
+  \code{continuous = density * qweights},
+  \code{is.atom = (discrete > 0)},
+  \code{atoms = qlocations[is.atom]} and
+  \code{atommass = discrete[is.atom]}.
+}
+\value{
+  The result of evaluating the expression could be
+  an object of any kind.
+}
+\author{
+  \spatstatAuthors.
+}
+\seealso{
+  \code{\link{msr}},
+  \code{\link{split.msr}}
+}
+\examples{
+   X <- rpoispp(function(x,y) { exp(3+3*x) })
+   fit <- ppm(X, ~x+y)
+   rp <- residuals(fit, type="pearson")
+
+   with(rp, atoms)
+   with(rp, qlocations \%mark\% continuous)
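+
+   # check the identity stated in Details, up to numerical error:
+   with(rp, max(abs(increment - (continuous + discrete))))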
+}
+\keyword{spatial}
+\keyword{manip}
+
diff --git a/man/with.ssf.Rd b/man/with.ssf.Rd
new file mode 100644
index 0000000..dd0c392
--- /dev/null
+++ b/man/with.ssf.Rd
@@ -0,0 +1,62 @@
+\name{with.ssf}
+\alias{with.ssf}
+\alias{apply.ssf}
+\title{
+  Evaluate Expression in a Spatially Sampled Function
+}
+\description{
+  Given a spatially sampled function,
+  evaluate an expression involving the function values.
+}
+\usage{
+  apply.ssf(X, \dots)
+
+  \method{with}{ssf}(data, \dots)
+}
+\arguments{
+  \item{X, data}{
+    A spatially sampled function (object of class \code{"ssf"}).
+  }
+  \item{\dots}{
+    Arguments passed to \code{\link{with.default}} or
+    \code{\link{apply}} specifying what to compute.
+  }
+}
+\details{
+  An object of class \code{"ssf"} represents a
+  function (real- or vector-valued) that has been
+  sampled at a finite set of points.
+  It contains a data frame
+  which provides the function values
+  at the sample points. 
+
+  In \code{with.ssf}, the expression specified by \code{\dots}
+  will be evaluated in this dataframe.
+  In \code{apply.ssf}, the dataframe will be subjected to
+  the \code{\link{apply}} operator using the additional arguments
+  \code{\dots}. 
+  
+  If the result of evaluation
+  is a data frame with one row for each data point,
+  or a numeric vector with one entry for each data point,
+  then the result will be an object of class \code{"ssf"}
+  containing this information. Otherwise, the result will be
+  a numeric vector.
+}
+\value{
+  An object of class \code{"ssf"} or a numeric vector.
+}
+\author{
+  \adrian.
+}
+\seealso{
+ \code{\link{ssf}}
+}
+\examples{
+  a <- ssf(cells, data.frame(d=nndist(cells), i=1:npoints(cells)))
+  with(a, i/d)
+}
+\keyword{spatial}
+\keyword{manip}
+\keyword{programming}
+
diff --git a/man/yardstick.Rd b/man/yardstick.Rd
new file mode 100644
index 0000000..cdb729d
--- /dev/null
+++ b/man/yardstick.Rd
@@ -0,0 +1,88 @@
+\name{yardstick}
+\alias{textstring}
+\alias{onearrow}
+\alias{yardstick}
+\title{
+  Text, Arrow or Scale Bar in a Diagram
+}
+\description{
+  Create spatial objects that represent a text string,
+  an arrow, or a yardstick (scale bar).
+}
+\usage{
+textstring(x, y, txt = NULL, \dots)
+
+onearrow(x0, y0, x1, y1, txt = NULL, \dots)
+
+yardstick(x0, y0, x1, y1, txt = NULL, \dots)
+}
+\arguments{
+  \item{x,y}{
+    Coordinates where the text should be placed.
+  }
+  \item{x0,y0,x1,y1}{
+    Spatial coordinates of both ends of the arrow or yardstick.
+    Alternatively \code{x0} can be a point pattern (class \code{"ppp"})
+    containing exactly two points, or a line segment pattern
+    (class \code{"psp"}) consisting of exactly one line segment.
+  }
+  \item{txt}{
+    The text to be displayed beside the line segment.
+    Either a character string or an expression.
+  }
+  \item{\dots}{
+    Additional named arguments for plotting the object.
+  }
+}
+\details{
+  These commands create objects that represent components of a
+  diagram:
+  \itemize{
+    \item \code{textstring} creates an object that represents a string
+    of text at a particular spatial location.
+    \item \code{onearrow} creates an object that represents an arrow
+    between two locations.
+    \item \code{yardstick} creates an object that represents
+    a scale bar: a line segment indicating the scale of the plot.
+  }
+  To display the relevant object, it should be plotted, using
+  \code{plot}. See the help files for the plot methods
+  \code{\link{plot.textstring}}, \code{\link{plot.onearrow}}
+  and \code{\link{plot.yardstick}}.
+
+  These objects are designed to be included as components in a
+  \code{\link{layered}} object or a \code{\link{solist}}. This makes it
+  possible to build up a diagram consisting of many spatial objects,
+  and to annotate the diagram with arrows, text and so on, so that
+  ultimately the entire diagram is plotted using \code{plot}.
+}
+\value{
+  An object of class \code{"diagramobj"} which also
+  belongs to one of the special classes \code{"textstring"}, \code{"onearrow"}
+  or \code{"yardstick"}. There are methods for \code{plot},
+  \code{print}, \code{"["} and \code{\link{shift}}.
+}
+\author{\adrian
+  
+  
+  \rolf
+  
+  and \ege
+  
+}
+\seealso{
+  \code{\link{plot.textstring}},
+  \code{\link{plot.onearrow}},
+  \code{\link{plot.yardstick}}.
+}
+\examples{
+  X <- rescale(swedishpines)
+  plot(X, pch=16, main="")
+  ys <- yardstick(as.psp(list(xmid=4, ymid=0.5, length=1, angle=0),
+                         window=Window(X)),
+                  txt="1 m")
+  plot(ys, angle=90)
+}
+\keyword{spatial}
+\keyword{hplot}
+
diff --git a/man/zapsmall.im.Rd b/man/zapsmall.im.Rd
new file mode 100644
index 0000000..29f9ab1
--- /dev/null
+++ b/man/zapsmall.im.Rd
@@ -0,0 +1,40 @@
+\name{zapsmall.im}
+\alias{zapsmall.im}
+\title{Rounding of Pixel Values}
+\description{
+  Modifies a pixel image, identifying those pixels that have values 
+  very close to zero, and replacing those values by zero.
+}
+\usage{
+  zapsmall.im(x, digits)
+}
+\arguments{
+  \item{x}{Pixel image (object of class \code{"im"}).}
+  \item{digits}{
+    Argument passed to \code{\link{zapsmall}}
+    indicating the precision to be used.
+  }
+}
+\details{
+  The function \code{\link{zapsmall}} is applied to each pixel value
+  of the image \code{x}.
+}
+\value{
+  Another pixel image.
+}
+\seealso{
+  \code{\link{zapsmall}}
+}
+\examples{
+  data(cells)
+  D <- density(cells)
+  zapsmall.im(D)
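+  # the precision can be controlled via the 'digits' argument
+  zapsmall.im(D, digits=6)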
+}
+\author{\ege
+  and \adrian
+  
+  
+}
+\keyword{spatial}
+\keyword{methods}
+\keyword{univar}
diff --git a/man/zclustermodel.Rd b/man/zclustermodel.Rd
new file mode 100644
index 0000000..fedfb2a
--- /dev/null
+++ b/man/zclustermodel.Rd
@@ -0,0 +1,52 @@
+\name{zclustermodel}
+\alias{zclustermodel}
+\title{
+  Cluster Point Process Model
+}
+\description{
+  Experimental code. Creates an object representing a cluster point
+  process model. Typically used for theoretical calculations about
+  such a model.
+}
+\usage{
+zclustermodel(name = "Thomas", \dots, mu, kappa, scale)
+}
+\arguments{
+  \item{name}{
+    Name of the cluster process. One of
+    \code{"Thomas"},
+    \code{"MatClust"},
+    \code{"VarGamma"} or
+    \code{"Cauchy"}.
+  }
+  \item{\dots}{
+    Other arguments needed for the model.
+  }
+  \item{mu}{
+    Mean cluster size. A single number, or a pixel image.
+  }
+  \item{kappa}{
+    Parent intensity. A single number.
+  }
+  \item{scale}{
+    Cluster scale parameter of the model.
+  }
+}
+\details{
+  Experimental.
+}
+\value{
+  Object of the experimental class \code{"zclustermodel"}.
+}
+\author{
+  \adrian
+}
+\seealso{
+  \code{\link{methods.zclustermodel}}
+}
+\examples{
+  m <- zclustermodel("Thomas", kappa=10, mu=5, scale=0.1)
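+  # print a description of the model
+  # (methods are listed in methods.zclustermodel)
+  m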
+}
+\keyword{spatial}
+\keyword{models}
+
diff --git a/src/Ediggatsti.c b/src/Ediggatsti.c
new file mode 100755
index 0000000..caa9501
--- /dev/null
+++ b/src/Ediggatsti.c
@@ -0,0 +1,82 @@
+#include <R.h>
+#include <R_ext/Utils.h>
+#include <math.h>
+#include "chunkloop.h"
+#include "looptest.h"
+#include "constants.h"
+
+/*
+
+  Ediggatsti.c
+
+  $Revision: 1.3 $     $Date: 2014/09/19 00:53:30 $
+
+  C implementation of 'eval' for DiggleGatesStibbard interaction 
+
+  Assumes point patterns are sorted in increasing order of x coordinate
+
+*/
+
+void Ediggatsti(nnsource, xsource, ysource, idsource, 
+		nntarget, xtarget, ytarget, idtarget, 
+		rrho, values) 
+     /* inputs */
+     int *nnsource, *nntarget;
+     double *xsource, *ysource, *xtarget, *ytarget;
+     int *idsource, *idtarget;
+     double *rrho;
+     /* output */
+     double *values;
+{
+  int nsource, ntarget, maxchunk, j, i, ileft, idsourcej;
+  double xsourcej, ysourcej, xleft, dx, dy, dx2, d2;
+  double rho, rho2, rho2pluseps, coef, product;
+
+  nsource = *nnsource;
+  ntarget = *nntarget;
+  rho   = *rrho;
+
+  if(nsource == 0 || ntarget == 0) 
+    return;
+
+  rho2   = rho * rho;
+  coef   = M_PI_2/rho;
+  rho2pluseps = rho2 + EPSILON(rho2);
+
+  ileft = 0;
+
+  OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) {
+    R_CheckUserInterrupt(); 
+    INNERCHUNKLOOP(j, nsource, maxchunk, 65536) {
+      product = 1;
+      xsourcej = xsource[j];
+      ysourcej = ysource[j];
+      idsourcej = idsource[j];
+      /* 
+	 adjust starting position
+
+      */
+      xleft  = xsourcej - rho;
+      while((xtarget[ileft] < xleft) && (ileft+1 < ntarget))
+	++ileft;
+      /* 
+	 process from ileft until dx > rho
+      */
+      for(i=ileft; i < ntarget; i++) {
+	dx = xtarget[i] - xsourcej;
+	dx2 = dx * dx;
+	if(dx2 > rho2pluseps)
+	  break;
+	if(idtarget[i] != idsourcej) {
+	  dy = ytarget[i] - ysourcej;
+	  d2 = dx2 + dy * dy;
+	  if(d2 <= rho2) 
+	    product *= sin(sqrt(d2) * coef);
+	}
+      }
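+      /* log(product^2): each pair within distance rho contributed
+         a factor sin((pi/2) * d/rho), so this is the log interaction */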
+      values[j] = log(product * product);
+    }
+  }
+}
+
+
diff --git a/src/Ediggra.c b/src/Ediggra.c
new file mode 100755
index 0000000..ad9a0f0
--- /dev/null
+++ b/src/Ediggra.c
@@ -0,0 +1,92 @@
+#include <R.h>
+#include <R_ext/Utils.h>
+#include "chunkloop.h"
+#include "looptest.h"
+
+/*
+
+  Ediggra.c
+
+  $Revision: 1.5 $     $Date: 2014/09/19 00:53:38 $
+
+  C implementation of 'eval' for DiggleGratton interaction (exponentiated)
+
+  Assumes point patterns are sorted in increasing order of x coordinate
+
+*/
+
+double sqrt();
+
+void Ediggra(nnsource, xsource, ysource, idsource, 
+	     nntarget, xtarget, ytarget, idtarget, 
+	     ddelta, rrho, values) 
+     /* inputs */
+     int *nnsource, *nntarget;
+     double *xsource, *ysource, *xtarget, *ytarget;
+     int *idsource, *idtarget;
+     double *ddelta, *rrho;
+     /* output */
+     double *values;
+{
+  int nsource, ntarget, maxchunk, j, i, ileft, idsourcej;
+  double xsourcej, ysourcej, xleft, dx, dy, dx2, d2;
+  double delta, rho, delta2, rho2, rho2pluseps, rhominusdelta;
+  double product;
+
+  nsource = *nnsource;
+  ntarget = *nntarget;
+  delta = *ddelta;
+  rho   = *rrho;
+
+  if(nsource == 0 || ntarget == 0) 
+    return;
+
+  rho2   = rho * rho;
+  delta2 = delta * delta;
+  rhominusdelta = rho - delta;
+  rho2pluseps = rho2 + EPSILON(rho2);
+
+  ileft = 0;
+
+  OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, nsource, maxchunk, 65536) {
+      product = 1;
+      xsourcej = xsource[j];
+      ysourcej = ysource[j];
+      idsourcej = idsource[j];
+
+      /* 
+	 adjust starting point
+      */
+
+      xleft  = xsourcej - rho;
+      while((xtarget[ileft] < xleft) && (ileft+1 < ntarget))
+	++ileft;
+
+      /* 
+	 process until dx > rho 
+	 (or until product is zero)
+      */
+      for(i=ileft; i < ntarget; i++) {
+	dx = xtarget[i] - xsourcej;
+	dx2 = dx * dx;
+	if(dx2 > rho2pluseps) 
+	  break;
+	if(idtarget[i] != idsourcej) {
+	  dy = ytarget[i] - ysourcej;
+	  d2 = dx2 + dy * dy;
+	  if(d2 <= rho2) {
+	    if(d2 <= delta2) {
+	      product = 0;
+	      break;
+	    }
+	    else 
+	      product *= (sqrt(d2) - delta)/rhominusdelta;
+	  }
+	}
+      }
+      values[j] = product;
+    }
+  }
+}
diff --git a/src/Efiksel.c b/src/Efiksel.c
new file mode 100755
index 0000000..7607189
--- /dev/null
+++ b/src/Efiksel.c
@@ -0,0 +1,79 @@
+#include <R.h>
+#include <R_ext/Utils.h>
+#include "chunkloop.h"
+#include "looptest.h"
+
+/*
+
+  Efiksel.c
+
+  $Revision: 1.3 $     $Date: 2012/03/28 05:55:29 $
+
+  C implementation of 'eval' for Fiksel interaction (non-hardcore part)
+
+  Assumes point patterns are sorted in increasing order of x coordinate
+
+*/
+
+double sqrt(), exp();
+
+void Efiksel(nnsource, xsource, ysource, 
+	     nntarget, xtarget, ytarget, 
+	     rrmax, kkappa, values) 
+/* inputs */
+     int *nnsource, *nntarget;
+     double *xsource, *ysource, *xtarget, *ytarget, *rrmax, *kkappa;
+     /* output */
+     double *values;
+{
+  int nsource, ntarget, maxchunk, j, i, ileft;
+  double xsourcej, ysourcej, xleft, dx, dy, dx2, d2;
+  double rmax, r2max, r2maxpluseps, kappa, total;
+
+  nsource = *nnsource;
+  ntarget = *nntarget;
+  rmax = *rrmax;
+  kappa = *kkappa;
+
+  if(nsource == 0 || ntarget == 0) 
+    return;
+
+  r2max = rmax * rmax;
+  r2maxpluseps = r2max + EPSILON(r2max);
+
+  ileft = 0;
+
+  OUTERCHUNKLOOP(j, nsource, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, nsource, maxchunk, 16384) {
+      total = 0;
+      xsourcej = xsource[j];
+      ysourcej = ysource[j];
+      /* 
+	 adjust starting point
+      */
+      xleft  = xsourcej - rmax;
+      while((xtarget[ileft] < xleft) && (ileft+1 < ntarget))
+	++ileft;
+
+      /* 
+	 process from ileft until dx > rmax
+      */
+      for(i=ileft; i < ntarget; i++) {
+	/* squared interpoint distance */
+	dx = xtarget[i] - xsourcej;
+	dx2 = dx * dx;
+	if(dx2 > r2maxpluseps)
+	  break;
+	dy = ytarget[i] - ysourcej;
+	d2 = dx2 + dy * dy;
+	if(d2 <= r2max)
+	  total += exp(- kappa * sqrt(d2));
+      }
+      values[j] = total;
+    }
+  }
+}
+
+
+
diff --git a/src/Egeyer.c b/src/Egeyer.c
new file mode 100755
index 0000000..77f602b
--- /dev/null
+++ b/src/Egeyer.c
@@ -0,0 +1,100 @@
+#include <R.h>
+#include <R_ext/Utils.h>
+#include "chunkloop.h"
+#include "looptest.h"
+
+/*
+
+  Egeyer.c
+
+  $Revision: 1.6 $     $Date: 2014/09/19 00:53:20 $
+
+  Part of C implementation of 'eval' for Geyer interaction
+
+  Calculates change in saturated count 
+
+  (xquad, yquad): quadscheme 
+  (xdata, ydata): data
+  tdata: unsaturated pair counts for data pattern
+  quadtodata[j] = i   if quad[j] == data[i]  (indices start from ZERO)
+  
+  Assumes point patterns are sorted in increasing order of x coordinate
+
+*/
+
+double sqrt();
+
+void Egeyer(nnquad, xquad, yquad, quadtodata,
+	    nndata, xdata, ydata, tdata,
+	    rrmax, ssat, result) 
+/* inputs */
+     int *nnquad, *nndata, *quadtodata, *tdata;
+     double *xquad, *yquad, *xdata, *ydata, *rrmax, *ssat;
+     /* output */
+     double *result;
+{
+  int nquad, ndata, maxchunk, j, i, ileft, dataindex, isdata;
+  double xquadj, yquadj, rmax, sat, r2max, r2maxpluseps, xleft, dx, dy, dx2, d2;
+  double tbefore, tafter, satbefore, satafter, delta, totalchange;
+
+  nquad = *nnquad;
+  ndata = *nndata;
+  rmax  = *rrmax;
+  sat   = *ssat;
+
+  if(nquad == 0 || ndata == 0) 
+    return;
+
+  r2max = rmax * rmax;
+  r2maxpluseps = r2max + EPSILON(r2max);
+
+  ileft = 0;
+
+  OUTERCHUNKLOOP(j, nquad, maxchunk, 65536) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, nquad, maxchunk, 65536) {
+      totalchange = 0.0;
+      xquadj = xquad[j];
+      yquadj = yquad[j];
+      dataindex = quadtodata[j];
+      isdata = (dataindex >= 0);
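+      /* quadrature point j is a data point iff quadtodata[j] >= 0 */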
+
+      /* 
+	 adjust starting point
+      */
+      xleft  = xquadj - rmax;
+      while((xdata[ileft] < xleft) && (ileft+1 < ndata))
+	++ileft;
+
+      /* 
+	 process until dx > rmax
+      */
+      for(i=ileft; i < ndata; i++) {
+	dx = xdata[i] - xquadj;
+	dx2 = dx * dx;
+	if(dx2 > r2maxpluseps)
+	  break;
+	if(i != dataindex) {
+	  dy = ydata[i] - yquadj;
+	  d2 = dx2 + dy * dy;
+	  if(d2 <= r2max) {
+	    /* effect of adding dummy point j or 
+	       negative effect of removing data point */
+	    tbefore = tdata[i];
+	    tafter  = tbefore + ((isdata) ? -1 : 1);
+	    /* effect on saturated values */
+	    satbefore = (double) ((tbefore < sat)? tbefore : sat);
+	    satafter  = (double) ((tafter  < sat)? tafter  : sat);
+	    /* sum changes over all i */
+	    delta = satafter - satbefore; 
+	    totalchange += ((isdata) ? -delta : delta);
+	  }
+	}
+      }
+      result[j] = totalchange;
+    }
+  }
+}
+
+
+
diff --git a/src/Estrauss.c b/src/Estrauss.c
new file mode 100755
index 0000000..91d80c4
--- /dev/null
+++ b/src/Estrauss.c
@@ -0,0 +1,75 @@
+#include <R.h>
+#include <R_ext/Utils.h>
+#include "chunkloop.h"
+#include "looptest.h"
+/*
+
+  Estrauss.c
+
+  $Revision: 1.4 $     $Date: 2014/09/19 00:54:07 $
+
+  C implementation of 'eval' for Strauss interaction
+
+  Calculates number of data points within distance r of each quadrature point
+  (when 'source' = quadrature points, 'target' = data points)
+
+  Assumes point patterns are sorted in increasing order of x coordinate
+
+*/
+
+double sqrt();
+
+void Ccrosspaircounts(nnsource, xsource, ysource, 
+		     nntarget, xtarget, ytarget, 
+		     rrmax, counts) 
+/* inputs */
+     int *nnsource, *nntarget;
+     double *xsource, *ysource, *xtarget, *ytarget, *rrmax;
+     /* output */
+     int *counts;
+{
+  int nsource, ntarget, maxchunk, j, i, ileft, counted;
+  double xsourcej, ysourcej, rmax, r2max, r2maxpluseps, xleft, dx, dy, dx2, d2;
+
+  nsource = *nnsource;
+  ntarget = *nntarget;
+  rmax = *rrmax;
+  r2max = rmax * rmax;
+  r2maxpluseps = r2max + EPSILON(r2max);
+
+  if(nsource == 0 || ntarget == 0) 
+    return;
+
+  ileft = 0;
+
+  OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, nsource, maxchunk, 65536) {
+      counted = 0;
+      xsourcej = xsource[j];
+      ysourcej = ysource[j];
+      /* 
+	 adjust starting point
+      */
+      xleft  = xsourcej - rmax;
+      while((xtarget[ileft] < xleft) && (ileft+1 < ntarget))
+	++ileft;
+
+      /* 
+	 process from ileft until dx > rmax
+      */
+      for(i=ileft; i < ntarget; i++) {
+	dx = xtarget[i] - xsourcej;
+	dx2 = dx * dx;
+	if(dx2 > r2maxpluseps)
+	  break;
+	dy = ytarget[i] - ysourcej;
+	d2 = dx2 + dy * dy;
+	if(d2 <= r2max)
+	  ++counted;
+      }
+      counts[j] = counted;
+    }
+  }
+}
+
diff --git a/src/Kborder.c b/src/Kborder.c
new file mode 100755
index 0000000..2dd90b7
--- /dev/null
+++ b/src/Kborder.c
@@ -0,0 +1,46 @@
+#include <R.h>
+#include <R_ext/Utils.h>
+#include <math.h>
+
+/* 
+  Kborder.c
+
+  Efficient computation of border-corrected estimates of K 
+  for large datasets
+  
+  KborderI()   Estimates K function, 
+               returns integer numerator & denominator
+
+  KborderD()   Estimates K function, 
+               returns double precision numerator & denominator
+
+  Kwborder()   Estimates Kinhom.
+
+  Functions require (x,y) data to be sorted in ascending order of x
+  and expect r values to be equally spaced and starting at zero
+   
+  $Revision: 1.4 $ $Date: 2013/05/27 02:09:10 $
+
+*/
+
+#undef WEIGHTED
+
+#define FNAME KborderI
+#define OUTTYPE int
+#include "Kborder.h"
+
+#undef FNAME
+#undef OUTTYPE
+#define FNAME KborderD
+#define OUTTYPE double
+#include "Kborder.h"
+
+#undef FNAME
+#undef OUTTYPE
+#define FNAME Kwborder
+#define WEIGHTED
+#define OUTTYPE double
+#include "Kborder.h"
+
+
+
diff --git a/src/Kborder.h b/src/Kborder.h
new file mode 100755
index 0000000..abeb6ba
--- /dev/null
+++ b/src/Kborder.h
@@ -0,0 +1,210 @@
+/*
+  
+  Kborder.h
+
+  Code template for K function estimators in Kborder.c
+
+  Variables:
+
+     FNAME        function name
+
+     OUTTYPE      storage type of the output vectors
+                  ('int' or 'double')
+
+     WEIGHTED     #defined for weighted (inhom) K function
+
+
+  Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2000-2013
+  Licence: GPL >= 2
+
+  $Revision: 1.11 $     $Date: 2013/09/18 04:06:59 $
+
+*/
+
+void FNAME(
+	   nxy, x, y, 
+#ifdef WEIGHTED
+	   w,
+#endif
+	   b, nr, rmax, numer, denom) 
+     /* inputs */
+     int *nxy, *nr;
+     double *x, *y, *b, *rmax;
+#ifdef WEIGHTED
+     double *w;
+#endif
+     /* outputs */
+     OUTTYPE *numer, *denom;
+{
+  int i, j, l, n, nt, n1, nt1, lmin, lmax, maxchunk;
+  double dt, tmax, xi, yi, bi, maxsearch, max2search;
+  double bratio, dratio, dij, dij2, dx, dy, dx2;
+  OUTTYPE *numerLowAccum, *numerHighAccum, *denomAccum;
+  OUTTYPE naccum, daccum;
+#ifdef WEIGHTED
+  double wi, wj, wij;
+#endif
+
+#ifdef WEIGHTED
+
+#define ZERO 0.0
+#define WI wi
+#define WJ wj
+#define WIJ wij
+
+#else 
+
+#define ZERO 0
+#define WI 1
+#define WJ 1
+#define WIJ 1
+
+#endif
+
+  n = *nxy;
+  nt = *nr;
+
+  n1 = n - 1;
+  nt1 = nt - 1;
+
+  dt = (*rmax)/(nt-1);
+  tmax = *rmax;
+
+  /* initialise */
+  numerLowAccum  = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE));
+  numerHighAccum = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE));
+  denomAccum     = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE));
+  for(l = 0; l < nt; l++)
+    numer[l] = denom[l] = 
+      numerLowAccum[l] = numerHighAccum[l] = 
+      denomAccum[l] = ZERO;
+
+  if(n == 0) 
+    return;
+
+  /* loop in chunks of 2^16 */
+
+  i = 0; maxchunk = 0; 
+  while(i < n) {
+
+    R_CheckUserInterrupt();
+
+    maxchunk += 65536; 
+    if(maxchunk > n) maxchunk = n;
+
+    for(; i < maxchunk; i++) {
+
+      /*  --------   DENOMINATOR  -------------*/
+      bi = b[i];
+#ifdef WEIGHTED
+      wi = w[i];
+#endif
+      /* increment denominator for all r < b[i] */
+      bratio = bi/dt;
+      /* lmax is the largest integer STRICTLY less than bratio */
+      lmax = (int) ceil(bratio) - 1;
+      lmax = (lmax <= nt1) ? lmax : nt1;
+      /* effectively increment entries 0 to lmax */
+      if(lmax >= 0) 
+	denomAccum[lmax] += WI;
+
+      /*  ----------  NUMERATOR -----------*/
+      /* scan through points (x[j],y[j]) */
+      xi = x[i];
+      yi = y[i];
+      maxsearch = (bi < tmax) ? bi : tmax;
+      max2search = maxsearch * maxsearch;
+
+      /* 
+	 scan backward from i-1 
+	 until |x[j]-x[i]| > maxsearch  or until we run out 
+      */
+      if(i > 0) {
+	for(j=i-1; j >= 0; j--) {
+	  /* squared interpoint distance */
+	  dx = x[j] - xi;
+	  dx2 = dx * dx;
+	  if(dx2 >= max2search)
+	    break;
+	  dy = y[j] - yi;
+	  dij2 = dx2 + dy * dy;
+	  if(dij2 < max2search) {
+#ifdef WEIGHTED 
+	    wj = w[j];
+#endif
+	    /* increment numerator for all r such that dij <= r < bi */
+	    dij = (double) sqrt(dij2);
+	    dratio = dij/dt;
+	    /* smallest integer greater than or equal to dratio */
+	    lmin = (int) ceil(dratio);
+	    /* increment entries lmin to lmax inclusive */
+	    if(lmax >= lmin) {
+#ifdef WEIGHTED
+	      wij = wi * wj;
+#endif
+	      numerLowAccum[lmin] += WIJ;
+	      numerHighAccum[lmax] += WIJ;
+	    }
+	  }
+	}
+      }
+
+      /* 
+	 scan forward from i+1 
+	 until x[j]-x[i] > maxsearch  or until we run out 
+
+      */
+      if(i < n1) {
+	for(j=i+1; j < n; j++) {
+	  /* squared interpoint distance */
+	  dx = x[j] - xi;
+	  dx2 = dx * dx;
+	  if(dx2 >= max2search) 
+	    break;
+	  dy = y[j] - yi;
+	  dij2 = dx2 + dy * dy;
+	  if(dij2 < max2search) {
+#ifdef WEIGHTED 
+	    wj = w[j];
+#endif
+	    /* increment numerator for all r such that dij <= r < bi */
+	    dij = (double) sqrt(dij2);
+	    dratio = dij/dt;
+	    /* smallest integer greater than or equal to dratio */
+	    lmin = (int) ceil(dratio);
+	    /* increment entries lmin to lmax inclusive */
+	    if(lmax >= lmin) {
+#ifdef WEIGHTED
+	      wij = wi * wj;
+#endif
+	      numerLowAccum[lmin] += WIJ;
+	      numerHighAccum[lmax] += WIJ;
+	    }
+	  }
+	}
+      }
+    }
+  }
+  /* 
+     Now use the accumulated values to compute the numerator and denominator.
+     The value of denomAccum[l] should be added to denom[k] for all k <= l.
+     numerHighAccum[l] should be added to numer[k] for all k <= l;
+     numerLowAccum[l] should then be subtracted from numer[k] for k <= l.
+  */
+
+  for(l=nt1, naccum=daccum=ZERO; l>=0; l--) {
+    daccum += denomAccum[l];
+    denom[l] = daccum;
+
+    naccum += numerHighAccum[l];
+    numer[l] = naccum;
+    naccum -= numerLowAccum[l];
+  }
+
+}
+
+#undef ZERO
+#undef WI 
+#undef WJ
+#undef WIJ
+
diff --git a/src/Knone.c b/src/Knone.c
new file mode 100644
index 0000000..2997953
--- /dev/null
+++ b/src/Knone.c
@@ -0,0 +1,47 @@
+#include <R.h>
+#include <R_ext/Utils.h>
+#include <math.h>
+
+/* 
+  Knone.c
+
+  Efficient computation of uncorrected estimates of K 
+  for large datasets
+  
+  KnoneI()   Estimates K function, 
+               returns integer numerator
+
+  KnoneD()   Estimates K function, 
+               returns double precision numerator
+
+  Kwnone()   Estimates Kinhom, 
+               returns double precision numerator
+
+  Functions require (x,y) data to be sorted in ascending order of x
+  and expect r values to be equally spaced and starting at zero
+   
+  $Revision: 1.2 $ $Date: 2013/05/27 02:09:10 $
+
+*/
+
+#undef WEIGHTED
+
+#define FNAME KnoneI
+#define OUTTYPE int
+#include "Knone.h"
+
+#undef FNAME
+#undef OUTTYPE
+#define FNAME KnoneD
+#define OUTTYPE double
+#include "Knone.h"
+
+#undef FNAME
+#undef OUTTYPE
+#define FNAME Kwnone
+#define WEIGHTED
+#define OUTTYPE double
+#include "Knone.h"
+
+
+
diff --git a/src/Knone.h b/src/Knone.h
new file mode 100644
index 0000000..9ef9340
--- /dev/null
+++ b/src/Knone.h
@@ -0,0 +1,176 @@
+/*
+  
+  Knone.h
+
+  Code template for K function estimators in Knone.c
+
+  Variables:
+
+  FNAME        function name
+
+  OUTTYPE      storage type of the output 'numer' 
+  ('int' or 'double')
+
+  WEIGHTED     #defined for weighted (inhom) K function
+
+  Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2000-2013
+  Licence: GPL >= 2
+
+  $Revision: 1.6 $     $Date: 2013/09/18 04:08:26 $
+
+*/
+
+void FNAME(
+	   nxy, x, y, 
+#ifdef WEIGHTED
+	   w,
+#endif
+	   nr, rmax, numer) 
+/* inputs */
+     int *nxy, *nr;
+     double *x, *y, *rmax;
+#ifdef WEIGHTED
+     double *w;
+#endif
+     /* output */
+     OUTTYPE *numer;
+{
+  int i, j, l, n, nt, n1, lmin, lmax, maxchunk;
+  double dt, tmax, tmax2, xi, yi;
+  double dratio, dij, dij2, dx, dy, dx2;
+#ifdef WEIGHTED
+  double wi, wj, wij;
+#endif
+
+#ifdef WEIGHTED
+
+#define ZERO 0.0
+#define WI wi
+#define WJ wj
+#define WIJ wij
+
+#else 
+
+#define ZERO 0
+#define WI 1
+#define WJ 1
+#define WIJ 1
+
+#endif
+
+  n = *nxy;
+  nt = *nr;
+
+  n1 = n - 1;
+  lmax = nt - 1;
+
+  dt = (*rmax)/(nt-1);
+  tmax = *rmax;
+  tmax2 = tmax * tmax;
+
+  /* initialise */
+  for(l = 0; l < nt; l++)
+    numer[l] =  ZERO;
+
+  if(n == 0) 
+    return;
+
+  /* loop in chunks of 2^16 */
+
+  i = 0; maxchunk = 0; 
+  while(i < n) {
+
+    R_CheckUserInterrupt();
+
+    maxchunk += 65536; 
+    if(maxchunk > n) maxchunk = n;
+
+    for(; i < maxchunk; i++) {
+
+#ifdef WEIGHTED
+      wi = w[i];
+#endif
+      xi = x[i];
+      yi = y[i];
+
+      /* 
+	 scan backward from i-1 
+	 until x[j] < x[i] -tmax or until we run out 
+      */
+      if(i > 0) {
+	for(j=i-1; j >= 0; j--) {
+	  dx = x[j] - xi;
+	  dx2 = dx * dx;
+	  if(dx2 >= tmax2)
+	    break;
+	  dy = y[j] - yi;
+	  dij2 = dx2 + dy * dy;
+	  if(dij2 < tmax2) {
+#ifdef WEIGHTED 
+	    wj = w[j];
+#endif
+	    /* increment numerator for all r >= dij */
+	    dij = (double) sqrt(dij2);
+	    dratio = dij/dt;
+	    /* smallest integer greater than or equal to dratio */
+	    lmin = (int) ceil(dratio);
+	    /* effectively increment entries lmin to lmax inclusive */
+	    if(lmin <= lmax) {
+#ifdef WEIGHTED
+	      wij = wi * wj;
+#endif
+	      numer[lmin] += WIJ;
+	    }
+	  }
+	}
+      }
+
+      /* 
+	 scan forward from i+1 
+	 until x[j] > x[i] + tmax or until we run out 
+      */
+      if(i < n1) {
+	for(j=i+1; j < n; j++) {
+	  /* squared interpoint distance */
+	  dx = x[j] - xi;
+	  dx2 = dx * dx;
+	  if(dx2 >= tmax2)
+	    break;
+	  dy = y[j] - yi;
+	  dij2 = dx2 + dy * dy;
+	  if(dij2 < tmax2) {
+#ifdef WEIGHTED 
+	    wj = w[j];
+#endif
+	    /* increment numerator for all r >= dij */
+	    dij = (double) sqrt(dij2);
+	    dratio = dij/dt;
+	    /* smallest integer greater than or equal to dratio */
+	    lmin = (int) ceil(dratio);
+	    /* increment entries lmin to lmax inclusive */
+	    if(lmin <= lmax) {
+#ifdef WEIGHTED
+	      wij = wi * wj;
+#endif
+	      numer[lmin] += WIJ;
+	    }
+	  }
+	}
+      }
+    }
+  }
+  /* 
+     Now accumulate the numerator:
+     numer[l] currently counts the pairs whose distance falls in bin l;
+     the cumulative sum converts this into the count of all pairs
+     at distance <= r[l].
+  */
+
+  if(nt > 1)
+    for(l=1; l < nt; l++)
+      numer[l] += numer[l-1];
+
+}
+
+#undef ZERO
+#undef WI 
+#undef WJ 
+#undef WIJ
+
diff --git a/src/Krect.c b/src/Krect.c
new file mode 100644
index 0000000..03effc8
--- /dev/null
+++ b/src/Krect.c
@@ -0,0 +1,78 @@
+/*
+
+  Krect.c
+
+  $Revision: 1.3 $     $Date: 2014/02/09 03:02:42 $
+
+  +++  Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2014 ++++
+
+  Fast code for K function in rectangular case.
+
+     **Assumes point pattern is sorted in increasing order of x coordinate**
+     **Assumes window is (0,wide) x (0, high) **
+     **Assumes output vectors were initialised to zero**
+
+  Krect.c          defines three interface functions,
+                   for weighted, unweighted double, and unweighted integer cases
+
+  KrectFunDec.h    (#included thrice)
+                   Function declaration, arguments, storage allocation
+  
+  KrectV1.h        split according to whether Isotropic Correction is wanted
+                   Macro ISOTROPIC is #defined 
+
+  KrectV2.h        split according to whether Translation Correction is wanted
+                   Macro TRANSLATION is #defined 
+
+  KrectV3.h        split according to whether Border Correction is wanted
+                   Macro BORDER is #defined 
+
+  KrectV4.h        split according to whether Uncorrected estimate is wanted
+                   Macro UNCORRECTED is #defined 
+
+  KrectBody.h      Function body, including loops over i and j
+
+  KrectIncrem.h    (#included twice)
+                   Code performed when a close pair of points has
+                   been found: calculate edge corrections, increment results.
+
+*/
+
+#include <R.h>
+#include <R_ext/Utils.h>
+#include <Rmath.h>
+
+/* This constant is defined in Rmath.h */
+#define TWOPI M_2PI
+
+#define ABS(X) (((X) >= 0) ? (X) : (-(X)))
+#define SMALL(X) ((ABS(X) < 1.0e-12) ? 1 : 0)
+#define MIN(X,Y) (((X) < (Y)) ? (X) : (Y))
+
+#undef FNAME
+#undef WEIGHTED
+#undef COUNTTYPE
+
+#define FNAME KrectInt
+#define COUNTTYPE int
+#include "KrectFunDec.h"
+
+#undef FNAME
+#undef WEIGHTED
+#undef COUNTTYPE
+
+#define FNAME KrectDbl
+#define COUNTTYPE double
+#include "KrectFunDec.h"
+
+#undef FNAME
+#undef WEIGHTED
+#undef COUNTTYPE
+
+#define FNAME KrectWtd
+#define COUNTTYPE double
+#define WEIGHTED
+#include "KrectFunDec.h"
+
+
+
diff --git a/src/KrectBody.h b/src/KrectBody.h
new file mode 100644
index 0000000..a725253
--- /dev/null
+++ b/src/KrectBody.h
@@ -0,0 +1,195 @@
+  /* 
+     KrectBody.h 
+
+     +++ Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2014 ++++
+
+     Main function body for 'Krect' 
+
+     Included multiple times with different values of the macros: 
+            (#define or #undef)
+     WEIGHTED
+     ISOTROPIC
+     TRANSLATION
+     BORDER
+     UNCORRECTED
+
+     **Assumes point pattern is sorted in increasing order of x coordinate**
+     **Assumes window is (0,wide) x (0, high) **
+     **Assumes output vectors were initialised to zero**
+
+     Variables are declared in 'KrectFunDec.c'
+
+     This algorithm is optimal (amongst the choices in spatstat)
+     when the window is a rectangle *and* at least one of
+     the ISOTROPIC, TRANSLATION corrections is needed.
+     There are faster algorithms for the border correction on its own.
+
+     $Revision: 1.3 $ $Date: 2014/02/09 03:01:27 $
+
+  */
+
+  /* loop in chunks of 2^16 */
+
+  i = 0; maxchunk = 0; 
+  while(i < N) {
+
+    R_CheckUserInterrupt();
+
+    maxchunk += 65536; 
+    if(maxchunk > N) maxchunk = N;
+
+    /* ............. LOOP OVER i ................. */
+
+    for(; i < maxchunk; i++) {
+
+      xi = x[i];
+      yi = y[i];
+#ifdef WEIGHTED
+      wi = w[i];
+#endif
+
+#ifdef BORDER
+      /* For border correction */
+      /* compute distance to border */
+      bx = MIN(xi, (wide - xi));
+      by = MIN(yi, (high - yi));
+      bdisti = MIN(bx, by);
+      /* denominator will ultimately be incremented for all r < b[i] */
+      bratio = bdisti/rstep;
+      /* lbord is the largest integer STRICTLY less than bratio */
+      lbord = (int) ceil(bratio) - 1;
+      lbord = (lbord <= Nr1) ? lbord : Nr1;
+      /* increment entry corresponding to r = b[i] */
+#ifdef WEIGHTED
+      if(lbord >= 0) 
+	denomAccum[lbord] += wi;
+#else
+      if(lbord >= 0) 
+	(denomAccum[lbord])++;
+#endif
+#endif
+
+#ifdef ISOTROPIC
+      /* For isotropic correction */
+      /* 
+	 perpendicular distance from point i to each edge of rectangle
+	 L = left, R = right, D = down, U = up
+      */
+      dL = xi;
+      dR = wide - xi;
+      dD = yi;
+      dU = high - yi;
+      /*
+	test for corner of the rectangle
+      */
+      ncor = SMALL(dL) + SMALL(dR) + SMALL(dD) + SMALL(dU);
+      corner = (ncor >= 2);
+      /* 
+	 angle between 
+	 - perpendicular to edge of rectangle
+	 and 
+	 - line from point to corner of rectangle
+	 
+      */
+      bLU = atan2(dU, dL);
+      bLD = atan2(dD, dL);
+      bRU = atan2(dU, dR);
+      bRD = atan2(dD, dR);
+      bUL = atan2(dL, dU);
+      bUR = atan2(dR, dU);
+      bDL = atan2(dL, dD);
+      bDR = atan2(dR, dD);
+#endif
+
+      /* ............. LOOP OVER j ................. */
+      /* scan through points (x[j],y[j]) */
+
+      /* 
+	 scan backward from i-1 
+	 until |x[j]-x[i]| > Rmax
+      */
+      if(i > 0) {
+	for(j=i-1; j >= 0; j--) {
+	  /* squared interpoint distance */
+	  dx = xi - x[j];
+	  dx2 = dx * dx;
+	  if(dx2 >= R2max)
+	    break;
+	  dy = y[j] - yi;
+	  dij2 = dx2 + dy * dy;
+	  if(dij2 < R2max) {
+#include "KrectIncrem.h"	    
+	  }
+	}
+      }
+
+      /* 
+	 scan forward from i+1 
+	 until x[j]-x[i] > Rmax
+
+      */
+      if(i < N1) {
+	for(j=i+1; j < N; j++) {
+	  /* squared interpoint distance */
+	  dx = x[j] - xi;
+	  dx2 = dx * dx;
+	  if(dx2 >= R2max) 
+	    break;
+	  dy = y[j] - yi;
+	  dij2 = dx2 + dy * dy;
+	  if(dij2 < R2max) {
+#include "KrectIncrem.h"	    
+	  }
+	}
+      }
+    }
+  }
+
+  /* 
+    ..................  END OF LOOPS ................................
+  */
+
+  /* ............. compute cumulative functions ..................... */
+
+#ifdef UNCORRECTED
+  naccum = ZERO;
+  for(l = 0; l < Nr; l++) {
+    unco[l] += naccum;
+    naccum = unco[l];
+  }
+#endif    
+
+#ifdef ISOTROPIC
+  accum = 0.0;
+  for(l = 0; l < Nr; l++) {
+    iso[l] += accum;
+    accum = iso[l];
+  }
+#endif    
+   
+#ifdef TRANSLATION
+  accum = 0.0;
+  for(l = 0; l < Nr; l++) {
+    trans[l] += accum;
+    accum = trans[l];
+  }
+#endif    
+   
+#ifdef BORDER
+  /* 
+     Now use the accumulated values to compute the numerator and denominator.
+     The value of denomAccum[l] should be added to bdenom[k] for all k <= l.
+     numerHighAccum[l] should be added to bnumer[k] for all k <= l;
+     numerLowAccum[l] should then be subtracted from bnumer[k] for all k < l.
+  */
+  for(l=Nr1, naccum=daccum=ZERO; l>=0; l--) {
+    daccum += denomAccum[l];
+    bdenom[l] = daccum;
+
+    naccum += numerHighAccum[l];
+    bnumer[l] = naccum;
+    naccum -= numerLowAccum[l];
+  }
+
+#endif
+
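
    The BORDER code above never walks over a whole range of numerator entries
    for each pair; it records each range update [ldist, lbord] at its two
    endpoints (numerLowAccum, numerHighAccum) and recovers all the sums in the
    single reverse sweep shown at the end of the file, turning O(N * Nr) range
    updates into O(Nr) work. A self-contained C sketch of that
    endpoint-accumulator trick, with hypothetical array names:

    #include <stdio.h>
    #define NR 8

    int main(void) {
      double low[NR] = {0}, high[NR] = {0}, numer[NR];
      /* record a range update: add v to numer[k] for lo <= k <= hi, in O(1) */
      int lo = 2, hi = 5;
      double v = 1.0;
      low[lo] += v;
      high[hi] += v;
      /* one reverse cumulative sweep recovers every entry */
      double acc = 0.0;
      for (int k = NR - 1; k >= 0; k--) {
        acc += high[k];    /* ranges ending at or after k */
        numer[k] = acc;
        acc -= low[k];     /* ranges starting at k no longer cover k-1 */
      }
      for (int k = 0; k < NR; k++) printf("%g ", numer[k]);
      /* prints: 0 0 1 1 1 1 0 0 */
      printf("\n");
      return 0;
    }
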
diff --git a/src/KrectFunDec.h b/src/KrectFunDec.h
new file mode 100644
index 0000000..4ce0d48
--- /dev/null
+++ b/src/KrectFunDec.h
@@ -0,0 +1,108 @@
+/*
+
+  KrectFunDec.h
+
+  $Revision: 1.3 $     $Date: 2014/02/09 02:51:15 $
+
+  Function declarations for Krect
+
+  Macros: 
+      FNAME     function name
+
+      WEIGHTED  #defined for weighted version (Kinhom etc)
+
+  +++  Copyright (C) Adrian Baddeley 2014 ++++
+
+*/
+
+void FNAME(width, height,
+	   nxy, x, y, 
+#ifdef WEIGHTED
+           w,
+#endif
+	   nr, rmax, trimedge, 
+	   doIso, doTrans, doBord, doUnco,
+	   iso, trans, bnumer, bdenom, unco)
+     /* input data */
+     double *width, *height;   /* window is (0, width) x (0, height) */
+     int    *nxy;           /* number of (x,y) points */
+     double *x, *y;         /* (x,y) coordinates */
+#ifdef WEIGHTED
+     double *w;             /* weights (e.g. reciprocal intensities) */
+#endif
+     /* algorithm parameters */
+     int    *nr;            /* number of r values */
+     double *rmax;          /* maximum r value */
+     double *trimedge;      /* maximum edge correction weight */
+     int    *doIso;         /* logical: whether to do isotropic correction */
+     int    *doTrans;       /* logical: whether to do translation correction */
+     int    *doBord;        /* logical: whether to do border correction */
+     int    *doUnco;        /* logical: whether to do uncorrected estimator */
+     /* outputs */
+     /* These are vectors of length nr if required, otherwise ignored */
+     double *iso;           /* isotropic-corrected estimator */
+     double *trans;         /* translation-corrected estimator */
+     COUNTTYPE *bnumer;        /* numerator of border-corrected estimator */
+     COUNTTYPE *bdenom;        /* denominator of border-corrected estimator */
+     COUNTTYPE *unco;          /* uncorrected estimator */
+{
+  int i, j, l, ldist, lbord, M, maxchunk, N, Nr, N1, Nr1;
+  double rstep, Rmax, R2max, wide, high, trim;
+  double xi, yi, bdisti, bx, by, bratio;
+  double dx, dy, dx2, dij, dij2,  dratio, edgetrans, edgeiso;
+  double dL, dR, dD, dU, bLU, bLD, bRU, bRD, bUL, bUR, bDL, bDR;
+  double aL, aR, aD, aU, cL, cR, cU, cD, extang;
+  int ncor, corner;
+  COUNTTYPE *numerLowAccum, *numerHighAccum, *denomAccum;
+  COUNTTYPE naccum, daccum;
+  double accum;
+#ifdef WEIGHTED
+  double wi, wj, wij;
+#endif
+
+#ifdef WEIGHTED
+
+#define ZERO 0.0
+#define WIJ wij
+
+#else 
+
+#define ZERO 0
+#define WIJ 1
+
+#endif
+  
+  N = *nxy;
+
+  if(N == 0) 
+    return;
+
+  Nr = *nr;
+  Rmax = *rmax;
+
+  trim = *trimedge;
+
+  N1 = N - 1;
+  Nr1 = Nr - 1;
+  R2max = Rmax * Rmax;
+  rstep = Rmax/Nr1;
+
+  wide = *width;
+  high = *height;
+
+  /* Allocate and initialise scratch space - for border correction,
+     but do it in all cases to keep the compiler happy */
+
+  M = (*doBord == 1) ? Nr : 1;
+  numerLowAccum  = (COUNTTYPE *) R_alloc(M, sizeof(COUNTTYPE));
+  numerHighAccum = (COUNTTYPE *) R_alloc(M, sizeof(COUNTTYPE));
+  denomAccum     = (COUNTTYPE *) R_alloc(M, sizeof(COUNTTYPE));
+  for(l = 0; l < M; l++)
+    numerLowAccum[l] = numerHighAccum[l] = denomAccum[l] = ZERO;
+
+#include "KrectV1.h"
+
+}
+
+#undef ZERO
+#undef WIJ
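
    KrectFunDec.h is a template: the including .c file is expected to define
    FNAME, COUNTTYPE and (optionally) WEIGHTED before including it, so that
    one compiled function is generated per configuration. The following is
    only an illustrative sketch of such a driver, with hypothetical function
    names; the actual spatstat driver file may differ:

    /* hypothetical driver file -- illustration only */
    #define FNAME KrectWtd            /* weighted version, e.g. for Kinhom */
    #define WEIGHTED
    #define COUNTTYPE double
    #include "KrectFunDec.h"
    #undef FNAME
    #undef WEIGHTED
    #undef COUNTTYPE

    #define FNAME KrectUnw            /* unweighted version, integer counts */
    #define COUNTTYPE int
    #include "KrectFunDec.h"
    #undef FNAME
    #undef COUNTTYPE
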
diff --git a/src/KrectIncrem.h b/src/KrectIncrem.h
new file mode 100644
index 0000000..1abcdbf
--- /dev/null
+++ b/src/KrectIncrem.h
@@ -0,0 +1,94 @@
+/*
+  KrectIncrem.h
+
+  Code to increment numerators of K-function
+
+  $Revision: 1.5 $  $Date: 2014/02/09 03:00:51 $
+
+  +++  Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2014 ++++
+
+*/
+
+#ifdef WEIGHTED 
+	      wj = w[j];
+	      wij = wi * wj;
+#endif
+	      /* determine index of entry to be incremented */
+	      dij = (double) sqrt(dij2);
+	      dratio = dij/rstep;
+	      /* smallest integer greater than or equal to dratio */
+	      ldist = (int) ceil(dratio);
+
+#ifdef UNCORRECTED
+	      /* ............  uncorrected estimate ................. */
+#ifdef WEIGHTED
+              unco[ldist] += wij;             
+#else
+              (unco[ldist])++;
+#endif
+#endif
+
+#ifdef BORDER
+	      /* ............  border correction ................. */
+	      /* increment numerator for all r such that dij <= r < bi */
+	      /* increment entries ldist to lbord inclusive */
+#ifdef WEIGHTED
+	      if(lbord >= ldist) {
+		numerLowAccum[ldist] += wij;
+		numerHighAccum[lbord] += wij;
+	      }
+#else
+	      if(lbord >= ldist) {
+		(numerLowAccum[ldist])++;
+		(numerHighAccum[lbord])++;
+	      }
+#endif
+#endif
+
+#ifdef TRANSLATION
+	      /* ............  translation correction ................. */
+              edgetrans = 1.0/((1.0 - ABS(dx)/wide) * (1.0 - ABS(dy)/high));
+              edgetrans = MIN(edgetrans, trim);
+#ifdef WEIGHTED
+	      trans[ldist] += wij * edgetrans;
+#else
+	      trans[ldist] += edgetrans;
+#endif
+#endif
+
+#ifdef ISOTROPIC
+	      /* ............  isotropic correction ................. */
+	      /*
+		half the angle subtended by the intersection between
+		the circle of radius d[i,j] centred on point i
+		and each edge of the rectangle (prolonged to an infinite line)
+	      */
+	      aL = (dL < dij) ? acos(dL/dij) : 0.0;
+	      aR = (dR < dij) ? acos(dR/dij) : 0.0;
+	      aD = (dD < dij) ? acos(dD/dij) : 0.0;
+	      aU = (dU < dij) ? acos(dU/dij) : 0.0;
+
+	      /* apply maxima */
+
+	      cL = MIN(aL, bLU) + MIN(aL, bLD);
+	      cR = MIN(aR, bRU) + MIN(aR, bRD);
+	      cU = MIN(aU, bUL) + MIN(aU, bUR);
+	      cD = MIN(aD, bDL) + MIN(aD, bDR);
+
+	      /* total exterior angle over 2 pi */
+	      extang = (cL + cR + cU + cD)/TWOPI;
+
+	      /* add 1/4 (i.e. pi/2 as a fraction of 2*pi) for corners;
+	         written as 0.25 because the integer expression 1/4
+	         evaluates to zero in C */
+	      if(corner) 
+		extang += 0.25;
+
+	      /* edge correction factor */
+	      edgeiso = 1 / (1 - extang);
+              edgeiso = MIN(edgeiso, trim);
+
+#ifdef WEIGHTED
+	      iso[ldist] += wij * edgeiso;
+#else
+	      iso[ldist] += edgeiso;
+#endif
+#endif
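
    The ISOTROPIC block above is Ripley's isotropic edge correction: the
    weight for a pair is the reciprocal of the fraction of the circle of
    radius d[i,j], centred at point i, that lies inside the rectangle, capped
    at trim. A reduced sketch for a single straight edge (a hypothetical
    standalone helper that ignores corners and the other three edges) shows
    the core computation:

    #include <math.h>

    /* weight for a point at perpendicular distance dedge from one infinite
       edge, for a pair at distance dij (sketch only) */
    double iso_weight_one_edge(double dedge, double dij) {
      /* half-angle of the arc of the circle cut off by the edge */
      double a = (dedge < dij) ? acos(dedge / dij) : 0.0;
      /* exterior angle as a fraction of the full circle */
      double extang = (2.0 * a) / (2.0 * M_PI);
      /* reciprocal of the fraction of the circle inside the window */
      return 1.0 / (1.0 - extang);
    }
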
diff --git a/src/KrectV1.h b/src/KrectV1.h
new file mode 100644
index 0000000..900e967
--- /dev/null
+++ b/src/KrectV1.h
@@ -0,0 +1,19 @@
+/* 
+   KrectV1.h
+
+   with or without isotropic correction
+
+ */
+
+if((*doIso) == 1) {
+
+#define ISOTROPIC
+#include "KrectV2.h"
+
+ } else {
+
+#undef ISOTROPIC
+#include "KrectV2.h"
+
+ }
+
diff --git a/src/KrectV2.h b/src/KrectV2.h
new file mode 100644
index 0000000..04b007a
--- /dev/null
+++ b/src/KrectV2.h
@@ -0,0 +1,19 @@
+/* 
+   KrectV2.h
+
+   with or without translation correction
+
+ */
+
+if((*doTrans) == 1) {
+
+#define TRANSLATION
+#include "KrectV3.h"
+
+ } else {
+
+#undef TRANSLATION
+#include "KrectV3.h"
+
+ }
+
diff --git a/src/KrectV3.h b/src/KrectV3.h
new file mode 100644
index 0000000..c807b89
--- /dev/null
+++ b/src/KrectV3.h
@@ -0,0 +1,19 @@
+/* 
+   KrectV3.h
+
+   with or without border correction
+
+ */
+
+if((*doBord) == 1) {
+
+#define BORDER
+#include "KrectV4.h"
+
+ } else {
+
+#undef BORDER
+#include "KrectV4.h"
+
+ }
+
diff --git a/src/KrectV4.h b/src/KrectV4.h
new file mode 100644
index 0000000..21ad833
--- /dev/null
+++ b/src/KrectV4.h
@@ -0,0 +1,19 @@
+/* 
+   KrectV4.h
+
+   with or without uncorrected estimator
+
+ */
+
+if((*doUnco) == 1) {
+
+#define UNCORRECTED
+#include "KrectBody.h"
+
+ } else {
+
+#undef UNCORRECTED
+#include "KrectBody.h"
+
+ }
+
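
    The chain KrectV1.h -> KrectV2.h -> KrectV3.h -> KrectV4.h -> KrectBody.h
    expands, at compile time, into 2^4 = 16 specialised copies of the loop
    body, one for every combination of the ISOTROPIC, TRANSLATION, BORDER and
    UNCORRECTED flags; the run-time if tests merely select a branch, so no
    per-pair #ifdef cost is paid inside the loops. A minimal sketch of the
    same textual-inclusion pattern with a single flag (hypothetical file and
    variable names, written as header fragments like the originals):

    /* dispatch.h (sketch): choose a specialised body at run time */
    if (*doFoo == 1) {
    #define FOO
    #include "body.h"
    } else {
    #undef FOO
    #include "body.h"
    }

    /* body.h (sketch): the loop compiled once per variant */
    for (i = 0; i < n; i++) {
    #ifdef FOO
      total += extra[i];   /* compiled in only for the FOO variant */
    #endif
      total += base[i];
    }
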
diff --git a/src/Perfect.cc b/src/Perfect.cc
new file mode 100755
index 0000000..0446be5
--- /dev/null
+++ b/src/Perfect.cc
@@ -0,0 +1,849 @@
+//  Debug switch 
+//  #define DBGS 
+
+#include <math.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <time.h>
+#include <R.h>
+#include <Rdefines.h>
+#include <Rmath.h>
+#include <R_ext/Utils.h>
+#include <R_ext/Constants.h>
+
+// #include <stdio.h>
+// FILE *out;
+// File I/O is disabled in the R implementation
+
+
+#ifdef DBGS
+#define CHECK(PTR,MESSAGE) if(((void *) PTR) == ((void *) NULL)) error(MESSAGE)
+#define CLAMP(X, LOW, HIGH, XNAME) \
+  if((X) > (HIGH)) { \
+     Rprintf("Value of %s exceeds upper limit %d\n", XNAME, HIGH); \
+     X = HIGH; \
+  } else if((X) < (LOW)) { \
+     Rprintf("Value of %s is below %d\n", XNAME, LOW); \
+     X = LOW; \
+  } 
+#else
+#define CHECK(PTR,MESSAGE)
+#define CLAMP(X, LOW, HIGH, XNAME) \
+  if((X) > (HIGH)) X = HIGH; else if((X) < (LOW)) X = LOW; 
+#endif
+
+// .........................................
+// memory allocation 
+// using R_alloc
+
+#define ALLOCATE(TYPE)  (TYPE *) R_alloc(1, sizeof(TYPE))
+#define FREE(PTR) 
+
+// Alternative using Calloc and Free
+// #define ALLOCATE(TYPE)  (TYPE *) Calloc(1, sizeof(TYPE))
+// #define FREE(PTR) Free(PTR)
+
+void R_CheckUserInterrupt(void);
+
+struct Point{ long int No; float X; float Y; float R; struct Point *next; }; 
+
+struct Point2{ long int No; float X; float Y; 
+  char InLower[2]; 
+  double Beta; double TempBeta; struct Point2 *next; }; 
+
+struct Point3{ char Case; char XCell; char YCell; struct Point3 *next; }; 
+
+// const float Pi=3.141593;
+
+double slumptal(void){
+  return(runif((double) 0.0, (double) 1.0));
+}
+
+long int poisson(double lambda){
+  return((long int)rpois(lambda));
+}
+
+// ........................... Point patterns ..........................
+
+class Point2Pattern {
+public:
+  long int UpperLiving[2];
+  long int MaxXCell, MaxYCell, NoP;
+  double XCellDim, YCellDim, Xmin, Xmax, Ymin, Ymax;
+  struct Point2 *headCell[10][10],*dummyCell;
+  char DirX[10], DirY[10];
+ 
+  Point2Pattern(double xmin, double xmax,
+		double ymin, double ymax, 
+		long int mxc, long int myc){
+    long int i,j;
+    UpperLiving[0] = 0;
+    UpperLiving[1] = 0;
+    Xmin = xmin; Xmax = xmax;
+    Ymin = ymin; Ymax = ymax;
+    DirX[1] = 1; DirY[1] = 0;
+    DirX[2] = 1; DirY[2] = -1;
+    DirX[3] = 0; DirY[3] = -1;
+    DirX[4] = -1; DirY[4] = -1;
+    DirX[5] = -1; DirY[5] = 0;
+    DirX[6] = -1; DirY[6] = 1;
+    DirX[7] = 0; DirY[7] = 1;
+    DirX[8] = 1; DirY[8] = 1;    
+    NoP = 0;
+    //
+    dummyCell = ALLOCATE(struct Point2);
+    //
+    dummyCell->next = dummyCell;
+    dummyCell->No = 0;
+    MaxXCell = mxc; MaxYCell = myc;
+    if(MaxXCell>9) MaxXCell = 9;
+    if(MaxYCell>9) MaxYCell = 9;
+    for(i=0;i<=MaxXCell;i++){
+      for(j=0;j<=MaxYCell;j++){
+	//
+	headCell[i][j] = ALLOCATE(struct Point2);
+	//
+	headCell[i][j]->next=dummyCell;
+      }
+    }
+    XCellDim = (Xmax-Xmin)/((double)(MaxXCell+1));
+    YCellDim = (Ymax-Ymin)/((double)(MaxYCell+1));
+  };
+  ~Point2Pattern(){}
+  void Print();
+  void Return(double *X, double *Y, int *num, int maxnum);
+  long int Count();
+  long int UpperCount();  
+  void Empty();
+  void Clean();
+  //  void DumpToFile(char FileName[100]);
+  //  void ReadFromFile(char FileName[100]);
+};
+
+void Point2Pattern::Print(){
+  long int i,j,k;
+  k = 0;
+  struct Point2 *TempCell;
+  for(i=0;i<=MaxXCell;i++){
+    for(j=0;j<=MaxYCell;j++){
+      //Rprintf("%d %d:\n",i,j);
+      TempCell = headCell[i][j]->next;
+      CHECK(TempCell, "internal error: TempCell is null in Print()");
+	while(TempCell->next != TempCell){
+	  k++;
+	  Rprintf("%f %f %ld %ld %ld=%d %ld=%d UL0 %d UL1 %d %f\n",
+		  TempCell->X,TempCell->Y,k,
+		  TempCell->No,
+		  i,int(TempCell->X/XCellDim),
+		  j,int(TempCell->Y/YCellDim),
+		  TempCell->InLower[0],TempCell->InLower[1],
+		  TempCell->Beta);
+	  TempCell = TempCell->next;
+	  CHECK(TempCell, "internal error: TempCell is null in Print() loop");
+      }
+    }
+  }
+  Rprintf("Printed %ld points.\n",k);
+}
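
    The linked lists in this file use a self-pointing sentinel rather than
    NULL: dummyCell->next == dummyCell, so every traversal terminates on the
    condition TempCell->next != TempCell instead of a null test, and empty
    cells all share the one dummy node. A self-contained C sketch of the
    idiom, with hypothetical names:

    #include <stdio.h>

    struct node { int value; struct node *next; };

    int main(void) {
      struct node sentinel = {0, &sentinel};      /* points to itself */
      struct node c = {3, &sentinel}, b = {2, &c}, a = {1, &b};
      struct node head = {0, &a};                 /* list: 1 2 3 */
      /* traversal stops at the self-pointing sentinel, not at NULL */
      for (struct node *p = head.next; p->next != p; p = p->next)
        printf("%d ", p->value);                  /* prints: 1 2 3 */
      printf("\n");
      return 0;
    }
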
+
+void Point2Pattern::Return(double *X, double *Y, int *num, int maxnum){
+  long int i,j,k;
+  k =0; *num = 0;
+#ifdef DBGS
+  Rprintf("executing Return()\n");
+#endif
+  if(UpperLiving[0]<=maxnum){
+    struct Point2 *TempCell;
+    for(i=0;i<=MaxXCell;i++){
+      for(j=0;j<=MaxYCell;j++){
+#ifdef DBGS
+	//	Rprintf("%d %d:\n",i,j);
+#endif
+	TempCell = headCell[i][j]->next;
+	CHECK(TempCell, "internal error: TempCell is null in Return()");
+	while(TempCell->next != TempCell){
+	  X[k] = TempCell->X;
+	  Y[k] = TempCell->Y;	
+	  k++;
+	  TempCell = TempCell->next;
+	  CHECK(TempCell, "internal error: TempCell is null in Return() loop");
+	}
+      }
+    }    
+    *num = k;
+  } else {
+    *num = -1;
+  }
+}
+
+long int Point2Pattern::Count(){
+  long int i,j,k;
+  k = 0;
+  struct Point2 *TempCell;
+  for(i=0;i<=MaxXCell;i++){
+    for(j=0;j<=MaxYCell;j++){
+      // Rprintf("%d %d:\n",i,j);
+      TempCell = headCell[i][j]->next;
+      CHECK(TempCell, "internal error: TempCell is null in Count()");
+      while(TempCell->next != TempCell){
+	k++;
+	TempCell = TempCell->next;
+	CHECK(TempCell, "internal error: TempCell is null in Count() loop");
+      }
+    }
+  }
+  //Rprintf("Printed %d points.\n",k);
+  return(k);
+}
+
+// a quick (over)estimate of the number of points in the pattern, 
+// for storage allocation
+
+long int Point2Pattern::UpperCount(){
+  return(UpperLiving[0]);
+}
+
+void Point2Pattern::Empty(){
+  struct Point2 *TempCell, *TempCell2;
+  long int i,j;
+  
+#ifdef DBGS
+  long int k;
+  k=0;
+  Rprintf("executing Empty()\n");
+#endif
+
+  for(i=0; i<=this->MaxXCell; i++){
+    for(j=0; j<=this->MaxYCell; j++){
+      TempCell = headCell[i][j]->next;
+      CHECK(TempCell, "internal error: TempCell is null in Empty()");
+      while(TempCell!=TempCell->next){	
+#ifdef DBGS
+	//	k++; Rprintf("%d %d %d\n",i,j,k);
+#endif
+	TempCell2 = TempCell->next;
+	FREE(TempCell);
+	TempCell = TempCell2;
+	CHECK(TempCell, "internal error: TempCell is null in Empty() loop");
+      }
+      headCell[i][j]->next = dummyCell;
+    }
+  }
+}
+
+void Point2Pattern::Clean(){
+  struct Point2 *TempCell, *TempCell2;
+  long int i,j;
+  
+#ifdef DBGS
+  Rprintf("executing Clean()\n");
+#endif
+
+  for(i=0; i<=MaxXCell; i++){
+    for(j=0; j<=MaxYCell; j++){
+      TempCell = headCell[i][j];
+      CHECK(TempCell, "internal error: TempCell is null in Clean()");
+      TempCell2 = headCell[i][j]->next;
+      CHECK(TempCell2, "internal error: TempCell2 is null in Clean()");
+      while(TempCell2!=TempCell2->next){
+	TempCell2->No = 0;
+	if(TempCell2->InLower[0]==0){
+	  TempCell->next = TempCell2->next;
+	  FREE(TempCell2);
+	  TempCell2 = TempCell->next;
+	  CHECK(TempCell2, 
+		"internal error: TempCell2 is null in Clean() loop A");
+	}
+	else{
+	  TempCell2 = TempCell2->next;
+	  TempCell = TempCell->next;
+	  CHECK(TempCell, "internal error: TempCell is null in Clean() loop B");
+	  CHECK(TempCell2, 
+		"internal error: TempCell2 is null in Clean() loop B");
+	}
+      }
+    }
+  }
+}
+
+//void Point2Pattern::DumpToFile(char FileName[100]){
+//  FILE *out;
+//  long int i,j;
+//  out = fopen(FileName,"w");
+//  struct Point2 *TempCell;
+//  for(i=0;i<=MaxXCell;i++){
+//    for(j=0;j<=MaxYCell;j++){
+//    //Rprintf("%d %d:\n",i,j);
+//    TempCell = headCell[i][j]->next;
+//    while(TempCell->next != TempCell){
+//	fprintf(out,"%f\t%f\t%ld\n",
+//	       TempCell->X,TempCell->Y,TempCell->No);
+//	TempCell = TempCell->next;
+//    }
+//  }
+//}
+//fclose(out);
+//}
+
+//void Point2Pattern::ReadFromFile(char FileName[100]){
+//  FILE *out;
+//long int k,XCell,YCell;
+//float f1,xs,ys;
+//out = fopen(FileName,"r");
+//struct Point2 *TempCell;
+//k=0;
+//while(feof(out)==0){
+//  k++;
+//  fscanf(out,"%f%f\n",&xs,&ys);
+//  //Rprintf("%f %f\n",xs,ys);
+//  //
+//  TempCell = ALLOCATE(struct Point2);
+//  //
+//  TempCell->No = k;
+//  TempCell->X = xs;
+//  TempCell->Y = ys;
+//  TempCell->InLower[0] = 1;
+//  TempCell->InLower[1] = 1;
+//
+//  f1 = (xs-Xmin)/XCellDim;  XCell = int(f1);
+//  if(XCell>MaxXCell) XCell = MaxXCell;
+//  f1 = (ys-Ymin)/YCellDim;  YCell = int(f1);
+//  if(YCell>MaxYCell) YCell = MaxYCell;
+//
+//  TempCell->next = headCell[XCell][YCell]->next;
+//  headCell[XCell][YCell]->next = TempCell;
+//
+//}
+//fclose(out);
+//Rprintf("%ld points loaded.\n",k);
+//
+//}
+
+
+// ........................... Point processes ..........................
+// ...................... (stationary, pairwise interaction) ............
+
+class PointProcess {
+ public:
+  double Xmin, Xmax, Ymin, Ymax, TotalBirthRate, InteractionRange;
+  PointProcess(double xmin, double xmax, double ymin, double ymax){
+    Xmin = xmin; Xmax = xmax;
+    Ymin = ymin; Ymax = ymax;
+  }
+  ~PointProcess(){}
+  virtual void NewEvent(double *x, double *y, char *InWindow)=0;
+  virtual void GeneratePoisson(Point *headPoint, 
+			       long int *GeneratedPoints,
+			       long int *LivingPoints,
+			       long int *NoP)=0;
+  virtual double Interaction(double dsquared)=0;
+  //  virtual void CalcBeta(long int xsidepomm, long int ysidepomm, 
+  //		   double *betapomm){ 
+  //  Rprintf("Define CalcBeta...\n");
+  // }
+  //  virtual void CheckBeta(long int xsidepomm, long int ysidepomm, 
+  //		   double *betapomm){ 
+  //Rprintf("Define CheckBeta...\n");
+  //}
+  //  virtual double lnCondInt(struct Point2 *TempCell, Point2Pattern *p2p)
+  //{ return(0.0);};
+  //  virtual double lnDens(Point2Pattern *p2p);
+  //  virtual void Beta(struct Point2 *TempCell){
+  //    TempCell->Beta = 0;
+  //    Rprintf("Define Beta...\n");};
+};
+
+//double PointProcess::lnDens(Point2Pattern *p2p){  
+//// double f1;
+//long int xco,yco,xc,yc,fx,tx,fy,ty,ry,rx;
+//double dy,dx, lnDens,dst2;
+//struct Point2 *TempCell, *TempCell2;
+//
+//dx = (Xmax-Xmin)/(double(p2p->MaxXCell+1));
+//dy = (Ymax-Ymin)/(double(p2p->MaxYCell+1));
+//rx = int(InteractionRange/dx+1.0);
+//ry = int(InteractionRange/dy+1.0);
+//
+//  //Rprintf("1:%f 2:%f 3:%d 4:%d 5:%f 6:%f\n",dx,dy,rx,ry,
+//  // this->InteractionRange,InteractionRange);
+//  //Rprintf("mx:%d my:%d\n",p2p->MaxXCell,p2p->MaxYCell);
+//
+//  lnDens = 0;
+//
+//  //Rprintf("lnDens: %f (0)\n",lnDens);
+//  
+//  for(xc = 0; xc <= p2p->MaxXCell; xc++){
+//    for(yc = 0; yc <= p2p->MaxYCell; yc++){
+//      //if(xc==1) Rprintf("%d %d\n",xc,yc);
+//      CHECK(p2p->headCell[xc][yc], 
+//	    "internal error: p2p->headCell[xc][yc] is null in lnDens()");
+//      TempCell = p2p->headCell[xc][yc]->next;
+//      CHECK(TempCell, "internal error: TempCell is null in lnDens()");
+//      while(TempCell != TempCell->next){
+//	lnDens += log(TempCell->Beta);
+//	//Rprintf("lnDens: %f (1) %d %d %d %d Beta %f\n",lnDens,xc,yc,
+//	//       p2p->MaxXCell,p2p->MaxYCell,TempCell->Beta);
+//	//if(lnDens<(-100000)){Rprintf("%f",lnDens); scanf("%f",&f1);}
+//	if(InteractionRange>0){
+//	  if((xc+rx)<=p2p->MaxXCell) tx=xc+rx; else tx = p2p->MaxXCell;
+//	  if((yc+ry)<=p2p->MaxYCell) ty=yc+ry; else ty = p2p->MaxYCell;
+//	  if((xc-rx)>=0) fx=xc-rx; else fx = 0;
+//	  if((yc-ry)>=0) fy=yc-ry; else fy = 0;
+//	  for(xco = fx; xco <= tx; xco++){
+//	    for(yco = fy; yco <= ty; yco++){
+//	      //if(xc==1) Rprintf("%d %d %d %d %d %d\n",xco,yco,fx,tx,fy,ty);
+//	      CHECK(p2p->headCell[xco][yco], 
+//		    "internal error: p2p->headCell[xco][yco] is null in lnDens() loop");
+//	      TempCell2 = p2p->headCell[xco][yco]->next;
+//	      CHECK(TempCell2, 
+//		    "internal error: TempCell2 is null in lnDens() loop A");
+//	      while(TempCell2!=TempCell2->next){
+//		if(TempCell2 != TempCell){
+//		  dst2 = pow(TempCell->X-TempCell2->X,2)+
+//			     pow(TempCell->Y-TempCell2->Y,2);
+//		  lnDens += log(Interaction(dst2));
+//		}
+//		TempCell2 = TempCell2->next; 
+//		CHECK(TempCell2, 
+//		      "internal error: TempCell2 is null in lnDens() loop B");
+//	      }
+//	    }
+//	  }
+//	  //Rprintf("lnDens: %f\n",lnDens);
+//	}
+//	TempCell = TempCell->next;
+//	CHECK(TempCell, 
+//	      "internal error: TempCell is null in lnDens() at end");
+//      }
+//    }
+//  }
+//  return(lnDens);
+//
+//}
+
+// ........................... Sampler ..........................
+
+class Sampler{
+ public:
+  PointProcess *PP;
+  Point2Pattern *P2P;
+  long int GeneratedPoints, LivingPoints, NoP;
+  //long int UpperLiving[2];
+  Sampler(PointProcess *p){ PP = p;}
+  ~Sampler(){}
+  void Sim(Point2Pattern *p2p, long int *ST, long int *ET);
+  long int BirthDeath(long int TimeStep,
+		      struct Point *headLiving,
+		      struct Point *headDeleted,
+		      struct Point3 *headTransition);
+  // WAS:  Sampler::Forward
+  void Forward(long int TS, long int TT, char TX, char TY,
+		      struct Point *Proposal, long int *DDD);
+};
+
+
+void Sampler::Forward(long int TS, long int TT, char TX, char TY,
+		      struct Point *Proposal, long int *DDD){
+
+  long int XCell, YCell, DirectionN;
+  double dtmp2,dtmpx,dtmpy, tmpR, TempGamma[2], TempI;
+  struct Point2 *TempCell, *TempCell2;
+  float f1;
+
+  /* Birth */
+  if(TT==1){
+    f1 = (Proposal->X-P2P->Xmin)/P2P->XCellDim;  XCell = int(f1);
+    CLAMP(XCell, 0, P2P->MaxXCell, "XCell");
+    f1 = (Proposal->Y-P2P->Ymin)/P2P->YCellDim;  YCell = int(f1);
+    CLAMP(YCell, 0, P2P->MaxYCell, "YCell");
+    //
+    TempCell = ALLOCATE(struct Point2);
+    //
+    TempCell->No = Proposal->No;
+    TempCell->X = Proposal->X;
+    TempCell->Y = Proposal->Y;
+
+    tmpR = Proposal->R;
+    TempCell->next = P2P->headCell[XCell][YCell]->next;
+    P2P->headCell[XCell][YCell]->next = TempCell;
+    TempCell->InLower[0]=0;
+    TempCell->InLower[1]=0;
+
+    TempGamma[0] = 1.0; TempGamma[1] = 1.0;    
+
+    /*same cell*/
+    TempCell2 = TempCell->next; 
+    CHECK(TempCell2, 
+	  "internal error: TempCell2 is null in Forward() birth case");
+    while(TempCell2 != TempCell2->next){
+      dtmpx = TempCell->X - TempCell2->X;
+      dtmpy = TempCell->Y - TempCell2->Y;
+      dtmp2  = dtmpx*dtmpx+dtmpy*dtmpy;      
+      TempI = PP->Interaction(dtmp2);
+      if(TempCell2->InLower[0]==1) TempGamma[0] = TempGamma[0]*TempI;
+      if(TempCell2->InLower[1]==1) TempGamma[1] = TempGamma[1]*TempI;
+      TempCell2=TempCell2->next;
+      CHECK(TempCell2, 
+	    "internal error: TempCell2 is null in Forward() birth case loop");
+    }
+    /*eight other cells*/
+    for(DirectionN=1;DirectionN<=8;DirectionN++){
+      if(((XCell+P2P->DirX[DirectionN])>=0) &&
+	 ((XCell+P2P->DirX[DirectionN])<=P2P->MaxXCell) &&
+	 ((YCell+P2P->DirY[DirectionN])>=0) &&
+	 ((YCell+P2P->DirY[DirectionN])<=P2P->MaxYCell)){
+	CHECK(P2P->headCell[XCell+P2P->DirX[DirectionN]][YCell+P2P->DirY[DirectionN]], 
+	      "internal error: HUGE P2P EXPRESSION is null in Forward() birth case loop A");
+	TempCell2 = 
+	  P2P->headCell[XCell+P2P->DirX[DirectionN]]
+	  [YCell+P2P->DirY[DirectionN]]->next;
+	CHECK(TempCell2, 
+	      "internal error: TempCell2 is null in Forward() birth case loop B");
+	while(TempCell2!=TempCell2->next){
+	  dtmpx = TempCell->X - TempCell2->X;
+	  dtmpy = TempCell->Y - TempCell2->Y;
+	  dtmp2 = dtmpx*dtmpx+dtmpy*dtmpy;      
+	  TempI = PP->Interaction(dtmp2);
+	  if(TempCell2->InLower[0]==1) 
+	    TempGamma[0] = TempGamma[0]*TempI;
+	  if(TempCell2->InLower[1]==1) 
+	    TempGamma[1] = TempGamma[1]*TempI;
+	  TempCell2=TempCell2->next;
+	CHECK(TempCell2, 
+	      "internal error: TempCell2 is null in Forward() birth case loop C");
+	}
+      }
+    }
+
+    if(tmpR <= TempGamma[1] ){ 
+      TempCell->InLower[0]=1;
+      P2P->UpperLiving[0] = P2P->UpperLiving[0] +1;
+    }
+    if(tmpR <= TempGamma[0] ){ 
+      TempCell->InLower[1]=1;
+      P2P->UpperLiving[1] = P2P->UpperLiving[1] +1;
+    }
+  }
+  /* Death */
+  if(TT==0){
+    TempCell=P2P->headCell[(int)TX][(int)TY];
+    CHECK(TempCell, "internal error: TempCell is null in Forward() death case");
+    while(TempCell->next->No != *DDD){
+      TempCell = TempCell->next;
+      CHECK(TempCell, 
+	    "internal error: TempCell is null in Forward() death case loop");
+      if(TempCell->next == TempCell) {
+	Rprintf("internal error: unexpected self-reference. Dumping...\n"); 
+	P2P->Print(); 
+        error("internal error: unexpected self-reference");
+	break;
+      }
+    };
+    CHECK(TempCell->next, 
+	  "internal error: TempCell->next is null in Forward() death case");
+    if(*DDD!=TempCell->next->No) 
+      Rprintf("diagnostic message: multi cell:  !!DDD:%ld TempCell->No:%ld ",
+	     *DDD,TempCell->No);
+    if(TempCell->next->InLower[0]==1)
+      P2P->UpperLiving[0] = P2P->UpperLiving[0] -1;
+    if(TempCell->next->InLower[1]==1) 
+      P2P->UpperLiving[1] = P2P->UpperLiving[1] -1;
+    TempCell2 = TempCell->next;
+    CHECK(TempCell2, 
+	  "internal error: TempCell2 is null in Forward() death case B");
+    TempCell->next = TempCell2->next;
+    FREE(TempCell2);
+    /* Common stuff */
+    //KillCounter ++;
+    *DDD = *DDD - 1;
+  }
+}
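
    Forward() confines the interaction search to the 3x3 block of grid cells
    around the proposal, which is valid because the cell side lengths are at
    least the interaction range; the DirX/DirY tables enumerate the eight
    neighbouring cells. A small C sketch of that neighbourhood walk
    (hypothetical helper, same direction tables as above):

    /* visit the 8 neighbouring grid cells of (xc, yc), with the same
       bounds checks against the cell grid as in Forward() */
    static const int dirX[9] = {0, 1, 1,  0, -1, -1, -1, 0, 1};
    static const int dirY[9] = {0, 0, -1, -1, -1,  0,  1, 1, 1};

    void visit_neighbours(int xc, int yc, int maxx, int maxy,
                          void (*visit)(int, int)) {
      for (int d = 1; d <= 8; d++) {
        int nx = xc + dirX[d], ny = yc + dirY[d];
        if (nx >= 0 && nx <= maxx && ny >= 0 && ny <= maxy)
          visit(nx, ny);   /* e.g. scan the list headCell[nx][ny]->next */
      }
    }
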
+
+
+long int Sampler::BirthDeath(long int TimeStep,
+		      struct Point *headLiving,
+		      struct Point *headDeleted,
+		      struct Point3 *headTransition){
+  long int i,n;
+  float f1,f2,f3,f4;
+  double xtemp,ytemp;
+  char InWindow, Success;
+  struct Point *TempPoint, *TempPoint2;
+  struct Point3 *TempTransition;
+
+  R_CheckUserInterrupt();
+
+  f1 = LivingPoints; f2 = PP->TotalBirthRate; f3 = f2/(f1+f2);
+  f4 = slumptal();
+  n = 0;
+  Success = 0;
+
+  //Rprintf("LivingPoints: %d TotalBirthRate %f GeneratedPoints %d\n",
+  // LivingPoints,PP->TotalBirthRate,GeneratedPoints);
+  
+  /* Birth */
+  while(Success==0){
+  if(f4<f3){
+    //Rprintf("Ping 1 (BD)\n");
+    PP->NewEvent(&xtemp, &ytemp, &InWindow);
+    //Rprintf("Ping 2 (BD)\n");
+    if(InWindow==1){
+      Success = 1;
+      //
+      TempTransition = ALLOCATE(struct Point3);
+      //
+      //Rprintf("Ping 3 (BD)\n");
+      TempTransition->Case = 0;
+      LivingPoints ++;
+      GeneratedPoints ++;
+      //
+      TempPoint = ALLOCATE(struct Point);
+      //
+      TempPoint->X = xtemp;
+      TempPoint->Y = ytemp;
+      TempPoint->No = GeneratedPoints;
+      TempPoint->R = slumptal();
+      TempPoint->next = headLiving->next;
+      headLiving->next = TempPoint;
+      NoP ++;
+      f1 = (TempPoint->X-P2P->Xmin)/P2P->XCellDim;  
+      TempTransition->XCell = int(f1);
+      f1 = (TempPoint->Y-P2P->Ymin)/P2P->YCellDim;  
+      TempTransition->YCell = int(f1);
+      
+      //Rprintf("X %f XCell %d\n",TempPoint->X,TempTransition->XCell);
+      // 
+      CLAMP(TempTransition->XCell, 0, P2P->MaxXCell, "TempTransition->XCell");
+      CLAMP(TempTransition->YCell, 0, P2P->MaxYCell, "TempTransition->YCell");
+      TempTransition->next = headTransition->next;
+      headTransition->next = TempTransition;
+    }
+  }
+  /* Death */
+  else{
+    Success = 1;
+    //
+    TempTransition = ALLOCATE(struct Point3);
+    //
+    TempTransition->Case = 1;
+    f1 = LivingPoints; f2 = f1*slumptal()+1.0;
+    n = int(f2); if(n < 1) n = 1;
+    if(n>LivingPoints){
+      //      Rprintf("diagnostic message: random integer n=%ld > %ld = number of living points\n", n,LivingPoints);
+      n=LivingPoints;
+    }
+    TempPoint2 = TempPoint = headLiving;
+    for(i=1; i<=n; i++){ 
+      TempPoint2 = TempPoint;
+      TempPoint = TempPoint->next;
+      }
+    TempPoint2->next = TempPoint->next;
+    
+    TempPoint->next = headDeleted->next;  
+    headDeleted->next = TempPoint;
+
+    LivingPoints --;
+    NoP --;
+    TempTransition->next = headTransition->next;
+    headTransition->next = TempTransition;
+  }
+  }
+  return(n);
+}
+
+void Sampler::Sim(Point2Pattern *p2p, long int *ST, long int *ET) {
+
+  P2P = p2p;
+  long int StartTime, EndTime, TimeStep, D0Time, D0Living;
+  long int XCell, YCell, DDD, i;
+  float f1;
+  
+  /* Initialising linked listed for backward simulation */
+  struct Point *headDeleted, *headLiving, *dummyDeleted, *dummyLiving;
+  struct Point *TempPoint;
+  //
+  headLiving = ALLOCATE(struct Point);
+  dummyLiving = ALLOCATE(struct Point);
+  //
+  headLiving->next = dummyLiving; dummyLiving->next = dummyLiving;
+  //
+  headDeleted = ALLOCATE(struct Point);
+  dummyDeleted = ALLOCATE(struct Point);
+  //
+  headDeleted->next = dummyDeleted; dummyDeleted->next = dummyDeleted;
+
+  struct Point2 *TempCell2;
+
+  struct Point3 *headTransition, *dummyTransition;
+  //
+  headTransition = ALLOCATE(struct Point3);
+  dummyTransition = ALLOCATE(struct Point3);
+  //
+  headTransition->next = dummyTransition; 
+  dummyTransition->next = dummyTransition;
+  
+  PP->GeneratePoisson(headLiving, &GeneratedPoints,
+			      &LivingPoints,
+			      &NoP);  
+    
+  StartTime=1;
+  EndTime=1;
+
+  TimeStep = 0; D0Time = 0;
+  D0Living = GeneratedPoints;
+
+  long int tmp, D0;
+  
+  do{
+    tmp=BirthDeath(TimeStep, headLiving,
+		      headDeleted,
+		      headTransition);
+    if(tmp>0){ 
+      if(tmp>(LivingPoints+1-D0Living)){
+	D0Living --;
+      }
+    }
+    D0Time++;
+  }while(D0Living>0);
+  tmp=BirthDeath(TimeStep, headLiving,
+		      headDeleted,
+		      headTransition); 
+  StartTime=1; EndTime=D0Time+1; D0 = 0;
+
+  do{	 
+    if(D0==1){
+      for(TimeStep=StartTime;TimeStep<=EndTime;TimeStep ++){
+	tmp=BirthDeath(TimeStep, headLiving,
+		       headDeleted,
+		       headTransition);      
+      }
+    }
+    D0 = 1;
+    P2P->Empty();
+    
+    /*
+    headUpper->next = dummyUpper; dummyUpper->next = dummyUpper;
+    for(XCell=0;XCell<=P2P->MaxXCell;XCell++){
+      for(YCell=0;YCell<=P2P->MaxYCell;YCell++){
+	headUpperCell[XCell][YCell]->next=dummyUpper;
+      }
+    }
+    */
+    
+    P2P->UpperLiving[0] = LivingPoints;
+    P2P->UpperLiving[1] = 0;
+    
+    P2P->NoP = 0;
+    i=0;
+    TempPoint = headLiving->next;
+    CHECK(TempPoint, "internal error: TempPoint is null in Sim()");
+    while(TempPoint!=TempPoint->next){
+      i++;
+      //
+      TempCell2 = ALLOCATE(struct Point2);
+      //
+      TempCell2->No = TempPoint->No;
+      TempCell2->X = TempPoint->X;
+      TempCell2->Y = TempPoint->Y;
+      TempCell2->InLower[0] = 1;
+      TempCell2->InLower[1] = 0;
+      f1 = (TempPoint->X-P2P->Xmin)/P2P->XCellDim;  XCell = int(floor(f1));
+      CLAMP(XCell, 0, P2P->MaxXCell, "XCell");
+      f1 = (TempPoint->Y-P2P->Ymin)/P2P->YCellDim;  YCell = int(floor(f1));
+      CLAMP(YCell, 0, P2P->MaxYCell, "YCell");
+      TempCell2->next = P2P->headCell[XCell][YCell]->next;
+      P2P->headCell[XCell][YCell]->next = TempCell2;
+      
+      TempPoint = TempPoint->next;
+      CHECK(TempPoint, "internal error: TempPoint is null in Sim() loop");
+    }
+    
+    //P2P->DumpToFile("temp0.dat");
+    
+    struct Point3 *TempTransition;
+    struct Point *Proposal;
+    
+    TempTransition = headTransition->next;
+    CHECK(TempTransition, "internal error: TempTransition is null in Sim()");
+    Proposal = headDeleted->next;
+    DDD = GeneratedPoints;
+    
+    for(TimeStep=EndTime;TimeStep>=1;TimeStep--){
+      R_CheckUserInterrupt();
+      Forward(TimeStep,TempTransition->Case,
+	      TempTransition->XCell,TempTransition->YCell,
+	      Proposal,&DDD);
+      if(TempTransition->Case == 1) Proposal = Proposal ->next;
+      TempTransition = TempTransition->next;
+      CHECK(TempTransition, 
+	    "internal error: TempTransition is null in Sim() loop");
+    }
+    
+    /* Doubling strategy used!*/
+    StartTime = EndTime+1;
+    EndTime=EndTime*2;
+    
+    //P2P->DumpToFile("temp.dat");
+    
+  }while(P2P->UpperLiving[0]!=P2P->UpperLiving[1]);
+  P2P->Clean();
+  i=0;
+  struct Point *TempPoint2;
+  TempPoint = headLiving;
+  TempPoint2 = headLiving->next;
+  CHECK(TempPoint2, 
+	"internal error: TempPoint2 is null in Sim() position B");
+  while(TempPoint!=TempPoint->next){
+    i++;
+    FREE(TempPoint);
+    TempPoint = TempPoint2;
+    TempPoint2 = TempPoint2->next;
+    CHECK(TempPoint2, 
+	  "internal error: TempPoint2 is null in Sim() loop C");
+  }
+  FREE(TempPoint);
+  
+  i = 0;
+  TempPoint = headDeleted;
+  TempPoint2 = headDeleted->next;
+  CHECK(TempPoint2, 
+	"internal error: TempPoint2 is null in Sim() position D");
+  while(TempPoint!=TempPoint->next){
+    i++;
+    FREE(TempPoint);
+    TempPoint = TempPoint2;
+    TempPoint2 = TempPoint2->next;
+    CHECK(TempPoint2, 
+	  "internal error: TempPoint2 is null in Sim() loop D");
+  }
+  FREE(TempPoint);
+  //Rprintf("%d ",i);
+
+  struct Point3 *TempTransition,*TempTransition2;
+
+  i = 0;
+  TempTransition = headTransition;
+  TempTransition2 = headTransition->next;
+  CHECK(TempTransition2, 
+	"internal error: TempTransition2 is null in Sim() position E");
+  while(TempTransition!=TempTransition->next){
+    i++;
+    FREE(TempTransition);
+    TempTransition = TempTransition2;
+    TempTransition2 = TempTransition2->next;
+    CHECK(TempTransition2, 
+	  "internal error: TempTransition2 is null in Sim() loop F");
+  }
+  FREE(TempTransition);
+  //Rprintf("%d ST: %d ET: %d\n",i,StartTime,EndTime);
+  //scanf("%f",&f1);
+  *ST = StartTime;
+  *ET = EndTime;
+}
+
+#include "PerfectStrauss.h"
+#include "PerfectStraussHard.h"
+#include "PerfectHardcore.h"
+#include "PerfectDiggleGratton.h"
+#include "PerfectDGS.h"
+#include "PerfectPenttinen.h"
diff --git a/src/PerfectDGS.h b/src/PerfectDGS.h
new file mode 100644
index 0000000..438492c
--- /dev/null
+++ b/src/PerfectDGS.h
@@ -0,0 +1,195 @@
+
+// ........................... Diggle-Gates-Stibbard process ................
+// $Revision: 1.3 $  $Date: 2012/03/10 11:22:50 $
+
+#ifndef PI
+#define PI 3.14159265358979
+#endif
+
+class DgsProcess : public PointProcess {
+ public:
+  double beta, rho, rhosquared;
+  DgsProcess(double xmin, double xmax, double ymin, double ymax, 
+		       double b, double r);
+  ~DgsProcess(){}
+  void NewEvent(double *x, double *y, char *InWindow);
+  void GeneratePoisson(Point *headPoint, 
+			       long int *GeneratedPoints,
+			       long int *LivingPoints,
+			       long int *NoP);
+  double Interaction(double dsquared);
+};
+
+DgsProcess::DgsProcess(double xmin, double xmax, 
+		       double ymin, double ymax, 
+   		       double b, double r) :
+  PointProcess(xmin, xmax, ymin, ymax){
+    beta = b; rho = r; 
+    rhosquared = rho * rho;
+    InteractionRange = rho;
+    TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin);
+  }  
+
+double DgsProcess::Interaction(double dsquared)
+{
+  double rtn, dist, t;
+  rtn = 1;
+  if(dsquared < rhosquared) {
+    dist = sqrt(dsquared);
+    t = sin((PI/2) * dist/rho);
+    rtn = t * t;
+  }
+  return(rtn);
+}
+
+void DgsProcess::NewEvent(double *x, double *y, char *InWindow)
+{
+  double Xdim, Ydim;
+  Xdim = Xmax-Xmin;
+  Ydim = Ymax-Ymin;
+  *x = slumptal()*Xdim+Xmin;
+  *y = slumptal()*Ydim+Ymin;
+  *InWindow = 1;
+}
+
+void DgsProcess::GeneratePoisson(Point *headPoint, 
+			      long int *GeneratedPoints,
+			      long int *LivingPoints,
+			      long int *NoP)
+{
+  int i;
+  double xtemp, ytemp, L, Xdim, Ydim;
+  struct Point *TempPoint;
+  Xdim = Xmax-Xmin;
+  Ydim = Ymax-Ymin;
+  L = beta*Xdim*Ydim;
+  *GeneratedPoints = poisson(L);
+  *LivingPoints = *GeneratedPoints;
+  for (i=1; i<=*GeneratedPoints ; i++){
+    //Rprintf("Generating DgsProcess Poisson 3\n");
+    //scanf("%f",&f1);
+    xtemp = slumptal()*Xdim+Xmin;
+    ytemp = slumptal()*Ydim+Ymin;
+    //
+    //Rprintf("Generating DgsProcess Poisson 3.2\n");
+    TempPoint = ALLOCATE(struct Point);
+    //
+    TempPoint->X = xtemp;
+    TempPoint->Y = ytemp;
+    TempPoint->No = i;
+    TempPoint->R = slumptal();
+    //Rprintf("Generating DgsProcess Poisson 3.6\n");
+    TempPoint->next = headPoint->next;
+    headPoint->next = TempPoint;
+    *NoP = *NoP + 1;
+  }
+}
+
+// ........................... Interface to R ..........................
+
+extern "C" {
+  SEXP PerfectDGS(SEXP beta,
+		  SEXP rho,
+		  SEXP xrange,
+		  SEXP yrange) {
+
+    // input parameters
+    double Beta, Rho, Xmin, Xmax, Ymin, Ymax;
+    double *Xrange, *Yrange;
+    // internal
+    int xcells, ycells;
+    long int StartTime, EndTime;
+    // output 
+    int noutmax;
+    SEXP xout, yout, nout, out;
+    double *xx, *yy;
+    int *nn;
+
+    // protect arguments from garbage collector    
+    PROTECT(beta   = AS_NUMERIC(beta));
+    PROTECT(rho    = AS_NUMERIC(rho));
+    PROTECT(xrange = AS_NUMERIC(xrange));
+    PROTECT(yrange = AS_NUMERIC(yrange));
+    // that's 4 protected objects
+
+    // extract arguments
+    Beta   = *(NUMERIC_POINTER(beta));
+    Rho    = *(NUMERIC_POINTER(rho));
+
+    Xrange = NUMERIC_POINTER(xrange);
+    Xmin   = Xrange[0];
+    Xmax   = Xrange[1];
+    Yrange = NUMERIC_POINTER(yrange);
+    Ymin   = Yrange[0];
+    Ymax   = Yrange[1];
+
+    // compute cell array size
+    xcells = (int) floor((Xmax-Xmin)/ Rho);
+    if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1;
+    ycells = (int) floor((Ymax-Ymin)/ Rho);
+    if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1;
+#ifdef DBGS
+    Rprintf("xcells %d   ycells %d\n",xcells,ycells);
+    Rprintf("Initialising\n");
+#endif
+
+    // Initialise Diggle-Gates-Stibbard point process
+    DgsProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax,Beta,Rho);  
+    // Initialise point pattern
+    Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells);
+    // parameters: min x, max x, min y, max y, "cells" in x and y direction
+    // used for speeding up neighbour counting, 9 is max here
+    
+#ifdef DBGS
+    Rprintf("Initialisation complete\n");
+#endif
+
+    // Synchronise random number generator 
+    GetRNGstate();
+
+    // Initialise perfect sampler
+    Sampler PerfectSampler(&ExampleProcess);
+    
+    // Perform perfect sampling
+    PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime);
+    
+    // Synchronise random number generator 
+    PutRNGstate();
+
+    // Get upper estimate of number of points
+    noutmax = ExamplePattern.UpperCount() + 1;
+    
+    // Allocate space for output
+    PROTECT(xout = NEW_NUMERIC(noutmax));
+    PROTECT(yout = NEW_NUMERIC(noutmax));
+    PROTECT(nout = NEW_INTEGER(1));
+    xx = NUMERIC_POINTER(xout);
+    yy = NUMERIC_POINTER(yout);
+    nn = INTEGER_POINTER(nout);
+
+    // copy data into output storage
+    ExamplePattern.Return(xx, yy, nn, noutmax);
+
+    // pack up into output list
+    PROTECT(out  = NEW_LIST(3));
+    SET_VECTOR_ELT(out, 0, xout);
+    SET_VECTOR_ELT(out, 1, yout);
+    SET_VECTOR_ELT(out, 2, nout);
+    
+    // return 
+    UNPROTECT(8);  // 4 arguments plus xout, yout, nout, out
+    return(out);
+  }
+}
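
    Each interface function above follows the standard .Call protection
    discipline: every SEXP that is coerced or allocated is PROTECTed, and the
    total count is released in a single UNPROTECT just before returning. A
    minimal self-contained C sketch of that bookkeeping (hypothetical entry
    point, not part of spatstat):

    #include <R.h>
    #include <Rdefines.h>

    SEXP toy_entry(SEXP x) {
      SEXP out;
      PROTECT(x = AS_NUMERIC(x));      /* 1 protected object */
      PROTECT(out = NEW_NUMERIC(1));   /* 2 protected objects */
      NUMERIC_POINTER(out)[0] = NUMERIC_POINTER(x)[0] * 2.0;
      UNPROTECT(2);                    /* x and out */
      return out;
    }
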
diff --git a/src/PerfectDiggleGratton.h b/src/PerfectDiggleGratton.h
new file mode 100644
index 0000000..02f95c6
--- /dev/null
+++ b/src/PerfectDiggleGratton.h
@@ -0,0 +1,203 @@
+
+// ........................... Diggle-Gratton process ..........................
+//  $Revision: 1.5 $   $Date: 2012/03/10 11:22:56 $
+
+class DiggleGrattonProcess : public PointProcess {
+ public:
+  double beta, delta, rho, kappa, rhominusdelta, deltasquared, rhosquared;
+  DiggleGrattonProcess(double xmin, double xmax, double ymin, double ymax, 
+		       double b, double d, double r, double k);
+  ~DiggleGrattonProcess(){}
+  void NewEvent(double *x, double *y, char *InWindow);
+  void GeneratePoisson(Point *headPoint, 
+			       long int *GeneratedPoints,
+			       long int *LivingPoints,
+			       long int *NoP);
+  double Interaction(double dsquared);
+};
+
+DiggleGrattonProcess::DiggleGrattonProcess(double xmin, double xmax, 
+			      double ymin, double ymax, 
+			      double b, double d, double r, double k) :
+  PointProcess(xmin, xmax, ymin, ymax){
+    beta = b; delta = d; rho = r; kappa = k;
+    deltasquared = delta * delta;
+    rhosquared = rho * rho;
+    rhominusdelta = rho - delta;
+    InteractionRange = rho;
+    TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin);
+  }  
+
+double DiggleGrattonProcess::Interaction(double dsquared)
+{
+  double rtn, dist, t;
+  rtn = 1;
+  if(dsquared < rhosquared) {
+    if(dsquared < deltasquared) { 
+      rtn = 0; 
+    } else {
+      dist = sqrt(dsquared);
+      t = (dist - delta)/rhominusdelta;
+      rtn = pow(t, kappa);
+    }
+  }
+   return(rtn);
+}
+
+void DiggleGrattonProcess::NewEvent(double *x, double *y, char *InWindow)
+{
+  double Xdim, Ydim;
+  Xdim = Xmax-Xmin;
+  Ydim = Ymax-Ymin;
+  *x = slumptal()*Xdim+Xmin;
+  *y = slumptal()*Ydim+Ymin;
+  *InWindow = 1;
+}
+
+void DiggleGrattonProcess::GeneratePoisson(Point *headPoint, 
+			      long int *GeneratedPoints,
+			      long int *LivingPoints,
+			      long int *NoP)
+{
+  int i;
+  double xtemp, ytemp, L, Xdim, Ydim;
+  struct Point *TempPoint;
+  Xdim = Xmax-Xmin;
+  Ydim = Ymax-Ymin;
+  L = beta*Xdim*Ydim;
+  *GeneratedPoints = poisson(L);
+  *LivingPoints = *GeneratedPoints;
+  for (i=1; i<=*GeneratedPoints ; i++){
+    //Rprintf("Generating DiggleGrattonProcess Poisson 3\n");
+    //scanf("%f",&f1);
+    xtemp = slumptal()*Xdim+Xmin;
+    ytemp = slumptal()*Ydim+Ymin;
+    //
+    //Rprintf("Generating DiggleGrattonProcess Poisson 3.2\n");
+    TempPoint = ALLOCATE(struct Point);
+    //
+    TempPoint->X = xtemp;
+    TempPoint->Y = ytemp;
+    TempPoint->No = i;
+    TempPoint->R = slumptal();
+    //Rprintf("Generating DiggleGrattonProcess Poisson 3.6\n");
+    TempPoint->next = headPoint->next;
+    headPoint->next = TempPoint;
+    *NoP = *NoP + 1;
+  }
+}
+
+// ........................... Interface to R ..........................
+
+extern "C" {
+  SEXP PerfectDiggleGratton(SEXP beta,
+		      SEXP delta,
+		      SEXP rho,
+		      SEXP kappa,
+		      SEXP xrange,
+		      SEXP yrange) {
+
+    // input parameters
+    double Beta, Delta, Rho, Kappa, Xmin, Xmax, Ymin, Ymax;
+    double *Xrange, *Yrange;
+    // internal
+    int xcells, ycells;
+    long int StartTime, EndTime;
+    // output 
+    int noutmax;
+    SEXP xout, yout, nout, out;
+    double *xx, *yy;
+    int *nn;
+
+    // protect arguments from garbage collector    
+    PROTECT(beta   = AS_NUMERIC(beta));
+    PROTECT(delta  = AS_NUMERIC(delta));
+    PROTECT(rho    = AS_NUMERIC(rho));
+    PROTECT(kappa  = AS_NUMERIC(kappa));
+    PROTECT(xrange = AS_NUMERIC(xrange));
+    PROTECT(yrange = AS_NUMERIC(yrange));
+    // that's 6 protected objects
+
+    // extract arguments
+    Beta   = *(NUMERIC_POINTER(beta));
+    Delta  = *(NUMERIC_POINTER(delta));
+    Rho    = *(NUMERIC_POINTER(rho));
+    Kappa  = *(NUMERIC_POINTER(kappa));
+
+    Xrange = NUMERIC_POINTER(xrange);
+    Xmin   = Xrange[0];
+    Xmax   = Xrange[1];
+    Yrange = NUMERIC_POINTER(yrange);
+    Ymin   = Yrange[0];
+    Ymax   = Yrange[1];
+
+    // compute cell array size
+    xcells = (int) floor((Xmax-Xmin)/ Rho);
+    if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1;
+    ycells = (int) floor((Ymax-Ymin)/ Rho);
+    if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1;
+#ifdef DBGS
+    Rprintf("xcells %d   ycells %d\n",xcells,ycells);
+    Rprintf("Initialising\n");
+#endif
+
+    // Initialise DiggleGratton point process
+    DiggleGrattonProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax,Beta,Delta,Rho,Kappa);  
+    // Initialise point pattern
+    Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells);
+    // parameters: min x, max x, min y, max y, "cells" in x and y direction
+    // used for speeding up neighbour counting, 9 is max here
+    
+#ifdef DBGS
+    Rprintf("Initialisation complete\n");
+#endif
+
+    // Synchronise random number generator 
+    GetRNGstate();
+
+    // Initialise perfect sampler
+    Sampler PerfectSampler(&ExampleProcess);
+    
+    // Perform perfect sampling
+    PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime);
+    
+    // Synchronise random number generator 
+    PutRNGstate();
+
+    // Get upper estimate of number of points
+    noutmax = ExamplePattern.UpperCount() + 1;
+    
+    // Allocate space for output
+    PROTECT(xout = NEW_NUMERIC(noutmax));
+    PROTECT(yout = NEW_NUMERIC(noutmax));
+    PROTECT(nout = NEW_INTEGER(1));
+    xx = NUMERIC_POINTER(xout);
+    yy = NUMERIC_POINTER(yout);
+    nn = INTEGER_POINTER(nout);
+
+    // copy data into output storage
+    ExamplePattern.Return(xx, yy, nn, noutmax);
+
+    // pack up into output list
+    PROTECT(out  = NEW_LIST(3));
+    SET_VECTOR_ELT(out, 0, xout);
+    SET_VECTOR_ELT(out, 1, yout);
+    SET_VECTOR_ELT(out, 2, nout);
+    
+    // return 
+    UNPROTECT(10);  // 6 arguments plus xout, yout, nout, out
+    return(out);
+  }
+}
diff --git a/src/PerfectHardcore.h b/src/PerfectHardcore.h
new file mode 100644
index 0000000..774a8c1
--- /dev/null
+++ b/src/PerfectHardcore.h
@@ -0,0 +1,174 @@
+
+// ........................... Hardcore process ..........................
+// $Revision: 1.4 $  $Date: 2012/03/10 11:23:09 $
+
+class HardcoreProcess : public PointProcess {
+ public:
+  double beta, R, Rsquared;
+  HardcoreProcess(double xmin, double xmax, double ymin, double ymax, 
+		double b, double Ri);
+  ~HardcoreProcess(){}
+  void NewEvent(double *x, double *y, char *InWindow);
+  void GeneratePoisson(Point *headPoint, 
+			       long int *GeneratedPoints,
+			       long int *LivingPoints,
+			       long int *NoP);
+  double Interaction(double dsquared);
+};
+
+HardcoreProcess::HardcoreProcess(double xmin, double xmax, 
+			      double ymin, double ymax, 
+			      double b, double Ri) :
+  PointProcess(xmin, xmax, ymin, ymax){
+  beta = b; R = Ri; 
+  Rsquared = R * R; 
+  InteractionRange = R;
+  TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin);
+  }  
+
+double HardcoreProcess::Interaction(double dsquared)
+{
+  double rtn;
+  rtn = 1;
+  if(dsquared < Rsquared) rtn = 0;
+  return(rtn);
+}
+
+void HardcoreProcess::NewEvent(double *x, double *y, char *InWindow)
+{
+  double Xdim, Ydim;
+  Xdim = Xmax-Xmin;
+  Ydim = Ymax-Ymin;
+  *x = slumptal()*Xdim+Xmin;
+  *y = slumptal()*Ydim+Ymin;
+  *InWindow = 1;
+}
+
+void HardcoreProcess::GeneratePoisson(Point *headPoint, 
+			      long int *GeneratedPoints,
+			      long int *LivingPoints,
+			      long int *NoP)
+{
+  int i;
+  double xtemp, ytemp, L, Xdim, Ydim;
+  struct Point *TempPoint;
+  Xdim = Xmax-Xmin;
+  Ydim = Ymax-Ymin;
+  L = beta*Xdim*Ydim;
+  *GeneratedPoints = poisson(L);
+  *LivingPoints = *GeneratedPoints;
+  for (i=1; i<=*GeneratedPoints ; i++){
+    //Rprintf("Generating HardcoreProcess Poisson 3\n");
+    //scanf("%f",&f1);
+    xtemp = slumptal()*Xdim+Xmin;
+    ytemp = slumptal()*Ydim+Ymin;
+    //
+    //Rprintf("Generating HardcoreProcess Poisson 3.2\n");
+    TempPoint = ALLOCATE(struct Point);
+    //
+    TempPoint->X = xtemp;
+    TempPoint->Y = ytemp;
+    TempPoint->No = i;
+    TempPoint->R = slumptal();
+    //Rprintf("Generating HardcoreProcess Poisson 3.6\n");
+    TempPoint->next = headPoint->next;
+    headPoint->next = TempPoint;
+    *NoP = *NoP + 1;
+  }
+}
+
+// ........................... Interface to R ..........................
+
+extern "C" {
+  SEXP PerfectHardcore(SEXP beta,
+		      SEXP r,
+		      SEXP xrange,
+		      SEXP yrange) {
+
+    // input parameters
+    double Beta, R, Xmin, Xmax, Ymin, Ymax;
+    double *Xrange, *Yrange;
+    // internal
+    int xcells, ycells;
+    long int StartTime, EndTime;
+    // output 
+    int noutmax;
+    SEXP xout, yout, nout, out;
+    double *xx, *yy;
+    int *nn;
+
+    // protect arguments from garbage collector    
+    PROTECT(beta   = AS_NUMERIC(beta));
+    PROTECT(r      = AS_NUMERIC(r));
+    PROTECT(xrange = AS_NUMERIC(xrange));
+    PROTECT(yrange = AS_NUMERIC(yrange));
+    // that's 4 protected objects
+
+    // extract arguments
+    Beta   = *(NUMERIC_POINTER(beta));
+    R      = *(NUMERIC_POINTER(r));
+    Xrange = NUMERIC_POINTER(xrange);
+    Xmin   = Xrange[0];
+    Xmax   = Xrange[1];
+    Yrange = NUMERIC_POINTER(yrange);
+    Ymin   = Yrange[0];
+    Ymax   = Yrange[1];
+
+    // compute cell array size
+    xcells = (int) floor((Xmax-Xmin)/ R);
+    if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1;
+    ycells = (int) floor((Ymax-Ymin)/ R);
+    if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1;
+#ifdef DBGS
+    Rprintf("xcells %d   ycells %d\n",xcells,ycells);
+    Rprintf("Initialising\n");
+#endif
+
+    // Initialise Hardcore point process
+    HardcoreProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax, Beta, R);  
+    // Initialise point pattern
+    Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells);
+    // parameters: min x, max x, min y, max y, "cells" in x and y direction
+    // used for speeding up neighbour counting, 9 is max here
+    
+#ifdef DBGS
+    Rprintf("Initialisation complete\n");
+#endif
+
+    // Synchronise random number generator 
+    GetRNGstate();
+
+    // Initialise perfect sampler
+    Sampler PerfectSampler(&ExampleProcess);
+    
+    // Perform perfect sampling
+    PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime);
+    
+    // Synchronise random number generator 
+    PutRNGstate();
+
+    // Get upper estimate of number of points
+    noutmax = ExamplePattern.UpperCount() + 1;
+    
+    // Allocate space for output
+    PROTECT(xout = NEW_NUMERIC(noutmax));
+    PROTECT(yout = NEW_NUMERIC(noutmax));
+    PROTECT(nout = NEW_INTEGER(1));
+    xx = NUMERIC_POINTER(xout);
+    yy = NUMERIC_POINTER(yout);
+    nn = INTEGER_POINTER(nout);
+
+    // copy data into output storage
+    ExamplePattern.Return(xx, yy, nn, noutmax);
+
+    // pack up into output list
+    PROTECT(out  = NEW_LIST(3));
+    SET_VECTOR_ELT(out, 0, xout);
+    SET_VECTOR_ELT(out, 1, yout);
+    SET_VECTOR_ELT(out, 2, nout);
+    
+    // return 
+    UNPROTECT(8);  // 4 arguments plus xout, yout, nout, out
+    return(out);
+  }
+}
diff --git a/src/PerfectPenttinen.h b/src/PerfectPenttinen.h
new file mode 100644
index 0000000..a73abd5
--- /dev/null
+++ b/src/PerfectPenttinen.h
@@ -0,0 +1,200 @@
+
+// ........................... Penttinen process ................
+// $Revision: 1.2 $  $Date: 2016/02/02 01:30:01 $
+
+class PenttProcess : public PointProcess {
+ public:
+  double beta, gamma, radius, reachsquared, loggamma2pi;
+  int ishard;
+  PenttProcess(double xmin, double xmax, double ymin, double ymax, 
+	       double b, double g, double r);
+  ~PenttProcess(){}
+  void NewEvent(double *x, double *y, char *InWindow);
+  void GeneratePoisson(Point *headPoint, 
+			       long int *GeneratedPoints,
+			       long int *LivingPoints,
+			       long int *NoP);
+  double Interaction(double dsquared);
+};
+
+PenttProcess::PenttProcess(double xmin, double xmax, 
+			   double ymin, double ymax, 
+			   double b, double g, double r) :
+  PointProcess(xmin, xmax, ymin, ymax){
+    beta = b; gamma = g; radius = r; 
+    ishard = (gamma <= DOUBLE_EPS);
+    loggamma2pi = M_2PI * (ishard? 0.0 : log(gamma));
+    reachsquared = 4.0 * radius * radius;
+    InteractionRange = 2.0 * radius;
+    TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin);
+  }  
+
+double PenttProcess::Interaction(double dsquared)
+{
+  double rtn, z, z2;
+  rtn = 1.0;
+  if(dsquared < reachsquared) {
+    if(ishard) return(0.0);
+    z2 = dsquared/reachsquared;
+    z = sqrt(z2);
+    if(z < 1.0) {
+      rtn = exp(loggamma2pi * (acos(z) - z * sqrt(1.0 - z2)));
+    }
+  }
+  return(rtn);
+}
+
+void PenttProcess::NewEvent(double *x, double *y, char *InWindow)
+{
+  double Xdim, Ydim;
+  Xdim = Xmax-Xmin;
+  Ydim = Ymax-Ymin;
+  *x = slumptal()*Xdim+Xmin;
+  *y = slumptal()*Ydim+Ymin;
+  *InWindow = 1;
+}
+
+void PenttProcess::GeneratePoisson(Point *headPoint, 
+			      long int *GeneratedPoints,
+			      long int *LivingPoints,
+			      long int *NoP)
+{
+  int i;
+  double xtemp, ytemp, L, Xdim, Ydim;
+  struct Point *TempPoint;
+  Xdim = Xmax-Xmin;
+  Ydim = Ymax-Ymin;
+  L = beta*Xdim*Ydim;
+  *GeneratedPoints = poisson(L);
+  *LivingPoints = *GeneratedPoints;
+  for (i=1; i<=*GeneratedPoints ; i++){
+    //Rprintf("Generating PenttProcess Poisson 3\n");
+    //scanf("%f",&f1);
+    xtemp = slumptal()*Xdim+Xmin;
+    ytemp = slumptal()*Ydim+Ymin;
+    //
+    //Rprintf("Generating PenttProcess Poisson 3.2\n");
+    TempPoint = ALLOCATE(struct Point);
+    //
+    TempPoint->X = xtemp;
+    TempPoint->Y = ytemp;
+    TempPoint->No = i;
+    TempPoint->R = slumptal();
+    //Rprintf("Generating PenttProcess Poisson 3.6\n");
+    TempPoint->next = headPoint->next;
+    headPoint->next = TempPoint;
+    *NoP = *NoP + 1;
+  }
+}
+
+// ........................... Interface to R ..........................
+
+extern "C" {
+  SEXP PerfectPenttinen(SEXP beta,
+			SEXP gamma,
+			SEXP r,
+			SEXP xrange,
+			SEXP yrange) {
+
+    // input parameters
+    double Beta, Gamma, R, Xmin, Xmax, Ymin, Ymax;
+    double *Xrange, *Yrange;
+    // internal
+    int xcells, ycells;
+    long int StartTime, EndTime;
+    // output 
+    int noutmax;
+    SEXP xout, yout, nout, out;
+    double *xx, *yy;
+    int *nn;
+
+    // protect arguments from garbage collector    
+    PROTECT(beta   = AS_NUMERIC(beta));
+    PROTECT(gamma   = AS_NUMERIC(gamma));
+    PROTECT(r      = AS_NUMERIC(r));
+    PROTECT(xrange = AS_NUMERIC(xrange));
+    PROTECT(yrange = AS_NUMERIC(yrange));
+    // that's 5 protected objects
+
+    // extract arguments
+    Beta   = *(NUMERIC_POINTER(beta));
+    Gamma  = *(NUMERIC_POINTER(gamma));
+    R      = *(NUMERIC_POINTER(r));
+
+    Xrange = NUMERIC_POINTER(xrange);
+    Xmin   = Xrange[0];
+    Xmax   = Xrange[1];
+    Yrange = NUMERIC_POINTER(yrange);
+    Ymin   = Yrange[0];
+    Ymax   = Yrange[1];
+
+    // compute cell array size
+    xcells = (int) floor((Xmax-Xmin)/ R);
+    if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1;
+    ycells = (int) floor((Ymax-Ymin)/ R);
+    if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1;
+#ifdef DBGS
+    Rprintf("xcells %d   ycells %d\n",xcells,ycells);
+    Rprintf("Initialising\n");
+#endif
+
+    // Initialise Penttinen point process
+    PenttProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax,Beta,Gamma,R);  
+    // Initialise point pattern
+    Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells);
+    // parameters: min x, max x, min y, max y, "cells" in x and y direction
+    // used for speeding up neighbour counting, 9 is max here
+    
+#ifdef DBGS
+    Rprintf("Initialisation complete\n");
+#endif
+
+    // Synchronise random number generator 
+    GetRNGstate();
+
+    // Initialise perfect sampler
+    Sampler PerfectSampler(&ExampleProcess);
+    
+    // Perform perfect sampling
+    PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime);
+    
+    // Synchronise random number generator 
+    PutRNGstate();
+
+    // Get upper estimate of number of points
+    noutmax = ExamplePattern.UpperCount() + 1;
+    
+    // Allocate space for output
+    PROTECT(xout = NEW_NUMERIC(noutmax));
+    PROTECT(yout = NEW_NUMERIC(noutmax));
+    PROTECT(nout = NEW_INTEGER(1));
+    xx = NUMERIC_POINTER(xout);
+    yy = NUMERIC_POINTER(yout);
+    nn = INTEGER_POINTER(nout);
+
+    // copy data into output storage
+    ExamplePattern.Return(xx, yy, nn, noutmax);
+
+    // pack up into output list
+    PROTECT(out  = NEW_LIST(3));
+    SET_VECTOR_ELT(out, 0, xout);
+    SET_VECTOR_ELT(out, 1, yout);
+    SET_VECTOR_ELT(out, 2, nout);
+    
+    // return 
+    UNPROTECT(9);  // 5 arguments plus xout, yout, nout, out
+    return(out);
+  }
+}
diff --git a/src/PerfectStrauss.h b/src/PerfectStrauss.h
new file mode 100644
index 0000000..8e6e606
--- /dev/null
+++ b/src/PerfectStrauss.h
@@ -0,0 +1,302 @@
+
+// ........................... Strauss process ..........................
+//  $Revision: 1.4 $ $Date: 2014/02/18 10:43:00 $
+
+class StraussProcess : public PointProcess {
+ public:
+  double beta, gamma, R, Rsquared;
+  StraussProcess(double xmin, double xmax, double ymin, double ymax, 
+		double b, double g, double Ri);
+  ~StraussProcess(){}
+  void NewEvent(double *x, double *y, char *InWindow);
+  void GeneratePoisson(Point *headPoint, 
+			       long int *GeneratedPoints,
+			       long int *LivingPoints,
+			       long int *NoP);
+  double Interaction(double dsquared);
+  //  void CalcBeta(long int xsidepomm, long int ysidepomm, 
+  //	   double *betapomm);
+  //  void CheckBeta(long int xsidepomm, long int ysidepomm, 
+  //		 double *betapomm);
+  //  double lnCondInt(struct Point2 *TempCell, Point2Pattern *p2p);
+  //  void Beta(struct Point2 *TempCell);
+  //  void CalcBeta(Point2Pattern *p2p);
+};
+
+StraussProcess::StraussProcess(double xmin, double xmax, 
+			      double ymin, double ymax, 
+			      double b, double g, double Ri) :
+  PointProcess(xmin, xmax, ymin, ymax){
+  beta = b; gamma = g; R = Ri; 
+  Rsquared = R * R; 
+  InteractionRange = R;
+  TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin);
+  }  
+
+double StraussProcess::Interaction(double dsquared)
+{
+  double rtn;
+  rtn = 1;
+  if(dsquared < Rsquared) rtn = gamma;
+  return(rtn);
+}
+
+void StraussProcess::NewEvent(double *x, double *y, char *InWindow)
+{
+  double Xdim, Ydim;
+  Xdim = Xmax-Xmin;
+  Ydim = Ymax-Ymin;
+  *x = slumptal()*Xdim+Xmin;
+  *y = slumptal()*Ydim+Ymin;
+  *InWindow = 1;
+}
+
+void StraussProcess::GeneratePoisson(Point *headPoint, 
+			      long int *GeneratedPoints,
+			      long int *LivingPoints,
+			      long int *NoP)
+{
+  int i;
+  double xtemp, ytemp, L, Xdim, Ydim;
+  struct Point *TempPoint;
+  Xdim = Xmax-Xmin;
+  Ydim = Ymax-Ymin;
+  L = beta*Xdim*Ydim;
+  *GeneratedPoints = poisson(L);
+  *LivingPoints = *GeneratedPoints;
+  for (i=1; i<=*GeneratedPoints ; i++){
+    //Rprintf("Generating StraussProcess Poisson 3\n");
+    //scanf("%f",&f1);
+    xtemp = slumptal()*Xdim+Xmin;
+    ytemp = slumptal()*Ydim+Ymin;
+    //
+    //Rprintf("Generating StraussProcess Poisson 3.2\n");
+    TempPoint = ALLOCATE(struct Point);
+    //
+    TempPoint->X = xtemp;
+    TempPoint->Y = ytemp;
+    TempPoint->No = i;
+    TempPoint->R = slumptal();
+    //Rprintf("Generating StraussProcess Poisson 3.6\n");
+    TempPoint->next = headPoint->next;
+    headPoint->next = TempPoint;
+    *NoP = *NoP + 1;
+  }
+}
+
+//void StraussProcess::CalcBeta(long int xsidepomm, long int ysidepomm, 
+//		   double *betapomm){ 
+//  long int i,j,k;
+//  k=0;
+//  //  Rprintf("\ndiagnostic message: Strauss CalcBeta... %ld %ld\n",xsidepomm,ysidepomm);
+//  for(i=0; i<xsidepomm; i++){
+//    for(j=0; j<ysidepomm; j++){
+//      *(betapomm + i*ysidepomm + j) = this->beta;
+//      k++;
+//    }
+//  } 
+//}
+
+//void StraussProcess::CheckBeta(long int xsidepomm, long int ysidepomm, 
+//		   double *betapomm){ 
+//  long int i,j,k;
+//  //  double d1;
+//  k=0;
+//  //  Rprintf("\ndiagnostic message: Strauss CalcBeta... %ld %ld\n",xsidepomm,ysidepomm);
+//  for(i=0; i<xsidepomm; i++){
+//    for(j=0; j<ysidepomm; j++){
+//      if((fabs(*(betapomm + i*ysidepomm + j)- beta)>0.001) && (k==0)){
+//	Rprintf("%f %f %f %ld %ld\n",fabs(*(betapomm + i*ysidepomm + j)- beta),
+//	       *(betapomm + i*ysidepomm + j),beta,i,j);
+//	k++;
+//	//	scanf("%lf",&d1);
+//      }
+//    }
+//  } 
+//}
+
+//double StraussProcess::lnCondInt(struct Point2 *TempCell, 
+//				 Point2Pattern *p2p){
+//  double f1;
+//  long int xco,yco,xc,yc,fx,tx,fy,ty,ry,rx,k;
+//  double dy,dx, lnCI,dst2;
+//  struct Point2 *TempCell2;
+//  
+//  f1 = (TempCell->X-p2p->Xmin)/p2p->XCellDim;  xc = int(f1);
+//  CLAMP(xc, 0, p2p->MaxXCell, "xc");
+//  f1 = (TempCell->Y-p2p->Ymin)/p2p->YCellDim;  yc = int(f1);
+//  CLAMP(yc, 0, p2p->MaxYCell, "yc");
+//  
+//  dx = (Xmax-Xmin)/(double(p2p->MaxXCell+1));
+//  dy = (Ymax-Ymin)/(double(p2p->MaxYCell+1));
+//  rx = int(this->InteractionRange/dx+1.0);
+//  ry = int(this->InteractionRange/dy+1.0);
+//  
+//  lnCI = log(TempCell->Beta);
+//
+//  k = 0;
+//  
+//  if((xc+rx)<=p2p->MaxXCell) tx=xc+rx; else tx = p2p->MaxXCell;
+//  if((yc+ry)<=p2p->MaxYCell) ty=yc+ry; else ty = p2p->MaxYCell;
+//  if((xc-rx)>=0) fx=xc-rx; else fx = 0;
+//  if((yc-ry)>=0) fy=yc-ry; else fy = 0;
+//
+//  //Rprintf("MCI! %d %d %d %d\n",fx,tx,fy,ty);
+//
+//  for(xco = fx; xco <= tx; xco++){
+//    for(yco = fy; yco <= ty; yco++){
+//      CHECK(p2p->headCell[xco][yco], 
+//	    "internal error: p2p->headCell[xco][yco] is null in lnCondInt()");
+//      TempCell2 = p2p->headCell[xco][yco]->next;
+//      CHECK(TempCell2, "internal error: TempCell2 is null in lnCondInt()");
+//      while(TempCell2!=TempCell2->next){
+//	if(TempCell2 != TempCell){
+//	  k++;
+//	  dst2 = pow(TempCell->X-TempCell2->X,2)+
+//	        pow(TempCell->Y-TempCell2->Y,2);
+//	  lnCI += log(Interaction(dst2));
+//	}
+//	TempCell2 = TempCell2->next; 
+//	CHECK(TempCell2, 
+//	      "internal error: TempCell2 is null in lnCondInt() loop");
+//      }
+//    }
+//  }
+//  return(lnCI);
+//}
+
+//void StraussProcess::Beta(struct Point2 *TempCell){
+//  TempCell->Beta = beta;
+//}
+
+//void StraussProcess::CalcBeta(Point2Pattern *p2p){
+//  long int xco,yco;
+//  //  double dy,dx;
+//  struct Point2 *TempMother;
+//
+//  for(xco = 0; xco <= p2p->MaxXCell; xco++){
+//    for(yco = 0; yco <= p2p->MaxYCell; yco++){
+//      CHECK(p2p->headCell[xco][yco], 
+//	    "internal error: p2p->headCell[xco][yco] is null in CalcBeta()");
+//      TempMother = p2p->headCell[xco][yco]->next;
+//      CHECK(TempMother, "internal error: TempMother is null in CalcBeta()");
+//      while(TempMother!=TempMother->next){
+//	TempMother->Beta = this->beta;
+//	TempMother = TempMother->next;
+//	CHECK(TempMother, 
+//	      "internal error: TempMother is null in CalcBeta() loop");
+//      }
+//    }
+//  }
+//}
+
+// ........................... Interface to R ..........................
+
+extern "C" {
+  SEXP PerfectStrauss(SEXP beta,
+		      SEXP gamma,
+		      SEXP r,
+		      SEXP xrange,
+		      SEXP yrange) {
+
+    // input parameters
+    double Beta, Gamma, R, Xmin, Xmax, Ymin, Ymax;
+    double *Xrange, *Yrange;
+    // internal
+    int xcells, ycells;
+    long int EndTime, StartTime;
+    // output 
+    int noutmax;
+    SEXP xout, yout, nout, out;
+    double *xx, *yy;
+    int *nn;
+
+    SEXP stout, etout;
+    int *ss, *ee;
+
+    // protect arguments from garbage collector    
+    PROTECT(beta   = AS_NUMERIC(beta));
+    PROTECT(gamma  = AS_NUMERIC(gamma));
+    PROTECT(r      = AS_NUMERIC(r));
+    PROTECT(xrange = AS_NUMERIC(xrange));
+    PROTECT(yrange = AS_NUMERIC(yrange));
+    // that's 5 protected objects
+
+    // extract arguments
+    Beta   = *(NUMERIC_POINTER(beta));
+    Gamma  = *(NUMERIC_POINTER(gamma));
+    R      = *(NUMERIC_POINTER(r));
+    Xrange = NUMERIC_POINTER(xrange);
+    Xmin   = Xrange[0];
+    Xmax   = Xrange[1];
+    Yrange = NUMERIC_POINTER(yrange);
+    Ymin   = Yrange[0];
+    Ymax   = Yrange[1];
+
+    // compute cell array size
+    xcells = (int) floor((Xmax-Xmin)/ R);
+    if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1;
+    ycells = (int) floor((Ymax-Ymin)/ R);
+    if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1;
+#ifdef DBGS
+    Rprintf("xcells %d   ycells %d\n",xcells,ycells);
+    Rprintf("Initialising\n");
+#endif
+
+    // Initialise Strauss point process
+    StraussProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax, Beta, Gamma, R);  
+
+    // Initialise point pattern
+    Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells);
+    // parameters: min x, max x, min y, max y, "cells" in x and y direction
+    // used for speeding up neighbour counting, 9 is max here
+    
+#ifdef DBGS
+    Rprintf("Initialisation complete\n");
+#endif
+
+    // Synchronise random number generator 
+    GetRNGstate();
+
+    // Initialise perfect sampler
+    Sampler PerfectSampler(&ExampleProcess);
+    
+    // Perform perfect sampling
+    PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime);
+    
+    // Synchronise random number generator 
+    PutRNGstate();
+
+    // Get upper estimate of number of points
+    noutmax = ExamplePattern.UpperCount() + 1;
+    
+    // Allocate space for output
+    PROTECT(xout = NEW_NUMERIC(noutmax));
+    PROTECT(yout = NEW_NUMERIC(noutmax));
+    PROTECT(nout = NEW_INTEGER(1));
+    PROTECT(stout = NEW_INTEGER(1));
+    PROTECT(etout = NEW_INTEGER(1));
+    xx = NUMERIC_POINTER(xout);
+    yy = NUMERIC_POINTER(yout);
+    nn = INTEGER_POINTER(nout);
+    ss = INTEGER_POINTER(stout);
+    ee = INTEGER_POINTER(etout);
+
+    // copy data into output storage
+    ExamplePattern.Return(xx, yy, nn, noutmax);
+    *ss = StartTime;
+    *ee = EndTime;
+
+    // pack up into output list
+    PROTECT(out  = NEW_LIST(5));
+    SET_VECTOR_ELT(out, 0, xout);
+    SET_VECTOR_ELT(out, 1, yout);
+    SET_VECTOR_ELT(out, 2, nout);
+    SET_VECTOR_ELT(out, 3, stout);
+    SET_VECTOR_ELT(out, 4, etout);
+    
+    // return 
+    UNPROTECT(11);  // 5 arguments plus xout, yout, nout, stout, etout, out
+    return(out);
+  }
+}
diff --git a/src/PerfectStraussHard.h b/src/PerfectStraussHard.h
new file mode 100644
index 0000000..646c601
--- /dev/null
+++ b/src/PerfectStraussHard.h
@@ -0,0 +1,188 @@
+
+// ..................... Strauss-Hardcore process ..........................
+//  $Revision: 1.3 $ $Date: 2014/02/18 10:42:53 $
+
+class StraussHardProcess : public PointProcess {
+ public:
+  double beta, gamma, H, R, Hsquared, Rsquared;
+  StraussHardProcess(double xmin, double xmax, double ymin, double ymax, 
+		     double b, double g, double Ri, double Hc);
+  ~StraussHardProcess(){}
+  void NewEvent(double *x, double *y, char *InWindow);
+  void GeneratePoisson(Point *headPoint, 
+			       long int *GeneratedPoints,
+			       long int *LivingPoints,
+			       long int *NoP);
+  double Interaction(double dsquared);
+  //  void CalcBeta(long int xsidepomm, long int ysidepomm, 
+  //	   double *betapomm);
+  //  void CheckBeta(long int xsidepomm, long int ysidepomm, 
+  //		 double *betapomm);
+  //  double lnCondInt(struct Point2 *TempCell, Point2Pattern *p2p);
+  //  void Beta(struct Point2 *TempCell);
+  //  void CalcBeta(Point2Pattern *p2p);
+};
+
+StraussHardProcess::StraussHardProcess(double xmin, double xmax, 
+			      double ymin, double ymax, 
+			      double b, double g, double Ri, double Hc) :
+  PointProcess(xmin, xmax, ymin, ymax){
+    beta = b; gamma = g; R = Ri;  H = Hc; 
+    Rsquared = R * R; 
+    Hsquared = H * H; 
+    InteractionRange = R;
+    TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin);
+  }  
+
+double StraussHardProcess::Interaction(double dsquared)
+{
+  if(dsquared >= Rsquared) return(1.0);
+  if(dsquared >= Hsquared) return(gamma);
+  return(0.0);
+}
+
+void StraussHardProcess::NewEvent(double *x, double *y, char *InWindow)
+{
+  double Xdim, Ydim;
+  Xdim = Xmax-Xmin;
+  Ydim = Ymax-Ymin;
+  *x = slumptal()*Xdim+Xmin;
+  *y = slumptal()*Ydim+Ymin;
+  *InWindow = 1;
+}
+
+void StraussHardProcess::GeneratePoisson(Point *headPoint, 
+			      long int *GeneratedPoints,
+			      long int *LivingPoints,
+			      long int *NoP)
+{
+  int i;
+  double xtemp, ytemp, L, Xdim, Ydim;
+  struct Point *TempPoint;
+  Xdim = Xmax-Xmin;
+  Ydim = Ymax-Ymin;
+  L = beta*Xdim*Ydim;
+  *GeneratedPoints = poisson(L);
+  *LivingPoints = *GeneratedPoints;
+  for (i=1; i<=*GeneratedPoints ; i++){
+    //Rprintf("Generating StraussHardProcess Poisson 3\n");
+    //scanf("%f",&f1);
+    xtemp = slumptal()*Xdim+Xmin;
+    ytemp = slumptal()*Ydim+Ymin;
+    //
+    //Rprintf("Generating StraussHardProcess Poisson 3.2\n");
+    TempPoint = ALLOCATE(struct Point);
+    //
+    TempPoint->X = xtemp;
+    TempPoint->Y = ytemp;
+    TempPoint->No = i;
+    TempPoint->R = slumptal();
+    //Rprintf("Generating StraussHardProcess Poisson 3.6\n");
+    TempPoint->next = headPoint->next;
+    headPoint->next = TempPoint;
+    *NoP = *NoP + 1;
+  }
+}
+
+// ........................... Interface to R ..........................
+
+extern "C" {
+  SEXP PerfectStraussHard(SEXP beta,
+		      SEXP gamma,
+		      SEXP r,
+		      SEXP hc,
+		      SEXP xrange,
+		      SEXP yrange) {
+
+    // input parameters
+    double Beta, Gamma, R, H, Xmin, Xmax, Ymin, Ymax;
+    double *Xrange, *Yrange;
+    // internal
+    int xcells, ycells;
+    long int StartTime, EndTime;
+    // output 
+    int noutmax;
+    SEXP xout, yout, nout, out;
+    double *xx, *yy;
+    int *nn;
+
+    // protect arguments from garbage collector    
+    PROTECT(beta   = AS_NUMERIC(beta));
+    PROTECT(gamma  = AS_NUMERIC(gamma));
+    PROTECT(r      = AS_NUMERIC(r));
+    PROTECT(hc     = AS_NUMERIC(hc));
+    PROTECT(xrange = AS_NUMERIC(xrange));
+    PROTECT(yrange = AS_NUMERIC(yrange));
+    // that's 6 protected objects
+
+    // extract arguments
+    Beta   = *(NUMERIC_POINTER(beta));
+    Gamma  = *(NUMERIC_POINTER(gamma));
+    R      = *(NUMERIC_POINTER(r));
+    H      = *(NUMERIC_POINTER(hc));
+    Xrange = NUMERIC_POINTER(xrange);
+    Xmin   = Xrange[0];
+    Xmax   = Xrange[1];
+    Yrange = NUMERIC_POINTER(yrange);
+    Ymin   = Yrange[0];
+    Ymax   = Yrange[1];
+
+    // compute cell array size
+    xcells = (int) floor((Xmax-Xmin)/ R);
+    if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1;
+    ycells = (int) floor((Ymax-Ymin)/ R);
+    if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1;
+#ifdef DBGS
+    Rprintf("xcells %d   ycells %d\n",xcells,ycells);
+    Rprintf("Initialising\n");
+#endif
+
+    // Initialise StraussHard point process
+    StraussHardProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax, Beta, Gamma, R, H);  
+
+    // Initialise point pattern
+    Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells);
+    // parameters: min x, max x, min y, max y, "cells" in x and y direction
+    // used for speeding up neighbour counting, 9 is max here
+    
+#ifdef DBGS
+    Rprintf("Initialisation complete\n");
+#endif
+
+    // Synchronise random number generator 
+    GetRNGstate();
+
+    // Initialise perfect sampler
+    Sampler PerfectSampler(&ExampleProcess);
+    
+    // Perform perfect sampling
+    PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime);
+    
+    // Synchronise random number generator 
+    PutRNGstate();
+
+    // Get upper estimate of number of points
+    noutmax = ExamplePattern.UpperCount() + 1;
+    
+    // Allocate space for output
+    PROTECT(xout = NEW_NUMERIC(noutmax));
+    PROTECT(yout = NEW_NUMERIC(noutmax));
+    PROTECT(nout = NEW_INTEGER(1));
+    xx = NUMERIC_POINTER(xout);
+    yy = NUMERIC_POINTER(yout);
+    nn = INTEGER_POINTER(nout);
+
+    // copy data into output storage
+    ExamplePattern.Return(xx, yy, nn, noutmax);
+
+    // pack up into output list
+    PROTECT(out  = NEW_LIST(3));
+    SET_VECTOR_ELT(out, 0, xout);
+    SET_VECTOR_ELT(out, 1, yout);
+    SET_VECTOR_ELT(out, 2, nout);
+    
+    // return 
+    UNPROTECT(10);  // 6 arguments plus xout, yout, nout, out
+    return(out);
+  }
+}
diff --git a/src/areadiff.c b/src/areadiff.c
new file mode 100755
index 0000000..5c3bfa7
--- /dev/null
+++ b/src/areadiff.c
@@ -0,0 +1,303 @@
+/*
+
+  areadiff.c
+
+  Area difference function
+
+  $Revision: 1.14 $ $Date: 2013/09/18 04:09:24 $
+
+  A(x,r) = area of disc b(0,r) not covered by discs b(x_i,r) for x_i in x
+  
+  Area estimated by point-counting on a fine grid
+
+  For use in area-interaction model and related calculations
+
+*/
+
+#undef DEBUG
+
+#include <R.h>
+#include <Rmath.h>
+#include <R_ext/Utils.h>
+
+#include "chunkloop.h"
+#include "constants.h"
+
+/* 
+   Original version areadiff()
+
+   1 point u
+
+   No trimming of discs
+
+*/
+
+void
+areadiff(rad,x,y,nn,ngrid,answer) 
+     /* inputs */
+     double *rad;      /* radius */
+     double *x, *y;    /* coordinate vectors for point pattern */
+     int    *nn;       /* length of vectors x and y */
+     int    *ngrid;    /* dimensions of point-counting grid */
+     /* output */
+     double *answer;   /* computed area */
+{
+  double dx, dy, xg, yg, r, r2, a2, b2, xdif, ydif;
+  int i, j, k, m, n, count, covered;
+  r  = *rad;
+  r2 = r * r;
+  n  = *nn;
+  m  = *ngrid;
+  dx = dy = 2 * r / (m-1);
+
+  count = 0;
+
+  /* run through grid points */
+  for(i = 0, xg = -r; i < m; i++, xg += dx) {
+    a2 = r2 - xg *xg;
+    for(j = 0, yg = -r; j < m; j++, yg += dy) {
+      /* test for inside disc */
+      if(yg * yg < a2) {
+#ifdef DEBUG
+	Rprintf("\n\n (xg,yg) = (%lf, %lf)\n", xg, yg);
+#endif
+	/* run through data points seeking one close to (xy, yg) */
+	covered = 0; 
+	if(n > 0) {
+	  for(k = 0; k < n; k++) {
+#ifdef DEBUG
+	    Rprintf("(x[%d],y[%d]) = (%lf,%lf)\n", k, k, x[k], y[k]);
+#endif
+	    xdif = x[k] - xg;
+	    b2 = r2 - xdif * xdif;
+	    if(b2 > 0) {
+	      ydif = y[k] - yg;
+	      if(b2 - ydif * ydif > 0) {
+#ifdef DEBUG
+		Rprintf("(x[%d], y[%d]) = (%lf, %lf) covers!\n", 
+			k, k, x[k], y[k]);
+#endif
+		covered = 1;
+		break;
+	      }
+	    }
+	  }
+	}
+	if(covered == 0) {
+	  ++count;
+#ifdef DEBUG
+	  Rprintf("Not covered; incrementing count\n");
+#endif
+	}
+      }
+    }
+  }
+
+#ifdef DEBUG
+  Rprintf("Count = %d\n", count);
+#endif
+  
+  /* calculate area */
+  *answer = ((double) count) * dx * dy;
+}
+
+/* similar function, handles multiple values of 'r' */
+
+void
+areadifs(rad,nrads,x,y,nxy,ngrid,answer) 
+     /* inputs */
+     double *rad;      /* vector of radii */
+     int    *nrads;     /* length of 'rads' */
+     double *x, *y;    /* coordinate vectors for point pattern */
+     int    *nxy;       /* length of vectors x and y */
+     int    *ngrid;    /* dimensions of point-counting grid */
+     /* output */
+     double *answer;   /* computed areas (vector of length 'nrads') */
+{
+  double dx, dy, xg, yg, r, r2, a2, b2, xdif, ydif;
+  int i, j, k, l, m, n, nr, m0, count, covered, maxchunk;
+
+  n  = *nxy;
+  nr = *nrads;
+  m  = *ngrid;
+
+  /* run through radii in chunks of 2^14 */
+  OUTERCHUNKLOOP(l, nr, maxchunk, 16384) {
+
+    R_CheckUserInterrupt();
+
+    INNERCHUNKLOOP(l, nr, maxchunk, 16384) {
+      r  = rad[l];
+      if(r == 0.0) {
+	answer[l] = 0.0;
+      } else if(n == 0) {
+	answer[l] = M_PI * r * r;
+      } else {
+	r2 = r * r;
+	dx = dy = 2 * r / (m-1);
+	count = 0;
+
+	/* run through grid points in disc of radius r */
+	for(i = 0, xg = -r; i < m; i++, xg += dx) {
+	  a2 = r2 - xg * xg;
+	  m0 = (a2 > 0.0) ? floor(sqrt(a2)/dy) : 0;
+	  for(j = -m0, yg = -m0 * dy; j <= m0; j++, yg += dy) {
+#ifdef DEBUG
+	    Rprintf("\n\n (xg,yg) = (%lf, %lf)\n", xg, yg);
+#endif
+	    /* run through data points seeking one close to (xy, yg) */
+	    covered = 0;
+	    for(k = 0; k < n; k++) {
+#ifdef DEBUG
+	      Rprintf("(x[%d],y[%d]) = (%lf,%lf)\n", k, k, x[k], y[k]);
+#endif
+	      xdif = x[k] - xg;
+	      b2 = r2 - xdif * xdif;
+	      if(b2 > 0) {
+		ydif = y[k] - yg;
+		if(b2 - ydif * ydif > 0) {
+#ifdef DEBUG
+		  Rprintf("(x[%d], y[%d]) = (%lf, %lf) covers!\n", 
+			  k, k, x[k], y[k]);
+#endif
+		  covered = 1;
+		  break;
+		}
+	      }
+	    } /* end of loop through data points */
+	    if(covered == 0) {
+	      ++count;
+#ifdef DEBUG
+	      Rprintf("Not covered; incrementing count\n");
+#endif
+	    }
+	  }
+	} /* end of loop over grid points */
+
+#ifdef DEBUG
+	Rprintf("Count = %d\n", count);
+#endif
+  
+	/* calculate area for this value of r*/
+	answer[l] = ((double) count) * dx * dy;
+      }
+      /* end of if(r==0).. else {...} */
+    }
+  }
+}
+
+/*
+    Modified version
+
+    multiple test points u
+    
+    discs constrained inside a rectangle
+
+*/
+
+void
+areaBdif(rad,nrads,x,y,nxy,ngrid,x0,y0,x1,y1,answer) 
+     /* inputs */
+     double *rad;      /* vector of radii */
+     int    *nrads;     /* length of 'rads' */
+     double *x, *y;    /* coordinate vectors for point pattern */
+     int    *nxy;       /* length of vectors x and y */
+     int    *ngrid;    /* dimensions of point-counting grid */
+     double *x0,*y0,*x1,*y1;  /* constraint rectangle */
+     /* output */
+     double *answer;   /* computed areas (vector of length 'nrads') */
+{
+  double dx, dy, xg, yg, r, r2, a, a2, b2, xdif, ydif;
+  double xleft, xright, ylow, yhigh;
+  double xmin, ymin, xmax, ymax;
+  int i, j, k, l, m, n, nr, ileft, iright, mlow, mhigh, count, covered;
+
+  n  = *nxy;
+  nr = *nrads;
+  m  = *ngrid;
+
+  xmin = *x0;
+  ymin = *y0;
+  xmax = *x1;
+  ymax = *y1;
+
+  /* run through radii */
+  for(l = 0; l < nr; l++) {
+    r  = rad[l];
+    if(r == 0.0) {
+      answer[l] = 0.0;
+    } else if (n == 0) {
+      answer[l]= M_PI * r * r;
+    } else {
+      r2 = r * r;
+      dx = dy = 2 * r / (m-1);
+      count = 0;
+
+      /* run through grid points in disc intersected with box */
+      xleft = (xmin > -r) ? xmin : -r;
+      xright = (xmax < r) ? xmax : r;
+      ileft = ceil(xleft/dx);
+      iright = floor(xright/dx);
+
+      if(ileft <= iright) {
+	for(i = ileft, xg = ileft * dx; i <= iright; i++, xg += dx) {
+	  a2 = r2 - xg * xg;
+	  a = (a2 > 0) ? sqrt(a2): 0.0;
+	  yhigh = (ymax < a) ? ymax: a;
+	  ylow  = (ymin > -a) ? ymin: -a;
+	  mhigh = floor(yhigh/dy);
+	  mlow  = ceil(ylow/dy);
+	  if(mlow <= mhigh) {
+	    for(j = mlow, yg = mlow * dy; j <= mhigh; j++, yg += dy) {
+#ifdef DEBUG
+	      Rprintf("\n\n (xg,yg) = (%lf, %lf)\n", xg, yg);
+#endif
+	      /* run through data points seeking one close to (xy, yg) */
+	      covered = 0;
+	      for(k = 0; k < n; k++) {
+#ifdef DEBUG
+		Rprintf("(x[%d],y[%d]) = (%lf,%lf)\n", 
+			k, k, x[k], y[k]);
+#endif
+		xdif = x[k] - xg;
+		b2 = r2 - xdif * xdif;
+		if(b2 > 0) {
+		  ydif = y[k] - yg;
+		  if(b2 - ydif * ydif > 0) {
+#ifdef DEBUG
+		    Rprintf("(x[%d], y[%d]) = (%lf, %lf) covers!\n", 
+			    k, k, x[k], y[k]);
+#endif
+		    covered = 1;
+		    break;
+		  }
+		}
+	      }
+	      /* end of loop over data points */
+	      if(covered == 0) {
+		++count;
+#ifdef DEBUG
+		Rprintf("Not covered; incrementing count\n");
+#endif
+	      }
+	    }
+	  }
+	}
+      }
+      /* end of loop over grid points */
+
+#ifdef DEBUG
+      Rprintf("Count = %d\n", count);
+#endif
+  
+      /* calculate area for this value of r*/
+      answer[l] = ((double) count) * dx * dy;
+    }
+    /* end of if(r==0).. else {...} */
+  }
+  /* end of loop over r values */
+}
+
+
+
+
diff --git a/src/areaint.c b/src/areaint.c
new file mode 100755
index 0000000..701f4ca
--- /dev/null
+++ b/src/areaint.c
@@ -0,0 +1,308 @@
+#include <R.h>
+#include <math.h>
+#include <stdlib.h>
+#include "methas.h"
+#include "dist2.h"
+
+/*
+  Conditional intensity function for an area-interaction process:
+
+  cif = eta^(1-B) where B = (uncovered area)/(pi r^2)
+
+*/
+
+#define NGRID 16
+
+/* To explore serious bug, #define BADBUG */
+#undef BADBUG
+
+/* Format for storage of parameters and precomputed/auxiliary data */
+
+typedef struct AreaInt {
+  /* model parameters */
+  double eta;
+  double r;
+  /* transformations of the parameters */
+  double r2;
+  double range2;
+  double logeta;
+  int hard;
+  /* periodic distance */
+  double *period;
+  int per;
+  /* grid counting */
+  double dx;
+  double xgrid0;
+  int *my;
+  int kdisc;
+  /* scratch space for saving list of neighbours */
+  int *neighbour;
+} AreaInt;
+
+/* initialiser function */
+
+Cdata *areaintInit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  double r, dx, dy, x0;
+  int i, my, kdisc;
+  AreaInt *areaint;
+
+  /* create storage */
+  areaint = (AreaInt *) R_alloc(1, sizeof(AreaInt));
+  /* Interpret model parameters*/
+  areaint->eta    = model.ipar[0];
+  areaint->r      = r = model.ipar[1]; 
+#ifdef BADBUG
+  Rprintf("r = %lf\n", r);
+#endif
+  areaint->r2     = r * r;
+  areaint->range2 = 4 * r * r;    /* square of interaction distance */
+  /* is the model numerically equivalent to hard core ? */
+  areaint->hard   = (areaint->eta == 0.0);
+  areaint->logeta = (areaint->hard) ? log(DOUBLE_XMIN) : log(areaint->eta);
+#ifdef BADBUG
+  if(areaint->hard) Rprintf("Hard core recognised\n");
+#endif
+  /* periodic boundary conditions? */
+  areaint->period = model.period;
+  areaint->per    = (model.period[0] > 0.0);
+#ifdef BADBUG
+  if(areaint->per) {
+    Rprintf("*** periodic boundary conditions ***\n");
+    Rprintf("period = %lf, %lf\n", model.period[0], model.period[1]);
+  }
+#endif
+  /* grid counting */
+  dx = dy = areaint->dx = (2 * r)/NGRID;
+#ifdef BADBUG
+  Rprintf("areaint->dx = %lf\n", areaint->dx);
+#endif
+  areaint->xgrid0 = -r + dx/2;
+  areaint->my = (int *) R_alloc((long) NGRID, sizeof(int));
+  kdisc = 0;
+  for(i = 0; i < NGRID; i++) {
+    x0 = areaint->xgrid0 + i * dx;
+    my = floor(sqrt(r * r - x0 * x0)/dy);
+    my = (my < 0) ? 0 : my;
+    areaint->my[i] = my;
+#ifdef BADBUG
+    Rprintf("\tmy[%ld] = %ld\n", i, my);
+#endif
+    kdisc += 2 * my + 1;
+  }
+  areaint->kdisc = kdisc;
+#ifdef BADBUG
+  Rprintf("areaint->kdisc = %ld\n", areaint->kdisc);
+#endif
+  /* allocate space for neighbour indices */
+  areaint->neighbour = (int *) R_alloc((long) state.npmax, sizeof(int));
+  return((Cdata *) areaint);
+}
+
+#ifdef BADBUG
+void fexitc();
+#endif
+
+/* conditional intensity evaluator */
+
+double areaintCif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int npts, ix, ixp1, j;
+  double *period, *x, *y;
+  double u, v;
+  double r2, dx, dy, a, range2;
+  double xgrid, ygrid, xgrid0, covfrac, cifval;
+  int kount, kdisc, kx, my, ky;
+  int *neighbour;
+  int nn, k;
+
+  AreaInt *areaint;
+
+  areaint = (AreaInt *) cdata;
+
+  r2      = areaint->r2;
+  range2  = areaint->range2;    /* square of interaction distance */
+  dy = dx = areaint->dx;
+  kdisc   = areaint->kdisc;
+  /* pointers */
+  period   = areaint->period;
+  neighbour = areaint->neighbour;
+
+  u  = prop.u;
+  v  = prop.v;
+  ix = prop.ix;
+  x  = state.x;
+  y  = state.y;
+  npts = state.npts;
+
+  if(npts == 0) return ((double) 1.0);
+
+  if(!areaint->per) {
+    /*
+      ..........   Euclidean distance ....................
+      First identify which data points are neighbours of (u,v)
+    */
+    nn = 0;
+    ixp1 = ix + 1;
+    /* If ix = NONE = -1, then ixp1 = 0 is correct */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	a = range2 - pow(u - x[j], 2);
+	if(a > 0.) {
+	  a -= pow(v - y[j], 2);
+	  if(a > 0.) {
+	    /* point j is a neighbour of (u,v) */
+	    neighbour[nn] = j;
+	    ++nn;
+	  }
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j < npts; j++) {
+	a = range2 - pow(u - x[j], 2);
+	if(a > 0.) {
+	  a -= pow(v - y[j], 2);
+	  if(a > 0.) {
+	    /* point j is a neighbour of (u,v) */
+	    neighbour[nn] = j;
+	    ++nn;
+	  }
+	}
+      }
+    }
+    if(nn == 0) {
+      /* no neighbours; no interaction */
+      cifval = 1.0;
+      return cifval;
+    } else if(areaint->hard) {
+      /* neighbours forbidden if it's a hard core process */
+      cifval = 0.0;
+      return cifval;
+    } else {
+      /* scan a grid of points centred at (u,v) */
+      kount = 0;
+      xgrid0 = u + areaint->xgrid0;
+      for(kx=0; kx<NGRID; kx++) {
+	xgrid = xgrid0 + kx * dx;
+	my = areaint->my[kx];
+	for(ky=(-my); ky<=my; ky++) {
+	  ygrid = v + ky * dy;
+	  /*
+	    Grid point (xgrid,ygrid) is inside disc of
+	    radius r centred at (u,v)
+
+	    Loop through all neighbouring data points to determine
+	    whether the grid point is covered by another disc
+	  */
+	  if(nn > 0) {
+	    for(k=0; k < nn; k++) {
+	      j = neighbour[k];
+	      a = r2 - pow(xgrid - x[j], 2);
+	      if(a > 0) {
+		a -= pow(ygrid - y[j], 2);
+		if(a > 0) {
+		  /* point j covers grid point */
+		  ++kount;
+		  break;
+		}
+	      }
+	    }
+	  }
+	  /* finished consideration of grid point (xgrid, ygrid) */
+	}
+      }
+    }
+  } else {
+    /*
+      ............. periodic distance ......................
+      First identify which data points are neighbours of (u,v)
+    */
+    nn = 0;
+    ixp1 = ix + 1;
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(dist2thresh(u,v,x[j],y[j],period,range2)) {
+	  /* point j is a neighbour of (u,v) */
+	  neighbour[nn] = j;
+	  ++nn;
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(dist2thresh(u,v,x[j],y[j],period,range2)) {
+	  /* point j is a neighbour of (u,v) */
+	  neighbour[nn] = j;
+	  ++nn;
+	}
+      }
+    }
+    if(nn == 0) {
+      /* no neighbours; no interaction */
+      cifval = 1.0;
+      return cifval;
+    } else if(areaint->hard) {
+      /* neighbours forbidden if it's a hard core process */
+      cifval = 0.0;
+      return cifval;
+    } else {
+      /* scan a grid of points centred at (u,v) */
+      kount = 0;
+      xgrid0 = u + areaint->xgrid0;
+      for(kx=0; kx<NGRID; kx++) {
+	xgrid = xgrid0 + kx * dx;
+	my = areaint->my[kx];
+	for(ky=(-my); ky<=my; ky++) {
+	  ygrid = v + ky * dy;
+	  /*
+	    Grid point (xgrid,ygrid) is inside disc of
+	    radius r centred at (u,v)
+
+	    Loop through all neighbouring data points to determine
+	    whether the grid point is covered by another disc
+	  */
+	  for(k=0; k < nn; k++) {
+	    j = neighbour[k];
+	    if(dist2Mthresh(xgrid,ygrid,x[j],y[j],period,r2)) {  
+	      /* point j covers grid point */
+	      ++kount;
+	      break;
+	    }
+	  }
+	  /* finished considering grid point (xgrid,ygrid) */
+	}
+      }
+    }
+  }
+  /*
+    `kdisc' is the number of         grid points in the disc
+    `kount' is the number of COVERED grid points in the disc
+  */
+
+  /* Hard core case has been handled. */
+  /* Usual calculation: covered area fraction */
+  covfrac = ((double) kount)/((double) kdisc);
+  cifval = exp(areaint->logeta * covfrac);
+
+#ifdef BADBUG
+    if(!R_FINITE(cifval)) {
+      Rprintf("Non-finite CIF value\n");
+      Rprintf("kount=%ld, kdisc=%ld, covfrac=%lf, areaint->logeta=%lf\n", 
+	      kount, kdisc, covfrac, areaint->logeta);
+      Rprintf("u=%lf, v=%lf\n", u, v);
+      fexitc("Non-finite CIF");
+    }
+#endif
+
+  return cifval;
+}
+
+
+Cifns AreaIntCifns = { &areaintInit, &areaintCif, (updafunptr) NULL, NO};
diff --git a/src/areapair.c b/src/areapair.c
new file mode 100644
index 0000000..e610f79
--- /dev/null
+++ b/src/areapair.c
@@ -0,0 +1,99 @@
+/*
+
+  areapair.c
+
+  $Revision: 1.6 $     $Date: 2013/09/18 04:11:42 $
+
+  Specialised code for the second order conditional intensity
+  of the area-interaction process
+
+*/
+
+#include <R.h>
+#include <math.h>
+
+#include "yesno.h"
+
+/* computes area of b(A, r) \int b(B, r) \setminus \bigcup_i b(X[i], r) */
+
+void delta2area(xa, ya, xb, yb, 
+		nother, xother, yother,
+		radius, epsilon, pixcount) 
+     double *xa, *ya, *xb, *yb;
+     int *nother;
+     double *xother, *yother;
+     double *radius, *epsilon;
+     int *pixcount;
+{ 
+  int Ni, Nj, Nk, i, j, k, count, covered;
+  double xA, yA, xB, yB, r, eps, r2;
+  double xmin, xmax, ymin, ymax, xi, yj;
+  double dxA, dyA;
+  double dxB, dyB;
+  double dx, dy;
+  
+  Nk = *nother;
+
+  xA = *xa;
+  yA = *ya;
+  xB = *xb;
+  yB = *yb;
+  r = *radius;
+  eps = *epsilon;
+  r2 = r * r;
+
+  /* find intersection of squares centred on A and B */
+  if(xA < xB) {
+    xmin = xB - r;
+    xmax = xA + r;
+  } else {
+    xmin = xA - r;
+    xmax = xB + r;
+  }
+  if(xmin > xmax) return;
+  if(yA < yB) {
+    ymin = yB - r;
+    ymax = yA + r;
+  } else {
+    ymin = yA - r;
+    ymax = yB + r;
+  }
+  if(ymin > ymax) return;
+    
+  /* set up grid */
+  Ni = (int) ceil((xmax - xmin)/eps) + 1;
+  Nj = (int) ceil((ymax - ymin)/eps) + 1;
+  
+  count = 0;
+
+  for(i = 0, xi = xmin; i < Ni; i++, xi += eps) {
+    dxA = xi - xA;
+    for(j = 0, yj = ymin; j < Nj; j++, yj += eps) {
+      dyA = yj - yA;
+      if(dxA * dxA + dyA * dyA <= r2) {
+	/* grid point belongs to b(A, r) */
+	dxB = xi - xB;
+	dyB = yj - yB;
+	if(dxB * dxB + dyB * dyB <= r2) {
+	  /* grid point belongs to b(A,r) \cap b(B,r) */
+	  covered = NO;
+	  /* test whether it is covered by another b(X[k], r) */
+	  for(k = 0; k < Nk; k++) {
+	    dx = xi - xother[k];
+	    dy = yj - yother[k];
+	    if(dx * dx + dy * dy <= r2) {
+	      covered = YES;
+	      break;
+	    }
+	  }
+	  if(!covered) {
+	    ++count;
+	  }
+	}
+      }
+    }
+  }
+  *pixcount = count;
+}
+
+
diff --git a/src/auctionbf.c b/src/auctionbf.c
new file mode 100644
index 0000000..6aea79a
--- /dev/null
+++ b/src/auctionbf.c
@@ -0,0 +1,258 @@
+/*
+
+   auctionbf.c
+
+   $Revision: 1.1 $   $Date: 2014/06/28 02:14:04 $
+
+   Code by Dominic Schuhmacher
+   <dominic.schuhmacher at mathematik.uni-goettingen.de>
+
+   up to local adaptations for spatstat this code is identical to Revision 0.4
+   for the R package transport
+   
+*/
+
+
+/* n >= 2 is assumed throughout !!!!!!!!! */
+
+#include <R.h>
+#include <math.h>
+#include <R_ext/Utils.h>
+
+typedef struct State {
+  int n; 
+  double epsbid;   /* the current eps */
+  int backwards; /* 0 if we should do forward auction, 1 if we should do backward auction */ 
+  int nofassigned;  /* number of assigned persons */
+  int *pers_to_obj;  /* -1 means unassigned */
+  int *obj_to_pers;  /* -1 means unassigned */           
+  double *price;   
+  double *profit;
+  int *desiremat;        /* matrix of desires */ 
+  double *persvalue; /* desire minus price of current person in forward phase */
+  double *objvalue; /* desire minus profit of current object in reverse phase */
+                     /* last three only used in bid, but maybe better
+                        to reserve memory once and for all */
+} State;
+
+#define DESIRE(I,J,STATE,NVALUE) ((STATE)->desiremat)[(NVALUE) * (J) + (I)]
+#define DESIREMAIN(I,J,STATE,NVALUE) ((STATE).desiremat)[(NVALUE) * (J) + (I)]
+#define MIN(A,B) ((A)<(B) ? (A) : (B))
+
+void bidbf(State *state, int person);
+void lurebf(State *state, int obj);
+int arrayargmax(double *a, int n);
+double arraysec(double *a, int n, int arg);
+/* void printit(State *state); */
+
+
+
+/* ------------ The main function ----------------------------- */
+
+void auctionbf(int *desirem, int *nn, int *pers_to_obj, double *price, double *profit, int *kk, double *eps)
+{
+   int i,j,r; /* indices */
+   int k,n;
+   State state;
+
+   /* inputs */
+   state.n = n = *nn;
+   k = *kk;    /* length of eps, only needed in outside loop */
+   state.pers_to_obj = pers_to_obj;      /* n vector: person i gets which object */
+   state.price = price;    /* n vector: price of object j */
+   state.profit = profit;  /* n vector: profit of person i */
+   state.desiremat = desirem;  /* n x n vector: desire of person i for object j */  
+
+   /* scratch space */ 
+   state.obj_to_pers = (int *) R_alloc((long) n, sizeof(int));
+   state.persvalue = (double *) R_alloc((long) n, sizeof(double));
+   state.objvalue = (double *) R_alloc((long) n, sizeof(double));
+
+   /* Prices start at what the R-function supplied (usually 0) */
+   /* Profits are set to the rowwise max that satisfies eps-CS */
+   for (i = 0; i < n; i++) {
+     for (j = 0; j < n; j++) {
+       state.persvalue[j] = DESIREMAIN(i,j,state,n);
+     }
+     state.profit[i] = arrayargmax(state.persvalue, n);
+   }
+
+   for (r = 0; r < k; r++) {
+     state.backwards = 0;
+     state.epsbid = eps[r];
+     /* At start everything is unassigned */
+     state.nofassigned = 0;
+     for (j = 0; j < n; j++) {
+       state.pers_to_obj[j] = -1;
+       state.obj_to_pers[j] = -1;
+     }
+     
+     while (state.nofassigned < n) {
+       /*  printit(&state); */
+       R_CheckUserInterrupt();
+       if (state.backwards == 0) {
+         /* printit(&state); */
+         for (i = 0; i < n; i++) {
+           if (state.pers_to_obj[i] == -1) {
+             /* Rprintf("Bid \n"); */
+             bidbf(&state, i);  /* bid does assigning and unassigning and changes nofassigned */
+           }
+         }
+       } else {
+         /* printit(&state); */
+         for (j = 0; j < n; j++) {
+           if (state.obj_to_pers[j] == -1) {
+             /* Rprintf("Lure \n"); */        
+             lurebf(&state, j);  /* lure does assigning and unassigning and changes nofassigned */
+           }
+         }
+       }
+
+     }   /* eof while */
+   }     /* eof eps-scaling for-loop */
+}
+
+
+/* ------------ Functions called by auction ------------------------- */
+
+void bidbf(State *state, int person) {
+  int j;
+  int n;
+  int bidfor, oldpers;
+  double bidamount;
+
+  n = state->n;
+  for (j = 0; j < n; j++) {
+    state->persvalue[j] = DESIRE(person,j,state,n) - state->price[j];
+  }
+  bidfor = arrayargmax(state->persvalue, n);
+  bidamount = state->persvalue[bidfor] - arraysec(state->persvalue,n,bidfor) + state->epsbid;
+  /* here we get a float result, the rest are int results */
+  oldpers = state->obj_to_pers[bidfor];
+  if (oldpers == -1) {
+    state->nofassigned++; 
+    state->backwards = 1;
+  }
+  else {
+    state->pers_to_obj[oldpers] = -1; 
+  }
+  state->pers_to_obj[person] = bidfor;
+  state->obj_to_pers[bidfor] = person;
+  state->price[bidfor] = state->price[bidfor] + bidamount;
+  /* new forward/reverse auction algo */
+  state->profit[person] = DESIRE(person,bidfor,state,n) - state->price[bidfor];
+}
+
+
+/* like bidbf, but for reverse auction */
+void lurebf(State *state, int obj) {
+  int i;
+  int n;
+  int lurepno, oldobj;
+  double lureamount;
+
+  n = state->n;
+  for (i = 0; i < n; i++) {
+    state->objvalue[i] = DESIRE(i,obj,state,n) - state->profit[i];
+  }
+  lurepno = arrayargmax(state->objvalue, n);
+  lureamount = state->objvalue[lurepno] - arraysec(state->objvalue,n,lurepno) + state->epsbid;
+  /* here we get a float result, the rest are int results */
+  oldobj = state->pers_to_obj[lurepno];
+  if (oldobj == -1) {
+    state->nofassigned++;
+    state->backwards = 0; 
+  }
+  else {
+    state->obj_to_pers[oldobj] = -1; 
+  }
+  state->obj_to_pers[obj] = lurepno;
+  state->pers_to_obj[lurepno] = obj;
+  state->profit[lurepno] = state->profit[lurepno] + lureamount;
+  /* new forward/reverse auction algo */
+  state->price[obj] = DESIRE(lurepno,obj,state,n) - state->profit[lurepno];
+}
+
+
+/* ------------ Little helpers ------------------------- */
+
+/* Gives first index that maximizes array */
+int arrayargmax(double *a, int n) {
+  int i, arg;
+  double amax;
+  arg = 0;
+  amax = a[0];
+  for (i = 1; i < n; i++)
+    if (a[i] > amax) {
+    arg = i;
+    amax = a[i];
+  }
+  return(arg);
+}
+
+/* Second largest element of a non-negative integer array
+   knowing the largest is at index arg */
+double arraysec(double *a, int n, int arg) {
+  int i;
+  double amax;
+  if (arg > 0) amax = a[0];
+  else amax = a[1]; 
+  for (i = 0; i < arg; i++)
+    if (a[i] > amax) amax = a[i];
+  for (i = arg+1; i < n; i++)
+    if (a[i] > amax) amax = a[i]; 
+  return(amax);
+}
+
+
+/* void printit(State *state)
+{
+  int i=0,n=0;
+
+  n = state->n;
+
+  Rprintf("Current state: \n");
+
+  Rprintf("backwards: %d \n", state->backwards);
+
+  Rprintf("nofassigned:  %d \n", state->nofassigned);
+
+  Rprintf("pers_to_obj:  ");
+  for (i = 0; i < n; i++) {
+    Rprintf("%d ", state->pers_to_obj[i]);
+  }
+  Rprintf("\n");
+
+  Rprintf("obj_to_pers:  ");
+  for (i = 0; i < n; i++) {
+    Rprintf("%d ", state->obj_to_pers[i]);
+  }
+  Rprintf("\n");
+
+  Rprintf("price:  ");
+  for (i = 0; i < n; i++) {
+    Rprintf("%2.9lf ", state->price[i]);
+  }
+  Rprintf("\n");
+
+  Rprintf("profit:  ");
+  for (i = 0; i < n; i++) {
+    Rprintf("%2.9lf ", state->profit[i]);
+  }
+  Rprintf("\n");
+
+  Rprintf("persvalue:  ");
+  for (i = 0; i < n; i++) {
+    Rprintf("%2.9lf ", state->persvalue[i]);
+  }
+  Rprintf("\n");
+
+  Rprintf("objvalue:  ");
+  for (i = 0; i < n; i++) {
+    Rprintf("%2.9lf ", state->objvalue[i]);
+  }
+  Rprintf("\n");
+
+  Rprintf("\n\n\n");
+}
+*/ 
diff --git a/src/badgey.c b/src/badgey.c
new file mode 100755
index 0000000..e4e775f
--- /dev/null
+++ b/src/badgey.c
@@ -0,0 +1,513 @@
+#include <R.h>
+#include <math.h>
+#include <stdlib.h>
+#include "methas.h"
+#include "dist2.h"
+
+/* To get debug output, insert the line:  #define DEBUG 1  */
+
+void fexitc(const char *msg);
+
+/*
+  Conditional intensity function for a multiscale saturation process. 
+
+  parameter vector: 
+      ipar[0] = ndisc
+      ipar[1] = gamma[0]
+      ipar[2] = r[0]
+      ipar[3] = s[0]
+      ...
+*/
+
+typedef struct BadGey {
+  /* model parameters */
+  int ndisc;
+  double *gamma;
+  double *r;
+  double *s;
+  /* transformations of the parameters */
+  double *r2;
+  double *loggamma;
+  int *hard;
+  /* periodic distance */
+  double *period;
+  int per;
+  /* auxiliary counts */
+  int *aux;   /* matrix[ndisc, npmax]: neighbour counts in current state */
+  int *tee;   /* vector[ndisc] : neighbour count at point in question */
+  double *w;  /* vector[ndisc] : sum of changes in counts at other points */
+} BadGey;
+
+Cdata *badgeyinit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  int i, j, k, i0, ndisc, nmatrix;
+  double r, g, d2;
+  BadGey *badgey;
+
+  /* create storage */
+  badgey = (BadGey *) R_alloc(1, sizeof(BadGey));
+
+  badgey->ndisc  = ndisc = model.ipar[0];
+  /* Allocate space for parameter vectors */
+  badgey->gamma    = (double *) R_alloc((size_t) ndisc, sizeof(double));
+  badgey->r        = (double *) R_alloc((size_t) ndisc, sizeof(double));
+  badgey->s        = (double *) R_alloc((size_t) ndisc, sizeof(double));
+  /* Derived values */
+  badgey->r2       = (double *) R_alloc((size_t) ndisc, sizeof(double));
+  badgey->loggamma = (double *) R_alloc((size_t) ndisc, sizeof(double));
+  badgey->hard     = (int *) R_alloc((size_t) ndisc, sizeof(int));
+  /* copy and transform parameters */
+  for(i=0; i < ndisc; i++) {
+    i0 = 3*i + 1;
+    g = badgey->gamma[i] = model.ipar[i0];
+    r = badgey->r[i] =     model.ipar[i0 + 1];
+        badgey->s[i] =     model.ipar[i0 + 2];
+    badgey->r2[i] = r * r;
+    badgey->hard[i] = (g < DOUBLE_EPS);
+    badgey->loggamma[i] = (g < DOUBLE_EPS) ? 0 : log(g);
+  }
+  /* periodic boundary conditions? */
+  badgey->period = model.period;
+  badgey->per    = (model.period[0] > 0.0);
+  /* Allocate scratch space */
+  badgey->tee      = (int *) R_alloc((size_t) ndisc, sizeof(int));
+  badgey->w        = (double *) R_alloc((size_t) ndisc, sizeof(double));
+  /* Allocate space for auxiliary counts */
+  nmatrix = ndisc * state.npmax;
+  badgey->aux      = (int *) R_alloc((size_t) nmatrix, sizeof(int));
+  /* Initialise auxiliary counts */
+  for(i = 0; i < nmatrix; i++)
+    badgey->aux[i] = 0;
+  for(i = 0; i < state.npts; i++) {
+    for(j = 0; j < state.npts; j++) {
+      if(j == i) continue;
+      d2 = dist2either(state.x[i], state.y[i], state.x[j], state.y[j], 
+		       badgey->period);
+      for(k = 0; k < ndisc; k++) {
+	if(d2 < badgey->r2[k])
+	  MAT(badgey->aux, k, i, ndisc) += 1;
+      }
+    }
+  }
+#ifdef DEBUG
+  Rprintf("Finished initialiser; ndisc=%d\n", ndisc);
+#endif
+  return((Cdata *) badgey);
+}
+
+#define AUX(I,J) MAT(aux, I, J, ndisc)
+
+double badgeycif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int ix, j, k, npts, ndisc, tk;
+  double u, v, d2;
+  double a, dd2, b, f, r2, s, cifval;
+  double *x, *y;
+  int *tee, *aux;
+  double *w;
+  BadGey *badgey;
+
+  badgey = (BadGey *) cdata;
+
+#ifdef DEBUG
+  Rprintf("Entering badgeycif\n");
+#endif
+
+  npts = state.npts;
+  cifval = 1.0;
+  if(npts==0) return cifval;
+
+  x = state.x;
+  y = state.y;
+  u = prop.u;
+  v = prop.v;
+  ix = prop.ix;
+
+  ndisc = badgey->ndisc;
+  tee   = badgey->tee;
+  aux   = badgey->aux;
+  w     = badgey->w;
+
+  /* 
+     For disc k, 
+     tee[k] = neighbour count at the point in question;
+     w[k]   = sum of changes in (saturated) neighbour counts at other points 
+  */
+  if(prop.itype == BIRTH) {
+    /* compute tee[k] and w[k] from scratch */
+    for(k = 0; k < ndisc; k++) {
+      tee[k] = 0;
+      w[k] = 0.0;
+    }
+    if(badgey->per) {
+      /* periodic distance */
+      for(j=0; j<npts; j++) {
+	d2 = dist2(u,v,x[j],y[j],badgey->period);
+	for(k = 0; k < ndisc; k++) {
+	  if(d2 < badgey->r2[k]) {
+	    tee[k]++;
+	    f = badgey->s[k] - AUX(k,j);
+	    if(f > 1) /* j is not saturated after addition of (u,v) */
+	      w[k] += 1; /* addition of (u,v) increases count by 1 */
+	    else if(f > 0) /* j becomes saturated by addition of (u,v) */
+	      w[k] += f;
+	  }
+	}
+      }
+    } else {
+      /* Euclidean distance */
+      for(j=0; j<npts; j++) {
+	d2 = pow(u - x[j], 2) + pow(v - y[j], 2);
+	for(k = 0; k < ndisc; k++) {
+	  if(d2 < badgey->r2[k]) {
+	    tee[k]++;
+	    f = badgey->s[k] - AUX(k,j);
+	    if(f > 1) /* j is not saturated after addition of (u,v) */
+	      w[k] += 1; /* addition of (u,v) increases count by 1 */
+	    else if(f > 0) /* j becomes saturated by addition of (u,v) */
+	      w[k] += f;
+	  }
+	}
+      }
+    }
+  } else if(prop.itype == DEATH) {
+    /* extract current auxiliary counts for point ix */
+    /* compute w[k] from scratch */
+    for(k = 0; k < ndisc; k++) {
+      tee[k] = AUX(k,ix);
+      w[k] = 0.0;
+    }
+    /* compute change in counts for other points */
+    if(badgey->per) {
+      /* Periodic distance */
+      for(j=0; j<npts; j++) {
+	if(j == ix) continue;
+	d2 = dist2(u,v,x[j],y[j],badgey->period);
+	for(k = 0; k < ndisc; k++) {
+	  if(d2 < badgey->r2[k]) {
+	    f = badgey->s[k] - AUX(k,j);
+	    if(f > 0) /* j is not saturated */
+	      w[k] += 1; /* deletion of 'ix' decreases count by 1 */
+	    else {
+	      f += 1;
+	      if(f > 0) {
+		/* j is not saturated after deletion of 'ix' 
+		   (s must be fractional) */
+		w[k] += f; 
+	      }
+	    }
+	  }
+	}
+      }
+    } else {
+      /* Euclidean distance */
+      for(j=0; j<npts; j++) {
+	if(j == ix) continue;
+	d2 = pow(u - x[j], 2) + pow(v - y[j], 2);
+	for(k = 0; k < ndisc; k++) {
+	  if(d2 < badgey->r2[k]) {
+	    f = badgey->s[k] - AUX(k,j);
+	    if(f > 0) /* j is not saturated */
+	      w[k] += 1; /* deletion of 'ix' decreases count by 1 */
+	    else {
+	      f += 1;
+	      if(f > 0) {
+		/* j is not saturated after deletion of 'ix' 
+		   (s must be fractional) */
+		w[k] += f; 
+	      }
+	    }
+	  }
+	}
+      }
+    }
+  } else if(prop.itype == SHIFT) { 
+    /* compute auxiliary counts from scratch */
+    for(k = 0; k < ndisc; k++) {
+      tee[k] = 0;
+      w[k] = 0.0;
+    }
+    /* Compute the cif at the new point, not the ratio of new/old */
+    if(badgey->per) {
+      /* periodic distance */
+      for(j=0; j<npts; j++) {
+	if(j == ix) continue;
+	d2 = dist2(u,v,x[j],y[j],badgey->period);
+	for(k = 0; k < ndisc; k++) {
+	  r2 = badgey->r2[k];
+	  if(d2 < r2) {
+	    /* shifted point is a neighbour of point j */
+	    tee[k]++;
+	    a = AUX(k,j);
+	    s = badgey->s[k];
+	    /* Adjust */
+	    dd2 = dist2(x[ix],y[ix], x[j],y[j],badgey->period);
+	    if(dd2 < r2) a -= 1; 
+	    b = a + 1;
+	    /* b is the number of neighbours of point j in new state */
+	    if(a < s && s < b) {
+	      w[k] += s - a;  /* s is fractional and j is saturated */
+	    }
+	    else if(s >= b) w[k] += 1; 
+	  }
+	}
+      }
+    } else {
+      /* Euclidean distance */
+      for(j=0; j<npts; j++) {
+	if(j == ix) continue;
+	d2 = pow(u - x[j], 2) + pow(v - y[j], 2);
+	for(k = 0; k < ndisc; k++) {
+	  r2 = badgey->r2[k];
+	  if(d2 < r2) {
+	    /* shifted point is a neighbour of point j */
+	    tee[k]++;
+	    a = AUX(k,j);
+	    s = badgey->s[k];
+	    /* Adjust */
+	    dd2 = pow(x[ix] - x[j], 2) + pow(y[ix] - y[j], 2);
+	    if(dd2 < r2) a -= 1; 
+	    b = a + 1;
+	    /* b is the number of neighbours of point j in new state */
+	    if(a < s && s < b) {
+	      w[k] += s - a;  /* s is fractional and j is saturated */
+	    }
+	    else if(s >= b) w[k] += 1; 
+	  }
+	}
+      }
+    }
+  }
+
+#ifdef DEBUG
+  Rprintf("ndisc=%d\n", ndisc);
+#endif
+
+  /* compute total change in saturated count */
+  for(k = 0; k < ndisc; k++) {
+    s = badgey->s[k];
+    tk = tee[k];
+    w[k] += ((tk < s) ? tk : s);
+#ifdef DEBUG
+    Rprintf("s[%d]=%lf, t[%d]=%d, w[%d]=%lf\n",
+	   k, s, k, tk, k, w[k]);
+#endif
+  }
+
+  /* evaluate cif */
+  for(k = 0; k < ndisc; k++) {
+    if(badgey->hard[k]) {
+      if(tee[k] > 0) return(0.0);
+      /* else cifval multiplied by 0^0 = 1 */
+    } else cifval *= exp(badgey->loggamma[k] * w[k]);
+  }
+  
+  return cifval;
+}
+
+void badgeyupd(state, prop, cdata) 
+     State state;
+     Propo prop;
+     Cdata *cdata;
+{
+/* Declare other variables */
+  int ix, npts, ndisc, j, k;
+  double u, v, xix, yix, r2, d2, d2old, d2new;
+  double *x, *y;
+  int *aux;
+  BadGey *badgey;
+
+  badgey = (BadGey *) cdata;
+
+  aux = badgey->aux;
+  /* 'state' is current state before transition */
+  x = state.x;
+  y = state.y;
+  npts = state.npts;      
+  ndisc = badgey->ndisc;
+
+#ifdef DEBUG
+  Rprintf("start update ---- \n");
+  for(j=0; j < npts; j++) {
+    for(k=0; k < ndisc; k++)
+      Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j));
+    Rprintf("\n");
+  }
+#endif
+      
+  if(prop.itype == BIRTH) { 
+#ifdef DEBUG
+    Rprintf("Update for birth ---- \n");
+#endif
+    /* Birth */
+    u = prop.u;
+    v = prop.v;
+    /* initialise auxiliary counters for new point x[npts], y[npts] */
+    for(k = 0; k < ndisc; k++)
+      AUX(k, npts) = 0;
+    /* update all auxiliary counters */
+    if(badgey->per) {
+      /* periodic distance */
+      for(j=0; j < npts; j++) {
+	d2 = dist2(u,v,x[j],y[j],badgey->period);
+	for(k = 0; k < ndisc; k++) {
+	  if(d2 < badgey->r2[k]) {
+	    AUX(k, j) += 1;
+	    AUX(k, npts) += 1;
+	  }
+	} 
+      }
+    } else {
+      /* Euclidean distance */
+      for(j=0; j < npts; j++) {
+	d2 = pow(u - x[j], 2) + pow(v - y[j], 2);
+	for(k = 0; k < ndisc; k++) {
+	  if(d2 < badgey->r2[k]) {
+	    AUX( k, j) += 1;
+	    AUX( k, npts) += 1;
+	  }
+	} 
+      }
+    }
+#ifdef DEBUG
+  Rprintf("end update ---- \n");
+  for(j=0; j <= npts; j++) {
+    for(k=0; k < ndisc; k++)
+      Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j));
+    Rprintf("\n");
+  }
+#endif
+    return;
+  }
+  if(prop.itype == DEATH) {
+    /* Death */
+    ix = prop.ix;
+    u = x[ix];
+    v = y[ix];
+#ifdef DEBUG
+    Rprintf("--- Update for death of point %d = (%lf,%lf) ---- \n", ix, u, v);
+#endif
+    /* 
+       Decrement auxiliary counter for each neighbour of deleted point,
+       and remove entry corresponding to deleted point
+    */
+    if(badgey->per) {
+      /* periodic distance */
+      for(j=0; j<npts; j++) {
+	if(j==ix) continue;
+	d2 = dist2(u,v,x[j],y[j],badgey->period);
+	for(k = 0; k < ndisc; k++) {
+	  if(d2 < badgey->r2[k]) {
+	    if(j < ix) AUX(k,j) -= 1; 
+	    else AUX(k,j-1) = AUX(k,j) - 1;
+	  } else if(j >= ix) AUX(k,j-1) = AUX(k,j);
+	}
+      }
+    } else {
+      /* Euclidean distance */
+      for(j=0; j<npts; j++) {
+	if(j==ix) continue;
+	d2 = pow(u - x[j], 2) + pow(v - y[j], 2);
+	for(k = 0; k < ndisc; k++) {
+	  if(d2 < badgey->r2[k]) {
+#ifdef DEBUG
+	    Rprintf("hit for point %d with radius r[%d]\n", j, k);
+#endif
+	    if(j < ix) AUX(k,j) -= 1; 
+	    else AUX(k,j-1) = AUX(k,j) - 1;
+	  } else if(j >= ix) AUX(k,j-1) = AUX(k,j);
+	}
+      }
+    }
+#ifdef DEBUG
+  Rprintf("end update ---- \n");
+  for(j=0; j < npts-1; j++) {
+    for(k=0; k < ndisc; k++)
+      Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j));
+    Rprintf("\n");
+  }
+#endif
+    return;
+  }
+
+  if(prop.itype == SHIFT) { 
+#ifdef DEBUG
+    Rprintf("Update for shift ---- \n");
+#endif
+    /* Shift */
+    u = prop.u;
+    v = prop.v;
+    ix = prop.ix;
+    xix = x[ix];
+    yix = y[ix];
+    /* recompute all auxiliary counters for point ix */
+    for(k = 0; k < ndisc; k++) 
+      AUX(k,ix) = 0;
+
+    if(badgey->per) {
+      for(j=0; j<npts; j++) {
+	if(j == ix) continue;
+	d2new = dist2(u,v,x[j],y[j],badgey->period);
+	d2old = dist2(xix,yix,x[j],y[j],badgey->period);
+	for(k = 0; k < ndisc; k++) {
+	  r2 = badgey->r2[k];
+	  if(d2old >= r2 && d2new >= r2) continue;
+	  if(d2new < r2) {
+	    /* increment neighbour count for new point */
+	    AUX(k,ix) += 1;
+	    if(d2old >= r2) 
+	      AUX(k,j) += 1; /* point j gains a new neighbour */
+	  } else if(d2old < r2) 
+	    AUX(k,j) -= 1; /* point j loses a neighbour */
+	}
+      }
+    } else {
+      /* Euclidean distance */
+      for(j=0; j<npts; j++) {
+	if(j == ix) continue;
+	d2new = pow(u - x[j], 2) + pow(v - y[j], 2);
+	d2old = pow(x[ix] - x[j], 2) + pow(y[ix] - y[j], 2);
+	for(k = 0; k < ndisc; k++) {
+	  r2 = badgey->r2[k];
+	  if(d2old >= r2 && d2new >= r2) continue;
+	  if(d2new < r2) {
+#ifdef DEBUG
+	    Rprintf("shifted point is close to j=%d\n", j);
+#endif
+	    /* increment neighbour count for new point */
+	    AUX(k,ix) += 1;
+	    if(d2old >= r2) {
+#ifdef DEBUG
+	    Rprintf("\t(previous position was not)\n");
+#endif
+	      AUX(k,j) += 1; /* point j gains a new neighbour */
+	    }
+	  } else if(d2old < r2) {
+#ifdef DEBUG
+	    Rprintf("previous position was close to j=%d, shifted point is not\n", j);
+#endif
+	    AUX(k,j) -= 1; /* point j loses a neighbour */
+	  }
+	}
+      }
+    }
+#ifdef DEBUG
+  Rprintf("end update ---- \n");
+  for(j=0; j < npts; j++) {
+    for(k=0; k < ndisc; k++)
+      Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j));
+    Rprintf("\n");
+  }
+#endif
+    return;
+  }
+  fexitc("Unrecognised transition type; bailing out.\n");
+}
+
+Cifns BadGeyCifns = { &badgeyinit, &badgeycif, &badgeyupd, NO};
diff --git a/src/bdrymask.c b/src/bdrymask.c
new file mode 100644
index 0000000..10e8f87
--- /dev/null
+++ b/src/bdrymask.c
@@ -0,0 +1,57 @@
+/*
+
+  bdrymask.c
+
+  Boundary pixels of binary mask
+
+  Copyright (C) Adrian Baddeley, Rolf Turner and Ege Rubak 2014
+  Licence: GPL >= 2
+
+  $Revision: 1.3 $  $Date: 2016/02/02 01:29:50 $
+
+
+*/
+
+#include <R.h>
+#include <R_ext/Utils.h>
+#include <math.h>
+
+void bdrymask(nx, ny, m, b)
+     /* inputs */
+     int *nx, *ny, *m;
+     /* outputs */
+     int *b;
+{ 
+  int Nxcol, Nyrow, Nx1, Ny1;
+  int i, j, mij;
+
+  Nxcol   = *nx;
+  Nyrow   = *ny;
+  Nx1 = Nxcol - 1;
+  Ny1 = Nyrow - 1;
+
+#define MAT(A,I,J) A[(I) + (J) * Nyrow]
+
+  /* loop over pixels */
+
+  for(j = 0; j < Nxcol; j++) {
+
+    R_CheckUserInterrupt();
+    
+    for(i = 0; i < Nyrow; i++) {
+
+      mij = MAT(m, i, j);
+      if(i == 0 || i == Ny1 || j == 0 || j == Nx1) {
+	MAT(b, i, j) = mij;
+      } else if((mij != MAT(m, (i-1), j)) ||
+		(mij != MAT(m, (i+1), j)) ||
+		(mij != MAT(m, i, (j-1))) ||
+		(mij != MAT(m, i, (j+1)))) {
+	MAT(b, i, j) = 1;
+      }
+    }
+  }
+}
+
+
+
diff --git a/src/call3d.c b/src/call3d.c
new file mode 100755
index 0000000..30cf6b5
--- /dev/null
+++ b/src/call3d.c
@@ -0,0 +1,560 @@
+/*
+	$Revision: 1.5 $ $Date: 2010/10/24 10:57:02 $
+
+	R interface
+
+	Pass data between R and internally-defined data structures 
+
+
+# /////////////////////////////////////////////
+# AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991.
+# 
+# MODIFIED BY: Adrian Baddeley, Perth 2009
+# 
+# This software is distributed free
+# under the conditions that
+# 	(1) it shall not be incorporated
+# 	in software that is subsequently sold
+# 	(2) the authorship of the software shall
+# 	be acknowledged in any publication that 
+# 	uses results generated by the software
+# 	(3) this notice shall remain in place
+# 	in each file.
+# //////////////////////////////////////////////
+
+
+*/
+
+#include <R.h>
+#include "geom3.h"
+#include "functable.h"
+
+#undef DEBUG 
+
+#ifdef DEBUG
+#define DEBUGMESSAGE(S) Rprintf(S);
+#else 
+#define DEBUGMESSAGE(S) 
+#endif
+
+void g3one(Point *p, int n, Box *b, Ftable *g);
+void g3three(Point *p, int n, Box *b, Ftable *g);
+void g3cen(Point *p, int n, Box *b, H4table *count);
+void k3trans(Point *p, int n, Box *b, Ftable *k);
+void k3isot(Point *p, int n, Box *b, Ftable *k);
+void pcf3trans(Point *p, int n, Box *b, Ftable *pcf, double delta);
+void pcf3isot(Point *p, int n, Box *b, Ftable *pcf, double delta);
+void phatminus(Point *p, int n, Box *b, double vside, Itable *count);
+void phatnaive(Point *p, int n, Box *b, double vside, Itable *count);
+void p3hat4(Point *p, int n, Box *b, double vside, H4table *count);
+
+/*
+	ALLOCATION OF SPACE FOR STRUCTURES/ARRAYS
+
+	We have defined an alloc() and free() function for each type.
+
+	However, the free() functions currently do nothing,
+	because we use R_alloc to allocate transient space,
+	which is freed automatically by R.
+
+*/
+
+Ftable *
+allocFtable(n)		/* allocate function table of size n */
+int	n;
+{
+  Ftable *x;
+  x = (Ftable *) R_alloc(1, sizeof(Ftable));
+  x->n = n;
+  x->f 	   = (double *) R_alloc(n, sizeof(double));
+  x->num   = (double *) R_alloc(n, sizeof(double));
+  x->denom = (double *) R_alloc(n, sizeof(double));
+  return(x);
+}
+
+void freeFtable(x) Ftable *x; { }
+
+Itable	*
+allocItable(n)
+int	n;
+{
+  Itable *x;
+  x = (Itable *) R_alloc(1, sizeof(Itable));
+  x->n     = n;
+  x->num   = (int *) R_alloc(n, sizeof(int));
+  x->denom = (int *) R_alloc(n, sizeof(int));
+  return(x);
+}
+
+void freeItable(x) Itable *x; { }
+
+H4table	*
+allocH4table(n)
+int	n;
+{
+  H4table *x;
+  x = (H4table *) R_alloc(1, sizeof(H4table));
+  x->n     = n;
+  x->obs   = (int *) R_alloc(n, sizeof(int));
+  x->nco   = (int *) R_alloc(n, sizeof(int));
+  x->cen   = (int *) R_alloc(n, sizeof(int));
+  x->ncc   = (int *) R_alloc(n, sizeof(int));
+  return(x);
+}
+
+void freeH4table(x) H4table *x; { }
+
+Box	*
+allocBox()		/* I know this is ridiculous but it's consistent. */
+{
+  Box *b;
+  b = (Box *) R_alloc(1, sizeof(Box));
+  return(b);
+}
+
+void freeBox(x) Box *x; { }
+
+
+Point	*
+allocParray(n)		/* allocate array of n Points */
+int	n;
+{
+  Point *p;
+  p = (Point *) R_alloc(n, sizeof(Point));
+  return(p);
+}
+
+void freeParray(x) Point *x; { }
+
+/*
+	CREATE AND INITIALISE DATA STORAGE
+
+*/
+
+Ftable *
+MakeFtable(t0, t1, n)
+     double *t0, *t1;
+     int	*n;
+{
+  Ftable	*tab;
+  int	i, nn;
+
+  nn = *n;
+  tab = allocFtable(nn);
+
+  tab->t0 = *t0;
+  tab->t1 = *t1;
+  
+  for(i = 0; i < nn; i++) {
+    tab->f[i] = 0.0;
+    tab->num[i] = 0;
+    tab->denom[i] = 0;
+  }
+  return(tab);
+}
+	
+Itable	*
+MakeItable(t0, t1, n)
+     double *t0, *t1;
+     int *n;
+{
+  Itable *tab;
+  int i, nn;
+
+  nn = *n;
+  tab = allocItable(nn);
+
+  tab->t0 = *t0;
+  tab->t1 = *t1;
+
+  for(i = 0; i < nn; i++) {
+    tab->num[i] = 0;
+    tab->denom[i] = 0;
+  }
+  return(tab);
+}
+
+H4table	*
+MakeH4table(t0, t1, n)
+     double *t0, *t1;
+     int *n;
+{
+  H4table *tab;
+  int i, nn;
+
+  nn = *n;
+  tab = allocH4table(nn);
+
+  tab->t0 = *t0;
+  tab->t1 = *t1;
+
+  for(i = 0; i < nn; i++) {
+    tab->obs[i] = 0;
+    tab->nco[i] = 0;
+    tab->cen[i] = 0;
+    tab->ncc[i] = 0;
+  }
+  tab->upperobs = 0;
+  tab->uppercen = 0;
+
+  return(tab);
+}
+
+/*
+	CONVERSION OF DATA TYPES 
+
+		R -> internal
+
+	including allocation of internal data types as needed
+*/
+
+Point	*
+RtoPointarray(x,y,z,n)
+     double *x, *y, *z;
+     int	*n;
+{
+  int	i, nn;
+  Point	*p;
+
+  nn = *n;
+  p = allocParray(nn);
+	
+  for(i = 0; i < nn; i++) {
+    p[i].x = x[i];
+    p[i].y = y[i];
+    p[i].z = z[i];
+  }
+  return(p);
+}
+
+Box *
+RtoBox(x0, x1, y0, y1, z0, z1)
+     double	*x0, *x1, *y0, *y1, *z0, *z1;
+{
+  Box *b;
+  b = allocBox();
+
+  b->x0 = *x0;
+  b->x1 = *x1;
+  b->y0 = *y0;
+  b->y1 = *y1;
+  b->z0 = *z0;
+  b->z1 = *z1;
+  return(b);
+}
+
+/*
+	CONVERSION OF DATA TYPES 
+
+		internal -> R
+
+	Note: it can generally be assumed that the R arguments
+	are already allocated vectors of correct length,
+	so we do not allocate them.
+
+
+*/
+
+void
+FtabletoR(tab, t0, t1, n, f, num, denom)
+     /* internal */
+     Ftable	*tab;
+     /* R representation */
+     double	*t0, *t1;
+     int	*n;
+     double	*f, *num, *denom;
+{
+  int	i;
+
+  *t0 = tab->t0;
+  *t1 = tab->t1;
+  *n = tab->n;
+	
+  for(i = 0; i < tab->n; i++) {
+    f[i] = tab->f[i];
+    num[i] = tab->num[i];
+    denom[i] = tab->denom[i];
+  }
+
+  freeFtable(tab);
+}
+
+void
+ItabletoR(tab, t0, t1, m, num, denom)
+     /* internal */
+     Itable	*tab;
+     /* R representation */
+     double	*t0, *t1;
+     int  *m;
+     int  *num, *denom;
+{
+  int	i;
+  
+  *t0 = tab->t0;
+  *t1 = tab->t1;
+  *m  = tab->n;
+
+  for(i = 0; i < tab->n; i++) {
+    num[i] = tab->num[i];
+    denom[i] = tab->denom[i];
+  }
+  freeItable(tab);
+}
+	
+void
+H4tabletoR(tab, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen)
+     /* internal */
+     H4table	*tab;
+     /* R representation */
+     double	*t0, *t1;
+     int  *m;
+     int  *obs, *nco, *cen, *ncc;
+     int *upperobs, *uppercen;
+{
+  int	i;
+  
+  *t0 = tab->t0;
+  *t1 = tab->t1;
+  *m  = tab->n;
+
+  *upperobs = tab->upperobs;
+  *uppercen = tab->uppercen;
+
+  for(i = 0; i < tab->n; i++) {
+    obs[i] = tab->obs[i];
+    nco[i] = tab->nco[i];
+    cen[i] = tab->cen[i];
+    ncc[i] = tab->ncc[i];
+  }
+
+  freeH4table(tab);
+}
+	
+		
+/*
+	R CALLING INTERFACE 
+
+	These routines are called from R by 
+	> .C("routine-name", ....)
+*/
+
+void
+RcallK3(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, f, num, denom, method)
+
+     double *x, *y, *z;	/* points */
+     int    *n;
+
+     double *x0, *x1, 	/* box */
+            *y0, *y1, 
+            *z0, *z1;	
+
+     double *t0, *t1;	/* Ftable */
+     int    *m;
+     double *f, *num, *denom;
+
+     int    *method;
+	
+{
+  Point	*p;
+  Box 	*b;
+  Ftable	*tab;
+	
+  p = RtoPointarray(x, y, z, n);
+  b = RtoBox(x0, x1, y0, y1, z0, z1);
+  tab = MakeFtable(t0, t1, m);	
+
+  switch((int) *method) {	
+  case 0:
+    k3trans(p, (int) *n, b, tab); break;
+  case 1:
+    k3isot(p, (int) *n, b, tab); break;
+  default:
+    Rprintf("Method %d not implemented: defaults to 0\n", *method);
+    k3trans(p, (int) *n, b, tab); break;
+  }
+  FtabletoR(tab, t0, t1, m, f, num, denom);
+}
+
+void
+RcallG3(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, f, num, denom, method)
+
+     double *x, *y, *z;	/* points */
+     int    *n;
+
+     double *x0, *x1, 	/* box */
+            *y0, *y1, 
+	    *z0, *z1;	
+
+     double *t0, *t1;	/* Ftable */
+     int    *m;
+     double *f, *num, *denom;
+
+     int    *method;
+{
+  Point	*p;
+  Box 	*b;
+  Ftable	*tab;
+	
+  p = RtoPointarray(x, y, z, n);
+  b = RtoBox(x0, x1, y0, y1, z0, z1);
+  tab = MakeFtable(t0, t1, m);	
+
+  switch(*method) {
+  case 1:
+    g3one(p, (int) *n, b, tab); 
+    break;
+  case 3:
+    g3three(p, (int) *n, b, tab); 
+    break;
+  default:
+    Rprintf("Method %d not implemented: defaults to 3\n", *method);
+    g3three(p, (int) *n, b, tab); 
+  }
+  FtabletoR(tab, t0, t1, m, f, num, denom);
+}
+
+void
+RcallG3cen(x,y,z, n, x0, x1, y0, y1, z0, z1, 
+	 t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen)
+     
+     double *x, *y, *z;	/* points */
+     int    *n;
+
+     double *x0, *x1, 	/* box */
+	    *y0, *y1, 
+	    *z0, *z1;	
+
+     double *t0, *t1;
+     int    *m;		/* H4table */
+     int    *obs, *nco, *cen, *ncc;
+     int    *upperobs, *uppercen;
+{
+  Point	*p;
+  Box 	*b;
+  H4table *count;
+	
+  DEBUGMESSAGE("Inside RcallG3cen\n")
+  p = RtoPointarray(x, y, z, n);
+  b = RtoBox(x0, x1, y0, y1, z0, z1);
+  count = MakeH4table(t0, t1, m);
+  g3cen(p, (int) *n, b, count);
+  H4tabletoR(count, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen);
+  DEBUGMESSAGE("Leaving RcallG3cen\n")
+}
+
+void
+RcallF3(x,y,z, n, x0, x1, y0, y1, z0, z1, 
+	 vside, 
+	 t0, t1, m, num, denom, method)
+     
+     double *x, *y, *z;	/* points */
+     int    *n;
+
+     double *x0, *x1, 	/* box */
+	    *y0, *y1, 
+	    *z0, *z1;	
+
+     double *vside;
+
+     double *t0, *t1;
+     int    *m;		/* Itable */
+     int    *num, *denom;
+
+     int    *method;
+{
+  Point	*p;
+  Box 	*b;
+  Itable *count;
+	
+  DEBUGMESSAGE("Inside Rcall_f3\n")
+  p = RtoPointarray(x, y, z, n);
+  b = RtoBox(x0, x1, y0, y1, z0, z1);
+  count = MakeItable(t0, t1, m);	
+
+  switch((int) *method) {
+  case 0:
+    phatnaive(p, (int) *n, b, *vside, count);
+    break;
+  case 1:
+    phatminus(p, (int) *n, b, *vside, count);
+    break;
+  default:
+    Rprintf("Method %d not recognised: defaults to 1\n", *method);
+    phatminus(p, (int) *n, b, *vside, count);
+  }
+
+  ItabletoR(count, t0, t1, m, num, denom);
+  DEBUGMESSAGE("Leaving Rcall_f3\n")
+}
+
+void
+RcallF3cen(x,y,z, n, x0, x1, y0, y1, z0, z1, 
+	 vside, 
+	 t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen)
+     
+     double *x, *y, *z;	/* points */
+     int    *n;
+
+     double *x0, *x1, 	/* box */
+	    *y0, *y1, 
+	    *z0, *z1;	
+
+     double *vside;
+
+     double *t0, *t1;
+     int    *m;		/* H4table */
+     int    *obs, *nco, *cen, *ncc;
+     int    *upperobs, *uppercen;
+{
+  Point	*p;
+  Box 	*b;
+  H4table *count;
+	
+  DEBUGMESSAGE("Inside Rcallf3cen\n")
+  p = RtoPointarray(x, y, z, n);
+  b = RtoBox(x0, x1, y0, y1, z0, z1);
+  count = MakeH4table(t0, t1, m);
+  p3hat4(p, (int) *n, b, *vside, count);
+  H4tabletoR(count, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen);
+  DEBUGMESSAGE("Leaving Rcallf3cen\n")
+}
+
+void
+Rcallpcf3(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, f, num, denom, method, 
+delta)
+
+     double *x, *y, *z;	/* points */
+     int    *n;
+
+     double *x0, *x1, 	/* box */
+            *y0, *y1, 
+            *z0, *z1;	
+
+     double *t0, *t1;	/* Ftable */
+     int    *m;
+     double *f, *num, *denom;
+
+     int    *method;
+
+     double *delta;    /* Epanechnikov kernel halfwidth */
+{
+  Point	*p;
+  Box 	*b;
+  Ftable	*tab;
+
+  p = RtoPointarray(x, y, z, n);
+  b = RtoBox(x0, x1, y0, y1, z0, z1);
+  tab = MakeFtable(t0, t1, m);	
+
+  switch((int) *method) {	
+  case 0:
+    pcf3trans(p, (int) *n, b, tab, (double) *delta); break;
+  case 1:
+    pcf3isot(p, (int) *n, b, tab, (double) *delta); break;
+  default:
+    Rprintf("Method %d not implemented: defaults to 0\n", *method);
+    pcf3trans(p, (int) *n, b, tab, (double) *delta); break;
+  }
+  FtabletoR(tab, t0, t1, m, f, num, denom);
+}
+
diff --git a/src/chunkloop.h b/src/chunkloop.h
new file mode 100644
index 0000000..f0adceb
--- /dev/null
+++ b/src/chunkloop.h
@@ -0,0 +1,37 @@
+/*
+  chunkloop.h
+
+  Divide a loop into chunks 
+
+  Convenient for divide-and-recombine,
+  and reducing calls to R_CheckUserInterrupt, etc.
+
+  $Revision: 1.2 $  $Date: 2013/05/27 02:09:10 $
+  
+*/
+
+#define OUTERCHUNKLOOP(IVAR, LOOPLENGTH, ICHUNK, CHUNKSIZE) \
+  IVAR = 0; \
+  ICHUNK = 0; \
+  while(IVAR < LOOPLENGTH) 
+
+#define INNERCHUNKLOOP(IVAR, LOOPLENGTH, ICHUNK, CHUNKSIZE) \
+    ICHUNK += CHUNKSIZE; \
+    if(ICHUNK > LOOPLENGTH) ICHUNK = LOOPLENGTH; \
+    for(; IVAR < ICHUNK; IVAR++) 
+
+#define XOUTERCHUNKLOOP(IVAR, ISTART, IEND, ICHUNK, CHUNKSIZE) \
+  IVAR = ISTART; \
+  ICHUNK = 0; \
+  while(IVAR <= IEND) 
+
+#define XINNERCHUNKLOOP(IVAR, ISTART, IEND, ICHUNK, CHUNKSIZE)	\
+    ICHUNK += CHUNKSIZE; \
+    if(ICHUNK > IEND) ICHUNK = IEND; \
+    for(; IVAR <= IEND; IVAR++) 
+
+#define CHUNKLOOP_H
+
+
+
+
diff --git a/src/close3pair.c b/src/close3pair.c
new file mode 100644
index 0000000..a7551a6
--- /dev/null
+++ b/src/close3pair.c
@@ -0,0 +1,76 @@
+/*
+
+  close3pair.c
+
+  $Revision: 1.1 $     $Date: 2015/02/21 03:28:53 $
+
+  closepairs and crosspairs for 3D
+
+  Assumes point pattern is sorted in increasing order of x coordinate
+
+*/
+
+#include <R.h>
+#include <Rdefines.h>
+#include <R_ext/Utils.h>
+
+#define OK 0
+#define ERR_OVERFLOW 1
+#define ERR_ALLOC 2
+
+#define intRealloc(PTR, OLDLENGTH, NEWLENGTH) \
+  (int *) S_realloc((char *) PTR, NEWLENGTH, OLDLENGTH, sizeof(int))
+
+#define dblRealloc(PTR, OLDLENGTH, NEWLENGTH) \
+  (double *) S_realloc((char *) PTR, NEWLENGTH, OLDLENGTH, sizeof(double))
+
+double sqrt();
+
+/* .......  define functions, using closefuns.h  ........*/
+
+/* return only one of the pairs (i,j) and (j,i) */
+#define SINGLE
+
+/* enable 3D code */
+#define ZCOORD
+
+/* return i, j only */
+#define CLOSEFUN close3IJpairs
+#define CROSSFUN cross3IJpairs
+#undef THRESH
+#undef COORDS
+#undef DIST
+#include "closefuns.h"
+#undef CLOSEFUN
+#undef CROSSFUN
+#undef THRESH
+#undef COORDS
+#undef DIST
+
+/* return i, j, xi, yi, zi, xj, yj, zj, dx, dy, dz, d */
+#define CLOSEFUN close3pairs
+#define CROSSFUN cross3pairs
+#undef THRESH
+#define COORDS
+#define DIST
+#include "closefuns.h"
+#undef CLOSEFUN
+#undef CROSSFUN
+#undef THRESH
+#undef COORDS
+#undef DIST
+
+/* return i, j, t where t = 1{d < s} */
+
+#define CLOSEFUN close3thresh
+#define CROSSFUN cross3thresh
+#define THRESH
+#undef COORDS
+#undef DIST
+#include "closefuns.h"
+#undef CLOSEFUN
+#undef CROSSFUN
+#undef THRESH
+#undef COORDS
+#undef DIST
+
diff --git a/src/closefuns.h b/src/closefuns.h
new file mode 100644
index 0000000..083fe14
--- /dev/null
+++ b/src/closefuns.h
@@ -0,0 +1,1168 @@
+/*
+  closefuns.h
+
+  Function definitions to be #included in closepair.c
+  several times with different values of macros.
+
+  Macros used:
+
+  CLOSEFUN   name of function for 'closepairs'
+
+  CROSSFUN   name of function for 'crosspairs'
+
+  DIST       if defined, also return d
+
+  COORDS     if defined, also return xi, yi, xj, yj, dx, dy
+
+  THRESH     if defined, also return 1(d < s)
+
+  ZCOORD     if defined, coordinates are 3-dimensional
+
+  SINGLE     if defined, capture only i < j
+
+  $Revision: 1.9 $ $Date: 2015/12/30 04:01:51 $
+
+*/
+
+#ifdef ZCOORD 
+#define SPACEDIM 3
+#else
+#define SPACEDIM 2
+#endif
+
+SEXP CLOSEFUN(SEXP xx,
+	      SEXP yy,
+#ifdef ZCOORD
+	      SEXP zz,
+#endif
+	      SEXP rr,
+#ifdef THRESH
+	      SEXP ss,
+#endif
+	      SEXP nguess) 
+{
+  double *x, *y;
+  double xi, yi, rmax, r2max, rmaxplus, dx, dy, d2;
+#ifdef ZCOORD
+  double *z;
+  double zi, dz;
+#endif
+  int n, k, kmax, kmaxold, maxchunk, i, j, m;
+  /* local storage */
+  int *iout, *jout;
+  /* R objects in return value */
+  SEXP Out, iOut, jOut;
+  /* external storage pointers */
+  int *iOutP, *jOutP;
+
+#ifdef COORDS
+  double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout;
+  SEXP xiOut, yiOut, xjOut, yjOut, dxOut, dyOut;
+  double *xiOutP, *yiOutP, *xjOutP, *yjOutP, *dxOutP, *dyOutP;
+#ifdef ZCOORD
+  double *ziout, *zjout, *dzout;
+  SEXP ziOut, zjOut, dzOut;
+  double *ziOutP, *zjOutP, *dzOutP;
+#endif
+#endif
+#ifdef DIST
+  double *dout;
+  SEXP dOut;
+  double *dOutP;
+#endif
+
+#ifdef THRESH
+  double s, s2;
+  int *tout;
+  SEXP tOut;
+  int *tOutP;
+#endif
+  
+  /* protect R objects from garbage collector */
+  PROTECT(xx     = AS_NUMERIC(xx));
+  PROTECT(yy     = AS_NUMERIC(yy));
+#ifdef ZCOORD
+  PROTECT(zz     = AS_NUMERIC(zz));
+#endif
+  PROTECT(rr     = AS_NUMERIC(rr));
+  PROTECT(nguess = AS_INTEGER(nguess));
+#ifdef THRESH
+  PROTECT(ss     = AS_NUMERIC(ss));
+#define NINPUTS (3+SPACEDIM)
+#else
+#define NINPUTS (2+SPACEDIM)
+#endif
+
+  /* Translate arguments from R to C */
+
+  x = NUMERIC_POINTER(xx);
+  y = NUMERIC_POINTER(yy);
+#ifdef ZCOORD
+  z = NUMERIC_POINTER(zz);
+#endif
+
+  n = LENGTH(xx);
+  rmax = *(NUMERIC_POINTER(rr));
+  kmax = *(INTEGER_POINTER(nguess));
+  
+  r2max = rmax * rmax;
+
+  rmaxplus = rmax + rmax/16.0;
+
+#ifdef THRESH
+  s = *(NUMERIC_POINTER(ss));
+  s2 = s * s;
+#endif
+
+  k = 0;   /* k is the next available storage location 
+              and also the current length of the list */ 
+
+  if(n > 0 && kmax > 0) {
+    /* allocate space */
+    iout = (int *) R_alloc(kmax, sizeof(int));
+    jout = (int *) R_alloc(kmax, sizeof(int));
+#ifdef COORDS
+    xiout =  (double *) R_alloc(kmax, sizeof(double));
+    yiout =  (double *) R_alloc(kmax, sizeof(double));
+    xjout =  (double *) R_alloc(kmax, sizeof(double));
+    yjout =  (double *) R_alloc(kmax, sizeof(double));
+    dxout =  (double *) R_alloc(kmax, sizeof(double));
+    dyout =  (double *) R_alloc(kmax, sizeof(double));
+#ifdef ZCOORD
+    ziout =  (double *) R_alloc(kmax, sizeof(double));
+    zjout =  (double *) R_alloc(kmax, sizeof(double));
+    dzout =  (double *) R_alloc(kmax, sizeof(double));
+#endif
+#endif
+#ifdef DIST
+    dout  =  (double *) R_alloc(kmax, sizeof(double));
+#endif
+
+#ifdef THRESH
+    tout  =  (int *) R_alloc(kmax, sizeof(int));
+#endif
+    
+    /* loop in chunks of 2^16 */
+
+    i = 0; maxchunk = 0; 
+    while(i < n) {
+
+      R_CheckUserInterrupt();
+
+      maxchunk += 65536; 
+      if(maxchunk > n) maxchunk = n;
+
+      for(; i < maxchunk; i++) {
+
+	xi = x[i];
+	yi = y[i];
+#ifdef ZCOORD
+	zi = z[i];
+#endif
+
+#ifndef SINGLE
+	if(i > 0) {
+	  /* scan backward */
+	  for(j = i - 1; j >= 0; j--) {
+	    dx = x[j] - xi;
+	    if(dx < -rmaxplus) 
+	      break;
+	    dy = y[j] - yi;
+	    d2 = dx * dx + dy * dy;
+#ifdef ZCOORD
+	    if(d2 <= r2max) {
+	      dz = z[j] - zi;
+	      d2 = d2 + dz * dz;
+#endif
+	      if(d2 <= r2max) {
+		/* add this (i, j) pair to output */
+		if(k >= kmax) {
+		  /* overflow; allocate more space */
+		  kmaxold = kmax;
+		  kmax    = 2 * kmax;
+		  iout  = intRealloc(iout,  kmaxold, kmax);
+		  jout  = intRealloc(jout,  kmaxold, kmax);
+#ifdef COORDS
+		  xiout = dblRealloc(xiout, kmaxold, kmax); 
+		  yiout = dblRealloc(yiout, kmaxold, kmax); 
+		  xjout = dblRealloc(xjout, kmaxold, kmax); 
+		  yjout = dblRealloc(yjout, kmaxold, kmax); 
+		  dxout = dblRealloc(dxout, kmaxold, kmax); 
+		  dyout = dblRealloc(dyout, kmaxold, kmax); 
+#ifdef ZCOORD
+		  ziout = dblRealloc(ziout, kmaxold, kmax); 
+		  zjout = dblRealloc(zjout, kmaxold, kmax); 
+		  dzout = dblRealloc(dzout, kmaxold, kmax); 
+#endif
+#endif
+#ifdef DIST
+		  dout  = dblRealloc(dout,  kmaxold, kmax); 
+#endif
+#ifdef THRESH
+		tout  = intRealloc(tout,  kmaxold, kmax);
+#endif
+	      }
+	      jout[k] = j + 1; /* R indexing */
+	      iout[k] = i + 1;
+#ifdef COORDS
+	      xiout[k] = xi;
+	      yiout[k] = yi;
+	      xjout[k] = x[j];
+	      yjout[k] = y[j];
+	      dxout[k] = dx;
+	      dyout[k] = dy;
+#ifdef ZCOORD
+	      ziout[k] = zi;
+	      zjout[k] = z[j];
+	      dzout[k] = dz;
+#endif
+#endif
+#ifdef DIST
+	      dout[k] = sqrt(d2);
+#endif
+
+#ifdef THRESH
+	      tout[k] = (d2 <= s2) ? 1 : 0;
+#endif
+	      ++k;
+	      }
+#ifdef ZCOORD
+	    }
+#endif
+	  }
+	}
+#endif
+
+	if(i + 1 < n) {
+	  /* scan forward */
+	  for(j = i + 1; j < n; j++) {
+	    dx = x[j] - xi;
+	    if(dx > rmaxplus) 
+	      break;
+	    dy = y[j] - yi;
+	    d2 = dx * dx + dy * dy;
+#ifdef ZCOORD
+	    if(d2 <= r2max) {
+	      dz = z[j] - zi;
+	      d2 = d2 + dz * dz;
+#endif
+	      if(d2 <= r2max) {
+		/* add this (i, j) pair to output */
+		if(k >= kmax) {
+		  /* overflow; allocate more space */
+		  kmaxold = kmax;
+		  kmax    = 2 * kmax;
+		  iout  = intRealloc(iout,  kmaxold, kmax);
+		  jout  = intRealloc(jout,  kmaxold, kmax);
+#ifdef COORDS
+		  xiout = dblRealloc(xiout, kmaxold, kmax); 
+		  yiout = dblRealloc(yiout, kmaxold, kmax); 
+		  xjout = dblRealloc(xjout, kmaxold, kmax); 
+		  yjout = dblRealloc(yjout, kmaxold, kmax); 
+		  dxout = dblRealloc(dxout, kmaxold, kmax); 
+		  dyout = dblRealloc(dyout, kmaxold, kmax); 
+#ifdef ZCOORD
+		  ziout = dblRealloc(ziout, kmaxold, kmax); 
+		  zjout = dblRealloc(zjout, kmaxold, kmax); 
+		  dzout = dblRealloc(dzout, kmaxold, kmax); 
+#endif
+#endif
+#ifdef DIST
+		  dout  = dblRealloc(dout,  kmaxold, kmax); 
+#endif
+#ifdef THRESH
+		  tout  = intRealloc(tout,  kmaxold, kmax);
+#endif
+		}
+		jout[k] = j + 1; /* R indexing */
+		iout[k] = i + 1;
+#ifdef COORDS
+		xiout[k] = xi;
+		yiout[k] = yi;
+		xjout[k] = x[j];
+		yjout[k] = y[j];
+		dxout[k] = dx;
+		dyout[k] = dy;
+#ifdef ZCOORD
+		ziout[k] = zi;
+		zjout[k] = z[j];
+		dzout[k] = dz;
+#endif
+#endif
+#ifdef DIST
+		dout[k] = sqrt(d2);
+#endif
+#ifdef THRESH
+		tout[k] = (d2 <= s2) ? 1 : 0;
+#endif
+		++k;
+	      }
+#ifdef ZCOORD
+	    }
+#endif
+	  }
+	}
+	/* end of i loop */
+      }
+    }
+  }
+
+  /* return a list of vectors */
+  PROTECT(iOut  = NEW_INTEGER(k));
+  PROTECT(jOut  = NEW_INTEGER(k));
+#ifdef COORDS
+  PROTECT(xiOut = NEW_NUMERIC(k));
+  PROTECT(yiOut = NEW_NUMERIC(k));
+  PROTECT(xjOut = NEW_NUMERIC(k));
+  PROTECT(yjOut = NEW_NUMERIC(k));
+  PROTECT(dxOut = NEW_NUMERIC(k));
+  PROTECT(dyOut = NEW_NUMERIC(k));
+#ifdef ZCOORD
+  PROTECT(ziOut = NEW_NUMERIC(k));
+  PROTECT(zjOut = NEW_NUMERIC(k));
+  PROTECT(dzOut = NEW_NUMERIC(k));
+#endif
+#endif
+#ifdef DIST
+  PROTECT(dOut  = NEW_NUMERIC(k));
+#endif
+#ifdef THRESH
+  PROTECT(tOut = NEW_INTEGER(k));
+#endif
+  if(k > 0) {
+    iOutP  = INTEGER_POINTER(iOut);
+    jOutP  = INTEGER_POINTER(jOut);
+#ifdef COORDS
+    xiOutP = NUMERIC_POINTER(xiOut);
+    yiOutP = NUMERIC_POINTER(yiOut);
+    xjOutP = NUMERIC_POINTER(xjOut);
+    yjOutP = NUMERIC_POINTER(yjOut);
+    dxOutP = NUMERIC_POINTER(dxOut);
+    dyOutP = NUMERIC_POINTER(dyOut);
+#ifdef ZCOORD
+    ziOutP = NUMERIC_POINTER(ziOut);
+    zjOutP = NUMERIC_POINTER(zjOut);
+    dzOutP = NUMERIC_POINTER(dzOut);
+#endif
+#endif
+#ifdef DIST
+    dOutP  = NUMERIC_POINTER(dOut);
+#endif
+#ifdef THRESH
+    tOutP  = INTEGER_POINTER(tOut);
+#endif
+    for(m = 0; m < k; m++) {
+      iOutP[m] = iout[m];
+      jOutP[m] = jout[m];
+#ifdef COORDS
+      xiOutP[m] = xiout[m];
+      yiOutP[m] = yiout[m];
+      xjOutP[m] = xjout[m];
+      yjOutP[m] = yjout[m];
+      dxOutP[m] = dxout[m];
+      dyOutP[m] = dyout[m];
+#ifdef ZCOORD
+      ziOutP[m] = ziout[m];
+      zjOutP[m] = zjout[m];
+      dzOutP[m] = dzout[m];
+#endif
+#endif
+#ifdef DIST
+      dOutP[m]  = dout[m];
+#endif
+#ifdef THRESH
+      tOutP[m]  = tout[m];
+#endif
+    }
+  }
+
+#define HEAD 2
+#ifdef THRESH
+#define NECK 1
+#else
+#define NECK 0
+#endif
+#ifdef COORDS
+#define MIDDLE (3*SPACEDIM)
+#else 
+#define MIDDLE 0
+#endif
+#ifdef DIST
+#define TAIL 1
+#else
+#define TAIL 0
+#endif
+
+  PROTECT(Out   = NEW_LIST(HEAD+NECK+MIDDLE+TAIL));
+
+  SET_VECTOR_ELT(Out, 0,  iOut);
+  SET_VECTOR_ELT(Out, 1,  jOut);
+
+#ifdef THRESH
+  SET_VECTOR_ELT(Out, HEAD,  tOut);
+#endif
+
+#ifdef COORDS
+#ifdef ZCOORD
+  SET_VECTOR_ELT(Out, HEAD+NECK,   xiOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+1, yiOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+2, ziOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+3, xjOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+4, yjOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+5, zjOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+6, dxOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+7, dyOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+8, dzOut);
+#else
+  SET_VECTOR_ELT(Out, HEAD+NECK,   xiOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+1, yiOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+2, xjOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+3, yjOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+4, dxOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+5, dyOut);
+#endif
+#endif
+
+#ifdef DIST
+  SET_VECTOR_ELT(Out, HEAD+NECK+MIDDLE, dOut);
+#endif
+
+  UNPROTECT(NINPUTS+1+HEAD+NECK+MIDDLE+TAIL);   /* 1 is for 'Out' itself */
+
+  return(Out);
+}
+
+#undef NINPUTS
+#undef HEAD
+#undef NECK
+#undef MIDDLE
+#undef TAIL
+
+/* ........................................................ */
+
+SEXP CROSSFUN(SEXP xx1,
+	      SEXP yy1,
+#ifdef ZCOORD
+	      SEXP zz1,
+#endif
+	      SEXP xx2,
+	      SEXP yy2,
+#ifdef ZCOORD
+	      SEXP zz2,
+#endif
+	      SEXP rr,
+#ifdef THRESH
+	      SEXP ss,
+#endif
+	      SEXP nguess) 
+{
+  /* input vectors */
+  double *x1, *y1, *x2, *y2;
+#ifdef ZCOORD
+  double *z1, *z2;
+#endif
+  /* lengths */
+  int n1, n2, nout, noutmax, noutmaxold, maxchunk;
+  /* distance parameter */
+  double rmax, r2max, rmaxplus;
+  /* indices */
+  int i, j, jleft, m;
+  /* temporary values */
+  double x1i, y1i, xleft, dx, dy, dx2, d2;
+#ifdef ZCOORD
+  double z1i, dz;
+#endif
+  /* local storage */
+  int *iout, *jout;
+  /* R objects in return value */
+  SEXP Out, iOut, jOut;
+  /* external storage pointers */
+  int *iOutP, *jOutP;
+#ifdef COORDS
+  SEXP xiOut, yiOut, xjOut, yjOut, dxOut, dyOut;
+  double *xiOutP, *yiOutP, *xjOutP, *yjOutP, *dxOutP, *dyOutP;
+  double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout;
+#ifdef ZCOORD
+  SEXP ziOut, zjOut, dzOut;
+  double *ziOutP, *zjOutP, *dzOutP;
+  double *ziout, *zjout, *dzout;
+#endif
+#endif  
+#ifdef DIST
+  SEXP dOut;
+  double *dOutP;
+  double *dout;
+#endif
+
+#ifdef THRESH
+  double s, s2;
+  int *tout;
+  SEXP tOut;
+  int *tOutP;
+#endif
+  /* protect R objects from garbage collector */
+  PROTECT(xx1     = AS_NUMERIC(xx1));
+  PROTECT(yy1     = AS_NUMERIC(yy1));
+  PROTECT(xx2     = AS_NUMERIC(xx2));
+  PROTECT(yy2     = AS_NUMERIC(yy2));
+#ifdef ZCOORD
+  PROTECT(zz1     = AS_NUMERIC(zz1));
+  PROTECT(zz2     = AS_NUMERIC(zz2));
+#endif
+
+  PROTECT(rr     = AS_NUMERIC(rr));
+  PROTECT(nguess = AS_INTEGER(nguess));
+#ifdef THRESH
+  PROTECT(ss     = AS_NUMERIC(ss));
+#define NINPUTS (2*SPACEDIM + 3)
+#else
+#define NINPUTS (2*SPACEDIM + 2)
+#endif
+
+  /* Translate arguments from R to C */
+
+  x1 = NUMERIC_POINTER(xx1);
+  y1 = NUMERIC_POINTER(yy1);
+  x2 = NUMERIC_POINTER(xx2);
+  y2 = NUMERIC_POINTER(yy2);
+#ifdef ZCOORD
+  z1 = NUMERIC_POINTER(zz1);
+  z2 = NUMERIC_POINTER(zz2);
+#endif
+
+  n1 = LENGTH(xx1);
+  n2 = LENGTH(xx2);
+  rmax = *(NUMERIC_POINTER(rr));
+  noutmax = *(INTEGER_POINTER(nguess));
+  
+  r2max = rmax * rmax;
+
+  rmaxplus = rmax + rmax/16.0;
+
+#ifdef THRESH
+  s = *(NUMERIC_POINTER(ss));
+  s2 = s * s;
+#endif
+
+  nout = 0;   /* nout is the next available storage location 
+		 and also the current length of the list */ 
+
+  if(n1 > 0 && n2 > 0 && noutmax > 0) {
+    /* allocate space */
+    iout = (int *) R_alloc(noutmax, sizeof(int));
+    jout = (int *) R_alloc(noutmax, sizeof(int));
+#ifdef COORDS
+    xiout =  (double *) R_alloc(noutmax, sizeof(double));
+    yiout =  (double *) R_alloc(noutmax, sizeof(double));
+    xjout =  (double *) R_alloc(noutmax, sizeof(double));
+    yjout =  (double *) R_alloc(noutmax, sizeof(double));
+    dxout =  (double *) R_alloc(noutmax, sizeof(double));
+    dyout =  (double *) R_alloc(noutmax, sizeof(double));
+#ifdef ZCOORD
+    ziout =  (double *) R_alloc(noutmax, sizeof(double));
+    zjout =  (double *) R_alloc(noutmax, sizeof(double));
+    dzout =  (double *) R_alloc(noutmax, sizeof(double));
+#endif
+#endif
+#ifdef DIST
+    dout  =  (double *) R_alloc(noutmax, sizeof(double));
+#endif
+#ifdef THRESH
+    tout  =  (int *) R_alloc(noutmax, sizeof(int));
+#endif
+    
+    jleft = 0;
+
+    i = 0; maxchunk = 0;
+
+    while(i < n1) {
+
+      R_CheckUserInterrupt();
+
+      maxchunk += 65536;
+      if(maxchunk > n1) maxchunk = n1;
+
+      for( ; i < maxchunk; i++) {
+
+	x1i = x1[i];
+	y1i = y1[i];
+#ifdef ZCOORD
+	z1i = z1[i];
+#endif
+
+	/* 
+	   adjust starting point jleft
+	*/
+	xleft = x1i - rmaxplus;
+	while((x2[jleft] < xleft) && (jleft+1 < n2))
+	  ++jleft;
+
+	/* 
+	   process from j = jleft until dx > rmax + epsilon
+	*/
+	for(j=jleft; j < n2; j++) {
+
+	  /* squared interpoint distance */
+	  dx = x2[j] - x1i;
+	  if(dx > rmaxplus)
+	    break;
+	  dx2 = dx * dx;
+	  dy = y2[j] - y1i;
+	  d2 = dx2 + dy * dy;
+#ifdef ZCOORD
+	    if(d2 <= r2max) {
+	      dz = z2[j] - z1i;
+	      d2 = d2 + dz * dz;
+#endif
+	      if(d2 <= r2max) {
+		/* add this (i, j) pair to output */
+		if(nout >= noutmax) {
+		  /* overflow; allocate more space */
+		  noutmaxold = noutmax;
+		  noutmax    = 2 * noutmax;
+		  iout  = intRealloc(iout,  noutmaxold, noutmax);
+		  jout  = intRealloc(jout,  noutmaxold, noutmax);
+#ifdef COORDS
+		  xiout = dblRealloc(xiout, noutmaxold, noutmax); 
+		  yiout = dblRealloc(yiout, noutmaxold, noutmax); 
+		  xjout = dblRealloc(xjout, noutmaxold, noutmax); 
+		  yjout = dblRealloc(yjout, noutmaxold, noutmax); 
+		  dxout = dblRealloc(dxout, noutmaxold, noutmax); 
+		  dyout = dblRealloc(dyout, noutmaxold, noutmax); 
+#ifdef ZCOORD
+		  ziout = dblRealloc(ziout, noutmaxold, noutmax); 
+		  zjout = dblRealloc(zjout, noutmaxold, noutmax); 
+		  dzout = dblRealloc(dzout, noutmaxold, noutmax); 
+#endif
+#endif
+#ifdef DIST
+		  dout  = dblRealloc(dout,  noutmaxold, noutmax); 
+#endif
+#ifdef THRESH
+		  tout  = intRealloc(tout,  noutmaxold, noutmax);
+#endif
+		}
+		iout[nout] = i + 1; /* R indexing */
+		jout[nout] = j + 1;
+#ifdef COORDS
+		xiout[nout] = x1i;
+		yiout[nout] = y1i;
+		xjout[nout] = x2[j];
+		yjout[nout] = y2[j];
+		dxout[nout] = dx;
+		dyout[nout] = dy;
+#ifdef ZCOORD
+		ziout[nout] = z1i;
+		zjout[nout] = z2[j];
+		dzout[nout] = dz;
+#endif
+#endif
+#ifdef DIST
+		dout[nout] = sqrt(d2);
+#endif
+#ifdef THRESH
+		tout[nout] = (d2 <= s2) ? 1 : 0;
+#endif
+		++nout;
+	      }
+#ifdef ZCOORD
+	    }
+#endif
+	}
+      }
+    }
+  }
+
+  /* return a list of vectors */
+  PROTECT(iOut  = NEW_INTEGER(nout));
+  PROTECT(jOut  = NEW_INTEGER(nout));
+#ifdef COORDS
+  PROTECT(xiOut = NEW_NUMERIC(nout));
+  PROTECT(yiOut = NEW_NUMERIC(nout));
+  PROTECT(xjOut = NEW_NUMERIC(nout));
+  PROTECT(yjOut = NEW_NUMERIC(nout));
+  PROTECT(dxOut = NEW_NUMERIC(nout));
+  PROTECT(dyOut = NEW_NUMERIC(nout));
+#ifdef ZCOORD
+  PROTECT(ziOut = NEW_NUMERIC(nout));
+  PROTECT(zjOut = NEW_NUMERIC(nout));
+  PROTECT(dzOut = NEW_NUMERIC(nout));
+#endif
+#endif
+#ifdef DIST
+  PROTECT(dOut  = NEW_NUMERIC(nout));
+#endif
+#ifdef THRESH
+  PROTECT(tOut = NEW_INTEGER(nout));
+#endif
+  if(nout > 0) {
+    iOutP  = INTEGER_POINTER(iOut);
+    jOutP  = INTEGER_POINTER(jOut);
+#ifdef COORDS
+    xiOutP = NUMERIC_POINTER(xiOut);
+    yiOutP = NUMERIC_POINTER(yiOut);
+    xjOutP = NUMERIC_POINTER(xjOut);
+    yjOutP = NUMERIC_POINTER(yjOut);
+    dxOutP = NUMERIC_POINTER(dxOut);
+    dyOutP = NUMERIC_POINTER(dyOut);
+#ifdef ZCOORD
+    ziOutP = NUMERIC_POINTER(ziOut);
+    zjOutP = NUMERIC_POINTER(zjOut);
+    dzOutP = NUMERIC_POINTER(dzOut);
+#endif
+#endif
+#ifdef DIST
+    dOutP  = NUMERIC_POINTER(dOut);
+#endif
+#ifdef THRESH
+    tOutP  = INTEGER_POINTER(tOut);
+#endif
+    for(m = 0; m < nout; m++) {
+      iOutP[m] = iout[m];
+      jOutP[m] = jout[m];
+#ifdef COORDS
+      xiOutP[m] = xiout[m];
+      yiOutP[m] = yiout[m];
+      xjOutP[m] = xjout[m];
+      yjOutP[m] = yjout[m];
+      dxOutP[m] = dxout[m];
+      dyOutP[m] = dyout[m];
+#ifdef ZCOORD
+      ziOutP[m] = ziout[m];
+      zjOutP[m] = zjout[m];
+      dzOutP[m] = dzout[m];
+#endif
+#endif
+#ifdef DIST
+      dOutP[m]  = dout[m];
+#endif
+#ifdef THRESH
+      tOutP[m]  = tout[m];
+#endif
+    }
+  }
+#define HEAD 2
+#ifdef THRESH
+#define NECK 1
+#else
+#define NECK 0
+#endif
+#ifdef COORDS
+#define MIDDLE (3*SPACEDIM)
+#else 
+#define MIDDLE 0
+#endif
+#ifdef DIST
+#define TAIL 1
+#else
+#define TAIL 0
+#endif
+
+  PROTECT(Out   = NEW_LIST(HEAD+NECK+MIDDLE+TAIL));
+
+  SET_VECTOR_ELT(Out, 0,  iOut);
+  SET_VECTOR_ELT(Out, 1,  jOut);
+
+#ifdef THRESH
+  SET_VECTOR_ELT(Out, HEAD,  tOut);
+#endif
+
+#ifdef COORDS
+#ifdef ZCOORD
+  SET_VECTOR_ELT(Out, HEAD+NECK,   xiOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+1, yiOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+2, ziOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+3, xjOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+4, yjOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+5, zjOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+6, dxOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+7, dyOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+8, dzOut);
+#else
+  SET_VECTOR_ELT(Out, HEAD+NECK,   xiOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+1, yiOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+2, xjOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+3, yjOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+4, dxOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+5, dyOut);
+#endif
+#endif
+#ifdef DIST
+  SET_VECTOR_ELT(Out, HEAD+NECK+MIDDLE, dOut);
+#endif
+
+  UNPROTECT(NINPUTS+1+HEAD+NECK+MIDDLE+TAIL);   /* 1 is for 'Out' itself */
+
+  return(Out);
+}
+
+#undef NINPUTS
+#undef HEAD
+#undef NECK
+#undef MIDDLE
+#undef TAIL
+
+
+
+/* ........................................................ */
+
+/*  
+    Alternative code for CLOSEFUN, based on algorithm in CROSSFUN
+
+*/
+
+#define ALT_ALGO(NAME) ALT_PREFIX(NAME)
+#define ALT_PREFIX(NAME) alt ## NAME
+
+SEXP ALT_ALGO(CLOSEFUN)(SEXP xx,
+		 SEXP yy,
+#ifdef ZCOORD
+		 SEXP zz,
+#endif
+		 SEXP rr,
+#ifdef THRESH
+		 SEXP ss,
+#endif
+		 SEXP nguess) 
+{
+  /* input vectors */
+  double *x, *y;
+#ifdef ZCOORD
+  double *z;
+#endif
+  /* lengths */
+  int n, nout, noutmax, noutmaxold, maxchunk;
+  /* distance parameter */
+  double rmax, r2max, rmaxplus;
+  /* indices */
+  int i, j, jleft, m;
+  /* temporary values */
+  double xi, yi, xleft, dx, dy, dx2, d2;
+#ifdef ZCOORD
+  double zi, dz;
+#endif
+  /* local storage */
+  int *iout, *jout;
+  /* R objects in return value */
+  SEXP Out, iOut, jOut;
+  /* external storage pointers */
+  int *iOutP, *jOutP;
+#ifdef COORDS
+  SEXP xiOut, yiOut, xjOut, yjOut, dxOut, dyOut;
+  double *xiOutP, *yiOutP, *xjOutP, *yjOutP, *dxOutP, *dyOutP;
+  double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout;
+#ifdef ZCOORD
+  SEXP ziOut, zjOut, dzOut;
+  double *ziOutP, *zjOutP, *dzOutP;
+  double *ziout, *zjout, *dzout;
+#endif
+#endif  
+#ifdef DIST
+  SEXP dOut;
+  double *dOutP;
+  double *dout;
+#endif
+
+#ifdef THRESH
+  double s, s2;
+  int *tout;
+  SEXP tOut;
+  int *tOutP;
+#endif
+  /* protect R objects from garbage collector */
+  PROTECT(xx     = AS_NUMERIC(xx));
+  PROTECT(yy     = AS_NUMERIC(yy));
+#ifdef ZCOORD
+  PROTECT(zz     = AS_NUMERIC(zz));
+#endif
+
+  PROTECT(rr     = AS_NUMERIC(rr));
+  PROTECT(nguess = AS_INTEGER(nguess));
+#ifdef THRESH
+  PROTECT(ss     = AS_NUMERIC(ss));
+#define NINPUTS (SPACEDIM + 3)
+#else
+#define NINPUTS (SPACEDIM + 2)
+#endif
+
+  /* Translate arguments from R to C */
+
+  x = NUMERIC_POINTER(xx);
+  y = NUMERIC_POINTER(yy);
+#ifdef ZCOORD
+  z = NUMERIC_POINTER(zz);
+#endif
+
+  n = LENGTH(xx);
+  rmax = *(NUMERIC_POINTER(rr));
+  noutmax = *(INTEGER_POINTER(nguess));
+  
+  r2max = rmax * rmax;
+
+  rmaxplus = rmax + rmax/16.0;
+
+#ifdef THRESH
+  s = *(NUMERIC_POINTER(ss));
+  s2 = s * s;
+#endif
+
+  nout = 0;   /* nout is the next available storage location 
+		 and also the current length of the list */ 
+
+  if(n > 0 && noutmax > 0) {
+    /* allocate space */
+    iout = (int *) R_alloc(noutmax, sizeof(int));
+    jout = (int *) R_alloc(noutmax, sizeof(int));
+#ifdef COORDS
+    xiout =  (double *) R_alloc(noutmax, sizeof(double));
+    yiout =  (double *) R_alloc(noutmax, sizeof(double));
+    xjout =  (double *) R_alloc(noutmax, sizeof(double));
+    yjout =  (double *) R_alloc(noutmax, sizeof(double));
+    dxout =  (double *) R_alloc(noutmax, sizeof(double));
+    dyout =  (double *) R_alloc(noutmax, sizeof(double));
+#ifdef ZCOORD
+    ziout =  (double *) R_alloc(noutmax, sizeof(double));
+    zjout =  (double *) R_alloc(noutmax, sizeof(double));
+    dzout =  (double *) R_alloc(noutmax, sizeof(double));
+#endif
+#endif
+#ifdef DIST
+    dout  =  (double *) R_alloc(noutmax, sizeof(double));
+#endif
+#ifdef THRESH
+    tout  =  (int *) R_alloc(noutmax, sizeof(int));
+#endif
+    
+    jleft = 0;
+
+    i = 0; maxchunk = 0;
+
+    while(i < n) {
+
+      R_CheckUserInterrupt();
+
+      maxchunk += 65536;
+      if(maxchunk > n) maxchunk = n;
+
+      for( ; i < maxchunk; i++) {
+
+	xi = x[i];
+	yi = y[i];
+#ifdef ZCOORD
+	zi = z[i];
+#endif
+
+	/* 
+	   adjust starting point jleft
+	*/
+	xleft = xi - rmaxplus;
+	while((x[jleft] < xleft) && (jleft+1 < n))
+	  ++jleft;
+
+	/* 
+	   process from j = jleft until dx > rmax + epsilon
+	*/
+	for(j=jleft; j < n; j++) {
+
+	  /* squared interpoint distance */
+	  dx = x[j] - xi;
+	  if(dx > rmaxplus)
+	    break;
+	  dx2 = dx * dx;
+	  dy = y[j] - yi;
+	  d2 = dx2 + dy * dy;
+#ifdef ZCOORD
+	    if(d2 <= r2max) {
+	      dz = z[j] - zi;
+	      d2 = d2 + dz * dz;
+#endif
+	      if(d2 <= r2max) {
+		/* add this (i, j) pair to output */
+		if(nout >= noutmax) {
+		  /* overflow; allocate more space */
+		  noutmaxold = noutmax;
+		  noutmax    = 2 * noutmax;
+		  iout  = intRealloc(iout,  noutmaxold, noutmax);
+		  jout  = intRealloc(jout,  noutmaxold, noutmax);
+#ifdef COORDS
+		  xiout = dblRealloc(xiout, noutmaxold, noutmax); 
+		  yiout = dblRealloc(yiout, noutmaxold, noutmax); 
+		  xjout = dblRealloc(xjout, noutmaxold, noutmax); 
+		  yjout = dblRealloc(yjout, noutmaxold, noutmax); 
+		  dxout = dblRealloc(dxout, noutmaxold, noutmax); 
+		  dyout = dblRealloc(dyout, noutmaxold, noutmax); 
+#ifdef ZCOORD
+		  ziout = dblRealloc(ziout, noutmaxold, noutmax); 
+		  zjout = dblRealloc(zjout, noutmaxold, noutmax); 
+		  dzout = dblRealloc(dzout, noutmaxold, noutmax); 
+#endif
+#endif
+#ifdef DIST
+		  dout  = dblRealloc(dout,  noutmaxold, noutmax); 
+#endif
+#ifdef THRESH
+		  tout  = intRealloc(tout,  noutmaxold, noutmax);
+#endif
+		}
+		iout[nout] = i + 1; /* R indexing */
+		jout[nout] = j + 1;
+#ifdef COORDS
+		xiout[nout] = xi;
+		yiout[nout] = yi;
+		xjout[nout] = x[j];
+		yjout[nout] = y[j];
+		dxout[nout] = dx;
+		dyout[nout] = dy;
+#ifdef ZCOORD
+		ziout[nout] = zi;
+		zjout[nout] = z[j];
+		dzout[nout] = dz;
+#endif
+#endif
+#ifdef DIST
+		dout[nout] = sqrt(d2);
+#endif
+#ifdef THRESH
+		tout[nout] = (d2 <= s2) ? 1 : 0;
+#endif
+		++nout;
+	      }
+#ifdef ZCOORD
+	    }
+#endif
+	}
+      }
+    }
+  }
+
+  /* return a list of vectors */
+  PROTECT(iOut  = NEW_INTEGER(nout));
+  PROTECT(jOut  = NEW_INTEGER(nout));
+#ifdef COORDS
+  PROTECT(xiOut = NEW_NUMERIC(nout));
+  PROTECT(yiOut = NEW_NUMERIC(nout));
+  PROTECT(xjOut = NEW_NUMERIC(nout));
+  PROTECT(yjOut = NEW_NUMERIC(nout));
+  PROTECT(dxOut = NEW_NUMERIC(nout));
+  PROTECT(dyOut = NEW_NUMERIC(nout));
+#ifdef ZCOORD
+  PROTECT(ziOut = NEW_NUMERIC(nout));
+  PROTECT(zjOut = NEW_NUMERIC(nout));
+  PROTECT(dzOut = NEW_NUMERIC(nout));
+#endif
+#endif
+#ifdef DIST
+  PROTECT(dOut  = NEW_NUMERIC(nout));
+#endif
+#ifdef THRESH
+  PROTECT(tOut = NEW_INTEGER(nout));
+#endif
+  if(nout > 0) {
+    iOutP  = INTEGER_POINTER(iOut);
+    jOutP  = INTEGER_POINTER(jOut);
+#ifdef COORDS
+    xiOutP = NUMERIC_POINTER(xiOut);
+    yiOutP = NUMERIC_POINTER(yiOut);
+    xjOutP = NUMERIC_POINTER(xjOut);
+    yjOutP = NUMERIC_POINTER(yjOut);
+    dxOutP = NUMERIC_POINTER(dxOut);
+    dyOutP = NUMERIC_POINTER(dyOut);
+#ifdef ZCOORD
+    ziOutP = NUMERIC_POINTER(ziOut);
+    zjOutP = NUMERIC_POINTER(zjOut);
+    dzOutP = NUMERIC_POINTER(dzOut);
+#endif
+#endif
+#ifdef DIST
+    dOutP  = NUMERIC_POINTER(dOut);
+#endif
+#ifdef THRESH
+    tOutP  = INTEGER_POINTER(tOut);
+#endif
+    for(m = 0; m < nout; m++) {
+      iOutP[m] = iout[m];
+      jOutP[m] = jout[m];
+#ifdef COORDS
+      xiOutP[m] = xiout[m];
+      yiOutP[m] = yiout[m];
+      xjOutP[m] = xjout[m];
+      yjOutP[m] = yjout[m];
+      dxOutP[m] = dxout[m];
+      dyOutP[m] = dyout[m];
+#ifdef ZCOORD
+      ziOutP[m] = ziout[m];
+      zjOutP[m] = zjout[m];
+      dzOutP[m] = dzout[m];
+#endif
+#endif
+#ifdef DIST
+      dOutP[m]  = dout[m];
+#endif
+#ifdef THRESH
+      tOutP[m]  = tout[m];
+#endif
+    }
+  }
+#define HEAD 2
+#ifdef THRESH
+#define NECK 1
+#else
+#define NECK 0
+#endif
+#ifdef COORDS
+#define MIDDLE (3*SPACEDIM)
+#else 
+#define MIDDLE 0
+#endif
+#ifdef DIST
+#define TAIL 1
+#else
+#define TAIL 0
+#endif
+
+  PROTECT(Out   = NEW_LIST(HEAD+NECK+MIDDLE+TAIL));
+
+  SET_VECTOR_ELT(Out, 0,  iOut);
+  SET_VECTOR_ELT(Out, 1,  jOut);
+
+#ifdef THRESH
+  SET_VECTOR_ELT(Out, HEAD,  tOut);
+#endif
+
+#ifdef COORDS
+#ifdef ZCOORD
+  SET_VECTOR_ELT(Out, HEAD+NECK,   xiOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+1, yiOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+2, ziOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+3, xjOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+4, yjOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+5, zjOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+6, dxOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+7, dyOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+8, dzOut);
+#else
+  SET_VECTOR_ELT(Out, HEAD+NECK,   xiOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+1, yiOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+2, xjOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+3, yjOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+4, dxOut);
+  SET_VECTOR_ELT(Out, HEAD+NECK+5, dyOut);
+#endif
+#endif
+#ifdef DIST
+  SET_VECTOR_ELT(Out, HEAD+NECK+MIDDLE, dOut);
+#endif
+
+  UNPROTECT(NINPUTS+1+HEAD+NECK+MIDDLE+TAIL);   /* 1 is for 'Out' itself */
+
+  return(Out);
+}
+
+#undef NINPUTS
+#undef HEAD
+#undef NECK
+#undef MIDDLE
+#undef TAIL
+
+#undef ALT_ALGO
+#undef ALT_PREFIX
diff --git a/src/closepair.c b/src/closepair.c
new file mode 100755
index 0000000..9bbf8e2
--- /dev/null
+++ b/src/closepair.c
@@ -0,0 +1,538 @@
+/*
+
+  closepair.c
+
+  $Revision: 1.33 $     $Date: 2016/03/28 03:46:26 $
+
+  Assumes point pattern is sorted in increasing order of x coordinate
+
+  paircount()    count the total number of pairs (i, j) with distance < rmax
+
+  Cclosepaircounts
+                count for each i the number of j with distance < rmax
+
+  crosscount()   count number of close pairs in two patterns
+
+  (note: Ccrosspaircounts is defined in Estrauss.c)
+
+  duplicatedxy() find duplicated (x,y) pairs
+
+  Fclosepairs()  extract close pairs of coordinates 
+                 .C interface - output vectors have Fixed length 
+
+  Fcrosspairs()  extract close pairs in two patterns 
+                 .C interface - output vectors have Fixed length 
+
+  Vclosepairs()  extract close pairs of coordinates 
+                 .Call interface - output vectors have Variable length 
+
+  Vcrosspairs()  extract close pairs in two patterns 
+                 .Call interface - output vectors have Variable length 
+
+*/
+
+#include <R.h>
+#include <Rdefines.h>
+#include <R_ext/Utils.h>
+
+#define OK 0
+#define ERR_OVERFLOW 1
+#define ERR_ALLOC 2
+
+#define FAILED(X) ((void *)(X) == (void *)NULL)
+
+#define intRealloc(PTR, OLDLENGTH, NEWLENGTH) \
+  (int *) S_realloc((char *) PTR, NEWLENGTH, OLDLENGTH, sizeof(int))
+
+#define dblRealloc(PTR, OLDLENGTH, NEWLENGTH) \
+  (double *) S_realloc((char *) PTR, NEWLENGTH, OLDLENGTH, sizeof(double))
+
+double sqrt();
+
+/* count TOTAL number of close pairs */
+
+void paircount(nxy, x, y, rmaxi, count) 
+     /* inputs */
+     int *nxy;         /* number of (x,y) points */
+     double *x, *y;    /* (x,y) coordinates */
+     double *rmaxi;    /* maximum distance */
+     /* output */
+     int *count;
+{
+  int n, maxchunk, i, j, counted;
+  double xi, yi, rmax, r2max, dx, dy, a;
+
+  n = *nxy;
+  rmax = *rmaxi;
+  r2max = rmax * rmax;
+
+  *count = counted = 0;
+  if(n == 0) 
+    return;
+
+  /* loop in chunks of 2^16 */
+
+  i = 0; maxchunk = 0; 
+
+  while(i < n) {
+
+    R_CheckUserInterrupt();
+
+    maxchunk += 65536; 
+    if(maxchunk > n) maxchunk = n;
+
+    for(; i < maxchunk; i++) {
+
+      xi = x[i];
+      yi = y[i];
+
+      if(i > 0) { 
+	/* scan backwards from i */
+	for(j = i - 1; j >= 0; j--) {
+	  dx = x[j] - xi;
+	  a = r2max - dx * dx;
+	  if(a < 0) 
+	    break;
+	  dy = y[j] - yi;
+	  a -= dy * dy;
+	  if(a >= 0)
+	    ++counted;
+	}
+      }
+      if(i + 1 < n) {
+	/* scan forwards from i */
+	for(j = i + 1; j < n; j++) {
+	  dx = x[j] - xi;
+	  a = r2max - dx * dx;
+	  if(a < 0) 
+	    break;
+	  dy = y[j] - yi;
+	  a -= dy * dy;
+	  if(a >= 0)
+	    ++counted;
+	}
+      } 
+      /* end loop over i */
+    }
+  } 
+
+  *count = counted;
+}
+
+/* count for each i the number of j closer than distance r */
+
+void Cclosepaircounts(nxy, x, y, rmaxi, counts) 
+     /* inputs */
+     int *nxy;         /* number of (x,y) points */
+     double *x, *y;    /* (x,y) coordinates */
+     double *rmaxi;    /* maximum distance */
+     /* output VECTOR, assumed initialised to 0 */
+     int *counts;
+{
+  int n, maxchunk, i, j;
+  double xi, yi, rmax, r2max, dx, dy, a;
+
+  n = *nxy;
+  rmax = *rmaxi;
+  r2max = rmax * rmax;
+
+  if(n == 0) 
+    return;
+
+  /* loop in chunks of 2^16 */
+
+  i = 0; maxchunk = 0; 
+
+  while(i < n) {
+
+    R_CheckUserInterrupt();
+
+    maxchunk += 65536; 
+    if(maxchunk > n) maxchunk = n;
+
+    for(; i < maxchunk; i++) {
+
+      xi = x[i];
+      yi = y[i];
+
+      if(i > 0) { 
+	/* scan backwards from i */
+	for(j = i - 1; j >= 0; j--) {
+	  dx = x[j] - xi;
+	  a = r2max - dx * dx;
+	  if(a < 0) 
+	    break;
+	  dy = y[j] - yi;
+	  a -= dy * dy;
+	  if(a >= 0)
+	    (counts[i])++;
+	}
+      }
+      if(i + 1 < n) {
+	/* scan forwards from i */
+	for(j = i + 1; j < n; j++) {
+	  dx = x[j] - xi;
+	  a = r2max - dx * dx;
+	  if(a < 0) 
+	    break;
+	  dy = y[j] - yi;
+	  a -= dy * dy;
+	  if(a >= 0)
+	    (counts[i])++;
+	}
+      } 
+      /* end loop over i */
+    }
+  } 
+}
+
+
+/*
+  analogue for two different point patterns
+*/
+
+
+void crosscount(nn1, x1, y1, nn2, x2, y2, rmaxi, count) 
+     /* inputs */
+     int *nn1, *nn2;
+     double *x1, *y1, *x2, *y2, *rmaxi;
+     /* output */
+     int *count;
+{
+  int n1, n2, maxchunk, i, j, jleft, counted;
+  double x1i, y1i, rmax, r2max, xleft, dx, dy, a;
+
+  n1 = *nn1;
+  n2 = *nn2;
+  rmax = *rmaxi;
+  r2max = rmax * rmax;
+
+  *count = counted = 0;
+
+  if(n1 == 0 || n2 == 0) 
+    return;
+
+  jleft = 0;
+
+  i = 0; maxchunk = 0; 
+
+  while(i < n1) {
+
+    R_CheckUserInterrupt();
+
+    maxchunk += 65536; 
+    if(maxchunk > n1) maxchunk = n1;
+
+    for(; i < maxchunk; i++) {
+  
+      x1i = x1[i];
+      y1i = y1[i];
+
+      /* 
+	 adjust starting index
+      */
+      xleft = x1i - rmax;
+      while((x2[jleft] < xleft) && (jleft+1 < n2))
+	++jleft;
+
+      /* 
+	 process from j=jleft until dx > rmax
+      */
+      for(j=jleft; j < n2; j++) {
+	dx = x2[j] - x1i;
+	a  = r2max - dx * dx;
+	if(a < 0)
+	  break;
+	dy = y2[j] - y1i;
+	a -= dy * dy;
+	if(a > 0) 
+	  ++counted;
+      }
+    }
+  }
+  *count = counted;
+}
+
+
+
+/*
+  Find duplicated locations
+
+   xx, yy are not sorted
+*/
+
+
+void duplicatedxy(n, x, y, out) 
+     /* inputs */
+     int *n;
+     double *x, *y;
+     /* output */
+     int *out;  /* logical vector */
+{
+  int m, i, j;
+  double xi, yi;
+  m = *n;
+  for(i = 1; i < m; i++) {
+    R_CheckUserInterrupt();
+    xi = x[i];
+    yi = y[i];
+    for(j = 0; j < i; j++) 
+      if((x[j] == xi) && (y[j] == yi)) 
+	break;
+    if(j == i) out[i] = 0; else out[i] = 1;
+  }
+}
+
+/* ............... fixed output length .............. */
+
+void Fclosepairs(nxy, x, y, r, noutmax, 
+	      nout, iout, jout, 
+	      xiout, yiout, xjout, yjout, dxout, dyout, dout,
+	      status)
+     /* inputs */
+     int *nxy, *noutmax;
+     double *x, *y, *r;
+     /* outputs */
+     int *nout, *iout, *jout;
+     double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout, *dout;
+     int *status;
+{
+  int n, k, kmax, maxchunk, i, j;
+  double xi, yi, rmax, r2max, dx, dy, dx2, d2;
+
+  n = *nxy;
+  rmax = *r;
+  r2max = rmax * rmax;
+
+  *status = OK;
+  *nout = 0;
+  k = 0;   /* k is the next available storage location 
+              and also the current length of the list */ 
+  kmax = *noutmax;
+
+  if(n == 0) 
+    return;
+
+  /* loop in chunks of 2^16 */
+
+  i = 0; maxchunk = 0; 
+  while(i < n) {
+
+    R_CheckUserInterrupt();
+
+    maxchunk += 65536; 
+    if(maxchunk > n) maxchunk = n;
+
+    for(; i < maxchunk; i++) {
+
+      xi = x[i];
+      yi = y[i];
+
+      if(i > 0) {
+	/* scan backwards */
+	for(j = i - 1; j >= 0; j--) {
+	  dx = x[j] - xi;
+	  dx2 = dx * dx;
+	  if(dx2 > r2max)
+	    break;
+	  dy = y[j] - yi;
+	  d2 = dx2 + dy * dy;
+	  if(d2 <= r2max) {
+	    /* add this (i, j) pair to output */
+	    if(k >= kmax) {
+	      *nout = k;
+	      *status = ERR_OVERFLOW;
+	      return;
+	    }
+	    jout[k] = j + 1;  /* R indexing */
+	    iout[k] = i + 1;
+	    xiout[k] = xi;
+	    yiout[k] = yi;
+	    xjout[k] = x[j];
+	    yjout[k] = y[j];
+	    dxout[k] = dx;
+	    dyout[k] = dy;
+	    dout[k] = sqrt(d2);
+	    ++k;
+	  }
+	}
+      }
+    
+      if(i + 1 < n) {
+	/* scan forwards */
+	for(j = i + 1; j < n; j++) {
+	  dx = x[j] - xi;
+	  dx2 = dx * dx;
+	  if(dx2 > r2max)
+	    break;
+	  dy = y[j] - yi;
+	  d2 = dx2 + dy * dy;
+	  if(d2 <= r2max) {
+	    /* add this (i, j) pair to output */
+	    if(k >= kmax) {
+	      *nout = k;
+	      *status = ERR_OVERFLOW;
+	      return;
+	    }
+	    jout[k] = j + 1;  /* R indexing */
+	    iout[k] = i + 1; 
+	    xiout[k] = xi;
+	    yiout[k] = yi;
+	    xjout[k] = x[j];
+	    yjout[k] = y[j];
+	    dxout[k] = dx;
+	    dyout[k] = dy;
+	    dout[k] = sqrt(d2);
+	    ++k;
+	  }
+	}
+      }
+    }
+  }
+  *nout = k;
+}
+
+void Fcrosspairs(nn1, x1, y1, nn2, x2, y2, rmaxi, noutmax, 
+	      nout, iout, jout, 
+	      xiout, yiout, xjout, yjout, dxout, dyout, dout,
+	      status)
+     /* inputs */
+     int *nn1, *nn2, *noutmax;
+     double *x1, *y1, *x2, *y2, *rmaxi;
+     /* outputs */
+     int *nout, *iout, *jout;
+     double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout, *dout;
+     int *status;
+{
+  int n1, n2, maxchunk, k, kmax, i, j, jleft;
+  double x1i, y1i, rmax, r2max, xleft, dx, dy, dx2, d2;
+
+  n1 = *nn1;
+  n2 = *nn2;
+  rmax = *rmaxi;
+  r2max = rmax * rmax;
+
+  *status = OK;
+  *nout = 0;
+  k = 0;   /* k is the next available storage location 
+              and also the current length of the list */ 
+  kmax = *noutmax;
+
+  if(n1 == 0 || n2 == 0) 
+    return;
+
+  jleft = 0;
+
+  i = 0; maxchunk = 0; 
+
+  while(i < n1) {
+
+    R_CheckUserInterrupt();
+
+    maxchunk += 65536; 
+    if(maxchunk > n1) maxchunk = n1;
+
+    for(; i < maxchunk; i++) {
+
+      x1i = x1[i];
+      y1i = y1[i];
+
+      /* 
+	 adjust starting position jleft
+
+      */
+      xleft = x1i - rmax;
+      while((x2[jleft] < xleft) && (jleft+1 < n2))
+	++jleft;
+
+
+      /* 
+	 process from j=jleft until dx > rmax
+      */
+      for(j=jleft; j < n2; j++) {
+	dx = x2[j] - x1i;
+	dx2 = dx * dx;
+	if(dx2 > r2max)
+	  break;
+	dy = y2[j] - y1i;
+	d2 = dx2 + dy * dy;
+	if(d2 <= r2max) {
+	  /* add this (i, j) pair to output */
+	  if(k >= kmax) {
+	    *nout = k;
+	    *status = ERR_OVERFLOW;
+	    return;
+	  }
+	  jout[k] = j + 1;  /* R indexing */
+	  iout[k] = i + 1;
+	  xiout[k] = x1i;
+	  yiout[k] = y1i;
+	  xjout[k] = x2[j];
+	  yjout[k] = y2[j];
+	  dxout[k] = dx;
+	  dyout[k] = dy;
+	  dout[k] = sqrt(d2);
+	  ++k;
+	}
+      }
+    }
+  }
+  *nout = k;
+}
+
+
+/* ........  versions that return variable-length vectors ......... */
+
+#define SINGLE
+
+/* return i, j only */
+#define CLOSEFUN VcloseIJpairs
+#define CROSSFUN VcrossIJpairs
+#undef THRESH
+#undef COORDS
+#undef DIST
+#include "closefuns.h"
+#undef CLOSEFUN
+#undef CROSSFUN
+#undef THRESH
+#undef COORDS
+#undef DIST
+
+/* return i, j, d */
+#define CLOSEFUN VcloseIJDpairs
+#define CROSSFUN VcrossIJDpairs
+#undef THRESH
+#undef COORDS
+#define DIST
+#include "closefuns.h"
+#undef CLOSEFUN
+#undef CROSSFUN
+#undef THRESH
+#undef COORDS
+#undef DIST
+
+/* return i, j, xi, yi, xj, yj, dx, dy, d */
+#define CLOSEFUN Vclosepairs
+#define CROSSFUN Vcrosspairs
+#undef THRESH
+#define COORDS
+#define DIST
+#include "closefuns.h"
+#undef CLOSEFUN
+#undef CROSSFUN
+#undef THRESH
+#undef COORDS
+#undef DIST
+
+/* return i, j, t where t = 1{d < s} */
+
+#define CLOSEFUN Vclosethresh
+#define CROSSFUN Vcrossthresh
+#define THRESH
+#undef COORDS
+#include "closefuns.h"
+#undef CLOSEFUN
+#undef CROSSFUN
+#undef THRESH
+#undef COORDS
+
diff --git a/src/connect.c b/src/connect.c
new file mode 100755
index 0000000..b8a388c
--- /dev/null
+++ b/src/connect.c
@@ -0,0 +1,143 @@
+/*
+       connect.c
+
+       Connected component transforms
+
+       cocoImage:   connected component transform of a discrete binary image
+                   (8-connected topology)
+
+       cocoGraph: connected component labels for a discrete graph
+                   specified by a list of edges
+       
+       $Revision: 1.8 $ $Date: 2013/05/27 02:09:10 $
+
+       
+*/
+
+#include <math.h>
+#include <R.h>
+#include <Rdefines.h>
+#include <R_ext/Utils.h>
+
+#include "raster.h"
+void   shape_raster();
+
+#include "yesno.h"
+
+
+/* workhorse function for cocoImage */
+
+void
+comcommer(im)
+     Raster  *im;            
+     /* raster must have been dimensioned by shape_raster() */
+     /* Pixel values assumed to be 0 in background, and 
+        distinct nonzero integers in foreground */
+{
+  int	j,k;
+  int rmin, rmax, cmin, cmax;
+  int label, curlabel, minlabel;
+  int nchanged;
+
+  /* image boundaries */
+  rmin = im->rmin;
+  rmax = im->rmax;
+  cmin = im->cmin;
+  cmax = im->cmax;
+
+#define ENTRY(ROW, COL) Entry(*im, ROW, COL, int)
+
+#define UPDATE(ROW,COL,BEST,NEW) \
+     NEW = ENTRY(ROW, COL); \
+     if(NEW != 0 && NEW < BEST) \
+       BEST = NEW
+
+  nchanged = 1;
+
+  while(nchanged >0) {
+    nchanged = 0;
+    R_CheckUserInterrupt();
+    for(j = rmin; j <= rmax; j++) {
+      for(k = cmin; k <= cmax; k++) {
+	curlabel = ENTRY(j, k);
+	if(curlabel != 0) {
+	  minlabel = curlabel;
+	  UPDATE(j-1, k-1, minlabel, label);
+	  UPDATE(j-1, k,   minlabel, label);
+	  UPDATE(j-1, k+1, minlabel, label);
+	  UPDATE(j,   k-1, minlabel, label);
+	  UPDATE(j,   k,   minlabel, label);
+	  UPDATE(j,   k+1, minlabel, label);
+	  UPDATE(j+1, k-1, minlabel, label);
+	  UPDATE(j+1, k,   minlabel, label);
+	  UPDATE(j+1, k+1, minlabel, label);
+	  if(minlabel < curlabel) {
+	    ENTRY(j, k) = minlabel;
+	    nchanged++;
+	  }
+	}
+      }
+    }
+  }
+}
+
+void cocoImage(mat, nr, nc)
+     int   *mat;        /* input:  binary image */
+     int *nr, *nc;      /* raster dimensions
+			   EXCLUDING margin of 1 on each side */
+{
+  Raster im;
+
+  shape_raster( &im, (void *) mat, 
+		(double) 1, (double) 1,
+		(double) *nc, (double) *nr, 
+		*nr+2, *nc+2, 1, 1);
+  comcommer(&im);
+}	
+
+void cocoGraph(nv, ne, ie, je, label, status)
+     /* inputs */
+     int *nv;         /* number of graph vertices */
+     int *ne;         /* number of edges */
+     int *ie, *je;    /* vectors of indices of ends of each edge */ 
+     /* output */
+     int *label;      /* vector of component labels for each vertex */
+                      /* Component label is lowest serial number of
+			 any vertex in the connected component */
+     int *status;          /* 0 if OK, 1 if overflow */
+{
+  int Nv, Ne, i, j, k, niter, labi, labj, changed;
+  
+  Nv = *nv;
+  Ne = *ne;
+
+  /* initialise labels */
+  for(k = 0; k < Nv; k++)
+    label[k] = k;
+
+  for(niter = 0; niter < Nv; niter++) {
+    R_CheckUserInterrupt();
+    changed = NO;
+    for(k = 0; k < Ne; k++) {
+      i = ie[k];
+      j = je[k];
+      labi = label[i];
+      labj = label[j];
+      if(labi < labj) {
+	label[j] = labi;
+	changed = YES;
+      } else if(labj < labi) {
+	label[i] = labj;
+	changed = YES;
+      } 
+    }
+    if(!changed) {
+      /* algorithm has converged */
+      *status = 0;
+      return;
+    }
+  }
+  /* error exit */   
+  *status = 1;
+  return;
+}
diff --git a/src/constants.h b/src/constants.h
new file mode 100644
index 0000000..8dff6eb
--- /dev/null
+++ b/src/constants.h
@@ -0,0 +1,25 @@
+/* 
+   constants.h
+
+   Ensure that required constants are defined
+   (Insurance against flaky installations)
+
+   $Revision: 1.1 $  $Date: 2013/08/09 08:14:15 $
+
+*/
+
+#ifndef M_PI 
+#define M_PI 3.141592653589793
+#endif
+#ifndef M_PI_2 
+#define M_PI_2 1.570796326794897
+#endif
+#ifndef M_2_PI 
+#define M_2_PI (2.0/M_PI)
+#endif
+#ifndef M_2PI 
+#define M_2PI 6.283185307179586
+#endif
+
+   
+
diff --git a/src/corrections.c b/src/corrections.c
new file mode 100755
index 0000000..2a3d555
--- /dev/null
+++ b/src/corrections.c
@@ -0,0 +1,391 @@
+/*
+
+  corrections.c
+
+  Edge corrections
+
+  $Revision: 1.12 $     $Date: 2013/05/27 02:09:10 $
+
+ */
+
+#include <math.h>
+#include <R.h>
+#include <Rmath.h>
+#include <R_ext/Utils.h>
+
+#include "chunkloop.h"
+#include "yesno.h"
+#include "constants.h"
+
+#undef DEBUG
+
+
+/* This constant is defined in Rmath.h */
+#define TWOPI M_2PI
+
+#define MIN(A,B) (((A) < (B)) ? (A) : (B))
+
+#define BETWEEN(X,X0,X1) (((X) - (X0)) * ((X) - (X1)) <= 0)
+
+#define UNDER(X,Y,X0,Y0,X1,Y1) \
+  (((Y1) - (Y0)) * ((X) - (X0)) >= ((Y) - (Y0)) * ((X1)- (X0)))
+
+#define UNDERNEATH(X,Y,X0,Y0,X1,Y1) \
+    (((X0) < (X1)) ? UNDER(X,Y,X0,Y0,X1,Y1) : UNDER(X,Y,X1,Y1,X0,Y0))
+
+#define TESTINSIDE(X,Y,X0,Y0,X1,Y1) \
+  (BETWEEN(X,X0,X1) && UNDERNEATH(X, Y, X0, Y0, X1, Y1))
+
+
+void ripleybox(nx, x, y, rmat, nr, xmin, ymin, xmax, ymax,  epsilon, out)
+     /* inputs */
+     int *nx, *nr;  /* dimensions */
+     double *x, *y; /* coordinate vectors of length nx */
+     double *rmat;  /* matrix nx by nr  */
+     double *xmin, *ymin, *xmax, *ymax;  /* box dimensions */
+     double *epsilon; /* threshold for proximity to corner */
+     /* output */
+     double *out;  /* output matrix nx by nr */
+{
+  int i, j, n, m, ijpos, ncor, maxchunk;
+  double xx, yy, x0, y0, x1, y1, dL, dR, dU, dD, aL, aU, aD, aR, rij;
+  double cL, cU, cD, cR, bLU, bLD, bRU, bRD, bUL, bUR, bDL, bDR;
+  double corner, extang;
+  double eps;
+
+  n  = *nx;
+  m  = *nr;
+  x0 = *xmin;
+  y0 = *ymin;
+  x1 = *xmax;
+  y1 = *ymax;
+  eps = *epsilon;
+
+  OUTERCHUNKLOOP(i, n, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, n, maxchunk, 16384) {
+      xx = x[i];
+      yy = y[i];
+      /* 
+	 perpendicular distance from point to each edge of rectangle
+	 L = left, R = right, D = down, U = up
+      */
+      dL = xx - x0;
+      dR = x1 - xx;
+      dD = yy - y0;
+      dU = y1 - yy;
+
+      /*
+	test for corner of the rectangle
+      */
+#define ABS(X) (((X) >= 0) ? (X) : (-X))
+#define SMALL(X) ((ABS(X) < eps) ? 1 : 0)
+
+      ncor = SMALL(dL) + SMALL(dR) + SMALL(dD) + SMALL(dU);
+      corner = (ncor >= 2) ? YES : NO;
+  
+      /* 
+	 angle between 
+	 - perpendicular to edge of rectangle
+	 and 
+	 - line from point to corner of rectangle
+
+      */
+      bLU = atan2(dU, dL);
+      bLD = atan2(dD, dL);
+      bRU = atan2(dU, dR);
+      bRD = atan2(dD, dR);
+      bUL = atan2(dL, dU);
+      bUR = atan2(dR, dU);
+      bDL = atan2(dL, dD);
+      bDR = atan2(dR, dD);
+
+      for(j = 0; j < m; j++) {
+	ijpos = j * n + i;
+	rij = rmat[ijpos];
+#ifdef DEBUG
+	Rprintf("rij = %lf\n", rij);
+#endif
+	/*
+	  half the angle subtended by the intersection between
+	  the circle of radius r[i,j] centred on point i
+	  and each edge of the rectangle (prolonged to an infinite line)
+	*/
+	aL = (dL < rij) ? acos(dL/rij) : 0.0;
+	aR = (dR < rij) ? acos(dR/rij) : 0.0;
+	aD = (dD < rij) ? acos(dD/rij) : 0.0;
+	aU = (dU < rij) ? acos(dU/rij) : 0.0;
+#ifdef DEBUG
+	Rprintf("aL = %lf\n", aL);
+	Rprintf("aR = %lf\n", aR);
+	Rprintf("aD = %lf\n", aD);
+	Rprintf("aU = %lf\n", aU);
+#endif
+	/* apply maxima */
+
+	cL = MIN(aL, bLU) + MIN(aL, bLD);
+	cR = MIN(aR, bRU) + MIN(aR, bRD);
+	cU = MIN(aU, bUL) + MIN(aU, bUR);
+	cD = MIN(aD, bDL) + MIN(aD, bDR);
+#ifdef DEBUG
+	Rprintf("cL = %lf\n", cL);
+	Rprintf("cR = %lf\n", cR);
+	Rprintf("cD = %lf\n", cD);
+	Rprintf("cU = %lf\n", cU);
+#endif
+
+	/* total exterior angle over 2 pi */
+	extang = (cL + cR + cU + cD)/TWOPI;
+
+	/* add pi/2 for corners */
+	if(corner) 
+	  extang += 0.25;  /* (pi/2)/(2*pi); note 1/4 in integer arithmetic is 0 */
+
+#ifdef DEBUG
+	Rprintf("extang = %lf\n", extang);
+#endif
+	/* OK, now compute weight */
+	out[ijpos] = 1 / (1 - extang);
+      }
+    }
+  }
+}
+
+
+void ripleypoly(nc, xc, yc, nr, rmat, nseg, x0, y0, x1, y1, out) 
+     /* inputs */
+     int *nc, *nr, *nseg;
+     double *xc, *yc, *rmat;
+     double *x0, *y0, *x1, *y1;
+     /* output */
+     double *out;
+{
+  int n, m, i, j, k, l, nradperpt, ncut, nchanges, maxchunk;
+  double xcentre, ycentre, xx0, yy0, xx1, yy1, xx01, yy01;
+  double x, y, radius, radius2, dx0, dx1, dy0;
+  double a, b, c, t, det, sqrtdet, tmp;
+  double theta[6], delta[7], tmid[7];
+  double xtest, ytest, contrib, total;
+
+  n = *nc;
+  nradperpt = *nr;
+  m = *nseg;
+
+  OUTERCHUNKLOOP(i, n, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, n, maxchunk, 16384) {
+      xcentre = xc[i];
+      ycentre = yc[i];
+#ifdef DEBUG
+      Rprintf("centre = (%lf, %lf)\n", xcentre, ycentre);
+#endif
+
+      for(j = 0; j < nradperpt; j++) {
+	radius = rmat[ j * n + i];
+	radius2 = radius * radius;
+#ifdef DEBUG
+	Rprintf("radius = %lf\n", radius);
+#endif
+
+	total = 0.0;
+	for(k=0; k < m; k++) {
+#ifdef DEBUG
+	  Rprintf("k = %d\n", k);
+#endif
+	  ncut = 0;
+	  xx0 = x0[k];
+	  yy0 = y0[k];
+	  xx1 = x1[k];
+	  yy1 = y1[k];
+#ifdef DEBUG
+	  Rprintf("(%lf,%lf) to (%lf,%lf)\n", xx0, yy0, xx1, yy1);
+#endif
+	  /* intersection with left edge */
+	  dx0 = xx0 - xcentre;
+	  det = radius2 - dx0 * dx0;
+	  if(det > 0) {
+	    sqrtdet = sqrt(det);
+	    y = ycentre + sqrtdet;
+	    if(y < yy0) {
+	      theta[ncut] = atan2(y - ycentre, dx0);
+#ifdef DEBUG
+	      Rprintf("cut left at theta= %lf\n", theta[ncut]);
+#endif
+	      ncut++;
+	    }
+	    y = ycentre - sqrtdet;
+	    if(y < yy0) {
+	      theta[ncut] = atan2(y-ycentre, dx0);
+#ifdef DEBUG
+	      Rprintf("cut left at theta= %lf\n", theta[ncut]);
+#endif
+	      ncut++;
+	    }
+	  } else if(det == 0) {
+	    if(ycentre < yy0) {
+	      theta[ncut] = atan2(0.0, dx0);
+#ifdef DEBUG
+	      Rprintf("tangent left at theta= %lf\n", theta[ncut]);
+#endif
+	      ncut++;
+	    }
+	  }
+	  /* intersection with right edge */
+	  dx1 = xx1 - xcentre;
+	  det = radius2 - dx1 * dx1;
+	  if(det > 0) {
+	    sqrtdet = sqrt(det);
+	    y = ycentre + sqrtdet;
+	    if(y < yy1) {
+	      theta[ncut] = atan2(y - ycentre, dx1);
+#ifdef DEBUG
+	      Rprintf("cut right at theta= %lf\n", theta[ncut]);
+#endif
+	      ncut++;
+	    }
+	    y = ycentre - sqrtdet;
+	    if(y < yy1) {
+	      theta[ncut] = atan2(y - ycentre, dx1);
+#ifdef DEBUG
+	      Rprintf("cut right at theta= %lf\n", theta[ncut]);
+#endif
+	      ncut++;
+	    }
+	  } else if(det == 0) {
+	    if(ycentre < yy1) {
+	      theta[ncut] = atan2(0.0, dx1);
+#ifdef DEBUG
+	      Rprintf("tangent right at theta= %lf\n", theta[ncut]);
+#endif
+	      ncut++;
+	    }
+	  }
+	  /* intersection with top segment */
+	  xx01 = xx1 - xx0;
+	  yy01 = yy1 - yy0;
+	  dy0  = yy0 - ycentre;
+	  a = xx01 * xx01 + yy01 * yy01;
+	  b = 2 * (xx01 * dx0 + yy01 * dy0);
+	  c = dx0 * dx0 + dy0 * dy0 - radius2;
+	  det = b * b - 4 * a * c;
+	  if(det > 0) {
+	    sqrtdet = sqrt(det);
+	    t = (sqrtdet - b)/(2 * a);
+	    if(t >= 0 && t <= 1) {
+	      x = xx0 + t * xx01;
+	      y = yy0 + t * yy01;
+	      theta[ncut] = atan2(y - ycentre, x - xcentre);
+#ifdef DEBUG
+	      Rprintf("hits segment: t = %lf, theta = %lf\n", 
+		      t, theta[ncut]);
+#endif
+	      ++ncut;
+	    }
+	    t = (-sqrtdet - b)/(2 * a);
+	    if(t >= 0 && t <= 1) {
+	      x = xx0 + t * xx01;
+	      y = yy0 + t * yy01;
+	      theta[ncut] = atan2(y - ycentre, x - xcentre);
+#ifdef DEBUG
+	      Rprintf("hits segment: t = %lf, theta = %lf\n", 
+		      t, theta[ncut]);
+#endif
+	      ++ncut;
+	    }
+	  } else if(det == 0) {
+	    t = - b/(2 * a);
+	    if(t >= 0 && t <= 1) {
+	      x = xx0 + t * xx01;
+	      y = yy0 + t * yy01;
+	      theta[ncut] = atan2(y - ycentre, x - xcentre);
+#ifdef DEBUG
+	      Rprintf("tangent to segment: t = %lf, theta = %lf\n", 
+		      t, theta[ncut]);
+#endif
+	      ++ncut;
+	    }
+	  }
+	  /* for safety, force all angles to be in range [0, 2 * pi] */
+	  if(ncut > 0) 
+	    for(l = 0; l < ncut; l++)
+	      if(theta[l] < 0) 
+		theta[l] += TWOPI;
+
+	  /* sort angles */
+	  if(ncut > 1) {
+	    do {
+	      nchanges = 0;
+	      for(l = 0; l < ncut - 1; l++) {
+		if(theta[l] > theta[l+1]) {
+		  /* swap */
+		  ++nchanges;
+		  tmp = theta[l];
+		  theta[l] = theta[l+1];
+		  theta[l+1] = tmp;
+		}
+	      }
+	    } while(nchanges > 0);
+	  }
+#ifdef DEBUG
+	  if(ncut > 0) {
+	    for(l = 0; l < ncut; l++)
+	      Rprintf("theta[%d] = %lf\n", l, theta[l]);
+	  }
+#endif
+	  /* compute length of circumference inside polygon */
+	  if(ncut == 0) {
+	    /* entire circle is either in or out */
+	    xtest = xcentre + radius;
+	    ytest = ycentre;
+	    if(TESTINSIDE(xtest, ytest, xx0, yy0, xx1, yy1)) 
+	      contrib = TWOPI;
+	    else 
+	      contrib = 0.0;
+	  } else {
+	    /* find midpoints and lengths of pieces (adding theta = 0 and 2 * pi) */
+	    delta[0] = theta[0];
+	    tmid[0] = theta[0]/2;
+	    if(ncut > 1) {
+	      for(l = 1; l < ncut; l++) {
+		delta[l] = theta[l] - theta[l-1];
+		tmid[l] = (theta[l] + theta[l-1])/2;
+	      }
+	    }
+	    delta[ncut] = TWOPI - theta[ncut - 1];
+	    tmid[ncut] = (TWOPI + theta[ncut-1])/2;
+	    contrib = 0.0;
+	    for(l = 0; l <= ncut; l++) {
+#ifdef DEBUG
+	      Rprintf("delta[%d] = %lf\n", l, delta[l]);
+#endif
+	      xtest = xcentre + radius * cos(tmid[l]);
+	      ytest = ycentre + radius * sin(tmid[l]);
+	      if(TESTINSIDE(xtest, ytest, xx0, yy0, xx1, yy1)) {
+		contrib += delta[l];
+#ifdef DEBUG 
+		Rprintf("... inside\n");
+	      } else {
+		Rprintf("... outside\n");
+#endif
+	      }
+
+	    }
+	  }
+	  /* multiply by sign of trapezium */
+	  if(xx0  < xx1)
+	    contrib *= -1;
+
+#ifdef DEBUG
+	  Rprintf("contrib = %lf\n", contrib);
+#endif
+	  total += contrib;
+	}
+	out[ j * n + i] = total;
+#ifdef DEBUG
+	Rprintf("total = %lf\n", total);
+#endif
+      }
+    }
+  }
+}
+
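
For reference, a minimal standalone sketch of the edge-correction weight that ripleybox() computes for one point and one radius. This is not part of the upstream sources: the name ripley_box_weight is hypothetical, and the corner adjustment (extang += 0.25 when the point lies within epsilon of two edges) is omitted for brevity.

    #include <math.h>
    #include <stdio.h>

    #ifndef M_PI
    #define M_PI 3.141592653589793
    #endif

    #define MIN(A,B) (((A) < (B)) ? (A) : (B))

    double ripley_box_weight(double xx, double yy, double r,
                             double x0, double y0, double x1, double y1) {
      /* perpendicular distances from the point to the four edges */
      double dL = xx - x0, dR = x1 - xx, dD = yy - y0, dU = y1 - yy;
      /* angles from each perpendicular to the corners of the rectangle */
      double bLU = atan2(dU, dL), bLD = atan2(dD, dL);
      double bRU = atan2(dU, dR), bRD = atan2(dD, dR);
      double bUL = atan2(dL, dU), bUR = atan2(dR, dU);
      double bDL = atan2(dL, dD), bDR = atan2(dR, dD);
      /* half-angle cut off outside each edge by the circle of radius r */
      double aL = (dL < r) ? acos(dL/r) : 0.0;
      double aR = (dR < r) ? acos(dR/r) : 0.0;
      double aD = (dD < r) ? acos(dD/r) : 0.0;
      double aU = (dU < r) ? acos(dU/r) : 0.0;
      /* clip each half-angle at the corners, as in the loop above */
      double cL = MIN(aL, bLU) + MIN(aL, bLD);
      double cR = MIN(aR, bRU) + MIN(aR, bRD);
      double cU = MIN(aU, bUL) + MIN(aU, bUR);
      double cD = MIN(aD, bDL) + MIN(aD, bDR);
      /* fraction of the circle lying outside the rectangle */
      double extang = (cL + cR + cU + cD) / (2.0 * M_PI);
      return 1.0 / (1.0 - extang);
    }

    int main(void) {
      /* centre of the unit square: circle stays inside, weight = 1 */
      printf("%f\n", ripley_box_weight(0.5, 0.5, 0.2, 0.0, 0.0, 1.0, 1.0));
      /* near the left edge: about 42 percent of the circle is outside,
         so the weight is about 1.72 */
      printf("%f\n", ripley_box_weight(0.05, 0.5, 0.2, 0.0, 0.0, 1.0, 1.0));
      return 0;
    }
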
diff --git a/src/crossloop.h b/src/crossloop.h
new file mode 100644
index 0000000..f4b4c07
--- /dev/null
+++ b/src/crossloop.h
@@ -0,0 +1,63 @@
+/*
+
+  crossloop.h
+
+  Generic code template for loop for cross-close-pairs operations
+  collecting contributions to point x_i
+  from all points y_j such that ||x_i - y_j|| <= r
+
+  cpp variables used:
+
+       INITIAL_I        code executed at start of 'i' loop       
+       CONTRIBUTE_IJ    code executed to compute contribution from j to i
+       COMMIT_I         code executed to save total contribution to i
+
+  C variables used:
+       int i, j, n1, n2, maxchunk, jleft;
+       double x1i, y1i, xleft, dx, dy, d2, rmax, r2max;
+       double *x1, *y1, *x2, *y2;
+
+  $Revision: 1.2 $  $Date: 2014/04/02 07:59:10 $
+
+*/
+
+#ifndef CHUNKLOOP_H
+#include "chunkloop.h"
+#endif
+
+#define CROSSLOOP(INITIAL_I, CONTRIBUTE_IJ, COMMIT_I)           \
+  OUTERCHUNKLOOP(i, n1, maxchunk, 65536) {	     	  	\
+    R_CheckUserInterrupt();			     	  	\
+    INNERCHUNKLOOP(i, n1, maxchunk, 65536) {	     	  	\
+						     	  	\
+      x1i = x1[i];					        \
+      y1i = y1[i];					        \
+                                                     	        \
+      INITIAL_I;					  	\
+                                                     	        \
+      jleft = 0;					  	\
+							  	\
+      /* 						  	\
+	 adjust starting point jleft			  	\
+      */						  	\
+      xleft = x1i - rmax;				  	\
+      while((x2[jleft] < xleft) && (jleft+1 < n2))	  	\
+	++jleft;					  	\
+							  	\
+      /* 						  	\
+	 process from j = jleft until dx > rmax		  	\
+      */						  	\
+      for(j=jleft; j < n2; j++) {			  	\
+	dx = x2[j] - x1i;				        \
+	if(dx > rmax)					  	\
+	  break;					  	\
+	dy = y2[j] - y1i;				  	\
+	d2 = dx * dx + dy * dy;				  	\
+	if(d2 <= r2max) {				  	\
+	    /* add this (i, j) pair to output */	  	\
+	  CONTRIBUTE_IJ;				  	\
+	}						  	\
+      }							  	\
+      COMMIT_I;						  	\
+    }							  	\
+  }  
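
A hypothetical usage sketch of the CROSSLOOP template (not upstream code): for each point of a query pattern, count the data points within distance rmax. Both patterns must be sorted by increasing x coordinate, as the template assumes; the function name crosscount is invented for illustration.

    #include <R.h>
    #include <R_ext/Utils.h>
    #include "crossloop.h"

    void crosscount(int *nq, double *xq, double *yq,
                    int *nd, double *xd, double *yd,
                    double *rmaxi, int *count) {
      /* scratch variables required by the CROSSLOOP template */
      int i, j, n1, n2, maxchunk, jleft, ni;
      double x1i, y1i, xleft, dx, dy, d2, rmax, r2max;
      double *x1, *y1, *x2, *y2;

      n1 = *nq; x1 = xq; y1 = yq;
      n2 = *nd; x2 = xd; y2 = yd;
      rmax = *rmaxi;
      r2max = rmax * rmax;

      if(n1 == 0 || n2 == 0) return;

      CROSSLOOP({ ni = 0; },          /* INITIAL_I */
                { ni++; },            /* CONTRIBUTE_IJ */
                { count[i] = ni; })   /* COMMIT_I */
    }
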
diff --git a/src/denspt.c b/src/denspt.c
new file mode 100755
index 0000000..e651f98
--- /dev/null
+++ b/src/denspt.c
@@ -0,0 +1,512 @@
+#include <Rmath.h>
+#include <R_ext/Utils.h>
+
+#include "chunkloop.h"
+#include "pairloop.h"
+#include "constants.h"
+/*
+
+  denspt.c
+
+  Calculation of density estimate at data points
+
+  $Revision: 1.18 $     $Date: 2017/06/05 10:53:59 $
+
+  Assumes point pattern is sorted in increasing order of x coordinate
+
+  *denspt*     Density estimate at points
+  *smoopt*     Smoothed mark values at points
+
+*/
+
+#define TWOPI M_2PI
+
+double sqrt(), exp();
+
+#define STD_DECLARATIONS				\
+  int n, i, j, maxchunk;				\
+  double xi, yi, rmax, r2max, dx, dy, dx2, d2	
+
+#define STD_INITIALISE				\
+  n = *nxy;					\
+  rmax = *rmaxi;				\
+  r2max = rmax * rmax
+
+
+/* ----------------- density estimation -------------------- */
+
+void denspt(nxy, x, y, rmaxi, sig, result) 
+     /* inputs */
+     int *nxy;         /* number of (x,y) points */
+     double *x, *y;    /* (x,y) coordinates */
+     double *rmaxi;    /* maximum distance at which points contribute */
+     double *sig;      /* Gaussian sd */
+     /* output */
+     double *result;   /* vector of computed density values */
+{
+  STD_DECLARATIONS;
+  double resulti, coef;	
+  double sigma, twosig2; 
+  STD_INITIALISE;
+
+  sigma = *sig;				      
+  twosig2 = 2.0 * sigma * sigma;	
+  coef = 1.0/(TWOPI * sigma * sigma);  
+
+  if(n == 0) 
+    return;
+
+  PAIRLOOP( { resulti = 0.0; },
+            { resulti += exp(-d2/twosig2); } ,
+	    { result[i] = coef * resulti; })
+
+}
+
+
+void wtdenspt(nxy, x, y, rmaxi, sig, weight, result) 
+     /* inputs */
+     int *nxy;         /* number of (x,y) points */
+     double *x, *y;    /* (x,y) coordinates */
+     double *rmaxi;    /* maximum distance */
+     double *sig;      /* Gaussian sd */
+     double *weight;      /* vector of weights */
+     /* output */
+     double *result;    /* vector of weighted density values */
+{
+  STD_DECLARATIONS;
+  double resulti, coef;	
+  double sigma, twosig2; 
+  STD_INITIALISE;
+
+  sigma = *sig;				      
+  twosig2 = 2.0 * sigma * sigma;	
+  coef = 1.0/(TWOPI * sigma * sigma);  
+
+  if(n == 0) 
+    return;
+
+  PAIRLOOP( { resulti = 0.0; },
+	    { resulti += weight[j] * exp(-d2/twosig2); },
+	    { result[i] = coef * resulti; } )
+
+ }
+
+/* ------------- anisotropic versions -------------------- */
+
+void adenspt(nxy, x, y, rmaxi, detsigma, sinv, result) 
+     /* inputs */
+     int *nxy;         /* number of (x,y) points */
+     double *x, *y;    /* (x,y) coordinates */
+     double *rmaxi;    /* maximum distance at which points contribute */
+     double *detsigma;  /* determinant of variance matrix */
+     double *sinv;      /* inverse variance matrix (2x2, flattened) */
+     /* output */
+     double *result;   /* vector of density values */
+{
+  STD_DECLARATIONS;
+  double resulti, coef;	
+  double detsig, s11, s12, s21, s22;
+  STD_INITIALISE;
+  detsig = *detsigma;
+  coef = 1.0/(TWOPI * sqrt(detsig));
+  s11 = sinv[0];
+  s12 = sinv[1];
+  s21 = sinv[2];
+  s22 = sinv[3];
+
+  PAIRLOOP( { resulti = 0.0; },
+	    { resulti += exp(-(dx * (dx * s11 + dy * s12) \
+			       + dy * (dx * s21 + dy * s22))/2.0); },
+	    { result[i] = coef * resulti; })
+}
+
+
+void awtdenspt(nxy, x, y, rmaxi, detsigma, sinv, weight, result) 
+     /* inputs */
+     int *nxy;         /* number of (x,y) points */
+     double *x, *y;    /* (x,y) coordinates */
+     double *rmaxi;    /* maximum distance at which points contribute */
+     double *detsigma;  /* determinant of variance matrix */
+     double *sinv;      /* inverse variance matrix (2x2, flattened) */
+     double *weight;      /* vector of weights */
+     /* output */
+     double *result;    /* vector of weighted density values */
+{
+  STD_DECLARATIONS;
+  double resulti, coef;	
+  double detsig, s11, s12, s21, s22;
+  STD_INITIALISE;
+  detsig = *detsigma;
+  coef = 1.0/(TWOPI * sqrt(detsig));
+  s11 = sinv[0];
+  s12 = sinv[1];
+  s21 = sinv[2];
+  s22 = sinv[3];
+
+  if(n == 0) 
+    return;
+
+  PAIRLOOP( { resulti = 0.0; },
+	    { resulti += weight[j] * \
+		exp(-(dx * (dx * s11 + dy * s12)			\
+		      + dy * (dx * s21 + dy * s22))/2.0); },
+	    { result[i] = coef * resulti; })
+ }
+
+
+/* --------------- smoothing --------------------------- */
+
+void smoopt(nxy, x, y, v, self, rmaxi, sig, result) 
+     /* inputs */
+     int *nxy;         /* number of (x,y) points */
+     double *x, *y;    /* (x,y) coordinates */
+     double *v;        /* vector of mark values to be smoothed */
+     int *self;       /* 0 if leave-one-out */
+     double *rmaxi;    /* maximum distance at which points contribute */
+     double *sig;      /* Gaussian sd */
+     /* output */
+     double *result;   /* vector of computed smoothed values */
+{
+  STD_DECLARATIONS;
+  int countself;
+  double sigma, twosig2;
+  double numer, denom, wij; 
+
+  STD_INITIALISE;
+  sigma = *sig;
+  countself = *self;
+  twosig2 = 2.0 * sigma * sigma;
+
+  if(n == 0) 
+    return;
+
+  if(countself != 0) {
+  PAIRLOOP({ numer = denom = 0.0; },
+	   { \
+	     wij = exp(-d2/twosig2);		\
+	     denom += wij;			\
+	     numer += wij * v[j];		\
+	   },					
+	   {					\
+	     denom += 1;			\
+	     numer += v[i];			\
+	     result[i] = numer/denom;		\
+	   })
+    } else {
+  PAIRLOOP({ numer = denom = 0.0; },
+	   { \
+	     wij = exp(-d2/twosig2);		\
+	     denom += wij;			\
+	     numer += wij * v[j];		\
+	   },					
+	   {					\
+	     result[i] = numer/denom;		\
+	   })
+    }
+ }
+
+
+void wtsmoopt(nxy, x, y, v, self, rmaxi, sig, weight, result) 
+     /* inputs */
+     int *nxy;         /* number of (x,y) points */
+     double *x, *y;    /* (x,y) coordinates */
+     double *v;        /* vector of mark values to be smoothed */
+     int *self;       /* 0 if leave-one-out */
+     double *rmaxi;    /* maximum distance */
+     double *sig;      /* Gaussian sd */
+     double *weight;      /* vector of weights */
+     /* output */
+     double *result;    /* vector of computed smoothed values */
+{
+  STD_DECLARATIONS;
+  int countself;
+  double sigma, twosig2;
+  double numer, denom, wij; 
+
+  STD_INITIALISE;
+  sigma = *sig;
+  countself = *self;
+  twosig2 = 2.0 * sigma * sigma;
+
+  if(n == 0) 
+    return;
+
+  if(countself != 0) {
+  PAIRLOOP({ numer = denom = 0.0; },
+	   {						\
+	     wij = weight[j] * exp(-d2/twosig2);	\
+	     denom += wij;				\
+	     numer += wij * v[j];			\
+	   },						
+	   {						\
+	     denom += weight[i];			\
+	     numer += weight[i] * v[i];		\
+	     result[i] = numer/denom;			\
+	   })
+  } else {
+  PAIRLOOP({ numer = denom = 0.0; },
+	   {						\
+	     wij = weight[j] * exp(-d2/twosig2);	\
+	     denom += wij;				\
+	     numer += wij * v[j];			\
+	   },						
+	   {						\
+	     result[i] = numer/denom;			\
+	   })
+    }
+}
+
+/* ------------- anisotropic versions -------------------- */
+
+void asmoopt(nxy, x, y, v, self, rmaxi, sinv, result) 
+     /* inputs */
+     int *nxy;         /* number of (x,y) points */
+     double *x, *y;    /* (x,y) coordinates */
+     double *v;        /* vector of mark values to be smoothed */
+     int *self;       /* 0 if leave-one-out */
+     double *rmaxi;    /* maximum distance at which points contribute */
+     double *sinv;      /* inverse variance matrix (2x2, flattened) */
+     /* output */
+     double *result;   /* vector of smoothed values */
+{
+  STD_DECLARATIONS;
+  int countself;
+  double s11, s12, s21, s22;
+  double numer, denom, wij; 
+
+  STD_INITIALISE;
+  countself = *self;
+  s11 = sinv[0];
+  s12 = sinv[1];
+  s21 = sinv[2];
+  s22 = sinv[3];
+
+  if(n == 0) 
+    return;
+
+  if(countself != 0) {
+  PAIRLOOP({ numer = denom = 0.0; },
+	   {							\
+	     wij = exp(-(dx * (dx * s11 + dy * s12)		\
+			 + dy * (dx * s21 + dy * s22))/2.0);	\
+	     denom += wij;					\
+	     numer += wij * v[j];				\
+	   },
+	   {					\
+	     denom += 1;			\
+	     numer += v[i];			\
+	     result[i] = numer/denom;		\
+	   })
+    } else {
+  PAIRLOOP({ numer = denom = 0.0; },
+	   {							\
+	     wij = exp(-(dx * (dx * s11 + dy * s12)		\
+			 + dy * (dx * s21 + dy * s22))/2.0);	\
+	     denom += wij;					\
+	     numer += wij * v[j];				\
+	   },
+	   {					\
+	     result[i] = numer/denom;		\
+	   })
+    }
+}
+
+
+void awtsmoopt(nxy, x, y, v, self, rmaxi, sinv, weight, result) 
+     /* inputs */
+     int *nxy;         /* number of (x,y) points */
+     double *x, *y;    /* (x,y) coordinates */
+     double *v;        /* vector of mark values to be smoothed */
+     int *self;       /* 0 if leave-one-out */
+     double *rmaxi;    /* maximum distance at which points contribute */
+     double *sinv;      /* inverse variance matrix (2x2, flattened) */
+     double *weight;      /* vector of weights */
+     /* output */
+     double *result;    /* vector of smoothed values */
+{
+  STD_DECLARATIONS;
+  int countself;
+  double s11, s12, s21, s22;
+  double numer, denom, wij; 
+
+  STD_INITIALISE;
+  countself = *self;
+
+  s11 = sinv[0];
+  s12 = sinv[1];
+  s21 = sinv[2];
+  s22 = sinv[3];
+
+  if(n == 0) 
+    return;
+
+  if(countself != 0) {
+  PAIRLOOP({ numer = denom = 0.0; },
+	   {								\
+	     wij = weight[j] * exp(-(dx * (dx * s11 + dy * s12)		\
+				     + dy * (dx * s21 + dy * s22))/2.0); \
+	     denom += wij;						\
+	     numer += wij * v[j];					\
+	   },
+	   {					\
+	     denom += weight[i];		\
+	     numer += weight[i] * v[i];	\
+	     result[i] = numer/denom;		\
+	   })
+    } else {
+  PAIRLOOP({ numer = denom = 0.0; },
+	   {								\
+	     wij = weight[j] * exp(-(dx * (dx * s11 + dy * s12)		\
+				     + dy * (dx * s21 + dy * s22))/2.0); \
+	     denom += wij;						\
+	     numer += wij * v[j];					\
+	   },
+	   {					\
+	     result[i] = numer/denom;		\
+	   })
+    }
+}
+
+/* ----------------- transformed coordinates -------------------- */
+/*
+
+   The following functions assume that x, y have been transformed
+   by the inverse of the variance matrix,
+   and subsequently scaled by 1/sqrt(2) so that
+   the Gaussian density is proportional to exp(-(x^2+y^2)). 
+
+   Constant factor in density is omitted.
+*/
+   
+void Gdenspt(nxy, x, y, rmaxi, result) 
+     /* inputs */
+     int *nxy;         /* number of (x,y) points */
+     double *x, *y;    /* (x,y) coordinates */
+     double *rmaxi;    /* maximum distance at which points contribute */
+     /* output */
+     double *result;   /* vector of computed density values */
+{
+  STD_DECLARATIONS;
+  double resulti;
+  STD_INITIALISE;
+
+  if(n == 0) 
+    return;
+
+  PAIRLOOP( { resulti = 0.0; },
+            { resulti += exp(-d2); } ,
+	    { result[i] = resulti; })
+}
+
+void Gwtdenspt(nxy, x, y, rmaxi, weight, result) 
+     /* inputs */
+     int *nxy;         /* number of (x,y) points */
+     double *x, *y;    /* (x,y) coordinates */
+     double *rmaxi;    /* maximum distance */
+     double *weight;      /* vector of weights */
+     /* output */
+     double *result;    /* vector of weighted density values */
+{
+  STD_DECLARATIONS;
+  double resulti;	
+  STD_INITIALISE;
+
+  if(n == 0) 
+    return;
+
+  PAIRLOOP( { resulti = 0.0; },
+	    { resulti += weight[j] * exp(-d2); },
+	    { result[i] = resulti; } )
+ }
+
+void Gsmoopt(nxy, x, y, v, self, rmaxi, result) 
+     /* inputs */
+     int *nxy;         /* number of (x,y) points */
+     double *x, *y;    /* (x,y) coordinates */
+     double *v;        /* vector of mark values to be smoothed */
+     int *self;       /* 0 if leave-one-out */
+     double *rmaxi;    /* maximum distance at which points contribute */
+     /* output */
+     double *result;   /* vector of computed smoothed values */
+{
+  STD_DECLARATIONS;
+  int countself;
+  double numer, denom, wij; 
+
+  STD_INITIALISE;
+  countself = *self;
+
+  if(n == 0) 
+    return;
+
+  if(countself != 0) {
+  PAIRLOOP({ numer = denom = 0.0; },
+	   { \
+	     wij = exp(-d2);		\
+	     denom += wij;			\
+	     numer += wij * v[j];		\
+	   },					
+	   {					\
+	     denom += 1;			\
+	     numer += v[i];			\
+	     result[i] = numer/denom;		\
+	   })
+    } else {
+  PAIRLOOP({ numer = denom = 0.0; },
+	   { \
+	     wij = exp(-d2);		\
+	     denom += wij;			\
+	     numer += wij * v[j];		\
+	   },					
+	   {					\
+	     result[i] = numer/denom;		\
+	   })
+    }
+ }
+
+
+void Gwtsmoopt(nxy, x, y, v, self, rmaxi, weight, result) 
+     /* inputs */
+     int *nxy;         /* number of (x,y) points */
+     double *x, *y;    /* (x,y) coordinates */
+     double *v;        /* vector of mark values to be smoothed */
+     int *self;       /* 0 if leave-one-out */
+     double *rmaxi;    /* maximum distance */
+     double *weight;      /* vector of weights */
+     /* output */
+     double *result;    /* vector of computed smoothed values */
+{
+  STD_DECLARATIONS;
+  int countself;
+  double numer, denom, wij; 
+
+  STD_INITIALISE;
+  countself = *self;
+
+  if(n == 0) 
+    return;
+
+  if(countself != 0) {
+  PAIRLOOP({ numer = denom = 0.0; },
+	   {						\
+	     wij = weight[j] * exp(-d2);	\
+	     denom += wij;				\
+	     numer += wij * v[j];			\
+	   },						
+	   {						\
+	     denom += weight[i];			\
+	     numer += weight[i] * v[i];		\
+	     result[i] = numer/denom;			\
+	   })
+  } else {
+  PAIRLOOP({ numer = denom = 0.0; },
+	   {						\
+	     wij = weight[j] * exp(-d2);	\
+	     denom += wij;				\
+	     numer += wij * v[j];			\
+	   },						
+	   {						\
+	     result[i] = numer/denom;			\
+	   })
+    }
+}
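
To make the quantity explicit, here is a standalone sketch (not upstream code) of what denspt() computes, written as a naive O(n^2) double loop with no sorting and no truncation at rmax: a leave-one-out Gaussian kernel density estimate at each data point.

    #include <math.h>

    #ifndef M_2PI
    #define M_2PI 6.283185307179586
    #endif

    void denspt_naive(int n, const double *x, const double *y,
                      double sigma, double *result) {
      int i, j;
      double twosig2 = 2.0 * sigma * sigma;
      double coef = 1.0 / (M_2PI * sigma * sigma);
      for(i = 0; i < n; i++) {
        double sum = 0.0;
        for(j = 0; j < n; j++) {
          if(j != i) {   /* leave-one-out: point i does not count itself */
            double dx = x[j] - x[i], dy = y[j] - y[i];
            sum += exp(-(dx * dx + dy * dy) / twosig2);
          }
        }
        result[i] = coef * sum;   /* agrees with denspt() when rmax = Inf */
      }
    }
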
diff --git a/src/densptcross.c b/src/densptcross.c
new file mode 100644
index 0000000..55546dc
--- /dev/null
+++ b/src/densptcross.c
@@ -0,0 +1,320 @@
+#include <Rmath.h>
+#include <R_ext/Utils.h>
+
+#include "chunkloop.h"
+#include "crossloop.h"
+#include "constants.h"
+/*
+
+  densptcross.c
+
+  $Revision: 1.2 $     $Date: 2014/04/02 10:27:43 $
+
+  Assumes point patterns are sorted in increasing order of x coordinate
+
+  *crdenspt     Density estimate at points
+  *crsmoopt     Smoothed mark values at points
+
+*/
+
+#define TWOPI M_2PI
+
+double sqrt(), exp();
+
+#define STD_DECLARATIONS				\
+  int i, j, n1, n2, maxchunk, jleft;                    \
+  double x1i, y1i, xleft, dx, dy, d2, rmax, r2max;      \
+  double *x1, *y1, *x2, *y2;
+
+#define STD_INITIALISE				\
+  n1 = *nquery;					\
+  x1 = xq; y1 = yq;                             \
+  n2 = *ndata;					\
+  x2 = xd; y2 = yd;                             \
+  rmax = *rmaxi;				\
+  r2max = rmax * rmax
+
+
+/* ----------------- density estimation -------------------- */
+
+void crdenspt(nquery, xq, yq, ndata, xd, yd, rmaxi, sig, result) 
+     /* inputs */
+     int *nquery;            /* number of locations to be interrogated */
+     double *xq, *yq;    /* (x,y) coordinates to be interrogated */
+     int *ndata;            /* number of data points */
+     double *xd, *yd;    /* (x,y) coordinates of data */
+     double *rmaxi;    /* maximum distance at which points contribute */
+     double *sig;      /* Gaussian sd */
+     /* output */
+     double *result;   /* vector of computed density values */
+{
+  STD_DECLARATIONS;
+  double resulti, coef;	
+  double sigma, twosig2; 
+  STD_INITIALISE;
+
+  sigma = *sig;				      
+  twosig2 = 2.0 * sigma * sigma;	
+  coef = 1.0/(TWOPI * sigma * sigma);  
+
+  if(n1 == 0 || n2 == 0) 
+    return;
+
+  CROSSLOOP( { resulti = 0.0; },
+            { resulti += exp(-d2/twosig2); } ,
+	    { result[i] = coef * resulti; })
+
+}
+
+
+void wtcrdenspt(nquery, xq, yq, ndata, xd, yd, wd, rmaxi, sig, result) 
+     /* inputs */
+     int *nquery;        /* number of locations to be interrogated */
+     double *xq, *yq;    /* (x,y) coordinates to be interrogated */
+     int *ndata;         /* number of data points */
+     double *xd, *yd;    /* (x,y) coordinates of data */
+     double *wd;         /* weights of data points */
+     double *rmaxi;      /* maximum distance at which points contribute */
+     double *sig;        /* Gaussian sd */
+     /* output */
+     double *result;   /* vector of computed density values */
+{
+  STD_DECLARATIONS;
+  double resulti, coef;	
+  double sigma, twosig2; 
+  STD_INITIALISE;
+
+  sigma = *sig;				      
+  twosig2 = 2.0 * sigma * sigma;	
+  coef = 1.0/(TWOPI * sigma * sigma);  
+
+  if(n1 == 0 || n2 == 0) 
+    return;
+
+  CROSSLOOP( { resulti = 0.0; },
+	    { resulti += wd[j] * exp(-d2/twosig2); },
+	    { result[i] = coef * resulti; } )
+
+ }
+
+/* ------------- anisotropic versions -------------------- */
+
+void acrdenspt(nquery, xq, yq, ndata, xd, yd, rmaxi, detsigma, sinv, result) 
+     /* inputs */
+     int *nquery;            /* number of locations to be interrogated */
+     double *xq, *yq;    /* (x,y) coordinates to be interrogated */
+     int *ndata;            /* number of data points */
+     double *xd, *yd;    /* (x,y) coordinates of data */
+     double *rmaxi;    /* maximum distance at which points contribute */
+     double *detsigma;  /* determinant of variance matrix */
+     double *sinv;      /* inverse variance matrix (2x2, flattened) */
+     /* output */
+     double *result;   /* vector of computed density values */
+{
+  STD_DECLARATIONS;
+  double resulti, coef;	
+  double detsig, s11, s12, s21, s22;
+  STD_INITIALISE;
+  detsig = *detsigma;
+  coef = 1.0/(TWOPI * sqrt(detsig));
+  s11 = sinv[0];
+  s12 = sinv[1];
+  s21 = sinv[2];
+  s22 = sinv[3];
+
+  if(n1 == 0 || n2 == 0) 
+    return;
+
+  CROSSLOOP( { resulti = 0.0; },
+	    { resulti += exp(-(dx * (dx * s11 + dy * s12) \
+			       + dy * (dx * s21 + dy * s22))/2.0); },
+	    { result[i] = coef * resulti; })
+}
+
+
+void awtcrdenspt(nquery, xq, yq, ndata, xd, yd, wd, rmaxi, detsigma, sinv, result) 
+     /* inputs */
+     int *nquery;        /* number of locations to be interrogated */
+     double *xq, *yq;    /* (x,y) coordinates to be interrogated */
+     int *ndata;         /* number of data points */
+     double *xd, *yd;    /* (x,y) coordinates of data */
+     double *wd;         /* weights of data points */
+     double *rmaxi;      /* maximum distance at which points contribute */
+     double *detsigma;   /* determinant of variance matrix */
+     double *sinv;       /* inverse variance matrix (2x2, flattened) */
+     /* output */
+     double *result;     /* vector of weighted density values */
+{
+  STD_DECLARATIONS;
+  double resulti, coef;	
+  double detsig, s11, s12, s21, s22;
+  STD_INITIALISE;
+  detsig = *detsigma;
+  coef = 1.0/(TWOPI * sqrt(detsig));
+  s11 = sinv[0];
+  s12 = sinv[1];
+  s21 = sinv[2];
+  s22 = sinv[3];
+
+  if(n1 == 0 || n2 == 0) 
+    return;
+
+  CROSSLOOP( { resulti = 0.0; },
+	    { resulti += wd[j] * \
+		exp(-(dx * (dx * s11 + dy * s12)			\
+		      + dy * (dx * s21 + dy * s22))/2.0); },
+	    { result[i] = coef * resulti; })
+ }
+
+
+/* --------------- smoothing --------------------------- */
+
+void crsmoopt(nquery, xq, yq, ndata, xd, yd, vd, rmaxi, sig, result) 
+     /* inputs */
+     int *nquery;            /* number of locations to be interrogated */
+     double *xq, *yq;    /* (x,y) coordinates to be interrogated */
+     int *ndata;            /* number of data points */
+     double *xd, *yd;    /* (x,y) coordinates of data */
+     double *vd;         /* mark values at data points */
+     double *rmaxi;    /* maximum distance at which points contribute */
+     double *sig;      /* Gaussian sd */
+     /* output */
+     double *result;   /* vector of computed smoothed values */
+{
+  STD_DECLARATIONS;
+  double sigma, twosig2;
+  double numer, denom, wij; 
+
+  STD_INITIALISE;
+  sigma = *sig;
+  twosig2 = 2.0 * sigma * sigma;
+
+  if(n1 == 0 || n2 == 0) 
+    return;
+
+  CROSSLOOP({ numer = denom = 0.0; },
+	   { \
+	     wij = exp(-d2/twosig2);		\
+	     denom += wij;			\
+	     numer += wij * vd[j];		\
+	   },					
+	   {					\
+	     result[i] = numer/denom;		\
+	   })
+ }
+
+
+void wtcrsmoopt(nquery, xq, yq, ndata, xd, yd, vd, wd, rmaxi, sig, result) 
+     /* inputs */
+     int *nquery;            /* number of locations to be interrogated */
+     double *xq, *yq;    /* (x,y) coordinates to be interrogated */
+     int *ndata;            /* number of data points */
+     double *xd, *yd;    /* (x,y) coordinates of data */
+     double *vd;         /* mark values at data points */
+     double *wd;         /* weights of data points */
+     double *rmaxi;    /* maximum distance */
+     double *sig;      /* Gaussian sd */
+     /* output */
+     double *result;    /* vector of computed smoothed values */
+{
+  STD_DECLARATIONS;
+  double sigma, twosig2;
+  double numer, denom, wij; 
+
+  STD_INITIALISE;
+  sigma = *sig;
+  twosig2 = 2.0 * sigma * sigma;
+
+  if(n1 == 0 || n2 == 0) 
+    return;
+
+  CROSSLOOP({ numer = denom = 0.0; },
+	   {						\
+	     wij = wd[j] * exp(-d2/twosig2);	\
+	     denom += wij;				\
+	     numer += wij * vd[j];			\
+	   },						
+	   {						\
+	     result[i] = numer/denom;			\
+	   })
+}
+
+/* ------------- anisotropic versions -------------------- */
+
+void acrsmoopt(nquery, xq, yq, ndata, xd, yd, vd, rmaxi, sinv, result) 
+     /* inputs */
+     int *nquery;            /* number of locations to be interrogated */
+     double *xq, *yq;    /* (x,y) coordinates to be interrogated */
+     int *ndata;            /* number of data points */
+     double *xd, *yd;    /* (x,y) coordinates of data */
+     double *vd;         /* mark values at data points */
+     double *rmaxi;    /* maximum distance at which points contribute */
+     double *sinv;      /* inverse variance matrix (2x2, flattened) */
+     /* output */
+     double *result;   /* vector of smoothed values */
+{
+  STD_DECLARATIONS;
+  double s11, s12, s21, s22;
+  double numer, denom, wij; 
+
+  STD_INITIALISE;
+  s11 = sinv[0];
+  s12 = sinv[1];
+  s21 = sinv[2];
+  s22 = sinv[3];
+
+  if(n1 == 0 || n2 == 0) 
+    return;
+
+  CROSSLOOP({ numer = denom = 0.0; },
+	   {							\
+	     wij = exp(-(dx * (dx * s11 + dy * s12)		\
+			 + dy * (dx * s21 + dy * s22))/2.0);	\
+	     denom += wij;					\
+	     numer += wij * vd[j];				\
+	   },
+	   {					\
+	     result[i] = numer/denom;		\
+	   })
+}
+
+
+void awtcrsmoopt(nquery, xq, yq, ndata, xd, yd, vd, wd, rmaxi, sinv, result) 
+     /* inputs */
+     int *nquery;        /* number of locations to be interrogated */
+     double *xq, *yq;    /* (x,y) coordinates to be interrogated */
+     int *ndata;         /* number of data points */
+     double *xd, *yd;    /* (x,y) coordinates of data */
+     double *vd;         /* mark values at data points */
+     double *wd;         /* weights of data points */
+     double *rmaxi;      /* maximum distance at which points contribute */
+     double *sinv;       /* inverse variance matrix (2x2, flattened) */
+     /* output */
+     double *result;    /* vector of smoothed values */
+{
+  STD_DECLARATIONS;
+  double s11, s12, s21, s22;
+  double numer, denom, wij; 
+
+  STD_INITIALISE;
+
+  s11 = sinv[0];
+  s12 = sinv[1];
+  s21 = sinv[2];
+  s22 = sinv[3];
+
+  if(n1 == 0 || n2 == 0) 
+    return;
+
+  CROSSLOOP({ numer = denom = 0.0; },
+	   {								\
+	     wij = wd[j] * exp(-(dx * (dx * s11 + dy * s12)		\
+				     + dy * (dx * s21 + dy * s22))/2.0); \
+	     denom += wij;						\
+	     numer += wij * vd[j];					\
+	   },
+	   {					\
+	     result[i] = numer/denom;		\
+	   })
+}
+
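
The anisotropic routines above all share one kernel evaluation. A small sketch (not upstream code) of that weight: with sinv the flattened 2x2 inverse covariance matrix, the contribution of a displacement (dx, dy) is exp(-q/2) for the quadratic form q below. The acrdenspt/awtcrdenspt functions then multiply the accumulated sum by the normalising constant 1/(2*pi*sqrt(det Sigma)). The element order mirrors the assignments in the code; for a symmetric inverse matrix the distinction between s12 and s21 is immaterial.

    #include <math.h>

    double aniso_weight(double dx, double dy, const double *sinv) {
      double s11 = sinv[0], s12 = sinv[1], s21 = sinv[2], s22 = sinv[3];
      /* quadratic form q = (dx, dy) Sigma^{-1} (dx, dy)^T */
      double q = dx * (dx * s11 + dy * s12) + dy * (dx * s21 + dy * s22);
      return exp(-q / 2.0);
    }
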
diff --git a/src/dgs.c b/src/dgs.c
new file mode 100755
index 0000000..104a04c
--- /dev/null
+++ b/src/dgs.c
@@ -0,0 +1,125 @@
+#include <R.h>
+#include <Rmath.h>
+#include <math.h>
+#include "methas.h"
+#include "dist2.h"
+#include "constants.h"
+
+/* Conditional intensity computation for Diggle-Gates-Stibbard process */
+
+/*
+ Conditional intensity function for a pairwise interaction point
+ process with interaction function as given by 
+
+                  e(t) = sin^2(pi*t/(2*rho)) for t < rho
+                       = 1 for t >= rho
+
+ (See page 767 of Diggle, Gates, and Stibbard, Biometrika vol. 74,
+  1987, pages 763 -- 770.)
+*/
+
+#define PION2 M_PI_2   /* pi/2 defined in Rmath.h */
+
+
+/* Storage of parameters and precomputed/auxiliary data */
+
+typedef struct Dgs {
+  double rho;
+  double rho2;
+  double pion2rho;
+  double *period;
+  int per;
+} Dgs;
+
+
+/* initialiser function */
+
+Cdata *dgsinit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  Dgs *dgs;
+  /* allocate storage */
+  dgs = (Dgs *) R_alloc(1, sizeof(Dgs));
+
+  /* Interpret model parameters*/
+  dgs->rho    = model.ipar[0];
+  dgs->period = model.period;
+  /* constants */
+  dgs->rho2       = pow(dgs->rho, 2);
+  dgs->pion2rho   = PION2/dgs->rho;
+  /* periodic boundary conditions? */
+  dgs->per    = (model.period[0] > 0.0);
+  return((Cdata *) dgs);
+}
+
+/* conditional intensity evaluator */
+
+double dgscif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int npts, ix, ixp1, j;
+  double *x, *y;
+  double u, v;
+  double d2, r2, pairprod, cifval;
+  Dgs *dgs;
+  DECLARE_CLOSE_D2_VARS;
+
+  dgs = (Dgs *) cdata;
+
+  r2 = dgs->rho2;
+
+  u  = prop.u;
+  v  = prop.v;
+  ix = prop.ix;
+  x  = state.x;
+  y  = state.y;
+  npts = state.npts;
+
+  cifval = pairprod = 1.0;
+
+  if(npts == 0) 
+    return(cifval);
+
+  ixp1 = ix+1;
+  /* If ix = NONE = -1, then ixp1 = 0 is correct */
+  if(dgs->per) { /* periodic distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],dgs->period,r2,d2))
+	  pairprod *= sin(dgs->pion2rho * sqrt(d2));
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],dgs->period,r2,d2))
+	  pairprod *= sin(dgs->pion2rho * sqrt(d2));
+      }
+    }
+  }
+  else { /* Euclidean distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(CLOSE_D2(u, v, x[j], y[j], r2, d2))
+	  pairprod *= sin(dgs->pion2rho * sqrt(d2));
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(CLOSE_D2(u, v, x[j], y[j], r2, d2))
+	  pairprod *= sin(dgs->pion2rho * sqrt(d2));
+      }
+    }
+  }
+
+  /* sin to sin^2 */
+  cifval = pairprod * pairprod;
+
+  return cifval;
+}
+
+Cifns DgsCifns = { &dgsinit, &dgscif, (updafunptr) NULL, NO};
+
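
Equivalently, a standalone sketch (not upstream code) of the pair interaction that dgscif() evaluates. dgscif() accumulates the product of sin terms and squares once at the end, which equals the product of these per-pair values.

    #include <math.h>

    #ifndef M_PI
    #define M_PI 3.141592653589793
    #endif

    double dgs_interaction(double t, double rho) {
      double s;
      if(t >= rho) return 1.0;
      /* e(t) = sin^2(pi*t/(2*rho)) for t < rho */
      s = sin((M_PI / 2.0) * t / rho);
      return s * s;
    }
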
diff --git a/src/digber.c b/src/digber.c
new file mode 100644
index 0000000..ace7e2d
--- /dev/null
+++ b/src/digber.c
@@ -0,0 +1,67 @@
+/*
+
+  digber.c
+
+  Diggle-Berman function J used in bandwidth selection
+
+  J(r) = \int_0^(2r) phi(t, r) dK(t)
+
+  where K is the K-function and
+       phi(t, r) = 2 r^2 * (acos(y) - y sqrt(1 - y^2))
+                        where y = t/(2r).
+
+  $Revision: 1.7 $     $Date: 2013/08/24 11:13:43 $
+
+ */
+
+#include <math.h>
+
+double sqrt(), acos();
+
+/* 
+   r is the vector of distance values, starting from 0, with length nr,
+   equally spaced.
+
+   dK = diff(K) is the vector of increments of the K-function,
+   with length ndK = nr-1.
+
+   values of J are computed only up to max(r)/2
+
+   nrmax = floor(nr/2).
+
+*/
+
+void digberJ(r, dK, nr, nrmax, ndK, J) 
+     /* inputs */
+     int *nr, *nrmax, *ndK;
+     double *r, *dK;
+     /* output */
+     double *J;  
+{ 
+  int i, j, Ni, NdK;
+  double ri, twori, tj, y, phiy, integral;
+
+  Ni = *nrmax;
+  NdK = *ndK;
+
+  J[0] = 0.0;
+
+  for(i = 1; i < Ni; i++) {
+    ri = r[i];
+    twori = 2 * ri;
+    integral = 0.0;
+    for(j = 0; j < NdK; j++) {
+      tj = r[j];
+      y = tj/twori;
+      if(y >= 1.0) break;
+      phiy = acos(y) - y * sqrt(1 - y * y);
+      integral += phiy * dK[j];
+    }
+    J[i] = 2 * ri * ri * integral;
+  }
+}
+
+
+  
+
+  
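
The integrand has a geometric meaning worth recording: phi(t, r) is the area of intersection of two discs of radius r whose centres are distance t apart (one can check this against the standard lens-area formula). A standalone sketch (not upstream code) with the 2*r^2 factor folded in, whereas digberJ() factors it out of the inner sum:

    #include <math.h>

    double phi(double t, double r) {
      double y = t / (2.0 * r);
      if(y >= 1.0) return 0.0;   /* discs too far apart to overlap */
      return 2.0 * r * r * (acos(y) - y * sqrt(1.0 - y * y));
    }
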
diff --git a/src/diggra.c b/src/diggra.c
new file mode 100755
index 0000000..b2ff128
--- /dev/null
+++ b/src/diggra.c
@@ -0,0 +1,154 @@
+#include <R.h>
+#include <Rmath.h>
+#include <math.h>
+#include "methas.h"
+#include "dist2.h"
+
+/* Conditional intensity computation for Diggle-Gratton process */
+
+/*
+ Conditional intensity function for a pairwise interaction point
+ process with interaction function as given by 
+
+                  e(t) = 0 for t < delta
+                       = ((t-delta)/(rho-delta))^kappa for delta <= t < rho
+                       = 1 for t >= rho
+
+ (See page 767 of Diggle, Gates, and Stibbard, Biometrika vol. 74,
+  1987, pages 763 -- 770.)
+*/
+
+/* Storage of parameters and precomputed/auxiliary data */
+
+typedef struct Diggra {
+  double kappa;
+  double delta;
+  double rho;
+  double delta2;  /*  delta^2   */
+  double rho2;    /*  rho^2 */
+  double fac;   /*   1/(rho-delta)  */
+  double *period;
+  int per;
+} Diggra;
+
+
+/* initialiser function */
+
+Cdata *diggrainit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  Diggra *diggra;
+  diggra = (Diggra *) R_alloc(1, sizeof(Diggra));
+
+  /* Interpret model parameters*/
+  diggra->kappa  = model.ipar[0];
+  diggra->delta  = model.ipar[1];
+  diggra->rho    = model.ipar[2];
+  diggra->period = model.period;
+  /* constants */
+  diggra->delta2 = pow(diggra->delta, 2);
+  diggra->rho2 = pow(diggra->rho, 2);
+  diggra->fac = 1/(diggra->rho - diggra->delta);
+  /* periodic boundary conditions? */
+  diggra->per    = (model.period[0] > 0.0);
+  return((Cdata *) diggra);
+}
+
+/* conditional intensity evaluator */
+
+double diggracif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int npts, ix, ixp1, j;
+  double *x, *y;
+  double u, v;
+  double d2, pairprod, cifval;
+  double rho2, delta, delta2, fac;
+  double *period;
+  DECLARE_CLOSE_D2_VARS;
+
+  Diggra *diggra;
+
+  diggra = (Diggra *) cdata;
+  period = diggra->period;
+  rho2   = diggra->rho2;
+  delta  = diggra->delta;
+  delta2 = diggra->delta2;
+  fac    = diggra->fac;
+
+  u  = prop.u;
+  v  = prop.v;
+  ix = prop.ix;
+  x  = state.x;
+  y  = state.y;
+  npts = state.npts;
+
+  cifval = pairprod = 1.0;
+
+  if(npts == 0) 
+    return(cifval);
+
+  ixp1 = ix+1;
+  /* If ix = NONE = -1, then ixp1 = 0 is correct */
+  if(diggra->per) { /* periodic distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,rho2,d2)) {
+	  if(d2 < delta2) {
+	    cifval = 0.0;
+	    return(cifval);
+	  } else {
+	    pairprod *= fac * (sqrt(d2)-delta);
+	  }
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,rho2,d2)) {
+	  if(d2 < delta2) {
+	    cifval = 0.0;
+	    return(cifval);
+	  } else {
+	    pairprod *= fac * (sqrt(d2)-delta);
+	  }
+	}
+      }
+    }
+  } else { /* Euclidean distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+        if(CLOSE_D2(u, v, x[j], y[j], rho2, d2)) {
+	  if(d2 <= delta2) {
+	    cifval = 0.0;
+	    return(cifval);
+	  } else {
+	    pairprod *= fac * (sqrt(d2)-delta);
+	  }
+	}
+      }  
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+        if(CLOSE_D2(u, v, x[j], y[j], rho2, d2)) {
+	  if(d2 <= delta2) {
+	    cifval = 0.0;
+	    return(cifval);
+	  } else {
+	    pairprod *= fac * (sqrt(d2)-delta);
+	  }
+	}
+      }  
+    }
+  }
+
+  cifval = pow(pairprod, diggra->kappa);
+  return cifval;
+}
+
+Cifns DiggraCifns = { &diggrainit, &diggracif, (updafunptr) NULL, NO};
+
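
A standalone sketch (not upstream code) of the pair interaction evaluated by diggracif(). Note that diggracif() raises the accumulated product to the power kappa once at the end; since (prod a_i)^kappa = prod (a_i^kappa), that equals the product of these per-pair values.

    #include <math.h>

    double diggra_interaction(double t, double delta, double rho, double kappa) {
      if(t < delta) return 0.0;   /* hard core: configurations forbidden */
      if(t >= rho) return 1.0;    /* beyond rho, no interaction */
      return pow((t - delta) / (rho - delta), kappa);
    }
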
diff --git a/src/dinfty.c b/src/dinfty.c
new file mode 100755
index 0000000..548b8ba
--- /dev/null
+++ b/src/dinfty.c
@@ -0,0 +1,139 @@
+/*
+
+   dinfty.c
+
+   $Revision: 1.6 $   $Date: 2011/09/20 07:42:18 $
+
+   Code by Dominic Schuhmacher
+
+   Modified by Adrian Baddeley
+
+*/
+
+#include <stdio.h>
+#include <R.h>
+
+#define COST(I,J) (d)[n * (J) + (I)]
+
+int arraymax(int *a, int n);
+void swap(int i, int j, int *a);
+int largestmobpos(int *mobile, int *current, int *collectvals, int n);
+
+/* ------------ The main function ----------------------------- */
+
+void dinfty_R(int *d, int *num, int *assignment) {
+   int i,j; /* indices */
+   int lmp, lmq; /* largest mobile position and its neighbor */
+   int newmax;
+   int n, currmin;
+   int *current, *travel, *mobile, *assig, *distrelev, *collectvals;
+
+   n = *num;
+
+   /* scratch space */
+   assig = (int *) R_alloc((long) n, sizeof(int)); 
+   travel = (int *) R_alloc((long) n, sizeof(int)); 
+   mobile = (int *) R_alloc((long) n, sizeof(int)); 
+   current = (int *) R_alloc((long) n, sizeof(int)); 
+   distrelev = (int *) R_alloc((long) n, sizeof(int));
+
+   collectvals = (int *) R_alloc((long) (n * n), sizeof(int));
+
+
+/*                                                               */
+/* We use the Johnson-Trotter Algorithm for listing permutations */
+/*                                                               */
+
+/* Initialize the algorithm */
+   for (i = 0; i < n; i++) {
+      travel[i] = -1;   /* all numbers traveling to the left */
+      mobile[i] = 1;    /* all numbers mobile */
+      current[i] = i;   /* current permutation is the identity */
+      assig[i] = i;     /* best permutation up to now is the identity */
+      distrelev[i] = COST(i, i);   /* pick relevant entries in the cost matrix */
+   }
+   currmin = arraymax(distrelev, n);   /* minimal max up to now */
+
+/* The main loop */
+   while(arraymax(mobile, n) == 1) {
+      lmp = largestmobpos(mobile, current, collectvals, n);
+      lmq = lmp + travel[lmp];
+      swap(lmp, lmq, current);
+      swap(lmp, lmq, travel);
+      for (i = 0; i < n; i++) {
+         if (current[i] > current[lmq])
+            travel[i] = -travel[i];
+         j = i + travel[i];
+         if (j < 0 || j > n-1 || current[i] < current[j])
+            mobile[i] = 0;
+         else
+            mobile[i] = 1;
+         distrelev[i] = COST(i, current[i]);
+      }
+      /* Calculation of new maximal value */
+      newmax = arraymax(distrelev, n);
+      if (newmax < currmin) {
+         currmin = newmax;
+         for (i = 0; i < n; i++) {
+            assig[i] = current[i];
+         }
+      }
+   }
+/* For testing: print distance from within C program
+   Rprintf("Prohorov distance is %d\n", currmin);     */
+
+/* "Return" the final assignment */
+   for (i = 0; i < n; i++) {
+      assignment[i] = assig[i] + 1;
+   }
+
+}
+
+
+/* ------------------------------------------------------------*/
+
+
+/* Maximal element of an integer array */
+int arraymax(int *a, int n) {
+  int i, amax;
+  if(n < 1)
+    return(-1);
+  amax = a[0];
+  if(n > 1)
+    for(i = 0; i < n; i++)
+      if(a[i] > amax) amax = a[i];
+  return(amax);
+}
+
+
+/* Swap elements i and j in array a */
+
+void swap(int i, int j, int *a) {
+   int v;
+
+   v = a[i];
+   a[i] = a[j];
+   a[j] = v;
+}
+
+
+/* Return index of largest mobile number in current */
+int largestmobpos(int *mobile, int *current, int *collectvals, int n) {
+   int i,j, maxval;
+
+   j = 0;
+   for (i = 0; i < n; i++) {
+      if (mobile[i] == 1) {
+         collectvals[j] = current[i];
+         j++;
+      }
+   }
+   maxval = arraymax(collectvals, j);
+   for (i = 0; i < n; i++) {
+      if (current[i] == maxval) {
+         return(i);
+      }
+   }
+   error("Internal error: largestmobpos failed");
+   return(0);
+}
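
For very small n, the result can be cross-checked against an exhaustive search over permutations, since dinfty_R() minimises the bottleneck cost max_i COST(i, p(i)). A brute-force sketch (not upstream code; the names bottleneck_bruteforce and search are invented), usable only for n <= 16:

    #include <limits.h>

    static int best;

    /* recursively extend a partial assignment of rows 0..k-1 */
    static void search(const int *cost, int n, int *used, int k, int sofar) {
      int j, v;
      if(k == n) { best = sofar; return; }  /* pruning ensures sofar < best */
      for(j = 0; j < n; j++) {
        if(!used[j]) {
          v = cost[n * j + k];        /* COST(k, j) in the notation above */
          if(v < sofar) v = sofar;    /* running maximum along this branch */
          if(v < best) {              /* prune branches that cannot improve */
            used[j] = 1;
            search(cost, n, used, k + 1, v);
            used[j] = 0;
          }
        }
      }
    }

    int bottleneck_bruteforce(const int *cost, int n) {
      int used[16] = {0};
      if(n < 1 || n > 16) return -1;
      best = INT_MAX;
      search(cost, n, used, 0, 0);
      return best;
    }
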
diff --git a/src/discarea.c b/src/discarea.c
new file mode 100755
index 0000000..344a52f
--- /dev/null
+++ b/src/discarea.c
@@ -0,0 +1,275 @@
+/*
+
+  discarea.c
+
+  Area of intersection between disc and polygonal window
+
+  $Revision: 1.6 $     $Date: 2011/12/03 00:15:52 $
+
+ */
+
+#undef DEBUG
+
+#include <math.h>
+
+#include <R.h>
+
+#define MIN(A,B) (((A) < (B)) ? (A) : (B))
+#define MAX(A,B) (((A) > (B)) ? (A) : (B))
+
+#ifndef PI
+#define PI 3.1415926535898
+#endif
+
+void 
+discareapoly(nc, xc, yc, nr, rmat, nseg, x0, y0, x1, y1, eps, out) 
+     /* inputs */
+     int *nc, *nr, *nseg;
+     double *xc, *yc, *rmat;
+     double *x0, *y0, *x1, *y1;
+     double *eps;
+     /* output */
+     double *out;
+{
+  int n, m, i, j, k, nradperpt;
+  double radius, radius2, total, contrib;
+  double xx0, xx1, yy0, yy1, xleft, xright, yleft, yright, xcentre, ycentre;
+  double epsilon;
+  double DiscContrib();
+
+  n = *nc;
+  nradperpt = *nr;
+  m = *nseg;
+  epsilon = *eps;
+
+  for(i = 0; i < n; i++) {
+    xcentre = xc[i];
+    ycentre = yc[i];
+#ifdef DEBUG
+    Rprintf("\ni = %d:\n centre = (%lf, %lf)\n", i, xcentre, ycentre);
+#endif
+
+    for(j = 0; j < nradperpt; j++) {
+      radius = rmat[ j * n + i];
+      radius2 = radius * radius;
+#ifdef DEBUG
+       Rprintf("radius = %lf\n", radius);
+#endif
+
+      total = 0.0;
+      for(k=0; k < m; k++) {
+#ifdef DEBUG
+       Rprintf("k = %d\n", k);
+#endif
+	xx0 = x0[k];
+	yy0 = y0[k];
+	xx1 = x1[k];
+	yy1 = y1[k];
+#ifdef DEBUG
+       Rprintf("(%lf,%lf) to (%lf,%lf)\n", xx0, yy0, xx1, yy1);
+#endif
+       /* refer to unit disc at origin */
+       /* arrange so that xleft < xright */
+       if(radius <= epsilon)
+	 contrib = 0.0;
+       else if(xx0 < xx1) {
+	 xleft  = (xx0 - xcentre)/radius;
+	 xright = (xx1 - xcentre)/radius;
+	 yleft  = (yy0 - ycentre)/radius;
+	 yright = (yy1 - ycentre)/radius;
+	 contrib = - radius2 * DiscContrib(xleft,yleft,xright,yright,epsilon);
+       } else {
+	 xleft  = (xx1 - xcentre)/radius;
+	 xright = (xx0 - xcentre)/radius;
+	 yleft  = (yy1 - ycentre)/radius;
+	 yright = (yy0 - ycentre)/radius;
+	 contrib =  radius2 * DiscContrib(xleft,yleft,xright,yright,epsilon);
+       }
+#ifdef DEBUG
+	Rprintf("contrib = %lf\n contrib/(pi * r^2)=%lf\n", 
+		contrib, contrib/(PI * radius2));
+#endif
+	total += contrib;
+      }
+      out[ j * n + i] = total;
+#ifdef DEBUG
+	Rprintf("total = %lf\ntotal/(pi * r^2) = %lf\n", 
+		total, total/(PI * radius2));
+#endif
+    }
+  }
+}
+
+/* area of intersection of unit disc with halfplane x <= v */
+
+#ifdef DEBUG
+#define TRIGBIT(V) trigbit(V)
+double trigbit(v) 
+     double v;
+{
+  double zero, result;
+  zero = 0.0;
+  if(v < -1.0)
+    return(zero);
+  if(v > 1.0)
+    return(PI);
+  result = PI/2 + asin(v) + v * sqrt(1 - v * v);
+  Rprintf("trigbit: v = %lf, asin(v)=%lf, result=%lf\n",
+	  v, asin(v), result);
+  return(result);
+}
+#else
+#define TRIGBIT(V) (((V) <= -1.0) ? 0.0 : (((V) >= 1.0) ? PI : \
+              (PI/2 + asin(V) + (V) * sqrt(1 - (V) * (V)))))
+#endif
+
+/*
+  Find the area of intersection between a disc 
+       centre = (0,0),   radius = 1
+  and the trapezium with upper segment 
+       (xleft, yleft) to (xright, yright)
+  ASSUMES xleft < xright
+*/
+
+double DiscContrib(xleft, yleft, xright, yright, eps) 
+  double xleft, yleft, xright, yright, eps;
+  /* 
+  NOTE: unit disc centred at origin
+  */
+{
+  double xlo, xhi, zero, slope, intercept, A, B, C, det;
+  double xcut1, xcut2, ycut1, ycut2, xunder1, xunder2, dx, dx2, result;
+
+#ifdef DEBUG
+  double increm;
+  Rprintf(
+	  "DiscContrib: xleft=%lf, yleft=%lf, xright=%lf, yright=%lf\n",
+	  xleft, yleft, xright, yright);
+#endif
+
+  zero = 0.0;
+  /* determine relevant range of x coordinates */
+  xlo = MAX(xleft, (-1.0));
+  xhi = MIN(xright, 1.0);
+  if(xlo >= xhi - eps) {
+    /* intersection is empty or negligible */
+#ifdef DEBUG
+    Rprintf("intersection is empty or negligible\n");
+#endif
+    return(zero);
+  }
+    
+  /* find intersection points between the circle 
+     and the line containing upper segment
+  */
+  slope = (yright - yleft)/(xright - xleft);
+  intercept = yleft - slope * xleft;
+  A = 1 + slope * slope;
+  B = 2 * slope * intercept;
+  C = intercept * intercept - 1.0;
+  det = B * B - 4 * A * C;
+
+#ifdef DEBUG
+    Rprintf("slope=%lf, intercept=%lf\nA = %lf, B=%lf, C=%lf, det=%lf\n",
+	    slope, intercept, A, B, C, det);
+#endif
+
+  if(det <= 0.0) {
+    /* no crossing between disc and infinite line */
+    if(intercept < 0.0) 
+      /* segment is below disc; intersection is empty */
+      return(zero);
+    /* segment is above disc */
+    result = TRIGBIT(xhi) - TRIGBIT(xlo);
+    return(result);
+  } 
+  xcut1 = (- B - sqrt(det))/(2 * A);
+  xcut2 = (- B + sqrt(det))/(2 * A);
+  /* partition [xlo, xhi] into pieces delimited by {xcut1, xcut2} */
+  if(xcut1 >= xhi || xcut2 <= xlo) {
+    /* segment is outside disc */
+    if(yleft < 0.0) {
+#ifdef DEBUG
+    Rprintf("segment is beneath disc\n");
+#endif
+      result = zero;
+    } else {
+#ifdef DEBUG
+    Rprintf("segment is above disc\n");
+#endif
+      result = TRIGBIT(xhi) - TRIGBIT(xlo);
+    }
+    return(result);
+  } 
+  /* possibly three parts */
+#ifdef DEBUG
+  Rprintf("up to three pieces\n");
+#endif
+  result = zero;
+  ycut1 = intercept + slope * xcut1;
+  ycut2 = intercept + slope * xcut2;
+  if(xcut1 > xlo) {
+    /* part to left of cut */
+#ifdef DEBUG 
+    Rprintf("left of cut: [%lf, %lf]\n", xlo, xcut1);
+    if(ycut1 < 0.0)
+      Rprintf("below disc - no intersection\n");
+    else {
+      increm = TRIGBIT(xcut1) - TRIGBIT(xlo);
+      Rprintf("increment = %lf\n", increm);
+      result += increm;
+    }
+#else
+    if(ycut1 >= 0.0)
+      result += TRIGBIT(xcut1) - TRIGBIT(xlo);
+#endif
+  }
+  if(xcut2 < xhi) {
+    /* part to right of cut */
+#ifdef DEBUG 
+    Rprintf("right of cut: [%lf, %lf]\n", xcut2, xhi);
+    if(ycut2 < 0.0)
+      Rprintf("below disc - no intersection\n");
+    else {
+      increm = TRIGBIT(xhi) - TRIGBIT(xcut2);
+      Rprintf("increment = %lf\n", increm);
+      result += increm;
+    }
+#else
+    if(ycut2 >= 0.0)
+      result += TRIGBIT(xhi) - TRIGBIT(xcut2);
+#endif
+  }
+  /* part underneath cut */
+  xunder1 = MAX(xlo, xcut1);
+  xunder2 = MIN(xhi, xcut2);
+  dx = xunder2 - xunder1;
+  dx2 = xunder2 * xunder2 - xunder1 * xunder1;
+#ifdef DEBUG 
+    Rprintf("underneath cut: [%lf, %lf]\n",
+	    xunder1, xunder2);
+    increm = intercept * dx + slope * dx2/2 + 
+      (TRIGBIT(xunder2) - TRIGBIT(xunder1))/2;
+    Rprintf("increment = %lf\n", increm);
+    result += increm;
+#else
+  result += intercept * dx + slope * dx2/2 + 
+    (TRIGBIT(xunder2) - TRIGBIT(xunder1))/2;
+#endif
+  
+  return(result);
+
+}
+  
+
+#ifdef DEBUG
+/* interface to low level function, for debugging only */
+
+void RDCtest(xleft, yleft, xright, yright, eps, value)
+  double *xleft, *yleft, *xright, *yright, *eps, *value;
+{
+  double DiscContrib();
+  *value = DiscContrib(*xleft, *yleft, *xright, *yright, *eps);
+}
+
+#endif
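
The TRIGBIT macro encodes a standard identity: the area of the portion of the unit disc lying in the half-plane x <= v, obtained by integrating the chord length 2*sqrt(1 - x^2) from -1 to v. A standalone sketch (not upstream code):

    #include <math.h>

    #ifndef M_PI
    #define M_PI 3.141592653589793
    #endif

    double halfplane_disc_area(double v) {
      if(v <= -1.0) return 0.0;    /* half-plane misses the disc */
      if(v >= 1.0) return M_PI;    /* half-plane contains the disc */
      return M_PI / 2.0 + asin(v) + v * sqrt(1.0 - v * v);
    }
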
diff --git a/src/discs.c b/src/discs.c
new file mode 100644
index 0000000..ba6af41
--- /dev/null
+++ b/src/discs.c
@@ -0,0 +1,97 @@
+#include <R.h>
+#include <Rmath.h>
+
+/*
+  discs.c
+
+  Fill binary mask with discs with given centres and radii
+
+  $Revision: 1.4 $  $Date: 2014/10/05 03:04:08 $
+
+*/
+
+void discs2grid(nx, x0, xstep,  
+		ny, y0, ystep,   /* pixel grid dimensions */
+		nd, xd, yd, rd,  /* disc parameters */
+		out)
+     /* inputs */
+     int *nx, *ny, *nd;
+     double *x0, *xstep, *y0, *ystep;
+     double *xd, *yd, *rd;
+     /* output */
+     int *out;
+{ 
+  int Nxcol, Nyrow, Ndiscs;
+  double  X0, Y0, Xstep, Ystep;
+
+  int i, j, k;
+  double xk, yk, rk, rk2, dx, dymax; 
+  int imin, imax, jmin, jmax, iminj, imaxj, Nxcol1, Nyrow1;
+
+  Nxcol   = *nx;
+  Nyrow   = *ny;
+  Ndiscs  = *nd;
+  X0      = *x0;
+  Y0      = *y0;
+  Xstep   = *xstep;
+  Ystep   = *ystep;
+
+  if(Ndiscs == 0)
+    return;
+
+  Nxcol1 = Nxcol - 1;
+  Nyrow1 = Nyrow - 1;
+
+  /* loop over discs */
+  for(k = 0; k < Ndiscs; k++) {
+    
+    R_CheckUserInterrupt();
+
+    xk = xd[k];
+    yk = yd[k];
+    rk = rd[k];
+
+    /* find valid range of i and j */
+
+    imax = floor( (yk + rk - Y0)/Ystep);
+    imin = ceil((yk - rk - Y0)/Ystep);
+    jmax = floor( (xk + rk - X0)/Xstep);
+    jmin = ceil((xk - rk - X0)/Xstep);
+
+    if(imax >= 0 && imin < Nyrow && jmax >= 0 && jmin < Nxcol &&
+       imax >= imin && jmax >= jmin) {
+      
+      if(imin < 0) imin = 0; 
+      if(imax > Nyrow1) imax = Nyrow1;
+      if(jmin < 0) jmin = 0; 
+      if(jmax > Nxcol1) jmax = Nxcol1;
+
+      rk2 = rk * rk;
+      
+      /* loop over relevant pixels */
+      for(j = jmin, dx=X0 + jmin * Xstep - xk;
+	  j <= jmax; 
+	  j++, dx += Xstep) {
+
+	dymax = sqrt(rk2 - dx * dx);
+	
+	imaxj = floor( (yk + dymax - Y0)/Ystep);
+	iminj = ceil((yk - dymax - Y0)/Ystep);
+
+	if(imaxj >= 0 && iminj < Nyrow) {
+	  if(iminj < 0) iminj = 0; 
+	  if(imaxj > Nyrow1) imaxj = Nyrow1;
+	  
+	  for(i = iminj; i <= imaxj; i++) 
+	    out[i + j * Nyrow] = 1;
+	    
+	}
+      }
+    }
+  }
+}
+
+
+
+
+
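
What discs2grid() computes can be stated as a naive per-pixel test, sketched below (not upstream code). The real routine avoids the full triple loop: for each disc it restricts attention to the bounding range of columns and, within each column, solves directly for the contiguous run of rows inside the disc.

    #include <math.h>

    void discs2grid_naive(int nx, double x0, double xstep,
                          int ny, double y0, double ystep,
                          int nd, const double *xd, const double *yd,
                          const double *rd, int *out) {
      int i, j, k;
      for(j = 0; j < nx; j++) {
        for(i = 0; i < ny; i++) {
          /* pixel (row i, column j) represents this location */
          double x = x0 + j * xstep, y = y0 + i * ystep;
          for(k = 0; k < nd; k++) {
            double dx = x - xd[k], dy = y - yd[k];
            if(dx * dx + dy * dy <= rd[k] * rd[k]) {
              out[i + j * ny] = 1;   /* same column-wise layout as above */
              break;
            }
          }
        }
      }
    }
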
diff --git a/src/dist2.c b/src/dist2.c
new file mode 100755
index 0000000..cef9400
--- /dev/null
+++ b/src/dist2.c
@@ -0,0 +1,100 @@
+#include <math.h>
+#include <R.h>
+
+#include "yesno.h"
+
+/* 
+
+   dist2:   squared distance in torus
+
+   dist2thresh: faster code for testing whether dist2 < r2
+
+   dist2Mthresh: same as dist2thresh, but does not assume
+                 the points are within one period of each other.
+*/
+
+double dist2(u,v,x,y,period)
+     double u, v, x, y;
+     double *period;
+{
+  double wide, high, dx, dy, dxp, dyp, a, b, d2;
+  /* points are assumed to lie within one period of each other */
+
+  wide = period[0];
+  high = period[1];
+
+  dx = u - x;
+  if(dx < 0.0) dx = -dx;
+  dxp = wide - dx;
+  a = (dx < dxp)? dx : dxp;
+
+  dy = v - y;
+  if(dy < 0.0) dy = -dy;
+  dyp = high - dy;
+  b = (dy < dyp)? dy : dyp;
+
+  d2 = a * a + b * b;
+  return d2;
+}
+
+double dist2either(u,v,x,y,period)
+     double u, v, x, y;
+     double *period;
+{
+  if(period[0] < 0.0) return pow(u-x,2) + pow(v-y,2);
+  return(dist2(u,v,x,y,period));
+}
+
+int dist2thresh(u,v,x,y,period,r2)
+     double u, v, x, y, r2;
+     double *period;
+{
+  double wide, high, dx, dy, dxp, dyp, a, b, residue;
+  /* points are assumed to lie within one period of each other */
+
+  wide = period[0];
+  high = period[1];
+
+  dx = u - x;
+  if(dx < 0.0) dx = -dx;
+  dxp = wide - dx;
+  a = (dx < dxp) ? dx : dxp;
+  residue = r2 - a * a;
+  if(residue <= 0.0)
+    return NO;
+  dy = v - y;
+  if(dy < 0.0) dy = -dy;
+  dyp = high - dy;
+  b = (dy < dyp) ? dy : dyp;
+  if (residue > b * b) 
+    return YES; 
+  return NO;
+}
+
+int dist2Mthresh(u,v,x,y,period,r2)
+     double u, v, x, y, r2;
+     double *period;
+{
+  double wide, high, dx, dy, dxp, dyp, a, b, residue;
+  /* points are NOT assumed to lie within one period of each other */
+
+  wide = period[0];
+  high = period[1];
+
+  dx = u - x;
+  if(dx < 0.0) dx = -dx;
+  while(dx > wide) dx -= wide;
+  dxp = wide - dx;
+  a = (dx < dxp) ? dx : dxp;
+  residue = r2 - a * a;
+  if(residue < 0.0)
+    return NO;
+  dy = v - y;
+  if(dy < 0.0) dy = -dy;
+  while(dy > high) dy -= high;
+  dyp = high - dy;
+  b = (dy < dyp) ? dy : dyp;
+  if (residue >= b * b) 
+    return YES; 
+  return NO;
+}
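
A small illustration (not upstream code; compile together with dist2.c) of the toroidal convention: on a 1 x 1 torus, points with x coordinates 0.1 and 0.9 are separated by 0.2, the shorter way round across the periodic boundary, not 0.8.

    #include <stdio.h>
    #include "dist2.h"

    int main(void) {
      double period[2] = {1.0, 1.0};
      /* prints 0.040000 = 0.2^2: wrapped difference is min(0.8, 1 - 0.8) */
      printf("%f\n", dist2(0.1, 0.5, 0.9, 0.5, period));
      return 0;
    }
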
diff --git a/src/dist2.h b/src/dist2.h
new file mode 100755
index 0000000..1e59f20
--- /dev/null
+++ b/src/dist2.h
@@ -0,0 +1,86 @@
+/* 
+   dist2.h 
+
+   External declarations for the functions defined in dist2.c
+   and
+   In-line cpp macros for similar purposes
+
+   $Revision: 1.19 $ $Date: 2014/05/08 02:13:22 $
+
+*/
+
+double dist2(double u, double v, double x, double y, double *period);
+
+double dist2either(double u, double v, double x, double y, double *period);
+
+int dist2thresh(double u, double v, double x, double y, double *period, double r2);
+
+int dist2Mthresh(double u, double v, double x, double y, double *period, double r2);
+
+/* 
+   Efficient macros to test closeness of points
+*/
+
+/* 
+   These variables must be declared
+   (note: some files e.g. straush.c use 'RESIDUE' explicitly)
+*/
+
+#define DECLARE_CLOSE_VARS \
+  register double DX, DY, DXP, DYP, RESIDUE
+
+#define DECLARE_CLOSE_D2_VARS \
+  register double DX, DY, DXP, DYP, DX2
+
+#define CLOSE(U,V,X,Y,R2)		\
+  ((DX = X - U),			\
+   (RESIDUE = R2 - DX * DX),		\
+   ((RESIDUE > 0.0) &&			\
+    ((DY = Y - V),                      \
+     (RESIDUE = RESIDUE - DY * DY),     \
+     (RESIDUE > 0.0))))
+
+#define CLOSE_D2(U,V,X,Y,R2,D2)						\
+  ((DX = X - U),							\
+   (DX2 = DX * DX),							\
+   (DX2 < R2) && (((DY = Y - V),					\
+		   (D2 = DX2 + DY * DY),				\
+		   (D2 < R2))))
+
+/*
+  The following calculates X mod P, 
+  but it works only if X \in [-P, P]
+  so that X is the difference between two values
+  that lie in an interval of length P 
+*/
+
+#define CLOSE_PERIODIC(U,V,X,Y,PERIOD,R2)				\
+  ((DX = X - U),							\
+   (DX = (DX < 0.0) ? -DX : DX),					\
+   (DXP = (PERIOD)[0] - DX),						\
+   (DX = (DX < DXP) ? DX : DXP),					\
+   (RESIDUE = R2 - DX * DX),						\
+   ((RESIDUE > 0.0) && ((DY = Y - V),					\
+			(DY = (DY < 0.0) ? -DY : DY),			\
+			(DYP = (PERIOD)[1] - DY),			\
+			(DY = (DY < DYP) ? DY : DYP),			\
+                        (RESIDUE = RESIDUE - DY * DY),                  \
+			(RESIDUE > 0.0) )))
+
+#define CLOSE_PERIODIC_D2(U,V,X,Y,PERIOD,R2,D2)				\
+  ((DX = X - U),							\
+   (DX = (DX < 0.0) ? -DX : DX),					\
+   (DXP = (PERIOD)[0] - DX),						\
+   (DX = (DX < DXP) ? DX : DXP),					\
+   (D2 = DX * DX),							\
+   ((D2 < R2) && ((DY = Y - V),						\
+		  (DY = (DY < 0.0) ? -DY : DY),				\
+		  (DYP = (PERIOD)[1] - DY),				\
+		  (DY = (DY < DYP) ? DY : DYP),				\
+		  (D2 += DY * DY),					\
+		  (D2 < R2) )))
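+
+/*
+   Illustrative usage sketch (editorial addition, not upstream code):
+   counting the pairs of points closer than sqrt(r2), using the CLOSE
+   macro together with the working variables that DECLARE_CLOSE_VARS
+   brings into scope.
+*/
+#ifdef CLOSE_EXAMPLE
+int countclose(int n, double *x, double *y, double r2) {
+  DECLARE_CLOSE_VARS;
+  int i, j, count;
+  count = 0;
+  for(i = 0; i < n; i++)
+    for(j = i + 1; j < n; j++)
+      if(CLOSE(x[i], y[i], x[j], y[j], r2)) ++count;
+  return count;
+}
+#endif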
+
+
+
+
+
diff --git a/src/dist2dpath.c b/src/dist2dpath.c
new file mode 100755
index 0000000..77159c4
--- /dev/null
+++ b/src/dist2dpath.c
@@ -0,0 +1,25 @@
+#include <R.h>
+#include <R_ext/Utils.h>
+
+/*
+  given matrix of edge lengths
+  compute matrix of shortest-path distances
+
+  Uses dist2dpath.h
+*/
+
+#define FNAME Ddist2dpath
+#define DTYPE double
+#define FLOATY
+
+#include "dist2dpath.h"
+
+#undef FNAME
+#undef DTYPE
+#undef FLOATY
+
+#define FNAME Idist2dpath
+#define DTYPE int
+
+#include "dist2dpath.h"
+
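+/*
+   Illustrative driver (editorial addition, not upstream code):
+   shortest-path distances on the 3-vertex path graph 0 - 1 - 2, using
+   the double-precision instantiation Ddist2dpath.  Matrices use the
+   layout MATRIX(X,I,J) = X[J + n*I] from dist2dpath.h; dpath must
+   point to at least 9 doubles.
+*/
+#ifdef DIST2DPATH_EXAMPLE
+void dist2dpath_example(double *dpath) {
+  int n = 3, niter, status;
+  /* edges 0-1 and 1-2, each of length 1 */
+  int adj[9]   = {0,1,0, 1,0,1, 0,1,0};
+  double d[9]  = {0,1,0, 1,0,1, 0,1,0};
+  double tol = 1e-8;
+  Ddist2dpath(&n, d, adj, dpath, &tol, &niter, &status);
+  /* now dpath[2] == DPATH(0,2) == 2.0, via vertex 1 */
+}
+#endif
+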
diff --git a/src/dist2dpath.h b/src/dist2dpath.h
new file mode 100644
index 0000000..27fe707
--- /dev/null
+++ b/src/dist2dpath.h
@@ -0,0 +1,184 @@
+/*
+
+  Function body for dist2dpath.c
+
+  Macros used: 
+
+  FNAME   function name
+  DTYPE   declaration for distance values ('double' or 'int')
+  FLOATY  (DTYPE == 'double')
+
+  $Revision: 1.3 $   $Date: 2013/05/27 02:09:10 $
+
+ */
+
+#undef DEBUG 
+
+#define MATRIX(X,I,J) (X)[(J) + n * (I)]
+#define D(I,J)     MATRIX(d,     I, J)
+#define DPATH(I,J) MATRIX(dpath, I, J)
+#define ADJ(I,J)   (MATRIX(adj,  I, J) != 0)
+
+#define INFIN -1
+#define FINITE(X) ((X) >= 0)
+
+void FNAME(nv, d, adj, dpath, tol, niter, status) 
+  int *nv;      /* number of vertices */
+  DTYPE *d;     /* matrix of edge lengths */
+  int *adj;     /* 0/1 edge matrix of graph */
+  DTYPE *dpath; /* output - shortest path distance matrix */
+  DTYPE *tol;   /* tolerance threshold (ignored in integer case) */
+  int *niter, *status; /* status = 0 for convergence */
+{
+  int i, j, k, n, iter, maxiter, changed;
+  DTYPE dij, dik, dkj, dikj;
+#ifdef FLOATY
+  DTYPE eps, diff, maxdiff;
+#endif
+  int totaledges, starti, nneighi, increm, pos;
+  int *start, *nneigh, *indx;
+
+  n = *nv;
+#ifdef FLOATY
+  eps = *tol;
+#endif
+
+  /* initialise and count edges */
+  *status = -1;
+  totaledges = 0;
+  for(i = 0; i < n; i++) {
+    for(j = 0; j < n; j++) {
+      DPATH(i, j) = (i == j) ? 0 : ((ADJ(i,j)) ? D(i, j) : INFIN);
+      if((i != j) && ADJ(i,j)) ++totaledges;
+    }
+  }
+
+  maxiter = 2 + ((totaledges > n) ? totaledges : n);
+
+  /* store indices j for each edge (i,j) */
+  indx = (int *) R_alloc(totaledges, sizeof(int));
+  nneigh = (int *) R_alloc(n, sizeof(int));
+  start  = (int *) R_alloc(n, sizeof(int));
+
+  pos = 0;
+  for(i = 0; i < n; i++) {
+    nneigh[i] = 0;
+    start[i] = pos;
+#ifdef DEBUG 
+    Rprintf("Neighbours of %d:\n", i);
+#endif
+    for(j = 0; j < n; j++) {
+      if((i != j) && ADJ(i,j) && FINITE(D(i,j))) {
+#ifdef DEBUG 
+	Rprintf("\t%d\n", j);
+#endif
+	++(nneigh[i]);
+	if(pos >= totaledges)
+	  error("internal error: pos exceeded storage");
+	indx[pos] = j;
+	++pos;
+      }
+    }
+  }
+
+  /* run */
+  for(iter = 0; iter < maxiter; iter++) {
+
+    changed = 0;
+#ifdef FLOATY
+    maxdiff = 0;
+#endif
+
+#ifdef DEBUG
+    Rprintf("--------- iteration %d ---------------\n", iter);
+#endif
+    for(i = 0; i < n; i++) {
+      R_CheckUserInterrupt();
+      nneighi = nneigh[i];
+      if(nneighi > 0) {
+	/* run through neighbours k of i */
+	starti = start[i];
+	for(increm = 0, pos=starti; increm < nneighi; ++increm, ++pos) {
+	  k = indx[pos];
+	  dik = DPATH(i,k);
+#ifdef DEBUG
+#ifdef FLOATY
+	    Rprintf("i=%d k=%d dik=%lf\n", i, k, dik);
+#else
+	    Rprintf("i=%d k=%d dik=%d\n",  i, k, dik);
+#endif
+#endif
+	  /* now run through all other vertices j */
+	  for(j = 0; j < n; j++) {
+	    if(j != i && j != k) {
+	      dij = DPATH(i,j);
+	      dkj = DPATH(k,j);
+	      if(FINITE(dkj)) {
+		dikj = dik + dkj;
+#ifdef DEBUG
+#ifdef FLOATY
+		Rprintf("considering %d -> (%d) -> %d,\t dij=%lf, dikj=%lf\n", 
+			i, k, j, dij, dikj);
+#else
+		Rprintf("considering %d -> (%d) -> %d,\t dij=%d, dikj=%d\n", 
+			i, k, j, dij, dikj);
+#endif
+#endif
+		if(!FINITE(dij) || dikj < dij) {
+#ifdef DEBUG
+#ifdef FLOATY
+		  Rprintf("updating i=%d j=%d via k=%d from %lf to %lf\n", 
+			  i, j, k, dij, dikj);
+#else
+		  Rprintf("updating i=%d j=%d via k=%d from %d to %d\n", 
+			  i, j, k, dij, dikj);
+#endif
+#endif
+		  DPATH(i,j) = DPATH(j,i) = dikj;
+		  changed = 1;
+#ifdef FLOATY
+		  diff = (FINITE(dij)) ? dij - dikj : dikj;
+		  if(diff > maxdiff) maxdiff = diff;
+#endif
+		}
+	      }
+	    }
+	  }
+	}
+      }
+    }
+    if(changed == 0) {
+      /* algorithm converged */
+#ifdef DEBUG
+      Rprintf("Algorithm converged\n");
+#endif
+      *status = 0;
+      break;
+#ifdef FLOATY
+    } else if(FINITE(maxdiff) && maxdiff < eps) {
+      /* tolerance reached */
+#ifdef DEBUG
+      Rprintf("Algorithm terminated with maxdiff=%lf\n", maxdiff);
+#endif
+      *status = 1;
+      break;
+#endif
+    }
+  }
+
+#ifdef DEBUG
+  Rprintf("Returning after %d iterations on %d vertices\n", iter, n);
+#endif
+  
+  *niter = iter;
+}
+
+#undef DEBUG 
+
+#undef MATRIX
+#undef D
+#undef DPATH
+#undef ADJ
+#undef INFIN 
+#undef FINITE
+
diff --git a/src/distan3.c b/src/distan3.c
new file mode 100755
index 0000000..7b644fa
--- /dev/null
+++ b/src/distan3.c
@@ -0,0 +1,497 @@
+/*
+
+  distan3.c
+
+  Distances between pairs of 3D points
+
+  $Revision: 1.3 $     $Date: 2013/11/03 03:34:15 $
+
+  D3pairdist      Pairwise distances
+  D3pair2dist     Pairwise distances squared
+  D3pairPdist     Pairwise distances with periodic correction
+  D3pairP2dist    Pairwise distances squared, with periodic correction
+
+  D3crossdist     Pairwise distances for two sets of points
+  D3cross2dist    Pairwise distances squared, for two sets of points
+  D3crossPdist    Pairwise distances for two sets of points, periodic correction
+
+  matchxyz       Find matches between two sets of points   
+
+ */
+
+#include <math.h>
+/* #include <stdio.h> */
+
+double sqrt();
+
+void D3pairdist(n, x, y, z, squared, d)
+     /* inputs */
+     int *n;
+     double *x, *y, *z;
+     int *squared;
+     /* output */
+     double *d;
+{ 
+  void D3pair1dist(), D3pair2dist();
+  if(*squared == 0) {
+    D3pair1dist(n, x, y, z, d);
+  } else {
+    D3pair2dist(n, x, y, z, d);
+  }
+}
+
+void D3pair1dist(n, x, y, z, d)
+     /* inputs */
+     int *n;
+     double *x, *y, *z;
+     /* output */
+     double *d;
+{ 
+  int i, j, npoints; 
+  double *dp;
+  double xi, yi, zi, dx, dy, dz, dist;
+
+  npoints = *n;
+
+  /* set d[0,0] = 0 */
+  *d = 0.0;
+
+  for (i=1; i < npoints; i++) 
+    {
+      xi = x[i];
+      yi = y[i];
+      zi = z[i];
+      /* point at the start of column i */
+      dp = d + i * npoints;
+      /* set diagonal to zero */
+      dp[i] = 0.0;
+      for (j=0; j < i; j++)
+	{
+	  dx = x[j] - xi;
+	  dy = y[j] - yi;
+	  dz = z[j] - zi;
+	  dist = sqrt( dx * dx + dy * dy + dz * dz ); 
+	  /* upper triangle */
+	  *dp = dist;
+	  ++dp;
+	  /* lower triangle */
+	  d[ j * npoints + i] = dist;
+	}
+    }
+}
+
+/* squared distances */
+
+void D3pair2dist(n, x, y, z, d)
+     /* inputs */
+     int *n;
+     double *x, *y, *z;
+     /* output */
+     double *d;
+{ 
+  int i, j, npoints; 
+  double *dp;
+  double xi, yi, zi, dx, dy, dz, dist;
+
+  npoints = *n;
+
+  /* set d[0,0] = 0 */
+  *d = 0.0;
+
+  for (i=1; i < npoints; i++) 
+    {
+      xi = x[i];
+      yi = y[i];
+      zi = z[i];
+      /* point at the start of column i */
+      dp = d + i * npoints;
+      /* set diagonal to zero */
+      dp[i] = 0.0;
+      for (j=0; j < i; j++)
+	{
+	  dx = x[j] - xi;
+	  dy = y[j] - yi;
+	  dz = z[j] - zi;
+	  dist = dx * dx + dy * dy + dz * dz; 
+	  /* upper triangle */
+	  *dp = dist;
+	  ++dp;
+	  /* lower triangle */
+	  d[ j * npoints + i] = dist;
+	}
+    }
+}
+
+void D3crossdist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, squared, d)
+     /* inputs */
+     int *nto, *nfrom;
+     double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto;
+     int *squared;
+     /* output */
+     double *d;
+{
+  void D3cross1dist(), D3cross2dist();
+  if(*squared == 0) {
+    D3cross1dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, d);
+  } else {
+    D3cross2dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, d);
+  }
+}
+
+void D3cross1dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, d)
+     /* inputs */
+     int *nto, *nfrom;
+     double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto;
+     /* output */
+     double *d;
+{ 
+  int i, j, nf, nt; 
+  double *dptr;
+  double xj, yj, zj, dx, dy, dz;
+
+  nf = *nfrom;
+  nt = *nto;
+
+  dptr = d;
+
+  for (j=0; j < nt; j++) {
+    xj = xto[j];
+    yj = yto[j];
+    zj = zto[j];
+    for(i = 0; i < nf; i++, dptr++) {
+	dx = xj - xfrom[i];
+	dy = yj - yfrom[i];
+	dz = zj - zfrom[i];
+	*dptr = sqrt( dx * dx + dy * dy + dz * dz ); 
+    }
+  }
+}
+
+/* squared distances */
+
+void D3cross2dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, d)
+     /* inputs */
+     int *nto, *nfrom;
+     double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto;
+     /* output */
+     double *d;
+{ 
+  int i, j, nf, nt; 
+  double *dptr;
+  double xj, yj, zj, dx, dy, dz;
+
+  nf = *nfrom;
+  nt = *nto;
+
+  dptr = d;
+
+  for (j=0; j < nt; j++) {
+    xj = xto[j];
+    yj = yto[j];
+    zj = zto[j];
+    for(i = 0; i < nf; i++, dptr++) {
+	dx = xj - xfrom[i];
+	dy = yj - yfrom[i];
+	dz = zj - zfrom[i];
+	*dptr = dx * dx + dy * dy + dz * dz; 
+    }
+  }
+}
+
+
+
+/* distances with periodic correction */
+
+void D3pairPdist(n, x, y, z, xwidth, yheight, zdepth, squared, d)
+     /* inputs */
+     int *n;
+     double *x, *y, *z, *xwidth, *yheight, *zdepth;
+     int *squared;
+     /* output */
+     double *d;
+{
+  void D3pairP1dist(), D3pairP2dist();
+  if(*squared == 0) {
+    D3pairP1dist(n, x, y, z, xwidth, yheight, zdepth, d);
+  } else {
+    D3pairP2dist(n, x, y, z, xwidth, yheight, zdepth, d);
+  }
+}
+
+void D3pairP1dist(n, x, y, z, xwidth, yheight, zdepth, d)
+     /* inputs */
+     int *n;
+     double *x, *y, *z, *xwidth, *yheight, *zdepth;
+     /* output */
+     double *d;
+{ 
+  int i, j, npoints; 
+  double *dp;
+  double xi, yi, zi, dx, dy, dz, dx2, dy2, dz2, dx2p, dy2p, dz2p, dist, wide, high, deep;
+
+  npoints = *n;
+  wide = *xwidth;
+  high = *yheight;
+  deep = *zdepth;
+
+  /* set d[0,0] = 0 */
+  *d = 0.0;
+
+  for (i=1; i < npoints; i++) 
+    {
+      xi = x[i];
+      yi = y[i];
+      zi = z[i];
+      /* point at the start of column i */
+      dp = d + i * npoints;
+      /* set diagonal to zero */
+      dp[i] = 0.0;
+      for (j=0; j < i; j++)
+	{
+	  dx = x[j] - xi;
+	  dy = y[j] - yi;
+	  dz = z[j] - zi;
+	  dx2p = dx * dx;
+	  dy2p = dy * dy;
+	  dz2p = dz * dz;
+	  dx2 = (dx - wide) * (dx - wide);
+	  dy2 = (dy - high) * (dy - high);
+	  dz2 = (dz - deep) * (dz - deep);
+	  if(dx2 < dx2p) dx2p = dx2;
+	  if(dy2 < dy2p) dy2p = dy2;
+	  if(dz2 < dz2p) dz2p = dz2;
+	  dx2 = (dx + wide) * (dx + wide);
+	  dy2 = (dy + high) * (dy + high);
+	  dz2 = (dz + deep) * (dz + deep);
+	  if(dx2 < dx2p) dx2p = dx2;
+	  if(dy2 < dy2p) dy2p = dy2;
+	  if(dz2 < dz2p) dz2p = dz2;
+	  dist = sqrt( dx2p + dy2p + dz2p ); 
+	  /* upper triangle */
+	  *dp = dist;
+	  ++dp;
+	  /* lower triangle */
+	  d[ j * npoints + i] = dist;
+	}
+    }
+}
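+
+/*
+   Editorial sketch (not upstream code): for points within one period of
+   each other, the toroidal difference along one axis is the smallest in
+   magnitude of dx, dx - period and dx + period; the three candidate
+   squares computed above implement exactly this.
+*/
+#ifdef PERIODIC_EXAMPLE
+static double perdiff2(double dx, double period) {
+  double a, b, c;
+  a = dx * dx;
+  b = (dx - period) * (dx - period);
+  c = (dx + period) * (dx + period);
+  if(b < a) a = b;
+  if(c < a) a = c;
+  return a;   /* squared toroidal difference along one axis */
+}
+#endif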
+
+/* same function without the sqrt */
+
+void D3pairP2dist(n, x, y, z, xwidth, yheight, zdepth, d)
+     /* inputs */
+     int *n;
+     double *x, *y, *z, *xwidth, *yheight, *zdepth;
+     /* output */
+     double *d;
+{ 
+  int i, j, npoints; 
+  double *dp;
+  double xi, yi, zi, dx, dy, dz, dx2, dy2, dz2, dx2p, dy2p, dz2p, dist, wide, high, deep;
+
+  npoints = *n;
+  wide = *xwidth;
+  high = *yheight;
+  deep = *zdepth;
+
+  /* set d[0,0] = 0 */
+  *d = 0.0;
+
+  for (i=1; i < npoints; i++) 
+    {
+      xi = x[i];
+      yi = y[i];
+      zi = z[i];
+      /* point at the start of column i */
+      dp = d + i * npoints;
+      /* set diagonal to zero */
+      dp[i] = 0.0;
+      for (j=0; j < i; j++)
+	{
+	  dx = x[j] - xi;
+	  dy = y[j] - yi;
+	  dz = z[j] - zi;
+	  dx2p = dx * dx;
+	  dy2p = dy * dy;
+	  dz2p = dz * dz;
+	  dx2 = (dx - wide) * (dx - wide);
+	  dy2 = (dy - high) * (dy - high);
+	  dz2 = (dz - deep) * (dz - deep);
+	  if(dx2 < dx2p) dx2p = dx2;
+	  if(dy2 < dy2p) dy2p = dy2;
+	  if(dz2 < dz2p) dz2p = dz2;
+	  dx2 = (dx + wide) * (dx + wide);
+	  dy2 = (dy + high) * (dy + high);
+	  dz2 = (dz + deep) * (dz + deep);
+	  if(dx2 < dx2p) dx2p = dx2;
+	  if(dy2 < dy2p) dy2p = dy2;
+	  if(dz2 < dz2p) dz2p = dz2;
+	  dist = dx2p + dy2p + dz2p; 
+	  /* upper triangle */
+	  *dp = dist;
+	  ++dp;
+	  /* lower triangle */
+	  d[ j * npoints + i] = dist;
+	}
+    }
+}
+
+void D3crossPdist(nfrom, xfrom, yfrom, zfrom, 
+		   nto, xto, yto, zto, 
+		   xwidth, yheight, zdepth, 
+		   squared, d)
+     /* inputs */
+     int *nto, *nfrom;
+     double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto, *xwidth, *yheight, *zdepth;
+     int *squared;
+     /* output */
+     double *d;
+{
+  void D3crossP1dist(), D3crossP2dist();
+  if(*squared == 0) {
+    D3crossP1dist(nfrom, xfrom, yfrom, zfrom, 
+		  nto, xto, yto, zto, 
+		  xwidth, yheight, zdepth, 
+		  d);
+  } else {
+    D3crossP2dist(nfrom, xfrom, yfrom, zfrom, 
+		  nto, xto, yto, zto, 
+		  xwidth, yheight, zdepth, 
+		  d);
+  }
+}
+
+
+void D3crossP1dist(nfrom, xfrom, yfrom, zfrom, 
+		   nto, xto, yto, zto, 
+		   xwidth, yheight, zdepth, 
+		   d)
+     /* inputs */
+     int *nto, *nfrom;
+     double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto, *xwidth, *yheight, *zdepth;
+     /* output */
+     double *d;
+{ 
+  int i, j, nf, nt; 
+  double *dptr;
+  double xj, yj, zj, dx, dy, dz, dx2, dy2, dz2, dx2p, dy2p, dz2p, wide, high, deep;
+
+  nf = *nfrom;
+  nt = *nto;
+  wide = *xwidth;
+  high = *yheight;
+  deep = *zdepth;
+
+  dptr = d;
+
+  for (j=0; j < nt; j++) {
+    xj = xto[j];
+    yj = yto[j];
+    zj = zto[j];
+    for(i = 0; i < nf; i++, dptr++) {
+	dx = xj - xfrom[i];
+	dy = yj - yfrom[i];
+	dz = zj - zfrom[i];
+	dx2p = dx * dx;
+	dy2p = dy * dy;
+	dz2p = dz * dz;
+	dx2 = (dx - wide) * (dx - wide);
+	dy2 = (dy - high) * (dy - high);
+	dz2 = (dz - deep) * (dz - deep);
+	if(dx2 < dx2p) dx2p = dx2;
+	if(dy2 < dy2p) dy2p = dy2;
+	if(dz2 < dz2p) dz2p = dz2;
+	dx2 = (dx + wide) * (dx + wide);
+	dy2 = (dy + high) * (dy + high);
+	dz2 = (dz + deep) * (dz + deep);
+	if(dx2 < dx2p) dx2p = dx2;
+	if(dy2 < dy2p) dy2p = dy2;
+	if(dz2 < dz2p) dz2p = dz2;
+	*dptr = sqrt( dx2p + dy2p + dz2p ); 
+    }
+  }
+}
+
+
+void D3crossP2dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, xwidth, yheight, zdepth, d)
+     /* inputs */
+     int *nto, *nfrom;
+     double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto, *xwidth, *yheight, *zdepth;
+     /* output */
+     double *d;
+{ 
+  int i, j, nf, nt; 
+  double *dptr;
+  double xj, yj, zj, dx, dy, dz, dx2, dy2, dz2, dx2p, dy2p, dz2p, wide, high, deep;
+
+  nf = *nfrom;
+  nt = *nto;
+  wide = *xwidth;
+  high = *yheight;
+  deep = *zdepth;
+
+  dptr = d;
+
+  for (j=0; j < nt; j++) {
+    xj = xto[j];
+    yj = yto[j];
+    zj = zto[j];
+    for(i = 0; i < nf; i++, dptr++) {
+	dx = xj - xfrom[i];
+	dy = yj - yfrom[i];
+	dz = zj - zfrom[i];
+	dx2p = dx * dx;
+	dy2p = dy * dy;
+	dz2p = dz * dz;
+	dx2 = (dx - wide) * (dx - wide);
+	dy2 = (dy - high) * (dy - high);
+	dz2 = (dz - deep) * (dz - deep);
+	if(dx2 < dx2p) dx2p = dx2;
+	if(dy2 < dy2p) dy2p = dy2;
+	if(dz2 < dz2p) dz2p = dz2;
+	dx2 = (dx + wide) * (dx + wide);
+	dy2 = (dy + high) * (dy + high);
+	dz2 = (dz + deep) * (dz + deep);
+	if(dx2 < dx2p) dx2p = dx2;
+	if(dy2 < dy2p) dy2p = dy2;
+	if(dz2 < dz2p) dz2p = dz2;
+	*dptr = dx2p + dy2p + dz2p; 
+    }
+  }
+}
+
+/*
+
+  matchxyz
+
+  Find matches between two lists of points
+
+ */
+
+void matchxyz(na, xa, ya, za, nb, xb, yb, zb, match)
+     /* inputs */
+     int *na, *nb;
+     double *xa, *ya, *za, *xb, *yb, *zb;
+     /* output */
+     int *match; 
+{ 
+  int i, j, Na, Nb; 
+  double xai, yai, zai;
+
+  Na = *na;
+  Nb = *nb;
+
+  for (i=0; i < Na; i++) 
+    {
+      xai = xa[i];
+      yai = ya[i];
+      zai = za[i];
+      match[i] = 0;
+      for (j=0; j < Nb; j++) 
+	if(xai == xb[j] && yai == yb[j] && zai == zb[j]) {
+	  match[i] = j;
+	  break;
+	}
+    }
+}
+
diff --git a/src/distances.c b/src/distances.c
new file mode 100755
index 0000000..9bba6c3
--- /dev/null
+++ b/src/distances.c
@@ -0,0 +1,430 @@
+/*
+
+  distances.c
+
+  Distances between pairs of points
+
+  $Revision: 1.31 $     $Date: 2017/01/08 00:32:52 $
+
+  Cpairdist      Pairwise distances
+  Cpair2dist     Pairwise distances squared
+  CpairPdist     Pairwise distances with periodic correction
+  CpairP2dist    Pairwise distances squared, with periodic correction
+
+  Ccrossdist     Pairwise distances for two sets of points
+  Ccross2dist    Pairwise distances squared, for two sets of points
+  CcrossPdist    Pairwise distances for two sets of points, periodic correction
+
+ */
+
+#include <math.h>
+#include <R_ext/Utils.h>
+#include "chunkloop.h"
+
+double sqrt();
+
+void Cpairdist(n, x, y, squared, d) 
+     /* inputs */
+     int *n;
+     double *x, *y;
+     int *squared;
+     /* output */
+     double *d;
+{
+  void Cpair1dist(), Cpair2dist();
+  if(*squared == 0) {
+    Cpair1dist(n, x, y, d);
+  } else {
+    Cpair2dist(n, x, y, d);
+  }
+}
+
+
+void Cpair1dist(n, x, y, d)
+     /* inputs */
+     int *n;
+     double *x, *y;
+     /* output */
+     double *d;
+{ 
+  int i, j, npoints, maxchunk; 
+  double *dp;
+  double xi, yi, dx, dy, dist;
+
+  npoints = *n;
+
+  /* set d[0,0] = 0 */
+  *d = 0.0;
+
+  OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+      xi = x[i];
+      yi = y[i];
+      /* point at the start of column i */
+      dp = d + i * npoints;
+      /* set diagonal to zero */
+      dp[i] = 0.0;
+      for (j=0; j < i; j++)
+	{
+	  dx = x[j] - xi;
+	  dy = y[j] - yi;
+	  dist = sqrt( dx * dx + dy * dy ); 
+	  /* upper triangle */
+	  *dp = dist;
+	  ++dp;
+	  /* lower triangle */
+	  d[ j * npoints + i] = dist;
+	}
+    }
+  }
+}
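+
+/*
+   Editorial sketch (not upstream code): OUTERCHUNKLOOP/INNERCHUNKLOOP are
+   assumed here to partition the index range into chunks of 16384
+   iterations so that R_CheckUserInterrupt() is reached regularly without
+   being invoked on every single pass.  A plain loop with the same effect:
+*/
+#ifdef CHUNKLOOP_EXAMPLE
+void interruptible_loop(int npoints) {
+  int i;
+  for(i = 0; i < npoints; i++) {
+    if(i % 16384 == 0) R_CheckUserInterrupt();
+    /* ... loop body ... */
+  }
+}
+#endif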
+
+/* squared distances */
+
+void Cpair2dist(n, x, y, d)
+     /* inputs */
+     int *n;
+     double *x, *y;
+     /* output */
+     double *d;
+{ 
+  int i, j, npoints, maxchunk; 
+  double *dp;
+  double xi, yi, dx, dy, dist;
+
+  npoints = *n;
+
+  /* set d[0,0] = 0 */
+  *d = 0.0;
+
+  OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+      xi = x[i];
+      yi = y[i];
+      /* point at the start of column i */
+      dp = d + i * npoints;
+      /* set diagonal to zero */
+      dp[i] = 0.0;
+      for (j=0; j < i; j++)
+	{
+	  dx = x[j] - xi;
+	  dy = y[j] - yi;
+	  dist = dx * dx + dy * dy; 
+	  /* upper triangle */
+	  *dp = dist;
+	  ++dp;
+	  /* lower triangle */
+	  d[ j * npoints + i] = dist;
+	}
+    }
+  }
+}
+
+void Ccrossdist(nfrom, xfrom, yfrom, nto, xto, yto, squared, d)
+     /* inputs */
+     int *nto, *nfrom;
+     double *xfrom, *yfrom, *xto, *yto;
+     int *squared;
+     /* output */
+     double *d;
+{
+  void Ccross1dist(), Ccross2dist();
+  if(*squared == 0) {
+    Ccross1dist(nfrom, xfrom, yfrom, nto, xto, yto, d);
+  } else {
+    Ccross2dist(nfrom, xfrom, yfrom, nto, xto, yto, d);
+  }
+}
+		      
+
+void Ccross1dist(nfrom, xfrom, yfrom, nto, xto, yto, d)
+     /* inputs */
+     int *nto, *nfrom;
+     double *xfrom, *yfrom, *xto, *yto;
+     /* output */
+     double *d;
+{ 
+  int i, j, nf, nt, maxchunk; 
+  double *dptr;
+  double xj, yj, dx, dy;
+
+  nf = *nfrom;
+  nt = *nto;
+
+  dptr = d;
+
+  OUTERCHUNKLOOP(j, nt, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, nt, maxchunk, 16384) {
+      xj = xto[j];
+      yj = yto[j];
+      for(i = 0; i < nf; i++, dptr++) {
+	dx = xj - xfrom[i];
+	dy = yj - yfrom[i];
+	*dptr = sqrt( dx * dx + dy * dy ); 
+      }
+    }
+  }
+}
+
+
+/* squared distances */
+
+void Ccross2dist(nfrom, xfrom, yfrom, nto, xto, yto, d)
+     /* inputs */
+     int *nto, *nfrom;
+     double *xfrom, *yfrom, *xto, *yto;
+     /* output */
+     double *d;
+{ 
+  int i, j, nf, nt, maxchunk; 
+  double *dptr;
+  double xj, yj, dx, dy;
+
+  nf = *nfrom;
+  nt = *nto;
+
+  dptr = d;
+
+  OUTERCHUNKLOOP(j, nt, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, nt, maxchunk, 16384) {
+      xj = xto[j];
+      yj = yto[j];
+      for(i = 0; i < nf; i++, dptr++) {
+	dx = xj - xfrom[i];
+	dy = yj - yfrom[i];
+	*dptr = dx * dx + dy * dy; 
+      }
+    }
+  }
+}
+
+
+/* distances with periodic correction */
+
+void CpairPdist(n, x, y, xwidth, yheight, squared, d)
+     /* inputs */
+     int *n;
+     double *x, *y, *xwidth, *yheight;
+     int *squared;
+     /* output */
+     double *d;
+{ 
+  void CpairP1dist(), CpairP2dist();
+  if(*squared == 0) {
+    CpairP1dist(n, x, y, xwidth, yheight, d);
+  } else {
+    CpairP2dist(n, x, y, xwidth, yheight, d);
+  }
+}
+
+void CpairP1dist(n, x, y, xwidth, yheight, d)
+     /* inputs */
+     int *n;
+     double *x, *y, *xwidth, *yheight;
+     /* output */
+     double *d;
+{ 
+  int i, j, npoints, maxchunk; 
+  double *dp;
+  double xi, yi, dx, dy, dx2, dy2, dx2p, dy2p, dist, wide, high;
+
+  npoints = *n;
+  wide = *xwidth;
+  high = *yheight;
+
+  /* set d[0,0] = 0 */
+  *d = 0.0;
+
+  OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+      xi = x[i];
+      yi = y[i];
+      /* point at the start of column i */
+      dp = d + i * npoints;
+      /* set diagonal to zero */
+      dp[i] = 0.0;
+      for (j=0; j < i; j++)
+	{
+	  dx = x[j] - xi;
+	  dy = y[j] - yi;
+	  dx2p = dx * dx;
+	  dy2p = dy * dy;
+	  dx2 = (dx - wide) * (dx - wide);
+	  dy2 = (dy - high) * (dy - high);
+	  if(dx2 < dx2p) dx2p = dx2;
+	  if(dy2 < dy2p) dy2p = dy2;
+	  dx2 = (dx + wide) * (dx + wide);
+	  dy2 = (dy + high) * (dy + high);
+	  if(dx2 < dx2p) dx2p = dx2;
+	  if(dy2 < dy2p) dy2p = dy2;
+	  dist = sqrt( dx2p + dy2p ); 
+	  /* upper triangle */
+	  *dp = dist;
+	  ++dp;
+	  /* lower triangle */
+	  d[ j * npoints + i] = dist;
+	}
+    }
+  }
+}
+
+/* same function without the sqrt */
+
+void CpairP2dist(n, x, y, xwidth, yheight, d)
+     /* inputs */
+     int *n;
+     double *x, *y, *xwidth, *yheight;
+     /* output */
+     double *d;
+{ 
+  int i, j, npoints, maxchunk; 
+  double *dp;
+  double xi, yi, dx, dy, dx2, dy2, dx2p, dy2p, dist, wide, high;
+
+  npoints = *n;
+  wide = *xwidth;
+  high = *yheight;
+
+  /* set d[0,0] = 0 */
+  *d = 0.0;
+
+  OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+      xi = x[i];
+      yi = y[i];
+      /* point at the start of column i */
+      dp = d + i * npoints;
+      /* set diagonal to zero */
+      dp[i] = 0.0;
+      for (j=0; j < i; j++)
+	{
+	  dx = x[j] - xi;
+	  dy = y[j] - yi;
+	  dx2p = dx * dx;
+	  dy2p = dy * dy;
+	  dx2 = (dx - wide) * (dx - wide);
+	  dy2 = (dy - high) * (dy - high);
+	  if(dx2 < dx2p) dx2p = dx2;
+	  if(dy2 < dy2p) dy2p = dy2;
+	  dx2 = (dx + wide) * (dx + wide);
+	  dy2 = (dy + high) * (dy + high);
+	  if(dx2 < dx2p) dx2p = dx2;
+	  if(dy2 < dy2p) dy2p = dy2;
+	  dist = dx2p + dy2p; 
+	  /* upper triangle */
+	  *dp = dist;
+	  ++dp;
+	  /* lower triangle */
+	  d[ j * npoints + i] = dist;
+	}
+    }
+  }
+}
+
+void CcrossPdist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, 
+		 squared, d)
+     /* inputs */
+     int *nto, *nfrom;
+     double *xfrom, *yfrom, *xto, *yto, *xwidth, *yheight;
+     int *squared;
+     /* output */
+     double *d;
+{ 
+  void CcrossP1dist(), CcrossP2dist();
+  if(*squared == 0) {
+    CcrossP1dist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, d);
+  } else {
+    CcrossP2dist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, d); 
+  }
+}
+
+void CcrossP1dist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, d)
+     /* inputs */
+     int *nto, *nfrom;
+     double *xfrom, *yfrom, *xto, *yto, *xwidth, *yheight;
+     /* output */
+     double *d;
+{ 
+  int i, j, nf, nt, maxchunk; 
+  double *dptr;
+  double xj, yj, dx, dy, dx2, dy2, dx2p, dy2p, wide, high;
+
+  nf = *nfrom;
+  nt = *nto;
+  wide = *xwidth;
+  high = *yheight;
+
+  dptr = d;
+
+  OUTERCHUNKLOOP(j, nt, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, nt, maxchunk, 16384) {
+      xj = xto[j];
+      yj = yto[j];
+      for(i = 0; i < nf; i++, dptr++) {
+	dx = xj - xfrom[i];
+	dy = yj - yfrom[i];
+	dx2p = dx * dx;
+	dy2p = dy * dy;
+	dx2 = (dx - wide) * (dx - wide);
+	dy2 = (dy - high) * (dy - high);
+	if(dx2 < dx2p) dx2p = dx2;
+	if(dy2 < dy2p) dy2p = dy2;
+	dx2 = (dx + wide) * (dx + wide);
+	dy2 = (dy + high) * (dy + high);
+	if(dx2 < dx2p) dx2p = dx2;
+	if(dy2 < dy2p) dy2p = dy2;
+	*dptr = sqrt( dx2p + dy2p ); 
+      }
+    }
+  }
+}
+
+void CcrossP2dist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, d)
+     /* inputs */
+     int *nto, *nfrom;
+     double *xfrom, *yfrom, *xto, *yto, *xwidth, *yheight;
+     /* output */
+     double *d;
+{ 
+  int i, j, nf, nt, maxchunk; 
+  double *dptr;
+  double xj, yj, dx, dy, dx2, dy2, dx2p, dy2p, wide, high;
+
+  nf = *nfrom;
+  nt = *nto;
+  wide = *xwidth;
+  high = *yheight;
+
+  dptr = d;
+
+  OUTERCHUNKLOOP(j, nt, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, nt, maxchunk, 16384) {
+      xj = xto[j];
+      yj = yto[j];
+      for(i = 0; i < nf; i++, dptr++) {
+	dx = xj - xfrom[i];
+	dy = yj - yfrom[i];
+	dx2p = dx * dx;
+	dy2p = dy * dy;
+	dx2 = (dx - wide) * (dx - wide);
+	dy2 = (dy - high) * (dy - high);
+	if(dx2 < dx2p) dx2p = dx2;
+	if(dy2 < dy2p) dy2p = dy2;
+	dx2 = (dx + wide) * (dx + wide);
+	dy2 = (dy + high) * (dy + high);
+	if(dx2 < dx2p) dx2p = dx2;
+	if(dy2 < dy2p) dy2p = dy2;
+	*dptr = dx2p + dy2p; 
+      }
+    }
+  }
+}
+
diff --git a/src/distmapbin.c b/src/distmapbin.c
new file mode 100755
index 0000000..682dfa8
--- /dev/null
+++ b/src/distmapbin.c
@@ -0,0 +1,124 @@
+/*
+       distmapbin.c
+
+       Distance transform of a discrete binary image
+       (8-connected path metric)
+       
+       $Revision: 1.6 $ $Date: 2011/11/20 03:34:16 $
+
+       
+*/
+
+#include <math.h>
+#include "raster.h"
+#include <R_ext/Utils.h>
+
+void   dist_to_bdry();
+void   shape_raster();
+
+
+void
+distmap_bin(in, dist)
+        Raster  *in;            /* input:  binary image */
+	Raster	*dist;		/* output: distance to nearest point */
+	/* rasters must have been dimensioned by shape_raster()
+	   and must all have identical dimensions and margins */
+{
+	int	j,k;
+	double	d, dnew;
+	double  xstep, ystep, diagstep, huge;
+	int rmin, rmax, cmin, cmax;
+
+	/* distances between neighbouring pixels */
+	xstep = in->xstep;
+	ystep = in->ystep;
+	diagstep = sqrt(xstep * xstep + ystep * ystep);
+	if(xstep < 0) xstep = -xstep;
+	if(ystep < 0) ystep = -ystep;
+
+	/* effectively infinite distance */
+	huge = 2.0 * Distance(dist->xmin,dist->ymin,dist->xmax,dist->ymax); 
+
+	/* image boundaries */
+	rmin = in->rmin;
+	rmax = in->rmax;
+	cmin = in->cmin;
+	cmax = in->cmax;
+
+#define DISTANCE(ROW, COL) Entry(*dist, ROW, COL, double)
+#define MASKTRUE(ROW, COL) (Entry(*in, ROW, COL, int) != 0)
+#define MASKFALSE(ROW, COL) (Entry(*in, ROW, COL, int) == 0)
+#define UPDATE(D, ROW, COL, STEP) \
+	dnew = STEP + DISTANCE(ROW, COL); \
+        if(D > dnew) D = dnew
+
+	/* initialise edges to boundary condition */
+	for(j = rmin-1; j <= rmax+1; j++) {
+	  DISTANCE(j, cmin-1) = (MASKTRUE(j, cmin-1)) ? 0.0 : huge;
+	  DISTANCE(j, cmax+1) = (MASKTRUE(j, cmax+1)) ? 0.0 : huge;
+	}
+	for(k = cmin-1; k <= cmax+1; k++) {
+	  DISTANCE(rmin-1, k) = (MASKTRUE(rmin-1, k)) ? 0.0 : huge;
+	  DISTANCE(rmax+1, k) = (MASKTRUE(rmax+1, k)) ? 0.0 : huge;
+	}
+	  
+	/* forward pass */
+
+	for(j = rmin; j <= rmax; j++) {
+	  R_CheckUserInterrupt();
+	  for(k = cmin; k <= cmax; k++) {
+	    if(MASKTRUE(j, k))
+	      d = DISTANCE(j, k) = 0.0;
+	    else {
+	      d = huge;
+	      UPDATE(d, j-1, k-1, diagstep);
+	      UPDATE(d, j-1,   k, ystep);
+	      UPDATE(d, j-1, k+1, diagstep);
+	      UPDATE(d,   j, k-1, xstep);
+	      DISTANCE(j,k) = d;
+	    }
+	  }
+	}
+
+	/* backward pass */
+
+	for(j = rmax; j >= rmin; j--) {
+	  R_CheckUserInterrupt();
+	  for(k = cmax; k >= cmin; k--) {
+	    if(MASKFALSE(j,k)) {
+	      d = DISTANCE(j,k);
+	      UPDATE(d, j+1, k+1, diagstep);
+	      UPDATE(d, j+1,   k, ystep);
+	      UPDATE(d, j+1, k-1, diagstep);
+	      UPDATE(d,   j, k+1, xstep);
+	      DISTANCE(j,k) = d;
+	    } 
+	  }
+	}
+}
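+
+/*
+   Editorial sketch (not upstream code): the forward/backward sweeps above
+   form the classical two-pass chamfer distance transform.  The same idea
+   in one dimension, where dist[] starts at 0.0 on mask pixels and at a
+   large value elsewhere:
+*/
+#ifdef CHAMFER_EXAMPLE
+void chamfer1d(double *dist, int n, double step) {
+  int k;
+  for(k = 1; k < n; k++)          /* forward pass */
+    if(dist[k] > dist[k-1] + step) dist[k] = dist[k-1] + step;
+  for(k = n - 2; k >= 0; k--)     /* backward pass */
+    if(dist[k] > dist[k+1] + step) dist[k] = dist[k+1] + step;
+}
+#endif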
+
+/* R interface */
+
+void distmapbin(xmin, ymin, xmax, ymax, nr, nc,
+		inp, distances, boundary)
+	double *xmin, *ymin, *xmax, *ymax;  	  /* x, y dimensions */
+	int *nr, *nc;	 	                  /* raster dimensions
+				                     EXCLUDING margin of 1 on each side */
+	int   *inp;              /* input:  binary image */
+	double *distances;	/* output: distance to nearest point */
+	double *boundary;       /* output: distance to boundary of rectangle */
+	/* all images must have identical dimensions including a margin of 1 on each side */
+{
+	Raster data, dist, bdist;
+
+	shape_raster( &data, (void *) inp, *xmin,*ymin,*xmax,*ymax,
+			    *nr+2, *nc+2, 1, 1);
+	shape_raster( &dist, (void *) distances,*xmin,*ymin,*xmax,*ymax,
+			   *nr+2,*nc+2,1,1);
+	shape_raster( &bdist, (void *) boundary, *xmin,*ymin,*xmax,*ymax,
+			   *nr+2,*nc+2,1,1);
+	
+	distmap_bin(&data, &dist);
+
+	dist_to_bdry(&bdist);
+}	
diff --git a/src/dwpure.c b/src/dwpure.c
new file mode 100755
index 0000000..4194f96
--- /dev/null
+++ b/src/dwpure.c
@@ -0,0 +1,318 @@
+/*
+
+   dwpure.c
+
+   $Revision: 1.5 $   $Date: 2011/09/20 07:54:53 $
+
+   Code by Dominic Schuhmacher
+   
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <R.h>
+
+typedef struct State {
+  int n1, n2; 
+  /* vectors of length n1 (rows) and n2 (cols) */
+  int *rowmass, *colmass;  /* mass to be moved from row / to col */
+  int *rowlab, *collab;  /* row and col labels
+         (specify previous node (row for collab, col for rowlab)) */
+  int *rowflow, *colflow;  /* second component of labels 
+         (specify flow through current node) */
+  int *rowsurplus, *colsurplus;  /* the surplus in each row/col under the current flow */
+  int *dualu, *dualv;     /* vectors of dual variables (u for rows, v for cols) */
+  int *rowhelper, *colhelper;   /* helper vectors for intermediate results */
+      /* could be local to initvalues at the moment */
+  /* n1 by n2 matrices */
+  int *d;                /* matrix of costs */ 
+  int *flowmatrix;        /* matrix of flows */
+  int *arcmatrix;  /* matrix of arcs for restricted primal problem 
+         (1 if arc, 0 if no arc); should be unsigned char to save memory,
+          but a problem with R_alloc needs to be worked out first (see below) */
+   /* n1*n2 vector */
+   int *collectvals;
+} State;
+
+#define COST(I,J,STATE,NVALUE) ((STATE)->d)[(NVALUE) * (J) + (I)]
+#define FLOW(I,J,STATE,NVALUE) ((STATE)->flowmatrix)[(NVALUE) * (J) + (I)]
+#define ARC(I,J,STATE,NVALUE) ((STATE)->arcmatrix)[(NVALUE) * (J) + (I)]
+#define MIN(A,B) ((A)<(B) ? (A) : (B))
+
+int arraysum(int *a, int n);
+int arraymin(int *a, int n);
+void initvalues(State *state);
+void maxflow(State *state);
+void updateduals(State *state);
+void augmentflow(int startcol, State *state);
+
+/* ------------ The main function ----------------------------- */
+
+void dwpure(int *d, int *rmass, int *cmass, int *numr, int *numc, int *flowmatrix)
+{
+   int i,j; /* indices */
+   int n1,n2;
+   unsigned char feasible = 0; /* boolean for main loop */
+   State state;
+
+   /* inputs */
+   state.n1 = n1 = *numr;
+   state.n2 = n2 = *numc;
+   state.d = d;
+   state.rowmass = rmass;
+   state.colmass = cmass;
+   /* scratch space */
+   state.rowlab = (int *) R_alloc((long) n1, sizeof(int));
+   state.collab = (int *) R_alloc((long) n2, sizeof(int));
+   state.rowflow = (int *) R_alloc((long) n1, sizeof(int));
+   state.colflow = (int *) R_alloc((long) n2, sizeof(int));
+   state.rowsurplus = (int *) R_alloc((long) n1, sizeof(int));
+   state.colsurplus = (int *) R_alloc((long) n2, sizeof(int));
+   state.dualu = (int *) R_alloc((long) n1, sizeof(int));
+   state.dualv = (int *) R_alloc((long) n2, sizeof(int));
+   state.rowhelper = (int *) R_alloc((long) n1, sizeof(int));
+   state.colhelper = (int *) R_alloc((long) n2, sizeof(int));
+   state.flowmatrix = (int *) R_alloc((long) (n1 * n2), sizeof(int));
+   state.arcmatrix = (int *) R_alloc((long) (n1 * n2), sizeof(int));
+   state.collectvals = (int *) R_alloc((long) (n1 * n2), sizeof(int));
+
+   for (i = 0; i < n1; ++i) {
+   for (j = 0; j < n2; ++j) {
+      state.flowmatrix[(n1)*(j) + i] = 0;
+      state.arcmatrix[(n1)*(j) + i] = 0;
+      state.collectvals[(n1)*(j) + i] = 0;
+   }
+   }
+   for (i = 0; i < n1; ++i) {
+      state.rowlab[i] = 0;
+      state.rowflow[i] = 0;
+      state.rowsurplus[i] = 0;
+      state.dualu[i] = 0;
+      state.rowhelper[i] = 0;
+   }
+   for (j = 0; j < n2; ++j) {
+      state.collab[j] = 0;
+      state.colflow[j] = 0;
+      state.colsurplus[j] = 0;
+      state.dualv[j] = 0;
+      state.colhelper[j] = 0;
+   }
+
+
+/* Initialize dual variables, arcmatrix, and surpluses */
+   initvalues(&state);
+
+/* For testing: print out cost matrix 
+   for (i = 0; i < n1; ++i) {
+   for (j = 0; j < n2; ++j) {
+      Rprintf("%d ", COST(i, j, &state, n1));
+   }
+   Rprintf("\n");
+   }   */
+
+/* The main loop */
+   while(feasible == 0) {
+      maxflow(&state);
+      if (arraysum(state.rowsurplus, n1) > 0) {
+         updateduals(&state);  /* also updates arcmatrix */
+      }
+      else {
+         feasible = 1;
+      }
+   }
+
+/* "Return" the final flowmatrix */
+   for (i = 0; i < n1; i++) {
+      for (j = 0; j < n2; j++) {
+      flowmatrix[n1*j+i] = state.flowmatrix[n1*j+i];
+      }
+   }
+}
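+
+/*
+   Illustrative call (editorial addition, not upstream code): a 2 x 2
+   transportation problem.  Costs are stored column-major, so d[j*n1 + i]
+   is the cost of moving one unit from row i to column j.
+*/
+#ifdef DWPURE_EXAMPLE
+void dwpure_example(int *flow) {   /* flow must hold 4 ints */
+  int d[4]     = {1, 2, 2, 1};     /* cheap arcs (0,0) and (1,1) */
+  int rmass[2] = {3, 3};           /* supply at the two rows */
+  int cmass[2] = {3, 3};           /* demand at the two columns */
+  int n1 = 2, n2 = 2;
+  dwpure(d, rmass, cmass, &n1, &n2, flow);
+  /* the optimal flow is {3, 0, 0, 3}: 3 units along each cheap arc */
+}
+#endif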
+
+
+/* ------------ Functions called by dwpure --------------------------- */
+
+
+/* Sum of integer array */
+int arraysum(int *a, int n) {
+   int i;
+   int asum = 0;
+   for (i = 0; i < n; i++)
+      asum += a[i];
+   return(asum);
+}
+
+/* Minimal element of an integer array */
+int arraymin(int *a, int n) {
+  int i, amin;
+  if (n < 1)
+    return(-1);
+  amin = a[0];
+  for (i = 1; i < n; i++)
+    if (a[i] < amin) amin = a[i];
+  return(amin);
+}
+
+
+/* Initialize the dual variables by row and column reduction of the cost
+matrix: dualu[i] is the minimum entry of row i, and dualv[j] is the minimum
+entry of column j after the row minima have been subtracted.  The cost
+matrix itself is left unchanged. */
+void initvalues(State *state) {
+   int i,j,n1,n2;
+
+   n1 = state->n1;
+   n2 = state->n2;
+
+   /* Initial surpluses equal the masses; the surpluses are updated
+      later, in the flow augmentation step */
+   for (i = 0; i < n1; i++)
+      state->rowsurplus[i] = state->rowmass[i];
+   for (j = 0; j < n2; j++)
+      state->colsurplus[j] = state->colmass[j];
+
+   for (i = 0; i < n1; i++) {
+      for (j = 0; j < n2; j++) 
+         state->colhelper[j] = COST(i, j, state, n1);
+      state->dualu[i] = arraymin(state->colhelper, n2);
+   }
+   for (j = 0; j < n2; j++) {
+      for (i = 0; i < n1; i++) 
+	 state->rowhelper[i] = COST(i, j, state, n1) - state->dualu[i];
+      state->dualv[j] = arraymin(state->rowhelper, n1);
+   }
+   for (i = 0; i < n1; i++) {
+      for (j = 0; j < n2; j++) {
+         if (COST(i, j, state, n1) == state->dualu[i] + state->dualv[j])
+            ARC(i, j, state, n1) = 1;
+         else
+            ARC(i, j, state, n1) = 0;
+      }
+   }
+}
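+
+/*
+   Editorial note (not upstream code): after these reductions, an arc
+   (i,j) is admissible exactly when its reduced cost
+   COST(i,j) - dualu[i] - dualv[j] is zero; the restricted primal problem
+   solved by maxflow() routes flow along admissible arcs only.
+*/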
+
+/* Maximize the flow on the (zeros of the) current cost matrix */
+void maxflow(State *state) {
+   int breakthrough; /* col. no. in which breakthrough occurs */
+   unsigned char labelfound = 1; /* 0 if no more labels can be found */
+   int i,j,n1,n2;
+
+   n1 = state->n1;
+   n2 = state->n2;
+
+   while (labelfound == 1) {
+      breakthrough = -1;
+      /* initialize labels */
+      for (i = 0; i < n1; i++) {
+         if (state->rowsurplus[i] > 0) {
+            state->rowlab[i] = -5;
+            state->rowflow[i] = state->rowsurplus[i];
+         }
+         else {
+            state->rowlab[i] = -1;  /* setting rowflow to zero isn't necessary! */
+         }
+      }
+      for (j = 0; j < n2; j++)
+         state->collab[j] = -1;   /* setting colflow to zero isn't necessary! */
+      /* -1 means "no index", -5 means "source label" (rows only) */
+
+      while (labelfound == 1 && breakthrough == -1) {
+         labelfound = 0;
+         /* label unlabeled column j that permits flow from some labeled row i */
+         /* ("permits flow" means arcmatrix[i][j] = 1). Do so for every j */
+         for (i = 0; i < n1; i++) {
+            if (state->rowlab[i] != -1) {
+               for (j = 0; j < n2; j++) {
+                  if (ARC(i, j, state, n1) == 1 && state->collab[j] == -1) {
+                     state->collab[j] = i;
+                     state->colflow[j] = state->rowflow[i];
+                     labelfound = 1;
+                     if (state->colsurplus[j] > 0 && breakthrough == -1)
+                        breakthrough = j;
+                  }
+               }
+            }
+         }
+         /* label unlabeled row i that already sends flow to some labeled col j */
+         /* ("already sends" means flowmatrix[i][j] > 0). Do so for every i             */
+         for (j = 0; j < n2; j++) {
+            if (state->collab[j] != -1) {
+               for (i = 0; i < n1; i++) {
+                  if (FLOW(i, j, state, n1) > 0 && state->rowlab[i] == -1) {
+                     state->rowlab[i] = j;
+                     state->rowflow[i] = MIN(state->colflow[j],FLOW(i, j, state, n1));
+                     labelfound = 1;
+                  }
+               }
+            }
+         }
+      }
+      if (breakthrough != -1) augmentflow(breakthrough, state);
+   }
+}
+
+
+/* Update the dual variables (called if solution of restricted primal is not feasible
+for the original problem): determine the minimum over the submatrix given by all
+labeled rows and unlabeled columns, and subtract it from all labeled rows and add
+it to all labeled columns. */
+void updateduals(State *state) 
+{
+   int i,j,n1,n2,mini;
+   int count = 0; 
+
+   n1 = state->n1;
+   n2 = state->n2;
+
+   for (i = 0; i < n1; i++) {
+     for (j = 0; j < n2; j++) {
+       if (state->rowlab[i] != -1 && state->collab[j] == -1) {
+	 state->collectvals[count] = COST(i, j, state, n1) - state->dualu[i] - state->dualv[j];
+	 count++;
+       }
+     }
+   }
+   mini = arraymin(state->collectvals, count);
+   for (i = 0; i < n1; i++) {
+     if (state->rowlab[i] != -1)
+       state->dualu[i] += mini;
+   }
+   for (j = 0; j < n2; j++){
+     if (state->collab[j] != -1)
+       state->dualv[j] -= mini;
+   }
+   for (i = 0; i < n1; i++) {
+     for (j = 0; j < n2; j++) {
+       if (COST(i, j, state, n1) == state->dualu[i] + state->dualv[j])
+         ARC(i, j, state, n1) = 1;
+       else
+         ARC(i, j, state, n1) = 0;
+     }
+   }
+
+}
+
+/* Augment the flow on the graph given by arcmatrix (by aug)
+according to the row and column labels starting in column startcol */
+/* Adjust the surpluses while we're at it (first row and last col have -aug) */ 
+void augmentflow(int startcol, State *state) {
+   int k,l,aug,n1;
+  /* int i,j,k,l,aug,n1,n2; */
+
+   n1 = state->n1;
+
+   l = startcol;
+   aug = MIN(state->colflow[l], state->colsurplus[l]);
+   state->colsurplus[l] -= aug;
+
+   k = state->collab[l];
+   FLOW(k, l, state, n1) += aug;
+   l = state->rowlab[k];
+   while (l != -5) {
+      FLOW(k, l, state, n1) -= aug;
+      k = state->collab[l];
+      FLOW(k, l, state, n1) += aug;
+      l = state->rowlab[k];
+   }
+   state->rowsurplus[k] -= aug;
+}
diff --git a/src/exactPdist.c b/src/exactPdist.c
new file mode 100755
index 0000000..cf17f11
--- /dev/null
+++ b/src/exactPdist.c
@@ -0,0 +1,148 @@
+/*
+       exactPdist.c
+
+       `Pseudoexact' distance transform of a discrete binary image
+       (the closest counterpart to `exactdist.c')
+       
+       $Revision: 1.12 $ $Date: 2011/05/17 12:27:20 $
+
+       
+*/
+
+#include <math.h>
+#include "raster.h"
+
+void   dist_to_bdry();
+void   shape_raster();
+
+void
+ps_exact_dt(in, dist, row, col)
+        Raster  *in;            /* input:  binary image */
+	Raster	*dist;		/* output: exact distance to nearest point */
+	Raster	*row;		/* output: row index of closest point */
+	Raster	*col;		/* output: column index of closest point */
+	/* rasters must have been dimensioned by shape_raster()
+	   and must all have identical dimensions and margins */
+{
+	int	j,k;
+	double	d, x, y;
+	int	r, c;
+	double	dnew;
+	double  huge;
+	/*	double  bdiag; */
+	
+	    /* initialise */
+#define UNDEFINED -1
+#define Is_Defined(I) (I >= 0)
+#define Is_Undefined(I) (I < 0)
+	
+	Clear(*row,int,UNDEFINED)
+	Clear(*col,int,UNDEFINED)
+		
+	huge = 2.0 * DistanceSquared(dist->xmin,dist->ymin,dist->xmax,dist->ymax); 
+	Clear(*dist,double,huge)
+
+
+	  /* if input pixel is TRUE, set distance to 0 and make pixel point to itself */
+	for(j = in->rmin; j <= in->rmax; j++)
+	for(k = in->cmin; k <= in->cmax; k++) 
+	  if(Entry(*in, j, k, int) != 0) {
+	      Entry(*dist, j, k, double) = 0.0;
+	      Entry(*row,  j, k, int)   = j;
+	      Entry(*col,  j, k, int)   = k;
+	  }
+
+	/* how to update the distance values */
+	
+#define GETVALUES(ROW,COL) \
+	x = Xpos(*in, COL); \
+	y = Ypos(*in, ROW); \
+	d = Entry(*dist,ROW,COL,double); 
+
+#define COMPARE(ROW,COL,RR,CC) \
+	r = Entry(*row,RR,CC,int); \
+	c = Entry(*col,RR,CC,int); \
+	if(Is_Defined(r) && Is_Defined(c) \
+	   && Entry(*dist,RR,CC,double) < d) { \
+	     dnew = DistanceSquared(x, y, Xpos(*in,c), Ypos(*in,r)); \
+	     if(dnew < d) { \
+		Entry(*row,ROW,COL,int) = r; \
+		Entry(*col,ROW,COL,int) = c; \
+		Entry(*dist,ROW,COL,double) = dnew; \
+		d = dnew; \
+	     } \
+	}
+
+	/* bound on diagonal step distance squared */
+	/* bdiag = (in->xstep * in->xstep + in->ystep * in->ystep); */
+	
+	/* forward pass */
+
+	for(j = in->rmin; j <= in->rmax; j++)
+	for(k = in->cmin; k <= in->cmax; k++) {
+	        GETVALUES(j, k)
+		COMPARE(j,k, j-1,k-1)
+		COMPARE(j,k, j-1,  k)
+		COMPARE(j,k, j-1,k+1)
+		COMPARE(j,k, j,  k-1)
+		  }
+
+	/* backward pass */
+
+	for(j = in->rmax; j >= in->rmin; j--) 
+	for(k = in->cmax; k >= in->cmin; k--) {
+	        GETVALUES(j, k)
+		COMPARE(j,k, j+1,k+1)
+		COMPARE(j,k, j+1,  k)
+		COMPARE(j,k, j+1,k-1)
+		COMPARE(j,k, j,  k+1)
+		  } 
+
+	/* take square roots of distances^2 */
+
+	for(j = in->rmax; j >= in->rmin; j--) 
+	for(k = in->cmax; k >= in->cmin; k--) 
+	        Entry(*dist,j,k,double) = sqrt(Entry(*dist,j,k,double));
+
+}
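+
+/*
+   Editorial note (not upstream code): the transform is `pseudoexact'
+   because each pixel inherits a candidate nearest TRUE pixel from its
+   already-scanned neighbours and stores the exact Euclidean distance to
+   that candidate; very occasionally the propagated candidate is not the
+   true nearest pixel, which is the only source of error.
+*/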
+
+/* R interface */
+
+void ps_exact_dt_R(xmin, ymin, xmax, ymax, nr, nc, mr, mc, 
+	   inp, distances, rows, cols, boundary)
+	double *xmin, *ymin, *xmax, *ymax;  	  /* x, y dimensions */
+	int *nr, *nc;	 	                  /* raster dimensions
+				                     EXCLUDING margins */
+	int *mr, *mc;                             /* margins */
+	int   *inp;              /* input:  binary image */
+	double *distances;	/* output: distance to nearest point */
+	int   *rows;	        /* output: row of nearest point (start= 0) */
+	int   *cols;	        /* output: column of nearest point (start = 0) */
+	double *boundary;       /* output: distance to boundary of rectangle */
+	/* all images must have identical dimensions including the margins */
+{
+	Raster data, dist, row, col, bdist;
+	int mrow, mcol, nrow, ncol;
+
+	mrow = *mr;
+	mcol = *mc;
+
+	/* full dimensions */
+	nrow = *nr + 2 * mrow;
+	ncol = *nc + 2 * mcol;
+
+	shape_raster( &data, (void *) inp, *xmin,*ymin,*xmax,*ymax,
+		      nrow, ncol, mrow, mcol);
+	shape_raster( &dist, (void *) distances, *xmin,*ymin,*xmax,*ymax,
+		      nrow, ncol, mrow, mcol);
+	shape_raster( &row, (void *) rows, *xmin,*ymin,*xmax,*ymax,
+		      nrow, ncol, mrow, mcol);
+	shape_raster( &col, (void *) cols, *xmin,*ymin,*xmax,*ymax,
+		      nrow, ncol, mrow, mcol);
+	shape_raster( &bdist, (void *) boundary, *xmin,*ymin,*xmax,*ymax,
+		      nrow, ncol, mrow, mcol);
+	
+	ps_exact_dt(&data, &dist, &row, &col);
+
+	dist_to_bdry(&bdist);
+}	
diff --git a/src/exactdist.c b/src/exactdist.c
new file mode 100755
index 0000000..75421b9
--- /dev/null
+++ b/src/exactdist.c
@@ -0,0 +1,236 @@
+/*
+       exactdist.c
+
+       Exact distance transform of a point pattern
+       (used to estimate the empty space function F)
+       
+       $Revision: 1.12 $ $Date: 2011/09/20 07:36:17 $
+
+       Author: Adrian Baddeley
+
+       Sketch of functionality:
+            the 'data' are a finite list of points in R^2 
+	    (x,y coordinates) and the 'output' is a real valued 
+	    image whose entries are distances, with the value for
+	    each pixel equalling the distance from that pixel
+	    to the nearest point of the data pattern.
+       
+       Routines:
+
+            exact_dt_R()       interface to R
+	    exact_dt()         implementation of distance transform
+	    dist_to_bdry()     compute distance to edge of image frame
+	    shape_raster()     initialise a Raster structure
+                          
+       The appropriate calling sequence for exact_dt_R() 
+       is exemplified in 'exactdt.R'
+     
+*/
+#undef DEBUG
+
+#include <math.h>
+#include "raster.h"
+
+#ifdef DEBUG
+#include <R.h>
+#endif
+
+void 
+shape_raster(ras,data,xmin,ymin,xmax,ymax,nrow,ncol,mrow,mcol)
+     Raster          *ras;           /* the raster structure to be initialised */
+     void		*data;
+     int 	        nrow, ncol;  /* absolute dimensions of storage array */
+     int 		mrow, mcol;  /* margins clipped off */
+	                             /* e.g. valid width is ncol - 2*mcol columns */
+     double		xmin, ymin,	/* image dimensions in R^2 after clipping */
+		        xmax, ymax;     
+{
+	ras->data	= data;
+	ras->nrow 	= nrow;
+	ras->ncol 	= ncol;
+	ras->length 	= nrow * ncol;
+	ras->rmin	= mrow;
+	ras->rmax	= nrow - mrow - 1;
+	ras->cmin	= mcol;
+	ras->cmax	= ncol - mcol - 1;
+	ras->x0		= 
+	ras->xmin	= xmin;
+	ras->x1 	=
+	ras->xmax	= xmax;
+	ras->y0		=
+	ras->ymin	= ymin;
+	ras->y1		=
+	ras->ymax	= ymax;
+	ras->xstep	= (xmax-xmin)/(ncol - 2 * mcol - 1);
+	ras->ystep	= (ymax-ymin)/(nrow - 2 * mrow - 1);
+	/* Rprintf("xstep,ystep = %lf,%lf\n", ras->xstep,ras->ystep);  */
+}
+
+void
+exact_dt(x, y, npt, dist, index)
+	double	*x, *y;		/* data points */
+	int	npt;
+	Raster	*dist;		/* exact distance to nearest point */
+	Raster	*index;		/* which point x[i],y[i] is closest */
+{
+	int	i,j,k,l,m;
+	double	d;
+	int	ii;
+	double	dd;
+	/*	double  bdiag; */
+	
+	    /* initialise rasters */
+#define UNDEFINED -1
+#define Is_Defined(I) (I >= 0)
+#define Is_Undefined(I) (I < 0)
+	
+	Clear(*index,int,UNDEFINED)
+		
+	d = 2.0 * DistanceSquared(dist->xmin,dist->ymin,dist->xmax,dist->ymax); 
+	Clear(*dist,double,d)
+
+	  /* If the list of data points is empty, ... exit now */
+	if(npt == 0) 
+	  return;
+
+	for(i = 0; i < npt; i++) {
+		/* Rprintf("%ld -> (%lf,%lf)\n", i, x[i], y[i]); */
+		j = RowIndex(*dist,y[i]);
+		k = ColIndex(*dist,x[i]);
+		/* if(!Inside(*dist,j,k))
+			Rprintf("(%ld,%ld) out of bounds\n",j,k);
+		else if (!Inside(*dist,j+1,k+1))
+			Rprintf("(%ld+1,%ld+1) out of bounds\n",j,k);
+		*/
+		for(l = j; l <= j+1; l++) 
+		for(m = k; m <= k+1; m++) {
+			d = DistanceToSquared(x[i],y[i],*index,l,m);
+			if(   Is_Undefined(Entry(*index,l,m,int))
+			   || Entry(*dist,l,m,double) > d)
+			{
+				/* Rprintf("writing (%ld,%ld) -> %ld\t%lf\n", l,m,i,d); */
+				Entry(*index,l,m,int) = i;
+				Entry(*dist,l,m,double) = d;
+				/* Rprintf("checking: %ld, %lf\n",
+				       Entry(*index,l,m,int),
+				       Entry(*dist,l,m,double));
+				 */
+			}
+		}
+	}
+/*
+	for(j = 0; j <= index->nrow; j++)
+		for(k = 0; k <= index->ncol; k++)
+			Rprintf("[%ld,%ld] %ld\t%lf\n",
+			       j,k,Entry(*index,j,k,int),Entry(*dist,j,k,double));
+*/			
+	/* how to update the distance values */
+	
+#define COMPARE(ROW,COL,RR,CC) \
+	d = Entry(*dist,ROW,COL,double); \
+	ii = Entry(*index,RR,CC,int); \
+	/* Rprintf(" %lf\t (%ld,%ld) |-> %ld\n", d, RR, CC, ii); */ \
+	if(Is_Defined(ii) /* && ii < npt */ \
+	   && Entry(*dist,RR,CC,double) < d) { \
+	     dd = DistanceSquared(x[ii],y[ii],Xpos(*index,COL),Ypos(*index,ROW)); \
+	     if(dd < d) { \
+		/* Rprintf("(%ld,%ld) <- %ld\n", ROW, COL, ii); */ \
+		Entry(*index,ROW,COL,int) = ii; \
+		Entry(*dist,ROW,COL,double) = dd; \
+		/* Rprintf("checking: %ld, %lf\n", Entry(*index,ROW,COL,int), Entry(*dist,ROW,COL,double)); */\
+	     } \
+	}
+
+
+	/* bound on diagonal step distance */
+	/*	bdiag = sqrt(index->xstep * index->xstep + index->ystep * index->ystep); */
+	
+	/* forward pass */
+
+	for(j = index->rmin; j <= index->rmax; j++)
+	for(k = index->cmin; k <= index->cmax; k++) {
+		/* Rprintf("Neighbourhood of (%ld,%ld):\n", j,k); */
+		COMPARE(j,k, j-1,k-1)
+		COMPARE(j,k, j-1,  k)
+		COMPARE(j,k, j-1,k+1)
+		COMPARE(j,k, j,  k-1)
+	}
+
+	/* backward pass */
+
+	for(j = index->rmax; j >= index->rmin; j--)
+	for(k = index->cmax; k >= index->cmin; k--) {
+		COMPARE(j,k, j+1,k+1)
+		COMPARE(j,k, j+1,  k)
+		COMPARE(j,k, j+1,k-1)
+		COMPARE(j,k, j,  k+1)
+	}
+
+	/* take square roots of the distances^2 */
+
+	for(j = index->rmin; j <= index->rmax; j++)
+	for(k = index->cmin; k <= index->cmax; k++) 
+	        Entry(*dist,j,k,double) = sqrt(Entry(*dist,j,k,double));
+	
+}	
+
+#define MIN(A,B) (((A) < (B)) ? (A) : (B))
+
+void
+dist_to_bdry(d)		/* compute distance to boundary from each raster point */
+	Raster *d;
+	                /* of course this is easy for a rectangular grid
+			   but we implement it in C
+			   for ease of future modification */
+{
+	int j, k;
+	double x, y, xd, yd;
+	for(j = d->rmin; j <= d->rmax;j++) {
+		y = Ypos(*d,j);
+		yd = MIN(y - d->ymin, d->ymax - y);
+		for(k = d->cmin; k <= d->cmax;k++) {
+			x = Xpos(*d,k);
+			xd = MIN(x - d->xmin, d->xmax - x);
+			Entry(*d,j,k,double) = MIN(xd,yd);
+		}
+	}
+}
+
+/* R interface */
+
+void exact_dt_R(x, y, npt,
+		xmin, ymin, xmax, ymax,
+		nr, nc, mr, mc, 
+		distances, indices, boundary)
+	double *x, *y;		/* input data points */
+	int	*npt;
+	double *xmin, *ymin,
+		*xmax, *ymax;  	/* guaranteed bounding box */
+	int *nr, *nc;		/* desired raster dimensions
+				   EXCLUDING margins */
+	int *mr, *mc;           /* margins */
+	     /* output arrays */
+	double *distances;	/* distance to nearest point */
+	int   *indices;	        /* index to nearest point */
+	double	*boundary;	/* distance to boundary */
+{
+	Raster dist, index, bdist;
+	int mrow, mcol, nrow, ncol;
+
+	mrow = *mr;
+	mcol = *mc;
+
+	/* full dimensions */
+	nrow = *nr + 2 * mrow;
+	ncol = *nc + 2 * mcol;
+	
+	shape_raster( &dist, (void *) distances,*xmin,*ymin,*xmax,*ymax,
+		      nrow, ncol, mrow, mcol);
+	shape_raster( &index, (void *) indices, *xmin,*ymin,*xmax,*ymax,
+		      nrow, ncol, mrow, mcol);
+	shape_raster( &bdist, (void *) boundary, *xmin,*ymin,*xmax,*ymax,
+		      nrow, ncol, mrow, mcol);
+	
+	exact_dt(x, y, (int) *npt, &dist, &index);
+	dist_to_bdry(&bdist);
+}	
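+
+/*
+   Illustrative call (editorial addition, not upstream code): distance map
+   of a single point on a 64 x 64 grid over the unit square with a margin
+   of one pixel on each side.  Output arrays are dimensioned for the full
+   raster, (nr + 2*mr) rows by (nc + 2*mc) columns.
+*/
+#ifdef EXACTDIST_EXAMPLE
+static double distances[66 * 66], boundary[66 * 66];
+static int indices[66 * 66];
+void exactdist_example(void) {
+  double x[1] = {0.5}, y[1] = {0.5};
+  int npt = 1, nr = 64, nc = 64, mr = 1, mc = 1;
+  double xmin = 0.0, ymin = 0.0, xmax = 1.0, ymax = 1.0;
+  exact_dt_R(x, y, &npt, &xmin, &ymin, &xmax, &ymax,
+             &nr, &nc, &mr, &mc, distances, indices, boundary);
+}
+#endif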
diff --git a/src/f3.c b/src/f3.c
new file mode 100755
index 0000000..d8069df
--- /dev/null
+++ b/src/f3.c
@@ -0,0 +1,499 @@
+#include <math.h>
+#include <R.h>
+#include <R_ext/Utils.h>
+#include "geom3.h"
+#include "functable.h"
+
+#ifdef DEBUG
+#define DEBUGMESSAGE(S) Rprintf(S);
+#else
+#define DEBUGMESSAGE(S) 
+#endif
+
+/*
+	$Revision: 1.4 $	$Date: 2016/10/23 04:24:03 $
+
+	3D distance transform
+
+# /////////////////////////////////////////////
+# AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991.
+# 
+# MODIFIED BY: Adrian Baddeley, Perth 2009
+# 
+# This software is distributed free
+# under the conditions that
+# 	(1) it shall not be incorporated
+# 	in software that is subsequently sold
+# 	(2) the authorship of the software shall
+# 	be acknowledged in any publication that 
+# 	uses results generated by the software
+# 	(3) this notice shall remain in place
+# 	in each file.
+# //////////////////////////////////////////////
+
+*/
+
+	/* step lengths in distance transform */
+#define	STEP1 41
+#define	STEP2 58
+#define	STEP3 71
+	/* (41,58,71)/41 is a good rational approximation
+	   to (1, sqrt(2), sqrt(3))	 */
+
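+/*
+   Editorial check (not upstream code): 58/41 = 1.4146 against
+   sqrt(2) = 1.4142, and 71/41 = 1.7317 against sqrt(3) = 1.7321,
+   so the relative error of the integer weights is below 0.03 per cent.
+*/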
+
+#define MIN(X,Y) (((X) < (Y)) ? (X) : (Y))
+#define MAX(X,Y) (((X) > (Y)) ? (X) : (Y))
+
+typedef struct IntImage {
+	int	*data;
+	int	Mx, My, Mz;	/* dimensions */
+	int	length;
+} IntImage;
+
+typedef struct BinaryImage {
+	unsigned char *data;
+	int	Mx, My, Mz;	/* dimensions */
+	int	length;
+} BinaryImage;
+	
+#define VALUE(I,X,Y,Z) \
+	((I).data)[ (Z) * ((I).Mx) * ((I).My) + (Y) * ((I).Mx)  + (X) ]
+
+
+void
+allocBinImage(b, ok)
+     BinaryImage	*b;
+     int		*ok;
+{
+  b->length = b->Mx * b->My * b->Mz;
+  b->data = (unsigned char *) 
+    R_alloc(b->length, sizeof(unsigned char));
+
+  if(b->data == 0) {
+    Rprintf("Can't allocate memory for %d binary voxels\n", b->length);
+    *ok = 0;
+    return;
+  }
+  *ok = 1;
+}
+
+void
+allocIntImage(v, ok)
+     IntImage	*v;
+     int	*ok;
+{
+  v->length = v->Mx * v->My * v->Mz;
+  v->data = (int *) R_alloc(v->length, sizeof(int));
+
+  if(v->data == 0) {
+    Rprintf("Can't allocate memory for %d integer voxels\n", v->length);
+    *ok = 0;
+    return;
+  }
+  *ok = 1;
+}
+
+/* nothing to free: memory obtained from R_alloc is reclaimed by R */
+void freeBinImage(b) BinaryImage *b; { }
+void freeIntImage(v) IntImage *v; { }
+
+void
+cts2bin(p, n, box, vside, b, ok)
+     /* convert a list of points inside a box
+	into a 3D binary image */
+     Point	*p;
+     int	n;
+     Box	*box;
+     double	vside;	/* side of a (cubic) voxel */
+     BinaryImage	*b;
+     int	*ok;
+{
+  int	i, lx, ly, lz;
+  unsigned char	*cp;
+
+  b->Mx = (int) ceil((box->x1 - box->x0)/vside) + 1;
+  b->My = (int) ceil((box->y1 - box->y0)/vside) + 1;
+  b->Mz = (int) ceil((box->z1 - box->z0)/vside) + 1;
+
+  allocBinImage(b, ok);
+
+  if(! (*ok)) return;
+
+  for(i = b->length, cp = b->data; i ; i--, cp++)
+    *cp = 1;
+
+  for(i=0;i<n;i++) {
+    lx = (int) ceil((p[i].x - box->x0)/vside)-1;
+    ly = (int) ceil((p[i].y - box->y0)/vside)-1;
+    lz = (int) ceil((p[i].z - box->z0)/vside)-1;
+    
+    if( lx >= 0 && lx < b->Mx 
+	&& ly >= 0 && ly < b->My 
+	&& lz >= 0 && lz < b->Mz 
+	) 
+      VALUE((*b),lx,ly,lz) = 0;
+  }
+}
+
+void
+distrans3(b, v, ok)
+			/* Distance transform in 3D */
+     BinaryImage *b;		/* input */
+     IntImage    *v;		/* output */
+     int	    *ok;
+{
+  register int x, y, z;
+  int infinity, q;
+
+  /* allocate v same size as b */
+  v->Mx = b->Mx;
+  v->My = b->My;
+  v->Mz = b->Mz;
+
+  allocIntImage(v, ok);
+  if(! (*ok)) return;
+
+  /* compute largest possible distance */
+  infinity = (int) ceil( ((double) STEP3) * 
+			 sqrt(
+			      ((double) b->Mx) * b->Mx 
+			      + ((double) b->My) * b->My 
+			      + ((double) b->Mz) * b->Mz));
+
+  /* Forward pass: Top to Bottom; Back to Front; Left to Right. */
+
+  for(z=0;z<b->Mz;z++) {
+    R_CheckUserInterrupt();
+    for(y=0;y<b->My;y++) {
+      for(x=0;x<b->Mx;x++) {
+	if(VALUE((*b),x,y,z) == 0)
+	  VALUE((*v),x,y,z) = 0;
+	else {
+	  q = infinity;
+
+#define INTERVAL(W, DW, MW) \
+	((DW == 0) || (DW == -1 && W > 0) || (DW == 1 && W < MW - 1))
+#define BOX(X,Y,Z,DX,DY,DZ) \
+	(INTERVAL(X,DX,v->Mx) && INTERVAL(Y,DY,v->My) && INTERVAL(Z,DZ,v->Mz))
+#define TEST(DX,DY,DZ,DV) \
+	if(BOX(x,y,z,DX,DY,DZ) && q > VALUE((*v),x+DX,y+DY,z+DZ) + DV) \
+		q = VALUE((*v),x+DX,y+DY,z+DZ) + DV 
+
+	  /* same row */
+	  TEST(-1, 0, 0, STEP1);
+
+	  /* same plane */
+	  TEST(-1,-1, 0, STEP2);
+	  TEST( 0,-1, 0, STEP1);
+	  TEST( 1,-1, 0, STEP2);
+
+	  /* previous plane */
+	  TEST( 1, 1,-1, STEP3);
+	  TEST( 0, 1,-1, STEP2);
+	  TEST(-1, 1,-1, STEP3);
+
+	  TEST( 1, 0,-1, STEP2);
+	  TEST( 0, 0,-1, STEP1);
+	  TEST(-1, 0,-1, STEP2);
+
+	  TEST( 1,-1,-1, STEP3);
+	  TEST( 0,-1,-1, STEP2);
+	  TEST(-1,-1,-1, STEP3);
+
+	  VALUE((*v),x,y,z) = q;
+	}
+      }
+    }
+  }
+
+
+	/* Backward pass: Bottom to Top; Front to Back; Right to Left. */
+
+  for(z = b->Mz - 1; z >= 0; z--) {
+    R_CheckUserInterrupt();
+    for(y = b->My - 1; y >= 0; y--) {
+      for(x = b->Mx - 1; x >= 0; x--) {
+	if((q = VALUE((*v),x,y,z)) != 0)
+	{
+	  /* same row */
+	  TEST(1, 0, 0, STEP1);
+
+	  /* same plane */
+	  TEST(-1, 1, 0, STEP2);
+	  TEST( 0, 1, 0, STEP1);
+	  TEST( 1, 1, 0, STEP2);
+
+	  /* plane below */
+	  TEST( 1, 1, 1, STEP3);
+	  TEST( 0, 1, 1, STEP2);
+	  TEST(-1, 1, 1, STEP3);
+
+	  TEST( 1, 0, 1, STEP2);
+	  TEST( 0, 0, 1, STEP1);
+	  TEST(-1, 0, 1, STEP2);
+
+	  TEST( 1,-1, 1, STEP3);
+	  TEST( 0,-1, 1, STEP2);
+	  TEST(-1,-1, 1, STEP3);
+
+	  VALUE((*v),x,y,z) = q;
+	}
+      }
+    }
+  }
+}
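+
+/* Editorial sketch (not part of the upstream code): the same two-pass
+   chamfer idea in one dimension, using the hypothetical helper 'dt1d'
+   and this file's STEP1 weight.  distrans3() above applies the identical
+   scheme with 13 forward and 13 backward neighbour offsets, weighted
+   STEP1/STEP2/STEP3 according to axial/diagonal adjacency. */
+static void
+dt1d(f, d, n)
+     unsigned char *f;	/* input: zeros mark the data */
+     int *d;		/* output: chamfer distance to nearest zero */
+     int n;
+{
+  int i;
+  /* forward pass: nearest zero at or to the left (n * STEP1 acts as infinity) */
+  for(i = 0; i < n; i++)
+    d[i] = (f[i] == 0) ? 0 : ((i > 0) ? d[i-1] + STEP1 : n * STEP1);
+  /* backward pass: relax using the right neighbour */
+  for(i = n - 2; i >= 0; i--)
+    if(d[i] > d[i+1] + STEP1) d[i] = d[i+1] + STEP1;
+}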
+
+void
+hist3d(v, vside, count)
+     /* compute histogram of all values in *v
+	using count->n histogram cells 
+	ranging from count->t0 to count->t1 
+	and put results in count->num
+     */
+     IntImage *v;
+     double	vside;
+     Itable	*count;
+{
+  register int i, j, k; 
+  register int *ip;
+  register double scale, width;
+
+  /* relationship between distance transform units
+     and physical units */
+  scale = vside/STEP1;
+  width = (count->t1 - count->t0)/(count->n - 1);
+
+  for(i = 0; i < count->n ; i++) {
+    (count->num)[i] = 0;
+    (count->denom)[i] = v->length;
+  }
+
+  for(i = v->length, ip = v->data; i; i--, ip++) {
+    
+    k = (int) ceil((*ip * scale - count->t0)/width);
+    k = MAX(k, 0);
+
+    for(j = k; j < count->n; j++)
+      (count->num)[j]++;
+  }
+}	
+
+void
+hist3dminus(v, vside, count)	/* minus sampling */
+     IntImage *v;
+     double	vside;
+     Itable	*count;
+{
+  register int x, y, z, val, border, bx, by, bz, byz, j, kbord, kval;
+  register double scale, width;
+
+  DEBUGMESSAGE("inside hist3dminus\n")
+
+  scale = vside/STEP1;
+  width = (count->t1 - count->t0)/(count->n - 1);
+
+  /* table is assumed to have been initialised in MakeItable */
+
+  for(z = 0; z < v->Mz; z++) {
+    bz = MIN(z + 1, v->Mz - z);
+    for(y = 0; y < v->My; y++) {
+      by = MIN(y + 1, v->My - y);
+      byz = MIN(by, bz);
+      for(x = 0; x < v->Mx; x++) {
+	bx = MIN(x + 1, v->Mx - x);
+	border = MIN(bx, byz);
+
+	kbord = (int) floor((vside * border - count->t0)/width);
+	kbord = MIN(kbord, count->n - 1);
+
+	/* denominator counts all voxels with 
+	   distance to boundary >= r */
+	if(kbord >= 0)
+	  for(j = 0; j <= kbord; j++)
+	    (count->denom)[j]++;
+
+	val = VALUE((*v), x, y, z);
+	kval = (int) ceil((val * scale - count->t0)/width);
+	kval = MAX(kval, 0);
+
+#ifdef DEBUG
+	/*
+	Rprintf("border=%lf\tkbord=%d\tval=%lf\tkval=%d\n", 
+		vside * border, kbord, scale * val, kval);
+	*/
+#endif
+
+	  /* numerator counts all voxels with
+	     distance to boundary >= r
+	     and distance to nearest point <= r
+	  */
+	if(kval <= kbord)
+	  for(j = kval; j <= kbord; j++)
+	    (count->num)[j]++;
+	  
+      }
+    }
+  }
+  DEBUGMESSAGE("leaving hist3dminus\n")
+}	
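+
+/* Editorial note: the counts accumulated above give the reduced-sample
+   (minus-sampling) estimator
+       Fhat(r_j) = num[j]/denom[j]
+                 = #{voxels: b >= r_j and d <= r_j} / #{voxels: b >= r_j},
+   where b is the distance to the box boundary and d is the distance
+   transform value at the voxel. */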
+
+void
+hist3dCen(v, vside, count)	/* four censoring-related histograms */
+     IntImage *v;
+     double	vside;
+     H4table	*count;
+{
+  register int x, y, z, val, border, bx, by, bz, byz, kbord, kval;
+  register double scale, width, realborder, realval;
+
+  DEBUGMESSAGE("inside hist3dCen\n")
+
+  scale = vside/STEP1;
+  width = (count->t1 - count->t0)/(count->n - 1);
+
+  /* table is assumed to have been initialised in MakeH4table */
+
+  for(z = 0; z < v->Mz; z++) {
+
+    bz = MIN(z + 1, v->Mz - z);
+
+    for(y = 0; y < v->My; y++) {
+
+      by = MIN(y + 1, v->My - y);
+      byz = MIN(by, bz);
+
+      for(x = 0; x < v->Mx; x++) {
+
+	bx = MIN(x + 1, v->Mx - x);
+	border = MIN(bx, byz);
+	realborder = vside * border;
+
+	kbord = (int) floor((realborder - count->t0)/width);
+
+	val = VALUE((*v), x, y, z);
+	realval = scale * val;
+
+	kval = (int) ceil((realval - count->t0)/width);
+	/* this could exceed array limits; that will be detected below */
+
+#ifdef DEBUG
+	Rprintf("border=%lf\tkbord=%d\tval=%lf\tkval=%d\n", 
+		realborder, kbord, realval, kval);
+#endif
+
+	if(realval <= realborder) {
+	  /* observation is uncensored; 
+             increment all four histograms */
+	  if(kval >= count->n)
+	    ++(count->upperobs);
+	  else if(kval >= 0) {
+	      (count->obs)[kval]++;
+	      (count->nco)[kval]++;
+	  }
+	  if(kbord >= count->n)
+	    ++(count->uppercen);
+	  else if(kbord >= 0) {
+	      (count->cen)[kbord]++;
+	      (count->ncc)[kbord]++;
+	  }
+	} else {
+	  /* observation is censored; 
+             increment only two histograms */
+	  kval = MIN(kval, kbord);
+	  if(kval >= count->n)
+	    ++(count->upperobs);
+	  else if(kval >= 0) 
+	    (count->obs)[kval]++;
+
+	  if(kbord >= count->n)
+	    ++(count->uppercen);
+	  else if(kbord >= 0) 
+	    (count->cen)[kbord]++;
+	}
+      }
+    }
+  }
+  DEBUGMESSAGE("leaving hist3dCen\n")
+}
+
+/*
+	CALLING ROUTINES 
+*/
+
+void
+phatminus(p, n, box, vside, count)
+     Point	*p;
+     int	n;
+     Box	*box;
+     double	vside;
+     Itable	*count;
+{
+  BinaryImage	b;
+  IntImage	v;
+  int		ok;
+
+  DEBUGMESSAGE("in phatminus\ncalling cts2bin...")
+  cts2bin(p, n, box, vside, &b, &ok);
+  DEBUGMESSAGE("out of cts2bin\ninto distrans3...")
+  if(ok) 
+    distrans3(&b, &v, &ok);
+
+  if(ok) {
+    freeBinImage(&b);
+    DEBUGMESSAGE("out of distrans3\ninto hist3dminus...")
+    hist3dminus(&v, vside, count);
+    DEBUGMESSAGE("out of hist3dminus\n")
+    freeIntImage(&v);
+  }
+}
+
+void
+phatnaive(p, n, box, vside, count)
+     Point	*p;
+     int	n;
+     Box	*box;
+     double	vside;
+     Itable	*count;
+{
+  BinaryImage	b;
+  IntImage	v;
+  int		ok;
+
+  DEBUGMESSAGE("in phatnaive\ncalling cts2bin...")
+  cts2bin(p, n, box, vside, &b, &ok);
+  DEBUGMESSAGE("out of cts2bin\n into distrans3...")
+  if(ok)
+    distrans3(&b, &v, &ok);
+  if(ok) {
+    freeBinImage(&b);
+    DEBUGMESSAGE("out of distrans3\ninto hist3d..."); 
+    hist3d(&v, vside, count);
+    DEBUGMESSAGE("out of hist3d\n")
+    freeIntImage(&v);
+  }
+}
+
+void
+p3hat4(p, n, box, vside, count)
+     Point	*p;
+     int	n;
+     Box	*box;
+     double	vside;
+     H4table	*count;
+{
+  BinaryImage	b;
+  IntImage	v;
+  int		ok;
+
+  DEBUGMESSAGE("in phatminus\ncalling cts2bin...")
+  cts2bin(p, n, box, vside, &b, &ok);
+  DEBUGMESSAGE("out of cts2bin\ninto distrans3...")
+  if(ok) 
+    distrans3(&b, &v, &ok);
+
+  if(ok) {
+    freeBinImage(&b);
+    DEBUGMESSAGE("out of distrans3\ninto hist3dminus...")
+    hist3dCen(&v, vside, count);
+    DEBUGMESSAGE("out of hist3dminus\n")
+    freeIntImage(&v);
+  }
+}
+
diff --git a/src/fardist.c b/src/fardist.c
new file mode 100644
index 0000000..deab73b
--- /dev/null
+++ b/src/fardist.c
@@ -0,0 +1,30 @@
+/*
+
+  fardist.c
+
+  Furthest data point from each grid point
+
+  Uses code template 'fardist.h'
+
+  Copyright (C) Adrian Baddeley, Rolf Turner and Ege Rubak 2014
+  Licence: GPL >= 2
+
+  $Revision: 1.2 $  $Date: 2014/08/31 06:43:42 $
+
+
+*/
+
+#include <R.h>
+#include <R_ext/Utils.h>
+#include <math.h>
+
+
+#define FNAME fardistgrid
+#undef  SQUARED
+#include "fardist.h"
+#undef FNAME
+
+#define FNAME fardist2grid
+#define  SQUARED
+#include "fardist.h"
diff --git a/src/fardist.h b/src/fardist.h
new file mode 100644
index 0000000..365492f
--- /dev/null
+++ b/src/fardist.h
@@ -0,0 +1,80 @@
+/*
+
+  fardist.h
+
+  Code template for fardist.c
+
+  Macros used:
+      FNAME  function name
+      SQUARED  #defined if squared distances should be returned.
+
+  Copyright (C) Adrian Baddeley, Rolf Turner and Ege Rubak 2014
+  Licence: GPL >= 2
+
+  $Revision: 1.3 $  $Date: 2014/08/31 06:42:50 $
+
+
+*/
+
+void FNAME(nx, x0, xstep,  
+	   ny, y0, ystep,   /* pixel grid dimensions */
+	   np, xp, yp,   /* data points */
+	   dfar) /* output grid */
+     /* inputs */
+     int *nx, *ny, *np;
+     double *x0, *xstep, *y0, *ystep;
+     double *xp, *yp;
+     /* outputs */
+     double *dfar;
+{ 
+  int Nxcol, Nyrow, Npoints;
+  int i, j, k, ijpos;
+  double  X0, Y0, Xstep, Ystep, yi, xj;
+  double d2, d2max, dx, dy;
+
+  Nxcol   = *nx;
+  Nyrow   = *ny;
+  Npoints = *np;
+  X0      = *x0;
+  Y0      = *y0;
+  Xstep   = *xstep;
+  Ystep   = *ystep;
+
+  if(Npoints == 0)
+    return;
+
+  /* loop over pixels */
+
+  for(j = 0, xj = X0; j < Nxcol; j++, xj += Xstep) {
+
+    R_CheckUserInterrupt();
+    
+    for(i = 0, yi = Y0; i < Nyrow; i++, yi += Ystep) {
+
+      d2max = 0.0;
+      
+      for(k = 0; k < Npoints; k++) {
+	
+	dx = xj - xp[k];
+	dy = yi - yp[k]; 
+	d2 = dx * dx + dy * dy;
+	if(d2 > d2max) 
+	  d2max = d2;
+	
+      }
+
+      ijpos = i + j * Nyrow;
+
+#ifdef SQUARED
+      dfar[ijpos] = d2max;
+#else
+      dfar[ijpos] = sqrt(d2max);
+#endif
+
+    /* end of loop over grid points (i, j) */
+    }
+  }
+}
+
+
+
diff --git a/src/fexitc.c b/src/fexitc.c
new file mode 100755
index 0000000..94055a3
--- /dev/null
+++ b/src/fexitc.c
@@ -0,0 +1,16 @@
+# include <R.h>
+# include <stddef.h>
+# include <string.h>
+
+void fexitc(const char *msg)
+{
+    size_t nc = strlen(msg);
+    char buf[256];
+    if(nc > 255) {
+        warning("invalid character length in fexitc");
+        nc = 255;
+    }
+    strncpy(buf, msg, nc);
+    buf[nc] = '\0';
+    error("%s", buf);
+}
diff --git a/src/fiksel.c b/src/fiksel.c
new file mode 100755
index 0000000..d39879b
--- /dev/null
+++ b/src/fiksel.c
@@ -0,0 +1,153 @@
+#include <R.h>
+#include <Rmath.h>
+#include <math.h>
+#include "methas.h"
+#include "dist2.h"
+
+/* Conditional intensity computation for Fiksel process */
+
+/*
+ Conditional intensity function for a pairwise interaction point
+ process with interaction function 
+
+                  e(t) = 0 for t < h
+                       = exp(a * exp(- kappa * t)) for h <= t < r
+                       = 1 for t >= r
+
+*/
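+
+/* Editorial sketch (not part of the upstream code): the interaction
+   function e(t) written out directly; 'fikselpot' is a hypothetical name.
+   The evaluator below works with squared distances instead, so that
+   sqrt() is only called for pairs already known to be closer than r. */
+static double fikselpot(double t, double h, double r, double kappa, double a)
+{
+  if(t < h)  return(0.0);
+  if(t >= r) return(1.0);
+  return(exp(a * exp(-kappa * t)));
+}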
+
+/* Storage of parameters and precomputed/auxiliary data */
+
+typedef struct Fiksel {
+  double r;
+  double h;
+  double kappa;
+  double a;
+  double h2;  /*  h^2   */
+  double r2;  /*  r^2 */
+  double *period;
+  int per;
+} Fiksel;
+
+
+/* initialiser function */
+
+Cdata *fikselinit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  Fiksel *fiksel;
+  fiksel = (Fiksel *) R_alloc(1, sizeof(Fiksel));
+
+  /* Interpret model parameters*/
+  fiksel->r      = model.ipar[0];
+  fiksel->h      = model.ipar[1];
+  fiksel->kappa  = model.ipar[2];
+  fiksel->a      = model.ipar[3];
+  fiksel->period = model.period;
+  /* constants */
+  fiksel->h2 = pow(fiksel->h, 2);
+  fiksel->r2 = pow(fiksel->r, 2);
+  /* periodic boundary conditions? */
+  fiksel->per    = (model.period[0] > 0.0);
+
+  return((Cdata *) fiksel);
+}
+
+/* conditional intensity evaluator */
+
+double fikselcif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int npts, ix, ixp1, j;
+  double *x, *y;
+  double u, v;
+  double d2, pairpotsum, cifval;
+  double kappa, r2, h2;
+  double *period;
+  Fiksel *fiksel;
+  DECLARE_CLOSE_D2_VARS;
+
+  fiksel = (Fiksel *) cdata;
+  period = fiksel->period;
+  kappa  = fiksel->kappa;
+  r2     = fiksel->r2;
+  h2     = fiksel->h2;
+
+  u  = prop.u;
+  v  = prop.v;
+  ix = prop.ix;
+  x  = state.x;
+  y  = state.y;
+  npts = state.npts;
+
+  cifval = 1.0;
+
+  if(npts == 0) 
+    return(cifval);
+
+  pairpotsum = 0;
+
+  ixp1 = ix+1;
+  /* If ix = NONE = -1, then ixp1 = 0 is correct */
+  if(fiksel->per) { /* periodic distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,r2,d2)) {	
+	  if(d2 < h2) {
+	    cifval = 0.0;
+	    return(cifval);
+	  } else {
+	    pairpotsum += exp(-kappa * sqrt(d2));
+	  }
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,r2,d2)) {	
+	  if(d2 < h2) {
+	    cifval = 0.0;
+	    return(cifval);
+	  } else {
+	    pairpotsum += exp(-kappa * sqrt(d2));
+	  }
+	}
+      }
+    }
+  } else { /* Euclidean distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(CLOSE_D2(u,v,x[j],y[j],r2,d2)) {	
+	  if(d2 < h2) {
+	    cifval = 0.0;
+	    return(cifval);
+	  } else {
+	    pairpotsum += exp(-kappa * sqrt(d2));
+	  }
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(CLOSE_D2(u,v,x[j],y[j],r2,d2)) {	
+	  if(d2 < h2) {
+	    cifval = 0.0;
+	    return(cifval);
+	  } else {
+	    pairpotsum += exp(-kappa * sqrt(d2));
+	  }
+	}
+      }
+    }
+  }
+
+  cifval = exp(fiksel->a * pairpotsum);
+  return cifval;
+}
+
+Cifns FikselCifns = { &fikselinit, &fikselcif, (updafunptr) NULL, NO};
+
diff --git a/src/functable.h b/src/functable.h
new file mode 100755
index 0000000..a19fb40
--- /dev/null
+++ b/src/functable.h
@@ -0,0 +1,58 @@
+/*
+	$Revision: 1.1 $ $Date: 2009/11/04 23:54:15 $
+
+	Definitions of C structures 
+	for spatial statistics function estimates.
+
+	Usually the estimates are of the form f^(x) = a^(x)/b^(x);
+	we store f^ and also a^ and b^ to cater for 
+	applications with replicated data.
+
+# /////////////////////////////////////////////
+# AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991.
+# 
+# This software is distributed free
+# under the conditions that
+# 	(1) it shall not be incorporated
+# 	in software that is subsequently sold
+# 	(2) the authorship of the software shall
+# 	be acknowledged in any publication that 
+# 	uses results generated by the software
+# 	(3) this notice shall remain in place
+# 	in each file.
+# //////////////////////////////////////////////
+
+*/
+
+typedef struct Ftable {		/* double precision function table */
+	double	t0;
+	double	t1;
+	int	n;		/* number of entries */
+	double 	*f;		
+	double	*num;		/* 	f[i]  = num[i]/denom[i] 	*/
+	double	*denom;
+} Ftable;
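+
+/* Editorial sketch (not part of the upstream code): because num and denom
+   are stored separately, estimates from replicated patterns can be pooled
+   by summation.  'poolFtable' is a hypothetical helper; it assumes both
+   tables share the same breakpoints t0, t1, n.  (Kept inside a comment
+   because this is a header file.)
+
+   void poolFtable(Ftable *a, Ftable *b)
+   {
+     int i;
+     for(i = 0; i < a->n; i++) {
+       a->num[i]   += b->num[i];
+       a->denom[i] += b->denom[i];
+       a->f[i] = (a->denom[i] != 0.0) ? a->num[i]/a->denom[i] : 0.0;
+     }
+   }
+*/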
+
+typedef struct Itable {		/* integer count table, e.g. for histograms */
+	double	t0;
+	double	t1;
+	int	n;
+
+	int	*num;
+	int	*denom;		/* usually p[i] = num[i]/denom[i] */
+} Itable;
+
+typedef struct H4table {	/* Four histograms, for censored data */
+  double	t0;
+  double	t1;
+  int	n;
+
+  int	*obs;  /* observed lifetimes: o_i = min(t_i, c_i) */
+  int	*nco;  /* uncensored lifetimes: o_i for which t_i <= c_i */
+  int	*cen;  /* censoring times: c_i */
+  int	*ncc;  /* censor times of uncensored data: c_i for which t_i <= c_i */
+
+  int   upperobs;  /* number of o_i that exceed t1 */
+  int   uppercen;  /* number of c_i that exceed t1 */
+
+} H4table;
diff --git a/src/g3.c b/src/g3.c
new file mode 100755
index 0000000..c8f944e
--- /dev/null
+++ b/src/g3.c
@@ -0,0 +1,265 @@
+#include <math.h>
+#include <R.h>
+#include "geom3.h"
+#include "functable.h"
+
+/*
+	$Revision: 1.3 $	$Date: 2012/05/22 07:17:31 $
+
+	G function (nearest neighbour distribution) of 3D point pattern
+
+
+	Let 	b = distance from point p[i] to boundary of box
+	 	d = distance from p[i] to nearest p[j] 
+
+
+	method = 1	naive ratio estimator (Ripley 1981)
+
+			numerator(r)  = count(i: b >= r, d <= r)
+			denominator(r)  = count(i: b >= r)
+
+	method = 2	minus sampling estimator
+
+			numerator(r) = count(i: b >= r, d <= r)
+			denominator(r) = lambda * volume(x: b >= r)
+
+			where lambda = (no of points)/volume(box)
+
+	method = 3	Hanisch's G3
+
+			numerator(r) = count(i: b >= d, d <= r)
+			denominator(r) = count(i: b >= d)
+
+	method = 4	Hanisch's G4
+
+			numerator(r) = count(i: b >= d, d <= r)
+			denominator(r) = fudge * volume(x: b >= r)
+
+			fudge = numerator(R)/denominator(R)
+			R = sup{r : denominator(r) > 0 }
+
+
+# /////////////////////////////////////////////
+# AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991.
+#
+# MODIFIED BY: Adrian Baddeley, Perth 2009, 2012.
+#
+# This software is distributed free
+# under the conditions that
+# 	(1) it shall not be incorporated
+# 	in software that is subsequently sold
+# 	(2) the authorship of the software shall
+# 	be acknowledged in any publication that 
+# 	uses results generated by the software
+# 	(3) this notice shall remain in place
+# 	in each file.
+# //////////////////////////////////////////////
+
+
+*/
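+
+/* Editorial note: for a box with side lengths Lx, Ly, Lz, the eroded
+   volume required by methods 2 and 4 is
+       volume(x: b >= r) = (Lx - 2r)(Ly - 2r)(Lz - 2r),  0 <= r < min(Lx,Ly,Lz)/2,
+   since the set {b >= r} is the box shrunk by r on every face. */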
+
+#define MIN(X,Y) (((X) > (Y)) ? (Y) : (X))
+
+double *
+nndist3(p, n, b)
+		/* compute nearest neighbour distance for each p[i] */
+     Point *p;
+     int n;
+     Box *b;
+{
+  register int i, j;
+  register double dx, dy, dz, dist2, nearest2, huge2;
+  Point *ip, *jp;
+  double *nnd;
+
+  nnd = (double *) R_alloc(n, sizeof(double));
+
+  dx = b->x1 - b->x0;
+  dy = b->y1 - b->y0;
+  dz = b->z1 - b->z0;
+  huge2 = 2.0 * (dx * dx + dy * dy + dz * dz);
+	
+  /* scan each point and find closest */
+  for( i = 0; i < n; i++) {
+    ip = p + i;
+    nearest2 = huge2;
+    for(j = 0; j < n; j++)
+      if(j != i) {
+	jp = p + j;
+	dx = jp->x - ip->x;
+	dy = jp->y - ip->y;
+	dz = jp->z - ip->z;
+	dist2 = dx * dx + dy * dy + dz * dz;
+	if(dist2 < nearest2)
+	  nearest2 = dist2;
+      }
+    nnd[i] = sqrt(nearest2);
+  }
+  return(nnd);
+}
+
+double *
+border3(p, n, b)
+		/* compute distances to border */
+     Point *p;
+     int n;
+     Box *b;
+{
+  register int i;
+  register double bord;
+  register Point *ip;
+  double *bored;
+
+  bored = (double *) R_alloc(n, sizeof(double));
+
+  for( i = 0; i < n; i++) {
+    ip = p + i;
+    bord = MIN(ip->x - b->x0, b->x1 - ip->x);
+    bord = MIN(bord, ip->y - b->y0);
+    bord = MIN(bord, b->y1 - ip->y);
+    bord = MIN(bord, ip->z - b->z0);
+    bord = MIN(bord, b->z1 - ip->z);
+    bored[i] = bord;
+  }
+  return(bored);
+}
+
+void
+g3one(p, n, b, g)
+     Point *p;
+     int n;
+     Box *b;
+     Ftable *g;
+{
+  register int i, l, lbord, lnnd;
+  double dt;
+  double	*bord, *nnd;
+
+  bord = border3(p, n, b);
+  nnd  = nndist3(p, n, b);
+	
+  /* initialise */
+  for(l = 0; l < g->n; l++)
+    (g->num)[l] = (g->denom)[l] = 0.0;
+
+  /* spacing of argument in result vector g */
+  dt = (g->t1 - g->t0)/(g->n - 1);
+
+  for(i = 0; i < n; i++) { 
+    lbord = (int) floor( (bord[i] - g->t0) / dt );
+    if(lbord >= g->n) 
+      lbord = g->n - 1;
+    for(l = 0; l <= lbord; l++)
+      (g->denom)[l] += 1.0;
+
+    lnnd = (int) ceil( (nnd[i] - g->t0) / dt );
+    if(lnnd < 0) lnnd = 0;
+    for(l = lnnd; l <= lbord; l++)
+      (g->num)[l] += 1.0;
+  }
+
+  /* compute ratio */
+  for(l = 0; l < g->n; l++)
+    (g->f)[l] = ((g->denom)[l] > 0)?
+      (g->num)[l] / (g->denom)[l] : 1.0;
+			   
+}
+
+void
+g3three(p, n, b, g)
+     Point *p;
+     int n;
+     Box *b;
+     Ftable *g;
+{
+  register int i, l, lmin;
+  double dt;
+  int	denom;
+  double	*bord, *nnd;
+
+  bord = border3(p, n, b);
+  nnd  = nndist3(p, n, b);
+	
+  /* initialise */
+  denom = 0;
+  for(l = 0; l < g->n; l++)
+    (g->num)[l]   = 0.0;
+  
+  /* spacing of argument in result vector g */
+  dt = (g->t1 - g->t0)/(g->n - 1);
+
+  for(i = 0; i < n; i++) { 
+    if(nnd[i] <= bord[i]) {
+      ++denom;
+
+      lmin = (int) ceil( (nnd[i] - g->t0) / dt );
+      if(lmin < 0) lmin = 0;
+      for(l = lmin; l < g->n; l++)
+	(g->num)[l] += 1.0;
+    }
+  }
+
+  /* compute ratio */
+  for(l = 0; l < g->n; l++) {
+    (g->denom)[l] = denom;
+    (g->f)[l] = (denom > 0)?
+      (g->num)[l] / (double) denom
+      : 1.0;
+  }
+}
+
+void
+g3cen(p, n, b, count)
+     Point *p;
+     int n;
+     Box *b;
+     H4table *count;
+{
+  register int i, lcen, lobs;
+  register double dt, cens, obsv;
+  double	*bord, *nnd;
+
+  bord = border3(p, n, b);
+  nnd  = nndist3(p, n, b);
+	
+  /* spacing of histogram cells */
+  dt = (count->t1 - count->t0)/(count->n - 1);
+
+  /* 'count' is assumed to have been initialised */
+  for(i = 0; i < n; i++) { 
+    obsv = nnd[i];
+    cens = bord[i];
+    lobs = (int) ceil( (obsv - count->t0) / dt );
+    lcen = (int) floor( (cens - count->t0) / dt );
+    if(obsv <= cens) {
+      /* observation is uncensored; 
+	 increment all four histograms */
+      if(lobs >= count->n)
+	++(count->upperobs);
+      else if(lobs >= 0) {
+	(count->obs)[lobs]++;
+	(count->nco)[lobs]++;
+      }
+      if(lcen >= count->n)
+	++(count->uppercen);
+      else if(lcen >= 0) {
+	(count->cen)[lcen]++;
+	(count->ncc)[lcen]++;
+      }
+    } else {
+      /* observation is censored; 
+	 increment only two histograms */
+      lobs = MIN(lobs, lcen);
+      if(lobs >= count->n)
+	++(count->upperobs);
+      else if(lobs >= 0) 
+	(count->obs)[lobs]++;
+
+      if(lcen >= count->n)
+	++(count->uppercen);
+      else if(lcen >= 0) 
+	(count->cen)[lcen]++;
+    }
+  }
+}
+
diff --git a/src/geom3.h b/src/geom3.h
new file mode 100755
index 0000000..6bec70f
--- /dev/null
+++ b/src/geom3.h
@@ -0,0 +1,20 @@
+/*
+	$Revision: 1.1 $	$Date: 2009/11/04 23:54:15 $
+
+	Definitions for 3D geometrical structures
+*/
+
+typedef struct Point {
+	double x;
+	double y;
+	double z;
+} Point;
+
+typedef struct Box {
+	double x0;
+	double x1;
+	double y0;
+	double y1;
+	double z0;
+	double z1;
+} Box;
diff --git a/src/getcif.c b/src/getcif.c
new file mode 100755
index 0000000..889e999
--- /dev/null
+++ b/src/getcif.c
@@ -0,0 +1,74 @@
+#include <R.h>
+#include <string.h>
+#include "methas.h"
+
+void fexitc(const char *msg);
+
+extern Cifns AreaIntCifns, BadGeyCifns, DgsCifns, DiggraCifns, 
+  FikselCifns, GeyerCifns, HardcoreCifns, 
+  LennardCifns, LookupCifns, 
+  SoftcoreCifns, StraussCifns, StraussHardCifns, 
+  MultiStraussCifns, MultiStraussHardCifns, MultiHardCifns,
+  TripletsCifns, PenttinenCifns;
+
+Cifns NullCifns = NULL_CIFNS;
+
+typedef struct CifPair {
+  char *name;
+  Cifns *p;
+} CifPair;
+
+CifPair CifTable[] = { 
+  {"areaint",   &AreaIntCifns},
+  {"badgey",    &BadGeyCifns},
+  {"dgs",       &DgsCifns},
+  {"diggra",    &DiggraCifns},
+  {"geyer",     &GeyerCifns},
+  {"fiksel",    &FikselCifns},
+  {"hardcore",  &HardcoreCifns},
+  {"lookup",    &LookupCifns},
+  {"lennard",   &LennardCifns},
+  {"multihard", &MultiHardCifns},
+  {"penttinen", &PenttinenCifns},
+  {"sftcr",     &SoftcoreCifns},
+  {"strauss",   &StraussCifns},
+  {"straush",   &StraussHardCifns},
+  {"straussm",  &MultiStraussCifns},
+  {"straushm",  &MultiStraussHardCifns},
+  {"triplets",  &TripletsCifns},
+  {(char *) NULL, (Cifns *) NULL}
+};
+
+Cifns getcif(cifname) 
+     char *cifname;
+{
+  int i;
+  CifPair cp;
+  for(i = 0; CifTable[i].name; i++) {
+    cp = CifTable[i];
+    if(strcmp(cifname, cp.name) == 0)
+      return(*(cp.p));
+  }
+  fexitc("Unrecognised cif name; bailing out.\n");
+  /* control never passes to here, but compilers don't know that */
+  return(NullCifns);
+}
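+
+/* Editorial note: CifTable implements dispatch by model name.  The
+   simulation driver looks a model up once via getcif() and thereafter
+   works entirely through the returned Cifns bundle of function pointers
+   (initialiser, evaluator, optional updater, marked flag) -- compare the
+   static initialisers such as GeyerCifns in geyer.c. */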
+
+/* R interface function, to check directly whether cif is recognised */
+
+void knownCif(cifname, answer) 
+     char** cifname;
+     int* answer;
+{
+  int i;
+  CifPair cp;
+  for(i = 0; CifTable[i].name; i++) {
+    cp = CifTable[i];
+    if(strcmp(*cifname, cp.name) == 0) {
+      *answer = 1;
+      return;
+    }
+  }
+  *answer = 0;
+  return;
+}
+
diff --git a/src/geyer.c b/src/geyer.c
new file mode 100755
index 0000000..1425ba3
--- /dev/null
+++ b/src/geyer.c
@@ -0,0 +1,433 @@
+#include <R.h>
+#include <math.h>
+#include <stdlib.h>
+#include "methas.h"
+#include "dist2.h"
+
+void fexitc(const char *msg);
+
+#undef MH_DEBUG 
+
+/*
+  Conditional intensity function for a Geyer saturation process.  
+*/
+
+typedef struct Geyer {
+  /* model parameters */
+  double gamma;
+  double r;
+  double s;
+  /* transformations of the parameters */
+  double r2;
+  double loggamma;
+  int hard;
+  /* periodic distance */
+  double *period;
+  int per;
+  /* auxiliary counts */
+  int *aux;
+#ifdef MH_DEBUG
+  int *freshaux;
+  int prevtype;
+#endif
+} Geyer;
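+
+/* Editorial note: aux[i] caches the current number of r-close neighbours
+   of point i.  geyerinit() fills it with a brute-force pair scan, and
+   geyerupd() maintains it incrementally across BIRTH/DEATH/SHIFT
+   proposals, so geyercif() never has to rescan all pairs. */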
+
+Cdata *geyerinit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  int i, j, n1;
+  Geyer *geyer;
+  double r2;
+  double *period;
+  DECLARE_CLOSE_VARS;
+
+  geyer = (Geyer *) R_alloc(1, sizeof(Geyer));
+
+  /* Interpret model parameters*/
+  geyer->gamma  = model.ipar[0];
+  geyer->r      = model.ipar[1]; /* not squared any more */
+  geyer->s      = model.ipar[2]; 
+  geyer->r2     = geyer->r * geyer->r;
+#ifdef MH_DEBUG
+  Rprintf("Initialising Geyer gamma=%lf, r=%lf, sat=%lf\n",
+	  geyer->gamma, geyer->r, geyer->s);
+#endif
+  /* is the model numerically equivalent to hard core ? */
+  geyer->hard   = (geyer->gamma < DOUBLE_EPS);
+  geyer->loggamma = (geyer->hard) ? 0 : log(geyer->gamma);
+  /* periodic boundary conditions? */
+  geyer->period = model.period;
+  geyer->per    = (model.period[0] > 0.0);
+  /* allocate storage for auxiliary counts */
+  geyer->aux = (int *) R_alloc((size_t) state.npmax, sizeof(int));
+#ifdef MH_DEBUG
+  geyer->freshaux = (int *) R_alloc((size_t) state.npmax, sizeof(int));
+  geyer->prevtype = -42;
+#endif
+
+  r2 = geyer->r2;
+
+  /* Initialise auxiliary counts */
+  for(i = 0; i < state.npmax; i++) 
+    geyer->aux[i] = 0;
+
+  if(geyer->per) {
+    /* periodic */
+    period = geyer->period;
+    if(state.npts > 1) {
+      n1 = state.npts - 1;
+      for(i = 0; i < n1; i++) {
+	for(j = i+1; j < state.npts; j++) {
+	  if(CLOSE_PERIODIC(state.x[i], state.y[i], 
+			    state.x[j], state.y[j], 
+			    period, r2)) {
+	    geyer->aux[i] += 1;
+	    geyer->aux[j] += 1;
+	  }
+	}
+      }
+    }
+  } else {
+    /* Euclidean distance */
+    if(state.npts > 1) {
+      n1 = state.npts - 1;
+      for(i = 0; i < n1; i++) {
+	for(j = i+1; j < state.npts; j++) {
+	  if(CLOSE(state.x[i], state.y[i], 
+		 state.x[j], state.y[j], 
+		   r2)) {
+	    geyer->aux[i] += 1;
+	    geyer->aux[j] += 1;
+	  }
+	}
+      }
+    }
+  }
+  return((Cdata *) geyer);
+}
+
+double geyercif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int ix, j, npts, tee;
+  double u, v, r2, s;
+  double w, a, b, f, cifval;
+  double *x, *y;
+  int *aux;
+  double *period;
+  Geyer *geyer;
+  DECLARE_CLOSE_VARS;
+
+  geyer = (Geyer *) cdata;
+
+  npts = state.npts;
+  if(npts==0) return ((double) 1.0);
+
+  x = state.x;
+  y = state.y;
+  u = prop.u;
+  v = prop.v;
+  ix = prop.ix;
+
+  r2     = geyer->r2;
+  s      = geyer->s;
+  period = geyer->period;
+  aux    = geyer->aux;
+
+  /* 
+     tee = neighbour count at the point in question;
+     w   = sum of changes in (saturated) neighbour counts at other points 
+  */
+  tee = 0; w = 0.0;
+
+  if(prop.itype == BIRTH) {
+    if(geyer->per) {
+      /* periodic distance */
+      for(j=0; j<npts; j++) {
+	if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) {
+	  tee++;
+	  f = s - aux[j];
+	  if(f > 1) /* j is not saturated after addition of (u,v) */
+	    w = w + 1; /* addition of (u,v) increases count by 1 */
+	  else if(f > 0) /* j becomes saturated by addition of (u,v) */
+	    w = w + f;
+	}
+      }
+    } else {
+      /* Euclidean distance */
+      for(j=0; j<npts; j++) {
+	if(CLOSE(u,v,x[j],y[j],r2)) {
+	  tee++;
+	  f = s - aux[j];
+	  if(f > 1) /* j is not saturated after addition of (u,v) */
+	    w = w + 1; /* addition of (u,v) increases count by 1 */
+	  else if(f > 0) /* j becomes saturated by addition of (u,v) */
+	    w = w + f;
+	}
+      }
+    }
+  } else if(prop.itype == DEATH) {
+    tee = aux[ix];
+    if(geyer->per) {
+      /* Periodic distance */
+      for(j=0; j<npts; j++) {
+	if(j == ix) continue;
+	if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) {
+	  f = s - aux[j];
+	  if(f > 0) /* j is not saturated */
+	    w = w + 1; /* deletion of 'ix' decreases count by 1 */
+	  else {
+	    f = f+1;
+	    if(f > 0) {
+	      /* j is not saturated after deletion of 'ix' 
+		 (s must be fractional) */
+	      w = w + f; 
+	    }
+	  }
+	}
+      }
+    } else {
+      /* Euclidean distance */
+      for(j=0; j<npts; j++) {
+	if(j == ix) continue;
+	if(CLOSE(u,v,x[j],y[j],r2)) {
+	  f = s - aux[j];
+	  if(f > 0) /* j was not saturated */
+	    w = w + 1; /* deletion of 'ix' decreases count by 1 */
+	  else {
+	    f = f+1; 
+	    if(f > 0) {
+	      /* j is not saturated after deletion of 'ix' 
+		 (s must be fractional) */
+	      w = w + f; 
+	    }
+	  }
+	}
+      }
+    }
+  } else if(prop.itype == SHIFT) { 
+    /* Compute the cif at the new point, not the ratio of new/old */
+    if(geyer->per) {
+      /* Periodic distance */
+      for(j=0; j<npts; j++) {
+	if(j == ix) continue;
+	if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) {
+	  tee++;
+	  a = aux[j];
+	  /* Adjust */
+	  if(CLOSE_PERIODIC(x[ix],y[ix],x[j],y[j],period,r2)) a = a - 1;
+	  b = a + 1;
+	  if(a < s && s < b) {
+	    w = w + s - a;
+	  }
+	  else if(s >= b) w = w + 1;
+	}
+      }
+    } else {
+      /* Euclidean distance */
+      for(j=0; j<npts; j++) {
+	if(j == ix) continue;
+	if(CLOSE(u,v,x[j],y[j],r2)) {
+	  tee++;
+	  a = aux[j];
+	  /* Adjust */
+	  if(CLOSE(x[ix], y[ix], x[j], y[j], r2)) a = a - 1;
+	  b = a + 1;
+	  if(a < s && s < b) {
+	    w = w + s - a;
+	  }
+	  else if(s >= b) w = w + 1;
+	}
+      }
+    }
+  }
+
+  w = w + ((tee < s) ? tee : s);
+
+  if(geyer->hard) {
+    cifval = (tee > 0) ? 0.0 : 1.0;
+  } else {
+    cifval = exp(geyer->loggamma * w);
+  }
+  
+  return cifval;
+}
+
+void geyerupd(state, prop, cdata) 
+     State state;
+     Propo prop;
+     Cdata *cdata;
+{
+/* Declare other variables */
+  int ix, npts, j;
+  int oldclose, newclose;
+  double u, v, xix, yix, r2;
+  double *x, *y;
+  int *aux;
+  double *period;
+  Geyer *geyer;
+#ifdef MH_DEBUG
+  int *freshaux;
+  int i;
+  int oc, nc;
+#endif
+  DECLARE_CLOSE_VARS;
+
+  geyer = (Geyer *) cdata;
+  period = geyer->period;
+  aux = geyer->aux;
+  r2 = geyer->r2;
+
+  x = state.x;
+  y = state.y;
+  npts = state.npts;
+
+#ifdef MH_DEBUG  
+  /* ........................ debugging cross-check ................ */
+
+  /* recompute 'aux' values afresh */
+  freshaux = geyer->freshaux;
+  for(i = 0; i < state.npts; i++)
+    freshaux[i] = 0;
+
+  if(geyer->per) {
+    /* periodic */
+    for(i = 0; i < state.npts; i++) {
+      for(j = 0; j < state.npts; j++) {
+	if(i == j) continue;
+	if(CLOSE_PERIODIC(state.x[i], state.y[i],
+			  state.x[j], state.y[j],
+			  period, r2)) 
+	  freshaux[i] += 1;
+      }
+    }
+  } else {
+    /* Euclidean distance */
+    for(i = 0; i < state.npts; i++) {
+      for(j = 0; j < state.npts; j++) {
+	if(i == j) continue;
+	if(CLOSE(state.x[i], state.y[i], 
+		 state.x[j], state.y[j], 
+		 r2))
+	  freshaux[i] += 1;
+      }
+    }
+  }
+  /* Check agreement with 'aux' */
+  for(j = 0; j < state.npts; j++) {
+    if(aux[j] != freshaux[j]) {
+      Rprintf("\n\taux[%d] = %d, freshaux[%d] = %d\n", 
+	      j, aux[j], j, freshaux[j]);
+      Rprintf("\tnpts = %d\n", state.npts);
+      Rprintf("\tperiod = (%lf, %lf)\n", period[0], period[1]);
+      if(geyer->prevtype == BIRTH) error("updaux failed after BIRTH");
+      if(geyer->prevtype == DEATH) error("updaux failed after DEATH");
+      if(geyer->prevtype == SHIFT) error("updaux failed after SHIFT");
+      error("updaux failed at start");
+    }
+  }
+  /* OK. Record type of this transition */ 
+  geyer->prevtype = prop.itype;
+
+  /* ................ end debug cross-check ................ */
+#endif
+
+  if(prop.itype == BIRTH) { 
+    /* Birth */
+    u = prop.u;
+    v = prop.v;
+    /* initialise auxiliary counter for new point */
+    aux[npts] = 0; 
+    /* update all auxiliary counters */
+    if(geyer->per) {
+      /* periodic distance */
+      for(j=0; j < npts; j++) {
+	if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) {
+	  aux[j] += 1;
+	  aux[npts] += 1;
+	} 
+      }
+    } else {
+      /* Euclidean distance */
+      for(j=0; j < npts; j++) {
+	if(CLOSE(u,v,x[j],y[j],r2)) {
+	  aux[j] += 1;
+	  aux[npts] += 1;
+	} 
+      }
+    }
+  } else if(prop.itype == DEATH) {
+    /* Death */
+    ix = prop.ix;
+    u = x[ix];
+    v = y[ix];
+    /* decrement auxiliary counter for each point */
+    if(geyer->per) {
+      /* periodic distance */
+      for(j=0; j<npts; j++) {
+	if(j==ix) continue;
+	if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) {
+	  if(j < ix) aux[j] -= 1;
+	  else aux[j-1] = aux[j] - 1;
+	} else if(j >= ix) aux[j-1] = aux[j];
+      }
+    } else {
+      /* Euclidean distance */
+      for(j=0; j<npts; j++) {
+	if(j==ix) continue;
+	if(CLOSE(u,v,x[j],y[j],r2)) {
+	  if(j < ix) aux[j] -= 1;
+	  else aux[j-1] = aux[j] - 1;
+	} else if(j >= ix) aux[j-1] = aux[j];
+      }
+    }
+  } else if(prop.itype == SHIFT) { 
+    /* Shift */
+    u = prop.u;
+    v = prop.v;
+    ix = prop.ix;
+    xix = x[ix];
+    yix = y[ix];
+    /* recompute auxiliary counter for point 'ix' */
+    aux[ix] = 0;
+    /* update auxiliary counters for other points */
+    if(geyer->per) {
+      for(j=0; j<npts; j++) {
+	if(j == ix) continue;
+	newclose = oldclose = NO;
+	if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) newclose = YES;
+	if(CLOSE_PERIODIC(xix,yix,x[j],y[j],period,r2)) oldclose = YES;
+	if(newclose) {
+	  /* increment neighbour count for new point */
+	  aux[ix] += 1;
+	  if(!oldclose) 
+	    aux[j] += 1; /* point j gains a new neighbour */
+	} else if(oldclose)
+	  aux[j] -= 1; /* point j loses a neighbour */
+      }
+    } else {
+      /* Euclidean distance */
+      for(j=0; j<npts; j++) {
+	if(j == ix) continue;
+	newclose = oldclose = NO;
+	if(CLOSE(u,v,x[j],y[j],r2)) newclose = YES;
+	if(CLOSE(xix,yix,x[j],y[j],r2)) oldclose = YES;
+	if(newclose) {
+	  /* increment neighbour count for new point */
+	  aux[ix] += 1;
+	  if(!oldclose) 
+	    aux[j] += 1; /* point j gains a new neighbour */
+	} else if(oldclose)
+	  aux[j] -= 1; /* point j loses a neighbour */
+      }
+    }
+  } else fexitc("Unrecognised transition type; bailing out.\n");
+
+  return;
+}
+
+Cifns GeyerCifns = { &geyerinit, &geyercif, &geyerupd, NO};
diff --git a/src/hardcore.c b/src/hardcore.c
new file mode 100755
index 0000000..0191ae2
--- /dev/null
+++ b/src/hardcore.c
@@ -0,0 +1,109 @@
+#include <R.h>
+#include <math.h>
+#include "methas.h"
+#include "dist2.h"
+
+/* Conditional intensity computation for Hard core process */
+
+/* Storage of parameters and precomputed/auxiliary data */
+
+typedef struct Hardcore {
+  double h;   /* hard core distance */
+  double h2;
+  double *period;
+  int per;
+} Hardcore;
+
+
+/* initialiser function */
+
+Cdata *hardcoreinit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  Hardcore *hardcore;
+  double h;
+  hardcore = (Hardcore *) R_alloc(1, sizeof(Hardcore));
+
+  /* Interpret model parameters*/
+  hardcore->h      = h = model.ipar[0];
+  hardcore->h2     = h * h;
+  hardcore->period = model.period;
+  /* periodic boundary conditions? */
+  hardcore->per    = (model.period[0] > 0.0);
+
+  return((Cdata *) hardcore);
+}
+
+/* conditional intensity evaluator */
+
+double hardcorecif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int npts, ix, ixp1, j;
+  double *x, *y;
+  double u, v;
+  double h2, a;
+  Hardcore *hardcore;
+
+  hardcore = (Hardcore *) cdata;
+
+  h2     = hardcore->h2;
+
+  u  = prop.u;
+  v  = prop.v;
+  ix = prop.ix;
+  x  = state.x;
+  y  = state.y;
+
+  npts = state.npts;
+
+  if(npts == 0) 
+    return((double) 1.0);
+
+  ixp1 = ix+1;
+  /* If ix = NONE = -1, then ixp1 = 0 is correct */
+  if(hardcore->per) { /* periodic distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(dist2thresh(u,v,x[j],y[j],hardcore->period, h2))
+	  return((double) 0.0);
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(dist2thresh(u,v,x[j],y[j],hardcore->period, h2))
+	  return((double) 0.0);
+      }
+    }
+  }
+  else { /* Euclidean distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	a = h2 - pow(u - x[j], 2);
+	if(a > 0) {
+	  a -= pow(v - y[j], 2);
+	  if(a > 0) 
+	    return((double) 0.0);
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	a = h2 - pow(u - x[j], 2);
+	if(a > 0) {
+	  a -= pow(v - y[j], 2);
+	  if(a > 0)
+	    return((double) 0.0);
+	}
+      }
+    }
+  }
+
+  return ((double) 1.0);
+}
+
+Cifns HardcoreCifns = { &hardcoreinit, &hardcorecif, (updafunptr) NULL, NO};
diff --git a/src/hasclose.c b/src/hasclose.c
new file mode 100644
index 0000000..05fc764
--- /dev/null
+++ b/src/hasclose.c
@@ -0,0 +1,50 @@
+/*
+
+  hasclose.c
+
+  $Revision: 1.4 $ $Date: 2016/11/29 05:09:25 $
+
+  Determine whether a point has a neighbour closer than 'r'
+  
+  Data must be ordered by increasing x coordinate
+*/
+
+#include <R.h>
+
+#undef BUG
+
+#undef TORUS
+
+#undef ZCOORD
+
+#define CLOSEFUN hasXclose
+#define CROSSFUN hasXYclose
+#include "hasclose.h"
+#undef CLOSEFUN
+#undef CROSSFUN
+
+#define ZCOORD
+
+#define CLOSEFUN hasX3close
+#define CROSSFUN hasXY3close
+#include "hasclose.h"
+#undef CLOSEFUN
+#undef CROSSFUN
+
+#define TORUS
+
+#undef ZCOORD
+
+#define CLOSEFUN hasXpclose
+#define CROSSFUN hasXYpclose
+#include "hasclose.h"
+#undef CLOSEFUN
+#undef CROSSFUN
+
+#define ZCOORD
+
+#define CLOSEFUN hasX3pclose
+#define CROSSFUN hasXY3pclose
+#include "hasclose.h"
+#undef CLOSEFUN
+#undef CROSSFUN
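+
+/* Editorial note: the four inclusions above instantiate the template in
+   hasclose.h eight times -- {2D, 3D} x {Euclidean, toroidal} -- each with
+   a single-pattern variant (CLOSEFUN) and a two-pattern variant (CROSSFUN). */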
diff --git a/src/hasclose.h b/src/hasclose.h
new file mode 100644
index 0000000..dc0af30
--- /dev/null
+++ b/src/hasclose.h
@@ -0,0 +1,403 @@
+/*
+  hasclose.h
+
+  Function definitions to be #included in hasclose.c
+  several times with different values of macros.
+
+  Macros used:
+
+  CLOSEFUN   name of function for pairs in a single pattern
+
+  CROSSFUN   name of function for pairs between two patterns
+
+  ZCOORD     if defined, coordinates are 3-dimensional
+
+  TORUS      if defined, distances are periodic
+
+  BUG        debugger flag
+
+  $Revision: 1.10 $ $Date: 2017/06/05 10:53:59 $
+
+*/
+
+void CLOSEFUN(n,
+	      x,
+	      y,
+#ifdef ZCOORD
+	      z,
+#endif
+	      r,  /* distance deemed 'close' */
+#ifdef TORUS
+	      b,  /* box dimensions */
+#endif
+	      t)  /* result: true/false */
+     int *n, *t;
+     double *x, *y, *r;
+#ifdef ZCOORD
+     double *z;
+#endif
+#ifdef TORUS
+     double *b;
+#endif
+{
+  double xi, yi, rmax, r2max, rmaxplus, dx, dy, d2minr2;
+#ifdef ZCOORD
+  double zi, dz;
+#endif
+  int N, maxchunk, i, j;
+
+#ifdef TORUS
+  double Bx, By, Hy;
+#ifdef ZCOORD
+  double Bz, Hz;
+#endif
+#endif
+
+  N = *n;
+  rmax = *r;
+
+  r2max = rmax * rmax;
+  rmaxplus = rmax + rmax/16.0;
+
+#ifdef TORUS
+  Bx = b[0];
+  By = b[1];
+  Hy = By/2.0;
+#ifdef ZCOORD
+  Bz = b[2];
+  Hz = Bz/2.0;
+#endif
+#endif
+
+  /* loop in chunks of 2^16 */
+
+  i = 0; maxchunk = 0; 
+  while(i < N) {
+
+    R_CheckUserInterrupt();
+
+    maxchunk += 65536; 
+    if(maxchunk > N) maxchunk = N;
+
+    for(; i < maxchunk; i++) {
+
+      xi = x[i];
+      yi = y[i];
+#ifdef ZCOORD
+      zi = z[i];
+#endif
+
+      if(i > 0) {
+	/* scan backward from i */
+	for(j = i - 1; j >= 0; j--) {
+	  dx = xi - x[j];
+	  if(dx > rmaxplus) 
+	    break;
+	  dy = y[j] - yi;
+#ifdef TORUS
+	  if(dy < 0.0) dy = -dy;
+	  if(dy > Hy) dy = By - dy;
+#endif
+	  d2minr2 = dx * dx + dy * dy - r2max;
+#ifdef ZCOORD
+	  if(d2minr2 <= 0.0) {
+	    dz = z[j] - zi;
+#ifdef TORUS
+	    if(dz < 0.0) dz = -dz;
+	    if(dz > Hz) dz = Bz - dz;
+#endif
+	    d2minr2 = d2minr2 + dz * dz;
+#endif
+	    if(d2minr2 <= 0.0) {
+	      /* pair (i, j) is close */
+	      t[i] = t[j] = 1;
+	    }
+#ifdef ZCOORD
+	  }
+#endif
+	}
+#ifdef TORUS
+	/* wrap-around */
+	/* scan forward from 0 */
+	for(j = 0; j < i; j++) {
+	  dx = Bx + x[j] - xi;
+	  if(dx > rmaxplus) 
+	    break;
+	  dy = y[j] - yi;
+#ifdef TORUS
+	  if(dy < 0.0) dy = -dy;
+	  if(dy > Hy) dy = By - dy;
+#endif
+	  d2minr2 = dx * dx + dy * dy - r2max;
+#ifdef ZCOORD
+	  if(d2minr2 <= 0.0) {
+	    dz = z[j] - zi;
+#ifdef TORUS
+	    if(dz < 0.0) dz = -dz;
+	    if(dz > Hz) dz = Bz - dz;
+#endif
+	    d2minr2 = d2minr2 + dz * dz;
+#endif
+	    if(d2minr2 <= 0.0) {
+	      /* pair (i, j) is close */
+	      t[i] = t[j] = 1;
+	    }
+#ifdef ZCOORD
+	  }
+#endif
+	}
+#endif
+      }
+    }
+  }
+}
+
+/* ........................................................ */
+
+void CROSSFUN(n1,
+	      x1,
+	      y1,
+#ifdef ZCOORD
+	      z1,
+#endif
+	      n2,
+	      x2,
+	      y2,
+#ifdef ZCOORD
+	      z2,
+#endif
+	      r,
+#ifdef TORUS
+	      b,  /* box dimensions (same for both patterns!!) */
+#endif
+	      t)
+     int *n1, *n2, *t;
+     double *x1, *y1, *x2, *y2, *r;
+#ifdef ZCOORD
+     double *z1, *z2;
+#endif
+#ifdef TORUS
+     double *b;
+#endif
+{
+  /* lengths */
+  int N1, N2, maxchunk;
+  /* distance parameter */
+  double rmax, r2max, rmaxplus;
+  /* indices */
+  int i, j, jleft;
+  /* temporary values */
+  double x1i, y1i, xleft, dx, dy, dx2, d2minr2;
+#ifdef ZCOORD
+  double z1i, dz;
+#endif
+
+#ifdef TORUS
+  double Bx, By, Hx, Hy;
+  int jright;
+#ifdef ZCOORD
+  double Bz, Hz;
+#endif
+#endif
+
+  N1 = *n1;
+  N2 = *n2;
+  rmax = *r;
+  
+  r2max = rmax * rmax;
+  rmaxplus = rmax + rmax/16.0;
+
+#ifdef TORUS
+  Bx = b[0];
+  By = b[1];
+  Hx = Bx/2.0;
+  Hy = By/2.0;
+#ifdef BUG
+  Rprintf("=> PERIODIC:  Bx = %lf, By = %lf  <= \n", Bx, By);
+#endif
+#ifdef ZCOORD
+  Bz = b[2];
+  Hz = Bz/2.0;
+#endif
+#endif
+
+  if(N1 > 0 && N2 > 0) {
+
+    i = 0; maxchunk = 0; jleft = 0;
+
+    while(i < N1) {
+
+      R_CheckUserInterrupt();
+
+      maxchunk += 65536;
+      if(maxchunk > N1) maxchunk = N1;
+
+      for( ; i < maxchunk; i++) {
+
+	x1i = x1[i];
+	y1i = y1[i];
+#ifdef ZCOORD
+	z1i = z1[i];
+#endif
+
+#ifdef BUG
+	Rprintf("------ i = %d --------\n", i);
+	Rprintf(" [%d] = (%lf, %lf)\n", i, x1i, y1i);
+#endif
+	/* 
+	   adjust starting point jleft
+	*/
+	xleft = x1i - rmaxplus;
+	while((x2[jleft] < xleft) && (jleft+1 < N2))
+	  ++jleft;
+
+#ifdef BUG
+	Rprintf("\t jleft = %d\n", jleft);
+#endif
+
+	/* 
+	   process from j = jleft until dx > rmax + epsilon
+	*/
+	for(j=jleft; j < N2; j++) {
+	  dx = x2[j] - x1i;
+#ifdef BUG
+	  Rprintf("\t Central loop, j = %d, dx = %lf\n", j, dx);
+#endif
+	  if(dx > rmaxplus)
+	    break;
+	  dx2 = dx * dx;
+	  dy = y2[j] - y1i;
+#ifdef BUG
+	  Rprintf("\t\t Did not break\n\t\t dy = %lf\n", dy);
+#endif
+#ifdef TORUS
+	  if(dy < 0.0) dy = -dy;
+	  if(dy > Hy) dy = By - dy;
+#ifdef BUG
+	  Rprintf("\t\t periodic dy = %lf\n", dy);
+#endif
+#endif
+	  d2minr2 = dx2 + dy * dy - r2max;
+#ifdef ZCOORD
+	    if(d2minr2 <= 0.0) {
+	      dz = z2[j] - z1i;
+#ifdef TORUS
+	      if(dz < 0.0) dz = -dz;
+	      if(dz > Hz) dz = Bz - dz;
+#endif
+	      d2minr2 = d2minr2 + dz * dz;
+#endif
+	      if(d2minr2 <= 0.0) {
+#ifdef BUG
+		Rprintf("\t\t Point %d has close neighbour\n", i);
+#endif
+		/* point i has a close neighbour */
+		t[i] = 1;
+		break;
+	      }
+#ifdef ZCOORD
+	    }
+#endif
+	}
+
+#ifdef TORUS
+	jright = j;
+	/* wrap-around at start */
+#ifdef BUG
+	Rprintf("\t Wrap around at start for j = 0 to %d\n", jleft);
+#endif
+	for(j=0; j < jleft; j++) {
+	  dx = x1i - x2[j];
+#ifdef BUG
+	  Rprintf("\t\t j = %d, dx = %lf\n", j, dx);
+#endif
+	  if(dx < 0.0) dx = -dx;
+	  if(dx > Hx) dx = Bx - dx;
+#ifdef BUG
+	  Rprintf("\t\t periodic dx = %lf\n", dx);
+#endif
+	  if(dx > rmaxplus)
+	    break;
+	  dx2 = dx * dx;
+	  dy = y2[j] - y1i;
+#ifdef BUG
+	  Rprintf("\t\t Did not break\n\t\t dy = %lf\n", dy);
+#endif
+	  if(dy < 0.0) dy = -dy;
+	  if(dy > Hy) dy = By - dy;
+#ifdef BUG
+	  Rprintf("\t\t periodic dy = %lf\n", dy);
+#endif
+	  d2minr2 = dx2 + dy * dy - r2max;
+#ifdef ZCOORD
+	    if(d2minr2 <= 0.0) {
+	      dz = z2[j] - z1i;
+	      if(dz < 0.0) dz = -dz;
+	      if(dz > Hz) dz = Bz - dz;
+	      d2minr2 = d2minr2 + dz * dz;
+#endif
+	      if(d2minr2 <= 0.0) {
+		/* point i has a close neighbour */
+#ifdef BUG
+		Rprintf("\t\t Point %d has close neighbour\n", i);
+#endif
+		t[i] = 1;
+		break;
+	      }
+#ifdef ZCOORD
+	    }
+#endif
+	}
+	/* wrap around at end */
+#ifdef BUG
+	Rprintf("\t Wrap around at end for j = %d to %d\n", N2-1, jright);
+#endif
+	for(j=N2-1; j >= jright; j--) {
+	  dx = x1i - x2[j];
+#ifdef BUG
+	  Rprintf("\t\t j = %d, dx = %lf\n", j, dx);
+#endif
+	  if(dx < 0.0) dx = -dx;
+	  if(dx > Hx) dx = Bx - dx;
+#ifdef BUG
+	  Rprintf("\t\t periodic dx = %lf\n", dx);
+#endif
+	  if(dx > rmaxplus)
+	    break;
+	  dx2 = dx * dx;
+	  dy = y2[j] - y1i;
+#ifdef BUG
+	  Rprintf("\t\t Did not break\n\t\t dy = %lf\n", dy);
+#endif
+	  if(dy < 0.0) dy = -dy;
+	  if(dy > Hy) dy = By - dy;
+#ifdef BUG
+	  Rprintf("\t\t periodic dy = %lf\n", dy);
+#endif
+	  d2minr2 = dx2 + dy * dy - r2max;
+#ifdef ZCOORD
+	    if(d2minr2 <= 0.0) {
+	      dz = z2[j] - z1i;
+	      if(dz < 0.0) dz = -dz;
+	      if(dz > Hz) dz = Bz - dz;
+	      d2minr2 = d2minr2 + dz * dz;
+#endif
+	      if(d2minr2 <= 0.0) {
+#ifdef BUG
+		Rprintf("\t\t Point %d has close neighbour\n", i);
+#endif
+		/* point i has a close neighbour */
+		t[i] = 1;
+		break;
+	      }
+#ifdef ZCOORD
+	    }
+#endif
+	}
+#endif
+      }
+    }
+  }
+}
+
+
diff --git a/src/idw.c b/src/idw.c
new file mode 100755
index 0000000..8690767
--- /dev/null
+++ b/src/idw.c
@@ -0,0 +1,146 @@
+/*
+  idw.c
+
+  Inverse-distance weighted smoothing
+
+  $Revision: 1.8 $ $Date: 2013/05/27 02:09:10 $
+
+*/
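+
+/* Editorial note: at a grid location g the smoothed value is
+       s(g) = (sum_i w_i v[i]) / (sum_i w_i),  w_i = 1/|g - (x[i],y[i])|^power,
+   accumulated below in num/den, with the ratio stored in rat.  When
+   power = 2 the weight is simply 1/d2, avoiding the call to pow(). */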
+
+#include <Rmath.h>
+#include <R_ext/Utils.h>
+#include "chunkloop.h"
+
+#define MAT(X,I,J,NROW) (X)[(J) + (NROW) * (I)]
+
+/*  inverse-distance smoothing from data points onto pixel grid */
+
+void Cidw(x, y, v, n, xstart, xstep, nx, ystart, ystep, ny, power, num, den, rat)
+     double *x, *y, *v;           /* data points and values */
+     int *n;
+     double *xstart, *xstep, *ystart, *ystep;   /* pixel grid */
+     int *nx, *ny;
+     double *power;                   /* exponent for IDW */
+     double *num, *den, *rat;     /* output arrays - assumed initialised 0 */
+{
+  int N, i, Nx, Ny, ix, iy;
+  double xg, yg, x0, dx, y0, dy, pon2, d2, w;
+  
+  N  = *n;
+  Nx = *nx;
+  Ny = *ny;
+  x0 = *xstart;
+  y0 = *ystart;
+  dx = *xstep;
+  dy = *ystep;
+
+  pon2 = (*power)/2.0;
+
+  if(pon2 == 1.0) {
+    /* slightly faster code when power=2 */
+    for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) {
+      if(ix % 256 == 0) R_CheckUserInterrupt();
+      for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) {
+	/* loop over data points, accumulating numerator and denominator */
+	for(i = 0; i < N; i++) {
+	  d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]);
+	  w = 1.0/d2;
+	  MAT(num, ix, iy, Ny) += w * v[i];
+	  MAT(den, ix, iy, Ny) += w;
+	}
+	/* compute ratio */
+	MAT(rat, ix, iy, Ny) = MAT(num, ix, iy, Ny)/MAT(den, ix, iy, Ny);
+      }
+    }
+  } else {
+    /* general case */
+    for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) {
+      if(ix % 256 == 0) R_CheckUserInterrupt();
+      for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) {
+	/* loop over data points, accumulating numerator and denominator */
+	for(i = 0; i < N; i++) {
+	  d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]);
+	  w = 1.0/pow(d2, pon2);
+	  MAT(num, ix, iy, Ny) += w * v[i];
+	  MAT(den, ix, iy, Ny) += w;
+	}
+	/* compute ratio */
+	MAT(rat, ix, iy, Ny) = MAT(num, ix, iy, Ny)/MAT(den, ix, iy, Ny);
+      }
+    }
+  }
+}
+
+/* Leave-one-out IDW at data points only */
+
+void idwloo(x, y, v, n, power, num, den, rat)
+     double *x, *y, *v;           /* data points and values */
+     int *n;
+     double *power;                   /* exponent for IDW */
+     double *num, *den, *rat;     /* output vectors - assumed initialised 0 */
+{
+  int N, i, j, maxchunk;
+  double xi, yi, d2, w, pon2;
+  
+  N  = *n;
+  pon2 = (*power)/2.0;
+
+  if(pon2 == 1.0) {
+    /* slightly faster code when power=2 */
+    OUTERCHUNKLOOP(i, N, maxchunk, 16384) {
+      R_CheckUserInterrupt();
+      INNERCHUNKLOOP(i, N, maxchunk, 16384) {
+	xi = x[i];
+	yi = y[i];
+	if(i > 0) {
+	  for(j = 0; j < i; j++) {
+	    d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]);
+	    w = 1.0/d2;
+	    num[i] += w * v[j];
+	    den[i] += w;
+	  }
+	}
+	if(i < N-1) {
+	  for(j = i+1; j < N; j++) {
+	    d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]);
+	    w = 1.0/d2;
+	    num[i] += w * v[j];
+	    den[i] += w;
+	  }
+	}
+	/* compute ratio */
+	rat[i] = num[i]/den[i];
+      }
+    }
+  } else {
+    /* general case */
+    OUTERCHUNKLOOP(i, N, maxchunk, 16384) {
+      R_CheckUserInterrupt();
+      INNERCHUNKLOOP(i, N, maxchunk, 16384) {
+	xi = x[i];
+	yi = y[i];
+	if(i > 0) {
+	  for(j = 0; j < i; j++) {
+	    d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]);
+	    w = 1.0/pow(d2, pon2);
+	    num[i] += w * v[j];
+	    den[i] += w;
+	  }
+	}
+	if(i < N-1) {
+	  for(j = i+1; j < N; j++) {
+	    d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]);
+	    w = 1.0/pow(d2, pon2);
+	    num[i] += w * v[j];
+	    den[i] += w;
+	  }
+	}
+	/* compute ratio */
+	rat[i] = num[i]/den[i];
+      }
+    }
+  }
+}
+
+
+
diff --git a/src/init.c b/src/init.c
new file mode 100644
index 0000000..4dd6697
--- /dev/null
+++ b/src/init.c
@@ -0,0 +1,213 @@
+
+/* 
+   Native symbol registration table for spatstat package
+
+   Automatically generated - do not edit this file!
+
+*/
+
+#include "proto.h"
+#include <R.h>
+#include <Rinternals.h>
+#include <stdlib.h> // for NULL
+#include <R_ext/Rdynload.h>
+
+/*  
+   See proto.h for declarations for the native routines registered below.
+*/
+
+static const R_CMethodDef CEntries[] = {
+    {"acrdenspt",        (DL_FUNC) &acrdenspt,        10},
+    {"acrsmoopt",        (DL_FUNC) &acrsmoopt,        10},
+    {"adenspt",          (DL_FUNC) &adenspt,           7},
+    {"areaBdif",         (DL_FUNC) &areaBdif,         11},
+    {"areadifs",         (DL_FUNC) &areadifs,          7},
+    {"asmoopt",          (DL_FUNC) &asmoopt,           8},
+    {"auctionbf",        (DL_FUNC) &auctionbf,         7},
+    {"awtcrdenspt",      (DL_FUNC) &awtcrdenspt,      11},
+    {"awtcrsmoopt",      (DL_FUNC) &awtcrsmoopt,      11},
+    {"awtdenspt",        (DL_FUNC) &awtdenspt,         8},
+    {"awtsmoopt",        (DL_FUNC) &awtsmoopt,         9},
+    {"bdrymask",         (DL_FUNC) &bdrymask,          4},
+    {"Cbiform",          (DL_FUNC) &Cbiform,           6},
+    {"Cclosepaircounts", (DL_FUNC) &Cclosepaircounts,  5},
+    {"Ccountends",       (DL_FUNC) &Ccountends,       14},
+    {"Ccrossdist",       (DL_FUNC) &Ccrossdist,        8},
+    {"Ccrosspaircounts", (DL_FUNC) &Ccrosspaircounts,  8},
+    {"CcrossPdist",      (DL_FUNC) &CcrossPdist,      10},
+    {"Cidw",             (DL_FUNC) &Cidw,             14},
+    {"ClineMquad",       (DL_FUNC) &ClineMquad,       23},
+    {"Clinequad",        (DL_FUNC) &Clinequad,        18},
+    {"ClineRMquad",      (DL_FUNC) &ClineRMquad,      23},
+    {"ClineRquad",       (DL_FUNC) &ClineRquad,       18},
+    {"Clinvwhichdist",   (DL_FUNC) &Clinvwhichdist,   12},
+    {"Clixellate",       (DL_FUNC) &Clixellate,       16},
+    {"cocoGraph",        (DL_FUNC) &cocoGraph,         6},
+    {"cocoImage",        (DL_FUNC) &cocoImage,         3},
+    {"Corput",           (DL_FUNC) &Corput,            3},
+    {"Cpairdist",        (DL_FUNC) &Cpairdist,         5},
+    {"CpairPdist",       (DL_FUNC) &CpairPdist,        7},
+    {"Cquadform",        (DL_FUNC) &Cquadform,         5},
+    {"crdenspt",         (DL_FUNC) &crdenspt,          9},
+    {"crosscount",       (DL_FUNC) &crosscount,        8},
+    {"crsmoopt",         (DL_FUNC) &crsmoopt,         10},
+    {"CspaSumSymOut",    (DL_FUNC) &CspaSumSymOut,     9},
+    {"CspaWtSumSymOut",  (DL_FUNC) &CspaWtSumSymOut,  13},
+    {"Csum2outer",       (DL_FUNC) &Csum2outer,        6},
+    {"Csumouter",        (DL_FUNC) &Csumouter,         4},
+    {"Csumsymouter",     (DL_FUNC) &Csumsymouter,      4},
+    {"Cwsum2outer",      (DL_FUNC) &Cwsum2outer,       7},
+    {"Cwsumouter",       (DL_FUNC) &Cwsumouter,        5},
+    {"Cwsumsymouter",    (DL_FUNC) &Cwsumsymouter,     5},
+    {"Cxypolyselfint",   (DL_FUNC) &Cxypolyselfint,   11},
+    {"D3crossdist",      (DL_FUNC) &D3crossdist,      10},
+    {"D3crossPdist",     (DL_FUNC) &D3crossPdist,     13},
+    {"D3pairdist",       (DL_FUNC) &D3pairdist,        6},
+    {"D3pairPdist",      (DL_FUNC) &D3pairPdist,       9},
+    {"Ddist2dpath",      (DL_FUNC) &Ddist2dpath,       7},
+    {"delta2area",       (DL_FUNC) &delta2area,       10},
+    {"denspt",           (DL_FUNC) &denspt,            6},
+    {"digberJ",          (DL_FUNC) &digberJ,           6},
+    {"dinfty_R",         (DL_FUNC) &dinfty_R,          3},
+    {"discareapoly",     (DL_FUNC) &discareapoly,     12},
+    {"discs2grid",       (DL_FUNC) &discs2grid,       11},
+    {"distmapbin",       (DL_FUNC) &distmapbin,        9},
+    {"dwpure",           (DL_FUNC) &dwpure,            6},
+    {"Ediggatsti",       (DL_FUNC) &Ediggatsti,       10},
+    {"Ediggra",          (DL_FUNC) &Ediggra,          11},
+    {"Efiksel",          (DL_FUNC) &Efiksel,           9},
+    {"Egeyer",           (DL_FUNC) &Egeyer,           11},
+    {"exact_dt_R",       (DL_FUNC) &exact_dt_R,       14},
+    {"fardist2grid",     (DL_FUNC) &fardist2grid,     10},
+    {"fardistgrid",      (DL_FUNC) &fardistgrid,      10},
+    {"Fclosepairs",      (DL_FUNC) &Fclosepairs,      16},
+    {"Fcrosspairs",      (DL_FUNC) &Fcrosspairs,      19},
+    {"Gdenspt",          (DL_FUNC) &Gdenspt,           5},
+    {"Gsmoopt",          (DL_FUNC) &Gsmoopt,           7},
+    {"Gwtdenspt",        (DL_FUNC) &Gwtdenspt,         6},
+    {"Gwtsmoopt",        (DL_FUNC) &Gwtsmoopt,         8},
+    {"hasX3close",       (DL_FUNC) &hasX3close,        6},
+    {"hasX3pclose",      (DL_FUNC) &hasX3pclose,       7},
+    {"hasXclose",        (DL_FUNC) &hasXclose,         5},
+    {"hasXpclose",       (DL_FUNC) &hasXpclose,        6},
+    {"hasXY3close",      (DL_FUNC) &hasXY3close,      10},
+    {"hasXY3pclose",     (DL_FUNC) &hasXY3pclose,     11},
+    {"hasXYclose",       (DL_FUNC) &hasXYclose,        8},
+    {"hasXYpclose",      (DL_FUNC) &hasXYpclose,       9},
+    {"Idist2dpath",      (DL_FUNC) &Idist2dpath,       7},
+    {"idwloo",           (DL_FUNC) &idwloo,            8},
+    {"KborderD",         (DL_FUNC) &KborderD,          8},
+    {"KborderI",         (DL_FUNC) &KborderI,          8},
+    {"knnd3D",           (DL_FUNC) &knnd3D,            8},
+    {"knndMD",           (DL_FUNC) &knndMD,            6},
+    {"knndsort",         (DL_FUNC) &knndsort,          6},
+    {"knnGinterface",    (DL_FUNC) &knnGinterface,    15},
+    {"knnsort",          (DL_FUNC) &knnsort,           7},
+    {"knnw3D",           (DL_FUNC) &knnw3D,            8},
+    {"knnwMD",           (DL_FUNC) &knnwMD,            7},
+    {"knnX3Dinterface",  (DL_FUNC) &knnX3Dinterface,  17},
+    {"knnXinterface",    (DL_FUNC) &knnXinterface,    15},
+    {"KnoneD",           (DL_FUNC) &KnoneD,            6},
+    {"KnoneI",           (DL_FUNC) &KnoneI,            6},
+    {"knownCif",         (DL_FUNC) &knownCif,          2},
+    {"KrectDbl",         (DL_FUNC) &KrectDbl,         17},
+    {"KrectInt",         (DL_FUNC) &KrectInt,         17},
+    {"KrectWtd",         (DL_FUNC) &KrectWtd,         18},
+    {"Kwborder",         (DL_FUNC) &Kwborder,          9},
+    {"Kwnone",           (DL_FUNC) &Kwnone,            7},
+    {"lincrossdist",     (DL_FUNC) &lincrossdist,     16},
+    {"linearradius",     (DL_FUNC) &linearradius,      8},
+    {"linknncross",      (DL_FUNC) &linknncross,      16},
+    {"linknnd",          (DL_FUNC) &linknnd,          13},
+    {"linndcross",       (DL_FUNC) &linndcross,       18},
+    {"linndxcross",      (DL_FUNC) &linndxcross,      20},
+    {"linnndist",        (DL_FUNC) &linnndist,        13},
+    {"linnnwhich",       (DL_FUNC) &linnnwhich,       14},
+    {"linpairdist",      (DL_FUNC) &linpairdist,      12},
+    {"linSnndwhich",     (DL_FUNC) &linSnndwhich,     15},
+    {"locpcfx",          (DL_FUNC) &locpcfx,          12},
+    {"locprod",          (DL_FUNC) &locprod,           7},
+    {"locWpcfx",         (DL_FUNC) &locWpcfx,         13},
+    {"locxprod",         (DL_FUNC) &locxprod,         10},
+    {"maxnnd2",          (DL_FUNC) &maxnnd2,           5},
+    {"maxPnnd2",         (DL_FUNC) &maxPnnd2,          5},
+    {"minnnd2",          (DL_FUNC) &minnnd2,           5},
+    {"minPnnd2",         (DL_FUNC) &minPnnd2,          5},
+    {"nnd3D",            (DL_FUNC) &nnd3D,             7},
+    {"nndistsort",       (DL_FUNC) &nndistsort,        5},
+    {"nndMD",            (DL_FUNC) &nndMD,             5},
+    {"nnGinterface",     (DL_FUNC) &nnGinterface,     14},
+    {"nnw3D",            (DL_FUNC) &nnw3D,             7},
+    {"nnwhichsort",      (DL_FUNC) &nnwhichsort,       5},
+    {"nnwMD",            (DL_FUNC) &nnwMD,             6},
+    {"nnX3Dinterface",   (DL_FUNC) &nnX3Dinterface,   16},
+    {"nnXinterface",     (DL_FUNC) &nnXinterface,     14},
+    {"paircount",        (DL_FUNC) &paircount,         5},
+    {"poly2imA",         (DL_FUNC) &poly2imA,          7},
+    {"poly2imI",         (DL_FUNC) &poly2imI,          6},
+    {"ps_exact_dt_R",    (DL_FUNC) &ps_exact_dt_R,    13},
+    {"RcallF3",          (DL_FUNC) &RcallF3,          17},
+    {"RcallF3cen",       (DL_FUNC) &RcallF3cen,       20},
+    {"RcallG3",          (DL_FUNC) &RcallG3,          17},
+    {"RcallG3cen",       (DL_FUNC) &RcallG3cen,       19},
+    {"RcallK3",          (DL_FUNC) &RcallK3,          17},
+    {"Rcallpcf3",        (DL_FUNC) &Rcallpcf3,        18},
+    {"ripleybox",        (DL_FUNC) &ripleybox,        11},
+    {"ripleypoly",       (DL_FUNC) &ripleypoly,       11},
+    {"scantrans",        (DL_FUNC) &scantrans,        11},
+    {"seg2pixI",         (DL_FUNC) &seg2pixI,          8},
+    {"seg2pixL",         (DL_FUNC) &seg2pixL,         11},
+    {"seg2pixN",         (DL_FUNC) &seg2pixN,          9},
+    {"segdens",          (DL_FUNC) &segdens,          10},
+    {"smoopt",           (DL_FUNC) &smoopt,            8},
+    {"trigraf",          (DL_FUNC) &trigraf,          10},
+    {"trigrafS",         (DL_FUNC) &trigrafS,         10},
+    {"wtcrdenspt",       (DL_FUNC) &wtcrdenspt,       10},
+    {"wtcrsmoopt",       (DL_FUNC) &wtcrsmoopt,       11},
+    {"wtdenspt",         (DL_FUNC) &wtdenspt,          7},
+    {"wtsmoopt",         (DL_FUNC) &wtsmoopt,          9},
+    {"xypsi",            (DL_FUNC) &xypsi,            10},
+    {"xysegint",         (DL_FUNC) &xysegint,         16},
+    {"xysegXint",        (DL_FUNC) &xysegXint,        11},
+    {"xysi",             (DL_FUNC) &xysi,             12},
+    {"xysiANY",          (DL_FUNC) &xysiANY,          12},
+    {"xysxi",            (DL_FUNC) &xysxi,             7},
+    {NULL, NULL, 0}
+};
+
+static const R_CallMethodDef CallEntries[] = {
+    {"close3IJpairs",        (DL_FUNC) &close3IJpairs,         5},
+    {"close3pairs",          (DL_FUNC) &close3pairs,           5},
+    {"cross3IJpairs",        (DL_FUNC) &cross3IJpairs,         8},
+    {"cross3pairs",          (DL_FUNC) &cross3pairs,           8},
+    {"Cwhist",               (DL_FUNC) &Cwhist,                3},
+    {"Cxysegint",            (DL_FUNC) &Cxysegint,             9},
+    {"CxysegXint",           (DL_FUNC) &CxysegXint,            5},
+    {"graphVees",            (DL_FUNC) &graphVees,             3},
+    {"PerfectDGS",           (DL_FUNC) &PerfectDGS,            4},
+    {"PerfectDiggleGratton", (DL_FUNC) &PerfectDiggleGratton,  6},
+    {"PerfectHardcore",      (DL_FUNC) &PerfectHardcore,       4},
+    {"PerfectPenttinen",     (DL_FUNC) &PerfectPenttinen,      5},
+    {"PerfectStrauss",       (DL_FUNC) &PerfectStrauss,        5},
+    {"PerfectStraussHard",   (DL_FUNC) &PerfectStraussHard,    6},
+    {"thinjumpequal",        (DL_FUNC) &thinjumpequal,         3},
+    {"triDgraph",            (DL_FUNC) &triDgraph,             4},
+    {"triDRgraph",           (DL_FUNC) &triDRgraph,            5},
+    {"triograph",            (DL_FUNC) &triograph,             3},
+    {"trioxgraph",           (DL_FUNC) &trioxgraph,            4},
+    {"VcloseIJDpairs",       (DL_FUNC) &VcloseIJDpairs,        4},
+    {"VcloseIJpairs",        (DL_FUNC) &VcloseIJpairs,         4},
+    {"Vclosepairs",          (DL_FUNC) &Vclosepairs,           4},
+    {"Vclosethresh",         (DL_FUNC) &Vclosethresh,          5},
+    {"VcrossIJDpairs",       (DL_FUNC) &VcrossIJDpairs,        6},
+    {"VcrossIJpairs",        (DL_FUNC) &VcrossIJpairs,         6},
+    {"Vcrosspairs",          (DL_FUNC) &Vcrosspairs,           6},
+    {"xmethas",              (DL_FUNC) &xmethas,              25},
+    {NULL, NULL, 0}
+};
+
+void R_init_spatstat(DllInfo *dll)
+{
+    R_registerRoutines(dll, CEntries, CallEntries, NULL, NULL);
+    R_useDynamicSymbols(dll, FALSE);
+}
diff --git a/src/k3.c b/src/k3.c
new file mode 100755
index 0000000..53cae48
--- /dev/null
+++ b/src/k3.c
@@ -0,0 +1,161 @@
+#include <math.h>
+#include <R.h>
+#include "geom3.h"
+#include "functable.h"
+
+/*
+	$Revision: 1.1 $	$Date: 2009/11/04 23:54:15 $
+
+	K function of 3D point pattern
+
+
+	k3trans	  	translation correction
+
+	k3isot		isotropic correction
+
+
+# /////////////////////////////////////////////
+# AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991.
+#         
+# MODIFIED BY: Adrian Baddeley, Perth 2009.
+#
+# This software is distributed free
+# under the conditions that
+# 	(1) it shall not be incorporated
+# 	in software that is subsequently sold
+# 	(2) the authorship of the software shall
+# 	be acknowledged in any publication that 
+# 	uses results generated by the software
+# 	(3) this notice shall remain in place
+# 	in each file.
+# //////////////////////////////////////////////
+
+
+*/
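+
+/*
+  Editor's note (a restatement of the estimators implemented below, not
+  part of the upstream sources): k3trans weights each unordered pair
+  {i, j} with d(i, j) <= r by
+
+      2 / |W intersect (W + x_j - x_i)|
+
+  where, for a box W with sides (sx, sy, sz), the intersection volume
+  factorises as (sx - |dx|)(sy - |dy|)(sz - |dz|).  k3isot instead weights
+  each pair by the sum of reciprocals 1/sphesfrac(), apparently the
+  fraction of the sphere of radius d centred at each point that lies
+  inside W, times a global factor 1 - 8 * sphevol(vertex, half, d) / |W|.
+*/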
+
+
+void
+k3trans(p, n, b, k)
+     Point *p;
+     int n;
+     Box *b;
+     Ftable *k;
+{
+  register int i, j, l, lmin;
+  register double dx, dy, dz, dist;
+  register double  vx, vy, vz;
+  Point *ip, *jp;
+  double dt, vol, lambda, denom, term;
+  double sphesfrac(), sphevol();
+
+  /* compute denominator & initialise numerator*/
+  vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0);
+  lambda = ((double) n )/ vol;
+  denom = lambda * lambda;
+
+  for(l = 0; l < k->n; l++) {
+    (k->denom)[l] = denom;
+    (k->num)[l]   = 0.0;
+  }
+
+  /* spacing of argument in result vector k */
+  dt = (k->t1 - k->t0)/(k->n - 1);
+
+  /* compute numerator */
+  for( i = 0; i < n; i++) {
+    ip = p + i;
+    for(j = i + 1; j < n; j++) {
+      jp = p + j;
+      dx = jp->x - ip->x;
+      dy = jp->y - ip->y;
+      dz = jp->z - ip->z;
+      dist = sqrt(dx * dx + dy * dy + dz * dz);
+      lmin = ceil( (dist - k->t0) / dt );
+      if(lmin < 0)
+	lmin = 0;
+      
+      vx = b->x1 - b->x0 - (dx > 0 ? dx : -dx);
+      vy = b->y1 - b->y0 - (dy > 0 ? dy : -dy);
+      vz = b->z1 - b->z0 - (dz > 0 ? dz : -dz);
+      if(vx >= 0.0 && vy >= 0.0 && vz >= 0.0) {
+	  term = 2.0 /(vx * vy * vz);
+	  /* 2 because they're ordered pairs */
+	  for(l = lmin; l < k->n; l++)
+	    (k->num)[l] += term;
+      }
+    }
+  }
+
+  /* compute ratio */
+  for(l = 0; l < k->n; l++)
+    (k->f)[l] = ((k->denom)[l] > 0.0)?
+      (k->num)[l] / (k->denom)[l]
+      : 0.0;
+}
+
+void
+k3isot(p, n, b, k)
+     Point *p;
+     int n;
+     Box *b;
+     Ftable *k;
+{
+  register int i, j, l, lmin;
+  register double dx, dy, dz, dist;
+  Point *ip, *jp;
+  double dt, vol, denom, term;
+  double sphesfrac(), sphevol();
+  Point vertex;
+  Box   half;
+
+  /* compute denominator & initialise numerator*/
+  vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0);
+  denom = ((double) (n * n))/vol;
+
+  for(l = 0; l < k->n; l++) {
+    (k->denom)[l] = denom;
+    (k->num)[l]   = 0.0;
+  }
+
+  /* spacing of argument in result vector k */
+  dt = (k->t1 - k->t0)/(k->n - 1);
+
+  /* set up for volume correction */
+
+  vertex.x = b->x0;
+  vertex.y = b->y0;
+  vertex.z = b->z0;
+  half.x1  = b->x1;
+  half.y1  = b->y1;
+  half.z1  = b->z1;
+  half.x0  = (b->x0 + b->x1)/2.0;
+  half.y0  = (b->y0 + b->y1)/2.0;
+  half.z0  = (b->z0 + b->z1)/2.0;
+
+	/* compute numerator */
+  for( i = 0; i < n; i++) {
+    ip = p + i;
+    for(j = i + 1; j < n; j++) {
+      jp = p + j;
+      dx = jp->x - ip->x;
+      dy = jp->y - ip->y;
+      dz = jp->z - ip->z;
+      dist = sqrt(dx * dx + dy * dy + dz * dz);
+      lmin = ceil( (dist - k->t0) / dt );
+      if(lmin < 0)
+	lmin = 0;
+      
+      term = (1.0 / sphesfrac(ip, b, dist)) 
+	+ (1.0 / sphesfrac(jp, b, dist)); 
+      term *= 
+	1.0 - 8.0 * sphevol(&vertex, &half, dist) / vol;
+      for(l = lmin; l < k->n; l++)
+	(k->num)[l] += term;
+    }
+  }
+  /* compute ratio */
+  for(l = 0; l < k->n; l++)
+    (k->f)[l] = ((k->denom)[l] > 0.0)?
+      (k->num)[l] / (k->denom)[l]
+      : 0.0;
+}
diff --git a/src/knn3Ddist.h b/src/knn3Ddist.h
new file mode 100644
index 0000000..5859831
--- /dev/null
+++ b/src/knn3Ddist.h
@@ -0,0 +1,188 @@
+/*
+
+  knn3Ddist.h
+
+  Code template for k-nearest-neighbour algorithms for 3D point patterns
+
+  Input is a single point pattern - supports 'nndist' and 'nnwhich'
+
+  This code is #included multiple times in nn3Ddist.c
+  Variables used:
+        FNAME     function name
+        DIST      #defined if function returns distance to nearest neighbour
+	WHICH     #defined if function returns id of nearest neighbour
+  Either or both DIST and WHICH may be defined.
+
+  THE FOLLOWING CODE ASSUMES THAT THE POINT PATTERN IS SORTED
+  IN ASCENDING ORDER OF THE z COORDINATE
+
+  $Revision: 1.3 $ $Date: 2013/06/29 02:38:19 $
+
+*/
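+
+/*
+  Editor's sketch (presumed, by analogy with the knndist.h instantiations
+  in knndistance.c later in this commit): nn3Ddist.c instantiates this
+  template roughly as
+
+      #undef  FNAME
+      #undef  DIST
+      #undef  WHICH
+      #define FNAME knnd3D
+      #define DIST
+      #include "knn3Ddist.h"
+
+  which generates knnd3D() (registered in the C entry table earlier in
+  this commit) returning distances only; defining WHICH as well, or
+  instead, gives the variants that also fill nnwhich.
+*/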
+
+void FNAME(n, kmax, x, y, z, nnd, nnwhich, huge)
+/* inputs */
+     int *n, *kmax;
+     double *x, *y, *z, *huge;
+     /* output matrices (npoints * kmax) in ROW MAJOR order */
+     double *nnd;
+     int    *nnwhich;
+{ 
+  int npoints, nk, nk1, i, j, k, k1, unsorted, maxchunk;
+  double d2, d2minK, xi, yi, zi, dx, dy, dz, dz2, hu, hu2, tmp;
+  double *d2min; 
+#ifdef WHICH
+  int *which;
+  int itmp;
+#endif
+
+  hu = *huge;
+  hu2 = hu * hu;
+
+  npoints = *n;
+  nk      = *kmax;
+  nk1     = nk - 1;
+
+  /* 
+     create space to store the nearest neighbour distances and indices
+     for the current point
+  */
+
+  d2min = (double *) R_alloc((size_t) nk, sizeof(double));
+#ifdef WHICH
+  which = (int *) R_alloc((size_t) nk, sizeof(int));
+#endif
+
+  /* loop over points */
+
+  OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+
+#ifdef SPATSTAT_DEBUG
+      Rprintf("\ni=%d\n", i); 
+#endif
+
+      /* initialise nn distances and indices */
+
+      d2minK = hu2;
+      for(k = 0; k < nk; k++) {
+	d2min[k] = hu2;
+#ifdef WHICH
+	which[k] = -1;
+#endif
+      }
+
+      xi = x[i];
+      yi = y[i];
+      zi = z[i];
+
+      /* search backward */
+      if(i > 0) {
+	for(j = i - 1; j >= 0; --j) {
+
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("L");
+#endif
+	  dz = z[j] - zi;
+	  dz2 = dz * dz; 
+	  if(dz2 > d2minK)
+	    break;
+	  dx = x[j] - xi;
+	  dy = y[j] - yi;
+	  d2 =  dx * dx + dy * dy + dz2;
+	  if (d2 < d2minK) {
+	    /* overwrite last entry */
+	    d2min[nk1] = d2;
+#ifdef WHICH
+	    which[nk1] = j;
+#endif
+	    /* bubble sort */
+	    unsorted = YES;
+	    for(k = nk1; unsorted && k > 0; k--) {
+	      k1 = k - 1;
+	      if(d2min[k] < d2min[k1]) {
+		/* swap entries */
+		tmp = d2min[k1];
+		d2min[k1] = d2min[k];
+		d2min[k] = tmp;
+#ifdef WHICH
+		itmp = which[k1];
+		which[k1] = which[k];
+		which[k] = itmp;
+#endif
+	      } else {
+		unsorted = NO;
+	      }
+	    }
+	    /* adjust maximum distance */
+	    d2minK = d2min[nk1];
+	  }
+	}
+      }
+
+      /* search forward */
+      if(i + 1 < npoints) {
+	for(j = i + 1; j < npoints; ++j) {
+
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("R");
+#endif
+	  dz = z[j] - zi;
+	  dz2 = dz * dz;
+	  if(dz2 > d2minK)
+	    break;
+	  dx = x[j] - xi;
+	  dy = y[j] - yi;
+	  d2 =  dx * dx + dy * dy + dz2;
+	  if (d2 < d2minK) {
+	    /* overwrite last entry */
+	    d2min[nk1] = d2;
+#ifdef WHICH
+	    which[nk1] = j;
+#endif
+	    /* bubble sort */
+	    unsorted = YES;
+	    for(k = nk1; unsorted && k > 0; k--) {
+	      k1 = k - 1;
+	      if(d2min[k] < d2min[k1]) {
+		/* swap entries */
+		tmp = d2min[k1];
+		d2min[k1] = d2min[k];
+		d2min[k] = tmp;
+#ifdef WHICH
+		itmp = which[k1];
+		which[k1] = which[k];
+		which[k] = itmp;
+#endif
+	      } else {
+		unsorted = NO;
+	      }
+	    }
+	    /* adjust maximum distance */
+	    d2minK = d2min[nk1];
+	  }
+	}
+      }
+
+#ifdef SPATSTAT_DEBUG
+      Rprintf("\n");
+#endif
+
+      /* calculate nn distances for point i 
+	 and copy to output matrix in ROW MAJOR order
+      */
+      for(k = 0; k < nk; k++) {
+#ifdef DIST
+	nnd[nk * i + k] = sqrt(d2min[k]);
+#endif
+#ifdef WHICH
+	/* convert from C to R indexing */
+	nnwhich[nk * i + k] = which[k] + 1;
+#endif
+      }
+	
+    }
+  }
+}
+
diff --git a/src/knn3DdistX.h b/src/knn3DdistX.h
new file mode 100644
index 0000000..252fa9c
--- /dev/null
+++ b/src/knn3DdistX.h
@@ -0,0 +1,233 @@
+
+#if (1 == 0)
+/*
+  knn3DdistX.h
+
+  Code template for C functions supporting nncross 
+  for k-nearest neighbours (k > 1)
+  for 3D point patterns
+
+  THE FOLLOWING CODE ASSUMES THAT LISTS ARE SORTED
+  IN ASCENDING ORDER OF z COORDINATE
+
+  This code is #included multiple times in nn3Ddist.c 
+  Variables used:
+        FNAME     function name
+        DIST      #defined if function returns distance to nearest neighbour
+	WHICH     #defined if function returns id of nearest neighbour
+        EXCLUDE   #defined if exclusion mechanism is used
+  Either or both DIST and WHICH may be defined.
+
+  When EXCLUDE is defined,
+  code numbers id1, id2 are attached to the patterns X and Y respectively, 
+  such that X[i] and Y[j] are the same point iff id1[i] = id2[j].
+
+  Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2013
+  Licence: GPL >= 2
+
+  $Revision: 1.1 $  $Date: 2013/06/29 03:04:47 $
+
+
+*/
+#endif
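+
+/*
+  Editor's illustration of the EXCLUDE convention described above (the
+  numbers are made up): if the pattern X consists of points 2 and 5 of Y,
+  pass id1 = {2, 5} and id2 = {1, 2, ..., n2}; a candidate Y[j] is then
+  skipped whenever id2[j] == id1[i], so a point is never reported as its
+  own nearest neighbour.
+*/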
+
+void FNAME(n1, x1, y1, z1, id1, 
+           n2, x2, y2, z2, id2, 
+	   kmax,
+	   nnd, nnwhich, 
+	   huge)
+     /* inputs */
+     int *n1, *n2;
+     double *x1, *y1, *z1, *x2, *y2, *z2, *huge;
+     int *id1, *id2;
+     int *kmax;
+     /* output matrices (npoints * kmax) in ROW MAJOR order */
+     double *nnd;
+     int *nnwhich;
+     /* some inputs + outputs are not used in all functions */
+{ 
+  int npoints1, npoints2, nk, nk1;
+  int maxchunk, i, jleft, jright, jwhich, lastjwhich, unsorted, k, k1;
+  double d2, d2minK, x1i, y1i, z1i, dx, dy, dz, dz2, hu, hu2, tmp;
+  double *d2min; 
+#ifdef WHICH
+  int *which;
+  int itmp;
+#endif
+#ifdef EXCLUDE
+  int id1i;
+#endif
+
+  npoints1 = *n1;
+  npoints2 = *n2;
+  nk       = *kmax;
+  nk1      = nk - 1;
+  hu       = *huge;
+  hu2      = hu * hu;
+
+  if(npoints1 == 0 || npoints2 == 0)
+    return;
+
+  lastjwhich = 0;
+
+  /* 
+     create space to store the nearest neighbour distances and indices
+     for the current point
+  */
+
+  d2min = (double *) R_alloc((size_t) nk, sizeof(double));
+#ifdef WHICH
+  which = (int *) R_alloc((size_t) nk, sizeof(int));
+#endif
+
+  /* loop in chunks of 2^16 */
+
+  i = 0; maxchunk = 0; 
+  while(i < npoints1) {
+
+    R_CheckUserInterrupt();
+
+    maxchunk += 65536; 
+    if(maxchunk > npoints1) maxchunk = npoints1;
+
+    for(; i < maxchunk; i++) {
+
+      /* initialise nn distances and indices */
+      d2minK = hu2;
+      jwhich = -1;
+      for(k = 0; k < nk; k++) {
+	d2min[k] = hu2;
+#ifdef WHICH
+	which[k] = -1;
+#endif
+      }
+
+      x1i = x1[i];
+      y1i = y1[i];
+      z1i = z1[i];
+#ifdef EXCLUDE
+      id1i = id1[i];
+#endif
+
+      if(lastjwhich < npoints2) {
+	/* search forward from previous nearest neighbour  */
+	for(jright = lastjwhich; jright < npoints2; ++jright)
+	  {
+	    dz = z2[jright] - z1i;
+	    dz2 = dz * dz; 
+	    if(dz2 > d2minK) /* note that dz2 >= d2minK could break too early */
+	      break;
+#ifdef EXCLUDE
+	    /* do not compare identical points */
+	    if(id2[jright] != id1i) {
+#endif
+	      dy = y2[jright] - y1i;
+	      d2 =  dy * dy + dz2;
+	      if(d2 < d2minK) {
+		dx = x2[jright] - x1i;
+		d2 =  dx * dx + d2;
+		if (d2 < d2minK) {
+		  /* overwrite last entry in list of neighbours */
+		  d2min[nk1] = d2;
+		  jwhich = jright;
+#ifdef WHICH
+		  which[nk1] = jright;
+#endif
+		  /* bubble sort */
+		  unsorted = YES;
+		  for(k = nk1; unsorted && k > 0; k--) {
+		    k1 = k - 1;
+		    if(d2min[k] < d2min[k1]) {
+		      /* swap entries */
+		      tmp  = d2min[k1];
+		      d2min[k1] = d2min[k];
+		      d2min[k] = tmp;
+#ifdef WHICH
+		      itmp = which[k1];
+		      which[k1] = which[k];
+		      which[k] = itmp;
+#endif
+		    } else {
+		      unsorted = NO;
+		    }
+		  }
+		  /* adjust maximum distance */
+		  d2minK = d2min[nk1];
+		}
+	      }
+#ifdef EXCLUDE
+	    }
+#endif
+	  }
+	/* end forward search */
+      }
+      if(lastjwhich > 0) {
+	/* search backward from previous nearest neighbour */
+	for(jleft = lastjwhich - 1; jleft >= 0; --jleft)
+	  {
+	    dz = z1i - z2[jleft];
+	    dz2 = dz * dz;
+	    if(dz2 > d2minK) /* note that dz2 >= d2minK could break too early */
+	      break;
+#ifdef EXCLUDE
+	    /* do not compare identical points */
+	    if(id2[jleft] != id1i) {
+#endif
+	      dy = y2[jleft] - y1i;
+	      d2 =  dy * dy + dz2;
+	      if(d2 < d2minK) {
+		dx = x2[jleft] - x1i;
+		d2 =  dx * dx + d2;
+		if (d2 < d2minK) {
+		  /* overwrite last entry in list of neighbours */
+		  d2min[nk1] = d2;
+		  jwhich = jleft;
+#ifdef WHICH
+		  which[nk1] = jleft;
+#endif
+		  /* bubble sort */
+		  unsorted = YES;
+		  for(k = nk1; unsorted && k > 0; k--) {
+		    k1 = k - 1;
+		    if(d2min[k] < d2min[k1]) {
+		      /* swap entries */
+		      tmp  = d2min[k1];
+		      d2min[k1] = d2min[k];
+		      d2min[k] = tmp;
+#ifdef WHICH
+		      itmp = which[k1];
+		      which[k1] = which[k];
+		      which[k] = itmp;
+#endif
+		    } else {
+		      unsorted = NO;
+		    }
+		  }
+		  /* adjust maximum distance */
+		  d2minK = d2min[nk1];
+		}
+	      }
+#ifdef EXCLUDE
+	    }
+#endif
+	  }
+	/* end backward search */
+      }
+      /* copy nn distances for point i 
+	 to output matrix in ROW MAJOR order
+      */
+      for(k = 0; k < nk; k++) {
+#ifdef DIST
+	nnd[nk * i + k] = sqrt(d2min[k]);
+#endif
+#ifdef WHICH
+	nnwhich[nk * i + k] = which[k] + 1;  /* R indexing */
+#endif
+      }
+      /* save index of last neighbour encountered, if any;
+         jwhich = -1 would make the next search start out of bounds */
+      if(jwhich >= 0) lastjwhich = jwhich;
+      /* end of loop over points i */
+    }
+  }
+}
+
diff --git a/src/knnXdist.h b/src/knnXdist.h
new file mode 100644
index 0000000..6cfa65d
--- /dev/null
+++ b/src/knnXdist.h
@@ -0,0 +1,297 @@
+
+#if (1 == 0)
+/*
+  knnXdist.h
+
+  Code template for C functions supporting nncross 
+  for k-nearest neighbours (k > 1)
+
+  THE FOLLOWING CODE ASSUMES THAT LISTS ARE SORTED
+  IN ASCENDING ORDER OF y COORDINATE
+
+  This code is #included multiple times in knndistance.c 
+  Variables used:
+        FNAME     function name
+        DIST      #defined if function returns distance to nearest neighbour
+	WHICH     #defined if function returns id of nearest neighbour
+        EXCLUDE   #defined if exclusion mechanism is used
+  Either or both DIST and WHICH may be defined.
+
+  When EXCLUDE is defined,
+  code numbers id1, id2 are attached to the patterns X and Y respectively, 
+  such that
+  x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j].
+
+  Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2013
+  Licence: GPL >= 2
+
+  $Revision: 1.10 $  $Date: 2013/12/10 03:29:55 $
+
+
+*/
+#endif
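+
+/*
+  Editor's note: this header also honours an optional TRACER macro (see
+  the #ifdef TRACER blocks below); defining TRACER before inclusion makes
+  each search step report its state via Rprintf.  knndistance.c, which
+  includes this file, explicitly leaves TRACER undefined.
+*/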
+
+void FNAME(n1, x1, y1, id1, 
+           n2, x2, y2, id2, 
+	   kmax,
+	   nnd, nnwhich, 
+	   huge)
+     /* inputs */
+     int *n1, *n2;
+     double *x1, *y1, *x2, *y2, *huge;
+     int *id1, *id2;
+     int *kmax;
+     /* outputs */
+     double *nnd;
+     int *nnwhich;
+     /* some inputs + outputs are not used in all functions */
+{ 
+  int npoints1, npoints2, nk, nk1;
+  int maxchunk, i, jleft, jright, jwhich, lastjwhich, unsorted, k, k1;
+  double d2, d2minK, x1i, y1i, dx, dy, dy2, hu, hu2, tmp;
+  double *d2min; 
+#ifdef WHICH
+  int *which;
+  int itmp;
+#endif
+#ifdef EXCLUDE
+  int id1i;
+#endif
+#ifdef TRACER
+  int kk;
+#endif
+
+  npoints1 = *n1;
+  npoints2 = *n2;
+  nk       = *kmax;
+  nk1      = nk - 1;
+  hu       = *huge;
+  hu2      = hu * hu;
+
+  if(npoints1 == 0 || npoints2 == 0)
+    return;
+
+  lastjwhich = 0;
+
+  /* 
+     create space to store the nearest neighbour distances and indices
+     for the current point
+  */
+
+  d2min = (double *) R_alloc((size_t) nk, sizeof(double));
+#ifdef WHICH
+  which = (int *) R_alloc((size_t) nk, sizeof(int));
+#endif
+
+  /* loop in chunks of 2^16 */
+
+  i = 0; maxchunk = 0; 
+  while(i < npoints1) {
+
+    R_CheckUserInterrupt();
+
+    maxchunk += 65536; 
+    if(maxchunk > npoints1) maxchunk = npoints1;
+
+    for(; i < maxchunk; i++) {
+
+      /* initialise nn distances and indices */
+      d2minK = hu2;
+      jwhich = -1;
+      for(k = 0; k < nk; k++) {
+	d2min[k] = hu2;
+#ifdef WHICH
+	which[k] = -1;
+#endif
+      }
+
+      x1i = x1[i];
+      y1i = y1[i];
+#ifdef EXCLUDE
+      id1i = id1[i];
+#endif
+
+#ifdef TRACER
+      Rprintf("i=%d : (%lf, %lf) ..................... \n", i, x1i, y1i);
+#endif
+
+      if(lastjwhich < npoints2) {
+#ifdef TRACER
+	Rprintf("\tForward search from lastjwhich=%d:\n", lastjwhich);
+#endif
+	/* search forward from previous nearest neighbour  */
+	for(jright = lastjwhich; jright < npoints2; ++jright)
+	  {
+#ifdef TRACER
+	    Rprintf("\tjright=%d \t (%lf, %lf)\n", 
+		    jright, x2[jright], y2[jright]);
+#endif
+
+	    dy = y2[jright] - y1i;
+	    dy2 = dy * dy; 
+#ifdef TRACER
+	    Rprintf("\t\t dy2=%lf,\t d2minK=%lf\n", dy2, d2minK);
+#endif
+	    if(dy2 > d2minK) /* note that dy2 >= d2minK could break too early */
+	      break;
+
+#ifdef EXCLUDE
+	    /* do not compare identical points */
+	    if(id2[jright] != id1i) {
+#ifdef TRACER
+	      Rprintf("\t\t %d and %d are not identical\n", i, jright);
+#endif
+#endif
+	      dx = x2[jright] - x1i;
+	      d2 =  dx * dx + dy2;
+#ifdef TRACER
+	      Rprintf("\t\t d2=%lf\n", d2);
+#endif
+	      if (d2 < d2minK) {
+		/* overwrite last entry in list of neighbours */
+#ifdef TRACER
+		Rprintf("\t\t overwrite d2min[nk1]=%lf by d2=%lf\n", 
+			d2min[nk1], d2);
+#endif
+		d2min[nk1] = d2;
+		jwhich = jright;
+#ifdef WHICH
+		which[nk1] = jright;
+#endif
+		/* bubble sort */
+		unsorted = YES;
+		for(k = nk1; unsorted && k > 0; k--) {
+		  k1 = k - 1;
+		  if(d2min[k] < d2min[k1]) {
+		    /* swap entries */
+		    tmp  = d2min[k1];
+		    d2min[k1] = d2min[k];
+		    d2min[k] = tmp;
+#ifdef WHICH
+		    itmp = which[k1];
+		    which[k1] = which[k];
+		    which[k] = itmp;
+#endif
+		  } else {
+		    unsorted = NO;
+		  }
+		}
+#ifdef TRACER
+		Rprintf("\t\t sorted nn distances:\n");
+		for(kk = 0; kk < nk; kk++) 
+		  Rprintf("\t\t d2min[%d] = %lf\n", 
+			  kk, d2min[kk]);
+#endif
+		/* adjust maximum distance */
+		d2minK = d2min[nk1];
+#ifdef TRACER
+		Rprintf("\t\t d2minK=%lf\n", d2minK);
+#endif
+	      }
+#ifdef EXCLUDE
+	    }
+#endif
+	  }
+	/* end forward search */
+#ifdef TRACER
+	Rprintf("\tEnd forward search\n");
+#endif
+      }
+      if(lastjwhich > 0) {
+#ifdef TRACER
+	Rprintf("\tBackward search from lastjwhich=%d:\n", lastjwhich);
+#endif
+	/* search backward from previous nearest neighbour */
+	for(jleft = lastjwhich - 1; jleft >= 0; --jleft)
+	  {
+#ifdef TRACER
+	    Rprintf("\tjleft=%d \t (%lf, %lf)\n", 
+		    jleft, x2[jleft], y2[jleft]);
+#endif
+	    dy = y1i - y2[jleft];
+	    dy2 = dy * dy;
+#ifdef TRACER
+	    Rprintf("\t\t dy2=%lf,\t d2minK=%lf\n", dy2, d2minK);
+#endif
+	    if(dy2 > d2minK) /* note that dy2 >= d2minK could break too early */
+	      break;
+#ifdef EXCLUDE
+	    /* do not compare identical points */
+	    if(id2[jleft] != id1i) {
+#ifdef TRACER
+	      Rprintf("\t\t %d and %d are not identical\n", i, jleft);
+#endif
+#endif
+	      dx = x2[jleft] - x1i;
+	      d2 =  dx * dx + dy2;
+#ifdef TRACER
+	      Rprintf("\t\t d2=%lf\n", d2);
+#endif
+	      if (d2 < d2minK) {
+		/* overwrite last entry in list of neighbours */
+#ifdef TRACER
+		Rprintf("\t\t overwrite d2min[nk1]=%lf by d2=%lf\n", 
+			d2min[nk1], d2);
+#endif
+		d2min[nk1] = d2;
+		jwhich = jleft;
+#ifdef WHICH
+		which[nk1] = jleft;
+#endif
+		/* bubble sort */
+		unsorted = YES;
+		for(k = nk1; unsorted && k > 0; k--) {
+		  k1 = k - 1;
+		  if(d2min[k] < d2min[k1]) {
+		    /* swap entries */
+		    tmp  = d2min[k1];
+		    d2min[k1] = d2min[k];
+		    d2min[k] = tmp;
+#ifdef WHICH
+		    itmp = which[k1];
+		    which[k1] = which[k];
+		    which[k] = itmp;
+#endif
+		  } else {
+		    unsorted = NO;
+		  }
+		}
+#ifdef TRACER
+		Rprintf("\t\t sorted nn distances:\n");
+		for(kk = 0; kk < nk; kk++) 
+		  Rprintf("\t\t d2min[%d] = %lf\n", 
+			  kk, d2min[kk]);
+#endif
+		/* adjust maximum distance */
+		d2minK = d2min[nk1];
+#ifdef TRACER
+		Rprintf("\t\t d2minK=%lf\n", d2minK);
+#endif
+	      }
+#ifdef EXCLUDE
+	    }
+#endif
+	  }
+	/* end backward search */
+#ifdef TRACER
+	Rprintf("\tEnd backward search\n");
+#endif
+      }
+      /* copy nn distances for point i 
+	 to output matrix in ROW MAJOR order
+      */
+      for(k = 0; k < nk; k++) {
+#ifdef DIST
+	nnd[nk * i + k] = sqrt(d2min[k]);
+#endif
+#ifdef WHICH
+	nnwhich[nk * i + k] = which[k] + 1;  /* R indexing */
+#endif
+      }
+      /* save index of last neighbour encountered, if any;
+         jwhich = -1 would make the next search start out of bounds */
+      if(jwhich >= 0) lastjwhich = jwhich;
+      /* end of loop over points i */
+    }
+  }
+}
+
diff --git a/src/knndist.h b/src/knndist.h
new file mode 100644
index 0000000..9924351
--- /dev/null
+++ b/src/knndist.h
@@ -0,0 +1,204 @@
+/*
+  knndist.h
+
+  Code template for C functions supporting knndist and knnwhich 
+
+  THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER
+
+  This code is #included multiple times in knndistance.c 
+  Variables used:
+        FNAME     function name
+        DIST      #defined if function returns distance to nearest neighbour
+	WHICH     #defined if function returns id of nearest neighbour
+  Either or both DIST and WHICH may be defined.
+
+  Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2012
+  Licence: GPL >= 2
+
+  $Revision: 1.3 $  $Date: 2013/05/27 02:09:10 $
+
+*/
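+
+/*
+  Editor's sketch (not part of the upstream sources; the helper name
+  kbest_insert is illustrative only): the update step used by all of
+  these templates, isolated.  The k smallest squared distances seen so
+  far are kept sorted in d2min[0..nk-1]; a new candidate overwrites the
+  last entry and bubbles towards the front.
+*/
+#if (1 == 0)
+static void kbest_insert(double *d2min, int nk, double d2new)
+{
+  int k;
+  double tmp;
+  if(d2new >= d2min[nk - 1])
+    return;                     /* not among the k best so far */
+  d2min[nk - 1] = d2new;        /* overwrite the last entry */
+  for(k = nk - 1; k > 0; k--) { /* single bubble-sort pass */
+    if(d2min[k] < d2min[k - 1]) {
+      tmp          = d2min[k - 1];
+      d2min[k - 1] = d2min[k];
+      d2min[k]     = tmp;
+    } else {
+      break;
+    }
+  }
+}
+#endif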
+
+void FNAME(n, kmax, x, y, 
+#ifdef DIST 
+	   nnd, 
+#endif
+#ifdef WHICH
+	   nnwhich, 
+#endif
+	   huge)
+     /* inputs */
+     int *n, *kmax;
+     double *x, *y, *huge;
+     /* output matrices (npoints * kmax) in ROW MAJOR order */
+#ifdef DIST
+     double *nnd;
+#endif
+#ifdef WHICH
+     int    *nnwhich;
+#endif
+{ 
+  int npoints, maxchunk, nk, nk1, i, k, k1, left, right, unsorted;
+  double d2, d2minK, xi, yi, dx, dy, dy2, hu, hu2, tmp;
+  double *d2min; 
+#ifdef WHICH
+  int *which;
+  int itmp;
+#endif
+
+  hu = *huge;
+  hu2 = hu * hu;
+
+  npoints = *n;
+  nk      = *kmax;
+  nk1     = nk - 1;
+
+  /* 
+     create space to store the nearest neighbour distances and indices
+     for the current point
+  */
+
+  d2min = (double *) R_alloc((size_t) nk, sizeof(double));
+#ifdef WHICH
+  which = (int *) R_alloc((size_t) nk, sizeof(int));
+#endif
+
+  /* loop in chunks of 2^16 */
+
+  i = 0; maxchunk = 0; 
+  while(i < npoints) {
+
+    R_CheckUserInterrupt();
+
+    maxchunk += 65536; 
+    if(maxchunk > npoints) maxchunk = npoints;
+
+    for(; i < maxchunk; i++) {
+
+#ifdef SPATSTAT_DEBUG
+      Rprintf("\ni=%d\n", i); 
+#endif
+
+    /* initialise nn distances and indices */
+
+      d2minK = hu2;
+      for(k = 0; k < nk; k++) {
+	d2min[k] = hu2;
+#ifdef WHICH
+	which[k] = -1;
+#endif
+      }
+
+      xi = x[i];
+      yi = y[i];
+
+      /* search backward */
+      for(left = i - 1; left >= 0; --left)
+      {
+
+#ifdef SPATSTAT_DEBUG
+	Rprintf("L");
+#endif
+	dy = yi - y[left];
+	dy2 = dy * dy;
+	if(dy2 > d2minK)
+	  break;
+
+	dx = x[left] - xi;
+	d2 =  dx * dx + dy2;
+	if (d2 < d2minK) {
+	  /* overwrite last entry */
+	  d2min[nk1] = d2;
+#ifdef WHICH
+	  which[nk1] = left;
+#endif
+	  /* bubble sort */
+	  unsorted = YES;
+	  for(k = nk1; unsorted && k > 0; k--) {
+	    k1 = k - 1;
+	    if(d2min[k] < d2min[k1]) {
+	      /* swap entries */
+	      tmp  = d2min[k1];
+	      d2min[k1] = d2min[k];
+	      d2min[k] = tmp;
+#ifdef WHICH
+	      itmp = which[k1];
+	      which[k1] = which[k];
+	      which[k] = itmp;
+#endif
+	    } else {
+	      unsorted = NO;
+	    }
+	  }
+	  /* adjust maximum distance */
+	  d2minK = d2min[nk1];
+	}
+      }
+
+      /* search forward */
+      for(right = i + 1; right < npoints; ++right)
+	{
+
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("R");
+#endif
+	  dy = y[right] - yi;
+	  dy2 = dy * dy;
+	  if(dy2 > d2minK)
+	    break;
+
+	  dx = x[right] - xi;
+	  d2 =  dx * dx + dy2;
+	  if (d2 < d2minK) {
+	    /* overwrite last entry */
+	    d2min[nk1] = d2;
+#ifdef WHICH
+	    which[nk1] = right;
+#endif
+	    /* bubble sort */
+	    unsorted = YES;
+	    for(k = nk1; unsorted && k > 0; k--) {
+	      k1 = k - 1;
+	      if(d2min[k] < d2min[k1]) {
+		/* swap entries */
+		tmp  = d2min[k1];
+		d2min[k1] = d2min[k];
+		d2min[k] = tmp;
+#ifdef WHICH
+		itmp = which[k1];
+		which[k1] = which[k];
+		which[k] = itmp;
+#endif
+	      } else {
+		unsorted = NO;
+	      }
+	    }
+	    /* adjust maximum distance */
+	    d2minK = d2min[nk1];
+	  }
+	}
+
+      /* search finished for point i */
+
+#ifdef SPATSTAT_DEBUG
+      Rprintf("\n");
+#endif
+
+      /* copy nn distances for point i 
+	 to output matrix in ROW MAJOR order
+      */
+      for(k = 0; k < nk; k++) {
+#ifdef DIST
+	nnd[nk * i + k] = sqrt(d2min[k]);
+#endif
+#ifdef WHICH
+	nnwhich[nk * i + k] = which[k] + 1;  /* R indexing */
+#endif
+      }
+
+      /* end of i loop */
+    }
+  }
+}
+
+
diff --git a/src/knndistance.c b/src/knndistance.c
new file mode 100644
index 0000000..3150580
--- /dev/null
+++ b/src/knndistance.c
@@ -0,0 +1,246 @@
+/*
+
+  knndistance.c
+
+  K-th Nearest Neighbour Distances between points
+
+  Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013
+  Licence: GNU Public Licence >= 2
+
+  $Revision: 1.8 $     $Date: 2013/12/10 03:29:45 $
+
+  Function definitions are #included from knndist.h and knnXdist.h
+
+  THE FOLLOWING FUNCTIONS ASSUME THAT y IS SORTED IN ASCENDING ORDER 
+
+  SINGLE LIST:
+  knndsort     k-th nearest neighbour distances
+  knnwhich     k-th nearest neighbours
+  knnsort      k-th nearest neighbours and their distances
+
+  ONE LIST TO ANOTHER LIST:
+  knnXdist     k-th nearest neighbour distances from one list to another
+  knnXwhich    k-th nearest neighbour IDs from one list to another
+  knnX         k-th nearest neighbour IDs & distances from one list to another
+
+  ONE LIST TO ANOTHER OVERLAPPING LIST:
+  knnXEdist    k-th nearest neighbour distances, one list to another, overlapping
+  knnXEwhich   k-th nearest neighbour IDs, one list to another, overlapping
+  knnXE        k-th nearest neighbour IDs & distances, overlapping lists
+
+
+*/
+
+#undef SPATSTAT_DEBUG
+
+#include <R.h>
+#include <R_ext/Utils.h>
+#include <math.h>
+
+#include "yesno.h"
+
+double sqrt();
+
+/* THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER */
+
+/* ------------------- one point pattern X --------------------- */
+
+/* 
+   knndsort 
+
+   nearest neighbours 1:kmax
+
+   returns distances only
+
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+#define FNAME knndsort
+#define DIST
+#include "knndist.h"
+
+/* 
+   knnwhich
+
+   nearest neighbours 1:kmax
+
+   returns identifiers only
+
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+#define FNAME knnwhich
+#define WHICH
+#include "knndist.h"
+
+/* 
+   knnsort 
+
+   nearest neighbours 1:kmax
+
+   returns distances and indices
+
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+#define FNAME knnsort
+#define DIST
+#define WHICH
+#include "knndist.h"
+
+/* --------------- two distinct point patterns X and Y --------------- */
+
+/* general interface */
+
+void knnXinterface(n1, x1, y1, id1, 
+		   n2, x2, y2, id2, 
+		   kmax,
+		   exclude, wantdist, wantwhich,
+		   nnd, nnwhich, 
+		   huge)
+     /* inputs */
+     int *n1, *n2;
+     double *x1, *y1, *x2, *y2, *huge;
+     int *id1, *id2;
+     int *kmax;
+     /* options */
+     int *exclude, *wantdist, *wantwhich;
+     /* outputs */
+     double *nnd;
+     int *nnwhich;
+     /* some inputs + outputs are not used in all functions */
+{
+  void knnX(), knnXdist(), knnXwhich();
+  void knnXE(), knnXEdist(), knnXEwhich();
+  int ex, di, wh;
+  ex = (*exclude != 0);
+  di = (*wantdist != 0);
+  wh = (*wantwhich != 0);
+  if(!ex) {
+    if(di && wh) {
+      knnX(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge);
+    } else if(di) {
+      knnXdist(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge);
+    } else if(wh) {
+      knnXwhich(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge);
+    } 
+  } else {
+    if(di && wh) {
+      knnXE(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge);
+    } else if(di) {
+      knnXEdist(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge);
+    } else if(wh) {
+      knnXEwhich(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge);
+    } 
+  }
+}
+
+/* Turn off the debugging tracer in knnXdist.h */
+#undef TRACER
+
+/* 
+   knnXdist
+
+   returns distances only
+
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+#define FNAME knnXdist
+#define DIST
+#include "knnXdist.h"
+
+/* 
+   knnXwhich
+
+   returns identifiers only
+
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+#define FNAME knnXwhich
+#define WHICH
+#include "knnXdist.h"
+
+/* 
+   knnX 
+
+   returns distances and indices
+
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+#define FNAME knnX
+#define DIST
+#define WHICH
+#include "knnXdist.h"
+
+/* --------------- overlapping point patterns X and Y --------------- */
+
+/* 
+   knnXEdist
+
+   returns distances only
+
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+#define FNAME knnXEdist
+#define DIST
+#define EXCLUDE
+#include "knnXdist.h"
+
+/* 
+   knnXEwhich
+
+   returns identifiers only
+
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+#define FNAME knnXEwhich
+#define WHICH
+#define EXCLUDE
+#include "knnXdist.h"
+
+/* 
+   knnXE 
+
+   returns distances and indices
+
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+#define FNAME knnXE
+#define DIST
+#define WHICH
+#define EXCLUDE
+#include "knnXdist.h"
+
diff --git a/src/knngrid.c b/src/knngrid.c
new file mode 100644
index 0000000..16e7199
--- /dev/null
+++ b/src/knngrid.c
@@ -0,0 +1,116 @@
+/*
+
+  knngrid.c
+
+  K-th Nearest Neighbour Distances from a pixel grid to a point pattern
+
+  Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013
+  Licence: GNU Public Licence >= 2
+
+  $Revision: 1.6 $     $Date: 2013/11/03 05:06:28 $
+
+  Function body definition is #included from knngrid.h 
+
+  THE FOLLOWING FUNCTIONS ASSUME THAT x IS SORTED IN ASCENDING ORDER 
+
+*/
+
+#undef SPATSTAT_DEBUG
+
+#include <R.h>
+#include <R_ext/Utils.h>
+#include <math.h>
+
+#include "yesno.h"
+
+double sqrt();
+
+/* THE FOLLOWING CODE ASSUMES THAT x IS SORTED IN ASCENDING ORDER */
+
+/* general interface */
+
+void knnGinterface(nx, x0, xstep,  
+		   ny, y0, ystep,   /* pixel grid dimensions */
+		   np, xp, yp,   /* data points */
+		   kmax,
+		   wantdist, wantwhich,
+		   nnd, nnwhich, 
+		   huge)
+     /* inputs */
+     int *nx, *ny, *np;
+     double *x0, *xstep, *y0, *ystep, *huge;
+     double *xp, *yp;
+     int *kmax;
+     /* options */
+     int *wantdist, *wantwhich;
+     /* outputs */
+     double *nnd;
+     int *nnwhich;
+     /* some inputs + outputs are not used in all functions */
+{
+  void knnGdw(), knnGd(), knnGw();
+  int di, wh;
+  di = (*wantdist != 0);
+  wh = (*wantwhich != 0);
+  if(di && wh) {
+    knnGdw(nx, x0, xstep, ny, y0, ystep, np, xp, yp, kmax, nnd, nnwhich, huge);
+  } else if(di) {
+    knnGd(nx, x0, xstep, ny, y0, ystep, np, xp, yp, kmax, nnd, nnwhich, huge);
+  } else if(wh) {
+    knnGw(nx, x0, xstep, ny, y0, ystep, np, xp, yp, kmax, nnd, nnwhich, huge);
+  }
+}
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+
+/* 
+   knnGdw
+
+   nearest neighbours 1:kmax
+
+   returns distances and indices
+
+*/
+
+#define FNAME knnGdw
+#define DIST
+#define WHICH
+#include "knngrid.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+
+/* 
+   knnGd
+
+   nearest neighbours 1:kmax
+
+   returns distances only
+
+*/
+
+#define FNAME knnGd
+#define DIST
+#include "knngrid.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+
+/* 
+   knnGw 
+
+   nearest neighbours 1:kmax
+
+   returns indices only
+
+*/
+
+#define FNAME knnGw
+#define WHICH
+#include "knngrid.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+
diff --git a/src/knngrid.h b/src/knngrid.h
new file mode 100644
index 0000000..d5aa52d
--- /dev/null
+++ b/src/knngrid.h
@@ -0,0 +1,245 @@
+
+#if (1 == 0)
+/*
+  knngrid.h
+
+  Code template for C functions 
+  k-nearest neighbours (k > 1) of each grid point
+
+  THE FOLLOWING CODE ASSUMES THAT POINT PATTERN (xp, yp) IS SORTED
+  IN ASCENDING ORDER OF x COORDINATE
+
+  This code is #included multiple times in knngrid.c 
+  Variables used:
+        FNAME     function name
+        DIST      #defined if function returns distance to nearest neighbour
+	WHICH     #defined if function returns id of nearest neighbour
+  Either or both DIST and WHICH may be defined.
+
+  Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2013
+  Licence: GPL >= 2
+
+  $Revision: 1.6 $  $Date: 2016/02/02 01:31:50 $
+
+
+*/
+#endif
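+
+/*
+  Editor's note on the output layout (a restatement of the index
+  arithmetic at the end of this file): results are stored with k varying
+  fastest, then grid row i, then grid column j, so entry (k, i, j) lands
+  at index Nk * (i + j * Nyrow) + k.  This is column-major order for an
+  array of dimension (Nk, Nyrow, Nxcol).
+*/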
+
+#undef PRINTALOT
+
+void FNAME(nx, x0, xstep,  
+	   ny, y0, ystep,   /* pixel grid dimensions */
+           np, xp, yp,   /* data points */
+	   kmax,
+	   nnd, nnwhich, 
+	   huge)
+     /* inputs */
+     int *nx, *ny, *np;
+     double *x0, *xstep, *y0, *ystep, *huge;
+     double *xp, *yp;
+     int *kmax;
+     /* outputs */
+     double *nnd;
+     int *nnwhich;
+     /* some inputs + outputs are not used in all functions */
+{ 
+  int Nxcol, Nyrow;
+  int i, j, ijpos;
+  int Npoints, Nk, Nk1;
+  int mleft, mright, mwhich, lastmwhich, unsorted, k, k1;
+  double  X0, Y0, Xstep, Ystep;
+  double d2, d2minK, xj, yi, dx, dy, dx2, hu, hu2, tmp;
+  double *d2min; 
+#ifdef WHICH
+  int *which;
+  int itmp;
+#endif
+
+  Nxcol   = *nx;
+  Nyrow   = *ny;
+  Npoints = *np;
+  Nk      = *kmax;
+  hu      = *huge;
+  X0      = *x0;
+  Y0      = *y0;
+  Xstep   = *xstep;
+  Ystep   = *ystep;
+
+  Nk1     = Nk - 1;
+  hu2      = hu * hu;
+
+  if(Npoints == 0)
+    return;
+
+  lastmwhich = mwhich = 0;
+
+  /* 
+     create space to store the nearest neighbour distances and indices
+     for the current grid point
+  */
+
+  d2min = (double *) R_alloc((size_t) Nk, sizeof(double));
+#ifdef WHICH
+  which = (int *) R_alloc((size_t) Nk, sizeof(int));
+#endif
+
+  /* loop over pixels */
+
+  for(j = 0, xj = X0; j < Nxcol; j++, xj += Xstep) {
+
+    R_CheckUserInterrupt();
+    
+#ifdef PRINTALOT
+    Rprintf("j=%d, xj=%lf\n", j, xj); 
+#endif
+
+    for(i = 0, yi = Y0; i < Nyrow; i++, yi += Ystep) {
+
+#ifdef PRINTALOT
+      Rprintf("\ti=%d, yi = %lf\n", i, yi); 
+#endif
+
+      /* initialise nn distances and indices */
+      d2minK = hu2;
+      for(k = 0; k < Nk; k++) {
+	d2min[k] = hu2;
+#ifdef WHICH
+	which[k] = -1;
+#endif
+      }
+
+      if(lastmwhich < Npoints) {
+	/* search forward from previous nearest neighbour  */
+	for(mright = lastmwhich; mright < Npoints; ++mright)
+	  {
+	    dx = xp[mright] - xj;
+	    dx2 = dx * dx; 
+#ifdef PRINTALOT
+	    Rprintf("\t\t%d\n", mright);
+#endif
+	    if(dx2 > d2minK) /* note that dx2 >= d2minK could break too early */
+	      break;
+	    dy = yp[mright] - yi;
+	    d2 =  dy * dy + dx2;
+	    if (d2 < d2minK) {
+#ifdef PRINTALOT
+	    Rprintf("\t\t\tNeighbour: d2=%lf\n", d2);
+#endif
+	      /* overwrite last entry in list of neighbours */
+	      d2min[Nk1] = d2;
+	      mwhich = mright;
+#ifdef WHICH
+	      which[Nk1] = mright;
+#endif
+	      /* bubble sort */
+	      unsorted = YES;
+	      for(k = Nk1; unsorted && k > 0; k--) {
+		k1 = k - 1;
+		if(d2min[k] < d2min[k1]) {
+		  /* swap entries */
+		  tmp  = d2min[k1];
+		  d2min[k1] = d2min[k];
+		  d2min[k] = tmp;
+#ifdef WHICH
+		  itmp = which[k1];
+		  which[k1] = which[k];
+		  which[k] = itmp;
+#endif
+		} else {
+		  unsorted = NO;
+		}
+	      }
+	      /* adjust maximum distance */
+	      d2minK = d2min[Nk1];
+#ifdef PRINTALOT
+	      Rprintf("\t\t\tUpdated d2minK=%lf\n", d2minK);
+	      for(k = 0; k < Nk; k++)
+		Rprintf("\t\t\t\td2min[%d]=%lf\n", k, d2min[k]);
+#ifdef WHICH
+	      for(k = 0; k < Nk; k++) 
+		Rprintf("\t\t\t\twhich[%d]=%d\n", k, which[k]);
+#endif
+#endif
+	    }
+	  }
+	/* end forward search */
+      }
+      if(lastmwhich > 0) {
+	/* search backward from previous nearest neighbour */
+	for(mleft = lastmwhich - 1; mleft >= 0; --mleft)
+	  {
+	    dx = xj - xp[mleft];
+	    dx2 = dx * dx;
+#ifdef PRINTALOT
+	    Rprintf("\t\t%d\n", mleft);
+#endif
+	    if(dx2 > d2minK) /* note that dx2 >= d2minK could break too early */
+	      break;
+	    dy = yp[mleft] - yi;
+	    d2 =  dy * dy + dx2;
+	    if (d2 < d2minK) {
+#ifdef PRINTALOT
+	    Rprintf("\t\t\tNeighbour: d2=%lf\n", d2);
+#endif
+	      /* overwrite last entry in list of neighbours */
+	      mwhich = mleft;
+	      d2min[Nk1] = d2;
+#ifdef WHICH
+	      which[Nk1] = mleft;
+#endif
+	      /* bubble sort */
+	      unsorted = YES;
+	      for(k = Nk1; unsorted && k > 0; k--) {
+		k1 = k - 1;
+		if(d2min[k] < d2min[k1]) {
+		  /* swap entries */
+		  tmp  = d2min[k1];
+		  d2min[k1] = d2min[k];
+		  d2min[k] = tmp;
+#ifdef WHICH
+		  itmp = which[k1];
+		  which[k1] = which[k];
+		  which[k] = itmp;
+#endif
+		} else {
+		  unsorted = NO;
+		}
+	      }
+	      /* adjust maximum distance */
+	      d2minK = d2min[Nk1];
+#ifdef PRINTALOT
+	      Rprintf("\t\t\tUpdated d2minK=%lf\n", d2minK);
+	      for(k = 0; k < Nk; k++) 
+		Rprintf("\t\t\t\td2min[%d]=%lf\n", k, d2min[k]);
+#ifdef WHICH
+	      for(k = 0; k < Nk; k++) 
+		Rprintf("\t\t\t\twhich[%d]=%d\n", k, which[k]);
+#endif
+#endif
+	    }
+	  }
+	/* end backward search */
+      }
+      /* remember index of most recently-encountered neighbour */
+      lastmwhich = mwhich;
+#ifdef PRINTALOT
+      Rprintf("\t\tlastmwhich=%d\n", lastmwhich);
+#endif
+      /* copy nn distances for grid point (i, j)
+	 to output array nnd[ , i, j] 
+      */
+      ijpos = Nk * (i + j * Nyrow);
+      for(k = 0; k < Nk; k++) {
+#ifdef DIST
+	nnd[ijpos + k] = sqrt(d2min[k]);
+#endif
+#ifdef WHICH
+	nnwhich[ijpos + k] = which[k] + 1;  /* R indexing */
+#endif
+      }
+      /* end of loop over grid rows i */
+    }
+  }
+}
+
+
diff --git a/src/lennard.c b/src/lennard.c
new file mode 100755
index 0000000..c987e58
--- /dev/null
+++ b/src/lennard.c
@@ -0,0 +1,158 @@
+#include <R.h>
+#include <Rmath.h>
+#include <R_ext/Constants.h>
+#include <math.h>
+#include "methas.h"
+#include "dist2.h"
+
+/* Conditional intensity computation for Lennard-Jones process */
+
+/* Storage of parameters and precomputed/auxiliary data */
+
+typedef struct Lennard {
+  double sigma;
+  double epsilon;
+  double sigma2;  /*   sigma^2     */
+  double foureps;    /*   4 * epsilon     */
+  double d2min;  /* minimum value of d^2 which yields nonzero intensity */
+  double d2max;  /* maximum value of d^2 which has nontrivial contribution */
+  double *period;
+  int per;
+} Lennard;
+
+/* 
+   MAXEXP is intended to be the largest x such that exp(-x) != 0 
+   although the exact value is not needed
+*/
+#define MAXEXP (-log(DOUBLE_XMIN))
+#define MINEXP (log(1.001))
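+
+/*
+  Editor's note on the thresholds set in lennardinit() below (a
+  derivation only; it follows from the code): with r6 = (sigma^2/d^2)^3,
+  each pair at squared distance d^2 contributes a factor
+  exp(4 * epsilon * (r6 - r6^2)) to the conditional intensity.  For small
+  d^2 the r6^2 term dominates and the factor underflows to zero once
+  4 * epsilon * r6^2 > MAXEXP, i.e. when
+  d^2 < sigma^2 * (4 * epsilon / MAXEXP)^(1/6), giving d2min.  For large
+  d^2 the factor is within a factor of 1.001 of unity once
+  4 * epsilon * r6 < MINEXP, i.e. when
+  d^2 > sigma^2 * (4 * epsilon / MINEXP)^(1/3), giving d2max (subject to
+  the caps minfrac <= 0.5 and maxfrac >= 2 applied below).
+*/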
+
+/* initialiser function */
+
+Cdata *lennardinit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  Lennard *lennard;
+  double sigma2, foureps, minfrac, maxfrac;
+
+  lennard = (Lennard *) R_alloc(1, sizeof(Lennard));
+
+  /* Interpret model parameters*/
+  lennard->sigma   = model.ipar[0];
+  lennard->epsilon = model.ipar[1];
+  lennard->period  = model.period;
+  /* constants */
+  lennard->sigma2  = sigma2 = pow(lennard->sigma, 2);
+  lennard->foureps = foureps = 4 * lennard->epsilon;
+  /* thresholds where the interaction becomes trivial */
+  minfrac = pow(foureps/MAXEXP, (double) 1.0/6.0);
+  if(minfrac > 0.5) minfrac = 0.5;
+  maxfrac = pow(foureps/MINEXP, (double) 1.0/3.0);
+  if(maxfrac < 2.0) maxfrac = 2.0;
+  lennard->d2min   = sigma2 * minfrac;
+  lennard->d2max   = sigma2 * maxfrac;
+  /* periodic boundary conditions? */
+  lennard->per    = (model.period[0] > 0.0);
+
+  return((Cdata *) lennard);
+}
+
+/* conditional intensity evaluator */
+
+double lennardcif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int npts, ix, ixp1, j;
+  double *x, *y;
+  double u, v;
+  double d2, ratio6, pairsum, cifval;
+  double sigma2, d2max, d2min;
+  double *period;
+  Lennard *lennard;
+  DECLARE_CLOSE_D2_VARS;
+
+  lennard = (Lennard *) cdata;
+
+  sigma2 = lennard->sigma2;
+  d2max  = lennard->d2max;
+  d2min  = lennard->d2min;
+  period = lennard->period;
+
+  u  = prop.u;
+  v  = prop.v;
+  ix = prop.ix;
+  x  = state.x;
+  y  = state.y;
+  npts = state.npts;
+
+  cifval = 1.0;
+
+  if(npts == 0) 
+    return(cifval);
+
+  pairsum = 0;
+
+  ixp1 = ix+1;
+  /* If ix = NONE = -1, then ixp1 = 0 is correct */
+  if(lennard->per) { /* periodic distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,d2max,d2)) {
+	  if(d2 < d2min) {
+	    cifval = 0.0;
+	    return cifval;
+	  }
+	  ratio6 = pow(sigma2/d2, 3);
+	  pairsum += ratio6 * (1.0 - ratio6);
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,d2max,d2)) {
+	  if(d2 < d2min) {
+	    cifval = 0.0;
+	    return cifval;
+	  }
+	  ratio6 = pow(sigma2/d2, 3);
+	  pairsum += ratio6 * (1.0 - ratio6);
+	}
+      }
+    }
+  } else { /* Euclidean distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+        if(CLOSE_D2(u, v, x[j], y[j], d2max, d2)) {
+	  if(d2 < lennard->d2min) {
+	    cifval = 0.0;
+	    return cifval;
+	  }
+	  ratio6 = pow(sigma2/d2, 3);
+	  pairsum += ratio6 * (1.0 - ratio6);
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+        if(CLOSE_D2(u, v, x[j], y[j], d2max, d2)) {
+	  if(d2 < lennard->d2min) {
+	    cifval = 0.0;
+	    return cifval;
+	  }
+	  ratio6 = pow(sigma2/d2, 3);
+	  pairsum += ratio6 * (1.0 - ratio6);
+	}
+      }
+    }
+  }
+
+  cifval *= exp(lennard->foureps * pairsum);
+  return cifval;
+}
+
+Cifns LennardCifns = { &lennardinit, &lennardcif, (updafunptr) NULL, NO};
+
diff --git a/src/linSnncross.c b/src/linSnncross.c
new file mode 100644
index 0000000..3953987
--- /dev/null
+++ b/src/linSnncross.c
@@ -0,0 +1,39 @@
+#include <R.h>
+#include "yesno.h"
+
+/* 
+   linSnncross.c
+
+   Shortest-path distances between nearest neighbours in linear network
+   One pattern to another pattern
+
+   $Revision: 1.3 $  $Date: 2015/11/28 10:08:55 $
+
+   'Sparse version' 
+
+   Works with sparse representation
+   Does not allow 'exclusion'
+   Requires point data to be ordered by segment index.
+
+   linSnndcross      
+   linSnndwhich
+
+*/
+
+void Clinvdist(), Clinvwhichdist();  /* functions from linvdist.c */
+
+#undef HUH
+
+/* definition of linSnndcross */
+#define FNAME linSnndcross
+#undef WHICH
+#include "linSnncross.h"
+
+/* definition of linSnndwhich */
+#undef  FNAME
+#define FNAME linSnndwhich
+#define WHICH
+#include "linSnncross.h"
+
+
+
diff --git a/src/linSnncross.h b/src/linSnncross.h
new file mode 100644
index 0000000..f45fcad
--- /dev/null
+++ b/src/linSnncross.h
@@ -0,0 +1,132 @@
+/* 
+   linSnncross.h
+
+   Function body definitions with macros
+
+   Sparse representation of network
+
+   $Revision: 1.4 $  $Date: 2015/12/28 02:44:25 $
+
+   Macros used:
+   FNAME   name of function
+   WHICH   whether 'nnwhich' is required
+   HUH     debugging
+
+   ! Data points must be ordered by segment index !
+*/
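+
+/*
+  Editor's summary of the computation below (a restatement of the code):
+  with dminvert[v] = shortest distance from vertex v to the target
+  pattern, a source point at fractional position t on segment k (length
+  L, endpoints A = from[k], B = to[k]) gets nearest-neighbour distance
+
+      min( L * t + dminvert[A],
+           L * (1 - t) + dminvert[B],
+           min over targets j on segment k of L * |tq[j] - t| ).
+
+  The first two terms cover paths leaving the segment through either
+  endpoint; the third covers targets reached within the segment itself.
+*/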
+
+void 
+FNAME(np, sp, tp,  /* data points 'from' (ordered by sp) */
+      nq, sq, tq, /* data points 'to'   (ordered by sq) */
+      nv, /* number of network vertices */
+      ns, from, to,  /* segments */
+      seglen,  /* segment lengths */
+      huge, /* value taken as infinity */
+      tol, /* tolerance for updating distances */
+      /* OUTPUT */
+#ifdef WHICH
+      nndist,  /* nearest neighbour distance for each point */
+      nnwhich  /* identifies nearest neighbour */
+#else 
+      nndist  /* nearest neighbour distance for each point */
+#endif
+)
+  int *np, *nq, *nv, *ns;
+  int *from, *to, *sp, *sq; /* integer vectors (mappings) */
+  double *tp, *tq; /* fractional location coordinates */
+  double *huge, *tol;
+  double *seglen; 
+  double *nndist; /* nearest neighbour distance for each point */
+#ifdef WHICH
+  int *nnwhich; /* identifies nearest neighbour */
+#endif
+{
+  int Np, Nq, Nv, i, j, ivleft, ivright, jfirst, jlast, k;
+  double d, hugevalue, slen, tpi;
+  double *dminvert;  /* min dist from each vertex */
+#ifdef WHICH
+  int *whichvert;   /* which min from each vertex */
+#endif 
+
+  Np = *np;
+  Nq = *nq;
+  Nv = *nv;
+  hugevalue = *huge;
+
+  /* First compute min distance to target set from each vertex */
+  dminvert = (double *) R_alloc(Nv, sizeof(double));
+#ifdef WHICH
+  whichvert = (int *) R_alloc(Nv, sizeof(int));
+  Clinvwhichdist(nq, sq, tq, nv, ns, from, to, seglen, huge, tol, 
+		 dminvert, whichvert);
+#else
+  Clinvdist(nq, sq, tq, nv, ns, from, to, seglen, huge, tol, 
+	    dminvert);
+#endif
+
+#ifdef HUH
+  Rprintf("Initialise answer\n");
+#endif
+  /* initialise nn distances from source points */
+  for(i = 0; i < Np; i++) {
+    nndist[i] = hugevalue;
+#ifdef WHICH
+    nnwhich[i] = -1;
+#endif
+  }
+
+  /* run through all source points */
+#ifdef HUH
+  Rprintf("Run through source points\n");
+#endif
+  jfirst = 0;
+  for(i = 0; i < Np; i++) {
+    tpi = tp[i];
+    k = sp[i];   /* segment containing this point */
+    slen = seglen[k];
+    ivleft = from[k];
+    ivright = to[k];
+#ifdef HUH
+    Rprintf("Source point %d lies on segment %d = [%d,%d]\n", 
+	    i, k, ivleft, ivright);
+#endif
+    d = slen * tpi + dminvert[ivleft];
+    if(nndist[i] > d) {
+#ifdef HUH
+      Rprintf("\tMapping to left endpoint %d, distance %lf\n", ivleft, d);
+#endif
+      nndist[i] = d;
+#ifdef WHICH
+      nnwhich[i] = whichvert[ivleft];
+#endif
+    }
+    d = slen * (1.0 - tpi) + dminvert[ivright];
+    if(nndist[i] > d) {
+#ifdef HUH
+      Rprintf("\tMapping to right endpoint %d, distance %lf\n", ivright, d);
+#endif
+      nndist[i] = d;
+#ifdef WHICH
+      nnwhich[i] = whichvert[ivright];
+#endif
+    }
+    /* find any target points in this segment */
+    while(jfirst < Nq && sq[jfirst] < k) jfirst++;
+    jlast = jfirst;
+    while(jlast < Nq && sq[jlast] == k) jlast++;
+    --jlast;
+    /* if there are no such points, then jlast < jfirst */
+    if(jfirst <= jlast) {
+      for(j = jfirst; j <= jlast; j++) {
+	d = slen * fabs(tq[j] - tpi);
+	if(nndist[i] > d) {
+	  nndist[i] = d;
+#ifdef WHICH
+	  nnwhich[i] = j;
+#endif
+	}
+      }
+    }
+  }
+}
+
diff --git a/src/linalg.c b/src/linalg.c
new file mode 100755
index 0000000..193bbcf
--- /dev/null
+++ b/src/linalg.c
@@ -0,0 +1,263 @@
+/*
+   linalg.c
+
+   Home made linear algebra
+
+   Yes, really
+
+   $Revision: 1.11 $ $Date: 2016/09/30 10:57:20 $ 
+
+   Csumouter
+   Cwsumouter
+   Csum2outer
+   Cwsum2outer
+   Cquadform
+   Cquadform
+   Cbiform
+   Cwsumsymouter
+*/
+
+#include <R.h>
+#include <R_ext/Utils.h>
+#include "chunkloop.h"
+
+/* ............... matrices ..............................*/
+
+/* ........................sums of outer products ........*/
+
+/*
+    Csumouter
+    computes the sum of outer products of columns of x
+    y = sum[j] (x[,j] %o% x[,j])
+*/
+
+void Csumouter(x, n, p, y) 
+  double *x;    /* p by n matrix */
+  int *n, *p;
+  double *y;    /* output matrix p by p, initialised to zero */
+{
+  int N, P;
+  register int i, j, k, maxchunk;
+  register double xij, xkj;
+  register double *xcolj;
+  N = *n; 
+  P = *p;
+  OUTERCHUNKLOOP(j, N, maxchunk, 2048) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, N, maxchunk, 2048) {
+      xcolj = x + j * P;
+      for(i = 0; i < P; i++) {
+	xij = xcolj[i];
+	for(k = 0; k < P; k++) {
+	  xkj = xcolj[k];
+	  y[k * P + i] += xij * xkj;
+	}
+      }
+    }
+  }
+}
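+
+/*
+  Editor's sketch of a direct call (toy data, illustrative only; in the
+  package these routines are reached from R via .C()):
+*/
+#if (1 == 0)
+  {
+    /* x is 2 x 3, stored column-major as in R:
+       columns (1,2), (3,4), (5,6) */
+    double x[6] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
+    double y[4] = {0.0, 0.0, 0.0, 0.0};  /* 2 x 2 output, zeroed */
+    int n = 3, p = 2;
+    Csumouter(x, &n, &p, y);
+    /* y is now {35, 44, 44, 56}, i.e. the 2 x 2 matrix
+       sum_j x[,j] %o% x[,j] = [35 44; 44 56] */
+  }
+#endif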
+
+/*
+    Cwsumouter
+    computes the weighted sum of outer products of columns of x
+    y = sum[j] (w[j] * x[,j] %o% x[,j])
+*/
+
+void Cwsumouter(x, n, p, w, y) 
+  double *x;    /* p by n matrix */
+  int *n, *p;
+  double *w;    /* weight vector, length n */
+  double *y;    /* output matrix p by p, initialised to zero */
+{
+  int N, P;
+  register int i, j, k, maxchunk;
+  register double wj, xij, wjxij, xkj;
+  register double *xcolj;
+  N = *n; 
+  P = *p;
+  OUTERCHUNKLOOP(j, N, maxchunk, 2048) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, N, maxchunk, 2048) {
+      wj = w[j];
+      xcolj = x + j * P;
+      for(i = 0; i < P; i++) {
+	xij = xcolj[i];
+	wjxij = wj * xij;
+	for(k = 0; k < P; k++) {
+	  xkj = xcolj[k];
+	  y[k * P + i] += wjxij * xkj;
+	}
+      }
+    }
+  }
+}
+
+/*
+    Csum2outer
+    computes the sum of outer products of columns of x and y
+    z = sum[j] (x[,j] %o% y[,j])
+*/
+
+void Csum2outer(x, y, n, px, py, z) 
+   double *x, *y;    /* matrices (px by n) and (py by n) */
+   int *n, *px, *py;
+   double *z;    /* output matrix px by py, initialised to zero */
+{
+  int N, Px, Py;
+  register int i, j, k, maxchunk;
+  register double xij, ykj;
+  register double *xcolj, *ycolj;
+  N = *n; 
+  Px = *px;
+  Py = *py;
+  OUTERCHUNKLOOP(j, N, maxchunk, 2048) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, N, maxchunk, 2048) {
+      xcolj = x + j * Px;
+      ycolj = y + j * Py;
+      for(i = 0; i < Px; i++) {
+	xij = xcolj[i];
+	for(k = 0; k < Py; k++) {
+	  ykj = ycolj[k];
+	  z[k * Px + i] += xij * ykj;
+	}
+      }
+    }
+  }
+}
+
+/*
+    Cwsum2outer
+    computes the weighted sum of outer products of columns of x and y
+    z = sum[j] (w[j] * x[,j] %o% y[,j])
+*/
+
+void Cwsum2outer(x, y, n, px, py, w, z) 
+   double *x, *y;    /* matrices (px by n) and (py by n) */
+   int *n, *px, *py;
+   double *w;    /* weight vector, length n */
+   double *z;    /* output matrix px by py, initialised to zero */
+{
+  int N, Px, Py;
+  register int i, j, k, maxchunk;
+  register double wj, xij, wjxij, ykj;
+  register double *xcolj, *ycolj;
+  N = *n; 
+  Px = *px;
+  Py = *py;
+  OUTERCHUNKLOOP(j, N, maxchunk, 2048) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, N, maxchunk, 2048) {
+      wj = w[j];
+      xcolj = x + j * Px;
+      ycolj = y + j * Py;
+      for(i = 0; i < Px; i++) {
+	xij = xcolj[i];
+	wjxij = wj * xij;
+	for(k = 0; k < Py; k++) {
+	  ykj = ycolj[k];
+	  z[k * Px + i] += wjxij * ykj;
+	}
+      }
+    }
+  }
+}
+
+/* ........................quadratic/bilinear forms ......*/
+
+/*
+    computes the quadratic form values
+    y[j] = t(x[,j]) %*% v %*% x[,j]
+*/
+
+void Cquadform(x, n, p, v, y) 
+  double *x;    /* p by n matrix */
+  int *n, *p;
+  double *v;    /* p by p matrix */
+  double *y;    /* output vector, length n */
+{
+  int N, P;
+  register int i, j, k, maxchunk;
+  register double xij, xkj, vik, yj;
+  register double *xcolj;
+  N = *n; 
+  P = *p;
+  OUTERCHUNKLOOP(j, N, maxchunk, 2048) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, N, maxchunk, 2048) {
+      xcolj = x + j * P;
+      yj = 0;
+      for(i = 0; i < P; i++) {
+	xij = xcolj[i];
+	for(k = 0; k < P; k++) {
+	  xkj = xcolj[k];
+	  vik = v[k * P + i];
+	  yj += xij * vik * xkj;
+	}
+      }
+      y[j] = yj;
+    }
+  }
+}
+
+/*
+    computes the bilinear form values
+    z[j] = t(x[,j]) %*% v %*% y[,j]
+*/
+
+void Cbiform(x, y, n, p, v, z) 
+     double *x, *y;    /* p by n matrices */
+     int *n, *p;
+     double *v;    /* p by p matrix */
+     double *z;    /* output vector, length n */
+{
+  int N, P;
+  register int i, j, k, maxchunk;
+  register double xij, vik, ykj, zj;
+  register double *xcolj, *ycolj;
+  N = *n; 
+  P = *p;
+  OUTERCHUNKLOOP(j, N, maxchunk, 2048) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, N, maxchunk, 2048) {
+      xcolj = x + j * P;
+      ycolj = y + j * P;
+      zj = 0;
+      for(i = 0; i < P; i++) {
+	xij = xcolj[i];
+	for(k = 0; k < P; k++) {
+	  ykj = ycolj[k];
+	  vik = v[k * P + i];
+	  zj += xij * vik * ykj;
+	}
+      }
+      z[j] = zj;
+    }
+  }
+}
+
+/* ............... 3D arrays ...................... */
+
+#undef FNAME
+#undef WEIGHTED
+
+/*
+  sumsymouter
+  computes the sum of outer products 
+  x[,i,j] %o% x[,j,i]  over all pairs i, j
+*/
+
+#define FNAME Csumsymouter
+#include "sumsymouter.h"
+#undef FNAME
+
+/*
+  wsumsymouter
+  computes the weighted sum of outer products 
+  w[i,j] * (x[,i,j] %o% x[,j,i])  over all pairs i, j
+*/
+
+#define FNAME Cwsumsymouter
+#define WEIGHTED
+#include "sumsymouter.h"
+#undef FNAME
+#undef WEIGHTED
diff --git a/src/lincrossdist.c b/src/lincrossdist.c
new file mode 100644
index 0000000..977fe48
--- /dev/null
+++ b/src/lincrossdist.c
@@ -0,0 +1,85 @@
+#include <R.h>
+#include <R_ext/Utils.h>
+#include "chunkloop.h"
+
+/* 
+   lincrossdist.c
+
+   Shortest-path distances between pairs of points in linear network
+
+   $Revision: 1.3 $  $Date: 2012/10/13 03:45:41 $
+
+   lincrossdist
+
+*/
+
+#define DPATH(I,J) dpath[(I) + Nv * (J)]
+#define ANSWER(I,J) answer[(I) + Np * (J)]
+#define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2))
+
+void 
+lincrossdist(np, xp, yp,   /* data points from which distances are measured */
+	     nq, xq, yq,   /* data points to which distances are measured */
+	     nv, xv, yv,   /* network vertices */
+	     ns, from, to,  /* segments */
+	     dpath,  /* shortest path distances between vertices */
+	     psegmap, /* map from source points to segments */
+	     qsegmap, /* map from target points to segments */
+	     /* OUTPUT */
+	     answer  /* shortest path distances between points */
+)
+  int *np, *nq, *nv, *ns;
+  int *from, *to, *psegmap, *qsegmap; /* integer vectors (mappings) */
+  double *xp, *yp, *xq, *yq, *xv, *yv; /* vectors of coordinates */
+  double *dpath, *answer; /* matrices */
+{
+  int Np, Nq, Nv, i, j, maxchunk;
+  int Psegi, Qsegj, nbi1, nbi2, nbj1, nbj2; 
+  double xpi, ypi, xqj, yqj;
+  double d, dPiV1, dPiV2, dV1Qj, dV2Qj, d11, d12, d21, d22; 
+
+  Np = *np;
+  Nq = *nq;
+  Nv = *nv;
+
+  OUTERCHUNKLOOP(i, Np, maxchunk, 1024) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, Np, maxchunk, 1024) {
+      xpi = xp[i];
+      ypi = yp[i];
+      Psegi = psegmap[i];
+      nbi1 = from[Psegi];
+      nbi2 = to[Psegi];
+      dPiV1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]);
+      dPiV2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]);
+      for(j = 0; j < Nq; j++) {
+	xqj = xq[j];
+	yqj = yq[j];
+	Qsegj = qsegmap[j];
+	if(Psegi == Qsegj) {
+	  /* points i and j lie on the same segment; use Euclidean distance */
+	  d = EUCLID(xpi, ypi, xqj, yqj);
+	} else {
+	  /* Shortest path from i to j passes through ends of segments;
+	     Calculate shortest of 4 possible paths from i to j
+	  */
+	  nbj1 = from[Qsegj];
+	  nbj2 = to[Qsegj];
+	  dV1Qj = EUCLID(xv[nbj1], yv[nbj1], xqj, yqj);
+	  dV2Qj = EUCLID(xv[nbj2], yv[nbj2], xqj, yqj);
+	  d11 = dPiV1 + DPATH(nbi1,nbj1) + dV1Qj;
+	  d12 = dPiV1 + DPATH(nbi1,nbj2) + dV2Qj;
+	  d21 = dPiV2 + DPATH(nbi2,nbj1) + dV1Qj;
+	  d22 = dPiV2 + DPATH(nbi2,nbj2) + dV2Qj;
+	  d = d11;
+	  if(d12 < d) d = d12;
+	  if(d21 < d) d = d21;
+	  if(d22 < d) d = d22;
+	}
+	/* write */
+	ANSWER(i,j) = d;
+      }
+    }
+  }
+}
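+
+/* Editorial note: for points on different segments the code above
+   evaluates
+
+     d(p_i, q_j) = min over a in {A_i, B_i}, b in {A_j, B_j} of
+                   |p_i - a| + dpath(a, b) + |b - q_j|
+
+   where A_i, B_i are the endpoints of the segment carrying p_i.
+   This is exact for straight segments, because a shortest path that
+   leaves a segment must exit through one of its two endpoints. */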
+
diff --git a/src/lineardisc.c b/src/lineardisc.c
new file mode 100755
index 0000000..8679821
--- /dev/null
+++ b/src/lineardisc.c
@@ -0,0 +1,316 @@
+#include <R.h>
+#include <R_ext/Utils.h>
+#include "chunkloop.h"
+
+/* 
+   lineardisc.c
+
+   Disc of radius r in linear network
+
+   $Revision: 1.12 $  $Date: 2016/07/15 13:56:07 $
+
+*/
+
+#define DPATH(I,J) dpath[(J) + Nv * (I)]
+
+#include "yesno.h"
+
+#undef DEBUG
+
+void 
+lineardisc(f, seg, /* centre of disc (local coords) */
+	   r,      /* radius of disc */
+	   nv, xv, yv,   /* network vertices */
+	   ns, from, to,  /* segments */
+	   dpath,  /* shortest path distances between vertices */
+	   lengths, /* segment lengths */
+	   allinside, boundary, dxv, nendpoints)
+     int *nv, *ns;
+     int *from, *to; /* integer vectors (mappings) */
+     double *f, *r; 
+     int *seg;
+     double *xv, *yv; /* vectors of coordinates of vertices */
+     double *dpath; /* matrix of shortest path distances between vertices */
+     double *lengths; /* vector of segment lengths */
+     /* OUTPUTS */
+     int *allinside, *boundary; /* vectors of status for each segment */
+     double *dxv; /* vector of distances for each vertex */
+     int *nendpoints;
+{
+  int Nv, Ns;
+  double f0, rad;
+  int seg0;
+
+  int i, A, B, fromi, toi, allin, bdry, reachable, nends, maxchunk;
+  double length0, dxA, dxB, dxAvi, dxBvi, residue;
+  double *resid; 
+  int *covered;
+
+  Nv = *nv;
+  Ns = *ns;
+
+  f0 = *f;
+  seg0 = *seg;
+  rad = *r;
+
+  /* endpoints of segment containing centre */
+  A = from[seg0];
+  B = to[seg0];
+
+  /* distances from x to  A and B */
+  length0 = lengths[seg0];
+  dxA = f0 * length0;
+  dxB = (1-f0) * length0;
+
+  /* visit vertices */
+  covered = (int *) R_alloc((size_t) Nv, sizeof(int));
+  resid = (double *) R_alloc((size_t) Nv, sizeof(double));
+
+  OUTERCHUNKLOOP(i, Nv, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, Nv, maxchunk, 16384) {
+      /* distance going through A */
+      dxAvi = dxA + DPATH(A,i);
+      /* distance going through B */
+      dxBvi = dxB + DPATH(B,i);
+      /* shortest path distance to this vertex */
+      dxv[i] = (dxAvi < dxBvi) ? dxAvi : dxBvi;
+      /* distance left to 'spend' from this vertex */
+      residue = rad - dxv[i];
+      resid[i] = (residue > 0)? residue : 0;
+      /* determine whether vertex i is inside the disc of radius r */
+      covered[i] = (residue >= 0);
+    }
+  }
+  /* 
+     Now visit line segments. 
+  */
+  nends = 0;
+
+  OUTERCHUNKLOOP(i, Ns, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, Ns, maxchunk, 16384) {
+      /* 
+	 Determine which line segments are completely inside the disc,
+	 and which cross the boundary.
+      */
+      if(i == seg0) {
+	/* initial segment: disc starts from centre (x, y) */
+	allin = covered[A] && covered[B];
+	bdry  = !allin;
+	if(bdry) {
+	  if(!covered[A]) nends++;
+	  if(!covered[B]) nends++;
+	}
+      } else {
+	/* another segment: disc extends in from either endpoint */
+	fromi = from[i];
+	toi   = to[i];
+	reachable = (covered[fromi] || covered[toi]);
+	if(reachable) {
+	  allin = covered[fromi] && covered[toi] && 
+                     (resid[fromi] + resid[toi] >= lengths[i]);
+	  bdry = !allin;
+	} else allin = bdry = NO;
+	if(bdry) {
+	  if(covered[fromi]) nends++;
+	  if(covered[toi]) nends++;
+	}
+      }
+      allinside[i] = allin;
+      boundary[i] = bdry;
+    }
+  }
+  *nendpoints = nends;
+}
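+
+/* Editorial note: the disc logic above rests on two observations.
+   A vertex v lies inside the disc iff dxv[v] <= r, and the disc can
+   'spend' resid[v] = r - dxv[v] further along any segment incident
+   to v.  Hence a segment [u,w] lies entirely inside the disc iff
+   both endpoints are covered and resid[u] + resid[w] >= length(u,w),
+   i.e. the two arcs entering from either end jointly cover it;
+   otherwise each covered endpoint contributes one endpoint of the
+   disc in the interior of that segment.  (The initial segment is
+   handled separately, growing outward from the centre.) */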
+
+/* ------------------------------------------------- */
+/*   count endpoints of several discs in a network   */
+/* ------------------------------------------------- */
+
+void 
+Ccountends(np, f, seg, /* centres of discs (local coords) */
+	  r,                /* radii of discs */
+	  nv, xv, yv,   /* network vertices */
+	  ns, from, to,  /* network segments */
+	  dpath,  /* shortest path distances between vertices */
+	  lengths, /* segment lengths */
+	  toler, /* tolerance */
+	  nendpoints /* output counts of endpoints */
+	  )
+     int *np, *nv, *ns;
+     int *from, *to; /* integer vectors (mappings) */
+     double *f, *r; 
+     int *seg;
+     double *xv, *yv; /* vectors of coordinates of vertices */
+     double *dpath; /* matrix of shortest path distances between vertices */
+     double *lengths; /* vector of segment lengths */
+     double *toler; /* tolerance for merging endpoints and vertices */
+     /* OUTPUT */
+     int *nendpoints;
+{
+  int Np, Nv, Ns;
+  double f0, rad;
+  int seg0;
+
+  int i, m, A, B, fromi, toi, reachable, nends, maxchunk, covfrom, covto, allin;
+  double length0, dxA, dxB, dxAvi, dxBvi, dxvi, residue, resfrom, resto, tol;
+  double *resid; 
+  int *covered, *terminal;
+
+  Np = *np;
+  Nv = *nv;
+  Ns = *ns;
+  tol = *toler;
+
+#ifdef DEBUG
+  Rprintf("\nTolerance = %lf\n", tol);
+#endif
+
+  covered = (int *) R_alloc((size_t) Nv, sizeof(int));
+  terminal = (int *) R_alloc((size_t) Nv, sizeof(int));
+  resid = (double *) R_alloc((size_t) Nv, sizeof(double));
+
+  /* loop over centre points */
+  OUTERCHUNKLOOP(m, Np, maxchunk, 256) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(m, Np, maxchunk, 256) {
+
+      f0 = f[m];
+      seg0 = seg[m];
+      rad = r[m];
+
+#ifdef DEBUG
+      Rprintf("\nCentre point %d lies in segment %d\n", m, seg0);
+#endif
+
+      /* endpoints of segment containing centre */
+      A = from[seg0];
+      B = to[seg0];
+
+      /* distances from centre to A and B */
+      length0 = lengths[seg0];
+      dxA = f0 * length0;
+      dxB = (1-f0) * length0;
+
+#ifdef DEBUG
+      Rprintf("Distances to endpoints: dxA=%lf, dxB=%lf\n", dxA, dxB);
+#endif
+
+      nends = 0;
+
+      /* visit vertices */
+      for(i = 0; i < Nv; i++) {
+#ifdef DEBUG
+	Rprintf("\nConsidering vertex %d\n", i);
+#endif
+	/* distance going through A */
+	dxAvi = dxA + DPATH(A,i);
+	/* distance going through B */
+	dxBvi = dxB + DPATH(B,i);
+	/* shortest path distance to this vertex */
+	dxvi = (dxAvi < dxBvi) ? dxAvi : dxBvi;
+	/* distance left to 'spend' from this vertex */
+	residue = rad - dxvi;
+#ifdef DEBUG
+	Rprintf("dxAvi = %lf; dxBvi = %lf; residue = %lf\n", 
+		dxAvi, dxBvi, residue);
+#endif
+	if(residue > tol) {
+	  resid[i] = residue;
+	  covered[i] = YES;
+	  terminal[i] = NO;
+#ifdef DEBUG
+	  Rprintf("Vertex is covered\n");
+#endif
+	} else if(residue < -tol) {
+	  resid[i] = 0;
+	  covered[i] = terminal[i] = NO;
+#ifdef DEBUG
+	  Rprintf("Vertex is not covered\n");
+#endif
+	} else {
+	  /* vertex is within 'tol' of an endpoint 
+	   - deem it to be one 
+	  */
+	  resid[i] = 0;
+	  covered[i] = terminal[i] = YES;
+	  /* vertex is an endpoint of disc */
+	  ++nends;  
+#ifdef DEBUG
+	  Rprintf("Vertex is a terminal endpoint\n");
+#endif
+	}
+      }
+
+#ifdef DEBUG
+      Rprintf("%d terminal endpoints\n", nends);
+#endif
+
+      /* 
+	 Now visit line segments 
+	 to count any endpoints that are interior to the segments.
+      */
+
+      for(i = 0; i < Ns; i++) {
+	/* 
+	   Determine which line segments are completely inside the disc,
+	   and which cross the boundary.
+	*/
+	if(i == seg0) {
+	  /* initial segment: disc starts from (x0, y0) */
+	  if(!covered[A]) nends++;
+	  if(!covered[B]) nends++;
+#ifdef DEBUG
+	  if(!covered[A]) Rprintf("A not covered\n");
+	  if(!covered[B]) Rprintf("B not covered\n");
+#endif
+	} else {
+	  /* another segment: disc extends in from either endpoint */
+	  fromi = from[i];
+	  toi   = to[i];
+	  covfrom = covered[fromi];
+	  covto   = covered[toi];
+	  resfrom = resid[fromi];
+	  resto   = resid[toi];
+	  reachable = covfrom || covto;
+#ifdef DEBUG
+	  residue = resfrom + resto - lengths[i];
+	  Rprintf("%d: %s %s: %lf + %lf - %lf = %lf sign %s\n", 
+		  i,
+		  (terminal[fromi]) ? "T" : ((covfrom) ? "Y" : "N"),
+		  (terminal[toi]) ? "T" : ((covto) ? "Y" : "N"),
+		  resfrom, resto, lengths[i], residue,
+		  (residue < 0) ? "-" : ((residue > 0) ? "+" : "0"));
+#endif
+	  if(reachable) {
+	    residue = resfrom + resto - lengths[i];
+	    allin = covfrom && covto && (residue >= 0);
+#ifdef DEBUG
+	    if(allin) {
+	      Rprintf("Covered\n"); 
+	    } else if((terminal[fromi] || terminal[toi]) &&
+		      (residue >= - tol * lengths[i])) {
+		Rprintf("Deemed to be covered\n"); 
+	    } else Rprintf("Reachable\n");
+#endif
+	    allin = allin || 
+	      ((terminal[fromi] || terminal[toi]) &&
+	       (residue >= - tol));
+	    if(!allin) {
+	      /* segment is not entirely covered by disc
+		 - infer endpoint(s) in interior of segment */
+	      if(covfrom && !terminal[fromi]) nends++;
+	      if(covto && !terminal[toi]) nends++;
+#ifdef DEBUG
+	      if(covfrom && !terminal[fromi]) Rprintf("fromi => end\n");
+	      if(covto && !terminal[toi]) Rprintf("toi => end\n");
+#endif
+	    }
+	  }
+	}
+      }
+      nendpoints[m] = nends;
+    }
+  }
+}
diff --git a/src/linearradius.c b/src/linearradius.c
new file mode 100644
index 0000000..e4892bb
--- /dev/null
+++ b/src/linearradius.c
@@ -0,0 +1,79 @@
+#include <R.h>
+#include <R_ext/Utils.h>
+#include "chunkloop.h"
+
+/* 
+   linearradius.c
+
+   Bounding radius in linear network
+
+   $Revision: 1.1 $  $Date: 2016/07/19 06:52:57 $
+
+*/
+
+#define DPATH(I,J) dpath[(J) + Nv * (I)]
+
+#include "yesno.h"
+
+#undef DEBUG
+
+void 
+linearradius(ns, from, to,  /* network segments */
+	     lengths, /* segment lengths */
+	     nv, dpath,  /* shortest path distances between vertices */
+	     huge, 
+	     result)
+     int *nv, *ns;
+     int *from, *to; /* integer vectors (mappings) */
+     double *dpath; /* matrix of shortest path distances between vertices */
+     double *lengths; /* vector of segment lengths */
+     double *huge; /* very large value */
+     double *result; 
+{
+  int Nv, Ns;
+  int i, j, A, B, C, D;
+  double AB, AC, AD, BC, BD, CD;
+  double sAij, sBij, sAiMax, sBiMax, smin;
+  int maxchunk;
+
+  Nv = *nv;
+  Ns = *ns;
+  smin = *huge;
+
+  OUTERCHUNKLOOP(i, Ns, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, Ns, maxchunk, 16384) {
+      /* indices of endpoints of segment i */
+      A = from[i];
+      B = to[i];
+      AB = lengths[i];
+      sAiMax = sBiMax = AB/2.0;
+      for(j = 0; j < Ns; j++) {
+	if(j != i) {
+	  /* indices of endpoints of segment j */
+	  C = from[j];
+	  D = to[j];
+	  CD = lengths[j];
+	  AC = DPATH(A,C);
+	  AD = DPATH(A,D);
+	  BC = DPATH(B,C);
+	  BD = DPATH(B,D);
+	  /* max dist from A to any point in segment j */
+	  sAij = (AD > AC + CD) ? AC + CD :
+ 	          (AC > AD + CD) ? AD + CD : (AC + AD + CD)/2.0;
+	  /* max dist from B to any point in segment j */
+	  sBij = (BD > BC + CD) ? BC + CD : 
+  	          (BC > BD + CD) ? BD + CD : (BC + BD + CD)/2.0;
+	  /* row-wise maximum */
+	  if(sAij > sAiMax) sAiMax = sAij;
+	  if(sBij > sBiMax) sBiMax = sBij;
+	}
+      }
+      if(sAiMax < smin) smin = sAiMax;
+      if(sBiMax < smin) smin = sBiMax;
+    }
+  }
+
+  *result = smin;
+}
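+
+/* Editorial note: for an endpoint A of segment i and another segment
+   j = [C,D] of length CD, the farthest point of j from A lies where
+   the two routes into j (via C and via D) meet, at network distance
+   (AC + AD + CD)/2; when one route dominates outright, the maximum
+   is attained at the far endpoint instead, giving the AC + CD and
+   AD + CD cases in the ternary expressions above.  The result is the
+   minimum, over all segment endpoints, of this row-wise maximum:
+   the bounding radius measured from the most central endpoint. */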
+
diff --git a/src/linequad.c b/src/linequad.c
new file mode 100644
index 0000000..67ee582
--- /dev/null
+++ b/src/linequad.c
@@ -0,0 +1,35 @@
+#include <R.h>
+#include <math.h>
+#include "yesno.h"
+
+/* 
+   linequad.c
+
+   make a quadrature scheme on a linear network
+
+   Clinequad    unmarked pattern
+   ClineMquad   multitype pattern
+
+   $Revision: 1.5 $  $Date: 2016/10/03 08:43:57 $ 
+
+ */
+
+#define SWAP(X,Y,TMP) TMP = Y; Y = X; X = TMP
+
+#undef HUH
+
+#define FUNNAME Clinequad
+#define FMKNAME ClineMquad
+#undef ALEA
+#include "linequad.h"
+#undef FUNNAME
+#undef FMKNAME
+
+#define FUNNAME ClineRquad
+#define FMKNAME ClineRMquad
+#define ALEA
+#include "linequad.h"
+#undef FUNNAME
+#undef FMKNAME
+#undef ALEA
+
diff --git a/src/linequad.h b/src/linequad.h
new file mode 100644
index 0000000..043c5f9
--- /dev/null
+++ b/src/linequad.h
@@ -0,0 +1,553 @@
+/* 
+
+  linequad.h
+
+  Template code, #included several times in linequad.c
+
+  Macros used:
+  FUNNAME    function name (unmarked version)
+  FMKNAME    function name (marked version)
+  ALEA       #defined if grid location should be randomised 
+  HUH        #defined if debugging is on
+  SWAP       swap macro
+
+  $Revision: 1.2 $ $Date: 2016/10/04 06:24:22 $
+
+*/
+
+void FUNNAME(ns, from, to, 
+	     nv, xv, yv, 
+	     eps,
+	     ndat,             sdat, tdat, wdat,
+	     ndum, xdum, ydum, sdum, tdum, wdum,
+	     maxscratch)
+     /* 
+	A linear network with *ns segments and *nv vertices
+	is specified by the vectors from, to, xv, yv.
+
+	Data points on the network are specified by *ndat, sdat, tdat.
+	*** Assumed to be sorted in order of 'sdat' ***
+
+	Dummy points will be placed every 'eps' units along each segment.
+
+	Output vectors:
+	     wdat   quadrature weights for the data points
+	     wdum   quadrature weights for the dummy points
+
+	     xdum, |
+	     ydum, | coordinates of dummy points
+	     sdum, |
+	     tdum  |
+	    
+	 Space must be allocated for sum(ceiling(lengths/eps)) dummy points. 
+	 
+        
+      */
+     int *ns; /* number of segments */
+     int *from, *to; /* endpoints of each segment */
+     int *nv; /* number of vertices */
+     double *xv, *yv; /* Cartesian coords of vertices */
+     double *eps; /* desired spacing of dummy points */
+     int *ndat, *ndum; /* number of data & dummy points */
+     int *sdat, *sdum; /* segment id (local coordinate) */
+     double *tdat, *tdum; /* location (local coordinate) */
+     double *wdat, *wdum; /* quadrature weights */
+     double *xdum, *ydum; /* spatial coordinates of dummy points */
+     int *maxscratch;
+{
+  int Nseg, Ndat, Ndum, Lmax, i, j, k, ll, m, fromi, toi;
+#ifdef HUH
+  int Nvert;
+#endif
+  int SegmentForData, nwhole, nentries, npieces, npieces1;
+  double x0, y0, x1, y1, dx, dy;
+  double seglength, ratio, epsilon, rump, epsfrac, rumpfrac, gridstart;
+  double tfirst, tlast, tcurrent, plen, w;
+
+  int *serial, *count, *pieceid;
+  char *isdata;
+  double *tvalue, *pieceweight;
+
+  Nseg  = *ns;
+  Ndat  = *ndat;
+
+  Ndum = 0;
+  Lmax = *maxscratch;
+
+  epsilon = *eps;
+
+#ifdef HUH
+  Nvert = *nv;
+  Rprintf("Nseg=%d, Nvert=%d, Ndat=d, Lmax = %d\n\n", Nseg, Nvert, Ndat, Lmax);
+#endif
+
+  /* allocate scratch space, one for each data/dummy point in current segment */
+  serial = (int *) R_alloc(Lmax, sizeof(int));
+  isdata = (char *) R_alloc(Lmax, sizeof(char));
+  tvalue = (double *) R_alloc(Lmax, sizeof(double));
+  pieceid = (int *) R_alloc(Lmax, sizeof(int));
+
+  /* allocate scratch space, one for each piece of current segment */
+  count = (int *) R_alloc(Lmax, sizeof(int));
+  pieceweight = (double *) R_alloc(Lmax, sizeof(double));
+
+  /* 
+     initialise pointer at start of point pattern
+     Determine which segment contains first point
+  */
+  k = 0;
+  SegmentForData = (Ndat > 0) ? sdat[0] : -1;
+
+#ifdef ALEA
+  GetRNGstate();
+#endif
+
+  /* loop over line segments */
+  for(i = 0; i < Nseg; i++) {
+
+#ifdef HUH
+    Rprintf("Segment %d\n", i);
+#endif
+
+    /* endpoints of segment */
+    fromi = from[i];
+    toi   = to[i];
+
+    x0 = xv[fromi];
+    y0 = yv[fromi];
+    x1 = xv[toi];
+    y1 = yv[toi];
+
+    dx = x1 - x0;
+    dy = y1 - y0;
+    seglength = sqrt(dx * dx + dy * dy);
+
+    /* divide segment into pieces of length eps 
+       with shorter bits at each end */
+    ratio = seglength/epsilon;
+    nwhole = (int) floor(ratio);
+    if(nwhole > 2 && ratio - nwhole < 0.5) --nwhole;
+    rump = (seglength - nwhole * epsilon)/2.0;
+    epsfrac = epsilon/seglength;
+    rumpfrac = rump/seglength;
+    /* 
+       There are nwhole+2 pieces, with endpoints
+       0, rumpfrac, rumpfrac+epsfrac, rumpfrac+2*epsfrac, ..., 1-rumpfrac, 1
+    */
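+    /* 
+       Worked example (editorial): seglength = 1 and eps = 0.3 give
+       ratio = 3.33, so nwhole = 3 is decremented to 2, rump = 0.2,
+       and the piece endpoints are 0, 0.2, 0.5, 0.8, 1.  Without ALEA
+       the dummy points then land at the midpoints 0.1, 0.35, 0.65, 0.9.
+    */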
+
+    /* Now place dummy points in these pieces */
+#ifdef ALEA
+    tfirst = rumpfrac * unif_rand();
+#else
+    tfirst = rumpfrac/2.0;
+#endif
+#ifdef HUH
+    Rprintf("\tnwhole=%d, epsfrac=%lf, rumpfrac=%lf, tfirst=%lf\n", 
+	    nwhole, epsfrac, rumpfrac, tfirst);
+    Rprintf("\tsegment length %lf divided into %d pieces\n",
+	    seglength, nwhole+2);
+#endif
+
+    /* create a new dummy point in each piece */
+#ifdef HUH
+    Rprintf("\tMaking left dummy point %d\n", Ndum);
+#endif
+    tvalue[0] = tfirst;
+    serial[0] = Ndum;  
+    isdata[0] = NO;
+    count[0] = 1;
+    pieceid[0] = 0;
+    xdum[Ndum] = x0 + dx * tfirst;
+    ydum[Ndum] = y0 + dy * tfirst;
+    sdum[Ndum] = i;
+    tdum[Ndum] = tfirst;
+    ++Ndum;
+    if(nwhole > 0) {
+#ifdef HUH
+      Rprintf("\tMaking %d middle dummy points\n", nwhole);
+#endif
+#ifdef ALEA
+      gridstart = rumpfrac - unif_rand() * epsfrac;
+#else
+      gridstart = rumpfrac - epsfrac/2.0;
+#endif
+      for(j = 1; j <= nwhole; j++) {
+	serial[j] = Ndum;
+	tvalue[j] = tcurrent = gridstart + ((double) j) * epsfrac;
+	isdata[j] = NO;
+	count[j] = 1;
+	pieceid[j] = j;
+	xdum[Ndum] = x0 + dx * tcurrent;
+	ydum[Ndum] = y0 + dy * tcurrent;
+	sdum[Ndum] = i;
+	tdum[Ndum] = tcurrent;
+	++Ndum;
+      }
+    } 
+    j = nwhole + 1;
+#ifdef HUH
+    Rprintf("\tMaking right dummy point %d\n", Ndum);
+#endif
+    serial[j] = Ndum;
+    isdata[j] = NO;
+    tvalue[j] = tlast = 1.0 - tfirst;
+    count[j] = 1;
+    pieceid[j] = j;
+    xdum[Ndum] = x0 + dx * tlast;
+    ydum[Ndum] = y0 + dy * tlast;
+    sdum[Ndum] = i;
+    tdum[Ndum] = tlast;
+    ++Ndum;
+
+    nentries = npieces = nwhole + 2;
+    npieces1 = npieces-1;
+
+    /* add any data points lying on current segment i */
+    while(SegmentForData == i) {
+#ifdef HUH
+      Rprintf("\tData point %d lies on segment %d\n", k, i);
+#endif
+      serial[nentries] = k;
+      tvalue[nentries] = tcurrent = tdat[k];
+      isdata[nentries] = YES;
+      /* determine which piece contains the data point */
+      ll = (int) ceil((tcurrent - rumpfrac)/epsfrac);
+      if(ll < 0) ll = 0; else if(ll >= npieces) ll = npieces1;
+#ifdef HUH
+      Rprintf("\tData point %d mapped to piece %d\n", k, ll);
+#endif
+      count[ll]++;
+      pieceid[nentries] = ll;
+      ++nentries;
+      ++k;
+      SegmentForData = (k < Ndat) ? sdat[k] : -1;
+    }
+
+    /* compute counting weights for each piece of segment */
+#ifdef HUH
+    Rprintf("\tcounting weights..\n");
+#endif
+    for(ll = 0; ll < npieces; ll++) {
+      plen = (ll == 0 || ll == npieces1)? rump : epsilon;
+      pieceweight[ll] = plen/count[ll];
+    }
+    
+    /* apply weights to data/dummy points */
+#ifdef HUH
+    Rprintf("\tdistributing weights..\n");
+#endif
+    for(j = 0; j < nentries; j++) {
+      m = serial[j];
+      ll = pieceid[j];
+      if(ll >= 0 && ll < npieces) {
+	w = pieceweight[ll];
+	if(isdata[j]) {
+#ifdef HUH
+	  Rprintf("\t\tEntry %d: data point %d, piece %d\n", j, m, ll);
+#endif
+	  wdat[m] = w;
+	} else {
+#ifdef HUH
+	  Rprintf("\t\tEntry %d: dummy point %d, piece %d\n", j, m, ll);
+#endif
+	  wdum[m] = w;
+	}
+      }
+    }
+  }
+
+  *ndum = Ndum;
+
+#ifdef ALEA
+  PutRNGstate();
+#endif
+
+}
+
+void FMKNAME(ns, from, to, 
+	     nv, xv, yv, 
+	     eps,
+	     ntypes, 
+	     ndat, xdat, ydat, mdat, sdat, tdat, wdat,
+	     ndum, xdum, ydum, mdum, sdum, tdum, wdum,
+	     maxscratch)
+     /* 
+	A linear network with *ns segments and *nv vertices
+	is specified by the vectors from, to, xv, yv.
+
+	Data points on the network are specified by 
+	*ndat, xdat, ydat, mdat, sdat, tdat.
+	*** Assumed to be sorted in order of 'sdat' ***
+
+	Dummy points will be placed every 'eps' units along each segment
+	and replicated for each possible mark.
+	Each data point location is also replicated by dummy points
+	with each possible mark except the mark of the data point.
+
+	Output vectors:
+	     wdat   quadrature weights for the data points
+	     wdum   quadrature weights for the dummy points
+
+	     xdum, |
+	     ydum, | coordinates of dummy points
+	     sdum, |
+	     tdum  |
+	    
+             mdum    marks for dummy points
+
+	 Space must be allocated for 
+	 ntypes * sum(ceiling(lengths/eps)) dummy points. 
+	 
+        
+      */
+     int *ns; /* number of segments */
+     int *from, *to; /* endpoints of each segment */
+     int *nv; /* number of vertices */
+     double *xv, *yv; /* Cartesian coords of vertices */
+     double *eps; /* desired spacing of dummy points */
+     int *ndat, *ndum; /* number of data & dummy points */
+     int *ntypes; /* number of types */
+     double *xdat, *ydat; /* spatial coordinates of data points */
+     double *xdum, *ydum; /* spatial coordinates of dummy points */
+     int *mdat, *mdum; /* mark values */
+     int *sdat, *sdum; /* segment id (local coordinate) */
+     double *tdat, *tdum; /* location (local coordinate) */
+     double *wdat, *wdum; /* quadrature weights */
+     int *maxscratch;
+{
+  int Nseg, Ndat, Ndum, Ntypes, Lmax, i, k, ll, m, fromi, toi;
+#ifdef HUH
+  int Nvert;
+#endif
+  int SegmentForData, nwhole, nentries, npieces, npieces1, nMpieces;
+  int jpiece, jentry, jpdata, type, mcurrent;
+  double x0, y0, x1, y1, dx, dy, xcurrent, ycurrent;
+  double seglength, ratio, epsilon, rump, epsfrac, rumpfrac, gridstart;
+  double tfirst, tlast, tcurrent, plen, w;
+
+  int *serial, *count, *mkpieceid;
+  char *isdata;
+  double *tvalue, *countingweight;
+
+  Nseg  = *ns;
+  Ndat  = *ndat;
+  Ntypes = *ntypes;
+
+  Ndum = 0;
+  Lmax = *maxscratch;
+
+  epsilon = *eps;
+
+#ifdef HUH
+  Nvert = *nv;
+  Rprintf("Nseg=%d, Nvert=%d, Ndat=d, Lmax = %d\n\n", Nseg, Nvert, Ndat, Lmax);
+#endif
+
+  /* allocate scratch space, one for each data/dummy point in current segment */
+  serial = (int *) R_alloc(Lmax, sizeof(int));
+  isdata = (char *) R_alloc(Lmax, sizeof(char));
+  tvalue = (double *) R_alloc(Lmax, sizeof(double));
+  mkpieceid = (int *) R_alloc(Lmax, sizeof(int));
+
+  /* allocate scratch space, one for each piece of current segment */
+  count = (int *) R_alloc(Lmax, sizeof(int));
+  countingweight = (double *) R_alloc(Lmax, sizeof(double));
+
+  /* 
+     initialise pointer at start of point pattern
+     Determine which segment contains first point
+  */
+  k = 0;
+  SegmentForData = (Ndat > 0) ? sdat[0] : -1;
+
+#ifdef ALEA
+  GetRNGstate();
+#endif
+
+  /* loop over line segments */
+  for(i = 0; i < Nseg; i++) {
+
+#ifdef HUH
+    Rprintf("Segment %d\n", i);
+#endif
+
+    /* endpoints of segment */
+    fromi = from[i];
+    toi   = to[i];
+
+    x0 = xv[fromi];
+    y0 = yv[fromi];
+    x1 = xv[toi];
+    y1 = yv[toi];
+
+    dx = x1 - x0;
+    dy = y1 - y0;
+    seglength = sqrt(dx * dx + dy * dy);
+
+    /* divide segment into pieces of length eps 
+       with shorter bits at each end */
+    ratio = seglength/epsilon;
+    nwhole = (int) floor(ratio);
+    if(nwhole > 2 && ratio - nwhole < 0.5) --nwhole;
+    npieces = nwhole + 2;
+    rump = (seglength - nwhole * epsilon)/2.0;
+    epsfrac = epsilon/seglength;
+    rumpfrac = rump/seglength;
+    /* 
+       There are nwhole+2 pieces, with endpoints
+       0, rumpfrac, rumpfrac+epsfrac, rumpfrac+2*epsfrac, ..., 1-rumpfrac, 1
+    */
+
+    /* Now place dummy points in these pieces */
+#ifdef ALEA
+    tfirst = rumpfrac * unif_rand();
+    gridstart = rumpfrac - epsfrac * unif_rand();
+#else
+    tfirst = rumpfrac/2.0;
+    gridstart = rumpfrac - epsfrac/2.0;
+#endif
+    tlast = 1.0 - tfirst;
+#ifdef HUH
+    Rprintf("\tnwhole=%d, epsfrac=%lf, rumpfrac=%lf, tfirst=%lf\n", 
+	    nwhole, epsfrac, rumpfrac, tfirst);
+    Rprintf("\tsegment length %lf divided into %d pieces\n",
+	    seglength, npieces);
+#endif
+
+    /* 
+       'Marked pieces' of segment are numbered in order
+       (piece 0, mark 0), (piece 0, mark 1), ..., (piece 0, mark Ntypes-1),
+       (piece 1, mark 0), .....
+       
+       mkpieceid = type + pieceid * Ntypes
+
+    */
+
+
+#ifdef HUH
+      Rprintf("\tMaking %d x %d = %d dummy points\n", 
+	      npieces, Ntypes, npieces * Ntypes);
+#endif
+
+    /* create a new dummy point in each piece */
+    npieces1 = npieces-1;
+    for(jpiece = 0; jpiece < npieces; jpiece++) {
+      tcurrent = (jpiece == 0) ? tfirst :
+	(jpiece == npieces1) ? tlast : 
+	(gridstart + ((double) jpiece) * epsfrac);
+      xcurrent = x0 + dx * tcurrent;
+      ycurrent = y0 + dy * tcurrent;
+      
+      for(type = 0; type < Ntypes; type++) {
+	/* position in list of relevant data/dummy points */
+	jentry = type + jpiece * Ntypes; 
+	/* serial number of marked piece */
+	ll = jentry; 
+
+	tvalue[jentry] = tcurrent;
+	serial[jentry] = Ndum;  
+	isdata[jentry] = NO;
+	mkpieceid[jentry] = ll;
+
+	count[ll] = 1;
+
+	xdum[Ndum] = xcurrent;
+	ydum[Ndum] = ycurrent;
+	mdum[Ndum] = type;
+	sdum[Ndum] = i;
+	tdum[Ndum] = tcurrent;
+	++Ndum;
+      }
+    }
+    nentries = npieces * Ntypes;
+
+    /* handle any data points lying on current segment i */
+    while(SegmentForData == i) {
+#ifdef HUH
+      Rprintf("\tData point %d lies on segment %d\n", k, i);
+#endif
+      xcurrent = xdat[k];
+      ycurrent = ydat[k];
+      tcurrent = tdat[k];
+      mcurrent = mdat[k];
+      /* determine which piece contains the data point */
+      jpdata = (int) ceil((tcurrent - rumpfrac)/epsfrac);
+      if(jpdata < 0) jpdata = 0; else if(jpdata >= npieces) jpdata = npieces1;
+#ifdef HUH
+      Rprintf("\tData point %d falls in piece %d\n", k, jpdata);
+#endif
+      /* 
+	 copy data point, 
+	 and create dummy points at same location with different marks  
+      */
+      for(type = 0; type < Ntypes; type++) {
+	tvalue[nentries] = tcurrent;
+	ll = type + jpdata * Ntypes;
+	mkpieceid[nentries] = ll; 
+	count[ll]++;
+	if(type == mcurrent) {
+	  /* data point */
+	  isdata[nentries] = YES;
+	  serial[nentries] = k;
+	} else {
+	  /* create dummy point */
+	  isdata[nentries] = NO;
+	  serial[nentries] = Ndum;
+	  xdum[Ndum] = xcurrent;
+	  ydum[Ndum] = ycurrent;
+	  mdum[Ndum] = type;
+	  sdum[Ndum] = i;
+	  tdum[Ndum] = tcurrent;
+	  ++Ndum;
+	}
+	++nentries;
+      }
+
+      ++k;
+      SegmentForData = (k < Ndat) ? sdat[k] : -1;
+    }
+
+    /* compute counting weights for each piece of segment */
+#ifdef HUH
+    Rprintf("\tcounting weights..\n");
+#endif
+    for(jpiece = 0; jpiece < npieces; jpiece++) {
+      plen = (jpiece == 0 || jpiece == npieces1)? rump : epsilon;
+      for(type = 0; type < Ntypes; type++) {
+	ll = type + jpiece * Ntypes;
+	countingweight[ll] = plen/count[ll];
+      }
+    }
+    
+    /* apply weights to data/dummy points */
+#ifdef HUH
+    Rprintf("\tdistributing weights..\n");
+#endif
+    nMpieces = npieces * Ntypes;
+    for(jentry = 0; jentry < nentries; jentry++) {
+      m = serial[jentry];
+      ll = mkpieceid[jentry];
+      if(ll >= 0 && ll < nMpieces) {
+	w = countingweight[ll];
+	if(isdata[jentry]) {
+#ifdef HUH
+	  Rprintf("\t\tEntry %d: data point %d, piece %d\n", jentry, m, ll);
+#endif
+	  wdat[m] = w;
+	} else {
+#ifdef HUH
+	  Rprintf("\t\tEntry %d: dummy point %d, piece %d\n", jentry, m, ll);
+#endif
+	  wdum[m] = w;
+	}
+      }
+    }
+  }
+
+  *ndum = Ndum;
+
+#ifdef ALEA
+  PutRNGstate();
+#endif
+
+}
diff --git a/src/linknnd.c b/src/linknnd.c
new file mode 100644
index 0000000..6ff9f65
--- /dev/null
+++ b/src/linknnd.c
@@ -0,0 +1,31 @@
+#include <R.h>
+#include "yesno.h"
+
+/*
+
+  linknnd.c
+
+  k-th nearest neighbours in a linear network
+
+  Sparse representation of network
+
+  ! Data points must be ordered by segment index !
+
+
+  $Revision: 1.3 $  $Date: 2016/12/04 11:08:58 $
+
+ */
+
+#undef HUH
+
+#undef CROSS
+#define FNAME linknnd
+#include "linknnd.h"
+#undef FNAME
+
+#define CROSS
+#define FNAME linknncross
+#include "linknnd.h"
+#undef CROSS
+#undef FNAME
+
diff --git a/src/linknnd.h b/src/linknnd.h
new file mode 100644
index 0000000..fe00958
--- /dev/null
+++ b/src/linknnd.h
@@ -0,0 +1,165 @@
+/*
+
+  linknnd.h
+
+  k-th nearest neighbours in a linear network
+
+  Using sparse representation of network
+  ! Data points must be ordered by segment index !
+
+  This code is #included several times in linknnd.c
+
+  Macros required:
+  FNAME   Function name
+  CROSS   #defined for X-to-Y, undefined for X-to-X
+  HUH     debugging flag
+
+  $Revision: 1.2 $  $Date: 2016/12/04 12:34:19 $
+
+ */
+
+#define MAT(MATRIXNAME, INDEX, ORDER) MATRIXNAME[(ORDER) + (INDEX) * Kmax]
+#define NNDIST(INDEX, ORDER) MAT(nndist, (INDEX), (ORDER))
+#define NNWHICH(INDEX, ORDER) MAT(nnwhich, (INDEX), (ORDER))
+#define VDIST(INDEX, ORDER) MAT(dminvert, (INDEX), (ORDER))
+#define VWHICH(INDEX, ORDER) MAT(whichvert, (INDEX), (ORDER))
+
+#define UPDATENN(INDEX, D, J)	\
+  UpdateKnnList(D, J, \
+		nndist + (INDEX) * Kmax, \
+		nnwhich + (INDEX) * Kmax, \
+		Kmax, \
+		(double) 0.0)
+
+/* ................. */
+
+void FNAME(kmax,         /* number of neighbours required */
+	   np, sp, tp,   /* source data points (ordered by sp) */
+#ifdef CROSS
+	   nq, sq, tq,   /* target data points (ordered by sq) */
+#endif
+	   nv,           /* number of network vertices */
+	   ns, from, to, /* segments (pairs of vertices) */
+	   seglen,       /* segment lengths */
+	   huge,         /* value taken as infinity */
+	   tol,          /* tolerance for updating distances */
+	   /* OUTPUT */
+	   nndist,         /* distance from each source point to
+			      the nearest, ..., kth nearest target points */
+	   nnwhich         /* identifies which target points */
+	   )
+  int *kmax;
+  int *np, *nv, *ns;  /* number of points, vertices, segments */
+  int *sp, *from, *to; /* integer vectors (mappings) */
+  double *tp; /* fractional location coordinates */
+#ifdef CROSS
+  int *nq, *sq;
+  double *tq;
+#endif
+  double *huge, *tol;
+  double *seglen;
+  double *nndist;
+  int *nnwhich;
+{
+  int Np, Nv, Kmax, Nout, i, j, ivleft, ivright, jfirst, jlast, k, m;
+  double d, hugevalue, slen, tpi, deltad;
+  double *dminvert;  /* min dist from each vertex */
+  int *whichvert;   /* which min from each vertex */
+  void linvknndist();
+  int UpdateKnnList();
+
+#ifdef CROSS
+  int Nq;
+#else 
+#define Nq Np
+#define nq np
+#define sq sp
+#define tq tp
+#endif
+
+  Kmax = *kmax;
+  Np = *np;
+  Nv = *nv;
+  hugevalue = *huge;
+
+#ifdef CROSS
+  Nq = *nq;
+#endif
+
+  /* First compute min distances to target set from each vertex */
+#ifdef HUH
+  Rprintf("Computing distances from each vertex\n");
+#endif
+
+  dminvert = (double *) R_alloc(Nv * Kmax, sizeof(double));
+  whichvert = (int *) R_alloc(Nv * Kmax, sizeof(int));
+
+  linvknndist(kmax, nq, sq, tq, nv, ns, from, to, seglen, huge, tol, 
+	     dminvert, whichvert);
+
+#ifdef HUH
+  Rprintf("Initialise answer\n");
+#endif
+  /* initialise nn distances from source points */
+  Nout = Np * Kmax;
+  for(i = 0; i < Nout; i++) {
+    nndist[i] = hugevalue;
+    nnwhich[i] = -1;
+  }
+
+  /* run through all source points */
+#ifdef HUH
+  Rprintf("Run through source points\n");
+#endif
+  jfirst = 0;
+  for(i = 0; i < Np; i++) {
+    tpi = tp[i];
+    m = sp[i];   /* segment containing this point */
+    slen = seglen[m];
+    ivleft = from[m];
+    ivright = to[m];
+#ifdef HUH
+    Rprintf("Source point %d lies on segment %d = [%d,%d]\n", 
+	    i, m, ivleft, ivright);
+#endif
+    deltad = slen * tpi;
+#ifdef HUH
+    Rprintf("\tComparing to left endpoint %d, distance %lf\n", ivleft, deltad);
+#endif
+    for(k = 0; k < Kmax; k++)
+      UPDATENN(i, deltad + VDIST(ivleft, k), VWHICH(ivleft, k));
+
+    deltad = slen * (1.0 - tpi);
+#ifdef HUH
+   Rprintf("\tComparing to right endpoint %d, distance %lf\n", ivright, deltad);
+#endif
+    for(k = 0; k < Kmax; k++)
+      UPDATENN(i, deltad + VDIST(ivright, k), VWHICH(ivright, k));
+
+    /* find any target points in this segment */
+    while(jfirst < Nq && sq[jfirst] < m) jfirst++;
+    jlast = jfirst;
+    while(jlast < Nq && sq[jlast] == m) jlast++;
+    --jlast;
+    /* if there are no such points, then jlast < jfirst */
+    if(jfirst <= jlast) {
+      for(j = jfirst; j <= jlast; j++) {
+	d = slen * fabs(tq[j] - tpi);
+	UPDATENN(i, d, j);
+      }
+    }
+  }
+}
+
+#undef MAT
+#undef NNDIST
+#undef NNWHICH
+#undef VDIST
+#undef VWHICH
+#undef UPDATENN
+
+#ifndef CROSS
+#undef nq
+#undef Nq
+#undef sq
+#undef tq
+#endif
diff --git a/src/linnncross.c b/src/linnncross.c
new file mode 100644
index 0000000..6296a4b
--- /dev/null
+++ b/src/linnncross.c
@@ -0,0 +1,37 @@
+#include <R.h>
+
+/* 
+   linnncross.c
+
+   Shortest-path distances between nearest neighbours in linear network
+   One pattern to another pattern
+
+   $Revision: 1.1 $  $Date: 2013/10/21 02:01:29 $
+
+   linndcross      
+   linndxcross     
+
+*/
+
+#define DPATH(I,J) dpath[(I) + Nv * (J)]
+#define ANSWER(I,J) answer[(I) + Np * (J)]
+#define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2))
+
+/* definition of linndcross */
+#define FNAME linndcross
+#undef  EXCLU
+#define WHICH
+
+#include "linnncross.h"
+
+#undef  FNAME
+#undef  EXCLU
+#undef  WHICH
+
+/* definition of linndxcross */
+
+#define FNAME linndxcross
+#define EXCLU
+#define WHICH
+
+#include "linnncross.h"
diff --git a/src/linnncross.h b/src/linnncross.h
new file mode 100644
index 0000000..236107a
--- /dev/null
+++ b/src/linnncross.h
@@ -0,0 +1,136 @@
+/* 
+   linnncross.h
+
+   Function body definitions with macros
+
+   $Revision: 1.2 $  $Date: 2015/11/28 02:02:50 $
+
+   Macros used:
+   FNAME   name of function
+   EXCLU   whether serial numbers are provided
+   WHICH   whether 'nnwhich' is required
+
+*/
+
+void 
+FNAME(np, xp, yp,   /* data points 'from' */
+      nq, xq, yq,   /* data points 'to' */
+      nv, xv, yv,   /* network vertices */
+      ns, from, to,  /* segments */
+      dpath,  /* shortest path distances between vertices */
+      psegmap, /* map from data points to segments */
+      qsegmap, /* map from data points to segments */
+#ifdef EXCLU
+      idP, idQ, /* serial numbers for patterns p and q */
+#endif
+      huge, /* value taken as infinity */
+      /* OUTPUT */
+#ifdef WHICH
+      nndist,  /* nearest neighbour distance for each point */
+      nnwhich  /* identifies nearest neighbour */
+#else 
+      nndist  /* nearest neighbour distance for each point */
+#endif
+)
+  int *np, *nq, *nv, *ns;
+  int *from, *to, *psegmap, *qsegmap; /* integer vectors (mappings) */
+#ifdef EXCLU
+  int *idP, *idQ;
+#endif
+  double *xp, *yp, *xq, *yq, *xv, *yv; /* vectors of coordinates */
+  double *huge;
+  double *dpath; /* matrix */
+  double *nndist; /* nearest neighbour distance for each point */
+#ifdef WHICH
+  int *nnwhich; /* identifies nearest neighbour */
+#endif
+{
+  int Np, Nq, Nv, i, j;
+  int segPi, segQj, nbi1, nbi2, nbj1, nbj2; 
+  double d, xpi, ypi, xqj, yqj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; 
+  double dmin, hugevalue;
+#ifdef EXCLU
+  int idPi;
+#endif
+#ifdef WHICH
+  int whichmin;
+#endif 
+
+  Np = *np;
+  Nq = *nq;
+  Nv = *nv;
+  hugevalue = *huge;
+
+  /* initialise nn distances */
+  for(i = 0; i < Np; i++) {
+    nndist[i] = hugevalue;
+#ifdef WHICH
+    nnwhich[i] = -1;
+#endif
+  }
+
+  /* main loop */
+  for(i = 0; i < Np; i++) {
+    xpi = xp[i];
+    ypi = yp[i];
+#ifdef EXCLU
+    idPi = idP[i];
+#endif
+    segPi = psegmap[i];
+    nbi1 = from[segPi];
+    nbi2 = to[segPi];
+    dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]);
+    dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]);
+    dmin = nndist[i];
+#ifdef WHICH
+    whichmin = nnwhich[i];
+#endif
+    for(j = 0; j < Nq; j++) {
+#ifdef EXCLU
+      if(idQ[j] != idPi) {
+#endif
+      xqj = xq[j];
+      yqj = yq[j];
+      segQj = qsegmap[j];
+      /* compute path distance between i and j */
+      if(segPi == segQj) {
+	/* points i and j lie on the same segment; use Euclidean distance */
+	d = EUCLID(xpi, ypi, xqj, yqj);
+      } else {
+	/* Shortest path from i to j passes through ends of segments;
+	   Calculate shortest of 4 possible paths from i to j
+	*/
+	nbj1 = from[segQj];
+	nbj2 = to[segQj];
+	d1Xj = EUCLID(xv[nbj1], yv[nbj1], xqj, yqj);
+	d2Xj = EUCLID(xv[nbj2], yv[nbj2], xqj, yqj);
+	d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj;
+	d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj;
+	d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj;
+	d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj;
+	d = d11;
+	if(d12 < d) d = d12;
+	if(d21 < d) d = d21;
+	if(d22 < d) d = d22;
+      }
+      /* OK, distance between i and j is d */
+
+      /* update nn for point i */
+      if(d < dmin) {
+	dmin = d;
+#ifdef WHICH
+	whichmin = j;
+#endif
+      }
+#ifdef EXCLU
+    }
+#endif
+    }
+    /* commit nn distance for point i */
+    nndist[i] = dmin;
+#ifdef WHICH
+    nnwhich[i] = whichmin;
+#endif
+  }
+}
+
diff --git a/src/linnndist.c b/src/linnndist.c
new file mode 100755
index 0000000..020969a
--- /dev/null
+++ b/src/linnndist.c
@@ -0,0 +1,186 @@
+#include <R.h>
+
+/* 
+   linnndist.c
+
+   Shortest-path distances between nearest neighbours in linear network
+
+   $Revision: 1.1 $  $Date: 2013/10/21 02:01:14 $
+
+   linnndist
+   linnnwhich
+
+*/
+
+#define DPATH(I,J) dpath[(J) + Nv * (I)]
+#define ANSWER(I,J) answer[(J) + Np * (I)]
+#define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2))
+
+void 
+linnndist(np, xp, yp,   /* data points */
+	  nv, xv, yv,   /* network vertices */
+	  ns, from, to,  /* segments */
+	  dpath,  /* shortest path distances between vertices */
+	  segmap, /* map from data points to segments */
+	  huge, /* value taken as infinity */
+	  /* OUTPUT */
+	  answer  /* nearest neighbour distance for each point */
+)
+  int *np, *nv, *ns;
+  int *from, *to, *segmap; /* integer vectors (mappings) */
+  double *xp, *yp, *xv, *yv; /* vectors of coordinates */
+  double *huge;
+  double *dpath; /* matrix */
+  double *answer; /* vector of output values */
+{
+  int Np, Nv, i, j, Np1;
+  int segi, segj, nbi1, nbi2, nbj1, nbj2; 
+  double d, xpi, ypi, xpj, ypj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; 
+  double dmin, hugevalue;
+
+  Np = *np;
+  Nv = *nv;
+  Np1 = Np - 1;
+  hugevalue = *huge;
+
+  /* initialise nn distances */
+  for(i = 0; i < Np; i++)
+    answer[i] = hugevalue;
+
+  /* main loop */
+  for(i = 0; i < Np1; i++) {
+    xpi = xp[i];
+    ypi = yp[i];
+    segi = segmap[i];
+    nbi1 = from[segi];
+    nbi2 = to[segi];
+    dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]);
+    dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]);
+    dmin = answer[i];
+    for(j = i+1; j < Np; j++) {
+      xpj = xp[j];
+      ypj = yp[j];
+      segj = segmap[j];
+      /* compute path distance between i and j */
+      if(segi == segj) {
+	/* points i and j lie on the same segment; use Euclidean distance */
+	d = EUCLID(xpi, ypi, xpj, ypj);
+      } else {
+	/* Shortest path from i to j passes through ends of segments;
+	   Calculate shortest of 4 possible paths from i to j
+	*/
+	nbj1 = from[segj];
+	nbj2 = to[segj];
+	d1Xj = EUCLID(xv[nbj1], yv[nbj1], xpj, ypj);
+	d2Xj = EUCLID(xv[nbj2], yv[nbj2], xpj, ypj);
+	d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj;
+	d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj;
+	d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj;
+	d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj;
+	d = d11;
+	if(d12 < d) d = d12;
+	if(d21 < d) d = d21;
+	if(d22 < d) d = d22;
+      }
+      /* OK, distance between i and j is d */
+
+      /* update nn distance for point i */
+      if(d < dmin) dmin = d;
+      /* update nn distance for point j */
+      if(d < answer[j]) answer[j] = d;
+    }
+    /* commit nn distance for point i */
+    answer[i] = dmin;
+  }
+}
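+
+/* Editorial note: the loop above visits each unordered pair {i, j}
+   with j > i exactly once and updates the running nearest-neighbour
+   distances of both points, so each pair distance is computed once. */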
+
+
+void 
+linnnwhich(np, xp, yp,   /* data points */
+	   nv, xv, yv,   /* network vertices */
+	   ns, from, to,  /* segments */
+	   dpath,  /* shortest path distances between vertices */
+	   segmap, /* map from data points to segments */
+	   huge, /* value taken as infinity */
+	   /* OUTPUT */
+	   nndist,  /* nearest neighbour distance for each point */
+	   nnwhich  /* identifies nearest neighbour */
+)
+  int *np, *nv, *ns;
+  int *from, *to, *segmap; /* integer vectors (mappings) */
+  double *xp, *yp, *xv, *yv; /* vectors of coordinates */
+  double *huge;
+  double *dpath; /* matrix */
+  double *nndist; /* vector of output values */
+  int *nnwhich; /* vector of output values */
+{
+  int Np, Nv, i, j, Np1;
+  int segi, segj, nbi1, nbi2, nbj1, nbj2; 
+  double d, xpi, ypi, xpj, ypj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; 
+  double dmin, hugevalue;
+  int whichmin;
+
+  Np = *np;
+  Nv = *nv;
+  Np1 = Np - 1;
+  hugevalue = *huge;
+
+  /* initialise nn distances and identifiers */
+  for(i = 0; i < Np; i++) {
+    nndist[i] = hugevalue;
+    nnwhich[i] = -1;
+  }
+
+  /* main loop */
+  for(i = 0; i < Np1; i++) {
+    xpi = xp[i];
+    ypi = yp[i];
+    segi = segmap[i];
+    nbi1 = from[segi];
+    nbi2 = to[segi];
+    dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]);
+    dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]);
+    dmin = nndist[i];
+    whichmin = nnwhich[i];
+    for(j = i+1; j < Np; j++) {
+      xpj = xp[j];
+      ypj = yp[j];
+      segj = segmap[j];
+      if(segi == segj) {
+        /* points i and j lie on the same segment; use Euclidean distance */
+        d = EUCLID(xpi, ypi, xpj, ypj);
+      } else {
+        /* Shortest path from i to j passes through ends of segments;
+	   Calculate shortest of 4 possible paths from i to j
+	*/
+        nbj1 = from[segj];
+        nbj2 = to[segj];
+	d1Xj = EUCLID(xv[nbj1], yv[nbj1], xpj, ypj);
+	d2Xj = EUCLID(xv[nbj2], yv[nbj2], xpj, ypj);
+        d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj;
+	d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj;
+	d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj;
+	d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj;
+	d = d11;
+	if(d12 < d) d = d12;
+	if(d21 < d) d = d21;
+	if(d22 < d) d = d22;
+      }
+      /* OK, distance between i and j is d */
+
+      /* update nn for point i */
+      if(d < dmin) {
+	dmin = d;
+	whichmin = j;
+      }
+      /* update nn for point j */
+      if(d < nndist[j]) {
+	nndist[j] = d;
+	nnwhich[j] = i;
+      }
+    }
+    /* commit nn for point i */
+    nndist[i] = dmin;
+    nnwhich[i] = whichmin;
+  }
+}
diff --git a/src/linpairdist.c b/src/linpairdist.c
new file mode 100755
index 0000000..659d13e
--- /dev/null
+++ b/src/linpairdist.c
@@ -0,0 +1,83 @@
+#include <R.h>
+#include <R_ext/Utils.h>
+#include "chunkloop.h"
+
+/* 
+   linpairdist.c
+
+   Shortest-path distances between each pair of points in linear network
+
+   $Revision: 1.5 $  $Date: 2012/10/12 10:21:46 $
+
+   linpairdist
+
+*/
+
+#define DPATH(I,J) dpath[(I) + Nv * (J)]
+#define ANSWER(I,J) answer[(I) + Np * (J)]
+#define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2))
+
+void 
+linpairdist(np, xp, yp,   /* data points */
+	    nv, xv, yv,   /* network vertices */
+	    ns, from, to,  /* segments */
+	    dpath,  /* shortest path distances between vertices */
+	    segmap, /* map from data points to segments */
+	    /* OUTPUT */
+	    answer  /* shortest path distances between points */
+)
+  int *np, *nv, *ns;
+  int *from, *to, *segmap; /* integer vectors (mappings) */
+  double *xp, *yp, *xv, *yv; /* vectors of coordinates */
+  double *dpath, *answer; /* matrices */
+{
+  int Np, Nv, i, j, Np1, maxchunk;
+  int segi, segj, nbi1, nbi2, nbj1, nbj2; 
+  double d, xpi, ypi, xpj, ypj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; 
+
+  Np = *np;
+  Nv = *nv;
+  Np1 = Np - 1;
+
+  OUTERCHUNKLOOP(i, Np1, maxchunk, 1024) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, Np1, maxchunk, 1024) {
+      xpi = xp[i];
+      ypi = yp[i];
+      segi = segmap[i];
+      nbi1 = from[segi];
+      nbi2 = to[segi];
+      dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]);
+      dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]);
+      for(j = i+1; j < Np; j++) {
+	xpj = xp[j];
+	ypj = yp[j];
+	segj = segmap[j];
+	if(segi == segj) {
+	  /* points i and j lie on the same segment; use Euclidean distance */
+	  d = EUCLID(xpi, ypi, xpj, ypj);
+	} else {
+	  /* Shortest path from i to j passes through ends of segments;
+	     Calculate shortest of 4 possible paths from i to j
+	  */
+	  nbj1 = from[segj];
+	  nbj2 = to[segj];
+	  d1Xj = EUCLID(xv[nbj1], yv[nbj1], xpj, ypj);
+	  d2Xj = EUCLID(xv[nbj2], yv[nbj2], xpj, ypj);
+	  d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj;
+	  d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj;
+	  d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj;
+	  d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj;
+	  d = d11;
+	  if(d12 < d) d = d12;
+	  if(d21 < d) d = d21;
+	  if(d22 < d) d = d22;
+	}
+	/* write */
+	ANSWER(i,j) = ANSWER(j,i) = d;
+      }
+      ANSWER(i,i) = 0;
+    }
+  }
+}
+
diff --git a/src/linvdist.c b/src/linvdist.c
new file mode 100644
index 0000000..5d101cb
--- /dev/null
+++ b/src/linvdist.c
@@ -0,0 +1,30 @@
+#include <R.h>
+#include "yesno.h"
+
+/*
+
+  linvdist.c
+
+  Distance function at vertices
+  (shortest distance from each vertex to a data point)
+
+  Sparse representation of network
+
+  $Revision: 1.1 $  $Date: 2015/12/05 06:07:16 $
+
+  ! Data points must be ordered by segment index !
+
+ */
+
+#undef HUH
+
+/* definition of Clinvdist */
+#define FNAME Clinvdist
+#undef WHICH
+#include "linvdist.h"
+
+/* definition of Clinvwhichdist */
+#undef FNAME
+#define FNAME Clinvwhichdist
+#define WHICH
+#include "linvdist.h"
diff --git a/src/linvdist.h b/src/linvdist.h
new file mode 100644
index 0000000..ab60a23
--- /dev/null
+++ b/src/linvdist.h
@@ -0,0 +1,150 @@
+/*
+
+  linvdist.h
+
+  Distance function at vertices
+  (shortest distance from each vertex to a data point)
+
+  Function body definitions with macros
+
+  Sparse representation of network
+
+  $Revision: 1.3 $  $Date: 2015/12/05 07:26:56 $
+
+  Macros used:
+  FNAME   name of function
+  WHICH   whether 'nnwhich' is required
+  HUH     debugging flag
+
+  ! Data points must be ordered by segment index !
+
+*/
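+
+/*
+  Editorial note: the 'recursive update' below is a Bellman-Ford
+  style relaxation.  Each pass sweeps all segments, lowering the
+  distance at either endpoint whenever the route through the opposite
+  endpoint is shorter by more than 'tol', and the passes repeat until
+  nothing changes.  The tolerance prevents infinite loops driven by
+  floating-point rounding.
+*/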
+
+void FNAME(np, sp, tp,   /* target data points (ordered by sp) */
+	   nv,           /* number of network vertices */
+	   ns, from, to, /* segments */
+	   seglen,       /* segment lengths */
+	   huge,         /* value taken as infinity */
+	   tol,          /* tolerance for updating distances */
+	   /* OUTPUT */
+#ifdef WHICH
+	   dist,         /* distance from each vertex to nearest data point */
+	   which         /* identifies nearest data point */
+#else 
+	   dist          /* distance from each vertex to nearest data point */
+#endif	   
+) 
+  int *np, *nv, *ns;  /* number of points, vertices, segments */
+  int *sp, *from, *to; /* integer vectors (mappings) */
+  double *tp; /* fractional location coordinates */
+  double *huge, *tol;
+  double *seglen;
+  double *dist;
+#ifdef WHICH
+  int *which;
+#endif
+{
+  int Np, Nv, Ns, i, j, k, segPj, ivleft, ivright;
+  double hugevalue, eps, dleft, dright, slen, d, tpj;
+  char converged;
+
+  Np = *np;
+  Nv = *nv;
+  Ns = *ns;
+  hugevalue = *huge;
+  eps = *tol;
+
+#ifdef HUH
+  Rprintf("Initialise dist\n");
+#endif
+  /* initialise to huge value */
+  for(i = 0; i < Nv; i++) {
+    dist[i] = hugevalue;
+#ifdef WHICH
+    which[i] = -1;
+#endif
+  }
+
+#ifdef HUH
+  Rprintf("Run through target points\n");
+#endif
+  /* assign correct value to endpoints of segments containing target points */
+  for(j = 0; j < Np; j++) {
+    segPj = sp[j];
+    tpj = tp[j];
+    slen = seglen[segPj];
+    ivleft = from[segPj];
+    d = slen * tpj;
+    if(d < dist[ivleft]) {
+      dist[ivleft] = d;
+#ifdef WHICH
+      which[ivleft] = j;
+#endif
+    }
+    ivright = to[segPj];
+    d = slen * (1.0 - tpj);
+    if(d < dist[ivright]) {
+      dist[ivright] = d;
+#ifdef WHICH
+      which[ivright] = j;
+#endif
+    }
+  }
+
+  /* recursively update */
+#ifdef HUH
+  Rprintf("Recursive update\n");
+#endif
+  converged = NO;
+  while(!converged) {
+    converged = YES;
+#ifdef HUH
+    Rprintf("........... starting new pass ...................... \n");
+#endif
+    for(k = 0; k < Ns; k++) {
+      ivleft = from[k];
+      ivright = to[k];
+      slen = seglen[k];
+      dleft = (double) dist[ivleft];
+      dright = (double) dist[ivright];
+      d = (double) (dleft + slen);
+      if(d < dright - eps) {
+#ifdef HUH
+	Rprintf("Updating ivright=%d using ivleft=%d, from %lf to %lf+%lf=%lf\n",
+		ivright, ivleft, dright, dleft, slen, d);
+#endif
+	converged = NO;
+	dist[ivright] = d;
+#ifdef WHICH
+	which[ivright] = which[ivleft];
+#endif
+      } else {
+	d = (double) (dright + slen);
+	if(d < dleft - eps) {
+#ifdef HUH
+	Rprintf("Updating ivleft=%d using ivright=%d, from %lf to %lf+%lf=%lf\n",
+		ivleft, ivright, dleft, dright, slen, d);
+#endif
+	  converged = NO;
+	  dist[ivleft] = d;
+#ifdef WHICH
+	  which[ivleft] = which[ivright];
+#endif
+	}
+      }
+    }
+  }
+
+#ifdef HUH
+  Rprintf("Done\nVertex values:\n");
+#ifdef WHICH
+  Rprintf("\ti\twhich\tdist\n");
+  for(i = 0; i < Nv; i++) 
+    Rprintf("\t%d\t%d\t%lf\n", i, which[i], dist[i]);
+#else
+  Rprintf("\ti\tdist\n");
+  for(i = 0; i < Nv; i++) 
+    Rprintf("\t%d\t%lf\n", i, dist[i]);
+#endif
+#endif
+}
diff --git a/src/linvknndist.c b/src/linvknndist.c
new file mode 100644
index 0000000..0cb7ac6
--- /dev/null
+++ b/src/linvknndist.c
@@ -0,0 +1,241 @@
+#include <R.h>
+#include "yesno.h"
+
+/*
+
+  linvknndist.c
+
+  k-th nearest neighbour function at vertices
+  (distance from each vertex to the 
+  nearest, second nearest, ...  k-th nearest target data point)
+
+  Needs only the sparse representation of the network
+
+  $Revision: 1.3 $  $Date: 2016/02/02 01:53:51 $
+
+  ! Data points must be ordered by segment index !
+
+ */
+
+#undef HUH
+
+#define DIST(VERTEX, ORDER) dist[(ORDER) + (VERTEX) * Kmax]
+#define WHICH(VERTEX, ORDER) which[(ORDER) + (VERTEX) * Kmax]
+
+#define UPDATE(VERTEX, D, J, EPS)	\
+  UpdateKnnList(D, J, \
+		dist + (VERTEX) * Kmax, \
+		which + (VERTEX) * Kmax, \
+		Kmax, \
+		EPS)
+
+
+void linvknndist(kmax,         /* number of neighbours required */
+		 nq, sq, tq,   /* target data points (ordered by sq) */
+		 nv,           /* number of network vertices */
+		 ns, from, to, /* segments (pairs of vertices) */
+		 seglen,       /* segment lengths */
+		 huge,         /* value taken as infinity */
+		 tol,          /* tolerance for updating distances */
+		 /* OUTPUT */
+		 dist,         /* distance from each vertex to
+				  the nearest, ..., kth nearest data points */
+		 which         /* identifies which data points */
+) 
+  int *kmax;
+  int *nq, *nv, *ns;  /* number of points, vertices, segments */
+  int *sq, *from, *to; /* integer vectors (mappings) */
+  double *tq; /* fractional location coordinates */
+  double *huge, *tol;
+  double *seglen;
+  double *dist;
+  int *which;
+{
+  int Nq, Nv, Ns, Kmax, Nout, i, j, k, m;
+  int segQj, ivleft, ivright, changed;
+  double hugevalue, eps, slen, d, tqj;
+  char converged;
+  int UpdateKnnList();
+
+  Kmax = *kmax;
+  Nq = *nq;
+  Nv = *nv;
+  Ns = *ns;
+  hugevalue = *huge;
+  eps = *tol;
+
+  /* number of values in 'dist' and in 'which' */
+  Nout = Nv * Kmax;
+
+#ifdef HUH
+  Rprintf("Initialise dist\n");
+#endif
+  /* initialise to huge value */
+  for(i = 0; i < Nout; i++) {
+    dist[i] = hugevalue;
+    which[i] = -1;
+  }
+
+
+#ifdef HUH
+  Rprintf("Run through target points\n");
+#endif
+  /* assign value to endpoints of segments containing target points */
+  for(j = 0; j < Nq; j++) {
+    segQj = sq[j];
+    tqj = tq[j];
+    slen = seglen[segQj];
+    ivleft = from[segQj];
+    d = slen * tqj;
+    UPDATE(ivleft, d, j, (double) 0.0);
+    ivright = to[segQj];
+    d = slen * (1.0 - tqj);
+    UPDATE(ivright, d, j, (double) 0.0);
+  }
+
+#ifdef HUH
+  Rprintf("Initialised values at vertices:\n");
+  Rprintf("\ti\twhich\tdist\n");
+  for(i = 0; i < Nv; i++) {
+    Rprintf("\t%d", i);
+    for(k = 0; k < Kmax; k++) 
+      Rprintf(" %d ", WHICH(i, k));
+    for(k = 0; k < Kmax; k++) 
+      Rprintf(" %lf ", DIST(i, k));
+    Rprintf("\n");
+  }
+#endif
+
+  /* recursively update */
+#ifdef HUH
+  Rprintf("Recursive update\n");
+#endif
+  converged = NO;
+  while(!converged) {
+    converged = YES;
+#ifdef HUH
+    Rprintf("........... starting new pass ...................... \n");
+    Rprintf("Current state:\n");
+    Rprintf("\ti\twhich\tdist\n");
+    for(i = 0; i < Nv; i++) {
+      Rprintf("\t%d", i);
+      for(k = 0; k < Kmax; k++) 
+	Rprintf(" %d ", WHICH(i, k));
+      for(k = 0; k < Kmax; k++) 
+	Rprintf(" %lf ", DIST(i, k));
+      Rprintf("\n");
+    }
+#endif
+    for(m = 0; m < Ns; m++) {
+      ivleft = from[m];
+      ivright = to[m];
+      slen = seglen[m];
+
+#ifdef HUH
+      Rprintf("updating right=%d from left=%d\n", ivright, ivleft);
+#endif
+      for(k = 0; k < Kmax; k++) {
+	changed = UPDATE(ivright, DIST(ivleft, k)+slen, WHICH(ivleft, k), eps);
+	converged = converged && !changed;
+      }
+
+#ifdef HUH
+      Rprintf("updating left=%d from right=%d\n", ivleft, ivright);
+#endif
+      for(k = 0; k < Kmax; k++) {
+	changed = UPDATE(ivleft, DIST(ivright, k)+slen, WHICH(ivright, k), eps);
+	converged = converged && !changed;
+      }
+    }
+  }
+
+#ifdef HUH
+  Rprintf("Done\nVertex values:\n");
+  Rprintf("\ti\twhich\tdist\n");
+  for(i = 0; i < Nv; i++) {
+    Rprintf("\t%d", i);
+    for(k = 0; k < Kmax; k++) 
+      Rprintf(" %d ", WHICH(i, k));
+    for(k = 0; k < Kmax; k++) 
+      Rprintf(" %lf ", DIST(i, k));
+    Rprintf("\n");
+  }
+#endif
+}
+
+
+/* update a list of nearest, second nearest, ..., k-th nearest neighbours */
+
+int UpdateKnnList(d, j, dist, which, Kmax, eps)
+     double d;  /* candidate distance */
+     int j;     /* corresponding candidate target point */
+     int Kmax;  
+     double *dist;  /* pointer to start of vector of length Kmax */
+     int *which;   /* pointer to start of vector of length Kmax */
+     double eps;   /* numerical tolerance, to prevent infinite loops */
+{
+  char matched, unsorted, changed;
+  int k, Klast, itmp;
+  double dtmp, dPlusEps;
+
+  Klast = Kmax - 1;
+
+  dPlusEps = d + eps;
+  if(dPlusEps > dist[Klast])
+    return(NO);
+
+  changed = NO;
+
+  /* Check whether this data point is already listed as a neighbour */
+  matched = NO;
+  for(k = 0; k < Kmax; k++) {
+    if(which[k] == j) {
+      matched = YES;
+#ifdef HUH
+      Rprintf("\tMatch: which[%d] = %d\n", k, j);
+#endif
+      if(dPlusEps <= dist[k]) {
+	changed = YES;
+#ifdef HUH
+	Rprintf("\t\tUpdated distance from %lf to %lf\n", dist[k], d);
+#endif
+	dist[k] = d;
+      }
+      break;
+    }
+  }
+  if(!matched) {
+#ifdef HUH
+    Rprintf("\tNo match with current list\n");
+    Rprintf("\t\tUpdated distance from %lf to %lf\n", dist[Klast], d);
+#endif
+    /* replace furthest point */
+    changed = YES;
+    dist[Klast] = d;
+    which[Klast] = j;
+  }
+  /* Bubble sort entries */
+  if(changed) {
+#ifdef HUH
+    Rprintf("Bubble sort.\nCurrent state:\n\tk\twhich\tdist\n");
+    for(k = 0; k <= Klast; k++) 
+      Rprintf("\t%d\t%d\t%lf\n", k, which[k], dist[k]);
+#endif
+    do {
+      unsorted = NO;
+      for(k = 0; k < Klast; k++) {
+	if(dist[k] > dist[k+1]) {
+	  unsorted = YES;
+	  dtmp = dist[k];   dist[k] = dist[k+1];   dist[k+1] = dtmp;
+	  itmp = which[k]; which[k] = which[k+1]; which[k+1] = itmp;
+	}
+      }
+    } while(unsorted);
+  }
+#ifdef HUH
+    Rprintf("Return state:\n\tk\twhich\tdist\n");
+    for(k = 0; k <= Klast; k++) 
+      Rprintf("\t%d\t%d\t%lf\n", k, which[k], dist[k]);
+#endif
+  return( (int) changed);
+}
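+
+/* Editorial sketch (hypothetical, not upstream code): feeding the
+   candidates (d=5, j=0), (2,1), (7,2), (1,3) into an empty 3-nearest
+   list leaves dist = {1, 2, 5} and which = {3, 1, 0}.  The driver
+   assumes compilation with -DKNN_EXAMPLE. */
+#ifdef KNN_EXAMPLE
+#include <stdio.h>
+int main(void) {
+  double dist[3] = {1e308, 1e308, 1e308};  /* 'infinite' initial list */
+  int which[3] = {-1, -1, -1};
+  double d[4] = {5.0, 2.0, 7.0, 1.0};
+  int j;
+  for(j = 0; j < 4; j++)
+    UpdateKnnList(d[j], j, dist, which, 3, 0.0);
+  printf("%g %g %g / %d %d %d\n",
+         dist[0], dist[1], dist[2], which[0], which[1], which[2]);
+  return 0;
+}
+#endif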
diff --git a/src/lixel.c b/src/lixel.c
new file mode 100644
index 0000000..01e1e8f
--- /dev/null
+++ b/src/lixel.c
@@ -0,0 +1,134 @@
+#include <R.h>
+#include <math.h>
+
+/* 
+   lixel.c
+
+   divide a linear network into shorter segments
+
+ */
+
+void Clixellate(ns, fromcoarse, tocoarse, 
+		fromfine, tofine, 
+		nv, xv, yv, svcoarse, tvcoarse, 
+		nsplit, 
+		np, spcoarse, tpcoarse, 
+		spfine, tpfine)
+     /* 
+	A linear network with *ns segments and *nv vertices
+	is specified by the vectors fromcoarse, tocoarse, xv, yv.
+
+	The i-th segment will be subdivided into nsplit[i] subsegments.
+
+	New data will be added at the end of the vectors 'xv' and 'yv' 
+	representing additional vertices in the new network.
+
+	The point pattern data (*np points with local coordinates sp, tp
+	in the coarse network) will be mapped to the new 'fine' network. 
+	Points are sorted by 'spcoarse' value.
+	
+	'xv', 'yv', 'svcoarse', 'tvcoarse'
+        must each have space for (nv + sum(nsplit-1)) entries.
+
+	'fromfine', 'tofine' must have length = sum(nsplit).
+
+      */
+     int *ns; /* number of segments (input & output) */
+     int *fromcoarse, *tocoarse; /* endpoints of each segment (input) */
+     int *fromfine, *tofine;  /* endpoints of each segment (output) */
+     int *nv; /* number of vertices (input & output) */
+     double *xv, *yv; /* cartesian coords of vertices (input & output) */
+     int *svcoarse; /* segment id of new vertex in COARSE network */
+     double *tvcoarse; /* location coordinate of new vertex on COARSE network */
+     int *nsplit; /* number of pieces into which each segment should be split */
+     int *np; /* number of data points */
+     double *tpcoarse, *tpfine; /* location coordinate */
+     int *spcoarse, *spfine; /* segment id coordinate */
+{
+  int Np, oldNs, oldNv, i, j, k, m, ll;
+  int oldfromi, oldtoi, newlines, newNv, newNs, SegmentForData;
+  double xstart, xend, ystart, yend, xincr, yincr, tn;
+
+  Np = *np;
+  newNv = oldNv = *nv;
+  oldNs = *ns;
+  newNs = 0;
+
+  /* 
+     initialise pointer at start of point pattern
+     Determine which segment contains first point
+  */
+  k = 0;
+  SegmentForData = (Np > 0) ? spcoarse[0] : -1;
+  
+  /* loop over line segments in original network */
+  for(i = 0; i < oldNs; i++) {
+
+    newlines = nsplit[i]; 
+
+    oldfromi = fromcoarse[i];
+    oldtoi   = tocoarse[i];
+    
+    /* local coordinates of endpoints of segment, in ***coarse*** network */
+    svcoarse[oldfromi] = svcoarse[oldtoi] = i;
+    tvcoarse[oldfromi] = 0.0;
+    tvcoarse[oldtoi] = 1.0;
+    
+    if(newlines == 1) {
+      /* copy existing segment to new segment list */
+      fromfine[newNs] = oldfromi;
+      tofine[newNs]   = oldtoi;
+      /* advance pointer */
+      ++newNs;
+    } else if(newlines > 1) {
+      /* split segment into 'newlines' pieces */
+      xstart = xv[oldfromi];
+      ystart = yv[oldfromi];
+
+      xend = xv[oldtoi];
+      yend = yv[oldtoi];
+    
+      xincr = (xend-xstart)/newlines;
+      yincr = (yend-ystart)/newlines;
+
+      m = newlines - 1;
+
+      for(j = 1; j <= m; j++) {
+	/* create new vertex, number 'newNv' */
+	xv[newNv] = xstart + j * xincr;
+	yv[newNv] = ystart + j * yincr;
+	/* local coordinates of new vertex relative to ***coarse*** network */
+	svcoarse[newNv] = i;
+	tvcoarse[newNv] = ((double) j)/((double) newlines);
+	/* create new segment, number 'newNs', ending at new vertex */
+	fromfine[newNs] = (j == 1) ? oldfromi : (newNv-1);
+	tofine[newNs]   = newNv;
+	/* advance */
+	++newNv;
+	++newNs;
+      }
+      /* create segment from last added vertex to end of old segment */
+      fromfine[newNs] = newNv-1;
+      tofine[newNs] = oldtoi;
+      ++newNs;
+    }
+
+    /* handle data points lying on current segment i */
+    while(SegmentForData == i) {
+      if(newlines == 1) {
+	spfine[k] = spcoarse[k];
+	tpfine[k] = tpcoarse[k];
+      } else {
+	tn = tpcoarse[k] * newlines;
+	ll = (int) floor(tn);
+	/* clamp to 0 <= ll <= newlines-1, so that a point with
+	   tpcoarse = 1 maps to the end of the last subsegment of
+	   this coarse segment, not to the next segment */
+	ll = (ll < 0) ? 0 : (ll > newlines - 1) ? (newlines - 1) : ll;
+	tpfine[k] = tn - ll;
+	spfine[k] = newNs - newlines + ll;
+      }
+      ++k;
+      SegmentForData = (k < Np) ? spcoarse[k] : -1;
+    }
+  }
+  *nv = newNv;
+  *ns = newNs;
+}
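
The coarse-to-fine coordinate mapping above is plain arithmetic on the
local coordinate tp. A worked example (a sketch, not part of the patch):

    #include <math.h>

    void lixel_demo(void) {
      double tp = 0.55;              /* coarse local coordinate */
      int    nsplit = 4;             /* segment split into 4 pieces */
      double tn = tp * nsplit;       /* 2.2 */
      int    ll = (int) floor(tn);   /* 2: point lies on the 3rd piece */
      double tpfine = tn - ll;       /* 0.2: local coordinate there */
      /* fine segment index = (index of first subsegment) + ll */
    }
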
diff --git a/src/localpcf.c b/src/localpcf.c
new file mode 100755
index 0000000..c0164bd
--- /dev/null
+++ b/src/localpcf.c
@@ -0,0 +1,23 @@
+#include <R.h>
+#include <Rmath.h>
+#include <R_ext/Utils.h>
+
+#include "chunkloop.h"
+
+/*
+
+  localpcf.c
+
+  $Revision: 1.3 $     $Date: 2013/05/27 02:09:10 $
+
+  Assumes point patterns are sorted in increasing order of x coordinate
+
+*/
+
+#undef WEIGHTED
+
+#include "localpcf.h"
+
+#define WEIGHTED 1
+
+#include "localpcf.h"
diff --git a/src/localpcf.h b/src/localpcf.h
new file mode 100755
index 0000000..efcf4c8
--- /dev/null
+++ b/src/localpcf.h
@@ -0,0 +1,115 @@
+/*
+  
+  localpcf.h
+
+  Source template for versions of local pair correlation
+
+  Requires variable: WEIGHTED
+
+  Assumes point patterns are sorted in increasing order of x coordinate
+
+  $Revision: 1.5 $  $Date: 2012/03/27 04:50:04 $
+
+*/
+
+#ifdef WEIGHTED
+#define FNAME locWpcfx
+#else
+#define FNAME locpcfx
+#endif
+
+void FNAME(nn1, x1, y1, id1, 
+	   nn2, x2, y2, id2, 
+#ifdef WEIGHTED
+           w2,
+#endif
+	   nnr, rmaxi, 
+	   del, pcf)
+     /* inputs */
+     int *nn1, *nn2, *nnr;
+     double *x1, *y1, *x2, *y2;
+     int *id1, *id2;
+     double *rmaxi, *del;
+#ifdef WEIGHTED
+     double *w2;
+#endif
+     /* output */
+     double *pcf;  /* matrix of column vectors of pcf's 
+		      for each point of first pattern */
+{
+  int n1, n2, nr, i, j, k, jleft, kmin, kmax, id1i, maxchunk;
+  double x1i, y1i, rmax, delta, xleft, dx, dy, dx2;
+  double d2, d2max, dmax, d;
+  double rstep, rvalue, frac, contrib, weight, coef;
+
+  n1 = *nn1;
+  n2 = *nn2;
+  nr = *nnr;
+  rmax = *rmaxi;
+  delta = *del;
+
+  dmax = rmax + delta; /* maximum relevant value of interpoint distance */
+  d2max = dmax * dmax;
+  rstep = rmax/(nr-1);
+  coef  = 3.0 /(4.0 * delta);
+
+  if(n1 == 0 || n2 == 0) 
+    return;
+
+  jleft = 0;
+
+  OUTERCHUNKLOOP(i, n1, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, n1, maxchunk, 8196) {
+      x1i = x1[i];
+      y1i = y1[i];
+      id1i = id1[i];
+
+      /* 
+	 adjust starting point
+
+      */
+      xleft = x1i - dmax;
+      while((x2[jleft] < xleft) && (jleft+1 < n2))
+	++jleft;
+
+      /* 
+	 process from jleft until |dx| > dmax
+      */
+      for(j=jleft; j < n2; j++) {
+	dx = x2[j] - x1i;
+	dx2 = dx * dx;
+	if(dx2 > d2max) 
+	  break;
+	dy = y2[j] - y1i;
+	d2 = dx2 + dy * dy;
+	if(d2 <= d2max && id2[j] != id1i) {
+	  d = sqrt(d2);
+	  kmin = (int) floor((d-delta)/rstep);
+	  kmax = (int) ceil((d+delta)/rstep);
+	  if(kmin <= nr-1 && kmax >= 0) {
+	    /* nonempty intersection with range of r values */
+	    /* compute intersection */
+	    if(kmin < 0) kmin = 0; 
+	    if(kmax >= nr) kmax = nr-1;
+	    /* */
+	    weight = coef/d;
+#ifdef WEIGHTED
+	    weight = weight * w2[j];
+#endif
+	    for(k = kmin; k <= kmax; k++) {
+	      rvalue = k * rstep;
+	      frac = (d - rvalue)/delta;
+	      /* Epanechnikov kernel with halfwidth delta */
+	      contrib = (1 - frac * frac);
+	      if(contrib > 0) 
+		pcf[k + nr * i] += contrib * weight;
+	    }
+	  }
+	}
+      }
+    }
+  }
+}
+
+#undef FNAME
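
Each pair of points at distance d contributes to the local pcf at
arguments r within delta of d, through an Epanechnikov kernel scaled
by 1/d. The contribution in isolation (a sketch, not part of the
patch; the helper name epa_contrib is made up):

    #include <math.h>

    /* contribution of a pair at distance d to the pcf at argument r */
    double epa_contrib(double d, double r, double delta) {
      double frac = (d - r) / delta;
      double k = 1.0 - frac * frac;      /* Epanechnikov shape */
      return (k > 0) ? (3.0 / (4.0 * delta)) * k / d : 0.0;
    }
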
diff --git a/src/loccum.c b/src/loccum.c
new file mode 100644
index 0000000..4937f43
--- /dev/null
+++ b/src/loccum.c
@@ -0,0 +1,79 @@
+#include <R.h>
+#include <Rmath.h>
+#include <R_ext/Utils.h>
+#include "chunkloop.h"
+
+/*
+
+  loccum.c
+
+  $Revision: 1.1 $     $Date: 2013/05/27 02:09:10 $
+
+  Compute local cumulative sums or products of weights
+
+  locsum:  f_i(t) = \sum_{j: j \neq i, ||x_j - x_i|| \le t} v(x_j)
+            for a data point pattern {x_i} 
+
+  locxsum: f_u(t) = \sum_{||x_i - u|| \le t} v(x_i)
+            for a grid of points {u} and a data point pattern {x_i} 
+	    (also works if {u} is another point pattern)
+
+  locprod:  f_i(t) = \prod_{j: j \neq i, ||x_j - x_i|| \le t} v(x_j)
+            for a data point pattern {x_i} 
+
+  locxprod: f_u(t) = \prod_{||x_i - u|| \le t} v(x_i)
+            for a grid of points {u} and a data point pattern {x_i} 
+	    (also works if {u} is another point pattern)
+
+  Assumes point patterns are sorted in increasing order of x coordinate
+
+  Uses C code template files : loccums.h, loccumx.h
+
+*/
+
+/* data-to-data */
+
+#undef FNAME
+#undef NULVAL
+#undef INC
+
+#define FNAME locsum
+#define NULVAL 0.0
+#define INC(A,B) A += B
+
+#include "loccums.h"
+
+#undef FNAME
+#undef NULVAL
+#undef INC
+
+#define FNAME locprod
+#define NULVAL 1.0
+#define INC(A,B) A *= B
+
+#include "loccums.h"
+
+/* test-grid-to-data */
+
+#undef FNAME
+#undef NULVAL
+#undef INC
+
+#define FNAME locxsum
+#define NULVAL 0.0
+#define INC(A,B) A += B
+
+#include "loccumx.h"
+
+#undef FNAME
+#undef NULVAL
+#undef INC
+
+#define FNAME locxprod
+#define NULVAL 1.0
+#define INC(A,B) A *= B
+
+#include "loccumx.h"
+
+
+
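
The FNAME/NULVAL/INC macros turn the two templates into four concrete
functions. As a sketch of the effect of the first instantiation (not a
literal preprocessor expansion):

    /* after  #define FNAME locsum / NULVAL 0.0 / INC(A,B) A += B  */
    void locsum(int *n, double *x, double *y, double *v,
                int *nr, double *rmax, double *ans);
    /* ...in which the accumulation step
         INC(ans[columnstart + k], contrib)
       becomes
         ans[columnstart + k] += contrib;
       while the locprod instantiation initialises entries to 1.0
       and multiplies instead. */
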
diff --git a/src/loccums.h b/src/loccums.h
new file mode 100644
index 0000000..ab20a25
--- /dev/null
+++ b/src/loccums.h
@@ -0,0 +1,106 @@
+/*
+  loccums.h
+
+  C template for loccum.c
+
+  data-to-data functions
+
+  $Revision: 1.5 $ $Date: 2013/09/18 04:28:45 $
+
+  macros: 
+
+  FNAME    function name
+  NULVAL   initial value (empty sum = 0, empty product = 1)
+  INC(A,B) increment operation A += B or A *= B
+
+*/
+
+void FNAME(n, x, y, v,
+	   nr, rmax, 
+	   ans)
+     /* inputs */
+     int *n, *nr;
+     double *x, *y, *v;
+     double *rmax;
+     /* output */
+     double *ans;  /* matrix of column vectors of functions for each point */
+{
+  int N, Nr, Nans;
+  double Rmax;
+
+  int i, j, k, kmin, maxchunk, columnstart;
+  double Rmax2, rstep, xi, yi;
+  double dx, dy, dx2, d2, d, contrib;
+
+  N    = *n;
+  Nr   = *nr;
+  Rmax = *rmax;
+
+  if(N == 0) 
+    return;
+
+  rstep = Rmax/(Nr-1);
+  Rmax2 = Rmax * Rmax;
+  Nans  = Nr * N;
+
+  /* initialise entries to NULVAL (0 for a sum, 1 for a product) */
+  OUTERCHUNKLOOP(k, Nans, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(k, Nans, maxchunk, 8196) {
+      ans[k] = NULVAL;
+    }
+  }
+   
+  OUTERCHUNKLOOP(i, N, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, N, maxchunk, 8196) {
+      xi = x[i];
+      yi = y[i];
+      columnstart = Nr * i; /* start position for f_i(.) in 'ans' */
+      /* 
+	 process backward until |dx| > Rmax
+      */
+      if(i > 0) {
+	for(j=i-1; j >= 0; j--) {
+	  dx = x[j] - xi;
+	  dx2 = dx * dx;
+	  if(dx2 > Rmax2) 
+	    break;
+	  dy = y[j] - yi;
+	  d2 = dx2 + dy * dy;
+	  if(d2 <= Rmax2) {
+	    d = sqrt(d2);
+	    kmin = (int) ceil(d/rstep);
+	    if(kmin < Nr) {
+	      contrib = v[j];
+	      for(k = kmin; k < Nr; k++) 
+		INC(ans[columnstart + k] , contrib);
+	    }
+	  }
+	}
+      }
+      /* 
+	 process forward until |dx| > Rmax
+      */
+      if(i < N - 1) {
+	for(j=i+1; j < N; j++) {
+	  dx = x[j] - xi;
+	  dx2 = dx * dx;
+	  if(dx2 > Rmax2) 
+	    break;
+	  dy = y[j] - yi;
+	  d2 = dx2 + dy * dy;
+	  if(d2 <= Rmax2) {
+	    d = sqrt(d2);
+	    kmin = (int) ceil(d/rstep);
+	    if(kmin < Nr) {
+	      contrib = v[j];
+	      for(k = kmin; k < Nr; k++) 
+		INC(ans[columnstart + k] , contrib);
+	    }
+	  }
+	}
+      }
+    }   
+  }
+}
diff --git a/src/loccumx.h b/src/loccumx.h
new file mode 100644
index 0000000..ad3441b
--- /dev/null
+++ b/src/loccumx.h
@@ -0,0 +1,101 @@
+/*
+
+  loccumx.h
+
+  C template for loccum.c
+
+  grid-to-data or data-cross-data functions
+
+  $Revision: 1.5 $ $Date: 2012/11/10 06:13:52 $
+
+  macros: 
+
+  FNAME    function name
+  NULVAL   initial value (empty sum = 0, empty product = 1)
+  INC(A,B) increment operation A += B or A *= B
+
+*/
+
+void FNAME(ntest, xtest, ytest, 
+	   ndata, xdata, ydata, vdata,
+	   nr, rmax, 
+	   ans)
+     /* inputs */
+     int *ntest, *ndata, *nr;
+     double *xtest, *ytest, *xdata, *ydata, *vdata;
+     double *rmax;
+     /* output */
+     double *ans;  /* matrix of column vectors of functions 
+		      for each point of first pattern */
+{
+  int Ntest, Ndata, Nr, Nans;
+  double Rmax;
+
+  int i, j, k, jleft, kmin, maxchunk, columnstart;
+  double Rmax2, rstep, xtesti, ytesti, xleft;
+  double dx, dy, dx2, d2, d, contrib;
+
+  Ntest = *ntest;
+  Ndata = *ndata;
+  Nr    = *nr;
+  Rmax  = *rmax;
+
+  if(Ntest == 0)
+    return;
+
+  Nans  = Nr * Ntest;
+
+  /* initialise entries to NULVAL (0 for a sum, 1 for a product) */
+  OUTERCHUNKLOOP(k, Nans, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(k, Nans, maxchunk, 8196) {
+      ans[k] = NULVAL;
+    }
+  }
+   
+  if(Ndata == 0) 
+    return;
+
+  rstep = Rmax/(Nr-1);
+  Rmax2 = Rmax * Rmax;
+
+  jleft = 0;
+
+  OUTERCHUNKLOOP(i, Ntest, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, Ntest, maxchunk, 8196) {
+      xtesti = xtest[i];
+      ytesti = ytest[i];
+      columnstart = Nr * i; /* start position for f_i(.) in 'ans' */
+      /* 
+	 adjust starting point
+
+      */
+      xleft = xtesti - Rmax;
+      while((xdata[jleft] < xleft) && (jleft+1 < Ndata))
+	++jleft;
+
+      /* 
+	 process from jleft until |dx| > Rmax
+      */
+      for(j=jleft; j < Ndata; j++) {
+	dx = xdata[j] - xtesti;
+	dx2 = dx * dx;
+	if(dx2 > Rmax2) 
+	  break;
+	dy = ydata[j] - ytesti;
+	d2 = dx2 + dy * dy;
+	if(d2 <= Rmax2) {
+	  d = sqrt(d2);
+	  kmin = (int) ceil(d/rstep);
+	  if(kmin < Nr) {
+	    contrib = vdata[j];
+	    for(k = kmin; k < Nr; k++) 
+	      INC(ans[columnstart + k] , contrib);
+	  }
+	}
+      }
+    }
+  }
+}
+
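
Both localpcf.h and loccumx.h rely on the same sliding lower bound:
because the patterns are sorted by x coordinate, the first data index
that can lie within Rmax of the current test point never moves
backwards, so 'jleft' is advanced monotonically instead of being reset
for every test point. In isolation (a sketch, not part of the patch;
advance_jleft is a made-up name):

    /* move the scan start past points lying entirely to the left */
    int advance_jleft(int jleft, double *xdata, int Ndata, double xleft)
    {
      while(xdata[jleft] < xleft && jleft + 1 < Ndata)
        ++jleft;
      return jleft;
    }
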
diff --git a/src/lookup.c b/src/lookup.c
new file mode 100755
index 0000000..9ed6d77
--- /dev/null
+++ b/src/lookup.c
@@ -0,0 +1,218 @@
+#include <R.h>
+#include <math.h>
+#include <Rmath.h>
+#include "methas.h"
+#include "dist2.h"
+
+/*
+ Conditional intensity function for a general pairwise
+ interaction process with the pairwise interaction function
+ given by a ``lookup table'', passed through the par argument. 
+
+*/
+
+/* For debugging code, insert the line: #define DEBUG 1 */
+
+/* Storage of parameters and precomputed/auxiliary data */
+
+typedef struct Lookup {
+  int nlook;
+  int equisp;   
+  double delta;
+  double rmax;
+  double r2max;
+  double *h;   /* values of pair interaction */
+  double *r;   /* r values if not equally spaced */
+  double *r2;   /* r^2 values if not equally spaced */
+  double *period;
+  int per;
+} Lookup;
+
+
+/* initialiser function */
+
+Cdata *lookupinit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  int i, nlook;
+  double ri;
+  Lookup *lookup;
+
+  lookup = (Lookup *) R_alloc(1, sizeof(Lookup));
+
+  /* Interpret model parameters */
+  lookup->nlook  = nlook = model.ipar[0];
+  lookup->equisp = (model.ipar[1] > 0); 
+  lookup->delta  = model.ipar[2];
+  lookup->rmax   = model.ipar[3];
+  lookup->r2max  = pow(lookup->rmax, 2);
+  /* periodic boundary conditions? */
+  lookup->period = model.period;
+  lookup->per    = (model.period[0] > 0.0);
+/*
+ If the r-values are equispaced, only the h vector is included in
+ ``par'' after ``rmax''.  (The indices below refer to the 1-based
+ R-side vector ``par''; in the 0-based C vector ipar used here,
+ h[i] = ipar[4+i].)  The entries of h consist of
+ h[0] = par[5], ..., h[k-1] = par[4+k], ..., h[nlook-1] = par[4+nlook].
+ If the r-values are NOT equispaced, the individual r values are also
+ needed, and these follow the h values:
+ r[0] = par[5+nlook], ..., r[k-1] = par[4+nlook+k], ...,
+ r[nlook-1] = par[4+2*nlook].
+*/
+  lookup->h = (double *) R_alloc((size_t) nlook, sizeof(double));
+  for(i = 0; i < nlook; i++)
+    lookup->h[i] = model.ipar[4+i];
+  if(!(lookup->equisp)) {
+    lookup->r = (double *) R_alloc((size_t) nlook, sizeof(double));
+    lookup->r2 = (double *) R_alloc((size_t) nlook, sizeof(double));
+    for(i = 0; i < nlook; i++) {
+      ri = lookup->r[i] = model.ipar[4+nlook+i];
+      lookup->r2[i] = ri * ri;
+    }
+  }
+#ifdef DEBUG
+  Rprintf("Exiting lookupinit: nlook=%d, equisp=%d\n", nlook, lookup->equisp);
+#endif
+  
+  return((Cdata *) lookup);
+}
+
+/* conditional intensity evaluator */
+
+double lookupcif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int npts, nlook, k, kk, ix, ixp1, j;
+  double *x, *y;
+  double u, v;
+  double r2max, d2, d, delta, cifval, ux, vy;
+  Lookup *lookup;
+
+  lookup = (Lookup *) cdata;
+
+  r2max = lookup->r2max;
+  delta = lookup->delta;
+  nlook = lookup->nlook;
+
+  u  = prop.u;
+  v  = prop.v;
+  ix = prop.ix;
+  x  = state.x;
+  y  = state.y;
+
+  npts = state.npts;
+
+  cifval = 1.0;
+  if(npts == 0) 
+    return(cifval);
+
+  ixp1 = ix+1;
+  /* If ix = NONE = -1, then ixp1 = 0 is correct */
+
+  if(lookup->equisp) {
+    /* equispaced r values */
+    if(lookup->per) { /* periodic distance */
+      if(ix > 0) {
+	for(j=0; j < ix; j++) {
+	  d = sqrt(dist2(u,v,x[j],y[j],lookup->period));
+	  k = floor(d/delta);
+	  if(k < nlook) {
+	    if(k < 0) k = 0;
+	    cifval *= lookup->h[k];
+	  }
+	}
+      }
+      if(ixp1 < npts) {
+	for(j=ixp1; j<npts; j++) {
+	  d = sqrt(dist2(u,v,x[j],y[j],lookup->period));
+	  k = floor(d/delta);
+	  if(k < nlook) {
+	    if(k < 0) k = 0;
+	    cifval *= lookup->h[k];
+	  }
+	}
+      }
+    } else { /* Euclidean distance */
+      if(ix > 0) {
+	for(j=0; j < ix; j++) {
+	  d = hypot(u - x[j], v-y[j]);
+	  k = floor(d/delta);
+	  if(k < nlook) {
+	    if(k < 0) k = 0;
+	    cifval *= lookup->h[k];
+	  }
+	}
+      }
+      if(ixp1 < npts) {
+	for(j=ixp1; j<npts; j++) {
+	  d = hypot(u - x[j], v-y[j]);
+	  k = floor(d/delta);
+	  if(k < nlook) {
+	    if(k < 0) k = 0;
+	    cifval *= lookup->h[k];
+	  }
+	}
+      }
+    }
+  } else {
+    /* non-equispaced r values */
+    if(lookup->per) { /* periodic distance */
+      if(ix > 0) {
+	for(j=0; j < ix; j++) {
+	  d2 = dist2(u,v,x[j],y[j],lookup->period);
+	  if(d2 < r2max) {
+	    for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++)
+	      ;
+	    k = (kk == 0) ? 0 : kk-1;
+	    cifval *= lookup->h[k];
+	  }
+	}
+      }
+      if(ixp1 < npts) {
+	for(j=ixp1; j<npts; j++) {
+	  d2 = dist2(u,v,x[j],y[j],lookup->period);
+	  if(d2 < r2max) {
+	    for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++)
+	      ;
+	    k = (kk == 0) ? 0 : kk-1;
+	    cifval *= lookup->h[k];
+	  }
+	}
+      }
+    } else { /* Euclidean distance */
+      if(ix > 0) {
+	for(j=0; j < ix; j++) {
+	  ux = u - x[j];
+	  vy = v - y[j];
+	  d2 = ux * ux + vy * vy;
+	  if(d2 < r2max) {
+	    for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++)
+	      ;
+	    k = (kk == 0) ? 0 : kk-1;
+	    cifval *= lookup->h[k];
+	  }
+	}
+      }
+      if(ixp1 < npts) {
+	for(j=ixp1; j<npts; j++) {
+	  ux = u - x[j];
+	  vy = v - y[j];
+	  d2 = ux * ux + vy * vy;
+	  if(d2 < r2max) {
+	    for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++)
+	      ;
+	    k = (kk == 0) ? 0 : kk-1;
+	    cifval *= lookup->h[k];
+	  }
+	}
+      }
+    }
+  }
+
+  return cifval;
+}
+
+Cifns LookupCifns = { &lookupinit, &lookupcif, (updafunptr) NULL, NO};
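
For equally spaced r values, lookupcif reduces each pair distance to a
single table probe. The rule, pulled out as a helper (a sketch, not
part of the patch; pair_factor is a made-up name):

    #include <math.h>

    /* interaction factor contributed by one pair at distance d */
    double pair_factor(double d, double delta, double *h, int nlook) {
      int k = (int) floor(d / delta);
      if(k >= nlook) return 1.0;  /* beyond the table: no interaction */
      if(k < 0) k = 0;
      return h[k];
    }
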
diff --git a/src/looptest.h b/src/looptest.h
new file mode 100644
index 0000000..46b757c
--- /dev/null
+++ b/src/looptest.h
@@ -0,0 +1,12 @@
+/* 
+   looptest.h
+
+   Utilities for looping
+
+   $Revision: 1.1 $ $Date: 2014/09/19 00:47:34 $
+*/
+  
+/* a small value relative to threshold X, for loop exit test */
+
+#define EPSILON(X) ((X)/64)
+
diff --git a/src/massdisthack.c b/src/massdisthack.c
new file mode 100755
index 0000000..6caf606
--- /dev/null
+++ b/src/massdisthack.c
@@ -0,0 +1,70 @@
+/*
+  HACKED from R-2.0.1/src/appl/massdist.c
+  by Adrian Baddeley
+  Changes indicated by 'AB'
+*/
+
+/*
+ *  R : A Computer Language for Statistical Data Analysis
+ *  Copyright (C) 1996-2004     Robert Gentleman and Ross Ihaka and the
+ *				R Development Core Team
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <R_ext/Arith.h>
+#include <R_ext/Applic.h>
+#include <math.h>   /* for floor() */
+
+void massdisthack(double *x, int *nx, 
+		  double *xmass, /* AB: new variable */
+		  double *xlow, double *xhigh,
+		  double *y, int *ny)
+{
+    double fx, xdelta, xmi, xpos;   /* AB */
+    int i, ix, ixmax, ixmin;
+
+    ixmin = 0;
+    ixmax = *ny - 2;
+    /* AB: line deleted */
+    xdelta = (*xhigh - *xlow) / (*ny - 1);
+
+    for(i=0; i < *ny ; i++)
+	y[i] = 0;
+
+    for(i=0; i < *nx ; i++) {
+      if(R_FINITE(x[i])) {
+	xpos = (x[i] - *xlow) / xdelta;
+	ix = floor(xpos);
+	fx = xpos - ix;
+	xmi = xmass[i];   /* AB: new line  */
+	if(ixmin <= ix && ix <= ixmax) {
+	  y[ix] += (1 - fx) * xmi;   /* AB */
+	  y[ix + 1] += fx * xmi; /* AB */
+	}
+	else if(ix == -1) {
+	  y[0] += fx * xmi;  /* AB */
+	}
+	else if(ix == ixmax + 1) {
+	  y[ix] += (1 - fx) * xmi;  /* AB */
+	}
+      }
+    }
+
+    /* AB: lines deleted */
+}
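
The change relative to R's massdist is that each data point now
carries its own mass xmass[i], which linear binning splits between the
two nearest grid nodes so that total mass is conserved. The interior
case in isolation (a sketch, not part of the patch; the boundary cells
are handled separately above):

    #include <math.h>

    /* deposit mass m for a point at x onto an ny-node grid */
    void bin_interior(double x, double m, double xlow, double xdelta,
                      double *y, int ny) {
      double xpos = (x - xlow) / xdelta;
      int    ix = (int) floor(xpos);
      double fx = xpos - ix;          /* fractional position in cell */
      if(ix >= 0 && ix <= ny - 2) {
        y[ix]     += (1 - fx) * m;
        y[ix + 1] += fx * m;
      }
    }
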
diff --git a/src/maxnnd.h b/src/maxnnd.h
new file mode 100644
index 0000000..a65c46a
--- /dev/null
+++ b/src/maxnnd.h
@@ -0,0 +1,109 @@
+/*
+
+  maxnnd.h
+
+  Code template for maxnnd 
+   to be #included in minnnd.c
+
+  Macros: 
+  FNAME          Function name
+  IGNOREZERO     #defined if zero distances should be ignored
+
+  $Revision: 1.2 $     $Date: 2014/09/18 01:00:30 $
+
+*/
+
+/* THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER */
+
+void FNAME(n, x, y, huge, result) 
+     /* inputs */
+     int *n;
+     double *x, *y, *huge;
+     /* outputs */
+     double *result;
+{ 
+  int npoints, i, maxchunk, left, right;
+  double d2, d2mini, d2max, xi, yi, dx, dy, dy2, hu, hu2;
+
+  hu = *huge;
+  hu2 = hu * hu;
+
+  npoints = *n;
+
+  /* maximum (over all i) nearest-neighbour distance, squared */
+  d2max = 0.0;
+
+  if(npoints == 0) return;
+
+  /* loop in chunks of 2^16 */
+
+  i = 0; maxchunk = 0; 
+
+  while(i < npoints) {
+
+    R_CheckUserInterrupt();
+
+    maxchunk += 65536; 
+    if(maxchunk > npoints) maxchunk = npoints;
+
+    for(; i < maxchunk; i++) {
+
+      xi = x[i];
+      yi = y[i];
+      
+      /* nearest-neighbour distance for point i,   squared */
+      d2mini = hu2;
+
+      if(i < npoints - 1) {
+	/* search forward */
+	for(right = i + 1; right < npoints; ++right)
+	  {
+	    dy = y[right] - yi;
+	    dy2 = dy * dy;
+	    if(dy2 > d2mini)
+	      break;
+	    dx = x[right] - xi;
+	    d2 =  dx * dx + dy2;
+	    if (d2 < d2mini) {
+#ifdef IGNOREZERO
+	      if(d2 > 0) {
+#endif
+		d2mini = d2;
+		if(d2mini <= d2max)
+		  break;
+#ifdef IGNOREZERO
+	      }
+#endif
+	    }
+	  }
+      }
+      if(i > 0 && d2mini > d2max){
+	/* search backward */
+	for(left = i - 1; left >= 0; --left)
+	{
+	  dy = yi - y[left];
+	  dy2 = dy * dy;
+	  if(dy2 > d2mini)
+	    break;
+
+	  dx = x[left] - xi;
+	  d2 =  dx * dx + dy2;
+	  if (d2 < d2mini) {
+#ifdef IGNOREZERO
+	    if(d2 > 0) {
+#endif
+	      d2mini = d2;
+	      if(d2mini <= d2max)
+		break;
+#ifdef IGNOREZERO
+	    }
+#endif
+	  }
+	}
+      }
+      if(d2mini > d2max)
+	d2max = d2mini;
+    }
+  }
+  *result = d2max;
+}
diff --git a/src/methas.c b/src/methas.c
new file mode 100755
index 0000000..0205153
--- /dev/null
+++ b/src/methas.c
@@ -0,0 +1,423 @@
+#include <R.h>
+#include <Rdefines.h>
+#include <R_ext/Utils.h>
+#include "methas.h"
+#include "chunkloop.h"
+#include "mhsnoop.h"
+
+void fexitc(const char *msg);
+
+
+/* To switch on debugging code, 
+   insert the line: #define MH_DEBUG YES
+*/
+#ifndef MH_DEBUG
+#define MH_DEBUG NO
+#endif
+
+/* 
+   This is the value of 'ix' when we are proposing a birth.
+   It must be equal to -1 so that NONE+1 = 0.
+*/
+#define NONE -1
+
+extern Cifns getcif(char *);
+
+SEXP xmethas(
+	     SEXP ncif,
+	     SEXP cifname,
+	     SEXP beta,
+	     SEXP ipar,
+	     SEXP iparlen,
+	     SEXP period,
+	     SEXP xprop,
+	     SEXP yprop,
+	     SEXP mprop,
+	     SEXP ntypes,
+	     SEXP nrep,
+	     SEXP p,
+	     SEXP q,
+	     SEXP nverb,
+	     SEXP nrep0,
+	     SEXP x,
+	     SEXP y,
+	     SEXP marks,
+	     SEXP ncond,
+	     SEXP fixall,
+             SEXP track,
+	     SEXP thin,
+             SEXP snoopenv,
+	     SEXP temper,
+	     SEXP invertemp)
+{
+  char *cifstring;
+  double cvd, cvn, qnodds, anumer, adenom, betavalue;
+  double *iparvector;
+  int verb, marked, tempered, mustupdate, itype;
+  int nfree, nsuspect;
+  int irep, ix, j, maxchunk, iverb;
+  int Ncif; 
+  int *plength;
+  long Nmore;
+  int permitted;
+  double invtemp;
+  double *xx, *yy, *xpropose, *ypropose;
+  int    *mm,      *mpropose, *pp, *aa;
+  SEXP out, xout, yout, mout, pout, aout;
+  int tracking, thinstart;
+#ifdef HISTORY_INCLUDES_RATIO
+  SEXP numout, denout;
+  double *nn, *dd;
+#endif
+
+  State state;
+  Model model;
+  Algor algo;
+  Propo birthprop, deathprop, shiftprop;
+  History history;
+  Snoop snooper;
+
+  /* The following variables are used only for a non-hybrid interaction */
+  Cifns thecif;     /* cif structure */
+  Cdata *thecdata;  /* pointer to initialised cif data block */
+
+  /* The following variables are used only for a hybrid interaction */
+  Cifns *cif;       /* vector of cif structures */
+  Cdata **cdata;    /* vector of pointers to initialised cif data blocks */
+  int *needupd;     /* vector of logical values */
+  int   k;          /* loop index for cif's */
+
+  /* =================== Protect R objects from garbage collector ======= */
+
+  PROTECT(ncif      = AS_INTEGER(ncif)); 
+  PROTECT(cifname   = AS_CHARACTER(cifname)); 
+  PROTECT(beta      = AS_NUMERIC(beta)); 
+  PROTECT(ipar      = AS_NUMERIC(ipar)); 
+  PROTECT(iparlen   = AS_INTEGER(iparlen)); 
+  PROTECT(period    = AS_NUMERIC(period)); 
+  PROTECT(xprop     = AS_NUMERIC(xprop)); 
+  PROTECT(yprop     = AS_NUMERIC(yprop)); 
+  PROTECT(mprop     = AS_INTEGER(mprop)); 
+  PROTECT(ntypes    = AS_INTEGER(ntypes)); 
+  PROTECT(nrep      = AS_INTEGER(nrep)); 
+  PROTECT(   p      = AS_NUMERIC(p)); 
+  PROTECT(   q      = AS_NUMERIC(q)); 
+  PROTECT(nverb     = AS_INTEGER(nverb)); 
+  PROTECT(nrep0     = AS_INTEGER(nrep0)); 
+  PROTECT(   x      = AS_NUMERIC(x)); 
+  PROTECT(   y      = AS_NUMERIC(y)); 
+  PROTECT( marks    = AS_INTEGER(marks)); 
+  PROTECT(fixall    = AS_INTEGER(fixall)); 
+  PROTECT(ncond     = AS_INTEGER(ncond)); 
+  PROTECT(track     = AS_INTEGER(track)); 
+  PROTECT(thin      = AS_INTEGER(thin)); 
+  PROTECT(temper    = AS_INTEGER(temper)); 
+  PROTECT(invertemp = AS_NUMERIC(invertemp)); 
+
+                    /* that's 24 protected objects */
+
+  /* =================== Translate arguments from R to C ================ */
+
+  /* 
+     Ncif is the number of cif's
+     plength[i] is the number of interaction parameters in the i-th cif
+  */
+  Ncif = *(INTEGER_POINTER(ncif));
+  plength = INTEGER_POINTER(iparlen);
+
+  /* copy RMH algorithm parameters */
+  algo.nrep   = *(INTEGER_POINTER(nrep));
+  algo.nverb  = *(INTEGER_POINTER(nverb));
+  algo.nrep0  = *(INTEGER_POINTER(nrep0));
+  algo.p = *(NUMERIC_POINTER(p));
+  algo.q = *(NUMERIC_POINTER(q));
+  algo.fixall = ((*(INTEGER_POINTER(fixall))) == 1);
+  algo.ncond =  *(INTEGER_POINTER(ncond));
+  algo.tempered = tempered = (*(INTEGER_POINTER(temper)) != 0);
+  algo.invtemp  = invtemp  = *(NUMERIC_POINTER(invertemp));
+
+  /* copy model parameters without interpreting them */
+  model.beta = NUMERIC_POINTER(beta);
+  model.ipar = iparvector = NUMERIC_POINTER(ipar);
+  model.period = NUMERIC_POINTER(period);
+  model.ntypes = *(INTEGER_POINTER(ntypes));
+
+  state.ismarked = marked = (model.ntypes > 1);
+  
+  /* copy initial state */
+  state.npts   = LENGTH(x);
+  state.npmax  = 4 * ((state.npts > 256) ? state.npts : 256);
+  state.x = (double *) R_alloc(state.npmax, sizeof(double));
+  state.y = (double *) R_alloc(state.npmax, sizeof(double));
+  xx = NUMERIC_POINTER(x);
+  yy = NUMERIC_POINTER(y);
+  if(marked) {
+    state.marks =(int *) R_alloc(state.npmax, sizeof(int));
+    mm = INTEGER_POINTER(marks);
+  }
+  if(!marked) {
+    for(j = 0; j < state.npts; j++) {
+      state.x[j] = xx[j];
+      state.y[j] = yy[j];
+    }
+  } else {
+    for(j = 0; j < state.npts; j++) {
+      state.x[j] = xx[j];
+      state.y[j] = yy[j];
+      state.marks[j] = mm[j];
+    }
+  }
+#if MH_DEBUG
+  Rprintf("\tnpts=%d\n", state.npts);
+#endif
+
+  /* access proposal data */
+  xpropose = NUMERIC_POINTER(xprop);
+  ypropose = NUMERIC_POINTER(yprop);
+  mpropose = INTEGER_POINTER(mprop);
+  /* we need to initialise 'mpropose' to keep compilers happy.
+     mpropose is only used for marked patterns.
+     Note 'mprop' is always a valid pointer */
+
+  
+  /* ================= Allocate space for cifs etc ========== */
+
+  if(Ncif > 1) {
+    cif = (Cifns *) R_alloc(Ncif, sizeof(Cifns));
+    cdata = (Cdata **) R_alloc(Ncif, sizeof(Cdata *));
+    needupd = (int *) R_alloc(Ncif, sizeof(int));
+  } else {
+    /* Keep the compiler happy */
+    cif = (Cifns *) R_alloc(1, sizeof(Cifns));
+    cdata = (Cdata **) R_alloc(1, sizeof(Cdata *));
+    needupd = (int *) R_alloc(1, sizeof(int));
+  }
+
+
+  /* ================= Determine process to be simulated  ========== */
+  
+  /* Get the cif's */
+  if(Ncif == 1) {
+    cifstring = (char *) STRING_VALUE(cifname);
+    thecif = getcif(cifstring);
+    mustupdate = NEED_UPDATE(thecif);
+    if(thecif.marked && !marked)
+      fexitc("cif is for a marked point process, but proposal data are not marked points; bailing out.");
+    /* Keep compiler happy*/
+    cif[0] = thecif;
+    needupd[0] = mustupdate;
+  } else {
+    mustupdate = NO;
+    for(k = 0; k < Ncif; k++) {
+      cifstring = (char *) CHAR(STRING_ELT(cifname, k));
+      cif[k] = getcif(cifstring);
+      needupd[k] = NEED_UPDATE(cif[k]);
+      if(needupd[k])
+	mustupdate = YES;
+      if(cif[k].marked && !marked)
+	fexitc("component cif is for a marked point process, but proposal data are not marked points; bailing out.");
+    }
+  }
+  /* ============= Initialise transition history ========== */
+
+  tracking = (*(INTEGER_POINTER(track)) != 0);
+  /* Initialise even if not needed, to placate the compiler */
+  if(tracking) { history.nmax = algo.nrep; } else { history.nmax = 1; }
+  history.n = 0;
+  history.proptype = (int *) R_alloc(history.nmax, sizeof(int));
+  history.accepted = (int *) R_alloc(history.nmax, sizeof(int));
+#ifdef HISTORY_INCLUDES_RATIO
+  history.numerator   = (double *) R_alloc(history.nmax, sizeof(double));
+  history.denominator = (double *) R_alloc(history.nmax, sizeof(double));
+#endif
+
+  /* ============= Visual debugging ========== */
+
+  /* Active if 'snoopenv' is an environment */
+
+
+#if MH_DEBUG
+  Rprintf("Initialising mhsnoop\n");
+#endif
+
+  initmhsnoop(&snooper, snoopenv);
+
+#if MH_DEBUG
+  Rprintf("Initialised\n");
+  if(snooper.active) Rprintf("Debugger is active.\n");
+#endif
+
+  /* ================= Thinning of initial state ==================== */
+
+  thinstart = (*(INTEGER_POINTER(thin)) != 0);
+
+  /* ================= Initialise algorithm ==================== */
+ 
+  /* Interpret the model parameters and initialise auxiliary data */
+  if(Ncif == 1) {
+    thecdata = (*(thecif.init))(state, model, algo);
+    /* keep compiler happy */
+    cdata[0] = thecdata;
+  } else {
+    for(k = 0; k < Ncif; k++) {
+      if(k > 0)
+	model.ipar += plength[k-1];
+      cdata[k] = (*(cif[k].init))(state, model, algo);
+    }
+    /* keep compiler happy */
+    thecdata = cdata[0];
+  }
+
+  /* Set the fixed elements of the proposal objects */
+  birthprop.itype = BIRTH;
+  deathprop.itype = DEATH;
+  shiftprop.itype = SHIFT;
+  birthprop.ix = NONE;
+  if(!marked) 
+    birthprop.mrk = deathprop.mrk = shiftprop.mrk = NONE;
+
+  /* Set up some constants */
+  verb   = (algo.nverb !=0);
+  qnodds = (1.0 - algo.q)/algo.q;
+
+
+  /* Set value of beta for unmarked process */
+  /* (Overwritten for marked process, but keeps compiler happy) */
+  betavalue = model.beta[0];
+
+  /* ============= Run Metropolis-Hastings  ================== */
+
+  /* Initialise random number generator */
+  GetRNGstate();
+
+/*
+
+  Here comes the code for the M-H loop.
+
+  The basic code (in mhloop.h) is #included many times using different options
+
+  The C preprocessor descends through a chain of files 
+       mhv1.h, mhv2.h, ...
+  to enumerate all possible combinations of flags.
+
+*/
+
+#include "mhv1.h"
+
+  /* relinquish random number generator */
+  PutRNGstate();
+
+  /* ============= Done  ================== */
+
+  /* Create space for output, and copy final state */
+  /* Point coordinates */
+  PROTECT(xout = NEW_NUMERIC(state.npts));
+  PROTECT(yout = NEW_NUMERIC(state.npts));
+  xx = NUMERIC_POINTER(xout);
+  yy = NUMERIC_POINTER(yout);
+  for(j = 0; j < state.npts; j++) {
+    xx[j] = state.x[j];
+    yy[j] = state.y[j];
+  }
+  /* Marks */
+  if(marked) {
+    PROTECT(mout = NEW_INTEGER(state.npts));
+    mm = INTEGER_POINTER(mout);
+    for(j = 0; j < state.npts; j++) 
+      mm[j] = state.marks[j];
+  } else {
+    /* Keep the compiler happy */
+    PROTECT(mout = NEW_INTEGER(1));
+    mm = INTEGER_POINTER(mout);
+    mm[0] = 0;
+  }
+  /* Transition history */
+  if(tracking) {
+    PROTECT(pout = NEW_INTEGER(algo.nrep));
+    PROTECT(aout = NEW_INTEGER(algo.nrep));
+    pp = INTEGER_POINTER(pout);
+    aa = INTEGER_POINTER(aout);
+    for(j = 0; j < algo.nrep; j++) {
+      pp[j] = history.proptype[j];
+      aa[j] = history.accepted[j];
+    }
+#ifdef HISTORY_INCLUDES_RATIO
+    PROTECT(numout = NEW_NUMERIC(algo.nrep));
+    PROTECT(denout = NEW_NUMERIC(algo.nrep));
+    nn = NUMERIC_POINTER(numout);
+    dd = NUMERIC_POINTER(denout);
+    for(j = 0; j < algo.nrep; j++) {
+      nn[j] = history.numerator[j];
+      dd[j] = history.denominator[j];
+    }
+#endif
+  } else {
+    /* Keep the compiler happy */
+    PROTECT(pout = NEW_INTEGER(1));
+    PROTECT(aout = NEW_INTEGER(1));
+    pp = INTEGER_POINTER(pout);
+    aa = INTEGER_POINTER(aout);
+    pp[0] = aa[0] = 0;
+#ifdef HISTORY_INCLUDES_RATIO
+    PROTECT(numout = NEW_NUMERIC(1));
+    PROTECT(denout = NEW_NUMERIC(1));
+    nn = NUMERIC_POINTER(numout);
+    dd = NUMERIC_POINTER(denout);
+    nn[0] = dd[0] = 0;
+#endif
+  }
+
+  /* Pack up into list object for return */
+  if(!tracking) {
+    /* no transition history */
+    if(!marked) {
+      PROTECT(out = NEW_LIST(2));
+      SET_VECTOR_ELT(out, 0, xout);
+      SET_VECTOR_ELT(out, 1, yout);
+    } else {
+      PROTECT(out = NEW_LIST(3)); 
+      SET_VECTOR_ELT(out, 0, xout);
+      SET_VECTOR_ELT(out, 1, yout); 
+      SET_VECTOR_ELT(out, 2, mout);
+    }
+  } else {
+    /* transition history */
+    if(!marked) {
+#ifdef HISTORY_INCLUDES_RATIO
+      PROTECT(out = NEW_LIST(6));
+#else
+      PROTECT(out = NEW_LIST(4));
+#endif
+      SET_VECTOR_ELT(out, 0, xout);
+      SET_VECTOR_ELT(out, 1, yout);
+      SET_VECTOR_ELT(out, 2, pout);
+      SET_VECTOR_ELT(out, 3, aout);
+#ifdef HISTORY_INCLUDES_RATIO
+      SET_VECTOR_ELT(out, 4, numout);
+      SET_VECTOR_ELT(out, 5, denout);
+#endif
+    } else {
+#ifdef HISTORY_INCLUDES_RATIO
+      PROTECT(out = NEW_LIST(7));
+#else
+      PROTECT(out = NEW_LIST(5)); 
+#endif
+      SET_VECTOR_ELT(out, 0, xout);
+      SET_VECTOR_ELT(out, 1, yout); 
+      SET_VECTOR_ELT(out, 2, mout);
+      SET_VECTOR_ELT(out, 3, pout);
+      SET_VECTOR_ELT(out, 4, aout);
+#ifdef HISTORY_INCLUDES_RATIO
+      SET_VECTOR_ELT(out, 5, numout);
+      SET_VECTOR_ELT(out, 6, denout);
+#endif
+    }
+  }
+#ifdef HISTORY_INCLUDES_RATIO
+  UNPROTECT(32);  /* 24 arguments plus xout, yout, mout, pout, aout, out,
+                            numout, denout */
+#else
+  UNPROTECT(30);  /* 24 arguments plus xout, yout, mout, pout, aout, out */
+#endif
+  return(out);
+}
diff --git a/src/methas.h b/src/methas.h
new file mode 100755
index 0000000..06d18af
--- /dev/null
+++ b/src/methas.h
@@ -0,0 +1,120 @@
+/* 
+   Definitions of types and data structures for Metropolis-Hastings
+
+   State       Current state of point pattern
+
+   Model       Model parameters passed from R
+
+   Cdata       (pointer to) model parameters and precomputed data in C
+
+   Algor       Algorithm parameters (p, q, nrep etc)
+
+   Propo       Proposal in Metropolis-Hastings algorithm
+
+   History     Transition history of MH algorithm
+
+   Cifns       Set of functions for computing the conditional intensity
+               for a point process model. 
+	       This consists of three functions
+                    init(State, Model, Algor) .... initialises auxiliary data
+		    eval(State, Propo) ........... evaluates cif
+		    update(State,Propo) .......... updates auxiliary data
+
+ */
+
+
+/* Current state of point pattern */
+typedef struct State { 
+  double *x;     /* vectors of Cartesian coordinates */
+  double *y;
+  int *marks;    /* vector of mark values */
+  int npts;       /* current number of points */
+  int npmax;      /* storage limit */
+  int ismarked;   /* whether the pattern is marked */
+} State;
+
+/* Parameters of model passed from R */
+typedef struct Model {
+  double *beta;     /* vector of activity parameters */
+  double *ipar;     /* vector of interaction parameters */
+  double *period;  /* width & height of rectangle, if torus */
+  int ntypes;      /* number of possible marks */
+} Model;
+
+/* 
+   A pointer to Cdata 
+   is a pointer to C storage for parameters of model
+*/
+
+typedef void Cdata;
+
+/* RMH Algorithm parameters */
+typedef struct Algor {
+  double p;         /* probability of proposing shift */
+  double q;         /* conditional probability of proposing death */
+  int fixall;       /* if TRUE, only shifts of location are feasible */
+  int ncond;        /* For conditional simulation, 
+		       the first 'ncond' points are fixed */
+  int nrep;        /* number of iterations */
+  int nverb;       /* print report every 'nverb' iterations */
+  int nrep0;       /* number of iterations already performed 
+		      in previous blocks - for reporting purposes */
+  int tempered;    /* TRUE if tempering is applied */
+  double invtemp;  /* inverse temperature if tempering is applied */
+} Algor;
+
+/* Metropolis-Hastings proposal */
+typedef struct Propo {
+  double u;         /* location of point of interest */
+  double v;
+  int mrk;       /* mark of point of interest */
+  int ix;           /* index of point of interest, if already in pattern */
+  int itype;        /* transition type */
+} Propo;
+
+/* transition codes 'itype' */
+#define REJECT 0
+#define BIRTH 1
+#define DEATH 2
+#define SHIFT 3
+
+#define HISTORY_INCLUDES_RATIO
+
+/* Record of transition history */
+typedef struct History {
+  int nmax;              /* length of vectors */
+  int n;                 /* number of events recorded */
+  int *proptype;         /* vector: proposal type */
+  int *accepted;         /* vector: 0 for reject, 1 for accept */
+#ifdef HISTORY_INCLUDES_RATIO
+  double *numerator;     /* vectors: Hastings ratio numerator & denominator  */
+  double *denominator;
+#endif
+} History;
+
+/* conditional intensity functions */
+
+typedef Cdata *   (*initfunptr)(State state, Model model, Algor algo);
+typedef double    (*evalfunptr)(Propo prop,  State state, Cdata *cdata);
+typedef void      (*updafunptr)(State state, Propo prop,  Cdata *cdata);
+
+typedef struct Cifns {
+  initfunptr init;
+  evalfunptr eval;
+  updafunptr update;
+  int        marked;
+} Cifns;
+
+#define NEED_UPDATE(X) ((X).update != (updafunptr) NULL)
+
+#define NULL_CIFNS { (initfunptr) NULL, (evalfunptr) NULL, (updafunptr) NULL, NO}
+
+/* miscellaneous macros */
+
+#include "yesno.h"
+
+# define MAT(X,I,J,M) (X[(I)+(J)*(M)])
+
+
+
+
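
The MAT macro indexes a matrix stored column by column, which is how R
lays out its matrices. A usage sketch (not part of the patch; assumes
methas.h, which defines MAT, is included):

    void mat_demo(void) {
      /* a 2 x 3 matrix in column-major order, so M = 2 rows */
      double X[6] = { 1, 2, 3, 4, 5, 6 };
      double a = MAT(X, 0, 0, 2);   /* row 0, column 0 -> 1 */
      double b = MAT(X, 1, 2, 2);   /* row 1, column 2 -> 6 */
    }
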
diff --git a/src/mhloop.h b/src/mhloop.h
new file mode 100755
index 0000000..c04af3b
--- /dev/null
+++ b/src/mhloop.h
@@ -0,0 +1,511 @@
+
+/* 
+   mhloop.h
+
+   This file contains the iteration loop 
+   for the Metropolis-Hastings algorithm methas.c 
+
+   It is #included several times in methas.c
+   with different #defines for the following variables
+
+   MH_MARKED    whether the simulation is marked
+               (= the variable 'marked' is TRUE)
+
+   MH_SINGLE    whether there is a single interaction 
+              (as opposed to a hybrid of several interactions)
+
+   MH_TEMPER    whether tempering is applied
+
+   MH_TRACKING  whether to save transition history
+
+   MH_DEBUG     whether to print debug information
+   
+   MH_SNOOP     whether to run visual debugger
+
+   $Revision: 1.22 $  $Date: 2015/09/06 05:21:55 $ 
+
+*/
+
+#ifndef MH_DEBUG
+#define MH_DEBUG NO
+#endif
+
+/* ..... Pre-processing: single pass deleting illegal/improbable points ..... */
+
+nfree = state.npts - algo.ncond;  /* number of 'free' points */
+
+if(thinstart && nfree > 0) {
+  nsuspect = nfree;
+  while(nsuspect > 0) {
+    /* scan for illegal points */
+    ix = state.npts - nsuspect;
+    deathprop.ix = ix;
+    deathprop.u  = state.x[ix];
+    deathprop.v  = state.y[ix];
+#if MH_MARKED
+    deathprop.mrk = state.marks[ix];
+#endif
+#if MH_DEBUG
+#if MH_MARKED
+    Rprintf("check legality of point %d = (%lf, %lf) with mark %d\n", 
+	    ix, deathprop.u, deathprop.v, deathprop.mrk);
+#else
+    Rprintf("check legality of point %d = (%lf, %lf)\n", 
+	    ix, deathprop.u, deathprop.v);
+#endif
+#endif
+    /* evaluate conditional intensity without trend terms */
+
+#if MH_SINGLE
+    adenom = (*(thecif.eval))(deathprop, state, thecdata);
+#else
+    adenom = 1.0;
+    for(k = 0; k < Ncif; k++)
+      adenom *= (*(cif[k].eval))(deathprop, state, cdata[k]);
+#endif
+#if MH_TEMPER
+    adenom = pow(adenom, invtemp);
+#endif
+#if MH_DEBUG
+    Rprintf("cif = %lf\n", adenom);
+#endif
+    /* accept/reject */
+    if(unif_rand() >= adenom) {
+#if MH_DEBUG
+      Rprintf("deleting illegal/improbable point\n");
+#endif
+      /* delete point x[ix], y[ix] */
+      if(mustupdate) {
+	/* Update auxiliary variables first */
+#if MH_SINGLE
+	(*(thecif.update))(state, deathprop, thecdata);
+#else
+	for(k = 0; k < Ncif; k++) {
+	  if(needupd[k])
+	    (*(cif[k].update))(state, deathprop, cdata[k]);
+	}
+#endif
+      }
+      state.npts--;
+      nfree--;
+#if MH_DEBUG
+      Rprintf("deleting point %d\n", ix);
+      Rprintf("\tnpts=%d\n", state.npts);
+#endif
+      if(ix < state.npts) {
+	for(j = ix; j < state.npts; j++) {
+	  state.x[j] = state.x[j+1];
+	  state.y[j] = state.y[j+1];
+#if MH_MARKED
+	  state.marks[j] = state.marks[j+1];
+#endif
+	}
+      }
+    }
+    nsuspect--;
+  }
+ }
+
+
+/* ............... MAIN ITERATION LOOP  ............................. */
+
+
+OUTERCHUNKLOOP(irep, algo.nrep, maxchunk, 1024) {
+  R_CheckUserInterrupt();
+  INNERCHUNKLOOP(irep, algo.nrep, maxchunk, 1024) {
+
+#if MH_DEBUG
+    Rprintf("iteration %d\n", irep);
+#endif
+
+    if(verb) {
+      /* print progress message every nverb iterations */
+      iverb = irep + 1 + algo.nrep0;
+      if((iverb % algo.nverb) == 0)
+	Rprintf("iteration %d\n", iverb);
+    }
+
+    itype = REJECT;
+
+    nfree = state.npts - algo.ncond;  /* number of 'free' points */
+
+    /* ................  generate proposal ..................... */
+    /* Shift or birth/death: */
+    if(unif_rand() > algo.p) {
+#if MH_DEBUG
+      Rprintf("propose birth or death\n");
+#endif
+      /* Birth/death: */
+      if(unif_rand() > algo.q) {
+	/* Propose birth: */
+	birthprop.u = xpropose[irep];
+	birthprop.v = ypropose[irep];
+#if MH_MARKED
+	birthprop.mrk = mpropose[irep];
+#endif
+#if MH_DEBUG
+#if MH_MARKED
+	Rprintf("propose birth at (%lf, %lf) with mark %d\n", 
+		birthprop.u, birthprop.v, birthprop.mrk);
+#else
+	Rprintf("propose birth at (%lf, %lf)\n", birthprop.u, birthprop.v);
+#endif
+#endif
+	/* evaluate conditional intensity */
+
+#if MH_MARKED
+	betavalue = model.beta[birthprop.mrk];
+#endif
+
+#if MH_SINGLE
+	anumer = betavalue * (*(thecif.eval))(birthprop, state, thecdata);
+#else
+	anumer = betavalue;
+	for(k = 0; k < Ncif; k++)
+	  anumer *= (*(cif[k].eval))(birthprop, state, cdata[k]);
+#endif
+#if MH_TEMPER
+        anumer = pow(anumer, invtemp);
+#endif
+
+	adenom = qnodds*(nfree+1);
+
+#if MH_DEBUG
+	Rprintf("cif = %lf, Hastings ratio = %lf\n", anumer, anumer/adenom);
+#endif
+
+	/* accept/reject */
+	if(unif_rand() * adenom < anumer) {
+#if MH_DEBUG
+	  Rprintf("accepted birth\n");
+#endif
+	  itype = BIRTH;  /* Birth proposal accepted. */
+	}
+#if MH_SNOOP
+	/* visual debug */
+	mhsnoop(&snooper, irep, &algo, &state, &birthprop, 
+		anumer, adenom, &itype);
+#endif
+#if MH_TRACKING
+	/* save transition history */
+	if(irep < history.nmax) {
+	  history.n++;
+	  history.proptype[irep] = BIRTH;
+	  history.accepted[irep] = (itype == REJECT) ? 0 : 1;
+#ifdef HISTORY_INCLUDES_RATIO
+	  history.numerator[irep] = anumer;
+	  history.denominator[irep] = adenom;
+#endif
+	}
+#endif
+      } else if(nfree > 0) {
+	/* Propose death: */
+	ix = floor(nfree * unif_rand());
+	if(ix < 0) ix = 0;
+	ix = algo.ncond + ix;
+	if(ix >= state.npts) ix = state.npts - 1;
+	deathprop.ix = ix;
+	deathprop.u  = state.x[ix];
+	deathprop.v  = state.y[ix];
+#if MH_MARKED
+	deathprop.mrk = state.marks[ix];
+#endif
+#if MH_DEBUG
+#if MH_MARKED
+	Rprintf("propose death of point %d = (%lf, %lf) with mark %d\n", 
+		ix, deathprop.u, deathprop.v, deathprop.mrk);
+#else
+	Rprintf("propose death of point %d = (%lf, %lf)\n", 
+		ix, deathprop.u, deathprop.v);
+#endif
+#endif
+	/* evaluate conditional intensity */
+
+#if MH_MARKED
+	betavalue = model.beta[deathprop.mrk];
+#endif
+
+#if MH_SINGLE
+	adenom = betavalue * (*(thecif.eval))(deathprop, state, thecdata);
+#else
+	adenom = betavalue;
+	for(k = 0; k < Ncif; k++)
+	  adenom *= (*(cif[k].eval))(deathprop, state, cdata[k]);
+#endif
+#if MH_TEMPER
+        adenom = pow(adenom, invtemp);
+#endif
+
+	anumer = qnodds * nfree;
+#if MH_DEBUG
+	Rprintf("cif = %lf, Hastings ratio = %lf\n", adenom, anumer/adenom);
+#endif
+	/* accept/reject */
+	if(unif_rand() * adenom < anumer) {
+#if MH_DEBUG
+	  Rprintf("accepted death\n");
+#endif
+	  itype = DEATH; /* Death proposal accepted. */
+	}
+#if MH_SNOOP
+	/* visual debug */
+	mhsnoop(&snooper, irep, &algo, &state, &deathprop, 
+		anumer, adenom, &itype);
+#endif
+#if MH_TRACKING
+	/* save transition history */
+	if(irep < history.nmax) {
+	  history.n++;
+	  history.proptype[irep] = DEATH;
+	  history.accepted[irep] = (itype == REJECT) ? 0 : 1;
+#ifdef HISTORY_INCLUDES_RATIO
+	  history.numerator[irep] = anumer;
+	  history.denominator[irep] = adenom;
+#endif
+	}
+#endif
+      }
+    } else if(nfree > 0) {
+      /* Propose shift: */
+      /* point to be shifted */
+      ix = floor(nfree * unif_rand());
+      if(ix < 0) ix = 0;
+      ix = algo.ncond + ix;
+      if(ix >= state.npts) ix = state.npts - 1;
+      deathprop.ix = ix;
+      deathprop.u  = state.x[ix];
+      deathprop.v  = state.y[ix];
+#if MH_MARKED
+      deathprop.mrk = state.marks[ix];
+#endif
+      /* where to shift */
+      permitted = YES;
+      shiftprop.ix = ix;
+      shiftprop.u = xpropose[irep]; 
+      shiftprop.v = ypropose[irep];
+#if MH_MARKED
+      shiftprop.mrk = mpropose[irep]; 
+      if(algo.fixall) permitted = (shiftprop.mrk == deathprop.mrk);
+#endif
+
+#if MH_DEBUG
+#if MH_MARKED
+      Rprintf("propose shift of point %d = (%lf, %lf)[mark %d] to (%lf, %lf)[mark %d]\n", 
+	      ix, deathprop.u, deathprop.v, deathprop.mrk, 
+	      shiftprop.u, shiftprop.v, shiftprop.mrk);
+#else
+      Rprintf("propose shift of point %d = (%lf, %lf) to (%lf, %lf)\n", 
+	      ix, deathprop.u, deathprop.v, shiftprop.u, shiftprop.v);
+#endif
+#endif
+
+      /* evaluate cif in two stages */
+      cvn = cvd = 1.0;
+      if(permitted) {
+#if MH_SINGLE
+	cvn = (*(thecif.eval))(shiftprop, state, thecdata);
+	if(cvn > 0.0) {
+	  cvd = (*(thecif.eval))(deathprop, state, thecdata);
+	} else {
+	  permitted = NO;
+	}
+#else
+	for(k = 0; k < Ncif; k++) {
+	  cvn *= (*(cif[k].eval))(shiftprop, state, cdata[k]);
+	  if(cvn > 0.0) {
+	    cvd *= (*(cif[k].eval))(deathprop, state, cdata[k]);
+	  } else {
+	    permitted = NO;
+	    break; 
+	  }
+	}
+#endif
+      } 
+
+      if(permitted) {
+#if MH_MARKED
+	cvn *= model.beta[shiftprop.mrk];
+	cvd *= model.beta[deathprop.mrk];
+#endif
+#if MH_TEMPER
+	cvn = pow(cvn, invtemp);
+	cvd = pow(cvd, invtemp);
+#endif
+
+#if MH_DEBUG
+	Rprintf("cif[old] = %lf, cif[new] = %lf, Hastings ratio = %lf\n", 
+		cvd, cvn, cvn/cvd);
+#endif
+	/* accept/reject */
+	if(unif_rand() * cvd < cvn) {
+#if MH_DEBUG
+	  Rprintf("accepted shift\n");
+#endif
+	  itype = SHIFT;          /* Shift proposal accepted . */
+	}
+      } else {
+	cvn = 0.0;
+	cvd = 1.0;
+#if MH_DEBUG
+	Rprintf("Forbidden shift");
+#endif
+      }
+
+#if MH_SNOOP
+	/* visual debug */
+	mhsnoop(&snooper, irep, &algo, &state, &shiftprop, 
+		cvn, cvd, &itype);
+#endif
+#if MH_TRACKING
+      /* save transition history */
+      if(irep < history.nmax) {
+	history.n++;
+	history.proptype[irep] = SHIFT;
+	history.accepted[irep] = (itype == REJECT) ? 0 : 1;
+#ifdef HISTORY_INCLUDES_RATIO
+	history.numerator[irep] = cvn;
+	history.denominator[irep] = cvd;
+#endif
+      }
+#endif
+    }
+
+    if(itype != REJECT) {
+      /* ....... implement the transition ............  */
+      if(itype == BIRTH) {      
+	/* Birth transition */
+	/* add point at (u,v) */
+#if MH_DEBUG
+#if MH_MARKED
+	Rprintf("implementing birth at (%lf, %lf) with mark %d\n", 
+		birthprop.u, birthprop.v, birthprop.mrk);
+#else
+	Rprintf("implementing birth at (%lf, %lf)\n", 
+		birthprop.u, birthprop.v);
+#endif
+#endif
+	if(state.npts + 1 > state.npmax) {
+#if MH_DEBUG
+	  Rprintf("!!!!!!!!!!! storage overflow !!!!!!!!!!!!!!!!!\n");
+#endif
+	  /* storage overflow; allocate more storage */
+	  Nmore = 2 * state.npmax;
+	  state.x = (double *) S_realloc((char *) state.x, 
+					 Nmore,  state.npmax, 
+					 sizeof(double));
+	  state.y = (double *) S_realloc((char *) state.y, 
+					 Nmore,  state.npmax, 
+					 sizeof(double));
+#if MH_MARKED
+	  state.marks = (int *) S_realloc((char *) state.marks, 
+					  Nmore,  state.npmax, 
+					  sizeof(int));
+#endif
+	  state.npmax = Nmore;
+
+	  /* call the initialiser again, to allocate additional space */
+#if MH_SINGLE
+	  thecdata = (*(thecif.init))(state, model, algo);
+#else
+	  model.ipar = iparvector;
+	  for(k = 0; k < Ncif; k++) {
+	    if(k > 0)
+	      model.ipar += plength[k-1];
+	    cdata[k] = (*(cif[k].init))(state, model, algo);
+	  }	
+#endif
+#if MH_DEBUG
+	  Rprintf("........... storage extended .................\n");
+#endif
+	}
+	
+	if(mustupdate) {
+	  /* Update auxiliary variables first */
+#if MH_SINGLE
+	  (*(thecif.update))(state, birthprop, thecdata);
+#else
+	  for(k = 0; k < Ncif; k++) {
+	    if(needupd[k])
+	      (*(cif[k].update))(state, birthprop, cdata[k]);
+	  }
+#endif
+	}
+	/* Now add point */
+	state.x[state.npts] = birthprop.u;
+	state.y[state.npts] = birthprop.v;
+#if MH_MARKED
+	state.marks[state.npts] = birthprop.mrk;
+#endif
+	state.npts     = state.npts + 1;
+#if MH_DEBUG
+	Rprintf("\tnpts=%d\n", state.npts);
+#endif
+      } else if(itype==DEATH) { 
+	/* Death transition */
+	/* delete point x[ix], y[ix] */
+	if(mustupdate) {
+	  /* Update auxiliary variables first */
+#if MH_SINGLE
+	  (*(thecif.update))(state, deathprop, thecdata);
+#else
+	  for(k = 0; k < Ncif; k++) {
+	    if(needupd[k])
+	      (*(cif[k].update))(state, deathprop, cdata[k]);
+	  }
+#endif
+	}
+	ix = deathprop.ix;
+	state.npts = state.npts - 1;
+#if MH_DEBUG
+	Rprintf("implementing death of point %d\n", ix);
+	Rprintf("\tnpts=%d\n", state.npts);
+#endif
+	if(ix < state.npts) {
+	  for(j = ix; j < state.npts; j++) {
+	    state.x[j] = state.x[j+1];
+	    state.y[j] = state.y[j+1];
+#if MH_MARKED
+	    state.marks[j] = state.marks[j+1];
+#endif
+	  }
+	}
+      } else {
+	/* Shift transition */
+	/* Shift (x[ix], y[ix]) to (u,v) */
+#if MH_DEBUG
+#if MH_MARKED
+	Rprintf("implementing shift from %d = (%lf, %lf)[%d] to (%lf, %lf)[%d]\n", 
+		deathprop.ix, deathprop.u, deathprop.v, deathprop.mrk,
+		shiftprop.u, shiftprop.v, shiftprop.mrk);
+#else
+	Rprintf("implementing shift from %d = (%lf, %lf) to (%lf, %lf)\n", 
+		deathprop.ix, deathprop.u, deathprop.v,
+		shiftprop.u, shiftprop.v);
+	Rprintf("\tnpts=%d\n", state.npts);
+#endif
+#endif
+	if(mustupdate) {
+	  /* Update auxiliary variables first */
+#if MH_SINGLE
+	  (*(thecif.update))(state, shiftprop, thecdata);
+#else
+	  for(k = 0; k < Ncif; k++) {
+	    if(needupd[k])
+	      (*(cif[k].update))(state, shiftprop, cdata[k]);
+	  }
+#endif
+	}
+	ix = shiftprop.ix;
+	state.x[ix] = shiftprop.u;
+	state.y[ix] = shiftprop.v;
+#if MH_MARKED
+	state.marks[ix] = shiftprop.mrk;
+#endif
+      }
+#if MH_DEBUG
+    } else {
+      Rprintf("rejected\n");
+#endif
+    }
+  }
+}
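
For reference, the acceptance tests implemented by the loop above
reduce to the following comparisons (a summary sketch, not part of the
patch), where U is uniform on (0,1), n = nfree, beta is the relevant
activity parameter, c(.) is the conditional intensity at the proposal,
and qnodds = (1-q)/q; under tempering each beta * c(.) term is raised
to the power invtemp before the comparison, and in the marked case the
shift ratio also carries the betas of the old and new marks:

    /* birth:  accept iff  U * qnodds * (n + 1)  <  beta * c(new)   */
    /* death:  accept iff  U * beta * c(victim)  <  qnodds * n      */
    /* shift:  accept iff  U * c(old)            <  c(new)          */
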
diff --git a/src/mhsnoop.c b/src/mhsnoop.c
new file mode 100644
index 0000000..4a5bb46
--- /dev/null
+++ b/src/mhsnoop.c
@@ -0,0 +1,185 @@
+#include <R.h>
+#include <Rdefines.h>
+#include <R_ext/Utils.h>
+#include "methas.h"
+
+#include "mhsnoopdef.h"
+
+/*
+  mhsnoop.c
+
+  $Revision: 1.8 $  $Date: 2013/05/27 02:09:10 $
+
+  support for visual debugger in RMH
+
+*/
+
+/* To switch on debugging code, 
+   insert the line: #define MH_DEBUG YES
+*/
+#ifndef MH_DEBUG
+#define MH_DEBUG NO
+#endif
+
+
+void initmhsnoop(Snoop *s, SEXP env) {
+  s->active = isEnvironment(env);
+  s->nextstop = 0;         /* stop at iteration 0 */
+  s->nexttype = NO_TYPE;   /* deactivated */
+  if(s->active) {
+    s->env = env;
+    s->expr = findVar(install("callbackexpr"), env);
+  } else {
+    s->env = s->expr = R_NilValue;
+  }
+}
+
+void mhsnoop(Snoop *s, 
+	     int irep, 
+	     Algor *algo,
+	     State *state,
+	     Propo *prop,
+	     double numer,
+	     double denom,
+	     int *itype) 
+{
+  SEXP e;
+  int npts, j;
+  /* passed from C to R before debugger */
+  SEXP Sirep, Sx, Sy, Sm, Sproptype, Sproplocn, Spropmark, Spropindx;
+  SEXP Snumer, Sdenom, Sitype;
+  double *Px, *Py, *Pproplocn;
+  int *Pm;
+  /* passed from R to C after debugger */
+  SEXP Sinxt, Stnxt, SitypeUser;
+
+#if MH_DEBUG
+  Rprintf("mhsnoop called at iteration %d\n", irep);
+#endif
+
+  if(!(s->active)) return;
+
+#if MH_DEBUG
+  Rprintf("mhsnoop is active\n");
+#endif
+
+  /* 
+     execute when the simulation reaches the next stopping time:
+     a specified iteration number 'nextstop'
+     or a specified proposal type 'nexttype'
+ */
+  if(irep != s->nextstop && prop->itype != s->nexttype) return;
+
+#if MH_DEBUG
+  Rprintf("debug triggered\n");
+#endif
+
+  /* environment for communication with R */
+  e = s->env;
+  /* 
+     copy data to R
+  */
+  /* copy iteration number */
+  PROTECT(Sirep = NEW_INTEGER(1));
+  *(INTEGER_POINTER(Sirep)) = irep;
+  setVar(install("irep"), Sirep, e);
+  UNPROTECT(1);
+  /* copy (x,y) coordinates */
+  npts = state->npts;
+  PROTECT(Sx = NEW_NUMERIC(npts));
+  PROTECT(Sy = NEW_NUMERIC(npts));
+  Px = NUMERIC_POINTER(Sx);
+  Py = NUMERIC_POINTER(Sy);
+  for(j = 0; j < npts; j++) {
+    Px[j] = state->x[j];
+    Py[j] = state->y[j];
+  }
+  setVar(install("xcoords"), Sx, e);
+  setVar(install("ycoords"), Sy, e);
+  UNPROTECT(2);
+  /* copy marks */
+  if(state->ismarked) {
+    PROTECT(Sm = NEW_INTEGER(npts));
+    Pm = INTEGER_POINTER(Sm);
+    for(j = 0; j < npts; j++) {
+      Pm[j] = state->marks[j];
+    }
+    setVar(install("mcodes"), Sm, e);
+    UNPROTECT(1);
+  }
+  /* proposal type */
+  PROTECT(Sproptype = NEW_INTEGER(1));
+  *(INTEGER_POINTER(Sproptype)) = prop->itype;
+  setVar(install("proptype"), Sproptype, e);
+  UNPROTECT(1);
+  /* proposal coordinates */
+  PROTECT(Sproplocn = NEW_NUMERIC(2));
+  Pproplocn = NUMERIC_POINTER(Sproplocn);
+  Pproplocn[0] = prop->u;
+  Pproplocn[1] = prop->v;
+  setVar(install("proplocn"), Sproplocn, e);
+  UNPROTECT(1);
+  /* proposal mark value */
+  if(state->ismarked) {
+    PROTECT(Spropmark = NEW_INTEGER(1));
+    *(INTEGER_POINTER(Spropmark)) = prop->mrk;
+    setVar(install("propmark"), Spropmark, e);
+    UNPROTECT(1);
+  }
+  /* proposal point index */
+  PROTECT(Spropindx = NEW_INTEGER(1));
+  *(INTEGER_POINTER(Spropindx)) = prop->ix;
+  setVar(install("propindx"), Spropindx, e);
+  UNPROTECT(1);
+  /* Metropolis-Hastings numerator and denominator */
+  PROTECT(Snumer = NEW_NUMERIC(1));
+  PROTECT(Sdenom = NEW_NUMERIC(1));
+  *(NUMERIC_POINTER(Snumer)) = numer;
+  *(NUMERIC_POINTER(Sdenom)) = denom;
+  setVar(install("numerator"), Snumer, e);
+  setVar(install("denominator"), Sdenom, e);
+  UNPROTECT(2);
+  /* tentative outcome of proposal */
+  PROTECT(Sitype = NEW_INTEGER(1));
+  *(INTEGER_POINTER(Sitype)) = *itype;
+  setVar(install("itype"), Sitype, e);
+  UNPROTECT(1);
+
+  /* ..... call visual debugger */
+
+#if MH_DEBUG
+  Rprintf("executing callback\n");
+#endif
+
+  eval(s->expr, s->env);
+
+  /* update outcome of proposal */
+  SitypeUser = findVar(install("itype"), e);
+  *itype = *(INTEGER_POINTER(SitypeUser));
+
+#if MH_DEBUG
+  Rprintf("Assigning itype = %d\n", *itype);
+#endif
+
+  /* update stopping time */
+  Sinxt = findVar(install("inxt"), e);
+  s->nextstop = *(INTEGER_POINTER(Sinxt));
+  Stnxt = findVar(install("tnxt"), e);
+  s->nexttype = *(INTEGER_POINTER(Stnxt));
+
+#if MH_DEBUG
+  if(s->nextstop >= 0)
+    Rprintf("Next stop: iteration %d\n", s->nextstop);
+  if(s->nexttype >= 0) {
+    if(s->nexttype == BIRTH) 
+      Rprintf("Next stop: first birth proposal\n");
+    if(s->nexttype == DEATH) 
+      Rprintf("Next stop: first death proposal\n");
+    if(s->nexttype == SHIFT) 
+      Rprintf("Next stop: first shift proposal\n");
+  }
+#endif
+
+  return;
+}
+	     
diff --git a/src/mhsnoop.h b/src/mhsnoop.h
new file mode 100644
index 0000000..5801c9f
--- /dev/null
+++ b/src/mhsnoop.h
@@ -0,0 +1,20 @@
+/*
+  Function declarations from mhsnoop.c
+
+  $Revision: 1.4 $ $Date: 2013/05/27 02:09:10 $
+
+*/
+
+#include "mhsnoopdef.h"
+
+void initmhsnoop(Snoop *s, SEXP env);
+
+void mhsnoop(Snoop *s, 
+	     int irep, 
+	     Algor *algo,
+	     State *state,
+	     Propo *prop,
+	     double numer, 
+	     double denom,
+	     int *itype);
+
diff --git a/src/mhsnoopdef.h b/src/mhsnoopdef.h
new file mode 100644
index 0000000..e9946a1
--- /dev/null
+++ b/src/mhsnoopdef.h
@@ -0,0 +1,23 @@
+/*
+
+   mhsnoopdef.h
+
+   Define structure 'Snoop' containing visual debugger parameters and state
+
+   $Revision: 1.2 $  $Date: 2013/05/27 02:09:10 $
+
+*/
+
+#ifndef R_INTERNALS_H_
+#include <Rinternals.h>
+#endif
+
+typedef struct Snoop {
+  int active;    /* true or false */
+  int nextstop;  /* jump to iteration number 'nextstop' */
+  int nexttype;  /* jump to the next proposal of type 'nexttype' */
+  SEXP env;    /* environment for exchanging data with R */
+  SEXP expr;   /* callback expression for visual debugger */
+} Snoop;
+
+#define NO_TYPE -1
diff --git a/src/mhv1.h b/src/mhv1.h
new file mode 100644
index 0000000..d583d9d
--- /dev/null
+++ b/src/mhv1.h
@@ -0,0 +1,20 @@
+/* 
+   mhv1.h
+
+   marked or unmarked simulation
+
+*/
+
+#undef MH_MARKED
+
+if(marked) {
+  /* marked process */
+#define MH_MARKED YES
+#include "mhv2.h"
+#undef MH_MARKED
+} else {
+  /* unmarked process */
+#define MH_MARKED NO
+#include "mhv2.h"
+#undef MH_MARKED
+} 
diff --git a/src/mhv2.h b/src/mhv2.h
new file mode 100644
index 0000000..ab31ef4
--- /dev/null
+++ b/src/mhv2.h
@@ -0,0 +1,21 @@
+/* 
+   mhv2.h
+
+   single interaction or hybrid
+
+*/
+
+#undef MH_SINGLE
+
+if(Ncif == 1) {
+  /* single interaction */
+#define MH_SINGLE YES
+#include "mhv3.h"
+#undef MH_SINGLE
+} else {
+  /* hybrid interaction */
+#define MH_SINGLE NO
+#include "mhv3.h"
+#undef MH_SINGLE
+} 
+
diff --git a/src/mhv3.h b/src/mhv3.h
new file mode 100644
index 0000000..760bebd
--- /dev/null
+++ b/src/mhv3.h
@@ -0,0 +1,20 @@
+/* 
+   mhv3.h
+
+   tracking or not
+
+*/
+
+#undef MH_TRACKING
+
+if(tracking) {
+  /* saving transition history */
+#define MH_TRACKING YES
+#include "mhv4.h"
+#undef MH_TRACKING
+} else {
+  /* not saving transition history */
+#define MH_TRACKING NO
+#include "mhv4.h"
+#undef MH_TRACKING
+} 
diff --git a/src/mhv4.h b/src/mhv4.h
new file mode 100644
index 0000000..6a2fcbe
--- /dev/null
+++ b/src/mhv4.h
@@ -0,0 +1,21 @@
+/* 
+   mhv4.h
+
+   visual debugger or not
+
+*/
+
+#undef MH_SNOOP
+
+if(snooper.active) {
+  /* visual debugger */
+#define MH_SNOOP YES
+#include "mhv5.h"
+#undef MH_SNOOP
+} else {
+  /* no visual debugger */
+#define MH_SNOOP NO
+#include "mhv5.h"
+#undef MH_SNOOP
+} 
+
diff --git a/src/mhv5.h b/src/mhv5.h
new file mode 100644
index 0000000..318a34a
--- /dev/null
+++ b/src/mhv5.h
@@ -0,0 +1,21 @@
+/* 
+   mhv5.h
+
+   tempered or not
+
+*/
+
+#undef MH_TEMPER
+
+if(tempered) {
+  /* tempering */
+#define MH_TEMPER YES
+#include "mhloop.h"
+#undef MH_TEMPER
+} else {
+  /* usual, no tempering */
+#define MH_TEMPER NO
+#include "mhloop.h"
+#undef MH_TEMPER
+} 
+
diff --git a/src/minnnd.c b/src/minnnd.c
new file mode 100644
index 0000000..b6c9ae8
--- /dev/null
+++ b/src/minnnd.c
@@ -0,0 +1,39 @@
+/*
+
+  minnnd.c
+
+  Minimum/Maximum Nearest Neighbour Distance
+
+  Uses code templates in minnnd.h, maxnnd.h
+
+  $Revision: 1.4 $     $Date: 2014/09/18 01:28:48 $
+
+*/
+
+#include <R.h>
+#include <R_ext/Utils.h>
+#include <math.h>
+
+#undef IGNOREZERO
+
+#define FNAME minnnd2
+#include "minnnd.h"
+#undef FNAME
+
+#define FNAME maxnnd2
+#include "maxnnd.h"
+#undef FNAME
+
+/* min/max nearest neighbour distance ignoring zero distances */
+
+#define IGNOREZERO
+
+#define FNAME minPnnd2
+#include "minnnd.h"
+#undef FNAME
+
+#define FNAME maxPnnd2
+#include "maxnnd.h"
+#undef FNAME
+
+
diff --git a/src/minnnd.h b/src/minnnd.h
new file mode 100644
index 0000000..3509671
--- /dev/null
+++ b/src/minnnd.h
@@ -0,0 +1,97 @@
+/*
+
+  minnnd.h
+
+  Code template for minnnd
+   to be #included in minnnd.c
+
+  Macros:  
+  FNAME          Function name 
+  IGNOREZERO     #defined if zero distances should be ignored
+
+  $Revision: 1.1 $     $Date: 2014/09/18 00:52:15 $
+
+*/
+
+
+/* THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER */
+
+void FNAME(n, x, y, huge, result) 
+     /* inputs */
+     int *n;
+     double *x, *y, *huge;
+     /* outputs */
+     double *result;
+{ 
+  int npoints, i, maxchunk, left, right;
+  double d2, d2min, xi, yi, dx, dy, dy2, hu, hu2;
+
+  hu = *huge;
+  hu2 = hu * hu;
+
+  npoints = *n;
+
+  d2min = hu2;
+
+  if(npoints == 0) return;
+
+  /* loop in chunks of 2^16 */
+
+  i = 0; maxchunk = 0; 
+
+  while(i < npoints) {
+
+    R_CheckUserInterrupt();
+
+    maxchunk += 65536; 
+    if(maxchunk > npoints) maxchunk = npoints;
+
+    for(; i < maxchunk; i++) {
+
+      xi = x[i];
+      yi = y[i];
+
+      if(i < npoints - 1) {
+	/* search forward */
+	for(right = i + 1; right < npoints; ++right)
+	  {
+	    dy = y[right] - yi;
+	    dy2 = dy * dy;
+	    if(dy2 > d2min)
+	      break;
+	    dx = x[right] - xi;
+	    d2 =  dx * dx + dy2;
+	    if (d2 < d2min) {
+#ifdef IGNOREZERO
+	      if(d2 > 0) d2min = d2;
+#else
+	      d2min = d2;
+#endif
+	    }
+	  }
+      }
+      if(i > 0){
+	/* search backward */
+	for(left = i - 1; left >= 0; --left)
+	{
+	  dy = yi - y[left];
+	  dy2 = dy * dy;
+	  if(dy2 > d2min)
+	    break;
+
+	  dx = x[left] - xi;
+	  d2 =  dx * dx + dy2;
+	  if (d2 < d2min) {
+#ifdef IGNOREZERO
+	      if(d2 > 0) d2min = d2;
+#else
+	      d2min = d2;
+#endif
+	  }
+	}
+      }
+    }
+  }
+  *result = d2min;
+}
+
diff --git a/src/multihard.c b/src/multihard.c
new file mode 100755
index 0000000..b4fb2b0
--- /dev/null
+++ b/src/multihard.c
@@ -0,0 +1,174 @@
+#include <R.h>
+#include <math.h>
+#include "methas.h"
+#include "dist2.h"
+
+/* for debugging code, include   #define DEBUG 1   */
+
+/* Conditional intensity computation for Multitype Hardcore process */
+
+/* NOTE: types (marks) are numbered from 0 to ntypes-1 */
+
+/* Storage of parameters and precomputed/auxiliary data */
+
+typedef struct MultiHard {
+  int ntypes;
+  double *hc;      /* hc[i,j] = hc[j+ntypes*i] for i,j = 0... ntypes-1 */
+  double *hc2;    /* squared radii */
+  double  range2;   /* square of interaction range */
+  double *period;
+  int per;
+} MultiHard;
+
+
+/* initialiser function */
+
+Cdata *multihardinit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  int i, j, ntypes, n2;
+  double h, h2, range2;
+  MultiHard *multihard;
+
+  multihard = (MultiHard *) R_alloc(1, sizeof(MultiHard));
+
+  multihard->ntypes = ntypes = model.ntypes;
+  n2 = ntypes * ntypes;
+
+#ifdef DEBUG
+  Rprintf("initialising space for %d types\n", ntypes);
+#endif
+
+  /* Allocate space for parameters */
+  multihard->hc       = (double *) R_alloc((size_t) n2, sizeof(double));
+
+  /* Allocate space for transformed parameters */
+  multihard->hc2      = (double *) R_alloc((size_t) n2, sizeof(double));
+
+  /* Copy and process model parameters*/
+  range2 = 0.0;
+  for(i = 0; i < ntypes; i++) {
+    for(j = 0; j < ntypes; j++) {
+      h = model.ipar[i + j*ntypes];
+      h2 = h * h;
+      MAT(multihard->hc,  i, j, ntypes) = h; 
+      MAT(multihard->hc2, i, j, ntypes) = h2;
+      if(range2 > h2) range2 = h2;
+    }
+  }
+  multihard->range2 = range2;
+
+  /* periodic boundary conditions? */
+  multihard->period = model.period;
+  multihard->per    = (model.period[0] > 0.0);
+
+#ifdef DEBUG
+  Rprintf("end initialiser\n");
+#endif
+  return((Cdata *) multihard);
+}
+
+/* conditional intensity evaluator */
+
+double multihardcif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int npts, ntypes, ix, ixp1, j, mrk, mrkj;
+  int *marks;
+  double *x, *y;
+  double u, v;
+  double d2, range2, cifval;
+  double *period;
+  MultiHard *multihard;
+  DECLARE_CLOSE_D2_VARS;
+
+  multihard = (MultiHard *) cdata;
+  range2 = multihard->range2;
+  period = multihard->period;
+
+  u  = prop.u;
+  v  = prop.v;
+  mrk = prop.mrk;
+  ix = prop.ix;
+  x  = state.x;
+  y  = state.y;
+  marks = state.marks;
+
+  npts = state.npts;
+
+#ifdef DEBUG
+  Rprintf("computing cif: u=%lf, v=%lf, mrk=%d\n", u, v, mrk);
+#endif
+
+  cifval = 1.0;
+
+  if(npts == 0) 
+    return(cifval);
+
+  ntypes = multihard->ntypes;
+
+#ifdef DEBUG
+  Rprintf("scanning data\n");
+#endif
+
+  ixp1 = ix+1;
+  /* If ix = NONE = -1, then ixp1 = 0 is correct */
+  if(multihard->per) { /* periodic distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) {
+	  mrkj = marks[j];
+	  if(d2 < MAT(multihard->hc2, mrk, mrkj, ntypes)) {
+	    cifval = 0.0;
+	    return(cifval);
+	  }
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) {
+	  mrkj = marks[j];
+	  if(d2 < MAT(multihard->hc2, mrk, mrkj, ntypes)) {
+	    cifval = 0.0;
+	    return(cifval);
+	  }
+	}
+      }
+    }
+  } else { /* Euclidean distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+        if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) {
+	  mrkj = marks[j];
+	  if(d2 < MAT(multihard->hc2, mrk, mrkj, ntypes)) {
+	    cifval = 0.0;
+	    return(cifval);
+	  }
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+        if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) {
+	  mrkj = marks[j];
+	  if(d2 < MAT(multihard->hc2, mrk, mrkj, ntypes)) {
+	    cifval = 0.0;
+	    return(cifval);
+	  }
+	}
+      }
+    }
+  }
+
+#ifdef DEBUG
+  Rprintf("returning positive cif\n");
+#endif
+  return cifval;
+}
+
+Cifns MultiHardCifns = { &multihardinit, &multihardcif, (updafunptr) NULL, YES};
diff --git a/src/nn3Ddist.c b/src/nn3Ddist.c
new file mode 100755
index 0000000..f9c4dd2
--- /dev/null
+++ b/src/nn3Ddist.c
@@ -0,0 +1,419 @@
+/*
+
+  nn3Ddist.c
+
+  Nearest Neighbour Distances in 3D 
+
+  $Revision: 1.11 $     $Date: 2013/11/03 03:42:48 $
+
+  THE FOLLOWING FUNCTIONS ASSUME THAT z IS SORTED IN ASCENDING ORDER 
+
+  nnd3D     Nearest neighbour distances 
+  nnw3D     Nearest neighbours (id)
+  nndw3D     Nearest neighbours (id) and distances
+
+  nnXdw3D    Nearest neighbour from one list to another
+  nnXEdw3D    Nearest neighbour from one list to another, with overlaps
+
+  knnd3D    k-th nearest neighbour distances
+  knnw3D    k-th nearest neighbours (id)
+  knndw3D    k-th nearest neighbours (id) and distances
+*/
+
+#undef SPATSTAT_DEBUG
+
+#include <R.h>
+#include <R_ext/Utils.h>
+#include <math.h>
+#include "chunkloop.h"
+
+#include "yesno.h"
+
+double sqrt();
+
+/* .......... Single point pattern ...............................*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+
+/* nnd3D: returns nn distance */
+
+#define FNAME nnd3D
+#define DIST
+#include "nn3Ddist.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+
+/* nnw3D: returns id of nearest neighbour */
+
+#define FNAME nnw3D
+#define WHICH
+#include "nn3Ddist.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+
+/* nndw3D: returns nn distance .and. id of nearest neighbour */
+
+#define FNAME nndw3D
+#define DIST
+#define WHICH
+#include "nn3Ddist.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+
+
+/* .......... Two point patterns ...............................*/
+
+/* common interface */
+
+void nnX3Dinterface(n1, x1, y1, z1, id1, 
+		    n2, x2, y2, z2, id2,
+		    exclude, wantdist, wantwhich,
+		    nnd, nnwhich, huge)
+     /* inputs */
+     int *n1, *n2, *id1, *id2;
+     double *x1, *y1, *z1, *x2, *y2, *z2, *huge;
+     /* options */
+     int *exclude, *wantdist, *wantwhich;
+     /* outputs */
+     double *nnd;
+     int *nnwhich;
+{
+  void nnXdw3D(), nnXd3D(), nnXw3D();
+  void nnXEdw3D(), nnXEd3D(), nnXEw3D();
+  int ex, di, wh;
+  ex = (*exclude != 0);
+  di = (*wantdist != 0);
+  wh = (*wantwhich != 0);
+  if(!ex) {
+    if(di && wh) {
+      nnXdw3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge);
+    } else if(di) {
+      nnXd3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge);
+    } else if(wh) {
+      nnXw3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge);
+    } 
+  } else {
+    if(di && wh) {
+      nnXEdw3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge);
+    } else if(di) {
+      nnXEd3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge);
+    } else if(wh) {
+      nnXEw3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge);
+    } 
+  }
+}
+
+/* 
+   nnXdw3D:  for TWO point patterns X and Y,
+   find the nearest neighbour 
+   (from each point of X to the nearest point of Y)
+   returning both the distance and the identifier
+
+   Requires both patterns to be sorted in order of increasing z coord
+*/
+
+#define FNAME nnXdw3D
+#define DIST
+#define WHICH
+#undef EXCLUDE
+#include "nn3DdistX.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+
+/* 
+   nnXd3D:  returns distance only
+
+*/
+
+#define FNAME nnXd3D
+#define DIST
+#undef EXCLUDE
+#include "nn3DdistX.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+
+/* 
+   nnXw3D:  returns identifier only
+*/
+
+#define FNAME nnXw3D
+#define WHICH
+#undef EXCLUDE
+#include "nn3DdistX.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+
+/* .......... Two point patterns with exclusion ........................*/
+
+/* 
+   nnXEdw3D:  similar to nnXdw3D
+   but allows X and Y to include common points
+   (which are not to be counted as neighbours)
+
+   Code numbers id1, id2 are attached to the patterns X and Y respectively, 
+   such that
+   x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j].
+
+   Requires both patterns to be sorted in order of increasing z coord
+*/
+
+#define FNAME nnXEdw3D
+#define DIST
+#define WHICH
+#define EXCLUDE
+#include "nn3DdistX.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+
+/* 
+   nnXEd3D:  returns distances only
+
+*/
+
+#define FNAME nnXEd3D
+#define DIST
+#define EXCLUDE
+#include "nn3DdistX.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+
+/* 
+   nnXEw3D:  returns identifiers only
+
+*/
+
+#define FNAME nnXEw3D
+#define WHICH
+#define EXCLUDE
+#include "nn3DdistX.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+
+/* .......... Single point pattern ...............................*/
+/* .......... k-th nearest neighbours ...............................*/
+
+/* 
+   knnd3D
+
+   nearest neighbour distances 1:kmax
+
+*/
+
+#define FNAME knnd3D
+#define DIST
+#include "knn3Ddist.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+
+/* 
+   knnw3D
+
+   nearest neighbour indices 1:kmax
+
+*/
+
+#define FNAME knnw3D
+#define WHICH
+#include "knn3Ddist.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+
+/* 
+   knndw3D
+
+   nearest neighbours 1:kmax
+
+   returns distances and indices
+
+*/
+
+#define FNAME knndw3D
+#define DIST
+#define WHICH
+#include "knn3Ddist.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+
+/* .......... Two point patterns ...............................*/
+/* .......... k-th nearest neighbours ...............................*/
+
+/* general interface */
+
+void knnX3Dinterface(n1, x1, y1, z1, id1, 
+		     n2, x2, y2, z2, id2, 
+		     kmax,
+		     exclude, wantdist, wantwhich,
+		     nnd, nnwhich, 
+		     huge)
+     /* inputs */
+     int *n1, *n2;
+     double *x1, *y1, *z1, *x2, *y2, *z2, *huge;
+     int *id1, *id2;
+     int *kmax;
+     /* options */
+     int *exclude, *wantdist, *wantwhich;
+     /* output matrices (npoints * kmax) in ROW MAJOR order */
+     double *nnd;
+     int *nnwhich;
+     /* some inputs + outputs are not used in all functions */
+{
+  void knnXdw3D(), knnXd3D(), knnXw3D();
+  void knnXEdw3D(), knnXEd3D(), knnXEw3D();
+  int ex, di, wh;
+  ex = (*exclude != 0);
+  di = (*wantdist != 0);
+  wh = (*wantwhich != 0);
+  if(!ex) {
+    if(di && wh) {
+      knnXdw3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge);
+    } else if(di) {
+      knnXd3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge);
+    } else if(wh) {
+      knnXw3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge);
+    } 
+  } else {
+    if(di && wh) {
+      knnXEdw3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge);
+    } else if(di) {
+      knnXEd3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge);
+    } else if(wh) {
+      knnXEw3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge);
+    } 
+  }
+}
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+
+/* 
+   knnXdw3D
+
+   nearest neighbours 1:kmax between two point patterns
+
+   returns distances and indices
+
+*/
+
+#define FNAME knnXdw3D
+#define DIST
+#define WHICH
+#include "knn3DdistX.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+
+/* 
+   knnXd3D
+
+   nearest neighbours 1:kmax between two point patterns
+
+   returns distances
+
+*/
+
+#define FNAME knnXd3D
+#define DIST
+#include "knn3DdistX.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+
+/* 
+   knnXw3D
+
+   nearest neighbours 1:kmax between two point patterns
+
+   returns indices
+
+*/
+
+#define FNAME knnXw3D
+#define WHICH
+#include "knn3DdistX.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+
+/* .......... Two point patterns with exclusion ..........................*/
+/* .......... k-th nearest neighbours ...............................*/
+
+/* 
+   knnXEdw3D
+
+   nearest neighbours 1:kmax between two point patterns with exclusion
+
+   returns distances and indices
+
+*/
+
+#define FNAME knnXEdw3D
+#define DIST
+#define WHICH
+#define EXCLUDE
+#include "knn3DdistX.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+
+/* 
+   knnXEd3D
+
+   nearest neighbours 1:kmax between two point patterns with exclusion
+
+   returns distances
+
+*/
+
+#define FNAME knnXEd3D
+#define DIST
+#define EXCLUDE
+#include "knn3DdistX.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+
+/* 
+   knnXEw3D
+
+   nearest neighbours 1:kmax between two point patterns with exclusion
+
+   returns indices
+
+*/
+
+#define FNAME knnXEw3D
+#define WHICH
+#define EXCLUDE
+#include "knn3DdistX.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+
diff --git a/src/nn3Ddist.h b/src/nn3Ddist.h
new file mode 100644
index 0000000..ddb486c
--- /dev/null
+++ b/src/nn3Ddist.h
@@ -0,0 +1,101 @@
+/*
+
+  nn3Ddist.h
+
+  Code template for nearest-neighbour algorithms for 3D point patterns
+
+  Input is a single point pattern - supports 'nndist' and 'nnwhich'
+
+  This code is #included multiple times in nn3Ddist.c
+  Variables used:
+        FNAME     function name
+        DIST      #defined if function returns distance to nearest neighbour
+	WHICH     #defined if function returns id of nearest neighbour
+  Either or both DIST and WHICH may be defined.
+
+  THE FOLLOWING CODE ASSUMES THAT THE POINT PATTERN IS SORTED
+  IN ASCENDING ORDER OF THE z COORDINATE
+
+  $Revision: 1.5 $ $Date: 2013/06/28 10:38:46 $
+
+*/
+
+  
+void FNAME(n, x, y, z, 
+	   nnd, nnwhich, huge)
+/* inputs */
+     int *n;
+     double *x, *y, *z, *huge;
+     /* outputs */
+     double *nnd; 
+     int *nnwhich;
+{ 
+  int npoints, i, j, maxchunk;
+  double d2, d2min, xi, yi, zi, dx, dy, dz, dz2, hu, hu2;
+#ifdef WHICH
+  int which;
+#endif
+
+  hu = *huge;
+  hu2 = hu * hu;
+
+  npoints = *n;
+
+  OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+      d2min = hu2;
+#ifdef WHICH
+      which = -1;
+#endif
+      xi = x[i];
+      yi = y[i];
+      zi = z[i];
+      /* search backward */
+      if(i > 0){
+	for(j = i - 1; j >= 0; --j) {
+	  dz = z[j] - zi;
+	  dz2 = dz * dz;
+	  if(dz2 > d2min)
+	    break;
+	  dx = x[j] - xi;
+	  dy = y[j] - yi;
+	  d2 =  dx * dx + dy * dy + dz2;
+	  if (d2 < d2min) {
+	    d2min = d2;
+#ifdef WHICH
+	    which = j;
+#endif
+	  }
+	}
+      }
+
+      /* search forward */
+      if(i < npoints - 1) {
+	for(j = i + 1; j < npoints; ++j) {
+	  dz = z[j] - zi;
+	  dz2 = dz * dz;
+	  if(dz2 > d2min)
+	    break;
+	  dx = x[j] - xi;
+	  dy = y[j] - yi;
+	  d2 =  dx * dx + dy * dy + dz2;
+	  if (d2 < d2min) {
+	    d2min = d2;
+#ifdef WHICH
+	    which = j;
+#endif
+	  }
+	}
+      }
+#ifdef DIST
+      nnd[i] = sqrt(d2min);
+#endif
+#ifdef WHICH
+      /* convert to R indexing */
+      nnwhich[i] = which + 1;
+#endif
+    }
+  }
+}
+
diff --git a/src/nn3DdistX.h b/src/nn3DdistX.h
new file mode 100644
index 0000000..e4d7a5b
--- /dev/null
+++ b/src/nn3DdistX.h
@@ -0,0 +1,127 @@
+/*
+
+  nn3DdistX.h
+
+  Code template for nearest-neighbour algorithms for 3D point patterns
+
+  Input is two point patterns - supports 'nncross'
+
+  This code is #included multiple times in nn3Ddist.c
+  Variables used:
+        FNAME     function name
+        DIST      #defined if function returns distance to nearest neighbour
+	WHICH     #defined if function returns id of nearest neighbour
+	EXCLUDE   #defined if the two patterns may include common points
+	          (which are not to be counted as neighbours)
+
+  Either or both DIST and WHICH may be defined.
+
+  THE FOLLOWING CODE ASSUMES THAT BOTH POINT PATTERNS ARE SORTED
+  IN ASCENDING ORDER OF THE z COORDINATE
+
+  If EXCLUDE is #defined, 
+   Code numbers id1, id2 are attached to the patterns X and Y respectively, 
+   such that
+   x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j].
+
+  $Revision: 1.5 $ $Date: 2013/09/20 10:01:25 $
+
+*/
+
+void FNAME(n1, x1, y1, z1, id1, 
+	   n2, x2, y2, z2, id2,
+	   nnd, nnwhich, huge)
+/* inputs */
+     int *n1, *n2, *id1, *id2;
+     double *x1, *y1, *z1, *x2, *y2, *z2, *huge;
+     /* outputs */
+     double *nnd;
+     int *nnwhich;
+{ 
+  int npoints1, npoints2, i, j, jwhich, lastjwhich;
+  double d2, d2min, x1i, y1i, z1i, dx, dy, dz, dz2, hu, hu2;
+#ifdef EXCLUDE
+  int id1i;
+#endif
+
+  hu = *huge;
+  hu2 = hu * hu;
+
+  npoints1 = *n1;
+  npoints2 = *n2;
+
+  if(npoints1 == 0 || npoints2 == 0)
+    return;
+
+  lastjwhich = 0;
+
+  for(i = 0; i < npoints1; i++) {
+    
+    R_CheckUserInterrupt();
+    
+    d2min = hu2;
+    jwhich = -1;
+    x1i = x1[i];
+    y1i = y1[i];
+    z1i = z1[i];
+#ifdef EXCLUDE
+    id1i = id1[i];
+#endif
+
+    /* search backward from previous nearest neighbour */
+    if(lastjwhich > 0) {
+      for(j = lastjwhich - 1; j >= 0; --j) {
+	dz = z2[j] - z1i;
+	dz2 = dz * dz;
+	if(dz2 > d2min)
+	  break;
+#ifdef EXCLUDE
+	/* do not compare identical points */
+	if(id2[j] != id1i) {
+#endif
+	  dx = x2[j] - x1i;
+	  dy = y2[j] - y1i;
+	  d2 =  dx * dx + dy * dy + dz2;
+	  if (d2 < d2min) {
+	    d2min = d2;
+	    jwhich = j;
+	  }
+#ifdef EXCLUDE
+	}
+#endif
+      }
+    }
+
+    /* search forward from previous nearest neighbour  */
+    if(lastjwhich < npoints2) {
+      for(j = lastjwhich; j < npoints2; ++j) {
+	dz = z2[j] - z1i;
+	dz2 = dz * dz;
+	if(dz2 > d2min)
+	  break;
+#ifdef EXCLUDE
+	/* do not compare identical points */
+	if(id2[j] != id1i) {
+#endif
+	  dx = x2[j] - x1i;
+	  dy = y2[j] - y1i;
+	  d2 =  dx * dx + dy * dy + dz2;
+	  if (d2 < d2min) {
+	    d2min = d2;
+	    jwhich = j;
+	  }
+#ifdef EXCLUDE
+	}
+#endif
+      }
+    }
+#ifdef DIST
+    nnd[i] = sqrt(d2min);
+#endif
+#ifdef WHICH
+    /* convert to R indexing */
+    nnwhich[i] = jwhich + 1;
+#endif
+    lastjwhich = jwhich;
+  }
+}
diff --git a/src/nnMDdist.c b/src/nnMDdist.c
new file mode 100755
index 0000000..9f1ecdd
--- /dev/null
+++ b/src/nnMDdist.c
@@ -0,0 +1,840 @@
+/*
+
+  nnMDdist.c
+
+  Nearest Neighbour Distances in m dimensions
+
+  $Revision: 1.8 $     $Date: 2013/05/27 02:09:10 $
+
+  Argument x is an m * n matrix 
+  with columns corresponding to points
+  and rows corresponding to coordinates.
+
+  Spatial dimension m must be > 1
+
+  THE FOLLOWING FUNCTIONS ASSUME THAT THE ROWS OF x 
+  ARE SORTED IN ASCENDING ORDER OF THE FIRST COLUMN
+
+  nndMD     Nearest neighbour distances 
+  nnwMD     Nearest neighbours and their distances
+  nnXwMD    Nearest neighbour from one list to another
+  nnXxMD    Nearest neighbour from one list to another, with overlaps
+
+  knndMD    k-th nearest neighbour distances
+  knnwMD    k-th nearest neighbours and their distances
+*/
+
+#undef SPATSTAT_DEBUG
+
+#include <R.h>
+#include <R_ext/Utils.h>
+#include <math.h>
+#include "chunkloop.h"
+
+#include "yesno.h"
+
+double sqrt();
+
+void nndMD(n, m, x, nnd, huge)
+/* inputs */
+     int *n, *m;
+     double *x, *huge;
+     /* output */
+     double *nnd;
+{ 
+  int npoints, mdimen, i, j, left, right, leftpos, rightpos, maxchunk;
+  double d2, d2min, hu, hu2, xi0, dx0, dxj;
+  double *xi;
+
+  npoints = *n;
+  mdimen  = *m; 
+  xi = (double *) R_alloc((size_t) mdimen, sizeof(double));
+  /*  dx = (double *) R_alloc((size_t) mdimen, sizeof(double)); */
+
+  hu = *huge;
+  hu2 = hu * hu;
+
+  OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+
+#ifdef SPATSTAT_DEBUG
+      Rprintf("\ni=%d\n", i); 
+#endif
+
+      d2min = hu2;
+
+      for(j = 0; j < mdimen; j++)
+	xi[j] = x[i * mdimen + j];
+      xi0 = xi[0];
+
+#ifdef SPATSTAT_DEBUG
+      Rprintf("\n (");
+      for(j = 0; j < mdimen; j++)
+	Rprintf("%lf, ", x[i * mdimen + j]);
+      Rprintf(")\n");
+#endif
+
+    
+      /* search backward */
+      if(i > 0) {
+	for(left = i - 1; left >= 0; --left) {
+
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("L=%d, d2min=%lf\n", left, d2min);
+#endif
+	  dx0 = xi0 - x[left * mdimen];
+	  d2 = dx0 * dx0;
+	  if(d2 > d2min)
+	    break;
+
+	  leftpos = left * mdimen;
+	  for(j = 1; j < mdimen && d2 < d2min; j++) {
+	    dxj = xi[j] - x[leftpos + j];
+	    d2 += dxj * dxj;
+	  }
+
+	  if (d2 < d2min) {
+	    d2min = d2;
+#ifdef SPATSTAT_DEBUG
+	    Rprintf("\tupdating d2min=%lf\n", d2min);
+#endif
+	  }
+	}
+      }
+
+      /* search forward */
+      if(i < npoints - 1) {
+	for(right = i + 1; right < npoints; ++right) {
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("R=%d, d2min=%lf\n", right, d2min);
+#endif
+	  dx0 = x[right * mdimen] - xi0;
+	  d2  = dx0 * dx0;
+	  if(d2 > d2min)
+	    break;
+
+	  rightpos = right * mdimen;
+	  for(j = 1; j < mdimen && d2 < d2min; j++) {
+	    dxj = xi[j] - x[rightpos + j];
+	    d2 += dxj * dxj;
+	  }
+
+	  if (d2 < d2min) {
+	    d2min = d2;
+#ifdef SPATSTAT_DEBUG
+	    Rprintf("\tupdating d2min=%lf\n", d2min);
+#endif
+	  }
+	}
+      }
+#ifdef SPATSTAT_DEBUG
+      Rprintf("\n");
+#endif
+
+      nnd[i] = sqrt(d2min);
+    }
+  }
+}
+
+/* nnwMD: same as nndMD, 
+   but also returns id of nearest neighbour 
+*/
+
+void nnwMD(n, m, x, nnd, nnwhich, huge)
+/* inputs */
+     int *n, *m;
+     double *x, *huge;
+     /* output */
+     double *nnd;
+     int *nnwhich;
+{ 
+  int npoints, mdimen, i, j, left, right, leftpos, rightpos, which, maxchunk;
+  double d2, d2min, hu, hu2, xi0, dx0, dxj;
+  double *xi;
+
+  npoints = *n;
+  mdimen  = *m;
+  xi = (double *) R_alloc((size_t) mdimen, sizeof(double));
+  /*  dx = (double *) R_alloc((size_t) mdimen, sizeof(double)); */
+
+  hu = *huge;
+  hu2 = hu * hu;
+
+  OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+
+#ifdef SPATSTAT_DEBUG
+      Rprintf("\ni=%d\n", i); 
+#endif
+
+      d2min = hu2;
+      which = -1;
+
+      for(j = 0; j < mdimen; j++)
+	xi[j] = x[i * mdimen + j];
+      xi0 = xi[0];
+
+      /* search backward */
+      if(i > 0) {
+	for(left = i - 1; left >= 0; --left) {
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("L");
+#endif
+	  dx0 = xi0 - x[left * mdimen];
+	  d2 = dx0 * dx0;
+	  if(d2 > d2min)
+	    break;
+	  leftpos = left * mdimen;
+	  for(j = 1; j < mdimen && d2 < d2min; j++) {
+	    dxj = xi[j] - x[leftpos + j];
+	    d2 += dxj * dxj;
+	  }
+	  if (d2 < d2min) {
+	    d2min = d2;
+	    which = left;
+	  }
+	}
+      }
+
+      /* search forward */
+      if(i < npoints - 1) {
+	for(right = i + 1; right < npoints; ++right) {
+
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("R");
+#endif
+	  dx0 = x[right * mdimen] - xi0;
+	  d2 = dx0 * dx0;
+	  if(d2 > d2min)
+	    break;
+
+	  rightpos = right * mdimen;
+	  for(j = 1; j < mdimen && d2 < d2min; j++) {
+	    dxj = xi[j] - x[rightpos + j];
+	    d2 += dxj * dxj;
+	  }
+	  if (d2 < d2min) {
+	    d2min = d2;
+	    which = right;
+	  }
+	}
+      }
+#ifdef SPATSTAT_DEBUG
+      Rprintf("\n");
+#endif
+
+      nnd[i] = sqrt(d2min);
+      /* convert index to R convention */
+      nnwhich[i] = which + 1;
+    }
+  }
+}
+
+/* 
+   nnXwMD:  for TWO point patterns X and Y,
+   find the nearest neighbour 
+   (from each point of X to the nearest point of Y)
+   returning both the distance and the identifier
+
+   Requires both patterns to be sorted in order of increasing z coord
+*/
+
+void nnXwMD(m, n1, x1, n2, x2, nnd, nnwhich, huge)
+/* inputs */
+     int *m, *n1, *n2;
+     double *x1, *x2, *huge;
+     /* outputs */
+     double *nnd;
+     int *nnwhich;
+{ 
+  int mdimen, npoints1, npoints2, i, ell, jleft, jright, jwhich, lastjwhich;
+  double d2, d2min, x1i0, dx0, dxell, hu, hu2;
+  double *x1i;
+  int maxchunk;
+
+  hu = *huge;
+  hu2 = hu * hu;
+
+  npoints1 = *n1;
+  npoints2 = *n2;
+  mdimen   = *m;
+
+  if(npoints1 == 0 || npoints2 == 0)
+    return;
+
+  x1i = (double *) R_alloc((size_t) mdimen, sizeof(double));
+  /*  dx  = (double *) R_alloc((size_t) mdimen, sizeof(double)); */
+
+  lastjwhich = 0;
+
+  OUTERCHUNKLOOP(i, npoints1, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, npoints1, maxchunk, 16384) {
+      d2min = hu2;
+      jwhich = -1;
+      for(ell = 0; ell < mdimen; ell++) 
+	x1i[ell] = x1[i * mdimen + ell];
+      x1i0 = x1i[0];
+
+      /* search backward from previous nearest neighbour */
+      if(lastjwhich > 0) {
+	for(jleft = lastjwhich - 1; jleft >= 0; --jleft) {
+	  dx0 = x1i0 - x2[jleft * mdimen];
+	  d2 = dx0 * dx0;
+	  if(d2 > d2min)
+	    break;
+	  for(ell = 1; ell < mdimen && d2 < d2min; ell++) {
+	    dxell = x1i[ell] - x2[jleft * mdimen + ell];
+	    d2 += dxell * dxell;
+	  }
+	  if (d2 < d2min) {
+	    d2min = d2;
+	    jwhich = jleft;
+	  }
+	}
+      }
+
+      /* search forward from previous nearest neighbour  */
+      if(lastjwhich < npoints2) {
+	for(jright = lastjwhich; jright < npoints2; ++jright) {
+	  dx0 = x2[jright * mdimen] - x1i0;
+	  d2 = dx0 * dx0;
+	  if(d2 > d2min) 
+	    break;
+	  for(ell = 1; ell < mdimen && d2 < d2min; ell++) {
+	    dxell = x1i[ell] - x2[jright * mdimen + ell];
+	    d2 += dxell * dxell;
+	  }
+	  if (d2 < d2min) {
+	    d2min = d2;
+	    jwhich = jright;
+	  }
+	}
+      }
+      nnd[i] = sqrt(d2min);
+      nnwhich[i] = jwhich;
+      lastjwhich = jwhich;
+    }
+  }
+}
+
+
+/* 
+   nnXxMD:  similar to nnXwMD
+   but allows X and Y to include common points
+   (which are not to be counted as neighbours)
+
+   Code numbers id1, id2 are attached to the patterns X and Y respectively, 
+   such that
+   x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j].
+
+   Requires both patterns to be sorted in order of increasing y coord
+*/
+
+void nnXxMD(m, n1, x1, id1, n2, x2, id2, nnd, nnwhich, huge)
+/* inputs */
+     int *m, *n1, *n2;
+     double *x1, *x2, *huge;
+     int *id1, *id2;
+     /* outputs */
+     double *nnd;
+     int *nnwhich;
+{ 
+  int mdimen, npoints1, npoints2, i, ell, jleft, jright, jwhich, lastjwhich, id1i;
+  double d2, d2min, x1i0, dx0, dxell, hu, hu2;
+  double *x1i;
+  int maxchunk;
+
+  hu = *huge;
+  hu2 = hu * hu;
+
+  npoints1 = *n1;
+  npoints2 = *n2;
+  mdimen   = *m;
+
+  if(npoints1 == 0 || npoints2 == 0)
+    return;
+
+  x1i = (double *) R_alloc((size_t) mdimen, sizeof(double));
+  /*  dx  = (double *) R_alloc((size_t) mdimen, sizeof(double)); */
+
+  lastjwhich = 0;
+
+  OUTERCHUNKLOOP(i, npoints1, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, npoints1, maxchunk, 16384) {
+      d2min = hu2;
+      jwhich = -1;
+      id1i   = id1[i];
+      for(ell = 0; ell < mdimen; ell++) 
+	x1i[ell] = x1[i * mdimen + ell];
+      x1i0 = x1i[0];
+
+      /* search backward from previous nearest neighbour */
+      if(lastjwhich > 0) {
+	for(jleft = lastjwhich - 1; jleft >= 0; --jleft) {
+	  dx0 = x1i0 - x2[jleft * mdimen];
+	  d2 = dx0 * dx0;
+	  if(d2 > d2min)
+	    break;
+	  /* do not compare identical points */
+	  if(id2[jleft] != id1i) {
+	    for(ell = 1; ell < mdimen && d2 < d2min; ell++) {
+	      dxell = x1i[ell] - x2[jleft * mdimen + ell];
+	      d2 += dxell * dxell;
+	    }
+	    if (d2 < d2min) {
+	      d2min = d2;
+	      jwhich = jleft;
+	    }
+	  }
+	}
+      }
+
+      /* search forward from previous nearest neighbour  */
+      if(lastjwhich < npoints2) {
+	for(jright = lastjwhich; jright < npoints2; ++jright) {
+	  dx0 = x2[jright * mdimen] - x1i0;
+	  d2 = dx0 * dx0;
+	  if(d2 > d2min) 
+	    break;
+	  /* do not compare identical points */
+	  if(id2[jright] != id1i) {	  
+	    for(ell = 1; ell < mdimen && d2 < d2min; ell++) {
+	      dxell = x1i[ell] - x2[jright * mdimen + ell];
+	      d2 += dxell * dxell;
+	    }
+	    if (d2 < d2min) {
+	      d2min = d2;
+	      jwhich = jright;
+	    }
+	  }
+	}
+      }
+      nnd[i] = sqrt(d2min);
+      nnwhich[i] = jwhich;
+      lastjwhich = jwhich;
+    }
+  }
+}
+
+
+/* 
+   knndMD
+
+   nearest neighbours 1:kmax
+
+*/
+
+void knndMD(n, m, kmax, x, nnd, huge)
+/* inputs */
+     int *n, *m, *kmax;
+     double *x, *huge;
+     /* output matrix (kmax * npoints) */
+     double *nnd;
+{ 
+  int npoints, mdimen, nk, nk1, i, j, k, k1, left, right, unsorted, maxchunk;
+  double d2, d2minK, xi0, dx0, dxj, hu, hu2, tmp;
+  double *d2min, *xi;
+
+  hu = *huge;
+  hu2 = hu * hu;
+
+  npoints = *n;
+  mdimen  = *m;
+  nk      = *kmax;
+  nk1     = nk - 1;
+
+  /* 
+     create space to store the squared k-th nearest neighbour distances
+     for the current point
+  */
+
+  d2min = (double *) R_alloc((size_t) nk, sizeof(double));
+
+  /* 
+     scratch space
+  */
+  xi = (double *) R_alloc((size_t) mdimen, sizeof(double));
+
+  /* loop over points */
+
+  OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+#ifdef SPATSTAT_DEBUG
+      Rprintf("\ni=%d\n", i); 
+#endif
+
+      /* initialise nn distances */
+
+      d2minK = hu2;
+      for(k = 0; k < nk; k++)
+	d2min[k] = hu2;
+
+      for(j = 0; j < mdimen; j++)
+	xi[j] = x[i* mdimen + j];
+      xi0 = xi[0];
+
+#ifdef SPATSTAT_DEBUG
+      Rprintf("\n (");
+      for(j = 0; j < mdimen; j++)
+	Rprintf("%lf, ", xi[j]);
+      Rprintf(")\n");
+#endif
+
+      /* search backward */
+      for(left = i - 1; left >= 0; --left) {
+	dx0 = xi0 - x[left * mdimen];
+	d2 = dx0 * dx0; 
+	if(d2 > d2minK)
+	  break;
+#ifdef SPATSTAT_DEBUG
+	Rprintf("L=%d\n", left);
+	Rprintf("\t 0 ");
+#endif
+	for(j = 1; j < mdimen && d2 < d2minK; j++) {
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("%d ", j);
+#endif
+	  dxj = xi[j] - x[left * mdimen + j];
+	  d2 += dxj * dxj;
+	}
+#ifdef SPATSTAT_DEBUG
+	Rprintf("\n\t d2=%lf\n", d2);
+#endif
+	if (d2 < d2minK) {
+	  /* overwrite last entry */
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", 
+		  d2, nk1, d2min[nk1]);
+#endif
+	  d2min[nk1] = d2;
+	  /* bubble sort */
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("\td2min[] before bubble sort:");
+	  for(k = 0; k < nk; k++)
+	    Rprintf("%lf, ", d2min[k]);
+	  Rprintf("\n");
+#endif
+	  unsorted = YES;
+	  for(k = nk1; unsorted && k > 0; k--) {
+	    k1 = k - 1;
+	    if(d2min[k] < d2min[k1]) {
+	      /* swap entries */
+	      tmp = d2min[k1];
+	      d2min[k1] = d2min[k];
+	      d2min[k] = tmp;
+	    } else {
+	      unsorted = NO;
+	    }
+	  }
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("\td2min[] after bubble sort:");
+	  for(k = 0; k < nk; k++)
+	    Rprintf("%lf, ", d2min[k]);
+	  Rprintf("\n");
+#endif
+	  /* adjust maximum distance */
+	  d2minK = d2min[nk1];
+	}
+      }
+
+      /* search forward */
+      for(right = i + 1; right < npoints; ++right) {
+
+#ifdef SPATSTAT_DEBUG
+	Rprintf("R=%d\n", right);
+	Rprintf("\t 0 ");
+#endif
+	dx0 = x[right * mdimen] - xi0;
+	d2 = dx0 * dx0; 
+	if(d2 > d2minK)
+	  break;
+	for(j = 1; j < mdimen && d2 < d2minK; j++) {
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("%d ", j);
+#endif
+	  dxj = xi[j] - x[right * mdimen + j];
+	  d2 += dxj * dxj;
+	}
+#ifdef SPATSTAT_DEBUG
+	Rprintf("\n\t d2=%lf\n", d2);
+#endif
+	if (d2 < d2minK) {
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", 
+		  d2, nk1, d2min[nk1]);
+#endif
+	  /* overwrite last entry */
+	  d2min[nk1] = d2;
+	  /* bubble sort */
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("\td2min[] before bubble sort:");
+	  for(k = 0; k < nk; k++)
+	    Rprintf("%lf, ", d2min[k]);
+	  Rprintf("\n");
+#endif
+	  unsorted = YES;
+	  for(k = nk1; unsorted && k > 0; k--) {
+	    k1 = k - 1;
+	    if(d2min[k] < d2min[k1]) {
+	      /* swap entries */
+	      tmp = d2min[k1];
+	      d2min[k1] = d2min[k];
+	      d2min[k] = tmp;
+	    } else {
+	      unsorted = NO;
+	    }
+	  }
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("\td2min[] after bubble sort:");
+	  for(k = 0; k < nk; k++)
+	    Rprintf("%lf, ", d2min[k]);
+	  Rprintf("\n");
+#endif
+	  /* adjust maximum distance */
+	  d2minK = d2min[nk1];
+	}
+      }
+
+#ifdef SPATSTAT_DEBUG
+      Rprintf("\n");
+#endif
+
+      /* copy nn distances for point i 
+	 to output matrix in ROW MAJOR order
+      */
+      for(k = 0; k < nk; k++) {
+	nnd[nk * i + k] = sqrt(d2min[k]);
+      }
+    }
+  }
+}
+
+/* 
+   knnwMD
+
+   nearest neighbours 1:kmax
+
+   returns distances and indices
+
+*/
+
+
+void knnwMD(n, m, kmax, x, nnd, nnwhich, huge)
+/* inputs */
+     int *n, *m, *kmax;
+     double *x, *huge;
+     /* output matrix (kmax * npoints) */
+     double *nnd;
+     int *nnwhich;
+{ 
+  int npoints, mdimen, nk, nk1, i, j, k, k1, left, right, unsorted, itmp;
+  double d2, d2minK, xi0, dx0, dxj, hu, hu2, tmp;
+  double *d2min, *xi;
+  int *which;
+  int maxchunk;
+
+  hu = *huge;
+  hu2 = hu * hu;
+
+  npoints = *n;
+  mdimen  = *m;
+  nk      = *kmax;
+  nk1     = nk - 1;
+
+  /* 
+     create space to store the nearest neighbour distances and indices
+     for the current point
+  */
+
+  d2min = (double *) R_alloc((size_t) nk, sizeof(double));
+  which = (int *) R_alloc((size_t) nk, sizeof(int));
+
+  /* 
+     scratch space
+  */
+  xi = (double *) R_alloc((size_t) mdimen, sizeof(double));
+
+  /* loop over points */
+
+  OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, npoints, maxchunk, 16384) {
+
+#ifdef SPATSTAT_DEBUG
+      Rprintf("\ni=%d\n", i); 
+#endif
+
+      /* initialise nn distances */
+
+      d2minK = hu2;
+      for(k = 0; k < nk; k++) {
+	d2min[k] = hu2;
+	which[k] = -1;
+      }
+
+      for(j = 0; j < mdimen; j++)
+	xi[j] = x[i* mdimen + j];
+      xi0 = xi[0];
+
+#ifdef SPATSTAT_DEBUG
+      Rprintf("\n (");
+      for(j = 0; j < mdimen; j++)
+	Rprintf("%lf, ", x[i * mdimen + j]);
+      Rprintf(")\n");
+#endif
+
+      /* search backward */
+      for(left = i - 1; left >= 0; --left) {
+
+#ifdef SPATSTAT_DEBUG
+	Rprintf("L=%d, d2minK=%lf\n", left, d2minK);
+	Rprintf("\t 0 ");
+#endif
+	dx0 = xi0 - x[left * mdimen];
+	d2 = dx0 * dx0; 
+	if(d2 > d2minK)
+	  break;
+
+	for(j = 1; j < mdimen && d2 < d2minK; j++) {
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("%d ", j);
+#endif
+	  dxj = xi[j] - x[left * mdimen + j];
+	  d2 += dxj * dxj;
+	}
+	if (d2 < d2minK) {
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", 
+		  d2, nk1, d2min[nk1]);
+#endif
+	  /* overwrite last entry */
+	  d2min[nk1] = d2;
+	  which[nk1] = left;
+	  /* bubble sort */
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("\td2min[] before bubble sort:");
+	  for(k = 0; k < nk; k++)
+	    Rprintf("%lf, ", d2min[k]);
+	  Rprintf("\n");
+	  Rprintf("\twhich[] before bubble sort:");
+	  for(k = 0; k < nk; k++)
+	    Rprintf("%d, ", which[k]);
+	  Rprintf("\n");
+#endif
+	  unsorted = YES;
+	  for(k = nk1; unsorted && k > 0; k--) {
+	    k1 = k - 1;
+	    if(d2min[k] < d2min[k1]) {
+	      /* swap entries */
+	      tmp = d2min[k1];
+	      d2min[k1] = d2min[k];
+	      d2min[k] = tmp;
+	      itmp = which[k1];
+	      which[k1] = which[k];
+	      which[k] = itmp;
+	    } else {
+	      unsorted = NO;
+	    }
+	  }
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("\td2min[] after bubble sort:");
+	  for(k = 0; k < nk; k++)
+	    Rprintf("%lf, ", d2min[k]);
+	  Rprintf("\n");
+	  Rprintf("\twhich[] after bubble sort:");
+	  for(k = 0; k < nk; k++)
+	    Rprintf("%d, ", which[k]);
+	  Rprintf("\n");
+#endif
+	  /* adjust maximum distance */
+	  d2minK = d2min[nk1];
+	}
+      }
+
+      /* search forward */
+      for(right = i + 1; right < npoints; ++right) {
+
+#ifdef SPATSTAT_DEBUG
+	Rprintf("R=%d, d2minK=%lf\n", right, d2minK);
+	Rprintf("\t 0 ");
+#endif
+	dx0 = x[right * mdimen] - xi0;
+	d2 = dx0 * dx0; 
+	if(d2 > d2minK) 
+	  break;
+	for(j = 1; j < mdimen && d2 < d2minK; j++) {
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("%d ", j);
+#endif
+	  dxj = xi[j] - x[right * mdimen + j];
+	  d2 += dxj * dxj;
+	}
+	if (d2 < d2minK) {
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", 
+		  d2, nk1, d2min[nk1]);
+#endif
+	  /* overwrite last entry */
+	  d2min[nk1] = d2;
+	  which[nk1] = right;
+	  /* bubble sort */
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("\td2min[] before bubble sort:");
+	  for(k = 0; k < nk; k++)
+	    Rprintf("%lf, ", d2min[k]);
+	  Rprintf("\n");
+	  Rprintf("\twhich[] before bubble sort:");
+	  for(k = 0; k < nk; k++)
+	    Rprintf("%d, ", which[k]);
+	  Rprintf("\n");
+#endif
+	  unsorted = YES;
+	  for(k = nk1; unsorted && k > 0; k--) {
+	    k1 = k - 1;
+	    if(d2min[k] < d2min[k1]) {
+	      /* swap entries */
+	      tmp = d2min[k1];
+	      d2min[k1] = d2min[k];
+	      d2min[k] = tmp;
+	      itmp = which[k1];
+	      which[k1] = which[k];
+	      which[k] = itmp;
+	    } else {
+	      unsorted = NO;
+	    }
+	  }
+#ifdef SPATSTAT_DEBUG
+	  Rprintf("\td2min[] after bubble sort:");
+	  for(k = 0; k < nk; k++)
+	    Rprintf("%lf, ", d2min[k]);
+	  Rprintf("\n");
+	  Rprintf("\twhich[] after bubble sort:");
+	  for(k = 0; k < nk; k++)
+	    Rprintf("%d, ", which[k]);
+	  Rprintf("\n");
+#endif
+	  /* adjust maximum distance */
+	  d2minK = d2min[nk1];
+	}
+      }
+
+#ifdef SPATSTAT_DEBUG
+      Rprintf("\n");
+#endif
+
+      /* copy nn distances for point i 
+	 to output matrix in ROW MAJOR order
+      */
+      for(k = 0; k < nk; k++) {
+	nnd[nk * i + k] = sqrt(d2min[k]);
+	/* convert index back to R convention */
+	nnwhich[nk * i + k] = which[k] + 1;
+      }
+    }
+  }
+}
diff --git a/src/nndist.h b/src/nndist.h
new file mode 100644
index 0000000..bd809a3
--- /dev/null
+++ b/src/nndist.h
@@ -0,0 +1,117 @@
+/*
+  nndist.h
+
+  Code template for C functions supporting nndist and nnwhich (k=1)
+
+  THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER
+
+  This code is #included multiple times in nndistance.c 
+  Variables used:
+        FNAME     function name
+        DIST      #defined if function returns distance to nearest neighbour
+	WHICH     #defined if function returns id of nearest neighbour
+  Either or both DIST and WHICH may be defined.
+
+  Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2012
+  Licence: GPL >= 2
+
+  $Revision: 1.2 $  $Date: 2012/03/14 02:37:27 $
+
+*/
+
+void FNAME(n, x, y, 
+#ifdef DIST
+	   nnd,
+#endif
+#ifdef WHICH
+	   nnwhich, 
+#endif
+           huge)
+     /* inputs */
+     int *n;
+     double *x, *y, *huge;
+     /* outputs */
+#ifdef DIST
+     double *nnd;
+#endif
+#ifdef WHICH
+     int *nnwhich;
+#endif
+{ 
+  int npoints, i, maxchunk, left, right;
+  double d2, d2min, xi, yi, dx, dy, dy2, hu, hu2;
+#ifdef WHICH
+  int which;
+#endif
+
+  hu = *huge;
+  hu2 = hu * hu;
+
+  npoints = *n;
+
+  /* loop in chunks of 2^16 */
+
+  i = 0; maxchunk = 0; 
+  while(i < npoints) {
+
+    R_CheckUserInterrupt();
+
+    maxchunk += 65536; 
+    if(maxchunk > npoints) maxchunk = npoints;
+
+    for(; i < maxchunk; i++) {
+
+      d2min = hu2;
+#ifdef WHICH
+      which = -1;
+#endif
+      xi = x[i];
+      yi = y[i];
+
+      if(i < npoints - 1) {
+	/* search forward */
+	for(right = i + 1; right < npoints; ++right)
+	  {
+	    dy = y[right] - yi;
+	    dy2 = dy * dy;
+	    if(dy2 > d2min)
+	      break;
+	    dx = x[right] - xi;
+	    d2 =  dx * dx + dy2;
+	    if (d2 < d2min) {
+	      d2min = d2;
+#ifdef WHICH
+	      which = right;
+#endif
+	    }
+	  }
+      }
+      if(i > 0){
+	/* search backward */
+	for(left = i - 1; left >= 0; --left)
+	{
+	  dy = yi - y[left];
+	  dy2 = dy * dy;
+	  if(dy2 > d2min)
+	    break;
+
+	  dx = x[left] - xi;
+	  d2 =  dx * dx + dy2;
+	  if (d2 < d2min) {
+	    d2min = d2;
+#ifdef WHICH
+	    which = left;
+#endif
+	  }
+	}
+      }
+
+#ifdef DIST
+      nnd[i] = sqrt(d2min);
+#endif
+#ifdef WHICH
+      nnwhich[i] = which + 1; /* R indexing */
+#endif
+    }
+  }
+}
diff --git a/src/nndistX.h b/src/nndistX.h
new file mode 100644
index 0000000..444ba5a
--- /dev/null
+++ b/src/nndistX.h
@@ -0,0 +1,141 @@
+
+#if (1 == 0)
+/*
+  nndistX.h
+
+  Code template for C functions supporting nncross
+
+  THE FOLLOWING CODE ASSUMES THAT LISTS ARE SORTED
+  IN ASCENDING ORDER OF y COORDINATE
+
+  This code is #included multiple times in nndistance.c 
+  Variables used:
+        FNAME     function name
+        DIST      #defined if function returns distance to nearest neighbour
+	WHICH     #defined if function returns id of nearest neighbour
+        EXCLUDE   #defined if exclusion mechanism is used
+  Either or both DIST and WHICH may be defined.
+
+  When EXCLUDE is defined,
+  code numbers id1, id2 are attached to the patterns X and Y respectively, 
+  such that
+  x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j].
+
+  Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2012
+  Licence: GPL >= 2
+
+  $Revision: 1.5 $  $Date: 2013/09/18 04:49:18 $
+
+
+*/
+#endif
+
+void FNAME(n1, x1, y1, id1, 
+           n2, x2, y2, id2, 
+	   nnd, nnwhich, 
+	   huge)
+     /* inputs */
+     int *n1, *n2;
+     double *x1, *y1, *x2, *y2, *huge;
+     int *id1, *id2;
+     /* outputs */
+     double *nnd;
+     int *nnwhich;
+     /* some inputs + outputs are not used in all functions */
+{ 
+  int npoints1, npoints2, maxchunk, i, jleft, jright, jwhich, lastjwhich;
+  double d2, d2min, x1i, y1i, dx, dy, dy2, hu, hu2;
+#ifdef EXCLUDE
+  int id1i;
+#endif
+
+  hu = *huge;
+  hu2 = hu * hu;
+
+  npoints1 = *n1;
+  npoints2 = *n2;
+
+  if(npoints1 == 0 || npoints2 == 0)
+    return;
+
+  lastjwhich = 0;
+
+  /* loop in chunks of 2^16 */
+
+  i = 0; maxchunk = 0; 
+  while(i < npoints1) {
+
+    R_CheckUserInterrupt();
+
+    maxchunk += 65536; 
+    if(maxchunk > npoints1) maxchunk = npoints1;
+
+    for(; i < maxchunk; i++) {
+
+      d2min = hu2;
+      jwhich = -1;
+      x1i = x1[i];
+      y1i = y1[i];
+#ifdef EXCLUDE
+      id1i = id1[i];
+#endif
+
+      if(lastjwhich < npoints2) {
+	/* search forward from previous nearest neighbour  */
+	for(jright = lastjwhich; jright < npoints2; ++jright)
+	  {
+	    dy = y2[jright] - y1i;
+	    dy2 = dy * dy; 
+	    if(dy2 > d2min) /* note that dy2 >= d2min could break too early */
+	      break;
+#ifdef EXCLUDE
+	    /* do not compare identical points */
+	    if(id2[jright] != id1i) {
+#endif
+	      dx = x2[jright] - x1i;
+	      d2 =  dx * dx + dy2;
+	      if (d2 < d2min) {
+		d2min = d2;
+		jwhich = jright;
+	      }
+#ifdef EXCLUDE
+	    }
+#endif
+	  }
+	/* end forward search */
+      }
+      if(lastjwhich > 0) {
+	/* search backward from previous nearest neighbour */
+	for(jleft = lastjwhich - 1; jleft >= 0; --jleft)
+	  {
+	    dy = y1i - y2[jleft];
+	    dy2 = dy * dy;
+	    if(dy2 > d2min) /* note that dy2 >= d2min could break too early */
+	      break;
+#ifdef EXCLUDE
+	    /* do not compare identical points */
+	    if(id2[jleft] != id1i) {
+#endif
+	      dx = x2[jleft] - x1i;
+	      d2 =  dx * dx + dy2;
+	      if (d2 < d2min) {
+		d2min = d2;
+		jwhich = jleft;
+	      }
+#ifdef EXCLUDE
+	    }
+#endif
+	  }
+	/* end backward search */
+      }
+      /* commit values */
+#ifdef DIST
+      nnd[i] = sqrt(d2min);
+#endif
+#ifdef WHICH
+      nnwhich[i] = jwhich + 1; /* R indexing */
+#endif
+      lastjwhich = jwhich;
+    }
+  }
+}
diff --git a/src/nndistance.c b/src/nndistance.c
new file mode 100755
index 0000000..bb539fe
--- /dev/null
+++ b/src/nndistance.c
@@ -0,0 +1,215 @@
+/*
+
+  nndistance.c
+
+  Nearest Neighbour Distances between points
+
+  Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2012
+  Licence: GNU Public Licence >= 2
+
+  $Revision: 1.21 $     $Date: 2013/11/03 03:36:27 $
+
+  THE FOLLOWING FUNCTIONS ASSUME THAT y IS SORTED IN ASCENDING ORDER 
+
+  SINGLE LIST:
+  nndistsort    Nearest neighbour distances 
+  nnwhichsort   Nearest neighbours
+  nnsort        Nearest neighbours & distances
+
+  ONE LIST TO ANOTHER LIST:
+  nnXdist       Nearest neighbour distance from one list to another
+  nnXwhich      Nearest neighbour ID from one list to another
+  nnX           Nearest neighbour ID & distance from one list to another
+
+  ONE LIST TO ANOTHER OVERLAPPING LIST:
+  nnXEdist      Nearest neighbour distance from one list to another, overlapping
+  nnXEwhich     Nearest neighbour ID from one list to another, overlapping
+  nnXE          Nearest neighbour ID & distance 
+
+*/
+
+#undef SPATSTAT_DEBUG
+
+#include <R.h>
+#include <R_ext/Utils.h>
+#include <math.h>
+
+#include "yesno.h"
+
+double sqrt();
+
+/* THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER */
+
+/* ------------------- one point pattern X --------------------- */
+
+/* 
+   nndistsort: nearest neighbour distances 
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#define FNAME nndistsort
+#define DIST
+#include "nndist.h"
+
+/* 
+   nnwhichsort: id of nearest neighbour 
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#define FNAME nnwhichsort
+#define WHICH
+#include "nndist.h"
+
+/* 
+   nnsort: distance & id of nearest neighbour 
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#define FNAME nnsort
+#define DIST
+#define WHICH
+#include "nndist.h"
+
+/* --------------- two distinct point patterns X and Y  ----------------- */
+
+/* general interface */
+
+void nnXinterface(n1, x1, y1, id1, 
+		  n2, x2, y2, id2, 
+		  exclude, wantdist, wantwhich,
+		  nnd, nnwhich, 
+		  huge)
+     /* inputs */
+     int *n1, *n2;
+     double *x1, *y1, *x2, *y2, *huge;
+     int *id1, *id2;
+     /* options */
+     int *exclude, *wantdist, *wantwhich;
+     /* outputs */
+     double *nnd;
+     int *nnwhich;
+{
+  void nnX(), nnXdist(), nnXwhich();
+  void nnXE(), nnXEdist(), nnXEwhich();
+  int ex, di, wh;
+  ex = (*exclude != 0);
+  di = (*wantdist != 0);
+  wh = (*wantwhich != 0);
+  if(!ex) {
+    if(di && wh) {
+      nnX(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge);
+    } else if(di) {
+      nnXdist(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge);
+    } else if(wh) {
+      nnXwhich(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge);
+    } 
+  } else {
+    if(di && wh) {
+      nnXE(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge);
+    } else if(di) {
+      nnXEdist(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge);
+    } else if(wh) {
+      nnXEwhich(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge);
+    } 
+  }
+}
+
+
+/* 
+   nnXdist:  nearest neighbour distance
+	      (from each point of X to the nearest point of Y)
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+#define FNAME nnXdist
+#define DIST
+#include "nndistX.h"
+
+/* 
+   nnXwhich:  nearest neighbour id
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+#define FNAME nnXwhich
+#define WHICH
+#include "nndistX.h"
+
+/* 
+   nnX:  nearest neighbour distance and id
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+#define FNAME nnX
+#define DIST
+#define WHICH
+#include "nndistX.h"
+
+/* --------------- two point patterns X and Y with common points --------- */
+
+/*
+   Code numbers id1, id2 are attached to the patterns X and Y respectively, 
+   such that
+   x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j].
+*/
+
+/* 
+   nnXEdist:  similar to nnXdist
+          but allows X and Y to include common points
+          (which are not to be counted as neighbours)
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+#define FNAME nnXEdist
+#define DIST
+#define EXCLUDE
+#include "nndistX.h"
+
+/* 
+   nnXEwhich:  similar to nnXwhich
+          but allows X and Y to include common points
+          (which are not to be counted as neighbours)
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+#define FNAME nnXEwhich
+#define WHICH
+#define EXCLUDE
+#include "nndistX.h"
+
+/* 
+   nnXE:  similar to nnX
+          but allows X and Y to include common points
+          (which are not to be counted as neighbours)
+*/
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+#undef EXCLUDE
+#define FNAME nnXE
+#define DIST
+#define WHICH
+#define EXCLUDE
+#include "nndistX.h"
+
diff --git a/src/nngrid.c b/src/nngrid.c
new file mode 100644
index 0000000..9600080
--- /dev/null
+++ b/src/nngrid.c
@@ -0,0 +1,109 @@
+/*
+
+  nngrid.c
+
+  Nearest Neighbour Distances from a pixel grid to a point pattern
+
+  Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013
+  Licence: GNU Public Licence >= 2
+
+  $Revision: 1.4 $     $Date: 2013/11/03 03:41:23 $
+
+  Function body definition is #included from nngrid.h 
+
+  THE FOLLOWING FUNCTIONS ASSUME THAT x IS SORTED IN ASCENDING ORDER 
+
+*/
+
+#undef SPATSTAT_DEBUG
+
+#include <R.h>
+#include <R_ext/Utils.h>
+#include <math.h>
+
+#include "yesno.h"
+
+double sqrt();
+
+/* THE FOLLOWING CODE ASSUMES THAT x IS SORTED IN ASCENDING ORDER */
+
+/* general interface */
+
+void nnGinterface(nx, x0, xstep,  
+		  ny, y0, ystep,   /* pixel grid dimensions */
+		  np, xp, yp,   /* data points */
+		  wantdist, wantwhich, /* options */
+		  nnd, nnwhich, 
+		  huge)
+     /* inputs */
+     int *nx, *ny, *np;
+     double *x0, *xstep, *y0, *ystep, *huge;
+     double *xp, *yp;
+     /* options */
+     int *wantdist, *wantwhich;
+     /* outputs */
+     double *nnd;
+     int *nnwhich;
+     /* some inputs + outputs are not used in all functions */
+{
+  void nnGdw(), nnGd(), nnGw();
+  int di, wh;
+  di = (*wantdist != 0);
+  wh = (*wantwhich != 0);
+  if(di && wh) {
+    nnGdw(nx, x0, xstep, ny, y0, ystep, np, xp, yp, nnd, nnwhich, huge);
+  } else if(di) {
+    nnGd(nx, x0, xstep, ny, y0, ystep, np, xp, yp, nnd, nnwhich, huge);
+  } else if(wh) {
+    nnGw(nx, x0, xstep, ny, y0, ystep, np, xp, yp, nnd, nnwhich, huge);
+  }
+}
+
+
+#undef FNAME
+#undef DIST
+#undef WHICH
+
+/* 
+   nnGdw
+
+   returns distances and indices
+
+*/
+
+#define FNAME nnGdw
+#define DIST
+#define WHICH
+#include "nngrid.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+
+/* 
+   nnGd
+
+   returns distances only
+
+*/
+
+#define FNAME nnGd
+#define DIST
+#include "nngrid.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+
+/* 
+   nnGw 
+
+   returns indices only
+
+*/
+
+#define FNAME nnGw
+#define WHICH
+#include "nngrid.h"
+#undef FNAME
+#undef DIST
+#undef WHICH
+
diff --git a/src/nngrid.h b/src/nngrid.h
new file mode 100644
index 0000000..4a805d1
--- /dev/null
+++ b/src/nngrid.h
@@ -0,0 +1,131 @@
+
+#if (1 == 0)
+/*
+  nngrid.h
+
+  Code template for C functions 
+  nearest neighbour of each grid point
+
+  THE FOLLOWING CODE ASSUMES THAT POINT PATTERN (xp, yp) IS SORTED
+  IN ASCENDING ORDER OF x COORDINATE
+
+  This code is #included multiple times in nngrid.c 
+  Variables used:
+        FNAME     function name
+        DIST      #defined if function returns distance to nearest neighbour
+	WHICH     #defined if function returns id of nearest neighbour
+  Either or both DIST and WHICH may be defined.
+
+  Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013
+  Licence: GPL >= 2
+
+  $Revision: 1.4 $  $Date: 2014/02/18 08:43:29 $
+
+
+*/
+#endif
+
+void FNAME(nx, x0, xstep,  
+	   ny, y0, ystep,   /* pixel grid dimensions */
+           np, xp, yp,   /* data points */
+	   nnd, nnwhich, 
+	   huge)
+     /* inputs */
+     int *nx, *ny, *np;
+     double *x0, *xstep, *y0, *ystep, *huge;
+     double *xp, *yp;
+     /* outputs */
+     double *nnd;
+     int *nnwhich;
+     /* some inputs + outputs are not used in all functions */
+{ 
+  int Nxcol, Nyrow, Npoints;
+  int i, j, ijpos;
+  int mleft, mright, mwhich, lastmwhich;
+  double  X0, Y0, Xstep, Ystep;
+  double d2, d2min, xj, yi, dx, dy, dx2, hu, hu2;
+
+  Nxcol   = *nx;
+  Nyrow   = *ny;
+  Npoints = *np;
+  hu      = *huge;
+  X0      = *x0;
+  Y0      = *y0;
+  Xstep   = *xstep;
+  Ystep   = *ystep;
+
+  hu2      = hu * hu;
+
+  if(Npoints == 0)
+    return;
+
+  lastmwhich = 0;
+
+  /* loop over pixels */
+
+  for(j = 0, xj = X0; j < Nxcol; j++, xj += Xstep) {
+
+    R_CheckUserInterrupt();
+    
+    for(i = 0, yi = Y0; i < Nyrow; i++, yi += Ystep) {
+
+      /* reset nn distance and index */
+      d2min = hu2;
+      mwhich = -1;
+
+      if(lastmwhich < Npoints) {
+	/* search forward from previous nearest neighbour  */
+	for(mright = lastmwhich; mright < Npoints; ++mright)
+	  {
+	    dx = xp[mright] - xj;
+	    dx2 = dx * dx; 
+	    if(dx2 > d2min) /* note that dx2 >= d2min could break too early */
+	      break;
+	    dy = yp[mright] - yi;
+	    d2 =  dy * dy + dx2;
+	    if (d2 < d2min) {
+	      /* save as nearest neighbour */
+	      d2min = d2;
+	      mwhich = mright;
+	    }
+	  }
+	/* end forward search */
+      }
+
+      if(lastmwhich > 0) {
+	/* search backward from previous nearest neighbour */
+	for(mleft = lastmwhich - 1; mleft >= 0; --mleft)
+	  {
+	    dx = xj - xp[mleft];
+	    dx2 = dx * dx;
+	    if(dx2 > d2min) /* note that dx2 >= d2min could break too early */
+	      break;
+	    dy = yp[mleft] - yi;
+	    d2 =  dy * dy + dx2;
+	    if (d2 < d2min) {
+	      /* save as nearest neighbour */
+	      d2min = d2;
+	      mwhich = mleft;
+	    }
+	  }
+	/* end backward search */
+      }
+      /* remember index of most recently-encountered neighbour */
+      lastmwhich = mwhich;
+      /* copy nn distance for grid point (i, j)
+	 to output array nnd[i, j] 
+      */
+      ijpos = i + j * Nyrow;
+#ifdef DIST
+      nnd[ijpos] = sqrt(d2min);
+#endif
+#ifdef WHICH
+      nnwhich[ijpos] = mwhich + 1;  /* R indexing */
+#endif
+    /* end of loop over grid points (i, j) */
+    }
+  }
+}
+
+
+
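+#ifndef NNGRID_SKETCH
+#define NNGRID_SKETCH
+/* Illustration (invented name; not used by the package; guarded so
+   that repeated #inclusion of this template is harmless): the search
+   strategy above, for a single query point.  Scanning outward from a
+   starting guess m0 in a list sorted by x, each sweep can stop as
+   soon as the x-gap alone exceeds the best squared distance so far. */
+static int nearestsorted(int n, double *px, double *py,
+                         double qx, double qy, int m0)
+{
+  int m, best;
+  double dx, dy, d2, d2best;
+  best = -1;
+  d2best = 1.0e308;
+  for(m = m0; m < n; m++) {    /* forward sweep */
+    dx = px[m] - qx;
+    if(dx * dx > d2best) break;    /* no later point can be closer */
+    dy = py[m] - qy;
+    d2 = dx * dx + dy * dy;
+    if(d2 < d2best) { d2best = d2; best = m; }
+  }
+  for(m = m0 - 1; m >= 0; m--) {    /* backward sweep */
+    dx = qx - px[m];
+    if(dx * dx > d2best) break;
+    dy = py[m] - qy;
+    d2 = dx * dx + dy * dy;
+    if(d2 < d2best) { d2best = d2; best = m; }
+  }
+  return(best);
+}
+#endif
+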
diff --git a/src/pairloop.h b/src/pairloop.h
new file mode 100644
index 0000000..13365ae
--- /dev/null
+++ b/src/pairloop.h
@@ -0,0 +1,67 @@
+/*
+
+  pairloop.h
+
+  Generic code template for loop 
+  collecting contributions to point x_i
+  from all points x_j such that ||x_i - x_j|| <= r
+
+  cpp variables used:
+
+       INITIAL_I        code executed at start of 'i' loop       
+       CONTRIBUTE_IJ    code executed to compute contribution from j to i
+       COMMIT_I         code executed to save total contribution to i
+
+  C variables used:
+       int i, j, n, maxchunk;
+       double xi, yi, dx, dy, dx2, d2, r2max;
+       double *x, *y;
+
+  The early 'break' when dx2 > r2max assumes the coordinates are
+  sorted in ascending order of x.
+
+  $Revision: 1.4 $  $Date: 2016/07/08 03:37:11 $
+
+*/
+
+#ifndef CHUNKLOOP_H
+#include "chunkloop.h"
+#endif
+
+#define PAIRLOOP(INITIAL_I, CONTRIBUTE_IJ, COMMIT_I)    \
+  OUTERCHUNKLOOP(i, n, maxchunk, 65536) {		\
+    R_CheckUserInterrupt();				\
+    INNERCHUNKLOOP(i, n, maxchunk, 65536) {		\
+							\
+      xi = x[i];                                        \
+      yi = y[i];                                        \
+                                                        \
+      INITIAL_I;					\
+                                                        \
+      if(i > 0) {					\
+	for(j=i-1; j >= 0; j--) {			\
+	  dx = x[j] - xi;				\
+	  dx2 = dx * dx;				\
+	  if(dx2 > r2max)				\
+	    break;					\
+	  dy = y[j] - yi;				\
+	  d2 = dx2 + dy * dy;				\
+	  if(d2 <= r2max) {				\
+	    CONTRIBUTE_IJ;				\
+	  }						\
+	}						\
+      }							\
+                                                        \
+      if(i+1 < n) {					\
+	for(j=i+1; j < n; j++) {			\
+	  dx = x[j] - xi;				\
+	  dx2 = dx * dx;				\
+	  if(dx2 > r2max)				\
+	    break;					\
+	  dy = y[j] - yi;				\
+	  d2 = dx2 + dy * dy;				\
+	  if(d2 <= r2max) {				\
+	    CONTRIBUTE_IJ;				\
+	  }						\
+	}						\
+      }							\
+      COMMIT_I;						\
+    }							\
+  }							
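+
+#ifndef PAIRLOOP_SKETCH
+#define PAIRLOOP_SKETCH
+/* Illustration (invented names; not used by the package): PAIRLOOP
+   instantiated to count, for each point, the neighbours lying within
+   distance r.  Assumes the includer provides the R headers (as the
+   package's .c files do) and that x is sorted in ascending order. */
+static void countclose(int *nn, double *x, double *y, double *r, int *counts)
+{
+  int i, j, n, maxchunk, ni;
+  double xi, yi, dx, dy, dx2, d2, r2max;
+  n = *nn;
+  r2max = (*r) * (*r);
+  PAIRLOOP( ni = 0,
+	    ni++,
+	    counts[i] = ni )
+}
+#endif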
diff --git a/src/pcf3.c b/src/pcf3.c
new file mode 100755
index 0000000..d44e947
--- /dev/null
+++ b/src/pcf3.c
@@ -0,0 +1,205 @@
+#include <math.h>
+#include <R.h>
+#include <Rmath.h>
+#include <R_ext/Utils.h>
+#include "geom3.h"
+#include "functable.h"
+#include "chunkloop.h"
+#include "constants.h"
+
+/*
+	$Revision: 1.7 $	$Date: 2012/03/27 05:01:41 $
+
+	pair correlation function of 3D point pattern
+	(Epanechnikov kernel) 
+
+	pcf3trans	  	translation correction
+
+	pcf3isot		isotropic correction
+
+*/
+
+#define FOURPI (2.0 * M_2PI)
+
+
+void
+pcf3trans(p, n, b, pcf, delta)
+     Point *p;
+     int n;
+     Box *b;
+     Ftable *pcf;
+     double delta;
+{
+  register int i, j, l, lmin, lmax, maxchunk;
+  register double dx, dy, dz, dist;
+  register double  vx, vy, vz, tval;
+  Point *ip, *jp;
+  double dt, vol, lambda, denom;
+  double coef, twocoef, frac, invweight, kernel;
+
+  double sphesfrac(), sphevol();
+
+  /* compute denominator & initialise numerator */
+  vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0);
+  lambda = ((double) n )/ vol;
+  denom = lambda * lambda;
+
+  for(l = 0; l < pcf->n; l++) {
+    (pcf->denom)[l] = denom;
+    (pcf->num)[l]   = 0.0;
+  }
+
+  /* spacing of argument in result vector */
+  dt = (pcf->t1 - pcf->t0)/(pcf->n - 1);
+
+  /* compute numerator */
+  OUTERCHUNKLOOP(i, n, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, n, maxchunk, 8196) {
+      ip = p + i;
+      for(j = i + 1; j < n; j++) {
+	/* compute pairwise distance */
+	jp = p + j;
+	dx = jp->x - ip->x;
+	dy = jp->y - ip->y;
+	dz = jp->z - ip->z;
+	dist = sqrt(dx * dx + dy * dy + dz * dz);
+	lmin = ceil( ((dist - delta) - pcf->t0) / dt );
+	lmax = floor( ((dist + delta) - pcf->t0) / dt );
+	if(lmax >= 0 && lmin < pcf->n) {
+	  /* kernel centred at 'dist' has nonempty intersection 
+	     with specified range of t values */
+	  /* compute intersection */
+	  if(lmin < 0)
+	    lmin = 0;
+	  if(lmax >= pcf->n)
+	    lmax = pcf->n - 1;
+	  /* compute (inverse) edge correction weight */
+	  vx = b->x1 - b->x0 - (dx > 0 ? dx : -dx);
+	  vy = b->y1 - b->y0 - (dy > 0 ? dy : -dy);
+	  vz = b->z1 - b->z0 - (dz > 0 ? dz : -dz);
+	  invweight = vx * vy * vz * FOURPI * dist * dist;
+	  if(invweight > 0.0) {
+	    for(l = lmin; l < pcf->n; l++) {
+	      tval = pcf->t0 + l * dt;
+	      /* unnormalised Epanechnikov kernel with halfwidth delta */
+	      frac = (dist - tval)/delta;
+	      kernel = (1 - frac * frac);
+	      if(kernel > 0) 	    
+		(pcf->num)[l] += kernel / invweight;
+	    }
+	  }
+	}
+      }
+    }
+  }
+  
+  /* constant factor in kernel */
+  coef = 3.0/(4.0 * delta);
+  /* multiplied by 2 because we only visited i < j pairs */
+  twocoef = 2.0 * coef; 
+
+  /* normalise kernel and compute ratio estimate */
+  for(l = 0; l < pcf->n; l++) {
+    (pcf->num)[l] *= twocoef;
+    (pcf->f)[l] = ((pcf->denom)[l] > 0.0) ?
+      (pcf->num)[l] / (pcf->denom)[l] : 0.0;
+  }
+}
+
+
+void
+pcf3isot(p, n, b, pcf, delta)
+     Point *p;
+     int n;
+     Box *b;
+     Ftable *pcf;
+     double delta;
+{
+  register int i, j, l, lmin, lmax, maxchunk;
+  register double dx, dy, dz, dist;
+  Point *ip, *jp;
+  double dt, vol, denom, mass, tval;
+  double coef, frac, kernel;
+
+  double sphesfrac(), sphevol();
+  Point vertex;
+  Box   half;
+
+  /* compute denominator & initialise numerator */
+  vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0);
+  denom = ((double) (n * n))/vol;
+
+  for(l = 0; l < pcf->n; l++) {
+    (pcf->denom)[l] = denom;
+    (pcf->num)[l]   = 0.0;
+  }
+
+  /* spacing of argument in result vector */
+  dt = (pcf->t1 - pcf->t0)/(pcf->n - 1);
+
+  /* set up for volume correction */
+
+  vertex.x = b->x0;
+  vertex.y = b->y0;
+  vertex.z = b->z0;
+  half.x1  = b->x1;
+  half.y1  = b->y1;
+  half.z1  = b->z1;
+  half.x0  = (b->x0 + b->x1)/2.0;
+  half.y0  = (b->y0 + b->y1)/2.0;
+  half.z0  = (b->z0 + b->z1)/2.0;
+
+  /* compute numerator */
+  OUTERCHUNKLOOP(i, n, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, n, maxchunk, 8196) {
+      ip = p + i;
+      for(j = i + 1; j < n; j++) {
+	jp = p + j;
+	dx = jp->x - ip->x;
+	dy = jp->y - ip->y;
+	dz = jp->z - ip->z;
+	dist = sqrt(dx * dx + dy * dy + dz * dz);
+	lmin = ceil( ((dist - delta) - pcf->t0) / dt );
+	lmax = floor( ((dist + delta) - pcf->t0) / dt );
+	if(lmax >= 0 && lmin < pcf->n) {
+	  /* kernel centred at 'dist' has nonempty intersection 
+	     with specified range of t values */
+	  /* compute intersection */
+	  if(lmin < 0)
+	    lmin = 0;
+	  if(lmax >= pcf->n)
+	    lmax = pcf->n - 1;
+	  /* compute edge correction weight */
+	  mass = (1.0 / sphesfrac(ip, b, dist)) 
+	    + (1.0 / sphesfrac(jp, b, dist)); 
+	  mass *= 
+	    1.0 - 8.0 * sphevol(&vertex, &half, dist) / vol;
+	  if(mass > 0.0) {
+	    mass /= FOURPI * dist * dist;
+	    for(l = lmin; l < pcf->n; l++) {
+	      tval = pcf->t0 + l * dt;
+	      /* unnormalised Epanechnikov kernel with halfwidth delta */
+	      frac = (dist - tval)/delta;
+	      kernel = (1 - frac * frac);
+	      if(kernel > 0) 	    
+		(pcf->num)[l] += kernel * mass;
+	    }
+	  }
+	}
+      }
+    }
+  }
+
+  /* constant factor in kernel */
+  coef = 3.0/(4.0 * delta);
+
+  /* normalise kernel and compute ratio estimate */
+  for(l = 0; l < pcf->n; l++) {
+    (pcf->num)[l] *= coef;
+    (pcf->f)[l] = ((pcf->denom)[l] > 0.0)?
+      (pcf->num)[l] / (pcf->denom)[l]
+      : 0.0;
+  }
+}
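+
+/* Illustration (invented name; not used by the package): the factor
+   coef = 3/(4*delta) normalises the kernel, since the unnormalised
+   kernel k(u) = 1 - (u/delta)^2 integrates to 4*delta/3 over
+   [-delta, delta].  The normalised kernel, centred at 'centre': */
+static double epanechnikov(double t, double centre, double delta)
+{
+  double frac, k;
+  frac = (t - centre)/delta;
+  k = 1.0 - frac * frac;
+  return((k > 0.0) ? (3.0/(4.0 * delta)) * k : 0.0);
+}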
diff --git a/src/penttinen.c b/src/penttinen.c
new file mode 100644
index 0000000..fb0d64c
--- /dev/null
+++ b/src/penttinen.c
@@ -0,0 +1,139 @@
+#include <R.h>
+#include <Rmath.h>
+#include <math.h>
+#include "methas.h"
+#include "dist2.h"
+#include "constants.h"
+
+/* Conditional intensity computation for Penttinen process */
+
+/* Format for storage of parameters and precomputed/auxiliary data */
+
+typedef struct Penttinen {
+  double gamma;
+  double r;
+  double loggamma;
+  double reach2;
+  double *period;
+  int hard;
+  int per;
+} Penttinen;
+
+
+/* initialiser function */
+
+Cdata *penttineninit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  /* create storage for model parameters */
+  Penttinen *penttinen;
+  penttinen = (Penttinen *) R_alloc(1, sizeof(Penttinen)); 
+  /* Interpret model parameters*/
+  penttinen->gamma  = model.ipar[0];
+  penttinen->r      = model.ipar[1]; 
+  penttinen->reach2 = 4.0 * penttinen->r * penttinen->r; 
+  penttinen->period = model.period;
+#ifdef MHDEBUG
+  Rprintf("Initialising Penttinen gamma=%lf, r=%lf\n", 
+	  penttinen->gamma, penttinen->r);
+#endif
+  /* is the model numerically equivalent to hard core ? */
+  penttinen->hard   = (penttinen->gamma < DOUBLE_EPS);
+  penttinen->loggamma = (penttinen->hard) ? 0 : log(penttinen->gamma);
+  /* periodic boundary conditions? */
+  penttinen->per    = (model.period[0] > 0.0);
+  return((Cdata *) penttinen);
+}
+
+/* conditional intensity evaluator */
+
+double penttinencif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int npts, ix, ixp1, j;
+  double *x, *y;
+  double u, v;
+  double d2, reach2, z, z2, logpot, cifval;
+  Penttinen *penttinen;
+  DECLARE_CLOSE_D2_VARS;
+
+  penttinen = (Penttinen *) cdata;
+
+  reach2     = penttinen->reach2;
+
+  u  = prop.u;
+  v  = prop.v;
+  ix = prop.ix;
+  x  = state.x;
+  y  = state.y;
+
+  npts = state.npts;
+
+  if(npts == 0) 
+    return((double) 1.0);
+
+  logpot = 0.0;
+
+  ixp1 = ix+1;
+  /* If ix = NONE = -1, then ixp1 = 0 is correct */
+  if(penttinen->per) { /* periodic distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],penttinen->period,reach2,d2)) {
+	  z2 = d2/reach2;
+	  z = sqrt(z2);
+	  if(z < 1.0) {
+	    logpot += acos(z) - z * sqrt(1 - z2);
+	  }
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],penttinen->period,reach2,d2)) {
+	  z2 = d2/reach2;
+	  z = sqrt(z2);
+	  if(z < 1.0) {
+	    logpot += acos(z) - z * sqrt(1 - z2);
+	  }
+	}
+      }
+    }
+  } else { /* Euclidean distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(CLOSE_D2(u, v, x[j], y[j], reach2, d2)) {
+	  z2 = d2/reach2;
+	  z = sqrt(z2);
+	  if(z < 1.0) {
+	    logpot += acos(z) - z * sqrt(1 - z2);
+	  }
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(CLOSE_D2(u, v, x[j], y[j], reach2, d2)) {
+	  z2 = d2/reach2;
+	  z = sqrt(z2);
+	  if(z < 1.0) {
+	    logpot += acos(z) - z * sqrt(1 - z2);
+	  }
+	}
+      }
+    }
+  }
+
+  if(penttinen->hard) {
+    if(logpot > 0) cifval = 0.0;
+    else cifval = 1.0;
+  } else cifval = exp((penttinen->loggamma) * M_2_PI * logpot);
+  
+  return cifval;
+}
+
+Cifns PenttinenCifns = { &penttineninit, &penttinencif, (updafunptr) NULL, NO};
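+
+/* Illustration (invented name; not used by the package): the term
+   acos(z) - z * sqrt(1 - z^2) accumulated in 'logpot' above, scaled
+   by M_2_PI = 2/pi, is the fraction of a disc of radius r overlapped
+   by an equal disc whose centre lies at distance d = 2 * r * z. */
+static double discoverlapfrac(double d, double r)
+{
+  double z;
+  z = d/(2.0 * r);
+  if(z >= 1.0) return(0.0);
+  return(M_2_PI * (acos(z) - z * sqrt(1.0 - z * z)));
+}
+/* so for gamma > 0 the conditional intensity equals gamma raised to
+   the sum of these overlap fractions over the existing points */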
diff --git a/src/poly2im.c b/src/poly2im.c
new file mode 100755
index 0000000..e70f058
--- /dev/null
+++ b/src/poly2im.c
@@ -0,0 +1,331 @@
+/*
+  poly2im.c
+
+  Conversion from (x,y) polygon to pixel image
+
+  poly2imI     pixel value =  1{pixel centre is inside polygon}
+
+  poly2imA     pixel value = area of intersection between pixel and polygon
+
+  $Revision: 1.9 $ $Date: 2014/06/27 06:14:49 $
+
+*/
+#undef DEBUG
+
+#include <R.h>
+#include <R_ext/Utils.h>
+#include <math.h>
+
+#include "chunkloop.h"
+
+void 
+poly2imI(xp, yp, np, nx, ny, out) 
+     double *xp, *yp; /* polygon vertices, anticlockwise, CLOSED  */
+     int *np; 
+     int *nx, *ny; /* INTEGER raster points from (0,0) to (nx-1, ny-1) */
+     int *out;  /* output matrix [ny, nx], byrow=FALSE, initialised to 0 */
+{
+  int Np, Nx, Ny, Np1, maxchunk, mstart, mend;
+  int j, k, m;
+  double x0, y0, x1, y1, xleft, xright, yleft, yright;
+  double dx, dy, y, slope, intercept;
+  int jleft, jright, imax;
+  int sign;
+
+  Np = *np;
+  Nx = *nx;
+  Ny = *ny;
+  /*  Nxy = Nx * Ny; */
+  Np1 = Np - 1;
+
+  /* run through polygon edges */
+  OUTERCHUNKLOOP(k, Np1, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(k, Np1, maxchunk, 8196) {
+      x0 = xp[k];
+      y0 = yp[k];
+      x1 = xp[k+1];
+      y1 = yp[k+1];
+      if(x0 < x1) {
+	xleft = x0;
+	xright = x1;
+	yleft = y0;
+	yright = y1;
+	sign = -1;
+      } else {
+	xleft = x1;
+	xright = x0;
+	yleft = y1;
+	yright = y0;
+	sign = +1;
+      }
+      /* determine relevant columns of pixels */
+      jleft = (int) ceil(xleft);
+      jright = (int) floor(xright);
+      if(jleft < Nx && jright >= 0 && jleft <= jright) {
+	if(jleft < 0) { jleft = 0; } 
+	if(jright >= Nx) {jright = Nx - 1; }
+	/* equation of edge */
+	dx = xright - xleft;
+	dy = yright - yleft;
+	slope = dy/dx;
+	intercept = yleft - slope * xleft;
+	/* visit relevant columns */
+	for(j = jleft; j <= jright; j++) {
+	  y = slope * ((double) j) + intercept;
+	  imax = (int) floor(y);
+	  if(imax >= Ny) imax = Ny-1;
+	  if(imax >= 0) {
+	    /* 
+	       increment entries below edge in this column:
+	          out[i + j * Ny] += sign for 0 <= i <= imax
+	    */
+	    mstart = j * Ny;
+	    mend   = mstart + imax;
+	    for(m = mstart; m <= mend; m++) {
+	      out[m] += sign;
+	    }
+	  }
+	}
+      }
+    }
+  }
+}
+
+#define BELOW -1
+#define INSIDE 0
+#define ABOVE 1
+
+void 
+poly2imA(ncol, nrow, xpoly, ypoly, npoly, out, status)
+     int *ncol, *nrow; /* pixels are unit squares from (0,0) to (ncol,nrow) */
+     double *xpoly, *ypoly; /* vectors of coordinates of polygon vertices */
+     int *npoly;
+     double *out;  /* double array [nrow, ncol] of pixel areas,
+		    byrow=TRUE, initialised to 0 */
+     int *status;
+{
+  double *xp, *yp;
+  int nx, ny, nxy, np, np1, maxchunk; 
+  int i, j, k;
+  double xcur, ycur, xnext, ynext, xleft, yleft, xright, yright;
+  int sgn, jmin, jmax, imin, imax;
+  double x0, y0, x1, y1, slope, yhi, ylo, area, xcut, xcutA, xcutB;
+  int klo, khi;
+
+  nx = *ncol;
+  ny = *nrow;
+  
+  xp = xpoly;
+  yp = ypoly;
+  np = *npoly;
+
+  *status = 0;
+
+  /* initialise output array */
+  nxy = nx * ny;
+  for(k = 0; k < nxy; k++) 
+    out[k] = 0;
+
+  /* ............ loop over polygon edges ...................*/
+  np1 = np - 1;
+  OUTERCHUNKLOOP(k, np1, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(k, np1, maxchunk, 8196) {
+      xcur = xp[k];
+      ycur = yp[k];
+      xnext = xp[k+1];
+      ynext = yp[k+1];
+#ifdef DEBUG
+      Rprintf("\nEdge %d from (%lf, %lf) to (%lf, %lf) .........\n",
+	      k, xcur, ycur, xnext, ynext);
+#endif
+      if(xcur != xnext) {
+	/* vertical edges are ignored */
+	if(xcur < xnext) {
+#ifdef DEBUG
+	  Rprintf("negative sign\n");
+#endif
+	  sgn = -1;
+	  xleft = xcur;
+	  yleft = ycur;
+	  xright = xnext;
+	  yright = ynext;
+	} else {
+#ifdef DEBUG
+	  Rprintf("positive sign\n");
+#endif
+	  sgn = 1;
+	  xleft = xnext;
+	  yleft = ynext;
+	  xright = xcur;
+	  yright = ycur;
+	}
+	/* we have now ensured xleft < xright */
+	slope = (yright - yleft)/(xright - xleft);
+	/* Find relevant columns of pixels */
+	jmin = floor(xleft);
+	jmin = (jmin < 0) ? 0 : jmin;
+	jmax = ceil(xright);
+	jmax = (jmax > nx - 1) ? nx - 1 : jmax;
+	/* Find relevant rows of pixels */
+	imin = floor((yleft < yright) ? yleft : yright);
+	imin = (imin < 0) ? 0 : imin;
+	imax = ceil((yleft < yright) ? yright : yleft);
+	imax = (imax > ny - 1) ? ny - 1 : imax;
+#ifdef DEBUG
+	Rprintf( "imin=%d, imax=%d, jmin=%d, jmax=%d\n", 
+		 imin, imax, jmin, jmax);
+#endif
+	/* ........... loop over columns of pixels ..............*/
+	for(j = jmin; j <= jmax; j++) {
+#ifdef DEBUG
+	  Rprintf( "\t j=%d:\n", j);
+#endif
+	  /* 
+	     Intersect trapezium with column of pixels
+	  */
+	  if(xleft <= j+1 && xright >= j) {
+	    if(xleft >= j) {
+	      /* retain left corner */
+#ifdef DEBUG
+	      Rprintf( "\tretain left corner\n");
+#endif
+	      x0 = xleft;
+	      y0 = yleft;
+	    } else {
+	      /* trim left corner */
+#ifdef DEBUG
+	      Rprintf( "\ttrim left corner\n");
+#endif
+	      x0 = (double) j;
+	      y0 = yleft + slope * (x0 - xleft);
+	    }
+	    if(xright <= j+1) {
+	      /* retain right corner */
+#ifdef DEBUG
+	      Rprintf( "\tretain right corner\n");
+#endif
+	      x1 = xright;
+	      y1 = yright;
+	    } else {
+	      /* trim right corner */
+#ifdef DEBUG
+	      Rprintf( "\ttrim right corner\n");
+#endif
+	      x1 = (double) (j+1);
+	      y1 = yright + slope * (x1 - xright);
+	    }
+	    /* save min and max y */
+	    if(y0 < y1) {
+#ifdef DEBUG
+	      Rprintf( "slope %lf > 0\n", slope);
+#endif
+	      ylo = y0;
+	      yhi = y1;
+	    } else {
+#ifdef DEBUG
+	      Rprintf( "slope %lf <= 0\n", slope);
+#endif
+	      ylo = y1;
+	      yhi = y0;
+	    }
+	    /* ............ loop over pixels within column ......... */
+	    /* first part */
+	    if(imin > 0) {
+	      for(i = 0; i < imin; i++) {
+#ifdef DEBUG
+		Rprintf( "\ti=%d:\n", i);
+#endif
+		/*
+		  The trimmed pixel [x0, x1] * [i, i+1] 
+		  lies below the polygon edge.
+		*/
+		area = (x1 - x0);
+#ifdef DEBUG
+		Rprintf( "\tIncrementing area by %lf\n", sgn * area);
+#endif
+		out[i + ny * j] += sgn * area;
+	      }
+	    }
+	    /* second part */
+	    for(i = imin; i <= imax; i++) {
+#ifdef DEBUG
+	      Rprintf( "\ti=%d:\n", i);
+#endif
+	      /* 
+		 Compute area of intersection between trapezium
+		 and trimmed pixel [x0, x1] x [i, i+1] 
+	      */
+	      klo = (ylo <= i) ? BELOW : (ylo >= (i+1))? ABOVE: INSIDE;
+	      khi = (yhi <= i) ? BELOW : (yhi >= (i+1))? ABOVE: INSIDE;
+	      if(klo == ABOVE) {
+		/* trapezium covers pixel */
+#ifdef DEBUG
+		Rprintf( "\t\ttrapezium covers pixel\n");
+#endif
+		area = (x1-x0);
+	      } else if(khi == BELOW) {
+#ifdef DEBUG
+		Rprintf( "\t\tpixel avoids trapezium\n");
+#endif
+		/* pixel avoids trapezium */
+		area = 0.0;
+	      } else if(klo == INSIDE && khi == INSIDE) {
+		/* polygon edge is inside pixel */
+#ifdef DEBUG
+		Rprintf( "\t\t polygon edge is inside pixel\n");
+#endif
+		area = (x1-x0) * ((ylo + yhi)/2.0 - i);
+	      } else if(klo == INSIDE && khi == ABOVE) {
+		/* polygon edge crosses upper edge of pixel */
+#ifdef DEBUG
+		Rprintf( 
+			"\t\t polygon edge crosses upper edge of pixel\n");
+#endif
+		xcut = x0 + ((i+1) - y0)/slope;
+		if(slope > 0) 
+		  area = (xcut - x0) * ((y0 + (i+1))/2 - i) + (x1 - xcut);
+		else
+		  area = (x1 - xcut) * ((y1 + (i+1))/2 - i) + (xcut - x0);
+	      } else if(klo == BELOW && khi == INSIDE) {
+		/* polygon edge crosses lower edge of pixel */
+#ifdef DEBUG
+		Rprintf( "\t\t polygon edge crosses lower edge of pixel\n");
+#endif
+		xcut = x0 + (i - y0)/slope;
+		if(slope > 0) 
+		  area = (x1 - xcut) * ((y1 + i)/2 - i);
+		else
+		  area = (xcut - x0) * ((y0 + i)/2 - i);
+	      } else if(klo == BELOW && khi == ABOVE) {
+		/* polygon edge crosses upper and lower edges of pixel */
+#ifdef DEBUG
+		Rprintf( 
+			"\t\t polygon edge crosses upper and lower edges of pixel\n");
+#endif
+		xcutA = x0 + (i - y0)/slope;
+		xcutB = x0 + ((i+1) - y0)/slope;
+		if(slope > 0) 
+		  area = (xcutB - xcutA)/2 + (x1 - xcutB);
+		else
+		  area = (xcutB - x0) + (xcutA - xcutB)/2;
+	      } else {
+		/* control should not pass to here */
+		*status = 1;
+		return;
+	      }
+	      /* add contribution to area of pixel */
+#ifdef DEBUG
+	      Rprintf( "\tIncrementing area by %lf\n", sgn * area);
+#endif
+	      out[i + ny * j] += sgn * area;
+	    }
+	    /* ............ end of loop over pixels within column ......... */
+	  }
+	}
+	/* ........ end of loop over columns of pixels ...............*/
+      }
+    } 
+  } /* ......... end of loop over polygon edges ...................*/
+}
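+
+/* Illustration (invented data; not used by the package): poly2imI
+   accumulates a signed count per raster point -- each non-vertical
+   edge adds +1 or -1 to every point below it, in each column the edge
+   spans -- so for a simple closed anticlockwise polygon, interior
+   points end at 1 and exterior points at 0. */
+static void poly2imIexample(void)
+{
+  /* CLOSED anticlockwise triangle (0,0), (4,0), (0,4) */
+  double xp[4] = {0.0, 4.0, 0.0, 0.0};
+  double yp[4] = {0.0, 0.0, 4.0, 0.0};
+  int np = 4, nx = 4, ny = 4;
+  int out[16] = {0};   /* [ny, nx] matrix, initialised to 0 */
+  poly2imI(xp, yp, &np, &nx, &ny, out);
+  /* out[i + j * ny] is now 1 exactly when the integer point (j, i)
+     lies inside the triangle, e.g. out[1 + 1 * 4] == 1 */
+}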
diff --git a/src/proto.h b/src/proto.h
new file mode 100644
index 0000000..c9e20d7
--- /dev/null
+++ b/src/proto.h
@@ -0,0 +1,223 @@
+#include <R.h>
+#include <Rinternals.h>
+
+/*
+  Prototype declarations for all native routines in spatstat package
+
+  Automatically generated - do not edit! 
+
+*/
+
+/*
+  
+                  Functions invoked by .C
+
+*/
+
+void areadifs(double *, int *, double *, double *, int *, int *, double *, void *); 
+void areaBdif(double *, int *, double *, double *, int *, int *, double *, double *, double *, double *, double *, void *);
+void delta2area(double *, double *, double *, double *, int *, double *, double *, double *, double *, int *, void *); 
+void delta2area(double *, double *, double *, double *, int *, double *, double *, double *, double *, int *, void *);
+void digberJ(double *, double *, int *, int *, int *, double *, void *);
+void xysegint(int *, double *, double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, void *);
+void Fclosepairs(int *, double *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *, double *, double *, int *, void *); 
+void paircount(int *, double *, double *, double *, int *, void *); 
+void Fclosepairs(int *, double *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *, double *, double *, int *, void *); 
+void crosscount(int *, double *, double *, int *, double *, double *, double *, int *, void *); 
+void Fcrosspairs(int *, double *, double *, int *, double *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *, double *, double *, int *, void *);
+void cocoImage(int *, int *, int *, void *); 
+void cocoGraph(int *, int *, int *, int *, int *, int *, void *);
+void lincrossdist(int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, int *, int *, double *, int *, int *, double *, void *);
+void trigrafS(int *, int *, int *, int *, int *, int *, int *, int *, int *, int *, void *); 
+void trigraf(int *, int *, int *, int *, int *, int *, int *, int *, int *, int *, void *); 
+void Idist2dpath(int *, int *, int *, int *, int *, int *, int *, void *);
+void Gdenspt(int *, double *, double *, double *, double *, void *); 
+void Gwtdenspt(int *, double *, double *, double *, double *, double *, void *); 
+void Gwtdenspt(int *, double *, double *, double *, double *, double *, void *); 
+void denspt(int *, double *, double *, double *, double *, double *, void *); 
+void wtdenspt(int *, double *, double *, double *, double *, double *, double *, void *); 
+void wtdenspt(int *, double *, double *, double *, double *, double *, double *, void *); 
+void adenspt(int *, double *, double *, double *, double *, double *, double *, void *); 
+void awtdenspt(int *, double *, double *, double *, double *, double *, double *, double *, void *); 
+void awtdenspt(int *, double *, double *, double *, double *, double *, double *, double *, void *); 
+void crdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, void *); 
+void wtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, void *); 
+void wtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, void *); 
+void acrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, void *); 
+void awtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, void *); 
+void awtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, void *);
+void segdens(double *, int *, double *, double *, double *, double *, int *, double *, double *, double *, void *);
+void Ediggra(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, double *, void *);
+void Ediggatsti(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, void *);
+void discareapoly(int *, double *, double *, int *, double *, int *, double *, double *, double *, double *, double *, double *, void *);
+void Ddist2dpath(int *, double *, int *, double *, double *, int *, int *, void *);
+void D3pairdist(int *, double *, double *, double *, int *, double *, void *); 
+void D3pairPdist(int *, double *, double *, double *, double *, double *, double *, int *, double *, void *); 
+void nnd3D(int *, double *, double *, double *, double *, int *, double *, void *); 
+void knnd3D(int *, int *, double *, double *, double *, double *, int *, double *, void *); 
+void nnw3D(int *, double *, double *, double *, double *, int *, double *, void *); 
+void knnw3D(int *, int *, double *, double *, double *, double *, int *, double *, void *); 
+void D3crossdist(int *, double *, double *, double *, int *, double *, double *, double *, int *, double *, void *); 
+void D3crossPdist(int *, double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, int *, double *, void *);
+void Cpairdist(int *, double *, double *, int *, double *, void *); 
+void CpairPdist(int *, double *, double *, double *, double *, int *, double *, void *); 
+void Ccrossdist(int *, double *, double *, int *, double *, double *, int *, double *, void *); 
+void CcrossPdist(int *, double *, double *, int *, double *, double *, double *, double *, int *, double *, void *);
+void nndMD(int *, int *, double *, double *, double *, void *); 
+void knndMD(int *, int *, int *, double *, double *, double *, void *); 
+void nnwMD(int *, int *, double *, double *, int *, double *, void *); 
+void knnwMD(int *, int *, int *, double *, double *, int *, double *, void *);
+void distmapbin(double *, double *, double *, double *, int *, int *, int *, double *, double *, void *);
+void ripleybox(int *, double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, void *); 
+void ripleypoly(int *, double *, double *, int *, double *, int *, double *, double *, double *, double *, double *, void *);
+void exact_dt_R(double *, double *, int *, double *, double *, double *, double *, int *, int *, int *, int *, double *, int *, double *, void *);
+void ps_exact_dt_R(double *, double *, double *, double *, int *, int *, int *, int *, int *, double *, int *, int *, double *, void *);
+void fardist2grid(int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, void *); 
+void fardistgrid(int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, void *);
+void RcallK3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, double *, double *, double *, int *, void *); 
+void RcallG3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, double *, double *, double *, int *, void *); 
+void RcallF3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *, void *); 
+void RcallF3cen(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *, int *, int *, int *, void *); 
+void RcallG3cen(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *, int *, int *, int *, void *); 
+void Rcallpcf3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, double *, double *, double *, int *, double *, void *); 
+void RcallF3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *, void *);
+void locxprod(int *, double *, double *, int *, double *, double *, double *, int *, double *, double *, void *);
+void Efiksel(int *, double *, double *, int *, double *, double *, double *, double *, double *, void *);
+void Egeyer(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, double *, void *);
+void hasXclose(int *, double *, double *, double *, int *, void *); 
+void hasXpclose(int *, double *, double *, double *, double *, int *, void *); 
+void hasXYclose(int *, double *, double *, int *, double *, double *, double *, int *, void *); 
+void hasXYpclose(int *, double *, double *, int *, double *, double *, double *, double *, int *, void *); 
+void hasX3close(int *, double *, double *, double *, double *, int *, void *); 
+void hasX3pclose(int *, double *, double *, double *, double *, double *, int *, void *); 
+void hasXY3close(int *, double *, double *, double *, int *, double *, double *, double *, double *, int *, void *); 
+void hasXY3pclose(int *, double *, double *, double *, int *, double *, double *, double *, double *, double *, int *, void *);
+void Cidw(double *, double *, double *, int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, double *, void *); 
+void idwloo(double *, double *, double *, int *, double *, double *, double *, double *, void *);
+void locprod(int *, double *, double *, double *, int *, double *, double *, void *); 
+void locxprod(int *, double *, double *, int *, double *, double *, double *, int *, double *, double *, void *);
+void KborderI(int *, double *, double *, double *, int *, double *, int *, int *, void *); 
+void KborderD(int *, double *, double *, double *, int *, double *, double *, double *, void *); 
+void Kwborder(int *, double *, double *, double *, double *, int *, double *, double *, double *, void *); 
+void KnoneI(int *, double *, double *, int *, double *, int *, void *); 
+void KnoneD(int *, double *, double *, int *, double *, double *, void *); 
+void Kwnone(int *, double *, double *, double *, int *, double *, double *, void *); 
+void KrectWtd(double *, double *, int *, double *, double *, double *, int *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *, void *); 
+void KrectInt(double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, int *, double *, double *, int *, int *, int *, void *); 
+void KrectDbl(double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *, void *);
+void Csumouter(double *, int *, int *, double *, void *); 
+void Cwsumouter(double *, int *, int *, double *, double *, void *); 
+void Csum2outer(double *, double *, int *, int *, int *, double *, void *); 
+void Cwsum2outer(double *, double *, int *, int *, int *, double *, double *); 
+void Cquadform(double *, int *, int *, double *, double *, void *); 
+void Cbiform(double *, double *, int *, int *, double *, double *, void *); 
+void Csumsymouter(double *, int *, int *, double *, void *); 
+void Cwsumsymouter(double *, double *, int *, int *, double *, void *);
+void Clinvwhichdist(int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *, void *);
+void Ccountends(int *, double *, int *, double *, int *, double *, double *, int *, int *, int *, double *, double *, double *, int *, void *);
+void Clinequad(int *, int *, int *, int *, double *, double *, double *, int *, int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, void *); 
+void ClineRquad(int *, int *, int *, int *, double *, double *, double *, int *, int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, void *); 
+void ClineMquad(int *, int *, int *, int *, double *, double *, double *, int *, int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, int *, int *, double *, double *, int *, void *); 
+void ClineRMquad(int *, int *, int *, int *, double *, double *, double *, int *, int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, int *, int *, double *, double *, int *, void *);
+void linearradius(int *, int *, int *, double *, int *, double *, double *, double *, void *); 
+void cocoGraph(int *, int *, int *, int *, int *, int *, void *);
+void cocoGraph(int *, int *, int *, int *, int *, int *, void *);
+void Clixellate(int *, int *, int *, int *, int *, int *, double *, double *, int *, double *, int *, int *, int *, double *, int *, double *, void *);
+void locpcfx(int *, double *, double *, int *, int *, double *, double *, int *, int *, double *, double *, double *, void *); 
+void locWpcfx(int *, double *, double *, int *, int *, double *, double *, int *, double *, int *, double *, double *, double *, void *);
+void cocoGraph(int *, int *, int *, int *, int *, int *, void *);
+void minPnnd2(int *, double *, double *, double *, double *, void *); 
+void minnnd2(int *, double *, double *, double *, double *, void *); 
+void maxPnnd2(int *, double *, double *, double *, double *, void *); 
+void maxnnd2(int *, double *, double *, double *, double *, void *);
+void nnX3Dinterface(int *, double *, double *, double *, int *, int *, double *, double *, double *, int *, int *, int *, int *, double *, int *, double *, void *); 
+void knnX3Dinterface(int *, double *, double *, double *, int *, int *, double *, double *, double *, int *, int *, int *, int *, int *, double *, int *, double *, void *);
+void nnXinterface(int *, double *, double *, int *, int *, double *, double *, int *, int *, int *, int *, double *, int *, double *, void *); 
+void knnXinterface(int *, double *, double *, int *, int *, double *, double *, int *, int *, int *, int *, int *, double *, int *, double *, void *);
+void linnndist(int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, double *, double *, void *); 
+void linknnd(int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *, void *); 
+void linnnwhich(int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, double *, double *, int *, void *); 
+void linknnd(int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *, void *); 
+void linknncross(int *, int *, int *, double *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *, void *); 
+void linSnndwhich(int *, int *, double *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *, void *); 
+void linndcross(int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, int *, double *, double *, int *, void *); 
+void linndxcross(int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, int *, void *);
+void nndistsort(int *, double *, double *, double *, double *, void *); 
+void knndsort(int *, int *, double *, double *, double *, double *, void *); 
+void nnwhichsort(int *, double *, double *, int *, double *, void *); 
+void knnsort(int *, int *, double *, double *, double *, int *, double *, void *);
+void nnGinterface(int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, double *, int *, double *, void *); 
+void knnGinterface(int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, double *, void *);
+void linpairdist(int *, double *, double *, int *, double *, double *, double *, int *, int *, double *, int *, double *, void *);
+void poly2imA(int *, int *, double *, double *, int *, double *, int *, void *);
+void xypsi(int *, double *, double *, double *, double *, double *, double *, double *, int *, int *, void *); 
+void Cxypolyselfint(int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, void *);
+void auctionbf(int *, int *, int *, double *, double *, int *, double *, void *); 
+void dwpure(int *, int *, int *, int *, int *, int *, void *); 
+void auctionbf(int *, int *, int *, double *, double *, int *, double *, void *); 
+void dwpure(int *, int *, int *, int *, int *, int *, void *); 
+void dinfty_R(int *, int *, int *, void *); 
+void dwpure(int *, int *, int *, int *, int *, int *, void *); 
+void dwpure(int *, int *, int *, int *, int *, int *, void *);
+void seg2pixI(int *, double *, double *, double *, double *, int *, int *, int *, void *); 
+void seg2pixL(int *, double *, double *, double *, double *, double *, double *, double *, int *, int *, double *, void *); 
+void seg2pixN(int *, double *, double *, double *, double *, double *, int *, int *, double *, void *);
+void xysegint(int *, double *, double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, void *); 
+void xysi(int *, double *, double *, double *, double *, int *, double *, double *, double *, double *, double *, int *, void *); 
+void xysiANY(int *, double *, double *, double *, double *, int *, double *, double *, double *, double *, double *, int *, void *); 
+void xysegXint(int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, void *); 
+void xysxi(int *, double *, double *, double *, double *, double *, int *, void *);
+void Corput(int *, int *, double *, void *);
+void knownCif(char *, int *, void *);
+void scantrans(double *, double *, int *, double *, double *, double *, double *, int *, int *, double *, int *, void *);
+void Gsmoopt(int *, double *, double *, double *, int *, double *, double *, void *); 
+void Gwtsmoopt(int *, double *, double *, double *, int *, double *, double *, double *, void *); 
+void smoopt(int *, double *, double *, double *, int *, double *, double *, double *, void *); 
+void wtsmoopt(int *, double *, double *, double *, int *, double *, double *, double *, double *, void *); 
+void asmoopt(int *, double *, double *, double *, int *, double *, double *, double *, void *); 
+void awtsmoopt(int *, double *, double *, double *, int *, double *, double *, double *, double *, void *); 
+void crsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, void *); 
+void wtcrsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, void *); 
+void acrsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, void *); 
+void awtcrsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, void *);
+void CspaSumSymOut(int *, int *, int *, int *, int *, int *, double *, int *, double *, void *); 
+void CspaWtSumSymOut(int *, int *, int *, int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, void *);
+void Ccrosspaircounts(int *, double *, double *, int *, double *, double *, double *, int *, void *); 
+void Cclosepaircounts(int *, double *, double *, double *, int *, void *);
+void poly2imI(double *, double *, int *, int *, int *, int *, void *); 
+void bdrymask(int *, int *, int *, int *, void *); 
+void discs2grid(int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, int *, void *);
+/*
+
+             Functions invoked by .Call
+
+*/
+SEXP close3pairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP close3IJpairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP cross3pairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP cross3IJpairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+SEXP Vclosepairs(SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP VcloseIJpairs(SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP VcloseIJDpairs(SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP Vcrosspairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP VcrossIJpairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP VcrossIJDpairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP Vclosethresh(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+SEXP triograph(SEXP, SEXP, SEXP, SEXP); 
+SEXP trioxgraph(SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP triDgraph(SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP triDRgraph(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP graphVees(SEXP, SEXP, SEXP, SEXP);
+SEXP Cxysegint(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP CxysegXint(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP CxysegXint(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+SEXP thinjumpequal(SEXP, SEXP, SEXP, SEXP);
+SEXP xmethas(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP xmethas(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+SEXP PerfectStrauss(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP PerfectHardcore(SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP PerfectStraussHard(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP PerfectDiggleGratton(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP PerfectDGS(SEXP, SEXP, SEXP, SEXP, SEXP); 
+SEXP PerfectPenttinen(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+SEXP Cwhist(SEXP, SEXP, SEXP, SEXP);
diff --git a/src/quasirandom.c b/src/quasirandom.c
new file mode 100644
index 0000000..a9aca31
--- /dev/null
+++ b/src/quasirandom.c
@@ -0,0 +1,38 @@
+/*
+  quasirandom.c 
+
+  Quasi-random sequence generators
+
+  Copyright (C) Adrian Baddeley 2014
+  GNU Public Licence version 2 | 3
+
+  $Revision: 1.1 $  $Date: 2014/03/17 03:31:59 $
+
+*/
+
+#include <math.h>
+
+void Corput(base, n, result) 
+     int *base, *n;
+     double *result; 
+{
+  int b, N, i, j;
+  register double f, f0, z;
+
+  N = *n;
+  b = *base;
+
+  f0 = 1.0/((double) b);
+
+  for(i = 0; i < N; i++) {
+    j = i+1;
+    z = 0;
+    f = f0;
+    while(j > 0) {
+      z = z + f * (j % b);
+      j = j/b; 
+      f = f / ((double) b);
+    }
+    result[i] = z;
+  }
+}
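+
+/* Illustration (invented name; not part of the package): with base 2
+   the sequence begins 1/2, 1/4, 3/4, 1/8, 5/8, ... -- the radical
+   inverse of i+1, i.e. its base-b digits reflected about the radix
+   point. */
+static void Corputexample(void)
+{
+  int base = 2, n = 5;
+  double result[5];
+  Corput(&base, &n, result);
+  /* result now holds {0.5, 0.25, 0.75, 0.125, 0.625} */
+}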
diff --git a/src/raster.h b/src/raster.h
new file mode 100755
index 0000000..5ae6eab
--- /dev/null
+++ b/src/raster.h
@@ -0,0 +1,88 @@
+/*
+      raster.h
+
+      Definition of raster structures & operations
+
+      requires <math.h> (for floor())
+
+      $Revision: 1.3 $ $Date: 2004/11/15 19:25:11 $
+*/
+
+typedef struct Raster{
+ /* array of data */
+	char		*data;		/* coerced to appropriate type */
+	int	nrow;		/* dimensions of entire array */
+	int	ncol;
+	int	length;
+	int	rmin;		/* position of valid subrectangle */
+	int	rmax;
+	int	cmin;
+	int	cmax;
+/* definition of mapping into continuous space */
+	double	x0;	/* position of entry (rmin,cmin) */
+	double	y0;
+	double	x1;	/* position of entry (rmax,cmax) */
+	double	y1;
+	double	xstep;	/* x increment for each column step */
+	double	ystep;	/* y increment for each row step */
+	                /*
+			   xstep = (x1 - x0)/(cmax - cmin)
+			         = (x1 - x0)/(number of valid columns - 1)
+			   CAN BE POSITIVE OR NEGATIVE 
+			 */
+	 /* image of valid subrectangle */
+	double	xmin;	/* = min{x0,x1} */
+	double	xmax;
+	double	ymin;
+	double	ymax;
+} Raster;
+
+/*      how to clear the data      */
+
+#define Clear(ARRAY,TYPE,VALUE) \
+       { unsigned int i; TYPE *p; \
+	 for(i = 0, p = (TYPE *) (ARRAY).data; i < (ARRAY).length; i++, p++) \
+	 *p = VALUE; }
+		
+/* 	how to index a rectangular array
+	stored sequentially in row-major order */
+
+#define Entry(ARRAY,ROW,COL,TYPE) \
+	((TYPE *)((ARRAY).data))[COL + (ROW) * ((ARRAY).ncol)]
+
+     /* test for indices inside subrectangle */
+	
+#define Inside(ARRAY,ROW,COL) \
+	( (ROW >= (ARRAY).rmin) && (ROW <= (ARRAY).rmax) && \
+	(COL >= (ARRAY).cmin) && (COL <= (ARRAY).cmax))
+
+     /* how to compute the position in R^2 corresponding to a raster entry */
+
+#define Xpos(ARRAY,COL) \
+	((ARRAY).x0 + (ARRAY).xstep * (COL - (ARRAY).cmin))
+#define Ypos(ARRAY,ROW) \
+	((ARRAY).y0 + (ARRAY).ystep * (ROW - (ARRAY).rmin))
+
+#define Distance(X,Y,XX,YY) sqrt((X - XX)* (X - XX) + (Y - YY) * (Y - YY))
+
+#define DistanceTo(X,Y,ARRAY,ROW,COL)\
+	Distance(X,Y,Xpos(ARRAY,COL),Ypos(ARRAY,ROW))
+
+#define DistanceSquared(X,Y,XX,YY) ((X - XX)* (X - XX) + (Y - YY) * (Y - YY))
+
+#define DistanceToSquared(X,Y,ARRAY,ROW,COL)\
+	DistanceSquared(X,Y,Xpos(ARRAY,COL),Ypos(ARRAY,ROW))
+
+
+  /* how to map a point (x,y) in R^2 to a raster entry */
+  /*
+     (x,y) is guaranteed to lie in the rectangle bounded by
+     the images of the entries (r,c), (r+1,c), (r,c+1), (r+1,c+1)
+     where r = RowIndex(..) and c = ColIndex(..).
+  */
+
+#define RowIndex(ARRAY,Y) \
+	((ARRAY).rmin + (int) floor(((Y) - (ARRAY).y0)/(ARRAY).ystep))
+#define ColIndex(ARRAY,X) \
+	((ARRAY).cmin + (int) floor(((X) - (ARRAY).x0)/(ARRAY).xstep))
+	
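+#ifndef RASTER_SKETCH
+#define RASTER_SKETCH
+/* Illustration (invented values; not used by the package; guarded
+   against repeated #inclusion): filling in a Raster by hand -- in the
+   package this is done by shape_raster() -- and exercising the access
+   macros. */
+static void rasterexample(void)
+{
+  int data[100];
+  Raster r;
+  r.data = (char *) data;
+  r.nrow = r.ncol = 10;  r.length = 100;
+  r.rmin = r.cmin = 0;   r.rmax = r.cmax = 9;
+  r.x0 = r.y0 = 0.0;     r.x1 = r.y1 = 9.0;
+  r.xstep = r.ystep = 1.0;
+  r.xmin = r.ymin = 0.0; r.xmax = r.ymax = 9.0;
+  Clear(r, int, 0)
+  Entry(r, 3, 4, int) = 7;    /* row 3, column 4 */
+  /* Xpos(r, 4) == 4.0 and Ypos(r, 3) == 3.0; conversely
+     RowIndex(r, 3.2) == 3 and ColIndex(r, 4.9) == 4 */
+}
+#endif
+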
diff --git a/src/rthin.c b/src/rthin.c
new file mode 100644
index 0000000..76c022c
--- /dev/null
+++ b/src/rthin.c
@@ -0,0 +1,83 @@
+#include <R.h>
+#include <Rdefines.h>
+#include <R_ext/Utils.h>
+
+/* 
+   rthin.c
+
+   Select from the integers 1:n with probability p
+   by simulating geometric(p) jumps between selected integers
+
+   $Revision: 1.1 $ $Date: 2015/07/25 03:19:22 $
+
+*/
+
+SEXP thinjumpequal(SEXP n,
+		   SEXP p,
+		   SEXP guess) 
+{
+  int N;
+  double P;
+
+  int *w;  /* temporary storage for selected integers */
+  int nw, nwmax;
+
+  int i, j, k;
+  double log1u, log1p;
+
+  /* R object return value */
+  SEXP Out;
+  /* external storage pointer */
+  int *OutP;
+
+  /* protect R objects from garbage collector */
+  PROTECT(p = AS_NUMERIC(p));
+  PROTECT(n = AS_INTEGER(n));
+  PROTECT(guess = AS_INTEGER(guess));
+
+  /* Translate arguments from R to C */
+  N = *(INTEGER_POINTER(n));
+  P = *(NUMERIC_POINTER(p));
+  nwmax = *(INTEGER_POINTER(guess));
+
+  /* Allocate space for result */
+  w = (int *) R_alloc(nwmax, sizeof(int));
+
+  /* set up */
+  GetRNGstate();
+  log1p = -log(1.0 - P);
+  
+  /* main loop */
+  i = 0;  /* last selected element of 1...N */
+  nw = 0;  /* number of selected elements */
+  while(i <= N) {
+    log1u = exp_rand();  /* an exponential rv is equivalent to -log(1-U) */
+    j = (int) ceil(log1u/log1p); /* j is geometric(p) */
+    i += j;
+    if(nw >= nwmax) {
+      /* overflow; allocate more space */
+      w  = (int *) S_realloc((char *) w,  2 * nwmax, nwmax, sizeof(int));
+      nwmax    = 2 * nwmax;
+    }
+    /* add 'i' to output vector */
+    w[nw] = i;
+    ++nw;
+  }
+  /* The last saved 'i' could have exceeded 'N' */
+  /* For efficiency we don't check this in the loop */
+  if(nw > 0 && w[nw-1] > N) 
+    --nw;
+
+  PutRNGstate();
+
+  /* create result vector */
+  PROTECT(Out = NEW_INTEGER(nw));
+
+  /* copy results into output */
+  OutP  = INTEGER_POINTER(Out);
+  for(k = 0; k < nw; k++)
+    OutP[k] = w[k];
+
+  UNPROTECT(4);
+  return(Out);
+}
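+
+/* Illustration (invented name; not part of the package): the jump
+   ceil(E/a), with E ~ Exp(1) and a = -log(1-p), is Geometric(p):
+   P(ceil(E/a) >= k) = exp(-(k-1)*a) = (1-p)^(k-1), which is the law
+   of the gap between consecutive successes of independent p-coin
+   flips -- exactly what independent thinning requires. */
+static int geomjump(double expvar, double p)
+{
+  return((int) ceil(expvar / (-log(1.0 - p))));
+}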
diff --git a/src/scan.c b/src/scan.c
new file mode 100644
index 0000000..eb416e5
--- /dev/null
+++ b/src/scan.c
@@ -0,0 +1,92 @@
+/*
+  scan.c
+
+  Scan transform
+
+  $Revision: 1.2 $ $Date: 2012/04/16 12:00:07 $
+
+*/
+#include <R.h>
+#include <math.h>
+#include "raster.h"
+
+void shape_raster();
+
+void
+Cscantrans(x, y, npt, R, out)
+	double	*x, *y;		/* data points */
+	int	npt;
+	double  R;             /* radius */
+	Raster	*out;	       /* scan image */
+{
+  int	i,j,k,l,m;
+  double  d2, R2;
+  int   rmin, rmax, cmin, cmax, Rrow, Rcol, lmin, lmax, mmin, mmax;
+
+  /* initialise raster */
+  Clear(*out,int,0);
+  
+  /* If the list of data points is empty, ... exit now */
+  if(npt == 0) 
+    return;
+
+  R2 = R * R;
+  cmin = out->cmin;
+  cmax = out->cmax;
+  rmin = out->rmin;
+  rmax = out->rmax;
+
+  /* disc size in rows/columns */
+  Rrow = (int) ceil(R/(out->ystep));
+  Rcol = (int) ceil(R/(out->xstep));
+  if(Rrow < 1) Rrow = 1; 
+  if(Rcol < 1) Rcol = 1;
+	
+  /* run through points */
+  for(i = 0; i < npt; i++) {
+    j = RowIndex(*out,y[i]);
+    k = ColIndex(*out,x[i]);
+    lmin = j - Rrow;  if(lmin < rmin) lmin = rmin; 
+    lmax = j + Rrow;  if(lmax > rmax) lmax = rmax;
+    mmin = k - Rcol;  if(mmin < cmin) mmin = cmin; 
+    mmax = k + Rcol;  if(mmax > cmax) mmax = cmax;
+
+    for(l = lmin; l <= lmax; l++) {
+      for(m = mmin; m <= mmax; m++) {
+	d2 = DistanceToSquared(x[i],y[i],*out,l,m);
+	if(d2 <= R2) 
+	  Entry(*out,l,m,int) += 1;
+      }
+    }
+  }
+}
+
+/* R interface */
+
+void scantrans(x, y, n,
+	       xmin, ymin, xmax, ymax,
+	       nr, nc, R,
+	       counts)
+	double *x, *y;		/* input data points */
+	int	*n;
+	double *xmin, *ymin,
+               *xmax, *ymax;  	/* guaranteed bounding box */
+	int *nr, *nc;		/* desired raster dimensions */
+	double *R;              /* radius */
+	     /* output array */
+	int *counts;	        /* number of R-close points */
+{
+  Raster out;
+  int nrow, ncol, npoints;
+  double r;
+
+  nrow = *nr;
+  ncol = *nc;
+  npoints = *n;
+  r = *R;
+
+  shape_raster( &out, (void *) counts,
+		*xmin,*ymin,*xmax,*ymax,
+		nrow, ncol, 0, 0);
+  Cscantrans(x, y, npoints, r, &out);
+}	
diff --git a/src/seg2pix.c b/src/seg2pix.c
new file mode 100755
index 0000000..ffbda44
--- /dev/null
+++ b/src/seg2pix.c
@@ -0,0 +1,265 @@
+#include <R.h>
+#include <Rmath.h>
+#include <R_ext/Utils.h>
+#include <math.h>
+
+#include "chunkloop.h"
+
+#undef DEBUG
+
+/*
+
+  seg2pix.c
+
+  Discretise line segment on pixel grid
+
+  seg2pixI      pixel value is indicator = 1 if any line crosses pixel
+
+  seg2pixN      pixel value is (weighted) number of lines crossing pixel
+
+  seg2pixL      pixel value is total (weighted) length of lines inside pixel
+
+  (rescale R data so that pixels are integer)
+  pixels numbered 0, ..., nx-1 and 0, ..., ny-1
+  with boundaries at x=0, x=nx, y=0, y=ny.
+
+*/
+
+#define V(I,J) out[(I) + (J) * (Ny)]
+
+int clamp(k, n0, n1) 
+     int k, n0, n1;
+{
+  int m;
+  m = k;
+  if(m < n0) m = n0; 
+  if(m > n1) m = n1;
+  return(m);
+}
+
+/*  function 'seg2pixI' returns indicator = 1 if pixel is hit by any segment */
+
+#define FNAME seg2pixI
+#undef SUMUP
+#include "seg2pix.h"
+#undef FNAME
+
+/*  function 'seg2pixN' returns (weighted) number of segments hitting pixel */
+
+#define FNAME seg2pixN
+#define SUMUP
+#include "seg2pix.h"
+#undef FNAME
+#undef SUMUP
+
+/* 
+   seg2pixL does not fit the code template above: it also needs the
+   original pixel dimensions, in order to compute true segment lengths.
+*/
+
+void seg2pixL(ns,x0,y0,x1,y1,weights,pixwidth,pixheight,nx,ny,out)
+     int *ns;
+     double *x0,*y0,*x1,*y1,*weights; /* segment coordinates and weights */
+     double *pixwidth, *pixheight;  /* original pixel dimensions */
+     int *nx, *ny;
+     double *out;  /* output matrix */
+{
+  int Ns, Nx, Ny, i, j, k, m, mmin, mmax, maxchunk;
+  double x0i, x1i, y0i, y1i;
+  double leni;
+  double xleft, yleft, xright, yright, slope, scalesecant;
+  double xlow, xhigh, ylow, yhigh, invslope, scalecosecant;
+  double xstart, ystart, xfinish, yfinish; 
+  double xxx0, xxx1, yyy0, yyy1;
+  int mleft, mright, kstart, kfinish, kmin, kmax;
+  double pwidth, pheight, pwidth2, pheight2;
+  double wti; 
+
+  Ns = *ns;
+  Nx = *nx;
+  Ny = *ny;
+
+  /* 
+     one scaled x unit = 'pwidth' original x units
+     one scaled y unit = 'pheight' original y units
+  */
+	 
+  pwidth = *pixwidth;
+  pheight = *pixheight;
+  pwidth2 = pwidth * pwidth;
+  pheight2 = pheight * pheight;
+
+  /* zero the matrix */
+
+  for(k = 0; k < Ny; k++)
+    for(j = 0; j < Nx; j++)
+      V(k, j) = 0;
+
+  OUTERCHUNKLOOP(i, Ns, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, Ns, maxchunk, 8196) {
+      x0i = x0[i];
+      y0i = y0[i];
+      x1i = x1[i];
+      y1i = y1[i];   
+      wti = weights[i];
+      leni = sqrt(pwidth2 * pow(x1i - x0i, 2) + pheight2 * pow(y1i-y0i, 2));
+#ifdef DEBUG
+      Rprintf("(%lf, %lf) to (%lf, %lf), length %lf\n",
+	      x0i, y0i, x1i, y1i, leni);
+#endif
+      if(leni < 0.001) { /* tiny segment */
+#ifdef DEBUG
+	Rprintf("tiny\n");
+#endif
+	k = clamp((int) floor(x0i), 0, Nx-1);
+	j = clamp((int) floor(y0i), 0, Ny-1);
+	V(j,k) += wti * leni;
+      } else if(floor(x1i) == floor(x0i) && floor(y1i) == floor(y0i)) { 
+	/* contained in one cell */
+#ifdef DEBUG
+	Rprintf("contained in one cell\n");
+#endif
+	k = clamp((int) floor(x0i), 0, Nx-1);
+	j = clamp((int) floor(y0i), 0, Ny-1);
+	V(j,k) += wti * leni;
+      } else if(floor(y1i) == floor(y0i)) { /* horizontal */
+#ifdef DEBUG
+	Rprintf("horizontal\n");
+#endif
+	j = clamp((int) floor(y1i), 0, Ny-1);
+	if(x1i > x0i) {
+	  xleft = x0i;
+	  yleft = y0i;
+	  xright = x1i;
+	  yright = y1i;
+	} else {
+	  xleft = x1i;
+	  yleft = y1i;
+	  xright = x0i;
+	  yright = y0i;
+	}
+	mmin = clamp((int) floor(xleft), 0, Nx-1);
+	mmax = clamp((int) floor(xright), 0, Nx-1);
+	slope = (yright - yleft)/(xright - xleft);
+	scalesecant = wti * sqrt(pwidth2 + slope * slope * pheight2);
+	/* 
+	   For this slope, one scaled x unit means
+	   'pwidth' original x units and
+	   slope * pheight original y units
+	   i.e. line length sqrt(pwidth^2 + slope^2 * pheight^2)
+	 
+	*/
+	for(k = mmin; k <= mmax; k++) {
+	  xstart = (k == mmin) ? xleft : k;
+	  xfinish = (k == mmax) ? xright : (k+1);
+	  V(j,k) += (xfinish - xstart) * scalesecant;
+	}
+      } else if(floor(x1i) == floor(x0i)) { /* vertical */
+#ifdef DEBUG
+	Rprintf("vertical\n");
+#endif
+	k = clamp((int) floor(x1i), 0, Nx-1);
+	if(y1i > y0i) {
+	  xlow = x0i;
+	  ylow = y0i;
+	  xhigh = x1i;
+	  yhigh = y1i;
+	} else {
+	  xlow = x1i;
+	  ylow = y1i;
+	  xhigh = x0i;
+	  yhigh = y0i;
+	}
+	mmin = clamp((int) floor(ylow), 0, Ny-1);
+	mmax = clamp((int) floor(yhigh), 0, Ny-1);
+	invslope = (xhigh - xlow)/(yhigh - ylow);
+	scalecosecant = wti * sqrt(pheight2 + invslope * invslope * pwidth2);
+#ifdef DEBUG
+	Rprintf("i = %d\n", i);
+	Rprintf("inverse slope = %lf\n", invslope);
+	Rprintf("scaled cosecant = %lf\n", scalecosecant);
+#endif
+	/* 
+	   For this slope, one scaled y unit means
+	   'pheight' original y units and
+	   invslope * pwidth original x units
+	   i.e. line length sqrt(pheight^2 + invslope^2 * pwidth^2)
+	 
+	*/
+	for(j = mmin; j <= mmax; j++) {
+	  ystart = (j == mmin)? ylow : j;
+	  yfinish = (j == mmax)? yhigh : (j+1);
+	  V(j,k) += (yfinish - ystart) * scalecosecant;
+	}
+      } else { /* general case */
+#ifdef DEBUG
+	Rprintf("general\n");
+#endif
+	if(x1i > x0i) {
+	  xleft = x0i;
+	  yleft = y0i;
+	  xright = x1i;
+	  yright = y1i;
+	} else {
+	  xleft = x1i;
+	  yleft = y1i;
+	  xright = x0i;
+	  yright = y0i;
+	}
+	slope = (yright - yleft)/(xright - xleft);
+	mleft = clamp((int) floor(xleft), 0, Nx-1);
+	mright = clamp((int) floor(xright), 0, Nx-1); 
+#ifdef DEBUG
+	Rprintf("column range [%d, %d]\n", mleft, mright);
+#endif
+	/* treat each vertical slice */
+	for(m = mleft; m <= mright; m++) {
+	  if(m == mleft) {
+	    xstart = xleft;
+	    ystart = yleft;
+	  } else {
+	    xstart = m;
+	    ystart = yleft + slope * (xstart - xleft);
+	  }
+	  if(m == mright) {
+	    xfinish = xright;
+	    yfinish = yright;
+	  } else {
+	    xfinish = m+1;
+	    yfinish = yleft + slope * (xfinish - xleft);
+	  }
+	  kstart = clamp((int) floor(ystart), 0, Ny-1);
+	  kfinish = clamp((int) floor(yfinish), 0, Ny-1);
+	  if(ystart < yfinish) {
+	    kmin = kstart;
+	    kmax = kfinish;
+	    ylow = ystart;
+	    yhigh = yfinish;
+	  } else {
+	    kmin = kfinish;
+	    kmax = kstart;
+	    ylow = yfinish;
+	    yhigh = ystart;
+	  }
+#ifdef DEBUG
+	  Rprintf("column %d: rows [%d, %d]\n", m, kmin, kmax);
+#endif
+	  for(k = kmin; k <= kmax; k++) { 
+	    yyy0 = (k == kmin) ? ylow : k;
+	    yyy1 = (k == kmax) ? yhigh : (k+1);
+	    xxx0 = xstart + (yyy0 - ystart)/slope;
+	    xxx1 = xstart + (yyy1 - ystart)/slope;
+	    V(k, m) += wti * sqrt(pow(yyy1 - yyy0, 2) * pheight2 + 
+				  pow(xxx1 - xxx0, 2) * pwidth2);
+	  }
+	}
+      }
+    }
+  }
+#ifdef DEBUG
+  Rprintf("done.\n");
+#endif
+}
+
+
+
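+/*
+  A minimal standalone sketch of the scaling used above (illustrative,
+  not part of the upstream source): in the scaled coordinates, one x unit
+  of a line with slope 's' corresponds to 'pwidth' original x units and
+  s * pheight original y units, so its true length is
+  sqrt(pwidth^2 + s^2 * pheight^2) -- the 'scalesecant' factor applied
+  per pixel column (the vertical case uses the analogous cosecant factor).
+*/
+#ifdef DEMO_SEG2PIX
+#include <math.h>
+static double demo_piece_length(double xstart, double xfinish, double slope,
+				double pwidth, double pheight)
+{
+  /* true length of the piece of line lying between scaled
+     x-coordinates xstart and xfinish */
+  return (xfinish - xstart) *
+    sqrt(pwidth * pwidth + slope * slope * pheight * pheight);
+}
+#endif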
diff --git a/src/seg2pix.h b/src/seg2pix.h
new file mode 100644
index 0000000..e7bf595
--- /dev/null
+++ b/src/seg2pix.h
@@ -0,0 +1,176 @@
+/*
+  seg2pix.h
+
+  Code template for seg2pix.c
+
+  $Revision: 1.2 $ $Date: 2015/01/08 10:57:20 $
+
+  Macros:
+  FNAME   name of function
+  SUMUP   #defined if crossings should be counted (weights summed)
+ 
+  V       matrix index macro (in seg2pix.c)
+  DEBUG   debug if #defined
+
+*/
+
+#undef INCREMENT
+#undef ZERO
+
+#ifdef SUMUP
+#define ZERO (double) 0.0
+#define INCREMENT(I,J) V(I,J) += wi
+#else
+#define ZERO 0
+#define INCREMENT(I,J) V(I,J) = 1
+#endif
+
+
+void FNAME(ns,x0,y0,x1,y1,
+#ifdef SUMUP
+	   w,
+#endif
+	   nx,ny,out)
+     int *ns;  /* number of segments */
+     double *x0,*y0,*x1,*y1; /* coordinates of segment endpoints */
+     int *nx, *ny;  /* dimensions of pixel array (columns, rows) */
+#ifdef SUMUP
+     double *w; /* weights attached to segments */
+     double *out; /* output totals */
+#else 
+     int *out;     /* output indicators */
+#endif
+{
+  int Ns, Nx, Ny, i, j, k, m, m0, m1, mmin, mmax, maxchunk;
+  double x0i, x1i, y0i, y1i, dx, dy;
+  double leni;
+  double xleft, yleft, xright, yright, slope;
+  double xstart, ystart, xfinish, yfinish;
+  int mleft, mright, kstart, kfinish, kmin, kmax;
+#ifdef SUMUP
+  double wi;
+#endif
+
+  Ns = *ns;
+  Nx = *nx;
+  Ny = *ny;
+  
+  for(k = 0; k < Ny; k++) 
+    for(j = 0; j < Nx; j++) 
+      V(k, j) = ZERO;
+
+  OUTERCHUNKLOOP(i, Ns, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, Ns, maxchunk, 8196) {
+      x0i = x0[i];
+      y0i = y0[i];
+      x1i = x1[i];
+      y1i = y1[i];   
+#ifdef SUMUP
+      wi = w[i];
+#endif
+      dx = x1i - x0i;
+      dy = y1i - y0i;
+      leni = hypot(dx, dy);
+#ifdef DEBUG
+      Rprintf("(%lf, %lf) to (%lf, %lf)\n",
+	      x0i, y0i, x1i, y1i);
+#endif
+      if(leni < 0.001) { /* tiny segment */
+#ifdef DEBUG
+	Rprintf("tiny\n");
+#endif
+	k = clamp((int) floor(x0i), 0, Nx-1);
+	j = clamp((int) floor(y0i), 0, Ny-1);
+	INCREMENT(j, k);
+      } else if(floor(x1i) == floor(x0i) && floor(y1i) == floor(y0i)) { 
+	/* contained in one cell */
+#ifdef DEBUG
+	Rprintf("contained in one cell\n");
+#endif
+	k = clamp((int) floor(x0i), 0, Nx-1);
+	j = clamp((int) floor(y0i), 0, Ny-1);
+	INCREMENT(j, k);
+      } else if(floor(y1i) == floor(y0i)) { /* horizontal */
+#ifdef DEBUG
+	Rprintf("horizontal\n");
+#endif
+	j = clamp((int) floor(y1i), 0, Ny-1);
+	m0 = clamp((int) floor(x0i), 0, Nx-1);
+	m1 = clamp((int) floor(x1i), 0, Nx-1);
+	mmin = (m0 < m1) ? m0: m1;
+	mmax = (m0 < m1) ? m1: m0;
+#ifdef DEBUG
+	Rprintf("row %d: columns [%d, %d]\n", j, mmin, mmax);
+#endif
+	for(k = mmin; k <= mmax; k++)
+	  INCREMENT(j,k);
+      } else if(floor(x1i) == floor(x0i)) { /* vertical */
+#ifdef DEBUG
+	Rprintf("vertical\n");
+#endif
+	k = clamp((int) floor(x1i), 0, Nx-1);
+	m0 = clamp((int) floor(y0i), 0, Ny-1);
+	m1 = clamp((int) floor(y1i), 0, Ny-1);
+	mmin = (m0 < m1) ? m0: m1;
+	mmax = (m0 < m1) ? m1: m0;
+#ifdef DEBUG
+	Rprintf("column %d: rows [%d, %d]\n", k, mmin, mmax);
+#endif
+	for(j = mmin; j <= mmax; j++) 
+	  INCREMENT(j,k);
+      } else { /* general case */
+#ifdef DEBUG
+	Rprintf("general\n");
+#endif
+	if(x1i > x0i) {
+	  xleft = x0i;
+	  yleft = y0i;
+	  xright = x1i;
+	  yright = y1i;
+	} else {
+	  xleft = x1i;
+	  yleft = y1i;
+	  xright = x0i;
+	  yright = y0i;
+	}
+	slope = (yright - yleft)/(xright - xleft);
+	mleft = clamp((int) floor(xleft), 0, Nx-1);
+	mright = clamp((int) floor(xright), 0, Nx-1); 
+#ifdef DEBUG
+	Rprintf("column range [%d, %d]\n", mleft, mright);
+#endif
+	/* treat each vertical slice */
+	for(m = mleft; m <= mright; m++) {
+	  if(m == mleft) {
+	    xstart = xleft;
+	    ystart = yleft;
+	  } else {
+	    xstart = m;
+	    ystart = yleft + slope * (xstart - xleft);
+	  }
+	  if(m == mright) {
+	    xfinish = xright;
+	    yfinish = yright;
+	  } else {
+	    xfinish = m+1;
+	    yfinish = yleft + slope * (xfinish - xleft);
+	  }
+	  kstart = clamp((int) floor(ystart), 0, Ny-1);
+	  kfinish = clamp((int) floor(yfinish), 0, Ny-1);
+	  kmin = (kstart < kfinish) ? kstart : kfinish;
+	  kmax = (kstart < kfinish) ? kfinish : kstart;
+#ifdef DEBUG
+	  Rprintf("column %d: rows [%d, %d]\n", m, kmin, kmax);
+#endif
+	  for(k = kmin; k <= kmax; k++) 
+	    INCREMENT(k, m);
+	}
+      } /* end of if-else */
+    }
+  }
+#ifdef DEBUG
+    Rprintf("done\n");
+#endif
+}
+
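+/*
+  A minimal sketch of how this template is intended to be instantiated
+  (the macro values and function names below are illustrative;
+  see seg2pix.c for the actual definitions):
+
+  #define V(I,J) out[(I) + (J) * Ny]     -- matrix index macro
+
+  #define FNAME seg2pixI                 -- indicator version
+  #undef SUMUP
+  #include "seg2pix.h"
+  #undef FNAME
+
+  #define FNAME seg2pixL                 -- weighted (summing) version
+  #define SUMUP
+  #include "seg2pix.h"
+*/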
diff --git a/src/segdens.c b/src/segdens.c
new file mode 100644
index 0000000..c7364b8
--- /dev/null
+++ b/src/segdens.c
@@ -0,0 +1,53 @@
+#include <R.h>
+#include <Rmath.h>
+#include <R_ext/Utils.h>
+#include <math.h>
+
+/*
+  segdens.c
+
+  Convolution of segments with Gaussian kernel
+
+  Adrian Baddeley, 02 Dec 2016 
+  Licence: GPL >= 2.0
+*/
+
+
+#define DNORM(X, SIG) dnorm((X), (double) 0.0, (SIG), FALSE)
+
+#define PNORM(X, SIG) pnorm((X), (double) 0.0, (SIG), TRUE, FALSE)
+
+void segdens(sigma, ns, xs, ys, alps, lens, np, xp, yp, z) 
+     double *sigma; /* bandwidth */
+     int *ns; /* number of line segments */
+     double *xs, *ys, *alps, *lens;  /* x,y of first endpoint, angle, length */
+     int *np; /* number of pixels or test locations */
+     double *xp, *yp; /* pixel coordinates */
+     double *z; /* result, assumed initially 0 */
+{
+  int i, j, Ns, Np;
+  double Sigma;
+  double xsi, ysi, angi, leni, cosi, sini;
+  double dx, dy, u1, u2;
+
+  Ns = *ns;
+  Np = *np;
+  Sigma = *sigma;
+
+  for(i = 0; i < Ns; i++) {
+    R_CheckUserInterrupt();
+    xsi = xs[i];
+    ysi = ys[i];
+    angi = alps[i];
+    leni = lens[i];
+    cosi = cos(angi);
+    sini = sin(angi);
+    for(j = 0; j < Np; j++) {
+      dx = xp[j] - xsi;
+      dy = yp[j] - ysi;
+      u1 = dx * cosi + dy * sini;
+      u2 = -dx * sini + dy * cosi;
+      z[j] += DNORM(u2, Sigma) * (PNORM(u1, Sigma) - PNORM(u1-leni, Sigma));
+    }
+  }
+}
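+
+/*
+  A minimal self-contained sketch of the closed form evaluated above
+  (illustrative only; the function above uses R's dnorm/pnorm).
+  Integrating the isotropic bivariate Gaussian density along a segment
+  of length 'len', in coordinates (u1, u2) aligned with the segment,
+  gives   phi(u2; sigma) * ( Phi(u1; sigma) - Phi(u1 - len; sigma) ).
+*/
+#ifdef DEMO_SEGDENS
+#include <math.h>
+static double demo_phi(double x, double s) {   /* N(0, s^2) density */
+  return exp(-0.5 * (x / s) * (x / s)) / (s * sqrt(2.0 * M_PI));
+}
+static double demo_Phi(double x, double s) {   /* N(0, s^2) cdf, via erf */
+  return 0.5 * (1.0 + erf(x / (s * sqrt(2.0))));
+}
+static double demo_segcontrib(double u1, double u2, double len, double s) {
+  return demo_phi(u2, s) * (demo_Phi(u1, s) - demo_Phi(u1 - len, s));
+}
+#endif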
diff --git a/src/sftcr.c b/src/sftcr.c
new file mode 100755
index 0000000..6a27538
--- /dev/null
+++ b/src/sftcr.c
@@ -0,0 +1,112 @@
+#include <R.h>
+#include <Rmath.h>
+#include <math.h>
+#include "methas.h"
+#include "dist2.h"
+
+
+/* Conditional intensity computation for Soft Core process */
+
+/* Storage of parameters and precomputed/auxiliary data */
+
+typedef struct Softcore {
+  double sigma;
+  double kappa;
+  double nook;  /*   -1/kappa     */
+  double stok; /* sigma^(2/kappa) */
+  double *period;
+  int per;
+} Softcore;
+
+
+/* initialiser function */
+
+Cdata *sftcrinit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  Softcore *softcore;
+  softcore = (Softcore *) R_alloc(1, sizeof(Softcore));
+
+  /* Interpret model parameters*/
+  softcore->sigma  = model.ipar[0];
+  softcore->kappa  = model.ipar[1];
+  softcore->period = model.period;
+  /* constants */
+  softcore->nook = -1/softcore->kappa;
+  softcore->stok = pow(softcore->sigma, 2/softcore->kappa);
+  /* periodic boundary conditions? */
+  softcore->per    = (model.period[0] > 0.0);
+  return((Cdata *) softcore);
+}
+
+/* conditional intensity evaluator */
+
+double sftcrcif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int npts, ix, ixp1, j;
+  double *x, *y;
+  double u, v;
+  double d2, pairsum, cifval, nook, stok;
+  Softcore *softcore;
+
+  softcore = (Softcore *) cdata;
+
+  nook = softcore->nook;
+  stok = softcore->stok;
+
+  u  = prop.u;
+  v  = prop.v;
+  ix = prop.ix;
+  x  = state.x;
+  y  = state.y;
+  npts = state.npts;
+
+  cifval = 1.0;
+
+  if(npts == 0) 
+    return(cifval);
+
+  pairsum = 0;
+
+  ixp1 = ix+1;
+  /* If ix = NONE = -1, then ixp1 = 0 is correct */
+  if(softcore->per) { /* periodic distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	d2 = dist2(u,v,x[j],y[j],softcore->period);
+	pairsum += pow(d2, nook);
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	d2 = dist2(u,v,x[j],y[j],softcore->period);
+	pairsum += pow(d2, nook);
+      }
+    }
+  }
+  else { /* Euclidean distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	d2 = pow(u - x[j],2) + pow(v-y[j],2);
+	pairsum += pow(d2, nook);
+      }
+    }  
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	d2 = pow(u - x[j],2) + pow(v-y[j],2);
+	pairsum += pow(d2, nook);
+      }
+    }
+  }
+
+  cifval *= exp(-stok * pairsum);
+  return cifval;
+}
+
+Cifns SoftcoreCifns = { &sftcrinit, &sftcrcif, (updafunptr) NULL, NO};
+
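+/*
+  A minimal sketch of the pair interaction accumulated above
+  (illustrative): a pair of points at distance d contributes a factor
+  exp( -(sigma/d)^(2/kappa) ) to the conditional intensity, which is
+  exactly what exp(-stok * pairsum) computes, since
+  stok * (d^2)^(-1/kappa) = (sigma^2/d^2)^(1/kappa) = (sigma/d)^(2/kappa).
+*/
+#ifdef DEMO_SFTCR
+#include <math.h>
+static double demo_softcore_pair(double d, double sigma, double kappa) {
+  return exp(-pow(sigma / d, 2.0 / kappa));
+}
+#endif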
diff --git a/src/sparselinalg.c b/src/sparselinalg.c
new file mode 100644
index 0000000..52dfc64
--- /dev/null
+++ b/src/sparselinalg.c
@@ -0,0 +1,24 @@
+#include <R.h>
+#include <R_ext/Utils.h>
+
+/*
+  sparselinalg.c
+
+  Counterpart of 'linalg.c' for sparse matrices/arrays
+
+  $Revision: 1.6 $  $Date: 2016/02/20 11:14:12 $
+
+ */
+
+#undef DBG
+
+#define FNAME CspaSumSymOut
+#undef WEIGHTS
+#include "spasumsymout.h"
+#undef FNAME
+
+#define FNAME CspaWtSumSymOut
+#define WEIGHTS
+#include "spasumsymout.h"
+#undef FNAME
+
diff --git a/src/spasumsymout.h b/src/spasumsymout.h
new file mode 100644
index 0000000..d127eba
--- /dev/null
+++ b/src/spasumsymout.h
@@ -0,0 +1,166 @@
+/*
+  spasumsymout.h
+
+  Function definitions for 'sumsymouter' for sparse matrices/arrays
+
+  This file is #included in sparselinalg.c several times.
+
+  Macros used 
+
+  FNAME     function name
+
+  DBG       (#ifdef) debug 
+
+  WEIGHTS   (#ifdef) use weights 
+
+  $Revision: 1.4 $  $Date: 2016/02/24 09:57:16 $
+
+ */
+
+void FNAME(m, n, 
+	   lenx, ix, jx, kx, x, 
+	   flip, 
+#ifdef WEIGHTS
+	   lenw, jw, kw, w,
+#endif
+	   y) 
+  int *m, *n;            /* dimensions of array m * n * n */
+  int *lenx;             /* number of nonzero entries in sparse array x */
+  int *ix, *jx, *kx;     /* indices of entries in sparse array x */
+  double *x;             /* values in sparse array x */
+                         /* NB: ix, jx, kx are assumed to be
+			    sorted by order(j,k,i)
+			    i.e. in increasing order of j, 
+			    then k within j, 
+			    then i within (j,k) */
+  int *flip;             /* reordering of ix, jx, kx, x that would achieve
+			    increasing order(k,j,i) */
+#ifdef WEIGHTS
+  int *lenw;             /* length of jw, kw */
+  int *jw, *kw;          /* indices of entries in sparse matrix w of weights */
+                         /* Assumed sorted by order (j,k) */
+  double *w;             /* values of weights w */
+#endif
+  double *y;             /* output: full m * m matrix */
+{
+  /* Compute the sum of outer(x[,j,k], x[,k,j]) for all j != k */
+  int M,N,L, i,j,k,ii, l, ll, lstart, lend, t, tstart, tend, r;
+  double xijk, xx;
+  int *it, *jt, *kt;
+  double *xt;
+#ifdef WEIGHTS
+  int R;
+  double wjk;
+#endif
+
+  M = *m; 
+  N = *n;
+  L = *lenx;
+#ifdef WEIGHTS
+  R = *lenw;
+#endif
+
+  if(L <= 1 || N <= 1 || M <= 0) return;
+
+  /* Create space to store array in k-major order*/
+  it = (int *) R_alloc(L, sizeof(int));
+  jt = (int *) R_alloc(L, sizeof(int));
+  kt = (int *) R_alloc(L, sizeof(int));
+  xt = (double *) R_alloc(L, sizeof(double));
+  /* copy reordered array */
+#ifdef DBG
+  Rprintf("----------  Reordered: -------------------\n");
+#endif
+  for(l = 0; l < L; l++) {
+    ll = flip[l];
+    it[l] = ix[ll];
+    jt[l] = jx[ll];
+    kt[l] = kx[ll];
+    xt[l] = x[ll];
+#ifdef DBG
+    Rprintf("%d \t [%d, %d, %d] = %lf\n", l, it[l], jt[l], kt[l], xt[l]);
+#endif
+  }
+
+  /* Now process array */
+  lstart = tstart = r = 0;
+
+  lend = tend = -1; /* to keep compiler happy */
+
+  while(lstart < L && tstart < L) {
+    /* Consider a new entry x[,j,k] */
+    j = jx[lstart];
+    k = kx[lstart];
+#ifdef DBG
+    Rprintf("Entry %d: [, %d, %d]\n", lstart, j, k);
+#endif
+#ifdef WEIGHTS     
+    /* Find weight w[j,k] */
+    while(r < R && ((jw[r] < j) || 
+		    ((jw[r] == j) && (kw[r] < k))))
+      ++r;
+    if(r < R && jw[r] == j && kw[r] == k) {
+      /* weight w[j,k] is present */
+      wjk = w[r];
+#endif
+      /* Find all entries in x with the same j,k */
+      for(lend = lstart+1;
+	  lend < L && jx[lend] == j && kx[lend] == k;
+	  ++lend) 
+	;
+      --lend;
+#ifdef DBG
+      Rprintf("\t lstart=%d, lend=%d\n", lstart, lend);
+#endif
+      /* Find corresponding entries in transpose (k'=j, j'=k) */
+      /* search forward to find start of run */
+      while(tstart < L && ((kt[tstart] < j) ||
+			   (kt[tstart] == j && jt[tstart] < k)))
+	++tstart;
+#ifdef DBG
+      Rprintf("\t tstart=%d\n", tstart);
+      Rprintf("\t kt[tstart]=%d, jt[tstart]=%d\n", kt[tstart], jt[tstart]);
+#endif
+      if(tstart < L && kt[tstart] == j && jt[tstart] == k) {
+	/* Both x[,j,k] and x[,k,j] are present so a contribution will occur */
+	/* seek end of run */
+	for(tend = tstart+1;
+	    tend < L && kt[tend] == j && jt[tend] == k;
+	    ++tend) 
+	  ;
+	--tend;
+#ifdef DBG
+	Rprintf("\t tend=%d\n", tend);
+#endif
+	/* Form products */
+	for(l = lstart; l <= lend; l++) {
+	  i = ix[l];
+	  xijk =  x[l];
+#ifdef DBG
+	  Rprintf("Entry %d: [%d, %d, %d] = %lf\n", l, i, j, k, xijk);
+#endif
+	  for(t = tstart; t <= tend; t++) {
+	    ii = it[t];
+	    xx = xijk * xt[t];
+#ifdef WEIGHTS
+	    xx *= wjk;
+#endif
+	    /* increment result at [i, ii] and [ii, i]*/
+	    y[i + M * ii] += xx;
+	    /*	  y[ii + M * i] += xx;  */
+#ifdef DBG
+	    Rprintf("-- matches entry %d: [%d, %d, %d] = %lf\n", 
+		    t, ii, k, j, xt[t]);
+	    Rprintf("++ %lf\n", xx);
+#endif
+	  }
+	}
+      }
+#ifdef WEIGHTS
+    }
+#endif
+    lstart = ((lend > lstart) ? lend : lstart) + 1;
+    tstart = ((tend > tstart) ? tend : tstart) + 1;
+  }
+}
+
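+/*
+  A dense reference version of the quantity computed above, as a mental
+  model (illustrative, not part of the upstream source):
+  y[i,ii] = sum over j != k of  w[j,k] * x[i,j,k] * x[ii,k,j],
+  assuming column-major storage x[i + m*(j + n*k)] and w[j + n*k].
+*/
+#ifdef DEMO_SPASUMSYMOUT
+static void demo_dense_sumsymouter(int m, int n,
+				   double *x,  /* m * n * n array */
+				   double *w,  /* n * n matrix, or NULL */
+				   double *y)  /* m * m output, zeroed */
+{
+  int i, ii, j, k;
+  double wjk;
+  for(j = 0; j < n; j++) {
+    for(k = 0; k < n; k++) {
+      if(j == k) continue;
+      wjk = (w == (double *) 0) ? 1.0 : w[j + n * k];
+      for(i = 0; i < m; i++)
+	for(ii = 0; ii < m; ii++)
+	  y[i + m * ii] +=
+	    wjk * x[i + m * (j + n * k)] * x[ii + m * (k + n * j)];
+    }
+  }
+}
+#endif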
diff --git a/src/sphefrac.c b/src/sphefrac.c
new file mode 100755
index 0000000..3dc3c08
--- /dev/null
+++ b/src/sphefrac.c
@@ -0,0 +1,170 @@
+#include <math.h>
+#include <R.h>
+#include "geom3.h"
+/*
+
+	$Revision: 1.1 $ 	$Date: 2009/11/04 23:54:15 $
+
+	Routine for calculating the fraction of the surface area
+	of a sphere that lies inside a box
+
+# /////////////////////////////////////////////
+# AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991.
+# 
+# MODIFIED BY: Adrian Baddeley, Perth 2009, 2013
+#
+# This software is distributed free
+# under the conditions that
+# 	(1) it shall not be incorporated
+# 	in software that is subsequently sold
+# 	(2) the authorship of the software shall
+# 	be acknowledged in any publication that 
+# 	uses results generated by the software
+# 	(3) this notice shall remain in place
+# 	in each file.
+# //////////////////////////////////////////////
+
+*/
+
+#ifdef DEBUG
+#define DBG(X,Y) Rprintf("%s: %f\n", (X), (Y));
+#else
+#define DBG(X,Y) 
+#endif
+
+static double pi = 3.141592653589793;
+
+/* Factor of 4 * pi * r * r IS ALREADY TAKEN OUT */
+
+double
+sphesfrac(point, box, r)
+Point *point;
+Box *box;
+double r;
+{
+	double sum, p[4], q[4];
+	double a1(), a2(), a3();
+
+	int i, j;
+
+	p[1] = point->x - box->x0;
+	p[2] = point->y - box->y0;
+	p[3] = point->z - box->z0;
+
+	q[1] = box->x1 - point->x;
+	q[2] = box->y1 - point->y;
+	q[3] = box->z1 - point->z;
+
+	sum = 0;
+	for(i = 1; i <= 3; i++)
+	{
+		sum += a1(p[i],r) + a1(q[i],r);
+#ifdef DEBUG
+		Rprintf("i = %d, a1 = %f, a1 = %f\n", i, a1(p[i],r), a1(q[i],r));
+#endif
+	}
+	DBG("Past a1", sum)
+	
+	for(i = 1; i < 3; i++)
+		for(j = i+1; j <= 3; j++)
+		{
+			sum -= a2(p[i], p[j], r) + a2(p[i], q[j], r)
+			 + a2(q[i], p[j], r) + a2(q[i], q[j], r);
+#ifdef DEBUG
+			Rprintf("i = %d, j = %d, sum = %f\n", i, j, sum);
+#endif
+		}
+	DBG("Past a2", sum)
+
+	sum += a3(p[1], p[2], p[3], r) + a3(p[1], p[2], q[3], r);
+
+	DBG("sum", sum)
+
+	sum += a3(p[1], q[2], p[3], r) + a3(p[1], q[2], q[3], r);
+
+	DBG("sum", sum)
+
+	sum += a3(q[1], p[2], p[3], r) + a3(q[1], p[2], q[3], r);
+
+	DBG("sum", sum)
+
+	sum += a3(q[1], q[2], p[3], r) + a3(q[1], q[2], q[3], r);
+
+	DBG("Past a3", sum)
+
+	return(1 - sum);
+}
+
+double 
+a1(t, r)
+double t, r;
+{
+	/* This is the function A1 divided by 4 pi r^2 */
+
+	if(t >= r) 
+		return(0.0);
+
+	return((1 - t/r) * 0.5);
+}
+
+double
+a2(t1, t2, r)
+double t1, t2, r;
+{
+	double c2();
+	/* This is A2 divided by 4 pi r^2 because c2 is C divided by pi */
+	
+	return(c2( t1 / r, t2 / r) / 2.0);
+}
+
+double
+a3(t1, t2, t3, r)
+double t1, t2, t3, r;
+{
+	double c3();
+	/* This is A3 divided by 4 pi r^2 because c3 is C divided by pi */
+
+	return(c3(t1 / r, t2 / r, t3 / r) / 4.0);
+}
+
+double 
+c2(a, b)
+double a, b;
+{
+	double z, z2;
+	double c2();
+
+	/* This is the function C(a, b, 0) divided by pi 
+		- assumes a, b > 0  */
+
+	if( ( z2 = 1.0 - a * a - b * b) < 0.0 )
+		return(0.0);
+	z = sqrt(z2);
+	return((atan2(z, a * b) - a * atan2(z, b) - b * atan2(z, a)) / pi);
+}
+
+double
+c3(a, b, c)
+double a, b, c;
+{
+	double za, zb, zc, sum;
+	/* This is C(a,b,c) divided by pi. Arguments assumed > 0 */
+
+	if(a * a + b * b + c * c >= 1.0)
+		return(0.0);
+
+	za = sqrt(1 - b * b - c * c);
+	zb = sqrt(1 - a * a - c * c);
+	zc = sqrt(1 - a * a - b * b);
+
+	sum =  atan2(zb, a * c) + atan2(za, b * c) 
+		+ atan2(zc, a * b)
+		- a * atan2(zb, c)
+		+ a * atan2(b, zc)
+		- b * atan2(za, c)
+		+ b * atan2(a, zc)
+		- c * atan2(zb, a)
+		+ c * atan2(b, za);
+
+	return(sum / pi  - 1);
+}
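+
+/*
+  A minimal usage sketch (illustrative): for a sphere lying wholly
+  inside the box, every a1/a2/a3 term vanishes and sphesfrac returns 1;
+  as the sphere protrudes, the inclusion-exclusion terms above subtract
+  the fractions of surface area cut off by faces, edges and corners.
+*/
+#ifdef DEMO_SPHEFRAC
+static double demo_centred_fraction(double r)
+{
+	Point p;
+	Box b;
+	p.x = p.y = p.z = 0.5;          /* centre of the unit box */
+	b.x0 = b.y0 = b.z0 = 0.0;
+	b.x1 = b.y1 = b.z1 = 1.0;
+	return(sphesfrac(&p, &b, r));   /* equals 1 whenever r <= 0.5 */
+}
+#endif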
diff --git a/src/sphevol.c b/src/sphevol.c
new file mode 100755
index 0000000..a09a701
--- /dev/null
+++ b/src/sphevol.c
@@ -0,0 +1,222 @@
+#include <math.h>
+#include <R.h>
+#include "geom3.h"
+/*
+
+	$Revision: 1.2 $ 	$Date: 2013/05/27 02:09:10 $
+
+	Routine for calculating ABSOLUTE volume of intersection 
+	between sphere and box
+
+	Arbitrary positions: point is allowed to be inside or outside box.
+
+# /////////////////////////////////////////////
+# AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991.
+# 
+# MODIFIED BY: Adrian Baddeley, Perth 2009
+#
+# This software is distributed free
+# under the conditions that
+# 	(1) it shall not be incorporated
+# 	in software that is subsequently sold
+# 	(2) the authorship of the software shall
+# 	be acknowledged in any publication that 
+# 	uses results generated by the software
+# 	(3) this notice shall remain in place
+# 	in each file.
+# //////////////////////////////////////////////
+
+*/
+
+
+#ifdef DEBUG
+#define DBG(X,Y) Rprintf("%s: %f\n", (X), (Y));
+#else
+#define DBG(X,Y) 
+#endif
+
+#include "yesno.h"
+#define ABS(X) (((X) >= 0.0) ? (X) : -(X))
+
+static	double	rcubed, spherevol;
+
+double
+sphevol(point, box, r)
+Point *point;
+Box *box;
+double r;
+{
+	double sum, p[4], q[4];
+	double v1(), v2(), v3();
+
+	int i, j;
+
+	rcubed = r * r * r;
+	spherevol = (4.0/3.0) * PI * rcubed;
+
+	p[1] = box->x0 - point->x;
+	p[2] = box->y0 - point->y;
+	p[3] = box->z0 - point->z;
+
+	q[1] = box->x1 - point->x;
+	q[2] = box->y1 - point->y;
+	q[3] = box->z1 - point->z;
+
+	sum = 0;
+
+	for(i = 1; i <= 3; i++)
+	{
+		sum += v1(p[i], -1, r) + v1(q[i], 1, r);
+#ifdef DEBUG
+		Rprintf("i = %d, v1 = %f, v1 = %f\n", i, v1(p[i], -1, r), v1(q[i], 1, r));
+#endif
+	}
+	DBG("Past v1", sum)
+	
+	for(i = 1; i < 3; i++)
+		for(j = i+1; j <= 3; j++)
+		{
+			sum -= v2(p[i], -1, p[j], -1, r) + v2(p[i], -1, q[j], 1, r)
+			 + v2(q[i], 1, p[j], -1, r) + v2(q[i], 1, q[j], 1, r);
+#ifdef DEBUG
+			Rprintf("i = %d, j = %d, sum = %f\n", i, j, sum);
+#endif
+		}
+	DBG("Past v2", sum)
+
+	sum += v3(p[1], -1, p[2], -1, p[3], -1, r) 
+		+ v3(p[1], -1, p[2], -1, q[3], 1, r);
+
+	DBG("sum", sum)
+
+	sum += v3(p[1], -1, q[2], 1, p[3], -1, r) 
+		+ v3(p[1], -1, q[2], 1, q[3], 1, r);
+
+	DBG("sum", sum)
+
+	sum += v3(q[1], 1, p[2], -1, p[3], -1, r) 
+		+ v3(q[1], 1, p[2], -1, q[3], 1, r);
+
+	DBG("sum", sum)
+
+	sum += v3(q[1], 1, q[2], 1, p[3], -1, r) 
+		+ v3(q[1], 1, q[2], 1, q[3], 1, r);
+
+	DBG("Past v3", sum)
+	DBG("sphere volume", spherevol)
+
+	return(spherevol - sum);
+}
+
+double
+v1(a,s,r)
+double a, r;
+int	s;
+{
+	double value;
+	double u();
+	short sign;
+
+	value = 4.0 * rcubed * u(ABS(a)/r, 0.0, 0.0);
+
+	sign = (a >= 0.0) ? 1 : -1;
+
+	if(sign == s)
+		return(value);
+	else
+		return(spherevol - value);
+}
+		
+double
+v2(a, sa, b, sb, r)
+	double a, b, r;
+	int sa, sb;
+{
+	short sign;
+	double u();
+
+	sign = (b >= 0.0) ? 1 : -1;
+
+	if(sign != sb )
+		return(v1(a, sa, r) - v2(a, sa, ABS(b), 1, r));
+
+	b = ABS(b);
+	sb = 1;
+
+	sign = (a >= 0.0) ? 1 : -1;
+	
+	if(sign != sa)
+		return(v1(b, sb, r) - v2(ABS(a), 1, b, sb, r));
+
+	a = ABS(a);
+
+	return(2.0 * rcubed * u(a/r, b/r, 0.0));
+}
+	
+double
+v3(a, sa, b, sb, c, sc, r)
+	double	a, b, c, r;
+	int	sa, sb, sc;
+{
+	short sign;
+	double u();
+
+	sign = (c >= 0.0) ? 1 : -1;
+
+	if(sign != sc)
+		return(v2(a,sa,b,sb,r) - v3(a,sa,b,sb, ABS(c), 1, r));
+
+	c = ABS(c);
+	sc = 1;
+
+	sign = (b >= 0.0) ? 1 : -1;
+
+	if(sign != sb)
+		return(v2(a,sa,c,sc,r) - v3(a,sa,ABS(b),1,c,sc,r));
+
+	b = ABS(b);
+	sb = 1;
+
+	sign = (a >= 0.0) ? 1 : -1;
+
+	if(sign != sa)
+		return(v2(b,sb, c, sc, r) - v3(ABS(a),1, b, sb, c, sc, r));
+
+	a = ABS(a);
+
+	return(rcubed * u(a/r, b/r, c/r));
+}
+	
+double
+u(a, b, c)
+double a, b, c;
+{
+	double	w();
+
+	if(a * a + b * b + c * c >= 1.0)
+		return(0.0);
+
+	return(
+		(PI/12.0) * (2.0 - 3.0 * (a + b + c)
+			  	+ (a * a * a + b * b * b + c * c * c))
+
+		+ w(a,b) + w(b,c) + w(a,c)
+		- a * b * c
+	);
+}
+	
+
+double 
+w(x,y)
+double	x,y; 	/* Arguments assumed >= 0 */
+{
+	double z;
+
+	z = sqrt(1 - x * x - y * y);
+
+	return(
+	  (x / 2.0 - x * x * x / 6.0) * atan2(y, z)
+	+ (y / 2.0 - y * y * y / 6.0) * atan2(x, z)
+	- ( atan2(x * y , z) - x * y * z )/3.0
+	);
+}
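+
+/*
+  A minimal usage sketch (illustrative): when the sphere lies wholly
+  inside the box, all v1/v2/v3 correction terms vanish and sphevol
+  returns the full sphere volume (4/3) * PI * r^3.
+*/
+#ifdef DEMO_SPHEVOL
+static double demo_centred_volume(double r)
+{
+	Point p;
+	Box b;
+	p.x = p.y = p.z = 0.5;          /* centre of the unit box */
+	b.x0 = b.y0 = b.z0 = 0.0;
+	b.x1 = b.y1 = b.z1 = 1.0;
+	return(sphevol(&p, &b, r));     /* equals (4/3) * PI * r^3 for r <= 0.5 */
+}
+#endif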
diff --git a/src/straush.c b/src/straush.c
new file mode 100755
index 0000000..2e73b5a
--- /dev/null
+++ b/src/straush.c
@@ -0,0 +1,132 @@
+#include <R.h>
+#include <math.h>
+#include "methas.h"
+#include "dist2.h"
+
+/* Conditional intensity computation for Hard core Strauss process */
+
+/* Storage of parameters and precomputed/auxiliary data */
+
+typedef struct StraussHard {
+  double gamma;
+  double r;   /* interaction distance */
+  double h;   /* hard core distance */
+  double loggamma;
+  double r2;
+  double h2;
+  double r2h2;  /* r^2 - h^2 */
+  double *period;
+  int hard;
+  int per;
+} StraussHard;
+
+
+/* initialiser function */
+
+Cdata *straushinit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  StraussHard *strausshard;
+  strausshard = (StraussHard *) R_alloc(1, sizeof(StraussHard));
+
+  /* Interpret model parameters*/
+  strausshard->gamma  = model.ipar[0];
+  strausshard->r      = model.ipar[1]; /* No longer passed as r^2 */
+  strausshard->h      = model.ipar[2]; /* No longer passed as h^2 */
+  strausshard->r2     = pow(strausshard->r, 2);
+  strausshard->h2     = pow(strausshard->h, 2); 
+  strausshard->r2h2   = strausshard->r2 - strausshard->h2;
+  strausshard->period = model.period;
+  /* is the interaction numerically equivalent to hard core ? */
+  strausshard->hard   = (strausshard->gamma < DOUBLE_EPS);
+  strausshard->loggamma = (strausshard->hard) ? 0.0 : log(strausshard->gamma);
+  /* periodic boundary conditions? */
+  strausshard->per    = (model.period[0] > 0.0);
+  return((Cdata *) strausshard);
+}
+
+/* conditional intensity evaluator */
+
+double straushcif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int npts, kount, ix, ixp1, j;
+  double *x, *y;
+  double u, v;
+  double r2, r2h2, cifval;
+  StraussHard *strausshard;
+  double *period;
+  DECLARE_CLOSE_VARS;
+
+  strausshard = (StraussHard *) cdata;
+
+  r2     = strausshard->r2;
+  r2h2   = strausshard->r2h2;
+  period = strausshard->period;
+
+  u  = prop.u;
+  v  = prop.v;
+  ix = prop.ix;
+  x  = state.x;
+  y  = state.y;
+
+  npts = state.npts;
+
+  if(npts == 0) 
+    return((double) 1.0);
+
+  kount = 0;
+  ixp1 = ix+1;
+  /* If ix = NONE = -1, then ixp1 = 0 is correct */
+  if(strausshard->per) { /* periodic distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) {
+	  /* RESIDUE = r2 - distance^2 */
+	  if(RESIDUE > r2h2) return((double) 0.0);
+	  ++kount;
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) {
+	  if(RESIDUE > r2h2) return((double) 0.0);
+	  ++kount;
+	}
+      }
+    }
+  }
+  else { /* Euclidean distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(CLOSE(u,v,x[j],y[j],r2)) {
+	  if(RESIDUE > r2h2) return((double) 0.0);
+	  ++kount;
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(CLOSE(u,v,x[j],y[j],r2)) {
+	  if(RESIDUE > r2h2) return((double) 0.0);
+	  ++kount;
+	}
+      }
+    }
+  }
+
+  if(strausshard->hard) {
+    if(kount > 0) cifval = (double) 0.0;
+    else cifval = (double) 1.0;
+  }
+  else cifval = exp(strausshard->loggamma*kount);
+  
+  return cifval;
+}
+
+Cifns StraussHardCifns = { &straushinit, &straushcif, (updafunptr) NULL, NO};
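+
+/*
+  A minimal sketch of the interaction factor computed above
+  (illustrative): writing t for the number of existing points within
+  distance r of the proposal u, the factor is gamma^t, forced to 0
+  whenever any point lies within the hard core distance h.
+*/
+#ifdef DEMO_STRAUSH
+#include <math.h>
+static double demo_strausshard_factor(int t, int any_within_h, double gamma)
+{
+  if(any_within_h) return(0.0);
+  if(gamma <= 0.0) return((t > 0) ? 0.0 : 1.0);  /* numerical hard core */
+  return(pow(gamma, (double) t));
+}
+#endif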
diff --git a/src/straushm.c b/src/straushm.c
new file mode 100755
index 0000000..c8a53c3
--- /dev/null
+++ b/src/straushm.c
@@ -0,0 +1,250 @@
+#include <R.h>
+#include <math.h>
+#include "methas.h"
+#include "dist2.h"
+
+/* for debugging code, include   #define DEBUG 1   */
+
+/* Conditional intensity computation for Multitype Strauss hardcore process */
+
+/* NOTE: types (marks) are numbered from 0 to ntypes-1 */
+
+/* Storage of parameters and precomputed/auxiliary data */
+
+typedef struct MultiStraussHard {
+  int ntypes;
+  double *gamma;   /* gamma[i,j] = gamma[i+ntypes*j] for i,j = 0... ntypes-1 */
+  double *rad;     /* rad[i,j] = rad[i+ntypes*j] for i,j = 0... ntypes-1 */
+  double *hc;      /* hc[i,j] = hc[i+ntypes*j] for i,j = 0... ntypes-1 */
+  double *rad2;    /* squared radii */
+  double *hc2;     /* squared hard core distances */
+  double *rad2hc2;    /* r^2 - h^2 */
+  double  range2;   /* square of interaction range */
+  double *loggamma; /* logs of gamma[i,j] */
+  double *period;
+  int    *hard;     /* hard[i,j] = 1 if gamma[i,j] ~~ 0 */
+  int    *kount;    /* space for kounting pairs of each type */
+  int per;
+} MultiStraussHard;
+
+
+/* initialiser function */
+
+Cdata *straushminit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  int i, j, ntypes, n2, hard;
+  double g, r, h, r2, h2, logg, range2;
+  MultiStraussHard *multistrausshard;
+
+  multistrausshard = (MultiStraussHard *) R_alloc(1, sizeof(MultiStraussHard));
+
+
+  multistrausshard->ntypes = ntypes = model.ntypes;
+  n2 = ntypes * ntypes;
+
+#ifdef DEBUG
+  Rprintf("initialising space for %d types\n", ntypes);
+#endif
+
+  /* Allocate space for parameters */
+  multistrausshard->gamma    = (double *) R_alloc((size_t) n2, sizeof(double));
+  multistrausshard->rad      = (double *) R_alloc((size_t) n2, sizeof(double));
+  multistrausshard->hc       = (double *) R_alloc((size_t) n2, sizeof(double));
+
+  /* Allocate space for transformed parameters */
+  multistrausshard->rad2     = (double *) R_alloc((size_t) n2, sizeof(double));
+  multistrausshard->hc2      = (double *) R_alloc((size_t) n2, sizeof(double));
+  multistrausshard->rad2hc2  = (double *) R_alloc((size_t) n2, sizeof(double));
+  multistrausshard->loggamma = (double *) R_alloc((size_t) n2, sizeof(double));
+  multistrausshard->hard     = (int *) R_alloc((size_t) n2, sizeof(int));
+
+  /* Allocate scratch space for counts of each pair of types */
+  multistrausshard->kount    = (int *) R_alloc((size_t) n2, sizeof(int));
+
+  /* Copy and process model parameters*/
+
+  /* ipar will contain n^2 values of gamma, then n^2 values of r, 
+     then n^2 values of h */
+
+  range2 = 0.0;
+
+  for(i = 0; i < ntypes; i++) {
+    for(j = 0; j < ntypes; j++) {
+      g = model.ipar[       i + j*ntypes];
+      r = model.ipar[ n2  + i + j*ntypes];
+      h = model.ipar[2*n2 + i + j*ntypes];
+      r2 = r * r;
+      h2 = h * h;
+      hard = (g < DOUBLE_EPS);
+      logg = (hard) ? 0 : log(g);
+      MAT(multistrausshard->gamma,    i, j, ntypes) = g;
+      MAT(multistrausshard->rad,      i, j, ntypes) = r;
+      MAT(multistrausshard->hc,       i, j, ntypes) = h; 
+      MAT(multistrausshard->rad2,     i, j, ntypes) = r2;
+      MAT(multistrausshard->hc2,      i, j, ntypes) = h2;
+      MAT(multistrausshard->rad2hc2,  i, j, ntypes) = r2-h2;
+      MAT(multistrausshard->hard,     i, j, ntypes) = hard; 
+      MAT(multistrausshard->loggamma, i, j, ntypes) = logg;
+      if(r2 > range2) range2 = r2;
+    }
+  }
+  multistrausshard->range2 = range2;
+
+  /* periodic boundary conditions? */
+  multistrausshard->period = model.period;
+  multistrausshard->per    = (model.period[0] > 0.0);
+
+#ifdef DEBUG
+  Rprintf("end initialiser\n");
+#endif
+  return((Cdata *) multistrausshard);
+}
+
+/* conditional intensity evaluator */
+
+double straushmcif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int npts, ntypes, kount, ix, ixp1, j, mrk, mrkj, m1, m2;
+  int *marks;
+  double *x, *y;
+  double u, v, lg;
+  double d2, cifval;
+  double range2;
+  double *period;
+  MultiStraussHard *multistrausshard;
+  DECLARE_CLOSE_D2_VARS;
+
+  multistrausshard = (MultiStraussHard *) cdata;
+  range2 = multistrausshard->range2;
+  period = multistrausshard->period;
+
+  u  = prop.u;
+  v  = prop.v;
+  mrk = prop.mrk;
+  ix = prop.ix;
+  x  = state.x;
+  y  = state.y;
+  marks = state.marks;
+
+  npts = state.npts;
+
+#ifdef DEBUG
+  Rprintf("computing cif: u=%lf, v=%lf, mrk=%d\n", u, v, mrk);
+#endif
+
+  cifval = 1.0;
+
+  if(npts == 0) 
+    return(cifval);
+
+  ntypes = multistrausshard->ntypes;
+
+#ifdef DEBUG
+  Rprintf("initialising pair counts\n");
+#endif
+
+  /* initialise pair counts */
+  for(m1 = 0; m1 < ntypes; m1++)
+    for(m2 = 0; m2 < ntypes; m2++)
+      MAT(multistrausshard->kount, m1, m2, ntypes) = 0;
+
+  /* compile pair counts */
+
+#ifdef DEBUG
+  Rprintf("compiling pair counts\n");
+#endif
+
+  ixp1 = ix+1;
+  /* If ix = NONE = -1, then ixp1 = 0 is correct */
+  if(multistrausshard->per) { /* periodic distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) {
+	  mrkj = marks[j];
+	  if(d2 < MAT(multistrausshard->rad2, mrk, mrkj, ntypes)) {
+	    if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) {
+	      cifval = 0.0;
+	      return(cifval);
+	    }
+	    MAT(multistrausshard->kount, mrk, mrkj, ntypes)++;
+	  }
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) {
+	  mrkj = marks[j];
+	  if(d2 < MAT(multistrausshard->rad2, mrk, mrkj, ntypes)) {
+	    if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) {
+	      cifval = 0.0;
+	      return(cifval);
+	    }
+	    MAT(multistrausshard->kount, mrk, mrkj, ntypes)++;
+	  }
+	}
+      }
+    }
+  } else { /* Euclidean distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+        if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) {
+	  mrkj = marks[j];
+	  if(d2 < MAT(multistrausshard->rad2, mrk, mrkj, ntypes)) {
+	    if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) {
+	      cifval = 0.0;
+	      return(cifval);
+	    }
+	    MAT(multistrausshard->kount, mrk, mrkj, ntypes)++;
+	  }
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+        if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) {
+	  mrkj = marks[j];
+	  if(d2 < MAT(multistrausshard->rad2, mrk, mrkj, ntypes)) {
+	    if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) {
+	      cifval = 0.0;
+	      return(cifval);
+	    }
+	    MAT(multistrausshard->kount, mrk, mrkj, ntypes)++;
+	  }
+	}
+      }
+    }
+  }
+
+#ifdef DEBUG
+  Rprintf("multiplying cif factors\n");
+#endif
+  /* multiply cif value by pair potential */
+  for(m1 = 0; m1 < ntypes; m1++) {
+    for(m2 = 0; m2 < ntypes; m2++) {
+      kount = MAT(multistrausshard->kount, m1, m2, ntypes);
+      if(MAT(multistrausshard->hard, m1, m2, ntypes)) {
+	if(kount > 0) {
+	  cifval = 0.0;
+	  return(cifval);
+	}
+      } else {
+	lg = MAT(multistrausshard->loggamma, m1, m2, ntypes);
+	cifval *= exp(lg * kount);
+      }
+    }
+  }
+  
+#ifdef DEBUG
+  Rprintf("returning positive cif\n");
+#endif
+  return cifval;
+}
+
+Cifns MultiStraussHardCifns = { &straushminit, &straushmcif, (updafunptr) NULL, YES};
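+
+/*
+  A minimal sketch of the final product over type pairs computed above
+  (illustrative; column-major indexing assumed, as in the MAT macro):
+  cif = prod over (m1, m2) of gamma[m1, m2] ^ count[m1, m2],
+  with value 0 if any hard-core pair has a positive count.
+*/
+#ifdef DEMO_STRAUSHM
+#include <math.h>
+static double demo_multitype_factor(int ntypes, double *gamma, int *count)
+{
+  int m1, m2, k;
+  double g, cif;
+  cif = 1.0;
+  for(m1 = 0; m1 < ntypes; m1++) {
+    for(m2 = 0; m2 < ntypes; m2++) {
+      g = gamma[m1 + ntypes * m2];
+      k = count[m1 + ntypes * m2];
+      if(g <= 0.0) {
+	if(k > 0) return(0.0);      /* hard core pair violated */
+      } else {
+	cif *= pow(g, (double) k);
+      }
+    }
+  }
+  return(cif);
+}
+#endif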
diff --git a/src/strauss.c b/src/strauss.c
new file mode 100755
index 0000000..3fb42bd
--- /dev/null
+++ b/src/strauss.c
@@ -0,0 +1,117 @@
+#include <R.h>
+#include <math.h>
+#include "methas.h"
+#include "dist2.h"
+
+/* Conditional intensity computation for Strauss process */
+
+/* Format for storage of parameters and precomputed/auxiliary data */
+
+typedef struct Strauss {
+  double gamma;
+  double r;
+  double loggamma;
+  double r2;
+  double *period;
+  int hard;
+  int per;
+} Strauss;
+
+
+/* initialiser function */
+
+Cdata *straussinit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  /* create storage for model parameters */
+  Strauss *strauss;
+  strauss = (Strauss *) R_alloc(1, sizeof(Strauss)); 
+  /* Interpret model parameters*/
+  strauss->gamma  = model.ipar[0];
+  strauss->r      = model.ipar[1]; /* No longer passed as r^2 */
+  strauss->r2     = strauss->r * strauss->r; 
+  strauss->period = model.period;
+#ifdef MHDEBUG
+  Rprintf("Initialising Strauss gamma=%lf, r=%lf\n", 
+	  strauss->gamma, strauss->r);
+#endif
+  /* is the model numerically equivalent to hard core ? */
+  strauss->hard   = (strauss->gamma < DOUBLE_EPS);
+  strauss->loggamma = (strauss->hard) ? 0 : log(strauss->gamma);
+  /* periodic boundary conditions? */
+  strauss->per    = (model.period[0] > 0.0);
+  return((Cdata *) strauss);
+}
+
+/* conditional intensity evaluator */
+
+double strausscif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int npts, kount, ix, ixp1, j;
+  double *x, *y;
+  double u, v;
+  double r2, cifval;
+  Strauss *strauss;
+  DECLARE_CLOSE_VARS;
+
+  strauss = (Strauss *) cdata;
+
+  r2     = strauss->r2;
+
+  u  = prop.u;
+  v  = prop.v;
+  ix = prop.ix;
+  x  = state.x;
+  y  = state.y;
+
+  npts = state.npts;
+
+  if(npts == 0) 
+    return((double) 1.0);
+
+  kount = 0;
+  ixp1 = ix+1;
+  /* If ix = NONE = -1, then ixp1 = 0 is correct */
+  if(strauss->per) { /* periodic distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(CLOSE_PERIODIC(u,v,x[j],y[j],strauss->period, r2))
+	  ++kount;
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(CLOSE_PERIODIC(u,v,x[j],y[j],strauss->period, r2))
+	  ++kount;
+      }
+    }
+  } else { /* Euclidean distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(CLOSE(u,v,x[j],y[j], r2))
+	  ++kount;
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(CLOSE(u,v,x[j],y[j], r2))
+	  ++kount;
+      }
+    }
+  }
+
+  if(strauss->hard) {
+    if(kount > 0) cifval = 0.0;
+    else cifval = 1.0;
+  }
+  else cifval = exp((strauss->loggamma) * kount);
+  
+  return cifval;
+}
+
+Cifns StraussCifns = { &straussinit, &strausscif, (updafunptr) NULL, NO};
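+
+/*
+  A minimal sketch of the neighbour count t(u, x) used above
+  (illustrative): distances are compared as squares against r^2,
+  so no square roots are ever taken.
+*/
+#ifdef DEMO_STRAUSS
+static int demo_count_close(double u, double v, int n,
+			    double *x, double *y, double r2)
+{
+  int j, t;
+  double dx, dy;
+  t = 0;
+  for(j = 0; j < n; j++) {
+    dx = u - x[j];
+    dy = v - y[j];
+    if(dx * dx + dy * dy < r2) ++t;
+  }
+  return(t);
+}
+#endif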
diff --git a/src/straussm.c b/src/straussm.c
new file mode 100755
index 0000000..3ec2fe3
--- /dev/null
+++ b/src/straussm.c
@@ -0,0 +1,218 @@
+#include <R.h>
+#include <math.h>
+#include "methas.h"
+#include "dist2.h"
+
+/* for debugging code, include   #define DEBUG 1   */
+
+/* Conditional intensity computation for Multitype Strauss process */
+
+/* NOTE: types (marks) are numbered from 0 to ntypes-1 */
+
+/* Storage of parameters and precomputed/auxiliary data */
+
+typedef struct MultiStrauss {
+  int ntypes;
+  double *gamma;   /* gamma[i,j] = gamma[i+ntypes*j] for i,j = 0... ntypes-1 */
+  double *rad;     /* rad[i,j] = rad[i+ntypes*j] for i,j = 0... ntypes-1 */
+  double *rad2;    /* squared radii */
+  double  range2;   /* square of interaction range */
+  double *loggamma; /* logs of gamma[i,j] */
+  double *period;
+  int    *hard;     /* hard[i,j] = 1 if gamma[i,j] ~~ 0 */
+  int    *kount;    /* space for kounting pairs of each type */
+  int per;
+} MultiStrauss;
+
+
+/* initialiser function */
+
+Cdata *straussminit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  int i, j, ntypes, n2, hard;
+  double g, r, r2, logg, range2;
+  MultiStrauss *multistrauss;
+
+  multistrauss = (MultiStrauss *) R_alloc(1, sizeof(MultiStrauss));
+
+  multistrauss->ntypes = ntypes = model.ntypes;
+  n2 = ntypes * ntypes;
+
+#ifdef DEBUG
+  Rprintf("initialising space for %d types\n", ntypes);
+#endif
+
+  /* Allocate space for parameters */
+  multistrauss->gamma    = (double *) R_alloc((size_t) n2, sizeof(double));
+  multistrauss->rad      = (double *) R_alloc((size_t) n2, sizeof(double));
+
+  /* Allocate space for transformed parameters */
+  multistrauss->rad2     = (double *) R_alloc((size_t) n2, sizeof(double));
+  multistrauss->loggamma = (double *) R_alloc((size_t) n2, sizeof(double));
+  multistrauss->hard     = (int *) R_alloc((size_t) n2, sizeof(int));
+
+  /* Allocate scratch space for counts of each pair of types */
+  multistrauss->kount     = (int *) R_alloc((size_t) n2, sizeof(int));
+
+  /* Copy and process model parameters*/
+
+  /* ipar will contain n^2 gamma values followed by n^2 values of r */
+
+  range2 = 0.0;
+
+  for(i = 0; i < ntypes; i++) {
+    for(j = 0; j < ntypes; j++) {
+      g = model.ipar[i + j*ntypes];
+      r = model.ipar[n2 + i + j*ntypes];
+      r2 = r * r;
+      hard = (g < DOUBLE_EPS);
+      logg = (hard) ? 0 : log(g);
+      MAT(multistrauss->gamma, i, j, ntypes) = g;
+      MAT(multistrauss->rad, i, j, ntypes) = r;
+      MAT(multistrauss->hard, i, j, ntypes) = hard; 
+      MAT(multistrauss->loggamma, i, j, ntypes) = logg;
+      MAT(multistrauss->rad2, i, j, ntypes) = r2;
+      if(r2 > range2) range2 = r2;
+    }
+  }
+  multistrauss->range2 = range2;
+
+  /* periodic boundary conditions? */
+  multistrauss->period = model.period;
+  multistrauss->per    = (model.period[0] > 0.0);
+
+#ifdef DEBUG
+  Rprintf("end initialiser\n");
+#endif
+  return((Cdata *) multistrauss);
+}
+
+/* conditional intensity evaluator */
+
+double straussmcif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int npts, ntypes, kount, ix, ixp1, j, mrk, mrkj, m1, m2;
+  int *marks;
+  double *x, *y;
+  double u, v, lg;
+  double d2, cifval;
+  double range2;
+  double *period;
+  MultiStrauss *multistrauss;
+  DECLARE_CLOSE_D2_VARS;
+
+  multistrauss = (MultiStrauss *) cdata;
+  range2 = multistrauss->range2;
+  period = multistrauss->period;
+  
+  u  = prop.u;
+  v  = prop.v;
+  mrk = prop.mrk;
+  ix = prop.ix;
+  x  = state.x;
+  y  = state.y;
+  marks = state.marks;
+
+  npts = state.npts;
+
+#ifdef DEBUG
+  Rprintf("computing cif: u=%lf, v=%lf, mrk=%d\n", u, v, mrk);
+#endif
+
+  cifval = 1.0;
+
+  if(npts == 0) 
+    return(cifval);
+
+  ntypes = multistrauss->ntypes;
+
+#ifdef DEBUG
+  Rprintf("initialising pair counts\n");
+#endif
+
+  /* initialise pair counts */
+  for(m1 = 0; m1 < ntypes; m1++)
+    for(m2 = 0; m2 < ntypes; m2++)
+      MAT(multistrauss->kount, m1, m2, ntypes) = 0;
+
+  /* compile pair counts */
+
+#ifdef DEBUG
+  Rprintf("compiling pair counts\n");
+#endif
+
+  ixp1 = ix+1;
+  /* If ix = NONE = -1, then ixp1 = 0 is correct */
+  if(multistrauss->per) { /* periodic distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+	if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) {
+	  mrkj = marks[j];
+	  if(d2 < MAT(multistrauss->rad2, mrk, mrkj, ntypes)) 
+	    MAT(multistrauss->kount, mrk, mrkj, ntypes)++;
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+	if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) {
+	  mrkj = marks[j];
+	  if(d2 < MAT(multistrauss->rad2, mrk, mrkj, ntypes)) 
+	    MAT(multistrauss->kount, mrk, mrkj, ntypes)++;
+	}
+      }
+    }
+  }
+  else { /* Euclidean distance */
+    if(ix > 0) {
+      for(j=0; j < ix; j++) {
+        if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) {
+	  mrkj = marks[j];
+	  if(d2 < MAT(multistrauss->rad2, mrk, mrkj, ntypes)) 
+	    MAT(multistrauss->kount, mrk, mrkj, ntypes)++;
+	}
+      }
+    }
+    if(ixp1 < npts) {
+      for(j=ixp1; j<npts; j++) {
+        if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) {
+	  mrkj = marks[j];
+	  if(d2 < MAT(multistrauss->rad2, mrk, mrkj, ntypes)) 
+	    MAT(multistrauss->kount, mrk, mrkj, ntypes)++;
+	}
+      }
+    }
+  }
+
+#ifdef DEBUG
+  Rprintf("multiplying cif factors\n");
+#endif
+  /* multiply cif value by pair potential */
+  for(m1 = 0; m1 < ntypes; m1++) {
+    for(m2 = 0; m2 < ntypes; m2++) {
+      kount = MAT(multistrauss->kount, m1, m2, ntypes);
+      if(MAT(multistrauss->hard, m1, m2, ntypes)) {
+	if(kount > 0) {
+	  cifval = 0.0;
+	  return(cifval);
+	}
+      } else {
+	lg = MAT(multistrauss->loggamma, m1, m2, ntypes);
+	cifval *= exp(lg * kount);
+      }
+    }
+  }
+  
+#ifdef DEBUG
+  Rprintf("returning positive cif\n");
+#endif
+  return cifval;
+}
+
+Cifns MultiStraussCifns = { &straussminit, &straussmcif, (updafunptr) NULL, YES};
diff --git a/src/sumsymouter.h b/src/sumsymouter.h
new file mode 100644
index 0000000..5a56a1a
--- /dev/null
+++ b/src/sumsymouter.h
@@ -0,0 +1,88 @@
+/*
+  sumsymouter.h
+
+  Code template for some functions in linalg.c
+
+  $Revision: 1.3 $ $Date: 2013/04/18 11:55:24 $
+
+  Macros used: FNAME = function name,
+               WEIGHTED = #defined for weighted version
+*/
+
+void FNAME(
+  x, 
+#ifdef WEIGHTED
+  w, 
+#endif
+  p, n, y
+) 
+  double *x;    /* p by n by n array */
+#ifdef WEIGHTED
+  double *w;    /*      n by n matrix (symmetric) */
+#endif
+  int *p, *n;
+  double *y;    /* output matrix p by p, initialised to zero */
+{
+  int N, P;
+  register int i, j, k, m, ijpos, jipos, maxchunk;
+  register double *xij, *xji;
+#ifdef WEIGHTED
+  register double wij;
+#endif
+  N = *n; 
+  P = *p;
+  OUTERCHUNKLOOP(i, N, maxchunk, 256) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, N, maxchunk, 256) {
+      /* loop over j != i */
+      if(i > 0) {
+	for(j = 0; j < i; j++) {
+	  /* pointers to [i,j] and [j,i] in N*N matrices */
+	  ijpos = i + N * j;
+	  jipos = j + N * i;
+	  /* pointers to x[, i, j] and x[ , j, i] */
+	  xij = x + ijpos * P;
+	  xji = x + jipos * P;
+	  /* outer product */ 
+#ifdef WEIGHTED
+	  wij = w[ijpos];
+#endif
+	  for(k = 0; k < P; k++) {
+	    for(m = 0; m < P; m++) {
+#ifdef WEIGHTED
+	      y[m + k * P] += wij * xij[m] * xji[k];
+#else
+	      y[m + k * P] += xij[m] * xji[k];
+#endif
+	    }
+	  }
+	}
+      }
+      if(i + 1 < N) {
+	for(j = i+1; j < N; j++) {
+	  /* pointers to [i,j] and [j,i] in N*N matrices */
+	  ijpos = i + N * j;
+	  jipos = j + N * i;
+	  /* pointers to x[, i, j] and x[ , j, i] */
+	  xij = x + ijpos * P;
+	  xji = x + jipos * P;
+	  /* outer product */ 
+#ifdef WEIGHTED
+	  wij = w[ijpos];
+#endif
+	  for(k = 0; k < P; k++) {
+	    for(m = 0; m < P; m++) {
+#ifdef WEIGHTED
+	      y[m + k * P] += wij * xij[m] * xji[k];
+#else
+	      y[m + k * P] +=       xij[m] * xji[k];
+#endif
+	    }
+	  }
+	}
+      }
+      /* end of loop over j */
+    }
+  }
+}
+
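+/*
+  A minimal sketch of how this template is intended to be instantiated
+  in linalg.c (names illustrative), mirroring the pattern used for the
+  sparse counterpart in sparselinalg.c:
+
+  #define FNAME Csumsymouter
+  #undef WEIGHTED
+  #include "sumsymouter.h"
+  #undef FNAME
+
+  #define FNAME Cwsumsymouter
+  #define WEIGHTED
+  #include "sumsymouter.h"
+*/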
diff --git a/src/trigraf.c b/src/trigraf.c
new file mode 100755
index 0000000..f0a8b17
--- /dev/null
+++ b/src/trigraf.c
@@ -0,0 +1,1211 @@
+/*
+
+  trigraf.c
+
+  Form list of all triangles in a planar graph, given list of edges
+  
+  $Revision: 1.14 $     $Date: 2016/11/22 09:19:42 $
+
+  Note: vertex indices ie, je are indices in R.
+        They are handled without converting to C convention,
+        because we only need to test equality and ordering.
+	(*except in 'trioxgraph'*)
+
+  Called by .C:
+  -------------
+  trigraf()  Generic C implementation with fixed storage limit
+             usable with Delaunay triangulation
+
+  trigrafS() Faster version when input data are sorted
+	     (again with predetermined storage limit)
+	     suited for handling Delaunay triangulation
+
+  Called by .Call:
+  ---------------
+  trigraph()   Version with dynamic storage allocation
+
+  triograph()  Faster version assuming 'iedge' is sorted in increasing order
+
+  trioxgraph()  Even faster version for use with quadrature schemes
+
+  Diameters:
+  -----------
+  triDgraph() Also computes diameters of triangles
+
+*/
+
+#include <R.h>
+#include <Rdefines.h>
+#include <R_ext/Utils.h>
+
+#include "chunkloop.h"
+
+#undef DEBUGTRI
+
+void trigraf(nv, ne, ie, je, ntmax, nt, it, jt, kt, status)
+     /* inputs */
+     int *nv;         /* number of graph vertices */
+     int *ne;         /* number of edges */
+     int *ie, *je;    /* vectors of indices of ends of each edge */ 
+     int *ntmax;      /* length of storage space for triangles */
+     /* output */
+     int *nt;              /* number of triangles (<= *ntmax) */
+     int *it, *jt, *kt;    /* vectors of indices of vertices of triangles */ 
+     int *status;          /* 0 if OK, 1 if overflow */
+{
+  int Nv, Ne, Ntmax;
+  int Nt, Nj, m, i, j, k, mj, mk, maxchunk;
+  int *jj;
+  
+  Nv = *nv;
+  Ne = *ne;
+  Ntmax = *ntmax;
+
+  /* initialise scratch storage */
+  jj = (int *) R_alloc(Ne, sizeof(int));
+  Nt = 0;
+
+  /* vertex index i ranges from 1 to Nv */
+  XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) {
+      /* Find triangles involving vertex 'i'
+	 in which 'i' is the lowest-numbered vertex */
+
+      /* First, find vertices j > i connected to i */
+      Nj = 0;
+      for(m = 0; m < Ne; m++) {
+	if(ie[m] == i) {
+	  j = je[m];
+	  if(j > i) {
+	    jj[Nj] = j;
+	    Nj++;
+	  }
+	} else if(je[m] == i) {
+	  j = ie[m];
+	  if(j > i) {
+	    jj[Nj] = j;
+	    Nj++;
+	  }
+	}
+      }
+
+      /* 
+	 Determine which pairs of vertices j, k are joined by an edge;
+	 save triangles (i,j,k) 
+      */
+
+      if(Nj > 1) {
+	/* Sort jj in ascending order */
+	for(mj = 0; mj < Nj-1; mj++) {
+	  j = jj[mj];
+	  for(mk = mj+1; mk < Nj; mk++) {
+	    k = jj[mk];
+	    if(k < j) {
+	      /* swap */
+	      jj[mk] = j;
+	      jj[mj] = k;
+	      j = k;
+	    }
+	  }
+	}
+	for(mj = 0; mj < Nj-1; mj++) {
+	  j = jj[mj];
+	  for(mk = mj+1; mk < Nj; mk++) {
+	    k = jj[mk];
+	    if(j != k) {
+	      /* Run through edges to determine whether j, k are neighbours */
+	      for(m = 0; m < Ne; m++) {
+		if((ie[m] == j && je[m] == k)
+		   || (ie[m] == k && je[m] == j)) {
+		  /* add (i, j, k) to list of triangles */
+		  if(Nt >= Ntmax) {
+		    /* overflow - exit */
+		    *status = 1;
+		    return;
+		  }
+		  it[Nt] = i;
+		  jt[Nt] = j;
+		  kt[Nt] = k;
+		  Nt++;
+		}
+	      }
+	    }
+	  }
+	}
+      }
+    }
+  }
+  *nt = Nt;
+  *status = 0;
+}
+
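+/*
+  A minimal worked example (illustrative): for the graph with vertices
+  1..4 and edges (1,2), (1,3), (2,3), (2,4), (3,4), the triangles found
+  are (1,2,3) and (2,3,4); each triangle is reported once with i < j < k.
+*/
+#ifdef DEMO_TRIGRAF
+static void demo_trigraf(void)
+{
+  int nv = 4, ne = 5, ntmax = 10, nt, status;
+  int ie[5] = {1, 1, 2, 2, 3};
+  int je[5] = {2, 3, 3, 4, 4};
+  int it[10], jt[10], kt[10];
+  trigraf(&nv, &ne, ie, je, &ntmax, &nt, it, jt, kt, &status);
+  /* on return: status == 0, nt == 2,
+     (it,jt,kt)[0] = (1,2,3) and (it,jt,kt)[1] = (2,3,4) */
+}
+#endif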
+
+/* faster version of trigraf() 
+   assuming that 
+            ie[m] < je[m]
+            ie[] is in ascending order
+            je[] is in ascending order within ie[],
+	          that is, je[ie[]=i] is in ascending order for each fixed i
+*/
+
+void trigrafS(nv, ne, ie, je, ntmax, nt, it, jt, kt, status)
+     /* inputs */
+     int *nv;         /* number of graph vertices */
+     int *ne;         /* number of edges */
+     int *ie, *je;    /* vectors of indices of ends of each edge */ 
+     int *ntmax;      /* length of storage space for triangles */
+     /* output */
+     int *nt;              /* number of triangles */
+     int *it, *jt, *kt;    /* vectors of indices of vertices of triangles */ 
+     int *status;          /* 0 if OK, 1 if overflow */
+{
+  int Ne, Nt, Ntmax;
+  int m, i, j, k, mj, mk;
+  int firstedge, lastedge;
+  
+  Ne = *ne;
+  Ntmax = *ntmax;
+
+  /* nv is not used, but retained for harmony with trigraf */
+  /* Avoid compiler warnings */
+  Nt = *nv;
+
+  /* initialise output */
+  Nt = 0;
+
+  lastedge = -1;
+  while(lastedge + 1 < Ne) {
+    if(lastedge % 256 == 0) R_CheckUserInterrupt();
+    /* 
+       Consider next vertex i.
+       The edges (i,j) with i < j appear contiguously in the edge list.
+    */
+    firstedge = lastedge + 1;
+    i = ie[firstedge]; 
+    for(m= firstedge+1; m < Ne && ie[m] == i; m++)
+      ;
+    lastedge = m-1;
+    /* 
+       Consider each pair j, k of neighbours of i, where i < j < k. 
+       Scan entire edge list to determine whether j, k are joined by an edge.
+       If so, save triangle (i,j,k) 
+    */
+    if(lastedge > firstedge) {
+      for(mj = firstedge; mj < lastedge; mj++) {
+	j = je[mj];
+	for(mk = mj+1; mk <= lastedge; mk++) {
+	  k = je[mk];
+	  /* Run through edges to determine whether j, k are neighbours */
+	  for(m = 0; m < Ne && ie[m] < j; m++) 
+	    ;
+	  while(m < Ne && ie[m] == j) {
+	    if(je[m] == k) {
+	      /* add (i, j, k) to list of triangles */
+	      if(Nt >= Ntmax) {
+		/* overflow - exit */
+		*status = 1;
+		return;
+	      }
+	      it[Nt] = i;
+	      jt[Nt] = j;
+	      kt[Nt] = k;
+	      Nt++;
+	    }
+	    m++;
+	  }
+	}
+      }
+    }
+  }
+
+  *nt = Nt;
+  *status = 0;
+}
+
+
+/* ------------------- callable by .Call ------------------------- */
+
+
+SEXP trigraph(SEXP nv,  /* number of vertices */
+	      SEXP iedge,  /* vectors of indices of ends of each edge */   
+	      SEXP jedge)  /* all arguments are integer */
+{
+  int Nv, Ne;
+  int *ie, *je;         /* edges */
+  int *it, *jt, *kt;    /* vectors of indices of vertices of triangles */ 
+  int Nt, Ntmax;        /* number of triangles */
+
+  int Nj;
+  int *jj; /* scratch storage */
+
+  int i, j, k, m, mj, mk, Nmore, maxchunk;
+  
+  /* output */
+  SEXP iTout, jTout, kTout, out;
+  int *ito, *jto, *kto;
+  
+  /* =================== Protect R objects from garbage collector ======= */
+  PROTECT(nv = AS_INTEGER(nv));
+  PROTECT(iedge = AS_INTEGER(iedge));
+  PROTECT(jedge = AS_INTEGER(jedge));
+  /* That's 3 protected objects */
+
+  /* numbers of vertices and edges */
+  Nv = *(INTEGER_POINTER(nv)); 
+  Ne = LENGTH(iedge);
+
+  /* input arrays */
+  ie = INTEGER_POINTER(iedge);
+  je = INTEGER_POINTER(jedge);
+
+  /* initialise storage (with a guess at max size) */
+  Ntmax = 3 * Ne;
+  it = (int *) R_alloc(Ntmax, sizeof(int));
+  jt = (int *) R_alloc(Ntmax, sizeof(int));
+  kt = (int *) R_alloc(Ntmax, sizeof(int));
+  Nt = 0;
+
+  /* initialise scratch storage */
+  jj = (int *) R_alloc(Ne, sizeof(int));
+
+  XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) {
+
+#ifdef DEBUGTRI
+      Rprintf("i=%d ---------- \n", i);
+#endif
+
+      /* Find triangles involving vertex 'i'
+	 in which 'i' is the lowest-numbered vertex */
+
+      /* First, find vertices j > i connected to i */
+      Nj = 0;
+      for(m = 0; m < Ne; m++) {
+	if(ie[m] == i) {
+	  j = je[m];
+	  if(j > i) {
+	    jj[Nj] = j;
+	    Nj++;
+	  }
+	} else if(je[m] == i) {
+	  j = ie[m];
+	  if(j > i) {
+	    jj[Nj] = j;
+	    Nj++;
+	  }
+	}
+      }
+
+      /* 
+	 Determine which pairs of vertices j, k are joined by an edge;
+	 save triangles (i,j,k) 
+      */
+
+#ifdef DEBUGTRI
+      Rprintf("Nj = %d\n", Nj);
+#endif
+
+      if(Nj > 1) {
+#ifdef DEBUGTRI
+	Rprintf("i=%d\njj=\n", i);
+	for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]);
+	Rprintf("\n\n");
+#endif
+	/* Sort jj in ascending order */
+	for(mj = 0; mj < Nj-1; mj++) {
+	  j = jj[mj];
+	  for(mk = mj+1; mk < Nj; mk++) {
+	    k = jj[mk];
+	    if(k < j) {
+	      /* swap */
+	      jj[mk] = j;
+	      jj[mj] = k;
+	      j = k;
+	    }
+	  }
+	}
+#ifdef DEBUGTRI
+	Rprintf("sorted=\n", i);
+	for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]);
+	Rprintf("\n\n");
+#endif
+
+	for(mj = 0; mj < Nj-1; mj++) {
+	  j = jj[mj];
+	  for(mk = mj+1; mk < Nj; mk++) {
+	    k = jj[mk];
+	    if(j != k) {
+	      /* Run through edges to determine whether j, k are neighbours */
+	      for(m = 0; m < Ne; m++) {
+		if((ie[m] == j && je[m] == k)
+		   || (ie[m] == k && je[m] == j)) {
+		  /* add (i, j, k) to list of triangles */
+		  if(Nt >= Ntmax) {
+		    /* overflow - allocate more space */
+		    Nmore = 2 * Ntmax;
+#ifdef DEBUGTRI
+		    Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore);
+#endif
+		    it = (int *) S_realloc((char *) it,
+					   Nmore,  Ntmax,
+					   sizeof(int));
+		    jt = (int *) S_realloc((char *) jt,
+					   Nmore,  Ntmax,
+					   sizeof(int));
+		    kt = (int *) S_realloc((char *) kt,
+					   Nmore,  Ntmax,
+					   sizeof(int));
+		    Ntmax = Nmore;
+		  }
+		  /* output indices in R convention */
+		  it[Nt] = i;
+		  jt[Nt] = j;
+		  kt[Nt] = k;
+		  Nt++;
+		}
+	      }
+	    }
+	  }
+	}
+      }
+    }
+  }
+
+  /* allocate space for output */
+  PROTECT(iTout = NEW_INTEGER(Nt));
+  PROTECT(jTout = NEW_INTEGER(Nt));
+  PROTECT(kTout = NEW_INTEGER(Nt));
+  PROTECT(out   = NEW_LIST(3));
+  /* that's 3+4=7 protected objects */
+  
+  ito = INTEGER_POINTER(iTout);
+  jto = INTEGER_POINTER(jTout);
+  kto = INTEGER_POINTER(kTout);
+  
+  /* copy triangle indices to output vectors */
+  for(m = 0; m < Nt; m++) {
+    ito[m] = it[m];
+    jto[m] = jt[m];
+    kto[m] = kt[m];
+  }
+  
+  /* insert output vectors in output list */
+  SET_VECTOR_ELT(out, 0, iTout);
+  SET_VECTOR_ELT(out, 1, jTout);
+  SET_VECTOR_ELT(out, 2, kTout);
+
+  UNPROTECT(7);
+  return(out);
+}
+
+
+/* faster version assuming iedge is in increasing order */
+
+SEXP triograph(SEXP nv,  /* number of vertices */
+	       SEXP iedge,  /* vectors of indices of ends of each edge */   
+	       SEXP jedge)  /* all arguments are integer */
+{
+  int Nv, Ne;
+  int *ie, *je;         /* edges */
+  int *it, *jt, *kt;    /* vectors of indices of vertices of triangles */ 
+  int Nt, Ntmax;        /* number of triangles */
+
+  int Nj;
+  int *jj; /* scratch storage */
+
+  int i, j, k, m, mj, mk, maxjk, Nmore, maxchunk;
+  
+  /* output */
+  SEXP iTout, jTout, kTout, out;
+  int *ito, *jto, *kto;
+  
+  /* =================== Protect R objects from garbage collector ======= */
+  PROTECT(nv = AS_INTEGER(nv));
+  PROTECT(iedge = AS_INTEGER(iedge));
+  PROTECT(jedge = AS_INTEGER(jedge));
+  /* That's 3 protected objects */
+
+  /* numbers of vertices and edges */
+  Nv = *(INTEGER_POINTER(nv)); 
+  Ne = LENGTH(iedge);
+
+  /* input arrays */
+  ie = INTEGER_POINTER(iedge);
+  je = INTEGER_POINTER(jedge);
+
+  /* initialise storage (with a guess at max size) */
+  Ntmax = 3 * Ne;
+  it = (int *) R_alloc(Ntmax, sizeof(int));
+  jt = (int *) R_alloc(Ntmax, sizeof(int));
+  kt = (int *) R_alloc(Ntmax, sizeof(int));
+  Nt = 0;
+
+  /* initialise scratch storage */
+  jj = (int *) R_alloc(Ne, sizeof(int));
+
+  XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) {
+
+#ifdef DEBUGTRI
+      Rprintf("i=%d ---------- \n", i);
+#endif
+
+      /* Find triangles involving vertex 'i'
+	 in which 'i' is the lowest-numbered vertex */
+
+      /* First, find vertices j > i connected to i */
+      Nj = 0;
+      for(m = 0; m < Ne; m++) {
+	if(ie[m] == i) {
+	  j = je[m];
+	  if(j > i) {
+	    jj[Nj] = j;
+	    Nj++;
+	  }
+	} else if(je[m] == i) {
+	  j = ie[m];
+	  if(j > i) {
+	    jj[Nj] = j;
+	    Nj++;
+	  }
+	}
+      }
+
+      /* 
+	 Determine which pairs of vertices j, k are joined by an edge;
+	 save triangles (i,j,k) 
+      */
+
+#ifdef DEBUGTRI
+      Rprintf("Nj = %d\n", Nj);
+#endif
+
+      if(Nj > 1) {
+#ifdef DEBUGTRI
+	Rprintf("i=%d\njj=\n", i);
+	for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]);
+	Rprintf("\n\n");
+#endif
+	/* Sort jj in ascending order */
+	for(mj = 0; mj < Nj-1; mj++) {
+	  j = jj[mj];
+	  for(mk = mj+1; mk < Nj; mk++) {
+	    k = jj[mk];
+	    if(k < j) {
+	      /* swap */
+	      jj[mk] = j;
+	      jj[mj] = k;
+	      j = k;
+	    }
+	  }
+	}
+#ifdef DEBUGTRI
+	Rprintf("sorted=\n");
+	for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]);
+	Rprintf("\n\n");
+#endif
+
+	for(mj = 0; mj < Nj-1; mj++) {
+	  j = jj[mj];
+	  for(mk = mj+1; mk < Nj; mk++) {
+	    k = jj[mk];
+	    if(j != k) {
+	      /* Run through edges to determine whether j, k are neighbours */
+	      maxjk = (j > k) ? j : k;
+	      for(m = 0; m < Ne; m++) {
+		if(ie[m] > maxjk) break;
+		/* 
+		   since iedge is in increasing order, the test below
+		   will always be FALSE when ie[m] > max(j,k)
+		*/
+		if((ie[m] == j && je[m] == k)
+		   || (ie[m] == k && je[m] == j)) {
+		  /* add (i, j, k) to list of triangles */
+		  if(Nt >= Ntmax) {
+		    /* overflow - allocate more space */
+		    Nmore = 2 * Ntmax;
+#ifdef DEBUGTRI
+		    Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore);
+#endif
+		    it = (int *) S_realloc((char *) it,
+					   Nmore,  Ntmax,
+					   sizeof(int));
+		    jt = (int *) S_realloc((char *) jt,
+					   Nmore,  Ntmax,
+					   sizeof(int));
+		    kt = (int *) S_realloc((char *) kt,
+					   Nmore,  Ntmax,
+					   sizeof(int));
+		    Ntmax = Nmore;
+		  }
+		  it[Nt] = i;
+		  jt[Nt] = j;
+		  kt[Nt] = k;
+		  Nt++;
+		} 
+	      }
+	    }
+	  }
+	}
+      }
+    }
+  }
+
+  /* allocate space for output */
+  PROTECT(iTout = NEW_INTEGER(Nt));
+  PROTECT(jTout = NEW_INTEGER(Nt));
+  PROTECT(kTout = NEW_INTEGER(Nt));
+  PROTECT(out   = NEW_LIST(3));
+  /* that's 3+4=7 protected objects */
+  
+  ito = INTEGER_POINTER(iTout);
+  jto = INTEGER_POINTER(jTout);
+  kto = INTEGER_POINTER(kTout);
+  
+  /* copy triangle indices to output vectors */
+  for(m = 0; m < Nt; m++) {
+    ito[m] = it[m];
+    jto[m] = jt[m];
+    kto[m] = kt[m];
+  }
+  
+  /* insert output vectors in output list */
+  SET_VECTOR_ELT(out, 0, iTout);
+  SET_VECTOR_ELT(out, 1, jTout);
+  SET_VECTOR_ELT(out, 2, kTout);
+
+  UNPROTECT(7);
+  return(out);
+}
+
+/* 
+   Even faster version using information about dummy vertices.
+   Dummy-to-dummy edges are forbidden.
+
+   For generic purposes use 'friendly' for 'isdata'
+   Edge between j and k is possible iff friendly[j] || friendly[k].
+   Edges with friendly = FALSE cannot be connected to one another.
+
+ */
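+
+/*
+   Illustration (hypothetical input): with friendly = (1,0,0,1)
+   on vertices 1..4, a candidate pair j=2, k=3 is skipped without
+   scanning the edge list, because friendly[2] and friendly[3] are
+   both FALSE and a dummy-to-dummy edge cannot exist. Skipping
+   these pairs is what makes this version faster.
+*/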
+
+
+SEXP trioxgraph(SEXP nv,  /* number of vertices */
+		SEXP iedge,  /* vectors of indices of ends of each edge */   
+		SEXP jedge,
+		SEXP friendly)  /* indicator vector, length nv */
+{
+  /* input */
+  int Nv, Ne;
+  int *ie, *je;         /* edges */
+  int *friend;         /* indicator */
+
+  /* output */
+  int *it, *jt, *kt;    /* vectors of indices of vertices of triangles */ 
+  int Nt, Ntmax;        /* number of triangles */
+
+  /* scratch storage */
+  int Nj;
+  int *jj; 
+  int i, j, k, m, mj, mk, maxjk, Nmore, maxchunk;
+  
+  /* output to R */
+  SEXP iTout, jTout, kTout, out;
+  int *ito, *jto, *kto;
+  
+  /* =================== Protect R objects from garbage collector ======= */
+  PROTECT(nv = AS_INTEGER(nv));
+  PROTECT(iedge = AS_INTEGER(iedge));
+  PROTECT(jedge = AS_INTEGER(jedge));
+  PROTECT(friendly = AS_INTEGER(friendly));
+  /* That's 4 protected objects */
+
+  /* numbers of vertices and edges */
+  Nv = *(INTEGER_POINTER(nv)); 
+  Ne = LENGTH(iedge);
+
+  /* input arrays */
+  ie = INTEGER_POINTER(iedge);
+  je = INTEGER_POINTER(jedge);
+  friend = INTEGER_POINTER(friendly);
+
+  /* initialise storage (with a guess at max size) */
+  Ntmax = 3 * Ne;
+  it = (int *) R_alloc(Ntmax, sizeof(int));
+  jt = (int *) R_alloc(Ntmax, sizeof(int));
+  kt = (int *) R_alloc(Ntmax, sizeof(int));
+  Nt = 0;
+
+  /* initialise scratch storage */
+  jj = (int *) R_alloc(Ne, sizeof(int));
+
+  /* convert to C indexing convention */
+  for(m = 0; m < Ne; m++) {
+    ie[m] -= 1;
+    je[m] -= 1;
+  }
+
+  OUTERCHUNKLOOP(i, Nv, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(i, Nv, maxchunk, 8196) {
+
+#ifdef DEBUGTRI
+      Rprintf("i=%d ---------- \n", i);
+#endif
+
+      /* Find triangles involving vertex 'i'
+	 in which 'i' is the lowest-numbered vertex */
+
+      /* First, find vertices j > i connected to i */
+      Nj = 0;
+      for(m = 0; m < Ne; m++) {
+	if(ie[m] == i) {
+	  j = je[m];
+	  if(j > i) {
+	    jj[Nj] = j;
+	    Nj++;
+	  }
+	} else if(je[m] == i) {
+	  j = ie[m];
+	  if(j > i) {
+	    jj[Nj] = j;
+	    Nj++;
+	  }
+	}
+      }
+
+      /* 
+	 Determine which pairs of vertices j, k are joined by an edge;
+	 save triangles (i,j,k) 
+      */
+
+#ifdef DEBUGTRI
+      Rprintf("Nj = %d\n", Nj);
+#endif
+
+      if(Nj > 1) {
+#ifdef DEBUGTRI
+	Rprintf("i=%d\njj=\n", i);
+	for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]);
+	Rprintf("\n\n");
+#endif
+	/* Sort jj in ascending order */
+	for(mj = 0; mj < Nj-1; mj++) {
+	  j = jj[mj];
+	  for(mk = mj+1; mk < Nj; mk++) {
+	    k = jj[mk];
+	    if(k < j) {
+	      /* swap */
+	      jj[mk] = j;
+	      jj[mj] = k;
+	      j = k;
+	    }
+	  }
+	}
+#ifdef DEBUGTRI
+	Rprintf("sorted=\n");
+	for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]);
+	Rprintf("\n\n");
+#endif
+
+	for(mj = 0; mj < Nj-1; mj++) {
+	  j = jj[mj];
+	  for(mk = mj+1; mk < Nj; mk++) {
+	    k = jj[mk];
+	    if(j != k && (friend[j] || friend[k])) {
+	      /* Run through edges to determine whether j, k are neighbours */
+	      maxjk = (j > k) ? j : k;
+	      for(m = 0; m < Ne; m++) {
+		if(ie[m] > maxjk) break;
+		/* 
+		   since iedge is in increasing order, the test below
+		   will always be FALSE when ie[m] > max(j,k)
+		*/
+		if((ie[m] == j && je[m] == k)
+		   || (ie[m] == k && je[m] == j)) {
+		  /* add (i, j, k) to list of triangles */
+		  if(Nt >= Ntmax) {
+		    /* overflow - allocate more space */
+		    Nmore = 2 * Ntmax;
+#ifdef DEBUGTRI
+		    Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore);
+#endif
+		    it = (int *) S_realloc((char *) it,
+					   Nmore,  Ntmax,
+					   sizeof(int));
+		    jt = (int *) S_realloc((char *) jt,
+					   Nmore,  Ntmax,
+					   sizeof(int));
+		    kt = (int *) S_realloc((char *) kt,
+					   Nmore,  Ntmax,
+					   sizeof(int));
+		    Ntmax = Nmore;
+		  }
+		  /* convert back to R indexing */
+		  it[Nt] = i + 1;
+		  jt[Nt] = j + 1;
+		  kt[Nt] = k + 1;
+		  Nt++;
+		} 
+	      }
+	    }
+	  }
+	}
+      }
+    }
+  }
+
+  /* allocate space for output */
+  PROTECT(iTout = NEW_INTEGER(Nt));
+  PROTECT(jTout = NEW_INTEGER(Nt));
+  PROTECT(kTout = NEW_INTEGER(Nt));
+  PROTECT(out   = NEW_LIST(3));
+  /* that's 4+4=8 protected objects */
+  
+  ito = INTEGER_POINTER(iTout);
+  jto = INTEGER_POINTER(jTout);
+  kto = INTEGER_POINTER(kTout);
+  
+  /* copy triangle indices to output vectors */
+  for(m = 0; m < Nt; m++) {
+    ito[m] = it[m];
+    jto[m] = jt[m];
+    kto[m] = kt[m];
+  }
+  
+  /* insert output vectors in output list */
+  SET_VECTOR_ELT(out, 0, iTout);
+  SET_VECTOR_ELT(out, 1, jTout);
+  SET_VECTOR_ELT(out, 2, kTout);
+
+  UNPROTECT(8);
+  return(out);
+}
+
+/* 
+   also calculates diameter (max edge length) of triangle
+*/
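+
+/*
+   For example (illustrative values): a triangle whose three edges
+   have lengths 3, 4 and 5 is reported with diameter 5.
+*/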
+
+SEXP triDgraph(SEXP nv,  /* number of vertices */
+	       SEXP iedge,  /* vectors of indices of ends of each edge */   
+	       SEXP jedge,
+	       SEXP edgelength)   /* edge lengths */
+{
+  int Nv, Ne;
+  int *ie, *je;         /* edges */
+  double *edgelen;      
+
+  int *it, *jt, *kt;    /* vectors of indices of vertices of triangles */ 
+  double *dt;           /* diameters (max edge lengths) of triangles */
+  int Nt, Ntmax;        /* number of triangles */
+
+  /* scratch storage */
+  int Nj;
+  int *jj; 
+  double *dd;
+
+  int i, j, k, m, mj, mk, Nmore, maxchunk;
+  double dij, dik, djk, diam;
+  
+  /* output */
+  SEXP iTout, jTout, kTout, dTout, out;
+  int *ito, *jto, *kto;
+  double *dto;
+  
+  /* =================== Protect R objects from garbage collector ======= */
+  PROTECT(nv = AS_INTEGER(nv));
+  PROTECT(iedge = AS_INTEGER(iedge));
+  PROTECT(jedge = AS_INTEGER(jedge));
+  PROTECT(edgelength = AS_NUMERIC(edgelength));
+  /* That's 4 protected objects */
+
+  /* numbers of vertices and edges */
+  Nv = *(INTEGER_POINTER(nv)); 
+  Ne = LENGTH(iedge);
+
+  /* input arrays */
+  ie = INTEGER_POINTER(iedge);
+  je = INTEGER_POINTER(jedge);
+  edgelen = NUMERIC_POINTER(edgelength);
+
+  /* initialise storage (with a guess at max size) */
+  Ntmax = 3 * Ne;
+  it = (int *) R_alloc(Ntmax, sizeof(int));
+  jt = (int *) R_alloc(Ntmax, sizeof(int));
+  kt = (int *) R_alloc(Ntmax, sizeof(int));
+  dt = (double *) R_alloc(Ntmax, sizeof(double));
+  Nt = 0;
+
+  /* initialise scratch storage */
+  jj = (int *) R_alloc(Ne, sizeof(int));
+  dd = (double *) R_alloc(Ne, sizeof(double));
+
+  XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) {
+
+#ifdef DEBUGTRI
+      Rprintf("i=%d ---------- \n", i);
+#endif
+
+      /* Find triangles involving vertex 'i'
+	 in which 'i' is the lowest-numbered vertex */
+
+      /* First, find vertices j > i connected to i */
+      Nj = 0;
+      for(m = 0; m < Ne; m++) {
+	if(ie[m] == i) {
+	  j = je[m];
+	  if(j > i) {
+	    jj[Nj] = j;
+	    dd[Nj] = edgelen[m];
+	    Nj++;
+	  }
+	} else if(je[m] == i) {
+	  j = ie[m];
+	  if(j > i) {
+	    jj[Nj] = j;
+	    dd[Nj] = edgelen[m];
+	    Nj++;
+	  }
+	}
+      }
+
+      /* 
+	 Determine which pairs of vertices j, k are joined by an edge;
+	 save triangles (i,j,k) 
+      */
+
+#ifdef DEBUGTRI
+      Rprintf("Nj = %d\n", Nj);
+#endif
+
+      if(Nj > 1) {
+#ifdef DEBUGTRI
+	Rprintf("i=%d\njj=\n", i);
+	for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]);
+	Rprintf("\n\n");
+#endif
+	/* Sort jj in ascending order */
+	for(mj = 0; mj < Nj-1; mj++) {
+	  j = jj[mj];
+	  for(mk = mj+1; mk < Nj; mk++) {
+	    k = jj[mk];
+	    if(k < j) {
+	      /* swap */
+	      jj[mk] = j;
+	      jj[mj] = k;
+	      dik = dd[mj];
+	      dd[mj] = dd[mk];
+	      dd[mk] = dik;
+	      j = k;
+	    }
+	  }
+	}
+#ifdef DEBUGTRI
+	Rprintf("sorted=\n");
+	for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]);
+	Rprintf("\n\n");
+#endif
+
+	for(mj = 0; mj < Nj-1; mj++) {
+	  j = jj[mj];
+	  dij = dd[mj];
+	  for(mk = mj+1; mk < Nj; mk++) {
+	    k = jj[mk];
+	    dik = dd[mk];
+	    if(j != k) {
+	      /* Run through edges to determine whether j, k are neighbours */
+	      for(m = 0; m < Ne; m++) {
+		if((ie[m] == j && je[m] == k)
+		   || (ie[m] == k && je[m] == j)) {
+		  /* triangle (i, j, k) */
+		  /* determine triangle diameter */
+		  diam = (dij > dik) ? dij : dik;
+		  djk = edgelen[m];
+		  if(djk > diam) diam = djk; 
+		  /* add (i, j, k) to list of triangles */
+		  if(Nt >= Ntmax) {
+		    /* overflow - allocate more space */
+		    Nmore = 2 * Ntmax;
+#ifdef DEBUGTRI
+		    Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore);
+#endif
+		    it = (int *) S_realloc((char *) it,
+					   Nmore,  Ntmax,
+					   sizeof(int));
+		    jt = (int *) S_realloc((char *) jt,
+					   Nmore,  Ntmax,
+					   sizeof(int));
+		    kt = (int *) S_realloc((char *) kt,
+					   Nmore,  Ntmax,
+					   sizeof(int));
+		    dt = (double *) S_realloc((char *) dt,
+					      Nmore,  Ntmax,
+					      sizeof(double));
+		    Ntmax = Nmore;
+		  }
+		  it[Nt] = i;
+		  jt[Nt] = j;
+		  kt[Nt] = k;
+		  dt[Nt] = diam; 
+		  Nt++;
+		}
+	      }
+	    }
+	  }
+	}
+      }
+    }
+  }
+
+  /* allocate space for output */
+  PROTECT(iTout = NEW_INTEGER(Nt));
+  PROTECT(jTout = NEW_INTEGER(Nt));
+  PROTECT(kTout = NEW_INTEGER(Nt));
+  PROTECT(dTout = NEW_NUMERIC(Nt));
+  PROTECT(out   = NEW_LIST(4));
+  /* that's 4+5=9 protected objects */
+  
+  ito = INTEGER_POINTER(iTout);
+  jto = INTEGER_POINTER(jTout);
+  kto = INTEGER_POINTER(kTout);
+  dto = NUMERIC_POINTER(dTout);
+  
+  /* copy triangle indices to output vectors */
+  for(m = 0; m < Nt; m++) {
+    ito[m] = it[m];
+    jto[m] = jt[m];
+    kto[m] = kt[m];
+    dto[m] = dt[m];
+  }
+  
+  /* insert output vectors in output list */
+  SET_VECTOR_ELT(out, 0, iTout);
+  SET_VECTOR_ELT(out, 1, jTout);
+  SET_VECTOR_ELT(out, 2, kTout);
+  SET_VECTOR_ELT(out, 3, dTout);
+
+  UNPROTECT(9);
+  return(out);
+}
+
+/* 
+   same as triDgraph but returns only triangles with diameter <= dmax
+*/
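+
+/*
+   For instance (illustrative values), with dmax = 10 a triangle
+   of edge lengths 3, 4, 5 is kept (diameter 5 <= 10) while one
+   of edge lengths 3, 4, 12 is discarded. The filter is applied
+   before storage, so memory grows only with the number of
+   qualifying triangles.
+*/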
+
+SEXP triDRgraph(SEXP nv,  /* number of vertices */
+		SEXP iedge,  /* vectors of indices of ends of each edge */   
+		SEXP jedge,
+		SEXP edgelength, /* edge lengths */
+		SEXP dmax)   
+{
+  int Nv, Ne;
+  int *ie, *je;         /* edges */
+  double *edgelen;      
+
+  int *it, *jt, *kt;    /* vectors of indices of vertices of triangles */ 
+  double *dt;           /* diameters (max edge lengths) of triangles */
+  int Nt, Ntmax;        /* number of triangles */
+
+  /* scratch storage */
+  int Nj;
+  int *jj; 
+  double *dd;
+
+  int i, j, k, m, mj, mk, Nmore, maxchunk;
+  double dij, dik, djk, diam, Dmax;
+  
+  /* output */
+  SEXP iTout, jTout, kTout, dTout, out;
+  int *ito, *jto, *kto;
+  double *dto;
+  
+  /* =================== Protect R objects from garbage collector ======= */
+  PROTECT(nv = AS_INTEGER(nv));
+  PROTECT(iedge = AS_INTEGER(iedge));
+  PROTECT(jedge = AS_INTEGER(jedge));
+  PROTECT(edgelength = AS_NUMERIC(edgelength));
+  PROTECT(dmax = AS_NUMERIC(dmax));
+  /* That's 5 protected objects */
+
+  /* numbers of vertices and edges */
+  Nv = *(INTEGER_POINTER(nv)); 
+  Ne = LENGTH(iedge);
+
+  /* input arrays */
+  ie = INTEGER_POINTER(iedge);
+  je = INTEGER_POINTER(jedge);
+  edgelen = NUMERIC_POINTER(edgelength);
+
+  /* maximum diameter */
+  Dmax = *(NUMERIC_POINTER(dmax));
+
+  /* initialise storage (with a guess at max size) */
+  Ntmax = 3 * Ne;
+  it = (int *) R_alloc(Ntmax, sizeof(int));
+  jt = (int *) R_alloc(Ntmax, sizeof(int));
+  kt = (int *) R_alloc(Ntmax, sizeof(int));
+  dt = (double *) R_alloc(Ntmax, sizeof(double));
+  Nt = 0;
+
+  /* initialise scratch storage */
+  jj = (int *) R_alloc(Ne, sizeof(int));
+  dd = (double *) R_alloc(Ne, sizeof(double));
+
+  XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) {
+
+#ifdef DEBUGTRI
+      Rprintf("i=%d ---------- \n", i);
+#endif
+
+      /* Find triangles involving vertex 'i'
+	 in which 'i' is the lowest-numbered vertex */
+
+      /* First, find vertices j > i connected to i */
+      Nj = 0;
+      for(m = 0; m < Ne; m++) {
+	if(ie[m] == i) {
+	  j = je[m];
+	  if(j > i) {
+	    jj[Nj] = j;
+	    dd[Nj] = edgelen[m];
+	    Nj++;
+	  }
+	} else if(je[m] == i) {
+	  j = ie[m];
+	  if(j > i) {
+	    jj[Nj] = j;
+	    dd[Nj] = edgelen[m];
+	    Nj++;
+	  }
+	}
+      }
+
+      /* 
+	 Determine which pairs of vertices j, k are joined by an edge;
+	 save triangles (i,j,k) 
+      */
+
+#ifdef DEBUGTRI
+      Rprintf("Nj = %d\n", Nj);
+#endif
+
+      if(Nj > 1) {
+#ifdef DEBUGTRI
+	Rprintf("i=%d\njj=\n", i);
+	for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]);
+	Rprintf("\n\n");
+#endif
+	/* Sort jj in ascending order */
+	for(mj = 0; mj < Nj-1; mj++) {
+	  j = jj[mj];
+	  for(mk = mj+1; mk < Nj; mk++) {
+	    k = jj[mk];
+	    if(k < j) {
+	      /* swap */
+	      jj[mk] = j;
+	      jj[mj] = k;
+	      dik = dd[mj];
+	      dd[mj] = dd[mk];
+	      dd[mk] = dik;
+	      j = k;
+	    }
+	  }
+	}
+#ifdef DEBUGTRI
+	Rprintf("sorted=\n");
+	for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]);
+	Rprintf("\n\n");
+#endif
+
+	for(mj = 0; mj < Nj-1; mj++) {
+	  j = jj[mj];
+	  dij = dd[mj];
+	  for(mk = mj+1; mk < Nj; mk++) {
+	    k = jj[mk];
+	    dik = dd[mk];
+	    if(j != k) {
+	      /* Run through edges to determine whether j, k are neighbours */
+	      for(m = 0; m < Ne; m++) {
+		if((ie[m] == j && je[m] == k)
+		   || (ie[m] == k && je[m] == j)) {
+		  /* triangle (i, j, k) */
+		  /* determine triangle diameter */
+		  diam = (dij > dik) ? dij : dik;
+		  djk = edgelen[m];
+		  if(djk > diam) diam = djk; 
+		  if(diam <= Dmax) {
+		    /* add (i, j, k) to list of triangles */
+		    if(Nt >= Ntmax) {
+		      /* overflow - allocate more space */
+		      Nmore = 2 * Ntmax;
+#ifdef DEBUGTRI
+		      Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore);
+#endif
+		      it = (int *) S_realloc((char *) it,
+					     Nmore,  Ntmax,
+					     sizeof(int));
+		      jt = (int *) S_realloc((char *) jt,
+					     Nmore,  Ntmax,
+					     sizeof(int));
+		      kt = (int *) S_realloc((char *) kt,
+					     Nmore,  Ntmax,
+					     sizeof(int));
+		      dt = (double *) S_realloc((char *) dt,
+						Nmore,  Ntmax,
+						sizeof(double));
+		      Ntmax = Nmore;
+		    }
+		    it[Nt] = i;
+		    jt[Nt] = j;
+		    kt[Nt] = k;
+		    dt[Nt] = diam; 
+		    Nt++;
+		  }
+		}
+	      }
+	    }
+	  }
+	}
+      }
+    }
+  }
+
+  /* allocate space for output */
+  PROTECT(iTout = NEW_INTEGER(Nt));
+  PROTECT(jTout = NEW_INTEGER(Nt));
+  PROTECT(kTout = NEW_INTEGER(Nt));
+  PROTECT(dTout = NEW_NUMERIC(Nt));
+  PROTECT(out   = NEW_LIST(4));
+  /* that's 5+5=10 protected objects */
+  
+  ito = INTEGER_POINTER(iTout);
+  jto = INTEGER_POINTER(jTout);
+  kto = INTEGER_POINTER(kTout);
+  dto = NUMERIC_POINTER(dTout);
+  
+  /* copy triangle indices to output vectors */
+  for(m = 0; m < Nt; m++) {
+    ito[m] = it[m];
+    jto[m] = jt[m];
+    kto[m] = kt[m];
+    dto[m] = dt[m];
+  }
+  
+  /* insert output vectors in output list */
+  SET_VECTOR_ELT(out, 0, iTout);
+  SET_VECTOR_ELT(out, 1, jTout);
+  SET_VECTOR_ELT(out, 2, kTout);
+  SET_VECTOR_ELT(out, 3, dTout);
+
+  UNPROTECT(10);
+  return(out);
+}
+
diff --git a/src/triplets.c b/src/triplets.c
new file mode 100644
index 0000000..bb6fc0f
--- /dev/null
+++ b/src/triplets.c
@@ -0,0 +1,135 @@
+#include <R.h>
+#include <math.h>
+#include "methas.h"
+#include "dist2.h"
+
+/* Conditional intensity computation for Triplets process */
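+
+/*
+   Sketch of the computation below: the routine counts the r-close
+   (unordered) pairs among the r-close neighbours of the proposal
+   point u, that is, the number of r-close triples containing u,
+   and returns gamma raised to that count (or 0/1 in the hard-core
+   case gamma ~ 0). Only the interaction term is returned here;
+   the first-order term is not computed in this file.
+*/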
+
+/* Format for storage of parameters and precomputed/auxiliary data */
+
+typedef struct Triplets {
+  double gamma;
+  double r;
+  double loggamma;
+  double r2;
+  double *period;
+  int hard;
+  int per;
+  int *neighbour;    /* scratch list of neighbours of current point */
+  int Nmax;          /* length of scratch space allocated */
+} Triplets;
+
+/* initialiser function */
+
+Cdata *tripletsinit(state, model, algo)
+     State state;
+     Model model;
+     Algor algo;
+{
+  /* create storage for model parameters */
+  Triplets *triplets;
+  triplets = (Triplets *) R_alloc(1, sizeof(Triplets)); 
+  /* create scratch space */
+  triplets->Nmax  = 1024;
+  triplets->neighbour = (int *) R_alloc(1024, sizeof(int));
+  /* Interpret model parameters*/
+  triplets->gamma  = model.ipar[0];
+  triplets->r      = model.ipar[1]; /* No longer passed as r^2 */
+  triplets->r2     = triplets->r * triplets->r; 
+  triplets->period = model.period;
+#ifdef MHDEBUG
+  Rprintf("Initialising Triplets gamma=%lf, r=%lf\n", 
+	  triplets->gamma, triplets->r);
+#endif
+  /* is the model numerically equivalent to hard core ? */
+  triplets->hard   = (triplets->gamma < DOUBLE_EPS);
+  triplets->loggamma = (triplets->hard) ? 0 : log(triplets->gamma);
+  /* periodic boundary conditions? */
+  triplets->per    = (model.period[0] > 0.0);
+  return((Cdata *) triplets);
+}
+
+/* conditional intensity evaluator */
+
+double tripletscif(prop, state, cdata)
+     Propo prop;
+     State state;
+     Cdata *cdata;
+{
+  int npts, kount, ix, j, k, nj, nk, N, Nmax, Nmore, N1;
+  int *neighbour;
+  double *x, *y;
+  double u, v;
+  double r2, d2,  cifval;
+  Triplets *triplets;
+
+  triplets = (Triplets *) cdata;
+
+  r2     = triplets->r2;
+
+  u  = prop.u;
+  v  = prop.v;
+  ix = prop.ix;
+  x  = state.x;
+  y  = state.y;
+
+  npts = state.npts;
+
+  if(npts == 0) 
+    return((double) 1.0);
+
+  neighbour = triplets->neighbour;
+  Nmax      = triplets->Nmax;
+  N         = 0;
+
+  /* compile list of neighbours */
+
+  for(j=0; j < npts; j++) {
+    if(j != ix) {
+      d2 = dist2either(u,v,x[j],y[j],triplets->period);
+      if(d2 < r2) {
+	/* add j to list of neighbours of current point */
+	if(N >= Nmax) {
+	  /* storage space overflow: reallocate */
+	  Nmore = 2 * Nmax;
+	  triplets->neighbour = neighbour = 
+	    (int *) S_realloc((char *) triplets->neighbour,
+			      Nmore, Nmax, sizeof(int));
+	  triplets->Nmax = Nmax = Nmore;
+	}
+	neighbour[N] = j;
+	N++;
+      }
+    }
+  }
+
+  /* count r-close (unordered) pairs of neighbours; each pair is counted once */
+  kount = 0;
+
+  if(N > 1) {
+    N1 = N - 1;
+    for(j = 0; j < N1; j++) {
+      nj = neighbour[j];
+      for(k = j+1; k < N; k++) {
+	nk = neighbour[k];
+	if(nj != nk) {
+	  d2 = dist2either(x[nj],y[nj],x[nk],y[nk],triplets->period);
+	  if(d2 < r2) kount++;
+	}
+      }
+    }
+  }
+  
+  if(triplets->hard) {
+    if(kount > 0) cifval = 0.0;
+    else cifval = 1.0;
+  } else cifval = exp((triplets->loggamma) * kount);
+
+#ifdef MHDEBUG
+  Rprintf("triplet count=%d cif=%lf\n", kount, cifval);
+#endif
+
+  return cifval;
+}
+
+Cifns TripletsCifns = { &tripletsinit, &tripletscif, (updafunptr) NULL, NO};
diff --git a/src/veegraf.c b/src/veegraf.c
new file mode 100644
index 0000000..18731d2
--- /dev/null
+++ b/src/veegraf.c
@@ -0,0 +1,156 @@
+/*
+  veegraf.c
+
+  $Revision: 1.2 $  $Date: 2013/05/21 08:11:27 $ 
+
+  Given the edges of a graph, determine all "Vees"
+  i.e. triples (i, j, k) where i ~ j and i ~ k. 
+
+*/
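+
+/*
+   For example (illustrative input): a triangle graph with edges
+   (1,2), (1,3), (2,3) yields the vees (1,2,3), (2,1,3) and
+   (3,1,2), one vee for each choice of apex i.
+*/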
+
+#include <R.h>
+#include <Rdefines.h>
+#include <R_ext/Utils.h>
+
+#include "chunkloop.h"
+
+#undef DEBUGVEE
+
+SEXP graphVees(SEXP nv,  /* number of vertices */
+	       SEXP iedge,  /* vectors of indices of ends of each edge */   
+	       SEXP jedge)  /* all arguments are integer */
+/* Edges should NOT be repeated symmetrically. Indices need not be sorted.  */
+{
+  int Nv, Ne;
+  int *ie, *je;         /* edges */
+  int *it, *jt, *kt;    /* vectors of indices of triples */ 
+  int Nt, Ntmax;        /* number of triples */
+
+  int Nj;
+  int *jj; /* scratch storage */
+
+  int i, j, k, m, mj, mk, Nmore, maxchunk;
+  
+  /* output */
+  SEXP iTout, jTout, kTout, out;
+  int *ito, *jto, *kto;
+  
+  /* =================== Protect R objects from garbage collector ======= */
+  PROTECT(nv = AS_INTEGER(nv));
+  PROTECT(iedge = AS_INTEGER(iedge));
+  PROTECT(jedge = AS_INTEGER(jedge));
+  /* That's 3 protected objects */
+
+  /* numbers of vertices and edges */
+  Nv = *(INTEGER_POINTER(nv)); 
+  Ne = LENGTH(iedge);
+
+  /* input arrays */
+  ie = INTEGER_POINTER(iedge);
+  je = INTEGER_POINTER(jedge);
+
+  /* initialise storage (with a guess at max size) */
+  Ntmax = 3 * Ne;
+  it = (int *) R_alloc(Ntmax, sizeof(int));
+  jt = (int *) R_alloc(Ntmax, sizeof(int));
+  kt = (int *) R_alloc(Ntmax, sizeof(int));
+  Nt = 0;
+
+  /* initialise scratch storage */
+  jj = (int *) R_alloc(Ne, sizeof(int));
+
+  XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) {
+
+#ifdef DEBUGVEE
+      Rprintf("i=%d ---------- \n", i);
+#endif
+
+      /* Find Vee triples with apex 'i' */
+
+      /* First, find all vertices j connected to i */
+      Nj = 0;
+      for(m = 0; m < Ne; m++) {
+	if(ie[m] == i) {
+	  jj[Nj] = je[m];
+	  Nj++;
+	} else if(je[m] == i) {
+	  jj[Nj] = ie[m];
+	  Nj++;
+	}
+      }
+
+      /* 
+	 save triples (i,j,k) 
+      */
+
+#ifdef DEBUGVEE
+      Rprintf("Nj = %d\n", Nj);
+#endif
+
+      if(Nj > 1) {
+#ifdef DEBUGVEE
+	Rprintf("i=%d\njj=\n", i);
+	for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]);
+	Rprintf("\n\n");
+#endif
+
+	for(mj = 0; mj < Nj-1; mj++) {
+	  j = jj[mj];
+	  for(mk = mj+1; mk < Nj; mk++) {
+	    k = jj[mk];
+	    /* add (i, j, k) to list of triples */
+	    if(Nt >= Ntmax) {
+	      /* overflow - allocate more space */
+	      Nmore = 2 * Ntmax;
+#ifdef DEBUGVEE
+	      Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore);
+#endif
+	      it = (int *) S_realloc((char *) it,
+				     Nmore,  Ntmax,
+				     sizeof(int));
+	      jt = (int *) S_realloc((char *) jt,
+				     Nmore,  Ntmax,
+				     sizeof(int));
+	      kt = (int *) S_realloc((char *) kt,
+				     Nmore,  Ntmax,
+				     sizeof(int));
+	      Ntmax = Nmore;
+	    }
+	    it[Nt] = i;
+	    jt[Nt] = j;
+	    kt[Nt] = k;
+	    Nt++;
+	  } 
+	}
+      }
+    }
+  }
+
+  /* allocate space for output */
+  PROTECT(iTout = NEW_INTEGER(Nt));
+  PROTECT(jTout = NEW_INTEGER(Nt));
+  PROTECT(kTout = NEW_INTEGER(Nt));
+  PROTECT(out   = NEW_LIST(3));
+  /* that's 3+4=7 protected objects */
+  
+  ito = INTEGER_POINTER(iTout);
+  jto = INTEGER_POINTER(jTout);
+  kto = INTEGER_POINTER(kTout);
+  
+  /* copy triplet indices to output vectors */
+  for(m = 0; m < Nt; m++) {
+    ito[m] = it[m];
+    jto[m] = jt[m];
+    kto[m] = kt[m];
+  }
+  
+  /* insert output vectors in output list */
+  SET_VECTOR_ELT(out, 0, iTout);
+  SET_VECTOR_ELT(out, 1, jTout);
+  SET_VECTOR_ELT(out, 2, kTout);
+
+  UNPROTECT(7);
+  return(out);
+}
diff --git a/src/whist.c b/src/whist.c
new file mode 100644
index 0000000..460a6a8
--- /dev/null
+++ b/src/whist.c
@@ -0,0 +1,51 @@
+/*
+  whist.c
+
+  Weighted histogram
+
+  Designed for very fine bins
+
+  Cwhist(indices, weights, nbins)
+
+  indices point to bins (range: 0 to nbins-1)
+  
+  $Revision: 1.5 $  $Date: 2016/02/02 01:52:19 $
+
+*/
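+
+/*
+   Worked example (illustrative values): with
+       indices = (0, 2, 2, 5),
+       weights = (1.0, 0.5, 0.25, 2.0),
+       nbins   = 6,
+   the result is (1.0, 0, 0.75, 0, 0, 2.0). Entries with an NA
+   index, an out-of-range index, or a non-finite weight are
+   silently skipped.
+*/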
+
+#include <R.h>
+#include <Rdefines.h>
+#include <R_ext/Utils.h>
+
+SEXP Cwhist(SEXP indices, SEXP weights, SEXP nbins) {
+  int i, j, N, M; 
+  int *x;
+  double *w, *y;
+  SEXP result;
+
+  /* =================== Protect R objects from garbage collector ======= */
+  PROTECT(indices = AS_INTEGER(indices));
+  PROTECT(weights = AS_NUMERIC(weights));
+  PROTECT(nbins   = AS_INTEGER(nbins));
+
+  N = LENGTH(indices);
+  M = *(INTEGER_POINTER(nbins));
+
+  x = INTEGER_POINTER(indices);
+  w = NUMERIC_POINTER(weights);
+
+  PROTECT(result = NEW_NUMERIC(M));
+  y =  NUMERIC_POINTER(result);
+
+  for(j = 0; j < M; j++)
+    y[j] = 0.0;
+
+  for(i = 0; i < N; i++) {
+    j = x[i];
+    if(j != NA_INTEGER && R_FINITE(w[i]) && j >= 0 && j < M)
+      y[j] += w[i];
+  }
+  UNPROTECT(4);
+  return(result);
+}
+
diff --git a/src/xyseg.c b/src/xyseg.c
new file mode 100755
index 0000000..38c99ac
--- /dev/null
+++ b/src/xyseg.c
@@ -0,0 +1,830 @@
+/*
+
+  xyseg.c
+
+  Computation with line segments
+
+  xysegint     compute intersections between line segments
+
+  $Revision: 1.19 $     $Date: 2013/09/18 04:59:17 $
+
+ */
+
+#include <R.h>
+#include <Rdefines.h>
+#include <Rmath.h>
+#include <R_ext/Utils.h>
+#include <math.h>
+
+#include "chunkloop.h"
+
+#define NIETS -1.0
+
+#undef DEBUG 
+#define INSIDE01(X,E) (X * (1.0 - X) >= -E)
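+
+/*
+   NIETS (Dutch for "nothing") is the sentinel written to output
+   entries where no intersection is found.
+   INSIDE01(X,E) tests whether X lies in the closed interval
+   [0,1] up to tolerance E: the product X*(1-X) is nonnegative
+   exactly when 0 <= X <= 1, and values down to -E are accepted.
+*/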
+
+/* 
+         --------------- PAIRS OF PSP OBJECTS ----------------------
+*/
+
+/*  
+   xysegint
+
+   Determines intersections between each pair of line segments
+   drawn from two lists of line segments.
+
+   Line segments are given as x0, y0, dx, dy
+   where (x0,y0) is the first endpoint and (dx, dy) is the vector
+   from the first to the second endpoint.
+   Points along a line segment are represented in parametric
+   coordinates, 
+            (x,y) = (x0, y0) + t * (dx, dy).
+
+   Output from xysegint() consists of five matrices xx, yy, ta, tb, ok.
+   The (i,j)-th entries in these matrices give information about the
+   intersection between the i-th segment in list 'a' and the
+   j-th segment in list 'b'. The information is
+
+       ok[i,j]  = 1 if there is an intersection
+                = 0 if not
+
+       xx[i,j]  = x coordinate of intersection
+
+       yy[i,j]  = y coordinate of intersection
+
+       ta[i,j] = parameter of intersection point
+                 relative to i-th segment in list 'a'
+
+       tb[i,j] = parameter of intersection point
+                 relative to j-th segment in list 'b'
+
+*/
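+
+/*
+   Worked example (values chosen purely for illustration):
+   segment A runs from (0,0) to (2,0), so x0a=0, y0a=0, dxa=2, dya=0;
+   segment B runs from (1,-1) to (1,1), so x0b=1, y0b=-1, dxb=0, dyb=2.
+   Then determinant = dxb*dya - dyb*dxa = 0*0 - 2*2 = -4,
+   diffx = (1-0)/(-4) = -0.25,  diffy = (-1-0)/(-4) = 0.25,
+   ta = -dyb*diffx + dxb*diffy = 0.5,
+   tb = -dya*diffx + dxa*diffy = 0.5.
+   Both parameters lie in [0,1], so the segments intersect at
+   (x0a + ta*dxa, y0a + ta*dya) = (1, 0).
+*/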
+	     
+
+void xysegint(na, x0a, y0a, dxa, dya, 
+              nb, x0b, y0b, dxb, dyb, 
+	      eps,
+              xx, yy, ta, tb, ok)
+     /* inputs (vectors of coordinates) */
+     int *na, *nb;
+     double *x0a, *y0a, *dxa, *dya, *x0b, *y0b, *dxb, *dyb;
+     /* input (tolerance for determinant) */
+     double *eps;  
+     /* outputs (matrices) */
+     double *xx, *yy, *ta, *tb;
+     int *ok;
+{ 
+  int i, j, ma, mb, ijpos, maxchunk;
+  double determinant, absdet, diffx, diffy, tta, ttb, epsilon;
+
+  ma = *na;
+  mb = *nb;
+  epsilon = *eps;
+
+  OUTERCHUNKLOOP(j, mb, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, mb, maxchunk, 8196) {
+      for(i = 0; i < ma; i++) {
+	ijpos = j * ma + i;
+	ok[ijpos] = 0;
+	xx[ijpos] = yy[ijpos] = ta[ijpos] = tb[ijpos] = NIETS;
+	determinant = dxb[j] * dya[i] - dyb[j] * dxa[i];
+	absdet = (determinant > 0) ? determinant : -determinant;
+#ifdef DEBUG
+	Rprintf("i = %d, j = %d\n", i, j);
+	Rprintf("segment A[i]: (%lf, %lf) to (%lf, %lf)\n",
+		x0a[i], y0a[i], x0a[i] + dxa[i], y0a[i] + dya[i]);
+	Rprintf("segment B[j]: (%lf, %lf) to (%lf, %lf)\n",
+		x0b[j], y0b[j], x0b[j] + dxb[j], y0b[j] + dyb[j]);
+	Rprintf("determinant=%lf\n", determinant);
+#endif	
+	if(absdet > epsilon) {
+	  diffx = (x0b[j] - x0a[i])/determinant;
+	  diffy = (y0b[j] - y0a[i])/determinant;
+	  ta[ijpos] = tta = - dyb[j] * diffx + dxb[j] * diffy;
+	  tb[ijpos] = ttb = - dya[i] * diffx + dxa[i] * diffy;
+#ifdef DEBUG
+	  Rprintf("ta = %lf, tb = %lf\n", tta, ttb);
+#endif	
+	  if(INSIDE01(tta, epsilon) && INSIDE01(ttb, epsilon)) {
+	    /* intersection */
+	    ok[ijpos] = 1;
+	    xx[ijpos] = x0a[i] + tta * dxa[i];
+	    yy[ijpos] = y0a[i] + tta * dya[i];
+#ifdef DEBUG
+	    Rprintf("segments intersect at (%lf, %lf)\n", xx[ijpos], yy[ijpos]);
+#endif	
+	  }
+	}
+      }
+    }
+  }
+}
+
+/* 
+   Stripped-down version of xysegint that just returns logical matrix 
+*/
+
+void xysi(na, x0a, y0a, dxa, dya, 
+              nb, x0b, y0b, dxb, dyb, 
+	      eps,
+              ok)
+     /* inputs (vectors of coordinates) */
+     int *na, *nb;
+     double *x0a, *y0a, *dxa, *dya, *x0b, *y0b, *dxb, *dyb;
+     /* input (tolerance for determinant) */
+     double *eps;  
+     /* outputs (matrices) */
+     int *ok;
+{ 
+  int i, j, ma, mb, ijpos, maxchunk;
+  double determinant, absdet, diffx, diffy, tta, ttb, epsilon;
+
+  ma = *na;
+  mb = *nb;
+  epsilon = *eps;
+
+  OUTERCHUNKLOOP(j, mb, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, mb, maxchunk, 8196) {
+      for(i = 0; i < ma; i++) {
+	ijpos = j * ma + i;
+	ok[ijpos] = 0;
+	determinant = dxb[j] * dya[i] - dyb[j] * dxa[i];
+	absdet = (determinant > 0) ? determinant : -determinant;
+	if(absdet > epsilon) {
+	  diffx = (x0b[j] - x0a[i])/determinant;
+	  diffy = (y0b[j] - y0a[i])/determinant;
+	  tta = - dyb[j] * diffx + dxb[j] * diffy;
+	  ttb = - dya[i] * diffx + dxa[i] * diffy;
+	  if(INSIDE01(tta, epsilon) && INSIDE01(ttb, epsilon)) {
+	    /* intersection */
+	    ok[ijpos] = 1;
+	  }
+	}
+      }
+    }
+  }
+}
+
+/* 
+   Test whether there is at least one intersection
+*/
+
+void xysiANY(na, x0a, y0a, dxa, dya, 
+		nb, x0b, y0b, dxb, dyb, 
+		eps,
+		ok)
+     /* inputs (vectors of coordinates) */
+     int *na, *nb;
+     double *x0a, *y0a, *dxa, *dya, *x0b, *y0b, *dxb, *dyb;
+     /* input (tolerance for determinant) */
+     double *eps;  
+     /* output (single logical value) */
+     int *ok;
+{ 
+  int i, j, ma, mb, maxchunk;
+  double determinant, absdet, diffx, diffy, tta, ttb, epsilon;
+
+  *ok = 0;
+  ma = *na;
+  mb = *nb;
+  epsilon = *eps;
+
+  OUTERCHUNKLOOP(j, mb, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, mb, maxchunk, 8196) {
+      for(i = 0; i < ma; i++) {
+	determinant = dxb[j] * dya[i] - dyb[j] * dxa[i];
+	absdet = (determinant > 0) ? determinant : -determinant;
+	if(absdet > epsilon) {
+	  diffx = (x0b[j] - x0a[i])/determinant;
+	  diffy = (y0b[j] - y0a[i])/determinant;
+	  tta = - dyb[j] * diffx + dxb[j] * diffy;
+	  ttb = - dya[i] * diffx + dxa[i] * diffy;
+	  if(INSIDE01(tta, epsilon) && INSIDE01(ttb, epsilon)) {
+	    /* intersection */
+	    *ok = 1;
+	    return;
+	  }
+	}
+      }
+    }
+  }
+}
+
+/* 
+    Analogue of xysegint
+    when segments in list 'a' are infinite vertical lines
+*/
+
+void xysegVslice(na, xa,  
+		 nb, x0b, y0b, dxb, dyb, 
+		 eps,
+		 yy, ok)
+     /* inputs (vectors of coordinates) */
+     int *na, *nb;
+     double *xa, *x0b, *y0b, *dxb, *dyb;
+     /* input (tolerance for determinant) */
+     double *eps;  
+     /* outputs (matrices) */
+     double *yy;
+     int *ok;
+{ 
+  int i, j, ma, mb, ijpos, maxchunk;
+  double diffx0, diffx1, width, abswidth, epsilon;
+  int notvertical;
+
+  ma = *na;
+  mb = *nb;
+  epsilon = *eps;
+
+  OUTERCHUNKLOOP(j, mb, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, mb, maxchunk, 8196) {
+      /* determine whether segment j is safely non-vertical */
+      width = dxb[j];
+      abswidth = (width > 0) ? width : -width;
+      notvertical = (abswidth > epsilon);
+    
+      for(i = 0; i < ma; i++) {
+	ijpos = j * ma + i;
+	ok[ijpos] = 0;
+	yy[ijpos] = NIETS;
+	/* test whether vertical line i separates endpoints of segment j */
+	diffx0 = xa[i] - x0b[j];
+	diffx1 = diffx0 - width;
+	if(diffx0 * diffx1 <= 0) {
+	  /* intersection */
+	  ok[ijpos] = 1;
+	  /* compute y-coordinate of intersection point */
+	  if(notvertical) {
+	    yy[ijpos] = y0b[j] + diffx0 * dyb[j]/width;
+	  } else {
+	    /* vertical or nearly-vertical segment: pick midpoint */	  
+	    yy[ijpos] = y0b[j] + dyb[j]/2.0;
+	  }
+	}
+      }
+    }
+  }
+}
+
+/* 
+    -------------- ONE PSP OBJECT ----------------------------
+*/
+	 
+
+/*
+
+    Similar to xysegint,
+    but computes intersections between all pairs of segments
+    in a single list, excluding the diagonal comparisons of course
+
+*/
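+
+/*
+   Results are stored symmetrically: entries (i,j) and (j,i) of
+   each output matrix describe the same intersection, with ti and
+   tj swapped accordingly, and the diagonal is filled with the
+   sentinel value NIETS.
+*/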
+
+void xysegXint(n, x0, y0, dx, dy, 
+	      eps,
+              xx, yy, ti, tj, ok)
+     /* inputs (vectors of coordinates) */
+     int *n;
+     double *x0, *y0, *dx, *dy;
+     /* input (tolerance for determinant) */
+     double *eps;  
+     /* outputs (matrices) */
+     double *xx, *yy, *ti, *tj;
+     int *ok;
+{ 
+  int i, j, m, mm1, ijpos, jipos, iipos, maxchunk;
+  double determinant, absdet, diffx, diffy, tti, ttj, epsilon;
+
+  m = *n;
+  epsilon = *eps;
+ 
+  mm1 = m - 1;
+  OUTERCHUNKLOOP(j, mm1, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, mm1, maxchunk, 8196) {
+      for(i = j+1; i < m; i++) {
+	ijpos = j * m + i;
+	jipos = i * m + j;
+	ok[ijpos] = ok[jipos] = 0;
+	xx[ijpos] = yy[ijpos] = ti[ijpos] = ti[jipos] = NIETS;
+	xx[jipos] = yy[jipos] = tj[ijpos] = tj[jipos] = NIETS;
+	determinant = dx[j] * dy[i] - dy[j] * dx[i];
+	absdet = (determinant > 0) ? determinant : -determinant;
+	if(absdet > epsilon) {
+	  diffx = (x0[j] - x0[i])/determinant;
+	  diffy = (y0[j] - y0[i])/determinant;
+	  ti[ijpos] = tti = - dy[j] * diffx + dx[j] * diffy;
+	  tj[ijpos] = ttj = - dy[i] * diffx + dx[i] * diffy;
+	  tj[jipos] = ti[ijpos];
+	  ti[jipos] = tj[ijpos];
+	  if(INSIDE01(tti, epsilon) && INSIDE01(ttj, epsilon)) {
+	    ok[ijpos] = ok[jipos] = 1;
+	    xx[ijpos] = xx[jipos] = x0[i] + tti * dx[i];
+	    yy[ijpos] = yy[jipos] = y0[i] + tti * dy[i];
+	  }
+	}
+      }
+    }
+  }
+
+  /* assign diagonal */
+  for(i = 0; i < m; i++) {
+    iipos = i * m + i;
+    ok[iipos] = 0;
+    xx[iipos] = yy[iipos] = ti[iipos] = tj[iipos] = NIETS;
+  }
+
+}
+	 
+/*
+
+    Reduced version of xysegXint that returns logical matrix 'ok' only
+
+*/
+
+void xysxi(n, x0, y0, dx, dy, 
+	      eps,
+              ok)
+     /* inputs (vectors of coordinates) */
+     int *n;
+     double *x0, *y0, *dx, *dy;
+     /* input (tolerance for determinant) */
+     double *eps;  
+     /* outputs (matrices) */
+     int *ok;
+{ 
+  int i, j, m, mm1, ijpos, jipos, iipos, maxchunk;
+  double determinant, absdet, diffx, diffy, tti, ttj, epsilon;
+
+  m = *n;
+  epsilon = *eps;
+ 
+  mm1 = m - 1;
+  OUTERCHUNKLOOP(j, mm1, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, mm1, maxchunk, 8196) {
+      for(i = j+1; i < m; i++) {
+	ijpos = j * m + i;
+	jipos = i * m + j;
+	ok[ijpos] = ok[jipos] = 0;
+	determinant = dx[j] * dy[i] - dy[j] * dx[i];
+	absdet = (determinant > 0) ? determinant : -determinant;
+	if(absdet > epsilon) {
+	  diffx = (x0[j] - x0[i])/determinant;
+	  diffy = (y0[j] - y0[i])/determinant;
+	  tti = - dy[j] * diffx + dx[j] * diffy;
+	  ttj = - dy[i] * diffx + dx[i] * diffy;
+	  if(INSIDE01(tti, epsilon) && INSIDE01(ttj, epsilon)) {
+	    ok[ijpos] = ok[jipos] = 1;
+	  }
+	}
+      }
+    }
+  }
+
+  /* assign diagonal */
+  for(i = 0; i < m; i++) {
+    iipos = i * m + i;
+    ok[iipos] = 0;
+  }
+
+}
+
+/*
+   ---------------------- ONE CLOSED POLYGON ------------------------
+*/
+	 
+/*
+
+    Identify self-intersections in a closed polygon
+
+    (Similar to xysegXint,
+    but does not compare segments which are cyclically adjacent in the list)
+
+*/
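+
+/*
+   Cyclically adjacent segments share an endpoint, which would
+   always register as a spurious "intersection", so they are
+   excluded. Example (illustrative): the bow-tie polygon with
+   vertices (0,0), (1,1), (1,0), (0,1) has a proper
+   self-intersection at (0.5, 0.5), where segment 0 from (0,0)
+   to (1,1) crosses segment 2 from (1,0) to (0,1).
+*/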
+
+void Cxypolyselfint(n, x0, y0, dx, dy, 
+	      eps,
+              xx, yy, ti, tj, ok)
+     /* inputs (vectors of coordinates) */
+     int *n;
+     double *x0, *y0, *dx, *dy;
+     /* input (tolerance for determinant) */
+     double *eps;  
+     /* outputs (matrices) */
+     double *xx, *yy, *ti, *tj;
+     int *ok;
+{ 
+  int i, j, k, m, m2, mm1, mm2, mstop, ijpos, jipos, maxchunk;
+  double determinant, absdet, diffx, diffy, tti, ttj, epsilon;
+
+  m = *n;
+  epsilon = *eps;
+  m2 = m * m;
+
+  /* initialise matrices */
+  
+  for(k = 0; k < m2; k++) {
+    ok[k] = 0;
+    xx[k] = yy[k] = ti[k] = tj[k] = NIETS;
+  }
+
+  if(m <= 2) 
+    return;
+
+  /* Compare j with j+2, j+3, ..., m-1
+     Don't compare 0 with m-1  */
+  mm1 = m - 1;
+  mm2 = m - 2;
+  OUTERCHUNKLOOP(j, mm2, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, mm2, maxchunk, 8196) {
+      mstop = (j > 0) ? m : mm1;
+      for(i = j+2; i < mstop; i++) {
+	ijpos = j * m + i;
+	jipos = i * m + j;
+	determinant = dx[j] * dy[i] - dy[j] * dx[i];
+	absdet = (determinant > 0) ? determinant : -determinant;
+	if(absdet > epsilon) {
+	  diffx = (x0[j] - x0[i])/determinant;
+	  diffy = (y0[j] - y0[i])/determinant;
+	  ti[ijpos] = tti = - dy[j] * diffx + dx[j] * diffy;
+	  tj[ijpos] = ttj = - dy[i] * diffx + dx[i] * diffy;
+	  tj[jipos] = ti[ijpos];
+	  ti[jipos] = tj[ijpos];
+	  if(INSIDE01(tti, epsilon) && INSIDE01(ttj, epsilon)) {
+	    ok[ijpos] = ok[jipos] = 1;
+	    xx[ijpos] = xx[jipos] = x0[i] + tti * dx[i];
+	    yy[ijpos] = yy[jipos] = y0[i] + tti * dy[i];
+	  }
+	}
+      }
+    }
+  }
+}
+	 
+
+/*
+  Just determines whether there is self-intersection
+  (exits quicker & uses less space)
+*/
+
+
+void xypsi(n, x0, y0, dx, dy, xsep, ysep, eps, proper, answer)
+     /* inputs (vectors of coordinates) */
+     int *n;
+     double *x0, *y0, *dx, *dy;
+     /* inputs (distances beyond which intersection is impossible) */
+     double *xsep, *ysep;
+     /* input (tolerance for determinant) */
+     double *eps;  
+     /* input (flag) */
+     int *proper;
+     /* output */
+     int *answer;
+{ 
+  int i, j, m, mm1, mm2, mstop, prop, maxchunk;
+  double determinant, absdet, diffx, diffy, tti, ttj, epsilon;
+  double Xsep, Ysep;
+
+  m = *n;
+  prop = *proper;
+  Xsep = *xsep;
+  Ysep = *ysep;
+  epsilon = *eps;
+
+  *answer = 0;
+
+  if(m <= 2) 
+    return;
+
+  /* Compare j with j+2, j+3, ..., m-1
+     Don't compare 0 with m-1  */
+  mm1 = m - 1;
+  mm2 = m - 2;
+  OUTERCHUNKLOOP(j, mm2, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, mm2, maxchunk, 8196) {
+      mstop = (j > 0) ? m : mm1;
+      for(i = j+2; i < mstop; i++) {
+	diffx = x0[j] - x0[i];
+	diffy = y0[j] - y0[i];
+	if(diffx < Xsep && diffx > -Xsep && diffy < Ysep && diffy > -Ysep) {
+	  determinant = dx[j] * dy[i] - dy[j] * dx[i];
+	  absdet = (determinant > 0) ? determinant : -determinant;
+	  if(absdet > epsilon) {
+	    diffx = diffx/determinant;
+	    diffy = diffy/determinant;
+	    tti = - dy[j] * diffx + dx[j] * diffy;
+	    ttj = - dy[i] * diffx + dx[i] * diffy;
+	    if(INSIDE01(tti, epsilon) && INSIDE01(ttj, epsilon)) {
+              /* intersection occurs */
+	      if(prop == 0 ||
+		 (tti != 0.0 && tti != 1.0) || 
+		 (ttj != 0.0 && ttj != 1.0)) {
+              /* proper intersection */
+		*answer = 1;
+		return;
+	      }
+	    }
+	  }
+	}
+      }
+    }
+  }
+}
+
+	 
+/*
+        ---------------- .Call INTERFACE  ---------------------------
+
+	Analogues of functions above, but using the .Call interface
+	and dynamic storage allocation, to save space.
+
+ */
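+
+/*
+   Hypothetical call from the R side (names are for illustration
+   only; the actual wrapper lives in the package's R code):
+
+       z <- .Call("Cxysegint", x0a, y0a, dxa, dya,
+                               x0b, y0b, dxb, dyb, eps)
+
+   returns a list of six vectors (ia, jb, ta, tb, x, y), one entry
+   per intersection found, with ia and jb in the 0-based C
+   indexing convention.
+*/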
+
+SEXP Cxysegint(SEXP x0a, 
+               SEXP y0a, 
+               SEXP dxa, 
+               SEXP dya, 
+               SEXP x0b, 
+               SEXP y0b, 
+               SEXP dxb, 
+               SEXP dyb, 
+	       SEXP eps) 
+{
+  int i, j, k, na, nb;
+  double determinant, absdet, diffx, diffy, tta, ttb;
+
+  int nout, noutmax, newmax, maxchunk;
+  double epsilon;
+  double *x0A, *y0A, *dxA, *dyA, *x0B, *y0B, *dxB, *dyB;
+  double *ta, *tb, *x, *y;
+  int *ia, *jb;
+  SEXP out, iAout, jBout, tAout, tBout, xout, yout;
+  double *tAoutP, *tBoutP, *xoutP, *youtP;
+  int *iAoutP, *jBoutP;
+
+  PROTECT(x0a = AS_NUMERIC(x0a));
+  PROTECT(y0a = AS_NUMERIC(y0a));
+  PROTECT(dxa = AS_NUMERIC(dxa));
+  PROTECT(dya = AS_NUMERIC(dya));
+  PROTECT(x0b = AS_NUMERIC(x0b));
+  PROTECT(y0b = AS_NUMERIC(y0b));
+  PROTECT(dxb = AS_NUMERIC(dxb));
+  PROTECT(dyb = AS_NUMERIC(dyb));
+  PROTECT(eps = AS_NUMERIC(eps));
+  /* that's 9 protected */
+
+
+  /* get pointers */
+  x0A = NUMERIC_POINTER(x0a);
+  y0A = NUMERIC_POINTER(y0a);
+  dxA = NUMERIC_POINTER(dxa);
+  dyA = NUMERIC_POINTER(dya);
+  x0B = NUMERIC_POINTER(x0b);
+  y0B = NUMERIC_POINTER(y0b);
+  dxB = NUMERIC_POINTER(dxb);
+  dyB = NUMERIC_POINTER(dyb);
+
+  /* determine length of vectors */
+  na = LENGTH(x0a);
+  nb = LENGTH(x0b);
+  epsilon = *(NUMERIC_POINTER(eps));
+  
+  /* guess amount of storage required for output */
+  noutmax = (na > nb) ? na : nb;
+  nout = 0;
+  ia = (int *) R_alloc(noutmax, sizeof(int));
+  jb = (int *) R_alloc(noutmax, sizeof(int));
+  ta = (double *) R_alloc(noutmax, sizeof(double));
+  tb = (double *) R_alloc(noutmax, sizeof(double));
+  x = (double *) R_alloc(noutmax, sizeof(double));
+  y = (double *) R_alloc(noutmax, sizeof(double));
+
+  /* scan data and collect intersections */
+  OUTERCHUNKLOOP(j, nb, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, nb, maxchunk, 8196) {
+      for(i = 0; i < na; i++) {
+	determinant = dxB[j] * dyA[i] - dyB[j] * dxA[i];
+	absdet = (determinant > 0) ? determinant : -determinant;
+#ifdef DEBUG
+	Rprintf("i = %d, j = %d\n", i, j);
+	Rprintf("segment A[i]: (%lf, %lf) to (%lf, %lf)\n",
+		x0A[i], y0A[i], x0A[i] + dxA[i], y0A[i] + dyA[i]);
+	Rprintf("segment B[j]: (%lf, %lf) to (%lf, %lf)\n",
+		x0B[j], y0B[j], x0B[j] + dxB[j], y0B[j] + dyB[j]);
+	Rprintf("determinant=%lf\n", determinant);
+#endif	
+	if(absdet > epsilon) {
+	  diffx = (x0B[j] - x0A[i])/determinant;
+	  diffy = (y0B[j] - y0A[i])/determinant;
+	  tta = - dyB[j] * diffx + dxB[j] * diffy;
+	  ttb = - dyA[i] * diffx + dxA[i] * diffy;
+#ifdef DEBUG
+	  Rprintf("ta = %lf, tb = %lf\n", tta, ttb);
+#endif	
+	  if(INSIDE01(tta, epsilon) && INSIDE01(ttb, epsilon)) {
+	    /* intersection */
+	    if(nout >= noutmax) {
+	      /* storage overflow - increase space */
+	      newmax = 4 * noutmax;
+	      ia = (int *) S_realloc((char *) ia, 
+				     newmax, noutmax, sizeof(int));
+	      jb = (int *) S_realloc((char *) jb, 
+				     newmax, noutmax, sizeof(int));
+	      ta = (double *) S_realloc((char *) ta, 
+					newmax, noutmax, sizeof(double));
+	      tb = (double *) S_realloc((char *) tb, 
+					newmax, noutmax, sizeof(double));
+	      x = (double *) S_realloc((char *) x, 
+				       newmax, noutmax, sizeof(double));
+	      y = (double *) S_realloc((char *) y, 
+				       newmax, noutmax, sizeof(double));
+	      noutmax = newmax;
+	    }
+	    ta[nout] = tta;
+	    tb[nout] = ttb;
+	    ia[nout] = i;
+	    jb[nout] = j;
+	    x[nout]  = x0A[i] + tta * dxA[i];
+	    y[nout]  = y0A[i] + tta * dyA[i];
+#ifdef DEBUG
+	    Rprintf("segments intersect at (%lf, %lf)\n", x[nout], y[nout]);
+#endif	
+	    ++nout;
+	  }
+	}
+      }
+    }
+  }
+  /* pack up */
+  PROTECT(iAout = NEW_INTEGER(nout));
+  PROTECT(jBout = NEW_INTEGER(nout));
+  PROTECT(tAout = NEW_NUMERIC(nout));
+  PROTECT(tBout = NEW_NUMERIC(nout));
+  PROTECT(xout = NEW_NUMERIC(nout));
+  PROTECT(yout = NEW_NUMERIC(nout));
+  /* 9 + 6 = 15 protected */
+  iAoutP = INTEGER_POINTER(iAout);
+  jBoutP = INTEGER_POINTER(jBout);
+  tAoutP = NUMERIC_POINTER(tAout);
+  tBoutP = NUMERIC_POINTER(tBout);
+  xoutP = NUMERIC_POINTER(xout);
+  youtP = NUMERIC_POINTER(yout);
+  for(k = 0; k < nout; k++) {
+    iAoutP[k] = ia[k];
+    jBoutP[k] = jb[k];
+    tAoutP[k] = ta[k];
+    tBoutP[k] = tb[k];
+    xoutP[k]  = x[k];
+    youtP[k]  = y[k];
+  }
+  PROTECT(out = NEW_LIST(6));
+  /* 15 + 1 = 16 protected */
+  SET_VECTOR_ELT(out, 0, iAout);
+  SET_VECTOR_ELT(out, 1, jBout);
+  SET_VECTOR_ELT(out, 2, tAout);
+  SET_VECTOR_ELT(out, 3, tBout);
+  SET_VECTOR_ELT(out, 4, xout);
+  SET_VECTOR_ELT(out, 5, yout);
+  UNPROTECT(16);
+  return(out);
+}
+
+
+/*
+
+    Similar to Cxysegint,
+    but computes intersections between all pairs of segments
+    in a single list, excluding the diagonal comparisons of course
+
+*/
+
+SEXP CxysegXint(SEXP x0, 
+		SEXP y0, 
+		SEXP dx, 
+		SEXP dy,
+		SEXP eps)
+{ 
+  int i, j, k, n, n1;
+  double determinant, absdet, diffx, diffy, tti, ttj;
+
+  int nout, noutmax, newmax, maxchunk;
+  double epsilon;
+  double *X0, *Y0, *Dx, *Dy;
+  double *ti, *tj, *x, *y;
+  int *ii, *jj;
+  SEXP out, iout, jout, tiout, tjout, xout, yout;
+  double *tioutP, *tjoutP, *xoutP, *youtP;
+  int *ioutP, *joutP;
+
+  PROTECT(x0 = AS_NUMERIC(x0));
+  PROTECT(y0 = AS_NUMERIC(y0));
+  PROTECT(dx = AS_NUMERIC(dx));
+  PROTECT(dy = AS_NUMERIC(dy));
+  PROTECT(eps = AS_NUMERIC(eps));
+  /* that's 5 protected */
+
+  /* get pointers */
+  X0 = NUMERIC_POINTER(x0);
+  Y0 = NUMERIC_POINTER(y0);
+  Dx = NUMERIC_POINTER(dx);
+  Dy = NUMERIC_POINTER(dy);
+  
+  /* determine length of vectors */
+  n = LENGTH(x0);
+  epsilon = *(NUMERIC_POINTER(eps));
+
+  /* guess amount of storage required for output */
+  noutmax = n;
+  nout = 0;
+  ii = (int *) R_alloc(noutmax, sizeof(int));
+  jj = (int *) R_alloc(noutmax, sizeof(int));
+  ti = (double *) R_alloc(noutmax, sizeof(double));
+  tj = (double *) R_alloc(noutmax, sizeof(double));
+  x = (double *) R_alloc(noutmax, sizeof(double));
+  y = (double *) R_alloc(noutmax, sizeof(double));
+
+  /* scan data */
+  n1 = n - 1;
+  OUTERCHUNKLOOP(j, n1, maxchunk, 8196) {
+    R_CheckUserInterrupt();
+    INNERCHUNKLOOP(j, n1, maxchunk, 8196) {
+      for(i = j+1; i < n; i++) {
+	determinant = Dx[j] * Dy[i] - Dy[j] * Dx[i];
+	absdet = (determinant > 0) ? determinant : -determinant;
+	if(absdet > epsilon) {
+	  diffx = (X0[j] - X0[i])/determinant;
+	  diffy = (Y0[j] - Y0[i])/determinant;
+	  tti = - Dy[j] * diffx + Dx[j] * diffy;
+	  ttj = - Dy[i] * diffx + Dx[i] * diffy;
+	  if(INSIDE01(tti,epsilon) && INSIDE01(ttj,epsilon)) {
+	    /* intersection */
+	    if(nout >= noutmax) {
+	      /* storage overflow - increase space */
+	      newmax = 4 * noutmax;
+	      ii = (int *) S_realloc((char *) ii, 
+				     newmax, noutmax, sizeof(int));
+	      jj = (int *) S_realloc((char *) jj, 
+				     newmax, noutmax, sizeof(int));
+	      ti = (double *) S_realloc((char *) ti, 
+					newmax, noutmax, sizeof(double));
+	      tj = (double *) S_realloc((char *) tj, 
+					newmax, noutmax, sizeof(double));
+	      x = (double *) S_realloc((char *) x, 
+				       newmax, noutmax, sizeof(double));
+	      y = (double *) S_realloc((char *) y, 
+				       newmax, noutmax, sizeof(double));
+	      noutmax = newmax;
+	    }
+	    ti[nout] = tti;
+	    tj[nout] = ttj;
+	    ii[nout] = i;
+	    jj[nout] = j;
+	    x[nout]  = X0[i] + tti * Dx[i];
+	    y[nout]  = Y0[i] + tti * Dy[i];
+	    ++nout;
+	  }
+	}
+      }
+    }
+  }
+
+  /* pack up */
+  PROTECT(iout = NEW_INTEGER(nout));
+  PROTECT(jout = NEW_INTEGER(nout));
+  PROTECT(tiout = NEW_NUMERIC(nout));
+  PROTECT(tjout = NEW_NUMERIC(nout));
+  PROTECT(xout = NEW_NUMERIC(nout));
+  PROTECT(yout = NEW_NUMERIC(nout));
+  /* 5 + 6 = 11 protected */
+  ioutP = INTEGER_POINTER(iout);
+  joutP = INTEGER_POINTER(jout);
+  tioutP = NUMERIC_POINTER(tiout);
+  tjoutP = NUMERIC_POINTER(tjout);
+  xoutP = NUMERIC_POINTER(xout);
+  youtP = NUMERIC_POINTER(yout);
+  for(k = 0; k < nout; k++) {
+    ioutP[k] = ii[k];
+    joutP[k] = jj[k];
+    tioutP[k] = ti[k];
+    tjoutP[k] = tj[k];
+    xoutP[k]  = x[k];
+    youtP[k]  = y[k];
+  }
+  PROTECT(out = NEW_LIST(6));
+  /* 11 + 1 = 12 protected */
+  SET_VECTOR_ELT(out, 0, iout);
+  SET_VECTOR_ELT(out, 1, jout);
+  SET_VECTOR_ELT(out, 2, tiout);
+  SET_VECTOR_ELT(out, 3, tjout);
+  SET_VECTOR_ELT(out, 4, xout);
+  SET_VECTOR_ELT(out, 5, yout);
+  UNPROTECT(12);
+  return(out);
+}
diff --git a/src/yesno.h b/src/yesno.h
new file mode 100644
index 0000000..85b3f59
--- /dev/null
+++ b/src/yesno.h
@@ -0,0 +1,9 @@
+/*
+   yesno.h 
+
+*/
+
+#ifndef YES
+#define YES (0 == 0)
+#define NO (!YES)
+#endif
diff --git a/tests/badwindow.txt b/tests/badwindow.txt
new file mode 100644
index 0000000..bfdf9eb
--- /dev/null
+++ b/tests/badwindow.txt
@@ -0,0 +1,1203 @@
+      x       y  i
+ 486959 6497047  1
+ 487223 6497012  1
+ 487293 6497170  1
+ 487434 6497187  1
+ 487504 6497047  1
+ 487539 6496959  1
+ 487557 6496889  1
+ 488875 6496924  1
+ 488945 6496643  1
+ 490808 6496643  1
+ 490737 6496854  1
+ 490298 6497644  1
+ 490140 6498541  1
+ 490298 6498857  1
+ 490491 6497855  1
+ 490948 6496854  1
+ 491036 6496555  1
+ 491950 6496537  1
+ 491282 6500298  1
+ 491282 6501546  1
+ 491124 6501792  1
+ 491124 6501985  1
+ 491563 6502319  1
+ 491493 6502740  1
+ 491475 6503355  1
+ 491686 6504375  1
+ 491616 6505324  1
+ 490772 6505675  1
+ 490526 6506237  1
+ 489683 6506237  1
+ 489490 6505605  1
+ 489578 6505359  1
+ 489191 6505078  1
+ 488892 6504023  1
+ 488910 6503795  1
+ 488716 6503812  1
+ 488611 6504568  1
+ 488031 6505201  1
+ 487522 6505042  1
+ 487522 6504919  1
+ 487486 6504849  1
+ 487416 6504884  1
+ 487399 6504814  1
+ 487346 6504832  1
+ 487240 6504638  1
+ 487117 6504515  1
+ 487117 6503935  1
+ 487276 6504006  1
+ 487346 6503971  1
+ 487399 6503865  1
+ 487486 6503812  1
+ 487574 6503777  1
+ 487557 6503689  1
+ 487082 6503303  1
+ 486994 6502266  1
+ 487205 6501159  1
+ 487117 6500526  1
+ 487188 6499437  1
+ 487012 6498259  1
+ 486924 6497029  1
+ 487186 6499396  2
+ 487182 6499396  2
+ 487186 6499426  2
+ 487186 6499396  2
+ 487126 6500589  2
+ 487126 6500476  2
+ 487156 6500476  2
+ 487156 6500176  2
+ 487186 6500176  2
+ 487186 6499462  2
+ 487117 6500526  2
+ 487126 6500589  2
+ 487156 6500686  2
+ 487140 6500686  2
+ 487156 6500805  2
+ 487156 6500686  2
+ 487186 6500986  2
+ 487181 6500986  2
+ 487186 6501021  2
+ 487186 6500986  2
+ 487216 6501076  2
+ 487194 6501076  2
+ 487205 6501159  2
+ 487187 6501256  2
+ 487216 6501256  2
+ 487216 6501076  2
+ 487186 6501406  2
+ 487186 6501260  2
+ 487158 6501406  2
+ 487186 6501406  2
+ 487156 6501466  2
+ 487156 6501417  2
+ 487147 6501466  2
+ 487156 6501466  2
+ 487096 6501766  2
+ 487096 6501732  2
+ 487090 6501766  2
+ 487096 6501766  2
+ 487066 6502936  2
+ 487066 6502636  2
+ 487096 6502636  2
+ 487096 6502606  2
+ 487156 6502606  2
+ 487156 6502486  2
+ 487066 6502486  2
+ 487066 6502456  2
+ 487036 6502456  2
+ 487036 6502156  2
+ 487015 6502156  2
+ 486994 6502266  2
+ 487064 6503086  2
+ 487066 6503086  2
+ 487066 6503112  2
+ 487066 6503116  2
+ 487156 6503116  2
+ 487156 6503026  2
+ 487126 6503026  2
+ 487126 6502996  2
+ 487096 6502996  2
+ 487096 6502936  2
+ 487066 6502936  2
+ 488956 6501496  3
+ 488956 6501256  3
+ 488926 6501256  3
+ 488926 6501046  3
+ 488896 6501046  3
+ 488896 6500806  3
+ 488866 6500806  3
+ 488866 6500506  3
+ 488836 6500506  3
+ 488836 6500236  3
+ 488806 6500236  3
+ 488806 6499996  3
+ 488776 6499996  3
+ 488776 6499486  3
+ 488686 6499486  3
+ 488686 6499126  3
+ 488716 6499126  3
+ 488716 6499006  3
+ 488626 6499006  3
+ 488626 6499036  3
+ 488596 6499036  3
+ 488596 6499066  3
+ 488566 6499066  3
+ 488566 6499126  3
+ 488536 6499126  3
+ 488536 6499216  3
+ 488416 6499216  3
+ 488416 6499456  3
+ 488446 6499456  3
+ 488446 6499696  3
+ 488416 6499696  3
+ 488416 6499936  3
+ 488446 6499936  3
+ 488446 6500056  3
+ 488476 6500056  3
+ 488476 6500146  3
+ 488506 6500146  3
+ 488506 6500266  3
+ 488536 6500266  3
+ 488536 6500386  3
+ 488566 6500386  3
+ 488566 6500656  3
+ 488536 6500656  3
+ 488536 6500986  3
+ 488566 6500986  3
+ 488566 6501136  3
+ 488536 6501136  3
+ 488536 6501376  3
+ 488566 6501376  3
+ 488566 6501406  3
+ 488596 6501406  3
+ 488596 6501496  3
+ 488566 6501496  3
+ 488566 6501616  3
+ 488596 6501616  3
+ 488596 6501796  3
+ 488626 6501796  3
+ 488626 6502036  3
+ 488656 6502036  3
+ 488656 6502096  3
+ 488686 6502096  3
+ 488686 6502246  3
+ 488716 6502246  3
+ 488716 6502276  3
+ 488776 6502276  3
+ 488776 6502336  3
+ 488806 6502336  3
+ 488806 6502426  3
+ 488836 6502426  3
+ 488836 6502636  3
+ 488866 6502636  3
+ 488866 6502666  3
+ 488926 6502666  3
+ 488926 6502696  3
+ 488986 6502696  3
+ 488986 6502726  3
+ 489046 6502726  3
+ 489046 6502756  3
+ 489136 6502756  3
+ 489136 6502096  3
+ 489106 6502096  3
+ 489106 6501976  3
+ 489076 6501976  3
+ 489076 6501826  3
+ 489046 6501826  3
+ 489046 6501736  3
+ 489016 6501736  3
+ 489016 6501586  3
+ 488986 6501586  3
+ 488986 6501496  3
+ 490216 6502426  4
+ 490216 6502246  4
+ 490246 6502246  4
+ 490246 6502186  4
+ 490306 6502186  4
+ 490306 6501946  4
+ 490216 6501946  4
+ 490216 6502096  4
+ 490186 6502096  4
+ 490186 6502156  4
+ 490036 6502156  4
+ 490036 6502126  4
+ 489946 6502126  4
+ 489946 6502096  4
+ 489916 6502096  4
+ 489916 6502066  4
+ 489826 6502066  4
+ 489826 6502036  4
+ 489796 6502036  4
+ 489796 6501946  4
+ 489706 6501946  4
+ 489706 6502036  4
+ 489736 6502036  4
+ 489736 6502186  4
+ 489766 6502186  4
+ 489766 6502216  4
+ 489796 6502216  4
+ 489796 6502276  4
+ 489946 6502276  4
+ 489946 6502306  4
+ 489976 6502306  4
+ 489976 6502336  4
+ 490006 6502336  4
+ 490006 6502366  4
+ 490036 6502366  4
+ 490036 6502426  4
+ 488642 6504346  5
+ 488716 6503812  5
+ 488910 6503795  5
+ 488892 6504023  5
+ 488926 6504143  5
+ 488926 6503806  5
+ 488956 6503806  5
+ 488956 6503686  5
+ 488986 6503686  5
+ 488986 6503566  5
+ 489016 6503566  5
+ 489016 6503476  5
+ 489046 6503476  5
+ 489046 6503386  5
+ 489076 6503386  5
+ 489076 6503296  5
+ 489106 6503296  5
+ 489106 6503206  5
+ 489136 6503206  5
+ 489136 6503086  5
+ 489166 6503086  5
+ 489166 6502846  5
+ 489046 6502846  5
+ 489046 6503086  5
+ 488926 6503086  5
+ 488926 6503236  5
+ 488746 6503236  5
+ 488746 6503266  5
+ 488536 6503266  5
+ 488536 6503296  5
+ 488506 6503296  5
+ 488506 6503326  5
+ 488416 6503326  5
+ 488416 6503386  5
+ 488326 6503386  5
+ 488326 6503506  5
+ 488356 6503506  5
+ 488356 6503536  5
+ 488416 6503536  5
+ 488416 6503566  5
+ 488446 6503566  5
+ 488446 6503656  5
+ 488626 6503656  5
+ 488626 6503746  5
+ 488656 6503746  5
+ 488656 6503776  5
+ 488686 6503776  5
+ 488686 6503956  5
+ 488656 6503956  5
+ 488656 6503986  5
+ 488626 6503986  5
+ 488626 6504046  5
+ 488596 6504046  5
+ 488596 6504076  5
+ 488566 6504076  5
+ 488566 6504106  5
+ 488536 6504106  5
+ 488536 6504166  5
+ 488506 6504166  5
+ 488506 6504226  5
+ 488476 6504226  5
+ 488476 6504346  5
+ 489886 6503386  6
+ 489886 6503146  6
+ 489916 6503146  6
+ 489916 6503056  6
+ 489736 6503056  6
+ 489736 6503206  6
+ 489706 6503206  6
+ 489706 6503266  6
+ 489676 6503266  6
+ 489676 6503356  6
+ 489796 6503356  6
+ 489796 6503596  6
+ 489916 6503596  6
+ 489916 6503386  6
+ 490006 6505666  7
+ 489916 6505666  7
+ 489916 6505756  7
+ 490006 6505756  7
+ 487426 6504856  8
+ 487396 6504796  8
+ 487276 6504676  8
+ 490786 6505366  9
+ 490786 6505336  9
+ 491176 6505336  9
+ 491176 6505276  9
+ 491236 6505276  9
+ 491236 6505126  9
+ 491266 6505126  9
+ 491266 6504976  9
+ 491236 6504976  9
+ 491236 6504916  9
+ 491206 6504916  9
+ 491206 6504886  9
+ 491176 6504886  9
+ 491176 6504856  9
+ 491086 6504856  9
+ 491086 6504886  9
+ 490996 6504886  9
+ 490996 6504916  9
+ 490966 6504916  9
+ 490966 6504946  9
+ 490936 6504946  9
+ 490936 6505006  9
+ 490876 6505006  9
+ 490876 6505186  9
+ 490846 6505186  9
+ 490846 6505246  9
+ 490726 6505246  9
+ 490726 6505276  9
+ 490696 6505276  9
+ 490696 6505366  9
+ 487906 6505066 10
+ 487906 6505036 10
+ 487936 6505036 10
+ 487936 6505006 10
+ 487966 6505006 10
+ 487966 6504616 10
+ 487906 6504616 10
+ 487906 6504586 10
+ 487846 6504586 10
+ 487846 6504556 10
+ 487756 6504556 10
+ 487756 6504526 10
+ 487606 6504526 10
+ 487606 6504646 10
+ 487636 6504646 10
+ 487636 6504766 10
+ 487666 6504766 10
+ 487666 6504886 10
+ 487726 6504886 10
+ 487726 6504976 10
+ 487756 6504976 10
+ 487756 6505006 10
+ 487786 6505006 10
+ 487786 6505036 10
+ 487816 6505036 10
+ 487816 6505066 10
+ 491416 6504856 11
+ 491326 6504856 11
+ 491326 6505006 11
+ 491416 6505006 11
+ 491386 6504736 12
+ 491266 6504736 12
+ 491266 6504826 12
+ 491386 6504826 12
+ 487456 6504586 13
+ 487456 6504436 13
+ 487366 6504436 13
+ 487366 6504466 13
+ 487306 6504466 13
+ 487306 6504556 13
+ 487336 6504556 13
+ 487336 6504586 13
+ 487396 6504586 13
+ 487396 6504676 13
+ 487486 6504676 13
+ 487486 6504586 13
+ 489226 6504646 14
+ 489226 6504616 14
+ 489256 6504616 14
+ 489256 6504556 14
+ 489286 6504556 14
+ 489286 6504466 14
+ 489106 6504466 14
+ 489106 6504586 14
+ 489136 6504586 14
+ 489136 6504646 14
+ 488296 6504406 15
+ 488296 6504316 15
+ 488206 6504316 15
+ 488206 6504376 15
+ 488176 6504376 15
+ 488176 6504466 15
+ 488206 6504466 15
+ 488206 6504496 15
+ 488236 6504496 15
+ 488236 6504526 15
+ 488326 6504526 15
+ 488326 6504406 15
+ 490666 6504466 16
+ 490666 6504376 16
+ 490696 6504376 16
+ 490696 6504316 16
+ 490756 6504316 16
+ 490756 6504256 16
+ 490786 6504256 16
+ 490786 6504166 16
+ 490696 6504166 16
+ 490696 6504226 16
+ 490576 6504226 16
+ 490576 6504286 16
+ 490546 6504286 16
+ 490546 6504466 16
+ 489346 6503986 17
+ 489346 6504076 17
+ 489406 6504076 17
+ 489406 6504166 17
+ 489526 6504166 17
+ 489526 6504256 17
+ 489496 6504256 17
+ 489496 6504346 17
+ 489586 6504346 17
+ 489586 6504256 17
+ 489646 6504256 17
+ 489646 6504196 17
+ 489706 6504196 17
+ 489706 6504016 17
+ 489676 6504016 17
+ 489676 6503896 17
+ 489586 6503896 17
+ 489586 6503956 17
+ 489496 6503956 17
+ 489496 6503986 17
+ 489346 6503986 17
+ 489346 6503986 17
+ 489346 6503836 17
+ 489376 6503836 17
+ 489376 6503746 17
+ 489346 6503746 17
+ 489346 6503566 17
+ 489256 6503566 17
+ 489256 6503506 17
+ 489226 6503506 17
+ 489226 6503416 17
+ 489196 6503416 17
+ 489196 6503386 17
+ 489076 6503386 17
+ 489076 6503446 17
+ 489046 6503446 17
+ 489046 6503566 17
+ 489016 6503566 17
+ 489016 6503626 17
+ 488986 6503626 17
+ 488986 6503836 17
+ 489106 6503836 17
+ 489106 6504106 17
+ 489076 6504106 17
+ 489076 6504226 17
+ 489196 6504226 17
+ 489196 6504196 17
+ 489226 6504196 17
+ 489226 6504076 17
+ 489256 6504076 17
+ 489256 6503986 17
+ 487936 6504166 18
+ 487936 6504136 18
+ 487966 6504136 18
+ 487966 6504016 18
+ 487846 6504016 18
+ 487846 6504046 18
+ 487816 6504046 18
+ 487816 6504136 18
+ 487846 6504136 18
+ 487846 6504166 18
+ 488596 6504046 19
+ 488596 6503986 19
+ 488626 6503986 19
+ 488626 6503896 19
+ 488596 6503896 19
+ 488596 6503716 19
+ 488506 6503716 19
+ 488506 6503806 19
+ 488476 6503806 19
+ 488476 6503986 19
+ 488506 6503986 19
+ 488506 6504046 19
+ 487396 6503896 20
+ 487486 6503836 20
+ 487516 6503806 20
+ 487126 6503956 20
+ 487216 6503986 20
+ 488296 6503806 21
+ 488296 6503776 21
+ 488326 6503776 21
+ 488326 6503746 21
+ 488356 6503746 21
+ 488356 6503626 21
+ 488236 6503626 21
+ 488236 6503656 21
+ 488206 6503656 21
+ 488206 6503686 21
+ 488146 6503686 21
+ 488146 6503776 21
+ 488176 6503776 21
+ 488176 6503806 21
+ 491146 6503686 22
+ 491146 6503626 22
+ 491176 6503626 22
+ 491176 6503536 22
+ 491146 6503536 22
+ 491146 6503476 22
+ 491026 6503476 22
+ 491026 6503656 22
+ 491056 6503656 22
+ 491056 6503686 22
+ 487816 6503506 23
+ 487816 6503476 23
+ 487846 6503476 23
+ 487846 6503386 23
+ 487936 6503386 23
+ 487936 6503356 23
+ 487966 6503356 23
+ 487966 6503296 23
+ 488026 6503296 23
+ 488026 6503236 23
+ 488086 6503236 23
+ 488086 6503116 23
+ 487936 6503116 23
+ 487936 6503146 23
+ 487846 6503146 23
+ 487846 6503176 23
+ 487816 6503176 23
+ 487816 6503206 23
+ 487786 6503206 23
+ 487786 6503386 23
+ 487696 6503386 23
+ 487696 6503356 23
+ 487606 6503356 23
+ 487606 6503506 23
+ 490036 6503506 24
+ 490036 6503386 24
+ 490096 6503386 24
+ 490096 6503266 24
+ 490066 6503266 24
+ 490066 6503176 24
+ 490096 6503176 24
+ 490096 6503026 24
+ 489976 6503026 24
+ 489976 6503086 24
+ 489946 6503086 24
+ 489946 6503146 24
+ 489916 6503146 24
+ 489916 6503386 24
+ 489946 6503386 24
+ 489946 6503506 24
+ 489496 6503356 25
+ 489406 6503356 25
+ 489406 6503446 25
+ 489496 6503446 25
+ 488386 6503356 26
+ 488386 6503326 26
+ 488416 6503326 26
+ 488416 6503236 26
+ 488326 6503236 26
+ 488326 6503266 26
+ 488296 6503266 26
+ 488296 6503356 26
+ 490726 6503206 27
+ 490636 6503206 27
+ 490636 6503326 27
+ 490726 6503326 27
+ 489496 6503056 28
+ 489406 6503056 28
+ 489406 6503176 28
+ 489526 6503176 28
+ 489526 6503086 28
+ 489496 6503086 28
+ 490726 6503086 29
+ 490726 6502996 29
+ 490756 6502996 29
+ 490756 6502876 29
+ 490666 6502876 29
+ 490666 6502936 29
+ 490636 6502936 29
+ 490636 6503086 29
+ 491176 6502996 30
+ 491086 6502996 30
+ 491086 6503086 30
+ 491176 6503086 30
+ 487786 6503056 31
+ 487786 6503026 31
+ 488116 6503026 31
+ 488116 6502996 31
+ 488266 6502996 31
+ 488266 6502936 31
+ 488626 6502936 31
+ 488626 6502906 31
+ 488806 6502906 31
+ 488806 6502876 31
+ 488836 6502876 31
+ 488836 6502786 31
+ 488806 6502786 31
+ 488806 6502636 31
+ 488776 6502636 31
+ 488776 6502606 31
+ 488686 6502606 31
+ 488686 6502576 31
+ 488656 6502576 31
+ 488656 6502546 31
+ 488506 6502546 31
+ 488506 6502516 31
+ 488476 6502516 31
+ 488476 6502486 31
+ 488416 6502486 31
+ 488416 6502456 31
+ 488356 6502456 31
+ 488356 6502396 31
+ 488296 6502396 31
+ 488296 6502306 31
+ 488326 6502306 31
+ 488326 6502216 31
+ 488416 6502216 31
+ 488416 6502246 31
+ 488446 6502246 31
+ 488446 6502276 31
+ 488476 6502276 31
+ 488476 6502306 31
+ 488506 6502306 31
+ 488506 6502336 31
+ 488536 6502336 31
+ 488536 6502366 31
+ 488566 6502366 31
+ 488566 6502426 31
+ 488596 6502426 31
+ 488596 6502456 31
+ 488656 6502456 31
+ 488656 6502486 31
+ 488806 6502486 31
+ 488806 6502396 31
+ 488776 6502396 31
+ 488776 6502366 31
+ 488746 6502366 31
+ 488746 6502306 31
+ 488686 6502306 31
+ 488686 6502246 31
+ 488626 6502246 31
+ 488626 6502186 31
+ 488536 6502186 31
+ 488536 6502156 31
+ 488506 6502156 31
+ 488506 6502126 31
+ 488476 6502126 31
+ 488476 6502006 31
+ 488416 6502006 31
+ 488416 6501976 31
+ 488386 6501976 31
+ 488386 6501946 31
+ 488326 6501946 31
+ 488326 6501886 31
+ 488296 6501886 31
+ 488296 6501856 31
+ 488266 6501856 31
+ 488266 6501706 31
+ 488206 6501706 31
+ 488206 6501676 31
+ 488176 6501676 31
+ 488176 6501646 31
+ 488086 6501646 31
+ 488086 6501616 31
+ 487996 6501616 31
+ 487996 6501586 31
+ 487876 6501586 31
+ 487876 6501556 31
+ 487786 6501556 31
+ 487786 6501646 31
+ 487756 6501646 31
+ 487756 6501766 31
+ 487726 6501766 31
+ 487726 6501856 31
+ 487756 6501856 31
+ 487756 6501946 31
+ 487816 6501946 31
+ 487816 6502066 31
+ 487786 6502066 31
+ 487786 6502096 31
+ 487666 6502096 31
+ 487666 6502186 31
+ 487606 6502186 31
+ 487606 6502246 31
+ 487576 6502246 31
+ 487576 6502276 31
+ 487546 6502276 31
+ 487546 6502306 31
+ 487516 6502306 31
+ 487516 6502426 31
+ 487456 6502426 31
+ 487456 6502636 31
+ 487486 6502636 31
+ 487486 6502696 31
+ 487546 6502696 31
+ 487546 6502786 31
+ 487516 6502786 31
+ 487516 6502906 31
+ 487546 6502906 31
+ 487546 6502966 31
+ 487606 6502966 31
+ 487606 6502996 31
+ 487636 6502996 31
+ 487636 6503026 31
+ 487666 6503026 31
+ 487666 6503056 31
+ 489466 6502816 32
+ 489466 6502786 32
+ 489496 6502786 32
+ 489496 6502756 32
+ 489526 6502756 32
+ 489526 6502726 32
+ 489586 6502726 32
+ 489586 6502696 32
+ 489616 6502696 32
+ 489616 6502486 32
+ 489586 6502486 32
+ 489586 6502366 32
+ 489616 6502366 32
+ 489616 6502156 32
+ 489586 6502156 32
+ 489586 6502096 32
+ 489556 6502096 32
+ 489556 6501976 32
+ 489586 6501976 32
+ 489586 6501796 32
+ 489556 6501796 32
+ 489556 6501766 32
+ 489436 6501766 32
+ 489436 6501646 32
+ 489406 6501646 32
+ 489406 6501616 32
+ 489316 6501616 32
+ 489316 6501526 32
+ 489196 6501526 32
+ 489196 6501586 32
+ 489106 6501586 32
+ 489106 6501856 32
+ 489166 6501856 32
+ 489166 6502096 32
+ 489226 6502096 32
+ 489226 6502246 32
+ 489166 6502246 32
+ 489166 6502426 32
+ 489196 6502426 32
+ 489196 6502486 32
+ 489226 6502486 32
+ 489226 6502576 32
+ 489256 6502576 32
+ 489256 6502606 32
+ 489286 6502606 32
+ 489286 6502726 32
+ 489316 6502726 32
+ 489316 6502786 32
+ 489376 6502786 32
+ 489376 6502816 32
+ 487276 6502336 33
+ 487276 6502306 33
+ 487306 6502306 33
+ 487306 6502216 33
+ 487216 6502216 33
+ 487216 6502096 33
+ 487126 6502096 33
+ 487126 6502246 33
+ 487156 6502246 33
+ 487156 6502306 33
+ 487186 6502306 33
+ 487186 6502336 33
+ 490126 6501856 34
+ 490036 6501856 34
+ 490036 6501976 34
+ 490186 6501976 34
+ 490186 6501886 34
+ 490126 6501886 34
+ 490756 6501406 35
+ 490666 6501406 35
+ 490666 6501496 35
+ 490756 6501496 35
+ 488116 6501346 36
+ 488116 6501316 36
+ 488146 6501316 36
+ 488146 6501076 36
+ 488116 6501076 36
+ 488116 6501016 36
+ 488056 6501016 36
+ 488056 6500866 36
+ 488086 6500866 36
+ 488086 6500836 36
+ 488116 6500836 36
+ 488116 6500746 36
+ 488146 6500746 36
+ 488146 6500716 36
+ 488236 6500716 36
+ 488236 6500776 36
+ 488296 6500776 36
+ 488296 6500926 36
+ 488386 6500926 36
+ 488386 6500776 36
+ 488356 6500776 36
+ 488356 6500656 36
+ 488326 6500656 36
+ 488326 6500566 36
+ 488356 6500566 36
+ 488356 6500476 36
+ 488236 6500476 36
+ 488236 6500506 36
+ 488146 6500506 36
+ 488146 6500416 36
+ 488206 6500416 36
+ 488206 6500326 36
+ 488116 6500326 36
+ 488116 6500296 36
+ 488086 6500296 36
+ 488086 6500206 36
+ 487996 6500206 36
+ 487996 6500116 36
+ 488026 6500116 36
+ 488026 6500026 36
+ 488056 6500026 36
+ 488056 6499846 36
+ 488116 6499846 36
+ 488116 6499786 36
+ 488146 6499786 36
+ 488146 6499696 36
+ 488176 6499696 36
+ 488176 6499606 36
+ 488056 6499606 36
+ 488056 6499636 36
+ 487966 6499636 36
+ 487966 6499606 36
+ 487876 6499606 36
+ 487876 6499636 36
+ 487846 6499636 36
+ 487846 6499726 36
+ 487816 6499726 36
+ 487816 6499786 36
+ 487786 6499786 36
+ 487786 6499936 36
+ 487846 6499936 36
+ 487846 6500026 36
+ 487726 6500026 36
+ 487726 6499996 36
+ 487636 6499996 36
+ 487636 6500086 36
+ 487666 6500086 36
+ 487666 6500356 36
+ 487636 6500356 36
+ 487636 6500446 36
+ 487756 6500446 36
+ 487756 6500566 36
+ 487786 6500566 36
+ 487786 6500656 36
+ 487816 6500656 36
+ 487816 6500746 36
+ 487846 6500746 36
+ 487846 6500896 36
+ 487816 6500896 36
+ 487816 6501076 36
+ 487846 6501076 36
+ 487846 6501166 36
+ 487906 6501166 36
+ 487906 6501286 36
+ 487996 6501286 36
+ 487996 6501316 36
+ 488026 6501316 36
+ 488026 6501346 36
+ 489226 6501046 37
+ 489136 6501046 37
+ 489136 6501196 37
+ 489226 6501196 37
+ 490666 6500896 38
+ 490576 6500896 38
+ 490576 6501106 38
+ 490636 6501106 38
+ 490636 6501196 38
+ 490726 6501196 38
+ 490726 6501046 38
+ 490696 6501046 38
+ 490696 6501016 38
+ 490666 6501016 38
+ 489646 6500926 39
+ 489646 6500836 39
+ 489676 6500836 39
+ 489676 6500716 39
+ 489556 6500716 39
+ 489556 6500926 39
+ 488986 6500836 40
+ 488986 6500776 40
+ 489046 6500776 40
+ 489046 6500626 40
+ 489106 6500626 40
+ 489106 6500446 40
+ 489016 6500446 40
+ 489016 6500416 40
+ 488986 6500416 40
+ 488986 6500356 40
+ 488896 6500356 40
+ 488896 6500836 40
+ 488356 6500296 41
+ 488356 6500176 41
+ 488386 6500176 41
+ 488386 6500026 41
+ 488266 6500026 41
+ 488266 6500056 41
+ 488206 6500056 41
+ 488206 6500116 41
+ 488176 6500116 41
+ 488176 6500236 41
+ 488206 6500236 41
+ 488206 6500296 41
+ 489226 6500146 42
+ 489136 6500146 42
+ 489136 6500236 42
+ 489226 6500236 42
+ 489226 6499756 43
+ 489046 6499756 43
+ 489046 6499846 43
+ 489106 6499846 43
+ 489106 6499876 43
+ 489136 6499876 43
+ 489136 6499936 43
+ 489226 6499936 43
+ 487486 6499666 44
+ 487396 6499666 44
+ 487396 6499756 44
+ 487486 6499756 44
+ 488386 6499666 45
+ 488386 6499636 45
+ 488416 6499636 45
+ 488416 6499546 45
+ 488386 6499546 45
+ 488386 6499486 45
+ 488296 6499486 45
+ 488296 6499576 45
+ 488266 6499576 45
+ 488266 6499666 45
+ 487936 6499546 46
+ 487936 6499186 46
+ 487906 6499186 46
+ 487906 6499156 46
+ 487876 6499156 46
+ 487876 6499126 46
+ 487816 6499126 46
+ 487816 6499066 46
+ 487786 6499066 46
+ 487786 6498886 46
+ 487636 6498886 46
+ 487636 6499066 46
+ 487606 6499066 46
+ 487606 6499186 46
+ 487576 6499186 46
+ 487576 6499306 46
+ 487696 6499306 46
+ 487696 6499396 46
+ 487606 6499396 46
+ 487606 6499486 46
+ 487786 6499486 46
+ 487786 6499516 46
+ 487846 6499516 46
+ 487846 6499546 46
+ 489286 6499396 47
+ 489166 6499396 47
+ 489166 6499486 47
+ 489286 6499486 47
+ 488296 6499036 48
+ 488296 6498886 48
+ 488446 6498886 48
+ 488446 6498796 48
+ 488506 6498796 48
+ 488506 6498706 48
+ 488446 6498706 48
+ 488446 6498676 48
+ 488386 6498676 48
+ 488386 6498646 48
+ 488356 6498646 48
+ 488356 6498616 48
+ 488116 6498616 48
+ 488116 6498586 48
+ 488056 6498586 48
+ 488056 6498556 48
+ 488026 6498556 48
+ 488026 6498526 48
+ 487876 6498526 48
+ 487876 6498646 48
+ 487996 6498646 48
+ 487996 6498676 48
+ 488026 6498676 48
+ 488026 6498706 48
+ 488116 6498706 48
+ 488116 6498976 48
+ 488146 6498976 48
+ 488146 6499006 48
+ 488176 6499006 48
+ 488176 6499096 48
+ 488236 6499096 48
+ 488236 6499306 48
+ 488266 6499306 48
+ 488266 6499396 48
+ 488386 6499396 48
+ 488386 6499306 48
+ 488356 6499306 48
+ 488356 6499096 48
+ 488326 6499096 48
+ 488326 6499036 48
+ 489886 6499276 49
+ 489766 6499276 49
+ 489766 6499396 49
+ 489886 6499396 49
+ 490156 6499066 50
+ 490156 6499006 50
+ 490186 6499006 50
+ 490186 6498766 50
+ 490156 6498766 50
+ 490096 6498556 50
+ 490096 6498526 50
+ 489976 6498526 50
+ 489976 6498706 50
+ 490066 6498706 50
+ 490066 6498826 50
+ 489766 6498826 50
+ 489766 6498916 50
+ 489736 6498916 50
+ 489736 6499006 50
+ 489766 6499006 50
+ 489766 6499066 50
+ 489976 6499066 50
+ 489976 6499036 50
+ 490066 6499036 50
+ 490066 6499066 50
+ 487756 6498466 51
+ 487756 6498256 51
+ 487666 6498256 51
+ 487666 6498226 51
+ 487636 6498226 51
+ 487636 6498196 51
+ 487516 6498196 51
+ 487516 6498226 51
+ 487486 6498226 51
+ 487486 6498376 51
+ 487396 6498376 51
+ 487396 6498406 51
+ 487336 6498406 51
+ 487336 6498526 51
+ 487576 6498526 51
+ 487576 6498556 51
+ 487816 6498556 51
+ 487816 6498466 51
+ 489316 6498106 52
+ 489226 6498106 52
+ 489226 6498226 52
+ 489316 6498226 52
+ 490066 6497836 53
+ 489976 6497836 53
+ 489976 6497956 53
+ 490066 6497956 53
+ 489436 6497536 54
+ 489346 6497536 54
+ 489346 6497926 54
+ 489466 6497926 54
+ 489466 6497596 54
+ 489436 6497596 54
+ 490726 6497926 55
+ 490726 6497656 55
+ 490756 6497656 55
+ 490756 6497596 55
+ 490816 6497596 55
+ 490816 6497506 55
+ 490786 6497506 55
+ 490786 6497476 55
+ 490696 6497476 55
+ 490696 6497536 55
+ 490666 6497536 55
+ 490666 6497656 55
+ 490636 6497656 55
+ 490636 6497746 55
+ 490606 6497746 55
+ 490606 6497776 55
+ 490576 6497776 55
+ 490576 6497926 55
+ 490156 6497746 56
+ 490156 6497716 56
+ 490186 6497716 56
+ 490186 6497656 56
+ 490216 6497656 56
+ 490216 6497566 56
+ 490336 6497566 56
+ 490336 6497476 56
+ 490306 6497476 56
+ 490306 6497326 56
+ 490246 6497326 56
+ 490246 6497296 56
+ 490096 6497296 56
+ 490096 6497356 56
+ 490066 6497356 56
+ 490066 6497596 56
+ 490036 6497596 56
+ 490036 6497716 56
+ 490066 6497716 56
+ 490066 6497746 56
+ 488026 6497536 57
+ 487936 6497536 57
+ 487936 6497626 57
+ 488026 6497626 57
+ 489466 6497206 58
+ 489346 6497206 58
+ 489346 6497446 58
+ 489376 6497446 58
+ 489376 6497506 58
+ 489526 6497506 58
+ 489526 6497296 58
+ 489466 6497296 58
+ 490876 6497266 59
+ 490786 6497266 59
+ 490786 6497356 59
+ 490876 6497356 59
+ 490936 6497236 60
+ 490936 6497206 60
+ 490996 6497206 60
+ 490996 6497176 60
+ 491026 6497176 60
+ 491026 6496996 60
+ 491086 6496996 60
+ 491086 6496936 60
+ 491206 6496936 60
+ 491206 6496696 60
+ 491116 6496696 60
+ 491116 6496726 60
+ 491086 6496726 60
+ 491086 6496846 60
+ 491056 6496846 60
+ 491056 6496906 60
+ 490996 6496906 60
+ 490996 6496966 60
+ 490936 6496966 60
+ 490936 6497026 60
+ 490906 6497026 60
+ 490906 6497116 60
+ 490876 6497116 60
+ 490876 6497146 60
+ 490846 6497146 60
+ 490846 6497236 60
+ 490366 6496906 61
+ 490276 6496906 61
+ 490276 6497026 61
+ 490306 6497026 61
+ 490306 6497176 61
+ 490396 6497176 61
+ 490396 6497206 61
+ 490516 6497206 61
+ 490516 6497116 61
+ 490456 6497116 61
+ 490456 6497056 61
+ 490396 6497056 61
+ 490396 6497026 61
+ 490366 6497026 61
+ 487456 6497146 62
+ 487486 6497116 62
+ 487486 6497086 62
+ 487546 6497086 62
+ 487546 6496936 62
+ 487216 6497026 62
+ 487216 6497086 62
+ 487126 6497086 62
+ 487126 6497176 62
+ 489586 6496936 63
+ 489376 6496936 63
+ 489376 6497026 63
+ 489586 6497026 63
diff --git a/tests/selfcross.txt b/tests/selfcross.txt
new file mode 100644
index 0000000..0e41c28
--- /dev/null
+++ b/tests/selfcross.txt
@@ -0,0 +1,22 @@
+        x         y
+ 0.3057897 0.1518920
+ 0.6038506 0.3132859
+ 0.6343093 0.2740279
+ 0.5364061 0.2936569
+ 0.8170620 0.4681368
+ 0.8083595 0.6535217
+ 0.6125531 0.6796937
+ 0.6103774 0.6360737
+ 0.4363273 0.6338927
+ 0.4689617 0.6927797
+ 0.6538900 0.7560286
+ 0.6169043 0.7756576
+ 0.5994993 0.7276756
+ 0.3514779 0.7363996
+ 0.3123166 0.6622457
+ 0.1447933 0.4877658
+ 0.2274671 0.4332408
+ 0.1578471 0.3721728
+ 0.2753309 0.4027068
+ 0.1817790 0.4136118
+ 0.2100621 0.3067429
diff --git a/tests/testsAtoF.R b/tests/testsAtoF.R
new file mode 100644
index 0000000..4f1de67
--- /dev/null
+++ b/tests/testsAtoF.R
@@ -0,0 +1,753 @@
+## badwindowcheck.R
+## $Revision: 1.2 $  $Date: 2014/01/27 07:18:41 $
+##
+
+require(spatstat)
+local({
+  ## Simple example of self-crossing polygon
+  x <- read.table("selfcross.txt", header=TRUE)
+  ## Auto-repair
+  w <- owin(poly=x)
+
+  ## Real data involving various quirks
+  b <- read.table("badwindow.txt", header=TRUE)
+  b <- split(b, factor(b$i))
+  b <- lapply(b, function(z) { as.list(z[,-3]) })
+  ## make owin without checking
+  W <- owin(poly=b, check=FALSE)
+  ## Apply stringent checks
+  owinpolycheck(W,verbose=FALSE)
+  ## Auto-repair
+  W2 <- owin(poly=b)
+})
+
+
+
+
+## tests/cdf.test.R
+## check cdf.test with strange data
+require(spatstat)
+local({
+  # Marked point patterns with some marks not represented
+  AC <- split(ants, un=FALSE)$Cataglyphis
+  AM <- split(ants, un=FALSE)$Messor
+  DM <- distmap(AM)
+  # should produce a warning, rather than a crash:
+  cdf.test(AC, DM)
+  # should be OK:
+  cdf.test(unmark(AC), DM)
+  cdf.test(unmark(AC), DM, "cvm")
+  cdf.test(unmark(AC), DM, "ad")
+  # linear networks
+  set.seed(42)
+  X <- runiflpp(20, simplenet)
+  fit <- lppm(X ~1)
+  cdf.test(fit, "y")
+  cdf.test(fit, "y", "cvm")
+  cdf.test(fit, "y", "ad")
+})
+
+##  tests/closeshave.R
+## check 'closepairs/crosspairs' code
+## validity and memory allocation
+## $Revision: 1.5 $ $Date: 2016/03/28 04:21:07 $
+
+require(spatstat)
+local({
+  r <- 0.12
+  close.all <- closepairs(redwood, r)
+  close.ij <- closepairs(redwood, r, what="indices")
+  close.ijd <- closepairs(redwood, r, what="ijd")
+  stopifnot(identical(close.ij, close.all[c("i","j")]))
+  stopifnot(identical(close.ijd, close.all[c("i","j","d")]))
+
+  Y <- split(amacrine)
+  on <- Y$on
+  off <- Y$off
+  cross.all <- crosspairs(on, off, r)
+  cross.ij <- crosspairs(on, off, r, what="indices")
+  cross.ijd <- crosspairs(on, off, r, what="ijd")
+  stopifnot(identical(cross.ij, cross.all[c("i","j")]))
+  stopifnot(identical(cross.ijd, cross.all[c("i","j","d")]))
+
+  # closethresh vs closepairs: EXACT agreement
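+  # closethresh(X, r, s) lists the pairs closer than r and flags those
+  # also closer than s; rebuild the same information from closepairs()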
+  thresh <- 0.08
+  clt <- closethresh(redwood, r, thresh)
+  cl <- with(closepairs(redwood, r),
+             list(i=i, j=j, th = (d <= thresh)))
+  if(!identical(cl, clt))
+    stop("closepairs and closethresh disagree")
+
+  # compare with older, slower code
+  reordered <- function(a) {
+    o <- with(a, order(i,j))
+    as.list(as.data.frame(a)[o,,drop=FALSE])
+  }
+  samesame <- function(a, b) {
+    identical(reordered(a), reordered(b))
+  }
+  spatstat.options(closepairs.newcode=FALSE)
+  old.close.ij <- closepairs(redwood, r, what="indices")
+  old.cross.ij <- crosspairs(on, off, r, what="indices")
+  stopifnot(samesame(close.ij, old.close.ij))
+  stopifnot(samesame(cross.ij, old.cross.ij))
+  spatstat.options(closepairs.newcode=TRUE)
+  
+  # Rasmus' example
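+  # crosspairs(U, U, R) must be symmetric: if (i,j) is reported then so
+  # is (j,i); encode the pairs as an adjacency matrix and test symmetry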
+  R <- 0.04
+  U <- as.ppp(gridcenters(owin(), 50, 50), W=owin())
+  cp <- crosspairs(U, U, R)
+  G <- matrix(0, npoints(U), npoints(U))
+  G[cbind(cp$i, cp$j)] <- 1
+  if(!isSymmetric(G))
+    stop("crosspairs is not symmetric in Rasmus example")
+
+})
+## tests/colour.R
+##
+## $Revision: 1.1 $ $Date: 2015/12/29 08:54:49 $
+##
+
+require(spatstat)
+
+local({
+   f <- function(n) grey(seq(0,1,length=n))
+   z <- to.grey(f)
+})
+# tests/correctC.R
+# check for agreement between C and interpreted code
+# for interpoint distances etc.
+# $Revision: 1.4 $ $Date: 2015/12/29 08:54:49 $
+
+require(spatstat)
+
+local({
+  eps <- .Machine$double.eps * 4
+
+  # pairdist.ppp
+  X <- rpoispp(42)
+  dC <- pairdist(X, method="C")
+  dR <- pairdist(X, method="interpreted")
+  if(any(abs(dC - dR) > eps))
+    stop("Algorithms for pairdist() do not agree")
+
+  dC <- pairdist(X, periodic=TRUE, method="C")
+  dR <- pairdist(X, periodic=TRUE, method="interpreted")
+  if(any(abs(dC - dR) > eps))
+    stop("Algorithms for pairdist(periodic=TRUE) do not agree")
+
+  # crossdist.ppp
+  Y <- rpoispp(42)
+  dC <- crossdist(X, Y, method="C")
+  dR <- crossdist(X, Y, method="interpreted")
+  if(any(abs(dC - dR) > eps))
+    stop("Algorithms for crossdist() do not agree")
+
+  dC <- crossdist(X, Y, periodic=TRUE, method="C")
+  dR <- crossdist(X, Y, periodic=TRUE, method="interpreted")
+  if(any(abs(dC - dR) > eps))
+    stop("Algorithms for crossdist(periodic=TRUE) do not agree")
+
+  # nndist.ppp
+  nnC <- nndist(X, method="C")
+  nnI <- nndist(X, method="interpreted")
+  if(any(abs(nnC - nnI) > eps))
+    stop("Algorithms for nndist() do not agree")
+
+  nn3C <- nndist(X, k=3, method="C")
+  nn3I <- nndist(X, k=3, method="interpreted")
+  if(any(abs(nn3C - nn3I) > eps))
+    stop("Algorithms for nndist(k=3) do not agree")
+
+  # nnwhich.ppp
+  nwC <- nnwhich(X, method="C")
+  nwI <- nnwhich(X, method="interpreted")
+  if(any(nwC != nwI))
+    stop("Algorithms for nnwhich() do not agree")
+
+  nw3C <- nnwhich(X, k=3, method="C")
+  nw3I <- nnwhich(X, k=3, method="interpreted")
+  if(any(nw3C != nw3I))
+    stop("Algorithms for nnwhich(k=3) do not agree")
+
+  # whist
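+  # whist(x, breaks, weights) is a weighted histogram: the sum of the
+  # weights of the x-values falling in each interval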
+  set.seed(98123)
+  x <- runif(1000)
+  w <- sample(1:5, 1000, replace=TRUE)
+  b <- seq(0,1,length=101)
+  op <- spatstat.options(Cwhist=TRUE)
+  aT <- whist(x,b,w)
+  spatstat.options(Cwhist=FALSE)
+  aF <- whist(x,b,w)
+  if(!all(aT == aF))
+    stop("Algorithms for whist disagree")
+  spatstat.options(op)
+})
+#
+#  tests/density.R
+#
+#  Test behaviour of density methods and inhomogeneous summary functions
+#
+#  $Revision: 1.9 $  $Date: 2017/08/10 02:01:54 $
+#
+
+require(spatstat)
+
+local({
+
+  # test all cases of density.ppp
+  
+  tryit <- function(...) {
+    Z <- density(cells, ..., at="pixels")
+    Z <- density(cells, ..., at="points")
+    return(invisible(NULL))
+  }
+  
+  tryit(0.05)
+  tryit(0.05, diggle=TRUE)
+  tryit(0.05, se=TRUE)
+  tryit(varcov=diag(c(0.05^2, 0.07^2)))
+  tryit(0.05, weights=data.frame(a=1:42,b=42:1))
+  tryit(0.05, weights=expression(x))
+
+  # apply different discretisation rules
+  Z <- density(cells, 0.05, fractional=TRUE)
+  Z <- density(cells, 0.05, preserve=TRUE)
+  Z <- density(cells, 0.05, fractional=TRUE, preserve=TRUE)
+        
+  ## compare density.ppp(at="points") results with different algorithms
+  crosscheque <- function(expr) {
+    e <- as.expression(substitute(expr))
+    ename <- sQuote(deparse(substitute(expr)))
+    ## interpreted R
+    opa <- spatstat.options(densityC=FALSE, densityTransform=FALSE)
+    val.interpreted <- eval(e)
+    ## established C algorithm 'denspt'
+    spatstat.options(densityC=TRUE, densityTransform=FALSE)
+    val.C <- eval(e)
+    ## new C algorithm 'Gdenspt' using transformed coordinates
+    spatstat.options(densityC=TRUE, densityTransform=TRUE)
+    val.Transform <- eval(e)
+    spatstat.options(opa)
+    if(max(abs(val.interpreted - val.C)) > 0.001)
+      stop(paste("Numerical discrepancy between R and C algorithms in",
+                 ename))
+    if(max(abs(val.C - val.Transform)) > 0.001)
+      stop(paste("Numerical discrepancy between C algorithms",
+                 "using transformed and untransformed coordinates in",
+                 ename))
+    invisible(NULL)
+  }
+
+  crosscheque(density(redwood, at="points", sigma=0.13, edge=FALSE))
+
+  lam <- density(redwood)
+  K <- Kinhom(redwood, lam)
+  
+  lamX <- density(redwood, at="points")
+  KX <- Kinhom(redwood, lamX)
+
+  ## test all code cases of new 'relrisk.ppp' algorithm
+  pants <- function(..., X=ants) {
+    a <- relrisk(X, sigma=100, se=TRUE, ...)
+    return(TRUE)
+  }
+  pants()
+  pants(casecontrol=FALSE)
+  pants(relative=TRUE)
+  pants(casecontrol=FALSE, relative=TRUE)
+  pants(at="points")
+  pants(casecontrol=FALSE,at="points")
+  pants(relative=TRUE,at="points")
+  pants(casecontrol=FALSE, relative=TRUE,at="points")
+
+  ## more than 2 types
+  pants(X=sporophores)
+  pants(X=sporophores, at="points")
+  pants(X=sporophores, relative=TRUE, at="points")
+
+  ## Smooth.ppp
+  Z <- Smooth(longleaf, 5, diggle=TRUE)
+  Z <- Smooth(longleaf, 1e-6) # generates warning about small bandwidth
+
+  ## Smooth.ppp(at='points')
+  Y <- longleaf %mark% runif(npoints(longleaf), min=41, max=43)
+
+  Z <- Smooth(Y, 5, at="points", leaveoneout=TRUE)
+  rZ <- range(Z)
+  if(rZ[1] < 40 || rZ[2] > 44)
+    stop("Implausible results from Smooth.ppp(at=points, leaveoneout=TRUE)")
+
+  Z <- Smooth(Y, 5, at="points", leaveoneout=FALSE)
+  rZ <- range(Z)
+  if(rZ[1] < 40 || rZ[2] > 44)
+    stop("Implausible results from Smooth.ppp(at=points, leaveoneout=FALSE)")
+
+  ## compare Smooth.ppp results with different algorithms
+  crosscheque(Smooth(longleaf, at="points", sigma=6))
+
+  ## drop-dimension coding errors
+  X <- longleaf
+  marks(X) <- cbind(marks(X), 1)
+  Z <- Smooth(X, 5)
+
+  ZZ <- bw.smoothppp(finpines, hmin=0.01, hmax=0.012, nh=2) # reshaping problem
+
+  ## geometric-mean smoothing
+  U <- Smooth(longleaf, 5, geometric=TRUE)
+  UU <- Smooth(X, 5, geometric=TRUE)
+})
+#'
+#'  tests/discarea.R
+#'
+#'   $Revision: 1.1 $ $Date: 2016/03/28 09:16:03 $
+#'
+
+require(spatstat)
+local({
+  u <- c(0.5,0.5)
+  B <- owin(poly=list(x=c(0.3, 0.5, 0.7, 0.4), y=c(0.3, 0.3, 0.6, 0.8)))
+  areaGain(u, cells, 0.1, exact=TRUE)
+  areaGain(u, cells, 0.1, W=NULL)
+  areaGain(u, cells, 0.1, W=B)
+
+  areaLoss(cells[square(0.4)], 0.1, exact=TRUE)
+})
+#'
+#'   tests/disconnected.R
+#'
+#'   disconnected linear networks
+#'
+#'    $Revision: 1.2 $ $Date: 2017/06/05 14:58:36 $
+
+require(spatstat)
+
+local({
+
+#'   disconnected network
+m <- simplenet$m
+m[4,5] <- m[5,4] <- m[6,10] <- m[10,6] <- m[4,6] <- m[6,4] <- FALSE
+L <- linnet(vertices(simplenet), m)
+L
+summary(L)
+Z <- connected(L, what="components")
+
+#' point pattern with no points in one connected component
+set.seed(42)
+X <- rpoislpp(lambda=function(x,y) { 10 * (x < 0.5)}, L)
+B <- lineardirichlet(X)
+plot(B)
+summary(B)
+D <- pairdist(X)
+A <- nndist(X)
+H <- nnwhich(X)
+Y <- rpoislpp(lambda=function(x,y) { 10 * (x < 0.5)}, L)
+G <- nncross(X, Y)
+J <- crossdist(X, Y)
+plot(distfun(X))  # includes evaluation of nncross(what="dist")
+
+#' K functions in disconnected network
+K <- linearK(X)
+lamX <- intensity(X)
+nX <- npoints(X)
+KI <- linearKinhom(X, lambda=rep(lamX, nX))
+P <- linearpcf(X)
+PJ <- linearpcfinhom(X, lambda=rep(lamX, nX))
+Y <- X %mark% factor(rep(1:2, nX)[1:nX])
+Y1 <- split(Y)[[1]]
+Y2 <- split(Y)[[2]]
+KY <- linearKcross(Y)
+PY <- linearpcfcross(Y)
+KYI <- linearKcross.inhom(Y, lambdaI=rep(intensity(Y1), npoints(Y1)),
+                       lambdaJ=rep(intensity(Y2), npoints(Y2)))
+PYI <- linearpcfcross.inhom(Y, lambdaI=rep(intensity(Y1), npoints(Y1)),
+                    lambdaJ=rep(intensity(Y2), npoints(Y2)))
+
+})
+#  tests/emptymarks.R
+#
+# test cases where there are no (rows or columns of) marks
+#
+#  $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $
+
+require(spatstat)
+local({
+  n <- npoints(cells)
+  df <- data.frame(x=1:n, y=factor(sample(letters, n, replace=TRUE)))
+  nocolumns <- c(FALSE, FALSE)
+  norows <- rep(FALSE, n)
+  X <- cells
+  marks(X) <- df
+  marks(X) <- df[,1]
+  marks(X) <- df[,nocolumns]
+  Z <- Y <- X[integer(0)]
+  marks(Y) <- df[norows,]
+  stopifnot(is.marked(Y))
+  marks(Z) <- df[norows,nocolumns]
+  stopifnot(!is.marked(Z))
+})
+#
+#  tests/envelopes.R
+#
+#  Test validity of envelope data
+#
+#  $Revision: 1.5 $  $Date: 2015/12/29 08:54:49 $
+#
+
+require(spatstat)
+
+local({
+checktheo <- function(fit) {
+  fitname <- deparse(substitute(fit))
+  en <- envelope(fit, nsim=4, verbose=FALSE, nrep=1e3)
+  nama <- names(en)
+  expecttheo <- is.poisson(fit) && is.stationary(fit)
+  context <- paste("Envelope of", fitname)
+  if(expecttheo) {
+    if(!("theo" %in% nama))
+      stop(paste(context, "did not contain", sQuote("theo")))
+    if("mmean" %in% nama)
+      stop(paste(context, "unexpectedly contained", sQuote("mmean")))
+  } else {
+    if("theo" %in% nama)
+      stop(paste(context, "unexpectedly contained", sQuote("theo")))
+    if(!("mmean" %in% nama))
+      stop(paste(context, "did not contain", sQuote("mmean")))
+  }
+  cat(paste(context, "has correct format\n"))
+}
+  
+checktheo(ppm(cells))
+checktheo(ppm(cells ~x))
+checktheo(ppm(cells ~1, Strauss(0.1)))
+
+# check envelope calls from 'alltypes'
+a <- alltypes(demopat, Kcross, nsim=4, envelope=TRUE)
+b <- alltypes(demopat, Kcross, nsim=4, envelope=TRUE, global=TRUE)
+
+# check 'transform' idioms
+A <- envelope(cells, Kest, nsim=4, transform=expression(. - .x))
+B <- envelope(cells, Kest, nsim=4, transform=expression(sqrt(./pi) - .x))
+
+#' check savefuns/savepatterns with global 
+fit <- ppm(cells~x)
+Ef <- envelope(fit, Kest, nsim=4, savefuns=TRUE, global=TRUE)
+Ep <- envelope(fit, Kest, nsim=4, savepatterns=TRUE, global=TRUE)
+
+# check conditional simulation
+e1 <- envelope(cells, Kest, nsim=4, fix.n=TRUE)
+e2 <- envelope(amacrine, Kest, nsim=4, fix.n=TRUE)
+e3 <- envelope(amacrine, Kcross, nsim=4, fix.marks=TRUE)
+fit <- ppm(japanesepines ~ 1, Strauss(0.04))
+e4 <- envelope(fit, Kest, nsim=4, fix.n=TRUE)
+fit2 <- ppm(amacrine ~ 1, Strauss(0.03))
+e5 <- envelope(fit2, Gcross, nsim=4, fix.marks=TRUE)
+
+# check pooling of envelopes in global case
+E1 <- envelope(cells, Kest, nsim=5, savefuns=TRUE, global=TRUE)
+E2 <- envelope(cells, Kest, nsim=12, savefuns=TRUE, global=TRUE)
+p12 <- pool(E1, E2)
+E1r <- envelope(cells, Kest, nsim=5, savefuns=TRUE, global=TRUE,
+                ginterval=c(0.05, 0.15))
+E2r <- envelope(cells, Kest, nsim=12, savefuns=TRUE, global=TRUE,
+                ginterval=c(0.05, 0.15))
+p12r <- pool(E1r, E2r)
+})
+
+local({
+  #' as.data.frame.envelope
+  Nsim <- 5
+  E <- envelope(cells, nsim=Nsim, savefuns=TRUE)
+  A <- as.data.frame(E)
+  B <- as.data.frame(E, simfuns=TRUE)
+  stopifnot(ncol(B) - ncol(A) == Nsim)
+})
+#
+#    tests/factorbugs.R
+#
+# check for various bugs related to factor conversions
+#
+#    $Revision: 1.3 $  $Date: 2015/12/29 08:54:49 $
+#
+require(spatstat)
+local({
+  # make a factor image
+  m <- factor(rep(letters[1:4], 4))
+  Z <- im(m, xcol=1:4, yrow=1:4)
+  # make a point pattern
+  set.seed(42)
+  X <- runifpoint(20, win=as.owin(Z))
+  # look up the image at the points of X
+  # (a) internal
+  ans1 <- lookup.im(Z, X$x, X$y)
+  stopifnot(is.factor(ans1))
+  # (b) user level
+  ans2 <- Z[X]
+  stopifnot(is.factor(ans2))
+  # (c) turn the image into a tessellation
+  #  and apply quadratcount
+  V <- tess(image = Z)
+  quadratcount(X, tess=V)
+  # (d) pad image
+  Y <- padimage(Z, factor("b", levels=levels(Z)))
+  stopifnot(Y$type == "factor")
+  U <- padimage(Z, "b")
+  stopifnot(U$type == "factor")
+})
+
+
+#
+#    tests/fastgeyer.R
+#
+# checks validity of fast C implementation of Geyer interaction
+#
+#    $Revision: 1.3 $  $Date: 2015/12/29 08:54:49 $
+#
+require(spatstat)
+local({
+  X <- redwood
+  Q <- quadscheme(X)
+  U <- union.quad(Q)
+  EP <- equalpairs.quad(Q)
+  G <- Geyer(0.11, 2)
+# The value r=0.11 is chosen to avoid hardware numerical effects (gcc bug 323).
+# It avoids being close to any value of pairdist(redwood).
+# The nearest such values are 0.1077.. and 0.1131..
+# By contrast if r = 0.1 there are values differing from 0.1 by 3e-17
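+# A quick way to verify the choice (illustrative only, not part of the test):
+#   d <- sort(pairdist(redwood))
+#   d[findInterval(0.11, d) + 0:1]   # interpoint distances bracketing 0.11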
+  a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border")
+  b <-          G$fasteval(X,U,EP,G$pot,G$par,"border")
+  if(!all(a==b))
+    stop("Results of Geyer()$fasteval and pairsat.family$eval do not match")
+# ...
+# and again for a non-integer value of 'sat'
+# (spotted by Thordis Linda Thorarinsdottir)  
+  G <- Geyer(0.11, 2.5)
+  a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border")
+  b <-          G$fasteval(X,U,EP,G$pot,G$par,"border")
+  if(!all(a==b))
+    stop("Results of Geyer()$fasteval and pairsat.family$eval do not match when sat is not an integer")
+# and again for sat < 1
+# (spotted by Rolf)  
+  G <- Geyer(0.11, 0.5)
+  a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border")
+  b <-          G$fasteval(X,U,EP,G$pot,G$par,"border")
+  if(!all(a==b))
+    stop("Results of Geyer()$fasteval and pairsat.family$eval do not match when sat < 1")
+})
+
+#
+#  tests/fastK.R
+#
+# check fast and slow code for Kest
+#
+#   $Revision: 1.3 $   $Date: 2017/07/02 08:41:46 $
+#
+require(spatstat)
+local({
+  ## fast code
+  Kb <- Kest(cells, nlarge=0)
+  Ku <- Kest(cells, correction="none")
+  Kbu <- Kest(cells, correction=c("none", "border"))
+  ## slow code, full set of corrections, sqrt transformation
+  Ldd <- Lest(unmark(demopat), correction="all", var.approx=TRUE)
+  ## Kinhom
+  lam <- density(cells, at="points", leaveoneout=TRUE)
+  ## fast code
+  Kib <- Kinhom(cells, lam, nlarge=0)
+  Kiu <- Kinhom(cells, lam, correction="none")
+  Kibu <- Kinhom(cells, lam, correction=c("none", "border"))
+  ## slow code
+  Lidd <- Linhom(unmark(demopat), sigma=bw.scott)
+})
+
+
+#' tests/formuli.R
+#'
+#'  Test machinery for manipulating formulae
+#' 
+#' $Revision: 1.4 $  $Date: 2017/02/20 07:35:47 $
+
+require(spatstat)
+local({
+
+  ff <- function(A, deletevar, B) {
+    D <- reduceformula(A, deletevar)
+    if(!spatstat.utils::identical.formulae(D, B)) {
+      AD <- as.expression(substitute(reduceformula(A,d),
+                                     list(A=A, d=deletevar)))
+      stop(paste(AD, "\n\tyields ", spatstat.utils::pasteFormula(D),
+                 " instead of ", spatstat.utils::pasteFormula(B)),
+           call.=FALSE)
+    }
+    invisible(NULL)
+  }
+
+  ff(~ x + z, "x", ~z)
+
+  ff(y ~ x + z, "x", y~z)
+
+  ff(~ I(x^2) + z, "x",  ~z)
+
+  ff(y ~ poly(x,2) + poly(z,3), "x", y ~poly(z,3))
+
+})
+
+
+
+#
+#  tests/func.R
+#
+#   $Revision: 1.3 $   $Date: 2016/06/10 15:04:08 $
+#
+#  Tests of 'funxy' infrastructure etc
+
+require(spatstat)
+local({
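+  ## each pair f*a / f*b differs only in whether the body is wrapped in
+  ## braces; the corresponding funxy objects must evaluate identically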
+  W <- square(1)
+  f1a <- function(x, y) sqrt(x^2 + y^2)
+  f1b <- function(x, y) { sqrt(x^2 + y^2) }
+  f2a <- function(x, y) sin(x)
+  f2b <- function(x, y) { sin(x) } 
+  f3a <- function(x, y) sin(x) + cos(x) 
+  f3b <- function(x, y) { sin(x) + cos(x) } 
+  f4a <- function(x, y) { z <- x + y ; z }
+  f4b <- function(x, y) { x + y } 
+  F1a <- funxy(f1a, W)
+  F1b <- funxy(f1b, W)
+  F2a <- funxy(f2a, W)
+  F2b <- funxy(f2b, W)
+  F3a <- funxy(f3a, W)
+  F3b <- funxy(f3b, W)
+  F4a <- funxy(f4a, W)
+  F4b <- funxy(f4b, W)
+  stopifnot(identical(F1a(cells), F1b(cells)))
+  stopifnot(identical(F2a(cells), F2b(cells)))
+  stopifnot(identical(F3a(cells), F3b(cells)))
+  stopifnot(identical(F4a(cells), F4b(cells)))
+})
+
+
+
+
+##  
+##     tests/funnymarks.R
+##
+## tests involving strange mark values
+## $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $
+
+require(spatstat)
+local({
+  ## ppm() where mark levels contain illegal characters
+  hyphenated <- c("a", "not-a")
+  spaced <- c("U", "non U")
+  suffixed <- c("a+", "a*")
+  charred <- c("+", "*")
+
+  irad <- matrix(0.1, 2,2)
+  hrad <- matrix(0.005, 2, 2)
+
+  tryit <- function(types, X, irad, hrad) { 
+    levels(marks(X)) <- types
+    fit <- ppm(X ~marks + polynom(x,y,2),
+               MultiStraussHard(types=types,iradii=irad,hradii=hrad))
+    print(fit)
+    print(coef(fit))
+    val <- fitted(fit)
+    pred <- predict(fit)
+    return(invisible(NULL))
+  }
+
+  tryit(hyphenated, amacrine, irad, hrad)
+  tryit(spaced, amacrine, irad, hrad)
+  tryit(suffixed, amacrine, irad, hrad)
+  tryit(charred, amacrine, irad, hrad)
+
+  ## marks which are dates
+  X <- cells
+  n <- npoints(X)
+  endoftime <- rep(ISOdate(2001,1,1), n)
+  eotDate   <- rep(as.Date("2001-01-01"), n)
+  markformat(endoftime)
+  markformat(eotDate)
+  marks(X) <- endoftime
+  print(X)
+  Y <- X %mark% data.frame(id=1:42, date=endoftime, dd=eotDate)
+  print(Y)
+})
+# 
+#    tests/fvproblems.R
+#
+#    $Revision: 1.7 $  $Date: 2016/03/08 00:26:23 $
+#
+
+require(spatstat)
+
+# This appears in the workshop notes
+# Problem detected by Martin Bratschi
+
+local({
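+  ## Jdif computes the difference J_{i.} - J; eval.fv carries out the
+  ## subtraction column-by-column on the two function tables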
+  Jdif <- function(X, ..., i) {
+    Jidot <- Jdot(X, ..., i=i)
+    J <- Jest(X, ...)
+    dif <- eval.fv(Jidot - J)
+    return(dif)
+  }
+  Z <- Jdif(amacrine, i="on")
+})
+#
+#  Test mathlegend code
+#
+local({
+  K <- Kest(cells)
+  plot(K)
+  plot(K, . ~ r)
+  plot(K, . - theo ~ r)
+  plot(K, sqrt(./pi)  ~ r)
+  plot(K, cbind(iso, theo) ~ r)
+  plot(K, cbind(iso, theo) - theo ~ r)
+  plot(K, sqrt(cbind(iso, theo)/pi)  ~ r)
+  plot(K, cbind(iso/2, -theo) ~ r)
+  plot(K, cbind(iso/2, trans/2) - theo ~ r)
+
+  # test expansion of .x and .y
+  plot(K, . ~ .x)
+  plot(K, . - theo ~ .x)
+  plot(K, .y - theo ~ .x)
+  plot(K, sqrt(.y) - sqrt(theo) ~ .x)
+
+  # problems with parsing weird strings in levels(marks(X))
+  # noted by Ulf Mehlig
+
+  levels(marks(amacrine)) <- c("Nasticreechia krorluppia", "Homo habilis")
+  plot(Kcross(amacrine))
+  plot(alltypes(amacrine, "K"))
+  plot(alltypes(amacrine, "J"))
+  plot(alltypes(amacrine, pcfcross))
+})
+
+#
+#  Test quirks related to 'alim' attribute
+
+local({
+  K <- Kest(cells)
+  attr(K, "alim") <- NULL
+  plot(K)
+  attr(K, "alim") <- c(0, 0.1)
+  plot(tail(K))
+})
+
+#
+# Check that default 'r' vector passes the test for fine spacing
+
+local({
+  a <- Fest(cells)
+  A <- Fest(cells, r=a$r)
+  b <- Hest(heather$coarse)
+  B <- Hest(heather$coarse, r=b$r)
+  # from Cenk Icos
+  X <- runifpoint(100, owin(c(0,3), c(0,10)))
+  FX <- Fest(X)
+  FXr <- Fest(X, r=FX$r)
+  JX <- Jest(X)
+})
+
+  
diff --git a/tests/testsGtoK.R b/tests/testsGtoK.R
new file mode 100644
index 0000000..876214e
--- /dev/null
+++ b/tests/testsGtoK.R
@@ -0,0 +1,197 @@
+##
+##    tests/gcc323.R
+##
+##    $Revision: 1.2 $  $Date: 2015/12/29 08:54:49 $
+##
+require(spatstat)
+local({
+  # critical R values that provoke GCC bug #323
+  a <- marktable(lansing, R=0.25)
+  a <- marktable(lansing, R=0.21)
+  a <- marktable(lansing, R=0.20)
+  a <- marktable(lansing, R=0.10)
+})
+#       
+#        tests/hobjects.R
+#
+#   Validity of methods for ppm(... method="ho")
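+#   ('ho' is the Huang-Ogata approximate-likelihood method, which refines
+#    the pseudolikelihood fit using nsim simulated realisations)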
+#
+
+require(spatstat)
+
+local({
+  set.seed(42)
+  fit  <- ppm(cells ~1,         Strauss(0.1), method="ho", nsim=10)
+  fitx <- ppm(cells ~offset(x), Strauss(0.1), method="ho", nsim=10)
+
+  a  <- AIC(fit)
+  ax <- AIC(fitx)
+
+  f  <- fitted(fit)
+  fx <- fitted(fitx)
+
+  p  <- predict(fit)
+  px <- predict(fitx)
+})
+
+
+#
+# tests/hyperframe.R
+#
+# test "[.hyperframe" etc
+#
+#  $Revision: 1.3 $  $Date: 2014/08/25 04:43:07 $
+#
+
+require(spatstat)
+local({
+  lambda <- runif(4, min=50, max=100)
+  X <- lapply(as.list(lambda), function(x) { rpoispp(x) })
+  h <- hyperframe(lambda=lambda, X=X)
+  h$lambda2 <- lambda^2
+  h[, "lambda3"] <- lambda^3
+  h[, "Y"] <- X
+  h[, "X"] <- lapply(X, flipxy)
+  h[, c("X", "Y")] <- hyperframe(X=X, Y=X)
+
+  names(h) <- LETTERS[1:5]
+  print(h)
+})
+
+
+#
+#  tests/imageops.R
+#
+#   $Revision: 1.7 $   $Date: 2015/12/29 08:54:49 $
+#
+
+require(spatstat)
+local({
+  A <- as.im(owin())
+  B <- as.im(owin(c(1.1, 1.9), c(0,1)))
+  Z <- imcov(A, B)
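+  ## maximum of the set cross-covariance is area(B) = 0.8 * 1 = 0.8,
+  ## attained when a translate of B lies wholly inside A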
+  stopifnot(abs(max(Z) - 0.8) < 0.1)
+
+  ## handling images with 1 row or column
+  ycov <- function(x, y) y
+  E <- as.im(ycov, owin(), dimyx = c(2,1))
+  G <- cut(E, 2)
+  H <- as.tess(G)
+
+  E12 <- as.im(ycov, owin(), dimyx = c(1,2))
+  G12 <- cut(E12, 2)
+  H12 <- as.tess(G12)
+
+  ##
+  d <- distmap(cells, dimyx=32)
+  Z <- connected(d <= 0.06, method="interpreted")
+})
+
+
+
+
+#'   tests/ippm.R
+#'   Tests of 'ippm' class
+#'   $Revision: 1.1 $ $Date: 2017/06/06 06:32:00 $
+
+require(spatstat)
+
+local({
+  # .......... set up example from help file .................
+  nd <- 10
+  gamma0 <- 3
+  delta0 <- 5
+  POW <- 3
+  # Terms in intensity
+  Z <- function(x,y) { -2*y }
+  f <- function(x,y,gamma,delta) { 1 + exp(gamma - delta * x^POW) }
+  # True intensity
+  lamb <- function(x,y,gamma,delta) { 200 * exp(Z(x,y)) * f(x,y,gamma,delta) }
+  # Simulate realisation
+  lmax <- max(lamb(0,0,gamma0,delta0), lamb(1,1,gamma0,delta0))
+  set.seed(42)
+  X <- rpoispp(lamb, lmax=lmax, win=owin(), gamma=gamma0, delta=delta0)
+  # Partial derivatives of log f
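+  # Writing e = exp(gamma - delta * x^POW), so that f = 1 + e:
+  #   d(log f)/d(gamma) =  e/(1+e)
+  #   d(log f)/d(delta) = -x^POW * e/(1+e)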
+  DlogfDgamma <- function(x,y, gamma, delta) {
+    topbit <- exp(gamma - delta * x^POW)
+    topbit/(1 + topbit)
+  }
+  DlogfDdelta <- function(x,y, gamma, delta) {
+    topbit <- exp(gamma - delta * x^POW)
+    - (x^POW) * topbit/(1 + topbit)
+  }
+  # irregular score
+  Dlogf <- list(gamma=DlogfDgamma, delta=DlogfDdelta)
+  # fit model
+  fit <- ippm(X ~Z + offset(log(f)),
+              covariates=list(Z=Z, f=f),
+              iScore=Dlogf,
+              start=list(gamma=1, delta=1),
+              nd=nd)
+
+  # ............. test .............................
+  Ar <- model.matrix(fit)
+  Ai <- model.matrix(fit, irregular=TRUE)
+  Zr <- model.images(fit)
+  Zi <- model.images(fit, irregular=TRUE)
+})
+#
+# tests/kppm.R
+#
+# $Revision: 1.15 $ $Date: 2016/09/13 02:30:05 $
+#
+# Test functionality of kppm that depends on RandomFields
+# Test update.kppm for old style kppm objects
+
+require(spatstat)
+local({
+
+ fit <- kppm(redwood, ~1, "Thomas")
+ fitx <- update(fit, ~ . + x)
+ fitM <- update(fit, clusters="MatClust")
+ fitC <- update(fit, cells)
+ fitCx <- update(fit, cells ~ x)
+
+ # improve.kppm
+ fitI <- update(fit, improve.type="quasi")
+ fitxI <- update(fitx, improve.type="quasi")
+ # vcov.kppm
+ vc <- vcov(fitxI)
+
+ # plot.kppm including predict.kppm
+ fitMC <- kppm(redwood ~ x, "Thomas")
+ fitCL <- kppm(redwood ~ x, "Thomas", method="c")
+ fitPA <- kppm(redwood ~ x, "Thomas", method="p")
+ plot(fitMC)
+ plot(fitCL)
+ plot(fitPA)
+
+ # fit with composite likelihood method [thanks to Abdollah Jalilian]
+ fut <- kppm(redwood ~ x, "VarGamma", method="clik2", nu.ker=-3/8)
+  
+ if(require(RandomFields)) {
+   fit0 <- kppm(redwood ~1, "LGCP")
+   Y0 <- simulate(fit0)[[1]]
+   stopifnot(is.ppp(Y0))
+
+   # fit LGCP using K function: slow
+   fit1 <- kppm(redwood ~x, "LGCP",
+                covmodel=list(model="matern", nu=0.3),
+                control=list(maxit=3))
+   Y1 <- simulate(fit1)[[1]]
+   stopifnot(is.ppp(Y1))
+
+   # fit LGCP using pcf
+   fit1p <- kppm(redwood ~x, "LGCP",
+                 covmodel=list(model="matern", nu=0.3),
+                 statistic="pcf")
+   Y1p <- simulate(fit1p)[[1]]
+   stopifnot(is.ppp(Y1p))
+  
+   # ... and Abdollah's code
+
+   fit2 <- kppm(redwood ~x, cluster="Cauchy", statistic="K")
+   Y2 <- simulate(fit2)[[1]]
+   stopifnot(is.ppp(Y2))
+
+ }
+
+})
+
+
diff --git a/tests/testsLtoM.R b/tests/testsLtoM.R
new file mode 100644
index 0000000..97e4745
--- /dev/null
+++ b/tests/testsLtoM.R
@@ -0,0 +1,476 @@
+## 
+##    tests/legacy.R
+##
+## Test that current version of spatstat is compatible with outmoded usage
+## $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $
+local({
+  require(spatstat)
+
+  ## (1) Old syntax of ppm
+  ppm(cells, ~x)
+  
+  ## (2) Old syntax of MultiStrauss etc.
+  r <- matrix(3, 2, 2)
+  a <- MultiStrauss( , r)
+  a <- MultiStrauss(NULL, r)
+  a <- MultiHard(, r)
+  
+  h <- r/2
+  a <- MultiStraussHard( , r, h)
+
+  NULL
+})
+#'
+#'    tests/leverinf.R
+#'
+#'   leverage and influence for Gibbs models
+#' 
+#'   $Revision: 1.8 $ $Date: 2017/02/23 05:30:18 $
+#' 
+
+require(spatstat)
+local({
+  # original non-sparse algorithm
+  Leverage <- function(...) leverage(..., sparseOK=FALSE)
+  Influence <- function(...) influence(..., sparseOK=FALSE)
+  Dfbetas <- function(...) dfbetas(..., sparseOK=FALSE)
+  # Strauss()$delta2
+  fitS <- ppm(cells ~ x, Strauss(0.12), rbord=0)
+  levS <- Leverage(fitS)
+  infS <- Influence(fitS)
+  dfbS <- Dfbetas(fitS)
+  # Geyer()$delta2
+  fitG <- ppm(redwood ~ 1, Geyer(0.1, 2), rbord=0)
+  levG <- Leverage(fitG)
+  infG <- Influence(fitG)
+  # pairwise.family$delta2
+  fitD <- ppm(cells ~ 1, DiggleGatesStibbard(0.12), rbord=0)
+  levD <- Leverage(fitD)
+  infD <- Influence(fitD)
+  # ppmInfluence; offset is present; coefficient vector has length 0
+  fitH <- ppm(cells ~ 1, Hardcore(0.07))
+  levH <- Leverage(fitH)
+  infH <- Influence(fitH)
+  # ppmInfluence; offset is present; coefficient vector has length 1
+  fitHx <- ppm(cells ~ x, Hardcore(0.07), rbord=0)
+  levHx <- Leverage(fitHx)
+  infHx <- Influence(fitHx)
+
+  ## divide and recombine algorithm
+  op <- spatstat.options(maxmatrix=50000)
+  ## non-sparse
+  levSB <- Leverage(fitS)
+  infSB <- Influence(fitS)
+  dfbSB <- Dfbetas(fitS)
+
+  chk <- function(x, y, what,
+                  from="single-block and multi-block",
+                  thresh=1e-12) {
+    if(max(abs(x-y)) > thresh)
+      stop(paste("Different results for", what, "obtained from",
+                 from, "algorithms"),
+           call.=FALSE)
+    invisible(NULL)
+  }
+
+  chk(marks(as.ppp(infS)), marks(as.ppp(infSB)), "influence")
+  chk(as.im(levS),         as.im(levSB),         "leverage")
+  chk(dfbS$val,            dfbSB$val,            "dfbetas$value")
+  chk(dfbS$density,        dfbSB$density,        "dfbetas$density")
+
+  # also check case of zero cif
+  levHB <- Leverage(fitH)
+  infHB <- Influence(fitH)
+  dfbHB <- Dfbetas(fitH)
+  levHxB <- Leverage(fitHx)
+  infHxB <- Influence(fitHx)
+  dfbHxB <- Dfbetas(fitHx)
+  
+  ## sparse algorithm, with blocks
+  pmiSSB <- ppmInfluence(fitS, sparseOK=TRUE)
+  # also check case of zero cif
+  pmiHSB <- ppmInfluence(fitH, sparseOK=TRUE)
+  pmiHxSB <- ppmInfluence(fitHx, sparseOK=TRUE)
+
+  spatstat.options(op)
+
+  ## sparse algorithm, no blocks
+  pmi <- ppmInfluence(fitS, sparseOK=TRUE)
+  levSp <- pmi$leverage
+  infSp <- pmi$influence
+  dfbSp <- pmi$dfbetas
+  chks <- function(...) chk(..., from="sparse and non-sparse")
+  
+  chks(marks(as.ppp(infS)), marks(as.ppp(infSp)), "influence")
+  chks(as.im(levS),         as.im(levSp),         "leverage")
+  chks(dfbS$val,            dfbSp$val,            "dfbetas$value")
+  chks(dfbS$density,        dfbSp$density,        "dfbetas$density")
+
+  # case of zero cif
+  pmiH <- ppmInfluence(fitH, sparseOK=TRUE)
+  pmiHx <- ppmInfluence(fitHx, sparseOK=TRUE)
+})
+
+##
+##    tests/linalgeb.R
+##
+## checks validity of linear algebra code
+##
+##  $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $
+##
+require(spatstat)
+local({
+  p <- 3
+  n <- 4
+
+  x <- matrix(1:(n*p), n, p)
+  w <- rep(2, n)
+  z <- matrix(0, p, p)
+  for(i in 1:n)
+    z <- z + w[i] * outer(x[i,],x[i,])
+  zC <- sumouter(x, w)
+  if(!identical(zC, z))
+    stop("sumouter gives incorrect result in symmetric case")
+
+  y <- matrix(1:(2*n), n, 2)
+  z <- matrix(0, p, 2)
+  for(i in 1:n)
+    z <- z + w[i] * outer(x[i,],y[i,])
+  zC <- sumouter(x, w, y)
+  if(!identical(zC, z))
+      stop("sumouter gives incorrect result in ASYMMETRIC case")
+  
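+  # sumsymouter(x, w) should equal the sum over i != j of
+  # w[i,j] * outer(x[,i,j], x[,j,i]); compute this sum by explicit loops
+  # and compare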
+  x <- array(as.numeric(1:(p * n * n)), dim=c(p, n, n))
+  w <- matrix(1:(n*n), n, n)
+  y <- matrix(numeric(p * p), p, p)
+  for(i in 1:n)
+    for(j in (1:n)[-i])
+      y <- y + w[i,j] * outer(x[,i,j], x[,j,i])
+  z <- sumsymouter(x, w)
+  if(!identical(y,z))
+    stop("sumsymouter gives incorrect result")
+})
+## 
+## tests/localpcf.R
+##
+## temporary test file for localpcfmatrix
+##  $Revision: 1.2 $  $Date: 2015/12/29 08:54:49 $
+
+require(spatstat)
+local({
+  a <- localpcfmatrix(redwood)
+  a
+  plot(a)
+  a[, 3:5]
+})
+#
+# tests/lppstuff.R
+#
+# Tests for lpp code
+#
+#  $Revision: 1.9 $  $Date: 2016/09/28 04:28:05 $
+
+
+require(spatstat)
+
+local({
+  # check 'normalise' option in linearKinhom
+  X <- rpoislpp(5, simplenet)
+  fit <- lppm(X ~x)
+  K <- linearKinhom(X, lambda=fit, normalise=FALSE)
+  plot(K)
+  g <- linearpcfinhom(X, lambda=fit, normalise=FALSE)
+  plot(g)
+  K <- linearKinhom(X, lambda=fit, normalise=TRUE)
+  plot(K)
+  g <- linearpcfinhom(X, lambda=fit, normalise=TRUE)
+  plot(g)
+  # check empty patterns OK
+  X <- runiflpp(0, simplenet)
+  print(X)
+  
+  ## nearest neighbour distances
+  eps <- sqrt(.Machine$double.eps)
+  f <- function(mat,k) { apply(mat, 1, function(z,n) { sort(z)[n]  }, n=k+1) }
+  g <- function(mat,k) { apply(mat, 1, function(z,n) { order(z)[n] }, n=k+1) }
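+  # f() picks the k-th nearest-neighbour distance out of a full distance
+  # matrix, g() the matching index; n = k+1 skips the zero self-distance
+  # on the diagonal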
+
+  XX <- spiders
+  nn <- nndist(XX)
+  nnP <- f(pairdist(XX), 1)
+  if(any(abs(nn - nnP) > eps))
+    stop("nndist.lpp does not agree with pairdist.lpp")
+
+  nw <- nnwhich(XX)
+  nwP <- g(pairdist(XX), 1)
+  if(any(nw != nwP))
+    stop("nnwhich.lpp does not agree with pairdist")
+
+  ZZ <- split(chicago)
+  XX <- ZZ$damage
+  YY <- ZZ$assault
+  op <- spatstat.options(Cnncrosslpp=FALSE)
+  a <- nncross(XX, YY)
+  spatstat.options(Cnncrosslpp=TRUE)
+  b <- nncross(XX, YY)
+  if(any(a$which != b$which))
+    stop("Inconsistent values of nncross.lpp()$which from different C code")
+  if(max(abs(a$dist - b$dist)) > eps)
+    stop("Inconsistent values of nncross.lpp()$dist from different C code")
+
+  spatstat.options(Cnncrosslpp=TRUE)
+  b2 <- nncross(XX, YY, k=1:2, what="which")
+  if(any(b2$which.1 != b$which))
+    stop("inconsistent values of nncross.lpp()$which from k=1:2 and k=1")
+  a2 <- nncross(XX, YY, k=1:2, what="dist")
+  if(max(abs(a2$dist.1 - a$dist)) > eps)
+    stop("Inconsistent values of nncross.lpp()$dist from k=1:2 and k=1")
+
+  spatstat.options(Cnncrosslpp=TRUE)
+  ii <- seq_len(npoints(XX))
+  w1 <- nnwhich(XX)
+  w2 <- nncross(XX, XX, iX=ii, iY=ii, what="which")
+  w3 <- nncross(XX, XX, iX=ii, iY=ii, what="which", method="interpreted")
+  if(any(w1 != w2))
+    stop("nnwhich.lpp disagrees with nncross.lpp(iX, iY)")
+  if(any(w2 != w3))
+    stop("Different results for nncross.lpp(iX, iY, 'which') using R and C")
+  d1 <- nndist(XX)
+  d2 <- nncross(XX, XX, iX=ii, iY=ii, what="dist")
+  d3 <- nncross(XX, XX, iX=ii, iY=ii, what="dist", method="interpreted")
+  if(max(abs(d1-d2)) > eps)
+    stop("nndist.lpp disagrees with nncross.lpp(iX, iY)")
+  if(max(abs(d2-d3)) > eps)
+    stop("Different results for nncross.lpp(iX, iY, 'dist') using R and C")
+
+  spatstat.options(op)
+
+  # test handling marginal cases
+  xyd <- nncross(XX, YY[1])
+
+  ## as.linnet.psp (Suman's example)
+  Lines <- as.data.frame(as.psp(simplenet))
+  newseg <- c(Lines[1,1:2], Lines[10,3:4])
+  Lines <- rbind(Lines, newseg)
+  Y <- as.psp(Lines, window=Window(simplenet))
+  marks(Y) <- c(3, 4, 5, 5, 3, 4, 5, 5, 5, 5, 1)
+  Z <- as.linnet(Y) # can crash if marks don't match segments
+  
+  ## Test linnet surgery code
+  set.seed(42)
+  X <- runiflpp(30, simplenet)
+  V <- runiflpp(30, simplenet)
+  XV <- insertVertices(X, V)
+  validate.lpp.coords(XV, context="calculated by insertVertices")
+
+  ## Test [.lpp internal data
+  B <- owin(c(0.1,0.7),c(0.19,0.6))
+  XB <- X[B]
+  validate.lpp.coords(XB, context="returned by [.lpp")
+
+  ## Tests related to linearK, etc
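+  ## countends(L, x, r) counts the endpoints of the disc of radius r
+  ## centred at x in the network L; cross-check against lineardisc(),
+  ## which constructs the disc explicitly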
+  testcountends <- function(X, r=100, s=1) {
+    if(s != 1) {
+      X <- rescale(X, s)
+      r <- r/s
+    }
+    L <- as.linnet(X)
+    n1 <- countends(L, X[1], r)
+    n2 <- npoints(lineardisc(L, X[1], r, plotit=FALSE)$endpoints)
+    if(n1 != n2)
+      stop(paste("Incorrect result from countends:",
+                 n1, "!=", n2, 
+                 paren(paste("scale=", 1/s))),
+           call.=FALSE)
+  }
+  # original scale
+  X <- unmark(chicago)
+  testcountends(X)
+  # finer scale
+  testcountends(X, s=1000)
+
+  ## Test algorithms for boundingradius.linnet
+  L <- as.linnet(chicago, sparse=TRUE)
+  opa <- spatstat.options(Clinearradius=FALSE)
+  bR <- as.linnet(L, sparse=FALSE)$boundingradius
+  spatstat.options(Clinearradius=TRUE)
+  bC <- as.linnet(L, sparse=FALSE)$boundingradius
+  spatstat.options(opa)
+  if(abs(bR-bC) > 0.001 * (bR+bC)/2)
+    stop("Disagreement between R and C algorithms for boundingradius.linnet",
+         call.=FALSE)
+})
+
+##
+##     tests/marcelino.R
+##
+##     $Revision: 1.3 $  $Date: 2015/12/29 08:54:49 $
+##
+require(spatstat)
+
+local({
+  Y <- split(urkiola)
+  B <- Y$birch
+  O <- Y$oak
+  B.lam <- predict (ppm(B ~polynom(x,y,2)), type="trend")
+  O.lam <- predict (ppm(O ~polynom(x,y,2)), type="trend")
+
+  Kinhom(B, lambda=B.lam, correction="iso")
+  Kinhom(B, lambda=B.lam, correction="border")
+
+  Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam)
+  Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam, correction = "iso")
+  Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam, correction = "border")
+})
+
+
+##
+##    tests/markcor.R
+##
+##   Tests of mark correlation code (etc)
+##
+## $Revision: 1.4 $ $Date: 2015/12/29 08:54:49 $
+
+require(spatstat)
+
+local({
+  ## check.testfun checks equality of functions
+  ##  and is liable to break if the behaviour of all.equal is changed
+  fe <- function(m1, m2) {m1 == m2}
+  fm <- function(m1, m2) {m1 * m2}
+  fs <- function(m1, m2) {sqrt(m1)}
+  if(check.testfun(fe, X=amacrine)$ftype != "equ")
+    warning("check.testfun fails to recognise mark equality function")
+  if(check.testfun(fm, X=longleaf)$ftype != "mul")
+    warning("check.testfun fails to recognise mark product function")
+  check.testfun(fs, X=longleaf)
+  
+  ## test all is well in Kmark -> Kinhom 
+  MA <- Kmark(amacrine,function(m1,m2){m1==m2})
+  set.seed(42)
+  AR <- rlabel(amacrine)
+  MR <- Kmark(AR,function(m1,m2){m1==m2})
+  if(isTRUE(all.equal(MA,MR)))
+    stop("Kmark unexpectedly ignores marks")
+})
+#
+# tests/mppm.R
+#
+# Basic tests of mppm
+#
+# $Revision: 1.8 $ $Date: 2016/06/28 04:19:08 $
+# 
+
+require(spatstat)
+
+local({
+  ## test interaction formulae and subfits
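+  ## (the iformula determines, for each row of the hyperframe, which of
+  ## the interaction columns 'po', 'str', 'pie' is in force)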
+  fit1 <- mppm(Points ~ group, simba,
+               hyperframe(po=Poisson(), str=Strauss(0.1)),
+               iformula=~ifelse(group=="control", po, str))
+  fit2 <- mppm(Points ~ group, simba,
+               hyperframe(po=Poisson(), str=Strauss(0.1)),
+               iformula=~str/id)
+  fit3 <- mppm(Points ~ group, simba,
+               hyperframe(po=Poisson(), pie=PairPiece(c(0.05,0.1))),
+        iformula=~I((group=="control") * po) + I((group=="treatment") * pie))
+  fit1
+  fit2
+  fit3
+
+  ## run summary.mppm which currently sits in spatstat-internal.Rd
+  summary(fit1)
+  summary(fit2)
+  summary(fit3)
+
+  ## test vcov algorithm
+  vcov(fit1)
+  vcov(fit2)
+  vcov(fit3)
+
+  ## test subfits algorithm
+  s1 <- subfits(fit1)
+  s2 <- subfits(fit2)
+  s3 <- subfits(fit3)
+
+  ## validity of results of subfits()
+  p1 <- solapply(s1, predict)
+  p2 <- solapply(s2, predict)
+  p3 <- solapply(s3, predict)
+
+})
+
+local({
+  ##  [thanks to Sven Wagner]
+  ## factor covariate, with some levels unused in some rows
+  set.seed(14921788)
+  H <- hyperframe(X=replicate(3, runifpoint(20), simplify=FALSE),
+                  Z=solist(as.im(function(x,y){x}, owin()),
+                    as.im(function(x,y){y}, owin()),
+                    as.im(function(x,y){x+y}, owin())))
+  H$Z <- solapply(H$Z, cut, breaks=(0:4)/2)
+
+  fit6 <- mppm(X ~ Z, H)
+  v6 <- vcov(fit6)
+  s6 <- subfits(fit6)
+  p6 <- solapply(s6, predict)
+
+  # random effects
+  fit7 <- mppm(X ~ Z, H, random=~1|id)
+  v7 <- vcov(fit7)
+  s7 <- subfits(fit7)
+  p7 <- solapply(s7, predict)
+
+  fit7a <- mppm(X ~ Z, H, random=~x|id)
+  v7a <- vcov(fit7a)
+  s7a <- subfits(fit7a)
+  p7a <- solapply(s7a, predict)
+
+  # multitype: collisions in vcov.ppm, predict.ppm
+  H$X <- lapply(H$X, rlabel, labels=factor(c("a","b")), permute=FALSE)
+  M <- MultiStrauss(matrix(0.1, 2, 2), c("a","b"))
+  fit8 <- mppm(X ~ Z, H, M)
+  v8 <- vcov(fit8, fine=TRUE)
+  s8 <- subfits(fit8)
+  p8 <- lapply(s8, predict)
+  c8 <- lapply(s8, predict, type="cif")
+
+  fit9 <- mppm(X ~ Z, H, M, iformula=~Interaction * id)
+  v9 <- vcov(fit9, fine=TRUE)
+  s9 <- subfits(fit9)
+  p9 <- lapply(s9, predict)
+  c9 <- lapply(s9, predict, type="cif")
+
+  # and a simple error in recognising 'marks'
+  fit10 <- mppm(X ~ marks, H)
+})
+
+local({
+  ## test handling of offsets and zero cif values in mppm
+  H <- hyperframe(Y = waterstriders)
+  mppm(Y ~ 1,  data=H, Hardcore(1.5))
+  mppm(Y ~ 1,  data=H, StraussHard(7, 1.5))
+
+  ## prediction, in training/testing context
+  ##    (example from Markus Herrmann and Ege Rubak)
+  X <- waterstriders
+  dist <- solapply(waterstriders,
+                   function(z) distfun(runifpoint(1, Window(z))))
+  i <- 3
+  train <- hyperframe(pattern = X[-i], dist = dist[-i])
+  test <- hyperframe(pattern = X[i], dist = dist[i])
+  fit <- mppm(pattern ~ dist, data = train)
+  pred <- predict(fit, type="cif", newdata=test, verbose=TRUE)
+})
+
+local({
+  ## test handling of interaction coefficients in multitype case
+  set.seed(42)
+  XX <- as.solist(replicate(3, rthin(amacrine, 0.8), simplify=FALSE))
+  H <- hyperframe(X=XX)
+  M <- MultiStrauss(matrix(0.1, 2, 2), levels(marks(amacrine)))
+  fit <- mppm(X ~ 1, H, M)
+  co <- coef(fit)
+  subco <- sapply(subfits(fit), coef)
+  if(max(abs(subco - co)) > 0.001)
+    stop("Wrong coefficient values in subfits, for multitype interaction")
+})
diff --git a/tests/testsNtoP.R b/tests/testsNtoP.R
new file mode 100644
index 0000000..2f836ac
--- /dev/null
+++ b/tests/testsNtoP.R
@@ -0,0 +1,624 @@
+#
+# tests/NAinCov.R
+#
+# Testing the response to the presence of NA's in covariates
+#
+# $Revision: 1.5 $ $Date: 2015/12/29 08:54:49 $
+
+require(spatstat)
+local({
+  X <- runifpoint(42)
+  Y <- as.im(function(x,y) { x+y }, owin())
+  Y[owin(c(0.2,0.4),c(0.2,0.4))] <- NA
+  # fit model: should produce a warning but no failure
+  misfit <- ppm(X ~Y, covariates=list(Y=Y))
+  # prediction 
+  Z <- predict(misfit, type="trend", se=TRUE)
+  # covariance matrix: all should be silent
+  v <- vcov(misfit)
+  ss <- vcov(misfit, what="internals")
+  NULL
+})
+
+
+
+
+
+#
+#    tests/nndist.R
+#
+# Check that nndist and nnwhich give
+# results consistent with direct calculation from pairdist
+#
+# Similarly for nncross and distfun
+#
+# Also test whether minnndist(X) == min(nndist(X))
+#
+#   $Revision: 1.17 $  $Date: 2016/11/29 06:25:15 $
+#
+
+require(spatstat)
+local({
+  eps <- sqrt(.Machine$double.eps)
+  f <- function(mat,k) { apply(mat, 1, function(z,n) { sort(z)[n]  }, n=k+1) }
+  g <- function(mat,k) { apply(mat, 1, function(z,n) { order(z)[n] }, n=k+1) }
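+  ## (n = k+1 because each row of pairdist(X) includes the zero distance
+  ##  from a point to itself, which sort() and order() place first)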
+
+  # Two dimensions
+
+  X <- runifpoint(42)
+
+  nn <- nndist(X)
+  nnP <- f(pairdist(X), 1)
+  if(any(abs(nn - nnP) > eps))
+    stop("nndist.ppp does not agree with pairdist")
+
+  nn5 <- nndist(X, k=5)
+  nn5P <- f(pairdist(X), 5)
+  if(any(abs(nn5 - nn5P) > eps))
+    stop("nndist.ppp(k=5) does not agree with pairdist")
+
+  nw <- nnwhich(X)
+  nwP <- g(pairdist(X), 1)
+  if(any(nw != nwP))
+    stop("nnwhich.ppp does not agree with pairdist")
+
+  nw5 <- nnwhich(X, k=5)
+  nw5P <- g(pairdist(X), 5)
+  if(any(nw5 != nw5P))
+    stop("nnwhich.ppp(k=5) does not agree with pairdist")
+
+  # Three dimensions
+
+  X <- runifpoint3(42)
+
+  nn <- nndist(X)
+  nnP <- f(pairdist(X), 1)
+  if(any(abs(nn - nnP) > eps))
+    stop("nndist.pp3 does not agree with pairdist")
+
+  nn5 <- nndist(X, k=5)
+  nn5P <- f(pairdist(X), 5)
+  if(any(abs(nn5 - nn5P) > eps))
+    stop("nndist.pp3(k=5) does not agree with pairdist")
+
+  nw <- nnwhich(X)
+  nwP <- g(pairdist(X), 1)
+  if(any(nw != nwP))
+    stop("nnwhich.pp3 does not agree with pairdist")
+
+  nw5 <- nnwhich(X, k=5)
+  nw5P <- g(pairdist(X), 5)
+  if(any(nw5 != nw5P))
+    stop("nnwhich.pp3(k=5) does not agree with pairdist")
+
+  # m dimensions
+
+  X <- runifpointx(42, boxx(c(0,1),c(0,1),c(0,1),c(0,1)))
+
+  nn <- nndist(X)
+  nnP <- f(pairdist(X), 1)
+  if(any(abs(nn - nnP) > eps))
+    stop("nndist.ppx does not agree with pairdist")
+
+  nn5 <- nndist(X, k=5)
+  nn5P <- f(pairdist(X), 5)
+  if(any(abs(nn5 - nn5P) > eps))
+    stop("nndist.ppx(k=5) does not agree with pairdist")
+  
+  nw <- nnwhich(X)
+  nwP <- g(pairdist(X), 1)
+  if(any(nw != nwP))
+    stop("nnwhich.ppx does not agree with pairdist")
+
+  nw5 <- nnwhich(X, k=5)
+  nw5P <- g(pairdist(X), 5)
+  if(any(nw5 != nw5P))
+    stop("nnwhich.ppx(k=5) does not agree with pairdist")
+
+  #### nncross in two dimensions
+  X <- runifpoint(42)
+  Y <- runifpoint(42, win=owin(c(1,2),c(1,2)))
+
+  # default nncross
+  nc <- nncross(X,Y)
+  ncd <- nc$dist
+  ncw <- nc$which
+  cd <- crossdist(X,Y)
+  cdd <- apply(cd, 1, min)
+  cdw <- apply(cd, 1, which.min)
+  if(any(abs(ncd - cdd) > eps))
+    stop("nncross()$dist does not agree with apply(crossdist(), 1, min)")
+  if(any(ncw != cdw))
+    stop("nncross()$which does not agree with apply(crossdist(), 1, which.min)")
+
+  # sort on x
+  nc <- nncross(X,Y, sortby="x")
+  ncd <- nc$dist
+  ncw <- nc$which
+  if(any(abs(ncd - cdd) > eps))
+    stop("nncross(sortby=x)$dist does not agree with apply(crossdist(), 1, min)")
+  if(any(ncw != cdw))
+    stop("nncross(sortby=x)$which does not agree with apply(crossdist(), 1, which.min)")
+
+  # pre-sorted on x
+  Y <- Y[order(Y$x)]
+  nc <- nncross(X,Y, is.sorted.Y=TRUE, sortby="x")
+  ncd <- nc$dist
+  ncw <- nc$which
+  cd <- crossdist(X,Y)
+  cdd <- apply(cd, 1, min)
+  cdw <- apply(cd, 1, which.min)
+  if(any(abs(ncd - cdd) > eps))
+    stop("For sorted data, nncross()$dist does not agree with apply(crossdist(), 1, min)")
+  if(any(ncw != cdw))
+    stop("For sorted data, nncross()$which does not agree with apply(crossdist(), 1, which.min)")
+
+  # sanity check for nncross with k > 1
+  ndw <- nncross(X, Y, k=1:4, what="which")
+  if(any(is.na(ndw)))
+    stop("NA's returned by nncross.ppp(k > 1, what='which')")
+  nnc4 <- nncross(X, Y, k=1:4)
+  iswhich <- (substr(colnames(nnc4), 1, nchar("which")) == "which")
+  ndw <- nnc4[,iswhich]
+  if(any(is.na(ndw)))
+    stop("NA's returned by nncross.ppp(k > 1)$which")
+  
+  # test of correctness for nncross with k > 1
+  flipcells <- flipxy(cells)
+  calcwhich <- nncross(cells, flipcells, k=1:4, what="which")
+  truewhich <- t(apply(crossdist(cells,flipcells), 1, order))[,1:4]
+  if(any(calcwhich != truewhich))
+    stop("nncross(k > 1) gives wrong answer")
+  
+  # test of agreement between nngrid.h and knngrid.h
+  #    dimyx=23 (found by trial-and-error) ensures that there are no ties 
+  a <- as.matrix(nnmap(cells, what="which", dimyx=23))
+  b <- as.matrix(nnmap(cells, what="which", dimyx=23, k=1:2)[[1]])
+  if(any(a != b))
+    stop("algorithms in nngrid.h and knngrid.h disagree")
+
+  ## minnndist
+  mfast <- minnndist(X)
+  mslow <- min(nndist(X))
+  if(abs(mfast-mslow) > eps)
+    stop("minnndist(X) disagrees with min(nndist(X))")
+  mfast <- maxnndist(X)
+  mslow <- max(nndist(X))
+  if(abs(mfast-mslow) > eps)
+    stop("maxnndist(X) disagrees with max(nndist(X))")
+})
+
+local({
+  # tests for has.close()
+  # (the default method uses nndist or pairdist, and can be trusted!)
+  a <- has.close(redwood, 0.05)
+  b <- has.close.default(redwood, 0.05)
+  if(any(a != b)) stop("Incorrect result for has.close(X, r)")
+
+  a <- has.close(redwood, 0.05, periodic=TRUE)
+  b <- has.close.default(redwood, 0.05, periodic=TRUE)
+  if(any(a != b)) stop("Incorrect result for has.close(X, r, periodic=TRUE)")
+
+  Y <- split(amacrine)
+  a <- with(Y, has.close(on, 0.05, off))
+  b <- with(Y, has.close.default(on, 0.05, off))
+  if(any(a != b)) stop("Incorrect result for has.close(X, r, Y)")
+
+  a <- with(Y, has.close(on, 0.05, off, periodic=TRUE))
+  b <- with(Y, has.close.default(on, 0.05, off, periodic=TRUE))
+  if(any(a != b)) stop("Incorrect result for has.close(X, r, Y, periodic=TRUE)")
+})
+
+## 
+##    tests/percy.R
+##
+## Tests of Percus-Yevick approximations
+##
+##    $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $
+
+require(spatstat)
+local({
+  fit <- ppm(swedishpines ~1, DiggleGatesStibbard(6))
+  K <- Kmodel(fit)
+})
+
+#'   tests/perspim.R
+#'
+#'   Check persp.im handling of NA, etc
+#' 
+#'   $Revision: 1.1 $  $Date: 2016/08/27 02:53:35 $
+
+require(spatstat)
+
+local({
+  set.seed(42)
+  Z <- distmap(letterR, invert=TRUE)[letterR, drop=FALSE]
+  X <- runifpoint(100, Frame(Z))
+  M <- persp(Z, colin=Z, visible=TRUE)
+  perspPoints(X, Z=Z, M=M)
+})
+##
+## tests/pixelgripes.R
+##     Problems related to pixellation of windows
+##
+## $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $
+
+require(spatstat)
+local({
+  ## From Philipp Hunziker: bug in rNeymanScott (etc)
+  ## Create an irregular window
+  PM <- matrix(c(1,0,0.5,1,0,0), 3, 2, byrow=TRUE)
+  P <- owin(poly=PM)
+  ## Generate Matern points
+  X <- rMatClust(50, 0.05, 5, win=P)
+  ## Some distance function as a covariate
+  distorigin <- function(x, y) { sqrt(x^2 + y^2) }
+  ## No covariates: works fine
+  fit0 <- kppm(X ~ 1, clusters="MatClust")
+  Y0 <- simulate(fit0, retry=0)
+  ## Covariates: simulation used to fail (the reported bug)
+  fit1 <- kppm(X ~ distorigin, clusters="MatClust")
+  Y1 <- simulate(fit1, retry=0)
+})
+## 
+## tests/polygons.R
+##
+##  $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $
+##
+require(spatstat)
+local({
+  co <- as.ppp(corners(letterR), letterR, check=FALSE)
+  co[letterR]
+})
+
+# 
+#   tests/ppmBadData.R
+#
+# $Revision: 1.5 $ $Date: 2015/12/29 08:54:49 $
+
+# Testing robustness of ppm and support functions
+# when data are rubbish
+
+require(spatstat)
+local({
+# ---------------------------------------------------
+# from Rolf: very large proportion of data is NA
+  SEED <- 42
+  K <- 101
+  A <- 500
+  X <- seq(0, A, length=K)
+  G <- expand.grid(x=X, y=X)
+  FOO <- function(x,y) { sin(x)^2 + cos(y)^2 }
+  M1 <- im(matrix(FOO(G$x, G$y), K, K), xcol=X, yrow=X)
+  M <- im(matrix(FOO(G$x, G$y), K, K))
+  BAR <- function(x) { exp(-6.618913 + 5.855337 * x - 8.432483 * x^2) }
+  V <- im(BAR(M$v), xcol=X, yrow=X)
+  # V <- eval.im(exp(-6.618913 + 5.855337 * M - 8.432483 * M^2))
+  set.seed(SEED)
+  Y <- rpoispp(V)
+  fY <- ppm(Y ~cv + I(cv^2), data=list(cv=M), correction="translate")
+  diagnose.ppm(fY)
+  lurking(fY, covariate=as.im(function(x,y){x}, square(A)), type="raw")
+})
+
+# --------------------------------------------------------
+# from Andrew Bevan: numerical overflow, ill-conditioned Fisher information
+
+local({
+  SEED <- 42
+
+  nongranite <- owin(poly = list(
+    x = c(0, 8500, 7000, 6400, 6400, 6700, 7000, 7200, 7300, 8000,
+          8100, 8800, 9500, 10000, 10000, 0),
+    y = c(0, 0, 2000, 3800, 4000, 5000, 6500, 7400, 7500, 8000,
+          8100, 9000, 9500, 9600, 10000, 10000)))
+
+  #Trend on raster grid
+  rain <- as.im(X=function(x,y) { x^2 + y^2 }, W=nongranite, dimyx=100)
+
+  #Generate a point pattern via a Lennard-Jones process
+  set.seed(SEED)
+  mod4<- rmhmodel(cif="lennard",
+                par=list(beta=1, sigma=250, epsilon=2.2),
+                trend=rain, w=nongranite)
+  ljtr<- rmh(mod4, start=list(n.start=80), control=list(p=1, nrep=1e5))
+
+  #Fit a point process model to the pattern with rain as a covariate
+  # NOTE INCORRECT TREND FORMULA
+  ljtrmod <- ppm(ljtr, trend= ~ Z, interaction=NULL, data=list(Z=rain))
+  ss <- summary(ljtrmod)
+})
+
+local({
+  # From Ege
+  # Degenerate but non-null argument 'covariates'
+  xx <- list()
+  names(xx) <- character(0)
+  fit <- ppm(cells ~x, covariates = xx)
+  st <- summary(fit) 
+})
+#
+#   tests/ppmgam.R
+#
+#   Test ppm with use.gam=TRUE
+#
+#   $Revision: 1.3 $  $Date: 2015/09/01 02:01:33 $
+#
+
+require(spatstat)
+local({
+  fit <- ppm(nztrees ~s(x,y), use.gam=TRUE)
+  mm <- model.matrix(fit)
+  mf <- model.frame(fit)
+  v <- vcov(fit)
+  prd <- predict(fit)
+})
+
+#'
+#'  tests/ppmlogi.R
+#'
+#' Tests of ppm(method='logi')
+#'    and related code (predict, leverage etc)
+#'
+#' $Revision: 1.7 $  $Date: 2017/07/11 08:13:18 $
+#'
+
+require(spatstat)
+local({
+  fit <- ppm(cells ~x, method="logi")
+  f <- fitted(fit)
+  p <- predict(fit)
+  u <- summary(fit)
+  fitS <- ppm(cells ~x, Strauss(0.12), method="logi")
+  fS <- fitted(fitS)
+  pS <- predict(fitS)
+  uS <- summary(fitS)
+
+  plot(leverage(fit))
+  plot(influence(fit))
+  plot(dfbetas(fit))
+  plot(leverage(fitS))
+  plot(influence(fitS))
+  plot(dfbetas(fitS))
+})
+
+local({
+  #' same with hard core - A1 is singular
+  fitH <- ppm(cells ~x, Strauss(0.08), method="logi")
+  fH <- fitted(fitH)
+  pH <- predict(fitH)
+  uH <- summary(fitH)
+  plot(leverage(fitH))
+  plot(influence(fitH))
+  plot(dfbetas(fitH))
+})
+  
+local({
+  #' logistic fit to data frame of covariates
+  z <- c(rep(TRUE, 5), rep(FALSE, 5))
+  df <- data.frame(A=z + 2* runif(10),
+                   B=runif(10))
+  Y <- quadscheme.logi(runifpoint(5), runifpoint(5))
+  fut <- ppm(Y ~ A+B, data=df, method="logi")
+  sf <- summary(fut)
+})
+#
+#   tests/ppmmarkorder.R
+#
+# $Revision: 1.3 $  $Date: 2015/12/29 08:54:49 $
+#
+# Test that predict.ppm, plot.ppm and plot.fitin
+# tolerate marks with levels that are not in alpha order
+#
+require(spatstat)
+local({
+  X <- amacrine
+  levels(marks(X)) <- c("ZZZ", "AAA")
+  fit <- ppm(X ~marks, MultiStrauss(c("ZZZ","AAA"), matrix(0.06, 2, 2)))
+  aa <- predict(fit, type="trend")
+  bb <- predict(fit, type="cif")
+  plot(fit)
+  plot(fitin(fit))
+})
+
+
+#
+#   tests/ppmscope.R
+#
+#   Test things that might corrupt the internal format of ppm objects
+#
+#   $Revision: 1.5 $  $Date: 2015/12/29 08:54:49 $
+#
+
+require(spatstat)
+local({
+  ##   (1) Scoping problem that can arise when ppm splits the data
+
+  fit <- ppm(bei ~elev, data=bei.extra)
+  mm <- model.matrix(fit)
+
+  ##   (2) Fast update mechanism
+
+  fit1 <- ppm(cells ~x+y, Strauss(0.07))
+  fit2 <- update(fit1, ~y)
+  fit3 <- update(fit2, ~x)
+
+  ## (3) New formula-based syntax
+  attach(bei.extra)
+  slfit <- ppm(bei ~ grad)
+  sl2fit <- update(slfit, ~grad + I(grad^2))
+  slfitup <- update(slfit, use.internal=TRUE)
+  sl2fitup <- update(sl2fit, use.internal=TRUE)
+})
+
+#
+#   tests/ppmtricks.R
+#
+#   Test backdoor exits and hidden options in ppm
+#        and summary.ppm, print.summary.ppm
+#
+#   $Revision: 1.5 $  $Date: 2015/12/29 08:54:49 $
+#
+require(spatstat)
+local({
+
+  ## (1) skip.border
+  
+  fit <- ppm(cells, ~1, Strauss(0.1), skip.border=TRUE)
+
+  ## (2) subset arguments of different kinds
+  fut <- ppm(cells ~ x, subset=(x > 0.5))
+  fot <- ppm(cells ~ x, subset=(x > 0.5), method="logi")
+  W <- owin(c(0.4, 0.8), c(0.2, 0.7))
+  fut <- ppm(cells ~ x, subset=W)
+  fot <- ppm(cells ~ x, subset=W, method="logi")
+
+  ## (3) profilepl -> ppm
+  ##     uses 'skip.border' and 'precomputed'
+  ##     also tests scoping for covariates
+  splants <- split(ants)
+  mess    <- splants[["Messor"]]
+  cats    <- splants[["Cataglyphis"]]
+  ss      <- data.frame(r=seq(60,120,by=20),hc=29/6)
+  dM      <- distmap(mess,dimyx=256)
+  mungf    <- profilepl(ss, StraussHard, cats ~ dM)
+  mungp   <- profilepl(ss, StraussHard, trend=~dM, Q=cats)
+
+  ## (4) splitting large quadschemes into blocks
+  op <- spatstat.options(maxmatrix=5000)
+  pr <- predict(ppm(cells ~ x, AreaInter(0.05)))
+
+  ## (5) shortcuts in summary.ppm
+  ## and corresponding behaviour of print.summary.ppm
+  print(summary(fit, quick=TRUE))
+  print(summary(fit, quick="entries"))
+  print(summary(fit, quick="no prediction"))
+  print(summary(fit, quick="no variances"))
+  
+  spatstat.options(op)
+})
+
+#
+# tests/ppx.R
+#
+# Test operations for ppx objects
+#
+#  $Revision: 1.3 $ $Date: 2017/03/02 01:19:13 $
+#
+
+require(spatstat)
+
+local({
+  df <- data.frame(x=c(1,2,2,1), y=c(1,2,3,1), z=c(2,3,4,2))
+  X <- ppx(data=df, coord.type=rep("s", 3), domain=box3())
+  unique(X)
+  duplicated(X)
+  multiplicity(X)
+
+  stopifnot(identical(unmark(chicago[1]),
+                      unmark(chicago)[1]))
+
+  #' ppx with zero points
+  U <- chicago[integer(0)]
+  V <- U %mark% 1
+  V <- U %mark% factor("a")
+})
+#
+# tests/prediction.R
+#
+# Things that might go wrong with predict()
+#
+#  $Revision: 1.4 $ $Date: 2016/03/04 03:14:40 $
+#
+
+require(spatstat)
+
+local({
+  # test of 'covfunargs'
+  f <- function(x,y,a){ y - a }
+  fit <- ppm(cells ~x + f, covariates=list(f=f), covfunargs=list(a=1/2))
+  p <- predict(fit)
+
+  # prediction involving 0 * NA
+  qc <- quadscheme(cells, nd=10)
+  r <- minnndist(as.ppp(qc))/10
+  fit <- ppm(qc ~ 1, Strauss(r)) # model has NA for interaction coefficient
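+  # (r is a tenth of the smallest interpoint distance, so no pair of
+  #  points is closer than r; the interaction coefficient cannot be
+  #  estimated, and predict() must treat NA * 0 as 0 to stay finite)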
+  p1 <- predict(fit)
+  p2 <- predict(fit, type="cif", ngrid=10)
+  stopifnot(all(is.finite(as.matrix(p1))))
+  stopifnot(all(is.finite(as.matrix(p2))))
+
+  # test of 'new.coef' mechanism
+  fut <- ppm(cells ~ x, Strauss(0.15), rbord=0)
+  p0 <- predict(fut, type="cif")
+  pe <- predict(fut, type="cif", new.coef=coef(fut))
+  pn <- predict(fut, type="cif", new.coef=unname(coef(fut)))
+  if(max(abs(pe-p0)) > 0.01)
+    stop("new.coef mechanism is broken!")
+  if(max(abs(pn-p0)) > 0.01)
+    stop("new.coef mechanism gives wrong answer, for unnamed vectors")
+
+  # tests of relrisk.ppm
+  fut <- ppm(amacrine ~ x * marks)
+  a <- relrisk(fut, control=2, relative=TRUE)
+  a <- relrisk(fut, se=TRUE)
+  a <- relrisk(fut, relative=TRUE, se=TRUE)
+  fut <- ppm(sporophores ~ marks + x)
+  a <- relrisk(fut, control=2, relative=TRUE)
+  a <- relrisk(fut, se=TRUE)
+  a <- relrisk(fut, relative=TRUE, se=TRUE)
+  
+})
+
+#
+#     tests/project.ppm.R
+#
+#      $Revision: 1.6 $  $Date: 2015/08/27 08:19:03 $
+#
+#     Tests of projection mechanism
+#
+
+require(spatstat)
+local({
+  chk <- function(m) {
+    if(!valid.ppm(m)) stop("Projected model was still not valid")
+    return(invisible(NULL))
+  }
+  # a very unidentifiable model
+  fit <- ppm(cells ~Z, Strauss(1e-06), covariates=list(Z=0))
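+  # (the covariate Z is identically zero, so its coefficient is not
+  #  identifiable; emend() should repair the fit to a valid model)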
+  chk(emend(fit))
+  # multitype
+  r <- matrix(1e-06, 2, 2)
+  fit2 <- ppm(amacrine ~1, MultiStrauss(types=c("off", "on"), radii=r))
+  chk(emend(fit2))
+  # complicated multitype 
+  fit3 <- ppm(amacrine ~1, MultiStraussHard(types=c("off", "on"),
+                                            iradii=r, hradii=r/5))
+  chk(emend(fit3))
+  
+  # hierarchical
+  ra <- r
+  r[2,1] <- NA
+  fit4 <- ppm(amacrine ~1, HierStrauss(types=c("off", "on"), radii=r))
+  chk(emend(fit4))
+  # complicated hierarchical
+  fit5 <- ppm(amacrine ~1, HierStraussHard(types=c("off", "on"),
+                                            iradii=r, hradii=r/5))
+  chk(emend(fit5))
+  
+  # hybrids
+  r0 <- min(nndist(redwood))
+  ra <- 1.25 * r0
+  rb <- 0.8 * r0
+  f1 <- ppm(redwood ~1, Hybrid(A=Strauss(ra), B=Geyer(0.1, 2)), project=TRUE)
+  chk(f1)
+  f2 <- ppm(redwood ~1, Hybrid(A=Strauss(rb), B=Geyer(0.1, 2)), project=TRUE)
+  chk(f2)
+  f3 <- ppm(redwood ~1, Hybrid(A=Strauss(ra), B=Strauss(0.1)), project=TRUE)
+  chk(f3)
+  f4 <- ppm(redwood ~1, Hybrid(A=Strauss(rb), B=Strauss(0.1)), project=TRUE)
+  chk(f4)
+  f5 <- ppm(redwood ~1, Hybrid(A=Hardcore(rb), B=Strauss(0.1)), project=TRUE)
+  chk(f5)
+  f6 <- ppm(redwood ~1, Hybrid(A=Hardcore(rb), B=Geyer(0.1, 2)), project=TRUE)
+  chk(f6)
+  f7 <- ppm(redwood ~1, Hybrid(A=Geyer(rb, 1), B=Strauss(0.1)), project=TRUE)
+  chk(f7)
+
+})
diff --git a/tests/testsQtoR.R b/tests/testsQtoR.R
new file mode 100644
index 0000000..484e6cd
--- /dev/null
+++ b/tests/testsQtoR.R
@@ -0,0 +1,850 @@
+#'  tests/randoms.R
+#'   Further tests of random generation code
+#'  $Revision: 1.1 $ $Date: 2016/04/02 04:03:46 $
+
+require(spatstat)
+local({
+  A <- runifrect(6, nsim=2)
+  A <- runifdisc(6, nsim=2)
+  A <- runifpoispp(5, nsim=2)
+  A <- runifpoispp(0, nsim=2)
+  Z <- as.im(function(x,y) 10*x, square(1))
+  A <- rpoint(n=6, f=Z, fmax=10, nsim=2)
+  A <- rSSI(0.05, 6, nsim=2)
+  A <- rstrat(nx=4, nsim=2)
+  A <- rsyst(nx=4, nsim=2)
+  A <- rthin(cells, P=0.5, nsim=2)
+  A <- rjitter(cells, nsim=2, retry=FALSE)
+})
+
+#'  tests/resid.R
+#'
+#'  Stuff related to residuals and residual diagnostics
+#'
+#'   $Revision: 1.1 $  $Date: 2016/09/02 10:56:59 $
+#'
+
+require(spatstat)
+local({
+  fit <- ppm(cells ~x, Strauss(r=0.15))
+  diagnose.ppm(fit, cumulative=FALSE)
+  diagnose.ppm(fit, cumulative=FALSE, type="pearson")
+})
+
+
+##
+## tests/rhohat.R
+##
+## Test all combinations of options for rhohatCalc
+##
+## $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $
+
+local({
+  require(spatstat)
+  X <-  rpoispp(function(x,y){exp(3+3*x)})
+  ## done in example(rhohat):
+  ## rhoA <- rhohat(X, "x")
+  ## rhoB <- rhohat(X, "x", method="reweight")
+  ## rhoC <- rhohat(X, "x", method="transform")
+  fit <- ppm(X, ~x)
+  rhofitA <- rhohat(fit, "x")
+  rhofitB <- rhohat(fit, "x", method="reweight")
+  rhofitC <- rhohat(fit, "x", method="transform")
+
+  ## Baseline
+  lam <- predict(fit)
+  rhoAb <- rhohat(X, "x", baseline=lam)
+  rhoBb <- rhohat(X, "x", method="reweight", baseline=lam)
+  rhoCb <- rhohat(X, "x", method="transform", baseline=lam)
+
+  ## Horvitz-Thompson
+  rhoAH <- rhohat(X, "x", horvitz=TRUE) 
+  rhoBH <- rhohat(X, "x", method="reweight", horvitz=TRUE)
+  rhoCH <- rhohat(X, "x", method="transform", horvitz=TRUE)
+  rhofitAH <- rhohat(fit, "x", horvitz=TRUE)
+  rhofitBH <- rhohat(fit, "x", method="reweight", horvitz=TRUE)
+  rhofitCH <- rhohat(fit, "x", method="transform", horvitz=TRUE)
+})
+#
+#  tests/rmhAux.R
+#
+#  $Revision: 1.1 $  $Date: 2013/02/18 10:41:27 $
+#
+#  For interactions which maintain 'auxiliary data',
+#  verify that the auxiliary data are correctly updated.
+#
+#  To do this we run rmh with nsave=1 so that the point pattern state
+#  is saved after every iteration, then the algorithm is restarted,
+#  and the auxiliary data are re-initialised. The final state must agree with
+#  the result of simulation without saving.
+# ----------------------------------------------------
+
+require(spatstat)
+
+local({
+
+   # Geyer:
+   mod <- list(cif="geyer",
+               par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5),
+               w=square(10))
+
+   set.seed(42)
+   X.nosave <- rmh(model=mod,
+                   start=list(n.start=50),
+                   control=list(nrep=1e3, periodic=FALSE, expand=1))
+   set.seed(42)
+   X.save <- rmh(model=mod,
+                 start=list(n.start=50),
+                 control=list(nrep=1e3, periodic=FALSE, expand=1,
+                   nburn=0, nsave=1, pstage="start"))
+   #' Need to set pstage='start' so that proposals are generated
+   #' at the start of the procedure in both cases.
+
+   stopifnot(npoints(X.save) == npoints(X.nosave))
+   stopifnot(max(nncross(X.save, X.nosave)$dist) == 0)
+   stopifnot(max(nncross(X.nosave, X.save)$dist) == 0)
+})
+##
+##   tests/rmhBasic.R
+##
+##   $Revision: 1.11 $  $Date: 2015/12/29 08:54:49 $
+#
+# Test examples for rmh.default
+# run to reasonable length
+# and with tests for validity added
+# ----------------------------------------------------
+
+require(spatstat)
+
+local({
+if(!exists("nr"))
+   nr   <- 5e3
+
+spatstat.options(expand=1.1)
+   
+   # Strauss process.
+   mod01 <- list(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7),
+                 w=c(0,10,0,10))
+   X1.strauss <- rmh(model=mod01,start=list(n.start=80),
+                     control=list(nrep=nr))
+
+   # Strauss process, conditioning on n = 80:
+   X2.strauss <- rmh(model=mod01,start=list(n.start=80),
+                     control=list(p=1,nrep=nr))
+   stopifnot(X2.strauss$n == 80)
+
+   # test tracking mechanism
+   X1.strauss <- rmh(model=mod01,start=list(n.start=80),
+                     control=list(nrep=nr), track=TRUE)
+   X2.strauss <- rmh(model=mod01,start=list(n.start=80),
+                         control=list(p=1,nrep=nr), track=TRUE)
+   
+   # Hard core process:
+   mod02 <- list(cif="hardcore",par=list(beta=2,hc=0.7),w=c(0,10,0,10))
+   X3.hardcore <- rmh(model=mod02,start=list(n.start=60),
+                     control=list(nrep=nr))
+   
+   # Strauss process equal to pure hardcore:
+   mod02 <- list(cif="strauss",par=list(beta=2,gamma=0,r=0.7),w=c(0,10,0,10))
+   X3.strauss <- rmh(model=mod02,start=list(n.start=60),
+                     control=list(nrep=nr))
+   
+   # Strauss process in a polygonal window.
+   x     <- c(0.55,0.68,0.75,0.58,0.39,0.37,0.19,0.26,0.42)
+   y     <- c(0.20,0.27,0.68,0.99,0.80,0.61,0.45,0.28,0.33)
+   mod03 <- list(cif="strauss",par=list(beta=2000,gamma=0.6,r=0.07),
+                w=owin(poly=list(x=x,y=y)))
+   X4.strauss <- rmh(model=mod03,start=list(n.start=90),
+                     control=list(nrep=nr))
+   
+   # Strauss process in a polygonal window, conditioning on n = 42.
+   X5.strauss <- rmh(model=mod03,start=list(n.start=42),
+                     control=list(p=1,nrep=nr))
+   stopifnot(X5.strauss$n == 42)
+
+   # Strauss process, starting off from X4.strauss, but with the
+   # polygonal window replaced by a rectangular one.  At the end,
+   # the generated pattern is clipped to the original polygonal window.
+   xxx <- X4.strauss
+   xxx$window <- as.owin(c(0,1,0,1))
+   X6.strauss <- rmh(model=mod03,start=list(x.start=xxx),
+                     control=list(nrep=nr))
+   
+   # Strauss with hardcore:
+   mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3),
+                w=c(0,10,0,10))
+   X1.straush <- rmh(model=mod04,start=list(n.start=70),
+                     control=list(nrep=nr))
+   
+   # Another Strauss with hardcore (with a perhaps surprising result):
+   mod05 <- list(cif="straush",par=list(beta=80,gamma=0.36,r=45,hc=2.5),
+                w=c(0,250,0,250))
+   X2.straush <- rmh(model=mod05,start=list(n.start=250),
+                     control=list(nrep=nr))
+   
+   # Pure hardcore (identical to X3.strauss).
+   mod06 <- list(cif="straush",par=list(beta=2,gamma=1,r=1,hc=0.7),
+                w=c(0,10,0,10))
+   X3.straush <- rmh(model=mod06,start=list(n.start=60),
+                     control=list(nrep=nr))
+
+   # Area-interaction, inhibitory
+   mod.area <- list(cif="areaint",par=list(beta=2,eta=0.5,r=0.5), w=square(10))
+   X.area <- rmh(model=mod.area,start=list(n.start=60),
+                 control=list(nrep=nr))
+
+   # Area-interaction, clustered
+   mod.area2 <- list(cif="areaint",par=list(beta=2,eta=1.5,r=0.5), w=square(10))
+   X.area2 <- rmh(model=mod.area2,start=list(n.start=60),
+                 control=list(nrep=nr))
+
+   # Area-interaction close to hard core
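+   # (with eta essentially zero the process behaves like a hard core at
+   #  distance 2r = 0.7, so all nearest-neighbour distances should
+   #  comfortably exceed 0.6, as asserted below)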
+   set.seed(42)
+   mod.area0 <- list(cif="areaint",par=list(beta=2,eta=1e-300,r=0.35),
+                     w=square(10))
+   X.area0 <- rmh(model=mod.area0,start=list(x.start=X3.hardcore),
+                 control=list(nrep=nr))
+   stopifnot(nndist(X.area0) > 0.6)
+   
+   # Soft core:
+   w    <- c(0,10,0,10)
+   mod07 <- list(cif="sftcr",par=list(beta=0.8,sigma=0.1,kappa=0.5),
+                w=w)
+   X.sftcr <- rmh(model=mod07,start=list(n.start=70),
+                  control=list(nrep=nr))
+   
+   # Diggle, Gates, and Stibbard:
+   mod12 <- list(cif="dgs",par=list(beta=3600,rho=0.08),w=c(0,1,0,1))
+   X.dgs <- rmh(model=mod12,start=list(n.start=300),
+                control=list(nrep=nr))
+   
+   # Diggle-Gratton:
+   mod13 <- list(cif="diggra",
+                 par=list(beta=1800,kappa=3,delta=0.02,rho=0.04),
+                 w=square(1))
+   X.diggra <- rmh(model=mod13,start=list(n.start=300),
+                   control=list(nrep=nr))
+   
+   # Geyer:
+   mod14 <- list(cif="geyer",par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5),
+                 w=c(0,10,0,10))
+   X1.geyer <- rmh(model=mod14,start=list(n.start=200),
+                   control=list(nrep=nr))
+   
+   # Geyer; same as a Strauss process with parameters
+   # (beta=2.25,gamma=0.16,r=0.7):
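+   # (for sat much larger than any neighbour count, each r-close pair
+   #  contributes gamma once from each endpoint, giving a density factor
+   #  gamma^(2 * #pairs): a Strauss process with gamma' = 0.4^2 = 0.16)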
+   
+   mod15 <- list(cif="geyer",par=list(beta=2.25,gamma=0.4,r=0.7,sat=10000),
+                 w=c(0,10,0,10))
+   X2.geyer <- rmh(model=mod15,start=list(n.start=200),
+                   control=list(nrep=nr))
+   
+   mod16 <- list(cif="geyer",par=list(beta=8.1,gamma=2.2,r=0.08,sat=3))
+   data(redwood)
+   X3.geyer <- rmh(model=mod16,start=list(x.start=redwood),
+                   control=list(periodic=TRUE,nrep=nr))
+   
+   # Geyer, starting from the redwood data set, simulating
+   # on a torus, and conditioning on n:
+   X4.geyer <- rmh(model=mod16,start=list(x.start=redwood),
+                   control=list(p=1,periodic=TRUE,nrep=nr))
+
+   # Lookup (interaction function h_2 from page 76, Diggle (2003)):
+      r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0.
+      h <- 20*(r-0.05)
+      h[r<0.05] <- 0
+      h[r>0.10] <- 1
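+      # (h is 0 below r = 0.05, a hard core; ramps linearly to 1 at
+      #  r = 0.10; and equals 1, i.e. no interaction, beyond that)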
+      mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1))
+      X.lookup <- rmh(model=mod17,start=list(n.start=100),
+                      control=list(nrep=nr))
+                   
+   # Strauss with log-quadratic trend
+   beta <- 0.3
+   gmma <- 0.5
+   r    <- 45
+   # log quadratic trend
+   tr3  <- function(x,y) { x <- x/250; y <- y/250
+                           exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6)
+                         }
+   mod17 <- list(cif="strauss",par=list(beta=beta,gamma=gmma,r=r),w=c(0,250,0,250),
+                 trend=tr3)
+   X1.strauss.trend <- rmh(model=mod17,start=list(n.start=90),
+                           control=list(nrep=nr))
+
+})
+##
+##     tests/rmhErrors.R
+##
+##     $Revision: 1.5 $ $Date: 2015/12/29 08:54:49 $
+##
+# Things which should cause an error
+require(spatstat)
+
+local({
+if(!exists("nv"))
+  nv <- 0
+if(!exists("nr"))
+  nr   <- 1e3
+
+# Strauss with zero intensity and p = 1
+mod0S <- list(cif="strauss",par=list(beta=0,gamma=0.6,r=0.7), w = square(3))
+out <- try(X0S   <- rmh(model=mod0S,start=list(n.start=80),
+               control=list(p=1,nrep=nr,nverb=nv),verbose=FALSE))
+if(!inherits(out, "try-error"))
+  stop("Error not trapped (Strauss with zero intensity and p = 1) in tests/rmhErrors.R")
+})
+
+
+#
+# tests/rmhExpand.R
+#
+# test decisions about expansion of simulation window
+#
+#  $Revision: 1.2 $  $Date: 2011/12/05 07:29:16 $
+#
+
+require(spatstat)
+local({
+fit <- ppm(cells, ~x)
+
+# check rmhmodel.ppm
+mod <- rmhmodel(fit)
+wsim <- as.rectangle(mod$trend)
+if(!identical(wsim, as.owin(cells)))
+  stop("Expansion occurred improperly in rmhmodel.ppm")
+})
+
+
+#
+#  tests/rmhMulti.R
+#
+#  tests of rmh, running multitype point processes
+#
+#   $Revision: 1.6 $  $Date: 2015/12/29 08:54:49 $
+
+require(spatstat)
+
+local({
+if(!exists("nr"))
+   nr   <- 5e3
+
+if(!exists("nv"))
+   nv   <- 0
+
+spatstat.options(expand=1.1)
+
+   # Multitype Poisson
+   modp2 <- list(cif="poisson",
+                 par=list(beta=2), types=letters[1:3], w = square(10))
+   Xp2 <- rmh(modp2, start=list(n.start=0), control=list(p=1))
+    
+   # Multitype Strauss:
+   beta <- c(0.027,0.008)
+   gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2)
+   r    <- matrix(c(45,45,45,45),2,2)
+   mod08 <- list(cif="straussm",par=list(beta=beta,gamma=gmma,radii=r),
+                w=c(0,250,0,250))
+   X1.straussm <- rmh(model=mod08,start=list(n.start=80),
+                      control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv))
+   
+
+   # Multitype Strauss conditioning upon the total number
+   # of points being 80:
+   X2.straussm <- rmh(model=mod08,start=list(n.start=80),
+                      control=list(p=1,ptypes=c(0.75,0.25),nrep=nr,
+                                   nverb=nv))
+   stopifnot(X2.straussm$n == 80)
+
+   # Conditioning upon the number of points of type 1 being 60
+   # and the number of points of type 2 being 20:
+   X3.straussm <- rmh(model=mod08,start=list(n.start=c(60,20)),
+                      control=list(fixall=TRUE,p=1,ptypes=c(0.75,0.25),
+                                   nrep=nr,nverb=nv))
+   stopifnot(all(table(X3.straussm$marks) == c(60,20)))
+
+   # Multitype Strauss hardcore:
+   rhc  <- matrix(c(9.1,5.0,5.0,2.5),2,2)
+   mod09 <- list(cif="straushm",par=list(beta=beta,gamma=gmma,
+                iradii=r,hradii=rhc),w=c(0,250,0,250))
+   X.straushm <- rmh(model=mod09,start=list(n.start=80),
+                     control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv))
+
+   # Multitype Strauss hardcore with trends for each type:
+   beta  <- c(0.27,0.08)
+   # log quadratic trend
+   tr3   <- function(x,y) { x <- x/250; y <- y/250
+                            exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) }
+   # log linear trend
+   tr4   <- function(x,y) { x <- x/250; y <- y/250
+                            exp(-0.6*x + 0.5*y) }
+   mod10 <- list(cif="straushm",par=list(beta=beta,gamma=gmma,
+                 iradii=r,hradii=rhc),w=c(0,250,0,250),
+                 trend=list(tr3,tr4))
+   X1.straushm.trend <- rmh(model=mod10,start=list(n.start=350),
+                            control=list(ptypes=c(0.75,0.25),
+                            nrep=nr,nverb=nv))
+   
+   # Multitype Strauss hardcore with trends for each type, given as images:
+   bigwin <- square(250)
+   i1 <- as.im(tr3, bigwin)
+   i2 <- as.im(tr4, bigwin)
+   mod11 <- list(cif="straushm",par=list(beta=beta,gamma=gmma,
+                 iradii=r,hradii=rhc),w=bigwin,
+                 trend=list(i1,i2))
+   X2.straushm.trend <- rmh(model=mod11,start=list(n.start=350),
+                            control=list(ptypes=c(0.75,0.25),expand=1,
+                            nrep=nr,nverb=nv))
+
+
+#######################################################################
+############  checks on distribution of output  #######################
+#######################################################################
+
+checkp <- function(p, context, testname, failmessage, pcrit=0.01) {
+  if(missing(failmessage))
+    failmessage <- paste("output failed", testname)
+  if(p < pcrit)
+    warning(paste(context, ",",  failmessage), call.=FALSE)
+  cat(paste("\n", context, ",", testname, "has p-value", signif(p,4), "\n"))
+}
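+# Note: checkp() deliberately issues a warning rather than stopping,
+# since a p-value below pcrit = 0.01 will occur occasionally by chance
+# in routine runs of these distributional tests.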
+
+# Multitype Strauss code; output is multitype Poisson
+
+beta  <- 100 * c(1,1)
+ri    <- matrix(0.07, 2, 2)
+gmma  <- matrix(1, 2, 2)  # no interaction
+tr1   <- function(x,y){ rep(1, length(x)) }
+tr2   <- function(x,y){ rep(2, length(x)) }
+mod <- rmhmodel(cif="straussm",
+                  par=list(beta=beta,gamma=gmma,radii=ri),
+                  w=owin(),
+                  trend=list(tr1,tr2))
+
+X <- rmh(mod, start=list(n.start=0), control=list(nrep=1e6))
+
+# The model is Poisson with intensity 100 for type 1 and 200 for type 2.
+# Total number of points is Poisson (300)
+# Marks are i.i.d. with P(type 1) = 1/3, P(type 2) = 2/3.
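+# (gamma = 1 switches off the Strauss interaction, so the target is the
+#  Poisson process with intensity beta[j] * trend_j = 100 * 1 and 100 * 2;
+#  the superposition of independent Poisson processes is Poisson(300))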
+
+# Test whether the total intensity looks right
+#
+p <- ppois(X$n, 300)
+p.val <- 2 * min(p, 1-p)
+checkp(p.val, 
+       "In multitype Poisson simulation",
+       "test whether total number of points has required mean value")
+
+# Test whether the mark distribution looks right
+ta <- table(X$marks)
+cat("Frequencies of marks:")
+print(ta)
+checkp(chisq.test(ta, p = c(1,2)/3)$p.value,
+       "In multitype Poisson simulation",
+       "chi-squared goodness-of-fit test for mark distribution (1/3, 2/3)")
+
+#####
+####  multitype Strauss code; fixall=TRUE;
+####  output is multinomial process with nonuniform locations
+####
+
+the.context <- "In nonuniform multinomial simulation"
+
+beta  <- 100 * c(1,1)
+ri    <- matrix(0.07, 2, 2)
+gmma  <- matrix(1, 2, 2)  # no interaction
+tr1   <- function(x,y){ ifelse(x < 0.5, 0, 2) } 
+tr2   <- function(x,y){ ifelse(y < 0.5, 1, 3) }
+# cdf of these distributions
+Fx1 <- function(x) { ifelse(x < 0.5, 0, ifelse(x < 1, 2 * x - 1, 1)) }
+Fy2 <- function(y) { ifelse(y < 0, 0,
+                           ifelse(y < 0.5, y/2,
+                                  ifelse(y < 1, (1/2 + 3 * (y-1/2))/2, 1))) }
+                                                               
+
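+# Derivation of the cdfs: tr1 is 0 on [0,0.5) and 2 on [0.5,1], so the
+# normalised x-density of type 1 points is 2 on [0.5,1], whence
+# Fx1(x) = 2x - 1 there.  tr2 is 1 on [0,0.5) and 3 on [0.5,1] (total
+# mass 2), so the y-density of type 2 points is 1/2 then 3/2, giving
+# Fy2 as above.  Quick exact spot-checks of the two cdfs:
+stopifnot(Fx1(0.5) == 0, Fx1(1) == 1,
+          Fy2(0.5) == 0.25, Fy2(1) == 1)
+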
+mod <- rmhmodel(cif="straussm",
+                  par=list(beta=beta,gamma=gmma,radii=ri),
+                  w=owin(),
+                  trend=list(tr1,tr2))
+
+X <- rmh(mod, start=list(n.start=c(50,50)),
+           control=list(nrep=1e6, expand=1, p=1, fixall=TRUE))
+
+# The unconditioned model is Poisson with mean 100 points of type 1
+# and 200 of type 2.  Here, however, we condition on exactly 50 points
+# of each type (p=1, fixall=TRUE), so only the locations are random:
+# x-coordinates of type 1 points have cdf Fx1, and y-coordinates of
+# type 2 points have cdf Fy2, as tested below.
+
+# Test whether the coordinates look OK
+Y <- split(X)
+X1 <- Y[[names(Y)[1]]]
+X2 <- Y[[names(Y)[2]]]
+checkp(ks.test(X1$y, "punif")$p.value,
+       the.context,
+       "Kolmogorov-Smirnov test of uniformity of y coordinates of type 1 points")
+if(any(X1$x < 0.5)) {
+  stop(paste(the.context, ",", 
+             "x-coordinates of type 1 points are IMPOSSIBLE"), call.=FALSE)
+} else {
+  checkp(ks.test(Fx1(X1$x), "punif")$p.value,
+       the.context,
+       "Kolmogorov-Smirnov test of uniformity of transformed x coordinates of type 1 points")
+}
+checkp(ks.test(X2$x, "punif")$p.value,
+       the.context,
+     "Kolmogorov-Smirnov test of uniformity of x coordinates of type 2 points")
+checkp(ks.test(Fy2(X2$y), "punif")$p.value,
+       the.context,
+       "Kolmogorov-Smirnov test of uniformity of transformed y coordinates of type 2 points")
+
+})
+#
+# tests/rmhTrend.R
+#
+#  Problems with trend images (rmhmodel.ppm or rmhEngine)
+#
+
+require(spatstat)
+local({
+  set.seed(42)
+
+  # Bug folder 37 of 8 feb 2011
+  # rmhmodel.ppm -> predict.ppm
+  # + rmhResolveTypes -> is.subset.owin
+
+  data(demopat)
+  Z <- rescale(demopat, 7000)
+  X <- unmark(Z)
+  X1 <- split(Z)[[1]]
+  Int  <- density(X,dimyx=200)
+  Lint <- eval.im(log(npoints(X1)*Int/npoints(X)))
+  M    <- as.owin(Int)
+  MR   <- intersect.owin(M,scalardilate(M,0.5,origin="midpoint"))
+  X1 <- X1[MR]
+  Fut  <- ppm(X1~offset(Lint),covariates=list(Lint=Lint),
+              inter=BadGey(r=c(0.03,0.05),sat=3))
+  Y   <- rmh(Fut,control=list(expand=M,nrep=1e3), verbose=FALSE)
+
+})
+#
+#   tests/rmhWeird.R
+#
+#   $Revision: 1.3 $  $Date: 2015/12/29 08:54:49 $
+#
+# strange boundary cases
+
+require(spatstat)
+
+local({
+   if(!exists("nv"))
+     nv <- 0
+   if(!exists("nr"))
+     nr   <- 5e3
+
+   # Poisson process
+   cat("Poisson\n")
+   modP <- list(cif="poisson",par=list(beta=10), w = square(3))
+   XP <- rmh(model = modP,
+             start = list(n.start=25),
+             control=list(nrep=nr,nverb=nv))
+
+   # Poisson process case of Strauss
+   cat("\nPoisson case of Strauss\n")
+   modPS <- list(cif="strauss",par=list(beta=10,gamma=1,r=0.7), w = square(3))
+   XPS <- rmh(model=modPS,
+              start=list(n.start=25),
+              control=list(nrep=nr,nverb=nv))
+   
+   # Strauss with zero intensity
+   cat("\nStrauss with zero intensity\n")
+   mod0S <- list(cif="strauss",par=list(beta=0,gamma=0.6,r=0.7), w = square(3))
+   X0S   <- rmh(model=mod0S,start=list(n.start=80),
+                     control=list(nrep=nr,nverb=nv))
+   stopifnot(X0S$n == 0)
+
+   # Poisson with zero intensity
+   cat("\nPoisson with zero intensity\n")
+   mod0P <- list(cif="poisson",par=list(beta=0), w = square(3))
+   X0P <- rmh(model = mod0P,
+             start = list(n.start=25),
+             control=list(nrep=nr,nverb=nv))
+
+
+   # Poisson conditioned on zero points
+   cat("\nPoisson conditioned on zero points\n")
+   modp <- list(cif="poisson",
+                 par=list(beta=2), w = square(10))
+   Xp <- rmh(modp, start=list(n.start=0), control=list(p=1, nrep=nr))
+   stopifnot(Xp$n == 0)
+
+   # Multitype Poisson conditioned on zero points
+   cat("\nMultitype Poisson conditioned on zero points\n")
+   modp2 <- list(cif="poisson",
+                 par=list(beta=2), types=letters[1:3], w = square(10))
+   Xp2 <- rmh(modp2, start=list(n.start=0), control=list(p=1, nrep=nr))
+   stopifnot(is.marked(Xp2))
+   stopifnot(Xp2$n == 0)
+
+   # Multitype Poisson conditioned on zero points of each type
+   cat("\nMultitype Poisson conditioned on zero points of each type\n")
+   Xp2fix <- rmh(modp2, start=list(n.start=c(0,0,0)),
+                 control=list(p=1, fixall=TRUE, nrep=nr))
+   stopifnot(is.marked(Xp2fix))
+   stopifnot(Xp2fix$n == 0)
+    
+ })
+#
+#      tests/rmhmodel.ppm.R
+#
+#    $Revision: 1.8 $  $Date: 2015/12/29 08:54:49 $
+#
+# Case-by-case tests of rmhmodel.ppm
+#
+require(spatstat)
+
+local({
+f <- ppm(cells)
+m <- rmhmodel(f)
+
+f <- ppm(cells ~x)
+m <- rmhmodel(f)
+
+f <- ppm(cells ~1, Strauss(0.1))
+m <- rmhmodel(f)
+
+f <- ppm(cells ~1, StraussHard(r=0.1,hc=0.05))
+m <- rmhmodel(f)
+
+f <- ppm(cells ~1, Hardcore(0.07))
+m <- rmhmodel(f)
+
+f <- ppm(cells ~1, DiggleGratton(0.05,0.1))
+m <- rmhmodel(f)
+
+f <- ppm(cells ~1, Softcore(0.5), correction="isotropic")
+m <- rmhmodel(f)
+
+f <- ppm(cells ~1, Geyer(0.07,2))
+m <- rmhmodel(f)
+
+f <- ppm(cells ~1, BadGey(c(0.07,0.1,0.13),2))
+m <- rmhmodel(f)
+
+f <- ppm(cells ~1, PairPiece(r = c(0.05, 0.1, 0.2)))
+m <- rmhmodel(f)
+
+f <- ppm(cells ~1, AreaInter(r=0.06))
+m <- rmhmodel(f)
+
+# multitype
+
+r <- matrix(0.07, 2, 2)
+f <- ppm(amacrine ~1, MultiStrauss(c("off","on"),r))
+m <- rmhmodel(f)
+
+h <- matrix(min(nndist(amacrine))/2, 2, 2)
+f <- ppm(amacrine ~1, MultiStraussHard(c("off","on"),r, h))
+m <- rmhmodel(f)
+
+diag(r) <- NA
+diag(h) <- NA
+f <- ppm(amacrine ~1, MultiStrauss(c("off","on"),r))
+m <- rmhmodel(f)
+
+f <- ppm(amacrine ~1, MultiStraussHard(c("off","on"),r, h))
+m <- rmhmodel(f)
+
+# multitype data, interaction not dependent on type
+
+f <- ppm(amacrine ~marks, Strauss(0.05))
+m <- rmhmodel(f)
+
+# trends
+
+f <- ppm(cells ~x, Strauss(0.1))
+m <- rmhmodel(f)
+
+f <- ppm(cells ~y, StraussHard(r=0.1,hc=0.05))
+m <- rmhmodel(f)
+
+f <- ppm(cells ~x+y, Hardcore(0.07))
+m <- rmhmodel(f)
+
+f <- ppm(cells ~polynom(x,y,2), Softcore(0.5), correction="isotropic")
+m <- rmhmodel(f)
+
+# covariates
+
+Z <- as.im(function(x,y){ x^2+y^2 }, as.owin(cells))
+f <- ppm(cells ~z, covariates=list(z=Z))
+m <- rmhmodel(f)
+m <- rmhmodel(f, control=list(p=1))
+
+Zim <- as.im(Z, as.owin(cells))
+f <- ppm(cells ~z, covariates=list(z=Zim))
+m <- rmhmodel(f)
+
+Z <- as.im(function(x,y){ x^2+y }, as.owin(amacrine))
+f <- ppm(amacrine ~z + marks, covariates=list(z=Z))
+m <- rmhmodel(f)
+m <- rmhmodel(f, control=list(p=1))
+m <- rmhmodel(f, control=list(p=1,fixall=TRUE))
+
+Zim <- as.im(Z, as.owin(amacrine))
+f <- ppm(amacrine ~z + marks, covariates=list(z=Zim))
+m <- rmhmodel(f)
+
+})
+#
+#    tests/rmhmodelHybrids.R
+#
+#  Test that rmhmodel.ppm and rmhmodel.default
+#  work on Hybrid interaction models
+#
+#   $Revision: 1.4 $  $Date: 2015/12/29 08:54:49 $
+#
+
+require(spatstat)
+
+local({
+  # ......... rmhmodel.ppm .......................
+
+  fit1 <- ppm(redwood ~1,
+              Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2), C=Geyer(0.15, 1)))
+  m1 <- rmhmodel(fit1)
+  m1
+  reach(m1)
+
+  # Test of handling 'IsOffset' 
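+  # (the Hardcore component has no estimated coefficient: it enters the
+  #  model as an offset, flagged internally as 'IsOffset', which
+  #  rmhmodel.ppm must carry through to the simulation model)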
+  fit2 <- ppm(cells ~1, Hybrid(H=Hardcore(0.05), G=Geyer(0.15, 2)))
+  rmhmodel(fit2)
+
+  # Test of handling Poisson components
+  fit3 <- ppm(cells ~1, Hybrid(P=Poisson(), S=Strauss(0.05)))
+  X3 <- rmh(fit3, control=list(nrep=1e3,expand=1), verbose=FALSE)
+
+  # ............ rmhmodel.default ............................
+
+   modH <- list(cif=c("strauss","geyer"),
+                par=list(list(beta=50,gamma=0.5, r=0.1),
+                         list(beta=1, gamma=0.7, r=0.2, sat=2)),
+                w = square(1))
+   rmodH <- rmhmodel(modH)
+   rmodH
+   reach(rmodH)
+
+  # test handling of Poisson components
+
+   modHP <- list(cif=c("poisson","strauss"),
+                 par=list(list(beta=5),
+                          list(beta=10,gamma=0.5, r=0.1)),
+                 w = square(1))
+   rmodHP <- rmhmodel(modHP)
+   rmodHP
+   reach(rmodHP)
+
+   modPP <- list(cif=c("poisson","poisson"),
+                 par=list(list(beta=5),
+                          list(beta=10)),
+                 w = square(1))
+   rmodPP <- rmhmodel(modPP)
+   rmodPP
+   reach(rmodPP)
+  
+})
+
+
+#
+#  tests/rmh.ppm.R
+#
+#  $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $
+#
+#  Examples removed from rmh.ppm.Rd
+#  stripped down to minimal tests of validity
+#
+
+require(spatstat)
+local({
+   op <- spatstat.options()
+   spatstat.options(rmh.nrep=10, npixel=10, ndummy.min=10)
+   spatstat.options(project.fast=TRUE)
+   Nrep <- 10
+
+   X <- swedishpines
+   # Poisson process
+   fit <- ppm(X ~1, Poisson())
+   Xsim <- rmh(fit)
+   # Strauss process   
+   fit <- ppm(X ~1, Strauss(r=7))
+   Xsim <- rmh(fit)
+
+   # Strauss process simulated on a larger window
+   # then clipped to original window
+   Xsim <- rmh(fit, control=list(nrep=Nrep, expand=1.1, periodic=TRUE))
+
+   # Extension of model to another window (thanks to Tuomas Rajala)
+   Xsim <- rmh(fit, w=square(2))
+   Xsim <- simulate(fit, w=square(2))
+   
+   # Strauss - hard core process
+#   fit <- ppm(X ~1, StraussHard(r=7,hc=2))
+#   Xsim <- rmh(fit, start=list(n.start=X$n))
+
+   # Geyer saturation process
+#   fit <- ppm(X ~1, Geyer(r=7,sat=2))
+#   Xsim <- rmh(fit, start=list(n.start=X$n))
+
+   # Area-interaction process
+     fit <- ppm(X ~1, AreaInter(r=7))
+     Xsim <- rmh(fit, start=list(n.start=X$n))
+  
+     # soft core interaction process
+#     X <- quadscheme(X, nd=50)
+#     fit <- ppm(X ~1, Softcore(kappa=0.1), correction="isotropic")
+#     Xsim <- rmh(fit, start=list(n.start=X$n))
+
+     # Diggle-Gratton pairwise interaction model
+#     fit <- ppm(cells ~1, DiggleGratton(0.05, 0.1))
+#     Xsim <- rmh(fit, start=list(n.start=cells$n))
+#     plot(Xsim, main="simulation from fitted Diggle-Gratton model")
+   
+   X <- rSSI(0.05, 100)
+
+   # piecewise-constant pairwise interaction function
+   fit <- ppm(X ~1, PairPiece(seq(0.02, 0.1, by=0.01)))
+   Xsim <- rmh(fit)
+
+   # marked point pattern
+   Y <- amacrine
+
+   # marked Poisson models
+   fit <- ppm(Y)
+   Ysim <- rmh(fit)
+
+   fit <- ppm(Y~marks)
+   Ysim <- rmh(fit)
+
+   fit <- ppm(Y~x)
+   Ysim <- rmh(fit)
+#   fit <- ppm(Y~polynom(x,2))
+#   Ysim <- rmh(fit)
+
+   fit <- ppm(Y~marks+x)
+   Ysim <- rmh(fit)
+#   fit <- ppm(Y~marks+polynom(x,2))
+#   Ysim <- rmh(fit)
+
+   # multitype Strauss models
+   MS <- MultiStrauss(types = levels(Y$marks),
+                      radii=matrix(0.07, ncol=2, nrow=2))
+
+#   fit <- ppm(Y~marks*polynom(x,2), MS)
+    fit <- ppm(Y~marks*x, MS)
+   Ysim <- rmh(fit)
+
+   spatstat.options(op)
+ })
diff --git a/tests/testsStoZ.R b/tests/testsStoZ.R
new file mode 100644
index 0000000..4b65c8d
--- /dev/null
+++ b/tests/testsStoZ.R
@@ -0,0 +1,644 @@
+#
+#  tests/segments.R
+#
+#  $Revision: 1.11 $  $Date: 2017/02/20 10:15:30 $
+
+require(spatstat)
+
+local({
+# pointed out by Jeff Laake
+W <- owin()
+X <- psp(x0=.25,x1=.25,y0=0,y1=1,window=W)
+X[W]
+
+# migrated from 'lpp'
+
+X <- psp(runif(10),runif(10),runif(10),runif(10), window=owin())
+Z <- as.mask.psp(X)
+Z <- pixellate(X)
+
+# more tests of lppm code
+
+fit <- lppm(unmark(chicago) ~ polynom(x,y,2))
+Z <- predict(fit)
+
+# tests of pixellate.psp -> seg2pixL
+
+ns <- 50
+out <- numeric(ns)
+for(i in 1:ns) {
+  X <- psp(runif(1), runif(1), runif(1), runif(1), window=owin())
+  len <- lengths.psp(X)
+  dlen <- sum(pixellate(X)$v)
+  out[i] <- if(len > 1e-7) dlen/len else 1
+}
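+# each entry of 'out' is (total pixel mass)/(true segment length);
+# these ratios should be essentially constant, so a spread of more
+# than 1% indicates mass lost or duplicated at pixel boundaries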
+if(diff(range(out)) > 0.01) stop(paste(
+       "pixellate.psp test 1: relative error [",
+       paste(diff(range(out)), collapse=", "),
+       "]"))
+
+# Michael Sumner's test examples
+
+set.seed(33)
+n <- 2001
+co <- cbind(runif(n), runif(n))
+ow <- owin()
+X <- psp(co[-n,1], co[-n,2], co[-1,1], co[-1,2], window=ow)
+s1 <- sum(pixellate(X))
+s2 <- sum(lengths.psp(X))
+if(abs(s1 - s2)/s2 > 0.01) {
+  stop(paste("pixellate.psp test 2:",
+             "sum(pixellate(X)) = ", s1,
+             "!=", s2, "= sum(lengths.psp(X))"))
+}
+
+wts <- 1/(lengths.psp(X) * X$n)
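+# with weights 1/(length_i * n), segment i contributes total mass 1/n,
+# so the weighted pixel masses should sum to exactly 1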
+s1 <- sum(pixellate(X, weights=wts))
+if(abs(s1-1) > 0.01) {
+  stop(paste("pixellate.psp test 3:",
+             "sum(pixellate(X, weights))=", s1,
+             " (should be 1)"))
+}
+
+X <- psp(0, 0, 0.01, 0.001, window=owin())
+s1 <- sum(pixellate(X))
+s2 <- sum(lengths.psp(X))
+if(abs(s1 - s2)/s2 > 0.01) {
+  stop(paste("pixellate.psp test 4:",
+             "sum(pixellate(X)) = ", s1,
+             "!=", s2, "= sum(lengths.psp(X))"))
+}
+
+X <- psp(0, 0, 0.001, 0.001, window=owin())
+s1 <- sum(pixellate(X))
+s2 <- sum(lengths.psp(X))
+if(abs(s1 - s2)/s2 > 0.01) {
+  stop(paste("pixellate.psp test 5:",
+             "sum(pixellate(X)) = ", s1,
+             "!=", s2, "= sum(lengths.psp(X))"))
+}
+
+#' tests of density.psp
+Y <- as.psp(simplenet)
+YC <- density(Y, 0.2, method="C", edge=FALSE, dimyx=64)
+YI <- density(Y, 0.2, method="interpreted", edge=FALSE, dimyx=64)
+YF <- density(Y, 0.2, method="FFT", edge=FALSE, dimyx=64)
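+# the interpreted implementation serves as the reference here; the C and
+# FFT results should agree with it to within about 1% everywhere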
+xCI <- max(abs(YC/YI - 1))
+xFI <- max(abs(YF/YI - 1))
+if(xCI > 0.01) stop(paste("density.psp C algorithm relative error =", xCI))
+if(xFI > 0.01) stop(paste("density.psp FFT algorithm relative error =", xFI))
+
+})
+#
+## tests/sigtraceprogress.R
+#
+## Tests of *.sigtrace and *.progress
+#
+## $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $
+
+require(spatstat)
+local({
+  plot(dclf.sigtrace(redwood, nsim=19, alternative="greater", rmin=0.02,
+                     verbose=FALSE))
+  plot(dclf.progress(redwood, nsim=19, alternative="greater", rmin=0.02,
+                     verbose=FALSE))
+  plot(dg.sigtrace(redwood, nsim=5, alternative="greater", rmin=0.02,
+                     verbose=FALSE))
+  plot(dg.progress(redwood, nsim=5, alternative="greater", rmin=0.02,
+                   verbose=FALSE))
+  ## test 'leave-two-out' algorithm
+  a <- dclf.sigtrace(redwood, Lest, nsim=9, use.theory=FALSE, leaveout=2,
+                     verbose=FALSE)
+  aa <- dclf.progress(redwood, Lest, nsim=9, use.theory=FALSE, leaveout=2,
+                      verbose=FALSE)
+  b <- dg.sigtrace(redwood, Lest, nsim=5, use.theory=FALSE, leaveout=2)
+  bb <- dg.progress(redwood, Lest, nsim=5, use.theory=FALSE, leaveout=2,
+                    verbose=FALSE)
+})
+#
+# tests/slrm.R
+#
+# $Revision: 1.1 $ $Date: 2013/04/19 10:14:52 $
+#
+# Test slrm fitting and prediction when there are NA's
+#
+
+require(spatstat)
+local({
+  X <- copper$SouthPoints
+  W <- owin(poly=list(x=c(0,35,35,1),y=c(1,1,150,150)))
+  Y <- X[W]
+  fit <- slrm(Y ~ x+y)
+  pred <- predict(fit)
+})
+
+
+#'    tests/sparse3Darrays.R
+#'  Basic tests of sparse3array.R code
+#'  $Revision: 1.8 $ $Date: 2017/02/22 09:00:27 $
+
+require(spatstat)
+local({
+
+  if(require(Matrix)) {
+    M <- sparse3Darray(i=1:4, j=sample(1:4, replace=TRUE),
+                       k=c(1,2,1,2), x=1:4, dims=c(5,5,2))
+
+    M
+
+    dimnames(M) <- list(letters[1:5], LETTERS[1:5], c("yes", "no"))
+    M
+    
+    U <- aperm(M, c(1,3,2))
+    U
+    
+    M[ 3:4, , ]
+    
+    M[ 3:4, 2:4, ]
+    
+    M[, 3, ]
+
+    M[, 3, , drop=FALSE]
+    
+    MA <- as.array(M)
+    UA <- as.array(U)
+
+    ## tests of "[<-.sparse3Darray"
+    Mflip <- Mzero <- MandM <- M
+    Mflip[ , , 2:1] <- M
+    stopifnot(Mflip[3,1,1] == M[3,1,2])
+    Mzero[1:3,1:3,] <- 0
+    stopifnot(all(Mzero[1,1,] == 0))
+    M2a <- M[,,2,drop=FALSE]
+    M2d <- M[,,2,drop=TRUE]
+    MandM[,,1] <- M2a
+    MandM[,,1] <- M2d
+
+    # matrix index
+    M[cbind(3:5, 2, 2)]
+    
+    ## tests of arithmetic (Math, Ops, Summary)
+    negM <- -M
+    oneM <- 1 * M
+    twoM <- M + M
+    range(M)
+
+    cosM <- cos(M)  # non-sparse
+    sinM <- sin(M)  # sparse
+    
+    stopifnot(all((M+M) == 2*M))     # non-sparse
+    stopifnot(!any((M+M) != 2*M))    # sparse
+
+    ztimesM <- (1:5) * M  # sparse
+    zplusM <- (1:5) + M  # non-sparse
+    
+    ## tensor operator
+
+    tenseur(c(1,-1), M, 1, 3)
+    tenseur(M, M, 1:2, 1:2)
+    tenseur(M, M, 1:2, 2:1)
+    V <- sparseVector(i=c(1,3,6),x=1:3, length=7)
+    tenseur(V,V)
+    tenseur(V,V,1,1)
+
+    ## test of anyNA method
+    anyNA(M)
+    
+    ## a possible application in spatstat
+    cl10 <- as.data.frame(closepairs(cells, 0.1))
+    cl12 <- as.data.frame(closepairs(cells, 0.12))
+    cl10$k <- 1
+    cl12$k <- 2
+    cl <- rbind(cl10, cl12)
+    n <- npoints(cells)
+    Z <- with(cl,
+              sparse3Darray(i=i, j=j, k=k, x=1, dims=c(n,n,2)))
+    dimnames(Z) <- list(NULL, NULL, c("r=0.1", "r=0.12"))
+
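+    ## Z[i,j,k] = 1 when points i and j of 'cells' are closer than the
+    ## k-th distance threshold; aperm() moves the threshold index first,
+    ## giving the m x n x n layout that sumsymouter() expects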
+    Z <- aperm(Z, c(3,1,2))
+    stopifnot(all(sumsymouterSparse(Z) == sumsymouter(as.array(Z))))
+
+    # no entries indexed
+    Z[integer(0), integer(0), integer(0)] <- 42
+    Z[matrix(, 0, 3)] <- 42
+  }
+})
+
+
+#
+#  tests/splitpea.R
+#
+#  Check behaviour of split.ppp etc
+#
+#  Thanks to Marcelino de la Cruz
+#
+#  $Revision: 1.11 $  $Date: 2016/03/05 01:33:47 $
+#
+
+require(spatstat)
+
+local({
+W <- square(8)
+X <- ppp(c(2.98, 4.58, 7.27, 1.61, 7.19),
+         c(7.56, 5.29, 5.03, 0.49, 1.65),
+         window=W)
+Z <- quadrats(W, 4, 4)
+Yall <- split(X, Z, drop=FALSE)
+Ydrop <- split(X, Z, drop=TRUE)
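+# each component of the split should contain only points lying inside
+# its own tile window; a point outside (a "black hole") would mean
+# split() assigned a point to the wrong tile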
+
+P <- Yall[[1]]
+if(!all(inside.owin(P$x, P$y, P$window)))
+  stop("Black hole detected when drop=FALSE")
+P <- Ydrop[[1]]
+if(!all(inside.owin(P$x, P$y, P$window)))
+  stop("Black hole detected when drop=TRUE")
+
+Ydrop[[1]] <- P[1]
+split(X, Z, drop=TRUE) <- Ydrop
+
+# test NA handling
+Zbad <- quadrats(square(4), 2, 2)
+Ybdrop <- split(X, Zbad, drop=TRUE)
+Yball  <- split(X, Zbad, drop=FALSE)
+
+# From Marcelino
+set.seed(1)
+W<- square(10) # the big window
+puntos<- rpoispp(0.5, win=W)
+data(letterR)
+r00 <- letterR
+r05 <- shift(letterR,c(0,5))
+r50 <- shift(letterR,c(5,0))
+r55 <- shift(letterR,c(5,5))
+tessr4 <- tess(tiles=list(r00, r05,r50,r55))
+puntosr4 <- split(puntos, tessr4, drop=TRUE)
+split(puntos, tessr4, drop=TRUE) <- puntosr4
+
+## More headaches with mark format
+A <- runifpoint(10)
+B <- runifpoint(10)
+AB <- split(superimpose(A=A, B=B))
+
+#' check that split<- respects ordering where possible
+X <- amacrine
+Y <- split(X)
+split(X) <- Y
+stopifnot(identical(X, amacrine))
+
+#' split.ppx
+df <- data.frame(x=runif(4),y=runif(4),t=runif(4),
+                 age=rep(c("old", "new"), 2),
+                 mineral=factor(rep(c("Au","Cu"), each=2),
+                                levels=c("Au", "Cu", "Pb")),
+                 size=runif(4))
+X <- ppx(data=df, coord.type=c("s","s","t","m", "m","m"))
+Y <- split(X, "age")
+Y <- split(X, "mineral", drop=TRUE)
+
+})
+#
+#   tests/step.R
+#
+#   $Revision: 1.4 $  $Date: 2015/12/29 08:54:49 $
+#
+# test for step() operation
+#
+require(spatstat)
+local({
+  Z <- as.im(function(x,y){ x^3 - y^2 }, nztrees$window)
+  fitP <- ppm(nztrees ~x+y+Z, covariates=list(Z=Z))
+  step(fitP)
+  fitS <- update(fitP, Strauss(7))
+  step(fitS)
+  fitM <- ppm(amacrine ~ marks*(x+y),
+              MultiStrauss(types=levels(marks(amacrine)), radii=matrix(0.04, 2, 2)))
+  step(fitM)
+})
+
+
+##
+## tests/symbolmaps.R
+##
+##   Quirks associated with symbolmaps, etc.
+##
+## $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $
+
+local({
+  require(spatstat)
+  set.seed(100)
+  
+  ## spacing too large for tiles - upsets various pieces of code
+  V <- as.im(dirichlet(runifpoint(8)))
+  textureplot(V, spacing=2)
+
+  g1 <- symbolmap(range=c(0,100), size=function(x) x/50)
+  invoke.symbolmap(g1, 50, x=numeric(0), y=numeric(0), add=TRUE)
+
+})
+#
+#   tests/testaddvar.R
+#
+# test addvar options
+#
+#   $Revision: 1.2 $  $Date: 2015/12/29 08:54:49 $
+
+require(spatstat)
+local({
+  X <- rpoispp(function(x,y){exp(3+3*x)})
+  model <- ppm(X ~y)
+  addvar(model, "x", crosscheck=TRUE)
+  addvar(model, "x", bw.input="quad")
+  w <- square(0.5)
+  addvar(model, "x", subregion=w)
+  addvar(model, "x", subregion=w, bw.input="points")
+})
+#
+#   tests/testparres.R
+#
+# additional test of parres
+#
+#  $Revision: 1.2 $  $Date: 2015/12/29 08:54:49 $
+#
+require(spatstat)
+local({
+X <-  rpoispp(function(x,y){exp(3+x+2*x^2)})
+model <- ppm(X ~x+y)
+
+# options in parres
+parres(model, "x")
+parres(model, "x", bw.input="quad")
+w <- square(0.5)
+parres(model, "x", subregion=w)
+parres(model, "x", subregion=w, bw.input="quad")
+
+# check whether 'update.ppm' has messed up internals
+mod2 <- update(model, ~x)
+parres(mod2, "x")
+})
+#
+# tests/triplets.R
+#
+# test code for triplet interaction
+#
+# $Revision: 1.5 $ $Date: 2015/12/29 08:54:49 $
+#
+require(spatstat)
+local({
+  fit <- ppm(redwood ~1, Triplets(0.1))
+  fit
+  suffstat(fit)
+  # hard core (zero triangles, coefficient is NA)
+  fit0 <- ppm(cells ~1, Triplets(0.05))
+  fit0
+  suffstat(fit0)
+  # bug case (1 triangle in data)
+  fit1 <- ppm(cells ~1, Triplets(0.15))
+  fit1
+  suffstat(fit1)
+})
+#
+#  tests/undoc.R
+#
+#   $Revision: 1.3 $   $Date: 2017/02/20 10:51:56 $
+#
+#  Test undocumented hacks, etc
+
+require(spatstat)
+local({
+  # pixellate.ppp accepts a data frame of weights
+  pixellate(cells, weights=data.frame(a=1:42, b=42:1))
+})
+
+
+
+
+##
+##  tests/updateppm.R
+##
+##  Check validity of update.ppm
+##
+##  $Revision: 1.4 $ $Date: 2016/03/08 06:30:46 $
+
+local({
+    require(spatstat)
+    h <- function(m1, m2) {
+        mc <- deparse(sys.call())
+        cat(paste(mc, "\t... "))
+        m1name <- deparse(substitute(m1))
+        m2name <- deparse(substitute(m2))
+        if(!identical(names(coef(m1)), names(coef(m2))))
+            stop(paste("Differing results for", m1name, "and", m2name,
+                       "in updateppm.R"),
+                 call.=FALSE)
+        cat("OK\n")
+    }
+    X <- redwood[c(TRUE,FALSE)]
+    Y <- redwood[c(FALSE,TRUE)]
+    fit0f <- ppm(X ~ 1, nd=8)
+    fit0p <- ppm(X, ~1, nd=8)
+    fitxf <- ppm(X ~ x, nd=8)
+    fitxp <- ppm(X, ~x, nd=8)
+
+    cat("Basic consistency ...\n")
+    h(fit0f, fit0p)
+    h(fitxf, fitxp)
+
+    cat("\nTest correct handling of model formulas ...\n")
+    h(update(fitxf, Y), fitxf)
+    h(update(fitxf, Q=Y), fitxf)
+    h(update(fitxf, Y~x), fitxf)
+    h(update(fitxf, Q=Y~x), fitxf)
+    h(update(fitxf, ~x), fitxf)
+
+    h(update(fitxf, Y~1), fit0f)
+    h(update(fitxf, ~1), fit0f)
+    h(update(fit0f, Y~x), fitxf)
+    h(update(fit0f, ~x), fitxf)
+
+    h(update(fitxp, Y), fitxp)
+    h(update(fitxp, Q=Y), fitxp)
+    h(update(fitxp, Y~x), fitxp)
+    h(update(fitxp, Q=Y~x), fitxp)
+    h(update(fitxp, ~x), fitxp)
+
+    h(update(fitxp, Y~1), fit0p)
+    h(update(fitxp, ~1), fit0p)
+    h(update(fit0p, Y~x), fitxp)
+    h(update(fit0p, ~x), fitxp)
+
+    cat("\nTest scope handling for left hand side ...\n")
+    X <- Y
+    h(update(fitxf), fitxf)
+
+    cat("\nTest scope handling for right hand side ...\n")
+    Z <- distmap(X)
+    fitZf <- ppm(X ~ Z)
+    fitZp <- ppm(X, ~ Z)
+    h(update(fitxf, X ~ Z), fitZf)
+    h(update(fitxp, X ~ Z), fitZp)
+    h(update(fitxf, . ~ Z), fitZf)
+    h(update(fitZf, . ~ x), fitxf)
+    h(update(fitZf, . ~ . - Z), fit0f)
+    h(update(fitxp, . ~ Z), fitZp)
+    h(update(fitZp, . ~ . - Z), fit0p)
+    h(update(fit0p, . ~ . + Z), fitZp)
+    h(update(fitZf, . ~ . ), fitZf)
+    h(update(fitZp, . ~ . ), fitZp)
+
+    cat("\nTest use of internal data ...\n")
+    h(update(fitZf, ~ x, use.internal=TRUE), fitxf)
+    fitsin <- update(fitZf, X~sin(Z))
+    h(update(fitZf, ~ sin(Z), use.internal=TRUE), fitsin)
+
+    cat("\nTest step() ... ")
+    fut <- ppm(X ~ Z + x + y, nd=8)
+    fut0 <- step(fut, trace=0)
+    cat("OK\n")
+})
+
+# test update.lppm
+
+local({
+  X <- runiflpp(20, simplenet)
+  fit0 <- lppm(X ~ 1)
+  fit1 <- update(fit0, ~ x)
+  anova(fit0, fit1, test="LR")
+  cat("update.lppm(fit, ~trend) is OK\n")
+  fit2 <- update(fit0, . ~ x)
+  anova(fit0, fit2, test="LR")
+  cat("update.lppm(fit, . ~ trend) is OK\n")
+})
+#
+#  tests/vcovppm.R
+#
+#  Check validity of vcov.ppm algorithms
+#
+#  Thanks to Ege Rubak
+#
+#  $Revision: 1.6 $  $Date: 2015/12/29 08:54:49 $
+#
+
+require(spatstat)
+
+local({
+
+  set.seed(42)
+  X <- rStrauss(200, .5, .05)
+  model <- ppm(X, inter = Strauss(.05))
+
+  b  <- vcov(model, generic = TRUE, algorithm = "basic")
+  v  <- vcov(model, generic = TRUE, algorithm = "vector")
+  vc <- vcov(model, generic = TRUE, algorithm = "vectorclip")
+  vn <- vcov(model, generic = FALSE)
+
+  disagree <- function(x, y, tol=1e-7) { max(abs(x-y)) > tol }
+  asymmetric <- function(x) { disagree(x, t(x)) }
+
+  if(asymmetric(b))
+    stop("Non-symmetric matrix produced by vcov.ppm 'basic' algorithm")
+  if(asymmetric(v))
+    stop("Non-symmetric matrix produced by vcov.ppm 'vector' algorithm")
+  if(asymmetric(vc))
+    stop("Non-symmetric matrix produced by vcov.ppm 'vectorclip' algorithm")
+  if(asymmetric(vn))
+    stop("Non-symmetric matrix produced by vcov.ppm Strauss algorithm")
+  
+  if(disagree(v, b))
+    stop("Disagreement between vcov.ppm algorithms 'vector' and 'basic' ")
+  if(disagree(v, vc))
+    stop("Disagreement between vcov.ppm algorithms 'vector' and 'vectorclip' ")
+  if(disagree(vn, vc))
+    stop("Disagreement between vcov.ppm generic and Strauss algorithms")
+
+  # Geyer code
+  xx <- c(0.7375956, 0.6851697, 0.6399788, 0.6188382)
+  yy <- c(0.5816040, 0.6456319, 0.5150633, 0.6191592)
+  Y <- ppp(xx, yy, window=square(1))
+  modelY <- ppm(Y ~1, Geyer(0.1, 1))
+
+  b  <- vcov(modelY, generic = TRUE, algorithm = "basic")
+  v  <- vcov(modelY, generic = TRUE, algorithm = "vector")
+  vc <- vcov(modelY, generic = TRUE, algorithm = "vectorclip")
+
+  if(asymmetric(b))
+    stop("Non-symmetric matrix produced by vcov.ppm 'basic' algorithm for Geyer model")
+  if(asymmetric(v))
+    stop("Non-symmetric matrix produced by vcov.ppm 'vector' algorithm for Geyer model")
+  if(asymmetric(vc))
+    stop("Non-symmetric matrix produced by vcov.ppm 'vectorclip' algorithm for Geyer model")
+  
+  if(disagree(v, b))
+    stop("Disagreement between vcov.ppm algorithms 'vector' and 'basic' for Geyer model")
+  if(disagree(v, vc))
+    stop("Disagreement between vcov.ppm algorithms 'vector' and 'vectorclip' for Geyer model")
+
+
+  ## tests of 'deltasuffstat' code
+  ##     Handling of offset terms
+  modelH <- ppm(cells ~x, Hardcore(0.05))
+  a <- vcov(modelH, generic=TRUE) ## may fall over
+  b <- vcov(modelH, generic=FALSE)
+  if(disagree(a, b))
+    stop("Disagreement between vcov.ppm algorithms for Hardcore model")
+  
+  ##     Correctness of pairwise.family$delta2
+  modelZ <- ppm(amacrine ~1, MultiStrauss(radii=matrix(0.1, 2, 2)))
+  b <- vcov(modelZ, generic=FALSE)
+  g <- vcov(modelZ, generic=TRUE)
+  if(disagree(b, g))
+    stop("Disagreement between vcov.ppm algorithms for MultiStrauss model")
+
+  ## Test that 'deltasuffstat' works for Hybrids
+  modHyb <- ppm(japanesepines ~ 1, Hybrid(Strauss(0.05), Strauss(0.1)))
+})
+#
+# tests/windows.R
+#
+# Tests of owin geometry code
+#
+#  $Revision: 1.3 $  $Date: 2015/12/29 08:54:49 $
+
+require(spatstat)
+local({
+  # Ege Rubak spotted this problem in 1.28-1
+  A <- as.owin(ants)
+  B <- dilation(A, 140)
+  if(!is.subset.owin(A, B))
+    stop("is.subset.owin fails in polygonal case")
+
+  # thanks to Tom Rosenbaum
+  A <- shift(square(3), origin="midpoint")
+  B <- shift(square(1), origin="midpoint")
+  AB <- setminus.owin(A, B)
+  D <- shift(square(2), origin="midpoint")
+  if(is.subset.owin(D,AB))
+    stop("is.subset.owin fails for polygons with holes")
+
+  ## thanks to Brian Ripley / SpatialVx
+  M <- as.mask(letterR)
+  stopifnot(area(bdry.mask(M)) > 0)
+  stopifnot(area(convexhull(M)) > 0)
+  R <- as.mask(square(1))
+  stopifnot(area(bdry.mask(R)) > 0)
+  stopifnot(area(convexhull(R)) > 0)
+})
+
+##
+## tests/xysegment.R
+##
+##    Test weird problems and boundary cases for line segment code
+##
+##    $Version$ $Date: 2016/02/12 08:18:08 $ 
+##
+require(spatstat)
+local({
+  # segment of length zero
+  B <- psp(1/2, 1/2, 1/2, 1/2, window=square(1))
+  BB <- angles.psp(B)
+  A <- runifpoint(3)
+  AB <- project2segment(A,B)
+
+  # mark inheritance
+  X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
+  marks(X) <- 1:10
+  Y <- selfcut.psp(X)
+  marks(X) <- data.frame(A=1:10, B=factor(letters[1:10]))
+  Z <- selfcut.psp(X)
+})
diff --git a/vignettes/datasets.Rnw b/vignettes/datasets.Rnw
new file mode 100644
index 0000000..a530329
--- /dev/null
+++ b/vignettes/datasets.Rnw
@@ -0,0 +1,870 @@
+\documentclass[11pt]{article}
+
+% \VignetteIndexEntry{Datasets Provided in Spatstat}
+
+<<echo=FALSE,results=hide,fig=FALSE>>=
+options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1))))
+@ 
+
+\usepackage{graphicx}
+\usepackage{anysize}
+\marginsize{2cm}{2cm}{2cm}{2cm}
+
+\newcommand{\pkg}[1]{\texttt{#1}}
+\newcommand{\bold}[1]{{\textbf {#1}}}
+\newcommand{\R}{{\sf R}}
+\newcommand{\spst}{\pkg{spatstat}}
+\newcommand{\Spst}{\pkg{Spatstat}}
+
+\begin{document}
+\bibliographystyle{plain}
+\thispagestyle{empty}
+\SweaveOpts{eps=TRUE}
+\setkeys{Gin}{width=0.6\textwidth}
+
+<<echo=FALSE,results=hide>>=
+library(spatstat)
+sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Date")
+sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Version")
+spatstat.options(transparent=FALSE)
+options(useFancyQuotes=FALSE)
+@ 
+
+\title{Datasets provided in \spst}
+\author{Adrian Baddeley, Rolf Turner and Ege Rubak}
+\date{For \spst\ version \texttt{\Sexpr{sversion}}}
+\maketitle
+
+This document is an overview of the spatial datasets
+that are provided in the \spst\ package. 
+
+To flick through a nice display of all the data sets that come with
+\spst, type \texttt{demo(data)}.  To see information about a given
+data set, type \texttt{help({\em name})} where \emph{name} is the
+name of the data set.  To plot a given data set,
+type \texttt{plot({\em name})}.
+
+Datasets in \spst\ are ``lazy-loaded'', which means that they can
+be accessed simply by typing their name. Not all packages do this;
+in some packages you have to type \texttt{data({\em name})} in
+order to access a data set.
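+
+For example, after loading \spst\ the following works immediately,
+with no prior call to \texttt{data()}
+(a minimal illustration using the built-in \texttt{cells} dataset):
+<<eval=FALSE>>=
+cells            # lazy-loaded: typing the name prints the pattern
+summary(cells)   # numerical summary of the point pattern
+@ 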
+
+\section{List of datasets}
+
+\subsection{Point patterns in 2D}
+
+Here is a list of the standard point pattern data sets 
+that are supplied with the current installation of \spst:
+
+\newcommand{\recto}{\framebox{\hphantom{re}\vphantom{re}}}
+\newcommand{\irregpoly}{\includegraphics*[width=6mm]{irregpoly}}
+\newcommand{\convpoly}{\includegraphics*[width=4mm]{hexagon}}
+\newcommand{\disc}{$\bigcirc$}
+\newcommand{\nomarks}{$\cdot$}
+\newcommand{\nocov}{$\cdot$}
+
+\begin{tabular}{l|l|ccc}
+{\sf name} & {\sf description} &
+            {\sf marks} & {\sf covariates} & {\sf window} \\ \hline
+{\tt amacrine} & rabbit amacrine cells &
+            cell type & \nocov & \recto \\  
+{\tt anemones} & sea anemones  & 
+            diameter & \nocov & \recto \\
+{\tt ants} & ant nests& 
+            species & zones  & \convpoly \\
+{\tt bdspots} & breakdown spots & 
+           \nomarks           & \nocov & \disc \\
+{\tt bei} & rainforest trees & 
+           \nomarks           & topography & \recto \\
+{\tt betacells} & cat retinal ganglia & 
+            cell type, area & \nocov & \recto \\
+{\tt bramblecanes} & bramble canes & 
+            age & \nocov & \recto \\
+{\tt bronzefilter} & bronze particles & 
+            diameter & \nocov & \recto \\
+{\tt cells} & biological cells &
+             \nomarks &\nocov & \recto \\
+{\tt chorley} & cancers & 
+            case/control &\nocov  & \irregpoly \\
+{\tt clmfires} & forest fires & 
+            cause, size, date & 
+            \shortstack[c]{elevation, orientation,\\ slope, land use}  
+            & \irregpoly \\
+{\tt copper} & copper deposits & 
+             \nomarks & fault lines & \recto  \\
+{\tt demopat} & artificial data & 
+             type & \nocov & \irregpoly \\
+{\tt finpines} & trees & 
+             diam, height & \nocov & \recto  \\
+{\tt gordon} & people in a park & 
+             \nomarks & \nocov & \irregpoly  \\
+{\tt gorillas} & gorilla nest sites & 
+             group, season & 
+            \shortstack[c]{terrain, vegetation,\\ heat, water} & 
+             \irregpoly  \\
+{\tt hamster} & hamster tumour cells & 
+              cell type &\nocov  & \recto \\
+{\tt humberside} & child leukaemia & 
+              case/control & \nocov & \irregpoly\\
+{\tt hyytiala} & mixed forest & 
+              species &\nocov  & \recto \\
+{\tt japanesepines} & Japanese pines & \nomarks &\nocov & \recto \\
+{\tt lansing} & mixed forest &
+               species & \nocov & \recto \\
+{\tt longleaf} & trees & 
+              diameter & \nocov &  \recto \\
+{\tt mucosa}   & gastric mucosa cells & 
+              cell type & \nocov &  \recto \\
+{\tt murchison} & gold deposits & \nomarks & faults, rock type & \irregpoly \\
+{\tt nbfires} & wildfires & several & \nocov & \irregpoly \\
+{\tt nztrees} & trees & \nomarks & \nocov & \recto \\
+{\tt paracou} & trees & adult/juvenile & \nocov & \recto \\
+{\tt ponderosa} & trees & \nomarks & \nocov & \recto \\
+{\tt redwood} & saplings & \nomarks & \nocov & \recto \\
+{\tt redwood3} & saplings & \nomarks & \nocov & \recto \\
+{\tt redwoodfull} & saplings & 
+              \nomarks & zones & \recto \\
+{\tt shapley} & galaxies & magnitude, recession, SE & \nocov & \convpoly \\
+{\tt simdat} & simulated pattern & \nomarks & \nocov & \recto \\
+{\tt sporophores} & fungi & species & \nocov &  \disc \\
+{\tt spruces} & trees & diameter & \nocov &  \recto \\
+{\tt swedishpines} & trees & \nomarks & \nocov & \recto \\
+{\tt urkiola} & mixed forest & species & \nocov & \irregpoly \\
+{\tt vesicles} & synaptic vesicles & \nomarks & zones & \irregpoly \\
+{\tt waka} & trees & diameter & \nocov & \recto \\
+\hline
+\end{tabular}
+
+\bigskip
+\noindent
+The shape of the window containing the point pattern
+is indicated by the symbols \recto\ (rectangle), 
+\disc\ (disc), \convpoly\ (convex polygon) and \irregpoly\ (irregular polygon).
+
+Additional information about the data set \texttt{\em name}
+may be stored in a separate list \texttt{{\em name}.extra}.
+Currently these are the available options:
+
+\begin{tabular}{ll}
+  {\sc Name} & {\sc Contents} \\ 
+  \hline
+  {\tt ants.extra} & field and scrub subregions; \\
+                   & additional map elements; plotting function \\
+  {\tt bei.extra} & covariate images \\
+  {\tt chorley.extra} & incinerator location; plotting function \\
+  {\tt gorillas.extra} & covariate images\\
+  {\tt nbfires.extra} & inscribed rectangle \\
+  {\tt ponderosa.extra} & data points of interest; plotting function\\
+  {\tt redwoodfull.extra} & subregions; plotting function \\
+  {\tt shapley.extra} & individual survey fields; plotting function \\
+  {\tt vesicles.extra} & anatomical regions \\
+  \hline
+\end{tabular}
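+
+For example, such a list can be inspected and used directly
+(a minimal sketch using \texttt{bei.extra}, whose entries are
+pixel images):
+<<eval=FALSE>>=
+names(bei.extra)       # names of the extra components
+plot(bei.extra$elev)   # plot the elevation covariate image
+@ %$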
+
+For demonstration and instruction purposes, 
+raw data files are available for the datasets 
+\texttt{vesicles}, \texttt{gorillas} and \texttt{osteo}.
+
+\subsection{Other Data Types}
+
+There are also the following spatial data sets which are not 2D point patterns:
+
+\begin{tabular}[c]{l|l|l}
+{\sf name} & {\sf description} & {\sf format} \\ \hline
+{\tt austates} & Australian states & tessellation \\
+{\tt chicago} & crimes & point pattern on linear network \\
+{\tt dendrite} & dendritic spines & point pattern on linear network \\
+{\tt spiders} & spider webs & point pattern on linear network \\
+{\tt flu} & virus proteins & replicated 2D point patterns \\
+{\tt heather} & heather mosaic & binary image (three versions) \\
+{\tt demohyper} & simulated data & replicated 2D point patterns with covariates\\
+{\tt osteo} & osteocyte lacunae & replicated 3D point patterns with covariates\\
+{\tt pyramidal} & pyramidal neurons & replicated 2D point patterns in 3 groups\\
+{\tt residualspaper} 
+                & data \& code from Baddeley et al (2005) &  
+                       2D point patterns, \R\ function \\
+{\tt simba} & simulated data & replicated 2D point patterns in 2 groups\\
+{\tt waterstriders} & insects on water & replicated 2D point patterns\\
+\hline
+\end{tabular}
+
+Additionally there is a dataset \texttt{Kovesi} containing
+several colour maps with perceptually uniform contrast. 
+
+\section{Information on each dataset}
+
+Here we give basic information about each dataset.
+For further information, consult the help file for the 
+particular dataset.
+
+<<echo=FALSE>>=
+opa <- par()
+## How to set all margins to zero and eliminate all outer spaces
+zeromargins <- function() {
+  par(
+      mar=rep(0,4),
+      omd=c(0,1,0,1),
+      xaxs="i",
+      yaxs="i"
+  )
+  invisible(NULL)
+}
+## Set 'mar'
+setmargins <- function(...) {
+  x <- c(...)
+  x <- rep(x, 4)[1:4]
+  par(mar=x)
+  invisible(NULL)
+}
+@ 
+
+\subsubsection*{\texttt{amacrine}: Amacrine cells}
+
+Locations of displaced amacrine cells in the retina of a rabbit.
+There are two types of points, ``on'' and ``off''.
+
+\SweaveOpts{width=5.5,height=3}\setkeys{Gin}{width=0.8\textwidth}
+<<eval=FALSE>>=
+plot(amacrine)
+@ 
+<<fig=TRUE,echo=FALSE,results=hide>>=
+setmargins(0,1,2,0)
+plot(amacrine)
+@ 
+
+\subsubsection*{\texttt{anemones}: Sea Anemones}
+
+These data give the spatial locations and diameters
+of sea anemones on a boulder near sea level.
+
+\SweaveOpts{width=7,height=4.5}\setkeys{Gin}{width=0.8\textwidth}
+<<eval=FALSE>>=
+plot(anemones, markscale=1)
+@ 
+<<fig=TRUE,echo=FALSE,results=hide>>=
+setmargins(0,0,2,0)
+plot(anemones, markscale=1)
+@ 
+
+\subsubsection*{\texttt{ants}: Ants' nests}
+
+Spatial locations of nests of two species of
+ants at a site in Greece.
+The full dataset (supplied here) has an irregular polygonal boundary,
+while most analyses have been confined to two rectangular
+subsets of the pattern (also supplied here).
+
+% Parameters for Ants data with key at right
+\SweaveOpts{width=6.3,height=4}\setkeys{Gin}{width=0.7\textwidth}
+<<eval=FALSE>>=
+ants.extra$plotit()
+@ %$
+<<fig=TRUE,echo=FALSE,results=hide>>=
+setmargins(0,0,1,0)
+ants.extra$plotit()
+@ %$
+
+\subsubsection*{\texttt{austates}: Australian states}
+
+  The states and large mainland territories of Australia are
+  represented as polygonal regions forming a tessellation.
+
+<<fig=TRUE>>=
+plot(austates)
+@   
+
+\subsubsection*{\texttt{bdspots}: Breakdown spots}
+
+A list of three point patterns, each giving the locations of
+electrical breakdown spots on a circular electrode in
+a microelectronic capacitor.
+
+\SweaveOpts{width=12,height=6}\setkeys{Gin}{width=\textwidth}
+<<eval=FALSE>>=
+plot(bdspots, equal.scales=TRUE, pch="+", 
+     panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i]))
+@   
+<<fig=TRUE,echo=FALSE>>=
+zeromargins()
+plot(bdspots, equal.scales=TRUE, pch="+", main="",
+     mar.panel=0, hsep=1,
+     panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i]))
+@   
+
+\subsubsection*{\texttt{bei}: Beilschmiedia data}
+
+Locations of 3605 trees in a tropical rain forest.
+Accompanied by covariate data giving the elevation (altitude)
+and slope of elevation in the study region.
+  
+\SweaveOpts{width=12,height=6}\setkeys{Gin}{width=0.8\textwidth}
+<<eval=FALSE>>=
+plot(bei.extra$elev, main="Beilschmiedia")
+plot(bei, add=TRUE, pch=16, cex=0.3)
+@ 
+<<fig=TRUE,echo=FALSE,results=hide>>=
+setmargins(0,0,2,0)
+plot(bei.extra$elev, main="Beilschmiedia")
+plot(bei, add=TRUE, pch=16, cex=0.3)
+@ 
+
+<<fig=TRUE>>=
+M <- persp(bei.extra$elev, 
+           theta=-45, phi=18, expand=7,
+           border=NA, apron=TRUE, shade=0.3, 
+           box=FALSE, visible=TRUE,
+           main="")
+perspPoints(bei, Z=bei.extra$elev, M=M, pch=16, cex=0.3)
+@ 
+
+\subsubsection*{\texttt{betacells}: Beta ganglion cells}
+
+Locations of beta ganglion cells in cat retina,
+each cell classified as `on' or `off'
+and also labelled with the cell profile area.
+  
+<<fig=TRUE>>=
+plot(betacells)
+@ 
+
+\subsubsection*{\texttt{bramblecanes}: Bramble canes}
+
+<<fig=TRUE>>=
+plot(bramblecanes, cols=1:3)
+@ 
+
+<<fig=TRUE>>=
+plot(split(bramblecanes))
+@ 
+
+\subsubsection*{\texttt{bronzefilter}: Bronze filter section profiles}
+
+Spatially inhomogeneous pattern of
+circular section profiles of particles, observed in a
+longitudinal plane section through a gradient sinter
+filter made from bronze powder.
+
+<<fig=TRUE>>=
+plot(bronzefilter,markscale=2)
+@ 
+
+\subsubsection*{\texttt{cells}: Biological cells}
+
+Locations of the centres of 42 biological cells
+observed under optical microscopy in a histological section.
+Often used as a demonstration example.
+
+<<fig=TRUE>>=
+plot(cells)
+@ 
+
+\subsubsection*{\texttt{chicago}: Chicago crimes}
+
+Locations (street addresses) of crimes reported in a two-week period
+in an area close to the University of Chicago.
+A multitype point pattern on a linear network.
+
+<<fig=TRUE>>=
+plot(chicago, main="Chicago Crimes", col="grey",
+     cols=c("red", "blue", "black", "blue", "red", "blue", "blue"),
+     chars=c(16,2,22,17,24,15,6), leg.side="left", show.window=FALSE)
+@ 
+
+\subsubsection*{\texttt{chorley}: Chorley-Ribble cancer data}
+
+Spatial locations of cases of cancer of the larynx
+and cancer of the lung, and the location of a disused industrial
+incinerator. A marked point pattern, with an irregular window
+and a simple covariate.
+
+<<fig=TRUE>>=
+chorley.extra$plotit()
+@ %$
+
+\subsubsection*{\texttt{clmfires}: Castilla-La Mancha Fires}
+
+Forest fires in the Castilla-La Mancha
+region of Spain between 1998 and 2007.
+A point pattern with 4 columns of marks:
+
+\begin{tabular}{ll}
+  \texttt{cause} & cause of fire\\ 
+  \texttt{burnt.area} & total area burned, in hectares \\
+  \texttt{date} & date of fire \\
+  \texttt{julian.date} & date of fire in days since 1.1.1998
+\end{tabular}
+  
+<<fig=TRUE>>=
+plot(clmfires, which.marks="cause", cols=2:5, cex=0.25,
+     main="Castilla-La Mancha forest fires")
+@ 
+
+The accompanying dataset \texttt{clmfires.extra} is a list
+of two items \texttt{clmcov100} and \texttt{clmcov200} containing covariate
+information for the entire Castilla-La Mancha region. Each
+of these two elements is a list of four pixel images 
+named \texttt{elevation}, \texttt{orientation},
+\texttt{slope} and \texttt{landuse}. 
+
+<<fig=TRUE>>=
+plot(clmfires.extra$clmcov200, main="Covariates for forest fires")
+@ %$ 
+
+\subsubsection*{\texttt{copper}: Queensland copper data}
+
+These data come from an intensive geological survey 
+in central Queensland, Australia.
+They consist of 67 points representing copper ore deposits,
+and 146 line segments representing geological `lineaments',
+mostly faults. 
+
+<<fig=TRUE>>=
+plot(copper$Points, main="Copper")
+plot(copper$Lines, add=TRUE)
+@ 
+
+\subsubsection*{\texttt{demohyper}}
+
+A synthetic example of a \texttt{hyperframe} for demonstration purposes.
+
+<<fig=TRUE>>=
+plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }),
+      parargs=list(mar=rep(1,4)))
+@ 
+
+\subsubsection*{\texttt{demopat}}
+
+A synthetic example of a point pattern for demonstration purposes.
+
+<<fig=TRUE>>=
+plot(demopat)
+@ 
+
+\subsubsection*{\texttt{dendrite}}
+
+  Dendrites are branching filaments which extend from the
+  main body of a neuron (nerve cell) to propagate electrochemical
+  signals. Spines are small protrusions on the dendrites.
+
+  This dataset gives the locations of 566 spines
+  observed on one branch of the dendritic tree of a rat neuron.
+  The spines are classified according to their shape into three types:
+  mushroom, stubby or thin.
+
+<<fig=TRUE>>=
+plot(dendrite, leg.side="bottom", main="", cex=0.75, cols=2:4)
+@ 
+
+\subsubsection*{\texttt{finpines}: Finnish pine saplings}
+
+Locations of 126 pine saplings
+in a Finnish forest, their heights and their diameters.
+
+<<fig=TRUE>>=
+plot(finpines, main="Finnish pines")
+@ 
+
+\subsubsection*{\texttt{flu}: Influenza virus proteins}
+
+  The \texttt{flu} dataset contains
+  replicated spatial point patterns giving the locations of two
+  different virus proteins on the membranes of cells infected with
+  influenza virus.
+  
+  It is a \texttt{hyperframe} containing
+  point patterns and explanatory variables.
+  
+<<fig=TRUE>>=
+wildM1 <- with(flu, virustype == "wt" & stain == "M2-M1")
+plot(flu[wildM1, 1, drop=TRUE],
+     main=c("flu data", "wild type virus, M2-M1 stain"),
+     chars=c(16,3), cex=0.4, cols=2:3)
+@ 
+
+\subsubsection*{\texttt{gordon}: People in Gordon Square}
+
+Locations of people sitting on a grass patch on a sunny afternoon.
+
+  
+<<fig=TRUE>>=
+plot(gordon, main="People in Gordon Square", pch=16)
+@ 
+
+\subsubsection*{\texttt{gorillas}: Gorilla nesting sites}
+
+ Locations of nesting sites of gorillas, and associated covariates,
+  in a National Park in Cameroon.  
+  
+  \texttt{gorillas} is a marked point pattern (object
+  of class \texttt{"ppp"}) representing nest site locations.
+
+  \texttt{gorillas.extra} is a named list of 7 pixel images (objects of
+  class \texttt{"im"}) containing spatial covariates.
+  It also belongs to the class \texttt{"listof"}.
+  
+<<fig=TRUE>>=
+plot(gorillas, which.marks=1, chars=c(1,3), cols=2:3, main="Gorilla nest sites")
+@ 
+
+The \texttt{vegetation} covariate is also available as a raw ASCII format file,
+<<eval=FALSE>>=
+system.file("rawdata/gorillas/vegetation.asc", package="spatstat")
+@ 
+
+\subsubsection*{\texttt{hamster}: Hamster kidney cells}
+
+ Cell nuclei in hamster kidney, each nucleus classified as
+ either `dividing' or `pyknotic'.
+ A multitype point pattern.
+ 
+<<fig=TRUE>>=
+plot(hamster, cols=c(2,4))
+@ 
+
+\subsubsection*{\texttt{heather}: Heather mosaic}
+
+The spatial mosaic of vegetation of the heather plant,
+recorded in a 10 by 20 metre sampling plot in Sweden.
+A list with three entries, representing the same data at
+different spatial resolutions.
+  
+<<fig=TRUE>>=
+plot(heather)
+@ 
+
+\subsubsection*{\texttt{humberside}: Childhood Leukaemia and Lymphoma}
+
+Spatial locations of cases of childhood leukaemia
+and lymphoma, and randomly-selected controls,
+in North Humberside.
+A marked point pattern.
+
+<<fig=TRUE>>=
+plot(humberside)
+@ 
+
+The dataset \texttt{humberside.convex} is an object of the
+same format, representing the same point pattern data,
+but contained in a larger, 5-sided convex polygon.
+
+\subsubsection*{\texttt{hyytiala}: Mixed forest}
+
+Spatial locations and species classification for
+trees in a Finnish forest.
+
+<<fig=TRUE>>=
+plot(hyytiala, cols=2:5)
+@ 
+
+\subsubsection*{\texttt{japanesepines}: Japanese black pine saplings}
+
+Locations of Japanese black pine saplings
+in a square sampling region in a natural forest.
+Often used as a standard example.
+
+<<fig=TRUE>>=
+plot(japanesepines)
+@ 
+
+\subsubsection*{\texttt{lansing}: Lansing Woods}
+
+Locations and botanical classification of trees in a forest.
+A multitype point pattern with 6 different types of points.
+Includes duplicated points.
+
+<<fig=TRUE>>=
+plot(lansing)
+@ 
+
+<<fig=TRUE>>=
+plot(split(lansing))
+@ 
+
+\subsubsection*{\texttt{longleaf}: Longleaf Pines}
+
+Locations and diameters of Longleaf pine trees.
+  
+<<fig=TRUE>>=
+plot(longleaf)
+@ 
+
+\subsubsection*{\texttt{mucosa}: Gastric Mucosa Cells}
+
+A bivariate inhomogeneous point pattern, giving the locations of
+the centres of two types of cells in a cross-section of the
+gastric mucosa of a rat.
+  
+<<fig=TRUE>>=
+plot(mucosa, chars=c(1,3), cols=c("red", "green"))
+plot(mucosa.subwin, add=TRUE, lty=3)
+@ 
+
+\subsubsection*{\texttt{murchison}: Murchison Gold Deposits}
+
+Spatial locations of gold deposits and associated
+geological features in the Murchison area of Western Australia.
+A list of three elements:
+\begin{itemize}
+\item \texttt{gold}, the point pattern of gold deposits;
+\item \texttt{faults}, the line segment pattern of geological faults;
+\item \texttt{greenstone}, the subregion of greenstone outcrop.
+\end{itemize}
+
+<<fig=TRUE>>=
+plot(murchison$greenstone, main="Murchison data", col="lightgreen")
+plot(murchison$gold, add=TRUE, pch=3, col="blue")
+plot(murchison$faults, add=TRUE, col="red")
+@ 
+
+\subsubsection*{\texttt{nbfires}: New Brunswick Fires}
+
+Fires in New Brunswick (Canada) 
+with marks giving information about each fire.
+
+<<fig=TRUE>>=
+plot(nbfires, use.marks=FALSE, pch=".")
+@ 
+
+<<fig=TRUE>>=
+plot(split(nbfires), use.marks=FALSE, chars=".")
+@ 
+
+<<fig=TRUE>>=
+par(mar=c(0,0,2,0))
+plot(split(nbfires)$"2000", which.marks="fire.type",
+     main=c("New Brunswick fires 2000", "by fire type"),
+     cols=c("blue", "green", "red", "cyan"),
+     leg.side="left")
+@ 
+
+\subsubsection*{\texttt{nztrees}: New Zealand Trees}
+
+Locations of trees in a forest plot in New Zealand.
+Often used as a demonstration example.
+
+<<fig=TRUE>>=
+plot(nztrees)
+plot(trim.rectangle(as.owin(nztrees), c(0,5), 0), add=TRUE, lty=3)
+@ 
+
+\subsubsection*{\texttt{osteo}: Osteocyte Lacunae}
+
+Replicated three-dimensional point patterns:
+the three-dimensional locations of 
+  osteocyte lacunae observed in rectangular volumes of
+  solid bone using a confocal microscope.
+A \texttt{hyperframe} containing 3D point patterns
+and explanatory variables.
+  
+  
+<<fig=TRUE>>=
+plot(osteo[1:10,], main.panel="", pch=21, bg='white')
+@ 
+
+For demonstration and instruction purposes, the
+raw data from the 36th point pattern are available in a plain ASCII file in the
+\texttt{spatstat} installation,
+<<eval=FALSE>>=
+system.file("rawdata/osteo/osteo36.txt", package="spatstat")
+@ 
+
+\subsubsection*{\texttt{paracou}: Kimboto trees}
+
+Point pattern of adult and juvenile Kimboto trees
+recorded at Paracou in French Guiana.
+A bivariate point pattern.
+
+<<fig=TRUE>>=
+plot(paracou, cols=2:3, chars=c(16,3))
+@ 
+
+\subsubsection*{\texttt{ponderosa}: Ponderosa Pines}
+
+Locations of Ponderosa Pine trees in a forest.
+Several special points are identified.
+
+<<fig=TRUE>>=
+ponderosa.extra$plotit()
+@  %$
+
+\subsubsection*{\texttt{pyramidal}: Pyramidal Neurons in Brain}
+
+Locations of pyramidal neurons in sections of human brain.
+There is one point pattern from each of 31 human subjects.
+The subjects are divided into three groups:
+controls (12 subjects), schizoaffective (9  subjects)
+and schizophrenic (10 subjects).
+
+<<fig=TRUE>>=
+pyr <- pyramidal
+pyr$grp <- abbreviate(pyramidal$group, minlength=7)
+plot(pyr, quote(plot(Neurons, pch=16, main=grp)), main="Pyramidal Neurons")
+@ 
+
+\subsubsection*{\texttt{redwood}, \texttt{redwood3}, \texttt{redwoodfull}: Redwood seedlings and saplings}
+
+California Redwood seedlings and saplings in a forest.
+There are two versions of this dataset:
+\texttt{redwood} and \texttt{redwoodfull}.
+
+The \texttt{redwoodfull} dataset is the full data.
+It is spatially inhomogeneous in density and spacing of points.
+
+The \texttt{redwood} dataset is a subset of the full data,
+selected because it is apparently homogeneous, and has often 
+been used as a demonstration example. This comes in two versions
+commonly used in the literature:
+\texttt{redwood} (coordinates given to 2 decimal places)
+and \texttt{redwood3} (coordinates given to 3 decimal places).
+
+
+<<fig=TRUE>>=
+plot(redwood)
+plot(redwood3, add=TRUE, pch=20)
+@ 
+
+<<fig=TRUE>>=
+redwoodfull.extra$plotit()
+@ %$
+
+\subsubsection*{\texttt{residualspaper}: Data from residuals paper}
+
+Contains the point patterns used as examples in 
+\begin{quote}
+  A. Baddeley, R. Turner, J. M{\o}ller and M. Hazelton (2005)
+  Residual analysis for spatial point processes.
+  \emph{Journal of the Royal Statistical Society, Series B}
+  \textbf{67}, 617--666
+\end{quote}
+along with {\sf R} code.
+
+<<fig=TRUE>>=
+plot(as.listof(residualspaper[c("Fig1", "Fig4a", "Fig4b", "Fig4c")]), 
+     main="")
+@ 
+
+\subsubsection*{\texttt{shapley}: Shapley Galaxy Concentration}
+
+Sky positions of 4215 galaxies in the Shapley Supercluster
+(mapped by radioastronomy).
+
+<<fig=TRUE>>=
+shapley.extra$plotit(main="Shapley")
+@  %$
+
+\subsubsection*{\texttt{simdat}: Simulated data}
+
+Another simulated dataset used for demonstration purposes.
+
+<<fig=TRUE>>=
+plot(simdat)
+@ 
+
+\subsubsection*{\texttt{spiders}: Spider webs}
+
+Spider webs across the mortar lines of a brick wall. 
+A point pattern on a linear network.
+
+<<fig=TRUE>>=
+plot(spiders, pch=16, show.window=FALSE)
+@ 
+
+\subsubsection*{\texttt{sporophores}: Sporophores}
+
+Sporophores of three species of fungi around a tree.
+
+<<fig=TRUE>>=
+plot(sporophores, chars=c(16,1,2), cex=0.6)
+points(0,0,pch=16, cex=2)
+text(15,8,"Tree", cex=0.75)
+@ 
+
+\subsubsection*{\texttt{spruces}: Spruces in Saxony}
+
+Locations of Norwegian spruce trees 
+in a natural forest stand in Saxony, Germany.
+Each tree is marked with its diameter at breast height.
+ 
+<<fig=TRUE>>=
+plot(spruces, maxsize=min(nndist(spruces)))
+@ 
+
+\subsubsection*{\texttt{swedishpines}: Swedish Pines}
+
+Locations of pine saplings
+in a Swedish forest.
+Often used as a demonstration example.
+
+<<fig=TRUE>>=
+plot(swedishpines)
+@ 
+
+\subsubsection*{\texttt{urkiola}: trees in a wood}
+
+Locations of birch and oak trees  in a secondary wood in
+Urkiola Natural Park (Basque country, northern Spain). 
+Irregular window, bivariate point pattern.
+
+<<fig=TRUE>>=
+plot(urkiola, cex=0.5, cols=2:3)
+@ 
+
+\subsubsection*{\texttt{waka}: trees in Waka National Park}
+
+Spatial coordinates of each tree, marked by the tree diameter at breast height.
+    
+<<fig=TRUE>>=
+par(mar=c(0,0,2,0))
+plot(waka, markscale=0.04, main=c("Waka national park", "tree diameters"))
+@ 
+
+\subsubsection*{\texttt{vesicles}: synaptic vesicles}
+
+Point pattern of synaptic vesicles observed in rat brain tissue.
+
+<<fig=TRUE>>=
+v <- rotate(vesicles, pi/2)
+ve <- lapply(vesicles.extra, rotate, pi/2)
+plot(v, main="Vesicles")
+plot(ve$activezone, add=TRUE, lwd=3)
+@ 
+
+The auxiliary dataset \texttt{vesicles.extra} is a list with entries\\ 
+\begin{tabular}{ll}
+  \texttt{presynapse} & outer polygonal boundary of presynapse \\
+  \texttt{mitochondria} & polygonal boundary of mitochondria \\
+  \texttt{mask} & binary mask representation of vesicles window \\
+  \texttt{activezone} & line segment pattern representing the active zone.
+\end{tabular}
+
+For demonstration and training purposes,
+the raw data files for this dataset are also
+provided in the \pkg{spatstat} package installation:\\ 
+\begin{tabular}{ll}
+  \texttt{vesicles.txt} &  spatial locations of vesicles \\
+  \texttt{presynapse.txt} &  vertices of \texttt{presynapse} \\
+  \texttt{mitochondria.txt}  &  vertices of \texttt{mitochondria} \\
+  \texttt{vesiclesimage.tif}  &  greyscale microscope image \\
+  \texttt{vesiclesmask.tif}  &  binary image of \texttt{mask} \\
+  \texttt{activezone.txt}  &  coordinates of \texttt{activezone} 
+\end{tabular}
+The files are in the folder \texttt{rawdata/vesicles} in the
+\texttt{spatstat} installation directory. The precise location of the
+files can be obtained using \texttt{system.file}, for example
+<<eval=FALSE>>=
+system.file("rawdata/vesicles/mitochondria.txt", package="spatstat")
+@ 
+
+\subsubsection*{\texttt{waterstriders}: Insects on a pond}
+
+Three independent replications of a point pattern
+formed by insects on the surface of a pond.
+  
+<<fig=TRUE>>=
+plot(waterstriders)
+@ 
+
+\end{document}
+
diff --git a/vignettes/getstart.Rnw b/vignettes/getstart.Rnw
new file mode 100644
index 0000000..5c34e53
--- /dev/null
+++ b/vignettes/getstart.Rnw
@@ -0,0 +1,397 @@
+\documentclass[11pt]{article}
+
+% \VignetteIndexEntry{Getting Started with Spatstat}
+
+<<echo=FALSE,results=hide,fig=FALSE>>=
+options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1))))
+@ 
+
+\usepackage{graphicx}
+\usepackage{anysize}
+\marginsize{2cm}{2cm}{2cm}{2cm}
+
+\newcommand{\pkg}[1]{\texttt{#1}}
+\newcommand{\bold}[1]{{\textbf {#1}}}
+\newcommand{\R}{{\sf R}}
+\newcommand{\spst}{\pkg{spatstat}}
+\newcommand{\Spst}{\pkg{Spatstat}}
+
+\begin{document}
+\bibliographystyle{plain}
+\thispagestyle{empty}
+\SweaveOpts{eps=TRUE}
+\setkeys{Gin}{width=0.6\textwidth}
+
+<<echo=FALSE,results=hide>>=
+library(spatstat)
+spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) })
+sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Date")
+sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Version")
+options(useFancyQuotes=FALSE)
+@ 
+
+\title{Getting started with \texttt{spatstat}}
+\author{Adrian Baddeley, Rolf Turner and Ege Rubak}
+\date{For \spst\ version \texttt{\Sexpr{sversion}}}
+\maketitle
+
+Welcome to \spst, a package in the \R\ language
+for analysing spatial point patterns.
+
+This document will help you to get started with \spst.
+It gives you a quick overview of \spst, and some cookbook
+recipes for doing basic calculations. 
+
+\section*{What kind of data does \spst\ handle?}
+
+\Spst\ is mainly designed for analysing \emph{spatial point patterns}.
+For example, suppose you are an ecologist studying plant seedlings. 
+You have pegged out a $10 \times 10$ metre rectangle for your survey.
+Inside the rectangle you identify all the seedlings of the species
+you want, and record their $(x,y)$ locations. You can plot the 
+$(x,y)$ locations:
+
+<<fig=TRUE,echo=FALSE,results=hide>>=
+data(redwood)
+plot(redwood, pch=16, main="")
+@ 
+
+This is a \emph{spatial point pattern} dataset. 
+
+Methods for
+analysing this kind of data are summarised in the
+highly recommended book by Diggle \cite{digg03} and other references
+in the bibliography. \nocite{handbook10,bivapebegome08}
+
+Alternatively the points could be locations in one dimension
+(such as road accidents recorded on a road network) or 
+in three dimensions (such as cells observed in 3D microscopy).
+
+You might also have recorded additional information about each seedling,
+such as its height, or the number of fronds. Such information, attached to
+each point in the point pattern, is called a \emph{mark} variable. For example,
+here is a stand of pine trees, with each tree marked by its
+diameter at breast height (dbh). The circle radii represent the dbh values
+(not to scale).
+
+<<fig=TRUE,echo=FALSE,results=hide>>=
+data(longleaf)
+plot(longleaf, main="")
+@ 
+
+You might also have recorded supplementary data, 
+such as the terrain elevation, which might serve as explanatory variables.
+These data can be in any format. \Spst\ does not usually provide 
+capabilities for analysing such data in their own right, but 
+\spst\ does allow such explanatory data to be taken into account
+in the analysis of a spatial point pattern. 
+
+\Spst\ is \underline{\bf not} designed to handle point data where
+the $(x,y)$ locations are fixed (e.g.\ temperature records 
+from the state capital cities in Australia) or where the different
+$(x,y)$ points represent the same object at different times (e.g.\ 
+hourly locations of a tiger shark with a GPS tag). These are different 
+statistical problems, for which you need different methodology.
+  
+\section*{What can \spst\ do?}
+
+\Spst\ supports a very wide range of popular techniques 
+for statistical analysis for spatial point patterns,
+for example 
+\begin{itemize}
+\item kernel estimation of density/intensity
+\item quadrat counting and clustering indices
+\item detection of clustering using Ripley's $K$-function
+\item spatial logistic regression
+\item model-fitting
+\item Monte Carlo tests
+\end{itemize}
+as well as some advanced statistical techniques.
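+
+As a small taste, here is a minimal sketch applying a few of these
+techniques to the built-in \texttt{redwood} data (illustrative only;
+each command has its own help file):
+<<eval=FALSE>>=
+lambda <- density(redwood)         # kernel estimate of intensity
+Q <- quadratcount(redwood, nx=3)   # quadrat counts on a 3 x 3 grid
+K <- Kest(redwood)                 # Ripley's K-function
+fit <- ppm(redwood ~ x + y)        # fit a log-linear Poisson model
+@ 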
+
+\Spst\ is one of the largest packages available for \R,
+containing over 1000 commands. It is the product of 15 years of software 
+development by leading researchers in spatial statistics.
+
+\section*{How do I start using \spst?}
+
+\begin{enumerate}
+\item Install \R\ on your computer
+  \begin{quote}
+  Go to \texttt{r-project.org} and follow the installation
+  instructions.
+  \end{quote}
+\item Install the \spst\ package in your \R\ system
+  \begin{quote}
+  Start \R\ and type \verb!install.packages("spatstat")!. 
+  If that doesn't work, go to \texttt{r-project.org} to learn how
+  to install Contributed Packages. 
+  \end{quote}
+\item Start \R\
+\item Type \texttt{library(spatstat)} to load the package.
+\item Type \texttt{help(spatstat)} for information.
+\end{enumerate}
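+
+Collected together, steps 2, 4 and 5 amount to typing:
+<<eval=FALSE>>=
+install.packages("spatstat")   # step 2: install the package
+library(spatstat)              # step 4: load the package
+help(spatstat)                 # step 5: basic information
+@ 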
+
+\section*{How do I get my data into \spst?}
+
+<<echo=FALSE,results=hide>>=
+data(finpines)
+mypattern <- unmark(finpines)
+mydata <- round(as.data.frame(finpines), 2)
+@ 
+
+Here is a cookbook example. Suppose you've recorded the 
+$(x,y)$ locations of seedlings, in an Excel spreadsheet.
+You should also have recorded the dimensions of the survey area
+in which the seedlings were mapped. 
+
+\begin{enumerate}
+\item In Excel, save the spreadsheet into a comma-separated values (CSV) file.
+\item Start \R\ 
+\item Read your data into \R\ using \texttt{read.csv}.
+  \begin{quote}
+    If your CSV file is called \texttt{myfile.csv} then you could 
+    type something like 
+<<eval=FALSE>>=
+mydata <- read.csv("myfile.csv")
+@ 
+    to read the data from the file and save them in an object called 
+    \texttt{mydata} (or whatever you want to call it).
+    You may need to set various options to get this to work
+    for your file format: type \texttt{help(read.csv)} for information.
+  \end{quote}
+\item Check that \texttt{mydata} contains the data you expect. 
+  \begin{quote}
+  For example, to see the first few rows of data from the spreadsheet, type
+<<>>=
+head(mydata)
+@ 
+  To select a particular column of data, you can type
+  \texttt{mydata[,3]} to extract the third column, or
+  \verb!mydata$x! to extract the column labelled \texttt{x}.
+  \end{quote}
+\item Type \texttt{library(spatstat)} to load the \spst\ package
+\item Now convert the data to a point pattern object using the
+  \spst\ command \texttt{ppp}. 
+  \begin{quote}
+    Suppose that the \texttt{x} and \texttt{y} coordinates were 
+    stored in columns 3 and 7 of the spreadsheet. Suppose that the
+    sampling plot was a rectangle, with the $x$ coordinates ranging
+    from 100 to 200, and the $y$ coordinates ranging from 10 to 90. Then
+    you would type
+<<eval=FALSE>>=
+  mypattern <- ppp(mydata[,3], mydata[,7], c(100,200), c(10,90))
+@ 
+   The general form is 
+<<eval=FALSE>>=
+ppp(x.coordinates, y.coordinates, x.range, y.range)
+@ 
+
+  Note that this only stores the seedling locations. 
+  If you have additional columns of data
+  (such as seedling height, seedling sex, etc) these can be added as
+  \emph{marks}, later.
+  \end{quote}
+\item Check that the point pattern looks right by plotting it:
+<<fig=TRUE,results=hide>>=
+plot(mypattern)
+@ 
+\item Now you are ready to do some statistical analysis. 
+  Try the following:
+  \begin{itemize}
+  \item 
+    Basic summary of data: type 
+<<eval=FALSE>>=
+summary(mypattern)
+@ 
+  \item 
+  Ripley's $K$-function:
+<<echo=FALSE,results=hide,fig=FALSE>>=
+options(SweaveHooks=list(fig=function() par(mar=rep(4,4)+0.1)))
+@ 
+<<fig=TRUE,results=hide>>=
+plot(Kest(mypattern))
+@ 
+
+For more information, type \texttt{help(Kest)}
+  \item 
+  Envelopes of $K$-function:
+<<eval=FALSE>>=
+plot(envelope(mypattern,Kest))
+@ 
+<<echo=FALSE,results=hide>>=
+env <- envelope(mypattern,Kest, nsim=39)
+@ 
+<<fig=TRUE,echo=FALSE,results=hide>>=
+plot(env, main="envelope(mypattern, Kest)")
+@ 
+<<echo=FALSE,results=hide,fig=FALSE>>=
+options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1))))
+@ 
+
+  For more information, type \texttt{help(envelope)}
+  \item 
+  kernel smoother of point density:
+<<fig=TRUE,results=hide>>=
+plot(density(mypattern))
+@ 
+
+For more information, type \texttt{help(density.ppp)}
+\end{itemize}
+\item 
+  Next if you have additional columns of data
+  recording (for example) the seedling height and seedling sex,
+  you can add these data as \emph{marks}. Suppose that columns 
+  5 and 9 of the spreadsheet contained such values. Then do something like
+<<eval=FALSE>>=
+marks(mypattern) <- mydata[, c(5,9)]
+@ 
+<<echo=FALSE,results=hide>>=
+mypattern <- finpines
+@ 
+Now you can try things like the kernel smoother of mark values:
+<<eval=FALSE>>=
+plot(Smooth(mypattern))
+@ 
+\setkeys{Gin}{width=0.8\textwidth}
+<<fig=TRUE,echo=FALSE,results=hide>>=
+plot(Smooth(mypattern, sigma=1.2), main="Smooth(mypattern)")
+@ 
+\setkeys{Gin}{width=0.4\textwidth}
+\item
+  You are airborne!
+  Now look at the book \cite{baddrubaturn15} for more hints.
+\end{enumerate}
+
+\section*{How do I find out which command to use?}
+
+Information sources for \spst\ include:
+\begin{itemize}
+\item the Quick Reference guide: a list of the most useful commands.
+  \begin{quote}
+    To view the quick reference guide, 
+    start \R, then type \texttt{library(spatstat)}
+    and then \texttt{help(spatstat)}.
+    Alternatively you can download a pdf of the Quick Reference
+    guide from the website \texttt{www.spatstat.org}
+  \end{quote}
+\item online help:
+  \begin{quote}
+    The online help files are useful --- they
+    give detailed information and advice about each command.
+    They are available when you are running \spst. 
+    To get help about a particular command \texttt{blah}, 
+    type \texttt{help(blah)}.
+    There is a graphical help interface, 
+    which you can start by typing \texttt{help.start()}.
+    Alternatively you can download a pdf of the entire manual (1000 pages!)
+    from the website \texttt{www.spatstat.org}. 
+  \end{quote}
+\item vignettes:
+  \begin{quote}
+    \Spst\ comes installed with several `vignettes' (introductory documents
+    with examples) which can be accessed using the graphical help interface.
+    They include a document about \texttt{Handling shapefiles}.
+  \end{quote}
+\item workshop notes:
+  \begin{quote}
+    The notes from a two-day workshop 
+    on \spst\ are available online \cite{badd10wshop}.
+    These are now rather out-of-date, but still useful.
+  \end{quote}
+\item book:
+  \begin{quote}
+    The book \cite{baddrubaturn15}
+    contains a complete course on \texttt{spatstat}.
+  \end{quote}
+\item website:
+  \begin{quote}
+    Visit the \spst\ package website \texttt{www.spatstat.org} 
+  \end{quote}
+\item forums:
+  \begin{quote}
+    Join the forum \texttt{R-sig-geo} by visiting \texttt{r-project.org}.
+    Then email your questions to the forum. 
+    Alternatively you can ask the authors of the \spst\ package
+    (their email addresses are given in the package documentation).
+  \end{quote}
+\end{itemize}
+
+\begin{thebibliography}{10}
+\bibitem{badd10wshop}
+A. Baddeley.
+\newblock Analysing spatial point patterns in {{R}}.
+\newblock Technical report, CSIRO, 2010.
+\newblock Version 4.
+\newblock URL \texttt{https://research.csiro.au/software/r-workshop-notes/}
+
+\bibitem{baddrubaturn15}
+A. Baddeley, E. Rubak, and R. Turner.
+\newblock {\em Spatial Point Patterns: Methodology and Applications with {{R}}}.
+\newblock Chapman \& Hall/CRC Press, 2015.
+
+\bibitem{bivapebegome08}
+R. Bivand, E.J. Pebesma, and V. G{\'{o}}mez-Rubio.
+\newblock {\em Applied spatial data analysis with {R}}.
+\newblock Springer, 2008.
+
+\bibitem{cres93}
+N.A.C. Cressie.
+\newblock {\em Statistics for Spatial Data}.
+\newblock {John Wiley and Sons}, {New York}, second edition, 1993.
+
+\bibitem{digg03}
+P.J. Diggle.
+\newblock {\em Statistical Analysis of Spatial Point Patterns}.
+\newblock Hodder Arnold, London, second edition, 2003.
+
+\bibitem{fortdale05}
+M.J. Fortin and M.R.T. Dale.
+\newblock {\em Spatial analysis: a guide for ecologists}.
+\newblock Cambridge University Press, Cambridge, UK, 2005.
+
+\bibitem{fothroge09handbook}
+A.S. Fotheringham and P.A. Rogerson, editors.
+\newblock {\em The {SAGE} {H}andbook of {S}patial {A}nalysis}.
+\newblock SAGE Publications, London, 2009.
+
+\bibitem{gaetguyo09}
+C. Gaetan and X. Guyon.
+\newblock {\em Spatial statistics and modeling}.
+\newblock Springer, 2009.
+\newblock Translated by Kevin Bleakley.
+
+\bibitem{handbook10}
+A.E. Gelfand, P.J. Diggle, M. Fuentes, and P. Guttorp, editors.
+\newblock {\em Handbook of Spatial Statistics}.
+\newblock CRC Press, 2010.
+
+\bibitem{illietal08}
+J. Illian, A. Penttinen, H. Stoyan, and D. Stoyan.
+\newblock {\em Statistical Analysis and Modelling of Spatial Point Patterns}.
+\newblock John Wiley and Sons, Chichester, 2008.
+
+\bibitem{mollwaag04}
+J. M{\o}ller and R.P. Waagepetersen.
+\newblock {\em Statistical Inference and Simulation for Spatial Point
+  Processes}.
+\newblock Chapman and Hall/CRC, Boca Raton, 2004.
+
+\bibitem{pfeietal08}
+D.U. Pfeiffer, T. Robinson, M. Stevenson, K. Stevens, D. Rogers, and
+  A. Clements.
+\newblock {\em Spatial analysis in epidemiology}.
+\newblock Oxford University Press, Oxford, UK, 2008.
+
+\bibitem{wallgotw04}
+L.A. Waller and C.A. Gotway.
+\newblock {\em Applied spatial statistics for public health data}.
+\newblock Wiley, 2004.
+
+\end{thebibliography}
+
+
+
+\end{document}
+
diff --git a/vignettes/hexagon.eps b/vignettes/hexagon.eps
new file mode 100755
index 0000000..7bb8ad2
--- /dev/null
+++ b/vignettes/hexagon.eps
@@ -0,0 +1,114 @@
+%!PS-Adobe-2.0 EPSF-2.0
+%%Title: hexagon.fig
+%%Creator: fig2dev Version 3.2 Patchlevel 5a
+%%CreationDate: Tue Nov 23 11:04:35 2010
+%%BoundingBox: 0 0 98 98
+%Magnification: 1.0000
+%%EndComments
+%%BeginProlog
+/$F2psDict 200 dict def
+$F2psDict begin
+$F2psDict /mtrx matrix put
+/col-1 {0 setgray} bind def
+/col0 {0.000 0.000 0.000 srgb} bind def
+/col1 {0.000 0.000 1.000 srgb} bind def
+/col2 {0.000 1.000 0.000 srgb} bind def
+/col3 {0.000 1.000 1.000 srgb} bind def
+/col4 {1.000 0.000 0.000 srgb} bind def
+/col5 {1.000 0.000 1.000 srgb} bind def
+/col6 {1.000 1.000 0.000 srgb} bind def
+/col7 {1.000 1.000 1.000 srgb} bind def
+/col8 {0.000 0.000 0.560 srgb} bind def
+/col9 {0.000 0.000 0.690 srgb} bind def
+/col10 {0.000 0.000 0.820 srgb} bind def
+/col11 {0.530 0.810 1.000 srgb} bind def
+/col12 {0.000 0.560 0.000 srgb} bind def
+/col13 {0.000 0.690 0.000 srgb} bind def
+/col14 {0.000 0.820 0.000 srgb} bind def
+/col15 {0.000 0.560 0.560 srgb} bind def
+/col16 {0.000 0.690 0.690 srgb} bind def
+/col17 {0.000 0.820 0.820 srgb} bind def
+/col18 {0.560 0.000 0.000 srgb} bind def
+/col19 {0.690 0.000 0.000 srgb} bind def
+/col20 {0.820 0.000 0.000 srgb} bind def
+/col21 {0.560 0.000 0.560 srgb} bind def
+/col22 {0.690 0.000 0.690 srgb} bind def
+/col23 {0.820 0.000 0.820 srgb} bind def
+/col24 {0.500 0.190 0.000 srgb} bind def
+/col25 {0.630 0.250 0.000 srgb} bind def
+/col26 {0.750 0.380 0.000 srgb} bind def
+/col27 {1.000 0.500 0.500 srgb} bind def
+/col28 {1.000 0.630 0.630 srgb} bind def
+/col29 {1.000 0.750 0.750 srgb} bind def
+/col30 {1.000 0.880 0.880 srgb} bind def
+/col31 {1.000 0.840 0.000 srgb} bind def
+
+end
+
+/cp {closepath} bind def
+/ef {eofill} bind def
+/gr {grestore} bind def
+/gs {gsave} bind def
+/sa {save} bind def
+/rs {restore} bind def
+/l {lineto} bind def
+/m {moveto} bind def
+/rm {rmoveto} bind def
+/n {newpath} bind def
+/s {stroke} bind def
+/sh {show} bind def
+/slc {setlinecap} bind def
+/slj {setlinejoin} bind def
+/slw {setlinewidth} bind def
+/srgb {setrgbcolor} bind def
+/rot {rotate} bind def
+/sc {scale} bind def
+/sd {setdash} bind def
+/ff {findfont} bind def
+/sf {setfont} bind def
+/scf {scalefont} bind def
+/sw {stringwidth} bind def
+/tr {translate} bind def
+/tnt {dup dup currentrgbcolor
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add srgb}
+  bind def
+/shd {dup dup currentrgbcolor 4 -2 roll mul 4 -2 roll mul
+  4 -2 roll mul srgb} bind def
+/$F2psBegin {$F2psDict begin /$F2psEnteredState save def} def
+/$F2psEnd {$F2psEnteredState restore end} def
+
+/pageheader {
+save
+newpath 0 98 moveto 0 0 lineto 98 0 lineto 98 98 lineto closepath clip newpath
+-11.0 102.4 translate
+1 -1 scale
+$F2psBegin
+10 setmiterlimit
+0 slj 0 slc
+ 0.06299 0.06299 sc
+} bind def
+/pagefooter {
+$F2psEnd
+restore
+} bind def
+%%EndProlog
+pageheader
+%
+% Fig objects follow
+%
+% 
+% here starts figure with depth 50
+% Polyline
+0 slj
+0 slc
+30.000 slw
+n 1485 1395 m 1683 657 l 1143 117 l 405 315 l 207 1053 l 747 1593 l
+
+ cp gs col0 s gr 
+% here ends figure;
+pagefooter
+showpage
+%%Trailer
+%EOF
diff --git a/vignettes/hexagon.pdf b/vignettes/hexagon.pdf
new file mode 100644
index 0000000..c0cc4a3
--- /dev/null
+++ b/vignettes/hexagon.pdf
@@ -0,0 +1,83 @@
+%PDF-1.4
+%�쏢
+5 0 obj
+<</Length 6 0 R/Filter /FlateDecode>>
+stream
+% [binary FlateDecode stream data omitted: not representable as text]
+endstream
+endobj
+6 0 obj
+118
+endobj
+4 0 obj
+<</Type/Page/MediaBox [0 0 98 98]
+/Parent 3 0 R
+/Resources<</ProcSet[/PDF]
+/ExtGState 8 0 R
+>>
+/Contents 5 0 R
+>>
+endobj
+3 0 obj
+<< /Type /Pages /Kids [
+4 0 R
+] /Count 1
+>>
+endobj
+1 0 obj
+<</Type /Catalog /Pages 3 0 R
+/Metadata 9 0 R
+>>
+endobj
+7 0 obj
+<</Type/ExtGState
+/OPM 1>>endobj
+8 0 obj
+<</R7
+7 0 R>>
+endobj
+9 0 obj
+<</Type/Metadata
+/Subtype/XML/Length 1362>>stream
+<?xpacket begin='' id='W5M0MpCehiHzreSzNTczkc9d'?>
+<?adobe-xap-filters esc="CRLF"?>
+<x:xmpmeta xmlns:x='adobe:ns:meta/' x:xmptk='XMP toolkit 2.9.1-13, framework 1.6'>
+<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' xmlns:iX='http://ns.adobe.com/iX/1.0/'>
+<rdf:Description rdf:about='uuid:d86bcae9-a3e4-11ee-0000-60c1f5f8f591' xmlns:pdf='http://ns.adobe.com/pdf/1.3/' pdf:Producer='GPL Ghostscript 9.05'/>
+<rdf:Description rdf:about='uuid:d86bcae9-a3e4-11ee-0000-60c1f5f8f591' xmlns:xmp='http://ns.adobe.com/xap/1.0/'><xmp:ModifyDate>2013-12-23T19:49:36+08:00</xmp:ModifyDate>
+<xmp:CreateDate>2013-12-23T19:49:36+08:00</xmp:CreateDate>
+<xmp:CreatorTool>fig2dev Version 3.2 Patchlevel 5d</xmp:CreatorTool></rdf:Description>
+<rdf:Description rdf:about='uuid:d86bcae9-a3e4-11ee-0000-60c1f5f8f591' xmlns:xapMM='http://ns.adobe.com/xap/1.0/mm/' xapMM:DocumentID='uuid:d86bcae9-a3e4-11ee-0000-60c1f5f8f591'/>
+<rdf:Description rdf:about='uuid:d86bcae9-a3e4-11ee-0000-60c1f5f8f591' xmlns:dc='http://purl.org/dc/elements/1.1/' dc:format='application/pdf'><dc:title><rdf:Alt><rdf:li xml:lang='x-default'>hexagon.fig</rdf:li></rdf:Alt></dc:title></rdf:Description>
+</rdf:RDF>
+</x:xmpmeta>
+                                                                        
+                                                                        
+<?xpacket end='w'?>
+endstream
+endobj
+2 0 obj
+<</Producer(GPL Ghostscript 9.05)
+/CreationDate(D:20131223194936+08'00')
+/ModDate(D:20131223194936+08'00')
+/Title(hexagon.fig)
+/Creator(fig2dev Version 3.2 Patchlevel 5d)>>endobj
+xref
+0 10
+0000000000 65535 f 
+0000000410 00000 n 
+0000001982 00000 n 
+0000000351 00000 n 
+0000000222 00000 n 
+0000000015 00000 n 
+0000000203 00000 n 
+0000000474 00000 n 
+0000000515 00000 n 
+0000000544 00000 n 
+trailer
+<< /Size 10 /Root 1 0 R /Info 2 0 R
+/ID [<7169BA68125AE1AEC0984268ECC4E10A><7169BA68125AE1AEC0984268ECC4E10A>]
+>>
+startxref
+2169
+%%EOF
diff --git a/vignettes/irregpoly.eps b/vignettes/irregpoly.eps
new file mode 100755
index 0000000..d486956
--- /dev/null
+++ b/vignettes/irregpoly.eps
@@ -0,0 +1,119 @@
+%!PS-Adobe-2.0 EPSF-2.0
+%%Title: irregpoly.fig
+%%Creator: fig2dev Version 3.2 Patchlevel 5a
+%%CreationDate: Tue Nov 23 11:04:01 2010
+%%BoundingBox: 0 0 226 144
+%Magnification: 1.0000
+%%EndComments
+%%BeginProlog
+/$F2psDict 200 dict def
+$F2psDict begin
+$F2psDict /mtrx matrix put
+/col-1 {0 setgray} bind def
+/col0 {0.000 0.000 0.000 srgb} bind def
+/col1 {0.000 0.000 1.000 srgb} bind def
+/col2 {0.000 1.000 0.000 srgb} bind def
+/col3 {0.000 1.000 1.000 srgb} bind def
+/col4 {1.000 0.000 0.000 srgb} bind def
+/col5 {1.000 0.000 1.000 srgb} bind def
+/col6 {1.000 1.000 0.000 srgb} bind def
+/col7 {1.000 1.000 1.000 srgb} bind def
+/col8 {0.000 0.000 0.560 srgb} bind def
+/col9 {0.000 0.000 0.690 srgb} bind def
+/col10 {0.000 0.000 0.820 srgb} bind def
+/col11 {0.530 0.810 1.000 srgb} bind def
+/col12 {0.000 0.560 0.000 srgb} bind def
+/col13 {0.000 0.690 0.000 srgb} bind def
+/col14 {0.000 0.820 0.000 srgb} bind def
+/col15 {0.000 0.560 0.560 srgb} bind def
+/col16 {0.000 0.690 0.690 srgb} bind def
+/col17 {0.000 0.820 0.820 srgb} bind def
+/col18 {0.560 0.000 0.000 srgb} bind def
+/col19 {0.690 0.000 0.000 srgb} bind def
+/col20 {0.820 0.000 0.000 srgb} bind def
+/col21 {0.560 0.000 0.560 srgb} bind def
+/col22 {0.690 0.000 0.690 srgb} bind def
+/col23 {0.820 0.000 0.820 srgb} bind def
+/col24 {0.500 0.190 0.000 srgb} bind def
+/col25 {0.630 0.250 0.000 srgb} bind def
+/col26 {0.750 0.380 0.000 srgb} bind def
+/col27 {1.000 0.500 0.500 srgb} bind def
+/col28 {1.000 0.630 0.630 srgb} bind def
+/col29 {1.000 0.750 0.750 srgb} bind def
+/col30 {1.000 0.880 0.880 srgb} bind def
+/col31 {1.000 0.840 0.000 srgb} bind def
+
+end
+
+/cp {closepath} bind def
+/ef {eofill} bind def
+/gr {grestore} bind def
+/gs {gsave} bind def
+/sa {save} bind def
+/rs {restore} bind def
+/l {lineto} bind def
+/m {moveto} bind def
+/rm {rmoveto} bind def
+/n {newpath} bind def
+/s {stroke} bind def
+/sh {show} bind def
+/slc {setlinecap} bind def
+/slj {setlinejoin} bind def
+/slw {setlinewidth} bind def
+/srgb {setrgbcolor} bind def
+/rot {rotate} bind def
+/sc {scale} bind def
+/sd {setdash} bind def
+/ff {findfont} bind def
+/sf {setfont} bind def
+/scf {scalefont} bind def
+/sw {stringwidth} bind def
+/tr {translate} bind def
+/tnt {dup dup currentrgbcolor
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add
+  4 -2 roll dup 1 exch sub 3 -1 roll mul add srgb}
+  bind def
+/shd {dup dup currentrgbcolor 4 -2 roll mul 4 -2 roll mul
+  4 -2 roll mul srgb} bind def
+/$F2psBegin {$F2psDict begin /$F2psEnteredState save def} def
+/$F2psEnd {$F2psEnteredState restore end} def
+
+/pageheader {
+save
+newpath 0 144 moveto 0 0 lineto 226 0 lineto 226 144 lineto closepath clip newpath
+-3.6 146.6 translate
+1 -1 scale
+$F2psBegin
+10 setmiterlimit
+0 slj 0 slc
+ 0.06299 0.06299 sc
+} bind def
+/pagefooter {
+$F2psEnd
+restore
+} bind def
+%%EndProlog
+pageheader
+%
+% Fig objects follow
+%
+% 
+% here starts figure with depth 50
+% Polyline
+0 slj
+0 slc
+30.000 slw
+n 945 180 m 1170 1035 l 225 315 l 135 405 l 90 1215 l 675 1350 l
+ 675 1665 l 135 1755 l 180 2205 l 990 2295 l 1260 1350 l
+ 1530 1440 l 1440 2205 l 2250 2115 l 1890 1350 l 2520 1305 l
+ 2250 1530 l 2475 2250 l 3330 2250 l 3330 1575 l 2790 1530 l
+ 3600 1260 l 3465 720 l 2790 810 l 2475 765 l 3465 585 l
+ 3510 360 l 2430 90 l 2115 225 l 2070 630 l 1800 945 l
+ 1935 135 l
+ 990 225 l gs col0 s gr 
+% here ends figure;
+pagefooter
+showpage
+%%Trailer
+%EOF
diff --git a/vignettes/irregpoly.pdf b/vignettes/irregpoly.pdf
new file mode 100644
index 0000000..0cb04d2
--- /dev/null
+++ b/vignettes/irregpoly.pdf
@@ -0,0 +1,84 @@
+%PDF-1.4
+%�쏢
+5 0 obj
+<</Length 6 0 R/Filter /FlateDecode>>
+stream
+x�m�=n
1
��=O0�?���}n�
+#E�JH�6@��������ob����������%�^E��sMo?��
'�P���*f�vjqp}]�"�,D��
<�qe��]���X/�pRiuS\
+*q�X2B��*XkӜ\l�D������U1J卼�P��X��]r�r�Z��0C�P$
!7�D,(�ƃ[Q�
��5�
C2��)���R��H���Q5A�7�0=�x"��E�u�Z1��m=�@4�)Y���J9VL,6���}9f,q^�ѕ�
�:��'�1V8'�3�&�R[�U���X�ߑ�X������
�
|�endstream
+endobj
+6 0 obj
+311
+endobj
+4 0 obj
+<</Type/Page/MediaBox [0 0 226 144]
+/Parent 3 0 R
+/Resources<</ProcSet[/PDF]
+/ExtGState 8 0 R
+>>
+/Contents 5 0 R
+>>
+endobj
+3 0 obj
+<< /Type /Pages /Kids [
+4 0 R
+] /Count 1
+>>
+endobj
+1 0 obj
+<</Type /Catalog /Pages 3 0 R
+/Metadata 9 0 R
+>>
+endobj
+7 0 obj
+<</Type/ExtGState
+/OPM 1>>endobj
+8 0 obj
+<</R7
+7 0 R>>
+endobj
+9 0 obj
+<</Type/Metadata
+/Subtype/XML/Length 1364>>stream
+<?xpacket begin='' id='W5M0MpCehiHzreSzNTczkc9d'?>
+<?adobe-xap-filters esc="CRLF"?>
+<x:xmpmeta xmlns:x='adobe:ns:meta/' x:xmptk='XMP toolkit 2.9.1-13, framework 1.6'>
+<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' xmlns:iX='http://ns.adobe.com/iX/1.0/'>
+<rdf:Description rdf:about='uuid:02bd886a-a3e5-11ee-0000-713887b14207' xmlns:pdf='http://ns.adobe.com/pdf/1.3/' pdf:Producer='GPL Ghostscript 9.05'/>
+<rdf:Description rdf:about='uuid:02bd886a-a3e5-11ee-0000-713887b14207' xmlns:xmp='http://ns.adobe.com/xap/1.0/'><xmp:ModifyDate>2013-12-23T19:50:47+08:00</xmp:ModifyDate>
+<xmp:CreateDate>2013-12-23T19:50:47+08:00</xmp:CreateDate>
+<xmp:CreatorTool>fig2dev Version 3.2 Patchlevel 5d</xmp:CreatorTool></rdf:Description>
+<rdf:Description rdf:about='uuid:02bd886a-a3e5-11ee-0000-713887b14207' xmlns:xapMM='http://ns.adobe.com/xap/1.0/mm/' xapMM:DocumentID='uuid:02bd886a-a3e5-11ee-0000-713887b14207'/>
+<rdf:Description rdf:about='uuid:02bd886a-a3e5-11ee-0000-713887b14207' xmlns:dc='http://purl.org/dc/elements/1.1/' dc:format='application/pdf'><dc:title><rdf:Alt><rdf:li xml:lang='x-default'>irregpoly.fig</rdf:li></rdf:Alt></dc:title></rdf:Description>
+</rdf:RDF>
+</x:xmpmeta>
+                                                                        
+                                                                        
+<?xpacket end='w'?>
+endstream
+endobj
+2 0 obj
+<</Producer(GPL Ghostscript 9.05)
+/CreationDate(D:20131223195047+08'00')
+/ModDate(D:20131223195047+08'00')
+/Title(irregpoly.fig)
+/Creator(fig2dev Version 3.2 Patchlevel 5d)>>endobj
+xref
+0 10
+0000000000 65535 f 
+0000000605 00000 n 
+0000002179 00000 n 
+0000000546 00000 n 
+0000000415 00000 n 
+0000000015 00000 n 
+0000000396 00000 n 
+0000000669 00000 n 
+0000000710 00000 n 
+0000000739 00000 n 
+trailer
+<< /Size 10 /Root 1 0 R /Info 2 0 R
+/ID [<F30DAA97F063660DCBE4D531EA4E489F><F30DAA97F063660DCBE4D531EA4E489F>]
+>>
+startxref
+2368
+%%EOF
diff --git a/vignettes/replicated.Rnw b/vignettes/replicated.Rnw
new file mode 100644
index 0000000..3b32240
--- /dev/null
+++ b/vignettes/replicated.Rnw
@@ -0,0 +1,1525 @@
+\documentclass[11pt]{article}
+
+% \VignetteIndexEntry{Analysing Replicated Point Patterns in Spatstat}
+
+\usepackage{graphicx}
+\usepackage{Sweave}
+\usepackage{bm}
+\usepackage{anysize}
+
+\marginsize{2cm}{2cm}{2cm}{2cm}
+
+\newcommand{\pkg}[1]{\texttt{#1}}
+\newcommand{\code}[1]{\texttt{#1}}
+\newcommand{\R}{{\sf R}}
+\newcommand{\spst}{\pkg{spatstat}}
+\newcommand{\Spst}{\pkg{Spatstat}}
+
+\newcommand{\bold}[1]{{\textbf {#1}}}
+
+\newcommand{\indicate}[1]{\boldmaths{1}\{ {#1} \}}
+\newcommand{\dee}[1]{\, {\rm d}{#1}}
+\newcommand{\boldmaths}[1]{{\ensuremath\boldsymbol{#1}}}
+\newcommand{\xx}{\boldmaths{x}}
+
+\begin{document}
+\bibliographystyle{plain}
+\thispagestyle{empty}
+
+<<echo=FALSE,results=hide,fig=FALSE>>=
+options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1))))
+@ 
+\SweaveOpts{eps=TRUE}
+\setkeys{Gin}{width=0.6\textwidth}
+
+<<echo=FALSE,results=hide>>=
+library(spatstat)
+spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) })
+sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Date")
+sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Version")
+options(useFancyQuotes=FALSE)
+@ 
+
+\title{Analysing replicated point patterns in \texttt{spatstat}}
+\author{Adrian Baddeley}
+\date{For \spst\ version \texttt{\Sexpr{sversion}}}
+\maketitle
+
+\begin{abstract}
+This document describes \spst's capabilities for
+fitting models to replicated point patterns.
+More generally it applies to data from a designed experiment
+in which the response from each unit is a spatial point pattern.
+\end{abstract}
+
+\tableofcontents
+\newpage
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Introduction}
+
+`Replicated point patterns' are datasets consisting of 
+several point patterns which can be 
+regarded as independent repetitions of the same experiment. For example,
+three point patterns taken from micrographs of three pipette samples of the 
+same jug of milk, could be assumed to be replicated observations.
+
+More generally we could have several experimental groups, with 
+replicated point pattern data in each group. For example there may be
+two jugs of milk that were treated differently, and we take three
+pipette samples from each jug.
+
+Even more generally our point patterns could be the result
+of a designed experiment involving
+control and treatment groups, covariates such as temperature,
+and even spatial covariates (such as image data). 
+
+This document describes some capabilities available in the \spst\ package
+for analysing such data. 
+\textbf{For further detail, see Chapter 16 of the spatstat book \cite{TheBook}.}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Overview of software}
+
+The main components needed are:
+
+\begin{itemize}
+\item the model-fitting function \texttt{mppm}, an extension of the
+  \texttt{spatstat} function \texttt{ppm}, that will fit Gibbs point process
+  models to multiple point pattern datasets;
+\item support for the class \texttt{"mppm"} of point process models 
+   fitted by \texttt{mppm} (e.g. functions to print and plot the fitted model,
+   analysis of deviance for Poisson models)
+\item some tools for exploratory data analysis;
+\item basic support for the data from such experiments
+  by storing the data in a \emph{``hyperframe''}. A hyperframe is like
+  a data frame, except that each entry in a column can be a point pattern
+  or a pixel image, as well as a single number or categorical value.
+\item four example datasets.
+\end{itemize}
+
+\section{Formulating the problem}
+
+We view the experiment as involving a series of
+{\em `units'\/}.
+Each unit is subjected to a known set of experimental conditions 
+(described by the values of the {\em covariates\/}), and
+each unit yields a {\em response\/} which is a spatial point pattern.
+The value of a particular covariate for each unit can be
+either a single value (numerical, logical or factor),
+or a pixel image.
+
+Three important cases are:
+\begin{description}
+\item[independent replicates:]
+We observe $n$
+different point patterns that can be regarded as independent replicates,
+i.e.\ independent realisations of the same point process.
+The `responses' are the point patterns; there are no covariates.
+\item[replication in groups:]
+there are $K$ different experimental groups (e.g. control, aspirin,
+nurofen). In group $k$ ($k=1,\ldots,K$) we observe $n_k$
+point patterns which can be regarded as independent replicates within
+this group. We regard this as an experiment with $n = \sum_k n_k$ 
+units. The responses are the point patterns; there is one covariate
+which is a factor (categorical variable) identifying which group 
+each point pattern belongs to.
+\item[general case:] 
+there are covariates other than factors that influence
+the response. The point patterns are assumed to be independent, 
+but no two patterns have the same distribution.
+\end{description}
+
+Examples of these three cases are given in the 
+datasets \texttt{waterstriders}, \texttt{pyramidal} and \texttt{demohyper}
+respectively, which are installed in \spst.
+
+\section{Installed datasets}
+
+The following datasets are currently installed in \spst.
+
+\begin{itemize}
+\item \texttt{waterstriders}: Penttinen's \cite{pent84} waterstriders data
+recording the locations of insect larvae on a pond in 3 independent
+experiments.
+\item \texttt{pyramidal}: data from Diggle, Lange and Benes 
+\cite{digglangbene91} on the locations of pyramidal neurons in 
+human brain, 31 human subjects grouped into 3 groups (controls,
+schizoaffective and schizophrenic).
+\item \texttt{flu}: data from Chen et al \cite{chenetal08}
+giving the locations of two different virus proteins 
+on the membranes of cells infected with influenza virus;
+41 multitype point patterns divided into two virus types
+(wild and mutant) and two stain types. 
+\item \texttt{simba}: simulated data from an experiment with two groups
+and 5 replicate point patterns per group.
+\item \texttt{demohyper}: simulated data from an experiment with two 
+  groups in which each experimental unit has a point pattern response
+  and a pixel image covariate. 
+\end{itemize}
+
+\section{Lists of point patterns}
+
+First we need a convenient way to store the \emph{responses}
+from all the units in an experiment.
+
+An individual point pattern is stored as an object of class \verb!"ppp"!. 
+The easiest way to store all the responses is to form a list
+of \verb!"ppp"! objects.
+
+\subsection{Waterstriders data}
+
+The \texttt{waterstriders} data are an example of this type.
+The data consist of 3 independent point patterns representing the 
+locations of insect larvae on a pond. See \texttt{help(waterstriders)}.
+
+<<>>=
+waterstriders
+@ 
+
+The \texttt{waterstriders} dataset is a list of point patterns.
+It is a list, each of whose entries is a point pattern (object of class
+\verb!"ppp"!). Note that the observation windows of the
+three point patterns are {\tt not\/} identical. 
+
+\subsection{The class \texttt{listof}}
+
+For convenience, the \texttt{waterstriders} dataset also belongs to the
+class \verb!"listof"!. This is a simple mechanism to allow us to
+handle the list neatly --- for example, we can provide
+special methods for printing, plotting and summarising the list.
+
+\SweaveOpts{width=6,height=2}
+\setkeys{Gin}{width=0.9\textwidth}
+
+<<fig=TRUE>>=
+plot(waterstriders, main="")
+@ 
+
+Notice that the 
+plot method displays each entry of the list in a separate panel.
+There's also the summary method:
+
+<<>>=
+summary(waterstriders)
+@ 
+
+\subsection{Creating a \texttt{listof} object}
+
+For example, here is a simulated dataset containing three
+independent realisations of the Poisson process with intensity 100.
+
+<<>>=
+X <- listof(rpoispp(100), rpoispp(100), rpoispp(100))
+@ 
+
+Then it can be printed and plotted.
+
+<<fig=TRUE>>=
+plot(X)
+X
+@ 
+
+To convert an existing list to the class \code{listof}, use
+\code{as.listof}.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Hyperframes}
+
+A \emph{hyperframe} is like a data frame, except that its entries
+can be objects of any kind.
+A hyperframe is effectively a two-dimensional array
+in which each column consists of
+values of one type (as in a data frame) or consists of
+objects of one class. 
+
+The entries in a hyperframe can be point patterns, pixel images,
+windows, or any other objects. 
+
+To analyse an experiment, we will store {\bf all} the data from the experiment
+in a single hyperframe. The rows of the hyperframe will correspond 
+to different experimental units,
+while the columns represent different variables 
+(response variables or covariates).
+
+\subsection{Creating hyperframes}
+
+The function \texttt{hyperframe} will create a hyperframe.
+
+<<eval=FALSE>>=
+hyperframe(...)
+@ 
+
+The arguments \verb!...! are any number of arguments of
+the form \texttt{tag=value}. Each \texttt{value} will
+become a column of the array. The \texttt{tag} determines the name
+of the column.
+
+Each \texttt{value} can be either
+\begin{itemize}
+\item an atomic vector or factor
+  (i.e. numeric vector, integer vector, character vector, logical
+  vector, complex vector or factor)
+\item a list of objects which are all of the same class
+\item one atomic value, which will be replicated to make an atomic
+  vector or factor
+\item one object, which will be replicated to make a list of identical objects.
+\end{itemize}
+    
+All columns (vectors, factors and lists) must be of the same length,
+if their length is greater than 1. 
+
+For example, here is a hyperframe containing a column of
+numbers and a column of \emph{functions}:
+
+<<>>=
+H <- hyperframe(X=1:3, Y=list(sin,cos,tan))
+H
+@ 
+
+Note that a column of character strings will be converted to a factor, 
+unless you set \texttt{stringsAsFactors=FALSE} in the call to
+\code{hyperframe}. This is the same behaviour as for the function 
+\code{data.frame}.
+
+<<>>=
+G <- hyperframe(X=1:3, Y=letters[1:3], Z=factor(letters[1:3]),
+                W=list(rpoispp(100),rpoispp(100), rpoispp(100)),
+                U=42,
+                V=rpoispp(100), stringsAsFactors=FALSE)
+G
+@ 
+
+This hyperframe has 3 rows. The columns named \texttt{U} and \texttt{V}
+are constant (all entries in a column are the same). The column named
+\texttt{Y} is a character vector.
+
+\subsection{Hyperframes of data}
+
+To analyse an experiment, we will store {\bf all} the data from the experiment
+in a single hyperframe. The rows of the hyperframe will correspond 
+to different experimental units,
+while the columns represent different variables 
+(response variables or covariates).
+
+Several examples of hyperframes are provided with the package,
+including \texttt{demohyper}, \texttt{flu}, \texttt{simba}
+and \texttt{pyramidal}, described above.
+
+The \texttt{simba} dataset contains simulated data from an
+experiment with a `control' group and a `treatment' group, each 
+group containing 5 experimental units. The responses in the control group are 
+independent Poisson point patterns with intensity 80.
+The responses in the treatment group are independent realisations of
+a Strauss process (see \texttt{help(simba)} for details).
+The \texttt{simba} dataset is a hyperframe with 10 rows and 2 columns:
+\texttt{Points} (the point patterns) and \texttt{group} (a factor 
+with levels \texttt{control} and \texttt{treatment}).
+
+<<>>=
+simba
+@ 
+
+The \texttt{pyramidal} dataset contains data from Diggle, Lange and Benes 
+\cite{digglangbene91} on the locations of pyramidal neurons in 
+human brain. One point pattern was observed in each of 31 human subjects.
+The subjects were classified
+into 3 groups (controls, schizoaffective and schizophrenic).
+The \texttt{pyramidal} dataset is a hyperframe with 31 rows
+and 2 columns: \code{Neurons} (the point patterns) and \code{group}
+(a factor with levels \texttt{control}, \texttt{schizoaffective} 
+and \texttt{schizophrenic}).
+
+<<>>=
+pyramidal
+@ 
+
+The \texttt{waterstriders} dataset is not a hyperframe; it's just a 
+list of point patterns. It can easily be converted into a hyperframe:
+
+<<>>=
+ws <- hyperframe(Striders=waterstriders)
+@ 
+
+\subsection{Columns of a hyperframe}
+
+Individual columns of a hyperframe can be extracted using \verb!$!:
+
+<<>>=
+H$X
+H$Y
+@ 
+
+The result of \verb!$! is a vector or factor if the column contains
+atomic values; otherwise it is a list of objects (with class \texttt{"listof"}
+to make it easier to print and plot).
+
+Individual columns can also be assigned (overwritten or created)
+using \verb!$<-!:
+
+<<>>=
+H$U <- letters[1:3]
+H
+@ 
+
+This can be used to build up a hyperframe column-by-column:
+
+<<>>=
+G <- hyperframe()
+G$X <- waterstriders
+G$Y <- 1:3
+G
+@ 
+
+\subsection{Subsets of a hyperframe}
+
+Other subsets of a hyperframe
+can be extracted with \verb![!:
+
+<<>>=
+H[,1]
+H[2,]
+H[2:3, ]
+H[1,1]
+@ 
+
+The result of \verb![! is a hyperframe, unless you set \verb!drop=TRUE!
+and the subset consists of only one element or one column:
+
+<<>>=
+H[,1,drop=TRUE]
+H[1,1,drop=TRUE]
+H[1,2,drop=TRUE]
+@ 
+
+Currently there is no method for \verb![<-! that would allow
+you to assign values to a subset of a hyperframe.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Plotting}
+
+\subsection{Plotting a \code{listof} object}
+
+The plot method for \code{listof} objects has formal arguments
+
+<<eval=FALSE>>=
+plot.listof(x, ..., main, arrange = TRUE, nrows = NULL, ncols = NULL)
+@ 
+
+where \code{main} is a title for the entire page.
+
+If \code{arrange=TRUE} then the entries of the list are displayed  
+in separate panels on the same page (with \code{nrows} rows and
+\code{ncols} columns of panels), while if \code{arrange=FALSE} then 
+the entries are just plotted as a series of plot frames.
+
+The extra arguments \verb!...! control the individual plot panels.
+These arguments will be passed to the plot method
+that displays each entry of the list. Suitable arguments depend on the
+type of entries.
+
+<<fig=TRUE>>= 
+plot(waterstriders, pch=16, nrows=1)
+@ 
+
+\subsection{Plotting a hyperframe}
+
+\subsubsection{Plotting one column}
+
+If \code{h} is a hyperframe, then the default action of
+\code{plot(h)} is to extract the first column
+of \code{h} and plot each of the entries in a separate panel on 
+one page (actually using the plot method for class \verb!"listof"!). 
+
+\SweaveOpts{width=7,height=5}
+\setkeys{Gin}{width=0.9\textwidth}
+
+<<fig=TRUE>>=
+plot(simba)
+@ 
+
+This only works if the entries in the first column are objects 
+for which a plot method is defined (for example, point patterns, images,
+windows). 
+
+To select a different column, use \verb!$! or \verb![!:
+
+\SweaveOpts{width=6,height=2}
+\setkeys{Gin}{width=0.9\textwidth}
+
+<<fig=TRUE>>=
+H <- hyperframe(X=1:3, Y=list(sin,cos,tan))
+plot(H$Y)
+@ 
+
+The plot can be controlled using the arguments for \code{plot.listof}
+(and, in this case, \code{plot.function}, since \verb!H$Y! consists of 
+functions).
+
+\subsubsection{Complex plots}
+
+More generally, we can display any kind of higher-order plot
+involving one or more columns of a hyperframe:
+
+<<eval=FALSE>>=
+plot(h, e)
+@ 
+
+where \code{h} is a hyperframe and \code{e} is an \R\ language call 
+or expression that must be evaluated in each row to generate each plot panel.
+
+\SweaveOpts{width=9,height=5}
+\setkeys{Gin}{width=0.9\textwidth}
+
+<<fig=TRUE>>=
+plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }))
+@ 
+
+Note the use of \code{quote}, which prevents the code
+inside the braces from being evaluated immediately.
+
+To plot the $K$-functions of each of the patterns in the
+\code{waterstriders} dataset,
+
+\SweaveOpts{width=6,height=2}
+\setkeys{Gin}{width=0.9\textwidth}
+
+<<fig=TRUE>>=
+H <- hyperframe(Bugs=waterstriders)
+plot(H, quote(plot(Kest(Bugs))), marsize=1)
+@ 
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Data analysis}
+
+\subsection{Computing with hyperframes}
+
+Often we want to perform some computation on each row of
+a hyperframe. 
+
+In a data frame, this can be done using the command \code{with}:
+
+<<>>=
+df <- data.frame(A=1:10, B=10:1)
+with(df, A-B)
+@ 
+
+In this example, the expression \code{A-B} is evaluated
+in each row of the data frame, and the result is a vector
+containing the computed values for each row.
+The function \code{with} is generic, and has a method for data frames,
+\code{with.data.frame}. The computation above
+was executed by \code{with.data.frame}. 
+
+The same syntax is available for hyperframes
+using the method \code{with.hyperframe}:
+
+<<eval=FALSE>>=
+with(h,e)
+@ 
+
+Here \code{h} is a hyperframe, and 
+\code{e} is an {\sf R} language construct involving the names
+of columns in \code{h}.
+For each row of \code{h}, the expression \code{e} will be evaluated
+in such a way that each entry in the row is identified by its
+column name. 
+
+<<>>=
+H <- hyperframe(Bugs=waterstriders)
+with(H, npoints(Bugs))
+with(H, distmap(Bugs))
+@
+
+The result of \code{with.hyperframe} 
+is a list of objects (of class \verb!"listof"!),
+or a vector or factor if appropriate.
+
+Notice that (unlike the situation for data frames)
+the operations in the expression \code{e} do not have to
+be vectorised. For example, \code{distmap} expects 
+a single point pattern, and is not vectorised to deal with 
+a list of point patterns. Instead, the expression \code{distmap(Bugs)}
+is evaluated separately in each row of the hyperframe.
+
+\subsection{Summary statistics}
+
+One application of \code{with.hyperframe} is to calculate summary statistics
+for each row of a hyperframe.
+
+For example, the number of points in a point pattern \code{X}
+is returned by \code{npoints(X)}. To calculate this for each of the 
+responses in the \code{simba} dataset,
+
+<<>>=
+with(simba, npoints(Points))
+@ 
+
+The summary statistic can be any kind of object. For example, to
+compute the empirical $K$-functions for each of the 
+patterns in the \code{waterstriders} dataset,
+
+<<>>=
+H <- hyperframe(Bugs=waterstriders)
+K <- with(H, Kest(Bugs))
+@ 
+
+To plot these $K$-functions you can then just type 
+
+\SweaveOpts{width=6,height=2}
+\setkeys{Gin}{width=0.9\textwidth}
+
+<<fig=TRUE>>=
+plot(K)
+@ 
+
+The summary statistic for each row could be a numeric vector:
+
+<<>>=
+H <- hyperframe(Bugs=waterstriders)
+with(H, nndist(Bugs))
+@ 
+
+The result is a list, each entry being a vector of nearest neighbour distances.
+To find the minimum interpoint distance in each pattern:
+
+<<>>=
+with(H, min(nndist(Bugs)))
+@ 
+
+\subsection{Generating new columns}
+
+New columns of a hyperframe can be created by computation
+from the existing columns. 
+
+For example, I can add a new column to the \code{simba} dataset
+that contains pixel images of the distance maps for each of the
+point pattern responses. 
+
+<<fig=FALSE>>=
+simba$Dist <- with(simba, distmap(Points))
+@ 
+
+\subsection{Simulation}
+
+
+This can be useful for simulation. For example, to generate
+Poisson point patterns with different intensities, where the 
+intensities are given by a numeric vector \code{lambda}:
+
+\SweaveOpts{width=6,height=6}
+\setkeys{Gin}{width=0.7\textwidth}
+
+<<fig=TRUE>>=
+lambda <- rexp(6, rate=1/50)
+H <- hyperframe(lambda=lambda)
+H$Points <- with(H, rpoispp(lambda))
+plot(H, quote(plot(Points, main=paste("lambda=", signif(lambda, 4)))))
+@ 
+
+It's even simpler to generate 10 independent Poisson point patterns
+with the \emph{same} intensity 50, say:
+
+<<fig=FALSE>>=
+H$X <- with(H, rpoispp(50))
+@ 
+
+(the expression \code{rpoispp(50)} is evaluated once in each row,
+yielding a different point pattern in each row because of the
+randomness).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Exploratory data analysis}
+
+Before fitting models to the data, it is prudent to explore 
+the data to detect unusual features and to suggest appropriate 
+models. 
+
+\subsection{Exploring spatial trend and covariate effects}
+
+Points may be distributed non-uniformly either because they are
+intrinsically non-uniform (``spatial trend'') or because their abundance
+depends on a spatial covariate (``covariate effects''). 
+
+Non-uniformity of a point pattern can be investigated using 
+the kernel smoothed intensity. This is the convolution of the point pattern
+with a smooth density called the kernel. Effectively each point
+in the pattern is replaced by a copy of the kernel, and the sum of all
+copies of the kernel is the kernel-smoothed intensity function.
+It is computed by \texttt{density.ppp} separately for each point pattern.
+
+<<fig=TRUE>>=
+plot(simba, quote(plot(density(Points), main="")), nrows=2)
+@ 
+
+Covariate effects due to a real-valued spatial covariate (a real-valued
+pixel image) can be investigated
+using the command \code{rhohat}. This uses a kernel smoothing
+technique to fit a model of the form 
+\[
+           \lambda(u) = \rho(Z(u))
+\]
+where $\lambda(u)$ is the point process intensity at a location $u$,
+and $Z(u)$ is the value of the spatial covariate at that location.
+Here $\rho$ is an unknown, smooth function which is to be estimated.
+The function $\rho$ expresses the effect of the
+spatial covariate on the point process intensity. If $\rho$ turns out to
+be constant, then the covariate has no effect on point process intensity
+(and the constant value of $\rho$ is the constant intensity of the
+point process). 
+
+<<fig=TRUE>>=
+rhos <- with(demohyper, rhohat(Points, Image))
+plot(rhos)
+@ 
+
+\SweaveOpts{width=6,height=4}
+\setkeys{Gin}{width=0.9\textwidth}
+
+\subsection{Exploring interpoint interaction}
+
+Still to be written.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Fitting models of spatial trend}
+
+The command \code{mppm} fits models to multiple point patterns.
+Its syntax is very similar to that of \code{lm} and \code{glm}:
+
+<<eval=FALSE>>=
+mppm(formula, data, interaction, ...)
+@ 
+
+where \code{formula} is a formula describing the systematic trend
+part of the model, \code{data} is a hyperframe containing all the data 
+(responses and covariates), and \code{interaction} determines the
+stochastic interpoint interaction part of the model.
+
+For example:
+
+<<eval=FALSE>>=
+mppm(Points ~ group, simba, Poisson())
+@ 
+
+Note that the formula has a left hand side, which identifies
+the response. This should be the name of a column of \code{data}.
+
+\subsection{Trend formula}
+
+The right side of \code{formula} is an expression for the
+linear predictor (effectively the {\bf logarithm} 
+of the spatial trend). 
+
+The variables appearing in the right hand side
+of \code{formula} should be either
+\begin{itemize}
+\item names of columns in \code{data}
+\item objects in the {\sf R} global
+environment (such as \code{pi} and \code{log})
+\item the reserved names \code{x}, \code{y} 
+(representing Cartesian coordinates), \code{marks} (representing mark values
+attached to points) or \code{id} (a factor representing the row number
+in the hyperframe).
+\end{itemize}
+
+\subsubsection{Design covariates}
+
+The variables in the trend could be `design covariates'.
+
+For example, to fit a model to the \code{simba} dataset
+in which all patterns are independent replicates of the
+same uniform Poisson process, with the same constant
+intensity:
+
+<<>>=
+mppm(Points ~ 1, simba)
+@ 
+
+To fit a model in which the two groups of patterns (control and treatment
+groups) each consist of independent replicates of a uniform Poisson process,
+but with possibly different intensity in each group:
+
+<<>>=
+mppm(Points ~ group, simba)
+@ 
+
+To fit a uniform Poisson process to each pattern, with 
+different intensity for each pattern:
+
+<<>>=
+mppm(Points ~ id, simba)
+@ 
+
+\subsubsection{Spatial covariates}
+
+The variables in the trend could be `spatial covariates'.
+
+For example, the \code{demohyper} dataset has a column \code{Image}
+containing pixel images. 
+
+<<>>=
+mppm(Points ~ Image, data=demohyper)
+@ 
+
+This model postulates that each pattern is a Poisson process
+with intensity of the form
+\[
+      \lambda(u) = \exp(\beta_0 + \beta_1 Z(u))
+\]
+at location $u$, where $\beta_0, \beta_1$ are coefficients
+to be estimated, and $Z(u)$ is the value of the pixel image
+\code{Image} at location $u$.
+
+It may or may not be appropriate to assume that the intensity of the points
+is an exponential function of the image pixel value $Z$.
+If instead 
+we wanted the intensity $\lambda(u)$ to be \emph{proportional} to $Z(u)$,
+the appropriate model is 
+
+<<eval=FALSE>>=
+mppm(Points ~ offset(log(Image)), data=demohyper)
+@ 
+
+which corresponds to an intensity proportional to \code{Image},
+\[
+      \lambda(u) = \exp(\beta_0 + \log Z(u)) = e^{\beta_0} \; Z(u).
+\]
+The \code{offset} indicates that there is no coefficient in front
+of $\log Z(u)$. 
+
+Alternatively we could allow a coefficient:
+
+<<eval=FALSE>>=
+mppm(Points ~ log(Image), data=demop)
+@ 
+
+which corresponds to a gamma transformation of \code{Image},
+\[
+      \lambda(u) = \exp(\beta_0 + \beta_1 \log Z(u)) 
+      = e^{\beta_0} \; Z(u)^{\beta_1}.
+\]
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Interpoint interaction}
+
+The stochastic interpoint interaction in a point process model
+is specified by the arguments \code{interaction} and (optionally)
+\code{iformula} in 
+
+<<eval=FALSE>>=
+mppm(formula, data, interaction, ..., iformula=NULL)
+@ 
+
+\subsection{Same interaction for all patterns}
+
+In the simplest case,
+the argument \texttt{interaction} is one of the familiar 
+objects that describe the point process interaction structure.
+It is an object of class \texttt{"interact"}
+created by calling one of the functions
+
+\begin{center}
+  \begin{tabular}{rl}
+    \texttt{Poisson()}  & the Poisson point process\\     
+    \texttt{Hardcore()}  & the hard core process \\ 
+    \texttt{Strauss()}  & the Strauss process \\ 
+    \texttt{StraussHard()}  & the Strauss/hard core point process\\ 
+    \texttt{Softcore()}  & pairwise interaction, soft core potential\\  
+    \texttt{PairPiece()}  & pairwise interaction, piecewise constant \\ 
+    \texttt{DiggleGatesStibbard() }  & Diggle-Gates-Stibbard pair potential \\ 
+    \texttt{DiggleGratton() }  & Diggle-Gratton pair potential \\ 
+    \texttt{Fiksel() }  & Fiksel pair potential \\ 
+    \texttt{LennardJones() }  & Lennard-Jones pair potential \\ 
+    \texttt{Pairwise()}  &	pairwise interaction, user-supplied potential\\ 
+    \texttt{AreaInter()}  &		area-interaction potential\\ 
+    \texttt{Geyer()}	  & Geyer's saturation process\\ 
+    \texttt{BadGey()}	  & multiscale Geyer saturation process\\ 
+    \texttt{Saturated()}  &	Saturated pair model, user-supplied potential\\ 
+    \texttt{OrdThresh()}  &		Ord process, threshold potential\\ 
+    \texttt{Ord()}  & 		        Ord model, user-supplied potential \\ 
+    \texttt{MultiStrauss()}  & 		multitype Strauss process \\ 
+    \texttt{MultiStraussHard()}  & 	multitype Strauss/hard core process \\ 
+    \texttt{Concom()}  &	connected component interaction \\ 
+    \texttt{Hybrid()}  &	hybrid of several interactions \\ 
+  \end{tabular}
+\end{center}
+
+In this `simple' usage of \texttt{mppm}, the point process model
+assumes that all point patterns have exactly the same interpoint interaction,
+(with the same interaction parameters), and only differ in their spatial trend.
+
+\subsection{Hyperframe of interactions}
+
+More generally the argument \code{interaction} can be a hyperframe
+containing objects of class \texttt{"interact"}. 
+
+For example, we
+might want to fit a Strauss process to each point pattern, but with
+a different Strauss interaction radius for each pattern. 
+
+<<echo=FALSE,results=hide>>=
+radii <- with(simba, mean(nndist(Points)))
+@ 
+
+Then \code{radii} is a vector of numbers which we could use
+as the values of the interaction radius for each case. 
+First we need to make the interaction objects:
+
+<<>>=
+Rad <- hyperframe(R=radii)
+Str <- with(Rad, Strauss(R))
+@ 
+
+Then we put them into a hyperframe and fit the model:
+
+<<>>=
+Int <- hyperframe(str=Str)
+mppm(Points ~ 1, simba, interaction=Int)
+@ 
+
+An important constraint is that all of the interaction objects 
+in one column must be \emph{instances of the same process} (e.g. Strauss)
+albeit possibly having different parameter values. For example, you cannot
+put Poisson and Strauss processes in the same column. 
+
+\subsection{Interaction formula}
+
+If \code{interaction} is a hyperframe, then 
+the additional argument \code{iformula} may be used to
+fully specify the interaction.
+
+(An \code{iformula} is also required if \code{interaction}
+has more than one column.)
+
+The \code{iformula} should be a formula without a left hand side.
+Variables on the right hand side are typically the names of
+columns in \code{interaction}.
+
+\subsubsection{Selecting one column}
+
+If the right hand side of \code{iformula} is a single name,
+then this identifies the column in \code{interaction}
+to be used as the interpoint interaction structure.
+
+<<>>=
+h <- hyperframe(Y=waterstriders)
+g <- hyperframe(po=Poisson(), str4 = Strauss(4), str7= Strauss(7))
+mppm(Y ~ 1, data=h, interaction=g, iformula=~str4)
+@ 
+
+\subsubsection{Interaction depending on design} 
+
+The \code{iformula} can also involve columns of \code{data}, but
+only those columns that are vectors or factors. This allows us to
+specify an interaction that depends on the experimental design.
+[This feature is {\bf experimental}.]
+For example
+
+<<>>=
+fit <- mppm(Points ~ 1, simba, Strauss(0.07), iformula = ~Interaction*group)
+@ 
+
+Since \code{Strauss(0.1)} is not a hyperframe, it is first converted
+to a hyperframe with a single column named \code{Interaction}.
+
+The \code{iformula = ~Interaction*group} specifies (since \code{group}
+is a factor) that the interpoint interaction shall have a different 
+coefficient in each experimental group. That is, we fit a model
+which has two different values for the Strauss interaction parameter $\gamma$,
+one for the control group and one for the treatment group.
+
+When you print the result of such a fit, 
+the package tries to 
+do `automatic interpretation' of the fitted model (translating the
+fitted interaction coefficients into meaningful numbers like $\gamma$). 
+This will be successful in \emph{most} cases:
+
+<<>>=
+fit
+@ 
+
+<<echo=FALSE,results=hide>>=
+co <- coef(fit)
+si <- function(x) { signif(x, 4) }
+@ 
+
+Thus we see that the estimate of the Strauss parameter $\gamma$ 
+for the control group is \Sexpr{si(exp(co[2]))}, and 
+for the treatment group \Sexpr{si(exp(sum(co[c(2,4)])))}
+(the correct values in this simulated dataset were $1$ and $0.5$).
+
+The fitted model can also be interpreted directly from the fitted
+canonical coefficients:
+
+<<>>=
+coef(fit)
+@ 
+
+The last output shows all the coefficients $\beta_j$
+in the linear predictor for the (log) conditional intensity. 
+
+The interpretation of the model coefficients, for any fitted model in \R,
+depends on the \emph{contrasts} which were applicable when the model was
+fitted. This is part of the core {\sf R} system: see \code{help(contrasts)}
+or \code{options(contrasts)}. If you did not specify otherwise, 
+the default is to use \emph{treatment contrasts}. This means that,
+for an explanatory variable which is a \texttt{factor} with $N$ levels, 
+the first level of the factor is used as a baseline, and the
+fitted model coefficients represent the factor levels $2, 3, \ldots, N$
+relative to this baseline. 
+
+In the output above, there is a coefficient for \code{(Intercept)} 
+and one for \code{grouptreatment}. These are coefficients related to
+the \code{group} factor. According to the ``treatment contrasts'' rule, 
+the \code{(Intercept)} coefficient is
+the estimated effect for the control group, and the
+\code{grouptreatment} coefficient is the estimated difference between
+the treatment and control groups. Thus the fitted first order
+trend is $\exp(\Sexpr{si(co[1])}) = \Sexpr{si(exp(co[1]))}$ 
+for the control group
+and $\exp(\Sexpr{si(co[1])} + \Sexpr{si(co[3])}) 
+ = \Sexpr{si(exp(sum(co[c(1,3)])))}$ for the treatment group.
+The correct values in this simulated dataset were 
+$80$ and $100$.
+
+The remaining coefficients in the output are \code{Interaction}
+and \code{Interaction:grouptreatment}. Recall that the Strauss process
+interaction term 
+is $\gamma^{t(u,\xx)} = \exp(t(u,\xx) \log\gamma)$
+at a spatial location $u$, for a point pattern $\xx$. 
+Since we're using treatment contrasts, the coefficient 
+\code{Interaction} is the estimate of 
+$\log\gamma$ for the control group.
+The coefficient  \code{Interaction:grouptreatment} is the 
+estimate of the difference in $\log\gamma$ between the 
+treatment and control groups. Thus the estimated Strauss interaction
+parameter $\gamma$ is $\exp(\Sexpr{si(co[2])}) = \Sexpr{si(exp(co[2]))}$ 
+for the control group and 
+$\exp(\Sexpr{si(co[2])} + (\Sexpr{si(co[4])})) = \Sexpr{si(exp(co[2]+co[4]))}$ 
+for the treatment group.
+The correct values were $1$ and $0.5$.
+
+\subsubsection{Completely different interactions for different cases} 
+
+In the previous example, when we fitted a Strauss model to all 
+point patterns in the \code{simba} dataset, the fitted model for the
+patterns in the control group was close to Poisson ($\gamma \approx 1$).
+Suppose we now want to fit a model which {\it is}
+Poisson in the control group, and Strauss in the treatment group. 
+The Poisson and Strauss interactions must be given as separate columns
+in a hyperframe of interactions:
+
+<<eval=FALSE>>=
+interaction=hyperframe(po=Poisson(), str=Strauss(0.07))
+@ 
+
+What do we write for the 
+\code{iformula}? The following \emph{will not} work:
+<<eval=FALSE>>=
+iformula=~ifelse(group=="control", po, str)
+@ 
+This does not work because the Poisson and Strauss models are `incompatible'
+inside such expressions. The canonical sufficient statistics for
+the Poisson and Strauss processes do not have the same dimension. 
+Internally in \code{mppm} we translate the symbols \code{po} and \code{str}
+into matrices; the dimensions of these matrices are different,
+so the \code{ifelse} expression cannot be evaluated.
+
+Instead we need something like the following:
+<<eval=FALSE>>=
+iformula=~I((group=="control")*po) + I((group=="treatment") * str)
+@ 
+The letter \code{I} here is a standard R function that prevents its argument
+from being interpreted as a formula (thus the \code{*} is interpreted
+as multiplication instead of a model interaction). The expression
+\code{(group=="control")} is logical, and when multiplied by the matrix
+\code{po}, yields a matrix.
+
+So the following does work:
+
+<<>>=
+g <- hyperframe(po=Poisson(), str=Strauss(0.07))
+fit2 <- mppm(Points ~ 1, simba, g, 
+             iformula=~I((group=="control")*po) 
+                     + I((group=="treatment") * str))
+fit2
+@ 
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%#%^!ifdef RANDOMEFFECTS
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Random effects}
+
+\subsection{Mixed effects models}
+
+It is also possible to fit models that include `random effects'.
+Effectively, some of the coefficients
+in the model are assumed to be Normally-distributed random variables
+instead of constants.
+
+\subsubsection{Mixed Poisson model}
+
+Consider the simplest model of a uniform Poisson process which we fitted to
+the 3 point patterns of waterstriders. It might be sensible to assume that
+each pattern is a realisation of a Poisson process, but with 
+{\em random intensity\/}. In each realisation the intensity $\lambda$
+is constant across different locations, but it
+is a different, random value in different realisations.
+This example is called a `mixed Poisson process' and belongs to the
+class of `Cox processes' (Poisson processes with random intensity 
+functions).
+Let's assume further that the log-intensity 
+is a Normal random variable.
+Then the model is a (very degenerate) special case of
+a `log-Gaussian Cox process'. 
+
+To fit such a model we use the standard techniques of mixed effects
+models \cite{lairware82,davigilt95,pinhbate00}.
+The mixed Poisson process which we discussed above would
+be written in standard form
+\begin{equation}
+\label{mixPois}
+   \lambda_i(u) = \exp(\mu + Z_i)
+\end{equation}
+for the $i$th point pattern, where $\mu$ is a parameter to be estimated 
+(the `fixed effect')
+and $Z_i \sim N(0, \sigma^2)$ is a zero-mean Normal random variable
+(the `random effect' for point pattern $i$). In the simplest case we
+would assume that $Z_1, \ldots, Z_n$ are independent.
+The variance $\sigma^2$ of the random effects
+would be estimated. One can also estimate the individual realised values
+$z_i$ of the random effects for each point pattern, although these are
+usually not of such great interest. Since the model includes both
+fixed and random effects, it is called a ``mixed-effects'' model.
+
+\subsubsection{Dependence structure}
+
+When we formulate a random-effects or mixed-effects model, we must 
+specify the dependence structure of the random effects. In the model above
+we assumed that the $Z_i$ are independent for all point patterns $i$. 
+If the experiment consists of two groups, we could alternatively assume
+that $Z_i = Z_j$ whenever $i$ and $j$ belong to the same group. In other words
+all the patterns in one group have the same value of the random effect.
+So the random effect is associated with the group rather than with
+individual patterns. This could be appropriate if, for example, 
+the groups represent different
+batches of a chemical. Each batch is prepared under slightly different 
+conditions so we believe that there are random variations between batches,
+but within a batch we believe that the chemical is well-mixed.
+
+\subsubsection{Random effects are coefficients}
+
+In the mixed Poisson model (\ref{mixPois}), 
+the random effect is an additive constant
+(with a random value) in the log-intensity. In general, a
+random effect is a \emph{coefficient} of one of the covariates.
+For example if $v$ is a real-valued design covariate (e.g. `temperature'), 
+with value $v_i$ for the $i$th point pattern, then we could assume
+\begin{equation}
+\label{ranef2}
+   \lambda_i(u) = \exp(\mu + Z_i v_i)
+\end{equation}
+where $Z_i \sim N(0, \sigma^2)$ are independent for different $i$.
+This model has a random effect in the dependence on $v$.
+
+We could also have a random effect for a spatial covariate $V$.
+Suppose $V_i$ is a real-valued image for the $i$th pattern 
+(so that $V_i(u)$ is the value of some covariate at the location $u$
+for the $i$th case). Then we could assume
+\begin{equation}
+\label{ranef3}
+   \lambda_i(u) = \exp(\mu + Z_i V_i(u))
+\end{equation}
+where $Z_i \sim N(0, \sigma^2)$ are independent for different $i$.
+This kind of random effect would be appropriate if, for example, the 
+images $V_i$ are not `normalised' or `standardised' relative to each 
+other (e.g.\ they are images taken under different illumination). Then the
+coefficients $Z_i$ effectively include the rescaling necessary to standardise
+the images.
+
+\subsection{Fitting a mixed-effects model}
+
+The call to \texttt{mppm} can also include the argument
+\texttt{random}. This should be a formula (with no left-hand side) 
+describing the structure of random effects. 
+
+The formula for random effects
+must be recognisable to \texttt{lme}. It is typically of the form
+\begin{verbatim}
+      ~x1 + ... + xn | g
+\end{verbatim}
+or
+\begin{verbatim}
+      ~x1 + ... + xn | g1/.../gm
+\end{verbatim}
+where \verb!x1 + ... + xn! specifies the covariates for the random effects
+and \texttt{g} or \verb!g1/.../gm! determines the grouping (dependence) 
+structure. Here \code{g} or \code{g1, \ldots, gm} should be factors.
+
+To fit the mixed Poisson model (\ref{mixPois}) to the waterstriders,
+we want to have a random intercept coefficient
+(so \texttt{x} is \texttt{1}) that varies for different point patterns
+(so \texttt{g} is \texttt{id}). 
+The reserved name \code{id} is a factor referring to the individual
+point pattern. Thus
+
+<<>>=
+H <- hyperframe(P=waterstriders)
+mppm(P ~ 1, H, random=~1|id)
+@ 
+
+To fit the mixed effects model (\ref{ranef2}) to the coculture data
+with the \code{AstroIm} covariate, with a random effect associated
+with each well,
+
+<<eval=FALSE>>=
+mppm(Neurons ~ AstroIm, random=~AstroIm|WellNumber)
+@ 
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%#%^!endif
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\section{Studying the fitted model}
+
+Fitted models produced by \code{mppm}
+can be examined and validated in many ways.
+
+\subsection{Fits for each pattern}
+
+
+\subsubsection{Subfits}
+
+The command \code{subfits} takes an \code{mppm} object
+and extracts, for each individual point pattern, 
+the fitted point process model for that pattern
+\emph{that is implied by the overall fit}. It returns a list of 
+objects of class \code{ppm}. 
+
+<<>>=
+H <- hyperframe(W=waterstriders)
+fit <- mppm(W ~ 1, H)
+subfits(fit)
+@ 
+
+In this example the result is a list of three \code{ppm} objects
+representing the implied fits for each of the three point patterns
+in the \code{waterstriders} dataset.
+Notice that {\bf the fitted coefficients are the same} in all three 
+models. 
+
+Note that there are some unresolved difficulties with the implementation of
+\code{subfits}. Two completely different implementations are supplied
+in the package; they are called \code{subfits.old} 
+%(used in versions 0.1--1 and earlier) 
+and \code{subfits.new}.% (introduced in 0.1--2).
+The old version would occasionally crash. 
+Unfortunately the newer version \code{subfits.new} is quite memory-hungry
+and sometimes causes R to hang.
+We're still working on this problem. So for the time being,
+\code{subfits} is the same as \code{subfits.old}. You can change this
+simply by reassigning, e.g.
+
+<<eval=FALSE>>=
+subfits <- subfits.new
+@ 
+
+\subsubsection{Fitting separately to each pattern} 
+
+For comparison, we could fit a point process model separately
+to each point pattern dataset using \code{ppm}. The easy way to do this is 
+with \code{with.hyperframe}.
+
+To fit a \emph{separate} uniform Poisson 
+point process to each of the three waterstriders patterns,
+
+<<>>=
+H <- hyperframe(W=waterstriders)
+with(H, ppm(W))
+@ 
+
+The result is again a list of three fitted point process models
+(objects of class \code{ppm}), but now the fitted coefficients
+are different.
+
+\subsection{Residuals}
+
+One standard way to check a fitted model
+is to examine  the residuals. 
+
+\subsubsection{Point process residuals}
+
+Some recent papers \cite{baddetal05,baddmollpake08} have defined
+residuals for a fitted point process model (fitted to a \emph{single}
+point pattern). These residuals are implemented in \code{spatstat}
+as \code{residuals.ppm} and apply to an object of class \code{ppm}, 
+that is, a model fitted to a \emph{single} point pattern. 
+
+The command \code{residuals.mppm} computes the point process residuals
+for an \code{mppm} object. 
+
+<<>>=
+fit <- mppm(P ~ x, hyperframe(P=waterstriders))
+res <- residuals(fit)
+@ 
+
+The result is a list, with one entry for each of the point pattern
+datasets. Each list entry contains the point process residuals 
+for the corresponding point pattern dataset.
+Each entry in the list is a signed measure (object of class \code{"msr"})
+as explained in the help for \code{residuals.ppm}).
+It can be plotted:
+
+<<fig=TRUE>>=
+plot(res)
+@ 
+
+You probably want the smoothed residual field:
+
+<<fig=TRUE>>=
+smor <- with(hyperframe(res=res), Smooth(res, sigma=4))
+plot(smor)
+@ 
+
+\subsubsection{Sums of residuals}
+
+It would be useful to have a residual that is a single value
+for each point pattern (representing how much that point pattern
+departs from the model fitted to all the point patterns).
+
+That can be computed by \emph{integrating} the residual measures
+using the function \code{integral.msr}:
+
+<<>>=
+fit <- mppm(P ~ x, hyperframe(P=waterstriders))
+res <- residuals(fit)
+totres <- sapply(res, integral.msr)
+@ 
+
+In designed experiments we can plot these total residuals against
+the design covariates:
+
+<<fig=TRUE>>=
+fit <- mppm(Points~Image, data=demohyper)
+resids <- residuals(fit, type="Pearson")
+totres <- sapply(resids, integral.msr)
+areas <- with(demohyper, area.owin(as.owin(Points)))
+df <- as.data.frame(demohyper[, "Group"])
+df$resids <- totres/areas
+plot(resids~Group, df)
+@ 
+
+\subsubsection{Four-panel diagnostic plots} 
+
+Sometimes a more useful tool is the function \code{diagnose.ppm}
+which produces a four-panel diagnostic plot based on the 
+point process residuals. However, it is only available for 
+\code{ppm} objects.
+
+To obtain a four-panel diagnostic plot for each of the 
+point patterns, do the following:
+
+\begin{enumerate}
+\item fit a model to multiple point patterns using \code{mppm}.
+\item extract the individual fits using \code{subfits}.
+\item plot the residuals of the individual fits.
+\end{enumerate}
+
+For example:
+
+<<fig=TRUE>>=
+fit <- mppm(P ~ 1, hyperframe(P=waterstriders))
+sub <- hyperframe(Model=subfits(fit))
+plot(sub, quote(diagnose.ppm(Model)))
+@ 
+
+(One could also do this for models fitted separately to the 
+individual point patterns.)
+
+\subsubsection{Residuals of the parameter estimates}
+
+We can also compare the parameter estimates obtained 
+by fitting the model simultaneously to all patterns (using \code{mppm})
+with those obtained by fitting the model separately to each 
+pattern (using \code{ppm}). 
+
+<<>>=
+H <- hyperframe(P = waterstriders)
+fitall <- mppm(P ~ 1, H)
+together <- subfits(fitall)
+separate <- with(H, ppm(P))
+Fits <- hyperframe(Together=together, Separate=separate)
+dr <- with(Fits, unlist(coef(Separate)) - unlist(coef(Together)))
+dr
+exp(dr)
+@ 
+
+One could also try deletion residuals, etc.
+
+\subsection{Goodness-of-fit tests} 
+
+\subsubsection{Quadrat count test}
+
+The $\chi^2$ goodness-of-fit test based on quadrat counts is implemented
+for objects of class \code{ppm} (in \code{quadrat.test.ppm})
+and also for objects of class \code{mppm} (in \code{quadrat.test.mppm}).
+
+This is a goodness-of-fit test for a fitted {\bf Poisson} point process
+model only. The model could be uniform or non-uniform and the intensity
+might depend on covariates. 
+
+<<>>=
+H <- hyperframe(X=waterstriders)
+
+# Poisson with constant intensity for all patterns
+fit1 <- mppm(X~1, H)
+quadrat.test(fit1, nx=2)
+
+# uniform Poisson with different intensity for each pattern
+fit2 <- mppm(X ~ id, H)
+quadrat.test(fit2, nx=2)
+@ 
+
+See the help for \code{quadrat.test.ppm} and \code{quadrat.test.mppm}
+for further details.
+
+\subsubsection{Kolmogorov-Smirnov test}
+
+The Kolmogorov-Smirnov test of goodness-of-fit of a Poisson
+point process model compares the observed and predicted
+distributions of the values of a spatial covariate.
+
+We want to test the null hypothesis $H_0$ that the observed point pattern
+${\mathbf x}$ is a realisation from the Poisson process with intensity 
+function $\lambda(u)$ (for locations $u$ in the window $W$).
+Let $Z(u)$ be a given, real-valued covariate defined at each spatial location
+$u$. Under $H_0$, the \emph{observed} values of $Z$ at the 
+data points, $Z(x_i)$ for each $x_i \in {\mathbf x}$, are independent
+random variables with common probability distribution function
+\[
+    F_0(z) = \frac{\int_W \lambda(u) \indicate{Z(u) \le z} \dee u}
+                {\int_W \lambda(u)                       \dee u}.
+\]
+We can therefore apply the Kolmogorov-Smirnov test 
+of goodness-of-fit. This compares the empirical cumulative distribution of
+the observed values $Z(x_i)$ to the predicted c.d.f. $F_0$.
+
+The test is implemented as \code{kstest.ppm}. The syntax is 
+
+<<eval=FALSE>>=
+kstest.mppm(model, covariate)
+@ 
+
+where \code{model} is a fitted model (of class \texttt{"mppm"})
+and \code{covariate} is either 
+\begin{itemize}
+\item a \code{function(x,y)} making it possible to compute the value 
+of the covariate at any location \code{(x,y)}
+\item a pixel image containing the covariate values 
+\item a list of functions, one for each row of the hyperframe of
+original data
+\item a list of pixel images, one for each row of the hyperframe of 
+original data
+\item a hyperframe with one column containing either functions or 
+pixel images.
+\end{itemize}
+
+\newpage
+
+\addcontentsline{toc}{section}{Bibliography}
+
+%\bibliography{%
+%extra,%
+%extra2,%
+%biblio/badd,%
+%biblio/bioscience,%
+%biblio/censoring,%
+%biblio/mcmc,%
+%biblio/spatstat,%
+%biblio/stat,%
+%biblio/stochgeom%
+%}
+
+\begin{thebibliography}{1}
+
+\bibitem{baddmollpake08}
+A. Baddeley, J. M{\o}ller, and A.G. Pakes.
+\newblock Properties of residuals for spatial point processes.
+\newblock {\em Annals of the Institute of Statistical Mathematics},
+  60:627--649, 2008.
+
+\bibitem{TheBook}
+A. Baddeley, E. Rubak, and R. Turner.
+\newblock {\em Spatial Point Patterns: Methodology and Applications with R}.
+\newblock Chapman \& Hall/CRC Press, 2015.
+
+\bibitem{statpaper}
+A. Baddeley, I. Sintorn, L. Bischof, R. Turner, and S. Heggarty.
+\newblock Analysing designed experiments where the response is a spatial point
+  pattern.
+\newblock In preparation.
+
+\bibitem{baddetal05}
+A. Baddeley, R. Turner, J. M{\o}ller, and M. Hazelton.
+\newblock Residual analysis for spatial point processes (with discussion).
+\newblock {\em Journal of the Royal Statistical Society, series B},
+  67(5):617--666, 2005.
+
+\bibitem{chenetal08}
+B.J. Chen, G.P. Leser, D. Jackson, and R.A. Lamb.
+\newblock The influenza virus {M2} protein cytoplasmic tail interacts with the
+  {M1} protein and influences virus assembly at the site of virus budding.
+\newblock {\em Journal of Virology}, 82:10059--10070, 2008.
+
+%#%^!ifdef RANDOMEFFECTS  
+\bibitem{davigilt95}
+M. Davidian and D.M. Giltinan.
+\newblock {\em Nonlinear Mixed Effects Models for Repeated Measurement Data}.
+\newblock Chapman and Hall, 1995.
+%#%^!endif
+
+\bibitem{digglangbene91}
+P.J. Diggle, N. Lange, and F. M. Benes.
+\newblock Analysis of variance for replicated spatial point patterns in
+  clinical neuroanatomy.
+\newblock {\em Journal of the {A}merican {S}tatistical {A}ssociation},
+  86:618--625, 1991.
+
+%#%^!ifdef RANDOMEFFECTS  
+\bibitem{lairware82}
+N.M. Laird and J.H. Ware.
+\newblock Random-effects models for longitudinal data.
+\newblock {\em Biometrics}, 38:963--974, 1982.
+%#%^!endif
+
+\bibitem{pent84}
+A. Penttinen.
+\newblock {\em Modelling Interaction in Spatial Point Patterns: Parameter
+  Estimation by the Maximum Likelihood Method}.
+\newblock Number 7 in {Jyv\"askyl\"a} Studies in Computer Science, Economics
+  and Statistics. University of {Jyv\"askyl\"a}, 1984.
+
+%#%^!ifdef RANDOMEFFECTS  
+\bibitem{pinhbate00}
+J.C. Pinheiro and D.M. Bates.
+\newblock {\em Mixed-Effects Models in {S} and {S-PLUS}}.
+\newblock Springer, 2000.
+%#%^!endif
+
+\end{thebibliography}
+
+%\addcontentsline{toc}{section}{Index}
+
+%\printindex
+
+\end{document}
diff --git a/vignettes/shapefiles.Rnw b/vignettes/shapefiles.Rnw
new file mode 100755
index 0000000..a90b716
--- /dev/null
+++ b/vignettes/shapefiles.Rnw
@@ -0,0 +1,497 @@
+\documentclass[twoside,11pt]{article}
+
+% \VignetteIndexEntry{Handling shapefiles in the spatstat package}
+
+\SweaveOpts{eps=TRUE}
+
+<<echo=FALSE,results=hide,fig=FALSE>>=
+options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1))))
+@ 
+
+\usepackage{graphicx}
+\usepackage[colorlinks=true,urlcolor=blue]{hyperref}
+\usepackage{color}
+\usepackage{anysize}
+\marginsize{2cm}{2cm}{2cm}{2cm}
+
+\newcommand{\pkg}[1]{\texttt{#1}}
+\newcommand{\bold}[1]{{\textbf {#1}}}
+\newcommand{\R}{{\sf R}}
+
+\begin{document}
+%\bibliographystyle{plain}
+\thispagestyle{empty}
+
+<<echo=FALSE,results=hide>>=
+library(spatstat)
+options(useFancyQuotes=FALSE)
+sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Date")
+sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Version")
+@ 
+
+\title{Handling shapefiles in the \texttt{spatstat} package}
+\author{Adrian Baddeley, Rolf Turner and Ege Rubak}
+\date{
+\Sexpr{sdate} \\ 
+\pkg{spatstat} version \texttt{\Sexpr{sversion}}
+}
+\maketitle
+
+This vignette explains how to read data into the \pkg{spatstat} package
+from files in the popular `shapefile' format. 
+
+This vignette is part of the documentation included in 
+\pkg{spatstat} version \texttt{\Sexpr{sversion}}.
+The information applies to 
+\pkg{spatstat} versions \texttt{1.36-0} and above.
+
+\section{Shapefiles}
+
+A shapefile represents a list of spatial objects 
+--- a list of points, a list of lines, or a list of polygonal regions --- 
+and each object in the list may have additional variables attached to it.
+
+A dataset stored in shapefile format is actually stored in a
+collection of text files, for example
+\begin{verbatim}
+     mydata.shp
+     mydata.prj
+     mydata.sbn
+     mydata.dbf
+\end{verbatim}
+which all have the same base name \texttt{mydata} but different file extensions.
+To refer to this collection you will always use the filename with the 
+extension \texttt{shp}, for example \texttt{mydata.shp}. 
+
+\section{Helper packages}
+\label{S:helpers}
+
+We'll use two other packages%
+\footnote{In previous versions of \pkg{spatstat},
+  the package \pkg{gpclib} was also needed for some tasks.
+  This is no longer required.}
+ to handle shapefile data.
+
+The \pkg{maptools} package is designed specifically for handling
+file formats for spatial data. It contains 
+facilities for reading and writing files in shapefile format.
+
+The \pkg{sp} package supports a standard set of spatial data types
+in \R. These standard data types can be handled by many other packages,
+so it is useful to convert your spatial data into one of the 
+data types supported by \pkg{sp}.
+
+\section{How to read shapefiles into \pkg{spatstat}} 
+
+To read shapefile data into \pkg{spatstat}, you follow two steps:
+\begin{enumerate}
+\item 
+  using the facilities of \pkg{maptools}, read the shapefiles
+  and store the data in one of the standard formats supported by \pkg{sp}.
+\item
+  convert the \pkg{sp} data type into one of the data types
+  supported by \pkg{spatstat}.
+\end{enumerate}
+
+\subsection{Read shapefiles using \pkg{maptools}} 
+
+Here's how to read shapefile data.
+
+\begin{enumerate}
+\item ensure that the package \pkg{maptools} is installed. You will need
+version \texttt{0.7-16} or later.
+\item start \R\ and load the package: 
+<<eval=FALSE>>=
+library(maptools)
+@ 
+\item read the shapefile into an object in the \pkg{sp} package
+using \texttt{readShapeSpatial}, for example
+<<eval=FALSE>>=
+x <- readShapeSpatial("mydata.shp")
+@ 
+\item 
+To find out what kind of spatial objects are represented by the dataset,
+inspect its class:
+<<eval=FALSE>>=
+class(x)
+@ 
+The class may be \texttt{SpatialPoints} indicating a point pattern, 
+\texttt{SpatialLines} indicating a list of polygonal lines, or
+\texttt{SpatialPolygons} indicating a list of polygons. It may also be
+\texttt{SpatialPointsDataFrame},
+\texttt{SpatialLinesDataFrame} or 
+\texttt{SpatialPolygonsDataFrame} indicating that, in addition to the 
+spatial objects, there is a data frame of additional variables.
+The classes \texttt{SpatialPixelsDataFrame} and \texttt{SpatialGridDataFrame}
+represent pixel image data.
+\end{enumerate}
+
+Here are some examples, using the example shapefiles supplied in the
+\pkg{maptools} package itself.
+
+% fake data because we don't want spatstat to depend on maptools
+<<echo=FALSE,results=hide>>=
+baltim <- columbus <- fylk <- list()
+class(baltim) <- "SpatialPointsDataFrame"
+class(columbus) <- "SpatialPolygonsDataFrame"
+class(fylk) <- "SpatialLinesDataFrame"
+@ 
+<<eval=FALSE>>=
+setwd(system.file("shapes", package="maptools"))
+baltim   <- readShapeSpatial("baltim.shp")
+columbus <- readShapeSpatial("columbus.shp")
+fylk     <- readShapeSpatial("fylk-val.shp")
+@ 
+<<>>=
+class(baltim)
+class(columbus)
+class(fylk)
+@ 
+
+\subsection{Convert data to \pkg{spatstat} format}
+
+To convert the dataset to an object in the
+\pkg{spatstat} package, the procedure depends on the 
+type of data, as explained below. 
+
+Both packages \pkg{maptools} and \pkg{spatstat} must be loaded
+in order to convert the data.
+
+\subsubsection{Objects of class \texttt{SpatialPoints}}
+
+An object \texttt{x} of class \texttt{SpatialPoints}
+represents a spatial point pattern.
+Use \verb!as(x, "ppp")! or \texttt{as.ppp(x)} to convert it to a 
+spatial point pattern in \pkg{spatstat}.
+
+(The conversion is performed by \texttt{as.ppp.SpatialPoints},
+a function in \pkg{maptools}.)
+
+The window for the point pattern will be taken from
+the bounding box of the points. You will probably wish to change this window,
+usually by taking another dataset to provide the window information.
+Use \verb![.ppp! to change the window: if \texttt{X} is a point pattern
+object of class \verb!"ppp"! and \texttt{W} is a window object of class
+\verb!"owin"!, type
+<<eval=FALSE>>=
+X <- X[W]
+@ 
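+
+Putting the two steps together, here is a minimal sketch
+(using a hypothetical unit-square window \texttt{W} standing in for
+window information taken from another dataset):
+<<eval=FALSE>>=
+X <- as(x, "ppp")          # window is initially the bounding box
+W <- owin(c(0,1), c(0,1))  # hypothetical window from another source
+X <- X[W]                  # restrict the pattern to W
+@ 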
+
+\subsubsection{Objects of class \texttt{SpatialPointsDataFrame}}
+
+An object \texttt{x} of class \texttt{SpatialPointsDataFrame}
+represents a pattern of points with additional variables (`marks') attached to
+each point. It includes an object of class \texttt{SpatialPoints} 
+giving the point locations, and a data frame containing the
+additional variables attached to the points. 
+
+Use \verb!as(x, "ppp")! or \texttt{as.ppp(x)} to convert an
+object \texttt{x} of class \texttt{SpatialPointsDataFrame} to a 
+spatial point pattern in \pkg{spatstat}. In this conversion,
+the data frame of additional variables in \texttt{x} will 
+become the \texttt{marks} of the point pattern \texttt{y}.
+
+<<eval=FALSE>>=
+y <- as(x, "ppp")
+@ 
+
+(The conversion is performed by \texttt{as.ppp.SpatialPointsDataFrame},
+a function in \pkg{maptools}.)
+
+Before the conversion you can extract the
+data frame of auxiliary data by 
+\verb!df <- x@data! or \verb!df <- slot(x, "data")!.
+After the conversion you can extract these data by
+\verb!df <- marks(y)!. 
+
+For example:
+
+<<eval=FALSE>>=
+balt <- as(baltim, "ppp")
+bdata <- slot(baltim, "data")
+@ 
+
+\subsubsection{Objects of class \texttt{SpatialLines}}
+\label{spatiallines.2.psp}
+
+A ``line segment'' is the straight line between two points in the plane.
+
+In the \pkg{spatstat} package, an object of class \texttt{psp}
+(``planar segment pattern'')
+represents a pattern of line segments, which may or may not be
+connected to each other (like matches which have fallen at random
+on the ground). 
+
+In the \pkg{sp} package, an object of class \texttt{SpatialLines}
+represents a \textbf{list of lists} of \textbf{connected curves}, 
+each curve consisting of a sequence of straight 
+line segments that are joined together (like
+several pieces of a broken bicycle chain).
+
+So these two data types do not correspond exactly.
+
+The list-of-lists hierarchy in a \texttt{SpatialLines} object 
+is useful when representing internal divisions in a country.
+For example, if \texttt{USA} is an object of class \texttt{SpatialLines}
+representing the borders of the United States
+of America, then \verb!USA@lines! might be a list of length 52, with 
+\verb!USA@lines[[i]]! representing the borders of the \texttt{i}-th State. 
+The borders of each State consist of several different curved lines. Thus 
+\verb!USA@lines[[i]]@Lines[[j]]! would represent the \texttt{j}-th 
+piece of the boundary of the \texttt{i}-th State.
+
+If \texttt{x} is an object of class \texttt{SpatialLines},
+there are several things that you might want to do:
+\begin{enumerate}
+\item 
+  collect together all the line segments (all the segments that make up all the
+  connected curves) and store them as a single object of class \texttt{psp}.
+\begin{quote}
+  To do this, 
+  use \verb!as(x, "psp")! or \texttt{as.psp(x)} to convert it to a 
+  spatial line segment pattern. 
+\end{quote}
+\item 
+  convert each connected curve to an object of class \texttt{psp},
+  keeping different connected curves separate.
+
+  To do this, type something like the following:
+<<eval=FALSE>>=
+out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) })
+@ 
+
+The result will be a \textbf{list of lists} of objects of class \texttt{psp}.
+Each one of these objects represents a connected curve,
+although the \pkg{spatstat} package does not know that.
+The list structure will reflect the list structure of the original
+\texttt{SpatialLines} object \texttt{x}. If that's not what you want,
+then use \verb!curvelist <- do.call("c", out)! or
+<<eval=FALSE>>=
+curvegroup <- lapply(out, function(z) { do.call("superimpose", z)})
+@ 
+to collapse the list-of-lists-of-\texttt{psp}'s 
+into a list-of-\texttt{psp}'s. In the first case, \texttt{curvelist[[i]]}
+is a \texttt{psp} object representing the \texttt{i}-th connected curve. 
+In the second case, \texttt{curvegroup[[i]]}
+is a \texttt{psp} object containing all the line segments in
+the \texttt{i}-th group of connected curves (for example the 
+\texttt{i}-th State in the \texttt{USA} example).
+\end{enumerate}
+
+The window for the spatial line segment pattern can be specified
+as an argument \texttt{window} to the function \texttt{as.psp}.
+
+(The conversion is performed by \texttt{as.psp.SpatialLines}
+or \texttt{as.psp.Lines}, which are functions in \pkg{maptools}.)
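+
+For example, here is a minimal sketch of option 1
+(assuming \texttt{x} is a \texttt{SpatialLines} object,
+and using a hypothetical unit-square window):
+<<eval=FALSE>>=
+segs <- as.psp(x)                               # all segments, pooled
+segs <- as.psp(x, window=owin(c(0,1), c(0,1)))  # with an explicit window
+@ 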
+
+\subsubsection{Objects of class \texttt{SpatialLinesDataFrame}}
+
+An object \texttt{x} of class \texttt{SpatialLinesDataFrame}
+is a \texttt{SpatialLines} object with additional data.
+The additional data is stored as a data frame \verb!x@data!
+with one row for each entry in \verb!x@lines!, that is,
+one row for each group of connected curves. 
+
+In the \pkg{spatstat} package, an object of class \texttt{psp}
+(representing a collection of line segments)
+may have a data frame of marks. Note that each \emph{line segment}
+in a \texttt{psp} object may have different mark values. 
+
+If \texttt{x} is an object of class \texttt{SpatialLinesDataFrame},
+there are two things that you might want to do:
+\begin{enumerate}
+\item collect together all the line segments that make up all the
+connected lines, and store them as a single object of class \texttt{psp}.
+\begin{quote}
+  To do this, 
+  use \verb!as(x, "psp")! or \texttt{as.psp(x)} to convert it to a 
+  marked spatial line segment pattern. 
+\end{quote}
+\item keep each connected curve separate, and convert each connected
+curve to an object of class \texttt{psp}.
+To do this, type something like the following:
+<<eval=FALSE>>=
+out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) })
+dat <- x@data
+for(i in seq(nrow(dat))) 
+  out[[i]] <- lapply(out[[i]], "marks<-", value=dat[i, , drop=FALSE])
+@ 
+The result is a list-of-lists-of-\texttt{psp}'s. 
+See the previous subsection for explanation on how to 
+change this using \texttt{c()} or \texttt{superimpose}.
+\end{enumerate}
+
+In either case, 
+the mark variables attached to a particular \emph{group of connected lines}
+in the \texttt{SpatialLinesDataFrame} object, will be duplicated
+and attached to each \emph{line segment} in the resulting \texttt{psp} object.
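+
+For example, here is a minimal sketch of option 1
+(assuming \texttt{x} is a \texttt{SpatialLinesDataFrame}):
+<<eval=FALSE>>=
+segs <- as(x, "psp")   # marked spatial line segment pattern
+head(marks(segs))      # each segment repeats the marks of its group
+@ 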
+
+\subsubsection{Objects of class \texttt{SpatialPolygons}}
+
+First, so that we don't go completely crazy, let's introduce some terminology.
+A \emph{polygon} is a closed curve that is composed of 
+straight line segments. You can draw a polygon
+without lifting your pen from the paper. 
+
+\setkeys{Gin}{width=0.4\textwidth}
+\begin{center}
+<<echo=FALSE,results=hide,fig=TRUE>>=
+data(chorley)
+plot(as.owin(chorley), lwd=3, main="polygon")
+@ 
+\end{center}
+
+A \emph{polygonal region}
+is a region in space whose boundary is composed of straight line segments.
+A polygonal region may consist of several unconnected pieces, and each piece
+may have holes. The boundary of a polygonal region
+consists of one or more polygons. To draw the boundary of a polygonal 
+region, you may need to lift and drop the pen several times.
+
+\setkeys{Gin}{width=0.4\textwidth}
+\begin{center}
+<<echo=FALSE,results=hide,fig=TRUE>>=
+data(demopat)
+plot(as.owin(demopat), col="blue", main="polygonal region")
+@ 
+\end{center}
+
+An object of class \texttt{owin} in \pkg{spatstat}
+represents a polygonal region. It is a region of space that is delimited
+by boundaries made of lines.
+
+An object \texttt{x} of class \texttt{SpatialPolygons}
+represents a \textbf{list of polygonal regions}. For example,
+a single object of class \texttt{SpatialPolygons} could 
+store information about every State in the United States of America
+(or the United States of Malaysia). Each State would be a separate
+polygonal region (and it might contain holes such as lakes).
+
+There are two things
+that you might want to do with an object of class \texttt{SpatialPolygons}:
+\begin{enumerate}
+\item 
+  combine all the polygonal regions together into a single
+  polygonal region, and convert this to a single object of class \texttt{owin}.
+  \begin{quote}
+    For example, you could combine all the States of the USA together
+    and obtain a single object that represents the territory of the USA. 
+
+    To do this, use \verb!as(x, "owin")! or \texttt{as.owin(x)}.
+    The result is a single window (object of class \texttt{"owin"}) 
+    in the \pkg{spatstat} package.
+  \end{quote}
+\item keep the different polygonal regions separate; convert each 
+  one of the polygonal regions to an object of class \texttt{owin}.
+  \begin{quote}
+    For example, you could keep the States of the USA separate,
+    and convert each State to an object of class \texttt{owin}.
+  \end{quote}
+  To do this, type the following:
+<<eval=FALSE>>=
+regions <- slot(x, "polygons")
+regions <- lapply(regions, function(x) { SpatialPolygons(list(x)) })
+windows <- lapply(regions, as.owin)
+@ 
+  The result is a list of objects of class \texttt{owin}. 
+  Often it would make sense to convert this to a 
+  tessellation object, by typing
+<<eval=FALSE>>=
+te <- tess(tiles=windows)
+@ 
+\end{enumerate}
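+
+For example, here is a minimal sketch of option 1
+(assuming \texttt{x} is a \texttt{SpatialPolygons} object):
+<<eval=FALSE>>=
+w <- as(x, "owin")   # a single window: the union of all the regions
+@ 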
+
+{\bf The following is different from what happened in
+  previous versions of \pkg{spatstat}} (prior to version \texttt{1.36-0}).
+
+During the conversion process, the geometry of the polygons
+will be automatically ``repaired'' if needed.
+Polygon data from shapefiles often contain geometrical inconsistencies 
+such as self-intersecting boundaries and overlapping pieces. 
+For example, these can arise from small errors in curve-tracing.
+Geometrical inconsistencies are tolerated in 
+an object of class \texttt{SpatialPolygons} which
+is a list of lists of polygonal curves.
+However, they are not tolerated in an object of class \texttt{owin},
+because an \texttt{owin} must specify a well-defined region of space.
+These data inconsistencies must be repaired to prevent technical problems. 
+\pkg{Spatstat} uses polygon-clipping code to automatically convert 
+polygonal lines into valid polygon boundaries. 
+The repair process changes the number of vertices in each polygon,
+and the number of polygons (if you chose option 1).
+To disable the repair process, set 
+\texttt{spatstat.options(fixpolygons=FALSE)}.
+
+\subsubsection{Objects of class \texttt{SpatialPolygonsDataFrame}}
+
+What a mouthful!
+
+An object \texttt{x} of class \texttt{SpatialPolygonsDataFrame}
+represents a list of polygonal regions,
+with additional variables attached to
+each region. It includes an object of class \texttt{SpatialPolygons} 
+giving the spatial regions, and a data frame containing the
+additional variables attached to the regions.
+The regions are extracted by
+<<eval=FALSE>>=
+y <- as(x, "SpatialPolygons")
+@ 
+and you then proceed as above to convert the curves to
+\pkg{spatstat} format.
+
+The data frame of auxiliary data is extracted by 
+\verb!df <- x@data! or \verb!df <- slot(x, "data")!.
+
+For example:
+
+<<eval=FALSE>>=
+cp      <- as(columbus, "SpatialPolygons")
+cregions <- slot(cp, "polygons")
+cregions <- lapply(cregions, function(x) { SpatialPolygons(list(x)) })
+cwindows <- lapply(cregions, as.owin)
+@ 
+
+There is currently no facility in \pkg{spatstat} for attaching
+marks to an \texttt{owin} object directly. 
+
+However, \pkg{spatstat} supports objects called \textbf{hyperframes},
+which are like data frames except that the entries can be any type of object.
+Thus we can represent the \texttt{columbus} data in \pkg{spatstat} as
+follows:
+<<eval=FALSE>>=
+ch <- hyperframe(window=cwindows)
+ch <- cbind.hyperframe(ch, columbus@data)
+@ 
+
+Then \texttt{ch} is a hyperframe containing a column of \texttt{owin}
+objects followed by the columns of auxiliary data.
+
+\subsubsection{Objects of class \texttt{SpatialGridDataFrame}
+  and \texttt{SpatialPixelsDataFrame}}
+
+An object \texttt{x} of class \texttt{SpatialGridDataFrame} represents
+a pixel image on a rectangular grid. It includes a \texttt{SpatialGrid}
+object \texttt{slot(x, "grid")} defining the full rectangular grid of pixels, 
+and a data frame \texttt{slot(x, "data")} containing the pixel values
+(which may include \texttt{NA} values).
+
+The command \texttt{as(x, "im")} converts \texttt{x} to a pixel image
+of class \texttt{"im"}, taking the pixel values from the \emph{first column}
+of the data frame. If the data frame has multiple columns, these 
+have to be converted to separate pixel images in \pkg{spatstat}.
+For example:
+<<eval=FALSE>>=
+  y <- as(x, "im")
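+  # overwrite the pixel values of y with each column in turn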
+  ylist <- lapply(slot(x, "data"), function(z, y) { y[,] <- z; y }, y=y)
+@ 
+
+An object \texttt{x} of class  \texttt{SpatialPixelsDataFrame} 
+represents a \emph{subset} of a pixel image. 
+To convert this to a \pkg{spatstat} object, it should first be converted to
+a \texttt{SpatialGridDataFrame} by \texttt{as(x, "SpatialGridDataFrame")},
+then handled as described above.
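+
+In code, a minimal sketch:
+<<eval=FALSE>>=
+xg <- as(x, "SpatialGridDataFrame")  # fill out the full rectangular grid
+y  <- as(xg, "im")                   # then convert to a pixel image
+@ 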
+
+\end{document}
+
diff --git a/vignettes/updates.Rnw b/vignettes/updates.Rnw
new file mode 100644
index 0000000..620a03a
--- /dev/null
+++ b/vignettes/updates.Rnw
@@ -0,0 +1,2197 @@
+\documentclass[11pt]{article}
+\usepackage{graphicx}
+\usepackage{Sweave}
+\usepackage{bm}
+\usepackage{anysize}
+
+\marginsize{2cm}{2cm}{2cm}{2cm}
+
+% \VignetteIndexEntry{Summary of Recent Updates to Spatstat}
+
+\newcommand{\pkg}[1]{\texttt{#1}}
+\newcommand{\code}[1]{\texttt{#1}}
+\newcommand{\R}{{\sf R}}
+\newcommand{\spst}{\pkg{spatstat}}
+\newcommand{\Spst}{\pkg{Spatstat}}
+
+\begin{document}
+\bibliographystyle{plain}
+
+<<echo=FALSE,results=hide>>=
+library(spatstat)
+sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"),
+         fields = "Version")
+options(useFancyQuotes=FALSE)
+@ 
+
+\title{Summary of recent updates to \spst}
+\author{Adrian Baddeley, Rolf Turner and Ege Rubak}
+\date{For \spst\ version \texttt{\Sexpr{sversion}}}
+\maketitle
+
+\thispagestyle{empty}
+
+This is a summary of changes that have been made 
+to the \spst\ package since the publication of the 
+accompanying book \cite{baddrubaturn15}.
+The book, published in December 2015,
+covers everything in \spst\ up to version \texttt{1.42-0}, 
+released in May 2015.
+
+<<echo=FALSE,results=hide>>=
+readSizeTable <- function(fname) {
+  if(is.null(fname) || !file.exists(fname)) return(NULL)
+  a <- read.table(fname, header=TRUE)
+  a$date <- as.Date(a$date)
+  return(a)
+}
+getSizeTable <- function(packagename="spatstat", tablename="packagesizes.txt") {
+  fname <- system.file("doc", tablename, package=packagename)
+  readSizeTable(fname)
+}
+counts <- c("nhelpfiles", "nobjects", "ndatasets", "Rlines", "srclines")
+mergeSizeTables <- function(a, b) {
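+  # add the counts from table b to those in a, matching rows by date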
+  if(is.null(b)) return(a)
+  for(i in seq_len(nrow(a))) {
+    j <- which(b$date <= a$date[i])
+    if(length(j) > 0) 
+      a[i,counts] <- a[i,counts] + b[max(j), counts]
+  }
+  return(a)
+}
+z <- getSizeTable()
+zutils <- getSizeTable("spatstat.utils")
+zlocal <- getSizeTable("spatstat", "spatstatlocalsize.txt")
+z <- mergeSizeTables(z, zutils)
+z <- mergeSizeTables(z, zlocal)
+#
+changes <- z[nrow(z), ] - z[z$version == "1.42-0", ]
+newobj <- changes[["nobjects"]]
+newdat <- changes[["ndatasets"]] + 1  # counting rule doesn't detect redwood3
+@ %$
+
+The current version of \spst\ is \texttt{\Sexpr{sversion}}.
+It contains \Sexpr{newobj} new functions
+and \Sexpr{newdat} new datasets
+introduced after May 2015. This document summarises the most important changes.
+
+This document also lists all \emph{important} bugs detected \emph{since 2010}.
+
+<<echo=FALSE,results=hide,fig=FALSE>>=
+options(SweaveHooks=list(fig=function() par(mar=0.2+c(2,4,2,0))))
+Plot <- function(fmla, ..., dat=z) {
+  yvals <- eval(as.expression(fmla[[2]]), envir=dat)
+  plot(fmla, ..., data=dat, type="l", xlab="", lwd=2, ylim=c(0, max(yvals)))
+}
+@ 
+\SweaveOpts{eps=TRUE}
+\setkeys{Gin}{width=0.45\textwidth}
+
+\centerline{
+<<fig=TRUE,echo=FALSE,results=hide>>=
+Plot((Rlines + srclines)/1000 ~ date, ylab="Lines of code (x 1000)", 
+     main="Spatstat growth")
+lines(srclines/1000 ~ date, data=z)
+text(as.Date("2015-01-01"), 9.5, "C code")
+text(as.Date("2015-01-01"), 60, "R code")
+@ 
+}
+
+\tableofcontents
+
+\newpage
+
+\section{\pkg{spatstat} is splitting into parts}
+
+    \pkg{spatstat} is being split into several sub-packages, to satisfy
+    the requirements of CRAN. This should not affect the user:
+    existing code will continue to work in the same way.
+
+    Currently there are two sub-packages, called \pkg{spatstat.utils}
+    and \pkg{spatstat}.
+
+    Typing \code{library(spatstat)} will load the familiar
+    \pkg{spatstat} package which can be used as before, and will silently
+    import the \pkg{spatstat.utils} package.
+
+    The \pkg{spatstat.utils} package
+    contains utility functions that were originally written for \pkg{spatstat}:
+    they were undocumented internal functions in \pkg{spatstat}, but are now
+    documented and accessible in a separate package because they may be
+    useful for other purposes. To access these functions, you need to
+    type \code{library(spatstat.utils)}. 
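+
+    For example (a minimal sketch):
+<<eval=FALSE>>=
+library(spatstat)        # also imports spatstat.utils silently
+library(spatstat.utils)  # needed only to call the utility functions directly
+@ 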
+
+\section{Precis of all changes}
+
+Here is the text from the `overview' sections of 
+the News and Release Notes for each update. 
+
+\begin{itemize}
+
+  \item \spst\ now Imports the package \pkg{spatstat.utils}.
+
+  \item \spst\ now suggests the package \pkg{fftwtools}.
+
+  \item Now handles disconnected linear networks.
+    
+  \item Effect function is now available for all types of fitted model.
+
+  \item Geometric-mean smoothing.
+
+  \item A model can be fitted or re-fitted to a sub-region of data.
+    
+  \item New fast algorithm for kernel smoothing on a linear network.
+
+  \item Leverage and influence diagnostics extended to Poisson/Gibbs models
+        fitted by logistic composite likelihood.
+
+  \item Two-stage Monte Carlo test.
+  
+  \item Dirichlet/Voronoi tessellation on a linear network.
+
+  \item Thinning of point patterns on a linear network.
+
+  \item More support for functions and tessellations on a linear network.
+
+  \item Bandwidth selection for pair correlation function.
+  
+  \item Pooling operations improved. 
+
+  \item Operations on signed measures.
+
+   \item Operations on lists of pixel images.
+
+   \item Improved pixellation of point patterns.
+
+   \item Stieltjes integral extended.
+
+   \item Subset operators extended.
+
+   \item Greatly accelerated \texttt{rmh} when using \texttt{nsave}.
+
+  \item Sufficient Dimension Reduction for point processes.
+
+  \item Alternating Gibbs Sampler for point process simulation.
+
+  \item New class of spatially sampled functions.
+
+  \item ROC and AUC extended to other types of point patterns and models.
+
+  \item More support for linear networks.
+
+  \item More support for infinite straight lines.
+
+  \item \spst\ now depends on the packages \pkg{nlme} and \pkg{rpart}.
+
+  \item Important bug fix in \code{linearK} and \code{linearpcf}.
+
+  \item Changed internal format of \code{linnet} and \code{lpp} objects.
+
+  \item Faster computation in linear networks.
+
+  \item Bias correction techniques.
+  
+  \item Bounding circle of a spatial object.
+
+  \item Option to plot marked points as arrows.
+
+  \item Kernel smoothing accelerated.
+
+  \item Workaround for bug in some graphics drivers affecting image orientation.
+
+  \item Non-Gaussian smoothing kernels.
+
+   \item Improvements to inhomogeneous multitype $K$ and $L$ functions.
+
+   \item Variance approximation for pair correlation function.
+
+   \item Leverage and influence for multitype point process models.
+
+   \item Functions for extracting components of vector-valued objects.
+
+  \item Recursive-partition point process models.
+
+   \item Minkowski sum, morphological dilation and erosion with any shape.
+
+  \item Minkowski sum also applicable to point patterns and line segment patterns.
+    
+   \item Important bug fix in \texttt{Smooth.ppp}.
+ 
+   \item Important bug fix in spatial CDF tests.
+
+   \item  More bug fixes for replicated patterns.
+
+   \item Simulate a model fitted to replicated point patterns.
+
+   \item Inhomogeneous multitype $F$ and $G$ functions.
+
+   \item Summary functions recognise \texttt{correction="all"}.
+
+   \item Leverage and influence code handles bigger datasets.
+
+   \item More support for pixel images.
+
+   \item Improved progress reports.
+
+   \item New dataset \texttt{redwood3}.
+
+    \item Fixed namespace problems arising when spatstat is not loaded.
+
+   \item Important bug fix in leverage/influence diagnostics for Gibbs models.
+
+   \item Surgery with linear networks.
+   
+   \item Tessellations on a linear network.
+
+   \item Laslett's Transform.
+
+   \item Colour maps for point patterns with continuous marks
+     are easier to define.
+
+   \item Pair correlation function estimates can be pooled.
+
+   \item Stipulate a particular version of a package.
+
+   \item More support for replicated point patterns.
+     
+   \item More support for tessellations.
+
+  \item More support for multidimensional point patterns and point processes.
+
+   \item More options for one-sided envelopes.
+
+   \item More support for model comparison.
+
+   \item Convexifying operation.
+
+   \item Subdivide a linear network.
+
+   \item Penttinen process can be simulated (by Metropolis-Hastings or CFTP).
+
+   \item Calculate the predicted variance of number of points.
+
+   \item Accelerated algorithms for linear networks.
+
+   \item Quadrat counting accelerated, in some cases.
+
+   \item Simulation algorithms have been accelerated; simulation outcomes 
+   are \emph{not} identical to those obtained from previous versions of \spst. 
+
+   \item Determinantal point process models.
+
+   \item Random-effects and mixed-effects models for replicated patterns.
+
+   \item Dao-Genton test, and corresponding simulation envelopes.
+
+   \item Simulated annealing and simulated tempering.
+
+   \item spatstat colour tools now handle transparent colours.
+
+   \item Improvements to \verb![! and \texttt{subset} methods.
+
+   \item Extensions to kernel smoothing on a linear network.
+
+   \item Support for one-dimensional smoothing kernels.
+
+   \item Mark correlation function may include weights.
+
+   \item Cross-correlation version of the mark correlation function.
+
+   \item Penttinen pairwise interaction model.
+
+   \item Improvements to simulation of Neyman-Scott processes.
+
+   \item Improvements to fitting of Neyman-Scott models.
+
+   \item Extended functionality for pixel images.
+
+   \item Fitted intensity on a linear network.
+
+   \item Triangulation of windows.
+
+   \item  Corrected an edge correction.
+   \end{itemize}  
+   
+\section{New datasets}  
+
+The following datasets have been added to the package.
+
+\begin{itemize}
+\item \texttt{austates}: The states and large mainland territories of Australia
+   represented as polygonal regions forming a tessellation.
+ \item \texttt{redwood3}: a more accurate version of the \texttt{redwood} data.
+\end{itemize}
+
+\section{New classes}
+
+\begin{itemize}
+\item \texttt{ssf}:
+  Class of spatially sampled functions.
+\end{itemize}
+
+\section{New Functions}
+
+Following is a list of all the functions that have been added.
+
+\begin{itemize}
+    \item \texttt{as.data.frame.envelope}:
+    Extract function data from an envelope object,
+    including the functions for the simulated data ('simfuns')
+    if they were saved.
+
+    \item \texttt{is.connected}, \texttt{is.connected.default}, 
+      \texttt{is.connected.linnet}:
+    Determines whether a spatial object consists of
+    one topologically connected piece, or several pieces.
+
+    \item \texttt{is.connected.ppp}:
+    Determines whether a point pattern is connected after
+    all pairs of points closer than distance R are joined.
+
+    \item \texttt{hist.funxy}:
+    Histogram of values of a spatial function.
+
+    \item \texttt{model.matrix.ippm}:
+    Method for \texttt{model.matrix} which allows computation of
+    regular and irregular score components.
+
+    \item \texttt{harmonise.msr}:
+    Convert several measures (objects of class \texttt{msr})
+    to a common quadrature scheme.
+    
+    \item \texttt{bits.test}:
+    Balanced Independent Two-Stage Monte Carlo test,
+    an improvement on the Dao-Genton test.
+    
+    \item \texttt{lineardirichlet}:
+    Computes the Dirichlet-Voronoi tessellation associated with a
+    point pattern on a linear network.
+
+    \item \texttt{domain.lintess}, \texttt{domain.linfun}:
+    Extract the linear network from a
+    \texttt{lintess} or \texttt{linfun} object.
+
+    \item \texttt{summary.lintess}:
+    Summary of a tessellation on a linear network.
+
+    \item \texttt{clicklpp}:
+    Interactively add points on a linear network.
+    
+    \item \texttt{envelopeArray}:
+    Generate an array of envelopes
+    using a function that returns \texttt{fasp} objects.
+
+    \item \texttt{bw.pcf}:
+      Bandwidth selection for pair correlation function.
+
+    \item \texttt{grow.box3}:
+    Expand a three-dimensional box.
+    
+    \item \texttt{hexagon}, \texttt{regularpolygon}:
+    Create regular polygons.
+
+    \item \texttt{Ops.msr}:
+    Arithmetic operations for measures.
+
+    \item \texttt{Math.imlist}, \texttt{Ops.imlist}, 
+      \texttt{Summary.imlist}, \texttt{Complex.imlist}:
+    Arithmetic operations for lists of pixel images.
+
+    \item \texttt{measurePositive}, \texttt{measureNegative}, 
+      \texttt{measureVariation}, \texttt{totalVariation}:
+    Positive and negative parts of a measure, and variation of a measure.
+
+    \item \texttt{as.function.owin}:
+    Convert a spatial window to a \texttt{function(x,y)}, 
+    the indicator function.
+
+    \item \texttt{as.function.ssf}:
+    Convert an object of class \texttt{ssf} to a \texttt{function(x,y)}.
+
+    \item \texttt{as.function.leverage.ppm}:
+    Convert an object of class \texttt{leverage.ppm} to a \texttt{function(x,y)}.
+
+  \item \texttt{sdr}, \texttt{dimhat}:
+    Sufficient Dimension Reduction for point processes.
+
+    \item \texttt{simulate.rhohat}:
+    Simulate a Poisson point process with the
+    intensity estimated by \texttt{rhohat}.
+
+    \item \texttt{rlpp}:
+    Random points on a linear network with a specified probability density.
+
+    \item \texttt{cut.lpp}:
+    Method for \texttt{cut} for point patterns on a linear network.
+
+    \item \texttt{has.close}:
+    Faster way to check whether a point has a close neighbour.
+
+    \item \texttt{psib}:
+    Sibling probability (index of clustering strength in a cluster process).
+    
+    \item \texttt{rags}, \texttt{ragsAreaInter}, \texttt{ragsMultiHard}:
+    Alternating Gibbs Sampler for point processes.
+
+    \item \texttt{bugfixes}:
+      List all bug fixes in recent versions of a package.
+      
+    \item \texttt{ssf}:
+    Create a spatially sampled function.
+
+    \item \texttt{print.ssf}, \texttt{plot.ssf}, \texttt{contour.ssf}, 
+      \texttt{image.ssf}:
+    Display a spatially sampled function.
+
+    \item \texttt{as.im.ssf}, \texttt{as.ppp.ssf}, \texttt{marks.ssf}, 
+      \verb!marks<-.ssf!, \texttt{unmark.ssf}, \verb![.ssf!, \texttt{with.ssf}:
+    Manipulate data in a spatially sampled function.
+
+    \item \texttt{Smooth.ssf}:
+    Smooth a spatially sampled function.
+
+    \item \texttt{integral.ssf}:
+    Approximate integral of a spatially sampled function.
+
+    \item \texttt{roc.kppm}, \texttt{roc.lppm}, \texttt{roc.lpp}:
+      Methods for \texttt{roc} for fitted models of class \texttt{"kppm"} and 
+      \texttt{"lppm"} and point patterns of class \texttt{"lpp"}.
+
+    \item \texttt{auc.kppm}, \texttt{auc.lppm}, \texttt{auc.lpp}:
+      Methods for \texttt{auc} for fitted models of class \texttt{"kppm"} and 
+      \texttt{"lppm"} and point patterns of class \texttt{"lpp"}.
+      
+    \item \texttt{timeTaken}:
+    Extract the timing data from a \texttt{"timed"} object or objects.
+
+    \item \texttt{rotate.infline}, 
+      \texttt{shift.infline}, \texttt{reflect.infline}, 
+      \texttt{flipxy.infline}:
+    Geometrical transformations for infinite straight lines.
+
+    \item \texttt{whichhalfplane}:
+    Determine which side of an infinite line a point lies on.
+
+    \item \texttt{matrixpower}, \texttt{matrixsqrt}, \texttt{matrixinvsqrt}:
+    Raise a matrix to any power.
+
+    \item \texttt{points.lpp}:
+    Method for \texttt{points} for point patterns on a linear network.
+
+    \item \texttt{pairs.linim}:
+    Pairs plot for images on a linear network.
+
+    \item \texttt{closetriples}:
+    Find close triples of points.
+    
+    \item \texttt{anyNA.im}:
+    Method for \texttt{anyNA} for pixel images.
+
+    \item \texttt{bc}:
+    Bias correction (Newton-Raphson) for fitted model parameters. 
+
+    \item \texttt{rex}:
+    Richardson extrapolation for numerical integrals and 
+    statistical model parameter estimates. 
+
+    \item \texttt{boundingcircle}, \texttt{boundingcentre}:
+    Find the smallest circle enclosing a window or point pattern.
+
+    \item \verb![.linim! : 
+    Subset operator for pixel images on a linear network.
+
+    \item \texttt{mean.linim}, \texttt{median.linim}, \texttt{quantile.linim}:
+    The mean, median, or quantiles of pixel values in a 
+    pixel image on a linear network.
+
+  \item \texttt{weighted.median}, \texttt{weighted.quantile}:
+    Median or quantile of numerical data with associated weights.
+
+ \item \texttt{split.msr}:
+    Decompose a measure into parts.
+
+  \item \texttt{unstack.msr}:
+    Decompose a vector-valued measure into its component measures.
+
+  \item \texttt{unstack.ppp}, \texttt{unstack.psp}, \texttt{unstack.lpp}:
+    Given a spatial pattern with several columns of marks,
+    separate the columns and return a list of spatial patterns, 
+    each having only one column of marks.
+
+ \item \texttt{kernel.squint}:
+    Integral of squared kernel, for the kernels used in density estimation.
+
+ \item \texttt{as.im.data.frame}:
+  Build a pixel image from a data frame of coordinates and pixel values.
+
+\item \texttt{covering}:
+    Cover a window using discs of a given radius.
+
+\item \texttt{dilationAny}, \texttt{erosionAny}, \verb!%(-)%! :
+    Morphological dilation and erosion by any shape.
+
+\item \texttt{FmultiInhom}, \texttt{GmultiInhom}:
+  Inhomogeneous multitype/marked versions of the summary functions 
+  \texttt{Fest}, \texttt{Gest}.
+
+\item \texttt{kernel.moment}:
+    Moment or incomplete moment of smoothing kernel.
+
+\item \texttt{MinkowskiSum}, \verb!%(+)%!: 
+    Minkowski sum of two windows: \verb!A %(+)% B!, 
+    or \texttt{MinkowskiSum(A,B)}.
+
+\item \texttt{nobjects}:
+  New generic function for counting the number of 'things' in a dataset.
+  There are methods for \texttt{ppp}, \texttt{ppx}, \texttt{psp}, \texttt{tess}.
+
+ \item \texttt{parameters.interact}, \texttt{parameters.fii}:
+    Extract parameters from interpoint interactions.
+    (These existing functions are now documented.)
+
+ \item \texttt{ppmInfluence}:
+  Calculate \texttt{leverage.ppm}, \texttt{influence.ppm} and 
+  \texttt{dfbetas.ppm} efficiently.
+
+  \item \texttt{rppm}, \texttt{plot.rppm}, \texttt{predict.rppm}, 
+    \texttt{prune.rppm}:
+    Recursive-partition point process models.
+
+ \item \texttt{simulate.mppm}:
+   Simulate a point process model fitted to replicated point patterns.
+
+ \item \texttt{update.interact}:
+    Update the parameters of an interpoint interaction.
+    [This existing function is now documented.]
+
+ \item \texttt{where.max}, \texttt{where.min}:
+    Find the spatial location(s) where a pixel image achieves its
+    maximum or minimum value.
+    
+ \item \texttt{compileK}, \texttt{compilepcf}:
+   make a $K$ function or pair correlation function
+   given the pairwise distances and their weights.
+   [These existing internal functions are now documented.]
+    
+  \item \texttt{laslett}:
+  Laslett's Transform.
+
+\item \texttt{lintess}: 
+  Tessellation on a linear network.
+
+\item \texttt{divide.linnet}:
+  Divide a linear network into pieces demarcated by a point pattern.
+
+\item \texttt{insertVertices}:
+  Insert new vertices in a linear network.
+
+\item \texttt{thinNetwork}:
+  Remove vertices and/or segments from a linear network etc.
+
+\item \texttt{connected.linnet}:
+  Find connected components of a linear network.
+
+\item \texttt{nvertices}, \texttt{nvertices.linnet}, \texttt{nvertices.owin}:
+  Count the number of vertices in a linear network 
+  or vertices of the boundary of a window.
+
+\item \texttt{as.data.frame.linim}, \texttt{as.data.frame.linfun}:
+  Extract a data frame of spatial locations and function values
+  from an object of class \texttt{linim} or \texttt{linfun}.
+
+\item \texttt{as.linfun}, \texttt{as.linfun.linim}, \texttt{as.linfun.lintess}:
+  Convert other kinds of data to a \texttt{linfun} object.
+
+\item \texttt{requireversion}:
+    Require a particular version of a package
+    (for use in stand-alone R scripts).
+
+  \item \texttt{as.function.tess}:
+   Convert a tessellation to a \texttt{function(x,y)}. The function value
+   indicates which tile of the tessellation contains the point $(x,y)$.
+
+   \item \texttt{tileindex}:
+   Determine which tile of a tessellation contains a given point $(x,y)$.
+
+   \item \texttt{persp.leverage.ppm}:
+   Method for persp plots for objects of class \texttt{leverage.ppm}
+
+   \item \texttt{AIC.mppm}, \texttt{extractAIC.mppm}:
+   AIC for point process models fitted to replicated point patterns.
+
+   \item \texttt{nobs.mppm}, \texttt{terms.mppm}, \texttt{getCall.mppm}:
+   Methods for point process models fitted to replicated point patterns.
+
+  \item \texttt{rPenttinen}:
+    Simulate the Penttinen process using perfect simulation.
+
+  \item \texttt{varcount}:
+    Given a point process model, compute the predicted variance
+    of the number of points falling in a window.
+
+  \item \texttt{inside.boxx}:
+    Test whether multidimensional points lie inside a specified 
+    multidimensional box.
+  \item \texttt{lixellate}:
+    Divide each segment of a linear network into smaller segments.
+
+  \item \texttt{nsegments.linnet}, \texttt{nsegments.lpp}:
+     Count the number of line segments in a linear network.
+
+  \item \texttt{grow.boxx}:
+     Expand a multidimensional box.
+
+   \item \texttt{deviance.ppm}, \texttt{deviance.lppm}:
+     Deviance for a fitted point process model.
+
+   \item \texttt{pseudoR2}:
+     Pseudo-R-squared for a fitted point process model.
+
+   \item \texttt{tiles.empty}:
+     Checks whether each tile of a tessellation is empty or nonempty.
+
+   \item \texttt{summary.linim}:
+     Summary for a pixel image on a linear network.
+     
+\item Determinantal Point Process models:
+  \begin{itemize}
+  \item \texttt{dppm}:
+    Fit a determinantal point process model.
+  \item \texttt{fitted.dppm}, \texttt{predict.dppm}, \texttt{intensity.dppm}:
+    prediction for a fitted determinantal point process model.
+  \item 
+    \texttt{Kmodel.dppm}, \texttt{pcfmodel.dppm}: 
+    Second moments of a determinantal point process model.
+  \item
+    \texttt{rdpp}, \texttt{simulate.dppm}:
+    Simulation of a determinantal point process model.
+  \item \texttt{logLik.dppm}, \texttt{AIC.dppm}, \texttt{extractAIC.dppm}, 
+    \texttt{nobs.dppm}: Likelihood and AIC for 
+    a fitted determinantal point process model.
+  \item
+    \texttt{print.dppm}, \texttt{reach.dppm}, \texttt{valid.dppm}: 
+    Basic information about a \texttt{dpp} model.
+  \item \texttt{coef.dppm}, \texttt{formula.dppm}, \texttt{print.dppm}, 
+    \texttt{terms.dppm}, \texttt{labels.dppm},
+    \texttt{model.frame.dppm}, \texttt{model.matrix.dppm}, 
+    \texttt{model.images.dppm},  \texttt{is.stationary.dppm}, 
+    \texttt{reach.dppm}, \texttt{unitname.dppm}, \verb!unitname<-.dppm!, 
+    \texttt{Window.dppm}: Various methods for \texttt{dppm} objects.
+  \item \texttt{parameters.dppm}: Extract meaningful list of model parameters.
+  \item \texttt{objsurf.dppm}: Objective function surface of 
+    a \texttt{dppm} object. 
+  \item \texttt{residuals.dppm}: Residual measure for a \texttt{dppm} object.
+  \end{itemize}
+\item Determinantal Point Process model families:
+  \begin{itemize}
+  \item \texttt{dppBessel}, \texttt{dppCauchy}, 
+    \texttt{dppGauss}, \texttt{dppMatern}, \texttt{dppPowerExp}:
+    Determinantal Point Process family functions.
+  \item \texttt{detpointprocfamilyfun}:
+    Create a family function.
+  \item    
+    \texttt{update.detpointprocfamily}: Set parameter values in a
+    determinantal point process model family.
+  \item
+    \texttt{simulate.dppm}:   Simulation.
+  \item \texttt{is.stationary.detpointprocfamily}, 
+   \texttt{intensity.detpointprocfamily}, \texttt{Kmodel.detpointprocfamily}, 
+   \texttt{pcfmodel.detpointprocfamily}: Moments.
+ \item \texttt{dim.detpointprocfamily}, \texttt{dppapproxkernel}, 
+   \texttt{dppapproxpcf}, \texttt{dppeigen}, 
+   \texttt{dppkernel}, \texttt{dppparbounds}, \texttt{dppspecdenrange}, 
+   \texttt{dppspecden}:
+   Helper functions.
+ \end{itemize}
+
+ \item \texttt{dg.envelope}:
+   Simulation envelopes corresponding to Dao-Genton test.
+
+ \item \texttt{dg.progress}:
+   Progress plot (envelope representation) for the Dao-Genton test.
+
+ \item \texttt{dg.sigtrace}: Significance trace for the Dao-Genton test.
+
+ \item \texttt{markcrosscorr}:
+   Mark cross-correlation function for point patterns with
+   several columns of marks.
+
+ \item \texttt{rtemper}:
+   Simulated annealing or simulated tempering.
+
+ \item \texttt{rgb2hsva}:
+   Convert RGB to HSV data, like \texttt{rgb2hsv}, but preserving transparency.
+
+ \item \texttt{superimpose.ppplist}, \texttt{superimpose.splitppp}:
+   New methods for \texttt{superimpose} for lists of point patterns.
+
+ \item \texttt{dkernel}, \texttt{pkernel}, \texttt{qkernel}, \texttt{rkernel}:
+   Probability density, cumulative probability, quantiles
+   and random generation from distributions used in basic one-dimensional
+   kernel smoothing.
+
+ \item \texttt{kernel.factor}:
+   Auxiliary calculations for one-dimensional kernel smoothing.
+
+ \item \texttt{spatdim}:
+   Spatial dimension of any object in the \spst\ package.
+
+ \item \texttt{as.boxx}:
+   Convert data to a multi-dimensional box.
+
+ \item \texttt{intensity.ppx}:
+   Method for \texttt{intensity} for multi-dimensional
+   space-time point patterns.
+
+ \item \texttt{fourierbasis}:
+   Evaluate Fourier basis functions in any number of dimensions.
+
+ \item \texttt{valid}:
+   New generic function, with methods 
+   \texttt{valid.ppm}, \texttt{valid.lppm}, \texttt{valid.dppm}.
+   
+ \item \texttt{emend}, \texttt{emend.ppm}, \texttt{emend.lppm}:
+   New generic function with methods for \texttt{ppm} and \texttt{lppm}.
+   \texttt{emend.ppm} is equivalent to \texttt{project.ppm}.
+
+ \item \texttt{Penttinen}:
+   New pairwise interaction model.
+
+  \item \texttt{quantile.density}:
+   Calculates quantiles from kernel density estimates.
+
+  \item \texttt{CDF.density}:
+   Calculates cumulative distribution function from kernel density estimates.
+
+\item \texttt{triangulate.owin}: decompose a spatial window into triangles.
+\item \texttt{fitted.lppm}: fitted intensity values for a point process
+  on a linear network.
+  
+   \item \texttt{parameters}:
+   Extract all parameters from a fitted model.
+
+ \end{itemize}
+ 
+ 
+\section{Alphabetical list of changes}
+
+Here is a list of all changes made to existing functions,
+listed alphabetically.
+
+\begin{itemize}
+%%A
+\item \texttt{affine.owin}:
+  Allows transformation matrix to be singular, if the window is polygonal.
+
+\item \texttt{anova.mppm}: Now handles Gibbs models,
+  and performs the adjusted composite likelihood ratio test.
+  New argument \texttt{fine}.
+  
+ \item \texttt{as.function.tess}:
+    New argument \texttt{values} specifies the function values.
+
+  \item \texttt{as.im.distfun}:
+  New argument \texttt{approx} specifies the choice of algorithm.
+
+  \item \texttt{as.im.function}:
+  New argument \texttt{strict}.
+
+\item \texttt{as.layered}:
+    Default method now handles a (vanilla) list of spatial objects.
+
+\item \texttt{as.linfun.lintess}:
+\begin{itemize}
+\item New argument \texttt{values} specifies the function value for each tile.
+\item New argument \texttt{navalue}.
+\end{itemize}
+
+\item \texttt{as.linim.default}:
+   New argument \texttt{delta} controls spacing of sample points
+   in internal data.
+
+  \item \texttt{as.linnet.psp}:
+If the line segment pattern has marks, then the resulting linear network
+also carries these marks in the \verb!$lines! component.
+
+\item \texttt{as.owin}:
+    Now refuses to convert a \code{box3} to a two-dimensional window.
+
+  \item \texttt{as.owin.data.frame}:
+    New argument \texttt{step}
+
+  \item \texttt{as.polygonal}:
+  Can now repair errors in polygon data, if \texttt{repair=TRUE}.
+
+  \item \texttt{as.solist}:
+    The argument \texttt{x} can now be a spatial object;
+    \texttt{as.solist(cells)} is the same as \texttt{solist(cells)}.
+
+%%B
+ \item \texttt{bdist.pixels}:
+   Accelerated for polygonal windows. New argument \texttt{method}.
+   
+ \item \texttt{bind.fv}:
+   New argument \texttt{clip}.
+   
+ \item \texttt{bw.ppl}:
+   New arguments \texttt{weights} and \texttt{sigma}.
+   
+ \item \texttt{bw.diggle}, \texttt{bw.ppl}, \texttt{bw.relrisk}, 
+   \texttt{bw.smoothppp}:
+    These functions now extract and store the name of the unit of length
+    from the point pattern dataset. When the bandwidth selection criterion
+    is plotted, the name of the unit of length is shown on the x-axis.
+   
+%%C
+ \item \texttt{cdf.test}:
+   \begin{itemize}
+   \item    Calculations are more robust against numerical rounding effects.
+   \item The methods for classes \texttt{ppp}, \texttt{ppm}, \texttt{lpp}, 
+     \texttt{lppm}, \texttt{slrm} have a new argument \texttt{interpolate}.
+   \end{itemize}
+
+   
+ \item \texttt{cdf.test.mppm}:
+   \begin{itemize}
+   \item     Now handles Gibbs models.
+   \item     Now recognises \texttt{covariate="x"} or \texttt{"y"}.
+   \end{itemize}
+    
+ \item \texttt{clarkevans}:
+    The argument \texttt{correction="all"} is now recognised: it selects
+    all the available options. [This is also the default.]
+
+  \item \texttt{clickpoly}:
+    The polygon is now drawn progressively as the user clicks new vertices.
+
+ \item \texttt{closepairs.ppp}, \texttt{closepairs.pp3}:
+   \begin{itemize}
+   \item New arguments \texttt{distinct} and \texttt{neat} allow more options.
+   \item Argument \texttt{ordered} has been replaced by \texttt{twice}
+    (but \texttt{ordered} is still accepted, with a warning).
+  \item  
+    Performance improved (computation time and memory requirements reduced).
+    This should improve the performance of many functions in \texttt{spatstat}.
+   \end{itemize}
+
+ \item \texttt{clusterset}:
+   Improved behaviour.
+
+ \item \texttt{clusterfit}:
+   New argument \texttt{algorithm} specifies the choice 
+   of optimisation algorithm.
+
+\item \texttt{collapse.fv}:
+   This is now treated as a method for the \texttt{nlme} 
+   generic \texttt{collapse}.
+   Its syntax has been adjusted slightly.
+
+\item \texttt{connected.im}:
+    Now handles a logical-valued image properly.
+    Arguments \texttt{...} now determine pixel resolution.
+    
+\item \texttt{connected.owin}:    
+    Arguments \texttt{...} now determine pixel resolution.
+
+  \item \texttt{contour.im}:
+   New argument \texttt{col} specifies the colour of the contour lines.
+   If \texttt{col} is a colour map, then the contours are drawn 
+   in different colours.
+
+ \item \texttt{crossing.psp}:
+    New argument \texttt{details} gives more information about the intersections
+    between the segments.
+
+ \item \texttt{cut.ppp}:
+    Argument \texttt{z} can be \texttt{"x"} or \texttt{"y"}
+    indicating one of the spatial coordinates.    
+    
+%%D
+    
+  \item \texttt{dclf.test, mad.test, dclf.progress, mad.progress,} 
+   \texttt{dclf.sigtrace, mad.sigtrace}, 
+   \texttt{dg.progress, dg.sigtrace}:
+   \begin{itemize}
+   \item 
+     New argument \texttt{clamp} determines the test statistic 
+     for one-sided tests.
+   \item 
+     New argument \texttt{rmin} determines the left endpoint
+     of the test interval.    
+   \item 
+     New argument \texttt{leaveout} specifies how to calculate
+     discrepancy between observed and simulated function values.
+   \item
+     New argument \texttt{scale} allows summary function values to be rescaled
+     before the comparison is performed.
+   \item
+     New argument \texttt{interpolate} supports interpolation of $p$-value.
+   \item
+     New argument \texttt{interpolate} supports interpolation of 
+     critical value of test.
+   \end{itemize}
+ 
+
+ \item \texttt{default.rmhcontrol, default.rmhexpand}:
+   New argument \texttt{w}.
+
+   
+ \item \texttt{density.lpp}:
+   \begin{itemize}
+   \item
+    New fast algorithm (up to 1000 times faster) for the default case
+    where \texttt{kernel="gaussian"} and \texttt{continuous=TRUE}.
+    Generously contributed by Greg McSwiggan.
+   \item 
+     New argument \texttt{kernel} specifies the smoothing kernel.
+     Any of the standard one-dimensional smoothing kernels can be used.
+   \item 
+     Now supports both the `equal-split continuous' and 
+     `equal-split discontinuous' smoothers. New argument \texttt{continuous} 
+     determines the choice of smoother.
+   \item 
+     New arguments \texttt{weights} and \texttt{old}.
+   \end{itemize}
+   
+ \item \texttt{density.ppp}:
+   \begin{itemize}
+   \item A non-Gaussian kernel can now be specified
+   using the argument \texttt{kernel}.
+ \item Argument \texttt{weights} can now be a pixel image.
+   \item
+     Accelerated by about 30\% when \texttt{at="pixels"}.
+   \item Accelerated by about 15\%
+     in the case where \texttt{at="points"}
+     and \texttt{kernel="gaussian"}.
+  \item 
+     Accelerated in the cases where weights are given or \texttt{diggle=TRUE}.
+   \item New argument \texttt{verbose}.
+   \end{itemize}
+
+ \item \texttt{density.psp}:
+   \begin{itemize}
+   \item New argument \texttt{method}.
+   \item Accelerated by 1 to 2 orders of magnitude.
+   \end{itemize}
+
+ \item \texttt{dfbetas.ppm}:
+    For Gibbs models, memory usage has been dramatically reduced, 
+    so the code can handle larger datasets and finer quadrature schemes.
+
+    
+  \item \texttt{diagnose.ppm}:
+    Infinite values of \texttt{rbord} are now ignored and treated as zero.
+    This ensures that \texttt{diagnose.ppm} has a sensible default
+    when the fitted model has infinite reach.
+  
+  \item \texttt{diagnose.ppm, plot.diagppm}:
+   New arguments \texttt{col.neg, col.smooth} control the colour maps.
+
+  \item \texttt{dilation.ppp}:
+    Improved geometrical accuracy.
+    Now accepts arguments to control resolution of polygonal approximation.
+
+ \item \texttt{discs}:
+   \begin{itemize}
+   \item     Now accepts a single numeric value for \texttt{radii}.
+   \item New argument \texttt{npoly}.
+   \item Accelerated in some cases. 
+   \end{itemize}
+
+
+  \item \texttt{distfun}:
+    When the user calls a distance function 
+    that was created by \texttt{distfun},
+    the user may now give a \texttt{ppp} or \texttt{lpp}
+    object for the argument \texttt{x},
+    instead of giving two coordinate vectors \texttt{x} and \texttt{y}.
+
+%%E
+    
+  \item \texttt{edge.Trans}:
+    New argument \texttt{gW} for efficiency.
+
+  \item \texttt{effectfun}:
+    Now works for \texttt{ppm}, \texttt{kppm}, 
+    \texttt{lppm}, \texttt{dppm}, \texttt{rppm} and \texttt{profilepl} objects.
+    
+ \item \texttt{envelope}:
+   \begin{itemize}
+   \item 
+     New argument \texttt{clamp} gives greater control
+     over one-sided envelopes.
+   \item  New argument \texttt{funargs}.
+   \item 
+     New argument \texttt{scale} allows global envelopes to have 
+     width proportional to a specified function of $r$,
+     rather than constant width.
+   \item 
+     New argument \texttt{funYargs} contains arguments to the summary function
+     when applied to the data pattern only.
+   \end{itemize}
+
+ \item \texttt{envelope.lpp}, \texttt{envelope.lppm}:
+    New arguments \texttt{fix.n} and \texttt{fix.marks}
+    allow envelopes to be computed
+    using simulations conditional on the observed number of points.   
+    
+%%F
+   
+ \item \texttt{Fest}:
+   Additional checks for errors in input data.
+
+ \item \texttt{fitted.lppm}:
+  New argument \texttt{leaveoneout}
+  allows leave-one-out computation of fitted value.
+  
+ \item \texttt{fitted.ppm}:
+  New option, \texttt{type="link"}.
+  
+  \item \texttt{funxy}:
+    When the user calls a function that was created by \texttt{funxy},
+    the user may now give a \texttt{ppp} or \texttt{lpp}
+    object for the argument \texttt{x},
+    instead of giving two coordinate vectors \texttt{x} and \texttt{y}.
+
+%%G
+    
+  \item \texttt{Geyer}:
+   The saturation parameter \texttt{sat} can now be less than 1.
+   
+   \item \texttt{grow.rectangle}:
+     New argument \texttt{fraction}.
+%%H
+   
+ \item \texttt{Hest}:
+   \begin{itemize}
+   \item Argument \texttt{X} can now be a pixel image with logical values.
+   \item New argument \texttt{W}. [Based on code by Kassel Hingee.]
+   \item Additional checks for errors in input data.
+   \end{itemize}
+   
+ \item \texttt{hist.im}: New argument \texttt{xname}.
+   
+%%I
+ \item \texttt{influence.ppm}:
+    For Gibbs models, memory usage has been dramatically reduced, 
+    so the code can handle larger datasets and finer quadrature schemes.
+
+ \item \texttt{integral.linfun}:
+   New argument \texttt{delta} controls step length of
+   approximation to integral.
+
+\item \texttt{intensity.ppm}:
+   Intensity approximation is now implemented for
+   area-interaction model, and Geyer saturation model.
+
+ \item \texttt{ippm}:
+   \begin{itemize}
+   \item Accelerated.
+   \item 
+   The internal format of the result has been extended slightly.
+   \item Improved defaults for numerical algorithm parameters.
+   \end{itemize}
+
+   
+%%J
+%%K
+
+ \item \texttt{Kcross.inhom}, \texttt{Kdot.inhom}, \texttt{Kmulti.inhom}:
+    These functions now allow intensity values to be given by
+    a fitted point process model.
+    New arguments \texttt{update}, \texttt{leaveoneout}, \texttt{lambdaX}.
+
+ \item \texttt{Kest}:
+    Accelerated computation (for translation and rigid corrections)
+    when window is an irregular shape.
+    
+ \item \texttt{Kest.fft}:
+   Now has \verb!...! arguments allowing control of spatial resolution.
+
+ \item \texttt{Kinhom}:
+   New argument \texttt{ratio}.
+    
+ \item \texttt{kppm}:
+   \begin{itemize}
+   \item 
+     Fitting a model with \texttt{clusters="LGCP"} no longer requires the
+     package \pkg{RandomFields} to be loaded explicitly.
+   \item
+     New argument \texttt{algorithm} specifies the choice 
+     of optimisation algorithm.
+     \item 
+       Left hand side of formula can now involve entries 
+       in the list \texttt{data}.
+     \item Refuses to fit a log-Gaussian Cox model with anisotropic covariance.
+     \item 
+    A warning about infinite values of the summary function 
+    no longer occurs when the default settings are used.
+    Also affects \texttt{mincontrast}, 
+    \texttt{cauchy.estpcf}, \texttt{lgcp.estpcf}, \texttt{matclust.estpcf},
+    \texttt{thomas.estpcf}, \texttt{vargamma.estpcf}.
+    \item
+    Improved printed output.
+  \end{itemize}
+  
+%%L
+   
+ \item \texttt{Lcross.inhom}, \texttt{Ldot.inhom}:
+    These functions now allow intensity values to be given by
+    a fitted point process model.
+    New arguments \texttt{update}, \texttt{leaveoneout}, \texttt{lambdaX}.
+
+ \item \texttt{lengths.psp}: New argument \texttt{squared}.
+   
+ \item \texttt{leverage.ppm}:
+    For Gibbs models, memory usage has been dramatically reduced, 
+    so the code can handle larger datasets and finer quadrature schemes.
+
+ \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}:
+    These methods now work for models that were fitted by logistic
+    composite likelihood (\texttt{method='logi'}).
+    
+
+ \item \texttt{linearK}, \texttt{linearpcf} and relatives:
+   \begin{itemize}
+     \item substantially accelerated.
+     \item ratio calculations are now supported.
+     \item new argument \texttt{ratio}.
+     \end{itemize}
+     
+ \item \texttt{linearKinhom}:
+   new argument \texttt{normpower}.
+   
+ \item \texttt{linearKinhom}, \texttt{linearpcfinhom}:
+   \begin{itemize}
+   \item Changed behaviour when \texttt{lambda} is a fitted model.
+   \item New arguments \texttt{update} and \texttt{leaveoneout}.
+   \end{itemize}
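+   For example, a sketch assuming the unmarked network dataset
+   \texttt{spiders} (the trend formula is illustrative):
+<<eval=FALSE>>=
+fit <- lppm(spiders ~ x)
+K <- linearKinhom(spiders, lambda = fit, update = FALSE)
+@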
+   
+ \item \texttt{linearpcf}:
+   new argument \texttt{normpower}.
+   
+ \item \texttt{linnet}:
+   \begin{itemize}
+   \item
+     The internal format of a \texttt{linnet} (linear network) object
+    has been changed. Existing datasets of class \texttt{linnet} 
+    are still supported. However, computation will be faster if they
+    are converted to the new format. To convert a linnet object \texttt{L}
+    to the new format, use \verb!L <- as.linnet(L)!.
+   \item 
+    If the argument \texttt{edges} is given, then this argument
+    now determines the
+    ordering of the sequence of line segments. For example, the \texttt{i}-th
+    row of \texttt{edges} specifies the \texttt{i}-th line segment in 
+    \texttt{as.psp(L)}.
+  \item New argument \texttt{warn}.
+   \end{itemize}
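+  For example, a toy network (coordinates are illustrative):
+<<eval=FALSE>>=
+v <- ppp(x = c(0, 1, 1), y = c(0, 0, 1), window = square(1))
+e <- matrix(c(1, 2, 2, 3), ncol = 2, byrow = TRUE)
+L <- linnet(vertices = v, edges = e)  # row i of e is segment i in as.psp(L)
+L <- as.linnet(L)                     # converts old objects to the new format
+@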
+
+  \item \texttt{lintess}:
+    Argument \texttt{df} can be missing or \texttt{NULL}, 
+    resulting in a tessellation with only one tile.
+  
+  \item \texttt{logLik.ppm}:
+    \begin{itemize}
+    \item  New argument \texttt{absolute}.
+    \item The warning about pseudolikelihood (`log likelihood not available')
+    is given only once, and is not repeated in subsequent calls,
+    within a spatstat session.
+    \end{itemize}
+
+ \item \texttt{logLik.mppm}: new argument \texttt{warn}.
+   
+ \item \texttt{lpp}:
+   \begin{itemize}
+   \item 
+    The internal format of an \texttt{lpp} object
+    has been changed. Existing datasets of class \texttt{lpp} 
+    are still supported. However, computation will be faster if they
+    are converted to the new format. To convert an \texttt{lpp} 
+    object \texttt{X} to the new format, use \verb!X <- as.lpp(X)!.
+   \item
+     \texttt{X} can be missing or \texttt{NULL}, 
+     resulting in an empty point pattern.
+   \end{itemize}
+
+  \item \texttt{lpp}, \texttt{as.lpp}: 
+   These functions now handle the case where coordinates
+   \texttt{seg} and \texttt{tp} are given
+    but \texttt{x} and \texttt{y} are missing.
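+   For example (a sketch on \texttt{simplenet}; the local coordinates
+   are illustrative):
+<<eval=FALSE>>=
+X <- as.lpp(seg = c(1, 3), tp = c(0.25, 0.9), L = simplenet)
+@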
+
+ \item \texttt{lppm}:
+   \begin{itemize}
+   \item New argument \texttt{random} controls placement of dummy points.
+   \item    Computation accelerated.
+   \end{itemize}
+
+%%M
+   
+ \item \texttt{markcorr}:
+   New argument \texttt{weights} allows computation of the weighted version
+   of the mark correlation function.
+
+ \item \texttt{mppm}:
+   \begin{itemize}
+   \item 
+     Now handles models with a random effect component.
+     (This is covered in \cite[Chap.\ 16]{baddrubaturn15}.)
+   \item 
+     New argument \texttt{random} is a formula specifying the random effect.
+     (This is covered in \cite[Chap.\ 16]{baddrubaturn15}.)
+   \item 
+     Performs more checks for consistency of the input data.
+   \item 
+     New arguments \texttt{gcontrol} and \texttt{reltol.pql} control 
+     the fitting algorithm. 
+   \end{itemize}
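+   A sketch of a random intercept for each pattern (the hyperframe and
+   the grouping variable \texttt{id} are illustrative assumptions):
+<<eval=FALSE>>=
+H <- hyperframe(P = waterstriders, id = factor(1:3))
+fit <- mppm(P ~ 1, data = H, random = ~1 | id)
+@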
+
+%%N
+   
+   \item \texttt{nbfires}:
+     the unit of length for the coordinates is now specified in this dataset.
+   
+ \item \texttt{nndist.lpp, nnwhich.lpp, nncross.lpp, distfun.lpp}:
+   New argument \texttt{k} allows computation of $k$-th nearest point.
+   Computation accelerated.
+
+   \item \texttt{nnfun.lpp}: New argument \texttt{k}.
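+   For example:
+<<eval=FALSE>>=
+X <- runiflpp(25, simplenet)
+d <- nndist(X, k = 2)    # distance to the second-nearest point
+w <- nnwhich(X, k = 2)   # index of the second-nearest point
+@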
+%%O
+%%P
+   
+ \item \texttt{padimage}:
+   New argument \texttt{W} allows an image to be padded out to fill any window.
+
+ \item \texttt{pcf.ppp}:
+   \begin{itemize}
+   \item
+     New argument \texttt{close} for advanced use.
+   \item 
+     New argument \texttt{ratio} allows several estimates of pcf to be pooled.
+   \item 
+     Now calculates an analytic approximation to the variance of
+     the estimate of the pair correlation function 
+     (when \texttt{var.approx=TRUE}).
+   \item 
+     Now returns the smoothing bandwidth used, as an attribute of the result.
+   \end{itemize}
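+   A sketch of pooling (the two patterns here are simulated):
+<<eval=FALSE>>=
+p1 <- pcf(runifpoint(100), ratio = TRUE)
+p2 <- pcf(runifpoint(100), ratio = TRUE)
+p <- pool(p1, p2)
+@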
+
+
+   
+ \item \texttt{pcfinhom}:
+   \begin{itemize}
+   \item
+     New argument \texttt{close} for advanced use.
+   \item 
+     Default behaviour is changed when \texttt{lambda} is a fitted model.
+     The default is now to re-fit the model to the data before computing
+     the pcf. New arguments \texttt{update} and \texttt{leaveoneout}
+     control this.
+   \end{itemize}
+
+  \item \texttt{pixellate.ppp}:
+    \begin{itemize}
+    \item If the pattern is empty, the result is an integer-valued image
+    (by default) for consistency with the results for non-empty patterns.
+    \item      Accelerated in the case where weights are given.
+    \item New arguments \texttt{fractional} and \texttt{preserve}
+      for more accurate discretisation.
+    \end{itemize}
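+    For instance:
+<<eval=FALSE>>=
+Z <- pixellate(cells, fractional = TRUE)  # fractional pixel counts
+@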
+
+
+ \item \texttt{plot.anylist}:
+   \begin{itemize}
+   \item 
+   If a list entry \verb!x[[i]]! 
+   belongs to class \texttt{"anylist"}, it will be expanded
+   so that each entry \verb!x[[i]][[j]]! will be plotted as a separate panel.
+   \item 
+     New arguments \texttt{panel.begin.args}, \texttt{panel.end.args}.
+   \item  Result is now an (invisible) list containing the result
+    from executing the plot of each panel.
+   \end{itemize}
+
+
+ \item \texttt{plot.im}:
+   \begin{itemize}
+   \item  Now handles complex-valued images.
+   \item  New argument \texttt{workaround} to avoid a bug in some MacOS
+     device drivers that causes the image to be displayed
+     in the wrong spatial orientation.
+   \end{itemize}
+
+ \item \texttt{plot.imlist}:
+   Result is now an (invisible) list containing the result
+   from executing the plot of each panel.
+
+ \item \texttt{plot.influence.ppm}:
+   New argument \texttt{multiplot}.
+
+ \item \texttt{plot.kppm}:
+   \begin{itemize}
+   \item 
+   New arguments \texttt{pause} and \texttt{xname}.
+ \item 
+    The argument \texttt{what="all"} is now recognised: it selects
+    all the available options. [This is also the default.]
+   \end{itemize}
+
+ \item \texttt{plot.leverage.ppm}:
+   New argument \texttt{multiplot}.
+
+ \item \texttt{plot.linfun}:
+   \begin{itemize}
+   \item  Now passes arguments to the function being plotted.
+   \item A scale bar is now plotted when \texttt{style="width"}.
+   \item New argument \texttt{legend}.
+   \item The return value has a different format.
+  \end{itemize}
+
+ \item \texttt{plot.linim}:
+   \begin{itemize}
+   \item A scale bar is now plotted when \texttt{style="width"}.
+   \item  The return value has a different format.
+   \end{itemize}
+
+ \item \texttt{plot.lintess}:
+   Improved plot method, with more options.
+
+   \item \texttt{plot.lpp}:
+     \begin{itemize}
+     \item New argument \texttt{show.network}.
+     \item 
+     For a point pattern with continuous marks (``real numbers'')
+    the colour arguments \texttt{cols}, \texttt{fg}, \texttt{bg} can now 
+    be vectors of colour values, and will be used to determine
+    the default colour map for the marks.
+     \end{itemize}
+
+   \item \texttt{plot.mppm}:
+   New argument \texttt{se}.
+   
+   \item \texttt{plot.msr}:
+     \begin{itemize}
+     \item  Now handles multitype measures.
+     \item New argument \texttt{multiplot}.
+     \item New argument \texttt{massthresh}.
+     \end{itemize}
+
+
+  \item \texttt{plot.pp3}:
+    New arguments \texttt{box.front}, \texttt{box.back} 
+    control plotting of the box.
+    
+   \item \texttt{plot.ppp}:
+     \begin{itemize}
+     \item  The default colour for the points is now a transparent grey,
+       if this is supported by the plot device.
+     \item For a point pattern with continuous marks (``real numbers'')
+    the colour arguments \texttt{cols}, \texttt{fg}, \texttt{bg} can now 
+    be vectors of colour values, and will be used to determine
+    the default colour map for the marks.
+  \item Now recognises graphics parameters for text, such as 
+    \texttt{family} and \texttt{srt}.
+  \item 
+    When \texttt{clipwin} is given, any parts of the boundary 
+    of the window of \texttt{x} that lie inside \texttt{clipwin} 
+    will also be plotted.
+  \end{itemize}
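+  A sketch of the colour-map behaviour, assuming the \texttt{longleaf}
+  data (whose marks are continuous) and an arbitrary colour ramp:
+<<eval=FALSE>>=
+plot(longleaf, cols = c("blue", "green", "orange", "red"))
+@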
+
+   \item \texttt{plot.profilepl}, \texttt{plot.quadratcount}, 
+     \texttt{plot.quadrattest}, \texttt{plot.tess}:
+     Now recognise graphics parameters for text, such as 
+    \texttt{family} and \texttt{srt}.
+
+    \item \texttt{plot.solist}:
+      \begin{itemize}
+      \item 
+        New arguments \texttt{panel.begin.args}, \texttt{panel.end.args}.
+      \item 
+        Result is now an (invisible) list containing the result
+        from executing the plot of each panel.
+      \end{itemize}
+
+   \item \code{ponderosa}:
+    In this installed dataset, the function \code{ponderosa.extra\$plotit}
+    has changed slightly (to accommodate the
+    dependence on the package \pkg{spatstat.utils}).
+   \item \texttt{polynom}: This function now has a help file.
+     
+   \item \texttt{pool.fv}:
+     \begin{itemize}
+     \item 
+    The default plot of the pooled function no longer includes 
+    the variance curves.
+  \item  New arguments \texttt{relabel} and \texttt{variance}.
+     \end{itemize}
+
+   \item \texttt{pool.rat}:
+     New arguments \texttt{weights}, \texttt{relabel} and \texttt{variance}.
+
+  \item \texttt{ppm}:
+    \begin{itemize}
+    \item 
+      Argument \code{interaction} can now be a function that makes an interaction,
+      such as \code{Poisson}, \code{Hardcore}, \code{MultiHard}.
+    \item 
+      Argument \texttt{subset} can now be a window (class \texttt{"owin"})
+      specifying the sub-region of data to which the model should be fitted.
+    \end{itemize}
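+    For example (the covariate and sub-window are illustrative):
+<<eval=FALSE>>=
+fit <- ppm(cells ~ x, interaction = Hardcore, subset = square(0.5))
+@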
+
+  \item \texttt{ppm.ppp, ppm.quad}:
+   New argument \texttt{emend}, equivalent to \texttt{project}.
+
+ \item \texttt{ppp}:
+       \begin{itemize}
+       \item New argument \texttt{checkdup}.
+       \item 
+       If the coordinate vectors \code{x} and \code{y} contain \code{NA},
+       \code{NaN} or infinite values,
+       these points are deleted with a warning,
+       instead of causing a fatal error.
+       \end{itemize}
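+       A sketch of the new behaviour:
+<<eval=FALSE>>=
+X <- ppp(x = c(0.1, NA, 0.5), y = c(0.2, 0.3, Inf), window = square(1))
+# the second and third points are deleted, with a warning
+@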
+
+  \item \texttt{predict.kppm}, \texttt{residuals.kppm}:
+   These functions now issue a warning when the calculation ignores the 
+   cluster/Cox component and treats the model as if it were Poisson.
+   (This currently happens in \texttt{predict.kppm} when \texttt{se=TRUE}
+   or \texttt{interval != "none"}, and in \texttt{residuals.kppm} when
+   \texttt{type != "raw"}.)
+
+  \item \texttt{predict.mppm}:
+    The argument \texttt{type="all"} is now recognised: it selects
+    all the available options. [This is also the default.]
+
+  \item \texttt{predict.rhohat}:    
+     New argument \texttt{what} determines which value should be calculated:
+    the function estimate, the upper/lower confidence limits, or the
+    standard error.
+
+  \item \texttt{print.quad}: More information is printed.
+    
+  \item \texttt{progressreport}:
+    \begin{itemize}
+    \item Behaviour improved. 
+    \item New arguments \texttt{state}, \texttt{tick}, \texttt{showtime}.
+    \item New option: \verb!style="tk"!
+    \end{itemize}
+
+
+%%Q
+   
+ \item \texttt{quadratcount.ppp}:
+   Computation accelerated in some cases.
+
+ \item \texttt{quadrat.test.ppm}:
+   Computation accelerated in some cases.
+
+ \item \texttt{quantile.ewcdf}:
+    The function is now normalised to the range \verb![0,1]!
+    before the quantiles are computed. 
+    This can be suppressed by setting \texttt{normalise=FALSE}.
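+    For example (with simulated data):
+<<eval=FALSE>>=
+e <- ewcdf(runif(100), weights = runif(100))
+quantile(e, 0.5)                      # normalised to [0,1] first
+quantile(e, 0.5, normalise = FALSE)   # raw (unnormalised) scale
+@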
+
+ \item \texttt{qqplot.ppm}:
+    Argument \texttt{expr} can now be a list of point patterns,
+    or an envelope object containing a list of point patterns.
+    
+%%R
+   
+   \item \texttt{rcellnumber}:
+     New argument \texttt{mu}.
+
+   \item \texttt{rgbim, hsvim}:
+   New argument \texttt{A} controls the alpha (transparency) channel.
+
+   \item \texttt{rgb2hex}, \texttt{col2hex}, \texttt{paletteindex},
+   \texttt{is.colour}, \texttt{samecolour},
+   \texttt{complementarycolour}, \texttt{is.grey}, \texttt{to.grey}:
+   These colour tools now handle transparent colours.
+
+   \item \texttt{rgb2hex}:
+   New argument \texttt{maxColorValue}.
+
+   \item \texttt{rhohat.lpp}:
+   New argument \texttt{random} controls placement of dummy points.
+
+ \item \texttt{rLGCP}:
+    This function no longer requires the package \pkg{RandomFields}
+   to be loaded explicitly. 
+
+  \item \texttt{rMaternI, rMaternII}:
+    These functions can now generate random patterns in 
+    three dimensions and higher dimensions, when the argument
+    \texttt{win} is of class \texttt{box3} or \texttt{boxx}.
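+    For example:
+<<eval=FALSE>>=
+X <- rMaternII(kappa = 100, r = 0.05, win = box3())  # unit cube
+@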
+
+  \item \texttt{rmh}:
+    Accelerated, in the case where multiple patterns are saved 
+    using \texttt{nsave}.
+  
+  \item \texttt{rmh.ppm, rmhmodel.ppm, simulate.ppm}:
+   A model fitted using the \texttt{Penttinen} interaction can now be simulated.
+
+ \item \texttt{rmh.default, rmhmodel.default}:
+   \begin{itemize}
+   \item 
+     These functions now recognise \verb!cif='penttinen'!
+     for the Penttinen interaction.
+   \item 
+     New arguments \texttt{nsim}, \texttt{saveinfo}.
+   \end{itemize}
+
+  \item \texttt{rmhcontrol}:
+    New parameter \texttt{pstage} determines when to generate
+    random proposal points.
+    
+ \item \texttt{rose.default}:
+   New argument \texttt{weights}.
+
+   \item \texttt{rose}:
+   New arguments \texttt{start} and \texttt{clockwise} specify the convention
+   for measuring and plotting angles.
+
+ \item \texttt{rotmean}:
+   New argument \texttt{padzero}. 
+   Default behaviour has changed.
+
+   \item \texttt{rpoispp}:
+   Accelerated, when \texttt{lambda} is a pixel image.
+
+   \item \texttt{rpoisppx}:
+   	New argument \code{drop}.
+	
+   \item \texttt{rpoisline}:
+     Also returns information about the original infinite random lines.
+
+ \item \texttt{rStrauss}, \texttt{rHardcore}, \texttt{rStraussHard}, \texttt{rDiggleGratton}, \texttt{rDGS}, \texttt{rPenttinen}:
+   New argument \texttt{drop}.
+
+   \item \texttt{rthin}:
+   \begin{itemize}
+      \item	
+      Accelerated, when \texttt{P} is a single number.
+      \item
+       \texttt{X} can now be a point pattern on a linear network
+     (class \texttt{lpp}).
+   \end{itemize}
+
+   \item \texttt{rThomas, rMatClust, rCauchy, rVarGamma}:
+     \begin{itemize}
+     \item 
+       When the model is approximately Poisson, it is simulated using
+       \texttt{rpoispp}. 
+       This avoids computations which would require huge amounts of memory. 
+       New argument \texttt{poisthresh} controls this behaviour.
+     \item 
+       New argument \texttt{saveparents}.
+     \end{itemize}
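+     For instance, a sketch (parameter values chosen so that the model
+     is nearly Poisson, triggering the fallback):
+<<eval=FALSE>>=
+X <- rThomas(kappa = 50, scale = 2, mu = 4, poisthresh = 1e-6)
+@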
+
+   \item \texttt{runifpointx}:
+   	New argument \code{drop}.
+	
+%%S   
+   \item Simulation:
+   Several basic simulation algorithms have been accelerated.
+   Consequently, simulation outcomes are not identical to 
+   those obtained with previous versions of \spst, even when the
+   same random seed is used. To ensure compatibility with previous
+   versions of \spst, revert to the slower code by setting
+   \texttt{spatstat.options(fastthin=FALSE, fastpois=FALSE)}.
+
+   \item \code{shapley}:
+    In this installed dataset, the function \code{shapley.extra\$plotit}
+    has changed slightly (to accommodate the
+    dependence on the package \pkg{spatstat.utils}).
+   
+   \item \texttt{simulate.ppm}:
+   New argument \texttt{w} controls the window of the simulated patterns.
+   New argument \texttt{verbose}.
+   
+ \item \texttt{Smooth.ppp}:
+   \begin{itemize}
+   \item A non-Gaussian kernel can now be specified
+   using the argument \texttt{kernel}.
+   \item Argument \texttt{weights} can now be a pixel image.
+   \item Accelerated by about 30\% in the case where \texttt{at="pixels"}.
+   \item Accelerated by about 15\% in the case where \texttt{at="points"}
+     and \texttt{kernel="gaussian"}.
+   \item Now exits gracefully if any mark values are \texttt{NA}, \texttt{NaN}
+     or \texttt{Inf}.
+   \item New argument \texttt{geometric} supports geometric-mean smoothing.
+   \end{itemize}
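+   For example (the bandwidth is chosen arbitrarily):
+<<eval=FALSE>>=
+S1 <- Smooth(longleaf, sigma = 10, kernel = "epanechnikov")
+S2 <- Smooth(longleaf, sigma = 10, geometric = TRUE)  # geometric mean
+@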
+
+ 
+   \item \texttt{spatstat.options}:
+   New options \texttt{fastthin} and \texttt{fastpois} 
+   enable fast simulation algorithms.
+   Set these options to \texttt{FALSE} to reproduce results obtained with
+   previous versions of \spst.
+
+   \item \texttt{split.ppp}:
+   The splitting variable \texttt{f} can now be a logical vector.
+
+   \item \texttt{step}: now works for models of class \texttt{"mppm"}.
+
+   \item \texttt{stieltjes}:
+    Argument \texttt{M} can be a stepfun object (such as an empirical CDF).
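+    For example, a small sketch approximating $E(X^2)$ from a
+    simulated sample:
+<<eval=FALSE>>=
+M <- ecdf(rexp(100))
+stieltjes(function(x) x^2, M)
+@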
+
+  \item \texttt{subset.ppp}, \texttt{subset.lpp}, \texttt{subset.pp3}, 
+     \texttt{subset.ppx}:
+    The argument \texttt{subset} can now be any argument acceptable to
+    the \verb!"["! method.
+
+  \item Summary functions:
+    The argument \texttt{correction="all"} is now recognised: it selects
+    all the available options. 
+    \begin{quote}
+        This applies to
+        \texttt{Fest}, \texttt{F3est}, \texttt{Gest}, 
+        \texttt{Gcross}, \texttt{Gdot}, \texttt{Gmulti}, \texttt{G3est},
+        \texttt{Gfox}, \texttt{Gcom}, \texttt{Gres}, \texttt{Hest}, 
+        \texttt{Jest}, \texttt{Jmulti}, \texttt{Jcross}, \texttt{Jdot}, 
+        \texttt{Jfox}, \texttt{Kest}, \texttt{Kinhom},
+        \texttt{Kmulti}, \texttt{Kcross}, \texttt{Kdot}, \texttt{Kcom}, 
+        \texttt{Kres}, 
+	\texttt{Kmulti.inhom}, \texttt{Kcross.inhom}, 
+        \texttt{Kdot.inhom}, \texttt{Kscaled}, \texttt{Ksector}, 
+        \texttt{Kmark}, \texttt{K3est}, \texttt{Lscaled}, \texttt{markcorr}, 
+        \texttt{markcrosscorr},
+	\texttt{nnorient}, \texttt{pairorient}, \texttt{pcfinhom}, 
+        \texttt{pcfcross.inhom}, \texttt{pcfcross}, \texttt{pcf}, 
+        \texttt{Tstat}.
+    \end{quote}
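+    For example:
+<<eval=FALSE>>=
+K <- Kest(cells, correction = "all")
+@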
+    
+   \item \texttt{summary.ppm}:
+    New argument \texttt{fine} selects the algorithm for variance estimation.
+
+   \item \texttt{summary.owin}, \texttt{summary.im}:
+    The fraction of frame area that is occupied by the window/image
+    is now reported.
+
+  \item \texttt{sumouter}:
+    New argument \texttt{y} allows computation of asymmetric outer products.
+  
+  \item \texttt{symbolmap}:
+    \begin{itemize}
+    \item 
+     Now accepts a vector of colour values for the arguments \texttt{col}, 
+     \texttt{cols}, \texttt{fg}, \texttt{bg} if the argument \texttt{range}
+     is given.
+   \item New option: \texttt{shape="arrows"}.
+    \end{itemize}
+
+
+%%T
+     
+   \item \texttt{tess}:
+   Argument \texttt{window} is ignored when \texttt{xgrid}, \texttt{ygrid} are given.
+
+  \item \texttt{texturemap}:
+    Argument \texttt{textures} can be missing or \texttt{NULL}.
+    
+   \item \texttt{textureplot}: 
+     Argument \texttt{x} can now be something acceptable to \texttt{as.im}.
+     
+   \item \texttt{to.grey}:
+   New argument \texttt{transparent}.
+
+%%U
+   
+  \item \texttt{union.owin}:
+   Improved behaviour when there are more than 2 windows.
+
+   \item \texttt{update}: now works for models of class \texttt{"mppm"}.
+     
+   \item \texttt{update.kppm}:
+   \begin{itemize}
+   \item New argument \texttt{evaluate}.
+   \item Now handles additional arguments in any order, with or without names.
+   \item Changed arguments.
+   \item Improved behaviour.
+   \end{itemize}
+
+%%V
+   
+   \item \texttt{valid.ppm}:
+   This is now a method for the generic function \texttt{valid}.
+
+   \item \texttt{vcov.mppm}:
+     Now handles models with Gibbs interactions.
+
+   \item \texttt{vcov.ppm}:
+     Performance slightly improved, for Gibbs models.
+
+%%W
+%%X
+%%Y
+%%Z
+   
+ \item \verb![<-.im!:
+  Accepts an array for \texttt{value}.
+
+ \item \verb![.im!:
+   The subset index \texttt{i} can now be a linear network.
+   Then the result of \verb!x[i, drop=FALSE]! is a pixel image of
+   class \texttt{linim}.
+
+ \item \verb![.layered!:
+   \begin{itemize}
+   \item 
+       Subset index \texttt{i} can now be an \texttt{owin} object.
+     \item Additional arguments \verb!...! are now passed to other methods.
+   \end{itemize}
+
+       
+ \item \verb![.leverage.ppm!:
+   New argument \texttt{update}.
+       
+ \item \verb![.linnet!, \verb![.lpp!:
+    New argument \texttt{snip} determines what to do with segments 
+    of the network that cross the boundary of the window. 
+    Default behaviour has changed.
+
+\item \verb![.ppx!:
+  The subset index \texttt{i} may now be a spatial domain
+  of class \texttt{boxx} or \texttt{box3}.
+
+   \item \verb![.ppp!:
+     \begin{itemize}
+     \item New argument \texttt{clip} determines whether the window is clipped.
+     \item The previously-unused argument \texttt{drop} now determines whether 
+       to remove unused levels of a factor.
+     \end{itemize}
+
+   \item \verb![.pp3!, \verb![.lpp!, \verb![.ppx!, 
+     \texttt{subset.ppp, subset.pp3, subset.lpp, subset.ppx}:
+   These methods now have an argument \texttt{drop} which determines
+   whether to remove unused levels of a factor.
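+   For example:
+<<eval=FALSE>>=
+Y <- amacrine[marks(amacrine) == "on", drop = TRUE]
+levels(marks(Y))   # the unused level "off" has been removed
+@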
+
+   \item \verb![.psp!:
+   New argument \texttt{fragments} specifies whether to keep fragments of
+    line segments that are cut by the new window, or only to retain
+    segments that lie entirely inside the window.
+     
+ \item \verb![.solist!:
+       Subset index \texttt{i} can now be an \texttt{owin} object.
+ \end{itemize}
+ 
+\section{Serious Bugs Fixed}
+
+<<echo=FALSE,results=hide>>=
+nbugs <- nrow(news(grepl("^BUG", Category), 
+                   package="spatstat"))
+nbugssince <- nrow(news(Version > "1.42-0" & grepl("^BUG", Category), 
+                   package="spatstat"))
+@ 
+
+Hundreds of bugs have been detected and fixed in \spst.
+Bugs that may have affected the user are listed in the 
+package \texttt{NEWS} file. To read all these bug reports, type
+<<eval=FALSE>>=
+news(grepl("^BUG", Category), package="spatstat")
+@ 
+which currently produces a list of \Sexpr{nbugs} bugs,
+of which \Sexpr{nbugssince} were detected after publication of the
+book \cite{baddrubaturn15}.
+
+Following is a list of the {\bf most serious bugs} only, in order
+of potential impact.
+
+\newcommand\bugger[4]{%
+  \\  {} %
+  {\small (Bug introduced in \texttt{spatstat {#1}}, {#2}; %
+    fixed in \texttt{spatstat {#3}}, {#4})}%
+}
+  
+\begin{itemize}
+  
+  %% LEVEL 1: always completely wrong, broad impact
+  
+\item \texttt{nncross.ppp}:
+  Results were completely incorrect if $k > 1$.
+  \bugger{1.31-2}{april 2013}{1.35-0}{december 2013}
+
+\item \texttt{nncross.pp3}:
+  Results were completely incorrect in some cases.
+  \bugger{1.32-0}{august 2013}{1.34-0}{october 2013}
+
+ \item \texttt{cdf.test.ppm}:
+    Calculation of $p$-values was incorrect for Gibbs models: 
+    $1-p$ was computed instead of $p$.
+  \bugger{1.40-0}{december 2014}{1.45-2}{may 2016}
+
+\item \texttt{Smooth.ppp}:
+  Results of \verb!Smooth(X, at="points", leaveoneout=FALSE)!
+  were completely incorrect.
+  \bugger{1.20-5}{august 2010}{1.46-0}{july 2016}
+  
+\item \texttt{rmh}:
+  
+   \begin{itemize}
+   \item Simulation was completely incorrect in the case of 
+     a multitype point process with an interaction that does not depend
+     on the marks, such as \verb!ppm(betacells, ~marks, Strauss(60))!,
+     due to a coding error in the \texttt{C} interface.
+     \bugger{1.22-3}{march 2010}{1.22-3}{june 2011}
+   \item 
+     Simulation of the Area-Interaction model was completely incorrect.
+     \bugger{1.23-6}{october 2011}{1.31-0}{january 2013}
+   \item 
+     Simulation of the Geyer saturation process was completely incorrect.
+     \bugger{1.31-0}{january 2013}{1.31-1}{march 2013}
+   \item 
+     Simulation of the Strauss-Hard Core process was partially incorrect,
+     giving point patterns with a slightly lower intensity.
+     \bugger{1.31-0}{january 2013}{1.37-0}{may 2014}
+   \item
+   The result of simulating a model with a hard core
+   did not necessarily respect the hard core constraint,
+   and simulation of a model with strong inhibition
+   did not necessarily converge. 
+   This only happened if the first order trend was large,
+   the starting state (\texttt{n.start} or \texttt{x.start}) was not given,
+   and the number of iterations \texttt{nrep} was not very large.
+   It occurred because of a poor choice for the default starting state.
+   {\small (Bug was present since about 2010.  
+     Fixed in \texttt{spatstat 1.40-0}, december 2014)}
+   \item 
+     Simulation was incorrect in the case of an inhomogeneous multitype model
+     with \texttt{fixall=TRUE} (i.e.\ with a fixed number of points 
+     of each type) if the model was segregated (i.e.\ if different types
+     of points had different first order trend). 
+     The effect of the error was that all types of points
+     had the same first order trend.
+     {\small (Bug was present since about 2010.
+       Fixed in \texttt{spatstat 1.43-0}, september 2015)}
+  \item 
+     Simulation of the Geyer saturation process was 
+     incorrectly initialised, so that the results of a short run 
+     (i.e. small value of \texttt{nrep}) were incorrect, 
+     while long runs were correct.
+     \bugger{1.17-0}{october 2009}{1.31-1}{march 2013}
+   \end{itemize}
+
+ \item \texttt{rVarGamma}:
+   Simulations were incorrect; they were generated using the wrong value
+   of the parameter \texttt{nu.ker}.  
+   \bugger{1.25-0}{december 2011}{1.35-0}{december 2013}
+
+ \item \texttt{rCauchy}:
+   Simulations were incorrect; they were generated using the wrong value
+   of the parameter \texttt{omega}.
+   \bugger{1.25-0}{december 2011}{1.25-2}{january 2012}
+   
+ \item \texttt{lppm}:
+   For multitype patterns, the fitted model was completely incorrect
+   due to an error in constructing the quadrature scheme.
+   \bugger{1.23-0}{july 2011}{1.30-0}{december 2012}
+   
+ \item \verb![.lpp!:
+   The local coordinate \texttt{seg} was completely incorrect,
+   when \texttt{i} was a window.
+   \bugger{1.31-2}{april 2013}{1.45-0}{march 2016}
+   
+ \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}:
+   Results were incorrect for non-Poisson processes
+   due to a mathematical error.
+   \bugger{1.25-0}{december 2011}{1.51-0}{may 2017}
+
+   %% LEVEL 2: often completely wrong, moderate impact
+   
+ \item \texttt{bw.pcf}:
+   Results were totally incorrect due to a typo.
+   \bugger{1.51-0}{may 2017}{1.52-0}{august 2017}   
+   
+ \item \texttt{predict.rho2hat}:
+   Results were incorrect for a \texttt{rho2hat} object computed
+   from a point pattern.
+   \bugger{1.42-0}{may 2015}{1.52-0}{august 2017}   
+   
+\item \texttt{envelope.ppm}:
+   If the model was an inhomogeneous Poisson process, 
+   the resulting envelope object was incorrect
+   (the simulations were correct, but the envelopes were calculated
+   assuming the model was CSR). 
+   \bugger{1.23-5}{september 2011}{1.23-6}{october 2011}
+
+ \item \texttt{linearK}, \texttt{linearpcf},
+   \texttt{linearKinhom}, \texttt{linearpcfinhom}
+   and multitype versions:
+    The values of these functions were sometimes greatly underestimated
+    when the network had segments shorter than 10 coordinate units.
+   \bugger{1.44-0}{december 2015}{1.46-2}{july 2016}
+
+ \item \texttt{nncross}, \texttt{distfun}, \texttt{AreaInter}:
+  Results of \texttt{nncross} were possibly incorrect 
+  when \code{X} and \code{Y} did not have the same window. 
+  This bug affected values of \texttt{distfun} and may also 
+  have affected ppm objects with interaction \texttt{AreaInter}.
+  \bugger{1.9-4}{june 2006}{1.25-2}{january 2012}
+
+ \item \texttt{update.kppm}:
+  If the call to \texttt{update} did not include a formula argument
+   or a point pattern argument, then all arguments were ignored. 
+   Example: \texttt{update(fit, improve.type="quasi")} was identical to 
+   \texttt{fit}.
+   \bugger{1.42-2}{june 2015}{1.45-0}{march 2016}
+
+  \item \texttt{markcorrint}:
+   Results were completely incorrect.
+   \bugger{1.39-0}{october 2014}{1.40-0}{december 2014}
+
+  %% LEVEL 3: substantially incorrect, moderate impact
+  
+\item \texttt{density.ppp}:
+  Values of \verb!density(X, at="points")!
+  and \verb!Smooth(X, at="points")!
+  were sometimes incorrect, due to omission of
+  the contribution from the data point with the smallest $x$ coordinate.
+  \bugger{1.26-0}{april 2012}{1.46-1}{july 2016}
+  
+ \item \texttt{update.ppm}:
+   If the argument \texttt{Q} was given,
+   the results were usually incorrect, or an error was generated.
+   \bugger{1.38-0}{august 2014}{1.38-1}{august 2014}
+
+\item \texttt{subfits}:
+    The interaction coefficients of the submodels were incorrect
+    for Gibbs models with a multitype interaction (\texttt{MultiStrauss}, etc).
+   \bugger{1.35-0}{december 2013}{1.45-2}{may 2016}
+   
+\item \texttt{F3est}:
+   Estimates of $F(r)$ for the largest value of $r$ 
+   were wildly incorrect. 
+     {\small (Bug was present since about 2010.
+       Fixed in \texttt{spatstat 1.48-0}, december 2016)}
+   
+ \item \texttt{kppm}, \texttt{matclust.estpcf}, \texttt{pcfmodel}:
+    The pair correlation function of the M\'atern Cluster Process
+    was evaluated incorrectly at distances close to 0.
+    This could have affected the fitted parameters 
+    in \texttt{matclust.estpcf()} or \texttt{kppm(clusters="MatClust")}.
+    \bugger{1.20-2}{august 2010}{1.33-0}{september 2013}
+    
+ \item \texttt{ppm}:
+   Results were incorrect for the Geyer saturation model
+   with a non-integer value of the saturation parameter \texttt{sat}.
+   \bugger{1.20-0}{july 2010}{1.31-2}{april 2013}
+   
+ \item \texttt{clip.infline}: 
+   Results were incorrect unless the midpoint of the window
+   was the coordinate origin.
+   \bugger{1.15-1}{april 2009}{1.48-0}{december 2016}
+   
+ \item \texttt{intensity.ppm}:
+   Result was incorrect for Gibbs models if the model was 
+    exactly equivalent to a Poisson process (i.e. if all interaction
+    coefficients were exactly zero).
+   \bugger{1.28-1}{june 2012}{1.47-0}{october 2016}
+   
+ \item \texttt{funxy}: 
+   Did not correctly handle one-line functions. 
+   The resulting objects evaluated the wrong function in some cases.
+   \bugger{1.45-0}{march 2016}{1.46-0}{july 2016}   
+
+%% LEVEL 4: partially incorrect
+   
+\item \texttt{density.ppp}:
+  If the smoothing bandwidth \texttt{sigma} was very small 
+  (e.g.\ less than the width of a pixel), 
+  results were inaccurate if the default resolution was used,
+  and completely incorrect if a user-specified resolution was given.
+  \bugger{1.26-0}{april 2012}{1.52-0}{august 2017}
+  
+ \item \texttt{selfcrossing.psp}:
+   $y$ coordinate values were incorrect.   
+   \bugger{1.23-2}{august 2011}{1.25-3}{february 2012}
+     
+ \item \texttt{Geyer}:
+   For point process models with the \texttt{Geyer} interaction, 
+   \texttt{vcov.ppm} and \texttt{suffstat} sometimes gave incorrect answers.
+   \bugger{1.27-0}{may 2012}{1.30-0}{december 2012}
+   
+ \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}:
+    Calculations were incorrect for a Geyer model fitted using
+    an edge correction other than \texttt{"border"} or \texttt{"none"}.
+   \bugger{1.25-0}{december 2011}{1.51-0}{may 2017}
+
+ \item \texttt{vcov.ppm}, \texttt{suffstat}:
+   These functions sometimes gave incorrect values 
+   for marked point process models.   
+   \bugger{1.27-0}{may 2012}{1.29-0}{october 2012}
+   
+ \item \texttt{diagnose.ppm}:
+   When applied to a model obtained from \texttt{subfits()}, 
+   in the default case (\texttt{oldstyle=FALSE}) 
+   the variance calculations were incorrect.
+   Consequently the dotted lines representing significance bands were 
+   incorrect. An error or warning about negative variances occurred sometimes.
+   However, calculations with \texttt{oldstyle=TRUE} were correct.
+   The default has now been changed to \texttt{oldstyle=TRUE} for such models.
+   \bugger{1.35-0}{december 2013}{1.45-0}{march 2016}
+
+ \item \texttt{Smooth.ppp}:
+   Results for \verb!at="points"! were garbled, for some values of 
+   \texttt{sigma}, if \texttt{X} had more than one column of marks.
+   \bugger{1.38-0}{october 2014}{1.46-0}{july 2016}
+    
+ \item \texttt{linearK}, \texttt{linearKinhom}:
+   If any data points were located exactly at a vertex of the 
+   linear network, the weights for Ang's correction were incorrect, 
+   due to numerical error. This sometimes produced infinite 
+   or NA values of the linear $K$ function.  
+   \bugger{1.23-0}{july 2011}{1.27-0}{may 2012}
+
+ \item \texttt{Kinhom}, \texttt{Linhom}:
+   the results were not renormalised (even if \texttt{renormalise=TRUE})
+   in some cases.
+   \bugger{1.21-0}{december 2010}{1.37-0}{may 2014}
+     
+ \item \texttt{Kinhom}, \texttt{Linhom}:
+   Ignored argument \texttt{reciplambda2} in some cases.
+   \bugger{1.39-0}{october 2014}{1.40-0}{december 2014}
+
+ \item \texttt{Kinhom}, \texttt{Linhom}:
+   Calculations were incorrect if \texttt{lambda} was a fitted point
+   process model.
+   \bugger{1.38-0}{august 2014}{1.38-1}{august 2014}
+     
+ \item \texttt{integral.linim}, \texttt{integral.linfun}:
+   \begin{itemize}
+   \item 
+   results were inaccurate because of a bias in the distribution of
+   sample points.
+   \bugger{1.41-0}{february 2015}{1.47-0}{october 2016}
+   \item 
+   results were inaccurate if many of the segment lengths were
+   shorter than the width of a pixel.
+   \bugger{1.41-0}{february 2015}{1.48-0}{december 2016}
+   \end{itemize}
+   
+ \item \texttt{predict.ppm}:
+   Calculation of the conditional intensity omitted the edge correction
+   if \texttt{correction='translate'} or \texttt{correction='periodic'}. 
+   \bugger{1.17-0}{october 2009}{1.31-3}{may 2013}
+
+ \item \texttt{varblock}:
+   Calculations were incorrect if more than one column of 
+   edge corrections was computed. 
+   \bugger{1.21-1}{november 2010}{1.39-0}{october 2014}
+   
+ \item \texttt{scan.test}:
+   Results were sometimes incorrect due to numerical instability
+   (a `Gibbs phenomenon').
+   \bugger{1.24-1}{october 2011}{1.26-1}{april 2012}
+
+ \item \texttt{relrisk}:
+   When \verb!at="pixels"!, a small fraction of pixel values were sometimes
+   wildly inaccurate, due to numerical errors. This affected the 
+   range of values in the result, and therefore the appearance of plots.
+   {\small (Bug fixed in \texttt{spatstat 1.40-0}, december 2014)}
+
+ \item \texttt{predict.slrm}:
+   Results of \texttt{predict(object, newdata)} were incorrect 
+   if the spatial domain of \texttt{newdata}
+   was larger than the original domain.
+   \bugger{1.21-0}{november 2010}{1.25-3}{february 2012}
+   
+ \item \texttt{Lest}:
+   The variance approximations (Lotwick-Silverman and Ripley)
+   obtained with \texttt{var.approx=TRUE} were incorrect for \texttt{Lest}
+   (although they were correct for \texttt{Kest}) due to a coding error.
+   \bugger{1.24-1}{october 2011}{1.24-2}{november 2011}
+ 
+ \item \texttt{bw.diggle}:
+   Bandwidth was too large by a factor of 2.
+   \bugger{1.23-4}{september 2011}{1.23-5}{september 2011}
+ 
+ \item Pair correlation functions (\texttt{pcf.ppp}, \texttt{pcfdot}, 
+    \texttt{pcfcross}, etc.):
+    The result had a negative bias at the maximum $r$ value,
+    because contributions to the pcf estimate from interpoint distances
+    greater than \texttt{max(r)} were mistakenly omitted. 
+    {\small (Bugs fixed in \texttt{spatstat 1.35-0}, december 2013)}
+    
+ \item \texttt{Kest}, \texttt{Lest}:
+   Gave incorrect values in very large datasets, due to numerical overflow.
+   `Very large' typically means about 1 million points in a random pattern, 
+   or 100,000 points in a tightly clustered pattern.
+   [Overflow cannot occur unless there are at least 46,341 points.]
+   
+ \item \texttt{bw.relrisk}:
+    Implementation of \texttt{method="weightedleastsquares"} was incorrect
+    and was equivalent to \texttt{method="leastsquares"}.
+    \bugger{1.21-0}{november 2010}{1.23-4}{september 2011}
+    
+\item \texttt{triangulate.owin}:
+   Results were incorrect in some special cases.
+   \bugger{1.42-2}{june 2015}{1.44-0}{december 2015}
+  
+\item \texttt{crosspairs}:
+   If \texttt{X} and \texttt{Y} were identical point patterns,
+   the result was not necessarily symmetric
+   (on some machines) due to numerical artifacts.
+   \bugger{1.35-0}{december 2013}{1.44-0}{december 2015}
+
+ \item \texttt{bdist.tiles}:
+   Values were incorrect in some cases due to numerical error.
+    {\small (Bug fixed in \texttt{spatstat 1.29-0}, october 2012)}
+   
+\item \texttt{Kest.fft}:
+  Result was incorrectly normalised.
+   \bugger{1.21-2}{january 2011}{1.44-0}{december 2015}
+  
+\item \texttt{crossdist.ppp}:
+  Ignored argument \texttt{squared} if \texttt{periodic=FALSE}.
+    {\small (Bug fixed in \texttt{spatstat 1.38-0}, july 2014)}
+
+\item polygon geometry:
+    The point-in-polygon test gave the wrong answer in some boundary cases.
+    {\small (Bug fixed in \texttt{spatstat 1.23-2}, august 2011)}
+
+\item \texttt{MultiStraussHard}:
+    If a fitted model with \texttt{MultiStraussHard} interaction was invalid,
+    \texttt{project.ppm} sometimes yielded a model that was still invalid.
+    {\small (Bug fixed in \texttt{spatstat 1.42-0}, may 2015)}
+    
+\item \texttt{pool.envelope}:  
+  Did not always respect the value of \texttt{use.theory}.
+    \bugger{1.23-5}{september 2011}{1.43-0}{september 2015}
+
+\item \texttt{nncross.lpp}, \texttt{nnwhich.lpp}, \texttt{distfun.lpp}:
+  Sometimes caused a segmentation fault.
+    \bugger{1.44-0}{december 2015}{1.44-1}{december 2015}
+
+\item \texttt{anova.ppm}:
+  If a single \texttt{object} was given, and it was a Gibbs model,
+  then \texttt{adjust} was effectively set to \texttt{FALSE}.
+  \bugger{1.39-0}{october 2014}{1.44-1}{december 2015}
+
+\end{itemize}
+
+
+\begin{thebibliography}{1}
+\bibitem{badd10wshop}
+A.~Baddeley.
+\newblock Analysing spatial point patterns in {{R}}.
+\newblock Technical report, CSIRO, 2010.
+\newblock Version 4.
+\newblock URL \texttt{https://research.csiro.au/software/r-workshop-notes/}.
+
+\bibitem{baddrubaturn15}
+A. Baddeley, E. Rubak, and R. Turner.
+\newblock {\em Spatial Point Patterns: Methodology and Applications with {{R}}}.
+\newblock Chapman \& Hall/CRC Press, 2015.
+
+\end{thebibliography}
+ 
+\end{document}
